Diffstat (limited to 'deps/v8/src')
-rw-r--r--  deps/v8/src/OWNERS | 2
-rwxr-xr-x  deps/v8/src/SConscript | 413
-rw-r--r--  deps/v8/src/accessors.cc | 406
-rw-r--r--  deps/v8/src/accessors.h | 110
-rw-r--r--  deps/v8/src/allocation-site-scopes.cc | 108
-rw-r--r--  deps/v8/src/allocation-site-scopes.h | 115
-rw-r--r--  deps/v8/src/allocation-tracker.cc | 279
-rw-r--r--  deps/v8/src/allocation-tracker.h | 138
-rw-r--r--  deps/v8/src/allocation.cc | 6
-rw-r--r--  deps/v8/src/api.cc | 4378
-rw-r--r--  deps/v8/src/api.h | 180
-rw-r--r--  deps/v8/src/apinatives.js | 66
-rw-r--r--  deps/v8/src/apiutils.h | 29
-rw-r--r--  deps/v8/src/arguments.cc | 120
-rw-r--r--  deps/v8/src/arguments.h | 230
-rw-r--r--  deps/v8/src/arm/assembler-arm-inl.h | 151
-rw-r--r--  deps/v8/src/arm/assembler-arm.cc | 1276
-rw-r--r--  deps/v8/src/arm/assembler-arm.h | 620
-rw-r--r--  deps/v8/src/arm/builtins-arm.cc | 836
-rw-r--r--  deps/v8/src/arm/code-stubs-arm.cc | 5308
-rw-r--r--  deps/v8/src/arm/code-stubs-arm.h | 462
-rw-r--r--  deps/v8/src/arm/codegen-arm.cc | 583
-rw-r--r--  deps/v8/src/arm/codegen-arm.h | 27
-rw-r--r--  deps/v8/src/arm/constants-arm.cc | 12
-rw-r--r--  deps/v8/src/arm/constants-arm.h | 100
-rw-r--r--  deps/v8/src/arm/cpu-arm.cc | 11
-rw-r--r--  deps/v8/src/arm/debug-arm.cc | 49
-rw-r--r--  deps/v8/src/arm/deoptimizer-arm.cc | 975
-rw-r--r--  deps/v8/src/arm/disasm-arm.cc | 353
-rw-r--r--  deps/v8/src/arm/frames-arm.cc | 18
-rw-r--r--  deps/v8/src/arm/frames-arm.h | 49
-rw-r--r--  deps/v8/src/arm/full-codegen-arm.cc | 1156
-rw-r--r--  deps/v8/src/arm/ic-arm.cc | 433
-rw-r--r--  deps/v8/src/arm/lithium-arm.cc | 1559
-rw-r--r--  deps/v8/src/arm/lithium-arm.h | 1622
-rw-r--r--  deps/v8/src/arm/lithium-codegen-arm.cc | 4166
-rw-r--r--  deps/v8/src/arm/lithium-codegen-arm.h | 207
-rw-r--r--  deps/v8/src/arm/lithium-gap-resolver-arm.cc | 19
-rw-r--r--  deps/v8/src/arm/lithium-gap-resolver-arm.h | 2
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.cc | 1729
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.h | 486
-rw-r--r--  deps/v8/src/arm/regexp-macro-assembler-arm.cc | 252
-rw-r--r--  deps/v8/src/arm/regexp-macro-assembler-arm.h | 22
-rw-r--r--  deps/v8/src/arm/simulator-arm.cc | 977
-rw-r--r--  deps/v8/src/arm/simulator-arm.h | 54
-rw-r--r--  deps/v8/src/arm/stub-cache-arm.cc | 3731
-rw-r--r--  deps/v8/src/array-iterator.js | 126
-rw-r--r--  deps/v8/src/array.js | 451
-rw-r--r--  deps/v8/src/arraybuffer.js | 111
-rw-r--r--  deps/v8/src/assembler.cc | 410
-rw-r--r--  deps/v8/src/assembler.h | 235
-rw-r--r--  deps/v8/src/assert-scope.h | 184
-rw-r--r--  deps/v8/src/ast.cc | 369
-rw-r--r--  deps/v8/src/ast.h | 1586
-rw-r--r--  deps/v8/src/atomicops.h | 17
-rw-r--r--  deps/v8/src/atomicops_internals_mips_gcc.h | 17
-rw-r--r--  deps/v8/src/atomicops_internals_tsan.h | 400
-rw-r--r--  deps/v8/src/atomicops_internals_x86_gcc.cc | 8
-rw-r--r--  deps/v8/src/atomicops_internals_x86_gcc.h | 2
-rw-r--r--  deps/v8/src/bignum-dtoa.cc | 2
-rw-r--r--  deps/v8/src/bignum.cc | 10
-rw-r--r--  deps/v8/src/bootstrapper.cc | 1056
-rw-r--r--  deps/v8/src/bootstrapper.h | 32
-rw-r--r--  deps/v8/src/builtins.cc | 1117
-rw-r--r--  deps/v8/src/builtins.h | 190
-rw-r--r--  deps/v8/src/cached-powers.cc | 2
-rw-r--r--  deps/v8/src/char-predicates-inl.h | 12
-rw-r--r--  deps/v8/src/char-predicates.h | 2
-rw-r--r--  deps/v8/src/checks.cc | 71
-rw-r--r--  deps/v8/src/checks.h | 40
-rw-r--r--  deps/v8/src/circular-queue-inl.h | 55
-rw-r--r--  deps/v8/src/circular-queue.cc | 122
-rw-r--r--  deps/v8/src/circular-queue.h | 73
-rw-r--r--  deps/v8/src/code-stubs-hydrogen.cc | 1266
-rw-r--r--  deps/v8/src/code-stubs.cc | 1017
-rw-r--r--  deps/v8/src/code-stubs.h | 1719
-rw-r--r--  deps/v8/src/code.h | 4
-rw-r--r--  deps/v8/src/codegen.cc | 101
-rw-r--r--  deps/v8/src/codegen.h | 16
-rw-r--r--  deps/v8/src/collection.js | 246
-rw-r--r--  deps/v8/src/compilation-cache.cc | 31
-rw-r--r--  deps/v8/src/compilation-cache.h | 5
-rw-r--r--  deps/v8/src/compiler.cc | 825
-rw-r--r--  deps/v8/src/compiler.h | 318
-rw-r--r--  deps/v8/src/contexts.cc | 91
-rw-r--r--  deps/v8/src/contexts.h | 103
-rw-r--r--  deps/v8/src/conversions-inl.h | 48
-rw-r--r--  deps/v8/src/conversions.cc | 23
-rw-r--r--  deps/v8/src/conversions.h | 11
-rw-r--r--  deps/v8/src/counters.cc | 46
-rw-r--r--  deps/v8/src/counters.h | 130
-rw-r--r--  deps/v8/src/cpu-profiler-inl.h | 39
-rw-r--r--  deps/v8/src/cpu-profiler.cc | 646
-rw-r--r--  deps/v8/src/cpu-profiler.h | 238
-rw-r--r--  deps/v8/src/cpu.cc | 466
-rw-r--r--  deps/v8/src/cpu.h | 91
-rw-r--r--  deps/v8/src/d8-debug.cc | 78
-rw-r--r--  deps/v8/src/d8-debug.h | 20
-rw-r--r--  deps/v8/src/d8-posix.cc | 110
-rw-r--r--  deps/v8/src/d8-readline.cc | 43
-rw-r--r--  deps/v8/src/d8.cc | 1552
-rw-r--r--  deps/v8/src/d8.gyp | 30
-rw-r--r--  deps/v8/src/d8.h | 164
-rw-r--r--  deps/v8/src/d8.js | 688
-rw-r--r--  deps/v8/src/data-flow.h | 57
-rw-r--r--  deps/v8/src/date.js | 51
-rw-r--r--  deps/v8/src/dateparser.cc | 1
-rw-r--r--  deps/v8/src/debug-agent.cc | 130
-rw-r--r--  deps/v8/src/debug-agent.h | 36
-rw-r--r--  deps/v8/src/debug-debugger.js | 230
-rw-r--r--  deps/v8/src/debug.cc | 531
-rw-r--r--  deps/v8/src/debug.h | 73
-rw-r--r--  deps/v8/src/defaults.cc | 70
-rw-r--r--  deps/v8/src/deoptimizer.cc | 2525
-rw-r--r--  deps/v8/src/deoptimizer.h | 461
-rw-r--r--  deps/v8/src/disassembler.cc | 65
-rw-r--r--  deps/v8/src/disassembler.h | 2
-rw-r--r--  deps/v8/src/dtoa.cc | 2
-rw-r--r--  deps/v8/src/effects.h | 361
-rw-r--r--  deps/v8/src/elements-kind.cc | 10
-rw-r--r--  deps/v8/src/elements-kind.h | 14
-rw-r--r--  deps/v8/src/elements.cc | 908
-rw-r--r--  deps/v8/src/elements.h | 51
-rw-r--r--  deps/v8/src/execution.cc | 249
-rw-r--r--  deps/v8/src/execution.h | 66
-rw-r--r--  deps/v8/src/extensions/externalize-string-extension.cc | 52
-rw-r--r--  deps/v8/src/extensions/externalize-string-extension.h | 4
-rw-r--r--  deps/v8/src/extensions/gc-extension.cc | 23
-rw-r--r--  deps/v8/src/extensions/gc-extension.h | 6
-rw-r--r--  deps/v8/src/extensions/statistics-extension.cc | 16
-rw-r--r--  deps/v8/src/extensions/statistics-extension.h | 2
-rw-r--r--  deps/v8/src/factory.cc | 611
-rw-r--r--  deps/v8/src/factory.h | 252
-rw-r--r--  deps/v8/src/fixed-dtoa.cc | 2
-rw-r--r--  deps/v8/src/flag-definitions.h | 391
-rw-r--r--  deps/v8/src/flags.cc | 78
-rw-r--r--  deps/v8/src/flags.h | 3
-rw-r--r--  deps/v8/src/frames-inl.h | 132
-rw-r--r--  deps/v8/src/frames.cc | 706
-rw-r--r--  deps/v8/src/frames.h | 397
-rw-r--r--  deps/v8/src/full-codegen.cc | 543
-rw-r--r--  deps/v8/src/full-codegen.h | 174
-rw-r--r--  deps/v8/src/func-name-inferrer.cc | 6
-rw-r--r--  deps/v8/src/gdb-jit.cc | 196
-rw-r--r--  deps/v8/src/gdb-jit.h | 6
-rw-r--r--  deps/v8/src/generator.js | 92
-rw-r--r--  deps/v8/src/global-handles.cc | 696
-rw-r--r--  deps/v8/src/global-handles.h | 299
-rw-r--r--  deps/v8/src/globals.h | 202
-rw-r--r--  deps/v8/src/handles-inl.h | 157
-rw-r--r--  deps/v8/src/handles.cc | 474
-rw-r--r--  deps/v8/src/handles.h | 138
-rw-r--r--  deps/v8/src/harmony-array.js | 124
-rw-r--r--  deps/v8/src/harmony-math.js | 60
-rw-r--r--  deps/v8/src/harmony-string.js | 154
-rw-r--r--  deps/v8/src/heap-inl.h | 417
-rw-r--r--  deps/v8/src/heap-profiler.cc | 218
-rw-r--r--  deps/v8/src/heap-profiler.h | 85
-rw-r--r--  deps/v8/src/heap-snapshot-generator-inl.h | 88
-rw-r--r--  deps/v8/src/heap-snapshot-generator.cc | 3035
-rw-r--r--  deps/v8/src/heap-snapshot-generator.h | 705
-rw-r--r--  deps/v8/src/heap.cc | 3151
-rw-r--r--  deps/v8/src/heap.h | 1129
-rw-r--r--  deps/v8/src/hydrogen-alias-analysis.h | 96
-rw-r--r--  deps/v8/src/hydrogen-bce.cc | 437
-rw-r--r--  deps/v8/src/hydrogen-bce.h | 74
-rw-r--r--  deps/v8/src/hydrogen-bch.cc | 402
-rw-r--r--  deps/v8/src/hydrogen-bch.h (renamed from deps/v8/src/inspector.h) | 41
-rw-r--r--  deps/v8/src/hydrogen-canonicalize.cc | 78
-rw-r--r--  deps/v8/src/hydrogen-canonicalize.h | 51
-rw-r--r--  deps/v8/src/hydrogen-check-elimination.cc | 357
-rw-r--r--  deps/v8/src/hydrogen-check-elimination.h | 52
-rw-r--r--  deps/v8/src/hydrogen-dce.cc | 129
-rw-r--r--  deps/v8/src/hydrogen-dce.h | 57
-rw-r--r--  deps/v8/src/hydrogen-dehoist.cc | 80
-rw-r--r--  deps/v8/src/hydrogen-dehoist.h | 51
-rw-r--r--  deps/v8/src/hydrogen-environment-liveness.cc | 244
-rw-r--r--  deps/v8/src/hydrogen-environment-liveness.h | 88
-rw-r--r--  deps/v8/src/hydrogen-escape-analysis.cc | 333
-rw-r--r--  deps/v8/src/hydrogen-escape-analysis.h | 91
-rw-r--r--  deps/v8/src/hydrogen-flow-engine.h | 235
-rw-r--r--  deps/v8/src/hydrogen-gvn.cc | 853
-rw-r--r--  deps/v8/src/hydrogen-gvn.h | 89
-rw-r--r--  deps/v8/src/hydrogen-infer-representation.cc | 184
-rw-r--r--  deps/v8/src/hydrogen-infer-representation.h | 57
-rw-r--r--  deps/v8/src/hydrogen-infer-types.cc | 77
-rw-r--r--  deps/v8/src/hydrogen-infer-types.h (renamed from deps/v8/src/platform-tls-mac.h) | 43
-rw-r--r--  deps/v8/src/hydrogen-instructions.cc | 3149
-rw-r--r--  deps/v8/src/hydrogen-instructions.h | 6329
-rw-r--r--  deps/v8/src/hydrogen-load-elimination.cc | 510
-rw-r--r--  deps/v8/src/hydrogen-load-elimination.h | 50
-rw-r--r--  deps/v8/src/hydrogen-mark-deoptimize.cc | 84
-rw-r--r--  deps/v8/src/hydrogen-mark-deoptimize.h | 75
-rw-r--r--  deps/v8/src/hydrogen-mark-unreachable.cc | 77
-rw-r--r--  deps/v8/src/hydrogen-mark-unreachable.h | 53
-rw-r--r--  deps/v8/src/hydrogen-minus-zero.cc | 83
-rw-r--r--  deps/v8/src/hydrogen-minus-zero.h | 56
-rw-r--r--  deps/v8/src/hydrogen-osr.cc | 125
-rw-r--r--  deps/v8/src/hydrogen-osr.h | 77
-rw-r--r--  deps/v8/src/hydrogen-range-analysis.cc | 200
-rw-r--r--  deps/v8/src/hydrogen-range-analysis.h (renamed from deps/v8/src/inspector.cc) | 44
-rw-r--r--  deps/v8/src/hydrogen-redundant-phi.cc | 88
-rw-r--r--  deps/v8/src/hydrogen-redundant-phi.h | 56
-rw-r--r--  deps/v8/src/hydrogen-removable-simulates.cc | 94
-rw-r--r--  deps/v8/src/hydrogen-removable-simulates.h | 51
-rw-r--r--  deps/v8/src/hydrogen-representation-changes.cc | 197
-rw-r--r--  deps/v8/src/hydrogen-representation-changes.h | 55
-rw-r--r--  deps/v8/src/hydrogen-sce.cc (renamed from deps/v8/src/platform-tls-win32.h) | 56
-rw-r--r--  deps/v8/src/hydrogen-sce.h (renamed from deps/v8/src/v8preparserdll-main.cc) | 31
-rw-r--r--  deps/v8/src/hydrogen-uint32-analysis.cc | 236
-rw-r--r--  deps/v8/src/hydrogen-uint32-analysis.h | 59
-rw-r--r--  deps/v8/src/hydrogen.cc | 9632
-rw-r--r--  deps/v8/src/hydrogen.h | 1809
-rw-r--r--  deps/v8/src/i18n.cc | 1070
-rw-r--r--  deps/v8/src/i18n.h | 154
-rw-r--r--  deps/v8/src/i18n.js | 2116
-rw-r--r--  deps/v8/src/ia32/assembler-ia32-inl.h | 126
-rw-r--r--  deps/v8/src/ia32/assembler-ia32.cc | 517
-rw-r--r--  deps/v8/src/ia32/assembler-ia32.h | 323
-rw-r--r--  deps/v8/src/ia32/builtins-ia32.cc | 850
-rw-r--r--  deps/v8/src/ia32/code-stubs-ia32.cc | 4810
-rw-r--r--  deps/v8/src/ia32/code-stubs-ia32.h | 272
-rw-r--r--  deps/v8/src/ia32/codegen-ia32.cc | 829
-rw-r--r--  deps/v8/src/ia32/codegen-ia32.h | 18
-rw-r--r--  deps/v8/src/ia32/cpu-ia32.cc | 14
-rw-r--r--  deps/v8/src/ia32/debug-ia32.cc | 20
-rw-r--r--  deps/v8/src/ia32/deoptimizer-ia32.cc | 983
-rw-r--r--  deps/v8/src/ia32/disasm-ia32.cc | 44
-rw-r--r--  deps/v8/src/ia32/frames-ia32.cc | 16
-rw-r--r--  deps/v8/src/ia32/frames-ia32.h | 49
-rw-r--r--  deps/v8/src/ia32/full-codegen-ia32.cc | 976
-rw-r--r--  deps/v8/src/ia32/ic-ia32.cc | 395
-rw-r--r--  deps/v8/src/ia32/lithium-codegen-ia32.cc | 4733
-rw-r--r--  deps/v8/src/ia32/lithium-codegen-ia32.h | 311
-rw-r--r--  deps/v8/src/ia32/lithium-gap-resolver-ia32.cc | 121
-rw-r--r--  deps/v8/src/ia32/lithium-gap-resolver-ia32.h | 6
-rw-r--r--  deps/v8/src/ia32/lithium-ia32.cc | 1517
-rw-r--r--  deps/v8/src/ia32/lithium-ia32.h | 1328
-rw-r--r--  deps/v8/src/ia32/macro-assembler-ia32.cc | 1198
-rw-r--r--  deps/v8/src/ia32/macro-assembler-ia32.h | 246
-rw-r--r--  deps/v8/src/ia32/regexp-macro-assembler-ia32.cc | 151
-rw-r--r--  deps/v8/src/ia32/regexp-macro-assembler-ia32.h | 7
-rw-r--r--  deps/v8/src/ia32/stub-cache-ia32.cc | 3154
-rw-r--r--  deps/v8/src/ic-inl.h | 34
-rw-r--r--  deps/v8/src/ic.cc | 3262
-rw-r--r--  deps/v8/src/ic.h | 723
-rw-r--r--  deps/v8/src/icu_util.cc | 62
-rw-r--r--  deps/v8/src/icu_util.h (renamed from deps/v8/src/platform-posix.h) | 15
-rw-r--r--  deps/v8/src/incremental-marking-inl.h | 27
-rw-r--r--  deps/v8/src/incremental-marking.cc | 338
-rw-r--r--  deps/v8/src/incremental-marking.h | 38
-rw-r--r--  deps/v8/src/interface.cc | 17
-rw-r--r--  deps/v8/src/interface.h | 35
-rw-r--r--  deps/v8/src/interpreter-irregexp.cc | 16
-rw-r--r--  deps/v8/src/isolate-inl.h | 23
-rw-r--r--  deps/v8/src/isolate.cc | 877
-rw-r--r--  deps/v8/src/isolate.h | 354
-rw-r--r--  deps/v8/src/json-parser.h | 389
-rw-r--r--  deps/v8/src/json-stringifier.h | 855
-rw-r--r--  deps/v8/src/json.js | 163
-rw-r--r--  deps/v8/src/jsregexp-inl.h | 106
-rw-r--r--  deps/v8/src/jsregexp.cc | 500
-rw-r--r--  deps/v8/src/jsregexp.h | 154
-rw-r--r--  deps/v8/src/lazy-instance.h | 19
-rw-r--r--  deps/v8/src/list-inl.h | 21
-rw-r--r--  deps/v8/src/list.h | 8
-rw-r--r--  deps/v8/src/lithium-allocator-inl.h | 21
-rw-r--r--  deps/v8/src/lithium-allocator.cc | 482
-rw-r--r--  deps/v8/src/lithium-allocator.h | 130
-rw-r--r--  deps/v8/src/lithium-codegen.cc | 150
-rw-r--r--  deps/v8/src/lithium-codegen.h | 96
-rw-r--r--  deps/v8/src/lithium.cc | 112
-rw-r--r--  deps/v8/src/lithium.h | 339
-rw-r--r--  deps/v8/src/liveedit-debugger.js | 113
-rw-r--r--  deps/v8/src/liveedit.cc | 523
-rw-r--r--  deps/v8/src/liveedit.h | 4
-rw-r--r--  deps/v8/src/liveobjectlist-inl.h | 126
-rw-r--r--  deps/v8/src/liveobjectlist.cc | 2631
-rw-r--r--  deps/v8/src/liveobjectlist.h | 319
-rw-r--r--  deps/v8/src/log-inl.h | 1
-rw-r--r--  deps/v8/src/log-utils.cc | 148
-rw-r--r--  deps/v8/src/log-utils.h | 96
-rw-r--r--  deps/v8/src/log.cc | 1991
-rw-r--r--  deps/v8/src/log.h | 290
-rw-r--r--  deps/v8/src/macro-assembler.h | 50
-rw-r--r--  deps/v8/src/macros.py | 22
-rw-r--r--  deps/v8/src/mark-compact-inl.h | 2
-rw-r--r--  deps/v8/src/mark-compact.cc | 1601
-rw-r--r--  deps/v8/src/mark-compact.h | 283
-rw-r--r--  deps/v8/src/math.js | 108
-rw-r--r--  deps/v8/src/messages.cc | 53
-rw-r--r--  deps/v8/src/messages.h | 9
-rw-r--r--  deps/v8/src/messages.js | 692
-rw-r--r--  deps/v8/src/mips/OWNERS | 2
-rw-r--r--  deps/v8/src/mips/assembler-mips-inl.h | 106
-rw-r--r--  deps/v8/src/mips/assembler-mips.cc | 201
-rw-r--r--  deps/v8/src/mips/assembler-mips.h | 198
-rw-r--r--  deps/v8/src/mips/builtins-mips.cc | 797
-rw-r--r--  deps/v8/src/mips/code-stubs-mips.cc | 5192
-rw-r--r--  deps/v8/src/mips/code-stubs-mips.h | 440
-rw-r--r--  deps/v8/src/mips/codegen-mips.cc | 300
-rw-r--r--  deps/v8/src/mips/codegen-mips.h | 27
-rw-r--r--  deps/v8/src/mips/constants-mips.cc | 8
-rw-r--r--  deps/v8/src/mips/constants-mips.h | 27
-rw-r--r--  deps/v8/src/mips/cpu-mips.cc | 10
-rw-r--r--  deps/v8/src/mips/debug-mips.cc | 22
-rw-r--r--  deps/v8/src/mips/deoptimizer-mips.cc | 928
-rw-r--r--  deps/v8/src/mips/disasm-mips.cc | 22
-rw-r--r--  deps/v8/src/mips/frames-mips.cc | 17
-rw-r--r--  deps/v8/src/mips/frames-mips.h | 57
-rw-r--r--  deps/v8/src/mips/full-codegen-mips.cc | 1037
-rw-r--r--  deps/v8/src/mips/ic-mips.cc | 402
-rw-r--r--  deps/v8/src/mips/lithium-codegen-mips.cc | 3669
-rw-r--r--  deps/v8/src/mips/lithium-codegen-mips.h | 217
-rw-r--r--  deps/v8/src/mips/lithium-gap-resolver-mips.cc | 13
-rw-r--r--  deps/v8/src/mips/lithium-gap-resolver-mips.h | 2
-rw-r--r--  deps/v8/src/mips/lithium-mips.cc | 1493
-rw-r--r--  deps/v8/src/mips/lithium-mips.h | 1567
-rw-r--r--  deps/v8/src/mips/macro-assembler-mips.cc | 1492
-rw-r--r--  deps/v8/src/mips/macro-assembler-mips.h | 308
-rw-r--r--  deps/v8/src/mips/regexp-macro-assembler-mips.cc | 176
-rw-r--r--  deps/v8/src/mips/regexp-macro-assembler-mips.h | 13
-rw-r--r--  deps/v8/src/mips/simulator-mips.cc | 380
-rw-r--r--  deps/v8/src/mips/simulator-mips.h | 19
-rw-r--r--  deps/v8/src/mips/stub-cache-mips.cc | 3725
-rw-r--r--  deps/v8/src/mirror-debugger.js | 53
-rw-r--r--  deps/v8/src/mksnapshot.cc | 123
-rw-r--r--  deps/v8/src/msan.h (renamed from deps/v8/src/platform-tls.h) | 33
-rw-r--r--  deps/v8/src/object-observe.js | 568
-rw-r--r--  deps/v8/src/objects-debug.cc | 292
-rw-r--r--  deps/v8/src/objects-inl.h | 2083
-rw-r--r--  deps/v8/src/objects-printer.cc | 368
-rw-r--r--  deps/v8/src/objects-visiting-inl.h | 331
-rw-r--r--  deps/v8/src/objects-visiting.cc | 32
-rw-r--r--  deps/v8/src/objects-visiting.h | 90
-rw-r--r--  deps/v8/src/objects.cc | 10283
-rw-r--r--  deps/v8/src/objects.h | 4590
-rw-r--r--  deps/v8/src/optimizing-compiler-thread.cc | 346
-rw-r--r--  deps/v8/src/optimizing-compiler-thread.h | 130
-rw-r--r--  deps/v8/src/parser.cc | 1898
-rw-r--r--  deps/v8/src/parser.h | 186
-rw-r--r--  deps/v8/src/platform-cygwin.cc | 624
-rw-r--r--  deps/v8/src/platform-freebsd.cc | 591
-rw-r--r--  deps/v8/src/platform-linux.cc | 935
-rw-r--r--  deps/v8/src/platform-macos.cc | 646
-rw-r--r--  deps/v8/src/platform-nullos.cc | 511
-rw-r--r--  deps/v8/src/platform-openbsd.cc | 649
-rw-r--r--  deps/v8/src/platform-posix.cc | 587
-rw-r--r--  deps/v8/src/platform-solaris.cc | 577
-rw-r--r--  deps/v8/src/platform-win32.cc | 964
-rw-r--r--  deps/v8/src/platform.h | 490
-rw-r--r--  deps/v8/src/platform/condition-variable.cc | 345
-rw-r--r--  deps/v8/src/platform/condition-variable.h | 140
-rw-r--r--  deps/v8/src/platform/elapsed-timer.h | 120
-rw-r--r--  deps/v8/src/platform/mutex.cc | 214
-rw-r--r--  deps/v8/src/platform/mutex.h | 238
-rw-r--r--  deps/v8/src/platform/semaphore.cc | 214
-rw-r--r--  deps/v8/src/platform/semaphore.h | 126
-rw-r--r--  deps/v8/src/platform/socket.cc | 224
-rw-r--r--  deps/v8/src/platform/socket.h | 101
-rw-r--r--  deps/v8/src/platform/time.cc | 591
-rw-r--r--  deps/v8/src/platform/time.h | 416
-rw-r--r--  deps/v8/src/preparse-data.cc | 9
-rw-r--r--  deps/v8/src/preparse-data.h | 2
-rw-r--r--  deps/v8/src/preparser-api.cc | 214
-rw-r--r--  deps/v8/src/preparser.cc | 938
-rw-r--r--  deps/v8/src/preparser.h | 438
-rw-r--r--  deps/v8/src/prettyprinter.cc | 218
-rw-r--r--  deps/v8/src/prettyprinter.h | 10
-rw-r--r--  deps/v8/src/profile-generator-inl.h | 93
-rw-r--r--  deps/v8/src/profile-generator.cc | 3281
-rw-r--r--  deps/v8/src/profile-generator.h | 955
-rw-r--r--  deps/v8/src/property-details.h | 154
-rw-r--r--  deps/v8/src/property.cc | 13
-rw-r--r--  deps/v8/src/property.h | 243
-rw-r--r--  deps/v8/src/proxy.js | 52
-rw-r--r--  deps/v8/src/regexp-macro-assembler-irregexp.cc | 32
-rw-r--r--  deps/v8/src/regexp-macro-assembler-irregexp.h | 6
-rw-r--r--  deps/v8/src/regexp-macro-assembler-tracer.cc | 15
-rw-r--r--  deps/v8/src/regexp-macro-assembler-tracer.h | 5
-rw-r--r--  deps/v8/src/regexp-macro-assembler.cc | 37
-rw-r--r--  deps/v8/src/regexp-macro-assembler.h | 15
-rw-r--r--  deps/v8/src/regexp-stack.cc | 17
-rw-r--r--  deps/v8/src/regexp.js | 52
-rw-r--r--  deps/v8/src/rewriter.cc | 32
-rw-r--r--  deps/v8/src/runtime-profiler.cc | 200
-rw-r--r--  deps/v8/src/runtime-profiler.h | 90
-rw-r--r--  deps/v8/src/runtime.cc | 7323
-rw-r--r--  deps/v8/src/runtime.h | 289
-rw-r--r--  deps/v8/src/runtime.js | 51
-rw-r--r--  deps/v8/src/safepoint-table.cc | 10
-rw-r--r--  deps/v8/src/safepoint-table.h | 2
-rw-r--r--  deps/v8/src/sampler.cc | 687
-rw-r--r--  deps/v8/src/sampler.h | 150
-rw-r--r--  deps/v8/src/scanner-character-streams.cc | 2
-rw-r--r--[-rwxr-xr-x]  deps/v8/src/scanner.cc | 174
-rw-r--r--  deps/v8/src/scanner.h | 112
-rw-r--r--  deps/v8/src/scopeinfo.cc | 95
-rw-r--r--  deps/v8/src/scopeinfo.h | 67
-rw-r--r--  deps/v8/src/scopes.cc | 321
-rw-r--r--  deps/v8/src/scopes.h | 66
-rw-r--r--  deps/v8/src/serialize.cc | 365
-rw-r--r--  deps/v8/src/serialize.h | 77
-rw-r--r--  deps/v8/src/smart-pointers.h | 17
-rw-r--r--  deps/v8/src/snapshot-common.cc | 25
-rw-r--r--  deps/v8/src/snapshot-empty.cc | 2
-rw-r--r--  deps/v8/src/snapshot.h | 4
-rw-r--r--  deps/v8/src/spaces-inl.h | 78
-rw-r--r--  deps/v8/src/spaces.cc | 984
-rw-r--r--  deps/v8/src/spaces.h | 468
-rw-r--r--  deps/v8/src/splay-tree-inl.h | 9
-rw-r--r--  deps/v8/src/splay-tree.h | 14
-rw-r--r--  deps/v8/src/store-buffer-inl.h | 11
-rw-r--r--  deps/v8/src/store-buffer.cc | 146
-rw-r--r--  deps/v8/src/store-buffer.h | 39
-rw-r--r--  deps/v8/src/string-search.h | 22
-rw-r--r--  deps/v8/src/string-stream.cc | 148
-rw-r--r--  deps/v8/src/string-stream.h | 41
-rw-r--r--  deps/v8/src/string.js | 276
-rw-r--r--  deps/v8/src/strtod.cc | 6
-rw-r--r--  deps/v8/src/stub-cache.cc | 1572
-rw-r--r--  deps/v8/src/stub-cache.h | 814
-rw-r--r--  deps/v8/src/sweeper-thread.cc | 108
-rw-r--r--  deps/v8/src/sweeper-thread.h | 70
-rw-r--r--  deps/v8/src/symbol.js | 87
-rw-r--r--  deps/v8/src/third_party/vtune/ittnotify_config.h | 484
-rw-r--r--  deps/v8/src/third_party/vtune/ittnotify_types.h | 113
-rw-r--r--  deps/v8/src/third_party/vtune/jitprofiling.cc | 499
-rw-r--r--  deps/v8/src/third_party/vtune/jitprofiling.h | 298
-rw-r--r--  deps/v8/src/third_party/vtune/v8-vtune.h | 69
-rw-r--r--  deps/v8/src/third_party/vtune/v8vtune.gyp | 59
-rw-r--r--  deps/v8/src/third_party/vtune/vtune-jit.cc | 283
-rw-r--r--  deps/v8/src/third_party/vtune/vtune-jit.h | 82
-rw-r--r--  deps/v8/src/token.h | 27
-rw-r--r--  deps/v8/src/transitions-inl.h | 38
-rw-r--r--  deps/v8/src/transitions.cc | 32
-rw-r--r--  deps/v8/src/transitions.h | 61
-rw-r--r--  deps/v8/src/type-info.cc | 522
-rw-r--r--  deps/v8/src/type-info.h | 108
-rw-r--r--  deps/v8/src/typedarray.js | 597
-rw-r--r--  deps/v8/src/types.cc | 551
-rw-r--r--  deps/v8/src/types.h | 351
-rw-r--r--  deps/v8/src/typing.cc | 719
-rw-r--r--  deps/v8/src/typing.h | 102
-rw-r--r--  deps/v8/src/unbound-queue-inl.h | 26
-rw-r--r--  deps/v8/src/unbound-queue.h | 6
-rw-r--r--  deps/v8/src/unicode-inl.h | 181
-rw-r--r--  deps/v8/src/unicode.cc | 136
-rw-r--r--  deps/v8/src/unicode.h | 137
-rw-r--r--  deps/v8/src/unique.h | 332
-rw-r--r--  deps/v8/src/uri.h | 309
-rw-r--r--  deps/v8/src/uri.js | 95
-rw-r--r--  deps/v8/src/utils.cc | 13
-rw-r--r--  deps/v8/src/utils.h | 157
-rw-r--r--  deps/v8/src/utils/random-number-generator.cc | 153
-rw-r--r--  deps/v8/src/utils/random-number-generator.h | 110
-rw-r--r--  deps/v8/src/v8-counters.cc | 58
-rw-r--r--  deps/v8/src/v8-counters.h | 117
-rw-r--r--  deps/v8/src/v8.cc | 168
-rw-r--r--  deps/v8/src/v8.h | 45
-rw-r--r--  deps/v8/src/v8conversions.cc | 48
-rw-r--r--  deps/v8/src/v8conversions.h | 13
-rw-r--r--  deps/v8/src/v8dll-main.cc | 6
-rw-r--r--  deps/v8/src/v8globals.h | 99
-rw-r--r--  deps/v8/src/v8memory.h | 8
-rw-r--r--  deps/v8/src/v8natives.js | 505
-rw-r--r--  deps/v8/src/v8threads.cc | 54
-rw-r--r--  deps/v8/src/v8threads.h | 6
-rw-r--r--  deps/v8/src/v8utils.cc | 99
-rw-r--r--  deps/v8/src/v8utils.h | 241
-rw-r--r--  deps/v8/src/variables.cc | 8
-rw-r--r--  deps/v8/src/variables.h | 4
-rw-r--r--  deps/v8/src/version.cc | 14
-rw-r--r--  deps/v8/src/vm-state-inl.h | 50
-rw-r--r--  deps/v8/src/vm-state.h | 14
-rw-r--r--  deps/v8/src/win32-headers.h | 11
-rw-r--r--  deps/v8/src/win32-math.cc | 23
-rw-r--r--  deps/v8/src/win32-math.h | 14
-rw-r--r--  deps/v8/src/x64/assembler-x64-inl.h | 151
-rw-r--r--  deps/v8/src/x64/assembler-x64.cc | 428
-rw-r--r--  deps/v8/src/x64/assembler-x64.h | 295
-rw-r--r--  deps/v8/src/x64/builtins-x64.cc | 907
-rw-r--r--  deps/v8/src/x64/code-stubs-x64.cc | 4216
-rw-r--r--  deps/v8/src/x64/code-stubs-x64.h | 264
-rw-r--r--  deps/v8/src/x64/codegen-x64.cc | 223
-rw-r--r--  deps/v8/src/x64/codegen-x64.h | 92
-rw-r--r--  deps/v8/src/x64/cpu-x64.cc | 14
-rw-r--r--  deps/v8/src/x64/debug-x64.cc | 37
-rw-r--r--  deps/v8/src/x64/deoptimizer-x64.cc | 930
-rw-r--r--  deps/v8/src/x64/disasm-x64.cc | 136
-rw-r--r--  deps/v8/src/x64/frames-x64.cc | 16
-rw-r--r--  deps/v8/src/x64/frames-x64.h | 61
-rw-r--r--  deps/v8/src/x64/full-codegen-x64.cc | 1030
-rw-r--r--  deps/v8/src/x64/ic-x64.cc | 598
-rw-r--r--  deps/v8/src/x64/lithium-codegen-x64.cc | 3591
-rw-r--r--  deps/v8/src/x64/lithium-codegen-x64.h | 195
-rw-r--r--  deps/v8/src/x64/lithium-gap-resolver-x64.cc | 26
-rw-r--r--  deps/v8/src/x64/lithium-gap-resolver-x64.h | 2
-rw-r--r--  deps/v8/src/x64/lithium-x64.cc | 1329
-rw-r--r--  deps/v8/src/x64/lithium-x64.h | 1224
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.cc | 1303
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.h | 310
-rw-r--r--  deps/v8/src/x64/regexp-macro-assembler-x64.cc | 164
-rw-r--r--  deps/v8/src/x64/regexp-macro-assembler-x64.h | 6
-rw-r--r--  deps/v8/src/x64/stub-cache-x64.cc | 3230
-rw-r--r--  deps/v8/src/zone-inl.h | 14
-rw-r--r--  deps/v8/src/zone.cc | 78
-rw-r--r--  deps/v8/src/zone.h | 78
508 files changed, 141777 insertions, 106519 deletions
diff --git a/deps/v8/src/OWNERS b/deps/v8/src/OWNERS
new file mode 100644
index 000000000..f38fecad4
--- /dev/null
+++ b/deps/v8/src/OWNERS
@@ -0,0 +1,2 @@
+per-file i18n.*=cira@chromium.org
+per-file i18n.*=mnita@google.com
diff --git a/deps/v8/src/SConscript b/deps/v8/src/SConscript
deleted file mode 100755
index 16bfb55b3..000000000
--- a/deps/v8/src/SConscript
+++ /dev/null
@@ -1,413 +0,0 @@
-# Copyright 2012 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import sys
-from os.path import join, dirname, abspath
-root_dir = dirname(File('SConstruct').rfile().abspath)
-sys.path.append(join(root_dir, 'tools'))
-import js2c
-Import('context')
-Import('tools')
-Import('d8_env')
-
-
-SOURCES = {
- 'all': Split("""
- accessors.cc
- allocation.cc
- api.cc
- assembler.cc
- ast.cc
- atomicops_internals_x86_gcc.cc
- bignum-dtoa.cc
- bignum.cc
- bootstrapper.cc
- builtins.cc
- cached-powers.cc
- checks.cc
- circular-queue.cc
- code-stubs.cc
- codegen.cc
- compilation-cache.cc
- compiler.cc
- contexts.cc
- conversions.cc
- counters.cc
- cpu-profiler.cc
- data-flow.cc
- date.cc
- dateparser.cc
- debug-agent.cc
- debug.cc
- deoptimizer.cc
- disassembler.cc
- diy-fp.cc
- dtoa.cc
- elements-kind.cc
- elements.cc
- execution.cc
- extensions/externalize-string-extension.cc
- extensions/gc-extension.cc
- extensions/statistics-extension.cc
- factory.cc
- fast-dtoa.cc
- fixed-dtoa.cc
- flags.cc
- frames.cc
- full-codegen.cc
- func-name-inferrer.cc
- gdb-jit.cc
- global-handles.cc
- handles.cc
- heap-profiler.cc
- heap.cc
- hydrogen-instructions.cc
- hydrogen.cc
- ic.cc
- incremental-marking.cc
- inspector.cc
- interface.cc
- interpreter-irregexp.cc
- isolate.cc
- jsregexp.cc
- lithium-allocator.cc
- lithium.cc
- liveedit.cc
- liveobjectlist.cc
- log-utils.cc
- log.cc
- mark-compact.cc
- messages.cc
- objects-printer.cc
- objects-visiting.cc
- objects.cc
- once.cc
- optimizing-compiler-thread.cc
- parser.cc
- preparse-data.cc
- preparser.cc
- profile-generator.cc
- property.cc
- regexp-macro-assembler-irregexp.cc
- regexp-macro-assembler.cc
- regexp-stack.cc
- rewriter.cc
- runtime-profiler.cc
- runtime.cc
- safepoint-table.cc
- scanner-character-streams.cc
- scanner.cc
- scopeinfo.cc
- scopes.cc
- serialize.cc
- snapshot-common.cc
- spaces.cc
- store-buffer.cc
- string-search.cc
- string-stream.cc
- strtod.cc
- stub-cache.cc
- token.cc
- transitions.cc
- type-info.cc
- unicode.cc
- utils.cc
- v8-counters.cc
- v8.cc
- v8conversions.cc
- v8threads.cc
- v8utils.cc
- variables.cc
- version.cc
- zone.cc
- """),
- 'arch:arm': Split("""
- arm/builtins-arm.cc
- arm/code-stubs-arm.cc
- arm/codegen-arm.cc
- arm/constants-arm.cc
- arm/cpu-arm.cc
- arm/debug-arm.cc
- arm/deoptimizer-arm.cc
- arm/disasm-arm.cc
- arm/frames-arm.cc
- arm/full-codegen-arm.cc
- arm/ic-arm.cc
- arm/lithium-arm.cc
- arm/lithium-codegen-arm.cc
- arm/lithium-gap-resolver-arm.cc
- arm/macro-assembler-arm.cc
- arm/regexp-macro-assembler-arm.cc
- arm/stub-cache-arm.cc
- arm/assembler-arm.cc
- """),
- 'arch:mips': Split("""
- mips/assembler-mips.cc
- mips/builtins-mips.cc
- mips/code-stubs-mips.cc
- mips/codegen-mips.cc
- mips/constants-mips.cc
- mips/cpu-mips.cc
- mips/debug-mips.cc
- mips/deoptimizer-mips.cc
- mips/disasm-mips.cc
- mips/frames-mips.cc
- mips/full-codegen-mips.cc
- mips/ic-mips.cc
- mips/lithium-codegen-mips.cc
- mips/lithium-gap-resolver-mips.cc
- mips/lithium-mips.cc
- mips/macro-assembler-mips.cc
- mips/regexp-macro-assembler-mips.cc
- mips/stub-cache-mips.cc
- """),
- 'arch:ia32': Split("""
- ia32/assembler-ia32.cc
- ia32/builtins-ia32.cc
- ia32/code-stubs-ia32.cc
- ia32/codegen-ia32.cc
- ia32/cpu-ia32.cc
- ia32/debug-ia32.cc
- ia32/deoptimizer-ia32.cc
- ia32/disasm-ia32.cc
- ia32/frames-ia32.cc
- ia32/full-codegen-ia32.cc
- ia32/ic-ia32.cc
- ia32/lithium-codegen-ia32.cc
- ia32/lithium-gap-resolver-ia32.cc
- ia32/lithium-ia32.cc
- ia32/macro-assembler-ia32.cc
- ia32/regexp-macro-assembler-ia32.cc
- ia32/stub-cache-ia32.cc
- """),
- 'arch:x64': Split("""
- x64/assembler-x64.cc
- x64/builtins-x64.cc
- x64/code-stubs-x64.cc
- x64/codegen-x64.cc
- x64/cpu-x64.cc
- x64/debug-x64.cc
- x64/deoptimizer-x64.cc
- x64/disasm-x64.cc
- x64/frames-x64.cc
- x64/full-codegen-x64.cc
- x64/ic-x64.cc
- x64/lithium-codegen-x64.cc
- x64/lithium-gap-resolver-x64.cc
- x64/lithium-x64.cc
- x64/macro-assembler-x64.cc
- x64/regexp-macro-assembler-x64.cc
- x64/stub-cache-x64.cc
- """),
- 'simulator:arm': ['arm/simulator-arm.cc'],
- 'simulator:mips': ['mips/simulator-mips.cc'],
- 'os:freebsd': ['platform-freebsd.cc', 'platform-posix.cc'],
- 'os:openbsd': ['platform-openbsd.cc', 'platform-posix.cc'],
- 'os:linux': ['platform-linux.cc', 'platform-posix.cc'],
- 'os:android': ['platform-linux.cc', 'platform-posix.cc'],
- 'os:macos': ['platform-macos.cc', 'platform-posix.cc'],
- 'os:solaris': ['platform-solaris.cc', 'platform-posix.cc'],
- 'os:cygwin': ['platform-cygwin.cc', 'platform-posix.cc'],
- 'os:nullos': ['platform-nullos.cc'],
- 'os:win32': ['platform-win32.cc', 'win32-math.cc'],
- 'mode:release': [],
- 'mode:debug': [
- 'objects-debug.cc', 'prettyprinter.cc', 'regexp-macro-assembler-tracer.cc'
- ]
-}
-
-
-PREPARSER_SOURCES = {
- 'all': Split("""
- allocation.cc
- bignum.cc
- bignum-dtoa.cc
- cached-powers.cc
- conversions.cc
- diy-fp.cc
- dtoa.cc
- fast-dtoa.cc
- fixed-dtoa.cc
- preparse-data.cc
- preparser.cc
- preparser-api.cc
- scanner.cc
- strtod.cc
- token.cc
- unicode.cc
- utils.cc
- """),
- 'os:win32': ['win32-math.cc']
-}
-
-
-D8_LIGHT_FILES = {
- 'all': [
- 'd8.cc'
- ]
-}
-
-
-D8_FULL_FILES = {
- 'all': [
- 'd8.cc', 'd8-debug.cc'
- ],
- 'os:linux': [
- 'd8-posix.cc'
- ],
- 'os:macos': [
- 'd8-posix.cc'
- ],
- 'os:android': [
- 'd8-posix.cc'
- ],
- 'os:freebsd': [
- 'd8-posix.cc'
- ],
- 'os:openbsd': [
- 'd8-posix.cc'
- ],
- 'os:solaris': [
- 'd8-posix.cc'
- ],
- 'os:cygwin': [
- 'd8-posix.cc'
- ],
- 'os:win32': [
- 'd8-windows.cc'
- ],
- 'os:nullos': [
- 'd8-windows.cc' # Empty implementation at the moment.
- ],
- 'console:readline': [
- 'd8-readline.cc'
- ]
-}
-
-
-LIBRARY_FILES = '''
-runtime.js
-v8natives.js
-array.js
-string.js
-uri.js
-math.js
-messages.js
-apinatives.js
-date.js
-regexp.js
-json.js
-liveedit-debugger.js
-mirror-debugger.js
-debug-debugger.js
-'''.split()
-
-
-EXPERIMENTAL_LIBRARY_FILES = '''
-proxy.js
-collection.js
-'''.split()
-
-
-def Abort(message):
- print message
- sys.exit(1)
-
-
-def ConfigureObjectFiles():
- env = Environment(tools=tools)
- env.Replace(**context.flags['v8'])
- context.ApplyEnvOverrides(env)
- env['BUILDERS']['JS2C'] = Builder(action=js2c.JS2C)
- env['BUILDERS']['Snapshot'] = Builder(action='$SOURCE $TARGET --logfile "$LOGFILE" --log-snapshot-positions')
-
- def BuildJS2CEnv(type):
- js2c_env = { 'TYPE': type, 'COMPRESSION': 'off' }
- if 'COMPRESS_STARTUP_DATA_BZ2' in env['CPPDEFINES']:
- js2c_env['COMPRESSION'] = 'bz2'
- return js2c_env
-
- # Build the standard platform-independent source files.
- source_files = context.GetRelevantSources(SOURCES)
- d8_js = env.JS2C('d8-js.cc', 'd8.js', **{'TYPE': 'D8', 'COMPRESSION': 'off'})
- d8_js_obj = context.ConfigureObject(env, d8_js, CPPPATH=['.'])
- if context.options['library'] == 'shared':
- d8_files = context.GetRelevantSources(D8_LIGHT_FILES)
- d8_objs = []
- else:
- d8_files = context.GetRelevantSources(D8_FULL_FILES)
- d8_objs = [d8_js_obj]
- d8_objs.append(context.ConfigureObject(d8_env, [d8_files]))
-
- # Combine the JavaScript library files into a single C++ file and
- # compile it.
- library_files = [s for s in LIBRARY_FILES]
- library_files.append('macros.py')
- libraries_src = env.JS2C(
- ['libraries.cc'], library_files, **BuildJS2CEnv('CORE'))
- libraries_obj = context.ConfigureObject(env, libraries_src, CPPPATH=['.'])
-
- # Combine the experimental JavaScript library files into a C++ file
- # and compile it.
- experimental_library_files = [ s for s in EXPERIMENTAL_LIBRARY_FILES ]
- experimental_library_files.append('macros.py')
- experimental_libraries_src = env.JS2C(['experimental-libraries.cc'],
- experimental_library_files,
- **BuildJS2CEnv('EXPERIMENTAL'))
- experimental_libraries_obj = context.ConfigureObject(env, experimental_libraries_src, CPPPATH=['.'])
-
- source_objs = context.ConfigureObject(env, source_files)
- non_snapshot_files = [source_objs]
-
- preparser_source_files = context.GetRelevantSources(PREPARSER_SOURCES)
- preparser_objs = context.ConfigureObject(env, preparser_source_files)
-
- # Create snapshot if necessary. For cross compilation you should either
- # do without snapshots and take the performance hit or you should build a
- # host VM with the simulator=arm and snapshot=on options and then take the
- # resulting snapshot.cc file from obj/release and put it in the src
- # directory. Then rebuild the VM with the cross compiler and specify
- # snapshot=nobuild on the scons command line.
- empty_snapshot_obj = context.ConfigureObject(env, 'snapshot-empty.cc')
- mksnapshot_env = env.Copy()
- mksnapshot_env.Replace(**context.flags['mksnapshot'])
- mksnapshot_src = 'mksnapshot.cc'
- mksnapshot = mksnapshot_env.Program('mksnapshot', [mksnapshot_src, libraries_obj, experimental_libraries_obj, non_snapshot_files, empty_snapshot_obj], PDB='mksnapshot.exe.pdb')
- if context.use_snapshot:
- if context.build_snapshot:
- snapshot_cc = env.Snapshot('snapshot.cc', mksnapshot, LOGFILE=File('snapshot.log').abspath)
- else:
- snapshot_cc = 'snapshot.cc'
- snapshot_obj = context.ConfigureObject(env, snapshot_cc, CPPPATH=['.'])
- else:
- snapshot_obj = empty_snapshot_obj
- library_objs = [non_snapshot_files, libraries_obj, experimental_libraries_obj, snapshot_obj]
- return (library_objs, d8_objs, [mksnapshot], preparser_objs)
-
-
-(library_objs, d8_objs, mksnapshot, preparser_objs) = ConfigureObjectFiles()
-Return('library_objs d8_objs mksnapshot preparser_objs')
diff --git a/deps/v8/src/accessors.cc b/deps/v8/src/accessors.cc
index 1bc9221a2..50232661c 100644
--- a/deps/v8/src/accessors.cc
+++ b/deps/v8/src/accessors.cc
@@ -42,8 +42,8 @@ namespace internal {
template <class C>
-static C* FindInstanceOf(Object* obj) {
- for (Object* cur = obj; !cur->IsNull(); cur = cur->GetPrototype()) {
+static C* FindInstanceOf(Isolate* isolate, Object* obj) {
+ for (Object* cur = obj; !cur->IsNull(); cur = cur->GetPrototype(isolate)) {
if (Is<C>(cur)) return C::cast(cur);
}
return NULL;
@@ -51,78 +51,146 @@ static C* FindInstanceOf(Object* obj) {
// Entry point that never should be called.
-MaybeObject* Accessors::IllegalSetter(JSObject*, Object*, void*) {
+MaybeObject* Accessors::IllegalSetter(Isolate* isolate,
+ JSObject*,
+ Object*,
+ void*) {
UNREACHABLE();
return NULL;
}
-Object* Accessors::IllegalGetAccessor(Object* object, void*) {
+Object* Accessors::IllegalGetAccessor(Isolate* isolate,
+ Object* object,
+ void*) {
UNREACHABLE();
return object;
}
-MaybeObject* Accessors::ReadOnlySetAccessor(JSObject*, Object* value, void*) {
+MaybeObject* Accessors::ReadOnlySetAccessor(Isolate* isolate,
+ JSObject*,
+ Object* value,
+ void*) {
// According to ECMA-262, section 8.6.2.2, page 28, setting
// read-only properties must be silently ignored.
return value;
}
+static V8_INLINE bool CheckForName(Handle<String> name,
+ String* property_name,
+ int offset,
+ int* object_offset) {
+ if (name->Equals(property_name)) {
+ *object_offset = offset;
+ return true;
+ }
+ return false;
+}
+
+
+bool Accessors::IsJSObjectFieldAccessor(
+ Handle<Map> map, Handle<String> name,
+ int* object_offset) {
+ Isolate* isolate = map->GetIsolate();
+ switch (map->instance_type()) {
+ case JS_ARRAY_TYPE:
+ return
+ CheckForName(name, isolate->heap()->length_string(),
+ JSArray::kLengthOffset, object_offset);
+ case JS_TYPED_ARRAY_TYPE:
+ return
+ CheckForName(name, isolate->heap()->length_string(),
+ JSTypedArray::kLengthOffset, object_offset) ||
+ CheckForName(name, isolate->heap()->byte_length_string(),
+ JSTypedArray::kByteLengthOffset, object_offset) ||
+ CheckForName(name, isolate->heap()->byte_offset_string(),
+ JSTypedArray::kByteOffsetOffset, object_offset) ||
+ CheckForName(name, isolate->heap()->buffer_string(),
+ JSTypedArray::kBufferOffset, object_offset);
+ case JS_ARRAY_BUFFER_TYPE:
+ return
+ CheckForName(name, isolate->heap()->byte_length_string(),
+ JSArrayBuffer::kByteLengthOffset, object_offset);
+ case JS_DATA_VIEW_TYPE:
+ return
+ CheckForName(name, isolate->heap()->byte_length_string(),
+ JSDataView::kByteLengthOffset, object_offset) ||
+ CheckForName(name, isolate->heap()->byte_offset_string(),
+ JSDataView::kByteOffsetOffset, object_offset) ||
+ CheckForName(name, isolate->heap()->buffer_string(),
+ JSDataView::kBufferOffset, object_offset);
+ default: {
+ if (map->instance_type() < FIRST_NONSTRING_TYPE) {
+ return
+ CheckForName(name, isolate->heap()->length_string(),
+ String::kLengthOffset, object_offset);
+ }
+ return false;
+ }
+ }
+}
+
+
//
// Accessors::ArrayLength
//
-MaybeObject* Accessors::ArrayGetLength(Object* object, void*) {
+MaybeObject* Accessors::ArrayGetLength(Isolate* isolate,
+ Object* object,
+ void*) {
// Traverse the prototype chain until we reach an array.
- JSArray* holder = FindInstanceOf<JSArray>(object);
+ JSArray* holder = FindInstanceOf<JSArray>(isolate, object);
return holder == NULL ? Smi::FromInt(0) : holder->length();
}
// The helper function will 'flatten' Number objects.
-Object* Accessors::FlattenNumber(Object* value) {
+Object* Accessors::FlattenNumber(Isolate* isolate, Object* value) {
if (value->IsNumber() || !value->IsJSValue()) return value;
JSValue* wrapper = JSValue::cast(value);
- ASSERT(Isolate::Current()->context()->native_context()->number_function()->
+ ASSERT(wrapper->GetIsolate()->context()->native_context()->number_function()->
has_initial_map());
- Map* number_map = Isolate::Current()->context()->native_context()->
+ Map* number_map = isolate->context()->native_context()->
number_function()->initial_map();
if (wrapper->map() == number_map) return wrapper->value();
return value;
}
-MaybeObject* Accessors::ArraySetLength(JSObject* object, Object* value, void*) {
- Isolate* isolate = object->GetIsolate();
-
+MaybeObject* Accessors::ArraySetLength(Isolate* isolate,
+ JSObject* object,
+ Object* value,
+ void*) {
// This means one of the object's prototypes is a JSArray and the
// object does not have a 'length' property. Calling SetProperty
// causes an infinite loop.
if (!object->IsJSArray()) {
- return object->SetLocalPropertyIgnoreAttributes(
- isolate->heap()->length_symbol(), value, NONE);
+ return object->SetLocalPropertyIgnoreAttributesTrampoline(
+ isolate->heap()->length_string(), value, NONE);
}
- value = FlattenNumber(value);
+ value = FlattenNumber(isolate, value);
// Need to call methods that may trigger GC.
HandleScope scope(isolate);
// Protect raw pointers.
- Handle<JSObject> object_handle(object, isolate);
+ Handle<JSArray> array_handle(JSArray::cast(object), isolate);
Handle<Object> value_handle(value, isolate);
bool has_exception;
- Handle<Object> uint32_v = Execution::ToUint32(value_handle, &has_exception);
+ Handle<Object> uint32_v =
+ Execution::ToUint32(isolate, value_handle, &has_exception);
if (has_exception) return Failure::Exception();
- Handle<Object> number_v = Execution::ToNumber(value_handle, &has_exception);
+ Handle<Object> number_v =
+ Execution::ToNumber(isolate, value_handle, &has_exception);
if (has_exception) return Failure::Exception();
if (uint32_v->Number() == number_v->Number()) {
- return Handle<JSArray>::cast(object_handle)->SetElementsLength(*uint32_v);
+ return array_handle->SetElementsLength(*uint32_v);
}
return isolate->Throw(
*isolate->factory()->NewRangeError("invalid_array_length",
@@ -142,7 +210,9 @@ const AccessorDescriptor Accessors::ArrayLength = {
//
-MaybeObject* Accessors::StringGetLength(Object* object, void*) {
+MaybeObject* Accessors::StringGetLength(Isolate* isolate,
+ Object* object,
+ void*) {
Object* value = object;
if (object->IsJSValue()) value = JSValue::cast(object)->value();
if (value->IsString()) return Smi::FromInt(String::cast(value)->length());
@@ -164,7 +234,9 @@ const AccessorDescriptor Accessors::StringLength = {
//
-MaybeObject* Accessors::ScriptGetSource(Object* object, void*) {
+MaybeObject* Accessors::ScriptGetSource(Isolate* isolate,
+ Object* object,
+ void*) {
Object* script = JSValue::cast(object)->value();
return Script::cast(script)->source();
}
@@ -182,7 +254,9 @@ const AccessorDescriptor Accessors::ScriptSource = {
//
-MaybeObject* Accessors::ScriptGetName(Object* object, void*) {
+MaybeObject* Accessors::ScriptGetName(Isolate* isolate,
+ Object* object,
+ void*) {
Object* script = JSValue::cast(object)->value();
return Script::cast(script)->name();
}
@@ -200,7 +274,7 @@ const AccessorDescriptor Accessors::ScriptName = {
//
-MaybeObject* Accessors::ScriptGetId(Object* object, void*) {
+MaybeObject* Accessors::ScriptGetId(Isolate* isolate, Object* object, void*) {
Object* script = JSValue::cast(object)->value();
return Script::cast(script)->id();
}
@@ -218,7 +292,9 @@ const AccessorDescriptor Accessors::ScriptId = {
//
-MaybeObject* Accessors::ScriptGetLineOffset(Object* object, void*) {
+MaybeObject* Accessors::ScriptGetLineOffset(Isolate* isolate,
+ Object* object,
+ void*) {
Object* script = JSValue::cast(object)->value();
return Script::cast(script)->line_offset();
}
@@ -236,7 +312,9 @@ const AccessorDescriptor Accessors::ScriptLineOffset = {
//
-MaybeObject* Accessors::ScriptGetColumnOffset(Object* object, void*) {
+MaybeObject* Accessors::ScriptGetColumnOffset(Isolate* isolate,
+ Object* object,
+ void*) {
Object* script = JSValue::cast(object)->value();
return Script::cast(script)->column_offset();
}
@@ -254,7 +332,9 @@ const AccessorDescriptor Accessors::ScriptColumnOffset = {
//
-MaybeObject* Accessors::ScriptGetData(Object* object, void*) {
+MaybeObject* Accessors::ScriptGetData(Isolate* isolate,
+ Object* object,
+ void*) {
Object* script = JSValue::cast(object)->value();
return Script::cast(script)->data();
}
@@ -272,7 +352,9 @@ const AccessorDescriptor Accessors::ScriptData = {
//
-MaybeObject* Accessors::ScriptGetType(Object* object, void*) {
+MaybeObject* Accessors::ScriptGetType(Isolate* isolate,
+ Object* object,
+ void*) {
Object* script = JSValue::cast(object)->value();
return Script::cast(script)->type();
}
@@ -290,9 +372,11 @@ const AccessorDescriptor Accessors::ScriptType = {
//
-MaybeObject* Accessors::ScriptGetCompilationType(Object* object, void*) {
+MaybeObject* Accessors::ScriptGetCompilationType(Isolate* isolate,
+ Object* object,
+ void*) {
Object* script = JSValue::cast(object)->value();
- return Script::cast(script)->compilation_type();
+ return Smi::FromInt(Script::cast(script)->compilation_type());
}
@@ -308,9 +392,10 @@ const AccessorDescriptor Accessors::ScriptCompilationType = {
//
-MaybeObject* Accessors::ScriptGetLineEnds(Object* object, void*) {
+MaybeObject* Accessors::ScriptGetLineEnds(Isolate* isolate,
+ Object* object,
+ void*) {
JSValue* wrapper = JSValue::cast(object);
- Isolate* isolate = wrapper->GetIsolate();
HandleScope scope(isolate);
Handle<Script> script(Script::cast(wrapper->value()), isolate);
InitScriptLineEnds(script);
@@ -337,7 +422,9 @@ const AccessorDescriptor Accessors::ScriptLineEnds = {
//
-MaybeObject* Accessors::ScriptGetContextData(Object* object, void*) {
+MaybeObject* Accessors::ScriptGetContextData(Isolate* isolate,
+ Object* object,
+ void*) {
Object* script = JSValue::cast(object)->value();
return Script::cast(script)->context_data();
}
@@ -355,7 +442,9 @@ const AccessorDescriptor Accessors::ScriptContextData = {
//
-MaybeObject* Accessors::ScriptGetEvalFromScript(Object* object, void*) {
+MaybeObject* Accessors::ScriptGetEvalFromScript(Isolate* isolate,
+ Object* object,
+ void*) {
Object* script = JSValue::cast(object)->value();
if (!Script::cast(script)->eval_from_shared()->IsUndefined()) {
Handle<SharedFunctionInfo> eval_from_shared(
@@ -366,7 +455,7 @@ MaybeObject* Accessors::ScriptGetEvalFromScript(Object* object, void*) {
return *GetScriptWrapper(eval_from_script);
}
}
- return HEAP->undefined_value();
+ return isolate->heap()->undefined_value();
}
@@ -382,14 +471,16 @@ const AccessorDescriptor Accessors::ScriptEvalFromScript = {
//
-MaybeObject* Accessors::ScriptGetEvalFromScriptPosition(Object* object, void*) {
- HandleScope scope;
- Handle<Script> script(Script::cast(JSValue::cast(object)->value()));
+MaybeObject* Accessors::ScriptGetEvalFromScriptPosition(Isolate* isolate,
+ Object* object,
+ void*) {
+ Script* raw_script = Script::cast(JSValue::cast(object)->value());
+ HandleScope scope(isolate);
+ Handle<Script> script(raw_script);
// If this is not a script compiled through eval there is no eval position.
- int compilation_type = Smi::cast(script->compilation_type())->value();
- if (compilation_type != Script::COMPILATION_TYPE_EVAL) {
- return HEAP->undefined_value();
+ if (script->compilation_type() != Script::COMPILATION_TYPE_EVAL) {
+ return script->GetHeap()->undefined_value();
}
// Get the function from where eval was called and find the source position
@@ -413,7 +504,9 @@ const AccessorDescriptor Accessors::ScriptEvalFromScriptPosition = {
//
-MaybeObject* Accessors::ScriptGetEvalFromFunctionName(Object* object, void*) {
+MaybeObject* Accessors::ScriptGetEvalFromFunctionName(Isolate* isolate,
+ Object* object,
+ void*) {
Object* script = JSValue::cast(object)->value();
Handle<SharedFunctionInfo> shared(SharedFunctionInfo::cast(
Script::cast(script)->eval_from_shared()));
@@ -440,49 +533,88 @@ const AccessorDescriptor Accessors::ScriptEvalFromFunctionName = {
//
-MaybeObject* Accessors::FunctionGetPrototype(Object* object, void*) {
- Heap* heap = Isolate::Current()->heap();
- JSFunction* function = FindInstanceOf<JSFunction>(object);
- if (function == NULL) return heap->undefined_value();
- while (!function->should_have_prototype()) {
- function = FindInstanceOf<JSFunction>(function->GetPrototype());
+Handle<Object> Accessors::FunctionGetPrototype(Handle<JSFunction> function) {
+ CALL_HEAP_FUNCTION(function->GetIsolate(),
+ Accessors::FunctionGetPrototype(function->GetIsolate(),
+ *function,
+ NULL),
+ Object);
+}
+
+
+Handle<Object> Accessors::FunctionSetPrototype(Handle<JSFunction> function,
+ Handle<Object> prototype) {
+ ASSERT(function->should_have_prototype());
+ CALL_HEAP_FUNCTION(function->GetIsolate(),
+ Accessors::FunctionSetPrototype(function->GetIsolate(),
+ *function,
+ *prototype,
+ NULL),
+ Object);
+}
+
+
+MaybeObject* Accessors::FunctionGetPrototype(Isolate* isolate,
+ Object* object,
+ void*) {
+ JSFunction* function_raw = FindInstanceOf<JSFunction>(isolate, object);
+ if (function_raw == NULL) return isolate->heap()->undefined_value();
+ while (!function_raw->should_have_prototype()) {
+ function_raw = FindInstanceOf<JSFunction>(isolate,
+ function_raw->GetPrototype());
// There has to be one because we hit the getter.
- ASSERT(function != NULL);
+ ASSERT(function_raw != NULL);
}
- if (!function->has_prototype()) {
- Object* prototype;
- { MaybeObject* maybe_prototype = heap->AllocateFunctionPrototype(function);
- if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
- }
- Object* result;
- { MaybeObject* maybe_result = function->SetPrototype(prototype);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
+ if (!function_raw->has_prototype()) {
+ HandleScope scope(isolate);
+ Handle<JSFunction> function(function_raw);
+ Handle<Object> proto = isolate->factory()->NewFunctionPrototype(function);
+ JSFunction::SetPrototype(function, proto);
+ function_raw = *function;
}
- return function->prototype();
+ return function_raw->prototype();
}
-MaybeObject* Accessors::FunctionSetPrototype(JSObject* object,
- Object* value,
+MaybeObject* Accessors::FunctionSetPrototype(Isolate* isolate,
+ JSObject* object,
+ Object* value_raw,
void*) {
- Heap* heap = object->GetHeap();
- JSFunction* function = FindInstanceOf<JSFunction>(object);
- if (function == NULL) return heap->undefined_value();
- if (!function->should_have_prototype()) {
+ Heap* heap = isolate->heap();
+ JSFunction* function_raw = FindInstanceOf<JSFunction>(isolate, object);
+ if (function_raw == NULL) return heap->undefined_value();
+ if (!function_raw->should_have_prototype()) {
// Since we hit this accessor, object will have no prototype property.
- return object->SetLocalPropertyIgnoreAttributes(heap->prototype_symbol(),
- value,
- NONE);
+ return object->SetLocalPropertyIgnoreAttributesTrampoline(
+ heap->prototype_string(), value_raw, NONE);
+ }
+
+ HandleScope scope(isolate);
+ Handle<JSFunction> function(function_raw, isolate);
+ Handle<Object> value(value_raw, isolate);
+
+ Handle<Object> old_value;
+ bool is_observed =
+ FLAG_harmony_observation &&
+ *function == object &&
+ function->map()->is_observed();
+ if (is_observed) {
+ if (function->has_prototype())
+ old_value = handle(function->prototype(), isolate);
+ else
+ old_value = isolate->factory()->NewFunctionPrototype(function);
}
- Object* prototype;
- { MaybeObject* maybe_prototype = function->SetPrototype(value);
- if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
+ JSFunction::SetPrototype(function, value);
+ ASSERT(function->prototype() == *value);
+
+ if (is_observed && !old_value->SameValue(*value)) {
+ JSObject::EnqueueChangeRecord(
+ function, "updated", isolate->factory()->prototype_string(), old_value);
}
- ASSERT(function->prototype() == value);
- return function;
+
+ return *function;
}
@@ -498,8 +630,10 @@ const AccessorDescriptor Accessors::FunctionPrototype = {
//
-MaybeObject* Accessors::FunctionGetLength(Object* object, void*) {
- JSFunction* function = FindInstanceOf<JSFunction>(object);
+MaybeObject* Accessors::FunctionGetLength(Isolate* isolate,
+ Object* object,
+ void*) {
+ JSFunction* function = FindInstanceOf<JSFunction>(isolate, object);
if (function == NULL) return Smi::FromInt(0);
// Check if already compiled.
if (function->shared()->is_compiled()) {
@@ -507,7 +641,7 @@ MaybeObject* Accessors::FunctionGetLength(Object* object, void*) {
}
// If the function isn't compiled yet, the length is not computed correctly
// yet. Compile it now and return the right length.
- HandleScope scope;
+ HandleScope scope(isolate);
Handle<JSFunction> handle(function);
if (JSFunction::CompileLazy(handle, KEEP_EXCEPTION)) {
return Smi::FromInt(handle->shared()->length());
@@ -528,9 +662,13 @@ const AccessorDescriptor Accessors::FunctionLength = {
//
-MaybeObject* Accessors::FunctionGetName(Object* object, void*) {
- JSFunction* holder = FindInstanceOf<JSFunction>(object);
- return holder == NULL ? HEAP->undefined_value() : holder->shared()->name();
+MaybeObject* Accessors::FunctionGetName(Isolate* isolate,
+ Object* object,
+ void*) {
+ JSFunction* holder = FindInstanceOf<JSFunction>(isolate, object);
+ return holder == NULL
+ ? isolate->heap()->undefined_value()
+ : holder->shared()->name();
}
@@ -546,11 +684,21 @@ const AccessorDescriptor Accessors::FunctionName = {
//
+Handle<Object> Accessors::FunctionGetArguments(Handle<JSFunction> function) {
+ CALL_HEAP_FUNCTION(function->GetIsolate(),
+ Accessors::FunctionGetArguments(function->GetIsolate(),
+ *function,
+ NULL),
+ Object);
+}
+
+
static MaybeObject* ConstructArgumentsObjectForInlinedFunction(
JavaScriptFrame* frame,
Handle<JSFunction> inlined_function,
int inlined_frame_index) {
- Factory* factory = Isolate::Current()->factory();
+ Isolate* isolate = inlined_function->GetIsolate();
+ Factory* factory = isolate->factory();
Vector<SlotRef> args_slots =
SlotRef::ComputeSlotMappingForArguments(
frame,
@@ -561,7 +709,7 @@ static MaybeObject* ConstructArgumentsObjectForInlinedFunction(
factory->NewArgumentsObject(inlined_function, args_count);
Handle<FixedArray> array = factory->NewFixedArray(args_count);
for (int i = 0; i < args_count; ++i) {
- Handle<Object> value = args_slots[i].GetValue();
+ Handle<Object> value = args_slots[i].GetValue(isolate);
array->set(i, *value);
}
arguments->set_elements(*array);
@@ -572,10 +720,11 @@ static MaybeObject* ConstructArgumentsObjectForInlinedFunction(
}
-MaybeObject* Accessors::FunctionGetArguments(Object* object, void*) {
- Isolate* isolate = Isolate::Current();
+MaybeObject* Accessors::FunctionGetArguments(Isolate* isolate,
+ Object* object,
+ void*) {
HandleScope scope(isolate);
- JSFunction* holder = FindInstanceOf<JSFunction>(object);
+ JSFunction* holder = FindInstanceOf<JSFunction>(isolate, object);
if (holder == NULL) return isolate->heap()->undefined_value();
Handle<JSFunction> function(holder, isolate);
@@ -601,7 +750,7 @@ MaybeObject* Accessors::FunctionGetArguments(Object* object, void*) {
// If there is an arguments variable in the stack, we return that.
Handle<ScopeInfo> scope_info(function->shared()->scope_info());
int index = scope_info->StackSlotIndex(
- isolate->heap()->arguments_symbol());
+ isolate->heap()->arguments_string());
if (index >= 0) {
Handle<Object> arguments(frame->GetExpression(index), isolate);
if (!arguments->IsArgumentsMarker()) return *arguments;
@@ -649,22 +798,9 @@ const AccessorDescriptor Accessors::FunctionArguments = {
//
-static MaybeObject* CheckNonStrictCallerOrThrow(
- Isolate* isolate,
- JSFunction* caller) {
- DisableAssertNoAllocation enable_allocation;
- if (!caller->shared()->is_classic_mode()) {
- return isolate->Throw(
- *isolate->factory()->NewTypeError("strict_caller",
- HandleVector<Object>(NULL, 0)));
- }
- return caller;
-}
-
-
class FrameFunctionIterator {
public:
- FrameFunctionIterator(Isolate* isolate, const AssertNoAllocation& promise)
+ FrameFunctionIterator(Isolate* isolate, const DisallowHeapAllocation& promise)
: frame_iterator_(isolate),
functions_(2),
index_(0) {
@@ -708,16 +844,17 @@ class FrameFunctionIterator {
};
-MaybeObject* Accessors::FunctionGetCaller(Object* object, void*) {
- Isolate* isolate = Isolate::Current();
+MaybeObject* Accessors::FunctionGetCaller(Isolate* isolate,
+ Object* object,
+ void*) {
HandleScope scope(isolate);
- AssertNoAllocation no_alloc;
- JSFunction* holder = FindInstanceOf<JSFunction>(object);
+ DisallowHeapAllocation no_allocation;
+ JSFunction* holder = FindInstanceOf<JSFunction>(isolate, object);
if (holder == NULL) return isolate->heap()->undefined_value();
if (holder->shared()->native()) return isolate->heap()->null_value();
Handle<JSFunction> function(holder, isolate);
- FrameFunctionIterator it(isolate, no_alloc);
+ FrameFunctionIterator it(isolate, no_allocation);
// Find the function from the frames.
if (!it.Find(*function)) {
@@ -748,7 +885,14 @@ MaybeObject* Accessors::FunctionGetCaller(Object* object, void*) {
if (caller->shared()->bound()) {
return isolate->heap()->null_value();
}
- return CheckNonStrictCallerOrThrow(isolate, caller);
+ // Censor if the caller is not a classic mode function.
+ // Change from ES5, which used to throw, see:
+ // https://bugs.ecmascript.org/show_bug.cgi?id=310
+ if (!caller->shared()->is_classic_mode()) {
+ return isolate->heap()->null_value();
+ }
+
+ return caller;
}
@@ -760,64 +904,33 @@ const AccessorDescriptor Accessors::FunctionCaller = {
//
-// Accessors::ObjectPrototype
-//
-
-
-MaybeObject* Accessors::ObjectGetPrototype(Object* receiver, void*) {
- Object* current = receiver->GetPrototype();
- while (current->IsJSObject() &&
- JSObject::cast(current)->map()->is_hidden_prototype()) {
- current = current->GetPrototype();
- }
- return current;
-}
-
-
-MaybeObject* Accessors::ObjectSetPrototype(JSObject* receiver,
- Object* value,
- void*) {
- const bool skip_hidden_prototypes = true;
- // To be consistent with other Set functions, return the value.
- return receiver->SetPrototype(value, skip_hidden_prototypes);
-}
-
-
-const AccessorDescriptor Accessors::ObjectPrototype = {
- ObjectGetPrototype,
- ObjectSetPrototype,
- 0
-};
-
-
-//
// Accessors::MakeModuleExport
//
-static v8::Handle<v8::Value> ModuleGetExport(
+static void ModuleGetExport(
v8::Local<v8::String> property,
- const v8::AccessorInfo& info) {
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
JSModule* instance = JSModule::cast(*v8::Utils::OpenHandle(*info.Holder()));
Context* context = Context::cast(instance->context());
ASSERT(context->IsModuleContext());
int slot = info.Data()->Int32Value();
Object* value = context->get(slot);
+ Isolate* isolate = instance->GetIsolate();
if (value->IsTheHole()) {
Handle<String> name = v8::Utils::OpenHandle(*property);
- Isolate* isolate = instance->GetIsolate();
isolate->ScheduleThrow(
*isolate->factory()->NewReferenceError("not_defined",
HandleVector(&name, 1)));
- return v8::Handle<v8::Value>();
+ return;
}
- return v8::Utils::ToLocal(Handle<Object>(value));
+ info.GetReturnValue().Set(v8::Utils::ToLocal(Handle<Object>(value, isolate)));
}
static void ModuleSetExport(
v8::Local<v8::String> property,
v8::Local<v8::Value> value,
- const v8::AccessorInfo& info) {
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
JSModule* instance = JSModule::cast(*v8::Utils::OpenHandle(*info.Holder()));
Context* context = Context::cast(instance->context());
ASSERT(context->IsModuleContext());
@@ -839,15 +952,16 @@ Handle<AccessorInfo> Accessors::MakeModuleExport(
Handle<String> name,
int index,
PropertyAttributes attributes) {
- Factory* factory = name->GetIsolate()->factory();
- Handle<AccessorInfo> info = factory->NewAccessorInfo();
+ Isolate* isolate = name->GetIsolate();
+ Factory* factory = isolate->factory();
+ Handle<ExecutableAccessorInfo> info = factory->NewExecutableAccessorInfo();
info->set_property_attributes(attributes);
info->set_all_can_read(true);
info->set_all_can_write(true);
info->set_name(*name);
info->set_data(Smi::FromInt(index));
- Handle<Object> getter = v8::FromCData(&ModuleGetExport);
- Handle<Object> setter = v8::FromCData(&ModuleSetExport);
+ Handle<Object> getter = v8::FromCData(isolate, &ModuleGetExport);
+ Handle<Object> setter = v8::FromCData(isolate, &ModuleSetExport);
info->set_getter(*getter);
if (!(attributes & ReadOnly)) info->set_setter(*setter);
return info;
diff --git a/deps/v8/src/accessors.h b/deps/v8/src/accessors.h
index 250f742fa..b2dee2793 100644
--- a/deps/v8/src/accessors.h
+++ b/deps/v8/src/accessors.h
@@ -56,8 +56,7 @@ namespace internal {
V(ScriptContextData) \
V(ScriptEvalFromScript) \
V(ScriptEvalFromScriptPosition) \
- V(ScriptEvalFromFunctionName) \
- V(ObjectPrototype)
+ V(ScriptEvalFromFunctionName)
// Accessors contains all predefined proxy accessors.
@@ -78,49 +77,88 @@ class Accessors : public AllStatic {
};
// Accessor functions called directly from the runtime system.
- MUST_USE_RESULT static MaybeObject* FunctionGetPrototype(Object* object,
- void*);
- MUST_USE_RESULT static MaybeObject* FunctionSetPrototype(JSObject* object,
- Object* value,
- void*);
- static MaybeObject* FunctionGetArguments(Object* object, void*);
+ static Handle<Object> FunctionSetPrototype(Handle<JSFunction> object,
+ Handle<Object> value);
+ static Handle<Object> FunctionGetPrototype(Handle<JSFunction> object);
+ static Handle<Object> FunctionGetArguments(Handle<JSFunction> object);
// Accessor infos.
static Handle<AccessorInfo> MakeModuleExport(
Handle<String> name, int index, PropertyAttributes attributes);
+ // Returns true for properties that are accessors to object fields.
+ // If true, *object_offset contains offset of object field.
+ static bool IsJSObjectFieldAccessor(
+ Handle<Map> map, Handle<String> name,
+ int* object_offset);
+
+
private:
// Accessor functions only used through the descriptor.
- static MaybeObject* FunctionGetLength(Object* object, void*);
- static MaybeObject* FunctionGetName(Object* object, void*);
- static MaybeObject* FunctionGetCaller(Object* object, void*);
- MUST_USE_RESULT static MaybeObject* ArraySetLength(JSObject* object,
- Object* value, void*);
- static MaybeObject* ArrayGetLength(Object* object, void*);
- static MaybeObject* StringGetLength(Object* object, void*);
- static MaybeObject* ScriptGetName(Object* object, void*);
- static MaybeObject* ScriptGetId(Object* object, void*);
- static MaybeObject* ScriptGetSource(Object* object, void*);
- static MaybeObject* ScriptGetLineOffset(Object* object, void*);
- static MaybeObject* ScriptGetColumnOffset(Object* object, void*);
- static MaybeObject* ScriptGetData(Object* object, void*);
- static MaybeObject* ScriptGetType(Object* object, void*);
- static MaybeObject* ScriptGetCompilationType(Object* object, void*);
- static MaybeObject* ScriptGetLineEnds(Object* object, void*);
- static MaybeObject* ScriptGetContextData(Object* object, void*);
- static MaybeObject* ScriptGetEvalFromScript(Object* object, void*);
- static MaybeObject* ScriptGetEvalFromScriptPosition(Object* object, void*);
- static MaybeObject* ScriptGetEvalFromFunctionName(Object* object, void*);
- static MaybeObject* ObjectGetPrototype(Object* receiver, void*);
- static MaybeObject* ObjectSetPrototype(JSObject* receiver,
- Object* value,
- void*);
+ static MaybeObject* FunctionSetPrototype(Isolate* isolate,
+ JSObject* object,
+ Object*,
+ void*);
+ static MaybeObject* FunctionGetPrototype(Isolate* isolate,
+ Object* object,
+ void*);
+ static MaybeObject* FunctionGetLength(Isolate* isolate,
+ Object* object,
+ void*);
+ static MaybeObject* FunctionGetName(Isolate* isolate, Object* object, void*);
+ static MaybeObject* FunctionGetArguments(Isolate* isolate,
+ Object* object,
+ void*);
+ static MaybeObject* FunctionGetCaller(Isolate* isolate,
+ Object* object,
+ void*);
+ static MaybeObject* ArraySetLength(Isolate* isolate,
+ JSObject* object,
+ Object*,
+ void*);
+ static MaybeObject* ArrayGetLength(Isolate* isolate, Object* object, void*);
+ static MaybeObject* StringGetLength(Isolate* isolate, Object* object, void*);
+ static MaybeObject* ScriptGetName(Isolate* isolate, Object* object, void*);
+ static MaybeObject* ScriptGetId(Isolate* isolate, Object* object, void*);
+ static MaybeObject* ScriptGetSource(Isolate* isolate, Object* object, void*);
+ static MaybeObject* ScriptGetLineOffset(Isolate* isolate,
+ Object* object,
+ void*);
+ static MaybeObject* ScriptGetColumnOffset(Isolate* isolate,
+ Object* object,
+ void*);
+ static MaybeObject* ScriptGetData(Isolate* isolate, Object* object, void*);
+ static MaybeObject* ScriptGetType(Isolate* isolate, Object* object, void*);
+ static MaybeObject* ScriptGetCompilationType(Isolate* isolate,
+ Object* object,
+ void*);
+ static MaybeObject* ScriptGetLineEnds(Isolate* isolate,
+ Object* object,
+ void*);
+ static MaybeObject* ScriptGetContextData(Isolate* isolate,
+ Object* object,
+ void*);
+ static MaybeObject* ScriptGetEvalFromScript(Isolate* isolate,
+ Object* object,
+ void*);
+ static MaybeObject* ScriptGetEvalFromScriptPosition(Isolate* isolate,
+ Object* object,
+ void*);
+ static MaybeObject* ScriptGetEvalFromFunctionName(Isolate* isolate,
+ Object* object,
+ void*);
// Helper functions.
- static Object* FlattenNumber(Object* value);
- static MaybeObject* IllegalSetter(JSObject*, Object*, void*);
- static Object* IllegalGetAccessor(Object* object, void*);
- static MaybeObject* ReadOnlySetAccessor(JSObject*, Object* value, void*);
+ static Object* FlattenNumber(Isolate* isolate, Object* value);
+ static MaybeObject* IllegalSetter(Isolate* isolate,
+ JSObject*,
+ Object*,
+ void*);
+ static Object* IllegalGetAccessor(Isolate* isolate, Object* object, void*);
+ static MaybeObject* ReadOnlySetAccessor(Isolate* isolate,
+ JSObject*,
+ Object* value,
+ void*);
};
} } // namespace v8::internal
diff --git a/deps/v8/src/allocation-site-scopes.cc b/deps/v8/src/allocation-site-scopes.cc
new file mode 100644
index 000000000..8097045b2
--- /dev/null
+++ b/deps/v8/src/allocation-site-scopes.cc
@@ -0,0 +1,108 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "allocation-site-scopes.h"
+
+namespace v8 {
+namespace internal {
+
+
+Handle<AllocationSite> AllocationSiteCreationContext::EnterNewScope() {
+ Handle<AllocationSite> scope_site;
+ if (top().is_null()) {
+ // We are creating the top level AllocationSite as opposed to a nested
+ // AllocationSite.
+ InitializeTraversal(isolate()->factory()->NewAllocationSite());
+ scope_site = Handle<AllocationSite>(*top(), isolate());
+ if (FLAG_trace_creation_allocation_sites) {
+ PrintF("*** Creating top level AllocationSite %p\n",
+ static_cast<void*>(*scope_site));
+ }
+ } else {
+ ASSERT(!current().is_null());
+ scope_site = isolate()->factory()->NewAllocationSite();
+ if (FLAG_trace_creation_allocation_sites) {
+ PrintF("Creating nested site (top, current, new) (%p, %p, %p)\n",
+ static_cast<void*>(*top()),
+ static_cast<void*>(*current()),
+ static_cast<void*>(*scope_site));
+ }
+ current()->set_nested_site(*scope_site);
+ update_current_site(*scope_site);
+ }
+ ASSERT(!scope_site.is_null());
+ return scope_site;
+}
+
+
+void AllocationSiteCreationContext::ExitScope(
+ Handle<AllocationSite> scope_site,
+ Handle<JSObject> object) {
+ if (!object.is_null() && !object->IsFailure()) {
+ bool top_level = !scope_site.is_null() &&
+ top().is_identical_to(scope_site);
+
+ scope_site->set_transition_info(*object);
+ if (FLAG_trace_creation_allocation_sites) {
+ if (top_level) {
+ PrintF("*** Setting AllocationSite %p transition_info %p\n",
+ static_cast<void*>(*scope_site),
+ static_cast<void*>(*object));
+ } else {
+ PrintF("Setting AllocationSite (%p, %p) transition_info %p\n",
+ static_cast<void*>(*top()),
+ static_cast<void*>(*scope_site),
+ static_cast<void*>(*object));
+ }
+ }
+ }
+}
+
+
+Handle<AllocationSite> AllocationSiteUsageContext::EnterNewScope() {
+ if (top().is_null()) {
+ InitializeTraversal(top_site_);
+ } else {
+ // Advance current site
+ Object* nested_site = current()->nested_site();
+ // Something is wrong if we advance to the end of the list here.
+ ASSERT(nested_site->IsAllocationSite());
+ update_current_site(AllocationSite::cast(nested_site));
+ }
+ return Handle<AllocationSite>(*current(), isolate());
+}
+
+
+void AllocationSiteUsageContext::ExitScope(
+ Handle<AllocationSite> scope_site,
+ Handle<JSObject> object) {
+ // This assert ensures that we are pointing at the right sub-object in a
+ // recursive walk of a nested literal.
+ ASSERT(object.is_null() || *object == scope_site->transition_info());
+}
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/allocation-site-scopes.h b/deps/v8/src/allocation-site-scopes.h
new file mode 100644
index 000000000..1c3afdf36
--- /dev/null
+++ b/deps/v8/src/allocation-site-scopes.h
@@ -0,0 +1,115 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ALLOCATION_SITE_SCOPES_H_
+#define V8_ALLOCATION_SITE_SCOPES_H_
+
+#include "ast.h"
+#include "handles.h"
+#include "objects.h"
+#include "zone.h"
+
+namespace v8 {
+namespace internal {
+
+
+// AllocationSiteContext is the base class for walking and copying a nested
+// boilerplate with AllocationSite and AllocationMemento support.
+class AllocationSiteContext {
+ public:
+ AllocationSiteContext(Isolate* isolate, bool activated) {
+ isolate_ = isolate;
+ activated_ = activated;
+ };
+ virtual ~AllocationSiteContext() {}
+
+ Handle<AllocationSite> top() { return top_; }
+ Handle<AllocationSite> current() { return current_; }
+
+ // If activated, then recursively create mementos
+ bool activated() const { return activated_; }
+
+ // Returns the AllocationSite that matches this scope.
+ virtual Handle<AllocationSite> EnterNewScope() = 0;
+
+ // scope_site should be the handle returned by the matching EnterNewScope()
+ virtual void ExitScope(Handle<AllocationSite> scope_site,
+ Handle<JSObject> object) = 0;
+
+ protected:
+ void update_current_site(AllocationSite* site) {
+ *(current_.location()) = site;
+ }
+
+ Isolate* isolate() { return isolate_; }
+ void InitializeTraversal(Handle<AllocationSite> site) {
+ top_ = site;
+ current_ = Handle<AllocationSite>(*top_, isolate());
+ }
+
+ private:
+ Isolate* isolate_;
+ Handle<AllocationSite> top_;
+ Handle<AllocationSite> current_;
+ bool activated_;
+};
+
+
+// AllocationSiteCreationContext aids in the creation of AllocationSites to
+// accompany object literals.
+class AllocationSiteCreationContext : public AllocationSiteContext {
+ public:
+ explicit AllocationSiteCreationContext(Isolate* isolate)
+ : AllocationSiteContext(isolate, true) { }
+
+ virtual Handle<AllocationSite> EnterNewScope() V8_OVERRIDE;
+ virtual void ExitScope(Handle<AllocationSite> site,
+ Handle<JSObject> object) V8_OVERRIDE;
+};
+
+
+// AllocationSiteUsageContext aids in the creation of AllocationMementos placed
+// behind some/all components of a copied object literal.
+class AllocationSiteUsageContext : public AllocationSiteContext {
+ public:
+ AllocationSiteUsageContext(Isolate* isolate, Handle<AllocationSite> site,
+ bool activated)
+ : AllocationSiteContext(isolate, activated),
+ top_site_(site) { }
+
+ virtual Handle<AllocationSite> EnterNewScope() V8_OVERRIDE;
+ virtual void ExitScope(Handle<AllocationSite> site,
+ Handle<JSObject> object) V8_OVERRIDE;
+
+ private:
+ Handle<AllocationSite> top_site_;
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_ALLOCATION_SITE_SCOPES_H_
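
A minimal sketch of how these scopes pair up during a boilerplate walk, assuming a hypothetical CopyBoilerplate() helper at the call site; every nested literal gets a matching EnterNewScope()/ExitScope() around the copy of its sub-object:

Handle<JSObject> CopyWithMementos(Isolate* isolate,
                                  Handle<AllocationSite> top_site,
                                  Handle<JSObject> boilerplate) {
  // Activated context: mementos are requested for every nested scope.
  AllocationSiteUsageContext usage_context(isolate, top_site, true);
  Handle<AllocationSite> site = usage_context.EnterNewScope();
  Handle<JSObject> copy = CopyBoilerplate(isolate, boilerplate);  // hypothetical helper
  usage_context.ExitScope(site, copy);  // asserts the site matches the copied sub-object
  return copy;
}
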
diff --git a/deps/v8/src/allocation-tracker.cc b/deps/v8/src/allocation-tracker.cc
new file mode 100644
index 000000000..586ce3c45
--- /dev/null
+++ b/deps/v8/src/allocation-tracker.cc
@@ -0,0 +1,279 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "allocation-tracker.h"
+
+#include "heap-snapshot-generator.h"
+#include "frames-inl.h"
+
+namespace v8 {
+namespace internal {
+
+AllocationTraceNode::AllocationTraceNode(
+ AllocationTraceTree* tree, SnapshotObjectId shared_function_info_id)
+ : tree_(tree),
+ function_id_(shared_function_info_id),
+ total_size_(0),
+ allocation_count_(0),
+ id_(tree->next_node_id()) {
+}
+
+
+AllocationTraceNode::~AllocationTraceNode() {
+}
+
+
+AllocationTraceNode* AllocationTraceNode::FindChild(SnapshotObjectId id) {
+ for (int i = 0; i < children_.length(); i++) {
+ AllocationTraceNode* node = children_[i];
+ if (node->function_id() == id) return node;
+ }
+ return NULL;
+}
+
+
+AllocationTraceNode* AllocationTraceNode::FindOrAddChild(SnapshotObjectId id) {
+ AllocationTraceNode* child = FindChild(id);
+ if (child == NULL) {
+ child = new AllocationTraceNode(tree_, id);
+ children_.Add(child);
+ }
+ return child;
+}
+
+
+void AllocationTraceNode::AddAllocation(unsigned size) {
+ total_size_ += size;
+ ++allocation_count_;
+}
+
+
+void AllocationTraceNode::Print(int indent, AllocationTracker* tracker) {
+ OS::Print("%10u %10u %*c", total_size_, allocation_count_, indent, ' ');
+ if (tracker != NULL) {
+ const char* name = "<unknown function>";
+ if (function_id_ != 0) {
+ AllocationTracker::FunctionInfo* info =
+ tracker->GetFunctionInfo(function_id_);
+ if (info != NULL) {
+ name = info->name;
+ }
+ }
+ OS::Print("%s #%u", name, id_);
+ } else {
+ OS::Print("%u #%u", function_id_, id_);
+ }
+ OS::Print("\n");
+ indent += 2;
+ for (int i = 0; i < children_.length(); i++) {
+ children_[i]->Print(indent, tracker);
+ }
+}
+
+
+AllocationTraceTree::AllocationTraceTree()
+ : next_node_id_(1),
+ root_(this, 0) {
+}
+
+
+AllocationTraceTree::~AllocationTraceTree() {
+}
+
+
+AllocationTraceNode* AllocationTraceTree::AddPathFromEnd(
+ const Vector<SnapshotObjectId>& path) {
+ AllocationTraceNode* node = root();
+ for (SnapshotObjectId* entry = path.start() + path.length() - 1;
+ entry != path.start() - 1;
+ --entry) {
+ node = node->FindOrAddChild(*entry);
+ }
+ return node;
+}
+
+
+void AllocationTraceTree::Print(AllocationTracker* tracker) {
+ OS::Print("[AllocationTraceTree:]\n");
+ OS::Print("Total size | Allocation count | Function id | id\n");
+ root()->Print(0, tracker);
+}
+
+void AllocationTracker::DeleteUnresolvedLocation(
+ UnresolvedLocation** location) {
+ delete *location;
+}
+
+
+AllocationTracker::FunctionInfo::FunctionInfo()
+ : name(""),
+ script_name(""),
+ script_id(0),
+ line(-1),
+ column(-1) {
+}
+
+
+static bool AddressesMatch(void* key1, void* key2) {
+ return key1 == key2;
+}
+
+
+AllocationTracker::AllocationTracker(
+ HeapObjectsMap* ids, StringsStorage* names)
+ : ids_(ids),
+ names_(names),
+ id_to_function_info_(AddressesMatch) {
+}
+
+
+AllocationTracker::~AllocationTracker() {
+ unresolved_locations_.Iterate(DeleteUnresolvedLocation);
+}
+
+
+void AllocationTracker::PrepareForSerialization() {
+ List<UnresolvedLocation*> copy(unresolved_locations_.length());
+ copy.AddAll(unresolved_locations_);
+ unresolved_locations_.Clear();
+ for (int i = 0; i < copy.length(); i++) {
+ copy[i]->Resolve();
+ delete copy[i];
+ }
+}
+
+
+void AllocationTracker::NewObjectEvent(Address addr, int size) {
+ DisallowHeapAllocation no_allocation;
+ Heap* heap = ids_->heap();
+
+ // Mark the new block as FreeSpace to make sure the heap is iterable
+  // while we are capturing the stack trace.
+ FreeListNode::FromAddress(addr)->set_size(heap, size);
+ ASSERT_EQ(HeapObject::FromAddress(addr)->Size(), size);
+ ASSERT(FreeListNode::IsFreeListNode(HeapObject::FromAddress(addr)));
+
+ Isolate* isolate = heap->isolate();
+ int length = 0;
+ StackTraceFrameIterator it(isolate);
+ while (!it.done() && length < kMaxAllocationTraceLength) {
+ JavaScriptFrame* frame = it.frame();
+ SharedFunctionInfo* shared = frame->function()->shared();
+ SnapshotObjectId id = ids_->FindEntry(shared->address());
+ allocation_trace_buffer_[length++] = id;
+ AddFunctionInfo(shared, id);
+ it.Advance();
+ }
+ AllocationTraceNode* top_node = trace_tree_.AddPathFromEnd(
+ Vector<SnapshotObjectId>(allocation_trace_buffer_, length));
+ top_node->AddAllocation(size);
+}
+
+
+static uint32_t SnapshotObjectIdHash(SnapshotObjectId id) {
+ return ComputeIntegerHash(static_cast<uint32_t>(id),
+ v8::internal::kZeroHashSeed);
+}
+
+
+AllocationTracker::FunctionInfo* AllocationTracker::GetFunctionInfo(
+ SnapshotObjectId id) {
+ HashMap::Entry* entry = id_to_function_info_.Lookup(
+ reinterpret_cast<void*>(id), SnapshotObjectIdHash(id), false);
+ if (entry == NULL) {
+ return NULL;
+ }
+ return reinterpret_cast<FunctionInfo*>(entry->value);
+}
+
+
+void AllocationTracker::AddFunctionInfo(SharedFunctionInfo* shared,
+ SnapshotObjectId id) {
+ HashMap::Entry* entry = id_to_function_info_.Lookup(
+ reinterpret_cast<void*>(id), SnapshotObjectIdHash(id), true);
+ if (entry->value == NULL) {
+ FunctionInfo* info = new FunctionInfo();
+ info->name = names_->GetFunctionName(shared->DebugName());
+ if (shared->script()->IsScript()) {
+ Script* script = Script::cast(shared->script());
+ if (script->name()->IsName()) {
+ Name* name = Name::cast(script->name());
+ info->script_name = names_->GetName(name);
+ }
+ info->script_id = script->id()->value();
+ // Converting start offset into line and column may cause heap
+ // allocations so we postpone them until snapshot serialization.
+ unresolved_locations_.Add(new UnresolvedLocation(
+ script,
+ shared->start_position(),
+ info));
+ }
+ entry->value = info;
+ }
+}
+
+
+AllocationTracker::UnresolvedLocation::UnresolvedLocation(
+ Script* script, int start, FunctionInfo* info)
+ : start_position_(start),
+ info_(info) {
+ script_ = Handle<Script>::cast(
+ script->GetIsolate()->global_handles()->Create(script));
+ GlobalHandles::MakeWeak(
+ reinterpret_cast<Object**>(script_.location()),
+ this, &HandleWeakScript);
+}
+
+
+AllocationTracker::UnresolvedLocation::~UnresolvedLocation() {
+ if (!script_.is_null()) {
+ script_->GetIsolate()->global_handles()->Destroy(
+ reinterpret_cast<Object**>(script_.location()));
+ }
+}
+
+
+void AllocationTracker::UnresolvedLocation::Resolve() {
+ if (script_.is_null()) return;
+ info_->line = GetScriptLineNumber(script_, start_position_);
+ info_->column = GetScriptColumnNumber(script_, start_position_);
+}
+
+
+void AllocationTracker::UnresolvedLocation::HandleWeakScript(
+ v8::Isolate* isolate,
+ v8::Persistent<v8::Value>* obj,
+ void* data) {
+ UnresolvedLocation* location = reinterpret_cast<UnresolvedLocation*>(data);
+ location->script_ = Handle<Script>::null();
+ obj->Dispose();
+}
+
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/allocation-tracker.h b/deps/v8/src/allocation-tracker.h
new file mode 100644
index 000000000..617cf902e
--- /dev/null
+++ b/deps/v8/src/allocation-tracker.h
@@ -0,0 +1,138 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ALLOCATION_TRACKER_H_
+#define V8_ALLOCATION_TRACKER_H_
+
+namespace v8 {
+namespace internal {
+
+class HeapObjectsMap;
+
+class AllocationTraceTree;
+
+class AllocationTraceNode {
+ public:
+ AllocationTraceNode(AllocationTraceTree* tree,
+ SnapshotObjectId shared_function_info_id);
+ ~AllocationTraceNode();
+ AllocationTraceNode* FindChild(SnapshotObjectId shared_function_info_id);
+ AllocationTraceNode* FindOrAddChild(SnapshotObjectId shared_function_info_id);
+ void AddAllocation(unsigned size);
+
+ SnapshotObjectId function_id() const { return function_id_; }
+ unsigned allocation_size() const { return total_size_; }
+ unsigned allocation_count() const { return allocation_count_; }
+ unsigned id() const { return id_; }
+ Vector<AllocationTraceNode*> children() const { return children_.ToVector(); }
+
+ void Print(int indent, AllocationTracker* tracker);
+
+ private:
+ AllocationTraceTree* tree_;
+ SnapshotObjectId function_id_;
+ unsigned total_size_;
+ unsigned allocation_count_;
+ unsigned id_;
+ List<AllocationTraceNode*> children_;
+
+ DISALLOW_COPY_AND_ASSIGN(AllocationTraceNode);
+};
+
+
+class AllocationTraceTree {
+ public:
+ AllocationTraceTree();
+ ~AllocationTraceTree();
+ AllocationTraceNode* AddPathFromEnd(const Vector<SnapshotObjectId>& path);
+ AllocationTraceNode* root() { return &root_; }
+ unsigned next_node_id() { return next_node_id_++; }
+ void Print(AllocationTracker* tracker);
+
+ private:
+ unsigned next_node_id_;
+ AllocationTraceNode root_;
+
+ DISALLOW_COPY_AND_ASSIGN(AllocationTraceTree);
+};
+
+
+class AllocationTracker {
+ public:
+ struct FunctionInfo {
+ FunctionInfo();
+ const char* name;
+ const char* script_name;
+ int script_id;
+ int line;
+ int column;
+ };
+
+ AllocationTracker(HeapObjectsMap* ids, StringsStorage* names);
+ ~AllocationTracker();
+
+ void PrepareForSerialization();
+ void NewObjectEvent(Address addr, int size);
+
+ AllocationTraceTree* trace_tree() { return &trace_tree_; }
+ HashMap* id_to_function_info() { return &id_to_function_info_; }
+ FunctionInfo* GetFunctionInfo(SnapshotObjectId id);
+
+ private:
+ void AddFunctionInfo(SharedFunctionInfo* info, SnapshotObjectId id);
+
+ class UnresolvedLocation {
+ public:
+ UnresolvedLocation(Script* script, int start, FunctionInfo* info);
+ ~UnresolvedLocation();
+ void Resolve();
+
+ private:
+ static void HandleWeakScript(v8::Isolate* isolate,
+ v8::Persistent<v8::Value>* obj,
+ void* data);
+ Handle<Script> script_;
+ int start_position_;
+ FunctionInfo* info_;
+ };
+ static void DeleteUnresolvedLocation(UnresolvedLocation** location);
+
+ static const int kMaxAllocationTraceLength = 64;
+ HeapObjectsMap* ids_;
+ StringsStorage* names_;
+ AllocationTraceTree trace_tree_;
+ SnapshotObjectId allocation_trace_buffer_[kMaxAllocationTraceLength];
+ HashMap id_to_function_info_;
+ List<UnresolvedLocation*> unresolved_locations_;
+
+ DISALLOW_COPY_AND_ASSIGN(AllocationTracker);
+};
+
+} } // namespace v8::internal
+
+#endif // V8_ALLOCATION_TRACKER_H_
+
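
A sketch of the trace-tree bookkeeping performed by NewObjectEvent() above; the frame ids below are assumed values captured innermost-first, and AddPathFromEnd() walks the vector backwards so the resulting path is rooted at the outermost frame:

SnapshotObjectId inner_id = 3, middle_id = 5, outer_id = 7;  // assumed ids
SnapshotObjectId trace[3] = { inner_id, middle_id, outer_id };
AllocationTraceTree tree;
AllocationTraceNode* leaf =
    tree.AddPathFromEnd(Vector<SnapshotObjectId>(trace, 3));
// The returned node represents the innermost frame; the allocation is recorded there.
leaf->AddAllocation(64);  // size in bytes
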
diff --git a/deps/v8/src/allocation.cc b/deps/v8/src/allocation.cc
index 6c7a08cec..94aaad3fd 100644
--- a/deps/v8/src/allocation.cc
+++ b/deps/v8/src/allocation.cc
@@ -28,8 +28,8 @@
#include "allocation.h"
#include <stdlib.h> // For free, malloc.
-#include <string.h> // For memcpy.
#include "checks.h"
+#include "platform.h"
#include "utils.h"
namespace v8 {
@@ -85,7 +85,7 @@ void AllStatic::operator delete(void* p) {
char* StrDup(const char* str) {
int length = StrLength(str);
char* result = NewArray<char>(length + 1);
- memcpy(result, str, length);
+ OS::MemCopy(result, str, length);
result[length] = '\0';
return result;
}
@@ -95,7 +95,7 @@ char* StrNDup(const char* str, int n) {
int length = StrLength(str);
if (n < length) length = n;
char* result = NewArray<char>(length + 1);
- memcpy(result, str, length);
+ OS::MemCopy(result, str, length);
result[length] = '\0';
return result;
}
diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc
index f1683984e..8a73877ee 100644
--- a/deps/v8/src/api.cc
+++ b/deps/v8/src/api.cc
@@ -27,34 +27,42 @@
#include "api.h"
-#include <math.h> // For isnan.
#include <string.h> // For memcpy, strlen.
+#include <cmath> // For isnan.
#include "../include/v8-debug.h"
#include "../include/v8-profiler.h"
#include "../include/v8-testing.h"
+#include "assert-scope.h"
#include "bootstrapper.h"
#include "code-stubs.h"
#include "compiler.h"
#include "conversions-inl.h"
#include "counters.h"
+#include "cpu-profiler.h"
#include "debug.h"
#include "deoptimizer.h"
#include "execution.h"
#include "global-handles.h"
#include "heap-profiler.h"
+#include "heap-snapshot-generator-inl.h"
+#include "icu_util.h"
+#include "json-parser.h"
#include "messages.h"
#ifdef COMPRESS_STARTUP_DATA_BZ2
#include "natives.h"
#endif
#include "parser.h"
#include "platform.h"
+#include "platform/time.h"
#include "profile-generator-inl.h"
#include "property-details.h"
#include "property.h"
+#include "runtime.h"
#include "runtime-profiler.h"
#include "scanner-character-streams.h"
#include "snapshot.h"
#include "unicode-inl.h"
+#include "utils/random-number-generator.h"
#include "v8threads.h"
#include "version.h"
#include "vm-state-inl.h"
@@ -62,17 +70,14 @@
#define LOG_API(isolate, expr) LOG(isolate, ApiEntryCall(expr))
-#define ENTER_V8(isolate) \
- ASSERT((isolate)->IsInitialized()); \
- i::VMState __state__((isolate), i::OTHER)
-#define LEAVE_V8(isolate) \
- i::VMState __state__((isolate), i::EXTERNAL)
+#define ENTER_V8(isolate) \
+ ASSERT((isolate)->IsInitialized()); \
+ i::VMState<i::OTHER> __state__((isolate))
namespace v8 {
#define ON_BAILOUT(isolate, location, code) \
- if (IsDeadCheck(isolate, location) || \
- IsExecutionTerminatingCheck(isolate)) { \
+ if (IsExecutionTerminatingCheck(isolate)) { \
code; \
UNREACHABLE(); \
}
@@ -128,8 +133,13 @@ namespace v8 {
static void DefaultFatalErrorHandler(const char* location,
const char* message) {
- i::VMState __state__(i::Isolate::Current(), i::OTHER);
- API_Fatal(location, message);
+ i::Isolate* isolate = i::Isolate::Current();
+ if (isolate->IsInitialized()) {
+ i::VMState<i::OTHER> state(isolate);
+ API_Fatal(location, message);
+ } else {
+ API_Fatal(location, message);
+ }
}
@@ -177,6 +187,10 @@ void i::V8::FatalProcessOutOfMemory(const char* location, bool take_snapshot) {
heap_stats.cell_space_size = &cell_space_size;
intptr_t cell_space_capacity;
heap_stats.cell_space_capacity = &cell_space_capacity;
+ intptr_t property_cell_space_size;
+ heap_stats.property_cell_space_size = &property_cell_space_size;
+ intptr_t property_cell_space_capacity;
+ heap_stats.property_cell_space_capacity = &property_cell_space_capacity;
intptr_t lo_space_size;
heap_stats.lo_space_size = &lo_space_size;
int global_handle_count;
@@ -202,31 +216,32 @@ void i::V8::FatalProcessOutOfMemory(const char* location, bool take_snapshot) {
int end_marker;
heap_stats.end_marker = &end_marker;
i::Isolate* isolate = i::Isolate::Current();
- // BUG(1718):
- // Don't use the take_snapshot since we don't support HeapIterator here
- // without doing a special GC.
- isolate->heap()->RecordStats(&heap_stats, false);
- i::V8::SetFatalError();
- FatalErrorCallback callback = GetFatalErrorHandler();
- {
- LEAVE_V8(isolate);
- callback(location, "Allocation failed - process out of memory");
+ if (isolate->heap()->HasBeenSetUp()) {
+ // BUG(1718): Don't use the take_snapshot since we don't support
+ // HeapIterator here without doing a special GC.
+ isolate->heap()->RecordStats(&heap_stats, false);
}
+ isolate->SignalFatalError();
+ FatalErrorCallback callback = GetFatalErrorHandler();
+ const char* message = "Allocation failed - process out of memory";
+ callback(location, message);
// If the callback returns, we stop execution.
- UNREACHABLE();
+ FATAL("API fatal error handler returned after process out of memory");
}
bool Utils::ReportApiFailure(const char* location, const char* message) {
FatalErrorCallback callback = GetFatalErrorHandler();
callback(location, message);
- i::V8::SetFatalError();
+ i::Isolate* isolate = i::Isolate::Current();
+ isolate->SignalFatalError();
return false;
}
bool V8::IsDead() {
- return i::V8::IsDead();
+ i::Isolate* isolate = i::Isolate::Current();
+ return isolate->IsDead();
}
@@ -237,13 +252,6 @@ static inline bool ApiCheck(bool condition,
}
-static bool ReportV8Dead(const char* location) {
- FatalErrorCallback callback = GetFatalErrorHandler();
- callback(location, "V8 is no longer usable");
- return true;
-}
-
-
static bool ReportEmptyHandle(const char* location) {
FatalErrorCallback callback = GetFatalErrorHandler();
callback(location, "Reading from empty handle");
@@ -251,24 +259,6 @@ static bool ReportEmptyHandle(const char* location) {
}
-/**
- * IsDeadCheck checks that the vm is usable. If, for instance, the vm has been
- * out of memory at some point this check will fail. It should be called on
- * entry to all methods that touch anything in the heap, except destructors
- * which you sometimes can't avoid calling after the vm has crashed. Functions
- * that call EnsureInitialized or ON_BAILOUT don't have to also call
- * IsDeadCheck. ON_BAILOUT has the advantage over EnsureInitialized that you
- * can arrange to return if the VM is dead. This is needed to ensure that no VM
- * heap allocations are attempted on a dead VM. EnsureInitialized has the
- * advantage over ON_BAILOUT that it actually initializes the VM if this has not
- * yet been done.
- */
-static inline bool IsDeadCheck(i::Isolate* isolate, const char* location) {
- return !isolate->IsInitialized()
- && i::V8::IsDead() ? ReportV8Dead(location) : false;
-}
-
-
static inline bool IsExecutionTerminatingCheck(i::Isolate* isolate) {
if (!isolate->IsInitialized()) return false;
if (isolate->has_scheduled_exception()) {
@@ -288,25 +278,31 @@ static inline bool EmptyCheck(const char* location, const v8::Data* obj) {
return (obj == 0) ? ReportEmptyHandle(location) : false;
}
+
// --- S t a t i c s ---
-static bool InitializeHelper() {
- if (i::Snapshot::Initialize()) return true;
+static bool InitializeHelper(i::Isolate* isolate) {
+ // If the isolate has a function entry hook, it needs to re-build all its
+  // code stubs with entry hooks embedded, so don't deserialize a snapshot.
+ if (isolate == NULL || isolate->function_entry_hook() == NULL) {
+ if (i::Snapshot::Initialize())
+ return true;
+ }
return i::V8::Initialize(NULL);
}
static inline bool EnsureInitializedForIsolate(i::Isolate* isolate,
const char* location) {
- if (IsDeadCheck(isolate, location)) return false;
if (isolate != NULL) {
if (isolate->IsInitialized()) return true;
}
ASSERT(isolate == i::Isolate::Current());
- return ApiCheck(InitializeHelper(), location, "Error initializing V8");
+ return ApiCheck(InitializeHelper(isolate), location, "Error initializing V8");
}
+
// Some initializing API functions are called early and may be
// called on a thread different from static initializer thread.
// If Isolate API is used, Isolate::Enter() will initialize TLS so
@@ -383,6 +379,7 @@ enum CompressedStartupDataItems {
kCompressedStartupDataCount
};
+
int V8::GetCompressedStartupDataCount() {
#ifdef COMPRESS_STARTUP_DATA_BZ2
return kCompressedStartupDataCount;
@@ -465,14 +462,6 @@ void V8::SetAllowCodeGenerationFromStringsCallback(
}
-#ifdef DEBUG
-void ImplementationUtilities::ZapHandleRange(i::Object** begin,
- i::Object** end) {
- i::HandleScope::ZapRange(begin, end);
-}
-#endif
-
-
void V8::SetFlagsFromString(const char* str, int length) {
i::FlagList::SetFlagsFromString(str, length);
}
@@ -484,19 +473,7 @@ void V8::SetFlagsFromCommandLine(int* argc, char** argv, bool remove_flags) {
v8::Handle<Value> ThrowException(v8::Handle<v8::Value> value) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::ThrowException()")) {
- return v8::Handle<Value>();
- }
- ENTER_V8(isolate);
- // If we're passed an empty handle, we throw an undefined exception
- // to deal more gracefully with out of memory situations.
- if (value.IsEmpty()) {
- isolate->ScheduleThrow(isolate->heap()->undefined_value());
- } else {
- isolate->ScheduleThrow(*Utils::OpenHandle(*value));
- }
- return v8::Undefined();
+ return v8::Isolate::GetCurrent()->ThrowException(value);
}
@@ -551,8 +528,7 @@ v8::Handle<Primitive> Undefined() {
if (!EnsureInitializedForIsolate(isolate, "v8::Undefined()")) {
return v8::Handle<v8::Primitive>();
}
- return v8::Handle<Primitive>(ToApi<Primitive>(
- isolate->factory()->undefined_value()));
+ return ToApiHandle<Primitive>(isolate->factory()->undefined_value());
}
@@ -561,8 +537,7 @@ v8::Handle<Primitive> Null() {
if (!EnsureInitializedForIsolate(isolate, "v8::Null()")) {
return v8::Handle<v8::Primitive>();
}
- return v8::Handle<Primitive>(
- ToApi<Primitive>(isolate->factory()->null_value()));
+ return ToApiHandle<Primitive>(isolate->factory()->null_value());
}
@@ -571,8 +546,7 @@ v8::Handle<Boolean> True() {
if (!EnsureInitializedForIsolate(isolate, "v8::True()")) {
return v8::Handle<Boolean>();
}
- return v8::Handle<Boolean>(
- ToApi<Boolean>(isolate->factory()->true_value()));
+ return ToApiHandle<Boolean>(isolate->factory()->true_value());
}
@@ -581,8 +555,7 @@ v8::Handle<Boolean> False() {
if (!EnsureInitializedForIsolate(isolate, "v8::False()")) {
return v8::Handle<Boolean>();
}
- return v8::Handle<Boolean>(
- ToApi<Boolean>(isolate->factory()->false_value()));
+ return ToApiHandle<Boolean>(isolate->factory()->false_value());
}
@@ -601,7 +574,8 @@ bool SetResourceConstraints(ResourceConstraints* constraints) {
int max_executable_size = constraints->max_executable_size();
if (young_space_size != 0 || old_gen_size != 0 || max_executable_size != 0) {
// After initialization it's too late to change Heap constraints.
- ASSERT(!isolate->IsInitialized());
+ // TODO(rmcilroy): fix this assert.
+ // ASSERT(!isolate->IsInitialized());
bool result = isolate->heap()->ConfigureHeap(young_space_size / 2,
old_gen_size,
max_executable_size);
@@ -615,79 +589,73 @@ bool SetResourceConstraints(ResourceConstraints* constraints) {
}
-i::Object** V8::GlobalizeReference(i::Object** obj) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "V8::Persistent::New")) return NULL;
+i::Object** V8::GlobalizeReference(i::Isolate* isolate, i::Object** obj) {
LOG_API(isolate, "Persistent::New");
- i::Handle<i::Object> result =
- isolate->global_handles()->Create(*obj);
+ i::Handle<i::Object> result = isolate->global_handles()->Create(*obj);
+#ifdef DEBUG
+ (*obj)->Verify();
+#endif // DEBUG
return result.location();
}
-void V8::MakeWeak(i::Object** object, void* parameters,
- WeakReferenceCallback callback) {
- i::Isolate* isolate = i::Isolate::Current();
- LOG_API(isolate, "MakeWeak");
- isolate->global_handles()->MakeWeak(object, parameters,
- callback);
+i::Object** V8::CopyPersistent(i::Object** obj) {
+ i::Handle<i::Object> result = i::GlobalHandles::CopyGlobal(obj);
+#ifdef DEBUG
+ (*obj)->Verify();
+#endif // DEBUG
+ return result.location();
}
-void V8::ClearWeak(i::Object** obj) {
- i::Isolate* isolate = i::Isolate::Current();
- LOG_API(isolate, "ClearWeak");
- isolate->global_handles()->ClearWeakness(obj);
+void V8::MakeWeak(i::Object** object,
+ void* parameters,
+ WeakCallback weak_callback,
+ RevivableCallback weak_reference_callback) {
+ i::GlobalHandles::MakeWeak(object,
+ parameters,
+ weak_callback,
+ weak_reference_callback);
}
-void V8::MarkIndependent(i::Object** object) {
- i::Isolate* isolate = i::Isolate::Current();
- LOG_API(isolate, "MakeIndependent");
- isolate->global_handles()->MarkIndependent(object);
+void V8::ClearWeak(i::Object** obj) {
+ i::GlobalHandles::ClearWeakness(obj);
}
-bool V8::IsGlobalIndependent(i::Object** obj) {
- i::Isolate* isolate = i::Isolate::Current();
- LOG_API(isolate, "IsGlobalIndependent");
- if (!isolate->IsInitialized()) return false;
- return i::GlobalHandles::IsIndependent(obj);
+void V8::DisposeGlobal(i::Object** obj) {
+ i::GlobalHandles::Destroy(obj);
}
-bool V8::IsGlobalNearDeath(i::Object** obj) {
- i::Isolate* isolate = i::Isolate::Current();
- LOG_API(isolate, "IsGlobalNearDeath");
- if (!isolate->IsInitialized()) return false;
- return i::GlobalHandles::IsNearDeath(obj);
+void V8::Eternalize(Isolate* v8_isolate, Value* value, int* index) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+ i::Object* object = *Utils::OpenHandle(value);
+ isolate->eternal_handles()->Create(isolate, object, index);
}
-bool V8::IsGlobalWeak(i::Object** obj) {
- i::Isolate* isolate = i::Isolate::Current();
- LOG_API(isolate, "IsGlobalWeak");
- if (!isolate->IsInitialized()) return false;
- return i::GlobalHandles::IsWeak(obj);
+Local<Value> V8::GetEternal(Isolate* v8_isolate, int index) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+ return Utils::ToLocal(isolate->eternal_handles()->Get(index));
}
-void V8::DisposeGlobal(i::Object** obj) {
- i::Isolate* isolate = i::Isolate::Current();
- LOG_API(isolate, "DisposeGlobal");
- if (!isolate->IsInitialized()) return;
- isolate->global_handles()->Destroy(obj);
-}
-
// --- H a n d l e s ---
-HandleScope::HandleScope() {
- i::Isolate* isolate = i::Isolate::Current();
- API_ENTRY_CHECK(isolate, "HandleScope::HandleScope");
+HandleScope::HandleScope(Isolate* isolate) {
+ Initialize(isolate);
+}
+
+
+void HandleScope::Initialize(Isolate* isolate) {
+ i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ API_ENTRY_CHECK(internal_isolate, "HandleScope::HandleScope");
v8::ImplementationUtilities::HandleScopeData* current =
- isolate->handle_scope_data();
- isolate_ = isolate;
+ internal_isolate->handle_scope_data();
+ isolate_ = internal_isolate;
prev_next_ = current->next;
prev_limit_ = current->limit;
is_closed_ = false;
@@ -703,103 +671,149 @@ HandleScope::~HandleScope() {
void HandleScope::Leave() {
- ASSERT(isolate_ == i::Isolate::Current());
- v8::ImplementationUtilities::HandleScopeData* current =
- isolate_->handle_scope_data();
- current->level--;
- ASSERT(current->level >= 0);
- current->next = prev_next_;
- if (current->limit != prev_limit_) {
- current->limit = prev_limit_;
- i::HandleScope::DeleteExtensions(isolate_);
+ return i::HandleScope::CloseScope(isolate_, prev_next_, prev_limit_);
+}
+
+
+int HandleScope::NumberOfHandles() {
+ i::Isolate* isolate = i::Isolate::Current();
+ if (!EnsureInitializedForIsolate(isolate, "HandleScope::NumberOfHandles")) {
+ return 0;
}
+ return i::HandleScope::NumberOfHandles(isolate);
+}
-#ifdef DEBUG
- i::HandleScope::ZapRange(prev_next_, prev_limit_);
-#endif
+
+i::Object** HandleScope::CreateHandle(i::Isolate* isolate, i::Object* value) {
+ return i::HandleScope::CreateHandle(isolate, value);
}
-int HandleScope::NumberOfHandles() {
- EnsureInitializedForIsolate(
- i::Isolate::Current(), "HandleScope::NumberOfHandles");
- return i::HandleScope::NumberOfHandles();
+i::Object** HandleScope::CreateHandle(i::HeapObject* heap_object,
+ i::Object* value) {
+ ASSERT(heap_object->IsHeapObject());
+ return i::HandleScope::CreateHandle(heap_object->GetIsolate(), value);
}
-i::Object** HandleScope::CreateHandle(i::Object* value) {
- return i::HandleScope::CreateHandle(value, i::Isolate::Current());
+EscapableHandleScope::EscapableHandleScope(Isolate* v8_isolate) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+ escape_slot_ = CreateHandle(isolate, isolate->heap()->the_hole_value());
+ Initialize(v8_isolate);
}
-i::Object** HandleScope::CreateHandle(i::HeapObject* value) {
- ASSERT(value->IsHeapObject());
- return reinterpret_cast<i::Object**>(
- i::HandleScope::CreateHandle(value, value->GetIsolate()));
+i::Object** EscapableHandleScope::Escape(i::Object** escape_value) {
+ ApiCheck(*escape_slot_ == isolate_->heap()->the_hole_value(),
+ "EscapeableHandleScope::Escape",
+ "Escape value set twice");
+ if (escape_value == NULL) {
+ *escape_slot_ = isolate_->heap()->undefined_value();
+ return NULL;
+ }
+ *escape_slot_ = *escape_value;
+ return escape_slot_;
}
void Context::Enter() {
i::Handle<i::Context> env = Utils::OpenHandle(this);
i::Isolate* isolate = env->GetIsolate();
- if (IsDeadCheck(isolate, "v8::Context::Enter()")) return;
ENTER_V8(isolate);
-
isolate->handle_scope_implementer()->EnterContext(env);
-
isolate->handle_scope_implementer()->SaveContext(isolate->context());
isolate->set_context(*env);
}
void Context::Exit() {
- // Exit is essentially a static function and doesn't use the
- // receiver, so we have to get the current isolate from the thread
- // local.
+ // TODO(dcarney): fix this once chrome is fixed.
i::Isolate* isolate = i::Isolate::Current();
- if (!isolate->IsInitialized()) return;
-
- if (!ApiCheck(isolate->handle_scope_implementer()->LeaveLastContext(),
+ i::Handle<i::Context> context = i::Handle<i::Context>::null();
+ ENTER_V8(isolate);
+ if (!ApiCheck(isolate->handle_scope_implementer()->LeaveContext(context),
"v8::Context::Exit()",
"Cannot exit non-entered context")) {
return;
}
-
// Content of 'last_context' could be NULL.
i::Context* last_context =
isolate->handle_scope_implementer()->RestoreContext();
isolate->set_context(last_context);
- isolate->set_context_exit_happened(true);
}
-void Context::SetData(v8::Handle<Value> data) {
- i::Handle<i::Context> env = Utils::OpenHandle(this);
- i::Isolate* isolate = env->GetIsolate();
- if (IsDeadCheck(isolate, "v8::Context::SetData()")) return;
- i::Handle<i::Object> raw_data = Utils::OpenHandle(*data);
- ASSERT(env->IsNativeContext());
- if (env->IsNativeContext()) {
- env->set_data(*raw_data);
- }
+static void* DecodeSmiToAligned(i::Object* value, const char* location) {
+ ApiCheck(value->IsSmi(), location, "Not a Smi");
+ return reinterpret_cast<void*>(value);
}
-v8::Local<v8::Value> Context::GetData() {
- i::Handle<i::Context> env = Utils::OpenHandle(this);
- i::Isolate* isolate = env->GetIsolate();
- if (IsDeadCheck(isolate, "v8::Context::GetData()")) {
- return Local<Value>();
- }
- ASSERT(env->IsNativeContext());
- if (!env->IsNativeContext()) {
- return Local<Value>();
+static i::Smi* EncodeAlignedAsSmi(void* value, const char* location) {
+ i::Smi* smi = reinterpret_cast<i::Smi*>(value);
+ ApiCheck(smi->IsSmi(), location, "Pointer is not aligned");
+ return smi;
+}
+
+
+static i::Handle<i::FixedArray> EmbedderDataFor(Context* context,
+ int index,
+ bool can_grow,
+ const char* location) {
+ i::Handle<i::Context> env = Utils::OpenHandle(context);
+ bool ok =
+ ApiCheck(env->IsNativeContext(), location, "Not a native context") &&
+ ApiCheck(index >= 0, location, "Negative index");
+ if (!ok) return i::Handle<i::FixedArray>();
+ i::Handle<i::FixedArray> data(env->embedder_data());
+ if (index < data->length()) return data;
+ if (!can_grow) {
+ Utils::ReportApiFailure(location, "Index too large");
+ return i::Handle<i::FixedArray>();
}
- i::Handle<i::Object> result(env->data(), isolate);
+ int new_size = i::Max(index, data->length() << 1) + 1;
+ data = env->GetIsolate()->factory()->CopySizeFixedArray(data, new_size);
+ env->set_embedder_data(*data);
+ return data;
+}
+
+
+v8::Local<v8::Value> Context::SlowGetEmbedderData(int index) {
+ const char* location = "v8::Context::GetEmbedderData()";
+ i::Handle<i::FixedArray> data = EmbedderDataFor(this, index, false, location);
+ if (data.is_null()) return Local<Value>();
+ i::Handle<i::Object> result(data->get(index), data->GetIsolate());
return Utils::ToLocal(result);
}
+void Context::SetEmbedderData(int index, v8::Handle<Value> value) {
+ const char* location = "v8::Context::SetEmbedderData()";
+ i::Handle<i::FixedArray> data = EmbedderDataFor(this, index, true, location);
+ if (data.is_null()) return;
+ i::Handle<i::Object> val = Utils::OpenHandle(*value);
+ data->set(index, *val);
+ ASSERT_EQ(*Utils::OpenHandle(*value),
+ *Utils::OpenHandle(*GetEmbedderData(index)));
+}
+
+
+void* Context::SlowGetAlignedPointerFromEmbedderData(int index) {
+ const char* location = "v8::Context::GetAlignedPointerFromEmbedderData()";
+ i::Handle<i::FixedArray> data = EmbedderDataFor(this, index, false, location);
+ if (data.is_null()) return NULL;
+ return DecodeSmiToAligned(data->get(index), location);
+}
+
+
+void Context::SetAlignedPointerInEmbedderData(int index, void* value) {
+ const char* location = "v8::Context::SetAlignedPointerInEmbedderData()";
+ i::Handle<i::FixedArray> data = EmbedderDataFor(this, index, true, location);
+ data->set(index, EncodeAlignedAsSmi(value, location));
+ ASSERT_EQ(value, GetAlignedPointerFromEmbedderData(index));
+}
+
+
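
An embedder-side sketch of the per-context embedder data slots wired up above, using arbitrary slot indices; aligned pointers are stored Smi-encoded, which is why EncodeAlignedAsSmi() checks the low bit:

struct MyContextState { int request_count; };  // assumed embedder-defined type
static MyContextState context_state;

void AttachState(v8::Handle<v8::Context> context) {
  context->SetEmbedderData(1, v8::Integer::New(7));             // any v8::Value
  context->SetAlignedPointerInEmbedderData(2, &context_state);  // pointer must be 2-byte aligned
}

MyContextState* GetState(v8::Handle<v8::Context> context) {
  return static_cast<MyContextState*>(
      context->GetAlignedPointerFromEmbedderData(2));
}
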
i::Object** v8::HandleScope::RawClose(i::Object** value) {
if (!ApiCheck(!is_closed_,
"v8::HandleScope::Close()",
@@ -821,7 +835,7 @@ i::Object** v8::HandleScope::RawClose(i::Object** value) {
}
// Allocate a new handle on the previous handle block.
- i::Handle<i::Object> handle(result);
+ i::Handle<i::Object> handle(result, isolate_);
return handle.location();
}
@@ -875,7 +889,8 @@ void NeanderArray::add(i::Handle<i::Object> value) {
int length = this->length();
int size = obj_.size();
if (length == size - 1) {
- i::Handle<i::FixedArray> new_elms = FACTORY->NewFixedArray(2 * size);
+ i::Factory* factory = i::Isolate::Current()->factory();
+ i::Handle<i::FixedArray> new_elms = factory->NewFixedArray(2 * size);
for (int i = 0; i < length; i++)
new_elms->set(i + 1, get(i));
obj_.value()->set_elements(*new_elms);
@@ -899,21 +914,60 @@ static void InitializeTemplate(i::Handle<i::TemplateInfo> that, int type) {
}
-void Template::Set(v8::Handle<String> name, v8::Handle<Data> value,
- v8::PropertyAttribute attribute) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Template::Set()")) return;
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::Object> list(Utils::OpenHandle(this)->property_list());
+static void TemplateSet(i::Isolate* isolate,
+ v8::Template* templ,
+ int length,
+ v8::Handle<v8::Data>* data) {
+ i::Handle<i::Object> list(Utils::OpenHandle(templ)->property_list(), isolate);
if (list->IsUndefined()) {
list = NeanderArray().value();
- Utils::OpenHandle(this)->set_property_list(*list);
+ Utils::OpenHandle(templ)->set_property_list(*list);
}
NeanderArray array(list);
- array.add(Utils::OpenHandle(*name));
- array.add(Utils::OpenHandle(*value));
- array.add(Utils::OpenHandle(*v8::Integer::New(attribute)));
+ array.add(Utils::OpenHandle(*v8::Integer::New(length)));
+ for (int i = 0; i < length; i++) {
+ i::Handle<i::Object> value = data[i].IsEmpty() ?
+ i::Handle<i::Object>(isolate->factory()->undefined_value()) :
+ Utils::OpenHandle(*data[i]);
+ array.add(value);
+ }
+}
+
+
+void Template::Set(v8::Handle<String> name,
+ v8::Handle<Data> value,
+ v8::PropertyAttribute attribute) {
+ i::Isolate* isolate = i::Isolate::Current();
+ ENTER_V8(isolate);
+ i::HandleScope scope(isolate);
+ const int kSize = 3;
+ v8::Handle<v8::Data> data[kSize] = {
+ name,
+ value,
+ v8::Integer::New(attribute)};
+ TemplateSet(isolate, this, kSize, data);
+}
+
+
+void Template::SetAccessorProperty(
+ v8::Local<v8::String> name,
+ v8::Local<FunctionTemplate> getter,
+ v8::Local<FunctionTemplate> setter,
+ v8::PropertyAttribute attribute,
+ v8::AccessControl access_control) {
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ ENTER_V8(isolate);
+ ASSERT(!name.IsEmpty());
+ ASSERT(!getter.IsEmpty() || !setter.IsEmpty());
+ i::HandleScope scope(isolate);
+ const int kSize = 5;
+ v8::Handle<v8::Data> data[kSize] = {
+ name,
+ getter,
+ setter,
+ v8::Integer::New(attribute),
+ v8::Integer::New(access_control)};
+ TemplateSet(isolate, this, kSize, data);
}
@@ -927,53 +981,70 @@ static void InitializeFunctionTemplate(
Local<ObjectTemplate> FunctionTemplate::PrototypeTemplate() {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::FunctionTemplate::PrototypeTemplate()")) {
- return Local<ObjectTemplate>();
- }
ENTER_V8(isolate);
- i::Handle<i::Object> result(Utils::OpenHandle(this)->prototype_template());
+ i::Handle<i::Object> result(Utils::OpenHandle(this)->prototype_template(),
+ isolate);
if (result->IsUndefined()) {
result = Utils::OpenHandle(*ObjectTemplate::New());
Utils::OpenHandle(this)->set_prototype_template(*result);
}
- return Local<ObjectTemplate>(ToApi<ObjectTemplate>(result));
+ return ToApiHandle<ObjectTemplate>(result);
}
void FunctionTemplate::Inherit(v8::Handle<FunctionTemplate> value) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::FunctionTemplate::Inherit()")) return;
ENTER_V8(isolate);
Utils::OpenHandle(this)->set_parent_template(*Utils::OpenHandle(*value));
}
-Local<FunctionTemplate> FunctionTemplate::New(InvocationCallback callback,
- v8::Handle<Value> data, v8::Handle<Signature> signature) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::FunctionTemplate::New()");
- LOG_API(isolate, "FunctionTemplate::New");
- ENTER_V8(isolate);
+static Local<FunctionTemplate> FunctionTemplateNew(
+ i::Isolate* isolate,
+ FunctionCallback callback,
+ v8::Handle<Value> data,
+ v8::Handle<Signature> signature,
+ int length,
+ bool do_not_cache) {
i::Handle<i::Struct> struct_obj =
isolate->factory()->NewStruct(i::FUNCTION_TEMPLATE_INFO_TYPE);
i::Handle<i::FunctionTemplateInfo> obj =
i::Handle<i::FunctionTemplateInfo>::cast(struct_obj);
InitializeFunctionTemplate(obj);
- int next_serial_number = isolate->next_serial_number();
- isolate->set_next_serial_number(next_serial_number + 1);
+ obj->set_do_not_cache(do_not_cache);
+ int next_serial_number = 0;
+ if (!do_not_cache) {
+ next_serial_number = isolate->next_serial_number() + 1;
+ isolate->set_next_serial_number(next_serial_number);
+ }
obj->set_serial_number(i::Smi::FromInt(next_serial_number));
if (callback != 0) {
- if (data.IsEmpty()) data = v8::Undefined();
+ if (data.IsEmpty()) {
+ data = v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
+ }
Utils::ToLocal(obj)->SetCallHandler(callback, data);
}
+ obj->set_length(length);
obj->set_undetectable(false);
obj->set_needs_access_check(false);
-
if (!signature.IsEmpty())
obj->set_signature(*Utils::OpenHandle(*signature));
return Utils::ToLocal(obj);
}
+Local<FunctionTemplate> FunctionTemplate::New(
+ FunctionCallback callback,
+ v8::Handle<Value> data,
+ v8::Handle<Signature> signature,
+ int length) {
+ i::Isolate* isolate = i::Isolate::Current();
+ EnsureInitializedForIsolate(isolate, "v8::FunctionTemplate::New()");
+ LOG_API(isolate, "FunctionTemplate::New");
+ ENTER_V8(isolate);
+ return FunctionTemplateNew(
+ isolate, callback, data, signature, length, false);
+}
+
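
A sketch of the FunctionCallback shape this overload now accepts: results are reported through info.GetReturnValue() rather than being returned, mirroring the PropertyCallbackInfo change in the module export accessors above:

static void Add(const v8::FunctionCallbackInfo<v8::Value>& info) {
  double sum = info[0]->NumberValue() + info[1]->NumberValue();
  info.GetReturnValue().Set(v8::Number::New(sum));
}

// v8::Local<v8::FunctionTemplate> t = v8::FunctionTemplate::New(Add);
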
Local<Signature> Signature::New(Handle<FunctionTemplate> receiver,
int argc, Handle<FunctionTemplate> argv[]) {
@@ -1004,6 +1075,122 @@ Local<AccessorSignature> AccessorSignature::New(
}
+template<typename Operation>
+static Local<Operation> NewDescriptor(
+ Isolate* isolate,
+ const i::DeclaredAccessorDescriptorData& data,
+ Data* previous_descriptor) {
+ i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ i::Handle<i::DeclaredAccessorDescriptor> previous =
+ i::Handle<i::DeclaredAccessorDescriptor>();
+ if (previous_descriptor != NULL) {
+ previous = Utils::OpenHandle(
+ static_cast<DeclaredAccessorDescriptor*>(previous_descriptor));
+ }
+ i::Handle<i::DeclaredAccessorDescriptor> descriptor =
+ i::DeclaredAccessorDescriptor::Create(internal_isolate, data, previous);
+ return Utils::Convert<i::DeclaredAccessorDescriptor, Operation>(descriptor);
+}
+
+
+Local<RawOperationDescriptor>
+ ObjectOperationDescriptor::NewInternalFieldDereference(
+ Isolate* isolate,
+ int internal_field) {
+ i::DeclaredAccessorDescriptorData data;
+ data.type = i::kDescriptorObjectDereference;
+ data.object_dereference_descriptor.internal_field = internal_field;
+ return NewDescriptor<RawOperationDescriptor>(isolate, data, NULL);
+}
+
+
+Local<RawOperationDescriptor> RawOperationDescriptor::NewRawShift(
+ Isolate* isolate,
+ int16_t byte_offset) {
+ i::DeclaredAccessorDescriptorData data;
+ data.type = i::kDescriptorPointerShift;
+ data.pointer_shift_descriptor.byte_offset = byte_offset;
+ return NewDescriptor<RawOperationDescriptor>(isolate, data, this);
+}
+
+
+Local<DeclaredAccessorDescriptor> RawOperationDescriptor::NewHandleDereference(
+ Isolate* isolate) {
+ i::DeclaredAccessorDescriptorData data;
+ data.type = i::kDescriptorReturnObject;
+ return NewDescriptor<DeclaredAccessorDescriptor>(isolate, data, this);
+}
+
+
+Local<RawOperationDescriptor> RawOperationDescriptor::NewRawDereference(
+ Isolate* isolate) {
+ i::DeclaredAccessorDescriptorData data;
+ data.type = i::kDescriptorPointerDereference;
+ return NewDescriptor<RawOperationDescriptor>(isolate, data, this);
+}
+
+
+Local<DeclaredAccessorDescriptor> RawOperationDescriptor::NewPointerCompare(
+ Isolate* isolate,
+ void* compare_value) {
+ i::DeclaredAccessorDescriptorData data;
+ data.type = i::kDescriptorPointerCompare;
+ data.pointer_compare_descriptor.compare_value = compare_value;
+ return NewDescriptor<DeclaredAccessorDescriptor>(isolate, data, this);
+}
+
+
+Local<DeclaredAccessorDescriptor> RawOperationDescriptor::NewPrimitiveValue(
+ Isolate* isolate,
+ DeclaredAccessorDescriptorDataType data_type,
+ uint8_t bool_offset) {
+ i::DeclaredAccessorDescriptorData data;
+ data.type = i::kDescriptorPrimitiveValue;
+ data.primitive_value_descriptor.data_type = data_type;
+ data.primitive_value_descriptor.bool_offset = bool_offset;
+ return NewDescriptor<DeclaredAccessorDescriptor>(isolate, data, this);
+}
+
+
+template<typename T>
+static Local<DeclaredAccessorDescriptor> NewBitmaskCompare(
+ Isolate* isolate,
+ T bitmask,
+ T compare_value,
+ RawOperationDescriptor* operation) {
+ i::DeclaredAccessorDescriptorData data;
+ data.type = i::kDescriptorBitmaskCompare;
+ data.bitmask_compare_descriptor.bitmask = bitmask;
+ data.bitmask_compare_descriptor.compare_value = compare_value;
+ data.bitmask_compare_descriptor.size = sizeof(T);
+ return NewDescriptor<DeclaredAccessorDescriptor>(isolate, data, operation);
+}
+
+
+Local<DeclaredAccessorDescriptor> RawOperationDescriptor::NewBitmaskCompare8(
+ Isolate* isolate,
+ uint8_t bitmask,
+ uint8_t compare_value) {
+ return NewBitmaskCompare(isolate, bitmask, compare_value, this);
+}
+
+
+Local<DeclaredAccessorDescriptor> RawOperationDescriptor::NewBitmaskCompare16(
+ Isolate* isolate,
+ uint16_t bitmask,
+ uint16_t compare_value) {
+ return NewBitmaskCompare(isolate, bitmask, compare_value, this);
+}
+
+
+Local<DeclaredAccessorDescriptor> RawOperationDescriptor::NewBitmaskCompare32(
+ Isolate* isolate,
+ uint32_t bitmask,
+ uint32_t compare_value) {
+ return NewBitmaskCompare(isolate, bitmask, compare_value, this);
+}
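
A usage sketch of the descriptor factories above (the field index, byte shift, and property name are made up): an embedder chains them into a DeclaredAccessorDescriptor and attaches it with Template::SetDeclaredAccessor(), which appears later in this patch.

    #include <v8.h>

    static void AddDeclaredAccessor(v8::Isolate* isolate,
                                    v8::Handle<v8::ObjectTemplate> templ) {
      // Dereference internal field 0, advance the raw pointer by 8 bytes,
      // then return the handle stored at that slot.
      v8::Local<v8::DeclaredAccessorDescriptor> descriptor =
          v8::ObjectOperationDescriptor::NewInternalFieldDereference(isolate, 0)
              ->NewRawShift(isolate, 8)
              ->NewHandleDereference(isolate);
      templ->SetDeclaredAccessor(v8::String::New("cached"),
                                 descriptor,
                                 v8::None,
                                 v8::Local<v8::AccessorSignature>(),
                                 v8::DEFAULT);
    }
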
+
+
Local<TypeSwitch> TypeSwitch::New(Handle<FunctionTemplate> type) {
Handle<FunctionTemplate> types[1] = { type };
return TypeSwitch::New(1, types);
@@ -1043,15 +1230,14 @@ int TypeSwitch::match(v8::Handle<Value> value) {
#define SET_FIELD_WRAPPED(obj, setter, cdata) do { \
- i::Handle<i::Object> foreign = FromCData(cdata); \
+ i::Handle<i::Object> foreign = FromCData(obj->GetIsolate(), cdata); \
(obj)->setter(*foreign); \
} while (false)
-void FunctionTemplate::SetCallHandler(InvocationCallback callback,
+void FunctionTemplate::SetCallHandler(FunctionCallback callback,
v8::Handle<Value> data) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::FunctionTemplate::SetCallHandler()")) return;
ENTER_V8(isolate);
i::HandleScope scope(isolate);
i::Handle<i::Struct> struct_obj =
@@ -1059,25 +1245,20 @@ void FunctionTemplate::SetCallHandler(InvocationCallback callback,
i::Handle<i::CallHandlerInfo> obj =
i::Handle<i::CallHandlerInfo>::cast(struct_obj);
SET_FIELD_WRAPPED(obj, set_callback, callback);
- if (data.IsEmpty()) data = v8::Undefined();
+ if (data.IsEmpty()) {
+ data = v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
+ }
obj->set_data(*Utils::OpenHandle(*data));
Utils::OpenHandle(this)->set_call_code(*obj);
}
-static i::Handle<i::AccessorInfo> MakeAccessorInfo(
- v8::Handle<String> name,
- AccessorGetter getter,
- AccessorSetter setter,
- v8::Handle<Value> data,
- v8::AccessControl settings,
- v8::PropertyAttribute attributes,
- v8::Handle<AccessorSignature> signature) {
- i::Handle<i::AccessorInfo> obj = FACTORY->NewAccessorInfo();
- SET_FIELD_WRAPPED(obj, set_getter, getter);
- SET_FIELD_WRAPPED(obj, set_setter, setter);
- if (data.IsEmpty()) data = v8::Undefined();
- obj->set_data(*Utils::OpenHandle(*data));
+static i::Handle<i::AccessorInfo> SetAccessorInfoProperties(
+ i::Handle<i::AccessorInfo> obj,
+ v8::Handle<String> name,
+ v8::AccessControl settings,
+ v8::PropertyAttribute attributes,
+ v8::Handle<AccessorSignature> signature) {
obj->set_name(*Utils::OpenHandle(*name));
if (settings & ALL_CAN_READ) obj->set_all_can_read(true);
if (settings & ALL_CAN_WRITE) obj->set_all_can_write(true);
@@ -1090,55 +1271,71 @@ static i::Handle<i::AccessorInfo> MakeAccessorInfo(
}
-void FunctionTemplate::AddInstancePropertyAccessor(
+template<typename Getter, typename Setter>
+static i::Handle<i::AccessorInfo> MakeAccessorInfo(
v8::Handle<String> name,
- AccessorGetter getter,
- AccessorSetter setter,
+ Getter getter,
+ Setter setter,
v8::Handle<Value> data,
v8::AccessControl settings,
v8::PropertyAttribute attributes,
v8::Handle<AccessorSignature> signature) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate,
- "v8::FunctionTemplate::AddInstancePropertyAccessor()")) {
- return;
+ i::Isolate* isolate = Utils::OpenHandle(*name)->GetIsolate();
+ i::Handle<i::ExecutableAccessorInfo> obj =
+ isolate->factory()->NewExecutableAccessorInfo();
+ SET_FIELD_WRAPPED(obj, set_getter, getter);
+ SET_FIELD_WRAPPED(obj, set_setter, setter);
+ if (data.IsEmpty()) {
+ data = v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
}
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
+ obj->set_data(*Utils::OpenHandle(*data));
+ return SetAccessorInfoProperties(obj, name, settings, attributes, signature);
+}
- i::Handle<i::AccessorInfo> obj = MakeAccessorInfo(name, getter, setter, data,
- settings, attributes,
- signature);
- i::Handle<i::Object> list(Utils::OpenHandle(this)->property_accessors());
- if (list->IsUndefined()) {
- list = NeanderArray().value();
- Utils::OpenHandle(this)->set_property_accessors(*list);
- }
- NeanderArray array(list);
- array.add(obj);
+
+static i::Handle<i::AccessorInfo> MakeAccessorInfo(
+ v8::Handle<String> name,
+ v8::Handle<v8::DeclaredAccessorDescriptor> descriptor,
+ void* setter_ignored,
+ void* data_ignored,
+ v8::AccessControl settings,
+ v8::PropertyAttribute attributes,
+ v8::Handle<AccessorSignature> signature) {
+ i::Isolate* isolate = Utils::OpenHandle(*name)->GetIsolate();
+ if (descriptor.IsEmpty()) return i::Handle<i::DeclaredAccessorInfo>();
+ i::Handle<i::DeclaredAccessorInfo> obj =
+ isolate->factory()->NewDeclaredAccessorInfo();
+ obj->set_descriptor(*Utils::OpenHandle(*descriptor));
+ return SetAccessorInfoProperties(obj, name, settings, attributes, signature);
}
Local<ObjectTemplate> FunctionTemplate::InstanceTemplate() {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::FunctionTemplate::InstanceTemplate()")
- || EmptyCheck("v8::FunctionTemplate::InstanceTemplate()", this))
+ if (EmptyCheck("v8::FunctionTemplate::InstanceTemplate()", this))
return Local<ObjectTemplate>();
ENTER_V8(isolate);
- if (Utils::OpenHandle(this)->instance_template()->IsUndefined()) {
+ i::Handle<i::FunctionTemplateInfo> handle = Utils::OpenHandle(this);
+ if (handle->instance_template()->IsUndefined()) {
Local<ObjectTemplate> templ =
- ObjectTemplate::New(v8::Handle<FunctionTemplate>(this));
- Utils::OpenHandle(this)->set_instance_template(*Utils::OpenHandle(*templ));
+ ObjectTemplate::New(ToApiHandle<FunctionTemplate>(handle));
+ handle->set_instance_template(*Utils::OpenHandle(*templ));
}
- i::Handle<i::ObjectTemplateInfo> result(i::ObjectTemplateInfo::cast(
- Utils::OpenHandle(this)->instance_template()));
+ i::Handle<i::ObjectTemplateInfo> result(
+ i::ObjectTemplateInfo::cast(handle->instance_template()));
return Utils::ToLocal(result);
}
+void FunctionTemplate::SetLength(int length) {
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ ENTER_V8(isolate);
+ Utils::OpenHandle(this)->set_length(length);
+}
+
+
void FunctionTemplate::SetClassName(Handle<String> name) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::FunctionTemplate::SetClassName()")) return;
ENTER_V8(isolate);
Utils::OpenHandle(this)->set_class_name(*Utils::OpenHandle(*name));
}
@@ -1146,9 +1343,6 @@ void FunctionTemplate::SetClassName(Handle<String> name) {
void FunctionTemplate::SetHiddenPrototype(bool value) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::FunctionTemplate::SetHiddenPrototype()")) {
- return;
- }
ENTER_V8(isolate);
Utils::OpenHandle(this)->set_hidden_prototype(value);
}
@@ -1156,94 +1350,15 @@ void FunctionTemplate::SetHiddenPrototype(bool value) {
void FunctionTemplate::ReadOnlyPrototype() {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::FunctionTemplate::SetPrototypeAttributes()")) {
- return;
- }
ENTER_V8(isolate);
Utils::OpenHandle(this)->set_read_only_prototype(true);
}
-void FunctionTemplate::SetNamedInstancePropertyHandler(
- NamedPropertyGetter getter,
- NamedPropertySetter setter,
- NamedPropertyQuery query,
- NamedPropertyDeleter remover,
- NamedPropertyEnumerator enumerator,
- Handle<Value> data) {
+void FunctionTemplate::RemovePrototype() {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate,
- "v8::FunctionTemplate::SetNamedInstancePropertyHandler()")) {
- return;
- }
ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::Struct> struct_obj =
- isolate->factory()->NewStruct(i::INTERCEPTOR_INFO_TYPE);
- i::Handle<i::InterceptorInfo> obj =
- i::Handle<i::InterceptorInfo>::cast(struct_obj);
-
- if (getter != 0) SET_FIELD_WRAPPED(obj, set_getter, getter);
- if (setter != 0) SET_FIELD_WRAPPED(obj, set_setter, setter);
- if (query != 0) SET_FIELD_WRAPPED(obj, set_query, query);
- if (remover != 0) SET_FIELD_WRAPPED(obj, set_deleter, remover);
- if (enumerator != 0) SET_FIELD_WRAPPED(obj, set_enumerator, enumerator);
-
- if (data.IsEmpty()) data = v8::Undefined();
- obj->set_data(*Utils::OpenHandle(*data));
- Utils::OpenHandle(this)->set_named_property_handler(*obj);
-}
-
-
-void FunctionTemplate::SetIndexedInstancePropertyHandler(
- IndexedPropertyGetter getter,
- IndexedPropertySetter setter,
- IndexedPropertyQuery query,
- IndexedPropertyDeleter remover,
- IndexedPropertyEnumerator enumerator,
- Handle<Value> data) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate,
- "v8::FunctionTemplate::SetIndexedInstancePropertyHandler()")) {
- return;
- }
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::Struct> struct_obj =
- isolate->factory()->NewStruct(i::INTERCEPTOR_INFO_TYPE);
- i::Handle<i::InterceptorInfo> obj =
- i::Handle<i::InterceptorInfo>::cast(struct_obj);
-
- if (getter != 0) SET_FIELD_WRAPPED(obj, set_getter, getter);
- if (setter != 0) SET_FIELD_WRAPPED(obj, set_setter, setter);
- if (query != 0) SET_FIELD_WRAPPED(obj, set_query, query);
- if (remover != 0) SET_FIELD_WRAPPED(obj, set_deleter, remover);
- if (enumerator != 0) SET_FIELD_WRAPPED(obj, set_enumerator, enumerator);
-
- if (data.IsEmpty()) data = v8::Undefined();
- obj->set_data(*Utils::OpenHandle(*data));
- Utils::OpenHandle(this)->set_indexed_property_handler(*obj);
-}
-
-
-void FunctionTemplate::SetInstanceCallAsFunctionHandler(
- InvocationCallback callback,
- Handle<Value> data) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate,
- "v8::FunctionTemplate::SetInstanceCallAsFunctionHandler()")) {
- return;
- }
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::Struct> struct_obj =
- isolate->factory()->NewStruct(i::CALL_HANDLER_INFO_TYPE);
- i::Handle<i::CallHandlerInfo> obj =
- i::Handle<i::CallHandlerInfo>::cast(struct_obj);
- SET_FIELD_WRAPPED(obj, set_callback, callback);
- if (data.IsEmpty()) data = v8::Undefined();
- obj->set_data(*Utils::OpenHandle(*data));
- Utils::OpenHandle(this)->set_instance_call_handler(*obj);
+ Utils::OpenHandle(this)->set_remove_prototype(true);
}
@@ -1258,9 +1373,6 @@ Local<ObjectTemplate> ObjectTemplate::New() {
Local<ObjectTemplate> ObjectTemplate::New(
v8::Handle<FunctionTemplate> constructor) {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::ObjectTemplate::New()")) {
- return Local<ObjectTemplate>();
- }
EnsureInitializedForIsolate(isolate, "v8::ObjectTemplate::New()");
LOG_API(isolate, "ObjectTemplate::New");
ENTER_V8(isolate);
@@ -1278,69 +1390,141 @@ Local<ObjectTemplate> ObjectTemplate::New(
// Ensure that the object template has a constructor. If no
// constructor is available we create one.
-static void EnsureConstructor(ObjectTemplate* object_template) {
- if (Utils::OpenHandle(object_template)->constructor()->IsUndefined()) {
- Local<FunctionTemplate> templ = FunctionTemplate::New();
- i::Handle<i::FunctionTemplateInfo> constructor = Utils::OpenHandle(*templ);
- constructor->set_instance_template(*Utils::OpenHandle(object_template));
- Utils::OpenHandle(object_template)->set_constructor(*constructor);
+static i::Handle<i::FunctionTemplateInfo> EnsureConstructor(
+ ObjectTemplate* object_template) {
+ i::Object* obj = Utils::OpenHandle(object_template)->constructor();
+ if (!obj->IsUndefined()) {
+ i::FunctionTemplateInfo* info = i::FunctionTemplateInfo::cast(obj);
+ return i::Handle<i::FunctionTemplateInfo>(info, info->GetIsolate());
+ }
+ Local<FunctionTemplate> templ = FunctionTemplate::New();
+ i::Handle<i::FunctionTemplateInfo> constructor = Utils::OpenHandle(*templ);
+ constructor->set_instance_template(*Utils::OpenHandle(object_template));
+ Utils::OpenHandle(object_template)->set_constructor(*constructor);
+ return constructor;
+}
+
+
+static inline void AddPropertyToTemplate(
+ i::Handle<i::TemplateInfo> info,
+ i::Handle<i::AccessorInfo> obj) {
+ i::Handle<i::Object> list(info->property_accessors(), info->GetIsolate());
+ if (list->IsUndefined()) {
+ list = NeanderArray().value();
+ info->set_property_accessors(*list);
}
+ NeanderArray array(list);
+ array.add(obj);
+}
+
+
+static inline i::Handle<i::TemplateInfo> GetTemplateInfo(
+ Template* template_obj) {
+ return Utils::OpenHandle(template_obj);
+}
+
+
+// TODO(dcarney): remove this with ObjectTemplate::SetAccessor
+static inline i::Handle<i::TemplateInfo> GetTemplateInfo(
+ ObjectTemplate* object_template) {
+ EnsureConstructor(object_template);
+ return Utils::OpenHandle(object_template);
+}
+
+
+template<typename Setter, typename Getter, typename Data, typename Template>
+static bool TemplateSetAccessor(
+ Template* template_obj,
+ v8::Local<String> name,
+ Getter getter,
+ Setter setter,
+ Data data,
+ AccessControl settings,
+ PropertyAttribute attribute,
+ v8::Local<AccessorSignature> signature) {
+ i::Isolate* isolate = Utils::OpenHandle(template_obj)->GetIsolate();
+ ENTER_V8(isolate);
+ i::HandleScope scope(isolate);
+ i::Handle<i::AccessorInfo> obj = MakeAccessorInfo(
+ name, getter, setter, data, settings, attribute, signature);
+ if (obj.is_null()) return false;
+ i::Handle<i::TemplateInfo> info = GetTemplateInfo(template_obj);
+ AddPropertyToTemplate(info, obj);
+ return true;
+}
+
+
+bool Template::SetDeclaredAccessor(
+ Local<String> name,
+ Local<DeclaredAccessorDescriptor> descriptor,
+ PropertyAttribute attribute,
+ Local<AccessorSignature> signature,
+ AccessControl settings) {
+ void* null = NULL;
+ return TemplateSetAccessor(
+ this, name, descriptor, null, null, settings, attribute, signature);
+}
+
+
+void Template::SetNativeDataProperty(v8::Local<String> name,
+ AccessorGetterCallback getter,
+ AccessorSetterCallback setter,
+ v8::Handle<Value> data,
+ PropertyAttribute attribute,
+ v8::Local<AccessorSignature> signature,
+ AccessControl settings) {
+ TemplateSetAccessor(
+ this, name, getter, setter, data, settings, attribute, signature);
}
void ObjectTemplate::SetAccessor(v8::Handle<String> name,
- AccessorGetter getter,
- AccessorSetter setter,
+ AccessorGetterCallback getter,
+ AccessorSetterCallback setter,
v8::Handle<Value> data,
AccessControl settings,
PropertyAttribute attribute,
v8::Handle<AccessorSignature> signature) {
+ TemplateSetAccessor(
+ this, name, getter, setter, data, settings, attribute, signature);
+}
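
A sketch of the new-style accessor callbacks both entry points now accept (property names and the stored constant are illustrative; the default arguments are assumed from this revision's v8.h):

    #include <v8.h>

    static void XGetter(v8::Local<v8::String> property,
                        const v8::PropertyCallbackInfo<v8::Value>& info) {
      info.GetReturnValue().Set(v8::Integer::New(42));
    }

    static void XSetter(v8::Local<v8::String> property,
                        v8::Local<v8::Value> value,
                        const v8::PropertyCallbackInfo<void>& info) {
      // Store `value` on the holder, e.g. in an internal field.
    }

    static void AddAccessors(v8::Handle<v8::ObjectTemplate> templ) {
      // Both calls funnel into TemplateSetAccessor() above.
      templ->SetAccessor(v8::String::New("x"), XGetter, XSetter);
      templ->SetNativeDataProperty(v8::String::New("y"), XGetter);
    }
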
+
+
+void ObjectTemplate::SetNamedPropertyHandler(
+ NamedPropertyGetterCallback getter,
+ NamedPropertySetterCallback setter,
+ NamedPropertyQueryCallback query,
+ NamedPropertyDeleterCallback remover,
+ NamedPropertyEnumeratorCallback enumerator,
+ Handle<Value> data) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::ObjectTemplate::SetAccessor()")) return;
ENTER_V8(isolate);
i::HandleScope scope(isolate);
EnsureConstructor(this);
- i::FunctionTemplateInfo* constructor =
- i::FunctionTemplateInfo::cast(Utils::OpenHandle(this)->constructor());
+ i::FunctionTemplateInfo* constructor = i::FunctionTemplateInfo::cast(
+ Utils::OpenHandle(this)->constructor());
i::Handle<i::FunctionTemplateInfo> cons(constructor);
- Utils::ToLocal(cons)->AddInstancePropertyAccessor(name,
- getter,
- setter,
- data,
- settings,
- attribute,
- signature);
-}
-
-
-void ObjectTemplate::SetNamedPropertyHandler(NamedPropertyGetter getter,
- NamedPropertySetter setter,
- NamedPropertyQuery query,
- NamedPropertyDeleter remover,
- NamedPropertyEnumerator enumerator,
- Handle<Value> data) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::ObjectTemplate::SetNamedPropertyHandler()")) {
- return;
+ i::Handle<i::Struct> struct_obj =
+ isolate->factory()->NewStruct(i::INTERCEPTOR_INFO_TYPE);
+ i::Handle<i::InterceptorInfo> obj =
+ i::Handle<i::InterceptorInfo>::cast(struct_obj);
+
+ if (getter != 0) SET_FIELD_WRAPPED(obj, set_getter, getter);
+ if (setter != 0) SET_FIELD_WRAPPED(obj, set_setter, setter);
+ if (query != 0) SET_FIELD_WRAPPED(obj, set_query, query);
+ if (remover != 0) SET_FIELD_WRAPPED(obj, set_deleter, remover);
+ if (enumerator != 0) SET_FIELD_WRAPPED(obj, set_enumerator, enumerator);
+
+ if (data.IsEmpty()) {
+ data = v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
}
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- EnsureConstructor(this);
- i::FunctionTemplateInfo* constructor =
- i::FunctionTemplateInfo::cast(Utils::OpenHandle(this)->constructor());
- i::Handle<i::FunctionTemplateInfo> cons(constructor);
- Utils::ToLocal(cons)->SetNamedInstancePropertyHandler(getter,
- setter,
- query,
- remover,
- enumerator,
- data);
+ obj->set_data(*Utils::OpenHandle(*data));
+ cons->set_named_property_handler(*obj);
}
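
A matching interceptor sketch for the renamed NamedProperty*Callback types (the behavior is illustrative only):

    #include <v8.h>

    static void InterceptGet(v8::Local<v8::String> property,
                             const v8::PropertyCallbackInfo<v8::Value>& info) {
      // Answer every named lookup with the property name itself; returning
      // without calling Set() would let the lookup fall through instead.
      info.GetReturnValue().Set(property);
    }

    static void InstallInterceptor(v8::Handle<v8::ObjectTemplate> templ) {
      templ->SetNamedPropertyHandler(InterceptGet);
    }
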
void ObjectTemplate::MarkAsUndetectable() {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::ObjectTemplate::MarkAsUndetectable()")) return;
ENTER_V8(isolate);
i::HandleScope scope(isolate);
EnsureConstructor(this);
@@ -1357,9 +1541,6 @@ void ObjectTemplate::SetAccessCheckCallbacks(
Handle<Value> data,
bool turned_on_by_default) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::ObjectTemplate::SetAccessCheckCallbacks()")) {
- return;
- }
ENTER_V8(isolate);
i::HandleScope scope(isolate);
EnsureConstructor(this);
@@ -1372,7 +1553,9 @@ void ObjectTemplate::SetAccessCheckCallbacks(
SET_FIELD_WRAPPED(info, set_named_callback, named_callback);
SET_FIELD_WRAPPED(info, set_indexed_callback, indexed_callback);
- if (data.IsEmpty()) data = v8::Undefined();
+ if (data.IsEmpty()) {
+ data = v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
+ }
info->set_data(*Utils::OpenHandle(*data));
i::FunctionTemplateInfo* constructor =
@@ -1384,62 +1567,67 @@ void ObjectTemplate::SetAccessCheckCallbacks(
void ObjectTemplate::SetIndexedPropertyHandler(
- IndexedPropertyGetter getter,
- IndexedPropertySetter setter,
- IndexedPropertyQuery query,
- IndexedPropertyDeleter remover,
- IndexedPropertyEnumerator enumerator,
+ IndexedPropertyGetterCallback getter,
+ IndexedPropertySetterCallback setter,
+ IndexedPropertyQueryCallback query,
+ IndexedPropertyDeleterCallback remover,
+ IndexedPropertyEnumeratorCallback enumerator,
Handle<Value> data) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::ObjectTemplate::SetIndexedPropertyHandler()")) {
- return;
- }
ENTER_V8(isolate);
i::HandleScope scope(isolate);
EnsureConstructor(this);
- i::FunctionTemplateInfo* constructor =
- i::FunctionTemplateInfo::cast(Utils::OpenHandle(this)->constructor());
+ i::FunctionTemplateInfo* constructor = i::FunctionTemplateInfo::cast(
+ Utils::OpenHandle(this)->constructor());
i::Handle<i::FunctionTemplateInfo> cons(constructor);
- Utils::ToLocal(cons)->SetIndexedInstancePropertyHandler(getter,
- setter,
- query,
- remover,
- enumerator,
- data);
+ i::Handle<i::Struct> struct_obj =
+ isolate->factory()->NewStruct(i::INTERCEPTOR_INFO_TYPE);
+ i::Handle<i::InterceptorInfo> obj =
+ i::Handle<i::InterceptorInfo>::cast(struct_obj);
+
+ if (getter != 0) SET_FIELD_WRAPPED(obj, set_getter, getter);
+ if (setter != 0) SET_FIELD_WRAPPED(obj, set_setter, setter);
+ if (query != 0) SET_FIELD_WRAPPED(obj, set_query, query);
+ if (remover != 0) SET_FIELD_WRAPPED(obj, set_deleter, remover);
+ if (enumerator != 0) SET_FIELD_WRAPPED(obj, set_enumerator, enumerator);
+
+ if (data.IsEmpty()) {
+ data = v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
+ }
+ obj->set_data(*Utils::OpenHandle(*data));
+ cons->set_indexed_property_handler(*obj);
}
-void ObjectTemplate::SetCallAsFunctionHandler(InvocationCallback callback,
+void ObjectTemplate::SetCallAsFunctionHandler(FunctionCallback callback,
Handle<Value> data) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate,
- "v8::ObjectTemplate::SetCallAsFunctionHandler()")) {
- return;
- }
ENTER_V8(isolate);
i::HandleScope scope(isolate);
EnsureConstructor(this);
- i::FunctionTemplateInfo* constructor =
- i::FunctionTemplateInfo::cast(Utils::OpenHandle(this)->constructor());
+ i::FunctionTemplateInfo* constructor = i::FunctionTemplateInfo::cast(
+ Utils::OpenHandle(this)->constructor());
i::Handle<i::FunctionTemplateInfo> cons(constructor);
- Utils::ToLocal(cons)->SetInstanceCallAsFunctionHandler(callback, data);
+ i::Handle<i::Struct> struct_obj =
+ isolate->factory()->NewStruct(i::CALL_HANDLER_INFO_TYPE);
+ i::Handle<i::CallHandlerInfo> obj =
+ i::Handle<i::CallHandlerInfo>::cast(struct_obj);
+ SET_FIELD_WRAPPED(obj, set_callback, callback);
+ if (data.IsEmpty()) {
+ data = v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
+ }
+ obj->set_data(*Utils::OpenHandle(*data));
+ cons->set_instance_call_handler(*obj);
}
int ObjectTemplate::InternalFieldCount() {
- if (IsDeadCheck(Utils::OpenHandle(this)->GetIsolate(),
- "v8::ObjectTemplate::InternalFieldCount()")) {
- return 0;
- }
return i::Smi::cast(Utils::OpenHandle(this)->internal_field_count())->value();
}
void ObjectTemplate::SetInternalFieldCount(int value) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::ObjectTemplate::SetInternalFieldCount()")) {
- return;
- }
if (!ApiCheck(i::Smi::IsValid(value),
"v8::ObjectTemplate::SetInternalFieldCount()",
"Invalid internal field count")) {
@@ -1459,22 +1647,26 @@ void ObjectTemplate::SetInternalFieldCount(int value) {
// --- S c r i p t D a t a ---
-ScriptData* ScriptData::PreCompile(const char* input, int length) {
+ScriptData* ScriptData::PreCompile(v8::Isolate* isolate,
+ const char* input,
+ int length) {
i::Utf8ToUtf16CharacterStream stream(
reinterpret_cast<const unsigned char*>(input), length);
- return i::ParserApi::PreParse(&stream, NULL, i::FLAG_harmony_scoping);
+ return i::PreParserApi::PreParse(
+ reinterpret_cast<i::Isolate*>(isolate), &stream);
}
ScriptData* ScriptData::PreCompile(v8::Handle<String> source) {
i::Handle<i::String> str = Utils::OpenHandle(*source);
+ i::Isolate* isolate = str->GetIsolate();
if (str->IsExternalTwoByteString()) {
i::ExternalTwoByteStringUtf16CharacterStream stream(
i::Handle<i::ExternalTwoByteString>::cast(str), 0, str->length());
- return i::ParserApi::PreParse(&stream, NULL, i::FLAG_harmony_scoping);
+ return i::PreParserApi::PreParse(isolate, &stream);
} else {
i::GenericStringUtf16CharacterStream stream(str, 0, str->length());
- return i::ParserApi::PreParse(&stream, NULL, i::FLAG_harmony_scoping);
+ return i::PreParserApi::PreParse(isolate, &stream);
}
}
@@ -1493,7 +1685,8 @@ ScriptData* ScriptData::New(const char* data, int length) {
}
// Copy the data to align it.
unsigned* deserialized_data = i::NewArray<unsigned>(deserialized_data_length);
- i::OS::MemCopy(deserialized_data, data, length);
+ i::CopyBytes(reinterpret_cast<char*>(deserialized_data),
+ data, static_cast<size_t>(length));
return new i::ScriptDataImpl(
i::Vector<unsigned>(deserialized_data, deserialized_data_length));
@@ -1507,16 +1700,17 @@ Local<Script> Script::New(v8::Handle<String> source,
v8::ScriptOrigin* origin,
v8::ScriptData* pre_data,
v8::Handle<String> script_data) {
- i::Isolate* isolate = i::Isolate::Current();
+ i::Handle<i::String> str = Utils::OpenHandle(*source);
+ i::Isolate* isolate = str->GetIsolate();
ON_BAILOUT(isolate, "v8::Script::New()", return Local<Script>());
LOG_API(isolate, "Script::New");
ENTER_V8(isolate);
i::SharedFunctionInfo* raw_result = NULL;
{ i::HandleScope scope(isolate);
- i::Handle<i::String> str = Utils::OpenHandle(*source);
i::Handle<i::Object> name_obj;
int line_offset = 0;
int column_offset = 0;
+ bool is_shared_cross_origin = false;
if (origin != NULL) {
if (!origin->ResourceName().IsEmpty()) {
name_obj = Utils::OpenHandle(*origin->ResourceName());
@@ -1528,6 +1722,11 @@ Local<Script> Script::New(v8::Handle<String> source,
column_offset =
static_cast<int>(origin->ResourceColumnOffset()->Value());
}
+ if (!origin->ResourceIsSharedCrossOrigin().IsEmpty()) {
+ v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
+ is_shared_cross_origin =
+ origin->ResourceIsSharedCrossOrigin() == v8::True(v8_isolate);
+ }
}
EXCEPTION_PREAMBLE(isolate);
i::ScriptDataImpl* pre_data_impl =
@@ -1544,6 +1743,7 @@ Local<Script> Script::New(v8::Handle<String> source,
name_obj,
line_offset,
column_offset,
+ is_shared_cross_origin,
isolate->global_context(),
NULL,
pre_data_impl,
@@ -1554,7 +1754,7 @@ Local<Script> Script::New(v8::Handle<String> source,
raw_result = *result;
}
i::Handle<i::SharedFunctionInfo> result(raw_result, isolate);
- return Local<Script>(ToApi<Script>(result));
+ return ToApiHandle<Script>(result);
}
@@ -1569,7 +1769,8 @@ Local<Script> Script::Compile(v8::Handle<String> source,
v8::ScriptOrigin* origin,
v8::ScriptData* pre_data,
v8::Handle<String> script_data) {
- i::Isolate* isolate = i::Isolate::Current();
+ i::Handle<i::String> str = Utils::OpenHandle(*source);
+ i::Isolate* isolate = str->GetIsolate();
ON_BAILOUT(isolate, "v8::Script::Compile()", return Local<Script>());
LOG_API(isolate, "Script::Compile");
ENTER_V8(isolate);
@@ -1583,7 +1784,7 @@ Local<Script> Script::Compile(v8::Handle<String> source,
isolate->factory()->NewFunctionFromSharedFunctionInfo(
function,
isolate->global_context());
- return Local<Script>(ToApi<Script>(result));
+ return ToApiHandle<Script>(result);
}
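
A compile-side sketch of feeding the new is_shared_cross_origin bit (the resource URL is invented, and the four-argument ScriptOrigin constructor is assumed from this revision's v8.h):

    #include <v8.h>

    static v8::Local<v8::Script> CompileSharedCrossOrigin(
        v8::Isolate* isolate, v8::Handle<v8::String> source) {
      v8::ScriptOrigin origin(v8::String::New("http://example.com/lib.js"),
                              v8::Integer::New(0),
                              v8::Integer::New(0),
                              v8::True(isolate));
      // Script::New()/Compile() above copy the flag onto the internal Script,
      // where Message::IsSharedCrossOrigin() can later read it back.
      return v8::Script::Compile(source, &origin);
    }
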
@@ -1596,14 +1797,19 @@ Local<Script> Script::Compile(v8::Handle<String> source,
Local<Value> Script::Run() {
- i::Isolate* isolate = i::Isolate::Current();
+ // If execution is terminating, Compile(script)->Run() requires this check.
+ if (this == NULL) return Local<Value>();
+ i::Handle<i::HeapObject> obj =
+ i::Handle<i::HeapObject>::cast(Utils::OpenHandle(this));
+ i::Isolate* isolate = obj->GetIsolate();
ON_BAILOUT(isolate, "v8::Script::Run()", return Local<Value>());
LOG_API(isolate, "Script::Run");
ENTER_V8(isolate);
+ i::Logger::TimerEventScope timer_scope(
+ isolate, i::Logger::TimerEventScope::v8_execute);
i::Object* raw_result = NULL;
{
i::HandleScope scope(isolate);
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::JSFunction> fun;
if (obj->IsSharedFunctionInfo()) {
i::Handle<i::SharedFunctionInfo>
@@ -1616,8 +1822,8 @@ Local<Value> Script::Run() {
EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> receiver(
isolate->context()->global_proxy(), isolate);
- i::Handle<i::Object> result =
- i::Execution::Call(fun, receiver, 0, NULL, &has_pending_exception);
+ i::Handle<i::Object> result = i::Execution::Call(
+ isolate, fun, receiver, 0, NULL, &has_pending_exception);
EXCEPTION_BAILOUT_CHECK_DO_CALLBACK(isolate, Local<Value>());
raw_result = *result;
}
@@ -1641,7 +1847,9 @@ static i::Handle<i::SharedFunctionInfo> OpenScript(Script* script) {
Local<Value> Script::Id() {
- i::Isolate* isolate = i::Isolate::Current();
+ i::Handle<i::HeapObject> obj =
+ i::Handle<i::HeapObject>::cast(Utils::OpenHandle(this));
+ i::Isolate* isolate = obj->GetIsolate();
ON_BAILOUT(isolate, "v8::Script::Id()", return Local<Value>());
LOG_API(isolate, "Script::Id");
i::Object* raw_id = NULL;
@@ -1649,16 +1857,63 @@ Local<Value> Script::Id() {
i::HandleScope scope(isolate);
i::Handle<i::SharedFunctionInfo> function_info = OpenScript(this);
i::Handle<i::Script> script(i::Script::cast(function_info->script()));
- i::Handle<i::Object> id(script->id());
+ i::Handle<i::Object> id(script->id(), isolate);
raw_id = *id;
}
- i::Handle<i::Object> id(raw_id);
+ i::Handle<i::Object> id(raw_id, isolate);
return Utils::ToLocal(id);
}
+int Script::GetId() {
+ i::Handle<i::HeapObject> obj =
+ i::Handle<i::HeapObject>::cast(Utils::OpenHandle(this));
+ i::Isolate* isolate = obj->GetIsolate();
+ ON_BAILOUT(isolate, "v8::Script::Id()", return -1);
+ LOG_API(isolate, "Script::Id");
+ {
+ i::HandleScope scope(isolate);
+ i::Handle<i::SharedFunctionInfo> function_info = OpenScript(this);
+ i::Handle<i::Script> script(i::Script::cast(function_info->script()));
+ return script->id()->value();
+ }
+}
+
+
+int Script::GetLineNumber(int code_pos) {
+ i::Handle<i::HeapObject> obj =
+ i::Handle<i::HeapObject>::cast(Utils::OpenHandle(this));
+ i::Isolate* isolate = obj->GetIsolate();
+ ON_BAILOUT(isolate, "v8::Script::GetLineNumber()", return -1);
+ LOG_API(isolate, "Script::GetLineNumber");
+ if (obj->IsScript()) {
+ i::Handle<i::Script> script = i::Handle<i::Script>(i::Script::cast(*obj));
+ return i::GetScriptLineNumber(script, code_pos);
+ } else {
+ return -1;
+ }
+}
+
+
+Handle<Value> Script::GetScriptName() {
+ i::Handle<i::HeapObject> obj =
+ i::Handle<i::HeapObject>::cast(Utils::OpenHandle(this));
+ i::Isolate* isolate = obj->GetIsolate();
+ ON_BAILOUT(isolate, "v8::Script::GetName()", return Handle<String>());
+ LOG_API(isolate, "Script::GetName");
+ if (obj->IsScript()) {
+ i::Object* name = i::Script::cast(*obj)->name();
+ return Utils::ToLocal(i::Handle<i::Object>(name, isolate));
+ } else {
+ return Handle<String>();
+ }
+}
+
+
void Script::SetData(v8::Handle<String> data) {
- i::Isolate* isolate = i::Isolate::Current();
+ i::Handle<i::HeapObject> obj =
+ i::Handle<i::HeapObject>::cast(Utils::OpenHandle(this));
+ i::Isolate* isolate = obj->GetIsolate();
ON_BAILOUT(isolate, "v8::Script::SetData()", return);
LOG_API(isolate, "Script::SetData");
{
@@ -1677,12 +1932,12 @@ void Script::SetData(v8::Handle<String> data) {
v8::TryCatch::TryCatch()
: isolate_(i::Isolate::Current()),
next_(isolate_->try_catch_handler_address()),
- exception_(isolate_->heap()->the_hole_value()),
- message_(i::Smi::FromInt(0)),
is_verbose_(false),
can_continue_(true),
capture_message_(true),
- rethrow_(false) {
+ rethrow_(false),
+ has_terminated_(false) {
+ Reset();
isolate_->RegisterTryCatchHandler(this);
}
@@ -1690,10 +1945,20 @@ v8::TryCatch::TryCatch()
v8::TryCatch::~TryCatch() {
ASSERT(isolate_ == i::Isolate::Current());
if (rethrow_) {
- v8::HandleScope scope;
- v8::Local<v8::Value> exc = v8::Local<v8::Value>::New(Exception());
+ v8::Isolate* isolate = reinterpret_cast<Isolate*>(isolate_);
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::Value> exc = v8::Local<v8::Value>::New(isolate, Exception());
+ if (HasCaught() && capture_message_) {
+ // If an exception was caught and rethrow_ is indicated, the saved
+ // message, script, and location need to be restored to Isolate TLS
+ // for reuse. capture_message_ needs to be disabled so that DoThrow()
+ // does not create a new message.
+ isolate_->thread_local_top()->rethrowing_message_ = true;
+ isolate_->RestorePendingMessageFromTryCatch(this);
+ }
isolate_->UnregisterTryCatchHandler(this);
- v8::ThrowException(exc);
+ reinterpret_cast<Isolate*>(isolate_)->ThrowException(exc);
+ ASSERT(!isolate_->thread_local_top()->rethrowing_message_);
} else {
isolate_->UnregisterTryCatchHandler(this);
}
@@ -1710,10 +1975,15 @@ bool v8::TryCatch::CanContinue() const {
}
+bool v8::TryCatch::HasTerminated() const {
+ return has_terminated_;
+}
+
+
v8::Handle<v8::Value> v8::TryCatch::ReThrow() {
if (!HasCaught()) return v8::Local<v8::Value>();
rethrow_ = true;
- return v8::Undefined();
+ return v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate_));
}
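
A sketch of how an embedder is expected to consume the rethrow and termination changes together (names are illustrative):

    #include <v8.h>

    static void RunGuarded(v8::Handle<v8::String> source) {
      v8::TryCatch try_catch;
      v8::Local<v8::Script> script = v8::Script::Compile(source);
      // Script::Run() above tolerates a NULL script, so Compile()->Run() stays
      // safe even when compilation was aborted by a pending termination.
      v8::Local<v8::Value> result = script->Run();
      if (try_catch.HasCaught()) {
        if (try_catch.HasTerminated()) {
          return;  // forced termination: no JavaScript exception to report
        }
        // Defer to an outer TryCatch; the saved message is restored when this
        // TryCatch is destroyed with rethrow_ set.
        try_catch.ReThrow();
      }
      (void) result;
    }
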
@@ -1736,9 +2006,9 @@ v8::Local<Value> v8::TryCatch::StackTrace() const {
if (!raw_obj->IsJSObject()) return v8::Local<Value>();
i::HandleScope scope(isolate_);
i::Handle<i::JSObject> obj(i::JSObject::cast(raw_obj), isolate_);
- i::Handle<i::String> name = isolate_->factory()->LookupAsciiSymbol("stack");
- if (!obj->HasProperty(*name)) return v8::Local<Value>();
- i::Handle<i::Object> value = i::GetProperty(obj, name);
+ i::Handle<i::String> name = isolate_->factory()->stack_string();
+ if (!i::JSReceiver::HasProperty(obj, name)) return v8::Local<Value>();
+ i::Handle<i::Object> value = i::GetProperty(isolate_, obj, name);
if (value.is_null()) return v8::Local<Value>();
return v8::Utils::ToLocal(scope.CloseAndEscape(value));
} else {
@@ -1749,8 +2019,9 @@ v8::Local<Value> v8::TryCatch::StackTrace() const {
v8::Local<v8::Message> v8::TryCatch::Message() const {
ASSERT(isolate_ == i::Isolate::Current());
- if (HasCaught() && message_ != i::Smi::FromInt(0)) {
- i::Object* message = reinterpret_cast<i::Object*>(message_);
+ i::Object* message = reinterpret_cast<i::Object*>(message_obj_);
+ ASSERT(message->IsJSMessageObject() || message->IsTheHole());
+ if (HasCaught() && !message->IsTheHole()) {
return v8::Utils::MessageToLocal(i::Handle<i::Object>(message, isolate_));
} else {
return v8::Local<v8::Message>();
@@ -1760,8 +2031,12 @@ v8::Local<v8::Message> v8::TryCatch::Message() const {
void v8::TryCatch::Reset() {
ASSERT(isolate_ == i::Isolate::Current());
- exception_ = isolate_->heap()->the_hole_value();
- message_ = i::Smi::FromInt(0);
+ i::Object* the_hole = isolate_->heap()->the_hole_value();
+ exception_ = the_hole;
+ message_obj_ = the_hole;
+ message_script_ = the_hole;
+ message_start_pos_ = 0;
+ message_end_pos_ = 0;
}
@@ -1782,9 +2057,9 @@ Local<String> Message::Get() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ON_BAILOUT(isolate, "v8::Message::Get()", return Local<String>());
ENTER_V8(isolate);
- HandleScope scope;
+ HandleScope scope(reinterpret_cast<Isolate*>(isolate));
i::Handle<i::Object> obj = Utils::OpenHandle(this);
- i::Handle<i::String> raw_result = i::MessageHandler::GetMessage(obj);
+ i::Handle<i::String> raw_result = i::MessageHandler::GetMessage(isolate, obj);
Local<String> result = Utils::ToLocal(raw_result);
return scope.Close(result);
}
@@ -1792,48 +2067,42 @@ Local<String> Message::Get() const {
v8::Handle<Value> Message::GetScriptResourceName() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::Message::GetScriptResourceName()")) {
- return Local<String>();
- }
ENTER_V8(isolate);
- HandleScope scope;
+ HandleScope scope(reinterpret_cast<Isolate*>(isolate));
i::Handle<i::JSMessageObject> message =
i::Handle<i::JSMessageObject>::cast(Utils::OpenHandle(this));
// Return this.script.name.
i::Handle<i::JSValue> script =
- i::Handle<i::JSValue>::cast(i::Handle<i::Object>(message->script()));
- i::Handle<i::Object> resource_name(i::Script::cast(script->value())->name());
+ i::Handle<i::JSValue>::cast(i::Handle<i::Object>(message->script(),
+ isolate));
+ i::Handle<i::Object> resource_name(i::Script::cast(script->value())->name(),
+ isolate);
return scope.Close(Utils::ToLocal(resource_name));
}
v8::Handle<Value> Message::GetScriptData() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::Message::GetScriptResourceData()")) {
- return Local<Value>();
- }
ENTER_V8(isolate);
- HandleScope scope;
+ HandleScope scope(reinterpret_cast<Isolate*>(isolate));
i::Handle<i::JSMessageObject> message =
i::Handle<i::JSMessageObject>::cast(Utils::OpenHandle(this));
// Return this.script.data.
i::Handle<i::JSValue> script =
- i::Handle<i::JSValue>::cast(i::Handle<i::Object>(message->script()));
- i::Handle<i::Object> data(i::Script::cast(script->value())->data());
+ i::Handle<i::JSValue>::cast(i::Handle<i::Object>(message->script(),
+ isolate));
+ i::Handle<i::Object> data(i::Script::cast(script->value())->data(), isolate);
return scope.Close(Utils::ToLocal(data));
}
v8::Handle<v8::StackTrace> Message::GetStackTrace() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::Message::GetStackTrace()")) {
- return Local<v8::StackTrace>();
- }
ENTER_V8(isolate);
- HandleScope scope;
+ HandleScope scope(reinterpret_cast<Isolate*>(isolate));
i::Handle<i::JSMessageObject> message =
i::Handle<i::JSMessageObject>::cast(Utils::OpenHandle(this));
- i::Handle<i::Object> stackFramesObj(message->stack_frames());
+ i::Handle<i::Object> stackFramesObj(message->stack_frames(), isolate);
if (!stackFramesObj->IsJSArray()) return v8::Handle<v8::StackTrace>();
i::Handle<i::JSArray> stackTrace =
i::Handle<i::JSArray>::cast(stackFramesObj);
@@ -1847,13 +2116,14 @@ static i::Handle<i::Object> CallV8HeapFunction(const char* name,
i::Handle<i::Object> argv[],
bool* has_pending_exception) {
i::Isolate* isolate = i::Isolate::Current();
- i::Handle<i::String> fmt_str = isolate->factory()->LookupAsciiSymbol(name);
+ i::Handle<i::String> fmt_str =
+ isolate->factory()->InternalizeUtf8String(name);
i::Object* object_fun =
isolate->js_builtins_object()->GetPropertyNoExceptionThrown(*fmt_str);
i::Handle<i::JSFunction> fun =
i::Handle<i::JSFunction>(i::JSFunction::cast(object_fun));
- i::Handle<i::Object> value =
- i::Execution::Call(fun, recv, argc, argv, has_pending_exception);
+ i::Handle<i::Object> value = i::Execution::Call(
+ isolate, fun, recv, argc, argv, has_pending_exception);
return value;
}
@@ -1887,7 +2157,6 @@ int Message::GetLineNumber() const {
int Message::GetStartPosition() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::Message::GetStartPosition()")) return 0;
ENTER_V8(isolate);
i::HandleScope scope(isolate);
i::Handle<i::JSMessageObject> message =
@@ -1898,7 +2167,6 @@ int Message::GetStartPosition() const {
int Message::GetEndPosition() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::Message::GetEndPosition()")) return 0;
ENTER_V8(isolate);
i::HandleScope scope(isolate);
i::Handle<i::JSMessageObject> message =
@@ -1909,9 +2177,6 @@ int Message::GetEndPosition() const {
int Message::GetStartColumn() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::Message::GetStartColumn()")) {
- return kNoColumnInfo;
- }
ENTER_V8(isolate);
i::HandleScope scope(isolate);
i::Handle<i::JSObject> data_obj = Utils::OpenHandle(this);
@@ -1927,7 +2192,6 @@ int Message::GetStartColumn() const {
int Message::GetEndColumn() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::Message::GetEndColumn()")) return kNoColumnInfo;
ENTER_V8(isolate);
i::HandleScope scope(isolate);
i::Handle<i::JSObject> data_obj = Utils::OpenHandle(this);
@@ -1945,11 +2209,24 @@ int Message::GetEndColumn() const {
}
+bool Message::IsSharedCrossOrigin() const {
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ ENTER_V8(isolate);
+ i::HandleScope scope(isolate);
+ i::Handle<i::JSMessageObject> message =
+ i::Handle<i::JSMessageObject>::cast(Utils::OpenHandle(this));
+ i::Handle<i::JSValue> script =
+ i::Handle<i::JSValue>::cast(i::Handle<i::Object>(message->script(),
+ isolate));
+ return i::Script::cast(script->value())->is_shared_cross_origin();
+}
+
+
Local<String> Message::GetSourceLine() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ON_BAILOUT(isolate, "v8::Message::GetSourceLine()", return Local<String>());
ENTER_V8(isolate);
- HandleScope scope;
+ HandleScope scope(reinterpret_cast<Isolate*>(isolate));
EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> result = CallV8HeapFunction("GetSourceLine",
Utils::OpenHandle(this),
@@ -1965,7 +2242,6 @@ Local<String> Message::GetSourceLine() const {
void Message::PrintCurrentStackTrace(FILE* out) {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Message::PrintCurrentStackTrace()")) return;
ENTER_V8(isolate);
isolate->PrintCurrentStackTrace(out);
}
@@ -1975,13 +2251,10 @@ void Message::PrintCurrentStackTrace(FILE* out) {
Local<StackFrame> StackTrace::GetFrame(uint32_t index) const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::StackTrace::GetFrame()")) {
- return Local<StackFrame>();
- }
ENTER_V8(isolate);
- HandleScope scope;
+ HandleScope scope(reinterpret_cast<Isolate*>(isolate));
i::Handle<i::JSArray> self = Utils::OpenHandle(this);
- i::Object* raw_object = self->GetElementNoExceptionThrown(index);
+ i::Object* raw_object = self->GetElementNoExceptionThrown(isolate, index);
i::Handle<i::JSObject> obj(i::JSObject::cast(raw_object));
return scope.Close(Utils::StackFrameToLocal(obj));
}
@@ -1989,7 +2262,6 @@ Local<StackFrame> StackTrace::GetFrame(uint32_t index) const {
int StackTrace::GetFrameCount() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::StackTrace::GetFrameCount()")) return -1;
ENTER_V8(isolate);
return i::Smi::cast(Utils::OpenHandle(this)->length())->value();
}
@@ -1997,7 +2269,6 @@ int StackTrace::GetFrameCount() const {
Local<Array> StackTrace::AsArray() {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::StackTrace::AsArray()")) Local<Array>();
ENTER_V8(isolate);
return Utils::ToLocal(Utils::OpenHandle(this));
}
@@ -2006,9 +2277,6 @@ Local<Array> StackTrace::AsArray() {
Local<StackTrace> StackTrace::CurrentStackTrace(int frame_limit,
StackTraceOptions options) {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::StackTrace::CurrentStackTrace()")) {
- Local<StackTrace>();
- }
ENTER_V8(isolate);
i::Handle<i::JSArray> stackTrace =
isolate->CaptureCurrentStackTrace(frame_limit, options);
@@ -2020,9 +2288,6 @@ Local<StackTrace> StackTrace::CurrentStackTrace(int frame_limit,
int StackFrame::GetLineNumber() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::StackFrame::GetLineNumber()")) {
- return Message::kNoLineNumberInfo;
- }
ENTER_V8(isolate);
i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
@@ -2036,9 +2301,6 @@ int StackFrame::GetLineNumber() const {
int StackFrame::GetColumn() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::StackFrame::GetColumn()")) {
- return Message::kNoColumnInfo;
- }
ENTER_V8(isolate);
i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
@@ -2050,13 +2312,23 @@ int StackFrame::GetColumn() const {
}
-Local<String> StackFrame::GetScriptName() const {
+int StackFrame::GetScriptId() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::StackFrame::GetScriptName()")) {
- return Local<String>();
+ ENTER_V8(isolate);
+ i::HandleScope scope(isolate);
+ i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+ i::Handle<i::Object> scriptId = GetProperty(self, "scriptId");
+ if (!scriptId->IsSmi()) {
+ return Message::kNoScriptIdInfo;
}
+ return i::Smi::cast(*scriptId)->value();
+}
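
A sketch correlating the two new integer ids (this assumes the stack trace was captured with options that record script ids, e.g. kDetailed):

    #include <v8.h>

    static bool TraceTouchesScript(v8::Local<v8::Script> script) {
      int script_id = script->GetId();
      v8::Local<v8::StackTrace> trace =
          v8::StackTrace::CurrentStackTrace(16, v8::StackTrace::kDetailed);
      for (int i = 0; i < trace->GetFrameCount(); i++) {
        if (trace->GetFrame(i)->GetScriptId() == script_id) return true;
      }
      return false;
    }
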
+
+
+Local<String> StackFrame::GetScriptName() const {
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ENTER_V8(isolate);
- HandleScope scope;
+ HandleScope scope(reinterpret_cast<Isolate*>(isolate));
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::Object> name = GetProperty(self, "scriptName");
if (!name->IsString()) {
@@ -2068,11 +2340,8 @@ Local<String> StackFrame::GetScriptName() const {
Local<String> StackFrame::GetScriptNameOrSourceURL() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::StackFrame::GetScriptNameOrSourceURL()")) {
- return Local<String>();
- }
ENTER_V8(isolate);
- HandleScope scope;
+ HandleScope scope(reinterpret_cast<Isolate*>(isolate));
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::Object> name = GetProperty(self, "scriptNameOrSourceURL");
if (!name->IsString()) {
@@ -2084,11 +2353,8 @@ Local<String> StackFrame::GetScriptNameOrSourceURL() const {
Local<String> StackFrame::GetFunctionName() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::StackFrame::GetFunctionName()")) {
- return Local<String>();
- }
ENTER_V8(isolate);
- HandleScope scope;
+ HandleScope scope(reinterpret_cast<Isolate*>(isolate));
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::Object> name = GetProperty(self, "functionName");
if (!name->IsString()) {
@@ -2100,7 +2366,6 @@ Local<String> StackFrame::GetFunctionName() const {
bool StackFrame::IsEval() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::StackFrame::IsEval()")) return false;
ENTER_V8(isolate);
i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
@@ -2111,7 +2376,6 @@ bool StackFrame::IsEval() const {
bool StackFrame::IsConstructor() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::StackFrame::IsConstructor()")) return false;
ENTER_V8(isolate);
i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
@@ -2120,12 +2384,32 @@ bool StackFrame::IsConstructor() const {
}
+// --- J S O N ---
+
+Local<Value> JSON::Parse(Local<String> json_string) {
+ i::Isolate* isolate = i::Isolate::Current();
+ EnsureInitializedForIsolate(isolate, "v8::JSON::Parse");
+ ENTER_V8(isolate);
+ i::HandleScope scope(isolate);
+ i::Handle<i::String> source = i::Handle<i::String>(
+ FlattenGetString(Utils::OpenHandle(*json_string)));
+ EXCEPTION_PREAMBLE(isolate);
+ i::Handle<i::Object> result;
+ if (source->IsSeqOneByteString()) {
+ result = i::JsonParser<true>::Parse(source);
+ } else {
+ result = i::JsonParser<false>::Parse(source);
+ }
+ has_pending_exception = result.is_null();
+ EXCEPTION_BAILOUT_CHECK(isolate, Local<Object>());
+ return Utils::ToLocal(
+ i::Handle<i::Object>::cast(scope.CloseAndEscape(result)));
+}
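
The new entry point in use (the JSON text is illustrative):

    #include <v8.h>

    static void UseJsonParse() {
      v8::HandleScope scope(v8::Isolate::GetCurrent());
      v8::Local<v8::Value> parsed =
          v8::JSON::Parse(v8::String::New("{\"answer\": 42}"));
      if (parsed.IsEmpty()) return;  // an empty handle signals a parse error
      v8::Local<v8::Object> obj = v8::Local<v8::Object>::Cast(parsed);
      v8::Local<v8::Value> answer = obj->Get(v8::String::New("answer"));
      (void) answer;
    }
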
+
+
// --- D a t a ---
bool Value::FullIsUndefined() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsUndefined()")) {
- return false;
- }
bool result = Utils::OpenHandle(this)->IsUndefined();
ASSERT_EQ(result, QuickIsUndefined());
return result;
@@ -2133,7 +2417,6 @@ bool Value::FullIsUndefined() const {
bool Value::FullIsNull() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsNull()")) return false;
bool result = Utils::OpenHandle(this)->IsNull();
ASSERT_EQ(result, QuickIsNull());
return result;
@@ -2141,69 +2424,102 @@ bool Value::FullIsNull() const {
bool Value::IsTrue() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsTrue()")) return false;
return Utils::OpenHandle(this)->IsTrue();
}
bool Value::IsFalse() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsFalse()")) return false;
return Utils::OpenHandle(this)->IsFalse();
}
bool Value::IsFunction() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsFunction()")) {
- return false;
- }
return Utils::OpenHandle(this)->IsJSFunction();
}
bool Value::FullIsString() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsString()")) return false;
bool result = Utils::OpenHandle(this)->IsString();
ASSERT_EQ(result, QuickIsString());
return result;
}
+bool Value::IsSymbol() const {
+ return Utils::OpenHandle(this)->IsSymbol();
+}
+
+
bool Value::IsArray() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsArray()")) return false;
return Utils::OpenHandle(this)->IsJSArray();
}
+bool Value::IsArrayBuffer() const {
+ return Utils::OpenHandle(this)->IsJSArrayBuffer();
+}
+
+
+bool Value::IsArrayBufferView() const {
+ return Utils::OpenHandle(this)->IsJSArrayBufferView();
+}
+
+
+bool Value::IsTypedArray() const {
+ return Utils::OpenHandle(this)->IsJSTypedArray();
+}
+
+
+#define TYPED_ARRAY_LIST(F) \
+F(Uint8Array, kExternalUnsignedByteArray) \
+F(Int8Array, kExternalByteArray) \
+F(Uint16Array, kExternalUnsignedShortArray) \
+F(Int16Array, kExternalShortArray) \
+F(Uint32Array, kExternalUnsignedIntArray) \
+F(Int32Array, kExternalIntArray) \
+F(Float32Array, kExternalFloatArray) \
+F(Float64Array, kExternalDoubleArray) \
+F(Uint8ClampedArray, kExternalPixelArray)
+
+
+#define VALUE_IS_TYPED_ARRAY(TypedArray, type_const) \
+ bool Value::Is##TypedArray() const { \
+ i::Handle<i::Object> obj = Utils::OpenHandle(this); \
+ if (!obj->IsJSTypedArray()) return false; \
+ return i::JSTypedArray::cast(*obj)->type() == type_const; \
+ }
+
+TYPED_ARRAY_LIST(VALUE_IS_TYPED_ARRAY)
+
+#undef VALUE_IS_TYPED_ARRAY
+
+
+bool Value::IsDataView() const {
+ return Utils::OpenHandle(this)->IsJSDataView();
+}
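
Embedder-side, the new predicates make dispatching on binary-data values straightforward (a sketch; the labels are arbitrary):

    #include <v8.h>

    static const char* ClassifyBinary(v8::Handle<v8::Value> value) {
      if (value->IsArrayBuffer()) return "ArrayBuffer backing store";
      if (value->IsUint8ClampedArray()) return "Uint8ClampedArray view";
      if (value->IsFloat64Array()) return "Float64Array view";
      if (value->IsTypedArray()) return "some other typed-array view";
      if (value->IsDataView()) return "DataView";
      return "not a binary-data object";
    }
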
+
+
bool Value::IsObject() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsObject()")) return false;
return Utils::OpenHandle(this)->IsJSObject();
}
bool Value::IsNumber() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsNumber()")) return false;
return Utils::OpenHandle(this)->IsNumber();
}
bool Value::IsBoolean() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsBoolean()")) {
- return false;
- }
return Utils::OpenHandle(this)->IsBoolean();
}
bool Value::IsExternal() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsExternal()")) {
- return false;
- }
- return Utils::OpenHandle(this)->IsForeign();
+ return Utils::OpenHandle(this)->IsExternal();
}
bool Value::IsInt32() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsInt32()")) return false;
i::Handle<i::Object> obj = Utils::OpenHandle(this);
if (obj->IsSmi()) return true;
if (obj->IsNumber()) {
@@ -2220,7 +2536,6 @@ bool Value::IsInt32() const {
bool Value::IsUint32() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsUint32()")) return false;
i::Handle<i::Object> obj = Utils::OpenHandle(this);
if (obj->IsSmi()) return i::Smi::cast(*obj)->value() >= 0;
if (obj->IsNumber()) {
@@ -2238,47 +2553,56 @@ bool Value::IsUint32() const {
bool Value::IsDate() const {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::IsDate()")) return false;
i::Handle<i::Object> obj = Utils::OpenHandle(this);
- return obj->HasSpecificClassOf(isolate->heap()->Date_symbol());
+ return obj->HasSpecificClassOf(isolate->heap()->Date_string());
}
bool Value::IsStringObject() const {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::IsStringObject()")) return false;
i::Handle<i::Object> obj = Utils::OpenHandle(this);
- return obj->HasSpecificClassOf(isolate->heap()->String_symbol());
+ return obj->HasSpecificClassOf(isolate->heap()->String_string());
+}
+
+
+bool Value::IsSymbolObject() const {
+ // TODO(svenpanne): these and other test functions should be written such
+ // that they do not use Isolate::Current().
+ i::Isolate* isolate = i::Isolate::Current();
+ i::Handle<i::Object> obj = Utils::OpenHandle(this);
+ return obj->HasSpecificClassOf(isolate->heap()->Symbol_string());
}
bool Value::IsNumberObject() const {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::IsNumberObject()")) return false;
i::Handle<i::Object> obj = Utils::OpenHandle(this);
- return obj->HasSpecificClassOf(isolate->heap()->Number_symbol());
+ return obj->HasSpecificClassOf(isolate->heap()->Number_string());
}
static i::Object* LookupBuiltin(i::Isolate* isolate,
const char* builtin_name) {
- i::Handle<i::String> symbol =
- isolate->factory()->LookupAsciiSymbol(builtin_name);
+ i::Handle<i::String> string =
+ isolate->factory()->InternalizeUtf8String(builtin_name);
i::Handle<i::JSBuiltinsObject> builtins = isolate->js_builtins_object();
- return builtins->GetPropertyNoExceptionThrown(*symbol);
+ return builtins->GetPropertyNoExceptionThrown(*string);
}
static bool CheckConstructor(i::Isolate* isolate,
i::Handle<i::JSObject> obj,
const char* class_name) {
- return obj->map()->constructor() == LookupBuiltin(isolate, class_name);
+ i::Object* constr = obj->map()->constructor();
+ if (!constr->IsJSFunction()) return false;
+ i::JSFunction* func = i::JSFunction::cast(constr);
+ return func->shared()->native() &&
+ constr == LookupBuiltin(isolate, class_name);
}
bool Value::IsNativeError() const {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::IsNativeError()")) return false;
i::Handle<i::Object> obj = Utils::OpenHandle(this);
if (obj->IsJSObject()) {
i::Handle<i::JSObject> js_obj(i::JSObject::cast(*obj));
@@ -2297,14 +2621,12 @@ bool Value::IsNativeError() const {
bool Value::IsBooleanObject() const {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::IsBooleanObject()")) return false;
i::Handle<i::Object> obj = Utils::OpenHandle(this);
- return obj->HasSpecificClassOf(isolate->heap()->Boolean_symbol());
+ return obj->HasSpecificClassOf(isolate->heap()->Boolean_string());
}
bool Value::IsRegExp() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsRegExp()")) return false;
i::Handle<i::Object> obj = Utils::OpenHandle(this);
return obj->IsJSRegExp();
}
@@ -2317,16 +2639,13 @@ Local<String> Value::ToString() const {
str = obj;
} else {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::ToString()")) {
- return Local<String>();
- }
LOG_API(isolate, "ToString");
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
- str = i::Execution::ToString(obj, &has_pending_exception);
+ str = i::Execution::ToString(isolate, obj, &has_pending_exception);
EXCEPTION_BAILOUT_CHECK(isolate, Local<String>());
}
- return Local<String>(ToApi<String>(str));
+ return ToApiHandle<String>(str);
}
@@ -2337,16 +2656,13 @@ Local<String> Value::ToDetailString() const {
str = obj;
} else {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::ToDetailString()")) {
- return Local<String>();
- }
LOG_API(isolate, "ToDetailString");
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
- str = i::Execution::ToDetailString(obj, &has_pending_exception);
+ str = i::Execution::ToDetailString(isolate, obj, &has_pending_exception);
EXCEPTION_BAILOUT_CHECK(isolate, Local<String>());
}
- return Local<String>(ToApi<String>(str));
+ return ToApiHandle<String>(str);
}
@@ -2357,32 +2673,27 @@ Local<v8::Object> Value::ToObject() const {
val = obj;
} else {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::ToObject()")) {
- return Local<v8::Object>();
- }
LOG_API(isolate, "ToObject");
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
- val = i::Execution::ToObject(obj, &has_pending_exception);
+ val = i::Execution::ToObject(isolate, obj, &has_pending_exception);
EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::Object>());
}
- return Local<v8::Object>(ToApi<Object>(val));
+ return ToApiHandle<Object>(val);
}
Local<Boolean> Value::ToBoolean() const {
i::Handle<i::Object> obj = Utils::OpenHandle(this);
if (obj->IsBoolean()) {
- return Local<Boolean>(ToApi<Boolean>(obj));
+ return ToApiHandle<Boolean>(obj);
} else {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::ToBoolean()")) {
- return Local<Boolean>();
- }
LOG_API(isolate, "ToBoolean");
ENTER_V8(isolate);
- i::Handle<i::Object> val = i::Execution::ToBoolean(obj);
- return Local<Boolean>(ToApi<Boolean>(val));
+ i::Handle<i::Object> val =
+ isolate->factory()->ToBoolean(obj->BooleanValue());
+ return ToApiHandle<Boolean>(val);
}
}
@@ -2394,16 +2705,13 @@ Local<Number> Value::ToNumber() const {
num = obj;
} else {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::ToNumber()")) {
- return Local<Number>();
- }
LOG_API(isolate, "ToNumber");
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
- num = i::Execution::ToNumber(obj, &has_pending_exception);
+ num = i::Execution::ToNumber(isolate, obj, &has_pending_exception);
EXCEPTION_BAILOUT_CHECK(isolate, Local<Number>());
}
- return Local<Number>(ToApi<Number>(num));
+ return ToApiHandle<Number>(num);
}
@@ -2414,28 +2722,32 @@ Local<Integer> Value::ToInteger() const {
num = obj;
} else {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::ToInteger()")) return Local<Integer>();
LOG_API(isolate, "ToInteger");
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
- num = i::Execution::ToInteger(obj, &has_pending_exception);
+ num = i::Execution::ToInteger(isolate, obj, &has_pending_exception);
EXCEPTION_BAILOUT_CHECK(isolate, Local<Integer>());
}
- return Local<Integer>(ToApi<Integer>(num));
+ return ToApiHandle<Integer>(num);
+}
+
+
+void i::Internals::CheckInitializedImpl(v8::Isolate* external_isolate) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(external_isolate);
+ ApiCheck(isolate != NULL && isolate->IsInitialized() && !isolate->IsDead(),
+ "v8::internal::Internals::CheckInitialized()",
+ "Isolate is not initialized or V8 has died");
}
void External::CheckCast(v8::Value* that) {
- if (IsDeadCheck(i::Isolate::Current(), "v8::External::Cast()")) return;
- i::Handle<i::Object> obj = Utils::OpenHandle(that);
- ApiCheck(obj->IsForeign(),
+ ApiCheck(Utils::OpenHandle(that)->IsExternal(),
"v8::External::Cast()",
"Could not convert to external");
}
void v8::Object::CheckCast(Value* that) {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Object::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
ApiCheck(obj->IsJSObject(),
"v8::Object::Cast()",
@@ -2444,7 +2756,6 @@ void v8::Object::CheckCast(Value* that) {
void v8::Function::CheckCast(Value* that) {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Function::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
ApiCheck(obj->IsJSFunction(),
"v8::Function::Cast()",
@@ -2453,7 +2764,6 @@ void v8::Function::CheckCast(Value* that) {
void v8::String::CheckCast(v8::Value* that) {
- if (IsDeadCheck(i::Isolate::Current(), "v8::String::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
ApiCheck(obj->IsString(),
"v8::String::Cast()",
@@ -2461,8 +2771,15 @@ void v8::String::CheckCast(v8::Value* that) {
}
+void v8::Symbol::CheckCast(v8::Value* that) {
+ i::Handle<i::Object> obj = Utils::OpenHandle(that);
+ ApiCheck(obj->IsSymbol(),
+ "v8::Symbol::Cast()",
+ "Could not convert to symbol");
+}
+
+
void v8::Number::CheckCast(v8::Value* that) {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Number::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
ApiCheck(obj->IsNumber(),
"v8::Number::Cast()",
@@ -2471,7 +2788,6 @@ void v8::Number::CheckCast(v8::Value* that) {
void v8::Integer::CheckCast(v8::Value* that) {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Integer::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
ApiCheck(obj->IsNumber(),
"v8::Integer::Cast()",
@@ -2480,7 +2796,6 @@ void v8::Integer::CheckCast(v8::Value* that) {
void v8::Array::CheckCast(Value* that) {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Array::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
ApiCheck(obj->IsJSArray(),
"v8::Array::Cast()",
@@ -2488,11 +2803,57 @@ void v8::Array::CheckCast(Value* that) {
}
+void v8::ArrayBuffer::CheckCast(Value* that) {
+ i::Handle<i::Object> obj = Utils::OpenHandle(that);
+ ApiCheck(obj->IsJSArrayBuffer(),
+ "v8::ArrayBuffer::Cast()",
+ "Could not convert to ArrayBuffer");
+}
+
+
+void v8::ArrayBufferView::CheckCast(Value* that) {
+ i::Handle<i::Object> obj = Utils::OpenHandle(that);
+ ApiCheck(obj->IsJSArrayBufferView(),
+ "v8::ArrayBufferView::Cast()",
+ "Could not convert to ArrayBufferView");
+}
+
+
+void v8::TypedArray::CheckCast(Value* that) {
+ i::Handle<i::Object> obj = Utils::OpenHandle(that);
+ ApiCheck(obj->IsJSTypedArray(),
+ "v8::TypedArray::Cast()",
+ "Could not convert to TypedArray");
+}
+
+
+#define CHECK_TYPED_ARRAY_CAST(ApiClass, typeConst) \
+ void v8::ApiClass::CheckCast(Value* that) { \
+ i::Handle<i::Object> obj = Utils::OpenHandle(that); \
+ ApiCheck(obj->IsJSTypedArray() && \
+ i::JSTypedArray::cast(*obj)->type() == typeConst, \
+ "v8::" #ApiClass "::Cast()", \
+ "Could not convert to " #ApiClass); \
+ }
+
+
+TYPED_ARRAY_LIST(CHECK_TYPED_ARRAY_CAST)
+
+#undef CHECK_TYPED_ARRAY_CAST
+
+
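// Illustrative expansion (not part of this diff): each TYPED_ARRAY_LIST entry
// pairs an API class with its external array type constant, so for Uint8Array
// the macro above generates a checker of roughly this shape. The constant
// name below is an assumption for illustration only.
void v8::Uint8Array::CheckCast(Value* that) {
  i::Handle<i::Object> obj = Utils::OpenHandle(that);
  ApiCheck(obj->IsJSTypedArray() &&
               i::JSTypedArray::cast(*obj)->type() == kExternalUnsignedByteArray,
           "v8::Uint8Array::Cast()",
           "Could not convert to Uint8Array");
}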
+void v8::DataView::CheckCast(Value* that) {
+ i::Handle<i::Object> obj = Utils::OpenHandle(that);
+ ApiCheck(obj->IsJSDataView(),
+ "v8::DataView::Cast()",
+ "Could not convert to DataView");
+}
+
+
void v8::Date::CheckCast(v8::Value* that) {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Date::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
- ApiCheck(obj->HasSpecificClassOf(isolate->heap()->Date_symbol()),
+ ApiCheck(obj->HasSpecificClassOf(isolate->heap()->Date_string()),
"v8::Date::Cast()",
"Could not convert to date");
}
@@ -2500,19 +2861,26 @@ void v8::Date::CheckCast(v8::Value* that) {
void v8::StringObject::CheckCast(v8::Value* that) {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::StringObject::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
- ApiCheck(obj->HasSpecificClassOf(isolate->heap()->String_symbol()),
+ ApiCheck(obj->HasSpecificClassOf(isolate->heap()->String_string()),
"v8::StringObject::Cast()",
"Could not convert to StringObject");
}
+void v8::SymbolObject::CheckCast(v8::Value* that) {
+ i::Isolate* isolate = i::Isolate::Current();
+ i::Handle<i::Object> obj = Utils::OpenHandle(that);
+ ApiCheck(obj->HasSpecificClassOf(isolate->heap()->Symbol_string()),
+ "v8::SymbolObject::Cast()",
+ "Could not convert to SymbolObject");
+}
+
+
void v8::NumberObject::CheckCast(v8::Value* that) {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::NumberObject::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
- ApiCheck(obj->HasSpecificClassOf(isolate->heap()->Number_symbol()),
+ ApiCheck(obj->HasSpecificClassOf(isolate->heap()->Number_string()),
"v8::NumberObject::Cast()",
"Could not convert to NumberObject");
}
@@ -2520,16 +2888,14 @@ void v8::NumberObject::CheckCast(v8::Value* that) {
void v8::BooleanObject::CheckCast(v8::Value* that) {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::BooleanObject::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
- ApiCheck(obj->HasSpecificClassOf(isolate->heap()->Boolean_symbol()),
+ ApiCheck(obj->HasSpecificClassOf(isolate->heap()->Boolean_string()),
"v8::BooleanObject::Cast()",
"Could not convert to BooleanObject");
}
void v8::RegExp::CheckCast(v8::Value* that) {
- if (IsDeadCheck(i::Isolate::Current(), "v8::RegExp::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
ApiCheck(obj->IsJSRegExp(),
"v8::RegExp::Cast()",
@@ -2538,17 +2904,7 @@ void v8::RegExp::CheckCast(v8::Value* that) {
bool Value::BooleanValue() const {
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- if (obj->IsBoolean()) {
- return obj->IsTrue();
- } else {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::BooleanValue()")) return false;
- LOG_API(isolate, "BooleanValue");
- ENTER_V8(isolate);
- i::Handle<i::Object> value = i::Execution::ToBoolean(obj);
- return value->IsTrue();
- }
+ return Utils::OpenHandle(this)->BooleanValue();
}
@@ -2559,13 +2915,10 @@ double Value::NumberValue() const {
num = obj;
} else {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::NumberValue()")) {
- return i::OS::nan_value();
- }
LOG_API(isolate, "NumberValue");
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
- num = i::Execution::ToNumber(obj, &has_pending_exception);
+ num = i::Execution::ToNumber(isolate, obj, &has_pending_exception);
EXCEPTION_BAILOUT_CHECK(isolate, i::OS::nan_value());
}
return num->Number();
@@ -2579,11 +2932,10 @@ int64_t Value::IntegerValue() const {
num = obj;
} else {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::IntegerValue()")) return 0;
LOG_API(isolate, "IntegerValue");
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
- num = i::Execution::ToInteger(obj, &has_pending_exception);
+ num = i::Execution::ToInteger(isolate, obj, &has_pending_exception);
EXCEPTION_BAILOUT_CHECK(isolate, 0);
}
if (num->IsSmi()) {
@@ -2601,14 +2953,13 @@ Local<Int32> Value::ToInt32() const {
num = obj;
} else {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::ToInt32()")) return Local<Int32>();
LOG_API(isolate, "ToInt32");
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
- num = i::Execution::ToInt32(obj, &has_pending_exception);
+ num = i::Execution::ToInt32(isolate, obj, &has_pending_exception);
EXCEPTION_BAILOUT_CHECK(isolate, Local<Int32>());
}
- return Local<Int32>(ToApi<Int32>(num));
+ return ToApiHandle<Int32>(num);
}
@@ -2619,14 +2970,13 @@ Local<Uint32> Value::ToUint32() const {
num = obj;
} else {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::ToUint32()")) return Local<Uint32>();
LOG_API(isolate, "ToUInt32");
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
- num = i::Execution::ToUint32(obj, &has_pending_exception);
+ num = i::Execution::ToUint32(isolate, obj, &has_pending_exception);
EXCEPTION_BAILOUT_CHECK(isolate, Local<Uint32>());
}
- return Local<Uint32>(ToApi<Uint32>(num));
+ return ToApiHandle<Uint32>(num);
}
@@ -2637,19 +2987,18 @@ Local<Uint32> Value::ToArrayIndex() const {
return Local<Uint32>();
}
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::ToArrayIndex()")) return Local<Uint32>();
LOG_API(isolate, "ToArrayIndex");
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> string_obj =
- i::Execution::ToString(obj, &has_pending_exception);
+ i::Execution::ToString(isolate, obj, &has_pending_exception);
EXCEPTION_BAILOUT_CHECK(isolate, Local<Uint32>());
i::Handle<i::String> str = i::Handle<i::String>::cast(string_obj);
uint32_t index;
if (str->AsArrayIndex(&index)) {
i::Handle<i::Object> value;
if (index <= static_cast<uint32_t>(i::Smi::kMaxValue)) {
- value = i::Handle<i::Object>(i::Smi::FromInt(index));
+ value = i::Handle<i::Object>(i::Smi::FromInt(index), isolate);
} else {
value = isolate->factory()->NewNumber(index);
}
@@ -2665,12 +3014,11 @@ int32_t Value::Int32Value() const {
return i::Smi::cast(*obj)->value();
} else {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::Int32Value()")) return 0;
LOG_API(isolate, "Int32Value (slow)");
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> num =
- i::Execution::ToInt32(obj, &has_pending_exception);
+ i::Execution::ToInt32(isolate, obj, &has_pending_exception);
EXCEPTION_BAILOUT_CHECK(isolate, 0);
if (num->IsSmi()) {
return i::Smi::cast(*num)->value();
@@ -2683,9 +3031,8 @@ int32_t Value::Int32Value() const {
bool Value::Equals(Handle<Value> that) const {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::Equals()")
- || EmptyCheck("v8::Value::Equals()", this)
- || EmptyCheck("v8::Value::Equals()", that)) {
+ if (EmptyCheck("v8::Value::Equals()", this) ||
+ EmptyCheck("v8::Value::Equals()", that)) {
return false;
}
LOG_API(isolate, "Equals");
@@ -2710,9 +3057,8 @@ bool Value::Equals(Handle<Value> that) const {
bool Value::StrictEquals(Handle<Value> that) const {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::StrictEquals()")
- || EmptyCheck("v8::Value::StrictEquals()", this)
- || EmptyCheck("v8::Value::StrictEquals()", that)) {
+ if (EmptyCheck("v8::Value::StrictEquals()", this) ||
+ EmptyCheck("v8::Value::StrictEquals()", that)) {
return false;
}
LOG_API(isolate, "StrictEquals");
@@ -2724,7 +3070,7 @@ bool Value::StrictEquals(Handle<Value> that) const {
double x = obj->Number();
double y = other->Number();
// Must check explicitly for NaN:s on Windows, but -0 works fine.
- return x == y && !isnan(x) && !isnan(y);
+ return x == y && !std::isnan(x) && !std::isnan(y);
} else if (*obj == *other) { // Also covers Booleans.
return true;
} else if (obj->IsSmi()) {
@@ -2740,18 +3086,30 @@ bool Value::StrictEquals(Handle<Value> that) const {
}
+bool Value::SameValue(Handle<Value> that) const {
+ i::Isolate* isolate = i::Isolate::Current();
+ if (EmptyCheck("v8::Value::SameValue()", this) ||
+ EmptyCheck("v8::Value::SameValue()", that)) {
+ return false;
+ }
+ LOG_API(isolate, "SameValue");
+ i::Handle<i::Object> obj = Utils::OpenHandle(this);
+ i::Handle<i::Object> other = Utils::OpenHandle(*that);
+ return obj->SameValue(*other);
+}
+
+
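// Sketch (not from this patch) of how the new Value::SameValue differs from
// StrictEquals on the embedder side: SameValue follows the ES5 SameValue
// algorithm, so NaN is same-value as NaN while +0 and -0 are not.
// Assumes an entered Isolate/Context, a HandleScope, and <limits>.
void SameValueSketch() {
  v8::Handle<v8::Value> nan1 =
      v8::Number::New(std::numeric_limits<double>::quiet_NaN());
  v8::Handle<v8::Value> nan2 =
      v8::Number::New(std::numeric_limits<double>::quiet_NaN());
  bool strict = nan1->StrictEquals(nan2);  // false: NaN !== NaN
  bool same = nan1->SameValue(nan2);       // true: NaN is same-value as NaN
  bool zeros = v8::Number::New(0.0)->SameValue(v8::Number::New(-0.0));  // false
  (void) strict; (void) same; (void) zeros;
}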
uint32_t Value::Uint32Value() const {
i::Handle<i::Object> obj = Utils::OpenHandle(this);
if (obj->IsSmi()) {
return i::Smi::cast(*obj)->value();
} else {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::Uint32Value()")) return 0;
LOG_API(isolate, "Uint32Value");
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> num =
- i::Execution::ToUint32(obj, &has_pending_exception);
+ i::Execution::ToUint32(isolate, obj, &has_pending_exception);
EXCEPTION_BAILOUT_CHECK(isolate, 0);
if (num->IsSmi()) {
return i::Smi::cast(*num)->value();
@@ -2773,6 +3131,7 @@ bool v8::Object::Set(v8::Handle<Value> key, v8::Handle<Value> value,
i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> obj = i::SetProperty(
+ isolate,
self,
key_obj,
value_obj,
@@ -2839,7 +3198,7 @@ bool v8::Object::ForceDelete(v8::Handle<Value> key) {
// value with DontDelete properties. We have to deoptimize all contexts
// because of possible cross-context inlined functions.
if (self->IsJSGlobalProxy() || self->IsGlobalObject()) {
- i::Deoptimizer::DeoptimizeAll();
+ i::Deoptimizer::DeoptimizeAll(isolate);
}
EXCEPTION_PREAMBLE(isolate);
@@ -2857,7 +3216,7 @@ Local<Value> v8::Object::Get(v8::Handle<Value> key) {
i::Handle<i::Object> self = Utils::OpenHandle(this);
i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> result = i::GetProperty(self, key_obj);
+ i::Handle<i::Object> result = i::GetProperty(isolate, self, key_obj);
has_pending_exception = result.is_null();
EXCEPTION_BAILOUT_CHECK(isolate, Local<Value>());
return Utils::ToLocal(result);
@@ -2870,7 +3229,7 @@ Local<Value> v8::Object::Get(uint32_t index) {
ENTER_V8(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> result = i::Object::GetElement(self, index);
+ i::Handle<i::Object> result = i::Object::GetElement(isolate, self, index);
has_pending_exception = result.is_null();
EXCEPTION_BAILOUT_CHECK(isolate, Local<Value>());
return Utils::ToLocal(result);
@@ -2885,13 +3244,13 @@ PropertyAttribute v8::Object::GetPropertyAttributes(v8::Handle<Value> key) {
i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
- if (!key_obj->IsString()) {
+ if (!key_obj->IsName()) {
EXCEPTION_PREAMBLE(isolate);
- key_obj = i::Execution::ToString(key_obj, &has_pending_exception);
+ key_obj = i::Execution::ToString(isolate, key_obj, &has_pending_exception);
EXCEPTION_BAILOUT_CHECK(isolate, static_cast<PropertyAttribute>(NONE));
}
- i::Handle<i::String> key_string = i::Handle<i::String>::cast(key_obj);
- PropertyAttributes result = self->GetPropertyAttribute(*key_string);
+ i::Handle<i::Name> key_name = i::Handle<i::Name>::cast(key_obj);
+ PropertyAttributes result = self->GetPropertyAttribute(*key_name);
if (result == ABSENT) return static_cast<PropertyAttribute>(NONE);
return static_cast<PropertyAttribute>(result);
}
@@ -2903,7 +3262,7 @@ Local<Value> v8::Object::GetPrototype() {
return Local<v8::Value>());
ENTER_V8(isolate);
i::Handle<i::Object> self = Utils::OpenHandle(this);
- i::Handle<i::Object> result(self->GetPrototype());
+ i::Handle<i::Object> result(self->GetPrototype(isolate), isolate);
return Utils::ToLocal(result);
}
@@ -2918,7 +3277,7 @@ bool v8::Object::SetPrototype(Handle<Value> value) {
// to propagate outside.
TryCatch try_catch;
EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> result = i::SetPrototype(self, value_obj);
+ i::Handle<i::Object> result = i::JSObject::SetPrototype(self, value_obj);
has_pending_exception = result.is_null();
EXCEPTION_BAILOUT_CHECK(isolate, false);
return true;
@@ -2992,10 +3351,10 @@ Local<String> v8::Object::ObjectProtoToString() {
ENTER_V8(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- i::Handle<i::Object> name(self->class_name());
+ i::Handle<i::Object> name(self->class_name(), isolate);
// Native implementation of Object.prototype.toString (v8natives.js):
- // var c = %ClassOf(this);
+ // var c = %_ClassOf(this);
// if (c === 'Arguments') c = 'Object';
// return "[object " + c + "]";
@@ -3004,7 +3363,7 @@ Local<String> v8::Object::ObjectProtoToString() {
} else {
i::Handle<i::String> class_name = i::Handle<i::String>::cast(name);
- if (class_name->IsEqualTo(i::CStrVector("Arguments"))) {
+ if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Arguments"))) {
return v8::String::New("[object Object]");
} else {
@@ -3013,7 +3372,7 @@ Local<String> v8::Object::ObjectProtoToString() {
const char* postfix = "]";
int prefix_len = i::StrLength(prefix);
- int str_len = str->Length();
+ int str_len = str->Utf8Length();
int postfix_len = i::StrLength(postfix);
int buf_len = prefix_len + str_len + postfix_len;
@@ -3021,15 +3380,15 @@ Local<String> v8::Object::ObjectProtoToString() {
// Write prefix.
char* ptr = buf.start();
- memcpy(ptr, prefix, prefix_len * v8::internal::kCharSize);
+ i::OS::MemCopy(ptr, prefix, prefix_len * v8::internal::kCharSize);
ptr += prefix_len;
// Write real content.
- str->WriteAscii(ptr, 0, str_len);
+ str->WriteUtf8(ptr, str_len);
ptr += str_len;
// Write postfix.
- memcpy(ptr, postfix, postfix_len * v8::internal::kCharSize);
+ i::OS::MemCopy(ptr, postfix, postfix_len * v8::internal::kCharSize);
// Copy the buffer into a heap-allocated string and return it.
Local<String> result = v8::String::New(buf.start(), buf_len);
@@ -3045,7 +3404,7 @@ Local<Value> v8::Object::GetConstructor() {
return Local<v8::Function>());
ENTER_V8(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- i::Handle<i::Object> constructor(self->GetConstructor());
+ i::Handle<i::Object> constructor(self->GetConstructor(), isolate);
return Utils::ToLocal(constructor);
}
@@ -3061,24 +3420,32 @@ Local<String> v8::Object::GetConstructorName() {
}
-bool v8::Object::Delete(v8::Handle<String> key) {
+bool v8::Object::Delete(v8::Handle<Value> key) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ON_BAILOUT(isolate, "v8::Object::Delete()", return false);
ENTER_V8(isolate);
i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
- return i::JSObject::DeleteProperty(self, key_obj)->IsTrue();
+ i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
+ EXCEPTION_PREAMBLE(isolate);
+ i::Handle<i::Object> obj = i::DeleteProperty(self, key_obj);
+ has_pending_exception = obj.is_null();
+ EXCEPTION_BAILOUT_CHECK(isolate, false);
+ return obj->IsTrue();
}
-bool v8::Object::Has(v8::Handle<String> key) {
+bool v8::Object::Has(v8::Handle<Value> key) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ON_BAILOUT(isolate, "v8::Object::Has()", return false);
ENTER_V8(isolate);
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
- return self->HasProperty(*key_obj);
+ i::Handle<i::JSReceiver> self = Utils::OpenHandle(this);
+ i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
+ EXCEPTION_PREAMBLE(isolate);
+ i::Handle<i::Object> obj = i::HasProperty(self, key_obj);
+ has_pending_exception = obj.is_null();
+ EXCEPTION_BAILOUT_CHECK(isolate, false);
+ return obj->IsTrue();
}
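// Sketch (not from this patch): Delete and Has now take Handle<Value> instead
// of Handle<String>, so non-string keys are accepted and converted as in JS.
// Assumes `obj` is a live object handle in a current HandleScope/Context.
void KeySketch(v8::Handle<v8::Object> obj) {
  obj->Set(v8::String::New("answer"), v8::Integer::New(42));
  bool has_name = obj->Has(v8::String::New("answer"));    // true
  bool has_index = obj->Has(v8::Integer::New(0));         // numeric key, now allowed
  bool deleted = obj->Delete(v8::String::New("answer"));  // true
  (void) has_name; (void) has_index; (void) deleted;
}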
@@ -3087,9 +3454,9 @@ bool v8::Object::Delete(uint32_t index) {
ON_BAILOUT(isolate, "v8::Object::DeleteProperty()",
return false);
ENTER_V8(isolate);
- HandleScope scope;
+ HandleScope scope(reinterpret_cast<Isolate*>(isolate));
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- return i::JSObject::DeleteElement(self, index)->IsTrue();
+ return i::JSReceiver::DeleteElement(self, index)->IsTrue();
}
@@ -3097,38 +3464,62 @@ bool v8::Object::Has(uint32_t index) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ON_BAILOUT(isolate, "v8::Object::HasProperty()", return false);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- return self->HasElement(index);
+ return i::JSReceiver::HasElement(self, index);
}
-bool Object::SetAccessor(Handle<String> name,
- AccessorGetter getter,
- AccessorSetter setter,
- v8::Handle<Value> data,
- AccessControl settings,
- PropertyAttribute attributes) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+template<typename Setter, typename Getter, typename Data>
+static inline bool ObjectSetAccessor(Object* obj,
+ Handle<String> name,
+ Setter getter,
+ Getter setter,
+ Data data,
+ AccessControl settings,
+ PropertyAttribute attributes) {
+ i::Isolate* isolate = Utils::OpenHandle(obj)->GetIsolate();
ON_BAILOUT(isolate, "v8::Object::SetAccessor()", return false);
ENTER_V8(isolate);
i::HandleScope scope(isolate);
v8::Handle<AccessorSignature> signature;
- i::Handle<i::AccessorInfo> info = MakeAccessorInfo(name, getter, setter, data,
- settings, attributes,
- signature);
- bool fast = Utils::OpenHandle(this)->HasFastProperties();
- i::Handle<i::Object> result = i::SetAccessor(Utils::OpenHandle(this), info);
+ i::Handle<i::AccessorInfo> info = MakeAccessorInfo(
+ name, getter, setter, data, settings, attributes, signature);
+ if (info.is_null()) return false;
+ bool fast = Utils::OpenHandle(obj)->HasFastProperties();
+ i::Handle<i::Object> result =
+ i::JSObject::SetAccessor(Utils::OpenHandle(obj), info);
if (result.is_null() || result->IsUndefined()) return false;
- if (fast) i::JSObject::TransformToFastProperties(Utils::OpenHandle(this), 0);
+ if (fast) i::JSObject::TransformToFastProperties(Utils::OpenHandle(obj), 0);
return true;
}
+bool Object::SetAccessor(Handle<String> name,
+ AccessorGetterCallback getter,
+ AccessorSetterCallback setter,
+ v8::Handle<Value> data,
+ AccessControl settings,
+ PropertyAttribute attributes) {
+ return ObjectSetAccessor(
+ this, name, getter, setter, data, settings, attributes);
+}
+
+
+bool Object::SetDeclaredAccessor(Local<String> name,
+ Local<DeclaredAccessorDescriptor> descriptor,
+ PropertyAttribute attributes,
+ AccessControl settings) {
+ void* null = NULL;
+ return ObjectSetAccessor(
+ this, name, descriptor, null, null, settings, attributes);
+}
+
+
bool v8::Object::HasOwnProperty(Handle<String> key) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ON_BAILOUT(isolate, "v8::Object::HasOwnProperty()",
return false);
- return Utils::OpenHandle(this)->HasLocalProperty(
- *Utils::OpenHandle(*key));
+ return i::JSReceiver::HasLocalProperty(
+ Utils::OpenHandle(this), Utils::OpenHandle(*key));
}
@@ -3136,16 +3527,16 @@ bool v8::Object::HasRealNamedProperty(Handle<String> key) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ON_BAILOUT(isolate, "v8::Object::HasRealNamedProperty()",
return false);
- return Utils::OpenHandle(this)->HasRealNamedProperty(
- *Utils::OpenHandle(*key));
+ return i::JSObject::HasRealNamedProperty(Utils::OpenHandle(this),
+ Utils::OpenHandle(*key));
}
bool v8::Object::HasRealIndexedProperty(uint32_t index) {
- ON_BAILOUT(Utils::OpenHandle(this)->GetIsolate(),
- "v8::Object::HasRealIndexedProperty()",
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ ON_BAILOUT(isolate, "v8::Object::HasRealIndexedProperty()",
return false);
- return Utils::OpenHandle(this)->HasRealElementProperty(index);
+ return i::JSObject::HasRealElementProperty(Utils::OpenHandle(this), index);
}
@@ -3155,8 +3546,8 @@ bool v8::Object::HasRealNamedCallbackProperty(Handle<String> key) {
"v8::Object::HasRealNamedCallbackProperty()",
return false);
ENTER_V8(isolate);
- return Utils::OpenHandle(this)->HasRealNamedCallbackProperty(
- *Utils::OpenHandle(*key));
+ return i::JSObject::HasRealNamedCallbackProperty(Utils::OpenHandle(this),
+ Utils::OpenHandle(*key));
}
@@ -3259,7 +3650,7 @@ Local<v8::Object> v8::Object::Clone() {
ENTER_V8(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::JSObject> result = i::Copy(self);
+ i::Handle<i::JSObject> result = i::JSObject::Copy(self);
has_pending_exception = result.is_null();
EXCEPTION_BAILOUT_CHECK(isolate, Local<Object>());
return Utils::ToLocal(result);
@@ -3311,10 +3702,11 @@ bool v8::Object::SetHiddenValue(v8::Handle<v8::String> key,
i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
- i::Handle<i::String> key_symbol = FACTORY->LookupSymbol(key_obj);
+ i::Handle<i::String> key_string =
+ isolate->factory()->InternalizeString(key_obj);
i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
i::Handle<i::Object> result =
- i::JSObject::SetHiddenProperty(self, key_symbol, value_obj);
+ i::JSObject::SetHiddenProperty(self, key_string, value_obj);
return *result == *self;
}
@@ -3326,9 +3718,10 @@ v8::Local<v8::Value> v8::Object::GetHiddenValue(v8::Handle<v8::String> key) {
ENTER_V8(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
- i::Handle<i::String> key_symbol = FACTORY->LookupSymbol(key_obj);
- i::Handle<i::Object> result(self->GetHiddenProperty(*key_symbol));
- if (result->IsUndefined()) return v8::Local<v8::Value>();
+ i::Handle<i::String> key_string =
+ isolate->factory()->InternalizeString(key_obj);
+ i::Handle<i::Object> result(self->GetHiddenProperty(*key_string), isolate);
+ if (result->IsTheHole()) return v8::Local<v8::Value>();
return Utils::ToLocal(result);
}
@@ -3340,8 +3733,9 @@ bool v8::Object::DeleteHiddenValue(v8::Handle<v8::String> key) {
i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
- i::Handle<i::String> key_symbol = FACTORY->LookupSymbol(key_obj);
- self->DeleteHiddenProperty(*key_symbol);
+ i::Handle<i::String> key_string =
+ isolate->factory()->InternalizeString(key_obj);
+ i::JSObject::DeleteHiddenProperty(self, key_string);
return true;
}
@@ -3552,11 +3946,11 @@ bool v8::Object::IsCallable() {
i::HandleScope scope(isolate);
i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
if (obj->IsJSFunction()) return true;
- return i::Execution::GetFunctionDelegate(obj)->IsJSFunction();
+ return i::Execution::GetFunctionDelegate(isolate, obj)->IsJSFunction();
}
-Local<v8::Value> Object::CallAsFunction(v8::Handle<v8::Object> recv,
+Local<v8::Value> Object::CallAsFunction(v8::Handle<v8::Value> recv,
int argc,
v8::Handle<v8::Value> argv[]) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
@@ -3564,6 +3958,8 @@ Local<v8::Value> Object::CallAsFunction(v8::Handle<v8::Object> recv,
return Local<v8::Value>());
LOG_API(isolate, "Object::CallAsFunction");
ENTER_V8(isolate);
+ i::Logger::TimerEventScope timer_scope(
+ isolate, i::Logger::TimerEventScope::v8_execute);
i::HandleScope scope(isolate);
i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
i::Handle<i::Object> recv_obj = Utils::OpenHandle(*recv);
@@ -3574,15 +3970,15 @@ Local<v8::Value> Object::CallAsFunction(v8::Handle<v8::Object> recv,
fun = i::Handle<i::JSFunction>::cast(obj);
} else {
EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> delegate =
- i::Execution::TryGetFunctionDelegate(obj, &has_pending_exception);
+ i::Handle<i::Object> delegate = i::Execution::TryGetFunctionDelegate(
+ isolate, obj, &has_pending_exception);
EXCEPTION_BAILOUT_CHECK(isolate, Local<Value>());
fun = i::Handle<i::JSFunction>::cast(delegate);
recv_obj = obj;
}
EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> returned =
- i::Execution::Call(fun, recv_obj, argc, args, &has_pending_exception);
+ i::Handle<i::Object> returned = i::Execution::Call(
+ isolate, fun, recv_obj, argc, args, &has_pending_exception, true);
EXCEPTION_BAILOUT_CHECK_DO_CALLBACK(isolate, Local<Value>());
return Utils::ToLocal(scope.CloseAndEscape(returned));
}
@@ -3595,6 +3991,8 @@ Local<v8::Value> Object::CallAsConstructor(int argc,
return Local<v8::Object>());
LOG_API(isolate, "Object::CallAsConstructor");
ENTER_V8(isolate);
+ i::Logger::TimerEventScope timer_scope(
+ isolate, i::Logger::TimerEventScope::v8_execute);
i::HandleScope scope(isolate);
i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
@@ -3609,14 +4007,14 @@ Local<v8::Value> Object::CallAsConstructor(int argc,
i::Handle<i::JSObject>::cast(returned)));
}
EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> delegate =
- i::Execution::TryGetConstructorDelegate(obj, &has_pending_exception);
+ i::Handle<i::Object> delegate = i::Execution::TryGetConstructorDelegate(
+ isolate, obj, &has_pending_exception);
EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::Object>());
if (!delegate->IsUndefined()) {
i::Handle<i::JSFunction> fun = i::Handle<i::JSFunction>::cast(delegate);
EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> returned =
- i::Execution::Call(fun, obj, argc, args, &has_pending_exception);
+ i::Handle<i::Object> returned = i::Execution::Call(
+ isolate, fun, obj, argc, args, &has_pending_exception);
EXCEPTION_BAILOUT_CHECK_DO_CALLBACK(isolate, Local<v8::Object>());
ASSERT(!delegate->IsUndefined());
return Utils::ToLocal(scope.CloseAndEscape(returned));
@@ -3625,6 +4023,19 @@ Local<v8::Value> Object::CallAsConstructor(int argc,
}
+Local<Function> Function::New(Isolate* v8_isolate,
+ FunctionCallback callback,
+ Local<Value> data,
+ int length) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+ LOG_API(isolate, "Function::New");
+ ENTER_V8(isolate);
+ return FunctionTemplateNew(
+ isolate, callback, data, Local<Signature>(), length, true)->
+ GetFunction();
+}
+
+
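// Sketch (not from this patch) of the new Function::New entry point, which
// builds a function directly from a FunctionCallback without the embedder
// setting up a FunctionTemplate by hand. Assumes an entered Isolate/Context.
static void HelloCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
  info.GetReturnValue().Set(v8::String::New("hello"));
}

void InstallHello(v8::Isolate* isolate, v8::Handle<v8::Object> global) {
  v8::Local<v8::Function> fn =
      v8::Function::New(isolate, HelloCallback, v8::Local<v8::Value>(), 0);
  global->Set(v8::String::New("hello"), fn);
}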
Local<v8::Object> Function::NewInstance() const {
return NewInstance(0, NULL);
}
@@ -3637,7 +4048,9 @@ Local<v8::Object> Function::NewInstance(int argc,
return Local<v8::Object>());
LOG_API(isolate, "Function::NewInstance");
ENTER_V8(isolate);
- HandleScope scope;
+ i::Logger::TimerEventScope timer_scope(
+ isolate, i::Logger::TimerEventScope::v8_execute);
+ HandleScope scope(reinterpret_cast<Isolate*>(isolate));
i::Handle<i::JSFunction> function = Utils::OpenHandle(this);
STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv);
@@ -3649,12 +4062,14 @@ Local<v8::Object> Function::NewInstance(int argc,
}
-Local<v8::Value> Function::Call(v8::Handle<v8::Object> recv, int argc,
+Local<v8::Value> Function::Call(v8::Handle<v8::Value> recv, int argc,
v8::Handle<v8::Value> argv[]) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ON_BAILOUT(isolate, "v8::Function::Call()", return Local<v8::Value>());
LOG_API(isolate, "Function::Call");
ENTER_V8(isolate);
+ i::Logger::TimerEventScope timer_scope(
+ isolate, i::Logger::TimerEventScope::v8_execute);
i::Object* raw_result = NULL;
{
i::HandleScope scope(isolate);
@@ -3663,12 +4078,12 @@ Local<v8::Value> Function::Call(v8::Handle<v8::Object> recv, int argc,
STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv);
EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> returned =
- i::Execution::Call(fun, recv_obj, argc, args, &has_pending_exception);
+ i::Handle<i::Object> returned = i::Execution::Call(
+ isolate, fun, recv_obj, argc, args, &has_pending_exception, true);
EXCEPTION_BAILOUT_CHECK_DO_CALLBACK(isolate, Local<Object>());
raw_result = *returned;
}
- i::Handle<i::Object> result(raw_result);
+ i::Handle<i::Object> result(raw_result, isolate);
return Utils::ToLocal(result);
}
@@ -3684,13 +4099,38 @@ void Function::SetName(v8::Handle<v8::String> name) {
Handle<Value> Function::GetName() const {
i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
- return Utils::ToLocal(i::Handle<i::Object>(func->shared()->name()));
+ return Utils::ToLocal(i::Handle<i::Object>(func->shared()->name(),
+ func->GetIsolate()));
}
Handle<Value> Function::GetInferredName() const {
i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
- return Utils::ToLocal(i::Handle<i::Object>(func->shared()->inferred_name()));
+ return Utils::ToLocal(i::Handle<i::Object>(func->shared()->inferred_name(),
+ func->GetIsolate()));
+}
+
+
+Handle<Value> Function::GetDisplayName() const {
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ ON_BAILOUT(isolate, "v8::Function::GetDisplayName()",
+ return ToApiHandle<Primitive>(
+ isolate->factory()->undefined_value()));
+ ENTER_V8(isolate);
+ i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
+ i::Handle<i::String> property_name =
+ isolate->factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("displayName"));
+ i::LookupResult lookup(isolate);
+ func->LookupRealNamedProperty(*property_name, &lookup);
+ if (lookup.IsFound()) {
+ i::Object* value = lookup.GetLazyValue();
+ if (value && value->IsString()) {
+ i::String* name = i::String::cast(value);
+ if (name->length() > 0) return Utils::ToLocal(i::Handle<i::String>(name));
+ }
+ }
+ return ToApiHandle<Primitive>(isolate->factory()->undefined_value());
}
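// Sketch (not from this patch): GetDisplayName surfaces a function's own
// "displayName" property when it is a non-empty string, otherwise undefined.
// Assumes an entered Context and a HandleScope.
void DisplayNameSketch() {
  v8::Handle<v8::Value> result = v8::Script::Compile(v8::String::New(
      "function f() {}\n"
      "f.displayName = 'my.nice.name';\n"
      "f"))->Run();
  v8::Handle<v8::Function> f = v8::Handle<v8::Function>::Cast(result);
  v8::Handle<v8::Value> name = f->GetDisplayName();  // String "my.nice.name"
  (void) name;
}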
@@ -3698,8 +4138,9 @@ ScriptOrigin Function::GetScriptOrigin() const {
i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
if (func->shared()->script()->IsScript()) {
i::Handle<i::Script> script(i::Script::cast(func->shared()->script()));
+ i::Handle<i::Object> scriptName = GetScriptNameOrSourceURL(script);
v8::ScriptOrigin origin(
- Utils::ToLocal(i::Handle<i::Object>(script->name())),
+ Utils::ToLocal(scriptName),
v8::Integer::New(script->line_offset()->value()),
v8::Integer::New(script->column_offset()->value()));
return origin;
@@ -3730,322 +4171,602 @@ int Function::GetScriptColumnNumber() const {
return kLineOffsetNotFound;
}
+
+bool Function::IsBuiltin() const {
+ i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
+ return func->IsBuiltin();
+}
+
+
Handle<Value> Function::GetScriptId() const {
i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
- if (!func->shared()->script()->IsScript())
- return v8::Undefined();
+ i::Isolate* isolate = func->GetIsolate();
+ if (!func->shared()->script()->IsScript()) {
+ return v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
+ }
+ i::Handle<i::Script> script(i::Script::cast(func->shared()->script()));
+ return Utils::ToLocal(i::Handle<i::Object>(script->id(), isolate));
+}
+
+
+int Function::ScriptId() const {
+ i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
+ if (!func->shared()->script()->IsScript()) return v8::Script::kNoScriptId;
i::Handle<i::Script> script(i::Script::cast(func->shared()->script()));
- return Utils::ToLocal(i::Handle<i::Object>(script->id()));
+ return script->id()->value();
}
+
int String::Length() const {
i::Handle<i::String> str = Utils::OpenHandle(this);
- if (IsDeadCheck(str->GetIsolate(), "v8::String::Length()")) return 0;
return str->length();
}
-int String::Utf8Length() const {
+bool String::IsOneByte() const {
i::Handle<i::String> str = Utils::OpenHandle(this);
- if (IsDeadCheck(str->GetIsolate(), "v8::String::Utf8Length()")) return 0;
- return i::Utf8Length(str);
-}
-
-
-// Will fail with a negative answer if the recursion depth is too high.
-static int RecursivelySerializeToUtf8(i::String* string,
- char* buffer,
- int start,
- int end,
- int recursion_budget,
- int32_t previous_character,
- int32_t* last_character) {
- int utf8_bytes = 0;
- while (true) {
- if (string->IsAsciiRepresentation()) {
- i::String::WriteToFlat(string, buffer, start, end);
- *last_character = unibrow::Utf16::kNoPreviousCharacter;
- return utf8_bytes + end - start;
+ return str->HasOnlyOneByteChars();
+}
+
+
+// Helpers for ContainsOnlyOneByteHelper
+template<size_t size> struct OneByteMask;
+template<> struct OneByteMask<4> {
+ static const uint32_t value = 0xFF00FF00;
+};
+template<> struct OneByteMask<8> {
+ static const uint64_t value = V8_2PART_UINT64_C(0xFF00FF00, FF00FF00);
+};
+static const uintptr_t kOneByteMask = OneByteMask<sizeof(uintptr_t)>::value;
+static const uintptr_t kAlignmentMask = sizeof(uintptr_t) - 1;
+static inline bool Unaligned(const uint16_t* chars) {
+ return reinterpret_cast<const uintptr_t>(chars) & kAlignmentMask;
+}
+
+
+static inline const uint16_t* Align(const uint16_t* chars) {
+ return reinterpret_cast<uint16_t*>(
+ reinterpret_cast<uintptr_t>(chars) & ~kAlignmentMask);
+}
+
+class ContainsOnlyOneByteHelper {
+ public:
+ ContainsOnlyOneByteHelper() : is_one_byte_(true) {}
+ bool Check(i::String* string) {
+ i::ConsString* cons_string = i::String::VisitFlat(this, string, 0);
+ if (cons_string == NULL) return is_one_byte_;
+ return CheckCons(cons_string);
+ }
+ void VisitOneByteString(const uint8_t* chars, int length) {
+ // Nothing to do.
+ }
+ void VisitTwoByteString(const uint16_t* chars, int length) {
+ // Accumulated bits.
+ uintptr_t acc = 0;
+ // Align to uintptr_t.
+ const uint16_t* end = chars + length;
+ while (Unaligned(chars) && chars != end) {
+ acc |= *chars++;
}
- switch (i::StringShape(string).representation_tag()) {
- case i::kExternalStringTag: {
- const uint16_t* data = i::ExternalTwoByteString::cast(string)->
- ExternalTwoByteStringGetData(0);
- char* current = buffer;
- for (int i = start; i < end; i++) {
- uint16_t character = data[i];
- current +=
- unibrow::Utf8::Encode(current, character, previous_character);
- previous_character = character;
- }
- *last_character = previous_character;
- return static_cast<int>(utf8_bytes + current - buffer);
+ // Read word aligned in blocks,
+ // checking the return value at the end of each block.
+ const uint16_t* aligned_end = Align(end);
+ const int increment = sizeof(uintptr_t)/sizeof(uint16_t);
+ const int inner_loops = 16;
+ while (chars + inner_loops*increment < aligned_end) {
+ for (int i = 0; i < inner_loops; i++) {
+ acc |= *reinterpret_cast<const uintptr_t*>(chars);
+ chars += increment;
+ }
+ // Check for early return.
+ if ((acc & kOneByteMask) != 0) {
+ is_one_byte_ = false;
+ return;
}
- case i::kSeqStringTag: {
- const uint16_t* data =
- i::SeqTwoByteString::cast(string)->SeqTwoByteStringGetData(0);
- char* current = buffer;
- for (int i = start; i < end; i++) {
- uint16_t character = data[i];
- current +=
- unibrow::Utf8::Encode(current, character, previous_character);
- previous_character = character;
+ }
+ // Read the rest.
+ while (chars != end) {
+ acc |= *chars++;
+ }
+ // Check result.
+ if ((acc & kOneByteMask) != 0) is_one_byte_ = false;
+ }
+
+ private:
+ bool CheckCons(i::ConsString* cons_string) {
+ while (true) {
+ // Check left side if flat.
+ i::String* left = cons_string->first();
+ i::ConsString* left_as_cons =
+ i::String::VisitFlat(this, left, 0);
+ if (!is_one_byte_) return false;
+ // Check right side if flat.
+ i::String* right = cons_string->second();
+ i::ConsString* right_as_cons =
+ i::String::VisitFlat(this, right, 0);
+ if (!is_one_byte_) return false;
+ // Standard recurse/iterate trick.
+ if (left_as_cons != NULL && right_as_cons != NULL) {
+ if (left->length() < right->length()) {
+ CheckCons(left_as_cons);
+ cons_string = right_as_cons;
+ } else {
+ CheckCons(right_as_cons);
+ cons_string = left_as_cons;
}
- *last_character = previous_character;
- return static_cast<int>(utf8_bytes + current - buffer);
+ // Check fast return.
+ if (!is_one_byte_) return false;
+ continue;
}
- case i::kSlicedStringTag: {
- i::SlicedString* slice = i::SlicedString::cast(string);
- unsigned offset = slice->offset();
- string = slice->parent();
- start += offset;
- end += offset;
+ // Descend left in place.
+ if (left_as_cons != NULL) {
+ cons_string = left_as_cons;
continue;
}
- case i::kConsStringTag: {
- i::ConsString* cons_string = i::ConsString::cast(string);
- i::String* first = cons_string->first();
- int boundary = first->length();
- if (start >= boundary) {
- // Only need RHS.
- string = cons_string->second();
- start -= boundary;
- end -= boundary;
- continue;
- } else if (end <= boundary) {
- // Only need LHS.
- string = first;
- } else {
- if (recursion_budget == 0) return -1;
- int extra_utf8_bytes =
- RecursivelySerializeToUtf8(first,
- buffer,
- start,
- boundary,
- recursion_budget - 1,
- previous_character,
- &previous_character);
- if (extra_utf8_bytes < 0) return extra_utf8_bytes;
- buffer += extra_utf8_bytes;
- utf8_bytes += extra_utf8_bytes;
- string = cons_string->second();
- start = 0;
- end -= boundary;
- }
+ // Descend right in place.
+ if (right_as_cons != NULL) {
+ cons_string = right_as_cons;
+ continue;
}
+ // Terminate.
+ break;
}
+ return is_one_byte_;
}
- UNREACHABLE();
- return 0;
-}
+ bool is_one_byte_;
+ DISALLOW_COPY_AND_ASSIGN(ContainsOnlyOneByteHelper);
+};
-bool String::MayContainNonAscii() const {
+bool String::ContainsOnlyOneByte() const {
i::Handle<i::String> str = Utils::OpenHandle(this);
- if (IsDeadCheck(str->GetIsolate(), "v8::String::MayContainNonAscii()")) {
- return false;
- }
- return !str->HasOnlyAsciiChars();
+ if (str->HasOnlyOneByteChars()) return true;
+ ContainsOnlyOneByteHelper helper;
+ return helper.Check(*str);
}
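// Sketch (not from this patch): IsOneByte reports the string's stored
// representation flag, while ContainsOnlyOneByte also inspects the characters
// of two-byte strings, so it can be true even when IsOneByte is false.
// Assumes an entered Isolate/Context and a HandleScope.
void OneByteSketch() {
  uint16_t chars[] = { 0x68, 0xE9 };  // "hé": both code units are <= 0xFF
  v8::Handle<v8::String> s = v8::String::New(chars, 2);
  bool rep = s->IsOneByte();                // depends on chosen representation
  bool content = s->ContainsOnlyOneByte();  // true: every unit fits in a byte
  (void) rep; (void) content;
}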
-int String::WriteUtf8(char* buffer,
- int capacity,
- int* nchars_ref,
- int options) const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::String::WriteUtf8()")) return 0;
- LOG_API(isolate, "String::WriteUtf8");
- ENTER_V8(isolate);
- i::Handle<i::String> str = Utils::OpenHandle(this);
- if (options & HINT_MANY_WRITES_EXPECTED) {
- FlattenString(str); // Flatten the string for efficiency.
+class Utf8LengthHelper : public i::AllStatic {
+ public:
+ enum State {
+ kEndsWithLeadingSurrogate = 1 << 0,
+ kStartsWithTrailingSurrogate = 1 << 1,
+ kLeftmostEdgeIsCalculated = 1 << 2,
+ kRightmostEdgeIsCalculated = 1 << 3,
+ kLeftmostEdgeIsSurrogate = 1 << 4,
+ kRightmostEdgeIsSurrogate = 1 << 5
+ };
+
+ static const uint8_t kInitialState = 0;
+
+ static inline bool EndsWithSurrogate(uint8_t state) {
+ return state & kEndsWithLeadingSurrogate;
+ }
+
+ static inline bool StartsWithSurrogate(uint8_t state) {
+ return state & kStartsWithTrailingSurrogate;
}
- int string_length = str->length();
- if (str->IsAsciiRepresentation()) {
- int len;
- if (capacity == -1) {
- capacity = str->length() + 1;
- len = string_length;
+
+ class Visitor {
+ public:
+ inline explicit Visitor()
+ : utf8_length_(0),
+ state_(kInitialState) {}
+
+ void VisitOneByteString(const uint8_t* chars, int length) {
+ int utf8_length = 0;
+ // Add in length 1 for each non-ASCII character.
+ for (int i = 0; i < length; i++) {
+ utf8_length += *chars++ >> 7;
+ }
+ // Add in length 1 for each character.
+ utf8_length_ = utf8_length + length;
+ state_ = kInitialState;
+ }
+
+ void VisitTwoByteString(const uint16_t* chars, int length) {
+ int utf8_length = 0;
+ int last_character = unibrow::Utf16::kNoPreviousCharacter;
+ for (int i = 0; i < length; i++) {
+ uint16_t c = chars[i];
+ utf8_length += unibrow::Utf8::Length(c, last_character);
+ last_character = c;
+ }
+ utf8_length_ = utf8_length;
+ uint8_t state = 0;
+ if (unibrow::Utf16::IsTrailSurrogate(chars[0])) {
+ state |= kStartsWithTrailingSurrogate;
+ }
+ if (unibrow::Utf16::IsLeadSurrogate(chars[length-1])) {
+ state |= kEndsWithLeadingSurrogate;
+ }
+ state_ = state;
+ }
+
+ static i::ConsString* VisitFlat(i::String* string,
+ int* length,
+ uint8_t* state) {
+ Visitor visitor;
+ i::ConsString* cons_string = i::String::VisitFlat(&visitor, string);
+ *length = visitor.utf8_length_;
+ *state = visitor.state_;
+ return cons_string;
+ }
+
+ private:
+ int utf8_length_;
+ uint8_t state_;
+ DISALLOW_COPY_AND_ASSIGN(Visitor);
+ };
+
+ static inline void MergeLeafLeft(int* length,
+ uint8_t* state,
+ uint8_t leaf_state) {
+ bool edge_surrogate = StartsWithSurrogate(leaf_state);
+ if (!(*state & kLeftmostEdgeIsCalculated)) {
+ ASSERT(!(*state & kLeftmostEdgeIsSurrogate));
+ *state |= kLeftmostEdgeIsCalculated
+ | (edge_surrogate ? kLeftmostEdgeIsSurrogate : 0);
+ } else if (EndsWithSurrogate(*state) && edge_surrogate) {
+ *length -= unibrow::Utf8::kBytesSavedByCombiningSurrogates;
+ }
+ if (EndsWithSurrogate(leaf_state)) {
+ *state |= kEndsWithLeadingSurrogate;
} else {
- len = i::Min(capacity, str->length());
+ *state &= ~kEndsWithLeadingSurrogate;
+ }
+ }
+
+ static inline void MergeLeafRight(int* length,
+ uint8_t* state,
+ uint8_t leaf_state) {
+ bool edge_surrogate = EndsWithSurrogate(leaf_state);
+ if (!(*state & kRightmostEdgeIsCalculated)) {
+ ASSERT(!(*state & kRightmostEdgeIsSurrogate));
+ *state |= (kRightmostEdgeIsCalculated
+ | (edge_surrogate ? kRightmostEdgeIsSurrogate : 0));
+ } else if (edge_surrogate && StartsWithSurrogate(*state)) {
+ *length -= unibrow::Utf8::kBytesSavedByCombiningSurrogates;
}
- i::String::WriteToFlat(*str, buffer, 0, len);
- if (nchars_ref != NULL) *nchars_ref = len;
- if (!(options & NO_NULL_TERMINATION) && capacity > len) {
- buffer[len] = '\0';
- return len + 1;
+ if (StartsWithSurrogate(leaf_state)) {
+ *state |= kStartsWithTrailingSurrogate;
+ } else {
+ *state &= ~kStartsWithTrailingSurrogate;
}
- return len;
}
- if (capacity == -1 || capacity / 3 >= string_length) {
- int32_t previous = unibrow::Utf16::kNoPreviousCharacter;
- const int kMaxRecursion = 100;
- int utf8_bytes =
- RecursivelySerializeToUtf8(*str,
- buffer,
- 0,
- string_length,
- kMaxRecursion,
- previous,
- &previous);
- if (utf8_bytes >= 0) {
- // Success serializing with recursion.
- if ((options & NO_NULL_TERMINATION) == 0 &&
- (capacity > utf8_bytes || capacity == -1)) {
- buffer[utf8_bytes++] = '\0';
- }
- if (nchars_ref != NULL) *nchars_ref = string_length;
- return utf8_bytes;
+ static inline void MergeTerminal(int* length,
+ uint8_t state,
+ uint8_t* state_out) {
+ ASSERT((state & kLeftmostEdgeIsCalculated) &&
+ (state & kRightmostEdgeIsCalculated));
+ if (EndsWithSurrogate(state) && StartsWithSurrogate(state)) {
+ *length -= unibrow::Utf8::kBytesSavedByCombiningSurrogates;
}
- FlattenString(str);
- // Recurse once. This time around the string is flat and the serializing
- // with recursion will certainly succeed.
- return WriteUtf8(buffer, capacity, nchars_ref, options);
- } else if (capacity >= string_length) {
- // First check that the buffer is large enough. If it is, then recurse
- // once without a capacity limit, which will get into the other branch of
- // this 'if'.
- int utf8_bytes = i::Utf8Length(str);
- if ((options & NO_NULL_TERMINATION) == 0) utf8_bytes++;
- if (utf8_bytes <= capacity) {
- return WriteUtf8(buffer, -1, nchars_ref, options);
+ *state_out = kInitialState |
+ (state & kLeftmostEdgeIsSurrogate ? kStartsWithTrailingSurrogate : 0) |
+ (state & kRightmostEdgeIsSurrogate ? kEndsWithLeadingSurrogate : 0);
+ }
+
+ static int Calculate(i::ConsString* current, uint8_t* state_out) {
+ using namespace internal;
+ int total_length = 0;
+ uint8_t state = kInitialState;
+ while (true) {
+ i::String* left = current->first();
+ i::String* right = current->second();
+ uint8_t right_leaf_state;
+ uint8_t left_leaf_state;
+ int leaf_length;
+ ConsString* left_as_cons =
+ Visitor::VisitFlat(left, &leaf_length, &left_leaf_state);
+ if (left_as_cons == NULL) {
+ total_length += leaf_length;
+ MergeLeafLeft(&total_length, &state, left_leaf_state);
+ }
+ ConsString* right_as_cons =
+ Visitor::VisitFlat(right, &leaf_length, &right_leaf_state);
+ if (right_as_cons == NULL) {
+ total_length += leaf_length;
+ MergeLeafRight(&total_length, &state, right_leaf_state);
+ if (left_as_cons != NULL) {
+ // 1 Leaf node. Descend in place.
+ current = left_as_cons;
+ continue;
+ } else {
+ // Terminal node.
+ MergeTerminal(&total_length, state, state_out);
+ return total_length;
+ }
+ } else if (left_as_cons == NULL) {
+ // 1 Leaf node. Descend in place.
+ current = right_as_cons;
+ continue;
+ }
+ // Both strings are ConsStrings.
+ // Recurse on smallest.
+ if (left->length() < right->length()) {
+ total_length += Calculate(left_as_cons, &left_leaf_state);
+ MergeLeafLeft(&total_length, &state, left_leaf_state);
+ current = right_as_cons;
+ } else {
+ total_length += Calculate(right_as_cons, &right_leaf_state);
+ MergeLeafRight(&total_length, &state, right_leaf_state);
+ current = left_as_cons;
+ }
}
+ UNREACHABLE();
+ return 0;
}
- // Slow case.
- i::StringInputBuffer& write_input_buffer = *isolate->write_input_buffer();
- isolate->string_tracker()->RecordWrite(str);
+ static inline int Calculate(i::ConsString* current) {
+ uint8_t state = kInitialState;
+ return Calculate(current, &state);
+ }
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Utf8LengthHelper);
+};
- write_input_buffer.Reset(0, *str);
- int len = str->length();
- // Encode the first K - 3 bytes directly into the buffer since we
- // know there's room for them. If no capacity is given we copy all
- // of them here.
- int fast_end = capacity - (unibrow::Utf8::kMaxEncodedSize - 1);
- int i;
- int pos = 0;
- int nchars = 0;
- int previous = unibrow::Utf16::kNoPreviousCharacter;
- for (i = 0; i < len && (capacity == -1 || pos < fast_end); i++) {
- i::uc32 c = write_input_buffer.GetNext();
- int written = unibrow::Utf8::Encode(buffer + pos, c, previous);
- pos += written;
- nchars++;
- previous = c;
- }
- if (i < len) {
- // For the last characters we need to check the length for each one
- // because they may be longer than the remaining space in the
- // buffer.
- char intermediate[unibrow::Utf8::kMaxEncodedSize];
- for (; i < len && pos < capacity; i++) {
- i::uc32 c = write_input_buffer.GetNext();
- if (unibrow::Utf16::IsTrailSurrogate(c) &&
- unibrow::Utf16::IsLeadSurrogate(previous)) {
- // We can't use the intermediate buffer here because the encoding
- // of surrogate pairs is done under assumption that you can step
- // back and fix the UTF8 stream. Luckily we only need space for one
- // more byte, so there is always space.
- ASSERT(pos < capacity);
- int written = unibrow::Utf8::Encode(buffer + pos, c, previous);
- ASSERT(written == 1);
- pos += written;
- nchars++;
+
+static int Utf8Length(i::String* str, i::Isolate* isolate) {
+ int length = str->length();
+ if (length == 0) return 0;
+ uint8_t state;
+ i::ConsString* cons_string =
+ Utf8LengthHelper::Visitor::VisitFlat(str, &length, &state);
+ if (cons_string == NULL) return length;
+ return Utf8LengthHelper::Calculate(cons_string);
+}
+
+
+int String::Utf8Length() const {
+ i::Handle<i::String> str = Utils::OpenHandle(this);
+ i::Isolate* isolate = str->GetIsolate();
+ return v8::Utf8Length(*str, isolate);
+}
+
+
+class Utf8WriterVisitor {
+ public:
+ Utf8WriterVisitor(
+ char* buffer, int capacity, bool skip_capacity_check)
+ : early_termination_(false),
+ last_character_(unibrow::Utf16::kNoPreviousCharacter),
+ buffer_(buffer),
+ start_(buffer),
+ capacity_(capacity),
+ skip_capacity_check_(capacity == -1 || skip_capacity_check),
+ utf16_chars_read_(0) {
+ }
+
+ static int WriteEndCharacter(uint16_t character,
+ int last_character,
+ int remaining,
+ char* const buffer) {
+ using namespace unibrow;
+ ASSERT(remaining > 0);
+ // We can't use a local buffer here because Encode needs to modify
+ // previous characters in the stream. We know, however, that
+ // exactly one character will be advanced.
+ if (Utf16::IsTrailSurrogate(character) &&
+ Utf16::IsLeadSurrogate(last_character)) {
+ int written = Utf8::Encode(buffer, character, last_character);
+ ASSERT(written == 1);
+ return written;
+ }
+ // Use a scratch buffer to check the required characters.
+ char temp_buffer[Utf8::kMaxEncodedSize];
+ // Can't encode using last_character as gcc has array bounds issues.
+ int written = Utf8::Encode(temp_buffer,
+ character,
+ Utf16::kNoPreviousCharacter);
+ // Won't fit.
+ if (written > remaining) return 0;
+ // Copy over the character from temp_buffer.
+ for (int j = 0; j < written; j++) {
+ buffer[j] = temp_buffer[j];
+ }
+ return written;
+ }
+
+ template<typename Char>
+ void Visit(const Char* chars, const int length) {
+ using namespace unibrow;
+ ASSERT(!early_termination_);
+ if (length == 0) return;
+ // Copy state to stack.
+ char* buffer = buffer_;
+ int last_character =
+ sizeof(Char) == 1 ? Utf16::kNoPreviousCharacter : last_character_;
+ int i = 0;
+ // Do a fast loop where there is no exit capacity check.
+ while (true) {
+ int fast_length;
+ if (skip_capacity_check_) {
+ fast_length = length;
} else {
- int written =
- unibrow::Utf8::Encode(intermediate,
- c,
- unibrow::Utf16::kNoPreviousCharacter);
- if (pos + written <= capacity) {
- for (int j = 0; j < written; j++) {
- buffer[pos + j] = intermediate[j];
- }
- pos += written;
- nchars++;
- } else {
- // We've reached the end of the buffer
- break;
+ int remaining_capacity = capacity_ - static_cast<int>(buffer - start_);
+ // Need enough space to write everything but one character.
+ STATIC_ASSERT(Utf16::kMaxExtraUtf8BytesForOneUtf16CodeUnit == 3);
+ int max_size_per_char = sizeof(Char) == 1 ? 2 : 3;
+ int writable_length =
+ (remaining_capacity - max_size_per_char)/max_size_per_char;
+ // Need to drop into slow loop.
+ if (writable_length <= 0) break;
+ fast_length = i + writable_length;
+ if (fast_length > length) fast_length = length;
+ }
+ // Write the characters to the stream.
+ if (sizeof(Char) == 1) {
+ for (; i < fast_length; i++) {
+ buffer +=
+ Utf8::EncodeOneByte(buffer, static_cast<uint8_t>(*chars++));
+ ASSERT(capacity_ == -1 || (buffer - start_) <= capacity_);
}
+ } else {
+ for (; i < fast_length; i++) {
+ uint16_t character = *chars++;
+ buffer += Utf8::Encode(buffer, character, last_character);
+ last_character = character;
+ ASSERT(capacity_ == -1 || (buffer - start_) <= capacity_);
+ }
+ }
+ // Array is fully written. Exit.
+ if (fast_length == length) {
+ // Write state back out to object.
+ last_character_ = last_character;
+ buffer_ = buffer;
+ utf16_chars_read_ += length;
+ return;
}
- previous = c;
}
+ ASSERT(!skip_capacity_check_);
+ // Slow loop. Must check capacity on each iteration.
+ int remaining_capacity = capacity_ - static_cast<int>(buffer - start_);
+ ASSERT(remaining_capacity >= 0);
+ for (; i < length && remaining_capacity > 0; i++) {
+ uint16_t character = *chars++;
+ int written = WriteEndCharacter(character,
+ last_character,
+ remaining_capacity,
+ buffer);
+ if (written == 0) {
+ early_termination_ = true;
+ break;
+ }
+ buffer += written;
+ remaining_capacity -= written;
+ last_character = character;
+ }
+ // Write state back out to object.
+ last_character_ = last_character;
+ buffer_ = buffer;
+ utf16_chars_read_ += i;
+ }
+
+ inline bool IsDone() {
+ return early_termination_;
}
- if (nchars_ref != NULL) *nchars_ref = nchars;
- if (!(options & NO_NULL_TERMINATION) &&
- (i == len && (capacity == -1 || pos < capacity))) {
- buffer[pos++] = '\0';
+
+ inline void VisitOneByteString(const uint8_t* chars, int length) {
+ Visit(chars, length);
}
- return pos;
+
+ inline void VisitTwoByteString(const uint16_t* chars, int length) {
+ Visit(chars, length);
+ }
+
+ int CompleteWrite(bool write_null, int* utf16_chars_read_out) {
+ // Write out number of utf16 characters written to the stream.
+ if (utf16_chars_read_out != NULL) {
+ *utf16_chars_read_out = utf16_chars_read_;
+ }
+ // Only null terminate if all of the string was written and there's space.
+ if (write_null &&
+ !early_termination_ &&
+ (capacity_ == -1 || (buffer_ - start_) < capacity_)) {
+ *buffer_++ = '\0';
+ }
+ return static_cast<int>(buffer_ - start_);
+ }
+
+ private:
+ bool early_termination_;
+ int last_character_;
+ char* buffer_;
+ char* const start_;
+ int capacity_;
+ bool const skip_capacity_check_;
+ int utf16_chars_read_;
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Utf8WriterVisitor);
+};
+
+
+static bool RecursivelySerializeToUtf8(i::String* current,
+ Utf8WriterVisitor* writer,
+ int recursion_budget) {
+ while (!writer->IsDone()) {
+ i::ConsString* cons_string = i::String::VisitFlat(writer, current);
+ if (cons_string == NULL) return true; // Leaf node.
+ if (recursion_budget <= 0) return false;
+ // Must write the left branch first.
+ i::String* first = cons_string->first();
+ bool success = RecursivelySerializeToUtf8(first,
+ writer,
+ recursion_budget - 1);
+ if (!success) return false;
+    // Inline tail-recurse for the right branch.
+ current = cons_string->second();
+ }
+ return true;
}
-int String::WriteAscii(char* buffer,
- int start,
- int length,
- int options) const {
+int String::WriteUtf8(char* buffer,
+ int capacity,
+ int* nchars_ref,
+ int options) const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::String::WriteAscii()")) return 0;
- LOG_API(isolate, "String::WriteAscii");
+ LOG_API(isolate, "String::WriteUtf8");
ENTER_V8(isolate);
- ASSERT(start >= 0 && length >= -1);
i::Handle<i::String> str = Utils::OpenHandle(this);
- isolate->string_tracker()->RecordWrite(str);
if (options & HINT_MANY_WRITES_EXPECTED) {
FlattenString(str); // Flatten the string for efficiency.
}
-
- if (str->IsAsciiRepresentation()) {
- // WriteToFlat is faster than using the StringInputBuffer.
- if (length == -1) length = str->length() + 1;
- int len = i::Min(length, str->length() - start);
- i::String::WriteToFlat(*str, buffer, start, start + len);
- if (!(options & PRESERVE_ASCII_NULL)) {
- for (int i = 0; i < len; i++) {
- if (buffer[i] == '\0') buffer[i] = ' ';
+ const int string_length = str->length();
+ bool write_null = !(options & NO_NULL_TERMINATION);
+ // First check if we can just write the string without checking capacity.
+ if (capacity == -1 || capacity / 3 >= string_length) {
+ Utf8WriterVisitor writer(buffer, capacity, true);
+ const int kMaxRecursion = 100;
+ bool success = RecursivelySerializeToUtf8(*str, &writer, kMaxRecursion);
+ if (success) return writer.CompleteWrite(write_null, nchars_ref);
+ } else if (capacity >= string_length) {
+ // First check that the buffer is large enough.
+ int utf8_bytes = v8::Utf8Length(*str, str->GetIsolate());
+ if (utf8_bytes <= capacity) {
+ // ASCII fast path.
+ if (utf8_bytes == string_length) {
+ WriteOneByte(reinterpret_cast<uint8_t*>(buffer), 0, capacity, options);
+ if (nchars_ref != NULL) *nchars_ref = string_length;
+ if (write_null && (utf8_bytes+1 <= capacity)) {
+ return string_length + 1;
+ }
+ return string_length;
}
+ if (write_null && (utf8_bytes+1 > capacity)) {
+ options |= NO_NULL_TERMINATION;
+ }
+ // Recurse once without a capacity limit.
+ // This will get into the first branch above.
+ // TODO(dcarney) Check max left rec. in Utf8Length and fall through.
+ return WriteUtf8(buffer, -1, nchars_ref, options);
}
- if (!(options & NO_NULL_TERMINATION) && length > len) {
- buffer[len] = '\0';
- }
- return len;
- }
-
- i::StringInputBuffer& write_input_buffer = *isolate->write_input_buffer();
- int end = length;
- if ((length == -1) || (length > str->length() - start)) {
- end = str->length() - start;
- }
- if (end < 0) return 0;
- write_input_buffer.Reset(start, *str);
- int i;
- for (i = 0; i < end; i++) {
- char c = static_cast<char>(write_input_buffer.GetNext());
- if (c == '\0' && !(options & PRESERVE_ASCII_NULL)) c = ' ';
- buffer[i] = c;
- }
- if (!(options & NO_NULL_TERMINATION) && (length == -1 || i < length)) {
- buffer[i] = '\0';
}
- return i;
+  // Recursive slow path can potentially be unreasonably slow. Flatten.
+ str = FlattenGetString(str);
+ Utf8WriterVisitor writer(buffer, capacity, false);
+ i::String::VisitFlat(&writer, *str);
+ return writer.CompleteWrite(write_null, nchars_ref);
}
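The fast path above relies on a single UTF-16 code unit expanding to at most
three UTF-8 bytes, so capacity / 3 >= string_length guarantees the whole
string fits without per-character capacity checks. Illustrative embedder-side
use of the rewritten WriteUtf8 (not part of the patch; assumes <v8.h>, an
initialized current isolate, and the header's default arguments):

  v8::Isolate* isolate = v8::Isolate::GetCurrent();
  v8::HandleScope handle_scope(isolate);
  v8::Local<v8::String> str =
      v8::String::NewFromUtf8(isolate, "\xE2\x82\xAC42");  // Euro sign + "42"
  char buf[64];
  int nchars = 0;
  // Returns the number of bytes written, including the trailing '\0' unless
  // NO_NULL_TERMINATION is passed; nchars gets the UTF-16 units serialized.
  int bytes = str->WriteUtf8(buf, static_cast<int>(sizeof(buf)), &nchars);
  // Here bytes == 6 (3-byte Euro sign + "42" + '\0') and nchars == 3.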
-int String::Write(uint16_t* buffer,
- int start,
- int length,
- int options) const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::String::Write()")) return 0;
+template<typename CharType>
+static inline int WriteHelper(const String* string,
+ CharType* buffer,
+ int start,
+ int length,
+ int options) {
+ i::Isolate* isolate = Utils::OpenHandle(string)->GetIsolate();
LOG_API(isolate, "String::Write");
ENTER_V8(isolate);
ASSERT(start >= 0 && length >= -1);
- i::Handle<i::String> str = Utils::OpenHandle(this);
+ i::Handle<i::String> str = Utils::OpenHandle(string);
isolate->string_tracker()->RecordWrite(str);
- if (options & HINT_MANY_WRITES_EXPECTED) {
+ if (options & String::HINT_MANY_WRITES_EXPECTED) {
// Flatten the string for efficiency. This applies whether we are
- // using StringInputBuffer or Get(i) to access the characters.
+ // using StringCharacterStream or Get(i) to access the characters.
FlattenString(str);
}
int end = start + length;
@@ -4053,7 +4774,7 @@ int String::Write(uint16_t* buffer,
end = str->length();
if (end < 0) return 0;
i::String::WriteToFlat(*str, buffer, start, end);
- if (!(options & NO_NULL_TERMINATION) &&
+ if (!(options & String::NO_NULL_TERMINATION) &&
(length == -1 || end - start < length)) {
buffer[end - start] = '\0';
}
@@ -4061,11 +4782,24 @@ int String::Write(uint16_t* buffer,
}
+int String::WriteOneByte(uint8_t* buffer,
+ int start,
+ int length,
+ int options) const {
+ return WriteHelper(this, buffer, start, length, options);
+}
+
+
+int String::Write(uint16_t* buffer,
+ int start,
+ int length,
+ int options) const {
+ return WriteHelper(this, buffer, start, length, options);
+}
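WriteOneByte and the two-byte Write now share WriteHelper, so both honor the
same (start, length, options) semantics. A small sketch (not part of the
patch; assumes <v8.h> and an initialized isolate):

  v8::Local<v8::String> s = v8::String::NewFromUtf8(isolate, "hello world");
  uint8_t latin1[16];
  uint16_t utf16[16];
  s->WriteOneByte(latin1, 6, -1);  // copies "world" plus a trailing '\0'
  s->Write(utf16, 0, -1);          // copies all 11 code units plus '\0'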
+
+
bool v8::String::IsExternal() const {
i::Handle<i::String> str = Utils::OpenHandle(this);
- if (IsDeadCheck(str->GetIsolate(), "v8::String::IsExternal()")) {
- return false;
- }
EnsureInitializedForIsolate(str->GetIsolate(), "v8::String::IsExternal()");
return i::StringShape(*str).IsExternalTwoByte();
}
@@ -4073,9 +4807,6 @@ bool v8::String::IsExternal() const {
bool v8::String::IsExternalAscii() const {
i::Handle<i::String> str = Utils::OpenHandle(this);
- if (IsDeadCheck(str->GetIsolate(), "v8::String::IsExternalAscii()")) {
- return false;
- }
return i::StringShape(*str).IsExternalAscii();
}
@@ -4111,7 +4842,7 @@ void v8::String::VerifyExternalStringResourceBase(
expectedEncoding = TWO_BYTE_ENCODING;
} else {
expected = NULL;
- expectedEncoding = str->IsAsciiRepresentation() ? ASCII_ENCODING
+ expectedEncoding = str->IsOneByteRepresentation() ? ASCII_ENCODING
: TWO_BYTE_ENCODING;
}
CHECK_EQ(expected, value);
@@ -4121,10 +4852,6 @@ void v8::String::VerifyExternalStringResourceBase(
const v8::String::ExternalAsciiStringResource*
v8::String::GetExternalAsciiStringResource() const {
i::Handle<i::String> str = Utils::OpenHandle(this);
- if (IsDeadCheck(str->GetIsolate(),
- "v8::String::GetExternalAsciiStringResource()")) {
- return NULL;
- }
if (i::StringShape(*str).IsExternalAscii()) {
const void* resource =
i::Handle<i::ExternalAsciiString>::cast(str)->resource();
@@ -4135,22 +4862,26 @@ const v8::String::ExternalAsciiStringResource*
}
+Local<Value> Symbol::Name() const {
+ i::Handle<i::Symbol> sym = Utils::OpenHandle(this);
+ i::Handle<i::Object> name(sym->name(), sym->GetIsolate());
+ return Utils::ToLocal(name);
+}
+
+
double Number::Value() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Number::Value()")) return 0;
i::Handle<i::Object> obj = Utils::OpenHandle(this);
return obj->Number();
}
bool Boolean::Value() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Boolean::Value()")) return false;
i::Handle<i::Object> obj = Utils::OpenHandle(this);
return obj->IsTrue();
}
int64_t Integer::Value() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Integer::Value()")) return 0;
i::Handle<i::Object> obj = Utils::OpenHandle(this);
if (obj->IsSmi()) {
return i::Smi::cast(*obj)->value();
@@ -4161,7 +4892,6 @@ int64_t Integer::Value() const {
int32_t Int32::Value() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Int32::Value()")) return 0;
i::Handle<i::Object> obj = Utils::OpenHandle(this);
if (obj->IsSmi()) {
return i::Smi::cast(*obj)->value();
@@ -4172,7 +4902,6 @@ int32_t Int32::Value() const {
uint32_t Uint32::Value() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Uint32::Value()")) return 0;
i::Handle<i::Object> obj = Utils::OpenHandle(this);
if (obj->IsSmi()) {
return i::Smi::cast(*obj)->value();
@@ -4184,82 +4913,60 @@ uint32_t Uint32::Value() const {
int v8::Object::InternalFieldCount() {
i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
- if (IsDeadCheck(obj->GetIsolate(), "v8::Object::InternalFieldCount()")) {
- return 0;
- }
return obj->GetInternalFieldCount();
}
-Local<Value> v8::Object::CheckedGetInternalField(int index) {
+static bool InternalFieldOK(i::Handle<i::JSObject> obj,
+ int index,
+ const char* location) {
+ return ApiCheck(index < obj->GetInternalFieldCount(),
+ location,
+ "Internal field out of bounds");
+}
+
+
+Local<Value> v8::Object::SlowGetInternalField(int index) {
i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
- if (IsDeadCheck(obj->GetIsolate(), "v8::Object::GetInternalField()")) {
- return Local<Value>();
- }
- if (!ApiCheck(index < obj->GetInternalFieldCount(),
- "v8::Object::GetInternalField()",
- "Reading internal field out of bounds")) {
- return Local<Value>();
- }
- i::Handle<i::Object> value(obj->GetInternalField(index));
- Local<Value> result = Utils::ToLocal(value);
-#ifdef DEBUG
- Local<Value> unchecked = UncheckedGetInternalField(index);
- ASSERT(unchecked.IsEmpty() || (unchecked == result));
-#endif
- return result;
+ const char* location = "v8::Object::GetInternalField()";
+ if (!InternalFieldOK(obj, index, location)) return Local<Value>();
+ i::Handle<i::Object> value(obj->GetInternalField(index), obj->GetIsolate());
+ return Utils::ToLocal(value);
}
void v8::Object::SetInternalField(int index, v8::Handle<Value> value) {
i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
- i::Isolate* isolate = obj->GetIsolate();
- if (IsDeadCheck(isolate, "v8::Object::SetInternalField()")) {
- return;
- }
- if (!ApiCheck(index < obj->GetInternalFieldCount(),
- "v8::Object::SetInternalField()",
- "Writing internal field out of bounds")) {
- return;
- }
- ENTER_V8(isolate);
+ const char* location = "v8::Object::SetInternalField()";
+ if (!InternalFieldOK(obj, index, location)) return;
i::Handle<i::Object> val = Utils::OpenHandle(*value);
obj->SetInternalField(index, *val);
+ ASSERT_EQ(value, GetInternalField(index));
}
-static bool CanBeEncodedAsSmi(void* ptr) {
- const uintptr_t address = reinterpret_cast<uintptr_t>(ptr);
- return ((address & i::kEncodablePointerMask) == 0);
+void* v8::Object::SlowGetAlignedPointerFromInternalField(int index) {
+ i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
+ const char* location = "v8::Object::GetAlignedPointerFromInternalField()";
+ if (!InternalFieldOK(obj, index, location)) return NULL;
+ return DecodeSmiToAligned(obj->GetInternalField(index), location);
}
-static i::Smi* EncodeAsSmi(void* ptr) {
- ASSERT(CanBeEncodedAsSmi(ptr));
- const uintptr_t address = reinterpret_cast<uintptr_t>(ptr);
- i::Smi* result = reinterpret_cast<i::Smi*>(address << i::kPointerToSmiShift);
- ASSERT(i::Internals::HasSmiTag(result));
- ASSERT_EQ(result, i::Smi::FromInt(result->value()));
- ASSERT_EQ(ptr, i::Internals::GetExternalPointerFromSmi(result));
- return result;
+void v8::Object::SetAlignedPointerInInternalField(int index, void* value) {
+ i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
+ const char* location = "v8::Object::SetAlignedPointerInInternalField()";
+ if (!InternalFieldOK(obj, index, location)) return;
+ obj->SetInternalField(index, EncodeAlignedAsSmi(value, location));
+ ASSERT_EQ(value, GetAlignedPointerFromInternalField(index));
}
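The aligned-pointer accessors encode the pointer as a Smi, so they only work
for suitably aligned addresses (hence the name). A hedged sketch of embedder
usage, assuming an ObjectTemplate 'templ' created with
templ->SetInternalFieldCount(1):

  struct Backing { int refcount; };
  static Backing backing = { 1 };  // static storage is suitably aligned
  v8::Local<v8::Object> obj = templ->NewInstance();
  obj->SetAlignedPointerInInternalField(0, &backing);
  Backing* raw = static_cast<Backing*>(
      obj->GetAlignedPointerFromInternalField(0));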
-void v8::Object::SetPointerInInternalField(int index, void* value) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ENTER_V8(isolate);
- if (CanBeEncodedAsSmi(value)) {
- Utils::OpenHandle(this)->SetInternalField(index, EncodeAsSmi(value));
- } else {
- HandleScope scope;
- i::Handle<i::Foreign> foreign =
- isolate->factory()->NewForeign(
- reinterpret_cast<i::Address>(value), i::TENURED);
- if (!foreign.is_null()) {
- Utils::OpenHandle(this)->SetInternalField(index, *foreign);
- }
- }
- ASSERT_EQ(value, GetPointerFromInternalField(index));
+static void* ExternalValue(i::Object* obj) {
+ // Obscure semantics for undefined, but somehow checked in our unit tests...
+ if (obj->IsUndefined()) return NULL;
+ i::Object* foreign = i::JSObject::cast(obj)->GetInternalField(0);
+ return i::Foreign::cast(foreign)->foreign_address();
}
@@ -4271,12 +4978,12 @@ bool v8::V8::Initialize() {
if (isolate != NULL && isolate->IsInitialized()) {
return true;
}
- return InitializeHelper();
+ return InitializeHelper(isolate);
}
-void v8::V8::SetEntropySource(EntropySource source) {
- i::V8::SetEntropySource(source);
+void v8::V8::SetEntropySource(EntropySource entropy_source) {
+ i::RandomNumberGenerator::SetEntropySource(entropy_source);
}
@@ -4286,8 +4993,26 @@ void v8::V8::SetReturnAddressLocationResolver(
}
-bool v8::V8::SetFunctionEntryHook(FunctionEntryHook entry_hook) {
- return i::ProfileEntryHookStub::SetFunctionEntryHook(entry_hook);
+bool v8::V8::SetFunctionEntryHook(Isolate* ext_isolate,
+ FunctionEntryHook entry_hook) {
+ ASSERT(ext_isolate != NULL);
+ ASSERT(entry_hook != NULL);
+
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(ext_isolate);
+
+ // The entry hook can only be set before the Isolate is initialized, as
+ // otherwise the Isolate's code stubs generated at initialization won't
+ // contain entry hooks.
+ if (isolate->IsInitialized())
+ return false;
+
+  // Setting an entry hook is a one-way operation; once set, it cannot be
+  // changed or unset.
+ if (isolate->function_entry_hook() != NULL)
+ return false;
+
+ isolate->set_function_entry_hook(entry_hook);
+ return true;
}
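Sketch of installing an entry hook under the new per-isolate API (not part of
the patch; the exact FunctionEntryHook typedef is assumed here to take the
function address and the return-address slot):

  static void MyEntryHook(uintptr_t function, uintptr_t return_addr_location) {
    // Record 'function' for profiling; keep this signal-safe in practice.
  }

  void InstallHook() {
    v8::Isolate* isolate = v8::Isolate::New();  // fresh, not yet initialized
    // Returns false if the isolate is already initialized or a hook is set.
    bool ok = v8::V8::SetFunctionEntryHook(isolate, MyEntryHook);
    (void)ok;
  }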
@@ -4299,6 +5024,15 @@ void v8::V8::SetJitCodeEventHandler(
isolate->logger()->SetCodeEventHandler(options, event_handler);
}
+void v8::V8::SetArrayBufferAllocator(
+ ArrayBuffer::Allocator* allocator) {
+ if (!ApiCheck(i::V8::ArrayBufferAllocator() == NULL,
+ "v8::V8::SetArrayBufferAllocator",
+ "ArrayBufferAllocator might only be set once"))
+ return;
+ i::V8::SetArrayBufferAllocator(allocator);
+}
+
bool v8::V8::Dispose() {
i::Isolate* isolate = i::Isolate::Current();
@@ -4314,66 +5048,62 @@ bool v8::V8::Dispose() {
HeapStatistics::HeapStatistics(): total_heap_size_(0),
total_heap_size_executable_(0),
+ total_physical_size_(0),
used_heap_size_(0),
heap_size_limit_(0) { }
-void v8::V8::GetHeapStatistics(HeapStatistics* heap_statistics) {
- if (!i::Isolate::Current()->IsInitialized()) {
- // Isolate is unitialized thus heap is not configured yet.
- heap_statistics->set_total_heap_size(0);
- heap_statistics->set_total_heap_size_executable(0);
- heap_statistics->set_used_heap_size(0);
- heap_statistics->set_heap_size_limit(0);
- return;
- }
-
- i::Heap* heap = i::Isolate::Current()->heap();
- heap_statistics->set_total_heap_size(heap->CommittedMemory());
- heap_statistics->set_total_heap_size_executable(
- heap->CommittedMemoryExecutable());
- heap_statistics->set_used_heap_size(heap->SizeOfObjects());
- heap_statistics->set_heap_size_limit(heap->MaxReserved());
-}
-
-
void v8::V8::VisitExternalResources(ExternalResourceVisitor* visitor) {
i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::V8::VisitExternalResources");
isolate->heap()->VisitExternalResources(visitor);
}
+class VisitorAdapter : public i::ObjectVisitor {
+ public:
+ explicit VisitorAdapter(PersistentHandleVisitor* visitor)
+ : visitor_(visitor) {}
+ virtual void VisitPointers(i::Object** start, i::Object** end) {
+ UNREACHABLE();
+ }
+ virtual void VisitEmbedderReference(i::Object** p, uint16_t class_id) {
+ Value* value = ToApi<Value>(i::Handle<i::Object>(p));
+ visitor_->VisitPersistentHandle(
+ reinterpret_cast<Persistent<Value>*>(&value), class_id);
+ }
+ private:
+ PersistentHandleVisitor* visitor_;
+};
+
+
void v8::V8::VisitHandlesWithClassIds(PersistentHandleVisitor* visitor) {
i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::V8::VisitHandlesWithClassId");
+ i::DisallowHeapAllocation no_allocation;
- i::AssertNoAllocation no_allocation;
-
- class VisitorAdapter : public i::ObjectVisitor {
- public:
- explicit VisitorAdapter(PersistentHandleVisitor* visitor)
- : visitor_(visitor) {}
- virtual void VisitPointers(i::Object** start, i::Object** end) {
- UNREACHABLE();
- }
- virtual void VisitEmbedderReference(i::Object** p, uint16_t class_id) {
- visitor_->VisitPersistentHandle(ToApi<Value>(i::Handle<i::Object>(p)),
- class_id);
- }
- private:
- PersistentHandleVisitor* visitor_;
- } visitor_adapter(visitor);
+ VisitorAdapter visitor_adapter(visitor);
isolate->global_handles()->IterateAllRootsWithClassIds(&visitor_adapter);
}
+void v8::V8::VisitHandlesForPartialDependence(
+ Isolate* exported_isolate, PersistentHandleVisitor* visitor) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(exported_isolate);
+ ASSERT(isolate == i::Isolate::Current());
+ i::DisallowHeapAllocation no_allocation;
+
+ VisitorAdapter visitor_adapter(visitor);
+ isolate->global_handles()->IterateAllRootsInNewSpaceWithClassIds(
+ &visitor_adapter);
+}
+
+
bool v8::V8::IdleNotification(int hint) {
// Returning true tells the caller that it need not
// continue to call IdleNotification.
i::Isolate* isolate = i::Isolate::Current();
if (isolate == NULL || !isolate->IsInitialized()) return true;
- return i::V8::IdleNotification(hint);
+ if (!i::FLAG_use_idle_notification) return true;
+ return isolate->heap()->IdleNotification(hint);
}
@@ -4391,35 +5121,24 @@ int v8::V8::ContextDisposedNotification() {
}
-const char* v8::V8::GetVersion() {
- return i::Version::GetVersion();
+bool v8::V8::InitializeICU() {
+ return i::InitializeICU();
}
-static i::Handle<i::FunctionTemplateInfo>
- EnsureConstructor(i::Handle<i::ObjectTemplateInfo> templ) {
- if (templ->constructor()->IsUndefined()) {
- Local<FunctionTemplate> constructor = FunctionTemplate::New();
- Utils::OpenHandle(*constructor)->set_instance_template(*templ);
- templ->set_constructor(*Utils::OpenHandle(*constructor));
- }
- return i::Handle<i::FunctionTemplateInfo>(
- i::FunctionTemplateInfo::cast(templ->constructor()));
+const char* v8::V8::GetVersion() {
+ return i::Version::GetVersion();
}
-Persistent<Context> v8::Context::New(
+static i::Handle<i::Context> CreateEnvironment(
+ i::Isolate* isolate,
v8::ExtensionConfiguration* extensions,
v8::Handle<ObjectTemplate> global_template,
v8::Handle<Value> global_object) {
- i::Isolate::EnsureDefaultIsolate();
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::Context::New()");
- LOG_API(isolate, "Context::New");
- ON_BAILOUT(isolate, "v8::Context::New()", return Persistent<Context>());
+ i::Handle<i::Context> env;
// Enter V8 via an ENTER_V8 scope.
- i::Handle<i::Context> env;
{
ENTER_V8(isolate);
v8::Handle<ObjectTemplate> proxy_template = global_template;
@@ -4428,13 +5147,11 @@ Persistent<Context> v8::Context::New(
if (!global_template.IsEmpty()) {
// Make sure that the global_template has a constructor.
- global_constructor =
- EnsureConstructor(Utils::OpenHandle(*global_template));
+ global_constructor = EnsureConstructor(*global_template);
// Create a fresh template for the global proxy object.
proxy_template = ObjectTemplate::New();
- proxy_constructor =
- EnsureConstructor(Utils::OpenHandle(*proxy_template));
+ proxy_constructor = EnsureConstructor(*proxy_template);
// Set the global template to be the prototype template of
// global proxy template.
@@ -4457,7 +5174,6 @@ Persistent<Context> v8::Context::New(
// Create the environment.
env = isolate->bootstrapper()->CreateEnvironment(
- isolate,
Utils::OpenHandle(*global_object, true),
proxy_template,
extensions);
@@ -4475,18 +5191,28 @@ Persistent<Context> v8::Context::New(
}
// Leave V8.
- if (env.is_null()) {
- return Persistent<Context>();
- }
- return Persistent<Context>(Utils::ToLocal(env));
+ return env;
+}
+
+Local<Context> v8::Context::New(
+ v8::Isolate* external_isolate,
+ v8::ExtensionConfiguration* extensions,
+ v8::Handle<ObjectTemplate> global_template,
+ v8::Handle<Value> global_object) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(external_isolate);
+ EnsureInitializedForIsolate(isolate, "v8::Context::New()");
+ LOG_API(isolate, "Context::New");
+ ON_BAILOUT(isolate, "v8::Context::New()", return Local<Context>());
+ i::HandleScope scope(isolate);
+ i::Handle<i::Context> env =
+ CreateEnvironment(isolate, extensions, global_template, global_object);
+ if (env.is_null()) return Local<Context>();
+ return Utils::ToLocal(scope.CloseAndEscape(env));
}
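Contexts are now created against an explicit isolate and handed back as a
Local, so the caller supplies the enclosing HandleScope. Typical embedder
usage (illustrative; assumes the extension/template parameters default to
empty in the header):

  v8::Isolate* isolate = v8::Isolate::GetCurrent();
  v8::HandleScope handle_scope(isolate);
  v8::Local<v8::Context> context = v8::Context::New(isolate);
  v8::Context::Scope context_scope(context);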
void v8::Context::SetSecurityToken(Handle<Value> token) {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Context::SetSecurityToken()")) {
- return;
- }
ENTER_V8(isolate);
i::Handle<i::Context> env = Utils::OpenHandle(this);
i::Handle<i::Object> token_handle = Utils::OpenHandle(*token);
@@ -4496,10 +5222,6 @@ void v8::Context::SetSecurityToken(Handle<Value> token) {
void v8::Context::UseDefaultSecurityToken() {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate,
- "v8::Context::UseDefaultSecurityToken()")) {
- return;
- }
ENTER_V8(isolate);
i::Handle<i::Context> env = Utils::OpenHandle(this);
env->set_security_token(env->global_object());
@@ -4508,12 +5230,9 @@ void v8::Context::UseDefaultSecurityToken() {
Handle<Value> v8::Context::GetSecurityToken() {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Context::GetSecurityToken()")) {
- return Handle<Value>();
- }
i::Handle<i::Context> env = Utils::OpenHandle(this);
i::Object* security_token = env->security_token();
- i::Handle<i::Object> token_handle(security_token);
+ i::Handle<i::Object> token_handle(security_token, isolate);
return Utils::ToLocal(token_handle);
}
@@ -4529,74 +5248,53 @@ bool Context::InContext() {
}
+v8::Isolate* Context::GetIsolate() {
+ i::Handle<i::Context> env = Utils::OpenHandle(this);
+ return reinterpret_cast<Isolate*>(env->GetIsolate());
+}
+
+
v8::Local<v8::Context> Context::GetEntered() {
i::Isolate* isolate = i::Isolate::Current();
if (!EnsureInitializedForIsolate(isolate, "v8::Context::GetEntered()")) {
return Local<Context>();
}
- i::Handle<i::Object> last =
- isolate->handle_scope_implementer()->LastEnteredContext();
- if (last.is_null()) return Local<Context>();
- i::Handle<i::Context> context = i::Handle<i::Context>::cast(last);
- return Utils::ToLocal(context);
+ return reinterpret_cast<Isolate*>(isolate)->GetEnteredContext();
}
v8::Local<v8::Context> Context::GetCurrent() {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Context::GetCurrent()")) {
- return Local<Context>();
- }
- i::Handle<i::Object> current = isolate->native_context();
- if (current.is_null()) return Local<Context>();
- i::Handle<i::Context> context = i::Handle<i::Context>::cast(current);
- return Utils::ToLocal(context);
+ return reinterpret_cast<Isolate*>(isolate)->GetCurrentContext();
}
v8::Local<v8::Context> Context::GetCalling() {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Context::GetCalling()")) {
- return Local<Context>();
- }
- i::Handle<i::Object> calling =
- isolate->GetCallingNativeContext();
- if (calling.is_null()) return Local<Context>();
- i::Handle<i::Context> context = i::Handle<i::Context>::cast(calling);
- return Utils::ToLocal(context);
+ return reinterpret_cast<Isolate*>(isolate)->GetCallingContext();
}
v8::Local<v8::Object> Context::Global() {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Context::Global()")) {
- return Local<v8::Object>();
- }
- i::Object** ctx = reinterpret_cast<i::Object**>(this);
- i::Handle<i::Context> context =
- i::Handle<i::Context>::cast(i::Handle<i::Object>(ctx));
- i::Handle<i::Object> global(context->global_proxy());
+ i::Handle<i::Context> context = Utils::OpenHandle(this);
+ i::Isolate* isolate = context->GetIsolate();
+ i::Handle<i::Object> global(context->global_proxy(), isolate);
return Utils::ToLocal(i::Handle<i::JSObject>::cast(global));
}
void Context::DetachGlobal() {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Context::DetachGlobal()")) return;
+ i::Handle<i::Context> context = Utils::OpenHandle(this);
+ i::Isolate* isolate = context->GetIsolate();
ENTER_V8(isolate);
- i::Object** ctx = reinterpret_cast<i::Object**>(this);
- i::Handle<i::Context> context =
- i::Handle<i::Context>::cast(i::Handle<i::Object>(ctx));
isolate->bootstrapper()->DetachGlobal(context);
}
void Context::ReattachGlobal(Handle<Object> global_object) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Context::ReattachGlobal()")) return;
+ i::Handle<i::Context> context = Utils::OpenHandle(this);
+ i::Isolate* isolate = context->GetIsolate();
ENTER_V8(isolate);
- i::Object** ctx = reinterpret_cast<i::Object**>(this);
- i::Handle<i::Context> context =
- i::Handle<i::Context>::cast(i::Handle<i::Object>(ctx));
i::Handle<i::JSGlobalProxy> global_proxy =
i::Handle<i::JSGlobalProxy>::cast(Utils::OpenHandle(*global_object));
isolate->bootstrapper()->ReattachGlobal(context, global_proxy);
@@ -4604,59 +5302,28 @@ void Context::ReattachGlobal(Handle<Object> global_object) {
void Context::AllowCodeGenerationFromStrings(bool allow) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Context::AllowCodeGenerationFromStrings()")) {
- return;
- }
+ i::Handle<i::Context> context = Utils::OpenHandle(this);
+ i::Isolate* isolate = context->GetIsolate();
ENTER_V8(isolate);
- i::Object** ctx = reinterpret_cast<i::Object**>(this);
- i::Handle<i::Context> context =
- i::Handle<i::Context>::cast(i::Handle<i::Object>(ctx));
context->set_allow_code_gen_from_strings(
allow ? isolate->heap()->true_value() : isolate->heap()->false_value());
}
bool Context::IsCodeGenerationFromStringsAllowed() {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate,
- "v8::Context::IsCodeGenerationFromStringsAllowed()")) {
- return false;
- }
- ENTER_V8(isolate);
- i::Object** ctx = reinterpret_cast<i::Object**>(this);
- i::Handle<i::Context> context =
- i::Handle<i::Context>::cast(i::Handle<i::Object>(ctx));
+ i::Handle<i::Context> context = Utils::OpenHandle(this);
return !context->allow_code_gen_from_strings()->IsFalse();
}
void Context::SetErrorMessageForCodeGenerationFromStrings(
Handle<String> error) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate,
- "v8::Context::SetErrorMessageForCodeGenerationFromStrings()")) {
- return;
- }
- ENTER_V8(isolate);
- i::Object** ctx = reinterpret_cast<i::Object**>(this);
- i::Handle<i::Context> context =
- i::Handle<i::Context>::cast(i::Handle<i::Object>(ctx));
- i::Handle<i::Object> error_handle = Utils::OpenHandle(*error);
+ i::Handle<i::Context> context = Utils::OpenHandle(this);
+ i::Handle<i::String> error_handle = Utils::OpenHandle(*error);
context->set_error_message_for_code_gen_from_strings(*error_handle);
}
-void V8::SetWrapperClassId(i::Object** global_handle, uint16_t class_id) {
- i::GlobalHandles::SetWrapperClassId(global_handle, class_id);
-}
-
-
-uint16_t V8::GetWrapperClassId(internal::Object** global_handle) {
- return i::GlobalHandles::GetWrapperClassId(global_handle);
-}
-
-
Local<v8::Object> ObjectTemplate::NewInstance() {
i::Isolate* isolate = i::Isolate::Current();
ON_BAILOUT(isolate, "v8::ObjectTemplate::NewInstance()",
@@ -4695,159 +5362,157 @@ bool FunctionTemplate::HasInstance(v8::Handle<v8::Value> value) {
}
-static Local<External> ExternalNewImpl(void* data) {
- return Utils::ToLocal(FACTORY->NewForeign(static_cast<i::Address>(data)));
+Local<External> v8::External::New(void* value) {
+ STATIC_ASSERT(sizeof(value) == sizeof(i::Address));
+ i::Isolate* isolate = i::Isolate::Current();
+ EnsureInitializedForIsolate(isolate, "v8::External::New()");
+ LOG_API(isolate, "External::New");
+ ENTER_V8(isolate);
+ i::Handle<i::JSObject> external = isolate->factory()->NewExternal(value);
+ return Utils::ExternalToLocal(external);
}
-static void* ExternalValueImpl(i::Handle<i::Object> obj) {
- return reinterpret_cast<void*>(i::Foreign::cast(*obj)->foreign_address());
+
+void* External::Value() const {
+ return ExternalValue(*Utils::OpenHandle(this));
}
-Local<Value> v8::External::Wrap(void* data) {
+Local<String> v8::String::Empty() {
i::Isolate* isolate = i::Isolate::Current();
- STATIC_ASSERT(sizeof(data) == sizeof(i::Address));
- EnsureInitializedForIsolate(isolate, "v8::External::Wrap()");
- LOG_API(isolate, "External::Wrap");
- ENTER_V8(isolate);
-
- v8::Local<v8::Value> result = CanBeEncodedAsSmi(data)
- ? Utils::ToLocal(i::Handle<i::Object>(EncodeAsSmi(data)))
- : v8::Local<v8::Value>(ExternalNewImpl(data));
-
- ASSERT_EQ(data, Unwrap(result));
- return result;
+ if (!EnsureInitializedForIsolate(isolate, "v8::String::Empty()")) {
+ return v8::Local<String>();
+ }
+ LOG_API(isolate, "String::Empty()");
+ return Utils::ToLocal(isolate->factory()->empty_string());
}
-void* v8::Object::SlowGetPointerFromInternalField(int index) {
- i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
- i::Object* value = obj->GetInternalField(index);
- if (value->IsSmi()) {
- return i::Internals::GetExternalPointerFromSmi(value);
- } else if (value->IsForeign()) {
- return reinterpret_cast<void*>(i::Foreign::cast(value)->foreign_address());
- } else {
- return NULL;
- }
+// anonymous namespace for string creation helper functions
+namespace {
+
+inline int StringLength(const char* string) {
+ return i::StrLength(string);
}
-void* v8::External::FullUnwrap(v8::Handle<v8::Value> wrapper) {
- if (IsDeadCheck(i::Isolate::Current(), "v8::External::Unwrap()")) return 0;
- i::Handle<i::Object> obj = Utils::OpenHandle(*wrapper);
- void* result;
- if (obj->IsSmi()) {
- result = i::Internals::GetExternalPointerFromSmi(*obj);
- } else if (obj->IsForeign()) {
- result = ExternalValueImpl(obj);
- } else {
- result = NULL;
- }
- ASSERT_EQ(result, QuickUnwrap(wrapper));
- return result;
+inline int StringLength(const uint8_t* string) {
+ return i::StrLength(reinterpret_cast<const char*>(string));
}
-Local<External> v8::External::New(void* data) {
- STATIC_ASSERT(sizeof(data) == sizeof(i::Address));
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::External::New()");
- LOG_API(isolate, "External::New");
- ENTER_V8(isolate);
- return ExternalNewImpl(data);
+inline int StringLength(const uint16_t* string) {
+ int length = 0;
+ while (string[length] != '\0')
+ length++;
+ return length;
}
-void* External::Value() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::External::Value()")) return 0;
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- return ExternalValueImpl(obj);
+inline i::Handle<i::String> NewString(i::Factory* factory,
+ String::NewStringType type,
+ i::Vector<const char> string) {
+  if (type == String::kInternalizedString) {
+ return factory->InternalizeUtf8String(string);
+ }
+ return factory->NewStringFromUtf8(string);
}
-Local<String> v8::String::Empty() {
- i::Isolate* isolate = i::Isolate::Current();
- if (!EnsureInitializedForIsolate(isolate, "v8::String::Empty()")) {
- return v8::Local<String>();
+inline i::Handle<i::String> NewString(i::Factory* factory,
+ String::NewStringType type,
+ i::Vector<const uint8_t> string) {
+ if (type == String::kInternalizedString) {
+ return factory->InternalizeOneByteString(string);
}
- LOG_API(isolate, "String::Empty()");
- return Utils::ToLocal(isolate->factory()->empty_symbol());
+ return factory->NewStringFromOneByte(string);
}
-Local<String> v8::String::New(const char* data, int length) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::String::New()");
- LOG_API(isolate, "String::New(char)");
- if (length == 0) return Empty();
- ENTER_V8(isolate);
- if (length == -1) length = i::StrLength(data);
- i::Handle<i::String> result =
- isolate->factory()->NewStringFromUtf8(
- i::Vector<const char>(data, length));
- return Utils::ToLocal(result);
+inline i::Handle<i::String> NewString(i::Factory* factory,
+ String::NewStringType type,
+ i::Vector<const uint16_t> string) {
+ if (type == String::kInternalizedString) {
+ return factory->InternalizeTwoByteString(string);
+ }
+ return factory->NewStringFromTwoByte(string);
}
-Local<String> v8::String::Concat(Handle<String> left, Handle<String> right) {
- i::Handle<i::String> left_string = Utils::OpenHandle(*left);
- i::Isolate* isolate = left_string->GetIsolate();
- EnsureInitializedForIsolate(isolate, "v8::String::New()");
- LOG_API(isolate, "String::New(char)");
+template<typename Char>
+inline Local<String> NewString(Isolate* v8_isolate,
+ const char* location,
+ const char* env,
+ const Char* data,
+ String::NewStringType type,
+ int length) {
+ i::Isolate* isolate = reinterpret_cast<internal::Isolate*>(v8_isolate);
+ EnsureInitializedForIsolate(isolate, location);
+ LOG_API(isolate, env);
+ if (length == 0 && type != String::kUndetectableString) {
+ return String::Empty();
+ }
ENTER_V8(isolate);
- i::Handle<i::String> right_string = Utils::OpenHandle(*right);
- i::Handle<i::String> result = isolate->factory()->NewConsString(left_string,
- right_string);
+ if (length == -1) length = StringLength(data);
+ i::Handle<i::String> result = NewString(
+ isolate->factory(), type, i::Vector<const Char>(data, length));
+ if (type == String::kUndetectableString) {
+ result->MarkAsUndetectable();
+ }
return Utils::ToLocal(result);
}
+} // anonymous namespace
-Local<String> v8::String::NewUndetectable(const char* data, int length) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::String::NewUndetectable()");
- LOG_API(isolate, "String::NewUndetectable(char)");
- ENTER_V8(isolate);
- if (length == -1) length = i::StrLength(data);
- i::Handle<i::String> result =
- isolate->factory()->NewStringFromUtf8(
- i::Vector<const char>(data, length));
- result->MarkAsUndetectable();
- return Utils::ToLocal(result);
+
+Local<String> String::NewFromUtf8(Isolate* isolate,
+ const char* data,
+ NewStringType type,
+ int length) {
+ return NewString(isolate,
+ "v8::String::NewFromUtf8()",
+ "String::NewFromUtf8",
+ data,
+ type,
+ length);
}
-static int TwoByteStringLength(const uint16_t* data) {
- int length = 0;
- while (data[length] != '\0') length++;
- return length;
+Local<String> String::NewFromOneByte(Isolate* isolate,
+ const uint8_t* data,
+ NewStringType type,
+ int length) {
+ return NewString(isolate,
+ "v8::String::NewFromOneByte()",
+ "String::NewFromOneByte",
+ data,
+ type,
+ length);
}
-Local<String> v8::String::New(const uint16_t* data, int length) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::String::New()");
- LOG_API(isolate, "String::New(uint16_)");
- if (length == 0) return Empty();
- ENTER_V8(isolate);
- if (length == -1) length = TwoByteStringLength(data);
- i::Handle<i::String> result =
- isolate->factory()->NewStringFromTwoByte(
- i::Vector<const uint16_t>(data, length));
- return Utils::ToLocal(result);
+Local<String> String::NewFromTwoByte(Isolate* isolate,
+ const uint16_t* data,
+ NewStringType type,
+ int length) {
+ return NewString(isolate,
+ "v8::String::NewFromTwoByte()",
+ "String::NewFromTwoByte",
+ data,
+ type,
+ length);
}
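Sketch of the three new isolate-scoped string factories (illustrative; a
length of -1 means NUL-terminated input, and kInternalizedString dedupes the
string in the heap):

  v8::Local<v8::String> utf8 =
      v8::String::NewFromUtf8(isolate, "caf\xC3\xA9");  // "cafe" with accent
  const uint8_t latin1[] = { 'k', 'e', 'y' };
  v8::Local<v8::String> key = v8::String::NewFromOneByte(
      isolate, latin1, v8::String::kInternalizedString, 3);
  const uint16_t wide[] = { 0x3042, 0x3044, 0 };  // NUL-terminated two-byte
  v8::Local<v8::String> two_byte = v8::String::NewFromTwoByte(isolate, wide);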
-Local<String> v8::String::NewUndetectable(const uint16_t* data, int length) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::String::NewUndetectable()");
- LOG_API(isolate, "String::NewUndetectable(uint16_)");
+Local<String> v8::String::Concat(Handle<String> left, Handle<String> right) {
+ i::Handle<i::String> left_string = Utils::OpenHandle(*left);
+ i::Isolate* isolate = left_string->GetIsolate();
+ EnsureInitializedForIsolate(isolate, "v8::String::New()");
+ LOG_API(isolate, "String::New(char)");
ENTER_V8(isolate);
- if (length == -1) length = TwoByteStringLength(data);
- i::Handle<i::String> result =
- isolate->factory()->NewStringFromTwoByte(
- i::Vector<const uint16_t>(data, length));
- result->MarkAsUndetectable();
+ i::Handle<i::String> right_string = Utils::OpenHandle(*right);
+ i::Handle<i::String> result = isolate->factory()->NewConsString(left_string,
+ right_string);
return Utils::ToLocal(result);
}
@@ -4868,6 +5533,23 @@ i::Handle<i::String> NewExternalAsciiStringHandle(i::Isolate* isolate,
}
+bool RedirectToExternalString(i::Isolate* isolate,
+ i::Handle<i::String> parent,
+ i::Handle<i::String> external) {
+ if (parent->IsConsString()) {
+ i::Handle<i::ConsString> cons = i::Handle<i::ConsString>::cast(parent);
+ cons->set_first(*external);
+ cons->set_second(isolate->heap()->empty_string());
+ } else {
+ ASSERT(parent->IsSlicedString());
+ i::Handle<i::SlicedString> slice = i::Handle<i::SlicedString>::cast(parent);
+ slice->set_parent(*external);
+ slice->set_offset(0);
+ }
+ return true;
+}
+
+
Local<String> v8::String::NewExternal(
v8::String::ExternalStringResource* resource) {
i::Isolate* isolate = i::Isolate::Current();
@@ -4884,7 +5566,6 @@ Local<String> v8::String::NewExternal(
bool v8::String::MakeExternal(v8::String::ExternalStringResource* resource) {
i::Handle<i::String> obj = Utils::OpenHandle(this);
i::Isolate* isolate = obj->GetIsolate();
- if (IsDeadCheck(isolate, "v8::String::MakeExternal()")) return false;
if (i::StringShape(*obj).IsExternalTwoByte()) {
return false; // Already an external string.
}
@@ -4896,9 +5577,23 @@ bool v8::String::MakeExternal(v8::String::ExternalStringResource* resource) {
return false;
}
CHECK(resource && resource->data());
- bool result = obj->MakeExternal(resource);
- if (result && !obj->IsSymbol()) {
- isolate->heap()->external_string_table()->AddString(*obj);
+
+ bool result;
+ i::Handle<i::String> external;
+ if (isolate->heap()->old_pointer_space()->Contains(*obj)) {
+ // We do not allow external strings in the old pointer space. Instead of
+ // converting the string in-place, we keep the cons/sliced string and
+ // point it to a newly-allocated external string.
+ external = NewExternalStringHandle(isolate, resource);
+ result = RedirectToExternalString(isolate, obj, external);
+ } else {
+ result = obj->MakeExternal(resource);
+ external = obj;
+ }
+
+ ASSERT(external->IsExternalString());
+ if (result && !external->IsInternalizedString()) {
+ isolate->heap()->external_string_table()->AddString(*external);
}
return result;
}
@@ -4921,7 +5616,6 @@ bool v8::String::MakeExternal(
v8::String::ExternalAsciiStringResource* resource) {
i::Handle<i::String> obj = Utils::OpenHandle(this);
i::Isolate* isolate = obj->GetIsolate();
- if (IsDeadCheck(isolate, "v8::String::MakeExternal()")) return false;
if (i::StringShape(*obj).IsExternalTwoByte()) {
return false; // Already an external string.
}
@@ -4933,9 +5627,23 @@ bool v8::String::MakeExternal(
return false;
}
CHECK(resource && resource->data());
- bool result = obj->MakeExternal(resource);
- if (result && !obj->IsSymbol()) {
- isolate->heap()->external_string_table()->AddString(*obj);
+
+ bool result;
+ i::Handle<i::String> external;
+ if (isolate->heap()->old_pointer_space()->Contains(*obj)) {
+ // We do not allow external strings in the old pointer space. Instead of
+ // converting the string in-place, we keep the cons/sliced string and
+ // point it to a newly-allocated external string.
+ external = NewExternalAsciiStringHandle(isolate, resource);
+ result = RedirectToExternalString(isolate, obj, external);
+ } else {
+ result = obj->MakeExternal(resource);
+ external = obj;
+ }
+
+ ASSERT(external->IsExternalString());
+ if (result && !external->IsInternalizedString()) {
+ isolate->heap()->external_string_table()->AddString(*external);
}
return result;
}
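For reference, a hedged sketch of an embedder-owned resource that the new
path can target; MakeExternal either converts the string in place or, for
old-pointer-space cons/sliced strings, redirects it to a freshly allocated
external string as implemented above:

  class StaticAsciiResource : public v8::String::ExternalAsciiStringResource {
   public:
    StaticAsciiResource(const char* data, size_t length)
        : data_(data), length_(length) {}
    virtual const char* data() const { return data_; }
    virtual size_t length() const { return length_; }
   private:
    const char* data_;
    size_t length_;
  };

  // Usage (sketch): assumes 'str' is an existing, non-fresh, large enough
  // v8::Local<v8::String>; the resource must outlive the string.
  //   bool ok = str->MakeExternal(new StaticAsciiResource(buf, buf_len));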
@@ -4945,7 +5653,12 @@ bool v8::String::CanMakeExternal() {
if (!internal::FLAG_clever_optimizations) return false;
i::Handle<i::String> obj = Utils::OpenHandle(this);
i::Isolate* isolate = obj->GetIsolate();
- if (IsDeadCheck(isolate, "v8::String::CanMakeExternal()")) return false;
+
+ // TODO(yangguo): Externalizing sliced/cons strings allocates.
+ // This rule can be removed when all code that can
+ // trigger an access check is handlified and therefore GC safe.
+ if (isolate->heap()->old_pointer_space()->Contains(*obj)) return false;
+
if (isolate->string_tracker()->IsFreshUnusedString(obj)) return false;
int size = obj->Size(); // Byte size of the original string.
if (size < i::ExternalString::kShortSize) return false;
@@ -4976,9 +5689,8 @@ Local<v8::Value> v8::NumberObject::New(double value) {
}
-double v8::NumberObject::NumberValue() const {
+double v8::NumberObject::ValueOf() const {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::NumberObject::NumberValue()")) return 0;
LOG_API(isolate, "NumberObject::NumberValue");
i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::JSValue> jsvalue = i::Handle<i::JSValue>::cast(obj);
@@ -4991,16 +5703,17 @@ Local<v8::Value> v8::BooleanObject::New(bool value) {
EnsureInitializedForIsolate(isolate, "v8::BooleanObject::New()");
LOG_API(isolate, "BooleanObject::New");
ENTER_V8(isolate);
- i::Handle<i::Object> boolean(value ? isolate->heap()->true_value()
- : isolate->heap()->false_value());
+ i::Handle<i::Object> boolean(value
+ ? isolate->heap()->true_value()
+ : isolate->heap()->false_value(),
+ isolate);
i::Handle<i::Object> obj = isolate->factory()->ToObject(boolean);
return Utils::ToLocal(obj);
}
-bool v8::BooleanObject::BooleanValue() const {
+bool v8::BooleanObject::ValueOf() const {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::BooleanObject::BooleanValue()")) return 0;
LOG_API(isolate, "BooleanObject::BooleanValue");
i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::JSValue> jsvalue = i::Handle<i::JSValue>::cast(obj);
@@ -5019,11 +5732,8 @@ Local<v8::Value> v8::StringObject::New(Handle<String> value) {
}
-Local<v8::String> v8::StringObject::StringValue() const {
+Local<v8::String> v8::StringObject::ValueOf() const {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::StringObject::StringValue()")) {
- return Local<v8::String>();
- }
LOG_API(isolate, "StringObject::StringValue");
i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::JSValue> jsvalue = i::Handle<i::JSValue>::cast(obj);
@@ -5032,26 +5742,46 @@ Local<v8::String> v8::StringObject::StringValue() const {
}
+Local<v8::Value> v8::SymbolObject::New(Isolate* isolate, Handle<Symbol> value) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ EnsureInitializedForIsolate(i_isolate, "v8::SymbolObject::New()");
+ LOG_API(i_isolate, "SymbolObject::New");
+ ENTER_V8(i_isolate);
+ i::Handle<i::Object> obj =
+ i_isolate->factory()->ToObject(Utils::OpenHandle(*value));
+ return Utils::ToLocal(obj);
+}
+
+
+Local<v8::Symbol> v8::SymbolObject::ValueOf() const {
+ i::Isolate* isolate = i::Isolate::Current();
+ LOG_API(isolate, "SymbolObject::SymbolValue");
+ i::Handle<i::Object> obj = Utils::OpenHandle(this);
+ i::Handle<i::JSValue> jsvalue = i::Handle<i::JSValue>::cast(obj);
+ return Utils::ToLocal(
+ i::Handle<i::Symbol>(i::Symbol::cast(jsvalue->value())));
+}
+
+
Local<v8::Value> v8::Date::New(double time) {
i::Isolate* isolate = i::Isolate::Current();
EnsureInitializedForIsolate(isolate, "v8::Date::New()");
LOG_API(isolate, "Date::New");
- if (isnan(time)) {
+ if (std::isnan(time)) {
// Introduce only canonical NaN value into the VM, to avoid signaling NaNs.
time = i::OS::nan_value();
}
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> obj =
- i::Execution::NewDate(time, &has_pending_exception);
+ i::Execution::NewDate(isolate, time, &has_pending_exception);
EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::Value>());
return Utils::ToLocal(obj);
}
-double v8::Date::NumberValue() const {
+double v8::Date::ValueOf() const {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Date::NumberValue()")) return 0;
LOG_API(isolate, "Date::NumberValue");
i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::JSDate> jsdate = i::Handle<i::JSDate>::cast(obj);
@@ -5071,7 +5801,8 @@ void v8::Date::DateTimeConfigurationChangeNotification() {
i::HandleScope scope(isolate);
// Get the function ResetDateCache (defined in date.js).
i::Handle<i::String> func_name_str =
- isolate->factory()->LookupAsciiSymbol("ResetDateCache");
+ isolate->factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("ResetDateCache"));
i::MaybeObject* result =
isolate->js_builtins_object()->GetProperty(*func_name_str);
i::Object* object_func;
@@ -5095,14 +5826,15 @@ void v8::Date::DateTimeConfigurationChangeNotification() {
static i::Handle<i::String> RegExpFlagsToString(RegExp::Flags flags) {
- char flags_buf[3];
+ i::Isolate* isolate = i::Isolate::Current();
+ uint8_t flags_buf[3];
int num_flags = 0;
if ((flags & RegExp::kGlobal) != 0) flags_buf[num_flags++] = 'g';
if ((flags & RegExp::kMultiline) != 0) flags_buf[num_flags++] = 'm';
if ((flags & RegExp::kIgnoreCase) != 0) flags_buf[num_flags++] = 'i';
ASSERT(num_flags <= static_cast<int>(ARRAY_SIZE(flags_buf)));
- return FACTORY->LookupSymbol(
- i::Vector<const char>(flags_buf, num_flags));
+ return isolate->factory()->InternalizeOneByteString(
+ i::Vector<const uint8_t>(flags_buf, num_flags));
}
@@ -5123,10 +5855,6 @@ Local<v8::RegExp> v8::RegExp::New(Handle<String> pattern,
Local<v8::String> v8::RegExp::GetSource() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::RegExp::GetSource()")) {
- return Local<v8::String>();
- }
i::Handle<i::JSRegExp> obj = Utils::OpenHandle(this);
return Utils::ToLocal(i::Handle<i::String>(obj->Pattern()));
}
@@ -5143,9 +5871,6 @@ REGEXP_FLAG_ASSERT_EQ(kMultiline, MULTILINE);
#undef REGEXP_FLAG_ASSERT_EQ
v8::RegExp::Flags v8::RegExp::GetFlags() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::RegExp::GetFlags()")) {
- return v8::RegExp::kNone;
- }
i::Handle<i::JSRegExp> obj = Utils::OpenHandle(this);
return static_cast<RegExp::Flags>(obj->GetFlags().value());
}
@@ -5166,8 +5891,6 @@ Local<v8::Array> v8::Array::New(int length) {
uint32_t v8::Array::Length() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::Array::Length()")) return 0;
i::Handle<i::JSArray> obj = Utils::OpenHandle(this);
i::Object* length = obj->length();
if (length->IsSmi()) {
@@ -5193,21 +5916,236 @@ Local<Object> Array::CloneElementAt(uint32_t index) {
i::Handle<i::JSObject> paragon_handle(i::JSObject::cast(paragon));
EXCEPTION_PREAMBLE(isolate);
ENTER_V8(isolate);
- i::Handle<i::JSObject> result = i::Copy(paragon_handle);
+ i::Handle<i::JSObject> result = i::JSObject::Copy(paragon_handle);
has_pending_exception = result.is_null();
EXCEPTION_BAILOUT_CHECK(isolate, Local<Object>());
return Utils::ToLocal(result);
}
-Local<String> v8::String::NewSymbol(const char* data, int length) {
+bool v8::ArrayBuffer::IsExternal() const {
+ return Utils::OpenHandle(this)->is_external();
+}
+
+
+v8::ArrayBuffer::Contents v8::ArrayBuffer::Externalize() {
+ i::Handle<i::JSArrayBuffer> obj = Utils::OpenHandle(this);
+ ApiCheck(!obj->is_external(),
+ "v8::ArrayBuffer::Externalize",
+ "ArrayBuffer already externalized");
+ obj->set_is_external(true);
+ size_t byte_length = static_cast<size_t>(obj->byte_length()->Number());
+ Contents contents;
+ contents.data_ = obj->backing_store();
+ contents.byte_length_ = byte_length;
+ return contents;
+}
+
+
+void v8::ArrayBuffer::Neuter() {
+ i::Handle<i::JSArrayBuffer> obj = Utils::OpenHandle(this);
+ i::Isolate* isolate = obj->GetIsolate();
+ ApiCheck(obj->is_external(),
+ "v8::ArrayBuffer::Neuter",
+ "Only externalized ArrayBuffers can be neutered");
+ LOG_API(obj->GetIsolate(), "v8::ArrayBuffer::Neuter()");
+ ENTER_V8(isolate);
+
+ for (i::Handle<i::Object> view_obj(obj->weak_first_view(), isolate);
+ !view_obj->IsUndefined();) {
+ i::Handle<i::JSArrayBufferView> view(i::JSArrayBufferView::cast(*view_obj));
+ if (view->IsJSTypedArray()) {
+ i::JSTypedArray::cast(*view)->Neuter();
+ } else if (view->IsJSDataView()) {
+ i::JSDataView::cast(*view)->Neuter();
+ } else {
+ UNREACHABLE();
+ }
+ view_obj = i::handle(view->weak_next(), isolate);
+ }
+ obj->Neuter();
+}
+
+
+size_t v8::ArrayBuffer::ByteLength() const {
+ i::Handle<i::JSArrayBuffer> obj = Utils::OpenHandle(this);
+ return static_cast<size_t>(obj->byte_length()->Number());
+}
+
+
+Local<ArrayBuffer> v8::ArrayBuffer::New(size_t byte_length) {
+ i::Isolate* isolate = i::Isolate::Current();
+ EnsureInitializedForIsolate(isolate, "v8::ArrayBuffer::New(size_t)");
+ LOG_API(isolate, "v8::ArrayBuffer::New(size_t)");
+ ENTER_V8(isolate);
+ i::Handle<i::JSArrayBuffer> obj =
+ isolate->factory()->NewJSArrayBuffer();
+ i::Runtime::SetupArrayBufferAllocatingData(isolate, obj, byte_length);
+ return Utils::ToLocal(obj);
+}
+
+
+Local<ArrayBuffer> v8::ArrayBuffer::New(void* data, size_t byte_length) {
i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::String::NewSymbol()");
- LOG_API(isolate, "String::NewSymbol(char)");
+ EnsureInitializedForIsolate(isolate, "v8::ArrayBuffer::New(void*, size_t)");
+ LOG_API(isolate, "v8::ArrayBuffer::New(void*, size_t)");
ENTER_V8(isolate);
+ i::Handle<i::JSArrayBuffer> obj =
+ isolate->factory()->NewJSArrayBuffer();
+ i::Runtime::SetupArrayBuffer(isolate, obj, true, data, byte_length);
+ return Utils::ToLocal(obj);
+}
+
+
+Local<ArrayBuffer> v8::ArrayBufferView::Buffer() {
+ i::Handle<i::JSArrayBufferView> obj = Utils::OpenHandle(this);
+ ASSERT(obj->buffer()->IsJSArrayBuffer());
+ i::Handle<i::JSArrayBuffer> buffer(i::JSArrayBuffer::cast(obj->buffer()));
+ return Utils::ToLocal(buffer);
+}
+
+
+size_t v8::ArrayBufferView::ByteOffset() {
+ i::Handle<i::JSArrayBufferView> obj = Utils::OpenHandle(this);
+ return static_cast<size_t>(obj->byte_offset()->Number());
+}
+
+
+size_t v8::ArrayBufferView::ByteLength() {
+ i::Handle<i::JSArrayBufferView> obj = Utils::OpenHandle(this);
+ return static_cast<size_t>(obj->byte_length()->Number());
+}
+
+
+size_t v8::TypedArray::Length() {
+ i::Handle<i::JSTypedArray> obj = Utils::OpenHandle(this);
+ return static_cast<size_t>(obj->length()->Number());
+}
+
+
+static inline void SetupArrayBufferView(
+ i::Isolate* isolate,
+ i::Handle<i::JSArrayBufferView> obj,
+ i::Handle<i::JSArrayBuffer> buffer,
+ size_t byte_offset,
+ size_t byte_length) {
+ ASSERT(byte_offset + byte_length <=
+ static_cast<size_t>(buffer->byte_length()->Number()));
+
+ obj->set_buffer(*buffer);
+
+ obj->set_weak_next(buffer->weak_first_view());
+ buffer->set_weak_first_view(*obj);
+
+ i::Handle<i::Object> byte_offset_object =
+ isolate->factory()->NewNumberFromSize(byte_offset);
+ obj->set_byte_offset(*byte_offset_object);
+
+ i::Handle<i::Object> byte_length_object =
+ isolate->factory()->NewNumberFromSize(byte_length);
+ obj->set_byte_length(*byte_length_object);
+}
+
+template<typename ElementType,
+ ExternalArrayType array_type,
+ i::ElementsKind elements_kind>
+i::Handle<i::JSTypedArray> NewTypedArray(
+ i::Isolate* isolate,
+ Handle<ArrayBuffer> array_buffer, size_t byte_offset, size_t length) {
+ i::Handle<i::JSTypedArray> obj =
+ isolate->factory()->NewJSTypedArray(array_type);
+ i::Handle<i::JSArrayBuffer> buffer = Utils::OpenHandle(*array_buffer);
+
+ ASSERT(byte_offset % sizeof(ElementType) == 0);
+
+ SetupArrayBufferView(
+ isolate, obj, buffer, byte_offset, length * sizeof(ElementType));
+
+ i::Handle<i::Object> length_object =
+ isolate->factory()->NewNumberFromSize(length);
+ obj->set_length(*length_object);
+
+ i::Handle<i::ExternalArray> elements =
+ isolate->factory()->NewExternalArray(
+ static_cast<int>(length), array_type,
+ static_cast<uint8_t*>(buffer->backing_store()) + byte_offset);
+ obj->set_elements(*elements);
+ return obj;
+}
+
+
+#define TYPED_ARRAY_NEW(TypedArray, element_type, array_type, elements_kind) \
+ Local<TypedArray> TypedArray::New(Handle<ArrayBuffer> array_buffer, \
+ size_t byte_offset, size_t length) { \
+ i::Isolate* isolate = i::Isolate::Current(); \
+ EnsureInitializedForIsolate(isolate, \
+ "v8::" #TypedArray "::New(Handle<ArrayBuffer>, size_t, size_t)"); \
+ LOG_API(isolate, \
+ "v8::" #TypedArray "::New(Handle<ArrayBuffer>, size_t, size_t)"); \
+ ENTER_V8(isolate); \
+ i::Handle<i::JSTypedArray> obj = \
+ NewTypedArray<element_type, array_type, elements_kind>( \
+ isolate, array_buffer, byte_offset, length); \
+ return Utils::ToLocal##TypedArray(obj); \
+ }
+
+
+TYPED_ARRAY_NEW(Uint8Array, uint8_t, kExternalUnsignedByteArray,
+ i::EXTERNAL_UNSIGNED_BYTE_ELEMENTS)
+TYPED_ARRAY_NEW(Uint8ClampedArray, uint8_t, kExternalPixelArray,
+ i::EXTERNAL_PIXEL_ELEMENTS)
+TYPED_ARRAY_NEW(Int8Array, int8_t, kExternalByteArray,
+ i::EXTERNAL_BYTE_ELEMENTS)
+TYPED_ARRAY_NEW(Uint16Array, uint16_t, kExternalUnsignedShortArray,
+ i::EXTERNAL_UNSIGNED_SHORT_ELEMENTS)
+TYPED_ARRAY_NEW(Int16Array, int16_t, kExternalShortArray,
+ i::EXTERNAL_SHORT_ELEMENTS)
+TYPED_ARRAY_NEW(Uint32Array, uint32_t, kExternalUnsignedIntArray,
+ i::EXTERNAL_UNSIGNED_INT_ELEMENTS)
+TYPED_ARRAY_NEW(Int32Array, int32_t, kExternalIntArray,
+ i::EXTERNAL_INT_ELEMENTS)
+TYPED_ARRAY_NEW(Float32Array, float, kExternalFloatArray,
+ i::EXTERNAL_FLOAT_ELEMENTS)
+TYPED_ARRAY_NEW(Float64Array, double, kExternalDoubleArray,
+ i::EXTERNAL_DOUBLE_ELEMENTS)
+
+#undef TYPED_ARRAY_NEW
+
+Local<DataView> DataView::New(Handle<ArrayBuffer> array_buffer,
+ size_t byte_offset, size_t byte_length) {
+ i::Isolate* isolate = i::Isolate::Current();
+ EnsureInitializedForIsolate(
+ isolate, "v8::DataView::New(void*, size_t, size_t)");
+ LOG_API(isolate, "v8::DataView::New(void*, size_t, size_t)");
+ ENTER_V8(isolate);
+ i::Handle<i::JSDataView> obj = isolate->factory()->NewJSDataView();
+ i::Handle<i::JSArrayBuffer> buffer = Utils::OpenHandle(*array_buffer);
+ SetupArrayBufferView(
+ isolate, obj, buffer, byte_offset, byte_length);
+ return Utils::ToLocal(obj);
+}
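Putting the new buffer and view constructors together (illustrative; runs
inside an initialized isolate):

  void MakeViews(v8::Isolate* isolate) {
    v8::HandleScope handle_scope(isolate);
    v8::Local<v8::ArrayBuffer> ab = v8::ArrayBuffer::New(1024);
    v8::Local<v8::Uint8Array> bytes = v8::Uint8Array::New(ab, 0, 1024);
    v8::Local<v8::Float64Array> doubles = v8::Float64Array::New(ab, 0, 128);
    v8::Local<v8::DataView> view = v8::DataView::New(ab, 0, 1024);
    // All three views share ab's backing store; doubles->Length() == 128 and
    // view->ByteLength() == 1024, per SetupArrayBufferView() above.
  }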
+
+
+Local<Symbol> v8::Symbol::New(Isolate* isolate) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ EnsureInitializedForIsolate(i_isolate, "v8::Symbol::New()");
+ LOG_API(i_isolate, "Symbol::New()");
+ ENTER_V8(i_isolate);
+ i::Handle<i::Symbol> result = i_isolate->factory()->NewSymbol();
+ return Utils::ToLocal(result);
+}
+
+
+Local<Symbol> v8::Symbol::New(Isolate* isolate, const char* data, int length) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ EnsureInitializedForIsolate(i_isolate, "v8::Symbol::New()");
+ LOG_API(i_isolate, "Symbol::New(char)");
+ ENTER_V8(i_isolate);
if (length == -1) length = i::StrLength(data);
- i::Handle<i::String> result =
- isolate->factory()->LookupSymbol(i::Vector<const char>(data, length));
+ i::Handle<i::String> name = i_isolate->factory()->NewStringFromUtf8(
+ i::Vector<const char>(data, length));
+ i::Handle<i::Symbol> result = i_isolate->factory()->NewSymbol();
+ result->set_name(*name);
return Utils::ToLocal(result);
}
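Sketch of the new symbol API (illustrative; assumes an entered isolate and a
surrounding HandleScope):

  v8::Local<v8::Symbol> anon = v8::Symbol::New(isolate);
  v8::Local<v8::Symbol> named = v8::Symbol::New(isolate, "secret", -1);
  v8::Local<v8::Value> name = named->Name();  // the "secret" string
  v8::Local<v8::Value> boxed = v8::SymbolObject::New(isolate, named);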
@@ -5215,12 +6153,19 @@ Local<String> v8::String::NewSymbol(const char* data, int length) {
Local<Number> v8::Number::New(double value) {
i::Isolate* isolate = i::Isolate::Current();
EnsureInitializedForIsolate(isolate, "v8::Number::New()");
- if (isnan(value)) {
+ return Number::New(reinterpret_cast<Isolate*>(isolate), value);
+}
+
+
+Local<Number> v8::Number::New(Isolate* isolate, double value) {
+ i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ ASSERT(internal_isolate->IsInitialized());
+ if (std::isnan(value)) {
// Introduce only canonical NaN value into the VM, to avoid signaling NaNs.
value = i::OS::nan_value();
}
- ENTER_V8(isolate);
- i::Handle<i::Object> result = isolate->factory()->NewNumber(value);
+ ENTER_V8(internal_isolate);
+ i::Handle<i::Object> result = internal_isolate->factory()->NewNumber(value);
return Utils::NumberToLocal(result);
}
@@ -5265,6 +6210,18 @@ Local<Integer> v8::Integer::NewFromUnsigned(uint32_t value, Isolate* isolate) {
}
+#ifdef DEBUG
+v8::AssertNoGCScope::AssertNoGCScope(v8::Isolate* isolate) {
+ disallow_heap_allocation_ = new i::DisallowHeapAllocation();
+}
+
+
+v8::AssertNoGCScope::~AssertNoGCScope() {
+ delete static_cast<i::DisallowHeapAllocation*>(disallow_heap_allocation_);
+}
+#endif
+
+
void V8::IgnoreOutOfMemoryException() {
EnterIsolateIfNeeded()->set_ignore_out_of_memory(true);
}
@@ -5318,114 +6275,191 @@ void V8::SetCaptureStackTraceForUncaughtExceptions(
void V8::SetCounterFunction(CounterLookupCallback callback) {
i::Isolate* isolate = EnterIsolateIfNeeded();
- if (IsDeadCheck(isolate, "v8::V8::SetCounterFunction()")) return;
isolate->stats_table()->SetCounterFunction(callback);
}
+
void V8::SetCreateHistogramFunction(CreateHistogramCallback callback) {
i::Isolate* isolate = EnterIsolateIfNeeded();
- if (IsDeadCheck(isolate, "v8::V8::SetCreateHistogramFunction()")) return;
isolate->stats_table()->SetCreateHistogramFunction(callback);
isolate->InitializeLoggingAndCounters();
isolate->counters()->ResetHistograms();
}
+
void V8::SetAddHistogramSampleFunction(AddHistogramSampleCallback callback) {
i::Isolate* isolate = EnterIsolateIfNeeded();
- if (IsDeadCheck(isolate, "v8::V8::SetAddHistogramSampleFunction()")) return;
isolate->stats_table()->
SetAddHistogramSampleFunction(callback);
}
-void V8::EnableSlidingStateWindow() {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::V8::EnableSlidingStateWindow()")) return;
- isolate->logger()->EnableSlidingStateWindow();
-}
-
-
void V8::SetFailedAccessCheckCallbackFunction(
FailedAccessCheckCallback callback) {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::V8::SetFailedAccessCheckCallbackFunction()")) {
- return;
- }
isolate->SetFailedAccessCheckCallback(callback);
}
-void V8::AddObjectGroup(Persistent<Value>* objects,
- size_t length,
- RetainedObjectInfo* info) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::V8::AddObjectGroup()")) return;
- STATIC_ASSERT(sizeof(Persistent<Value>) == sizeof(i::Object**));
- isolate->global_handles()->AddObjectGroup(
- reinterpret_cast<i::Object***>(objects), length, info);
-}
-
-void V8::AddImplicitReferences(Persistent<Object> parent,
- Persistent<Value>* children,
- size_t length) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::V8::AddImplicitReferences()")) return;
- STATIC_ASSERT(sizeof(Persistent<Value>) == sizeof(i::Object**));
- isolate->global_handles()->AddImplicitReferences(
- i::Handle<i::HeapObject>::cast(Utils::OpenHandle(*parent)).location(),
- reinterpret_cast<i::Object***>(children), length);
+intptr_t Isolate::AdjustAmountOfExternalAllocatedMemory(
+ intptr_t change_in_bytes) {
+ i::Heap* heap = reinterpret_cast<i::Isolate*>(this)->heap();
+ return heap->AdjustAmountOfExternalAllocatedMemory(change_in_bytes);
}
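
A sketch of how an embedder might use the per-isolate accounting above when wrapping a native buffer (illustrative; `isolate` and `size_in_bytes` are assumed names):

    // Tell the GC that `size_in_bytes` of off-heap memory is now retained
    // by a JS-reachable wrapper object.
    isolate->AdjustAmountOfExternalAllocatedMemory(
        static_cast<intptr_t>(size_in_bytes));
    // ...and report it released when the wrapper is torn down.
    isolate->AdjustAmountOfExternalAllocatedMemory(
        -static_cast<intptr_t>(size_in_bytes));
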
intptr_t V8::AdjustAmountOfExternalAllocatedMemory(intptr_t change_in_bytes) {
i::Isolate* isolate = i::Isolate::UncheckedCurrent();
- if (isolate == NULL || !isolate->IsInitialized() ||
- IsDeadCheck(isolate, "v8::V8::AdjustAmountOfExternalAllocatedMemory()")) {
+ if (isolate == NULL || !isolate->IsInitialized()) {
return 0;
}
- return isolate->heap()->AdjustAmountOfExternalAllocatedMemory(
- change_in_bytes);
+ Isolate* isolate_ext = reinterpret_cast<Isolate*>(isolate);
+ return isolate_ext->AdjustAmountOfExternalAllocatedMemory(change_in_bytes);
}
-void V8::SetGlobalGCPrologueCallback(GCCallback callback) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::V8::SetGlobalGCPrologueCallback()")) return;
- isolate->heap()->SetGlobalGCPrologueCallback(callback);
+HeapProfiler* Isolate::GetHeapProfiler() {
+ i::HeapProfiler* heap_profiler =
+ reinterpret_cast<i::Isolate*>(this)->heap_profiler();
+ return reinterpret_cast<HeapProfiler*>(heap_profiler);
}
-void V8::SetGlobalGCEpilogueCallback(GCCallback callback) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::V8::SetGlobalGCEpilogueCallback()")) return;
- isolate->heap()->SetGlobalGCEpilogueCallback(callback);
+CpuProfiler* Isolate::GetCpuProfiler() {
+ i::CpuProfiler* cpu_profiler =
+ reinterpret_cast<i::Isolate*>(this)->cpu_profiler();
+ return reinterpret_cast<CpuProfiler*>(cpu_profiler);
+}
+
+
+bool Isolate::InContext() {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ return isolate->context() != NULL;
+}
+
+
+v8::Local<v8::Context> Isolate::GetCurrentContext() {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ i::Context* context = isolate->context();
+ if (context == NULL) return Local<Context>();
+ i::Context* native_context = context->global_object()->native_context();
+ if (native_context == NULL) return Local<Context>();
+ return Utils::ToLocal(i::Handle<i::Context>(native_context));
+}
+
+
+v8::Local<v8::Context> Isolate::GetCallingContext() {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ i::Handle<i::Object> calling = isolate->GetCallingNativeContext();
+ if (calling.is_null()) return Local<Context>();
+ return Utils::ToLocal(i::Handle<i::Context>::cast(calling));
+}
+
+
+v8::Local<v8::Context> Isolate::GetEnteredContext() {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ i::Handle<i::Object> last =
+ isolate->handle_scope_implementer()->LastEnteredContext();
+ if (last.is_null()) return Local<Context>();
+ return Utils::ToLocal(i::Handle<i::Context>::cast(last));
+}
+
+
+v8::Local<Value> Isolate::ThrowException(v8::Local<v8::Value> value) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ ENTER_V8(isolate);
+ // If we're passed an empty handle, we throw an undefined exception
+ // to deal more gracefully with out of memory situations.
+ if (value.IsEmpty()) {
+ isolate->ScheduleThrow(isolate->heap()->undefined_value());
+ } else {
+ isolate->ScheduleThrow(*Utils::OpenHandle(*value));
+ }
+ return v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
+}
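
A usage sketch of the new isolate-level throw (not part of the patch), e.g. from inside a v8::FunctionCallback, where `info` and `isolate = info.GetIsolate()` are assumed:

    if (info.Length() < 1 || !info[0]->IsString()) {
      isolate->ThrowException(
          v8::Exception::TypeError(v8::String::New("expected a string")));
      return;  // the scheduled exception propagates once the callback exits
    }
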
+
+
+void Isolate::SetObjectGroupId(internal::Object** object, UniqueId id) {
+ i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(this);
+ internal_isolate->global_handles()->SetObjectGroupId(
+ v8::internal::Handle<v8::internal::Object>(object).location(),
+ id);
+}
+
+
+void Isolate::SetReferenceFromGroup(UniqueId id, internal::Object** object) {
+ i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(this);
+ internal_isolate->global_handles()->SetReferenceFromGroup(
+ id,
+ v8::internal::Handle<v8::internal::Object>(object).location());
+}
+
+
+void Isolate::SetReference(internal::Object** parent,
+ internal::Object** child) {
+ i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(this);
+ i::Object** parent_location =
+ v8::internal::Handle<v8::internal::Object>(parent).location();
+ internal_isolate->global_handles()->SetReference(
+ reinterpret_cast<i::HeapObject**>(parent_location),
+ v8::internal::Handle<v8::internal::Object>(child).location());
+}
+
+
+void Isolate::AddGCPrologueCallback(GCPrologueCallback callback,
+ GCType gc_type) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ isolate->heap()->AddGCPrologueCallback(callback, gc_type);
+}
+
+
+void Isolate::RemoveGCPrologueCallback(GCPrologueCallback callback) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ isolate->heap()->RemoveGCPrologueCallback(callback);
+}
+
+
+void Isolate::AddGCEpilogueCallback(GCEpilogueCallback callback,
+ GCType gc_type) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ isolate->heap()->AddGCEpilogueCallback(callback, gc_type);
+}
+
+
+void Isolate::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ isolate->heap()->RemoveGCEpilogueCallback(callback);
}
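
As an illustration (not part of the patch), a per-isolate prologue callback matching the v8::Isolate::GCPrologueCallback signature might be registered for full GCs only; `OnGCPrologue` is a hypothetical embedder function:

    static void OnGCPrologue(v8::Isolate* isolate, v8::GCType type,
                             v8::GCCallbackFlags flags) {
      // e.g. note the time at which a mark-sweep collection starts.
    }

    // Later, during embedder initialisation:
    isolate->AddGCPrologueCallback(OnGCPrologue, v8::kGCTypeMarkSweepCompact);
    // ...
    isolate->RemoveGCPrologueCallback(OnGCPrologue);
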
void V8::AddGCPrologueCallback(GCPrologueCallback callback, GCType gc_type) {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::V8::AddGCPrologueCallback()")) return;
- isolate->heap()->AddGCPrologueCallback(callback, gc_type);
+ isolate->heap()->AddGCPrologueCallback(
+ reinterpret_cast<v8::Isolate::GCPrologueCallback>(callback),
+ gc_type,
+ false);
}
void V8::RemoveGCPrologueCallback(GCPrologueCallback callback) {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::V8::RemoveGCPrologueCallback()")) return;
- isolate->heap()->RemoveGCPrologueCallback(callback);
+ isolate->heap()->RemoveGCPrologueCallback(
+ reinterpret_cast<v8::Isolate::GCPrologueCallback>(callback));
}
void V8::AddGCEpilogueCallback(GCEpilogueCallback callback, GCType gc_type) {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::V8::AddGCEpilogueCallback()")) return;
- isolate->heap()->AddGCEpilogueCallback(callback, gc_type);
+ isolate->heap()->AddGCEpilogueCallback(
+ reinterpret_cast<v8::Isolate::GCEpilogueCallback>(callback),
+ gc_type,
+ false);
}
void V8::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::V8::RemoveGCEpilogueCallback()")) return;
- isolate->heap()->RemoveGCEpilogueCallback(callback);
+ isolate->heap()->RemoveGCEpilogueCallback(
+ reinterpret_cast<v8::Isolate::GCEpilogueCallback>(callback));
}
@@ -5433,7 +6467,6 @@ void V8::AddMemoryAllocationCallback(MemoryAllocationCallback callback,
ObjectSpace space,
AllocationAction action) {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::V8::AddMemoryAllocationCallback()")) return;
isolate->memory_allocator()->AddMemoryAllocationCallback(
callback, space, action);
}
@@ -5441,7 +6474,6 @@ void V8::AddMemoryAllocationCallback(MemoryAllocationCallback callback,
void V8::RemoveMemoryAllocationCallback(MemoryAllocationCallback callback) {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::V8::RemoveMemoryAllocationCallback()")) return;
isolate->memory_allocator()->RemoveMemoryAllocationCallback(
callback);
}
@@ -5449,62 +6481,15 @@ void V8::RemoveMemoryAllocationCallback(MemoryAllocationCallback callback) {
void V8::AddCallCompletedCallback(CallCompletedCallback callback) {
if (callback == NULL) return;
- i::Isolate::EnsureDefaultIsolate();
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::V8::AddLeaveScriptCallback()")) return;
i::V8::AddCallCompletedCallback(callback);
}
void V8::RemoveCallCompletedCallback(CallCompletedCallback callback) {
- i::Isolate::EnsureDefaultIsolate();
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::V8::RemoveLeaveScriptCallback()")) return;
i::V8::RemoveCallCompletedCallback(callback);
}
-void V8::PauseProfiler() {
- i::Isolate* isolate = i::Isolate::Current();
- isolate->logger()->PauseProfiler();
-}
-
-
-void V8::ResumeProfiler() {
- i::Isolate* isolate = i::Isolate::Current();
- isolate->logger()->ResumeProfiler();
-}
-
-
-bool V8::IsProfilerPaused() {
- i::Isolate* isolate = i::Isolate::Current();
- return isolate->logger()->IsProfilerPaused();
-}
-
-
-int V8::GetCurrentThreadId() {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "V8::GetCurrentThreadId()");
- return isolate->thread_id().ToInteger();
-}
-
-
-void V8::TerminateExecution(int thread_id) {
- i::Isolate* isolate = i::Isolate::Current();
- if (!isolate->IsInitialized()) return;
- API_ENTRY_CHECK(isolate, "V8::TerminateExecution()");
- // If the thread_id identifies the current thread just terminate
- // execution right away. Otherwise, ask the thread manager to
- // terminate the thread with the given id if any.
- i::ThreadId internal_tid = i::ThreadId::FromInteger(thread_id);
- if (isolate->thread_id().Equals(internal_tid)) {
- isolate->stack_guard()->TerminateExecution();
- } else {
- isolate->thread_manager()->TerminateExecution(internal_tid);
- }
-}
-
-
void V8::TerminateExecution(Isolate* isolate) {
// If no isolate is supplied, use the default isolate.
if (isolate != NULL) {
@@ -5522,6 +6507,12 @@ bool V8::IsExecutionTerminating(Isolate* isolate) {
}
+void V8::CancelTerminateExecution(Isolate* isolate) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ i_isolate->stack_guard()->CancelTerminateExecution();
+}
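
A sketch of the intended pairing (illustrative only): after a watchdog has requested termination, the embedder can later allow the isolate to run script again:

    v8::V8::TerminateExecution(isolate);      // e.g. from a watchdog thread
    // ... once the long-running script has been unwound ...
    v8::V8::CancelTerminateExecution(isolate);
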
+
+
Isolate* Isolate::GetCurrent() {
i::Isolate* isolate = i::Isolate::UncheckedCurrent();
return reinterpret_cast<Isolate*>(isolate);
@@ -5557,10 +6548,29 @@ void Isolate::Exit() {
}
+void Isolate::GetHeapStatistics(HeapStatistics* heap_statistics) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ if (!isolate->IsInitialized()) {
+ heap_statistics->total_heap_size_ = 0;
+ heap_statistics->total_heap_size_executable_ = 0;
+ heap_statistics->total_physical_size_ = 0;
+ heap_statistics->used_heap_size_ = 0;
+ heap_statistics->heap_size_limit_ = 0;
+ return;
+ }
+ i::Heap* heap = isolate->heap();
+ heap_statistics->total_heap_size_ = heap->CommittedMemory();
+ heap_statistics->total_heap_size_executable_ =
+ heap->CommittedMemoryExecutable();
+ heap_statistics->total_physical_size_ = heap->CommittedPhysicalMemory();
+ heap_statistics->used_heap_size_ = heap->SizeOfObjects();
+ heap_statistics->heap_size_limit_ = heap->MaxReserved();
+}
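
A minimal sketch of reading the statistics filled in above (not part of the patch); `isolate` is assumed to be a live v8::Isolate*:

    v8::HeapStatistics stats;
    isolate->GetHeapStatistics(&stats);
    size_t used = stats.used_heap_size();        // bytes of live objects
    size_t committed = stats.total_heap_size();  // committed heap memory
    size_t limit = stats.heap_size_limit();      // hard upper bound
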
+
+
String::Utf8Value::Utf8Value(v8::Handle<v8::Value> obj)
: str_(NULL), length_(0) {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::String::Utf8Value::Utf8Value()")) return;
if (obj.IsEmpty()) return;
ENTER_V8(isolate);
i::HandleScope scope(isolate);
@@ -5568,7 +6578,7 @@ String::Utf8Value::Utf8Value(v8::Handle<v8::Value> obj)
Handle<String> str = obj->ToString();
if (str.IsEmpty()) return;
i::Handle<i::String> i_str = Utils::OpenHandle(*str);
- length_ = i::Utf8Length(i_str);
+ length_ = v8::Utf8Length(*i_str, isolate);
str_ = i::NewArray<char>(length_ + 1);
str->WriteUtf8(str_);
}
@@ -5582,16 +6592,16 @@ String::Utf8Value::~Utf8Value() {
String::AsciiValue::AsciiValue(v8::Handle<v8::Value> obj)
: str_(NULL), length_(0) {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::String::AsciiValue::AsciiValue()")) return;
if (obj.IsEmpty()) return;
ENTER_V8(isolate);
i::HandleScope scope(isolate);
TryCatch try_catch;
Handle<String> str = obj->ToString();
if (str.IsEmpty()) return;
- length_ = str->Length();
+ length_ = str->Utf8Length();
str_ = i::NewArray<char>(length_ + 1);
- str->WriteAscii(str_);
+ str->WriteUtf8(str_);
+ ASSERT(i::String::NonAsciiStart(str_, length_) >= length_);
}
@@ -5603,7 +6613,6 @@ String::AsciiValue::~AsciiValue() {
String::Value::Value(v8::Handle<v8::Value> obj)
: str_(NULL), length_(0) {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::String::Value::Value()")) return;
if (obj.IsEmpty()) return;
ENTER_V8(isolate);
i::HandleScope scope(isolate);
@@ -5620,6 +6629,7 @@ String::Value::~Value() {
i::DeleteArray(str_);
}
+
Local<Value> Exception::RangeError(v8::Handle<v8::String> raw_message) {
i::Isolate* isolate = i::Isolate::Current();
LOG_API(isolate, "RangeError");
@@ -5632,10 +6642,11 @@ Local<Value> Exception::RangeError(v8::Handle<v8::String> raw_message) {
i::Handle<i::Object> result = isolate->factory()->NewRangeError(message);
error = *result;
}
- i::Handle<i::Object> result(error);
+ i::Handle<i::Object> result(error, isolate);
return Utils::ToLocal(result);
}
+
Local<Value> Exception::ReferenceError(v8::Handle<v8::String> raw_message) {
i::Isolate* isolate = i::Isolate::Current();
LOG_API(isolate, "ReferenceError");
@@ -5649,10 +6660,11 @@ Local<Value> Exception::ReferenceError(v8::Handle<v8::String> raw_message) {
isolate->factory()->NewReferenceError(message);
error = *result;
}
- i::Handle<i::Object> result(error);
+ i::Handle<i::Object> result(error, isolate);
return Utils::ToLocal(result);
}
+
Local<Value> Exception::SyntaxError(v8::Handle<v8::String> raw_message) {
i::Isolate* isolate = i::Isolate::Current();
LOG_API(isolate, "SyntaxError");
@@ -5665,10 +6677,11 @@ Local<Value> Exception::SyntaxError(v8::Handle<v8::String> raw_message) {
i::Handle<i::Object> result = isolate->factory()->NewSyntaxError(message);
error = *result;
}
- i::Handle<i::Object> result(error);
+ i::Handle<i::Object> result(error, isolate);
return Utils::ToLocal(result);
}
+
Local<Value> Exception::TypeError(v8::Handle<v8::String> raw_message) {
i::Isolate* isolate = i::Isolate::Current();
LOG_API(isolate, "TypeError");
@@ -5681,10 +6694,11 @@ Local<Value> Exception::TypeError(v8::Handle<v8::String> raw_message) {
i::Handle<i::Object> result = isolate->factory()->NewTypeError(message);
error = *result;
}
- i::Handle<i::Object> result(error);
+ i::Handle<i::Object> result(error, isolate);
return Utils::ToLocal(result);
}
+
Local<Value> Exception::Error(v8::Handle<v8::String> raw_message) {
i::Isolate* isolate = i::Isolate::Current();
LOG_API(isolate, "Error");
@@ -5697,7 +6711,7 @@ Local<Value> Exception::Error(v8::Handle<v8::String> raw_message) {
i::Handle<i::Object> result = isolate->factory()->NewError(message);
error = *result;
}
- i::Handle<i::Object> result(error);
+ i::Handle<i::Object> result(error, isolate);
return Utils::ToLocal(result);
}
@@ -5706,37 +6720,6 @@ Local<Value> Exception::Error(v8::Handle<v8::String> raw_message) {
#ifdef ENABLE_DEBUGGER_SUPPORT
-static void EventCallbackWrapper(const v8::Debug::EventDetails& event_details) {
- i::Isolate* isolate = i::Isolate::Current();
- if (isolate->debug_event_callback() != NULL) {
- isolate->debug_event_callback()(event_details.GetEvent(),
- event_details.GetExecutionState(),
- event_details.GetEventData(),
- event_details.GetCallbackData());
- }
-}
-
-
-bool Debug::SetDebugEventListener(EventCallback that, Handle<Value> data) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::Debug::SetDebugEventListener()");
- ON_BAILOUT(isolate, "v8::Debug::SetDebugEventListener()", return false);
- ENTER_V8(isolate);
-
- isolate->set_debug_event_callback(that);
-
- i::HandleScope scope(isolate);
- i::Handle<i::Object> foreign = isolate->factory()->undefined_value();
- if (that != NULL) {
- foreign =
- isolate->factory()->NewForeign(FUNCTION_ADDR(EventCallbackWrapper));
- }
- isolate->debugger()->SetEventListener(foreign,
- Utils::OpenHandle(*data, true));
- return true;
-}
-
-
bool Debug::SetDebugEventListener2(EventCallback2 that, Handle<Value> data) {
i::Isolate* isolate = i::Isolate::Current();
EnsureInitializedForIsolate(isolate, "v8::Debug::SetDebugEventListener2()");
@@ -5796,40 +6779,21 @@ void Debug::DebugBreakForCommand(ClientData* data, Isolate* isolate) {
}
-static void MessageHandlerWrapper(const v8::Debug::Message& message) {
- i::Isolate* isolate = i::Isolate::Current();
- if (isolate->message_handler()) {
- v8::String::Value json(message.GetJSON());
- (isolate->message_handler())(*json, json.length(), message.GetClientData());
- }
-}
-
-
-void Debug::SetMessageHandler(v8::Debug::MessageHandler handler,
- bool message_handler_thread) {
+void Debug::SetMessageHandler2(v8::Debug::MessageHandler2 handler) {
i::Isolate* isolate = i::Isolate::Current();
EnsureInitializedForIsolate(isolate, "v8::Debug::SetMessageHandler");
ENTER_V8(isolate);
-
- // Message handler thread not supported any more. Parameter temporally left in
- // the API for client compatibility reasons.
- CHECK(!message_handler_thread);
-
- // TODO(sgjesse) support the old message handler API through a simple wrapper.
- isolate->set_message_handler(handler);
- if (handler != NULL) {
- isolate->debugger()->SetMessageHandler(MessageHandlerWrapper);
- } else {
- isolate->debugger()->SetMessageHandler(NULL);
- }
+ isolate->debugger()->SetMessageHandler(handler);
}
-void Debug::SetMessageHandler2(v8::Debug::MessageHandler2 handler) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::Debug::SetMessageHandler");
- ENTER_V8(isolate);
- isolate->debugger()->SetMessageHandler(handler);
+void Debug::SendCommand(Isolate* isolate,
+ const uint16_t* command,
+ int length,
+ ClientData* client_data) {
+ i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ internal_isolate->debugger()->ProcessCommand(
+ i::Vector<const uint16_t>(command, length), client_data);
}
@@ -5853,7 +6817,8 @@ void Debug::SetHostDispatchHandler(HostDispatchHandler handler,
i::Isolate* isolate = i::Isolate::Current();
EnsureInitializedForIsolate(isolate, "v8::Debug::SetHostDispatchHandler");
ENTER_V8(isolate);
- isolate->debugger()->SetHostDispatchHandler(handler, period);
+ isolate->debugger()->SetHostDispatchHandler(
+ handler, i::TimeDelta::FromMilliseconds(period));
}
@@ -5895,13 +6860,13 @@ Local<Value> Debug::GetMirror(v8::Handle<v8::Value> obj) {
if (!isolate->IsInitialized()) return Local<Value>();
ON_BAILOUT(isolate, "v8::Debug::GetMirror()", return Local<Value>());
ENTER_V8(isolate);
- v8::HandleScope scope;
+ v8::HandleScope scope(reinterpret_cast<Isolate*>(isolate));
i::Debug* isolate_debug = isolate->debug();
isolate_debug->Load();
i::Handle<i::JSObject> debug(isolate_debug->debug_context()->global_object());
- i::Handle<i::String> name =
- isolate->factory()->LookupAsciiSymbol("MakeMirror");
- i::Handle<i::Object> fun_obj = i::GetProperty(debug, name);
+ i::Handle<i::String> name = isolate->factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("MakeMirror"));
+ i::Handle<i::Object> fun_obj = i::GetProperty(isolate, debug, name);
i::Handle<i::JSFunction> fun = i::Handle<i::JSFunction>::cast(fun_obj);
v8::Handle<v8::Function> v8_fun = Utils::ToLocal(fun);
const int kArgc = 1;
@@ -5927,7 +6892,7 @@ void Debug::DisableAgent() {
void Debug::ProcessDebugMessages() {
- i::Execution::ProcessDebugMessages(true);
+ i::Execution::ProcessDebugMessages(i::Isolate::Current(), true);
}
@@ -5957,81 +6922,72 @@ void Debug::SetLiveEditEnabled(bool enable, Isolate* isolate) {
Handle<String> CpuProfileNode::GetFunctionName() const {
i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfileNode::GetFunctionName");
const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this);
const i::CodeEntry* entry = node->entry();
if (!entry->has_name_prefix()) {
- return Handle<String>(ToApi<String>(
- isolate->factory()->LookupAsciiSymbol(entry->name())));
+ return ToApiHandle<String>(
+ isolate->factory()->InternalizeUtf8String(entry->name()));
} else {
- return Handle<String>(ToApi<String>(isolate->factory()->NewConsString(
- isolate->factory()->LookupAsciiSymbol(entry->name_prefix()),
- isolate->factory()->LookupAsciiSymbol(entry->name()))));
+ return ToApiHandle<String>(isolate->factory()->NewConsString(
+ isolate->factory()->InternalizeUtf8String(entry->name_prefix()),
+ isolate->factory()->InternalizeUtf8String(entry->name())));
}
}
-Handle<String> CpuProfileNode::GetScriptResourceName() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfileNode::GetScriptResourceName");
+int CpuProfileNode::GetScriptId() const {
const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this);
- return Handle<String>(ToApi<String>(isolate->factory()->LookupAsciiSymbol(
- node->entry()->resource_name())));
+ const i::CodeEntry* entry = node->entry();
+ return entry->script_id();
}
-int CpuProfileNode::GetLineNumber() const {
+Handle<String> CpuProfileNode::GetScriptResourceName() const {
i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfileNode::GetLineNumber");
- return reinterpret_cast<const i::ProfileNode*>(this)->entry()->line_number();
+ const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this);
+ return ToApiHandle<String>(isolate->factory()->InternalizeUtf8String(
+ node->entry()->resource_name()));
}
-double CpuProfileNode::GetTotalTime() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfileNode::GetTotalTime");
- return reinterpret_cast<const i::ProfileNode*>(this)->GetTotalMillis();
+int CpuProfileNode::GetLineNumber() const {
+ return reinterpret_cast<const i::ProfileNode*>(this)->entry()->line_number();
}
-double CpuProfileNode::GetSelfTime() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfileNode::GetSelfTime");
- return reinterpret_cast<const i::ProfileNode*>(this)->GetSelfMillis();
+int CpuProfileNode::GetColumnNumber() const {
+ return reinterpret_cast<const i::ProfileNode*>(this)->
+ entry()->column_number();
}
-double CpuProfileNode::GetTotalSamplesCount() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfileNode::GetTotalSamplesCount");
- return reinterpret_cast<const i::ProfileNode*>(this)->total_ticks();
+const char* CpuProfileNode::GetBailoutReason() const {
+ const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this);
+ return node->entry()->bailout_reason();
}
-double CpuProfileNode::GetSelfSamplesCount() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfileNode::GetSelfSamplesCount");
+unsigned CpuProfileNode::GetHitCount() const {
return reinterpret_cast<const i::ProfileNode*>(this)->self_ticks();
}
unsigned CpuProfileNode::GetCallUid() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfileNode::GetCallUid");
return reinterpret_cast<const i::ProfileNode*>(this)->entry()->GetCallUid();
}
+unsigned CpuProfileNode::GetNodeId() const {
+ return reinterpret_cast<const i::ProfileNode*>(this)->id();
+}
+
+
int CpuProfileNode::GetChildrenCount() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfileNode::GetChildrenCount");
return reinterpret_cast<const i::ProfileNode*>(this)->children()->length();
}
const CpuProfileNode* CpuProfileNode::GetChild(int index) const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfileNode::GetChild");
const i::ProfileNode* child =
reinterpret_cast<const i::ProfileNode*>(this)->children()->at(index);
return reinterpret_cast<const CpuProfileNode*>(child);
@@ -6040,99 +6996,104 @@ const CpuProfileNode* CpuProfileNode::GetChild(int index) const {
void CpuProfile::Delete() {
i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfile::Delete");
- i::CpuProfiler::DeleteProfile(reinterpret_cast<i::CpuProfile*>(this));
- if (i::CpuProfiler::GetProfilesCount() == 0 &&
- !i::CpuProfiler::HasDetachedProfiles()) {
+ i::CpuProfiler* profiler = isolate->cpu_profiler();
+ ASSERT(profiler != NULL);
+ profiler->DeleteProfile(reinterpret_cast<i::CpuProfile*>(this));
+ if (profiler->GetProfilesCount() == 0) {
// If this was the last profile, clean up all accessory data as well.
- i::CpuProfiler::DeleteAllProfiles();
+ profiler->DeleteAllProfiles();
}
}
unsigned CpuProfile::GetUid() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfile::GetUid");
return reinterpret_cast<const i::CpuProfile*>(this)->uid();
}
Handle<String> CpuProfile::GetTitle() const {
i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfile::GetTitle");
const i::CpuProfile* profile = reinterpret_cast<const i::CpuProfile*>(this);
- return Handle<String>(ToApi<String>(isolate->factory()->LookupAsciiSymbol(
- profile->title())));
+ return ToApiHandle<String>(isolate->factory()->InternalizeUtf8String(
+ profile->title()));
}
-const CpuProfileNode* CpuProfile::GetBottomUpRoot() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfile::GetBottomUpRoot");
+const CpuProfileNode* CpuProfile::GetTopDownRoot() const {
const i::CpuProfile* profile = reinterpret_cast<const i::CpuProfile*>(this);
- return reinterpret_cast<const CpuProfileNode*>(profile->bottom_up()->root());
+ return reinterpret_cast<const CpuProfileNode*>(profile->top_down()->root());
}
-const CpuProfileNode* CpuProfile::GetTopDownRoot() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfile::GetTopDownRoot");
+const CpuProfileNode* CpuProfile::GetSample(int index) const {
const i::CpuProfile* profile = reinterpret_cast<const i::CpuProfile*>(this);
- return reinterpret_cast<const CpuProfileNode*>(profile->top_down()->root());
+ return reinterpret_cast<const CpuProfileNode*>(profile->sample(index));
}
-int CpuProfiler::GetProfilesCount() {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfiler::GetProfilesCount");
- return i::CpuProfiler::GetProfilesCount();
+int64_t CpuProfile::GetStartTime() const {
+ const i::CpuProfile* profile = reinterpret_cast<const i::CpuProfile*>(this);
+ return (profile->start_time() - i::Time::UnixEpoch()).InMicroseconds();
}
-const CpuProfile* CpuProfiler::GetProfile(int index,
- Handle<Value> security_token) {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfiler::GetProfile");
- return reinterpret_cast<const CpuProfile*>(
- i::CpuProfiler::GetProfile(
- security_token.IsEmpty() ? NULL : *Utils::OpenHandle(*security_token),
- index));
+int64_t CpuProfile::GetEndTime() const {
+ const i::CpuProfile* profile = reinterpret_cast<const i::CpuProfile*>(this);
+ return (profile->end_time() - i::Time::UnixEpoch()).InMicroseconds();
}
-const CpuProfile* CpuProfiler::FindProfile(unsigned uid,
- Handle<Value> security_token) {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfiler::FindProfile");
+int CpuProfile::GetSamplesCount() const {
+ return reinterpret_cast<const i::CpuProfile*>(this)->samples_count();
+}
+
+
+int CpuProfiler::GetProfileCount() {
+ return reinterpret_cast<i::CpuProfiler*>(this)->GetProfilesCount();
+}
+
+
+void CpuProfiler::SetSamplingInterval(int us) {
+ ASSERT(us >= 0);
+ return reinterpret_cast<i::CpuProfiler*>(this)->set_sampling_interval(
+ i::TimeDelta::FromMicroseconds(us));
+}
+
+
+const CpuProfile* CpuProfiler::GetCpuProfile(int index) {
return reinterpret_cast<const CpuProfile*>(
- i::CpuProfiler::FindProfile(
- security_token.IsEmpty() ? NULL : *Utils::OpenHandle(*security_token),
- uid));
+ reinterpret_cast<i::CpuProfiler*>(this)->GetProfile(index));
}
-void CpuProfiler::StartProfiling(Handle<String> title) {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfiler::StartProfiling");
- i::CpuProfiler::StartProfiling(*Utils::OpenHandle(*title));
+void CpuProfiler::StartCpuProfiling(Handle<String> title, bool record_samples) {
+ reinterpret_cast<i::CpuProfiler*>(this)->StartProfiling(
+ *Utils::OpenHandle(*title), record_samples);
}
-const CpuProfile* CpuProfiler::StopProfiling(Handle<String> title,
- Handle<Value> security_token) {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfiler::StopProfiling");
+const CpuProfile* CpuProfiler::StopCpuProfiling(Handle<String> title) {
return reinterpret_cast<const CpuProfile*>(
- i::CpuProfiler::StopProfiling(
- security_token.IsEmpty() ? NULL : *Utils::OpenHandle(*security_token),
+ reinterpret_cast<i::CpuProfiler*>(this)->StopProfiling(
*Utils::OpenHandle(*title)));
}
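
Taken together, a hedged sketch of the renamed per-isolate profiling flow (illustrative, not part of the patch); `isolate` is assumed to be live and entered:

    v8::HandleScope handle_scope(isolate);
    v8::CpuProfiler* profiler = isolate->GetCpuProfiler();
    v8::Local<v8::String> title = v8::String::New("startup");
    profiler->SetSamplingInterval(500);        // microseconds, before profiling
    profiler->StartCpuProfiling(title, true);  // true = record individual samples
    // ... run the code of interest ...
    const v8::CpuProfile* profile = profiler->StopCpuProfiling(title);
    // profile->GetTopDownRoot() and profile->GetSamplesCount() can now be
    // inspected; call profile->Delete() when finished.
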
-void CpuProfiler::DeleteAllProfiles() {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfiler::DeleteAllProfiles");
- i::CpuProfiler::DeleteAllProfiles();
+void CpuProfiler::DeleteAllCpuProfiles() {
+ reinterpret_cast<i::CpuProfiler*>(this)->DeleteAllProfiles();
+}
+
+
+void CpuProfiler::SetIdle(bool is_idle) {
+ i::Isolate* isolate = reinterpret_cast<i::CpuProfiler*>(this)->isolate();
+ i::StateTag state = isolate->current_vm_state();
+ ASSERT(state == i::EXTERNAL || state == i::IDLE);
+ if (isolate->js_entry_sp() != NULL) return;
+ if (is_idle) {
+ isolate->set_current_vm_state(i::IDLE);
+ } else if (state == i::IDLE) {
+ isolate->set_current_vm_state(i::EXTERNAL);
+ }
}
@@ -6143,44 +7104,38 @@ static i::HeapGraphEdge* ToInternal(const HeapGraphEdge* edge) {
HeapGraphEdge::Type HeapGraphEdge::GetType() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapGraphEdge::GetType");
return static_cast<HeapGraphEdge::Type>(ToInternal(this)->type());
}
Handle<Value> HeapGraphEdge::GetName() const {
i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapGraphEdge::GetName");
i::HeapGraphEdge* edge = ToInternal(this);
switch (edge->type()) {
case i::HeapGraphEdge::kContextVariable:
case i::HeapGraphEdge::kInternal:
case i::HeapGraphEdge::kProperty:
case i::HeapGraphEdge::kShortcut:
- return Handle<String>(ToApi<String>(isolate->factory()->LookupAsciiSymbol(
- edge->name())));
+ return ToApiHandle<String>(
+ isolate->factory()->InternalizeUtf8String(edge->name()));
case i::HeapGraphEdge::kElement:
case i::HeapGraphEdge::kHidden:
- return Handle<Number>(ToApi<Number>(isolate->factory()->NewNumberFromInt(
- edge->index())));
+ case i::HeapGraphEdge::kWeak:
+ return ToApiHandle<Number>(
+ isolate->factory()->NewNumberFromInt(edge->index()));
default: UNREACHABLE();
}
- return v8::Undefined();
+ return v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
}
const HeapGraphNode* HeapGraphEdge::GetFromNode() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapGraphEdge::GetFromNode");
const i::HeapEntry* from = ToInternal(this)->from();
return reinterpret_cast<const HeapGraphNode*>(from);
}
const HeapGraphNode* HeapGraphEdge::GetToNode() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapGraphEdge::GetToNode");
const i::HeapEntry* to = ToInternal(this)->to();
return reinterpret_cast<const HeapGraphNode*>(to);
}
@@ -6193,44 +7148,33 @@ static i::HeapEntry* ToInternal(const HeapGraphNode* entry) {
HeapGraphNode::Type HeapGraphNode::GetType() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapGraphNode::GetType");
return static_cast<HeapGraphNode::Type>(ToInternal(this)->type());
}
Handle<String> HeapGraphNode::GetName() const {
i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapGraphNode::GetName");
- return Handle<String>(ToApi<String>(isolate->factory()->LookupAsciiSymbol(
- ToInternal(this)->name())));
+ return ToApiHandle<String>(
+ isolate->factory()->InternalizeUtf8String(ToInternal(this)->name()));
}
SnapshotObjectId HeapGraphNode::GetId() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapGraphNode::GetId");
return ToInternal(this)->id();
}
int HeapGraphNode::GetSelfSize() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapGraphNode::GetSelfSize");
return ToInternal(this)->self_size();
}
int HeapGraphNode::GetChildrenCount() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapSnapshot::GetChildrenCount");
return ToInternal(this)->children().length();
}
const HeapGraphEdge* HeapGraphNode::GetChild(int index) const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapSnapshot::GetChild");
return reinterpret_cast<const HeapGraphEdge*>(
ToInternal(this)->children()[index]);
}
@@ -6238,11 +7182,10 @@ const HeapGraphEdge* HeapGraphNode::GetChild(int index) const {
v8::Handle<v8::Value> HeapGraphNode::GetHeapValue() const {
i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapGraphNode::GetHeapValue");
i::Handle<i::HeapObject> object = ToInternal(this)->GetHeapObject();
- return v8::Handle<Value>(!object.is_null() ?
- ToApi<Value>(object) : ToApi<Value>(
- isolate->factory()->undefined_value()));
+ return !object.is_null() ?
+ ToApiHandle<Value>(object) :
+ ToApiHandle<Value>(isolate->factory()->undefined_value());
}
@@ -6254,79 +7197,56 @@ static i::HeapSnapshot* ToInternal(const HeapSnapshot* snapshot) {
void HeapSnapshot::Delete() {
i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapSnapshot::Delete");
- if (i::HeapProfiler::GetSnapshotsCount() > 1) {
+ if (isolate->heap_profiler()->GetSnapshotsCount() > 1) {
ToInternal(this)->Delete();
} else {
// If this is the last snapshot, clean up all accessory data as well.
- i::HeapProfiler::DeleteAllSnapshots();
+ isolate->heap_profiler()->DeleteAllSnapshots();
}
}
-HeapSnapshot::Type HeapSnapshot::GetType() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapSnapshot::GetType");
- return static_cast<HeapSnapshot::Type>(ToInternal(this)->type());
-}
-
-
unsigned HeapSnapshot::GetUid() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapSnapshot::GetUid");
return ToInternal(this)->uid();
}
Handle<String> HeapSnapshot::GetTitle() const {
i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapSnapshot::GetTitle");
- return Handle<String>(ToApi<String>(isolate->factory()->LookupAsciiSymbol(
- ToInternal(this)->title())));
+ return ToApiHandle<String>(
+ isolate->factory()->InternalizeUtf8String(ToInternal(this)->title()));
}
const HeapGraphNode* HeapSnapshot::GetRoot() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapSnapshot::GetHead");
return reinterpret_cast<const HeapGraphNode*>(ToInternal(this)->root());
}
const HeapGraphNode* HeapSnapshot::GetNodeById(SnapshotObjectId id) const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapSnapshot::GetNodeById");
return reinterpret_cast<const HeapGraphNode*>(
ToInternal(this)->GetEntryById(id));
}
int HeapSnapshot::GetNodesCount() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapSnapshot::GetNodesCount");
return ToInternal(this)->entries().length();
}
const HeapGraphNode* HeapSnapshot::GetNode(int index) const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapSnapshot::GetNode");
return reinterpret_cast<const HeapGraphNode*>(
&ToInternal(this)->entries().at(index));
}
SnapshotObjectId HeapSnapshot::GetMaxSnapshotJSObjectId() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapSnapshot::GetMaxSnapshotJSObjectId");
return ToInternal(this)->max_snapshot_js_object_id();
}
void HeapSnapshot::Serialize(OutputStream* stream,
HeapSnapshot::SerializationFormat format) const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapSnapshot::Serialize");
ApiCheck(format == kJSON,
"v8::HeapSnapshot::Serialize",
"Unknown serialization format");
@@ -6341,99 +7261,79 @@ void HeapSnapshot::Serialize(OutputStream* stream,
}
-int HeapProfiler::GetSnapshotsCount() {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapProfiler::GetSnapshotsCount");
- return i::HeapProfiler::GetSnapshotsCount();
+int HeapProfiler::GetSnapshotCount() {
+ return reinterpret_cast<i::HeapProfiler*>(this)->GetSnapshotsCount();
}
-const HeapSnapshot* HeapProfiler::GetSnapshot(int index) {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapProfiler::GetSnapshot");
+const HeapSnapshot* HeapProfiler::GetHeapSnapshot(int index) {
return reinterpret_cast<const HeapSnapshot*>(
- i::HeapProfiler::GetSnapshot(index));
+ reinterpret_cast<i::HeapProfiler*>(this)->GetSnapshot(index));
}
-const HeapSnapshot* HeapProfiler::FindSnapshot(unsigned uid) {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapProfiler::FindSnapshot");
+SnapshotObjectId HeapProfiler::GetObjectId(Handle<Value> value) {
+ i::Handle<i::Object> obj = Utils::OpenHandle(*value);
+ return reinterpret_cast<i::HeapProfiler*>(this)->GetSnapshotObjectId(obj);
+}
+
+
+const HeapSnapshot* HeapProfiler::TakeHeapSnapshot(
+ Handle<String> title,
+ ActivityControl* control,
+ ObjectNameResolver* resolver) {
return reinterpret_cast<const HeapSnapshot*>(
- i::HeapProfiler::FindSnapshot(uid));
+ reinterpret_cast<i::HeapProfiler*>(this)->TakeSnapshot(
+ *Utils::OpenHandle(*title), control, resolver));
}
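
A usage sketch of the per-isolate snapshot entry point (illustrative; this assumes the v8.h declaration supplies NULL defaults for the control and resolver arguments, so they are omitted):

    v8::HandleScope handle_scope(isolate);
    v8::HeapProfiler* heap_profiler = isolate->GetHeapProfiler();
    const v8::HeapSnapshot* snapshot =
        heap_profiler->TakeHeapSnapshot(v8::String::New("before-gc"));
    // snapshot->GetNodesCount(), GetRoot(), etc. remain valid until
    // snapshot->Delete() is called.
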
-SnapshotObjectId HeapProfiler::GetSnapshotObjectId(Handle<Value> value) {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapProfiler::GetSnapshotObjectId");
- i::Handle<i::Object> obj = Utils::OpenHandle(*value);
- return i::HeapProfiler::GetSnapshotObjectId(obj);
+void HeapProfiler::StartTrackingHeapObjects() {
+ reinterpret_cast<i::HeapProfiler*>(this)->StartHeapObjectsTracking();
}
-const HeapSnapshot* HeapProfiler::TakeSnapshot(Handle<String> title,
- HeapSnapshot::Type type,
- ActivityControl* control) {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapProfiler::TakeSnapshot");
- i::HeapSnapshot::Type internal_type = i::HeapSnapshot::kFull;
- switch (type) {
- case HeapSnapshot::kFull:
- internal_type = i::HeapSnapshot::kFull;
- break;
- default:
- UNREACHABLE();
- }
- return reinterpret_cast<const HeapSnapshot*>(
- i::HeapProfiler::TakeSnapshot(
- *Utils::OpenHandle(*title), internal_type, control));
+void HeapProfiler::StopTrackingHeapObjects() {
+ reinterpret_cast<i::HeapProfiler*>(this)->StopHeapObjectsTracking();
}
-void HeapProfiler::StartHeapObjectsTracking() {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapProfiler::StartHeapObjectsTracking");
- i::HeapProfiler::StartHeapObjectsTracking();
+SnapshotObjectId HeapProfiler::GetHeapStats(OutputStream* stream) {
+ return reinterpret_cast<i::HeapProfiler*>(this)->PushHeapObjectsStats(stream);
}
-void HeapProfiler::StopHeapObjectsTracking() {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapProfiler::StopHeapObjectsTracking");
- i::HeapProfiler::StopHeapObjectsTracking();
+void HeapProfiler::DeleteAllHeapSnapshots() {
+ reinterpret_cast<i::HeapProfiler*>(this)->DeleteAllSnapshots();
}
-SnapshotObjectId HeapProfiler::PushHeapObjectsStats(OutputStream* stream) {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapProfiler::PushHeapObjectsStats");
- return i::HeapProfiler::PushHeapObjectsStats(stream);
+void HeapProfiler::SetWrapperClassInfoProvider(uint16_t class_id,
+ WrapperInfoCallback callback) {
+ reinterpret_cast<i::HeapProfiler*>(this)->DefineWrapperClass(class_id,
+ callback);
}
-void HeapProfiler::DeleteAllSnapshots() {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapProfiler::DeleteAllSnapshots");
- i::HeapProfiler::DeleteAllSnapshots();
+size_t HeapProfiler::GetProfilerMemorySize() {
+ return reinterpret_cast<i::HeapProfiler*>(this)->
+ GetMemorySizeUsedByProfiler();
}
-void HeapProfiler::DefineWrapperClass(uint16_t class_id,
- WrapperInfoCallback callback) {
- i::Isolate::Current()->heap_profiler()->DefineWrapperClass(class_id,
- callback);
+void HeapProfiler::SetRetainedObjectInfo(UniqueId id,
+ RetainedObjectInfo* info) {
+ reinterpret_cast<i::HeapProfiler*>(this)->SetRetainedObjectInfo(id, info);
}
-int HeapProfiler::GetPersistentHandleCount() {
- i::Isolate* isolate = i::Isolate::Current();
- return isolate->global_handles()->NumberOfGlobalHandles();
+void HeapProfiler::StartRecordingHeapAllocations() {
+ reinterpret_cast<i::HeapProfiler*>(this)->StartHeapAllocationsRecording();
}
-size_t HeapProfiler::GetMemorySizeUsedByProfiler() {
- return i::HeapProfiler::GetMemorySizeUsedByProfiler();
+void HeapProfiler::StopRecordingHeapAllocations() {
+ reinterpret_cast<i::HeapProfiler*>(this)->StopHeapAllocationsRecording();
}
@@ -6445,6 +7345,7 @@ void Testing::SetStressRunType(Testing::StressType type) {
internal::Testing::set_stress_type(type);
}
+
int Testing::GetStressRuns() {
if (internal::FLAG_stress_runs != 0) return internal::FLAG_stress_runs;
#ifdef DEBUG
@@ -6497,8 +7398,11 @@ void Testing::PrepareStressRun(int run) {
}
+// TODO(svenpanne) Deprecate this.
void Testing::DeoptimizeAll() {
- internal::Deoptimizer::DeoptimizeAll();
+ i::Isolate* isolate = i::Isolate::Current();
+ i::HandleScope scope(isolate);
+ internal::Deoptimizer::DeoptimizeAll(isolate);
}
@@ -6514,7 +7418,7 @@ char* HandleScopeImplementer::ArchiveThread(char* storage) {
v8::ImplementationUtilities::HandleScopeData* current =
isolate_->handle_scope_data();
handle_scope_data_ = *current;
- memcpy(storage, this, sizeof(*this));
+ OS::MemCopy(storage, this, sizeof(*this));
ResetAfterArchive();
current->Initialize();
@@ -6529,7 +7433,7 @@ int HandleScopeImplementer::ArchiveSpacePerThread() {
char* HandleScopeImplementer::RestoreThread(char* storage) {
- memcpy(this, storage, sizeof(*this));
+ OS::MemCopy(this, storage, sizeof(*this));
*isolate_->handle_scope_data() = handle_scope_data_;
return storage + ArchiveSpacePerThread();
}
@@ -6543,7 +7447,7 @@ void HandleScopeImplementer::IterateThis(ObjectVisitor* v) {
for (int i = blocks()->length() - 2; i >= 0; --i) {
Object** block = blocks()->at(i);
if (last_handle_before_deferred_block_ != NULL &&
- (last_handle_before_deferred_block_ < &block[kHandleBlockSize]) &&
+ (last_handle_before_deferred_block_ <= &block[kHandleBlockSize]) &&
(last_handle_before_deferred_block_ >= block)) {
v->VisitPointers(block, last_handle_before_deferred_block_);
ASSERT(!found_block_before_deferred);
@@ -6563,9 +7467,11 @@ void HandleScopeImplementer::IterateThis(ObjectVisitor* v) {
v->VisitPointers(blocks()->last(), handle_scope_data_.next);
}
- if (!saved_contexts_.is_empty()) {
- Object** start = reinterpret_cast<Object**>(&saved_contexts_.first());
- v->VisitPointers(start, start + saved_contexts_.length());
+ List<Context*>* context_lists[2] = { &saved_contexts_, &entered_contexts_};
+ for (unsigned i = 0; i < ARRAY_SIZE(context_lists); i++) {
+ if (context_lists[i]->is_empty()) continue;
+ Object** start = reinterpret_cast<Object**>(&context_lists[i]->first());
+ v->VisitPointers(start, start + context_lists[i]->length());
}
}
@@ -6593,8 +7499,7 @@ DeferredHandles* HandleScopeImplementer::Detach(Object** prev_limit) {
while (!blocks_.is_empty()) {
Object** block_start = blocks_.last();
Object** block_limit = &block_start[kHandleBlockSize];
- // We should not need to check for NoHandleAllocation here. Assert
- // this.
+ // We should not need to check for SealHandleScope here. Assert this.
ASSERT(prev_limit == block_limit ||
!(block_start <= prev_limit && prev_limit <= block_limit));
if (prev_limit == block_limit) break;
@@ -6625,7 +7530,7 @@ DeferredHandles::~DeferredHandles() {
isolate_->UnlinkDeferredHandles(this);
for (int i = 0; i < blocks_.length(); i++) {
-#ifdef DEBUG
+#ifdef ENABLE_HANDLE_ZAPPING
HandleScope::ZapRange(blocks_[i], &blocks_[i][kHandleBlockSize]);
#endif
isolate_->handle_scope_implementer()->ReturnBlock(blocks_[i]);
@@ -6647,4 +7552,29 @@ void DeferredHandles::Iterate(ObjectVisitor* v) {
}
+void InvokeAccessorGetterCallback(
+ v8::Local<v8::String> property,
+ const v8::PropertyCallbackInfo<v8::Value>& info,
+ v8::AccessorGetterCallback getter) {
+ // Leaving JavaScript.
+ Isolate* isolate = reinterpret_cast<Isolate*>(info.GetIsolate());
+ Address getter_address = reinterpret_cast<Address>(reinterpret_cast<intptr_t>(
+ getter));
+ VMState<EXTERNAL> state(isolate);
+ ExternalCallbackScope call_scope(isolate, getter_address);
+ getter(property, info);
+}
+
+
+void InvokeFunctionCallback(const v8::FunctionCallbackInfo<v8::Value>& info,
+ v8::FunctionCallback callback) {
+ Isolate* isolate = reinterpret_cast<Isolate*>(info.GetIsolate());
+ Address callback_address =
+ reinterpret_cast<Address>(reinterpret_cast<intptr_t>(callback));
+ VMState<EXTERNAL> state(isolate);
+ ExternalCallbackScope call_scope(isolate, callback_address);
+ callback(info);
+}
+
+
} } // namespace v8::internal
diff --git a/deps/v8/src/api.h b/deps/v8/src/api.h
index 7197b6cb5..9197bafbc 100644
--- a/deps/v8/src/api.h
+++ b/deps/v8/src/api.h
@@ -125,9 +125,10 @@ template <typename T> inline T ToCData(v8::internal::Object* obj) {
template <typename T>
-inline v8::internal::Handle<v8::internal::Object> FromCData(T obj) {
+inline v8::internal::Handle<v8::internal::Object> FromCData(
+ v8::internal::Isolate* isolate, T obj) {
STATIC_ASSERT(sizeof(T) == sizeof(v8::internal::Address));
- return FACTORY->NewForeign(
+ return isolate->factory()->NewForeign(
reinterpret_cast<v8::internal::Address>(reinterpret_cast<intptr_t>(obj)));
}
@@ -149,12 +150,10 @@ class RegisteredExtension {
static void UnregisterAll();
Extension* extension() { return extension_; }
RegisteredExtension* next() { return next_; }
- RegisteredExtension* next_auto() { return next_auto_; }
static RegisteredExtension* first_extension() { return first_extension_; }
private:
Extension* extension_;
RegisteredExtension* next_;
- RegisteredExtension* next_auto_;
static RegisteredExtension* first_extension_;
};
@@ -170,14 +169,29 @@ class RegisteredExtension {
V(RegExp, JSRegExp) \
V(Object, JSObject) \
V(Array, JSArray) \
+ V(ArrayBuffer, JSArrayBuffer) \
+ V(ArrayBufferView, JSArrayBufferView) \
+ V(TypedArray, JSTypedArray) \
+ V(Uint8Array, JSTypedArray) \
+ V(Uint8ClampedArray, JSTypedArray) \
+ V(Int8Array, JSTypedArray) \
+ V(Uint16Array, JSTypedArray) \
+ V(Int16Array, JSTypedArray) \
+ V(Uint32Array, JSTypedArray) \
+ V(Int32Array, JSTypedArray) \
+ V(Float32Array, JSTypedArray) \
+ V(Float64Array, JSTypedArray) \
+ V(DataView, JSDataView) \
V(String, String) \
+ V(Symbol, Symbol) \
V(Script, Object) \
V(Function, JSFunction) \
V(Message, JSObject) \
V(Context, Context) \
V(External, Foreign) \
V(StackTrace, JSArray) \
- V(StackFrame, JSObject)
+ V(StackFrame, JSObject) \
+ V(DeclaredAccessorDescriptor, DeclaredAccessorDescriptor)
class Utils {
@@ -195,14 +209,42 @@ class Utils {
v8::internal::Handle<v8::internal::JSFunction> obj);
static inline Local<String> ToLocal(
v8::internal::Handle<v8::internal::String> obj);
+ static inline Local<Symbol> ToLocal(
+ v8::internal::Handle<v8::internal::Symbol> obj);
static inline Local<RegExp> ToLocal(
v8::internal::Handle<v8::internal::JSRegExp> obj);
static inline Local<Object> ToLocal(
v8::internal::Handle<v8::internal::JSObject> obj);
static inline Local<Array> ToLocal(
v8::internal::Handle<v8::internal::JSArray> obj);
- static inline Local<External> ToLocal(
- v8::internal::Handle<v8::internal::Foreign> obj);
+ static inline Local<ArrayBuffer> ToLocal(
+ v8::internal::Handle<v8::internal::JSArrayBuffer> obj);
+ static inline Local<ArrayBufferView> ToLocal(
+ v8::internal::Handle<v8::internal::JSArrayBufferView> obj);
+ static inline Local<DataView> ToLocal(
+ v8::internal::Handle<v8::internal::JSDataView> obj);
+
+ static inline Local<TypedArray> ToLocal(
+ v8::internal::Handle<v8::internal::JSTypedArray> obj);
+ static inline Local<Uint8Array> ToLocalUint8Array(
+ v8::internal::Handle<v8::internal::JSTypedArray> obj);
+ static inline Local<Uint8ClampedArray> ToLocalUint8ClampedArray(
+ v8::internal::Handle<v8::internal::JSTypedArray> obj);
+ static inline Local<Int8Array> ToLocalInt8Array(
+ v8::internal::Handle<v8::internal::JSTypedArray> obj);
+ static inline Local<Uint16Array> ToLocalUint16Array(
+ v8::internal::Handle<v8::internal::JSTypedArray> obj);
+ static inline Local<Int16Array> ToLocalInt16Array(
+ v8::internal::Handle<v8::internal::JSTypedArray> obj);
+ static inline Local<Uint32Array> ToLocalUint32Array(
+ v8::internal::Handle<v8::internal::JSTypedArray> obj);
+ static inline Local<Int32Array> ToLocalInt32Array(
+ v8::internal::Handle<v8::internal::JSTypedArray> obj);
+ static inline Local<Float32Array> ToLocalFloat32Array(
+ v8::internal::Handle<v8::internal::JSTypedArray> obj);
+ static inline Local<Float64Array> ToLocalFloat64Array(
+ v8::internal::Handle<v8::internal::JSTypedArray> obj);
+
static inline Local<Message> MessageToLocal(
v8::internal::Handle<v8::internal::Object> obj);
static inline Local<StackTrace> StackTraceToLocal(
@@ -225,6 +267,10 @@ class Utils {
v8::internal::Handle<v8::internal::FunctionTemplateInfo> obj);
static inline Local<TypeSwitch> ToLocal(
v8::internal::Handle<v8::internal::TypeSwitchInfo> obj);
+ static inline Local<External> ExternalToLocal(
+ v8::internal::Handle<v8::internal::JSObject> obj);
+ static inline Local<DeclaredAccessorDescriptor> ToLocal(
+ v8::internal::Handle<v8::internal::DeclaredAccessorDescriptor> obj);
#define DECLARE_OPEN_HANDLE(From, To) \
static inline v8::internal::Handle<v8::internal::To> \
@@ -233,13 +279,31 @@ class Utils {
OPEN_HANDLE_LIST(DECLARE_OPEN_HANDLE)
#undef DECLARE_OPEN_HANDLE
-};
+ template<class From, class To>
+ static inline Local<To> Convert(v8::internal::Handle<From> obj) {
+ ASSERT(obj.is_null() || !obj->IsTheHole());
+ return Local<To>(reinterpret_cast<To*>(obj.location()));
+ }
-template <class T>
-inline T* ToApi(v8::internal::Handle<v8::internal::Object> obj) {
- return reinterpret_cast<T*>(obj.location());
-}
+ template <class T>
+ static inline v8::internal::Handle<v8::internal::Object> OpenPersistent(
+ const v8::Persistent<T>& persistent) {
+ return v8::internal::Handle<v8::internal::Object>(
+ reinterpret_cast<v8::internal::Object**>(persistent.val_));
+ }
+
+ template <class T>
+ static inline v8::internal::Handle<v8::internal::Object> OpenPersistent(
+ v8::Persistent<T>* persistent) {
+ return OpenPersistent(*persistent);
+ }
+
+ template <class From, class To>
+ static inline v8::internal::Handle<To> OpenHandle(v8::Local<From> handle) {
+ return OpenHandle(*handle);
+ }
+};
template <class T>
@@ -253,22 +317,57 @@ v8::internal::Handle<T> v8::internal::Handle<T>::EscapeFrom(
}
+template <class T>
+inline T* ToApi(v8::internal::Handle<v8::internal::Object> obj) {
+ return reinterpret_cast<T*>(obj.location());
+}
+
+template <class T>
+inline v8::Local<T> ToApiHandle(
+ v8::internal::Handle<v8::internal::Object> obj) {
+ return Utils::Convert<v8::internal::Object, T>(obj);
+}
+
+
// Implementations of ToLocal
#define MAKE_TO_LOCAL(Name, From, To) \
Local<v8::To> Utils::Name(v8::internal::Handle<v8::internal::From> obj) { \
- ASSERT(obj.is_null() || !obj->IsTheHole()); \
- return Local<To>(reinterpret_cast<To*>(obj.location())); \
+ return Convert<v8::internal::From, v8::To>(obj); \
+ }
+
+
+#define MAKE_TO_LOCAL_TYPED_ARRAY(TypedArray, typeConst) \
+ Local<v8::TypedArray> Utils::ToLocal##TypedArray( \
+ v8::internal::Handle<v8::internal::JSTypedArray> obj) { \
+ ASSERT(obj->type() == typeConst); \
+ return Convert<v8::internal::JSTypedArray, v8::TypedArray>(obj); \
}
+
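
For clarity, a single expansion of the MAKE_TO_LOCAL macro above, e.g. MAKE_TO_LOCAL(ToLocal, String, String), produces roughly:

    Local<v8::String> Utils::ToLocal(
        v8::internal::Handle<v8::internal::String> obj) {
      return Convert<v8::internal::String, v8::String>(obj);
    }
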
MAKE_TO_LOCAL(ToLocal, Context, Context)
MAKE_TO_LOCAL(ToLocal, Object, Value)
MAKE_TO_LOCAL(ToLocal, JSFunction, Function)
MAKE_TO_LOCAL(ToLocal, String, String)
+MAKE_TO_LOCAL(ToLocal, Symbol, Symbol)
MAKE_TO_LOCAL(ToLocal, JSRegExp, RegExp)
MAKE_TO_LOCAL(ToLocal, JSObject, Object)
MAKE_TO_LOCAL(ToLocal, JSArray, Array)
-MAKE_TO_LOCAL(ToLocal, Foreign, External)
+MAKE_TO_LOCAL(ToLocal, JSArrayBuffer, ArrayBuffer)
+MAKE_TO_LOCAL(ToLocal, JSArrayBufferView, ArrayBufferView)
+MAKE_TO_LOCAL(ToLocal, JSDataView, DataView)
+MAKE_TO_LOCAL(ToLocal, JSTypedArray, TypedArray)
+
+MAKE_TO_LOCAL_TYPED_ARRAY(Uint8Array, kExternalUnsignedByteArray)
+MAKE_TO_LOCAL_TYPED_ARRAY(Uint8ClampedArray, kExternalPixelArray)
+MAKE_TO_LOCAL_TYPED_ARRAY(Int8Array, kExternalByteArray)
+MAKE_TO_LOCAL_TYPED_ARRAY(Uint16Array, kExternalUnsignedShortArray)
+MAKE_TO_LOCAL_TYPED_ARRAY(Int16Array, kExternalShortArray)
+MAKE_TO_LOCAL_TYPED_ARRAY(Uint32Array, kExternalUnsignedIntArray)
+MAKE_TO_LOCAL_TYPED_ARRAY(Int32Array, kExternalIntArray)
+MAKE_TO_LOCAL_TYPED_ARRAY(Float32Array, kExternalFloatArray)
+MAKE_TO_LOCAL_TYPED_ARRAY(Float64Array, kExternalDoubleArray)
+
MAKE_TO_LOCAL(ToLocal, FunctionTemplateInfo, FunctionTemplate)
MAKE_TO_LOCAL(ToLocal, ObjectTemplateInfo, ObjectTemplate)
MAKE_TO_LOCAL(ToLocal, SignatureInfo, Signature)
@@ -280,7 +379,10 @@ MAKE_TO_LOCAL(StackFrameToLocal, JSObject, StackFrame)
MAKE_TO_LOCAL(NumberToLocal, Object, Number)
MAKE_TO_LOCAL(IntegerToLocal, Object, Integer)
MAKE_TO_LOCAL(Uint32ToLocal, Object, Uint32)
+MAKE_TO_LOCAL(ExternalToLocal, JSObject, External)
+MAKE_TO_LOCAL(ToLocal, DeclaredAccessorDescriptor, DeclaredAccessorDescriptor)
+#undef MAKE_TO_LOCAL_TYPED_ARRAY
#undef MAKE_TO_LOCAL
@@ -290,6 +392,9 @@ MAKE_TO_LOCAL(Uint32ToLocal, Object, Uint32)
v8::internal::Handle<v8::internal::To> Utils::OpenHandle( \
const v8::From* that, bool allow_empty_handle) { \
EXTRA_CHECK(allow_empty_handle || that != NULL); \
+ EXTRA_CHECK(that == NULL || \
+ !(*reinterpret_cast<v8::internal::To**>( \
+ const_cast<v8::From*>(that)))->IsFailure()); \
return v8::internal::Handle<v8::internal::To>( \
reinterpret_cast<v8::internal::To**>(const_cast<v8::From*>(that))); \
}
@@ -437,12 +542,12 @@ class HandleScopeImplementer {
inline void DecrementCallDepth() {call_depth_--;}
inline bool CallDepthIsZero() { return call_depth_ == 0; }
- inline void EnterContext(Handle<Object> context);
- inline bool LeaveLastContext();
+ inline void EnterContext(Handle<Context> context);
+ inline bool LeaveContext(Handle<Context> context);
// Returns the last entered context or an empty handle if no
// contexts have been entered.
- inline Handle<Object> LastEnteredContext();
+ inline Handle<Context> LastEnteredContext();
inline void SaveContext(Context* context);
inline Context* RestoreContext();
@@ -487,7 +592,7 @@ class HandleScopeImplementer {
Isolate* isolate_;
List<internal::Object**> blocks_;
// Used as a stack to keep track of entered contexts.
- List<Handle<Object> > entered_contexts_;
+ List<Context*> entered_contexts_;
// Used as a stack to keep track of saved contexts.
List<Context*> saved_contexts_;
Object** spare_;
@@ -525,21 +630,23 @@ bool HandleScopeImplementer::HasSavedContexts() {
}
-void HandleScopeImplementer::EnterContext(Handle<Object> context) {
- entered_contexts_.Add(context);
+void HandleScopeImplementer::EnterContext(Handle<Context> context) {
+ entered_contexts_.Add(*context);
}
-bool HandleScopeImplementer::LeaveLastContext() {
+bool HandleScopeImplementer::LeaveContext(Handle<Context> context) {
if (entered_contexts_.is_empty()) return false;
+ // TODO(dcarney): figure out what's wrong here
+ // if (entered_contexts_.last() != *context) return false;
entered_contexts_.RemoveLast();
return true;
}
-Handle<Object> HandleScopeImplementer::LastEnteredContext() {
- if (entered_contexts_.is_empty()) return Handle<Object>::null();
- return entered_contexts_.last();
+Handle<Context> HandleScopeImplementer::LastEnteredContext() {
+ if (entered_contexts_.is_empty()) return Handle<Context>::null();
+ return Handle<Context>(entered_contexts_.last());
}
@@ -558,15 +665,20 @@ void HandleScopeImplementer::DeleteExtensions(internal::Object** prev_limit) {
internal::Object** block_start = blocks_.last();
internal::Object** block_limit = block_start + kHandleBlockSize;
#ifdef DEBUG
- // NoHandleAllocation may make the prev_limit to point inside the block.
- if (block_start <= prev_limit && prev_limit <= block_limit) break;
+ // SealHandleScope may make prev_limit point inside the block.
+ if (block_start <= prev_limit && prev_limit <= block_limit) {
+#ifdef ENABLE_HANDLE_ZAPPING
+ internal::HandleScope::ZapRange(prev_limit, block_limit);
+#endif
+ break;
+ }
#else
if (prev_limit == block_limit) break;
#endif
blocks_.RemoveLast();
-#ifdef DEBUG
- v8::ImplementationUtilities::ZapHandleRange(block_start, block_limit);
+#ifdef ENABLE_HANDLE_ZAPPING
+ internal::HandleScope::ZapRange(block_start, block_limit);
#endif
if (spare_ != NULL) {
DeleteArray(spare_);
@@ -578,6 +690,16 @@ void HandleScopeImplementer::DeleteExtensions(internal::Object** prev_limit) {
}
+// Interceptor functions called from generated inline caches to notify
+// the CPU profiler that external callbacks are invoked.
+void InvokeAccessorGetterCallback(
+ v8::Local<v8::String> property,
+ const v8::PropertyCallbackInfo<v8::Value>& info,
+ v8::AccessorGetterCallback getter);
+
+void InvokeFunctionCallback(const v8::FunctionCallbackInfo<v8::Value>& info,
+ v8::FunctionCallback callback);
+
class Testing {
public:
static v8::Testing::StressType stress_type() { return stress_type_; }
diff --git a/deps/v8/src/apinatives.js b/deps/v8/src/apinatives.js
index adefab6fa..6431901bf 100644
--- a/deps/v8/src/apinatives.js
+++ b/deps/v8/src/apinatives.js
@@ -71,28 +71,33 @@ function InstantiateFunction(data, name) {
(serialNumber in cache) && (cache[serialNumber] != kUninitialized);
if (!isFunctionCached) {
try {
- cache[serialNumber] = null;
var fun = %CreateApiFunction(data);
if (name) %FunctionSetName(fun, name);
- cache[serialNumber] = fun;
- var prototype = %GetTemplateField(data, kApiPrototypeTemplateOffset);
var flags = %GetTemplateField(data, kApiFlagOffset);
- // Note: Do not directly use an object template as a condition, our
- // internal ToBoolean doesn't handle that!
- fun.prototype = typeof prototype === 'undefined' ?
- {} : Instantiate(prototype);
- if (flags & (1 << kReadOnlyPrototypeBit)) {
- %FunctionSetReadOnlyPrototype(fun);
- }
- %SetProperty(fun.prototype, "constructor", fun, DONT_ENUM);
- var parent = %GetTemplateField(data, kApiParentTemplateOffset);
- // Note: Do not directly use a function template as a condition, our
- // internal ToBoolean doesn't handle that!
- if (!(typeof parent === 'undefined')) {
- var parent_fun = Instantiate(parent);
- fun.prototype.__proto__ = parent_fun.prototype;
+ var doNotCache = flags & (1 << kDoNotCacheBit);
+ if (!doNotCache) cache[serialNumber] = fun;
+ if (flags & (1 << kRemovePrototypeBit)) {
+ %FunctionRemovePrototype(fun);
+ } else {
+ var prototype = %GetTemplateField(data, kApiPrototypeTemplateOffset);
+ // Note: Do not directly use an object template as a condition, our
+ // internal ToBoolean doesn't handle that!
+ fun.prototype = typeof prototype === 'undefined' ?
+ {} : Instantiate(prototype);
+ if (flags & (1 << kReadOnlyPrototypeBit)) {
+ %FunctionSetReadOnlyPrototype(fun);
+ }
+ %SetProperty(fun.prototype, "constructor", fun, DONT_ENUM);
+ var parent = %GetTemplateField(data, kApiParentTemplateOffset);
+ // Note: Do not directly use a function template as a condition, our
+ // internal ToBoolean doesn't handle that!
+ if (!(typeof parent === 'undefined')) {
+ var parent_fun = Instantiate(parent);
+ %SetPrototype(fun.prototype, parent_fun.prototype);
+ }
}
ConfigureTemplateInstance(fun, data);
+ if (doNotCache) return fun;
} catch (e) {
cache[serialNumber] = kUninitialized;
throw e;
@@ -104,19 +109,32 @@ function InstantiateFunction(data, name) {
function ConfigureTemplateInstance(obj, data) {
var properties = %GetTemplateField(data, kApiPropertyListOffset);
- if (properties) {
- // Disable access checks while instantiating the object.
- var requires_access_checks = %DisableAccessChecks(obj);
- try {
- for (var i = 0; i < properties[0]; i += 3) {
+ if (!properties) return;
+ // Disable access checks while instantiating the object.
+ var requires_access_checks = %DisableAccessChecks(obj);
+ try {
+ for (var i = 1; i < properties[0];) {
+ var length = properties[i];
+ if (length == 3) {
var name = properties[i + 1];
var prop_data = properties[i + 2];
var attributes = properties[i + 3];
var value = Instantiate(prop_data, name);
%SetProperty(obj, name, value, attributes);
+ } else if (length == 5) {
+ var name = properties[i + 1];
+ var getter = properties[i + 2];
+ var setter = properties[i + 3];
+ var attribute = properties[i + 4];
+ var access_control = properties[i + 5];
+ %SetAccessorProperty(
+ obj, name, getter, setter, attribute, access_control);
+ } else {
+ throw "Bad properties array";
}
- } finally {
- if (requires_access_checks) %EnableAccessChecks(obj);
+ i += length + 1;
}
+ } finally {
+ if (requires_access_checks) %EnableAccessChecks(obj);
}
}
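
The rewritten ConfigureTemplateInstance above walks a length-prefixed record
array: properties[0] bounds the walk over the payload slots, and each record
starts with its own length (3 for a plain data property, 5 for an accessor
pair) followed by that many fields, so the cursor advances by length + 1. A
minimal sketch of the same walk, in C++ rather than the JS above and with
made-up record contents:

// Sketch of the length-prefixed record walk used by the rewritten
// ConfigureTemplateInstance. Record contents are invented for illustration.
#include <cstdio>
#include <vector>

int main() {
  // slot 0: number of payload slots that follow
  // data record:     3, name, value, attributes
  // accessor record: 5, name, getter, setter, attribute, access_control
  std::vector<int> properties = {10,
                                 3, 100, 200, 0,
                                 5, 101, 300, 301, 0, 2};
  for (int i = 1; i < properties[0]; ) {
    int length = properties[i];
    if (length == 3) {
      std::printf("data property: name=%d value=%d attrs=%d\n",
                  properties[i + 1], properties[i + 2], properties[i + 3]);
    } else if (length == 5) {
      std::printf("accessor: name=%d getter=%d setter=%d\n",
                  properties[i + 1], properties[i + 2], properties[i + 3]);
    } else {
      std::printf("bad properties array\n");
      return 1;
    }
    i += length + 1;  // skip the length slot plus the record fields
  }
  return 0;
}
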
diff --git a/deps/v8/src/apiutils.h b/deps/v8/src/apiutils.h
index 71c0e1c2c..076558564 100644
--- a/deps/v8/src/apiutils.h
+++ b/deps/v8/src/apiutils.h
@@ -39,38 +39,9 @@ class ImplementationUtilities {
return that->names_;
}
- // Packs additional parameters for the NewArguments function. |implicit_args|
- // is a pointer to the last element of 4-elements array controlled by GC.
- static void PrepareArgumentsData(internal::Object** implicit_args,
- internal::Isolate* isolate,
- internal::Object* data,
- internal::JSFunction* callee,
- internal::Object* holder) {
- implicit_args[v8::Arguments::kDataIndex] = data;
- implicit_args[v8::Arguments::kCalleeIndex] = callee;
- implicit_args[v8::Arguments::kHolderIndex] = holder;
- implicit_args[v8::Arguments::kIsolateIndex] =
- reinterpret_cast<internal::Object*>(isolate);
- }
-
- static v8::Arguments NewArguments(internal::Object** implicit_args,
- internal::Object** argv, int argc,
- bool is_construct_call) {
- ASSERT(implicit_args[v8::Arguments::kCalleeIndex]->IsJSFunction());
- ASSERT(implicit_args[v8::Arguments::kHolderIndex]->IsHeapObject());
- // The implicit isolate argument is not tagged and looks like a SMI.
- ASSERT(implicit_args[v8::Arguments::kIsolateIndex]->IsSmi());
-
- return v8::Arguments(implicit_args, argv, argc, is_construct_call);
- }
-
// Introduce an alias for the handle scope data to allow non-friends
// to access the HandleScope data.
typedef v8::HandleScope::Data HandleScopeData;
-
-#ifdef DEBUG
- static void ZapHandleRange(internal::Object** begin, internal::Object** end);
-#endif
};
} // namespace v8
diff --git a/deps/v8/src/arguments.cc b/deps/v8/src/arguments.cc
new file mode 100644
index 000000000..3a4d73315
--- /dev/null
+++ b/deps/v8/src/arguments.cc
@@ -0,0 +1,120 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+#include "arguments.h"
+
+#include "vm-state-inl.h"
+
+namespace v8 {
+namespace internal {
+
+
+template<typename T>
+template<typename V>
+v8::Handle<V> CustomArguments<T>::GetReturnValue(Isolate* isolate) {
+ // Check the ReturnValue.
+ Object** handle = &this->begin()[kReturnValueOffset];
+ // Nothing was set; return an empty handle, as per previous behaviour.
+ if ((*handle)->IsTheHole()) return v8::Handle<V>();
+ return Utils::Convert<Object, V>(Handle<Object>(handle));
+}
+
+
+v8::Handle<v8::Value> FunctionCallbackArguments::Call(FunctionCallback f) {
+ Isolate* isolate = this->isolate();
+ VMState<EXTERNAL> state(isolate);
+ ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
+ FunctionCallbackInfo<v8::Value> info(begin(),
+ argv_,
+ argc_,
+ is_construct_call_);
+ f(info);
+ return GetReturnValue<v8::Value>(isolate);
+}
+
+
+#define WRITE_CALL_0(Function, ReturnValue) \
+v8::Handle<ReturnValue> PropertyCallbackArguments::Call(Function f) { \
+ Isolate* isolate = this->isolate(); \
+ VMState<EXTERNAL> state(isolate); \
+ ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f)); \
+ PropertyCallbackInfo<ReturnValue> info(begin()); \
+ f(info); \
+ return GetReturnValue<ReturnValue>(isolate); \
+}
+
+
+#define WRITE_CALL_1(Function, ReturnValue, Arg1) \
+v8::Handle<ReturnValue> PropertyCallbackArguments::Call(Function f, \
+ Arg1 arg1) { \
+ Isolate* isolate = this->isolate(); \
+ VMState<EXTERNAL> state(isolate); \
+ ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f)); \
+ PropertyCallbackInfo<ReturnValue> info(begin()); \
+ f(arg1, info); \
+ return GetReturnValue<ReturnValue>(isolate); \
+}
+
+
+#define WRITE_CALL_2(Function, ReturnValue, Arg1, Arg2) \
+v8::Handle<ReturnValue> PropertyCallbackArguments::Call(Function f, \
+ Arg1 arg1, \
+ Arg2 arg2) { \
+ Isolate* isolate = this->isolate(); \
+ VMState<EXTERNAL> state(isolate); \
+ ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f)); \
+ PropertyCallbackInfo<ReturnValue> info(begin()); \
+ f(arg1, arg2, info); \
+ return GetReturnValue<ReturnValue>(isolate); \
+}
+
+
+#define WRITE_CALL_2_VOID(Function, ReturnValue, Arg1, Arg2) \
+void PropertyCallbackArguments::Call(Function f, \
+ Arg1 arg1, \
+ Arg2 arg2) { \
+ Isolate* isolate = this->isolate(); \
+ VMState<EXTERNAL> state(isolate); \
+ ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f)); \
+ PropertyCallbackInfo<ReturnValue> info(begin()); \
+ f(arg1, arg2, info); \
+}
+
+
+FOR_EACH_CALLBACK_TABLE_MAPPING_0(WRITE_CALL_0)
+FOR_EACH_CALLBACK_TABLE_MAPPING_1(WRITE_CALL_1)
+FOR_EACH_CALLBACK_TABLE_MAPPING_2(WRITE_CALL_2)
+FOR_EACH_CALLBACK_TABLE_MAPPING_2_VOID_RETURN(WRITE_CALL_2_VOID)
+
+#undef WRITE_CALL_0
+#undef WRITE_CALL_1
+#undef WRITE_CALL_2
+#undef WRITE_CALL_2_VOID
+
+
+} } // namespace v8::internal
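
The WRITE_CALL_* definitions above are one half of an X-macro: the
FOR_EACH_CALLBACK_TABLE_MAPPING_* lists in arguments.h name each callback
signature once, and instantiating them with a writer macro stamps out one
Call() wrapper per signature (VM state, external callback scope, callback
info, then the return value check). A minimal self-contained sketch of the
X-macro technique itself, with hypothetical names:

// Sketch of the table/writer macro split used by
// FOR_EACH_CALLBACK_TABLE_MAPPING_* and WRITE_CALL_*. Names are hypothetical.
#include <cstdio>

// The table names each wrapper exactly once: (function name, argument type).
#define FOR_EACH_MAPPING(F) \
  F(CallWithInt, int)       \
  F(CallWithDouble, double)

// Instantiating the table with a writer macro emits one definition per row.
#define WRITE_CALL(Name, ArgType)            \
  void Name(ArgType arg) {                   \
    (void)arg;  /* unused in this sketch */  \
    std::printf(#Name "(" #ArgType ")\n");   \
  }

FOR_EACH_MAPPING(WRITE_CALL)
#undef WRITE_CALL

int main() {
  CallWithInt(1);       // prints CallWithInt(int)
  CallWithDouble(2.0);  // prints CallWithDouble(double)
  return 0;
}
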
diff --git a/deps/v8/src/arguments.h b/deps/v8/src/arguments.h
index f8fb00c57..92e57401f 100644
--- a/deps/v8/src/arguments.h
+++ b/deps/v8/src/arguments.h
@@ -52,7 +52,8 @@ class Arguments BASE_EMBEDDED {
Object*& operator[] (int index) {
ASSERT(0 <= index && index < length_);
- return arguments_[-index];
+ return *(reinterpret_cast<Object**>(reinterpret_cast<intptr_t>(arguments_) -
+ index * kPointerSize));
}
template <class S> Handle<S> at(int index) {
@@ -82,48 +83,225 @@ class Arguments BASE_EMBEDDED {
};
+// For each type of callback, we have a list of arguments.
+// They are used to generate the Call() functions below.
+// These aren't included in the list as they have duplicate signatures:
+// F(NamedPropertyEnumeratorCallback, ...)
+// F(NamedPropertyGetterCallback, ...)
+
+#define FOR_EACH_CALLBACK_TABLE_MAPPING_0(F) \
+ F(IndexedPropertyEnumeratorCallback, v8::Array) \
+
+#define FOR_EACH_CALLBACK_TABLE_MAPPING_1(F) \
+ F(AccessorGetterCallback, v8::Value, v8::Local<v8::String>) \
+ F(NamedPropertyQueryCallback, \
+ v8::Integer, \
+ v8::Local<v8::String>) \
+ F(NamedPropertyDeleterCallback, \
+ v8::Boolean, \
+ v8::Local<v8::String>) \
+ F(IndexedPropertyGetterCallback, \
+ v8::Value, \
+ uint32_t) \
+ F(IndexedPropertyQueryCallback, \
+ v8::Integer, \
+ uint32_t) \
+ F(IndexedPropertyDeleterCallback, \
+ v8::Boolean, \
+ uint32_t) \
+
+#define FOR_EACH_CALLBACK_TABLE_MAPPING_2(F) \
+ F(NamedPropertySetterCallback, \
+ v8::Value, \
+ v8::Local<v8::String>, \
+ v8::Local<v8::Value>) \
+ F(IndexedPropertySetterCallback, \
+ v8::Value, \
+ uint32_t, \
+ v8::Local<v8::Value>) \
+
+#define FOR_EACH_CALLBACK_TABLE_MAPPING_2_VOID_RETURN(F) \
+ F(AccessorSetterCallback, \
+ void, \
+ v8::Local<v8::String>, \
+ v8::Local<v8::Value>) \
+
+
// Custom arguments replicate a small segment of stack that can be
// accessed through an Arguments object the same way the actual stack
// can.
-class CustomArguments : public Relocatable {
+template<int kArrayLength>
+class CustomArgumentsBase : public Relocatable {
public:
- inline CustomArguments(Isolate* isolate,
- Object* data,
- Object* self,
- JSObject* holder) : Relocatable(isolate) {
- ASSERT(reinterpret_cast<Object*>(isolate)->IsSmi());
- values_[3] = self;
- values_[2] = holder;
- values_[1] = data;
- values_[0] = reinterpret_cast<Object*>(isolate);
+ virtual inline void IterateInstance(ObjectVisitor* v) {
+ v->VisitPointers(values_, values_ + kArrayLength);
}
+ protected:
+ inline Object** begin() { return values_; }
+ explicit inline CustomArgumentsBase(Isolate* isolate)
+ : Relocatable(isolate) {}
+ Object* values_[kArrayLength];
+};
- inline explicit CustomArguments(Isolate* isolate) : Relocatable(isolate) {
-#ifdef DEBUG
- for (size_t i = 0; i < ARRAY_SIZE(values_); i++) {
- values_[i] = reinterpret_cast<Object*>(kZapValue);
- }
-#endif
+
+template<typename T>
+class CustomArguments : public CustomArgumentsBase<T::kArgsLength> {
+ public:
+ static const int kReturnValueOffset = T::kReturnValueIndex;
+
+ typedef CustomArgumentsBase<T::kArgsLength> Super;
+ ~CustomArguments() {
+ this->begin()[kReturnValueOffset] =
+ reinterpret_cast<Object*>(kHandleZapValue);
}
- void IterateInstance(ObjectVisitor* v);
- Object** end() { return values_ + ARRAY_SIZE(values_) - 1; }
+ protected:
+ explicit inline CustomArguments(Isolate* isolate) : Super(isolate) {}
- private:
- Object* values_[4];
+ template<typename V>
+ v8::Handle<V> GetReturnValue(Isolate* isolate);
+
+ inline Isolate* isolate() {
+ return reinterpret_cast<Isolate*>(this->begin()[T::kIsolateIndex]);
+ }
};
-#define DECLARE_RUNTIME_FUNCTION(Type, Name) \
-Type Name(Arguments args, Isolate* isolate)
+class PropertyCallbackArguments
+ : public CustomArguments<PropertyCallbackInfo<Value> > {
+ public:
+ typedef PropertyCallbackInfo<Value> T;
+ typedef CustomArguments<T> Super;
+ static const int kArgsLength = T::kArgsLength;
+ static const int kThisIndex = T::kThisIndex;
+ static const int kHolderIndex = T::kHolderIndex;
+ static const int kDataIndex = T::kDataIndex;
+ static const int kReturnValueDefaultValueIndex =
+ T::kReturnValueDefaultValueIndex;
+ static const int kIsolateIndex = T::kIsolateIndex;
+
+ PropertyCallbackArguments(Isolate* isolate,
+ Object* data,
+ Object* self,
+ JSObject* holder)
+ : Super(isolate) {
+ Object** values = this->begin();
+ values[T::kThisIndex] = self;
+ values[T::kHolderIndex] = holder;
+ values[T::kDataIndex] = data;
+ values[T::kIsolateIndex] = reinterpret_cast<Object*>(isolate);
+ // Here the hole is set as the default value.
+ // It cannot escape into js as it's removed in Call below.
+ values[T::kReturnValueDefaultValueIndex] =
+ isolate->heap()->the_hole_value();
+ values[T::kReturnValueIndex] = isolate->heap()->the_hole_value();
+ ASSERT(values[T::kHolderIndex]->IsHeapObject());
+ ASSERT(values[T::kIsolateIndex]->IsSmi());
+ }
+ /*
+ * The following Call functions wrap the calling of all callbacks to handle
+ * calling either the old or the new style callbacks depending on which one
+ * has been registered.
+ * For old callbacks which return an empty handle, the ReturnValue is checked
+ * and used if it's been set to anything inside the callback.
+ * New style callbacks always use the return value.
+ */
+#define WRITE_CALL_0(Function, ReturnValue) \
+ v8::Handle<ReturnValue> Call(Function f); \
-#define RUNTIME_FUNCTION(Type, Name) \
-Type Name(Arguments args, Isolate* isolate)
+#define WRITE_CALL_1(Function, ReturnValue, Arg1) \
+ v8::Handle<ReturnValue> Call(Function f, Arg1 arg1); \
+#define WRITE_CALL_2(Function, ReturnValue, Arg1, Arg2) \
+ v8::Handle<ReturnValue> Call(Function f, Arg1 arg1, Arg2 arg2); \
+
+#define WRITE_CALL_2_VOID(Function, ReturnValue, Arg1, Arg2) \
+ void Call(Function f, Arg1 arg1, Arg2 arg2); \
+
+FOR_EACH_CALLBACK_TABLE_MAPPING_0(WRITE_CALL_0)
+FOR_EACH_CALLBACK_TABLE_MAPPING_1(WRITE_CALL_1)
+FOR_EACH_CALLBACK_TABLE_MAPPING_2(WRITE_CALL_2)
+FOR_EACH_CALLBACK_TABLE_MAPPING_2_VOID_RETURN(WRITE_CALL_2_VOID)
+
+#undef WRITE_CALL_0
+#undef WRITE_CALL_1
+#undef WRITE_CALL_2
+#undef WRITE_CALL_2_VOID
+};
+
+
+class FunctionCallbackArguments
+ : public CustomArguments<FunctionCallbackInfo<Value> > {
+ public:
+ typedef FunctionCallbackInfo<Value> T;
+ typedef CustomArguments<T> Super;
+ static const int kArgsLength = T::kArgsLength;
+ static const int kHolderIndex = T::kHolderIndex;
+ static const int kDataIndex = T::kDataIndex;
+ static const int kReturnValueDefaultValueIndex =
+ T::kReturnValueDefaultValueIndex;
+ static const int kIsolateIndex = T::kIsolateIndex;
+ static const int kCalleeIndex = T::kCalleeIndex;
+ static const int kContextSaveIndex = T::kContextSaveIndex;
+
+ FunctionCallbackArguments(internal::Isolate* isolate,
+ internal::Object* data,
+ internal::JSFunction* callee,
+ internal::Object* holder,
+ internal::Object** argv,
+ int argc,
+ bool is_construct_call)
+ : Super(isolate),
+ argv_(argv),
+ argc_(argc),
+ is_construct_call_(is_construct_call) {
+ Object** values = begin();
+ values[T::kDataIndex] = data;
+ values[T::kCalleeIndex] = callee;
+ values[T::kHolderIndex] = holder;
+ values[T::kContextSaveIndex] = isolate->heap()->the_hole_value();
+ values[T::kIsolateIndex] = reinterpret_cast<internal::Object*>(isolate);
+ // Here the hole is set as the default value.
+ // It cannot escape into js as it's removed in Call below.
+ values[T::kReturnValueDefaultValueIndex] =
+ isolate->heap()->the_hole_value();
+ values[T::kReturnValueIndex] = isolate->heap()->the_hole_value();
+ ASSERT(values[T::kCalleeIndex]->IsJSFunction());
+ ASSERT(values[T::kHolderIndex]->IsHeapObject());
+ ASSERT(values[T::kIsolateIndex]->IsSmi());
+ }
+
+ /*
+ * The following Call function wraps the calling of all callbacks to handle
+ * calling either the old or the new style callbacks depending on which one
+ * has been registered.
+ * For old callbacks which return an empty handle, the ReturnValue is checked
+ * and used if it's been set to anything inside the callback.
+ * New style callbacks always use the return value.
+ */
+ v8::Handle<v8::Value> Call(FunctionCallback f);
+
+ private:
+ internal::Object** argv_;
+ int argc_;
+ bool is_construct_call_;
+};
+
+
+#define DECLARE_RUNTIME_FUNCTION(Type, Name) \
+Type Name(int args_length, Object** args_object, Isolate* isolate)
-#define RUNTIME_ARGUMENTS(isolate, args) args, isolate
+#define RUNTIME_FUNCTION(Type, Name) \
+static Type __RT_impl_##Name(Arguments args, Isolate* isolate); \
+Type Name(int args_length, Object** args_object, Isolate* isolate) { \
+ Arguments args(args_length, args_object); \
+ return __RT_impl_##Name(args, isolate); \
+} \
+static Type __RT_impl_##Name(Arguments args, Isolate* isolate)
+#define RUNTIME_ARGUMENTS(isolate, args) \
+ args.length(), args.arguments(), isolate
} } // namespace v8::internal
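
The reworked DECLARE_RUNTIME_FUNCTION / RUNTIME_FUNCTION macros above split
every runtime function into a flat entry point taking (args_length,
args_object, isolate) plus a static __RT_impl_##Name body that receives a
reconstructed Arguments object, while RUNTIME_ARGUMENTS flattens an Arguments
back out for re-dispatch. A minimal sketch of that wrapper shape, using
stand-in types rather than the real V8 ones:

// Sketch of the entry-point/implementation split produced by the new
// RUNTIME_FUNCTION macro. Isolate, Object and Arguments are stand-ins.
#include <cstdio>

struct Isolate {};
struct Object {};

class Arguments {
 public:
  Arguments(int length, Object** arguments)
      : length_(length), arguments_(arguments) {}
  int length() const { return length_; }
  Object** arguments() const { return arguments_; }
 private:
  int length_;
  Object** arguments_;
};

#define RUNTIME_FUNCTION(Type, Name)                                    \
  static Type __RT_impl_##Name(Arguments args, Isolate* isolate);      \
  Type Name(int args_length, Object** args_object, Isolate* isolate) { \
    Arguments args(args_length, args_object);                          \
    return __RT_impl_##Name(args, isolate);                            \
  }                                                                     \
  static Type __RT_impl_##Name(Arguments args, Isolate* isolate)

// The macro header is followed by a body, exactly like a function
// definition; generated code calls Name() with the flat argument list.
RUNTIME_FUNCTION(int, Runtime_CountArgs) {
  (void)isolate;
  return args.length();
}

int main() {
  Object* slots[2] = {nullptr, nullptr};
  Isolate isolate;
  std::printf("%d\n", Runtime_CountArgs(2, slots, &isolate));  // prints 2
  return 0;
}
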
diff --git a/deps/v8/src/arm/assembler-arm-inl.h b/deps/v8/src/arm/assembler-arm-inl.h
index 6268c332c..e3b39f407 100644
--- a/deps/v8/src/arm/assembler-arm-inl.h
+++ b/deps/v8/src/arm/assembler-arm-inl.h
@@ -47,13 +47,42 @@ namespace v8 {
namespace internal {
+int Register::NumAllocatableRegisters() {
+ return kMaxNumAllocatableRegisters;
+}
+
+
+int DwVfpRegister::NumRegisters() {
+ return CpuFeatures::IsSupported(VFP32DREGS) ? 32 : 16;
+}
+
+
+int DwVfpRegister::NumAllocatableRegisters() {
+ return NumRegisters() - kNumReservedRegisters;
+}
+
+
int DwVfpRegister::ToAllocationIndex(DwVfpRegister reg) {
ASSERT(!reg.is(kDoubleRegZero));
ASSERT(!reg.is(kScratchDoubleReg));
+ if (reg.code() > kDoubleRegZero.code()) {
+ return reg.code() - kNumReservedRegisters;
+ }
return reg.code();
}
+DwVfpRegister DwVfpRegister::FromAllocationIndex(int index) {
+ ASSERT(index >= 0 && index < NumAllocatableRegisters());
+ ASSERT(kScratchDoubleReg.code() - kDoubleRegZero.code() ==
+ kNumReservedRegisters - 1);
+ if (index >= kDoubleRegZero.code()) {
+ return from_code(index + kNumReservedRegisters);
+ }
+ return from_code(index);
+}
+
+
void RelocInfo::apply(intptr_t delta) {
if (RelocInfo::IsInternalReference(rmode_)) {
// absolute code pointer inside code object moves with the code object.
@@ -66,13 +95,13 @@ void RelocInfo::apply(intptr_t delta) {
Address RelocInfo::target_address() {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
+ ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
return Assembler::target_address_at(pc_);
}
Address RelocInfo::target_address_address() {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY
+ ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
|| rmode_ == EMBEDDED_OBJECT
|| rmode_ == EXTERNAL_REFERENCE);
return reinterpret_cast<Address>(Assembler::target_pointer_address_at(pc_));
@@ -85,9 +114,8 @@ int RelocInfo::target_address_size() {
void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
- Assembler::set_target_address_at(pc_, reinterpret_cast<Address>(
- reinterpret_cast<intptr_t>(target) & ~3));
+ ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
+ Assembler::set_target_address_at(pc_, target);
if (mode == UPDATE_WRITE_BARRIER && host() != NULL && IsCodeTarget(rmode_)) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
@@ -121,6 +149,7 @@ Object** RelocInfo::target_object_address() {
void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ ASSERT(!target->IsConsString());
Assembler::set_target_pointer_at(pc_, reinterpret_cast<Address>(target));
if (mode == UPDATE_WRITE_BARRIER &&
host() != NULL &&
@@ -138,24 +167,35 @@ Address* RelocInfo::target_reference_address() {
}
-Handle<JSGlobalPropertyCell> RelocInfo::target_cell_handle() {
- ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
+Address RelocInfo::target_runtime_entry(Assembler* origin) {
+ ASSERT(IsRuntimeEntry(rmode_));
+ return target_address();
+}
+
+
+void RelocInfo::set_target_runtime_entry(Address target,
+ WriteBarrierMode mode) {
+ ASSERT(IsRuntimeEntry(rmode_));
+ if (target_address() != target) set_target_address(target, mode);
+}
+
+
+Handle<Cell> RelocInfo::target_cell_handle() {
+ ASSERT(rmode_ == RelocInfo::CELL);
Address address = Memory::Address_at(pc_);
- return Handle<JSGlobalPropertyCell>(
- reinterpret_cast<JSGlobalPropertyCell**>(address));
+ return Handle<Cell>(reinterpret_cast<Cell**>(address));
}
-JSGlobalPropertyCell* RelocInfo::target_cell() {
- ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
- return JSGlobalPropertyCell::FromValueAddress(Memory::Address_at(pc_));
+Cell* RelocInfo::target_cell() {
+ ASSERT(rmode_ == RelocInfo::CELL);
+ return Cell::FromValueAddress(Memory::Address_at(pc_));
}
-void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell,
- WriteBarrierMode mode) {
- ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
- Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
+void RelocInfo::set_target_cell(Cell* cell, WriteBarrierMode mode) {
+ ASSERT(rmode_ == RelocInfo::CELL);
+ Address address = cell->address() + Cell::kValueOffset;
Memory::Address_at(pc_) = address;
if (mode == UPDATE_WRITE_BARRIER && host() != NULL) {
// TODO(1550) We are passing NULL as a slot because cell can never be on
@@ -166,6 +206,31 @@ void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell,
}
+static const int kNoCodeAgeSequenceLength = 3;
+
+
+Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
+ UNREACHABLE(); // This should never be reached on Arm.
+ return Handle<Object>();
+}
+
+
+Code* RelocInfo::code_age_stub() {
+ ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+ return Code::GetCodeFromTargetAddress(
+ Memory::Address_at(pc_ + Assembler::kInstrSize *
+ (kNoCodeAgeSequenceLength - 1)));
+}
+
+
+void RelocInfo::set_code_age_stub(Code* stub) {
+ ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+ Memory::Address_at(pc_ + Assembler::kInstrSize *
+ (kNoCodeAgeSequenceLength - 1)) =
+ stub->instruction_start();
+}
+
+
Address RelocInfo::call_address() {
// The 2 instructions offset assumes patched debug break slot or return
// sequence.
@@ -207,19 +272,11 @@ Object** RelocInfo::call_object_address() {
bool RelocInfo::IsPatchedReturnSequence() {
Instr current_instr = Assembler::instr_at(pc_);
Instr next_instr = Assembler::instr_at(pc_ + Assembler::kInstrSize);
-#ifdef USE_BLX
// A patched return sequence is:
// ldr ip, [pc, #0]
// blx ip
return ((current_instr & kLdrPCMask) == kLdrPCPattern)
&& ((next_instr & kBlxRegMask) == kBlxRegPattern);
-#else
- // A patched return sequence is:
- // mov lr, pc
- // ldr pc, [pc, #-4]
- return (current_instr == kMovLrPc)
- && ((next_instr & kLdrPCMask) == kLdrPCPattern);
-#endif
}
@@ -229,26 +286,27 @@ bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
}
-void RelocInfo::Visit(ObjectVisitor* visitor) {
+void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
visitor->VisitEmbeddedPointer(this);
} else if (RelocInfo::IsCodeTarget(mode)) {
visitor->VisitCodeTarget(this);
- } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
- visitor->VisitGlobalPropertyCell(this);
+ } else if (mode == RelocInfo::CELL) {
+ visitor->VisitCell(this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
visitor->VisitExternalReference(this);
+ } else if (RelocInfo::IsCodeAgeSequence(mode)) {
+ visitor->VisitCodeAgeSequence(this);
#ifdef ENABLE_DEBUGGER_SUPPORT
- // TODO(isolates): Get a cached isolate below.
} else if (((RelocInfo::IsJSReturn(mode) &&
IsPatchedReturnSequence()) ||
(RelocInfo::IsDebugBreakSlot(mode) &&
IsPatchedDebugBreakSlotSequence())) &&
- Isolate::Current()->debug()->has_break_points()) {
+ isolate->debug()->has_break_points()) {
visitor->VisitDebugTarget(this);
#endif
- } else if (mode == RelocInfo::RUNTIME_ENTRY) {
+ } else if (RelocInfo::IsRuntimeEntry(mode)) {
visitor->VisitRuntimeEntry(this);
}
}
@@ -261,10 +319,12 @@ void RelocInfo::Visit(Heap* heap) {
StaticVisitor::VisitEmbeddedPointer(heap, this);
} else if (RelocInfo::IsCodeTarget(mode)) {
StaticVisitor::VisitCodeTarget(heap, this);
- } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
- StaticVisitor::VisitGlobalPropertyCell(heap, this);
+ } else if (mode == RelocInfo::CELL) {
+ StaticVisitor::VisitCell(heap, this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
StaticVisitor::VisitExternalReference(this);
+ } else if (RelocInfo::IsCodeAgeSequence(mode)) {
+ StaticVisitor::VisitCodeAgeSequence(heap, this);
#ifdef ENABLE_DEBUGGER_SUPPORT
} else if (heap->isolate()->debug()->has_break_points() &&
((RelocInfo::IsJSReturn(mode) &&
@@ -273,7 +333,7 @@ void RelocInfo::Visit(Heap* heap) {
IsPatchedDebugBreakSlotSequence()))) {
StaticVisitor::VisitDebugTarget(heap, this);
#endif
- } else if (mode == RelocInfo::RUNTIME_ENTRY) {
+ } else if (RelocInfo::IsRuntimeEntry(mode)) {
StaticVisitor::VisitRuntimeEntry(this);
}
}
@@ -296,7 +356,7 @@ Operand::Operand(const ExternalReference& f) {
Operand::Operand(Smi* value) {
rm_ = no_reg;
imm32_ = reinterpret_cast<intptr_t>(value);
- rmode_ = RelocInfo::NONE;
+ rmode_ = RelocInfo::NONE32;
}
@@ -345,14 +405,11 @@ Address Assembler::target_pointer_address_at(Address pc) {
instr = Memory::int32_at(target_pc);
}
-#ifdef USE_BLX
- // If we have a blx instruction, the instruction before it is
- // what needs to be patched.
+ // With a blx instruction, the instruction before it is what needs to be patched.
if ((instr & kBlxRegMask) == kBlxRegPattern) {
target_pc -= kInstrSize;
instr = Memory::int32_at(target_pc);
}
-#endif
ASSERT(IsLdrPcImmediateOffset(instr));
int offset = instr & 0xfff; // offset_12 is unsigned
@@ -379,7 +436,6 @@ Address Assembler::target_pointer_at(Address pc) {
Address Assembler::target_address_from_return_address(Address pc) {
// Returns the address of the call target from the return address that will
// be returned to after a call.
-#ifdef USE_BLX
// Call sequence on V7 or later is :
// movw ip, #... @ call address low 16
// movt ip, #... @ call address high 16
@@ -398,18 +454,10 @@ Address Assembler::target_address_from_return_address(Address pc) {
ASSERT(IsMovW(Memory::int32_at(candidate)) &&
IsMovT(Memory::int32_at(candidate + kInstrSize)));
return candidate;
-#else
- // Call sequence is:
- // mov lr, pc
- // ldr pc, [pc, #...] @ call address
- // @ return address
- return pc - kInstrSize;
-#endif
}
Address Assembler::return_address_from_call_start(Address pc) {
-#ifdef USE_BLX
if (IsLdrPcImmediateOffset(Memory::int32_at(pc))) {
return pc + kInstrSize * 2;
} else {
@@ -417,9 +465,6 @@ Address Assembler::return_address_from_call_start(Address pc) {
ASSERT(IsMovT(Memory::int32_at(pc + kInstrSize)));
return pc + kInstrSize * 3;
}
-#else
- return pc + kInstrSize;
-#endif
}
@@ -473,14 +518,12 @@ void Assembler::set_target_pointer_at(Address pc, Address target) {
Address Assembler::target_address_at(Address pc) {
- return reinterpret_cast<Address>(
- reinterpret_cast<intptr_t>(target_pointer_at(pc)) & ~3);
+ return target_pointer_at(pc);
}
void Assembler::set_target_address_at(Address pc, Address target) {
- set_target_pointer_at(pc, reinterpret_cast<Address>(
- reinterpret_cast<intptr_t>(target) & ~3));
+ set_target_pointer_at(pc, target);
}
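
DwVfpRegister::ToAllocationIndex / FromAllocationIndex above keep the
allocator's indices dense while skipping the two reserved doubles
(kDoubleRegZero and kScratchDoubleReg, which the ASSERTs require to be
adjacent): codes above the reserved pair are shifted down by
kNumReservedRegisters when converting to an index, and shifted back up when
converting back to a register. A small self-contained sketch of that mapping;
the concrete codes below (d14/d15 reserved, 32 registers) are assumptions for
the example:

// Sketch of the reserved-register skipping done by ToAllocationIndex /
// FromAllocationIndex. Register codes here are assumed, not taken from V8.
#include <cassert>
#include <cstdio>

const int kDoubleRegZeroCode = 14;     // assumed code of kDoubleRegZero
const int kScratchDoubleRegCode = 15;  // assumed code of kScratchDoubleReg
const int kNumReservedRegisters = 2;
const int kNumRegisters = 32;          // with VFP32DREGS

int ToAllocationIndex(int code) {
  assert(code != kDoubleRegZeroCode && code != kScratchDoubleRegCode);
  // Codes above the reserved pair are shifted down so indices stay dense.
  return code > kDoubleRegZeroCode ? code - kNumReservedRegisters : code;
}

int FromAllocationIndex(int index) {
  // Indices at or past the first reserved code map back above the pair.
  return index >= kDoubleRegZeroCode ? index + kNumReservedRegisters : index;
}

int main() {
  for (int code = 0; code < kNumRegisters; ++code) {
    if (code == kDoubleRegZeroCode || code == kScratchDoubleRegCode) continue;
    assert(FromAllocationIndex(ToAllocationIndex(code)) == code);
  }
  std::printf("d16 maps to allocation index %d\n", ToAllocationIndex(16));
  return 0;
}
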
diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc
index 9be62a404..05b25ae2d 100644
--- a/deps/v8/src/arm/assembler-arm.cc
+++ b/deps/v8/src/arm/assembler-arm.cc
@@ -36,9 +36,10 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_ARM)
+#if V8_TARGET_ARCH_ARM
#include "arm/assembler-arm-inl.h"
+#include "macro-assembler.h"
#include "serialize.h"
namespace v8 {
@@ -48,7 +49,15 @@ namespace internal {
bool CpuFeatures::initialized_ = false;
#endif
unsigned CpuFeatures::supported_ = 0;
-unsigned CpuFeatures::found_by_runtime_probing_ = 0;
+unsigned CpuFeatures::found_by_runtime_probing_only_ = 0;
+unsigned CpuFeatures::cross_compile_ = 0;
+unsigned CpuFeatures::cache_line_size_ = 64;
+
+
+ExternalReference ExternalReference::cpu_features() {
+ ASSERT(CpuFeatures::initialized_);
+ return ExternalReference(&CpuFeatures::supported_);
+}
// Get the CPU features enabled by the build. For cross compilation the
@@ -58,26 +67,21 @@ unsigned CpuFeatures::found_by_runtime_probing_ = 0;
static unsigned CpuFeaturesImpliedByCompiler() {
unsigned answer = 0;
#ifdef CAN_USE_ARMV7_INSTRUCTIONS
- answer |= 1u << ARMv7;
+ if (FLAG_enable_armv7) {
+ answer |= 1u << ARMv7;
+ }
#endif // CAN_USE_ARMV7_INSTRUCTIONS
#ifdef CAN_USE_VFP3_INSTRUCTIONS
- answer |= 1u << VFP3 | 1u << VFP2 | 1u << ARMv7;
+ if (FLAG_enable_vfp3) {
+ answer |= 1u << VFP3 | 1u << ARMv7;
+ }
#endif // CAN_USE_VFP3_INSTRUCTIONS
-#ifdef CAN_USE_VFP2_INSTRUCTIONS
- answer |= 1u << VFP2;
-#endif // CAN_USE_VFP2_INSTRUCTIONS
-
-#ifdef __arm__
- // If the compiler is allowed to use VFP then we can use VFP too in our code
- // generation even when generating snapshots. ARMv7 and hardware floating
- // point support implies VFPv3, see ARM DDI 0406B, page A1-6.
-#if defined(CAN_USE_ARMV7_INSTRUCTIONS) && defined(__VFP_FP__) \
- && !defined(__SOFTFP__)
- answer |= 1u << VFP3 | 1u << ARMv7 | 1u << VFP2;
-#endif // defined(CAN_USE_ARMV7_INSTRUCTIONS) && defined(__VFP_FP__)
- // && !defined(__SOFTFP__)
-#endif // _arm__
- if (answer & (1u << ARMv7)) {
+#ifdef CAN_USE_VFP32DREGS
+ if (FLAG_enable_32dregs) {
+ answer |= 1u << VFP32DREGS;
+ }
+#endif // CAN_USE_VFP32DREGS
+ if ((answer & (1u << ARMv7)) && FLAG_enable_unaligned_accesses) {
answer |= 1u << UNALIGNED_ACCESSES;
}
@@ -85,8 +89,19 @@ static unsigned CpuFeaturesImpliedByCompiler() {
}
+const char* DwVfpRegister::AllocationIndexToString(int index) {
+ ASSERT(index >= 0 && index < NumAllocatableRegisters());
+ ASSERT(kScratchDoubleReg.code() - kDoubleRegZero.code() ==
+ kNumReservedRegisters - 1);
+ if (index >= kDoubleRegZero.code())
+ index += kNumReservedRegisters;
+
+ return VFPRegisters::Name(index, true);
+}
+
+
void CpuFeatures::Probe() {
- unsigned standard_features = static_cast<unsigned>(
+ uint64_t standard_features = static_cast<unsigned>(
OS::CpuFeaturesImpliedByPlatform()) | CpuFeaturesImpliedByCompiler();
ASSERT(supported_ == 0 || supported_ == standard_features);
#ifdef DEBUG
@@ -100,6 +115,8 @@ void CpuFeatures::Probe() {
if (Serializer::enabled()) {
// No probing for features if we might serialize (generate snapshot).
+ printf(" ");
+ PrintFeatures();
return;
}
@@ -107,53 +124,165 @@ void CpuFeatures::Probe() {
// For the simulator=arm build, use VFP when FLAG_enable_vfp3 is
// enabled. VFPv3 implies ARMv7, see ARM DDI 0406B, page A1-6.
if (FLAG_enable_vfp3) {
- supported_ |= 1u << VFP3 | 1u << ARMv7 | 1u << VFP2;
+ supported_ |=
+ static_cast<uint64_t>(1) << VFP3 |
+ static_cast<uint64_t>(1) << ARMv7;
+ }
+ if (FLAG_enable_neon) {
+ supported_ |= 1u << NEON;
}
// For the simulator=arm build, use ARMv7 when FLAG_enable_armv7 is enabled
if (FLAG_enable_armv7) {
- supported_ |= 1u << ARMv7;
+ supported_ |= static_cast<uint64_t>(1) << ARMv7;
}
if (FLAG_enable_sudiv) {
- supported_ |= 1u << SUDIV;
+ supported_ |= static_cast<uint64_t>(1) << SUDIV;
}
if (FLAG_enable_movw_movt) {
- supported_ |= 1u << MOVW_MOVT_IMMEDIATE_LOADS;
+ supported_ |= static_cast<uint64_t>(1) << MOVW_MOVT_IMMEDIATE_LOADS;
+ }
+
+ if (FLAG_enable_32dregs) {
+ supported_ |= static_cast<uint64_t>(1) << VFP32DREGS;
+ }
+
+ if (FLAG_enable_unaligned_accesses) {
+ supported_ |= static_cast<uint64_t>(1) << UNALIGNED_ACCESSES;
}
+
#else // __arm__
// Probe for additional features not already known to be available.
- if (!IsSupported(VFP3) && OS::ArmCpuHasFeature(VFP3)) {
+ CPU cpu;
+ if (!IsSupported(VFP3) && FLAG_enable_vfp3 && cpu.has_vfp3()) {
// This implementation also sets the VFP flags if runtime
- // detection of VFP returns true. VFPv3 implies ARMv7 and VFP2, see ARM DDI
+ // detection of VFP returns true. VFPv3 implies ARMv7, see ARM DDI
// 0406B, page A1-6.
- found_by_runtime_probing_ |= 1u << VFP3 | 1u << ARMv7 | 1u << VFP2;
- } else if (!IsSupported(VFP2) && OS::ArmCpuHasFeature(VFP2)) {
- found_by_runtime_probing_ |= 1u << VFP2;
+ found_by_runtime_probing_only_ |=
+ static_cast<uint64_t>(1) << VFP3 |
+ static_cast<uint64_t>(1) << ARMv7;
+ }
+
+ if (!IsSupported(NEON) && FLAG_enable_neon && cpu.has_neon()) {
+ found_by_runtime_probing_only_ |= 1u << NEON;
+ }
+
+ if (!IsSupported(ARMv7) && FLAG_enable_armv7 && cpu.architecture() >= 7) {
+ found_by_runtime_probing_only_ |= static_cast<uint64_t>(1) << ARMv7;
}
- if (!IsSupported(ARMv7) && OS::ArmCpuHasFeature(ARMv7)) {
- found_by_runtime_probing_ |= 1u << ARMv7;
+ if (!IsSupported(SUDIV) && FLAG_enable_sudiv && cpu.has_idiva()) {
+ found_by_runtime_probing_only_ |= static_cast<uint64_t>(1) << SUDIV;
}
- if (!IsSupported(SUDIV) && OS::ArmCpuHasFeature(SUDIV)) {
- found_by_runtime_probing_ |= 1u << SUDIV;
+ if (!IsSupported(UNALIGNED_ACCESSES) && FLAG_enable_unaligned_accesses
+ && cpu.architecture() >= 7) {
+ found_by_runtime_probing_only_ |=
+ static_cast<uint64_t>(1) << UNALIGNED_ACCESSES;
}
- if (!IsSupported(UNALIGNED_ACCESSES) && OS::ArmCpuHasFeature(ARMv7)) {
- found_by_runtime_probing_ |= 1u << UNALIGNED_ACCESSES;
+ // Use movw/movt for QUALCOMM ARMv7 cores.
+ if (cpu.implementer() == CPU::QUALCOMM &&
+ cpu.architecture() >= 7 &&
+ FLAG_enable_movw_movt) {
+ found_by_runtime_probing_only_ |=
+ static_cast<uint64_t>(1) << MOVW_MOVT_IMMEDIATE_LOADS;
}
- if (OS::GetCpuImplementer() == QUALCOMM_IMPLEMENTER &&
- OS::ArmCpuHasFeature(ARMv7)) {
- found_by_runtime_probing_ |= 1u << MOVW_MOVT_IMMEDIATE_LOADS;
+ // ARM Cortex-A9 and Cortex-A5 have 32 byte cachelines.
+ if (cpu.implementer() == CPU::ARM &&
+ (cpu.part() == CPU::ARM_CORTEX_A5 ||
+ cpu.part() == CPU::ARM_CORTEX_A9)) {
+ cache_line_size_ = 32;
}
- supported_ |= found_by_runtime_probing_;
+ if (!IsSupported(VFP32DREGS) && FLAG_enable_32dregs && cpu.has_vfp3_d32()) {
+ found_by_runtime_probing_only_ |= static_cast<uint64_t>(1) << VFP32DREGS;
+ }
+
+ supported_ |= found_by_runtime_probing_only_;
+#endif
+
+ // Assert that VFP3 implies ARMv7.
+ ASSERT(!IsSupported(VFP3) || IsSupported(ARMv7));
+}
+
+
+void CpuFeatures::PrintTarget() {
+ const char* arm_arch = NULL;
+ const char* arm_test = "";
+ const char* arm_fpu = "";
+ const char* arm_thumb = "";
+ const char* arm_float_abi = NULL;
+
+#if defined CAN_USE_ARMV7_INSTRUCTIONS
+ arm_arch = "arm v7";
+#else
+ arm_arch = "arm v6";
#endif
- // Assert that VFP3 implies VFP2 and ARMv7.
- ASSERT(!IsSupported(VFP3) || (IsSupported(VFP2) && IsSupported(ARMv7)));
+#ifdef __arm__
+
+# ifdef ARM_TEST
+ arm_test = " test";
+# endif
+# if defined __ARM_NEON__
+ arm_fpu = " neon";
+# elif defined CAN_USE_VFP3_INSTRUCTIONS
+ arm_fpu = " vfp3";
+# else
+ arm_fpu = " vfp2";
+# endif
+# if (defined __thumb__) || (defined __thumb2__)
+ arm_thumb = " thumb";
+# endif
+ arm_float_abi = OS::ArmUsingHardFloat() ? "hard" : "softfp";
+
+#else // __arm__
+
+ arm_test = " simulator";
+# if defined CAN_USE_VFP3_INSTRUCTIONS
+# if defined CAN_USE_VFP32DREGS
+ arm_fpu = " vfp3";
+# else
+ arm_fpu = " vfp3-d16";
+# endif
+# else
+ arm_fpu = " vfp2";
+# endif
+# if USE_EABI_HARDFLOAT == 1
+ arm_float_abi = "hard";
+# else
+ arm_float_abi = "softfp";
+# endif
+
+#endif // __arm__
+
+ printf("target%s %s%s%s %s\n",
+ arm_test, arm_arch, arm_fpu, arm_thumb, arm_float_abi);
+}
+
+
+void CpuFeatures::PrintFeatures() {
+ printf(
+ "ARMv7=%d VFP3=%d VFP32DREGS=%d NEON=%d SUDIV=%d UNALIGNED_ACCESSES=%d "
+ "MOVW_MOVT_IMMEDIATE_LOADS=%d",
+ CpuFeatures::IsSupported(ARMv7),
+ CpuFeatures::IsSupported(VFP3),
+ CpuFeatures::IsSupported(VFP32DREGS),
+ CpuFeatures::IsSupported(NEON),
+ CpuFeatures::IsSupported(SUDIV),
+ CpuFeatures::IsSupported(UNALIGNED_ACCESSES),
+ CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS));
+#ifdef __arm__
+ bool eabi_hardfloat = OS::ArmUsingHardFloat();
+#elif USE_EABI_HARDFLOAT
+ bool eabi_hardfloat = true;
+#else
+ bool eabi_hardfloat = false;
+#endif
+ printf(" USE_EABI_HARDFLOAT=%d\n", eabi_hardfloat);
}
@@ -197,17 +326,18 @@ void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
// See assembler-arm-inl.h for inlined constructors
Operand::Operand(Handle<Object> handle) {
+ AllowDeferredHandleDereference using_raw_address;
rm_ = no_reg;
// Verify all Objects referred by code are NOT in new space.
Object* obj = *handle;
- ASSERT(!HEAP->InNewSpace(obj));
if (obj->IsHeapObject()) {
+ ASSERT(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
imm32_ = reinterpret_cast<intptr_t>(handle.location());
rmode_ = RelocInfo::EMBEDDED_OBJECT;
} else {
// no relocation needed
- imm32_ = reinterpret_cast<intptr_t>(obj);
- rmode_ = RelocInfo::NONE;
+ imm32_ = reinterpret_cast<intptr_t>(obj);
+ rmode_ = RelocInfo::NONE32;
}
}
@@ -244,6 +374,7 @@ MemOperand::MemOperand(Register rn, int32_t offset, AddrMode am) {
am_ = am;
}
+
MemOperand::MemOperand(Register rn, Register rm, AddrMode am) {
rn_ = rn;
rm_ = rm;
@@ -264,6 +395,66 @@ MemOperand::MemOperand(Register rn, Register rm,
}
+NeonMemOperand::NeonMemOperand(Register rn, AddrMode am, int align) {
+ ASSERT((am == Offset) || (am == PostIndex));
+ rn_ = rn;
+ rm_ = (am == Offset) ? pc : sp;
+ SetAlignment(align);
+}
+
+
+NeonMemOperand::NeonMemOperand(Register rn, Register rm, int align) {
+ rn_ = rn;
+ rm_ = rm;
+ SetAlignment(align);
+}
+
+
+void NeonMemOperand::SetAlignment(int align) {
+ switch (align) {
+ case 0:
+ align_ = 0;
+ break;
+ case 64:
+ align_ = 1;
+ break;
+ case 128:
+ align_ = 2;
+ break;
+ case 256:
+ align_ = 3;
+ break;
+ default:
+ UNREACHABLE();
+ align_ = 0;
+ break;
+ }
+}
+
+
+NeonListOperand::NeonListOperand(DoubleRegister base, int registers_count) {
+ base_ = base;
+ switch (registers_count) {
+ case 1:
+ type_ = nlt_1;
+ break;
+ case 2:
+ type_ = nlt_2;
+ break;
+ case 3:
+ type_ = nlt_3;
+ break;
+ case 4:
+ type_ = nlt_4;
+ break;
+ default:
+ UNREACHABLE();
+ type_ = nlt_1;
+ break;
+ }
+}
+
+
// -----------------------------------------------------------------------------
// Specific instructions, constants, and masks.
@@ -282,8 +473,11 @@ const Instr kPopRegPattern =
// mov lr, pc
const Instr kMovLrPc = al | MOV | kRegister_pc_Code | kRegister_lr_Code * B12;
// ldr rd, [pc, #offset]
-const Instr kLdrPCMask = kCondMask | 15 * B24 | 7 * B20 | 15 * B16;
-const Instr kLdrPCPattern = al | 5 * B24 | L | kRegister_pc_Code * B16;
+const Instr kLdrPCMask = 15 * B24 | 7 * B20 | 15 * B16;
+const Instr kLdrPCPattern = 5 * B24 | L | kRegister_pc_Code * B16;
+// vldr dd, [pc, #offset]
+const Instr kVldrDPCMask = 15 * B24 | 3 * B20 | 15 * B16 | 15 * B8;
+const Instr kVldrDPCPattern = 13 * B24 | L | kRegister_pc_Code * B16 | 11 * B8;
// blxcc rm
const Instr kBlxRegMask =
15 * B24 | 15 * B20 | 15 * B16 | 15 * B12 | 15 * B8 | 15 * B4;
@@ -318,47 +512,13 @@ const Instr kLdrStrInstrArgumentMask = 0x0000ffff;
const Instr kLdrStrOffsetMask = 0x00000fff;
-// Spare buffer.
-static const int kMinimalBufferSize = 4*KB;
-
-
-Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
- : AssemblerBase(arg_isolate),
+Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
+ : AssemblerBase(isolate, buffer, buffer_size),
recorded_ast_id_(TypeFeedbackId::None()),
- positions_recorder_(this),
- emit_debug_code_(FLAG_debug_code),
- predictable_code_size_(false) {
- if (buffer == NULL) {
- // Do our own buffer management.
- if (buffer_size <= kMinimalBufferSize) {
- buffer_size = kMinimalBufferSize;
-
- if (isolate()->assembler_spare_buffer() != NULL) {
- buffer = isolate()->assembler_spare_buffer();
- isolate()->set_assembler_spare_buffer(NULL);
- }
- }
- if (buffer == NULL) {
- buffer_ = NewArray<byte>(buffer_size);
- } else {
- buffer_ = static_cast<byte*>(buffer);
- }
- buffer_size_ = buffer_size;
- own_buffer_ = true;
-
- } else {
- // Use externally provided buffer instead.
- ASSERT(buffer_size > 0);
- buffer_ = static_cast<byte*>(buffer);
- buffer_size_ = buffer_size;
- own_buffer_ = false;
- }
-
- // Set up buffer pointers.
- ASSERT(buffer_ != NULL);
- pc_ = buffer_;
- reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
+ positions_recorder_(this) {
+ reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
num_pending_reloc_info_ = 0;
+ num_pending_64_bit_reloc_info_ = 0;
next_buffer_check_ = 0;
const_pool_blocked_nesting_ = 0;
no_const_pool_before_ = 0;
@@ -370,14 +530,6 @@ Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
Assembler::~Assembler() {
ASSERT(const_pool_blocked_nesting_ == 0);
- if (own_buffer_) {
- if (isolate()->assembler_spare_buffer() == NULL &&
- buffer_size_ == kMinimalBufferSize) {
- isolate()->set_assembler_spare_buffer(buffer_);
- } else {
- DeleteArray(buffer_);
- }
- }
}
@@ -385,6 +537,7 @@ void Assembler::GetCode(CodeDesc* desc) {
// Emit constant pool if necessary.
CheckConstPool(true, false);
ASSERT(num_pending_reloc_info_ == 0);
+ ASSERT(num_pending_64_bit_reloc_info_ == 0);
// Set up code descriptor.
desc->buffer = buffer_;
@@ -431,6 +584,11 @@ bool Assembler::IsLdrRegisterImmediate(Instr instr) {
}
+bool Assembler::IsVldrDRegisterImmediate(Instr instr) {
+ return (instr & (15 * B24 | 3 * B20 | 15 * B8)) == (13 * B24 | B20 | 11 * B8);
+}
+
+
int Assembler::GetLdrRegisterImmediateOffset(Instr instr) {
ASSERT(IsLdrRegisterImmediate(instr));
bool positive = (instr & B23) == B23;
@@ -439,6 +597,15 @@ int Assembler::GetLdrRegisterImmediateOffset(Instr instr) {
}
+int Assembler::GetVldrDRegisterImmediateOffset(Instr instr) {
+ ASSERT(IsVldrDRegisterImmediate(instr));
+ bool positive = (instr & B23) == B23;
+ int offset = instr & kOff8Mask; // Zero extended offset.
+ offset <<= 2;
+ return positive ? offset : -offset;
+}
+
+
Instr Assembler::SetLdrRegisterImmediateOffset(Instr instr, int offset) {
ASSERT(IsLdrRegisterImmediate(instr));
bool positive = offset >= 0;
@@ -451,6 +618,19 @@ Instr Assembler::SetLdrRegisterImmediateOffset(Instr instr, int offset) {
}
+Instr Assembler::SetVldrDRegisterImmediateOffset(Instr instr, int offset) {
+ ASSERT(IsVldrDRegisterImmediate(instr));
+ ASSERT((offset & ~3) == offset); // Must be 64-bit aligned.
+ bool positive = offset >= 0;
+ if (!positive) offset = -offset;
+ ASSERT(is_uint10(offset));
+ // Set bit indicating whether the offset should be added.
+ instr = (instr & ~B23) | (positive ? B23 : 0);
+ // Set the actual offset. Its bottom 2 bits are zero.
+ return (instr & ~kOff8Mask) | (offset >> 2);
+}
+
+
bool Assembler::IsStrRegisterImmediate(Instr instr) {
return (instr & (B27 | B26 | B25 | B22 | B20)) == B26;
}
@@ -536,7 +716,14 @@ bool Assembler::IsLdrRegFpNegOffset(Instr instr) {
bool Assembler::IsLdrPcImmediateOffset(Instr instr) {
// Check the instruction is indeed a
// ldr<cond> <Rd>, [pc +/- offset_12].
- return (instr & (kLdrPCMask & ~kCondMask)) == 0x051f0000;
+ return (instr & kLdrPCMask) == kLdrPCPattern;
+}
+
+
+bool Assembler::IsVldrDPcImmediateOffset(Instr instr) {
+ // Check the instruction is indeed a
+ // vldr<cond> <Dd>, [pc +/- offset_10].
+ return (instr & kVldrDPCMask) == kVldrDPCPattern;
}
@@ -569,6 +756,7 @@ int Assembler::GetCmpImmediateRawImmediate(Instr instr) {
return instr & kOff12Mask;
}
+
// Labels refer to positions in the (to be) generated code.
// There are bound, linked, and unused labels.
//
@@ -578,17 +766,20 @@ int Assembler::GetCmpImmediateRawImmediate(Instr instr) {
// Linked labels refer to unknown positions in the code
// to be generated; pos() is the position of the last
// instruction using the label.
-
-
-// The link chain is terminated by a negative code position (must be aligned)
-const int kEndOfChain = -4;
+//
+// The linked labels form a link chain by making the branch offset
+// in the instruction stream point to the previous branch
+// instruction using the same label.
+//
+// The link chain is terminated by a branch offset pointing to the
+// same position.
int Assembler::target_at(int pos) {
Instr instr = instr_at(pos);
- if ((instr & ~kImm24Mask) == 0) {
- // Emitted label constant, not part of a branch.
- return instr - (Code::kHeaderSize - kHeapObjectTag);
+ if (is_uint24(instr)) {
+ // Emitted link to a label, not part of a branch.
+ return instr;
}
ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx imm24
int imm26 = ((instr & kImm24Mask) << 8) >> 6;
@@ -603,11 +794,72 @@ int Assembler::target_at(int pos) {
void Assembler::target_at_put(int pos, int target_pos) {
Instr instr = instr_at(pos);
- if ((instr & ~kImm24Mask) == 0) {
- ASSERT(target_pos == kEndOfChain || target_pos >= 0);
- // Emitted label constant, not part of a branch.
- // Make label relative to Code* of generated Code object.
- instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
+ if (is_uint24(instr)) {
+ ASSERT(target_pos == pos || target_pos >= 0);
+ // Emitted link to a label, not part of a branch.
+ // Load the position of the label relative to the generated code object
+ // pointer in a register.
+
+ // Here are the instructions we need to emit:
+ // For ARMv7: target24 => target16_1:target16_0
+ // movw dst, #target16_0
+ // movt dst, #target16_1
+ // For ARMv6: target24 => target8_2:target8_1:target8_0
+ // mov dst, #target8_0
+ // orr dst, dst, #target8_1 << 8
+ // orr dst, dst, #target8_2 << 16
+
+ // We extract the destination register from the emitted nop instruction.
+ Register dst = Register::from_code(
+ Instruction::RmValue(instr_at(pos + kInstrSize)));
+ ASSERT(IsNop(instr_at(pos + kInstrSize), dst.code()));
+ uint32_t target24 = target_pos + (Code::kHeaderSize - kHeapObjectTag);
+ ASSERT(is_uint24(target24));
+ if (is_uint8(target24)) {
+ // If the target fits in a byte then only patch with a mov
+ // instruction.
+ CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
+ 1,
+ CodePatcher::DONT_FLUSH);
+ patcher.masm()->mov(dst, Operand(target24));
+ } else {
+ uint16_t target16_0 = target24 & kImm16Mask;
+ uint16_t target16_1 = target24 >> 16;
+ if (CpuFeatures::IsSupported(ARMv7)) {
+ // Patch with movw/movt.
+ if (target16_1 == 0) {
+ CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
+ 1,
+ CodePatcher::DONT_FLUSH);
+ patcher.masm()->movw(dst, target16_0);
+ } else {
+ CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
+ 2,
+ CodePatcher::DONT_FLUSH);
+ patcher.masm()->movw(dst, target16_0);
+ patcher.masm()->movt(dst, target16_1);
+ }
+ } else {
+ // Patch with a sequence of mov/orr/orr instructions.
+ uint8_t target8_0 = target16_0 & kImm8Mask;
+ uint8_t target8_1 = target16_0 >> 8;
+ uint8_t target8_2 = target16_1 & kImm8Mask;
+ if (target8_2 == 0) {
+ CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
+ 2,
+ CodePatcher::DONT_FLUSH);
+ patcher.masm()->mov(dst, Operand(target8_0));
+ patcher.masm()->orr(dst, dst, Operand(target8_1 << 8));
+ } else {
+ CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
+ 3,
+ CodePatcher::DONT_FLUSH);
+ patcher.masm()->mov(dst, Operand(target8_0));
+ patcher.masm()->orr(dst, dst, Operand(target8_1 << 8));
+ patcher.masm()->orr(dst, dst, Operand(target8_2 << 16));
+ }
+ }
+ }
return;
}
int imm26 = target_pos - (pos + kPcLoadDelta);
@@ -700,27 +952,6 @@ void Assembler::bind_to(Label* L, int pos) {
}
-void Assembler::link_to(Label* L, Label* appendix) {
- if (appendix->is_linked()) {
- if (L->is_linked()) {
- // Append appendix to L's list.
- int fixup_pos;
- int link = L->pos();
- do {
- fixup_pos = link;
- link = target_at(fixup_pos);
- } while (link > 0);
- ASSERT(link == kEndOfChain);
- target_at_put(fixup_pos, appendix->pos());
- } else {
- // L is empty, simply use appendix.
- *L = *appendix;
- }
- }
- appendix->Unuse(); // appendix should not be used anymore
-}
-
-
void Assembler::bind(Label* L) {
ASSERT(!L->is_bound()); // label can only be bound once
bind_to(L, pc_offset());
@@ -730,7 +961,9 @@ void Assembler::bind(Label* L) {
void Assembler::next(Label* L) {
ASSERT(L->is_linked());
int link = target_at(L->pos());
- if (link == kEndOfChain) {
+ if (link == L->pos()) {
+ // Branch target points to the same instruction. This is the end of the link
+ // chain.
L->Unuse();
} else {
ASSERT(link >= 0);
@@ -812,7 +1045,7 @@ bool Operand::must_output_reloc_info(const Assembler* assembler) const {
#endif // def DEBUG
if (assembler != NULL && assembler->predictable_code_size()) return true;
return Serializer::enabled();
- } else if (rmode_ == RelocInfo::NONE) {
+ } else if (RelocInfo::IsNone(rmode_)) {
return false;
}
return true;
@@ -1043,9 +1276,11 @@ int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
target_pos = L->pos();
} else {
if (L->is_linked()) {
- target_pos = L->pos(); // L's link
+ // Point to previous instruction that uses the link.
+ target_pos = L->pos();
} else {
- target_pos = kEndOfChain;
+ // First entry of the link chain points to itself.
+ target_pos = pc_offset();
}
L->link_to(pc_offset());
}
@@ -1057,22 +1292,6 @@ int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
}
-void Assembler::label_at_put(Label* L, int at_offset) {
- int target_pos;
- if (L->is_bound()) {
- target_pos = L->pos();
- } else {
- if (L->is_linked()) {
- target_pos = L->pos(); // L's link
- } else {
- target_pos = kEndOfChain;
- }
- L->link_to(at_offset);
- instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
- }
-}
-
-
// Branch instructions.
void Assembler::b(int branch_offset, Condition cond) {
ASSERT((branch_offset & 3) == 0);
@@ -1215,6 +1434,45 @@ void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) {
}
+void Assembler::mov_label_offset(Register dst, Label* label) {
+ if (label->is_bound()) {
+ mov(dst, Operand(label->pos() + (Code::kHeaderSize - kHeapObjectTag)));
+ } else {
+ // Emit the link to the label in the code stream followed by extra nop
+ // instructions.
+ // If the label is not linked, then start a new link chain by linking it to
+ // itself, emitting pc_offset().
+ int link = label->is_linked() ? label->pos() : pc_offset();
+ label->link_to(pc_offset());
+
+ // When the label is bound, these instructions will be patched with a
+ // sequence of movw/movt or mov/orr/orr instructions. They will load the
+ // destination register with the position of the label from the beginning
+ // of the code.
+ //
+ // The link will be extracted from the first instruction and the destination
+ // register from the second.
+ // For ARMv7:
+ // link
+ // mov dst, dst
+ // For ARMv6:
+ // link
+ // mov dst, dst
+ // mov dst, dst
+ //
+ // When the label gets bound: target_at extracts the link and target_at_put
+ // patches the instructions.
+ ASSERT(is_uint24(link));
+ BlockConstPoolScope block_const_pool(this);
+ emit(link);
+ nop(dst.code());
+ if (!CpuFeatures::IsSupported(ARMv7)) {
+ nop(dst.code());
+ }
+ }
+}
+
+
void Assembler::movw(Register reg, uint32_t immediate, Condition cond) {
ASSERT(immediate < 0x10000);
// May use movw if supported, but on unsupported platforms will try to use
@@ -1260,6 +1518,7 @@ void Assembler::mls(Register dst, Register src1, Register src2, Register srcA,
void Assembler::sdiv(Register dst, Register src1, Register src2,
Condition cond) {
ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
+ ASSERT(IsEnabled(SUDIV));
emit(cond | B26 | B25| B24 | B20 | dst.code()*B16 | 0xf * B12 |
src2.code()*B8 | B4 | src1.code());
}
@@ -1434,6 +1693,107 @@ void Assembler::bfi(Register dst,
}
+void Assembler::pkhbt(Register dst,
+ Register src1,
+ const Operand& src2,
+ Condition cond ) {
+ // Instruction details available in ARM DDI 0406C.b, A8.8.125.
+ // cond(31-28) | 01101000(27-20) | Rn(19-16) |
+ // Rd(15-12) | imm5(11-7) | 0(6) | 01(5-4) | Rm(3-0)
+ ASSERT(!dst.is(pc));
+ ASSERT(!src1.is(pc));
+ ASSERT(!src2.rm().is(pc));
+ ASSERT(!src2.rm().is(no_reg));
+ ASSERT(src2.rs().is(no_reg));
+ ASSERT((src2.shift_imm_ >= 0) && (src2.shift_imm_ <= 31));
+ ASSERT(src2.shift_op() == LSL);
+ emit(cond | 0x68*B20 | src1.code()*B16 | dst.code()*B12 |
+ src2.shift_imm_*B7 | B4 | src2.rm().code());
+}
+
+
+void Assembler::pkhtb(Register dst,
+ Register src1,
+ const Operand& src2,
+ Condition cond) {
+ // Instruction details available in ARM DDI 0406C.b, A8.8.125.
+ // cond(31-28) | 01101000(27-20) | Rn(19-16) |
+ // Rd(15-12) | imm5(11-7) | 1(6) | 01(5-4) | Rm(3-0)
+ ASSERT(!dst.is(pc));
+ ASSERT(!src1.is(pc));
+ ASSERT(!src2.rm().is(pc));
+ ASSERT(!src2.rm().is(no_reg));
+ ASSERT(src2.rs().is(no_reg));
+ ASSERT((src2.shift_imm_ >= 1) && (src2.shift_imm_ <= 32));
+ ASSERT(src2.shift_op() == ASR);
+ int asr = (src2.shift_imm_ == 32) ? 0 : src2.shift_imm_;
+ emit(cond | 0x68*B20 | src1.code()*B16 | dst.code()*B12 |
+ asr*B7 | B6 | B4 | src2.rm().code());
+}
+
+
+void Assembler::uxtb(Register dst,
+ const Operand& src,
+ Condition cond) {
+ // Instruction details available in ARM DDI 0406C.b, A8.8.274.
+ // cond(31-28) | 01101110(27-20) | 1111(19-16) |
+ // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
+ ASSERT(!dst.is(pc));
+ ASSERT(!src.rm().is(pc));
+ ASSERT(!src.rm().is(no_reg));
+ ASSERT(src.rs().is(no_reg));
+ ASSERT((src.shift_imm_ == 0) ||
+ (src.shift_imm_ == 8) ||
+ (src.shift_imm_ == 16) ||
+ (src.shift_imm_ == 24));
+ ASSERT(src.shift_op() == ROR);
+ emit(cond | 0x6E*B20 | 0xF*B16 | dst.code()*B12 |
+ ((src.shift_imm_ >> 1)&0xC)*B8 | 7*B4 | src.rm().code());
+}
+
+
+void Assembler::uxtab(Register dst,
+ Register src1,
+ const Operand& src2,
+ Condition cond) {
+ // Instruction details available in ARM DDI 0406C.b, A8.8.271.
+ // cond(31-28) | 01101110(27-20) | Rn(19-16) |
+ // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
+ ASSERT(!dst.is(pc));
+ ASSERT(!src1.is(pc));
+ ASSERT(!src2.rm().is(pc));
+ ASSERT(!src2.rm().is(no_reg));
+ ASSERT(src2.rs().is(no_reg));
+ ASSERT((src2.shift_imm_ == 0) ||
+ (src2.shift_imm_ == 8) ||
+ (src2.shift_imm_ == 16) ||
+ (src2.shift_imm_ == 24));
+ ASSERT(src2.shift_op() == ROR);
+ emit(cond | 0x6E*B20 | src1.code()*B16 | dst.code()*B12 |
+ ((src2.shift_imm_ >> 1) &0xC)*B8 | 7*B4 | src2.rm().code());
+}
+
+
+void Assembler::uxtb16(Register dst,
+ const Operand& src,
+ Condition cond) {
+ // Instruction details available in ARM DDI 0406C.b, A8.8.275.
+ // cond(31-28) | 01101100(27-20) | 1111(19-16) |
+ // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
+ ASSERT(!dst.is(pc));
+ ASSERT(!src.rm().is(pc));
+ ASSERT(!src.rm().is(no_reg));
+ ASSERT(src.rs().is(no_reg));
+ ASSERT((src.shift_imm_ == 0) ||
+ (src.shift_imm_ == 8) ||
+ (src.shift_imm_ == 16) ||
+ (src.shift_imm_ == 24));
+ ASSERT(src.shift_op() == ROR);
+ emit(cond | 0x6C*B20 | 0xF*B16 | dst.code()*B12 |
+ ((src.shift_imm_ >> 1)&0xC)*B8 | 7*B4 | src.rm().code());
+}
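A usage sketch of uxtb16, which zero-extends two bytes of the rotated source at once (hypothetical registers):

    __ uxtb16(r1, Operand(r0, ROR, 8));
    // r1[15:0]  = r0[15:8]
    // r1[31:16] = r0[31:24]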
+
+
// Status register access instructions.
void Assembler::mrs(Register dst, SRegister s, Condition cond) {
ASSERT(!dst.is(pc));
@@ -1512,7 +1872,7 @@ void Assembler::ldrsh(Register dst, const MemOperand& src, Condition cond) {
void Assembler::ldrd(Register dst1, Register dst2,
const MemOperand& src, Condition cond) {
- ASSERT(CpuFeatures::IsEnabled(ARMv7));
+ ASSERT(IsEnabled(ARMv7));
ASSERT(src.rm().is(no_reg));
ASSERT(!dst1.is(lr)); // r14.
ASSERT_EQ(0, dst1.code() % 2);
@@ -1527,10 +1887,30 @@ void Assembler::strd(Register src1, Register src2,
ASSERT(!src1.is(lr)); // r14.
ASSERT_EQ(0, src1.code() % 2);
ASSERT_EQ(src1.code() + 1, src2.code());
- ASSERT(CpuFeatures::IsEnabled(ARMv7));
+ ASSERT(IsEnabled(ARMv7));
addrmod3(cond | B7 | B6 | B5 | B4, src1, dst);
}
+
+// Preload instructions.
+void Assembler::pld(const MemOperand& address) {
+ // Instruction details available in ARM DDI 0406C.b, A8.8.128.
+ // 1111(31-28) | 0111(27-24) | U(23) | R(22) | 01(21-20) | Rn(19-16) |
+ // 1111(15-12) | imm5(11-07) | type(6-5) | 0(4)| Rm(3-0) |
+ ASSERT(address.rm().is(no_reg));
+ ASSERT(address.am() == Offset);
+ int U = B23;
+ int offset = address.offset();
+ if (offset < 0) {
+ offset = -offset;
+ U = 0;
+ }
+ ASSERT(offset < 4096);
+ emit(kSpecialCondition | B26 | B24 | U | B22 | B20 | address.rn().code()*B16 |
+ 0xf*B12 | offset);
+}
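A usage sketch (hypothetical register): per the asserts above, the operand must use a plain immediate offset smaller than 4096:

    __ pld(MemOperand(r3, 64));  // hint: preload the cache line at r3 + 64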
+
+
// Load/Store multiple instructions.
void Assembler::ldm(BlockAddrMode am,
Register base,
@@ -1579,7 +1959,6 @@ void Assembler::stop(const char* msg, Condition cond, int32_t code) {
emit(reinterpret_cast<Instr>(msg));
}
#else // def __arm__
-#ifdef CAN_USE_ARMV5_INSTRUCTIONS
if (cond != al) {
Label skip;
b(&skip, NegateCondition(cond));
@@ -1588,9 +1967,6 @@ void Assembler::stop(const char* msg, Condition cond, int32_t code) {
} else {
bkpt(0);
}
-#else // ndef CAN_USE_ARMV5_INSTRUCTIONS
- svc(0x9f0001, cond);
-#endif // ndef CAN_USE_ARMV5_INSTRUCTIONS
#endif // def __arm__
}
@@ -1723,19 +2099,20 @@ void Assembler::vldr(const DwVfpRegister dst,
int offset,
const Condition cond) {
// Ddst = MEM(Rbase + offset).
- // Instruction details available in ARM DDI 0406A, A8-628.
- // cond(31-28) | 1101(27-24)| U001(23-20) | Rbase(19-16) |
- // Vdst(15-12) | 1011(11-8) | offset
- ASSERT(CpuFeatures::IsEnabled(VFP2));
+ // Instruction details available in ARM DDI 0406C.b, A8-924.
+ // cond(31-28) | 1101(27-24)| U(23) | D(22) | 01(21-20) | Rbase(19-16) |
+ // Vd(15-12) | 1011(11-8) | offset
int u = 1;
if (offset < 0) {
offset = -offset;
u = 0;
}
+ int vd, d;
+ dst.split_code(&vd, &d);
ASSERT(offset >= 0);
if ((offset % 4) == 0 && (offset / 4) < 256) {
- emit(cond | u*B23 | 0xD1*B20 | base.code()*B16 | dst.code()*B12 |
+ emit(cond | 0xD*B24 | u*B23 | d*B22 | B20 | base.code()*B16 | vd*B12 |
0xB*B8 | ((offset / 4) & 255));
} else {
// Larger offsets must be handled by computing the correct address
@@ -1746,7 +2123,7 @@ void Assembler::vldr(const DwVfpRegister dst,
} else {
sub(ip, base, Operand(offset));
}
- emit(cond | 0xD1*B20 | ip.code()*B16 | dst.code()*B12 | 0xB*B8);
+ emit(cond | 0xD*B24 | d*B22 | B20 | ip.code()*B16 | vd*B12 | 0xB*B8);
}
}
@@ -1768,7 +2145,6 @@ void Assembler::vldr(const SwVfpRegister dst,
// Instruction details available in ARM DDI 0406A, A8-628.
// cond(31-28) | 1101(27-24)| U001(23-20) | Rbase(19-16) |
// Vdst(15-12) | 1010(11-8) | offset
- ASSERT(CpuFeatures::IsEnabled(VFP2));
int u = 1;
if (offset < 0) {
offset = -offset;
@@ -1809,19 +2185,21 @@ void Assembler::vstr(const DwVfpRegister src,
int offset,
const Condition cond) {
// MEM(Rbase + offset) = Dsrc.
- // Instruction details available in ARM DDI 0406A, A8-786.
- // cond(31-28) | 1101(27-24)| U000(23-20) | | Rbase(19-16) |
- // Vsrc(15-12) | 1011(11-8) | (offset/4)
- ASSERT(CpuFeatures::IsEnabled(VFP2));
+ // Instruction details available in ARM DDI 0406C.b, A8-1082.
+ // cond(31-28) | 1101(27-24)| U(23) | D(22) | 00(21-20) | Rbase(19-16) |
+ // Vd(15-12) | 1011(11-8) | (offset/4)
int u = 1;
if (offset < 0) {
offset = -offset;
u = 0;
}
ASSERT(offset >= 0);
+ int vd, d;
+ src.split_code(&vd, &d);
+
if ((offset % 4) == 0 && (offset / 4) < 256) {
- emit(cond | u*B23 | 0xD0*B20 | base.code()*B16 | src.code()*B12 |
- 0xB*B8 | ((offset / 4) & 255));
+ emit(cond | 0xD*B24 | u*B23 | d*B22 | base.code()*B16 | vd*B12 | 0xB*B8 |
+ ((offset / 4) & 255));
} else {
// Larger offsets must be handled by computing the correct address
// in the ip register.
@@ -1831,7 +2209,7 @@ void Assembler::vstr(const DwVfpRegister src,
} else {
sub(ip, base, Operand(offset));
}
- emit(cond | 0xD0*B20 | ip.code()*B16 | src.code()*B12 | 0xB*B8);
+ emit(cond | 0xD*B24 | d*B22 | ip.code()*B16 | vd*B12 | 0xB*B8);
}
}
@@ -1853,7 +2231,6 @@ void Assembler::vstr(const SwVfpRegister src,
// Instruction details available in ARM DDI 0406A, A8-786.
// cond(31-28) | 1101(27-24)| U000(23-20) | Rbase(19-16) |
// Vdst(15-12) | 1010(11-8) | (offset/4)
- ASSERT(CpuFeatures::IsEnabled(VFP2));
int u = 1;
if (offset < 0) {
offset = -offset;
@@ -1893,10 +2270,9 @@ void Assembler::vldm(BlockAddrMode am,
DwVfpRegister first,
DwVfpRegister last,
Condition cond) {
- // Instruction details available in ARM DDI 0406A, A8-626.
+ // Instruction details available in ARM DDI 0406C.b, A8-922.
// cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) |
- // first(15-12) | 1010(11-8) | (count * 2)
- ASSERT(CpuFeatures::IsEnabled(VFP2));
+ // first(15-12) | 1011(11-8) | (count * 2)
ASSERT_LE(first.code(), last.code());
ASSERT(am == ia || am == ia_w || am == db_w);
ASSERT(!base.is(pc));
@@ -1915,10 +2291,9 @@ void Assembler::vstm(BlockAddrMode am,
DwVfpRegister first,
DwVfpRegister last,
Condition cond) {
- // Instruction details available in ARM DDI 0406A, A8-784.
+ // Instruction details available in ARM DDI 0406C.b, A8-1080.
// cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) |
// first(15-12) | 1011(11-8) | (count * 2)
- ASSERT(CpuFeatures::IsEnabled(VFP2));
ASSERT_LE(first.code(), last.code());
ASSERT(am == ia || am == ia_w || am == db_w);
ASSERT(!base.is(pc));
@@ -1939,7 +2314,6 @@ void Assembler::vldm(BlockAddrMode am,
// Instruction details available in ARM DDI 0406A, A8-626.
// cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) |
// first(15-12) | 1010(11-8) | (count/2)
- ASSERT(CpuFeatures::IsEnabled(VFP2));
ASSERT_LE(first.code(), last.code());
ASSERT(am == ia || am == ia_w || am == db_w);
ASSERT(!base.is(pc));
@@ -1960,7 +2334,6 @@ void Assembler::vstm(BlockAddrMode am,
// Instruction details available in ARM DDI 0406A, A8-784.
// cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) |
// first(15-12) | 1011(11-8) | (count/2)
- ASSERT(CpuFeatures::IsEnabled(VFP2));
ASSERT_LE(first.code(), last.code());
ASSERT(am == ia || am == ia_w || am == db_w);
ASSERT(!base.is(pc));
@@ -1972,14 +2345,16 @@ void Assembler::vstm(BlockAddrMode am,
0xA*B8 | count);
}
+
static void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
uint64_t i;
- memcpy(&i, &d, 8);
+ OS::MemCopy(&i, &d, 8);
*lo = i & 0xffffffff;
*hi = i >> 32;
}
+
// Only works for little endian floating point formats.
// We don't support VFP on the mixed endian floating point platform.
static bool FitsVMOVDoubleImmediate(double d, uint32_t *encoding) {
@@ -2033,37 +2408,68 @@ static bool FitsVMOVDoubleImmediate(double d, uint32_t *encoding) {
void Assembler::vmov(const DwVfpRegister dst,
double imm,
- const Register scratch,
- const Condition cond) {
- // Dd = immediate
- // Instruction details available in ARM DDI 0406B, A8-640.
- ASSERT(CpuFeatures::IsEnabled(VFP2));
-
+ const Register scratch) {
uint32_t enc;
if (CpuFeatures::IsSupported(VFP3) && FitsVMOVDoubleImmediate(imm, &enc)) {
// The double can be encoded in the instruction.
- emit(cond | 0xE*B24 | 0xB*B20 | dst.code()*B12 | 0xB*B8 | enc);
+ //
+ // Dd = immediate
+ // Instruction details available in ARM DDI 0406C.b, A8-936.
+ // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | imm4H(19-16) |
+ // Vd(15-12) | 101(11-9) | sz=1(8) | imm4L(3-0)
+ int vd, d;
+ dst.split_code(&vd, &d);
+ emit(al | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | enc);
+ } else if (FLAG_enable_vldr_imm) {
+ // TODO(jfb) Temporarily turned off until we have constant blinding or
+ // some equivalent mitigation: an attacker can otherwise control
+ // generated data which also happens to be executable, a Very Bad
+ // Thing indeed.
+ // Blinding gets tricky because we don't have xor, we probably
+ // need to add/subtract without losing precision, which requires a
+ // cookie value that Lithium is probably better positioned to
+ // choose.
+ // We could also add a few peepholes here like detecting 0.0 and
+ // -0.0 and doing a vmov from the sequestered d14, forcing denorms
+ // to zero (we set flush-to-zero), and normalizing NaN values.
+ // We could also detect redundant values.
+ // The code could also randomize the order of values, though
+ // that's tricky because vldr has a limited reach. Furthermore
+ // it breaks load locality.
+ RecordRelocInfo(imm);
+ vldr(dst, MemOperand(pc, 0));
} else {
- // Synthesise the double from ARM immediates. This could be implemented
- // using vldr from a constant pool.
+ // Synthesise the double from ARM immediates.
uint32_t lo, hi;
DoubleAsTwoUInt32(imm, &lo, &hi);
- mov(ip, Operand(lo));
if (scratch.is(no_reg)) {
- // Move the low part of the double into the lower of the corresponsing S
- // registers of D register dst.
- vmov(dst.low(), ip, cond);
-
- // Move the high part of the double into the higher of the corresponsing S
- // registers of D register dst.
- mov(ip, Operand(hi));
- vmov(dst.high(), ip, cond);
+ if (dst.code() < 16) {
+ const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code());
+ // Move the low part of the double into the lower of the corresponding S
+ // registers of D register dst.
+ mov(ip, Operand(lo));
+ vmov(loc.low(), ip);
+
+ // Move the high part of the double into the higher of the
+ // corresponding S registers of D register dst.
+ mov(ip, Operand(hi));
+ vmov(loc.high(), ip);
+ } else {
+ // D16-D31 does not have S registers, so move the low and high parts
+ // directly to the D register using vmov.32.
+ // Note: This may be slower, so we only do this when we have to.
+ mov(ip, Operand(lo));
+ vmov(dst, VmovIndexLo, ip);
+ mov(ip, Operand(hi));
+ vmov(dst, VmovIndexHi, ip);
+ }
} else {
// Move the low and high parts of the double to a D register in one
// instruction.
+ mov(ip, Operand(lo));
mov(scratch, Operand(hi));
- vmov(dst, ip, scratch, cond);
+ vmov(dst, ip, scratch);
}
}
}
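A worked example of the d0-d15 fallback path above (a sketch, not part of the patch): for imm == 1.0 the IEEE-754 bits are 0x3FF00000'00000000, so DoubleAsTwoUInt32 yields lo == 0x00000000 and hi == 0x3FF00000, and with neither the VFP3 immediate encoding nor FLAG_enable_vldr_imm available the assembler emits roughly:

    mov(ip, Operand(0x00000000u));
    vmov(d0.low(), ip);              // low word of d0
    mov(ip, Operand(0x3FF00000u));   // may expand to movw/movt or a literal load
    vmov(d0.high(), ip);             // high word of d0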
@@ -2074,7 +2480,6 @@ void Assembler::vmov(const SwVfpRegister dst,
const Condition cond) {
// Sd = Sm
// Instruction details available in ARM DDI 0406B, A8-642.
- ASSERT(CpuFeatures::IsEnabled(VFP2));
int sd, d, sm, m;
dst.split_code(&sd, &d);
src.split_code(&sm, &m);
@@ -2086,10 +2491,47 @@ void Assembler::vmov(const DwVfpRegister dst,
const DwVfpRegister src,
const Condition cond) {
// Dd = Dm
- // Instruction details available in ARM DDI 0406B, A8-642.
- ASSERT(CpuFeatures::IsEnabled(VFP2));
- emit(cond | 0xE*B24 | 0xB*B20 |
- dst.code()*B12 | 0x5*B9 | B8 | B6 | src.code());
+ // Instruction details available in ARM DDI 0406C.b, A8-938.
+ // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0000(19-16) | Vd(15-12) |
+ // 101(11-9) | sz=1(8) | 0(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vm, m;
+ src.split_code(&vm, &m);
+ emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | B6 | m*B5 |
+ vm);
+}
+
+
+void Assembler::vmov(const DwVfpRegister dst,
+ const VmovIndex index,
+ const Register src,
+ const Condition cond) {
+ // Dd[index] = Rt
+ // Instruction details available in ARM DDI 0406C.b, A8-940.
+ // cond(31-28) | 1110(27-24) | 0(23) | opc1=0index(22-21) | 0(20) |
+ // Vd(19-16) | Rt(15-12) | 1011(11-8) | D(7) | opc2=00(6-5) | 1(4) | 0000(3-0)
+ ASSERT(index.index == 0 || index.index == 1);
+ int vd, d;
+ dst.split_code(&vd, &d);
+ emit(cond | 0xE*B24 | index.index*B21 | vd*B16 | src.code()*B12 | 0xB*B8 |
+ d*B7 | B4);
+}
+
+
+void Assembler::vmov(const Register dst,
+ const VmovIndex index,
+ const DwVfpRegister src,
+ const Condition cond) {
+ // Rt = Dd[index]
+ // Instruction details available in ARM DDI 0406C.b, A8.8.342.
+ // cond(31-28) | 1110(27-24) | U=0(23) | opc1=0index(22-21) | 1(20) |
+ // Vn(19-16) | Rt(15-12) | 1011(11-8) | N(7) | opc2=00(6-5) | 1(4) | 0000(3-0)
+ ASSERT(index.index == 0 || index.index == 1);
+ int vn, n;
+ src.split_code(&vn, &n);
+ emit(cond | 0xE*B24 | index.index*B21 | B20 | vn*B16 | dst.code()*B12 |
+ 0xB*B8 | n*B7 | B4);
}
@@ -2098,13 +2540,14 @@ void Assembler::vmov(const DwVfpRegister dst,
const Register src2,
const Condition cond) {
// Dm = <Rt,Rt2>.
- // Instruction details available in ARM DDI 0406A, A8-646.
+ // Instruction details available in ARM DDI 0406C.b, A8-948.
// cond(31-28) | 1100(27-24)| 010(23-21) | op=0(20) | Rt2(19-16) |
// Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
- ASSERT(CpuFeatures::IsEnabled(VFP2));
ASSERT(!src1.is(pc) && !src2.is(pc));
+ int vm, m;
+ dst.split_code(&vm, &m);
emit(cond | 0xC*B24 | B22 | src2.code()*B16 |
- src1.code()*B12 | 0xB*B8 | B4 | dst.code());
+ src1.code()*B12 | 0xB*B8 | m*B5 | B4 | vm);
}
@@ -2113,13 +2556,14 @@ void Assembler::vmov(const Register dst1,
const DwVfpRegister src,
const Condition cond) {
// <Rt,Rt2> = Dm.
- // Instruction details available in ARM DDI 0406A, A8-646.
+ // Instruction details available in ARM DDI 0406C.b, A8-948.
// cond(31-28) | 1100(27-24)| 010(23-21) | op=1(20) | Rt2(19-16) |
// Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
- ASSERT(CpuFeatures::IsEnabled(VFP2));
ASSERT(!dst1.is(pc) && !dst2.is(pc));
+ int vm, m;
+ src.split_code(&vm, &m);
emit(cond | 0xC*B24 | B22 | B20 | dst2.code()*B16 |
- dst1.code()*B12 | 0xB*B8 | B4 | src.code());
+ dst1.code()*B12 | 0xB*B8 | m*B5 | B4 | vm);
}
@@ -2130,7 +2574,6 @@ void Assembler::vmov(const SwVfpRegister dst,
// Instruction details available in ARM DDI 0406A, A8-642.
// cond(31-28) | 1110(27-24)| 000(23-21) | op=0(20) | Vn(19-16) |
// Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP2));
ASSERT(!src.is(pc));
int sn, n;
dst.split_code(&sn, &n);
@@ -2145,7 +2588,6 @@ void Assembler::vmov(const Register dst,
// Instruction details available in ARM DDI 0406A, A8-642.
// cond(31-28) | 1110(27-24)| 000(23-21) | op=1(20) | Vn(19-16) |
// Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP2));
ASSERT(!dst.is(pc));
int sn, n;
src.split_code(&sn, &n);
@@ -2270,7 +2712,6 @@ void Assembler::vcvt_f64_s32(const DwVfpRegister dst,
const SwVfpRegister src,
VFPConversionMode mode,
const Condition cond) {
- ASSERT(CpuFeatures::IsEnabled(VFP2));
emit(EncodeVCVT(F64, dst.code(), S32, src.code(), mode, cond));
}
@@ -2279,7 +2720,6 @@ void Assembler::vcvt_f32_s32(const SwVfpRegister dst,
const SwVfpRegister src,
VFPConversionMode mode,
const Condition cond) {
- ASSERT(CpuFeatures::IsEnabled(VFP2));
emit(EncodeVCVT(F32, dst.code(), S32, src.code(), mode, cond));
}
@@ -2288,7 +2728,6 @@ void Assembler::vcvt_f64_u32(const DwVfpRegister dst,
const SwVfpRegister src,
VFPConversionMode mode,
const Condition cond) {
- ASSERT(CpuFeatures::IsEnabled(VFP2));
emit(EncodeVCVT(F64, dst.code(), U32, src.code(), mode, cond));
}
@@ -2297,7 +2736,6 @@ void Assembler::vcvt_s32_f64(const SwVfpRegister dst,
const DwVfpRegister src,
VFPConversionMode mode,
const Condition cond) {
- ASSERT(CpuFeatures::IsEnabled(VFP2));
emit(EncodeVCVT(S32, dst.code(), F64, src.code(), mode, cond));
}
@@ -2306,7 +2744,6 @@ void Assembler::vcvt_u32_f64(const SwVfpRegister dst,
const DwVfpRegister src,
VFPConversionMode mode,
const Condition cond) {
- ASSERT(CpuFeatures::IsEnabled(VFP2));
emit(EncodeVCVT(U32, dst.code(), F64, src.code(), mode, cond));
}
@@ -2315,7 +2752,6 @@ void Assembler::vcvt_f64_f32(const DwVfpRegister dst,
const SwVfpRegister src,
VFPConversionMode mode,
const Condition cond) {
- ASSERT(CpuFeatures::IsEnabled(VFP2));
emit(EncodeVCVT(F64, dst.code(), F32, src.code(), mode, cond));
}
@@ -2324,26 +2760,55 @@ void Assembler::vcvt_f32_f64(const SwVfpRegister dst,
const DwVfpRegister src,
VFPConversionMode mode,
const Condition cond) {
- ASSERT(CpuFeatures::IsEnabled(VFP2));
emit(EncodeVCVT(F32, dst.code(), F64, src.code(), mode, cond));
}
+void Assembler::vcvt_f64_s32(const DwVfpRegister dst,
+ int fraction_bits,
+ const Condition cond) {
+ // Instruction details available in ARM DDI 0406C.b, A8-874.
+ // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 1010(19-16) | Vd(15-12) |
+ // 101(11-9) | sf=1(8) | sx=1(7) | 1(6) | i(5) | 0(4) | imm4(3-0)
+ ASSERT(fraction_bits > 0 && fraction_bits <= 32);
+ ASSERT(CpuFeatures::IsSupported(VFP3));
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int i = ((32 - fraction_bits) >> 4) & 1;
+ int imm4 = (32 - fraction_bits) & 0xf;
+ emit(cond | 0xE*B24 | B23 | d*B22 | 0x3*B20 | B19 | 0x2*B16 |
+ vd*B12 | 0x5*B9 | B8 | B7 | B6 | i*B5 | imm4);
+}
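A worked example of the field arithmetic above: for fraction_bits == 16, 32 - 16 == 16, so i == (16 >> 4) & 1 == 1 and imm4 == 16 & 0xf == 0; the instruction then reinterprets the low word of dst as a signed 16.16 fixed-point value and converts it to double in place, e.g. 0x00018000 becomes 1.5.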
+
+
void Assembler::vneg(const DwVfpRegister dst,
const DwVfpRegister src,
const Condition cond) {
- ASSERT(CpuFeatures::IsEnabled(VFP2));
- emit(cond | 0xE*B24 | 0xB*B20 | B16 | dst.code()*B12 |
- 0x5*B9 | B8 | B6 | src.code());
+ // Instruction details available in ARM DDI 0406C.b, A8-968.
+ // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0001(19-16) | Vd(15-12) |
+ // 101(11-9) | sz=1(8) | 0(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vm, m;
+ src.split_code(&vm, &m);
+
+ emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | B16 | vd*B12 | 0x5*B9 | B8 | B6 |
+ m*B5 | vm);
}
void Assembler::vabs(const DwVfpRegister dst,
const DwVfpRegister src,
const Condition cond) {
- ASSERT(CpuFeatures::IsEnabled(VFP2));
- emit(cond | 0xE*B24 | 0xB*B20 | dst.code()*B12 |
- 0x5*B9 | B8 | 0x3*B6 | src.code());
+ // Instruction details available in ARM DDI 0406C.b, A8-524.
+ // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0000(19-16) | Vd(15-12) |
+ // 101(11-9) | sz=1(8) | 1(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vm, m;
+ src.split_code(&vm, &m);
+ emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | B7 | B6 |
+ m*B5 | vm);
}
@@ -2353,12 +2818,17 @@ void Assembler::vadd(const DwVfpRegister dst,
const Condition cond) {
// Dd = vadd(Dn, Dm) double precision floating point addition.
// Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
- // Instruction details available in ARM DDI 0406A, A8-536.
- // cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
- // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP2));
- emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 |
- dst.code()*B12 | 0x5*B9 | B8 | src2.code());
+ // Instruction details available in ARM DDI 0406C.b, A8-830.
+ // cond(31-28) | 11100(27-23)| D(22) | 11(21-20) | Vn(19-16) |
+ // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vn, n;
+ src1.split_code(&vn, &n);
+ int vm, m;
+ src2.split_code(&vm, &m);
+ emit(cond | 0x1C*B23 | d*B22 | 0x3*B20 | vn*B16 | vd*B12 | 0x5*B9 | B8 |
+ n*B7 | m*B5 | vm);
}
@@ -2368,12 +2838,17 @@ void Assembler::vsub(const DwVfpRegister dst,
const Condition cond) {
// Dd = vsub(Dn, Dm) double precision floating point subtraction.
// Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
- // Instruction details available in ARM DDI 0406A, A8-784.
- // cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
- // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 1(6) | M=?(5) | 0(4) | Vm(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP2));
- emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 |
- dst.code()*B12 | 0x5*B9 | B8 | B6 | src2.code());
+ // Instruction details available in ARM DDI 0406C.b, A8-1086.
+ // cond(31-28) | 11100(27-23)| D(22) | 11(21-20) | Vn(19-16) |
+ // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vn, n;
+ src1.split_code(&vn, &n);
+ int vm, m;
+ src2.split_code(&vm, &m);
+ emit(cond | 0x1C*B23 | d*B22 | 0x3*B20 | vn*B16 | vd*B12 | 0x5*B9 | B8 |
+ n*B7 | B6 | m*B5 | vm);
}
@@ -2383,12 +2858,53 @@ void Assembler::vmul(const DwVfpRegister dst,
const Condition cond) {
// Dd = vmul(Dn, Dm) double precision floating point multiplication.
// Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
- // Instruction details available in ARM DDI 0406A, A8-784.
- // cond(31-28) | 11100(27-23)| D=?(22) | 10(21-20) | Vn(19-16) |
- // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP2));
- emit(cond | 0xE*B24 | 0x2*B20 | src1.code()*B16 |
- dst.code()*B12 | 0x5*B9 | B8 | src2.code());
+ // Instruction details available in ARM DDI 0406C.b, A8-960.
+ // cond(31-28) | 11100(27-23)| D(22) | 10(21-20) | Vn(19-16) |
+ // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vn, n;
+ src1.split_code(&vn, &n);
+ int vm, m;
+ src2.split_code(&vm, &m);
+ emit(cond | 0x1C*B23 | d*B22 | 0x2*B20 | vn*B16 | vd*B12 | 0x5*B9 | B8 |
+ n*B7 | m*B5 | vm);
+}
+
+
+void Assembler::vmla(const DwVfpRegister dst,
+ const DwVfpRegister src1,
+ const DwVfpRegister src2,
+ const Condition cond) {
+ // Instruction details available in ARM DDI 0406C.b, A8-932.
+ // cond(31-28) | 11100(27-23) | D(22) | 00(21-20) | Vn(19-16) |
+ // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | op=0(6) | M(5) | 0(4) | Vm(3-0)
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vn, n;
+ src1.split_code(&vn, &n);
+ int vm, m;
+ src2.split_code(&vm, &m);
+ emit(cond | 0x1C*B23 | d*B22 | vn*B16 | vd*B12 | 0x5*B9 | B8 | n*B7 | m*B5 |
+ vm);
+}
+
+
+void Assembler::vmls(const DwVfpRegister dst,
+ const DwVfpRegister src1,
+ const DwVfpRegister src2,
+ const Condition cond) {
+ // Instruction details available in ARM DDI 0406C.b, A8-932.
+ // cond(31-28) | 11100(27-23) | D(22) | 00(21-20) | Vn(19-16) |
+ // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | op=1(6) | M(5) | 0(4) | Vm(3-0)
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vn, n;
+ src1.split_code(&vn, &n);
+ int vm, m;
+ src2.split_code(&vm, &m);
+ emit(cond | 0x1C*B23 | d*B22 | vn*B16 | vd*B12 | 0x5*B9 | B8 | n*B7 | B6 |
+ m*B5 | vm);
}
@@ -2398,12 +2914,17 @@ void Assembler::vdiv(const DwVfpRegister dst,
const Condition cond) {
// Dd = vdiv(Dn, Dm) double precision floating point division.
// Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
- // Instruction details available in ARM DDI 0406A, A8-584.
- // cond(31-28) | 11101(27-23)| D=?(22) | 00(21-20) | Vn(19-16) |
- // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=? | 0(6) | M=?(5) | 0(4) | Vm(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP2));
- emit(cond | 0xE*B24 | B23 | src1.code()*B16 |
- dst.code()*B12 | 0x5*B9 | B8 | src2.code());
+ // Instruction details available in ARM DDI 0406C.b, A8-882.
+ // cond(31-28) | 11101(27-23)| D(22) | 00(21-20) | Vn(19-16) |
+ // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vn, n;
+ src1.split_code(&vn, &n);
+ int vm, m;
+ src2.split_code(&vm, &m);
+ emit(cond | 0x1D*B23 | d*B22 | vn*B16 | vd*B12 | 0x5*B9 | B8 | n*B7 | m*B5 |
+ vm);
}
@@ -2411,26 +2932,29 @@ void Assembler::vcmp(const DwVfpRegister src1,
const DwVfpRegister src2,
const Condition cond) {
// vcmp(Dd, Dm) double precision floating point comparison.
- // Instruction details available in ARM DDI 0406A, A8-570.
- // cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0100 (19-16) |
- // Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=0 | 1(6) | M(5)=? | 0(4) | Vm(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP2));
- emit(cond | 0xE*B24 |B23 | 0x3*B20 | B18 |
- src1.code()*B12 | 0x5*B9 | B8 | B6 | src2.code());
+ // Instruction details available in ARM DDI 0406C.b, A8-864.
+ // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0100(19-16) |
+ // Vd(15-12) | 101(11-9) | sz=1(8) | E=0(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
+ int vd, d;
+ src1.split_code(&vd, &d);
+ int vm, m;
+ src2.split_code(&vm, &m);
+ emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | 0x4*B16 | vd*B12 | 0x5*B9 | B8 | B6 |
+ m*B5 | vm);
}
void Assembler::vcmp(const DwVfpRegister src1,
const double src2,
const Condition cond) {
- // vcmp(Dd, Dm) double precision floating point comparison.
- // Instruction details available in ARM DDI 0406A, A8-570.
- // cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0101 (19-16) |
- // Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=0 | 1(6) | M(5)=? | 0(4) | 0000(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP2));
+ // vcmp(Dd, #0.0) double precision floating point comparison.
+ // Instruction details available in ARM DDI 0406C.b, A8-864.
+ // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0101(19-16) |
+ // Vd(15-12) | 101(11-9) | sz=1(8) | E=0(7) | 1(6) | 0(5) | 0(4) | 0000(3-0)
ASSERT(src2 == 0.0);
- emit(cond | 0xE*B24 |B23 | 0x3*B20 | B18 | B16 |
- src1.code()*B12 | 0x5*B9 | B8 | B6);
+ int vd, d;
+ src1.split_code(&vd, &d);
+ emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | 0x5*B16 | vd*B12 | 0x5*B9 | B8 | B6);
}
@@ -2438,7 +2962,6 @@ void Assembler::vmsr(Register dst, Condition cond) {
// Instruction details available in ARM DDI 0406A, A8-652.
// cond(31-28) | 1110 (27-24) | 1110(23-20)| 0001 (19-16) |
// Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP2));
emit(cond | 0xE*B24 | 0xE*B20 | B16 |
dst.code()*B12 | 0xA*B8 | B4);
}
@@ -2448,7 +2971,6 @@ void Assembler::vmrs(Register dst, Condition cond) {
// Instruction details available in ARM DDI 0406A, A8-652.
// cond(31-28) | 1110 (27-24) | 1111(23-20)| 0001 (19-16) |
// Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP2));
emit(cond | 0xE*B24 | 0xF*B20 | B16 |
dst.code()*B12 | 0xA*B8 | B4);
}
@@ -2457,11 +2979,59 @@ void Assembler::vmrs(Register dst, Condition cond) {
void Assembler::vsqrt(const DwVfpRegister dst,
const DwVfpRegister src,
const Condition cond) {
- // cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0001 (19-16) |
- // Vd(15-12) | 101(11-9) | sz(8)=1 | 11 (7-6) | M(5)=? | 0(4) | Vm(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP2));
- emit(cond | 0xE*B24 | B23 | 0x3*B20 | B16 |
- dst.code()*B12 | 0x5*B9 | B8 | 3*B6 | src.code());
+ // Instruction details available in ARM DDI 0406C.b, A8-1058.
+ // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0001(19-16) |
+ // Vd(15-12) | 101(11-9) | sz=1(8) | 11(7-6) | M(5) | 0(4) | Vm(3-0)
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vm, m;
+ src.split_code(&vm, &m);
+ emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | B16 | vd*B12 | 0x5*B9 | B8 | 0x3*B6 |
+ m*B5 | vm);
+}
+
+
+// Support for NEON.
+
+void Assembler::vld1(NeonSize size,
+ const NeonListOperand& dst,
+ const NeonMemOperand& src) {
+ // Instruction details available in ARM DDI 0406C.b, A8.8.320.
+ // 1111(31-28) | 01000(27-23) | D(22) | 10(21-20) | Rn(19-16) |
+ // Vd(15-12) | type(11-8) | size(7-6) | align(5-4) | Rm(3-0)
+ ASSERT(CpuFeatures::IsSupported(NEON));
+ int vd, d;
+ dst.base().split_code(&vd, &d);
+ emit(0xFU*B28 | 4*B24 | d*B22 | 2*B20 | src.rn().code()*B16 | vd*B12 |
+ dst.type()*B8 | size*B6 | src.align()*B4 | src.rm().code());
+}
+
+
+void Assembler::vst1(NeonSize size,
+ const NeonListOperand& src,
+ const NeonMemOperand& dst) {
+ // Instruction details available in ARM DDI 0406C.b, A8.8.404.
+ // 1111(31-28) | 01000(27-23) | D(22) | 00(21-20) | Rn(19-16) |
+ // Vd(15-12) | type(11-8) | size(7-6) | align(5-4) | Rm(3-0)
+ ASSERT(CpuFeatures::IsSupported(NEON));
+ int vd, d;
+ src.base().split_code(&vd, &d);
+ emit(0xFU*B28 | 4*B24 | d*B22 | dst.rn().code()*B16 | vd*B12 | src.type()*B8 |
+ size*B6 | dst.align()*B4 | dst.rm().code());
+}
+
+
+void Assembler::vmovl(NeonDataType dt, QwNeonRegister dst, DwVfpRegister src) {
+ // Instruction details available in ARM DDI 0406C.b, A8.8.346.
+ // 1111(31-28) | 001(27-25) | U(24) | 1(23) | D(22) | imm3(21-19) |
+ // 000(18-16) | Vd(15-12) | 101000(11-6) | M(5) | 1(4) | Vm(3-0)
+ ASSERT(CpuFeatures::IsSupported(NEON));
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vm, m;
+ src.split_code(&vm, &m);
+ emit(0xFU*B28 | B25 | (dt & NeonDataTypeUMask) | B23 | d*B22 |
+ (dt & NeonDataTypeSizeMask)*B19 | vd*B12 | 0xA*B8 | m*B5 | B4 | vm);
}
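A usage sketch (NeonU8 is assumed to be one of the NeonDataType values from constants-arm.h, which this diff does not show):

    __ vmovl(NeonU8, q0, d2);  // widen 8 unsigned bytes in d2 to 8 u16 lanes in q0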
@@ -2538,6 +3108,7 @@ void Assembler::RecordConstPool(int size) {
#endif
}
+
void Assembler::GrowBuffer() {
if (!own_buffer_) FATAL("external code buffer is too small");
@@ -2561,9 +3132,9 @@ void Assembler::GrowBuffer() {
// Copy the data.
int pc_delta = desc.buffer - buffer_;
int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
- memmove(desc.buffer, buffer_, desc.instr_size);
- memmove(reloc_info_writer.pos() + rc_delta,
- reloc_info_writer.pos(), desc.reloc_size);
+ OS::MemMove(desc.buffer, buffer_, desc.instr_size);
+ OS::MemMove(reloc_info_writer.pos() + rc_delta,
+ reloc_info_writer.pos(), desc.reloc_size);
// Switch buffers.
DeleteArray(buffer_);
@@ -2594,6 +3165,7 @@ void Assembler::db(uint8_t data) {
// to write pure data with no pointers and the constant pool should
// be emitted before using db.
ASSERT(num_pending_reloc_info_ == 0);
+ ASSERT(num_pending_64_bit_reloc_info_ == 0);
CheckBuffer();
*reinterpret_cast<uint8_t*>(pc_) = data;
pc_ += sizeof(uint8_t);
@@ -2605,6 +3177,7 @@ void Assembler::dd(uint32_t data) {
// to write pure data with no pointers and the constant pool should
// be emitted before using dd.
ASSERT(num_pending_reloc_info_ == 0);
+ ASSERT(num_pending_64_bit_reloc_info_ == 0);
CheckBuffer();
*reinterpret_cast<uint32_t*>(pc_) = data;
pc_ += sizeof(uint32_t);
@@ -2628,16 +3201,9 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data,
|| mode == DONT_USE_CONSTANT_POOL);
// These modes do not need an entry in the constant pool.
} else {
- ASSERT(num_pending_reloc_info_ < kMaxNumPendingRelocInfo);
- if (num_pending_reloc_info_ == 0) {
- first_const_pool_use_ = pc_offset();
- }
- pending_reloc_info_[num_pending_reloc_info_++] = rinfo;
- // Make sure the constant pool is not emitted in place of the next
- // instruction for which we just recorded relocation info.
- BlockConstPoolFor(1);
+ RecordRelocInfoConstantPoolEntryHelper(rinfo);
}
- if (rinfo.rmode() != RelocInfo::NONE) {
+ if (!RelocInfo::IsNone(rinfo.rmode())) {
// Don't record external references unless the heap will be serialized.
if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
#ifdef DEBUG
@@ -2664,13 +3230,38 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data,
}
+void Assembler::RecordRelocInfo(double data) {
+ // We do not try to reuse pool constants.
+ RelocInfo rinfo(pc_, data);
+ RecordRelocInfoConstantPoolEntryHelper(rinfo);
+}
+
+
+void Assembler::RecordRelocInfoConstantPoolEntryHelper(const RelocInfo& rinfo) {
+ ASSERT(num_pending_reloc_info_ < kMaxNumPendingRelocInfo);
+ if (num_pending_reloc_info_ == 0) {
+ first_const_pool_use_ = pc_offset();
+ }
+ pending_reloc_info_[num_pending_reloc_info_++] = rinfo;
+ if (rinfo.rmode() == RelocInfo::NONE64) {
+ ++num_pending_64_bit_reloc_info_;
+ }
+ ASSERT(num_pending_64_bit_reloc_info_ <= num_pending_reloc_info_);
+ // Make sure the constant pool is not emitted in place of the next
+ // instruction for which we just recorded relocation info.
+ BlockConstPoolFor(1);
+}
+
+
void Assembler::BlockConstPoolFor(int instructions) {
int pc_limit = pc_offset() + instructions * kInstrSize;
if (no_const_pool_before_ < pc_limit) {
// If there are some pending entries, the constant pool cannot be blocked
- // further than first_const_pool_use_ + kMaxDistToPool
+ // further than constant pool instruction's reach.
ASSERT((num_pending_reloc_info_ == 0) ||
- (pc_limit < (first_const_pool_use_ + kMaxDistToPool)));
+ (pc_limit - first_const_pool_use_ < kMaxDistToIntPool));
+ // TODO(jfb) Also check 64-bit entries are in range (requires splitting
+ // them up from 32-bit entries).
no_const_pool_before_ = pc_limit;
}
@@ -2692,29 +3283,60 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
// There is nothing to do if there are no pending constant pool entries.
if (num_pending_reloc_info_ == 0) {
+ ASSERT(num_pending_64_bit_reloc_info_ == 0);
// Calculate the offset of the next check.
next_buffer_check_ = pc_offset() + kCheckPoolInterval;
return;
}
- // We emit a constant pool when:
- // * requested to do so by parameter force_emit (e.g. after each function).
- // * the distance to the first instruction accessing the constant pool is
- // kAvgDistToPool or more.
- // * no jump is required and the distance to the first instruction accessing
- // the constant pool is at least kMaxDistToPool / 2.
- ASSERT(first_const_pool_use_ >= 0);
- int dist = pc_offset() - first_const_pool_use_;
- if (!force_emit && dist < kAvgDistToPool &&
- (require_jump || (dist < (kMaxDistToPool / 2)))) {
- return;
- }
-
// Check that the code buffer is large enough before emitting the constant
// pool (include the jump over the pool and the constant pool marker and
// the gap to the relocation information).
+ // Note 64-bit values are wider, and the first one needs to be 64-bit aligned.
int jump_instr = require_jump ? kInstrSize : 0;
- int size = jump_instr + kInstrSize + num_pending_reloc_info_ * kPointerSize;
+ int size_up_to_marker = jump_instr + kInstrSize;
+ int size_after_marker = num_pending_reloc_info_ * kPointerSize;
+ bool has_fp_values = (num_pending_64_bit_reloc_info_ > 0);
+ // 64-bit values must be 64-bit aligned.
+ // We'll start emitting at PC: branch+marker, then 32-bit values, then
+ // 64-bit values which might need to be aligned.
+ bool require_64_bit_align = has_fp_values &&
+ (((uintptr_t)pc_ + size_up_to_marker + size_after_marker) & 0x3);
+ if (require_64_bit_align) {
+ size_after_marker += kInstrSize;
+ }
+ // num_pending_reloc_info_ also contains 64-bit entries, the above code
+ // therefore already counted half of the size for 64-bit entries. Add the
+ // remaining size.
+ STATIC_ASSERT(kPointerSize == kDoubleSize / 2);
+ size_after_marker += num_pending_64_bit_reloc_info_ * (kDoubleSize / 2);
+
+ int size = size_up_to_marker + size_after_marker;
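A worked example of the size arithmetic above (a sketch): with require_jump set, three pending entries of which one is 64-bit, and no alignment word needed,

    size_up_to_marker = kInstrSize + kInstrSize    =  8
    size_after_marker = 3 * kPointerSize           = 12
                      + 1 * (kDoubleSize / 2)      = 16
    size              = 8 + 16                     = 24 bytes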
+
+ // We emit a constant pool when:
+ // * requested to do so by parameter force_emit (e.g. after each function).
+ // * the distance from the first instruction accessing the constant pool to
+ // any of the constant pool entries will exceed its limit the next
+ // time the pool is checked. This is overly restrictive, but we don't emit
+ // constant pool entries in-order so it's conservatively correct.
+ // * the instruction doesn't require a jump after itself to jump over the
+ // constant pool, and we're getting close to running out of range.
+ if (!force_emit) {
+ ASSERT((first_const_pool_use_ >= 0) && (num_pending_reloc_info_ > 0));
+ int dist = pc_offset() + size - first_const_pool_use_;
+ if (has_fp_values) {
+ if ((dist < kMaxDistToFPPool - kCheckPoolInterval) &&
+ (require_jump || (dist < kMaxDistToFPPool / 2))) {
+ return;
+ }
+ } else {
+ if ((dist < kMaxDistToIntPool - kCheckPoolInterval) &&
+ (require_jump || (dist < kMaxDistToIntPool / 2))) {
+ return;
+ }
+ }
+ }
+
int needed_space = size + kGap;
while (buffer_space() <= needed_space) GrowBuffer();
@@ -2730,11 +3352,45 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
b(&after_pool);
}
- // Put down constant pool marker "Undefined instruction" as specified by
- // A5.6 (ARMv7) Instruction set encoding.
- emit(kConstantPoolMarker | num_pending_reloc_info_);
+ // Put down constant pool marker "Undefined instruction".
+ // The data size helps the disassembler know what to print.
+ emit(kConstantPoolMarker |
+ EncodeConstantPoolLength(size_after_marker / kPointerSize));
+
+ if (require_64_bit_align) {
+ emit(kConstantPoolMarker);
+ }
- // Emit constant pool entries.
+ // Emit 64-bit constant pool entries first: their range is smaller than
+ // 32-bit entries.
+ for (int i = 0; i < num_pending_reloc_info_; i++) {
+ RelocInfo& rinfo = pending_reloc_info_[i];
+
+ if (rinfo.rmode() != RelocInfo::NONE64) {
+ // 32-bit values emitted later.
+ continue;
+ }
+
+ ASSERT(!((uintptr_t)pc_ & 0x3)); // Check 64-bit alignment.
+
+ Instr instr = instr_at(rinfo.pc());
+ // Instruction to patch must be 'vldr rd, [pc, #offset]' with offset == 0.
+ ASSERT((IsVldrDPcImmediateOffset(instr) &&
+ GetVldrDRegisterImmediateOffset(instr) == 0));
+
+ int delta = pc_ - rinfo.pc() - kPcLoadDelta;
+ ASSERT(is_uint10(delta));
+
+ instr_at_put(rinfo.pc(), SetVldrDRegisterImmediateOffset(instr, delta));
+
+ const double double_data = rinfo.data64();
+ uint64_t uint_data = 0;
+ OS::MemCopy(&uint_data, &double_data, sizeof(double_data));
+ emit(uint_data & 0xFFFFFFFF);
+ emit(uint_data >> 32);
+ }
+
+ // Emit 32-bit constant pool entries.
for (int i = 0; i < num_pending_reloc_info_; i++) {
RelocInfo& rinfo = pending_reloc_info_[i];
ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
@@ -2742,25 +3398,35 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
rinfo.rmode() != RelocInfo::STATEMENT_POSITION &&
rinfo.rmode() != RelocInfo::CONST_POOL);
+ if (rinfo.rmode() == RelocInfo::NONE64) {
+ // 64-bit values emitted earlier.
+ continue;
+ }
+
Instr instr = instr_at(rinfo.pc());
- // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.
+
+ // 64-bit loads shouldn't get here.
+ ASSERT(!IsVldrDPcImmediateOffset(instr));
+
+ int delta = pc_ - rinfo.pc() - kPcLoadDelta;
+ // 0 is the smallest delta:
+ // ldr rd, [pc, #0]
+ // constant pool marker
+ // data
+
if (IsLdrPcImmediateOffset(instr) &&
GetLdrRegisterImmediateOffset(instr) == 0) {
- int delta = pc_ - rinfo.pc() - kPcLoadDelta;
- // 0 is the smallest delta:
- // ldr rd, [pc, #0]
- // constant pool marker
- // data
ASSERT(is_uint12(delta));
-
instr_at_put(rinfo.pc(), SetLdrRegisterImmediateOffset(instr, delta));
+ emit(rinfo.data());
} else {
ASSERT(IsMovW(instr));
+ emit(rinfo.data());
}
- emit(rinfo.data());
}
num_pending_reloc_info_ = 0;
+ num_pending_64_bit_reloc_info_ = 0;
first_const_pool_use_ = -1;
RecordComment("]");
diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h
index dfcce6011..8caa64df3 100644
--- a/deps/v8/src/arm/assembler-arm.h
+++ b/deps/v8/src/arm/assembler-arm.h
@@ -47,6 +47,73 @@
namespace v8 {
namespace internal {
+// CpuFeatures keeps track of which features are supported by the target CPU.
+// Supported features must be enabled by a CpuFeatureScope before use.
+class CpuFeatures : public AllStatic {
+ public:
+ // Detect features of the target CPU. Set safe defaults if the serializer
+ // is enabled (snapshots must be portable).
+ static void Probe();
+
+ // Display target use when compiling.
+ static void PrintTarget();
+
+ // Display features.
+ static void PrintFeatures();
+
+ // Check whether a feature is supported by the target CPU.
+ static bool IsSupported(CpuFeature f) {
+ ASSERT(initialized_);
+ return Check(f, supported_);
+ }
+
+ static bool IsFoundByRuntimeProbingOnly(CpuFeature f) {
+ ASSERT(initialized_);
+ return Check(f, found_by_runtime_probing_only_);
+ }
+
+ static bool IsSafeForSnapshot(CpuFeature f) {
+ return Check(f, cross_compile_) ||
+ (IsSupported(f) &&
+ (!Serializer::enabled() || !IsFoundByRuntimeProbingOnly(f)));
+ }
+
+ static unsigned cache_line_size() { return cache_line_size_; }
+
+ static bool VerifyCrossCompiling() {
+ return cross_compile_ == 0;
+ }
+
+ static bool VerifyCrossCompiling(CpuFeature f) {
+ unsigned mask = flag2set(f);
+ return cross_compile_ == 0 ||
+ (cross_compile_ & mask) == mask;
+ }
+
+ private:
+ static bool Check(CpuFeature f, unsigned set) {
+ return (set & flag2set(f)) != 0;
+ }
+
+ static unsigned flag2set(CpuFeature f) {
+ return 1u << f;
+ }
+
+#ifdef DEBUG
+ static bool initialized_;
+#endif
+ static unsigned supported_;
+ static unsigned found_by_runtime_probing_only_;
+ static unsigned cache_line_size_;
+
+ static unsigned cross_compile_;
+
+ friend class ExternalReference;
+ friend class PlatformFeatureScope;
+ DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
+};
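An illustrative guarded-use sketch (CpuFeatureScope is declared in the shared assembler.h, not in this diff; 'assm' is a hypothetical Assembler):

    if (CpuFeatures::IsSupported(SUDIV)) {
      CpuFeatureScope scope(&assm, SUDIV);
      assm.sdiv(r0, r1, r2);   // the scope satisfies the IsEnabled(SUDIV) assert
    }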
+
+
// CPU Registers.
//
// 1) We would prefer to use an enum, but enum values are assignment-
@@ -68,24 +135,52 @@ namespace internal {
// mode. This way we get the compile-time error checking in debug mode
// and best performance in optimized code.
+// These constants are used in several locations, including static initializers
+const int kRegister_no_reg_Code = -1;
+const int kRegister_r0_Code = 0;
+const int kRegister_r1_Code = 1;
+const int kRegister_r2_Code = 2;
+const int kRegister_r3_Code = 3;
+const int kRegister_r4_Code = 4;
+const int kRegister_r5_Code = 5;
+const int kRegister_r6_Code = 6;
+const int kRegister_r7_Code = 7;
+const int kRegister_r8_Code = 8;
+const int kRegister_r9_Code = 9;
+const int kRegister_r10_Code = 10;
+const int kRegister_fp_Code = 11;
+const int kRegister_ip_Code = 12;
+const int kRegister_sp_Code = 13;
+const int kRegister_lr_Code = 14;
+const int kRegister_pc_Code = 15;
+
// Core register
struct Register {
static const int kNumRegisters = 16;
- static const int kNumAllocatableRegisters = 8;
+ static const int kMaxNumAllocatableRegisters =
+ FLAG_enable_ool_constant_pool ? 8 : 9;
static const int kSizeInBytes = 4;
+ inline static int NumAllocatableRegisters();
+
static int ToAllocationIndex(Register reg) {
- ASSERT(reg.code() < kNumAllocatableRegisters);
+ if (FLAG_enable_ool_constant_pool && (reg.code() >= kRegister_r8_Code)) {
+ return reg.code() - 1;
+ }
+ ASSERT(reg.code() < kMaxNumAllocatableRegisters);
return reg.code();
}
static Register FromAllocationIndex(int index) {
- ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+ ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
+ if (FLAG_enable_ool_constant_pool && (index >= 7)) {
+ return from_code(index + 1);
+ }
return from_code(index);
}
static const char* AllocationIndexToString(int index) {
- ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+ ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
const char* const names[] = {
"r0",
"r1",
@@ -95,7 +190,11 @@ struct Register {
"r5",
"r6",
"r7",
+ "r8",
};
+ if (FLAG_enable_ool_constant_pool && (index >= 7)) {
+ return names[index + 1];
+ }
return names[index];
}
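Reading the mapping above as a worked example: with FLAG_enable_ool_constant_pool set, r7 is reserved as the constant pool pointer, so ToAllocationIndex(r8) == 7, FromAllocationIndex(7) == r8 and AllocationIndexToString(7) == "r8"; with the flag off, index 7 is simply r7 and r8 becomes the ninth allocatable register.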
@@ -124,25 +223,6 @@ struct Register {
int code_;
};
-// These constants are used in several locations, including static initializers
-const int kRegister_no_reg_Code = -1;
-const int kRegister_r0_Code = 0;
-const int kRegister_r1_Code = 1;
-const int kRegister_r2_Code = 2;
-const int kRegister_r3_Code = 3;
-const int kRegister_r4_Code = 4;
-const int kRegister_r5_Code = 5;
-const int kRegister_r6_Code = 6;
-const int kRegister_r7_Code = 7;
-const int kRegister_r8_Code = 8;
-const int kRegister_r9_Code = 9;
-const int kRegister_r10_Code = 10;
-const int kRegister_fp_Code = 11;
-const int kRegister_ip_Code = 12;
-const int kRegister_sp_Code = 13;
-const int kRegister_lr_Code = 14;
-const int kRegister_pc_Code = 15;
-
const Register no_reg = { kRegister_no_reg_Code };
const Register r0 = { kRegister_r0_Code };
@@ -152,6 +232,7 @@ const Register r3 = { kRegister_r3_Code };
const Register r4 = { kRegister_r4_Code };
const Register r5 = { kRegister_r5_Code };
const Register r6 = { kRegister_r6_Code };
+// Used as constant pool pointer register if FLAG_enable_ool_constant_pool.
const Register r7 = { kRegister_r7_Code };
// Used as context register.
const Register r8 = { kRegister_r8_Code };
@@ -165,9 +246,9 @@ const Register sp = { kRegister_sp_Code };
const Register lr = { kRegister_lr_Code };
const Register pc = { kRegister_pc_Code };
-
// Single word VFP register.
struct SwVfpRegister {
+ static const int kSizeInBytes = 4;
bool is_valid() const { return 0 <= code_ && code_ < 32; }
bool is(SwVfpRegister reg) const { return code_ == reg.code_; }
int code() const {
@@ -190,51 +271,78 @@ struct SwVfpRegister {
// Double word VFP register.
struct DwVfpRegister {
- static const int kNumRegisters = 16;
+ static const int kMaxNumRegisters = 32;
// A few double registers are reserved: one as a scratch register and one to
// hold 0.0, which does not fit in the immediate field of vmov instructions.
// d14: 0.0
// d15: scratch register.
static const int kNumReservedRegisters = 2;
- static const int kNumAllocatableRegisters = kNumRegisters -
+ static const int kMaxNumAllocatableRegisters = kMaxNumRegisters -
kNumReservedRegisters;
+ static const int kSizeInBytes = 8;
+
+ // Note: the number of registers can be different at snapshot and run-time.
+ // Any code included in the snapshot must be able to run both with 16 or 32
+ // registers.
+ inline static int NumRegisters();
+ inline static int NumAllocatableRegisters();
inline static int ToAllocationIndex(DwVfpRegister reg);
+ static const char* AllocationIndexToString(int index);
+ inline static DwVfpRegister FromAllocationIndex(int index);
- static DwVfpRegister FromAllocationIndex(int index) {
- ASSERT(index >= 0 && index < kNumAllocatableRegisters);
- return from_code(index);
+ static DwVfpRegister from_code(int code) {
+ DwVfpRegister r = { code };
+ return r;
}
- static const char* AllocationIndexToString(int index) {
- ASSERT(index >= 0 && index < kNumAllocatableRegisters);
- const char* const names[] = {
- "d0",
- "d1",
- "d2",
- "d3",
- "d4",
- "d5",
- "d6",
- "d7",
- "d8",
- "d9",
- "d10",
- "d11",
- "d12",
- "d13"
- };
- return names[index];
+ bool is_valid() const {
+ return 0 <= code_ && code_ < kMaxNumRegisters;
+ }
+ bool is(DwVfpRegister reg) const { return code_ == reg.code_; }
+ int code() const {
+ ASSERT(is_valid());
+ return code_;
+ }
+ int bit() const {
+ ASSERT(is_valid());
+ return 1 << code_;
+ }
+ void split_code(int* vm, int* m) const {
+ ASSERT(is_valid());
+ *m = (code_ & 0x10) >> 4;
+ *vm = code_ & 0x0F;
}
- static DwVfpRegister from_code(int code) {
- DwVfpRegister r = { code };
+ int code_;
+};
+
+
+typedef DwVfpRegister DoubleRegister;
+
+
+// Double word VFP register d0-15.
+struct LowDwVfpRegister {
+ public:
+ static const int kMaxNumLowRegisters = 16;
+ operator DwVfpRegister() const {
+ DwVfpRegister r = { code_ };
+ return r;
+ }
+ static LowDwVfpRegister from_code(int code) {
+ LowDwVfpRegister r = { code };
return r;
}
- // Supporting d0 to d15, can be later extended to d31.
- bool is_valid() const { return 0 <= code_ && code_ < 16; }
+ bool is_valid() const {
+ return 0 <= code_ && code_ < kMaxNumLowRegisters;
+ }
bool is(DwVfpRegister reg) const { return code_ == reg.code_; }
+ bool is(LowDwVfpRegister reg) const { return code_ == reg.code_; }
+ int code() const {
+ ASSERT(is_valid());
+ return code_;
+ }
SwVfpRegister low() const {
SwVfpRegister reg;
reg.code_ = code_ * 2;
@@ -249,14 +357,28 @@ struct DwVfpRegister {
ASSERT(reg.is_valid());
return reg;
}
+
+ int code_;
+};
+
+
+// Quad word NEON register.
+struct QwNeonRegister {
+ static const int kMaxNumRegisters = 16;
+
+ static QwNeonRegister from_code(int code) {
+ QwNeonRegister r = { code };
+ return r;
+ }
+
+ bool is_valid() const {
+ return (0 <= code_) && (code_ < kMaxNumRegisters);
+ }
+ bool is(QwNeonRegister reg) const { return code_ == reg.code_; }
int code() const {
ASSERT(is_valid());
return code_;
}
- int bit() const {
- ASSERT(is_valid());
- return 1 << code_;
- }
void split_code(int* vm, int* m) const {
ASSERT(is_valid());
*m = (code_ & 0x10) >> 4;
@@ -267,7 +389,7 @@ struct DwVfpRegister {
};
-typedef DwVfpRegister DoubleRegister;
+typedef QwNeonRegister QuadRegister;
// Support for the VFP registers s0 to s31 (d0 to d15).
@@ -306,22 +428,56 @@ const SwVfpRegister s30 = { 30 };
const SwVfpRegister s31 = { 31 };
const DwVfpRegister no_dreg = { -1 };
-const DwVfpRegister d0 = { 0 };
-const DwVfpRegister d1 = { 1 };
-const DwVfpRegister d2 = { 2 };
-const DwVfpRegister d3 = { 3 };
-const DwVfpRegister d4 = { 4 };
-const DwVfpRegister d5 = { 5 };
-const DwVfpRegister d6 = { 6 };
-const DwVfpRegister d7 = { 7 };
-const DwVfpRegister d8 = { 8 };
-const DwVfpRegister d9 = { 9 };
-const DwVfpRegister d10 = { 10 };
-const DwVfpRegister d11 = { 11 };
-const DwVfpRegister d12 = { 12 };
-const DwVfpRegister d13 = { 13 };
-const DwVfpRegister d14 = { 14 };
-const DwVfpRegister d15 = { 15 };
+const LowDwVfpRegister d0 = { 0 };
+const LowDwVfpRegister d1 = { 1 };
+const LowDwVfpRegister d2 = { 2 };
+const LowDwVfpRegister d3 = { 3 };
+const LowDwVfpRegister d4 = { 4 };
+const LowDwVfpRegister d5 = { 5 };
+const LowDwVfpRegister d6 = { 6 };
+const LowDwVfpRegister d7 = { 7 };
+const LowDwVfpRegister d8 = { 8 };
+const LowDwVfpRegister d9 = { 9 };
+const LowDwVfpRegister d10 = { 10 };
+const LowDwVfpRegister d11 = { 11 };
+const LowDwVfpRegister d12 = { 12 };
+const LowDwVfpRegister d13 = { 13 };
+const LowDwVfpRegister d14 = { 14 };
+const LowDwVfpRegister d15 = { 15 };
+const DwVfpRegister d16 = { 16 };
+const DwVfpRegister d17 = { 17 };
+const DwVfpRegister d18 = { 18 };
+const DwVfpRegister d19 = { 19 };
+const DwVfpRegister d20 = { 20 };
+const DwVfpRegister d21 = { 21 };
+const DwVfpRegister d22 = { 22 };
+const DwVfpRegister d23 = { 23 };
+const DwVfpRegister d24 = { 24 };
+const DwVfpRegister d25 = { 25 };
+const DwVfpRegister d26 = { 26 };
+const DwVfpRegister d27 = { 27 };
+const DwVfpRegister d28 = { 28 };
+const DwVfpRegister d29 = { 29 };
+const DwVfpRegister d30 = { 30 };
+const DwVfpRegister d31 = { 31 };
+
+const QwNeonRegister q0 = { 0 };
+const QwNeonRegister q1 = { 1 };
+const QwNeonRegister q2 = { 2 };
+const QwNeonRegister q3 = { 3 };
+const QwNeonRegister q4 = { 4 };
+const QwNeonRegister q5 = { 5 };
+const QwNeonRegister q6 = { 6 };
+const QwNeonRegister q7 = { 7 };
+const QwNeonRegister q8 = { 8 };
+const QwNeonRegister q9 = { 9 };
+const QwNeonRegister q10 = { 10 };
+const QwNeonRegister q11 = { 11 };
+const QwNeonRegister q12 = { 12 };
+const QwNeonRegister q13 = { 13 };
+const QwNeonRegister q14 = { 14 };
+const QwNeonRegister q15 = { 15 };
+
// Aliases for double registers. Defined using #define instead of
// "static const DwVfpRegister&" because Clang complains otherwise when a
@@ -399,7 +555,7 @@ class Operand BASE_EMBEDDED {
public:
// immediate
INLINE(explicit Operand(int32_t immediate,
- RelocInfo::Mode rmode = RelocInfo::NONE));
+ RelocInfo::Mode rmode = RelocInfo::NONE32));
INLINE(static Operand Zero()) {
return Operand(static_cast<int32_t>(0));
}
@@ -412,6 +568,17 @@ class Operand BASE_EMBEDDED {
// rm <shift_op> shift_imm
explicit Operand(Register rm, ShiftOp shift_op, int shift_imm);
+ INLINE(static Operand SmiUntag(Register rm)) {
+ return Operand(rm, ASR, kSmiTagSize);
+ }
+ INLINE(static Operand PointerOffsetFromSmiKey(Register key)) {
+ STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
+ return Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize);
+ }
+ INLINE(static Operand DoubleOffsetFromSmiKey(Register key)) {
+ STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kDoubleSizeLog2);
+ return Operand(key, LSL, kDoubleSizeLog2 - kSmiTagSize);
+ }
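A usage sketch of the new Smi operand helpers (hypothetical registers; on ARM a Smi stores its value shifted left by kSmiTagSize == 1):

    // key holds Smi(3), i.e. the raw value 6
    __ mov(r1, Operand::SmiUntag(key));                        // r1 = 3
    __ add(r0, array, Operand::PointerOffsetFromSmiKey(key));  // r0 = array + 3 * kPointerSize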
// rm <shift_op> rs
explicit Operand(Register rm, ShiftOp shift_op, Register rs);
@@ -468,6 +635,12 @@ class MemOperand BASE_EMBEDDED {
// [rn], +/- rm <shift_op> shift_imm PostIndex/NegPostIndex
explicit MemOperand(Register rn, Register rm,
ShiftOp shift_op, int shift_imm, AddrMode am = Offset);
+ INLINE(static MemOperand PointerAddressFromSmiKey(Register array,
+ Register key,
+ AddrMode am = Offset)) {
+ STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
+ return MemOperand(array, key, LSL, kPointerSizeLog2 - kSmiTagSize, am);
+ }
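A usage sketch (hypothetical registers): load the key-th pointer-sized element of a backing store starting at 'elements', where 'key' holds a Smi index:

    __ ldr(r0, MemOperand::PointerAddressFromSmiKey(elements, key));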
void set_offset(int32_t offset) {
ASSERT(rm_.is(no_reg));
@@ -498,114 +671,42 @@ class MemOperand BASE_EMBEDDED {
friend class Assembler;
};
-// CpuFeatures keeps track of which features are supported by the target CPU.
-// Supported features must be enabled by a Scope before use.
-class CpuFeatures : public AllStatic {
- public:
- // Detect features of the target CPU. Set safe defaults if the serializer
- // is enabled (snapshots must be portable).
- static void Probe();
- // Check whether a feature is supported by the target CPU.
- static bool IsSupported(CpuFeature f) {
- ASSERT(initialized_);
- if (f == VFP3 && !FLAG_enable_vfp3) return false;
- if (f == VFP2 && !FLAG_enable_vfp2) return false;
- if (f == SUDIV && !FLAG_enable_sudiv) return false;
- if (f == UNALIGNED_ACCESSES && !FLAG_enable_unaligned_accesses) {
- return false;
- }
- return (supported_ & (1u << f)) != 0;
- }
-
-#ifdef DEBUG
- // Check whether a feature is currently enabled.
- static bool IsEnabled(CpuFeature f) {
- ASSERT(initialized_);
- Isolate* isolate = Isolate::UncheckedCurrent();
- if (isolate == NULL) {
- // When no isolate is available, work as if we're running in
- // release mode.
- return IsSupported(f);
- }
- unsigned enabled = static_cast<unsigned>(isolate->enabled_cpu_features());
- return (enabled & (1u << f)) != 0;
- }
-#endif
-
- // Enable a specified feature within a scope.
- class Scope BASE_EMBEDDED {
-#ifdef DEBUG
-
- public:
- explicit Scope(CpuFeature f) {
- unsigned mask = 1u << f;
- // VFP2 and ARMv7 are implied by VFP3.
- if (f == VFP3) mask |= 1u << VFP2 | 1u << ARMv7;
- ASSERT(CpuFeatures::IsSupported(f));
- ASSERT(!Serializer::enabled() ||
- (CpuFeatures::found_by_runtime_probing_ & mask) == 0);
- isolate_ = Isolate::UncheckedCurrent();
- old_enabled_ = 0;
- if (isolate_ != NULL) {
- old_enabled_ = static_cast<unsigned>(isolate_->enabled_cpu_features());
- isolate_->set_enabled_cpu_features(old_enabled_ | mask);
- }
- }
- ~Scope() {
- ASSERT_EQ(Isolate::UncheckedCurrent(), isolate_);
- if (isolate_ != NULL) {
- isolate_->set_enabled_cpu_features(old_enabled_);
- }
- }
-
- private:
- Isolate* isolate_;
- unsigned old_enabled_;
-#else
+// Class NeonMemOperand represents a memory operand in load and
+// store NEON instructions.
+class NeonMemOperand BASE_EMBEDDED {
+ public:
+ // [rn {:align}] Offset
+ // [rn {:align}]! PostIndex
+ explicit NeonMemOperand(Register rn, AddrMode am = Offset, int align = 0);
- public:
- explicit Scope(CpuFeature f) {}
-#endif
- };
+ // [rn {:align}], rm PostIndex
+ explicit NeonMemOperand(Register rn, Register rm, int align = 0);
- class TryForceFeatureScope BASE_EMBEDDED {
- public:
- explicit TryForceFeatureScope(CpuFeature f)
- : old_supported_(CpuFeatures::supported_) {
- if (CanForce()) {
- CpuFeatures::supported_ |= (1u << f);
- }
- }
+ Register rn() const { return rn_; }
+ Register rm() const { return rm_; }
+ int align() const { return align_; }
- ~TryForceFeatureScope() {
- if (CanForce()) {
- CpuFeatures::supported_ = old_supported_;
- }
- }
+ private:
+ void SetAlignment(int align);
- private:
- static bool CanForce() {
- // It's only safe to temporarily force support of CPU features
- // when there's only a single isolate, which is guaranteed when
- // the serializer is enabled.
- return Serializer::enabled();
- }
+ Register rn_; // base
+ Register rm_; // register increment
+ int align_;
+};
- const unsigned old_supported_;
- };
+// Class NeonListOperand represents a list of NEON registers.
+class NeonListOperand BASE_EMBEDDED {
+ public:
+ explicit NeonListOperand(DoubleRegister base, int registers_count = 1);
+ DoubleRegister base() const { return base_; }
+ NeonListType type() const { return type_; }
private:
-#ifdef DEBUG
- static bool initialized_;
-#endif
- static unsigned supported_;
- static unsigned found_by_runtime_probing_;
-
- DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
+ DoubleRegister base_;
+ NeonListType type_;
};
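A usage sketch of the NEON operand classes (Neon8 is assumed to come from constants-arm.h; r0/r1 are hypothetical source/destination pointers):

    __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(r0));  // load d0-d3 (32 bytes)
    __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(r1));  // store them back out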
-
extern const Instr kMovLrPc;
extern const Instr kLdrPCMask;
extern const Instr kLdrPCPattern;
@@ -629,7 +730,11 @@ extern const Instr kCmpCmnFlip;
extern const Instr kAddSubFlip;
extern const Instr kAndBicFlip;
-
+struct VmovIndex {
+ unsigned char index;
+};
+const VmovIndex VmovIndexLo = { 0 };
+const VmovIndex VmovIndexHi = { 1 };
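A usage sketch: VmovIndexLo/VmovIndexHi select which 32-bit half of a D register the new vmov overloads touch, which is how doubles reach d16-d31 (registers with no S aliases):

    __ vmov(d16, VmovIndexLo, r0);  // d16[31:0]  = r0
    __ vmov(d16, VmovIndexHi, r1);  // d16[63:32] = r1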
class Assembler : public AssemblerBase {
public:
@@ -647,15 +752,7 @@ class Assembler : public AssemblerBase {
// is too small, a fatal error occurs. No deallocation of the buffer is done
// upon destruction of the assembler.
Assembler(Isolate* isolate, void* buffer, int buffer_size);
- ~Assembler();
-
- // Overrides the default provided by FLAG_debug_code.
- void set_emit_debug_code(bool value) { emit_debug_code_ = value; }
-
- // Avoids using instructions that vary in size in unpredictable ways between
- // the snapshot and the running VM. This is needed by the full compiler so
- // that it can recompile code with debug support and fix the PC.
- void set_predictable_code_size(bool value) { predictable_code_size_ = value; }
+ virtual ~Assembler();
// GetCode emits any pending (non-emitted) code and fills the descriptor
// desc. GetCode() is idempotent; it returns the same result if no other
@@ -684,10 +781,6 @@ class Assembler : public AssemblerBase {
// Manages the jump elimination optimization if the second parameter is true.
int branch_offset(Label* L, bool jump_elimination_allowed);
- // Puts a labels target address at the given position.
- // The high 8 bits are set to zero.
- void label_at_put(Label* L, int at_offset);
-
// Return the address in the constant pool of the code target address used by
// the branch/call instruction at pc, or the object in a mov.
INLINE(static Address target_pointer_address_at(Address pc));
@@ -728,37 +821,19 @@ class Assembler : public AssemblerBase {
// Distance between start of patched return sequence and the emitted address
// to jump to.
-#ifdef USE_BLX
// Patched return sequence is:
  //  ldr ip, [pc, #0] @ emitted address and start
// blx ip
static const int kPatchReturnSequenceAddressOffset = 0 * kInstrSize;
-#else
- // Patched return sequence is:
- // mov lr, pc @ start of sequence
- //  ldr pc, [pc, #-4] @ emitted address
- static const int kPatchReturnSequenceAddressOffset = kInstrSize;
-#endif
// Distance between start of patched debug break slot and the emitted address
// to jump to.
-#ifdef USE_BLX
// Patched debug break slot code is:
  //  ldr ip, [pc, #0] @ emitted address and start
// blx ip
static const int kPatchDebugBreakSlotAddressOffset = 0 * kInstrSize;
-#else
- // Patched debug break slot code is:
- // mov lr, pc @ start of sequence
- //  ldr pc, [pc, #-4] @ emitted address
- static const int kPatchDebugBreakSlotAddressOffset = kInstrSize;
-#endif
-#ifdef USE_BLX
static const int kPatchDebugBreakSlotReturnOffset = 2 * kInstrSize;
-#else
- static const int kPatchDebugBreakSlotReturnOffset = kInstrSize;
-#endif
// Difference between address of current opcode and value read from pc
// register.
@@ -857,6 +932,10 @@ class Assembler : public AssemblerBase {
mov(dst, Operand(src), s, cond);
}
+ // Load the position of the label relative to the generated code object
+ // pointer in a register.
+ void mov_label_offset(Register dst, Label* label);
+
// ARMv7 instructions for loading a 32 bit immediate in two instructions.
// This may actually emit a different mov instruction, but on an ARMv7 it
// is guaranteed to only emit one instruction.
@@ -932,6 +1011,19 @@ class Assembler : public AssemblerBase {
void bfi(Register dst, Register src, int lsb, int width,
Condition cond = al);
+ void pkhbt(Register dst, Register src1, const Operand& src2,
+ Condition cond = al);
+
+ void pkhtb(Register dst, Register src1, const Operand& src2,
+ Condition cond = al);
+
+ void uxtb(Register dst, const Operand& src, Condition cond = al);
+
+ void uxtab(Register dst, Register src1, const Operand& src2,
+ Condition cond = al);
+
+ void uxtb16(Register dst, const Operand& src, Condition cond = al);
+
// Status register access instructions
void mrs(Register dst, SRegister s, Condition cond = al);
@@ -953,6 +1045,9 @@ class Assembler : public AssemblerBase {
Register src2,
const MemOperand& dst, Condition cond = al);
+ // Preload instructions
+ void pld(const MemOperand& address);
+
// Load/Store multiple instructions
void ldm(BlockAddrMode am, Register base, RegList dst, Condition cond = al);
void stm(BlockAddrMode am, Register base, RegList src, Condition cond = al);
@@ -1002,10 +1097,7 @@ class Assembler : public AssemblerBase {
LFlag l = Short); // v5 and above
// Support for VFP.
- // All these APIs support S0 to S31 and D0 to D15.
- // Currently these APIs do not support extended D registers, i.e, D16 to D31.
- // However, some simple modifications can allow
- // these APIs to support D16 to D31.
+ // All these APIs support S0 to S31 and D0 to D31.
void vldr(const DwVfpRegister dst,
const Register base,
@@ -1065,8 +1157,7 @@ class Assembler : public AssemblerBase {
void vmov(const DwVfpRegister dst,
double imm,
- const Register scratch = no_reg,
- const Condition cond = al);
+ const Register scratch = no_reg);
void vmov(const SwVfpRegister dst,
const SwVfpRegister src,
const Condition cond = al);
@@ -1074,6 +1165,14 @@ class Assembler : public AssemblerBase {
const DwVfpRegister src,
const Condition cond = al);
void vmov(const DwVfpRegister dst,
+ const VmovIndex index,
+ const Register src,
+ const Condition cond = al);
+ void vmov(const Register dst,
+ const VmovIndex index,
+ const DwVfpRegister src,
+ const Condition cond = al);
+ void vmov(const DwVfpRegister dst,
const Register src1,
const Register src2,
const Condition cond = al);
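The two overloads above that take a VmovIndex move a single 32-bit lane of a double register to or from a core register. A minimal usage sketch (illustrative only, not taken from this patch; the `__` macro and the register names are assumed from the surrounding ARM code generators):

    __ vmov(d0, VmovIndexLo, r0);  // write the low word of d0 from r0
    __ vmov(d0, VmovIndexHi, r1);  // write the high word of d0 from r1
    __ vmov(r2, VmovIndexHi, d0);  // read the high word of d0 back into r2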
@@ -1115,6 +1214,9 @@ class Assembler : public AssemblerBase {
const DwVfpRegister src,
VFPConversionMode mode = kDefaultRoundToZero,
const Condition cond = al);
+ void vcvt_f64_s32(const DwVfpRegister dst,
+ int fraction_bits,
+ const Condition cond = al);
void vneg(const DwVfpRegister dst,
const DwVfpRegister src,
@@ -1134,6 +1236,14 @@ class Assembler : public AssemblerBase {
const DwVfpRegister src1,
const DwVfpRegister src2,
const Condition cond = al);
+ void vmla(const DwVfpRegister dst,
+ const DwVfpRegister src1,
+ const DwVfpRegister src2,
+ const Condition cond = al);
+ void vmls(const DwVfpRegister dst,
+ const DwVfpRegister src1,
+ const DwVfpRegister src2,
+ const Condition cond = al);
void vdiv(const DwVfpRegister dst,
const DwVfpRegister src1,
const DwVfpRegister src2,
@@ -1152,6 +1262,17 @@ class Assembler : public AssemblerBase {
const DwVfpRegister src,
const Condition cond = al);
+ // Support for NEON.
+ // All these APIs support D0 to D31 and Q0 to Q15.
+
+ void vld1(NeonSize size,
+ const NeonListOperand& dst,
+ const NeonMemOperand& src);
+ void vst1(NeonSize size,
+ const NeonListOperand& src,
+ const NeonMemOperand& dst);
+ void vmovl(NeonDataType dt, QwNeonRegister dst, DwVfpRegister src);
+
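The three NEON declarations above are the minimum the code generators need for bulk copies with widening. A rough sketch of how they combine with NeonListOperand and NeonMemOperand, assuming the NeonSize/NeonDataType values (Neon8, NeonU8) and the d/q register definitions come from the surrounding constants and assembler headers rather than this hunk:

    __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(r0));  // load d0-d3 from [r0]
    __ vmovl(NeonU8, q2, d0);                                    // widen the 8-bit lanes of d0 into q2
    __ vst1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(r1));  // store d4-d7 to [r1]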
// Pseudo instructions
// Different nop operations are used by the code generator to detect certain
@@ -1185,20 +1306,10 @@ class Assembler : public AssemblerBase {
// Jump unconditionally to given label.
void jmp(Label* L) { b(L, al); }
- bool predictable_code_size() const { return predictable_code_size_; }
-
static bool use_immediate_embedded_pointer_loads(
const Assembler* assembler) {
-#ifdef USE_BLX
return CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) &&
(assembler == NULL || !assembler->predictable_code_size());
-#else
- // If not using BLX, all loads from the constant pool cannot be immediate,
- // because the ldr pc, [pc + #xxxx] used for calls must be a single
- // instruction and cannot be easily distinguished out of context from
- // other loads that could use movw/movt.
- return false;
-#endif
}
// Check the code size generated from label to here.
@@ -1282,8 +1393,6 @@ class Assembler : public AssemblerBase {
void db(uint8_t data);
void dd(uint32_t data);
- int pc_offset() const { return pc_ - buffer_; }
-
PositionsRecorder* positions_recorder() { return &positions_recorder_; }
// Read/patch instructions
@@ -1299,8 +1408,11 @@ class Assembler : public AssemblerBase {
static bool IsBranch(Instr instr);
static int GetBranchOffset(Instr instr);
static bool IsLdrRegisterImmediate(Instr instr);
+ static bool IsVldrDRegisterImmediate(Instr instr);
static int GetLdrRegisterImmediateOffset(Instr instr);
+ static int GetVldrDRegisterImmediateOffset(Instr instr);
static Instr SetLdrRegisterImmediateOffset(Instr instr, int offset);
+ static Instr SetVldrDRegisterImmediateOffset(Instr instr, int offset);
static bool IsStrRegisterImmediate(Instr instr);
static Instr SetStrRegisterImmediateOffset(Instr instr, int offset);
static bool IsAddRegisterImmediate(Instr instr);
@@ -1315,6 +1427,7 @@ class Assembler : public AssemblerBase {
static bool IsStrRegFpNegOffset(Instr instr);
static bool IsLdrRegFpNegOffset(Instr instr);
static bool IsLdrPcImmediateOffset(Instr instr);
+ static bool IsVldrDPcImmediateOffset(Instr instr);
static bool IsTstImmediate(Instr instr);
static bool IsCmpRegister(Instr instr);
static bool IsCmpImmediate(Instr instr);
@@ -1325,10 +1438,13 @@ class Assembler : public AssemblerBase {
static bool IsMovW(Instr instr);
// Constants in pools are accessed via pc relative addressing, which can
- // reach +/-4KB thereby defining a maximum distance between the instruction
- // and the accessed constant.
- static const int kMaxDistToPool = 4*KB;
- static const int kMaxNumPendingRelocInfo = kMaxDistToPool/kInstrSize;
+ // reach +/-4KB for integer PC-relative loads and +/-1KB for floating-point
+ // PC-relative loads, thereby defining a maximum distance between the
+ // instruction and the accessed constant.
+ static const int kMaxDistToIntPool = 4*KB;
+ static const int kMaxDistToFPPool = 1*KB;
+ // Any pending relocation could be an integer one, so the integer-pool distance is the limit.
+ static const int kMaxNumPendingRelocInfo = kMaxDistToIntPool/kInstrSize;
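As a worked number (assuming the existing ARM kInstrSize of 4 bytes), kMaxNumPendingRelocInfo comes out to 4 KB / 4 = 1024 pending entries, while a pending 64-bit constant must additionally stay within the 1 KB vldr reach tracked by kMaxDistToFPPool.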
// Postpone the generation of the constant pool for the specified number of
// instructions.
@@ -1343,8 +1459,6 @@ class Assembler : public AssemblerBase {
// the relocation info.
TypeFeedbackId recorded_ast_id_;
- bool emit_debug_code() const { return emit_debug_code_; }
-
int buffer_space() const { return reloc_info_writer.pos() - pc_; }
// Decode branch instruction at pos and return branch target pos
@@ -1370,7 +1484,9 @@ class Assembler : public AssemblerBase {
if (--const_pool_blocked_nesting_ == 0) {
// Check the constant pool hasn't been blocked for too long.
ASSERT((num_pending_reloc_info_ == 0) ||
- (pc_offset() < (first_const_pool_use_ + kMaxDistToPool)));
+ (pc_offset() < (first_const_pool_use_ + kMaxDistToIntPool)));
+ ASSERT((num_pending_64_bit_reloc_info_ == 0) ||
+ (pc_offset() < (first_const_pool_use_ + kMaxDistToFPPool)));
// Two cases:
// * no_const_pool_before_ >= next_buffer_check_ and the emission is
// still blocked
@@ -1386,13 +1502,6 @@ class Assembler : public AssemblerBase {
}
private:
- // Code buffer:
- // The buffer into which code and relocation info are generated.
- byte* buffer_;
- int buffer_size_;
- // True if the assembler owns the buffer, false if buffer is external.
- bool own_buffer_;
-
int next_buffer_check_; // pc offset of next buffer check
// Code generation
@@ -1401,7 +1510,6 @@ class Assembler : public AssemblerBase {
// not have to check for overflow. The same is true for writes of large
// relocation info entries.
static const int kGap = 32;
- byte* pc_; // the program counter; moves forward
// Constant pool generation
// Pools are emitted in the instruction stream, preferably after unconditional
@@ -1421,13 +1529,6 @@ class Assembler : public AssemblerBase {
static const int kCheckPoolInterval = kCheckPoolIntervalInst * kInstrSize;
- // Average distance between a constant pool and the first instruction
- // accessing the constant pool. Longer distance should result in less I-cache
- // pollution.
- // In practice the distance will be smaller since constant pool emission is
- // forced after function return and sometimes after unconditional branches.
- static const int kAvgDistToPool = kMaxDistToPool - kCheckPoolInterval;
-
// Emission of the constant pool may be blocked in some code sequences.
int const_pool_blocked_nesting_; // Block emission if this is not zero.
int no_const_pool_before_; // Block emission before this pc offset.
@@ -1452,6 +1553,9 @@ class Assembler : public AssemblerBase {
RelocInfo pending_reloc_info_[kMaxNumPendingRelocInfo];
// number of pending reloc info entries in the buffer
int num_pending_reloc_info_;
+ // Number of pending reloc info entries included above which also happen to
+ // be 64-bit.
+ int num_pending_64_bit_reloc_info_;
// The bound position, before this we cannot do instruction elimination.
int last_bound_pos_;
@@ -1477,7 +1581,6 @@ class Assembler : public AssemblerBase {
// Labels
void print(Label* L);
void bind_to(Label* L, int pos);
- void link_to(Label* L, Label* appendix);
void next(Label* L);
enum UseConstantPoolMode {
@@ -1488,17 +1591,14 @@ class Assembler : public AssemblerBase {
// Record reloc info for current pc_
void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0,
UseConstantPoolMode mode = USE_CONSTANT_POOL);
+ void RecordRelocInfo(double data);
+ void RecordRelocInfoConstantPoolEntryHelper(const RelocInfo& rinfo);
- friend class RegExpMacroAssemblerARM;
friend class RelocInfo;
friend class CodePatcher;
friend class BlockConstPoolScope;
PositionsRecorder positions_recorder_;
-
- bool emit_debug_code_;
- bool predictable_code_size_;
-
friend class PositionsRecorder;
friend class EnsureSpace;
};
@@ -1512,26 +1612,6 @@ class EnsureSpace BASE_EMBEDDED {
};
-class PredictableCodeSizeScope {
- public:
- explicit PredictableCodeSizeScope(Assembler* assembler)
- : asm_(assembler) {
- old_value_ = assembler->predictable_code_size();
- assembler->set_predictable_code_size(true);
- }
-
- ~PredictableCodeSizeScope() {
- if (!old_value_) {
- asm_->set_predictable_code_size(false);
- }
- }
-
- private:
- Assembler* asm_;
- bool old_value_;
-};
-
-
} } // namespace v8::internal
#endif // V8_ARM_ASSEMBLER_ARM_H_
diff --git a/deps/v8/src/arm/builtins-arm.cc b/deps/v8/src/arm/builtins-arm.cc
index 2d1d7b119..60f529003 100644
--- a/deps/v8/src/arm/builtins-arm.cc
+++ b/deps/v8/src/arm/builtins-arm.cc
@@ -27,7 +27,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_ARM)
+#if V8_TARGET_ARCH_ARM
#include "codegen.h"
#include "debug.h"
@@ -104,373 +104,6 @@ static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
}
-// Allocate an empty JSArray. The allocated array is put into the result
-// register. An elements backing store is allocated with size initial_capacity
-// and filled with the hole values.
-static void AllocateEmptyJSArray(MacroAssembler* masm,
- Register array_function,
- Register result,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required) {
- const int initial_capacity = JSArray::kPreallocatedArrayElements;
- STATIC_ASSERT(initial_capacity >= 0);
- __ LoadInitialArrayMap(array_function, scratch2, scratch1, false);
-
- // Allocate the JSArray object together with space for a fixed array with the
- // requested elements.
- int size = JSArray::kSize;
- if (initial_capacity > 0) {
- size += FixedArray::SizeFor(initial_capacity);
- }
- __ AllocateInNewSpace(size,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
-
- // Allocated the JSArray. Now initialize the fields except for the elements
- // array.
- // result: JSObject
- // scratch1: initial map
- // scratch2: start of next object
- __ str(scratch1, FieldMemOperand(result, JSObject::kMapOffset));
- __ LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
- __ str(scratch1, FieldMemOperand(result, JSArray::kPropertiesOffset));
- // Field JSArray::kElementsOffset is initialized later.
- __ mov(scratch3, Operand(0, RelocInfo::NONE));
- __ str(scratch3, FieldMemOperand(result, JSArray::kLengthOffset));
-
- if (initial_capacity == 0) {
- __ str(scratch1, FieldMemOperand(result, JSArray::kElementsOffset));
- return;
- }
-
- // Calculate the location of the elements array and set elements array member
- // of the JSArray.
- // result: JSObject
- // scratch2: start of next object
- __ add(scratch1, result, Operand(JSArray::kSize));
- __ str(scratch1, FieldMemOperand(result, JSArray::kElementsOffset));
-
- // Clear the heap tag on the elements array.
- __ sub(scratch1, scratch1, Operand(kHeapObjectTag));
-
- // Initialize the FixedArray and fill it with holes. FixedArray length is
- // stored as a smi.
- // result: JSObject
- // scratch1: elements array (untagged)
- // scratch2: start of next object
- __ LoadRoot(scratch3, Heap::kFixedArrayMapRootIndex);
- STATIC_ASSERT(0 * kPointerSize == FixedArray::kMapOffset);
- __ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
- __ mov(scratch3, Operand(Smi::FromInt(initial_capacity)));
- STATIC_ASSERT(1 * kPointerSize == FixedArray::kLengthOffset);
- __ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
-
- // Fill the FixedArray with the hole value. Inline the code if short.
- STATIC_ASSERT(2 * kPointerSize == FixedArray::kHeaderSize);
- __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
- static const int kLoopUnfoldLimit = 4;
- if (initial_capacity <= kLoopUnfoldLimit) {
- for (int i = 0; i < initial_capacity; i++) {
- __ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
- }
- } else {
- Label loop, entry;
- __ add(scratch2, scratch1, Operand(initial_capacity * kPointerSize));
- __ b(&entry);
- __ bind(&loop);
- __ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
- __ bind(&entry);
- __ cmp(scratch1, scratch2);
- __ b(lt, &loop);
- }
-}
-
-// Allocate a JSArray with the number of elements stored in a register. The
-// register array_function holds the built-in Array function and the register
-// array_size holds the size of the array as a smi. The allocated array is put
-// into the result register and beginning and end of the FixedArray elements
-// storage is put into registers elements_array_storage and elements_array_end
-// (see below for when that is not the case). If the parameter fill_with_holes
-// is true the allocated elements backing store is filled with the hole values
-// otherwise it is left uninitialized. When the backing store is filled the
-// register elements_array_storage is scratched.
-static void AllocateJSArray(MacroAssembler* masm,
- Register array_function, // Array function.
- Register array_size, // As a smi, cannot be 0.
- Register result,
- Register elements_array_storage,
- Register elements_array_end,
- Register scratch1,
- Register scratch2,
- bool fill_with_hole,
- Label* gc_required) {
- // Load the initial map from the array function.
- __ LoadInitialArrayMap(array_function, scratch2,
- elements_array_storage, fill_with_hole);
-
- if (FLAG_debug_code) { // Assert that array size is not zero.
- __ tst(array_size, array_size);
- __ Assert(ne, "array size is unexpectedly 0");
- }
-
- // Allocate the JSArray object together with space for a FixedArray with the
- // requested number of elements.
- STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
- __ mov(elements_array_end,
- Operand((JSArray::kSize + FixedArray::kHeaderSize) / kPointerSize));
- __ add(elements_array_end,
- elements_array_end,
- Operand(array_size, ASR, kSmiTagSize));
- __ AllocateInNewSpace(
- elements_array_end,
- result,
- scratch1,
- scratch2,
- gc_required,
- static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
-
- // Allocated the JSArray. Now initialize the fields except for the elements
- // array.
- // result: JSObject
- // elements_array_storage: initial map
- // array_size: size of array (smi)
- __ str(elements_array_storage, FieldMemOperand(result, JSObject::kMapOffset));
- __ LoadRoot(elements_array_storage, Heap::kEmptyFixedArrayRootIndex);
- __ str(elements_array_storage,
- FieldMemOperand(result, JSArray::kPropertiesOffset));
- // Field JSArray::kElementsOffset is initialized later.
- __ str(array_size, FieldMemOperand(result, JSArray::kLengthOffset));
-
- // Calculate the location of the elements array and set elements array member
- // of the JSArray.
- // result: JSObject
- // array_size: size of array (smi)
- __ add(elements_array_storage, result, Operand(JSArray::kSize));
- __ str(elements_array_storage,
- FieldMemOperand(result, JSArray::kElementsOffset));
-
- // Clear the heap tag on the elements array.
- STATIC_ASSERT(kSmiTag == 0);
- __ sub(elements_array_storage,
- elements_array_storage,
- Operand(kHeapObjectTag));
- // Initialize the fixed array and fill it with holes. FixedArray length is
- // stored as a smi.
- // result: JSObject
- // elements_array_storage: elements array (untagged)
- // array_size: size of array (smi)
- __ LoadRoot(scratch1, Heap::kFixedArrayMapRootIndex);
- ASSERT_EQ(0 * kPointerSize, FixedArray::kMapOffset);
- __ str(scratch1, MemOperand(elements_array_storage, kPointerSize, PostIndex));
- STATIC_ASSERT(kSmiTag == 0);
- ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
- __ str(array_size,
- MemOperand(elements_array_storage, kPointerSize, PostIndex));
-
- // Calculate elements array and elements array end.
- // result: JSObject
- // elements_array_storage: elements array element storage
- // array_size: smi-tagged size of elements array
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
- __ add(elements_array_end,
- elements_array_storage,
- Operand(array_size, LSL, kPointerSizeLog2 - kSmiTagSize));
-
- // Fill the allocated FixedArray with the hole value if requested.
- // result: JSObject
- // elements_array_storage: elements array element storage
- // elements_array_end: start of next object
- if (fill_with_hole) {
- Label loop, entry;
- __ LoadRoot(scratch1, Heap::kTheHoleValueRootIndex);
- __ jmp(&entry);
- __ bind(&loop);
- __ str(scratch1,
- MemOperand(elements_array_storage, kPointerSize, PostIndex));
- __ bind(&entry);
- __ cmp(elements_array_storage, elements_array_end);
- __ b(lt, &loop);
- }
-}
-
-// Create a new array for the built-in Array function. This function allocates
-// the JSArray object and the FixedArray elements array and initializes these.
-// If the Array cannot be constructed in native code the runtime is called. This
-// function assumes the following state:
-// r0: argc
-// r1: constructor (built-in Array function)
-// lr: return address
-// sp[0]: last argument
-// This function is used for both construct and normal calls of Array. The only
-// difference between handling a construct call and a normal call is that for a
-// construct call the constructor function in r1 needs to be preserved for
-// entering the generic code. In both cases argc in r0 needs to be preserved.
-// Both registers are preserved by this code so no need to differentiate between
-// construct call and normal call.
-static void ArrayNativeCode(MacroAssembler* masm,
- Label* call_generic_code) {
- Counters* counters = masm->isolate()->counters();
- Label argc_one_or_more, argc_two_or_more, not_empty_array, empty_array,
- has_non_smi_element, finish, cant_transition_map, not_double;
-
- // Check for array construction with zero arguments or one.
- __ cmp(r0, Operand(0, RelocInfo::NONE));
- __ b(ne, &argc_one_or_more);
-
- // Handle construction of an empty array.
- __ bind(&empty_array);
- AllocateEmptyJSArray(masm,
- r1,
- r2,
- r3,
- r4,
- r5,
- call_generic_code);
- __ IncrementCounter(counters->array_function_native(), 1, r3, r4);
- // Set up return value, remove receiver from stack and return.
- __ mov(r0, r2);
- __ add(sp, sp, Operand(kPointerSize));
- __ Jump(lr);
-
- // Check for one argument. Bail out if argument is not smi or if it is
- // negative.
- __ bind(&argc_one_or_more);
- __ cmp(r0, Operand(1));
- __ b(ne, &argc_two_or_more);
- STATIC_ASSERT(kSmiTag == 0);
- __ ldr(r2, MemOperand(sp)); // Get the argument from the stack.
- __ tst(r2, r2);
- __ b(ne, &not_empty_array);
- __ Drop(1); // Adjust stack.
- __ mov(r0, Operand(0)); // Treat this as a call with argc of zero.
- __ b(&empty_array);
-
- __ bind(&not_empty_array);
- __ and_(r3, r2, Operand(kIntptrSignBit | kSmiTagMask), SetCC);
- __ b(ne, call_generic_code);
-
- // Handle construction of an empty array of a certain size. Bail out if size
- // is too large to actually allocate an elements array.
- STATIC_ASSERT(kSmiTag == 0);
- __ cmp(r2, Operand(JSObject::kInitialMaxFastElementArray << kSmiTagSize));
- __ b(ge, call_generic_code);
-
- // r0: argc
- // r1: constructor
- // r2: array_size (smi)
- // sp[0]: argument
- AllocateJSArray(masm,
- r1,
- r2,
- r3,
- r4,
- r5,
- r6,
- r7,
- true,
- call_generic_code);
- __ IncrementCounter(counters->array_function_native(), 1, r2, r4);
- // Set up return value, remove receiver and argument from stack and return.
- __ mov(r0, r3);
- __ add(sp, sp, Operand(2 * kPointerSize));
- __ Jump(lr);
-
- // Handle construction of an array from a list of arguments.
- __ bind(&argc_two_or_more);
- __ mov(r2, Operand(r0, LSL, kSmiTagSize)); // Convert argc to a smi.
-
- // r0: argc
- // r1: constructor
- // r2: array_size (smi)
- // sp[0]: last argument
- AllocateJSArray(masm,
- r1,
- r2,
- r3,
- r4,
- r5,
- r6,
- r7,
- false,
- call_generic_code);
- __ IncrementCounter(counters->array_function_native(), 1, r2, r6);
-
- // Fill arguments as array elements. Copy from the top of the stack (last
- // element) to the array backing store filling it backwards. Note:
- // elements_array_end points after the backing store therefore PreIndex is
- // used when filling the backing store.
- // r0: argc
- // r3: JSArray
- // r4: elements_array storage start (untagged)
- // r5: elements_array_end (untagged)
- // sp[0]: last argument
- Label loop, entry;
- __ mov(r7, sp);
- __ jmp(&entry);
- __ bind(&loop);
- __ ldr(r2, MemOperand(r7, kPointerSize, PostIndex));
- if (FLAG_smi_only_arrays) {
- __ JumpIfNotSmi(r2, &has_non_smi_element);
- }
- __ str(r2, MemOperand(r5, -kPointerSize, PreIndex));
- __ bind(&entry);
- __ cmp(r4, r5);
- __ b(lt, &loop);
-
- __ bind(&finish);
- __ mov(sp, r7);
-
- // Remove caller arguments and receiver from the stack, setup return value and
- // return.
- // r0: argc
- // r3: JSArray
- // sp[0]: receiver
- __ add(sp, sp, Operand(kPointerSize));
- __ mov(r0, r3);
- __ Jump(lr);
-
- __ bind(&has_non_smi_element);
- // Double values are handled by the runtime.
- __ CheckMap(
- r2, r9, Heap::kHeapNumberMapRootIndex, &not_double, DONT_DO_SMI_CHECK);
- __ bind(&cant_transition_map);
- __ UndoAllocationInNewSpace(r3, r4);
- __ b(call_generic_code);
-
- __ bind(&not_double);
- // Transition FAST_SMI_ELEMENTS to FAST_ELEMENTS.
- // r3: JSArray
- __ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_ELEMENTS,
- r2,
- r9,
- &cant_transition_map);
- __ str(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
- __ RecordWriteField(r3,
- HeapObject::kMapOffset,
- r2,
- r9,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- Label loop2;
- __ sub(r7, r7, Operand(kPointerSize));
- __ bind(&loop2);
- __ ldr(r2, MemOperand(r7, kPointerSize, PostIndex));
- __ str(r2, MemOperand(r5, -kPointerSize, PreIndex));
- __ cmp(r4, r5);
- __ b(lt, &loop2);
- __ b(&finish);
-}
-
-
void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : number of arguments
@@ -485,23 +118,17 @@ void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
if (FLAG_debug_code) {
// Initial map for the builtin InternalArray functions should be maps.
__ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
- __ tst(r2, Operand(kSmiTagMask));
- __ Assert(ne, "Unexpected initial map for InternalArray function");
+ __ SmiTst(r2);
+ __ Assert(ne, kUnexpectedInitialMapForInternalArrayFunction);
__ CompareObjectType(r2, r3, r4, MAP_TYPE);
- __ Assert(eq, "Unexpected initial map for InternalArray function");
+ __ Assert(eq, kUnexpectedInitialMapForInternalArrayFunction);
}
// Run the native code for the InternalArray function called as a normal
// function.
- ArrayNativeCode(masm, &generic_array_code);
-
- // Jump to the generic array code if the specialized code cannot handle the
- // construction.
- __ bind(&generic_array_code);
-
- Handle<Code> array_code =
- masm->isolate()->builtins()->InternalArrayCodeGeneric();
- __ Jump(array_code, RelocInfo::CODE_TARGET);
+ // tail call a stub
+ InternalArrayConstructorStub stub(masm->isolate());
+ __ TailCallStub(&stub);
}
@@ -519,54 +146,20 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
if (FLAG_debug_code) {
// Initial map for the builtin Array functions should be maps.
__ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
- __ tst(r2, Operand(kSmiTagMask));
- __ Assert(ne, "Unexpected initial map for Array function");
+ __ SmiTst(r2);
+ __ Assert(ne, kUnexpectedInitialMapForArrayFunction);
__ CompareObjectType(r2, r3, r4, MAP_TYPE);
- __ Assert(eq, "Unexpected initial map for Array function");
+ __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
}
// Run the native code for the Array function called as a normal function.
- ArrayNativeCode(masm, &generic_array_code);
-
- // Jump to the generic array code if the specialized code cannot handle
- // the construction.
- __ bind(&generic_array_code);
-
- Handle<Code> array_code =
- masm->isolate()->builtins()->ArrayCodeGeneric();
- __ Jump(array_code, RelocInfo::CODE_TARGET);
-}
-
-
-void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r0 : number of arguments
- // -- r1 : constructor function
- // -- lr : return address
- // -- sp[...]: constructor arguments
- // -----------------------------------
- Label generic_constructor;
-
- if (FLAG_debug_code) {
- // The array construct code is only set for the builtin and internal
- // Array functions which always have a map.
- // Initial map for the builtin Array function should be a map.
- __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
- __ tst(r2, Operand(kSmiTagMask));
- __ Assert(ne, "Unexpected initial map for Array function");
- __ CompareObjectType(r2, r3, r4, MAP_TYPE);
- __ Assert(eq, "Unexpected initial map for Array function");
- }
-
- // Run the native code for the Array function called as a constructor.
- ArrayNativeCode(masm, &generic_constructor);
-
- // Jump to the generic construct code in case the specialized code cannot
- // handle the construction.
- __ bind(&generic_constructor);
- Handle<Code> generic_construct_stub =
- masm->isolate()->builtins()->JSConstructStubGeneric();
- __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
+ // tail call a stub
+ Handle<Object> undefined_sentinel(
+ masm->isolate()->heap()->undefined_value(),
+ masm->isolate());
+ __ mov(r2, Operand(undefined_sentinel));
+ ArrayConstructorStub stub(masm->isolate());
+ __ TailCallStub(&stub);
}
@@ -585,12 +178,12 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
if (FLAG_debug_code) {
__ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, r2);
__ cmp(function, Operand(r2));
- __ Assert(eq, "Unexpected String function");
+ __ Assert(eq, kUnexpectedStringFunction);
}
// Load the first arguments in r0 and get rid of the rest.
Label no_arguments;
- __ cmp(r0, Operand(0, RelocInfo::NONE));
+ __ cmp(r0, Operand::Zero());
__ b(eq, &no_arguments);
// First args = sp[(argc - 1) * 4].
__ sub(r0, r0, Operand(1));
@@ -600,15 +193,12 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
Register argument = r2;
Label not_cached, argument_is_string;
- NumberToStringStub::GenerateLookupNumberStringCache(
- masm,
- r0, // Input.
- argument, // Result.
- r3, // Scratch.
- r4, // Scratch.
- r5, // Scratch.
- false, // Is it a Smi?
- &not_cached);
+ __ LookupNumberStringCache(r0, // Input.
+ argument, // Result.
+ r3, // Scratch.
+ r4, // Scratch.
+ r5, // Scratch.
+ &not_cached);
__ IncrementCounter(counters->string_ctor_cached_number(), 1, r3, r4);
__ bind(&argument_is_string);
@@ -619,12 +209,12 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
// -----------------------------------
Label gc_required;
- __ AllocateInNewSpace(JSValue::kSize,
- r0, // Result.
- r3, // Scratch.
- r4, // Scratch.
- &gc_required,
- TAG_OBJECT);
+ __ Allocate(JSValue::kSize,
+ r0, // Result.
+ r3, // Scratch.
+ r4, // Scratch.
+ &gc_required,
+ TAG_OBJECT);
// Initialising the String Object.
Register map = r3;
@@ -632,10 +222,10 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
if (FLAG_debug_code) {
__ ldrb(r4, FieldMemOperand(map, Map::kInstanceSizeOffset));
__ cmp(r4, Operand(JSValue::kSize >> kPointerSizeLog2));
- __ Assert(eq, "Unexpected string wrapper instance size");
+ __ Assert(eq, kUnexpectedStringWrapperInstanceSize);
__ ldrb(r4, FieldMemOperand(map, Map::kUnusedPropertyFieldsOffset));
- __ cmp(r4, Operand(0, RelocInfo::NONE));
- __ Assert(eq, "Unexpected unused properties of string wrapper");
+ __ cmp(r4, Operand::Zero());
+ __ Assert(eq, kUnexpectedUnusedPropertiesOfStringWrapper);
}
__ str(map, FieldMemOperand(r0, HeapObject::kMapOffset));
@@ -682,7 +272,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
// Load the empty string into r2, remove the receiver from the
// stack, and jump back to the case where the argument is a string.
__ bind(&no_arguments);
- __ LoadRoot(argument, Heap::kEmptyStringRootIndex);
+ __ LoadRoot(argument, Heap::kempty_stringRootIndex);
__ Drop(1);
__ b(&argument_is_string);
@@ -699,39 +289,55 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
}
+static void CallRuntimePassFunction(MacroAssembler* masm,
+ Runtime::FunctionId function_id) {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // Push a copy of the function onto the stack.
+ __ push(r1);
+ // Push call kind information.
+ __ push(r5);
+ // Function is also the parameter to the runtime call.
+ __ push(r1);
+
+ __ CallRuntime(function_id, 1);
+ // Restore call kind information.
+ __ pop(r5);
+ // Restore receiver.
+ __ pop(r1);
+}
+
+
static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
__ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCodeOffset));
__ add(r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ mov(pc, r2);
+ __ Jump(r2);
}
void Builtins::Generate_InRecompileQueue(MacroAssembler* masm) {
+ // Checking whether the queued function is ready for install is optional,
+ // since we come across interrupts and stack checks elsewhere. However,
+ // not checking may delay installing ready functions, and always checking
+ // would be quite expensive. A good compromise is to first check against
+ // stack limit as a cue for an interrupt signal.
+ Label ok;
+ __ LoadRoot(ip, Heap::kStackLimitRootIndex);
+ __ cmp(sp, Operand(ip));
+ __ b(hs, &ok);
+
+ CallRuntimePassFunction(masm, Runtime::kTryInstallRecompiledCode);
+ // Tail call to returned code.
+ __ add(r0, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(r0);
+
+ __ bind(&ok);
GenerateTailCallToSharedCode(masm);
}
-void Builtins::Generate_ParallelRecompile(MacroAssembler* masm) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Push a copy of the function onto the stack.
- __ push(r1);
- // Push call kind information.
- __ push(r5);
-
- __ push(r1); // Function is also the parameter to the runtime call.
- __ CallRuntime(Runtime::kParallelRecompile, 1);
-
- // Restore call kind information.
- __ pop(r5);
- // Restore receiver.
- __ pop(r1);
-
- // Tear down internal frame.
- }
-
+void Builtins::Generate_ConcurrentRecompile(MacroAssembler* masm) {
+ CallRuntimePassFunction(masm, Runtime::kConcurrentRecompile);
GenerateTailCallToSharedCode(masm);
}
@@ -756,7 +362,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
FrameScope scope(masm, StackFrame::CONSTRUCT);
// Preserve the two incoming parameters on the stack.
- __ mov(r0, Operand(r0, LSL, kSmiTagSize));
+ __ SmiTag(r0);
__ push(r0); // Smi-tagged arguments count.
__ push(r1); // Constructor function.
@@ -816,7 +422,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r1: constructor function
// r2: initial map
__ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset));
- __ AllocateInNewSpace(r3, r4, r5, r6, &rt_call, SIZE_IN_WORDS);
+ __ Allocate(r3, r4, r5, r6, &rt_call, SIZE_IN_WORDS);
// Allocated the JSObject, now initialize the fields. Map is set to
// initial map and properties and elements are set to empty fixed array.
@@ -839,9 +445,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r3: object size (in words)
// r4: JSObject (not tagged)
// r5: First in-object property of JSObject (not tagged)
- __ add(r6, r4, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize);
- __ LoadRoot(r7, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
if (count_constructions) {
__ ldr(r0, FieldMemOperand(r2, Map::kInstanceSizesOffset));
__ Ubfx(r0, r0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
@@ -849,14 +454,16 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ add(r0, r5, Operand(r0, LSL, kPointerSizeLog2));
// r0: offset of first field after pre-allocated fields
if (FLAG_debug_code) {
- __ cmp(r0, r6);
- __ Assert(le, "Unexpected number of pre-allocated property fields.");
+ __ add(ip, r4, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
+ __ cmp(r0, ip);
+ __ Assert(le, kUnexpectedNumberOfPreAllocatedPropertyFields);
}
- __ InitializeFieldsWithFiller(r5, r0, r7);
+ __ InitializeFieldsWithFiller(r5, r0, r6);
// To allow for truncation.
- __ LoadRoot(r7, Heap::kOnePointerFillerMapRootIndex);
+ __ LoadRoot(r6, Heap::kOnePointerFillerMapRootIndex);
}
- __ InitializeFieldsWithFiller(r5, r6, r7);
+ __ add(r0, r4, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
+ __ InitializeFieldsWithFiller(r5, r0, r6);
// Add the object tag to make the JSObject real, so that we can continue
// and jump into the continuation code at any time from now on. Any
@@ -882,7 +489,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Done if no extra properties are to be allocated.
__ b(eq, &allocated);
- __ Assert(pl, "Property allocation count failed.");
+ __ Assert(pl, kPropertyAllocationCountFailed);
// Scale the number of elements by pointer size and add the header for
// FixedArrays to the start of the next object calculation from above.
@@ -891,7 +498,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r4: JSObject
// r5: start of next object
__ add(r0, r3, Operand(FixedArray::kHeaderSize / kPointerSize));
- __ AllocateInNewSpace(
+ __ Allocate(
r0,
r5,
r6,
@@ -909,7 +516,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
__ str(r6, MemOperand(r2, kPointerSize, PostIndex));
ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
- __ mov(r0, Operand(r3, LSL, kSmiTagSize));
+ __ SmiTag(r0, r3);
__ str(r0, MemOperand(r2, kPointerSize, PostIndex));
// Initialize the fields to undefined.
@@ -921,16 +528,10 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ add(r6, r2, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
{ Label loop, entry;
- if (count_constructions) {
- __ LoadRoot(r7, Heap::kUndefinedValueRootIndex);
- } else if (FLAG_debug_code) {
- __ LoadRoot(r8, Heap::kUndefinedValueRootIndex);
- __ cmp(r7, r8);
- __ Assert(eq, "Undefined value not loaded.");
- }
+ __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
__ b(&entry);
__ bind(&loop);
- __ str(r7, MemOperand(r2, kPointerSize, PostIndex));
+ __ str(r0, MemOperand(r2, kPointerSize, PostIndex));
__ bind(&entry);
__ cmp(r2, r6);
__ b(lt, &loop);
@@ -982,7 +583,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ add(r2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
// Set up number of arguments for function call below
- __ mov(r0, Operand(r3, LSR, kSmiTagSize));
+ __ SmiUntag(r0, r3);
// Copy arguments and receiver to the expression stack.
// r0: number of arguments
@@ -1044,7 +645,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// If the type of the result (stored in its map) is less than
// FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
- __ CompareObjectType(r0, r3, r3, FIRST_SPEC_OBJECT_TYPE);
+ __ CompareObjectType(r0, r1, r3, FIRST_SPEC_OBJECT_TYPE);
__ b(ge, &exit);
// Throw away the result of the constructor invocation and use the
@@ -1094,10 +695,11 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// r2: receiver
// r3: argc
// r4: argv
- // r5-r7, cp may be clobbered
+ // r5-r6, r7 (if not FLAG_enable_ool_constant_pool) and cp may be clobbered
+ ProfileEntryHookStub::MaybeCallEntryHook(masm);
// Clear the context before we push it when entering the internal frame.
- __ mov(cp, Operand(0, RelocInfo::NONE));
+ __ mov(cp, Operand::Zero());
// Enter an internal frame.
{
@@ -1133,7 +735,9 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
__ mov(r5, Operand(r4));
__ mov(r6, Operand(r4));
- __ mov(r7, Operand(r4));
+ if (!FLAG_enable_ool_constant_pool) {
+ __ mov(r7, Operand(r4));
+ }
if (kR9Available == 1) {
__ mov(r9, Operand(r4));
}
@@ -1141,6 +745,10 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Invoke the code and pass argc as r0.
__ mov(r0, Operand(r3));
if (is_construct) {
+ // No type feedback cell is available
+ Handle<Object> undefined_sentinel(
+ masm->isolate()->heap()->undefined_value(), masm->isolate());
+ __ mov(r2, Operand(undefined_sentinel));
CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
__ CallStub(&stub);
} else {
@@ -1169,60 +777,103 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Preserve the function.
- __ push(r1);
- // Push call kind information.
- __ push(r5);
-
- // Push the function on the stack as the argument to the runtime function.
- __ push(r1);
- __ CallRuntime(Runtime::kLazyCompile, 1);
- // Calculate the entry point.
- __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
-
- // Restore call kind information.
- __ pop(r5);
- // Restore saved function.
- __ pop(r1);
+ CallRuntimePassFunction(masm, Runtime::kLazyCompile);
+ // Do a tail-call of the compiled function.
+ __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(r2);
+}
- // Tear down internal frame.
- }
+void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
+ CallRuntimePassFunction(masm, Runtime::kLazyRecompile);
// Do a tail-call of the compiled function.
+ __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(r2);
}
-void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
+static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
+ // For now, we are relying on the fact that make_code_young doesn't do any
+ // garbage collection which allows us to save/restore the registers without
+ // worrying about which of them contain pointers. We also don't build an
+ // internal frame to make the code faster, since we shouldn't have to do stack
+ // crawls in MakeCodeYoung. This seems a bit fragile.
+
+ // The following registers must be saved and restored when calling through to
+ // the runtime:
+ // r0 - contains return address (beginning of patch sequence)
+ // r1 - isolate
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ stm(db_w, sp, r0.bit() | r1.bit() | fp.bit() | lr.bit());
+ __ PrepareCallCFunction(1, 0, r2);
+ __ mov(r1, Operand(ExternalReference::isolate_address(masm->isolate())));
+ __ CallCFunction(
+ ExternalReference::get_make_code_young_function(masm->isolate()), 2);
+ __ ldm(ia_w, sp, r0.bit() | r1.bit() | fp.bit() | lr.bit());
+ __ mov(pc, r0);
+}
- // Preserve the function.
- __ push(r1);
- // Push call kind information.
- __ push(r5);
+#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
+void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
+ MacroAssembler* masm) { \
+ GenerateMakeCodeYoungAgainCommon(masm); \
+} \
+void Builtins::Generate_Make##C##CodeYoungAgainOddMarking( \
+ MacroAssembler* masm) { \
+ GenerateMakeCodeYoungAgainCommon(masm); \
+}
+CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
+#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
+
+
+void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
+ // For now, as in GenerateMakeCodeYoungAgainCommon, we are relying on the fact
+ // that make_code_young doesn't do any garbage collection which allows us to
+ // save/restore the registers without worrying about which of them contain
+ // pointers.
+
+ // The following registers must be saved and restored when calling through to
+ // the runtime:
+ // r0 - contains return address (beginning of patch sequence)
+ // r1 - isolate
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ stm(db_w, sp, r0.bit() | r1.bit() | fp.bit() | lr.bit());
+ __ PrepareCallCFunction(1, 0, r2);
+ __ mov(r1, Operand(ExternalReference::isolate_address(masm->isolate())));
+ __ CallCFunction(ExternalReference::get_mark_code_as_executed_function(
+ masm->isolate()), 2);
+ __ ldm(ia_w, sp, r0.bit() | r1.bit() | fp.bit() | lr.bit());
+
+ // Perform prologue operations usually performed by the young code stub.
+ __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
+ __ add(fp, sp, Operand(2 * kPointerSize));
+
+ // Jump to point after the code-age stub.
+ __ add(r0, r0, Operand(kNoCodeAgeSequenceLength * Assembler::kInstrSize));
+ __ mov(pc, r0);
+}
- // Push the function on the stack as the argument to the runtime function.
- __ push(r1);
- __ CallRuntime(Runtime::kLazyRecompile, 1);
- // Calculate the entry point.
- __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
- // Restore call kind information.
- __ pop(r5);
- // Restore saved function.
- __ pop(r1);
+void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
+ GenerateMakeCodeYoungAgainCommon(masm);
+}
+
- // Tear down internal frame.
+void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Preserve registers across notification, this is important for compiled
+ // stubs that tail call the runtime on deopts passing their parameters in
+ // registers.
+ __ stm(db_w, sp, kJSCallerSaved | kCalleeSaved);
+ // Pass the function and deoptimization type to the runtime system.
+ __ CallRuntime(Runtime::kNotifyStubFailure, 0);
+ __ ldm(ia_w, sp, kJSCallerSaved | kCalleeSaved);
}
- // Do a tail-call of the compiled function.
- __ Jump(r2);
+ __ add(sp, sp, Operand(kPointerSize)); // Ignore state
+ __ mov(pc, lr); // Jump to miss handler
}
@@ -1263,58 +914,77 @@ void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
}
-void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
- Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
+void Builtins::Generate_NotifySoftDeoptimized(MacroAssembler* masm) {
+ Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
}
-void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
- // For now, we are relying on the fact that Runtime::NotifyOSR
- // doesn't do any garbage collection which allows us to save/restore
- // the registers without worrying about which of them contain
- // pointers. This seems a bit fragile.
- __ stm(db_w, sp, kJSCallerSaved | kCalleeSaved | lr.bit() | fp.bit());
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kNotifyOSR, 0);
- }
- __ ldm(ia_w, sp, kJSCallerSaved | kCalleeSaved | lr.bit() | fp.bit());
- __ Ret();
+void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
+ Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
}
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
- CpuFeatures::TryForceFeatureScope scope(VFP3);
- if (!CPU::SupportsCrankshaft()) {
- __ Abort("Unreachable code: Cannot optimize without VFP3 support.");
- return;
- }
-
- // Lookup the function in the JavaScript frame and push it as an
- // argument to the on-stack replacement function.
+ // Lookup the function in the JavaScript frame.
__ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
{
FrameScope scope(masm, StackFrame::INTERNAL);
+ // Lookup and calculate pc offset.
+ __ ldr(r1, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
+ __ ldr(r2, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCodeOffset));
+ __ sub(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ sub(r1, r1, r2);
+ __ SmiTag(r1);
+
+ // Pass both function and pc offset as arguments.
__ push(r0);
- __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
+ __ push(r1);
+ __ CallRuntime(Runtime::kCompileForOnStackReplacement, 2);
}
- // If the result was -1 it means that we couldn't optimize the
- // function. Just return and continue in the unoptimized version.
+ // If the code object is null, just return to the unoptimized code.
Label skip;
- __ cmp(r0, Operand(Smi::FromInt(-1)));
+ __ cmp(r0, Operand(Smi::FromInt(0)));
__ b(ne, &skip);
__ Ret();
__ bind(&skip);
- // Untag the AST id and push it on the stack.
- __ SmiUntag(r0);
- __ push(r0);
-
- // Generate the code for doing the frame-to-frame translation using
- // the deoptimizer infrastructure.
- Deoptimizer::EntryGenerator generator(masm, Deoptimizer::OSR);
- generator.Generate();
+
+ // Load deoptimization data from the code object.
+ // <deopt_data> = <code>[#deoptimization_data_offset]
+ __ ldr(r1, MemOperand(r0, Code::kDeoptimizationDataOffset - kHeapObjectTag));
+
+ // Load the OSR entrypoint offset from the deoptimization data.
+ // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
+ __ ldr(r1, MemOperand(r1, FixedArray::OffsetOfElementAt(
+ DeoptimizationInputData::kOsrPcOffsetIndex) - kHeapObjectTag));
+
+ // Compute the target address = code_obj + header_size + osr_offset
+ // <entry_addr> = <code_obj> + #header_size + <osr_offset>
+ __ add(r0, r0, Operand::SmiUntag(r1));
+ __ add(lr, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+ // And "return" to the OSR entry point of the function.
+ __ Ret();
+}
+
+
+void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
+ // We check the stack limit as indicator that recompilation might be done.
+ Label ok;
+ __ LoadRoot(ip, Heap::kStackLimitRootIndex);
+ __ cmp(sp, Operand(ip));
+ __ b(hs, &ok);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kStackGuard, 0);
+ }
+ __ Jump(masm->isolate()->builtins()->OnStackReplacement(),
+ RelocInfo::CODE_TARGET);
+
+ __ bind(&ok);
+ __ Ret();
}
@@ -1322,7 +992,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// 1. Make sure we have at least one argument.
// r0: actual number of arguments
{ Label done;
- __ cmp(r0, Operand(0));
+ __ cmp(r0, Operand::Zero());
__ b(ne, &done);
__ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
__ push(r2);
@@ -1343,7 +1013,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// r0: actual number of arguments
// r1: function
Label shift_arguments;
- __ mov(r4, Operand(0, RelocInfo::NONE)); // indicate regular JS_FUNCTION
+ __ mov(r4, Operand::Zero()); // indicate regular JS_FUNCTION
{ Label convert_to_object, use_global_receiver, patch_receiver;
// Change context eagerly in case we need the global receiver.
__ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
@@ -1383,7 +1053,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
{
// Enter an internal frame in order to preserve argument count.
FrameScope scope(masm, StackFrame::INTERNAL);
- __ mov(r0, Operand(r0, LSL, kSmiTagSize)); // Smi-tagged.
+ __ SmiTag(r0);
__ push(r0);
__ push(r2);
@@ -1391,14 +1061,14 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ mov(r2, r0);
__ pop(r0);
- __ mov(r0, Operand(r0, ASR, kSmiTagSize));
+ __ SmiUntag(r0);
// Exit the internal frame.
}
// Restore the function to r1, and the flag to r4.
__ ldr(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
- __ mov(r4, Operand(0, RelocInfo::NONE));
+ __ mov(r4, Operand::Zero());
__ jmp(&patch_receiver);
// Use the global receiver object from the called function as the
@@ -1420,11 +1090,11 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// 3b. Check for function proxy.
__ bind(&slow);
- __ mov(r4, Operand(1, RelocInfo::NONE)); // indicate function proxy
+ __ mov(r4, Operand(1, RelocInfo::NONE32)); // indicate function proxy
__ cmp(r2, Operand(JS_FUNCTION_PROXY_TYPE));
__ b(eq, &shift_arguments);
__ bind(&non_function);
- __ mov(r4, Operand(2, RelocInfo::NONE)); // indicate non-function
+ __ mov(r4, Operand(2, RelocInfo::NONE32)); // indicate non-function
// 3c. Patch the first argument when calling a non-function. The
// CALL_NON_FUNCTION builtin expects the non-function callee as
@@ -1468,7 +1138,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ tst(r4, r4);
__ b(eq, &function);
// Expected number of arguments is 0 for CALL_NON_FUNCTION.
- __ mov(r2, Operand(0, RelocInfo::NONE));
+ __ mov(r2, Operand::Zero());
__ SetCallKind(r5, CALL_AS_METHOD);
__ cmp(r4, Operand(1));
__ b(ne, &non_proxy);
@@ -1494,7 +1164,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ ldr(r3, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r2,
FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset));
- __ mov(r2, Operand(r2, ASR, kSmiTagSize));
+ __ SmiUntag(r2);
__ ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
__ SetCallKind(r5, CALL_AS_METHOD);
__ cmp(r2, r0); // Check formal and actual parameter counts.
@@ -1533,7 +1203,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// here which will cause r2 to become negative.
__ sub(r2, sp, r2);
// Check if the arguments will overflow the stack.
- __ cmp(r2, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ cmp(r2, Operand::PointerOffsetFromSmiKey(r0));
__ b(gt, &okay); // Signed comparison.
// Out of stack space.
@@ -1546,7 +1216,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// Push current limit and index.
__ bind(&okay);
__ push(r0); // limit
- __ mov(r1, Operand(0, RelocInfo::NONE)); // initial index
+ __ mov(r1, Operand::Zero()); // initial index
__ push(r1);
// Get the receiver.
@@ -1643,7 +1313,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// Invoke the function.
Label call_proxy;
ParameterCount actual(r0);
- __ mov(r0, Operand(r0, ASR, kSmiTagSize));
+ __ SmiUntag(r0);
__ ldr(r1, MemOperand(fp, kFunctionOffset));
__ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
__ b(ne, &call_proxy);
@@ -1658,7 +1328,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ bind(&call_proxy);
__ push(r1); // add function proxy as last argument
__ add(r0, r0, Operand(1));
- __ mov(r2, Operand(0, RelocInfo::NONE));
+ __ mov(r2, Operand::Zero());
__ SetCallKind(r5, CALL_AS_METHOD);
__ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY);
__ Call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
@@ -1672,7 +1342,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
- __ mov(r0, Operand(r0, LSL, kSmiTagSize));
+ __ SmiTag(r0);
__ mov(r4, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ stm(db_w, sp, r0.bit() | r1.bit() | r4.bit() | fp.bit() | lr.bit());
__ add(fp, sp, Operand(3 * kPointerSize));
@@ -1688,7 +1358,7 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
__ ldr(r1, MemOperand(fp, -3 * kPointerSize));
__ mov(sp, fp);
__ ldm(ia_w, sp, fp.bit() | lr.bit());
- __ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ add(sp, sp, Operand::PointerOffsetFromSmiKey(r1));
__ add(sp, sp, Operand(kPointerSize)); // adjust for receiver
}
@@ -1719,7 +1389,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// r1: function
// r2: expected number of arguments
// r3: code entry to call
- __ add(r0, fp, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ add(r0, fp, Operand::PointerOffsetFromSmiKey(r0));
// adjust for return address and receiver
__ add(r0, r0, Operand(2 * kPointerSize));
__ sub(r2, r0, Operand(r2, LSL, kPointerSizeLog2));
@@ -1750,7 +1420,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// r1: function
// r2: expected number of arguments
// r3: code entry to call
- __ add(r0, fp, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ add(r0, fp, Operand::PointerOffsetFromSmiKey(r0));
// Copy the arguments (including the receiver) to the new stack frame.
// r0: copy start address
diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc
index ceb108ffa..9330eb141 100644
--- a/deps/v8/src/arm/code-stubs-arm.cc
+++ b/deps/v8/src/arm/code-stubs-arm.cc
@@ -27,195 +27,319 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_ARM)
+#if V8_TARGET_ARCH_ARM
#include "bootstrapper.h"
#include "code-stubs.h"
#include "regexp-macro-assembler.h"
+#include "stub-cache.h"
namespace v8 {
namespace internal {
-#define __ ACCESS_MASM(masm)
+void FastNewClosureStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { r2 };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kNewClosureFromStubFailure)->entry;
+}
-static void EmitIdenticalObjectComparison(MacroAssembler* masm,
- Label* slow,
- Condition cond,
- bool never_nan_nan);
-static void EmitSmiNonsmiComparison(MacroAssembler* masm,
- Register lhs,
- Register rhs,
- Label* lhs_not_nan,
- Label* slow,
- bool strict);
-static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cond);
-static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
- Register lhs,
- Register rhs);
+void ToNumberStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { r0 };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
-// Check if the operand is a heap number.
-static void EmitCheckForHeapNumber(MacroAssembler* masm, Register operand,
- Register scratch1, Register scratch2,
- Label* not_a_heap_number) {
- __ ldr(scratch1, FieldMemOperand(operand, HeapObject::kMapOffset));
- __ LoadRoot(scratch2, Heap::kHeapNumberMapRootIndex);
- __ cmp(scratch1, scratch2);
- __ b(ne, not_a_heap_number);
+
+void NumberToStringStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { r0 };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kNumberToString)->entry;
}
-void ToNumberStub::Generate(MacroAssembler* masm) {
- // The ToNumber stub takes one argument in eax.
- Label check_heap_number, call_builtin;
- __ JumpIfNotSmi(r0, &check_heap_number);
- __ Ret();
+void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { r3, r2, r1 };
+ descriptor->register_param_count_ = 3;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kCreateArrayLiteralShallow)->entry;
+}
- __ bind(&check_heap_number);
- EmitCheckForHeapNumber(masm, r0, r1, ip, &call_builtin);
- __ Ret();
- __ bind(&call_builtin);
- __ push(r0);
- __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
+void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { r3, r2, r1, r0 };
+ descriptor->register_param_count_ = 4;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry;
}
-void FastNewClosureStub::Generate(MacroAssembler* masm) {
- // Create a new closure from the given function info in new
- // space. Set the context to the current context in cp.
- Counters* counters = masm->isolate()->counters();
+void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { r2 };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
- Label gc;
- // Pop the function info from the stack.
- __ pop(r3);
+void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { r1, r0 };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
+}
- // Attempt to allocate new JSFunction in new space.
- __ AllocateInNewSpace(JSFunction::kSize,
- r0,
- r1,
- r2,
- &gc,
- TAG_OBJECT);
- __ IncrementCounter(counters->fast_new_closure_total(), 1, r6, r7);
+void LoadFieldStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { r0 };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
- int map_index = (language_mode_ == CLASSIC_MODE)
- ? Context::FUNCTION_MAP_INDEX
- : Context::STRICT_MODE_FUNCTION_MAP_INDEX;
- // Compute the function map in the current native context and set that
- // as the map of the allocated object.
- __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ ldr(r2, FieldMemOperand(r2, GlobalObject::kNativeContextOffset));
- __ ldr(r5, MemOperand(r2, Context::SlotOffset(map_index)));
- __ str(r5, FieldMemOperand(r0, HeapObject::kMapOffset));
+void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { r1 };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
- // Initialize the rest of the function. We don't have to update the
- // write barrier because the allocated object is in new space.
- __ LoadRoot(r1, Heap::kEmptyFixedArrayRootIndex);
- __ LoadRoot(r5, Heap::kTheHoleValueRootIndex);
- __ str(r1, FieldMemOperand(r0, JSObject::kPropertiesOffset));
- __ str(r1, FieldMemOperand(r0, JSObject::kElementsOffset));
- __ str(r5, FieldMemOperand(r0, JSFunction::kPrototypeOrInitialMapOffset));
- __ str(r3, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
- __ str(cp, FieldMemOperand(r0, JSFunction::kContextOffset));
- __ str(r1, FieldMemOperand(r0, JSFunction::kLiteralsOffset));
-
- // Initialize the code pointer in the function to be the one
- // found in the shared function info object.
- // But first check if there is an optimized version for our context.
- Label check_optimized;
- Label install_unoptimized;
- if (FLAG_cache_optimized_code) {
- __ ldr(r1,
- FieldMemOperand(r3, SharedFunctionInfo::kOptimizedCodeMapOffset));
- __ tst(r1, r1);
- __ b(ne, &check_optimized);
+
+void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { r2, r1, r0 };
+ descriptor->register_param_count_ = 3;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(KeyedStoreIC_MissFromStubFailure);
+}
+
+
+void TransitionElementsKindStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { r0, r1 };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ Address entry =
+ Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry;
+ descriptor->deoptimization_handler_ = FUNCTION_ADDR(entry);
+}
+
+
+void CompareNilICStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { r0 };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(CompareNilIC_Miss);
+ descriptor->SetMissHandler(
+ ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate));
+}
+
+
+void BinaryOpStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { r1, r0 };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
+ descriptor->SetMissHandler(
+ ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate));
+}
+
+
+static void InitializeArrayConstructorDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor,
+ int constant_stack_parameter_count) {
+ // register state
+ // r0 -- number of arguments
+ // r1 -- function
+ // r2 -- type info cell with elements kind
+ static Register registers[] = { r1, r2 };
+ descriptor->register_param_count_ = 2;
+ if (constant_stack_parameter_count != 0) {
+ // stack param count needs (constructor pointer and single argument)
+ descriptor->stack_parameter_count_ = r0;
}
- __ bind(&install_unoptimized);
- __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
- __ str(r4, FieldMemOperand(r0, JSFunction::kNextFunctionLinkOffset));
- __ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kCodeOffset));
- __ add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ str(r3, FieldMemOperand(r0, JSFunction::kCodeEntryOffset));
-
- // Return result. The argument function info has been popped already.
- __ Ret();
+ descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
+ descriptor->register_params_ = registers;
+ descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kArrayConstructor)->entry;
+}
- __ bind(&check_optimized);
- __ IncrementCounter(counters->fast_new_closure_try_optimized(), 1, r6, r7);
+static void InitializeInternalArrayConstructorDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor,
+ int constant_stack_parameter_count) {
+ // register state
+ // r0 -- number of arguments
+ // r1 -- constructor function
+ static Register registers[] = { r1 };
+ descriptor->register_param_count_ = 1;
- // r2 holds native context, r1 points to fixed array of 3-element entries
- // (native context, optimized code, literals).
- // The optimized code map must never be empty, so check the first elements.
- Label install_optimized;
- // Speculatively move code object into r4.
- __ ldr(r4, FieldMemOperand(r1, FixedArray::kHeaderSize + kPointerSize));
- __ ldr(r5, FieldMemOperand(r1, FixedArray::kHeaderSize));
- __ cmp(r2, r5);
- __ b(eq, &install_optimized);
+ if (constant_stack_parameter_count != 0) {
+ // stack param count needs (constructor pointer and single argument)
+ descriptor->stack_parameter_count_ = r0;
+ }
+ descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
+ descriptor->register_params_ = registers;
+ descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kInternalArrayConstructor)->entry;
+}
- // Iterate through the rest of map backwards. r4 holds an index as a Smi.
- Label loop;
- __ ldr(r4, FieldMemOperand(r1, FixedArray::kLengthOffset));
- __ bind(&loop);
- // Do not double check first entry.
-
- __ cmp(r4, Operand(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
- __ b(eq, &install_unoptimized);
- __ sub(r4, r4, Operand(
- Smi::FromInt(SharedFunctionInfo::kEntryLength))); // Skip an entry.
- __ add(r5, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ add(r5, r5, Operand(r4, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ ldr(r5, MemOperand(r5));
- __ cmp(r2, r5);
- __ b(ne, &loop);
- // Hit: fetch the optimized code.
- __ add(r5, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ add(r5, r5, Operand(r4, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ add(r5, r5, Operand(kPointerSize));
- __ ldr(r4, MemOperand(r5));
-
- __ bind(&install_optimized);
- __ IncrementCounter(counters->fast_new_closure_install_optimized(),
- 1, r6, r7);
-
- // TODO(fschneider): Idea: store proper code pointers in the map and either
- // unmangle them on marking or do nothing as the whole map is discarded on
- // major GC anyway.
- __ add(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ str(r4, FieldMemOperand(r0, JSFunction::kCodeEntryOffset));
-
- // Now link a function into a list of optimized functions.
- __ ldr(r4, ContextOperand(r2, Context::OPTIMIZED_FUNCTIONS_LIST));
-
- __ str(r4, FieldMemOperand(r0, JSFunction::kNextFunctionLinkOffset));
- // No need for write barrier as JSFunction (eax) is in the new space.
-
- __ str(r0, ContextOperand(r2, Context::OPTIMIZED_FUNCTIONS_LIST));
- // Store JSFunction (eax) into edx before issuing write barrier as
- // it clobbers all the registers passed.
- __ mov(r4, r0);
- __ RecordWriteContextSlot(
- r2,
- Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST),
- r4,
- r1,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs);
-
- // Return result. The argument function info has been popped already.
- __ Ret();
- // Create a new closure through the slower runtime call.
- __ bind(&gc);
- __ LoadRoot(r4, Heap::kFalseValueRootIndex);
- __ Push(cp, r3, r4);
- __ TailCallRuntime(Runtime::kNewClosure, 3, 1);
+void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate, descriptor, 0);
+}
+
+
+void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate, descriptor, 1);
+}
+
+
+void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate, descriptor, -1);
+}
+
+
+void ToBooleanStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { r0 };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(ToBooleanIC_Miss);
+ descriptor->SetMissHandler(
+ ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate));
+}
+
+
+void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 0);
+}
+
+
+void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 1);
+}
+
+
+void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(isolate, descriptor, -1);
+}
+
+
+void StoreGlobalStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { r1, r2, r0 };
+ descriptor->register_param_count_ = 3;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(StoreIC_MissFromStubFailure);
+}
+
+
+void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { r0, r3, r1, r2 };
+ descriptor->register_param_count_ = 4;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(ElementsTransitionAndStoreIC_Miss);
+}
+
+
+#define __ ACCESS_MASM(masm)
+
+
+static void EmitIdenticalObjectComparison(MacroAssembler* masm,
+ Label* slow,
+ Condition cond);
+static void EmitSmiNonsmiComparison(MacroAssembler* masm,
+ Register lhs,
+ Register rhs,
+ Label* lhs_not_nan,
+ Label* slow,
+ bool strict);
+static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
+ Register lhs,
+ Register rhs);
+
+
+void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
+ // Update the static counter each time a new code stub is generated.
+ Isolate* isolate = masm->isolate();
+ isolate->counters()->code_stubs()->Increment();
+
+ CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor(isolate);
+ int param_count = descriptor->register_param_count_;
+ {
+ // Call the runtime system in a fresh internal frame.
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ ASSERT(descriptor->register_param_count_ == 0 ||
+ r0.is(descriptor->register_params_[param_count - 1]));
+ // Push arguments
+ for (int i = 0; i < param_count; ++i) {
+ __ push(descriptor->register_params_[i]);
+ }
+ ExternalReference miss = descriptor->miss_handler();
+ __ CallExternalReference(miss, descriptor->register_param_count_);
+ }
+
+ __ Ret();
}
@@ -225,12 +349,7 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
int length = slots_ + Context::MIN_CONTEXT_SLOTS;
// Attempt to allocate the context in new space.
- __ AllocateInNewSpace(FixedArray::SizeFor(length),
- r0,
- r1,
- r2,
- &gc,
- TAG_OBJECT);
+ __ Allocate(FixedArray::SizeFor(length), r0, r1, r2, &gc, TAG_OBJECT);
// Load the function from the stack.
__ ldr(r3, MemOperand(sp, 0));
@@ -275,8 +394,7 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
// Try to allocate the context in new space.
Label gc;
int length = slots_ + Context::MIN_CONTEXT_SLOTS;
- __ AllocateInNewSpace(FixedArray::SizeFor(length),
- r0, r1, r2, &gc, TAG_OBJECT);
+ __ Allocate(FixedArray::SizeFor(length), r0, r1, r2, &gc, TAG_OBJECT);
// Load the function from the stack.
__ ldr(r3, MemOperand(sp, 0));
@@ -297,9 +415,8 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
Label after_sentinel;
__ JumpIfNotSmi(r3, &after_sentinel);
if (FLAG_debug_code) {
- const char* message = "Expected 0 as a Smi sentinel";
__ cmp(r3, Operand::Zero());
- __ Assert(eq, message);
+ __ Assert(eq, kExpected0AsASmiSentinel);
}
__ ldr(r3, GlobalObjectOperand());
__ ldr(r3, FieldMemOperand(r3, GlobalObject::kNativeContextOffset));
@@ -330,181 +447,12 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
}
-static void GenerateFastCloneShallowArrayCommon(
- MacroAssembler* masm,
- int length,
- FastCloneShallowArrayStub::Mode mode,
- Label* fail) {
- // Registers on entry:
- //
- // r3: boilerplate literal array.
- ASSERT(mode != FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS);
-
- // All sizes here are multiples of kPointerSize.
- int elements_size = 0;
- if (length > 0) {
- elements_size = mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
- ? FixedDoubleArray::SizeFor(length)
- : FixedArray::SizeFor(length);
- }
- int size = JSArray::kSize + elements_size;
-
- // Allocate both the JS array and the elements array in one big
- // allocation. This avoids multiple limit checks.
- __ AllocateInNewSpace(size,
- r0,
- r1,
- r2,
- fail,
- TAG_OBJECT);
-
- // Copy the JS array part.
- for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
- if ((i != JSArray::kElementsOffset) || (length == 0)) {
- __ ldr(r1, FieldMemOperand(r3, i));
- __ str(r1, FieldMemOperand(r0, i));
- }
- }
-
- if (length > 0) {
- // Get hold of the elements array of the boilerplate and setup the
- // elements pointer in the resulting object.
- __ ldr(r3, FieldMemOperand(r3, JSArray::kElementsOffset));
- __ add(r2, r0, Operand(JSArray::kSize));
- __ str(r2, FieldMemOperand(r0, JSArray::kElementsOffset));
-
- // Copy the elements array.
- ASSERT((elements_size % kPointerSize) == 0);
- __ CopyFields(r2, r3, r1.bit(), elements_size / kPointerSize);
- }
-}
-
-void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
- // Stack layout on entry:
- //
- // [sp]: constant elements.
- // [sp + kPointerSize]: literal index.
- // [sp + (2 * kPointerSize)]: literals array.
-
- // Load boilerplate object into r3 and check if we need to create a
- // boilerplate.
- Label slow_case;
- __ ldr(r3, MemOperand(sp, 2 * kPointerSize));
- __ ldr(r0, MemOperand(sp, 1 * kPointerSize));
- __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ ldr(r3, MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
- __ b(eq, &slow_case);
-
- FastCloneShallowArrayStub::Mode mode = mode_;
- if (mode == CLONE_ANY_ELEMENTS) {
- Label double_elements, check_fast_elements;
- __ ldr(r0, FieldMemOperand(r3, JSArray::kElementsOffset));
- __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ CompareRoot(r0, Heap::kFixedCOWArrayMapRootIndex);
- __ b(ne, &check_fast_elements);
- GenerateFastCloneShallowArrayCommon(masm, 0,
- COPY_ON_WRITE_ELEMENTS, &slow_case);
- // Return and remove the on-stack parameters.
- __ add(sp, sp, Operand(3 * kPointerSize));
- __ Ret();
-
- __ bind(&check_fast_elements);
- __ CompareRoot(r0, Heap::kFixedArrayMapRootIndex);
- __ b(ne, &double_elements);
- GenerateFastCloneShallowArrayCommon(masm, length_,
- CLONE_ELEMENTS, &slow_case);
- // Return and remove the on-stack parameters.
- __ add(sp, sp, Operand(3 * kPointerSize));
- __ Ret();
-
- __ bind(&double_elements);
- mode = CLONE_DOUBLE_ELEMENTS;
- // Fall through to generate the code to handle double elements.
- }
-
- if (FLAG_debug_code) {
- const char* message;
- Heap::RootListIndex expected_map_index;
- if (mode == CLONE_ELEMENTS) {
- message = "Expected (writable) fixed array";
- expected_map_index = Heap::kFixedArrayMapRootIndex;
- } else if (mode == CLONE_DOUBLE_ELEMENTS) {
- message = "Expected (writable) fixed double array";
- expected_map_index = Heap::kFixedDoubleArrayMapRootIndex;
- } else {
- ASSERT(mode == COPY_ON_WRITE_ELEMENTS);
- message = "Expected copy-on-write fixed array";
- expected_map_index = Heap::kFixedCOWArrayMapRootIndex;
- }
- __ push(r3);
- __ ldr(r3, FieldMemOperand(r3, JSArray::kElementsOffset));
- __ ldr(r3, FieldMemOperand(r3, HeapObject::kMapOffset));
- __ CompareRoot(r3, expected_map_index);
- __ Assert(eq, message);
- __ pop(r3);
- }
-
- GenerateFastCloneShallowArrayCommon(masm, length_, mode, &slow_case);
-
- // Return and remove the on-stack parameters.
- __ add(sp, sp, Operand(3 * kPointerSize));
- __ Ret();
-
- __ bind(&slow_case);
- __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
-}
-
-
-void FastCloneShallowObjectStub::Generate(MacroAssembler* masm) {
- // Stack layout on entry:
- //
- // [sp]: object literal flags.
- // [sp + kPointerSize]: constant properties.
- // [sp + (2 * kPointerSize)]: literal index.
- // [sp + (3 * kPointerSize)]: literals array.
-
- // Load boilerplate object into r3 and check if we need to create a
- // boilerplate.
- Label slow_case;
- __ ldr(r3, MemOperand(sp, 3 * kPointerSize));
- __ ldr(r0, MemOperand(sp, 2 * kPointerSize));
- __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ ldr(r3, MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
- __ b(eq, &slow_case);
-
- // Check that the boilerplate contains only fast properties and we can
- // statically determine the instance size.
- int size = JSObject::kHeaderSize + length_ * kPointerSize;
- __ ldr(r0, FieldMemOperand(r3, HeapObject::kMapOffset));
- __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceSizeOffset));
- __ cmp(r0, Operand(size >> kPointerSizeLog2));
- __ b(ne, &slow_case);
-
- // Allocate the JS object and copy header together with all in-object
- // properties from the boilerplate.
- __ AllocateInNewSpace(size, r0, r1, r2, &slow_case, TAG_OBJECT);
- for (int i = 0; i < size; i += kPointerSize) {
- __ ldr(r1, FieldMemOperand(r3, i));
- __ str(r1, FieldMemOperand(r0, i));
- }
-
- // Return and remove the on-stack parameters.
- __ add(sp, sp, Operand(4 * kPointerSize));
- __ Ret();
-
- __ bind(&slow_case);
- __ TailCallRuntime(Runtime::kCreateObjectLiteralShallow, 4, 1);
-}
-
-
// Takes a Smi and converts to an IEEE 64 bit floating point value in two
// registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and
// 52 fraction bits (20 in the first word, 32 in the second). Zeros is a
// scratch register. Destroys the source register. No GC occurs during this
// stub so you don't have to set up the frame.
-class ConvertToDoubleStub : public CodeStub {
+class ConvertToDoubleStub : public PlatformCodeStub {
public:
ConvertToDoubleStub(Register result_reg_1,
Register result_reg_2,
@@ -543,15 +491,14 @@ void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
Register mantissa = result2_;
Label not_special;
- // Convert from Smi to integer.
- __ mov(source_, Operand(source_, ASR, kSmiTagSize));
+ __ SmiUntag(source_);
// Move sign bit from source to destination. This works because the sign bit
// in the exponent word of the double has the same position and polarity as
// the 2's complement sign bit in a Smi.
STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
__ and_(exponent, source_, Operand(HeapNumber::kSignMask), SetCC);
// Subtract from 0 if source was negative.
- __ rsb(source_, source_, Operand(0, RelocInfo::NONE), LeaveCC, ne);
+ __ rsb(source_, source_, Operand::Zero(), LeaveCC, ne);
// We have -1, 0 or 1, which we treat specially. Register source_ contains
// absolute value: it is either equal to 1 (special case of -1 and 1),
@@ -564,13 +511,11 @@ void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
HeapNumber::kExponentBias << HeapNumber::kExponentShift;
__ orr(exponent, exponent, Operand(exponent_word_for_1), LeaveCC, eq);
// 1, 0 and -1 all have 0 for the second word.
- __ mov(mantissa, Operand(0, RelocInfo::NONE));
+ __ mov(mantissa, Operand::Zero());
__ Ret();
__ bind(&not_special);
- // Count leading zeros. Uses mantissa for a scratch register on pre-ARM5.
- // Gets the wrong answer for 0, but we already checked for that case above.
- __ CountLeadingZeros(zeros_, source_, mantissa);
+ __ clz(zeros_, source_);
// Compute exponent and or it into the exponent register.
// We use mantissa as a scratch register here. Use a fudge factor to
// divide the constant 31 + HeapNumber::kExponentBias, 0x41d, into two parts
@@ -595,486 +540,112 @@ void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
}
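
The ConvertToDoubleStub above assembles the two words of an IEEE-754 double by hand: 1 sign bit, 11 exponent bits biased by 1023, and 52 mantissa bits, reusing the Smi's 2's-complement sign bit as the double's sign bit and clz to find the leading 1. A minimal standalone C++ sketch of the same word construction follows; the helper name IntToDoubleWords is hypothetical and the snippet is an illustration, not part of this patch.

    // Sketch: build the high/low 32-bit words of an IEEE-754 double for a 32-bit
    // integer, mirroring ConvertToDoubleStub (sign, biased exponent, mantissa).
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    static void IntToDoubleWords(int32_t value, uint32_t* hi, uint32_t* lo) {
      uint32_t sign = value < 0 ? 0x80000000u : 0u;       // Same position as the Smi sign bit.
      uint32_t abs_val = value < 0 ? 0u - static_cast<uint32_t>(value)
                                   : static_cast<uint32_t>(value);
      if (abs_val == 0) { *hi = sign; *lo = 0u; return; }  // +/-0: exponent and mantissa zero.
      int top_bit = 0;                                     // Position of the leading 1 (clz).
      for (uint32_t v = abs_val; v >>= 1;) top_bit++;
      uint32_t exponent = 1023u + top_bit;                 // Bias the exponent by 1023.
      uint64_t mantissa = (static_cast<uint64_t>(abs_val) << (52 - top_bit))
                          & ((1ULL << 52) - 1);            // Drop the implicit leading 1.
      *hi = sign | (exponent << 20) | static_cast<uint32_t>(mantissa >> 32);
      *lo = static_cast<uint32_t>(mantissa);
    }

    int main() {
      uint32_t hi, lo;
      IntToDoubleWords(-5, &hi, &lo);
      uint64_t bits = (static_cast<uint64_t>(hi) << 32) | lo;
      double d;
      memcpy(&d, &bits, sizeof(d));
      printf("%08x %08x %f\n", hi, lo, d);  // c0140000 00000000 -5.000000
      return 0;
    }
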
-void FloatingPointHelper::LoadSmis(MacroAssembler* masm,
- FloatingPointHelper::Destination destination,
- Register scratch1,
- Register scratch2) {
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- __ mov(scratch1, Operand(r0, ASR, kSmiTagSize));
- __ vmov(d7.high(), scratch1);
- __ vcvt_f64_s32(d7, d7.high());
- __ mov(scratch1, Operand(r1, ASR, kSmiTagSize));
- __ vmov(d6.high(), scratch1);
- __ vcvt_f64_s32(d6, d6.high());
- if (destination == kCoreRegisters) {
- __ vmov(r2, r3, d7);
- __ vmov(r0, r1, d6);
- }
- } else {
- ASSERT(destination == kCoreRegisters);
- // Write Smi from r0 to r3 and r2 in double format.
- __ mov(scratch1, Operand(r0));
- ConvertToDoubleStub stub1(r3, r2, scratch1, scratch2);
- __ push(lr);
- __ Call(stub1.GetCode());
- // Write Smi from r1 to r1 and r0 in double format.
- __ mov(scratch1, Operand(r1));
- ConvertToDoubleStub stub2(r1, r0, scratch1, scratch2);
- __ Call(stub2.GetCode());
- __ pop(lr);
- }
-}
-
+void DoubleToIStub::Generate(MacroAssembler* masm) {
+ Label out_of_range, only_low, negate, done;
+ Register input_reg = source();
+ Register result_reg = destination();
-void FloatingPointHelper::LoadOperands(
- MacroAssembler* masm,
- FloatingPointHelper::Destination destination,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* slow) {
+ int double_offset = offset();
+ // Account for saved regs if input is sp.
+ if (input_reg.is(sp)) double_offset += 2 * kPointerSize;
- // Load right operand (r0) to d6 or r2/r3.
- LoadNumber(masm, destination,
- r0, d7, r2, r3, heap_number_map, scratch1, scratch2, slow);
+ // Immediate values for this stub fit in instructions, so it's safe to use ip.
+ Register scratch = ip;
+ Register scratch_low =
+ GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch);
+ Register scratch_high =
+ GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch_low);
+ LowDwVfpRegister double_scratch = kScratchDoubleReg;
- // Load left operand (r1) to d7 or r0/r1.
- LoadNumber(masm, destination,
- r1, d6, r0, r1, heap_number_map, scratch1, scratch2, slow);
-}
+ __ Push(scratch_high, scratch_low);
+ if (!skip_fastpath()) {
+ // Load double input.
+ __ vldr(double_scratch, MemOperand(input_reg, double_offset));
+ __ vmov(scratch_low, scratch_high, double_scratch);
-void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
- Destination destination,
- Register object,
- DwVfpRegister dst,
- Register dst1,
- Register dst2,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* not_number) {
- __ AssertRootValue(heap_number_map,
- Heap::kHeapNumberMapRootIndex,
- "HeapNumberMap register clobbered.");
-
- Label is_smi, done;
-
- // Smi-check
- __ UntagAndJumpIfSmi(scratch1, object, &is_smi);
- // Heap number check
- __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
-
- // Handle loading a double from a heap number.
- if (CpuFeatures::IsSupported(VFP2) &&
- destination == kVFPRegisters) {
- CpuFeatures::Scope scope(VFP2);
- // Load the double from tagged HeapNumber to double register.
- __ sub(scratch1, object, Operand(kHeapObjectTag));
- __ vldr(dst, scratch1, HeapNumber::kValueOffset);
- } else {
- ASSERT(destination == kCoreRegisters);
- // Load the double from heap number to dst1 and dst2 in double format.
- __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset));
- }
- __ jmp(&done);
+ // Do fast-path convert from double to int.
+ __ vcvt_s32_f64(double_scratch.low(), double_scratch);
+ __ vmov(result_reg, double_scratch.low());
- // Handle loading a double from a smi.
- __ bind(&is_smi);
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- // Convert smi to double using VFP instructions.
- __ vmov(dst.high(), scratch1);
- __ vcvt_f64_s32(dst, dst.high());
- if (destination == kCoreRegisters) {
- // Load the converted smi to dst1 and dst2 in double format.
- __ vmov(dst1, dst2, dst);
- }
+ // If result is not saturated (0x7fffffff or 0x80000000), we are done.
+ __ sub(scratch, result_reg, Operand(1));
+ __ cmp(scratch, Operand(0x7ffffffe));
+ __ b(lt, &done);
} else {
- ASSERT(destination == kCoreRegisters);
- // Write smi to dst1 and dst2 double format.
- __ mov(scratch1, Operand(object));
- ConvertToDoubleStub stub(dst2, dst1, scratch1, scratch2);
- __ push(lr);
- __ Call(stub.GetCode());
- __ pop(lr);
- }
-
- __ bind(&done);
-}
-
-
-void FloatingPointHelper::ConvertNumberToInt32(MacroAssembler* masm,
- Register object,
- Register dst,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- DwVfpRegister double_scratch,
- Label* not_number) {
- __ AssertRootValue(heap_number_map,
- Heap::kHeapNumberMapRootIndex,
- "HeapNumberMap register clobbered.");
- Label done;
- Label not_in_int32_range;
-
- __ UntagAndJumpIfSmi(dst, object, &done);
- __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kMapOffset));
- __ cmp(scratch1, heap_number_map);
- __ b(ne, not_number);
- __ ConvertToInt32(object,
- dst,
- scratch1,
- scratch2,
- double_scratch,
- &not_in_int32_range);
- __ jmp(&done);
-
- __ bind(&not_in_int32_range);
- __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
- __ ldr(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset));
-
- __ EmitOutOfInt32RangeTruncate(dst,
- scratch1,
- scratch2,
- scratch3);
- __ bind(&done);
-}
-
-
-void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm,
- Register int_scratch,
- Destination destination,
- DwVfpRegister double_dst,
- Register dst1,
- Register dst2,
- Register scratch2,
- SwVfpRegister single_scratch) {
- ASSERT(!int_scratch.is(scratch2));
- ASSERT(!int_scratch.is(dst1));
- ASSERT(!int_scratch.is(dst2));
-
- Label done;
-
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- __ vmov(single_scratch, int_scratch);
- __ vcvt_f64_s32(double_dst, single_scratch);
- if (destination == kCoreRegisters) {
- __ vmov(dst1, dst2, double_dst);
+ // We've already done MacroAssembler::TryFastTruncatedDoubleToILoad, so we
+ // know exponent > 31, so we can skip the vcvt_s32_f64 which will saturate.
+ if (double_offset == 0) {
+ __ ldm(ia, input_reg, scratch_low.bit() | scratch_high.bit());
+ } else {
+ __ ldr(scratch_low, MemOperand(input_reg, double_offset));
+ __ ldr(scratch_high, MemOperand(input_reg, double_offset + kIntSize));
}
- } else {
- Label fewer_than_20_useful_bits;
- // Expected output:
- // | dst2 | dst1 |
- // | s | exp | mantissa |
-
- // Check for zero.
- __ cmp(int_scratch, Operand::Zero());
- __ mov(dst2, int_scratch);
- __ mov(dst1, int_scratch);
- __ b(eq, &done);
-
- // Preload the sign of the value.
- __ and_(dst2, int_scratch, Operand(HeapNumber::kSignMask), SetCC);
- // Get the absolute value of the object (as an unsigned integer).
- __ rsb(int_scratch, int_scratch, Operand::Zero(), SetCC, mi);
-
- // Get mantissa[51:20].
-
- // Get the position of the first set bit.
- __ CountLeadingZeros(dst1, int_scratch, scratch2);
- __ rsb(dst1, dst1, Operand(31));
-
- // Set the exponent.
- __ add(scratch2, dst1, Operand(HeapNumber::kExponentBias));
- __ Bfi(dst2, scratch2, scratch2,
- HeapNumber::kExponentShift, HeapNumber::kExponentBits);
-
- // Clear the first non null bit.
- __ mov(scratch2, Operand(1));
- __ bic(int_scratch, int_scratch, Operand(scratch2, LSL, dst1));
-
- __ cmp(dst1, Operand(HeapNumber::kMantissaBitsInTopWord));
- // Get the number of bits to set in the lower part of the mantissa.
- __ sub(scratch2, dst1, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC);
- __ b(mi, &fewer_than_20_useful_bits);
- // Set the higher 20 bits of the mantissa.
- __ orr(dst2, dst2, Operand(int_scratch, LSR, scratch2));
- __ rsb(scratch2, scratch2, Operand(32));
- __ mov(dst1, Operand(int_scratch, LSL, scratch2));
- __ b(&done);
-
- __ bind(&fewer_than_20_useful_bits);
- __ rsb(scratch2, dst1, Operand(HeapNumber::kMantissaBitsInTopWord));
- __ mov(scratch2, Operand(int_scratch, LSL, scratch2));
- __ orr(dst2, dst2, scratch2);
- // Set dst1 to 0.
- __ mov(dst1, Operand::Zero());
}
- __ bind(&done);
-}
-
-void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
- Register object,
- Destination destination,
- DwVfpRegister double_dst,
- DwVfpRegister double_scratch,
- Register dst1,
- Register dst2,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- SwVfpRegister single_scratch,
- Label* not_int32) {
- ASSERT(!scratch1.is(object) && !scratch2.is(object));
- ASSERT(!scratch1.is(scratch2));
- ASSERT(!heap_number_map.is(object) &&
- !heap_number_map.is(scratch1) &&
- !heap_number_map.is(scratch2));
-
- Label done, obj_is_not_smi;
-
- __ JumpIfNotSmi(object, &obj_is_not_smi);
- __ SmiUntag(scratch1, object);
- ConvertIntToDouble(masm, scratch1, destination, double_dst, dst1, dst2,
- scratch2, single_scratch);
+ __ Ubfx(scratch, scratch_high,
+ HeapNumber::kExponentShift, HeapNumber::kExponentBits);
+ // Load scratch with exponent - 1. This is faster than loading
+ // with exponent because Bias + 1 = 1024 which is an *ARM* immediate value.
+ STATIC_ASSERT(HeapNumber::kExponentBias + 1 == 1024);
+ __ sub(scratch, scratch, Operand(HeapNumber::kExponentBias + 1));
+ // If the exponent is greater than or equal to 84, the 32 least significant
+ // bits of the integer value are 0s (such a value is the implicit 1, the 52
+ // mantissa bits, and at least 32 trailing zero bits), so the result is 0.
+ // Compare exponent with 84 (compare exponent - 1 with 83).
+ __ cmp(scratch, Operand(83));
+ __ b(ge, &out_of_range);
+
+ // If we reach this code, 31 <= exponent <= 83.
+ // So, we don't have to handle cases where 0 <= exponent <= 20 for
+ // which we would need to shift right the high part of the mantissa.
+ // Scratch contains exponent - 1.
+ // Load scratch with 52 - exponent (load with 51 - (exponent - 1)).
+ __ rsb(scratch, scratch, Operand(51), SetCC);
+ __ b(ls, &only_low);
+ // 21 <= exponent <= 51, shift scratch_low and scratch_high
+ // to generate the result.
+ __ mov(scratch_low, Operand(scratch_low, LSR, scratch));
+ // Scratch contains: 52 - exponent.
+ // We need: exponent - 20.
+ // So we use: 32 - scratch = 32 - 52 + exponent = exponent - 20.
+ __ rsb(scratch, scratch, Operand(32));
+ __ Ubfx(result_reg, scratch_high,
+ 0, HeapNumber::kMantissaBitsInTopWord);
+ // Set the implicit 1 before the mantissa part in scratch_high.
+ __ orr(result_reg, result_reg,
+ Operand(1 << HeapNumber::kMantissaBitsInTopWord));
+ __ orr(result_reg, scratch_low, Operand(result_reg, LSL, scratch));
+ __ b(&negate);
+
+ __ bind(&out_of_range);
+ __ mov(result_reg, Operand::Zero());
__ b(&done);
- __ bind(&obj_is_not_smi);
- __ AssertRootValue(heap_number_map,
- Heap::kHeapNumberMapRootIndex,
- "HeapNumberMap register clobbered.");
- __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
-
- // Load the number.
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- // Load the double value.
- __ sub(scratch1, object, Operand(kHeapObjectTag));
- __ vldr(double_dst, scratch1, HeapNumber::kValueOffset);
-
- __ EmitVFPTruncate(kRoundToZero,
- scratch1,
- double_dst,
- scratch2,
- double_scratch,
- kCheckForInexactConversion);
-
- // Jump to not_int32 if the operation did not succeed.
- __ b(ne, not_int32);
-
- if (destination == kCoreRegisters) {
- __ vmov(dst1, dst2, double_dst);
- }
-
- } else {
- ASSERT(!scratch1.is(object) && !scratch2.is(object));
- // Load the double value in the destination registers..
- __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset));
-
- // Check for 0 and -0.
- __ bic(scratch1, dst1, Operand(HeapNumber::kSignMask));
- __ orr(scratch1, scratch1, Operand(dst2));
- __ cmp(scratch1, Operand::Zero());
- __ b(eq, &done);
-
- // Check that the value can be exactly represented by a 32-bit integer.
- // Jump to not_int32 if that's not the case.
- DoubleIs32BitInteger(masm, dst1, dst2, scratch1, scratch2, not_int32);
-
- // dst1 and dst2 were trashed. Reload the double value.
- __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset));
- }
+ __ bind(&only_low);
+ // 52 <= exponent <= 83, shift only scratch_low.
+ // On entry, scratch contains: 52 - exponent.
+ __ rsb(scratch, scratch, Operand::Zero());
+ __ mov(result_reg, Operand(scratch_low, LSL, scratch));
+
+ __ bind(&negate);
+ // If input was positive, scratch_high ASR 31 equals 0 and
+ // scratch_high LSR 31 equals zero.
+ // New result = (result eor 0) + 0 = result.
+ // If the input was negative, we have to negate the result.
+ // scratch_high ASR 31 equals 0xffffffff and scratch_high LSR 31 equals 1.
+ // New result = (result eor 0xffffffff) + 1 = 0 - result.
+ __ eor(result_reg, result_reg, Operand(scratch_high, ASR, 31));
+ __ add(result_reg, result_reg, Operand(scratch_high, LSR, 31));
__ bind(&done);
-}
-
-void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
- Register object,
- Register dst,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- DwVfpRegister double_scratch0,
- DwVfpRegister double_scratch1,
- Label* not_int32) {
- ASSERT(!dst.is(object));
- ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object));
- ASSERT(!scratch1.is(scratch2) &&
- !scratch1.is(scratch3) &&
- !scratch2.is(scratch3));
-
- Label done;
-
- __ UntagAndJumpIfSmi(dst, object, &done);
-
- __ AssertRootValue(heap_number_map,
- Heap::kHeapNumberMapRootIndex,
- "HeapNumberMap register clobbered.");
- __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
-
- // Object is a heap number.
- // Convert the floating point value to a 32-bit integer.
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
-
- // Load the double value.
- __ sub(scratch1, object, Operand(kHeapObjectTag));
- __ vldr(double_scratch0, scratch1, HeapNumber::kValueOffset);
-
- __ EmitVFPTruncate(kRoundToZero,
- dst,
- double_scratch0,
- scratch1,
- double_scratch1,
- kCheckForInexactConversion);
-
- // Jump to not_int32 if the operation did not succeed.
- __ b(ne, not_int32);
- } else {
- // Load the double value in the destination registers.
- __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
- __ ldr(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset));
-
- // Check for 0 and -0.
- __ bic(dst, scratch1, Operand(HeapNumber::kSignMask));
- __ orr(dst, scratch2, Operand(dst));
- __ cmp(dst, Operand::Zero());
- __ b(eq, &done);
-
- DoubleIs32BitInteger(masm, scratch1, scratch2, dst, scratch3, not_int32);
-
- // Registers state after DoubleIs32BitInteger.
- // dst: mantissa[51:20].
- // scratch2: 1
-
- // Shift back the higher bits of the mantissa.
- __ mov(dst, Operand(dst, LSR, scratch3));
- // Set the implicit first bit.
- __ rsb(scratch3, scratch3, Operand(32));
- __ orr(dst, dst, Operand(scratch2, LSL, scratch3));
- // Set the sign.
- __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
- __ tst(scratch1, Operand(HeapNumber::kSignMask));
- __ rsb(dst, dst, Operand::Zero(), LeaveCC, mi);
- }
-
- __ bind(&done);
-}
-
-
-void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm,
- Register src1,
- Register src2,
- Register dst,
- Register scratch,
- Label* not_int32) {
- // Get exponent alone in scratch.
- __ Ubfx(scratch,
- src1,
- HeapNumber::kExponentShift,
- HeapNumber::kExponentBits);
-
- // Substract the bias from the exponent.
- __ sub(scratch, scratch, Operand(HeapNumber::kExponentBias), SetCC);
-
- // src1: higher (exponent) part of the double value.
- // src2: lower (mantissa) part of the double value.
- // scratch: unbiased exponent.
-
- // Fast cases. Check for obvious non 32-bit integer values.
- // Negative exponent cannot yield 32-bit integers.
- __ b(mi, not_int32);
- // Exponent greater than 31 cannot yield 32-bit integers.
- // Also, a positive value with an exponent equal to 31 is outside of the
- // signed 32-bit integer range.
- // Another way to put it is that if (exponent - signbit) > 30 then the
- // number cannot be represented as an int32.
- Register tmp = dst;
- __ sub(tmp, scratch, Operand(src1, LSR, 31));
- __ cmp(tmp, Operand(30));
- __ b(gt, not_int32);
- // - Bits [21:0] in the mantissa are not null.
- __ tst(src2, Operand(0x3fffff));
- __ b(ne, not_int32);
-
- // Otherwise the exponent needs to be big enough to shift left all the
- // non zero bits left. So we need the (30 - exponent) last bits of the
- // 31 higher bits of the mantissa to be null.
- // Because bits [21:0] are null, we can check instead that the
- // (32 - exponent) last bits of the 32 higher bits of the mantissa are null.
-
- // Get the 32 higher bits of the mantissa in dst.
- __ Ubfx(dst,
- src2,
- HeapNumber::kMantissaBitsInTopWord,
- 32 - HeapNumber::kMantissaBitsInTopWord);
- __ orr(dst,
- dst,
- Operand(src1, LSL, HeapNumber::kNonMantissaBitsInTopWord));
-
- // Create the mask and test the lower bits (of the higher bits).
- __ rsb(scratch, scratch, Operand(32));
- __ mov(src2, Operand(1));
- __ mov(src1, Operand(src2, LSL, scratch));
- __ sub(src1, src1, Operand(1));
- __ tst(dst, src1);
- __ b(ne, not_int32);
-}
-
-
-void FloatingPointHelper::CallCCodeForDoubleOperation(
- MacroAssembler* masm,
- Token::Value op,
- Register heap_number_result,
- Register scratch) {
- // Using core registers:
- // r0: Left value (least significant part of mantissa).
- // r1: Left value (sign, exponent, top of mantissa).
- // r2: Right value (least significant part of mantissa).
- // r3: Right value (sign, exponent, top of mantissa).
-
- // Assert that heap_number_result is callee-saved.
- // We currently always use r5 to pass it.
- ASSERT(heap_number_result.is(r5));
-
- // Push the current return address before the C call. Return will be
- // through pop(pc) below.
- __ push(lr);
- __ PrepareCallCFunction(0, 2, scratch);
- if (masm->use_eabi_hardfloat()) {
- CpuFeatures::Scope scope(VFP2);
- __ vmov(d0, r0, r1);
- __ vmov(d1, r2, r3);
- }
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(
- ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2);
- }
- // Store answer in the overwritable heap number. Double returned in
- // registers r0 and r1 or in d0.
- if (masm->use_eabi_hardfloat()) {
- CpuFeatures::Scope scope(VFP2);
- __ vstr(d0,
- FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
- } else {
- __ Strd(r0, r1, FieldMemOperand(heap_number_result,
- HeapNumber::kValueOffset));
- }
- // Place heap_number_result in r0 and return to the pushed return address.
- __ mov(r0, Operand(heap_number_result));
- __ pop(pc);
+ __ Pop(scratch_high, scratch_low);
+ __ Ret();
}
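
The slow path of DoubleToIStub above works directly on the two 32-bit halves of the double: it extracts the biased exponent, shifts the mantissa words into place, and negates the result with the eor/add trick described in the comments. A minimal standalone C++ sketch of the same truncation follows; the helper name TruncateDoubleToInt32 is hypothetical and the snippet is an illustration, not part of this patch.

    // Sketch: ToInt32-style truncation from the raw words of a double, mirroring
    // the exponent/shift/negate steps of the slow path above.
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    static int32_t TruncateDoubleToInt32(double input) {
      uint64_t bits;
      memcpy(&bits, &input, sizeof(bits));
      uint32_t lo = static_cast<uint32_t>(bits);           // Mantissa[31:0].
      uint32_t hi = static_cast<uint32_t>(bits >> 32);     // Sign, exponent, mantissa[51:32].
      int exponent = static_cast<int>((hi >> 20) & 0x7FF) - 1023;
      if (exponent < 0) return 0;                          // |input| < 1.
      if (exponent >= 84) return 0;                        // Low 32 bits of the integer are 0.
      uint64_t mantissa = ((static_cast<uint64_t>(hi & 0xFFFFF) << 32) | lo)
                          | (1ULL << 52);                  // Restore the implicit leading 1.
      uint32_t result = exponent <= 52
          ? static_cast<uint32_t>(mantissa >> (52 - exponent))
          : static_cast<uint32_t>(mantissa << (exponent - 52));
      // Negate with the eor/add trick: (x eor 0) + 0 = x, (x eor 0xffffffff) + 1 = -x.
      uint32_t sign = hi >> 31;                            // scratch_high LSR 31.
      uint32_t sign_mask = 0u - sign;                      // scratch_high ASR 31.
      return static_cast<int32_t>((result ^ sign_mask) + sign);
    }

    int main() {
      printf("%d\n", TruncateDoubleToInt32(-123456789.75));  // -123456789
      printf("%d\n", TruncateDoubleToInt32(6442450941.0));   // 2147483645 (value mod 2^32)
      return 0;
    }
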
-bool WriteInt32ToHeapNumberStub::IsPregenerated() {
+bool WriteInt32ToHeapNumberStub::IsPregenerated(Isolate* isolate) {
// These variants are compiled ahead of time. See next method.
if (the_int_.is(r1) && the_heap_number_.is(r0) && scratch_.is(r2)) {
return true;
@@ -1089,11 +660,12 @@ bool WriteInt32ToHeapNumberStub::IsPregenerated() {
}
-void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime() {
+void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(
+ Isolate* isolate) {
WriteInt32ToHeapNumberStub stub1(r1, r0, r2);
WriteInt32ToHeapNumberStub stub2(r2, r0, r3);
- stub1.GetCode()->set_is_pregenerated(true);
- stub2.GetCode()->set_is_pregenerated(true);
+ stub1.GetCode(isolate)->set_is_pregenerated(true);
+ stub2.GetCode(isolate)->set_is_pregenerated(true);
}
@@ -1114,7 +686,7 @@ void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
// Set the sign bit in scratch_ if the value was negative.
__ orr(scratch_, scratch_, Operand(HeapNumber::kSignMask), LeaveCC, cs);
// Subtract from 0 if the value was negative.
- __ rsb(the_int_, the_int_, Operand(0, RelocInfo::NONE), LeaveCC, cs);
+ __ rsb(the_int_, the_int_, Operand::Zero(), LeaveCC, cs);
// We should be masking the implicit first digit of the mantissa away here,
// but it just ends up combining harmlessly with the last digit of the
// exponent that happens to be 1. The sign bit is 0 so we shift 10 to get
@@ -1137,7 +709,7 @@ void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
non_smi_exponent += 1 << HeapNumber::kExponentShift;
__ mov(ip, Operand(HeapNumber::kSignMask | non_smi_exponent));
__ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset));
- __ mov(ip, Operand(0, RelocInfo::NONE));
+ __ mov(ip, Operand::Zero());
__ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));
__ Ret();
}
@@ -1148,48 +720,43 @@ void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
// for "identity and not NaN".
static void EmitIdenticalObjectComparison(MacroAssembler* masm,
Label* slow,
- Condition cond,
- bool never_nan_nan) {
+ Condition cond) {
Label not_identical;
Label heap_number, return_equal;
__ cmp(r0, r1);
__ b(ne, &not_identical);
- // The two objects are identical. If we know that one of them isn't NaN then
- // we now know they test equal.
- if (cond != eq || !never_nan_nan) {
- // Test for NaN. Sadly, we can't just compare to FACTORY->nan_value(),
- // so we do the second best thing - test it ourselves.
- // They are both equal and they are not both Smis so both of them are not
- // Smis. If it's not a heap number, then return equal.
- if (cond == lt || cond == gt) {
- __ CompareObjectType(r0, r4, r4, FIRST_SPEC_OBJECT_TYPE);
+ // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
+ // so we do the second best thing - test it ourselves.
+ // They are both equal and they are not both Smis so both of them are not
+ // Smis. If it's not a heap number, then return equal.
+ if (cond == lt || cond == gt) {
+ __ CompareObjectType(r0, r4, r4, FIRST_SPEC_OBJECT_TYPE);
+ __ b(ge, slow);
+ } else {
+ __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
+ __ b(eq, &heap_number);
+ // Comparing JS objects with <=, >= is complicated.
+ if (cond != eq) {
+ __ cmp(r4, Operand(FIRST_SPEC_OBJECT_TYPE));
__ b(ge, slow);
- } else {
- __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
- __ b(eq, &heap_number);
- // Comparing JS objects with <=, >= is complicated.
- if (cond != eq) {
- __ cmp(r4, Operand(FIRST_SPEC_OBJECT_TYPE));
- __ b(ge, slow);
- // Normally here we fall through to return_equal, but undefined is
- // special: (undefined == undefined) == true, but
- // (undefined <= undefined) == false! See ECMAScript 11.8.5.
- if (cond == le || cond == ge) {
- __ cmp(r4, Operand(ODDBALL_TYPE));
- __ b(ne, &return_equal);
- __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
- __ cmp(r0, r2);
- __ b(ne, &return_equal);
- if (cond == le) {
- // undefined <= undefined should fail.
- __ mov(r0, Operand(GREATER));
- } else {
- // undefined >= undefined should fail.
- __ mov(r0, Operand(LESS));
- }
- __ Ret();
+ // Normally here we fall through to return_equal, but undefined is
+ // special: (undefined == undefined) == true, but
+ // (undefined <= undefined) == false! See ECMAScript 11.8.5.
+ if (cond == le || cond == ge) {
+ __ cmp(r4, Operand(ODDBALL_TYPE));
+ __ b(ne, &return_equal);
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+ __ cmp(r0, r2);
+ __ b(ne, &return_equal);
+ if (cond == le) {
+ // undefined <= undefined should fail.
+ __ mov(r0, Operand(GREATER));
+ } else {
+ // undefined >= undefined should fail.
+ __ mov(r0, Operand(LESS));
}
+ __ Ret();
}
}
}
@@ -1204,47 +771,45 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
}
__ Ret();
- if (cond != eq || !never_nan_nan) {
- // For less and greater we don't have to check for NaN since the result of
- // x < x is false regardless. For the others here is some code to check
- // for NaN.
- if (cond != lt && cond != gt) {
- __ bind(&heap_number);
- // It is a heap number, so return non-equal if it's NaN and equal if it's
- // not NaN.
-
- // The representation of NaN values has all exponent bits (52..62) set,
- // and not all mantissa bits (0..51) clear.
- // Read top bits of double representation (second word of value).
- __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
- // Test that exponent bits are all set.
- __ Sbfx(r3, r2, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
- // NaNs have all-one exponents so they sign extend to -1.
- __ cmp(r3, Operand(-1));
- __ b(ne, &return_equal);
-
- // Shift out flag and all exponent bits, retaining only mantissa.
- __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord));
- // Or with all low-bits of mantissa.
- __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
- __ orr(r0, r3, Operand(r2), SetCC);
- // For equal we already have the right value in r0: Return zero (equal)
- // if all bits in mantissa are zero (it's an Infinity) and non-zero if
- // not (it's a NaN). For <= and >= we need to load r0 with the failing
- // value if it's a NaN.
- if (cond != eq) {
- // All-zero means Infinity means equal.
- __ Ret(eq);
- if (cond == le) {
- __ mov(r0, Operand(GREATER)); // NaN <= NaN should fail.
- } else {
- __ mov(r0, Operand(LESS)); // NaN >= NaN should fail.
- }
+ // For less and greater we don't have to check for NaN since the result of
+ // x < x is false regardless. For the others here is some code to check
+ // for NaN.
+ if (cond != lt && cond != gt) {
+ __ bind(&heap_number);
+ // It is a heap number, so return non-equal if it's NaN and equal if it's
+ // not NaN.
+
+ // The representation of NaN values has all exponent bits (52..62) set,
+ // and not all mantissa bits (0..51) clear.
+ // Read top bits of double representation (second word of value).
+ __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
+ // Test that exponent bits are all set.
+ __ Sbfx(r3, r2, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
+ // NaNs have all-one exponents so they sign extend to -1.
+ __ cmp(r3, Operand(-1));
+ __ b(ne, &return_equal);
+
+ // Shift out flag and all exponent bits, retaining only mantissa.
+ __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord));
+ // Or with all low-bits of mantissa.
+ __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
+ __ orr(r0, r3, Operand(r2), SetCC);
+ // For equal we already have the right value in r0: Return zero (equal)
+ // if all bits in mantissa are zero (it's an Infinity) and non-zero if
+ // not (it's a NaN). For <= and >= we need to load r0 with the failing
+ // value if it's a NaN.
+ if (cond != eq) {
+ // All-zero means Infinity means equal.
+ __ Ret(eq);
+ if (cond == le) {
+ __ mov(r0, Operand(GREATER)); // NaN <= NaN should fail.
+ } else {
+ __ mov(r0, Operand(LESS)); // NaN >= NaN should fail.
}
- __ Ret();
}
- // No fall through here.
+ __ Ret();
}
+ // No fall through here.
__ bind(&not_identical);
}
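
The NaN check above reads the two words of the heap number and relies on the IEEE-754 encoding: every NaN has all 11 exponent bits set and a non-zero mantissa, while Infinity has the same exponent but a zero mantissa. A minimal standalone C++ sketch of that bit test follows; the helper name IsNaNBits is hypothetical and the snippet is an illustration, not part of this patch.

    // Sketch: the NaN test used above, done on the raw words of a double.
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    static bool IsNaNBits(double value) {
      uint64_t bits;
      memcpy(&bits, &value, sizeof(bits));
      uint32_t hi = static_cast<uint32_t>(bits >> 32);     // Sign, exponent, mantissa[51:32].
      uint32_t lo = static_cast<uint32_t>(bits);           // Mantissa[31:0].
      bool exponent_all_ones = ((hi >> 20) & 0x7FF) == 0x7FF;
      bool mantissa_nonzero = ((hi & 0xFFFFF) | lo) != 0;  // Infinity has a zero mantissa.
      return exponent_all_ones && mantissa_nonzero;
    }

    int main() {
      double zero = 0.0;
      printf("%d %d %d\n",
             IsNaNBits(zero / zero),  // 1: NaN.
             IsNaNBits(1.0 / zero),   // 0: Infinity.
             IsNaNBits(1.5));         // 0: ordinary number.
      return 0;
    }
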
@@ -1280,23 +845,10 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
}
// Lhs is a smi, rhs is a number.
- if (CpuFeatures::IsSupported(VFP2)) {
- // Convert lhs to a double in d7.
- CpuFeatures::Scope scope(VFP2);
- __ SmiToDoubleVFPRegister(lhs, d7, r7, s15);
- // Load the double from rhs, tagged HeapNumber r0, to d6.
- __ sub(r7, rhs, Operand(kHeapObjectTag));
- __ vldr(d6, r7, HeapNumber::kValueOffset);
- } else {
- __ push(lr);
- // Convert lhs to a double in r2, r3.
- __ mov(r7, Operand(lhs));
- ConvertToDoubleStub stub1(r3, r2, r7, r6);
- __ Call(stub1.GetCode());
- // Load rhs to a double in r0, r1.
- __ Ldrd(r0, r1, FieldMemOperand(rhs, HeapNumber::kValueOffset));
- __ pop(lr);
- }
+ // Convert lhs to a double in d7.
+ __ SmiToDouble(d7, lhs);
+ // Load the double from rhs, tagged HeapNumber r0, to d6.
+ __ vldr(d6, rhs, HeapNumber::kValueOffset - kHeapObjectTag);
// We now have both loaded as doubles but we can skip the lhs nan check
// since it's a smi.
@@ -1320,132 +872,14 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
}
// Rhs is a smi, lhs is a heap number.
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- // Load the double from lhs, tagged HeapNumber r1, to d7.
- __ sub(r7, lhs, Operand(kHeapObjectTag));
- __ vldr(d7, r7, HeapNumber::kValueOffset);
- // Convert rhs to a double in d6 .
- __ SmiToDoubleVFPRegister(rhs, d6, r7, s13);
- } else {
- __ push(lr);
- // Load lhs to a double in r2, r3.
- __ Ldrd(r2, r3, FieldMemOperand(lhs, HeapNumber::kValueOffset));
- // Convert rhs to a double in r0, r1.
- __ mov(r7, Operand(rhs));
- ConvertToDoubleStub stub2(r1, r0, r7, r6);
- __ Call(stub2.GetCode());
- __ pop(lr);
- }
+ // Load the double from lhs, tagged HeapNumber r1, to d7.
+ __ vldr(d7, lhs, HeapNumber::kValueOffset - kHeapObjectTag);
+ // Convert rhs to a double in d6.
+ __ SmiToDouble(d6, rhs);
// Fall through to both_loaded_as_doubles.
}
-void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cond) {
- bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
- Register rhs_exponent = exp_first ? r0 : r1;
- Register lhs_exponent = exp_first ? r2 : r3;
- Register rhs_mantissa = exp_first ? r1 : r0;
- Register lhs_mantissa = exp_first ? r3 : r2;
- Label one_is_nan, neither_is_nan;
-
- __ Sbfx(r4,
- lhs_exponent,
- HeapNumber::kExponentShift,
- HeapNumber::kExponentBits);
- // NaNs have all-one exponents so they sign extend to -1.
- __ cmp(r4, Operand(-1));
- __ b(ne, lhs_not_nan);
- __ mov(r4,
- Operand(lhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
- SetCC);
- __ b(ne, &one_is_nan);
- __ cmp(lhs_mantissa, Operand(0, RelocInfo::NONE));
- __ b(ne, &one_is_nan);
-
- __ bind(lhs_not_nan);
- __ Sbfx(r4,
- rhs_exponent,
- HeapNumber::kExponentShift,
- HeapNumber::kExponentBits);
- // NaNs have all-one exponents so they sign extend to -1.
- __ cmp(r4, Operand(-1));
- __ b(ne, &neither_is_nan);
- __ mov(r4,
- Operand(rhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
- SetCC);
- __ b(ne, &one_is_nan);
- __ cmp(rhs_mantissa, Operand(0, RelocInfo::NONE));
- __ b(eq, &neither_is_nan);
-
- __ bind(&one_is_nan);
- // NaN comparisons always fail.
- // Load whatever we need in r0 to make the comparison fail.
- if (cond == lt || cond == le) {
- __ mov(r0, Operand(GREATER));
- } else {
- __ mov(r0, Operand(LESS));
- }
- __ Ret();
-
- __ bind(&neither_is_nan);
-}
-
-
-// See comment at call site.
-static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm,
- Condition cond) {
- bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
- Register rhs_exponent = exp_first ? r0 : r1;
- Register lhs_exponent = exp_first ? r2 : r3;
- Register rhs_mantissa = exp_first ? r1 : r0;
- Register lhs_mantissa = exp_first ? r3 : r2;
-
- // r0, r1, r2, r3 have the two doubles. Neither is a NaN.
- if (cond == eq) {
- // Doubles are not equal unless they have the same bit pattern.
- // Exception: 0 and -0.
- __ cmp(rhs_mantissa, Operand(lhs_mantissa));
- __ orr(r0, rhs_mantissa, Operand(lhs_mantissa), LeaveCC, ne);
- // Return non-zero if the numbers are unequal.
- __ Ret(ne);
-
- __ sub(r0, rhs_exponent, Operand(lhs_exponent), SetCC);
- // If exponents are equal then return 0.
- __ Ret(eq);
-
- // Exponents are unequal. The only way we can return that the numbers
- // are equal is if one is -0 and the other is 0. We already dealt
- // with the case where both are -0 or both are 0.
- // We start by seeing if the mantissas (that are equal) or the bottom
- // 31 bits of the rhs exponent are non-zero. If so we return not
- // equal.
- __ orr(r4, lhs_mantissa, Operand(lhs_exponent, LSL, kSmiTagSize), SetCC);
- __ mov(r0, Operand(r4), LeaveCC, ne);
- __ Ret(ne);
- // Now they are equal if and only if the lhs exponent is zero in its
- // low 31 bits.
- __ mov(r0, Operand(rhs_exponent, LSL, kSmiTagSize));
- __ Ret();
- } else {
- // Call a native function to do a comparison between two non-NaNs.
- // Call C routine that may not cause GC or other trouble.
- __ push(lr);
- __ PrepareCallCFunction(0, 2, r5);
- if (masm->use_eabi_hardfloat()) {
- CpuFeatures::Scope scope(VFP2);
- __ vmov(d0, r0, r1);
- __ vmov(d1, r2, r3);
- }
-
- AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()),
- 0, 2);
- __ pop(pc); // Return.
- }
-}
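
Editor's note: the equality rule that the deleted EmitTwoNonNanDoubleComparison implemented for the cc == eq case can be stated in a few lines; this is a sketch of the semantics only, not of the register-level sequence:

#include <cstdint>
#include <cstring>

// Two non-NaN doubles are equal iff their bit patterns match, except that
// +0.0 and -0.0 (which differ only in the sign bit) must also compare equal.
static bool NonNanDoublesEqual(double lhs, double rhs) {
  uint64_t a, b;
  std::memcpy(&a, &lhs, sizeof(a));
  std::memcpy(&b, &rhs, sizeof(b));
  if (a == b) return true;
  const uint64_t kSignMask = uint64_t{1} << 63;
  return ((a | b) & ~kSignMask) == 0;  // Both operands are some form of zero.
}
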
-
-
// See comment at call site.
static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
Register lhs,
@@ -1480,13 +914,12 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
__ cmp(r3, Operand(ODDBALL_TYPE));
__ b(eq, &return_not_equal);
- // Now that we have the types we might as well check for symbol-symbol.
- // Ensure that no non-strings have the symbol bit set.
- STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask);
- STATIC_ASSERT(kSymbolTag != 0);
- __ and_(r2, r2, Operand(r3));
- __ tst(r2, Operand(kIsSymbolMask));
- __ b(ne, &return_not_equal);
+ // Now that we have the types we might as well check for
+ // internalized-internalized.
+ STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
+ __ orr(r2, r2, Operand(r3));
+ __ tst(r2, Operand(kIsNotStringMask | kIsNotInternalizedMask));
+ __ b(eq, &return_not_equal);
}
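
Editor's note: the replacement check relies on the STATIC_ASSERT above. With kInternalizedTag == 0 and kStringTag == 0, an internalized string has neither the "not a string" nor the "not internalized" bit set, so OR-ing the two instance types and masking detects the both-internalized case in one test. A sketch with illustrative mask values (the real constants live in objects.h):

#include <cstdint>

constexpr uint32_t kIsNotStringMaskSketch = 0x80;        // illustrative value
constexpr uint32_t kIsNotInternalizedMaskSketch = 0x40;  // illustrative value

// OR-ing keeps a bit set if it is set in either type, so the combined value
// passes the test only when both operands are internalized strings.
static bool BothInternalizedStrings(uint32_t lhs_type, uint32_t rhs_type) {
  return ((lhs_type | rhs_type) &
          (kIsNotStringMaskSketch | kIsNotInternalizedMaskSketch)) == 0;
}
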
@@ -1508,43 +941,34 @@ static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
// Both are heap numbers. Load them up then jump to the code we have
// for that.
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- __ sub(r7, rhs, Operand(kHeapObjectTag));
- __ vldr(d6, r7, HeapNumber::kValueOffset);
- __ sub(r7, lhs, Operand(kHeapObjectTag));
- __ vldr(d7, r7, HeapNumber::kValueOffset);
- } else {
- __ Ldrd(r2, r3, FieldMemOperand(lhs, HeapNumber::kValueOffset));
- __ Ldrd(r0, r1, FieldMemOperand(rhs, HeapNumber::kValueOffset));
- }
+ __ vldr(d6, rhs, HeapNumber::kValueOffset - kHeapObjectTag);
+ __ vldr(d7, lhs, HeapNumber::kValueOffset - kHeapObjectTag);
__ jmp(both_loaded_as_doubles);
}
-// Fast negative check for symbol-to-symbol equality.
-static void EmitCheckForSymbolsOrObjects(MacroAssembler* masm,
- Register lhs,
- Register rhs,
- Label* possible_strings,
- Label* not_both_strings) {
+// Fast negative check for internalized-to-internalized equality.
+static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
+ Register lhs,
+ Register rhs,
+ Label* possible_strings,
+ Label* not_both_strings) {
ASSERT((lhs.is(r0) && rhs.is(r1)) ||
(lhs.is(r1) && rhs.is(r0)));
// r2 is object type of rhs.
- // Ensure that no non-strings have the symbol bit set.
Label object_test;
- STATIC_ASSERT(kSymbolTag != 0);
+ STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
__ tst(r2, Operand(kIsNotStringMask));
__ b(ne, &object_test);
- __ tst(r2, Operand(kIsSymbolMask));
- __ b(eq, possible_strings);
+ __ tst(r2, Operand(kIsNotInternalizedMask));
+ __ b(ne, possible_strings);
__ CompareObjectType(lhs, r3, r3, FIRST_NONSTRING_TYPE);
__ b(ge, not_both_strings);
- __ tst(r3, Operand(kIsSymbolMask));
- __ b(eq, possible_strings);
+ __ tst(r3, Operand(kIsNotInternalizedMask));
+ __ b(ne, possible_strings);
- // Both are symbols. We already checked they weren't the same pointer
+ // Both are internalized. We already checked they weren't the same pointer
// so they are not equal.
__ mov(r0, Operand(NOT_EQUAL));
__ Ret();
@@ -1567,153 +991,60 @@ static void EmitCheckForSymbolsOrObjects(MacroAssembler* masm,
}
-void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
- Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- bool object_is_smi,
- Label* not_found) {
- // Use of registers. Register result is used as a temporary.
- Register number_string_cache = result;
- Register mask = scratch3;
-
- // Load the number string cache.
- __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
-
- // Make the hash mask from the length of the number string cache. It
- // contains two elements (number and string) for each cache entry.
- __ ldr(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
- // Divide length by two (length is a smi).
- __ mov(mask, Operand(mask, ASR, kSmiTagSize + 1));
- __ sub(mask, mask, Operand(1)); // Make mask.
-
- // Calculate the entry in the number string cache. The hash value in the
- // number string cache for smis is just the smi value, and the hash for
- // doubles is the xor of the upper and lower words. See
- // Heap::GetNumberStringCache.
- Isolate* isolate = masm->isolate();
- Label is_smi;
- Label load_result_from_cache;
- if (!object_is_smi) {
- __ JumpIfSmi(object, &is_smi);
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- __ CheckMap(object,
- scratch1,
- Heap::kHeapNumberMapRootIndex,
- not_found,
- DONT_DO_SMI_CHECK);
-
- STATIC_ASSERT(8 == kDoubleSize);
- __ add(scratch1,
- object,
- Operand(HeapNumber::kValueOffset - kHeapObjectTag));
- __ ldm(ia, scratch1, scratch1.bit() | scratch2.bit());
- __ eor(scratch1, scratch1, Operand(scratch2));
- __ and_(scratch1, scratch1, Operand(mask));
-
- // Calculate address of entry in string cache: each entry consists
- // of two pointer sized fields.
- __ add(scratch1,
- number_string_cache,
- Operand(scratch1, LSL, kPointerSizeLog2 + 1));
-
- Register probe = mask;
- __ ldr(probe,
- FieldMemOperand(scratch1, FixedArray::kHeaderSize));
- __ JumpIfSmi(probe, not_found);
- __ sub(scratch2, object, Operand(kHeapObjectTag));
- __ vldr(d0, scratch2, HeapNumber::kValueOffset);
- __ sub(probe, probe, Operand(kHeapObjectTag));
- __ vldr(d1, probe, HeapNumber::kValueOffset);
- __ VFPCompareAndSetFlags(d0, d1);
- __ b(ne, not_found); // The cache did not contain this value.
- __ b(&load_result_from_cache);
- } else {
- __ b(not_found);
- }
+static void ICCompareStub_CheckInputType(MacroAssembler* masm,
+ Register input,
+ Register scratch,
+ CompareIC::State expected,
+ Label* fail) {
+ Label ok;
+ if (expected == CompareIC::SMI) {
+ __ JumpIfNotSmi(input, fail);
+ } else if (expected == CompareIC::NUMBER) {
+ __ JumpIfSmi(input, &ok);
+ __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
+ DONT_DO_SMI_CHECK);
}
-
- __ bind(&is_smi);
- Register scratch = scratch1;
- __ and_(scratch, mask, Operand(object, ASR, 1));
- // Calculate address of entry in string cache: each entry consists
- // of two pointer sized fields.
- __ add(scratch,
- number_string_cache,
- Operand(scratch, LSL, kPointerSizeLog2 + 1));
-
- // Check if the entry is the smi we are looking for.
- Register probe = mask;
- __ ldr(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
- __ cmp(object, probe);
- __ b(ne, not_found);
-
- // Get the result from the cache.
- __ bind(&load_result_from_cache);
- __ ldr(result,
- FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
- __ IncrementCounter(isolate->counters()->number_to_string_native(),
- 1,
- scratch1,
- scratch2);
+ // We could be strict about internalized/non-internalized here, but as long as
+ // hydrogen doesn't care, the stub doesn't have to care either.
+ __ bind(&ok);
}
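
Editor's note: the guard just added can be summarised as follows: a SMI-state compare IC misses on any non-smi, a NUMBER-state IC lets smis through and requires heap numbers otherwise, and every other state is accepted unchecked. A host-side sketch, with the enum and predicates standing in for the real CompareIC types:

enum class CompareStateSketch { SMI, NUMBER, OTHER };

struct TaggedValueSketch {
  bool is_smi;
  bool is_heap_number;
};

// Mirrors the branches emitted above: JumpIfNotSmi for SMI, JumpIfSmi plus
// CheckMap(HeapNumberMap) for NUMBER, and no check for the remaining states.
static bool InputMatchesState(const TaggedValueSketch& input,
                              CompareStateSketch expected) {
  switch (expected) {
    case CompareStateSketch::SMI:
      return input.is_smi;
    case CompareStateSketch::NUMBER:
      return input.is_smi || input.is_heap_number;
    default:
      return true;
  }
}
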
-void NumberToStringStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- __ ldr(r1, MemOperand(sp, 0));
-
- // Generate code to lookup number in the number string cache.
- GenerateLookupNumberStringCache(masm, r1, r0, r2, r3, r4, false, &runtime);
- __ add(sp, sp, Operand(1 * kPointerSize));
- __ Ret();
-
- __ bind(&runtime);
- // Handle number to string in the runtime system if not found in the cache.
- __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);
-}
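
Editor's note: the cache lookup deleted above documented its hash scheme in its comments: for a smi the hash is the untagged smi value itself, for a heap number it is the xor of the two 32-bit halves of the double, and either way the result is masked by the number of cache entries minus one, which assumes a power-of-two cache size. A sketch of the heap-number case:

#include <cstdint>
#include <cstring>

// entry_mask is (number of cache entries - 1); the cache stores a
// number/string pair per entry, so the assembly derived the mask from the
// backing FixedArray length divided by two.
static uint32_t NumberStringCacheHash(double value, uint32_t entry_mask) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  uint32_t low = static_cast<uint32_t>(bits);
  uint32_t high = static_cast<uint32_t>(bits >> 32);
  return (low ^ high) & entry_mask;
}
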
-
-
-// On entry lhs_ and rhs_ are the values to be compared.
+// On entry r1 and r2 are the values to be compared.
// On exit r0 is 0, positive or negative to indicate the result of
// the comparison.
-void CompareStub::Generate(MacroAssembler* masm) {
- ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
- (lhs_.is(r1) && rhs_.is(r0)));
+void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
+ Register lhs = r1;
+ Register rhs = r0;
+ Condition cc = GetCondition();
+
+ Label miss;
+ ICCompareStub_CheckInputType(masm, lhs, r2, left_, &miss);
+ ICCompareStub_CheckInputType(masm, rhs, r3, right_, &miss);
Label slow; // Call builtin.
Label not_smis, both_loaded_as_doubles, lhs_not_nan;
- if (include_smi_compare_) {
- Label not_two_smis, smi_done;
- __ orr(r2, r1, r0);
- __ JumpIfNotSmi(r2, &not_two_smis);
- __ mov(r1, Operand(r1, ASR, 1));
- __ sub(r0, r1, Operand(r0, ASR, 1));
- __ Ret();
- __ bind(&not_two_smis);
- } else if (FLAG_debug_code) {
- __ orr(r2, r1, r0);
- __ tst(r2, Operand(kSmiTagMask));
- __ Assert(ne, "CompareStub: unexpected smi operands.");
- }
+ Label not_two_smis, smi_done;
+ __ orr(r2, r1, r0);
+ __ JumpIfNotSmi(r2, &not_two_smis);
+ __ mov(r1, Operand(r1, ASR, 1));
+ __ sub(r0, r1, Operand(r0, ASR, 1));
+ __ Ret();
+ __ bind(&not_two_smis);
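
Editor's note: the smi fast path kept above works because untagging by an arithmetic shift leaves 31-bit values, so the subtraction cannot overflow and its sign directly encodes the comparison result. A one-line sketch of the arithmetic:

#include <cstdint>

// Negative, zero or positive exactly as lhs <, ==, > rhs; safe because the
// untagged operands fit in 31 bits.
static int32_t CompareSmis(int32_t lhs_tagged, int32_t rhs_tagged) {
  return (lhs_tagged >> 1) - (rhs_tagged >> 1);
}
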
// NOTICE! This code is only reached after a smi-fast-case check, so
// it is certain that at least one operand isn't a smi.
// Handle the case where the objects are identical. Either returns the answer
// or goes to slow. Only falls through if the objects were not identical.
- EmitIdenticalObjectComparison(masm, &slow, cc_, never_nan_nan_);
+ EmitIdenticalObjectComparison(masm, &slow, cc);
// If either is a Smi (we know that not both are), then they can only
// be strictly equal if the other is a HeapNumber.
STATIC_ASSERT(kSmiTag == 0);
ASSERT_EQ(0, Smi::FromInt(0));
- __ and_(r2, lhs_, Operand(rhs_));
+ __ and_(r2, lhs, Operand(rhs));
__ JumpIfNotSmi(r2, &not_smis);
// One operand is a smi. EmitSmiNonsmiComparison generates code that can:
// 1) Return the answer.
@@ -1724,94 +1055,86 @@ void CompareStub::Generate(MacroAssembler* masm) {
// comparison. If VFP3 is supported the double values of the numbers have
// been loaded into d7 and d6. Otherwise, the double values have been loaded
// into r0, r1, r2, and r3.
- EmitSmiNonsmiComparison(masm, lhs_, rhs_, &lhs_not_nan, &slow, strict_);
+ EmitSmiNonsmiComparison(masm, lhs, rhs, &lhs_not_nan, &slow, strict());
__ bind(&both_loaded_as_doubles);
// The arguments have been converted to doubles and stored in d6 and d7, if
// VFP3 is supported, or in r0, r1, r2, and r3.
Isolate* isolate = masm->isolate();
- if (CpuFeatures::IsSupported(VFP2)) {
- __ bind(&lhs_not_nan);
- CpuFeatures::Scope scope(VFP2);
- Label no_nan;
- // ARMv7 VFP3 instructions to implement double precision comparison.
- __ VFPCompareAndSetFlags(d7, d6);
- Label nan;
- __ b(vs, &nan);
- __ mov(r0, Operand(EQUAL), LeaveCC, eq);
- __ mov(r0, Operand(LESS), LeaveCC, lt);
- __ mov(r0, Operand(GREATER), LeaveCC, gt);
- __ Ret();
+ __ bind(&lhs_not_nan);
+ Label no_nan;
+ // ARMv7 VFP3 instructions to implement double precision comparison.
+ __ VFPCompareAndSetFlags(d7, d6);
+ Label nan;
+ __ b(vs, &nan);
+ __ mov(r0, Operand(EQUAL), LeaveCC, eq);
+ __ mov(r0, Operand(LESS), LeaveCC, lt);
+ __ mov(r0, Operand(GREATER), LeaveCC, gt);
+ __ Ret();
- __ bind(&nan);
- // If one of the sides was a NaN then the v flag is set. Load r0 with
- // whatever it takes to make the comparison fail, since comparisons with NaN
- // always fail.
- if (cc_ == lt || cc_ == le) {
- __ mov(r0, Operand(GREATER));
- } else {
- __ mov(r0, Operand(LESS));
- }
- __ Ret();
+ __ bind(&nan);
+ // If one of the sides was a NaN then the v flag is set. Load r0 with
+ // whatever it takes to make the comparison fail, since comparisons with NaN
+ // always fail.
+ if (cc == lt || cc == le) {
+ __ mov(r0, Operand(GREATER));
} else {
- // Checks for NaN in the doubles we have loaded. Can return the answer or
- // fall through if neither is a NaN. Also binds lhs_not_nan.
- EmitNanCheck(masm, &lhs_not_nan, cc_);
- // Compares two doubles in r0, r1, r2, r3 that are not NaNs. Returns the
- // answer. Never falls through.
- EmitTwoNonNanDoubleComparison(masm, cc_);
+ __ mov(r0, Operand(LESS));
}
+ __ Ret();
__ bind(&not_smis);
// At this point we know we are dealing with two different objects,
// and neither of them is a Smi. The objects are in rhs_ and lhs_.
- if (strict_) {
+ if (strict()) {
// This returns non-equal for some object types, or falls through if it
// was not lucky.
- EmitStrictTwoHeapObjectCompare(masm, lhs_, rhs_);
+ EmitStrictTwoHeapObjectCompare(masm, lhs, rhs);
}
- Label check_for_symbols;
+ Label check_for_internalized_strings;
Label flat_string_check;
// Check for heap-number-heap-number comparison. Can jump to slow case,
// or load both doubles into r0, r1, r2, r3 and jump to the code that handles
- // that case. If the inputs are not doubles then jumps to check_for_symbols.
+ // that case. If the inputs are not doubles then jumps to
+ // check_for_internalized_strings.
// In this case r2 will contain the type of rhs_. Never falls through.
EmitCheckForTwoHeapNumbers(masm,
- lhs_,
- rhs_,
+ lhs,
+ rhs,
&both_loaded_as_doubles,
- &check_for_symbols,
+ &check_for_internalized_strings,
&flat_string_check);
- __ bind(&check_for_symbols);
+ __ bind(&check_for_internalized_strings);
// In the strict case the EmitStrictTwoHeapObjectCompare already took care of
- // symbols.
- if (cc_ == eq && !strict_) {
- // Returns an answer for two symbols or two detectable objects.
+ // internalized strings.
+ if (cc == eq && !strict()) {
+ // Returns an answer for two internalized strings or two detectable objects.
// Otherwise jumps to string case or not both strings case.
// Assumes that r2 is the type of rhs_ on entry.
- EmitCheckForSymbolsOrObjects(masm, lhs_, rhs_, &flat_string_check, &slow);
+ EmitCheckForInternalizedStringsOrObjects(
+ masm, lhs, rhs, &flat_string_check, &slow);
}
// Check for both being sequential ASCII strings, and inline if that is the
// case.
__ bind(&flat_string_check);
- __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs_, rhs_, r2, r3, &slow);
+ __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs, rhs, r2, r3, &slow);
__ IncrementCounter(isolate->counters()->string_compare_native(), 1, r2, r3);
- if (cc_ == eq) {
+ if (cc == eq) {
StringCompareStub::GenerateFlatAsciiStringEquals(masm,
- lhs_,
- rhs_,
+ lhs,
+ rhs,
r2,
r3,
r4);
} else {
StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
- lhs_,
- rhs_,
+ lhs,
+ rhs,
r2,
r3,
r4,
@@ -1821,18 +1144,18 @@ void CompareStub::Generate(MacroAssembler* masm) {
__ bind(&slow);
- __ Push(lhs_, rhs_);
+ __ Push(lhs, rhs);
// Figure out which native to call and setup the arguments.
Builtins::JavaScript native;
- if (cc_ == eq) {
- native = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
+ if (cc == eq) {
+ native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
} else {
native = Builtins::COMPARE;
int ncr; // NaN compare result
- if (cc_ == lt || cc_ == le) {
+ if (cc == lt || cc == le) {
ncr = GREATER;
} else {
- ASSERT(cc_ == gt || cc_ == ge); // remaining cases
+ ASSERT(cc == gt || cc == ge); // remaining cases
ncr = LESS;
}
__ mov(r0, Operand(Smi::FromInt(ncr)));
@@ -1842,159 +1165,9 @@ void CompareStub::Generate(MacroAssembler* masm) {
// Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
__ InvokeBuiltin(native, JUMP_FUNCTION);
-}
-
-
-// The stub expects its argument in the tos_ register and returns its result in
-// it, too: zero for false, and a non-zero value for true.
-void ToBooleanStub::Generate(MacroAssembler* masm) {
- // This stub overrides SometimesSetsUpAFrame() to return false. That means
- // we cannot call anything that could cause a GC from this stub.
- Label patch;
- const Register map = r9.is(tos_) ? r7 : r9;
- const Register temp = map;
-
- // undefined -> false.
- CheckOddball(masm, UNDEFINED, Heap::kUndefinedValueRootIndex, false);
-
- // Boolean -> its value.
- CheckOddball(masm, BOOLEAN, Heap::kFalseValueRootIndex, false);
- CheckOddball(masm, BOOLEAN, Heap::kTrueValueRootIndex, true);
-
- // 'null' -> false.
- CheckOddball(masm, NULL_TYPE, Heap::kNullValueRootIndex, false);
-
- if (types_.Contains(SMI)) {
- // Smis: 0 -> false, all other -> true
- __ tst(tos_, Operand(kSmiTagMask));
- // tos_ contains the correct return value already
- __ Ret(eq);
- } else if (types_.NeedsMap()) {
- // If we need a map later and have a Smi -> patch.
- __ JumpIfSmi(tos_, &patch);
- }
-
- if (types_.NeedsMap()) {
- __ ldr(map, FieldMemOperand(tos_, HeapObject::kMapOffset));
-
- if (types_.CanBeUndetectable()) {
- __ ldrb(ip, FieldMemOperand(map, Map::kBitFieldOffset));
- __ tst(ip, Operand(1 << Map::kIsUndetectable));
- // Undetectable -> false.
- __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, ne);
- __ Ret(ne);
- }
- }
-
- if (types_.Contains(SPEC_OBJECT)) {
- // Spec object -> true.
- __ CompareInstanceType(map, ip, FIRST_SPEC_OBJECT_TYPE);
- // tos_ contains the correct non-zero return value already.
- __ Ret(ge);
- }
-
- if (types_.Contains(STRING)) {
- // String value -> false iff empty.
- __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
- __ ldr(tos_, FieldMemOperand(tos_, String::kLengthOffset), lt);
- __ Ret(lt); // the string length is OK as the return value
- }
- if (types_.Contains(HEAP_NUMBER)) {
- // Heap number -> false iff +0, -0, or NaN.
- Label not_heap_number;
- __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
- __ b(ne, &not_heap_number);
-
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
-
- __ vldr(d1, FieldMemOperand(tos_, HeapNumber::kValueOffset));
- __ VFPCompareAndSetFlags(d1, 0.0);
- // "tos_" is a register, and contains a non zero value by default.
- // Hence we only need to overwrite "tos_" with zero to return false for
- // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
- __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, eq); // for FP_ZERO
- __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, vs); // for FP_NAN
- } else {
- Label done, not_nan, not_zero;
- __ ldr(temp, FieldMemOperand(tos_, HeapNumber::kExponentOffset));
- // -0 maps to false:
- __ bic(
- temp, temp, Operand(HeapNumber::kSignMask, RelocInfo::NONE), SetCC);
- __ b(ne, &not_zero);
- // If exponent word is zero then the answer depends on the mantissa word.
- __ ldr(tos_, FieldMemOperand(tos_, HeapNumber::kMantissaOffset));
- __ jmp(&done);
-
- // Check for NaN.
- __ bind(&not_zero);
- // We already zeroed the sign bit, now shift out the mantissa so we only
- // have the exponent left.
- __ mov(temp, Operand(temp, LSR, HeapNumber::kMantissaBitsInTopWord));
- unsigned int shifted_exponent_mask =
- HeapNumber::kExponentMask >> HeapNumber::kMantissaBitsInTopWord;
- __ cmp(temp, Operand(shifted_exponent_mask, RelocInfo::NONE));
- __ b(ne, &not_nan); // If exponent is not 0x7ff then it can't be a NaN.
-
- // Reload exponent word.
- __ ldr(temp, FieldMemOperand(tos_, HeapNumber::kExponentOffset));
- __ tst(temp, Operand(HeapNumber::kMantissaMask, RelocInfo::NONE));
- // If mantissa is not zero then we have a NaN, so return 0.
- __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, ne);
- __ b(ne, &done);
-
- // Load mantissa word.
- __ ldr(temp, FieldMemOperand(tos_, HeapNumber::kMantissaOffset));
- __ cmp(temp, Operand(0, RelocInfo::NONE));
- // If mantissa is not zero then we have a NaN, so return 0.
- __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, ne);
- __ b(ne, &done);
-
- __ bind(&not_nan);
- __ mov(tos_, Operand(1, RelocInfo::NONE));
- __ bind(&done);
- }
- __ Ret();
- __ bind(&not_heap_number);
- }
-
- __ bind(&patch);
- GenerateTypeTransition(masm);
-}
-
-
-void ToBooleanStub::CheckOddball(MacroAssembler* masm,
- Type type,
- Heap::RootListIndex value,
- bool result) {
- if (types_.Contains(type)) {
- // If we see an expected oddball, return its ToBoolean value tos_.
- __ LoadRoot(ip, value);
- __ cmp(tos_, ip);
- // The value of a root is never NULL, so we can avoid loading a non-null
- // value into tos_ when we want to return 'true'.
- if (!result) {
- __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, eq);
- }
- __ Ret(eq);
- }
-}
-
-
-void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) {
- if (!tos_.is(r3)) {
- __ mov(r3, Operand(tos_));
- }
- __ mov(r2, Operand(Smi::FromInt(tos_.code())));
- __ mov(r1, Operand(Smi::FromInt(types_.ToByte())));
- __ Push(r3, r2, r1);
- // Patch the caller to an appropriate specialized stub and return the
- // operation result to the caller of the stub.
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kToBoolean_Patch), masm->isolate()),
- 3,
- 1);
+ __ bind(&miss);
+ GenerateMiss(masm);
}
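
Editor's note: one detail of the NaN path kept above is worth spelling out: the caller re-applies the original condition to the stub's integer result, so the stub only has to return a value on the wrong side of zero. A sketch of that selection, with LESS/EQUAL/GREATER standing for -1/0/1 as in the real code:

enum { LESS = -1, EQUAL = 0, GREATER = 1 };

// For cc in {lt, le} the caller later tests "result < 0" or "result <= 0",
// so GREATER makes both fail; for the remaining conditions (gt, ge, eq) the
// negative, non-zero LESS makes "result > 0", "result >= 0" and
// "result == 0" all fail, so NaN never compares true.
static int NanComparisonResult(bool condition_is_lt_or_le) {
  return condition_is_lt_or_le ? GREATER : LESS;
}
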
@@ -2003,1323 +1176,28 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
// store the registers in any particular way, but we do have to store and
// restore them.
__ stm(db_w, sp, kCallerSaved | lr.bit());
+
+ const Register scratch = r1;
+
if (save_doubles_ == kSaveFPRegs) {
- CpuFeatures::Scope scope(VFP2);
- __ sub(sp, sp, Operand(kDoubleSize * DwVfpRegister::kNumRegisters));
- for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
- DwVfpRegister reg = DwVfpRegister::from_code(i);
- __ vstr(reg, MemOperand(sp, i * kDoubleSize));
- }
+ __ SaveFPRegs(sp, scratch);
}
const int argument_count = 1;
const int fp_argument_count = 0;
- const Register scratch = r1;
AllowExternalCallThatCantCauseGC scope(masm);
__ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
- __ mov(r0, Operand(ExternalReference::isolate_address()));
+ __ mov(r0, Operand(ExternalReference::isolate_address(masm->isolate())));
__ CallCFunction(
ExternalReference::store_buffer_overflow_function(masm->isolate()),
argument_count);
if (save_doubles_ == kSaveFPRegs) {
- CpuFeatures::Scope scope(VFP2);
- for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
- DwVfpRegister reg = DwVfpRegister::from_code(i);
- __ vldr(reg, MemOperand(sp, i * kDoubleSize));
- }
- __ add(sp, sp, Operand(kDoubleSize * DwVfpRegister::kNumRegisters));
+ __ RestoreFPRegs(sp, scratch);
}
__ ldm(ia_w, sp, kCallerSaved | pc.bit()); // Also pop pc to get Ret(0).
}
-void UnaryOpStub::PrintName(StringStream* stream) {
- const char* op_name = Token::Name(op_);
- const char* overwrite_name = NULL; // Make g++ happy.
- switch (mode_) {
- case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break;
- case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break;
- }
- stream->Add("UnaryOpStub_%s_%s_%s",
- op_name,
- overwrite_name,
- UnaryOpIC::GetName(operand_type_));
-}
-
-
-// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::Generate(MacroAssembler* masm) {
- switch (operand_type_) {
- case UnaryOpIC::UNINITIALIZED:
- GenerateTypeTransition(masm);
- break;
- case UnaryOpIC::SMI:
- GenerateSmiStub(masm);
- break;
- case UnaryOpIC::HEAP_NUMBER:
- GenerateHeapNumberStub(masm);
- break;
- case UnaryOpIC::GENERIC:
- GenerateGenericStub(masm);
- break;
- }
-}
-
-
-void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
- __ mov(r3, Operand(r0)); // the operand
- __ mov(r2, Operand(Smi::FromInt(op_)));
- __ mov(r1, Operand(Smi::FromInt(mode_)));
- __ mov(r0, Operand(Smi::FromInt(operand_type_)));
- __ Push(r3, r2, r1, r0);
-
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kUnaryOp_Patch), masm->isolate()), 4, 1);
-}
-
-
-// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
- switch (op_) {
- case Token::SUB:
- GenerateSmiStubSub(masm);
- break;
- case Token::BIT_NOT:
- GenerateSmiStubBitNot(masm);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void UnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) {
- Label non_smi, slow;
- GenerateSmiCodeSub(masm, &non_smi, &slow);
- __ bind(&non_smi);
- __ bind(&slow);
- GenerateTypeTransition(masm);
-}
-
-
-void UnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) {
- Label non_smi;
- GenerateSmiCodeBitNot(masm, &non_smi);
- __ bind(&non_smi);
- GenerateTypeTransition(masm);
-}
-
-
-void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
- Label* non_smi,
- Label* slow) {
- __ JumpIfNotSmi(r0, non_smi);
-
- // The result of negating zero or the smallest negative smi is not a smi.
- __ bic(ip, r0, Operand(0x80000000), SetCC);
- __ b(eq, slow);
-
- // Return '0 - value'.
- __ rsb(r0, r0, Operand(0, RelocInfo::NONE));
- __ Ret();
-}
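
Editor's note: the deleted GenerateSmiCodeSub guarded negation with a single bit-clear: if clearing the sign bit of the tagged operand leaves zero, the operand is either smi 0 (whose negation, -0, is not a smi) or the most negative smi (whose negation overflows the smi range). A sketch of that predicate:

#include <cstdint>

// True when 0 - value still fits in a smi; false exactly for tagged 0 and
// tagged 0x80000000, the two cases the stub sent to the slow path.
static bool SmiNegationStaysSmi(uint32_t tagged_smi) {
  return (tagged_smi & ~0x80000000u) != 0;
}
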
-
-
-void UnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm,
- Label* non_smi) {
- __ JumpIfNotSmi(r0, non_smi);
-
- // Flip bits and revert inverted smi-tag.
- __ mvn(r0, Operand(r0));
- __ bic(r0, r0, Operand(kSmiTagMask));
- __ Ret();
-}
-
-
-// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
- switch (op_) {
- case Token::SUB:
- GenerateHeapNumberStubSub(masm);
- break;
- case Token::BIT_NOT:
- GenerateHeapNumberStubBitNot(masm);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void UnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) {
- Label non_smi, slow, call_builtin;
- GenerateSmiCodeSub(masm, &non_smi, &call_builtin);
- __ bind(&non_smi);
- GenerateHeapNumberCodeSub(masm, &slow);
- __ bind(&slow);
- GenerateTypeTransition(masm);
- __ bind(&call_builtin);
- GenerateGenericCodeFallback(masm);
-}
-
-
-void UnaryOpStub::GenerateHeapNumberStubBitNot(MacroAssembler* masm) {
- Label non_smi, slow;
- GenerateSmiCodeBitNot(masm, &non_smi);
- __ bind(&non_smi);
- GenerateHeapNumberCodeBitNot(masm, &slow);
- __ bind(&slow);
- GenerateTypeTransition(masm);
-}
-
-void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
- Label* slow) {
- EmitCheckForHeapNumber(masm, r0, r1, r6, slow);
- // r0 is a heap number. Get a new heap number in r1.
- if (mode_ == UNARY_OVERWRITE) {
- __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
- __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign.
- __ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
- } else {
- Label slow_allocate_heapnumber, heapnumber_allocated;
- __ AllocateHeapNumber(r1, r2, r3, r6, &slow_allocate_heapnumber);
- __ jmp(&heapnumber_allocated);
-
- __ bind(&slow_allocate_heapnumber);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(r0);
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- __ mov(r1, Operand(r0));
- __ pop(r0);
- }
-
- __ bind(&heapnumber_allocated);
- __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
- __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
- __ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset));
- __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign.
- __ str(r2, FieldMemOperand(r1, HeapNumber::kExponentOffset));
- __ mov(r0, Operand(r1));
- }
- __ Ret();
-}
-
-
-void UnaryOpStub::GenerateHeapNumberCodeBitNot(
- MacroAssembler* masm, Label* slow) {
- Label impossible;
-
- EmitCheckForHeapNumber(masm, r0, r1, r6, slow);
- // Convert the heap number is r0 to an untagged integer in r1.
- __ ConvertToInt32(r0, r1, r2, r3, d0, slow);
-
- // Do the bitwise operation and check if the result fits in a smi.
- Label try_float;
- __ mvn(r1, Operand(r1));
- __ add(r2, r1, Operand(0x40000000), SetCC);
- __ b(mi, &try_float);
-
- // Tag the result as a smi and we're done.
- __ mov(r0, Operand(r1, LSL, kSmiTagSize));
- __ Ret();
-
- // Try to store the result in a heap number.
- __ bind(&try_float);
- if (mode_ == UNARY_NO_OVERWRITE) {
- Label slow_allocate_heapnumber, heapnumber_allocated;
- // Allocate a new heap number without zapping r0, which we need if it fails.
- __ AllocateHeapNumber(r2, r3, r4, r6, &slow_allocate_heapnumber);
- __ jmp(&heapnumber_allocated);
-
- __ bind(&slow_allocate_heapnumber);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(r0); // Push the heap number, not the untagged int32.
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- __ mov(r2, r0); // Move the new heap number into r2.
- // Get the heap number into r0, now that the new heap number is in r2.
- __ pop(r0);
- }
-
- // Convert the heap number in r0 to an untagged integer in r1.
- // This can't go slow-case because it's the same number we already
- // converted once again.
- __ ConvertToInt32(r0, r1, r3, r4, d0, &impossible);
- __ mvn(r1, Operand(r1));
-
- __ bind(&heapnumber_allocated);
- __ mov(r0, r2); // Move newly allocated heap number to r0.
- }
-
- if (CpuFeatures::IsSupported(VFP2)) {
- // Convert the int32 in r1 to the heap number in r0. r2 is corrupted.
- CpuFeatures::Scope scope(VFP2);
- __ vmov(s0, r1);
- __ vcvt_f64_s32(d0, s0);
- __ sub(r2, r0, Operand(kHeapObjectTag));
- __ vstr(d0, r2, HeapNumber::kValueOffset);
- __ Ret();
- } else {
- // WriteInt32ToHeapNumberStub does not trigger GC, so we do not
- // have to set up a frame.
- WriteInt32ToHeapNumberStub stub(r1, r0, r2);
- __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
- }
-
- __ bind(&impossible);
- if (FLAG_debug_code) {
- __ stop("Incorrect assumption in bit-not stub");
- }
-}
-
-
-// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::GenerateGenericStub(MacroAssembler* masm) {
- switch (op_) {
- case Token::SUB:
- GenerateGenericStubSub(masm);
- break;
- case Token::BIT_NOT:
- GenerateGenericStubBitNot(masm);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void UnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) {
- Label non_smi, slow;
- GenerateSmiCodeSub(masm, &non_smi, &slow);
- __ bind(&non_smi);
- GenerateHeapNumberCodeSub(masm, &slow);
- __ bind(&slow);
- GenerateGenericCodeFallback(masm);
-}
-
-
-void UnaryOpStub::GenerateGenericStubBitNot(MacroAssembler* masm) {
- Label non_smi, slow;
- GenerateSmiCodeBitNot(masm, &non_smi);
- __ bind(&non_smi);
- GenerateHeapNumberCodeBitNot(masm, &slow);
- __ bind(&slow);
- GenerateGenericCodeFallback(masm);
-}
-
-
-void UnaryOpStub::GenerateGenericCodeFallback(MacroAssembler* masm) {
- // Handle the slow case by jumping to the JavaScript builtin.
- __ push(r0);
- switch (op_) {
- case Token::SUB:
- __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
- break;
- case Token::BIT_NOT:
- __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
- Label get_result;
-
- __ Push(r1, r0);
-
- __ mov(r2, Operand(Smi::FromInt(MinorKey())));
- __ mov(r1, Operand(Smi::FromInt(op_)));
- __ mov(r0, Operand(Smi::FromInt(operands_type_)));
- __ Push(r2, r1, r0);
-
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
- masm->isolate()),
- 5,
- 1);
-}
-
-
-void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(
- MacroAssembler* masm) {
- UNIMPLEMENTED();
-}
-
-
-void BinaryOpStub::Generate(MacroAssembler* masm) {
- // Explicitly allow generation of nested stubs. It is safe here because
- // generation code does not use any raw pointers.
- AllowStubCallsScope allow_stub_calls(masm, true);
-
- switch (operands_type_) {
- case BinaryOpIC::UNINITIALIZED:
- GenerateTypeTransition(masm);
- break;
- case BinaryOpIC::SMI:
- GenerateSmiStub(masm);
- break;
- case BinaryOpIC::INT32:
- GenerateInt32Stub(masm);
- break;
- case BinaryOpIC::HEAP_NUMBER:
- GenerateHeapNumberStub(masm);
- break;
- case BinaryOpIC::ODDBALL:
- GenerateOddballStub(masm);
- break;
- case BinaryOpIC::BOTH_STRING:
- GenerateBothStringStub(masm);
- break;
- case BinaryOpIC::STRING:
- GenerateStringStub(masm);
- break;
- case BinaryOpIC::GENERIC:
- GenerateGeneric(masm);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void BinaryOpStub::PrintName(StringStream* stream) {
- const char* op_name = Token::Name(op_);
- const char* overwrite_name;
- switch (mode_) {
- case NO_OVERWRITE: overwrite_name = "Alloc"; break;
- case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
- case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
- default: overwrite_name = "UnknownOverwrite"; break;
- }
- stream->Add("BinaryOpStub_%s_%s_%s",
- op_name,
- overwrite_name,
- BinaryOpIC::GetName(operands_type_));
-}
-
-
-void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) {
- Register left = r1;
- Register right = r0;
- Register scratch1 = r7;
- Register scratch2 = r9;
-
- ASSERT(right.is(r0));
- STATIC_ASSERT(kSmiTag == 0);
-
- Label not_smi_result;
- switch (op_) {
- case Token::ADD:
- __ add(right, left, Operand(right), SetCC); // Add optimistically.
- __ Ret(vc);
- __ sub(right, right, Operand(left)); // Revert optimistic add.
- break;
- case Token::SUB:
- __ sub(right, left, Operand(right), SetCC); // Subtract optimistically.
- __ Ret(vc);
- __ sub(right, left, Operand(right)); // Revert optimistic subtract.
- break;
- case Token::MUL:
- // Remove tag from one of the operands. This way the multiplication result
- // will be a smi if it fits the smi range.
- __ SmiUntag(ip, right);
- // Do multiplication
- // scratch1 = lower 32 bits of ip * left.
- // scratch2 = higher 32 bits of ip * left.
- __ smull(scratch1, scratch2, left, ip);
- // Check for overflowing the smi range - no overflow if higher 33 bits of
- // the result are identical.
- __ mov(ip, Operand(scratch1, ASR, 31));
- __ cmp(ip, Operand(scratch2));
- __ b(ne, &not_smi_result);
- // Go slow on zero result to handle -0.
- __ cmp(scratch1, Operand(0));
- __ mov(right, Operand(scratch1), LeaveCC, ne);
- __ Ret(ne);
- // We need -0 if we were multiplying a negative number with 0 to get 0.
- // We know one of them was zero.
- __ add(scratch2, right, Operand(left), SetCC);
- __ mov(right, Operand(Smi::FromInt(0)), LeaveCC, pl);
- __ Ret(pl); // Return smi 0 if the non-zero one was positive.
- // We fall through here if we multiplied a negative number with 0, because
- // that would mean we should produce -0.
- break;
- case Token::DIV:
- // Check for power of two on the right hand side.
- __ JumpIfNotPowerOfTwoOrZero(right, scratch1, &not_smi_result);
- // Check for positive and no remainder (scratch1 contains right - 1).
- __ orr(scratch2, scratch1, Operand(0x80000000u));
- __ tst(left, scratch2);
- __ b(ne, &not_smi_result);
-
- // Perform division by shifting.
- __ CountLeadingZeros(scratch1, scratch1, scratch2);
- __ rsb(scratch1, scratch1, Operand(31));
- __ mov(right, Operand(left, LSR, scratch1));
- __ Ret();
- break;
- case Token::MOD:
- // Check for two positive smis.
- __ orr(scratch1, left, Operand(right));
- __ tst(scratch1, Operand(0x80000000u | kSmiTagMask));
- __ b(ne, &not_smi_result);
-
- // Check for power of two on the right hand side.
- __ JumpIfNotPowerOfTwoOrZero(right, scratch1, &not_smi_result);
-
- // Perform modulus by masking.
- __ and_(right, left, Operand(scratch1));
- __ Ret();
- break;
- case Token::BIT_OR:
- __ orr(right, left, Operand(right));
- __ Ret();
- break;
- case Token::BIT_AND:
- __ and_(right, left, Operand(right));
- __ Ret();
- break;
- case Token::BIT_XOR:
- __ eor(right, left, Operand(right));
- __ Ret();
- break;
- case Token::SAR:
- // Remove tags from right operand.
- __ GetLeastBitsFromSmi(scratch1, right, 5);
- __ mov(right, Operand(left, ASR, scratch1));
- // Smi tag result.
- __ bic(right, right, Operand(kSmiTagMask));
- __ Ret();
- break;
- case Token::SHR:
- // Remove tags from operands. We can't do this on a 31 bit number
- // because then the 0s get shifted into bit 30 instead of bit 31.
- __ SmiUntag(scratch1, left);
- __ GetLeastBitsFromSmi(scratch2, right, 5);
- __ mov(scratch1, Operand(scratch1, LSR, scratch2));
- // Unsigned shift is not allowed to produce a negative number, so
- // check the sign bit and the sign bit after Smi tagging.
- __ tst(scratch1, Operand(0xc0000000));
- __ b(ne, &not_smi_result);
- // Smi tag result.
- __ SmiTag(right, scratch1);
- __ Ret();
- break;
- case Token::SHL:
- // Remove tags from operands.
- __ SmiUntag(scratch1, left);
- __ GetLeastBitsFromSmi(scratch2, right, 5);
- __ mov(scratch1, Operand(scratch1, LSL, scratch2));
- // Check that the signed result fits in a Smi.
- __ add(scratch2, scratch1, Operand(0x40000000), SetCC);
- __ b(mi, &not_smi_result);
- __ SmiTag(right, scratch1);
- __ Ret();
- break;
- default:
- UNREACHABLE();
- }
- __ bind(&not_smi_result);
-}
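
Editor's note: among the removed smi-smi fast paths, the DIV case is the least obvious: it only handles a power-of-two divisor, a non-negative dividend and a zero remainder, in which case the division becomes a right shift by log2(divisor). A sketch on untagged values (the stub performed the equivalent steps directly on the tagged representations):

#include <cstdint>

// Returns false when the fast path does not apply (divisor not a positive
// power of two, dividend negative, or a non-zero remainder), mirroring the
// stub's jumps to not_smi_result.
static bool TryFastSmiDivide(int32_t left, int32_t right, int32_t* result) {
  if (right <= 0 || (right & (right - 1)) != 0) return false;
  int32_t mask = right - 1;
  if (left < 0 || (left & mask) != 0) return false;
  int shift = 0;
  while ((1 << shift) != right) ++shift;  // shift == log2(right)
  *result = left >> shift;
  return true;
}
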
-
-
-void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
- bool smi_operands,
- Label* not_numbers,
- Label* gc_required) {
- Register left = r1;
- Register right = r0;
- Register scratch1 = r7;
- Register scratch2 = r9;
- Register scratch3 = r4;
-
- ASSERT(smi_operands || (not_numbers != NULL));
- if (smi_operands) {
- __ AssertSmi(left);
- __ AssertSmi(right);
- }
-
- Register heap_number_map = r6;
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD: {
- // Load left and right operands into d6 and d7 or r0/r1 and r2/r3
- // depending on whether VFP3 is available or not.
- FloatingPointHelper::Destination destination =
- CpuFeatures::IsSupported(VFP2) &&
- op_ != Token::MOD ?
- FloatingPointHelper::kVFPRegisters :
- FloatingPointHelper::kCoreRegisters;
-
- // Allocate new heap number for result.
- Register result = r5;
- GenerateHeapResultAllocation(
- masm, result, heap_number_map, scratch1, scratch2, gc_required);
-
- // Load the operands.
- if (smi_operands) {
- FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2);
- } else {
- FloatingPointHelper::LoadOperands(masm,
- destination,
- heap_number_map,
- scratch1,
- scratch2,
- not_numbers);
- }
-
- // Calculate the result.
- if (destination == FloatingPointHelper::kVFPRegisters) {
- // Using VFP registers:
- // d6: Left value
- // d7: Right value
- CpuFeatures::Scope scope(VFP2);
- switch (op_) {
- case Token::ADD:
- __ vadd(d5, d6, d7);
- break;
- case Token::SUB:
- __ vsub(d5, d6, d7);
- break;
- case Token::MUL:
- __ vmul(d5, d6, d7);
- break;
- case Token::DIV:
- __ vdiv(d5, d6, d7);
- break;
- default:
- UNREACHABLE();
- }
-
- __ sub(r0, result, Operand(kHeapObjectTag));
- __ vstr(d5, r0, HeapNumber::kValueOffset);
- __ add(r0, r0, Operand(kHeapObjectTag));
- __ Ret();
- } else {
- // Call the C function to handle the double operation.
- FloatingPointHelper::CallCCodeForDoubleOperation(masm,
- op_,
- result,
- scratch1);
- if (FLAG_debug_code) {
- __ stop("Unreachable code.");
- }
- }
- break;
- }
- case Token::BIT_OR:
- case Token::BIT_XOR:
- case Token::BIT_AND:
- case Token::SAR:
- case Token::SHR:
- case Token::SHL: {
- if (smi_operands) {
- __ SmiUntag(r3, left);
- __ SmiUntag(r2, right);
- } else {
- // Convert operands to 32-bit integers. Right in r2 and left in r3.
- FloatingPointHelper::ConvertNumberToInt32(masm,
- left,
- r3,
- heap_number_map,
- scratch1,
- scratch2,
- scratch3,
- d0,
- not_numbers);
- FloatingPointHelper::ConvertNumberToInt32(masm,
- right,
- r2,
- heap_number_map,
- scratch1,
- scratch2,
- scratch3,
- d0,
- not_numbers);
- }
-
- Label result_not_a_smi;
- switch (op_) {
- case Token::BIT_OR:
- __ orr(r2, r3, Operand(r2));
- break;
- case Token::BIT_XOR:
- __ eor(r2, r3, Operand(r2));
- break;
- case Token::BIT_AND:
- __ and_(r2, r3, Operand(r2));
- break;
- case Token::SAR:
- // Use only the 5 least significant bits of the shift count.
- __ GetLeastBitsFromInt32(r2, r2, 5);
- __ mov(r2, Operand(r3, ASR, r2));
- break;
- case Token::SHR:
- // Use only the 5 least significant bits of the shift count.
- __ GetLeastBitsFromInt32(r2, r2, 5);
- __ mov(r2, Operand(r3, LSR, r2), SetCC);
- // SHR is special because it is required to produce a positive answer.
- // The code below for writing into heap numbers isn't capable of
- // writing the register as an unsigned int so we go to slow case if we
- // hit this case.
- if (CpuFeatures::IsSupported(VFP2)) {
- __ b(mi, &result_not_a_smi);
- } else {
- __ b(mi, not_numbers);
- }
- break;
- case Token::SHL:
- // Use only the 5 least significant bits of the shift count.
- __ GetLeastBitsFromInt32(r2, r2, 5);
- __ mov(r2, Operand(r3, LSL, r2));
- break;
- default:
- UNREACHABLE();
- }
-
- // Check that the *signed* result fits in a smi.
- __ add(r3, r2, Operand(0x40000000), SetCC);
- __ b(mi, &result_not_a_smi);
- __ SmiTag(r0, r2);
- __ Ret();
-
- // Allocate new heap number for result.
- __ bind(&result_not_a_smi);
- Register result = r5;
- if (smi_operands) {
- __ AllocateHeapNumber(
- result, scratch1, scratch2, heap_number_map, gc_required);
- } else {
- GenerateHeapResultAllocation(
- masm, result, heap_number_map, scratch1, scratch2, gc_required);
- }
-
- // r2: Answer as signed int32.
- // r5: Heap number to write answer into.
-
- // Nothing can go wrong now, so move the heap number to r0, which is the
- // result.
- __ mov(r0, Operand(r5));
-
- if (CpuFeatures::IsSupported(VFP2)) {
- // Convert the int32 in r2 to the heap number in r0. r3 is corrupted. As
- // mentioned above SHR needs to always produce a positive result.
- CpuFeatures::Scope scope(VFP2);
- __ vmov(s0, r2);
- if (op_ == Token::SHR) {
- __ vcvt_f64_u32(d0, s0);
- } else {
- __ vcvt_f64_s32(d0, s0);
- }
- __ sub(r3, r0, Operand(kHeapObjectTag));
- __ vstr(d0, r3, HeapNumber::kValueOffset);
- __ Ret();
- } else {
- // Tail call that writes the int32 in r2 to the heap number in r0, using
- // r3 as scratch. r0 is preserved and returned.
- WriteInt32ToHeapNumberStub stub(r2, r0, r3);
- __ TailCallStub(&stub);
- }
- break;
- }
- default:
- UNREACHABLE();
- }
-}
-
-
-// Generate the smi code. If the operation on smis are successful this return is
-// generated. If the result is not a smi and heap number allocation is not
-// requested the code falls through. If number allocation is requested but a
-// heap number cannot be allocated the code jumps to the lable gc_required.
-void BinaryOpStub::GenerateSmiCode(
- MacroAssembler* masm,
- Label* use_runtime,
- Label* gc_required,
- SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
- Label not_smis;
-
- Register left = r1;
- Register right = r0;
- Register scratch1 = r7;
-
- // Perform combined smi check on both operands.
- __ orr(scratch1, left, Operand(right));
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfNotSmi(scratch1, &not_smis);
-
- // If the smi-smi operation results in a smi return is generated.
- GenerateSmiSmiOperation(masm);
-
- // If heap number results are possible generate the result in an allocated
- // heap number.
- if (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) {
- GenerateFPOperation(masm, true, use_runtime, gc_required);
- }
- __ bind(&not_smis);
-}
-
-
-void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
- Label not_smis, call_runtime;
-
- if (result_type_ == BinaryOpIC::UNINITIALIZED ||
- result_type_ == BinaryOpIC::SMI) {
- // Only allow smi results.
- GenerateSmiCode(masm, &call_runtime, NULL, NO_HEAPNUMBER_RESULTS);
- } else {
- // Allow heap number result and don't make a transition if a heap number
- // cannot be allocated.
- GenerateSmiCode(masm,
- &call_runtime,
- &call_runtime,
- ALLOW_HEAPNUMBER_RESULTS);
- }
-
- // Code falls through if the result is not returned as either a smi or heap
- // number.
- GenerateTypeTransition(masm);
-
- __ bind(&call_runtime);
- GenerateCallRuntime(masm);
-}
-
-
-void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
- ASSERT(operands_type_ == BinaryOpIC::STRING);
- ASSERT(op_ == Token::ADD);
- // Try to add arguments as strings, otherwise, transition to the generic
- // BinaryOpIC type.
- GenerateAddStrings(masm);
- GenerateTypeTransition(masm);
-}
-
-
-void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
- Label call_runtime;
- ASSERT(operands_type_ == BinaryOpIC::BOTH_STRING);
- ASSERT(op_ == Token::ADD);
- // If both arguments are strings, call the string add stub.
- // Otherwise, do a transition.
-
- // Registers containing left and right operands respectively.
- Register left = r1;
- Register right = r0;
-
- // Test if left operand is a string.
- __ JumpIfSmi(left, &call_runtime);
- __ CompareObjectType(left, r2, r2, FIRST_NONSTRING_TYPE);
- __ b(ge, &call_runtime);
-
- // Test if right operand is a string.
- __ JumpIfSmi(right, &call_runtime);
- __ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE);
- __ b(ge, &call_runtime);
-
- StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
- GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_stub);
-
- __ bind(&call_runtime);
- GenerateTypeTransition(masm);
-}
-
-
-void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
- ASSERT(operands_type_ == BinaryOpIC::INT32);
-
- Register left = r1;
- Register right = r0;
- Register scratch1 = r7;
- Register scratch2 = r9;
- DwVfpRegister double_scratch = d0;
-
- Register heap_number_result = no_reg;
- Register heap_number_map = r6;
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-
- Label call_runtime;
- // Labels for type transition, used for wrong input or output types.
- // Both label are currently actually bound to the same position. We use two
- // different label to differentiate the cause leading to type transition.
- Label transition;
-
- // Smi-smi fast case.
- Label skip;
- __ orr(scratch1, left, right);
- __ JumpIfNotSmi(scratch1, &skip);
- GenerateSmiSmiOperation(masm);
- // Fall through if the result is not a smi.
- __ bind(&skip);
-
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD: {
- // Load both operands and check that they are 32-bit integer.
- // Jump to type transition if they are not. The registers r0 and r1 (right
- // and left) are preserved for the runtime call.
- FloatingPointHelper::Destination destination =
- (CpuFeatures::IsSupported(VFP2) && op_ != Token::MOD)
- ? FloatingPointHelper::kVFPRegisters
- : FloatingPointHelper::kCoreRegisters;
-
- FloatingPointHelper::LoadNumberAsInt32Double(masm,
- right,
- destination,
- d7,
- d8,
- r2,
- r3,
- heap_number_map,
- scratch1,
- scratch2,
- s0,
- &transition);
- FloatingPointHelper::LoadNumberAsInt32Double(masm,
- left,
- destination,
- d6,
- d8,
- r4,
- r5,
- heap_number_map,
- scratch1,
- scratch2,
- s0,
- &transition);
-
- if (destination == FloatingPointHelper::kVFPRegisters) {
- CpuFeatures::Scope scope(VFP2);
- Label return_heap_number;
- switch (op_) {
- case Token::ADD:
- __ vadd(d5, d6, d7);
- break;
- case Token::SUB:
- __ vsub(d5, d6, d7);
- break;
- case Token::MUL:
- __ vmul(d5, d6, d7);
- break;
- case Token::DIV:
- __ vdiv(d5, d6, d7);
- break;
- default:
- UNREACHABLE();
- }
-
- if (op_ != Token::DIV) {
- // These operations produce an integer result.
- // Try to return a smi if we can.
- // Otherwise return a heap number if allowed, or jump to type
- // transition.
-
- __ EmitVFPTruncate(kRoundToZero,
- scratch1,
- d5,
- scratch2,
- d8);
-
- if (result_type_ <= BinaryOpIC::INT32) {
- // If the ne condition is set, result does
- // not fit in a 32-bit integer.
- __ b(ne, &transition);
- }
-
- // Check if the result fits in a smi.
- __ add(scratch2, scratch1, Operand(0x40000000), SetCC);
- // If not try to return a heap number.
- __ b(mi, &return_heap_number);
- // Check for minus zero. Return heap number for minus zero.
- Label not_zero;
- __ cmp(scratch1, Operand::Zero());
- __ b(ne, &not_zero);
- __ vmov(scratch2, d5.high());
- __ tst(scratch2, Operand(HeapNumber::kSignMask));
- __ b(ne, &return_heap_number);
- __ bind(&not_zero);
-
- // Tag the result and return.
- __ SmiTag(r0, scratch1);
- __ Ret();
- } else {
- // DIV just falls through to allocating a heap number.
- }
-
- __ bind(&return_heap_number);
- // Return a heap number, or fall through to type transition or runtime
- // call if we can't.
- if (result_type_ >= ((op_ == Token::DIV) ? BinaryOpIC::HEAP_NUMBER
- : BinaryOpIC::INT32)) {
- // We are using vfp registers so r5 is available.
- heap_number_result = r5;
- GenerateHeapResultAllocation(masm,
- heap_number_result,
- heap_number_map,
- scratch1,
- scratch2,
- &call_runtime);
- __ sub(r0, heap_number_result, Operand(kHeapObjectTag));
- __ vstr(d5, r0, HeapNumber::kValueOffset);
- __ mov(r0, heap_number_result);
- __ Ret();
- }
-
- // A DIV operation expecting an integer result falls through
- // to type transition.
-
- } else {
- // We preserved r0 and r1 to be able to call runtime.
- // Save the left value on the stack.
- __ Push(r5, r4);
-
- Label pop_and_call_runtime;
-
- // Allocate a heap number to store the result.
- heap_number_result = r5;
- GenerateHeapResultAllocation(masm,
- heap_number_result,
- heap_number_map,
- scratch1,
- scratch2,
- &pop_and_call_runtime);
-
- // Load the left value from the value saved on the stack.
- __ Pop(r1, r0);
-
- // Call the C function to handle the double operation.
- FloatingPointHelper::CallCCodeForDoubleOperation(
- masm, op_, heap_number_result, scratch1);
- if (FLAG_debug_code) {
- __ stop("Unreachable code.");
- }
-
- __ bind(&pop_and_call_runtime);
- __ Drop(2);
- __ b(&call_runtime);
- }
-
- break;
- }
-
- case Token::BIT_OR:
- case Token::BIT_XOR:
- case Token::BIT_AND:
- case Token::SAR:
- case Token::SHR:
- case Token::SHL: {
- Label return_heap_number;
- Register scratch3 = r5;
- // Convert operands to 32-bit integers. Right in r2 and left in r3. The
- // registers r0 and r1 (right and left) are preserved for the runtime
- // call.
- FloatingPointHelper::LoadNumberAsInt32(masm,
- left,
- r3,
- heap_number_map,
- scratch1,
- scratch2,
- scratch3,
- d0,
- d1,
- &transition);
- FloatingPointHelper::LoadNumberAsInt32(masm,
- right,
- r2,
- heap_number_map,
- scratch1,
- scratch2,
- scratch3,
- d0,
- d1,
- &transition);
-
- // The ECMA-262 standard specifies that, for shift operations, only the
- // 5 least significant bits of the shift value should be used.
- switch (op_) {
- case Token::BIT_OR:
- __ orr(r2, r3, Operand(r2));
- break;
- case Token::BIT_XOR:
- __ eor(r2, r3, Operand(r2));
- break;
- case Token::BIT_AND:
- __ and_(r2, r3, Operand(r2));
- break;
- case Token::SAR:
- __ and_(r2, r2, Operand(0x1f));
- __ mov(r2, Operand(r3, ASR, r2));
- break;
- case Token::SHR:
- __ and_(r2, r2, Operand(0x1f));
- __ mov(r2, Operand(r3, LSR, r2), SetCC);
- // SHR is special because it is required to produce a positive answer.
- // We only get a negative result if the shift value (r2) is 0.
- // This result cannot be respresented as a signed 32-bit integer, try
- // to return a heap number if we can.
- // The non vfp2 code does not support this special case, so jump to
- // runtime if we don't support it.
- if (CpuFeatures::IsSupported(VFP2)) {
- __ b(mi, (result_type_ <= BinaryOpIC::INT32)
- ? &transition
- : &return_heap_number);
- } else {
- __ b(mi, (result_type_ <= BinaryOpIC::INT32)
- ? &transition
- : &call_runtime);
- }
- break;
- case Token::SHL:
- __ and_(r2, r2, Operand(0x1f));
- __ mov(r2, Operand(r3, LSL, r2));
- break;
- default:
- UNREACHABLE();
- }
-
- // Check if the result fits in a smi.
- __ add(scratch1, r2, Operand(0x40000000), SetCC);
- // If not try to return a heap number. (We know the result is an int32.)
- __ b(mi, &return_heap_number);
- // Tag the result and return.
- __ SmiTag(r0, r2);
- __ Ret();
-
- __ bind(&return_heap_number);
- heap_number_result = r5;
- GenerateHeapResultAllocation(masm,
- heap_number_result,
- heap_number_map,
- scratch1,
- scratch2,
- &call_runtime);
-
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- if (op_ != Token::SHR) {
- // Convert the result to a floating point value.
- __ vmov(double_scratch.low(), r2);
- __ vcvt_f64_s32(double_scratch, double_scratch.low());
- } else {
- // The result must be interpreted as an unsigned 32-bit integer.
- __ vmov(double_scratch.low(), r2);
- __ vcvt_f64_u32(double_scratch, double_scratch.low());
- }
-
- // Store the result.
- __ sub(r0, heap_number_result, Operand(kHeapObjectTag));
- __ vstr(double_scratch, r0, HeapNumber::kValueOffset);
- __ mov(r0, heap_number_result);
- __ Ret();
- } else {
- // Tail call that writes the int32 in r2 to the heap number in r0, using
- // r3 as scratch. r0 is preserved and returned.
- __ mov(r0, r5);
- WriteInt32ToHeapNumberStub stub(r2, r0, r3);
- __ TailCallStub(&stub);
- }
-
- break;
- }
-
- default:
- UNREACHABLE();
- }
-
- // We never expect DIV to yield an integer result, so we always generate
- // type transition code for DIV operations expecting an integer result: the
- // code will fall through to this type transition.
- if (transition.is_linked() ||
- ((op_ == Token::DIV) && (result_type_ <= BinaryOpIC::INT32))) {
- __ bind(&transition);
- GenerateTypeTransition(masm);
- }
-
- __ bind(&call_runtime);
- GenerateCallRuntime(masm);
-}
-
-
-void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
- Label call_runtime;
-
- if (op_ == Token::ADD) {
- // Handle string addition here, because it is the only operation
- // that does not do a ToNumber conversion on the operands.
- GenerateAddStrings(masm);
- }
-
- // Convert oddball arguments to numbers.
- Label check, done;
- __ CompareRoot(r1, Heap::kUndefinedValueRootIndex);
- __ b(ne, &check);
- if (Token::IsBitOp(op_)) {
- __ mov(r1, Operand(Smi::FromInt(0)));
- } else {
- __ LoadRoot(r1, Heap::kNanValueRootIndex);
- }
- __ jmp(&done);
- __ bind(&check);
- __ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
- __ b(ne, &done);
- if (Token::IsBitOp(op_)) {
- __ mov(r0, Operand(Smi::FromInt(0)));
- } else {
- __ LoadRoot(r0, Heap::kNanValueRootIndex);
- }
- __ bind(&done);
-
- GenerateHeapNumberStub(masm);
-}
-
-
-void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
- Label call_runtime;
- GenerateFPOperation(masm, false, &call_runtime, &call_runtime);
-
- __ bind(&call_runtime);
- GenerateCallRuntime(masm);
-}
-
-
-void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
- Label call_runtime, call_string_add_or_runtime;
-
- GenerateSmiCode(masm, &call_runtime, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
-
- GenerateFPOperation(masm, false, &call_string_add_or_runtime, &call_runtime);
-
- __ bind(&call_string_add_or_runtime);
- if (op_ == Token::ADD) {
- GenerateAddStrings(masm);
- }
-
- __ bind(&call_runtime);
- GenerateCallRuntime(masm);
-}
-
-
-void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
- ASSERT(op_ == Token::ADD);
- Label left_not_string, call_runtime;
-
- Register left = r1;
- Register right = r0;
-
- // Check if left argument is a string.
- __ JumpIfSmi(left, &left_not_string);
- __ CompareObjectType(left, r2, r2, FIRST_NONSTRING_TYPE);
- __ b(ge, &left_not_string);
-
- StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
- GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_left_stub);
-
- // Left operand is not a string, test right.
- __ bind(&left_not_string);
- __ JumpIfSmi(right, &call_runtime);
- __ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE);
- __ b(ge, &call_runtime);
-
- StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
- GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_right_stub);
-
- // At least one argument is not a string.
- __ bind(&call_runtime);
-}
-
-
-void BinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
- GenerateRegisterArgsPush(masm);
- switch (op_) {
- case Token::ADD:
- __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
- break;
- case Token::SUB:
- __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
- break;
- case Token::MUL:
- __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
- break;
- case Token::DIV:
- __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
- break;
- case Token::MOD:
- __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
- break;
- case Token::BIT_OR:
- __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
- break;
- case Token::BIT_AND:
- __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
- break;
- case Token::BIT_XOR:
- __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
- break;
- case Token::SAR:
- __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
- break;
- case Token::SHR:
- __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
- break;
- case Token::SHL:
- __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void BinaryOpStub::GenerateHeapResultAllocation(MacroAssembler* masm,
- Register result,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- // Code below will scratch result if allocation fails. To keep both arguments
- // intact for the runtime call result cannot be one of these.
- ASSERT(!result.is(r0) && !result.is(r1));
-
- if (mode_ == OVERWRITE_LEFT || mode_ == OVERWRITE_RIGHT) {
- Label skip_allocation, allocated;
- Register overwritable_operand = mode_ == OVERWRITE_LEFT ? r1 : r0;
- // If the overwritable operand is already an object, we skip the
- // allocation of a heap number.
- __ JumpIfNotSmi(overwritable_operand, &skip_allocation);
- // Allocate a heap number for the result.
- __ AllocateHeapNumber(
- result, scratch1, scratch2, heap_number_map, gc_required);
- __ b(&allocated);
- __ bind(&skip_allocation);
- // Use object holding the overwritable operand for result.
- __ mov(result, Operand(overwritable_operand));
- __ bind(&allocated);
- } else {
- ASSERT(mode_ == NO_OVERWRITE);
- __ AllocateHeapNumber(
- result, scratch1, scratch2, heap_number_map, gc_required);
- }
-}
-
-
-void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
- __ Push(r1, r0);
-}
-
-
void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
// Untagged case: double input in d2, double result goes
// into d2.
@@ -3331,105 +1209,105 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
Label calculate;
Label invalid_cache;
const Register scratch0 = r9;
- const Register scratch1 = r7;
+ Register scratch1 = no_reg; // will be r4
const Register cache_entry = r0;
const bool tagged = (argument_type_ == TAGGED);
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- if (tagged) {
- // Argument is a number and is on stack and in r0.
- // Load argument and check if it is a smi.
- __ JumpIfNotSmi(r0, &input_not_smi);
-
- // Input is a smi. Convert to double and load the low and high words
- // of the double into r2, r3.
- __ IntegerToDoubleConversionWithVFP3(r0, r3, r2);
- __ b(&loaded);
-
- __ bind(&input_not_smi);
- // Check if input is a HeapNumber.
- __ CheckMap(r0,
- r1,
- Heap::kHeapNumberMapRootIndex,
- &calculate,
- DONT_DO_SMI_CHECK);
- // Input is a HeapNumber. Load it to a double register and store the
- // low and high words into r2, r3.
- __ vldr(d0, FieldMemOperand(r0, HeapNumber::kValueOffset));
- __ vmov(r2, r3, d0);
- } else {
- // Input is untagged double in d2. Output goes to d2.
- __ vmov(r2, r3, d2);
- }
- __ bind(&loaded);
- // r2 = low 32 bits of double value
- // r3 = high 32 bits of double value
- // Compute hash (the shifts are arithmetic):
- // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
- __ eor(r1, r2, Operand(r3));
- __ eor(r1, r1, Operand(r1, ASR, 16));
- __ eor(r1, r1, Operand(r1, ASR, 8));
- ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
- __ And(r1, r1, Operand(TranscendentalCache::SubCache::kCacheSize - 1));
-
- // r2 = low 32 bits of double value.
- // r3 = high 32 bits of double value.
- // r1 = TranscendentalCache::hash(double value).
- Isolate* isolate = masm->isolate();
- ExternalReference cache_array =
- ExternalReference::transcendental_cache_array_address(isolate);
- __ mov(cache_entry, Operand(cache_array));
- // cache_entry points to cache array.
- int cache_array_index
- = type_ * sizeof(isolate->transcendental_cache()->caches_[0]);
- __ ldr(cache_entry, MemOperand(cache_entry, cache_array_index));
- // r0 points to the cache for the type type_.
- // If NULL, the cache hasn't been initialized yet, so go through runtime.
- __ cmp(cache_entry, Operand(0, RelocInfo::NONE));
- __ b(eq, &invalid_cache);
+ if (tagged) {
+ // Argument is a number and is on stack and in r0.
+ // Load argument and check if it is a smi.
+ __ JumpIfNotSmi(r0, &input_not_smi);
+
+ // Input is a smi. Convert to double and load the low and high words
+ // of the double into r2, r3.
+ __ SmiToDouble(d7, r0);
+ __ vmov(r2, r3, d7);
+ __ b(&loaded);
+
+ __ bind(&input_not_smi);
+ // Check if input is a HeapNumber.
+ __ CheckMap(r0,
+ r1,
+ Heap::kHeapNumberMapRootIndex,
+ &calculate,
+ DONT_DO_SMI_CHECK);
+ // Input is a HeapNumber. Load it to a double register and store the
+ // low and high words into r2, r3.
+ __ vldr(d0, FieldMemOperand(r0, HeapNumber::kValueOffset));
+ __ vmov(r2, r3, d0);
+ } else {
+ // Input is untagged double in d2. Output goes to d2.
+ __ vmov(r2, r3, d2);
+ }
+ __ bind(&loaded);
+ // r2 = low 32 bits of double value
+ // r3 = high 32 bits of double value
+ // Compute hash (the shifts are arithmetic):
+ // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
+ __ eor(r1, r2, Operand(r3));
+ __ eor(r1, r1, Operand(r1, ASR, 16));
+ __ eor(r1, r1, Operand(r1, ASR, 8));
+ ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
+ __ And(r1, r1, Operand(TranscendentalCache::SubCache::kCacheSize - 1));
+
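The eor/And sequence above is the transcendental-cache hash noted in the comments; as a plain C++ sketch (cache_size stands for the power-of-two kCacheSize asserted above, and the int32_t casts keep the shifts arithmetic, as the comment requires; the function name is this sketch's own):

    static uint32_t CacheHash(uint32_t low, uint32_t high, uint32_t cache_size) {
      uint32_t h = low ^ high;
      h ^= static_cast<uint32_t>(static_cast<int32_t>(h) >> 16);  // arithmetic shift
      h ^= static_cast<uint32_t>(static_cast<int32_t>(h) >> 8);
      return h & (cache_size - 1);  // cache_size must be a power of two
    }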
+ // r2 = low 32 bits of double value.
+ // r3 = high 32 bits of double value.
+ // r1 = TranscendentalCache::hash(double value).
+ Isolate* isolate = masm->isolate();
+ ExternalReference cache_array =
+ ExternalReference::transcendental_cache_array_address(isolate);
+ __ mov(cache_entry, Operand(cache_array));
+ // cache_entry points to cache array.
+ int cache_array_index
+ = type_ * sizeof(isolate->transcendental_cache()->caches_[0]);
+ __ ldr(cache_entry, MemOperand(cache_entry, cache_array_index));
+ // r0 points to the cache for the type type_.
+ // If NULL, the cache hasn't been initialized yet, so go through runtime.
+ __ cmp(cache_entry, Operand::Zero());
+ __ b(eq, &invalid_cache);
#ifdef DEBUG
- // Check that the layout of cache elements match expectations.
- { TranscendentalCache::SubCache::Element test_elem[2];
- char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
- char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
- char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
- char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
- char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
- CHECK_EQ(12, elem2_start - elem_start); // Two uint_32's and a pointer.
- CHECK_EQ(0, elem_in0 - elem_start);
- CHECK_EQ(kIntSize, elem_in1 - elem_start);
- CHECK_EQ(2 * kIntSize, elem_out - elem_start);
- }
+  // Check that the layout of cache elements matches expectations.
+ { TranscendentalCache::SubCache::Element test_elem[2];
+ char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
+ char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
+ char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
+ char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
+ char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
+    CHECK_EQ(12, elem2_start - elem_start);  // Two uint32_t's and a pointer.
+ CHECK_EQ(0, elem_in0 - elem_start);
+ CHECK_EQ(kIntSize, elem_in1 - elem_start);
+ CHECK_EQ(2 * kIntSize, elem_out - elem_start);
+ }
#endif
- // Find the address of the r1'st entry in the cache, i.e., &r0[r1*12].
- __ add(r1, r1, Operand(r1, LSL, 1));
- __ add(cache_entry, cache_entry, Operand(r1, LSL, 2));
- // Check if cache matches: Double value is stored in uint32_t[2] array.
- __ ldm(ia, cache_entry, r4.bit() | r5.bit() | r6.bit());
- __ cmp(r2, r4);
- __ cmp(r3, r5, eq);
- __ b(ne, &calculate);
- // Cache hit. Load result, cleanup and return.
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(
- counters->transcendental_cache_hit(), 1, scratch0, scratch1);
- if (tagged) {
- // Pop input value from stack and load result into r0.
- __ pop();
- __ mov(r0, Operand(r6));
- } else {
- // Load result into d2.
- __ vldr(d2, FieldMemOperand(r6, HeapNumber::kValueOffset));
- }
- __ Ret();
- } // if (CpuFeatures::IsSupported(VFP3))
+ // Find the address of the r1'st entry in the cache, i.e., &r0[r1*12].
+ __ add(r1, r1, Operand(r1, LSL, 1));
+ __ add(cache_entry, cache_entry, Operand(r1, LSL, 2));
+ // Check if cache matches: Double value is stored in uint32_t[2] array.
+ __ ldm(ia, cache_entry, r4.bit() | r5.bit() | r6.bit());
+ __ cmp(r2, r4);
+ __ cmp(r3, r5, eq);
+ __ b(ne, &calculate);
- __ bind(&calculate);
+ scratch1 = r4; // Start of scratch1 range.
+
+ // Cache hit. Load result, cleanup and return.
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(
+ counters->transcendental_cache_hit(), 1, scratch0, scratch1);
+ if (tagged) {
+ // Pop input value from stack and load result into r0.
+ __ pop();
+ __ mov(r0, Operand(r6));
+ } else {
+ // Load result into d2.
+ __ vldr(d2, FieldMemOperand(r6, HeapNumber::kValueOffset));
+ }
+ __ Ret();
+
+ __ bind(&calculate);
+ __ IncrementCounter(
counters->transcendental_cache_miss(), 1, scratch0, scratch1);
if (tagged) {
__ bind(&invalid_cache);
@@ -3437,9 +1315,6 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
ExternalReference(RuntimeFunction(), masm->isolate());
__ TailCallExternalReference(runtime_function, 1, 1);
} else {
- ASSERT(CpuFeatures::IsSupported(VFP2));
- CpuFeatures::Scope scope(VFP2);
-
Label no_update;
Label skip_cache;
@@ -3499,7 +1374,6 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm,
Register scratch) {
- ASSERT(CpuFeatures::IsEnabled(VFP2));
Isolate* isolate = masm->isolate();
__ push(lr);
@@ -3549,29 +1423,18 @@ Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
}
-void StackCheckStub::Generate(MacroAssembler* masm) {
- __ TailCallRuntime(Runtime::kStackGuard, 0, 1);
-}
-
-
-void InterruptStub::Generate(MacroAssembler* masm) {
- __ TailCallRuntime(Runtime::kInterrupt, 0, 1);
-}
-
-
void MathPowStub::Generate(MacroAssembler* masm) {
- CpuFeatures::Scope vfp2_scope(VFP2);
const Register base = r1;
const Register exponent = r2;
const Register heapnumbermap = r5;
const Register heapnumber = r0;
- const DoubleRegister double_base = d1;
- const DoubleRegister double_exponent = d2;
- const DoubleRegister double_result = d3;
- const DoubleRegister double_scratch = d0;
+ const DwVfpRegister double_base = d1;
+ const DwVfpRegister double_exponent = d2;
+ const DwVfpRegister double_result = d3;
+ const DwVfpRegister double_scratch = d0;
const SwVfpRegister single_scratch = s0;
const Register scratch = r9;
- const Register scratch2 = r7;
+ const Register scratch2 = r4;
Label call_runtime, done, int_exponent;
if (exponent_type_ == ON_STACK) {
@@ -3697,8 +1560,8 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ vmov(double_result, 1.0, scratch2);
// Get absolute value of exponent.
- __ cmp(scratch, Operand(0));
- __ mov(scratch2, Operand(0), LeaveCC, mi);
+ __ cmp(scratch, Operand::Zero());
+ __ mov(scratch2, Operand::Zero(), LeaveCC, mi);
__ sub(scratch, scratch2, scratch, LeaveCC, mi);
Label while_true;
@@ -3708,7 +1571,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ vmul(double_scratch, double_scratch, double_scratch, ne);
__ b(ne, &while_true);
- __ cmp(exponent, Operand(0));
+ __ cmp(exponent, Operand::Zero());
__ b(ge, &done);
__ vmov(double_scratch, 1.0, scratch);
__ vdiv(double_result, double_scratch, double_result);
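The integer fast path in this hunk is exponentiation by squaring, with a final reciprocal for negative exponents; a minimal scalar sketch of the same loop (names are this sketch's own):

    static double PowInteger(double base, int exponent) {
      double result = 1.0;
      double scratch = base;
      int bits = exponent < 0 ? -exponent : exponent;  // |exponent|
      while (bits != 0) {
        if (bits & 1) result *= scratch;  // fold in the current bit
        scratch *= scratch;               // square for the next bit
        bits >>= 1;
      }
      return exponent < 0 ? 1.0 / result : result;
    }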
@@ -3763,37 +1626,64 @@ bool CEntryStub::NeedsImmovableCode() {
}
-bool CEntryStub::IsPregenerated() {
- return (!save_doubles_ || ISOLATE->fp_stubs_generated()) &&
+bool CEntryStub::IsPregenerated(Isolate* isolate) {
+ return (!save_doubles_ || isolate->fp_stubs_generated()) &&
result_size_ == 1;
}
-void CodeStub::GenerateStubsAheadOfTime() {
- CEntryStub::GenerateAheadOfTime();
- WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime();
- StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime();
- RecordWriteStub::GenerateFixedRegStubsAheadOfTime();
+void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
+ CEntryStub::GenerateAheadOfTime(isolate);
+ WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(isolate);
+ StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
+ StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
+ RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate);
+ ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
+ CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
+ BinaryOpStub::GenerateAheadOfTime(isolate);
}
-void CodeStub::GenerateFPStubs() {
- CEntryStub save_doubles(1, kSaveFPRegs);
- Handle<Code> code = save_doubles.GetCode();
- code->set_is_pregenerated(true);
- StoreBufferOverflowStub stub(kSaveFPRegs);
- stub.GetCode()->set_is_pregenerated(true);
- code->GetIsolate()->set_fp_stubs_generated(true);
+void CodeStub::GenerateFPStubs(Isolate* isolate) {
+ SaveFPRegsMode mode = kSaveFPRegs;
+ CEntryStub save_doubles(1, mode);
+ StoreBufferOverflowStub stub(mode);
+  // These stubs might already be in the snapshot; detect that and don't
+  // regenerate, which would mess up the code stub initialization state.
+ Code* save_doubles_code;
+ if (!save_doubles.FindCodeInCache(&save_doubles_code, isolate)) {
+ save_doubles_code = *save_doubles.GetCode(isolate);
+ }
+ Code* store_buffer_overflow_code;
+ if (!stub.FindCodeInCache(&store_buffer_overflow_code, isolate)) {
+ store_buffer_overflow_code = *stub.GetCode(isolate);
+ }
+ save_doubles_code->set_is_pregenerated(true);
+ store_buffer_overflow_code->set_is_pregenerated(true);
+ isolate->set_fp_stubs_generated(true);
}
-void CEntryStub::GenerateAheadOfTime() {
+void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
CEntryStub stub(1, kDontSaveFPRegs);
- Handle<Code> code = stub.GetCode();
+ Handle<Code> code = stub.GetCode(isolate);
code->set_is_pregenerated(true);
}
+static void JumpIfOOM(MacroAssembler* masm,
+ Register value,
+ Register scratch,
+ Label* oom_label) {
+ STATIC_ASSERT(Failure::OUT_OF_MEMORY_EXCEPTION == 3);
+ STATIC_ASSERT(kFailureTag == 3);
+ __ and_(scratch, value, Operand(0xf));
+ __ cmp(scratch, Operand(0xf));
+ __ b(eq, oom_label);
+}
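JumpIfOOM leans on the failure encoding spelled out by the two STATIC_ASSERTs: the failure tag occupies the low two bits and the failure type the next two, so an out-of-memory failure is exactly the bit pattern 0xf in the low four bits. The same test as a standalone C++ predicate (the name is this sketch's, not V8's):

    static inline bool IsOutOfMemoryFailure(intptr_t failure_word) {
      return (failure_word & 0xf) == 0xf;  // tag == 3 and type == 3
    }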
+
+
void CEntryStub::GenerateCore(MacroAssembler* masm,
Label* throw_normal_exception,
Label* throw_termination_exception,
@@ -3808,9 +1698,10 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
if (do_gc) {
// Passing r0.
- __ PrepareCallCFunction(1, 0, r1);
+ __ PrepareCallCFunction(2, 0, r1);
+ __ mov(r1, Operand(ExternalReference::isolate_address(masm->isolate())));
__ CallCFunction(ExternalReference::perform_gc_function(isolate),
- 1, 0);
+ 2, 0);
}
ExternalReference scope_depth =
@@ -3827,7 +1718,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
__ mov(r0, Operand(r4));
__ mov(r1, Operand(r6));
-#if defined(V8_HOST_ARCH_ARM)
+#if V8_HOST_ARCH_ARM
int frame_alignment = MacroAssembler::ActivationFrameAlignment();
int frame_alignment_mask = frame_alignment - 1;
if (FLAG_debug_code) {
@@ -3843,7 +1734,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
}
#endif
- __ mov(r2, Operand(ExternalReference::isolate_address()));
+ __ mov(r2, Operand(ExternalReference::isolate_address(isolate)));
// To let the GC traverse the return address of the exit frames, we need to
// know where the return address is. The CEntryStub is unmovable, so
@@ -3860,6 +1751,8 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
masm->Jump(r5);
}
+ __ VFPEnsureFPSCRState(r2);
+
if (always_allocate) {
// It's okay to clobber r2 and r3 here. Don't mess with r0 and r1
// though (contain the result).
@@ -3882,7 +1775,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// sp: stack pointer
// fp: frame pointer
// Callee-saved register r4 still holds argc.
- __ LeaveExitFrame(save_doubles_, r4);
+ __ LeaveExitFrame(save_doubles_, r4, true);
__ mov(pc, lr);
// check if we should retry or throw exception
@@ -3893,15 +1786,20 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
__ b(eq, &retry);
// Special handling of out of memory exceptions.
- Failure* out_of_memory = Failure::OutOfMemoryException();
- __ cmp(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
- __ b(eq, throw_out_of_memory_exception);
+ JumpIfOOM(masm, r0, ip, throw_out_of_memory_exception);
- // Retrieve the pending exception and clear the variable.
- __ mov(r3, Operand(isolate->factory()->the_hole_value()));
+ // Retrieve the pending exception.
__ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
isolate)));
__ ldr(r0, MemOperand(ip));
+
+ // See if we just retrieved an OOM exception.
+ JumpIfOOM(masm, r0, ip, throw_out_of_memory_exception);
+
+ // Clear the pending exception.
+ __ mov(r3, Operand(isolate->factory()->the_hole_value()));
+ __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
+ isolate)));
__ str(r3, MemOperand(ip));
// Special handling of termination exceptions which are uncatchable
@@ -3924,6 +1822,8 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// sp: stack pointer (restored as callee's sp after C call)
// cp: current context (C callee-saved)
+ ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
// Result returned in r0 or r0+r1 by default.
// NOTE: Invocations of builtins may return failure objects
@@ -3982,13 +1882,16 @@ void CEntryStub::Generate(MacroAssembler* masm) {
Isolate* isolate = masm->isolate();
ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
isolate);
- __ mov(r0, Operand(false, RelocInfo::NONE));
+ __ mov(r0, Operand(false, RelocInfo::NONE32));
__ mov(r2, Operand(external_caught));
__ str(r0, MemOperand(r2));
// Set pending exception and r0 to out of memory exception.
- Failure* out_of_memory = Failure::OutOfMemoryException();
+ Label already_have_failure;
+ JumpIfOOM(masm, r0, ip, &already_have_failure);
+ Failure* out_of_memory = Failure::OutOfMemoryException(0x1);
__ mov(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
+ __ bind(&already_have_failure);
__ mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
isolate)));
__ str(r0, MemOperand(r2));
@@ -4011,18 +1914,18 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
Label invoke, handler_entry, exit;
+ ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
// Called from C, so do not pop argc and args on exit (preserve sp)
// No need to save register-passed args
// Save callee-saved registers (incl. cp and fp), sp, and lr
__ stm(db_w, sp, kCalleeSaved | lr.bit());
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- // Save callee-saved vfp registers.
- __ vstm(db_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
- // Set up the reserved register for 0.0.
- __ vmov(kDoubleRegZero, 0.0);
- }
+ // Save callee-saved vfp registers.
+ __ vstm(db_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
+ // Set up the reserved register for 0.0.
+ __ vmov(kDoubleRegZero, 0.0);
+ __ VFPEnsureFPSCRState(r4);
// Get address of argv, see stm above.
// r0: code entry
@@ -4032,9 +1935,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// Set up argv in r4.
int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
- if (CpuFeatures::IsSupported(VFP2)) {
- offset_to_argv += kNumDoubleCalleeSaved * kDoubleSize;
- }
+ offset_to_argv += kNumDoubleCalleeSaved * kDoubleSize;
__ ldr(r4, MemOperand(sp, offset_to_argv));
// Push a frame with special values setup to mark it as an entry frame.
@@ -4044,14 +1945,14 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// r3: argc
// r4: argv
Isolate* isolate = masm->isolate();
- __ mov(r8, Operand(-1)); // Push a bad frame pointer to fail if it is used.
int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
- __ mov(r7, Operand(Smi::FromInt(marker)));
+ __ mov(r8, Operand(Smi::FromInt(marker)));
__ mov(r6, Operand(Smi::FromInt(marker)));
__ mov(r5,
Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate)));
__ ldr(r5, MemOperand(r5));
- __ Push(r8, r7, r6, r5);
+ __ mov(ip, Operand(-1)); // Push a bad frame pointer to fail if it is used.
+ __ Push(ip, r8, r6, r5);
// Set up frame pointer for the frame to be pushed.
__ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
@@ -4097,7 +1998,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// Invoke: Link this frame into the handler chain. There's only one
// handler block in this code object, so its index is 0.
__ bind(&invoke);
- // Must preserve r0-r4, r5-r7 are available.
+ // Must preserve r0-r4, r5-r6 are available.
__ PushTryHandler(StackHandler::JS_ENTRY, 0);
// If an exception not caught by another handler occurs, this handler
// returns control to the code after the bl(&invoke) above, which
@@ -4170,11 +2071,8 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
}
#endif
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- // Restore callee-saved vfp registers.
- __ vldm(ia_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
- }
+ // Restore callee-saved vfp registers.
+ __ vldm(ia_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
__ ldm(ia_w, sp, kCalleeSaved | pc.bit());
}
@@ -4253,7 +2151,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
// Get the map location in scratch and patch it.
__ GetRelocatedValueLocation(inline_site, scratch);
__ ldr(scratch, MemOperand(scratch));
- __ str(map, FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
+ __ str(map, FieldMemOperand(scratch, Cell::kValueOffset));
}
// Register mapping: r3 is object map and r4 is function prototype.
@@ -4358,6 +2256,138 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
}
+void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
+ Label miss;
+ Register receiver;
+ if (kind() == Code::KEYED_LOAD_IC) {
+ // ----------- S t a t e -------------
+ // -- lr : return address
+ // -- r0 : key
+ // -- r1 : receiver
+ // -----------------------------------
+ __ cmp(r0, Operand(masm->isolate()->factory()->prototype_string()));
+ __ b(ne, &miss);
+ receiver = r1;
+ } else {
+ ASSERT(kind() == Code::LOAD_IC);
+ // ----------- S t a t e -------------
+ // -- r2 : name
+ // -- lr : return address
+ // -- r0 : receiver
+ // -- sp[0] : receiver
+ // -----------------------------------
+ receiver = r0;
+ }
+
+ StubCompiler::GenerateLoadFunctionPrototype(masm, receiver, r3, r4, &miss);
+ __ bind(&miss);
+ StubCompiler::TailCallBuiltin(
+ masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
+}
+
+
+void StringLengthStub::Generate(MacroAssembler* masm) {
+ Label miss;
+ Register receiver;
+ if (kind() == Code::KEYED_LOAD_IC) {
+ // ----------- S t a t e -------------
+ // -- lr : return address
+ // -- r0 : key
+ // -- r1 : receiver
+ // -----------------------------------
+ __ cmp(r0, Operand(masm->isolate()->factory()->length_string()));
+ __ b(ne, &miss);
+ receiver = r1;
+ } else {
+ ASSERT(kind() == Code::LOAD_IC);
+ // ----------- S t a t e -------------
+ // -- r2 : name
+ // -- lr : return address
+ // -- r0 : receiver
+ // -- sp[0] : receiver
+ // -----------------------------------
+ receiver = r0;
+ }
+
+ StubCompiler::GenerateLoadStringLength(masm, receiver, r3, r4, &miss);
+
+ __ bind(&miss);
+ StubCompiler::TailCallBuiltin(
+ masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
+}
+
+
+void StoreArrayLengthStub::Generate(MacroAssembler* masm) {
+  // This accepts as a receiver anything JSArray::SetElementsLength accepts
+  // (currently anything except external arrays, i.e. anything with elements
+  // of FixedArray type). The value must be a number, but only smis are
+  // handled here, as they are the most common case.
+ Label miss;
+
+ Register receiver;
+ Register value;
+ if (kind() == Code::KEYED_STORE_IC) {
+ // ----------- S t a t e -------------
+ // -- lr : return address
+ // -- r0 : value
+ // -- r1 : key
+ // -- r2 : receiver
+ // -----------------------------------
+ __ cmp(r1, Operand(masm->isolate()->factory()->length_string()));
+ __ b(ne, &miss);
+ receiver = r2;
+ value = r0;
+ } else {
+ ASSERT(kind() == Code::STORE_IC);
+ // ----------- S t a t e -------------
+ // -- lr : return address
+ // -- r0 : value
+ // -- r1 : receiver
+ // -- r2 : key
+ // -----------------------------------
+ receiver = r1;
+ value = r0;
+ }
+ Register scratch = r3;
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, &miss);
+
+ // Check that the object is a JS array.
+ __ CompareObjectType(receiver, scratch, scratch, JS_ARRAY_TYPE);
+ __ b(ne, &miss);
+
+ // Check that elements are FixedArray.
+ // We rely on StoreIC_ArrayLength below to deal with all types of
+ // fast elements (including COW).
+ __ ldr(scratch, FieldMemOperand(receiver, JSArray::kElementsOffset));
+ __ CompareObjectType(scratch, scratch, scratch, FIXED_ARRAY_TYPE);
+ __ b(ne, &miss);
+
+ // Check that the array has fast properties, otherwise the length
+ // property might have been redefined.
+ __ ldr(scratch, FieldMemOperand(receiver, JSArray::kPropertiesOffset));
+ __ ldr(scratch, FieldMemOperand(scratch, FixedArray::kMapOffset));
+ __ CompareRoot(scratch, Heap::kHashTableMapRootIndex);
+ __ b(eq, &miss);
+
+ // Check that value is a smi.
+ __ JumpIfNotSmi(value, &miss);
+
+ // Prepare tail call to StoreIC_ArrayLength.
+ __ Push(receiver, value);
+
+ ExternalReference ref =
+ ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength), masm->isolate());
+ __ TailCallExternalReference(ref, 2, 1);
+
+ __ bind(&miss);
+
+ StubCompiler::TailCallBuiltin(
+ masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
+}
+
+
Register InstanceofStub::left() { return r0; }
@@ -4389,7 +2419,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// Read the argument from the stack and return it.
__ sub(r3, r0, r1);
- __ add(r3, fp, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ add(r3, fp, Operand::PointerOffsetFromSmiKey(r3));
__ ldr(r0, MemOperand(r3, kDisplacement));
__ Jump(lr);
@@ -4403,7 +2433,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// Read the argument from the adaptor frame and return it.
__ sub(r3, r0, r1);
- __ add(r3, r2, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ add(r3, r2, Operand::PointerOffsetFromSmiKey(r3));
__ ldr(r0, MemOperand(r3, kDisplacement));
__ Jump(lr);
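Operand::PointerOffsetFromSmiKey folds the old explicit shift into one operand: on 32-bit ARM a smi already stores its value shifted left by kSmiTagSize (1), so the byte offset of element key is the raw smi shifted left by kPointerSizeLog2 - kSmiTagSize, i.e. by one more bit. A sketch of the arithmetic, assuming the 32-bit layout with kSmiShiftSize == 0:

    // raw_smi == key << 1, so
    // byte_offset == key * kPointerSize == key << 2 == raw_smi << 1
    int32_t byte_offset = raw_smi << (kPointerSizeLog2 - kSmiTagSize);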
@@ -4496,7 +2526,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ add(r9, r9, Operand(Heap::kArgumentsObjectSize));
// Do the allocation of all three objects in one go.
- __ AllocateInNewSpace(r9, r0, r3, r4, &runtime, TAG_OBJECT);
+ __ Allocate(r9, r0, r3, r4, &runtime, TAG_OBJECT);
// r0 = address of new object(s) (tagged)
// r2 = argument count (tagged)
@@ -4575,31 +2605,36 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ ldr(r9, MemOperand(sp, 0 * kPointerSize));
__ add(r9, r9, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
__ sub(r9, r9, Operand(r1));
- __ LoadRoot(r7, Heap::kTheHoleValueRootIndex);
+ __ LoadRoot(r5, Heap::kTheHoleValueRootIndex);
__ add(r3, r4, Operand(r6, LSL, 1));
__ add(r3, r3, Operand(kParameterMapHeaderSize));
// r6 = loop variable (tagged)
// r1 = mapping index (tagged)
// r3 = address of backing store (tagged)
- // r4 = address of parameter map (tagged)
- // r5 = temporary scratch (a.o., for address calculation)
- // r7 = the hole value
+ // r4 = address of parameter map (tagged), which is also the address of new
+ // object + Heap::kArgumentsObjectSize (tagged)
+ // r0 = temporary scratch (a.o., for address calculation)
+ // r5 = the hole value
__ jmp(&parameters_test);
__ bind(&parameters_loop);
__ sub(r6, r6, Operand(Smi::FromInt(1)));
- __ mov(r5, Operand(r6, LSL, 1));
- __ add(r5, r5, Operand(kParameterMapHeaderSize - kHeapObjectTag));
- __ str(r9, MemOperand(r4, r5));
- __ sub(r5, r5, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
- __ str(r7, MemOperand(r3, r5));
+ __ mov(r0, Operand(r6, LSL, 1));
+ __ add(r0, r0, Operand(kParameterMapHeaderSize - kHeapObjectTag));
+ __ str(r9, MemOperand(r4, r0));
+ __ sub(r0, r0, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
+ __ str(r5, MemOperand(r3, r0));
__ add(r9, r9, Operand(Smi::FromInt(1)));
__ bind(&parameters_test);
__ cmp(r6, Operand(Smi::FromInt(0)));
__ b(ne, &parameters_loop);
+ // Restore r0 = new object (tagged)
+ __ sub(r0, r4, Operand(Heap::kArgumentsObjectSize));
+
__ bind(&skip_parameter_map);
+ // r0 = address of new object (tagged)
// r2 = argument count (tagged)
// r3 = address of backing store (tagged)
// r5 = scratch
@@ -4630,6 +2665,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ Ret();
// Do the runtime call to allocate the arguments object.
+ // r0 = address of new object (tagged)
// r2 = argument count (tagged)
__ bind(&runtime);
__ str(r2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count.
@@ -4656,7 +2692,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ bind(&adaptor_frame);
__ ldr(r1, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ str(r1, MemOperand(sp, 0));
- __ add(r3, r2, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ add(r3, r2, Operand::PointerOffsetFromSmiKey(r1));
__ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
__ str(r3, MemOperand(sp, 1 * kPointerSize));
@@ -4664,21 +2700,15 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// of the arguments object and the elements array in words.
Label add_arguments_object;
__ bind(&try_allocate);
- __ cmp(r1, Operand(0, RelocInfo::NONE));
+ __ SmiUntag(r1, SetCC);
__ b(eq, &add_arguments_object);
- __ mov(r1, Operand(r1, LSR, kSmiTagSize));
__ add(r1, r1, Operand(FixedArray::kHeaderSize / kPointerSize));
__ bind(&add_arguments_object);
__ add(r1, r1, Operand(Heap::kArgumentsObjectSizeStrict / kPointerSize));
// Do the allocation of both objects in one go.
- __ AllocateInNewSpace(r1,
- r0,
- r2,
- r3,
- &runtime,
- static_cast<AllocationFlags>(TAG_OBJECT |
- SIZE_IN_WORDS));
+ __ Allocate(r1, r0, r2, r3, &runtime,
+ static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
// Get the arguments boilerplate from the current native context.
__ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
@@ -4687,7 +2717,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX)));
// Copy the JS object part.
- __ CopyFields(r0, r4, r3.bit(), JSObject::kHeaderSize / kPointerSize);
+ __ CopyFields(r0, r4, d0, JSObject::kHeaderSize / kPointerSize);
// Get the length (smi tagged) and set that as an in-object property too.
STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
@@ -4697,7 +2727,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// If there are no actual arguments, we're done.
Label done;
- __ cmp(r1, Operand(0, RelocInfo::NONE));
+ __ cmp(r1, Operand::Zero());
__ b(eq, &done);
// Get the parameters pointer from the stack.
@@ -4710,8 +2740,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ LoadRoot(r3, Heap::kFixedArrayMapRootIndex);
__ str(r3, FieldMemOperand(r4, FixedArray::kMapOffset));
__ str(r1, FieldMemOperand(r4, FixedArray::kLengthOffset));
- // Untag the length for the loop.
- __ mov(r1, Operand(r1, LSR, kSmiTagSize));
+ __ SmiUntag(r1);
// Copy the fixed array slots.
Label loop;
@@ -4724,7 +2753,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Post-increment r4 with kPointerSize on each iteration.
__ str(r3, MemOperand(r4, kPointerSize, PostIndex));
__ sub(r1, r1, Operand(1));
- __ cmp(r1, Operand(0, RelocInfo::NONE));
+ __ cmp(r1, Operand::Zero());
__ b(ne, &loop);
// Return and remove the on-stack parameters.
@@ -4757,8 +2786,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
const int kSubjectOffset = 2 * kPointerSize;
const int kJSRegExpOffset = 3 * kPointerSize;
- Label runtime, invoke_regexp;
-
+ Label runtime;
// Allocation of registers for this function. These are in callee save
// registers and will be preserved by the call to the native RegExp code, as
// this code is called using the normal C calling convention. When calling
@@ -4766,7 +2794,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// therefore the content of these registers are safe to use after the call.
Register subject = r4;
Register regexp_data = r5;
- Register last_match_info_elements = r6;
+ Register last_match_info_elements = no_reg; // will be r6;
// Ensure that a RegExp stack is allocated.
Isolate* isolate = masm->isolate();
@@ -4776,12 +2804,11 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
ExternalReference::address_of_regexp_stack_memory_size(isolate);
__ mov(r0, Operand(address_of_regexp_stack_memory_size));
__ ldr(r0, MemOperand(r0, 0));
- __ cmp(r0, Operand(0));
+ __ cmp(r0, Operand::Zero());
__ b(eq, &runtime);
// Check that the first argument is a JSRegExp object.
__ ldr(r0, MemOperand(sp, kJSRegExpOffset));
- STATIC_ASSERT(kSmiTag == 0);
__ JumpIfSmi(r0, &runtime);
__ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
__ b(ne, &runtime);
@@ -4789,10 +2816,10 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Check that the RegExp has been compiled (data contains a fixed array).
__ ldr(regexp_data, FieldMemOperand(r0, JSRegExp::kDataOffset));
if (FLAG_debug_code) {
- __ tst(regexp_data, Operand(kSmiTagMask));
- __ Check(ne, "Unexpected type for RegExp data, FixedArray expected");
+ __ SmiTst(regexp_data);
+ __ Check(ne, kUnexpectedTypeForRegExpDataFixedArrayExpected);
__ CompareObjectType(regexp_data, r0, r0, FIXED_ARRAY_TYPE);
- __ Check(eq, "Unexpected type for RegExp data, FixedArray expected");
+ __ Check(eq, kUnexpectedTypeForRegExpDataFixedArrayExpected);
}
// regexp_data: RegExp data (FixedArray)
@@ -4805,68 +2832,48 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Check that the number of captures fit in the static offsets vector buffer.
__ ldr(r2,
FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
- // Calculate number of capture registers (number_of_captures + 1) * 2. This
- // uses the asumption that smis are 2 * their untagged value.
+ // Check (number_of_captures + 1) * 2 <= offsets vector size
+ // Or number_of_captures * 2 <= offsets vector size - 2
+ // Multiplying by 2 comes for free since r2 is smi-tagged.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
- __ add(r2, r2, Operand(2)); // r2 was a smi.
- // Check that the static offsets vector buffer is large enough.
- __ cmp(r2, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize));
+ STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
+ __ cmp(r2, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize - 2));
__ b(hi, &runtime);
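The rewritten bound check exploits smi tagging: r2 already holds number_of_captures * 2, so requiring (number_of_captures + 1) * 2 <= kJSRegexpStaticOffsetsVectorSize is the same as comparing the smi against the vector size minus two. The same condition in plain C++ (smi_capture_count standing for the tagged value in r2):

    // smi_capture_count == number_of_captures * 2
    bool fits = smi_capture_count <=
                Isolate::kJSRegexpStaticOffsetsVectorSize - 2;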
- // r2: Number of capture registers
- // regexp_data: RegExp data (FixedArray)
- // Check that the second argument is a string.
+ // Reset offset for possibly sliced string.
+ __ mov(r9, Operand::Zero());
__ ldr(subject, MemOperand(sp, kSubjectOffset));
__ JumpIfSmi(subject, &runtime);
- Condition is_string = masm->IsObjectStringType(subject, r0);
- __ b(NegateCondition(is_string), &runtime);
- // Get the length of the string to r3.
- __ ldr(r3, FieldMemOperand(subject, String::kLengthOffset));
-
- // r2: Number of capture registers
- // r3: Length of subject string as a smi
- // subject: Subject string
- // regexp_data: RegExp data (FixedArray)
- // Check that the third argument is a positive smi less than the subject
- // string length. A negative value will be greater (unsigned comparison).
- __ ldr(r0, MemOperand(sp, kPreviousIndexOffset));
- __ JumpIfNotSmi(r0, &runtime);
- __ cmp(r3, Operand(r0));
- __ b(ls, &runtime);
-
- // r2: Number of capture registers
- // subject: Subject string
- // regexp_data: RegExp data (FixedArray)
- // Check that the fourth object is a JSArray object.
- __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset));
- __ JumpIfSmi(r0, &runtime);
- __ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE);
- __ b(ne, &runtime);
- // Check that the JSArray is in fast case.
- __ ldr(last_match_info_elements,
- FieldMemOperand(r0, JSArray::kElementsOffset));
- __ ldr(r0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
- __ CompareRoot(r0, Heap::kFixedArrayMapRootIndex);
- __ b(ne, &runtime);
- // Check that the last match info has space for the capture registers and the
- // additional information.
- __ ldr(r0,
- FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
- __ add(r2, r2, Operand(RegExpImpl::kLastMatchOverhead));
- __ cmp(r2, Operand(r0, ASR, kSmiTagSize));
- __ b(gt, &runtime);
-
- // Reset offset for possibly sliced string.
- __ mov(r9, Operand(0));
- // subject: Subject string
- // regexp_data: RegExp data (FixedArray)
- // Check the representation and encoding of the subject string.
- Label seq_string;
+ __ mov(r3, subject); // Make a copy of the original subject string.
__ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
__ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
- // First check for flat string. None of the following string type tests will
- // succeed if subject is not a string or a short external string.
+ // subject: subject string
+ // r3: subject string
+ // r0: subject string instance type
+ // regexp_data: RegExp data (FixedArray)
+ // Handle subject string according to its encoding and representation:
+ // (1) Sequential string? If yes, go to (5).
+ // (2) Anything but sequential or cons? If yes, go to (6).
+ // (3) Cons string. If the string is flat, replace subject with first string.
+ // Otherwise bailout.
+ // (4) Is subject external? If yes, go to (7).
+ // (5) Sequential string. Load regexp code according to encoding.
+ // (E) Carry on.
+ /// [...]
+
+ // Deferred code at the end of the stub:
+ // (6) Not a long external string? If yes, go to (8).
+ // (7) External string. Make it, offset-wise, look like a sequential string.
+ // Go to (5).
+ // (8) Short external string or not a string? If yes, bail out to runtime.
+ // (9) Sliced string. Replace subject with parent. Go to (4).
+
+ Label seq_string /* 5 */, external_string /* 7 */,
+ check_underlying /* 4 */, not_seq_nor_cons /* 6 */,
+ not_long_external /* 8 */;
+
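A rough C++-style sketch of the dispatch laid out in (1)-(9) above; the helper names are this sketch's own, not V8 API, and the real stub works on instance-type bits and labels rather than predicates:

    int offset = 0;                                             // sliced-string offset
    for (;;) {
      if (IsSequential(subject)) break;                         // (1) -> (5)
      if (IsCons(subject)) {                                    // (3)
        if (Second(subject) != empty_string) return Runtime();  //     not flat: bail
        subject = First(subject);                               //     re-check at (4)
      } else if (IsSliced(subject)) {                           // (9)
        offset = SlicedOffset(subject);
        subject = Parent(subject);                              //     re-check at (4)
      } else if (IsLongExternal(subject)) {                     // (7)
        subject = RebaseAsSequential(subject);
        break;                                                  //     -> (5)
      } else {
        return Runtime();                                       // (8)
      }
    }
    // (5) choose the ASCII or two-byte regexp code and carry on.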
+ // (1) Sequential string? If yes, go to (5).
__ and_(r1,
r0,
Operand(kIsNotStringMask |
@@ -4874,80 +2881,65 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
kShortExternalStringMask),
SetCC);
STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
- __ b(eq, &seq_string);
+ __ b(eq, &seq_string); // Go to (5).
- // subject: Subject string
- // regexp_data: RegExp data (FixedArray)
- // r1: whether subject is a string and if yes, its string representation
- // Check for flat cons string or sliced string.
- // A flat cons string is a cons string where the second part is the empty
- // string. In that case the subject string is just the first part of the cons
- // string. Also in this case the first part of the cons string is known to be
- // a sequential string or an external string.
- // In the case of a sliced string its offset has to be taken into account.
- Label cons_string, external_string, check_encoding;
+ // (2) Anything but sequential or cons? If yes, go to (6).
STATIC_ASSERT(kConsStringTag < kExternalStringTag);
STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
__ cmp(r1, Operand(kExternalStringTag));
- __ b(lt, &cons_string);
- __ b(eq, &external_string);
+ __ b(ge, &not_seq_nor_cons); // Go to (6).
- // Catch non-string subject or short external string.
- STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag !=0);
- __ tst(r1, Operand(kIsNotStringMask | kShortExternalStringMask));
- __ b(ne, &runtime);
-
- // String is sliced.
- __ ldr(r9, FieldMemOperand(subject, SlicedString::kOffsetOffset));
- __ mov(r9, Operand(r9, ASR, kSmiTagSize));
- __ ldr(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
- // r9: offset of sliced string, smi-tagged.
- __ jmp(&check_encoding);
- // String is a cons string, check whether it is flat.
- __ bind(&cons_string);
+ // (3) Cons string. Check that it's flat.
+ // Replace subject with first string and reload instance type.
__ ldr(r0, FieldMemOperand(subject, ConsString::kSecondOffset));
- __ CompareRoot(r0, Heap::kEmptyStringRootIndex);
+ __ CompareRoot(r0, Heap::kempty_stringRootIndex);
__ b(ne, &runtime);
__ ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
- // Is first part of cons or parent of slice a flat string?
- __ bind(&check_encoding);
+
+ // (4) Is subject external? If yes, go to (7).
+ __ bind(&check_underlying);
__ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
__ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
STATIC_ASSERT(kSeqStringTag == 0);
__ tst(r0, Operand(kStringRepresentationMask));
- __ b(ne, &external_string);
+ // The underlying external string is never a short external string.
+ STATIC_CHECK(ExternalString::kMaxShortLength < ConsString::kMinLength);
+ STATIC_CHECK(ExternalString::kMaxShortLength < SlicedString::kMinLength);
+ __ b(ne, &external_string); // Go to (7).
+ // (5) Sequential string. Load regexp code according to encoding.
__ bind(&seq_string);
- // subject: Subject string
- // regexp_data: RegExp data (FixedArray)
- // r0: Instance type of subject string
- STATIC_ASSERT(4 == kAsciiStringTag);
+ // subject: sequential subject string (or look-alike, external string)
+ // r3: original subject string
+ // Load previous index and check range before r3 is overwritten. We have to
+ // use r3 instead of subject here because subject might have been only made
+ // to look like a sequential string when it actually is an external string.
+ __ ldr(r1, MemOperand(sp, kPreviousIndexOffset));
+ __ JumpIfNotSmi(r1, &runtime);
+ __ ldr(r3, FieldMemOperand(r3, String::kLengthOffset));
+ __ cmp(r3, Operand(r1));
+ __ b(ls, &runtime);
+ __ SmiUntag(r1);
+
+ STATIC_ASSERT(4 == kOneByteStringTag);
STATIC_ASSERT(kTwoByteStringTag == 0);
- // Find the code object based on the assumptions above.
__ and_(r0, r0, Operand(kStringEncodingMask));
__ mov(r3, Operand(r0, ASR, 2), SetCC);
- __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset), ne);
- __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset), eq);
+ __ ldr(r6, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset), ne);
+ __ ldr(r6, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset), eq);
+ // (E) Carry on. String handling is done.
+ // r6: irregexp code
// Check that the irregexp code has been generated for the actual string
// encoding. If it has, the field contains a code object otherwise it contains
// a smi (code flushing support).
- __ JumpIfSmi(r7, &runtime);
-
- // r3: encoding of subject string (1 if ASCII, 0 if two_byte);
- // r7: code
- // subject: Subject string
- // regexp_data: RegExp data (FixedArray)
- // Load used arguments before starting to push arguments for call to native
- // RegExp code to avoid handling changing stack height.
- __ ldr(r1, MemOperand(sp, kPreviousIndexOffset));
- __ mov(r1, Operand(r1, ASR, kSmiTagSize));
+ __ JumpIfSmi(r6, &runtime);
// r1: previous index
// r3: encoding of subject string (1 if ASCII, 0 if two_byte);
- // r7: code
+ // r6: code
// subject: Subject string
// regexp_data: RegExp data (FixedArray)
// All checks done. Now push arguments for native regexp code.
@@ -4962,7 +2954,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Arguments are before that on the stack or in registers.
// Argument 9 (sp[20]): Pass current isolate address.
- __ mov(r0, Operand(ExternalReference::isolate_address()));
+ __ mov(r0, Operand(ExternalReference::isolate_address(isolate)));
__ str(r0, MemOperand(sp, 5 * kPointerSize));
// Argument 8 (sp[16]): Indicate that this is a direct call from JavaScript.
@@ -4979,7 +2971,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Argument 6: Set the number of capture registers to zero to force global
// regexps to behave as non-global. This does not affect non-global regexps.
- __ mov(r0, Operand(0));
+ __ mov(r0, Operand::Zero());
__ str(r0, MemOperand(sp, 2 * kPointerSize));
// Argument 5 (sp[4]): static offsets vector buffer.
@@ -5004,7 +2996,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ add(r2, r9, Operand(r1, LSL, r3));
__ ldr(r8, FieldMemOperand(subject, String::kLengthOffset));
- __ mov(r8, Operand(r8, ASR, kSmiTagSize));
+ __ SmiUntag(r8);
__ add(r3, r9, Operand(r8, LSL, r3));
// Argument 2 (r1): Previous index.
@@ -5014,20 +3006,20 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ mov(r0, subject);
// Locate the code entry and call it.
- __ add(r7, r7, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ add(r6, r6, Operand(Code::kHeaderSize - kHeapObjectTag));
DirectCEntryStub stub;
- stub.GenerateCall(masm, r7);
+ stub.GenerateCall(masm, r6);
+
+ __ LeaveExitFrame(false, no_reg, true);
- __ LeaveExitFrame(false, no_reg);
+ last_match_info_elements = r6;
// r0: result
// subject: subject string (callee saved)
// regexp_data: RegExp data (callee saved)
// last_match_info_elements: Last match info elements (callee saved)
-
// Check the result.
Label success;
-
__ cmp(r0, Operand(1));
// We expect exactly one result since we force the called regexp to behave
// as non-global.
@@ -5073,14 +3065,33 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ ldr(r1,
FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
// Calculate number of capture registers (number_of_captures + 1) * 2.
+ // Multiplying by 2 comes for free since r1 is smi-tagged.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
__ add(r1, r1, Operand(2)); // r1 was a smi.
+ __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset));
+ __ JumpIfSmi(r0, &runtime);
+ __ CompareObjectType(r0, r2, r2, JS_ARRAY_TYPE);
+ __ b(ne, &runtime);
+ // Check that the JSArray is in fast case.
+ __ ldr(last_match_info_elements,
+ FieldMemOperand(r0, JSArray::kElementsOffset));
+ __ ldr(r0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
+ __ CompareRoot(r0, Heap::kFixedArrayMapRootIndex);
+ __ b(ne, &runtime);
+ // Check that the last match info has space for the capture registers and the
+ // additional information.
+ __ ldr(r0,
+ FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
+ __ add(r2, r1, Operand(RegExpImpl::kLastMatchOverhead));
+ __ cmp(r2, Operand::SmiUntag(r0));
+ __ b(gt, &runtime);
+
// r1: number of capture registers
// r4: subject string
// Store the capture count.
- __ mov(r2, Operand(r1, LSL, kSmiTagSize + kSmiShiftSize)); // To smi.
+ __ SmiTag(r2, r1);
__ str(r2, FieldMemOperand(last_match_info_elements,
RegExpImpl::kLastCaptureCountOffset));
// Store last subject and last input.
@@ -5090,17 +3101,18 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ mov(r2, subject);
__ RecordWriteField(last_match_info_elements,
RegExpImpl::kLastSubjectOffset,
- r2,
- r7,
+ subject,
+ r3,
kLRHasNotBeenSaved,
kDontSaveFPRegs);
+ __ mov(subject, r2);
__ str(subject,
FieldMemOperand(last_match_info_elements,
RegExpImpl::kLastInputOffset));
__ RecordWriteField(last_match_info_elements,
RegExpImpl::kLastInputOffset,
subject,
- r7,
+ r3,
kLRHasNotBeenSaved,
kDontSaveFPRegs);
@@ -5123,7 +3135,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Read the value from the static offsets vector buffer.
__ ldr(r3, MemOperand(r2, kPointerSize, PostIndex));
// Store the smi value in the last match info.
- __ mov(r3, Operand(r3, LSL, kSmiTagSize));
+ __ SmiTag(r3);
__ str(r3, MemOperand(r0, kPointerSize, PostIndex));
__ jmp(&next_capture);
__ bind(&done);
@@ -5133,8 +3145,17 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ add(sp, sp, Operand(4 * kPointerSize));
__ Ret();
- // External string. Short external strings have already been ruled out.
- // r0: scratch
+ // Do the runtime call to execute the regexp.
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+
+ // Deferred code for string handling.
+ // (6) Not a long external string? If yes, go to (8).
+ __ bind(&not_seq_nor_cons);
+ // Compare flags are still set.
+ __ b(gt, &not_long_external); // Go to (8).
+
+ // (7) External string. Make it, offset-wise, look like a sequential string.
__ bind(&external_string);
__ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
__ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
@@ -5142,20 +3163,29 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Assert that we do not have a cons or slice (indirect strings) here.
// Sequential strings have already been ruled out.
__ tst(r0, Operand(kIsIndirectStringMask));
- __ Assert(eq, "external string expected, but not found");
+ __ Assert(eq, kExternalStringExpectedButNotFound);
}
__ ldr(subject,
FieldMemOperand(subject, ExternalString::kResourceDataOffset));
// Move the pointer so that offset-wise, it looks like a sequential string.
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
__ sub(subject,
subject,
Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- __ jmp(&seq_string);
+ __ jmp(&seq_string); // Go to (5).
- // Do the runtime call to execute the regexp.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ // (8) Short external string or not a string? If yes, bail out to runtime.
+ __ bind(&not_long_external);
+ STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag !=0);
+ __ tst(r1, Operand(kIsNotStringMask | kShortExternalStringMask));
+ __ b(ne, &runtime);
+
+ // (9) Sliced string. Replace subject with parent. Go to (4).
+ // Load offset into r9 and replace subject string with parent.
+ __ ldr(r9, FieldMemOperand(subject, SlicedString::kOffsetOffset));
+ __ SmiUntag(r9);
+ __ ldr(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
+ __ jmp(&check_underlying); // Go to (4).
#endif // V8_INTERPRETED_REGEXP
}
@@ -5180,9 +3210,9 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
// FixedArray.
int objects_size =
(JSRegExpResult::kSize + FixedArray::kHeaderSize) / kPointerSize;
- __ mov(r5, Operand(r1, LSR, kSmiTagSize + kSmiShiftSize));
+ __ SmiUntag(r5, r1);
__ add(r2, r5, Operand(objects_size));
- __ AllocateInNewSpace(
+ __ Allocate(
r2, // In: Size, in words.
r0, // Out: Start of allocation (tagged).
r3, // Scratch register.
@@ -5223,7 +3253,7 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
__ mov(r2, Operand(factory->fixed_array_map()));
__ str(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
// Set FixedArray length.
- __ mov(r6, Operand(r5, LSL, kSmiTagSize));
+ __ SmiTag(r6, r5);
__ str(r6, FieldMemOperand(r3, FixedArray::kLengthOffset));
// Fill contents of fixed-array with undefined.
__ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
@@ -5234,7 +3264,7 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
// r3: Start of elements in FixedArray.
// r5: Number of elements to fill.
Label loop;
- __ cmp(r5, Operand(0));
+ __ cmp(r5, Operand::Zero());
__ bind(&loop);
__ b(le, &done); // Jump if r5 is negative or zero.
__ sub(r5, r5, Operand(1), SetCC);
@@ -5254,9 +3284,10 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// Cache the called function in a global property cell. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
+ // r0 : number of arguments to the construct function
// r1 : the function to call
// r2 : cache cell for call target
- Label done;
+ Label initialize, done, miss, megamorphic, not_array_function;
ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
masm->isolate()->heap()->undefined_value());
@@ -5264,25 +3295,71 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
masm->isolate()->heap()->the_hole_value());
// Load the cache state into r3.
- __ ldr(r3, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
+ __ ldr(r3, FieldMemOperand(r2, Cell::kValueOffset));
// A monomorphic cache hit or an already megamorphic state: invoke the
// function without changing the state.
__ cmp(r3, r1);
__ b(eq, &done);
- __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
- __ b(eq, &done);
+
+ // If we came here, we need to see if we are the array function.
+  // If we didn't have a matching function, and we didn't find the
+  // megamorphic sentinel, then we have in the cell either some other
+  // function or an AllocationSite. Do a map check on the object in r3.
+ __ ldr(r5, FieldMemOperand(r3, 0));
+ __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
+ __ b(ne, &miss);
+
+ // Make sure the function is the Array() function
+ __ LoadArrayFunction(r3);
+ __ cmp(r1, r3);
+ __ b(ne, &megamorphic);
+ __ jmp(&done);
+
+ __ bind(&miss);
// A monomorphic miss (i.e, here the cache is not uninitialized) goes
// megamorphic.
__ CompareRoot(r3, Heap::kTheHoleValueRootIndex);
+ __ b(eq, &initialize);
// MegamorphicSentinel is an immortal immovable object (undefined) so no
// write-barrier is needed.
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex, ne);
- __ str(ip, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset), ne);
+ __ bind(&megamorphic);
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ __ str(ip, FieldMemOperand(r2, Cell::kValueOffset));
+ __ jmp(&done);
+
+ // An uninitialized cache is patched with the function or sentinel to
+ // indicate the ElementsKind if function is the Array constructor.
+ __ bind(&initialize);
+ // Make sure the function is the Array() function
+ __ LoadArrayFunction(r3);
+ __ cmp(r1, r3);
+ __ b(ne, &not_array_function);
- // An uninitialized cache is patched with the function.
- __ str(r1, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset), eq);
+  // The target function is the Array constructor. Create an AllocationSite
+  // if we don't already have one, and store it in the cell.
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Arguments register must be smi-tagged to call out.
+ __ SmiTag(r0);
+ __ push(r0);
+ __ push(r1);
+ __ push(r2);
+
+ CreateAllocationSiteStub create_stub;
+ __ CallStub(&create_stub);
+
+ __ pop(r2);
+ __ pop(r1);
+ __ pop(r0);
+ __ SmiUntag(r0);
+ }
+ __ b(&done);
+
+ __ bind(&not_array_function);
+ __ str(r1, FieldMemOperand(r2, Cell::kValueOffset));
// No need for a write barrier here - cells are rescanned.
__ bind(&done);
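The cell updated above implements a small feedback state machine; a self-contained model of the transitions (the enum and names are this sketch's, not V8's):

    enum class Feedback { kUninitialized, kMonomorphic, kAllocationSite, kMegamorphic };

    Feedback RecordCallTarget(Feedback state, bool same_callee, bool callee_is_array) {
      if (same_callee || state == Feedback::kMegamorphic) return state;      // no change
      if (state == Feedback::kAllocationSite && callee_is_array) return state;
      if (state != Feedback::kUninitialized) return Feedback::kMegamorphic;  // miss
      return callee_is_array ? Feedback::kAllocationSite                     // Array()
                             : Feedback::kMonomorphic;                       // remember callee
    }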
@@ -5354,14 +3431,14 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
masm->isolate()->heap()->undefined_value());
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ str(ip, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
+ __ str(ip, FieldMemOperand(r2, Cell::kValueOffset));
}
// Check for function proxy.
__ cmp(r3, Operand(JS_FUNCTION_PROXY_TYPE));
__ b(ne, &non_function);
__ push(r1); // put proxy as additional argument
- __ mov(r0, Operand(argc_ + 1, RelocInfo::NONE));
- __ mov(r2, Operand(0, RelocInfo::NONE));
+ __ mov(r0, Operand(argc_ + 1, RelocInfo::NONE32));
+ __ mov(r2, Operand::Zero());
__ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY);
__ SetCallKind(r5, CALL_AS_METHOD);
{
@@ -5375,7 +3452,7 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
__ bind(&non_function);
__ str(r1, MemOperand(sp, argc_ * kPointerSize));
__ mov(r0, Operand(argc_)); // Set up the number of arguments.
- __ mov(r2, Operand(0, RelocInfo::NONE));
+ __ mov(r2, Operand::Zero());
__ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION);
__ SetCallKind(r5, CALL_AS_METHOD);
__ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
@@ -5400,9 +3477,11 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
}
// Jump to the function-specific construct stub.
- __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kConstructStubOffset));
- __ add(pc, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
+ Register jmp_reg = r3;
+ __ ldr(jmp_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(jmp_reg, FieldMemOperand(jmp_reg,
+ SharedFunctionInfo::kConstructStubOffset));
+ __ add(pc, jmp_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
// r0: number of arguments
// r1: called object
@@ -5418,55 +3497,13 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
__ bind(&do_call);
// Set expected number of arguments to zero (not changing r0).
- __ mov(r2, Operand(0, RelocInfo::NONE));
+ __ mov(r2, Operand::Zero());
__ SetCallKind(r5, CALL_AS_METHOD);
__ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
}
-// Unfortunately you have to run without snapshots to see most of these
-// names in the profile since most compare stubs end up in the snapshot.
-void CompareStub::PrintName(StringStream* stream) {
- ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
- (lhs_.is(r1) && rhs_.is(r0)));
- const char* cc_name;
- switch (cc_) {
- case lt: cc_name = "LT"; break;
- case gt: cc_name = "GT"; break;
- case le: cc_name = "LE"; break;
- case ge: cc_name = "GE"; break;
- case eq: cc_name = "EQ"; break;
- case ne: cc_name = "NE"; break;
- default: cc_name = "UnknownCondition"; break;
- }
- bool is_equality = cc_ == eq || cc_ == ne;
- stream->Add("CompareStub_%s", cc_name);
- stream->Add(lhs_.is(r0) ? "_r0" : "_r1");
- stream->Add(rhs_.is(r0) ? "_r0" : "_r1");
- if (strict_ && is_equality) stream->Add("_STRICT");
- if (never_nan_nan_ && is_equality) stream->Add("_NO_NAN");
- if (!include_number_compare_) stream->Add("_NO_NUMBER");
- if (!include_smi_compare_) stream->Add("_NO_SMI");
-}
-
-
-int CompareStub::MinorKey() {
- // Encode the three parameters in a unique 16 bit value. To avoid duplicate
- // stubs the never NaN NaN condition is only taken into account if the
- // condition is equals.
- ASSERT((static_cast<unsigned>(cc_) >> 28) < (1 << 12));
- ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
- (lhs_.is(r1) && rhs_.is(r0)));
- return ConditionField::encode(static_cast<unsigned>(cc_) >> 28)
- | RegisterField::encode(lhs_.is(r0))
- | StrictField::encode(strict_)
- | NeverNanNanField::encode(cc_ == eq ? never_nan_nan_ : false)
- | IncludeNumberCompareField::encode(include_number_compare_)
- | IncludeSmiCompareField::encode(include_smi_compare_);
-}
-
-
// StringCharCodeAtGenerator
void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
Label flat_string;
@@ -5493,7 +3530,7 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
__ cmp(ip, Operand(index_));
__ b(ls, index_out_of_range_);
- __ mov(index_, Operand(index_, ASR, kSmiTagSize));
+ __ SmiUntag(index_);
StringCharLoadGenerator::Generate(masm,
object_,
@@ -5501,7 +3538,7 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
result_,
&call_runtime_);
- __ mov(result_, Operand(result_, LSL, kSmiTagSize));
+ __ SmiTag(result_);
__ bind(&exit_);
}
@@ -5509,7 +3546,7 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
void StringCharCodeAtGenerator::GenerateSlow(
MacroAssembler* masm,
const RuntimeCallHelper& call_helper) {
- __ Abort("Unexpected fallthrough to CharCodeAt slow case");
+ __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
// Index is not a smi.
__ bind(&index_not_smi_);
@@ -5547,14 +3584,14 @@ void StringCharCodeAtGenerator::GenerateSlow(
// is too complex (e.g., when the string needs to be flattened).
__ bind(&call_runtime_);
call_helper.BeforeCall(masm);
- __ mov(index_, Operand(index_, LSL, kSmiTagSize));
+ __ SmiTag(index_);
__ Push(object_, index_);
__ CallRuntime(Runtime::kStringCharCodeAt, 2);
__ Move(result_, r0);
call_helper.AfterCall(masm);
__ jmp(&exit_);
- __ Abort("Unexpected fallthrough from CharCodeAt slow case");
+ __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
}
@@ -5565,16 +3602,15 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
// Fast case of Heap::LookupSingleCharacterStringFromCode.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiShiftSize == 0);
- ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
+ ASSERT(IsPowerOf2(String::kMaxOneByteCharCode + 1));
__ tst(code_,
Operand(kSmiTagMask |
- ((~String::kMaxAsciiCharCode) << kSmiTagSize)));
+ ((~String::kMaxOneByteCharCode) << kSmiTagSize)));
__ b(ne, &slow_case_);
__ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
// At this point code register contains smi tagged ASCII char code.
- STATIC_ASSERT(kSmiTag == 0);
- __ add(result_, result_, Operand(code_, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ add(result_, result_, Operand::PointerOffsetFromSmiKey(code_));
__ ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
__ CompareRoot(result_, Heap::kUndefinedValueRootIndex);
__ b(eq, &slow_case_);
@@ -5585,7 +3621,7 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
void StringCharFromCodeGenerator::GenerateSlow(
MacroAssembler* masm,
const RuntimeCallHelper& call_helper) {
- __ Abort("Unexpected fallthrough to CharFromCode slow case");
+ __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
__ bind(&slow_case_);
call_helper.BeforeCall(masm);
@@ -5595,24 +3631,7 @@ void StringCharFromCodeGenerator::GenerateSlow(
call_helper.AfterCall(masm);
__ jmp(&exit_);
- __ Abort("Unexpected fallthrough from CharFromCode slow case");
-}
-
-
-// -------------------------------------------------------------------------
-// StringCharAtGenerator
-
-void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
- char_code_at_generator_.GenerateFast(masm);
- char_from_code_generator_.GenerateFast(masm);
-}
-
-
-void StringCharAtGenerator::GenerateSlow(
- MacroAssembler* masm,
- const RuntimeCallHelper& call_helper) {
- char_code_at_generator_.GenerateSlow(masm, call_helper);
- char_from_code_generator_.GenerateSlow(masm, call_helper);
+ __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
}
@@ -5629,7 +3648,7 @@ void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
if (!ascii) {
__ add(count, count, Operand(count), SetCC);
} else {
- __ cmp(count, Operand(0, RelocInfo::NONE));
+ __ cmp(count, Operand::Zero());
}
__ b(eq, &done);
@@ -5660,7 +3679,6 @@ void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm,
Register scratch2,
Register scratch3,
Register scratch4,
- Register scratch5,
int flags) {
bool ascii = (flags & COPY_ASCII) != 0;
bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0;
@@ -5669,7 +3687,7 @@ void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm,
// Check that destination is actually word aligned if the flag says
// that it is.
__ tst(dest, Operand(kPointerAlignmentMask));
- __ Check(eq, "Destination of copy not aligned.");
+ __ Check(eq, kDestinationOfCopyNotAligned);
}
const int kReadAlignment = 4;
@@ -5684,7 +3702,7 @@ void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm,
if (!ascii) {
__ add(count, count, Operand(count), SetCC);
} else {
- __ cmp(count, Operand(0, RelocInfo::NONE));
+ __ cmp(count, Operand::Zero());
}
__ b(eq, &done);
@@ -5735,30 +3753,29 @@ void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm,
__ bind(&loop);
__ ldr(scratch3, MemOperand(src, 4, PostIndex));
- __ sub(scratch5, limit, Operand(dest));
__ orr(scratch1, scratch1, Operand(scratch3, LSL, left_shift));
__ str(scratch1, MemOperand(dest, 4, PostIndex));
__ mov(scratch1, Operand(scratch3, LSR, right_shift));
// Loop if four or more bytes left to copy.
- // Compare to eight, because we did the subtract before increasing dst.
- __ sub(scratch5, scratch5, Operand(8), SetCC);
+ __ sub(scratch3, limit, Operand(dest));
+ __ sub(scratch3, scratch3, Operand(4), SetCC);
__ b(ge, &loop);
}
// There is now between zero and three bytes left to copy (negative that
- // number is in scratch5), and between one and three bytes already read into
+ // number is in scratch3), and between one and three bytes already read into
// scratch1 (eight times that number in scratch4). We may have read past
// the end of the string, but because objects are aligned, we have not read
// past the end of the object.
// Find the minimum of remaining characters to move and preloaded characters
// and write those as bytes.
- __ add(scratch5, scratch5, Operand(4), SetCC);
+ __ add(scratch3, scratch3, Operand(4), SetCC);
__ b(eq, &done);
- __ cmp(scratch4, Operand(scratch5, LSL, 3), ne);
+ __ cmp(scratch4, Operand(scratch3, LSL, 3), ne);
// Move minimum of bytes read and bytes left to copy to scratch4.
- __ mov(scratch5, Operand(scratch4, LSR, 3), LeaveCC, lt);
- // Between one and three (value in scratch5) characters already read into
+ __ mov(scratch3, Operand(scratch4, LSR, 3), LeaveCC, lt);
+ // Between one and three (value in scratch3) characters already read into
// scratch ready to write.
- __ cmp(scratch5, Operand(2));
+ __ cmp(scratch3, Operand(2));
__ strb(scratch1, MemOperand(dest, 1, PostIndex));
__ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, ge);
__ strb(scratch1, MemOperand(dest, 1, PostIndex), ge);
@@ -5795,7 +3812,7 @@ void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm,
}
-void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
+void StringHelper::GenerateTwoCharacterStringTableProbe(MacroAssembler* masm,
Register c1,
Register c2,
Register scratch1,
@@ -5808,7 +3825,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
Register scratch = scratch3;
// Make sure that both characters are not digits as such strings have a
- // different hash algorithm. Don't try to look for these in the symbol table.
+ // different hash algorithm. Don't try to look for these in the string table.
Label not_array_index;
__ sub(scratch, c1, Operand(static_cast<int>('0')));
__ cmp(scratch, Operand(static_cast<int>('9' - '0')));
@@ -5836,43 +3853,43 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
// chars: two character string, char 1 in byte 0 and char 2 in byte 1.
// hash: hash of two character string.
- // Load symbol table
- // Load address of first element of the symbol table.
- Register symbol_table = c2;
- __ LoadRoot(symbol_table, Heap::kSymbolTableRootIndex);
+ // Load string table
+ // Load address of first element of the string table.
+ Register string_table = c2;
+ __ LoadRoot(string_table, Heap::kStringTableRootIndex);
Register undefined = scratch4;
__ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
- // Calculate capacity mask from the symbol table capacity.
+ // Calculate capacity mask from the string table capacity.
Register mask = scratch2;
- __ ldr(mask, FieldMemOperand(symbol_table, SymbolTable::kCapacityOffset));
+ __ ldr(mask, FieldMemOperand(string_table, StringTable::kCapacityOffset));
__ mov(mask, Operand(mask, ASR, 1));
__ sub(mask, mask, Operand(1));
- // Calculate untagged address of the first element of the symbol table.
- Register first_symbol_table_element = symbol_table;
- __ add(first_symbol_table_element, symbol_table,
- Operand(SymbolTable::kElementsStartOffset - kHeapObjectTag));
+ // Calculate untagged address of the first element of the string table.
+ Register first_string_table_element = string_table;
+ __ add(first_string_table_element, string_table,
+ Operand(StringTable::kElementsStartOffset - kHeapObjectTag));
// Registers
// chars: two character string, char 1 in byte 0 and char 2 in byte 1.
// hash: hash of two character string
// mask: capacity mask
- // first_symbol_table_element: address of the first element of
- // the symbol table
+ // first_string_table_element: address of the first element of
+ // the string table
// undefined: the undefined object
// scratch: -
- // Perform a number of probes in the symbol table.
+ // Perform a number of probes in the string table.
const int kProbes = 4;
- Label found_in_symbol_table;
+ Label found_in_string_table;
Label next_probe[kProbes];
Register candidate = scratch5; // Scratch register contains candidate.
for (int i = 0; i < kProbes; i++) {
- // Calculate entry in symbol table.
+ // Calculate entry in string table.
if (i > 0) {
- __ add(candidate, hash, Operand(SymbolTable::GetProbeOffset(i)));
+ __ add(candidate, hash, Operand(StringTable::GetProbeOffset(i)));
} else {
__ mov(candidate, hash);
}
@@ -5880,9 +3897,9 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
__ and_(candidate, candidate, Operand(mask));
// Load the entry from the string table.
- STATIC_ASSERT(SymbolTable::kEntrySize == 1);
+ STATIC_ASSERT(StringTable::kEntrySize == 1);
__ ldr(candidate,
- MemOperand(first_symbol_table_element,
+ MemOperand(first_string_table_element,
candidate,
LSL,
kPointerSizeLog2));
@@ -5898,7 +3915,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
if (FLAG_debug_code) {
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(ip, candidate);
- __ Assert(eq, "oddball in symbol table is not undefined or the hole");
+ __ Assert(eq, kOddballInStringTableIsNotUndefinedOrTheHole);
}
__ jmp(&next_probe[i]);
@@ -5916,9 +3933,9 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
// Check if the two characters match.
// Assumes that word load is little endian.
- __ ldrh(scratch, FieldMemOperand(candidate, SeqAsciiString::kHeaderSize));
+ __ ldrh(scratch, FieldMemOperand(candidate, SeqOneByteString::kHeaderSize));
__ cmp(chars, scratch);
- __ b(eq, &found_in_symbol_table);
+ __ b(eq, &found_in_string_table);
__ bind(&next_probe[i]);
}
@@ -5927,7 +3944,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
// Scratch register contains result when we fall through to here.
Register result = candidate;
- __ bind(&found_in_symbol_table);
+ __ bind(&found_in_string_table);
__ Move(r0, result);
}
@@ -5999,25 +4016,33 @@ void SubStringStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
- // I.e., arithmetic shift right by one un-smi-tags.
- __ mov(r2, Operand(r2, ASR, 1), SetCC);
- __ mov(r3, Operand(r3, ASR, 1), SetCC, cc);
- // If either to or from had the smi tag bit set, then carry is set now.
- __ b(cs, &runtime); // Either "from" or "to" is not a smi.
+ // Arithmetic shift right by one un-smi-tags. In this case we rotate right
+ // instead because we bail out on non-smi values: ROR and ASR are equivalent
+ // for smis but they set the flags in a way that's easier to optimize.
+ __ mov(r2, Operand(r2, ROR, 1), SetCC);
+ __ mov(r3, Operand(r3, ROR, 1), SetCC, cc);
+ // If either to or from had the smi tag bit set, then C is set now, and N
+ // has the same value: we rotated by 1, so the bottom bit is now the top bit.
// We want to bail out to runtime here if "from" is negative. In that case, the
// next instruction is not executed and we fall through to bailing out to
- // runtime. pl is the opposite of mi.
- // Both r2 and r3 are untagged integers.
- __ sub(r2, r2, Operand(r3), SetCC, pl);
- __ b(mi, &runtime); // Fail if from > to.
+ // runtime.
+ // Executed if both r2 and r3 are untagged integers.
+ __ sub(r2, r2, Operand(r3), SetCC, cc);
+ // One of the above un-smis or the above SUB could have set N==1.
+ __ b(mi, &runtime); // Either "from" or "to" is not an smi, or from > to.
// Make sure first argument is a string.
__ ldr(r0, MemOperand(sp, kStringOffset));
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfSmi(r0, &runtime);
- Condition is_string = masm->IsObjectStringType(r0, r1);
+ // Do a JumpIfSmi, but fold its jump into the subsequent string test.
+ __ SmiTst(r0);
+ Condition is_string = masm->IsObjectStringType(r0, r1, ne);
+ ASSERT(is_string == eq);
__ b(NegateCondition(is_string), &runtime);
+ Label single_char;
+ __ cmp(r2, Operand(1));
+ __ b(eq, &single_char);
+
// Short-cut for the case of trivial substring.
Label return_r0;
// r0: original string
@@ -6047,7 +4072,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ b(ne, &sliced_string);
// Cons string. Check whether it is flat, then fetch first part.
__ ldr(r5, FieldMemOperand(r0, ConsString::kSecondOffset));
- __ CompareRoot(r5, Heap::kEmptyStringRootIndex);
+ __ CompareRoot(r5, Heap::kempty_stringRootIndex);
__ b(ne, &runtime);
__ ldr(r5, FieldMemOperand(r0, ConsString::kFirstOffset));
// Update instance type.
@@ -6086,14 +4111,14 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// string's encoding is wrong because we always have to recheck encoding of
// the newly created string's parent anyway due to externalized strings.
Label two_byte_slice, set_slice_header;
- STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
+ STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
__ tst(r1, Operand(kStringEncodingMask));
__ b(eq, &two_byte_slice);
- __ AllocateAsciiSlicedString(r0, r2, r6, r7, &runtime);
+ __ AllocateAsciiSlicedString(r0, r2, r6, r4, &runtime);
__ jmp(&set_slice_header);
__ bind(&two_byte_slice);
- __ AllocateTwoByteSlicedString(r0, r2, r6, r7, &runtime);
+ __ AllocateTwoByteSlicedString(r0, r2, r6, r4, &runtime);
__ bind(&set_slice_header);
__ mov(r3, Operand(r3, LSL, 1));
__ str(r5, FieldMemOperand(r0, SlicedString::kParentOffset));
@@ -6124,35 +4149,35 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ bind(&sequential_string);
// Locate first character of underlying subject string.
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
- __ add(r5, r5, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
+ __ add(r5, r5, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
__ bind(&allocate_result);
// Sequential ASCII string. Allocate the result.
- STATIC_ASSERT((kAsciiStringTag & kStringEncodingMask) != 0);
+ STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
__ tst(r1, Operand(kStringEncodingMask));
__ b(eq, &two_byte_sequential);
// Allocate and copy the resulting ASCII string.
- __ AllocateAsciiString(r0, r2, r4, r6, r7, &runtime);
+ __ AllocateAsciiString(r0, r2, r4, r6, r1, &runtime);
// Locate first character of substring to copy.
__ add(r5, r5, r3);
// Locate first character of result.
- __ add(r1, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ add(r1, r0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
// r0: result string
// r1: first character of result string
// r2: result string length
// r5: first character of substring to copy
- STATIC_ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
- StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9,
+ STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+ StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r9,
COPY_ASCII | DEST_ALWAYS_ALIGNED);
__ jmp(&return_r0);
// Allocate and copy the resulting two-byte string.
__ bind(&two_byte_sequential);
- __ AllocateTwoByteString(r0, r2, r4, r6, r7, &runtime);
+ __ AllocateTwoByteString(r0, r2, r4, r6, r1, &runtime);
// Locate first character of substring to copy.
STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
@@ -6166,17 +4191,30 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// r5: first character of substring to copy.
STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
StringHelper::GenerateCopyCharactersLong(
- masm, r1, r5, r2, r3, r4, r6, r7, r9, DEST_ALWAYS_ALIGNED);
+ masm, r1, r5, r2, r3, r4, r6, r9, DEST_ALWAYS_ALIGNED);
__ bind(&return_r0);
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->sub_string_native(), 1, r3, r4);
- __ add(sp, sp, Operand(3 * kPointerSize));
+ __ Drop(3);
__ Ret();
// Just jump to runtime to create the sub string.
__ bind(&runtime);
__ TailCallRuntime(Runtime::kSubString, 3, 1);
+
+ __ bind(&single_char);
+ // r0: original string
+ // r1: instance type
+ // r2: length
+ // r3: from index (untagged)
+ __ SmiTag(r3, r3);
+ StringCharAtGenerator generator(
+ r0, r3, r2, r0, &runtime, &runtime, &runtime, STRING_INDEX_IS_NUMBER);
+ generator.GenerateFast(masm);
+ __ Drop(3);
+ __ Ret();
+ generator.SkipSlow(masm, &runtime);
}
@@ -6202,7 +4240,7 @@ void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
Label compare_chars;
__ bind(&check_zero_length);
STATIC_ASSERT(kSmiTag == 0);
- __ cmp(length, Operand(0));
+ __ cmp(length, Operand::Zero());
__ b(ne, &compare_chars);
__ mov(r0, Operand(Smi::FromInt(EQUAL)));
__ Ret();
@@ -6235,7 +4273,7 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
__ mov(scratch1, scratch2, LeaveCC, gt);
Register min_length = scratch1;
STATIC_ASSERT(kSmiTag == 0);
- __ cmp(min_length, Operand(0));
+ __ cmp(min_length, Operand::Zero());
__ b(eq, &compare_lengths);
// Compare loop.
@@ -6270,7 +4308,7 @@ void StringCompareStub::GenerateAsciiCharsCompareLoop(
// doesn't need an additional compare.
__ SmiUntag(length);
__ add(scratch1, length,
- Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
__ add(left, left, Operand(scratch1));
__ add(right, right, Operand(scratch1));
__ rsb(length, length, Operand::Zero());
@@ -6340,7 +4378,11 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ ldr(r1, MemOperand(sp, 0 * kPointerSize)); // Second argument.
// Make sure that both arguments are strings if not known in advance.
- if (flags_ == NO_STRING_ADD_FLAGS) {
+ // Otherwise, at least one of the arguments is definitely a string,
+ // and we convert the one that is not known to be a string.
+ if ((flags_ & STRING_ADD_CHECK_BOTH) == STRING_ADD_CHECK_BOTH) {
+ ASSERT((flags_ & STRING_ADD_CHECK_LEFT) == STRING_ADD_CHECK_LEFT);
+ ASSERT((flags_ & STRING_ADD_CHECK_RIGHT) == STRING_ADD_CHECK_RIGHT);
__ JumpIfEitherSmi(r0, r1, &call_runtime);
// Load instance types.
__ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
@@ -6352,20 +4394,16 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ tst(r4, Operand(kIsNotStringMask));
__ tst(r5, Operand(kIsNotStringMask), eq);
__ b(ne, &call_runtime);
- } else {
- // Here at least one of the arguments is definitely a string.
- // We convert the one that is not known to be a string.
- if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) {
- ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0);
- GenerateConvertArgument(
- masm, 1 * kPointerSize, r0, r2, r3, r4, r5, &call_builtin);
- builtin_id = Builtins::STRING_ADD_RIGHT;
- } else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) {
- ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0);
- GenerateConvertArgument(
- masm, 0 * kPointerSize, r1, r2, r3, r4, r5, &call_builtin);
- builtin_id = Builtins::STRING_ADD_LEFT;
- }
+ } else if ((flags_ & STRING_ADD_CHECK_LEFT) == STRING_ADD_CHECK_LEFT) {
+ ASSERT((flags_ & STRING_ADD_CHECK_RIGHT) == 0);
+ GenerateConvertArgument(
+ masm, 1 * kPointerSize, r0, r2, r3, r4, r5, &call_builtin);
+ builtin_id = Builtins::STRING_ADD_RIGHT;
+ } else if ((flags_ & STRING_ADD_CHECK_RIGHT) == STRING_ADD_CHECK_RIGHT) {
+ ASSERT((flags_ & STRING_ADD_CHECK_LEFT) == 0);
+ GenerateConvertArgument(
+ masm, 0 * kPointerSize, r1, r2, r3, r4, r5, &call_builtin);
+ builtin_id = Builtins::STRING_ADD_LEFT;
}
// Both arguments are strings.
@@ -6393,8 +4431,8 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ bind(&strings_not_empty);
}
- __ mov(r2, Operand(r2, ASR, kSmiTagSize));
- __ mov(r3, Operand(r3, ASR, kSmiTagSize));
+ __ SmiUntag(r2);
+ __ SmiUntag(r3);
// Both strings are non-empty.
// r0: first string
// r1: second string
@@ -6407,30 +4445,30 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// Adding two lengths can't overflow.
STATIC_ASSERT(String::kMaxLength < String::kMaxLength * 2);
__ add(r6, r2, Operand(r3));
- // Use the symbol table when adding two one character strings, as it
- // helps later optimizations to return a symbol here.
+ // Use the string table when adding two one character strings, as it
+ // helps later optimizations to return a string here.
__ cmp(r6, Operand(2));
__ b(ne, &longer_than_two);
// Check that both strings are non-external ASCII strings.
- if (flags_ != NO_STRING_ADD_FLAGS) {
+ if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) {
__ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
__ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
__ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
__ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
}
- __ JumpIfBothInstanceTypesAreNotSequentialAscii(r4, r5, r6, r7,
+ __ JumpIfBothInstanceTypesAreNotSequentialAscii(r4, r5, r6, r3,
&call_runtime);
// Get the two characters forming the sub string.
- __ ldrb(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
- __ ldrb(r3, FieldMemOperand(r1, SeqAsciiString::kHeaderSize));
+ __ ldrb(r2, FieldMemOperand(r0, SeqOneByteString::kHeaderSize));
+ __ ldrb(r3, FieldMemOperand(r1, SeqOneByteString::kHeaderSize));
- // Try to lookup two character string in symbol table. If it is not found
+ // Try to lookup two character string in string table. If it is not found
// just allocate a new one.
Label make_two_character_string;
- StringHelper::GenerateTwoCharacterSymbolTableProbe(
- masm, r2, r3, r6, r7, r4, r5, r9, &make_two_character_string);
+ StringHelper::GenerateTwoCharacterStringTableProbe(
+ masm, r2, r3, r6, r0, r4, r5, r9, &make_two_character_string);
__ IncrementCounter(counters->string_add_native(), 1, r2, r3);
__ add(sp, sp, Operand(2 * kPointerSize));
__ Ret();
@@ -6443,7 +4481,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// in a little endian mode)
__ mov(r6, Operand(2));
__ AllocateAsciiString(r0, r6, r4, r5, r9, &call_runtime);
- __ strh(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
+ __ strh(r2, FieldMemOperand(r0, SeqOneByteString::kHeaderSize));
__ IncrementCounter(counters->string_add_native(), 1, r2, r3);
__ add(sp, sp, Operand(2 * kPointerSize));
__ Ret();
@@ -6461,7 +4499,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// If result is not supposed to be flat, allocate a cons string object.
// If both strings are ASCII the result is an ASCII cons string.
- if (flags_ != NO_STRING_ADD_FLAGS) {
+ if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) {
__ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
__ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
__ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
@@ -6475,32 +4513,60 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// Allocate an ASCII cons string.
__ bind(&ascii_data);
- __ AllocateAsciiConsString(r7, r6, r4, r5, &call_runtime);
+ __ AllocateAsciiConsString(r3, r6, r4, r5, &call_runtime);
__ bind(&allocated);
// Fill the fields of the cons string.
- __ str(r0, FieldMemOperand(r7, ConsString::kFirstOffset));
- __ str(r1, FieldMemOperand(r7, ConsString::kSecondOffset));
- __ mov(r0, Operand(r7));
+ Label skip_write_barrier, after_writing;
+ ExternalReference high_promotion_mode = ExternalReference::
+ new_space_high_promotion_mode_active_address(masm->isolate());
+ __ mov(r4, Operand(high_promotion_mode));
+ __ ldr(r4, MemOperand(r4, 0));
+ __ cmp(r4, Operand::Zero());
+ __ b(eq, &skip_write_barrier);
+
+ __ str(r0, FieldMemOperand(r3, ConsString::kFirstOffset));
+ __ RecordWriteField(r3,
+ ConsString::kFirstOffset,
+ r0,
+ r4,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs);
+ __ str(r1, FieldMemOperand(r3, ConsString::kSecondOffset));
+ __ RecordWriteField(r3,
+ ConsString::kSecondOffset,
+ r1,
+ r4,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs);
+ __ jmp(&after_writing);
+
+ __ bind(&skip_write_barrier);
+ __ str(r0, FieldMemOperand(r3, ConsString::kFirstOffset));
+ __ str(r1, FieldMemOperand(r3, ConsString::kSecondOffset));
+
+ __ bind(&after_writing);
+
+ __ mov(r0, Operand(r3));
__ IncrementCounter(counters->string_add_native(), 1, r2, r3);
__ add(sp, sp, Operand(2 * kPointerSize));
__ Ret();
__ bind(&non_ascii);
// At least one of the strings is two-byte. Check whether it happens
- // to contain only ASCII characters.
+ // to contain only one byte characters.
// r4: first instance type.
// r5: second instance type.
- __ tst(r4, Operand(kAsciiDataHintMask));
- __ tst(r5, Operand(kAsciiDataHintMask), ne);
+ __ tst(r4, Operand(kOneByteDataHintMask));
+ __ tst(r5, Operand(kOneByteDataHintMask), ne);
__ b(ne, &ascii_data);
__ eor(r4, r4, Operand(r5));
- STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
- __ and_(r4, r4, Operand(kAsciiStringTag | kAsciiDataHintTag));
- __ cmp(r4, Operand(kAsciiStringTag | kAsciiDataHintTag));
+ STATIC_ASSERT(kOneByteStringTag != 0 && kOneByteDataHintTag != 0);
+ __ and_(r4, r4, Operand(kOneByteStringTag | kOneByteDataHintTag));
+ __ cmp(r4, Operand(kOneByteStringTag | kOneByteDataHintTag));
__ b(eq, &ascii_data);
// Allocate a two byte cons string.
- __ AllocateTwoByteConsString(r7, r6, r4, r5, &call_runtime);
+ __ AllocateTwoByteConsString(r3, r6, r4, r5, &call_runtime);
__ jmp(&allocated);
// We cannot encounter sliced strings or cons strings here since:
@@ -6516,7 +4582,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// r6: sum of lengths.
Label first_prepared, second_prepared;
__ bind(&string_add_flat_result);
- if (flags_ != NO_STRING_ADD_FLAGS) {
+ if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) {
__ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
__ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
__ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
@@ -6524,16 +4590,17 @@ void StringAddStub::Generate(MacroAssembler* masm) {
}
// Check whether both strings have same encoding
- __ eor(r7, r4, Operand(r5));
- __ tst(r7, Operand(kStringEncodingMask));
+ __ eor(ip, r4, Operand(r5));
+ ASSERT(__ ImmediateFitsAddrMode1Instruction(kStringEncodingMask));
+ __ tst(ip, Operand(kStringEncodingMask));
__ b(ne, &call_runtime);
STATIC_ASSERT(kSeqStringTag == 0);
__ tst(r4, Operand(kStringRepresentationMask));
- STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
- __ add(r7,
+ STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
+ __ add(r6,
r0,
- Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag),
+ Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag),
LeaveCC,
eq);
__ b(eq, &first_prepared);
@@ -6541,15 +4608,15 @@ void StringAddStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(kShortExternalStringTag != 0);
__ tst(r4, Operand(kShortExternalStringMask));
__ b(ne, &call_runtime);
- __ ldr(r7, FieldMemOperand(r0, ExternalString::kResourceDataOffset));
+ __ ldr(r6, FieldMemOperand(r0, ExternalString::kResourceDataOffset));
__ bind(&first_prepared);
STATIC_ASSERT(kSeqStringTag == 0);
__ tst(r5, Operand(kStringRepresentationMask));
- STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
+ STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
__ add(r1,
r1,
- Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag),
+ Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag),
LeaveCC,
eq);
__ b(eq, &second_prepared);
@@ -6561,43 +4628,46 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ bind(&second_prepared);
Label non_ascii_string_add_flat_result;
- // r7: first character of first string
+ // r6: first character of first string
// r1: first character of second string
// r2: length of first string.
// r3: length of second string.
- // r6: sum of lengths.
// Both strings have the same encoding.
STATIC_ASSERT(kTwoByteStringTag == 0);
__ tst(r5, Operand(kStringEncodingMask));
__ b(eq, &non_ascii_string_add_flat_result);
- __ AllocateAsciiString(r0, r6, r4, r5, r9, &call_runtime);
- __ add(r6, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ add(r2, r2, Operand(r3));
+ __ AllocateAsciiString(r0, r2, r4, r5, r9, &call_runtime);
+ __ sub(r2, r2, Operand(r3));
+ __ add(r5, r0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
// r0: result string.
- // r7: first character of first string.
+ // r6: first character of first string.
// r1: first character of second string.
// r2: length of first string.
// r3: length of second string.
- // r6: first character of result.
- StringHelper::GenerateCopyCharacters(masm, r6, r7, r2, r4, true);
- // r6: next character of result.
- StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, true);
+ // r5: first character of result.
+ StringHelper::GenerateCopyCharacters(masm, r5, r6, r2, r4, true);
+ // r5: next character of result.
+ StringHelper::GenerateCopyCharacters(masm, r5, r1, r3, r4, true);
__ IncrementCounter(counters->string_add_native(), 1, r2, r3);
__ add(sp, sp, Operand(2 * kPointerSize));
__ Ret();
__ bind(&non_ascii_string_add_flat_result);
- __ AllocateTwoByteString(r0, r6, r4, r5, r9, &call_runtime);
- __ add(r6, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ __ add(r2, r2, Operand(r3));
+ __ AllocateTwoByteString(r0, r2, r4, r5, r9, &call_runtime);
+ __ sub(r2, r2, Operand(r3));
+ __ add(r5, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
// r0: result string.
- // r7: first character of first string.
+ // r6: first character of first string.
// r1: first character of second string.
// r2: length of first string.
// r3: length of second string.
- // r6: first character of result.
- StringHelper::GenerateCopyCharacters(masm, r6, r7, r2, r4, false);
- // r6: next character of result.
- StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, false);
+ // r5: first character of result.
+ StringHelper::GenerateCopyCharacters(masm, r5, r6, r2, r4, false);
+ // r5: next character of result.
+ StringHelper::GenerateCopyCharacters(masm, r5, r1, r3, r4, false);
__ IncrementCounter(counters->string_add_native(), 1, r2, r3);
__ add(sp, sp, Operand(2 * kPointerSize));
__ Ret();
@@ -6613,6 +4683,18 @@ void StringAddStub::Generate(MacroAssembler* masm) {
}
+void StringAddStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
+ __ push(r0);
+ __ push(r1);
+}
+
+
+void StringAddStub::GenerateRegisterArgsPop(MacroAssembler* masm) {
+ __ pop(r1);
+ __ pop(r0);
+}
+
+
void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
int stack_offset,
Register arg,
@@ -6628,42 +4710,17 @@ void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
__ b(lt, &done);
// Check the number to string cache.
- Label not_cached;
__ bind(&not_string);
// Puts the cached result into scratch1.
- NumberToStringStub::GenerateLookupNumberStringCache(masm,
- arg,
- scratch1,
- scratch2,
- scratch3,
- scratch4,
- false,
- &not_cached);
+ __ LookupNumberStringCache(arg, scratch1, scratch2, scratch3, scratch4, slow);
__ mov(arg, scratch1);
__ str(arg, MemOperand(sp, stack_offset));
- __ jmp(&done);
-
- // Check if the argument is a safe string wrapper.
- __ bind(&not_cached);
- __ JumpIfSmi(arg, slow);
- __ CompareObjectType(
- arg, scratch1, scratch2, JS_VALUE_TYPE); // map -> scratch1.
- __ b(ne, slow);
- __ ldrb(scratch2, FieldMemOperand(scratch1, Map::kBitField2Offset));
- __ and_(scratch2,
- scratch2, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
- __ cmp(scratch2,
- Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
- __ b(ne, slow);
- __ ldr(arg, FieldMemOperand(arg, JSValue::kValueOffset));
- __ str(arg, MemOperand(sp, stack_offset));
-
__ bind(&done);
}
void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::SMIS);
+ ASSERT(state_ == CompareIC::SMI);
Label miss;
__ orr(r2, r1, r0);
__ JumpIfNotSmi(r2, &miss);
@@ -6674,7 +4731,7 @@ void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
} else {
// Untag before subtracting to avoid handling overflow.
__ SmiUntag(r1);
- __ sub(r0, r1, SmiUntagOperand(r0));
+ __ sub(r0, r1, Operand::SmiUntag(r0));
}
__ Ret();
@@ -6683,53 +4740,67 @@ void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
}
-void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::HEAP_NUMBERS);
+void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
+ ASSERT(state_ == CompareIC::NUMBER);
Label generic_stub;
Label unordered, maybe_undefined1, maybe_undefined2;
Label miss;
- __ and_(r2, r1, Operand(r0));
- __ JumpIfSmi(r2, &generic_stub);
- __ CompareObjectType(r0, r2, r2, HEAP_NUMBER_TYPE);
- __ b(ne, &maybe_undefined1);
- __ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE);
- __ b(ne, &maybe_undefined2);
+ if (left_ == CompareIC::SMI) {
+ __ JumpIfNotSmi(r1, &miss);
+ }
+ if (right_ == CompareIC::SMI) {
+ __ JumpIfNotSmi(r0, &miss);
+ }
// Inlining the double comparison and falling back to the general compare
- // stub if NaN is involved or VFP3 is unsupported.
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
-
- // Load left and right operand
- __ sub(r2, r1, Operand(kHeapObjectTag));
- __ vldr(d0, r2, HeapNumber::kValueOffset);
- __ sub(r2, r0, Operand(kHeapObjectTag));
- __ vldr(d1, r2, HeapNumber::kValueOffset);
-
- // Compare operands
- __ VFPCompareAndSetFlags(d0, d1);
-
- // Don't base result on status bits when a NaN is involved.
- __ b(vs, &unordered);
-
- // Return a result of -1, 0, or 1, based on status bits.
- __ mov(r0, Operand(EQUAL), LeaveCC, eq);
- __ mov(r0, Operand(LESS), LeaveCC, lt);
- __ mov(r0, Operand(GREATER), LeaveCC, gt);
- __ Ret();
- }
+ // stub if NaN is involved.
+ // Load left and right operand.
+ Label done, left, left_smi, right_smi;
+ __ JumpIfSmi(r0, &right_smi);
+ __ CheckMap(r0, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
+ DONT_DO_SMI_CHECK);
+ __ sub(r2, r0, Operand(kHeapObjectTag));
+ __ vldr(d1, r2, HeapNumber::kValueOffset);
+ __ b(&left);
+ __ bind(&right_smi);
+ __ SmiToDouble(d1, r0);
+
+ __ bind(&left);
+ __ JumpIfSmi(r1, &left_smi);
+ __ CheckMap(r1, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
+ DONT_DO_SMI_CHECK);
+ __ sub(r2, r1, Operand(kHeapObjectTag));
+ __ vldr(d0, r2, HeapNumber::kValueOffset);
+ __ b(&done);
+ __ bind(&left_smi);
+ __ SmiToDouble(d0, r1);
+
+ __ bind(&done);
+ // Compare operands.
+ __ VFPCompareAndSetFlags(d0, d1);
+
+ // Don't base result on status bits when a NaN is involved.
+ __ b(vs, &unordered);
+
+ // Return a result of -1, 0, or 1, based on status bits.
+ __ mov(r0, Operand(EQUAL), LeaveCC, eq);
+ __ mov(r0, Operand(LESS), LeaveCC, lt);
+ __ mov(r0, Operand(GREATER), LeaveCC, gt);
+ __ Ret();
__ bind(&unordered);
- CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, r1, r0);
__ bind(&generic_stub);
- __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+ ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC,
+ CompareIC::GENERIC);
+ __ Jump(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
__ bind(&maybe_undefined1);
if (Token::IsOrderedRelationalCompareOp(op_)) {
__ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
__ b(ne, &miss);
+ __ JumpIfSmi(r1, &unordered);
__ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE);
__ b(ne, &maybe_undefined2);
__ jmp(&unordered);
@@ -6746,8 +4817,8 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
}
-void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::SYMBOLS);
+void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
+ ASSERT(state_ == CompareIC::INTERNALIZED_STRING);
Label miss;
// Registers containing left and right operands respectively.
@@ -6759,17 +4830,56 @@ void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
// Check that both operands are heap objects.
__ JumpIfEitherSmi(left, right, &miss);
- // Check that both operands are symbols.
+ // Check that both operands are internalized strings.
__ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
__ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
__ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
__ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kSymbolTag != 0);
- __ and_(tmp1, tmp1, Operand(tmp2));
- __ tst(tmp1, Operand(kIsSymbolMask));
- __ b(eq, &miss);
+ STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
+ __ orr(tmp1, tmp1, Operand(tmp2));
+ __ tst(tmp1, Operand(kIsNotStringMask | kIsNotInternalizedMask));
+ __ b(ne, &miss);
+
+ // Internalized strings are compared by identity.
+ __ cmp(left, right);
+ // Make sure r0 is non-zero. At this point input operands are
+ // guaranteed to be non-zero.
+ ASSERT(right.is(r0));
+ STATIC_ASSERT(EQUAL == 0);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq);
+ __ Ret();
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
+ ASSERT(state_ == CompareIC::UNIQUE_NAME);
+ ASSERT(GetCondition() == eq);
+ Label miss;
+
+ // Registers containing left and right operands respectively.
+ Register left = r1;
+ Register right = r0;
+ Register tmp1 = r2;
+ Register tmp2 = r3;
+
+ // Check that both operands are heap objects.
+ __ JumpIfEitherSmi(left, right, &miss);
- // Symbols are compared by identity.
+ // Check that both operands are unique names. This leaves the instance
+ // types loaded in tmp1 and tmp2.
+ __ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
+ __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
+ __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
+ __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
+
+ __ JumpIfNotUniqueName(tmp1, &miss);
+ __ JumpIfNotUniqueName(tmp2, &miss);
+
+ // Unique names are compared by identity.
__ cmp(left, right);
// Make sure r0 is non-zero. At this point input operands are
// guaranteed to be non-zero.
@@ -6785,7 +4895,7 @@ void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::STRINGS);
+ ASSERT(state_ == CompareIC::STRING);
Label miss;
bool equality = Token::IsEqualityOp(op_);
@@ -6821,17 +4931,18 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
// Handle not identical strings.
- // Check that both strings are symbols. If they are, we're done
- // because we already know they are not identical.
+ // Check that both strings are internalized strings. If they are, we're done
+ // because we already know they are not identical. We know they are both
+ // strings.
if (equality) {
ASSERT(GetCondition() == eq);
- STATIC_ASSERT(kSymbolTag != 0);
- __ and_(tmp3, tmp1, Operand(tmp2));
- __ tst(tmp3, Operand(kIsSymbolMask));
+ STATIC_ASSERT(kInternalizedTag == 0);
+ __ orr(tmp3, tmp1, Operand(tmp2));
+ __ tst(tmp3, Operand(kIsNotInternalizedMask));
// Make sure r0 is non-zero. At this point input operands are
// guaranteed to be non-zero.
ASSERT(right.is(r0));
- __ Ret(ne);
+ __ Ret(eq);
}
// Check that both strings are sequential ASCII.
@@ -6863,7 +4974,7 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::OBJECTS);
+ ASSERT(state_ == CompareIC::OBJECT);
Label miss;
__ and_(r2, r1, Operand(r0));
__ JumpIfSmi(r2, &miss);
@@ -6928,44 +5039,33 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
void DirectCEntryStub::Generate(MacroAssembler* masm) {
+ // Place the return address on the stack, making the call
+ // GC safe. The RegExp backend also relies on this.
+ __ str(lr, MemOperand(sp, 0));
+ __ blx(ip); // Call the C++ function.
+ __ VFPEnsureFPSCRState(r2);
__ ldr(pc, MemOperand(sp, 0));
}
void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
- ExternalReference function) {
- __ mov(r2, Operand(function));
- GenerateCall(masm, r2);
-}
-
-
-void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
Register target) {
- __ mov(lr, Operand(reinterpret_cast<intptr_t>(GetCode().location()),
- RelocInfo::CODE_TARGET));
-
- // Prevent literal pool emission during calculation of return address.
- Assembler::BlockConstPoolScope block_const_pool(masm);
-
- // Push return address (accessible to GC through exit frame pc).
- // Note that using pc with str is deprecated.
- Label start;
- __ bind(&start);
- __ add(ip, pc, Operand(Assembler::kInstrSize));
- __ str(ip, MemOperand(sp, 0));
- __ Jump(target); // Call the C++ function.
- ASSERT_EQ(Assembler::kInstrSize + Assembler::kPcLoadDelta,
- masm->SizeOfCodeGeneratedSince(&start));
-}
-
-
-void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register receiver,
- Register properties,
- Handle<String> name,
- Register scratch0) {
+ intptr_t code =
+ reinterpret_cast<intptr_t>(GetCode(masm->isolate()).location());
+ __ Move(ip, target);
+ __ mov(lr, Operand(code, RelocInfo::CODE_TARGET));
+ __ blx(lr); // Call the stub.
+}
+
+
+void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register receiver,
+ Register properties,
+ Handle<Name> name,
+ Register scratch0) {
+ ASSERT(name->IsUniqueName());
// If names of slots in range from 1 to kProbes - 1 for the hash value are
// not equal to the name and kProbes-th slot is not used (its name is the
// undefined value), it guarantees the hash table doesn't contain the
@@ -6979,10 +5079,10 @@ void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
__ ldr(index, FieldMemOperand(properties, kCapacityOffset));
__ sub(index, index, Operand(1));
__ and_(index, index, Operand(
- Smi::FromInt(name->Hash() + StringDictionary::GetProbeOffset(i))));
+ Smi::FromInt(name->Hash() + NameDictionary::GetProbeOffset(i))));
// Scale the index by multiplying by the entry size.
- ASSERT(StringDictionary::kEntrySize == 3);
+ ASSERT(NameDictionary::kEntrySize == 3);
__ add(index, index, Operand(index, LSL, 1)); // index *= 3.
Register entity_name = scratch0;
@@ -6997,31 +5097,27 @@ void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
__ cmp(entity_name, tmp);
__ b(eq, done);
- if (i != kInlinedProbes - 1) {
- // Load the hole ready for use below:
- __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex);
+ // Load the hole ready for use below:
+ __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex);
- // Stop if found the property.
- __ cmp(entity_name, Operand(Handle<String>(name)));
- __ b(eq, miss);
-
- Label the_hole;
- __ cmp(entity_name, tmp);
- __ b(eq, &the_hole);
+ // Stop if found the property.
+ __ cmp(entity_name, Operand(Handle<Name>(name)));
+ __ b(eq, miss);
- // Check if the entry name is not a symbol.
- __ ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
- __ ldrb(entity_name,
- FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
- __ tst(entity_name, Operand(kIsSymbolMask));
- __ b(eq, miss);
+ Label good;
+ __ cmp(entity_name, tmp);
+ __ b(eq, &good);
- __ bind(&the_hole);
+ // Check if the entry name is not a unique name.
+ __ ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
+ __ ldrb(entity_name,
+ FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
+ __ JumpIfNotUniqueName(entity_name, miss);
+ __ bind(&good);
- // Restore the properties.
- __ ldr(properties,
- FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- }
+ // Restore the properties.
+ __ ldr(properties,
+ FieldMemOperand(receiver, JSObject::kPropertiesOffset));
}
const int spill_mask =
@@ -7030,10 +5126,10 @@ void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
__ stm(db_w, sp, spill_mask);
__ ldr(r0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- __ mov(r1, Operand(Handle<String>(name)));
- StringDictionaryLookupStub stub(NEGATIVE_LOOKUP);
+ __ mov(r1, Operand(Handle<Name>(name)));
+ NameDictionaryLookupStub stub(NEGATIVE_LOOKUP);
__ CallStub(&stub);
- __ cmp(r0, Operand(0));
+ __ cmp(r0, Operand::Zero());
__ ldm(ia_w, sp, spill_mask);
__ b(eq, done);
@@ -7041,27 +5137,27 @@ void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
}
-// Probe the string dictionary in the |elements| register. Jump to the
+// Probe the name dictionary in the |elements| register. Jump to the
// |done| label if a property with the given name is found. Jump to
// the |miss| label otherwise.
// If lookup was successful |scratch2| will be equal to elements + 4 * index.
-void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register elements,
- Register name,
- Register scratch1,
- Register scratch2) {
+void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register elements,
+ Register name,
+ Register scratch1,
+ Register scratch2) {
ASSERT(!elements.is(scratch1));
ASSERT(!elements.is(scratch2));
ASSERT(!name.is(scratch1));
ASSERT(!name.is(scratch2));
- __ AssertString(name);
+ __ AssertName(name);
// Compute the capacity mask.
__ ldr(scratch1, FieldMemOperand(elements, kCapacityOffset));
- __ mov(scratch1, Operand(scratch1, ASR, kSmiTagSize)); // convert smi to int
+ __ SmiUntag(scratch1);
__ sub(scratch1, scratch1, Operand(1));
// Generate an unrolled loop that performs a few probes before
@@ -7069,20 +5165,20 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
// cover ~93% of loads from dictionaries.
for (int i = 0; i < kInlinedProbes; i++) {
// Compute the masked index: (hash + i + i * i) & mask.
- __ ldr(scratch2, FieldMemOperand(name, String::kHashFieldOffset));
+ __ ldr(scratch2, FieldMemOperand(name, Name::kHashFieldOffset));
if (i > 0) {
// Add the probe offset (i + i * i) left shifted to avoid right shifting
// the hash in a separate instruction. The value hash + i + i * i is right
// shifted in the following and instruction.
- ASSERT(StringDictionary::GetProbeOffset(i) <
- 1 << (32 - String::kHashFieldOffset));
+ ASSERT(NameDictionary::GetProbeOffset(i) <
+ 1 << (32 - Name::kHashFieldOffset));
__ add(scratch2, scratch2, Operand(
- StringDictionary::GetProbeOffset(i) << String::kHashShift));
+ NameDictionary::GetProbeOffset(i) << Name::kHashShift));
}
- __ and_(scratch2, scratch1, Operand(scratch2, LSR, String::kHashShift));
+ __ and_(scratch2, scratch1, Operand(scratch2, LSR, Name::kHashShift));
// Scale the index by multiplying by the element size.
- ASSERT(StringDictionary::kEntrySize == 3);
+ ASSERT(NameDictionary::kEntrySize == 3);
// scratch2 = scratch2 * 3.
__ add(scratch2, scratch2, Operand(scratch2, LSL, 1));
@@ -7107,9 +5203,9 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
__ Move(r0, elements);
__ Move(r1, name);
}
- StringDictionaryLookupStub stub(POSITIVE_LOOKUP);
+ NameDictionaryLookupStub stub(POSITIVE_LOOKUP);
__ CallStub(&stub);
- __ cmp(r0, Operand(0));
+ __ cmp(r0, Operand::Zero());
__ mov(scratch2, Operand(r2));
__ ldm(ia_w, sp, spill_mask);
@@ -7118,15 +5214,15 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
}
-void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
+void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
// This stub overrides SometimesSetsUpAFrame() to return false. That means
// we cannot call anything that could cause a GC from this stub.
// Registers:
- // result: StringDictionary to probe
+ // result: NameDictionary to probe
// r1: key
- // : StringDictionary to probe.
- // index_: will hold an index of entry if lookup is successful.
- // might alias with result_.
+ // dictionary: NameDictionary to probe.
+ // index: will hold an index of entry if lookup is successful.
+ // might alias with result_.
// Returns:
// result_ is zero if lookup failed, non zero otherwise.
@@ -7142,10 +5238,10 @@ void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
__ ldr(mask, FieldMemOperand(dictionary, kCapacityOffset));
- __ mov(mask, Operand(mask, ASR, kSmiTagSize));
+ __ SmiUntag(mask);
__ sub(mask, mask, Operand(1));
- __ ldr(hash, FieldMemOperand(key, String::kHashFieldOffset));
+ __ ldr(hash, FieldMemOperand(key, Name::kHashFieldOffset));
__ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
@@ -7156,17 +5252,17 @@ void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
// Add the probe offset (i + i * i) left shifted to avoid right shifting
// the hash in a separate instruction. The value hash + i + i * i is right
// shifted in the following and instruction.
- ASSERT(StringDictionary::GetProbeOffset(i) <
- 1 << (32 - String::kHashFieldOffset));
+ ASSERT(NameDictionary::GetProbeOffset(i) <
+ 1 << (32 - Name::kHashFieldOffset));
__ add(index, hash, Operand(
- StringDictionary::GetProbeOffset(i) << String::kHashShift));
+ NameDictionary::GetProbeOffset(i) << Name::kHashShift));
} else {
__ mov(index, Operand(hash));
}
- __ and_(index, mask, Operand(index, LSR, String::kHashShift));
+ __ and_(index, mask, Operand(index, LSR, Name::kHashShift));
// Scale the index by multiplying by the entry size.
- ASSERT(StringDictionary::kEntrySize == 3);
+ ASSERT(NameDictionary::kEntrySize == 3);
__ add(index, index, Operand(index, LSL, 1)); // index *= 3.
ASSERT_EQ(kSmiTagSize, 1);
@@ -7182,12 +5278,11 @@ void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
__ b(eq, &in_dictionary);
if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
- // Check if the entry name is not a symbol.
+ // Check if the entry name is not a unique name.
__ ldr(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
__ ldrb(entry_key,
FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
- __ tst(entry_key, Operand(kIsSymbolMask));
- __ b(eq, &maybe_in_dictionary);
+ __ JumpIfNotUniqueName(entry_key, &maybe_in_dictionary);
}
}
@@ -7215,18 +5310,16 @@ struct AheadOfTimeWriteBarrierStubList {
RememberedSetAction action;
};
+
#define REG(Name) { kRegister_ ## Name ## _Code }
static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
// Used in RegExpExecStub.
- { REG(r6), REG(r4), REG(r7), EMIT_REMEMBERED_SET },
- { REG(r6), REG(r2), REG(r7), EMIT_REMEMBERED_SET },
+ { REG(r6), REG(r4), REG(r3), EMIT_REMEMBERED_SET },
// Used in CompileArrayPushCall.
// Also used in StoreIC::GenerateNormal via GenerateDictionaryStore.
// Also used in KeyedStoreIC::GenerateGeneric.
{ REG(r3), REG(r4), REG(r5), EMIT_REMEMBERED_SET },
- // Used in CompileStoreGlobal.
- { REG(r4), REG(r1), REG(r2), OMIT_REMEMBERED_SET },
// Used in StoreStubCompiler::CompileStoreField via GenerateStoreField.
{ REG(r1), REG(r2), REG(r3), EMIT_REMEMBERED_SET },
{ REG(r3), REG(r2), REG(r1), EMIT_REMEMBERED_SET },
@@ -7248,6 +5341,9 @@ static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
{ REG(r5), REG(r0), REG(r6), EMIT_REMEMBERED_SET },
// FastNewClosureStub::Generate
{ REG(r2), REG(r4), REG(r1), EMIT_REMEMBERED_SET },
+ // StringAddStub::Generate
+ { REG(r3), REG(r1), REG(r4), EMIT_REMEMBERED_SET },
+ { REG(r3), REG(r0), REG(r4), EMIT_REMEMBERED_SET },
// Null termination.
{ REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
};
@@ -7255,7 +5351,7 @@ static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
#undef REG
-bool RecordWriteStub::IsPregenerated() {
+bool RecordWriteStub::IsPregenerated(Isolate* isolate) {
for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
!entry->object.is(no_reg);
entry++) {
@@ -7271,18 +5367,17 @@ bool RecordWriteStub::IsPregenerated() {
}
-bool StoreBufferOverflowStub::IsPregenerated() {
- return save_doubles_ == kDontSaveFPRegs || ISOLATE->fp_stubs_generated();
-}
-
-
-void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime() {
+void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
+ Isolate* isolate) {
StoreBufferOverflowStub stub1(kDontSaveFPRegs);
- stub1.GetCode()->set_is_pregenerated(true);
+ stub1.GetCode(isolate)->set_is_pregenerated(true);
+ // Hydrogen code stubs need stub2 at snapshot time.
+ StoreBufferOverflowStub stub2(kSaveFPRegs);
+ stub2.GetCode(isolate)->set_is_pregenerated(true);
}
-void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
+void RecordWriteStub::GenerateFixedRegStubsAheadOfTime(Isolate* isolate) {
for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
!entry->object.is(no_reg);
entry++) {
@@ -7291,13 +5386,13 @@ void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
entry->address,
entry->action,
kDontSaveFPRegs);
- stub.GetCode()->set_is_pregenerated(true);
+ stub.GetCode(isolate)->set_is_pregenerated(true);
}
}
bool CodeStub::CanUseFPRegisters() {
- return CpuFeatures::IsSupported(VFP2);
+ return true; // VFP2 is a base requirement for V8
}
@@ -7396,13 +5491,8 @@ void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
ASSERT(!address.is(r0));
__ Move(address, regs_.address());
__ Move(r0, regs_.object());
- if (mode == INCREMENTAL_COMPACTION) {
- __ Move(r1, address);
- } else {
- ASSERT(mode == INCREMENTAL);
- __ ldr(r1, MemOperand(address, 0));
- }
- __ mov(r2, Operand(ExternalReference::isolate_address()));
+ __ Move(r1, address);
+ __ mov(r2, Operand(ExternalReference::isolate_address(masm->isolate())));
AllowExternalCallThatCantCauseGC scope(masm);
if (mode == INCREMENTAL_COMPACTION) {
@@ -7510,10 +5600,10 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : element value to store
- // -- r1 : array literal
- // -- r2 : map of array literal
// -- r3 : element index as smi
- // -- r4 : array literal index in function as smi
+ // -- sp[0] : array literal index in function as smi
+ // -- sp[4] : array literal
+ // clobbers r1, r2, r4
// -----------------------------------
Label element_done;
@@ -7522,6 +5612,11 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
Label slow_elements;
Label fast_elements;
+ // Get array literal index, array literal and its map.
+ __ ldr(r4, MemOperand(sp, 0 * kPointerSize));
+ __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
+ __ ldr(r2, FieldMemOperand(r1, JSObject::kMapOffset));
+
__ CheckFastElements(r2, r5, &double_elements);
// FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS
__ JumpIfSmi(r0, &smi_element);
@@ -7540,7 +5635,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
// Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
__ bind(&fast_elements);
__ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
- __ add(r6, r5, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ add(r6, r5, Operand::PointerOffsetFromSmiKey(r3));
__ add(r6, r6, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ str(r0, MemOperand(r6, 0));
// Update the write barrier for the array store.
@@ -7552,24 +5647,38 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
// and value is Smi.
__ bind(&smi_element);
__ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
- __ add(r6, r5, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ add(r6, r5, Operand::PointerOffsetFromSmiKey(r3));
__ str(r0, FieldMemOperand(r6, FixedArray::kHeaderSize));
__ Ret();
// Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
__ bind(&double_elements);
__ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
- __ StoreNumberToDoubleElements(r0, r3, r1,
- // Overwrites all regs after this.
- r5, r6, r7, r9, r2,
- &slow_elements);
+ __ StoreNumberToDoubleElements(r0, r3, r5, r6, d0, &slow_elements);
+ __ Ret();
+}
+
+
+void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
+ CEntryStub ces(1, fp_registers_ ? kSaveFPRegs : kDontSaveFPRegs);
+ __ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
+ int parameter_count_offset =
+ StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
+ __ ldr(r1, MemOperand(fp, parameter_count_offset));
+ if (function_mode_ == JS_FUNCTION_STUB_MODE) {
+ __ add(r1, r1, Operand(1));
+ }
+ masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
+ __ mov(r1, Operand(r1, LSL, kPointerSizeLog2));
+ __ add(sp, sp, r1);
__ Ret();
}
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
- if (entry_hook_ != NULL) {
- PredictableCodeSizeScope predictable(masm);
+ if (masm->isolate()->function_entry_hook() != NULL) {
+ PredictableCodeSizeScope predictable(masm, 4 * Assembler::kInstrSize);
+ AllowStubCallsScope allow_stub_calls(masm, true);
ProfileEntryHookStub stub;
__ push(lr);
__ CallStub(&stub);
@@ -7583,9 +5692,21 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
const int32_t kReturnAddressDistanceFromFunctionStart =
3 * Assembler::kInstrSize;
- // Save live volatile registers.
- __ Push(lr, r5, r1);
- const int32_t kNumSavedRegs = 3;
+ // This should contain all kCallerSaved registers.
+ const RegList kSavedRegs =
+ 1 << 0 | // r0
+ 1 << 1 | // r1
+ 1 << 2 | // r2
+ 1 << 3 | // r3
+ 1 << 5 | // r5
+ 1 << 9; // r9
+ // We also save lr, so the count here is one higher than the mask indicates.
+ const int32_t kNumSavedRegs = 7;
+
+ ASSERT((kCallerSaved & kSavedRegs) == kCallerSaved);
+
+ // Save all caller-save registers as this may be called from anywhere.
+ __ stm(db_w, sp, kSavedRegs | lr.bit());
// Compute the function's address for the first argument.
__ sub(r0, lr, Operand(kReturnAddressDistanceFromFunctionStart));
@@ -7602,15 +5723,17 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
__ and_(sp, sp, Operand(-frame_alignment));
}
-#if defined(V8_HOST_ARCH_ARM)
- __ mov(ip, Operand(reinterpret_cast<int32_t>(&entry_hook_)));
- __ ldr(ip, MemOperand(ip));
+#if V8_HOST_ARCH_ARM
+ int32_t entry_hook =
+ reinterpret_cast<int32_t>(masm->isolate()->function_entry_hook());
+ __ mov(ip, Operand(entry_hook));
#else
// Under the simulator we need to indirect the entry hook through a
// trampoline function at a known address.
- Address trampoline_address = reinterpret_cast<Address>(
- reinterpret_cast<intptr_t>(EntryHookTrampoline));
- ApiFunction dispatcher(trampoline_address);
+ // It additionally takes an isolate as a third parameter.
+ __ mov(r2, Operand(ExternalReference::isolate_address(masm->isolate())));
+
+ ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
__ mov(ip, Operand(ExternalReference(&dispatcher,
ExternalReference::BUILTIN_CALL,
masm->isolate())));
@@ -7622,10 +5745,331 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
__ mov(sp, r5);
}
- __ Pop(lr, r5, r1);
- __ Ret();
+ // Also pop pc to get Ret(0).
+ __ ldm(ia_w, sp, kSavedRegs | pc.bit());
+}
+
+
+template<class T>
+static void CreateArrayDispatch(MacroAssembler* masm,
+ AllocationSiteOverrideMode mode) {
+ if (mode == DISABLE_ALLOCATION_SITES) {
+ T stub(GetInitialFastElementsKind(),
+ CONTEXT_CHECK_REQUIRED,
+ mode);
+ __ TailCallStub(&stub);
+ } else if (mode == DONT_OVERRIDE) {
+ int last_index = GetSequenceIndexFromFastElementsKind(
+ TERMINAL_FAST_ELEMENTS_KIND);
+ for (int i = 0; i <= last_index; ++i) {
+ Label next;
+ ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
+ __ cmp(r3, Operand(kind));
+ __ b(ne, &next);
+ T stub(kind);
+ __ TailCallStub(&stub);
+ __ bind(&next);
+ }
+
+ // If we reached this point there is a problem.
+ __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
+ AllocationSiteOverrideMode mode) {
+ // r2 - type info cell (if mode != DISABLE_ALLOCATION_SITES)
+ // r3 - kind (if mode != DISABLE_ALLOCATION_SITES)
+ // r0 - number of arguments
+ // r1 - constructor?
+ // sp[0] - last argument
+ Label normal_sequence;
+ if (mode == DONT_OVERRIDE) {
+ ASSERT(FAST_SMI_ELEMENTS == 0);
+ ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ ASSERT(FAST_ELEMENTS == 2);
+ ASSERT(FAST_HOLEY_ELEMENTS == 3);
+ ASSERT(FAST_DOUBLE_ELEMENTS == 4);
+ ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
+
+ // is the low bit set? If so, we are holey and that is good.
+ __ tst(r3, Operand(1));
+ __ b(ne, &normal_sequence);
+ }
+
+ // look at the first argument
+ __ ldr(r5, MemOperand(sp, 0));
+ __ cmp(r5, Operand::Zero());
+ __ b(eq, &normal_sequence);
+
+ if (mode == DISABLE_ALLOCATION_SITES) {
+ ElementsKind initial = GetInitialFastElementsKind();
+ ElementsKind holey_initial = GetHoleyElementsKind(initial);
+
+ ArraySingleArgumentConstructorStub stub_holey(holey_initial,
+ CONTEXT_CHECK_REQUIRED,
+ DISABLE_ALLOCATION_SITES);
+ __ TailCallStub(&stub_holey);
+
+ __ bind(&normal_sequence);
+ ArraySingleArgumentConstructorStub stub(initial,
+ CONTEXT_CHECK_REQUIRED,
+ DISABLE_ALLOCATION_SITES);
+ __ TailCallStub(&stub);
+ } else if (mode == DONT_OVERRIDE) {
+ // We are going to create a holey array, but our kind is non-holey.
+ // Fix kind and retry (only if we have an allocation site in the cell).
+ __ add(r3, r3, Operand(1));
+ __ ldr(r5, FieldMemOperand(r2, Cell::kValueOffset));
+
+ if (FLAG_debug_code) {
+ __ ldr(r5, FieldMemOperand(r5, 0));
+ __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
+ __ Assert(eq, kExpectedAllocationSiteInCell);
+ __ ldr(r5, FieldMemOperand(r2, Cell::kValueOffset));
+ }
+
+ // Save the resulting elements kind in type info
+ __ SmiTag(r3);
+ __ ldr(r5, FieldMemOperand(r2, Cell::kValueOffset));
+ __ str(r3, FieldMemOperand(r5, AllocationSite::kTransitionInfoOffset));
+ __ SmiUntag(r3);
+
+ __ bind(&normal_sequence);
+ int last_index = GetSequenceIndexFromFastElementsKind(
+ TERMINAL_FAST_ELEMENTS_KIND);
+ for (int i = 0; i <= last_index; ++i) {
+ Label next;
+ ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
+ __ cmp(r3, Operand(kind));
+ __ b(ne, &next);
+ ArraySingleArgumentConstructorStub stub(kind);
+ __ TailCallStub(&stub);
+ __ bind(&next);
+ }
+
+ // If we reached this point there is a problem.
+ __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+template<class T>
+static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
+ ElementsKind initial_kind = GetInitialFastElementsKind();
+ ElementsKind initial_holey_kind = GetHoleyElementsKind(initial_kind);
+
+ int to_index = GetSequenceIndexFromFastElementsKind(
+ TERMINAL_FAST_ELEMENTS_KIND);
+ for (int i = 0; i <= to_index; ++i) {
+ ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
+ T stub(kind);
+ stub.GetCode(isolate)->set_is_pregenerated(true);
+ if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE ||
+ (!FLAG_track_allocation_sites &&
+ (kind == initial_kind || kind == initial_holey_kind))) {
+ T stub1(kind, CONTEXT_CHECK_REQUIRED, DISABLE_ALLOCATION_SITES);
+ stub1.GetCode(isolate)->set_is_pregenerated(true);
+ }
+ }
}
+
+void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
+ ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
+ isolate);
+ ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
+ isolate);
+ ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
+ isolate);
+}
+
+
+void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
+ Isolate* isolate) {
+ ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
+ for (int i = 0; i < 2; i++) {
+ // For internal arrays we only need a few things
+ InternalArrayNoArgumentConstructorStub stubh1(kinds[i]);
+ stubh1.GetCode(isolate)->set_is_pregenerated(true);
+ InternalArraySingleArgumentConstructorStub stubh2(kinds[i]);
+ stubh2.GetCode(isolate)->set_is_pregenerated(true);
+ InternalArrayNArgumentsConstructorStub stubh3(kinds[i]);
+ stubh3.GetCode(isolate)->set_is_pregenerated(true);
+ }
+}
+
+
+void ArrayConstructorStub::GenerateDispatchToArrayStub(
+ MacroAssembler* masm,
+ AllocationSiteOverrideMode mode) {
+ if (argument_count_ == ANY) {
+ Label not_zero_case, not_one_case;
+ __ tst(r0, r0);
+ __ b(ne, &not_zero_case);
+ CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
+
+ __ bind(&not_zero_case);
+ __ cmp(r0, Operand(1));
+ __ b(gt, &not_one_case);
+ CreateArrayDispatchOneArgument(masm, mode);
+
+ __ bind(&not_one_case);
+ CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+ } else if (argument_count_ == NONE) {
+ CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
+ } else if (argument_count_ == ONE) {
+ CreateArrayDispatchOneArgument(masm, mode);
+ } else if (argument_count_ == MORE_THAN_ONE) {
+ CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+void ArrayConstructorStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r0 : argc (only if argument_count_ == ANY)
+ // -- r1 : constructor
+ // -- r2 : type info cell
+ // -- sp[0] : return address
+ // -- sp[4] : last argument
+ // -----------------------------------
+ if (FLAG_debug_code) {
+ // The array construct code is only set for the global and natives
+ // builtin Array functions which always have maps.
+
+ // Initial map for the builtin Array function should be a map.
+ __ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
+ // This test will catch both a NULL and a Smi.
+ __ tst(r3, Operand(kSmiTagMask));
+ __ Assert(ne, kUnexpectedInitialMapForArrayFunction);
+ __ CompareObjectType(r3, r3, r4, MAP_TYPE);
+ __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
+
+ // We should either have undefined in r2 or a valid cell
+ Label okay_here;
+ Handle<Map> cell_map = masm->isolate()->factory()->cell_map();
+ __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
+ __ b(eq, &okay_here);
+ __ ldr(r3, FieldMemOperand(r2, 0));
+ __ cmp(r3, Operand(cell_map));
+ __ Assert(eq, kExpectedPropertyCellInRegisterEbx);
+ __ bind(&okay_here);
+ }
+
+ Label no_info;
+ // Get the elements kind and case on that.
+ __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
+ __ b(eq, &no_info);
+ __ ldr(r3, FieldMemOperand(r2, Cell::kValueOffset));
+
+ // If the type cell is undefined, or contains anything other than an
+ // AllocationSite, call an array constructor that doesn't use AllocationSites.
+ __ ldr(r4, FieldMemOperand(r3, 0));
+ __ CompareRoot(r4, Heap::kAllocationSiteMapRootIndex);
+ __ b(ne, &no_info);
+
+ __ ldr(r3, FieldMemOperand(r3, AllocationSite::kTransitionInfoOffset));
+ __ SmiUntag(r3);
+ GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
+
+ __ bind(&no_info);
+ GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
+}
+
+
+void InternalArrayConstructorStub::GenerateCase(
+ MacroAssembler* masm, ElementsKind kind) {
+ Label not_zero_case, not_one_case;
+ Label normal_sequence;
+
+ __ tst(r0, r0);
+ __ b(ne, &not_zero_case);
+ InternalArrayNoArgumentConstructorStub stub0(kind);
+ __ TailCallStub(&stub0);
+
+ __ bind(&not_zero_case);
+ __ cmp(r0, Operand(1));
+ __ b(gt, &not_one_case);
+
+ if (IsFastPackedElementsKind(kind)) {
+ // We might need to create a holey array
+ // look at the first argument
+ __ ldr(r3, MemOperand(sp, 0));
+ __ cmp(r3, Operand::Zero());
+ __ b(eq, &normal_sequence);
+
+ InternalArraySingleArgumentConstructorStub
+ stub1_holey(GetHoleyElementsKind(kind));
+ __ TailCallStub(&stub1_holey);
+ }
+
+ __ bind(&normal_sequence);
+ InternalArraySingleArgumentConstructorStub stub1(kind);
+ __ TailCallStub(&stub1);
+
+ __ bind(&not_one_case);
+ InternalArrayNArgumentsConstructorStub stubN(kind);
+ __ TailCallStub(&stubN);
+}
+
+
+void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r0 : argc
+ // -- r1 : constructor
+ // -- sp[0] : return address
+ // -- sp[4] : last argument
+ // -----------------------------------
+
+ if (FLAG_debug_code) {
+ // The array construct code is only set for the global and natives
+ // builtin Array functions which always have maps.
+
+ // Initial map for the builtin Array function should be a map.
+ __ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
+ // This test will catch both a NULL and a Smi.
+ __ tst(r3, Operand(kSmiTagMask));
+ __ Assert(ne, kUnexpectedInitialMapForArrayFunction);
+ __ CompareObjectType(r3, r3, r4, MAP_TYPE);
+ __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
+ }
+
+ // Figure out the right elements kind
+ __ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
+ // Load the map's "bit field 2" into r3. We only need the first byte,
+ // but the following bit field extraction takes care of that anyway.
+ __ ldr(r3, FieldMemOperand(r3, Map::kBitField2Offset));
+ // Retrieve elements_kind from bit field 2.
+ __ Ubfx(r3, r3, Map::kElementsKindShift, Map::kElementsKindBitCount);
+
+ if (FLAG_debug_code) {
+ Label done;
+ __ cmp(r3, Operand(FAST_ELEMENTS));
+ __ b(eq, &done);
+ __ cmp(r3, Operand(FAST_HOLEY_ELEMENTS));
+ __ Assert(eq,
+ kInvalidElementsKindForInternalArrayOrInternalPackedArray);
+ __ bind(&done);
+ }
+
+ Label fast_elements_case;
+ __ cmp(r3, Operand(FAST_ELEMENTS));
+ __ b(eq, &fast_elements_case);
+ GenerateCase(masm, FAST_HOLEY_ELEMENTS);
+
+ __ bind(&fast_elements_case);
+ GenerateCase(masm, FAST_ELEMENTS);
+}
+
+
#undef __
} } // namespace v8::internal
diff --git a/deps/v8/src/arm/code-stubs-arm.h b/deps/v8/src/arm/code-stubs-arm.h
index 3e796249c..c03d8f27e 100644
--- a/deps/v8/src/arm/code-stubs-arm.h
+++ b/deps/v8/src/arm/code-stubs-arm.h
@@ -34,9 +34,12 @@ namespace v8 {
namespace internal {
+void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code);
+
+
// Compute a transcendental math function natively, or call the
// TranscendentalCache runtime function.
-class TranscendentalCacheStub: public CodeStub {
+class TranscendentalCacheStub: public PlatformCodeStub {
public:
enum ArgumentType {
TAGGED = 0 << TranscendentalCache::kTranscendentalTypeBits,
@@ -58,15 +61,15 @@ class TranscendentalCacheStub: public CodeStub {
};
-class StoreBufferOverflowStub: public CodeStub {
+class StoreBufferOverflowStub: public PlatformCodeStub {
public:
explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
- : save_doubles_(save_fp) { }
+ : save_doubles_(save_fp) {}
void Generate(MacroAssembler* masm);
- virtual bool IsPregenerated();
- static void GenerateFixedRegStubsAheadOfTime();
+ virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE { return true; }
+ static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
virtual bool SometimesSetsUpAFrame() { return false; }
private:
@@ -77,173 +80,6 @@ class StoreBufferOverflowStub: public CodeStub {
};
-class UnaryOpStub: public CodeStub {
- public:
- UnaryOpStub(Token::Value op,
- UnaryOverwriteMode mode,
- UnaryOpIC::TypeInfo operand_type = UnaryOpIC::UNINITIALIZED)
- : op_(op),
- mode_(mode),
- operand_type_(operand_type) {
- }
-
- private:
- Token::Value op_;
- UnaryOverwriteMode mode_;
-
- // Operand type information determined at runtime.
- UnaryOpIC::TypeInfo operand_type_;
-
- virtual void PrintName(StringStream* stream);
-
- class ModeBits: public BitField<UnaryOverwriteMode, 0, 1> {};
- class OpBits: public BitField<Token::Value, 1, 7> {};
- class OperandTypeInfoBits: public BitField<UnaryOpIC::TypeInfo, 8, 3> {};
-
- Major MajorKey() { return UnaryOp; }
- int MinorKey() {
- return ModeBits::encode(mode_)
- | OpBits::encode(op_)
- | OperandTypeInfoBits::encode(operand_type_);
- }
-
- // Note: A lot of the helper functions below will vanish when we use virtual
- // function instead of switch more often.
- void Generate(MacroAssembler* masm);
-
- void GenerateTypeTransition(MacroAssembler* masm);
-
- void GenerateSmiStub(MacroAssembler* masm);
- void GenerateSmiStubSub(MacroAssembler* masm);
- void GenerateSmiStubBitNot(MacroAssembler* masm);
- void GenerateSmiCodeSub(MacroAssembler* masm, Label* non_smi, Label* slow);
- void GenerateSmiCodeBitNot(MacroAssembler* masm, Label* slow);
-
- void GenerateHeapNumberStub(MacroAssembler* masm);
- void GenerateHeapNumberStubSub(MacroAssembler* masm);
- void GenerateHeapNumberStubBitNot(MacroAssembler* masm);
- void GenerateHeapNumberCodeSub(MacroAssembler* masm, Label* slow);
- void GenerateHeapNumberCodeBitNot(MacroAssembler* masm, Label* slow);
-
- void GenerateGenericStub(MacroAssembler* masm);
- void GenerateGenericStubSub(MacroAssembler* masm);
- void GenerateGenericStubBitNot(MacroAssembler* masm);
- void GenerateGenericCodeFallback(MacroAssembler* masm);
-
- virtual int GetCodeKind() { return Code::UNARY_OP_IC; }
-
- virtual InlineCacheState GetICState() {
- return UnaryOpIC::ToState(operand_type_);
- }
-
- virtual void FinishCode(Handle<Code> code) {
- code->set_unary_op_type(operand_type_);
- }
-};
-
-
-class BinaryOpStub: public CodeStub {
- public:
- BinaryOpStub(Token::Value op, OverwriteMode mode)
- : op_(op),
- mode_(mode),
- operands_type_(BinaryOpIC::UNINITIALIZED),
- result_type_(BinaryOpIC::UNINITIALIZED) {
- use_vfp2_ = CpuFeatures::IsSupported(VFP2);
- ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
- }
-
- BinaryOpStub(
- int key,
- BinaryOpIC::TypeInfo operands_type,
- BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED)
- : op_(OpBits::decode(key)),
- mode_(ModeBits::decode(key)),
- use_vfp2_(VFP2Bits::decode(key)),
- operands_type_(operands_type),
- result_type_(result_type) { }
-
- private:
- enum SmiCodeGenerateHeapNumberResults {
- ALLOW_HEAPNUMBER_RESULTS,
- NO_HEAPNUMBER_RESULTS
- };
-
- Token::Value op_;
- OverwriteMode mode_;
- bool use_vfp2_;
-
- // Operand type information determined at runtime.
- BinaryOpIC::TypeInfo operands_type_;
- BinaryOpIC::TypeInfo result_type_;
-
- virtual void PrintName(StringStream* stream);
-
- // Minor key encoding in 16 bits RRRTTTVOOOOOOOMM.
- class ModeBits: public BitField<OverwriteMode, 0, 2> {};
- class OpBits: public BitField<Token::Value, 2, 7> {};
- class VFP2Bits: public BitField<bool, 9, 1> {};
- class OperandTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 10, 3> {};
- class ResultTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 13, 3> {};
-
- Major MajorKey() { return BinaryOp; }
- int MinorKey() {
- return OpBits::encode(op_)
- | ModeBits::encode(mode_)
- | VFP2Bits::encode(use_vfp2_)
- | OperandTypeInfoBits::encode(operands_type_)
- | ResultTypeInfoBits::encode(result_type_);
- }
-
- void Generate(MacroAssembler* masm);
- void GenerateGeneric(MacroAssembler* masm);
- void GenerateSmiSmiOperation(MacroAssembler* masm);
- void GenerateFPOperation(MacroAssembler* masm,
- bool smi_operands,
- Label* not_numbers,
- Label* gc_required);
- void GenerateSmiCode(MacroAssembler* masm,
- Label* use_runtime,
- Label* gc_required,
- SmiCodeGenerateHeapNumberResults heapnumber_results);
- void GenerateLoadArguments(MacroAssembler* masm);
- void GenerateReturn(MacroAssembler* masm);
- void GenerateUninitializedStub(MacroAssembler* masm);
- void GenerateSmiStub(MacroAssembler* masm);
- void GenerateInt32Stub(MacroAssembler* masm);
- void GenerateHeapNumberStub(MacroAssembler* masm);
- void GenerateOddballStub(MacroAssembler* masm);
- void GenerateStringStub(MacroAssembler* masm);
- void GenerateBothStringStub(MacroAssembler* masm);
- void GenerateGenericStub(MacroAssembler* masm);
- void GenerateAddStrings(MacroAssembler* masm);
- void GenerateCallRuntime(MacroAssembler* masm);
-
- void GenerateHeapResultAllocation(MacroAssembler* masm,
- Register result,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
- void GenerateRegisterArgsPush(MacroAssembler* masm);
- void GenerateTypeTransition(MacroAssembler* masm);
- void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm);
-
- virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
-
- virtual InlineCacheState GetICState() {
- return BinaryOpIC::ToState(operands_type_);
- }
-
- virtual void FinishCode(Handle<Code> code) {
- code->set_binary_op_type(operands_type_);
- code->set_binary_op_result_type(result_type_);
- }
-
- friend class CodeGenerator;
-};
-
-
class StringHelper : public AllStatic {
public:
// Generate code for copying characters using a simple loop. This should only
@@ -270,18 +106,17 @@ class StringHelper : public AllStatic {
Register scratch2,
Register scratch3,
Register scratch4,
- Register scratch5,
int flags);
- // Probe the symbol table for a two character string. If the string is
+ // Probe the string table for a two character string. If the string is
// not found by probing, a jump to the label not_found is performed. This jump
- // does not guarantee that the string is not in the symbol table. If the
+ // does not guarantee that the string is not in the string table. If the
// string is found the code falls through with the string in register r0.
// Contents of both c1 and c2 registers are modified. At the exit c1 is
// guaranteed to contain halfword with low and high bytes equal to
// initial contents of c1 and c2 respectively.
- static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
+ static void GenerateTwoCharacterStringTableProbe(MacroAssembler* masm,
Register c1,
Register c2,
Register scratch1,
@@ -308,20 +143,7 @@ class StringHelper : public AllStatic {
};
-// Flag that indicates how to generate code for the stub StringAddStub.
-enum StringAddFlags {
- NO_STRING_ADD_FLAGS = 0,
- // Omit left string check in stub (left is definitely a string).
- NO_STRING_CHECK_LEFT_IN_STUB = 1 << 0,
- // Omit right string check in stub (right is definitely a string).
- NO_STRING_CHECK_RIGHT_IN_STUB = 1 << 1,
- // Omit both string checks in stub.
- NO_STRING_CHECK_IN_STUB =
- NO_STRING_CHECK_LEFT_IN_STUB | NO_STRING_CHECK_RIGHT_IN_STUB
-};
-
-
-class StringAddStub: public CodeStub {
+class StringAddStub: public PlatformCodeStub {
public:
explicit StringAddStub(StringAddFlags flags) : flags_(flags) {}
@@ -340,11 +162,14 @@ class StringAddStub: public CodeStub {
Register scratch4,
Label* slow);
+ void GenerateRegisterArgsPush(MacroAssembler* masm);
+ void GenerateRegisterArgsPop(MacroAssembler* masm);
+
const StringAddFlags flags_;
};
-class SubStringStub: public CodeStub {
+class SubStringStub: public PlatformCodeStub {
public:
SubStringStub() {}
@@ -357,7 +182,7 @@ class SubStringStub: public CodeStub {
-class StringCompareStub: public CodeStub {
+class StringCompareStub: public PlatformCodeStub {
public:
StringCompareStub() { }
@@ -397,7 +222,7 @@ class StringCompareStub: public CodeStub {
// This stub can convert a signed int32 to a heap number (double). It does
// not work for int32s that are in Smi range! No GC occurs during this stub
// so you don't have to set up the frame.
-class WriteInt32ToHeapNumberStub : public CodeStub {
+class WriteInt32ToHeapNumberStub : public PlatformCodeStub {
public:
WriteInt32ToHeapNumberStub(Register the_int,
Register the_heap_number,
@@ -406,8 +231,8 @@ class WriteInt32ToHeapNumberStub : public CodeStub {
the_heap_number_(the_heap_number),
scratch_(scratch) { }
- bool IsPregenerated();
- static void GenerateFixedRegStubsAheadOfTime();
+ virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE;
+ static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
private:
Register the_int_;
@@ -431,33 +256,7 @@ class WriteInt32ToHeapNumberStub : public CodeStub {
};
-class NumberToStringStub: public CodeStub {
- public:
- NumberToStringStub() { }
-
- // Generate code to do a lookup in the number string cache. If the number in
- // the register object is found in the cache the generated code falls through
- // with the result in the result register. The object and the result register
- // can be the same. If the number is not found in the cache the code jumps to
- // the label not_found with only the content of register object unchanged.
- static void GenerateLookupNumberStringCache(MacroAssembler* masm,
- Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- bool object_is_smi,
- Label* not_found);
-
- private:
- Major MajorKey() { return NumberToString; }
- int MinorKey() { return 0; }
-
- void Generate(MacroAssembler* masm);
-};
-
-
-class RecordWriteStub: public CodeStub {
+class RecordWriteStub: public PlatformCodeStub {
public:
RecordWriteStub(Register object,
Register value,
@@ -480,8 +279,8 @@ class RecordWriteStub: public CodeStub {
INCREMENTAL_COMPACTION
};
- virtual bool IsPregenerated();
- static void GenerateFixedRegStubsAheadOfTime();
+ virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE;
+ static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
virtual bool SometimesSetsUpAFrame() { return false; }
static void PatchBranchIntoNop(MacroAssembler* masm, int pos) {
@@ -551,7 +350,7 @@ class RecordWriteStub: public CodeStub {
address_(address),
scratch0_(scratch0) {
ASSERT(!AreAliased(scratch0, object, address, no_reg));
- scratch1_ = GetRegThatIsNotOneOf(object_, address_, scratch0_);
+ scratch1_ = GetRegisterThatIsNotOneOf(object_, address_, scratch0_);
}
void Save(MacroAssembler* masm) {
@@ -571,30 +370,14 @@ class RecordWriteStub: public CodeStub {
void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
masm->stm(db_w, sp, (kCallerSaved | lr.bit()) & ~scratch1_.bit());
if (mode == kSaveFPRegs) {
- CpuFeatures::Scope scope(VFP2);
- masm->sub(sp,
- sp,
- Operand(kDoubleSize * (DwVfpRegister::kNumRegisters - 1)));
- // Save all VFP registers except d0.
- for (int i = DwVfpRegister::kNumRegisters - 1; i > 0; i--) {
- DwVfpRegister reg = DwVfpRegister::from_code(i);
- masm->vstr(reg, MemOperand(sp, (i - 1) * kDoubleSize));
- }
+ masm->SaveFPRegs(sp, scratch0_);
}
}
inline void RestoreCallerSaveRegisters(MacroAssembler*masm,
SaveFPRegsMode mode) {
if (mode == kSaveFPRegs) {
- CpuFeatures::Scope scope(VFP2);
- // Restore all VFP registers except d0.
- for (int i = DwVfpRegister::kNumRegisters - 1; i > 0; i--) {
- DwVfpRegister reg = DwVfpRegister::from_code(i);
- masm->vldr(reg, MemOperand(sp, (i - 1) * kDoubleSize));
- }
- masm->add(sp,
- sp,
- Operand(kDoubleSize * (DwVfpRegister::kNumRegisters - 1)));
+ masm->RestoreFPRegs(sp, scratch0_);
}
masm->ldm(ia_w, sp, (kCallerSaved | lr.bit()) & ~scratch1_.bit());
}
@@ -610,19 +393,6 @@ class RecordWriteStub: public CodeStub {
Register scratch0_;
Register scratch1_;
- Register GetRegThatIsNotOneOf(Register r1,
- Register r2,
- Register r3) {
- for (int i = 0; i < Register::kNumAllocatableRegisters; i++) {
- Register candidate = Register::FromAllocationIndex(i);
- if (candidate.is(r1)) continue;
- if (candidate.is(r2)) continue;
- if (candidate.is(r3)) continue;
- return candidate;
- }
- UNREACHABLE();
- return no_reg;
- }
friend class RecordWriteStub;
};
@@ -669,33 +439,15 @@ class RecordWriteStub: public CodeStub {
};
-// Enter C code from generated RegExp code in a way that allows
-// the C code to fix the return address in case of a GC.
-// Currently only needed on ARM.
-class RegExpCEntryStub: public CodeStub {
- public:
- RegExpCEntryStub() {}
- virtual ~RegExpCEntryStub() {}
- void Generate(MacroAssembler* masm);
-
- private:
- Major MajorKey() { return RegExpCEntry; }
- int MinorKey() { return 0; }
-
- bool NeedsImmovableCode() { return true; }
-};
-
-
// Trampoline stub to call into native code. To call safely into native code
// in the presence of compacting GC (which can move code objects) we need to
// keep the code which called into native pinned in the memory. Currently the
// simplest approach is to generate such stub early enough so it can never be
// moved by GC
-class DirectCEntryStub: public CodeStub {
+class DirectCEntryStub: public PlatformCodeStub {
public:
DirectCEntryStub() {}
void Generate(MacroAssembler* masm);
- void GenerateCall(MacroAssembler* masm, ExternalReference function);
void GenerateCall(MacroAssembler* masm, Register target);
private:
@@ -706,155 +458,11 @@ class DirectCEntryStub: public CodeStub {
};
-class FloatingPointHelper : public AllStatic {
- public:
- enum Destination {
- kVFPRegisters,
- kCoreRegisters
- };
-
-
- // Loads smis from r0 and r1 (right and left in binary operations) into
- // floating point registers. Depending on the destination the values ends up
- // either d7 and d6 or in r2/r3 and r0/r1 respectively. If the destination is
- // floating point registers VFP3 must be supported. If core registers are
- // requested when VFP3 is supported d6 and d7 will be scratched.
- static void LoadSmis(MacroAssembler* masm,
- Destination destination,
- Register scratch1,
- Register scratch2);
-
- // Loads objects from r0 and r1 (right and left in binary operations) into
- // floating point registers. Depending on the destination the values ends up
- // either d7 and d6 or in r2/r3 and r0/r1 respectively. If the destination is
- // floating point registers VFP3 must be supported. If core registers are
- // requested when VFP3 is supported d6 and d7 will still be scratched. If
- // either r0 or r1 is not a number (not smi and not heap number object) the
- // not_number label is jumped to with r0 and r1 intact.
- static void LoadOperands(MacroAssembler* masm,
- FloatingPointHelper::Destination destination,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* not_number);
-
- // Convert the smi or heap number in object to an int32 using the rules
- // for ToInt32 as described in ECMAScript 9.5.: the value is truncated
- // and brought into the range -2^31 .. +2^31 - 1.
- static void ConvertNumberToInt32(MacroAssembler* masm,
- Register object,
- Register dst,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- DwVfpRegister double_scratch,
- Label* not_int32);
-
- // Converts the integer (untagged smi) in |int_scratch| to a double, storing
- // the result either in |double_dst| or |dst2:dst1|, depending on
- // |destination|.
- // Warning: The value in |int_scratch| will be changed in the process!
- static void ConvertIntToDouble(MacroAssembler* masm,
- Register int_scratch,
- Destination destination,
- DwVfpRegister double_dst,
- Register dst1,
- Register dst2,
- Register scratch2,
- SwVfpRegister single_scratch);
-
- // Load the number from object into double_dst in the double format.
- // Control will jump to not_int32 if the value cannot be exactly represented
- // by a 32-bit integer.
- // Floating point value in the 32-bit integer range that are not exact integer
- // won't be loaded.
- static void LoadNumberAsInt32Double(MacroAssembler* masm,
- Register object,
- Destination destination,
- DwVfpRegister double_dst,
- DwVfpRegister double_scratch,
- Register dst1,
- Register dst2,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- SwVfpRegister single_scratch,
- Label* not_int32);
-
- // Loads the number from object into dst as a 32-bit integer.
- // Control will jump to not_int32 if the object cannot be exactly represented
- // by a 32-bit integer.
- // Floating point value in the 32-bit integer range that are not exact integer
- // won't be converted.
- // scratch3 is not used when VFP3 is supported.
- static void LoadNumberAsInt32(MacroAssembler* masm,
- Register object,
- Register dst,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- DwVfpRegister double_scratch0,
- DwVfpRegister double_scratch1,
- Label* not_int32);
-
- // Generate non VFP3 code to check if a double can be exactly represented by a
- // 32-bit integer. This does not check for 0 or -0, which need
- // to be checked for separately.
- // Control jumps to not_int32 if the value is not a 32-bit integer, and falls
- // through otherwise.
- // src1 and src2 will be cloberred.
- //
- // Expected input:
- // - src1: higher (exponent) part of the double value.
- // - src2: lower (mantissa) part of the double value.
- // Output status:
- // - dst: 32 higher bits of the mantissa. (mantissa[51:20])
- // - src2: contains 1.
- // - other registers are clobbered.
- static void DoubleIs32BitInteger(MacroAssembler* masm,
- Register src1,
- Register src2,
- Register dst,
- Register scratch,
- Label* not_int32);
-
- // Generates code to call a C function to do a double operation using core
- // registers. (Used when VFP3 is not supported.)
- // This code never falls through, but returns with a heap number containing
- // the result in r0.
- // Register heapnumber_result must be a heap number in which the
- // result of the operation will be stored.
- // Requires the following layout on entry:
- // r0: Left value (least significant part of mantissa).
- // r1: Left value (sign, exponent, top of mantissa).
- // r2: Right value (least significant part of mantissa).
- // r3: Right value (sign, exponent, top of mantissa).
- static void CallCCodeForDoubleOperation(MacroAssembler* masm,
- Token::Value op,
- Register heap_number_result,
- Register scratch);
-
- private:
- static void LoadNumber(MacroAssembler* masm,
- FloatingPointHelper::Destination destination,
- Register object,
- DwVfpRegister dst,
- Register dst1,
- Register dst2,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* not_number);
-};
-
-
-class StringDictionaryLookupStub: public CodeStub {
+class NameDictionaryLookupStub: public PlatformCodeStub {
public:
enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
- explicit StringDictionaryLookupStub(LookupMode mode) : mode_(mode) { }
+ explicit NameDictionaryLookupStub(LookupMode mode) : mode_(mode) { }
void Generate(MacroAssembler* masm);
@@ -863,7 +471,7 @@ class StringDictionaryLookupStub: public CodeStub {
Label* done,
Register receiver,
Register properties,
- Handle<String> name,
+ Handle<Name> name,
Register scratch0);
static void GeneratePositiveLookup(MacroAssembler* masm,
@@ -881,14 +489,14 @@ class StringDictionaryLookupStub: public CodeStub {
static const int kTotalProbes = 20;
static const int kCapacityOffset =
- StringDictionary::kHeaderSize +
- StringDictionary::kCapacityIndex * kPointerSize;
+ NameDictionary::kHeaderSize +
+ NameDictionary::kCapacityIndex * kPointerSize;
static const int kElementsStartOffset =
- StringDictionary::kHeaderSize +
- StringDictionary::kElementsStartIndex * kPointerSize;
+ NameDictionary::kHeaderSize +
+ NameDictionary::kElementsStartIndex * kPointerSize;
- Major MajorKey() { return StringDictionaryLookup; }
+ Major MajorKey() { return NameDictionaryLookup; }
int MinorKey() {
return LookupModeBits::encode(mode_);
diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc
index 09166c3c0..44c331b75 100644
--- a/deps/v8/src/arm/codegen-arm.cc
+++ b/deps/v8/src/arm/codegen-arm.cc
@@ -27,15 +27,15 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_ARM)
+#if V8_TARGET_ARCH_ARM
#include "codegen.h"
#include "macro-assembler.h"
+#include "simulator-arm.h"
namespace v8 {
namespace internal {
-#define __ ACCESS_MASM(masm)
UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
switch (type) {
@@ -49,10 +49,324 @@ UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
}
+#define __ masm.
+
+
+#if defined(USE_SIMULATOR)
+byte* fast_exp_arm_machine_code = NULL;
+double fast_exp_simulator(double x) {
+ return Simulator::current(Isolate::Current())->CallFPReturnsDouble(
+ fast_exp_arm_machine_code, x, 0);
+}
+#endif
+
+
+UnaryMathFunction CreateExpFunction() {
+ if (!FLAG_fast_math) return &exp;
+ size_t actual_size;
+ byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
+ if (buffer == NULL) return &exp;
+ ExternalReference::InitializeMathExpData();
+
+ MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+
+ {
+ DwVfpRegister input = d0;
+ DwVfpRegister result = d1;
+ DwVfpRegister double_scratch1 = d2;
+ DwVfpRegister double_scratch2 = d3;
+ Register temp1 = r4;
+ Register temp2 = r5;
+ Register temp3 = r6;
+
+ if (masm.use_eabi_hardfloat()) {
+ // Input value is in d0 anyway, nothing to do.
+ } else {
+ __ vmov(input, r0, r1);
+ }
+ __ Push(temp3, temp2, temp1);
+ MathExpGenerator::EmitMathExp(
+ &masm, input, result, double_scratch1, double_scratch2,
+ temp1, temp2, temp3);
+ __ Pop(temp3, temp2, temp1);
+ if (masm.use_eabi_hardfloat()) {
+ __ vmov(d0, result);
+ } else {
+ __ vmov(r0, r1, result);
+ }
+ __ Ret();
+ }
+
+ CodeDesc desc;
+ masm.GetCode(&desc);
+ ASSERT(!RelocInfo::RequiresRelocation(desc));
+
+ CPU::FlushICache(buffer, actual_size);
+ OS::ProtectCode(buffer, actual_size);
+
+#if !defined(USE_SIMULATOR)
+ return FUNCTION_CAST<UnaryMathFunction>(buffer);
+#else
+ fast_exp_arm_machine_code = buffer;
+ return &fast_exp_simulator;
+#endif
+}
+
+#if defined(V8_HOST_ARCH_ARM)
+OS::MemCopyUint8Function CreateMemCopyUint8Function(
+ OS::MemCopyUint8Function stub) {
+#if defined(USE_SIMULATOR)
+ return stub;
+#else
+ if (Serializer::enabled() || !CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) {
+ return stub;
+ }
+ size_t actual_size;
+ byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
+ if (buffer == NULL) return stub;
+
+ MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+
+ Register dest = r0;
+ Register src = r1;
+ Register chars = r2;
+ Register temp1 = r3;
+ Label less_4;
+
+ if (CpuFeatures::IsSupported(NEON)) {
+ Label loop, less_256, less_128, less_64, less_32, _16_or_less, _8_or_less;
+ Label size_less_than_8;
+ __ pld(MemOperand(src, 0));
+
+ __ cmp(chars, Operand(8));
+ __ b(lt, &size_less_than_8);
+ __ cmp(chars, Operand(32));
+ __ b(lt, &less_32);
+ if (CpuFeatures::cache_line_size() == 32) {
+ __ pld(MemOperand(src, 32));
+ }
+ __ cmp(chars, Operand(64));
+ __ b(lt, &less_64);
+ __ pld(MemOperand(src, 64));
+ if (CpuFeatures::cache_line_size() == 32) {
+ __ pld(MemOperand(src, 96));
+ }
+ __ cmp(chars, Operand(128));
+ __ b(lt, &less_128);
+ __ pld(MemOperand(src, 128));
+ if (CpuFeatures::cache_line_size() == 32) {
+ __ pld(MemOperand(src, 160));
+ }
+ __ pld(MemOperand(src, 192));
+ if (CpuFeatures::cache_line_size() == 32) {
+ __ pld(MemOperand(src, 224));
+ }
+ __ cmp(chars, Operand(256));
+ __ b(lt, &less_256);
+ __ sub(chars, chars, Operand(256));
+
+ __ bind(&loop);
+ __ pld(MemOperand(src, 256));
+ __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
+ if (CpuFeatures::cache_line_size() == 32) {
+ __ pld(MemOperand(src, 256));
+ }
+ __ vld1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(src, PostIndex));
+ __ sub(chars, chars, Operand(64), SetCC);
+ __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
+ __ vst1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(dest, PostIndex));
+ __ b(ge, &loop);
+ __ add(chars, chars, Operand(256));
+
+ __ bind(&less_256);
+ __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
+ __ vld1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(src, PostIndex));
+ __ sub(chars, chars, Operand(128));
+ __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
+ __ vst1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(dest, PostIndex));
+ __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
+ __ vld1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(src, PostIndex));
+ __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
+ __ vst1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(dest, PostIndex));
+ __ cmp(chars, Operand(64));
+ __ b(lt, &less_64);
+
+ __ bind(&less_128);
+ __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
+ __ vld1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(src, PostIndex));
+ __ sub(chars, chars, Operand(64));
+ __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
+ __ vst1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(dest, PostIndex));
+
+ __ bind(&less_64);
+ __ cmp(chars, Operand(32));
+ __ b(lt, &less_32);
+ __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
+ __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
+ __ sub(chars, chars, Operand(32));
+
+ __ bind(&less_32);
+ __ cmp(chars, Operand(16));
+ __ b(le, &_16_or_less);
+ __ vld1(Neon8, NeonListOperand(d0, 2), NeonMemOperand(src, PostIndex));
+ __ vst1(Neon8, NeonListOperand(d0, 2), NeonMemOperand(dest, PostIndex));
+ __ sub(chars, chars, Operand(16));
+
+ __ bind(&_16_or_less);
+ __ cmp(chars, Operand(8));
+ __ b(le, &_8_or_less);
+ __ vld1(Neon8, NeonListOperand(d0), NeonMemOperand(src, PostIndex));
+ __ vst1(Neon8, NeonListOperand(d0), NeonMemOperand(dest, PostIndex));
+ __ sub(chars, chars, Operand(8));
+
+ // Do a last copy which may overlap with the previous copy (up to 8 bytes).
+ __ bind(&_8_or_less);
+ __ rsb(chars, chars, Operand(8));
+ __ sub(src, src, Operand(chars));
+ __ sub(dest, dest, Operand(chars));
+ __ vld1(Neon8, NeonListOperand(d0), NeonMemOperand(src));
+ __ vst1(Neon8, NeonListOperand(d0), NeonMemOperand(dest));
+
+ __ Ret();
+
+ __ bind(&size_less_than_8);
+
+ __ bic(temp1, chars, Operand(0x3), SetCC);
+ __ b(&less_4, eq);
+ __ ldr(temp1, MemOperand(src, 4, PostIndex));
+ __ str(temp1, MemOperand(dest, 4, PostIndex));
+ } else {
+ Register temp2 = ip;
+ Label loop;
+
+ __ bic(temp2, chars, Operand(0x3), SetCC);
+ __ b(&less_4, eq);
+ __ add(temp2, dest, temp2);
+
+ __ bind(&loop);
+ __ ldr(temp1, MemOperand(src, 4, PostIndex));
+ __ str(temp1, MemOperand(dest, 4, PostIndex));
+ __ cmp(dest, temp2);
+ __ b(&loop, ne);
+ }
+
+ __ bind(&less_4);
+ __ mov(chars, Operand(chars, LSL, 31), SetCC);
+ // bit0 => Z (ne), bit1 => C (cs)
+ __ ldrh(temp1, MemOperand(src, 2, PostIndex), cs);
+ __ strh(temp1, MemOperand(dest, 2, PostIndex), cs);
+ __ ldrb(temp1, MemOperand(src), ne);
+ __ strb(temp1, MemOperand(dest), ne);
+ __ Ret();
+
+ CodeDesc desc;
+ masm.GetCode(&desc);
+ ASSERT(!RelocInfo::RequiresRelocation(desc));
+
+ CPU::FlushICache(buffer, actual_size);
+ OS::ProtectCode(buffer, actual_size);
+ return FUNCTION_CAST<OS::MemCopyUint8Function>(buffer);
+#endif
+}
+
+
+// Convert 8 to 16. The number of characters to copy must be at least 8.
+OS::MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
+ OS::MemCopyUint16Uint8Function stub) {
+#if defined(USE_SIMULATOR)
+ return stub;
+#else
+ if (Serializer::enabled() || !CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) {
+ return stub;
+ }
+ size_t actual_size;
+ byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
+ if (buffer == NULL) return stub;
+
+ MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+
+ Register dest = r0;
+ Register src = r1;
+ Register chars = r2;
+ if (CpuFeatures::IsSupported(NEON)) {
+ Register temp = r3;
+ Label loop;
+
+ __ bic(temp, chars, Operand(0x7));
+ __ sub(chars, chars, Operand(temp));
+ __ add(temp, dest, Operand(temp, LSL, 1));
+
+ __ bind(&loop);
+ __ vld1(Neon8, NeonListOperand(d0), NeonMemOperand(src, PostIndex));
+ __ vmovl(NeonU8, q0, d0);
+ __ vst1(Neon16, NeonListOperand(d0, 2), NeonMemOperand(dest, PostIndex));
+ __ cmp(dest, temp);
+ __ b(&loop, ne);
+
+ // Do a last copy which will overlap with the previous copy (1 to 8 bytes).
+ __ rsb(chars, chars, Operand(8));
+ __ sub(src, src, Operand(chars));
+ __ sub(dest, dest, Operand(chars, LSL, 1));
+ __ vld1(Neon8, NeonListOperand(d0), NeonMemOperand(src));
+ __ vmovl(NeonU8, q0, d0);
+ __ vst1(Neon16, NeonListOperand(d0, 2), NeonMemOperand(dest));
+ __ Ret();
+ } else {
+ Register temp1 = r3;
+ Register temp2 = ip;
+ Register temp3 = lr;
+ Register temp4 = r4;
+ Label loop;
+ Label not_two;
+
+ __ Push(lr, r4);
+ __ bic(temp2, chars, Operand(0x3));
+ __ add(temp2, dest, Operand(temp2, LSL, 1));
+
+ __ bind(&loop);
+ __ ldr(temp1, MemOperand(src, 4, PostIndex));
+ __ uxtb16(temp3, Operand(temp1, ROR, 0));
+ __ uxtb16(temp4, Operand(temp1, ROR, 8));
+ __ pkhbt(temp1, temp3, Operand(temp4, LSL, 16));
+ __ str(temp1, MemOperand(dest));
+ __ pkhtb(temp1, temp4, Operand(temp3, ASR, 16));
+ __ str(temp1, MemOperand(dest, 4));
+ __ add(dest, dest, Operand(8));
+ __ cmp(dest, temp2);
+ __ b(&loop, ne);
+
+ __ mov(chars, Operand(chars, LSL, 31), SetCC); // bit0 => ne, bit1 => cs
+ __ b(&not_two, cc);
+ __ ldrh(temp1, MemOperand(src, 2, PostIndex));
+ __ uxtb(temp3, Operand(temp1, ROR, 8));
+ __ mov(temp3, Operand(temp3, LSL, 16));
+ __ uxtab(temp3, temp3, Operand(temp1, ROR, 0));
+ __ str(temp3, MemOperand(dest, 4, PostIndex));
+ __ bind(&not_two);
+ __ ldrb(temp1, MemOperand(src), ne);
+ __ strh(temp1, MemOperand(dest), ne);
+ __ Pop(pc, r4);
+ }
+
+ CodeDesc desc;
+ masm.GetCode(&desc);
+
+ CPU::FlushICache(buffer, actual_size);
+ OS::ProtectCode(buffer, actual_size);
+
+ return FUNCTION_CAST<OS::MemCopyUint16Uint8Function>(buffer);
+#endif
+}
+#endif
+
+#undef __
+
+
UnaryMathFunction CreateSqrtFunction() {
return &sqrt;
}
+
// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.
@@ -73,8 +387,11 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
// -------------------------------------------------------------------------
// Code generators
+#define __ ACCESS_MASM(masm)
+
void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
- MacroAssembler* masm) {
+ MacroAssembler* masm, AllocationSiteMode mode,
+ Label* allocation_memento_found) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : key
@@ -83,6 +400,11 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
// -- r3 : target map, scratch for subsequent call
// -- r4 : scratch (elements)
// -----------------------------------
+ if (mode == TRACK_ALLOCATION_SITE) {
+ ASSERT(allocation_memento_found != NULL);
+ __ JumpIfJSArrayHasAllocationMemento(r2, r4, allocation_memento_found);
+ }
+
// Set transitioned map.
__ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
__ RecordWriteField(r2,
@@ -97,7 +419,7 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
void ElementsTransitionGenerator::GenerateSmiToDouble(
- MacroAssembler* masm, Label* fail) {
+ MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : key
@@ -107,7 +429,10 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// -- r4 : scratch (elements)
// -----------------------------------
Label loop, entry, convert_hole, gc_required, only_change_map, done;
- bool vfp2_supported = CpuFeatures::IsSupported(VFP2);
+
+ if (mode == TRACK_ALLOCATION_SITE) {
+ __ JumpIfJSArrayHasAllocationMemento(r2, r4, fail);
+ }
// Check for empty arrays, which only require a map transition and no changes
// to the backing store.
@@ -117,32 +442,16 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
__ push(lr);
__ ldr(r5, FieldMemOperand(r4, FixedArray::kLengthOffset));
- // r4: source FixedArray
// r5: number of elements (smi-tagged)
// Allocate new FixedDoubleArray.
// Use lr as a temporary register.
__ mov(lr, Operand(r5, LSL, 2));
- __ add(lr, lr, Operand(FixedDoubleArray::kHeaderSize + kPointerSize));
- __ AllocateInNewSpace(lr, r6, r7, r9, &gc_required, NO_ALLOCATION_FLAGS);
+ __ add(lr, lr, Operand(FixedDoubleArray::kHeaderSize));
+ __ Allocate(lr, r6, r4, r9, &gc_required, DOUBLE_ALIGNMENT);
// r6: destination FixedDoubleArray, not tagged as heap object.
-
- // Align the array conveniently for doubles.
- // Store a filler value in the unused memory.
- Label aligned, aligned_done;
- __ tst(r6, Operand(kDoubleAlignmentMask));
- __ mov(ip, Operand(masm->isolate()->factory()->one_pointer_filler_map()));
- __ b(eq, &aligned);
- // Store at the beginning of the allocated memory and update the base pointer.
- __ str(ip, MemOperand(r6, kPointerSize, PostIndex));
- __ b(&aligned_done);
-
- __ bind(&aligned);
- // Store the filler at the end of the allocated memory.
- __ sub(lr, lr, Operand(kPointerSize));
- __ str(ip, MemOperand(r6, lr));
-
- __ bind(&aligned_done);
+ __ ldr(r4, FieldMemOperand(r2, JSObject::kElementsOffset));
+ // r4: source FixedArray.
// Set destination FixedDoubleArray's length and map.
__ LoadRoot(r9, Heap::kFixedDoubleArrayMapRootIndex);
@@ -173,16 +482,15 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// Prepare for conversion loop.
__ add(r3, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ add(r7, r6, Operand(FixedDoubleArray::kHeaderSize));
- __ add(r6, r7, Operand(r5, LSL, 2));
+ __ add(r9, r6, Operand(FixedDoubleArray::kHeaderSize));
+ __ add(r6, r9, Operand(r5, LSL, 2));
__ mov(r4, Operand(kHoleNanLower32));
__ mov(r5, Operand(kHoleNanUpper32));
// r3: begin of source FixedArray element fields, not tagged
// r4: kHoleNanLower32
// r5: kHoleNanUpper32
// r6: end of destination FixedDoubleArray, not tagged
- // r7: begin of FixedDoubleArray element fields, not tagged
- if (!vfp2_supported) __ Push(r1, r0);
+ // r9: begin of FixedDoubleArray element fields, not tagged
__ b(&entry);
@@ -192,7 +500,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
HeapObject::kMapOffset,
r3,
r9,
- kLRHasBeenSaved,
+ kLRHasNotBeenSaved,
kDontSaveFPRegs,
OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
@@ -205,53 +513,39 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// Convert and copy elements.
__ bind(&loop);
- __ ldr(r9, MemOperand(r3, 4, PostIndex));
- // r9: current element
- __ UntagAndJumpIfNotSmi(r9, r9, &convert_hole);
+ __ ldr(lr, MemOperand(r3, 4, PostIndex));
+ // lr: current element
+ __ UntagAndJumpIfNotSmi(lr, lr, &convert_hole);
// Normal smi, convert to double and store.
- if (vfp2_supported) {
- CpuFeatures::Scope scope(VFP2);
- __ vmov(s0, r9);
- __ vcvt_f64_s32(d0, s0);
- __ vstr(d0, r7, 0);
- __ add(r7, r7, Operand(8));
- } else {
- FloatingPointHelper::ConvertIntToDouble(masm,
- r9,
- FloatingPointHelper::kCoreRegisters,
- d0,
- r0,
- r1,
- lr,
- s0);
- __ Strd(r0, r1, MemOperand(r7, 8, PostIndex));
- }
+ __ vmov(s0, lr);
+ __ vcvt_f64_s32(d0, s0);
+ __ vstr(d0, r9, 0);
+ __ add(r9, r9, Operand(8));
__ b(&entry);
// Hole found, store the-hole NaN.
__ bind(&convert_hole);
if (FLAG_debug_code) {
// Restore a "smi-untagged" heap object.
- __ SmiTag(r9);
- __ orr(r9, r9, Operand(1));
- __ CompareRoot(r9, Heap::kTheHoleValueRootIndex);
- __ Assert(eq, "object found in smi-only array");
+ __ SmiTag(lr);
+ __ orr(lr, lr, Operand(1));
+ __ CompareRoot(lr, Heap::kTheHoleValueRootIndex);
+ __ Assert(eq, kObjectFoundInSmiOnlyArray);
}
- __ Strd(r4, r5, MemOperand(r7, 8, PostIndex));
+ __ Strd(r4, r5, MemOperand(r9, 8, PostIndex));
__ bind(&entry);
- __ cmp(r7, r6);
+ __ cmp(r9, r6);
__ b(lt, &loop);
- if (!vfp2_supported) __ Pop(r1, r0);
__ pop(lr);
__ bind(&done);
}
void ElementsTransitionGenerator::GenerateDoubleToObject(
- MacroAssembler* masm, Label* fail) {
+ MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : key
@@ -262,6 +556,10 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
// -----------------------------------
Label entry, loop, convert_hole, gc_required, only_change_map;
+ if (mode == TRACK_ALLOCATION_SITE) {
+ __ JumpIfJSArrayHasAllocationMemento(r2, r4, fail);
+ }
+
// Check for empty arrays, which only require a map transition and no changes
// to the backing store.
__ ldr(r4, FieldMemOperand(r2, JSObject::kElementsOffset));
@@ -277,7 +575,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
// Allocate new FixedArray.
__ mov(r0, Operand(FixedDoubleArray::kHeaderSize));
__ add(r0, r0, Operand(r5, LSL, 1));
- __ AllocateInNewSpace(r0, r6, r7, r9, &gc_required, NO_ALLOCATION_FLAGS);
+ __ Allocate(r0, r6, r3, r9, &gc_required, NO_ALLOCATION_FLAGS);
// r6: destination FixedArray, not tagged as heap object
// Set destination FixedDoubleArray's length and map.
__ LoadRoot(r9, Heap::kFixedArrayMapRootIndex);
@@ -289,14 +587,12 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
__ add(r3, r6, Operand(FixedArray::kHeaderSize));
__ add(r6, r6, Operand(kHeapObjectTag));
__ add(r5, r3, Operand(r5, LSL, 1));
- __ LoadRoot(r7, Heap::kTheHoleValueRootIndex);
__ LoadRoot(r9, Heap::kHeapNumberMapRootIndex);
// Using offsetted addresses in r4 to fully take advantage of post-indexing.
// r3: begin of destination FixedArray element fields, not tagged
// r4: begin of source FixedDoubleArray element fields, not tagged, +4
// r5: end of destination FixedArray, not tagged
// r6: destination FixedArray
- // r7: the-hole pointer
// r9: heap number map
__ b(&entry);
@@ -308,7 +604,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
__ bind(&loop);
__ ldr(r1, MemOperand(r4, 8, PostIndex));
- // lr: current element's upper 32 bit
+ // r1: current element's upper 32 bit
// r4: address of next element's upper 32 bit
__ cmp(r1, Operand(kHoleNanUpper32));
__ b(eq, &convert_hole);
@@ -331,7 +627,8 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
// Replace the-hole NaN with the-hole pointer.
__ bind(&convert_hole);
- __ str(r7, MemOperand(r3, 4, PostIndex));
+ __ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
+ __ str(r0, MemOperand(r3, 4, PostIndex));
__ bind(&entry);
__ cmp(r3, r5);
@@ -387,7 +684,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
Label indirect_string_loaded;
__ ldr(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
__ ldr(string, FieldMemOperand(string, SlicedString::kParentOffset));
- __ add(index, index, Operand(result, ASR, kSmiTagSize));
+ __ add(index, index, Operand::SmiUntag(result));
__ jmp(&indirect_string_loaded);
// Handle cons strings.
@@ -397,7 +694,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
// the string.
__ bind(&cons_string);
__ ldr(result, FieldMemOperand(string, ConsString::kSecondOffset));
- __ CompareRoot(result, Heap::kEmptyStringRootIndex);
+ __ CompareRoot(result, Heap::kempty_stringRootIndex);
__ b(ne, call_runtime);
// Get the first of the two strings and load its instance type.
__ ldr(string, FieldMemOperand(string, ConsString::kFirstOffset));
@@ -416,7 +713,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ b(ne, &external_string);
// Prepare sequential strings
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
__ add(string,
string,
Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
@@ -428,7 +725,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
// Assert that we do not have a cons or slice (indirect strings) here.
// Sequential strings have already been ruled out.
__ tst(result, Operand(kIsIndirectStringMask));
- __ Assert(eq, "external string expected, but not found");
+ __ Assert(eq, kExternalStringExpectedButNotFound);
}
// Rule out short external strings.
STATIC_CHECK(kShortExternalStringTag != 0);
@@ -450,8 +747,160 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ bind(&done);
}
+
+static MemOperand ExpConstant(int index, Register base) {
+ return MemOperand(base, index * kDoubleSize);
+}
+
+
+void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
+ DwVfpRegister input,
+ DwVfpRegister result,
+ DwVfpRegister double_scratch1,
+ DwVfpRegister double_scratch2,
+ Register temp1,
+ Register temp2,
+ Register temp3) {
+ ASSERT(!input.is(result));
+ ASSERT(!input.is(double_scratch1));
+ ASSERT(!input.is(double_scratch2));
+ ASSERT(!result.is(double_scratch1));
+ ASSERT(!result.is(double_scratch2));
+ ASSERT(!double_scratch1.is(double_scratch2));
+ ASSERT(!temp1.is(temp2));
+ ASSERT(!temp1.is(temp3));
+ ASSERT(!temp2.is(temp3));
+ ASSERT(ExternalReference::math_exp_constants(0).address() != NULL);
+
+ Label zero, infinity, done;
+
+ __ mov(temp3, Operand(ExternalReference::math_exp_constants(0)));
+
+ __ vldr(double_scratch1, ExpConstant(0, temp3));
+ __ VFPCompareAndSetFlags(double_scratch1, input);
+ __ b(ge, &zero);
+
+ __ vldr(double_scratch2, ExpConstant(1, temp3));
+ __ VFPCompareAndSetFlags(input, double_scratch2);
+ __ b(ge, &infinity);
+
+ __ vldr(double_scratch1, ExpConstant(3, temp3));
+ __ vldr(result, ExpConstant(4, temp3));
+ __ vmul(double_scratch1, double_scratch1, input);
+ __ vadd(double_scratch1, double_scratch1, result);
+ __ VmovLow(temp2, double_scratch1);
+ __ vsub(double_scratch1, double_scratch1, result);
+ __ vldr(result, ExpConstant(6, temp3));
+ __ vldr(double_scratch2, ExpConstant(5, temp3));
+ __ vmul(double_scratch1, double_scratch1, double_scratch2);
+ __ vsub(double_scratch1, double_scratch1, input);
+ __ vsub(result, result, double_scratch1);
+ __ vmul(double_scratch2, double_scratch1, double_scratch1);
+ __ vmul(result, result, double_scratch2);
+ __ vldr(double_scratch2, ExpConstant(7, temp3));
+ __ vmul(result, result, double_scratch2);
+ __ vsub(result, result, double_scratch1);
+ // Move 1 into double_scratch2, as math_exp_constants_array[8] == 1.
+ ASSERT(*reinterpret_cast<double*>
+ (ExternalReference::math_exp_constants(8).address()) == 1);
+ __ vmov(double_scratch2, 1);
+ __ vadd(result, result, double_scratch2);
+ __ mov(temp1, Operand(temp2, LSR, 11));
+ __ Ubfx(temp2, temp2, 0, 11);
+ __ add(temp1, temp1, Operand(0x3ff));
+
+ // Must not call ExpConstant() after overwriting temp3!
+ __ mov(temp3, Operand(ExternalReference::math_exp_log_table()));
+ __ add(temp3, temp3, Operand(temp2, LSL, 3));
+ __ ldm(ia, temp3, temp2.bit() | temp3.bit());
+ // The first word loaded ends up in the lower-numbered register.
+ if (temp2.code() < temp3.code()) {
+ __ orr(temp1, temp3, Operand(temp1, LSL, 20));
+ __ vmov(double_scratch1, temp2, temp1);
+ } else {
+ __ orr(temp1, temp2, Operand(temp1, LSL, 20));
+ __ vmov(double_scratch1, temp3, temp1);
+ }
+ __ vmul(result, result, double_scratch1);
+ __ b(&done);
+
+ __ bind(&zero);
+ __ vmov(result, kDoubleRegZero);
+ __ b(&done);
+
+ __ bind(&infinity);
+ __ vldr(result, ExpConstant(2, temp3));
+
+ __ bind(&done);
+}
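For reference, the sequence above is a table-driven exp: the scaled multiply-add drops round(input * constant) into the low word (temp2), the low 11 bits extracted by Ubfx index math_exp_log_table, the remaining high bits plus the 0x3ff IEEE bias are OR-ed into the exponent field of a fresh double, and the vmul/vsub chain evaluates a short polynomial in the remainder. A minimal C++ sketch of that scheme follows; the function name, the table pointer T, the bits parameter and the polynomial coefficients are illustrative assumptions, not the values held in math_exp_constants.

#include <cmath>

// Sketch only: exp(x) ~= 2^k * T[j] * P(r), with n = lround(x * 2^bits / ln 2),
// k taken from the high bits of n (binary exponent), j from the low bits
// (table index; 11 bits in the emitted code above), and P a short polynomial
// in the small remainder r.
double SketchExp(double x, const double* T, int bits) {
  const double ln2 = std::log(2.0);
  long n = std::lround(x * (1 << bits) / ln2);
  int k = static_cast<int>(n >> bits);              // exponent contribution
  int j = static_cast<int>(n & ((1 << bits) - 1));  // table index
  double r = x - n * ln2 / (1 << bits);             // remainder after range reduction
  double p = 1.0 + r + 0.5 * r * r;                 // illustrative low-order polynomial
  return std::ldexp(T[j] * p, k);
}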
+
#undef __
+// add(r0, pc, Operand(-8))
+static const uint32_t kCodeAgePatchFirstInstruction = 0xe24f0008;
+
+static byte* GetNoCodeAgeSequence(uint32_t* length) {
+ // The sequence of instructions that is patched out for aging code is the
+ // following boilerplate stack-building prologue that is found in FUNCTIONS
+ static bool initialized = false;
+ static uint32_t sequence[kNoCodeAgeSequenceLength];
+ byte* byte_sequence = reinterpret_cast<byte*>(sequence);
+ *length = kNoCodeAgeSequenceLength * Assembler::kInstrSize;
+ if (!initialized) {
+ CodePatcher patcher(byte_sequence, kNoCodeAgeSequenceLength);
+ PredictableCodeSizeScope scope(patcher.masm(), *length);
+ patcher.masm()->stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
+ patcher.masm()->nop(ip.code());
+ patcher.masm()->add(fp, sp, Operand(2 * kPointerSize));
+ initialized = true;
+ }
+ return byte_sequence;
+}
+
+
+bool Code::IsYoungSequence(byte* sequence) {
+ uint32_t young_length;
+ byte* young_sequence = GetNoCodeAgeSequence(&young_length);
+ bool result = !memcmp(sequence, young_sequence, young_length);
+ ASSERT(result ||
+ Memory::uint32_at(sequence) == kCodeAgePatchFirstInstruction);
+ return result;
+}
+
+
+void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
+ MarkingParity* parity) {
+ if (IsYoungSequence(sequence)) {
+ *age = kNoAgeCodeAge;
+ *parity = NO_MARKING_PARITY;
+ } else {
+ Address target_address = Memory::Address_at(
+ sequence + Assembler::kInstrSize * (kNoCodeAgeSequenceLength - 1));
+ Code* stub = GetCodeFromTargetAddress(target_address);
+ GetCodeAgeAndParity(stub, age, parity);
+ }
+}
+
+
+void Code::PatchPlatformCodeAge(Isolate* isolate,
+ byte* sequence,
+ Code::Age age,
+ MarkingParity parity) {
+ uint32_t young_length;
+ byte* young_sequence = GetNoCodeAgeSequence(&young_length);
+ if (age == kNoAgeCodeAge) {
+ CopyBytes(sequence, young_sequence, young_length);
+ CPU::FlushICache(sequence, young_length);
+ } else {
+ Code* stub = GetCodeAgeStub(isolate, age, parity);
+ CodePatcher patcher(sequence, young_length / Assembler::kInstrSize);
+ patcher.masm()->add(r0, pc, Operand(-8));
+ patcher.masm()->ldr(pc, MemOperand(pc, -4));
+ patcher.masm()->dd(reinterpret_cast<uint32_t>(stub->instruction_start()));
+ }
+}
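Taken together, these helpers flip a function prologue between two three-instruction forms. The listing below is reconstructed from the code above, for orientation only: the young form is the ordinary stack-building prologue baked by GetNoCodeAgeSequence, the aged form tail-calls the code-age stub whose entry address sits inline as the literal word that GetCodeAgeAndParity reads back, and IsYoungSequence tells them apart by memcmp against the young bytes (asserting, otherwise, that the first word is kCodeAgePatchFirstInstruction, the encoding of add r0, pc, #-8).

  young (GetNoCodeAgeSequence)           aged (PatchPlatformCodeAge)
  stm db_w, sp, {r1, cp, fp, lr}         add r0, pc, #-8          ; 0xe24f0008
  nop(ip.code())                         ldr pc, [pc, #-4]
  add fp, sp, #(2 * kPointerSize)        .word <code-age stub entry>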
+
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_ARM
diff --git a/deps/v8/src/arm/codegen-arm.h b/deps/v8/src/arm/codegen-arm.h
index c340e6b10..ecbe64cba 100644
--- a/deps/v8/src/arm/codegen-arm.h
+++ b/deps/v8/src/arm/codegen-arm.h
@@ -44,10 +44,14 @@ enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
class CodeGenerator: public AstVisitor {
public:
+ explicit CodeGenerator(Isolate* isolate) {
+ InitializeAstVisitor(isolate);
+ }
+
static bool MakeCode(CompilationInfo* info);
// Printing of AST, etc. as requested by flags.
- static void MakeCodePrologue(CompilationInfo* info);
+ static void MakeCodePrologue(CompilationInfo* info, const char* kind);
// Allocate and install the code.
static Handle<Code> MakeCodeEpilogue(MacroAssembler* masm,
@@ -57,7 +61,7 @@ class CodeGenerator: public AstVisitor {
// Print the code after compiling it.
static void PrintCode(Handle<Code> code, CompilationInfo* info);
- static bool ShouldGenerateLog(Expression* type);
+ static bool ShouldGenerateLog(Isolate* isolate, Expression* type);
static void SetFunctionInfo(Handle<JSFunction> fun,
FunctionLiteral* lit,
@@ -68,6 +72,8 @@ class CodeGenerator: public AstVisitor {
int pos,
bool right_here = false);
+ DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
+
private:
DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
};
@@ -88,6 +94,23 @@ class StringCharLoadGenerator : public AllStatic {
DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
};
+
+class MathExpGenerator : public AllStatic {
+ public:
+ // Register input isn't modified. All other registers are clobbered.
+ static void EmitMathExp(MacroAssembler* masm,
+ DwVfpRegister input,
+ DwVfpRegister result,
+ DwVfpRegister double_scratch1,
+ DwVfpRegister double_scratch2,
+ Register temp1,
+ Register temp2,
+ Register temp3);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MathExpGenerator);
+};
+
} } // namespace v8::internal
#endif // V8_ARM_CODEGEN_ARM_H_
diff --git a/deps/v8/src/arm/constants-arm.cc b/deps/v8/src/arm/constants-arm.cc
index bf9da232c..7d59a84b1 100644
--- a/deps/v8/src/arm/constants-arm.cc
+++ b/deps/v8/src/arm/constants-arm.cc
@@ -27,7 +27,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_ARM)
+#if V8_TARGET_ARCH_ARM
#include "constants-arm.h"
@@ -51,7 +51,7 @@ double Instruction::DoubleImmedVmov() const {
uint64_t imm = high16 << 48;
double d;
- memcpy(&d, &imm, 8);
+ OS::MemCopy(&d, &imm, 8);
return d;
}
@@ -87,8 +87,8 @@ const char* Registers::Name(int reg) {
}
-// Support for VFP registers s0 to s31 (d0 to d15).
-// Note that "sN:sM" is the same as "dN/2"
+// Support for VFP registers s0 to s31 (d0 to d15) and d16-d31.
+// Note that "sN:sM" is the same as "dN/2" up to d15.
// These register names are defined in a way to match the native disassembler
// formatting. See for example the command "objdump -d <binary file>".
const char* VFPRegisters::names_[kNumVFPRegisters] = {
@@ -97,7 +97,9 @@ const char* VFPRegisters::names_[kNumVFPRegisters] = {
"s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23",
"s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",
"d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
- "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15"
+ "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
+ "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
+ "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31"
};
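The sixteen added names cover the VFPv3-D32 upper bank: d16-d31 have no sN aliases (only d0-d15 overlap the single-precision file, hence the "up to d15" qualifier above), and with 32 single and 32 double names the table now holds 64 entries, matching the kNumVFPRegisters sum in constants-arm.h below.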
diff --git a/deps/v8/src/arm/constants-arm.h b/deps/v8/src/arm/constants-arm.h
index 4fa49e3d3..703613932 100644
--- a/deps/v8/src/arm/constants-arm.h
+++ b/deps/v8/src/arm/constants-arm.h
@@ -33,67 +33,29 @@
#error ARM EABI support is required.
#endif
-// This means that interwork-compatible jump instructions are generated. We
-// want to generate them on the simulator too so it makes snapshots that can
-// be used on real hardware.
-#if defined(__THUMB_INTERWORK__) || !defined(__arm__)
-# define USE_THUMB_INTERWORK 1
-#endif
-
-#if defined(__ARM_ARCH_7A__) || \
- defined(__ARM_ARCH_7R__) || \
- defined(__ARM_ARCH_7__)
-# define CAN_USE_ARMV7_INSTRUCTIONS 1
-#endif
-
-#if defined(__ARM_ARCH_6__) || \
- defined(__ARM_ARCH_6J__) || \
- defined(__ARM_ARCH_6K__) || \
- defined(__ARM_ARCH_6Z__) || \
- defined(__ARM_ARCH_6ZK__) || \
- defined(__ARM_ARCH_6T2__) || \
- defined(CAN_USE_ARMV7_INSTRUCTIONS)
-# define CAN_USE_ARMV6_INSTRUCTIONS 1
-#endif
-
-#if defined(__ARM_ARCH_5T__) || \
- defined(__ARM_ARCH_5TE__) || \
- defined(__ARM_ARCH_5TEJ__) || \
- defined(CAN_USE_ARMV6_INSTRUCTIONS)
-# define CAN_USE_ARMV5_INSTRUCTIONS 1
-# define CAN_USE_THUMB_INSTRUCTIONS 1
-#endif
-
-// Simulator should support ARM5 instructions and unaligned access by default.
-#if !defined(__arm__)
-# define CAN_USE_ARMV5_INSTRUCTIONS 1
-# define CAN_USE_THUMB_INSTRUCTIONS 1
-
-# ifndef CAN_USE_UNALIGNED_ACCESSES
-# define CAN_USE_UNALIGNED_ACCESSES 1
-# endif
-
-#endif
-
-// Using blx may yield better code, so use it when required or when available
-#if defined(USE_THUMB_INTERWORK) || defined(CAN_USE_ARMV5_INSTRUCTIONS)
-#define USE_BLX 1
-#endif
-
namespace v8 {
namespace internal {
// Constant pool marker.
-const int kConstantPoolMarkerMask = 0xffe00000;
-const int kConstantPoolMarker = 0x0c000000;
-const int kConstantPoolLengthMask = 0x001ffff;
+// Use UDF, the permanently undefined instruction.
+const int kConstantPoolMarkerMask = 0xfff000f0;
+const int kConstantPoolMarker = 0xe7f000f0;
+const int kConstantPoolLengthMaxMask = 0xffff;
+inline int EncodeConstantPoolLength(int length) {
+ ASSERT((length & kConstantPoolLengthMaxMask) == length);
+ return ((length & 0xfff0) << 4) | (length & 0xf);
+}
+inline int DecodeConstantPoolLength(int instr) {
+ ASSERT((instr & kConstantPoolMarkerMask) == kConstantPoolMarker);
+ return ((instr >> 4) & 0xfff0) | (instr & 0xf);
+}
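The split encoding exists because the UDF marker pins bits [31:20] and [7:4], leaving instruction bits [19:8] and [3:0] free for the 16-bit pool length, so the length's bits [15:4] are shifted up past the fixed 0xf0 nibble. A small self-contained round-trip check, with an arbitrarily chosen length:

#include <cassert>

int main() {
  const unsigned kMarker = 0xe7f000f0;                           // kConstantPoolMarker above
  unsigned length = 0x1234;
  unsigned encoded = ((length & 0xfff0) << 4) | (length & 0xf);  // 0x00012304
  unsigned instr = kMarker | encoded;                            // 0xe7f123f4
  unsigned decoded = ((instr >> 4) & 0xfff0) | (instr & 0xf);
  assert(decoded == length);
  return 0;
}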
// Number of registers in normal ARM mode.
const int kNumRegisters = 16;
// VFP support.
const int kNumVFPSingleRegisters = 32;
-const int kNumVFPDoubleRegisters = 16;
+const int kNumVFPDoubleRegisters = 32;
const int kNumVFPRegisters = kNumVFPSingleRegisters + kNumVFPDoubleRegisters;
// PC is register 15.
@@ -258,7 +220,10 @@ enum {
kCoprocessorMask = 15 << 8,
kOpCodeMask = 15 << 21, // In data-processing instructions.
kImm24Mask = (1 << 24) - 1,
- kOff12Mask = (1 << 12) - 1
+ kImm16Mask = (1 << 16) - 1,
+ kImm8Mask = (1 << 8) - 1,
+ kOff12Mask = (1 << 12) - 1,
+ kOff8Mask = (1 << 8) - 1
};
@@ -352,6 +317,32 @@ enum LFlag {
};
+// NEON data type
+enum NeonDataType {
+ NeonS8 = 0x1, // U = 0, imm3 = 0b001
+ NeonS16 = 0x2, // U = 0, imm3 = 0b010
+ NeonS32 = 0x4, // U = 0, imm3 = 0b100
+ NeonU8 = 1 << 24 | 0x1, // U = 1, imm3 = 0b001
+ NeonU16 = 1 << 24 | 0x2, // U = 1, imm3 = 0b010
+ NeonU32 = 1 << 24 | 0x4, // U = 1, imm3 = 0b100
+ NeonDataTypeSizeMask = 0x7,
+ NeonDataTypeUMask = 1 << 24
+};
+
+enum NeonListType {
+ nlt_1 = 0x7,
+ nlt_2 = 0xA,
+ nlt_3 = 0x6,
+ nlt_4 = 0x2
+};
+
+enum NeonSize {
+ Neon8 = 0x0,
+ Neon16 = 0x1,
+ Neon32 = 0x2,
+ Neon64 = 0x4
+};
+
// -----------------------------------------------------------------------------
// Supervisor Call (svc) specific support.
@@ -393,6 +384,7 @@ const uint32_t kVFPOverflowExceptionBit = 1 << 2;
const uint32_t kVFPUnderflowExceptionBit = 1 << 3;
const uint32_t kVFPInexactExceptionBit = 1 << 4;
const uint32_t kVFPFlushToZeroMask = 1 << 24;
+const uint32_t kVFPDefaultNaNModeControlBit = 1 << 25;
const uint32_t kVFPNConditionFlagBit = 1 << 31;
const uint32_t kVFPZConditionFlagBit = 1 << 30;
@@ -455,6 +447,9 @@ extern const Instr kMovLrPc;
// ldr rd, [pc, #offset]
extern const Instr kLdrPCMask;
extern const Instr kLdrPCPattern;
+// vldr dd, [pc, #offset]
+extern const Instr kVldrDPCMask;
+extern const Instr kVldrDPCPattern;
// blxcc rm
extern const Instr kBlxRegMask;
@@ -590,6 +585,7 @@ class Instruction {
DECLARE_STATIC_TYPED_ACCESSOR(Condition, ConditionField);
inline int TypeValue() const { return Bits(27, 25); }
+ inline int SpecialValue() const { return Bits(27, 23); }
inline int RnValue() const { return Bits(19, 16); }
DECLARE_STATIC_ACCESSOR(RnValue);
diff --git a/deps/v8/src/arm/cpu-arm.cc b/deps/v8/src/arm/cpu-arm.cc
index 7b08ed8c2..cf531e129 100644
--- a/deps/v8/src/arm/cpu-arm.cc
+++ b/deps/v8/src/arm/cpu-arm.cc
@@ -32,7 +32,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_ARM)
+#if V8_TARGET_ARCH_ARM
#include "cpu.h"
#include "macro-assembler.h"
@@ -106,15 +106,6 @@ void CPU::FlushICache(void* start, size_t size) {
#endif
}
-
-void CPU::DebugBreak() {
-#if !defined (__arm__) || !defined(CAN_USE_ARMV5_INSTRUCTIONS)
- UNIMPLEMENTED(); // when building ARM emulator target
-#else
- asm volatile("bkpt 0");
-#endif
-}
-
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_ARM
diff --git a/deps/v8/src/arm/debug-arm.cc b/deps/v8/src/arm/debug-arm.cc
index c2941be06..efd11069b 100644
--- a/deps/v8/src/arm/debug-arm.cc
+++ b/deps/v8/src/arm/debug-arm.cc
@@ -27,7 +27,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_ARM)
+#if V8_TARGET_ARCH_ARM
#include "codegen.h"
#include "debug.h"
@@ -48,24 +48,15 @@ void BreakLocationIterator::SetDebugBreakAtReturn() {
// add sp, sp, #4
// bx lr
// to a call to the debug break return code.
- // #ifdef USE_BLX
// ldr ip, [pc, #0]
// blx ip
- // #else
- // mov lr, pc
- // ldr pc, [pc, #-4]
- // #endif
// <debug break return code entry point address>
// bkpt 0
CodePatcher patcher(rinfo()->pc(), Assembler::kJSReturnSequenceInstructions);
-#ifdef USE_BLX
patcher.masm()->ldr(v8::internal::ip, MemOperand(v8::internal::pc, 0));
patcher.masm()->blx(v8::internal::ip);
-#else
- patcher.masm()->mov(v8::internal::lr, v8::internal::pc);
- patcher.masm()->ldr(v8::internal::pc, MemOperand(v8::internal::pc, -4));
-#endif
- patcher.Emit(Isolate::Current()->debug()->debug_break_return()->entry());
+ patcher.Emit(
+ debug_info_->GetIsolate()->debug()->debug_break_return()->entry());
patcher.masm()->bkpt(0);
}
@@ -99,23 +90,14 @@ void BreakLocationIterator::SetDebugBreakAtSlot() {
// mov r2, r2
// mov r2, r2
// to a call to the debug break slot code.
- // #ifdef USE_BLX
// ldr ip, [pc, #0]
// blx ip
- // #else
- // mov lr, pc
- // ldr pc, [pc, #-4]
- // #endif
// <debug break slot code entry point address>
CodePatcher patcher(rinfo()->pc(), Assembler::kDebugBreakSlotInstructions);
-#ifdef USE_BLX
patcher.masm()->ldr(v8::internal::ip, MemOperand(v8::internal::pc, 0));
patcher.masm()->blx(v8::internal::ip);
-#else
- patcher.masm()->mov(v8::internal::lr, v8::internal::pc);
- patcher.masm()->ldr(v8::internal::pc, MemOperand(v8::internal::pc, -4));
-#endif
- patcher.Emit(Isolate::Current()->debug()->debug_break_slot()->entry());
+ patcher.Emit(
+ debug_info_->GetIsolate()->debug()->debug_break_slot()->entry());
}
@@ -150,9 +132,9 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
if ((non_object_regs & (1 << r)) != 0) {
if (FLAG_debug_code) {
__ tst(reg, Operand(0xc0000000));
- __ Assert(eq, "Unable to encode value as smi");
+ __ Assert(eq, kUnableToEncodeValueAsSmi);
}
- __ mov(reg, Operand(reg, LSL, kSmiTagSize));
+ __ SmiTag(reg);
}
}
__ stm(db_w, sp, object_regs | non_object_regs);
@@ -161,7 +143,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
#ifdef DEBUG
__ RecordComment("// Calling from debug break to runtime - come in - over");
#endif
- __ mov(r0, Operand(0, RelocInfo::NONE)); // no arguments
+ __ mov(r0, Operand::Zero()); // no arguments
__ mov(r1, Operand(ExternalReference::debug_break(masm->isolate())));
CEntryStub ceb(1);
@@ -174,7 +156,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
int r = JSCallerSavedCode(i);
Register reg = { r };
if ((non_object_regs & (1 << r)) != 0) {
- __ mov(reg, Operand(reg, LSR, kSmiTagSize));
+ __ SmiUntag(reg);
}
if (FLAG_debug_code &&
(((object_regs |non_object_regs) & (1 << r)) == 0)) {
@@ -244,6 +226,15 @@ void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
}
+void Debug::GenerateCompareNilICDebugBreak(MacroAssembler* masm) {
+ // Register state for CompareNil IC
+ // ----------- S t a t e -------------
+ // -- r0 : value
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, r0.bit(), 0);
+}
+
+
void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) {
// Calling convention for IC call (from ic-arm.cc)
// ----------- S t a t e -------------
@@ -324,12 +315,12 @@ void Debug::GenerateSlotDebugBreak(MacroAssembler* masm) {
void Debug::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
- masm->Abort("LiveEdit frame dropping is not supported on arm");
+ masm->Abort(kLiveEditFrameDroppingIsNotSupportedOnArm);
}
void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
- masm->Abort("LiveEdit frame dropping is not supported on arm");
+ masm->Abort(kLiveEditFrameDroppingIsNotSupportedOnArm);
}
const bool Debug::kFrameDropperSupported = false;
diff --git a/deps/v8/src/arm/deoptimizer-arm.cc b/deps/v8/src/arm/deoptimizer-arm.cc
index 19667b9d5..9339c5fad 100644
--- a/deps/v8/src/arm/deoptimizer-arm.cc
+++ b/deps/v8/src/arm/deoptimizer-arm.cc
@@ -35,7 +35,7 @@
namespace v8 {
namespace internal {
-const int Deoptimizer::table_entry_size_ = 16;
+const int Deoptimizer::table_entry_size_ = 12;
int Deoptimizer::patch_size() {
@@ -44,20 +44,8 @@ int Deoptimizer::patch_size() {
}
-void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
- HandleScope scope;
- AssertNoAllocation no_allocation;
-
- if (!function->IsOptimized()) return;
-
- // The optimized code is going to be patched, so we cannot use it
- // any more. Play safe and reset the whole cache.
- function->shared()->ClearOptimizedCodeMap();
-
- // Get the optimized code.
- Code* code = function->code();
+void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
Address code_start_address = code->instruction_start();
-
// Invalidate the relocation information, as it will become invalid by the
// code patching below, and is not needed any more.
code->InvalidateRelocation();
@@ -72,17 +60,17 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
for (int i = 0; i < deopt_data->DeoptCount(); i++) {
if (deopt_data->Pc(i)->value() == -1) continue;
Address call_address = code_start_address + deopt_data->Pc(i)->value();
- Address deopt_entry = GetDeoptimizationEntry(i, LAZY);
+ Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY);
// We need calls to have a predictable size in the unoptimized code, but
// this is optimized code, so we don't have to have a predictable size.
int call_size_in_bytes =
MacroAssembler::CallSizeNotPredictableCodeSize(deopt_entry,
- RelocInfo::NONE);
+ RelocInfo::NONE32);
int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize;
ASSERT(call_size_in_bytes % Assembler::kInstrSize == 0);
ASSERT(call_size_in_bytes <= patch_size());
CodePatcher patcher(call_address, call_size_in_words);
- patcher.masm()->Call(deopt_entry, RelocInfo::NONE);
+ patcher.masm()->Call(deopt_entry, RelocInfo::NONE32);
ASSERT(prev_call_address == NULL ||
call_address >= prev_call_address + patch_size());
ASSERT(call_address + patch_size() <= code->instruction_end());
@@ -90,791 +78,6 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
prev_call_address = call_address;
#endif
}
-
- Isolate* isolate = code->GetIsolate();
-
- // Add the deoptimizing code to the list.
- DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
- DeoptimizerData* data = isolate->deoptimizer_data();
- node->set_next(data->deoptimizing_code_list_);
- data->deoptimizing_code_list_ = node;
-
- // We might be in the middle of incremental marking with compaction.
- // Tell collector to treat this code object in a special way and
- // ignore all slots that might have been recorded on it.
- isolate->heap()->mark_compact_collector()->InvalidateCode(code);
-
- ReplaceCodeForRelatedFunctions(function, code);
-
- if (FLAG_trace_deopt) {
- PrintF("[forced deoptimization: ");
- function->PrintName();
- PrintF(" / %x]\n", reinterpret_cast<uint32_t>(function));
- }
-}
-
-
-static const int32_t kBranchBeforeStackCheck = 0x2a000001;
-static const int32_t kBranchBeforeInterrupt = 0x5a000004;
-
-
-void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
- Address pc_after,
- Code* check_code,
- Code* replacement_code) {
- const int kInstrSize = Assembler::kInstrSize;
- // The call of the stack guard check has the following form:
- // e1 5d 00 0c cmp sp, <limit>
- // 2a 00 00 01 bcs ok
- // e5 9f c? ?? ldr ip, [pc, <stack guard address>]
- // e1 2f ff 3c blx ip
- ASSERT(Memory::int32_at(pc_after - kInstrSize) == kBlxIp);
- ASSERT(Assembler::IsLdrPcImmediateOffset(
- Assembler::instr_at(pc_after - 2 * kInstrSize)));
- if (FLAG_count_based_interrupts) {
- ASSERT_EQ(kBranchBeforeInterrupt,
- Memory::int32_at(pc_after - 3 * kInstrSize));
- } else {
- ASSERT_EQ(kBranchBeforeStackCheck,
- Memory::int32_at(pc_after - 3 * kInstrSize));
- }
-
- // We patch the code to the following form:
- // e1 5d 00 0c cmp sp, <limit>
- // e1 a0 00 00 mov r0, r0 (NOP)
- // e5 9f c? ?? ldr ip, [pc, <on-stack replacement address>]
- // e1 2f ff 3c blx ip
- // and overwrite the constant containing the
- // address of the stack check stub.
-
- // Replace conditional jump with NOP.
- CodePatcher patcher(pc_after - 3 * kInstrSize, 1);
- patcher.masm()->nop();
-
- // Replace the stack check address in the constant pool
- // with the entry address of the replacement code.
- uint32_t stack_check_address_offset = Memory::uint16_at(pc_after -
- 2 * kInstrSize) & 0xfff;
- Address stack_check_address_pointer = pc_after + stack_check_address_offset;
- ASSERT(Memory::uint32_at(stack_check_address_pointer) ==
- reinterpret_cast<uint32_t>(check_code->entry()));
- Memory::uint32_at(stack_check_address_pointer) =
- reinterpret_cast<uint32_t>(replacement_code->entry());
-
- unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
- unoptimized_code, pc_after - 2 * kInstrSize, replacement_code);
-}
-
-
-void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
- Address pc_after,
- Code* check_code,
- Code* replacement_code) {
- const int kInstrSize = Assembler::kInstrSize;
- ASSERT(Memory::int32_at(pc_after - kInstrSize) == kBlxIp);
- ASSERT(Assembler::IsLdrPcImmediateOffset(
- Assembler::instr_at(pc_after - 2 * kInstrSize)));
-
- // Replace NOP with conditional jump.
- CodePatcher patcher(pc_after - 3 * kInstrSize, 1);
- if (FLAG_count_based_interrupts) {
- patcher.masm()->b(+16, pl);
- ASSERT_EQ(kBranchBeforeInterrupt,
- Memory::int32_at(pc_after - 3 * kInstrSize));
- } else {
- patcher.masm()->b(+4, cs);
- ASSERT_EQ(kBranchBeforeStackCheck,
- Memory::int32_at(pc_after - 3 * kInstrSize));
- }
-
- // Replace the stack check address in the constant pool
- // with the entry address of the replacement code.
- uint32_t stack_check_address_offset = Memory::uint16_at(pc_after -
- 2 * kInstrSize) & 0xfff;
- Address stack_check_address_pointer = pc_after + stack_check_address_offset;
- ASSERT(Memory::uint32_at(stack_check_address_pointer) ==
- reinterpret_cast<uint32_t>(replacement_code->entry()));
- Memory::uint32_at(stack_check_address_pointer) =
- reinterpret_cast<uint32_t>(check_code->entry());
-
- check_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
- unoptimized_code, pc_after - 2 * kInstrSize, check_code);
-}
-
-
-static int LookupBailoutId(DeoptimizationInputData* data, BailoutId ast_id) {
- ByteArray* translations = data->TranslationByteArray();
- int length = data->DeoptCount();
- for (int i = 0; i < length; i++) {
- if (data->AstId(i) == ast_id) {
- TranslationIterator it(translations, data->TranslationIndex(i)->value());
- int value = it.Next();
- ASSERT(Translation::BEGIN == static_cast<Translation::Opcode>(value));
- // Read the number of frames.
- value = it.Next();
- if (value == 1) return i;
- }
- }
- UNREACHABLE();
- return -1;
-}
-
-
-void Deoptimizer::DoComputeOsrOutputFrame() {
- DeoptimizationInputData* data = DeoptimizationInputData::cast(
- optimized_code_->deoptimization_data());
- unsigned ast_id = data->OsrAstId()->value();
-
- int bailout_id = LookupBailoutId(data, BailoutId(ast_id));
- unsigned translation_index = data->TranslationIndex(bailout_id)->value();
- ByteArray* translations = data->TranslationByteArray();
-
- TranslationIterator iterator(translations, translation_index);
- Translation::Opcode opcode =
- static_cast<Translation::Opcode>(iterator.Next());
- ASSERT(Translation::BEGIN == opcode);
- USE(opcode);
- int count = iterator.Next();
- iterator.Skip(1); // Drop JS frame count.
- ASSERT(count == 1);
- USE(count);
-
- opcode = static_cast<Translation::Opcode>(iterator.Next());
- USE(opcode);
- ASSERT(Translation::JS_FRAME == opcode);
- unsigned node_id = iterator.Next();
- USE(node_id);
- ASSERT(node_id == ast_id);
- int closure_id = iterator.Next();
- USE(closure_id);
- ASSERT_EQ(Translation::kSelfLiteralId, closure_id);
- unsigned height = iterator.Next();
- unsigned height_in_bytes = height * kPointerSize;
- USE(height_in_bytes);
-
- unsigned fixed_size = ComputeFixedSize(function_);
- unsigned input_frame_size = input_->GetFrameSize();
- ASSERT(fixed_size + height_in_bytes == input_frame_size);
-
- unsigned stack_slot_size = optimized_code_->stack_slots() * kPointerSize;
- unsigned outgoing_height = data->ArgumentsStackHeight(bailout_id)->value();
- unsigned outgoing_size = outgoing_height * kPointerSize;
- unsigned output_frame_size = fixed_size + stack_slot_size + outgoing_size;
- ASSERT(outgoing_size == 0); // OSR does not happen in the middle of a call.
-
- if (FLAG_trace_osr) {
- PrintF("[on-stack replacement: begin 0x%08" V8PRIxPTR " ",
- reinterpret_cast<intptr_t>(function_));
- function_->PrintName();
- PrintF(" => node=%u, frame=%d->%d]\n",
- ast_id,
- input_frame_size,
- output_frame_size);
- }
-
- // There's only one output frame in the OSR case.
- output_count_ = 1;
- output_ = new FrameDescription*[1];
- output_[0] = new(output_frame_size) FrameDescription(
- output_frame_size, function_);
- output_[0]->SetFrameType(StackFrame::JAVA_SCRIPT);
-
- // Clear the incoming parameters in the optimized frame to avoid
- // confusing the garbage collector.
- unsigned output_offset = output_frame_size - kPointerSize;
- int parameter_count = function_->shared()->formal_parameter_count() + 1;
- for (int i = 0; i < parameter_count; ++i) {
- output_[0]->SetFrameSlot(output_offset, 0);
- output_offset -= kPointerSize;
- }
-
- // Translate the incoming parameters. This may overwrite some of the
- // incoming argument slots we've just cleared.
- int input_offset = input_frame_size - kPointerSize;
- bool ok = true;
- int limit = input_offset - (parameter_count * kPointerSize);
- while (ok && input_offset > limit) {
- ok = DoOsrTranslateCommand(&iterator, &input_offset);
- }
-
- // There are no translation commands for the caller's pc and fp, the
- // context, and the function. Set them up explicitly.
- for (int i = StandardFrameConstants::kCallerPCOffset;
- ok && i >= StandardFrameConstants::kMarkerOffset;
- i -= kPointerSize) {
- uint32_t input_value = input_->GetFrameSlot(input_offset);
- if (FLAG_trace_osr) {
- const char* name = "UNKNOWN";
- switch (i) {
- case StandardFrameConstants::kCallerPCOffset:
- name = "caller's pc";
- break;
- case StandardFrameConstants::kCallerFPOffset:
- name = "fp";
- break;
- case StandardFrameConstants::kContextOffset:
- name = "context";
- break;
- case StandardFrameConstants::kMarkerOffset:
- name = "function";
- break;
- }
- PrintF(" [sp + %d] <- 0x%08x ; [sp + %d] (fixed part - %s)\n",
- output_offset,
- input_value,
- input_offset,
- name);
- }
-
- output_[0]->SetFrameSlot(output_offset, input_->GetFrameSlot(input_offset));
- input_offset -= kPointerSize;
- output_offset -= kPointerSize;
- }
-
- // Translate the rest of the frame.
- while (ok && input_offset >= 0) {
- ok = DoOsrTranslateCommand(&iterator, &input_offset);
- }
-
- // If translation of any command failed, continue using the input frame.
- if (!ok) {
- delete output_[0];
- output_[0] = input_;
- output_[0]->SetPc(reinterpret_cast<uint32_t>(from_));
- } else {
- // Set up the frame pointer and the context pointer.
- output_[0]->SetRegister(fp.code(), input_->GetRegister(fp.code()));
- output_[0]->SetRegister(cp.code(), input_->GetRegister(cp.code()));
-
- unsigned pc_offset = data->OsrPcOffset()->value();
- uint32_t pc = reinterpret_cast<uint32_t>(
- optimized_code_->entry() + pc_offset);
- output_[0]->SetPc(pc);
- }
- Code* continuation = isolate_->builtins()->builtin(Builtins::kNotifyOSR);
- output_[0]->SetContinuation(
- reinterpret_cast<uint32_t>(continuation->entry()));
-
- if (FLAG_trace_osr) {
- PrintF("[on-stack replacement translation %s: 0x%08" V8PRIxPTR " ",
- ok ? "finished" : "aborted",
- reinterpret_cast<intptr_t>(function_));
- function_->PrintName();
- PrintF(" => pc=0x%0x]\n", output_[0]->GetPc());
- }
-}
-
-
-void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
- int frame_index) {
- JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
- unsigned height = iterator->Next();
- unsigned height_in_bytes = height * kPointerSize;
- if (FLAG_trace_deopt) {
- PrintF(" translating arguments adaptor => height=%d\n", height_in_bytes);
- }
-
- unsigned fixed_frame_size = ArgumentsAdaptorFrameConstants::kFrameSize;
- unsigned output_frame_size = height_in_bytes + fixed_frame_size;
-
- // Allocate and store the output frame description.
- FrameDescription* output_frame =
- new(output_frame_size) FrameDescription(output_frame_size, function);
- output_frame->SetFrameType(StackFrame::ARGUMENTS_ADAPTOR);
-
- // Arguments adaptor can not be topmost or bottommost.
- ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
- ASSERT(output_[frame_index] == NULL);
- output_[frame_index] = output_frame;
-
- // The top address of the frame is computed from the previous
- // frame's top and this frame's size.
- uint32_t top_address;
- top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
- output_frame->SetTop(top_address);
-
- // Compute the incoming parameter translation.
- int parameter_count = height;
- unsigned output_offset = output_frame_size;
- for (int i = 0; i < parameter_count; ++i) {
- output_offset -= kPointerSize;
- DoTranslateCommand(iterator, frame_index, output_offset);
- }
-
- // Read caller's PC from the previous frame.
- output_offset -= kPointerSize;
- intptr_t callers_pc = output_[frame_index - 1]->GetPc();
- output_frame->SetFrameSlot(output_offset, callers_pc);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
- top_address + output_offset, output_offset, callers_pc);
- }
-
- // Read caller's FP from the previous frame, and set this frame's FP.
- output_offset -= kPointerSize;
- intptr_t value = output_[frame_index - 1]->GetFp();
- output_frame->SetFrameSlot(output_offset, value);
- intptr_t fp_value = top_address + output_offset;
- output_frame->SetFp(fp_value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
- fp_value, output_offset, value);
- }
-
- // A marker value is used in place of the context.
- output_offset -= kPointerSize;
- intptr_t context = reinterpret_cast<intptr_t>(
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- output_frame->SetFrameSlot(output_offset, context);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context (adaptor sentinel)\n",
- top_address + output_offset, output_offset, context);
- }
-
- // The function was mentioned explicitly in the ARGUMENTS_ADAPTOR_FRAME.
- output_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(function);
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function\n",
- top_address + output_offset, output_offset, value);
- }
-
- // Number of incoming arguments.
- output_offset -= kPointerSize;
- value = reinterpret_cast<uint32_t>(Smi::FromInt(height - 1));
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; argc (%d)\n",
- top_address + output_offset, output_offset, value, height - 1);
- }
-
- ASSERT(0 == output_offset);
-
- Builtins* builtins = isolate_->builtins();
- Code* adaptor_trampoline =
- builtins->builtin(Builtins::kArgumentsAdaptorTrampoline);
- uint32_t pc = reinterpret_cast<uint32_t>(
- adaptor_trampoline->instruction_start() +
- isolate_->heap()->arguments_adaptor_deopt_pc_offset()->value());
- output_frame->SetPc(pc);
-}
-
-
-void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
- int frame_index) {
- Builtins* builtins = isolate_->builtins();
- Code* construct_stub = builtins->builtin(Builtins::kJSConstructStubGeneric);
- JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
- unsigned height = iterator->Next();
- unsigned height_in_bytes = height * kPointerSize;
- if (FLAG_trace_deopt) {
- PrintF(" translating construct stub => height=%d\n", height_in_bytes);
- }
-
- unsigned fixed_frame_size = 8 * kPointerSize;
- unsigned output_frame_size = height_in_bytes + fixed_frame_size;
-
- // Allocate and store the output frame description.
- FrameDescription* output_frame =
- new(output_frame_size) FrameDescription(output_frame_size, function);
- output_frame->SetFrameType(StackFrame::CONSTRUCT);
-
- // Construct stub can not be topmost or bottommost.
- ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
- ASSERT(output_[frame_index] == NULL);
- output_[frame_index] = output_frame;
-
- // The top address of the frame is computed from the previous
- // frame's top and this frame's size.
- uint32_t top_address;
- top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
- output_frame->SetTop(top_address);
-
- // Compute the incoming parameter translation.
- int parameter_count = height;
- unsigned output_offset = output_frame_size;
- for (int i = 0; i < parameter_count; ++i) {
- output_offset -= kPointerSize;
- DoTranslateCommand(iterator, frame_index, output_offset);
- }
-
- // Read caller's PC from the previous frame.
- output_offset -= kPointerSize;
- intptr_t callers_pc = output_[frame_index - 1]->GetPc();
- output_frame->SetFrameSlot(output_offset, callers_pc);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
- top_address + output_offset, output_offset, callers_pc);
- }
-
- // Read caller's FP from the previous frame, and set this frame's FP.
- output_offset -= kPointerSize;
- intptr_t value = output_[frame_index - 1]->GetFp();
- output_frame->SetFrameSlot(output_offset, value);
- intptr_t fp_value = top_address + output_offset;
- output_frame->SetFp(fp_value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
- fp_value, output_offset, value);
- }
-
- // The context can be gotten from the previous frame.
- output_offset -= kPointerSize;
- value = output_[frame_index - 1]->GetContext();
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context\n",
- top_address + output_offset, output_offset, value);
- }
-
- // A marker value is used in place of the function.
- output_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::CONSTRUCT));
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function (construct sentinel)\n",
- top_address + output_offset, output_offset, value);
- }
-
- // The output frame reflects a JSConstructStubGeneric frame.
- output_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(construct_stub);
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; code object\n",
- top_address + output_offset, output_offset, value);
- }
-
- // Number of incoming arguments.
- output_offset -= kPointerSize;
- value = reinterpret_cast<uint32_t>(Smi::FromInt(height - 1));
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; argc (%d)\n",
- top_address + output_offset, output_offset, value, height - 1);
- }
-
- // Constructor function being invoked by the stub.
- output_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(function);
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; constructor function\n",
- top_address + output_offset, output_offset, value);
- }
-
- // The newly allocated object was passed as receiver in the artificial
- // constructor stub environment created by HEnvironment::CopyForInlining().
- output_offset -= kPointerSize;
- value = output_frame->GetFrameSlot(output_frame_size - kPointerSize);
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; allocated receiver\n",
- top_address + output_offset, output_offset, value);
- }
-
- ASSERT(0 == output_offset);
-
- uint32_t pc = reinterpret_cast<uint32_t>(
- construct_stub->instruction_start() +
- isolate_->heap()->construct_stub_deopt_pc_offset()->value());
- output_frame->SetPc(pc);
-}
-
-
-void Deoptimizer::DoComputeAccessorStubFrame(TranslationIterator* iterator,
- int frame_index,
- bool is_setter_stub_frame) {
- JSFunction* accessor = JSFunction::cast(ComputeLiteral(iterator->Next()));
- // The receiver (and the implicit return value, if any) are expected in
- // registers by the LoadIC/StoreIC, so they don't belong to the output stack
- // frame. This means that we have to use a height of 0.
- unsigned height = 0;
- unsigned height_in_bytes = height * kPointerSize;
- const char* kind = is_setter_stub_frame ? "setter" : "getter";
- if (FLAG_trace_deopt) {
- PrintF(" translating %s stub => height=%u\n", kind, height_in_bytes);
- }
-
- // We need 5 stack entries from StackFrame::INTERNAL (lr, fp, cp, frame type,
- // code object, see MacroAssembler::EnterFrame). For a setter stub frames we
- // need one additional entry for the implicit return value, see
- // StoreStubCompiler::CompileStoreViaSetter.
- unsigned fixed_frame_entries = 5 + (is_setter_stub_frame ? 1 : 0);
- unsigned fixed_frame_size = fixed_frame_entries * kPointerSize;
- unsigned output_frame_size = height_in_bytes + fixed_frame_size;
-
- // Allocate and store the output frame description.
- FrameDescription* output_frame =
- new(output_frame_size) FrameDescription(output_frame_size, accessor);
- output_frame->SetFrameType(StackFrame::INTERNAL);
-
- // A frame for an accessor stub can not be the topmost or bottommost one.
- ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
- ASSERT(output_[frame_index] == NULL);
- output_[frame_index] = output_frame;
-
- // The top address of the frame is computed from the previous frame's top and
- // this frame's size.
- uint32_t top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
- output_frame->SetTop(top_address);
-
- unsigned output_offset = output_frame_size;
-
- // Read caller's PC from the previous frame.
- output_offset -= kPointerSize;
- intptr_t callers_pc = output_[frame_index - 1]->GetPc();
- output_frame->SetFrameSlot(output_offset, callers_pc);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
- " ; caller's pc\n",
- top_address + output_offset, output_offset, callers_pc);
- }
-
- // Read caller's FP from the previous frame, and set this frame's FP.
- output_offset -= kPointerSize;
- intptr_t value = output_[frame_index - 1]->GetFp();
- output_frame->SetFrameSlot(output_offset, value);
- intptr_t fp_value = top_address + output_offset;
- output_frame->SetFp(fp_value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
- " ; caller's fp\n",
- fp_value, output_offset, value);
- }
-
- // The context can be gotten from the previous frame.
- output_offset -= kPointerSize;
- value = output_[frame_index - 1]->GetContext();
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
- " ; context\n",
- top_address + output_offset, output_offset, value);
- }
-
- // A marker value is used in place of the function.
- output_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::INTERNAL));
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
- " ; function (%s sentinel)\n",
- top_address + output_offset, output_offset, value, kind);
- }
-
- // Get Code object from accessor stub.
- output_offset -= kPointerSize;
- Builtins::Name name = is_setter_stub_frame ?
- Builtins::kStoreIC_Setter_ForDeopt :
- Builtins::kLoadIC_Getter_ForDeopt;
- Code* accessor_stub = isolate_->builtins()->builtin(name);
- value = reinterpret_cast<intptr_t>(accessor_stub);
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
- " ; code object\n",
- top_address + output_offset, output_offset, value);
- }
-
- // Skip receiver.
- Translation::Opcode opcode =
- static_cast<Translation::Opcode>(iterator->Next());
- iterator->Skip(Translation::NumberOfOperandsFor(opcode));
-
- if (is_setter_stub_frame) {
- // The implicit return value was part of the artificial setter stub
- // environment.
- output_offset -= kPointerSize;
- DoTranslateCommand(iterator, frame_index, output_offset);
- }
-
- ASSERT(0 == output_offset);
-
- Smi* offset = is_setter_stub_frame ?
- isolate_->heap()->setter_stub_deopt_pc_offset() :
- isolate_->heap()->getter_stub_deopt_pc_offset();
- intptr_t pc = reinterpret_cast<intptr_t>(
- accessor_stub->instruction_start() + offset->value());
- output_frame->SetPc(pc);
-}
-
-
-// This code is very similar to ia32 code, but relies on register names (fp, sp)
-// and how the frame is laid out.
-void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
- int frame_index) {
- // Read the ast node id, function, and frame height for this output frame.
- BailoutId node_id = BailoutId(iterator->Next());
- JSFunction* function;
- if (frame_index != 0) {
- function = JSFunction::cast(ComputeLiteral(iterator->Next()));
- } else {
- int closure_id = iterator->Next();
- USE(closure_id);
- ASSERT_EQ(Translation::kSelfLiteralId, closure_id);
- function = function_;
- }
- unsigned height = iterator->Next();
- unsigned height_in_bytes = height * kPointerSize;
- if (FLAG_trace_deopt) {
- PrintF(" translating ");
- function->PrintName();
- PrintF(" => node=%d, height=%d\n", node_id.ToInt(), height_in_bytes);
- }
-
- // The 'fixed' part of the frame consists of the incoming parameters and
- // the part described by JavaScriptFrameConstants.
- unsigned fixed_frame_size = ComputeFixedSize(function);
- unsigned input_frame_size = input_->GetFrameSize();
- unsigned output_frame_size = height_in_bytes + fixed_frame_size;
-
- // Allocate and store the output frame description.
- FrameDescription* output_frame =
- new(output_frame_size) FrameDescription(output_frame_size, function);
- output_frame->SetFrameType(StackFrame::JAVA_SCRIPT);
-
- bool is_bottommost = (0 == frame_index);
- bool is_topmost = (output_count_ - 1 == frame_index);
- ASSERT(frame_index >= 0 && frame_index < output_count_);
- ASSERT(output_[frame_index] == NULL);
- output_[frame_index] = output_frame;
-
- // The top address for the bottommost output frame can be computed from
- // the input frame pointer and the output frame's height. For all
- // subsequent output frames, it can be computed from the previous one's
- // top address and the current frame's size.
- uint32_t top_address;
- if (is_bottommost) {
- // 2 = context and function in the frame.
- top_address =
- input_->GetRegister(fp.code()) - (2 * kPointerSize) - height_in_bytes;
- } else {
- top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
- }
- output_frame->SetTop(top_address);
-
- // Compute the incoming parameter translation.
- int parameter_count = function->shared()->formal_parameter_count() + 1;
- unsigned output_offset = output_frame_size;
- unsigned input_offset = input_frame_size;
- for (int i = 0; i < parameter_count; ++i) {
- output_offset -= kPointerSize;
- DoTranslateCommand(iterator, frame_index, output_offset);
- }
- input_offset -= (parameter_count * kPointerSize);
-
- // There are no translation commands for the caller's pc and fp, the
- // context, and the function. Synthesize their values and set them up
- // explicitly.
- //
- // The caller's pc for the bottommost output frame is the same as in the
- // input frame. For all subsequent output frames, it can be read from the
- // previous one. This frame's pc can be computed from the non-optimized
- // function code and AST id of the bailout.
- output_offset -= kPointerSize;
- input_offset -= kPointerSize;
- intptr_t value;
- if (is_bottommost) {
- value = input_->GetFrameSlot(input_offset);
- } else {
- value = output_[frame_index - 1]->GetPc();
- }
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
- top_address + output_offset, output_offset, value);
- }
-
- // The caller's frame pointer for the bottommost output frame is the same
- // as in the input frame. For all subsequent output frames, it can be
- // read from the previous one. Also compute and set this frame's frame
- // pointer.
- output_offset -= kPointerSize;
- input_offset -= kPointerSize;
- if (is_bottommost) {
- value = input_->GetFrameSlot(input_offset);
- } else {
- value = output_[frame_index - 1]->GetFp();
- }
- output_frame->SetFrameSlot(output_offset, value);
- intptr_t fp_value = top_address + output_offset;
- ASSERT(!is_bottommost || input_->GetRegister(fp.code()) == fp_value);
- output_frame->SetFp(fp_value);
- if (is_topmost) {
- output_frame->SetRegister(fp.code(), fp_value);
- }
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
- fp_value, output_offset, value);
- }
-
- // For the bottommost output frame the context can be gotten from the input
- // frame. For all subsequent output frames it can be gotten from the function
- // so long as we don't inline functions that need local contexts.
- output_offset -= kPointerSize;
- input_offset -= kPointerSize;
- if (is_bottommost) {
- value = input_->GetFrameSlot(input_offset);
- } else {
- value = reinterpret_cast<intptr_t>(function->context());
- }
- output_frame->SetFrameSlot(output_offset, value);
- output_frame->SetContext(value);
- if (is_topmost) output_frame->SetRegister(cp.code(), value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context\n",
- top_address + output_offset, output_offset, value);
- }
-
- // The function was mentioned explicitly in the BEGIN_FRAME.
- output_offset -= kPointerSize;
- input_offset -= kPointerSize;
- value = reinterpret_cast<uint32_t>(function);
- // The function for the bottommost output frame should also agree with the
- // input frame.
- ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function\n",
- top_address + output_offset, output_offset, value);
- }
-
- // Translate the rest of the frame.
- for (unsigned i = 0; i < height; ++i) {
- output_offset -= kPointerSize;
- DoTranslateCommand(iterator, frame_index, output_offset);
- }
- ASSERT(0 == output_offset);
-
- // Compute this frame's PC, state, and continuation.
- Code* non_optimized_code = function->shared()->code();
- FixedArray* raw_data = non_optimized_code->deoptimization_data();
- DeoptimizationOutputData* data = DeoptimizationOutputData::cast(raw_data);
- Address start = non_optimized_code->instruction_start();
- unsigned pc_and_state = GetOutputInfo(data, node_id, function->shared());
- unsigned pc_offset = FullCodeGenerator::PcField::decode(pc_and_state);
- uint32_t pc_value = reinterpret_cast<uint32_t>(start + pc_offset);
- output_frame->SetPc(pc_value);
- if (is_topmost) {
- output_frame->SetRegister(pc.code(), pc_value);
- }
-
- FullCodeGenerator::State state =
- FullCodeGenerator::StateField::decode(pc_and_state);
- output_frame->SetState(Smi::FromInt(state));
-
-
- // Set the continuation for the topmost frame.
- if (is_topmost && bailout_type_ != DEBUGGER) {
- Builtins* builtins = isolate_->builtins();
- Code* continuation = (bailout_type_ == EAGER)
- ? builtins->builtin(Builtins::kNotifyDeoptimized)
- : builtins->builtin(Builtins::kNotifyLazyDeoptimized);
- output_frame->SetContinuation(
- reinterpret_cast<uint32_t>(continuation->entry()));
- }
}
@@ -888,7 +91,7 @@ void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
}
input_->SetRegister(sp.code(), reinterpret_cast<intptr_t>(frame->sp()));
input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));
- for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
+ for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
input_->SetDoubleRegister(i, 0.0);
}
@@ -899,6 +102,31 @@ void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
}
+void Deoptimizer::SetPlatformCompiledStubRegisters(
+ FrameDescription* output_frame, CodeStubInterfaceDescriptor* descriptor) {
+ ApiFunction function(descriptor->deoptimization_handler_);
+ ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_);
+ intptr_t handler = reinterpret_cast<intptr_t>(xref.address());
+ int params = descriptor->environment_length();
+ output_frame->SetRegister(r0.code(), params);
+ output_frame->SetRegister(r1.code(), handler);
+}
+
+
+void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
+ for (int i = 0; i < DwVfpRegister::kMaxNumRegisters; ++i) {
+ double double_value = input_->GetDoubleRegister(i);
+ output_frame->SetDoubleRegister(i, double_value);
+ }
+}
+
+
+bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
+ // There is no dynamic alignment padding on ARM in the input frame.
+ return false;
+}
+
+
#define __ masm()->
// This code tries to be close to ia32 code so that any changes can be
@@ -906,9 +134,6 @@ void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
void Deoptimizer::EntryGenerator::Generate() {
GeneratePrologue();
- Isolate* isolate = masm()->isolate();
-
- CpuFeatures::Scope scope(VFP3);
// Save all general purpose registers before messing with them.
const int kNumberOfRegisters = Register::kNumRegisters;
@@ -916,23 +141,20 @@ void Deoptimizer::EntryGenerator::Generate() {
RegList restored_regs = kJSCallerSaved | kCalleeSaved | ip.bit();
const int kDoubleRegsSize =
- kDoubleSize * DwVfpRegister::kNumAllocatableRegisters;
+ kDoubleSize * DwVfpRegister::kMaxNumAllocatableRegisters;
- // Save all VFP registers before messing with them.
- DwVfpRegister first = DwVfpRegister::FromAllocationIndex(0);
- DwVfpRegister last =
- DwVfpRegister::FromAllocationIndex(
- DwVfpRegister::kNumAllocatableRegisters - 1);
- ASSERT(last.code() > first.code());
- ASSERT((last.code() - first.code()) ==
- (DwVfpRegister::kNumAllocatableRegisters - 1));
-#ifdef DEBUG
- for (int i = 0; i <= (DwVfpRegister::kNumAllocatableRegisters - 1); i++) {
- ASSERT((DwVfpRegister::FromAllocationIndex(i).code() <= last.code()) &&
- (DwVfpRegister::FromAllocationIndex(i).code() >= first.code()));
- }
-#endif
- __ vstm(db_w, sp, first, last);
+ // Save all allocatable VFP registers before messing with them.
+ ASSERT(kDoubleRegZero.code() == 14);
+ ASSERT(kScratchDoubleReg.code() == 15);
+
+ // Check CPU flags for number of registers, setting the Z condition flag.
+ __ CheckFor32DRegs(ip);
+
+ // Push registers d0-d13, and possibly d16-d31, on the stack.
+ // If d16-d31 are not pushed, decrease the stack pointer instead.
+ __ vstm(db_w, sp, d16, d31, ne);
+ __ sub(sp, sp, Operand(16 * kDoubleSize), LeaveCC, eq);
+ __ vstm(db_w, sp, d0, d13);
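The conditional forms keep the saved-register area the same size on both kinds of cores: CheckFor32DRegs leaves eq set when only sixteen D-registers exist, so the code either pushes d16-d31 (ne) or reserves the same 16 * kDoubleSize bytes (eq) before the unconditional d0-d13 store. Either way the double area presumably spans kDoubleRegsSize bytes, so kSavedRegistersAreaSize and the src_offset arithmetic used further down when copying the VFP registers into the FrameDescription do not depend on the CPU variant.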
// Push all 16 registers (needed to populate FrameDescription::registers_).
// TODO(1588) Note that using pc with stm is deprecated, so we should perhaps
@@ -945,22 +167,12 @@ void Deoptimizer::EntryGenerator::Generate() {
// Get the bailout id from the stack.
__ ldr(r2, MemOperand(sp, kSavedRegistersAreaSize));
- // Get the address of the location in the code object if possible (r3) (return
+ // Get the address of the location in the code object (r3) (return
// address for lazy deoptimization) and compute the fp-to-sp delta in
// register r4.
- if (type() == EAGER) {
- __ mov(r3, Operand(0));
- // Correct one word for bailout id.
- __ add(r4, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
- } else if (type() == OSR) {
- __ mov(r3, lr);
- // Correct one word for bailout id.
- __ add(r4, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
- } else {
- __ mov(r3, lr);
- // Correct two words for bailout id and return address.
- __ add(r4, sp, Operand(kSavedRegistersAreaSize + (2 * kPointerSize)));
- }
+ __ mov(r3, lr);
+ // Correct one word for bailout id.
+ __ add(r4, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
__ sub(r4, fp, r4);
// Allocate a new deoptimizer object.
@@ -971,12 +183,12 @@ void Deoptimizer::EntryGenerator::Generate() {
// r2: bailout id already loaded.
// r3: code address or 0 already loaded.
__ str(r4, MemOperand(sp, 0 * kPointerSize)); // Fp-to-sp delta.
- __ mov(r5, Operand(ExternalReference::isolate_address()));
+ __ mov(r5, Operand(ExternalReference::isolate_address(isolate())));
__ str(r5, MemOperand(sp, 1 * kPointerSize)); // Isolate.
// Call Deoptimizer::New().
{
AllowExternalCallThatCantCauseGC scope(masm());
- __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
+ __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate()), 6);
}
// Preserve "deoptimizer" object in register r0 and get the input
@@ -992,22 +204,17 @@ void Deoptimizer::EntryGenerator::Generate() {
}
// Copy VFP registers to
- // double_registers_[DoubleRegister::kNumAllocatableRegisters]
+ // double_registers_[DoubleRegister::kMaxNumAllocatableRegisters]
int double_regs_offset = FrameDescription::double_registers_offset();
- for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; ++i) {
+ for (int i = 0; i < DwVfpRegister::kMaxNumAllocatableRegisters; ++i) {
int dst_offset = i * kDoubleSize + double_regs_offset;
int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
__ vldr(d0, sp, src_offset);
__ vstr(d0, r1, dst_offset);
}
- // Remove the bailout id, eventually return address, and the saved registers
- // from the stack.
- if (type() == EAGER || type() == OSR) {
- __ add(sp, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
- } else {
- __ add(sp, sp, Operand(kSavedRegistersAreaSize + (2 * kPointerSize)));
- }
+ // Remove the bailout id and the saved registers from the stack.
+ __ add(sp, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
// Compute a pointer to the unwinding limit in register r2; that is
// the first stack slot not part of the input frame.
@@ -1019,10 +226,13 @@ void Deoptimizer::EntryGenerator::Generate() {
// frame description.
__ add(r3, r1, Operand(FrameDescription::frame_content_offset()));
Label pop_loop;
+ Label pop_loop_header;
+ __ b(&pop_loop_header);
__ bind(&pop_loop);
__ pop(r4);
__ str(r4, MemOperand(r3, 0));
__ add(r3, r3, Operand(sizeof(uint32_t)));
+ __ bind(&pop_loop_header);
__ cmp(r2, sp);
__ b(ne, &pop_loop);
@@ -1034,38 +244,54 @@ void Deoptimizer::EntryGenerator::Generate() {
{
AllowExternalCallThatCantCauseGC scope(masm());
__ CallCFunction(
- ExternalReference::compute_output_frames_function(isolate), 1);
+ ExternalReference::compute_output_frames_function(isolate()), 1);
}
__ pop(r0); // Restore deoptimizer object (class Deoptimizer).
// Replace the current (input) frame with the output frames.
- Label outer_push_loop, inner_push_loop;
- // Outer loop state: r0 = current "FrameDescription** output_",
+ Label outer_push_loop, inner_push_loop,
+ outer_loop_header, inner_loop_header;
+ // Outer loop state: r4 = current "FrameDescription** output_",
// r1 = one past the last FrameDescription**.
__ ldr(r1, MemOperand(r0, Deoptimizer::output_count_offset()));
- __ ldr(r0, MemOperand(r0, Deoptimizer::output_offset())); // r0 is output_.
- __ add(r1, r0, Operand(r1, LSL, 2));
+ __ ldr(r4, MemOperand(r0, Deoptimizer::output_offset())); // r4 is output_.
+ __ add(r1, r4, Operand(r1, LSL, 2));
+ __ jmp(&outer_loop_header);
__ bind(&outer_push_loop);
// Inner loop state: r2 = current FrameDescription*, r3 = loop index.
- __ ldr(r2, MemOperand(r0, 0)); // output_[ix]
+ __ ldr(r2, MemOperand(r4, 0)); // output_[ix]
__ ldr(r3, MemOperand(r2, FrameDescription::frame_size_offset()));
+ __ jmp(&inner_loop_header);
__ bind(&inner_push_loop);
__ sub(r3, r3, Operand(sizeof(uint32_t)));
__ add(r6, r2, Operand(r3));
- __ ldr(r7, MemOperand(r6, FrameDescription::frame_content_offset()));
- __ push(r7);
- __ cmp(r3, Operand(0));
+ __ ldr(r6, MemOperand(r6, FrameDescription::frame_content_offset()));
+ __ push(r6);
+ __ bind(&inner_loop_header);
+ __ cmp(r3, Operand::Zero());
__ b(ne, &inner_push_loop); // test for gt?
- __ add(r0, r0, Operand(kPointerSize));
- __ cmp(r0, r1);
+ __ add(r4, r4, Operand(kPointerSize));
+ __ bind(&outer_loop_header);
+ __ cmp(r4, r1);
__ b(lt, &outer_push_loop);
- // Push state, pc, and continuation from the last output frame.
- if (type() != OSR) {
- __ ldr(r6, MemOperand(r2, FrameDescription::state_offset()));
- __ push(r6);
+ // Check CPU flags for number of registers, setting the Z condition flag.
+ __ CheckFor32DRegs(ip);
+
+ __ ldr(r1, MemOperand(r0, Deoptimizer::input_offset()));
+ int src_offset = FrameDescription::double_registers_offset();
+ for (int i = 0; i < DwVfpRegister::kMaxNumRegisters; ++i) {
+ if (i == kDoubleRegZero.code()) continue;
+ if (i == kScratchDoubleReg.code()) continue;
+
+ const DwVfpRegister reg = DwVfpRegister::from_code(i);
+ __ vldr(reg, r1, src_offset, i < 16 ? al : ne);
+ src_offset += kDoubleSize;
}
+ // Push state, pc, and continuation from the last output frame.
+ __ ldr(r6, MemOperand(r2, FrameDescription::state_offset()));
+ __ push(r6);
__ ldr(r6, MemOperand(r2, FrameDescription::pc_offset()));
__ push(r6);
__ ldr(r6, MemOperand(r2, FrameDescription::continuation_offset()));
@@ -1086,26 +312,20 @@ void Deoptimizer::EntryGenerator::Generate() {
__ InitializeRootRegister();
__ pop(ip); // remove pc
- __ pop(r7); // get continuation, leave pc on stack
+ __ pop(ip); // get continuation, leave pc on stack
__ pop(lr);
- __ Jump(r7);
+ __ Jump(ip);
__ stop("Unreachable.");
}
void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
- // Create a sequence of deoptimization entries. Note that any
- // registers may be still live.
+ // Create a sequence of deoptimization entries.
+ // Note that registers are still live when jumping to an entry.
Label done;
for (int i = 0; i < count(); i++) {
int start = masm()->pc_offset();
USE(start);
- if (type() == EAGER) {
- __ nop();
- } else {
- // Emulate ia32 like call by pushing return address to stack.
- __ push(lr);
- }
__ mov(ip, Operand(i));
__ push(ip);
__ b(&done);
@@ -1114,6 +334,17 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
__ bind(&done);
}
+
+void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
+ SetFrameSlot(offset, value);
+}
+
+
+void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
+ SetFrameSlot(offset, value);
+}
+
+
#undef __
} } // namespace v8::internal
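The loop changes above (the new pop_loop_header, inner_loop_header and outer_loop_header labels, each reached by an unconditional branch before the loop body is bound) turn the frame-copy loops from a post-test shape, whose body always runs at least once, into a pre-test shape that can cope with an empty frame. A minimal C++ sketch of the two shapes, not V8 code; the helper names copy_posttest and copy_pretest are illustrative only:

#include <cstdio>
#include <vector>

// Post-test loop: mirrors the old "bind(&loop); ...; cmp; b(ne, &loop)"
// pattern. The body executes once even when n == 0.
static int copy_posttest(const int* src, int* dst, int n) {
  int i = 0;
  do {
    dst[i] = src[i];
    ++i;
  } while (i < n);
  return i;  // number of elements written
}

// Pre-test loop: mirrors the new "b(&header); bind(&loop); ...;
// bind(&header); cmp; b(ne, &loop)" pattern. Skips the body when n == 0.
static int copy_pretest(const int* src, int* dst, int n) {
  int i = 0;
  while (i < n) {
    dst[i] = src[i];
    ++i;
  }
  return i;
}

int main() {
  std::vector<int> src{1, 2, 3};
  std::vector<int> dst(3, 0);
  std::printf("pretest, n=0: wrote %d slots\n",
              copy_pretest(src.data(), dst.data(), 0));
  std::printf("pretest, n=3: wrote %d slots\n",
              copy_pretest(src.data(), dst.data(), 3));
  // copy_posttest with n == 0 would still write one slot, which is the
  // hazard the header-first branch in the generated loops avoids.
  std::printf("posttest, n=3: wrote %d slots\n",
              copy_posttest(src.data(), dst.data(), 3));
  return 0;
}

The header-first branch costs one extra jump on loop entry but removes any need to special-case an empty frame before the copy.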
diff --git a/deps/v8/src/arm/disasm-arm.cc b/deps/v8/src/arm/disasm-arm.cc
index 3c94a46e6..acffaa3f2 100644
--- a/deps/v8/src/arm/disasm-arm.cc
+++ b/deps/v8/src/arm/disasm-arm.cc
@@ -50,13 +50,10 @@
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
-#ifndef WIN32
-#include <stdint.h>
-#endif
#include "v8.h"
-#if defined(V8_TARGET_ARCH_ARM)
+#if V8_TARGET_ARCH_ARM
#include "constants-arm.h"
#include "disasm.h"
@@ -113,6 +110,8 @@ class Decoder {
// Handle formatting of instructions and their options.
int FormatRegister(Instruction* instr, const char* option);
+ void FormatNeonList(int Vd, int type);
+ void FormatNeonMemory(int Rn, int align, int Rm);
int FormatOption(Instruction* instr, const char* option);
void Format(Instruction* instr, const char* format);
void Unknown(Instruction* instr);
@@ -133,6 +132,8 @@ class Decoder {
void DecodeTypeVFP(Instruction* instr);
void DecodeType6CoprocessorIns(Instruction* instr);
+ void DecodeSpecialCondition(Instruction* instr);
+
void DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instruction* instr);
void DecodeVCMP(Instruction* instr);
void DecodeVCVTBetweenDoubleAndSingle(Instruction* instr);
@@ -187,12 +188,14 @@ void Decoder::PrintRegister(int reg) {
Print(converter_.NameOfCPURegister(reg));
}
+
// Print the VFP S register name according to the active name converter.
void Decoder::PrintSRegister(int reg) {
Print(VFPRegisters::Name(reg, false));
}
-// Print the VFP D register name according to the active name converter.
+
+// Print the VFP D register name according to the active name converter.
void Decoder::PrintDRegister(int reg) {
Print(VFPRegisters::Name(reg, true));
}
@@ -381,7 +384,16 @@ int Decoder::FormatVFPRegister(Instruction* instr, const char* format) {
} else if (format[1] == 'm') {
reg = instr->VFPMRegValue(precision);
} else if (format[1] == 'd') {
- reg = instr->VFPDRegValue(precision);
+ if ((instr->TypeValue() == 7) &&
+ (instr->Bit(24) == 0x0) &&
+ (instr->Bits(11, 9) == 0x5) &&
+ (instr->Bit(4) == 0x1)) {
+ // vmov.32 has Vd in a different place.
+ reg = instr->Bits(19, 16) | (instr->Bit(7) << 4);
+ } else {
+ reg = instr->VFPDRegValue(precision);
+ }
+
if (format[2] == '+') {
int immed8 = instr->Immed8Value();
if (format[0] == 'S') reg += immed8 - 1;
@@ -408,6 +420,41 @@ int Decoder::FormatVFPinstruction(Instruction* instr, const char* format) {
}
+void Decoder::FormatNeonList(int Vd, int type) {
+ if (type == nlt_1) {
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ "{d%d}", Vd);
+ } else if (type == nlt_2) {
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ "{d%d, d%d}", Vd, Vd + 1);
+ } else if (type == nlt_3) {
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ "{d%d, d%d, d%d}", Vd, Vd + 1, Vd + 2);
+ } else if (type == nlt_4) {
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ "{d%d, d%d, d%d, d%d}", Vd, Vd + 1, Vd + 2, Vd + 3);
+ }
+}
+
+
+void Decoder::FormatNeonMemory(int Rn, int align, int Rm) {
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ "[r%d", Rn);
+ if (align != 0) {
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ ":%d", (1 << align) << 6);
+ }
+ if (Rm == 15) {
+ Print("]");
+ } else if (Rm == 13) {
+ Print("]!");
+ } else {
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ "], r%d", Rm);
+ }
+}
+
+
// Print the movw or movt instruction.
void Decoder::PrintMovwMovt(Instruction* instr) {
int imm = instr->ImmedMovwMovtValue();
@@ -971,15 +1018,107 @@ void Decoder::DecodeType3(Instruction* instr) {
break;
}
case ia_x: {
- if (instr->HasW()) {
- VERIFY(instr->Bits(5, 4) == 0x1);
- if (instr->Bit(22) == 0x1) {
- Format(instr, "usat 'rd, #'imm05@16, 'rm'shift_sat");
+ if (instr->Bit(4) == 0) {
+ Format(instr, "'memop'cond'b 'rd, ['rn], +'shift_rm");
+ } else {
+ if (instr->Bit(5) == 0) {
+ switch (instr->Bits(22, 21)) {
+ case 0:
+ if (instr->Bit(20) == 0) {
+ if (instr->Bit(6) == 0) {
+ Format(instr, "pkhbt'cond 'rd, 'rn, 'rm, lsl #'imm05@07");
+ } else {
+ if (instr->Bits(11, 7) == 0) {
+ Format(instr, "pkhtb'cond 'rd, 'rn, 'rm, asr #32");
+ } else {
+ Format(instr, "pkhtb'cond 'rd, 'rn, 'rm, asr #'imm05@07");
+ }
+ }
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ case 1:
+ UNREACHABLE();
+ break;
+ case 2:
+ UNREACHABLE();
+ break;
+ case 3:
+ Format(instr, "usat 'rd, #'imm05@16, 'rm'shift_sat");
+ break;
+ }
} else {
- UNREACHABLE(); // SSAT.
+ switch (instr->Bits(22, 21)) {
+ case 0:
+ UNREACHABLE();
+ break;
+ case 1:
+ UNREACHABLE();
+ break;
+ case 2:
+ if ((instr->Bit(20) == 0) && (instr->Bits(9, 6) == 1)) {
+ if (instr->Bits(19, 16) == 0xF) {
+ switch (instr->Bits(11, 10)) {
+ case 0:
+ Format(instr, "uxtb16'cond 'rd, 'rm, ror #0");
+ break;
+ case 1:
+ Format(instr, "uxtb16'cond 'rd, 'rm, ror #8");
+ break;
+ case 2:
+ Format(instr, "uxtb16'cond 'rd, 'rm, ror #16");
+ break;
+ case 3:
+ Format(instr, "uxtb16'cond 'rd, 'rm, ror #24");
+ break;
+ }
+ } else {
+ UNREACHABLE();
+ }
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ case 3:
+ if ((instr->Bit(20) == 0) && (instr->Bits(9, 6) == 1)) {
+ if (instr->Bits(19, 16) == 0xF) {
+ switch (instr->Bits(11, 10)) {
+ case 0:
+ Format(instr, "uxtb'cond 'rd, 'rm, ror #0");
+ break;
+ case 1:
+ Format(instr, "uxtb'cond 'rd, 'rm, ror #8");
+ break;
+ case 2:
+ Format(instr, "uxtb'cond 'rd, 'rm, ror #16");
+ break;
+ case 3:
+ Format(instr, "uxtb'cond 'rd, 'rm, ror #24");
+ break;
+ }
+ } else {
+ switch (instr->Bits(11, 10)) {
+ case 0:
+ Format(instr, "uxtab'cond 'rd, 'rn, 'rm, ror #0");
+ break;
+ case 1:
+ Format(instr, "uxtab'cond 'rd, 'rn, 'rm, ror #8");
+ break;
+ case 2:
+ Format(instr, "uxtab'cond 'rd, 'rn, 'rm, ror #16");
+ break;
+ case 3:
+ Format(instr, "uxtab'cond 'rd, 'rn, 'rm, ror #24");
+ break;
+ }
+ }
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ }
}
- } else {
- Format(instr, "'memop'cond'b 'rd, ['rn], +'shift_rm");
}
break;
}
@@ -1093,11 +1232,14 @@ int Decoder::DecodeType7(Instruction* instr) {
// vmov: Rt = Sn
// vcvt: Dd = Sm
// vcvt: Sd = Dm
+// vcvt.f64.s32 Dd, Dd, #<fbits>
// Dd = vabs(Dm)
// Dd = vneg(Dm)
// Dd = vadd(Dn, Dm)
// Dd = vsub(Dn, Dm)
// Dd = vmul(Dn, Dm)
+// Dd = vmla(Dn, Dm)
+// Dd = vmls(Dn, Dm)
// Dd = vdiv(Dn, Dm)
// vcmp(Dd, Dm)
// vmrs
@@ -1113,20 +1255,27 @@ void Decoder::DecodeTypeVFP(Instruction* instr) {
if ((instr->Opc2Value() == 0x0) && (instr->Opc3Value() == 0x1)) {
// vmov register to register.
if (instr->SzValue() == 0x1) {
- Format(instr, "vmov.f64'cond 'Dd, 'Dm");
+ Format(instr, "vmov'cond.f64 'Dd, 'Dm");
} else {
- Format(instr, "vmov.f32'cond 'Sd, 'Sm");
+ Format(instr, "vmov'cond.f32 'Sd, 'Sm");
}
} else if ((instr->Opc2Value() == 0x0) && (instr->Opc3Value() == 0x3)) {
// vabs
- Format(instr, "vabs.f64'cond 'Dd, 'Dm");
+ Format(instr, "vabs'cond.f64 'Dd, 'Dm");
} else if ((instr->Opc2Value() == 0x1) && (instr->Opc3Value() == 0x1)) {
// vneg
- Format(instr, "vneg.f64'cond 'Dd, 'Dm");
+ Format(instr, "vneg'cond.f64 'Dd, 'Dm");
} else if ((instr->Opc2Value() == 0x7) && (instr->Opc3Value() == 0x3)) {
DecodeVCVTBetweenDoubleAndSingle(instr);
} else if ((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) {
DecodeVCVTBetweenFloatingPointAndInteger(instr);
+ } else if ((instr->Opc2Value() == 0xA) && (instr->Opc3Value() == 0x3) &&
+ (instr->Bit(8) == 1)) {
+ // vcvt.f64.s32 Dd, Dd, #<fbits>
+ int fraction_bits = 32 - ((instr->Bit(5) << 4) | instr->Bits(3, 0));
+ Format(instr, "vcvt'cond.f64.s32 'Dd, 'Dd");
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ ", #%d", fraction_bits);
} else if (((instr->Opc2Value() >> 1) == 0x6) &&
(instr->Opc3Value() & 0x1)) {
DecodeVCVTBetweenFloatingPointAndInteger(instr);
@@ -1134,10 +1283,10 @@ void Decoder::DecodeTypeVFP(Instruction* instr) {
(instr->Opc3Value() & 0x1)) {
DecodeVCMP(instr);
} else if (((instr->Opc2Value() == 0x1)) && (instr->Opc3Value() == 0x3)) {
- Format(instr, "vsqrt.f64'cond 'Dd, 'Dm");
+ Format(instr, "vsqrt'cond.f64 'Dd, 'Dm");
} else if (instr->Opc3Value() == 0x0) {
if (instr->SzValue() == 0x1) {
- Format(instr, "vmov.f64'cond 'Dd, 'd");
+ Format(instr, "vmov'cond.f64 'Dd, 'd");
} else {
Unknown(instr); // Not used by V8.
}
@@ -1147,22 +1296,34 @@ void Decoder::DecodeTypeVFP(Instruction* instr) {
} else if (instr->Opc1Value() == 0x3) {
if (instr->SzValue() == 0x1) {
if (instr->Opc3Value() & 0x1) {
- Format(instr, "vsub.f64'cond 'Dd, 'Dn, 'Dm");
+ Format(instr, "vsub'cond.f64 'Dd, 'Dn, 'Dm");
} else {
- Format(instr, "vadd.f64'cond 'Dd, 'Dn, 'Dm");
+ Format(instr, "vadd'cond.f64 'Dd, 'Dn, 'Dm");
}
} else {
Unknown(instr); // Not used by V8.
}
} else if ((instr->Opc1Value() == 0x2) && !(instr->Opc3Value() & 0x1)) {
if (instr->SzValue() == 0x1) {
- Format(instr, "vmul.f64'cond 'Dd, 'Dn, 'Dm");
+ Format(instr, "vmul'cond.f64 'Dd, 'Dn, 'Dm");
+ } else {
+ Unknown(instr); // Not used by V8.
+ }
+ } else if ((instr->Opc1Value() == 0x0) && !(instr->Opc3Value() & 0x1)) {
+ if (instr->SzValue() == 0x1) {
+ Format(instr, "vmla'cond.f64 'Dd, 'Dn, 'Dm");
+ } else {
+ Unknown(instr); // Not used by V8.
+ }
+ } else if ((instr->Opc1Value() == 0x0) && (instr->Opc3Value() & 0x1)) {
+ if (instr->SzValue() == 0x1) {
+ Format(instr, "vmls'cond.f64 'Dd, 'Dn, 'Dm");
} else {
Unknown(instr); // Not used by V8.
}
} else if ((instr->Opc1Value() == 0x4) && !(instr->Opc3Value() & 0x1)) {
if (instr->SzValue() == 0x1) {
- Format(instr, "vdiv.f64'cond 'Dd, 'Dn, 'Dm");
+ Format(instr, "vdiv'cond.f64 'Dd, 'Dn, 'Dm");
} else {
Unknown(instr); // Not used by V8.
}
@@ -1173,6 +1334,22 @@ void Decoder::DecodeTypeVFP(Instruction* instr) {
if ((instr->VCValue() == 0x0) &&
(instr->VAValue() == 0x0)) {
DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(instr);
+ } else if ((instr->VLValue() == 0x0) &&
+ (instr->VCValue() == 0x1) &&
+ (instr->Bit(23) == 0x0)) {
+ if (instr->Bit(21) == 0x0) {
+ Format(instr, "vmov'cond.32 'Dd[0], 'rt");
+ } else {
+ Format(instr, "vmov'cond.32 'Dd[1], 'rt");
+ }
+ } else if ((instr->VLValue() == 0x1) &&
+ (instr->VCValue() == 0x1) &&
+ (instr->Bit(23) == 0x0)) {
+ if (instr->Bit(21) == 0x0) {
+ Format(instr, "vmov'cond.32 'rt, 'Dd[0]");
+ } else {
+ Format(instr, "vmov'cond.32 'rt, 'Dd[1]");
+ }
} else if ((instr->VCValue() == 0x0) &&
(instr->VAValue() == 0x7) &&
(instr->Bits(19, 16) == 0x1)) {
@@ -1220,9 +1397,9 @@ void Decoder::DecodeVCMP(Instruction* instr) {
if (dp_operation && !raise_exception_for_qnan) {
if (instr->Opc2Value() == 0x4) {
- Format(instr, "vcmp.f64'cond 'Dd, 'Dm");
+ Format(instr, "vcmp'cond.f64 'Dd, 'Dm");
} else if (instr->Opc2Value() == 0x5) {
- Format(instr, "vcmp.f64'cond 'Dd, #0.0");
+ Format(instr, "vcmp'cond.f64 'Dd, #0.0");
} else {
Unknown(instr); // invalid
}
@@ -1239,9 +1416,9 @@ void Decoder::DecodeVCVTBetweenDoubleAndSingle(Instruction* instr) {
bool double_to_single = (instr->SzValue() == 1);
if (double_to_single) {
- Format(instr, "vcvt.f32.f64'cond 'Sd, 'Dm");
+ Format(instr, "vcvt'cond.f32.f64 'Sd, 'Dm");
} else {
- Format(instr, "vcvt.f64.f32'cond 'Dd, 'Sm");
+ Format(instr, "vcvt'cond.f64.f32 'Dd, 'Sm");
}
}
@@ -1258,15 +1435,15 @@ void Decoder::DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr) {
if (dp_operation) {
if (unsigned_integer) {
- Format(instr, "vcvt.u32.f64'cond 'Sd, 'Dm");
+ Format(instr, "vcvt'cond.u32.f64 'Sd, 'Dm");
} else {
- Format(instr, "vcvt.s32.f64'cond 'Sd, 'Dm");
+ Format(instr, "vcvt'cond.s32.f64 'Sd, 'Dm");
}
} else {
if (unsigned_integer) {
- Format(instr, "vcvt.u32.f32'cond 'Sd, 'Sm");
+ Format(instr, "vcvt'cond.u32.f32 'Sd, 'Sm");
} else {
- Format(instr, "vcvt.s32.f32'cond 'Sd, 'Sm");
+ Format(instr, "vcvt'cond.s32.f32 'Sd, 'Sm");
}
}
} else {
@@ -1274,15 +1451,15 @@ void Decoder::DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr) {
if (dp_operation) {
if (unsigned_integer) {
- Format(instr, "vcvt.f64.u32'cond 'Dd, 'Sm");
+ Format(instr, "vcvt'cond.f64.u32 'Dd, 'Sm");
} else {
- Format(instr, "vcvt.f64.s32'cond 'Dd, 'Sm");
+ Format(instr, "vcvt'cond.f64.s32 'Dd, 'Sm");
}
} else {
if (unsigned_integer) {
- Format(instr, "vcvt.f32.u32'cond 'Sd, 'Sm");
+ Format(instr, "vcvt'cond.f32.u32 'Sd, 'Sm");
} else {
- Format(instr, "vcvt.f32.s32'cond 'Sd, 'Sm");
+ Format(instr, "vcvt'cond.f32.s32 'Sd, 'Sm");
}
}
}
@@ -1336,7 +1513,7 @@ void Decoder::DecodeType6CoprocessorIns(Instruction* instr) {
switch (instr->OpcodeValue()) {
case 0x2:
// Load and store double to two GP registers
- if (instr->Bits(7, 4) != 0x1) {
+ if (instr->Bits(7, 6) != 0 || instr->Bit(4) != 1) {
Unknown(instr); // Not used by V8.
} else if (instr->HasL()) {
Format(instr, "vmov'cond 'rt, 'rn, 'Dm");
@@ -1345,6 +1522,7 @@ void Decoder::DecodeType6CoprocessorIns(Instruction* instr) {
}
break;
case 0x8:
+ case 0xA:
if (instr->HasL()) {
Format(instr, "vldr'cond 'Dd, ['rn - 4*'imm08@00]");
} else {
@@ -1352,6 +1530,7 @@ void Decoder::DecodeType6CoprocessorIns(Instruction* instr) {
}
break;
case 0xC:
+ case 0xE:
if (instr->HasL()) {
Format(instr, "vldr'cond 'Dd, ['rn + 4*'imm08@00]");
} else {
@@ -1360,7 +1539,10 @@ void Decoder::DecodeType6CoprocessorIns(Instruction* instr) {
break;
case 0x4:
case 0x5:
- case 0x9: {
+ case 0x6:
+ case 0x7:
+ case 0x9:
+ case 0xB: {
bool to_vfp_register = (instr->VLValue() == 0x1);
if (to_vfp_register) {
Format(instr, "vldm'cond'pu 'rn'w, {'Dd-'Dd+}");
@@ -1377,6 +1559,91 @@ void Decoder::DecodeType6CoprocessorIns(Instruction* instr) {
}
}
+
+void Decoder::DecodeSpecialCondition(Instruction* instr) {
+ switch (instr->SpecialValue()) {
+ case 5:
+ if ((instr->Bits(18, 16) == 0) && (instr->Bits(11, 6) == 0x28) &&
+ (instr->Bit(4) == 1)) {
+ // vmovl signed
+ int Vd = (instr->Bit(22) << 4) | instr->VdValue();
+ int Vm = (instr->Bit(5) << 4) | instr->VmValue();
+ int imm3 = instr->Bits(21, 19);
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ "vmovl.s%d q%d, d%d", imm3*8, Vd, Vm);
+ } else {
+ Unknown(instr);
+ }
+ break;
+ case 7:
+ if ((instr->Bits(18, 16) == 0) && (instr->Bits(11, 6) == 0x28) &&
+ (instr->Bit(4) == 1)) {
+ // vmovl unsigned
+ int Vd = (instr->Bit(22) << 4) | instr->VdValue();
+ int Vm = (instr->Bit(5) << 4) | instr->VmValue();
+ int imm3 = instr->Bits(21, 19);
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ "vmovl.u%d q%d, d%d", imm3*8, Vd, Vm);
+ } else {
+ Unknown(instr);
+ }
+ break;
+ case 8:
+ if (instr->Bits(21, 20) == 0) {
+ // vst1
+ int Vd = (instr->Bit(22) << 4) | instr->VdValue();
+ int Rn = instr->VnValue();
+ int type = instr->Bits(11, 8);
+ int size = instr->Bits(7, 6);
+ int align = instr->Bits(5, 4);
+ int Rm = instr->VmValue();
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ "vst1.%d ", (1 << size) << 3);
+ FormatNeonList(Vd, type);
+ Print(", ");
+ FormatNeonMemory(Rn, align, Rm);
+ } else if (instr->Bits(21, 20) == 2) {
+ // vld1
+ int Vd = (instr->Bit(22) << 4) | instr->VdValue();
+ int Rn = instr->VnValue();
+ int type = instr->Bits(11, 8);
+ int size = instr->Bits(7, 6);
+ int align = instr->Bits(5, 4);
+ int Rm = instr->VmValue();
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ "vld1.%d ", (1 << size) << 3);
+ FormatNeonList(Vd, type);
+ Print(", ");
+ FormatNeonMemory(Rn, align, Rm);
+ } else {
+ Unknown(instr);
+ }
+ break;
+ case 0xA:
+ case 0xB:
+ if ((instr->Bits(22, 20) == 5) && (instr->Bits(15, 12) == 0xf)) {
+ int Rn = instr->Bits(19, 16);
+ int offset = instr->Bits(11, 0);
+ if (offset == 0) {
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ "pld [r%d]", Rn);
+ } else if (instr->Bit(23) == 0) {
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ "pld [r%d, #-%d]", Rn, offset);
+ } else {
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ "pld [r%d, #+%d]", Rn, offset);
+ }
+ } else {
+ Unknown(instr);
+ }
+ break;
+ default:
+ Unknown(instr);
+ break;
+ }
+}
+
#undef VERIFY
bool Decoder::IsConstantPoolAt(byte* instr_ptr) {
@@ -1388,7 +1655,7 @@ bool Decoder::IsConstantPoolAt(byte* instr_ptr) {
int Decoder::ConstantPoolSizeAt(byte* instr_ptr) {
if (IsConstantPoolAt(instr_ptr)) {
int instruction_bits = *(reinterpret_cast<int*>(instr_ptr));
- return instruction_bits & kConstantPoolLengthMask;
+ return DecodeConstantPoolLength(instruction_bits);
} else {
return -1;
}
@@ -1403,15 +1670,14 @@ int Decoder::InstructionDecode(byte* instr_ptr) {
"%08x ",
instr->InstructionBits());
if (instr->ConditionField() == kSpecialCondition) {
- Unknown(instr);
+ DecodeSpecialCondition(instr);
return Instruction::kInstrSize;
}
int instruction_bits = *(reinterpret_cast<int*>(instr_ptr));
if ((instruction_bits & kConstantPoolMarkerMask) == kConstantPoolMarker) {
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
"constant pool begin (length %d)",
- instruction_bits &
- kConstantPoolLengthMask);
+ DecodeConstantPoolLength(instruction_bits));
return Instruction::kInstrSize;
}
switch (instr->TypeValue()) {
@@ -1526,8 +1792,9 @@ void Disassembler::Disassemble(FILE* f, byte* begin, byte* end) {
buffer[0] = '\0';
byte* prev_pc = pc;
pc += d.InstructionDecode(buffer, pc);
- fprintf(f, "%p %08x %s\n",
- prev_pc, *reinterpret_cast<int32_t*>(prev_pc), buffer.start());
+ v8::internal::PrintF(
+ f, "%p %08x %s\n",
+ prev_pc, *reinterpret_cast<int32_t*>(prev_pc), buffer.start());
}
}
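FormatNeonList and FormatNeonMemory above only build strings for the disassembly buffer. The standalone C++ sketch below reproduces the same formatting rules: register lists {dN, ...}, the (1 << align) << 6 alignment suffix, Rm == 15 meaning no write-back suffix and Rm == 13 meaning write-back. The NeonList/NeonMemory helpers and the kList* enumerators are illustrative names, not part of the decoder:

#include <cstdio>
#include <string>

// Stand-ins for the nlt_1..nlt_4 list types used by the decoder above.
enum NeonListType { kList1 = 1, kList2, kList3, kList4 };

// Format "{dVd, ..., dVd+n-1}" the way FormatNeonList does.
static std::string NeonList(int vd, NeonListType type) {
  std::string out = "{";
  char buf[16];
  for (int i = 0; i < type; ++i) {
    std::snprintf(buf, sizeof(buf), "%sd%d", i ? ", " : "", vd + i);
    out += buf;
  }
  return out + "}";
}

// Format the "[rN:align]" / "]!" / "], rM" suffix the way FormatNeonMemory
// does: a non-zero align field is printed as (1 << align) << 6.
static std::string NeonMemory(int rn, int align, int rm) {
  char buf[32];
  std::snprintf(buf, sizeof(buf), "[r%d", rn);
  std::string out = buf;
  if (align != 0) {
    std::snprintf(buf, sizeof(buf), ":%d", (1 << align) << 6);
    out += buf;
  }
  if (rm == 15) {
    out += "]";
  } else if (rm == 13) {
    out += "]!";
  } else {
    std::snprintf(buf, sizeof(buf), "], r%d", rm);
    out += buf;
  }
  return out;
}

int main() {
  // Prints: vst1.8 {d0, d1}, [r4:128]!
  std::printf("vst1.8 %s, %s\n",
              NeonList(0, kList2).c_str(), NeonMemory(4, 1, 13).c_str());
  return 0;
}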
diff --git a/deps/v8/src/arm/frames-arm.cc b/deps/v8/src/arm/frames-arm.cc
index a805d280c..b2071807d 100644
--- a/deps/v8/src/arm/frames-arm.cc
+++ b/deps/v8/src/arm/frames-arm.cc
@@ -27,17 +27,25 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_ARM)
+#if V8_TARGET_ARCH_ARM
-#include "frames-inl.h"
+#include "assembler.h"
+#include "assembler-arm.h"
+#include "assembler-arm-inl.h"
+#include "frames.h"
+#include "macro-assembler.h"
+#include "macro-assembler-arm.h"
namespace v8 {
namespace internal {
-Address ExitFrame::ComputeStackPointer(Address fp) {
- return Memory::Address_at(fp + ExitFrameConstants::kSPOffset);
-}
+Register JavaScriptFrame::fp_register() { return v8::internal::fp; }
+Register JavaScriptFrame::context_register() { return cp; }
+
+
+Register StubFailureTrampolineFrame::fp_register() { return v8::internal::fp; }
+Register StubFailureTrampolineFrame::context_register() { return cp; }
} } // namespace v8::internal
diff --git a/deps/v8/src/arm/frames-arm.h b/deps/v8/src/arm/frames-arm.h
index a10acd068..64a718e89 100644
--- a/deps/v8/src/arm/frames-arm.h
+++ b/deps/v8/src/arm/frames-arm.h
@@ -64,7 +64,7 @@ const RegList kCalleeSaved =
1 << 4 | // r4 v1
1 << 5 | // r5 v2
1 << 6 | // r6 v3
- 1 << 7 | // r7 v4
+ 1 << 7 | // r7 v4 (pp in JavaScript code)
1 << 8 | // r8 v5 (cp in JavaScript code)
kR9Available << 9 | // r9 v6
1 << 10 | // r10 v7
@@ -100,18 +100,6 @@ const int kNumSafepointSavedRegisters = kNumJSCallerSaved + kNumCalleeSaved;
// ----------------------------------------------------
-class StackHandlerConstants : public AllStatic {
- public:
- static const int kNextOffset = 0 * kPointerSize;
- static const int kCodeOffset = 1 * kPointerSize;
- static const int kStateOffset = 2 * kPointerSize;
- static const int kContextOffset = 3 * kPointerSize;
- static const int kFPOffset = 4 * kPointerSize;
-
- static const int kSize = kFPOffset + kPointerSize;
-};
-
-
class EntryFrameConstants : public AllStatic {
public:
static const int kCallerFPOffset = -3 * kPointerSize;
@@ -134,20 +122,6 @@ class ExitFrameConstants : public AllStatic {
};
-class StandardFrameConstants : public AllStatic {
- public:
- // Fixed part of the frame consists of return address, caller fp,
- // context and function.
- static const int kFixedFrameSize = 4 * kPointerSize;
- static const int kExpressionsOffset = -3 * kPointerSize;
- static const int kMarkerOffset = -2 * kPointerSize;
- static const int kContextOffset = -1 * kPointerSize;
- static const int kCallerFPOffset = 0 * kPointerSize;
- static const int kCallerPCOffset = 1 * kPointerSize;
- static const int kCallerSPOffset = 2 * kPointerSize;
-};
-
-
class JavaScriptFrameConstants : public AllStatic {
public:
// FP-relative.
@@ -163,14 +137,30 @@ class JavaScriptFrameConstants : public AllStatic {
class ArgumentsAdaptorFrameConstants : public AllStatic {
public:
+ // FP-relative.
static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset;
+
static const int kFrameSize =
StandardFrameConstants::kFixedFrameSize + kPointerSize;
};
+class ConstructFrameConstants : public AllStatic {
+ public:
+ // FP-relative.
+ static const int kImplicitReceiverOffset = -6 * kPointerSize;
+ static const int kConstructorOffset = -5 * kPointerSize;
+ static const int kLengthOffset = -4 * kPointerSize;
+ static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
+
+ static const int kFrameSize =
+ StandardFrameConstants::kFixedFrameSize + 4 * kPointerSize;
+};
+
+
class InternalFrameConstants : public AllStatic {
public:
+ // FP-relative.
static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
};
@@ -181,6 +171,11 @@ inline Object* JavaScriptFrame::function_slot_object() const {
}
+inline void StackHandler::SetFp(Address slot, Address fp) {
+ Memory::Address_at(slot) = fp;
+}
+
+
} } // namespace v8::internal
#endif // V8_ARM_FRAMES_ARM_H_
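The new ConstructFrameConstants above define an FP-relative layout for construct frames. Assuming the 32-bit ARM pointer size of 4 bytes, the small sketch below just prints the resulting byte offsets; the constant names are taken from the header, everything else (including treating the fixed part as return address, caller fp, context and function, per the removed StandardFrameConstants comment) is for illustration:

#include <cstdio>

static const int kPointerSize = 4;  // 32-bit ARM

static const int kExpressionsOffset = -3 * kPointerSize;  // StandardFrameConstants
static const int kFixedFrameSize    =  4 * kPointerSize;  // pc, caller fp, context, function

static const int kImplicitReceiverOffset = -6 * kPointerSize;
static const int kConstructorOffset      = -5 * kPointerSize;
static const int kLengthOffset           = -4 * kPointerSize;
static const int kCodeOffset             = kExpressionsOffset;
static const int kConstructFrameSize     = kFixedFrameSize + 4 * kPointerSize;

int main() {
  std::printf("construct frame size: %d bytes\n", kConstructFrameSize);
  std::printf("fp%+d  code\n", kCodeOffset);
  std::printf("fp%+d  argument count (length)\n", kLengthOffset);
  std::printf("fp%+d  constructor function\n", kConstructorOffset);
  std::printf("fp%+d  implicit receiver\n", kImplicitReceiverOffset);
  return 0;
}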
diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc
index be8228377..c57c78559 100644
--- a/deps/v8/src/arm/full-codegen-arm.cc
+++ b/deps/v8/src/arm/full-codegen-arm.cc
@@ -27,7 +27,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_ARM)
+#if V8_TARGET_ARCH_ARM
#include "code-stubs.h"
#include "codegen.h"
@@ -129,8 +129,8 @@ void FullCodeGenerator::Generate() {
CompilationInfo* info = info_;
handler_table_ =
isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
- profiling_counter_ = isolate()->factory()->NewJSGlobalPropertyCell(
- Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget)));
+ profiling_counter_ = isolate()->factory()->NewCell(
+ Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
SetFunctionPosition(function());
Comment cmnt(masm_, "[ function compiled by full code generator");
@@ -138,7 +138,7 @@ void FullCodeGenerator::Generate() {
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
- info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
+ info->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
__ stop("stop-at");
}
#endif
@@ -148,13 +148,10 @@ void FullCodeGenerator::Generate() {
// receiver object). r5 is zero for method calls and non-zero for
// function calls.
if (!info->is_classic_mode() || info->is_native()) {
- Label ok;
- __ cmp(r5, Operand(0));
- __ b(eq, &ok);
+ __ cmp(r5, Operand::Zero());
int receiver_offset = info->scope()->num_parameters() * kPointerSize;
__ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
- __ str(r2, MemOperand(sp, receiver_offset));
- __ bind(&ok);
+ __ str(r2, MemOperand(sp, receiver_offset), ne);
}
// Open a frame scope to indicate that there is a frame on the stack. The
@@ -162,20 +159,19 @@ void FullCodeGenerator::Generate() {
// the frame (that is done below).
FrameScope frame_scope(masm_, StackFrame::MANUAL);
- int locals_count = info->scope()->num_stack_slots();
-
- __ Push(lr, fp, cp, r1);
- if (locals_count > 0) {
- // Load undefined value here, so the value is ready for the loop
- // below.
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- }
- // Adjust fp to point to caller's fp.
- __ add(fp, sp, Operand(2 * kPointerSize));
+ info->set_prologue_offset(masm_->pc_offset());
+ __ Prologue(BUILD_FUNCTION_FRAME);
+ info->AddNoFrameRange(0, masm_->pc_offset());
{ Comment cmnt(masm_, "[ Allocate locals");
- for (int i = 0; i < locals_count; i++) {
- __ push(ip);
+ int locals_count = info->scope()->num_stack_slots();
+ // Generators allocate locals, if any, in context slots.
+ ASSERT(!info->function()->is_generator() || locals_count == 0);
+ if (locals_count > 0) {
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ for (int i = 0; i < locals_count; i++) {
+ __ push(ip);
+ }
}
}
@@ -287,9 +283,8 @@ void FullCodeGenerator::Generate() {
__ LoadRoot(ip, Heap::kStackLimitRootIndex);
__ cmp(sp, Operand(ip));
__ b(hs, &ok);
- PredictableCodeSizeScope predictable(masm_);
- StackCheckStub stub;
- __ CallStub(&stub);
+ PredictableCodeSizeScope predictable(masm_, 2 * Assembler::kInstrSize);
+ __ Call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
__ bind(&ok);
}
@@ -308,7 +303,7 @@ void FullCodeGenerator::Generate() {
EmitReturnSequence();
// Force emit the constant pool, so it doesn't get emitted in the middle
- // of the stack check table.
+ // of the back edge table.
masm()->CheckConstPool(true, false);
}
@@ -320,9 +315,9 @@ void FullCodeGenerator::ClearAccumulator() {
void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) {
__ mov(r2, Operand(profiling_counter_));
- __ ldr(r3, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
+ __ ldr(r3, FieldMemOperand(r2, Cell::kValueOffset));
__ sub(r3, r3, Operand(Smi::FromInt(delta)), SetCC);
- __ str(r3, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
+ __ str(r3, FieldMemOperand(r2, Cell::kValueOffset));
}
@@ -338,46 +333,34 @@ void FullCodeGenerator::EmitProfilingCounterReset() {
}
__ mov(r2, Operand(profiling_counter_));
__ mov(r3, Operand(Smi::FromInt(reset_value)));
- __ str(r3, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
+ __ str(r3, FieldMemOperand(r2, Cell::kValueOffset));
}
-void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
- Label* back_edge_target) {
- Comment cmnt(masm_, "[ Stack check");
- // Block literal pools whilst emitting stack check code.
+void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
+ Label* back_edge_target) {
+ Comment cmnt(masm_, "[ Back edge bookkeeping");
+ // Block literal pools whilst emitting back edge code.
Assembler::BlockConstPoolScope block_const_pool(masm_);
Label ok;
- if (FLAG_count_based_interrupts) {
- int weight = 1;
- if (FLAG_weighted_back_edges) {
- ASSERT(back_edge_target->is_bound());
- int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
- weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kBackEdgeDistanceUnit));
- }
- EmitProfilingCounterDecrement(weight);
- __ b(pl, &ok);
- InterruptStub stub;
- __ CallStub(&stub);
- } else {
- __ LoadRoot(ip, Heap::kStackLimitRootIndex);
- __ cmp(sp, Operand(ip));
- __ b(hs, &ok);
- PredictableCodeSizeScope predictable(masm_);
- StackCheckStub stub;
- __ CallStub(&stub);
+ int weight = 1;
+ if (FLAG_weighted_back_edges) {
+ ASSERT(back_edge_target->is_bound());
+ int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
+ weight = Min(kMaxBackEdgeWeight,
+ Max(1, distance / kCodeSizeMultiplier));
}
+ EmitProfilingCounterDecrement(weight);
+ __ b(pl, &ok);
+ __ Call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET);
// Record a mapping of this PC offset to the OSR id. This is used to find
// the AST id from the unoptimized code in order to use it as a key into
// the deoptimization input data found in the optimized code.
- RecordStackCheck(stmt->OsrEntryId());
+ RecordBackEdge(stmt->OsrEntryId());
- if (FLAG_count_based_interrupts) {
- EmitProfilingCounterReset();
- }
+ EmitProfilingCounterReset();
__ bind(&ok);
PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
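EmitBackEdgeBookkeeping above charges the profiling counter per back edge with weight Min(kMaxBackEdgeWeight, Max(1, distance / kCodeSizeMultiplier)), so larger loop bodies reach the interrupt check in fewer iterations. A rough C++ sketch of that accounting; the constant values and the starting budget below are assumptions for illustration, not the platform's real numbers:

#include <algorithm>
#include <cstdio>

// Illustrative constants only; the real kMaxBackEdgeWeight and
// kCodeSizeMultiplier are defined by the full code generator per platform.
static const int kMaxBackEdgeWeight = 127;
static const int kCodeSizeMultiplier = 148;

// Weight charged to the profiling counter per back edge, as in the code above.
static int BackEdgeWeight(int code_bytes_since_target) {
  return std::min(kMaxBackEdgeWeight,
                  std::max(1, code_bytes_since_target / kCodeSizeMultiplier));
}

int main() {
  int counter = 6000;          // stand-in for the interrupt budget
  int loop_body_bytes = 1200;  // code emitted since the back-edge target
  int iterations = 0;
  for (;;) {
    ++iterations;
    counter -= BackEdgeWeight(loop_body_bytes);
    if (counter < 0) break;    // corresponds to "b(pl, &ok)" not being taken
  }
  std::printf("interrupt check after %d iterations (weight %d)\n",
              iterations, BackEdgeWeight(loop_body_bytes));
  return 0;
}

A bigger distance means a bigger weight, so a loop with a long body exhausts the budget, and hits the InterruptCheck builtin, after fewer trips around the back edge.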
@@ -408,7 +391,7 @@ void FullCodeGenerator::EmitReturnSequence() {
} else if (FLAG_weighted_back_edges) {
int distance = masm_->pc_offset();
weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kBackEdgeDistanceUnit));
+ Max(1, distance / kCodeSizeMultiplier));
}
EmitProfilingCounterDecrement(weight);
Label ok;
@@ -419,8 +402,8 @@ void FullCodeGenerator::EmitReturnSequence() {
__ push(r2);
__ CallRuntime(Runtime::kOptimizeFunctionOnNextCall, 1);
} else {
- InterruptStub stub;
- __ CallStub(&stub);
+ __ Call(isolate()->builtins()->InterruptCheck(),
+ RelocInfo::CODE_TARGET);
}
__ pop(r0);
EmitProfilingCounterReset();
@@ -439,12 +422,15 @@ void FullCodeGenerator::EmitReturnSequence() {
// tool from instrumenting as we rely on the code size here.
int32_t sp_delta = (info_->scope()->num_parameters() + 1) * kPointerSize;
CodeGenerator::RecordPositions(masm_, function()->end_position() - 1);
- PredictableCodeSizeScope predictable(masm_);
+ // TODO(svenpanne) The code below is sometimes 4 words, sometimes 5!
+ PredictableCodeSizeScope predictable(masm_, -1);
__ RecordJSReturn();
masm_->mov(sp, fp);
+ int no_frame_start = masm_->pc_offset();
masm_->ldm(ia_w, sp, fp.bit() | lr.bit());
masm_->add(sp, sp, Operand(sp_delta));
masm_->Jump(lr);
+ info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
}
#ifdef DEBUG
@@ -679,8 +665,8 @@ void FullCodeGenerator::DoTest(Expression* condition,
Label* if_true,
Label* if_false,
Label* fall_through) {
- ToBooleanStub stub(result_register());
- __ CallStub(&stub);
+ Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
+ CallIC(ic, RelocInfo::CODE_TARGET, condition->test_id());
__ tst(result_register(), result_register());
Split(ne, if_true, if_false, fall_through);
}
@@ -786,9 +772,9 @@ void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
// Check that we're not inside a with or catch context.
__ ldr(r1, FieldMemOperand(cp, HeapObject::kMapOffset));
__ CompareRoot(r1, Heap::kWithContextMapRootIndex);
- __ Check(ne, "Declaration in with context.");
+ __ Check(ne, kDeclarationInWithContext);
__ CompareRoot(r1, Heap::kCatchContextMapRootIndex);
- __ Check(ne, "Declaration in catch context.");
+ __ Check(ne, kDeclarationInCatchContext);
}
}
@@ -914,34 +900,33 @@ void FullCodeGenerator::VisitFunctionDeclaration(
void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
- VariableProxy* proxy = declaration->proxy();
- Variable* variable = proxy->var();
- Handle<JSModule> instance = declaration->module()->interface()->Instance();
- ASSERT(!instance.is_null());
+ Variable* variable = declaration->proxy()->var();
+ ASSERT(variable->location() == Variable::CONTEXT);
+ ASSERT(variable->interface()->IsFrozen());
- switch (variable->location()) {
- case Variable::UNALLOCATED: {
- Comment cmnt(masm_, "[ ModuleDeclaration");
- globals_->Add(variable->name(), zone());
- globals_->Add(instance, zone());
- Visit(declaration->module());
- break;
- }
+ Comment cmnt(masm_, "[ ModuleDeclaration");
+ EmitDebugCheckDeclarationContext(variable);
- case Variable::CONTEXT: {
- Comment cmnt(masm_, "[ ModuleDeclaration");
- EmitDebugCheckDeclarationContext(variable);
- __ mov(r1, Operand(instance));
- __ str(r1, ContextOperand(cp, variable->index()));
- Visit(declaration->module());
- break;
- }
+ // Load instance object.
+ __ LoadContext(r1, scope_->ContextChainLength(scope_->GlobalScope()));
+ __ ldr(r1, ContextOperand(r1, variable->interface()->Index()));
+ __ ldr(r1, ContextOperand(r1, Context::EXTENSION_INDEX));
- case Variable::PARAMETER:
- case Variable::LOCAL:
- case Variable::LOOKUP:
- UNREACHABLE();
- }
+ // Assign it.
+ __ str(r1, ContextOperand(cp, variable->index()));
+ // We know that we have written a module, which is not a smi.
+ __ RecordWriteContextSlot(cp,
+ Context::SlotOffset(variable->index()),
+ r1,
+ r3,
+ kLRHasBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ PrepareForBailoutForId(declaration->proxy()->id(), NO_REGISTERS);
+
+ // Traverse into body.
+ Visit(declaration->module());
}
@@ -984,6 +969,14 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
}
+void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
+ // Call the runtime to declare the modules.
+ __ Push(descriptions);
+ __ CallRuntime(Runtime::kDeclareModules, 1);
+ // Return value is ignored.
+}
+
+
void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
Comment cmnt(masm_, "[ SwitchStatement");
Breakable nested_statement(this, stmt);
@@ -1033,11 +1026,11 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Record position before stub call for type feedback.
SetSourcePosition(clause->position());
- Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
+ Handle<Code> ic = CompareIC::GetUninitialized(isolate(), Token::EQ_STRICT);
CallIC(ic, RelocInfo::CODE_TARGET, clause->CompareId());
patch_site.EmitPatchInfo();
- __ cmp(r0, Operand(0));
+ __ cmp(r0, Operand::Zero());
__ b(ne, &next_test);
__ Drop(1); // Switch value is no longer needed.
__ b(clause->body_target());
@@ -1075,9 +1068,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
ForIn loop_statement(this, stmt);
increment_loop_depth();
- // Get the object to enumerate over. Both SpiderMonkey and JSC
- // ignore null and undefined in contrast to the specification; see
- // ECMA-262 section 12.6.4.
+ // Get the object to enumerate over. If the object is null or undefined, skip
+ // over the loop. See ECMA-262 version 5, section 12.6.4.
VisitForAccumulatorValue(stmt->enumerable());
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ cmp(r0, ip);
@@ -1159,14 +1151,13 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Label non_proxy;
__ bind(&fixed_array);
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(
- Handle<Object>(
- Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker)));
+ Handle<Cell> cell = isolate()->factory()->NewCell(
+ Handle<Object>(Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker),
+ isolate()));
RecordTypeFeedbackCell(stmt->ForInFeedbackId(), cell);
- __ LoadHeapObject(r1, cell);
+ __ Move(r1, cell);
__ mov(r2, Operand(Smi::FromInt(TypeFeedbackCells::kForInSlowCaseMarker)));
- __ str(r2, FieldMemOperand(r1, JSGlobalPropertyCell::kValueOffset));
+ __ str(r2, FieldMemOperand(r1, Cell::kValueOffset));
__ mov(r1, Operand(Smi::FromInt(1))); // Smi indicates slow check
__ ldr(r2, MemOperand(sp, 0 * kPointerSize)); // Get enumerated object
@@ -1191,7 +1182,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Get the current entry of the array into register r3.
__ ldr(r2, MemOperand(sp, 2 * kPointerSize));
__ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ ldr(r3, MemOperand(r2, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ ldr(r3, MemOperand::PointerAddressFromSmiKey(r2, r0));
// Get the expected map from the stack or a smi in the
// permanent slow case into register r2.
@@ -1238,7 +1229,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ add(r0, r0, Operand(Smi::FromInt(1)));
__ push(r0);
- EmitStackCheck(stmt, &loop);
+ EmitBackEdgeBookkeeping(stmt, &loop);
__ b(&loop);
// Remove the pointers stored on the stack.
@@ -1252,6 +1243,65 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
}
+void FullCodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
+ Comment cmnt(masm_, "[ ForOfStatement");
+ SetStatementPosition(stmt);
+
+ Iteration loop_statement(this, stmt);
+ increment_loop_depth();
+
+ // var iterator = iterable[@@iterator]()
+ VisitForAccumulatorValue(stmt->assign_iterator());
+
+ // As with for-in, skip the loop if the iterator is null or undefined.
+ __ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
+ __ b(eq, loop_statement.break_label());
+ __ CompareRoot(r0, Heap::kNullValueRootIndex);
+ __ b(eq, loop_statement.break_label());
+
+ // Convert the iterator to a JS object.
+ Label convert, done_convert;
+ __ JumpIfSmi(r0, &convert);
+ __ CompareObjectType(r0, r1, r1, FIRST_SPEC_OBJECT_TYPE);
+ __ b(ge, &done_convert);
+ __ bind(&convert);
+ __ push(r0);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ bind(&done_convert);
+ __ push(r0);
+
+ // Loop entry.
+ __ bind(loop_statement.continue_label());
+
+ // result = iterator.next()
+ VisitForEffect(stmt->next_result());
+
+ // if (result.done) break;
+ Label result_not_done;
+ VisitForControl(stmt->result_done(),
+ loop_statement.break_label(),
+ &result_not_done,
+ &result_not_done);
+ __ bind(&result_not_done);
+
+ // each = result.value
+ VisitForEffect(stmt->assign_each());
+
+ // Generate code for the body of the loop.
+ Visit(stmt->body());
+
+ // Check stack before looping.
+ PrepareForBailoutForId(stmt->BackEdgeId(), NO_REGISTERS);
+ EmitBackEdgeBookkeeping(stmt, loop_statement.continue_label());
+ __ jmp(loop_statement.continue_label());
+
+ // Exit and decrement the loop depth.
+ PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+ __ bind(loop_statement.break_label());
+ decrement_loop_depth();
+}
+
+
void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
bool pretenure) {
// Use the fast case closure allocation code that allocates in new
@@ -1265,9 +1315,8 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
!pretenure &&
scope()->is_function_scope() &&
info->num_literals() == 0) {
- FastNewClosureStub stub(info->language_mode());
- __ mov(r0, Operand(info));
- __ push(r0);
+ FastNewClosureStub stub(info->language_mode(), info->is_generator());
+ __ mov(r2, Operand(info));
__ CallStub(&stub);
} else {
__ mov(r0, Operand(info));
@@ -1391,9 +1440,9 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
} else if (var->mode() == DYNAMIC_LOCAL) {
Variable* local = var->local_if_not_shadowed();
__ ldr(r0, ContextSlotOperandCheckExtensions(local, slow));
- if (local->mode() == CONST ||
- local->mode() == CONST_HARMONY ||
- local->mode() == LET) {
+ if (local->mode() == LET ||
+ local->mode() == CONST ||
+ local->mode() == CONST_HARMONY) {
__ CompareRoot(r0, Heap::kTheHoleValueRootIndex);
if (local->mode() == CONST) {
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
@@ -1544,7 +1593,7 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
__ bind(&materialized);
int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
Label allocated, runtime_allocate;
- __ AllocateInNewSpace(size, r0, r2, r3, &runtime_allocate, TAG_OBJECT);
+ __ Allocate(size, r0, r2, r3, &runtime_allocate, TAG_OBJECT);
__ jmp(&allocated);
__ bind(&runtime_allocate);
@@ -1559,7 +1608,7 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
// r0: Newly allocated regexp.
// r5: Materialized regexp.
// r2: temp.
- __ CopyFields(r0, r5, r2.bit(), size / kPointerSize);
+ __ CopyFields(r0, r5, d0, size / kPointerSize);
context()->Plug(r0);
}
@@ -1577,7 +1626,7 @@ void FullCodeGenerator::EmitAccessor(Expression* expression) {
void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
Handle<FixedArray> constant_properties = expr->constant_properties();
- __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
__ mov(r2, Operand(Smi::FromInt(expr->literal_index())));
__ mov(r1, Operand(constant_properties));
@@ -1588,13 +1637,13 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
? ObjectLiteral::kHasFunction
: ObjectLiteral::kNoFlags;
__ mov(r0, Operand(Smi::FromInt(flags)));
- __ Push(r3, r2, r1, r0);
int properties_count = constant_properties->length() / 2;
- if (expr->depth() > 1) {
- __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
- } else if (flags != ObjectLiteral::kFastElements ||
+ if ((FLAG_track_double_fields && expr->may_store_doubles()) ||
+ expr->depth() > 1 || Serializer::enabled() ||
+ flags != ObjectLiteral::kFastElements ||
properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
- __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
+ __ Push(r3, r2, r1, r0);
+ __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
} else {
FastCloneShallowObjectStub stub(properties_count);
__ CallStub(&stub);
@@ -1627,10 +1676,10 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
ASSERT(!CompileTimeValue::IsCompileTimeValue(property->value()));
// Fall through.
case ObjectLiteral::Property::COMPUTED:
- if (key->handle()->IsSymbol()) {
+ if (key->value()->IsInternalizedString()) {
if (property->emit_store()) {
VisitForAccumulatorValue(value);
- __ mov(r2, Operand(key->handle()));
+ __ mov(r2, Operand(key->value()));
__ ldr(r1, MemOperand(sp));
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
@@ -1642,8 +1691,6 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
}
break;
}
- // Fall through.
- case ObjectLiteral::Property::PROTOTYPE:
// Duplicate receiver on stack.
__ ldr(r0, MemOperand(sp));
__ push(r0);
@@ -1657,6 +1704,18 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ Drop(3);
}
break;
+ case ObjectLiteral::Property::PROTOTYPE:
+ // Duplicate receiver on stack.
+ __ ldr(r0, MemOperand(sp));
+ __ push(r0);
+ VisitForStackValue(value);
+ if (property->emit_store()) {
+ __ CallRuntime(Runtime::kSetPrototype, 2);
+ } else {
+ __ Drop(2);
+ }
+ break;
+
case ObjectLiteral::Property::GETTER:
accessor_table.lookup(key)->second->getter = value;
break;
@@ -1713,25 +1772,36 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
__ mov(r2, Operand(Smi::FromInt(expr->literal_index())));
__ mov(r1, Operand(constant_elements));
- __ Push(r3, r2, r1);
if (has_fast_elements && constant_elements_values->map() ==
isolate()->heap()->fixed_cow_array_map()) {
FastCloneShallowArrayStub stub(
- FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
+ FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS,
+ DONT_TRACK_ALLOCATION_SITE,
+ length);
__ CallStub(&stub);
__ IncrementCounter(
isolate()->counters()->cow_arrays_created_stub(), 1, r1, r2);
} else if (expr->depth() > 1) {
+ __ Push(r3, r2, r1);
__ CallRuntime(Runtime::kCreateArrayLiteral, 3);
- } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
+ } else if (Serializer::enabled() ||
+ length > FastCloneShallowArrayStub::kMaximumClonedLength) {
+ __ Push(r3, r2, r1);
__ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
} else {
ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) ||
FLAG_smi_only_arrays);
- FastCloneShallowArrayStub::Mode mode = has_fast_elements
- ? FastCloneShallowArrayStub::CLONE_ELEMENTS
- : FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
- FastCloneShallowArrayStub stub(mode, length);
+ FastCloneShallowArrayStub::Mode mode =
+ FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
+ AllocationSiteMode allocation_site_mode = FLAG_track_allocation_sites
+ ? TRACK_ALLOCATION_SITE : DONT_TRACK_ALLOCATION_SITE;
+
+ if (has_fast_elements) {
+ mode = FastCloneShallowArrayStub::CLONE_ELEMENTS;
+ allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
+ }
+
+ FastCloneShallowArrayStub stub(mode, allocation_site_mode, length);
__ CallStub(&stub);
}
@@ -1743,20 +1813,18 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Expression* subexpr = subexprs->at(i);
// If the subexpression is a literal or a simple materialized literal it
// is already set in the cloned array.
- if (subexpr->AsLiteral() != NULL ||
- CompileTimeValue::IsCompileTimeValue(subexpr)) {
- continue;
- }
+ if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
if (!result_saved) {
__ push(r0);
+ __ Push(Smi::FromInt(expr->literal_index()));
result_saved = true;
}
VisitForAccumulatorValue(subexpr);
if (IsFastObjectElementsKind(constant_elements_kind)) {
int offset = FixedArray::kHeaderSize + (i * kPointerSize);
- __ ldr(r6, MemOperand(sp)); // Copy of array literal.
+ __ ldr(r6, MemOperand(sp, kPointerSize)); // Copy of array literal.
__ ldr(r1, FieldMemOperand(r6, JSObject::kElementsOffset));
__ str(result_register(), FieldMemOperand(r1, offset));
// Update the write barrier for the array store.
@@ -1764,10 +1832,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
kLRHasBeenSaved, kDontSaveFPRegs,
EMIT_REMEMBERED_SET, INLINE_SMI_CHECK);
} else {
- __ ldr(r1, MemOperand(sp)); // Copy of array literal.
- __ ldr(r2, FieldMemOperand(r1, JSObject::kMapOffset));
__ mov(r3, Operand(Smi::FromInt(i)));
- __ mov(r4, Operand(Smi::FromInt(expr->literal_index())));
StoreArrayLiteralElementStub stub;
__ CallStub(&stub);
}
@@ -1776,6 +1841,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
}
if (result_saved) {
+ __ pop(); // literal index
context()->PlugTOS();
} else {
context()->Plug(r0);
@@ -1896,10 +1962,287 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
}
+void FullCodeGenerator::VisitYield(Yield* expr) {
+ Comment cmnt(masm_, "[ Yield");
+ // Evaluate yielded value first; the initial iterator definition depends on
+ // this. It stays on the stack while we update the iterator.
+ VisitForStackValue(expr->expression());
+
+ switch (expr->yield_kind()) {
+ case Yield::SUSPEND:
+ // Pop value from top-of-stack slot; box result into result register.
+ EmitCreateIteratorResult(false);
+ __ push(result_register());
+ // Fall through.
+ case Yield::INITIAL: {
+ Label suspend, continuation, post_runtime, resume;
+
+ __ jmp(&suspend);
+
+ __ bind(&continuation);
+ __ jmp(&resume);
+
+ __ bind(&suspend);
+ VisitForAccumulatorValue(expr->generator_object());
+ ASSERT(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
+ __ mov(r1, Operand(Smi::FromInt(continuation.pos())));
+ __ str(r1, FieldMemOperand(r0, JSGeneratorObject::kContinuationOffset));
+ __ str(cp, FieldMemOperand(r0, JSGeneratorObject::kContextOffset));
+ __ mov(r1, cp);
+ __ RecordWriteField(r0, JSGeneratorObject::kContextOffset, r1, r2,
+ kLRHasBeenSaved, kDontSaveFPRegs);
+ __ add(r1, fp, Operand(StandardFrameConstants::kExpressionsOffset));
+ __ cmp(sp, r1);
+ __ b(eq, &post_runtime);
+ __ push(r0); // generator object
+ __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ bind(&post_runtime);
+ __ pop(result_register());
+ EmitReturnSequence();
+
+ __ bind(&resume);
+ context()->Plug(result_register());
+ break;
+ }
+
+ case Yield::FINAL: {
+ VisitForAccumulatorValue(expr->generator_object());
+ __ mov(r1, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorClosed)));
+ __ str(r1, FieldMemOperand(result_register(),
+ JSGeneratorObject::kContinuationOffset));
+ // Pop value from top-of-stack slot, box result into result register.
+ EmitCreateIteratorResult(true);
+ EmitUnwindBeforeReturn();
+ EmitReturnSequence();
+ break;
+ }
+
+ case Yield::DELEGATING: {
+ VisitForStackValue(expr->generator_object());
+
+ // Initial stack layout is as follows:
+ // [sp + 1 * kPointerSize] iter
+ // [sp + 0 * kPointerSize] g
+
+ Label l_catch, l_try, l_suspend, l_continuation, l_resume;
+ Label l_next, l_call, l_loop;
+ // Initial send value is undefined.
+ __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
+ __ b(&l_next);
+
+ // catch (e) { receiver = iter; f = 'throw'; arg = e; goto l_call; }
+ __ bind(&l_catch);
+ handler_table()->set(expr->index(), Smi::FromInt(l_catch.pos()));
+ __ LoadRoot(r2, Heap::kthrow_stringRootIndex); // "throw"
+ __ ldr(r3, MemOperand(sp, 1 * kPointerSize)); // iter
+ __ push(r3); // iter
+ __ push(r0); // exception
+ __ jmp(&l_call);
+
+ // try { received = %yield result }
+ // Shuffle the received result above a try handler and yield it without
+ // re-boxing.
+ __ bind(&l_try);
+ __ pop(r0); // result
+ __ PushTryHandler(StackHandler::CATCH, expr->index());
+ const int handler_size = StackHandlerConstants::kSize;
+ __ push(r0); // result
+ __ jmp(&l_suspend);
+ __ bind(&l_continuation);
+ __ jmp(&l_resume);
+ __ bind(&l_suspend);
+ const int generator_object_depth = kPointerSize + handler_size;
+ __ ldr(r0, MemOperand(sp, generator_object_depth));
+ __ push(r0); // g
+ ASSERT(l_continuation.pos() > 0 && Smi::IsValid(l_continuation.pos()));
+ __ mov(r1, Operand(Smi::FromInt(l_continuation.pos())));
+ __ str(r1, FieldMemOperand(r0, JSGeneratorObject::kContinuationOffset));
+ __ str(cp, FieldMemOperand(r0, JSGeneratorObject::kContextOffset));
+ __ mov(r1, cp);
+ __ RecordWriteField(r0, JSGeneratorObject::kContextOffset, r1, r2,
+ kLRHasBeenSaved, kDontSaveFPRegs);
+ __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ pop(r0); // result
+ EmitReturnSequence();
+ __ bind(&l_resume); // received in r0
+ __ PopTryHandler();
+
+ // receiver = iter; f = 'next'; arg = received;
+ __ bind(&l_next);
+ __ LoadRoot(r2, Heap::knext_stringRootIndex); // "next"
+ __ ldr(r3, MemOperand(sp, 1 * kPointerSize)); // iter
+ __ push(r3); // iter
+ __ push(r0); // received
+
+ // result = receiver[f](arg);
+ __ bind(&l_call);
+ Handle<Code> ic = isolate()->stub_cache()->ComputeKeyedCallInitialize(1);
+ CallIC(ic);
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+
+ // if (!result.done) goto l_try;
+ __ bind(&l_loop);
+ __ push(r0); // save result
+ __ LoadRoot(r2, Heap::kdone_stringRootIndex); // "done"
+ Handle<Code> done_ic = isolate()->builtins()->LoadIC_Initialize();
+ CallIC(done_ic); // result.done in r0
+ Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
+ CallIC(bool_ic);
+ __ cmp(r0, Operand(0));
+ __ b(eq, &l_try);
+
+ // result.value
+ __ pop(r0); // result
+ __ LoadRoot(r2, Heap::kvalue_stringRootIndex); // "value"
+ Handle<Code> value_ic = isolate()->builtins()->LoadIC_Initialize();
+ CallIC(value_ic); // result.value in r0
+ context()->DropAndPlug(2, r0); // drop iter and g
+ break;
+ }
+ }
+}
+
+
+void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
+ Expression *value,
+ JSGeneratorObject::ResumeMode resume_mode) {
+ // The value stays in r0, and is ultimately read by the resumed generator, as
+ // if the CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. r1
+ // will hold the generator object until the activation has been resumed.
+ VisitForStackValue(generator);
+ VisitForAccumulatorValue(value);
+ __ pop(r1);
+
+ // Check generator state.
+ Label wrong_state, done;
+ __ ldr(r3, FieldMemOperand(r1, JSGeneratorObject::kContinuationOffset));
+ STATIC_ASSERT(JSGeneratorObject::kGeneratorExecuting <= 0);
+ STATIC_ASSERT(JSGeneratorObject::kGeneratorClosed <= 0);
+ __ cmp(r3, Operand(Smi::FromInt(0)));
+ __ b(le, &wrong_state);
+
+ // Load suspended function and context.
+ __ ldr(cp, FieldMemOperand(r1, JSGeneratorObject::kContextOffset));
+ __ ldr(r4, FieldMemOperand(r1, JSGeneratorObject::kFunctionOffset));
+
+ // Load receiver and store as the first argument.
+ __ ldr(r2, FieldMemOperand(r1, JSGeneratorObject::kReceiverOffset));
+ __ push(r2);
+
+ // Push holes for the rest of the arguments to the generator function.
+ __ ldr(r3, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(r3,
+ FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ LoadRoot(r2, Heap::kTheHoleValueRootIndex);
+ Label push_argument_holes, push_frame;
+ __ bind(&push_argument_holes);
+ __ sub(r3, r3, Operand(Smi::FromInt(1)), SetCC);
+ __ b(mi, &push_frame);
+ __ push(r2);
+ __ jmp(&push_argument_holes);
+
+ // Enter a new JavaScript frame, and initialize its slots as they were when
+ // the generator was suspended.
+ Label resume_frame;
+ __ bind(&push_frame);
+ __ bl(&resume_frame);
+ __ jmp(&done);
+ __ bind(&resume_frame);
+ __ push(lr); // Return address.
+ __ push(fp); // Caller's frame pointer.
+ __ mov(fp, sp);
+ __ push(cp); // Callee's context.
+ __ push(r4); // Callee's JS Function.
+
+ // Load the operand stack size.
+ __ ldr(r3, FieldMemOperand(r1, JSGeneratorObject::kOperandStackOffset));
+ __ ldr(r3, FieldMemOperand(r3, FixedArray::kLengthOffset));
+ __ SmiUntag(r3);
+
+ // If we are sending a value and there is no operand stack, we can jump back
+ // in directly.
+ if (resume_mode == JSGeneratorObject::NEXT) {
+ Label slow_resume;
+ __ cmp(r3, Operand(0));
+ __ b(ne, &slow_resume);
+ __ ldr(r3, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));
+ __ ldr(r2, FieldMemOperand(r1, JSGeneratorObject::kContinuationOffset));
+ __ SmiUntag(r2);
+ __ add(r3, r3, r2);
+ __ mov(r2, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting)));
+ __ str(r2, FieldMemOperand(r1, JSGeneratorObject::kContinuationOffset));
+ __ Jump(r3);
+ __ bind(&slow_resume);
+ }
+
+ // Otherwise, we push holes for the operand stack and call the runtime to fix
+ // up the stack and the handlers.
+ Label push_operand_holes, call_resume;
+ __ bind(&push_operand_holes);
+ __ sub(r3, r3, Operand(1), SetCC);
+ __ b(mi, &call_resume);
+ __ push(r2);
+ __ b(&push_operand_holes);
+ __ bind(&call_resume);
+ __ push(r1);
+ __ push(result_register());
+ __ Push(Smi::FromInt(resume_mode));
+ __ CallRuntime(Runtime::kResumeJSGeneratorObject, 3);
+ // Not reached: the runtime call returns elsewhere.
+ __ stop("not-reached");
+
+ // Throw error if we attempt to operate on a running generator.
+ __ bind(&wrong_state);
+ __ push(r1);
+ __ CallRuntime(Runtime::kThrowGeneratorStateError, 1);
+
+ __ bind(&done);
+ context()->Plug(result_register());
+}
+
+
+void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
+ Label gc_required;
+ Label allocated;
+
+ Handle<Map> map(isolate()->native_context()->generator_result_map());
+
+ __ Allocate(map->instance_size(), r0, r2, r3, &gc_required, TAG_OBJECT);
+ __ jmp(&allocated);
+
+ __ bind(&gc_required);
+ __ Push(Smi::FromInt(map->instance_size()));
+ __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+ __ ldr(context_register(),
+ MemOperand(fp, StandardFrameConstants::kContextOffset));
+
+ __ bind(&allocated);
+ __ mov(r1, Operand(map));
+ __ pop(r2);
+ __ mov(r3, Operand(isolate()->factory()->ToBoolean(done)));
+ __ mov(r4, Operand(isolate()->factory()->empty_fixed_array()));
+ ASSERT_EQ(map->instance_size(), 5 * kPointerSize);
+ __ str(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ str(r4, FieldMemOperand(r0, JSObject::kPropertiesOffset));
+ __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
+ __ str(r2,
+ FieldMemOperand(r0, JSGeneratorObject::kResultValuePropertyOffset));
+ __ str(r3,
+ FieldMemOperand(r0, JSGeneratorObject::kResultDonePropertyOffset));
+
+ // Only the value field needs a write barrier, as the other values are in the
+ // root set.
+ __ RecordWriteField(r0, JSGeneratorObject::kResultValuePropertyOffset,
+ r2, r3, kLRHasBeenSaved, kDontSaveFPRegs);
+}
+
+
void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Literal* key = prop->key()->AsLiteral();
- __ mov(r2, Operand(key->handle()));
+ __ mov(r2, Operand(key->value()));
// Call load IC. It has arguments receiver and property name r0 and r2.
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
CallIC(ic, RelocInfo::CODE_TARGET, prop->PropertyFeedbackId());
@@ -1937,7 +2280,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ bind(&stub_call);
BinaryOpStub stub(op, mode);
- CallIC(stub.GetCode(), RelocInfo::CODE_TARGET,
+ CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET,
expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
__ jmp(&done);
@@ -1948,23 +2291,18 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
// BinaryOpStub::GenerateSmiSmiOperation for comments.
switch (op) {
case Token::SAR:
- __ b(&stub_call);
__ GetLeastBitsFromSmi(scratch1, right, 5);
__ mov(right, Operand(left, ASR, scratch1));
__ bic(right, right, Operand(kSmiTagMask));
break;
case Token::SHL: {
- __ b(&stub_call);
__ SmiUntag(scratch1, left);
__ GetLeastBitsFromSmi(scratch2, right, 5);
__ mov(scratch1, Operand(scratch1, LSL, scratch2));
- __ add(scratch2, scratch1, Operand(0x40000000), SetCC);
- __ b(mi, &stub_call);
- __ SmiTag(right, scratch1);
+ __ TrySmiTag(right, scratch1, &stub_call);
break;
}
case Token::SHR: {
- __ b(&stub_call);
__ SmiUntag(scratch1, left);
__ GetLeastBitsFromSmi(scratch2, right, 5);
__ mov(scratch1, Operand(scratch1, LSR, scratch2));
@@ -1989,7 +2327,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ mov(ip, Operand(scratch1, ASR, 31));
__ cmp(ip, Operand(scratch2));
__ b(ne, &stub_call);
- __ cmp(scratch1, Operand(0));
+ __ cmp(scratch1, Operand::Zero());
__ mov(right, Operand(scratch1), LeaveCC, ne);
__ b(ne, &done);
__ add(scratch2, right, Operand(left), SetCC);
@@ -2021,7 +2359,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
__ pop(r1);
BinaryOpStub stub(op, mode);
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
- CallIC(stub.GetCode(), RelocInfo::CODE_TARGET,
+ CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET,
expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
context()->Plug(r0);
@@ -2029,7 +2367,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
void FullCodeGenerator::EmitAssignment(Expression* expr) {
- // Invalid left-hand sides are rewritten to have a 'throw
+ // Invalid left-hand sides are rewritten by the parser to have a 'throw
// ReferenceError' on the left-hand side.
if (!expr->IsValidLeftHandSide()) {
VisitForEffect(expr);
@@ -2059,7 +2397,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
VisitForAccumulatorValue(prop->obj());
__ mov(r1, r0);
__ pop(r0); // Restore value.
- __ mov(r2, Operand(prop->key()->AsLiteral()->handle()));
+ __ mov(r2, Operand(prop->key()->AsLiteral()->value()));
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
: isolate()->builtins()->StoreIC_Initialize_Strict();
@@ -2157,7 +2495,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
// Check for an uninitialized let binding.
__ ldr(r2, location);
__ CompareRoot(r2, Heap::kTheHoleValueRootIndex);
- __ Check(eq, "Let binding re-initialization.");
+ __ Check(eq, kLetBindingReInitialization);
}
// Perform the assignment.
__ str(r0, location);
@@ -2188,7 +2526,7 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
// Record source code position before IC call.
SetSourcePosition(expr->position());
- __ mov(r2, Operand(prop->key()->AsLiteral()->handle()));
+ __ mov(r2, Operand(prop->key()->AsLiteral()->value()));
__ pop(r1);
Handle<Code> ic = is_classic_mode()
@@ -2321,14 +2659,13 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
flags = static_cast<CallFunctionFlags>(flags | RECORD_CALL_TARGET);
Handle<Object> uninitialized =
TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
+ Handle<Cell> cell = isolate()->factory()->NewCell(uninitialized);
RecordTypeFeedbackCell(expr->CallFeedbackId(), cell);
__ mov(r2, Operand(cell));
CallFunctionStub stub(arg_count, flags);
__ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
- __ CallStub(&stub);
+ __ CallStub(&stub, expr->CallFeedbackId());
RecordJSReturnSite(expr);
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2374,7 +2711,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
VariableProxy* proxy = callee->AsVariableProxy();
Property* property = callee->AsProperty();
- if (proxy != NULL && proxy->var()->is_possibly_eval()) {
+ if (proxy != NULL && proxy->var()->is_possibly_eval(isolate())) {
// In a call to eval, we first call %ResolvePossiblyDirectEval to
// resolve the function we need to call and the receiver of the
// call. Then we call the resolved function using the given
@@ -2463,7 +2800,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
}
if (property->key()->IsPropertyName()) {
EmitCallWithIC(expr,
- property->key()->AsLiteral()->handle(),
+ property->key()->AsLiteral()->value(),
RelocInfo::CODE_TARGET);
} else {
EmitKeyedCallWithIC(expr, property->key());
@@ -2517,13 +2854,12 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Record call targets in unoptimized code.
Handle<Object> uninitialized =
TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
+ Handle<Cell> cell = isolate()->factory()->NewCell(uninitialized);
RecordTypeFeedbackCell(expr->CallNewFeedbackId(), cell);
__ mov(r2, Operand(cell));
CallConstructStub stub(RECORD_CALL_TARGET);
- __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+ __ Call(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL);
PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
context()->Plug(r0);
}
@@ -2543,7 +2879,7 @@ void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
&if_true, &if_false, &fall_through);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ tst(r0, Operand(kSmiTagMask));
+ __ SmiTst(r0);
Split(eq, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
@@ -2564,7 +2900,7 @@ void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) {
&if_true, &if_false, &fall_through);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ tst(r0, Operand(kSmiTagMask | 0x80000000));
+ __ NonNegativeSmiTst(r0);
Split(eq, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
@@ -2657,7 +2993,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
VisitForAccumulatorValue(args->at(0));
- Label materialize_true, materialize_false;
+ Label materialize_true, materialize_false, skip_lookup;
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
@@ -2669,7 +3005,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
__ ldrb(ip, FieldMemOperand(r1, Map::kBitField2Offset));
__ tst(ip, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
- __ b(ne, if_true);
+ __ b(ne, &skip_lookup);
// Check for fast case object. Generate false result for slow case object.
__ ldr(r2, FieldMemOperand(r0, JSObject::kPropertiesOffset));
@@ -2678,35 +3014,32 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ cmp(r2, ip);
__ b(eq, if_false);
- // Look for valueOf symbol in the descriptor array, and indicate false if
+ // Look for valueOf name in the descriptor array, and indicate false if
// found. Since we omit an enumeration index check, if it is added via a
// transition that shares its descriptor array, this is a false positive.
Label entry, loop, done;
// Skip loop if no descriptors are valid.
__ NumberOfOwnDescriptors(r3, r1);
- __ cmp(r3, Operand(0));
+ __ cmp(r3, Operand::Zero());
__ b(eq, &done);
__ LoadInstanceDescriptors(r1, r4);
// r4: descriptor array.
// r3: valid entries in the descriptor array.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kPointerSize == 4);
__ mov(ip, Operand(DescriptorArray::kDescriptorSize));
__ mul(r3, r3, ip);
// Calculate location of the first key name.
__ add(r4, r4, Operand(DescriptorArray::kFirstOffset - kHeapObjectTag));
// Calculate the end of the descriptor array.
__ mov(r2, r4);
- __ add(r2, r2, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ add(r2, r2, Operand::PointerOffsetFromSmiKey(r3));
// Loop through all the keys in the descriptor array. If one of these is the
- // symbol valueOf the result is false.
- // The use of ip to store the valueOf symbol asumes that it is not otherwise
+ // string "valueOf" the result is false.
+ // The use of ip to store the valueOf string assumes that it is not otherwise
// used in the loop below.
- __ mov(ip, Operand(FACTORY->value_of_symbol()));
+ __ mov(ip, Operand(isolate()->factory()->value_of_string()));
__ jmp(&entry);
__ bind(&loop);
__ ldr(r3, MemOperand(r4, 0));
@@ -2718,6 +3051,14 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ b(ne, &loop);
__ bind(&done);
+
+ // Set the bit in the map to indicate that there is no local valueOf field.
+ __ ldrb(r2, FieldMemOperand(r1, Map::kBitField2Offset));
+ __ orr(r2, r2, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
+ __ strb(r2, FieldMemOperand(r1, Map::kBitField2Offset));
+
+ __ bind(&skip_lookup);
+
// If a valueOf property is not found on the object, check that its
// prototype is the unmodified String prototype. If not, the result is false.
__ ldr(r2, FieldMemOperand(r1, Map::kPrototypeOffset));
@@ -2727,16 +3068,9 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ ldr(r3, FieldMemOperand(r3, GlobalObject::kNativeContextOffset));
__ ldr(r3, ContextOperand(r3, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
__ cmp(r2, r3);
- __ b(ne, if_false);
-
- // Set the bit in the map to indicate that it has been checked safe for
- // default valueOf and set true result.
- __ ldrb(r2, FieldMemOperand(r1, Map::kBitField2Offset));
- __ orr(r2, r2, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
- __ strb(r2, FieldMemOperand(r1, Map::kBitField2Offset));
- __ jmp(if_true);
-
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(eq, if_true, if_false, fall_through);
+
context()->Plug(if_true, if_false);
}
@@ -2941,12 +3275,12 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
// Functions have class 'Function'.
__ bind(&function);
- __ LoadRoot(r0, Heap::kfunction_class_symbolRootIndex);
+ __ LoadRoot(r0, Heap::kfunction_class_stringRootIndex);
__ jmp(&done);
// Objects with a non-function constructor have class 'Object'.
__ bind(&non_function_constructor);
- __ LoadRoot(r0, Heap::kObject_symbolRootIndex);
+ __ LoadRoot(r0, Heap::kObject_stringRootIndex);
__ jmp(&done);
// Non-JS objects have class null.
@@ -2970,7 +3304,7 @@ void FullCodeGenerator::EmitLog(CallRuntime* expr) {
// 2 (array): Arguments to the format string.
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(args->length(), 3);
- if (CodeGenerator::ShouldGenerateLog(args->at(0))) {
+ if (CodeGenerator::ShouldGenerateLog(isolate(), args->at(0))) {
VisitForStackValue(args->at(1));
VisitForStackValue(args->at(2));
__ CallRuntime(Runtime::kLog, 2);
@@ -3001,37 +3335,26 @@ void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) {
// Convert 32 random bits in r0 to 0.(32 random bits) in a double
// by computing:
// ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
- if (CpuFeatures::IsSupported(VFP2)) {
- __ PrepareCallCFunction(1, r0);
- __ ldr(r0,
- ContextOperand(context_register(), Context::GLOBAL_OBJECT_INDEX));
- __ ldr(r0, FieldMemOperand(r0, GlobalObject::kNativeContextOffset));
- __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
-
- CpuFeatures::Scope scope(VFP2);
- // 0x41300000 is the top half of 1.0 x 2^20 as a double.
- // Create this constant using mov/orr to avoid PC relative load.
- __ mov(r1, Operand(0x41000000));
- __ orr(r1, r1, Operand(0x300000));
- // Move 0x41300000xxxxxxxx (x = random bits) to VFP.
- __ vmov(d7, r0, r1);
- // Move 0x4130000000000000 to VFP.
- __ mov(r0, Operand(0, RelocInfo::NONE));
- __ vmov(d8, r0, r1);
- // Subtract and store the result in the heap number.
- __ vsub(d7, d7, d8);
- __ sub(r0, r4, Operand(kHeapObjectTag));
- __ vstr(d7, r0, HeapNumber::kValueOffset);
- __ mov(r0, r4);
- } else {
- __ PrepareCallCFunction(2, r0);
- __ ldr(r1,
- ContextOperand(context_register(), Context::GLOBAL_OBJECT_INDEX));
- __ mov(r0, Operand(r4));
- __ ldr(r1, FieldMemOperand(r1, GlobalObject::kNativeContextOffset));
- __ CallCFunction(
- ExternalReference::fill_heap_number_with_random_function(isolate()), 2);
- }
+ __ PrepareCallCFunction(1, r0);
+ __ ldr(r0,
+ ContextOperand(context_register(), Context::GLOBAL_OBJECT_INDEX));
+ __ ldr(r0, FieldMemOperand(r0, GlobalObject::kNativeContextOffset));
+ __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
+
+ // 0x41300000 is the top half of 1.0 x 2^20 as a double.
+ // Create this constant using mov/orr to avoid PC relative load.
+ __ mov(r1, Operand(0x41000000));
+ __ orr(r1, r1, Operand(0x300000));
+ // Move 0x41300000xxxxxxxx (x = random bits) to VFP.
+ __ vmov(d7, r0, r1);
+ // Move 0x4130000000000000 to VFP.
+ __ mov(r0, Operand::Zero());
+ __ vmov(d8, r0, r1);
+ // Subtract and store the result in the heap number.
+ __ vsub(d7, d7, d8);
+ __ sub(r0, r4, Operand(kHeapObjectTag));
+ __ vstr(d7, r0, HeapNumber::kValueOffset);
+ __ mov(r0, r4);
context()->Plug(r0);
}
@@ -3086,7 +3409,7 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
ASSERT_NE(NULL, args->at(1)->AsLiteral());
- Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->handle()));
+ Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->value()));
VisitForAccumulatorValue(args->at(0)); // Load the object.
@@ -3129,18 +3452,97 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitSeqStringSetCharCheck(Register string,
+ Register index,
+ Register value,
+ uint32_t encoding_mask) {
+ __ SmiTst(index);
+ __ Check(eq, kNonSmiIndex);
+ __ SmiTst(value);
+ __ Check(eq, kNonSmiValue);
+
+ __ ldr(ip, FieldMemOperand(string, String::kLengthOffset));
+ __ cmp(index, ip);
+ __ Check(lt, kIndexIsTooLarge);
+
+ __ cmp(index, Operand(Smi::FromInt(0)));
+ __ Check(ge, kIndexIsNegative);
+
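+ // Check that the string's representation and encoding match the expected
+ // mask.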
+ __ ldr(ip, FieldMemOperand(string, HeapObject::kMapOffset));
+ __ ldrb(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset));
+
+ __ and_(ip, ip, Operand(kStringRepresentationMask | kStringEncodingMask));
+ __ cmp(ip, Operand(encoding_mask));
+ __ Check(eq, kUnexpectedStringType);
+}
+
+
+void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT_EQ(3, args->length());
+
+ Register string = r0;
+ Register index = r1;
+ Register value = r2;
+
+ VisitForStackValue(args->at(1)); // index
+ VisitForStackValue(args->at(2)); // value
+ __ pop(value);
+ __ pop(index);
+ VisitForAccumulatorValue(args->at(0)); // string
+
+ if (FLAG_debug_code) {
+ static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+ EmitSeqStringSetCharCheck(string, index, value, one_byte_seq_type);
+ }
+
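+ // Untag the value and compute the character's address: the smi index is
+ // shifted right by the tag size to give the byte offset for one-byte
+ // characters.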
+ __ SmiUntag(value, value);
+ __ add(ip,
+ string,
+ Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+ __ strb(value, MemOperand(ip, index, LSR, kSmiTagSize));
+ context()->Plug(string);
+}
+
+
+void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT_EQ(3, args->length());
+
+ Register string = r0;
+ Register index = r1;
+ Register value = r2;
+
+ VisitForStackValue(args->at(1)); // index
+ VisitForStackValue(args->at(2)); // value
+ __ pop(value);
+ __ pop(index);
+ VisitForAccumulatorValue(args->at(0)); // string
+
+ if (FLAG_debug_code) {
+ static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+ EmitSeqStringSetCharCheck(string, index, value, two_byte_seq_type);
+ }
+
+ __ SmiUntag(value, value);
+ __ add(ip,
+ string,
+ Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
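+ // With kSmiTag == 0 and kSmiTagSize == 1, the smi-encoded index is already
+ // twice the character index, which is the byte offset for two-byte
+ // characters, so it is used directly.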
+ __ strh(value, MemOperand(ip, index));
+ context()->Plug(string);
+}
+
+
void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
// Load the arguments on the stack and call the runtime function.
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
- if (CpuFeatures::IsSupported(VFP2)) {
- MathPowStub stub(MathPowStub::ON_STACK);
- __ CallStub(&stub);
- } else {
- __ CallRuntime(Runtime::kMath_pow, 2);
- }
+ MathPowStub stub(MathPowStub::ON_STACK);
+ __ CallStub(&stub);
context()->Plug(r0);
}
@@ -3176,8 +3578,8 @@ void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(args->length(), 1);
- // Load the argument on the stack and call the stub.
- VisitForStackValue(args->at(0));
+ // Load the argument into r0 and call the stub.
+ VisitForAccumulatorValue(args->at(0));
NumberToStringStub stub;
__ CallStub(&stub);
@@ -3278,7 +3680,7 @@ void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
__ bind(&index_out_of_range);
// When the index is out of range, the spec requires us to return
// the empty string.
- __ LoadRoot(result, Heap::kEmptyStringRootIndex);
+ __ LoadRoot(result, Heap::kempty_stringRootIndex);
__ jmp(&done);
__ bind(&need_conversion);
@@ -3301,7 +3703,7 @@ void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
- StringAddStub stub(NO_STRING_ADD_FLAGS);
+ StringAddStub stub(STRING_ADD_CHECK_BOTH);
__ CallStub(&stub);
context()->Plug(r0);
}
@@ -3426,12 +3828,12 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(2, args->length());
ASSERT_NE(NULL, args->at(0)->AsLiteral());
- int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
+ int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->value()))->value();
Handle<FixedArray> jsfunction_result_caches(
isolate()->native_context()->jsfunction_result_caches());
if (jsfunction_result_caches->length() <= cache_id) {
- __ Abort("Attempt to use undefined cache.");
+ __ Abort(kAttemptToUseUndefinedCache);
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
context()->Plug(r0);
return;
@@ -3450,12 +3852,11 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
Label done, not_found;
// tmp now holds finger offset as a smi.
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
__ ldr(r2, FieldMemOperand(cache, JSFunctionResultCache::kFingerOffset));
// r2 now holds finger offset as a smi.
__ add(r3, cache, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
// r3 now points to the start of fixed array elements.
- __ ldr(r2, MemOperand(r3, r2, LSL, kPointerSizeLog2 - kSmiTagSize, PreIndex));
+ __ ldr(r2, MemOperand::PointerAddressFromSmiKey(r3, r2, PreIndex));
// Note side effect of PreIndex: r3 now points to the key of the pair.
__ cmp(key, r2);
__ b(ne, &not_found);
@@ -3549,9 +3950,8 @@ void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
- Label bailout, done, one_char_separator, long_separator,
- non_trivial_array, not_size_one_array, loop,
- empty_separator_loop, one_char_separator_loop,
+ Label bailout, done, one_char_separator, long_separator, non_trivial_array,
+ not_size_one_array, loop, empty_separator_loop, one_char_separator_loop,
one_char_separator_loop_entry, long_separator_loop;
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
@@ -3569,25 +3969,24 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
Register string = r4;
Register element = r5;
Register elements_end = r6;
- Register scratch1 = r7;
- Register scratch2 = r9;
+ Register scratch = r9;
// Separator operand is on the stack.
__ pop(separator);
// Check that the array is a JSArray.
__ JumpIfSmi(array, &bailout);
- __ CompareObjectType(array, scratch1, scratch2, JS_ARRAY_TYPE);
+ __ CompareObjectType(array, scratch, array_length, JS_ARRAY_TYPE);
__ b(ne, &bailout);
// Check that the array has fast elements.
- __ CheckFastElements(scratch1, scratch2, &bailout);
+ __ CheckFastElements(scratch, array_length, &bailout);
// If the array has length zero, return the empty string.
__ ldr(array_length, FieldMemOperand(array, JSArray::kLengthOffset));
__ SmiUntag(array_length, SetCC);
__ b(ne, &non_trivial_array);
- __ LoadRoot(r0, Heap::kEmptyStringRootIndex);
+ __ LoadRoot(r0, Heap::kempty_stringRootIndex);
__ b(&done);
__ bind(&non_trivial_array);
@@ -3599,7 +3998,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// Check that all array elements are sequential ASCII strings, and
// accumulate the sum of their lengths, as a smi-encoded value.
- __ mov(string_length, Operand(0));
+ __ mov(string_length, Operand::Zero());
__ add(element,
elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ add(elements_end, element, Operand(array_length, LSL, kPointerSizeLog2));
@@ -3612,17 +4011,17 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// element: Current array element.
// elements_end: Array end.
if (generate_debug_code_) {
- __ cmp(array_length, Operand(0));
- __ Assert(gt, "No empty arrays here in EmitFastAsciiArrayJoin");
+ __ cmp(array_length, Operand::Zero());
+ __ Assert(gt, kNoEmptyArraysHereInEmitFastAsciiArrayJoin);
}
__ bind(&loop);
__ ldr(string, MemOperand(element, kPointerSize, PostIndex));
__ JumpIfSmi(string, &bailout);
- __ ldr(scratch1, FieldMemOperand(string, HeapObject::kMapOffset));
- __ ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
- __ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
- __ ldr(scratch1, FieldMemOperand(string, SeqAsciiString::kLengthOffset));
- __ add(string_length, string_length, Operand(scratch1), SetCC);
+ __ ldr(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
+ __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+ __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch, &bailout);
+ __ ldr(scratch, FieldMemOperand(string, SeqOneByteString::kLengthOffset));
+ __ add(string_length, string_length, Operand(scratch), SetCC);
__ b(vs, &bailout);
__ cmp(element, elements_end);
__ b(lt, &loop);
@@ -3643,23 +4042,23 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// Check that the separator is a flat ASCII string.
__ JumpIfSmi(separator, &bailout);
- __ ldr(scratch1, FieldMemOperand(separator, HeapObject::kMapOffset));
- __ ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
- __ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
+ __ ldr(scratch, FieldMemOperand(separator, HeapObject::kMapOffset));
+ __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+ __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch, &bailout);
// Add (separator length times array_length) - separator length to the
// string_length to get the length of the result string. array_length is not
// a smi but the other values are, so the result is a smi.
- __ ldr(scratch1, FieldMemOperand(separator, SeqAsciiString::kLengthOffset));
- __ sub(string_length, string_length, Operand(scratch1));
- __ smull(scratch2, ip, array_length, scratch1);
+ __ ldr(scratch, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
+ __ sub(string_length, string_length, Operand(scratch));
+ __ smull(scratch, ip, array_length, scratch);
// Check for smi overflow. No overflow if higher 33 bits of 64-bit result are
// zero.
- __ cmp(ip, Operand(0));
+ __ cmp(ip, Operand::Zero());
__ b(ne, &bailout);
- __ tst(scratch2, Operand(0x80000000));
+ __ tst(scratch, Operand(0x80000000));
__ b(ne, &bailout);
- __ add(string_length, string_length, Operand(scratch2), SetCC);
+ __ add(string_length, string_length, Operand(scratch), SetCC);
__ b(vs, &bailout);
__ SmiUntag(string_length);
@@ -3676,9 +4075,9 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// array_length: Length of the array.
__ AllocateAsciiString(result,
string_length,
- scratch1,
- scratch2,
- elements_end,
+ scratch,
+ string, // used as scratch
+ elements_end, // used as scratch
&bailout);
// Prepare for looping. Set up elements_end to end of the array. Set
// result_pos to the position of the result where to write the first
@@ -3688,11 +4087,11 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
array_length = no_reg;
__ add(result_pos,
result,
- Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
// Check the length of the separator.
- __ ldr(scratch1, FieldMemOperand(separator, SeqAsciiString::kLengthOffset));
- __ cmp(scratch1, Operand(Smi::FromInt(1)));
+ __ ldr(scratch, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
+ __ cmp(scratch, Operand(Smi::FromInt(1)));
__ b(eq, &one_char_separator);
__ b(gt, &long_separator);
@@ -3707,8 +4106,10 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ ldr(string, MemOperand(element, kPointerSize, PostIndex));
__ ldr(string_length, FieldMemOperand(string, String::kLengthOffset));
__ SmiUntag(string_length);
- __ add(string, string, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- __ CopyBytes(string, result_pos, string_length, scratch1);
+ __ add(string,
+ string,
+ Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+ __ CopyBytes(string, result_pos, string_length, scratch);
__ cmp(element, elements_end);
__ b(lt, &empty_separator_loop); // End while (element < elements_end).
ASSERT(result.is(r0));
@@ -3717,7 +4118,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// One-character separator case
__ bind(&one_char_separator);
// Replace separator with its ASCII character value.
- __ ldrb(separator, FieldMemOperand(separator, SeqAsciiString::kHeaderSize));
+ __ ldrb(separator, FieldMemOperand(separator, SeqOneByteString::kHeaderSize));
// Jump into the loop after the code that copies the separator, so the first
// element is not preceded by a separator
__ jmp(&one_char_separator_loop_entry);
@@ -3737,8 +4138,10 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ ldr(string, MemOperand(element, kPointerSize, PostIndex));
__ ldr(string_length, FieldMemOperand(string, String::kLengthOffset));
__ SmiUntag(string_length);
- __ add(string, string, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- __ CopyBytes(string, result_pos, string_length, scratch1);
+ __ add(string,
+ string,
+ Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+ __ CopyBytes(string, result_pos, string_length, scratch);
__ cmp(element, elements_end);
__ b(lt, &one_char_separator_loop); // End while (element < elements_end).
ASSERT(result.is(r0));
@@ -3758,15 +4161,17 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ SmiUntag(string_length);
__ add(string,
separator,
- Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- __ CopyBytes(string, result_pos, string_length, scratch1);
+ Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+ __ CopyBytes(string, result_pos, string_length, scratch);
__ bind(&long_separator);
__ ldr(string, MemOperand(element, kPointerSize, PostIndex));
__ ldr(string_length, FieldMemOperand(string, String::kLengthOffset));
__ SmiUntag(string_length);
- __ add(string, string, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- __ CopyBytes(string, result_pos, string_length, scratch1);
+ __ add(string,
+ string,
+ Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+ __ CopyBytes(string, result_pos, string_length, scratch);
__ cmp(element, elements_end);
__ b(lt, &long_separator_loop); // End while (element < elements_end).
ASSERT(result.is(r0));
@@ -3926,50 +4331,12 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
break;
}
- case Token::ADD: {
- Comment cmt(masm_, "[ UnaryOperation (ADD)");
- VisitForAccumulatorValue(expr->expression());
- Label no_conversion;
- __ JumpIfSmi(result_register(), &no_conversion);
- ToNumberStub convert_stub;
- __ CallStub(&convert_stub);
- __ bind(&no_conversion);
- context()->Plug(result_register());
- break;
- }
-
- case Token::SUB:
- EmitUnaryOperation(expr, "[ UnaryOperation (SUB)");
- break;
-
- case Token::BIT_NOT:
- EmitUnaryOperation(expr, "[ UnaryOperation (BIT_NOT)");
- break;
-
default:
UNREACHABLE();
}
}
-void FullCodeGenerator::EmitUnaryOperation(UnaryOperation* expr,
- const char* comment) {
- // TODO(svenpanne): Allowing format strings in Comment would be nice here...
- Comment cmt(masm_, comment);
- bool can_overwrite = expr->expression()->ResultOverwriteAllowed();
- UnaryOverwriteMode overwrite =
- can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
- UnaryOpStub stub(expr->op(), overwrite);
- // UnaryOpStub expects the argument to be in the
- // accumulator register r0.
- VisitForAccumulatorValue(expr->expression());
- SetSourcePosition(expr->position());
- CallIC(stub.GetCode(), RelocInfo::CODE_TARGET,
- expr->UnaryOperationFeedbackId());
- context()->Plug(r0);
-}
-
-
void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Comment cmnt(masm_, "[ CountOperation");
SetSourcePosition(expr->position());
@@ -4028,7 +4395,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// Call ToNumber only if operand is not a smi.
Label no_conversion;
- __ JumpIfSmi(r0, &no_conversion);
+ if (ShouldInlineSmiCase(expr->op())) {
+ __ JumpIfSmi(r0, &no_conversion);
+ }
ToNumberStub convert_stub;
__ CallStub(&convert_stub);
__ bind(&no_conversion);
@@ -4070,13 +4439,16 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// Call stub. Undo operation first.
__ sub(r0, r0, Operand(Smi::FromInt(count_value)));
}
- __ mov(r1, Operand(Smi::FromInt(count_value)));
+ __ mov(r1, r0);
+ __ mov(r0, Operand(Smi::FromInt(count_value)));
// Record position before stub call.
SetSourcePosition(expr->position());
BinaryOpStub stub(Token::ADD, NO_OVERWRITE);
- CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountBinOpFeedbackId());
+ CallIC(stub.GetCode(isolate()),
+ RelocInfo::CODE_TARGET,
+ expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
__ bind(&done);
@@ -4103,7 +4475,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
break;
case NAMED_PROPERTY: {
- __ mov(r2, Operand(prop->key()->AsLiteral()->handle()));
+ __ mov(r2, Operand(prop->key()->AsLiteral()->value()));
__ pop(r1);
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
@@ -4191,13 +4563,13 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
}
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- if (check->Equals(isolate()->heap()->number_symbol())) {
+ if (check->Equals(isolate()->heap()->number_string())) {
__ JumpIfSmi(r0, if_true);
__ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
__ cmp(r0, ip);
Split(eq, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->string_symbol())) {
+ } else if (check->Equals(isolate()->heap()->string_string())) {
__ JumpIfSmi(r0, if_false);
// Check for undetectable objects => false.
__ CompareObjectType(r0, r0, r1, FIRST_NONSTRING_TYPE);
@@ -4205,16 +4577,20 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
__ ldrb(r1, FieldMemOperand(r0, Map::kBitFieldOffset));
__ tst(r1, Operand(1 << Map::kIsUndetectable));
Split(eq, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->boolean_symbol())) {
+ } else if (check->Equals(isolate()->heap()->symbol_string())) {
+ __ JumpIfSmi(r0, if_false);
+ __ CompareObjectType(r0, r0, r1, SYMBOL_TYPE);
+ Split(eq, if_true, if_false, fall_through);
+ } else if (check->Equals(isolate()->heap()->boolean_string())) {
__ CompareRoot(r0, Heap::kTrueValueRootIndex);
__ b(eq, if_true);
__ CompareRoot(r0, Heap::kFalseValueRootIndex);
Split(eq, if_true, if_false, fall_through);
} else if (FLAG_harmony_typeof &&
- check->Equals(isolate()->heap()->null_symbol())) {
+ check->Equals(isolate()->heap()->null_string())) {
__ CompareRoot(r0, Heap::kNullValueRootIndex);
Split(eq, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->undefined_symbol())) {
+ } else if (check->Equals(isolate()->heap()->undefined_string())) {
__ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
__ b(eq, if_true);
__ JumpIfSmi(r0, if_false);
@@ -4224,14 +4600,14 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
__ tst(r1, Operand(1 << Map::kIsUndetectable));
Split(ne, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->function_symbol())) {
+ } else if (check->Equals(isolate()->heap()->function_string())) {
__ JumpIfSmi(r0, if_false);
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
__ CompareObjectType(r0, r0, r1, JS_FUNCTION_TYPE);
__ b(eq, if_true);
__ cmp(r1, Operand(JS_FUNCTION_PROXY_TYPE));
Split(eq, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->object_symbol())) {
+ } else if (check->Equals(isolate()->heap()->object_string())) {
__ JumpIfSmi(r0, if_false);
if (!FLAG_harmony_typeof) {
__ CompareRoot(r0, Heap::kNullValueRootIndex);
@@ -4295,29 +4671,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
default: {
VisitForAccumulatorValue(expr->right());
- Condition cond = eq;
- switch (op) {
- case Token::EQ_STRICT:
- case Token::EQ:
- cond = eq;
- break;
- case Token::LT:
- cond = lt;
- break;
- case Token::GT:
- cond = gt;
- break;
- case Token::LTE:
- cond = le;
- break;
- case Token::GTE:
- cond = ge;
- break;
- case Token::IN:
- case Token::INSTANCEOF:
- default:
- UNREACHABLE();
- }
+ Condition cond = CompareIC::ComputeCondition(op);
__ pop(r1);
bool inline_smi_code = ShouldInlineSmiCase(op);
@@ -4333,11 +4687,11 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
// Record position and call the compare IC.
SetSourcePosition(expr->position());
- Handle<Code> ic = CompareIC::GetUninitialized(op);
+ Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
CallIC(ic, RelocInfo::CODE_TARGET, expr->CompareOperationFeedbackId());
patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ cmp(r0, Operand(0));
+ __ cmp(r0, Operand::Zero());
Split(cond, if_true, if_false, fall_through);
}
}
@@ -4360,28 +4714,18 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
VisitForAccumulatorValue(sub_expr);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Heap::RootListIndex nil_value = nil == kNullValue ?
- Heap::kNullValueRootIndex :
- Heap::kUndefinedValueRootIndex;
- __ LoadRoot(r1, nil_value);
- __ cmp(r0, r1);
if (expr->op() == Token::EQ_STRICT) {
- Split(eq, if_true, if_false, fall_through);
- } else {
- Heap::RootListIndex other_nil_value = nil == kNullValue ?
- Heap::kUndefinedValueRootIndex :
- Heap::kNullValueRootIndex;
- __ b(eq, if_true);
- __ LoadRoot(r1, other_nil_value);
+ Heap::RootListIndex nil_value = nil == kNullValue ?
+ Heap::kNullValueRootIndex :
+ Heap::kUndefinedValueRootIndex;
+ __ LoadRoot(r1, nil_value);
__ cmp(r0, r1);
- __ b(eq, if_true);
- __ JumpIfSmi(r0, if_false);
- // It can be an undetectable object.
- __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ ldrb(r1, FieldMemOperand(r1, Map::kBitFieldOffset));
- __ and_(r1, r1, Operand(1 << Map::kIsUndetectable));
- __ cmp(r1, Operand(1 << Map::kIsUndetectable));
Split(eq, if_true, if_false, fall_through);
+ } else {
+ Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
+ CallIC(ic, RelocInfo::CODE_TARGET, expr->CompareOperationFeedbackId());
+ __ cmp(r0, Operand(0));
+ Split(ne, if_true, if_false, fall_through);
}
context()->Plug(if_true, if_false);
}
@@ -4445,9 +4789,7 @@ void FullCodeGenerator::EnterFinallyBlock() {
__ push(result_register());
// Cook return address in link register to stack (smi encoded Code* delta)
__ sub(r1, lr, Operand(masm_->CodeObject()));
- ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
- STATIC_ASSERT(kSmiTag == 0);
- __ add(r1, r1, Operand(r1)); // Convert to smi.
+ __ SmiTag(r1);
// Store result register while executing finally block.
__ push(r1);
@@ -4501,8 +4843,7 @@ void FullCodeGenerator::ExitFinallyBlock() {
// Uncook return address and return.
__ pop(result_register());
- ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
- __ mov(r1, Operand(r1, ASR, 1)); // Un-smi-tag value.
+ __ SmiUntag(r1);
__ add(pc, r1, Operand(masm_->CodeObject()));
}
@@ -4537,6 +4878,91 @@ FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
#undef __
+
+static const int32_t kBranchBeforeInterrupt = 0x5a000004;
+
+
+void BackEdgeTable::PatchAt(Code* unoptimized_code,
+ Address pc,
+ BackEdgeState target_state,
+ Code* replacement_code) {
+ static const int kInstrSize = Assembler::kInstrSize;
+ Address branch_address = pc - 3 * kInstrSize;
+ CodePatcher patcher(branch_address, 1);
+
+ switch (target_state) {
+ case INTERRUPT:
+ // <decrement profiling counter>
+ // 2a 00 00 01 bpl ok
+ // e5 9f c? ?? ldr ip, [pc, <interrupt stub address>]
+ // e1 2f ff 3c blx ip
+ // ok-label
+ patcher.masm()->b(4 * kInstrSize, pl); // Jump offset is 4 instructions.
+ ASSERT_EQ(kBranchBeforeInterrupt, Memory::int32_at(branch_address));
+ break;
+ case ON_STACK_REPLACEMENT:
+ case OSR_AFTER_STACK_CHECK:
+ // <decrement profiling counter>
+ // e1 a0 00 00 mov r0, r0 (NOP)
+ // e5 9f c? ?? ldr ip, [pc, <on-stack replacement address>]
+ // e1 2f ff 3c blx ip
+ // ok-label
+ patcher.masm()->nop();
+ break;
+ }
+
+ Address pc_immediate_load_address = pc - 2 * kInstrSize;
+ // Replace the call address.
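+ // The pc-relative ldr's low 12 bits hold the offset of the constant pool
+ // slot containing the call target.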
+ uint32_t interrupt_address_offset =
+ Memory::uint16_at(pc_immediate_load_address) & 0xfff;
+ Address interrupt_address_pointer = pc + interrupt_address_offset;
+ Memory::uint32_at(interrupt_address_pointer) =
+ reinterpret_cast<uint32_t>(replacement_code->entry());
+
+ unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
+ unoptimized_code, pc_immediate_load_address, replacement_code);
+}
+
+
+BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
+ Isolate* isolate,
+ Code* unoptimized_code,
+ Address pc) {
+ static const int kInstrSize = Assembler::kInstrSize;
+ ASSERT(Memory::int32_at(pc - kInstrSize) == kBlxIp);
+
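+ // The back edge site ends with: b/nop, ldr ip, [pc, #offset], blx ip;
+ // pc points just past the blx.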
+ Address branch_address = pc - 3 * kInstrSize;
+ Address pc_immediate_load_address = pc - 2 * kInstrSize;
+ uint32_t interrupt_address_offset =
+ Memory::uint16_at(pc_immediate_load_address) & 0xfff;
+ Address interrupt_address_pointer = pc + interrupt_address_offset;
+
+ if (Memory::int32_at(branch_address) == kBranchBeforeInterrupt) {
+ ASSERT(Memory::uint32_at(interrupt_address_pointer) ==
+ reinterpret_cast<uint32_t>(
+ isolate->builtins()->InterruptCheck()->entry()));
+ ASSERT(Assembler::IsLdrPcImmediateOffset(
+ Assembler::instr_at(pc_immediate_load_address)));
+ return INTERRUPT;
+ }
+
+ ASSERT(Assembler::IsNop(Assembler::instr_at(branch_address)));
+ ASSERT(Assembler::IsLdrPcImmediateOffset(
+ Assembler::instr_at(pc_immediate_load_address)));
+
+ if (Memory::uint32_at(interrupt_address_pointer) ==
+ reinterpret_cast<uint32_t>(
+ isolate->builtins()->OnStackReplacement()->entry())) {
+ return ON_STACK_REPLACEMENT;
+ }
+
+ ASSERT(Memory::uint32_at(interrupt_address_pointer) ==
+ reinterpret_cast<uint32_t>(
+ isolate->builtins()->OsrAfterStackCheck()->entry()));
+ return OSR_AFTER_STACK_CHECK;
+}
+
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_ARM
diff --git a/deps/v8/src/arm/ic-arm.cc b/deps/v8/src/arm/ic-arm.cc
index 48395897d..aded4c1dd 100644
--- a/deps/v8/src/arm/ic-arm.cc
+++ b/deps/v8/src/arm/ic-arm.cc
@@ -27,7 +27,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_ARM)
+#if V8_TARGET_ARCH_ARM
#include "assembler-arm.h"
#include "code-stubs.h"
@@ -64,12 +64,12 @@ static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
// Generated code falls through if the receiver is a regular non-global
// JS object with slow properties and no interceptors.
-static void GenerateStringDictionaryReceiverCheck(MacroAssembler* masm,
- Register receiver,
- Register elements,
- Register t0,
- Register t1,
- Label* miss) {
+static void GenerateNameDictionaryReceiverCheck(MacroAssembler* masm,
+ Register receiver,
+ Register elements,
+ Register t0,
+ Register t1,
+ Label* miss) {
// Register usage:
// receiver: holds the receiver on entry and is unchanged.
// elements: holds the property dictionary on fall through.
@@ -131,19 +131,19 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
Label done;
// Probe the dictionary.
- StringDictionaryLookupStub::GeneratePositiveLookup(masm,
- miss,
- &done,
- elements,
- name,
- scratch1,
- scratch2);
+ NameDictionaryLookupStub::GeneratePositiveLookup(masm,
+ miss,
+ &done,
+ elements,
+ name,
+ scratch1,
+ scratch2);
// If probing finds an entry check that the value is a normal
// property.
__ bind(&done); // scratch2 == elements + 4 * index
- const int kElementsStartOffset = StringDictionary::kHeaderSize +
- StringDictionary::kElementsStartIndex * kPointerSize;
+ const int kElementsStartOffset = NameDictionary::kHeaderSize +
+ NameDictionary::kElementsStartIndex * kPointerSize;
const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
__ ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
__ tst(scratch1, Operand(PropertyDetails::TypeField::kMask << kSmiTagSize));
@@ -180,19 +180,19 @@ static void GenerateDictionaryStore(MacroAssembler* masm,
Label done;
// Probe the dictionary.
- StringDictionaryLookupStub::GeneratePositiveLookup(masm,
- miss,
- &done,
- elements,
- name,
- scratch1,
- scratch2);
+ NameDictionaryLookupStub::GeneratePositiveLookup(masm,
+ miss,
+ &done,
+ elements,
+ name,
+ scratch1,
+ scratch2);
// If probing finds an entry in the dictionary check that the value
// is a normal property that is not read only.
__ bind(&done); // scratch2 == elements + 4 * index
- const int kElementsStartOffset = StringDictionary::kHeaderSize +
- StringDictionary::kElementsStartIndex * kPointerSize;
+ const int kElementsStartOffset = NameDictionary::kHeaderSize +
+ NameDictionary::kElementsStartIndex * kPointerSize;
const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
const int kTypeAndReadOnlyMask =
(PropertyDetails::TypeField::kMask |
@@ -213,53 +213,6 @@ static void GenerateDictionaryStore(MacroAssembler* masm,
}
-void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -- r0 : receiver
- // -- sp[0] : receiver
- // -----------------------------------
- Label miss;
-
- StubCompiler::GenerateLoadArrayLength(masm, r0, r3, &miss);
- __ bind(&miss);
- StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
-}
-
-
-void LoadIC::GenerateStringLength(MacroAssembler* masm, bool support_wrappers) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -- r0 : receiver
- // -- sp[0] : receiver
- // -----------------------------------
- Label miss;
-
- StubCompiler::GenerateLoadStringLength(masm, r0, r1, r3, &miss,
- support_wrappers);
- // Cache miss: Jump to runtime.
- __ bind(&miss);
- StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
-}
-
-
-void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -- r0 : receiver
- // -- sp[0] : receiver
- // -----------------------------------
- Label miss;
-
- StubCompiler::GenerateLoadFunctionPrototype(masm, r0, r1, r3, &miss);
- __ bind(&miss);
- StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
-}
-
-
// Checks the receiver for special cases (value type, slow case bits).
// Falls through for regular JS object.
static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
@@ -337,10 +290,7 @@ static void GenerateFastArrayLoad(MacroAssembler* masm,
__ b(hs, out_of_range);
// Fast case: Do the load.
__ add(scratch1, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- // The key is a smi.
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
- __ ldr(scratch2,
- MemOperand(scratch1, key, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ ldr(scratch2, MemOperand::PointerAddressFromSmiKey(scratch1, key));
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(scratch2, ip);
// In case the loaded value is the_hole we have to consult GetProperty
@@ -350,30 +300,36 @@ static void GenerateFastArrayLoad(MacroAssembler* masm,
}
-// Checks whether a key is an array index string or a symbol string.
-// Falls through if a key is a symbol.
-static void GenerateKeyStringCheck(MacroAssembler* masm,
- Register key,
- Register map,
- Register hash,
- Label* index_string,
- Label* not_symbol) {
+// Checks whether a key is an array index string or a unique name.
+// Falls through if a key is a unique name.
+static void GenerateKeyNameCheck(MacroAssembler* masm,
+ Register key,
+ Register map,
+ Register hash,
+ Label* index_string,
+ Label* not_unique) {
// The key is not a smi.
- // Is it a string?
- __ CompareObjectType(key, map, hash, FIRST_NONSTRING_TYPE);
- __ b(ge, not_symbol);
+ Label unique;
+ // Is it a name?
+ __ CompareObjectType(key, map, hash, LAST_UNIQUE_NAME_TYPE);
+ __ b(hi, not_unique);
+ STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
+ __ b(eq, &unique);
// Is the string an array index, with cached numeric value?
- __ ldr(hash, FieldMemOperand(key, String::kHashFieldOffset));
- __ tst(hash, Operand(String::kContainsCachedArrayIndexMask));
+ __ ldr(hash, FieldMemOperand(key, Name::kHashFieldOffset));
+ __ tst(hash, Operand(Name::kContainsCachedArrayIndexMask));
__ b(eq, index_string);
- // Is the string a symbol?
+ // Is the string internalized? We know it's a string, so a single
+ // bit test is enough.
// map: key map
__ ldrb(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kSymbolTag != 0);
- __ tst(hash, Operand(kIsSymbolMask));
- __ b(eq, not_symbol);
+ STATIC_ASSERT(kInternalizedTag == 0);
+ __ tst(hash, Operand(kIsNotInternalizedMask));
+ __ b(ne, not_unique);
+
+ __ bind(&unique);
}
@@ -398,7 +354,7 @@ void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
extra_state,
Code::NORMAL,
argc);
- Isolate::Current()->stub_cache()->GenerateProbe(
+ masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, r1, r2, r3, r4, r5, r6);
// If the stub cache probing failed, the receiver might be a value.
@@ -437,7 +393,7 @@ void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
// Probe the stub cache for the value object.
__ bind(&probe);
- Isolate::Current()->stub_cache()->GenerateProbe(
+ masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, r1, r2, r3, r4, r5, r6);
__ bind(&miss);
@@ -474,7 +430,7 @@ void CallICBase::GenerateNormal(MacroAssembler* masm, int argc) {
// Get the receiver of the function from the stack into r1.
__ ldr(r1, MemOperand(sp, argc * kPointerSize));
- GenerateStringDictionaryReceiverCheck(masm, r1, r0, r3, r4, &miss);
+ GenerateNameDictionaryReceiverCheck(masm, r1, r0, r3, r4, &miss);
// r0: elements
// Search the dictionary - put result in register r1.
@@ -578,11 +534,11 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
__ ldr(r1, MemOperand(sp, argc * kPointerSize));
Label do_call, slow_call, slow_load, slow_reload_receiver;
- Label check_number_dictionary, check_string, lookup_monomorphic_cache;
- Label index_smi, index_string;
+ Label check_number_dictionary, check_name, lookup_monomorphic_cache;
+ Label index_smi, index_name;
// Check that the key is a smi.
- __ JumpIfNotSmi(r2, &check_string);
+ __ JumpIfNotSmi(r2, &check_name);
__ bind(&index_smi);
// Now the key is known to be a smi. This place is also jumped to from below
// where a numeric string is converted to a smi.
@@ -609,7 +565,7 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
__ LoadRoot(ip, Heap::kHashTableMapRootIndex);
__ cmp(r3, ip);
__ b(ne, &slow_load);
- __ mov(r0, Operand(r2, ASR, kSmiTagSize));
+ __ SmiUntag(r0, r2);
// r0: untagged index
__ LoadFromNumberDictionary(&slow_load, r4, r2, r1, r0, r3, r5);
__ IncrementCounter(counters->keyed_call_generic_smi_dict(), 1, r0, r3);
@@ -629,10 +585,10 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
__ mov(r1, r0);
__ jmp(&do_call);
- __ bind(&check_string);
- GenerateKeyStringCheck(masm, r2, r0, r3, &index_string, &slow_call);
+ __ bind(&check_name);
+ GenerateKeyNameCheck(masm, r2, r0, r3, &index_name, &slow_call);
- // The key is known to be a symbol.
+ // The key is known to be a unique name.
// If the receiver is a regular JS object with slow properties then do
// a quick inline probe of the receiver's dictionary.
// Otherwise do the monomorphic cache probe.
@@ -660,14 +616,14 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
__ bind(&slow_call);
// This branch is taken if:
// - the receiver requires boxing or access check,
- // - the key is neither smi nor symbol,
+ // - the key is neither smi nor a unique name,
// - the value loaded is not a function,
// - there is hope that the runtime will create a monomorphic call stub
// that will get fetched next time.
__ IncrementCounter(counters->keyed_call_generic_slow(), 1, r0, r3);
GenerateMiss(masm, argc);
- __ bind(&index_string);
+ __ bind(&index_name);
__ IndexFromHash(r3, r2);
// Now jump to the place where smi keys are handled.
__ jmp(&index_smi);
@@ -680,10 +636,10 @@ void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
// -- lr : return address
// -----------------------------------
- // Check if the name is a string.
+ // Check if the name is really a name.
Label miss;
__ JumpIfSmi(r2, &miss);
- __ IsObjectJSStringType(r2, r0, &miss);
+ __ IsObjectNameType(r2, r0, &miss);
CallICBase::GenerateNormal(masm, argc);
__ bind(&miss);
@@ -691,21 +647,18 @@ void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
}
-// Defined in ic.cc.
-Object* LoadIC_Miss(Arguments args);
-
void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
// -- r0 : receiver
- // -- sp[0] : receiver
// -----------------------------------
// Probe the stub cache.
- Code::Flags flags =
- Code::ComputeFlags(Code::LOAD_IC, MONOMORPHIC);
- Isolate::Current()->stub_cache()->GenerateProbe(
+ Code::Flags flags = Code::ComputeFlags(
+ Code::HANDLER, MONOMORPHIC, Code::kNoExtraICState,
+ Code::NORMAL, Code::LOAD_IC);
+ masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, r0, r2, r3, r4, r5, r6);
// Cache miss: Jump to runtime.
@@ -718,11 +671,10 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
// -- r2 : name
// -- lr : return address
// -- r0 : receiver
- // -- sp[0] : receiver
// -----------------------------------
Label miss;
- GenerateStringDictionaryReceiverCheck(masm, r0, r1, r3, r4, &miss);
+ GenerateNameDictionaryReceiverCheck(masm, r0, r1, r3, r4, &miss);
// r1: elements
GenerateDictionaryLoad(masm, &miss, r1, r2, r0, r3, r4);
@@ -739,7 +691,6 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
// -- r2 : name
// -- lr : return address
// -- r0 : receiver
- // -- sp[0] : receiver
// -----------------------------------
Isolate* isolate = masm->isolate();
@@ -755,6 +706,20 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
}
+void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- r2 : name
+ // -- lr : return address
+ // -- r0 : receiver
+ // -----------------------------------
+
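+ // Push the receiver and the name, then let the runtime perform the load.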
+ __ mov(r3, r0);
+ __ Push(r3, r2);
+
+ __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
+}
+
+
static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
Register object,
Register key,
@@ -862,7 +827,7 @@ void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
__ mov(r0, r2);
__ Ret();
__ bind(&slow);
- GenerateMiss(masm, false);
+ GenerateMiss(masm, MISS);
}
@@ -891,7 +856,7 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
__ RecordWrite(r3, r6, r9, kLRHasNotBeenSaved, kDontSaveFPRegs);
__ Ret();
__ bind(&slow);
- GenerateMiss(masm, false);
+ GenerateMiss(masm, MISS);
}
@@ -922,10 +887,7 @@ void KeyedCallIC::GenerateNonStrictArguments(MacroAssembler* masm,
}
-Object* KeyedLoadIC_Miss(Arguments args);
-
-
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
+void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
// ---------- S t a t e --------------
// -- lr : return address
// -- r0 : key
@@ -938,7 +900,7 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
__ Push(r1, r0);
// Perform tail call to the entry.
- ExternalReference ref = force_generic
+ ExternalReference ref = miss_mode == MISS_FORCE_GENERIC
? ExternalReference(IC_Utility(kKeyedLoadIC_MissForceGeneric), isolate)
: ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
@@ -965,7 +927,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// -- r0 : key
// -- r1 : receiver
// -----------------------------------
- Label slow, check_string, index_smi, index_string, property_array_property;
+ Label slow, check_name, index_smi, index_name, property_array_property;
Label probe_dictionary, check_number_dictionary;
Register key = r0;
@@ -974,7 +936,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
Isolate* isolate = masm->isolate();
// Check that the key is a smi.
- __ JumpIfNotSmi(key, &check_string);
+ __ JumpIfNotSmi(key, &check_name);
__ bind(&index_smi);
// Now the key is known to be a smi. This place is also jumped to from below
// where a numeric string is converted to a smi.
@@ -1001,7 +963,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ LoadRoot(ip, Heap::kHashTableMapRootIndex);
__ cmp(r3, ip);
__ b(ne, &slow);
- __ mov(r2, Operand(r0, ASR, kSmiTagSize));
+ __ SmiUntag(r2, r0);
__ LoadFromNumberDictionary(&slow, r4, r0, r0, r2, r3, r5);
__ Ret();
@@ -1011,8 +973,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
1, r2, r3);
GenerateRuntimeGetProperty(masm);
- __ bind(&check_string);
- GenerateKeyStringCheck(masm, key, r2, r3, &index_string, &slow);
+ __ bind(&check_name);
+ GenerateKeyNameCheck(masm, key, r2, r3, &index_name, &slow);
GenerateKeyedLoadReceiverCheck(
masm, receiver, r2, r3, Map::kHasNamedInterceptor, &slow);
@@ -1026,15 +988,15 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ b(eq, &probe_dictionary);
// Load the map of the receiver, compute the keyed lookup cache hash
- // based on 32 bits of the map pointer and the string hash.
+ // based on 32 bits of the map pointer and the name hash.
__ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
__ mov(r3, Operand(r2, ASR, KeyedLookupCache::kMapHashShift));
- __ ldr(r4, FieldMemOperand(r0, String::kHashFieldOffset));
- __ eor(r3, r3, Operand(r4, ASR, String::kHashShift));
+ __ ldr(r4, FieldMemOperand(r0, Name::kHashFieldOffset));
+ __ eor(r3, r3, Operand(r4, ASR, Name::kHashShift));
int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask;
__ And(r3, r3, Operand(mask));
- // Load the key (consisting of map and symbol) from the cache and
+ // Load the key (consisting of map and unique name) from the cache and
// check for match.
Label load_in_object_property;
static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
@@ -1051,13 +1013,13 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ ldr(r5, MemOperand(r4, kPointerSize * 2, PostIndex));
__ cmp(r2, r5);
__ b(ne, &try_next_entry);
- __ ldr(r5, MemOperand(r4, -kPointerSize)); // Load symbol
+ __ ldr(r5, MemOperand(r4, -kPointerSize)); // Load name
__ cmp(r0, r5);
__ b(eq, &hit_on_nth_entry[i]);
__ bind(&try_next_entry);
}
- // Last entry: Load map and move r4 to symbol.
+ // Last entry: Load map and move r4 to name.
__ ldr(r5, MemOperand(r4, kPointerSize, PostIndex));
__ cmp(r2, r5);
__ b(ne, &slow);
@@ -1119,11 +1081,11 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
GenerateGlobalInstanceTypeCheck(masm, r2, &slow);
// Load the property to r0.
GenerateDictionaryLoad(masm, &slow, r3, r0, r0, r2, r4);
- __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(),
- 1, r2, r3);
+ __ IncrementCounter(
+ isolate->counters()->keyed_load_generic_symbol(), 1, r2, r3);
__ Ret();
- __ bind(&index_string);
+ __ bind(&index_name);
__ IndexFromHash(r3, key);
// Now jump to the place where smi keys are handled.
__ jmp(&index_smi);
@@ -1158,7 +1120,7 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
char_at_generator.GenerateSlow(masm, call_helper);
__ bind(&miss);
- GenerateMiss(masm, false);
+ GenerateMiss(masm, MISS);
}
@@ -1174,7 +1136,7 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
__ JumpIfSmi(r1, &slow);
// Check that the key is an array index, that is Uint32.
- __ tst(r0, Operand(kSmiTagMask | kSmiSignMask));
+ __ NonNegativeSmiTst(r0);
__ b(ne, &slow);
// Get the map of the receiver.
@@ -1198,11 +1160,11 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
1);
__ bind(&slow);
- GenerateMiss(masm, false);
+ GenerateMiss(masm, MISS);
}
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
+void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
// ---------- S t a t e --------------
// -- r0 : value
// -- r1 : key
@@ -1213,7 +1175,7 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
// Push receiver, key and value for runtime call.
__ Push(r2, r1, r0);
- ExternalReference ref = force_generic
+ ExternalReference ref = miss_mode == MISS_FORCE_GENERIC
? ExternalReference(IC_Utility(kKeyedStoreIC_MissForceGeneric),
masm->isolate())
: ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
@@ -1221,63 +1183,41 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
}
-void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
+void StoreIC::GenerateSlow(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- r0 : value
- // -- r1 : key
- // -- r2 : receiver
+ // -- r2 : key
+ // -- r1 : receiver
// -- lr : return address
// -----------------------------------
// Push receiver, key and value for runtime call.
- __ Push(r2, r1, r0);
+ __ Push(r1, r2, r0);
// The slow case calls into the runtime to complete the store without causing
// an IC miss that would otherwise cause a transition to the generic stub.
ExternalReference ref =
- ExternalReference(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
+ ExternalReference(IC_Utility(kStoreIC_Slow), masm->isolate());
__ TailCallExternalReference(ref, 3, 1);
}
-void KeyedStoreIC::GenerateTransitionElementsSmiToDouble(MacroAssembler* masm) {
+void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
// ---------- S t a t e --------------
+ // -- r0 : value
+ // -- r1 : key
// -- r2 : receiver
- // -- r3 : target map
// -- lr : return address
// -----------------------------------
- // Must return the modified receiver in r0.
- if (!FLAG_trace_elements_transitions) {
- Label fail;
- ElementsTransitionGenerator::GenerateSmiToDouble(masm, &fail);
- __ mov(r0, r2);
- __ Ret();
- __ bind(&fail);
- }
- __ push(r2);
- __ TailCallRuntime(Runtime::kTransitionElementsSmiToDouble, 1, 1);
-}
-
-
-void KeyedStoreIC::GenerateTransitionElementsDoubleToObject(
- MacroAssembler* masm) {
- // ---------- S t a t e --------------
- // -- r2 : receiver
- // -- r3 : target map
- // -- lr : return address
- // -----------------------------------
- // Must return the modified receiver in r0.
- if (!FLAG_trace_elements_transitions) {
- Label fail;
- ElementsTransitionGenerator::GenerateDoubleToObject(masm, &fail);
- __ mov(r0, r2);
- __ Ret();
- __ bind(&fail);
- }
+ // Push receiver, key and value for runtime call.
+ __ Push(r2, r1, r0);
- __ push(r2);
- __ TailCallRuntime(Runtime::kTransitionElementsDoubleToObject, 1, 1);
+ // The slow case calls into the runtime to complete the store without causing
+ // an IC miss that would otherwise cause a transition to the generic stub.
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
+ __ TailCallExternalReference(ref, 3, 1);
}
@@ -1339,8 +1279,7 @@ static void KeyedStoreGenerateGenericHelper(
}
// It's irrelevant whether array is smi-only or not when writing a smi.
__ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ add(address, address, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ str(value, MemOperand(address));
+ __ str(value, MemOperand::PointerAddressFromSmiKey(address, key));
__ Ret();
__ bind(&non_smi_value);
@@ -1356,7 +1295,7 @@ static void KeyedStoreGenerateGenericHelper(
__ str(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
}
__ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ add(address, address, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ add(address, address, Operand::PointerOffsetFromSmiKey(key));
__ str(value, MemOperand(address));
// Update write barrier for the elements array address.
__ mov(scratch_value, value); // Preserve the value which is returned.
@@ -1377,14 +1316,7 @@ static void KeyedStoreGenerateGenericHelper(
__ b(ne, slow);
}
__ bind(&fast_double_without_map_check);
- __ StoreNumberToDoubleElements(value,
- key,
- receiver,
- elements, // Overwritten.
- r3, // Scratch regs...
- r4,
- r5,
- r6,
+ __ StoreNumberToDoubleElements(value, key, elements, r3, d0,
&transition_double_elements);
if (increment_length == kIncrementLength) {
// Add 1 to receiver->length.
@@ -1407,7 +1339,9 @@ static void KeyedStoreGenerateGenericHelper(
r4,
slow);
ASSERT(receiver_map.is(r3)); // Transition code expects map in r3
- ElementsTransitionGenerator::GenerateSmiToDouble(masm, slow);
+ AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS,
+ FAST_DOUBLE_ELEMENTS);
+ ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, slow);
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ jmp(&fast_double_without_map_check);
@@ -1419,7 +1353,9 @@ static void KeyedStoreGenerateGenericHelper(
r4,
slow);
ASSERT(receiver_map.is(r3)); // Transition code expects map in r3
- ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm);
+ mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
+ ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm, mode,
+ slow);
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ jmp(&finish_object_store);
@@ -1433,7 +1369,8 @@ static void KeyedStoreGenerateGenericHelper(
r4,
slow);
ASSERT(receiver_map.is(r3)); // Transition code expects map in r3
- ElementsTransitionGenerator::GenerateDoubleToObject(masm, slow);
+ mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
+ ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, slow);
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ jmp(&finish_object_store);
}
@@ -1457,7 +1394,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
Register receiver = r2;
Register receiver_map = r3;
Register elements_map = r6;
- Register elements = r7; // Elements array of the receiver.
+ Register elements = r9; // Elements array of the receiver.
// r4 and r5 are used as general scratch registers.
// Check that the key is a smi.
@@ -1549,10 +1486,11 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
// -----------------------------------
// Get the receiver from the stack and probe the stub cache.
- Code::Flags flags =
- Code::ComputeFlags(Code::STORE_IC, MONOMORPHIC, strict_mode);
+ Code::Flags flags = Code::ComputeFlags(
+ Code::HANDLER, MONOMORPHIC, strict_mode,
+ Code::NORMAL, Code::STORE_IC);
- Isolate::Current()->stub_cache()->GenerateProbe(
+ masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, r1, r2, r3, r4, r5, r6);
// Cache miss: Jump to runtime.
@@ -1577,62 +1515,6 @@ void StoreIC::GenerateMiss(MacroAssembler* masm) {
}
-void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : receiver
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- //
- // This accepts as a receiver anything JSArray::SetElementsLength accepts
- // (currently anything except for external arrays which means anything with
- // elements of FixedArray type). Value must be a number, but only smis are
- // accepted as the most common case.
-
- Label miss;
-
- Register receiver = r1;
- Register value = r0;
- Register scratch = r3;
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Check that the object is a JS array.
- __ CompareObjectType(receiver, scratch, scratch, JS_ARRAY_TYPE);
- __ b(ne, &miss);
-
- // Check that elements are FixedArray.
- // We rely on StoreIC_ArrayLength below to deal with all types of
- // fast elements (including COW).
- __ ldr(scratch, FieldMemOperand(receiver, JSArray::kElementsOffset));
- __ CompareObjectType(scratch, scratch, scratch, FIXED_ARRAY_TYPE);
- __ b(ne, &miss);
-
- // Check that the array has fast properties, otherwise the length
- // property might have been redefined.
- __ ldr(scratch, FieldMemOperand(receiver, JSArray::kPropertiesOffset));
- __ ldr(scratch, FieldMemOperand(scratch, FixedArray::kMapOffset));
- __ CompareRoot(scratch, Heap::kHashTableMapRootIndex);
- __ b(eq, &miss);
-
- // Check that value is a smi.
- __ JumpIfNotSmi(value, &miss);
-
- // Prepare tail call to StoreIC_ArrayLength.
- __ Push(receiver, value);
-
- ExternalReference ref =
- ExternalReference(IC_Utility(kStoreIC_ArrayLength), masm->isolate());
- __ TailCallExternalReference(ref, 2, 1);
-
- __ bind(&miss);
-
- GenerateMiss(masm);
-}
-
-
void StoreIC::GenerateNormal(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : value
@@ -1642,7 +1524,7 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
// -----------------------------------
Label miss;
- GenerateStringDictionaryReceiverCheck(masm, r1, r3, r4, r5, &miss);
+ GenerateNameDictionaryReceiverCheck(masm, r1, r3, r4, r5, &miss);
GenerateDictionaryStore(masm, &miss, r3, r2, r0, r4, r5);
Counters* counters = masm->isolate()->counters();
@@ -1656,8 +1538,8 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
}
-void StoreIC::GenerateGlobalProxy(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
+void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
+ StrictModeFlag strict_mode) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : receiver
@@ -1699,36 +1581,15 @@ Condition CompareIC::ComputeCondition(Token::Value op) {
}
-void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
- HandleScope scope;
- Handle<Code> rewritten;
- State previous_state = GetState();
- State state = TargetState(previous_state, false, x, y);
- if (state == GENERIC) {
- CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, r1, r0);
- rewritten = stub.GetCode();
- } else {
- ICCompareStub stub(op_, state);
- if (state == KNOWN_OBJECTS) {
- stub.set_known_map(Handle<Map>(Handle<JSObject>::cast(x)->map()));
- }
- rewritten = stub.GetCode();
- }
- set_target(*rewritten);
-
-#ifdef DEBUG
- if (FLAG_trace_ic) {
- PrintF("[CompareIC (%s->%s)#%s]\n",
- GetStateName(previous_state),
- GetStateName(state),
- Token::Name(op_));
- }
-#endif
+bool CompareIC::HasInlinedSmiCode(Address address) {
+ // The address of the instruction following the call.
+ Address cmp_instruction_address =
+ Assembler::return_address_from_call_start(address);
- // Activate inlined smi code.
- if (previous_state == UNINITIALIZED) {
- PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK);
- }
+ // If the instruction following the call is not a cmp rx, #yyy, nothing
+ // was inlined.
+ Instr instr = Assembler::instr_at(cmp_instruction_address);
+ return Assembler::IsCmpImmediate(instr);
}
diff --git a/deps/v8/src/arm/lithium-arm.cc b/deps/v8/src/arm/lithium-arm.cc
index 21c549f17..615c835be 100644
--- a/deps/v8/src/arm/lithium-arm.cc
+++ b/deps/v8/src/arm/lithium-arm.cc
@@ -30,6 +30,7 @@
#include "lithium-allocator-inl.h"
#include "arm/lithium-arm.h"
#include "arm/lithium-codegen-arm.h"
+#include "hydrogen-osr.h"
namespace v8 {
namespace internal {
@@ -41,24 +42,6 @@ namespace internal {
LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
#undef DEFINE_COMPILE
-LOsrEntry::LOsrEntry() {
- for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
- register_spills_[i] = NULL;
- }
- for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; ++i) {
- double_register_spills_[i] = NULL;
- }
-}
-
-
-void LOsrEntry::MarkSpilledRegister(int allocation_index,
- LOperand* spill_operand) {
- ASSERT(spill_operand->IsStackSlot());
- ASSERT(register_spills_[allocation_index] == NULL);
- register_spills_[allocation_index] = spill_operand;
-}
-
-
#ifdef DEBUG
void LInstruction::VerifyCall() {
// Call instructions can use only fixed registers as temporaries and
@@ -81,14 +64,6 @@ void LInstruction::VerifyCall() {
#endif
-void LOsrEntry::MarkSpilledDoubleRegister(int allocation_index,
- LOperand* spill_operand) {
- ASSERT(spill_operand->IsDoubleStackSlot());
- ASSERT(double_register_spills_[allocation_index] == NULL);
- double_register_spills_[allocation_index] = spill_operand;
-}
-
-
void LInstruction::PrintTo(StringStream* stream) {
stream->Add("%s ", this->Mnemonic());
@@ -112,7 +87,11 @@ void LInstruction::PrintDataTo(StringStream* stream) {
stream->Add("= ");
for (int i = 0; i < InputCount(); i++) {
if (i > 0) stream->Add(" ");
- InputAt(i)->PrintTo(stream);
+ if (InputAt(i) == NULL) {
+ stream->Add("NULL");
+ } else {
+ InputAt(i)->PrintTo(stream);
+ }
}
}
@@ -177,6 +156,7 @@ const char* LArithmeticT::Mnemonic() const {
case Token::BIT_AND: return "bit-and-t";
case Token::BIT_OR: return "bit-or-t";
case Token::BIT_XOR: return "bit-xor-t";
+ case Token::ROR: return "ror-t";
case Token::SHL: return "shl-t";
case Token::SAR: return "sar-t";
case Token::SHR: return "shr-t";
@@ -187,6 +167,11 @@ const char* LArithmeticT::Mnemonic() const {
}
+bool LGoto::HasInterestingComment(LCodeGen* gen) const {
+ return !gen->IsNextEmittedBlock(block_id());
+}
+
+
void LGoto::PrintDataTo(StringStream* stream) {
stream->Add("B%d", block_id());
}
@@ -198,7 +183,7 @@ void LBranch::PrintDataTo(StringStream* stream) {
}
-void LCmpIDAndBranch::PrintDataTo(StringStream* stream) {
+void LCompareNumericAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if ");
left()->PrintTo(stream);
stream->Add(" %s ", Token::String(op()));
@@ -207,15 +192,6 @@ void LCmpIDAndBranch::PrintDataTo(StringStream* stream) {
}
-void LIsNilAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if ");
- value()->PrintTo(stream);
- stream->Add(kind() == kStrictEquality ? " === " : " == ");
- stream->Add(nil() == kNullValue ? "null" : "undefined");
- stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
void LIsObjectAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if is_object(");
value()->PrintTo(stream);
@@ -285,14 +261,23 @@ void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
}
-void LCallConstantFunction::PrintDataTo(StringStream* stream) {
- stream->Add("#%d / ", arity());
+void LStoreCodeEntry::PrintDataTo(StringStream* stream) {
+ stream->Add(" = ");
+ function()->PrintTo(stream);
+ stream->Add(".code_entry = ");
+ code_object()->PrintTo(stream);
}
-void LUnaryMathOperation::PrintDataTo(StringStream* stream) {
- stream->Add("/%s ", hydrogen()->OpName());
- value()->PrintTo(stream);
+void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
+ stream->Add(" = ");
+ base_object()->PrintTo(stream);
+ stream->Add(" + %d", offset());
+}
+
+
+void LCallConstantFunction::PrintDataTo(StringStream* stream) {
+ stream->Add("#%d / ", arity());
}
@@ -345,6 +330,15 @@ void LCallNew::PrintDataTo(StringStream* stream) {
}
+void LCallNewArray::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ constructor()->PrintTo(stream);
+ stream->Add(" #%d / ", arity());
+ ElementsKind kind = hydrogen()->elements_kind();
+ stream->Add(" (%s) ", ElementsKindToString(kind));
+}
+
+
void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
arguments()->PrintTo(stream);
stream->Add(" length ");
@@ -356,8 +350,7 @@ void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
void LStoreNamedField::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
- stream->Add(".");
- stream->Add(*String::cast(*name())->ToCString());
+ hydrogen()->access().PrintTo(stream);
stream->Add(" <- ");
value()->PrintTo(stream);
}
@@ -372,21 +365,35 @@ void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
}
-void LStoreKeyedFastElement::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
+void LLoadKeyed::PrintDataTo(StringStream* stream) {
+ elements()->PrintTo(stream);
stream->Add("[");
key()->PrintTo(stream);
- stream->Add("] <- ");
- value()->PrintTo(stream);
+ if (hydrogen()->IsDehoisted()) {
+ stream->Add(" + %d]", additional_index());
+ } else {
+ stream->Add("]");
+ }
}
-void LStoreKeyedFastDoubleElement::PrintDataTo(StringStream* stream) {
+void LStoreKeyed::PrintDataTo(StringStream* stream) {
elements()->PrintTo(stream);
stream->Add("[");
key()->PrintTo(stream);
- stream->Add("] <- ");
- value()->PrintTo(stream);
+ if (hydrogen()->IsDehoisted()) {
+ stream->Add(" + %d] <-", additional_index());
+ } else {
+ stream->Add("] <- ");
+ }
+
+ if (value() == NULL) {
+ ASSERT(hydrogen()->IsConstantHoleStore() &&
+ hydrogen()->value()->representation().IsDouble());
+ stream->Add("<the hole(nan)>");
+ } else {
+ value()->PrintTo(stream);
+ }
}
@@ -405,18 +412,19 @@ void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
}
-int LPlatformChunk::GetNextSpillIndex(bool is_double) {
+int LPlatformChunk::GetNextSpillIndex(RegisterKind kind) {
// Skip a slot if allocating a double-width slot.
- if (is_double) spill_slot_count_++;
+ if (kind == DOUBLE_REGISTERS) spill_slot_count_++;
return spill_slot_count_++;
}
-LOperand* LPlatformChunk::GetNextSpillSlot(bool is_double) {
- int index = GetNextSpillIndex(is_double);
- if (is_double) {
+LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) {
+ int index = GetNextSpillIndex(kind);
+ if (kind == DOUBLE_REGISTERS) {
return LDoubleStackSlot::Create(index, zone());
} else {
+ ASSERT(kind == GENERAL_REGISTERS);
return LStackSlot::Create(index, zone());
}
}
@@ -425,8 +433,17 @@ LOperand* LPlatformChunk::GetNextSpillSlot(bool is_double) {
LPlatformChunk* LChunkBuilder::Build() {
ASSERT(is_unused());
chunk_ = new(zone()) LPlatformChunk(info(), graph());
- HPhase phase("L_Building chunk", chunk_);
+ LPhase phase("L_Building chunk", chunk_);
status_ = BUILDING;
+
+ // If compiling for OSR, reserve space for the unoptimized frame,
+ // which will be subsumed into this frame.
+ if (graph()->has_osr()) {
+ for (int i = graph()->osr()->UnoptimizedFrameSlots(); i > 0; i--) {
+ chunk_->GetNextSpillIndex(GENERAL_REGISTERS);
+ }
+ }
+
const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
for (int i = 0; i < blocks->length(); i++) {
HBasicBlock* next = NULL;
@@ -439,7 +456,7 @@ LPlatformChunk* LChunkBuilder::Build() {
}
-void LChunkBuilder::Abort(const char* reason) {
+void LChunkBuilder::Abort(BailoutReason reason) {
info()->set_bailout_reason(reason);
status_ = ABORTED;
}
@@ -523,6 +540,11 @@ LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) {
}
+LOperand* LChunkBuilder::UseConstant(HValue* value) {
+ return chunk_->DefineConstantOperand(HConstant::cast(value));
+}
+
+
LOperand* LChunkBuilder::UseAny(HValue* value) {
return value->IsConstant()
? chunk_->DefineConstantOperand(HConstant::cast(value))
@@ -590,8 +612,10 @@ LInstruction* LChunkBuilder::DefineFixedDouble(
LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
HEnvironment* hydrogen_env = current_block_->last_environment();
int argument_index_accumulator = 0;
+ ZoneList<HValue*> objects_to_materialize(0, zone());
instr->set_environment(CreateEnvironment(hydrogen_env,
- &argument_index_accumulator));
+ &argument_index_accumulator,
+ &objects_to_materialize));
return instr;
}
@@ -599,6 +623,7 @@ LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
HInstruction* hinstr,
CanDeoptimize can_deoptimize) {
+ info()->MarkAsNonDeferredCalling();
#ifdef DEBUG
instr->VerifyCall();
#endif
@@ -631,7 +656,7 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
ASSERT(!instr->HasPointerMap());
- instr->set_pointer_map(new(zone()) LPointerMap(position_, zone()));
+ instr->set_pointer_map(new(zone()) LPointerMap(zone()));
return instr;
}
@@ -639,8 +664,12 @@ LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
LUnallocated* LChunkBuilder::TempRegister() {
LUnallocated* operand =
new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
- operand->set_virtual_register(allocator_->GetVirtualRegister());
- if (!allocator_->AllocationOk()) Abort("Not enough virtual registers.");
+ int vreg = allocator_->GetVirtualRegister();
+ if (!allocator_->AllocationOk()) {
+ Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister);
+ vreg = 0;
+ }
+ operand->set_virtual_register(vreg);
return operand;
}
@@ -664,8 +693,14 @@ LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
}
-LInstruction* LChunkBuilder::DoSoftDeoptimize(HSoftDeoptimize* instr) {
- return AssignEnvironment(new(zone()) LDeoptimize);
+LInstruction* LChunkBuilder::DoDummyUse(HDummyUse* instr) {
+ return DefineAsRegister(new(zone()) LDummyUse(UseAny(instr->value())));
+}
+
+
+LInstruction* LChunkBuilder::DoEnvironmentMarker(HEnvironmentMarker* instr) {
+ UNREACHABLE();
+ return NULL;
}
@@ -676,51 +711,44 @@ LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
LInstruction* LChunkBuilder::DoShift(Token::Value op,
HBitwiseBinaryOperation* instr) {
- if (instr->representation().IsTagged()) {
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
-
- LOperand* left = UseFixed(instr->left(), r1);
- LOperand* right = UseFixed(instr->right(), r0);
- LArithmeticT* result = new(zone()) LArithmeticT(op, left, right);
- return MarkAsCall(DefineFixed(result, r0), instr);
- }
-
- ASSERT(instr->representation().IsInteger32());
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->left());
-
- HValue* right_value = instr->right();
- LOperand* right = NULL;
- int constant_value = 0;
- if (right_value->IsConstant()) {
- HConstant* constant = HConstant::cast(right_value);
- right = chunk_->DefineConstantOperand(constant);
- constant_value = constant->Integer32Value() & 0x1f;
- } else {
- right = UseRegisterAtStart(right_value);
- }
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* left = UseRegisterAtStart(instr->left());
- // Shift operations can only deoptimize if we do a logical shift
- // by 0 and the result cannot be truncated to int32.
- bool does_deopt = false;
- if (op == Token::SHR && constant_value == 0) {
- if (FLAG_opt_safe_uint32_operations) {
- does_deopt = !instr->CheckFlag(HInstruction::kUint32);
+ HValue* right_value = instr->right();
+ LOperand* right = NULL;
+ int constant_value = 0;
+ bool does_deopt = false;
+ if (right_value->IsConstant()) {
+ HConstant* constant = HConstant::cast(right_value);
+ right = chunk_->DefineConstantOperand(constant);
+ constant_value = constant->Integer32Value() & 0x1f;
+ // Left shifts can deoptimize if we shift by > 0 and the result cannot be
+ // truncated to smi.
+ if (instr->representation().IsSmi() && constant_value > 0) {
+ does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToSmi);
+ }
} else {
- for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
- if (!it.value()->CheckFlag(HValue::kTruncatingToInt32)) {
- does_deopt = true;
- break;
- }
+ right = UseRegisterAtStart(right_value);
+ }
+
+ // Shift operations can only deoptimize if we do a logical shift
+ // by 0 and the result cannot be truncated to int32.
+ if (op == Token::SHR && constant_value == 0) {
+ if (FLAG_opt_safe_uint32_operations) {
+ does_deopt = !instr->CheckFlag(HInstruction::kUint32);
+ } else {
+ does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToInt32);
}
}
- }
- LInstruction* result =
- DefineAsRegister(new(zone()) LShiftI(op, left, right, does_deopt));
- return does_deopt ? AssignEnvironment(result) : result;
+ LInstruction* result =
+ DefineAsRegister(new(zone()) LShiftI(op, left, right, does_deopt));
+ return does_deopt ? AssignEnvironment(result) : result;
+ } else {
+ return DoArithmeticT(op, instr);
+ }
}
@@ -729,29 +757,34 @@ LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
ASSERT(instr->representation().IsDouble());
ASSERT(instr->left()->representation().IsDouble());
ASSERT(instr->right()->representation().IsDouble());
- ASSERT(op != Token::MOD);
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
- LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
- return DefineAsRegister(result);
+ if (op == Token::MOD) {
+ LOperand* left = UseFixedDouble(instr->left(), d1);
+ LOperand* right = UseFixedDouble(instr->right(), d2);
+ LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
+ // We call a C function for double modulo. It can't trigger a GC. We need
+ // to use a fixed result register for the call.
+ // TODO(fschneider): Allow any register as input registers.
+ return MarkAsCall(DefineFixedDouble(result, d1), instr);
+ } else {
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseRegisterAtStart(instr->right());
+ LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
+ return DefineAsRegister(result);
+ }
}
LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
- HArithmeticBinaryOperation* instr) {
- ASSERT(op == Token::ADD ||
- op == Token::DIV ||
- op == Token::MOD ||
- op == Token::MUL ||
- op == Token::SUB);
+ HBinaryOperation* instr) {
HValue* left = instr->left();
HValue* right = instr->right();
ASSERT(left->representation().IsTagged());
ASSERT(right->representation().IsTagged());
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* left_operand = UseFixed(left, r1);
LOperand* right_operand = UseFixed(right, r0);
LArithmeticT* result =
- new(zone()) LArithmeticT(op, left_operand, right_operand);
+ new(zone()) LArithmeticT(op, context, left_operand, right_operand);
return MarkAsCall(DefineFixed(result, r0), instr);
}
@@ -789,11 +822,15 @@ void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
HEnvironment* last_environment = pred->last_environment();
for (int i = 0; i < block->phis()->length(); ++i) {
HPhi* phi = block->phis()->at(i);
- last_environment->SetValueAt(phi->merged_index(), phi);
+ if (phi->HasMergedIndex()) {
+ last_environment->SetValueAt(phi->merged_index(), phi);
+ }
}
for (int i = 0; i < block->deleted_phis()->length(); ++i) {
- last_environment->SetValueAt(block->deleted_phis()->at(i),
- graph_->GetConstantUndefined());
+ if (block->deleted_phis()->at(i) < last_environment->length()) {
+ last_environment->SetValueAt(block->deleted_phis()->at(i),
+ graph_->GetConstantUndefined());
+ }
}
block->UpdateEnvironment(last_environment);
// Pick up the outgoing argument count of one of the predecessors.
@@ -823,16 +860,68 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
HInstruction* old_current = current_instruction_;
current_instruction_ = current;
if (current->has_position()) position_ = current->position();
- LInstruction* instr = current->CompileToLithium(this);
+
+ LInstruction* instr = NULL;
+ if (current->CanReplaceWithDummyUses()) {
+ if (current->OperandCount() == 0) {
+ instr = DefineAsRegister(new(zone()) LDummy());
+ } else {
+ instr = DefineAsRegister(new(zone())
+ LDummyUse(UseAny(current->OperandAt(0))));
+ }
+ for (int i = 1; i < current->OperandCount(); ++i) {
+ LInstruction* dummy =
+ new(zone()) LDummyUse(UseAny(current->OperandAt(i)));
+ dummy->set_hydrogen_value(current);
+ chunk_->AddInstruction(dummy, current_block_);
+ }
+ } else {
+ instr = current->CompileToLithium(this);
+ }
+
+ argument_count_ += current->argument_delta();
+ ASSERT(argument_count_ >= 0);
if (instr != NULL) {
+ // Associate the hydrogen instruction first, since we may need it for
+ // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below.
+ instr->set_hydrogen_value(current);
+
+#if DEBUG
+ // Make sure that the lithium instruction has either no fixed register
+ // constraints in temps or the result OR no uses that are only used at
+ // start. If this invariant doesn't hold, the register allocator can decide
+ // to insert a split of a range immediately before the instruction due to an
+ // already allocated register needing to be used for the instruction's fixed
+ // register constraint. In this case, the register allocator won't see an
+ // interference between the split child and the use-at-start (it would if
+ // it was just a plain use), so it is free to move the split child into
+ // the same register that is used for the use-at-start.
+ // See https://code.google.com/p/chromium/issues/detail?id=201590
+ if (!(instr->ClobbersRegisters() && instr->ClobbersDoubleRegisters())) {
+ int fixed = 0;
+ int used_at_start = 0;
+ for (UseIterator it(instr); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
+ if (operand->IsUsedAtStart()) ++used_at_start;
+ }
+ if (instr->Output() != NULL) {
+ if (LUnallocated::cast(instr->Output())->HasFixedPolicy()) ++fixed;
+ }
+ for (TempIterator it(instr); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
+ if (operand->HasFixedPolicy()) ++fixed;
+ }
+ ASSERT(fixed == 0 || used_at_start == 0);
+ }
+#endif
+
if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
instr = AssignPointerMap(instr);
}
if (FLAG_stress_environments && !instr->HasEnvironment()) {
instr = AssignEnvironment(instr);
}
- instr->set_hydrogen_value(current);
chunk_->AddInstruction(instr, current_block_);
}
current_instruction_ = old_current;
@@ -841,15 +930,17 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
LEnvironment* LChunkBuilder::CreateEnvironment(
HEnvironment* hydrogen_env,
- int* argument_index_accumulator) {
+ int* argument_index_accumulator,
+ ZoneList<HValue*>* objects_to_materialize) {
if (hydrogen_env == NULL) return NULL;
- LEnvironment* outer =
- CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
+ LEnvironment* outer = CreateEnvironment(hydrogen_env->outer(),
+ argument_index_accumulator,
+ objects_to_materialize);
BailoutId ast_id = hydrogen_env->ast_id();
ASSERT(!ast_id.IsNone() ||
hydrogen_env->frame_type() != JS_FUNCTION);
- int value_count = hydrogen_env->length();
+ int value_count = hydrogen_env->length() - hydrogen_env->specials_count();
LEnvironment* result = new(zone()) LEnvironment(
hydrogen_env->closure(),
hydrogen_env->frame_type(),
@@ -861,13 +952,15 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
hydrogen_env->entry(),
zone());
int argument_index = *argument_index_accumulator;
- for (int i = 0; i < value_count; ++i) {
+ int object_index = objects_to_materialize->length();
+ for (int i = 0; i < hydrogen_env->length(); ++i) {
if (hydrogen_env->is_special_index(i)) continue;
+ LOperand* op;
HValue* value = hydrogen_env->values()->at(i);
- LOperand* op = NULL;
- if (value->IsArgumentsObject()) {
- op = NULL;
+ if (value->IsArgumentsObject() || value->IsCapturedObject()) {
+ objects_to_materialize->Add(value, zone());
+ op = LEnvironment::materialization_marker();
} else if (value->IsPushArgument()) {
op = new(zone()) LArgument(argument_index++);
} else {
@@ -878,6 +971,39 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
value->CheckFlag(HInstruction::kUint32));
}
+ for (int i = object_index; i < objects_to_materialize->length(); ++i) {
+ HValue* object_to_materialize = objects_to_materialize->at(i);
+ int previously_materialized_object = -1;
+ for (int prev = 0; prev < i; ++prev) {
+ if (objects_to_materialize->at(prev) == objects_to_materialize->at(i)) {
+ previously_materialized_object = prev;
+ break;
+ }
+ }
+ int length = object_to_materialize->OperandCount();
+ bool is_arguments = object_to_materialize->IsArgumentsObject();
+ if (previously_materialized_object >= 0) {
+ result->AddDuplicateObject(previously_materialized_object);
+ continue;
+ } else {
+ result->AddNewObject(is_arguments ? length - 1 : length, is_arguments);
+ }
+ for (int i = is_arguments ? 1 : 0; i < length; ++i) {
+ LOperand* op;
+ HValue* value = object_to_materialize->OperandAt(i);
+ if (value->IsArgumentsObject() || value->IsCapturedObject()) {
+ objects_to_materialize->Add(value, zone());
+ op = LEnvironment::materialization_marker();
+ } else {
+ ASSERT(!value->IsPushArgument());
+ op = UseAny(value);
+ }
+ result->AddValue(op,
+ value->representation(),
+ value->CheckFlag(HInstruction::kUint32));
+ }
+ }
+
if (hydrogen_env->frame_type() == JS_FUNCTION) {
*argument_index_accumulator = argument_index;
}
@@ -887,31 +1013,34 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
- return new(zone()) LGoto(instr->FirstSuccessor()->block_id());
+ return new(zone()) LGoto(instr->FirstSuccessor());
}
LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
- HValue* value = instr->value();
- if (value->EmitAtUses()) {
- HBasicBlock* successor = HConstant::cast(value)->ToBoolean()
- ? instr->FirstSuccessor()
- : instr->SecondSuccessor();
- return new(zone()) LGoto(successor->block_id());
- }
+ LInstruction* goto_instr = CheckElideControlInstruction(instr);
+ if (goto_instr != NULL) return goto_instr;
+ HValue* value = instr->value();
LBranch* result = new(zone()) LBranch(UseRegister(value));
// Tagged values that are not known smis or booleans require a
- // deoptimization environment.
+ // deoptimization environment. If the instruction is generic, no
+ // environment is needed since all cases are handled.
Representation rep = value->representation();
HType type = value->type();
- if (rep.IsTagged() && !type.IsSmi() && !type.IsBoolean()) {
+ ToBooleanStub::Types expected = instr->expected_input_types();
+ if (rep.IsTagged() && !type.IsSmi() && !type.IsBoolean() &&
+ !expected.IsGeneric()) {
return AssignEnvironment(result);
}
return result;
}
+LInstruction* LChunkBuilder::DoDebugBreak(HDebugBreak* instr) {
+ return new(zone()) LDebugBreak();
+}
+
LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
ASSERT(instr->value()->representation().IsTagged());
@@ -922,20 +1051,23 @@ LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* instr) {
+ info()->MarkAsRequiresFrame();
LOperand* value = UseRegister(instr->value());
return DefineAsRegister(new(zone()) LArgumentsLength(value));
}
LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
+ info()->MarkAsRequiresFrame();
return DefineAsRegister(new(zone()) LArgumentsElements);
}
LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LInstanceOf* result =
- new(zone()) LInstanceOf(UseFixed(instr->left(), r0),
- UseFixed(instr->right(), r1));
+ new(zone()) LInstanceOf(context, UseFixed(instr->left(), r0),
+ UseFixed(instr->right(), r1));
return MarkAsCall(DefineFixed(result, r0), instr);
}
@@ -943,8 +1075,10 @@ LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
HInstanceOfKnownGlobal* instr) {
LInstanceOfKnownGlobal* result =
- new(zone()) LInstanceOfKnownGlobal(UseFixed(instr->left(), r0),
- FixedTemp(r4));
+ new(zone()) LInstanceOfKnownGlobal(
+ UseFixed(instr->context(), cp),
+ UseFixed(instr->left(), r0),
+ FixedTemp(r4));
return MarkAsCall(DefineFixed(result, r0), instr);
}
@@ -971,12 +1105,28 @@ LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
- ++argument_count_;
LOperand* argument = Use(instr->argument());
return new(zone()) LPushArgument(argument);
}
+LInstruction* LChunkBuilder::DoStoreCodeEntry(
+ HStoreCodeEntry* store_code_entry) {
+ LOperand* function = UseRegister(store_code_entry->function());
+ LOperand* code_object = UseTempRegister(store_code_entry->code_object());
+ return new(zone()) LStoreCodeEntry(function, code_object);
+}
+
+
+LInstruction* LChunkBuilder::DoInnerAllocatedObject(
+ HInnerAllocatedObject* inner_object) {
+ LOperand* base_object = UseRegisterAtStart(inner_object->base_object());
+ LInnerAllocatedObject* result =
+ new(zone()) LInnerAllocatedObject(base_object);
+ return DefineAsRegister(result);
+}
+
+
LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
return instr->HasNoUses()
? NULL
@@ -985,7 +1135,13 @@ LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
LInstruction* LChunkBuilder::DoContext(HContext* instr) {
- return instr->HasNoUses() ? NULL : DefineAsRegister(new(zone()) LContext);
+ if (instr->HasNoUses()) return NULL;
+
+ if (info()->IsStub()) {
+ return DefineFixed(new(zone()) LContext, cp);
+ }
+
+ return DefineAsRegister(new(zone()) LContext);
}
@@ -996,7 +1152,8 @@ LInstruction* LChunkBuilder::DoOuterContext(HOuterContext* instr) {
LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
- return MarkAsCall(new(zone()) LDeclareGlobals, instr);
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(new(zone()) LDeclareGlobals(context), instr);
}
@@ -1014,97 +1171,176 @@ LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) {
LInstruction* LChunkBuilder::DoCallConstantFunction(
HCallConstantFunction* instr) {
- argument_count_ -= instr->argument_count();
return MarkAsCall(DefineFixed(new(zone()) LCallConstantFunction, r0), instr);
}
LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* function = UseFixed(instr->function(), r1);
- argument_count_ -= instr->argument_count();
- LInvokeFunction* result = new(zone()) LInvokeFunction(function);
+ LInvokeFunction* result = new(zone()) LInvokeFunction(context, function);
return MarkAsCall(DefineFixed(result, r0), instr, CANNOT_DEOPTIMIZE_EAGERLY);
}
LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
- BuiltinFunctionId op = instr->op();
- if (op == kMathLog || op == kMathSin || op == kMathCos || op == kMathTan) {
- LOperand* input = UseFixedDouble(instr->value(), d2);
- LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input, NULL);
- return MarkAsCall(DefineFixedDouble(result, d2), instr);
- } else if (op == kMathPowHalf) {
- LOperand* input = UseFixedDouble(instr->value(), d2);
- LOperand* temp = FixedTemp(d3);
- LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input, temp);
- return DefineFixedDouble(result, d2);
- } else {
- LOperand* input = UseRegisterAtStart(instr->value());
-
- LOperand* temp = (op == kMathRound) ? FixedTemp(d3) : NULL;
- LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input, temp);
- switch (op) {
- case kMathAbs:
- return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
- case kMathFloor:
- return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
- case kMathSqrt:
- return DefineAsRegister(result);
- case kMathRound:
- return AssignEnvironment(DefineAsRegister(result));
- default:
- UNREACHABLE();
- return NULL;
- }
+ switch (instr->op()) {
+ case kMathFloor: return DoMathFloor(instr);
+ case kMathRound: return DoMathRound(instr);
+ case kMathAbs: return DoMathAbs(instr);
+ case kMathLog: return DoMathLog(instr);
+ case kMathSin: return DoMathSin(instr);
+ case kMathCos: return DoMathCos(instr);
+ case kMathTan: return DoMathTan(instr);
+ case kMathExp: return DoMathExp(instr);
+ case kMathSqrt: return DoMathSqrt(instr);
+ case kMathPowHalf: return DoMathPowHalf(instr);
+ default:
+ UNREACHABLE();
+ return NULL;
}
}
+LInstruction* LChunkBuilder::DoMathFloor(HUnaryMathOperation* instr) {
+ LOperand* input = UseRegister(instr->value());
+ LMathFloor* result = new(zone()) LMathFloor(input);
+ return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+}
+
+
+LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) {
+ LOperand* input = UseRegister(instr->value());
+ LOperand* temp = FixedTemp(d3);
+ LMathRound* result = new(zone()) LMathRound(input, temp);
+ return AssignEnvironment(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) {
+ Representation r = instr->value()->representation();
+ LOperand* context = (r.IsDouble() || r.IsSmiOrInteger32())
+ ? NULL
+ : UseFixed(instr->context(), cp);
+ LOperand* input = UseRegister(instr->value());
+ LMathAbs* result = new(zone()) LMathAbs(context, input);
+ return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+}
+
+
+LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) {
+ LOperand* input = UseFixedDouble(instr->value(), d2);
+ LMathLog* result = new(zone()) LMathLog(input);
+ return MarkAsCall(DefineFixedDouble(result, d2), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoMathSin(HUnaryMathOperation* instr) {
+ LOperand* input = UseFixedDouble(instr->value(), d2);
+ LMathSin* result = new(zone()) LMathSin(input);
+ return MarkAsCall(DefineFixedDouble(result, d2), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoMathCos(HUnaryMathOperation* instr) {
+ LOperand* input = UseFixedDouble(instr->value(), d2);
+ LMathCos* result = new(zone()) LMathCos(input);
+ return MarkAsCall(DefineFixedDouble(result, d2), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoMathTan(HUnaryMathOperation* instr) {
+ LOperand* input = UseFixedDouble(instr->value(), d2);
+ LMathTan* result = new(zone()) LMathTan(input);
+ return MarkAsCall(DefineFixedDouble(result, d2), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->value()->representation().IsDouble());
+ LOperand* input = UseRegister(instr->value());
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LOperand* double_temp = FixedTemp(d3); // Chosen by fair dice roll.
+ LMathExp* result = new(zone()) LMathExp(input, double_temp, temp1, temp2);
+ return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoMathSqrt(HUnaryMathOperation* instr) {
+ LOperand* input = UseRegister(instr->value());
+ LMathSqrt* result = new(zone()) LMathSqrt(input);
+ return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) {
+ LOperand* input = UseFixedDouble(instr->value(), d2);
+ LOperand* temp = FixedTemp(d3);
+ LMathPowHalf* result = new(zone()) LMathPowHalf(input, temp);
+ return DefineFixedDouble(result, d2);
+}
+
+
LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) {
ASSERT(instr->key()->representation().IsTagged());
- argument_count_ -= instr->argument_count();
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* key = UseFixed(instr->key(), r2);
- return MarkAsCall(DefineFixed(new(zone()) LCallKeyed(key), r0), instr);
+ return MarkAsCall(
+ DefineFixed(new(zone()) LCallKeyed(context, key), r0), instr);
}
LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new(zone()) LCallNamed, r0), instr);
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(DefineFixed(new(zone()) LCallNamed(context), r0), instr);
}
LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) {
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new(zone()) LCallGlobal, r0), instr);
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(DefineFixed(new(zone()) LCallGlobal(context), r0), instr);
}
LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) {
- argument_count_ -= instr->argument_count();
return MarkAsCall(DefineFixed(new(zone()) LCallKnownGlobal, r0), instr);
}
LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* constructor = UseFixed(instr->constructor(), r1);
+ LCallNew* result = new(zone()) LCallNew(context, constructor);
+ return MarkAsCall(DefineFixed(result, r0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* constructor = UseFixed(instr->constructor(), r1);
- argument_count_ -= instr->argument_count();
- LCallNew* result = new(zone()) LCallNew(constructor);
+ LCallNewArray* result = new(zone()) LCallNewArray(context, constructor);
return MarkAsCall(DefineFixed(result, r0), instr);
}
LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* function = UseFixed(instr->function(), r1);
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new(zone()) LCallFunction(function), r0),
- instr);
+ return MarkAsCall(
+ DefineFixed(new(zone()) LCallFunction(context, function), r0), instr);
}
LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new(zone()) LCallRuntime, r0), instr);
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(DefineFixed(new(zone()) LCallRuntime(context), r0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoRor(HRor* instr) {
+ return DoShift(Token::ROR, instr);
}
@@ -1124,48 +1360,37 @@ LInstruction* LChunkBuilder::DoShl(HShl* instr) {
LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ ASSERT(instr->CheckFlag(HValue::kTruncatingToInt32));
- LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
- LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+ LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
return DefineAsRegister(new(zone()) LBitI(left, right));
} else {
- ASSERT(instr->representation().IsTagged());
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
-
- LOperand* left = UseFixed(instr->left(), r1);
- LOperand* right = UseFixed(instr->right(), r0);
- LArithmeticT* result = new(zone()) LArithmeticT(instr->op(), left, right);
- return MarkAsCall(DefineFixed(result, r0), instr);
+ return DoArithmeticT(instr->op(), instr);
}
}
-LInstruction* LChunkBuilder::DoBitNot(HBitNot* instr) {
- ASSERT(instr->value()->representation().IsInteger32());
- ASSERT(instr->representation().IsInteger32());
- if (instr->HasNoUses()) return NULL;
- LOperand* value = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LBitNotI(value));
-}
-
-
LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
- if (instr->representation().IsDouble()) {
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ if (instr->HasPowerOf2Divisor()) {
+ ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
+ LOperand* value = UseRegisterAtStart(instr->left());
+ LDivI* div = new(zone()) LDivI(value, UseConstant(instr->right()), NULL);
+ return AssignEnvironment(DefineAsRegister(div));
+ }
+ LOperand* dividend = UseRegister(instr->left());
+ LOperand* divisor = UseRegister(instr->right());
+ LOperand* temp = CpuFeatures::IsSupported(SUDIV) ? NULL : FixedTemp(d4);
+ LDivI* div = new(zone()) LDivI(dividend, divisor, temp);
+ return AssignEnvironment(DefineAsRegister(div));
+ } else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::DIV, instr);
- } else if (instr->representation().IsInteger32()) {
- // TODO(1042) The fixed register allocation
- // is needed because we call TypeRecordingBinaryOpStub from
- // the generated code, which requires registers r0
- // and r1 to be used. We should remove that
- // when we provide a native implementation.
- LOperand* dividend = UseFixed(instr->left(), r0);
- LOperand* divisor = UseFixed(instr->right(), r1);
- return AssignEnvironment(AssignPointerMap(
- DefineFixed(new(zone()) LDivI(dividend, divisor), r0)));
} else {
return DoArithmeticT(Token::DIV, instr);
}
@@ -1199,115 +1424,165 @@ bool LChunkBuilder::HasMagicNumberForDivisor(int32_t divisor) {
}
-HValue* LChunkBuilder::SimplifiedDividendForMathFloorOfDiv(HValue* dividend) {
- // A value with an integer representation does not need to be transformed.
- if (dividend->representation().IsInteger32()) {
- return dividend;
- // A change from an integer32 can be replaced by the integer32 value.
- } else if (dividend->IsChange() &&
- HChange::cast(dividend)->from().IsInteger32()) {
- return HChange::cast(dividend)->value();
+HValue* LChunkBuilder::SimplifiedDivisorForMathFloorOfDiv(HValue* divisor) {
+ if (CpuFeatures::IsSupported(SUDIV)) {
+ // A value with an integer representation does not need to be transformed.
+ if (divisor->representation().IsInteger32()) {
+ return divisor;
+ // A change from an integer32 can be replaced by the integer32 value.
+ } else if (divisor->IsChange() &&
+ HChange::cast(divisor)->from().IsInteger32()) {
+ return HChange::cast(divisor)->value();
+ }
}
- return NULL;
-}
-
-HValue* LChunkBuilder::SimplifiedDivisorForMathFloorOfDiv(HValue* divisor) {
- // Only optimize when we have magic numbers for the divisor.
- // The standard integer division routine is usually slower than transitionning
- // to VFP.
- if (divisor->IsConstant() &&
- HConstant::cast(divisor)->HasInteger32Value()) {
+ if (divisor->IsConstant() && HConstant::cast(divisor)->HasInteger32Value()) {
HConstant* constant_val = HConstant::cast(divisor);
int32_t int32_val = constant_val->Integer32Value();
- if (LChunkBuilder::HasMagicNumberForDivisor(int32_val)) {
+ if (LChunkBuilder::HasMagicNumberForDivisor(int32_val) ||
+ CpuFeatures::IsSupported(SUDIV)) {
return constant_val->CopyToRepresentation(Representation::Integer32(),
divisor->block()->zone());
}
}
+
return NULL;
}
LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
- HValue* right = instr->right();
- LOperand* dividend = UseRegister(instr->left());
- LOperand* divisor = UseRegisterOrConstant(right);
- LOperand* remainder = TempRegister();
- ASSERT(right->IsConstant() &&
- HConstant::cast(right)->HasInteger32Value() &&
- HasMagicNumberForDivisor(HConstant::cast(right)->Integer32Value()));
- return AssignEnvironment(DefineAsRegister(
+ HValue* right = instr->right();
+ LOperand* dividend = UseRegister(instr->left());
+ LOperand* divisor = CpuFeatures::IsSupported(SUDIV)
+ ? UseRegister(right)
+ : UseOrConstant(right);
+ LOperand* remainder = TempRegister();
+ ASSERT(CpuFeatures::IsSupported(SUDIV) ||
+ (right->IsConstant() &&
+ HConstant::cast(right)->HasInteger32Value() &&
+ HasMagicNumberForDivisor(HConstant::cast(right)->Integer32Value())));
+ return AssignEnvironment(DefineAsRegister(
new(zone()) LMathFloorOfDiv(dividend, divisor, remainder)));
}
LInstruction* LChunkBuilder::DoMod(HMod* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
-
- LModI* mod;
+ HValue* left = instr->left();
+ HValue* right = instr->right();
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
if (instr->HasPowerOf2Divisor()) {
- ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
- LOperand* value = UseRegisterAtStart(instr->left());
- mod = new(zone()) LModI(value, UseOrConstant(instr->right()));
- } else {
- LOperand* dividend = UseRegister(instr->left());
- LOperand* divisor = UseRegister(instr->right());
- mod = new(zone()) LModI(dividend,
- divisor,
- TempRegister(),
- FixedTemp(d10),
- FixedTemp(d11));
- }
-
- if (instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
- instr->CheckFlag(HValue::kCanBeDivByZero)) {
+ ASSERT(!right->CanBeZero());
+ LModI* mod = new(zone()) LModI(UseRegisterAtStart(left),
+ UseOrConstant(right));
+ LInstruction* result = DefineAsRegister(mod);
+ return (left->CanBeNegative() &&
+ instr->CheckFlag(HValue::kBailoutOnMinusZero))
+ ? AssignEnvironment(result)
+ : result;
+ } else if (instr->fixed_right_arg().has_value) {
+ LModI* mod = new(zone()) LModI(UseRegisterAtStart(left),
+ UseRegisterAtStart(right));
return AssignEnvironment(DefineAsRegister(mod));
+ } else if (CpuFeatures::IsSupported(SUDIV)) {
+ LModI* mod = new(zone()) LModI(UseRegister(left),
+ UseRegister(right));
+ LInstruction* result = DefineAsRegister(mod);
+ return (right->CanBeZero() ||
+ (left->RangeCanInclude(kMinInt) &&
+ right->RangeCanInclude(-1) &&
+ instr->CheckFlag(HValue::kBailoutOnMinusZero)) ||
+ (left->CanBeNegative() &&
+ instr->CanBeZero() &&
+ instr->CheckFlag(HValue::kBailoutOnMinusZero)))
+ ? AssignEnvironment(result)
+ : result;
} else {
- return DefineAsRegister(mod);
+ LModI* mod = new(zone()) LModI(UseRegister(left),
+ UseRegister(right),
+ FixedTemp(d10),
+ FixedTemp(d11));
+ LInstruction* result = DefineAsRegister(mod);
+ return (right->CanBeZero() ||
+ (left->CanBeNegative() &&
+ instr->CanBeZero() &&
+ instr->CheckFlag(HValue::kBailoutOnMinusZero)))
+ ? AssignEnvironment(result)
+ : result;
}
- } else if (instr->representation().IsTagged()) {
- return DoArithmeticT(Token::MOD, instr);
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::MOD, instr);
} else {
- ASSERT(instr->representation().IsDouble());
- // We call a C function for double modulo. It can't trigger a GC.
- // We need to use fixed result register for the call.
- // TODO(fschneider): Allow any register as input registers.
- LOperand* left = UseFixedDouble(instr->left(), d1);
- LOperand* right = UseFixedDouble(instr->right(), d2);
- LArithmeticD* result = new(zone()) LArithmeticD(Token::MOD, left, right);
- return MarkAsCall(DefineFixedDouble(result, d1), instr);
+ return DoArithmeticT(Token::MOD, instr);
}
}
LInstruction* LChunkBuilder::DoMul(HMul* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left;
- LOperand* right = UseOrConstant(instr->MostConstantOperand());
- LOperand* temp = NULL;
- if (instr->CheckFlag(HValue::kBailoutOnMinusZero) &&
- (instr->CheckFlag(HValue::kCanOverflow) ||
- !right->IsConstantOperand())) {
- left = UseRegister(instr->LeastConstantOperand());
- temp = TempRegister();
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ HValue* left = instr->BetterLeftOperand();
+ HValue* right = instr->BetterRightOperand();
+ LOperand* left_op;
+ LOperand* right_op;
+ bool can_overflow = instr->CheckFlag(HValue::kCanOverflow);
+ bool bailout_on_minus_zero = instr->CheckFlag(HValue::kBailoutOnMinusZero);
+
+ if (right->IsConstant()) {
+ HConstant* constant = HConstant::cast(right);
+ int32_t constant_value = constant->Integer32Value();
+ // Constants -1, 0 and 1 can be optimized if the result can overflow.
+ // For other constants, it can be optimized only without overflow.
+ if (!can_overflow || ((constant_value >= -1) && (constant_value <= 1))) {
+ left_op = UseRegisterAtStart(left);
+ right_op = UseConstant(right);
+ } else {
+ if (bailout_on_minus_zero) {
+ left_op = UseRegister(left);
+ } else {
+ left_op = UseRegisterAtStart(left);
+ }
+ right_op = UseRegister(right);
+ }
} else {
- left = UseRegisterAtStart(instr->LeastConstantOperand());
+ if (bailout_on_minus_zero) {
+ left_op = UseRegister(left);
+ } else {
+ left_op = UseRegisterAtStart(left);
+ }
+ right_op = UseRegister(right);
}
- LMulI* mul = new(zone()) LMulI(left, right, temp);
- if (instr->CheckFlag(HValue::kCanOverflow) ||
- instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ LMulI* mul = new(zone()) LMulI(left_op, right_op);
+ if (can_overflow || bailout_on_minus_zero) {
AssignEnvironment(mul);
}
return DefineAsRegister(mul);
} else if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::MUL, instr);
+ if (instr->UseCount() == 1 && (instr->uses().value()->IsAdd() ||
+ instr->uses().value()->IsSub())) {
+ HBinaryOperation* use = HBinaryOperation::cast(instr->uses().value());
+
+ if (use->IsAdd() && instr == use->left()) {
+ // This mul is the lhs of an add. The add and mul will be folded into a
+ // multiply-add in DoAdd.
+ return NULL;
+ }
+ if (instr == use->right() && use->IsAdd() && !use->left()->IsMul()) {
+ // This mul is the rhs of an add, where the lhs is not another mul.
+ // The add and mul will be folded into a multiply-add in DoAdd.
+ return NULL;
+ }
+ if (instr == use->right() && use->IsSub()) {
+ // This mul is the rhs of a sub. The sub and mul will be folded into a
+ // multiply-sub in DoSub.
+ return NULL;
+ }
+ }
+ return DoArithmeticD(Token::MUL, instr);
} else {
return DoArithmeticT(Token::MUL, instr);
}
@@ -1315,9 +1590,15 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) {
LInstruction* LChunkBuilder::DoSub(HSub* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+
+ if (instr->left()->IsConstant()) {
+ // If lhs is constant, do reverse subtraction instead.
+ return DoRSub(instr);
+ }
+
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseOrConstantAtStart(instr->right());
LSubI* sub = new(zone()) LSubI(left, right);
@@ -1327,6 +1608,10 @@ LInstruction* LChunkBuilder::DoSub(HSub* instr) {
}
return result;
} else if (instr->representation().IsDouble()) {
+ if (instr->right()->IsMul()) {
+ return DoMultiplySub(instr->left(), HMul::cast(instr->right()));
+ }
+
return DoArithmeticD(Token::SUB, instr);
} else {
return DoArithmeticT(Token::SUB, instr);
@@ -1334,12 +1619,50 @@ LInstruction* LChunkBuilder::DoSub(HSub* instr) {
}
+LInstruction* LChunkBuilder::DoRSub(HSub* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+
+ // Note: The lhs of the subtraction becomes the rhs of the
+ // reverse-subtraction.
+ LOperand* left = UseRegisterAtStart(instr->right());
+ LOperand* right = UseOrConstantAtStart(instr->left());
+ LRSubI* rsb = new(zone()) LRSubI(left, right);
+ LInstruction* result = DefineAsRegister(rsb);
+ if (instr->CheckFlag(HValue::kCanOverflow)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
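
// [Editorial sketch, not part of the patch] DoRSub handles "constant - x" by
// swapping the operands: the register value becomes the first input and the
// former lhs can be encoded as an immediate, so LRSubI computes
// (second - first), matching ARM's RSB (reverse subtract). A tiny plain C++
// illustration; the function name is invented for exposition.
#include <cstdio>

int ReverseSubtract(int reg_operand /* HSub's right */,
                    int swapped_constant /* HSub's left */) {
  return swapped_constant - reg_operand;  // what the emitted rsb computes
}

int main() {
  int x = 3;
  printf("%d\n", ReverseSubtract(x, 10));  // "10 - x" prints 7
  return 0;
}
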
+
+
+LInstruction* LChunkBuilder::DoMultiplyAdd(HMul* mul, HValue* addend) {
+ LOperand* multiplier_op = UseRegisterAtStart(mul->left());
+ LOperand* multiplicand_op = UseRegisterAtStart(mul->right());
+ LOperand* addend_op = UseRegisterAtStart(addend);
+ return DefineSameAsFirst(new(zone()) LMultiplyAddD(addend_op, multiplier_op,
+ multiplicand_op));
+}
+
+
+LInstruction* LChunkBuilder::DoMultiplySub(HValue* minuend, HMul* mul) {
+ LOperand* minuend_op = UseRegisterAtStart(minuend);
+ LOperand* multiplier_op = UseRegisterAtStart(mul->left());
+ LOperand* multiplicand_op = UseRegisterAtStart(mul->right());
+
+ return DefineSameAsFirst(new(zone()) LMultiplySubD(minuend_op,
+ multiplier_op,
+ multiplicand_op));
+}
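
// [Editorial sketch, not part of the patch] DoSub above and DoAdd below
// recognize a double multiply that feeds directly into an add or sub and fold
// the pair into LMultiplyAddD / LMultiplySubD, which the ARM backend can emit
// as a single multiply-accumulate (vmla/vmls style). Value-level illustration
// in plain C++; the helper names are invented for exposition.
#include <cstdio>

double MultiplyAdd(double addend, double multiplier, double multiplicand) {
  return addend + multiplier * multiplicand;   // one multiply-accumulate
}

double MultiplySub(double minuend, double multiplier, double multiplicand) {
  return minuend - multiplier * multiplicand;  // one multiply-subtract
}

int main() {
  printf("%f\n", MultiplyAdd(1.0, 2.0, 3.0));   // 7.000000
  printf("%f\n", MultiplySub(10.0, 2.0, 3.0));  // 4.000000
  return 0;
}
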
+
+
LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
- LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
LAddI* add = new(zone()) LAddI(left, right);
LInstruction* result = DefineAsRegister(add);
if (instr->CheckFlag(HValue::kCanOverflow)) {
@@ -1347,9 +1670,17 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
}
return result;
} else if (instr->representation().IsDouble()) {
+ if (instr->left()->IsMul()) {
+ return DoMultiplyAdd(HMul::cast(instr->left()), instr->right());
+ }
+
+ if (instr->right()->IsMul()) {
+ ASSERT(!instr->left()->IsMul());
+ return DoMultiplyAdd(HMul::cast(instr->right()), instr->left());
+ }
+
return DoArithmeticD(Token::ADD, instr);
} else {
- ASSERT(instr->representation().IsTagged());
return DoArithmeticT(Token::ADD, instr);
}
}
@@ -1358,11 +1689,11 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) {
LOperand* left = NULL;
LOperand* right = NULL;
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
- left = UseRegisterAtStart(instr->LeastConstantOperand());
- right = UseOrConstantAtStart(instr->MostConstantOperand());
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ left = UseRegisterAtStart(instr->BetterLeftOperand());
+ right = UseOrConstantAtStart(instr->BetterRightOperand());
} else {
ASSERT(instr->representation().IsDouble());
ASSERT(instr->left()->representation().IsDouble());
@@ -1394,60 +1725,61 @@ LInstruction* LChunkBuilder::DoPower(HPower* instr) {
LInstruction* LChunkBuilder::DoRandom(HRandom* instr) {
ASSERT(instr->representation().IsDouble());
ASSERT(instr->global_object()->representation().IsTagged());
- LOperand* global_object = UseFixed(instr->global_object(), r0);
- LRandom* result = new(zone()) LRandom(global_object);
- return MarkAsCall(DefineFixedDouble(result, d7), instr);
+ LOperand* global_object = UseTempRegister(instr->global_object());
+ LOperand* scratch = TempRegister();
+ LOperand* scratch2 = TempRegister();
+ LOperand* scratch3 = TempRegister();
+ LRandom* result = new(zone()) LRandom(
+ global_object, scratch, scratch2, scratch3);
+ return DefineFixedDouble(result, d7);
}
LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
ASSERT(instr->left()->representation().IsTagged());
ASSERT(instr->right()->representation().IsTagged());
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* left = UseFixed(instr->left(), r1);
LOperand* right = UseFixed(instr->right(), r0);
- LCmpT* result = new(zone()) LCmpT(left, right);
+ LCmpT* result = new(zone()) LCmpT(context, left, right);
return MarkAsCall(DefineFixed(result, r0), instr);
}
-LInstruction* LChunkBuilder::DoCompareIDAndBranch(
- HCompareIDAndBranch* instr) {
- Representation r = instr->GetInputRepresentation();
- if (r.IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
+LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
+ HCompareNumericAndBranch* instr) {
+ Representation r = instr->representation();
+ if (r.IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(r));
+ ASSERT(instr->right()->representation().Equals(r));
LOperand* left = UseRegisterOrConstantAtStart(instr->left());
LOperand* right = UseRegisterOrConstantAtStart(instr->right());
- return new(zone()) LCmpIDAndBranch(left, right);
+ return new(zone()) LCompareNumericAndBranch(left, right);
} else {
ASSERT(r.IsDouble());
ASSERT(instr->left()->representation().IsDouble());
ASSERT(instr->right()->representation().IsDouble());
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
- return new(zone()) LCmpIDAndBranch(left, right);
+ return new(zone()) LCompareNumericAndBranch(left, right);
}
}
LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
HCompareObjectEqAndBranch* instr) {
+ LInstruction* goto_instr = CheckElideControlInstruction(instr);
+ if (goto_instr != NULL) return goto_instr;
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
return new(zone()) LCmpObjectEqAndBranch(left, right);
}
-LInstruction* LChunkBuilder::DoCompareConstantEqAndBranch(
- HCompareConstantEqAndBranch* instr) {
+LInstruction* LChunkBuilder::DoCompareHoleAndBranch(
+ HCompareHoleAndBranch* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
- return new(zone()) LCmpConstantEqAndBranch(value);
-}
-
-
-LInstruction* LChunkBuilder::DoIsNilAndBranch(HIsNilAndBranch* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- return new(zone()) LIsNilAndBranch(UseRegisterAtStart(instr->value()));
+ return new(zone()) LCmpHoleAndBranch(value);
}
@@ -1485,10 +1817,11 @@ LInstruction* LChunkBuilder::DoStringCompareAndBranch(
HStringCompareAndBranch* instr) {
ASSERT(instr->left()->representation().IsTagged());
ASSERT(instr->right()->representation().IsTagged());
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* left = UseFixed(instr->left(), r1);
LOperand* right = UseFixed(instr->right(), r0);
LStringCompareAndBranch* result =
- new(zone()) LStringCompareAndBranch(left, right);
+ new(zone()) LStringCompareAndBranch(context, left, right);
return MarkAsCall(result, instr);
}
@@ -1526,19 +1859,6 @@ LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
}
-LInstruction* LChunkBuilder::DoJSArrayLength(HJSArrayLength* instr) {
- LOperand* array = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LJSArrayLength(array));
-}
-
-
-LInstruction* LChunkBuilder::DoFixedArrayBaseLength(
- HFixedArrayBaseLength* instr) {
- LOperand* array = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LFixedArrayBaseLength(array));
-}
-
-
LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
LOperand* map = UseRegisterAtStart(instr->value());
return DefineAsRegister(new(zone()) LMapEnumLength(map));
@@ -1566,6 +1886,14 @@ LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
}
+LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
+ LOperand* string = UseRegister(instr->string());
+ LOperand* index = UseRegisterOrConstant(instr->index());
+ LOperand* value = UseRegister(instr->value());
+ return new(zone()) LSeqStringSetChar(instr->encoding(), string, index, value);
+}
+
+
LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
LOperand* value = UseRegisterOrConstantAtStart(instr->index());
LOperand* length = UseRegister(instr->length());
@@ -1573,6 +1901,13 @@ LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
}
+LInstruction* LChunkBuilder::DoBoundsCheckBaseIndexInformation(
+ HBoundsCheckBaseIndexInformation* instr) {
+ UNREACHABLE();
+ return NULL;
+}
+
+
LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
// The control instruction marking the end of a block that completed
// abruptly (e.g., threw an exception). There is nothing specific to do.
@@ -1581,8 +1916,9 @@ LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* value = UseFixed(instr->value(), r0);
- return MarkAsCall(new(zone()) LThrow(value), instr);
+ return MarkAsCall(new(zone()) LThrow(context, value), instr);
}
@@ -1602,32 +1938,47 @@ LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) {
LInstruction* LChunkBuilder::DoChange(HChange* instr) {
Representation from = instr->from();
Representation to = instr->to();
+ if (from.IsSmi()) {
+ if (to.IsTagged()) {
+ LOperand* value = UseRegister(instr->value());
+ return DefineSameAsFirst(new(zone()) LDummyUse(value));
+ }
+ from = Representation::Tagged();
+ }
if (from.IsTagged()) {
if (to.IsDouble()) {
LOperand* value = UseRegister(instr->value());
LNumberUntagD* res = new(zone()) LNumberUntagD(value);
return AssignEnvironment(DefineAsRegister(res));
+ } else if (to.IsSmi()) {
+ HValue* val = instr->value();
+ LOperand* value = UseRegister(val);
+ if (val->type().IsSmi()) {
+ return DefineSameAsFirst(new(zone()) LDummyUse(value));
+ }
+ return AssignEnvironment(DefineSameAsFirst(new(zone()) LCheckSmi(value)));
} else {
ASSERT(to.IsInteger32());
- LOperand* value = UseRegisterAtStart(instr->value());
+ LOperand* value = NULL;
LInstruction* res = NULL;
- if (instr->value()->type().IsSmi()) {
+ HValue* val = instr->value();
+ if (val->type().IsSmi() || val->representation().IsSmi()) {
+ value = UseRegisterAtStart(val);
res = DefineAsRegister(new(zone()) LSmiUntag(value, false));
} else {
+ value = UseRegister(val);
LOperand* temp1 = TempRegister();
- LOperand* temp2 = instr->CanTruncateToInt32() ? TempRegister()
- : NULL;
- LOperand* temp3 = FixedTemp(d11);
+ LOperand* temp2 = FixedTemp(d11);
res = DefineSameAsFirst(new(zone()) LTaggedToI(value,
temp1,
- temp2,
- temp3));
+ temp2));
res = AssignEnvironment(res);
}
return res;
}
} else if (from.IsDouble()) {
if (to.IsTagged()) {
+ info()->MarkAsDeferredCalling();
LOperand* value = UseRegister(instr->value());
LOperand* temp1 = TempRegister();
LOperand* temp2 = TempRegister();
@@ -1638,15 +1989,18 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
LNumberTagD* result = new(zone()) LNumberTagD(value, temp1, temp2);
Define(result, result_temp);
return AssignPointerMap(result);
+ } else if (to.IsSmi()) {
+ LOperand* value = UseRegister(instr->value());
+ return AssignEnvironment(
+ DefineAsRegister(new(zone()) LDoubleToSmi(value)));
} else {
ASSERT(to.IsInteger32());
LOperand* value = UseRegister(instr->value());
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = instr->CanTruncateToInt32() ? TempRegister() : NULL;
- LDoubleToI* res = new(zone()) LDoubleToI(value, temp1, temp2);
+ LDoubleToI* res = new(zone()) LDoubleToI(value);
return AssignEnvironment(DefineAsRegister(res));
}
} else if (from.IsInteger32()) {
+ info()->MarkAsDeferredCalling();
if (to.IsTagged()) {
HValue* val = instr->value();
LOperand* value = UseRegisterAtStart(val);
@@ -1659,6 +2013,16 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
LNumberTagI* result = new(zone()) LNumberTagI(value);
return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
}
+ } else if (to.IsSmi()) {
+ HValue* val = instr->value();
+ LOperand* value = UseRegister(val);
+ LInstruction* result = val->CheckFlag(HInstruction::kUint32)
+ ? DefineSameAsFirst(new(zone()) LUint32ToSmi(value))
+ : DefineSameAsFirst(new(zone()) LInteger32ToSmi(value));
+ if (val->HasRange() && val->range()->IsInSmiRange()) {
+ return result;
+ }
+ return AssignEnvironment(result);
} else {
ASSERT(to.IsDouble());
if (instr->value()->CheckFlag(HInstruction::kUint32)) {
@@ -1675,43 +2039,43 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
}
-LInstruction* LChunkBuilder::DoCheckNonSmi(HCheckNonSmi* instr) {
+LInstruction* LChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
return AssignEnvironment(new(zone()) LCheckNonSmi(value));
}
-LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
+LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
- LInstruction* result = new(zone()) LCheckInstanceType(value);
- return AssignEnvironment(result);
-}
-
-
-LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
- LInstruction* result = new(zone()) LCheckPrototypeMaps(temp1, temp2);
- return AssignEnvironment(result);
+ return AssignEnvironment(new(zone()) LCheckSmi(value));
}
-LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
+LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new(zone()) LCheckSmi(value));
+ LInstruction* result = new(zone()) LCheckInstanceType(value);
+ return AssignEnvironment(result);
}
-LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
+LInstruction* LChunkBuilder::DoCheckValue(HCheckValue* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new(zone()) LCheckFunction(value));
+ return AssignEnvironment(new(zone()) LCheckValue(value));
}
LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- LInstruction* result = new(zone()) LCheckMaps(value);
- return AssignEnvironment(result);
+ LOperand* value = NULL;
+ if (!instr->CanOmitMapChecks()) {
+ value = UseRegisterAtStart(instr->value());
+ if (instr->has_migration_target()) info()->MarkAsDeferredCalling();
+ }
+ LCheckMaps* result = new(zone()) LCheckMaps(value);
+ if (!instr->CanOmitMapChecks()) {
+ AssignEnvironment(result);
+ if (instr->has_migration_target()) return AssignPointerMap(result);
+ }
+ return result;
}
@@ -1720,11 +2084,11 @@ LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
Representation input_rep = value->representation();
LOperand* reg = UseRegister(value);
if (input_rep.IsDouble()) {
- return DefineAsRegister(new(zone()) LClampDToUint8(reg, FixedTemp(d11)));
+ return DefineAsRegister(new(zone()) LClampDToUint8(reg));
} else if (input_rep.IsInteger32()) {
return DefineAsRegister(new(zone()) LClampIToUint8(reg));
} else {
- ASSERT(input_rep.IsTagged());
+ ASSERT(input_rep.IsSmiOrTagged());
// Register allocator doesn't (yet) support allocation of double
// temps. Reserve d1 explicitly.
LClampTToUint8* result = new(zone()) LClampTToUint8(reg, FixedTemp(d11));
@@ -1734,16 +2098,25 @@ LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
- return new(zone()) LReturn(UseFixed(instr->value(), r0));
+ LOperand* context = info()->IsStub()
+ ? UseFixed(instr->context(), cp)
+ : NULL;
+ LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count());
+ return new(zone()) LReturn(UseFixed(instr->value(), r0), context,
+ parameter_count);
}
LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
Representation r = instr->representation();
- if (r.IsInteger32()) {
+ if (r.IsSmi()) {
+ return DefineAsRegister(new(zone()) LConstantS);
+ } else if (r.IsInteger32()) {
return DefineAsRegister(new(zone()) LConstantI);
} else if (r.IsDouble()) {
return DefineAsRegister(new(zone()) LConstantD);
+ } else if (r.IsExternal()) {
+ return DefineAsRegister(new(zone()) LConstantE);
} else if (r.IsTagged()) {
return DefineAsRegister(new(zone()) LConstantT);
} else {
@@ -1762,8 +2135,10 @@ LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* global_object = UseFixed(instr->global_object(), r0);
- LLoadGlobalGeneric* result = new(zone()) LLoadGlobalGeneric(global_object);
+ LLoadGlobalGeneric* result =
+ new(zone()) LLoadGlobalGeneric(context, global_object);
return MarkAsCall(DefineFixed(result, r0), instr);
}
@@ -1779,10 +2154,11 @@ LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
LInstruction* LChunkBuilder::DoStoreGlobalGeneric(HStoreGlobalGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* global_object = UseFixed(instr->global_object(), r1);
LOperand* value = UseFixed(instr->value(), r0);
LStoreGlobalGeneric* result =
- new(zone()) LStoreGlobalGeneric(global_object, value);
+ new(zone()) LStoreGlobalGeneric(context, global_object, value);
return MarkAsCall(result, instr);
}
@@ -1811,31 +2187,16 @@ LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
- return DefineAsRegister(
- new(zone()) LLoadNamedField(UseRegisterAtStart(instr->object())));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadNamedFieldPolymorphic(
- HLoadNamedFieldPolymorphic* instr) {
- ASSERT(instr->representation().IsTagged());
- if (instr->need_generic()) {
- LOperand* obj = UseFixed(instr->object(), r0);
- LLoadNamedFieldPolymorphic* result =
- new(zone()) LLoadNamedFieldPolymorphic(obj);
- return MarkAsCall(DefineFixed(result, r0), instr);
- } else {
- LOperand* obj = UseRegisterAtStart(instr->object());
- LLoadNamedFieldPolymorphic* result =
- new(zone()) LLoadNamedFieldPolymorphic(obj);
- return AssignEnvironment(DefineAsRegister(result));
- }
+ LOperand* obj = UseRegisterAtStart(instr->object());
+ return DefineAsRegister(new(zone()) LLoadNamedField(obj));
}
LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* object = UseFixed(instr->object(), r0);
- LInstruction* result = DefineFixed(new(zone()) LLoadNamedGeneric(object), r0);
+ LInstruction* result =
+ DefineFixed(new(zone()) LLoadNamedGeneric(context, object), r0);
return MarkAsCall(result, instr);
}
@@ -1847,9 +2208,8 @@ LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
}
-LInstruction* LChunkBuilder::DoLoadElements(HLoadElements* instr) {
- LOperand* input = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LLoadElements(input));
+LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) {
+ return DefineAsRegister(new(zone()) LLoadRoot);
}
@@ -1860,130 +2220,98 @@ LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
}
-LInstruction* LChunkBuilder::DoLoadKeyedFastElement(
- HLoadKeyedFastElement* instr) {
- ASSERT(instr->representation().IsTagged());
- ASSERT(instr->key()->representation().IsInteger32() ||
- instr->key()->representation().IsTagged());
- LOperand* obj = UseRegisterAtStart(instr->object());
- LOperand* key = UseRegisterOrConstantAtStart(instr->key());
- LLoadKeyedFastElement* result = new(zone()) LLoadKeyedFastElement(obj, key);
- if (instr->RequiresHoleCheck()) AssignEnvironment(result);
- return DefineAsRegister(result);
-}
-
-
-LInstruction* LChunkBuilder::DoLoadKeyedFastDoubleElement(
- HLoadKeyedFastDoubleElement* instr) {
- ASSERT(instr->representation().IsDouble());
- ASSERT(instr->key()->representation().IsInteger32() ||
- instr->key()->representation().IsTagged());
- LOperand* elements = UseTempRegister(instr->elements());
+LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
+ ASSERT(instr->key()->representation().IsSmiOrInteger32());
+ ElementsKind elements_kind = instr->elements_kind();
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
- LLoadKeyedFastDoubleElement* result =
- new(zone()) LLoadKeyedFastDoubleElement(elements, key);
- return AssignEnvironment(DefineAsRegister(result));
-}
+ LLoadKeyed* result = NULL;
+ if (!instr->is_external()) {
+ LOperand* obj = NULL;
+ if (instr->representation().IsDouble()) {
+ obj = UseRegister(instr->elements());
+ } else {
+ ASSERT(instr->representation().IsSmiOrTagged());
+ obj = UseRegisterAtStart(instr->elements());
+ }
+ result = new(zone()) LLoadKeyed(obj, key);
+ } else {
+ ASSERT(
+ (instr->representation().IsInteger32() &&
+ (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
+ (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
+ (instr->representation().IsDouble() &&
+ ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
+ (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
+ LOperand* external_pointer = UseRegister(instr->elements());
+ result = new(zone()) LLoadKeyed(external_pointer, key);
+ }
-LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement(
- HLoadKeyedSpecializedArrayElement* instr) {
- ElementsKind elements_kind = instr->elements_kind();
- ASSERT(
- (instr->representation().IsInteger32() &&
- (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
- (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
- (instr->representation().IsDouble() &&
- ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
- (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
- ASSERT(instr->key()->representation().IsInteger32() ||
- instr->key()->representation().IsTagged());
- LOperand* external_pointer = UseRegister(instr->external_pointer());
- LOperand* key = UseRegisterOrConstant(instr->key());
- LLoadKeyedSpecializedArrayElement* result =
- new(zone()) LLoadKeyedSpecializedArrayElement(external_pointer, key);
- LInstruction* load_instr = DefineAsRegister(result);
+ DefineAsRegister(result);
// An unsigned int array load might overflow and cause a deopt, make sure it
// has an environment.
- return (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) ?
- AssignEnvironment(load_instr) : load_instr;
+ bool can_deoptimize = instr->RequiresHoleCheck() ||
+ (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS);
+ return can_deoptimize ? AssignEnvironment(result) : result;
}
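
// [Editorial sketch, not part of the patch] The can_deoptimize condition
// above covers two cases: a hole check on fast elements, and loads from
// external unsigned int arrays, whose untagged result lives in an int32
// register and so cannot represent values above INT32_MAX without a deopt.
// Minimal illustration of the second case in plain C++; the predicate name
// is invented for exposition.
#include <cstdint>
#include <cstdio>

bool FitsInt32Result(uint32_t element) {
  return element <= static_cast<uint32_t>(INT32_MAX);
}

int main() {
  printf("%d\n", FitsInt32Result(42u));          // 1: representable as int32
  printf("%d\n", FitsInt32Result(0x80000000u));  // 0: would force the deopt path
  return 0;
}
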
LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* object = UseFixed(instr->object(), r1);
LOperand* key = UseFixed(instr->key(), r0);
LInstruction* result =
- DefineFixed(new(zone()) LLoadKeyedGeneric(object, key), r0);
+ DefineFixed(new(zone()) LLoadKeyedGeneric(context, object, key), r0);
return MarkAsCall(result, instr);
}
-LInstruction* LChunkBuilder::DoStoreKeyedFastElement(
- HStoreKeyedFastElement* instr) {
- bool needs_write_barrier = instr->NeedsWriteBarrier();
- ASSERT(instr->value()->representation().IsTagged());
- ASSERT(instr->object()->representation().IsTagged());
- ASSERT(instr->key()->representation().IsInteger32() ||
- instr->key()->representation().IsTagged());
-
- LOperand* obj = UseTempRegister(instr->object());
- LOperand* val = needs_write_barrier
- ? UseTempRegister(instr->value())
- : UseRegisterAtStart(instr->value());
- LOperand* key = needs_write_barrier
- ? UseTempRegister(instr->key())
- : UseRegisterOrConstantAtStart(instr->key());
- return new(zone()) LStoreKeyedFastElement(obj, key, val);
-}
-
-
-LInstruction* LChunkBuilder::DoStoreKeyedFastDoubleElement(
- HStoreKeyedFastDoubleElement* instr) {
- ASSERT(instr->value()->representation().IsDouble());
- ASSERT(instr->elements()->representation().IsTagged());
- ASSERT(instr->key()->representation().IsInteger32() ||
- instr->key()->representation().IsTagged());
-
- LOperand* elements = UseRegisterAtStart(instr->elements());
- LOperand* val = UseTempRegister(instr->value());
- LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
+ if (!instr->is_external()) {
+ ASSERT(instr->elements()->representation().IsTagged());
+ bool needs_write_barrier = instr->NeedsWriteBarrier();
+ LOperand* object = NULL;
+ LOperand* key = NULL;
+ LOperand* val = NULL;
- return new(zone()) LStoreKeyedFastDoubleElement(elements, key, val);
-}
+ if (instr->value()->representation().IsDouble()) {
+ object = UseRegisterAtStart(instr->elements());
+ val = UseRegister(instr->value());
+ key = UseRegisterOrConstantAtStart(instr->key());
+ } else {
+ ASSERT(instr->value()->representation().IsSmiOrTagged());
+ if (needs_write_barrier) {
+ object = UseTempRegister(instr->elements());
+ val = UseTempRegister(instr->value());
+ key = UseTempRegister(instr->key());
+ } else {
+ object = UseRegisterAtStart(instr->elements());
+ val = UseRegisterAtStart(instr->value());
+ key = UseRegisterOrConstantAtStart(instr->key());
+ }
+ }
+ return new(zone()) LStoreKeyed(object, key, val);
+ }
-LInstruction* LChunkBuilder::DoStoreKeyedSpecializedArrayElement(
- HStoreKeyedSpecializedArrayElement* instr) {
- ElementsKind elements_kind = instr->elements_kind();
ASSERT(
(instr->value()->representation().IsInteger32() &&
- (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
- (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
+ (instr->elements_kind() != EXTERNAL_FLOAT_ELEMENTS) &&
+ (instr->elements_kind() != EXTERNAL_DOUBLE_ELEMENTS)) ||
(instr->value()->representation().IsDouble() &&
- ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
- (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
- ASSERT(instr->external_pointer()->representation().IsExternal());
- ASSERT(instr->key()->representation().IsInteger32() ||
- instr->key()->representation().IsTagged());
-
- LOperand* external_pointer = UseRegister(instr->external_pointer());
- bool val_is_temp_register =
- elements_kind == EXTERNAL_PIXEL_ELEMENTS ||
- elements_kind == EXTERNAL_FLOAT_ELEMENTS;
- LOperand* val = val_is_temp_register
- ? UseTempRegister(instr->value())
- : UseRegister(instr->value());
- LOperand* key = UseRegisterOrConstant(instr->key());
-
- return new(zone()) LStoreKeyedSpecializedArrayElement(external_pointer,
- key,
- val);
+ ((instr->elements_kind() == EXTERNAL_FLOAT_ELEMENTS) ||
+ (instr->elements_kind() == EXTERNAL_DOUBLE_ELEMENTS))));
+ ASSERT(instr->elements()->representation().IsExternal());
+ LOperand* val = UseRegister(instr->value());
+ LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+ LOperand* external_pointer = UseRegister(instr->elements());
+ return new(zone()) LStoreKeyed(external_pointer, key, val);
}
LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* obj = UseFixed(instr->object(), r2);
LOperand* key = UseFixed(instr->key(), r1);
LOperand* val = UseFixed(instr->value(), r0);
@@ -1992,41 +2320,47 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
ASSERT(instr->key()->representation().IsTagged());
ASSERT(instr->value()->representation().IsTagged());
- return MarkAsCall(new(zone()) LStoreKeyedGeneric(obj, key, val), instr);
+ return MarkAsCall(
+ new(zone()) LStoreKeyedGeneric(context, obj, key, val), instr);
}
LInstruction* LChunkBuilder::DoTransitionElementsKind(
HTransitionElementsKind* instr) {
- ElementsKind from_kind = instr->original_map()->elements_kind();
- ElementsKind to_kind = instr->transitioned_map()->elements_kind();
- if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
- LOperand* object = UseRegister(instr->object());
+ LOperand* object = UseRegister(instr->object());
+ if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
LOperand* new_map_reg = TempRegister();
LTransitionElementsKind* result =
- new(zone()) LTransitionElementsKind(object, new_map_reg, NULL);
- return DefineSameAsFirst(result);
+ new(zone()) LTransitionElementsKind(object, NULL, new_map_reg);
+ return result;
} else {
- LOperand* object = UseFixed(instr->object(), r0);
- LOperand* fixed_object_reg = FixedTemp(r2);
- LOperand* new_map_reg = FixedTemp(r3);
+ LOperand* context = UseFixed(instr->context(), cp);
LTransitionElementsKind* result =
- new(zone()) LTransitionElementsKind(object,
- new_map_reg,
- fixed_object_reg);
- return MarkAsCall(DefineFixed(result, r0), instr);
+ new(zone()) LTransitionElementsKind(object, context, NULL);
+ return AssignPointerMap(result);
}
}
+LInstruction* LChunkBuilder::DoTrapAllocationMemento(
+ HTrapAllocationMemento* instr) {
+ LOperand* object = UseRegister(instr->object());
+ LOperand* temp = TempRegister();
+ LTrapAllocationMemento* result =
+ new(zone()) LTrapAllocationMemento(object, temp);
+ return AssignEnvironment(result);
+}
+
+
LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
+ bool is_in_object = instr->access().IsInobject();
bool needs_write_barrier = instr->NeedsWriteBarrier();
- bool needs_write_barrier_for_map = !instr->transition().is_null() &&
+ bool needs_write_barrier_for_map = instr->has_transition() &&
instr->NeedsWriteBarrierForMap();
LOperand* obj;
if (needs_write_barrier) {
- obj = instr->is_in_object()
+ obj = is_in_object
? UseRegister(instr->object())
: UseTempRegister(instr->object());
} else {
@@ -2035,92 +2369,94 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
: UseRegisterAtStart(instr->object());
}
- LOperand* val = needs_write_barrier
- ? UseTempRegister(instr->value())
- : UseRegister(instr->value());
+ LOperand* val;
+ if (needs_write_barrier ||
+ (FLAG_track_fields && instr->field_representation().IsSmi())) {
+ val = UseTempRegister(instr->value());
+ } else if (FLAG_track_double_fields &&
+ instr->field_representation().IsDouble()) {
+ val = UseRegisterAtStart(instr->value());
+ } else {
+ val = UseRegister(instr->value());
+ }
// We need a temporary register for write barrier of the map field.
LOperand* temp = needs_write_barrier_for_map ? TempRegister() : NULL;
- return new(zone()) LStoreNamedField(obj, val, temp);
+ LStoreNamedField* result = new(zone()) LStoreNamedField(obj, val, temp);
+ if (FLAG_track_heap_object_fields &&
+ instr->field_representation().IsHeapObject()) {
+ if (!instr->value()->type().IsHeapObject()) {
+ return AssignEnvironment(result);
+ }
+ }
+ return result;
}
LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* obj = UseFixed(instr->object(), r1);
LOperand* val = UseFixed(instr->value(), r0);
- LInstruction* result = new(zone()) LStoreNamedGeneric(obj, val);
+ LInstruction* result = new(zone()) LStoreNamedGeneric(context, obj, val);
return MarkAsCall(result, instr);
}
LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
- return MarkAsCall(DefineFixed(new(zone()) LStringAdd(left, right), r0),
- instr);
+ return MarkAsCall(
+ DefineFixed(new(zone()) LStringAdd(context, left, right), r0),
+ instr);
}
LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
LOperand* string = UseTempRegister(instr->string());
LOperand* index = UseTempRegister(instr->index());
- LStringCharCodeAt* result = new(zone()) LStringCharCodeAt(string, index);
+ LOperand* context = UseAny(instr->context());
+ LStringCharCodeAt* result =
+ new(zone()) LStringCharCodeAt(context, string, index);
return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
}
LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
LOperand* char_code = UseRegister(instr->value());
- LStringCharFromCode* result = new(zone()) LStringCharFromCode(char_code);
+ LOperand* context = UseAny(instr->context());
+ LStringCharFromCode* result =
+ new(zone()) LStringCharFromCode(context, char_code);
return AssignPointerMap(DefineAsRegister(result));
}
-LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
- LOperand* string = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LStringLength(string));
-}
-
-
-LInstruction* LChunkBuilder::DoAllocateObject(HAllocateObject* instr) {
- LAllocateObject* result =
- new(zone()) LAllocateObject(TempRegister(), TempRegister());
+LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
+ info()->MarkAsDeferredCalling();
+ LOperand* context = UseAny(instr->context());
+ LOperand* size = instr->size()->IsConstant()
+ ? UseConstant(instr->size())
+ : UseTempRegister(instr->size());
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LAllocate* result = new(zone()) LAllocate(context, size, temp1, temp2);
return AssignPointerMap(DefineAsRegister(result));
}
-LInstruction* LChunkBuilder::DoFastLiteral(HFastLiteral* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LFastLiteral, r0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LArrayLiteral, r0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoObjectLiteral(HObjectLiteral* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LObjectLiteral, r0), instr);
-}
-
-
LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LRegExpLiteral, r0), instr);
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(
+ DefineFixed(new(zone()) LRegExpLiteral(context), r0), instr);
}
LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LFunctionLiteral, r0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) {
- LOperand* object = UseFixed(instr->object(), r0);
- LOperand* key = UseFixed(instr->key(), r1);
- LDeleteProperty* result = new(zone()) LDeleteProperty(object, key);
- return MarkAsCall(DefineFixed(result, r0), instr);
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(
+ DefineFixed(new(zone()) LFunctionLiteral(context), r0), instr);
}
@@ -2133,24 +2469,42 @@ LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
- int spill_index = chunk()->GetParameterStackSlot(instr->index());
- return DefineAsSpilled(new(zone()) LParameter, spill_index);
+ LParameter* result = new(zone()) LParameter;
+ if (instr->kind() == HParameter::STACK_PARAMETER) {
+ int spill_index = chunk()->GetParameterStackSlot(instr->index());
+ return DefineAsSpilled(result, spill_index);
+ } else {
+ ASSERT(info()->IsStub());
+ CodeStubInterfaceDescriptor* descriptor =
+ info()->code_stub()->GetInterfaceDescriptor(info()->isolate());
+ int index = static_cast<int>(instr->index());
+ Register reg = DESCRIPTOR_GET_PARAMETER_REGISTER(descriptor, index);
+ return DefineFixed(result, reg);
+ }
}
LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
- int spill_index = chunk()->GetNextSpillIndex(false); // Not double-width.
- if (spill_index > LUnallocated::kMaxFixedIndex) {
- Abort("Too many spill slots needed for OSR");
- spill_index = 0;
+ // Use an index that corresponds to the location in the unoptimized frame,
+ // which the optimized frame will subsume.
+ int env_index = instr->index();
+ int spill_index = 0;
+ if (instr->environment()->is_parameter_index(env_index)) {
+ spill_index = chunk()->GetParameterStackSlot(env_index);
+ } else {
+ spill_index = env_index - instr->environment()->first_local_index();
+ if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
+ Abort(kTooManySpillSlotsNeededForOSR);
+ spill_index = 0;
+ }
}
return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
}
LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new(zone()) LCallStub, r0), instr);
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(DefineFixed(new(zone()) LCallStub(context), r0), instr);
}
@@ -2163,10 +2517,26 @@ LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
}
+LInstruction* LChunkBuilder::DoCapturedObject(HCapturedObject* instr) {
+ instr->ReplayEnvironment(current_block_->last_environment());
+
+ // There are no real uses of a captured object.
+ return NULL;
+}
+
+
LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
+ info()->MarkAsRequiresFrame();
LOperand* args = UseRegister(instr->arguments());
- LOperand* length = UseTempRegister(instr->length());
- LOperand* index = UseRegister(instr->index());
+ LOperand* length;
+ LOperand* index;
+ if (instr->length()->IsConstant() && instr->index()->IsConstant()) {
+ length = UseRegisterOrConstant(instr->length());
+ index = UseOrConstant(instr->index());
+ } else {
+ length = UseTempRegister(instr->length());
+ index = UseRegisterAtStart(instr->index());
+ }
return DefineAsRegister(new(zone()) LAccessArgumentsAt(args, length, index));
}
@@ -2179,7 +2549,8 @@ LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
- LTypeof* result = new(zone()) LTypeof(UseFixed(instr->value(), r0));
+ LOperand* context = UseFixed(instr->context(), cp);
+ LTypeof* result = new(zone()) LTypeof(context, UseFixed(instr->value(), r0));
return MarkAsCall(DefineFixed(result, r0), instr);
}
@@ -2196,20 +2567,7 @@ LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
- HEnvironment* env = current_block_->last_environment();
- ASSERT(env != NULL);
-
- env->set_ast_id(instr->ast_id());
-
- env->Drop(instr->pop_count());
- for (int i = 0; i < instr->values()->length(); ++i) {
- HValue* value = instr->values()->at(i);
- if (instr->HasAssignedIndexAt(i)) {
- env->Bind(instr->GetAssignedIndexAt(i), value);
- } else {
- env->Push(value);
- }
- }
+ instr->ReplayEnvironment(current_block_->last_environment());
// If there is an instruction pending deoptimization environment create a
// lazy bailout instruction to capture the environment.
@@ -2231,10 +2589,13 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
if (instr->is_function_entry()) {
- return MarkAsCall(new(zone()) LStackCheck, instr);
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(new(zone()) LStackCheck(context), instr);
} else {
ASSERT(instr->is_backwards_branch());
- return AssignEnvironment(AssignPointerMap(new(zone()) LStackCheck));
+ LOperand* context = UseAny(instr->context());
+ return AssignEnvironment(
+ AssignPointerMap(new(zone()) LStackCheck(context)));
}
}
@@ -2246,10 +2607,11 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
instr->arguments_count(),
instr->function(),
undefined,
- instr->call_kind(),
- instr->inlining_kind());
- if (instr->arguments_var() != NULL) {
- inner->Bind(instr->arguments_var(), graph()->GetArgumentsObject());
+ instr->inlining_kind(),
+ instr->undefined_receiver());
+ // Only replay binding of arguments object if it wasn't removed from graph.
+ if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) {
+ inner->Bind(instr->arguments_var(), instr->arguments_object());
}
inner->set_entry(instr);
current_block_->UpdateEnvironment(inner);
@@ -2266,7 +2628,7 @@ LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
if (env->entry()->arguments_pushed()) {
int argument_count = env->arguments_environment()->parameter_count();
pop = new(zone()) LDrop(argument_count);
- argument_count_ -= argument_count;
+ ASSERT(instr->argument_delta() == -argument_count);
}
HEnvironment* outer = current_block_->last_environment()->
@@ -2277,17 +2639,10 @@ LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
}
-LInstruction* LChunkBuilder::DoIn(HIn* instr) {
- LOperand* key = UseRegisterAtStart(instr->key());
- LOperand* object = UseRegisterAtStart(instr->object());
- LIn* result = new(zone()) LIn(key, object);
- return MarkAsCall(DefineFixed(result, r0), instr);
-}
-
-
LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* object = UseFixed(instr->enumerable(), r0);
- LForInPrepareMap* result = new(zone()) LForInPrepareMap(object);
+ LForInPrepareMap* result = new(zone()) LForInPrepareMap(context, object);
return MarkAsCall(DefineFixed(result, r0), instr, CAN_DEOPTIMIZE_EAGERLY);
}
diff --git a/deps/v8/src/arm/lithium-arm.h b/deps/v8/src/arm/lithium-arm.h
index fb36fe9c0..5087fb33d 100644
--- a/deps/v8/src/arm/lithium-arm.h
+++ b/deps/v8/src/arm/lithium-arm.h
@@ -40,24 +40,16 @@ namespace internal {
// Forward declarations.
class LCodeGen;
-#define LITHIUM_ALL_INSTRUCTION_LIST(V) \
- V(ControlInstruction) \
- V(Call) \
- LITHIUM_CONCRETE_INSTRUCTION_LIST(V)
-
-
#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
V(AccessArgumentsAt) \
V(AddI) \
- V(AllocateObject) \
+ V(Allocate) \
V(ApplyArguments) \
V(ArgumentsElements) \
V(ArgumentsLength) \
V(ArithmeticD) \
V(ArithmeticT) \
- V(ArrayLiteral) \
V(BitI) \
- V(BitNotI) \
V(BoundsCheck) \
V(Branch) \
V(CallConstantFunction) \
@@ -67,35 +59,43 @@ class LCodeGen;
V(CallKnownGlobal) \
V(CallNamed) \
V(CallNew) \
+ V(CallNewArray) \
V(CallRuntime) \
V(CallStub) \
- V(CheckFunction) \
V(CheckInstanceType) \
V(CheckNonSmi) \
V(CheckMaps) \
- V(CheckPrototypeMaps) \
+ V(CheckMapValue) \
V(CheckSmi) \
+ V(CheckValue) \
V(ClampDToUint8) \
V(ClampIToUint8) \
V(ClampTToUint8) \
V(ClassOfTestAndBranch) \
- V(CmpConstantEqAndBranch) \
- V(CmpIDAndBranch) \
+ V(CompareNumericAndBranch) \
V(CmpObjectEqAndBranch) \
+ V(CmpHoleAndBranch) \
V(CmpMapAndBranch) \
V(CmpT) \
V(ConstantD) \
+ V(ConstantE) \
V(ConstantI) \
+ V(ConstantS) \
V(ConstantT) \
V(Context) \
+ V(DateField) \
+ V(DebugBreak) \
V(DeclareGlobals) \
- V(DeleteProperty) \
V(Deoptimize) \
V(DivI) \
V(DoubleToI) \
+ V(DoubleToSmi) \
+ V(Drop) \
+ V(Dummy) \
+ V(DummyUse) \
V(ElementsKind) \
- V(FastLiteral) \
- V(FixedArrayBaseLength) \
+ V(ForInCacheArray) \
+ V(ForInPrepareMap) \
V(FunctionLiteral) \
V(GetCachedArrayIndex) \
V(GlobalObject) \
@@ -103,45 +103,52 @@ class LCodeGen;
V(Goto) \
V(HasCachedArrayIndexAndBranch) \
V(HasInstanceTypeAndBranch) \
- V(In) \
+ V(InnerAllocatedObject) \
V(InstanceOf) \
V(InstanceOfKnownGlobal) \
V(InstructionGap) \
V(Integer32ToDouble) \
- V(Uint32ToDouble) \
+ V(Integer32ToSmi) \
V(InvokeFunction) \
V(IsConstructCallAndBranch) \
- V(IsNilAndBranch) \
V(IsObjectAndBranch) \
V(IsStringAndBranch) \
V(IsSmiAndBranch) \
V(IsUndetectableAndBranch) \
- V(JSArrayLength) \
V(Label) \
V(LazyBailout) \
V(LoadContextSlot) \
- V(LoadElements) \
V(LoadExternalArrayPointer) \
+ V(LoadRoot) \
+ V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
V(LoadGlobalCell) \
V(LoadGlobalGeneric) \
- V(LoadKeyedFastDoubleElement) \
- V(LoadKeyedFastElement) \
+ V(LoadKeyed) \
V(LoadKeyedGeneric) \
- V(LoadKeyedSpecializedArrayElement) \
V(LoadNamedField) \
- V(LoadNamedFieldPolymorphic) \
V(LoadNamedGeneric) \
V(MapEnumLength) \
+ V(MathAbs) \
+ V(MathCos) \
+ V(MathExp) \
+ V(MathFloor) \
V(MathFloorOfDiv) \
+ V(MathLog) \
V(MathMinMax) \
+ V(MathPowHalf) \
+ V(MathRound) \
+ V(MathSin) \
+ V(MathSqrt) \
+ V(MathTan) \
V(ModI) \
V(MulI) \
+ V(MultiplyAddD) \
+ V(MultiplySubD) \
V(NumberTagD) \
V(NumberTagI) \
V(NumberTagU) \
V(NumberUntagD) \
- V(ObjectLiteral) \
V(OsrEntry) \
V(OuterContext) \
V(Parameter) \
@@ -150,51 +157,51 @@ class LCodeGen;
V(Random) \
V(RegExpLiteral) \
V(Return) \
+ V(SeqStringSetChar) \
V(ShiftI) \
V(SmiTag) \
V(SmiUntag) \
V(StackCheck) \
+ V(StoreCodeEntry) \
V(StoreContextSlot) \
V(StoreGlobalCell) \
V(StoreGlobalGeneric) \
- V(StoreKeyedFastDoubleElement) \
- V(StoreKeyedFastElement) \
+ V(StoreKeyed) \
V(StoreKeyedGeneric) \
- V(StoreKeyedSpecializedArrayElement) \
V(StoreNamedField) \
V(StoreNamedGeneric) \
V(StringAdd) \
V(StringCharCodeAt) \
V(StringCharFromCode) \
V(StringCompareAndBranch) \
- V(StringLength) \
V(SubI) \
+ V(RSubI) \
V(TaggedToI) \
V(ThisFunction) \
V(Throw) \
V(ToFastProperties) \
V(TransitionElementsKind) \
+ V(TrapAllocationMemento) \
V(Typeof) \
V(TypeofIsAndBranch) \
- V(UnaryMathOperation) \
+ V(Uint32ToDouble) \
+ V(Uint32ToSmi) \
V(UnknownOSRValue) \
V(ValueOf) \
- V(ForInPrepareMap) \
- V(ForInCacheArray) \
- V(CheckMapValue) \
- V(LoadFieldByIndex) \
- V(DateField) \
- V(WrapReceiver) \
- V(Drop)
+ V(WrapReceiver)
-#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
- virtual Opcode opcode() const { return LInstruction::k##type; } \
- virtual void CompileToNative(LCodeGen* generator); \
- virtual const char* Mnemonic() const { return mnemonic; } \
- static L##type* cast(LInstruction* instr) { \
- ASSERT(instr->Is##type()); \
- return reinterpret_cast<L##type*>(instr); \
+#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
+ virtual Opcode opcode() const V8_FINAL V8_OVERRIDE { \
+ return LInstruction::k##type; \
+ } \
+ virtual void CompileToNative(LCodeGen* generator) V8_FINAL V8_OVERRIDE; \
+ virtual const char* Mnemonic() const V8_FINAL V8_OVERRIDE { \
+ return mnemonic; \
+ } \
+ static L##type* cast(LInstruction* instr) { \
+ ASSERT(instr->Is##type()); \
+ return reinterpret_cast<L##type*>(instr); \
}
@@ -204,13 +211,15 @@ class LCodeGen;
}
-class LInstruction: public ZoneObject {
+class LInstruction : public ZoneObject {
public:
LInstruction()
- : environment_(NULL),
- hydrogen_value_(NULL),
- is_call_(false) { }
- virtual ~LInstruction() { }
+ : environment_(NULL),
+ hydrogen_value_(NULL),
+ bit_field_(IsCallBits::encode(false)) {
+ }
+
+ virtual ~LInstruction() {}
virtual void CompileToNative(LCodeGen* generator) = 0;
virtual const char* Mnemonic() const = 0;
@@ -253,17 +262,25 @@ class LInstruction: public ZoneObject {
virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) { }
- void MarkAsCall() { is_call_ = true; }
+ void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
+ bool IsCall() const { return IsCallBits::decode(bit_field_); }
// Interface to the register allocator and iterators.
- bool IsMarkedAsCall() const { return is_call_; }
+ bool ClobbersTemps() const { return IsCall(); }
+ bool ClobbersRegisters() const { return IsCall(); }
+ virtual bool ClobbersDoubleRegisters() const { return IsCall(); }
+
+ // Interface to the register allocator and iterators.
+ bool IsMarkedAsCall() const { return IsCall(); }
virtual bool HasResult() const = 0;
- virtual LOperand* result() = 0;
+ virtual LOperand* result() const = 0;
LOperand* FirstInput() { return InputAt(0); }
LOperand* Output() { return HasResult() ? result() : NULL; }
+ virtual bool HasInterestingComment(LCodeGen* gen) const { return true; }
+
#ifdef DEBUG
void VerifyCall();
#endif
@@ -278,10 +295,12 @@ class LInstruction: public ZoneObject {
virtual int TempCount() = 0;
virtual LOperand* TempAt(int i) = 0;
+ class IsCallBits: public BitField<bool, 0, 1> {};
+
LEnvironment* environment_;
SetOncePointer<LPointerMap> pointer_map_;
HValue* hydrogen_value_;
- bool is_call_;
+ int bit_field_;
};
@@ -289,13 +308,15 @@ class LInstruction: public ZoneObject {
// I = number of input operands.
// T = number of temporary operands.
template<int R, int I, int T>
-class LTemplateInstruction: public LInstruction {
+class LTemplateInstruction : public LInstruction {
public:
// Allow 0 or 1 output operands.
STATIC_ASSERT(R == 0 || R == 1);
- virtual bool HasResult() const { return R != 0; }
+ virtual bool HasResult() const V8_FINAL V8_OVERRIDE {
+ return R != 0 && result() != NULL;
+ }
void set_result(LOperand* operand) { results_[0] = operand; }
- LOperand* result() { return results_[0]; }
+ LOperand* result() const { return results_[0]; }
protected:
EmbeddedContainer<LOperand*, R> results_;
@@ -303,15 +324,15 @@ class LTemplateInstruction: public LInstruction {
EmbeddedContainer<LOperand*, T> temps_;
private:
- virtual int InputCount() { return I; }
- virtual LOperand* InputAt(int i) { return inputs_[i]; }
+ virtual int InputCount() V8_FINAL V8_OVERRIDE { return I; }
+ virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; }
- virtual int TempCount() { return T; }
- virtual LOperand* TempAt(int i) { return temps_[i]; }
+ virtual int TempCount() V8_FINAL V8_OVERRIDE { return T; }
+ virtual LOperand* TempAt(int i) V8_FINAL V8_OVERRIDE { return temps_[i]; }
};
-class LGap: public LTemplateInstruction<0, 0, 0> {
+class LGap : public LTemplateInstruction<0, 0, 0> {
public:
explicit LGap(HBasicBlock* block)
: block_(block) {
@@ -322,8 +343,8 @@ class LGap: public LTemplateInstruction<0, 0, 0> {
}
// Can't use the DECLARE-macro here because of sub-classes.
- virtual bool IsGap() const { return true; }
- virtual void PrintDataTo(StringStream* stream);
+ virtual bool IsGap() const V8_OVERRIDE { return true; }
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
static LGap* cast(LInstruction* instr) {
ASSERT(instr->IsGap());
return reinterpret_cast<LGap*>(instr);
@@ -359,30 +380,35 @@ class LGap: public LTemplateInstruction<0, 0, 0> {
};
-class LInstructionGap: public LGap {
+class LInstructionGap V8_FINAL : public LGap {
public:
explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ return !IsRedundant();
+ }
+
DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap")
};
-class LGoto: public LTemplateInstruction<0, 0, 0> {
+class LGoto V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
- explicit LGoto(int block_id) : block_id_(block_id) { }
+ explicit LGoto(HBasicBlock* block) : block_(block) { }
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
- virtual void PrintDataTo(StringStream* stream);
- virtual bool IsControl() const { return true; }
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual bool IsControl() const V8_OVERRIDE { return true; }
- int block_id() const { return block_id_; }
+ int block_id() const { return block_->block_id(); }
private:
- int block_id_;
+ HBasicBlock* block_;
};
-class LLazyBailout: public LTemplateInstruction<0, 0, 0> {
+class LLazyBailout V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
LLazyBailout() : gap_instructions_size_(0) { }
@@ -398,23 +424,44 @@ class LLazyBailout: public LTemplateInstruction<0, 0, 0> {
};
-class LDeoptimize: public LTemplateInstruction<0, 0, 0> {
+class LDummy V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ explicit LDummy() { }
+ DECLARE_CONCRETE_INSTRUCTION(Dummy, "dummy")
+};
+
+
+class LDummyUse V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LDummyUse(LOperand* value) {
+ inputs_[0] = value;
+ }
+ DECLARE_CONCRETE_INSTRUCTION(DummyUse, "dummy-use")
+};
+
+
+class LDeoptimize V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
+ DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
};
-class LLabel: public LGap {
+class LLabel V8_FINAL : public LGap {
public:
explicit LLabel(HBasicBlock* block)
: LGap(block), replacement_(NULL) { }
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ return false;
+ }
DECLARE_CONCRETE_INSTRUCTION(Label, "label")
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
int block_id() const { return block()->block_id(); }
bool is_loop_header() const { return block()->IsLoopHeader(); }
+ bool is_osr_entry() const { return block()->is_osr_entry(); }
Label* label() { return &label_; }
LLabel* replacement() const { return replacement_; }
void set_replacement(LLabel* label) { replacement_ = label; }
@@ -426,14 +473,21 @@ class LLabel: public LGap {
};
-class LParameter: public LTemplateInstruction<1, 0, 0> {
+class LParameter V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
+ virtual bool HasInterestingComment(LCodeGen* gen) const { return false; }
DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
};
-class LCallStub: public LTemplateInstruction<1, 0, 0> {
+class LCallStub V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
+ explicit LCallStub(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
DECLARE_HYDROGEN_ACCESSOR(CallStub)
@@ -443,30 +497,60 @@ class LCallStub: public LTemplateInstruction<1, 0, 0> {
};
-class LUnknownOSRValue: public LTemplateInstruction<1, 0, 0> {
+class LUnknownOSRValue V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ return false;
+ }
DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
};
template<int I, int T>
-class LControlInstruction: public LTemplateInstruction<0, I, T> {
+class LControlInstruction : public LTemplateInstruction<0, I, T> {
public:
- virtual bool IsControl() const { return true; }
+ LControlInstruction() : false_label_(NULL), true_label_(NULL) { }
+
+ virtual bool IsControl() const V8_FINAL V8_OVERRIDE { return true; }
int SuccessorCount() { return hydrogen()->SuccessorCount(); }
HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
- int true_block_id() { return hydrogen()->SuccessorAt(0)->block_id(); }
- int false_block_id() { return hydrogen()->SuccessorAt(1)->block_id(); }
+
+ int TrueDestination(LChunk* chunk) {
+ return chunk->LookupDestination(true_block_id());
+ }
+ int FalseDestination(LChunk* chunk) {
+ return chunk->LookupDestination(false_block_id());
+ }
+
+ Label* TrueLabel(LChunk* chunk) {
+ if (true_label_ == NULL) {
+ true_label_ = chunk->GetAssemblyLabel(TrueDestination(chunk));
+ }
+ return true_label_;
+ }
+ Label* FalseLabel(LChunk* chunk) {
+ if (false_label_ == NULL) {
+ false_label_ = chunk->GetAssemblyLabel(FalseDestination(chunk));
+ }
+ return false_label_;
+ }
+
+ protected:
+ int true_block_id() { return SuccessorAt(0)->block_id(); }
+ int false_block_id() { return SuccessorAt(1)->block_id(); }
private:
HControlInstruction* hydrogen() {
return HControlInstruction::cast(this->hydrogen_value());
}
+
+ Label* false_label_;
+ Label* true_label_;
};
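
The rewritten LControlInstruction resolves its branch targets lazily: TrueLabel()/FalseLabel() look the destination up in the chunk once and then cache the Label*. Below is a standalone sketch of that caching shape, separate from the patch, using stand-in types instead of the real LChunk/Label and an identity LookupDestination for simplicity.

#include <cstdio>

// Stand-ins for the real V8 types; only the lazy-caching pattern matters here.
struct Label { int position; };

struct FakeChunk {
  Label assembly_labels[2];
  int LookupDestination(int block_id) const { return block_id; }  // identity for the sketch
  Label* GetAssemblyLabel(int index) { return &assembly_labels[index]; }
};

struct FakeControl {
  FakeChunk* chunk;
  Label* true_label = nullptr;   // resolved on first request, then reused
  Label* false_label = nullptr;

  Label* TrueLabel() {
    if (true_label == nullptr)
      true_label = chunk->GetAssemblyLabel(chunk->LookupDestination(0));
    return true_label;
  }
  Label* FalseLabel() {
    if (false_label == nullptr)
      false_label = chunk->GetAssemblyLabel(chunk->LookupDestination(1));
    return false_label;
  }
};

int main() {
  FakeChunk chunk = {{{100}, {200}}};
  FakeControl branch = {&chunk};
  printf("%d %d\n", branch.TrueLabel()->position, branch.FalseLabel()->position);  // 100 200
  return 0;
}
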
-class LWrapReceiver: public LTemplateInstruction<1, 2, 0> {
+class LWrapReceiver V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LWrapReceiver(LOperand* receiver, LOperand* function) {
inputs_[0] = receiver;
@@ -480,7 +564,7 @@ class LWrapReceiver: public LTemplateInstruction<1, 2, 0> {
};
-class LApplyArguments: public LTemplateInstruction<1, 4, 0> {
+class LApplyArguments V8_FINAL : public LTemplateInstruction<1, 4, 0> {
public:
LApplyArguments(LOperand* function,
LOperand* receiver,
@@ -501,7 +585,7 @@ class LApplyArguments: public LTemplateInstruction<1, 4, 0> {
};
-class LAccessArgumentsAt: public LTemplateInstruction<1, 3, 0> {
+class LAccessArgumentsAt V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) {
inputs_[0] = arguments;
@@ -515,11 +599,11 @@ class LAccessArgumentsAt: public LTemplateInstruction<1, 3, 0> {
LOperand* length() { return inputs_[1]; }
LOperand* index() { return inputs_[2]; }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LArgumentsLength: public LTemplateInstruction<1, 1, 0> {
+class LArgumentsLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LArgumentsLength(LOperand* elements) {
inputs_[0] = elements;
@@ -531,65 +615,53 @@ class LArgumentsLength: public LTemplateInstruction<1, 1, 0> {
};
-class LArgumentsElements: public LTemplateInstruction<1, 0, 0> {
+class LArgumentsElements V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements)
};
-class LModI: public LTemplateInstruction<1, 2, 3> {
+class LModI V8_FINAL : public LTemplateInstruction<1, 2, 2> {
public:
- // Used when the right hand is a constant power of 2.
- LModI(LOperand* left,
- LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- temps_[0] = NULL;
- temps_[1] = NULL;
- temps_[2] = NULL;
- }
-
- // Used for the standard case.
LModI(LOperand* left,
LOperand* right,
- LOperand* temp,
- LOperand* temp2,
- LOperand* temp3) {
+ LOperand* temp = NULL,
+ LOperand* temp2 = NULL) {
inputs_[0] = left;
inputs_[1] = right;
temps_[0] = temp;
temps_[1] = temp2;
- temps_[2] = temp3;
}
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
LOperand* temp() { return temps_[0]; }
LOperand* temp2() { return temps_[1]; }
- LOperand* temp3() { return temps_[2]; }
DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i")
DECLARE_HYDROGEN_ACCESSOR(Mod)
};
-class LDivI: public LTemplateInstruction<1, 2, 0> {
+class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
- LDivI(LOperand* left, LOperand* right) {
+ LDivI(LOperand* left, LOperand* right, LOperand* temp) {
inputs_[0] = left;
inputs_[1] = right;
+ temps_[0] = temp;
}
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
DECLARE_HYDROGEN_ACCESSOR(Div)
};
-class LMathFloorOfDiv: public LTemplateInstruction<1, 2, 1> {
+class LMathFloorOfDiv V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LMathFloorOfDiv(LOperand* left,
LOperand* right,
@@ -608,26 +680,66 @@ class LMathFloorOfDiv: public LTemplateInstruction<1, 2, 1> {
};
-class LMulI: public LTemplateInstruction<1, 2, 1> {
+class LMulI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- LMulI(LOperand* left, LOperand* right, LOperand* temp) {
+ LMulI(LOperand* left, LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
- temps_[0] = temp;
}
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i")
DECLARE_HYDROGEN_ACCESSOR(Mul)
};
-class LCmpIDAndBranch: public LControlInstruction<2, 0> {
+// Instruction for computing multiplier * multiplicand + addend.
+class LMultiplyAddD V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+ LMultiplyAddD(LOperand* addend, LOperand* multiplier,
+ LOperand* multiplicand) {
+ inputs_[0] = addend;
+ inputs_[1] = multiplier;
+ inputs_[2] = multiplicand;
+ }
+
+ LOperand* addend() { return inputs_[0]; }
+ LOperand* multiplier() { return inputs_[1]; }
+ LOperand* multiplicand() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MultiplyAddD, "multiply-add-d")
+};
+
+
+// Instruction for computing minuend - multiplier * multiplicand.
+class LMultiplySubD V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+ LMultiplySubD(LOperand* minuend, LOperand* multiplier,
+ LOperand* multiplicand) {
+ inputs_[0] = minuend;
+ inputs_[1] = multiplier;
+ inputs_[2] = multiplicand;
+ }
+
+ LOperand* minuend() { return inputs_[0]; }
+ LOperand* multiplier() { return inputs_[1]; }
+ LOperand* multiplicand() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MultiplySubD, "multiply-sub-d")
+};
+
+
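
The two new fused instructions describe the kind of multiply-accumulate forms ARM hardware provides (vmla/vmls style). As a standalone sketch, separate from the patch, the arithmetic they stand for looks like this with plain doubles instead of LOperands:

#include <cstdio>

double MultiplyAddD(double addend, double multiplier, double multiplicand) {
  return addend + multiplier * multiplicand;    // multiply-add-d
}

double MultiplySubD(double minuend, double multiplier, double multiplicand) {
  return minuend - multiplier * multiplicand;   // multiply-sub-d
}

int main() {
  printf("%g %g\n", MultiplyAddD(1.0, 2.0, 3.0),    // 7
                    MultiplySubD(10.0, 2.0, 3.0));  // 4
  return 0;
}
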
+class LDebugBreak V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(DebugBreak, "break")
+};
+
+
+class LCompareNumericAndBranch V8_FINAL : public LControlInstruction<2, 0> {
public:
- LCmpIDAndBranch(LOperand* left, LOperand* right) {
+ LCompareNumericAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
}
@@ -635,21 +747,35 @@ class LCmpIDAndBranch: public LControlInstruction<2, 0> {
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
- DECLARE_CONCRETE_INSTRUCTION(CmpIDAndBranch, "cmp-id-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareIDAndBranch)
+ DECLARE_CONCRETE_INSTRUCTION(CompareNumericAndBranch,
+ "compare-numeric-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareNumericAndBranch)
Token::Value op() const { return hydrogen()->token(); }
bool is_double() const {
- return hydrogen()->GetInputRepresentation().IsDouble();
+ return hydrogen()->representation().IsDouble();
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LUnaryMathOperation: public LTemplateInstruction<1, 1, 1> {
+class LMathFloor V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
- LUnaryMathOperation(LOperand* value, LOperand* temp) {
+ explicit LMathFloor(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathFloor, "math-floor")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
+
+
+class LMathRound V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+ public:
+ LMathRound(LOperand* value, LOperand* temp) {
inputs_[0] = value;
temps_[0] = temp;
}
@@ -657,63 +783,151 @@ class LUnaryMathOperation: public LTemplateInstruction<1, 1, 1> {
LOperand* value() { return inputs_[0]; }
LOperand* temp() { return temps_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(UnaryMathOperation, "unary-math-operation")
+ DECLARE_CONCRETE_INSTRUCTION(MathRound, "math-round")
DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
- virtual void PrintDataTo(StringStream* stream);
- BuiltinFunctionId op() const { return hydrogen()->op(); }
+
+class LMathAbs V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LMathAbs(LOperand* context, LOperand* value) {
+ inputs_[1] = context;
+ inputs_[0] = value;
+ }
+
+ LOperand* context() { return inputs_[1]; }
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathAbs, "math-abs")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
};
-class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
+class LMathLog V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
- LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
+ explicit LMathLog(LOperand* value) {
+ inputs_[0] = value;
}
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
+ LOperand* value() { return inputs_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch,
- "cmp-object-eq-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareObjectEqAndBranch)
+ DECLARE_CONCRETE_INSTRUCTION(MathLog, "math-log")
};
-class LCmpConstantEqAndBranch: public LControlInstruction<1, 0> {
+class LMathSin V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
- explicit LCmpConstantEqAndBranch(LOperand* left) {
- inputs_[0] = left;
+ explicit LMathSin(LOperand* value) {
+ inputs_[0] = value;
}
- LOperand* left() { return inputs_[0]; }
+ LOperand* value() { return inputs_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(CmpConstantEqAndBranch,
- "cmp-constant-eq-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareConstantEqAndBranch)
+ DECLARE_CONCRETE_INSTRUCTION(MathSin, "math-sin")
};
-class LIsNilAndBranch: public LControlInstruction<1, 0> {
+class LMathCos V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
- explicit LIsNilAndBranch(LOperand* value) {
+ explicit LMathCos(LOperand* value) {
inputs_[0] = value;
}
LOperand* value() { return inputs_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(IsNilAndBranch, "is-nil-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsNilAndBranch)
+ DECLARE_CONCRETE_INSTRUCTION(MathCos, "math-cos")
+};
+
- EqualityKind kind() const { return hydrogen()->kind(); }
- NilValue nil() const { return hydrogen()->nil(); }
+class LMathTan V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathTan(LOperand* value) {
+ inputs_[0] = value;
+ }
- virtual void PrintDataTo(StringStream* stream);
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathTan, "math-tan")
};
-class LIsObjectAndBranch: public LControlInstruction<1, 1> {
+class LMathExp V8_FINAL : public LTemplateInstruction<1, 1, 3> {
+ public:
+ LMathExp(LOperand* value,
+ LOperand* double_temp,
+ LOperand* temp1,
+ LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ temps_[2] = double_temp;
+ ExternalReference::InitializeMathExpData();
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+ LOperand* double_temp() { return temps_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp")
+};
+
+
+class LMathSqrt V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathSqrt(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathSqrt, "math-sqrt")
+};
+
+
+class LMathPowHalf V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+ public:
+ LMathPowHalf(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathPowHalf, "math-pow-half")
+};
+
+
+class LCmpObjectEqAndBranch V8_FINAL : public LControlInstruction<2, 0> {
+ public:
+ LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch, "cmp-object-eq-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareObjectEqAndBranch)
+};
+
+
+class LCmpHoleAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+ public:
+ explicit LCmpHoleAndBranch(LOperand* object) {
+ inputs_[0] = object;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpHoleAndBranch, "cmp-hole-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareHoleAndBranch)
+};
+
+
+class LIsObjectAndBranch V8_FINAL : public LControlInstruction<1, 1> {
public:
LIsObjectAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -726,11 +940,11 @@ class LIsObjectAndBranch: public LControlInstruction<1, 1> {
DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsObjectAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LIsStringAndBranch: public LControlInstruction<1, 1> {
+class LIsStringAndBranch V8_FINAL : public LControlInstruction<1, 1> {
public:
LIsStringAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -743,11 +957,11 @@ class LIsStringAndBranch: public LControlInstruction<1, 1> {
DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LIsSmiAndBranch: public LControlInstruction<1, 0> {
+class LIsSmiAndBranch V8_FINAL : public LControlInstruction<1, 0> {
public:
explicit LIsSmiAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -758,11 +972,11 @@ class LIsSmiAndBranch: public LControlInstruction<1, 0> {
DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LIsUndetectableAndBranch: public LControlInstruction<1, 1> {
+class LIsUndetectableAndBranch V8_FINAL : public LControlInstruction<1, 1> {
public:
explicit LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -776,19 +990,21 @@ class LIsUndetectableAndBranch: public LControlInstruction<1, 1> {
"is-undetectable-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LStringCompareAndBranch: public LControlInstruction<2, 0> {
+class LStringCompareAndBranch V8_FINAL : public LControlInstruction<3, 0> {
public:
- LStringCompareAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
+ LStringCompareAndBranch(LOperand* context, LOperand* left, LOperand* right) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
}
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch,
"string-compare-and-branch")
@@ -796,11 +1012,11 @@ class LStringCompareAndBranch: public LControlInstruction<2, 0> {
Token::Value op() const { return hydrogen()->token(); }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LHasInstanceTypeAndBranch: public LControlInstruction<1, 0> {
+class LHasInstanceTypeAndBranch V8_FINAL : public LControlInstruction<1, 0> {
public:
explicit LHasInstanceTypeAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -812,11 +1028,11 @@ class LHasInstanceTypeAndBranch: public LControlInstruction<1, 0> {
"has-instance-type-and-branch")
DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LGetCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
+class LGetCachedArrayIndex V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LGetCachedArrayIndex(LOperand* value) {
inputs_[0] = value;
@@ -829,7 +1045,8 @@ class LGetCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
};
-class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
+class LHasCachedArrayIndexAndBranch V8_FINAL
+ : public LControlInstruction<1, 0> {
public:
explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -841,11 +1058,11 @@ class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
"has-cached-array-index-and-branch")
DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LClassOfTestAndBranch: public LControlInstruction<1, 1> {
+class LClassOfTestAndBranch V8_FINAL : public LControlInstruction<1, 1> {
public:
LClassOfTestAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -859,19 +1076,21 @@ class LClassOfTestAndBranch: public LControlInstruction<1, 1> {
"class-of-test-and-branch")
DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LCmpT: public LTemplateInstruction<1, 2, 0> {
+class LCmpT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
- LCmpT(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
+ LCmpT(LOperand* context, LOperand* left, LOperand* right) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
}
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
@@ -880,28 +1099,32 @@ class LCmpT: public LTemplateInstruction<1, 2, 0> {
};
-class LInstanceOf: public LTemplateInstruction<1, 2, 0> {
+class LInstanceOf V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
- LInstanceOf(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
+ LInstanceOf(LOperand* context, LOperand* left, LOperand* right) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
}
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
};
-class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> {
+class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
- LInstanceOfKnownGlobal(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
+ LInstanceOfKnownGlobal(LOperand* context, LOperand* value, LOperand* temp) {
+ inputs_[0] = context;
+ inputs_[1] = value;
temps_[0] = temp;
}
- LOperand* value() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal,
@@ -912,7 +1135,8 @@ class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> {
LEnvironment* GetDeferredLazyDeoptimizationEnvironment() {
return lazy_deopt_env_;
}
- virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) {
+ virtual void SetDeferredLazyDeoptimizationEnvironment(
+ LEnvironment* env) V8_OVERRIDE {
lazy_deopt_env_ = env;
}
@@ -921,7 +1145,7 @@ class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> {
};
-class LBoundsCheck: public LTemplateInstruction<0, 2, 0> {
+class LBoundsCheck V8_FINAL : public LTemplateInstruction<0, 2, 0> {
public:
LBoundsCheck(LOperand* index, LOperand* length) {
inputs_[0] = index;
@@ -936,7 +1160,7 @@ class LBoundsCheck: public LTemplateInstruction<0, 2, 0> {
};
-class LBitI: public LTemplateInstruction<1, 2, 0> {
+class LBitI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LBitI(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -953,7 +1177,7 @@ class LBitI: public LTemplateInstruction<1, 2, 0> {
};
-class LShiftI: public LTemplateInstruction<1, 2, 0> {
+class LShiftI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
: op_(op), can_deopt_(can_deopt) {
@@ -974,7 +1198,7 @@ class LShiftI: public LTemplateInstruction<1, 2, 0> {
};
-class LSubI: public LTemplateInstruction<1, 2, 0> {
+class LSubI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LSubI(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -989,7 +1213,22 @@ class LSubI: public LTemplateInstruction<1, 2, 0> {
};
-class LConstantI: public LTemplateInstruction<1, 0, 0> {
+class LRSubI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LRSubI(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(RSubI, "rsub-i")
+ DECLARE_HYDROGEN_ACCESSOR(Sub)
+};
+
+
+class LConstantI V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -998,7 +1237,16 @@ class LConstantI: public LTemplateInstruction<1, 0, 0> {
};
-class LConstantD: public LTemplateInstruction<1, 0, 0> {
+class LConstantS V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ConstantS, "constant-s")
+ DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+ Smi* value() const { return Smi::FromInt(hydrogen()->Integer32Value()); }
+};
+
+
+class LConstantD V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1007,16 +1255,29 @@ class LConstantD: public LTemplateInstruction<1, 0, 0> {
};
-class LConstantT: public LTemplateInstruction<1, 0, 0> {
+class LConstantE V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ConstantE, "constant-e")
+ DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+ ExternalReference value() const {
+ return hydrogen()->ExternalReferenceValue();
+ }
+};
+
+
+class LConstantT V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
DECLARE_HYDROGEN_ACCESSOR(Constant)
- Handle<Object> value() const { return hydrogen()->handle(); }
+ Handle<Object> value(Isolate* isolate) const {
+ return hydrogen()->handle(isolate);
+ }
};
-class LBranch: public LControlInstruction<1, 0> {
+class LBranch V8_FINAL : public LControlInstruction<1, 0> {
public:
explicit LBranch(LOperand* value) {
inputs_[0] = value;
@@ -1027,11 +1288,11 @@ class LBranch: public LControlInstruction<1, 0> {
DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
DECLARE_HYDROGEN_ACCESSOR(Branch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LCmpMapAndBranch: public LTemplateInstruction<0, 1, 1> {
+class LCmpMapAndBranch V8_FINAL : public LControlInstruction<1, 1> {
public:
LCmpMapAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1044,46 +1305,11 @@ class LCmpMapAndBranch: public LTemplateInstruction<0, 1, 1> {
DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
DECLARE_HYDROGEN_ACCESSOR(CompareMap)
- virtual bool IsControl() const { return true; }
-
- Handle<Map> map() const { return hydrogen()->map(); }
- int true_block_id() const {
- return hydrogen()->FirstSuccessor()->block_id();
- }
- int false_block_id() const {
- return hydrogen()->SecondSuccessor()->block_id();
- }
+ Handle<Map> map() const { return hydrogen()->map().handle(); }
};
-class LJSArrayLength: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LJSArrayLength(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(JSArrayLength, "js-array-length")
- DECLARE_HYDROGEN_ACCESSOR(JSArrayLength)
-};
-
-
-class LFixedArrayBaseLength: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LFixedArrayBaseLength(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(FixedArrayBaseLength,
- "fixed-array-base-length")
- DECLARE_HYDROGEN_ACCESSOR(FixedArrayBaseLength)
-};
-
-
-class LMapEnumLength: public LTemplateInstruction<1, 1, 0> {
+class LMapEnumLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMapEnumLength(LOperand* value) {
inputs_[0] = value;
@@ -1095,7 +1321,7 @@ class LMapEnumLength: public LTemplateInstruction<1, 1, 0> {
};
-class LElementsKind: public LTemplateInstruction<1, 1, 0> {
+class LElementsKind V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LElementsKind(LOperand* value) {
inputs_[0] = value;
@@ -1108,7 +1334,7 @@ class LElementsKind: public LTemplateInstruction<1, 1, 0> {
};
-class LValueOf: public LTemplateInstruction<1, 1, 1> {
+class LValueOf V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LValueOf(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1123,7 +1349,7 @@ class LValueOf: public LTemplateInstruction<1, 1, 1> {
};
-class LDateField: public LTemplateInstruction<1, 1, 1> {
+class LDateField V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LDateField(LOperand* date, LOperand* temp, Smi* index) : index_(index) {
inputs_[0] = date;
@@ -1134,39 +1360,53 @@ class LDateField: public LTemplateInstruction<1, 1, 1> {
LOperand* temp() { return temps_[0]; }
Smi* index() const { return index_; }
- DECLARE_CONCRETE_INSTRUCTION(ValueOf, "date-field")
- DECLARE_HYDROGEN_ACCESSOR(ValueOf)
+ DECLARE_CONCRETE_INSTRUCTION(DateField, "date-field")
+ DECLARE_HYDROGEN_ACCESSOR(DateField)
private:
Smi* index_;
};
-class LThrow: public LTemplateInstruction<0, 1, 0> {
+class LSeqStringSetChar V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
- explicit LThrow(LOperand* value) {
- inputs_[0] = value;
+ LSeqStringSetChar(String::Encoding encoding,
+ LOperand* string,
+ LOperand* index,
+ LOperand* value) : encoding_(encoding) {
+ inputs_[0] = string;
+ inputs_[1] = index;
+ inputs_[2] = value;
}
- LOperand* value() { return inputs_[0]; }
+ String::Encoding encoding() { return encoding_; }
+ LOperand* string() { return inputs_[0]; }
+ LOperand* index() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
- DECLARE_CONCRETE_INSTRUCTION(Throw, "throw")
+ DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char")
+ DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar)
+
+ private:
+ String::Encoding encoding_;
};
-class LBitNotI: public LTemplateInstruction<1, 1, 0> {
+class LThrow V8_FINAL : public LTemplateInstruction<0, 2, 0> {
public:
- explicit LBitNotI(LOperand* value) {
- inputs_[0] = value;
+ LThrow(LOperand* context, LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = value;
}
- LOperand* value() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
- DECLARE_CONCRETE_INSTRUCTION(BitNotI, "bit-not-i")
+ DECLARE_CONCRETE_INSTRUCTION(Throw, "throw")
};
-class LAddI: public LTemplateInstruction<1, 2, 0> {
+class LAddI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LAddI(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1181,7 +1421,7 @@ class LAddI: public LTemplateInstruction<1, 2, 0> {
};
-class LMathMinMax: public LTemplateInstruction<1, 2, 0> {
+class LMathMinMax V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LMathMinMax(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1191,12 +1431,12 @@ class LMathMinMax: public LTemplateInstruction<1, 2, 0> {
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
- DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "min-max")
+ DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "math-min-max")
DECLARE_HYDROGEN_ACCESSOR(MathMinMax)
};
-class LPower: public LTemplateInstruction<1, 2, 0> {
+class LPower V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LPower(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1211,20 +1451,29 @@ class LPower: public LTemplateInstruction<1, 2, 0> {
};
-class LRandom: public LTemplateInstruction<1, 1, 0> {
+class LRandom V8_FINAL : public LTemplateInstruction<1, 1, 3> {
public:
- explicit LRandom(LOperand* global_object) {
+ LRandom(LOperand* global_object,
+ LOperand* scratch,
+ LOperand* scratch2,
+ LOperand* scratch3) {
inputs_[0] = global_object;
+ temps_[0] = scratch;
+ temps_[1] = scratch2;
+ temps_[2] = scratch3;
}
- LOperand* global_object() { return inputs_[0]; }
+ LOperand* global_object() const { return inputs_[0]; }
+ LOperand* scratch() const { return temps_[0]; }
+ LOperand* scratch2() const { return temps_[1]; }
+ LOperand* scratch3() const { return temps_[2]; }
DECLARE_CONCRETE_INSTRUCTION(Random, "random")
DECLARE_HYDROGEN_ACCESSOR(Random)
};
-class LArithmeticD: public LTemplateInstruction<1, 2, 0> {
+class LArithmeticD V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
: op_(op) {
@@ -1236,49 +1485,69 @@ class LArithmeticD: public LTemplateInstruction<1, 2, 0> {
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
- virtual Opcode opcode() const { return LInstruction::kArithmeticD; }
- virtual void CompileToNative(LCodeGen* generator);
- virtual const char* Mnemonic() const;
+ virtual Opcode opcode() const V8_OVERRIDE {
+ return LInstruction::kArithmeticD;
+ }
+ virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
+ virtual const char* Mnemonic() const V8_OVERRIDE;
private:
Token::Value op_;
};
-class LArithmeticT: public LTemplateInstruction<1, 2, 0> {
+class LArithmeticT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
- LArithmeticT(Token::Value op, LOperand* left, LOperand* right)
+ LArithmeticT(Token::Value op,
+ LOperand* context,
+ LOperand* left,
+ LOperand* right)
: op_(op) {
- inputs_[0] = left;
- inputs_[1] = right;
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
}
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
Token::Value op() const { return op_; }
- virtual Opcode opcode() const { return LInstruction::kArithmeticT; }
- virtual void CompileToNative(LCodeGen* generator);
- virtual const char* Mnemonic() const;
+ virtual Opcode opcode() const V8_OVERRIDE {
+ return LInstruction::kArithmeticT;
+ }
+ virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
+ virtual const char* Mnemonic() const V8_OVERRIDE;
private:
Token::Value op_;
};
-class LReturn: public LTemplateInstruction<0, 1, 0> {
+class LReturn V8_FINAL : public LTemplateInstruction<0, 3, 0> {
public:
- explicit LReturn(LOperand* value) {
+ LReturn(LOperand* value, LOperand* context, LOperand* parameter_count) {
inputs_[0] = value;
+ inputs_[1] = context;
+ inputs_[2] = parameter_count;
}
LOperand* value() { return inputs_[0]; }
+ bool has_constant_parameter_count() {
+ return parameter_count()->IsConstantOperand();
+ }
+ LConstantOperand* constant_parameter_count() {
+ ASSERT(has_constant_parameter_count());
+ return LConstantOperand::cast(parameter_count());
+ }
+ LOperand* parameter_count() { return inputs_[2]; }
+
DECLARE_CONCRETE_INSTRUCTION(Return, "return")
};
-class LLoadNamedField: public LTemplateInstruction<1, 1, 0> {
+class LLoadNamedField V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadNamedField(LOperand* object) {
inputs_[0] = object;
@@ -1291,26 +1560,15 @@ class LLoadNamedField: public LTemplateInstruction<1, 1, 0> {
};
-class LLoadNamedFieldPolymorphic: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadNamedFieldPolymorphic(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field-polymorphic")
- DECLARE_HYDROGEN_ACCESSOR(LoadNamedFieldPolymorphic)
-};
-
-
-class LLoadNamedGeneric: public LTemplateInstruction<1, 1, 0> {
+class LLoadNamedGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LLoadNamedGeneric(LOperand* object) {
- inputs_[0] = object;
+ LLoadNamedGeneric(LOperand* context, LOperand* object) {
+ inputs_[0] = context;
+ inputs_[1] = object;
}
- LOperand* object() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
@@ -1319,7 +1577,7 @@ class LLoadNamedGeneric: public LTemplateInstruction<1, 1, 0> {
};
-class LLoadFunctionPrototype: public LTemplateInstruction<1, 1, 0> {
+class LLoadFunctionPrototype V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadFunctionPrototype(LOperand* function) {
inputs_[0] = function;
@@ -1332,19 +1590,17 @@ class LLoadFunctionPrototype: public LTemplateInstruction<1, 1, 0> {
};
-class LLoadElements: public LTemplateInstruction<1, 1, 0> {
+class LLoadRoot V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
- explicit LLoadElements(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
+ DECLARE_CONCRETE_INSTRUCTION(LoadRoot, "load-root")
+ DECLARE_HYDROGEN_ACCESSOR(LoadRoot)
- DECLARE_CONCRETE_INSTRUCTION(LoadElements, "load-elements")
+ Heap::RootListIndex index() const { return hydrogen()->index(); }
};
-class LLoadExternalArrayPointer: public LTemplateInstruction<1, 1, 0> {
+class LLoadExternalArrayPointer V8_FINAL
+ : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadExternalArrayPointer(LOperand* object) {
inputs_[0] = object;
@@ -1357,90 +1613,62 @@ class LLoadExternalArrayPointer: public LTemplateInstruction<1, 1, 0> {
};
-class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> {
+class LLoadKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- LLoadKeyedFastElement(LOperand* elements, LOperand* key) {
+ LLoadKeyed(LOperand* elements, LOperand* key) {
inputs_[0] = elements;
inputs_[1] = key;
}
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastElement, "load-keyed-fast-element")
- DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastElement)
-
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
-};
-
-
-class LLoadKeyedFastDoubleElement: public LTemplateInstruction<1, 2, 0> {
- public:
- LLoadKeyedFastDoubleElement(LOperand* elements, LOperand* key) {
- inputs_[0] = elements;
- inputs_[1] = key;
+ ElementsKind elements_kind() const {
+ return hydrogen()->elements_kind();
}
-
- LOperand* elements() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastDoubleElement,
- "load-keyed-fast-double-element")
- DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastDoubleElement)
-
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
-};
-
-
-class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> {
- public:
- LLoadKeyedSpecializedArrayElement(LOperand* external_pointer, LOperand* key) {
- inputs_[0] = external_pointer;
- inputs_[1] = key;
+ bool is_external() const {
+ return hydrogen()->is_external();
}
- LOperand* external_pointer() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedSpecializedArrayElement,
- "load-keyed-specialized-array-element")
- DECLARE_HYDROGEN_ACCESSOR(LoadKeyedSpecializedArrayElement)
+ DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
+ DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
- ElementsKind elements_kind() const {
- return hydrogen()->elements_kind();
- }
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
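
The separate fast-element, fast-double-element and specialized-array load instructions collapse here into a single LLoadKeyed that carries is_external() and elements_kind(). A standalone sketch of the resulting dispatch shape, with a stand-in enum rather than V8's ElementsKind:

#include <cstdio>

enum FakeElementsKind { FAST_TAGGED, FAST_DOUBLE, EXTERNAL_TYPED };

struct FakeKeyedLoad {
  FakeElementsKind kind;
  bool is_external() const { return kind == EXTERNAL_TYPED; }
};

const char* Describe(const FakeKeyedLoad& load) {
  if (load.is_external()) return "load through the external (typed array) pointer";
  if (load.kind == FAST_DOUBLE) return "load an unboxed double from the backing store";
  return "load a tagged element from the backing store";
}

int main() {
  const FakeKeyedLoad loads[] = {{FAST_TAGGED}, {FAST_DOUBLE}, {EXTERNAL_TYPED}};
  for (const FakeKeyedLoad& load : loads) printf("%s\n", Describe(load));
  return 0;
}
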
-class LLoadKeyedGeneric: public LTemplateInstruction<1, 2, 0> {
+class LLoadKeyedGeneric V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
- LLoadKeyedGeneric(LOperand* object, LOperand* key) {
- inputs_[0] = object;
- inputs_[1] = key;
+ LLoadKeyedGeneric(LOperand* context, LOperand* object, LOperand* key) {
+ inputs_[0] = context;
+ inputs_[1] = object;
+ inputs_[2] = key;
}
- LOperand* object() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+ LOperand* key() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
};
-class LLoadGlobalCell: public LTemplateInstruction<1, 0, 0> {
+class LLoadGlobalCell V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
};
-class LLoadGlobalGeneric: public LTemplateInstruction<1, 1, 0> {
+class LLoadGlobalGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LLoadGlobalGeneric(LOperand* global_object) {
- inputs_[0] = global_object;
+ LLoadGlobalGeneric(LOperand* context, LOperand* global_object) {
+ inputs_[0] = context;
+ inputs_[1] = global_object;
}
- LOperand* global_object() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* global_object() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
@@ -1450,7 +1678,7 @@ class LLoadGlobalGeneric: public LTemplateInstruction<1, 1, 0> {
};
-class LStoreGlobalCell: public LTemplateInstruction<0, 1, 1> {
+class LStoreGlobalCell V8_FINAL : public LTemplateInstruction<0, 1, 1> {
public:
LStoreGlobalCell(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1465,16 +1693,19 @@ class LStoreGlobalCell: public LTemplateInstruction<0, 1, 1> {
};
-class LStoreGlobalGeneric: public LTemplateInstruction<0, 2, 0> {
+class LStoreGlobalGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
public:
- explicit LStoreGlobalGeneric(LOperand* global_object,
- LOperand* value) {
- inputs_[0] = global_object;
- inputs_[1] = value;
+ LStoreGlobalGeneric(LOperand* context,
+ LOperand* global_object,
+ LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = global_object;
+ inputs_[2] = value;
}
- LOperand* global_object() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* global_object() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(StoreGlobalGeneric, "store-global-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreGlobalGeneric)
@@ -1484,7 +1715,7 @@ class LStoreGlobalGeneric: public LTemplateInstruction<0, 2, 0> {
};
-class LLoadContextSlot: public LTemplateInstruction<1, 1, 0> {
+class LLoadContextSlot V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadContextSlot(LOperand* context) {
inputs_[0] = context;
@@ -1497,11 +1728,11 @@ class LLoadContextSlot: public LTemplateInstruction<1, 1, 0> {
int slot_index() { return hydrogen()->slot_index(); }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LStoreContextSlot: public LTemplateInstruction<0, 2, 0> {
+class LStoreContextSlot V8_FINAL : public LTemplateInstruction<0, 2, 0> {
public:
LStoreContextSlot(LOperand* context, LOperand* value) {
inputs_[0] = context;
@@ -1516,11 +1747,11 @@ class LStoreContextSlot: public LTemplateInstruction<0, 2, 0> {
int slot_index() { return hydrogen()->slot_index(); }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LPushArgument: public LTemplateInstruction<0, 1, 0> {
+class LPushArgument V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LPushArgument(LOperand* value) {
inputs_[0] = value;
@@ -1532,7 +1763,7 @@ class LPushArgument: public LTemplateInstruction<0, 1, 0> {
};
-class LDrop: public LTemplateInstruction<0, 0, 0> {
+class LDrop V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
explicit LDrop(int count) : count_(count) { }
@@ -1545,20 +1776,54 @@ class LDrop: public LTemplateInstruction<0, 0, 0> {
};
-class LThisFunction: public LTemplateInstruction<1, 0, 0> {
+class LStoreCodeEntry V8_FINAL: public LTemplateInstruction<0, 1, 1> {
+ public:
+ LStoreCodeEntry(LOperand* function, LOperand* code_object) {
+ inputs_[0] = function;
+ temps_[0] = code_object;
+ }
+
+ LOperand* function() { return inputs_[0]; }
+ LOperand* code_object() { return temps_[0]; }
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreCodeEntry, "store-code-entry")
+ DECLARE_HYDROGEN_ACCESSOR(StoreCodeEntry)
+};
+
+
+class LInnerAllocatedObject V8_FINAL: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LInnerAllocatedObject(LOperand* base_object) {
+ inputs_[0] = base_object;
+ }
+
+ LOperand* base_object() { return inputs_[0]; }
+ int offset() { return hydrogen()->offset(); }
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "sub-allocated-object")
+ DECLARE_HYDROGEN_ACCESSOR(InnerAllocatedObject)
+};
+
+
+class LThisFunction V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
};
-class LContext: public LTemplateInstruction<1, 0, 0> {
+class LContext V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(Context, "context")
+ DECLARE_HYDROGEN_ACCESSOR(Context)
};
-class LOuterContext: public LTemplateInstruction<1, 1, 0> {
+class LOuterContext V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LOuterContext(LOperand* context) {
inputs_[0] = context;
@@ -1570,14 +1835,20 @@ class LOuterContext: public LTemplateInstruction<1, 1, 0> {
};
-class LDeclareGlobals: public LTemplateInstruction<0, 0, 0> {
+class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
+ explicit LDeclareGlobals(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals")
DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals)
};
-class LGlobalObject: public LTemplateInstruction<1, 1, 0> {
+class LGlobalObject V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LGlobalObject(LOperand* context) {
inputs_[0] = context;
@@ -1589,7 +1860,7 @@ class LGlobalObject: public LTemplateInstruction<1, 1, 0> {
};
-class LGlobalReceiver: public LTemplateInstruction<1, 1, 0> {
+class LGlobalReceiver V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LGlobalReceiver(LOperand* global_object) {
inputs_[0] = global_object;
@@ -1601,73 +1872,84 @@ class LGlobalReceiver: public LTemplateInstruction<1, 1, 0> {
};
-class LCallConstantFunction: public LTemplateInstruction<1, 0, 0> {
+class LCallConstantFunction V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallConstantFunction, "call-constant-function")
DECLARE_HYDROGEN_ACCESSOR(CallConstantFunction)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<JSFunction> function() { return hydrogen()->function(); }
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LInvokeFunction: public LTemplateInstruction<1, 1, 0> {
+class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LInvokeFunction(LOperand* function) {
- inputs_[0] = function;
+ LInvokeFunction(LOperand* context, LOperand* function) {
+ inputs_[0] = context;
+ inputs_[1] = function;
}
- LOperand* function() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* function() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
- Handle<JSFunction> known_function() { return hydrogen()->known_function(); }
};
-class LCallKeyed: public LTemplateInstruction<1, 1, 0> {
+class LCallKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LCallKeyed(LOperand* key) {
- inputs_[0] = key;
+ LCallKeyed(LOperand* context, LOperand* key) {
+ inputs_[0] = context;
+ inputs_[1] = key;
}
- LOperand* key() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call-keyed")
DECLARE_HYDROGEN_ACCESSOR(CallKeyed)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallNamed: public LTemplateInstruction<1, 0, 0> {
+class LCallNamed V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
+ explicit LCallNamed(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CallNamed, "call-named")
DECLARE_HYDROGEN_ACCESSOR(CallNamed)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<String> name() const { return hydrogen()->name(); }
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallFunction: public LTemplateInstruction<1, 1, 0> {
+class LCallFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LCallFunction(LOperand* function) {
- inputs_[0] = function;
+ LCallFunction(LOperand* context, LOperand* function) {
+ inputs_[0] = context;
+ inputs_[1] = function;
}
- LOperand* function() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* function() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
DECLARE_HYDROGEN_ACCESSOR(CallFunction)
@@ -1676,58 +1958,95 @@ class LCallFunction: public LTemplateInstruction<1, 1, 0> {
};
-class LCallGlobal: public LTemplateInstruction<1, 0, 0> {
+class LCallGlobal V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
+ explicit LCallGlobal(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call-global")
DECLARE_HYDROGEN_ACCESSOR(CallGlobal)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<String> name() const {return hydrogen()->name(); }
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallKnownGlobal: public LTemplateInstruction<1, 0, 0> {
+class LCallKnownGlobal V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallKnownGlobal, "call-known-global")
DECLARE_HYDROGEN_ACCESSOR(CallKnownGlobal)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- Handle<JSFunction> target() const { return hydrogen()->target(); }
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallNew: public LTemplateInstruction<1, 1, 0> {
+class LCallNew V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LCallNew(LOperand* constructor) {
- inputs_[0] = constructor;
+ LCallNew(LOperand* context, LOperand* constructor) {
+ inputs_[0] = context;
+ inputs_[1] = constructor;
}
- LOperand* constructor() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* constructor() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
DECLARE_HYDROGEN_ACCESSOR(CallNew)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallRuntime: public LTemplateInstruction<1, 0, 0> {
+class LCallNewArray V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
+ LCallNewArray(LOperand* context, LOperand* constructor) {
+ inputs_[0] = context;
+ inputs_[1] = constructor;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* constructor() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
+ DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LCallRuntime(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
+ virtual bool ClobbersDoubleRegisters() const V8_OVERRIDE {
+ return save_doubles() == kDontSaveFPRegs;
+ }
+
const Runtime::Function* function() const { return hydrogen()->function(); }
int arity() const { return hydrogen()->argument_count(); }
+ SaveFPRegsMode save_doubles() const { return hydrogen()->save_doubles(); }
};
-class LInteger32ToDouble: public LTemplateInstruction<1, 1, 0> {
+class LInteger32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LInteger32ToDouble(LOperand* value) {
inputs_[0] = value;
@@ -1739,7 +2058,20 @@ class LInteger32ToDouble: public LTemplateInstruction<1, 1, 0> {
};
-class LUint32ToDouble: public LTemplateInstruction<1, 1, 0> {
+class LInteger32ToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LInteger32ToSmi(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Integer32ToSmi, "int32-to-smi")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
+class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LUint32ToDouble(LOperand* value) {
inputs_[0] = value;
@@ -1751,7 +2083,20 @@ class LUint32ToDouble: public LTemplateInstruction<1, 1, 0> {
};
-class LNumberTagI: public LTemplateInstruction<1, 1, 0> {
+class LUint32ToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LUint32ToSmi(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Uint32ToSmi, "uint32-to-smi")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
+class LNumberTagI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LNumberTagI(LOperand* value) {
inputs_[0] = value;
@@ -1763,7 +2108,7 @@ class LNumberTagI: public LTemplateInstruction<1, 1, 0> {
};
-class LNumberTagU: public LTemplateInstruction<1, 1, 0> {
+class LNumberTagU V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LNumberTagU(LOperand* value) {
inputs_[0] = value;
@@ -1775,7 +2120,7 @@ class LNumberTagU: public LTemplateInstruction<1, 1, 0> {
};
-class LNumberTagD: public LTemplateInstruction<1, 1, 2> {
+class LNumberTagD V8_FINAL : public LTemplateInstruction<1, 1, 2> {
public:
LNumberTagD(LOperand* value, LOperand* temp, LOperand* temp2) {
inputs_[0] = value;
@@ -1788,21 +2133,33 @@ class LNumberTagD: public LTemplateInstruction<1, 1, 2> {
LOperand* temp2() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
+class LDoubleToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LDoubleToSmi(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DoubleToSmi, "double-to-smi")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+
+ bool truncating() { return hydrogen()->CanTruncateToInt32(); }
};
// Sometimes truncating conversion from a tagged value to an int32.
-class LDoubleToI: public LTemplateInstruction<1, 1, 2> {
+class LDoubleToI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
- LDoubleToI(LOperand* value, LOperand* temp, LOperand* temp2) {
+ explicit LDoubleToI(LOperand* value) {
inputs_[0] = value;
- temps_[0] = temp;
- temps_[1] = temp2;
}
LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
@@ -1812,31 +2169,28 @@ class LDoubleToI: public LTemplateInstruction<1, 1, 2> {
// Truncating conversion from a tagged value to an int32.
-class LTaggedToI: public LTemplateInstruction<1, 1, 3> {
+class LTaggedToI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
public:
LTaggedToI(LOperand* value,
LOperand* temp,
- LOperand* temp2,
- LOperand* temp3) {
+ LOperand* temp2) {
inputs_[0] = value;
temps_[0] = temp;
temps_[1] = temp2;
- temps_[2] = temp3;
}
LOperand* value() { return inputs_[0]; }
LOperand* temp() { return temps_[0]; }
LOperand* temp2() { return temps_[1]; }
- LOperand* temp3() { return temps_[2]; }
DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
- DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+ DECLARE_HYDROGEN_ACCESSOR(Change)
bool truncating() { return hydrogen()->CanTruncateToInt32(); }
};
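
The truncating() accessor distinguishes two conversion contracts: a truncating conversion may apply ToInt32-style wraparound, while a non-truncating one must bail out (deoptimize) whenever the double is not exactly an int32. A standalone sketch of that distinction, separate from the patch, with a boolean return standing in for the real deopt path:

#include <cmath>
#include <cstdint>
#include <cstdio>

// Truncating contract: keep the low 32 bits of the integer part (ToInt32
// wraparound); NaN/infinity handling is omitted in this sketch.
int32_t TruncatingToInt32(double d) {
  double wrapped = std::fmod(std::trunc(d), 4294967296.0);
  if (wrapped < 0) wrapped += 4294967296.0;
  return static_cast<int32_t>(static_cast<uint32_t>(wrapped));
}

// Non-truncating contract: succeed only if the double is exactly an int32;
// a false return stands in for a deoptimization in the real code.
bool StrictToInt32(double d, int32_t* out) {
  if (!(d >= INT32_MIN && d <= INT32_MAX)) return false;
  int32_t i = static_cast<int32_t>(d);
  if (static_cast<double>(i) != d) return false;  // fractional part lost
  *out = i;
  return true;
}

int main() {
  int32_t v = 0;
  printf("%d\n", TruncatingToInt32(4294967298.5));  // wraps to 2
  printf("%d %d\n", StrictToInt32(2.0, &v), v);     // 1 2
  printf("%d\n", StrictToInt32(2.5, &v));           // 0 (would deopt)
  return 0;
}
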
-class LSmiTag: public LTemplateInstruction<1, 1, 0> {
+class LSmiTag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LSmiTag(LOperand* value) {
inputs_[0] = value;
@@ -1848,7 +2202,7 @@ class LSmiTag: public LTemplateInstruction<1, 1, 0> {
};
-class LNumberUntagD: public LTemplateInstruction<1, 1, 0> {
+class LNumberUntagD V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LNumberUntagD(LOperand* value) {
inputs_[0] = value;
@@ -1861,7 +2215,7 @@ class LNumberUntagD: public LTemplateInstruction<1, 1, 0> {
};
-class LSmiUntag: public LTemplateInstruction<1, 1, 0> {
+class LSmiUntag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
LSmiUntag(LOperand* value, bool needs_check)
: needs_check_(needs_check) {
@@ -1878,7 +2232,7 @@ class LSmiUntag: public LTemplateInstruction<1, 1, 0> {
};
-class LStoreNamedField: public LTemplateInstruction<0, 2, 1> {
+class LStoreNamedField V8_FINAL : public LTemplateInstruction<0, 2, 1> {
public:
LStoreNamedField(LOperand* object, LOperand* value, LOperand* temp) {
inputs_[0] = object;
@@ -1893,222 +2247,203 @@ class LStoreNamedField: public LTemplateInstruction<0, 2, 1> {
DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- Handle<Object> name() const { return hydrogen()->name(); }
- bool is_in_object() { return hydrogen()->is_in_object(); }
- int offset() { return hydrogen()->offset(); }
- Handle<Map> transition() const { return hydrogen()->transition(); }
+ Handle<Map> transition() const { return hydrogen()->transition_map(); }
+ Representation representation() const {
+ return hydrogen()->field_representation();
+ }
};
-class LStoreNamedGeneric: public LTemplateInstruction<0, 2, 0> {
+class LStoreNamedGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
public:
- LStoreNamedGeneric(LOperand* object, LOperand* value) {
- inputs_[0] = object;
- inputs_[1] = value;
+ LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = object;
+ inputs_[2] = value;
}
- LOperand* object() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<Object> name() const { return hydrogen()->name(); }
StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
};
-class LStoreKeyedFastElement: public LTemplateInstruction<0, 3, 0> {
+class LStoreKeyed V8_FINAL : public LTemplateInstruction<0, 3, 0> {
public:
- LStoreKeyedFastElement(LOperand* object, LOperand* key, LOperand* value) {
+ LStoreKeyed(LOperand* object, LOperand* key, LOperand* value) {
inputs_[0] = object;
inputs_[1] = key;
inputs_[2] = value;
}
- LOperand* object() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement,
- "store-keyed-fast-element")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastElement)
-
- virtual void PrintDataTo(StringStream* stream);
-
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
-};
-
-
-class LStoreKeyedFastDoubleElement: public LTemplateInstruction<0, 3, 0> {
- public:
- LStoreKeyedFastDoubleElement(LOperand* elements,
- LOperand* key,
- LOperand* value) {
- inputs_[0] = elements;
- inputs_[1] = key;
- inputs_[2] = value;
- }
-
+ bool is_external() const { return hydrogen()->is_external(); }
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
+ ElementsKind elements_kind() const {
+ return hydrogen()->elements_kind();
+ }
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastDoubleElement,
- "store-keyed-fast-double-element")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastDoubleElement)
-
- virtual void PrintDataTo(StringStream* stream);
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed")
+ DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ bool NeedsCanonicalization() {
+ if (hydrogen()->value()->IsAdd() || hydrogen()->value()->IsSub() ||
+ hydrogen()->value()->IsMul() || hydrogen()->value()->IsDiv()) {
+ return false;
+ }
+ return hydrogen()->NeedsCanonicalization();
+ }
uint32_t additional_index() const { return hydrogen()->index_offset(); }
-
- bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
};
-class LStoreKeyedGeneric: public LTemplateInstruction<0, 3, 0> {
+class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 4, 0> {
public:
- LStoreKeyedGeneric(LOperand* obj, LOperand* key, LOperand* value) {
- inputs_[0] = obj;
- inputs_[1] = key;
- inputs_[2] = value;
+ LStoreKeyedGeneric(LOperand* context,
+ LOperand* obj,
+ LOperand* key,
+ LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = obj;
+ inputs_[2] = key;
+ inputs_[3] = value;
}
- LOperand* object() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+ LOperand* key() { return inputs_[2]; }
+ LOperand* value() { return inputs_[3]; }
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
};
-class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 0> {
- public:
- LStoreKeyedSpecializedArrayElement(LOperand* external_pointer,
- LOperand* key,
- LOperand* value) {
- inputs_[0] = external_pointer;
- inputs_[1] = key;
- inputs_[2] = value;
- }
-
- LOperand* external_pointer() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedSpecializedArrayElement,
- "store-keyed-specialized-array-element")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyedSpecializedArrayElement)
-
- ElementsKind elements_kind() const { return hydrogen()->elements_kind(); }
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
-};
-
-class LTransitionElementsKind: public LTemplateInstruction<1, 1, 2> {
+class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 2, 1> {
public:
LTransitionElementsKind(LOperand* object,
- LOperand* new_map_temp,
- LOperand* temp) {
+ LOperand* context,
+ LOperand* new_map_temp) {
inputs_[0] = object;
+ inputs_[1] = context;
temps_[0] = new_map_temp;
- temps_[1] = temp;
}
+ LOperand* context() { return inputs_[1]; }
LOperand* object() { return inputs_[0]; }
LOperand* new_map_temp() { return temps_[0]; }
- LOperand* temp() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind,
"transition-elements-kind")
DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- Handle<Map> original_map() { return hydrogen()->original_map(); }
- Handle<Map> transitioned_map() { return hydrogen()->transitioned_map(); }
+ Handle<Map> original_map() { return hydrogen()->original_map().handle(); }
+ Handle<Map> transitioned_map() {
+ return hydrogen()->transitioned_map().handle();
+ }
+ ElementsKind from_kind() { return hydrogen()->from_kind(); }
+ ElementsKind to_kind() { return hydrogen()->to_kind(); }
};
-class LStringAdd: public LTemplateInstruction<1, 2, 0> {
+class LTrapAllocationMemento V8_FINAL : public LTemplateInstruction<0, 1, 1> {
public:
- LStringAdd(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
+ LTrapAllocationMemento(LOperand* object,
+ LOperand* temp) {
+ inputs_[0] = object;
+ temps_[0] = temp;
}
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
+ LOperand* object() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
- DECLARE_HYDROGEN_ACCESSOR(StringAdd)
+ DECLARE_CONCRETE_INSTRUCTION(TrapAllocationMemento,
+ "trap-allocation-memento")
};
-
-class LStringCharCodeAt: public LTemplateInstruction<1, 2, 0> {
+class LStringAdd V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
- LStringCharCodeAt(LOperand* string, LOperand* index) {
- inputs_[0] = string;
- inputs_[1] = index;
+ LStringAdd(LOperand* context, LOperand* left, LOperand* right) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
}
- LOperand* string() { return inputs_[0]; }
- LOperand* index() { return inputs_[1]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
- DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
- DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
+ DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
+ DECLARE_HYDROGEN_ACCESSOR(StringAdd)
};
-class LStringCharFromCode: public LTemplateInstruction<1, 1, 0> {
+
+class LStringCharCodeAt V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
- explicit LStringCharFromCode(LOperand* char_code) {
- inputs_[0] = char_code;
+ LStringCharCodeAt(LOperand* context, LOperand* string, LOperand* index) {
+ inputs_[0] = context;
+ inputs_[1] = string;
+ inputs_[2] = index;
}
- LOperand* char_code() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* string() { return inputs_[1]; }
+ LOperand* index() { return inputs_[2]; }
- DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code")
- DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode)
+ DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
+ DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
};
-class LStringLength: public LTemplateInstruction<1, 1, 0> {
+class LStringCharFromCode V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LStringLength(LOperand* string) {
- inputs_[0] = string;
+ explicit LStringCharFromCode(LOperand* context, LOperand* char_code) {
+ inputs_[0] = context;
+ inputs_[1] = char_code;
}
- LOperand* string() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* char_code() { return inputs_[1]; }
- DECLARE_CONCRETE_INSTRUCTION(StringLength, "string-length")
- DECLARE_HYDROGEN_ACCESSOR(StringLength)
+ DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code")
+ DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode)
};
-class LCheckFunction: public LTemplateInstruction<0, 1, 0> {
+class LCheckValue V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
- explicit LCheckFunction(LOperand* value) {
+ explicit LCheckValue(LOperand* value) {
inputs_[0] = value;
}
LOperand* value() { return inputs_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check-function")
- DECLARE_HYDROGEN_ACCESSOR(CheckFunction)
+ DECLARE_CONCRETE_INSTRUCTION(CheckValue, "check-value")
+ DECLARE_HYDROGEN_ACCESSOR(CheckValue)
};
-class LCheckInstanceType: public LTemplateInstruction<0, 1, 0> {
+class LCheckInstanceType V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LCheckInstanceType(LOperand* value) {
inputs_[0] = value;
@@ -2121,7 +2456,7 @@ class LCheckInstanceType: public LTemplateInstruction<0, 1, 0> {
};
-class LCheckMaps: public LTemplateInstruction<0, 1, 0> {
+class LCheckMaps V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LCheckMaps(LOperand* value) {
inputs_[0] = value;
@@ -2134,25 +2469,7 @@ class LCheckMaps: public LTemplateInstruction<0, 1, 0> {
};
-class LCheckPrototypeMaps: public LTemplateInstruction<0, 0, 2> {
- public:
- LCheckPrototypeMaps(LOperand* temp, LOperand* temp2) {
- temps_[0] = temp;
- temps_[1] = temp2;
- }
-
- LOperand* temp() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps, "check-prototype-maps")
- DECLARE_HYDROGEN_ACCESSOR(CheckPrototypeMaps)
-
- Handle<JSObject> prototype() const { return hydrogen()->prototype(); }
- Handle<JSObject> holder() const { return hydrogen()->holder(); }
-};
-
-
-class LCheckSmi: public LTemplateInstruction<0, 1, 0> {
+class LCheckSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCheckSmi(LOperand* value) {
inputs_[0] = value;
@@ -2164,7 +2481,7 @@ class LCheckSmi: public LTemplateInstruction<0, 1, 0> {
};
-class LCheckNonSmi: public LTemplateInstruction<0, 1, 0> {
+class LCheckNonSmi V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LCheckNonSmi(LOperand* value) {
inputs_[0] = value;
@@ -2173,24 +2490,23 @@ class LCheckNonSmi: public LTemplateInstruction<0, 1, 0> {
LOperand* value() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi")
+ DECLARE_HYDROGEN_ACCESSOR(CheckHeapObject)
};
-class LClampDToUint8: public LTemplateInstruction<1, 1, 1> {
+class LClampDToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
- LClampDToUint8(LOperand* unclamped, LOperand* temp) {
+ explicit LClampDToUint8(LOperand* unclamped) {
inputs_[0] = unclamped;
- temps_[0] = temp;
}
LOperand* unclamped() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(ClampDToUint8, "clamp-d-to-uint8")
};
-class LClampIToUint8: public LTemplateInstruction<1, 1, 0> {
+class LClampIToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LClampIToUint8(LOperand* unclamped) {
inputs_[0] = unclamped;
@@ -2202,7 +2518,7 @@ class LClampIToUint8: public LTemplateInstruction<1, 1, 0> {
};
-class LClampTToUint8: public LTemplateInstruction<1, 1, 1> {
+class LClampTToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LClampTToUint8(LOperand* unclamped, LOperand* temp) {
inputs_[0] = unclamped;
@@ -2216,59 +2532,55 @@ class LClampTToUint8: public LTemplateInstruction<1, 1, 1> {
};
-class LAllocateObject: public LTemplateInstruction<1, 0, 2> {
+class LAllocate V8_FINAL : public LTemplateInstruction<1, 2, 2> {
public:
- LAllocateObject(LOperand* temp, LOperand* temp2) {
- temps_[0] = temp;
+ LAllocate(LOperand* context,
+ LOperand* size,
+ LOperand* temp1,
+ LOperand* temp2) {
+ inputs_[0] = context;
+ inputs_[1] = size;
+ temps_[0] = temp1;
temps_[1] = temp2;
}
- LOperand* temp() { return temps_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* size() { return inputs_[1]; }
+ LOperand* temp1() { return temps_[0]; }
LOperand* temp2() { return temps_[1]; }
- DECLARE_CONCRETE_INSTRUCTION(AllocateObject, "allocate-object")
- DECLARE_HYDROGEN_ACCESSOR(AllocateObject)
+ DECLARE_CONCRETE_INSTRUCTION(Allocate, "allocate")
+ DECLARE_HYDROGEN_ACCESSOR(Allocate)
};
-class LFastLiteral: public LTemplateInstruction<1, 0, 0> {
+class LRegExpLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
- DECLARE_CONCRETE_INSTRUCTION(FastLiteral, "fast-literal")
- DECLARE_HYDROGEN_ACCESSOR(FastLiteral)
-};
-
-
-class LArrayLiteral: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral, "array-literal")
- DECLARE_HYDROGEN_ACCESSOR(ArrayLiteral)
-};
-
-
-class LObjectLiteral: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral, "object-literal")
- DECLARE_HYDROGEN_ACCESSOR(ObjectLiteral)
-};
+ explicit LRegExpLiteral(LOperand* context) {
+ inputs_[0] = context;
+ }
+ LOperand* context() { return inputs_[0]; }
-class LRegExpLiteral: public LTemplateInstruction<1, 0, 0> {
- public:
DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal")
DECLARE_HYDROGEN_ACCESSOR(RegExpLiteral)
};
-class LFunctionLiteral: public LTemplateInstruction<1, 0, 0> {
+class LFunctionLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
+ explicit LFunctionLiteral(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal")
DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral)
-
- Handle<SharedFunctionInfo> shared_info() { return hydrogen()->shared_info(); }
};
-class LToFastProperties: public LTemplateInstruction<1, 1, 0> {
+class LToFastProperties V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LToFastProperties(LOperand* value) {
inputs_[0] = value;
@@ -2281,19 +2593,21 @@ class LToFastProperties: public LTemplateInstruction<1, 1, 0> {
};
-class LTypeof: public LTemplateInstruction<1, 1, 0> {
+class LTypeof V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LTypeof(LOperand* value) {
- inputs_[0] = value;
+ LTypeof(LOperand* context, LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = value;
}
- LOperand* value() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
};
-class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
+class LTypeofIsAndBranch V8_FINAL : public LControlInstruction<1, 0> {
public:
explicit LTypeofIsAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -2306,11 +2620,11 @@ class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
Handle<String> type_literal() { return hydrogen()->type_literal(); }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LIsConstructCallAndBranch: public LControlInstruction<0, 1> {
+class LIsConstructCallAndBranch V8_FINAL : public LControlInstruction<0, 1> {
public:
explicit LIsConstructCallAndBranch(LOperand* temp) {
temps_[0] = temp;
@@ -2323,45 +2637,25 @@ class LIsConstructCallAndBranch: public LControlInstruction<0, 1> {
};
-class LDeleteProperty: public LTemplateInstruction<1, 2, 0> {
+class LOsrEntry V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
- LDeleteProperty(LOperand* object, LOperand* key) {
- inputs_[0] = object;
- inputs_[1] = key;
- }
-
- LOperand* object() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
+ LOsrEntry() {}
- DECLARE_CONCRETE_INSTRUCTION(DeleteProperty, "delete-property")
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ return false;
+ }
+ DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
};
-class LOsrEntry: public LTemplateInstruction<0, 0, 0> {
+class LStackCheck V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
- LOsrEntry();
-
- DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
-
- LOperand** SpilledRegisterArray() { return register_spills_; }
- LOperand** SpilledDoubleRegisterArray() { return double_register_spills_; }
-
- void MarkSpilledRegister(int allocation_index, LOperand* spill_operand);
- void MarkSpilledDoubleRegister(int allocation_index,
- LOperand* spill_operand);
-
- private:
- // Arrays of spill slot operands for registers with an assigned spill
- // slot, i.e., that must also be restored to the spill slot on OSR entry.
- // NULL if the register has no assigned spill slot. Indexed by allocation
- // index.
- LOperand* register_spills_[Register::kNumAllocatableRegisters];
- LOperand* double_register_spills_[DoubleRegister::kNumAllocatableRegisters];
-};
+ explicit LStackCheck(LOperand* context) {
+ inputs_[0] = context;
+ }
+ LOperand* context() { return inputs_[0]; }
-class LStackCheck: public LTemplateInstruction<0, 0, 0> {
- public:
DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
DECLARE_HYDROGEN_ACCESSOR(StackCheck)
@@ -2372,33 +2666,21 @@ class LStackCheck: public LTemplateInstruction<0, 0, 0> {
};
-class LIn: public LTemplateInstruction<1, 2, 0> {
+class LForInPrepareMap V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- LIn(LOperand* key, LOperand* object) {
- inputs_[0] = key;
+ LForInPrepareMap(LOperand* context, LOperand* object) {
+ inputs_[0] = context;
inputs_[1] = object;
}
- LOperand* key() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
LOperand* object() { return inputs_[1]; }
- DECLARE_CONCRETE_INSTRUCTION(In, "in")
-};
-
-
-class LForInPrepareMap: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LForInPrepareMap(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap, "for-in-prepare-map")
};
-class LForInCacheArray: public LTemplateInstruction<1, 1, 0> {
+class LForInCacheArray V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LForInCacheArray(LOperand* map) {
inputs_[0] = map;
@@ -2414,7 +2696,7 @@ class LForInCacheArray: public LTemplateInstruction<1, 1, 0> {
};
-class LCheckMapValue: public LTemplateInstruction<0, 2, 0> {
+class LCheckMapValue V8_FINAL : public LTemplateInstruction<0, 2, 0> {
public:
LCheckMapValue(LOperand* value, LOperand* map) {
inputs_[0] = value;
@@ -2428,7 +2710,7 @@ class LCheckMapValue: public LTemplateInstruction<0, 2, 0> {
};
-class LLoadFieldByIndex: public LTemplateInstruction<1, 2, 0> {
+class LLoadFieldByIndex V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LLoadFieldByIndex(LOperand* object, LOperand* index) {
inputs_[0] = object;
@@ -2443,17 +2725,17 @@ class LLoadFieldByIndex: public LTemplateInstruction<1, 2, 0> {
class LChunkBuilder;
-class LPlatformChunk: public LChunk {
+class LPlatformChunk V8_FINAL : public LChunk {
public:
LPlatformChunk(CompilationInfo* info, HGraph* graph)
: LChunk(info, graph) { }
- int GetNextSpillIndex(bool is_double);
- LOperand* GetNextSpillSlot(bool is_double);
+ int GetNextSpillIndex(RegisterKind kind);
+ LOperand* GetNextSpillSlot(RegisterKind kind);
};
-class LChunkBuilder BASE_EMBEDDED {
+class LChunkBuilder V8_FINAL BASE_EMBEDDED {
public:
LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
: chunk_(NULL),
@@ -2473,15 +2755,31 @@ class LChunkBuilder BASE_EMBEDDED {
// Build the sequence for the graph.
LPlatformChunk* Build();
+ LInstruction* CheckElideControlInstruction(HControlInstruction* instr);
+
// Declare methods that deal with the individual node types.
#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
#undef DECLARE_DO
+ LInstruction* DoMultiplyAdd(HMul* mul, HValue* addend);
+ LInstruction* DoMultiplySub(HValue* minuend, HMul* mul);
+ LInstruction* DoRSub(HSub* instr);
+
static bool HasMagicNumberForDivisor(int32_t divisor);
- static HValue* SimplifiedDividendForMathFloorOfDiv(HValue* val);
static HValue* SimplifiedDivisorForMathFloorOfDiv(HValue* val);
+ LInstruction* DoMathFloor(HUnaryMathOperation* instr);
+ LInstruction* DoMathRound(HUnaryMathOperation* instr);
+ LInstruction* DoMathAbs(HUnaryMathOperation* instr);
+ LInstruction* DoMathLog(HUnaryMathOperation* instr);
+ LInstruction* DoMathSin(HUnaryMathOperation* instr);
+ LInstruction* DoMathCos(HUnaryMathOperation* instr);
+ LInstruction* DoMathTan(HUnaryMathOperation* instr);
+ LInstruction* DoMathExp(HUnaryMathOperation* instr);
+ LInstruction* DoMathSqrt(HUnaryMathOperation* instr);
+ LInstruction* DoMathPowHalf(HUnaryMathOperation* instr);
+
private:
enum Status {
UNUSED,
@@ -2500,7 +2798,7 @@ class LChunkBuilder BASE_EMBEDDED {
bool is_done() const { return status_ == DONE; }
bool is_aborted() const { return status_ == ABORTED; }
- void Abort(const char* reason);
+ void Abort(BailoutReason reason);
// Methods for getting operands for Use / Define / Temp.
LUnallocated* ToUnallocated(Register reg);
@@ -2538,6 +2836,9 @@ class LChunkBuilder BASE_EMBEDDED {
MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value);
MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value);
+ // An input operand in a constant operand.
+ MUST_USE_RESULT LOperand* UseConstant(HValue* value);
+
// An input operand in register, stack slot or a constant operand.
// Will not be moved to a register even if one is freely available.
MUST_USE_RESULT LOperand* UseAny(HValue* value);
@@ -2579,7 +2880,8 @@ class LChunkBuilder BASE_EMBEDDED {
CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
- int* argument_index_accumulator);
+ int* argument_index_accumulator,
+ ZoneList<HValue*>* objects_to_materialize);
void VisitInstruction(HInstruction* current);
@@ -2588,7 +2890,7 @@ class LChunkBuilder BASE_EMBEDDED {
LInstruction* DoArithmeticD(Token::Value op,
HArithmeticBinaryOperation* instr);
LInstruction* DoArithmeticT(Token::Value op,
- HArithmeticBinaryOperation* instr);
+ HBinaryOperation* instr);
LPlatformChunk* chunk_;
CompilationInfo* info_;
diff --git a/deps/v8/src/arm/lithium-codegen-arm.cc b/deps/v8/src/arm/lithium-codegen-arm.cc
index 6f5aa436a..8b22e625b 100644
--- a/deps/v8/src/arm/lithium-codegen-arm.cc
+++ b/deps/v8/src/arm/lithium-codegen-arm.cc
@@ -31,12 +31,13 @@
#include "arm/lithium-gap-resolver-arm.h"
#include "code-stubs.h"
#include "stub-cache.h"
+#include "hydrogen-osr.h"
namespace v8 {
namespace internal {
-class SafepointGenerator : public CallWrapper {
+class SafepointGenerator V8_FINAL : public CallWrapper {
public:
SafepointGenerator(LCodeGen* codegen,
LPointerMap* pointers,
@@ -44,11 +45,11 @@ class SafepointGenerator : public CallWrapper {
: codegen_(codegen),
pointers_(pointers),
deopt_mode_(mode) { }
- virtual ~SafepointGenerator() { }
+ virtual ~SafepointGenerator() {}
- virtual void BeforeCall(int call_size) const { }
+ virtual void BeforeCall(int call_size) const V8_OVERRIDE {}
- virtual void AfterCall() const {
+ virtual void AfterCall() const V8_OVERRIDE {
codegen_->RecordSafepoint(pointers_, deopt_mode_);
}
@@ -62,13 +63,9 @@ class SafepointGenerator : public CallWrapper {
#define __ masm()->
bool LCodeGen::GenerateCode() {
- HPhase phase("Z_Code generation", chunk());
+ LPhase phase("Z_Code generation", chunk());
ASSERT(is_unused());
status_ = GENERATING;
- CpuFeatures::Scope scope1(VFP3);
- CpuFeatures::Scope scope2(ARMv7);
-
- CodeStub::GenerateFPStubs();
// Open a frame scope to indicate that there is a frame on the stack. The
// NONE indicates that the scope shouldn't actually generate code to set up
@@ -87,86 +84,94 @@ void LCodeGen::FinishCode(Handle<Code> code) {
ASSERT(is_done());
code->set_stack_slots(GetStackSlotCount());
code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
+ if (FLAG_weak_embedded_maps_in_optimized_code) {
+ RegisterDependentCodeForEmbeddedMaps(code);
+ }
PopulateDeoptimizationData(code);
+ info()->CommitDependencies(code);
}
-void LCodeGen::Abort(const char* reason) {
+void LCodeGen::Abort(BailoutReason reason) {
info()->set_bailout_reason(reason);
status_ = ABORTED;
}
-void LCodeGen::Comment(const char* format, ...) {
- if (!FLAG_code_comments) return;
- char buffer[4 * KB];
- StringBuilder builder(buffer, ARRAY_SIZE(buffer));
- va_list arguments;
- va_start(arguments, format);
- builder.AddFormattedList(format, arguments);
- va_end(arguments);
-
- // Copy the string before recording it in the assembler to avoid
- // issues when the stack allocated buffer goes out of scope.
- size_t length = builder.position();
- Vector<char> copy = Vector<char>::New(length + 1);
- memcpy(copy.start(), builder.Finalize(), copy.length());
- masm()->RecordComment(copy.start());
-}
-
-
bool LCodeGen::GeneratePrologue() {
ASSERT(is_generating());
- ProfileEntryHookStub::MaybeCallEntryHook(masm_);
+ if (info()->IsOptimizing()) {
+ ProfileEntryHookStub::MaybeCallEntryHook(masm_);
#ifdef DEBUG
- if (strlen(FLAG_stop_at) > 0 &&
- info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
- __ stop("stop_at");
- }
+ if (strlen(FLAG_stop_at) > 0 &&
+ info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
+ __ stop("stop_at");
+ }
#endif
- // r1: Callee's JS function.
- // cp: Callee's context.
- // fp: Caller's frame pointer.
- // lr: Caller's pc.
-
- // Strict mode functions and builtins need to replace the receiver
- // with undefined when called as functions (without an explicit
- // receiver object). r5 is zero for method calls and non-zero for
- // function calls.
- if (!info_->is_classic_mode() || info_->is_native()) {
- Label ok;
- __ cmp(r5, Operand(0));
- __ b(eq, &ok);
- int receiver_offset = scope()->num_parameters() * kPointerSize;
- __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
- __ str(r2, MemOperand(sp, receiver_offset));
- __ bind(&ok);
+ // r1: Callee's JS function.
+ // cp: Callee's context.
+ // fp: Caller's frame pointer.
+ // lr: Caller's pc.
+
+ // Strict mode functions and builtins need to replace the receiver
+ // with undefined when called as functions (without an explicit
+ // receiver object). r5 is zero for method calls and non-zero for
+ // function calls.
+ if (!info_->is_classic_mode() || info_->is_native()) {
+ __ cmp(r5, Operand::Zero());
+ int receiver_offset = scope()->num_parameters() * kPointerSize;
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+ __ str(r2, MemOperand(sp, receiver_offset), ne);
+ }
}
- __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
- __ add(fp, sp, Operand(2 * kPointerSize)); // Adjust FP to point to saved FP.
+ info()->set_prologue_offset(masm_->pc_offset());
+ if (NeedsEagerFrame()) {
+ __ Prologue(info()->IsStub() ? BUILD_STUB_FRAME : BUILD_FUNCTION_FRAME);
+ frame_is_built_ = true;
+ info_->AddNoFrameRange(0, masm_->pc_offset());
+ }
// Reserve space for the stack slots needed by the code.
int slots = GetStackSlotCount();
if (slots > 0) {
if (FLAG_debug_code) {
- __ mov(r0, Operand(slots));
- __ mov(r2, Operand(kSlotsZapValue));
+ __ sub(sp, sp, Operand(slots * kPointerSize));
+ __ push(r0);
+ __ push(r1);
+ __ add(r0, sp, Operand(slots * kPointerSize));
+ __ mov(r1, Operand(kSlotsZapValue));
Label loop;
__ bind(&loop);
- __ push(r2);
- __ sub(r0, r0, Operand(1), SetCC);
+ __ sub(r0, r0, Operand(kPointerSize));
+ __ str(r1, MemOperand(r0, 2 * kPointerSize));
+ __ cmp(r0, sp);
__ b(ne, &loop);
+ __ pop(r1);
+ __ pop(r0);
} else {
__ sub(sp, sp, Operand(slots * kPointerSize));
}
}
+ if (info()->saves_caller_doubles()) {
+ Comment(";;; Save clobbered callee double registers");
+ int count = 0;
+ BitVector* doubles = chunk()->allocated_double_registers();
+ BitVector::Iterator save_iterator(doubles);
+ while (!save_iterator.Done()) {
+ __ vstr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
+ MemOperand(sp, count * kDoubleSize));
+ save_iterator.Advance();
+ count++;
+ }
+ }
+
// Possibly allocate a local context.
- int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (heap_slots > 0) {
Comment(";;; Allocate local context");
// Argument to NewContext is the function, which is in r1.
@@ -195,39 +200,39 @@ bool LCodeGen::GeneratePrologue() {
__ str(r0, target);
// Update the write barrier. This clobbers r3 and r0.
__ RecordWriteContextSlot(
- cp, target.offset(), r0, r3, kLRHasBeenSaved, kSaveFPRegs);
+ cp,
+ target.offset(),
+ r0,
+ r3,
+ GetLinkRegisterState(),
+ kSaveFPRegs);
}
}
Comment(";;; End allocate local context");
}
// Trace the call.
- if (FLAG_trace) {
+ if (FLAG_trace && info()->IsOptimizing()) {
+ // We have not executed any compiled code yet, so cp still holds the
+ // incoming context.
__ CallRuntime(Runtime::kTraceEnter, 0);
}
return !is_aborted();
}
-bool LCodeGen::GenerateBody() {
- ASSERT(is_generating());
- bool emit_instructions = true;
- for (current_instruction_ = 0;
- !is_aborted() && current_instruction_ < instructions_->length();
- current_instruction_++) {
- LInstruction* instr = instructions_->at(current_instruction_);
- if (instr->IsLabel()) {
- LLabel* label = LLabel::cast(instr);
- emit_instructions = !label->HasReplacement();
- }
+void LCodeGen::GenerateOsrPrologue() {
+ // Generate the OSR entry prologue at the first unknown OSR value, or if there
+ // are none, at the OSR entrypoint instruction.
+ if (osr_pc_offset_ >= 0) return;
- if (emit_instructions) {
- Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
- instr->CompileToNative(this);
- }
- }
- EnsureSpaceForLazyDeopt();
- return !is_aborted();
+ osr_pc_offset_ = masm()->pc_offset();
+
+ // Adjust the frame size, subsuming the unoptimized frame into the
+ // optimized frame.
+ int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
+ ASSERT(slots >= 0);
+ __ sub(sp, sp, Operand(slots * kPointerSize));
}
@@ -236,11 +241,36 @@ bool LCodeGen::GenerateDeferredCode() {
if (deferred_.length() > 0) {
for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
LDeferredCode* code = deferred_[i];
- __ bind(code->entry());
- Comment(";;; Deferred code @%d: %s.",
+
+ HValue* value =
+ instructions_->at(code->instruction_index())->hydrogen_value();
+ RecordAndWritePosition(value->position());
+
+ Comment(";;; <@%d,#%d> "
+ "-------------------- Deferred %s --------------------",
code->instruction_index(),
+ code->instr()->hydrogen_value()->id(),
code->instr()->Mnemonic());
+ __ bind(code->entry());
+ if (NeedsDeferredFrame()) {
+ Comment(";;; Build frame");
+ ASSERT(!frame_is_built_);
+ ASSERT(info()->IsStub());
+ frame_is_built_ = true;
+ __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
+ __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
+ __ push(scratch0());
+ __ add(fp, sp, Operand(2 * kPointerSize));
+ Comment(";;; Deferred code");
+ }
code->Generate();
+ if (NeedsDeferredFrame()) {
+ Comment(";;; Destroy frame");
+ ASSERT(frame_is_built_);
+ __ pop(ip);
+ __ ldm(ia_w, sp, cp.bit() | fp.bit() | lr.bit());
+ frame_is_built_ = false;
+ }
__ jmp(code->exit());
}
}
@@ -262,23 +292,53 @@ bool LCodeGen::GenerateDeoptJumpTable() {
// Each entry in the jump table generates one instruction and inlines one
// 32bit data after it.
if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) +
- deopt_jump_table_.length() * 2)) {
- Abort("Generated code is too large");
+ deopt_jump_table_.length() * 7)) {
+ Abort(kGeneratedCodeIsTooLarge);
}
- // Block the constant pool emission during the jump table emission.
- __ BlockConstPoolFor(deopt_jump_table_.length());
- __ RecordComment("[ Deoptimisation jump table");
+ if (deopt_jump_table_.length() > 0) {
+ Comment(";;; -------------------- Jump table --------------------");
+ }
Label table_start;
__ bind(&table_start);
+ Label needs_frame;
for (int i = 0; i < deopt_jump_table_.length(); i++) {
__ bind(&deopt_jump_table_[i].label);
- __ ldr(pc, MemOperand(pc, Assembler::kInstrSize - Assembler::kPcLoadDelta));
- __ dd(reinterpret_cast<uint32_t>(deopt_jump_table_[i].address));
+ Address entry = deopt_jump_table_[i].address;
+ Deoptimizer::BailoutType type = deopt_jump_table_[i].bailout_type;
+ int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
+ if (id == Deoptimizer::kNotDeoptimizationEntry) {
+ Comment(";;; jump table entry %d.", i);
+ } else {
+ Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
+ }
+ if (deopt_jump_table_[i].needs_frame) {
+ __ mov(ip, Operand(ExternalReference::ForDeoptEntry(entry)));
+ if (needs_frame.is_bound()) {
+ __ b(&needs_frame);
+ } else {
+ __ bind(&needs_frame);
+ __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
+ // This variant of deopt can only be used with stubs. Since we don't
+ // have a function pointer to install in the stack frame that we're
+ // building, install a special marker there instead.
+ ASSERT(info()->IsStub());
+ __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
+ __ push(scratch0());
+ __ add(fp, sp, Operand(2 * kPointerSize));
+ __ mov(lr, Operand(pc), LeaveCC, al);
+ __ mov(pc, ip);
+ }
+ } else {
+ __ mov(lr, Operand(pc), LeaveCC, al);
+ __ mov(pc, Operand(ExternalReference::ForDeoptEntry(entry)));
+ }
+ masm()->CheckConstPool(false, false);
}
- ASSERT(masm()->InstructionsGeneratedSince(&table_start) ==
- deopt_jump_table_.length() * 2);
- __ RecordComment("]");
+
+ // Force constant pool emission at the end of the deopt jump table to make
+ // sure that no constant pools are emitted after.
+ masm()->CheckConstPool(true, false);
// The deoptimization jump table is the last part of the instruction
// sequence. Mark the generated code as done unless we bailed out.
@@ -299,8 +359,8 @@ Register LCodeGen::ToRegister(int index) const {
}
-DoubleRegister LCodeGen::ToDoubleRegister(int index) const {
- return DoubleRegister::FromAllocationIndex(index);
+DwVfpRegister LCodeGen::ToDoubleRegister(int index) const {
+ return DwVfpRegister::FromAllocationIndex(index);
}
@@ -316,20 +376,16 @@ Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
} else if (op->IsConstantOperand()) {
LConstantOperand* const_op = LConstantOperand::cast(op);
HConstant* constant = chunk_->LookupConstant(const_op);
- Handle<Object> literal = constant->handle();
+ Handle<Object> literal = constant->handle(isolate());
Representation r = chunk_->LookupLiteralRepresentation(const_op);
if (r.IsInteger32()) {
ASSERT(literal->IsNumber());
__ mov(scratch, Operand(static_cast<int32_t>(literal->Number())));
} else if (r.IsDouble()) {
- Abort("EmitLoadRegister: Unsupported double immediate.");
+ Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
} else {
- ASSERT(r.IsTagged());
- if (literal->IsSmi()) {
- __ mov(scratch, Operand(literal));
- } else {
- __ LoadHeapObject(scratch, Handle<HeapObject>::cast(literal));
- }
+ ASSERT(r.IsSmiOrTagged());
+ __ Move(scratch, literal);
}
return scratch;
} else if (op->IsStackSlot() || op->IsArgument()) {
@@ -341,21 +397,21 @@ Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
}
-DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
+DwVfpRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
ASSERT(op->IsDoubleRegister());
return ToDoubleRegister(op->index());
}
-DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
- SwVfpRegister flt_scratch,
- DoubleRegister dbl_scratch) {
+DwVfpRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
+ SwVfpRegister flt_scratch,
+ DwVfpRegister dbl_scratch) {
if (op->IsDoubleRegister()) {
return ToDoubleRegister(op->index());
} else if (op->IsConstantOperand()) {
LConstantOperand* const_op = LConstantOperand::cast(op);
HConstant* constant = chunk_->LookupConstant(const_op);
- Handle<Object> literal = constant->handle();
+ Handle<Object> literal = constant->handle(isolate());
Representation r = chunk_->LookupLiteralRepresentation(const_op);
if (r.IsInteger32()) {
ASSERT(literal->IsNumber());
@@ -364,9 +420,9 @@ DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
__ vcvt_f64_s32(dbl_scratch, flt_scratch);
return dbl_scratch;
} else if (r.IsDouble()) {
- Abort("unsupported double immediate");
+ Abort(kUnsupportedDoubleImmediate);
} else if (r.IsTagged()) {
- Abort("unsupported tagged immediate");
+ Abort(kUnsupportedTaggedImmediate);
}
} else if (op->IsStackSlot() || op->IsArgument()) {
// TODO(regis): Why is vldr not taking a MemOperand?
@@ -382,21 +438,39 @@ DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
HConstant* constant = chunk_->LookupConstant(op);
- ASSERT(chunk_->LookupLiteralRepresentation(op).IsTagged());
- return constant->handle();
+ ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
+ return constant->handle(isolate());
}
bool LCodeGen::IsInteger32(LConstantOperand* op) const {
- return chunk_->LookupLiteralRepresentation(op).IsInteger32();
+ return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
+}
+
+
+bool LCodeGen::IsSmi(LConstantOperand* op) const {
+ return chunk_->LookupLiteralRepresentation(op).IsSmi();
}
-int LCodeGen::ToInteger32(LConstantOperand* op) const {
+int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
+ return ToRepresentation(op, Representation::Integer32());
+}
+
+
+int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
+ const Representation& r) const {
+ HConstant* constant = chunk_->LookupConstant(op);
+ int32_t value = constant->Integer32Value();
+ if (r.IsInteger32()) return value;
+ ASSERT(r.IsSmiOrTagged());
+ return reinterpret_cast<int32_t>(Smi::FromInt(value));
+}
+
+
+Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
HConstant* constant = chunk_->LookupConstant(op);
- ASSERT(chunk_->LookupLiteralRepresentation(op).IsInteger32());
- ASSERT(constant->HasInteger32Value());
- return constant->Integer32Value();
+ return Smi::FromInt(constant->Integer32Value());
}
@@ -412,23 +486,26 @@ Operand LCodeGen::ToOperand(LOperand* op) {
LConstantOperand* const_op = LConstantOperand::cast(op);
HConstant* constant = chunk()->LookupConstant(const_op);
Representation r = chunk_->LookupLiteralRepresentation(const_op);
- if (r.IsInteger32()) {
+ if (r.IsSmi()) {
+ ASSERT(constant->HasSmiValue());
+ return Operand(Smi::FromInt(constant->Integer32Value()));
+ } else if (r.IsInteger32()) {
ASSERT(constant->HasInteger32Value());
return Operand(constant->Integer32Value());
} else if (r.IsDouble()) {
- Abort("ToOperand Unsupported double immediate.");
+ Abort(kToOperandUnsupportedDoubleImmediate);
}
ASSERT(r.IsTagged());
- return Operand(constant->handle());
+ return Operand(constant->handle(isolate()));
} else if (op->IsRegister()) {
return Operand(ToRegister(op));
} else if (op->IsDoubleRegister()) {
- Abort("ToOperand IsDoubleRegister unimplemented");
- return Operand(0);
+ Abort(kToOperandIsDoubleRegisterUnimplemented);
+ return Operand::Zero();
}
// Stack slots not implemented, use ToMemOperand instead.
UNREACHABLE();
- return Operand(0);
+ return Operand::Zero();
}
@@ -436,56 +513,29 @@ MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
ASSERT(!op->IsRegister());
ASSERT(!op->IsDoubleRegister());
ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
- int index = op->index();
- if (index >= 0) {
- // Local or spill slot. Skip the frame pointer, function, and
- // context in the fixed part of the frame.
- return MemOperand(fp, -(index + 3) * kPointerSize);
- } else {
- // Incoming parameter. Skip the return address.
- return MemOperand(fp, -(index - 1) * kPointerSize);
- }
+ return MemOperand(fp, StackSlotOffset(op->index()));
}
MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
ASSERT(op->IsDoubleStackSlot());
- int index = op->index();
- if (index >= 0) {
- // Local or spill slot. Skip the frame pointer, function, context,
- // and the first word of the double in the fixed part of the frame.
- return MemOperand(fp, -(index + 3) * kPointerSize + kPointerSize);
- } else {
- // Incoming parameter. Skip the return address and the first word of
- // the double.
- return MemOperand(fp, -(index - 1) * kPointerSize + kPointerSize);
- }
+ return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize);
}
void LCodeGen::WriteTranslation(LEnvironment* environment,
- Translation* translation,
- int* arguments_index,
- int* arguments_count) {
+ Translation* translation) {
if (environment == NULL) return;
// The translation includes one command per value in the environment.
- int translation_size = environment->values()->length();
+ int translation_size = environment->translation_size();
// The output frame height does not include the parameters.
int height = translation_size - environment->parameter_count();
- // Function parameters are arguments to the outermost environment. The
- // arguments index points to the first element of a sequence of tagged
- // values on the stack that represent the arguments. This needs to be
- // kept in sync with the LArgumentsElements implementation.
- *arguments_index = -environment->parameter_count();
- *arguments_count = environment->parameter_count();
-
- WriteTranslation(environment->outer(),
- translation,
- arguments_index,
- arguments_count);
- int closure_id = *info()->closure() != *environment->closure()
+ WriteTranslation(environment->outer(), translation);
+ bool has_closure_id = !info()->closure().is_null() &&
+ !info()->closure().is_identical_to(environment->closure());
+ int closure_id = has_closure_id
? DefineDeoptimizationLiteral(environment->closure())
: Translation::kSelfLiteralId;
@@ -506,71 +556,66 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
ASSERT(height == 0);
translation->BeginSetterStubFrame(closure_id);
break;
+ case STUB:
+ translation->BeginCompiledStubFrame();
+ break;
case ARGUMENTS_ADAPTOR:
translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
break;
}
- // Inlined frames which push their arguments cause the index to be
- // bumped and a new stack area to be used for materialization.
- if (environment->entry() != NULL &&
- environment->entry()->arguments_pushed()) {
- *arguments_index = *arguments_index < 0
- ? GetStackSlotCount()
- : *arguments_index + *arguments_count;
- *arguments_count = environment->entry()->arguments_count() + 1;
- }
-
+ int object_index = 0;
+ int dematerialized_index = 0;
for (int i = 0; i < translation_size; ++i) {
LOperand* value = environment->values()->at(i);
- // spilled_registers_ and spilled_double_registers_ are either
- // both NULL or both set.
- if (environment->spilled_registers() != NULL && value != NULL) {
- if (value->IsRegister() &&
- environment->spilled_registers()[value->index()] != NULL) {
- translation->MarkDuplicate();
- AddToTranslation(translation,
- environment->spilled_registers()[value->index()],
- environment->HasTaggedValueAt(i),
- environment->HasUint32ValueAt(i),
- *arguments_index,
- *arguments_count);
- } else if (
- value->IsDoubleRegister() &&
- environment->spilled_double_registers()[value->index()] != NULL) {
- translation->MarkDuplicate();
- AddToTranslation(
- translation,
- environment->spilled_double_registers()[value->index()],
- false,
- false,
- *arguments_index,
- *arguments_count);
- }
- }
-
- AddToTranslation(translation,
+ AddToTranslation(environment,
+ translation,
value,
environment->HasTaggedValueAt(i),
environment->HasUint32ValueAt(i),
- *arguments_index,
- *arguments_count);
+ &object_index,
+ &dematerialized_index);
}
}
-void LCodeGen::AddToTranslation(Translation* translation,
+void LCodeGen::AddToTranslation(LEnvironment* environment,
+ Translation* translation,
LOperand* op,
bool is_tagged,
bool is_uint32,
- int arguments_index,
- int arguments_count) {
- if (op == NULL) {
- // TODO(twuerthinger): Introduce marker operands to indicate that this value
- // is not present and must be reconstructed from the deoptimizer. Currently
- // this is only used for the arguments object.
- translation->StoreArgumentsObject(arguments_index, arguments_count);
- } else if (op->IsStackSlot()) {
+ int* object_index_pointer,
+ int* dematerialized_index_pointer) {
+ if (op == LEnvironment::materialization_marker()) {
+ int object_index = (*object_index_pointer)++;
+ if (environment->ObjectIsDuplicateAt(object_index)) {
+ int dupe_of = environment->ObjectDuplicateOfAt(object_index);
+ translation->DuplicateObject(dupe_of);
+ return;
+ }
+ int object_length = environment->ObjectLengthAt(object_index);
+ if (environment->ObjectIsArgumentsAt(object_index)) {
+ translation->BeginArgumentsObject(object_length);
+ } else {
+ translation->BeginCapturedObject(object_length);
+ }
+ int dematerialized_index = *dematerialized_index_pointer;
+ int env_offset = environment->translation_size() + dematerialized_index;
+ *dematerialized_index_pointer += object_length;
+ for (int i = 0; i < object_length; ++i) {
+ LOperand* value = environment->values()->at(env_offset + i);
+ AddToTranslation(environment,
+ translation,
+ value,
+ environment->HasTaggedValueAt(env_offset + i),
+ environment->HasUint32ValueAt(env_offset + i),
+ object_index_pointer,
+ dematerialized_index_pointer);
+ }
+ return;
+ }
+
+ if (op->IsStackSlot()) {
if (is_tagged) {
translation->StoreStackSlot(op->index());
} else if (is_uint32) {
@@ -598,7 +643,7 @@ void LCodeGen::AddToTranslation(Translation* translation,
translation->StoreDoubleRegister(reg);
} else if (op->IsConstantOperand()) {
HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
- int src_index = DefineDeoptimizationLiteral(constant->handle());
+ int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
translation->StoreLiteral(src_index);
} else {
UNREACHABLE();
@@ -619,12 +664,11 @@ void LCodeGen::CallCodeGeneric(Handle<Code> code,
LInstruction* instr,
SafepointMode safepoint_mode,
TargetAddressStorageMode storage_mode) {
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
ASSERT(instr != NULL);
// Block literal pool emission to ensure nop indicating no inlined smi code
// is in the correct position.
Assembler::BlockConstPoolScope block_const_pool(masm());
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
__ Call(code, mode, TypeFeedbackId::None(), al, storage_mode);
RecordSafepointWithLazyDeopt(instr, safepoint_mode);
@@ -639,20 +683,36 @@ void LCodeGen::CallCodeGeneric(Handle<Code> code,
void LCodeGen::CallRuntime(const Runtime::Function* function,
int num_arguments,
- LInstruction* instr) {
+ LInstruction* instr,
+ SaveFPRegsMode save_doubles) {
ASSERT(instr != NULL);
- LPointerMap* pointers = instr->pointer_map();
- ASSERT(pointers != NULL);
- RecordPosition(pointers->position());
- __ CallRuntime(function, num_arguments);
+ __ CallRuntime(function, num_arguments, save_doubles);
+
RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}
+void LCodeGen::LoadContextFromDeferred(LOperand* context) {
+ if (context->IsRegister()) {
+ __ Move(cp, ToRegister(context));
+ } else if (context->IsStackSlot()) {
+ __ ldr(cp, ToMemOperand(context));
+ } else if (context->IsConstantOperand()) {
+ HConstant* constant =
+ chunk_->LookupConstant(LConstantOperand::cast(context));
+ __ Move(cp, Handle<Object>::cast(constant->handle(isolate())));
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
int argc,
- LInstruction* instr) {
+ LInstruction* instr,
+ LOperand* context) {
+ LoadContextFromDeferred(context);
__ CallRuntimeSaveDoubles(id);
RecordSafepointWithRegisters(
instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
@@ -677,8 +737,6 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
int frame_count = 0;
int jsframe_count = 0;
- int args_index = 0;
- int args_count = 0;
for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
++frame_count;
if (e->frame_type() == JS_FUNCTION) {
@@ -686,7 +744,7 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
}
}
Translation translation(&translations_, frame_count, jsframe_count, zone());
- WriteTranslation(environment, &translation, &args_index, &args_count);
+ WriteTranslation(environment, &translation);
int deoptimization_index = deoptimizations_.length();
int pc_offset = masm()->pc_offset();
environment->Register(deoptimization_index,
@@ -697,36 +755,88 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
}
-void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
+void LCodeGen::DeoptimizeIf(Condition condition,
+ LEnvironment* environment,
+ Deoptimizer::BailoutType bailout_type) {
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
ASSERT(environment->HasBeenRegistered());
int id = environment->deoptimization_index();
- Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
+ ASSERT(info()->IsOptimizing() || info()->IsStub());
+ Address entry =
+ Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
if (entry == NULL) {
- Abort("bailout was not prepared");
+ Abort(kBailoutWasNotPrepared);
return;
}
ASSERT(FLAG_deopt_every_n_times < 2); // Other values not supported on ARM.
-
if (FLAG_deopt_every_n_times == 1 &&
- info_->shared_info()->opt_count() == id) {
- __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
+ !info()->IsStub() &&
+ info()->opt_count() == id) {
+ ASSERT(frame_is_built_);
+ __ Call(entry, RelocInfo::RUNTIME_ENTRY);
return;
}
- if (FLAG_trap_on_deopt) __ stop("trap_on_deopt", cc);
+ if (info()->ShouldTrapOnDeopt()) {
+ __ stop("trap_on_deopt", condition);
+ }
- if (cc == al) {
- __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
+ ASSERT(info()->IsStub() || frame_is_built_);
+ if (condition == al && frame_is_built_) {
+ __ Call(entry, RelocInfo::RUNTIME_ENTRY);
} else {
// We often have several deopts to the same entry, reuse the last
// jump entry if this is the case.
if (deopt_jump_table_.is_empty() ||
- (deopt_jump_table_.last().address != entry)) {
- deopt_jump_table_.Add(JumpTableEntry(entry), zone());
+ (deopt_jump_table_.last().address != entry) ||
+ (deopt_jump_table_.last().bailout_type != bailout_type) ||
+ (deopt_jump_table_.last().needs_frame != !frame_is_built_)) {
+ Deoptimizer::JumpTableEntry table_entry(entry,
+ bailout_type,
+ !frame_is_built_);
+ deopt_jump_table_.Add(table_entry, zone());
+ }
+ __ b(condition, &deopt_jump_table_.last().label);
+ }
+}
+
+
+void LCodeGen::DeoptimizeIf(Condition condition,
+ LEnvironment* environment) {
+ Deoptimizer::BailoutType bailout_type = info()->IsStub()
+ ? Deoptimizer::LAZY
+ : Deoptimizer::EAGER;
+ DeoptimizeIf(condition, environment, bailout_type);
+}
+
+
+void LCodeGen::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) {
+ ZoneList<Handle<Map> > maps(1, zone());
+ ZoneList<Handle<JSObject> > objects(1, zone());
+ int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
+ for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
+ if (Code::IsWeakEmbeddedObject(code->kind(), it.rinfo()->target_object())) {
+ if (it.rinfo()->target_object()->IsMap()) {
+ Handle<Map> map(Map::cast(it.rinfo()->target_object()));
+ maps.Add(map, zone());
+ } else if (it.rinfo()->target_object()->IsJSObject()) {
+ Handle<JSObject> object(JSObject::cast(it.rinfo()->target_object()));
+ objects.Add(object, zone());
+ }
}
- __ b(cc, &deopt_jump_table_.last().label);
+ }
+#ifdef VERIFY_HEAP
+ // This disables verification of weak embedded objects after full GC.
+ // AddDependentCode can cause a GC, which would observe the state where
+ // this code is not yet in the depended code lists of the embedded maps.
+ NoWeakObjectVerificationScope disable_verification_of_embedded_objects;
+#endif
+ for (int i = 0; i < maps.length(); i++) {
+ maps.at(i)->AddDependentCode(DependentCode::kWeaklyEmbeddedGroup, code);
+ }
+ for (int i = 0; i < objects.length(); i++) {
+ AddWeakObjectToCodeDependency(isolate()->heap(), objects.at(i), code);
}
}
@@ -737,16 +847,19 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
Handle<DeoptimizationInputData> data =
factory()->NewDeoptimizationInputData(length, TENURED);
- Handle<ByteArray> translations = translations_.CreateByteArray();
+ Handle<ByteArray> translations =
+ translations_.CreateByteArray(isolate()->factory());
data->SetTranslationByteArray(*translations);
data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
Handle<FixedArray> literals =
factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
- for (int i = 0; i < deoptimization_literals_.length(); i++) {
- literals->set(i, *deoptimization_literals_[i]);
+ { AllowDeferredHandleDereference copy_handles;
+ for (int i = 0; i < deoptimization_literals_.length(); i++) {
+ literals->set(i, *deoptimization_literals_[i]);
+ }
+ data->SetLiteralArray(*literals);
}
- data->SetLiteralArray(*literals);
data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
@@ -820,10 +933,6 @@ void LCodeGen::RecordSafepoint(
safepoint.DefinePointerRegister(ToRegister(pointer), zone());
}
}
- if (kind & Safepoint::kWithRegisters) {
- // Register cp always contains a pointer to the context.
- safepoint.DefinePointerRegister(cp, zone());
- }
}
@@ -834,7 +943,7 @@ void LCodeGen::RecordSafepoint(LPointerMap* pointers,
void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
- LPointerMap empty_pointers(RelocInfo::kNoPosition, zone());
+ LPointerMap empty_pointers(zone());
RecordSafepoint(&empty_pointers, deopt_mode);
}
@@ -856,18 +965,26 @@ void LCodeGen::RecordSafepointWithRegistersAndDoubles(
}
-void LCodeGen::RecordPosition(int position) {
+void LCodeGen::RecordAndWritePosition(int position) {
if (position == RelocInfo::kNoPosition) return;
masm()->positions_recorder()->RecordPosition(position);
+ masm()->positions_recorder()->WriteRecordedPositions();
+}
+
+
+static const char* LabelType(LLabel* label) {
+ if (label->is_loop_header()) return " (loop header)";
+ if (label->is_osr_entry()) return " (OSR entry)";
+ return "";
}
void LCodeGen::DoLabel(LLabel* label) {
- if (label->is_loop_header()) {
- Comment(";;; B%d - LOOP entry", label->block_id());
- } else {
- Comment(";;; B%d", label->block_id());
- }
+ Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
+ current_instruction_,
+ label->hydrogen_value()->id(),
+ label->block_id(),
+ LabelType(label));
__ bind(label->label());
current_block_ = label->block_id();
DoGap(label);
@@ -901,43 +1018,34 @@ void LCodeGen::DoParameter(LParameter* instr) {
void LCodeGen::DoCallStub(LCallStub* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->result()).is(r0));
switch (instr->hydrogen()->major_key()) {
case CodeStub::RegExpConstructResult: {
RegExpConstructResultStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::RegExpExec: {
RegExpExecStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::SubString: {
SubStringStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::NumberToString: {
- NumberToStringStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::StringAdd: {
- StringAddStub stub(NO_STRING_ADD_FLAGS);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::StringCompare: {
StringCompareStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::TranscendentalCache: {
__ ldr(r0, MemOperand(sp, 0));
TranscendentalCacheStub stub(instr->transcendental_type(),
TranscendentalCacheStub::TAGGED);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
default:
@@ -947,140 +1055,154 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
- // Nothing to do.
+ GenerateOsrPrologue();
}
void LCodeGen::DoModI(LModI* instr) {
- if (instr->hydrogen()->HasPowerOf2Divisor()) {
- Register dividend = ToRegister(instr->left());
- Register result = ToRegister(instr->result());
-
- int32_t divisor =
- HConstant::cast(instr->hydrogen()->right())->Integer32Value();
-
- if (divisor < 0) divisor = -divisor;
+ HMod* hmod = instr->hydrogen();
+ HValue* left = hmod->left();
+ HValue* right = hmod->right();
+ if (hmod->HasPowerOf2Divisor()) {
+ // TODO(svenpanne) We should really do the strength reduction on the
+ // Hydrogen level.
+ Register left_reg = ToRegister(instr->left());
+ Register result_reg = ToRegister(instr->result());
- Label positive_dividend, done;
- __ cmp(dividend, Operand(0));
- __ b(pl, &positive_dividend);
- __ rsb(result, dividend, Operand(0));
- __ and_(result, result, Operand(divisor - 1), SetCC);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr->environment());
+ // Note: The code below even works when right contains kMinInt.
+ int32_t divisor = Abs(right->GetInteger32Constant());
+
+ Label left_is_not_negative, done;
+ if (left->CanBeNegative()) {
+ __ cmp(left_reg, Operand::Zero());
+ __ b(pl, &left_is_not_negative);
+ __ rsb(result_reg, left_reg, Operand::Zero());
+ __ and_(result_reg, result_reg, Operand(divisor - 1));
+ __ rsb(result_reg, result_reg, Operand::Zero(), SetCC);
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(eq, instr->environment());
+ }
+ __ b(&done);
}
- __ rsb(result, result, Operand(0));
- __ b(&done);
- __ bind(&positive_dividend);
- __ and_(result, dividend, Operand(divisor - 1));
+
+ __ bind(&left_is_not_negative);
+ __ and_(result_reg, left_reg, Operand(divisor - 1));
__ bind(&done);
- return;
- }
- // These registers hold untagged 32 bit values.
- Register left = ToRegister(instr->left());
- Register right = ToRegister(instr->right());
- Register result = ToRegister(instr->result());
- Label done;
+ } else if (hmod->fixed_right_arg().has_value) {
+ Register left_reg = ToRegister(instr->left());
+ Register right_reg = ToRegister(instr->right());
+ Register result_reg = ToRegister(instr->result());
- if (CpuFeatures::IsSupported(SUDIV)) {
- CpuFeatures::Scope scope(SUDIV);
- // Check for x % 0.
- if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
- __ cmp(right, Operand(0));
- DeoptimizeIf(eq, instr->environment());
+ int32_t divisor = hmod->fixed_right_arg().value;
+ ASSERT(IsPowerOf2(divisor));
+
+ // Check if our assumption of a fixed right operand still holds.
+ __ cmp(right_reg, Operand(divisor));
+ DeoptimizeIf(ne, instr->environment());
+
+ Label left_is_not_negative, done;
+ if (left->CanBeNegative()) {
+ __ cmp(left_reg, Operand::Zero());
+ __ b(pl, &left_is_not_negative);
+ __ rsb(result_reg, left_reg, Operand::Zero());
+ __ and_(result_reg, result_reg, Operand(divisor - 1));
+ __ rsb(result_reg, result_reg, Operand::Zero(), SetCC);
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(eq, instr->environment());
+ }
+ __ b(&done);
}
- // For r3 = r1 % r2; we can have the following ARM code
- // sdiv r3, r1, r2
- // mls r3, r3, r2, r1
+ __ bind(&left_is_not_negative);
+ __ and_(result_reg, left_reg, Operand(divisor - 1));
+ __ bind(&done);
- __ sdiv(result, left, right);
- __ mls(result, result, right, left);
- __ cmp(result, Operand(0));
- __ b(ne, &done);
+ } else if (CpuFeatures::IsSupported(SUDIV)) {
+ CpuFeatureScope scope(masm(), SUDIV);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ cmp(left, Operand(0));
- DeoptimizeIf(lt, instr->environment());
- }
- } else {
- Register scratch = scratch0();
- Register scratch2 = ToRegister(instr->temp());
- DwVfpRegister dividend = ToDoubleRegister(instr->temp2());
- DwVfpRegister divisor = ToDoubleRegister(instr->temp3());
- DwVfpRegister quotient = double_scratch0();
-
- ASSERT(!dividend.is(divisor));
- ASSERT(!dividend.is(quotient));
- ASSERT(!divisor.is(quotient));
- ASSERT(!scratch.is(left));
- ASSERT(!scratch.is(right));
- ASSERT(!scratch.is(result));
-
- Label vfp_modulo, both_positive, right_negative;
-
- // Check for x % 0.
- if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
- __ cmp(right, Operand(0));
+ Register left_reg = ToRegister(instr->left());
+ Register right_reg = ToRegister(instr->right());
+ Register result_reg = ToRegister(instr->result());
+
+ Label done;
+ // Check for x % 0; sdiv might signal an exception. We have to deopt in this
+ // case because we can't return a NaN.
+ if (right->CanBeZero()) {
+ __ cmp(right_reg, Operand::Zero());
DeoptimizeIf(eq, instr->environment());
}
- __ Move(result, left);
+ // Check for kMinInt % -1; sdiv will return kMinInt, which is not what we
+ // want. We have to deopt if we care about -0, because we can't return that.
+ if (left->RangeCanInclude(kMinInt) && right->RangeCanInclude(-1)) {
+ Label no_overflow_possible;
+ __ cmp(left_reg, Operand(kMinInt));
+ __ b(ne, &no_overflow_possible);
+ __ cmp(right_reg, Operand(-1));
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(eq, instr->environment());
+ } else {
+ __ b(ne, &no_overflow_possible);
+ __ mov(result_reg, Operand::Zero());
+ __ jmp(&done);
+ }
+ __ bind(&no_overflow_possible);
+ }
- // (0 % x) must yield 0 (if x is finite, which is the case here).
- __ cmp(left, Operand(0));
- __ b(eq, &done);
- // Preload right in a vfp register.
- __ vmov(divisor.low(), right);
- __ b(lt, &vfp_modulo);
-
- __ cmp(left, Operand(right));
- __ b(lt, &done);
-
- // Check for (positive) power of two on the right hand side.
- __ JumpIfNotPowerOfTwoOrZeroAndNeg(right,
- scratch,
- &right_negative,
- &both_positive);
- // Perform modulo operation (scratch contains right - 1).
- __ and_(result, scratch, Operand(left));
- __ b(&done);
+ // For 'r3 = r1 % r2' we can have the following ARM code:
+ // sdiv r3, r1, r2
+ // mls r3, r3, r2, r1
- __ bind(&right_negative);
- // Negate right. The sign of the divisor does not matter.
- __ rsb(right, right, Operand(0));
-
- __ bind(&both_positive);
- const int kUnfolds = 3;
- // If the right hand side is smaller than the (nonnegative)
- // left hand side, the left hand side is the result.
- // Else try a few subtractions of the left hand side.
- __ mov(scratch, left);
- for (int i = 0; i < kUnfolds; i++) {
- // Check if the left hand side is less or equal than the
- // the right hand side.
- __ cmp(scratch, Operand(right));
- __ mov(result, scratch, LeaveCC, lt);
- __ b(lt, &done);
- // If not, reduce the left hand side by the right hand
- // side and check again.
- if (i < kUnfolds - 1) __ sub(scratch, scratch, right);
+ __ sdiv(result_reg, left_reg, right_reg);
+ __ mls(result_reg, result_reg, right_reg, left_reg);
+
+ // If we care about -0, test if the dividend is <0 and the result is 0.
+ if (left->CanBeNegative() &&
+ hmod->CanBeZero() &&
+ hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ __ cmp(result_reg, Operand::Zero());
+ __ b(ne, &done);
+ __ cmp(left_reg, Operand::Zero());
+ DeoptimizeIf(lt, instr->environment());
}
+ __ bind(&done);
- __ bind(&vfp_modulo);
- // Load the arguments in VFP registers.
- // The divisor value is preloaded before. Be careful that 'right'
- // is only live on entry.
- __ vmov(dividend.low(), left);
- // From here on don't use right as it may have been reallocated
- // (for example to scratch2).
- right = no_reg;
+ } else {
+ // General case, without any SDIV support.
+ Register left_reg = ToRegister(instr->left());
+ Register right_reg = ToRegister(instr->right());
+ Register result_reg = ToRegister(instr->result());
+ Register scratch = scratch0();
+ ASSERT(!scratch.is(left_reg));
+ ASSERT(!scratch.is(right_reg));
+ ASSERT(!scratch.is(result_reg));
+ DwVfpRegister dividend = ToDoubleRegister(instr->temp());
+ DwVfpRegister divisor = ToDoubleRegister(instr->temp2());
+ ASSERT(!divisor.is(dividend));
+ LowDwVfpRegister quotient = double_scratch0();
+ ASSERT(!quotient.is(dividend));
+ ASSERT(!quotient.is(divisor));
- __ vcvt_f64_s32(dividend, dividend.low());
- __ vcvt_f64_s32(divisor, divisor.low());
+ Label done;
+ // Check for x % 0, we have to deopt in this case because we can't return a
+ // NaN.
+ if (right->CanBeZero()) {
+ __ cmp(right_reg, Operand::Zero());
+ DeoptimizeIf(eq, instr->environment());
+ }
- // We do not care about the sign of the divisor.
+ __ Move(result_reg, left_reg);
+ // Load the arguments in VFP registers. The divisor value is preloaded
+ // before. Be careful that 'right_reg' is only live on entry.
+ // TODO(svenpanne) The last comment seems to be wrong nowadays.
+ __ vmov(double_scratch0().low(), left_reg);
+ __ vcvt_f64_s32(dividend, double_scratch0().low());
+ __ vmov(double_scratch0().low(), right_reg);
+ __ vcvt_f64_s32(divisor, double_scratch0().low());
+
+ // We do not care about the sign of the divisor. Note that we still handle
+ // the kMinInt % -1 case correctly, though.
__ vabs(divisor, divisor);
// Compute the quotient and round it to a 32bit integer.
__ vdiv(quotient, dividend, divisor);
@@ -1088,26 +1210,21 @@ void LCodeGen::DoModI(LModI* instr) {
__ vcvt_f64_s32(quotient, quotient.low());
// Compute the remainder in result.
- DwVfpRegister double_scratch = dividend;
- __ vmul(double_scratch, divisor, quotient);
- __ vcvt_s32_f64(double_scratch.low(), double_scratch);
- __ vmov(scratch, double_scratch.low());
-
- if (!instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ sub(result, left, scratch);
- } else {
- Label ok;
- // Check for -0.
- __ sub(scratch2, left, scratch, SetCC);
- __ b(ne, &ok);
- __ cmp(left, Operand(0));
+ __ vmul(double_scratch0(), divisor, quotient);
+ __ vcvt_s32_f64(double_scratch0().low(), double_scratch0());
+ __ vmov(scratch, double_scratch0().low());
+ __ sub(result_reg, left_reg, scratch, SetCC);
+
+ // If we care about -0, test if the dividend is <0 and the result is 0.
+ if (left->CanBeNegative() &&
+ hmod->CanBeZero() &&
+ hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ __ b(ne, &done);
+ __ cmp(left_reg, Operand::Zero());
DeoptimizeIf(mi, instr->environment());
- __ bind(&ok);
- // Load the result and we are done.
- __ mov(result, scratch2);
}
+ __ bind(&done);
}
- __ bind(&done);
}
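// Illustrative reference (a minimal sketch, not from the original patch): the
// semantics every DoModI path above has to implement. The helper name is
// hypothetical; kMinInt % -1 is special-cased because that division overflows
// in C++ even though the expected result is simply 0.
static int32_t ReferenceModI(int32_t left, int32_t right) {
  ASSERT(right != 0);  // The generated code deoptimizes on right == 0.
  if (left == kMinInt && right == -1) return 0;
  // Truncated modulus: the result takes the sign of the dividend. A zero
  // result with a negative dividend is the -0 case that kBailoutOnMinusZero
  // deoptimizes on.
  return left % right;
}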
@@ -1135,11 +1252,11 @@ void LCodeGen::EmitSignedIntegerDivisionByConstant(
if (divisor > 0) {
__ Move(result, dividend);
} else {
- __ rsb(result, dividend, Operand(0), SetCC);
+ __ rsb(result, dividend, Operand::Zero(), SetCC);
DeoptimizeIf(vs, environment);
}
// Compute the remainder.
- __ mov(remainder, Operand(0));
+ __ mov(remainder, Operand::Zero());
return;
default:
@@ -1157,7 +1274,7 @@ void LCodeGen::EmitSignedIntegerDivisionByConstant(
// handled separately.
if (divisor < 0) {
ASSERT(divisor != -1);
- __ rsb(result, result, Operand(0));
+ __ rsb(result, result, Operand::Zero());
}
// Compute the remainder.
if (divisor > 0) {
@@ -1193,7 +1310,7 @@ void LCodeGen::EmitSignedIntegerDivisionByConstant(
__ mov(scratch, Operand(scratch, ASR, s));
}
__ add(result, scratch, Operand(dividend, LSR, 31));
- if (divisor < 0) __ rsb(result, result, Operand(0));
+ if (divisor < 0) __ rsb(result, result, Operand::Zero());
// Compute the remainder.
__ mov(ip, Operand(divisor));
// This sequence could be replaced with 'mls' when
@@ -1206,43 +1323,82 @@ void LCodeGen::EmitSignedIntegerDivisionByConstant(
void LCodeGen::DoDivI(LDivI* instr) {
- class DeferredDivI: public LDeferredCode {
- public:
- DeferredDivI(LCodeGen* codegen, LDivI* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
- codegen()->DoDeferredBinaryOpStub(instr_->pointer_map(),
- instr_->left(),
- instr_->right(),
- Token::DIV);
+ if (instr->hydrogen()->HasPowerOf2Divisor()) {
+ const Register dividend = ToRegister(instr->left());
+ const Register result = ToRegister(instr->result());
+ int32_t divisor = instr->hydrogen()->right()->GetInteger32Constant();
+ int32_t test_value = 0;
+ int32_t power = 0;
+
+ if (divisor > 0) {
+ test_value = divisor - 1;
+ power = WhichPowerOf2(divisor);
+ } else {
+ // Check for (0 / -x) that will produce negative zero.
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ __ cmp(dividend, Operand::Zero());
+ DeoptimizeIf(eq, instr->environment());
+ }
+ // Check for (kMinInt / -1).
+ if (divisor == -1 && instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+ __ cmp(dividend, Operand(kMinInt));
+ DeoptimizeIf(eq, instr->environment());
+ }
+ test_value = - divisor - 1;
+ power = WhichPowerOf2(-divisor);
}
- virtual LInstruction* instr() { return instr_; }
- private:
- LDivI* instr_;
- };
+
+ if (test_value != 0) {
+ if (instr->hydrogen()->CheckFlag(
+ HInstruction::kAllUsesTruncatingToInt32)) {
+ __ sub(result, dividend, Operand::Zero(), SetCC);
+ __ rsb(result, result, Operand::Zero(), LeaveCC, lt);
+ __ mov(result, Operand(result, ASR, power));
+ if (divisor > 0) __ rsb(result, result, Operand::Zero(), LeaveCC, lt);
+ if (divisor < 0) __ rsb(result, result, Operand::Zero(), LeaveCC, gt);
+ return; // Don't fall through to "__ rsb" below.
+ } else {
+ // Deoptimize if remainder is not 0.
+ __ tst(dividend, Operand(test_value));
+ DeoptimizeIf(ne, instr->environment());
+ __ mov(result, Operand(dividend, ASR, power));
+ if (divisor < 0) __ rsb(result, result, Operand(0));
+ }
+ } else {
+ if (divisor < 0) {
+ __ rsb(result, dividend, Operand(0));
+ } else {
+ __ Move(result, dividend);
+ }
+ }
+
+ return;
+ }
const Register left = ToRegister(instr->left());
const Register right = ToRegister(instr->right());
- const Register scratch = scratch0();
const Register result = ToRegister(instr->result());
// Check for x / 0.
if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
- __ cmp(right, Operand(0));
+ __ cmp(right, Operand::Zero());
DeoptimizeIf(eq, instr->environment());
}
// Check for (0 / -x) that will produce negative zero.
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label left_not_zero;
- __ cmp(left, Operand(0));
- __ b(ne, &left_not_zero);
- __ cmp(right, Operand(0));
- DeoptimizeIf(mi, instr->environment());
- __ bind(&left_not_zero);
+ Label positive;
+ if (!instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
+ // Do the test only if it hasn't been done above.
+ __ cmp(right, Operand::Zero());
+ }
+ __ b(pl, &positive);
+ __ cmp(left, Operand::Zero());
+ DeoptimizeIf(eq, instr->environment());
+ __ bind(&positive);
}
- // Check for (-kMinInt / -1).
+ // Check for (kMinInt / -1).
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
Label left_not_min_int;
__ cmp(left, Operand(kMinInt));
@@ -1252,40 +1408,62 @@ void LCodeGen::DoDivI(LDivI* instr) {
__ bind(&left_not_min_int);
}
- Label done, deoptimize;
- // Test for a few common cases first.
- __ cmp(right, Operand(1));
- __ mov(result, left, LeaveCC, eq);
- __ b(eq, &done);
+ if (CpuFeatures::IsSupported(SUDIV)) {
+ CpuFeatureScope scope(masm(), SUDIV);
+ __ sdiv(result, left, right);
- __ cmp(right, Operand(2));
- __ tst(left, Operand(1), eq);
- __ mov(result, Operand(left, ASR, 1), LeaveCC, eq);
- __ b(eq, &done);
+ if (!instr->hydrogen()->CheckFlag(
+ HInstruction::kAllUsesTruncatingToInt32)) {
+ // Compute remainder and deopt if it's not zero.
+ const Register remainder = scratch0();
+ __ mls(remainder, result, right, left);
+ __ cmp(remainder, Operand::Zero());
+ DeoptimizeIf(ne, instr->environment());
+ }
+ } else {
+ const DoubleRegister vleft = ToDoubleRegister(instr->temp());
+ const DoubleRegister vright = double_scratch0();
+ __ vmov(double_scratch0().low(), left);
+ __ vcvt_f64_s32(vleft, double_scratch0().low());
+ __ vmov(double_scratch0().low(), right);
+ __ vcvt_f64_s32(vright, double_scratch0().low());
+ __ vdiv(vleft, vleft, vright); // vleft now contains the result.
+ __ vcvt_s32_f64(double_scratch0().low(), vleft);
+ __ vmov(result, double_scratch0().low());
+
+ if (!instr->hydrogen()->CheckFlag(
+ HInstruction::kAllUsesTruncatingToInt32)) {
+ // Deopt if exact conversion to integer was not possible.
+ // Use vright as scratch register.
+ __ vcvt_f64_s32(double_scratch0(), double_scratch0().low());
+ __ VFPCompareAndSetFlags(vleft, double_scratch0());
+ DeoptimizeIf(ne, instr->environment());
+ }
+ }
+}
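// Reference sketch (not from the original patch, helper name hypothetical):
// what the "all uses truncating" power-of-two branch above computes. A plain
// arithmetic shift rounds toward -infinity, so the dividend's magnitude is
// shifted and the sign is reapplied afterwards to truncate toward zero. The
// kMinInt dividend edge case is ignored here.
static int32_t ReferenceTruncatingDivByPowerOf2(int32_t dividend,
                                                int32_t divisor) {
  ASSERT(divisor != 0 && IsPowerOf2(Abs(divisor)));
  int32_t power = WhichPowerOf2(Abs(divisor));
  int32_t magnitude = (dividend < 0 ? -dividend : dividend) >> power;
  return ((dividend < 0) != (divisor < 0)) ? -magnitude : magnitude;
}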
- __ cmp(right, Operand(4));
- __ tst(left, Operand(3), eq);
- __ mov(result, Operand(left, ASR, 2), LeaveCC, eq);
- __ b(eq, &done);
- // Call the stub. The numbers in r0 and r1 have
- // to be tagged to Smis. If that is not possible, deoptimize.
- DeferredDivI* deferred = new(zone()) DeferredDivI(this, instr);
+void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
+ DwVfpRegister addend = ToDoubleRegister(instr->addend());
+ DwVfpRegister multiplier = ToDoubleRegister(instr->multiplier());
+ DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand());
- __ TrySmiTag(left, &deoptimize, scratch);
- __ TrySmiTag(right, &deoptimize, scratch);
+ // This is computed in-place.
+ ASSERT(addend.is(ToDoubleRegister(instr->result())));
- __ b(al, deferred->entry());
- __ bind(deferred->exit());
+ __ vmla(addend, multiplier, multiplicand);
+}
- // If the result in r0 is a Smi, untag it, else deoptimize.
- __ JumpIfNotSmi(result, &deoptimize);
- __ SmiUntag(result);
- __ b(&done);
- __ bind(&deoptimize);
- DeoptimizeIf(al, instr->environment());
- __ bind(&done);
+void LCodeGen::DoMultiplySubD(LMultiplySubD* instr) {
+ DwVfpRegister minuend = ToDoubleRegister(instr->minuend());
+ DwVfpRegister multiplier = ToDoubleRegister(instr->multiplier());
+ DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand());
+
+ // This is computed in-place.
+ ASSERT(minuend.is(ToDoubleRegister(instr->result())));
+
+ __ vmls(minuend, multiplier, multiplicand);
}
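// For reference (illustrative note, not from the original patch): the two
// helpers above compute their result in place,
//   DoMultiplyAddD:  addend  = addend  + multiplier * multiplicand   (vmla)
//   DoMultiplySubD:  minuend = minuend - multiplier * multiplicand   (vmls)
// which is why the result register is asserted to alias the first operand.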
@@ -1295,94 +1473,108 @@ void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
const Register remainder = ToRegister(instr->temp());
const Register scratch = scratch0();
- // We only optimize this for division by constants, because the standard
- // integer division routine is usually slower than transitionning to VFP.
- // This could be optimized on processors with SDIV available.
- ASSERT(instr->right()->IsConstantOperand());
- int32_t divisor = ToInteger32(LConstantOperand::cast(instr->right()));
- if (divisor < 0) {
- __ cmp(left, Operand(0));
- DeoptimizeIf(eq, instr->environment());
- }
- EmitSignedIntegerDivisionByConstant(result,
- left,
- divisor,
- remainder,
- scratch,
- instr->environment());
- // We operated a truncating division. Correct the result if necessary.
- __ cmp(remainder, Operand(0));
- __ teq(remainder, Operand(divisor), ne);
- __ sub(result, result, Operand(1), LeaveCC, mi);
-}
-
-
-void LCodeGen::DoDeferredBinaryOpStub(LPointerMap* pointer_map,
- LOperand* left_argument,
- LOperand* right_argument,
- Token::Value op) {
- Register left = ToRegister(left_argument);
- Register right = ToRegister(right_argument);
-
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegistersAndDoubles);
- // Move left to r1 and right to r0 for the stub call.
- if (left.is(r1)) {
- __ Move(r0, right);
- } else if (left.is(r0) && right.is(r1)) {
- __ Swap(r0, r1, r2);
- } else if (left.is(r0)) {
- ASSERT(!right.is(r1));
- __ mov(r1, r0);
- __ mov(r0, right);
+ if (!CpuFeatures::IsSupported(SUDIV)) {
+ // If the CPU doesn't support the sdiv instruction, we only optimize when we
+ // have magic numbers for the divisor. The standard integer division routine
+ // is usually slower than transitioning to VFP.
+ ASSERT(instr->right()->IsConstantOperand());
+ int32_t divisor = ToInteger32(LConstantOperand::cast(instr->right()));
+ ASSERT(LChunkBuilder::HasMagicNumberForDivisor(divisor));
+ if (divisor < 0) {
+ __ cmp(left, Operand::Zero());
+ DeoptimizeIf(eq, instr->environment());
+ }
+ EmitSignedIntegerDivisionByConstant(result,
+ left,
+ divisor,
+ remainder,
+ scratch,
+ instr->environment());
+ // We performed a truncating division. Correct the result if necessary.
+ __ cmp(remainder, Operand::Zero());
+ __ teq(remainder, Operand(divisor), ne);
+ __ sub(result, result, Operand(1), LeaveCC, mi);
} else {
- ASSERT(!left.is(r0) && !right.is(r0));
- __ mov(r0, right);
- __ mov(r1, left);
+ CpuFeatureScope scope(masm(), SUDIV);
+ const Register right = ToRegister(instr->right());
+
+ // Check for x / 0.
+ __ cmp(right, Operand::Zero());
+ DeoptimizeIf(eq, instr->environment());
+
+ // Check for (kMinInt / -1).
+ if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+ Label left_not_min_int;
+ __ cmp(left, Operand(kMinInt));
+ __ b(ne, &left_not_min_int);
+ __ cmp(right, Operand(-1));
+ DeoptimizeIf(eq, instr->environment());
+ __ bind(&left_not_min_int);
+ }
+
+ // Check for (0 / -x) that will produce negative zero.
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ __ cmp(right, Operand::Zero());
+ __ cmp(left, Operand::Zero(), mi);
+ // "right" can't be null because the code would have already been
+ // deoptimized. The Z flag is set only if (right < 0) and (left == 0).
+ // In this case we need to deoptimize to produce a -0.
+ DeoptimizeIf(eq, instr->environment());
+ }
+
+ Label done;
+ __ sdiv(result, left, right);
+ // If both operands have the same sign then we are done.
+ __ eor(remainder, left, Operand(right), SetCC);
+ __ b(pl, &done);
+
+ // Check if the result needs to be corrected.
+ __ mls(remainder, result, right, left);
+ __ cmp(remainder, Operand::Zero());
+ __ sub(result, result, Operand(1), LeaveCC, ne);
+
+ __ bind(&done);
}
- BinaryOpStub stub(op, OVERWRITE_LEFT);
- __ CallStub(&stub);
- RecordSafepointWithRegistersAndDoubles(pointer_map,
- 0,
- Safepoint::kNoLazyDeopt);
- // Overwrite the stored value of r0 with the result of the stub.
- __ StoreToSafepointRegistersAndDoublesSlot(r0, r0);
}
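// Reference sketch (not from the original patch, helper name hypothetical):
// both branches of DoMathFloorOfDiv above derive a flooring quotient from a
// truncating one. The truncated quotient needs a -1 correction exactly when
// the remainder is nonzero and the operands have opposite signs; kMinInt / -1
// is assumed to have been deoptimized already.
static int32_t ReferenceFloorDiv(int32_t left, int32_t right) {
  ASSERT(right != 0);
  int32_t quotient = left / right;  // Truncates toward zero.
  int32_t remainder = left - quotient * right;
  if (remainder != 0 && ((left ^ right) < 0)) quotient -= 1;
  return quotient;
}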
void LCodeGen::DoMulI(LMulI* instr) {
- Register scratch = scratch0();
Register result = ToRegister(instr->result());
// Note that result may alias left.
Register left = ToRegister(instr->left());
LOperand* right_op = instr->right();
- bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
bool bailout_on_minus_zero =
instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
+ bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
- if (right_op->IsConstantOperand() && !can_overflow) {
- // Use optimized code for specific constants.
+ if (right_op->IsConstantOperand()) {
int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
if (bailout_on_minus_zero && (constant < 0)) {
// The case of a zero constant will be handled separately.
// If the constant is negative and left is zero, the result should be -0.
- __ cmp(left, Operand(0));
+ __ cmp(left, Operand::Zero());
DeoptimizeIf(eq, instr->environment());
}
switch (constant) {
case -1:
- __ rsb(result, left, Operand(0));
+ if (overflow) {
+ __ rsb(result, left, Operand::Zero(), SetCC);
+ DeoptimizeIf(vs, instr->environment());
+ } else {
+ __ rsb(result, left, Operand::Zero());
+ }
break;
case 0:
if (bailout_on_minus_zero) {
// If left is strictly negative and the constant is zero, the
// result is -0. Deoptimize if required, otherwise return 0.
- __ cmp(left, Operand(0));
+ __ cmp(left, Operand::Zero());
DeoptimizeIf(mi, instr->environment());
}
- __ mov(result, Operand(0));
+ __ mov(result, Operand::Zero());
break;
case 1:
__ Move(result, left);
@@ -1394,23 +1586,21 @@ void LCodeGen::DoMulI(LMulI* instr) {
int32_t mask = constant >> 31;
uint32_t constant_abs = (constant + mask) ^ mask;
- if (IsPowerOf2(constant_abs) ||
- IsPowerOf2(constant_abs - 1) ||
- IsPowerOf2(constant_abs + 1)) {
- if (IsPowerOf2(constant_abs)) {
- int32_t shift = WhichPowerOf2(constant_abs);
- __ mov(result, Operand(left, LSL, shift));
- } else if (IsPowerOf2(constant_abs - 1)) {
- int32_t shift = WhichPowerOf2(constant_abs - 1);
- __ add(result, left, Operand(left, LSL, shift));
- } else if (IsPowerOf2(constant_abs + 1)) {
- int32_t shift = WhichPowerOf2(constant_abs + 1);
- __ rsb(result, left, Operand(left, LSL, shift));
- }
-
+ if (IsPowerOf2(constant_abs)) {
+ int32_t shift = WhichPowerOf2(constant_abs);
+ __ mov(result, Operand(left, LSL, shift));
// Correct the sign of the result if the constant is negative.
- if (constant < 0) __ rsb(result, result, Operand(0));
-
+ if (constant < 0) __ rsb(result, result, Operand::Zero());
+ } else if (IsPowerOf2(constant_abs - 1)) {
+ int32_t shift = WhichPowerOf2(constant_abs - 1);
+ __ add(result, left, Operand(left, LSL, shift));
+ // Correct the sign of the result if the constant is negative.
+ if (constant < 0) __ rsb(result, result, Operand::Zero());
+ } else if (IsPowerOf2(constant_abs + 1)) {
+ int32_t shift = WhichPowerOf2(constant_abs + 1);
+ __ rsb(result, left, Operand(left, LSL, shift));
+ // Correct the sign of the result if the constant is negative.
+ if (constant < 0) __ rsb(result, result, Operand::Zero());
} else {
// Generate standard code.
__ mov(ip, Operand(constant));
@@ -1419,27 +1609,36 @@ void LCodeGen::DoMulI(LMulI* instr) {
}
} else {
- Register right = EmitLoadRegister(right_op, scratch);
- if (bailout_on_minus_zero) {
- __ orr(ToRegister(instr->temp()), left, right);
- }
+ ASSERT(right_op->IsRegister());
+ Register right = ToRegister(right_op);
- if (can_overflow) {
+ if (overflow) {
+ Register scratch = scratch0();
// scratch:result = left * right.
- __ smull(result, scratch, left, right);
+ if (instr->hydrogen()->representation().IsSmi()) {
+ __ SmiUntag(result, left);
+ __ smull(result, scratch, result, right);
+ } else {
+ __ smull(result, scratch, left, right);
+ }
__ cmp(scratch, Operand(result, ASR, 31));
DeoptimizeIf(ne, instr->environment());
} else {
- __ mul(result, left, right);
+ if (instr->hydrogen()->representation().IsSmi()) {
+ __ SmiUntag(result, left);
+ __ mul(result, result, right);
+ } else {
+ __ mul(result, left, right);
+ }
}
if (bailout_on_minus_zero) {
- // Bail out if the result is supposed to be negative zero.
Label done;
- __ cmp(result, Operand(0));
- __ b(ne, &done);
- __ cmp(ToRegister(instr->temp()), Operand(0));
- DeoptimizeIf(mi, instr->environment());
+ __ teq(left, Operand(right));
+ __ b(pl, &done);
+ // Bail out if the result is minus zero.
+ __ cmp(result, Operand::Zero());
+ DeoptimizeIf(eq, instr->environment());
__ bind(&done);
}
}
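// For reference (illustrative note, not from the original patch): the
// constant-multiply decomposition earlier in this function handles constants
// of the form 2^k, 2^k + 1 and 2^k - 1, for example:
//   x * 8  =>  x << 3              (2^3)
//   x * 9  =>  x + (x << 3)        (2^3 + 1)
//   x * 7  =>  (x << 3) - x        (2^3 - 1)
// followed by a negation (rsb from zero) when the constant is negative.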
@@ -1469,7 +1668,11 @@ void LCodeGen::DoBitI(LBitI* instr) {
__ orr(result, left, right);
break;
case Token::BIT_XOR:
- __ eor(result, left, right);
+ if (right_op->IsConstantOperand() && right.immediate() == int32_t(~0)) {
+ __ mvn(result, Operand(left));
+ } else {
+ __ eor(result, left, right);
+ }
break;
default:
UNREACHABLE();
@@ -1489,6 +1692,9 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
// Mask the right_op operand.
__ and_(scratch, ToRegister(right_op), Operand(0x1F));
switch (instr->op()) {
+ case Token::ROR:
+ __ mov(result, Operand(left, ROR, scratch));
+ break;
case Token::SAR:
__ mov(result, Operand(left, ASR, scratch));
break;
@@ -1512,6 +1718,13 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
int value = ToInteger32(LConstantOperand::cast(right_op));
uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
switch (instr->op()) {
+ case Token::ROR:
+ if (shift_count != 0) {
+ __ mov(result, Operand(left, ROR, shift_count));
+ } else {
+ __ Move(result, left);
+ }
+ break;
case Token::SAR:
if (shift_count != 0) {
__ mov(result, Operand(left, ASR, shift_count));
@@ -1532,7 +1745,18 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
break;
case Token::SHL:
if (shift_count != 0) {
- __ mov(result, Operand(left, LSL, shift_count));
+ if (instr->hydrogen_value()->representation().IsSmi() &&
+ instr->can_deopt()) {
+ if (shift_count != 1) {
+ __ mov(result, Operand(left, LSL, shift_count - 1));
+ __ SmiTag(result, result, SetCC);
+ } else {
+ __ SmiTag(result, left, SetCC);
+ }
+ DeoptimizeIf(vs, instr->environment());
+ } else {
+ __ mov(result, Operand(left, LSL, shift_count));
+ }
} else {
__ Move(result, left);
}
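// Note (illustrative, not from the original patch): in the smi-with-deopt
// case above, the left shift is split into a shift by (shift_count - 1)
// followed by SmiTag with SetCC. Tagging doubles the value, so signed
// overflow of that final doubling sets the V flag and triggers the
// deoptimization when the shifted value no longer fits in a smi.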
@@ -1566,8 +1790,33 @@ void LCodeGen::DoSubI(LSubI* instr) {
}
+void LCodeGen::DoRSubI(LRSubI* instr) {
+ LOperand* left = instr->left();
+ LOperand* right = instr->right();
+ LOperand* result = instr->result();
+ bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+ SBit set_cond = can_overflow ? SetCC : LeaveCC;
+
+ if (right->IsStackSlot() || right->IsArgument()) {
+ Register right_reg = EmitLoadRegister(right, ip);
+ __ rsb(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
+ } else {
+ ASSERT(right->IsRegister() || right->IsConstantOperand());
+ __ rsb(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
+ }
+
+ if (can_overflow) {
+ DeoptimizeIf(vs, instr->environment());
+ }
+}
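// Note (illustrative): rsb is ARM's reverse subtract, so DoRSubI above
// computes result = right - left, using SetCC only when an overflow
// deoptimization is required.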
+
+
void LCodeGen::DoConstantI(LConstantI* instr) {
- ASSERT(instr->result()->IsRegister());
+ __ mov(ToRegister(instr->result()), Operand(instr->value()));
+}
+
+
+void LCodeGen::DoConstantS(LConstantS* instr) {
__ mov(ToRegister(instr->result()), Operand(instr->value()));
}
@@ -1580,28 +1829,15 @@ void LCodeGen::DoConstantD(LConstantD* instr) {
}
-void LCodeGen::DoConstantT(LConstantT* instr) {
- Handle<Object> value = instr->value();
- if (value->IsSmi()) {
- __ mov(ToRegister(instr->result()), Operand(value));
- } else {
- __ LoadHeapObject(ToRegister(instr->result()),
- Handle<HeapObject>::cast(value));
- }
-}
-
-
-void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
- Register result = ToRegister(instr->result());
- Register array = ToRegister(instr->value());
- __ ldr(result, FieldMemOperand(array, JSArray::kLengthOffset));
+void LCodeGen::DoConstantE(LConstantE* instr) {
+ __ mov(ToRegister(instr->result()), Operand(instr->value()));
}
-void LCodeGen::DoFixedArrayBaseLength(LFixedArrayBaseLength* instr) {
- Register result = ToRegister(instr->result());
- Register array = ToRegister(instr->value());
- __ ldr(result, FieldMemOperand(array, FixedArrayBase::kLengthOffset));
+void LCodeGen::DoConstantT(LConstantT* instr) {
+ Handle<Object> value = instr->value(isolate());
+ AllowDeferredHandleDereference smi_check;
+ __ Move(ToRegister(instr->result()), value);
}
@@ -1632,10 +1868,12 @@ void LCodeGen::DoValueOf(LValueOf* instr) {
Register map = ToRegister(instr->temp());
Label done;
- // If the object is a smi return the object.
- __ tst(input, Operand(kSmiTagMask));
- __ Move(result, input, eq);
- __ b(eq, &done);
+ if (!instr->hydrogen()->value()->IsHeapObject()) {
+ // If the object is a smi, return the object.
+ __ SmiTst(input);
+ __ Move(result, input, eq);
+ __ b(eq, &done);
+ }
// If the object is not a value type, return the object.
__ CompareObjectType(input, map, map, JS_VALUE_TYPE);
@@ -1658,7 +1896,7 @@ void LCodeGen::DoDateField(LDateField* instr) {
ASSERT(!scratch.is(scratch0()));
ASSERT(!scratch.is(object));
- __ tst(object, Operand(kSmiTagMask));
+ __ SmiTst(object);
DeoptimizeIf(eq, instr->environment());
__ CompareObjectType(object, scratch, scratch, JS_DATE_TYPE);
DeoptimizeIf(ne, instr->environment());
@@ -1686,16 +1924,52 @@ void LCodeGen::DoDateField(LDateField* instr) {
}
-void LCodeGen::DoBitNotI(LBitNotI* instr) {
- Register input = ToRegister(instr->value());
- Register result = ToRegister(instr->result());
- __ mvn(result, Operand(input));
+void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
+ Register string = ToRegister(instr->string());
+ LOperand* index_op = instr->index();
+ Register value = ToRegister(instr->value());
+ Register scratch = scratch0();
+ String::Encoding encoding = instr->encoding();
+
+ if (FLAG_debug_code) {
+ __ ldr(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
+ __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+
+ __ and_(scratch, scratch,
+ Operand(kStringRepresentationMask | kStringEncodingMask));
+ static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+ static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+ __ cmp(scratch, Operand(encoding == String::ONE_BYTE_ENCODING
+ ? one_byte_seq_type : two_byte_seq_type));
+ __ Check(eq, kUnexpectedStringType);
+ }
+
+ if (index_op->IsConstantOperand()) {
+ int constant_index = ToInteger32(LConstantOperand::cast(index_op));
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ __ strb(value,
+ FieldMemOperand(string, SeqString::kHeaderSize + constant_index));
+ } else {
+ __ strh(value,
+ FieldMemOperand(string, SeqString::kHeaderSize + constant_index * 2));
+ }
+ } else {
+ Register index = ToRegister(index_op);
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ __ add(scratch, string, Operand(index));
+ __ strb(value, FieldMemOperand(scratch, SeqString::kHeaderSize));
+ } else {
+ __ add(scratch, string, Operand(index, LSL, 1));
+ __ strh(value, FieldMemOperand(scratch, SeqString::kHeaderSize));
+ }
+ }
}
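// Note (illustrative, not from the original patch): the character data starts
// at SeqString::kHeaderSize, so one-byte strings are addressed with a plain
// byte index (strb) while two-byte strings scale the index by two (LSL #1)
// and store a halfword (strh).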
void LCodeGen::DoThrow(LThrow* instr) {
Register input_reg = EmitLoadRegister(instr->value(), ip);
__ push(input_reg);
+ ASSERT(ToRegister(instr->context()).is(cp));
CallRuntime(Runtime::kThrow, 1, instr);
if (FLAG_debug_code) {
@@ -1729,33 +2003,38 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
LOperand* left = instr->left();
LOperand* right = instr->right();
HMathMinMax::Operation operation = instr->hydrogen()->operation();
- Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
- if (instr->hydrogen()->representation().IsInteger32()) {
+ if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
+ Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
Register left_reg = ToRegister(left);
Operand right_op = (right->IsRegister() || right->IsConstantOperand())
? ToOperand(right)
: Operand(EmitLoadRegister(right, ip));
Register result_reg = ToRegister(instr->result());
__ cmp(left_reg, right_op);
- if (!result_reg.is(left_reg)) {
- __ mov(result_reg, left_reg, LeaveCC, condition);
- }
+ __ Move(result_reg, left_reg, condition);
__ mov(result_reg, right_op, LeaveCC, NegateCondition(condition));
} else {
ASSERT(instr->hydrogen()->representation().IsDouble());
- DoubleRegister left_reg = ToDoubleRegister(left);
- DoubleRegister right_reg = ToDoubleRegister(right);
- DoubleRegister result_reg = ToDoubleRegister(instr->result());
- Label check_nan_left, check_zero, return_left, return_right, done;
+ DwVfpRegister left_reg = ToDoubleRegister(left);
+ DwVfpRegister right_reg = ToDoubleRegister(right);
+ DwVfpRegister result_reg = ToDoubleRegister(instr->result());
+ Label result_is_nan, return_left, return_right, check_zero, done;
__ VFPCompareAndSetFlags(left_reg, right_reg);
- __ b(vs, &check_nan_left);
- __ b(eq, &check_zero);
- __ b(condition, &return_left);
- __ b(al, &return_right);
-
- __ bind(&check_zero);
+ if (operation == HMathMinMax::kMathMin) {
+ __ b(mi, &return_left);
+ __ b(gt, &return_right);
+ } else {
+ __ b(mi, &return_right);
+ __ b(gt, &return_left);
+ }
+ __ b(vs, &result_is_nan);
+ // Left equals right => check for -0.
__ VFPCompareAndSetFlags(left_reg, 0.0);
- __ b(ne, &return_left); // left == right != 0.
+ if (left_reg.is(result_reg) || right_reg.is(result_reg)) {
+ __ b(ne, &done); // left == right != 0.
+ } else {
+ __ b(ne, &return_left); // left == right != 0.
+ }
// At this point, both left and right are either 0 or -0.
if (operation == HMathMinMax::kMathMin) {
// We could use a single 'vorr' instruction here if we had NEON support.
@@ -1767,30 +2046,30 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
// the decision for vadd is easy because vand is a NEON instruction.
__ vadd(result_reg, left_reg, right_reg);
}
- __ b(al, &done);
+ __ b(&done);
+
+ __ bind(&result_is_nan);
+ __ vadd(result_reg, left_reg, right_reg);
+ __ b(&done);
- __ bind(&check_nan_left);
- __ VFPCompareAndSetFlags(left_reg, left_reg);
- __ b(vs, &return_left); // left == NaN.
__ bind(&return_right);
- if (!right_reg.is(result_reg)) {
- __ vmov(result_reg, right_reg);
+ __ Move(result_reg, right_reg);
+ if (!left_reg.is(result_reg)) {
+ __ b(&done);
}
- __ b(al, &done);
__ bind(&return_left);
- if (!left_reg.is(result_reg)) {
- __ vmov(result_reg, left_reg);
- }
+ __ Move(result_reg, left_reg);
+
__ bind(&done);
}
}
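// Note (illustrative, not from the original patch): when the operands compare
// equal they can still be +0 and -0, whose signs matter: min(-0, +0) must be
// -0 and max(-0, +0) must be +0. vadd gives the max case directly, since
// -0 + +0 == +0; the min case needs the sign-propagating sequence referred to
// in the comment above, because vadd alone would drop the -0.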
void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
- DoubleRegister left = ToDoubleRegister(instr->left());
- DoubleRegister right = ToDoubleRegister(instr->right());
- DoubleRegister result = ToDoubleRegister(instr->result());
+ DwVfpRegister left = ToDoubleRegister(instr->left());
+ DwVfpRegister right = ToDoubleRegister(instr->right());
+ DwVfpRegister result = ToDoubleRegister(instr->result());
switch (instr->op()) {
case Token::ADD:
__ vadd(result, left, right);
@@ -1828,6 +2107,7 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->left()).is(r1));
ASSERT(ToRegister(instr->right()).is(r0));
ASSERT(ToRegister(instr->result()).is(r0));
@@ -1836,99 +2116,116 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
// Block literal pool emission to ensure that the nop indicating no inlined
// smi code is in the correct position.
Assembler::BlockConstPoolScope block_const_pool(masm());
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
__ nop(); // Signals no inlined code.
}
-int LCodeGen::GetNextEmittedBlock(int block) {
- for (int i = block + 1; i < graph()->blocks()->length(); ++i) {
- LLabel* label = chunk_->GetLabel(i);
- if (!label->HasReplacement()) return i;
- }
- return -1;
-}
+template<class InstrType>
+void LCodeGen::EmitBranch(InstrType instr, Condition condition) {
+ int left_block = instr->TrueDestination(chunk_);
+ int right_block = instr->FalseDestination(chunk_);
+ int next_block = GetNextEmittedBlock();
-void LCodeGen::EmitBranch(int left_block, int right_block, Condition cc) {
- int next_block = GetNextEmittedBlock(current_block_);
- right_block = chunk_->LookupDestination(right_block);
- left_block = chunk_->LookupDestination(left_block);
-
- if (right_block == left_block) {
+ if (right_block == left_block || condition == al) {
EmitGoto(left_block);
} else if (left_block == next_block) {
- __ b(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
+ __ b(NegateCondition(condition), chunk_->GetAssemblyLabel(right_block));
} else if (right_block == next_block) {
- __ b(cc, chunk_->GetAssemblyLabel(left_block));
+ __ b(condition, chunk_->GetAssemblyLabel(left_block));
} else {
- __ b(cc, chunk_->GetAssemblyLabel(left_block));
+ __ b(condition, chunk_->GetAssemblyLabel(left_block));
__ b(chunk_->GetAssemblyLabel(right_block));
}
}
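// Note (illustrative, not from the original patch): EmitBranch above emits at
// most one conditional branch by falling through whenever one of the two
// destinations is the next block to be emitted, branching on the negated
// condition when the true block is the fall-through.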
-void LCodeGen::DoBranch(LBranch* instr) {
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
+template<class InstrType>
+void LCodeGen::EmitFalseBranch(InstrType instr, Condition condition) {
+ int false_block = instr->FalseDestination(chunk_);
+ __ b(condition, chunk_->GetAssemblyLabel(false_block));
+}
+
+
+void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
+ __ stop("LBreak");
+}
+
+void LCodeGen::DoBranch(LBranch* instr) {
Representation r = instr->hydrogen()->value()->representation();
- if (r.IsInteger32()) {
+ if (r.IsInteger32() || r.IsSmi()) {
+ ASSERT(!info()->IsStub());
Register reg = ToRegister(instr->value());
- __ cmp(reg, Operand(0));
- EmitBranch(true_block, false_block, ne);
+ __ cmp(reg, Operand::Zero());
+ EmitBranch(instr, ne);
} else if (r.IsDouble()) {
- DoubleRegister reg = ToDoubleRegister(instr->value());
- Register scratch = scratch0();
-
+ ASSERT(!info()->IsStub());
+ DwVfpRegister reg = ToDoubleRegister(instr->value());
// Test the double value. Zero and NaN are false.
- __ VFPCompareAndLoadFlags(reg, 0.0, scratch);
- __ tst(scratch, Operand(kVFPZConditionFlagBit | kVFPVConditionFlagBit));
- EmitBranch(true_block, false_block, eq);
+ __ VFPCompareAndSetFlags(reg, 0.0);
+ __ cmp(r0, r0, vs); // If NaN, set the Z flag. (NaN -> false)
+ EmitBranch(instr, ne);
} else {
ASSERT(r.IsTagged());
Register reg = ToRegister(instr->value());
HType type = instr->hydrogen()->value()->type();
if (type.IsBoolean()) {
+ ASSERT(!info()->IsStub());
__ CompareRoot(reg, Heap::kTrueValueRootIndex);
- EmitBranch(true_block, false_block, eq);
+ EmitBranch(instr, eq);
} else if (type.IsSmi()) {
- __ cmp(reg, Operand(0));
- EmitBranch(true_block, false_block, ne);
+ ASSERT(!info()->IsStub());
+ __ cmp(reg, Operand::Zero());
+ EmitBranch(instr, ne);
+ } else if (type.IsJSArray()) {
+ ASSERT(!info()->IsStub());
+ EmitBranch(instr, al);
+ } else if (type.IsHeapNumber()) {
+ ASSERT(!info()->IsStub());
+ DwVfpRegister dbl_scratch = double_scratch0();
+ __ vldr(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
+ // Test the double value. Zero and NaN are false.
+ __ VFPCompareAndSetFlags(dbl_scratch, 0.0);
+ __ cmp(r0, r0, vs); // If NaN, set the Z flag. (NaN -> false)
+ EmitBranch(instr, ne);
+ } else if (type.IsString()) {
+ ASSERT(!info()->IsStub());
+ __ ldr(ip, FieldMemOperand(reg, String::kLengthOffset));
+ __ cmp(ip, Operand::Zero());
+ EmitBranch(instr, ne);
} else {
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
// Avoid deopts in the case where we've never executed this path before.
- if (expected.IsEmpty()) expected = ToBooleanStub::all_types();
+ if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
if (expected.Contains(ToBooleanStub::UNDEFINED)) {
// undefined -> false.
__ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
- __ b(eq, false_label);
+ __ b(eq, instr->FalseLabel(chunk_));
}
if (expected.Contains(ToBooleanStub::BOOLEAN)) {
// Boolean -> its value.
__ CompareRoot(reg, Heap::kTrueValueRootIndex);
- __ b(eq, true_label);
+ __ b(eq, instr->TrueLabel(chunk_));
__ CompareRoot(reg, Heap::kFalseValueRootIndex);
- __ b(eq, false_label);
+ __ b(eq, instr->FalseLabel(chunk_));
}
if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
// 'null' -> false.
__ CompareRoot(reg, Heap::kNullValueRootIndex);
- __ b(eq, false_label);
+ __ b(eq, instr->FalseLabel(chunk_));
}
if (expected.Contains(ToBooleanStub::SMI)) {
// Smis: 0 -> false, all other -> true.
- __ cmp(reg, Operand(0));
- __ b(eq, false_label);
- __ JumpIfSmi(reg, true_label);
+ __ cmp(reg, Operand::Zero());
+ __ b(eq, instr->FalseLabel(chunk_));
+ __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
} else if (expected.NeedsMap()) {
// If we need a map later and have a Smi -> deopt.
- __ tst(reg, Operand(kSmiTagMask));
+ __ SmiTst(reg);
DeoptimizeIf(eq, instr->environment());
}
@@ -1940,14 +2237,14 @@ void LCodeGen::DoBranch(LBranch* instr) {
// Undetectable -> false.
__ ldrb(ip, FieldMemOperand(map, Map::kBitFieldOffset));
__ tst(ip, Operand(1 << Map::kIsUndetectable));
- __ b(ne, false_label);
+ __ b(ne, instr->FalseLabel(chunk_));
}
}
if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
// spec object -> true.
__ CompareInstanceType(map, ip, FIRST_SPEC_OBJECT_TYPE);
- __ b(ge, true_label);
+ __ b(ge, instr->TrueLabel(chunk_));
}
if (expected.Contains(ToBooleanStub::STRING)) {
@@ -1956,38 +2253,45 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
__ b(ge, &not_string);
__ ldr(ip, FieldMemOperand(reg, String::kLengthOffset));
- __ cmp(ip, Operand(0));
- __ b(ne, true_label);
- __ b(false_label);
+ __ cmp(ip, Operand::Zero());
+ __ b(ne, instr->TrueLabel(chunk_));
+ __ b(instr->FalseLabel(chunk_));
__ bind(&not_string);
}
+ if (expected.Contains(ToBooleanStub::SYMBOL)) {
+ // Symbol value -> true.
+ __ CompareInstanceType(map, ip, SYMBOL_TYPE);
+ __ b(eq, instr->TrueLabel(chunk_));
+ }
+
if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
// heap number -> false iff +0, -0, or NaN.
- DoubleRegister dbl_scratch = double_scratch0();
+ DwVfpRegister dbl_scratch = double_scratch0();
Label not_heap_number;
__ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
__ b(ne, &not_heap_number);
__ vldr(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
__ VFPCompareAndSetFlags(dbl_scratch, 0.0);
- __ b(vs, false_label); // NaN -> false.
- __ b(eq, false_label); // +0, -0 -> false.
- __ b(true_label);
+ __ cmp(r0, r0, vs); // NaN -> false.
+ __ b(eq, instr->FalseLabel(chunk_)); // +0, -0 -> false.
+ __ b(instr->TrueLabel(chunk_));
__ bind(&not_heap_number);
}
- // We've seen something for the first time -> deopt.
- DeoptimizeIf(al, instr->environment());
+ if (!expected.IsGeneric()) {
+ // We've seen something for the first time -> deopt.
+ // This can only happen if we are not generic already.
+ DeoptimizeIf(al, instr->environment());
+ }
}
}
}
void LCodeGen::EmitGoto(int block) {
- block = chunk_->LookupDestination(block);
- int next_block = GetNextEmittedBlock(current_block_);
- if (block != next_block) {
- __ jmp(chunk_->GetAssemblyLabel(block));
+ if (!IsNextEmittedBlock(block)) {
+ __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
}
}
@@ -2004,6 +2308,10 @@ Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
case Token::EQ_STRICT:
cond = eq;
break;
+ case Token::NE:
+ case Token::NE_STRICT:
+ cond = ne;
+ break;
case Token::LT:
cond = is_unsigned ? lo : lt;
break;
@@ -2025,20 +2333,17 @@ Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
}
-void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
+void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
LOperand* left = instr->left();
LOperand* right = instr->right();
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
Condition cond = TokenToCondition(instr->op(), false);
if (left->IsConstantOperand() && right->IsConstantOperand()) {
// We can statically evaluate the comparison.
double left_val = ToDouble(LConstantOperand::cast(left));
double right_val = ToDouble(LConstantOperand::cast(right));
- int next_block =
- EvalComparison(instr->op(), left_val, right_val) ? true_block
- : false_block;
+ int next_block = EvalComparison(instr->op(), left_val, right_val) ?
+ instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
EmitGoto(next_block);
} else {
if (instr->is_double()) {
@@ -2047,21 +2352,29 @@ void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
__ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right));
// If a NaN is involved, i.e. the result is unordered (V set),
// jump to false block label.
- __ b(vs, chunk_->GetAssemblyLabel(false_block));
+ __ b(vs, instr->FalseLabel(chunk_));
} else {
if (right->IsConstantOperand()) {
- __ cmp(ToRegister(left),
- Operand(ToInteger32(LConstantOperand::cast(right))));
+ int32_t value = ToInteger32(LConstantOperand::cast(right));
+ if (instr->hydrogen_value()->representation().IsSmi()) {
+ __ cmp(ToRegister(left), Operand(Smi::FromInt(value)));
+ } else {
+ __ cmp(ToRegister(left), Operand(value));
+ }
} else if (left->IsConstantOperand()) {
- __ cmp(ToRegister(right),
- Operand(ToInteger32(LConstantOperand::cast(left))));
+ int32_t value = ToInteger32(LConstantOperand::cast(left));
+ if (instr->hydrogen_value()->representation().IsSmi()) {
+ __ cmp(ToRegister(right), Operand(Smi::FromInt(value)));
+ } else {
+ __ cmp(ToRegister(right), Operand(value));
+ }
// We transposed the operands. Reverse the condition.
cond = ReverseCondition(cond);
} else {
__ cmp(ToRegister(left), ToRegister(right));
}
}
- EmitBranch(true_block, false_block, cond);
+ EmitBranch(instr, cond);
}
}
@@ -2069,63 +2382,29 @@ void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
Register left = ToRegister(instr->left());
Register right = ToRegister(instr->right());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
__ cmp(left, Operand(right));
- EmitBranch(true_block, false_block, eq);
+ EmitBranch(instr, eq);
}
-void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
- Register left = ToRegister(instr->left());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- __ cmp(left, Operand(instr->hydrogen()->right()));
- EmitBranch(true_block, false_block, eq);
-}
-
-
-void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) {
- Register scratch = scratch0();
- Register reg = ToRegister(instr->value());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- // If the expression is known to be untagged or a smi, then it's definitely
- // not null, and it can't be a an undetectable object.
- if (instr->hydrogen()->representation().IsSpecialization() ||
- instr->hydrogen()->type().IsSmi()) {
- EmitGoto(false_block);
+void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
+ if (instr->hydrogen()->representation().IsTagged()) {
+ Register input_reg = ToRegister(instr->object());
+ __ mov(ip, Operand(factory()->the_hole_value()));
+ __ cmp(input_reg, ip);
+ EmitBranch(instr, eq);
return;
}
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- Heap::RootListIndex nil_value = instr->nil() == kNullValue ?
- Heap::kNullValueRootIndex :
- Heap::kUndefinedValueRootIndex;
- __ LoadRoot(ip, nil_value);
- __ cmp(reg, ip);
- if (instr->kind() == kStrictEquality) {
- EmitBranch(true_block, false_block, eq);
- } else {
- Heap::RootListIndex other_nil_value = instr->nil() == kNullValue ?
- Heap::kUndefinedValueRootIndex :
- Heap::kNullValueRootIndex;
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
- __ b(eq, true_label);
- __ LoadRoot(ip, other_nil_value);
- __ cmp(reg, ip);
- __ b(eq, true_label);
- __ JumpIfSmi(reg, false_label);
- // Check for undetectable objects by looking in the bit field in
- // the map. The object has already been smi checked.
- __ ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
- __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
- __ tst(scratch, Operand(1 << Map::kIsUndetectable));
- EmitBranch(true_block, false_block, ne);
- }
+ DwVfpRegister input_reg = ToDoubleRegister(instr->object());
+ __ VFPCompareAndSetFlags(input_reg, input_reg);
+ EmitFalseBranch(instr, vc);
+
+ Register scratch = scratch0();
+ __ VmovHigh(scratch, input_reg);
+ __ cmp(scratch, Operand(kHoleNanUpper32));
+ EmitBranch(instr, eq);
}
@@ -2160,22 +2439,21 @@ void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
Register reg = ToRegister(instr->value());
Register temp1 = ToRegister(instr->temp());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
Condition true_cond =
- EmitIsObject(reg, temp1, false_label, true_label);
+ EmitIsObject(reg, temp1,
+ instr->FalseLabel(chunk_), instr->TrueLabel(chunk_));
- EmitBranch(true_block, false_block, true_cond);
+ EmitBranch(instr, true_cond);
}
Condition LCodeGen::EmitIsString(Register input,
Register temp1,
- Label* is_not_string) {
- __ JumpIfSmi(input, is_not_string);
+ Label* is_not_string,
+ SmiCheck check_needed = INLINE_SMI_CHECK) {
+ if (check_needed == INLINE_SMI_CHECK) {
+ __ JumpIfSmi(input, is_not_string);
+ }
__ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE);
return lt;
@@ -2186,24 +2464,20 @@ void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
Register reg = ToRegister(instr->value());
Register temp1 = ToRegister(instr->temp());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
+ SmiCheck check_needed =
+ instr->hydrogen()->value()->IsHeapObject()
+ ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
Condition true_cond =
- EmitIsString(reg, temp1, false_label);
+ EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed);
- EmitBranch(true_block, false_block, true_cond);
+ EmitBranch(instr, true_cond);
}
void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
Register input_reg = EmitLoadRegister(instr->value(), ip);
- __ tst(input_reg, Operand(kSmiTagMask));
- EmitBranch(true_block, false_block, eq);
+ __ SmiTst(input_reg);
+ EmitBranch(instr, eq);
}
@@ -2211,14 +2485,13 @@ void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
Register input = ToRegister(instr->value());
Register temp = ToRegister(instr->temp());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- __ JumpIfSmi(input, chunk_->GetAssemblyLabel(false_block));
+ if (!instr->hydrogen()->value()->IsHeapObject()) {
+ __ JumpIfSmi(input, instr->FalseLabel(chunk_));
+ }
__ ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
__ ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
__ tst(temp, Operand(1 << Map::kIsUndetectable));
- EmitBranch(true_block, false_block, ne);
+ EmitBranch(instr, ne);
}
@@ -2243,17 +2516,17 @@ static Condition ComputeCompareCondition(Token::Value op) {
void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
Token::Value op = instr->op();
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- Handle<Code> ic = CompareIC::GetUninitialized(op);
+ Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
CallCode(ic, RelocInfo::CODE_TARGET, instr);
- __ cmp(r0, Operand(0)); // This instruction also signals no smi code inlined.
+ // This instruction also signals no smi code inlined.
+ __ cmp(r0, Operand::Zero());
Condition condition = ComputeCompareCondition(op);
- EmitBranch(true_block, false_block, condition);
+ EmitBranch(instr, condition);
}
@@ -2281,15 +2554,12 @@ void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
Register scratch = scratch0();
Register input = ToRegister(instr->value());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
- __ JumpIfSmi(input, false_label);
+ if (!instr->hydrogen()->value()->IsHeapObject()) {
+ __ JumpIfSmi(input, instr->FalseLabel(chunk_));
+ }
__ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
- EmitBranch(true_block, false_block, BranchCondition(instr->hydrogen()));
+ EmitBranch(instr, BranchCondition(instr->hydrogen()));
}
@@ -2309,13 +2579,10 @@ void LCodeGen::DoHasCachedArrayIndexAndBranch(
Register input = ToRegister(instr->value());
Register scratch = scratch0();
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
__ ldr(scratch,
FieldMemOperand(input, String::kHashFieldOffset));
__ tst(scratch, Operand(String::kContainsCachedArrayIndexMask));
- EmitBranch(true_block, false_block, eq);
+ EmitBranch(instr, eq);
}
@@ -2333,7 +2600,7 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
__ JumpIfSmi(input, is_false);
- if (class_name->IsEqualTo(CStrVector("Function"))) {
+ if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Function"))) {
// Assuming the following assertions, we can use the same compares to test
// for both being a function type and being in the object type range.
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
@@ -2364,7 +2631,7 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
// Objects with a non-function constructor have class 'Object'.
__ CompareObjectType(temp, temp2, temp2, JS_FUNCTION_TYPE);
- if (class_name->IsEqualTo(CStrVector("Object"))) {
+ if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Object"))) {
__ b(ne, is_true);
} else {
__ b(ne, is_false);
@@ -2375,12 +2642,12 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
__ ldr(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
__ ldr(temp, FieldMemOperand(temp,
SharedFunctionInfo::kInstanceClassNameOffset));
- // The class name we are testing against is a symbol because it's a literal.
- // The name in the constructor is a symbol because of the way the context is
- // booted. This routine isn't expected to work for random API-created
+ // The class name we are testing against is internalized since it's a literal.
+ // The name in the constructor is internalized because of the way the context
+ // is booted. This routine isn't expected to work for random API-created
// classes and it doesn't have to because you can't access it with natives
- // syntax. Since both sides are symbols it is sufficient to use an identity
- // comparison.
+ // syntax. Since both sides are internalized it is sufficient to use an
+ // identity comparison.
__ cmp(temp, Operand(class_name));
// End with the answer in flags.
}
@@ -2392,53 +2659,47 @@ void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
Register temp2 = ToRegister(instr->temp());
Handle<String> class_name = instr->hydrogen()->class_name();
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
+ EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
+ class_name, input, temp, temp2);
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
- EmitClassOfTest(true_label, false_label, class_name, input, temp, temp2);
-
- EmitBranch(true_block, false_block, eq);
+ EmitBranch(instr, eq);
}
void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
Register reg = ToRegister(instr->value());
Register temp = ToRegister(instr->temp());
- int true_block = instr->true_block_id();
- int false_block = instr->false_block_id();
__ ldr(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
__ cmp(temp, Operand(instr->map()));
- EmitBranch(true_block, false_block, eq);
+ EmitBranch(instr, eq);
}
void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->left()).is(r0)); // Object is in r0.
ASSERT(ToRegister(instr->right()).is(r1)); // Function is in r1.
InstanceofStub stub(InstanceofStub::kArgsInRegisters);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- __ cmp(r0, Operand(0));
+ __ cmp(r0, Operand::Zero());
__ mov(r0, Operand(factory()->false_value()), LeaveCC, ne);
__ mov(r0, Operand(factory()->true_value()), LeaveCC, eq);
}
void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
- class DeferredInstanceOfKnownGlobal: public LDeferredCode {
+ class DeferredInstanceOfKnownGlobal V8_FINAL : public LDeferredCode {
public:
DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
LInstanceOfKnownGlobal* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
+ virtual void Generate() V8_OVERRIDE {
codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
}
- virtual LInstruction* instr() { return instr_; }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
Label* map_check() { return &map_check_; }
private:
LInstanceOfKnownGlobal* instr_;
@@ -2473,11 +2734,10 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
// We use Factory::the_hole_value() on purpose instead of loading from the
// root array to force relocation to be able to later patch with
// the cached map.
- PredictableCodeSizeScope predictable(masm_);
- Handle<JSGlobalPropertyCell> cell =
- factory()->NewJSGlobalPropertyCell(factory()->the_hole_value());
+ PredictableCodeSizeScope predictable(masm_, 5 * Assembler::kInstrSize);
+ Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
__ mov(ip, Operand(Handle<Object>(cell)));
- __ ldr(ip, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
+ __ ldr(ip, FieldMemOperand(ip, PropertyCell::kValueOffset));
__ cmp(map, Operand(ip));
__ b(ne, &cache_miss);
// We use Factory::the_hole_value() on purpose instead of loading from the
@@ -2527,17 +2787,18 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
InstanceofStub stub(flags);
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ LoadContextFromDeferred(instr->context());
  // Get the temp register reserved by the instruction. This needs to be r4 as
  // its slot in the pushed safepoint register area is used to communicate the
  // offset to the location of the map check.
Register temp = ToRegister(instr->temp());
ASSERT(temp.is(r4));
- __ LoadHeapObject(InstanceofStub::right(), instr->function());
+ __ Move(InstanceofStub::right(), instr->function());
static const int kAdditionalDelta = 5;
  // Make sure that code size is predictable, since we use specific constant
  // offsets in the code to find embedded values.
- PredictableCodeSizeScope predictable(masm_);
+ PredictableCodeSizeScope predictable(masm_, 6 * Assembler::kInstrSize);
int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
Label before_push_delta;
__ bind(&before_push_delta);
@@ -2550,7 +2811,7 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
__ nop();
}
__ StoreToSafepointRegisterSlot(temp, temp);
- CallCodeGeneric(stub.GetCode(),
+ CallCodeGeneric(stub.GetCode(isolate()),
RelocInfo::CODE_TARGET,
instr,
RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
@@ -2563,11 +2824,13 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
void LCodeGen::DoCmpT(LCmpT* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
Token::Value op = instr->op();
- Handle<Code> ic = CompareIC::GetUninitialized(op);
+ Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
CallCode(ic, RelocInfo::CODE_TARGET, instr);
- __ cmp(r0, Operand(0)); // This instruction also signals no smi code inlined.
+ // This instruction also signals no smi code inlined.
+ __ cmp(r0, Operand::Zero());
Condition condition = ComputeCompareCondition(op);
__ LoadRoot(ToRegister(instr->result()),
@@ -2580,24 +2843,58 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
void LCodeGen::DoReturn(LReturn* instr) {
- if (FLAG_trace) {
+ if (FLAG_trace && info()->IsOptimizing()) {
// Push the return value on the stack as the parameter.
- // Runtime::TraceExit returns its parameter in r0.
+ // Runtime::TraceExit returns its parameter in r0. We're leaving the code
+  // managed by the register allocator and tearing down the frame, so it's
+ // safe to write to the context register.
__ push(r0);
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ CallRuntime(Runtime::kTraceExit, 1);
}
- int32_t sp_delta = (GetParameterCount() + 1) * kPointerSize;
- __ mov(sp, fp);
- __ ldm(ia_w, sp, fp.bit() | lr.bit());
- __ add(sp, sp, Operand(sp_delta));
+ if (info()->saves_caller_doubles()) {
+ ASSERT(NeedsEagerFrame());
+ BitVector* doubles = chunk()->allocated_double_registers();
+ BitVector::Iterator save_iterator(doubles);
+ int count = 0;
+ while (!save_iterator.Done()) {
+ __ vldr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
+ MemOperand(sp, count * kDoubleSize));
+ save_iterator.Advance();
+ count++;
+ }
+ }
+ int no_frame_start = -1;
+ if (NeedsEagerFrame()) {
+ __ mov(sp, fp);
+ no_frame_start = masm_->pc_offset();
+ __ ldm(ia_w, sp, fp.bit() | lr.bit());
+ }
+ if (instr->has_constant_parameter_count()) {
+ int parameter_count = ToInteger32(instr->constant_parameter_count());
+ int32_t sp_delta = (parameter_count + 1) * kPointerSize;
+ if (sp_delta != 0) {
+ __ add(sp, sp, Operand(sp_delta));
+ }
+ } else {
+ Register reg = ToRegister(instr->parameter_count());
+    // The argument count parameter is a smi.
+ __ SmiUntag(reg);
+ __ add(sp, sp, Operand(reg, LSL, kPointerSizeLog2));
+ }
+
__ Jump(lr);
+
+ if (no_frame_start != -1) {
+ info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
+ }
}
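// A minimal standalone sketch (not part of this patch) of the stack-delta
// arithmetic in DoReturn above, assuming 32-bit pointers and a 1-bit smi tag.
// The constant path pops the parameters plus the receiver; the dynamic path
// untags a smi count and scales it to bytes (whether that count already
// includes the receiver is not modelled here).
#include <cassert>
#include <cstdint>

namespace {
const int kPointerSize = 4;
const int kPointerSizeLog2 = 2;
const int kSmiTagSize = 1;

int32_t ConstantPathDelta(int parameter_count) {
  return (parameter_count + 1) * kPointerSize;  // "+ 1" drops the receiver too.
}

int32_t DynamicPathDelta(int32_t smi_tagged_count) {
  int32_t count = smi_tagged_count >> kSmiTagSize;  // SmiUntag.
  return count << kPointerSizeLog2;                 // add(sp, sp, reg LSL 2).
}
}  // namespace

int main() {
  assert(ConstantPathDelta(3) == 16);
  assert(DynamicPathDelta(3 << kSmiTagSize) == 12);
  return 0;
}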
void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
Register result = ToRegister(instr->result());
- __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell())));
- __ ldr(result, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
+ __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell().handle())));
+ __ ldr(result, FieldMemOperand(ip, Cell::kValueOffset));
if (instr->hydrogen()->RequiresHoleCheck()) {
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(result, ip);
@@ -2607,6 +2904,7 @@ void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->global_object()).is(r0));
ASSERT(ToRegister(instr->result()).is(r0));
@@ -2623,7 +2921,7 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
Register cell = scratch0();
// Load the cell.
- __ mov(cell, Operand(instr->hydrogen()->cell()));
+ __ mov(cell, Operand(instr->hydrogen()->cell().handle()));
// If the cell we are storing to contains the hole it could have
// been deleted from the property dictionary. In that case, we need
@@ -2632,18 +2930,19 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
// We use a temp to check the payload (CompareRoot might clobber ip).
Register payload = ToRegister(instr->temp());
- __ ldr(payload, FieldMemOperand(cell, JSGlobalPropertyCell::kValueOffset));
+ __ ldr(payload, FieldMemOperand(cell, Cell::kValueOffset));
__ CompareRoot(payload, Heap::kTheHoleValueRootIndex);
DeoptimizeIf(eq, instr->environment());
}
// Store the value.
- __ str(value, FieldMemOperand(cell, JSGlobalPropertyCell::kValueOffset));
+ __ str(value, FieldMemOperand(cell, Cell::kValueOffset));
// Cells are always rescanned, so no write barrier here.
}
void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->global_object()).is(r1));
ASSERT(ToRegister(instr->value()).is(r0));
@@ -2692,14 +2991,14 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
__ str(value, target);
if (instr->hydrogen()->NeedsWriteBarrier()) {
- HType type = instr->hydrogen()->value()->type();
SmiCheck check_needed =
- type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+ instr->hydrogen()->value()->IsHeapObject()
+ ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
__ RecordWriteContextSlot(context,
target.offset(),
value,
scratch,
- kLRHasBeenSaved,
+ GetLinkRegisterState(),
kSaveFPRegs,
EMIT_REMEMBERED_SET,
check_needed);
@@ -2710,104 +3009,43 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
+ HObjectAccess access = instr->hydrogen()->access();
+ int offset = access.offset();
Register object = ToRegister(instr->object());
- Register result = ToRegister(instr->result());
- if (instr->hydrogen()->is_in_object()) {
- __ ldr(result, FieldMemOperand(object, instr->hydrogen()->offset()));
- } else {
- __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
- __ ldr(result, FieldMemOperand(result, instr->hydrogen()->offset()));
- }
-}
-
-
-void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
- Register object,
- Handle<Map> type,
- Handle<String> name,
- LEnvironment* env) {
- LookupResult lookup(isolate());
- type->LookupDescriptor(NULL, *name, &lookup);
- ASSERT(lookup.IsFound() || lookup.IsCacheable());
- if (lookup.IsField()) {
- int index = lookup.GetLocalFieldIndexFromMap(*type);
- int offset = index * kPointerSize;
- if (index < 0) {
- // Negative property indices are in-object properties, indexed
- // from the end of the fixed part of the object.
- __ ldr(result, FieldMemOperand(object, offset + type->instance_size()));
+
+ if (access.IsExternalMemory()) {
+ Register result = ToRegister(instr->result());
+ MemOperand operand = MemOperand(object, offset);
+ if (access.representation().IsByte()) {
+ __ ldrb(result, operand);
} else {
- // Non-negative property indices are in the properties array.
- __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
- __ ldr(result, FieldMemOperand(result, offset + FixedArray::kHeaderSize));
+ __ ldr(result, operand);
}
- } else if (lookup.IsConstantFunction()) {
- Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
- __ LoadHeapObject(result, function);
- } else {
- // Negative lookup.
- // Check prototypes.
- Handle<HeapObject> current(HeapObject::cast((*type)->prototype()));
- Heap* heap = type->GetHeap();
- while (*current != heap->null_value()) {
- __ LoadHeapObject(result, current);
- __ ldr(result, FieldMemOperand(result, HeapObject::kMapOffset));
- __ cmp(result, Operand(Handle<Map>(current->map())));
- DeoptimizeIf(ne, env);
- current =
- Handle<HeapObject>(HeapObject::cast(current->map()->prototype()));
- }
- __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
+ return;
}
-}
-
-
-void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
- Register object = ToRegister(instr->object());
- Register result = ToRegister(instr->result());
- Register object_map = scratch0();
-
- int map_count = instr->hydrogen()->types()->length();
- bool need_generic = instr->hydrogen()->need_generic();
- if (map_count == 0 && !need_generic) {
- DeoptimizeIf(al, instr->environment());
+ if (instr->hydrogen()->representation().IsDouble()) {
+ DwVfpRegister result = ToDoubleRegister(instr->result());
+ __ vldr(result, FieldMemOperand(object, offset));
return;
}
- Handle<String> name = instr->hydrogen()->name();
- Label done;
- __ ldr(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
- for (int i = 0; i < map_count; ++i) {
- bool last = (i == map_count - 1);
- Handle<Map> map = instr->hydrogen()->types()->at(i);
- Label check_passed;
- __ CompareMap(
- object_map, map, &check_passed, ALLOW_ELEMENT_TRANSITION_MAPS);
- if (last && !need_generic) {
- DeoptimizeIf(ne, instr->environment());
- __ bind(&check_passed);
- EmitLoadFieldOrConstantFunction(
- result, object, map, name, instr->environment());
- } else {
- Label next;
- __ b(ne, &next);
- __ bind(&check_passed);
- EmitLoadFieldOrConstantFunction(
- result, object, map, name, instr->environment());
- __ b(&done);
- __ bind(&next);
- }
+
+ Register result = ToRegister(instr->result());
+ if (!access.IsInobject()) {
+ __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
+ object = result;
}
- if (need_generic) {
- __ mov(r2, Operand(name));
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
+ MemOperand operand = FieldMemOperand(object, offset);
+ if (access.representation().IsByte()) {
+ __ ldrb(result, operand);
+ } else {
+ __ ldr(result, operand);
}
- __ bind(&done);
}
void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->object()).is(r0));
ASSERT(ToRegister(instr->result()).is(r0));
@@ -2862,37 +3100,9 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
}
-void LCodeGen::DoLoadElements(LLoadElements* instr) {
+void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
Register result = ToRegister(instr->result());
- Register input = ToRegister(instr->object());
- Register scratch = scratch0();
-
- __ ldr(result, FieldMemOperand(input, JSObject::kElementsOffset));
- if (FLAG_debug_code) {
- Label done, fail;
- __ ldr(scratch, FieldMemOperand(result, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
- __ cmp(scratch, ip);
- __ b(eq, &done);
- __ LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
- __ cmp(scratch, ip);
- __ b(eq, &done);
- // |scratch| still contains |input|'s map.
- __ ldr(scratch, FieldMemOperand(scratch, Map::kBitField2Offset));
- __ ubfx(scratch, scratch, Map::kElementsKindShift,
- Map::kElementsKindBitCount);
- __ cmp(scratch, Operand(GetInitialFastElementsKind()));
- __ b(lt, &fail);
- __ cmp(scratch, Operand(TERMINAL_FAST_ELEMENTS_KIND));
- __ b(le, &done);
- __ cmp(scratch, Operand(FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND));
- __ b(lt, &fail);
- __ cmp(scratch, Operand(LAST_EXTERNAL_ARRAY_ELEMENTS_KIND));
- __ b(le, &done);
- __ bind(&fail);
- __ Abort("Check for fast or external elements failed.");
- __ bind(&done);
- }
+ __ LoadRoot(result, instr->index());
}
@@ -2907,18 +3117,143 @@ void LCodeGen::DoLoadExternalArrayPointer(
void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
Register arguments = ToRegister(instr->arguments());
- Register length = ToRegister(instr->length());
- Register index = ToRegister(instr->index());
Register result = ToRegister(instr->result());
- // There are two words between the frame pointer and the last argument.
- // Subtracting from length accounts for one of them add one more.
- __ sub(length, length, index);
- __ add(length, length, Operand(1));
- __ ldr(result, MemOperand(arguments, length, LSL, kPointerSizeLog2));
+ if (instr->length()->IsConstantOperand() &&
+ instr->index()->IsConstantOperand()) {
+ int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+ int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
+ int index = (const_length - const_index) + 1;
+ __ ldr(result, MemOperand(arguments, index * kPointerSize));
+ } else {
+ Register length = ToRegister(instr->length());
+ Register index = ToRegister(instr->index());
+ // There are two words between the frame pointer and the last argument.
+    // Subtracting from length accounts for one of them; add one more.
+ __ sub(length, length, index);
+ __ add(length, length, Operand(1));
+ __ ldr(result, MemOperand(arguments, length, LSL, kPointerSizeLog2));
+ }
+}
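// A minimal sketch of the offset arithmetic above. The layout is an
// assumption for illustration: two words (saved fp and return address)
// separate the 'arguments' pointer from the last argument, so argument i
// lives (length - i + 1) words above it.
#include <cassert>
#include <cstdint>
#include <vector>

uint32_t AccessArgumentAt(const std::vector<uint32_t>& stack,
                          size_t arguments_base,  // Word index of 'arguments'.
                          int length,
                          int index) {
  int slot = (length - index) + 1;      // Same formula as both paths above.
  return stack[arguments_base + slot];  // MemOperand(arguments, slot * 4).
}

int main() {
  // Hypothetical frame, low addresses first: saved fp, return address, then
  // the arguments with argument 0 at the highest address.
  std::vector<uint32_t> stack = {0xF0, 0xF1, /*arg2*/ 30, /*arg1*/ 20, /*arg0*/ 10};
  assert(AccessArgumentAt(stack, 0, 3, 0) == 10);
  assert(AccessArgumentAt(stack, 0, 3, 2) == 30);
  return 0;
}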
+
+
+void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
+ Register external_pointer = ToRegister(instr->elements());
+ Register key = no_reg;
+ ElementsKind elements_kind = instr->elements_kind();
+ bool key_is_constant = instr->key()->IsConstantOperand();
+ int constant_key = 0;
+ if (key_is_constant) {
+ constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+ if (constant_key & 0xF0000000) {
+ Abort(kArrayIndexConstantValueTooBig);
+ }
+ } else {
+ key = ToRegister(instr->key());
+ }
+ int element_size_shift = ElementsKindToShiftSize(elements_kind);
+ int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
+ ? (element_size_shift - kSmiTagSize) : element_size_shift;
+ int additional_offset = instr->additional_index() << element_size_shift;
+
+ if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
+ elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
+ DwVfpRegister result = ToDoubleRegister(instr->result());
+ Operand operand = key_is_constant
+ ? Operand(constant_key << element_size_shift)
+ : Operand(key, LSL, shift_size);
+ __ add(scratch0(), external_pointer, operand);
+ if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
+ __ vldr(double_scratch0().low(), scratch0(), additional_offset);
+ __ vcvt_f64_f32(result, double_scratch0().low());
+ } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
+ __ vldr(result, scratch0(), additional_offset);
+ }
+ } else {
+ Register result = ToRegister(instr->result());
+ MemOperand mem_operand = PrepareKeyedOperand(
+ key, external_pointer, key_is_constant, constant_key,
+ element_size_shift, shift_size,
+ instr->additional_index(), additional_offset);
+ switch (elements_kind) {
+ case EXTERNAL_BYTE_ELEMENTS:
+ __ ldrsb(result, mem_operand);
+ break;
+ case EXTERNAL_PIXEL_ELEMENTS:
+ case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ __ ldrb(result, mem_operand);
+ break;
+ case EXTERNAL_SHORT_ELEMENTS:
+ __ ldrsh(result, mem_operand);
+ break;
+ case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ __ ldrh(result, mem_operand);
+ break;
+ case EXTERNAL_INT_ELEMENTS:
+ __ ldr(result, mem_operand);
+ break;
+ case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ __ ldr(result, mem_operand);
+ if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
+ __ cmp(result, Operand(0x80000000));
+ DeoptimizeIf(cs, instr->environment());
+ }
+ break;
+ case EXTERNAL_FLOAT_ELEMENTS:
+ case EXTERNAL_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS:
+ case FAST_ELEMENTS:
+ case FAST_SMI_ELEMENTS:
+ case DICTIONARY_ELEMENTS:
+ case NON_STRICT_ARGUMENTS_ELEMENTS:
+ UNREACHABLE();
+ break;
+ }
+ }
+}
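// Sketch of the byte-offset computation above, under these assumptions: a
// 1-bit smi tag and ElementsKindToShiftSize-style shifts (0 for bytes, 2 for
// 32-bit elements, 3 for doubles). When the key is a smi, the shift is
// reduced by kSmiTagSize so the tag bit folds into the scaling; byte-sized
// elements with a smi key take a different path and are not modelled here.
#include <cassert>

const int kSmiTagSize = 1;

int KeyedOffset(int key, bool key_is_smi, int element_size_shift,
                int additional_index) {
  int reg_value = key_is_smi ? (key << kSmiTagSize) : key;  // Register contents.
  int shift_size =
      key_is_smi ? element_size_shift - kSmiTagSize : element_size_shift;
  return (reg_value << shift_size) + (additional_index << element_size_shift);
}

int main() {
  // 32-bit external elements (shift 2): key 5 lands at byte 20 either way.
  assert(KeyedOffset(5, false, 2, 0) == 20);
  assert(KeyedOffset(5, true, 2, 0) == 20);
  // additional_index adds whole elements on top of the keyed offset.
  assert(KeyedOffset(5, false, 2, 3) == 32);
  return 0;
}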
+
+
+void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
+ Register elements = ToRegister(instr->elements());
+ bool key_is_constant = instr->key()->IsConstantOperand();
+ Register key = no_reg;
+ DwVfpRegister result = ToDoubleRegister(instr->result());
+ Register scratch = scratch0();
+
+ int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
+
+ int base_offset =
+ FixedDoubleArray::kHeaderSize - kHeapObjectTag +
+ (instr->additional_index() << element_size_shift);
+ if (key_is_constant) {
+ int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+ if (constant_key & 0xF0000000) {
+ Abort(kArrayIndexConstantValueTooBig);
+ }
+ base_offset += constant_key << element_size_shift;
+ }
+ __ add(scratch, elements, Operand(base_offset));
+
+ if (!key_is_constant) {
+ key = ToRegister(instr->key());
+ int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
+ ? (element_size_shift - kSmiTagSize) : element_size_shift;
+ __ add(scratch, scratch, Operand(key, LSL, shift_size));
+ }
+
+ __ vldr(result, scratch, 0);
+
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ __ ldr(scratch, MemOperand(scratch, sizeof(kHoleNanLower32)));
+ __ cmp(scratch, Operand(kHoleNanUpper32));
+ DeoptimizeIf(eq, instr->environment());
+ }
}
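// Sketch of the hole check above: a "hole" in a double backing store is one
// reserved NaN bit pattern, so it can be detected by reloading the upper 32
// bits of the element (the ldr at offset sizeof(kHoleNanLower32), assuming
// little-endian word order) and comparing them against the hole tag. The
// constants below are illustrative placeholders, not V8's actual values.
#include <cassert>
#include <cstdint>
#include <cstring>

const uint32_t kIllustrativeHoleUpper32 = 0x7FFFFFFF;  // Placeholder.
const uint32_t kIllustrativeHoleLower32 = 0xFFFFFFFF;  // Placeholder.

bool IsHole(double element) {
  uint64_t bits;
  std::memcpy(&bits, &element, sizeof(bits));
  return static_cast<uint32_t>(bits >> 32) == kIllustrativeHoleUpper32;
}

int main() {
  uint64_t hole_bits = (static_cast<uint64_t>(kIllustrativeHoleUpper32) << 32) |
                       kIllustrativeHoleLower32;
  double hole;
  std::memcpy(&hole, &hole_bits, sizeof(hole));
  assert(IsHole(hole));
  assert(!IsHole(1.5));
  return 0;
}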
-void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
+void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
Register elements = ToRegister(instr->elements());
Register result = ToRegister(instr->result());
Register scratch = scratch0();
@@ -2931,14 +3266,13 @@ void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
instr->additional_index());
store_base = elements;
} else {
- Register key = EmitLoadRegister(instr->key(), scratch0());
- // Even though the HLoadKeyedFastElement instruction forces the input
+ Register key = ToRegister(instr->key());
+ // Even though the HLoadKeyed instruction forces the input
// representation for the key to be an integer, the input gets replaced
// during bound check elimination with the index argument to the bounds
// check, which can be tagged, so that case must be handled here, too.
- if (instr->hydrogen()->key()->representation().IsTagged()) {
- __ add(scratch, elements,
- Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
+ if (instr->hydrogen()->key()->representation().IsSmi()) {
+ __ add(scratch, elements, Operand::PointerOffsetFromSmiKey(key));
} else {
__ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
}
@@ -2949,7 +3283,7 @@ void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
// Check for the hole value.
if (instr->hydrogen()->RequiresHoleCheck()) {
if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
- __ tst(result, Operand(kSmiTagMask));
+ __ SmiTst(result);
DeoptimizeIf(ne, instr->environment());
} else {
__ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
@@ -2960,46 +3294,14 @@ void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
}
-void LCodeGen::DoLoadKeyedFastDoubleElement(
- LLoadKeyedFastDoubleElement* instr) {
- Register elements = ToRegister(instr->elements());
- bool key_is_constant = instr->key()->IsConstantOperand();
- Register key = no_reg;
- DwVfpRegister result = ToDoubleRegister(instr->result());
- Register scratch = scratch0();
-
- int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
- int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
- ? (element_size_shift - kSmiTagSize) : element_size_shift;
- int constant_key = 0;
- if (key_is_constant) {
- constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
- if (constant_key & 0xF0000000) {
- Abort("array index constant value too big.");
- }
+void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
+ if (instr->is_external()) {
+ DoLoadKeyedExternalArray(instr);
+ } else if (instr->hydrogen()->representation().IsDouble()) {
+ DoLoadKeyedFixedDoubleArray(instr);
} else {
- key = ToRegister(instr->key());
+ DoLoadKeyedFixedArray(instr);
}
-
- Operand operand = key_is_constant
- ? Operand(((constant_key + instr->additional_index()) <<
- element_size_shift) +
- FixedDoubleArray::kHeaderSize - kHeapObjectTag)
- : Operand(key, LSL, shift_size);
- __ add(elements, elements, operand);
- if (!key_is_constant) {
- __ add(elements, elements,
- Operand((FixedDoubleArray::kHeaderSize - kHeapObjectTag) +
- (instr->additional_index() << element_size_shift)));
- }
-
- if (instr->hydrogen()->RequiresHoleCheck()) {
- __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
- __ cmp(scratch, Operand(kHoleNanUpper32));
- DeoptimizeIf(eq, instr->environment());
- }
-
- __ vldr(result, elements, 0);
}
@@ -3039,88 +3341,8 @@ MemOperand LCodeGen::PrepareKeyedOperand(Register key,
}
-void LCodeGen::DoLoadKeyedSpecializedArrayElement(
- LLoadKeyedSpecializedArrayElement* instr) {
- Register external_pointer = ToRegister(instr->external_pointer());
- Register key = no_reg;
- ElementsKind elements_kind = instr->elements_kind();
- bool key_is_constant = instr->key()->IsConstantOperand();
- int constant_key = 0;
- if (key_is_constant) {
- constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
- if (constant_key & 0xF0000000) {
- Abort("array index constant value too big.");
- }
- } else {
- key = ToRegister(instr->key());
- }
- int element_size_shift = ElementsKindToShiftSize(elements_kind);
- int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
- ? (element_size_shift - kSmiTagSize) : element_size_shift;
- int additional_offset = instr->additional_index() << element_size_shift;
-
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
- elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- CpuFeatures::Scope scope(VFP3);
- DwVfpRegister result = ToDoubleRegister(instr->result());
- Operand operand = key_is_constant
- ? Operand(constant_key << element_size_shift)
- : Operand(key, LSL, shift_size);
- __ add(scratch0(), external_pointer, operand);
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- __ vldr(result.low(), scratch0(), additional_offset);
- __ vcvt_f64_f32(result, result.low());
- } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
- __ vldr(result, scratch0(), additional_offset);
- }
- } else {
- Register result = ToRegister(instr->result());
- MemOperand mem_operand = PrepareKeyedOperand(
- key, external_pointer, key_is_constant, constant_key,
- element_size_shift, shift_size,
- instr->additional_index(), additional_offset);
- switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
- __ ldrsb(result, mem_operand);
- break;
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ ldrb(result, mem_operand);
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- __ ldrsh(result, mem_operand);
- break;
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ ldrh(result, mem_operand);
- break;
- case EXTERNAL_INT_ELEMENTS:
- __ ldr(result, mem_operand);
- break;
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ ldr(result, mem_operand);
- if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
- __ cmp(result, Operand(0x80000000));
- DeoptimizeIf(cs, instr->environment());
- }
- break;
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
- }
-}
-
-
void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->object()).is(r1));
ASSERT(ToRegister(instr->key()).is(r0));
@@ -3205,7 +3427,7 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
__ b(eq, &global_object);
// Deoptimize if the receiver is not a JS object.
- __ tst(receiver, Operand(kSmiTagMask));
+ __ SmiTst(receiver);
DeoptimizeIf(eq, instr->environment());
__ CompareObjectType(receiver, scratch, scratch, FIRST_SPEC_OBJECT_TYPE);
DeoptimizeIf(lt, instr->environment());
@@ -3246,7 +3468,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
// stack.
Label invoke, loop;
// length is a small non-negative integer, due to the test above.
- __ cmp(length, Operand(0));
+ __ cmp(length, Operand::Zero());
__ b(eq, &invoke);
__ bind(&loop);
__ ldr(scratch, MemOperand(elements, length, LSL, 2));
@@ -3257,7 +3479,6 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
__ bind(&invoke);
ASSERT(instr->HasPointerMap());
LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
SafepointGenerator safepoint_generator(
this, pointers, Safepoint::kLazyDeopt);
// The number of arguments is stored in receiver which is r0, as expected
@@ -3265,14 +3486,13 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
ParameterCount actual(receiver);
__ InvokeFunction(function, actual, CALL_FUNCTION,
safepoint_generator, CALL_AS_METHOD);
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
void LCodeGen::DoPushArgument(LPushArgument* instr) {
LOperand* argument = instr->value();
if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
- Abort("DoPushArgument not implemented for double type.");
+ Abort(kDoPushArgumentNotImplementedForDoubleType);
} else {
Register argument_reg = EmitLoadRegister(argument, ip);
__ push(argument_reg);
@@ -3292,8 +3512,14 @@ void LCodeGen::DoThisFunction(LThisFunction* instr) {
void LCodeGen::DoContext(LContext* instr) {
+ // If there is a non-return use, the context must be moved to a register.
Register result = ToRegister(instr->result());
- __ mov(result, cp);
+ if (info()->IsOptimizing()) {
+ __ ldr(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ } else {
+ // If there is no frame, the context must be in cp.
+ ASSERT(result.is(cp));
+ }
}
@@ -3306,8 +3532,9 @@ void LCodeGen::DoOuterContext(LOuterContext* instr) {
void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
__ push(cp); // The context is the first argument.
- __ LoadHeapObject(scratch0(), instr->hydrogen()->pairs());
+ __ Move(scratch0(), instr->hydrogen()->pairs());
__ push(scratch0());
__ mov(scratch0(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
__ push(scratch0());
@@ -3316,8 +3543,9 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
+ Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
- __ ldr(result, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
+ __ ldr(result, ContextOperand(context, Context::GLOBAL_OBJECT_INDEX));
}
@@ -3329,19 +3557,21 @@ void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
+ int formal_parameter_count,
int arity,
LInstruction* instr,
CallKind call_kind,
R1State r1_state) {
- bool can_invoke_directly = !function->NeedsArgumentsAdaption() ||
- function->shared()->formal_parameter_count() == arity;
+ bool dont_adapt_arguments =
+ formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
+ bool can_invoke_directly =
+ dont_adapt_arguments || formal_parameter_count == arity;
LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
if (can_invoke_directly) {
if (r1_state == R1_UNINITIALIZED) {
- __ LoadHeapObject(r1, function);
+ __ Move(r1, function);
}
// Change context.
@@ -3349,7 +3579,7 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
// Set r0 to arguments count if adaption is not needed. Assumes that r0
// is available to write to at this point.
- if (!function->NeedsArgumentsAdaption()) {
+ if (dont_adapt_arguments) {
__ mov(r0, Operand(arity));
}
@@ -3363,17 +3593,17 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
} else {
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
ParameterCount count(arity);
- __ InvokeFunction(function, count, CALL_FUNCTION, generator, call_kind);
+ ParameterCount expected(formal_parameter_count);
+ __ InvokeFunction(
+ function, expected, count, CALL_FUNCTION, generator, call_kind);
}
-
- // Restore context.
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
ASSERT(ToRegister(instr->result()).is(r0));
- CallKnownFunction(instr->function(),
+ CallKnownFunction(instr->hydrogen()->function(),
+ instr->hydrogen()->formal_parameter_count(),
instr->arity(),
instr,
CALL_AS_METHOD,
@@ -3381,7 +3611,9 @@ void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
}
-void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
+void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
+ ASSERT(instr->context() != NULL);
+ ASSERT(ToRegister(instr->context()).is(cp));
Register input = ToRegister(instr->value());
Register result = ToRegister(instr->result());
Register scratch = scratch0();
@@ -3425,7 +3657,8 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
// Slow case: Call the runtime system to do the number allocation.
__ bind(&slow);
- CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
+ CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
+ instr->context());
// Set the pointer to the new heap number in tmp.
if (!tmp1.is(r0)) __ mov(tmp1, Operand(r0));
// Restore input_reg after call to runtime.
@@ -3447,33 +3680,32 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
}
-void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
+void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
Register input = ToRegister(instr->value());
Register result = ToRegister(instr->result());
- __ cmp(input, Operand(0));
+ __ cmp(input, Operand::Zero());
__ Move(result, input, pl);
// We can make rsb conditional because the previous cmp instruction
// will clear the V (overflow) flag and rsb won't set this flag
// if input is positive.
- __ rsb(result, input, Operand(0), SetCC, mi);
+ __ rsb(result, input, Operand::Zero(), SetCC, mi);
// Deoptimize on overflow.
DeoptimizeIf(vs, instr->environment());
}
-void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
+void LCodeGen::DoMathAbs(LMathAbs* instr) {
// Class for deferred case.
- class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
+ class DeferredMathAbsTaggedHeapNumber V8_FINAL : public LDeferredCode {
public:
- DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
- LUnaryMathOperation* instr)
+ DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
+ virtual void Generate() V8_OVERRIDE {
codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
}
- virtual LInstruction* instr() { return instr_; }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
- LUnaryMathOperation* instr_;
+ LMathAbs* instr_;
};
Representation r = instr->hydrogen()->value()->representation();
@@ -3481,7 +3713,7 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
DwVfpRegister input = ToDoubleRegister(instr->value());
DwVfpRegister result = ToDoubleRegister(instr->result());
__ vabs(result, input);
- } else if (r.IsInteger32()) {
+ } else if (r.IsSmiOrInteger32()) {
EmitIntegerMathAbs(instr);
} else {
// Representation is tagged.
@@ -3497,107 +3729,76 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
}
-void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
- DoubleRegister input = ToDoubleRegister(instr->value());
+void LCodeGen::DoMathFloor(LMathFloor* instr) {
+ DwVfpRegister input = ToDoubleRegister(instr->value());
Register result = ToRegister(instr->result());
- Register scratch = scratch0();
+ Register input_high = scratch0();
+ Label done, exact;
- __ EmitVFPTruncate(kRoundToMinusInf,
- result,
- input,
- scratch,
- double_scratch0());
- DeoptimizeIf(ne, instr->environment());
+ __ TryInt32Floor(result, input, input_high, double_scratch0(), &done, &exact);
+ DeoptimizeIf(al, instr->environment());
+ __ bind(&exact);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
// Test for -0.
- Label done;
- __ cmp(result, Operand(0));
+ __ cmp(result, Operand::Zero());
__ b(ne, &done);
- __ vmov(scratch, input.high());
- __ tst(scratch, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr->environment());
- __ bind(&done);
+ __ cmp(input_high, Operand::Zero());
+ DeoptimizeIf(mi, instr->environment());
}
+ __ bind(&done);
}
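// A sketch of the minus-zero handling above, assuming IEEE doubles. Only the
// case this code cares about is modelled: when floor(x) is zero, the sign bit
// of the original input (the input_high comparison) decides whether the
// result would be -0 and therefore needs a deopt under kBailoutOnMinusZero.
#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstring>

// Returns false where the generated code would deoptimize.
bool TryMathFloor(double input, bool bailout_on_minus_zero, int32_t* result) {
  double floored = std::floor(input);
  if (std::isnan(floored) || floored < INT32_MIN || floored > INT32_MAX) {
    return false;  // Not representable as an int32.
  }
  int32_t r = static_cast<int32_t>(floored);
  if (bailout_on_minus_zero && r == 0) {
    uint64_t bits;
    std::memcpy(&bits, &input, sizeof(bits));
    if (bits >> 63) return false;  // Sign bit set on a zero result: input was -0.
  }
  *result = r;
  return true;
}

int main() {
  int32_t r;
  assert(TryMathFloor(2.7, true, &r) && r == 2);
  assert(!TryMathFloor(-0.0, true, &r));            // Would deoptimize.
  assert(TryMathFloor(-0.0, false, &r) && r == 0);  // Allowed when not observed.
  return 0;
}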
-void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
- DoubleRegister input = ToDoubleRegister(instr->value());
+void LCodeGen::DoMathRound(LMathRound* instr) {
+ DwVfpRegister input = ToDoubleRegister(instr->value());
Register result = ToRegister(instr->result());
DwVfpRegister double_scratch1 = ToDoubleRegister(instr->temp());
- Register scratch = scratch0();
- Label done, check_sign_on_zero;
-
- // Extract exponent bits.
- __ vmov(result, input.high());
- __ ubfx(scratch,
- result,
- HeapNumber::kExponentShift,
- HeapNumber::kExponentBits);
-
- // If the number is in ]-0.5, +0.5[, the result is +/- 0.
- __ cmp(scratch, Operand(HeapNumber::kExponentBias - 2));
- __ mov(result, Operand(0), LeaveCC, le);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ b(le, &check_sign_on_zero);
- } else {
- __ b(le, &done);
- }
-
- // The following conversion will not work with numbers
- // outside of ]-2^32, 2^32[.
- __ cmp(scratch, Operand(HeapNumber::kExponentBias + 32));
- DeoptimizeIf(ge, instr->environment());
-
- __ Vmov(double_scratch0(), 0.5, scratch);
- __ vadd(double_scratch0(), input, double_scratch0());
-
- // Save the original sign for later comparison.
- __ and_(scratch, result, Operand(HeapNumber::kSignMask));
-
- // Check sign of the result: if the sign changed, the input
- // value was in ]0.5, 0[ and the result should be -0.
- __ vmov(result, double_scratch0().high());
- __ eor(result, result, Operand(scratch), SetCC);
+ DwVfpRegister input_plus_dot_five = double_scratch1;
+ Register input_high = scratch0();
+ DwVfpRegister dot_five = double_scratch0();
+ Label convert, done;
+
+ __ Vmov(dot_five, 0.5, scratch0());
+ __ vabs(double_scratch1, input);
+ __ VFPCompareAndSetFlags(double_scratch1, dot_five);
+ // If input is in [-0.5, -0], the result is -0.
+ // If input is in [+0, +0.5[, the result is +0.
+ // If the input is +0.5, the result is 1.
+ __ b(hi, &convert); // Out of [-0.5, +0.5].
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(mi, instr->environment());
- } else {
- __ mov(result, Operand(0), LeaveCC, mi);
- __ b(mi, &done);
- }
-
- __ EmitVFPTruncate(kRoundToMinusInf,
- result,
- double_scratch0(),
- scratch,
- double_scratch1);
- DeoptimizeIf(ne, instr->environment());
+ __ VmovHigh(input_high, input);
+ __ cmp(input_high, Operand::Zero());
+ DeoptimizeIf(mi, instr->environment()); // [-0.5, -0].
+ }
+ __ VFPCompareAndSetFlags(input, dot_five);
+ __ mov(result, Operand(1), LeaveCC, eq); // +0.5.
+ // Remaining cases: [+0, +0.5[ or [-0.5, +0.5[, depending on
+ // flag kBailoutOnMinusZero.
+ __ mov(result, Operand::Zero(), LeaveCC, ne);
+ __ b(&done);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- // Test for -0.
- __ cmp(result, Operand(0));
- __ b(ne, &done);
- __ bind(&check_sign_on_zero);
- __ vmov(scratch, input.high());
- __ tst(scratch, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr->environment());
- }
+ __ bind(&convert);
+ __ vadd(input_plus_dot_five, input, dot_five);
+ // Reuse dot_five (double_scratch0) as we no longer need this value.
+ __ TryInt32Floor(result, input_plus_dot_five, input_high, double_scratch0(),
+ &done, &done);
+ DeoptimizeIf(al, instr->environment());
__ bind(&done);
}
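// Sketch of the rounding strategy above: round half toward +infinity by
// flooring x + 0.5, with the |x| <= 0.5 band handled separately so that +0.5
// yields 1, everything else in the band yields +/-0, and [-0.5, -0] can
// trigger the minus-zero deopt. The exact deopt conditions of TryInt32Floor
// are simplified to a range check here.
#include <cassert>
#include <cmath>
#include <cstdint>

// Returns false where the generated code would deoptimize.
bool TryMathRound(double input, bool bailout_on_minus_zero, int32_t* result) {
  if (std::isnan(input)) return false;
  if (std::fabs(input) <= 0.5) {
    if (bailout_on_minus_zero && std::signbit(input)) return false;  // [-0.5, -0].
    *result = (input == 0.5) ? 1 : 0;
    return true;
  }
  double floored = std::floor(input + 0.5);
  if (floored < INT32_MIN || floored > INT32_MAX) return false;
  *result = static_cast<int32_t>(floored);
  return true;
}

int main() {
  int32_t r;
  assert(TryMathRound(2.5, false, &r) && r == 3);
  assert(TryMathRound(-2.5, false, &r) && r == -2);  // Half rounds toward +inf.
  assert(TryMathRound(0.5, false, &r) && r == 1);
  assert(!TryMathRound(-0.3, true, &r));             // Result would be -0.
  return 0;
}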
-void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
- DoubleRegister input = ToDoubleRegister(instr->value());
- DoubleRegister result = ToDoubleRegister(instr->result());
+void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
+ DwVfpRegister input = ToDoubleRegister(instr->value());
+ DwVfpRegister result = ToDoubleRegister(instr->result());
__ vsqrt(result, input);
}
-void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
- DoubleRegister input = ToDoubleRegister(instr->value());
- DoubleRegister result = ToDoubleRegister(instr->result());
- DoubleRegister temp = ToDoubleRegister(instr->temp());
+void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
+ DwVfpRegister input = ToDoubleRegister(instr->value());
+ DwVfpRegister result = ToDoubleRegister(instr->result());
+ DwVfpRegister temp = ToDoubleRegister(instr->temp());
// Note that according to ECMA-262 15.8.2.13:
// Math.pow(-Infinity, 0.5) == Infinity
@@ -3626,12 +3827,15 @@ void LCodeGen::DoPower(LPower* instr) {
ASSERT(ToDoubleRegister(instr->left()).is(d1));
ASSERT(ToDoubleRegister(instr->result()).is(d3));
- if (exponent_type.IsTagged()) {
+ if (exponent_type.IsSmi()) {
+ MathPowStub stub(MathPowStub::TAGGED);
+ __ CallStub(&stub);
+ } else if (exponent_type.IsTagged()) {
Label no_deopt;
__ JumpIfSmi(r2, &no_deopt);
- __ ldr(r7, FieldMemOperand(r2, HeapObject::kMapOffset));
+ __ ldr(r6, FieldMemOperand(r2, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
- __ cmp(r7, Operand(ip));
+ __ cmp(r6, Operand(ip));
DeoptimizeIf(ne, instr->environment());
__ bind(&no_deopt);
MathPowStub stub(MathPowStub::TAGGED);
@@ -3648,164 +3852,139 @@ void LCodeGen::DoPower(LPower* instr) {
void LCodeGen::DoRandom(LRandom* instr) {
- class DeferredDoRandom: public LDeferredCode {
- public:
- DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredRandom(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LRandom* instr_;
- };
-
- DeferredDoRandom* deferred = new(zone()) DeferredDoRandom(this, instr);
-
- // Having marked this instruction as a call we can use any
- // registers.
- ASSERT(ToDoubleRegister(instr->result()).is(d7));
- ASSERT(ToRegister(instr->global_object()).is(r0));
-
+ // Assert that the register size is indeed the size of each seed.
static const int kSeedSize = sizeof(uint32_t);
STATIC_ASSERT(kPointerSize == kSeedSize);
- __ ldr(r0, FieldMemOperand(r0, GlobalObject::kNativeContextOffset));
+  // Load the native context.
+ Register global_object = ToRegister(instr->global_object());
+ Register native_context = global_object;
+ __ ldr(native_context, FieldMemOperand(
+ global_object, GlobalObject::kNativeContextOffset));
+
+ // Load state (FixedArray of the native context's random seeds)
static const int kRandomSeedOffset =
FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
- __ ldr(r2, FieldMemOperand(r0, kRandomSeedOffset));
- // r2: FixedArray of the native context's random seeds
+ Register state = native_context;
+ __ ldr(state, FieldMemOperand(native_context, kRandomSeedOffset));
// Load state[0].
- __ ldr(r1, FieldMemOperand(r2, ByteArray::kHeaderSize));
- __ cmp(r1, Operand(0));
- __ b(eq, deferred->entry());
+ Register state0 = ToRegister(instr->scratch());
+ __ ldr(state0, FieldMemOperand(state, ByteArray::kHeaderSize));
// Load state[1].
- __ ldr(r0, FieldMemOperand(r2, ByteArray::kHeaderSize + kSeedSize));
- // r1: state[0].
- // r0: state[1].
+ Register state1 = ToRegister(instr->scratch2());
+ __ ldr(state1, FieldMemOperand(state, ByteArray::kHeaderSize + kSeedSize));
// state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
- __ and_(r3, r1, Operand(0xFFFF));
- __ mov(r4, Operand(18273));
- __ mul(r3, r3, r4);
- __ add(r1, r3, Operand(r1, LSR, 16));
+ Register scratch3 = ToRegister(instr->scratch3());
+ Register scratch4 = scratch0();
+ __ and_(scratch3, state0, Operand(0xFFFF));
+ __ mov(scratch4, Operand(18273));
+ __ mul(scratch3, scratch3, scratch4);
+ __ add(state0, scratch3, Operand(state0, LSR, 16));
// Save state[0].
- __ str(r1, FieldMemOperand(r2, ByteArray::kHeaderSize));
+ __ str(state0, FieldMemOperand(state, ByteArray::kHeaderSize));
// state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
- __ and_(r3, r0, Operand(0xFFFF));
- __ mov(r4, Operand(36969));
- __ mul(r3, r3, r4);
- __ add(r0, r3, Operand(r0, LSR, 16));
+ __ and_(scratch3, state1, Operand(0xFFFF));
+ __ mov(scratch4, Operand(36969));
+ __ mul(scratch3, scratch3, scratch4);
+ __ add(state1, scratch3, Operand(state1, LSR, 16));
// Save state[1].
- __ str(r0, FieldMemOperand(r2, ByteArray::kHeaderSize + kSeedSize));
+ __ str(state1, FieldMemOperand(state, ByteArray::kHeaderSize + kSeedSize));
// Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
- __ and_(r0, r0, Operand(0x3FFFF));
- __ add(r0, r0, Operand(r1, LSL, 14));
+ Register random = scratch4;
+ __ and_(random, state1, Operand(0x3FFFF));
+ __ add(random, random, Operand(state0, LSL, 14));
- __ bind(deferred->exit());
// 0x41300000 is the top half of 1.0 x 2^20 as a double.
// Create this constant using mov/orr to avoid PC relative load.
- __ mov(r1, Operand(0x41000000));
- __ orr(r1, r1, Operand(0x300000));
+ __ mov(scratch3, Operand(0x41000000));
+ __ orr(scratch3, scratch3, Operand(0x300000));
// Move 0x41300000xxxxxxxx (x = random bits) to VFP.
- __ vmov(d7, r0, r1);
+ DwVfpRegister result = ToDoubleRegister(instr->result());
+ __ vmov(result, random, scratch3);
// Move 0x4130000000000000 to VFP.
- __ mov(r0, Operand(0, RelocInfo::NONE));
- __ vmov(d8, r0, r1);
- // Subtract and store the result in the heap number.
- __ vsub(d7, d7, d8);
+ __ mov(scratch4, Operand::Zero());
+ DwVfpRegister scratch5 = double_scratch0();
+ __ vmov(scratch5, scratch4, scratch3);
+ __ vsub(result, result, scratch5);
}
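// Sketch (plain C++, assuming little-endian VFP word order) of the sequence
// generated above: two 16-bit multiply-with-carry states are advanced,
// combined into 32 random bits, planted in the low mantissa of 1.0 * 2^20
// (high word 0x41300000), and then 2^20 is subtracted, which leaves
// random / 2^32, a double in [0, 1).
#include <cassert>
#include <cstdint>
#include <cstring>

struct RandomState { uint32_t s0, s1; };

double NextRandomDouble(RandomState* state) {
  // state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16), and the same
  // with 36969 for state[1] -- the updates spelled out in the comments above.
  state->s0 = 18273u * (state->s0 & 0xFFFF) + (state->s0 >> 16);
  state->s1 = 36969u * (state->s1 & 0xFFFF) + (state->s1 >> 16);
  uint32_t random = (state->s0 << 14) + (state->s1 & 0x3FFFF);

  uint64_t bits = (UINT64_C(0x41300000) << 32) | random;  // 1.0 * 2^20 + bits.
  double biased;
  std::memcpy(&biased, &bits, sizeof(biased));
  return biased - 1048576.0;  // Subtract 2^20 (0x4130000000000000).
}

int main() {
  RandomState state = {1, 2};  // Arbitrary non-zero seeds.
  for (int i = 0; i < 1000; ++i) {
    double x = NextRandomDouble(&state);
    assert(x >= 0.0 && x < 1.0);
  }
  return 0;
}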
-void LCodeGen::DoDeferredRandom(LRandom* instr) {
- __ PrepareCallCFunction(1, scratch0());
- __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
- // Return value is in r0.
+void LCodeGen::DoMathExp(LMathExp* instr) {
+ DwVfpRegister input = ToDoubleRegister(instr->value());
+ DwVfpRegister result = ToDoubleRegister(instr->result());
+ DwVfpRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
+ DwVfpRegister double_scratch2 = double_scratch0();
+ Register temp1 = ToRegister(instr->temp1());
+ Register temp2 = ToRegister(instr->temp2());
+
+ MathExpGenerator::EmitMathExp(
+ masm(), input, result, double_scratch1, double_scratch2,
+ temp1, temp2, scratch0());
}
-void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
+void LCodeGen::DoMathLog(LMathLog* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(d2));
+ // Set the context register to a GC-safe fake value. Clobbering it is
+ // OK because this instruction is marked as a call.
+ __ mov(cp, Operand::Zero());
TranscendentalCacheStub stub(TranscendentalCache::LOG,
TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
-void LCodeGen::DoMathTan(LUnaryMathOperation* instr) {
+void LCodeGen::DoMathTan(LMathTan* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(d2));
+ // Set the context register to a GC-safe fake value. Clobbering it is
+ // OK because this instruction is marked as a call.
+ __ mov(cp, Operand::Zero());
TranscendentalCacheStub stub(TranscendentalCache::TAN,
TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
-void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
+void LCodeGen::DoMathCos(LMathCos* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(d2));
+ // Set the context register to a GC-safe fake value. Clobbering it is
+ // OK because this instruction is marked as a call.
+ __ mov(cp, Operand::Zero());
TranscendentalCacheStub stub(TranscendentalCache::COS,
TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
-void LCodeGen::DoMathSin(LUnaryMathOperation* instr) {
+void LCodeGen::DoMathSin(LMathSin* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(d2));
+ // Set the context register to a GC-safe fake value. Clobbering it is
+ // OK because this instruction is marked as a call.
+ __ mov(cp, Operand::Zero());
TranscendentalCacheStub stub(TranscendentalCache::SIN,
TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
- switch (instr->op()) {
- case kMathAbs:
- DoMathAbs(instr);
- break;
- case kMathFloor:
- DoMathFloor(instr);
- break;
- case kMathRound:
- DoMathRound(instr);
- break;
- case kMathSqrt:
- DoMathSqrt(instr);
- break;
- case kMathPowHalf:
- DoMathPowHalf(instr);
- break;
- case kMathCos:
- DoMathCos(instr);
- break;
- case kMathSin:
- DoMathSin(instr);
- break;
- case kMathTan:
- DoMathTan(instr);
- break;
- case kMathLog:
- DoMathLog(instr);
- break;
- default:
- Abort("Unimplemented type of LUnaryMathOperation.");
- UNREACHABLE();
- }
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->function()).is(r1));
ASSERT(instr->HasPointerMap());
- if (instr->known_function().is_null()) {
+ Handle<JSFunction> known_function = instr->hydrogen()->known_function();
+ if (known_function.is_null()) {
LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
ParameterCount count(instr->arity());
__ InvokeFunction(r1, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
} else {
- CallKnownFunction(instr->known_function(),
+ CallKnownFunction(known_function,
+ instr->hydrogen()->formal_parameter_count(),
instr->arity(),
instr,
CALL_AS_METHOD,
@@ -3815,17 +3994,18 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->result()).is(r0));
int arity = instr->arity();
Handle<Code> ic =
isolate()->stub_cache()->ComputeKeyedCallInitialize(arity);
CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
void LCodeGen::DoCallNamed(LCallNamed* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->result()).is(r0));
int arity = instr->arity();
@@ -3834,23 +4014,22 @@ void LCodeGen::DoCallNamed(LCallNamed* instr) {
isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
__ mov(r2, Operand(instr->name()));
CallCode(ic, mode, instr, NEVER_INLINE_TARGET_ADDRESS);
- // Restore context register.
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
void LCodeGen::DoCallFunction(LCallFunction* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->function()).is(r1));
ASSERT(ToRegister(instr->result()).is(r0));
int arity = instr->arity();
CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->result()).is(r0));
int arity = instr->arity();
@@ -3859,13 +4038,13 @@ void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
__ mov(r2, Operand(instr->name()));
CallCode(ic, mode, instr, NEVER_INLINE_TARGET_ADDRESS);
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
ASSERT(ToRegister(instr->result()).is(r0));
- CallKnownFunction(instr->target(),
+ CallKnownFunction(instr->hydrogen()->target(),
+ instr->hydrogen()->formal_parameter_count(),
instr->arity(),
instr,
CALL_AS_FUNCTION,
@@ -3874,12 +4053,61 @@ void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
void LCodeGen::DoCallNew(LCallNew* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->constructor()).is(r1));
ASSERT(ToRegister(instr->result()).is(r0));
+ __ mov(r0, Operand(instr->arity()));
+  // No cell in r2 for construct type feedback in optimized code.
+ Handle<Object> undefined_value(isolate()->factory()->undefined_value());
+ __ mov(r2, Operand(undefined_value));
CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+}
+
+
+void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ ASSERT(ToRegister(instr->constructor()).is(r1));
+ ASSERT(ToRegister(instr->result()).is(r0));
+
__ mov(r0, Operand(instr->arity()));
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ __ mov(r2, Operand(instr->hydrogen()->property_cell()));
+ ElementsKind kind = instr->hydrogen()->elements_kind();
+ AllocationSiteOverrideMode override_mode =
+ (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
+ ? DISABLE_ALLOCATION_SITES
+ : DONT_OVERRIDE;
+ ContextCheckMode context_mode = CONTEXT_CHECK_NOT_REQUIRED;
+
+ if (instr->arity() == 0) {
+ ArrayNoArgumentConstructorStub stub(kind, context_mode, override_mode);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+ } else if (instr->arity() == 1) {
+ Label done;
+ if (IsFastPackedElementsKind(kind)) {
+ Label packed_case;
+      // We might need a holey array; look at the first argument to decide.
+ __ ldr(r5, MemOperand(sp, 0));
+ __ cmp(r5, Operand::Zero());
+ __ b(eq, &packed_case);
+
+ ElementsKind holey_kind = GetHoleyElementsKind(kind);
+ ArraySingleArgumentConstructorStub stub(holey_kind, context_mode,
+ override_mode);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+ __ jmp(&done);
+ __ bind(&packed_case);
+ }
+
+ ArraySingleArgumentConstructorStub stub(kind, context_mode, override_mode);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+ __ bind(&done);
+ } else {
+ ArrayNArgumentsConstructorStub stub(kind, context_mode, override_mode);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+ }
}
@@ -3888,16 +4116,60 @@ void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
}
+void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
+ Register function = ToRegister(instr->function());
+ Register code_object = ToRegister(instr->code_object());
+ __ add(code_object, code_object, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ str(code_object,
+ FieldMemOperand(function, JSFunction::kCodeEntryOffset));
+}
+
+
+void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
+ Register result = ToRegister(instr->result());
+ Register base = ToRegister(instr->base_object());
+ __ add(result, base, Operand(instr->offset()));
+}
+
+
void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
+ Representation representation = instr->representation();
+
Register object = ToRegister(instr->object());
- Register value = ToRegister(instr->value());
Register scratch = scratch0();
- int offset = instr->offset();
+ HObjectAccess access = instr->hydrogen()->access();
+ int offset = access.offset();
+
+ if (access.IsExternalMemory()) {
+ Register value = ToRegister(instr->value());
+ MemOperand operand = MemOperand(object, offset);
+ if (representation.IsByte()) {
+ __ strb(value, operand);
+ } else {
+ __ str(value, operand);
+ }
+ return;
+ }
- ASSERT(!object.is(value));
+ Handle<Map> transition = instr->transition();
- if (!instr->transition().is_null()) {
- __ mov(scratch, Operand(instr->transition()));
+ if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
+ Register value = ToRegister(instr->value());
+ if (!instr->hydrogen()->value()->type().IsHeapObject()) {
+ __ SmiTst(value);
+ DeoptimizeIf(eq, instr->environment());
+ }
+ } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ ASSERT(transition.is_null());
+ ASSERT(access.IsInobject());
+ ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+ DwVfpRegister value = ToDoubleRegister(instr->value());
+ __ vstr(value, FieldMemOperand(object, offset));
+ return;
+ }
+
+ if (!transition.is_null()) {
+ __ mov(scratch, Operand(transition));
__ str(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
Register temp = ToRegister(instr->temp());
@@ -3906,7 +4178,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
HeapObject::kMapOffset,
scratch,
temp,
- kLRHasBeenSaved,
+ GetLinkRegisterState(),
kSaveFPRegs,
OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
@@ -3914,25 +4186,37 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
}
// Do the store.
- HType type = instr->hydrogen()->value()->type();
+ Register value = ToRegister(instr->value());
+ ASSERT(!object.is(value));
SmiCheck check_needed =
- type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
- if (instr->is_in_object()) {
- __ str(value, FieldMemOperand(object, offset));
+ instr->hydrogen()->value()->IsHeapObject()
+ ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+ if (access.IsInobject()) {
+ MemOperand operand = FieldMemOperand(object, offset);
+ if (representation.IsByte()) {
+ __ strb(value, operand);
+ } else {
+ __ str(value, operand);
+ }
if (instr->hydrogen()->NeedsWriteBarrier()) {
// Update the write barrier for the object for in-object properties.
__ RecordWriteField(object,
offset,
value,
scratch,
- kLRHasBeenSaved,
+ GetLinkRegisterState(),
kSaveFPRegs,
EMIT_REMEMBERED_SET,
check_needed);
}
} else {
__ ldr(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
- __ str(value, FieldMemOperand(scratch, offset));
+ MemOperand operand = FieldMemOperand(scratch, offset);
+ if (representation.IsByte()) {
+ __ strb(value, operand);
+ } else {
+ __ str(value, operand);
+ }
if (instr->hydrogen()->NeedsWriteBarrier()) {
// Update the write barrier for the properties array.
// object is used as a scratch register.
@@ -3940,7 +4224,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
offset,
value,
object,
- kLRHasBeenSaved,
+ GetLinkRegisterState(),
kSaveFPRegs,
EMIT_REMEMBERED_SET,
check_needed);
@@ -3950,6 +4234,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->object()).is(r1));
ASSERT(ToRegister(instr->value()).is(r0));
@@ -3962,32 +4247,25 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
}
-void LCodeGen::DeoptIfTaggedButNotSmi(LEnvironment* environment,
- HValue* value,
- LOperand* operand) {
- if (value->representation().IsTagged() && !value->type().IsSmi()) {
- if (operand->IsRegister()) {
- __ tst(ToRegister(operand), Operand(kSmiTagMask));
- } else {
- __ mov(ip, ToOperand(operand));
- __ tst(ip, Operand(kSmiTagMask));
- }
- DeoptimizeIf(ne, environment);
+void LCodeGen::ApplyCheckIf(Condition condition, LBoundsCheck* check) {
+ if (FLAG_debug_code && check->hydrogen()->skip_check()) {
+ Label done;
+ __ b(NegateCondition(condition), &done);
+ __ stop("eliminated bounds check failed");
+ __ bind(&done);
+ } else {
+ DeoptimizeIf(condition, check->environment());
}
}
void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
- DeoptIfTaggedButNotSmi(instr->environment(),
- instr->hydrogen()->length(),
- instr->length());
- DeoptIfTaggedButNotSmi(instr->environment(),
- instr->hydrogen()->index(),
- instr->index());
+ if (instr->hydrogen()->skip_check()) return;
+
if (instr->index()->IsConstantOperand()) {
int constant_index =
ToInteger32(LConstantOperand::cast(instr->index()));
- if (instr->hydrogen()->length()->representation().IsTagged()) {
+ if (instr->hydrogen()->length()->representation().IsSmi()) {
__ mov(ip, Operand(Smi::FromInt(constant_index)));
} else {
__ mov(ip, Operand(constant_index));
@@ -3996,106 +4274,13 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
} else {
__ cmp(ToRegister(instr->index()), ToRegister(instr->length()));
}
- DeoptimizeIf(hs, instr->environment());
-}
-
-
-void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
- Register value = ToRegister(instr->value());
- Register elements = ToRegister(instr->object());
- Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
- Register scratch = scratch0();
- Register store_base = scratch;
- int offset = 0;
-
- // Do the store.
- if (instr->key()->IsConstantOperand()) {
- ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
- LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
- offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
- instr->additional_index());
- store_base = elements;
- } else {
- // Even though the HLoadKeyedFastElement instruction forces the input
- // representation for the key to be an integer, the input gets replaced
- // during bound check elimination with the index argument to the bounds
- // check, which can be tagged, so that case must be handled here, too.
- if (instr->hydrogen()->key()->representation().IsTagged()) {
- __ add(scratch, elements,
- Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
- } else {
- __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
- }
- offset = FixedArray::OffsetOfElementAt(instr->additional_index());
- }
- __ str(value, FieldMemOperand(store_base, offset));
-
- if (instr->hydrogen()->NeedsWriteBarrier()) {
- HType type = instr->hydrogen()->value()->type();
- SmiCheck check_needed =
- type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
- // Compute address of modified element and store it into key register.
- __ add(key, store_base, Operand(offset - kHeapObjectTag));
- __ RecordWrite(elements,
- key,
- value,
- kLRHasBeenSaved,
- kSaveFPRegs,
- EMIT_REMEMBERED_SET,
- check_needed);
- }
-}
-
-
-void LCodeGen::DoStoreKeyedFastDoubleElement(
- LStoreKeyedFastDoubleElement* instr) {
- DwVfpRegister value = ToDoubleRegister(instr->value());
- Register elements = ToRegister(instr->elements());
- Register key = no_reg;
- Register scratch = scratch0();
- bool key_is_constant = instr->key()->IsConstantOperand();
- int constant_key = 0;
-
- // Calculate the effective address of the slot in the array to store the
- // double value.
- if (key_is_constant) {
- constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
- if (constant_key & 0xF0000000) {
- Abort("array index constant value too big.");
- }
- } else {
- key = ToRegister(instr->key());
- }
- int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
- int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
- ? (element_size_shift - kSmiTagSize) : element_size_shift;
- Operand operand = key_is_constant
- ? Operand((constant_key << element_size_shift) +
- FixedDoubleArray::kHeaderSize - kHeapObjectTag)
- : Operand(key, LSL, shift_size);
- __ add(scratch, elements, operand);
- if (!key_is_constant) {
- __ add(scratch, scratch,
- Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
- }
-
- if (instr->NeedsCanonicalization()) {
- // Check for NaN. All NaNs must be canonicalized.
- __ VFPCompareAndSetFlags(value, value);
- // Only load canonical NaN if the comparison above set the overflow.
- __ Vmov(value,
- FixedDoubleArray::canonical_not_the_hole_nan_as_double(),
- no_reg, vs);
- }
-
- __ vstr(value, scratch, instr->additional_index() << element_size_shift);
+ Condition condition = instr->hydrogen()->allow_equality() ? hi : hs;
+ ApplyCheckIf(condition, instr);
}
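
DoBoundsCheck now keys the deopt condition on allow_equality(): with equality allowed the check fails only for index > length (hi), otherwise for index >= length (hs). A minimal sketch of the condition applied after the cmp above, assuming unsigned comparison as with the ARM condition codes (illustrative only, not part of the patch):

    // Sketch: the out-of-bounds predicate that hi/hs encode.
    bool out_of_bounds = allow_equality ? (index > length)    // hi
                                        : (index >= length);  // hs
    if (out_of_bounds) Deoptimize();  // or a hard stop under FLAG_debug_code
                                      // for checks the optimizer eliminated
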
-void LCodeGen::DoStoreKeyedSpecializedArrayElement(
- LStoreKeyedSpecializedArrayElement* instr) {
-
- Register external_pointer = ToRegister(instr->external_pointer());
+void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
+ Register external_pointer = ToRegister(instr->elements());
Register key = no_reg;
ElementsKind elements_kind = instr->elements_kind();
bool key_is_constant = instr->key()->IsConstantOperand();
@@ -4103,29 +4288,35 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
if (key_is_constant) {
constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
if (constant_key & 0xF0000000) {
- Abort("array index constant value too big.");
+ Abort(kArrayIndexConstantValueTooBig);
}
} else {
key = ToRegister(instr->key());
}
int element_size_shift = ElementsKindToShiftSize(elements_kind);
- int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
+ int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
? (element_size_shift - kSmiTagSize) : element_size_shift;
int additional_offset = instr->additional_index() << element_size_shift;
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- CpuFeatures::Scope scope(VFP3);
+ Register address = scratch0();
DwVfpRegister value(ToDoubleRegister(instr->value()));
- Operand operand(key_is_constant
- ? Operand(constant_key << element_size_shift)
- : Operand(key, LSL, shift_size));
- __ add(scratch0(), external_pointer, operand);
+ if (key_is_constant) {
+ if (constant_key != 0) {
+ __ add(address, external_pointer,
+ Operand(constant_key << element_size_shift));
+ } else {
+ address = external_pointer;
+ }
+ } else {
+ __ add(address, external_pointer, Operand(key, LSL, shift_size));
+ }
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
__ vcvt_f32_f64(double_scratch0().low(), value);
- __ vstr(double_scratch0().low(), scratch0(), additional_offset);
+ __ vstr(double_scratch0().low(), address, additional_offset);
} else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
- __ vstr(value, scratch0(), additional_offset);
+ __ vstr(value, address, additional_offset);
}
} else {
Register value(ToRegister(instr->value()));
@@ -4164,7 +4355,110 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
}
+void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
+ DwVfpRegister value = ToDoubleRegister(instr->value());
+ Register elements = ToRegister(instr->elements());
+ Register scratch = scratch0();
+ DwVfpRegister double_scratch = double_scratch0();
+ bool key_is_constant = instr->key()->IsConstantOperand();
+
+ // Calculate the effective address of the slot in the array to store the
+ // double value.
+ int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
+ if (key_is_constant) {
+ int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+ if (constant_key & 0xF0000000) {
+ Abort(kArrayIndexConstantValueTooBig);
+ }
+ __ add(scratch, elements,
+ Operand((constant_key << element_size_shift) +
+ FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+ } else {
+ int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
+ ? (element_size_shift - kSmiTagSize) : element_size_shift;
+ __ add(scratch, elements,
+ Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+ __ add(scratch, scratch,
+ Operand(ToRegister(instr->key()), LSL, shift_size));
+ }
+
+ if (instr->NeedsCanonicalization()) {
+ // Force a canonical NaN.
+ if (masm()->emit_debug_code()) {
+ __ vmrs(ip);
+ __ tst(ip, Operand(kVFPDefaultNaNModeControlBit));
+ __ Assert(ne, kDefaultNaNModeNotSet);
+ }
+ __ VFPCanonicalizeNaN(double_scratch, value);
+ __ vstr(double_scratch, scratch,
+ instr->additional_index() << element_size_shift);
+ } else {
+ __ vstr(value, scratch, instr->additional_index() << element_size_shift);
+ }
+}
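
The canonicalization path matters because FixedDoubleArray reserves one specific NaN bit pattern to encode the hole (see the removed canonical_not_the_hole_nan_as_double() call above), so any NaN being stored must first be rewritten to the canonical quiet NaN. A sketch of that invariant in portable C++ (illustrative only, not the emitted code):

    #include <cmath>
    #include <limits>

    // Sketch: NaNs are normalized before the store so they can never alias
    // the hole encoding used by holey double arrays.
    static inline double CanonicalizeForStore(double value) {
      return std::isnan(value) ? std::numeric_limits<double>::quiet_NaN()
                               : value;
    }
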
+
+
+void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
+ Register value = ToRegister(instr->value());
+ Register elements = ToRegister(instr->elements());
+ Register key = instr->key()->IsRegister() ? ToRegister(instr->key())
+ : no_reg;
+ Register scratch = scratch0();
+ Register store_base = scratch;
+ int offset = 0;
+
+ // Do the store.
+ if (instr->key()->IsConstantOperand()) {
+ ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+ LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
+ offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
+ instr->additional_index());
+ store_base = elements;
+ } else {
+ // Even though the HLoadKeyed instruction forces the input
+ // representation for the key to be an integer, the input gets replaced
+ // during bounds check elimination with the index argument to the bounds
+ // check, which can be tagged, so that case must be handled here, too.
+ if (instr->hydrogen()->key()->representation().IsSmi()) {
+ __ add(scratch, elements, Operand::PointerOffsetFromSmiKey(key));
+ } else {
+ __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
+ }
+ offset = FixedArray::OffsetOfElementAt(instr->additional_index());
+ }
+ __ str(value, FieldMemOperand(store_base, offset));
+
+ if (instr->hydrogen()->NeedsWriteBarrier()) {
+ SmiCheck check_needed =
+ instr->hydrogen()->value()->IsHeapObject()
+ ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+ // Compute address of modified element and store it into key register.
+ __ add(key, store_base, Operand(offset - kHeapObjectTag));
+ __ RecordWrite(elements,
+ key,
+ value,
+ GetLinkRegisterState(),
+ kSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ check_needed);
+ }
+}
+
+
+void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
+ // By cases: external, fast double, fast fixed array.
+ if (instr->is_external()) {
+ DoStoreKeyedExternalArray(instr);
+ } else if (instr->hydrogen()->value()->representation().IsDouble()) {
+ DoStoreKeyedFixedDoubleArray(instr);
+ } else {
+ DoStoreKeyedFixedArray(instr);
+ }
+}
+
+
void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->object()).is(r2));
ASSERT(ToRegister(instr->key()).is(r1));
ASSERT(ToRegister(instr->value()).is(r0));
@@ -4178,63 +4472,68 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
Register object_reg = ToRegister(instr->object());
- Register new_map_reg = ToRegister(instr->new_map_temp());
Register scratch = scratch0();
Handle<Map> from_map = instr->original_map();
Handle<Map> to_map = instr->transitioned_map();
- ElementsKind from_kind = from_map->elements_kind();
- ElementsKind to_kind = to_map->elements_kind();
+ ElementsKind from_kind = instr->from_kind();
+ ElementsKind to_kind = instr->to_kind();
Label not_applicable;
__ ldr(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
__ cmp(scratch, Operand(from_map));
__ b(ne, &not_applicable);
- __ mov(new_map_reg, Operand(to_map));
if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
+ Register new_map_reg = ToRegister(instr->new_map_temp());
+ __ mov(new_map_reg, Operand(to_map));
__ str(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
// Write barrier.
__ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
- scratch, kLRHasBeenSaved, kDontSaveFPRegs);
- } else if (IsFastSmiElementsKind(from_kind) &&
- IsFastDoubleElementsKind(to_kind)) {
- Register fixed_object_reg = ToRegister(instr->temp());
- ASSERT(fixed_object_reg.is(r2));
- ASSERT(new_map_reg.is(r3));
- __ mov(fixed_object_reg, object_reg);
- CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
- RelocInfo::CODE_TARGET, instr);
- } else if (IsFastDoubleElementsKind(from_kind) &&
- IsFastObjectElementsKind(to_kind)) {
- Register fixed_object_reg = ToRegister(instr->temp());
- ASSERT(fixed_object_reg.is(r2));
- ASSERT(new_map_reg.is(r3));
- __ mov(fixed_object_reg, object_reg);
- CallCode(isolate()->builtins()->TransitionElementsDoubleToObject(),
- RelocInfo::CODE_TARGET, instr);
+ scratch, GetLinkRegisterState(), kDontSaveFPRegs);
} else {
- UNREACHABLE();
+ ASSERT(ToRegister(instr->context()).is(cp));
+ PushSafepointRegistersScope scope(
+ this, Safepoint::kWithRegistersAndDoubles);
+ __ Move(r0, object_reg);
+ __ Move(r1, to_map);
+ TransitionElementsKindStub stub(from_kind, to_kind);
+ __ CallStub(&stub);
+ RecordSafepointWithRegistersAndDoubles(
+ instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
}
__ bind(&not_applicable);
}
+void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
+ Register object = ToRegister(instr->object());
+ Register temp = ToRegister(instr->temp());
+ Label no_memento_found;
+ __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
+ DeoptimizeIf(eq, instr->environment());
+ __ bind(&no_memento_found);
+}
+
+
void LCodeGen::DoStringAdd(LStringAdd* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
__ push(ToRegister(instr->left()));
__ push(ToRegister(instr->right()));
- StringAddStub stub(NO_STRING_CHECK_IN_STUB);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ StringAddStub stub(instr->hydrogen()->flags());
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
- class DeferredStringCharCodeAt: public LDeferredCode {
+ class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode {
public:
DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
- virtual LInstruction* instr() { return instr_; }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredStringCharCodeAt(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LStringCharCodeAt* instr_;
};
@@ -4259,7 +4558,7 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
// TODO(3095996): Get rid of this. For now, we need to make the
// result register contain a valid pointer because it is already
// contained in the register pointer map.
- __ mov(result, Operand(0));
+ __ mov(result, Operand::Zero());
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
__ push(string);
@@ -4274,7 +4573,8 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
__ SmiTag(index);
__ push(index);
}
- CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr);
+ CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr,
+ instr->context());
__ AssertSmi(r0);
__ SmiUntag(r0);
__ StoreToSafepointRegisterSlot(r0, result);
@@ -4282,12 +4582,14 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
- class DeferredStringCharFromCode: public LDeferredCode {
+ class DeferredStringCharFromCode V8_FINAL : public LDeferredCode {
public:
DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
- virtual LInstruction* instr() { return instr_; }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredStringCharFromCode(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LStringCharFromCode* instr_;
};
@@ -4300,7 +4602,7 @@ void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
Register result = ToRegister(instr->result());
ASSERT(!char_code.is(result));
- __ cmp(char_code, Operand(String::kMaxAsciiCharCode));
+ __ cmp(char_code, Operand(String::kMaxOneByteCharCode));
__ b(hi, deferred->entry());
__ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
__ add(result, result, Operand(char_code, LSL, kPointerSizeLog2));
@@ -4319,23 +4621,16 @@ void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
// TODO(3095996): Get rid of this. For now, we need to make the
// result register contain a valid pointer because it is already
// contained in the register pointer map.
- __ mov(result, Operand(0));
+ __ mov(result, Operand::Zero());
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
__ SmiTag(char_code);
__ push(char_code);
- CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr);
+ CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
__ StoreToSafepointRegisterSlot(r0, result);
}
-void LCodeGen::DoStringLength(LStringLength* instr) {
- Register string = ToRegister(instr->string());
- Register result = ToRegister(instr->result());
- __ ldr(result, FieldMemOperand(string, String::kLengthOffset));
-}
-
-
void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
LOperand* input = instr->value();
ASSERT(input->IsRegister() || input->IsStackSlot());
@@ -4353,6 +4648,17 @@ void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
}
+void LCodeGen::DoInteger32ToSmi(LInteger32ToSmi* instr) {
+ LOperand* input = instr->value();
+ LOperand* output = instr->result();
+ __ SmiTag(ToRegister(output), ToRegister(input), SetCC);
+ if (!instr->hydrogen()->value()->HasRange() ||
+ !instr->hydrogen()->value()->range()->IsInSmiRange()) {
+ DeoptimizeIf(vs, instr->environment());
+ }
+}
+
+
void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
LOperand* input = instr->value();
LOperand* output = instr->result();
@@ -4363,17 +4669,29 @@ void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
}
+void LCodeGen::DoUint32ToSmi(LUint32ToSmi* instr) {
+ LOperand* input = instr->value();
+ LOperand* output = instr->result();
+ if (!instr->hydrogen()->value()->HasRange() ||
+ !instr->hydrogen()->value()->range()->IsInSmiRange()) {
+ __ tst(ToRegister(input), Operand(0xc0000000));
+ DeoptimizeIf(ne, instr->environment());
+ }
+ __ SmiTag(ToRegister(output), ToRegister(input));
+}
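
Both new tagging helpers enforce the 31-bit smi range used on 32-bit targets: DoInteger32ToSmi relies on the overflow flag from SmiTag(..., SetCC), while DoUint32ToSmi masks the top two bits before tagging. A small sketch of the two range checks, assuming 31-bit smis (illustrative only):

    #include <stdint.h>

    // Sketch: the ranges guarded by the vs deopt and the tst #0xc0000000.
    static inline bool Int32FitsInSmi(int32_t v) {
      return v >= -(1 << 30) && v < (1 << 30);
    }
    static inline bool Uint32FitsInSmi(uint32_t v) {
      return (v & 0xC0000000u) == 0;  // i.e. v < 2^30
    }
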
+
+
void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
- class DeferredNumberTagI: public LDeferredCode {
+ class DeferredNumberTagI V8_FINAL : public LDeferredCode {
public:
DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
+ virtual void Generate() V8_OVERRIDE {
codegen()->DoDeferredNumberTagI(instr_,
instr_->value(),
SIGNED_INT32);
}
- virtual LInstruction* instr() { return instr_; }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LNumberTagI* instr_;
};
@@ -4389,16 +4707,16 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
- class DeferredNumberTagU: public LDeferredCode {
+ class DeferredNumberTagU V8_FINAL : public LDeferredCode {
public:
DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
+ virtual void Generate() V8_OVERRIDE {
codegen()->DoDeferredNumberTagI(instr_,
instr_->value(),
UNSIGNED_INT32);
}
- virtual LInstruction* instr() { return instr_; }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LNumberTagU* instr_;
};
@@ -4421,8 +4739,7 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
Label slow;
Register src = ToRegister(value);
Register dst = ToRegister(instr->result());
- DoubleRegister dbl_scratch = double_scratch0();
- SwVfpRegister flt_scratch = dbl_scratch.low();
+ LowDwVfpRegister dbl_scratch = double_scratch0();
// Preserve the value of all registers.
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
@@ -4436,16 +4753,16 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
__ SmiUntag(src, dst);
__ eor(src, src, Operand(0x80000000));
}
- __ vmov(flt_scratch, src);
- __ vcvt_f64_s32(dbl_scratch, flt_scratch);
+ __ vmov(dbl_scratch.low(), src);
+ __ vcvt_f64_s32(dbl_scratch, dbl_scratch.low());
} else {
- __ vmov(flt_scratch, src);
- __ vcvt_f64_u32(dbl_scratch, flt_scratch);
+ __ vmov(dbl_scratch.low(), src);
+ __ vcvt_f64_u32(dbl_scratch, dbl_scratch.low());
}
if (FLAG_inline_new) {
- __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r5, r3, r4, r6, &slow, DONT_TAG_RESULT);
+ __ LoadRoot(scratch0(), Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(r5, r3, r4, scratch0(), &slow, DONT_TAG_RESULT);
__ Move(dst, r5);
__ b(&done);
}
@@ -4456,9 +4773,17 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
// TODO(3095996): Put a valid pointer value in the stack slot where the result
// register is stored, as this register is in the pointer map, but contains an
// integer value.
- __ mov(ip, Operand(0));
+ __ mov(ip, Operand::Zero());
__ StoreToSafepointRegisterSlot(ip, dst);
- CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
+ // NumberTagI and NumberTagD use the context from the frame, rather than
+ // the environment's HContext or HInlinedContext value.
+ // They only call Runtime::kAllocateHeapNumber.
+ // The corresponding HChange instructions are added in a phase that does
+ // not have easy access to the local context.
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
__ Move(dst, r0);
__ sub(dst, dst, Operand(kHeapObjectTag));
@@ -4472,17 +4797,19 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
- class DeferredNumberTagD: public LDeferredCode {
+ class DeferredNumberTagD V8_FINAL : public LDeferredCode {
public:
DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
- virtual LInstruction* instr() { return instr_; }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredNumberTagD(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LNumberTagD* instr_;
};
- DoubleRegister input_reg = ToDoubleRegister(instr->value());
+ DwVfpRegister input_reg = ToDoubleRegister(instr->value());
Register scratch = scratch0();
Register reg = ToRegister(instr->result());
Register temp1 = ToRegister(instr->temp());
@@ -4509,10 +4836,18 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
// result register contain a valid pointer because it is already
// contained in the register pointer map.
Register reg = ToRegister(instr->result());
- __ mov(reg, Operand(0));
+ __ mov(reg, Operand::Zero());
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
- CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
+ // NumberTagI and NumberTagD use the context from the frame, rather than
+ // the environment's HContext or HInlinedContext value.
+ // They only call Runtime::kAllocateHeapNumber.
+ // The corresponding HChange instructions are added in a phase that does
+ // not have easy access to the local context.
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
__ sub(r0, r0, Operand(kHeapObjectTag));
__ StoreToSafepointRegisterSlot(r0, reg);
}
@@ -4539,54 +4874,52 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
void LCodeGen::EmitNumberUntagD(Register input_reg,
- DoubleRegister result_reg,
- bool deoptimize_on_undefined,
+ DwVfpRegister result_reg,
+ bool can_convert_undefined_to_nan,
bool deoptimize_on_minus_zero,
- LEnvironment* env) {
+ LEnvironment* env,
+ NumberUntagDMode mode) {
Register scratch = scratch0();
SwVfpRegister flt_scratch = double_scratch0().low();
ASSERT(!result_reg.is(double_scratch0()));
-
- Label load_smi, heap_number, done;
-
- // Smi check.
- __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
-
- // Heap number map check.
- __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
- __ cmp(scratch, Operand(ip));
- if (deoptimize_on_undefined) {
- DeoptimizeIf(ne, env);
- } else {
- Label heap_number;
- __ b(eq, &heap_number);
-
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(input_reg, Operand(ip));
- DeoptimizeIf(ne, env);
-
- // Convert undefined to NaN.
- __ LoadRoot(ip, Heap::kNanValueRootIndex);
- __ sub(ip, ip, Operand(kHeapObjectTag));
- __ vldr(result_reg, ip, HeapNumber::kValueOffset);
+ Label convert, load_smi, done;
+ if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
+ // Smi check.
+ __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
+ // Heap number map check.
+ __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
+ __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
+ __ cmp(scratch, Operand(ip));
+ if (can_convert_undefined_to_nan) {
+ __ b(ne, &convert);
+ } else {
+ DeoptimizeIf(ne, env);
+ }
+ // Load the heap number.
+ __ vldr(result_reg, input_reg, HeapNumber::kValueOffset - kHeapObjectTag);
+ if (deoptimize_on_minus_zero) {
+ __ VmovLow(scratch, result_reg);
+ __ cmp(scratch, Operand::Zero());
+ __ b(ne, &done);
+ __ VmovHigh(scratch, result_reg);
+ __ cmp(scratch, Operand(HeapNumber::kSignMask));
+ DeoptimizeIf(eq, env);
+ }
__ jmp(&done);
-
- __ bind(&heap_number);
- }
- // Heap number to double register conversion.
- __ sub(ip, input_reg, Operand(kHeapObjectTag));
- __ vldr(result_reg, ip, HeapNumber::kValueOffset);
- if (deoptimize_on_minus_zero) {
- __ vmov(ip, result_reg.low());
- __ cmp(ip, Operand(0));
- __ b(ne, &done);
- __ vmov(ip, result_reg.high());
- __ cmp(ip, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(eq, env);
+ if (can_convert_undefined_to_nan) {
+ __ bind(&convert);
+ // Convert undefined (and hole) to NaN.
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ __ cmp(input_reg, Operand(ip));
+ DeoptimizeIf(ne, env);
+ __ LoadRoot(scratch, Heap::kNanValueRootIndex);
+ __ vldr(result_reg, scratch, HeapNumber::kValueOffset - kHeapObjectTag);
+ __ jmp(&done);
+ }
+ } else {
+ __ SmiUntag(scratch, input_reg);
+ ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
}
- __ jmp(&done);
-
// Smi to double register conversion
__ bind(&load_smi);
// scratch: untagged value of input_reg
@@ -4600,8 +4933,8 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
Register input_reg = ToRegister(instr->value());
Register scratch1 = scratch0();
Register scratch2 = ToRegister(instr->temp());
- DwVfpRegister double_scratch = double_scratch0();
- DwVfpRegister double_scratch2 = ToDoubleRegister(instr->temp3());
+ LowDwVfpRegister double_scratch = double_scratch0();
+ DwVfpRegister double_scratch2 = ToDoubleRegister(instr->temp2());
ASSERT(!scratch1.is(input_reg) && !scratch1.is(scratch2));
ASSERT(!scratch2.is(input_reg) && !scratch2.is(scratch1));
@@ -4612,61 +4945,56 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
// The carry flag is set when we reach this deferred code as we just executed
// SmiUntag(heap_object, SetCC)
STATIC_ASSERT(kHeapObjectTag == 1);
- __ adc(input_reg, input_reg, Operand(input_reg));
+ __ adc(scratch2, input_reg, Operand(input_reg));
// Heap number map check.
- __ ldr(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
+ __ ldr(scratch1, FieldMemOperand(scratch2, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
__ cmp(scratch1, Operand(ip));
if (instr->truncating()) {
- Register scratch3 = ToRegister(instr->temp2());
- SwVfpRegister single_scratch = double_scratch.low();
- ASSERT(!scratch3.is(input_reg) &&
- !scratch3.is(scratch1) &&
- !scratch3.is(scratch2));
// Performs a truncating conversion of a floating point number as used by
// the JS bitwise operations.
- Label heap_number;
- __ b(eq, &heap_number);
- // Check for undefined. Undefined is converted to zero for truncating
- // conversions.
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(input_reg, Operand(ip));
- DeoptimizeIf(ne, instr->environment());
- __ mov(input_reg, Operand(0));
+ Label no_heap_number, check_bools, check_false;
+ __ b(ne, &no_heap_number);
+ __ TruncateHeapNumberToI(input_reg, scratch2);
__ b(&done);
- __ bind(&heap_number);
- __ sub(scratch1, input_reg, Operand(kHeapObjectTag));
- __ vldr(double_scratch2, scratch1, HeapNumber::kValueOffset);
+ // Check for oddballs. Undefined and false are converted to zero, and true
+ // to one, for truncating conversions.
+ __ bind(&no_heap_number);
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ __ cmp(scratch2, Operand(ip));
+ __ b(ne, &check_bools);
+ __ mov(input_reg, Operand::Zero());
+ __ b(&done);
- __ EmitECMATruncate(input_reg,
- double_scratch2,
- single_scratch,
- scratch1,
- scratch2,
- scratch3);
+ __ bind(&check_bools);
+ __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+ __ cmp(scratch2, Operand(ip));
+ __ b(ne, &check_false);
+ __ mov(input_reg, Operand(1));
+ __ b(&done);
+ __ bind(&check_false);
+ __ LoadRoot(ip, Heap::kFalseValueRootIndex);
+ __ cmp(scratch2, Operand(ip));
+ DeoptimizeIf(ne, instr->environment());
+ __ mov(input_reg, Operand::Zero());
+ __ b(&done);
} else {
- CpuFeatures::Scope scope(VFP3);
// Deoptimize if we don't have a heap number.
DeoptimizeIf(ne, instr->environment());
- __ sub(ip, input_reg, Operand(kHeapObjectTag));
- __ vldr(double_scratch, ip, HeapNumber::kValueOffset);
- __ EmitVFPTruncate(kRoundToZero,
- input_reg,
- double_scratch,
- scratch1,
- double_scratch2,
- kCheckForInexactConversion);
+ __ sub(ip, scratch2, Operand(kHeapObjectTag));
+ __ vldr(double_scratch2, ip, HeapNumber::kValueOffset);
+ __ TryDoubleToInt32Exact(input_reg, double_scratch2, double_scratch);
DeoptimizeIf(ne, instr->environment());
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ cmp(input_reg, Operand(0));
+ __ cmp(input_reg, Operand::Zero());
__ b(ne, &done);
- __ vmov(scratch1, double_scratch.high());
+ __ VmovHigh(scratch1, double_scratch2);
__ tst(scratch1, Operand(HeapNumber::kSignMask));
DeoptimizeIf(ne, instr->environment());
}
@@ -4676,12 +5004,14 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
- class DeferredTaggedToI: public LDeferredCode {
+ class DeferredTaggedToI V8_FINAL : public LDeferredCode {
public:
DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
- virtual LInstruction* instr() { return instr_; }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredTaggedToI(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LTaggedToI* instr_;
};
@@ -4692,15 +5022,19 @@ void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
Register input_reg = ToRegister(input);
- DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
+ if (instr->hydrogen()->value()->representation().IsSmi()) {
+ __ SmiUntag(input_reg);
+ } else {
+ DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
- // Optimistically untag the input.
- // If the input is a HeapObject, SmiUntag will set the carry flag.
- __ SmiUntag(input_reg, SetCC);
- // Branch to deferred code if the input was tagged.
- // The deferred code will take care of restoring the tag.
- __ b(cs, deferred->entry());
- __ bind(deferred->exit());
+ // Optimistically untag the input.
+ // If the input is a HeapObject, SmiUntag will set the carry flag.
+ __ SmiUntag(input_reg, SetCC);
+ // Branch to deferred code if the input was tagged.
+ // The deferred code will take care of restoring the tag.
+ __ b(cs, deferred->entry());
+ __ bind(deferred->exit());
+ }
}
@@ -4711,60 +5045,85 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
ASSERT(result->IsDoubleRegister());
Register input_reg = ToRegister(input);
- DoubleRegister result_reg = ToDoubleRegister(result);
+ DwVfpRegister result_reg = ToDoubleRegister(result);
+
+ HValue* value = instr->hydrogen()->value();
+ NumberUntagDMode mode = value->representation().IsSmi()
+ ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
EmitNumberUntagD(input_reg, result_reg,
- instr->hydrogen()->deoptimize_on_undefined(),
+ instr->hydrogen()->can_convert_undefined_to_nan(),
instr->hydrogen()->deoptimize_on_minus_zero(),
- instr->environment());
+ instr->environment(),
+ mode);
}
void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
Register result_reg = ToRegister(instr->result());
Register scratch1 = scratch0();
- Register scratch2 = ToRegister(instr->temp());
DwVfpRegister double_input = ToDoubleRegister(instr->value());
+ LowDwVfpRegister double_scratch = double_scratch0();
- Label done;
+ if (instr->truncating()) {
+ __ TruncateDoubleToI(result_reg, double_input);
+ } else {
+ __ TryDoubleToInt32Exact(result_reg, double_input, double_scratch);
+ // Deoptimize if the input wasn't an int32 (inside a double).
+ DeoptimizeIf(ne, instr->environment());
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label done;
+ __ cmp(result_reg, Operand::Zero());
+ __ b(ne, &done);
+ __ VmovHigh(scratch1, double_input);
+ __ tst(scratch1, Operand(HeapNumber::kSignMask));
+ DeoptimizeIf(ne, instr->environment());
+ __ bind(&done);
+ }
+ }
+}
+
+
+void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
+ Register result_reg = ToRegister(instr->result());
+ Register scratch1 = scratch0();
+ DwVfpRegister double_input = ToDoubleRegister(instr->value());
+ LowDwVfpRegister double_scratch = double_scratch0();
if (instr->truncating()) {
- Register scratch3 = ToRegister(instr->temp2());
- SwVfpRegister single_scratch = double_scratch0().low();
- __ EmitECMATruncate(result_reg,
- double_input,
- single_scratch,
- scratch1,
- scratch2,
- scratch3);
+ __ TruncateDoubleToI(result_reg, double_input);
} else {
- DwVfpRegister double_scratch = double_scratch0();
- __ EmitVFPTruncate(kRoundToMinusInf,
- result_reg,
- double_input,
- scratch1,
- double_scratch,
- kCheckForInexactConversion);
-
- // Deoptimize if we had a vfp invalid exception,
- // including inexact operation.
+ __ TryDoubleToInt32Exact(result_reg, double_input, double_scratch);
+ // Deoptimize if the input wasn't an int32 (inside a double).
DeoptimizeIf(ne, instr->environment());
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label done;
+ __ cmp(result_reg, Operand::Zero());
+ __ b(ne, &done);
+ __ VmovHigh(scratch1, double_input);
+ __ tst(scratch1, Operand(HeapNumber::kSignMask));
+ DeoptimizeIf(ne, instr->environment());
+ __ bind(&done);
+ }
}
- __ bind(&done);
+ __ SmiTag(result_reg, SetCC);
+ DeoptimizeIf(vs, instr->environment());
}
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
LOperand* input = instr->value();
- __ tst(ToRegister(input), Operand(kSmiTagMask));
+ __ SmiTst(ToRegister(input));
DeoptimizeIf(ne, instr->environment());
}
void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
- LOperand* input = instr->value();
- __ tst(ToRegister(input), Operand(kSmiTagMask));
- DeoptimizeIf(eq, instr->environment());
+ if (!instr->hydrogen()->value()->IsHeapObject()) {
+ LOperand* input = instr->value();
+ __ SmiTst(ToRegister(input));
+ DeoptimizeIf(eq, instr->environment());
+ }
}
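
DoCheckSmi and DoCheckNonSmi now go through SmiTst, which simply tests the value against kSmiTagMask; because the smi tag is 0, eq means "is a smi" and ne means "is a heap object". A sketch of the predicate, assuming the 32-bit tagging scheme with kSmiTag == 0 and kSmiTagMask == 1 (illustrative only):

    #include <stdint.h>

    // Sketch: tag bit 0 clear marks a smi; heap object pointers carry bit 1.
    static inline bool IsSmi(intptr_t tagged) {
      return (tagged & 1) == 0;
    }
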
@@ -4811,59 +5170,95 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
}
-void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
+void LCodeGen::DoCheckValue(LCheckValue* instr) {
Register reg = ToRegister(instr->value());
- Handle<JSFunction> target = instr->hydrogen()->target();
- if (isolate()->heap()->InNewSpace(*target)) {
+ Handle<HeapObject> object = instr->hydrogen()->object().handle();
+ AllowDeferredHandleDereference smi_check;
+ if (isolate()->heap()->InNewSpace(*object)) {
Register reg = ToRegister(instr->value());
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(target);
+ Handle<Cell> cell = isolate()->factory()->NewCell(object);
__ mov(ip, Operand(Handle<Object>(cell)));
- __ ldr(ip, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
+ __ ldr(ip, FieldMemOperand(ip, Cell::kValueOffset));
__ cmp(reg, ip);
} else {
- __ cmp(reg, Operand(target));
+ __ cmp(reg, Operand(object));
}
DeoptimizeIf(ne, instr->environment());
}
-void LCodeGen::DoCheckMapCommon(Register reg,
- Register scratch,
- Handle<Map> map,
- CompareMapMode mode,
- LEnvironment* env) {
- Label success;
- __ CompareMap(reg, scratch, map, &success, mode);
- DeoptimizeIf(ne, env);
- __ bind(&success);
+void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
+ {
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ __ push(object);
+ __ mov(cp, Operand::Zero());
+ __ CallRuntimeSaveDoubles(Runtime::kMigrateInstance);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
+ __ StoreToSafepointRegisterSlot(r0, scratch0());
+ }
+ __ tst(scratch0(), Operand(kSmiTagMask));
+ DeoptimizeIf(eq, instr->environment());
}
void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
- Register scratch = scratch0();
+ class DeferredCheckMaps V8_FINAL : public LDeferredCode {
+ public:
+ DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
+ : LDeferredCode(codegen), instr_(instr), object_(object) {
+ SetExit(check_maps());
+ }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredInstanceMigration(instr_, object_);
+ }
+ Label* check_maps() { return &check_maps_; }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ private:
+ LCheckMaps* instr_;
+ Label check_maps_;
+ Register object_;
+ };
+
+ if (instr->hydrogen()->CanOmitMapChecks()) return;
+ Register map_reg = scratch0();
+
LOperand* input = instr->value();
ASSERT(input->IsRegister());
Register reg = ToRegister(input);
+ __ ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
+
+ DeferredCheckMaps* deferred = NULL;
+ if (instr->hydrogen()->has_migration_target()) {
+ deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
+ __ bind(deferred->check_maps());
+ }
+
+ UniqueSet<Map> map_set = instr->hydrogen()->map_set();
Label success;
- SmallMapList* map_set = instr->hydrogen()->map_set();
- for (int i = 0; i < map_set->length() - 1; i++) {
- Handle<Map> map = map_set->at(i);
- __ CompareMap(reg, scratch, map, &success, REQUIRE_EXACT_MAP);
+ for (int i = 0; i < map_set.size() - 1; i++) {
+ Handle<Map> map = map_set.at(i).handle();
+ __ CompareMap(map_reg, map, &success);
__ b(eq, &success);
}
- Handle<Map> map = map_set->last();
- DoCheckMapCommon(reg, scratch, map, REQUIRE_EXACT_MAP, instr->environment());
+
+ Handle<Map> map = map_set.at(map_set.size() - 1).handle();
+ __ CompareMap(map_reg, map, &success);
+ if (instr->hydrogen()->has_migration_target()) {
+ __ b(ne, deferred->entry());
+ } else {
+ DeoptimizeIf(ne, instr->environment());
+ }
+
__ bind(&success);
}
void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
- DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
+ DwVfpRegister value_reg = ToDoubleRegister(instr->unclamped());
Register result_reg = ToRegister(instr->result());
- DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
- __ ClampDoubleToUint8(result_reg, value_reg, temp_reg);
+ __ ClampDoubleToUint8(result_reg, value_reg, double_scratch0());
}
@@ -4878,7 +5273,7 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
Register scratch = scratch0();
Register input_reg = ToRegister(instr->unclamped());
Register result_reg = ToRegister(instr->result());
- DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
+ DwVfpRegister temp_reg = ToDoubleRegister(instr->temp());
Label is_smi, done, heap_number;
// Both smi and heap number cases are handled.
@@ -4893,14 +5288,13 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
// conversions.
__ cmp(input_reg, Operand(factory()->undefined_value()));
DeoptimizeIf(ne, instr->environment());
- __ mov(result_reg, Operand(0));
+ __ mov(result_reg, Operand::Zero());
__ jmp(&done);
// Heap number
__ bind(&heap_number);
- __ vldr(double_scratch0(), FieldMemOperand(input_reg,
- HeapNumber::kValueOffset));
- __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg);
+ __ vldr(temp_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
+ __ ClampDoubleToUint8(result_reg, temp_reg, double_scratch0());
__ jmp(&done);
// smi
@@ -4911,339 +5305,109 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
}
-void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
- Register temp1 = ToRegister(instr->temp());
- Register temp2 = ToRegister(instr->temp2());
-
- Handle<JSObject> holder = instr->holder();
- Handle<JSObject> current_prototype = instr->prototype();
-
- // Load prototype object.
- __ LoadHeapObject(temp1, current_prototype);
-
- // Check prototype maps up to the holder.
- while (!current_prototype.is_identical_to(holder)) {
- DoCheckMapCommon(temp1, temp2,
- Handle<Map>(current_prototype->map()),
- ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
- current_prototype =
- Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
- // Load next prototype object.
- __ LoadHeapObject(temp1, current_prototype);
- }
-
- // Check the holder map.
- DoCheckMapCommon(temp1, temp2,
- Handle<Map>(current_prototype->map()),
- ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
- DeoptimizeIf(ne, instr->environment());
-}
-
-
-void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
- class DeferredAllocateObject: public LDeferredCode {
+void LCodeGen::DoAllocate(LAllocate* instr) {
+ class DeferredAllocate V8_FINAL : public LDeferredCode {
public:
- DeferredAllocateObject(LCodeGen* codegen, LAllocateObject* instr)
+ DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredAllocateObject(instr_); }
- virtual LInstruction* instr() { return instr_; }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredAllocate(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
- LAllocateObject* instr_;
+ LAllocate* instr_;
};
- DeferredAllocateObject* deferred =
- new(zone()) DeferredAllocateObject(this, instr);
+ DeferredAllocate* deferred =
+ new(zone()) DeferredAllocate(this, instr);
Register result = ToRegister(instr->result());
- Register scratch = ToRegister(instr->temp());
+ Register scratch = ToRegister(instr->temp1());
Register scratch2 = ToRegister(instr->temp2());
- Handle<JSFunction> constructor = instr->hydrogen()->constructor();
- Handle<Map> initial_map(constructor->initial_map());
- int instance_size = initial_map->instance_size();
- ASSERT(initial_map->pre_allocated_property_fields() +
- initial_map->unused_property_fields() -
- initial_map->inobject_properties() == 0);
-
- // Allocate memory for the object. The initial map might change when
- // the constructor's prototype changes, but instance size and property
- // counts remain unchanged (if slack tracking finished).
- ASSERT(!constructor->shared()->IsInobjectSlackTrackingInProgress());
- __ AllocateInNewSpace(instance_size,
- result,
- scratch,
- scratch2,
- deferred->entry(),
- TAG_OBJECT);
- __ bind(deferred->exit());
- if (FLAG_debug_code) {
- Label is_in_new_space;
- __ JumpIfInNewSpace(result, scratch, &is_in_new_space);
- __ Abort("Allocated object is not in new-space");
- __ bind(&is_in_new_space);
- }
-
- // Load the initial map.
- Register map = scratch;
- __ LoadHeapObject(map, constructor);
- __ ldr(map, FieldMemOperand(map, JSFunction::kPrototypeOrInitialMapOffset));
-
- // Initialize map and fields of the newly allocated object.
- ASSERT(initial_map->instance_type() == JS_OBJECT_TYPE);
- __ str(map, FieldMemOperand(result, JSObject::kMapOffset));
- __ LoadRoot(scratch, Heap::kEmptyFixedArrayRootIndex);
- __ str(scratch, FieldMemOperand(result, JSObject::kElementsOffset));
- __ str(scratch, FieldMemOperand(result, JSObject::kPropertiesOffset));
- if (initial_map->inobject_properties() != 0) {
- __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
- for (int i = 0; i < initial_map->inobject_properties(); i++) {
- int property_offset = JSObject::kHeaderSize + i * kPointerSize;
- __ str(scratch, FieldMemOperand(result, property_offset));
- }
+ // Allocate memory for the object.
+ AllocationFlags flags = TAG_OBJECT;
+ if (instr->hydrogen()->MustAllocateDoubleAligned()) {
+ flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
}
-}
-
-
-void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
- Register result = ToRegister(instr->result());
- Handle<JSFunction> constructor = instr->hydrogen()->constructor();
- Handle<Map> initial_map(constructor->initial_map());
- int instance_size = initial_map->instance_size();
-
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- __ mov(result, Operand(0));
-
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
- __ mov(r0, Operand(Smi::FromInt(instance_size)));
- __ push(r0);
- CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
- __ StoreToSafepointRegisterSlot(r0, result);
-}
-
-
-void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
- Handle<FixedArray> literals(instr->environment()->closure()->literals());
- ElementsKind boilerplate_elements_kind =
- instr->hydrogen()->boilerplate_elements_kind();
-
- // Deopt if the array literal boilerplate ElementsKind is of a type different
- // than the expected one. The check isn't necessary if the boilerplate has
- // already been converted to TERMINAL_FAST_ELEMENTS_KIND.
- if (CanTransitionToMoreGeneralFastElementsKind(
- boilerplate_elements_kind, true)) {
- __ LoadHeapObject(r1, instr->hydrogen()->boilerplate_object());
- // Load map into r2.
- __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
- // Load the map's "bit field 2".
- __ ldrb(r2, FieldMemOperand(r2, Map::kBitField2Offset));
- // Retrieve elements_kind from bit field 2.
- __ ubfx(r2, r2, Map::kElementsKindShift, Map::kElementsKindBitCount);
- __ cmp(r2, Operand(boilerplate_elements_kind));
- DeoptimizeIf(ne, instr->environment());
+ if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
+ ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
+ ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
+ flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
+ } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
+ ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
+ flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
}
- // Set up the parameters to the stub/runtime call.
- __ LoadHeapObject(r3, literals);
- __ mov(r2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
- // Boilerplate already exists, constant elements are never accessed.
- // Pass an empty fixed array.
- __ mov(r1, Operand(isolate()->factory()->empty_fixed_array()));
- __ Push(r3, r2, r1);
-
- // Pick the right runtime function or stub to call.
- int length = instr->hydrogen()->length();
- if (instr->hydrogen()->IsCopyOnWrite()) {
- ASSERT(instr->hydrogen()->depth() == 1);
- FastCloneShallowArrayStub::Mode mode =
- FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
- FastCloneShallowArrayStub stub(mode, length);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- } else if (instr->hydrogen()->depth() > 1) {
- CallRuntime(Runtime::kCreateArrayLiteral, 3, instr);
- } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
- CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
+ if (instr->size()->IsConstantOperand()) {
+ int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+ __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
} else {
- FastCloneShallowArrayStub::Mode mode =
- boilerplate_elements_kind == FAST_DOUBLE_ELEMENTS
- ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
- : FastCloneShallowArrayStub::CLONE_ELEMENTS;
- FastCloneShallowArrayStub stub(mode, length);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- }
-}
-
-
-void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
- Register result,
- Register source,
- int* offset) {
- ASSERT(!source.is(r2));
- ASSERT(!result.is(r2));
-
- // Only elements backing stores for non-COW arrays need to be copied.
- Handle<FixedArrayBase> elements(object->elements());
- bool has_elements = elements->length() > 0 &&
- elements->map() != isolate()->heap()->fixed_cow_array_map();
-
- // Increase the offset so that subsequent objects end up right after
- // this object and its backing store.
- int object_offset = *offset;
- int object_size = object->map()->instance_size();
- int elements_offset = *offset + object_size;
- int elements_size = has_elements ? elements->Size() : 0;
- *offset += object_size + elements_size;
-
- // Copy object header.
- ASSERT(object->properties()->length() == 0);
- int inobject_properties = object->map()->inobject_properties();
- int header_size = object_size - inobject_properties * kPointerSize;
- for (int i = 0; i < header_size; i += kPointerSize) {
- if (has_elements && i == JSObject::kElementsOffset) {
- __ add(r2, result, Operand(elements_offset));
- } else {
- __ ldr(r2, FieldMemOperand(source, i));
- }
- __ str(r2, FieldMemOperand(result, object_offset + i));
- }
-
- // Copy in-object properties.
- for (int i = 0; i < inobject_properties; i++) {
- int total_offset = object_offset + object->GetInObjectPropertyOffset(i);
- Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i));
- if (value->IsJSObject()) {
- Handle<JSObject> value_object = Handle<JSObject>::cast(value);
- __ add(r2, result, Operand(*offset));
- __ str(r2, FieldMemOperand(result, total_offset));
- __ LoadHeapObject(source, value_object);
- EmitDeepCopy(value_object, result, source, offset);
- } else if (value->IsHeapObject()) {
- __ LoadHeapObject(r2, Handle<HeapObject>::cast(value));
- __ str(r2, FieldMemOperand(result, total_offset));
- } else {
- __ mov(r2, Operand(value));
- __ str(r2, FieldMemOperand(result, total_offset));
- }
+ Register size = ToRegister(instr->size());
+ __ Allocate(size,
+ result,
+ scratch,
+ scratch2,
+ deferred->entry(),
+ flags);
}
- if (has_elements) {
- // Copy elements backing store header.
- __ LoadHeapObject(source, elements);
- for (int i = 0; i < FixedArray::kHeaderSize; i += kPointerSize) {
- __ ldr(r2, FieldMemOperand(source, i));
- __ str(r2, FieldMemOperand(result, elements_offset + i));
- }
+ __ bind(deferred->exit());
- // Copy elements backing store content.
- int elements_length = has_elements ? elements->length() : 0;
- if (elements->IsFixedDoubleArray()) {
- Handle<FixedDoubleArray> double_array =
- Handle<FixedDoubleArray>::cast(elements);
- for (int i = 0; i < elements_length; i++) {
- int64_t value = double_array->get_representation(i);
- // We only support little endian mode...
- int32_t value_low = static_cast<int32_t>(value & 0xFFFFFFFF);
- int32_t value_high = static_cast<int32_t>(value >> 32);
- int total_offset =
- elements_offset + FixedDoubleArray::OffsetOfElementAt(i);
- __ mov(r2, Operand(value_low));
- __ str(r2, FieldMemOperand(result, total_offset));
- __ mov(r2, Operand(value_high));
- __ str(r2, FieldMemOperand(result, total_offset + 4));
- }
- } else if (elements->IsFixedArray()) {
- Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
- for (int i = 0; i < elements_length; i++) {
- int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
- Handle<Object> value(fast_elements->get(i));
- if (value->IsJSObject()) {
- Handle<JSObject> value_object = Handle<JSObject>::cast(value);
- __ add(r2, result, Operand(*offset));
- __ str(r2, FieldMemOperand(result, total_offset));
- __ LoadHeapObject(source, value_object);
- EmitDeepCopy(value_object, result, source, offset);
- } else if (value->IsHeapObject()) {
- __ LoadHeapObject(r2, Handle<HeapObject>::cast(value));
- __ str(r2, FieldMemOperand(result, total_offset));
- } else {
- __ mov(r2, Operand(value));
- __ str(r2, FieldMemOperand(result, total_offset));
- }
- }
+ if (instr->hydrogen()->MustPrefillWithFiller()) {
+ if (instr->size()->IsConstantOperand()) {
+ int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+ __ mov(scratch, Operand(size));
} else {
- UNREACHABLE();
+ scratch = ToRegister(instr->size());
}
+ __ sub(scratch, scratch, Operand(kPointerSize));
+ __ sub(result, result, Operand(kHeapObjectTag));
+ Label loop;
+ __ bind(&loop);
+ __ mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
+ __ str(scratch2, MemOperand(result, scratch));
+ __ sub(scratch, scratch, Operand(kPointerSize));
+ __ cmp(scratch, Operand(0));
+ __ b(ge, &loop);
+ __ add(result, result, Operand(kHeapObjectTag));
}
}
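
When MustPrefillWithFiller() is set, the loop above walks the new object from its last word down to offset 0 and writes the one-pointer filler map into every slot, keeping the space parsable for the GC until the real contents are stored. Roughly, in C++ (a sketch of the effect, not the emitted code):

    #include <stdint.h>

    // Sketch: fill the untagged allocation with one-pointer filler maps.
    static void PrefillWithFiller(uint8_t* untagged_start, int size,
                                  void* one_pointer_filler_map) {
      const int kPointerSize = sizeof(void*);  // 4 on ARM32
      for (int offset = size - kPointerSize; offset >= 0;
           offset -= kPointerSize) {
        *reinterpret_cast<void**>(untagged_start + offset) =
            one_pointer_filler_map;
      }
    }
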
-void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
- int size = instr->hydrogen()->total_size();
- ElementsKind boilerplate_elements_kind =
- instr->hydrogen()->boilerplate()->GetElementsKind();
-
- // Deopt if the array literal boilerplate ElementsKind is of a type different
- // than the expected one. The check isn't necessary if the boilerplate has
- // already been converted to TERMINAL_FAST_ELEMENTS_KIND.
- if (CanTransitionToMoreGeneralFastElementsKind(
- boilerplate_elements_kind, true)) {
- __ LoadHeapObject(r1, instr->hydrogen()->boilerplate());
- // Load map into r2.
- __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
- // Load the map's "bit field 2".
- __ ldrb(r2, FieldMemOperand(r2, Map::kBitField2Offset));
- // Retrieve elements_kind from bit field 2.
- __ ubfx(r2, r2, Map::kElementsKindShift, Map::kElementsKindBitCount);
- __ cmp(r2, Operand(boilerplate_elements_kind));
- DeoptimizeIf(ne, instr->environment());
- }
-
- // Allocate all objects that are part of the literal in one big
- // allocation. This avoids multiple limit checks.
- Label allocated, runtime_allocate;
- __ AllocateInNewSpace(size, r0, r2, r3, &runtime_allocate, TAG_OBJECT);
- __ jmp(&allocated);
+void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
+ Register result = ToRegister(instr->result());
- __ bind(&runtime_allocate);
- __ mov(r0, Operand(Smi::FromInt(size)));
- __ push(r0);
- CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ __ mov(result, Operand(Smi::FromInt(0)));
- __ bind(&allocated);
- int offset = 0;
- __ LoadHeapObject(r1, instr->hydrogen()->boilerplate());
- EmitDeepCopy(instr->hydrogen()->boilerplate(), r0, r1, &offset);
- ASSERT_EQ(size, offset);
-}
-
-
-void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
- Handle<FixedArray> literals(instr->environment()->closure()->literals());
- Handle<FixedArray> constant_properties =
- instr->hydrogen()->constant_properties();
-
- // Set up the parameters to the stub/runtime call.
- __ LoadHeapObject(r4, literals);
- __ mov(r3, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
- __ mov(r2, Operand(constant_properties));
- int flags = instr->hydrogen()->fast_elements()
- ? ObjectLiteral::kFastElements
- : ObjectLiteral::kNoFlags;
- __ mov(r1, Operand(Smi::FromInt(flags)));
- __ Push(r4, r3, r2, r1);
-
- // Pick the right runtime function or stub to call.
- int properties_count = constant_properties->length() / 2;
- if (instr->hydrogen()->depth() > 1) {
- CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
- } else if (flags != ObjectLiteral::kFastElements ||
- properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
- CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ if (instr->size()->IsRegister()) {
+ Register size = ToRegister(instr->size());
+ ASSERT(!size.is(result));
+ __ SmiTag(size);
+ __ push(size);
} else {
- FastCloneShallowObjectStub stub(properties_count);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+ __ Push(Smi::FromInt(size));
+ }
+
+ if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
+ ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
+ ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
+ CallRuntimeFromDeferred(Runtime::kAllocateInOldPointerSpace, 1, instr,
+ instr->context());
+ } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
+ ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
+ CallRuntimeFromDeferred(Runtime::kAllocateInOldDataSpace, 1, instr,
+ instr->context());
+ } else {
+ CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr,
+ instr->context());
}
+ __ StoreToSafepointRegisterSlot(r0, result);
}
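
For orientation, the new DoDeferredAllocate is reached through a deferred-code object whose Generate() simply forwards to it. A minimal sketch of that wrapper, modeled on the DeferredStackCheck class that appears later in this diff (the wrapper itself is not shown in this hunk):

  class DeferredAllocate V8_FINAL : public LDeferredCode {
   public:
    DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredAllocate(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LAllocate* instr_;
  };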
@@ -5255,26 +5419,27 @@ void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
Label materialized;
// Registers will be used as follows:
- // r7 = literals array.
+ // r6 = literals array.
// r1 = regexp literal.
// r0 = regexp literal clone.
- // r2 and r4-r6 are used as temporaries.
+  // r2-r5 are used as temporaries.
int literal_offset =
FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
- __ LoadHeapObject(r7, instr->hydrogen()->literals());
- __ ldr(r1, FieldMemOperand(r7, literal_offset));
+ __ Move(r6, instr->hydrogen()->literals());
+ __ ldr(r1, FieldMemOperand(r6, literal_offset));
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ cmp(r1, ip);
__ b(ne, &materialized);
// Create regexp literal using runtime function
// Result will be in r0.
- __ mov(r6, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
- __ mov(r5, Operand(instr->hydrogen()->pattern()));
- __ mov(r4, Operand(instr->hydrogen()->flags()));
- __ Push(r7, r6, r5, r4);
+ __ mov(r5, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
+ __ mov(r4, Operand(instr->hydrogen()->pattern()));
+ __ mov(r3, Operand(instr->hydrogen()->flags()));
+ __ Push(r6, r5, r4, r3);
CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
__ mov(r1, r0);
@@ -5282,7 +5447,7 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
Label allocated, runtime_allocate;
- __ AllocateInNewSpace(size, r0, r2, r3, &runtime_allocate, TAG_OBJECT);
+ __ Allocate(size, r0, r2, r3, &runtime_allocate, TAG_OBJECT);
__ jmp(&allocated);
__ bind(&runtime_allocate);
@@ -5293,35 +5458,24 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
__ bind(&allocated);
// Copy the content into the newly allocated memory.
- // (Unroll copy loop once for better throughput).
- for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
- __ ldr(r3, FieldMemOperand(r1, i));
- __ ldr(r2, FieldMemOperand(r1, i + kPointerSize));
- __ str(r3, FieldMemOperand(r0, i));
- __ str(r2, FieldMemOperand(r0, i + kPointerSize));
- }
- if ((size % (2 * kPointerSize)) != 0) {
- __ ldr(r3, FieldMemOperand(r1, size - kPointerSize));
- __ str(r3, FieldMemOperand(r0, size - kPointerSize));
- }
+ __ CopyFields(r0, r1, double_scratch0(), size / kPointerSize);
}
void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
// Use the fast case closure allocation code that allocates in new
// space for nested functions that don't need literals cloning.
- Handle<SharedFunctionInfo> shared_info = instr->shared_info();
bool pretenure = instr->hydrogen()->pretenure();
- if (!pretenure && shared_info->num_literals() == 0) {
- FastNewClosureStub stub(shared_info->language_mode());
- __ mov(r1, Operand(shared_info));
- __ push(r1);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ if (!pretenure && instr->hydrogen()->has_no_literals()) {
+ FastNewClosureStub stub(instr->hydrogen()->language_mode(),
+ instr->hydrogen()->is_generator());
+ __ mov(r2, Operand(instr->hydrogen()->shared_info()));
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
} else {
- __ mov(r2, Operand(shared_info));
- __ mov(r1, Operand(pretenure
- ? factory()->true_value()
- : factory()->false_value()));
+ __ mov(r2, Operand(instr->hydrogen()->shared_info()));
+ __ mov(r1, Operand(pretenure ? factory()->true_value()
+ : factory()->false_value()));
__ Push(cp, r2, r1);
CallRuntime(Runtime::kNewClosure, 3, instr);
}
@@ -5337,17 +5491,13 @@ void LCodeGen::DoTypeof(LTypeof* instr) {
void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
Register input = ToRegister(instr->value());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
- Condition final_branch_condition = EmitTypeofIs(true_label,
- false_label,
+ Condition final_branch_condition = EmitTypeofIs(instr->TrueLabel(chunk_),
+ instr->FalseLabel(chunk_),
input,
instr->type_literal());
if (final_branch_condition != kNoCondition) {
- EmitBranch(true_block, false_block, final_branch_condition);
+ EmitBranch(instr, final_branch_condition);
}
}
@@ -5358,14 +5508,14 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
Handle<String> type_name) {
Condition final_branch_condition = kNoCondition;
Register scratch = scratch0();
- if (type_name->Equals(heap()->number_symbol())) {
+ if (type_name->Equals(heap()->number_string())) {
__ JumpIfSmi(input, true_label);
__ ldr(input, FieldMemOperand(input, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
__ cmp(input, Operand(ip));
final_branch_condition = eq;
- } else if (type_name->Equals(heap()->string_symbol())) {
+ } else if (type_name->Equals(heap()->string_string())) {
__ JumpIfSmi(input, false_label);
__ CompareObjectType(input, input, scratch, FIRST_NONSTRING_TYPE);
__ b(ge, false_label);
@@ -5373,17 +5523,22 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
__ tst(ip, Operand(1 << Map::kIsUndetectable));
final_branch_condition = eq;
- } else if (type_name->Equals(heap()->boolean_symbol())) {
+ } else if (type_name->Equals(heap()->symbol_string())) {
+ __ JumpIfSmi(input, false_label);
+ __ CompareObjectType(input, input, scratch, SYMBOL_TYPE);
+ final_branch_condition = eq;
+
+ } else if (type_name->Equals(heap()->boolean_string())) {
__ CompareRoot(input, Heap::kTrueValueRootIndex);
__ b(eq, true_label);
__ CompareRoot(input, Heap::kFalseValueRootIndex);
final_branch_condition = eq;
- } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_symbol())) {
+ } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_string())) {
__ CompareRoot(input, Heap::kNullValueRootIndex);
final_branch_condition = eq;
- } else if (type_name->Equals(heap()->undefined_symbol())) {
+ } else if (type_name->Equals(heap()->undefined_string())) {
__ CompareRoot(input, Heap::kUndefinedValueRootIndex);
__ b(eq, true_label);
__ JumpIfSmi(input, false_label);
@@ -5393,7 +5548,7 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
__ tst(ip, Operand(1 << Map::kIsUndetectable));
final_branch_condition = ne;
- } else if (type_name->Equals(heap()->function_symbol())) {
+ } else if (type_name->Equals(heap()->function_string())) {
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
__ JumpIfSmi(input, false_label);
__ CompareObjectType(input, scratch, input, JS_FUNCTION_TYPE);
@@ -5401,7 +5556,7 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
__ cmp(input, Operand(JS_FUNCTION_PROXY_TYPE));
final_branch_condition = eq;
- } else if (type_name->Equals(heap()->object_symbol())) {
+ } else if (type_name->Equals(heap()->object_string())) {
__ JumpIfSmi(input, false_label);
if (!FLAG_harmony_typeof) {
__ CompareRoot(input, Heap::kNullValueRootIndex);
@@ -5427,11 +5582,9 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
Register temp1 = ToRegister(instr->temp());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
EmitIsConstructCall(temp1, scratch0());
- EmitBranch(true_block, false_block, eq);
+ EmitBranch(instr, eq);
}
@@ -5454,27 +5607,27 @@ void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
}
-void LCodeGen::EnsureSpaceForLazyDeopt() {
+void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
+ if (info()->IsStub()) return;
// Ensure that we have enough space after the previous lazy-bailout
// instruction for patching the code here.
int current_pc = masm()->pc_offset();
- int patch_size = Deoptimizer::patch_size();
- if (current_pc < last_lazy_deopt_pc_ + patch_size) {
+ if (current_pc < last_lazy_deopt_pc_ + space_needed) {
// Block literal pool emission for duration of padding.
Assembler::BlockConstPoolScope block_const_pool(masm());
- int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
+ int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
while (padding_size > 0) {
__ nop();
padding_size -= Assembler::kInstrSize;
}
}
- last_lazy_deopt_pc_ = masm()->pc_offset();
}
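
A worked example of the padding arithmetic above, assuming space_needed is Deoptimizer::patch_size(), typically three ARM instructions (12 bytes):

  // last_lazy_deopt_pc_ = 0x40, space_needed = 12, current_pc = 0x44
  // padding_size = 0x40 + 12 - 0x44 = 8  -> two 4-byte nops are emitted,
  // so the next patchable site starts at 0x4C and cannot overlap the
  // call sequence patched over the previous lazy-bailout point.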
void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
- EnsureSpaceForLazyDeopt();
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+ last_lazy_deopt_pc_ = masm()->pc_offset();
ASSERT(instr->HasEnvironment());
LEnvironment* env = instr->environment();
RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
@@ -5483,39 +5636,33 @@ void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
- DeoptimizeIf(al, instr->environment());
+ Deoptimizer::BailoutType type = instr->hydrogen()->type();
+ // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
+ // needed return address), even though the implementation of LAZY and EAGER is
+ // now identical. When LAZY is eventually completely folded into EAGER, remove
+ // the special case below.
+ if (info()->IsStub() && type == Deoptimizer::EAGER) {
+ type = Deoptimizer::LAZY;
+ }
+
+ Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
+ DeoptimizeIf(al, instr->environment(), type);
}
-void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
- Register object = ToRegister(instr->object());
- Register key = ToRegister(instr->key());
- Register strict = scratch0();
- __ mov(strict, Operand(Smi::FromInt(strict_mode_flag())));
- __ Push(object, key, strict);
- ASSERT(instr->HasPointerMap());
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
- SafepointGenerator safepoint_generator(
- this, pointers, Safepoint::kLazyDeopt);
- __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, safepoint_generator);
+void LCodeGen::DoDummy(LDummy* instr) {
+ // Nothing to see here, move on!
}
-void LCodeGen::DoIn(LIn* instr) {
- Register obj = ToRegister(instr->object());
- Register key = ToRegister(instr->key());
- __ Push(key, obj);
- ASSERT(instr->HasPointerMap());
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
- SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
- __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION, safepoint_generator);
+void LCodeGen::DoDummyUse(LDummyUse* instr) {
+ // Nothing to see here, move on!
}
void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ LoadContextFromDeferred(instr->context());
__ CallRuntimeSaveDoubles(Runtime::kStackGuard);
RecordSafepointWithLazyDeopt(
instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
@@ -5526,12 +5673,14 @@ void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
void LCodeGen::DoStackCheck(LStackCheck* instr) {
- class DeferredStackCheck: public LDeferredCode {
+ class DeferredStackCheck V8_FINAL : public LDeferredCode {
public:
DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
- virtual LInstruction* instr() { return instr_; }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredStackCheck(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LStackCheck* instr_;
};
@@ -5546,10 +5695,14 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
__ LoadRoot(ip, Heap::kStackLimitRootIndex);
__ cmp(sp, Operand(ip));
__ b(hs, &done);
- StackCheckStub stub;
- PredictableCodeSizeScope predictable(masm_);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- EnsureSpaceForLazyDeopt();
+ PredictableCodeSizeScope predictable(masm_, 2 * Assembler::kInstrSize);
+ ASSERT(instr->context()->IsRegister());
+ ASSERT(ToRegister(instr->context()).is(cp));
+ CallCode(isolate()->builtins()->StackCheck(),
+ RelocInfo::CODE_TARGET,
+ instr);
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+ last_lazy_deopt_pc_ = masm()->pc_offset();
__ bind(&done);
RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
@@ -5561,7 +5714,8 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
__ LoadRoot(ip, Heap::kStackLimitRootIndex);
__ cmp(sp, Operand(ip));
__ b(lo, deferred_stack_check->entry());
- EnsureSpaceForLazyDeopt();
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+ last_lazy_deopt_pc_ = masm()->pc_offset();
__ bind(instr->done_label());
deferred_stack_check->SetExit(instr->done_label());
RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
@@ -5577,15 +5731,13 @@ void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
// properly registered for deoptimization and records the assembler's PC
// offset.
LEnvironment* environment = instr->environment();
- environment->SetSpilledRegisters(instr->SpilledRegisterArray(),
- instr->SpilledDoubleRegisterArray());
// If the environment were already registered, we would have no way of
// backpatching it with the spill slot operands.
ASSERT(!environment->HasBeenRegistered());
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
- ASSERT(osr_pc_offset_ == -1);
- osr_pc_offset_ = masm()->pc_offset();
+
+ GenerateOsrPrologue();
}
@@ -5599,7 +5751,7 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
__ cmp(r0, null_value);
DeoptimizeIf(eq, instr->environment());
- __ tst(r0, Operand(kSmiTagMask));
+ __ SmiTst(r0);
DeoptimizeIf(eq, instr->environment());
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
@@ -5641,7 +5793,7 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
__ ldr(result,
FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
- __ cmp(result, Operand(0));
+ __ cmp(result, Operand::Zero());
DeoptimizeIf(eq, instr->environment());
__ bind(&done);
@@ -5664,11 +5816,10 @@ void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
Register scratch = scratch0();
Label out_of_object, done;
- __ cmp(index, Operand(0));
+ __ cmp(index, Operand::Zero());
__ b(lt, &out_of_object);
- STATIC_ASSERT(kPointerSizeLog2 > kSmiTagSize);
- __ add(scratch, object, Operand(index, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ add(scratch, object, Operand::PointerOffsetFromSmiKey(index));
__ ldr(result, FieldMemOperand(scratch, JSObject::kHeaderSize));
__ b(&done);
@@ -5676,7 +5827,8 @@ void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
__ bind(&out_of_object);
__ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
  // Index is equal to the negated out-of-object property index plus 1.
- __ sub(scratch, result, Operand(index, LSL, kPointerSizeLog2 - kSmiTagSize));
+ STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
+ __ sub(scratch, result, Operand::PointerOffsetFromSmiKey(index));
__ ldr(result, FieldMemOperand(scratch,
FixedArray::kHeaderSize - kPointerSize));
__ bind(&done);
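
The Operand::PointerOffsetFromSmiKey change relies on the 32-bit Smi encoding (kSmiTag == 0, kSmiTagSize == 1): the tagged key already holds value << 1, so one more left shift by kPointerSizeLog2 - kSmiTagSize scales it to a byte offset. For example:

  // index value 3 is Smi-encoded as 3 << 1 = 6
  // offset = 6 << (kPointerSizeLog2 - kSmiTagSize) = 6 << 1 = 12
  //        = 3 * kPointerSize, i.e. the third pointer-sized field.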
diff --git a/deps/v8/src/arm/lithium-codegen-arm.h b/deps/v8/src/arm/lithium-codegen-arm.h
index 9281537c1..a9b85c89c 100644
--- a/deps/v8/src/arm/lithium-codegen-arm.h
+++ b/deps/v8/src/arm/lithium-codegen-arm.h
@@ -29,10 +29,13 @@
#define V8_ARM_LITHIUM_CODEGEN_ARM_H_
#include "arm/lithium-arm.h"
+
#include "arm/lithium-gap-resolver-arm.h"
#include "deoptimizer.h"
+#include "lithium-codegen.h"
#include "safepoint-table.h"
#include "scopes.h"
+#include "v8utils.h"
namespace v8 {
namespace internal {
@@ -41,26 +44,19 @@ namespace internal {
class LDeferredCode;
class SafepointGenerator;
-class LCodeGen BASE_EMBEDDED {
+class LCodeGen: public LCodeGenBase {
public:
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
- : zone_(info->zone()),
- chunk_(static_cast<LPlatformChunk*>(chunk)),
- masm_(assembler),
- info_(info),
- current_block_(-1),
- current_instruction_(-1),
- instructions_(chunk->instructions()),
+ : LCodeGenBase(chunk, assembler, info),
deoptimizations_(4, info->zone()),
deopt_jump_table_(4, info->zone()),
deoptimization_literals_(8, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
- status_(UNUSED),
translations_(info->zone()),
deferred_(8, info->zone()),
osr_pc_offset_(-1),
- last_lazy_deopt_pc_(0),
+ frame_is_built_(false),
safepoints_(info->zone()),
resolver_(this),
expected_safepoint_kind_(Safepoint::kSimple) {
@@ -68,13 +64,27 @@ class LCodeGen BASE_EMBEDDED {
}
- // Simple accessors.
- MacroAssembler* masm() const { return masm_; }
- CompilationInfo* info() const { return info_; }
- Isolate* isolate() const { return info_->isolate(); }
- Factory* factory() const { return isolate()->factory(); }
- Heap* heap() const { return isolate()->heap(); }
- Zone* zone() const { return zone_; }
+ int LookupDestination(int block_id) const {
+ return chunk()->LookupDestination(block_id);
+ }
+
+ bool IsNextEmittedBlock(int block_id) const {
+ return LookupDestination(block_id) == GetNextEmittedBlock();
+ }
+
+ bool NeedsEagerFrame() const {
+ return GetStackSlotCount() > 0 ||
+ info()->is_non_deferred_calling() ||
+ !info()->IsStub() ||
+ info()->requires_frame();
+ }
+ bool NeedsDeferredFrame() const {
+ return !NeedsEagerFrame() && info()->is_deferred_calling();
+ }
+
+ LinkRegisterStatus GetLinkRegisterState() const {
+ return frame_is_built_ ? kLRHasBeenSaved : kLRHasNotBeenSaved;
+ }
// Support for converting LOperands to assembler types.
// LOperand must be a register.
@@ -84,13 +94,15 @@ class LCodeGen BASE_EMBEDDED {
Register EmitLoadRegister(LOperand* op, Register scratch);
// LOperand must be a double register.
- DoubleRegister ToDoubleRegister(LOperand* op) const;
+ DwVfpRegister ToDoubleRegister(LOperand* op) const;
// LOperand is loaded into dbl_scratch, unless already a double register.
- DoubleRegister EmitLoadDoubleRegister(LOperand* op,
- SwVfpRegister flt_scratch,
- DoubleRegister dbl_scratch);
- int ToInteger32(LConstantOperand* op) const;
+ DwVfpRegister EmitLoadDoubleRegister(LOperand* op,
+ SwVfpRegister flt_scratch,
+ DwVfpRegister dbl_scratch);
+ int32_t ToRepresentation(LConstantOperand* op, const Representation& r) const;
+ int32_t ToInteger32(LConstantOperand* op) const;
+ Smi* ToSmi(LConstantOperand* op) const;
double ToDouble(LConstantOperand* op) const;
Operand ToOperand(LOperand* op);
MemOperand ToMemOperand(LOperand* op) const;
@@ -98,6 +110,7 @@ class LCodeGen BASE_EMBEDDED {
MemOperand ToHighMemOperand(LOperand* op) const;
bool IsInteger32(LConstantOperand* op) const;
+ bool IsSmi(LConstantOperand* op) const;
Handle<Object> ToHandle(LConstantOperand* op) const;
// Try to generate code for the entire chunk, but it may fail if the
@@ -110,10 +123,6 @@ class LCodeGen BASE_EMBEDDED {
void FinishCode(Handle<Code> code);
// Deferred code support.
- void DoDeferredBinaryOpStub(LPointerMap* pointer_map,
- LOperand* left_argument,
- LOperand* right_argument,
- Token::Value op);
void DoDeferredNumberTagD(LNumberTagD* instr);
enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
@@ -122,17 +131,14 @@ class LCodeGen BASE_EMBEDDED {
IntegerSignedness signedness);
void DoDeferredTaggedToI(LTaggedToI* instr);
- void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
+ void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
void DoDeferredStackCheck(LStackCheck* instr);
- void DoDeferredRandom(LRandom* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
- void DoDeferredAllocateObject(LAllocateObject* instr);
+ void DoDeferredAllocate(LAllocate* instr);
void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label* map_check);
-
- void DoCheckMapCommon(Register reg, Register scratch, Handle<Map> map,
- CompareMapMode mode, LEnvironment* env);
+ void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
// Parallel move support.
void DoParallelMove(LParallelMove* move);
@@ -148,10 +154,7 @@ class LCodeGen BASE_EMBEDDED {
int additional_offset);
// Emit frame translation commands for an environment.
- void WriteTranslation(LEnvironment* environment,
- Translation* translation,
- int* arguments_index,
- int* arguments_count);
+ void WriteTranslation(LEnvironment* environment, Translation* translation);
// Declare methods that deal with the individual node types.
#define DECLARE_DO(type) void Do##type(L##type* node);
@@ -159,30 +162,15 @@ class LCodeGen BASE_EMBEDDED {
#undef DECLARE_DO
private:
- enum Status {
- UNUSED,
- GENERATING,
- DONE,
- ABORTED
- };
-
- bool is_unused() const { return status_ == UNUSED; }
- bool is_generating() const { return status_ == GENERATING; }
- bool is_done() const { return status_ == DONE; }
- bool is_aborted() const { return status_ == ABORTED; }
-
StrictModeFlag strict_mode_flag() const {
return info()->is_classic_mode() ? kNonStrictMode : kStrictMode;
}
- LPlatformChunk* chunk() const { return chunk_; }
Scope* scope() const { return scope_; }
- HGraph* graph() const { return chunk_->graph(); }
Register scratch0() { return r9; }
- DwVfpRegister double_scratch0() { return kScratchDoubleReg; }
+ LowDwVfpRegister double_scratch0() { return kScratchDoubleReg; }
- int GetNextEmittedBlock(int block);
LInstruction* GetNextInstruction();
void EmitClassOfTest(Label* if_true,
@@ -193,21 +181,21 @@ class LCodeGen BASE_EMBEDDED {
Register temporary2);
int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
- int GetParameterCount() const { return scope()->num_parameters(); }
- void Abort(const char* reason);
- void Comment(const char* format, ...);
+ void Abort(BailoutReason reason);
void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
// Code generation passes. Returns true if code generation should
// continue.
bool GeneratePrologue();
- bool GenerateBody();
bool GenerateDeferredCode();
bool GenerateDeoptJumpTable();
bool GenerateSafepointTable();
+ // Generates the custom OSR entrypoint and sets the osr_pc_offset.
+ void GenerateOsrPrologue();
+
enum SafepointMode {
RECORD_SIMPLE_SAFEPOINT,
RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
@@ -228,7 +216,8 @@ class LCodeGen BASE_EMBEDDED {
void CallRuntime(const Runtime::Function* function,
int num_arguments,
- LInstruction* instr);
+ LInstruction* instr,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs);
void CallRuntime(Runtime::FunctionId id,
int num_arguments,
@@ -237,9 +226,11 @@ class LCodeGen BASE_EMBEDDED {
CallRuntime(function, num_arguments, instr);
}
+ void LoadContextFromDeferred(LOperand* context);
void CallRuntimeFromDeferred(Runtime::FunctionId id,
int argc,
- LInstruction* instr);
+ LInstruction* instr,
+ LOperand* context);
enum R1State {
R1_UNINITIALIZED,
@@ -249,45 +240,40 @@ class LCodeGen BASE_EMBEDDED {
// Generate a direct call to a known function. Expects the function
// to be in r1.
void CallKnownFunction(Handle<JSFunction> function,
+ int formal_parameter_count,
int arity,
LInstruction* instr,
CallKind call_kind,
R1State r1_state);
- void LoadHeapObject(Register result, Handle<HeapObject> object);
-
void RecordSafepointWithLazyDeopt(LInstruction* instr,
SafepointMode safepoint_mode);
void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode);
- void DeoptimizeIf(Condition cc, LEnvironment* environment);
+ void DeoptimizeIf(Condition condition,
+ LEnvironment* environment,
+ Deoptimizer::BailoutType bailout_type);
+ void DeoptimizeIf(Condition condition, LEnvironment* environment);
+ void ApplyCheckIf(Condition condition, LBoundsCheck* check);
- void AddToTranslation(Translation* translation,
+ void AddToTranslation(LEnvironment* environment,
+ Translation* translation,
LOperand* op,
bool is_tagged,
bool is_uint32,
- int arguments_index,
- int arguments_count);
+ int* object_index_pointer,
+ int* dematerialized_index_pointer);
+ void RegisterDependentCodeForEmbeddedMaps(Handle<Code> code);
void PopulateDeoptimizationData(Handle<Code> code);
int DefineDeoptimizationLiteral(Handle<Object> literal);
void PopulateDeoptimizationLiteralsWithInlinedFunctions();
Register ToRegister(int index) const;
- DoubleRegister ToDoubleRegister(int index) const;
-
- // Specific math operations - used from DoUnaryMathOperation.
- void EmitIntegerMathAbs(LUnaryMathOperation* instr);
- void DoMathAbs(LUnaryMathOperation* instr);
- void DoMathFloor(LUnaryMathOperation* instr);
- void DoMathRound(LUnaryMathOperation* instr);
- void DoMathSqrt(LUnaryMathOperation* instr);
- void DoMathPowHalf(LUnaryMathOperation* instr);
- void DoMathLog(LUnaryMathOperation* instr);
- void DoMathTan(LUnaryMathOperation* instr);
- void DoMathCos(LUnaryMathOperation* instr);
- void DoMathSin(LUnaryMathOperation* instr);
+ DwVfpRegister ToDoubleRegister(int index) const;
+
+ void EmitIntegerMathAbs(LMathAbs* instr);
// Support for recording safepoint and position information.
void RecordSafepoint(LPointerMap* pointers,
@@ -302,20 +288,21 @@ class LCodeGen BASE_EMBEDDED {
void RecordSafepointWithRegistersAndDoubles(LPointerMap* pointers,
int arguments,
Safepoint::DeoptMode mode);
- void RecordPosition(int position);
+
+ void RecordAndWritePosition(int position) V8_OVERRIDE;
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block);
- void EmitBranch(int left_block, int right_block, Condition cc);
+ template<class InstrType>
+ void EmitBranch(InstrType instr, Condition condition);
+ template<class InstrType>
+ void EmitFalseBranch(InstrType instr, Condition condition);
void EmitNumberUntagD(Register input,
- DoubleRegister result,
- bool deoptimize_on_undefined,
+ DwVfpRegister result,
+ bool allow_undefined_as_nan,
bool deoptimize_on_minus_zero,
- LEnvironment* env);
-
- void DeoptIfTaggedButNotSmi(LEnvironment* environment,
- HValue* value,
- LOperand* operand);
+ LEnvironment* env,
+ NumberUntagDMode mode);
// Emits optimized code for typeof x == "y". Modifies input register.
// Returns the condition on which a final split to
@@ -338,24 +325,20 @@ class LCodeGen BASE_EMBEDDED {
// true and false label should be made, to optimize fallthrough.
Condition EmitIsString(Register input,
Register temp1,
- Label* is_not_string);
+ Label* is_not_string,
+ SmiCheck check_needed);
// Emits optimized code for %_IsConstructCall().
// Caller should branch on equal condition.
void EmitIsConstructCall(Register temp1, Register temp2);
- void EmitLoadFieldOrConstantFunction(Register result,
- Register object,
- Handle<Map> type,
- Handle<String> name,
- LEnvironment* env);
-
// Emits optimized code to deep-copy the contents of statically known
// object graphs (e.g. object literal boilerplate).
void EmitDeepCopy(Handle<JSObject> object,
Register result,
Register source,
- int* offset);
+ int* offset,
+ AllocationSiteMode mode);
// Emit optimized code for integer division.
// Inputs are signed.
@@ -368,34 +351,23 @@ class LCodeGen BASE_EMBEDDED {
Register scratch,
LEnvironment* environment);
- struct JumpTableEntry {
- explicit inline JumpTableEntry(Address entry)
- : label(),
- address(entry) { }
- Label label;
- Address address;
- };
-
- void EnsureSpaceForLazyDeopt();
-
- Zone* zone_;
- LPlatformChunk* const chunk_;
- MacroAssembler* const masm_;
- CompilationInfo* const info_;
+ void EnsureSpaceForLazyDeopt(int space_needed) V8_OVERRIDE;
+ void DoLoadKeyedExternalArray(LLoadKeyed* instr);
+ void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
+ void DoLoadKeyedFixedArray(LLoadKeyed* instr);
+ void DoStoreKeyedExternalArray(LStoreKeyed* instr);
+ void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
+ void DoStoreKeyedFixedArray(LStoreKeyed* instr);
- int current_block_;
- int current_instruction_;
- const ZoneList<LInstruction*>* instructions_;
ZoneList<LEnvironment*> deoptimizations_;
- ZoneList<JumpTableEntry> deopt_jump_table_;
+ ZoneList<Deoptimizer::JumpTableEntry> deopt_jump_table_;
ZoneList<Handle<Object> > deoptimization_literals_;
int inlined_function_count_;
Scope* const scope_;
- Status status_;
TranslationBuffer translations_;
ZoneList<LDeferredCode*> deferred_;
int osr_pc_offset_;
- int last_lazy_deopt_pc_;
+ bool frame_is_built_;
// Builder that keeps track of safepoints in the code. The table
// itself is emitted at the end of the generated code.
@@ -406,11 +378,12 @@ class LCodeGen BASE_EMBEDDED {
Safepoint::Kind expected_safepoint_kind_;
- class PushSafepointRegistersScope BASE_EMBEDDED {
+ class PushSafepointRegistersScope V8_FINAL BASE_EMBEDDED {
public:
PushSafepointRegistersScope(LCodeGen* codegen,
Safepoint::Kind kind)
: codegen_(codegen) {
+ ASSERT(codegen_->info()->is_calling());
ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
codegen_->expected_safepoint_kind_ = kind;
@@ -453,7 +426,7 @@ class LCodeGen BASE_EMBEDDED {
};
-class LDeferredCode: public ZoneObject {
+class LDeferredCode : public ZoneObject {
public:
explicit LDeferredCode(LCodeGen* codegen)
: codegen_(codegen),
@@ -462,7 +435,7 @@ class LDeferredCode: public ZoneObject {
codegen->AddDeferredCode(this);
}
- virtual ~LDeferredCode() { }
+ virtual ~LDeferredCode() {}
virtual void Generate() = 0;
virtual LInstruction* instr() = 0;
diff --git a/deps/v8/src/arm/lithium-gap-resolver-arm.cc b/deps/v8/src/arm/lithium-gap-resolver-arm.cc
index c100720d8..0c6b2adad 100644
--- a/deps/v8/src/arm/lithium-gap-resolver-arm.cc
+++ b/deps/v8/src/arm/lithium-gap-resolver-arm.cc
@@ -219,7 +219,6 @@ void LGapResolver::EmitMove(int index) {
ASSERT(destination->IsStackSlot());
__ str(source_register, cgen_->ToMemOperand(destination));
}
-
} else if (source->IsStackSlot()) {
MemOperand source_operand = cgen_->ToMemOperand(source);
if (destination->IsRegister()) {
@@ -248,26 +247,34 @@ void LGapResolver::EmitMove(int index) {
LConstantOperand* constant_source = LConstantOperand::cast(source);
if (destination->IsRegister()) {
Register dst = cgen_->ToRegister(destination);
+ Representation r = cgen_->IsSmi(constant_source)
+ ? Representation::Smi() : Representation::Integer32();
if (cgen_->IsInteger32(constant_source)) {
- __ mov(dst, Operand(cgen_->ToInteger32(constant_source)));
+ __ mov(dst, Operand(cgen_->ToRepresentation(constant_source, r)));
} else {
- __ LoadObject(dst, cgen_->ToHandle(constant_source));
+ __ Move(dst, cgen_->ToHandle(constant_source));
}
+ } else if (destination->IsDoubleRegister()) {
+ DwVfpRegister result = cgen_->ToDoubleRegister(destination);
+ double v = cgen_->ToDouble(constant_source);
+ __ Vmov(result, v, ip);
} else {
ASSERT(destination->IsStackSlot());
ASSERT(!in_cycle_); // Constant moves happen after all cycles are gone.
+ Representation r = cgen_->IsSmi(constant_source)
+ ? Representation::Smi() : Representation::Integer32();
if (cgen_->IsInteger32(constant_source)) {
__ mov(kSavedValueRegister,
- Operand(cgen_->ToInteger32(constant_source)));
+ Operand(cgen_->ToRepresentation(constant_source, r)));
} else {
- __ LoadObject(kSavedValueRegister,
+ __ Move(kSavedValueRegister,
cgen_->ToHandle(constant_source));
}
__ str(kSavedValueRegister, cgen_->ToMemOperand(destination));
}
} else if (source->IsDoubleRegister()) {
- DoubleRegister source_register = cgen_->ToDoubleRegister(source);
+ DwVfpRegister source_register = cgen_->ToDoubleRegister(source);
if (destination->IsDoubleRegister()) {
__ vmov(cgen_->ToDoubleRegister(destination), source_register);
} else {
diff --git a/deps/v8/src/arm/lithium-gap-resolver-arm.h b/deps/v8/src/arm/lithium-gap-resolver-arm.h
index 9dd09c8d0..044c2864a 100644
--- a/deps/v8/src/arm/lithium-gap-resolver-arm.h
+++ b/deps/v8/src/arm/lithium-gap-resolver-arm.h
@@ -38,7 +38,7 @@ namespace internal {
class LCodeGen;
class LGapResolver;
-class LGapResolver BASE_EMBEDDED {
+class LGapResolver V8_FINAL BASE_EMBEDDED {
public:
explicit LGapResolver(LCodeGen* owner);
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index 623bd6a01..d8771cb70 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -29,11 +29,13 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_ARM)
+#if V8_TARGET_ARCH_ARM
#include "bootstrapper.h"
#include "codegen.h"
+#include "cpu-profiler.h"
#include "debug.h"
+#include "isolate-inl.h"
#include "runtime.h"
namespace v8 {
@@ -51,44 +53,15 @@ MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
}
-// We always generate arm code, never thumb code, even if V8 is compiled to
-// thumb, so we require inter-working support
-#if defined(__thumb__) && !defined(USE_THUMB_INTERWORK)
-#error "flag -mthumb-interwork missing"
-#endif
-
-
-// We do not support thumb inter-working with an arm architecture not supporting
-// the blx instruction (below v5t). If you know what CPU you are compiling for
-// you can use -march=armv7 or similar.
-#if defined(USE_THUMB_INTERWORK) && !defined(CAN_USE_THUMB_INSTRUCTIONS)
-# error "For thumb inter-working we require an architecture which supports blx"
-#endif
-
-
-// Using bx does not yield better code, so use it only when required
-#if defined(USE_THUMB_INTERWORK)
-#define USE_BX 1
-#endif
-
-
void MacroAssembler::Jump(Register target, Condition cond) {
-#if USE_BX
bx(target, cond);
-#else
- mov(pc, Operand(target), LeaveCC, cond);
-#endif
}
void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
Condition cond) {
-#if USE_BX
mov(ip, Operand(target, rmode));
bx(ip, cond);
-#else
- mov(pc, Operand(target, rmode), LeaveCC, cond);
-#endif
}
@@ -103,16 +76,13 @@ void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond) {
ASSERT(RelocInfo::IsCodeTarget(rmode));
// 'code' is always generated ARM code, never THUMB code
+ AllowDeferredHandleDereference embedding_raw_address;
Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
}
int MacroAssembler::CallSize(Register target, Condition cond) {
-#ifdef USE_BLX
return kInstrSize;
-#else
- return 2 * kInstrSize;
-#endif
}
@@ -121,13 +91,7 @@ void MacroAssembler::Call(Register target, Condition cond) {
BlockConstPoolScope block_const_pool(this);
Label start;
bind(&start);
-#ifdef USE_BLX
blx(target, cond);
-#else
- // set lr for return at current pc + 8
- mov(lr, Operand(pc), LeaveCC, cond);
- mov(pc, Operand(target), LeaveCC, cond);
-#endif
ASSERT_EQ(CallSize(target, cond), SizeOfCodeGeneratedSince(&start));
}
@@ -170,7 +134,6 @@ void MacroAssembler::Call(Address target,
set_predictable_code_size(true);
}
-#ifdef USE_BLX
// Call sequence on V7 or later may be :
// movw ip, #... @ call address low 16
// movt ip, #... @ call address high 16
@@ -191,12 +154,6 @@ void MacroAssembler::Call(Address target,
mov(ip, Operand(reinterpret_cast<int32_t>(target), rmode));
blx(ip, cond);
-#else
- // Set lr for return at current pc + 8.
- mov(lr, Operand(pc), LeaveCC, cond);
- // Emit a ldr<cond> pc, [pc + offset of target in constant pool].
- mov(pc, Operand(reinterpret_cast<int32_t>(target), rmode), LeaveCC, cond);
-#endif
ASSERT_EQ(CallSize(target, rmode, cond), SizeOfCodeGeneratedSince(&start));
if (mode == NEVER_INLINE_TARGET_ADDRESS) {
set_predictable_code_size(old_predictable_code_size);
@@ -208,6 +165,7 @@ int MacroAssembler::CallSize(Handle<Code> code,
RelocInfo::Mode rmode,
TypeFeedbackId ast_id,
Condition cond) {
+ AllowDeferredHandleDereference using_raw_address;
return CallSize(reinterpret_cast<Address>(code.location()), rmode, cond);
}
@@ -225,16 +183,13 @@ void MacroAssembler::Call(Handle<Code> code,
rmode = RelocInfo::CODE_TARGET_WITH_ID;
}
// 'code' is always generated ARM code, never THUMB code
+ AllowDeferredHandleDereference embedding_raw_address;
Call(reinterpret_cast<Address>(code.location()), rmode, cond, mode);
}
void MacroAssembler::Ret(Condition cond) {
-#if USE_BX
bx(lr, cond);
-#else
- mov(pc, Operand(lr), LeaveCC, cond);
-#endif
}
@@ -279,7 +234,19 @@ void MacroAssembler::Push(Handle<Object> handle) {
void MacroAssembler::Move(Register dst, Handle<Object> value) {
- mov(dst, Operand(value));
+ AllowDeferredHandleDereference smi_check;
+ if (value->IsSmi()) {
+ mov(dst, Operand(value));
+ } else {
+ ASSERT(value->IsHeapObject());
+ if (isolate()->heap()->InNewSpace(*value)) {
+ Handle<Cell> cell = isolate()->factory()->NewCell(value);
+ mov(dst, Operand(cell));
+ ldr(dst, FieldMemOperand(dst, Cell::kValueOffset));
+ } else {
+ mov(dst, Operand(value));
+ }
+ }
}
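
A representative call site (mirroring the InvokeFunction change further down in this patch): handles that may point into new space are now loaded through a Cell so the GC can move the object, while Smis and old-space objects remain plain immediates.

  __ Move(r1, function);  // emits the Cell-indirected load only if *function
                          // currently lives in new space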
@@ -290,9 +257,7 @@ void MacroAssembler::Move(Register dst, Register src, Condition cond) {
}
-void MacroAssembler::Move(DoubleRegister dst, DoubleRegister src) {
- ASSERT(CpuFeatures::IsSupported(VFP2));
- CpuFeatures::Scope scope(VFP2);
+void MacroAssembler::Move(DwVfpRegister dst, DwVfpRegister src) {
if (!dst.is(src)) {
vmov(dst, src);
}
@@ -304,7 +269,7 @@ void MacroAssembler::And(Register dst, Register src1, const Operand& src2,
if (!src2.is_reg() &&
!src2.must_output_reloc_info(this) &&
src2.immediate() == 0) {
- mov(dst, Operand(0, RelocInfo::NONE), LeaveCC, cond);
+ mov(dst, Operand::Zero(), LeaveCC, cond);
} else if (!src2.is_single_instruction(this) &&
!src2.must_output_reloc_info(this) &&
CpuFeatures::IsSupported(ARMv7) &&
@@ -410,7 +375,7 @@ void MacroAssembler::Usat(Register dst, int satpos, const Operand& src,
}
tst(dst, Operand(~satval));
b(eq, &done);
- mov(dst, Operand(0, RelocInfo::NONE), LeaveCC, mi); // 0 if negative.
+ mov(dst, Operand::Zero(), LeaveCC, mi); // 0 if negative.
mov(dst, Operand(satval), LeaveCC, pl); // satval if positive.
bind(&done);
} else {
@@ -422,6 +387,15 @@ void MacroAssembler::Usat(Register dst, int satpos, const Operand& src,
void MacroAssembler::LoadRoot(Register destination,
Heap::RootListIndex index,
Condition cond) {
+ if (CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) &&
+ isolate()->heap()->RootCanBeTreatedAsConstant(index) &&
+ !predictable_code_size()) {
+ // The CPU supports fast immediate values, and this root will never
+ // change. We will load it as a relocatable immediate value.
+ Handle<Object> root(&isolate()->heap()->roots_array_start()[index]);
+ mov(destination, Operand(root), LeaveCC, cond);
+ return;
+ }
ldr(destination, MemOperand(kRootRegister, index << kPointerSizeLog2), cond);
}
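
When the movw/movt fast path is not taken, the root is still fetched from the roots array through kRootRegister (r10 on ARM); with the fast path it is materialized as a relocatable immediate, which is safe because a constant root never changes. Illustrative encoding for an arbitrary index:

  // slow path: ldr destination, [r10, #index << kPointerSizeLog2]
  //            e.g. index 5 -> [r10, #20]
  // fast path: movw/movt destination, #<root object>  (EMBEDDED_OBJECT reloc)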
@@ -433,19 +407,6 @@ void MacroAssembler::StoreRoot(Register source,
}
-void MacroAssembler::LoadHeapObject(Register result,
- Handle<HeapObject> object) {
- if (isolate()->heap()->InNewSpace(*object)) {
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(object);
- mov(result, Operand(cell));
- ldr(result, FieldMemOperand(result, JSGlobalPropertyCell::kValueOffset));
- } else {
- mov(result, Operand(object));
- }
-}
-
-
void MacroAssembler::InNewSpace(Register object,
Register scratch,
Condition cond,
@@ -517,23 +478,16 @@ void MacroAssembler::RecordWrite(Register object,
SaveFPRegsMode fp_mode,
RememberedSetAction remembered_set_action,
SmiCheck smi_check) {
- // The compiled code assumes that record write doesn't change the
- // context register, so we check that none of the clobbered
- // registers are cp.
- ASSERT(!address.is(cp) && !value.is(cp));
-
if (emit_debug_code()) {
ldr(ip, MemOperand(address));
cmp(ip, value);
- Check(eq, "Wrong address or value passed to RecordWrite");
+ Check(eq, kWrongAddressOrValuePassedToRecordWrite);
}
Label done;
if (smi_check == INLINE_SMI_CHECK) {
- ASSERT_EQ(0, kSmiTag);
- tst(value, Operand(kSmiTagMask));
- b(eq, &done);
+ JumpIfSmi(value, &done);
}
CheckPageFlag(value,
@@ -631,20 +585,24 @@ void MacroAssembler::PopSafepointRegisters() {
void MacroAssembler::PushSafepointRegistersAndDoubles() {
+ // Number of d-regs not known at snapshot time.
+ ASSERT(!Serializer::enabled());
PushSafepointRegisters();
- sub(sp, sp, Operand(DwVfpRegister::kNumAllocatableRegisters *
+ sub(sp, sp, Operand(DwVfpRegister::NumAllocatableRegisters() *
kDoubleSize));
- for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; i++) {
+ for (int i = 0; i < DwVfpRegister::NumAllocatableRegisters(); i++) {
vstr(DwVfpRegister::FromAllocationIndex(i), sp, i * kDoubleSize);
}
}
void MacroAssembler::PopSafepointRegistersAndDoubles() {
- for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; i++) {
+ // Number of d-regs not known at snapshot time.
+ ASSERT(!Serializer::enabled());
+ for (int i = 0; i < DwVfpRegister::NumAllocatableRegisters(); i++) {
vldr(DwVfpRegister::FromAllocationIndex(i), sp, i * kDoubleSize);
}
- add(sp, sp, Operand(DwVfpRegister::kNumAllocatableRegisters *
+ add(sp, sp, Operand(DwVfpRegister::NumAllocatableRegisters() *
kDoubleSize));
PopSafepointRegisters();
}
@@ -679,8 +637,10 @@ MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
+ // Number of d-regs not known at snapshot time.
+ ASSERT(!Serializer::enabled());
// General purpose registers are pushed last on the stack.
- int doubles_size = DwVfpRegister::kNumAllocatableRegisters * kDoubleSize;
+ int doubles_size = DwVfpRegister::NumAllocatableRegisters() * kDoubleSize;
int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
return MemOperand(sp, doubles_size + register_offset);
}
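
The slot computation simply mirrors the push order established above: the double block is reserved immediately above sp, and the core safepoint slots sit above it.

  // sp + 0 .. doubles_size - 1            : saved d-registers
  // sp + doubles_size + register_offset   : safepoint slot for 'reg'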
@@ -690,16 +650,15 @@ void MacroAssembler::Ldrd(Register dst1, Register dst2,
const MemOperand& src, Condition cond) {
ASSERT(src.rm().is(no_reg));
ASSERT(!dst1.is(lr)); // r14.
- ASSERT_EQ(0, dst1.code() % 2);
- ASSERT_EQ(dst1.code() + 1, dst2.code());
// V8 does not use this addressing mode, so the fallback code
// below doesn't support it yet.
ASSERT((src.am() != PreIndex) && (src.am() != NegPreIndex));
// Generate two ldr instructions if ldrd is not available.
- if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) {
- CpuFeatures::Scope scope(ARMv7);
+ if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size() &&
+ (dst1.code() % 2 == 0) && (dst1.code() + 1 == dst2.code())) {
+ CpuFeatureScope scope(this, ARMv7);
ldrd(dst1, dst2, src, cond);
} else {
if ((src.am() == Offset) || (src.am() == NegOffset)) {
@@ -732,16 +691,15 @@ void MacroAssembler::Strd(Register src1, Register src2,
const MemOperand& dst, Condition cond) {
ASSERT(dst.rm().is(no_reg));
ASSERT(!src1.is(lr)); // r14.
- ASSERT_EQ(0, src1.code() % 2);
- ASSERT_EQ(src1.code() + 1, src2.code());
// V8 does not use this addressing mode, so the fallback code
// below doesn't support it yet.
ASSERT((dst.am() != PreIndex) && (dst.am() != NegPreIndex));
// Generate two str instructions if strd is not available.
- if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) {
- CpuFeatures::Scope scope(ARMv7);
+ if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size() &&
+ (src1.code() % 2 == 0) && (src1.code() + 1 == src2.code())) {
+ CpuFeatureScope scope(this, ARMv7);
strd(src1, src2, dst, cond);
} else {
MemOperand dst2(dst);
@@ -759,12 +717,22 @@ void MacroAssembler::Strd(Register src1, Register src2,
}
-void MacroAssembler::ClearFPSCRBits(const uint32_t bits_to_clear,
- const Register scratch,
- const Condition cond) {
- vmrs(scratch, cond);
- bic(scratch, scratch, Operand(bits_to_clear), LeaveCC, cond);
- vmsr(scratch, cond);
+void MacroAssembler::VFPEnsureFPSCRState(Register scratch) {
+ // If needed, restore wanted bits of FPSCR.
+ Label fpscr_done;
+ vmrs(scratch);
+ tst(scratch, Operand(kVFPDefaultNaNModeControlBit));
+ b(ne, &fpscr_done);
+ orr(scratch, scratch, Operand(kVFPDefaultNaNModeControlBit));
+ vmsr(scratch);
+ bind(&fpscr_done);
+}
+
+
+void MacroAssembler::VFPCanonicalizeNaN(const DwVfpRegister dst,
+ const DwVfpRegister src,
+ const Condition cond) {
+ vsub(dst, src, kDoubleRegZero, cond);
}
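
VFPCanonicalizeNaN depends on the Default NaN bit that VFPEnsureFPSCRState turns on: with that FPSCR bit set, any VFP operation that produces a NaN yields the single canonical quiet NaN, so subtracting +0.0 canonicalizes NaNs for free while leaving ordinary values untouched.

  // src is a NaN        -> vsub writes the canonical quiet NaN into dst
  // src is any number x -> dst = x - 0.0 = x (value preserved)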
@@ -803,19 +771,174 @@ void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
void MacroAssembler::Vmov(const DwVfpRegister dst,
const double imm,
- const Register scratch,
- const Condition cond) {
- ASSERT(CpuFeatures::IsEnabled(VFP2));
+ const Register scratch) {
static const DoubleRepresentation minus_zero(-0.0);
static const DoubleRepresentation zero(0.0);
DoubleRepresentation value(imm);
// Handle special values first.
if (value.bits == zero.bits) {
- vmov(dst, kDoubleRegZero, cond);
+ vmov(dst, kDoubleRegZero);
} else if (value.bits == minus_zero.bits) {
- vneg(dst, kDoubleRegZero, cond);
+ vneg(dst, kDoubleRegZero);
} else {
- vmov(dst, imm, scratch, cond);
+ vmov(dst, imm, scratch);
+ }
+}
+
+
+void MacroAssembler::VmovHigh(Register dst, DwVfpRegister src) {
+ if (src.code() < 16) {
+ const LowDwVfpRegister loc = LowDwVfpRegister::from_code(src.code());
+ vmov(dst, loc.high());
+ } else {
+ vmov(dst, VmovIndexHi, src);
+ }
+}
+
+
+void MacroAssembler::VmovHigh(DwVfpRegister dst, Register src) {
+ if (dst.code() < 16) {
+ const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code());
+ vmov(loc.high(), src);
+ } else {
+ vmov(dst, VmovIndexHi, src);
+ }
+}
+
+
+void MacroAssembler::VmovLow(Register dst, DwVfpRegister src) {
+ if (src.code() < 16) {
+ const LowDwVfpRegister loc = LowDwVfpRegister::from_code(src.code());
+ vmov(dst, loc.low());
+ } else {
+ vmov(dst, VmovIndexLo, src);
+ }
+}
+
+
+void MacroAssembler::VmovLow(DwVfpRegister dst, Register src) {
+ if (dst.code() < 16) {
+ const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code());
+ vmov(loc.low(), src);
+ } else {
+ vmov(dst, VmovIndexLo, src);
+ }
+}
+
+
+void MacroAssembler::LoadNumber(Register object,
+ LowDwVfpRegister dst,
+ Register heap_number_map,
+ Register scratch,
+ Label* not_number) {
+ Label is_smi, done;
+
+ UntagAndJumpIfSmi(scratch, object, &is_smi);
+ JumpIfNotHeapNumber(object, heap_number_map, scratch, not_number);
+
+ vldr(dst, FieldMemOperand(object, HeapNumber::kValueOffset));
+ b(&done);
+
+ // Handle loading a double from a smi.
+ bind(&is_smi);
+ vmov(dst.high(), scratch);
+ vcvt_f64_s32(dst, dst.high());
+
+ bind(&done);
+}
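
The smi fast path above converts through one single-precision lane of the destination pair: the untagged integer is left in scratch, moved into dst.high(), and vcvt_f64_s32 widens it in place. For example:

  // object = Smi(7): UntagAndJumpIfSmi leaves scratch = 7
  // vmov dst.high(), scratch      ; raw int 7 in the upper S register
  // vcvt.f64.s32 dst, dst.high()  ; dst = 7.0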
+
+
+void MacroAssembler::LoadNumberAsInt32Double(Register object,
+ DwVfpRegister double_dst,
+ Register heap_number_map,
+ Register scratch,
+ LowDwVfpRegister double_scratch,
+ Label* not_int32) {
+ ASSERT(!scratch.is(object));
+ ASSERT(!heap_number_map.is(object) && !heap_number_map.is(scratch));
+
+ Label done, obj_is_not_smi;
+
+ UntagAndJumpIfNotSmi(scratch, object, &obj_is_not_smi);
+ vmov(double_scratch.low(), scratch);
+ vcvt_f64_s32(double_dst, double_scratch.low());
+ b(&done);
+
+ bind(&obj_is_not_smi);
+ JumpIfNotHeapNumber(object, heap_number_map, scratch, not_int32);
+
+ // Load the number.
+ // Load the double value.
+ vldr(double_dst, FieldMemOperand(object, HeapNumber::kValueOffset));
+
+ TestDoubleIsInt32(double_dst, double_scratch);
+ // Jump to not_int32 if the operation did not succeed.
+ b(ne, not_int32);
+
+ bind(&done);
+}
+
+
+void MacroAssembler::LoadNumberAsInt32(Register object,
+ Register dst,
+ Register heap_number_map,
+ Register scratch,
+ DwVfpRegister double_scratch0,
+ LowDwVfpRegister double_scratch1,
+ Label* not_int32) {
+ ASSERT(!dst.is(object));
+ ASSERT(!scratch.is(object));
+
+ Label done, maybe_undefined;
+
+ UntagAndJumpIfSmi(dst, object, &done);
+
+ JumpIfNotHeapNumber(object, heap_number_map, scratch, &maybe_undefined);
+
+ // Object is a heap number.
+ // Convert the floating point value to a 32-bit integer.
+ // Load the double value.
+ vldr(double_scratch0, FieldMemOperand(object, HeapNumber::kValueOffset));
+
+ TryDoubleToInt32Exact(dst, double_scratch0, double_scratch1);
+ // Jump to not_int32 if the operation did not succeed.
+ b(ne, not_int32);
+ b(&done);
+
+ bind(&maybe_undefined);
+ CompareRoot(object, Heap::kUndefinedValueRootIndex);
+ b(ne, not_int32);
+ // |undefined| is truncated to 0.
+ mov(dst, Operand(Smi::FromInt(0)));
+ // Fall through.
+
+ bind(&done);
+}
+
+
+void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
+ if (frame_mode == BUILD_STUB_FRAME) {
+ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
+ Push(Smi::FromInt(StackFrame::STUB));
+ // Adjust FP to point to saved FP.
+ add(fp, sp, Operand(2 * kPointerSize));
+ } else {
+    PredictableCodeSizeScope predictable_code_size_scope(
+ this, kNoCodeAgeSequenceLength * Assembler::kInstrSize);
+ // The following three instructions must remain together and unmodified
+ // for code aging to work properly.
+ if (isolate()->IsCodePreAgingActive()) {
+ // Pre-age the code.
+ Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
+ add(r0, pc, Operand(-8));
+ ldr(pc, MemOperand(pc, -4));
+ dd(reinterpret_cast<uint32_t>(stub->instruction_start()));
+ } else {
+ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
+ nop(ip.code());
+ // Adjust FP to point to saved FP.
+ add(fp, sp, Operand(2 * kPointerSize));
+ }
}
}
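
In the pre-aged branch the three emitted words form a self-contained jump to the code-age stub (on ARM, pc reads as the current instruction address + 8):

  // offset 0: add r0, pc, #-8     ; r0 = start of this sequence
  // offset 4: ldr pc, [pc, #-4]   ; pc = word at offset 8
  // offset 8: .word stub->instruction_start()

The young branch emits the same number of words (stm / marker nop / add fp), which is why the PredictableCodeSizeScope pins the sequence to kNoCodeAgeSequenceLength instructions.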
@@ -853,7 +976,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
// Reserve room for saved entry sp and code object.
sub(sp, sp, Operand(2 * kPointerSize));
if (emit_debug_code()) {
- mov(ip, Operand(0));
+ mov(ip, Operand::Zero());
str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
mov(ip, Operand(CodeObject()));
@@ -867,12 +990,9 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
// Optionally save all double registers.
if (save_doubles) {
- DwVfpRegister first = d0;
- DwVfpRegister last =
- DwVfpRegister::from_code(DwVfpRegister::kNumRegisters - 1);
- vstm(db_w, sp, first, last);
+ SaveFPRegs(sp, ip);
// Note that d0 will be accessible at
- // fp - 2 * kPointerSize - DwVfpRegister::kNumRegisters * kDoubleSize,
+ // fp - 2 * kPointerSize - DwVfpRegister::kMaxNumRegisters * kDoubleSize,
// since the sp slot and code slot were pushed after the fp.
}
@@ -897,7 +1017,7 @@ void MacroAssembler::InitializeNewString(Register string,
Heap::RootListIndex map_index,
Register scratch1,
Register scratch2) {
- mov(scratch1, Operand(length, LSL, kSmiTagSize));
+ SmiTag(scratch1, length);
LoadRoot(scratch2, map_index);
str(scratch1, FieldMemOperand(string, String::kLengthOffset));
mov(scratch1, Operand(String::kEmptyHashField));
@@ -907,44 +1027,47 @@ void MacroAssembler::InitializeNewString(Register string,
int MacroAssembler::ActivationFrameAlignment() {
-#if defined(V8_HOST_ARCH_ARM)
+#if V8_HOST_ARCH_ARM
// Running on the real platform. Use the alignment as mandated by the local
// environment.
// Note: This will break if we ever start generating snapshots on one ARM
// platform for another ARM platform with a different alignment.
return OS::ActivationFrameAlignment();
-#else // defined(V8_HOST_ARCH_ARM)
+#else // V8_HOST_ARCH_ARM
// If we are using the simulator then we should always align to the expected
// alignment. As the simulator is used to generate snapshots we do not know
// if the target platform will need alignment, so this is controlled from a
// flag.
return FLAG_sim_stack_alignment;
-#endif // defined(V8_HOST_ARCH_ARM)
+#endif // V8_HOST_ARCH_ARM
}
void MacroAssembler::LeaveExitFrame(bool save_doubles,
- Register argument_count) {
+ Register argument_count,
+ bool restore_context) {
// Optionally restore all double registers.
if (save_doubles) {
// Calculate the stack location of the saved doubles and restore them.
const int offset = 2 * kPointerSize;
- sub(r3, fp, Operand(offset + DwVfpRegister::kNumRegisters * kDoubleSize));
- DwVfpRegister first = d0;
- DwVfpRegister last =
- DwVfpRegister::from_code(DwVfpRegister::kNumRegisters - 1);
- vldm(ia, r3, first, last);
+ sub(r3, fp,
+ Operand(offset + DwVfpRegister::kMaxNumRegisters * kDoubleSize));
+ RestoreFPRegs(r3, ip);
}
// Clear top frame.
- mov(r3, Operand(0, RelocInfo::NONE));
+ mov(r3, Operand::Zero());
mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
str(r3, MemOperand(ip));
+
// Restore current context from top and clear it in debug mode.
- mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
- ldr(cp, MemOperand(ip));
+ if (restore_context) {
+ mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
+ ldr(cp, MemOperand(ip));
+ }
#ifdef DEBUG
+ mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
str(r3, MemOperand(ip));
#endif
@@ -956,8 +1079,8 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles,
}
}
-void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) {
- ASSERT(CpuFeatures::IsSupported(VFP2));
+
+void MacroAssembler::GetCFunctionDoubleResult(const DwVfpRegister dst) {
if (use_eabi_hardfloat()) {
Move(dst, d0);
} else {
@@ -1143,7 +1266,7 @@ void MacroAssembler::InvokeFunction(Register fun,
ldr(expected_reg,
FieldMemOperand(code_reg,
SharedFunctionInfo::kFormalParameterCountOffset));
- mov(expected_reg, Operand(expected_reg, ASR, kSmiTagSize));
+ SmiUntag(expected_reg);
ldr(code_reg,
FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
@@ -1153,6 +1276,7 @@ void MacroAssembler::InvokeFunction(Register fun,
void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
+ const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
const CallWrapper& call_wrapper,
@@ -1161,10 +1285,9 @@ void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
ASSERT(flag == JUMP_FUNCTION || has_frame());
// Get the function and setup the context.
- LoadHeapObject(r1, function);
+ Move(r1, function);
ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
- ParameterCount expected(function->shared()->formal_parameter_count());
// We call indirectly through the code field in the function to
// allow recompilation to take effect without changing any of the
// call sites.
@@ -1205,13 +1328,23 @@ void MacroAssembler::IsObjectJSStringType(Register object,
}
+void MacroAssembler::IsObjectNameType(Register object,
+ Register scratch,
+ Label* fail) {
+ ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+ cmp(scratch, Operand(LAST_NAME_TYPE));
+ b(hi, fail);
+}
+
+
#ifdef ENABLE_DEBUGGER_SUPPORT
void MacroAssembler::DebugBreak() {
- mov(r0, Operand(0, RelocInfo::NONE));
+ mov(r0, Operand::Zero());
mov(r1, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
CEntryStub ces(1);
ASSERT(AllowThisStubCall(&ces));
- Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
+ Call(ces.GetCode(isolate()), RelocInfo::DEBUG_BREAK);
}
#endif
@@ -1226,7 +1359,7 @@ void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
- // For the JSEntry handler, we must preserve r0-r4, r5-r7 are available.
+ // For the JSEntry handler, we must preserve r0-r4, r5-r6 are available.
// We will build up the handler from the bottom by pushing on the stack.
// Set up the code object (r5) and the state (r6) for pushing.
unsigned state =
@@ -1237,9 +1370,9 @@ void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
// Push the frame pointer, context, state, and code object.
if (kind == StackHandler::JS_ENTRY) {
- mov(r7, Operand(Smi::FromInt(0))); // Indicates no context.
- mov(ip, Operand(0, RelocInfo::NONE)); // NULL frame pointer.
- stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() | ip.bit());
+ mov(cp, Operand(Smi::FromInt(0))); // Indicates no context.
+ mov(ip, Operand::Zero()); // NULL frame pointer.
+ stm(db_w, sp, r5.bit() | r6.bit() | cp.bit() | ip.bit());
} else {
stm(db_w, sp, r5.bit() | r6.bit() | cp.bit() | fp.bit());
}
@@ -1271,7 +1404,7 @@ void MacroAssembler::JumpToHandlerEntry() {
mov(r2, Operand(r2, LSR, StackHandler::kKindWidth)); // Handler index.
ldr(r2, MemOperand(r3, r2, LSL, kPointerSizeLog2)); // Smi-tagged offset.
add(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start.
- add(pc, r1, Operand(r2, ASR, kSmiTagSize)); // Jump.
+ add(pc, r1, Operand::SmiUntag(r2)); // Jump
}
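
The handler offset popped into r2 is a Smi-tagged byte offset into the code object, so the jump target is code start plus the untagged value:

  // r1 = code start, r2 = Smi(0x30), i.e. 0x60
  // pc = r1 + (0x60 >> kSmiTagSize) = r1 + 0x30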
@@ -1362,8 +1495,8 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
ldr(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
// In debug mode, make sure the lexical context is set.
#ifdef DEBUG
- cmp(scratch, Operand(0, RelocInfo::NONE));
- Check(ne, "we should not have an empty lexical context");
+ cmp(scratch, Operand::Zero());
+ Check(ne, kWeShouldNotHaveAnEmptyLexicalContext);
#endif
// Load the native context of the current context.
@@ -1374,7 +1507,6 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
// Check the context is a native context.
if (emit_debug_code()) {
- // TODO(119): avoid push(holder_reg)/pop(holder_reg)
// Cannot use ip as a temporary in this verification code. Due to the fact
// that ip is clobbered as part of cmp with an object Operand.
push(holder_reg); // Temporarily save holder on the stack.
@@ -1382,7 +1514,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
ldr(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
LoadRoot(ip, Heap::kNativeContextMapRootIndex);
cmp(holder_reg, ip);
- Check(eq, "JSGlobalObject::native_context should be a native context.");
+ Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext);
pop(holder_reg); // Restore holder.
}
@@ -1393,19 +1525,18 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
// Check the context is a native context.
if (emit_debug_code()) {
- // TODO(119): avoid push(holder_reg)/pop(holder_reg)
// Cannot use ip as a temporary in this verification code, because ip is
// clobbered as part of cmp with an object Operand.
push(holder_reg); // Temporarily save holder on the stack.
mov(holder_reg, ip); // Move ip to its holding place.
LoadRoot(ip, Heap::kNullValueRootIndex);
cmp(holder_reg, ip);
- Check(ne, "JSGlobalProxy::context() should not be null.");
+ Check(ne, kJSGlobalProxyContextShouldNotBeNull);
ldr(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
LoadRoot(ip, Heap::kNativeContextMapRootIndex);
cmp(holder_reg, ip);
- Check(eq, "JSGlobalObject::native_context should be a native context.");
+ Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext);
// Restore ip is not needed. ip is reloaded below.
pop(holder_reg); // Restore holder.
// Restore ip to holder's context.
@@ -1489,7 +1620,7 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss,
// Compute the capacity mask.
ldr(t1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
- mov(t1, Operand(t1, ASR, kSmiTagSize)); // convert smi to int
+ SmiUntag(t1);
sub(t1, t1, Operand(1));
// Generate an unrolled loop that performs a few probes before giving up.
@@ -1534,12 +1665,13 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss,
}
-void MacroAssembler::AllocateInNewSpace(int object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags) {
+void MacroAssembler::Allocate(int object_size,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ AllocationFlags flags) {
+ ASSERT(object_size <= Page::kMaxNonCodeHeapObjectSize);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -1567,26 +1699,21 @@ void MacroAssembler::AllocateInNewSpace(int object_size,
// The values must be adjacent in memory to allow the use of LDM.
// Also, assert that the registers are numbered such that the values
// are loaded in the correct order.
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
- ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address(isolate());
+ ExternalReference allocation_top =
+ AllocationUtils::GetAllocationTopReference(isolate(), flags);
+ ExternalReference allocation_limit =
+ AllocationUtils::GetAllocationLimitReference(isolate(), flags);
+
intptr_t top =
- reinterpret_cast<intptr_t>(new_space_allocation_top.address());
+ reinterpret_cast<intptr_t>(allocation_top.address());
intptr_t limit =
- reinterpret_cast<intptr_t>(new_space_allocation_limit.address());
+ reinterpret_cast<intptr_t>(allocation_limit.address());
ASSERT((limit - top) == kPointerSize);
ASSERT(result.code() < ip.code());
- // Set up allocation top address and object size registers.
+ // Set up allocation top address register.
Register topaddr = scratch1;
- Register obj_size_reg = scratch2;
- mov(topaddr, Operand(new_space_allocation_top));
- Operand obj_size_operand = Operand(object_size);
- if (!obj_size_operand.is_single_instruction(this)) {
- // We are about to steal IP, so we need to load this value first
- mov(obj_size_reg, obj_size_operand);
- }
+ mov(topaddr, Operand(allocation_top));
// This code stores a temporary value in ip. This is OK, as the code below
// does not need ip for implicit literal generation.
@@ -1600,20 +1727,49 @@ void MacroAssembler::AllocateInNewSpace(int object_size,
// respect to register content between debug and release mode.
ldr(ip, MemOperand(topaddr));
cmp(result, ip);
- Check(eq, "Unexpected allocation top");
+ Check(eq, kUnexpectedAllocationTop);
}
// Load allocation limit into ip. Result already contains allocation top.
ldr(ip, MemOperand(topaddr, limit - top));
}
+ if ((flags & DOUBLE_ALIGNMENT) != 0) {
+ // Align the next allocation. Storing the filler map without checking top is
+ // safe in new-space because the limit of the heap is aligned there.
+ ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
+ STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
+ and_(scratch2, result, Operand(kDoubleAlignmentMask), SetCC);
+ Label aligned;
+ b(eq, &aligned);
+ if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
+ cmp(result, Operand(ip));
+ b(hs, gc_required);
+ }
+ mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
+ str(scratch2, MemOperand(result, kDoubleSize / 2, PostIndex));
+ bind(&aligned);
+ }
+
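The DOUBLE_ALIGNMENT block above writes a one-word filler whenever the allocation top is only pointer-aligned, so the object that follows starts on an 8-byte boundary. As a rough host-side sketch of that arithmetic (assuming the 32-bit layout of 4-byte pointers and 8-byte doubles; the function name and filler parameter are illustrative only):

#include <cstdint>

// Advance the allocation cursor past a one-word filler when it is not
// 8-byte aligned. The filler keeps the heap iterable, mirroring the
// one_pointer_filler_map store in the assembly above.
uint32_t* AlignForDouble(uint32_t* top, uint32_t one_pointer_filler_map) {
  if (reinterpret_cast<uintptr_t>(top) & 7) {  // kDoubleAlignmentMask
    *top++ = one_pointer_filler_map;           // kDoubleSize / 2 == 4 bytes
  }
  return top;
}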
// Calculate new top and bail out if new space is exhausted. Use result
- // to calculate the new top.
- if (obj_size_operand.is_single_instruction(this)) {
- // We can add the size as an immediate
- add(scratch2, result, obj_size_operand, SetCC);
- } else {
- // Doesn't fit in an immediate, we have to use the register
- add(scratch2, result, obj_size_reg, SetCC);
+ // to calculate the new top. We must preserve the ip register at this
+ // point, so we cannot just use add().
+ ASSERT(object_size > 0);
+ Register source = result;
+ Condition cond = al;
+ int shift = 0;
+ while (object_size != 0) {
+ if (((object_size >> shift) & 0x03) == 0) {
+ shift += 2;
+ } else {
+ int bits = object_size & (0xff << shift);
+ object_size -= bits;
+ shift += 8;
+ Operand bits_operand(bits);
+ ASSERT(bits_operand.is_single_instruction(this));
+ add(scratch2, source, bits_operand, SetCC, cond);
+ source = scratch2;
+ cond = cc;
+ }
}
b(cs, gc_required);
cmp(scratch2, Operand(ip));
@@ -1627,12 +1783,12 @@ void MacroAssembler::AllocateInNewSpace(int object_size,
}
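The loop that adds object_size above cannot fall back on a plain add(), because an immediate that does not fit a single instruction would need ip as a scratch register, and ip already holds the allocation limit. It therefore splits the constant into chunks that each fit one ARM add-immediate (an 8-bit value at an even bit position). A standalone sketch of the same decomposition, assuming only that object_size is positive (the helper name is made up for illustration):

#include <cassert>
#include <cstdint>
#include <vector>

// Split a positive constant into addends that are each an 8-bit value
// shifted left by an even amount, the form a single ARM add-immediate can
// encode. Their sum is the original constant; the real code emits one add
// per chunk and chains the adds on the carry flag.
std::vector<uint32_t> SplitIntoArmImmediates(uint32_t object_size) {
  assert(object_size > 0);
  std::vector<uint32_t> chunks;
  int shift = 0;
  while (object_size != 0) {
    if (((object_size >> shift) & 0x03u) == 0) {
      shift += 2;                                  // skip two clear bits
    } else {
      uint32_t bits = object_size & (0xffu << shift);
      object_size -= bits;
      shift += 8;
      chunks.push_back(bits);                      // encodable addend
    }
  }
  return chunks;
}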
-void MacroAssembler::AllocateInNewSpace(Register object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags) {
+void MacroAssembler::Allocate(Register object_size,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ AllocationFlags flags) {
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -1658,20 +1814,20 @@ void MacroAssembler::AllocateInNewSpace(Register object_size,
// The values must be adjacent in memory to allow the use of LDM.
// Also, assert that the registers are numbered such that the values
// are loaded in the correct order.
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
- ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address(isolate());
+ ExternalReference allocation_top =
+ AllocationUtils::GetAllocationTopReference(isolate(), flags);
+ ExternalReference allocation_limit =
+ AllocationUtils::GetAllocationLimitReference(isolate(), flags);
intptr_t top =
- reinterpret_cast<intptr_t>(new_space_allocation_top.address());
+ reinterpret_cast<intptr_t>(allocation_top.address());
intptr_t limit =
- reinterpret_cast<intptr_t>(new_space_allocation_limit.address());
+ reinterpret_cast<intptr_t>(allocation_limit.address());
ASSERT((limit - top) == kPointerSize);
ASSERT(result.code() < ip.code());
// Set up allocation top address.
Register topaddr = scratch1;
- mov(topaddr, Operand(new_space_allocation_top));
+ mov(topaddr, Operand(allocation_top));
// This code stores a temporary value in ip. This is OK, as the code below
// does not need ip for implicit literal generation.
@@ -1685,12 +1841,29 @@ void MacroAssembler::AllocateInNewSpace(Register object_size,
// respect to register content between debug and release mode.
ldr(ip, MemOperand(topaddr));
cmp(result, ip);
- Check(eq, "Unexpected allocation top");
+ Check(eq, kUnexpectedAllocationTop);
}
// Load allocation limit into ip. Result already contains allocation top.
ldr(ip, MemOperand(topaddr, limit - top));
}
+ if ((flags & DOUBLE_ALIGNMENT) != 0) {
+ // Align the next allocation. Storing the filler map without checking top is
+ // safe in new-space because the limit of the heap is aligned there.
+ ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
+ ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
+ and_(scratch2, result, Operand(kDoubleAlignmentMask), SetCC);
+ Label aligned;
+ b(eq, &aligned);
+ if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
+ cmp(result, Operand(ip));
+ b(hs, gc_required);
+ }
+ mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
+ str(scratch2, MemOperand(result, kDoubleSize / 2, PostIndex));
+ bind(&aligned);
+ }
+
// Calculate new top and bail out if new space is exhausted. Use result
// to calculate the new top. Object size may be in words so a shift is
// required to get the number of bytes.
@@ -1706,7 +1879,7 @@ void MacroAssembler::AllocateInNewSpace(Register object_size,
// Update allocation top. result temporarily holds the new top.
if (emit_debug_code()) {
tst(scratch2, Operand(kObjectAlignmentMask));
- Check(eq, "Unaligned allocation in new space");
+ Check(eq, kUnalignedAllocationInNewSpace);
}
str(scratch2, MemOperand(topaddr));
@@ -1729,7 +1902,7 @@ void MacroAssembler::UndoAllocationInNewSpace(Register object,
mov(scratch, Operand(new_space_allocation_top));
ldr(scratch, MemOperand(scratch));
cmp(object, scratch);
- Check(lt, "Undo allocation of non allocated memory");
+ Check(lt, kUndoAllocationOfNonAllocatedMemory);
#endif
// Write the address of the object to un-allocate as the current top.
mov(scratch, Operand(new_space_allocation_top));
@@ -1752,12 +1925,12 @@ void MacroAssembler::AllocateTwoByteString(Register result,
and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));
// Allocate two-byte string in new space.
- AllocateInNewSpace(scratch1,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
+ Allocate(scratch1,
+ result,
+ scratch2,
+ scratch3,
+ gc_required,
+ TAG_OBJECT);
// Set the map, length and hash field.
InitializeNewString(result,
@@ -1776,19 +1949,19 @@ void MacroAssembler::AllocateAsciiString(Register result,
Label* gc_required) {
// Calculate the number of bytes needed for the characters in the string while
// observing object alignment.
- ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
+ ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
ASSERT(kCharSize == 1);
add(scratch1, length,
- Operand(kObjectAlignmentMask + SeqAsciiString::kHeaderSize));
+ Operand(kObjectAlignmentMask + SeqOneByteString::kHeaderSize));
and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));
// Allocate ASCII string in new space.
- AllocateInNewSpace(scratch1,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
+ Allocate(scratch1,
+ result,
+ scratch2,
+ scratch3,
+ gc_required,
+ TAG_OBJECT);
// Set the map, length and hash field.
InitializeNewString(result,
@@ -1804,12 +1977,8 @@ void MacroAssembler::AllocateTwoByteConsString(Register result,
Register scratch1,
Register scratch2,
Label* gc_required) {
- AllocateInNewSpace(ConsString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
+ Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
+ TAG_OBJECT);
InitializeNewString(result,
length,
@@ -1824,12 +1993,34 @@ void MacroAssembler::AllocateAsciiConsString(Register result,
Register scratch1,
Register scratch2,
Label* gc_required) {
- AllocateInNewSpace(ConsString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
+ Label allocate_new_space, install_map;
+ AllocationFlags flags = TAG_OBJECT;
+
+ ExternalReference high_promotion_mode = ExternalReference::
+ new_space_high_promotion_mode_active_address(isolate());
+ mov(scratch1, Operand(high_promotion_mode));
+ ldr(scratch1, MemOperand(scratch1, 0));
+ cmp(scratch1, Operand::Zero());
+ b(eq, &allocate_new_space);
+
+ Allocate(ConsString::kSize,
+ result,
+ scratch1,
+ scratch2,
+ gc_required,
+ static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE));
+
+ jmp(&install_map);
+
+ bind(&allocate_new_space);
+ Allocate(ConsString::kSize,
+ result,
+ scratch1,
+ scratch2,
+ gc_required,
+ flags);
+
+ bind(&install_map);
InitializeNewString(result,
length,
@@ -1844,12 +2035,8 @@ void MacroAssembler::AllocateTwoByteSlicedString(Register result,
Register scratch1,
Register scratch2,
Label* gc_required) {
- AllocateInNewSpace(SlicedString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
+ Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
+ TAG_OBJECT);
InitializeNewString(result,
length,
@@ -1864,12 +2051,8 @@ void MacroAssembler::AllocateAsciiSlicedString(Register result,
Register scratch1,
Register scratch2,
Label* gc_required) {
- AllocateInNewSpace(SlicedString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
+ Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
+ TAG_OBJECT);
InitializeNewString(result,
length,
@@ -1943,18 +2126,15 @@ void MacroAssembler::CheckFastSmiElements(Register map,
}
-void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
- Register key_reg,
- Register receiver_reg,
- Register elements_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Label* fail) {
- Label smi_value, maybe_nan, have_double_value, is_nan, done;
- Register mantissa_reg = scratch2;
- Register exponent_reg = scratch3;
+void MacroAssembler::StoreNumberToDoubleElements(
+ Register value_reg,
+ Register key_reg,
+ Register elements_reg,
+ Register scratch1,
+ LowDwVfpRegister double_scratch,
+ Label* fail,
+ int elements_offset) {
+ Label smi_value, store;
// Handle smi values specially.
JumpIfSmi(value_reg, &smi_value);
@@ -1966,102 +2146,40 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
fail,
DONT_DO_SMI_CHECK);
- // Check for nan: all NaN values have a value greater (signed) than 0x7ff00000
- // in the exponent.
- mov(scratch1, Operand(kNaNOrInfinityLowerBoundUpper32));
- ldr(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
- cmp(exponent_reg, scratch1);
- b(ge, &maybe_nan);
-
- ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
-
- bind(&have_double_value);
- add(scratch1, elements_reg,
- Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
- str(mantissa_reg, FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize));
- uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
- str(exponent_reg, FieldMemOperand(scratch1, offset));
- jmp(&done);
-
- bind(&maybe_nan);
- // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
- // it's an Infinity, and the non-NaN code path applies.
- b(gt, &is_nan);
- ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
- cmp(mantissa_reg, Operand(0));
- b(eq, &have_double_value);
- bind(&is_nan);
- // Load canonical NaN for storing into the double array.
- uint64_t nan_int64 = BitCast<uint64_t>(
- FixedDoubleArray::canonical_not_the_hole_nan_as_double());
- mov(mantissa_reg, Operand(static_cast<uint32_t>(nan_int64)));
- mov(exponent_reg, Operand(static_cast<uint32_t>(nan_int64 >> 32)));
- jmp(&have_double_value);
+ vldr(double_scratch, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
+ // Force a canonical NaN.
+ if (emit_debug_code()) {
+ vmrs(ip);
+ tst(ip, Operand(kVFPDefaultNaNModeControlBit));
+ Assert(ne, kDefaultNaNModeNotSet);
+ }
+ VFPCanonicalizeNaN(double_scratch);
+ b(&store);
bind(&smi_value);
- add(scratch1, elements_reg,
- Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
- add(scratch1, scratch1,
- Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
- // scratch1 is now effective address of the double element
+ SmiToDouble(double_scratch, value_reg);
- FloatingPointHelper::Destination destination;
- if (CpuFeatures::IsSupported(VFP2)) {
- destination = FloatingPointHelper::kVFPRegisters;
- } else {
- destination = FloatingPointHelper::kCoreRegisters;
- }
-
- Register untagged_value = elements_reg;
- SmiUntag(untagged_value, value_reg);
- FloatingPointHelper::ConvertIntToDouble(this,
- untagged_value,
- destination,
- d0,
- mantissa_reg,
- exponent_reg,
- scratch4,
- s2);
- if (destination == FloatingPointHelper::kVFPRegisters) {
- CpuFeatures::Scope scope(VFP2);
- vstr(d0, scratch1, 0);
- } else {
- str(mantissa_reg, MemOperand(scratch1, 0));
- str(exponent_reg, MemOperand(scratch1, Register::kSizeInBytes));
- }
- bind(&done);
+ bind(&store);
+ add(scratch1, elements_reg, Operand::DoubleOffsetFromSmiKey(key_reg));
+ vstr(double_scratch,
+ FieldMemOperand(scratch1,
+ FixedDoubleArray::kHeaderSize - elements_offset));
}
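Operand::DoubleOffsetFromSmiKey used above computes the element offset straight from the smi-tagged key; the removed code makes the same shift explicit as LSL by kDoubleSizeLog2 - kSmiTagSize. A one-line sketch of the arithmetic, assuming the 32-bit smi encoding where an integer i is stored as i << 1:

#include <cstdint>

// Each FixedDoubleArray element is 8 bytes, so element i lives at byte
// offset i * 8. The raw smi word is i << 1, so shifting the tagged key
// left by two more bits yields the offset without untagging first.
int32_t DoubleOffsetFromSmiKey(int32_t smi_key) {
  return smi_key << 2;  // (i << 1) << 2 == i * 8
}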
void MacroAssembler::CompareMap(Register obj,
Register scratch,
Handle<Map> map,
- Label* early_success,
- CompareMapMode mode) {
+ Label* early_success) {
ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
- CompareMap(scratch, map, early_success, mode);
+ CompareMap(scratch, map, early_success);
}
void MacroAssembler::CompareMap(Register obj_map,
Handle<Map> map,
- Label* early_success,
- CompareMapMode mode) {
+ Label* early_success) {
cmp(obj_map, Operand(map));
- if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) {
- ElementsKind kind = map->elements_kind();
- if (IsFastElementsKind(kind)) {
- bool packed = IsFastPackedElementsKind(kind);
- Map* current_map = *map;
- while (CanTransitionToMoreGeneralFastElementsKind(kind, packed)) {
- kind = GetNextMoreGeneralFastElementsKind(kind, packed);
- current_map = current_map->LookupElementsTransitionMap(kind);
- if (!current_map) break;
- b(eq, early_success);
- cmp(obj_map, Operand(Handle<Map>(current_map)));
- }
- }
- }
}
@@ -2069,14 +2187,13 @@ void MacroAssembler::CheckMap(Register obj,
Register scratch,
Handle<Map> map,
Label* fail,
- SmiCheckType smi_check_type,
- CompareMapMode mode) {
+ SmiCheckType smi_check_type) {
if (smi_check_type == DO_SMI_CHECK) {
JumpIfSmi(obj, fail);
}
Label success;
- CompareMap(obj, scratch, map, &success, mode);
+ CompareMap(obj, scratch, map, &success);
b(ne, fail);
bind(&success);
}
@@ -2172,15 +2289,18 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
}
-void MacroAssembler::CallStub(CodeStub* stub, Condition cond) {
+void MacroAssembler::CallStub(CodeStub* stub,
+ TypeFeedbackId ast_id,
+ Condition cond) {
ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
- Call(stub->GetCode(), RelocInfo::CODE_TARGET, TypeFeedbackId::None(), cond);
+ Call(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, ast_id, cond);
}
void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
- ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe());
- Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
+ ASSERT(allow_stub_calls_ ||
+ stub->CompilingCallsToThisStubIsGCSafe(isolate()));
+ Jump(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, cond);
}
@@ -2189,53 +2309,97 @@ static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
}
-void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
- int stack_space) {
+void MacroAssembler::CallApiFunctionAndReturn(
+ ExternalReference function,
+ Address function_address,
+ ExternalReference thunk_ref,
+ Register thunk_last_arg,
+ int stack_space,
+ MemOperand return_value_operand,
+ MemOperand* context_restore_operand) {
ExternalReference next_address =
- ExternalReference::handle_scope_next_address();
+ ExternalReference::handle_scope_next_address(isolate());
const int kNextOffset = 0;
const int kLimitOffset = AddressOffset(
- ExternalReference::handle_scope_limit_address(),
+ ExternalReference::handle_scope_limit_address(isolate()),
next_address);
const int kLevelOffset = AddressOffset(
- ExternalReference::handle_scope_level_address(),
+ ExternalReference::handle_scope_level_address(isolate()),
next_address);
+ ASSERT(!thunk_last_arg.is(r3));
+
// Allocate HandleScope in callee-save registers.
- mov(r7, Operand(next_address));
- ldr(r4, MemOperand(r7, kNextOffset));
- ldr(r5, MemOperand(r7, kLimitOffset));
- ldr(r6, MemOperand(r7, kLevelOffset));
+ mov(r9, Operand(next_address));
+ ldr(r4, MemOperand(r9, kNextOffset));
+ ldr(r5, MemOperand(r9, kLimitOffset));
+ ldr(r6, MemOperand(r9, kLevelOffset));
add(r6, r6, Operand(1));
- str(r6, MemOperand(r7, kLevelOffset));
+ str(r6, MemOperand(r9, kLevelOffset));
+
+ if (FLAG_log_timer_events) {
+ FrameScope frame(this, StackFrame::MANUAL);
+ PushSafepointRegisters();
+ PrepareCallCFunction(1, r0);
+ mov(r0, Operand(ExternalReference::isolate_address(isolate())));
+ CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1);
+ PopSafepointRegisters();
+ }
+
+ Label profiler_disabled;
+ Label end_profiler_check;
+ bool* is_profiling_flag =
+ isolate()->cpu_profiler()->is_profiling_address();
+ STATIC_ASSERT(sizeof(*is_profiling_flag) == 1);
+ mov(r3, Operand(reinterpret_cast<int32_t>(is_profiling_flag)));
+ ldrb(r3, MemOperand(r3, 0));
+ cmp(r3, Operand(0));
+ b(eq, &profiler_disabled);
+
+ // Additional parameter is the address of the actual callback.
+ mov(thunk_last_arg, Operand(reinterpret_cast<int32_t>(function_address)));
+ mov(r3, Operand(thunk_ref));
+ jmp(&end_profiler_check);
+
+ bind(&profiler_disabled);
+ mov(r3, Operand(function));
+ bind(&end_profiler_check);
// Native call returns to the DirectCEntry stub which redirects to the
// return address pushed on stack (could have moved after GC).
// DirectCEntry stub itself is generated early and never moves.
DirectCEntryStub stub;
- stub.GenerateCall(this, function);
+ stub.GenerateCall(this, r3);
+
+ if (FLAG_log_timer_events) {
+ FrameScope frame(this, StackFrame::MANUAL);
+ PushSafepointRegisters();
+ PrepareCallCFunction(1, r0);
+ mov(r0, Operand(ExternalReference::isolate_address(isolate())));
+ CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1);
+ PopSafepointRegisters();
+ }
Label promote_scheduled_exception;
+ Label exception_handled;
Label delete_allocated_handles;
Label leave_exit_frame;
+ Label return_value_loaded;
- // If result is non-zero, dereference to get the result value
- // otherwise set it to undefined.
- cmp(r0, Operand(0));
- LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
- ldr(r0, MemOperand(r0), ne);
-
+ // Load the value from ReturnValue.
+ ldr(r0, return_value_operand);
+ bind(&return_value_loaded);
// No more valid handles (the result handle was the last one). Restore
// previous handle scope.
- str(r4, MemOperand(r7, kNextOffset));
+ str(r4, MemOperand(r9, kNextOffset));
if (emit_debug_code()) {
- ldr(r1, MemOperand(r7, kLevelOffset));
+ ldr(r1, MemOperand(r9, kLevelOffset));
cmp(r1, r6);
- Check(eq, "Unexpected level after return from api call");
+ Check(eq, kUnexpectedLevelAfterReturnFromApiCall);
}
sub(r6, r6, Operand(1));
- str(r6, MemOperand(r7, kLevelOffset));
- ldr(ip, MemOperand(r7, kLimitOffset));
+ str(r6, MemOperand(r9, kLevelOffset));
+ ldr(ip, MemOperand(r9, kLimitOffset));
cmp(r5, ip);
b(ne, &delete_allocated_handles);
@@ -2246,24 +2410,32 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
ldr(r5, MemOperand(ip));
cmp(r4, r5);
b(ne, &promote_scheduled_exception);
+ bind(&exception_handled);
+ bool restore_context = context_restore_operand != NULL;
+ if (restore_context) {
+ ldr(cp, *context_restore_operand);
+ }
// LeaveExitFrame expects unwind space to be in a register.
mov(r4, Operand(stack_space));
- LeaveExitFrame(false, r4);
+ LeaveExitFrame(false, r4, !restore_context);
mov(pc, lr);
bind(&promote_scheduled_exception);
- TailCallExternalReference(
- ExternalReference(Runtime::kPromoteScheduledException, isolate()),
- 0,
- 1);
+ {
+ FrameScope frame(this, StackFrame::INTERNAL);
+ CallExternalReference(
+ ExternalReference(Runtime::kPromoteScheduledException, isolate()),
+ 0);
+ }
+ jmp(&exception_handled);
// HandleScope limit has changed. Delete allocated extensions.
bind(&delete_allocated_handles);
- str(r5, MemOperand(r7, kLimitOffset));
+ str(r5, MemOperand(r9, kLimitOffset));
mov(r4, r0);
PrepareCallCFunction(1, r5);
- mov(r0, Operand(ExternalReference::isolate_address()));
+ mov(r0, Operand(ExternalReference::isolate_address(isolate())));
CallCFunction(
ExternalReference::delete_handle_scope_extensions(isolate()), 1);
mov(r0, r4);
@@ -2273,7 +2445,7 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
- return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe();
+ return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe(isolate());
}
@@ -2294,344 +2466,160 @@ void MacroAssembler::IndexFromHash(Register hash, Register index) {
(1 << String::kArrayIndexValueBits));
// We want the smi-tagged index in key. kArrayIndexValueMask has zeros in
// the low kHashShift bits.
- STATIC_ASSERT(kSmiTag == 0);
Ubfx(hash, hash, String::kHashShift, String::kArrayIndexValueBits);
- mov(index, Operand(hash, LSL, kSmiTagSize));
-}
-
-
-void MacroAssembler::IntegerToDoubleConversionWithVFP3(Register inReg,
- Register outHighReg,
- Register outLowReg) {
- // ARMv7 VFP3 instructions to implement integer to double conversion.
- mov(r7, Operand(inReg, ASR, kSmiTagSize));
- vmov(s15, r7);
- vcvt_f64_s32(d7, s15);
- vmov(outLowReg, outHighReg, d7);
+ SmiTag(index, hash);
}
-void MacroAssembler::ObjectToDoubleVFPRegister(Register object,
- DwVfpRegister result,
- Register scratch1,
- Register scratch2,
- Register heap_number_map,
- SwVfpRegister scratch3,
- Label* not_number,
- ObjectToDoubleFlags flags) {
- Label done;
- if ((flags & OBJECT_NOT_SMI) == 0) {
- Label not_smi;
- JumpIfNotSmi(object, &not_smi);
- // Remove smi tag and convert to double.
- mov(scratch1, Operand(object, ASR, kSmiTagSize));
- vmov(scratch3, scratch1);
- vcvt_f64_s32(result, scratch3);
- b(&done);
- bind(&not_smi);
- }
- // Check for heap number and load double value from it.
- ldr(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
- sub(scratch2, object, Operand(kHeapObjectTag));
- cmp(scratch1, heap_number_map);
- b(ne, not_number);
- if ((flags & AVOID_NANS_AND_INFINITIES) != 0) {
- // If exponent is all ones the number is either a NaN or +/-Infinity.
- ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
- Sbfx(scratch1,
- scratch1,
- HeapNumber::kExponentShift,
- HeapNumber::kExponentBits);
- // All-one value sign extend to -1.
- cmp(scratch1, Operand(-1));
- b(eq, not_number);
- }
- vldr(result, scratch2, HeapNumber::kValueOffset);
- bind(&done);
-}
-
-
-void MacroAssembler::SmiToDoubleVFPRegister(Register smi,
- DwVfpRegister value,
- Register scratch1,
- SwVfpRegister scratch2) {
- mov(scratch1, Operand(smi, ASR, kSmiTagSize));
- vmov(scratch2, scratch1);
- vcvt_f64_s32(value, scratch2);
-}
-
-
-// Tries to get a signed int32 out of a double precision floating point heap
-// number. Rounds towards 0. Branch to 'not_int32' if the double is out of the
-// 32bits signed integer range.
-void MacroAssembler::ConvertToInt32(Register source,
- Register dest,
- Register scratch,
- Register scratch2,
- DwVfpRegister double_scratch,
- Label *not_int32) {
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- sub(scratch, source, Operand(kHeapObjectTag));
- vldr(double_scratch, scratch, HeapNumber::kValueOffset);
- vcvt_s32_f64(double_scratch.low(), double_scratch);
- vmov(dest, double_scratch.low());
- // Signed vcvt instruction will saturate to the minimum (0x80000000) or
- // maximum (0x7fffffff) signed 32-bit integer when the double is out of
- // range. When subtracting one, the minimum signed integer becomes the
- // maximum signed integer.
- sub(scratch, dest, Operand(1));
- cmp(scratch, Operand(LONG_MAX - 1));
- // If equal then dest was LONG_MAX, if greater dest was LONG_MIN.
- b(ge, not_int32);
+void MacroAssembler::SmiToDouble(LowDwVfpRegister value, Register smi) {
+ if (CpuFeatures::IsSupported(VFP3)) {
+ vmov(value.low(), smi);
+ vcvt_f64_s32(value, 1);
} else {
- // This code is faster for doubles that are in the ranges -0x7fffffff to
- // -0x40000000 or 0x40000000 to 0x7fffffff. This corresponds almost to
- // the range of signed int32 values that are not Smis. Jumps to the label
- // 'not_int32' if the double isn't in the range -0x80000000.0 to
- // 0x80000000.0 (excluding the endpoints).
- Label right_exponent, done;
- // Get exponent word.
- ldr(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset));
- // Get exponent alone in scratch2.
- Ubfx(scratch2,
- scratch,
- HeapNumber::kExponentShift,
- HeapNumber::kExponentBits);
- // Load dest with zero. We use this either for the final shift or
- // for the answer.
- mov(dest, Operand(0, RelocInfo::NONE));
- // Check whether the exponent matches a 32 bit signed int that is not a Smi.
- // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). This is
- // the exponent that we are fastest at and also the highest exponent we can
- // handle here.
- const uint32_t non_smi_exponent = HeapNumber::kExponentBias + 30;
- // The non_smi_exponent, 0x41d, is too big for ARM's immediate field so we
- // split it up to avoid a constant pool entry. You can't do that in general
- // for cmp because of the overflow flag, but we know the exponent is in the
- // range 0-2047 so there is no overflow.
- int fudge_factor = 0x400;
- sub(scratch2, scratch2, Operand(fudge_factor));
- cmp(scratch2, Operand(non_smi_exponent - fudge_factor));
- // If we have a match of the int32-but-not-Smi exponent then skip some
- // logic.
- b(eq, &right_exponent);
- // If the exponent is higher than that then go to slow case. This catches
- // numbers that don't fit in a signed int32, infinities and NaNs.
- b(gt, not_int32);
-
- // We know the exponent is smaller than 30 (biased). If it is less than
- // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, i.e.
- // it rounds to zero.
- const uint32_t zero_exponent = HeapNumber::kExponentBias + 0;
- sub(scratch2, scratch2, Operand(zero_exponent - fudge_factor), SetCC);
- // Dest already has a Smi zero.
- b(lt, &done);
-
- // We have an exponent between 0 and 30 in scratch2. Subtract from 30 to
- // get how much to shift down.
- rsb(dest, scratch2, Operand(30));
-
- bind(&right_exponent);
- // Get the top bits of the mantissa.
- and_(scratch2, scratch, Operand(HeapNumber::kMantissaMask));
- // Put back the implicit 1.
- orr(scratch2, scratch2, Operand(1 << HeapNumber::kExponentShift));
- // Shift up the mantissa bits to take up the space the exponent used to
- // take. We just orred in the implicit bit so that took care of one and
- // we want to leave the sign bit 0 so we subtract 2 bits from the shift
- // distance.
- const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
- mov(scratch2, Operand(scratch2, LSL, shift_distance));
- // Put sign in zero flag.
- tst(scratch, Operand(HeapNumber::kSignMask));
- // Get the second half of the double. For some exponents we don't
- // actually need this because the bits get shifted out again, but
- // it's probably slower to test than just to do it.
- ldr(scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset));
- // Shift down 22 bits to get the last 10 bits.
- orr(scratch, scratch2, Operand(scratch, LSR, 32 - shift_distance));
- // Move down according to the exponent.
- mov(dest, Operand(scratch, LSR, dest));
- // Fix sign if sign bit was set.
- rsb(dest, dest, Operand(0, RelocInfo::NONE), LeaveCC, ne);
- bind(&done);
+ SmiUntag(ip, smi);
+ vmov(value.low(), ip);
+ vcvt_f64_s32(value, value.low());
}
}
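The VFP3 path of SmiToDouble converts with vcvt_f64_s32(value, 1), which reads as the fixed-point variant with one fractional bit; that untags and converts in a single instruction, since dividing the raw smi word by two recovers the integer. A host-side sketch of that reading (assuming 32-bit smis; the fixed-point interpretation of the second argument is an assumption about the assembler overload):

#include <cstdint>

// A 32-bit smi stores n as the raw word n << 1. Interpreting that word as
// a signed fixed-point value with one fractional bit, i.e. converting to
// double and dividing by two, recovers n exactly, because every int32 is
// exactly representable as a double.
double SmiToDouble(int32_t raw_smi_word) {
  return static_cast<double>(raw_smi_word) / 2.0;
}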
-void MacroAssembler::EmitVFPTruncate(VFPRoundingMode rounding_mode,
- Register result,
- DwVfpRegister double_input,
- Register scratch,
- DwVfpRegister double_scratch,
- CheckForInexactConversion check_inexact) {
- ASSERT(!result.is(scratch));
+void MacroAssembler::TestDoubleIsInt32(DwVfpRegister double_input,
+ LowDwVfpRegister double_scratch) {
ASSERT(!double_input.is(double_scratch));
+ vcvt_s32_f64(double_scratch.low(), double_input);
+ vcvt_f64_s32(double_scratch, double_scratch.low());
+ VFPCompareAndSetFlags(double_input, double_scratch);
+}
- ASSERT(CpuFeatures::IsSupported(VFP2));
- CpuFeatures::Scope scope(VFP2);
- Register prev_fpscr = result;
- Label done;
- // Test for values that can be exactly represented as a signed 32-bit integer.
+void MacroAssembler::TryDoubleToInt32Exact(Register result,
+ DwVfpRegister double_input,
+ LowDwVfpRegister double_scratch) {
+ ASSERT(!double_input.is(double_scratch));
vcvt_s32_f64(double_scratch.low(), double_input);
vmov(result, double_scratch.low());
vcvt_f64_s32(double_scratch, double_scratch.low());
VFPCompareAndSetFlags(double_input, double_scratch);
- b(eq, &done);
-
- // Convert to integer, respecting rounding mode.
- int32_t check_inexact_conversion =
- (check_inexact == kCheckForInexactConversion) ? kVFPInexactExceptionBit : 0;
-
- // Set custom FPCSR:
- // - Set rounding mode.
- // - Clear vfp cumulative exception flags.
- // - Make sure Flush-to-zero mode control bit is unset.
- vmrs(prev_fpscr);
- bic(scratch,
- prev_fpscr,
- Operand(kVFPExceptionMask |
- check_inexact_conversion |
- kVFPRoundingModeMask |
- kVFPFlushToZeroMask));
- // 'Round To Nearest' is encoded by 0b00 so no bits need to be set.
- if (rounding_mode != kRoundToNearest) {
- orr(scratch, scratch, Operand(rounding_mode));
- }
- vmsr(scratch);
+}
- // Convert the argument to an integer.
- vcvt_s32_f64(double_scratch.low(),
- double_input,
- (rounding_mode == kRoundToZero) ? kDefaultRoundToZero
- : kFPSCRRounding);
- // Retrieve FPSCR.
- vmrs(scratch);
- // Restore FPSCR.
- vmsr(prev_fpscr);
- // Move the converted value into the result register.
+void MacroAssembler::TryInt32Floor(Register result,
+ DwVfpRegister double_input,
+ Register input_high,
+ LowDwVfpRegister double_scratch,
+ Label* done,
+ Label* exact) {
+ ASSERT(!result.is(input_high));
+ ASSERT(!double_input.is(double_scratch));
+ Label negative, exception;
+
+ VmovHigh(input_high, double_input);
+
+ // Test for NaN and infinities.
+ Sbfx(result, input_high,
+ HeapNumber::kExponentShift, HeapNumber::kExponentBits);
+ cmp(result, Operand(-1));
+ b(eq, &exception);
+ // Test for values that can be exactly represented as a
+ // signed 32-bit integer.
+ TryDoubleToInt32Exact(result, double_input, double_scratch);
+ // If exact, return (result already fetched).
+ b(eq, exact);
+ cmp(input_high, Operand::Zero());
+ b(mi, &negative);
+
+ // Input is in ]+0, +inf[.
+ // If result equals 0x7fffffff, the input was either out of range or in
+ // ]0x7fffffff, 0x80000000[. We ignore this last case, which would still
+ // fit into an int32; such input is simply treated as out of range, so we
+ // always go to the exception path.
+ // If result < 0x7fffffff, go to done, result fetched.
+ cmn(result, Operand(1));
+ b(mi, &exception);
+ b(done);
+
+ // Input is in ]-inf, -0[.
+ // If x is a non-integer negative number,
+ // floor(x) <=> round_to_zero(x) - 1.
+ bind(&negative);
+ sub(result, result, Operand(1), SetCC);
+ // If result is still negative, go to done, result fetched.
+ // Else, we had an overflow and fall through to the exception path.
+ b(mi, done);
+ bind(&exception);
+}
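TryInt32Floor splits the work by sign: for inputs in ]+0, +inf[ a round-to-zero conversion already is the floor, while for negative non-integral inputs the floor is the truncated value minus one. A plain C++ sketch of that rule, leaving out the NaN, infinity and overflow checks that the assembly handles around it:

#include <cstdint>

// Floor of a double known to be finite and to fit in an int32. Truncation
// rounds toward zero, which for a negative non-integer lands one above the
// floor; subtracting one in exactly that case fixes it up.
int32_t Int32Floor(double x) {
  int32_t truncated = static_cast<int32_t>(x);        // round toward zero
  bool exact = (static_cast<double>(truncated) == x);
  return (x < 0.0 && !exact) ? truncated - 1 : truncated;
}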
+
+void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
+ DwVfpRegister double_input,
+ Label* done) {
+ LowDwVfpRegister double_scratch = kScratchDoubleReg;
+ vcvt_s32_f64(double_scratch.low(), double_input);
vmov(result, double_scratch.low());
- // Check for vfp exceptions.
- tst(scratch, Operand(kVFPExceptionMask | check_inexact_conversion));
- bind(&done);
+ // If result is not saturated (0x7fffffff or 0x80000000), we are done.
+ sub(ip, result, Operand(1));
+ cmp(ip, Operand(0x7ffffffe));
+ b(lt, done);
}
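The inline truncation above only trusts its result when the vcvt did not saturate; an out-of-range conversion yields exactly 0x7fffffff or 0x80000000, and the subtract-and-compare detects both in two instructions by letting the subtraction wrap. The same predicate written portably (without relying on wraparound):

#include <cstdint>

// True when a saturating double-to-int32 conversion produced a value that
// can be trusted, i.e. neither of the two saturation sentinels.
bool ConversionDidNotSaturate(int32_t result) {
  return result != INT32_MAX && result != INT32_MIN;
}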
-void MacroAssembler::EmitOutOfInt32RangeTruncate(Register result,
- Register input_high,
- Register input_low,
- Register scratch) {
- Label done, normal_exponent, restore_sign;
+void MacroAssembler::TruncateDoubleToI(Register result,
+ DwVfpRegister double_input) {
+ Label done;
- // Extract the biased exponent in result.
- Ubfx(result,
- input_high,
- HeapNumber::kExponentShift,
- HeapNumber::kExponentBits);
+ TryInlineTruncateDoubleToI(result, double_input, &done);
- // Check for Infinity and NaNs, which should return 0.
- cmp(result, Operand(HeapNumber::kExponentMask));
- mov(result, Operand(0), LeaveCC, eq);
- b(eq, &done);
+ // If we fell through, the inline version did not succeed; call the stub instead.
+ push(lr);
+ sub(sp, sp, Operand(kDoubleSize)); // Put input on stack.
+ vstr(double_input, MemOperand(sp, 0));
- // Express exponent as delta to (number of mantissa bits + 31).
- sub(result,
- result,
- Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31),
- SetCC);
+ DoubleToIStub stub(sp, result, 0, true, true);
+ CallStub(&stub);
- // If the delta is strictly positive, all bits would be shifted away,
- // which means that we can return 0.
- b(le, &normal_exponent);
- mov(result, Operand(0));
- b(&done);
+ add(sp, sp, Operand(kDoubleSize));
+ pop(lr);
- bind(&normal_exponent);
- const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
- // Calculate shift.
- add(scratch, result, Operand(kShiftBase + HeapNumber::kMantissaBits), SetCC);
-
- // Save the sign.
- Register sign = result;
- result = no_reg;
- and_(sign, input_high, Operand(HeapNumber::kSignMask));
-
- // Set the implicit 1 before the mantissa part in input_high.
- orr(input_high,
- input_high,
- Operand(1 << HeapNumber::kMantissaBitsInTopWord));
- // Shift the mantissa bits to the correct position.
- // We don't need to clear non-mantissa bits as they will be shifted away.
- // If they weren't, it would mean that the answer is in the 32bit range.
- mov(input_high, Operand(input_high, LSL, scratch));
-
- // Replace the shifted bits with bits from the lower mantissa word.
- Label pos_shift, shift_done;
- rsb(scratch, scratch, Operand(32), SetCC);
- b(&pos_shift, ge);
-
- // Negate scratch.
- rsb(scratch, scratch, Operand(0));
- mov(input_low, Operand(input_low, LSL, scratch));
- b(&shift_done);
-
- bind(&pos_shift);
- mov(input_low, Operand(input_low, LSR, scratch));
-
- bind(&shift_done);
- orr(input_high, input_high, Operand(input_low));
- // Restore sign if necessary.
- cmp(sign, Operand(0));
- result = sign;
- sign = no_reg;
- rsb(result, input_high, Operand(0), LeaveCC, ne);
- mov(result, input_high, LeaveCC, eq);
bind(&done);
}
-void MacroAssembler::EmitECMATruncate(Register result,
- DwVfpRegister double_input,
- SwVfpRegister single_scratch,
- Register scratch,
- Register input_high,
- Register input_low) {
- CpuFeatures::Scope scope(VFP2);
- ASSERT(!input_high.is(result));
- ASSERT(!input_low.is(result));
- ASSERT(!input_low.is(input_high));
- ASSERT(!scratch.is(result) &&
- !scratch.is(input_high) &&
- !scratch.is(input_low));
- ASSERT(!single_scratch.is(double_input.low()) &&
- !single_scratch.is(double_input.high()));
+void MacroAssembler::TruncateHeapNumberToI(Register result,
+ Register object) {
+ Label done;
+ LowDwVfpRegister double_scratch = kScratchDoubleReg;
+ ASSERT(!result.is(object));
+
+ vldr(double_scratch,
+ MemOperand(object, HeapNumber::kValueOffset - kHeapObjectTag));
+ TryInlineTruncateDoubleToI(result, double_scratch, &done);
+ // If we fell through, the inline version did not succeed; call the stub instead.
+ push(lr);
+ DoubleToIStub stub(object,
+ result,
+ HeapNumber::kValueOffset - kHeapObjectTag,
+ true,
+ true);
+ CallStub(&stub);
+ pop(lr);
+
+ bind(&done);
+}
+
+
+void MacroAssembler::TruncateNumberToI(Register object,
+ Register result,
+ Register heap_number_map,
+ Register scratch1,
+ Label* not_number) {
Label done;
+ ASSERT(!result.is(object));
- // Clear cumulative exception flags.
- ClearFPSCRBits(kVFPExceptionMask, scratch);
- // Try a conversion to a signed integer.
- vcvt_s32_f64(single_scratch, double_input);
- vmov(result, single_scratch);
- // Retrieve the FPSCR.
- vmrs(scratch);
- // Check for overflow and NaNs.
- tst(scratch, Operand(kVFPOverflowExceptionBit |
- kVFPUnderflowExceptionBit |
- kVFPInvalidOpExceptionBit));
- // If we had no exceptions we are done.
- b(eq, &done);
+ UntagAndJumpIfSmi(result, object, &done);
+ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
+ TruncateHeapNumberToI(result, object);
- // Load the double value and perform a manual truncation.
- vmov(input_low, input_high, double_input);
- EmitOutOfInt32RangeTruncate(result,
- input_high,
- input_low,
- scratch);
bind(&done);
}
@@ -2642,7 +2630,7 @@ void MacroAssembler::GetLeastBitsFromSmi(Register dst,
if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) {
ubfx(dst, src, kSmiTagSize, num_least_bits);
} else {
- mov(dst, Operand(src, ASR, kSmiTagSize));
+ SmiUntag(dst, src);
and_(dst, dst, Operand((1 << num_least_bits) - 1));
}
}
@@ -2656,7 +2644,8 @@ void MacroAssembler::GetLeastBitsFromInt32(Register dst,
void MacroAssembler::CallRuntime(const Runtime::Function* f,
- int num_arguments) {
+ int num_arguments,
+ SaveFPRegsMode save_doubles) {
// All parameters are on the stack. r0 has the return value after call.
// If the expected number of arguments of the runtime function is
@@ -2673,21 +2662,7 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f,
// smarter.
mov(r0, Operand(num_arguments));
mov(r1, Operand(ExternalReference(f, isolate())));
- CEntryStub stub(1);
- CallStub(&stub);
-}
-
-
-void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) {
- CallRuntime(Runtime::FunctionForId(fid), num_arguments);
-}
-
-
-void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
- const Runtime::Function* function = Runtime::FunctionForId(id);
- mov(r0, Operand(function->nargs));
- mov(r1, Operand(ExternalReference(function, isolate())));
- CEntryStub stub(1, kSaveFPRegs);
+ CEntryStub stub(1, save_doubles);
CallStub(&stub);
}
@@ -2730,7 +2705,7 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
#endif
mov(r1, Operand(builtin));
CEntryStub stub(1);
- Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+ Jump(stub.GetCode(isolate()), RelocInfo::CODE_TARGET);
}
@@ -2808,19 +2783,9 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
}
-void MacroAssembler::Assert(Condition cond, const char* msg) {
+void MacroAssembler::Assert(Condition cond, BailoutReason reason) {
if (emit_debug_code())
- Check(cond, msg);
-}
-
-
-void MacroAssembler::AssertRegisterIsRoot(Register reg,
- Heap::RootListIndex index) {
- if (emit_debug_code()) {
- LoadRoot(ip, index);
- cmp(reg, ip);
- Check(eq, "Register did not match expected root");
- }
+ Check(cond, reason);
}
@@ -2839,23 +2804,23 @@ void MacroAssembler::AssertFastElements(Register elements) {
LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
cmp(elements, ip);
b(eq, &ok);
- Abort("JSObject with fast elements map has slow elements");
+ Abort(kJSObjectWithFastElementsMapHasSlowElements);
bind(&ok);
pop(elements);
}
}
-void MacroAssembler::Check(Condition cond, const char* msg) {
+void MacroAssembler::Check(Condition cond, BailoutReason reason) {
Label L;
b(cond, &L);
- Abort(msg);
+ Abort(reason);
// will not return here
bind(&L);
}
-void MacroAssembler::Abort(const char* msg) {
+void MacroAssembler::Abort(BailoutReason reason) {
Label abort_start;
bind(&abort_start);
// We want to pass the msg string like a smi to avoid GC
@@ -2863,6 +2828,7 @@ void MacroAssembler::Abort(const char* msg) {
// properly. Instead, we pass an aligned pointer that is
// a proper v8 smi, but also pass the alignment difference
// from the real pointer as a smi.
+ const char* msg = GetBailoutReason(reason);
intptr_t p1 = reinterpret_cast<intptr_t>(msg);
intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
@@ -2871,6 +2837,11 @@ void MacroAssembler::Abort(const char* msg) {
RecordComment("Abort message: ");
RecordComment(msg);
}
+
+ if (FLAG_trap_on_abort) {
+ stop(msg);
+ return;
+ }
#endif
mov(r0, Operand(p0));
@@ -2982,6 +2953,19 @@ void MacroAssembler::LoadGlobalFunction(int index, Register function) {
}
+void MacroAssembler::LoadArrayFunction(Register function) {
+ // Load the global or builtins object from the current context.
+ ldr(function,
+ MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ // Load the global context from the global or builtins object.
+ ldr(function,
+ FieldMemOperand(function, GlobalObject::kGlobalContextOffset));
+ // Load the array function from the native context.
+ ldr(function,
+ MemOperand(function, Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
+}
+
+
void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
Register map,
Register scratch) {
@@ -2992,7 +2976,7 @@ void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
b(&ok);
bind(&fail);
- Abort("Global functions must have initial map");
+ Abort(kGlobalFunctionsMustHaveInitialMap);
bind(&ok);
}
}
@@ -3034,7 +3018,7 @@ void MacroAssembler::JumpIfNotBothSmi(Register reg1,
void MacroAssembler::UntagAndJumpIfSmi(
Register dst, Register src, Label* smi_case) {
STATIC_ASSERT(kSmiTag == 0);
- mov(dst, Operand(src, ASR, kSmiTagSize), SetCC);
+ SmiUntag(dst, src, SetCC);
b(cc, smi_case); // Shifter carry is not set for a smi.
}
@@ -3042,7 +3026,7 @@ void MacroAssembler::UntagAndJumpIfSmi(
void MacroAssembler::UntagAndJumpIfNotSmi(
Register dst, Register src, Label* non_smi_case) {
STATIC_ASSERT(kSmiTag == 0);
- mov(dst, Operand(src, ASR, kSmiTagSize), SetCC);
+ SmiUntag(dst, src, SetCC);
b(cs, non_smi_case); // Shifter carry is set for a non-smi.
}
@@ -3061,7 +3045,7 @@ void MacroAssembler::AssertNotSmi(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
tst(object, Operand(kSmiTagMask));
- Check(ne, "Operand is a smi");
+ Check(ne, kOperandIsASmi);
}
}
@@ -3070,7 +3054,7 @@ void MacroAssembler::AssertSmi(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
tst(object, Operand(kSmiTagMask));
- Check(eq, "Operand is not smi");
+ Check(eq, kOperandIsNotSmi);
}
}
@@ -3079,23 +3063,35 @@ void MacroAssembler::AssertString(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
tst(object, Operand(kSmiTagMask));
- Check(ne, "Operand is a smi and not a string");
+ Check(ne, kOperandIsASmiAndNotAString);
push(object);
ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
CompareInstanceType(object, object, FIRST_NONSTRING_TYPE);
pop(object);
- Check(lo, "Operand is not a string");
+ Check(lo, kOperandIsNotAString);
}
}
+void MacroAssembler::AssertName(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ tst(object, Operand(kSmiTagMask));
+ Check(ne, kOperandIsASmiAndNotAName);
+ push(object);
+ ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
+ CompareInstanceType(object, object, LAST_NAME_TYPE);
+ pop(object);
+ Check(le, kOperandIsNotAName);
+ }
+}
+
-void MacroAssembler::AssertRootValue(Register src,
- Heap::RootListIndex root_value_index,
- const char* message) {
+
+void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
if (emit_debug_code()) {
- CompareRoot(src, root_value_index);
- Check(eq, message);
+ CompareRoot(reg, index);
+ Check(eq, kHeapNumberMapRegisterClobbered);
}
}
@@ -3105,12 +3101,94 @@ void MacroAssembler::JumpIfNotHeapNumber(Register object,
Register scratch,
Label* on_not_heap_number) {
ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
- AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
cmp(scratch, heap_number_map);
b(ne, on_not_heap_number);
}
+void MacroAssembler::LookupNumberStringCache(Register object,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* not_found) {
+ // Use of registers. Register result is used as a temporary.
+ Register number_string_cache = result;
+ Register mask = scratch3;
+
+ // Load the number string cache.
+ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
+
+ // Make the hash mask from the length of the number string cache. It
+ // contains two elements (number and string) for each cache entry.
+ ldr(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
+ // Divide length by two (length is a smi).
+ mov(mask, Operand(mask, ASR, kSmiTagSize + 1));
+ sub(mask, mask, Operand(1)); // Make mask.
+
+ // Calculate the entry in the number string cache. The hash value in the
+ // number string cache for smis is just the smi value, and the hash for
+ // doubles is the xor of the upper and lower words. See
+ // Heap::GetNumberStringCache.
+ Label is_smi;
+ Label load_result_from_cache;
+ JumpIfSmi(object, &is_smi);
+ CheckMap(object,
+ scratch1,
+ Heap::kHeapNumberMapRootIndex,
+ not_found,
+ DONT_DO_SMI_CHECK);
+
+ STATIC_ASSERT(8 == kDoubleSize);
+ add(scratch1,
+ object,
+ Operand(HeapNumber::kValueOffset - kHeapObjectTag));
+ ldm(ia, scratch1, scratch1.bit() | scratch2.bit());
+ eor(scratch1, scratch1, Operand(scratch2));
+ and_(scratch1, scratch1, Operand(mask));
+
+ // Calculate address of entry in string cache: each entry consists
+ // of two pointer sized fields.
+ add(scratch1,
+ number_string_cache,
+ Operand(scratch1, LSL, kPointerSizeLog2 + 1));
+
+ Register probe = mask;
+ ldr(probe, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
+ JumpIfSmi(probe, not_found);
+ sub(scratch2, object, Operand(kHeapObjectTag));
+ vldr(d0, scratch2, HeapNumber::kValueOffset);
+ sub(probe, probe, Operand(kHeapObjectTag));
+ vldr(d1, probe, HeapNumber::kValueOffset);
+ VFPCompareAndSetFlags(d0, d1);
+ b(ne, not_found); // The cache did not contain this value.
+ b(&load_result_from_cache);
+
+ bind(&is_smi);
+ Register scratch = scratch1;
+ and_(scratch, mask, Operand(object, ASR, 1));
+ // Calculate address of entry in string cache: each entry consists
+ // of two pointer sized fields.
+ add(scratch,
+ number_string_cache,
+ Operand(scratch, LSL, kPointerSizeLog2 + 1));
+
+ // Check if the entry is the smi we are looking for.
+ ldr(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
+ cmp(object, probe);
+ b(ne, not_found);
+
+ // Get the result from the cache.
+ bind(&load_result_from_cache);
+ ldr(result, FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
+ IncrementCounter(isolate()->counters()->number_to_string_native(),
+ 1,
+ scratch1,
+ scratch2);
+}
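The cache lookup indexes a power-of-two table: smis hash to their untagged value and heap numbers to the xor of the two 32-bit halves of the double, masked by the entry count minus one, exactly as the comment above describes. A sketch of the heap-number hash, taking the mask as a parameter since the cache size is only known at run time:

#include <cstdint>
#include <cstring>

// Hash a double the way the number-string cache does: xor the two 32-bit
// halves of its bit pattern, then keep only the bits selected by the mask
// (number of cache entries minus one, a power of two minus one).
uint32_t NumberStringCacheHash(double value, uint32_t mask) {
  uint32_t halves[2];
  std::memcpy(halves, &value, sizeof(halves));
  return (halves[0] ^ halves[1]) & mask;
}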
+
+
void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
Register first,
Register second,
@@ -3137,7 +3215,6 @@ void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
Register scratch2,
Label* failure) {
// Check that neither is a smi.
- STATIC_ASSERT(kSmiTag == 0);
and_(scratch1, first, Operand(second));
JumpIfSmi(scratch1, failure);
JumpIfNonSmisNotBothSequentialAsciiStrings(first,
@@ -3148,6 +3225,19 @@ void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
}
+void MacroAssembler::JumpIfNotUniqueName(Register reg,
+ Label* not_unique_name) {
+ STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
+ Label succeed;
+ tst(reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
+ b(eq, &succeed);
+ cmp(reg, Operand(SYMBOL_TYPE));
+ b(ne, not_unique_name);
+
+ bind(&succeed);
+}
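JumpIfNotUniqueName accepts the two instance-type shapes that can act as property keys directly: internalized strings (both the not-a-string and not-internalized bits clear) and symbols. A small predicate sketch; the mask and type arguments stand in for the real kIsNotStringMask, kIsNotInternalizedMask and SYMBOL_TYPE constants rather than reproducing their values:

#include <cstdint>

// A unique name is an internalized string or a symbol. The first test
// mirrors the tst above (both "not a string" and "not internalized" bits
// clear); the fallback compares against the symbol instance type.
bool IsUniqueName(uint32_t instance_type,
                  uint32_t not_string_mask,
                  uint32_t not_internalized_mask,
                  uint32_t symbol_type) {
  if ((instance_type & (not_string_mask | not_internalized_mask)) == 0) {
    return true;  // internalized string
  }
  return instance_type == symbol_type;
}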
+
+
// Allocates a heap number or jumps to the need_gc label if the young space
// is full and a scavenge is needed.
void MacroAssembler::AllocateHeapNumber(Register result,
@@ -3158,16 +3248,11 @@ void MacroAssembler::AllocateHeapNumber(Register result,
TaggingMode tagging_mode) {
// Allocate an object in the heap for the heap number and tag it as a heap
// object.
- AllocateInNewSpace(HeapNumber::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- tagging_mode == TAG_RESULT ? TAG_OBJECT :
- NO_ALLOCATION_FLAGS);
+ Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
+ tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS);
// Store heap number map in the allocated object.
- AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
if (tagging_mode == TAG_RESULT) {
str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
} else {
@@ -3191,27 +3276,23 @@ void MacroAssembler::AllocateHeapNumberWithValue(Register result,
// Copies a fixed number of fields of heap objects from src to dst.
void MacroAssembler::CopyFields(Register dst,
Register src,
- RegList temps,
+ LowDwVfpRegister double_scratch,
int field_count) {
- // At least one bit set in the first 15 registers.
- ASSERT((temps & ((1 << 15) - 1)) != 0);
- ASSERT((temps & dst.bit()) == 0);
- ASSERT((temps & src.bit()) == 0);
- // Primitive implementation using only one temporary register.
-
- Register tmp = no_reg;
- // Find a temp register in temps list.
- for (int i = 0; i < 15; i++) {
- if ((temps & (1 << i)) != 0) {
- tmp.set_code(i);
- break;
- }
+ int double_count = field_count / (DwVfpRegister::kSizeInBytes / kPointerSize);
+ for (int i = 0; i < double_count; i++) {
+ vldr(double_scratch, FieldMemOperand(src, i * DwVfpRegister::kSizeInBytes));
+ vstr(double_scratch, FieldMemOperand(dst, i * DwVfpRegister::kSizeInBytes));
}
- ASSERT(!tmp.is(no_reg));
- for (int i = 0; i < field_count; i++) {
- ldr(tmp, FieldMemOperand(src, i * kPointerSize));
- str(tmp, FieldMemOperand(dst, i * kPointerSize));
+ STATIC_ASSERT(SwVfpRegister::kSizeInBytes == kPointerSize);
+ STATIC_ASSERT(2 * SwVfpRegister::kSizeInBytes == DwVfpRegister::kSizeInBytes);
+
+ int remain = field_count % (DwVfpRegister::kSizeInBytes / kPointerSize);
+ if (remain != 0) {
+ vldr(double_scratch.low(),
+ FieldMemOperand(src, (field_count - 1) * kPointerSize));
+ vstr(double_scratch.low(),
+ FieldMemOperand(dst, (field_count - 1) * kPointerSize));
}
}
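The rewritten CopyFields streams fields through a VFP double register, moving two pointer-sized words per load/store pair and finishing an odd field count with a single-word copy. A host-side sketch of the same schedule, with memcpy standing in for the vldr/vstr pairs:

#include <cstdint>
#include <cstring>

// Copy field_count pointer-sized fields from src to dst, two words at a
// time, with a single-word tail when the count is odd.
void CopyFieldsSketch(uint32_t* dst, const uint32_t* src, int field_count) {
  int pairs = field_count / 2;                     // one d-register per pair
  for (int i = 0; i < pairs; i++) {
    std::memcpy(dst + 2 * i, src + 2 * i, 2 * sizeof(uint32_t));
  }
  if (field_count % 2 != 0) {                      // one s-register tail
    dst[field_count - 1] = src[field_count - 1];
  }
}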
@@ -3220,25 +3301,24 @@ void MacroAssembler::CopyBytes(Register src,
Register dst,
Register length,
Register scratch) {
- Label align_loop, align_loop_1, word_loop, byte_loop, byte_loop_1, done;
+ Label align_loop_1, word_loop, byte_loop, byte_loop_1, done;
// Align src before copying in word size chunks.
- bind(&align_loop);
- cmp(length, Operand(0));
- b(eq, &done);
+ cmp(length, Operand(kPointerSize));
+ b(le, &byte_loop);
+
bind(&align_loop_1);
tst(src, Operand(kPointerSize - 1));
b(eq, &word_loop);
ldrb(scratch, MemOperand(src, 1, PostIndex));
strb(scratch, MemOperand(dst, 1, PostIndex));
sub(length, length, Operand(1), SetCC);
- b(ne, &byte_loop_1);
-
+ b(&align_loop_1);
// Copy bytes in word size chunks.
bind(&word_loop);
if (emit_debug_code()) {
tst(src, Operand(kPointerSize - 1));
- Assert(eq, "Expecting alignment for CopyBytes");
+ Assert(eq, kExpectingAlignmentForCopyBytes);
}
cmp(length, Operand(kPointerSize));
b(lt, &byte_loop);
@@ -3259,7 +3339,7 @@ void MacroAssembler::CopyBytes(Register src,
// Copy the last bytes if any left.
bind(&byte_loop);
- cmp(length, Operand(0));
+ cmp(length, Operand::Zero());
b(eq, &done);
bind(&byte_loop_1);
ldrb(scratch, MemOperand(src, 1, PostIndex));
@@ -3283,41 +3363,26 @@ void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
}
-void MacroAssembler::CountLeadingZeros(Register zeros, // Answer.
- Register source, // Input.
- Register scratch) {
- ASSERT(!zeros.is(source) || !source.is(scratch));
- ASSERT(!zeros.is(scratch));
- ASSERT(!scratch.is(ip));
- ASSERT(!source.is(ip));
- ASSERT(!zeros.is(ip));
-#ifdef CAN_USE_ARMV5_INSTRUCTIONS
- clz(zeros, source); // This instruction is only supported after ARM5.
-#else
- // Order of the next two lines is important: zeros register
- // can be the same as source register.
- Move(scratch, source);
- mov(zeros, Operand(0, RelocInfo::NONE));
- // Top 16.
- tst(scratch, Operand(0xffff0000));
- add(zeros, zeros, Operand(16), LeaveCC, eq);
- mov(scratch, Operand(scratch, LSL, 16), LeaveCC, eq);
- // Top 8.
- tst(scratch, Operand(0xff000000));
- add(zeros, zeros, Operand(8), LeaveCC, eq);
- mov(scratch, Operand(scratch, LSL, 8), LeaveCC, eq);
- // Top 4.
- tst(scratch, Operand(0xf0000000));
- add(zeros, zeros, Operand(4), LeaveCC, eq);
- mov(scratch, Operand(scratch, LSL, 4), LeaveCC, eq);
- // Top 2.
- tst(scratch, Operand(0xc0000000));
- add(zeros, zeros, Operand(2), LeaveCC, eq);
- mov(scratch, Operand(scratch, LSL, 2), LeaveCC, eq);
- // Top bit.
- tst(scratch, Operand(0x80000000u));
- add(zeros, zeros, Operand(1), LeaveCC, eq);
-#endif
+void MacroAssembler::CheckFor32DRegs(Register scratch) {
+ mov(scratch, Operand(ExternalReference::cpu_features()));
+ ldr(scratch, MemOperand(scratch));
+ tst(scratch, Operand(1u << VFP32DREGS));
+}
+
+
+void MacroAssembler::SaveFPRegs(Register location, Register scratch) {
+ CheckFor32DRegs(scratch);
+ vstm(db_w, location, d16, d31, ne);
+ sub(location, location, Operand(16 * kDoubleSize), LeaveCC, eq);
+ vstm(db_w, location, d0, d15);
+}
+
+
+void MacroAssembler::RestoreFPRegs(Register location, Register scratch) {
+ CheckFor32DRegs(scratch);
+ vldm(ia_w, location, d0, d15);
+ vldm(ia_w, location, d16, d31, ne);
+ add(location, location, Operand(16 * kDoubleSize), LeaveCC, eq);
}
@@ -3327,9 +3392,10 @@ void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
Register scratch1,
Register scratch2,
Label* failure) {
- int kFlatAsciiStringMask =
+ const int kFlatAsciiStringMask =
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
- int kFlatAsciiStringTag = ASCII_STRING_TYPE;
+ const int kFlatAsciiStringTag =
+ kStringTag | kOneByteStringTag | kSeqStringTag;
and_(scratch1, first, Operand(kFlatAsciiStringMask));
and_(scratch2, second, Operand(kFlatAsciiStringMask));
cmp(scratch1, Operand(kFlatAsciiStringTag));
@@ -3342,9 +3408,10 @@ void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
Register scratch,
Label* failure) {
- int kFlatAsciiStringMask =
+ const int kFlatAsciiStringMask =
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
- int kFlatAsciiStringTag = ASCII_STRING_TYPE;
+ const int kFlatAsciiStringTag =
+ kStringTag | kOneByteStringTag | kSeqStringTag;
and_(scratch, type, Operand(kFlatAsciiStringMask));
cmp(scratch, Operand(kFlatAsciiStringTag));
b(ne, failure);
@@ -3359,9 +3426,9 @@ int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
if (use_eabi_hardfloat()) {
// In the hard floating point calling convention, we can use
// all double registers to pass doubles.
- if (num_double_arguments > DoubleRegister::kNumRegisters) {
+ if (num_double_arguments > DoubleRegister::NumRegisters()) {
stack_passed_words +=
- 2 * (num_double_arguments - DoubleRegister::kNumRegisters);
+ 2 * (num_double_arguments - DoubleRegister::NumRegisters());
}
} else {
// In the soft floating point calling convention, every double
@@ -3402,8 +3469,7 @@ void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
}
-void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg) {
- ASSERT(CpuFeatures::IsSupported(VFP2));
+void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg) {
if (use_eabi_hardfloat()) {
Move(d0, dreg);
} else {
@@ -3412,9 +3478,8 @@ void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg) {
}
-void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg1,
- DoubleRegister dreg2) {
- ASSERT(CpuFeatures::IsSupported(VFP2));
+void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg1,
+ DwVfpRegister dreg2) {
if (use_eabi_hardfloat()) {
if (dreg2.is(d0)) {
ASSERT(!dreg1.is(d1));
@@ -3431,9 +3496,8 @@ void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg1,
}
-void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg,
+void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg,
Register reg) {
- ASSERT(CpuFeatures::IsSupported(VFP2));
if (use_eabi_hardfloat()) {
Move(d0, dreg);
Move(r0, reg);
@@ -3478,7 +3542,7 @@ void MacroAssembler::CallCFunctionHelper(Register function,
// Make sure that the stack is aligned before calling a C function unless
// running in the simulator. The simulator has its own alignment check which
// provides more information.
-#if defined(V8_HOST_ARCH_ARM)
+#if V8_HOST_ARCH_ARM
if (emit_debug_code()) {
int frame_alignment = OS::ActivationFrameAlignment();
int frame_alignment_mask = frame_alignment - 1;
@@ -3518,7 +3582,7 @@ void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
// Check that the instruction is a ldr reg, [pc + offset] .
and_(result, result, Operand(kLdrPCPattern));
cmp(result, Operand(kLdrPCPattern));
- Check(eq, "The instruction to patch should be a load from pc.");
+ Check(eq, kTheInstructionToPatchShouldBeALoadFromPc);
// Result was clobbered. Restore it.
ldr(result, MemOperand(ldr_location));
}
@@ -3542,6 +3606,18 @@ void MacroAssembler::CheckPageFlag(
}
+void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
+ Register scratch,
+ Label* if_deprecated) {
+ if (map->CanBeDeprecated()) {
+ mov(scratch, Operand(map));
+ ldr(scratch, FieldMemOperand(scratch, Map::kBitField3Offset));
+ tst(scratch, Operand(Smi::FromInt(Map::Deprecated::kMask)));
+ b(ne, if_deprecated);
+ }
+}
+
+
void MacroAssembler::JumpIfBlack(Register object,
Register scratch0,
Register scratch1,
@@ -3684,7 +3760,7 @@ void MacroAssembler::EnsureNotWhite(
// For ASCII (char-size of 1) we shift the smi tag away to get the length.
// For UC16 (char-size of 2) we just leave the smi tag in place, thereby
// getting the length multiplied by 2.
- ASSERT(kAsciiStringTag == 4 && kStringEncodingMask == 4);
+ ASSERT(kOneByteStringTag == 4 && kStringEncodingMask == 4);
ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
ldr(ip, FieldMemOperand(value, String::kLengthOffset));
tst(instance_type, Operand(kStringEncodingMask));
@@ -3714,24 +3790,23 @@ void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
void MacroAssembler::ClampDoubleToUint8(Register result_reg,
- DoubleRegister input_reg,
- DoubleRegister temp_double_reg) {
+ DwVfpRegister input_reg,
+ LowDwVfpRegister double_scratch) {
Label above_zero;
Label done;
Label in_bounds;
- Vmov(temp_double_reg, 0.0);
- VFPCompareAndSetFlags(input_reg, temp_double_reg);
+ VFPCompareAndSetFlags(input_reg, 0.0);
b(gt, &above_zero);
// Double value is less than zero, NaN or Inf, return 0.
- mov(result_reg, Operand(0));
+ mov(result_reg, Operand::Zero());
b(al, &done);
// Double value is >= 255, return 255.
bind(&above_zero);
- Vmov(temp_double_reg, 255.0, result_reg);
- VFPCompareAndSetFlags(input_reg, temp_double_reg);
+ Vmov(double_scratch, 255.0, result_reg);
+ VFPCompareAndSetFlags(input_reg, double_scratch);
b(le, &in_bounds);
mov(result_reg, Operand(255));
b(al, &done);
@@ -3743,8 +3818,8 @@ void MacroAssembler::ClampDoubleToUint8(Register result_reg,
// Set rounding mode to round to the nearest integer by clearing bits[23:22].
bic(result_reg, ip, Operand(kVFPRoundingModeMask));
vmsr(result_reg);
- vcvt_s32_f64(input_reg.low(), input_reg, kFPSCRRounding);
- vmov(result_reg, input_reg.low());
+ vcvt_s32_f64(double_scratch.low(), input_reg, kFPSCRRounding);
+ vmov(result_reg, double_scratch.low());
// Restore FPSCR.
vmsr(ip);
bind(&done);
@@ -3808,6 +3883,52 @@ void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
}
+void MacroAssembler::TestJSArrayForAllocationMemento(
+ Register receiver_reg,
+ Register scratch_reg,
+ Label* no_memento_found) {
+ ExternalReference new_space_start =
+ ExternalReference::new_space_start(isolate());
+ ExternalReference new_space_allocation_top =
+ ExternalReference::new_space_allocation_top_address(isolate());
+ add(scratch_reg, receiver_reg,
+ Operand(JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
+ cmp(scratch_reg, Operand(new_space_start));
+ b(lt, no_memento_found);
+ mov(ip, Operand(new_space_allocation_top));
+ ldr(ip, MemOperand(ip));
+ cmp(scratch_reg, ip);
+ b(gt, no_memento_found);
+ ldr(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize));
+ cmp(scratch_reg,
+ Operand(isolate()->factory()->allocation_memento_map()));
+}
+
+
+Register GetRegisterThatIsNotOneOf(Register reg1,
+ Register reg2,
+ Register reg3,
+ Register reg4,
+ Register reg5,
+ Register reg6) {
+ RegList regs = 0;
+ if (reg1.is_valid()) regs |= reg1.bit();
+ if (reg2.is_valid()) regs |= reg2.bit();
+ if (reg3.is_valid()) regs |= reg3.bit();
+ if (reg4.is_valid()) regs |= reg4.bit();
+ if (reg5.is_valid()) regs |= reg5.bit();
+ if (reg6.is_valid()) regs |= reg6.bit();
+
+ for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
+ Register candidate = Register::FromAllocationIndex(i);
+ if (regs & candidate.bit()) continue;
+ return candidate;
+ }
+ UNREACHABLE();
+ return no_reg;
+}
+
+
#ifdef DEBUG
bool AreAliased(Register reg1,
Register reg2,
@@ -3832,11 +3953,13 @@ bool AreAliased(Register reg1,
#endif
-CodePatcher::CodePatcher(byte* address, int instructions)
+CodePatcher::CodePatcher(byte* address,
+ int instructions,
+ FlushICache flush_cache)
: address_(address),
- instructions_(instructions),
size_(instructions * Assembler::kInstrSize),
- masm_(NULL, address, size_ + Assembler::kGap) {
+ masm_(NULL, address, size_ + Assembler::kGap),
+ flush_cache_(flush_cache) {
// Create a new macro assembler pointing to the address of the code to patch.
 // The size is adjusted with kGap in order for the assembler to generate size
// bytes of instructions without failing with buffer size constraints.
@@ -3846,7 +3969,9 @@ CodePatcher::CodePatcher(byte* address, int instructions)
CodePatcher::~CodePatcher() {
// Indicate that code has changed.
- CPU::FlushICache(address_, size_);
+ if (flush_cache_ == FLUSH) {
+ CPU::FlushICache(address_, size_);
+ }
// Check that the code was patched as expected.
ASSERT(masm_.pc_ == address_ + size_);
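
The reworked ClampDoubleToUint8 above takes a LowDwVfpRegister scratch instead of a caller-supplied temporary double register, but the value mapping itself is unchanged. Below is a minimal, hypothetical C++ sketch of that mapping, assuming the VFP round-to-nearest (ties-to-even) mode the stub selects by clearing FPSCR bits [23:22]; the helper is illustrative only and not part of the patch.

#include <cfenv>
#include <cmath>
#include <cstdint>
#include <cstdio>

// Hypothetical stand-in for the clamp semantics: NaN and anything <= 0 map
// to 0, anything >= 255 (including +Inf) maps to 255, and in-range values
// are rounded to the nearest integer, ties to even.
uint8_t ClampDoubleToUint8(double value) {
  if (!(value > 0.0)) return 0;    // Negatives, -0, NaN and -Inf.
  if (value >= 255.0) return 255;  // Large values and +Inf.
  std::fesetround(FE_TONEAREST);
  return static_cast<uint8_t>(std::nearbyint(value));
}

int main() {
  std::printf("%d %d %d %d\n",
              ClampDoubleToUint8(-3.5),   // 0
              ClampDoubleToUint8(0.5),    // 0 (ties to even)
              ClampDoubleToUint8(254.6),  // 255
              ClampDoubleToUint8(1e9));   // 255
  return 0;
}
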
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h
index e3e39a387..32471443b 100644
--- a/deps/v8/src/arm/macro-assembler-arm.h
+++ b/deps/v8/src/arm/macro-assembler-arm.h
@@ -44,29 +44,10 @@ inline MemOperand FieldMemOperand(Register object, int offset) {
}
-inline Operand SmiUntagOperand(Register object) {
- return Operand(object, ASR, kSmiTagSize);
-}
-
-
-
// Give alias names to registers
-const Register cp = { 8 }; // JavaScript context pointer
-const Register kRootRegister = { 10 }; // Roots array pointer.
-
-// Flags used for the AllocateInNewSpace functions.
-enum AllocationFlags {
- // No special flags.
- NO_ALLOCATION_FLAGS = 0,
- // Return the pointer to the allocated already tagged as a heap object.
- TAG_OBJECT = 1 << 0,
- // The content of the result register already contains the allocation top in
- // new space.
- RESULT_CONTAINS_TOP = 1 << 1,
- // Specify that the requested size of the space to allocate is specified in
- // words instead of bytes.
- SIZE_IN_WORDS = 1 << 2
-};
+const Register pp = { kRegister_r7_Code }; // Constant pool pointer.
+const Register cp = { kRegister_r8_Code }; // JavaScript context pointer.
+const Register kRootRegister = { kRegister_r10_Code }; // Roots array pointer.
// Flags used for AllocateHeapNumber
enum TaggingMode {
@@ -76,22 +57,20 @@ enum TaggingMode {
DONT_TAG_RESULT
};
-// Flags used for the ObjectToDoubleVFPRegister function.
-enum ObjectToDoubleFlags {
- // No special flags.
- NO_OBJECT_TO_DOUBLE_FLAGS = 0,
- // Object is known to be a non smi.
- OBJECT_NOT_SMI = 1 << 0,
- // Don't load NaNs or infinities, branch to the non number case instead.
- AVOID_NANS_AND_INFINITIES = 1 << 1
-};
-
enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
+Register GetRegisterThatIsNotOneOf(Register reg1,
+ Register reg2 = no_reg,
+ Register reg3 = no_reg,
+ Register reg4 = no_reg,
+ Register reg5 = no_reg,
+ Register reg6 = no_reg);
+
+
#ifdef DEBUG
bool AreAliased(Register reg1,
Register reg2,
@@ -174,11 +153,13 @@ class MacroAssembler: public Assembler {
Condition cond = al);
void Call(Label* target);
+ void Push(Register src) { push(src); }
+ void Pop(Register dst) { pop(dst); }
// Register move. May do nothing if the registers are identical.
void Move(Register dst, Handle<Object> value);
void Move(Register dst, Register src, Condition cond = al);
- void Move(DoubleRegister dst, DoubleRegister src);
+ void Move(DwVfpRegister dst, DwVfpRegister src);
// Load an object from the root table.
void LoadRoot(Register destination,
@@ -189,16 +170,6 @@ class MacroAssembler: public Assembler {
Heap::RootListIndex index,
Condition cond = al);
- void LoadHeapObject(Register dst, Handle<HeapObject> object);
-
- void LoadObject(Register result, Handle<Object> object) {
- if (object->IsHeapObject()) {
- LoadHeapObject(result, Handle<HeapObject>::cast(object));
- } else {
- Move(result, object);
- }
- }
-
// ---------------------------------------------------------------------------
// GC Support
@@ -226,6 +197,10 @@ class MacroAssembler: public Assembler {
Condition cc,
Label* condition_met);
+ void CheckMapDeprecated(Handle<Map> map,
+ Register scratch,
+ Label* if_deprecated);
+
// Check if object is in new space. Jumps if the object is not in new space.
// The register scratch can be object itself, but scratch will be clobbered.
void JumpIfNotInNewSpace(Register object,
@@ -322,6 +297,7 @@ class MacroAssembler: public Assembler {
// Push a handle.
void Push(Handle<Object> handle);
+ void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }
// Push two registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2, Condition cond = al) {
@@ -473,10 +449,23 @@ class MacroAssembler: public Assembler {
const MemOperand& dst,
Condition cond = al);
- // Clear specified FPSCR bits.
- void ClearFPSCRBits(const uint32_t bits_to_clear,
- const Register scratch,
- const Condition cond = al);
+ // Ensure that FPSCR contains values needed by JavaScript.
+ // We need the NaNModeControlBit to be sure that operations like
+ // vadd and vsub generate the Canonical NaN (if a NaN must be generated).
+ // In VFP3 it will be always the Canonical NaN.
+ // In VFP2 it will be either the Canonical NaN or the negative version
+ // of the Canonical NaN. It doesn't matter if we have two values. The aim
+ // is to be sure to never generate the hole NaN.
+ void VFPEnsureFPSCRState(Register scratch);
+
+ // If the value is a NaN, canonicalize the value; else, do nothing.
+ void VFPCanonicalizeNaN(const DwVfpRegister dst,
+ const DwVfpRegister src,
+ const Condition cond = al);
+ void VFPCanonicalizeNaN(const DwVfpRegister value,
+ const Condition cond = al) {
+ VFPCanonicalizeNaN(value, value, cond);
+ }
// Compare double values and move the result to the normal condition flags.
void VFPCompareAndSetFlags(const DwVfpRegister src1,
@@ -498,8 +487,49 @@ class MacroAssembler: public Assembler {
void Vmov(const DwVfpRegister dst,
const double imm,
- const Register scratch = no_reg,
- const Condition cond = al);
+ const Register scratch = no_reg);
+
+ void VmovHigh(Register dst, DwVfpRegister src);
+ void VmovHigh(DwVfpRegister dst, Register src);
+ void VmovLow(Register dst, DwVfpRegister src);
+ void VmovLow(DwVfpRegister dst, Register src);
+
+ // Loads the number from object into dst register.
+ // If |object| is neither smi nor heap number, |not_number| is jumped to
+ // with |object| still intact.
+ void LoadNumber(Register object,
+ LowDwVfpRegister dst,
+ Register heap_number_map,
+ Register scratch,
+ Label* not_number);
+
+ // Loads the number from object into double_dst in the double format.
+ // Control will jump to not_int32 if the value cannot be exactly represented
+ // by a 32-bit integer.
+ // Floating point values in the 32-bit integer range that are not exact
+ // integers won't be loaded.
+ void LoadNumberAsInt32Double(Register object,
+ DwVfpRegister double_dst,
+ Register heap_number_map,
+ Register scratch,
+ LowDwVfpRegister double_scratch,
+ Label* not_int32);
+
+ // Loads the number from object into dst as a 32-bit integer.
+ // Control will jump to not_int32 if the object cannot be exactly represented
+ // by a 32-bit integer.
+ // Floating point values in the 32-bit integer range that are not exact
+ // integers won't be converted.
+ void LoadNumberAsInt32(Register object,
+ Register dst,
+ Register heap_number_map,
+ Register scratch,
+ DwVfpRegister double_scratch0,
+ LowDwVfpRegister double_scratch1,
+ Label* not_int32);
+
+ // Generates function and stub prologue code.
+ void Prologue(PrologueFrameMode frame_mode);
// Enter exit frame.
// stack_space - extra stack space, used for alignment before call to C.
@@ -508,7 +538,9 @@ class MacroAssembler: public Assembler {
// Leave the current exit frame. Expects the return value in r0.
// Expect the number of values, pushed prior to the exit frame, to
// remove in a register (or no_reg, if there is nothing to remove).
- void LeaveExitFrame(bool save_doubles, Register argument_count);
+ void LeaveExitFrame(bool save_doubles,
+ Register argument_count,
+ bool restore_context);
// Get the actual activation frame alignment for target environment.
static int ActivationFrameAlignment();
@@ -533,6 +565,7 @@ class MacroAssembler: public Assembler {
bool can_have_holes);
void LoadGlobalFunction(int index, Register function);
+ void LoadArrayFunction(Register function);
// Load the initial map from the global function. The registers
// function and map can be the same, function is then overwritten.
@@ -578,6 +611,7 @@ class MacroAssembler: public Assembler {
CallKind call_kind);
void InvokeFunction(Handle<JSFunction> function,
+ const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
const CallWrapper& call_wrapper,
@@ -596,6 +630,10 @@ class MacroAssembler: public Assembler {
Register scratch,
Label* fail);
+ void IsObjectNameType(Register object,
+ Register scratch,
+ Label* fail);
+
#ifdef ENABLE_DEBUGGER_SUPPORT
// ---------------------------------------------------------------------------
// Debugger Support
@@ -679,25 +717,26 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// Allocation support
- // Allocate an object in new space. The object_size is specified
- // either in bytes or in words if the allocation flag SIZE_IN_WORDS
- // is passed. If the new space is exhausted control continues at the
- // gc_required label. The allocated object is returned in result. If
- // the flag tag_allocated_object is true the result is tagged as as
- // a heap object. All registers are clobbered also when control
- // continues at the gc_required label.
- void AllocateInNewSpace(int object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags);
- void AllocateInNewSpace(Register object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags);
+ // Allocate an object in new space or old pointer space. The object_size is
+ // specified either in bytes or in words if the allocation flag SIZE_IN_WORDS
+ // is passed. If the space is exhausted control continues at the gc_required
+ // label. The allocated object is returned in result. If the flag
+ // tag_allocated_object is true the result is tagged as a heap object.
+ // All registers are clobbered also when control continues at the gc_required
+ // label.
+ void Allocate(int object_size,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ AllocationFlags flags);
+
+ void Allocate(Register object_size,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ AllocationFlags flags);
// Undo allocation in new space. The object passed and objects allocated after
// it will no longer be allocated. The caller must make sure that no pointers
@@ -756,7 +795,10 @@ class MacroAssembler: public Assembler {
Label* gc_required);
// Copies a fixed number of fields of heap objects from src to dst.
- void CopyFields(Register dst, Register src, RegList temps, int field_count);
+ void CopyFields(Register dst,
+ Register src,
+ LowDwVfpRegister double_scratch,
+ int field_count);
// Copies a number of bytes from src to dst. All registers are clobbered. On
// exit src and dst will point to the place just after where the last byte was
@@ -827,18 +869,14 @@ class MacroAssembler: public Assembler {
// Check to see if maybe_number can be stored as a double in
// FastDoubleElements. If it can, store it at the index specified by key in
- // the FastDoubleElements array elements. Otherwise jump to fail, in which
- // case scratch2, scratch3 and scratch4 are unmodified.
+ // the FastDoubleElements array elements. Otherwise jump to fail.
void StoreNumberToDoubleElements(Register value_reg,
Register key_reg,
- Register receiver_reg,
- // All regs below here overwritten.
Register elements_reg,
Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Label* fail);
+ LowDwVfpRegister double_scratch,
+ Label* fail,
+ int elements_offset = 0);
// Compare an object's map with the specified map and its transitioned
// elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Condition flags are
@@ -847,15 +885,13 @@ class MacroAssembler: public Assembler {
void CompareMap(Register obj,
Register scratch,
Handle<Map> map,
- Label* early_success,
- CompareMapMode mode = REQUIRE_EXACT_MAP);
+ Label* early_success);
// As above, but the map of the object is already loaded into the register
// which is preserved by the code generated.
void CompareMap(Register obj_map,
Handle<Map> map,
- Label* early_success,
- CompareMapMode mode = REQUIRE_EXACT_MAP);
+ Label* early_success);
// Check if the map of an object is equal to a specified map and branch to
// label if not. Skip the smi check if not required (object is known to be a
@@ -865,8 +901,7 @@ class MacroAssembler: public Assembler {
Register scratch,
Handle<Map> map,
Label* fail,
- SmiCheckType smi_check_type,
- CompareMapMode mode = REQUIRE_EXACT_MAP);
+ SmiCheckType smi_check_type);
void CheckMap(Register obj,
@@ -893,12 +928,15 @@ class MacroAssembler: public Assembler {
// Load and check the instance type of an object for being a string.
// Loads the type into the second argument register.
- // Returns a condition that will be enabled if the object was a string.
+ // Returns a condition that will be enabled if the object was a string
+ // and the passed-in condition passed. If the passed-in condition failed
+ // then flags remain unchanged.
Condition IsObjectStringType(Register obj,
- Register type) {
- ldr(type, FieldMemOperand(obj, HeapObject::kMapOffset));
- ldrb(type, FieldMemOperand(type, Map::kInstanceTypeOffset));
- tst(type, Operand(kIsNotStringMask));
+ Register type,
+ Condition cond = al) {
+ ldr(type, FieldMemOperand(obj, HeapObject::kMapOffset), cond);
+ ldrb(type, FieldMemOperand(type, Map::kInstanceTypeOffset), cond);
+ tst(type, Operand(kIsNotStringMask), cond);
ASSERT_EQ(0, kStringTag);
return eq;
}
@@ -918,102 +956,98 @@ class MacroAssembler: public Assembler {
void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);
void GetLeastBitsFromInt32(Register dst, Register src, int num_least_bits);
- // Uses VFP instructions to Convert a Smi to a double.
- void IntegerToDoubleConversionWithVFP3(Register inReg,
- Register outHighReg,
- Register outLowReg);
+ // Load the value of a smi object into a double register.
+ // The register value must be between d0 and d15.
+ void SmiToDouble(LowDwVfpRegister value, Register smi);
+
+ // Check if a double can be exactly represented as a signed 32-bit integer.
+ // Z flag set to one if true.
+ void TestDoubleIsInt32(DwVfpRegister double_input,
+ LowDwVfpRegister double_scratch);
+
+ // Try to convert a double to a signed 32-bit integer.
+ // Z flag set to one and result assigned if the conversion is exact.
+ void TryDoubleToInt32Exact(Register result,
+ DwVfpRegister double_input,
+ LowDwVfpRegister double_scratch);
+
+ // Floors a double and writes the value to the result register.
+ // Goes to 'exact' if the conversion is exact (to be able to test -0),
+ // falls through to the calling code if an overflow occurred, else goes
+ // to 'done'. On return, input_high is loaded with the high bits of input.
+ void TryInt32Floor(Register result,
+ DwVfpRegister double_input,
+ Register input_high,
+ LowDwVfpRegister double_scratch,
+ Label* done,
+ Label* exact);
- // Load the value of a number object into a VFP double register. If the object
- // is not a number a jump to the label not_number is performed and the VFP
- // double register is unchanged.
- void ObjectToDoubleVFPRegister(
- Register object,
- DwVfpRegister value,
- Register scratch1,
- Register scratch2,
- Register heap_number_map,
- SwVfpRegister scratch3,
- Label* not_number,
- ObjectToDoubleFlags flags = NO_OBJECT_TO_DOUBLE_FLAGS);
-
- // Load the value of a smi object into a VFP double register. The register
- // scratch1 can be the same register as smi in which case smi will hold the
- // untagged value afterwards.
- void SmiToDoubleVFPRegister(Register smi,
- DwVfpRegister value,
- Register scratch1,
- SwVfpRegister scratch2);
-
- // Convert the HeapNumber pointed to by source to a 32bits signed integer
- // dest. If the HeapNumber does not fit into a 32bits signed integer branch
- // to not_int32 label. If VFP3 is available double_scratch is used but not
- // scratch2.
- void ConvertToInt32(Register source,
- Register dest,
- Register scratch,
- Register scratch2,
- DwVfpRegister double_scratch,
- Label *not_int32);
-
- // Truncates a double using a specific rounding mode, and writes the value
- // to the result register.
- // Clears the z flag (ne condition) if an overflow occurs.
- // If kCheckForInexactConversion is passed, the z flag is also cleared if the
- // conversion was inexact, i.e. if the double value could not be converted
- // exactly to a 32-bit integer.
- void EmitVFPTruncate(VFPRoundingMode rounding_mode,
- Register result,
- DwVfpRegister double_input,
- Register scratch,
- DwVfpRegister double_scratch,
- CheckForInexactConversion check
- = kDontCheckForInexactConversion);
-
- // Helper for EmitECMATruncate.
- // This will truncate a floating-point value outside of the signed 32bit
- // integer range to a 32bit signed integer.
- // Expects the double value loaded in input_high and input_low.
- // Exits with the answer in 'result'.
- // Note that this code does not work for values in the 32bit range!
- void EmitOutOfInt32RangeTruncate(Register result,
- Register input_high,
- Register input_low,
- Register scratch);
+ // Performs a truncating conversion of a floating point number as used by
+ // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
+ // succeeds, otherwise falls through if result is saturated. On return
+ // 'result' either holds the answer, or is clobbered on fall through.
+ //
+ // Only public for the test code in test-code-stubs-arm.cc.
+ void TryInlineTruncateDoubleToI(Register result,
+ DwVfpRegister input,
+ Label* done);
// Performs a truncating conversion of a floating point number as used by
// the JS bitwise operations. See ECMA-262 9.5: ToInt32.
- // Exits with 'result' holding the answer and all other registers clobbered.
- void EmitECMATruncate(Register result,
- DwVfpRegister double_input,
- SwVfpRegister single_scratch,
- Register scratch,
- Register scratch2,
- Register scratch3);
-
- // Count leading zeros in a 32 bit word. On ARM5 and later it uses the clz
- // instruction. On pre-ARM5 hardware this routine gives the wrong answer
- // for 0 (31 instead of 32). Source and scratch can be the same in which case
- // the source is clobbered. Source and zeros can also be the same in which
- // case scratch should be a different register.
- void CountLeadingZeros(Register zeros,
- Register source,
- Register scratch);
+ // Exits with 'result' holding the answer.
+ void TruncateDoubleToI(Register result, DwVfpRegister double_input);
+
+ // Performs a truncating conversion of a heap number as used by
+ // the JS bitwise operations. See ECMA-262 9.5: ToInt32. 'result' and 'input'
+ // must be different registers. Exits with 'result' holding the answer.
+ void TruncateHeapNumberToI(Register result, Register object);
+
+ // Converts the smi or heap number in object to an int32 using the rules
+ // for ToInt32 as described in ECMAScript 9.5.: the value is truncated
+ // and brought into the range -2^31 .. +2^31 - 1. 'result' and 'input' must be
+ // different registers.
+ void TruncateNumberToI(Register object,
+ Register result,
+ Register heap_number_map,
+ Register scratch1,
+ Label* not_int32);
+
+ // Check whether d16-d31 are available on the CPU. The result is given by the
+ // Z condition flag: Z==0 if d16-d31 available, Z==1 otherwise.
+ void CheckFor32DRegs(Register scratch);
+
+ // Does a runtime check for 16/32 FP registers. Either way, pushes 32 double
+ // values to location, saving [d0..(d15|d31)].
+ void SaveFPRegs(Register location, Register scratch);
+
+ // Does a runtime check for 16/32 FP registers. Either way, pops 32 double
+ // values from location, restoring [d0..(d15|d31)].
+ void RestoreFPRegs(Register location, Register scratch);
// ---------------------------------------------------------------------------
// Runtime calls
// Call a code stub.
- void CallStub(CodeStub* stub, Condition cond = al);
+ void CallStub(CodeStub* stub,
+ TypeFeedbackId ast_id = TypeFeedbackId::None(),
+ Condition cond = al);
// Call a code stub.
void TailCallStub(CodeStub* stub, Condition cond = al);
// Call a runtime routine.
- void CallRuntime(const Runtime::Function* f, int num_arguments);
- void CallRuntimeSaveDoubles(Runtime::FunctionId id);
+ void CallRuntime(const Runtime::Function* f,
+ int num_arguments,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs);
+ void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
+ const Runtime::Function* function = Runtime::FunctionForId(id);
+ CallRuntime(function, function->nargs, kSaveFPRegs);
+ }
// Convenience function: Same as above, but takes the fid instead.
- void CallRuntime(Runtime::FunctionId fid, int num_arguments);
+ void CallRuntime(Runtime::FunctionId id, int num_arguments) {
+ CallRuntime(Runtime::FunctionForId(id), num_arguments);
+ }
// Convenience function: call an external reference.
void CallExternalReference(const ExternalReference& ext,
@@ -1054,9 +1088,9 @@ class MacroAssembler: public Assembler {
// whether soft or hard floating point ABI is used. These functions
// abstract parameter passing for the three different ways we call
// C functions from generated code.
- void SetCallCDoubleArguments(DoubleRegister dreg);
- void SetCallCDoubleArguments(DoubleRegister dreg1, DoubleRegister dreg2);
- void SetCallCDoubleArguments(DoubleRegister dreg, Register reg);
+ void SetCallCDoubleArguments(DwVfpRegister dreg);
+ void SetCallCDoubleArguments(DwVfpRegister dreg1, DwVfpRegister dreg2);
+ void SetCallCDoubleArguments(DwVfpRegister dreg, Register reg);
// Calls a C function and cleans up the space for arguments allocated
// by PrepareCallCFunction. The called function is not allowed to trigger a
@@ -1072,13 +1106,19 @@ class MacroAssembler: public Assembler {
int num_reg_arguments,
int num_double_arguments);
- void GetCFunctionDoubleResult(const DoubleRegister dst);
+ void GetCFunctionDoubleResult(const DwVfpRegister dst);
// Calls an API function. Allocates HandleScope, extracts returned value
// from handle and propagates exceptions. Restores context. stack_space
// - space to be unwound on exit (includes the call JS arguments space and
// the additional space allocated for the fast call).
- void CallApiFunctionAndReturn(ExternalReference function, int stack_space);
+ void CallApiFunctionAndReturn(ExternalReference function,
+ Address function_address,
+ ExternalReference thunk_ref,
+ Register thunk_last_arg,
+ int stack_space,
+ MemOperand return_value_operand,
+ MemOperand* context_restore_operand);
// Jump to a runtime routine.
void JumpToExternalReference(const ExternalReference& builtin);
@@ -1118,15 +1158,14 @@ class MacroAssembler: public Assembler {
// Calls Abort(msg) if the condition cond is not satisfied.
// Use --debug_code to enable.
- void Assert(Condition cond, const char* msg);
- void AssertRegisterIsRoot(Register reg, Heap::RootListIndex index);
+ void Assert(Condition cond, BailoutReason reason);
void AssertFastElements(Register elements);
// Like Assert(), but always enabled.
- void Check(Condition cond, const char* msg);
+ void Check(Condition cond, BailoutReason reason);
// Print a message to stdout and abort execution.
- void Abort(const char* msg);
+ void Abort(BailoutReason msg);
// Verify restrictions about code generated in stubs.
void set_generating_stub(bool value) { generating_stub_ = value; }
@@ -1139,7 +1178,9 @@ class MacroAssembler: public Assembler {
// EABI variant for double arguments in use.
bool use_eabi_hardfloat() {
-#if USE_EABI_HARDFLOAT
+#ifdef __arm__
+ return OS::ArmUsingHardFloat();
+#elif USE_EABI_HARDFLOAT
return true;
#else
return false;
@@ -1180,18 +1221,21 @@ class MacroAssembler: public Assembler {
// Try to convert int32 to smi. If the value is too large, preserve
// the original value and jump to not_a_smi. Destroys scratch and
// sets flags.
- void TrySmiTag(Register reg, Label* not_a_smi, Register scratch) {
- mov(scratch, reg);
- SmiTag(scratch, SetCC);
+ void TrySmiTag(Register reg, Label* not_a_smi) {
+ TrySmiTag(reg, reg, not_a_smi);
+ }
+ void TrySmiTag(Register reg, Register src, Label* not_a_smi) {
+ SmiTag(ip, src, SetCC);
b(vs, not_a_smi);
- mov(reg, scratch);
+ mov(reg, ip);
}
+
void SmiUntag(Register reg, SBit s = LeaveCC) {
- mov(reg, Operand(reg, ASR, kSmiTagSize), s);
+ mov(reg, Operand::SmiUntag(reg), s);
}
void SmiUntag(Register dst, Register src, SBit s = LeaveCC) {
- mov(dst, Operand(src, ASR, kSmiTagSize), s);
+ mov(dst, Operand::SmiUntag(src), s);
}
// Untag the source value into destination and jump if source is a smi.
@@ -1202,7 +1246,14 @@ class MacroAssembler: public Assembler {
// Source and destination can be the same register.
void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);
- // Jump the register contains a smi.
+ // Test if the register contains a smi (Z == 0 (eq) if true).
+ inline void SmiTst(Register value) {
+ tst(value, Operand(kSmiTagMask));
+ }
+ inline void NonNegativeSmiTst(Register value) {
+ tst(value, Operand(kSmiTagMask | kSmiSignMask));
+ }
+ // Jump if the register contains a smi.
inline void JumpIfSmi(Register value, Label* smi_label) {
tst(value, Operand(kSmiTagMask));
b(eq, smi_label);
@@ -1221,14 +1272,15 @@ class MacroAssembler: public Assembler {
void AssertNotSmi(Register object);
void AssertSmi(Register object);
- // Abort execution if argument is a string, enabled via --debug-code.
+ // Abort execution if argument is not a string, enabled via --debug-code.
void AssertString(Register object);
- // Abort execution if argument is not the root value with the given index,
+ // Abort execution if argument is not a name, enabled via --debug-code.
+ void AssertName(Register object);
+
+ // Abort execution if reg is not the root value with the given index,
// enabled via --debug-code.
- void AssertRootValue(Register src,
- Heap::RootListIndex root_value_index,
- const char* message);
+ void AssertIsRoot(Register reg, Heap::RootListIndex index);
// ---------------------------------------------------------------------------
// HeapNumber utilities
@@ -1241,6 +1293,18 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// String utilities
+ // Generate code to do a lookup in the number string cache. If the number in
+ // the register object is found in the cache the generated code falls through
+ // with the result in the result register. The object and the result register
+ // can be the same. If the number is not found in the cache the code jumps to
+ // the label not_found with only the content of register object unchanged.
+ void LookupNumberStringCache(Register object,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* not_found);
+
// Checks if both objects are sequential ASCII strings and jumps to label
// if either is not. Assumes that neither object is a smi.
void JumpIfNonSmisNotBothSequentialAsciiStrings(Register object1,
@@ -1272,6 +1336,7 @@ class MacroAssembler: public Assembler {
Register scratch,
Label* failure);
+ void JumpIfNotUniqueName(Register reg, Label* not_unique_name);
// ---------------------------------------------------------------------------
// Patching helpers.
@@ -1285,8 +1350,8 @@ class MacroAssembler: public Assembler {
void ClampUint8(Register output_reg, Register input_reg);
void ClampDoubleToUint8(Register result_reg,
- DoubleRegister input_reg,
- DoubleRegister temp_double_reg);
+ DwVfpRegister input_reg,
+ LowDwVfpRegister double_scratch);
void LoadInstanceDescriptors(Register map, Register descriptors);
@@ -1309,6 +1374,26 @@ class MacroAssembler: public Assembler {
// in r0. Assumes that any other register can be used as a scratch.
void CheckEnumCache(Register null_value, Label* call_runtime);
+ // AllocationMemento support. Arrays may have an associated
+ // AllocationMemento object that can be checked for in order to pretransition
+ // to another type.
+ // On entry, receiver_reg should point to the array object.
+ // scratch_reg gets clobbered.
+ // If allocation info is present, condition flags are set to eq.
+ void TestJSArrayForAllocationMemento(Register receiver_reg,
+ Register scratch_reg,
+ Label* no_memento_found);
+
+ void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
+ Register scratch_reg,
+ Label* memento_found) {
+ Label no_memento_found;
+ TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
+ &no_memento_found);
+ b(eq, memento_found);
+ bind(&no_memento_found);
+ }
+
private:
void CallCFunctionHelper(Register function,
int num_reg_arguments,
@@ -1361,9 +1446,9 @@ class MacroAssembler: public Assembler {
// This handle will be patched with the code object on installation.
Handle<Object> code_object_;
- // Needs access to SafepointRegisterStackIndex for optimized frame
+ // Needs access to SafepointRegisterStackIndex for compiled frame
// traversal.
- friend class OptimizedFrame;
+ friend class StandardFrame;
};
@@ -1374,7 +1459,14 @@ class MacroAssembler: public Assembler {
// an assertion to fail.
class CodePatcher {
public:
- CodePatcher(byte* address, int instructions);
+ enum FlushICache {
+ FLUSH,
+ DONT_FLUSH
+ };
+
+ CodePatcher(byte* address,
+ int instructions,
+ FlushICache flush_cache = FLUSH);
virtual ~CodePatcher();
// Macro assembler to emit code.
@@ -1392,9 +1484,9 @@ class CodePatcher {
private:
byte* address_; // The address of the code being patched.
- int instructions_; // Number of instructions of the expected patch size.
int size_; // Number of bytes of the expected patch size.
MacroAssembler masm_; // Macro assembler used to generate the code.
+ FlushICache flush_cache_; // Whether to flush the I cache after patching.
};
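
The smi helpers now exposed in this header (SmiTst, NonNegativeSmiTst, the ip-based TrySmiTag, and SmiUntag routed through Operand::SmiUntag, i.e. an arithmetic shift right by the tag size) all rely on the 32-bit ARM smi encoding with a one-bit tag. A small self-contained C++ sketch of that encoding and of the overflow rule TrySmiTag depends on; the constants and helper names below are restated for illustration only.

#include <cstdint>
#include <cstdio>

// Illustrative 32-bit smi encoding: the low bit is the tag (0 for smis),
// the upper 31 bits hold the signed payload.
constexpr int32_t kSmiTagSize = 1;
constexpr int32_t kSmiTagMask = 1;

bool IsSmi(int32_t word) { return (word & kSmiTagMask) == 0; }

int32_t SmiUntag(int32_t word) { return word >> kSmiTagSize; }  // ASR #1

// Mirrors TrySmiTag: tagging overflows (the "vs" branch) when the value
// does not fit in 31 signed bits.
bool TrySmiTag(int32_t value, int32_t* out) {
  if (value > INT32_MAX / 2 || value < INT32_MIN / 2) return false;
  *out = value * 2;  // Same effect as LSL #1 for in-range values.
  return true;
}

int main() {
  int32_t tagged = 0;
  if (TrySmiTag(42, &tagged)) {
    std::printf("42 tags to %d, untags to %d, IsSmi=%d\n",
                tagged, SmiUntag(tagged), IsSmi(tagged));
  }
  std::printf("2^30 taggable? %d\n", TrySmiTag(1 << 30, &tagged));  // 0
  return 0;
}
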
diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.cc b/deps/v8/src/arm/regexp-macro-assembler-arm.cc
index 17b867784..cbc34e10b 100644
--- a/deps/v8/src/arm/regexp-macro-assembler-arm.cc
+++ b/deps/v8/src/arm/regexp-macro-assembler-arm.cc
@@ -27,8 +27,9 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_ARM)
+#if V8_TARGET_ARCH_ARM
+#include "cpu-profiler.h"
#include "unicode.h"
#include "log.h"
#include "code-stubs.h"
@@ -122,7 +123,7 @@ RegExpMacroAssemblerARM::RegExpMacroAssemblerARM(
int registers_to_save,
Zone* zone)
: NativeRegExpMacroAssembler(zone),
- masm_(new MacroAssembler(Isolate::Current(), NULL, kRegExpCodeSize)),
+ masm_(new MacroAssembler(zone->isolate(), NULL, kRegExpCodeSize)),
mode_(mode),
num_registers_(registers_to_save),
num_saved_registers_(registers_to_save),
@@ -133,7 +134,6 @@ RegExpMacroAssemblerARM::RegExpMacroAssemblerARM(
exit_label_() {
ASSERT_EQ(0, registers_to_save % 2);
__ jmp(&entry_label_); // We'll write the entry code later.
- EmitBacktrackConstantPool();
__ bind(&start_label_); // And then continue from here.
}
@@ -204,7 +204,7 @@ void RegExpMacroAssemblerARM::CheckAtStart(Label* on_at_start) {
Label not_at_start;
// Did we start the match at the start of the string at all?
__ ldr(r0, MemOperand(frame_pointer(), kStartIndex));
- __ cmp(r0, Operand(0, RelocInfo::NONE));
+ __ cmp(r0, Operand::Zero());
BranchOrBacktrack(ne, &not_at_start);
// If we did, are we still at the start of the input?
@@ -219,7 +219,7 @@ void RegExpMacroAssemblerARM::CheckAtStart(Label* on_at_start) {
void RegExpMacroAssemblerARM::CheckNotAtStart(Label* on_not_at_start) {
// Did we start the match at the start of the string at all?
__ ldr(r0, MemOperand(frame_pointer(), kStartIndex));
- __ cmp(r0, Operand(0, RelocInfo::NONE));
+ __ cmp(r0, Operand::Zero());
BranchOrBacktrack(ne, on_not_at_start);
// If we did, are we still at the start of the input?
__ ldr(r1, MemOperand(frame_pointer(), kInputStart));
@@ -235,54 +235,6 @@ void RegExpMacroAssemblerARM::CheckCharacterLT(uc16 limit, Label* on_less) {
}
-void RegExpMacroAssemblerARM::CheckCharacters(Vector<const uc16> str,
- int cp_offset,
- Label* on_failure,
- bool check_end_of_string) {
- if (on_failure == NULL) {
- // Instead of inlining a backtrack for each test, (re)use the global
- // backtrack target.
- on_failure = &backtrack_label_;
- }
-
- if (check_end_of_string) {
- // Is last character of required match inside string.
- CheckPosition(cp_offset + str.length() - 1, on_failure);
- }
-
- __ add(r0, end_of_input_address(), Operand(current_input_offset()));
- if (cp_offset != 0) {
- int byte_offset = cp_offset * char_size();
- __ add(r0, r0, Operand(byte_offset));
- }
-
- // r0 : Address of characters to match against str.
- int stored_high_byte = 0;
- for (int i = 0; i < str.length(); i++) {
- if (mode_ == ASCII) {
- __ ldrb(r1, MemOperand(r0, char_size(), PostIndex));
- ASSERT(str[i] <= String::kMaxAsciiCharCode);
- __ cmp(r1, Operand(str[i]));
- } else {
- __ ldrh(r1, MemOperand(r0, char_size(), PostIndex));
- uc16 match_char = str[i];
- int match_high_byte = (match_char >> 8);
- if (match_high_byte == 0) {
- __ cmp(r1, Operand(str[i]));
- } else {
- if (match_high_byte != stored_high_byte) {
- __ mov(r2, Operand(match_high_byte));
- stored_high_byte = match_high_byte;
- }
- __ add(r3, r2, Operand(match_char & 0xff));
- __ cmp(r1, r3);
- }
- }
- BranchOrBacktrack(ne, on_failure);
- }
-}
-
-
void RegExpMacroAssemblerARM::CheckGreedyLoop(Label* on_equal) {
__ ldr(r0, MemOperand(backtrack_stackpointer(), 0));
__ cmp(current_input_offset(), r0);
@@ -337,8 +289,13 @@ void RegExpMacroAssemblerARM::CheckNotBackReferenceIgnoreCase(
__ b(ne, &fail);
__ sub(r3, r3, Operand('a'));
__ cmp(r3, Operand('z' - 'a')); // Is r3 a lowercase letter?
- __ b(hi, &fail);
-
+ __ b(ls, &loop_check); // In range 'a'-'z'.
+ // Latin-1: Check for values in range [224,254] but not 247.
+ __ sub(r3, r3, Operand(224 - 'a'));
+ __ cmp(r3, Operand(254 - 224));
+ __ b(hi, &fail); // Weren't Latin-1 letters.
+ __ cmp(r3, Operand(247 - 224)); // Check for 247.
+ __ b(eq, &fail);
__ bind(&loop_check);
__ cmp(r0, r1);
@@ -375,17 +332,17 @@ void RegExpMacroAssemblerARM::CheckNotBackReferenceIgnoreCase(
// Address of current input position.
__ add(r1, current_input_offset(), Operand(end_of_input_address()));
// Isolate.
- __ mov(r3, Operand(ExternalReference::isolate_address()));
+ __ mov(r3, Operand(ExternalReference::isolate_address(isolate())));
{
AllowExternalCallThatCantCauseGC scope(masm_);
ExternalReference function =
- ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate());
+ ExternalReference::re_case_insensitive_compare_uc16(isolate());
__ CallCFunction(function, argument_count);
}
// Check if function returned non-zero for success or zero for failure.
- __ cmp(r0, Operand(0, RelocInfo::NONE));
+ __ cmp(r0, Operand::Zero());
BranchOrBacktrack(eq, on_no_match);
// On success, increment position by length of capture.
__ add(current_input_offset(), current_input_offset(), Operand(r4));
@@ -508,7 +465,7 @@ void RegExpMacroAssemblerARM::CheckBitInTable(
Handle<ByteArray> table,
Label* on_bit_set) {
__ mov(r0, Operand(table));
- if (mode_ != ASCII || kTableMask != String::kMaxAsciiCharCode) {
+ if (mode_ != ASCII || kTableMask != String::kMaxOneByteCharCode) {
__ and_(r1, current_character(), Operand(kTableSize - 1));
__ add(r1, r1, Operand(ByteArray::kHeaderSize - kHeapObjectTag));
} else {
@@ -517,7 +474,7 @@ void RegExpMacroAssemblerARM::CheckBitInTable(
Operand(ByteArray::kHeaderSize - kHeapObjectTag));
}
__ ldrb(r0, MemOperand(r0, r1));
- __ cmp(r0, Operand(0));
+ __ cmp(r0, Operand::Zero());
BranchOrBacktrack(ne, on_bit_set);
}
@@ -530,34 +487,28 @@ bool RegExpMacroAssemblerARM::CheckSpecialCharacterClass(uc16 type,
case 's':
// Match space-characters
if (mode_ == ASCII) {
- // ASCII space characters are '\t'..'\r' and ' '.
+ // One byte space characters are '\t'..'\r', ' ' and \u00a0.
Label success;
__ cmp(current_character(), Operand(' '));
__ b(eq, &success);
// Check range 0x09..0x0d
__ sub(r0, current_character(), Operand('\t'));
__ cmp(r0, Operand('\r' - '\t'));
- BranchOrBacktrack(hi, on_no_match);
+ __ b(ls, &success);
+ // \u00a0 (NBSP).
+ __ cmp(r0, Operand(0x00a0 - '\t'));
+ BranchOrBacktrack(ne, on_no_match);
__ bind(&success);
return true;
}
return false;
case 'S':
- // Match non-space characters.
- if (mode_ == ASCII) {
- // ASCII space characters are '\t'..'\r' and ' '.
- __ cmp(current_character(), Operand(' '));
- BranchOrBacktrack(eq, on_no_match);
- __ sub(r0, current_character(), Operand('\t'));
- __ cmp(r0, Operand('\r' - '\t'));
- BranchOrBacktrack(ls, on_no_match);
- return true;
- }
+ // The emitted code for generic character classes is good enough.
return false;
case 'd':
// Match ASCII digits ('0'..'9')
__ sub(r0, current_character(), Operand('0'));
- __ cmp(current_character(), Operand('9' - '0'));
+ __ cmp(r0, Operand('9' - '0'));
BranchOrBacktrack(hi, on_no_match);
return true;
case 'D':
@@ -613,7 +564,7 @@ bool RegExpMacroAssemblerARM::CheckSpecialCharacterClass(uc16 type,
ExternalReference map = ExternalReference::re_word_character_map();
__ mov(r0, Operand(map));
__ ldrb(r0, MemOperand(r0, current_character()));
- __ cmp(r0, Operand(0));
+ __ cmp(r0, Operand::Zero());
BranchOrBacktrack(eq, on_no_match);
return true;
}
@@ -627,7 +578,7 @@ bool RegExpMacroAssemblerARM::CheckSpecialCharacterClass(uc16 type,
ExternalReference map = ExternalReference::re_word_character_map();
__ mov(r0, Operand(map));
__ ldrb(r0, MemOperand(r0, current_character()));
- __ cmp(r0, Operand(0));
+ __ cmp(r0, Operand::Zero());
BranchOrBacktrack(ne, on_no_match);
if (mode_ != ASCII) {
__ bind(&done);
@@ -675,7 +626,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
// Set frame pointer in space for it if this is not a direct call
// from generated code.
__ add(frame_pointer(), sp, Operand(4 * kPointerSize));
- __ mov(r0, Operand(0, RelocInfo::NONE));
+ __ mov(r0, Operand::Zero());
__ push(r0); // Make room for success counter and initialize it to 0.
__ push(r0); // Make room for "position - 1" constant (value is irrelevant).
// Check if we have space on the stack for registers.
@@ -683,7 +634,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
Label stack_ok;
ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit(masm_->isolate());
+ ExternalReference::address_of_stack_limit(isolate());
__ mov(r0, Operand(stack_limit));
__ ldr(r0, MemOperand(r0));
__ sub(r0, sp, r0, SetCC);
@@ -700,7 +651,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
__ bind(&stack_limit_hit);
CallCheckStackGuardState(r0);
- __ cmp(r0, Operand(0, RelocInfo::NONE));
+ __ cmp(r0, Operand::Zero());
// If returned value is non-zero, we exit with the returned value as result.
__ b(ne, &return_r0);
@@ -728,7 +679,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
Label load_char_start_regexp, start_regexp;
// Load newline if index is at start, previous character otherwise.
- __ cmp(r1, Operand(0, RelocInfo::NONE));
+ __ cmp(r1, Operand::Zero());
__ b(ne, &load_char_start_regexp);
__ mov(current_character(), Operand('\n'), LeaveCC, eq);
__ jmp(&start_regexp);
@@ -834,7 +785,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
// Not a zero-length match, restart.
__ b(ne, &load_char_start_regexp);
// Offset from the end is zero if we already reached the end.
- __ cmp(current_input_offset(), Operand(0));
+ __ cmp(current_input_offset(), Operand::Zero());
__ b(eq, &exit_label_);
// Advance current position after a zero-length match.
__ add(current_input_offset(),
@@ -873,7 +824,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
SafeCallTarget(&check_preempt_label_);
CallCheckStackGuardState(r0);
- __ cmp(r0, Operand(0, RelocInfo::NONE));
+ __ cmp(r0, Operand::Zero());
// If returning non-zero, we should end execution with the given
// result as return value.
__ b(ne, &return_r0);
@@ -894,13 +845,13 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
__ PrepareCallCFunction(num_arguments, r0);
__ mov(r0, backtrack_stackpointer());
__ add(r1, frame_pointer(), Operand(kStackHighEnd));
- __ mov(r2, Operand(ExternalReference::isolate_address()));
+ __ mov(r2, Operand(ExternalReference::isolate_address(isolate())));
ExternalReference grow_stack =
- ExternalReference::re_grow_stack(masm_->isolate());
+ ExternalReference::re_grow_stack(isolate());
__ CallCFunction(grow_stack, num_arguments);
// If return NULL, we have failed to grow the stack, and
// must exit with a stack-overflow exception.
- __ cmp(r0, Operand(0, RelocInfo::NONE));
+ __ cmp(r0, Operand::Zero());
__ b(eq, &exit_with_exception);
// Otherwise use return value as new stack pointer.
__ mov(backtrack_stackpointer(), r0);
@@ -918,10 +869,9 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
CodeDesc code_desc;
masm_->GetCode(&code_desc);
- Handle<Code> code = FACTORY->NewCode(code_desc,
- Code::ComputeFlags(Code::REGEXP),
- masm_->CodeObject());
- PROFILE(Isolate::Current(), RegExpCodeCreateEvent(*code, *source));
+ Handle<Code> code = isolate()->factory()->NewCode(
+ code_desc, Code::ComputeFlags(Code::REGEXP), masm_->CodeObject());
+ PROFILE(masm_->isolate(), RegExpCodeCreateEvent(*code, *source));
return Handle<HeapObject>::cast(code);
}
@@ -987,37 +937,8 @@ void RegExpMacroAssemblerARM::PopRegister(int register_index) {
}
-static bool is_valid_memory_offset(int value) {
- if (value < 0) value = -value;
- return value < (1<<12);
-}
-
-
void RegExpMacroAssemblerARM::PushBacktrack(Label* label) {
- if (label->is_bound()) {
- int target = label->pos();
- __ mov(r0, Operand(target + Code::kHeaderSize - kHeapObjectTag));
- } else {
- int constant_offset = GetBacktrackConstantPoolEntry();
- masm_->label_at_put(label, constant_offset);
- // Reading pc-relative is based on the address 8 bytes ahead of
- // the current opcode.
- unsigned int offset_of_pc_register_read =
- masm_->pc_offset() + Assembler::kPcLoadDelta;
- int pc_offset_of_constant =
- constant_offset - offset_of_pc_register_read;
- ASSERT(pc_offset_of_constant < 0);
- if (is_valid_memory_offset(pc_offset_of_constant)) {
- Assembler::BlockConstPoolScope block_const_pool(masm_);
- __ ldr(r0, MemOperand(pc, pc_offset_of_constant));
- } else {
- // Not a 12-bit offset, so it needs to be loaded from the constant
- // pool.
- Assembler::BlockConstPoolScope block_const_pool(masm_);
- __ mov(r0, Operand(pc_offset_of_constant + Assembler::kInstrSize));
- __ ldr(r0, MemOperand(pc, r0));
- }
- }
+ __ mov_label_offset(r0, label);
Push(r0);
CheckStackLimit();
}
@@ -1104,16 +1025,34 @@ void RegExpMacroAssemblerARM::WriteStackPointerToRegister(int reg) {
// Private methods:
void RegExpMacroAssemblerARM::CallCheckStackGuardState(Register scratch) {
- static const int num_arguments = 3;
- __ PrepareCallCFunction(num_arguments, scratch);
+ __ PrepareCallCFunction(3, scratch);
+
// RegExp code frame pointer.
__ mov(r2, frame_pointer());
// Code* of self.
__ mov(r1, Operand(masm_->CodeObject()));
- // r0 becomes return address pointer.
+
+ // We need to make room for the return address on the stack.
+ int stack_alignment = OS::ActivationFrameAlignment();
+ ASSERT(IsAligned(stack_alignment, kPointerSize));
+ __ sub(sp, sp, Operand(stack_alignment));
+
+ // r0 will point to the return address, placed by DirectCEntry.
+ __ mov(r0, sp);
+
ExternalReference stack_guard_check =
- ExternalReference::re_check_stack_guard_state(masm_->isolate());
- CallCFunctionUsingStub(stack_guard_check, num_arguments);
+ ExternalReference::re_check_stack_guard_state(isolate());
+ __ mov(ip, Operand(stack_guard_check));
+ DirectCEntryStub stub;
+ stub.GenerateCall(masm_, ip);
+
+ // Drop the return address from the stack.
+ __ add(sp, sp, Operand(stack_alignment));
+
+ ASSERT(stack_alignment != 0);
+ __ ldr(sp, MemOperand(sp, 0));
+
+ __ mov(code_pointer(), Operand(masm_->CodeObject()));
}
@@ -1128,7 +1067,6 @@ int RegExpMacroAssemblerARM::CheckStackGuardState(Address* return_address,
Code* re_code,
Address re_frame) {
Isolate* isolate = frame_entry<Isolate*>(re_frame, kIsolate);
- ASSERT(isolate == Isolate::Current());
if (isolate->stack_guard()->IsStackOverflow()) {
isolate->StackOverflow();
return EXCEPTION;
@@ -1150,7 +1088,7 @@ int RegExpMacroAssemblerARM::CheckStackGuardState(Address* return_address,
Handle<String> subject(frame_entry<String*>(re_frame, kInputString));
// Current string.
- bool is_ascii = subject->IsAsciiRepresentationUnderneath();
+ bool is_ascii = subject->IsOneByteRepresentationUnderneath();
ASSERT(re_code->instruction_start() <= *return_address);
ASSERT(*return_address <=
@@ -1181,7 +1119,7 @@ int RegExpMacroAssemblerARM::CheckStackGuardState(Address* return_address,
}
// String might have changed.
- if (subject_tmp->IsAsciiRepresentation() != is_ascii) {
+ if (subject_tmp->IsOneByteRepresentation() != is_ascii) {
// If we changed between an ASCII and an UC16 string, the specialized
// code cannot be used, and we need to restart regexp matching from
// scratch (including, potentially, compiling a new version of the code).
@@ -1293,7 +1231,7 @@ void RegExpMacroAssemblerARM::Pop(Register target) {
void RegExpMacroAssemblerARM::CheckPreemption() {
// Check for preemption.
ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit(masm_->isolate());
+ ExternalReference::address_of_stack_limit(isolate());
__ mov(r0, Operand(stack_limit));
__ ldr(r0, MemOperand(r0));
__ cmp(sp, r0);
@@ -1303,7 +1241,7 @@ void RegExpMacroAssemblerARM::CheckPreemption() {
void RegExpMacroAssemblerARM::CheckStackLimit() {
ExternalReference stack_limit =
- ExternalReference::address_of_regexp_stack_limit(masm_->isolate());
+ ExternalReference::address_of_regexp_stack_limit(isolate());
__ mov(r0, Operand(stack_limit));
__ ldr(r0, MemOperand(r0));
__ cmp(backtrack_stackpointer(), Operand(r0));
@@ -1311,53 +1249,6 @@ void RegExpMacroAssemblerARM::CheckStackLimit() {
}
-void RegExpMacroAssemblerARM::EmitBacktrackConstantPool() {
- __ CheckConstPool(false, false);
- Assembler::BlockConstPoolScope block_const_pool(masm_);
- backtrack_constant_pool_offset_ = masm_->pc_offset();
- for (int i = 0; i < kBacktrackConstantPoolSize; i++) {
- __ emit(0);
- }
-
- backtrack_constant_pool_capacity_ = kBacktrackConstantPoolSize;
-}
-
-
-int RegExpMacroAssemblerARM::GetBacktrackConstantPoolEntry() {
- while (backtrack_constant_pool_capacity_ > 0) {
- int offset = backtrack_constant_pool_offset_;
- backtrack_constant_pool_offset_ += kPointerSize;
- backtrack_constant_pool_capacity_--;
- if (masm_->pc_offset() - offset < 2 * KB) {
- return offset;
- }
- }
- Label new_pool_skip;
- __ jmp(&new_pool_skip);
- EmitBacktrackConstantPool();
- __ bind(&new_pool_skip);
- int offset = backtrack_constant_pool_offset_;
- backtrack_constant_pool_offset_ += kPointerSize;
- backtrack_constant_pool_capacity_--;
- return offset;
-}
-
-
-void RegExpMacroAssemblerARM::CallCFunctionUsingStub(
- ExternalReference function,
- int num_arguments) {
- // Must pass all arguments in registers. The stub pushes on the stack.
- ASSERT(num_arguments <= 4);
- __ mov(code_pointer(), Operand(function));
- RegExpCEntryStub stub;
- __ CallStub(&stub);
- if (OS::ActivationFrameAlignment() != 0) {
- __ ldr(sp, MemOperand(sp, 0));
- }
- __ mov(code_pointer(), Operand(masm_->CodeObject()));
-}
-
-
bool RegExpMacroAssemblerARM::CanReadUnaligned() {
return CpuFeatures::IsSupported(UNALIGNED_ACCESSES) && !slow_safe();
}
@@ -1400,17 +1291,6 @@ void RegExpMacroAssemblerARM::LoadCurrentCharacterUnchecked(int cp_offset,
}
-void RegExpCEntryStub::Generate(MacroAssembler* masm_) {
- int stack_alignment = OS::ActivationFrameAlignment();
- if (stack_alignment < kPointerSize) stack_alignment = kPointerSize;
- // Stack is already aligned for call, so decrement by alignment
- // to make room for storing the link register.
- __ str(lr, MemOperand(sp, stack_alignment, NegPreIndex));
- __ mov(r0, sp);
- __ Call(r5);
- __ ldr(pc, MemOperand(sp, stack_alignment, PostIndex));
-}
-
#undef __
#endif // V8_INTERPRETED_REGEXP
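
The CheckNotBackReferenceIgnoreCase hunk above widens the inline case-insensitive letter test from ASCII to Latin-1: after OR-ing 0x20 into both characters, a match is only attempted if the lower-cased character is 'a'..'z' or falls in [0xE0, 0xFE] excluding 0xF7 (the division sign). A hypothetical C++ predicate restating that check, for illustration only:

#include <cstdint>
#include <cstdio>

// Hypothetical restatement of the widened case-insensitive letter test.
bool IsLatin1LetterAfterLowercasing(uint8_t c) {
  uint8_t lower = c | 0x20;
  if (lower >= 'a' && lower <= 'z') return true;
  return lower >= 0xE0 && lower <= 0xFE && lower != 0xF7;
}

int main() {
  std::printf("%d %d %d %d\n",
              IsLatin1LetterAfterLowercasing('K'),   // 1: ASCII letter
              IsLatin1LetterAfterLowercasing(0xC0),  // 1: lower-cases to 0xE0
              IsLatin1LetterAfterLowercasing(0xF7),  // 0: division sign
              IsLatin1LetterAfterLowercasing('7'));  // 0: not a letter
  return 0;
}
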
diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.h b/deps/v8/src/arm/regexp-macro-assembler-arm.h
index c45669ae8..8d9d515c7 100644
--- a/deps/v8/src/arm/regexp-macro-assembler-arm.h
+++ b/deps/v8/src/arm/regexp-macro-assembler-arm.h
@@ -30,6 +30,7 @@
#include "arm/assembler-arm.h"
#include "arm/assembler-arm-inl.h"
+#include "macro-assembler.h"
namespace v8 {
namespace internal {
@@ -52,10 +53,6 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
Label* on_equal);
virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
virtual void CheckCharacterLT(uc16 limit, Label* on_less);
- virtual void CheckCharacters(Vector<const uc16> str,
- int cp_offset,
- Label* on_failure,
- bool check_end_of_string);
// A "greedy loop" is a loop that is both greedy and with a simple
// body. It has a particularly simple implementation.
virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
@@ -163,9 +160,6 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
// Check whether we are exceeding the stack limit on the backtrack stack.
void CheckStackLimit();
- void EmitBacktrackConstantPool();
- int GetBacktrackConstantPoolEntry();
-
// Generate a call to CheckStackGuardState.
void CallCheckStackGuardState(Register scratch);
@@ -215,14 +209,7 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
// and increments it by a word size.
inline void Pop(Register target);
- // Calls a C function and cleans up the frame alignment done by
- // FrameAlign. The called function *is* allowed to trigger a garbage
- // collection, but may not take more than four arguments (no arguments
- // passed on the stack), and the first argument will be a pointer to the
- // return address.
- inline void CallCFunctionUsingStub(ExternalReference function,
- int num_arguments);
-
+ Isolate* isolate() const { return masm_->isolate(); }
MacroAssembler* masm_;
@@ -236,11 +223,6 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
// are always 0..num_saved_registers_-1)
int num_saved_registers_;
- // Manage a small pre-allocated pool for writing label targets
- // to for pushing backtrack addresses.
- int backtrack_constant_pool_offset_;
- int backtrack_constant_pool_capacity_;
-
// Labels used internally.
Label entry_label_;
Label start_label_;
diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc
index 5b8ba2ada..461d032b9 100644
--- a/deps/v8/src/arm/simulator-arm.cc
+++ b/deps/v8/src/arm/simulator-arm.cc
@@ -26,14 +26,15 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <stdlib.h>
-#include <math.h>
+#include <cmath>
#include <cstdarg>
#include "v8.h"
-#if defined(V8_TARGET_ARCH_ARM)
+#if V8_TARGET_ARCH_ARM
#include "disasm.h"
#include "assembler.h"
+#include "codegen.h"
#include "arm/constants-arm.h"
#include "arm/simulator-arm.h"
@@ -111,8 +112,8 @@ void ArmDebugger::Stop(Instruction* instr) {
ASSERT(msg != NULL);
// Update this stop description.
- if (isWatchedStop(code) && !watched_stops[code].desc) {
- watched_stops[code].desc = msg;
+ if (isWatchedStop(code) && !watched_stops_[code].desc) {
+ watched_stops_[code].desc = msg;
}
if (strlen(msg) > 0) {
@@ -140,8 +141,8 @@ void ArmDebugger::Stop(Instruction* instr) {
char* msg = *reinterpret_cast<char**>(sim_->get_pc()
+ Instruction::kInstrSize);
// Update this stop description.
- if (sim_->isWatchedStop(code) && !sim_->watched_stops[code].desc) {
- sim_->watched_stops[code].desc = msg;
+ if (sim_->isWatchedStop(code) && !sim_->watched_stops_[code].desc) {
+ sim_->watched_stops_[code].desc = msg;
}
// Print the stop message and code if it is not the default code.
if (code != kMaxStopCode) {
@@ -330,7 +331,7 @@ void ArmDebugger::Debug() {
PrintF("\n");
}
}
- for (int i = 0; i < kNumVFPDoubleRegisters; i++) {
+ for (int i = 0; i < DwVfpRegister::NumRegisters(); i++) {
dvalue = GetVFPDoubleRegisterValue(i);
uint64_t as_words = BitCast<uint64_t>(dvalue);
PrintF("%3s: %f 0x%08x %08x\n",
@@ -398,7 +399,7 @@ void ArmDebugger::Debug() {
int32_t words;
if (argc == next_arg) {
words = 10;
- } else if (argc == next_arg + 1) {
+ } else {
if (!GetValue(argv[next_arg], &words)) {
words = 10;
}
@@ -411,7 +412,7 @@ void ArmDebugger::Debug() {
HeapObject* obj = reinterpret_cast<HeapObject*>(*cur);
int value = *cur;
Heap* current_heap = v8::internal::Isolate::Current()->heap();
- if (current_heap->Contains(obj) || ((value & 1) == 0)) {
+ if (((value & 1) == 0) || current_heap->Contains(obj)) {
PrintF(" (");
if ((value & 1) == 0) {
PrintF("smi %d", value / 2);
@@ -720,7 +721,7 @@ void Simulator::CheckICache(v8::internal::HashMap* i_cache,
Instruction::kInstrSize) == 0);
} else {
// Cache miss. Load memory into the cache.
- memcpy(cached_line, line, CachePage::kLineLength);
+ OS::MemCopy(cached_line, line, CachePage::kLineLength);
*cache_valid_byte = CachePage::LINE_VALID;
}
}
@@ -764,14 +765,15 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
// All registers are initialized to zero to start with
// even though s_registers_ & d_registers_ share the same
// physical registers in the target.
- for (int i = 0; i < num_s_registers; i++) {
- vfp_register[i] = 0;
+ for (int i = 0; i < num_d_registers * 2; i++) {
+ vfp_registers_[i] = 0;
}
n_flag_FPSCR_ = false;
z_flag_FPSCR_ = false;
c_flag_FPSCR_ = false;
v_flag_FPSCR_ = false;
FPSCR_rounding_mode_ = RZ;
+ FPSCR_default_NaN_mode_ = true;
inv_op_vfp_flag_ = false;
div_zero_vfp_flag_ = false;
@@ -828,7 +830,10 @@ class Redirection {
Isolate* isolate = Isolate::Current();
Redirection* current = isolate->simulator_redirection();
for (; current != NULL; current = current->next_) {
- if (current->external_function_ == external_function) return current;
+ if (current->external_function_ == external_function) {
+ ASSERT_EQ(current->type(), type);
+ return current;
+ }
}
return new Redirection(external_function, type);
}
@@ -900,13 +905,19 @@ double Simulator::get_double_from_register_pair(int reg) {
double dm_val = 0.0;
// Read the bits from the unsigned integer registers_[] array
// into the double precision floating point value and return it.
- char buffer[2 * sizeof(vfp_register[0])];
- memcpy(buffer, &registers_[reg], 2 * sizeof(registers_[0]));
- memcpy(&dm_val, buffer, 2 * sizeof(registers_[0]));
+ char buffer[2 * sizeof(vfp_registers_[0])];
+ OS::MemCopy(buffer, &registers_[reg], 2 * sizeof(registers_[0]));
+ OS::MemCopy(&dm_val, buffer, 2 * sizeof(registers_[0]));
return(dm_val);
}
+void Simulator::set_register_pair_from_double(int reg, double* value) {
+ ASSERT((reg >= 0) && (reg < num_registers) && ((reg % 2) == 0));
+ memcpy(registers_ + reg, value, sizeof(*value));
+}
+
+
void Simulator::set_dw_register(int dreg, const int* dbl) {
ASSERT((dreg >= 0) && (dreg < num_d_registers));
registers_[dreg] = dbl[0];
@@ -914,6 +925,54 @@ void Simulator::set_dw_register(int dreg, const int* dbl) {
}
+void Simulator::get_d_register(int dreg, uint64_t* value) {
+ ASSERT((dreg >= 0) && (dreg < DwVfpRegister::NumRegisters()));
+ memcpy(value, vfp_registers_ + dreg * 2, sizeof(*value));
+}
+
+
+void Simulator::set_d_register(int dreg, const uint64_t* value) {
+ ASSERT((dreg >= 0) && (dreg < DwVfpRegister::NumRegisters()));
+ memcpy(vfp_registers_ + dreg * 2, value, sizeof(*value));
+}
+
+
+void Simulator::get_d_register(int dreg, uint32_t* value) {
+ ASSERT((dreg >= 0) && (dreg < DwVfpRegister::NumRegisters()));
+ memcpy(value, vfp_registers_ + dreg * 2, sizeof(*value) * 2);
+}
+
+
+void Simulator::set_d_register(int dreg, const uint32_t* value) {
+ ASSERT((dreg >= 0) && (dreg < DwVfpRegister::NumRegisters()));
+ memcpy(vfp_registers_ + dreg * 2, value, sizeof(*value) * 2);
+}
+
+
+void Simulator::get_q_register(int qreg, uint64_t* value) {
+ ASSERT((qreg >= 0) && (qreg < num_q_registers));
+ memcpy(value, vfp_registers_ + qreg * 4, sizeof(*value) * 2);
+}
+
+
+void Simulator::set_q_register(int qreg, const uint64_t* value) {
+ ASSERT((qreg >= 0) && (qreg < num_q_registers));
+ memcpy(vfp_registers_ + qreg * 4, value, sizeof(*value) * 2);
+}
+
+
+void Simulator::get_q_register(int qreg, uint32_t* value) {
+ ASSERT((qreg >= 0) && (qreg < num_q_registers));
+ memcpy(value, vfp_registers_ + qreg * 4, sizeof(*value) * 4);
+}
+
+
+void Simulator::set_q_register(int qreg, const uint32_t* value) {
+ ASSERT((qreg >= 0) && (qreg < num_q_registers));
+ memcpy(vfp_registers_ + qreg * 4, value, sizeof(*value) * 4);
+}
+
+
// Raw access to the PC register.
void Simulator::set_pc(int32_t value) {
pc_modified_ = true;
@@ -935,13 +994,13 @@ int32_t Simulator::get_pc() const {
// Getting from and setting into VFP registers.
void Simulator::set_s_register(int sreg, unsigned int value) {
ASSERT((sreg >= 0) && (sreg < num_s_registers));
- vfp_register[sreg] = value;
+ vfp_registers_[sreg] = value;
}
unsigned int Simulator::get_s_register(int sreg) const {
ASSERT((sreg >= 0) && (sreg < num_s_registers));
- return vfp_register[sreg];
+ return vfp_registers_[sreg];
}
@@ -949,12 +1008,12 @@ template<class InputType, int register_size>
void Simulator::SetVFPRegister(int reg_index, const InputType& value) {
ASSERT(reg_index >= 0);
if (register_size == 1) ASSERT(reg_index < num_s_registers);
- if (register_size == 2) ASSERT(reg_index < num_d_registers);
+ if (register_size == 2) ASSERT(reg_index < DwVfpRegister::NumRegisters());
- char buffer[register_size * sizeof(vfp_register[0])];
- memcpy(buffer, &value, register_size * sizeof(vfp_register[0]));
- memcpy(&vfp_register[reg_index * register_size], buffer,
- register_size * sizeof(vfp_register[0]));
+ char buffer[register_size * sizeof(vfp_registers_[0])];
+ OS::MemCopy(buffer, &value, register_size * sizeof(vfp_registers_[0]));
+ OS::MemCopy(&vfp_registers_[reg_index * register_size], buffer,
+ register_size * sizeof(vfp_registers_[0]));
}
@@ -962,68 +1021,33 @@ template<class ReturnType, int register_size>
ReturnType Simulator::GetFromVFPRegister(int reg_index) {
ASSERT(reg_index >= 0);
if (register_size == 1) ASSERT(reg_index < num_s_registers);
- if (register_size == 2) ASSERT(reg_index < num_d_registers);
+ if (register_size == 2) ASSERT(reg_index < DwVfpRegister::NumRegisters());
ReturnType value = 0;
- char buffer[register_size * sizeof(vfp_register[0])];
- memcpy(buffer, &vfp_register[register_size * reg_index],
- register_size * sizeof(vfp_register[0]));
- memcpy(&value, buffer, register_size * sizeof(vfp_register[0]));
+ char buffer[register_size * sizeof(vfp_registers_[0])];
+ OS::MemCopy(buffer, &vfp_registers_[register_size * reg_index],
+ register_size * sizeof(vfp_registers_[0]));
+ OS::MemCopy(&value, buffer, register_size * sizeof(vfp_registers_[0]));
return value;
}
-// For use in calls that take two double values, constructed either
-// from r0-r3 or d0 and d1.
-void Simulator::GetFpArgs(double* x, double* y) {
- if (use_eabi_hardfloat()) {
- *x = vfp_register[0];
- *y = vfp_register[1];
- } else {
- // We use a char buffer to get around the strict-aliasing rules which
- // otherwise allow the compiler to optimize away the copy.
- char buffer[sizeof(*x)];
- // Registers 0 and 1 -> x.
- memcpy(buffer, registers_, sizeof(*x));
- memcpy(x, buffer, sizeof(*x));
- // Registers 2 and 3 -> y.
- memcpy(buffer, registers_ + 2, sizeof(*y));
- memcpy(y, buffer, sizeof(*y));
- }
-}
-
-// For use in calls that take one double value, constructed either
-// from r0 and r1 or d0.
-void Simulator::GetFpArgs(double* x) {
+// Runtime FP routines take:
+// - two double arguments
+// - one double argument and zero or one integer argument.
+// All are constructed here from r0-r3 or d0, d1 and r0.
+void Simulator::GetFpArgs(double* x, double* y, int32_t* z) {
if (use_eabi_hardfloat()) {
- *x = vfp_register[0];
+ *x = get_double_from_d_register(0);
+ *y = get_double_from_d_register(1);
+ *z = get_register(0);
} else {
- // We use a char buffer to get around the strict-aliasing rules which
- // otherwise allow the compiler to optimize away the copy.
- char buffer[sizeof(*x)];
// Registers 0 and 1 -> x.
- memcpy(buffer, registers_, sizeof(*x));
- memcpy(x, buffer, sizeof(*x));
- }
-}
-
-
-// For use in calls that take one double value constructed either
-// from r0 and r1 or d0 and one integer value.
-void Simulator::GetFpArgs(double* x, int32_t* y) {
- if (use_eabi_hardfloat()) {
- *x = vfp_register[0];
- *y = registers_[1];
- } else {
- // We use a char buffer to get around the strict-aliasing rules which
- // otherwise allow the compiler to optimize away the copy.
- char buffer[sizeof(*x)];
- // Registers 0 and 1 -> x.
- memcpy(buffer, registers_, sizeof(*x));
- memcpy(x, buffer, sizeof(*x));
- // Register 2 -> y.
- memcpy(buffer, registers_ + 2, sizeof(*y));
- memcpy(y, buffer, sizeof(*y));
+ *x = get_double_from_register_pair(0);
+ // Registers 2 and 3 -> y.
+ *y = get_double_from_register_pair(2);
+ // Register 2 -> z.
+ *z = get_register(2);
}
}
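// Illustrative standalone sketch (not part of this patch): the soft-float
// path above rebuilds each double argument from a pair of 32-bit core
// registers via get_double_from_register_pair(); the host-side equivalent is
// a bitwise copy of the two register words, done with memcpy to avoid
// strict-aliasing problems.
#include <cstdint>
#include <cstring>

static double DoubleFromRegisterPair(const int32_t regs[2]) {
  double d;
  // Copy the two consecutive 32-bit register words into the double's bits.
  std::memcpy(&d, regs, sizeof(d));
  return d;
}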
@@ -1031,15 +1055,15 @@ void Simulator::GetFpArgs(double* x, int32_t* y) {
// The return value is either in r0/r1 or d0.
void Simulator::SetFpResult(const double& result) {
if (use_eabi_hardfloat()) {
- char buffer[2 * sizeof(vfp_register[0])];
- memcpy(buffer, &result, sizeof(buffer));
+ char buffer[2 * sizeof(vfp_registers_[0])];
+ OS::MemCopy(buffer, &result, sizeof(buffer));
// Copy result to d0.
- memcpy(vfp_register, buffer, sizeof(buffer));
+ OS::MemCopy(vfp_registers_, buffer, sizeof(buffer));
} else {
char buffer[2 * sizeof(registers_[0])];
- memcpy(buffer, &result, sizeof(buffer));
+ OS::MemCopy(buffer, &result, sizeof(buffer));
// Copy result to r0 and r1.
- memcpy(registers_, buffer, sizeof(buffer));
+ OS::MemCopy(registers_, buffer, sizeof(buffer));
}
}
@@ -1051,6 +1075,7 @@ void Simulator::TrashCallerSaveRegisters() {
registers_[12] = 0x50Bad4U;
}
+
// Some Operating Systems allow unaligned access on ARMv7 targets. We
// assume that unaligned accesses are not allowed unless the v8 build system
// defines the CAN_USE_UNALIGNED_ACCESSES macro to be non-zero.
@@ -1295,7 +1320,7 @@ bool Simulator::OverflowFrom(int32_t alu_out,
// Support for VFP comparisons.
void Simulator::Compute_FPSCR_Flags(double val1, double val2) {
- if (isnan(val1) || isnan(val2)) {
+ if (std::isnan(val1) || std::isnan(val2)) {
n_flag_FPSCR_ = false;
z_flag_FPSCR_ = false;
c_flag_FPSCR_ = true;
@@ -1387,7 +1412,14 @@ int32_t Simulator::GetShiftRm(Instruction* instr, bool* carry_out) {
}
case ROR: {
- UNIMPLEMENTED();
+ if (shift_amount == 0) {
+ *carry_out = c_flag_;
+ } else {
+ uint32_t left = static_cast<uint32_t>(result) >> shift_amount;
+ uint32_t right = static_cast<uint32_t>(result) << (32 - shift_amount);
+ result = right | left;
+ *carry_out = (static_cast<uint32_t>(result) >> 31) != 0;
+ }
break;
}
@@ -1459,7 +1491,14 @@ int32_t Simulator::GetShiftRm(Instruction* instr, bool* carry_out) {
}
case ROR: {
- UNIMPLEMENTED();
+ if (shift_amount == 0) {
+ *carry_out = c_flag_;
+ } else {
+ uint32_t left = static_cast<uint32_t>(result) >> shift_amount;
+ uint32_t right = static_cast<uint32_t>(result) << (32 - shift_amount);
+ result = right | left;
+ *carry_out = (static_cast<uint32_t>(result) >> 31) != 0;
+ }
break;
}
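// Standalone sketch (not part of this patch) of the rotate-right implemented
// in the two ROR cases above, assuming 1 <= shift < 32: bits shifted out on
// the right re-enter on the left, and bit 31 of the result becomes the carry.
#include <cstdint>

static uint32_t RotateRight32(uint32_t value, int shift, bool* carry_out) {
  uint32_t result = (value >> shift) | (value << (32 - shift));
  *carry_out = (result >> 31) != 0;
  return result;
}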
@@ -1496,11 +1535,11 @@ static int count_bits(int bit_vector) {
}
-void Simulator::ProcessPUW(Instruction* instr,
- int num_regs,
- int reg_size,
- intptr_t* start_address,
- intptr_t* end_address) {
+int32_t Simulator::ProcessPU(Instruction* instr,
+ int num_regs,
+ int reg_size,
+ intptr_t* start_address,
+ intptr_t* end_address) {
int rn = instr->RnValue();
int32_t rn_val = get_register(rn);
switch (instr->PUField()) {
@@ -1531,11 +1570,10 @@ void Simulator::ProcessPUW(Instruction* instr,
break;
}
}
- if (instr->HasW()) {
- set_register(rn, rn_val);
- }
+ return rn_val;
}
+
// Addressing Mode 4 - Load and Store Multiple
void Simulator::HandleRList(Instruction* instr, bool load) {
int rlist = instr->RlistValue();
@@ -1543,7 +1581,8 @@ void Simulator::HandleRList(Instruction* instr, bool load) {
intptr_t start_address = 0;
intptr_t end_address = 0;
- ProcessPUW(instr, num_regs, kPointerSize, &start_address, &end_address);
+ int32_t rn_val =
+ ProcessPU(instr, num_regs, kPointerSize, &start_address, &end_address);
intptr_t* address = reinterpret_cast<intptr_t*>(start_address);
// Catch null pointers a little earlier.
@@ -1562,6 +1601,9 @@ void Simulator::HandleRList(Instruction* instr, bool load) {
rlist >>= 1;
}
ASSERT(end_address == ((intptr_t)address) - 4);
+ if (instr->HasW()) {
+ set_register(instr->RnValue(), rn_val);
+ }
}
@@ -1584,7 +1626,8 @@ void Simulator::HandleVList(Instruction* instr) {
intptr_t start_address = 0;
intptr_t end_address = 0;
- ProcessPUW(instr, num_regs, operand_size, &start_address, &end_address);
+ int32_t rn_val =
+ ProcessPU(instr, num_regs, operand_size, &start_address, &end_address);
intptr_t* address = reinterpret_cast<intptr_t*>(start_address);
for (int reg = vd; reg < vd + num_regs; reg++) {
@@ -1599,20 +1642,27 @@ void Simulator::HandleVList(Instruction* instr) {
address += 1;
} else {
if (load) {
- set_s_register_from_sinteger(
- 2 * reg, ReadW(reinterpret_cast<int32_t>(address), instr));
- set_s_register_from_sinteger(
- 2 * reg + 1, ReadW(reinterpret_cast<int32_t>(address + 1), instr));
+ int32_t data[] = {
+ ReadW(reinterpret_cast<int32_t>(address), instr),
+ ReadW(reinterpret_cast<int32_t>(address + 1), instr)
+ };
+ double d;
+ OS::MemCopy(&d, data, 8);
+ set_d_register_from_double(reg, d);
} else {
- WriteW(reinterpret_cast<int32_t>(address),
- get_sinteger_from_s_register(2 * reg), instr);
- WriteW(reinterpret_cast<int32_t>(address + 1),
- get_sinteger_from_s_register(2 * reg + 1), instr);
+ int32_t data[2];
+ double d = get_double_from_d_register(reg);
+ OS::MemCopy(data, &d, 8);
+ WriteW(reinterpret_cast<int32_t>(address), data[0], instr);
+ WriteW(reinterpret_cast<int32_t>(address + 1), data[1], instr);
}
address += 2;
}
}
ASSERT(reinterpret_cast<intptr_t>(address) - operand_size == end_address);
+ if (instr->HasW()) {
+ set_register(instr->RnValue(), rn_val);
+ }
}
@@ -1628,18 +1678,22 @@ typedef int64_t (*SimulatorRuntimeCall)(int32_t arg0,
int32_t arg3,
int32_t arg4,
int32_t arg5);
-typedef double (*SimulatorRuntimeFPCall)(int32_t arg0,
- int32_t arg1,
- int32_t arg2,
- int32_t arg3);
+
+// These prototypes handle the four types of FP calls.
+typedef int64_t (*SimulatorRuntimeCompareCall)(double darg0, double darg1);
+typedef double (*SimulatorRuntimeFPFPCall)(double darg0, double darg1);
+typedef double (*SimulatorRuntimeFPCall)(double darg0);
+typedef double (*SimulatorRuntimeFPIntCall)(double darg0, int32_t arg0);
// This signature supports direct call in to API function native callback
// (refer to InvocationCallback in v8.h).
-typedef v8::Handle<v8::Value> (*SimulatorRuntimeDirectApiCall)(int32_t arg0);
+typedef void (*SimulatorRuntimeDirectApiCall)(int32_t arg0);
+typedef void (*SimulatorRuntimeProfilingApiCall)(int32_t arg0, int32_t arg1);
// This signature supports direct call to accessor getter callback.
-typedef v8::Handle<v8::Value> (*SimulatorRuntimeDirectGetterCall)(int32_t arg0,
- int32_t arg1);
+typedef void (*SimulatorRuntimeDirectGetterCall)(int32_t arg0, int32_t arg1);
+typedef void (*SimulatorRuntimeProfilingGetterCall)(
+ int32_t arg0, int32_t arg1, int32_t arg2);
// Software interrupt instructions are used by the simulator to call into the
// C-based V8 runtime.
@@ -1665,59 +1719,33 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
(redirection->type() == ExternalReference::BUILTIN_COMPARE_CALL) ||
(redirection->type() == ExternalReference::BUILTIN_FP_CALL) ||
(redirection->type() == ExternalReference::BUILTIN_FP_INT_CALL);
- if (use_eabi_hardfloat()) {
- // With the hard floating point calling convention, double
- // arguments are passed in VFP registers. Fetch the arguments
- // from there and call the builtin using soft floating point
- // convention.
- switch (redirection->type()) {
- case ExternalReference::BUILTIN_FP_FP_CALL:
- case ExternalReference::BUILTIN_COMPARE_CALL:
- arg0 = vfp_register[0];
- arg1 = vfp_register[1];
- arg2 = vfp_register[2];
- arg3 = vfp_register[3];
- break;
- case ExternalReference::BUILTIN_FP_CALL:
- arg0 = vfp_register[0];
- arg1 = vfp_register[1];
- break;
- case ExternalReference::BUILTIN_FP_INT_CALL:
- arg0 = vfp_register[0];
- arg1 = vfp_register[1];
- arg2 = get_register(0);
- break;
- default:
- break;
- }
- }
// This is dodgy but it works because the C entry stubs are never moved.
// See comment in codegen-arm.cc and bug 1242173.
int32_t saved_lr = get_register(lr);
intptr_t external =
reinterpret_cast<intptr_t>(redirection->external_function());
if (fp_call) {
+ double dval0, dval1; // one or two double parameters
+ int32_t ival; // zero or one integer parameter
+ int64_t iresult = 0; // integer return value
+ double dresult = 0; // double return value
+ GetFpArgs(&dval0, &dval1, &ival);
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
- SimulatorRuntimeFPCall target =
- reinterpret_cast<SimulatorRuntimeFPCall>(external);
- double dval0, dval1;
- int32_t ival;
+ SimulatorRuntimeCall generic_target =
+ reinterpret_cast<SimulatorRuntimeCall>(external);
switch (redirection->type()) {
case ExternalReference::BUILTIN_FP_FP_CALL:
case ExternalReference::BUILTIN_COMPARE_CALL:
- GetFpArgs(&dval0, &dval1);
PrintF("Call to host function at %p with args %f, %f",
- FUNCTION_ADDR(target), dval0, dval1);
+ FUNCTION_ADDR(generic_target), dval0, dval1);
break;
case ExternalReference::BUILTIN_FP_CALL:
- GetFpArgs(&dval0);
PrintF("Call to host function at %p with arg %f",
- FUNCTION_ADDR(target), dval0);
+ FUNCTION_ADDR(generic_target), dval0);
break;
case ExternalReference::BUILTIN_FP_INT_CALL:
- GetFpArgs(&dval0, &ival);
PrintF("Call to host function at %p with args %f, %d",
- FUNCTION_ADDR(target), dval0, ival);
+ FUNCTION_ADDR(generic_target), dval0, ival);
break;
default:
UNREACHABLE();
@@ -1729,57 +1757,111 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
PrintF("\n");
}
CHECK(stack_aligned);
- if (redirection->type() != ExternalReference::BUILTIN_COMPARE_CALL) {
+ switch (redirection->type()) {
+ case ExternalReference::BUILTIN_COMPARE_CALL: {
+ SimulatorRuntimeCompareCall target =
+ reinterpret_cast<SimulatorRuntimeCompareCall>(external);
+ iresult = target(dval0, dval1);
+ set_register(r0, static_cast<int32_t>(iresult));
+ set_register(r1, static_cast<int32_t>(iresult >> 32));
+ break;
+ }
+ case ExternalReference::BUILTIN_FP_FP_CALL: {
+ SimulatorRuntimeFPFPCall target =
+ reinterpret_cast<SimulatorRuntimeFPFPCall>(external);
+ dresult = target(dval0, dval1);
+ SetFpResult(dresult);
+ break;
+ }
+ case ExternalReference::BUILTIN_FP_CALL: {
SimulatorRuntimeFPCall target =
- reinterpret_cast<SimulatorRuntimeFPCall>(external);
- double result = target(arg0, arg1, arg2, arg3);
- SetFpResult(result);
- } else {
- SimulatorRuntimeCall target =
- reinterpret_cast<SimulatorRuntimeCall>(external);
- int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5);
- int32_t lo_res = static_cast<int32_t>(result);
- int32_t hi_res = static_cast<int32_t>(result >> 32);
- if (::v8::internal::FLAG_trace_sim) {
- PrintF("Returned %08x\n", lo_res);
+ reinterpret_cast<SimulatorRuntimeFPCall>(external);
+ dresult = target(dval0);
+ SetFpResult(dresult);
+ break;
+ }
+ case ExternalReference::BUILTIN_FP_INT_CALL: {
+ SimulatorRuntimeFPIntCall target =
+ reinterpret_cast<SimulatorRuntimeFPIntCall>(external);
+ dresult = target(dval0, ival);
+ SetFpResult(dresult);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+ if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
+ switch (redirection->type()) {
+ case ExternalReference::BUILTIN_COMPARE_CALL:
+ PrintF("Returned %08x\n", static_cast<int32_t>(iresult));
+ break;
+ case ExternalReference::BUILTIN_FP_FP_CALL:
+ case ExternalReference::BUILTIN_FP_CALL:
+ case ExternalReference::BUILTIN_FP_INT_CALL:
+ PrintF("Returned %f\n", dresult);
+ break;
+ default:
+ UNREACHABLE();
+ break;
}
- set_register(r0, lo_res);
- set_register(r1, hi_res);
}
} else if (redirection->type() == ExternalReference::DIRECT_API_CALL) {
+ if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
+ PrintF("Call to host function at %p args %08x",
+ reinterpret_cast<void*>(external), arg0);
+ if (!stack_aligned) {
+ PrintF(" with unaligned stack %08x\n", get_register(sp));
+ }
+ PrintF("\n");
+ }
+ CHECK(stack_aligned);
SimulatorRuntimeDirectApiCall target =
reinterpret_cast<SimulatorRuntimeDirectApiCall>(external);
+ target(arg0);
+ } else if (
+ redirection->type() == ExternalReference::PROFILING_API_CALL) {
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
- PrintF("Call to host function at %p args %08x",
- FUNCTION_ADDR(target), arg0);
+ PrintF("Call to host function at %p args %08x %08x",
+ reinterpret_cast<void*>(external), arg0, arg1);
if (!stack_aligned) {
PrintF(" with unaligned stack %08x\n", get_register(sp));
}
PrintF("\n");
}
CHECK(stack_aligned);
- v8::Handle<v8::Value> result = target(arg0);
- if (::v8::internal::FLAG_trace_sim) {
- PrintF("Returned %p\n", reinterpret_cast<void *>(*result));
+ SimulatorRuntimeProfilingApiCall target =
+ reinterpret_cast<SimulatorRuntimeProfilingApiCall>(external);
+ target(arg0, arg1);
+ } else if (
+ redirection->type() == ExternalReference::DIRECT_GETTER_CALL) {
+ if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
+ PrintF("Call to host function at %p args %08x %08x",
+ reinterpret_cast<void*>(external), arg0, arg1);
+ if (!stack_aligned) {
+ PrintF(" with unaligned stack %08x\n", get_register(sp));
+ }
+ PrintF("\n");
}
- set_register(r0, (int32_t) *result);
- } else if (redirection->type() == ExternalReference::DIRECT_GETTER_CALL) {
+ CHECK(stack_aligned);
SimulatorRuntimeDirectGetterCall target =
reinterpret_cast<SimulatorRuntimeDirectGetterCall>(external);
+ target(arg0, arg1);
+ } else if (
+ redirection->type() == ExternalReference::PROFILING_GETTER_CALL) {
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
- PrintF("Call to host function at %p args %08x %08x",
- FUNCTION_ADDR(target), arg0, arg1);
+ PrintF("Call to host function at %p args %08x %08x %08x",
+ reinterpret_cast<void*>(external), arg0, arg1, arg2);
if (!stack_aligned) {
PrintF(" with unaligned stack %08x\n", get_register(sp));
}
PrintF("\n");
}
CHECK(stack_aligned);
- v8::Handle<v8::Value> result = target(arg0, arg1);
- if (::v8::internal::FLAG_trace_sim) {
- PrintF("Returned %p\n", reinterpret_cast<void *>(*result));
- }
- set_register(r0, (int32_t) *result);
+ SimulatorRuntimeProfilingGetterCall target =
+ reinterpret_cast<SimulatorRuntimeProfilingGetterCall>(
+ external);
+ target(arg0, arg1, arg2);
} else {
// builtin call.
ASSERT(redirection->type() == ExternalReference::BUILTIN_CALL);
@@ -1787,7 +1869,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
reinterpret_cast<SimulatorRuntimeCall>(external);
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
PrintF(
- "Call to host function at %p"
+ "Call to host function at %p "
"args %08x, %08x, %08x, %08x, %08x, %08x",
FUNCTION_ADDR(target),
arg0,
@@ -1845,6 +1927,12 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
}
+double Simulator::canonicalizeNaN(double value) {
+ return (FPSCR_default_NaN_mode_ && std::isnan(value)) ?
+ FixedDoubleArray::canonical_not_the_hole_nan_as_double() : value;
+}
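// Minimal sketch (not part of this patch) of the default-NaN mode added
// above; the canonical pattern used here is an assumption, while the real
// code returns FixedDoubleArray::canonical_not_the_hole_nan_as_double().
#include <cmath>
#include <limits>

static double CanonicalizeNaNSketch(double value, bool default_nan_mode) {
  // When default-NaN mode is set in the simulated FPSCR, every NaN result is
  // replaced by one fixed quiet-NaN bit pattern.
  return (default_nan_mode && std::isnan(value))
             ? std::numeric_limits<double>::quiet_NaN()
             : value;
}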
+
+
// Stop helper functions.
bool Simulator::isStopInstruction(Instruction* instr) {
return (instr->Bits(27, 24) == 0xF) && (instr->SvcValue() >= kStopCode);
@@ -1861,14 +1949,14 @@ bool Simulator::isEnabledStop(uint32_t code) {
ASSERT(code <= kMaxStopCode);
// Unwatched stops are always enabled.
return !isWatchedStop(code) ||
- !(watched_stops[code].count & kStopDisabledBit);
+ !(watched_stops_[code].count & kStopDisabledBit);
}
void Simulator::EnableStop(uint32_t code) {
ASSERT(isWatchedStop(code));
if (!isEnabledStop(code)) {
- watched_stops[code].count &= ~kStopDisabledBit;
+ watched_stops_[code].count &= ~kStopDisabledBit;
}
}
@@ -1876,7 +1964,7 @@ void Simulator::EnableStop(uint32_t code) {
void Simulator::DisableStop(uint32_t code) {
ASSERT(isWatchedStop(code));
if (isEnabledStop(code)) {
- watched_stops[code].count |= kStopDisabledBit;
+ watched_stops_[code].count |= kStopDisabledBit;
}
}
@@ -1884,13 +1972,13 @@ void Simulator::DisableStop(uint32_t code) {
void Simulator::IncreaseStopCounter(uint32_t code) {
ASSERT(code <= kMaxStopCode);
ASSERT(isWatchedStop(code));
- if ((watched_stops[code].count & ~(1 << 31)) == 0x7fffffff) {
+ if ((watched_stops_[code].count & ~(1 << 31)) == 0x7fffffff) {
PrintF("Stop counter for code %i has overflowed.\n"
"Enabling this code and reseting the counter to 0.\n", code);
- watched_stops[code].count = 0;
+ watched_stops_[code].count = 0;
EnableStop(code);
} else {
- watched_stops[code].count++;
+ watched_stops_[code].count++;
}
}
@@ -1902,12 +1990,12 @@ void Simulator::PrintStopInfo(uint32_t code) {
PrintF("Stop not watched.");
} else {
const char* state = isEnabledStop(code) ? "Enabled" : "Disabled";
- int32_t count = watched_stops[code].count & ~kStopDisabledBit;
+ int32_t count = watched_stops_[code].count & ~kStopDisabledBit;
// Don't print the state of unused breakpoints.
if (count != 0) {
- if (watched_stops[code].desc) {
+ if (watched_stops_[code].desc) {
PrintF("stop %i - 0x%x: \t%s, \tcounter = %i, \t%s\n",
- code, code, state, count, watched_stops[code].desc);
+ code, code, state, count, watched_stops_[code].desc);
} else {
PrintF("stop %i - 0x%x: \t%s, \tcounter = %i\n",
code, code, state, count);
@@ -2487,36 +2575,148 @@ void Simulator::DecodeType3(Instruction* instr) {
break;
}
case ia_x: {
- if (instr->HasW()) {
- ASSERT(instr->Bits(5, 4) == 0x1);
-
- if (instr->Bit(22) == 0x1) { // USAT.
- int32_t sat_pos = instr->Bits(20, 16);
- int32_t sat_val = (1 << sat_pos) - 1;
- int32_t shift = instr->Bits(11, 7);
- int32_t shift_type = instr->Bit(6);
- int32_t rm_val = get_register(instr->RmValue());
- if (shift_type == 0) { // LSL
- rm_val <<= shift;
- } else { // ASR
- rm_val >>= shift;
+ if (instr->Bit(4) == 0) {
+ // Memop.
+ } else {
+ if (instr->Bit(5) == 0) {
+ switch (instr->Bits(22, 21)) {
+ case 0:
+ if (instr->Bit(20) == 0) {
+ if (instr->Bit(6) == 0) {
+ // Pkhbt.
+ uint32_t rn_val = get_register(rn);
+ uint32_t rm_val = get_register(instr->RmValue());
+ int32_t shift = instr->Bits(11, 7);
+ rm_val <<= shift;
+ set_register(rd, (rn_val & 0xFFFF) | (rm_val & 0xFFFF0000U));
+ } else {
+ // Pkhtb.
+ uint32_t rn_val = get_register(rn);
+ int32_t rm_val = get_register(instr->RmValue());
+ int32_t shift = instr->Bits(11, 7);
+ if (shift == 0) {
+ shift = 32;
+ }
+ rm_val >>= shift;
+ set_register(rd, (rn_val & 0xFFFF0000U) | (rm_val & 0xFFFF));
+ }
+ } else {
+ UNIMPLEMENTED();
+ }
+ break;
+ case 1:
+ UNIMPLEMENTED();
+ break;
+ case 2:
+ UNIMPLEMENTED();
+ break;
+ case 3: {
+ // Usat.
+ int32_t sat_pos = instr->Bits(20, 16);
+ int32_t sat_val = (1 << sat_pos) - 1;
+ int32_t shift = instr->Bits(11, 7);
+ int32_t shift_type = instr->Bit(6);
+ int32_t rm_val = get_register(instr->RmValue());
+ if (shift_type == 0) { // LSL
+ rm_val <<= shift;
+ } else { // ASR
+ rm_val >>= shift;
+ }
+ // If saturation occurs, the Q flag should be set in the CPSR.
+ // There is no Q flag yet, and no instruction (MRS) to read the
+ // CPSR directly.
+ if (rm_val > sat_val) {
+ rm_val = sat_val;
+ } else if (rm_val < 0) {
+ rm_val = 0;
+ }
+ set_register(rd, rm_val);
+ break;
+ }
}
- // If saturation occurs, the Q flag should be set in the CPSR.
- // There is no Q flag yet, and no instruction (MRS) to read the
- // CPSR directly.
- if (rm_val > sat_val) {
- rm_val = sat_val;
- } else if (rm_val < 0) {
- rm_val = 0;
+ } else {
+ switch (instr->Bits(22, 21)) {
+ case 0:
+ UNIMPLEMENTED();
+ break;
+ case 1:
+ UNIMPLEMENTED();
+ break;
+ case 2:
+ if ((instr->Bit(20) == 0) && (instr->Bits(9, 6) == 1)) {
+ if (instr->Bits(19, 16) == 0xF) {
+ // Uxtb16.
+ uint32_t rm_val = get_register(instr->RmValue());
+ int32_t rotate = instr->Bits(11, 10);
+ switch (rotate) {
+ case 0:
+ break;
+ case 1:
+ rm_val = (rm_val >> 8) | (rm_val << 24);
+ break;
+ case 2:
+ rm_val = (rm_val >> 16) | (rm_val << 16);
+ break;
+ case 3:
+ rm_val = (rm_val >> 24) | (rm_val << 8);
+ break;
+ }
+ set_register(rd,
+ (rm_val & 0xFF) | (rm_val & 0xFF0000));
+ } else {
+ UNIMPLEMENTED();
+ }
+ } else {
+ UNIMPLEMENTED();
+ }
+ break;
+ case 3:
+ if ((instr->Bit(20) == 0) && (instr->Bits(9, 6) == 1)) {
+ if (instr->Bits(19, 16) == 0xF) {
+ // Uxtb.
+ uint32_t rm_val = get_register(instr->RmValue());
+ int32_t rotate = instr->Bits(11, 10);
+ switch (rotate) {
+ case 0:
+ break;
+ case 1:
+ rm_val = (rm_val >> 8) | (rm_val << 24);
+ break;
+ case 2:
+ rm_val = (rm_val >> 16) | (rm_val << 16);
+ break;
+ case 3:
+ rm_val = (rm_val >> 24) | (rm_val << 8);
+ break;
+ }
+ set_register(rd, (rm_val & 0xFF));
+ } else {
+ // Uxtab.
+ uint32_t rn_val = get_register(rn);
+ uint32_t rm_val = get_register(instr->RmValue());
+ int32_t rotate = instr->Bits(11, 10);
+ switch (rotate) {
+ case 0:
+ break;
+ case 1:
+ rm_val = (rm_val >> 8) | (rm_val << 24);
+ break;
+ case 2:
+ rm_val = (rm_val >> 16) | (rm_val << 16);
+ break;
+ case 3:
+ rm_val = (rm_val >> 24) | (rm_val << 8);
+ break;
+ }
+ set_register(rd, rn_val + (rm_val & 0xFF));
+ }
+ } else {
+ UNIMPLEMENTED();
+ }
+ break;
}
- set_register(rd, rm_val);
- } else { // SSAT.
- UNIMPLEMENTED();
}
return;
- } else {
- Format(instr, "'memop'cond'b 'rd, ['rn], +'shift_rm");
- UNIMPLEMENTED();
}
break;
}
@@ -2669,6 +2869,7 @@ void Simulator::DecodeType7(Instruction* instr) {
// vmov :Rt = Sn
// vcvt: Dd = Sm
// vcvt: Sd = Dm
+// vcvt.f64.s32 Dd, Dd, #<fbits>
// Dd = vabs(Dm)
// Dd = vneg(Dm)
// Dd = vadd(Dn, Dm)
@@ -2705,16 +2906,25 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
// vabs
double dm_value = get_double_from_d_register(vm);
double dd_value = fabs(dm_value);
+ dd_value = canonicalizeNaN(dd_value);
set_d_register_from_double(vd, dd_value);
} else if ((instr->Opc2Value() == 0x1) && (instr->Opc3Value() == 0x1)) {
// vneg
double dm_value = get_double_from_d_register(vm);
double dd_value = -dm_value;
+ dd_value = canonicalizeNaN(dd_value);
set_d_register_from_double(vd, dd_value);
} else if ((instr->Opc2Value() == 0x7) && (instr->Opc3Value() == 0x3)) {
DecodeVCVTBetweenDoubleAndSingle(instr);
} else if ((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) {
DecodeVCVTBetweenFloatingPointAndInteger(instr);
+ } else if ((instr->Opc2Value() == 0xA) && (instr->Opc3Value() == 0x3) &&
+ (instr->Bit(8) == 1)) {
+ // vcvt.f64.s32 Dd, Dd, #<fbits>
+ int fraction_bits = 32 - ((instr->Bit(5) << 4) | instr->Bits(3, 0));
+ int fixed_value = get_sinteger_from_s_register(vd * 2);
+ double divide = 1 << fraction_bits;
+ set_d_register_from_double(vd, fixed_value / divide);
} else if (((instr->Opc2Value() >> 1) == 0x6) &&
(instr->Opc3Value() & 0x1)) {
DecodeVCVTBetweenFloatingPointAndInteger(instr);
@@ -2725,6 +2935,7 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
// vsqrt
double dm_value = get_double_from_d_register(vm);
double dd_value = sqrt(dm_value);
+ dd_value = canonicalizeNaN(dd_value);
set_d_register_from_double(vd, dd_value);
} else if (instr->Opc3Value() == 0x0) {
// vmov immediate.
@@ -2746,12 +2957,14 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
double dn_value = get_double_from_d_register(vn);
double dm_value = get_double_from_d_register(vm);
double dd_value = dn_value - dm_value;
+ dd_value = canonicalizeNaN(dd_value);
set_d_register_from_double(vd, dd_value);
} else {
// vadd
double dn_value = get_double_from_d_register(vn);
double dm_value = get_double_from_d_register(vm);
double dd_value = dn_value + dm_value;
+ dd_value = canonicalizeNaN(dd_value);
set_d_register_from_double(vd, dd_value);
}
} else if ((instr->Opc1Value() == 0x2) && !(instr->Opc3Value() & 0x1)) {
@@ -2763,7 +2976,32 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
double dn_value = get_double_from_d_register(vn);
double dm_value = get_double_from_d_register(vm);
double dd_value = dn_value * dm_value;
+ dd_value = canonicalizeNaN(dd_value);
set_d_register_from_double(vd, dd_value);
+ } else if ((instr->Opc1Value() == 0x0)) {
+ // vmla, vmls
+ const bool is_vmls = (instr->Opc3Value() & 0x1);
+
+ if (instr->SzValue() != 0x1) {
+ UNREACHABLE(); // Not used by V8.
+ }
+
+ const double dd_val = get_double_from_d_register(vd);
+ const double dn_val = get_double_from_d_register(vn);
+ const double dm_val = get_double_from_d_register(vm);
+
+ // Note: we do the mul and add/sub in separate steps to avoid getting a
+ // result with too high precision.
+ set_d_register_from_double(vd, dn_val * dm_val);
+ if (is_vmls) {
+ set_d_register_from_double(
+ vd,
+ canonicalizeNaN(dd_val - get_double_from_d_register(vd)));
+ } else {
+ set_d_register_from_double(
+ vd,
+ canonicalizeNaN(dd_val + get_double_from_d_register(vd)));
+ }
} else if ((instr->Opc1Value() == 0x4) && !(instr->Opc3Value() & 0x1)) {
// vdiv
if (instr->SzValue() != 0x1) {
@@ -2774,6 +3012,7 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
double dm_value = get_double_from_d_register(vm);
double dd_value = dn_value / dm_value;
div_zero_vfp_flag_ = (dm_value == 0);
+ dd_value = canonicalizeNaN(dd_value);
set_d_register_from_double(vd, dd_value);
} else {
UNIMPLEMENTED(); // Not used by V8.
@@ -2782,6 +3021,26 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
if ((instr->VCValue() == 0x0) &&
(instr->VAValue() == 0x0)) {
DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(instr);
+ } else if ((instr->VLValue() == 0x0) &&
+ (instr->VCValue() == 0x1) &&
+ (instr->Bit(23) == 0x0)) {
+ // vmov (ARM core register to scalar)
+ int vd = instr->Bits(19, 16) | (instr->Bit(7) << 4);
+ double dd_value = get_double_from_d_register(vd);
+ int32_t data[2];
+ OS::MemCopy(data, &dd_value, 8);
+ data[instr->Bit(21)] = get_register(instr->RtValue());
+ OS::MemCopy(&dd_value, data, 8);
+ set_d_register_from_double(vd, dd_value);
+ } else if ((instr->VLValue() == 0x1) &&
+ (instr->VCValue() == 0x1) &&
+ (instr->Bit(23) == 0x0)) {
+ // vmov (scalar to ARM core register)
+ int vn = instr->Bits(19, 16) | (instr->Bit(7) << 4);
+ double dn_value = get_double_from_d_register(vn);
+ int32_t data[2];
+ OS::MemCopy(data, &dn_value, 8);
+ set_register(instr->RtValue(), data[instr->Bit(21)]);
} else if ((instr->VLValue() == 0x1) &&
(instr->VCValue() == 0x0) &&
(instr->VAValue() == 0x7) &&
@@ -2796,6 +3055,7 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
(z_flag_FPSCR_ << 30) |
(c_flag_FPSCR_ << 29) |
(v_flag_FPSCR_ << 28) |
+ (FPSCR_default_NaN_mode_ << 25) |
(inexact_vfp_flag_ << 4) |
(underflow_vfp_flag_ << 3) |
(overflow_vfp_flag_ << 2) |
@@ -2818,6 +3078,7 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
z_flag_FPSCR_ = (rt_value >> 30) & 1;
c_flag_FPSCR_ = (rt_value >> 29) & 1;
v_flag_FPSCR_ = (rt_value >> 28) & 1;
+ FPSCR_default_NaN_mode_ = (rt_value >> 25) & 1;
inexact_vfp_flag_ = (rt_value >> 4) & 1;
underflow_vfp_flag_ = (rt_value >> 3) & 1;
overflow_vfp_flag_ = (rt_value >> 2) & 1;
@@ -2878,7 +3139,7 @@ void Simulator::DecodeVCMP(Instruction* instr) {
// Raise exceptions for quiet NaNs if necessary.
if (instr->Bit(7) == 1) {
- if (isnan(dd_value)) {
+ if (std::isnan(dd_value)) {
inv_op_vfp_flag_ = true;
}
}
@@ -3055,15 +3316,15 @@ void Simulator::DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr) {
if (src_precision == kDoublePrecision) {
if (unsigned_integer) {
- set_d_register_from_double(dst,
- static_cast<double>((uint32_t)val));
+ set_d_register_from_double(
+ dst, static_cast<double>(static_cast<uint32_t>(val)));
} else {
set_d_register_from_double(dst, static_cast<double>(val));
}
} else {
if (unsigned_integer) {
- set_s_register_from_float(dst,
- static_cast<float>((uint32_t)val));
+ set_s_register_from_float(
+ dst, static_cast<float>(static_cast<uint32_t>(val)));
} else {
set_s_register_from_float(dst, static_cast<float>(val));
}
@@ -3120,31 +3381,32 @@ void Simulator::DecodeType6CoprocessorIns(Instruction* instr) {
switch (instr->OpcodeValue()) {
case 0x2:
// Load and store double to two GP registers
- if (instr->Bits(7, 4) != 0x1) {
+ if (instr->Bits(7, 6) != 0 || instr->Bit(4) != 1) {
UNIMPLEMENTED(); // Not used by V8.
} else {
int rt = instr->RtValue();
int rn = instr->RnValue();
- int vm = instr->VmValue();
+ int vm = instr->VFPMRegValue(kDoublePrecision);
if (instr->HasL()) {
- int32_t rt_int_value = get_sinteger_from_s_register(2*vm);
- int32_t rn_int_value = get_sinteger_from_s_register(2*vm+1);
-
- set_register(rt, rt_int_value);
- set_register(rn, rn_int_value);
+ int32_t data[2];
+ double d = get_double_from_d_register(vm);
+ OS::MemCopy(data, &d, 8);
+ set_register(rt, data[0]);
+ set_register(rn, data[1]);
} else {
- int32_t rs_val = get_register(rt);
- int32_t rn_val = get_register(rn);
-
- set_s_register_from_sinteger(2*vm, rs_val);
- set_s_register_from_sinteger((2*vm+1), rn_val);
+ int32_t data[] = { get_register(rt), get_register(rn) };
+ double d;
+ OS::MemCopy(&d, data, 8);
+ set_d_register_from_double(vm, d);
}
}
break;
case 0x8:
- case 0xC: { // Load and store double to memory.
+ case 0xA:
+ case 0xC:
+ case 0xE: { // Load and store double to memory.
int rn = instr->RnValue();
- int vd = instr->VdValue();
+ int vd = instr->VFPDRegValue(kDoublePrecision);
int offset = instr->Immed8Value();
if (!instr->HasU()) {
offset = -offset;
@@ -3152,18 +3414,29 @@ void Simulator::DecodeType6CoprocessorIns(Instruction* instr) {
int32_t address = get_register(rn) + 4 * offset;
if (instr->HasL()) {
// Load double from memory: vldr.
- set_s_register_from_sinteger(2*vd, ReadW(address, instr));
- set_s_register_from_sinteger(2*vd + 1, ReadW(address + 4, instr));
+ int32_t data[] = {
+ ReadW(address, instr),
+ ReadW(address + 4, instr)
+ };
+ double val;
+ OS::MemCopy(&val, data, 8);
+ set_d_register_from_double(vd, val);
} else {
// Store double to memory: vstr.
- WriteW(address, get_sinteger_from_s_register(2*vd), instr);
- WriteW(address + 4, get_sinteger_from_s_register(2*vd + 1), instr);
+ int32_t data[2];
+ double val = get_double_from_d_register(vd);
+ OS::MemCopy(data, &val, 8);
+ WriteW(address, data[0], instr);
+ WriteW(address + 4, data[1], instr);
}
break;
}
case 0x4:
case 0x5:
+ case 0x6:
+ case 0x7:
case 0x9:
+ case 0xB:
// Load/store multiple double from memory: vldm/vstm.
HandleVList(instr);
break;
@@ -3176,6 +3449,156 @@ void Simulator::DecodeType6CoprocessorIns(Instruction* instr) {
}
+void Simulator::DecodeSpecialCondition(Instruction* instr) {
+ switch (instr->SpecialValue()) {
+ case 5:
+ if ((instr->Bits(18, 16) == 0) && (instr->Bits(11, 6) == 0x28) &&
+ (instr->Bit(4) == 1)) {
+ // vmovl signed
+ int Vd = (instr->Bit(22) << 4) | instr->VdValue();
+ int Vm = (instr->Bit(5) << 4) | instr->VmValue();
+ int imm3 = instr->Bits(21, 19);
+ if ((imm3 != 1) && (imm3 != 2) && (imm3 != 4)) UNIMPLEMENTED();
+ int esize = 8 * imm3;
+ int elements = 64 / esize;
+ int8_t from[8];
+ get_d_register(Vm, reinterpret_cast<uint64_t*>(from));
+ int16_t to[8];
+ int e = 0;
+ while (e < elements) {
+ to[e] = from[e];
+ e++;
+ }
+ set_q_register(Vd, reinterpret_cast<uint64_t*>(to));
+ } else {
+ UNIMPLEMENTED();
+ }
+ break;
+ case 7:
+ if ((instr->Bits(18, 16) == 0) && (instr->Bits(11, 6) == 0x28) &&
+ (instr->Bit(4) == 1)) {
+ // vmovl unsigned
+ int Vd = (instr->Bit(22) << 4) | instr->VdValue();
+ int Vm = (instr->Bit(5) << 4) | instr->VmValue();
+ int imm3 = instr->Bits(21, 19);
+ if ((imm3 != 1) && (imm3 != 2) && (imm3 != 4)) UNIMPLEMENTED();
+ int esize = 8 * imm3;
+ int elements = 64 / esize;
+ uint8_t from[8];
+ get_d_register(Vm, reinterpret_cast<uint64_t*>(from));
+ uint16_t to[8];
+ int e = 0;
+ while (e < elements) {
+ to[e] = from[e];
+ e++;
+ }
+ set_q_register(Vd, reinterpret_cast<uint64_t*>(to));
+ } else {
+ UNIMPLEMENTED();
+ }
+ break;
+ case 8:
+ if (instr->Bits(21, 20) == 0) {
+ // vst1
+ int Vd = (instr->Bit(22) << 4) | instr->VdValue();
+ int Rn = instr->VnValue();
+ int type = instr->Bits(11, 8);
+ int Rm = instr->VmValue();
+ int32_t address = get_register(Rn);
+ int regs = 0;
+ switch (type) {
+ case nlt_1:
+ regs = 1;
+ break;
+ case nlt_2:
+ regs = 2;
+ break;
+ case nlt_3:
+ regs = 3;
+ break;
+ case nlt_4:
+ regs = 4;
+ break;
+ default:
+ UNIMPLEMENTED();
+ break;
+ }
+ int r = 0;
+ while (r < regs) {
+ uint32_t data[2];
+ get_d_register(Vd + r, data);
+ WriteW(address, data[0], instr);
+ WriteW(address + 4, data[1], instr);
+ address += 8;
+ r++;
+ }
+ if (Rm != 15) {
+ if (Rm == 13) {
+ set_register(Rn, address);
+ } else {
+ set_register(Rn, get_register(Rn) + get_register(Rm));
+ }
+ }
+ } else if (instr->Bits(21, 20) == 2) {
+ // vld1
+ int Vd = (instr->Bit(22) << 4) | instr->VdValue();
+ int Rn = instr->VnValue();
+ int type = instr->Bits(11, 8);
+ int Rm = instr->VmValue();
+ int32_t address = get_register(Rn);
+ int regs = 0;
+ switch (type) {
+ case nlt_1:
+ regs = 1;
+ break;
+ case nlt_2:
+ regs = 2;
+ break;
+ case nlt_3:
+ regs = 3;
+ break;
+ case nlt_4:
+ regs = 4;
+ break;
+ default:
+ UNIMPLEMENTED();
+ break;
+ }
+ int r = 0;
+ while (r < regs) {
+ uint32_t data[2];
+ data[0] = ReadW(address, instr);
+ data[1] = ReadW(address + 4, instr);
+ set_d_register(Vd + r, data);
+ address += 8;
+ r++;
+ }
+ if (Rm != 15) {
+ if (Rm == 13) {
+ set_register(Rn, address);
+ } else {
+ set_register(Rn, get_register(Rn) + get_register(Rm));
+ }
+ }
+ } else {
+ UNIMPLEMENTED();
+ }
+ break;
+ case 0xA:
+ case 0xB:
+ if ((instr->Bits(22, 20) == 5) && (instr->Bits(15, 12) == 0xf)) {
+ // pld: ignore instruction.
+ } else {
+ UNIMPLEMENTED();
+ }
+ break;
+ default:
+ UNIMPLEMENTED();
+ break;
+ }
+}
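// Standalone sketch (not part of this patch) of the lane widening done by the
// "vmovl signed" case above for 8-bit elements: each of the eight narrow
// lanes of the D register is copied into a 16-bit lane of the Q register,
// with the sign extension happening in the int8_t -> int16_t assignment.
#include <cstdint>

static void WidenSigned8To16(const int8_t from[8], int16_t to[8]) {
  for (int e = 0; e < 8; e++) {
    to[e] = from[e];  // implicit sign extension
  }
}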
+
+
// Executes the current instruction.
void Simulator::InstructionDecode(Instruction* instr) {
if (v8::internal::FLAG_check_icache) {
@@ -3192,7 +3615,7 @@ void Simulator::InstructionDecode(Instruction* instr) {
PrintF(" 0x%08x %s\n", reinterpret_cast<intptr_t>(instr), buffer.start());
}
if (instr->ConditionField() == kSpecialCondition) {
- UNIMPLEMENTED();
+ DecodeSpecialCondition(instr);
} else if (ConditionallyExecute(instr)) {
switch (instr->TypeValue()) {
case 0:
@@ -3273,33 +3696,7 @@ void Simulator::Execute() {
}
-int32_t Simulator::Call(byte* entry, int argument_count, ...) {
- va_list parameters;
- va_start(parameters, argument_count);
- // Set up arguments
-
- // First four arguments passed in registers.
- ASSERT(argument_count >= 4);
- set_register(r0, va_arg(parameters, int32_t));
- set_register(r1, va_arg(parameters, int32_t));
- set_register(r2, va_arg(parameters, int32_t));
- set_register(r3, va_arg(parameters, int32_t));
-
- // Remaining arguments passed on stack.
- int original_stack = get_register(sp);
- // Compute position of stack on entry to generated code.
- int entry_stack = (original_stack - (argument_count - 4) * sizeof(int32_t));
- if (OS::ActivationFrameAlignment() != 0) {
- entry_stack &= -OS::ActivationFrameAlignment();
- }
- // Store remaining arguments on stack, from low to high memory.
- intptr_t* stack_argument = reinterpret_cast<intptr_t*>(entry_stack);
- for (int i = 4; i < argument_count; i++) {
- stack_argument[i - 4] = va_arg(parameters, int32_t);
- }
- va_end(parameters);
- set_register(sp, entry_stack);
-
+void Simulator::CallInternal(byte* entry) {
// Prepare to execute the code at entry
set_register(pc, reinterpret_cast<int32_t>(entry));
// Put down marker for end of simulation. The simulator will stop simulation
@@ -3353,6 +3750,37 @@ int32_t Simulator::Call(byte* entry, int argument_count, ...) {
set_register(r9, r9_val);
set_register(r10, r10_val);
set_register(r11, r11_val);
+}
+
+
+int32_t Simulator::Call(byte* entry, int argument_count, ...) {
+ va_list parameters;
+ va_start(parameters, argument_count);
+ // Set up arguments
+
+ // First four arguments passed in registers.
+ ASSERT(argument_count >= 4);
+ set_register(r0, va_arg(parameters, int32_t));
+ set_register(r1, va_arg(parameters, int32_t));
+ set_register(r2, va_arg(parameters, int32_t));
+ set_register(r3, va_arg(parameters, int32_t));
+
+ // Remaining arguments passed on stack.
+ int original_stack = get_register(sp);
+ // Compute position of stack on entry to generated code.
+ int entry_stack = (original_stack - (argument_count - 4) * sizeof(int32_t));
+ if (OS::ActivationFrameAlignment() != 0) {
+ entry_stack &= -OS::ActivationFrameAlignment();
+ }
+ // Store remaining arguments on stack, from low to high memory.
+ intptr_t* stack_argument = reinterpret_cast<intptr_t*>(entry_stack);
+ for (int i = 4; i < argument_count; i++) {
+ stack_argument[i - 4] = va_arg(parameters, int32_t);
+ }
+ va_end(parameters);
+ set_register(sp, entry_stack);
+
+ CallInternal(entry);
// Pop stack passed arguments.
CHECK_EQ(entry_stack, get_register(sp));
@@ -3363,6 +3791,35 @@ int32_t Simulator::Call(byte* entry, int argument_count, ...) {
}
+void Simulator::CallFP(byte* entry, double d0, double d1) {
+ if (use_eabi_hardfloat()) {
+ set_d_register_from_double(0, d0);
+ set_d_register_from_double(1, d1);
+ } else {
+ set_register_pair_from_double(0, &d0);
+ set_register_pair_from_double(2, &d1);
+ }
+ CallInternal(entry);
+}
+
+
+int32_t Simulator::CallFPReturnsInt(byte* entry, double d0, double d1) {
+ CallFP(entry, d0, d1);
+ int32_t result = get_register(r0);
+ return result;
+}
+
+
+double Simulator::CallFPReturnsDouble(byte* entry, double d0, double d1) {
+ CallFP(entry, d0, d1);
+ if (use_eabi_hardfloat()) {
+ return get_double_from_d_register(0);
+ } else {
+ return get_double_from_register_pair(0);
+ }
+}
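// Hypothetical usage sketch (identifiers assumed, not taken from this patch):
// driving generated two-double code through the new CallFP entry points.
//
//   Simulator* sim = Simulator::current(isolate);
//   double d = sim->CallFPReturnsDouble(FUNCTION_ADDR(fp_entry), 1.5, 2.5);
//   int32_t c = sim->CallFPReturnsInt(FUNCTION_ADDR(compare_entry), 1.5, 2.5);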
+
+
uintptr_t Simulator::PushAddress(uintptr_t address) {
int new_sp = get_register(sp) - sizeof(uintptr_t);
uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(new_sp);
diff --git a/deps/v8/src/arm/simulator-arm.h b/deps/v8/src/arm/simulator-arm.h
index abc91bbc4..e392c5cb3 100644
--- a/deps/v8/src/arm/simulator-arm.h
+++ b/deps/v8/src/arm/simulator-arm.h
@@ -142,7 +142,12 @@ class Simulator {
num_s_registers = 32,
d0 = 0, d1, d2, d3, d4, d5, d6, d7,
d8, d9, d10, d11, d12, d13, d14, d15,
- num_d_registers = 16
+ d16, d17, d18, d19, d20, d21, d22, d23,
+ d24, d25, d26, d27, d28, d29, d30, d31,
+ num_d_registers = 32,
+ q0 = 0, q1, q2, q3, q4, q5, q6, q7,
+ q8, q9, q10, q11, q12, q13, q14, q15,
+ num_q_registers = 16
};
explicit Simulator(Isolate* isolate);
@@ -158,9 +163,19 @@ class Simulator {
void set_register(int reg, int32_t value);
int32_t get_register(int reg) const;
double get_double_from_register_pair(int reg);
+ void set_register_pair_from_double(int reg, double* value);
void set_dw_register(int dreg, const int* dbl);
// Support for VFP.
+ void get_d_register(int dreg, uint64_t* value);
+ void set_d_register(int dreg, const uint64_t* value);
+ void get_d_register(int dreg, uint32_t* value);
+ void set_d_register(int dreg, const uint32_t* value);
+ void get_q_register(int qreg, uint64_t* value);
+ void set_q_register(int qreg, const uint64_t* value);
+ void get_q_register(int qreg, uint32_t* value);
+ void set_q_register(int qreg, const uint32_t* value);
+
void set_s_register(int reg, unsigned int value);
unsigned int get_s_register(int reg) const;
@@ -205,6 +220,10 @@ class Simulator {
// generated RegExp code with 7 parameters. This is a convenience function,
// which sets up the simulator state and grabs the result on return.
int32_t Call(byte* entry, int argument_count, ...);
+ // Alternative: call a 2-argument double function.
+ void CallFP(byte* entry, double d0, double d1);
+ int32_t CallFPReturnsInt(byte* entry, double d0, double d1);
+ double CallFPReturnsDouble(byte* entry, double d0, double d1);
// Push an address onto the JS stack.
uintptr_t PushAddress(uintptr_t address);
@@ -270,15 +289,16 @@ class Simulator {
// Support for VFP.
void Compute_FPSCR_Flags(double val1, double val2);
void Copy_FPSCR_to_APSR();
+ inline double canonicalizeNaN(double value);
// Helper functions to decode common "addressing" modes
int32_t GetShiftRm(Instruction* instr, bool* carry_out);
int32_t GetImm(Instruction* instr, bool* carry_out);
- void ProcessPUW(Instruction* instr,
- int num_regs,
- int operand_size,
- intptr_t* start_address,
- intptr_t* end_address);
+ int32_t ProcessPU(Instruction* instr,
+ int num_regs,
+ int operand_size,
+ intptr_t* start_address,
+ intptr_t* end_address);
void HandleRList(Instruction* instr, bool load);
void HandleVList(Instruction* inst);
void SoftwareInterrupt(Instruction* instr);
@@ -323,6 +343,7 @@ class Simulator {
// Support for VFP.
void DecodeTypeVFP(Instruction* instr);
void DecodeType6CoprocessorIns(Instruction* instr);
+ void DecodeSpecialCondition(Instruction* instr);
void DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instruction* instr);
void DecodeVCMP(Instruction* instr);
@@ -343,10 +364,8 @@ class Simulator {
void* external_function,
v8::internal::ExternalReference::Type type);
- // For use in calls that take double value arguments.
- void GetFpArgs(double* x, double* y);
- void GetFpArgs(double* x);
- void GetFpArgs(double* x, int32_t* y);
+ // Handle arguments and return value for runtime FP functions.
+ void GetFpArgs(double* x, double* y, int32_t* z);
void SetFpResult(const double& result);
void TrashCallerSaveRegisters();
@@ -356,6 +375,8 @@ class Simulator {
template<class InputType, int register_size>
void SetVFPRegister(int reg_index, const InputType& value);
+ void CallInternal(byte* entry);
+
// Architecture state.
// Saturating instructions require a Q flag to indicate saturation.
// There is currently no way to read the CPSR directly, and thus read the Q
@@ -367,7 +388,7 @@ class Simulator {
bool v_flag_;
// VFP architecture state.
- unsigned int vfp_register[num_s_registers];
+ unsigned int vfp_registers_[num_d_registers * 2];
bool n_flag_FPSCR_;
bool z_flag_FPSCR_;
bool c_flag_FPSCR_;
@@ -375,6 +396,7 @@ class Simulator {
// VFP rounding mode. See ARM DDI 0406B Page A2-29.
VFPRoundingMode FPSCR_rounding_mode_;
+ bool FPSCR_default_NaN_mode_;
// VFP FP exception flags architecture state.
bool inv_op_vfp_flag_;
@@ -408,14 +430,14 @@ class Simulator {
static const uint32_t kStopDisabledBit = 1 << 31;
// A stop is enabled, meaning the simulator will stop when meeting the
- // instruction, if bit 31 of watched_stops[code].count is unset.
- // The value watched_stops[code].count & ~(1 << 31) indicates how many times
+ // instruction, if bit 31 of watched_stops_[code].count is unset.
+ // The value watched_stops_[code].count & ~(1 << 31) indicates how many times
// the breakpoint was hit or gone through.
struct StopCountAndDesc {
uint32_t count;
char* desc;
};
- StopCountAndDesc watched_stops[kNumOfWatchedStops];
+ StopCountAndDesc watched_stops_[kNumOfWatchedStops];
};
@@ -425,6 +447,10 @@ class Simulator {
reinterpret_cast<Object*>(Simulator::current(Isolate::Current())->Call( \
FUNCTION_ADDR(entry), 5, p0, p1, p2, p3, p4))
+#define CALL_GENERATED_FP_INT(entry, p0, p1) \
+ Simulator::current(Isolate::Current())->CallFPReturnsInt( \
+ FUNCTION_ADDR(entry), p0, p1)
+
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
Simulator::current(Isolate::Current())->Call( \
entry, 10, p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8)
diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc
index 9fc39d4ad..923011fe0 100644
--- a/deps/v8/src/arm/stub-cache-arm.cc
+++ b/deps/v8/src/arm/stub-cache-arm.cc
@@ -27,7 +27,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_ARM)
+#if V8_TARGET_ARCH_ARM
#include "ic-inl.h"
#include "codegen.h"
@@ -102,12 +102,7 @@ static void ProbeTable(Isolate* isolate,
uint32_t mask = Code::kFlagsNotUsedInLookup;
ASSERT(__ ImmediateFitsAddrMode1Instruction(mask));
__ bic(flags_reg, flags_reg, Operand(mask));
- // Using cmn and the negative instead of cmp means we can use movw.
- if (flags < 0) {
- __ cmn(flags_reg, Operand(-flags));
- } else {
- __ cmp(flags_reg, Operand(flags));
- }
+ __ cmp(flags_reg, Operand(flags));
__ b(ne, &miss);
#ifdef DEBUG
@@ -126,18 +121,14 @@ static void ProbeTable(Isolate* isolate,
}
-// Helper function used to check that the dictionary doesn't contain
-// the property. This function may return false negatives, so miss_label
-// must always call a backup property check that is complete.
-// This function is safe to call if the receiver has fast properties.
-// Name must be a symbol and receiver must be a heap object.
-static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
- Label* miss_label,
- Register receiver,
- Handle<String> name,
- Register scratch0,
- Register scratch1) {
- ASSERT(name->IsSymbol());
+void StubCompiler::GenerateDictionaryNegativeLookup(MacroAssembler* masm,
+ Label* miss_label,
+ Register receiver,
+ Handle<Name> name,
+ Register scratch0,
+ Register scratch1) {
+ ASSERT(name->IsUniqueName());
+ ASSERT(!receiver.is(scratch0));
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1);
__ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
@@ -173,13 +164,13 @@ static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
__ ldr(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- StringDictionaryLookupStub::GenerateNegativeLookup(masm,
- miss_label,
- &done,
- receiver,
- properties,
- name,
- scratch1);
+ NameDictionaryLookupStub::GenerateNegativeLookup(masm,
+ miss_label,
+ &done,
+ receiver,
+ properties,
+ name,
+ scratch1);
__ bind(&done);
__ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
}
@@ -228,7 +219,7 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
__ JumpIfSmi(receiver, &miss);
// Get the map of the receiver and compute the hash.
- __ ldr(scratch, FieldMemOperand(name, String::kHashFieldOffset));
+ __ ldr(scratch, FieldMemOperand(name, Name::kHashFieldOffset));
__ ldr(ip, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ add(scratch, scratch, Operand(ip));
uint32_t mask = kPrimaryTableSize - 1;
@@ -320,26 +311,21 @@ void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
}
-// Load a fast property out of a holder object (src). In-object properties
-// are loaded directly otherwise the property is loaded from the properties
-// fixed array.
void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
Register dst,
Register src,
- Handle<JSObject> holder,
- int index) {
- // Adjust for the number of properties stored in the holder.
- index -= holder->map()->inobject_properties();
- if (index < 0) {
- // Get the property straight out of the holder.
- int offset = holder->map()->instance_size() + (index * kPointerSize);
- __ ldr(dst, FieldMemOperand(src, offset));
- } else {
+ bool inobject,
+ int index,
+ Representation representation) {
+ ASSERT(!FLAG_track_double_fields || !representation.IsDouble());
+ int offset = index * kPointerSize;
+ if (!inobject) {
// Calculate the offset into the properties array.
- int offset = index * kPointerSize + FixedArray::kHeaderSize;
+ offset = offset + FixedArray::kHeaderSize;
__ ldr(dst, FieldMemOperand(src, JSObject::kPropertiesOffset));
- __ ldr(dst, FieldMemOperand(dst, offset));
+ src = dst;
}
+ __ ldr(dst, FieldMemOperand(src, offset));
}
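The rewritten GenerateFastPropertyLoad no longer derives the field location from the holder object; the caller passes an explicit (inobject, index) pair plus the field's Representation. The sketch below models only the offset arithmetic visible in the hunk; the constants are assumed values for a 32-bit target, not taken from this patch.

// Standalone sketch of the offset computation above.
#include <cstdio>

constexpr int kPointerSize = 4;           // 32-bit ARM in this file
constexpr int kFixedArrayHeaderSize = 8;  // assumed header size for the sketch

int FieldByteOffset(bool inobject, int index) {
  int offset = index * kPointerSize;
  if (!inobject) {
    // Out-of-line property: it lives in the properties FixedArray, past its header.
    offset += kFixedArrayHeaderSize;
  }
  return offset;
}

int main() {
  std::printf("in-object field #2: %d bytes\n", FieldByteOffset(true, 2));   // 8
  std::printf("backing-store  #2: %d bytes\n", FieldByteOffset(false, 2));   // 16
  return 0;
}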
@@ -390,31 +376,27 @@ void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
Register receiver,
Register scratch1,
Register scratch2,
- Label* miss,
- bool support_wrappers) {
+ Label* miss) {
Label check_wrapper;
// Check if the object is a string leaving the instance type in the
// scratch1 register.
- GenerateStringCheck(masm, receiver, scratch1, scratch2, miss,
- support_wrappers ? &check_wrapper : miss);
+ GenerateStringCheck(masm, receiver, scratch1, scratch2, miss, &check_wrapper);
// Load length directly from the string.
__ ldr(r0, FieldMemOperand(receiver, String::kLengthOffset));
__ Ret();
- if (support_wrappers) {
- // Check if the object is a JSValue wrapper.
- __ bind(&check_wrapper);
- __ cmp(scratch1, Operand(JS_VALUE_TYPE));
- __ b(ne, miss);
+ // Check if the object is a JSValue wrapper.
+ __ bind(&check_wrapper);
+ __ cmp(scratch1, Operand(JS_VALUE_TYPE));
+ __ b(ne, miss);
- // Unwrap the value and check if the wrapped value is a string.
- __ ldr(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset));
- GenerateStringCheck(masm, scratch1, scratch2, scratch2, miss, miss);
- __ ldr(r0, FieldMemOperand(scratch1, String::kLengthOffset));
- __ Ret();
- }
+ // Unwrap the value and check if the wrapped value is a string.
+ __ ldr(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset));
+ GenerateStringCheck(masm, scratch1, scratch2, scratch2, miss, miss);
+ __ ldr(r0, FieldMemOperand(scratch1, String::kLengthOffset));
+ __ Ret();
}
@@ -429,67 +411,93 @@ void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
}
-// Generate StoreField code, value is passed in r0 register.
+// Generate code to check that a global property cell is empty. Create
+// the property cell at compilation time if no cell exists for the
+// property.
+void StubCompiler::GenerateCheckPropertyCell(MacroAssembler* masm,
+ Handle<JSGlobalObject> global,
+ Handle<Name> name,
+ Register scratch,
+ Label* miss) {
+ Handle<Cell> cell = JSGlobalObject::EnsurePropertyCell(global, name);
+ ASSERT(cell->value()->IsTheHole());
+ __ mov(scratch, Operand(cell));
+ __ ldr(scratch, FieldMemOperand(scratch, Cell::kValueOffset));
+ __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+ __ cmp(scratch, ip);
+ __ b(ne, miss);
+}
+
+
+void StoreStubCompiler::GenerateNegativeHolderLookup(
+ MacroAssembler* masm,
+ Handle<JSObject> holder,
+ Register holder_reg,
+ Handle<Name> name,
+ Label* miss) {
+ if (holder->IsJSGlobalObject()) {
+ GenerateCheckPropertyCell(
+ masm, Handle<JSGlobalObject>::cast(holder), name, scratch1(), miss);
+ } else if (!holder->HasFastProperties() && !holder->IsJSGlobalProxy()) {
+ GenerateDictionaryNegativeLookup(
+ masm, miss, holder_reg, name, scratch1(), scratch2());
+ }
+}
+
+
+// Generate StoreTransition code, value is passed in r0 register.
// When leaving generated code after success, the receiver_reg and name_reg
// may be clobbered. Upon branch to miss_label, the receiver and name
// registers have their original values.
-void StubCompiler::GenerateStoreField(MacroAssembler* masm,
- Handle<JSObject> object,
- int index,
- Handle<Map> transition,
- Handle<String> name,
- Register receiver_reg,
- Register name_reg,
- Register scratch1,
- Register scratch2,
- Label* miss_label) {
+void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
+ Handle<JSObject> object,
+ LookupResult* lookup,
+ Handle<Map> transition,
+ Handle<Name> name,
+ Register receiver_reg,
+ Register storage_reg,
+ Register value_reg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* miss_label,
+ Label* slow) {
// r0 : value
Label exit;
- LookupResult lookup(masm->isolate());
- object->Lookup(*name, &lookup);
- if (lookup.IsFound() && (lookup.IsReadOnly() || !lookup.IsCacheable())) {
- // In sloppy mode, we could just return the value and be done. However, we
- // might be in strict mode, where we have to throw. Since we cannot tell,
- // go into slow case unconditionally.
- __ jmp(miss_label);
- return;
- }
-
- // Check that the map of the object hasn't changed.
- CompareMapMode mode = transition.is_null() ? ALLOW_ELEMENT_TRANSITION_MAPS
- : REQUIRE_EXACT_MAP;
- __ CheckMap(receiver_reg, scratch1, Handle<Map>(object->map()), miss_label,
- DO_SMI_CHECK, mode);
-
- // Perform global security token check if needed.
- if (object->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(receiver_reg, scratch1, miss_label);
- }
-
- // Check that we are allowed to write this.
- if (!transition.is_null() && object->GetPrototype()->IsJSObject()) {
- JSObject* holder;
- if (lookup.IsFound()) {
- holder = lookup.holder();
- } else {
- // Find the top object.
- holder = *object;
- do {
- holder = JSObject::cast(holder->GetPrototype());
- } while (holder->GetPrototype()->IsJSObject());
- }
- // We need an extra register, push
- __ push(name_reg);
- Label miss_pop, done_check;
- CheckPrototypes(object, receiver_reg, Handle<JSObject>(holder), name_reg,
- scratch1, scratch2, name, &miss_pop);
- __ jmp(&done_check);
- __ bind(&miss_pop);
- __ pop(name_reg);
- __ jmp(miss_label);
- __ bind(&done_check);
- __ pop(name_reg);
+ int descriptor = transition->LastAdded();
+ DescriptorArray* descriptors = transition->instance_descriptors();
+ PropertyDetails details = descriptors->GetDetails(descriptor);
+ Representation representation = details.representation();
+ ASSERT(!representation.IsNone());
+
+ if (details.type() == CONSTANT) {
+ Handle<Object> constant(descriptors->GetValue(descriptor), masm->isolate());
+ __ Move(scratch1, constant);
+ __ cmp(value_reg, scratch1);
+ __ b(ne, miss_label);
+ } else if (FLAG_track_fields && representation.IsSmi()) {
+ __ JumpIfNotSmi(value_reg, miss_label);
+ } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
+ __ JumpIfSmi(value_reg, miss_label);
+ } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ Label do_store, heap_number;
+ __ LoadRoot(scratch3, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(storage_reg, scratch1, scratch2, scratch3, slow);
+
+ __ JumpIfNotSmi(value_reg, &heap_number);
+ __ SmiUntag(scratch1, value_reg);
+ __ vmov(s0, scratch1);
+ __ vcvt_f64_s32(d0, s0);
+ __ jmp(&do_store);
+
+ __ bind(&heap_number);
+ __ CheckMap(value_reg, scratch1, Heap::kHeapNumberMapRootIndex,
+ miss_label, DONT_DO_SMI_CHECK);
+ __ vldr(d0, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
+
+ __ bind(&do_store);
+ __ vstr(d0, FieldMemOperand(storage_reg, HeapNumber::kValueOffset));
}
// Stub never generated for non-global objects that require access
@@ -497,7 +505,8 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
// Perform map transition for the receiver if necessary.
- if (!transition.is_null() && (object->map()->unused_property_fields() == 0)) {
+ if (details.type() == FIELD &&
+ object->map()->unused_property_fields() == 0) {
// The properties must be extended before we can store the value.
// We jump to a runtime call that extends the properties array.
__ push(receiver_reg);
@@ -511,79 +520,225 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
return;
}
- if (!transition.is_null()) {
- // Update the map of the object.
- __ mov(scratch1, Operand(transition));
- __ str(scratch1, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
-
- // Update the write barrier for the map field and pass the now unused
- // name_reg as scratch register.
- __ RecordWriteField(receiver_reg,
- HeapObject::kMapOffset,
- scratch1,
- name_reg,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs,
- OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
+ // Update the map of the object.
+ __ mov(scratch1, Operand(transition));
+ __ str(scratch1, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
+
+ // Update the write barrier for the map field.
+ __ RecordWriteField(receiver_reg,
+ HeapObject::kMapOffset,
+ scratch1,
+ scratch2,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs,
+ OMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+
+ if (details.type() == CONSTANT) {
+ ASSERT(value_reg.is(r0));
+ __ Ret();
+ return;
}
+ int index = transition->instance_descriptors()->GetFieldIndex(
+ transition->LastAdded());
+
// Adjust for the number of properties stored in the object. Even in the
// face of a transition we can use the old map here because the size of the
// object and the number of in-object properties is not going to change.
index -= object->map()->inobject_properties();
+ // TODO(verwaest): Share this code as a code stub.
+ SmiCheck smi_check = representation.IsTagged()
+ ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
if (index < 0) {
// Set the property straight into the object.
int offset = object->map()->instance_size() + (index * kPointerSize);
- __ str(r0, FieldMemOperand(receiver_reg, offset));
-
- // Skip updating write barrier if storing a smi.
- __ JumpIfSmi(r0, &exit);
-
- // Update the write barrier for the array address.
- // Pass the now unused name_reg as a scratch register.
- __ mov(name_reg, r0);
- __ RecordWriteField(receiver_reg,
- offset,
- name_reg,
- scratch1,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs);
+ if (FLAG_track_double_fields && representation.IsDouble()) {
+ __ str(storage_reg, FieldMemOperand(receiver_reg, offset));
+ } else {
+ __ str(value_reg, FieldMemOperand(receiver_reg, offset));
+ }
+
+ if (!FLAG_track_fields || !representation.IsSmi()) {
+ // Update the write barrier for the array address.
+ if (!FLAG_track_double_fields || !representation.IsDouble()) {
+ __ mov(storage_reg, value_reg);
+ }
+ __ RecordWriteField(receiver_reg,
+ offset,
+ storage_reg,
+ scratch1,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ smi_check);
+ }
} else {
// Write to the properties array.
int offset = index * kPointerSize + FixedArray::kHeaderSize;
// Get the properties array
__ ldr(scratch1,
FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
- __ str(r0, FieldMemOperand(scratch1, offset));
-
- // Skip updating write barrier if storing a smi.
- __ JumpIfSmi(r0, &exit);
-
- // Update the write barrier for the array address.
- // Ok to clobber receiver_reg and name_reg, since we return.
- __ mov(name_reg, r0);
- __ RecordWriteField(scratch1,
- offset,
- name_reg,
- receiver_reg,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs);
+ if (FLAG_track_double_fields && representation.IsDouble()) {
+ __ str(storage_reg, FieldMemOperand(scratch1, offset));
+ } else {
+ __ str(value_reg, FieldMemOperand(scratch1, offset));
+ }
+
+ if (!FLAG_track_fields || !representation.IsSmi()) {
+ // Update the write barrier for the array address.
+ if (!FLAG_track_double_fields || !representation.IsDouble()) {
+ __ mov(storage_reg, value_reg);
+ }
+ __ RecordWriteField(scratch1,
+ offset,
+ storage_reg,
+ receiver_reg,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ smi_check);
+ }
}
// Return the value (register r0).
+ ASSERT(value_reg.is(r0));
__ bind(&exit);
__ Ret();
}
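The new StoreTransition path is driven by the representation recorded in the transition map's descriptor: a CONSTANT store only compares against the expected value, a Smi field rejects non-Smis, a heap-object field rejects Smis, and a double field boxes the incoming value into a freshly allocated HeapNumber (storage_reg) before the slot is written. The write barrier is then only emitted when a tagged pointer actually ends up in the object. The following is a minimal standalone model of that decision table, with made-up type names; it is not the V8 API.

// Standalone model of the per-representation store plan above.
#include <cstdio>

enum class Representation { kSmi, kHeapObject, kDouble, kTagged };

struct StorePlan {
  bool check_is_smi;          // bail to miss if the value is not a Smi
  bool check_is_heap_object;  // bail to miss if the value is a Smi
  bool box_into_heap_number;  // allocate storage and vstr the double into it
  bool emit_write_barrier;    // record the slot for the GC
};

StorePlan PlanFieldStore(Representation r) {
  switch (r) {
    case Representation::kSmi:        return {true,  false, false, false};
    case Representation::kHeapObject: return {false, true,  false, true};
    case Representation::kDouble:     return {false, false, true,  true};
    case Representation::kTagged:     return {false, false, false, true};
    // kTagged keeps the barrier but guards it with an inline Smi check.
  }
  return {};
}

int main() {
  StorePlan p = PlanFieldStore(Representation::kDouble);
  std::printf("box=%d barrier=%d\n", p.box_into_heap_number, p.emit_write_barrier);
  return 0;
}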
-void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
- ASSERT(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC);
- Handle<Code> code = (kind == Code::LOAD_IC)
- ? masm->isolate()->builtins()->LoadIC_Miss()
- : masm->isolate()->builtins()->KeyedLoadIC_Miss();
- __ Jump(code, RelocInfo::CODE_TARGET);
+// Generate StoreField code, value is passed in r0 register.
+// When leaving generated code after success, the receiver_reg and name_reg
+// may be clobbered. Upon branch to miss_label, the receiver and name
+// registers have their original values.
+void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
+ Handle<JSObject> object,
+ LookupResult* lookup,
+ Register receiver_reg,
+ Register name_reg,
+ Register value_reg,
+ Register scratch1,
+ Register scratch2,
+ Label* miss_label) {
+ // r0 : value
+ Label exit;
+
+ // Stub never generated for non-global objects that require access
+ // checks.
+ ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
+
+ int index = lookup->GetFieldIndex().field_index();
+
+ // Adjust for the number of properties stored in the object. Even in the
+ // face of a transition we can use the old map here because the size of the
+ // object and the number of in-object properties is not going to change.
+ index -= object->map()->inobject_properties();
+
+ Representation representation = lookup->representation();
+ ASSERT(!representation.IsNone());
+ if (FLAG_track_fields && representation.IsSmi()) {
+ __ JumpIfNotSmi(value_reg, miss_label);
+ } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
+ __ JumpIfSmi(value_reg, miss_label);
+ } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ // Load the double storage.
+ if (index < 0) {
+ int offset = object->map()->instance_size() + (index * kPointerSize);
+ __ ldr(scratch1, FieldMemOperand(receiver_reg, offset));
+ } else {
+ __ ldr(scratch1,
+ FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
+ int offset = index * kPointerSize + FixedArray::kHeaderSize;
+ __ ldr(scratch1, FieldMemOperand(scratch1, offset));
+ }
+
+ // Store the value into the storage.
+ Label do_store, heap_number;
+ __ JumpIfNotSmi(value_reg, &heap_number);
+ __ SmiUntag(scratch2, value_reg);
+ __ vmov(s0, scratch2);
+ __ vcvt_f64_s32(d0, s0);
+ __ jmp(&do_store);
+
+ __ bind(&heap_number);
+ __ CheckMap(value_reg, scratch2, Heap::kHeapNumberMapRootIndex,
+ miss_label, DONT_DO_SMI_CHECK);
+ __ vldr(d0, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
+
+ __ bind(&do_store);
+ __ vstr(d0, FieldMemOperand(scratch1, HeapNumber::kValueOffset));
+ // Return the value (register r0).
+ ASSERT(value_reg.is(r0));
+ __ Ret();
+ return;
+ }
+
+ // TODO(verwaest): Share this code as a code stub.
+ SmiCheck smi_check = representation.IsTagged()
+ ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
+ if (index < 0) {
+ // Set the property straight into the object.
+ int offset = object->map()->instance_size() + (index * kPointerSize);
+ __ str(value_reg, FieldMemOperand(receiver_reg, offset));
+
+ if (!FLAG_track_fields || !representation.IsSmi()) {
+ // Skip updating write barrier if storing a smi.
+ __ JumpIfSmi(value_reg, &exit);
+
+ // Update the write barrier for the array address.
+ // Pass the now unused name_reg as a scratch register.
+ __ mov(name_reg, value_reg);
+ __ RecordWriteField(receiver_reg,
+ offset,
+ name_reg,
+ scratch1,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ smi_check);
+ }
+ } else {
+ // Write to the properties array.
+ int offset = index * kPointerSize + FixedArray::kHeaderSize;
+ // Get the properties array
+ __ ldr(scratch1,
+ FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
+ __ str(value_reg, FieldMemOperand(scratch1, offset));
+
+ if (!FLAG_track_fields || !representation.IsSmi()) {
+ // Skip updating write barrier if storing a smi.
+ __ JumpIfSmi(value_reg, &exit);
+
+ // Update the write barrier for the array address.
+ // Ok to clobber receiver_reg and name_reg, since we return.
+ __ mov(name_reg, value_reg);
+ __ RecordWriteField(scratch1,
+ offset,
+ name_reg,
+ receiver_reg,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ smi_check);
+ }
+ }
+
+ // Return the value (register r0).
+ ASSERT(value_reg.is(r0));
+ __ bind(&exit);
+ __ Ret();
+}
+
+
+void StoreStubCompiler::GenerateRestoreName(MacroAssembler* masm,
+ Label* label,
+ Handle<Name> name) {
+ if (!label->is_unused()) {
+ __ bind(label);
+ __ mov(this->name(), Operand(name));
+ }
}
@@ -622,6 +777,11 @@ static void PushInterceptorArguments(MacroAssembler* masm,
Register holder,
Register name,
Handle<JSObject> holder_obj) {
+ STATIC_ASSERT(StubCache::kInterceptorArgsNameIndex == 0);
+ STATIC_ASSERT(StubCache::kInterceptorArgsInfoIndex == 1);
+ STATIC_ASSERT(StubCache::kInterceptorArgsThisIndex == 2);
+ STATIC_ASSERT(StubCache::kInterceptorArgsHolderIndex == 3);
+ STATIC_ASSERT(StubCache::kInterceptorArgsLength == 4);
__ push(name);
Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor));
@@ -630,10 +790,6 @@ static void PushInterceptorArguments(MacroAssembler* masm,
__ push(scratch);
__ push(receiver);
__ push(holder);
- __ ldr(scratch, FieldMemOperand(scratch, InterceptorInfo::kDataOffset));
- __ push(scratch);
- __ mov(scratch, Operand(ExternalReference::isolate_address()));
- __ push(scratch);
}
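With this change the interceptor helper pushes only four values, and the STATIC_ASSERTs above pin their order to the kInterceptorArgs* indices so the stub and the runtime half cannot drift apart. The data and isolate slots that used to be pushed here are dropped; presumably the runtime now recovers them from the InterceptorInfo argument and the current isolate. Below is a trivial standalone sketch of that compile-time contract, using string placeholders for the tagged values.

// Standalone sketch of the interceptor-argument index contract.
#include <cassert>
#include <string>
#include <vector>

enum InterceptorArgIndex {
  kNameIndex = 0,
  kInfoIndex = 1,
  kThisIndex = 2,
  kHolderIndex = 3,
  kArgsLength = 4
};

int main() {
  // Illustrative stand-ins for the tagged values the stub pushes.
  std::vector<std::string> args(kArgsLength);
  args[kNameIndex]   = "property name";
  args[kInfoIndex]   = "InterceptorInfo";
  args[kThisIndex]   = "receiver";
  args[kHolderIndex] = "holder";
  assert(args.size() == kArgsLength);
  return 0;
}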
@@ -648,7 +804,7 @@ static void CompileCallLoadPropertyWithInterceptor(
ExternalReference ref =
ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly),
masm->isolate());
- __ mov(r0, Operand(6));
+ __ mov(r0, Operand(StubCache::kInterceptorArgsLength));
__ mov(r1, Operand(ref));
CEntryStub stub(1);
@@ -656,7 +812,7 @@ static void CompileCallLoadPropertyWithInterceptor(
}
-static const int kFastApiCallArguments = 4;
+static const int kFastApiCallArguments = FunctionCallbackArguments::kArgsLength;
// Reserves space for the extra arguments to API function in the
// caller's frame.
@@ -679,37 +835,46 @@ static void FreeSpaceForFastApiCall(MacroAssembler* masm) {
static void GenerateFastApiDirectCall(MacroAssembler* masm,
const CallOptimization& optimization,
- int argc) {
+ int argc,
+ bool restore_context) {
// ----------- S t a t e -------------
- // -- sp[0] : holder (set by CheckPrototypes)
- // -- sp[4] : callee JS function
- // -- sp[8] : call data
- // -- sp[12] : isolate
- // -- sp[16] : last JS argument
+ // -- sp[0] - sp[24] : FunctionCallbackInfo, incl.
+ // : holder (set by CheckPrototypes)
+ // -- sp[28] : last JS argument
// -- ...
- // -- sp[(argc + 3) * 4] : first JS argument
- // -- sp[(argc + 4) * 4] : receiver
+ // -- sp[(argc + 6) * 4] : first JS argument
+ // -- sp[(argc + 7) * 4] : receiver
// -----------------------------------
+ typedef FunctionCallbackArguments FCA;
+ // Save calling context.
+ __ str(cp, MemOperand(sp, FCA::kContextSaveIndex * kPointerSize));
// Get the function and setup the context.
Handle<JSFunction> function = optimization.constant_function();
- __ LoadHeapObject(r5, function);
+ __ Move(r5, function);
__ ldr(cp, FieldMemOperand(r5, JSFunction::kContextOffset));
+ __ str(r5, MemOperand(sp, FCA::kCalleeIndex * kPointerSize));
- // Pass the additional arguments.
+ // Construct the FunctionCallbackInfo.
Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
- Handle<Object> call_data(api_call_info->data());
+ Handle<Object> call_data(api_call_info->data(), masm->isolate());
if (masm->isolate()->heap()->InNewSpace(*call_data)) {
__ Move(r0, api_call_info);
__ ldr(r6, FieldMemOperand(r0, CallHandlerInfo::kDataOffset));
} else {
__ Move(r6, call_data);
}
- __ mov(r7, Operand(ExternalReference::isolate_address()));
- // Store JS function, call data and isolate.
- __ stm(ib, sp, r5.bit() | r6.bit() | r7.bit());
+ // Store call data.
+ __ str(r6, MemOperand(sp, FCA::kDataIndex * kPointerSize));
+ // Store isolate.
+ __ mov(r5, Operand(ExternalReference::isolate_address(masm->isolate())));
+ __ str(r5, MemOperand(sp, FCA::kIsolateIndex * kPointerSize));
+ // Store ReturnValue default and ReturnValue.
+ __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
+ __ str(r5, MemOperand(sp, FCA::kReturnValueOffset * kPointerSize));
+ __ str(r5, MemOperand(sp, FCA::kReturnValueDefaultValueIndex * kPointerSize));
// Prepare arguments.
- __ add(r2, sp, Operand(3 * kPointerSize));
+ __ mov(r2, sp);
// Allocate the v8::Arguments structure in the arguments' space since
// it's not controlled by GC.
@@ -718,30 +883,78 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(false, kApiStackSpace);
- // r0 = v8::Arguments&
+ // r0 = FunctionCallbackInfo&
// Arguments is after the return address.
__ add(r0, sp, Operand(1 * kPointerSize));
- // v8::Arguments::implicit_args_
+ // FunctionCallbackInfo::implicit_args_
__ str(r2, MemOperand(r0, 0 * kPointerSize));
- // v8::Arguments::values_
- __ add(ip, r2, Operand(argc * kPointerSize));
+ // FunctionCallbackInfo::values_
+ __ add(ip, r2, Operand((kFastApiCallArguments - 1 + argc) * kPointerSize));
__ str(ip, MemOperand(r0, 1 * kPointerSize));
- // v8::Arguments::length_ = argc
+ // FunctionCallbackInfo::length_ = argc
__ mov(ip, Operand(argc));
__ str(ip, MemOperand(r0, 2 * kPointerSize));
- // v8::Arguments::is_construct_call = 0
- __ mov(ip, Operand(0));
+ // FunctionCallbackInfo::is_construct_call = 0
+ __ mov(ip, Operand::Zero());
__ str(ip, MemOperand(r0, 3 * kPointerSize));
const int kStackUnwindSpace = argc + kFastApiCallArguments + 1;
Address function_address = v8::ToCData<Address>(api_call_info->callback());
ApiFunction fun(function_address);
+ ExternalReference::Type type = ExternalReference::DIRECT_API_CALL;
ExternalReference ref = ExternalReference(&fun,
- ExternalReference::DIRECT_API_CALL,
+ type,
masm->isolate());
+ Address thunk_address = FUNCTION_ADDR(&InvokeFunctionCallback);
+ ExternalReference::Type thunk_type = ExternalReference::PROFILING_API_CALL;
+ ApiFunction thunk_fun(thunk_address);
+ ExternalReference thunk_ref = ExternalReference(&thunk_fun, thunk_type,
+ masm->isolate());
+
AllowExternalCallThatCantCauseGC scope(masm);
+ MemOperand context_restore_operand(
+ fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
+ MemOperand return_value_operand(fp,
+ (2 + FCA::kReturnValueOffset) * kPointerSize);
- __ CallApiFunctionAndReturn(ref, kStackUnwindSpace);
+ __ CallApiFunctionAndReturn(ref,
+ function_address,
+ thunk_ref,
+ r1,
+ kStackUnwindSpace,
+ return_value_operand,
+ restore_context ?
+ &context_restore_operand : NULL);
+}
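GenerateFastApiDirectCall now lays out a full FunctionCallbackInfo block on the stack (saved context, callee, call data, isolate, the two return-value slots and the holder written earlier by CheckPrototypes) and fills in the implicit_args_/values_/length_/is_construct_call fields before entering the exit frame. The sketch below reproduces only the pointer arithmetic visible in the hunk; kFastApiCallArguments is assumed to be 7 here, which is what the sp[0]..sp[24] block in the state comment implies, and values_ is taken to point at the first JS argument with later arguments indexed downwards.

// Standalone sketch of the FunctionCallbackInfo field computation above.
#include <cstdio>

constexpr int kPointerSize = 4;
constexpr int kFastApiCallArguments = 7;  // assumption for the sketch

struct FunctionCallbackInfoModel {
  int implicit_args;      // byte offset of the FCA block (r2 in the stub)
  int values;             // byte offset of the first JS argument
  int length;             // argc
  int is_construct_call;  // always 0 for these call sites
};

FunctionCallbackInfoModel Build(int fca_block_offset, int argc) {
  FunctionCallbackInfoModel info;
  info.implicit_args = fca_block_offset;
  // Same add as the stub: (kFastApiCallArguments - 1 + argc) * kPointerSize.
  info.values = fca_block_offset + (kFastApiCallArguments - 1 + argc) * kPointerSize;
  info.length = argc;
  info.is_construct_call = 0;
  return info;
}

int main() {
  FunctionCallbackInfoModel info = Build(/*fca_block_offset=*/0, /*argc=*/2);
  std::printf("values_ offset = %d bytes\n", info.values);  // 32
  return 0;
}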
+
+
+// Generate call to api function.
+static void GenerateFastApiCall(MacroAssembler* masm,
+ const CallOptimization& optimization,
+ Register receiver,
+ Register scratch,
+ int argc,
+ Register* values) {
+ ASSERT(optimization.is_simple_api_call());
+ ASSERT(!receiver.is(scratch));
+
+ typedef FunctionCallbackArguments FCA;
+ const int stack_space = kFastApiCallArguments + argc + 1;
+ // Assign stack space for the call arguments.
+ __ sub(sp, sp, Operand(stack_space * kPointerSize));
+ // Write holder to stack frame.
+ __ str(receiver, MemOperand(sp, FCA::kHolderIndex * kPointerSize));
+ // Write receiver to stack frame.
+ int index = stack_space - 1;
+ __ str(receiver, MemOperand(sp, index * kPointerSize));
+ // Write the arguments to stack frame.
+ for (int i = 0; i < argc; i++) {
+ ASSERT(!receiver.is(values[i]));
+ ASSERT(!scratch.is(values[i]));
+ __ str(receiver, MemOperand(sp, index-- * kPointerSize));
+ __ str(values[i], MemOperand(sp, index-- * kPointerSize));
+ }
+
+ GenerateFastApiDirectCall(masm, optimization, argc, true);
}
@@ -759,7 +972,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
void Compile(MacroAssembler* masm,
Handle<JSObject> object,
Handle<JSObject> holder,
- Handle<String> name,
+ Handle<Name> name,
LookupResult* lookup,
Register receiver,
Register scratch1,
@@ -790,7 +1003,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
Register scratch3,
Handle<JSObject> interceptor_holder,
LookupResult* lookup,
- Handle<String> name,
+ Handle<Name> name,
const CallOptimization& optimization,
Label* miss_label) {
ASSERT(optimization.is_constant_call());
@@ -855,12 +1068,15 @@ class CallInterceptorCompiler BASE_EMBEDDED {
// Invoke function.
if (can_do_fast_api_call) {
- GenerateFastApiDirectCall(masm, optimization, arguments_.immediate());
+ GenerateFastApiDirectCall(
+ masm, optimization, arguments_.immediate(), false);
} else {
CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
? CALL_AS_FUNCTION
: CALL_AS_METHOD;
- __ InvokeFunction(optimization.constant_function(), arguments_,
+ Handle<JSFunction> function = optimization.constant_function();
+ ParameterCount expected(function);
+ __ InvokeFunction(function, expected, arguments_,
JUMP_FUNCTION, NullCallWrapper(), call_kind);
}
@@ -884,7 +1100,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
Register scratch1,
Register scratch2,
Register scratch3,
- Handle<String> name,
+ Handle<Name> name,
Handle<JSObject> interceptor_holder,
Label* miss_label) {
Register holder =
@@ -900,7 +1116,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
__ CallExternalReference(
ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
masm->isolate()),
- 6);
+ StubCache::kInterceptorArgsLength);
// Restore the name_ register.
__ pop(name_);
// Leave the internal frame.
@@ -936,39 +1152,17 @@ class CallInterceptorCompiler BASE_EMBEDDED {
};
-// Generate code to check that a global property cell is empty. Create
-// the property cell at compilation time if no cell exists for the
-// property.
-static void GenerateCheckPropertyCell(MacroAssembler* masm,
- Handle<GlobalObject> global,
- Handle<String> name,
- Register scratch,
- Label* miss) {
- Handle<JSGlobalPropertyCell> cell =
- GlobalObject::EnsurePropertyCell(global, name);
- ASSERT(cell->value()->IsTheHole());
- __ mov(scratch, Operand(cell));
- __ ldr(scratch,
- FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(scratch, ip);
- __ b(ne, miss);
-}
-
-
-// Calls GenerateCheckPropertyCell for each global object in the prototype chain
-// from object to (but not including) holder.
-static void GenerateCheckPropertyCells(MacroAssembler* masm,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<String> name,
- Register scratch,
- Label* miss) {
+void StubCompiler::GenerateCheckPropertyCells(MacroAssembler* masm,
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ Register scratch,
+ Label* miss) {
Handle<JSObject> current = object;
while (!current.is_identical_to(holder)) {
- if (current->IsGlobalObject()) {
+ if (current->IsJSGlobalObject()) {
GenerateCheckPropertyCell(masm,
- Handle<GlobalObject>::cast(current),
+ Handle<JSGlobalObject>::cast(current),
name,
scratch,
miss);
@@ -978,109 +1172,8 @@ static void GenerateCheckPropertyCells(MacroAssembler* masm,
}
-// Convert and store int passed in register ival to IEEE 754 single precision
-// floating point value at memory location (dst + 4 * wordoffset)
-// If VFP3 is available use it for conversion.
-static void StoreIntAsFloat(MacroAssembler* masm,
- Register dst,
- Register wordoffset,
- Register ival,
- Register fval,
- Register scratch1,
- Register scratch2) {
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- __ vmov(s0, ival);
- __ add(scratch1, dst, Operand(wordoffset, LSL, 2));
- __ vcvt_f32_s32(s0, s0);
- __ vstr(s0, scratch1, 0);
- } else {
- Label not_special, done;
- // Move sign bit from source to destination. This works because the sign
- // bit in the exponent word of the double has the same position and polarity
- // as the 2's complement sign bit in a Smi.
- ASSERT(kBinary32SignMask == 0x80000000u);
-
- __ and_(fval, ival, Operand(kBinary32SignMask), SetCC);
- // Negate value if it is negative.
- __ rsb(ival, ival, Operand(0, RelocInfo::NONE), LeaveCC, ne);
-
- // We have -1, 0 or 1, which we treat specially. Register ival contains
- // absolute value: it is either equal to 1 (special case of -1 and 1),
- // greater than 1 (not a special case) or less than 1 (special case of 0).
- __ cmp(ival, Operand(1));
- __ b(gt, &not_special);
-
- // For 1 or -1 we need to or in the 0 exponent (biased).
- static const uint32_t exponent_word_for_1 =
- kBinary32ExponentBias << kBinary32ExponentShift;
-
- __ orr(fval, fval, Operand(exponent_word_for_1), LeaveCC, eq);
- __ b(&done);
-
- __ bind(&not_special);
- // Count leading zeros.
- // Gets the wrong answer for 0, but we already checked for that case above.
- Register zeros = scratch2;
- __ CountLeadingZeros(zeros, ival, scratch1);
-
- // Compute exponent and or it into the exponent register.
- __ rsb(scratch1,
- zeros,
- Operand((kBitsPerInt - 1) + kBinary32ExponentBias));
-
- __ orr(fval,
- fval,
- Operand(scratch1, LSL, kBinary32ExponentShift));
-
- // Shift up the source chopping the top bit off.
- __ add(zeros, zeros, Operand(1));
- // This wouldn't work for 1 and -1 as the shift would be 32 which means 0.
- __ mov(ival, Operand(ival, LSL, zeros));
- // And the top (top 20 bits).
- __ orr(fval,
- fval,
- Operand(ival, LSR, kBitsPerInt - kBinary32MantissaBits));
-
- __ bind(&done);
- __ str(fval, MemOperand(dst, wordoffset, LSL, 2));
- }
-}
-
-
-// Convert unsigned integer with specified number of leading zeroes in binary
-// representation to IEEE 754 double.
-// Integer to convert is passed in register hiword.
-// Resulting double is returned in registers hiword:loword.
-// This functions does not work correctly for 0.
-static void GenerateUInt2Double(MacroAssembler* masm,
- Register hiword,
- Register loword,
- Register scratch,
- int leading_zeroes) {
- const int meaningful_bits = kBitsPerInt - leading_zeroes - 1;
- const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits;
-
- const int mantissa_shift_for_hi_word =
- meaningful_bits - HeapNumber::kMantissaBitsInTopWord;
-
- const int mantissa_shift_for_lo_word =
- kBitsPerInt - mantissa_shift_for_hi_word;
-
- __ mov(scratch, Operand(biased_exponent << HeapNumber::kExponentShift));
- if (mantissa_shift_for_hi_word > 0) {
- __ mov(loword, Operand(hiword, LSL, mantissa_shift_for_lo_word));
- __ orr(hiword, scratch, Operand(hiword, LSR, mantissa_shift_for_hi_word));
- } else {
- __ mov(loword, Operand(0, RelocInfo::NONE));
- __ orr(hiword, scratch, Operand(hiword, LSL, mantissa_shift_for_hi_word));
- }
-
- // If least significant bit of biased exponent was not 1 it was corrupted
- // by most significant bit of mantissa so we should fix that.
- if (!(biased_exponent & 1)) {
- __ bic(hiword, hiword, Operand(1 << HeapNumber::kExponentShift));
- }
+void StubCompiler::GenerateTailCall(MacroAssembler* masm, Handle<Code> code) {
+ __ Jump(code, RelocInfo::CODE_TARGET);
}
@@ -1094,9 +1187,15 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
Register holder_reg,
Register scratch1,
Register scratch2,
- Handle<String> name,
+ Handle<Name> name,
int save_at_depth,
- Label* miss) {
+ Label* miss,
+ PrototypeCheckType check) {
+ // Make sure that the type feedback oracle harvests the receiver map.
+ // TODO(svenpanne) Remove this hack when all ICs are reworked.
+ __ mov(scratch1, Operand(Handle<Map>(object->map())));
+
+ Handle<JSObject> first = object;
// Make sure there's no overlap between holder and object registers.
ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg)
@@ -1106,8 +1205,9 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
Register reg = object_reg;
int depth = 0;
+ typedef FunctionCallbackArguments FCA;
if (save_at_depth == depth) {
- __ str(reg, MemOperand(sp));
+ __ str(reg, MemOperand(sp, FCA::kHolderIndex * kPointerSize));
}
// Check the maps in the prototype chain.
@@ -1124,11 +1224,12 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
if (!current->HasFastProperties() &&
!current->IsJSGlobalObject() &&
!current->IsJSGlobalProxy()) {
- if (!name->IsSymbol()) {
- name = factory()->LookupSymbol(name);
+ if (!name->IsUniqueName()) {
+ ASSERT(name->IsString());
+ name = factory()->InternalizeString(Handle<String>::cast(name));
}
ASSERT(current->property_dictionary()->FindEntry(*name) ==
- StringDictionary::kNotFound);
+ NameDictionary::kNotFound);
GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
scratch1, scratch2);
@@ -1137,9 +1238,14 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
reg = holder_reg; // From now on the object will be in holder_reg.
__ ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
} else {
- Handle<Map> current_map(current->map());
- __ CheckMap(reg, scratch1, current_map, miss, DONT_DO_SMI_CHECK,
- ALLOW_ELEMENT_TRANSITION_MAPS);
+ Register map_reg = scratch1;
+ if (!current.is_identical_to(first) || check == CHECK_ALL_MAPS) {
+ Handle<Map> current_map(current->map());
+ // CheckMap implicitly loads the map of |reg| into |map_reg|.
+ __ CheckMap(reg, map_reg, current_map, miss, DONT_DO_SMI_CHECK);
+ } else {
+ __ ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
+ }
// Check access rights to the global object. This has to happen after
// the map check so that we know that the object is actually a global
@@ -1152,7 +1258,7 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
if (heap()->InNewSpace(*prototype)) {
// The prototype is in new space; we cannot store a reference to it
// in the code. Load it from the map.
- __ ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
+ __ ldr(reg, FieldMemOperand(map_reg, Map::kPrototypeOffset));
} else {
// The prototype is in old space; load it directly.
__ mov(reg, Operand(prototype));
@@ -1160,7 +1266,7 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
}
if (save_at_depth == depth) {
- __ str(reg, MemOperand(sp));
+ __ str(reg, MemOperand(sp, FCA::kHolderIndex * kPointerSize));
}
// Go to the next object in the prototype chain.
@@ -1168,11 +1274,13 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
}
// Log the check depth.
- LOG(masm()->isolate(), IntEvent("check-maps-depth", depth + 1));
+ LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
- // Check the holder map.
- __ CheckMap(reg, scratch1, Handle<Map>(current->map()), miss,
- DONT_DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
+ if (!holder.is_identical_to(first) || check == CHECK_ALL_MAPS) {
+ // Check the holder map.
+ __ CheckMap(reg, scratch1, Handle<Map>(holder->map()), miss,
+ DONT_DO_SMI_CHECK);
+ }
// Perform security check for access to the global object.
ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
@@ -1190,161 +1298,183 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
}
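A small behavioural point in the CheckPrototypes rework above: the map check on the first object (the receiver) is now skipped unless the caller asks for CHECK_ALL_MAPS, since the dispatching IC has already guarded that map; every other object on the chain, and the holder, still gets an explicit CheckMap. The sketch below states that policy as a predicate; PrototypeCheckType's second enumerator name is a stand-in, not necessarily the one used in the patch.

// Standalone sketch of the map-check policy.
#include <cstdio>

enum PrototypeCheckType { CHECK_ALL_MAPS, SKIP_RECEIVER_MAP_CHECK };

bool NeedsMapCheck(bool is_first_object, PrototypeCheckType check) {
  return !is_first_object || check == CHECK_ALL_MAPS;
}

int main() {
  std::printf("receiver, default policy : %d\n",
              NeedsMapCheck(true, SKIP_RECEIVER_MAP_CHECK));   // 0
  std::printf("prototype on the chain   : %d\n",
              NeedsMapCheck(false, SKIP_RECEIVER_MAP_CHECK));  // 1
  return 0;
}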
-void StubCompiler::GenerateLoadField(Handle<JSObject> object,
- Handle<JSObject> holder,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- int index,
- Handle<String> name,
- Label* miss) {
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss);
-
- // Check that the maps haven't changed.
- Register reg = CheckPrototypes(
- object, receiver, holder, scratch1, scratch2, scratch3, name, miss);
- GenerateFastPropertyLoad(masm(), r0, reg, holder, index);
- __ Ret();
+void LoadStubCompiler::HandlerFrontendFooter(Handle<Name> name,
+ Label* success,
+ Label* miss) {
+ if (!miss->is_unused()) {
+ __ b(success);
+ __ bind(miss);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+ }
}
-void StubCompiler::GenerateLoadConstant(Handle<JSObject> object,
- Handle<JSObject> holder,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Handle<JSFunction> value,
- Handle<String> name,
- Label* miss) {
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss);
+void StoreStubCompiler::HandlerFrontendFooter(Handle<Name> name,
+ Label* success,
+ Label* miss) {
+ if (!miss->is_unused()) {
+ __ b(success);
+ GenerateRestoreName(masm(), miss, name);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+ }
+}
- // Check that the maps haven't changed.
- CheckPrototypes(
- object, receiver, holder, scratch1, scratch2, scratch3, name, miss);
- // Return the constant value.
- __ LoadHeapObject(r0, value);
- __ Ret();
-}
+Register LoadStubCompiler::CallbackHandlerFrontend(
+ Handle<JSObject> object,
+ Register object_reg,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ Label* success,
+ Handle<Object> callback) {
+ Label miss;
+ Register reg = HandlerFrontendHeader(object, object_reg, holder, name, &miss);
-void StubCompiler::GenerateDictionaryLoadCallback(Register receiver,
- Register name_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Handle<AccessorInfo> callback,
- Handle<String> name,
- Label* miss) {
- ASSERT(!receiver.is(scratch1));
- ASSERT(!receiver.is(scratch2));
- ASSERT(!receiver.is(scratch3));
-
- // Load the properties dictionary.
- Register dictionary = scratch1;
- __ ldr(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
-
- // Probe the dictionary.
- Label probe_done;
- StringDictionaryLookupStub::GeneratePositiveLookup(masm(),
- miss,
+ if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) {
+ ASSERT(!reg.is(scratch2()));
+ ASSERT(!reg.is(scratch3()));
+ ASSERT(!reg.is(scratch4()));
+
+ // Load the properties dictionary.
+ Register dictionary = scratch4();
+ __ ldr(dictionary, FieldMemOperand(reg, JSObject::kPropertiesOffset));
+
+ // Probe the dictionary.
+ Label probe_done;
+ NameDictionaryLookupStub::GeneratePositiveLookup(masm(),
+ &miss,
&probe_done,
dictionary,
- name_reg,
- scratch2,
- scratch3);
- __ bind(&probe_done);
-
- // If probing finds an entry in the dictionary, scratch3 contains the
- // pointer into the dictionary. Check that the value is the callback.
- Register pointer = scratch3;
- const int kElementsStartOffset = StringDictionary::kHeaderSize +
- StringDictionary::kElementsStartIndex * kPointerSize;
- const int kValueOffset = kElementsStartOffset + kPointerSize;
- __ ldr(scratch2, FieldMemOperand(pointer, kValueOffset));
- __ cmp(scratch2, Operand(callback));
- __ b(ne, miss);
+ this->name(),
+ scratch2(),
+ scratch3());
+ __ bind(&probe_done);
+
+ // If probing finds an entry in the dictionary, scratch3 contains the
+ // pointer into the dictionary. Check that the value is the callback.
+ Register pointer = scratch3();
+ const int kElementsStartOffset = NameDictionary::kHeaderSize +
+ NameDictionary::kElementsStartIndex * kPointerSize;
+ const int kValueOffset = kElementsStartOffset + kPointerSize;
+ __ ldr(scratch2(), FieldMemOperand(pointer, kValueOffset));
+ __ cmp(scratch2(), Operand(callback));
+ __ b(ne, &miss);
+ }
+
+ HandlerFrontendFooter(name, success, &miss);
+ return reg;
}
-void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
- Handle<JSObject> holder,
- Register receiver,
- Register name_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Handle<AccessorInfo> callback,
- Handle<String> name,
- Label* miss) {
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss);
+void LoadStubCompiler::GenerateLoadField(Register reg,
+ Handle<JSObject> holder,
+ PropertyIndex field,
+ Representation representation) {
+ if (!reg.is(receiver())) __ mov(receiver(), reg);
+ if (kind() == Code::LOAD_IC) {
+ LoadFieldStub stub(field.is_inobject(holder),
+ field.translate(holder),
+ representation);
+ GenerateTailCall(masm(), stub.GetCode(isolate()));
+ } else {
+ KeyedLoadFieldStub stub(field.is_inobject(holder),
+ field.translate(holder),
+ representation);
+ GenerateTailCall(masm(), stub.GetCode(isolate()));
+ }
+}
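After this change the field-load handler itself is just a tail call: the actual load is generated by a LoadFieldStub or KeyedLoadFieldStub that is fully described by (is_inobject, translated index, representation), which lets identical loads share one piece of generated code. The sketch below models that sharing with an ordinary map keyed by the same triple; the cache and its contents are illustrative, not the V8 stub cache.

// Standalone sketch: one stub per (inobject, index, representation) key.
#include <cstdio>
#include <map>
#include <string>
#include <tuple>

enum class Representation { kSmi, kDouble, kHeapObject, kTagged };

using FieldStubKey = std::tuple<bool /*inobject*/, int /*index*/, Representation>;

std::map<FieldStubKey, std::string>& StubCache() {
  static std::map<FieldStubKey, std::string> cache;
  return cache;
}

const std::string& GetOrCreateFieldStub(bool inobject, int index, Representation r) {
  FieldStubKey key{inobject, index, r};
  auto it = StubCache().find(key);
  if (it == StubCache().end()) {
    it = StubCache().emplace(key, "generated load-field code").first;
  }
  return it->second;
}

int main() {
  GetOrCreateFieldStub(true, 2, Representation::kTagged);
  GetOrCreateFieldStub(true, 2, Representation::kTagged);  // reuses the first stub
  std::printf("stubs generated: %zu\n", StubCache().size());  // 1
  return 0;
}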
- // Check that the maps haven't changed.
- Register reg = CheckPrototypes(object, receiver, holder, scratch1,
- scratch2, scratch3, name, miss);
- if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) {
- GenerateDictionaryLoadCallback(
- reg, name_reg, scratch2, scratch3, scratch4, callback, name, miss);
- }
+void LoadStubCompiler::GenerateLoadConstant(Handle<Object> value) {
+ // Return the constant value.
+ __ Move(r0, value);
+ __ Ret();
+}
+
+void LoadStubCompiler::GenerateLoadCallback(
+ const CallOptimization& call_optimization) {
+ GenerateFastApiCall(
+ masm(), call_optimization, receiver(), scratch3(), 0, NULL);
+}
+
+
+void LoadStubCompiler::GenerateLoadCallback(
+ Register reg,
+ Handle<ExecutableAccessorInfo> callback) {
// Build AccessorInfo::args_ list on the stack and push property name below
// the exit frame to make GC aware of them and store pointers to them.
- __ push(receiver);
- __ mov(scratch2, sp); // scratch2 = AccessorInfo::args_
+ STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 0);
+ STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 1);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 2);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 3);
+ STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4);
+ STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5);
+ STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 6);
+ ASSERT(!scratch2().is(reg));
+ ASSERT(!scratch3().is(reg));
+ ASSERT(!scratch4().is(reg));
+ __ push(receiver());
if (heap()->InNewSpace(callback->data())) {
- __ Move(scratch3, callback);
- __ ldr(scratch3, FieldMemOperand(scratch3, AccessorInfo::kDataOffset));
+ __ Move(scratch3(), callback);
+ __ ldr(scratch3(), FieldMemOperand(scratch3(),
+ ExecutableAccessorInfo::kDataOffset));
} else {
- __ Move(scratch3, Handle<Object>(callback->data()));
- }
- __ Push(reg, scratch3);
- __ mov(scratch3, Operand(ExternalReference::isolate_address()));
- __ Push(scratch3, name_reg);
- __ mov(r0, sp); // r0 = Handle<String>
+ __ Move(scratch3(), Handle<Object>(callback->data(), isolate()));
+ }
+ __ push(scratch3());
+ __ LoadRoot(scratch3(), Heap::kUndefinedValueRootIndex);
+ __ mov(scratch4(), scratch3());
+ __ Push(scratch3(), scratch4());
+ __ mov(scratch4(),
+ Operand(ExternalReference::isolate_address(isolate())));
+ __ Push(scratch4(), reg);
+ __ mov(scratch2(), sp); // scratch2 = PropertyAccessorInfo::args_
+ __ push(name());
+ __ mov(r0, sp); // r0 = Handle<Name>
const int kApiStackSpace = 1;
FrameScope frame_scope(masm(), StackFrame::MANUAL);
__ EnterExitFrame(false, kApiStackSpace);
- // Create AccessorInfo instance on the stack above the exit frame with
+ // Create PropertyAccessorInfo instance on the stack above the exit frame with
// scratch2 (internal::Object** args_) as the data.
- __ str(scratch2, MemOperand(sp, 1 * kPointerSize));
+ __ str(scratch2(), MemOperand(sp, 1 * kPointerSize));
__ add(r1, sp, Operand(1 * kPointerSize)); // r1 = AccessorInfo&
- const int kStackUnwindSpace = 5;
+ const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
Address getter_address = v8::ToCData<Address>(callback->getter());
- ApiFunction fun(getter_address);
- ExternalReference ref =
- ExternalReference(&fun,
- ExternalReference::DIRECT_GETTER_CALL,
- masm()->isolate());
- __ CallApiFunctionAndReturn(ref, kStackUnwindSpace);
-}
-
-void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
- Handle<JSObject> interceptor_holder,
- LookupResult* lookup,
- Register receiver,
- Register name_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Handle<String> name,
- Label* miss) {
+ ApiFunction fun(getter_address);
+ ExternalReference::Type type = ExternalReference::DIRECT_GETTER_CALL;
+ ExternalReference ref = ExternalReference(&fun, type, isolate());
+
+ Address thunk_address = FUNCTION_ADDR(&InvokeAccessorGetterCallback);
+ ExternalReference::Type thunk_type =
+ ExternalReference::PROFILING_GETTER_CALL;
+ ApiFunction thunk_fun(thunk_address);
+ ExternalReference thunk_ref = ExternalReference(&thunk_fun, thunk_type,
+ isolate());
+ __ CallApiFunctionAndReturn(ref,
+ getter_address,
+ thunk_ref,
+ r2,
+ kStackUnwindSpace,
+ MemOperand(fp, 6 * kPointerSize),
+ NULL);
+}
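The callback load above now builds a PropertyCallbackArguments block on the stack (holder, isolate, return-value default, return value, data, receiver) followed by the property name, and the getter is invoked through a profiling-aware thunk. The tiny sketch below restates the index contract implied by the STATIC_ASSERTs; the kReturnValueIndex spelling is a stand-in for the kReturnValueOffset constant in the patch.

// Standalone restatement of the PropertyCallbackArguments layout.
enum PropertyCallbackArgIndex {
  kHolderIndex = 0,
  kIsolateIndex = 1,
  kReturnValueDefaultValueIndex = 2,
  kReturnValueIndex = 3,
  kDataIndex = 4,
  kThisIndex = 5,
  kArgsLength = 6
};

// The exit-frame unwind space is kArgsLength + 1 because the name sits below
// the args block, matching kStackUnwindSpace above.
static_assert(kArgsLength == 6, "layout assumed by the unwind-space computation");

int main() { return 0; }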
+
+
+void LoadStubCompiler::GenerateLoadInterceptor(
+ Register holder_reg,
+ Handle<JSObject> object,
+ Handle<JSObject> interceptor_holder,
+ LookupResult* lookup,
+ Handle<Name> name) {
ASSERT(interceptor_holder->HasNamedInterceptor());
ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss);
-
// So far the most popular follow ups for interceptor loads are FIELD
// and CALLBACKS, so inline only them, other cases may be added
// later.
@@ -1353,8 +1483,9 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
if (lookup->IsField()) {
compile_followup_inline = true;
} else if (lookup->type() == CALLBACKS &&
- lookup->GetCallbackObject()->IsAccessorInfo()) {
- AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
+ lookup->GetCallbackObject()->IsExecutableAccessorInfo()) {
+ ExecutableAccessorInfo* callback =
+ ExecutableAccessorInfo::cast(lookup->GetCallbackObject());
compile_followup_inline = callback->getter() != NULL &&
callback->IsCompatibleReceiver(*object);
}
@@ -1364,17 +1495,14 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
// Compile the interceptor call, followed by inline code to load the
// property from further up the prototype chain if the call fails.
// Check that the maps haven't changed.
- Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, scratch3,
- name, miss);
- ASSERT(holder_reg.is(receiver) || holder_reg.is(scratch1));
+ ASSERT(holder_reg.is(receiver()) || holder_reg.is(scratch1()));
// Preserve the receiver register explicitly whenever it is different from
// the holder and it is needed should the interceptor return without any
// result. The CALLBACKS case needs the receiver to be passed into C++ code,
// the FIELD case might cause a miss during the prototype check.
bool must_perfrom_prototype_check = *interceptor_holder != lookup->holder();
- bool must_preserve_receiver_reg = !receiver.is(holder_reg) &&
+ bool must_preserve_receiver_reg = !receiver().is(holder_reg) &&
(lookup->type() == CALLBACKS || must_perfrom_prototype_check);
// Save necessary data before invoking an interceptor.
@@ -1382,103 +1510,52 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
{
FrameScope frame_scope(masm(), StackFrame::INTERNAL);
if (must_preserve_receiver_reg) {
- __ Push(receiver, holder_reg, name_reg);
+ __ Push(receiver(), holder_reg, this->name());
} else {
- __ Push(holder_reg, name_reg);
+ __ Push(holder_reg, this->name());
}
// Invoke an interceptor. Note: map checks from receiver to
// interceptor's holder has been compiled before (see a caller
// of this method.)
CompileCallLoadPropertyWithInterceptor(masm(),
- receiver,
+ receiver(),
holder_reg,
- name_reg,
+ this->name(),
interceptor_holder);
// Check if interceptor provided a value for property. If it's
// the case, return immediately.
Label interceptor_failed;
- __ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex);
- __ cmp(r0, scratch1);
+ __ LoadRoot(scratch1(), Heap::kNoInterceptorResultSentinelRootIndex);
+ __ cmp(r0, scratch1());
__ b(eq, &interceptor_failed);
frame_scope.GenerateLeaveFrame();
__ Ret();
__ bind(&interceptor_failed);
- __ pop(name_reg);
+ __ pop(this->name());
__ pop(holder_reg);
if (must_preserve_receiver_reg) {
- __ pop(receiver);
+ __ pop(receiver());
}
// Leave the internal frame.
}
- // Check that the maps from interceptor's holder to lookup's holder
- // haven't changed. And load lookup's holder into |holder| register.
- if (must_perfrom_prototype_check) {
- holder_reg = CheckPrototypes(interceptor_holder,
- holder_reg,
- Handle<JSObject>(lookup->holder()),
- scratch1,
- scratch2,
- scratch3,
- name,
- miss);
- }
- if (lookup->IsField()) {
- // We found FIELD property in prototype chain of interceptor's holder.
- // Retrieve a field from field's holder.
- GenerateFastPropertyLoad(masm(), r0, holder_reg,
- Handle<JSObject>(lookup->holder()),
- lookup->GetFieldIndex());
- __ Ret();
- } else {
- // We found CALLBACKS property in prototype chain of interceptor's
- // holder.
- ASSERT(lookup->type() == CALLBACKS);
- Handle<AccessorInfo> callback(
- AccessorInfo::cast(lookup->GetCallbackObject()));
- ASSERT(callback->getter() != NULL);
-
- // Tail call to runtime.
- // Important invariant in CALLBACKS case: the code above must be
- // structured to never clobber |receiver| register.
- __ Move(scratch2, callback);
- // holder_reg is either receiver or scratch1.
- if (!receiver.is(holder_reg)) {
- ASSERT(scratch1.is(holder_reg));
- __ Push(receiver, holder_reg);
- } else {
- __ push(receiver);
- __ push(holder_reg);
- }
- __ ldr(scratch3,
- FieldMemOperand(scratch2, AccessorInfo::kDataOffset));
- __ mov(scratch1, Operand(ExternalReference::isolate_address()));
- __ Push(scratch3, scratch1, scratch2, name_reg);
-
- ExternalReference ref =
- ExternalReference(IC_Utility(IC::kLoadCallbackProperty),
- masm()->isolate());
- __ TailCallExternalReference(ref, 6, 1);
- }
+ GenerateLoadPostInterceptor(holder_reg, interceptor_holder, name, lookup);
} else { // !compile_followup_inline
// Call the runtime system to load the interceptor.
// Check that the maps haven't changed.
- Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, scratch3,
- name, miss);
- PushInterceptorArguments(masm(), receiver, holder_reg,
- name_reg, interceptor_holder);
+ PushInterceptorArguments(masm(), receiver(), holder_reg,
+ this->name(), interceptor_holder);
ExternalReference ref =
ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForLoad),
- masm()->isolate());
- __ TailCallExternalReference(ref, 6, 1);
+ isolate());
+ __ TailCallExternalReference(ref, StubCache::kInterceptorArgsLength, 1);
}
}
-void CallStubCompiler::GenerateNameCheck(Handle<String> name, Label* miss) {
+void CallStubCompiler::GenerateNameCheck(Handle<Name> name, Label* miss) {
if (kind_ == Code::KEYED_CALL_IC) {
__ cmp(r2, Operand(name));
__ b(ne, miss);
@@ -1488,7 +1565,7 @@ void CallStubCompiler::GenerateNameCheck(Handle<String> name, Label* miss) {
void CallStubCompiler::GenerateGlobalReceiverCheck(Handle<JSObject> object,
Handle<JSObject> holder,
- Handle<String> name,
+ Handle<Name> name,
Label* miss) {
ASSERT(holder->IsGlobalObject());
@@ -1505,12 +1582,12 @@ void CallStubCompiler::GenerateGlobalReceiverCheck(Handle<JSObject> object,
void CallStubCompiler::GenerateLoadFunctionFromCell(
- Handle<JSGlobalPropertyCell> cell,
+ Handle<Cell> cell,
Handle<JSFunction> function,
Label* miss) {
// Get the value from the cell.
__ mov(r3, Operand(cell));
- __ ldr(r1, FieldMemOperand(r3, JSGlobalPropertyCell::kValueOffset));
+ __ ldr(r1, FieldMemOperand(r3, Cell::kValueOffset));
// Check that the cell contains the same function.
if (heap()->InNewSpace(*function)) {
@@ -1545,8 +1622,8 @@ void CallStubCompiler::GenerateMissBranch() {
Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
Handle<JSObject> holder,
- int index,
- Handle<String> name) {
+ PropertyIndex index,
+ Handle<Name> name) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
@@ -1564,7 +1641,8 @@ Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
// Do the right check and compute the holder register.
Register reg = CheckPrototypes(object, r0, holder, r1, r3, r4, name, &miss);
- GenerateFastPropertyLoad(masm(), r1, reg, holder, index);
+ GenerateFastPropertyLoad(masm(), r1, reg, index.is_inobject(holder),
+ index.translate(holder), Representation::Tagged());
GenerateCallFunction(masm(), object, arguments(), &miss, extra_state_);
@@ -1577,12 +1655,61 @@ Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
}
+Handle<Code> CallStubCompiler::CompileArrayCodeCall(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<Cell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name,
+ Code::StubType type) {
+ Label miss;
+
+ // Check that function is still array
+ const int argc = arguments().immediate();
+ GenerateNameCheck(name, &miss);
+ Register receiver = r1;
+
+ if (cell.is_null()) {
+ __ ldr(receiver, MemOperand(sp, argc * kPointerSize));
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, &miss);
+
+ // Check that the maps haven't changed.
+ CheckPrototypes(Handle<JSObject>::cast(object), receiver, holder, r3, r0,
+ r4, name, &miss);
+ } else {
+ ASSERT(cell->value() == *function);
+ GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
+ &miss);
+ GenerateLoadFunctionFromCell(cell, function, &miss);
+ }
+
+ Handle<AllocationSite> site = isolate()->factory()->NewAllocationSite();
+ site->set_transition_info(Smi::FromInt(GetInitialFastElementsKind()));
+ Handle<Cell> site_feedback_cell = isolate()->factory()->NewCell(site);
+ __ mov(r0, Operand(argc));
+ __ mov(r2, Operand(site_feedback_cell));
+ __ mov(r1, Operand(function));
+
+ ArrayConstructorStub stub(isolate());
+ __ TailCallStub(&stub);
+
+ __ bind(&miss);
+ GenerateMissBranch();
+
+ // Return the generated code.
+ return GetCode(type, name);
+}
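CompileArrayCodeCall routes calls to the Array function through ArrayConstructorStub and, before tail-calling it, allocates a fresh AllocationSite whose transition_info records the initial fast elements kind, wrapping it in a Cell passed as type feedback (argc in r0, the feedback cell in r2, the constructor in r1). The snippet below is a standalone model of that feedback shape only, with invented struct names; it does not use the V8 API.

// Standalone model of the allocation-site feedback wired up above.
#include <cstdio>

enum class ElementsKind { FAST_SMI_ELEMENTS, FAST_ELEMENTS, FAST_DOUBLE_ELEMENTS };

struct AllocationSiteModel {
  ElementsKind transition_info;  // kind new arrays should start in / transition to
};

struct FeedbackCellModel {
  AllocationSiteModel* site;
};

int main() {
  AllocationSiteModel site{ElementsKind::FAST_SMI_ELEMENTS};  // initial fast kind
  FeedbackCellModel feedback{&site};
  // The generated call hands argc, the constructor and `feedback` to the
  // array-constructor stub, which consults transition_info when allocating.
  std::printf("kind=%d\n", static_cast<int>(feedback.site->transition_info));
  return 0;
}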
+
+
Handle<Code> CallStubCompiler::CompileArrayPushCall(
Handle<Object> object,
Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<Cell> cell,
Handle<JSFunction> function,
- Handle<String> name) {
+ Handle<String> name,
+ Code::StubType type) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
@@ -1618,7 +1745,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
Label call_builtin;
if (argc == 1) { // Otherwise fall through to call the builtin.
- Label attempt_to_grow_elements;
+ Label attempt_to_grow_elements, with_write_barrier, check_double;
Register elements = r6;
Register end_elements = r5;
@@ -1629,14 +1756,11 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ CheckMap(elements,
r0,
Heap::kFixedArrayMapRootIndex,
- &call_builtin,
+ &check_double,
DONT_DO_SMI_CHECK);
-
// Get the array's length into r0 and calculate new length.
__ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiTag == 0);
__ add(r0, r0, Operand(Smi::FromInt(argc)));
// Get the elements' length.
@@ -1647,7 +1771,6 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ b(gt, &attempt_to_grow_elements);
// Check if value is a smi.
- Label with_write_barrier;
__ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize));
__ JumpIfNotSmi(r4, &with_write_barrier);
@@ -1657,8 +1780,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
// Store the value.
// We may need a register containing the address end_elements below,
// so write back the value in end_elements.
- __ add(end_elements, elements,
- Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ add(end_elements, elements, Operand::PointerOffsetFromSmiKey(r0));
const int kEndElementsOffset =
FixedArray::kHeaderSize - kHeapObjectTag - argc * kPointerSize;
__ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex));
@@ -1667,39 +1789,79 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ Drop(argc + 1);
__ Ret();
+ __ bind(&check_double);
+
+ // Check that the elements are in fast mode and writable.
+ __ CheckMap(elements,
+ r0,
+ Heap::kFixedDoubleArrayMapRootIndex,
+ &call_builtin,
+ DONT_DO_SMI_CHECK);
+
+ // Get the array's length into r0 and calculate new length.
+ __ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ add(r0, r0, Operand(Smi::FromInt(argc)));
+
+ // Get the elements' length.
+ __ ldr(r4, FieldMemOperand(elements, FixedArray::kLengthOffset));
+
+ // Check if we could survive without allocation.
+ __ cmp(r0, r4);
+ __ b(gt, &call_builtin);
+
+ __ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize));
+ __ StoreNumberToDoubleElements(r4, r0, elements, r5, d0,
+ &call_builtin, argc * kDoubleSize);
+
+ // Save new length.
+ __ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+
+ // Drop the arguments and the receiver, and return.
+ __ Drop(argc + 1);
+ __ Ret();
+
__ bind(&with_write_barrier);
__ ldr(r3, FieldMemOperand(receiver, HeapObject::kMapOffset));
if (FLAG_smi_only_arrays && !FLAG_trace_elements_transitions) {
Label fast_object, not_fast_object;
- __ CheckFastObjectElements(r3, r7, &not_fast_object);
+ __ CheckFastObjectElements(r3, r9, &not_fast_object);
__ jmp(&fast_object);
// In case of fast smi-only, convert to fast object, otherwise bail out.
__ bind(&not_fast_object);
- __ CheckFastSmiElements(r3, r7, &call_builtin);
+ __ CheckFastSmiElements(r3, r9, &call_builtin);
+
+ __ ldr(r9, FieldMemOperand(r4, HeapObject::kMapOffset));
+ __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
+ __ cmp(r9, ip);
+ __ b(eq, &call_builtin);
 // r1: receiver
// r3: map
Label try_holey_map;
__ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
FAST_ELEMENTS,
r3,
- r7,
+ r9,
&try_holey_map);
__ mov(r2, receiver);
ElementsTransitionGenerator::
- GenerateMapChangeElementsTransition(masm());
+ GenerateMapChangeElementsTransition(masm(),
+ DONT_TRACK_ALLOCATION_SITE,
+ NULL);
__ jmp(&fast_object);
__ bind(&try_holey_map);
__ LoadTransitionedArrayMapConditional(FAST_HOLEY_SMI_ELEMENTS,
FAST_HOLEY_ELEMENTS,
r3,
- r7,
+ r9,
&call_builtin);
__ mov(r2, receiver);
ElementsTransitionGenerator::
- GenerateMapChangeElementsTransition(masm());
+ GenerateMapChangeElementsTransition(masm(),
+ DONT_TRACK_ALLOCATION_SITE,
+ NULL);
__ bind(&fast_object);
} else {
__ CheckFastObjectElements(r3, r3, &call_builtin);
@@ -1711,8 +1873,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
// Store the value.
// We may need a register containing the address end_elements below,
// so write back the value in end_elements.
- __ add(end_elements, elements,
- Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ add(end_elements, elements, Operand::PointerOffsetFromSmiKey(r0));
__ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex));
__ RecordWrite(elements,
@@ -1727,7 +1888,6 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ bind(&attempt_to_grow_elements);
// r0: array's length + 1.
- // r4: elements' length.
if (!FLAG_inline_new) {
__ b(&call_builtin);
@@ -1738,23 +1898,21 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
// the new element is non-Smi. For now, delegate to the builtin.
Label no_fast_elements_check;
__ JumpIfSmi(r2, &no_fast_elements_check);
- __ ldr(r7, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ CheckFastObjectElements(r7, r7, &call_builtin);
+ __ ldr(r9, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ CheckFastObjectElements(r9, r9, &call_builtin);
__ bind(&no_fast_elements_check);
- Isolate* isolate = masm()->isolate();
ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate);
+ ExternalReference::new_space_allocation_top_address(isolate());
ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address(isolate);
+ ExternalReference::new_space_allocation_limit_address(isolate());
const int kAllocationDelta = 4;
// Load top and check if it is the end of elements.
- __ add(end_elements, elements,
- Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ add(end_elements, elements, Operand::PointerOffsetFromSmiKey(r0));
__ add(end_elements, end_elements, Operand(kEndElementsOffset));
- __ mov(r7, Operand(new_space_allocation_top));
- __ ldr(r3, MemOperand(r7));
+ __ mov(r4, Operand(new_space_allocation_top));
+ __ ldr(r3, MemOperand(r4));
__ cmp(end_elements, r3);
__ b(ne, &call_builtin);
@@ -1766,7 +1924,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
// We fit and could grow elements.
// Update new_space_allocation_top.
- __ str(r3, MemOperand(r7));
+ __ str(r3, MemOperand(r4));
// Push the argument.
__ str(r2, MemOperand(end_elements));
// Fill the rest with holes.
@@ -1777,6 +1935,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
// Update elements' and array's sizes.
__ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ ldr(r4, FieldMemOperand(elements, FixedArray::kLengthOffset));
__ add(r4, r4, Operand(Smi::FromInt(kAllocationDelta)));
__ str(r4, FieldMemOperand(elements, FixedArray::kLengthOffset));
@@ -1785,10 +1944,8 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ Ret();
}
__ bind(&call_builtin);
- __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPush,
- masm()->isolate()),
- argc + 1,
- 1);
+ __ TailCallExternalReference(
+ ExternalReference(Builtins::c_ArrayPush, isolate()), argc + 1, 1);
}
// Handle call cache miss.
@@ -1796,16 +1953,17 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
GenerateMissBranch();
// Return the generated code.
- return GetCode(function);
+ return GetCode(type, name);
}
Handle<Code> CallStubCompiler::CompileArrayPopCall(
Handle<Object> object,
Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<Cell> cell,
Handle<JSFunction> function,
- Handle<String> name) {
+ Handle<String> name,
+ Code::StubType type) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
@@ -1849,11 +2007,9 @@ Handle<Code> CallStubCompiler::CompileArrayPopCall(
// Get the last element.
__ LoadRoot(r6, Heap::kTheHoleValueRootIndex);
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiTag == 0);
// We can't address the last element in one operation. Compute the more
// expensive shift first, and use an offset later on.
- __ add(elements, elements, Operand(r4, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ add(elements, elements, Operand::PointerOffsetFromSmiKey(r4));
__ ldr(r0, FieldMemOperand(elements, FixedArray::kHeaderSize));
__ cmp(r0, r6);
__ b(eq, &call_builtin);
@@ -1872,26 +2028,25 @@ Handle<Code> CallStubCompiler::CompileArrayPopCall(
__ Ret();
__ bind(&call_builtin);
- __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPop,
- masm()->isolate()),
- argc + 1,
- 1);
+ __ TailCallExternalReference(
+ ExternalReference(Builtins::c_ArrayPop, isolate()), argc + 1, 1);
// Handle call cache miss.
__ bind(&miss);
GenerateMissBranch();
// Return the generated code.
- return GetCode(function);
+ return GetCode(type, name);
}
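
Both the push and pop fast paths above swap the explicit shift Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize) for Operand::PointerOffsetFromSmiKey(r0), which names the same arithmetic: on 32-bit ARM a smi stores an integer n as n << kSmiTagSize, so the byte offset n * kPointerSize falls out of shifting the tagged value left by kPointerSizeLog2 - kSmiTagSize. A hedged stand-alone check of that identity (plain C++, not V8 code; the constants are the assumed 32-bit values):

    // Sketch only: verifies the smi-key-to-byte-offset arithmetic for a
    // 32-bit layout with a one-bit smi tag and 4-byte pointers.
    #include <cassert>
    #include <cstdint>

    int main() {
      const int kSmiTagSize = 1;       // assumed: smi == value << 1, tag bit 0
      const int kPointerSizeLog2 = 2;  // assumed: 4-byte pointers
      for (int32_t n = 0; n < 16; ++n) {
        int32_t smi_key = n << kSmiTagSize;
        int32_t offset = smi_key << (kPointerSizeLog2 - kSmiTagSize);
        assert(offset == n * 4);       // same as n * kPointerSize
      }
      return 0;
    }
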
Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
Handle<Object> object,
Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<Cell> cell,
Handle<JSFunction> function,
- Handle<String> name) {
+ Handle<String> name,
+ Code::StubType type) {
// ----------- S t a t e -------------
// -- r2 : function name
// -- lr : return address
@@ -1922,8 +2077,9 @@ Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
r0,
&miss);
ASSERT(!object.is_identical_to(holder));
- CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())),
- r0, holder, r1, r3, r4, name, &miss);
+ CheckPrototypes(
+ Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
+ r0, holder, r1, r3, r4, name, &miss);
Register receiver = r1;
Register index = r4;
@@ -1963,16 +2119,17 @@ Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
GenerateMissBranch();
// Return the generated code.
- return GetCode(function);
+ return GetCode(type, name);
}
Handle<Code> CallStubCompiler::CompileStringCharAtCall(
Handle<Object> object,
Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<Cell> cell,
Handle<JSFunction> function,
- Handle<String> name) {
+ Handle<String> name,
+ Code::StubType type) {
// ----------- S t a t e -------------
// -- r2 : function name
// -- lr : return address
@@ -2002,8 +2159,9 @@ Handle<Code> CallStubCompiler::CompileStringCharAtCall(
r0,
&miss);
ASSERT(!object.is_identical_to(holder));
- CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())),
- r0, holder, r1, r3, r4, name, &miss);
+ CheckPrototypes(
+ Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
+ r0, holder, r1, r3, r4, name, &miss);
Register receiver = r0;
Register index = r4;
@@ -2033,7 +2191,7 @@ Handle<Code> CallStubCompiler::CompileStringCharAtCall(
if (index_out_of_range.is_linked()) {
__ bind(&index_out_of_range);
- __ LoadRoot(r0, Heap::kEmptyStringRootIndex);
+ __ LoadRoot(r0, Heap::kempty_stringRootIndex);
__ Drop(argc + 1);
__ Ret();
}
@@ -2045,16 +2203,17 @@ Handle<Code> CallStubCompiler::CompileStringCharAtCall(
GenerateMissBranch();
// Return the generated code.
- return GetCode(function);
+ return GetCode(type, name);
}
Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
Handle<Object> object,
Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<Cell> cell,
Handle<JSFunction> function,
- Handle<String> name) {
+ Handle<String> name,
+ Code::StubType type) {
// ----------- S t a t e -------------
// -- r2 : function name
// -- lr : return address
@@ -2075,7 +2234,6 @@ Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
if (cell.is_null()) {
__ ldr(r1, MemOperand(sp, 1 * kPointerSize));
- STATIC_ASSERT(kSmiTag == 0);
__ JumpIfSmi(r1, &miss);
CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4,
@@ -2093,7 +2251,6 @@ Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
// Check the code is a smi.
Label slow;
- STATIC_ASSERT(kSmiTag == 0);
__ JumpIfNotSmi(code, &slow);
// Convert the smi code to uint16.
@@ -2110,24 +2267,26 @@ Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
// Tail call the full function. We do not have to patch the receiver
// because the function makes no use of it.
__ bind(&slow);
- __ InvokeFunction(
- function, arguments(), JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
+ ParameterCount expected(function);
+ __ InvokeFunction(function, expected, arguments(),
+ JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
__ bind(&miss);
// r2: function name.
GenerateMissBranch();
// Return the generated code.
- return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
+ return GetCode(type, name);
}
Handle<Code> CallStubCompiler::CompileMathFloorCall(
Handle<Object> object,
Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<Cell> cell,
Handle<JSFunction> function,
- Handle<String> name) {
+ Handle<String> name,
+ Code::StubType type) {
// ----------- S t a t e -------------
// -- r2 : function name
// -- lr : return address
@@ -2136,11 +2295,6 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
// -- sp[argc * 4] : receiver
// -----------------------------------
- if (!CpuFeatures::IsSupported(VFP2)) {
- return Handle<Code>::null();
- }
-
- CpuFeatures::Scope scope_vfp2(VFP2);
const int argc = arguments().immediate();
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
@@ -2151,7 +2305,6 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
if (cell.is_null()) {
__ ldr(r1, MemOperand(sp, 1 * kPointerSize));
- STATIC_ASSERT(kSmiTag == 0);
__ JumpIfSmi(r1, &miss);
CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4,
name, &miss);
@@ -2166,17 +2319,13 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
__ ldr(r0, MemOperand(sp, 0 * kPointerSize));
// If the argument is a smi, just return.
- STATIC_ASSERT(kSmiTag == 0);
- __ tst(r0, Operand(kSmiTagMask));
+ __ SmiTst(r0);
__ Drop(argc + 1, eq);
__ Ret(eq);
__ CheckMap(r0, r1, Heap::kHeapNumberMapRootIndex, &slow, DONT_DO_SMI_CHECK);
- Label wont_fit_smi, no_vfp_exception, restore_fpscr_and_return;
-
- // If vfp3 is enabled, we use the fpu rounding with the RM (round towards
- // minus infinity) mode.
+ Label smi_check, just_return;
// Load the HeapNumber value.
// We will need access to the value in the core registers, so we load it
@@ -2186,94 +2335,67 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
__ Ldrd(r4, r5, FieldMemOperand(r0, HeapNumber::kValueOffset));
__ vmov(d1, r4, r5);
- // Backup FPSCR.
- __ vmrs(r3);
- // Set custom FPCSR:
- // - Set rounding mode to "Round towards Minus Infinity"
- // (i.e. bits [23:22] = 0b10).
- // - Clear vfp cumulative exception flags (bits [3:0]).
- // - Make sure Flush-to-zero mode control bit is unset (bit 22).
- __ bic(r9, r3,
- Operand(kVFPExceptionMask | kVFPRoundingModeMask | kVFPFlushToZeroMask));
- __ orr(r9, r9, Operand(kRoundToMinusInf));
- __ vmsr(r9);
-
- // Convert the argument to an integer.
- __ vcvt_s32_f64(s0, d1, kFPSCRRounding);
-
- // Use vcvt latency to start checking for special cases.
- // Get the argument exponent and clear the sign bit.
- __ bic(r6, r5, Operand(HeapNumber::kSignMask));
- __ mov(r6, Operand(r6, LSR, HeapNumber::kMantissaBitsInTopWord));
-
- // Retrieve FPSCR and check for vfp exceptions.
- __ vmrs(r9);
- __ tst(r9, Operand(kVFPExceptionMask));
- __ b(&no_vfp_exception, eq);
-
- // Check for NaN, Infinity, and -Infinity.
+ // Check for NaN, Infinities and -0.
// They are invariant through a Math.Floor call, so just
// return the original argument.
- __ sub(r7, r6, Operand(HeapNumber::kExponentMask
- >> HeapNumber::kMantissaBitsInTopWord), SetCC);
- __ b(&restore_fpscr_and_return, eq);
- // We had an overflow or underflow in the conversion. Check if we
- // have a big exponent.
- __ cmp(r7, Operand(HeapNumber::kMantissaBits));
- // If greater or equal, the argument is already round and in r0.
- __ b(&restore_fpscr_and_return, ge);
- __ b(&wont_fit_smi);
-
- __ bind(&no_vfp_exception);
- // Move the result back to general purpose register r0.
- __ vmov(r0, s0);
- // Check if the result fits into a smi.
- __ add(r1, r0, Operand(0x40000000), SetCC);
- __ b(&wont_fit_smi, mi);
- // Tag the result.
- STATIC_ASSERT(kSmiTag == 0);
- __ mov(r0, Operand(r0, LSL, kSmiTagSize));
-
- // Check for -0.
- __ cmp(r0, Operand(0, RelocInfo::NONE));
- __ b(&restore_fpscr_and_return, ne);
- // r5 already holds the HeapNumber exponent.
- __ tst(r5, Operand(HeapNumber::kSignMask));
- // If our HeapNumber is negative it was -0, so load its address and return.
- // Else r0 is loaded with 0, so we can also just return.
- __ ldr(r0, MemOperand(sp, 0 * kPointerSize), ne);
-
- __ bind(&restore_fpscr_and_return);
- // Restore FPSCR and return.
- __ vmsr(r3);
+ __ Sbfx(r3, r5, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
+ __ cmp(r3, Operand(-1));
+ __ b(eq, &just_return);
+ __ eor(r3, r5, Operand(0x80000000u));
+ __ orr(r3, r3, r4, SetCC);
+ __ b(eq, &just_return);
+ // Test for values that can be exactly represented as a
+ // signed 32-bit integer.
+ __ TryDoubleToInt32Exact(r0, d1, d2);
+ // If exact, check that the result fits into a smi.
+ __ b(eq, &smi_check);
+ __ cmp(r5, Operand(0));
+
+ // If the input is in ]+0, +inf[, the cmp has cleared the overflow and
+ // negative flags (V=0 and N=0), so the two following instructions won't
+ // execute and we fall through to smi_check to test whether the result
+ // fits into a smi.
+
+ // If the input is in ]-inf, -0[, subtract one and go to slow if that
+ // overflows; otherwise fall through to the smi check.
+ // Hint: if x is a negative, non-integer number,
+ // floor(x) == round_to_zero(x) - 1.
+ __ sub(r0, r0, Operand(1), SetCC, mi);
+ __ b(vs, &slow);
+
+ __ bind(&smi_check);
+ // Check if the result can fit into a smi. If we had an overflow, the
+ // result is either 0x80000000 or 0x7FFFFFFF and won't fit into a smi,
+ // so branch to slow.
+ __ SmiTag(r0, SetCC);
+ __ b(vs, &slow);
+
+ __ bind(&just_return);
__ Drop(argc + 1);
__ Ret();
- __ bind(&wont_fit_smi);
- // Restore FPCSR and fall to slow case.
- __ vmsr(r3);
-
__ bind(&slow);
// Tail call the full function. We do not have to patch the receiver
// because the function makes no use of it.
- __ InvokeFunction(
- function, arguments(), JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
+ ParameterCount expected(function);
+ __ InvokeFunction(function, expected, arguments(),
+ JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
__ bind(&miss);
// r2: function name.
GenerateMissBranch();
// Return the generated code.
- return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
+ return GetCode(type, name);
}
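
The rewritten Math.floor fast path above drops the old FPSCR round-to-minus-infinity sequence in favour of TryDoubleToInt32Exact plus the identity that, for a negative non-integer x, floor(x) equals the round-toward-zero result minus one (the "Hint" comment above). A minimal scalar sketch of that control flow follows; it is plain C++ for illustration only, and the NaN/infinity/-0 filtering and smi tagging are simplified relative to the assembly:

    // Hedged sketch of the floor fast-path logic (illustrative, not V8 code).
    #include <cmath>
    #include <cstdint>
    #include <limits>

    // Returns false when the caller must fall back to the slow (generic) path.
    static bool FastFloorToInt32(double x, int32_t* out) {
      if (std::isnan(x) || std::isinf(x)) return false;   // handled separately above
      double truncated = std::trunc(x);                   // round toward zero
      if (truncated < INT32_MIN || truncated > INT32_MAX) return false;
      int32_t result = static_cast<int32_t>(truncated);
      if (truncated != x && x < 0.0) {
        // floor(x) == trunc(x) - 1 for negative non-integers.
        if (result == std::numeric_limits<int32_t>::min()) return false;
        result -= 1;
      }
      *out = result;                                      // smi range check omitted here
      return true;
    }
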
Handle<Code> CallStubCompiler::CompileMathAbsCall(
Handle<Object> object,
Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<Cell> cell,
Handle<JSFunction> function,
- Handle<String> name) {
+ Handle<String> name,
+ Code::StubType type) {
// ----------- S t a t e -------------
// -- r2 : function name
// -- lr : return address
@@ -2291,7 +2413,6 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall(
GenerateNameCheck(name, &miss);
if (cell.is_null()) {
__ ldr(r1, MemOperand(sp, 1 * kPointerSize));
- STATIC_ASSERT(kSmiTag == 0);
__ JumpIfSmi(r1, &miss);
CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4,
name, &miss);
@@ -2307,7 +2428,6 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall(
// Check if the argument is a smi.
Label not_smi;
- STATIC_ASSERT(kSmiTag == 0);
__ JumpIfNotSmi(r0, &not_smi);
// Do bitwise not or do nothing depending on the sign of the
@@ -2355,15 +2475,16 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall(
// Tail call the full function. We do not have to patch the receiver
// because the function makes no use of it.
__ bind(&slow);
- __ InvokeFunction(
- function, arguments(), JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
+ ParameterCount expected(function);
+ __ InvokeFunction(function, expected, arguments(),
+ JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
__ bind(&miss);
// r2: function name.
GenerateMissBranch();
// Return the generated code.
- return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
+ return GetCode(type, name);
}
@@ -2371,7 +2492,7 @@ Handle<Code> CallStubCompiler::CompileFastApiCall(
const CallOptimization& optimization,
Handle<Object> object,
Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<Cell> cell,
Handle<JSFunction> function,
Handle<String> name) {
Counters* counters = isolate()->counters();
@@ -2405,7 +2526,7 @@ Handle<Code> CallStubCompiler::CompileFastApiCall(
CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4, name,
depth, &miss);
- GenerateFastApiDirectCall(masm(), optimization, argc);
+ GenerateFastApiDirectCall(masm(), optimization, argc, false);
__ bind(&miss);
FreeSpaceForFastApiCall(masm());
@@ -2418,23 +2539,15 @@ Handle<Code> CallStubCompiler::CompileFastApiCall(
}
-Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object,
- Handle<JSObject> holder,
- Handle<JSFunction> function,
- Handle<String> name,
- CheckType check) {
+void CallStubCompiler::CompileHandlerFrontend(Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ CheckType check,
+ Label* success) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
// -----------------------------------
- if (HasCustomCallGenerator(function)) {
- Handle<Code> code = CompileCustomCall(object, holder,
- Handle<JSGlobalPropertyCell>::null(),
- function, name);
- // A null handle means bail out to the regular compiler code below.
- if (!code.is_null()) return code;
- }
-
Label miss;
GenerateNameCheck(name, &miss);
@@ -2452,8 +2565,7 @@ Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object,
ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK);
switch (check) {
case RECEIVER_MAP_CHECK:
- __ IncrementCounter(masm()->isolate()->counters()->call_const(),
- 1, r0, r3);
+ __ IncrementCounter(isolate()->counters()->call_const(), 1, r0, r3);
// Check that the maps haven't changed.
CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4,
@@ -2468,78 +2580,102 @@ Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object,
break;
case STRING_CHECK:
- if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
- // Check that the object is a two-byte string or a symbol.
- __ CompareObjectType(r1, r3, r3, FIRST_NONSTRING_TYPE);
- __ b(ge, &miss);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::STRING_FUNCTION_INDEX, r0, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype())),
- r0, holder, r3, r1, r4, name, &miss);
- } else {
- // Calling non-strict non-builtins with a value as the receiver
- // requires boxing.
- __ jmp(&miss);
- }
+ // Check that the object is a string.
+ __ CompareObjectType(r1, r3, r3, FIRST_NONSTRING_TYPE);
+ __ b(ge, &miss);
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateDirectLoadGlobalFunctionPrototype(
+ masm(), Context::STRING_FUNCTION_INDEX, r0, &miss);
+ CheckPrototypes(
+ Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
+ r0, holder, r3, r1, r4, name, &miss);
break;
- case NUMBER_CHECK:
- if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
- Label fast;
- // Check that the object is a smi or a heap number.
- __ JumpIfSmi(r1, &fast);
- __ CompareObjectType(r1, r0, r0, HEAP_NUMBER_TYPE);
- __ b(ne, &miss);
- __ bind(&fast);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::NUMBER_FUNCTION_INDEX, r0, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype())),
- r0, holder, r3, r1, r4, name, &miss);
- } else {
- // Calling non-strict non-builtins with a value as the receiver
- // requires boxing.
- __ jmp(&miss);
- }
+ case SYMBOL_CHECK:
+ // Check that the object is a symbol.
+ __ CompareObjectType(r1, r1, r3, SYMBOL_TYPE);
+ __ b(ne, &miss);
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateDirectLoadGlobalFunctionPrototype(
+ masm(), Context::SYMBOL_FUNCTION_INDEX, r0, &miss);
+ CheckPrototypes(
+ Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
+ r0, holder, r3, r1, r4, name, &miss);
break;
- case BOOLEAN_CHECK:
- if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
- Label fast;
- // Check that the object is a boolean.
- __ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ cmp(r1, ip);
- __ b(eq, &fast);
- __ LoadRoot(ip, Heap::kFalseValueRootIndex);
- __ cmp(r1, ip);
- __ b(ne, &miss);
- __ bind(&fast);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::BOOLEAN_FUNCTION_INDEX, r0, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype())),
- r0, holder, r3, r1, r4, name, &miss);
- } else {
- // Calling non-strict non-builtins with a value as the receiver
- // requires boxing.
- __ jmp(&miss);
- }
+ case NUMBER_CHECK: {
+ Label fast;
+ // Check that the object is a smi or a heap number.
+ __ JumpIfSmi(r1, &fast);
+ __ CompareObjectType(r1, r0, r0, HEAP_NUMBER_TYPE);
+ __ b(ne, &miss);
+ __ bind(&fast);
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateDirectLoadGlobalFunctionPrototype(
+ masm(), Context::NUMBER_FUNCTION_INDEX, r0, &miss);
+ CheckPrototypes(
+ Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
+ r0, holder, r3, r1, r4, name, &miss);
break;
+ }
+ case BOOLEAN_CHECK: {
+ Label fast;
+ // Check that the object is a boolean.
+ __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+ __ cmp(r1, ip);
+ __ b(eq, &fast);
+ __ LoadRoot(ip, Heap::kFalseValueRootIndex);
+ __ cmp(r1, ip);
+ __ b(ne, &miss);
+ __ bind(&fast);
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateDirectLoadGlobalFunctionPrototype(
+ masm(), Context::BOOLEAN_FUNCTION_INDEX, r0, &miss);
+ CheckPrototypes(
+ Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
+ r0, holder, r3, r1, r4, name, &miss);
+ break;
+ }
}
- CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- __ InvokeFunction(
- function, arguments(), JUMP_FUNCTION, NullCallWrapper(), call_kind);
+ __ b(success);
// Handle call cache miss.
__ bind(&miss);
GenerateMissBranch();
+}
+
+
+void CallStubCompiler::CompileHandlerBackend(Handle<JSFunction> function) {
+ CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
+ ? CALL_AS_FUNCTION
+ : CALL_AS_METHOD;
+ ParameterCount expected(function);
+ __ InvokeFunction(function, expected, arguments(),
+ JUMP_FUNCTION, NullCallWrapper(), call_kind);
+}
+
+
+Handle<Code> CallStubCompiler::CompileCallConstant(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ CheckType check,
+ Handle<JSFunction> function) {
+ if (HasCustomCallGenerator(function)) {
+ Handle<Code> code = CompileCustomCall(object, holder,
+ Handle<Cell>::null(),
+ function, Handle<String>::cast(name),
+ Code::CONSTANT);
+ // A null handle means bail out to the regular compiler code below.
+ if (!code.is_null()) return code;
+ }
+
+ Label success;
+
+ CompileHandlerFrontend(object, holder, name, check, &success);
+ __ bind(&success);
+ CompileHandlerBackend(function);
// Return the generated code.
return GetCode(function);
@@ -2548,7 +2684,7 @@ Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object,
Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
Handle<JSObject> holder,
- Handle<String> name) {
+ Handle<Name> name) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
@@ -2587,15 +2723,17 @@ Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
Handle<Code> CallStubCompiler::CompileCallGlobal(
Handle<JSObject> object,
Handle<GlobalObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<PropertyCell> cell,
Handle<JSFunction> function,
- Handle<String> name) {
+ Handle<Name> name) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
// -----------------------------------
if (HasCustomCallGenerator(function)) {
- Handle<Code> code = CompileCustomCall(object, holder, cell, function, name);
+ Handle<Code> code = CompileCustomCall(
+ object, holder, cell, function, Handle<String>::cast(name),
+ Code::NORMAL);
// A null handle means bail out to the regular compiler code below.
if (!code.is_null()) return code;
}
@@ -2619,7 +2757,7 @@ Handle<Code> CallStubCompiler::CompileCallGlobal(
__ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
// Jump to the cached code (tail call).
- Counters* counters = masm()->isolate()->counters();
+ Counters* counters = isolate()->counters();
__ IncrementCounter(counters->call_global_inline(), 1, r3, r4);
ParameterCount expected(function->shared()->formal_parameter_count());
CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
@@ -2642,72 +2780,49 @@ Handle<Code> CallStubCompiler::CompileCallGlobal(
}
-Handle<Code> StoreStubCompiler::CompileStoreField(Handle<JSObject> object,
- int index,
- Handle<Map> transition,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : receiver
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- Label miss;
-
- GenerateStoreField(masm(),
- object,
- index,
- transition,
- name,
- r1, r2, r3, r4,
- &miss);
- __ bind(&miss);
- Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(transition.is_null()
- ? Code::FIELD
- : Code::MAP_TRANSITION, name);
-}
-
-
Handle<Code> StoreStubCompiler::CompileStoreCallback(
- Handle<String> name,
- Handle<JSObject> receiver,
+ Handle<JSObject> object,
Handle<JSObject> holder,
- Handle<AccessorInfo> callback) {
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : receiver
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- Label miss;
- // Check that the maps haven't changed.
- __ JumpIfSmi(r1, &miss);
- CheckPrototypes(receiver, r1, holder, r3, r4, r5, name, &miss);
+ Handle<Name> name,
+ Handle<ExecutableAccessorInfo> callback) {
+ Label success;
+ HandlerFrontend(object, receiver(), holder, name, &success);
+ __ bind(&success);
// Stub never generated for non-global objects that require access checks.
ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
- __ push(r1); // receiver
+ __ push(receiver()); // receiver
__ mov(ip, Operand(callback)); // callback info
- __ Push(ip, r2, r0);
+ __ push(ip);
+ __ mov(ip, Operand(name));
+ __ Push(ip, value());
// Do tail-call to the runtime system.
ExternalReference store_callback_property =
- ExternalReference(IC_Utility(IC::kStoreCallbackProperty),
- masm()->isolate());
+ ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate());
__ TailCallExternalReference(store_callback_property, 4, 1);
- // Handle store cache miss.
- __ bind(&miss);
- Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
+ // Return the generated code.
+ return GetCode(kind(), Code::CALLBACKS, name);
+}
+
+
+Handle<Code> StoreStubCompiler::CompileStoreCallback(
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ const CallOptimization& call_optimization) {
+ Label success;
+ HandlerFrontend(object, receiver(), holder, name, &success);
+ __ bind(&success);
+
+ Register values[] = { value() };
+ GenerateFastApiCall(
+ masm(), call_optimization, receiver(), scratch3(), 1, values);
// Return the generated code.
- return GetCode(Code::CALLBACKS, name);
+ return GetCode(kind(), Code::CALLBACKS, name);
}
@@ -2734,8 +2849,9 @@ void StoreStubCompiler::GenerateStoreViaSetter(
// Call the JavaScript setter with receiver and value on the stack.
__ Push(r1, r0);
ParameterCount actual(1);
- __ InvokeFunction(setter, actual, CALL_FUNCTION, NullCallWrapper(),
- CALL_AS_METHOD);
+ ParameterCount expected(setter);
+ __ InvokeFunction(setter, expected, actual,
+ CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -2756,198 +2872,104 @@ void StoreStubCompiler::GenerateStoreViaSetter(
#define __ ACCESS_MASM(masm())
-Handle<Code> StoreStubCompiler::CompileStoreViaSetter(
- Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<JSFunction> setter) {
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : receiver
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- Label miss;
-
- // Check that the maps haven't changed.
- __ JumpIfSmi(r1, &miss);
- CheckPrototypes(receiver, r1, holder, r3, r4, r5, name, &miss);
-
- GenerateStoreViaSetter(masm(), setter);
-
- __ bind(&miss);
- Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(Code::CALLBACKS, name);
-}
-
-
Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
- Handle<JSObject> receiver,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : receiver
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
+ Handle<JSObject> object,
+ Handle<Name> name) {
Label miss;
// Check that the map of the object hasn't changed.
- __ CheckMap(r1, r3, Handle<Map>(receiver->map()), &miss,
- DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
+ __ CheckMap(receiver(), scratch1(), Handle<Map>(object->map()), &miss,
+ DO_SMI_CHECK);
// Perform global security token check if needed.
- if (receiver->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(r1, r3, &miss);
+ if (object->IsJSGlobalProxy()) {
+ __ CheckAccessGlobalProxy(receiver(), scratch1(), &miss);
}
// Stub is never generated for non-global objects that require access
// checks.
- ASSERT(receiver->IsJSGlobalProxy() || !receiver->IsAccessCheckNeeded());
+ ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
- __ Push(r1, r2, r0); // Receiver, name, value.
+ __ Push(receiver(), this->name(), value());
- __ mov(r0, Operand(Smi::FromInt(strict_mode_)));
- __ push(r0); // strict mode
+ __ mov(scratch1(), Operand(Smi::FromInt(strict_mode())));
+ __ push(scratch1()); // strict mode
// Do tail-call to the runtime system.
ExternalReference store_ic_property =
- ExternalReference(IC_Utility(IC::kStoreInterceptorProperty),
- masm()->isolate());
+ ExternalReference(IC_Utility(IC::kStoreInterceptorProperty), isolate());
__ TailCallExternalReference(store_ic_property, 4, 1);
// Handle store cache miss.
__ bind(&miss);
- Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
// Return the generated code.
- return GetCode(Code::INTERCEPTOR, name);
+ return GetCode(kind(), Code::INTERCEPTOR, name);
}
-Handle<Code> StoreStubCompiler::CompileStoreGlobal(
- Handle<GlobalObject> object,
- Handle<JSGlobalPropertyCell> cell,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : receiver
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- Label miss;
+Handle<Code> LoadStubCompiler::CompileLoadNonexistent(
+ Handle<JSObject> object,
+ Handle<JSObject> last,
+ Handle<Name> name,
+ Handle<JSGlobalObject> global) {
+ Label success;
- // Check that the map of the global has not changed.
- __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ cmp(r3, Operand(Handle<Map>(object->map())));
- __ b(ne, &miss);
+ NonexistentHandlerFrontend(object, last, name, &success, global);
- // Check that the value in the cell is not the hole. If it is, this
- // cell could have been deleted and reintroducing the global needs
- // to update the property details in the property dictionary of the
- // global object. We bail out to the runtime system to do that.
- __ mov(r4, Operand(cell));
- __ LoadRoot(r5, Heap::kTheHoleValueRootIndex);
- __ ldr(r6, FieldMemOperand(r4, JSGlobalPropertyCell::kValueOffset));
- __ cmp(r5, r6);
- __ b(eq, &miss);
-
- // Store the value in the cell.
- __ str(r0, FieldMemOperand(r4, JSGlobalPropertyCell::kValueOffset));
- // Cells are always rescanned, so no write barrier here.
-
- Counters* counters = masm()->isolate()->counters();
- __ IncrementCounter(counters->named_store_global_inline(), 1, r4, r3);
+ __ bind(&success);
+ // Return undefined if maps of the full prototype chain are still the
+ // same and no global property with this name contains a value.
+ __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
__ Ret();
- // Handle store cache miss.
- __ bind(&miss);
- __ IncrementCounter(counters->named_store_global_inline_miss(), 1, r4, r3);
- Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
// Return the generated code.
- return GetCode(Code::NORMAL, name);
+ return GetCode(kind(), Code::NONEXISTENT, name);
}
-Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<String> name,
- Handle<JSObject> object,
- Handle<JSObject> last) {
- // ----------- S t a t e -------------
- // -- r0 : receiver
- // -- lr : return address
- // -----------------------------------
- Label miss;
-
- // Check that receiver is not a smi.
- __ JumpIfSmi(r0, &miss);
-
- // Check the maps of the full prototype chain.
- CheckPrototypes(object, r0, last, r3, r1, r4, name, &miss);
+Register* LoadStubCompiler::registers() {
+ // receiver, name, scratch1, scratch2, scratch3, scratch4.
+ static Register registers[] = { r0, r2, r3, r1, r4, r5 };
+ return registers;
+}
- // If the last object in the prototype chain is a global object,
- // check that the global property cell is empty.
- if (last->IsGlobalObject()) {
- GenerateCheckPropertyCell(
- masm(), Handle<GlobalObject>::cast(last), name, r1, &miss);
- }
- // Return undefined if maps of the full prototype chain are still the
- // same and no global property with this name contains a value.
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
- __ Ret();
+Register* KeyedLoadStubCompiler::registers() {
+ // receiver, name, scratch1, scratch2, scratch3, scratch4.
+ static Register registers[] = { r1, r0, r2, r3, r4, r5 };
+ return registers;
+}
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
- // Return the generated code.
- return GetCode(Code::NONEXISTENT, factory()->empty_string());
+Register* StoreStubCompiler::registers() {
+ // receiver, name, value, scratch1, scratch2, scratch3.
+ static Register registers[] = { r1, r2, r0, r3, r4, r5 };
+ return registers;
}
-Handle<Code> LoadStubCompiler::CompileLoadField(Handle<JSObject> object,
- Handle<JSObject> holder,
- int index,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- r0 : receiver
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- Label miss;
+Register* KeyedStoreStubCompiler::registers() {
+ // receiver, name, value, scratch1, scratch2, scratch3.
+ static Register registers[] = { r2, r1, r0, r3, r4, r5 };
+ return registers;
+}
- GenerateLoadField(object, holder, r0, r3, r1, r4, index, name, &miss);
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
- // Return the generated code.
- return GetCode(Code::FIELD, name);
+void KeyedLoadStubCompiler::GenerateNameCheck(Handle<Name> name,
+ Register name_reg,
+ Label* miss) {
+ __ cmp(name_reg, Operand(name));
+ __ b(ne, miss);
}
-Handle<Code> LoadStubCompiler::CompileLoadCallback(
- Handle<String> name,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<AccessorInfo> callback) {
- // ----------- S t a t e -------------
- // -- r0 : receiver
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- Label miss;
- GenerateLoadCallback(object, holder, r0, r2, r3, r1, r4, r5, callback, name,
- &miss);
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
-
- // Return the generated code.
- return GetCode(Code::CALLBACKS, name);
+void KeyedStoreStubCompiler::GenerateNameCheck(Handle<Name> name,
+ Register name_reg,
+ Label* miss) {
+ __ cmp(name_reg, Operand(name));
+ __ b(ne, miss);
}
@@ -2956,6 +2978,7 @@ Handle<Code> LoadStubCompiler::CompileLoadCallback(
void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
+ Register receiver,
Handle<JSFunction> getter) {
// ----------- S t a t e -------------
// -- r0 : receiver
@@ -2967,10 +2990,11 @@ void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
if (!getter.is_null()) {
// Call the JavaScript getter with the receiver on the stack.
- __ push(r0);
+ __ push(receiver);
ParameterCount actual(0);
- __ InvokeFunction(getter, actual, CALL_FUNCTION, NullCallWrapper(),
- CALL_AS_METHOD);
+ ParameterCount expected(getter);
+ __ InvokeFunction(getter, expected, actual,
+ CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -2988,94 +3012,22 @@ void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
#define __ ACCESS_MASM(masm())
-Handle<Code> LoadStubCompiler::CompileLoadViaGetter(
- Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<JSFunction> getter) {
- // ----------- S t a t e -------------
- // -- r0 : receiver
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- Label miss;
-
- // Check that the maps haven't changed.
- __ JumpIfSmi(r0, &miss);
- CheckPrototypes(receiver, r0, holder, r3, r4, r1, name, &miss);
-
- GenerateLoadViaGetter(masm(), getter);
-
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
-
- // Return the generated code.
- return GetCode(Code::CALLBACKS, name);
-}
-
-
-Handle<Code> LoadStubCompiler::CompileLoadConstant(Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<JSFunction> value,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- r0 : receiver
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- Label miss;
-
- GenerateLoadConstant(object, holder, r0, r3, r1, r4, value, name, &miss);
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
-
- // Return the generated code.
- return GetCode(Code::CONSTANT_FUNCTION, name);
-}
-
-
-Handle<Code> LoadStubCompiler::CompileLoadInterceptor(Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- r0 : receiver
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- Label miss;
-
- LookupResult lookup(isolate());
- LookupPostInterceptor(holder, name, &lookup);
- GenerateLoadInterceptor(object, holder, &lookup, r0, r2, r3, r1, r4, name,
- &miss);
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
-
- // Return the generated code.
- return GetCode(Code::INTERCEPTOR, name);
-}
-
-
Handle<Code> LoadStubCompiler::CompileLoadGlobal(
Handle<JSObject> object,
- Handle<GlobalObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<String> name,
+ Handle<GlobalObject> global,
+ Handle<PropertyCell> cell,
+ Handle<Name> name,
bool is_dont_delete) {
- // ----------- S t a t e -------------
- // -- r0 : receiver
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- Label miss;
+ Label success, miss;
- // Check that the map of the global has not changed.
- __ JumpIfSmi(r0, &miss);
- CheckPrototypes(object, r0, holder, r3, r4, r1, name, &miss);
+ __ CheckMap(
+ receiver(), scratch1(), Handle<Map>(object->map()), &miss, DO_SMI_CHECK);
+ HandlerFrontendHeader(
+ object, receiver(), Handle<JSObject>::cast(global), name, &miss);
// Get the value from the cell.
__ mov(r3, Operand(cell));
- __ ldr(r4, FieldMemOperand(r3, JSGlobalPropertyCell::kValueOffset));
+ __ ldr(r4, FieldMemOperand(r3, Cell::kValueOffset));
// Check for deleted property if property can actually be deleted.
if (!is_dont_delete) {
@@ -3084,301 +3036,55 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
__ b(eq, &miss);
}
- __ mov(r0, r4);
- Counters* counters = masm()->isolate()->counters();
+ HandlerFrontendFooter(name, &success, &miss);
+ __ bind(&success);
+
+ Counters* counters = isolate()->counters();
__ IncrementCounter(counters->named_load_global_stub(), 1, r1, r3);
+ __ mov(r0, r4);
__ Ret();
- __ bind(&miss);
- __ IncrementCounter(counters->named_load_global_stub_miss(), 1, r1, r3);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
-
- // Return the generated code.
- return GetCode(Code::NORMAL, name);
-}
-
-
-Handle<Code> KeyedLoadStubCompiler::CompileLoadField(Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- int index) {
- // ----------- S t a t e -------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- Label miss;
-
- // Check the key is the cached one.
- __ cmp(r0, Operand(name));
- __ b(ne, &miss);
-
- GenerateLoadField(receiver, holder, r1, r2, r3, r4, index, name, &miss);
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- return GetCode(Code::FIELD, name);
-}
-
-
-Handle<Code> KeyedLoadStubCompiler::CompileLoadCallback(
- Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<AccessorInfo> callback) {
- // ----------- S t a t e -------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- Label miss;
-
- // Check the key is the cached one.
- __ cmp(r0, Operand(name));
- __ b(ne, &miss);
-
- GenerateLoadCallback(receiver, holder, r1, r0, r2, r3, r4, r5, callback, name,
- &miss);
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- return GetCode(Code::CALLBACKS, name);
-}
-
-
-Handle<Code> KeyedLoadStubCompiler::CompileLoadConstant(
- Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<JSFunction> value) {
- // ----------- S t a t e -------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- Label miss;
-
- // Check the key is the cached one.
- __ cmp(r0, Operand(name));
- __ b(ne, &miss);
-
- GenerateLoadConstant(receiver, holder, r1, r2, r3, r4, value, name, &miss);
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
// Return the generated code.
- return GetCode(Code::CONSTANT_FUNCTION, name);
-}
-
-
-Handle<Code> KeyedLoadStubCompiler::CompileLoadInterceptor(
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- Label miss;
-
- // Check the key is the cached one.
- __ cmp(r0, Operand(name));
- __ b(ne, &miss);
-
- LookupResult lookup(isolate());
- LookupPostInterceptor(holder, name, &lookup);
- GenerateLoadInterceptor(receiver, holder, &lookup, r1, r0, r2, r3, r4, name,
- &miss);
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- return GetCode(Code::INTERCEPTOR, name);
+ return GetICCode(kind(), Code::NORMAL, name);
}
-Handle<Code> KeyedLoadStubCompiler::CompileLoadArrayLength(
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- Label miss;
-
- // Check the key is the cached one.
- __ cmp(r0, Operand(name));
- __ b(ne, &miss);
-
- GenerateLoadArrayLength(masm(), r1, r2, &miss);
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- return GetCode(Code::CALLBACKS, name);
-}
-
-
-Handle<Code> KeyedLoadStubCompiler::CompileLoadStringLength(
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- Label miss;
-
- Counters* counters = masm()->isolate()->counters();
- __ IncrementCounter(counters->keyed_load_string_length(), 1, r2, r3);
-
- // Check the key is the cached one.
- __ cmp(r0, Operand(name));
- __ b(ne, &miss);
-
- GenerateLoadStringLength(masm(), r1, r2, r3, &miss, true);
- __ bind(&miss);
- __ DecrementCounter(counters->keyed_load_string_length(), 1, r2, r3);
-
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- return GetCode(Code::CALLBACKS, name);
-}
-
-
-Handle<Code> KeyedLoadStubCompiler::CompileLoadFunctionPrototype(
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
+Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC(
+ MapHandleList* receiver_maps,
+ CodeHandleList* handlers,
+ Handle<Name> name,
+ Code::StubType type,
+ IcCheckType check) {
Label miss;
- Counters* counters = masm()->isolate()->counters();
- __ IncrementCounter(counters->keyed_load_function_prototype(), 1, r2, r3);
-
- // Check the name hasn't changed.
- __ cmp(r0, Operand(name));
- __ b(ne, &miss);
-
- GenerateLoadFunctionPrototype(masm(), r1, r2, r3, &miss);
- __ bind(&miss);
- __ DecrementCounter(counters->keyed_load_function_prototype(), 1, r2, r3);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- return GetCode(Code::CALLBACKS, name);
-}
-
-
-Handle<Code> KeyedLoadStubCompiler::CompileLoadElement(
- Handle<Map> receiver_map) {
- // ----------- S t a t e -------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- ElementsKind elements_kind = receiver_map->elements_kind();
- Handle<Code> stub = KeyedLoadElementStub(elements_kind).GetCode();
-
- __ DispatchMap(r1, r2, receiver_map, stub, DO_SMI_CHECK);
-
- Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(Code::NORMAL, factory()->empty_string());
-}
-
+ if (check == PROPERTY) {
+ GenerateNameCheck(name, this->name(), &miss);
+ }
-Handle<Code> KeyedLoadStubCompiler::CompileLoadPolymorphic(
- MapHandleList* receiver_maps,
- CodeHandleList* handler_ics) {
- // ----------- S t a t e -------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- Label miss;
- __ JumpIfSmi(r1, &miss);
+ __ JumpIfSmi(receiver(), &miss);
+ Register map_reg = scratch1();
int receiver_count = receiver_maps->length();
- __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
+ int number_of_handled_maps = 0;
+ __ ldr(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
for (int current = 0; current < receiver_count; ++current) {
- __ mov(ip, Operand(receiver_maps->at(current)));
- __ cmp(r2, ip);
- __ Jump(handler_ics->at(current), RelocInfo::CODE_TARGET, eq);
+ Handle<Map> map = receiver_maps->at(current);
+ if (!map->is_deprecated()) {
+ number_of_handled_maps++;
+ __ mov(ip, Operand(receiver_maps->at(current)));
+ __ cmp(map_reg, ip);
+ __ Jump(handlers->at(current), RelocInfo::CODE_TARGET, eq);
+ }
}
+ ASSERT(number_of_handled_maps != 0);
__ bind(&miss);
- Handle<Code> miss_ic = isolate()->builtins()->KeyedLoadIC_Miss();
- __ Jump(miss_ic, RelocInfo::CODE_TARGET, al);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
// Return the generated code.
- return GetCode(Code::NORMAL, factory()->empty_string(), MEGAMORPHIC);
-}
-
-
-Handle<Code> KeyedStoreStubCompiler::CompileStoreField(Handle<JSObject> object,
- int index,
- Handle<Map> transition,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : name
- // -- r2 : receiver
- // -- lr : return address
- // -----------------------------------
- Label miss;
-
- Counters* counters = masm()->isolate()->counters();
- __ IncrementCounter(counters->keyed_store_field(), 1, r3, r4);
-
- // Check that the name has not changed.
- __ cmp(r1, Operand(name));
- __ b(ne, &miss);
-
- // r3 is used as scratch register. r1 and r2 keep their values if a jump to
- // the miss label is generated.
- GenerateStoreField(masm(),
- object,
- index,
- transition,
- name,
- r2, r1, r3, r4,
- &miss);
- __ bind(&miss);
-
- __ DecrementCounter(counters->keyed_store_field(), 1, r3, r4);
- Handle<Code> ic = masm()->isolate()->builtins()->KeyedStoreIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(transition.is_null()
- ? Code::FIELD
- : Code::MAP_TRANSITION, name);
-}
-
-
-Handle<Code> KeyedStoreStubCompiler::CompileStoreElement(
- Handle<Map> receiver_map) {
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : key
- // -- r2 : receiver
- // -- lr : return address
- // -- r3 : scratch
- // -----------------------------------
- ElementsKind elements_kind = receiver_map->elements_kind();
- bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
- Handle<Code> stub =
- KeyedStoreElementStub(is_js_array, elements_kind, grow_mode_).GetCode();
-
- __ DispatchMap(r2, r3, receiver_map, stub, DO_SMI_CHECK);
-
- Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(Code::NORMAL, factory()->empty_string());
+ InlineCacheState state =
+ number_of_handled_maps > 1 ? POLYMORPHIC : MONOMORPHIC;
+ return GetICCode(kind(), type, name, state);
}
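
CompilePolymorphicIC above boils down to a linear compare-and-jump chain over the receiver's map: smi receivers miss, deprecated maps are skipped when the stub is built, a matching map jumps to its handler, and everything else tail-calls the miss builtin. A hedged, V8-agnostic restatement of that dispatch (all types and names here are hypothetical, not V8 API; the smi check is noted in a comment rather than modelled):

    // Illustrative sketch only; none of these types exist in V8.
    struct HandlerCase {
      const void* map;        // cached receiver map
      bool deprecated;        // deprecated maps are not emitted into the stub
      const void* handler;    // handler code to jump to on a match
    };

    // Caller is assumed to have already rejected smi receivers (JumpIfSmi).
    const void* PolymorphicDispatch(const void* receiver_map,
                                    const HandlerCase* cases, int count,
                                    const void* miss_handler) {
      for (int i = 0; i < count; ++i) {
        if (cases[i].deprecated) continue;       // skipped at stub-compile time
        if (receiver_map == cases[i].map) {
          return cases[i].handler;               // cmp map_reg, map; conditional jump
        }
      }
      return miss_handler;                       // TailCallBuiltin(MissBuiltin(kind()))
    }
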
@@ -3386,182 +3092,31 @@ Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
MapHandleList* receiver_maps,
CodeHandleList* handler_stubs,
MapHandleList* transitioned_maps) {
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : key
- // -- r2 : receiver
- // -- lr : return address
- // -- r3 : scratch
- // -----------------------------------
Label miss;
- __ JumpIfSmi(r2, &miss);
+ __ JumpIfSmi(receiver(), &miss);
int receiver_count = receiver_maps->length();
- __ ldr(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
+ __ ldr(scratch1(), FieldMemOperand(receiver(), HeapObject::kMapOffset));
for (int i = 0; i < receiver_count; ++i) {
__ mov(ip, Operand(receiver_maps->at(i)));
- __ cmp(r3, ip);
+ __ cmp(scratch1(), ip);
if (transitioned_maps->at(i).is_null()) {
__ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, eq);
} else {
Label next_map;
__ b(ne, &next_map);
- __ mov(r3, Operand(transitioned_maps->at(i)));
+ __ mov(transition_map(), Operand(transitioned_maps->at(i)));
__ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, al);
__ bind(&next_map);
}
}
__ bind(&miss);
- Handle<Code> miss_ic = isolate()->builtins()->KeyedStoreIC_Miss();
- __ Jump(miss_ic, RelocInfo::CODE_TARGET, al);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
// Return the generated code.
- return GetCode(Code::NORMAL, factory()->empty_string(), MEGAMORPHIC);
-}
-
-
-Handle<Code> ConstructStubCompiler::CompileConstructStub(
- Handle<JSFunction> function) {
- // ----------- S t a t e -------------
- // -- r0 : argc
- // -- r1 : constructor
- // -- lr : return address
- // -- [sp] : last argument
- // -----------------------------------
- Label generic_stub_call;
-
- // Use r7 for holding undefined which is used in several places below.
- __ LoadRoot(r7, Heap::kUndefinedValueRootIndex);
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Check to see whether there are any break points in the function code. If
- // there are jump to the generic constructor stub which calls the actual
- // code for the function thereby hitting the break points.
- __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kDebugInfoOffset));
- __ cmp(r2, r7);
- __ b(ne, &generic_stub_call);
-#endif
-
- // Load the initial map and verify that it is in fact a map.
- // r1: constructor function
- // r7: undefined
- __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
- __ JumpIfSmi(r2, &generic_stub_call);
- __ CompareObjectType(r2, r3, r4, MAP_TYPE);
- __ b(ne, &generic_stub_call);
-
-#ifdef DEBUG
- // Cannot construct functions this way.
- // r0: argc
- // r1: constructor function
- // r2: initial map
- // r7: undefined
- __ CompareInstanceType(r2, r3, JS_FUNCTION_TYPE);
- __ Check(ne, "Function constructed by construct stub.");
-#endif
-
- // Now allocate the JSObject in new space.
- // r0: argc
- // r1: constructor function
- // r2: initial map
- // r7: undefined
- ASSERT(function->has_initial_map());
- __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset));
-#ifdef DEBUG
- int instance_size = function->initial_map()->instance_size();
- __ cmp(r3, Operand(instance_size >> kPointerSizeLog2));
- __ Check(eq, "Instance size of initial map changed.");
-#endif
- __ AllocateInNewSpace(r3, r4, r5, r6, &generic_stub_call, SIZE_IN_WORDS);
-
- // Allocated the JSObject, now initialize the fields. Map is set to initial
- // map and properties and elements are set to empty fixed array.
- // r0: argc
- // r1: constructor function
- // r2: initial map
- // r3: object size (in words)
- // r4: JSObject (not tagged)
- // r7: undefined
- __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
- __ mov(r5, r4);
- ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
- __ str(r2, MemOperand(r5, kPointerSize, PostIndex));
- ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset);
- __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
- ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset);
- __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
-
- // Calculate the location of the first argument. The stack contains only the
- // argc arguments.
- __ add(r1, sp, Operand(r0, LSL, kPointerSizeLog2));
-
- // Fill all the in-object properties with undefined.
- // r0: argc
- // r1: first argument
- // r3: object size (in words)
- // r4: JSObject (not tagged)
- // r5: First in-object property of JSObject (not tagged)
- // r7: undefined
- // Fill the initialized properties with a constant value or a passed argument
- // depending on the this.x = ...; assignment in the function.
- Handle<SharedFunctionInfo> shared(function->shared());
- for (int i = 0; i < shared->this_property_assignments_count(); i++) {
- if (shared->IsThisPropertyAssignmentArgument(i)) {
- Label not_passed, next;
- // Check if the argument assigned to the property is actually passed.
- int arg_number = shared->GetThisPropertyAssignmentArgument(i);
- __ cmp(r0, Operand(arg_number));
- __ b(le, &not_passed);
- // Argument passed - find it on the stack.
- __ ldr(r2, MemOperand(r1, (arg_number + 1) * -kPointerSize));
- __ str(r2, MemOperand(r5, kPointerSize, PostIndex));
- __ b(&next);
- __ bind(&not_passed);
- // Set the property to undefined.
- __ str(r7, MemOperand(r5, kPointerSize, PostIndex));
- __ bind(&next);
- } else {
- // Set the property to the constant value.
- Handle<Object> constant(shared->GetThisPropertyAssignmentConstant(i));
- __ mov(r2, Operand(constant));
- __ str(r2, MemOperand(r5, kPointerSize, PostIndex));
- }
- }
-
- // Fill the unused in-object property fields with undefined.
- for (int i = shared->this_property_assignments_count();
- i < function->initial_map()->inobject_properties();
- i++) {
- __ str(r7, MemOperand(r5, kPointerSize, PostIndex));
- }
-
- // r0: argc
- // r4: JSObject (not tagged)
- // Move argc to r1 and the JSObject to return to r0 and tag it.
- __ mov(r1, r0);
- __ mov(r0, r4);
- __ orr(r0, r0, Operand(kHeapObjectTag));
-
- // r0: JSObject
- // r1: argc
- // Remove caller arguments and receiver from the stack and return.
- __ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2));
- __ add(sp, sp, Operand(kPointerSize));
- Counters* counters = masm()->isolate()->counters();
- __ IncrementCounter(counters->constructed_objects(), 1, r1, r2);
- __ IncrementCounter(counters->constructed_objects_stub(), 1, r1, r2);
- __ Jump(lr);
-
- // Jump to the generic stub in case the specialized code cannot handle the
- // construction.
- __ bind(&generic_stub_call);
- Handle<Code> code = masm()->isolate()->builtins()->JSConstructStubGeneric();
- __ Jump(code, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode();
+ return GetICCode(
+ kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
}
@@ -3581,8 +3136,7 @@ void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
Register key = r0;
Register receiver = r1;
- __ JumpIfNotSmi(key, &miss_force_generic);
- __ mov(r2, Operand(key, ASR, kSmiTagSize));
+ __ UntagAndJumpIfNotSmi(r2, key, &miss_force_generic);
__ ldr(r4, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ LoadFromNumberDictionary(&slow, r4, key, r0, r2, r3, r5);
__ Ret();
@@ -3597,9 +3151,7 @@ void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
// -- r0 : key
// -- r1 : receiver
// -----------------------------------
- Handle<Code> slow_ic =
- masm->isolate()->builtins()->KeyedLoadIC_Slow();
- __ Jump(slow_ic, RelocInfo::CODE_TARGET);
+ TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Slow);
// Miss case, call the runtime.
__ bind(&miss_force_generic);
@@ -3609,1178 +3161,7 @@ void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
// -- r0 : key
// -- r1 : receiver
// -----------------------------------
-
- Handle<Code> miss_ic =
- masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
- __ Jump(miss_ic, RelocInfo::CODE_TARGET);
-}
-
-
-static bool IsElementTypeSigned(ElementsKind elements_kind) {
- switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_INT_ELEMENTS:
- return true;
-
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_PIXEL_ELEMENTS:
- return false;
-
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- return false;
- }
- return false;
-}
-
-
-static void GenerateSmiKeyCheck(MacroAssembler* masm,
- Register key,
- Register scratch0,
- Register scratch1,
- DwVfpRegister double_scratch0,
- DwVfpRegister double_scratch1,
- Label* fail) {
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- Label key_ok;
- // Check for smi or a smi inside a heap number. We convert the heap
- // number and check if the conversion is exact and fits into the smi
- // range.
- __ JumpIfSmi(key, &key_ok);
- __ CheckMap(key,
- scratch0,
- Heap::kHeapNumberMapRootIndex,
- fail,
- DONT_DO_SMI_CHECK);
- __ sub(ip, key, Operand(kHeapObjectTag));
- __ vldr(double_scratch0, ip, HeapNumber::kValueOffset);
- __ EmitVFPTruncate(kRoundToZero,
- scratch0,
- double_scratch0,
- scratch1,
- double_scratch1,
- kCheckForInexactConversion);
- __ b(ne, fail);
- __ TrySmiTag(scratch0, fail, scratch1);
- __ mov(key, scratch0);
- __ bind(&key_ok);
- } else {
- // Check that the key is a smi.
- __ JumpIfNotSmi(key, fail);
- }
-}
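
// Illustration only (not V8 source): the check GenerateSmiKeyCheck performs, restated in
// plain JavaScript. With VFP2 available, a heap number is accepted if it truncates exactly
// into smi range (31-bit signed on 32-bit ARM); without VFP2 only actual smis pass.
function isUsableElementKey(key) {
  var SMI_MIN = -(1 << 30);            // -2^30
  var SMI_MAX = (1 << 30) - 1;         //  2^30 - 1
  if (typeof key !== 'number') return false;
  var truncated = key | 0;             // kRoundToZero
  if (truncated !== key) return false; // kCheckForInexactConversion would fail
  return key >= SMI_MIN && key <= SMI_MAX;   // TrySmiTag would succeed
}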
-
-
-void KeyedLoadStubCompiler::GenerateLoadExternalArray(
- MacroAssembler* masm,
- ElementsKind elements_kind) {
- // ---------- S t a t e --------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- Label miss_force_generic, slow, failed_allocation;
-
- Register key = r0;
- Register receiver = r1;
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, key, r4, r5, d1, d2, &miss_force_generic);
-
- __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
- // r3: elements array
-
- // Check that the index is in range.
- __ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset));
- __ cmp(key, ip);
- // Unsigned comparison catches both negative and too-large values.
- __ b(hs, &miss_force_generic);
-
- __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
- // r3: base pointer of external storage
-
-  // We do not untag the smi key; instead we work with it
-  // as if it were premultiplied by 2.
- STATIC_ASSERT((kSmiTag == 0) && (kSmiTagSize == 1));
-
- Register value = r2;
- switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
- __ ldrsb(value, MemOperand(r3, key, LSR, 1));
- break;
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ ldrb(value, MemOperand(r3, key, LSR, 1));
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- __ ldrsh(value, MemOperand(r3, key, LSL, 0));
- break;
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ ldrh(value, MemOperand(r3, key, LSL, 0));
- break;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ ldr(value, MemOperand(r3, key, LSL, 1));
- break;
- case EXTERNAL_FLOAT_ELEMENTS:
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- __ add(r2, r3, Operand(key, LSL, 1));
- __ vldr(s0, r2, 0);
- } else {
- __ ldr(value, MemOperand(r3, key, LSL, 1));
- }
- break;
- case EXTERNAL_DOUBLE_ELEMENTS:
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- __ add(r2, r3, Operand(key, LSL, 2));
- __ vldr(d0, r2, 0);
- } else {
- __ add(r4, r3, Operand(key, LSL, 2));
- // r4: pointer to the beginning of the double we want to load.
- __ ldr(r2, MemOperand(r4, 0));
- __ ldr(r3, MemOperand(r4, Register::kSizeInBytes));
- }
- break;
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
-
- // For integer array types:
- // r2: value
- // For float array type:
- // s0: value (if VFP3 is supported)
- // r2: value (if VFP3 is not supported)
- // For double array type:
- // d0: value (if VFP3 is supported)
- // r2/r3: value (if VFP3 is not supported)
-
- if (elements_kind == EXTERNAL_INT_ELEMENTS) {
- // For the Int and UnsignedInt array types, we need to see whether
- // the value can be represented in a Smi. If not, we need to convert
- // it to a HeapNumber.
- Label box_int;
- __ cmp(value, Operand(0xC0000000));
- __ b(mi, &box_int);
- // Tag integer as smi and return it.
- __ mov(r0, Operand(value, LSL, kSmiTagSize));
- __ Ret();
-
- __ bind(&box_int);
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- // Allocate a HeapNumber for the result and perform int-to-double
- // conversion. Don't touch r0 or r1 as they are needed if allocation
- // fails.
- __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
-
- __ AllocateHeapNumber(r5, r3, r4, r6, &slow, DONT_TAG_RESULT);
- // Now we can use r0 for the result as key is not needed any more.
- __ add(r0, r5, Operand(kHeapObjectTag));
- __ vmov(s0, value);
- __ vcvt_f64_s32(d0, s0);
- __ vstr(d0, r5, HeapNumber::kValueOffset);
- __ Ret();
- } else {
- // Allocate a HeapNumber for the result and perform int-to-double
- // conversion. Don't touch r0 or r1 as they are needed if allocation
- // fails.
- __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r5, r3, r4, r6, &slow, TAG_RESULT);
- // Now we can use r0 for the result as key is not needed any more.
- __ mov(r0, r5);
- Register dst1 = r1;
- Register dst2 = r3;
- FloatingPointHelper::Destination dest =
- FloatingPointHelper::kCoreRegisters;
- FloatingPointHelper::ConvertIntToDouble(masm,
- value,
- dest,
- d0,
- dst1,
- dst2,
- r9,
- s0);
- __ str(dst1, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
- __ str(dst2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
- __ Ret();
- }
- } else if (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) {
- // The test is different for unsigned int values. Since we need
- // the value to be in the range of a positive smi, we can't
- // handle either of the top two bits being set in the value.
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- Label box_int, done;
- __ tst(value, Operand(0xC0000000));
- __ b(ne, &box_int);
- // Tag integer as smi and return it.
- __ mov(r0, Operand(value, LSL, kSmiTagSize));
- __ Ret();
-
- __ bind(&box_int);
- __ vmov(s0, value);
- // Allocate a HeapNumber for the result and perform int-to-double
- // conversion. Don't use r0 and r1 as AllocateHeapNumber clobbers all
- // registers - also when jumping due to exhausted young space.
- __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r2, r3, r4, r6, &slow, DONT_TAG_RESULT);
-
- __ vcvt_f64_u32(d0, s0);
- __ vstr(d0, r2, HeapNumber::kValueOffset);
-
- __ add(r0, r2, Operand(kHeapObjectTag));
- __ Ret();
- } else {
- // Check whether unsigned integer fits into smi.
- Label box_int_0, box_int_1, done;
- __ tst(value, Operand(0x80000000));
- __ b(ne, &box_int_0);
- __ tst(value, Operand(0x40000000));
- __ b(ne, &box_int_1);
- // Tag integer as smi and return it.
- __ mov(r0, Operand(value, LSL, kSmiTagSize));
- __ Ret();
-
- Register hiword = value; // r2.
- Register loword = r3;
-
- __ bind(&box_int_0);
- // Integer does not have leading zeros.
- GenerateUInt2Double(masm, hiword, loword, r4, 0);
- __ b(&done);
-
- __ bind(&box_int_1);
- // Integer has one leading zero.
- GenerateUInt2Double(masm, hiword, loword, r4, 1);
-
-
- __ bind(&done);
- // Integer was converted to double in registers hiword:loword.
- // Wrap it into a HeapNumber. Don't use r0 and r1 as AllocateHeapNumber
- // clobbers all registers - also when jumping due to exhausted young
- // space.
- __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r4, r5, r7, r6, &slow, TAG_RESULT);
-
- __ str(hiword, FieldMemOperand(r4, HeapNumber::kExponentOffset));
- __ str(loword, FieldMemOperand(r4, HeapNumber::kMantissaOffset));
-
- __ mov(r0, r4);
- __ Ret();
- }
- } else if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- // For the floating-point array type, we need to always allocate a
- // HeapNumber.
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- // Allocate a HeapNumber for the result. Don't use r0 and r1 as
- // AllocateHeapNumber clobbers all registers - also when jumping due to
- // exhausted young space.
- __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r2, r3, r4, r6, &slow, DONT_TAG_RESULT);
- __ vcvt_f64_f32(d0, s0);
- __ vstr(d0, r2, HeapNumber::kValueOffset);
-
- __ add(r0, r2, Operand(kHeapObjectTag));
- __ Ret();
- } else {
- // Allocate a HeapNumber for the result. Don't use r0 and r1 as
- // AllocateHeapNumber clobbers all registers - also when jumping due to
- // exhausted young space.
- __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r3, r4, r5, r6, &slow, TAG_RESULT);
- // VFP is not available, do manual single to double conversion.
-
- // r2: floating point value (binary32)
- // r3: heap number for result
-
- // Extract mantissa to r0. OK to clobber r0 now as there are no jumps to
- // the slow case from here.
- __ and_(r0, value, Operand(kBinary32MantissaMask));
-
- // Extract exponent to r1. OK to clobber r1 now as there are no jumps to
- // the slow case from here.
- __ mov(r1, Operand(value, LSR, kBinary32MantissaBits));
- __ and_(r1, r1, Operand(kBinary32ExponentMask >> kBinary32MantissaBits));
-
- Label exponent_rebiased;
- __ teq(r1, Operand(0x00));
- __ b(eq, &exponent_rebiased);
-
- __ teq(r1, Operand(0xff));
- __ mov(r1, Operand(0x7ff), LeaveCC, eq);
- __ b(eq, &exponent_rebiased);
-
- // Rebias exponent.
- __ add(r1,
- r1,
- Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias));
-
- __ bind(&exponent_rebiased);
- __ and_(r2, value, Operand(kBinary32SignMask));
- value = no_reg;
- __ orr(r2, r2, Operand(r1, LSL, HeapNumber::kMantissaBitsInTopWord));
-
- // Shift mantissa.
- static const int kMantissaShiftForHiWord =
- kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
-
- static const int kMantissaShiftForLoWord =
- kBitsPerInt - kMantissaShiftForHiWord;
-
- __ orr(r2, r2, Operand(r0, LSR, kMantissaShiftForHiWord));
- __ mov(r0, Operand(r0, LSL, kMantissaShiftForLoWord));
-
- __ str(r2, FieldMemOperand(r3, HeapNumber::kExponentOffset));
- __ str(r0, FieldMemOperand(r3, HeapNumber::kMantissaOffset));
-
- __ mov(r0, r3);
- __ Ret();
- }
- } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- // Allocate a HeapNumber for the result. Don't use r0 and r1 as
- // AllocateHeapNumber clobbers all registers - also when jumping due to
- // exhausted young space.
- __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r2, r3, r4, r6, &slow, DONT_TAG_RESULT);
- __ vstr(d0, r2, HeapNumber::kValueOffset);
-
- __ add(r0, r2, Operand(kHeapObjectTag));
- __ Ret();
- } else {
- // Allocate a HeapNumber for the result. Don't use r0 and r1 as
- // AllocateHeapNumber clobbers all registers - also when jumping due to
- // exhausted young space.
- __ LoadRoot(r7, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r4, r5, r6, r7, &slow, TAG_RESULT);
-
- __ str(r2, FieldMemOperand(r4, HeapNumber::kMantissaOffset));
- __ str(r3, FieldMemOperand(r4, HeapNumber::kExponentOffset));
- __ mov(r0, r4);
- __ Ret();
- }
-
- } else {
- // Tag integer as smi and return it.
- __ mov(r0, Operand(value, LSL, kSmiTagSize));
- __ Ret();
- }
-
- // Slow case, key and receiver still in r0 and r1.
- __ bind(&slow);
- __ IncrementCounter(
- masm->isolate()->counters()->keyed_load_external_array_slow(),
- 1, r2, r3);
-
- // ---------- S t a t e --------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
-
- __ Push(r1, r0);
-
- __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
-
- __ bind(&miss_force_generic);
- Handle<Code> stub =
- masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
- __ Jump(stub, RelocInfo::CODE_TARGET);
-}
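
// Illustration only (not V8 source): for EXTERNAL_INT_ELEMENTS loads the removed stub
// decides between tagging the value as a smi and allocating a HeapNumber. The tagged/boxed
// distinction is not observable from script, so the sketch just labels the two outcomes.
function boxInt32Element(value) {
  // Equivalent to the cmp against 0xC0000000: the value fits in a smi only if it is in
  // the 31-bit signed range, i.e. its top two bits agree.
  if (value >= -(1 << 30) && value <= (1 << 30) - 1) {
    return { kind: 'smi', tagged: value << 1 };      // mov r0, value LSL kSmiTagSize
  }
  return { kind: 'heap-number', payload: value };    // AllocateHeapNumber + vcvt_f64_s32
}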
-
-
-void KeyedStoreStubCompiler::GenerateStoreExternalArray(
- MacroAssembler* masm,
- ElementsKind elements_kind) {
- // ---------- S t a t e --------------
- // -- r0 : value
- // -- r1 : key
- // -- r2 : receiver
- // -- lr : return address
- // -----------------------------------
- Label slow, check_heap_number, miss_force_generic;
-
- // Register usage.
- Register value = r0;
- Register key = r1;
- Register receiver = r2;
- // r3 mostly holds the elements array or the destination external array.
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, key, r4, r5, d1, d2, &miss_force_generic);
-
- __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
-
- // Check that the index is in range
- __ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset));
- __ cmp(key, ip);
- // Unsigned comparison catches both negative and too-large values.
- __ b(hs, &miss_force_generic);
-
- // Handle both smis and HeapNumbers in the fast path. Go to the
- // runtime for all other kinds of values.
- // r3: external array.
- if (elements_kind == EXTERNAL_PIXEL_ELEMENTS) {
- // Double to pixel conversion is only implemented in the runtime for now.
- __ JumpIfNotSmi(value, &slow);
- } else {
- __ JumpIfNotSmi(value, &check_heap_number);
- }
- __ SmiUntag(r5, value);
- __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
-
- // r3: base pointer of external storage.
- // r5: value (integer).
- switch (elements_kind) {
- case EXTERNAL_PIXEL_ELEMENTS:
- // Clamp the value to [0..255].
- __ Usat(r5, 8, Operand(r5));
- __ strb(r5, MemOperand(r3, key, LSR, 1));
- break;
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ strb(r5, MemOperand(r3, key, LSR, 1));
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ strh(r5, MemOperand(r3, key, LSL, 0));
- break;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ str(r5, MemOperand(r3, key, LSL, 1));
- break;
- case EXTERNAL_FLOAT_ELEMENTS:
- // Perform int-to-float conversion and store to memory.
- __ SmiUntag(r4, key);
- StoreIntAsFloat(masm, r3, r4, r5, r6, r7, r9);
- break;
- case EXTERNAL_DOUBLE_ELEMENTS:
- __ add(r3, r3, Operand(key, LSL, 2));
- // r3: effective address of the double element
- FloatingPointHelper::Destination destination;
- if (CpuFeatures::IsSupported(VFP2)) {
- destination = FloatingPointHelper::kVFPRegisters;
- } else {
- destination = FloatingPointHelper::kCoreRegisters;
- }
- FloatingPointHelper::ConvertIntToDouble(
- masm, r5, destination,
- d0, r6, r7, // These are: double_dst, dst1, dst2.
- r4, s2); // These are: scratch2, single_scratch.
- if (destination == FloatingPointHelper::kVFPRegisters) {
- CpuFeatures::Scope scope(VFP2);
- __ vstr(d0, r3, 0);
- } else {
- __ str(r6, MemOperand(r3, 0));
- __ str(r7, MemOperand(r3, Register::kSizeInBytes));
- }
- break;
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
-
- // Entry registers are intact, r0 holds the value which is the return value.
- __ Ret();
-
- if (elements_kind != EXTERNAL_PIXEL_ELEMENTS) {
- // r3: external array.
- __ bind(&check_heap_number);
- __ CompareObjectType(value, r5, r6, HEAP_NUMBER_TYPE);
- __ b(ne, &slow);
-
- __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
-
- // r3: base pointer of external storage.
-
- // The WebGL specification leaves the behavior of storing NaN and
- // +/-Infinity into integer arrays basically undefined. For more
- // reproducible behavior, convert these to zero.
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
-
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- // vldr requires offset to be a multiple of 4 so we can not
- // include -kHeapObjectTag into it.
- __ sub(r5, r0, Operand(kHeapObjectTag));
- __ vldr(d0, r5, HeapNumber::kValueOffset);
- __ add(r5, r3, Operand(key, LSL, 1));
- __ vcvt_f32_f64(s0, d0);
- __ vstr(s0, r5, 0);
- } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- __ sub(r5, r0, Operand(kHeapObjectTag));
- __ vldr(d0, r5, HeapNumber::kValueOffset);
- __ add(r5, r3, Operand(key, LSL, 2));
- __ vstr(d0, r5, 0);
- } else {
- // Hoisted load. vldr requires offset to be a multiple of 4 so we can
- // not include -kHeapObjectTag into it.
- __ sub(r5, value, Operand(kHeapObjectTag));
- __ vldr(d0, r5, HeapNumber::kValueOffset);
- __ EmitECMATruncate(r5, d0, s2, r6, r7, r9);
-
- switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ strb(r5, MemOperand(r3, key, LSR, 1));
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ strh(r5, MemOperand(r3, key, LSL, 0));
- break;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ str(r5, MemOperand(r3, key, LSL, 1));
- break;
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
- }
-
- // Entry registers are intact, r0 holds the value which is the return
- // value.
- __ Ret();
- } else {
-      // VFP3 is not available, do manual conversions.
- __ ldr(r5, FieldMemOperand(value, HeapNumber::kExponentOffset));
- __ ldr(r6, FieldMemOperand(value, HeapNumber::kMantissaOffset));
-
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- Label done, nan_or_infinity_or_zero;
- static const int kMantissaInHiWordShift =
- kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
-
- static const int kMantissaInLoWordShift =
- kBitsPerInt - kMantissaInHiWordShift;
-
- // Test for all special exponent values: zeros, subnormal numbers, NaNs
- // and infinities. All these should be converted to 0.
- __ mov(r7, Operand(HeapNumber::kExponentMask));
- __ and_(r9, r5, Operand(r7), SetCC);
- __ b(eq, &nan_or_infinity_or_zero);
-
- __ teq(r9, Operand(r7));
- __ mov(r9, Operand(kBinary32ExponentMask), LeaveCC, eq);
- __ b(eq, &nan_or_infinity_or_zero);
-
- // Rebias exponent.
- __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift));
- __ add(r9,
- r9,
- Operand(kBinary32ExponentBias - HeapNumber::kExponentBias));
-
- __ cmp(r9, Operand(kBinary32MaxExponent));
- __ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, gt);
- __ orr(r5, r5, Operand(kBinary32ExponentMask), LeaveCC, gt);
- __ b(gt, &done);
-
- __ cmp(r9, Operand(kBinary32MinExponent));
- __ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, lt);
- __ b(lt, &done);
-
- __ and_(r7, r5, Operand(HeapNumber::kSignMask));
- __ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
- __ orr(r7, r7, Operand(r5, LSL, kMantissaInHiWordShift));
- __ orr(r7, r7, Operand(r6, LSR, kMantissaInLoWordShift));
- __ orr(r5, r7, Operand(r9, LSL, kBinary32ExponentShift));
-
- __ bind(&done);
- __ str(r5, MemOperand(r3, key, LSL, 1));
- // Entry registers are intact, r0 holds the value which is the return
- // value.
- __ Ret();
-
- __ bind(&nan_or_infinity_or_zero);
- __ and_(r7, r5, Operand(HeapNumber::kSignMask));
- __ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
- __ orr(r9, r9, r7);
- __ orr(r9, r9, Operand(r5, LSL, kMantissaInHiWordShift));
- __ orr(r5, r9, Operand(r6, LSR, kMantissaInLoWordShift));
- __ b(&done);
- } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- __ add(r7, r3, Operand(key, LSL, 2));
- // r7: effective address of destination element.
- __ str(r6, MemOperand(r7, 0));
- __ str(r5, MemOperand(r7, Register::kSizeInBytes));
- __ Ret();
- } else {
- bool is_signed_type = IsElementTypeSigned(elements_kind);
- int meaningfull_bits = is_signed_type ? (kBitsPerInt - 1) : kBitsPerInt;
- int32_t min_value = is_signed_type ? 0x80000000 : 0x00000000;
-
- Label done, sign;
-
- // Test for all special exponent values: zeros, subnormal numbers, NaNs
- // and infinities. All these should be converted to 0.
- __ mov(r7, Operand(HeapNumber::kExponentMask));
- __ and_(r9, r5, Operand(r7), SetCC);
- __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, eq);
- __ b(eq, &done);
-
- __ teq(r9, Operand(r7));
- __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, eq);
- __ b(eq, &done);
-
- // Unbias exponent.
- __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift));
- __ sub(r9, r9, Operand(HeapNumber::kExponentBias), SetCC);
- // If exponent is negative then result is 0.
- __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, mi);
- __ b(mi, &done);
-
- // If exponent is too big then result is minimal value.
- __ cmp(r9, Operand(meaningfull_bits - 1));
- __ mov(r5, Operand(min_value), LeaveCC, ge);
- __ b(ge, &done);
-
- __ and_(r7, r5, Operand(HeapNumber::kSignMask), SetCC);
- __ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
- __ orr(r5, r5, Operand(1u << HeapNumber::kMantissaBitsInTopWord));
-
- __ rsb(r9, r9, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC);
- __ mov(r5, Operand(r5, LSR, r9), LeaveCC, pl);
- __ b(pl, &sign);
-
- __ rsb(r9, r9, Operand(0, RelocInfo::NONE));
- __ mov(r5, Operand(r5, LSL, r9));
- __ rsb(r9, r9, Operand(meaningfull_bits));
- __ orr(r5, r5, Operand(r6, LSR, r9));
-
- __ bind(&sign);
- __ teq(r7, Operand(0, RelocInfo::NONE));
- __ rsb(r5, r5, Operand(0, RelocInfo::NONE), LeaveCC, ne);
-
- __ bind(&done);
- switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ strb(r5, MemOperand(r3, key, LSR, 1));
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ strh(r5, MemOperand(r3, key, LSL, 0));
- break;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ str(r5, MemOperand(r3, key, LSL, 1));
- break;
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
- }
- }
- }
-
- // Slow case, key and receiver still in r0 and r1.
- __ bind(&slow);
- __ IncrementCounter(
- masm->isolate()->counters()->keyed_load_external_array_slow(),
- 1, r2, r3);
-
- // ---------- S t a t e --------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- Handle<Code> slow_ic =
- masm->isolate()->builtins()->KeyedStoreIC_Slow();
- __ Jump(slow_ic, RelocInfo::CODE_TARGET);
-
- // Miss case, call the runtime.
- __ bind(&miss_force_generic);
-
- // ---------- S t a t e --------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
-
- Handle<Code> miss_ic =
- masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
- __ Jump(miss_ic, RelocInfo::CODE_TARGET);
-}
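
// Illustration only (not V8 source): the per-ElementsKind value conversion the removed
// store stub applies. The kind labels ('pixel', 'float32', ...) are shorthand for the
// cases above, not runtime names.
function convertForExternalStore(kind, value) {
  if (kind === 'pixel') {
    // Double-to-pixel conversion is left to the runtime; smi values are clamped.
    return value < 0 ? 0 : value > 255 ? 255 : value;   // Usat r5, 8
  }
  if (kind === 'float32' || kind === 'float64') {
    return value;                      // converted to float/double and stored as-is
  }
  if (!isFinite(value)) return 0;      // WebGL: NaN and +/-Infinity stored as 0
  return value | 0;                    // ECMA ToInt32-style truncation, then narrowed
}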
-
-
-void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- Label miss_force_generic;
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, r0, r4, r5, d1, d2, &miss_force_generic);
-
- // Get the elements array.
- __ ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset));
- __ AssertFastElements(r2);
-
- // Check that the key is within bounds.
- __ ldr(r3, FieldMemOperand(r2, FixedArray::kLengthOffset));
- __ cmp(r0, Operand(r3));
- __ b(hs, &miss_force_generic);
-
- // Load the result and make sure it's not the hole.
- __ add(r3, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
- __ ldr(r4,
- MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(r4, ip);
- __ b(eq, &miss_force_generic);
- __ mov(r0, r4);
- __ Ret();
-
- __ bind(&miss_force_generic);
- Handle<Code> stub =
- masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
- __ Jump(stub, RelocInfo::CODE_TARGET);
-}
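
// Illustration only (not V8 source): the removed fast-element load is a bounds check plus
// a hole check. THE_HOLE is a stand-in for the runtime's hole sentinel
// (Heap::kTheHoleValueRootIndex), not a real exported value.
var THE_HOLE = {};   // illustrative sentinel

function loadFastElement(elements, key) {
  // The stub does this with a single unsigned compare, which also rejects negative keys.
  if (key < 0 || key >= elements.length) return { miss: true };
  var value = elements[key];
  if (value === THE_HOLE) return { miss: true };   // hole -> KeyedLoadIC_MissForceGeneric
  return { value: value };
}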
-
-
-void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
- MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- Label miss_force_generic, slow_allocate_heapnumber;
-
- Register key_reg = r0;
- Register receiver_reg = r1;
- Register elements_reg = r2;
- Register heap_number_reg = r2;
- Register indexed_double_offset = r3;
- Register scratch = r4;
- Register scratch2 = r5;
- Register scratch3 = r6;
- Register heap_number_map = r7;
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, d2, &miss_force_generic);
-
- // Get the elements array.
- __ ldr(elements_reg,
- FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
-
- // Check that the key is within bounds.
- __ ldr(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
- __ cmp(key_reg, Operand(scratch));
- __ b(hs, &miss_force_generic);
-
- // Load the upper word of the double in the fixed array and test for NaN.
- __ add(indexed_double_offset, elements_reg,
- Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
- uint32_t upper_32_offset = FixedArray::kHeaderSize + sizeof(kHoleNanLower32);
- __ ldr(scratch, FieldMemOperand(indexed_double_offset, upper_32_offset));
- __ cmp(scratch, Operand(kHoleNanUpper32));
- __ b(&miss_force_generic, eq);
-
- // Non-NaN. Allocate a new heap number and copy the double value into it.
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(heap_number_reg, scratch2, scratch3,
- heap_number_map, &slow_allocate_heapnumber, TAG_RESULT);
-
- // Don't need to reload the upper 32 bits of the double, it's already in
- // scratch.
- __ str(scratch, FieldMemOperand(heap_number_reg,
- HeapNumber::kExponentOffset));
- __ ldr(scratch, FieldMemOperand(indexed_double_offset,
- FixedArray::kHeaderSize));
- __ str(scratch, FieldMemOperand(heap_number_reg,
- HeapNumber::kMantissaOffset));
-
- __ mov(r0, heap_number_reg);
- __ Ret();
-
- __ bind(&slow_allocate_heapnumber);
- Handle<Code> slow_ic =
- masm->isolate()->builtins()->KeyedLoadIC_Slow();
- __ Jump(slow_ic, RelocInfo::CODE_TARGET);
-
- __ bind(&miss_force_generic);
- Handle<Code> miss_ic =
- masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
- __ Jump(miss_ic, RelocInfo::CODE_TARGET);
-}
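
// Illustration only (not V8 source): the removed double-element load inspects only the
// upper word of the slot to detect the hole sentinel before boxing the value. The constant
// is passed in rather than hard-coded here; the real kHoleNanUpper32 lives in the runtime
// headers.
function loadFastDoubleElement(slotValue, kHoleNanUpper32) {
  var buf = new ArrayBuffer(8);
  new Float64Array(buf)[0] = slotValue;
  var upper = new Uint32Array(buf)[1];        // high word on a little-endian target
  if (upper === kHoleNanUpper32) {
    return { miss: true };                    // hole -> KeyedLoadIC_MissForceGeneric
  }
  return { value: slotValue };                // non-hole: boxed into a fresh HeapNumber
}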
-
-
-void KeyedStoreStubCompiler::GenerateStoreFastElement(
- MacroAssembler* masm,
- bool is_js_array,
- ElementsKind elements_kind,
- KeyedAccessGrowMode grow_mode) {
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : key
- // -- r2 : receiver
- // -- lr : return address
- // -- r3 : scratch
- // -- r4 : scratch (elements)
- // -----------------------------------
- Label miss_force_generic, transition_elements_kind, grow, slow;
- Label finish_store, check_capacity;
-
- Register value_reg = r0;
- Register key_reg = r1;
- Register receiver_reg = r2;
- Register scratch = r4;
- Register elements_reg = r3;
- Register length_reg = r5;
- Register scratch2 = r6;
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, d2, &miss_force_generic);
-
- if (IsFastSmiElementsKind(elements_kind)) {
- __ JumpIfNotSmi(value_reg, &transition_elements_kind);
- }
-
- // Check that the key is within bounds.
- __ ldr(elements_reg,
- FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
- if (is_js_array) {
- __ ldr(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
- } else {
- __ ldr(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
- }
- // Compare smis.
- __ cmp(key_reg, scratch);
- if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
- __ b(hs, &grow);
- } else {
- __ b(hs, &miss_force_generic);
- }
-
- // Make sure elements is a fast element array, not 'cow'.
- __ CheckMap(elements_reg,
- scratch,
- Heap::kFixedArrayMapRootIndex,
- &miss_force_generic,
- DONT_DO_SMI_CHECK);
-
- __ bind(&finish_store);
- if (IsFastSmiElementsKind(elements_kind)) {
- __ add(scratch,
- elements_reg,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
- __ add(scratch,
- scratch,
- Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ str(value_reg, MemOperand(scratch));
- } else {
- ASSERT(IsFastObjectElementsKind(elements_kind));
- __ add(scratch,
- elements_reg,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
- __ add(scratch,
- scratch,
- Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ str(value_reg, MemOperand(scratch));
- __ mov(receiver_reg, value_reg);
- __ RecordWrite(elements_reg, // Object.
- scratch, // Address.
- receiver_reg, // Value.
- kLRHasNotBeenSaved,
- kDontSaveFPRegs);
- }
- // value_reg (r0) is preserved.
- // Done.
- __ Ret();
-
- __ bind(&miss_force_generic);
- Handle<Code> ic =
- masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- __ bind(&transition_elements_kind);
- Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
- __ Jump(ic_miss, RelocInfo::CODE_TARGET);
-
- if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
- // Grow the array by a single element if possible.
- __ bind(&grow);
-
- // Make sure the array is only growing by a single element, anything else
- // must be handled by the runtime. Flags already set by previous compare.
- __ b(ne, &miss_force_generic);
-
- // Check for the empty array, and preallocate a small backing store if
- // possible.
- __ ldr(length_reg,
- FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
- __ ldr(elements_reg,
- FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
- __ CompareRoot(elements_reg, Heap::kEmptyFixedArrayRootIndex);
- __ b(ne, &check_capacity);
-
- int size = FixedArray::SizeFor(JSArray::kPreallocatedArrayElements);
- __ AllocateInNewSpace(size, elements_reg, scratch, scratch2, &slow,
- TAG_OBJECT);
-
- __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
- __ str(scratch, FieldMemOperand(elements_reg, JSObject::kMapOffset));
- __ mov(scratch, Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
- __ str(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
- __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
- for (int i = 1; i < JSArray::kPreallocatedArrayElements; ++i) {
- __ str(scratch, FieldMemOperand(elements_reg, FixedArray::SizeFor(i)));
- }
-
- // Store the element at index zero.
- __ str(value_reg, FieldMemOperand(elements_reg, FixedArray::SizeFor(0)));
-
- // Install the new backing store in the JSArray.
- __ str(elements_reg,
- FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
- __ RecordWriteField(receiver_reg, JSObject::kElementsOffset, elements_reg,
- scratch, kLRHasNotBeenSaved, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
- // Increment the length of the array.
- __ mov(length_reg, Operand(Smi::FromInt(1)));
- __ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
- __ Ret();
-
- __ bind(&check_capacity);
-    // Check for COW elements; in general they are not handled by this stub.
- __ CheckMap(elements_reg,
- scratch,
- Heap::kFixedCOWArrayMapRootIndex,
- &miss_force_generic,
- DONT_DO_SMI_CHECK);
-
- __ ldr(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
- __ cmp(length_reg, scratch);
- __ b(hs, &slow);
-
- // Grow the array and finish the store.
- __ add(length_reg, length_reg, Operand(Smi::FromInt(1)));
- __ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
- __ jmp(&finish_store);
-
- __ bind(&slow);
- Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow();
- __ Jump(ic_slow, RelocInfo::CODE_TARGET);
- }
-}
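
// Illustration only (not V8 source): the grow path above handles appending exactly one
// element and punts everything else to the runtime. kPreallocated stands in for
// JSArray::kPreallocatedArrayElements; map checks and the write barrier are omitted.
function storeFastElement(array, elements, key, value, kPreallocated) {
  if (key < array.length) {           // in bounds: plain store (plus write barrier)
    elements[key] = value;
    return { stored: true };
  }
  if (key !== array.length) {         // growing by more than one element: miss
    return { miss: true };
  }
  if (elements.length === 0) {        // empty backing store: preallocate a few slots
    elements = new Array(kPreallocated);
  } else if (array.length >= elements.length) {
    return { slow: true };            // no spare capacity: KeyedStoreIC_Slow
  }
  elements[key] = value;
  array.length = key + 1;
  return { stored: true, elements: elements };
}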
-
-
-void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
- MacroAssembler* masm,
- bool is_js_array,
- KeyedAccessGrowMode grow_mode) {
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : key
- // -- r2 : receiver
- // -- lr : return address
- // -- r3 : scratch
- // -- r4 : scratch
- // -- r5 : scratch
- // -----------------------------------
- Label miss_force_generic, transition_elements_kind, grow, slow;
- Label finish_store, check_capacity;
-
- Register value_reg = r0;
- Register key_reg = r1;
- Register receiver_reg = r2;
- Register elements_reg = r3;
- Register scratch1 = r4;
- Register scratch2 = r5;
- Register scratch3 = r6;
- Register scratch4 = r7;
- Register length_reg = r7;
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, d2, &miss_force_generic);
-
- __ ldr(elements_reg,
- FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
-
- // Check that the key is within bounds.
- if (is_js_array) {
- __ ldr(scratch1, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
- } else {
- __ ldr(scratch1,
- FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
- }
- // Compare smis, unsigned compare catches both negative and out-of-bound
- // indexes.
- __ cmp(key_reg, scratch1);
- if (grow_mode == ALLOW_JSARRAY_GROWTH) {
- __ b(hs, &grow);
- } else {
- __ b(hs, &miss_force_generic);
- }
-
- __ bind(&finish_store);
- __ StoreNumberToDoubleElements(value_reg,
- key_reg,
- receiver_reg,
- // All registers after this are overwritten.
- elements_reg,
- scratch1,
- scratch2,
- scratch3,
- scratch4,
- &transition_elements_kind);
- __ Ret();
-
- // Handle store cache miss, replacing the ic with the generic stub.
- __ bind(&miss_force_generic);
- Handle<Code> ic =
- masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- __ bind(&transition_elements_kind);
- Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
- __ Jump(ic_miss, RelocInfo::CODE_TARGET);
-
- if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
- // Grow the array by a single element if possible.
- __ bind(&grow);
-
- // Make sure the array is only growing by a single element, anything else
- // must be handled by the runtime. Flags already set by previous compare.
- __ b(ne, &miss_force_generic);
-
- // Transition on values that can't be stored in a FixedDoubleArray.
- Label value_is_smi;
- __ JumpIfSmi(value_reg, &value_is_smi);
- __ ldr(scratch1, FieldMemOperand(value_reg, HeapObject::kMapOffset));
- __ CompareRoot(scratch1, Heap::kHeapNumberMapRootIndex);
- __ b(ne, &transition_elements_kind);
- __ bind(&value_is_smi);
-
- // Check for the empty array, and preallocate a small backing store if
- // possible.
- __ ldr(length_reg,
- FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
- __ ldr(elements_reg,
- FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
- __ CompareRoot(elements_reg, Heap::kEmptyFixedArrayRootIndex);
- __ b(ne, &check_capacity);
-
- int size = FixedDoubleArray::SizeFor(JSArray::kPreallocatedArrayElements);
- __ AllocateInNewSpace(size, elements_reg, scratch1, scratch2, &slow,
- TAG_OBJECT);
-
-    // Initialize the new FixedDoubleArray. Leave elements uninitialized for
-    // efficiency; they are guaranteed to be initialized before use.
- __ LoadRoot(scratch1, Heap::kFixedDoubleArrayMapRootIndex);
- __ str(scratch1, FieldMemOperand(elements_reg, JSObject::kMapOffset));
- __ mov(scratch1,
- Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
- __ str(scratch1,
- FieldMemOperand(elements_reg, FixedDoubleArray::kLengthOffset));
-
- // Install the new backing store in the JSArray.
- __ str(elements_reg,
- FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
- __ RecordWriteField(receiver_reg, JSObject::kElementsOffset, elements_reg,
- scratch1, kLRHasNotBeenSaved, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
- // Increment the length of the array.
- __ mov(length_reg, Operand(Smi::FromInt(1)));
- __ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
- __ ldr(elements_reg,
- FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
- __ jmp(&finish_store);
-
- __ bind(&check_capacity);
- // Make sure that the backing store can hold additional elements.
- __ ldr(scratch1,
- FieldMemOperand(elements_reg, FixedDoubleArray::kLengthOffset));
- __ cmp(length_reg, scratch1);
- __ b(hs, &slow);
-
- // Grow the array and finish the store.
- __ add(length_reg, length_reg, Operand(Smi::FromInt(1)));
- __ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
- __ jmp(&finish_store);
-
- __ bind(&slow);
- Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow();
- __ Jump(ic_slow, RelocInfo::CODE_TARGET);
- }
+ TailCallBuiltin(masm, Builtins::kKeyedLoadIC_MissForceGeneric);
}
diff --git a/deps/v8/src/array-iterator.js b/deps/v8/src/array-iterator.js
new file mode 100644
index 000000000..e73498684
--- /dev/null
+++ b/deps/v8/src/array-iterator.js
@@ -0,0 +1,126 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// 'AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+'use strict';
+
+// This file relies on the fact that the following declaration has been made
+// in runtime.js:
+// var $Array = global.Array;
+
+var ARRAY_ITERATOR_KIND_KEYS = 1;
+var ARRAY_ITERATOR_KIND_VALUES = 2;
+var ARRAY_ITERATOR_KIND_ENTRIES = 3;
+// The spec draft also has "sparse" but it is never used.
+
+var iteratorObjectSymbol = %CreateSymbol(UNDEFINED);
+var arrayIteratorNextIndexSymbol = %CreateSymbol(UNDEFINED);
+var arrayIterationKindSymbol = %CreateSymbol(UNDEFINED);
+
+function ArrayIterator() {}
+
+// 15.4.5.1 CreateArrayIterator Abstract Operation
+function CreateArrayIterator(array, kind) {
+ var object = ToObject(array);
+ var iterator = new ArrayIterator;
+ iterator[iteratorObjectSymbol] = object;
+ iterator[arrayIteratorNextIndexSymbol] = 0;
+ iterator[arrayIterationKindSymbol] = kind;
+ return iterator;
+}
+
+// 15.19.4.3.4 CreateItrResultObject
+function CreateIteratorResultObject(value, done) {
+ return {value: value, done: done};
+}
+
+// 15.4.5.2.2 ArrayIterator.prototype.next( )
+function ArrayIteratorNext() {
+ var iterator = ToObject(this);
+ var array = iterator[iteratorObjectSymbol];
+ if (!array) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['Array Iterator.prototype.next']);
+ }
+
+ var index = iterator[arrayIteratorNextIndexSymbol];
+ var itemKind = iterator[arrayIterationKindSymbol];
+ var length = TO_UINT32(array.length);
+
+ // "sparse" is never used.
+
+ if (index >= length) {
+ iterator[arrayIteratorNextIndexSymbol] = 1 / 0; // Infinity
+ return CreateIteratorResultObject(UNDEFINED, true);
+ }
+
+ iterator[arrayIteratorNextIndexSymbol] = index + 1;
+
+ if (itemKind == ARRAY_ITERATOR_KIND_VALUES)
+ return CreateIteratorResultObject(array[index], false);
+
+ if (itemKind == ARRAY_ITERATOR_KIND_ENTRIES)
+ return CreateIteratorResultObject([index, array[index]], false);
+
+ return CreateIteratorResultObject(index, false);
+}
+
+function ArrayEntries() {
+ return CreateArrayIterator(this, ARRAY_ITERATOR_KIND_ENTRIES);
+}
+
+function ArrayValues() {
+ return CreateArrayIterator(this, ARRAY_ITERATOR_KIND_VALUES);
+}
+
+function ArrayKeys() {
+ return CreateArrayIterator(this, ARRAY_ITERATOR_KIND_KEYS);
+}
+
+function SetUpArrayIterator() {
+ %CheckIsBootstrapping();
+
+ %FunctionSetInstanceClassName(ArrayIterator, 'Array Iterator');
+ %FunctionSetReadOnlyPrototype(ArrayIterator);
+
+ InstallFunctions(ArrayIterator.prototype, DONT_ENUM, $Array(
+ 'next', ArrayIteratorNext
+ ));
+}
+
+SetUpArrayIterator();
+
+function ExtendArrayPrototype() {
+ %CheckIsBootstrapping();
+
+ InstallFunctions($Array.prototype, DONT_ENUM, $Array(
+ 'entries', ArrayEntries,
+ 'values', ArrayValues,
+ 'keys', ArrayKeys
+ ));
+}
+
+ExtendArrayPrototype();
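
// Usage sketch for the iterators installed above (assuming the harmony iteration flag of
// this V8 version; result objects follow CreateIteratorResultObject).
var it = ['a', 'b'].entries();
it.next();   // { value: [0, 'a'], done: false }
it.next();   // { value: [1, 'b'], done: false }
it.next();   // { value: undefined, done: true }
['a', 'b'].keys();     // yields 0, then 1
['a', 'b'].values();   // yields 'a', then 'b'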
diff --git a/deps/v8/src/array.js b/deps/v8/src/array.js
index 250c30c32..e98d7f5b5 100644
--- a/deps/v8/src/array.js
+++ b/deps/v8/src/array.js
@@ -38,22 +38,21 @@ var visited_arrays = new InternalArray();
// Gets a sorted array of array keys. Useful for operations on sparse
// arrays. Dupes have not been removed.
-function GetSortedArrayKeys(array, intervals) {
- var length = intervals.length;
- var keys = [];
- for (var k = 0; k < length; k++) {
- var key = intervals[k];
- if (key < 0) {
- var j = -1 - key;
- var limit = j + intervals[++k];
- for (; j < limit; j++) {
- var e = array[j];
- if (!IS_UNDEFINED(e) || j in array) {
- keys.push(j);
- }
+function GetSortedArrayKeys(array, indices) {
+ var keys = new InternalArray();
+ if (IS_NUMBER(indices)) {
+ // It's an interval
+ var limit = indices;
+ for (var i = 0; i < limit; ++i) {
+ var e = array[i];
+ if (!IS_UNDEFINED(e) || i in array) {
+ keys.push(i);
}
- } else {
- // The case where key is undefined also ends here.
+ }
+ } else {
+ var length = indices.length;
+ for (var k = 0; k < length; ++k) {
+ var key = indices[k];
if (!IS_UNDEFINED(key)) {
var e = array[key];
if (!IS_UNDEFINED(e) || key in array) {
@@ -61,8 +60,8 @@ function GetSortedArrayKeys(array, intervals) {
}
}
}
+ %_CallFunction(keys, function(a, b) { return a - b; }, ArraySort);
}
- %_CallFunction(keys, function(a, b) { return a - b; }, ArraySort);
return keys;
}
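
// This hunk (and the ones below) rely on a changed %GetArrayKeys contract, spelled out
// here as an assumption for clarity: the call now returns a plain number when every index
// in [0, n) may be present, and an array of interesting indices for genuinely sparse
// arrays. A small sketch of how callers branch on that result (illustrative only):
function describeArrayKeys(indices) {
  if (typeof indices === 'number') {                    // IS_NUMBER(indices): an interval
    return { kind: 'interval', limit: indices };
  }
  return { kind: 'sparse-indices', keys: indices };     // sorted/filtered by the caller
}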
@@ -217,34 +216,21 @@ function ConvertToLocaleString(e) {
// special array operations to handle sparse arrays in a sensible fashion.
function SmartSlice(array, start_i, del_count, len, deleted_elements) {
// Move deleted elements to a new array (the return value from splice).
- // Intervals array can contain keys and intervals. See comment in Concat.
- var intervals = %GetArrayKeys(array, start_i + del_count);
- var length = intervals.length;
- for (var k = 0; k < length; k++) {
- var key = intervals[k];
- if (key < 0) {
- var j = -1 - key;
- var interval_limit = j + intervals[++k];
- if (j < start_i) {
- j = start_i;
- }
- for (; j < interval_limit; j++) {
- // ECMA-262 15.4.4.12 line 10. The spec could also be
- // interpreted such that %HasLocalProperty would be the
- // appropriate test. We follow KJS in consulting the
- // prototype.
- var current = array[j];
- if (!IS_UNDEFINED(current) || j in array) {
- deleted_elements[j - start_i] = current;
- }
+ var indices = %GetArrayKeys(array, start_i + del_count);
+ if (IS_NUMBER(indices)) {
+ var limit = indices;
+ for (var i = start_i; i < limit; ++i) {
+ var current = array[i];
+ if (!IS_UNDEFINED(current) || i in array) {
+ deleted_elements[i - start_i] = current;
}
- } else {
+ }
+ } else {
+ var length = indices.length;
+ for (var k = 0; k < length; ++k) {
+ var key = indices[k];
if (!IS_UNDEFINED(key)) {
if (key >= start_i) {
- // ECMA-262 15.4.4.12 line 10. The spec could also be
- // interpreted such that %HasLocalProperty would be the
- // appropriate test. We follow KJS in consulting the
- // prototype.
var current = array[key];
if (!IS_UNDEFINED(current) || key in array) {
deleted_elements[key - start_i] = current;
@@ -261,50 +247,32 @@ function SmartSlice(array, start_i, del_count, len, deleted_elements) {
function SmartMove(array, start_i, del_count, len, num_additional_args) {
// Move data to new array.
var new_array = new InternalArray(len - del_count + num_additional_args);
- var intervals = %GetArrayKeys(array, len);
- var length = intervals.length;
- for (var k = 0; k < length; k++) {
- var key = intervals[k];
- if (key < 0) {
- var j = -1 - key;
- var interval_limit = j + intervals[++k];
- while (j < start_i && j < interval_limit) {
- // The spec could also be interpreted such that
- // %HasLocalProperty would be the appropriate test. We follow
- // KJS in consulting the prototype.
- var current = array[j];
- if (!IS_UNDEFINED(current) || j in array) {
- new_array[j] = current;
- }
- j++;
+ var indices = %GetArrayKeys(array, len);
+ if (IS_NUMBER(indices)) {
+ var limit = indices;
+ for (var i = 0; i < start_i && i < limit; ++i) {
+ var current = array[i];
+ if (!IS_UNDEFINED(current) || i in array) {
+ new_array[i] = current;
}
- j = start_i + del_count;
- while (j < interval_limit) {
- // ECMA-262 15.4.4.12 lines 24 and 41. The spec could also be
- // interpreted such that %HasLocalProperty would be the
- // appropriate test. We follow KJS in consulting the
- // prototype.
- var current = array[j];
- if (!IS_UNDEFINED(current) || j in array) {
- new_array[j - del_count + num_additional_args] = current;
- }
- j++;
+ }
+ for (var i = start_i + del_count; i < limit; ++i) {
+ var current = array[i];
+ if (!IS_UNDEFINED(current) || i in array) {
+ new_array[i - del_count + num_additional_args] = current;
}
- } else {
+ }
+ } else {
+ var length = indices.length;
+ for (var k = 0; k < length; ++k) {
+ var key = indices[k];
if (!IS_UNDEFINED(key)) {
if (key < start_i) {
- // The spec could also be interpreted such that
- // %HasLocalProperty would be the appropriate test. We follow
- // KJS in consulting the prototype.
var current = array[key];
if (!IS_UNDEFINED(current) || key in array) {
new_array[key] = current;
}
} else if (key >= start_i + del_count) {
- // ECMA-262 15.4.4.12 lines 24 and 41. The spec could also
- // be interpreted such that %HasLocalProperty would be the
- // appropriate test. We follow KJS in consulting the
- // prototype.
var current = array[key];
if (!IS_UNDEFINED(current) || key in array) {
new_array[key - del_count + num_additional_args] = current;
@@ -413,6 +381,7 @@ function ArrayJoin(separator) {
["Array.prototype.join"]);
}
+ var length = TO_UINT32(this.length);
if (IS_UNDEFINED(separator)) {
separator = ',';
} else if (!IS_STRING(separator)) {
@@ -422,10 +391,26 @@ function ArrayJoin(separator) {
var result = %_FastAsciiArrayJoin(this, separator);
if (!IS_UNDEFINED(result)) return result;
- return Join(this, TO_UINT32(this.length), separator, ConvertToString);
+ return Join(this, length, separator, ConvertToString);
}
+function ObservedArrayPop(n) {
+ n--;
+ var value = this[n];
+
+ try {
+ BeginPerformSplice(this);
+ delete this[n];
+ this.length = n;
+ } finally {
+ EndPerformSplice(this);
+ EnqueueSpliceRecord(this, n, [value], 0);
+ }
+
+ return value;
+}
+
// Removes the last element from the array and returns it. See
// ECMA-262, section 15.4.4.6.
function ArrayPop() {
@@ -439,14 +424,36 @@ function ArrayPop() {
this.length = n;
return;
}
+
+ if (%IsObserved(this))
+ return ObservedArrayPop.call(this, n);
+
n--;
var value = this[n];
+ Delete(this, ToName(n), true);
this.length = n;
- delete this[n];
return value;
}
+function ObservedArrayPush() {
+ var n = TO_UINT32(this.length);
+ var m = %_ArgumentsLength();
+
+ try {
+ BeginPerformSplice(this);
+ for (var i = 0; i < m; i++) {
+ this[i+n] = %_Arguments(i);
+ }
+ this.length = n + m;
+ } finally {
+ EndPerformSplice(this);
+ EnqueueSpliceRecord(this, n, [], m);
+ }
+
+ return this.length;
+}
+
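
// For illustration, what the observed push path is expected to produce, assuming the
// Object.observe API shipped behind a flag in this era; the record shape mirrors
// EnqueueSpliceRecord(this, n, [], m).
var arr = [10];
Object.observe(arr, function(records) {
  // Expected: one record like
  // { object: arr, type: 'splice', index: 1, removed: [], addedCount: 2 }
  console.log(records);
});
arr.push(20, 30);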
// Appends the arguments to the end of the array and returns the new
// length of the array. See ECMA-262, section 15.4.4.7.
function ArrayPush() {
@@ -455,6 +462,9 @@ function ArrayPush() {
["Array.prototype.push"]);
}
+ if (%IsObserved(this))
+ return ObservedArrayPush.apply(this, arguments);
+
var n = TO_UINT32(this.length);
var m = %_ArgumentsLength();
for (var i = 0; i < m; i++) {
@@ -566,6 +576,21 @@ function ArrayReverse() {
}
+function ObservedArrayShift(len) {
+ var first = this[0];
+
+ try {
+ BeginPerformSplice(this);
+ SimpleMove(this, 0, 1, len, 0);
+ this.length = len - 1;
+ } finally {
+ EndPerformSplice(this);
+ EnqueueSpliceRecord(this, 0, [first], 0);
+ }
+
+ return first;
+}
+
function ArrayShift() {
if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
throw MakeTypeError("called_on_null_or_undefined",
@@ -579,6 +604,9 @@ function ArrayShift() {
return;
}
+ if (%IsObserved(this))
+ return ObservedArrayShift.call(this, len);
+
var first = this[0];
if (IS_ARRAY(this)) {
@@ -592,6 +620,24 @@ function ArrayShift() {
return first;
}
+function ObservedArrayUnshift() {
+ var len = TO_UINT32(this.length);
+ var num_arguments = %_ArgumentsLength();
+
+ try {
+ BeginPerformSplice(this);
+ SimpleMove(this, 0, 0, len, num_arguments);
+ for (var i = 0; i < num_arguments; i++) {
+ this[i] = %_Arguments(i);
+ }
+ this.length = len + num_arguments;
+ } finally {
+ EndPerformSplice(this);
+ EnqueueSpliceRecord(this, 0, [], num_arguments);
+ }
+
+ return len + num_arguments;
+}
function ArrayUnshift(arg1) { // length == 1
if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
@@ -599,6 +645,9 @@ function ArrayUnshift(arg1) { // length == 1
["Array.prototype.unshift"]);
}
+ if (%IsObserved(this))
+ return ObservedArrayUnshift.apply(this, arguments);
+
var len = TO_UINT32(this.length);
var num_arguments = %_ArgumentsLength();
@@ -628,7 +677,7 @@ function ArraySlice(start, end) {
var start_i = TO_INTEGER(start);
var end_i = len;
- if (end !== void 0) end_i = TO_INTEGER(end);
+ if (!IS_UNDEFINED(end)) end_i = TO_INTEGER(end);
if (start_i < 0) {
start_i += len;
@@ -649,6 +698,7 @@ function ArraySlice(start, end) {
if (end_i < start_i) return result;
if (IS_ARRAY(this) &&
+ !%IsObserved(this) &&
(end_i > 1000) &&
(%EstimateNumberOfElements(this) < end_i)) {
SmartSlice(this, start_i, end_i - start_i, len, result);
@@ -662,50 +712,99 @@ function ArraySlice(start, end) {
}
-function ArraySplice(start, delete_count) {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["Array.prototype.splice"]);
- }
-
- var num_arguments = %_ArgumentsLength();
-
- var len = TO_UINT32(this.length);
- var start_i = TO_INTEGER(start);
-
+function ComputeSpliceStartIndex(start_i, len) {
if (start_i < 0) {
start_i += len;
- if (start_i < 0) start_i = 0;
- } else {
- if (start_i > len) start_i = len;
+ return start_i < 0 ? 0 : start_i;
}
+ return start_i > len ? len : start_i;
+}
+
+
+function ComputeSpliceDeleteCount(delete_count, num_arguments, len, start_i) {
// SpiderMonkey, TraceMonkey and JSC treat the case where no delete count is
// given as a request to delete all the elements from the start.
// And it differs from the case of undefined delete count.
// This does not follow ECMA-262, but we do the same for
// compatibility.
var del_count = 0;
- if (num_arguments == 1) {
- del_count = len - start_i;
- } else {
- del_count = TO_INTEGER(delete_count);
- if (del_count < 0) del_count = 0;
- if (del_count > len - start_i) del_count = len - start_i;
- }
+ if (num_arguments == 1)
+ return len - start_i;
+
+ del_count = TO_INTEGER(delete_count);
+ if (del_count < 0)
+ return 0;
+
+ if (del_count > len - start_i)
+ return len - start_i;
+
+ return del_count;
+}
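
// A plain-JavaScript restatement of the two helpers above with a few worked cases. This is
// a sketch, not the runtime-internal versions, which use TO_INTEGER and %_ArgumentsLength.
function spliceStartIndex(start, len) {
  if (start < 0) return Math.max(start + len, 0);   // negative start counts from the end
  return Math.min(start, len);                      // otherwise clamp at length
}

function spliceDeleteCount(deleteCount, argCount, len, startIndex) {
  if (argCount === 1) return len - startIndex;      // count omitted: delete to the end
  var count = Math.floor(deleteCount) || 0;         // rough TO_INTEGER (negatives clamp below)
  return Math.min(Math.max(count, 0), len - startIndex);
}

console.log(spliceStartIndex(-2, 5));                // 3
console.log(spliceDeleteCount(undefined, 1, 5, 3));  // 2 (delete to the end)
console.log(spliceDeleteCount(99, 2, 5, 3));         // 2 (clamped to len - start)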
+
+function ObservedArraySplice(start, delete_count) {
+ var num_arguments = %_ArgumentsLength();
+ var len = TO_UINT32(this.length);
+ var start_i = ComputeSpliceStartIndex(TO_INTEGER(start), len);
+ var del_count = ComputeSpliceDeleteCount(delete_count, num_arguments, len,
+ start_i);
var deleted_elements = [];
deleted_elements.length = del_count;
+ var num_elements_to_add = num_arguments > 2 ? num_arguments - 2 : 0;
+
+ try {
+ BeginPerformSplice(this);
+
+ SimpleSlice(this, start_i, del_count, len, deleted_elements);
+ SimpleMove(this, start_i, del_count, len, num_elements_to_add);
+
+ // Insert the arguments into the resulting array in
+ // place of the deleted elements.
+ var i = start_i;
+ var arguments_index = 2;
+ var arguments_length = %_ArgumentsLength();
+ while (arguments_index < arguments_length) {
+ this[i++] = %_Arguments(arguments_index++);
+ }
+ this.length = len - del_count + num_elements_to_add;
+
+ } finally {
+ EndPerformSplice(this);
+ if (deleted_elements.length || num_elements_to_add) {
+ EnqueueSpliceRecord(this,
+ start_i,
+ deleted_elements.slice(),
+ num_elements_to_add);
+ }
+ }
- // Number of elements to add.
- var num_additional_args = 0;
- if (num_arguments > 2) {
- num_additional_args = num_arguments - 2;
+ // Return the deleted elements.
+ return deleted_elements;
+}
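
The BeginPerformSplice/EndPerformSplice bracketing above suppresses the individual index and length change notifications so that observers receive one coherent splice record built by EnqueueSpliceRecord. A rough sketch of what that looks like from script, assuming a build that exposes the experimental Array.observe API this code backs (the API was later withdrawn, so this is illustrative only):

    var arr = [1, 2, 3, 4];
    Array.observe(arr, function (records) {
      // Expect a single record shaped roughly like:
      // { type: 'splice', object: arr, index: 1, removed: [2, 3], addedCount: 1 }
      console.log(records);
    });
    arr.splice(1, 2, 'x');  // records are delivered asynchronously, after the splice returns
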
+
+
+function ArraySplice(start, delete_count) {
+ if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+ throw MakeTypeError("called_on_null_or_undefined",
+ ["Array.prototype.splice"]);
}
- var use_simple_splice = true;
+ if (%IsObserved(this))
+ return ObservedArraySplice.apply(this, arguments);
+
+ var num_arguments = %_ArgumentsLength();
+ var len = TO_UINT32(this.length);
+ var start_i = ComputeSpliceStartIndex(TO_INTEGER(start), len);
+ var del_count = ComputeSpliceDeleteCount(delete_count, num_arguments, len,
+ start_i);
+ var deleted_elements = [];
+ deleted_elements.length = del_count;
+ var num_elements_to_add = num_arguments > 2 ? num_arguments - 2 : 0;
- if (IS_ARRAY(this) && num_additional_args !== del_count) {
+ var use_simple_splice = true;
+ if (IS_ARRAY(this) &&
+ num_elements_to_add !== del_count) {
// If we are only deleting/moving a few things near the end of the
// array then the simple version is going to be faster, because it
// doesn't touch most of the array.
@@ -717,10 +816,10 @@ function ArraySplice(start, delete_count) {
if (use_simple_splice) {
SimpleSlice(this, start_i, del_count, len, deleted_elements);
- SimpleMove(this, start_i, del_count, len, num_additional_args);
+ SimpleMove(this, start_i, del_count, len, num_elements_to_add);
} else {
SmartSlice(this, start_i, del_count, len, deleted_elements);
- SmartMove(this, start_i, del_count, len, num_additional_args);
+ SmartMove(this, start_i, del_count, len, num_elements_to_add);
}
// Insert the arguments into the resulting array in
@@ -731,7 +830,7 @@ function ArraySplice(start, delete_count) {
while (arguments_index < arguments_length) {
this[i++] = %_Arguments(arguments_index++);
}
- this.length = len - del_count + num_additional_args;
+ this.length = len - del_count + num_elements_to_add;
// Return the deleted elements.
return deleted_elements;
@@ -881,26 +980,24 @@ function ArraySort(comparefn) {
// of a prototype property.
var CopyFromPrototype = function CopyFromPrototype(obj, length) {
var max = 0;
- for (var proto = obj.__proto__; proto; proto = proto.__proto__) {
+ for (var proto = %GetPrototype(obj); proto; proto = %GetPrototype(proto)) {
var indices = %GetArrayKeys(proto, length);
- if (indices.length > 0) {
- if (indices[0] == -1) {
- // It's an interval.
- var proto_length = indices[1];
- for (var i = 0; i < proto_length; i++) {
- if (!obj.hasOwnProperty(i) && proto.hasOwnProperty(i)) {
- obj[i] = proto[i];
- if (i >= max) { max = i + 1; }
- }
+ if (IS_NUMBER(indices)) {
+ // It's an interval.
+ var proto_length = indices;
+ for (var i = 0; i < proto_length; i++) {
+ if (!obj.hasOwnProperty(i) && proto.hasOwnProperty(i)) {
+ obj[i] = proto[i];
+ if (i >= max) { max = i + 1; }
}
- } else {
- for (var i = 0; i < indices.length; i++) {
- var index = indices[i];
- if (!IS_UNDEFINED(index) &&
- !obj.hasOwnProperty(index) && proto.hasOwnProperty(index)) {
- obj[index] = proto[index];
- if (index >= max) { max = index + 1; }
- }
+ }
+ } else {
+ for (var i = 0; i < indices.length; i++) {
+ var index = indices[i];
+ if (!IS_UNDEFINED(index) &&
+ !obj.hasOwnProperty(index) && proto.hasOwnProperty(index)) {
+ obj[index] = proto[index];
+ if (index >= max) { max = index + 1; }
}
}
}
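
The shape test above reflects the revised %GetArrayKeys contract used throughout this file: the runtime returns a plain number when the keys form the dense interval [0, n), and an array of individual indices (possibly containing undefined holes) otherwise. A minimal sketch of a caller handling both shapes; the helper name is hypothetical, and %GetArrayKeys itself is only reachable with --allow-natives-syntax:

    // Hypothetical consumer of the two result shapes described above.
    function forEachKey(indices, fn) {
      if (typeof indices === 'number') {
        // Interval form: the keys are 0 .. indices - 1.
        for (var i = 0; i < indices; i++) fn(i);
      } else {
        // Array form: each entry is an index value; skip undefined holes.
        for (var i = 0; i < indices.length; i++) {
          if (indices[i] !== undefined) fn(indices[i]);
        }
      }
    }
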
@@ -912,24 +1009,22 @@ function ArraySort(comparefn) {
// where a prototype of obj has an element. I.e., shadow all prototype
// elements in that range.
var ShadowPrototypeElements = function(obj, from, to) {
- for (var proto = obj.__proto__; proto; proto = proto.__proto__) {
+ for (var proto = %GetPrototype(obj); proto; proto = %GetPrototype(proto)) {
var indices = %GetArrayKeys(proto, to);
- if (indices.length > 0) {
- if (indices[0] == -1) {
- // It's an interval.
- var proto_length = indices[1];
- for (var i = from; i < proto_length; i++) {
- if (proto.hasOwnProperty(i)) {
- obj[i] = void 0;
- }
+ if (IS_NUMBER(indices)) {
+ // It's an interval.
+ var proto_length = indices;
+ for (var i = from; i < proto_length; i++) {
+ if (proto.hasOwnProperty(i)) {
+ obj[i] = UNDEFINED;
}
- } else {
- for (var i = 0; i < indices.length; i++) {
- var index = indices[i];
- if (!IS_UNDEFINED(index) && from <= index &&
- proto.hasOwnProperty(index)) {
- obj[index] = void 0;
- }
+ }
+ } else {
+ for (var i = 0; i < indices.length; i++) {
+ var index = indices[i];
+ if (!IS_UNDEFINED(index) && from <= index &&
+ proto.hasOwnProperty(index)) {
+ obj[index] = UNDEFINED;
}
}
}
@@ -966,7 +1061,7 @@ function ArraySort(comparefn) {
if (first_undefined < last_defined) {
// Fill in hole or undefined.
obj[first_undefined] = obj[last_defined];
- obj[last_defined] = void 0;
+ obj[last_defined] = UNDEFINED;
}
}
// If there were any undefineds in the entire array, first_undefined
@@ -978,12 +1073,12 @@ function ArraySort(comparefn) {
// an undefined should be and vice versa.
var i;
for (i = first_undefined; i < length - num_holes; i++) {
- obj[i] = void 0;
+ obj[i] = UNDEFINED;
}
for (i = length - num_holes; i < length; i++) {
      // For compatibility with WebKit, do not expose elements in the prototype.
- if (i in obj.__proto__) {
- obj[i] = void 0;
+ if (i in %GetPrototype(obj)) {
+ obj[i] = UNDEFINED;
} else {
delete obj[i];
}
@@ -1010,11 +1105,13 @@ function ArraySort(comparefn) {
max_prototype_element = CopyFromPrototype(this, length);
}
- var num_non_undefined = %RemoveArrayHoles(this, length);
+ var num_non_undefined = %IsObserved(this) ?
+ -1 : %RemoveArrayHoles(this, length);
+
if (num_non_undefined == -1) {
- // There were indexed accessors in the array. Move array holes and
- // undefineds to the end using a Javascript function that is safe
- // in the presence of accessors.
+ // The array is observed, or there were indexed accessors in the array.
+    // Move array holes and undefineds to the end using a JavaScript function
+ // that is safe in the presence of accessors and is observable.
num_non_undefined = SafeRemoveArrayHoles(this);
}
@@ -1049,7 +1146,7 @@ function ArrayFilter(f, receiver) {
}
if (IS_NULL_OR_UNDEFINED(receiver)) {
receiver = %GetDefaultReceiver(f) || receiver;
- } else if (!IS_SPEC_OBJECT(receiver)) {
+ } else if (!IS_SPEC_OBJECT(receiver) && %IsClassicModeFunction(f)) {
receiver = ToObject(receiver);
}
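
The added %IsClassicModeFunction check limits the ToObject boxing of a primitive receiver to classic (sloppy-mode) callbacks; strict-mode callbacks now see the primitive unchanged, as ES5.1 specifies. A small sketch of the visible difference, assuming an engine with this change:

    [1].filter(function () {
      return typeof this === 'object';   // sloppy callback: 42 is boxed to a Number wrapper
    }, 42);                              // -> [1]

    [1].filter(function () {
      'use strict';
      return typeof this === 'number';   // strict callback: receiver stays the primitive 42
    }, 42);                              // -> [1]
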
@@ -1100,9 +1197,10 @@ function ArrayForEach(f, receiver) {
}
if (IS_NULL_OR_UNDEFINED(receiver)) {
receiver = %GetDefaultReceiver(f) || receiver;
- } else if (!IS_SPEC_OBJECT(receiver)) {
+ } else if (!IS_SPEC_OBJECT(receiver) && %IsClassicModeFunction(f)) {
receiver = ToObject(receiver);
}
+
if (%DebugCallbackSupportsStepping(f)) {
for (var i = 0; i < length; i++) {
if (i in array) {
@@ -1143,7 +1241,7 @@ function ArraySome(f, receiver) {
}
if (IS_NULL_OR_UNDEFINED(receiver)) {
receiver = %GetDefaultReceiver(f) || receiver;
- } else if (!IS_SPEC_OBJECT(receiver)) {
+ } else if (!IS_SPEC_OBJECT(receiver) && %IsClassicModeFunction(f)) {
receiver = ToObject(receiver);
}
@@ -1186,7 +1284,7 @@ function ArrayEvery(f, receiver) {
}
if (IS_NULL_OR_UNDEFINED(receiver)) {
receiver = %GetDefaultReceiver(f) || receiver;
- } else if (!IS_SPEC_OBJECT(receiver)) {
+ } else if (!IS_SPEC_OBJECT(receiver) && %IsClassicModeFunction(f)) {
receiver = ToObject(receiver);
}
@@ -1228,7 +1326,7 @@ function ArrayMap(f, receiver) {
}
if (IS_NULL_OR_UNDEFINED(receiver)) {
receiver = %GetDefaultReceiver(f) || receiver;
- } else if (!IS_SPEC_OBJECT(receiver)) {
+ } else if (!IS_SPEC_OBJECT(receiver) && %IsClassicModeFunction(f)) {
receiver = ToObject(receiver);
}
@@ -1280,18 +1378,15 @@ function ArrayIndexOf(element, index) {
var min = index;
var max = length;
if (UseSparseVariant(this, length, IS_ARRAY(this))) {
- var intervals = %GetArrayKeys(this, length);
- if (intervals.length == 2 && intervals[0] < 0) {
- // A single interval.
- var intervalMin = -(intervals[0] + 1);
- var intervalMax = intervalMin + intervals[1];
- if (min < intervalMin) min = intervalMin;
- max = intervalMax; // Capped by length already.
+ var indices = %GetArrayKeys(this, length);
+ if (IS_NUMBER(indices)) {
+ // It's an interval.
+ max = indices; // Capped by length already.
// Fall through to loop below.
} else {
- if (intervals.length == 0) return -1;
+ if (indices.length == 0) return -1;
// Get all the keys in sorted order.
- var sortedKeys = GetSortedArrayKeys(this, intervals);
+ var sortedKeys = GetSortedArrayKeys(this, indices);
var n = sortedKeys.length;
var i = 0;
while (i < n && sortedKeys[i] < index) i++;
@@ -1341,18 +1436,15 @@ function ArrayLastIndexOf(element, index) {
var min = 0;
var max = index;
if (UseSparseVariant(this, length, IS_ARRAY(this))) {
- var intervals = %GetArrayKeys(this, index + 1);
- if (intervals.length == 2 && intervals[0] < 0) {
- // A single interval.
- var intervalMin = -(intervals[0] + 1);
- var intervalMax = intervalMin + intervals[1];
- if (min < intervalMin) min = intervalMin;
- max = intervalMax; // Capped by index already.
+ var indices = %GetArrayKeys(this, index + 1);
+ if (IS_NUMBER(indices)) {
+ // It's an interval.
+ max = indices; // Capped by index already.
// Fall through to loop below.
} else {
- if (intervals.length == 0) return -1;
+ if (indices.length == 0) return -1;
// Get all the keys in sorted order.
- var sortedKeys = GetSortedArrayKeys(this, intervals);
+ var sortedKeys = GetSortedArrayKeys(this, indices);
var i = sortedKeys.length - 1;
while (i >= 0) {
var key = sortedKeys[i];
@@ -1491,8 +1583,10 @@ function ArrayIsArray(obj) {
// -------------------------------------------------------------------
+
function SetUpArray() {
%CheckIsBootstrapping();
+
// Set up non-enumerable constructor property on the Array.prototype
// object.
%SetProperty($Array.prototype, "constructor", $Array, DONT_ENUM);
@@ -1549,6 +1643,15 @@ function SetUpArray() {
// exposed to user code.
// Adding only the functions that are actually used.
SetUpLockedPrototype(InternalArray, $Array(), $Array(
+ "concat", getFunction("concat", ArrayConcat),
+ "indexOf", getFunction("indexOf", ArrayIndexOf),
+ "join", getFunction("join", ArrayJoin),
+ "pop", getFunction("pop", ArrayPop),
+ "push", getFunction("push", ArrayPush),
+ "splice", getFunction("splice", ArraySplice)
+ ));
+
+ SetUpLockedPrototype(InternalPackedArray, $Array(), $Array(
"join", getFunction("join", ArrayJoin),
"pop", getFunction("pop", ArrayPop),
"push", getFunction("push", ArrayPush)
diff --git a/deps/v8/src/arraybuffer.js b/deps/v8/src/arraybuffer.js
new file mode 100644
index 000000000..c5c98dbe4
--- /dev/null
+++ b/deps/v8/src/arraybuffer.js
@@ -0,0 +1,111 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"use strict";
+
+var $ArrayBuffer = global.ArrayBuffer;
+
+// -------------------------------------------------------------------
+
+function ArrayBufferConstructor(length) { // length = 1
+ if (%_IsConstructCall()) {
+ var byteLength = ToPositiveInteger(length, 'invalid_array_buffer_length');
+ %ArrayBufferInitialize(this, byteLength);
+ } else {
+ throw MakeTypeError('constructor_not_function', ["ArrayBuffer"]);
+ }
+}
+
+function ArrayBufferGetByteLength() {
+ if (!IS_ARRAYBUFFER(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['ArrayBuffer.prototype.byteLength', this]);
+ }
+ return %ArrayBufferGetByteLength(this);
+}
+
+// ES6 Draft 15.13.5.5.3
+function ArrayBufferSlice(start, end) {
+ if (!IS_ARRAYBUFFER(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['ArrayBuffer.prototype.slice', this]);
+ }
+
+ var relativeStart = TO_INTEGER(start);
+ var first;
+ if (relativeStart < 0) {
+ first = MathMax(this.byteLength + relativeStart, 0);
+ } else {
+ first = MathMin(relativeStart, this.byteLength);
+ }
+ var relativeEnd = IS_UNDEFINED(end) ? this.byteLength : TO_INTEGER(end);
+ var fin;
+ if (relativeEnd < 0) {
+ fin = MathMax(this.byteLength + relativeEnd, 0);
+ } else {
+ fin = MathMin(relativeEnd, this.byteLength);
+ }
+
+ if (fin < first) {
+ fin = first;
+ }
+ var newLen = fin - first;
+ // TODO(dslomov): implement inheritance
+ var result = new $ArrayBuffer(newLen);
+
+ %ArrayBufferSliceImpl(this, result, first);
+ return result;
+}
+
+function ArrayBufferIsView(obj) {
+ return %ArrayBufferIsView(obj);
+}
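
ArrayBufferSlice interprets negative offsets relative to byteLength, clamps both endpoints into [0, byteLength], and never yields a negative length, while ArrayBufferIsView distinguishes views (typed arrays, DataView) from buffers. A short usage sketch:

    var buf = new ArrayBuffer(8);
    buf.slice(2).byteLength;        // 6
    buf.slice(-4, -1).byteLength;   // 3  (bytes 4..6)
    buf.slice(6, 2).byteLength;     // 0  (end is clamped up to start)

    ArrayBuffer.isView(new Uint8Array(buf));  // true
    ArrayBuffer.isView(buf);                  // false

    // Calling the constructor without `new` throws, per ArrayBufferConstructor above:
    // ArrayBuffer(8)  ->  TypeError
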
+
+function SetUpArrayBuffer() {
+ %CheckIsBootstrapping();
+
+ // Set up the ArrayBuffer constructor function.
+ %SetCode($ArrayBuffer, ArrayBufferConstructor);
+ %FunctionSetPrototype($ArrayBuffer, new $Object());
+
+ // Set up the constructor property on the ArrayBuffer prototype object.
+ %SetProperty($ArrayBuffer.prototype, "constructor", $ArrayBuffer, DONT_ENUM);
+
+ InstallGetter($ArrayBuffer.prototype, "byteLength", ArrayBufferGetByteLength);
+
+ InstallFunctions($ArrayBuffer, DONT_ENUM, $Array(
+ "isView", ArrayBufferIsView
+ ));
+
+ InstallFunctions($ArrayBuffer.prototype, DONT_ENUM, $Array(
+ "slice", ArrayBufferSlice
+ ));
+}
+
+SetUpArrayBuffer();
+
+
diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc
index d81d4ae61..9ed43601c 100644
--- a/deps/v8/src/assembler.cc
+++ b/deps/v8/src/assembler.cc
@@ -34,7 +34,7 @@
#include "assembler.h"
-#include <math.h> // For cos, log, pow, sin, tan, etc.
+#include <cmath>
#include "api.h"
#include "builtins.h"
#include "counters.h"
@@ -43,7 +43,7 @@
#include "deoptimizer.h"
#include "execution.h"
#include "ic.h"
-#include "isolate.h"
+#include "isolate-inl.h"
#include "jsregexp.h"
#include "lazy-instance.h"
#include "platform.h"
@@ -91,31 +91,141 @@ namespace internal {
struct DoubleConstant BASE_EMBEDDED {
double min_int;
double one_half;
+ double minus_one_half;
double minus_zero;
double zero;
double uint8_max_value;
double negative_infinity;
double canonical_non_hole_nan;
double the_hole_nan;
+ double uint32_bias;
};
static DoubleConstant double_constants;
const char* const RelocInfo::kFillerCommentString = "DEOPTIMIZATION PADDING";
+static bool math_exp_data_initialized = false;
+static Mutex* math_exp_data_mutex = NULL;
+static double* math_exp_constants_array = NULL;
+static double* math_exp_log_table_array = NULL;
+
// -----------------------------------------------------------------------------
// Implementation of AssemblerBase
-AssemblerBase::AssemblerBase(Isolate* isolate)
+AssemblerBase::AssemblerBase(Isolate* isolate, void* buffer, int buffer_size)
: isolate_(isolate),
- jit_cookie_(0) {
+ jit_cookie_(0),
+ enabled_cpu_features_(0),
+ emit_debug_code_(FLAG_debug_code),
+ predictable_code_size_(false) {
if (FLAG_mask_constants_with_cookie && isolate != NULL) {
- jit_cookie_ = V8::RandomPrivate(isolate);
+ jit_cookie_ = isolate->random_number_generator()->NextInt();
+ }
+
+ if (buffer == NULL) {
+ // Do our own buffer management.
+ if (buffer_size <= kMinimalBufferSize) {
+ buffer_size = kMinimalBufferSize;
+ if (isolate->assembler_spare_buffer() != NULL) {
+ buffer = isolate->assembler_spare_buffer();
+ isolate->set_assembler_spare_buffer(NULL);
+ }
+ }
+ if (buffer == NULL) buffer = NewArray<byte>(buffer_size);
+ own_buffer_ = true;
+ } else {
+ // Use externally provided buffer instead.
+ ASSERT(buffer_size > 0);
+ own_buffer_ = false;
+ }
+ buffer_ = static_cast<byte*>(buffer);
+ buffer_size_ = buffer_size;
+
+ pc_ = buffer_;
+}
+
+
+AssemblerBase::~AssemblerBase() {
+ if (own_buffer_) {
+ if (isolate() != NULL &&
+ isolate()->assembler_spare_buffer() == NULL &&
+ buffer_size_ == kMinimalBufferSize) {
+ isolate()->set_assembler_spare_buffer(buffer_);
+ } else {
+ DeleteArray(buffer_);
+ }
}
}
// -----------------------------------------------------------------------------
+// Implementation of PredictableCodeSizeScope
+
+PredictableCodeSizeScope::PredictableCodeSizeScope(AssemblerBase* assembler,
+ int expected_size)
+ : assembler_(assembler),
+ expected_size_(expected_size),
+ start_offset_(assembler->pc_offset()),
+ old_value_(assembler->predictable_code_size()) {
+ assembler_->set_predictable_code_size(true);
+}
+
+
+PredictableCodeSizeScope::~PredictableCodeSizeScope() {
+ // TODO(svenpanne) Remove the 'if' when everything works.
+ if (expected_size_ >= 0) {
+ CHECK_EQ(expected_size_, assembler_->pc_offset() - start_offset_);
+ }
+ assembler_->set_predictable_code_size(old_value_);
+}
+
+
+// -----------------------------------------------------------------------------
+// Implementation of CpuFeatureScope
+
+#ifdef DEBUG
+CpuFeatureScope::CpuFeatureScope(AssemblerBase* assembler, CpuFeature f)
+ : assembler_(assembler) {
+ ASSERT(CpuFeatures::IsSafeForSnapshot(f));
+ old_enabled_ = assembler_->enabled_cpu_features();
+ uint64_t mask = static_cast<uint64_t>(1) << f;
+ // TODO(svenpanne) This special case below doesn't belong here!
+#if V8_TARGET_ARCH_ARM
+ // ARMv7 is implied by VFP3.
+ if (f == VFP3) {
+ mask |= static_cast<uint64_t>(1) << ARMv7;
+ }
+#endif
+ assembler_->set_enabled_cpu_features(old_enabled_ | mask);
+}
+
+
+CpuFeatureScope::~CpuFeatureScope() {
+ assembler_->set_enabled_cpu_features(old_enabled_);
+}
+#endif
+
+
+// -----------------------------------------------------------------------------
+// Implementation of PlatformFeatureScope
+
+PlatformFeatureScope::PlatformFeatureScope(CpuFeature f)
+ : old_cross_compile_(CpuFeatures::cross_compile_) {
+ // CpuFeatures is a global singleton, therefore this is only safe in
+ // single threaded code.
+ ASSERT(Serializer::enabled());
+ uint64_t mask = static_cast<uint64_t>(1) << f;
+ CpuFeatures::cross_compile_ |= mask;
+}
+
+
+PlatformFeatureScope::~PlatformFeatureScope() {
+ CpuFeatures::cross_compile_ = old_cross_compile_;
+}
+
+
+// -----------------------------------------------------------------------------
// Implementation of Label
int Label::pos() const {
@@ -290,6 +400,7 @@ void RelocInfoWriter::WriteExtraTaggedIntData(int data_delta, int top_tag) {
}
}
+
void RelocInfoWriter::WriteExtraTaggedConstPoolData(int data) {
WriteExtraTag(kConstPoolExtraTag, kConstPoolTag);
for (int i = 0; i < kIntSize; i++) {
@@ -299,6 +410,7 @@ void RelocInfoWriter::WriteExtraTaggedConstPoolData(int data) {
}
}
+
void RelocInfoWriter::WriteExtraTaggedData(intptr_t data_delta, int top_tag) {
WriteExtraTag(kDataJumpExtraTag, top_tag);
for (int i = 0; i < kIntptrSize; i++) {
@@ -313,6 +425,7 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) {
#ifdef DEBUG
byte* begin_pos = pos_;
#endif
+ ASSERT(rinfo->rmode() < RelocInfo::NUMBER_OF_MODES);
ASSERT(rinfo->pc() - last_pc_ >= 0);
ASSERT(RelocInfo::LAST_STANDARD_NONCOMPACT_ENUM - RelocInfo::LAST_COMPACT_ENUM
<= kMaxStandardNonCompactModes);
@@ -570,6 +683,15 @@ void RelocIterator::next() {
}
}
}
+ if (code_age_sequence_ != NULL) {
+ byte* old_code_age_sequence = code_age_sequence_;
+ code_age_sequence_ = NULL;
+ if (SetMode(RelocInfo::CODE_AGE_SEQUENCE)) {
+ rinfo_.data_ = 0;
+ rinfo_.pc_ = old_code_age_sequence;
+ return;
+ }
+ }
done_ = true;
}
@@ -585,6 +707,12 @@ RelocIterator::RelocIterator(Code* code, int mode_mask) {
mode_mask_ = mode_mask;
last_id_ = 0;
last_position_ = 0;
+ byte* sequence = code->FindCodeAgeSequence();
+ if (sequence != NULL && !Code::IsYoungSequence(sequence)) {
+ code_age_sequence_ = sequence;
+ } else {
+ code_age_sequence_ = NULL;
+ }
if (mode_mask_ == 0) pos_ = end_;
next();
}
@@ -600,6 +728,7 @@ RelocIterator::RelocIterator(const CodeDesc& desc, int mode_mask) {
mode_mask_ = mode_mask;
last_id_ = 0;
last_position_ = 0;
+ code_age_sequence_ = NULL;
if (mode_mask_ == 0) pos_ = end_;
next();
}
@@ -609,11 +738,28 @@ RelocIterator::RelocIterator(const CodeDesc& desc, int mode_mask) {
// Implementation of RelocInfo
+#ifdef DEBUG
+bool RelocInfo::RequiresRelocation(const CodeDesc& desc) {
+ // Ensure there are no code targets or embedded objects present in the
+ // deoptimization entries, they would require relocation after code
+ // generation.
+ int mode_mask = RelocInfo::kCodeTargetMask |
+ RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
+ RelocInfo::ModeMask(RelocInfo::CELL) |
+ RelocInfo::kApplyMask;
+ RelocIterator it(desc, mode_mask);
+ return !it.done();
+}
+#endif
+
+
#ifdef ENABLE_DISASSEMBLER
const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
switch (rmode) {
- case RelocInfo::NONE:
- return "no reloc";
+ case RelocInfo::NONE32:
+ return "no reloc 32";
+ case RelocInfo::NONE64:
+ return "no reloc 64";
case RelocInfo::EMBEDDED_OBJECT:
return "embedded object";
case RelocInfo::CONSTRUCT_CALL:
@@ -629,8 +775,8 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
return "code target";
case RelocInfo::CODE_TARGET_WITH_ID:
return "code target with id";
- case RelocInfo::GLOBAL_PROPERTY_CELL:
- return "global property cell";
+ case RelocInfo::CELL:
+ return "property cell";
case RelocInfo::RUNTIME_ENTRY:
return "runtime entry";
case RelocInfo::JS_RETURN:
@@ -652,6 +798,8 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
UNREACHABLE();
#endif
return "debug break slot";
+ case RelocInfo::CODE_AGE_SEQUENCE:
+ return "code_age_sequence";
case RelocInfo::NUMBER_OF_MODES:
UNREACHABLE();
return "number_of_modes";
@@ -660,7 +808,7 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
}
-void RelocInfo::Print(FILE* out) {
+void RelocInfo::Print(Isolate* isolate, FILE* out) {
PrintF(out, "%p %s", pc_, RelocModeName(rmode_));
if (IsComment(rmode_)) {
PrintF(out, " (%s)", reinterpret_cast<char*>(data_));
@@ -669,7 +817,7 @@ void RelocInfo::Print(FILE* out) {
target_object()->ShortPrint(out);
PrintF(out, ")");
} else if (rmode_ == EXTERNAL_REFERENCE) {
- ExternalReferenceEncoder ref_encoder;
+ ExternalReferenceEncoder ref_encoder(isolate);
PrintF(out, " (%s) (%p)",
ref_encoder.NameOfAddress(*target_reference_address()),
*target_reference_address());
@@ -682,11 +830,11 @@ void RelocInfo::Print(FILE* out) {
}
} else if (IsPosition(rmode_)) {
PrintF(out, " (%" V8_PTR_PREFIX "d)", data());
- } else if (rmode_ == RelocInfo::RUNTIME_ENTRY &&
- Isolate::Current()->deoptimizer_data() != NULL) {
+ } else if (IsRuntimeEntry(rmode_) &&
+ isolate->deoptimizer_data() != NULL) {
    // Deoptimization bailouts are stored as runtime entries.
int id = Deoptimizer::GetDeoptimizationId(
- target_address(), Deoptimizer::EAGER);
+ isolate, target_address(), Deoptimizer::EAGER);
if (id != Deoptimizer::kNotDeoptimizationEntry) {
PrintF(out, " (deoptimization bailout %d)", id);
}
@@ -703,7 +851,7 @@ void RelocInfo::Verify() {
case EMBEDDED_OBJECT:
Object::VerifyPointer(target_object());
break;
- case GLOBAL_PROPERTY_CELL:
+ case CELL:
Object::VerifyPointer(target_cell());
break;
case DEBUG_BREAK:
@@ -720,7 +868,7 @@ void RelocInfo::Verify() {
CHECK(addr != NULL);
// Check that we can find the right code object.
Code* code = Code::GetCodeFromTargetAddress(addr);
- Object* found = HEAP->FindCodeObject(addr);
+ Object* found = code->GetIsolate()->FindCodeObject(addr);
CHECK(found->IsCode());
CHECK(code->address() == HeapObject::cast(found)->address());
break;
@@ -734,11 +882,15 @@ void RelocInfo::Verify() {
case INTERNAL_REFERENCE:
case CONST_POOL:
case DEBUG_BREAK_SLOT:
- case NONE:
+ case NONE32:
+ case NONE64:
break;
case NUMBER_OF_MODES:
UNREACHABLE();
break;
+ case CODE_AGE_SEQUENCE:
+ ASSERT(Code::IsYoungSequence(pc_) || code_age_stub()->IsCode());
+ break;
}
}
#endif // VERIFY_HEAP
@@ -750,12 +902,67 @@ void RelocInfo::Verify() {
void ExternalReference::SetUp() {
double_constants.min_int = kMinInt;
double_constants.one_half = 0.5;
+ double_constants.minus_one_half = -0.5;
double_constants.minus_zero = -0.0;
double_constants.uint8_max_value = 255;
double_constants.zero = 0.0;
double_constants.canonical_non_hole_nan = OS::nan_value();
double_constants.the_hole_nan = BitCast<double>(kHoleNanInt64);
double_constants.negative_infinity = -V8_INFINITY;
+ double_constants.uint32_bias =
+ static_cast<double>(static_cast<uint32_t>(0xFFFFFFFF)) + 1;
+
+ math_exp_data_mutex = new Mutex();
+}
+
+
+void ExternalReference::InitializeMathExpData() {
+ // Early return?
+  // Fast check (without taking the lock) for the common already-initialized case.
+
+ LockGuard<Mutex> lock_guard(math_exp_data_mutex);
+ if (!math_exp_data_initialized) {
+ // If this is changed, generated code must be adapted too.
+ const int kTableSizeBits = 11;
+ const int kTableSize = 1 << kTableSizeBits;
+ const double kTableSizeDouble = static_cast<double>(kTableSize);
+
+ math_exp_constants_array = new double[9];
+ // Input values smaller than this always return 0.
+ math_exp_constants_array[0] = -708.39641853226408;
+ // Input values larger than this always return +Infinity.
+ math_exp_constants_array[1] = 709.78271289338397;
+ math_exp_constants_array[2] = V8_INFINITY;
+ // The rest is black magic. Do not attempt to understand it. It is
+ // loosely based on the "expd" function published at:
+ // http://herumi.blogspot.com/2011/08/fast-double-precision-exponential.html
+ const double constant3 = (1 << kTableSizeBits) / log(2.0);
+ math_exp_constants_array[3] = constant3;
+ math_exp_constants_array[4] =
+ static_cast<double>(static_cast<int64_t>(3) << 51);
+ math_exp_constants_array[5] = 1 / constant3;
+ math_exp_constants_array[6] = 3.0000000027955394;
+ math_exp_constants_array[7] = 0.16666666685227835;
+ math_exp_constants_array[8] = 1;
+
+ math_exp_log_table_array = new double[kTableSize];
+ for (int i = 0; i < kTableSize; i++) {
+ double value = pow(2, i / kTableSizeDouble);
+ uint64_t bits = BitCast<uint64_t, double>(value);
+ bits &= (static_cast<uint64_t>(1) << 52) - 1;
+ double mantissa = BitCast<double, uint64_t>(bits);
+ math_exp_log_table_array[i] = mantissa;
+ }
+
+ math_exp_data_initialized = true;
+ }
+}
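
The constants and table above drive a table-based exponential: x is split as n * ln2 / 2^11 + r with |r| <= ln2/4096, so e^x = 2^(n >> 11) * 2^((n & 2047) / 2048) * e^r, where the middle factor comes from the 2048-entry mantissa table and e^r needs only a very short polynomial. The generated code does this with bit tricks on the double representation; the sketch below shows just the decomposition, with hypothetical names and a plain Taylor term in place of V8's tuned coefficients:

    var TABLE_BITS = 11;
    var TABLE_SIZE = 1 << TABLE_BITS;            // 2048, as in kTableSize above
    var INV_STEP   = TABLE_SIZE / Math.LN2;      // corresponds to math_exp_constants_array[3]
    var STEP       = Math.LN2 / TABLE_SIZE;      // corresponds to math_exp_constants_array[5]

    // 2^(j / 2048) for j = 0 .. 2047; V8 keeps only the mantissa bits of each entry.
    var table = [];
    for (var j = 0; j < TABLE_SIZE; j++) table[j] = Math.pow(2, j / TABLE_SIZE);

    function fastExp(x) {
      var n = Math.round(x * INV_STEP);              // x ~= n * STEP + r
      var r = x - n * STEP;                          // |r| <= ln2 / 4096
      var powerOfTwo = Math.pow(2, n >> TABLE_BITS); // exponent contribution
      var fromTable  = table[n & (TABLE_SIZE - 1)];  // fractional power of two
      var taylor     = 1 + r + 0.5 * r * r;          // short series for e^r
      return powerOfTwo * fromTable * taylor;
    }
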
+
+
+void ExternalReference::TearDownMathExpData() {
+ delete[] math_exp_constants_array;
+ delete[] math_exp_log_table_array;
+ delete math_exp_data_mutex;
}
@@ -784,8 +991,8 @@ ExternalReference::ExternalReference(const Runtime::Function* f,
: address_(Redirect(isolate, f->entry)) {}
-ExternalReference ExternalReference::isolate_address() {
- return ExternalReference(Isolate::Current());
+ExternalReference ExternalReference::isolate_address(Isolate* isolate) {
+ return ExternalReference(isolate);
}
@@ -874,11 +1081,30 @@ ExternalReference ExternalReference::get_date_field_function(
}
+ExternalReference ExternalReference::get_make_code_young_function(
+ Isolate* isolate) {
+ return ExternalReference(Redirect(
+ isolate, FUNCTION_ADDR(Code::MakeCodeAgeSequenceYoung)));
+}
+
+
+ExternalReference ExternalReference::get_mark_code_as_executed_function(
+ Isolate* isolate) {
+ return ExternalReference(Redirect(
+ isolate, FUNCTION_ADDR(Code::MarkCodeAsExecuted)));
+}
+
+
ExternalReference ExternalReference::date_cache_stamp(Isolate* isolate) {
return ExternalReference(isolate->date_cache()->stamp_address());
}
+ExternalReference ExternalReference::stress_deopt_count(Isolate* isolate) {
+ return ExternalReference(isolate->stress_deopt_count_address());
+}
+
+
ExternalReference ExternalReference::transcendental_cache_array_address(
Isolate* isolate) {
return ExternalReference(
@@ -900,6 +1126,20 @@ ExternalReference ExternalReference::compute_output_frames_function(
}
+ExternalReference ExternalReference::log_enter_external_function(
+ Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(Logger::EnterExternal)));
+}
+
+
+ExternalReference ExternalReference::log_leave_external_function(
+ Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(Logger::LeaveExternal)));
+}
+
+
ExternalReference ExternalReference::keyed_lookup_cache_keys(Isolate* isolate) {
return ExternalReference(isolate->keyed_lookup_cache()->keys_address());
}
@@ -917,6 +1157,12 @@ ExternalReference ExternalReference::roots_array_start(Isolate* isolate) {
}
+ExternalReference ExternalReference::allocation_sites_list_address(
+ Isolate* isolate) {
+ return ExternalReference(isolate->heap()->allocation_sites_list_address());
+}
+
+
ExternalReference ExternalReference::address_of_stack_limit(Isolate* isolate) {
return ExternalReference(isolate->stack_guard()->address_of_jslimit());
}
@@ -969,18 +1215,56 @@ ExternalReference ExternalReference::new_space_allocation_limit_address(
}
-ExternalReference ExternalReference::handle_scope_level_address() {
- return ExternalReference(HandleScope::current_level_address());
+ExternalReference ExternalReference::old_pointer_space_allocation_top_address(
+ Isolate* isolate) {
+ return ExternalReference(
+ isolate->heap()->OldPointerSpaceAllocationTopAddress());
}
-ExternalReference ExternalReference::handle_scope_next_address() {
- return ExternalReference(HandleScope::current_next_address());
+ExternalReference ExternalReference::old_pointer_space_allocation_limit_address(
+ Isolate* isolate) {
+ return ExternalReference(
+ isolate->heap()->OldPointerSpaceAllocationLimitAddress());
}
-ExternalReference ExternalReference::handle_scope_limit_address() {
- return ExternalReference(HandleScope::current_limit_address());
+ExternalReference ExternalReference::old_data_space_allocation_top_address(
+ Isolate* isolate) {
+ return ExternalReference(
+ isolate->heap()->OldDataSpaceAllocationTopAddress());
+}
+
+
+ExternalReference ExternalReference::old_data_space_allocation_limit_address(
+ Isolate* isolate) {
+ return ExternalReference(
+ isolate->heap()->OldDataSpaceAllocationLimitAddress());
+}
+
+
+ExternalReference ExternalReference::
+ new_space_high_promotion_mode_active_address(Isolate* isolate) {
+ return ExternalReference(
+ isolate->heap()->NewSpaceHighPromotionModeActiveAddress());
+}
+
+
+ExternalReference ExternalReference::handle_scope_level_address(
+ Isolate* isolate) {
+ return ExternalReference(HandleScope::current_level_address(isolate));
+}
+
+
+ExternalReference ExternalReference::handle_scope_next_address(
+ Isolate* isolate) {
+ return ExternalReference(HandleScope::current_next_address(isolate));
+}
+
+
+ExternalReference ExternalReference::handle_scope_limit_address(
+ Isolate* isolate) {
+ return ExternalReference(HandleScope::current_limit_address(isolate));
}
@@ -1018,6 +1302,12 @@ ExternalReference ExternalReference::address_of_one_half() {
}
+ExternalReference ExternalReference::address_of_minus_one_half() {
+ return ExternalReference(
+ reinterpret_cast<void*>(&double_constants.minus_one_half));
+}
+
+
ExternalReference ExternalReference::address_of_minus_zero() {
return ExternalReference(
reinterpret_cast<void*>(&double_constants.minus_zero));
@@ -1053,12 +1343,26 @@ ExternalReference ExternalReference::address_of_the_hole_nan() {
}
+ExternalReference ExternalReference::record_object_allocation_function(
+ Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate,
+ FUNCTION_ADDR(HeapProfiler::RecordObjectAllocationFromMasm)));
+}
+
+
+ExternalReference ExternalReference::address_of_uint32_bias() {
+ return ExternalReference(
+ reinterpret_cast<void*>(&double_constants.uint32_bias));
+}
+
+
#ifndef V8_INTERPRETED_REGEXP
ExternalReference ExternalReference::re_check_stack_guard_state(
Isolate* isolate) {
Address function;
-#ifdef V8_TARGET_ARCH_X64
+#if V8_TARGET_ARCH_X64
function = FUNCTION_ADDR(RegExpMacroAssemblerX64::CheckStackGuardState);
#elif V8_TARGET_ARCH_IA32
function = FUNCTION_ADDR(RegExpMacroAssemblerIA32::CheckStackGuardState);
@@ -1072,6 +1376,7 @@ ExternalReference ExternalReference::re_check_stack_guard_state(
return ExternalReference(Redirect(isolate, function));
}
+
ExternalReference ExternalReference::re_grow_stack(Isolate* isolate) {
return ExternalReference(
Redirect(isolate, FUNCTION_ADDR(NativeRegExpMacroAssembler::GrowStack)));
@@ -1084,6 +1389,7 @@ ExternalReference ExternalReference::re_case_insensitive_compare_uc16(
FUNCTION_ADDR(NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16)));
}
+
ExternalReference ExternalReference::re_word_character_map() {
return ExternalReference(
NativeRegExpMacroAssembler::word_character_map_address());
@@ -1186,12 +1492,46 @@ ExternalReference ExternalReference::math_log_double_function(
}
+ExternalReference ExternalReference::math_exp_constants(int constant_index) {
+ ASSERT(math_exp_data_initialized);
+ return ExternalReference(
+ reinterpret_cast<void*>(math_exp_constants_array + constant_index));
+}
+
+
+ExternalReference ExternalReference::math_exp_log_table() {
+ ASSERT(math_exp_data_initialized);
+ return ExternalReference(reinterpret_cast<void*>(math_exp_log_table_array));
+}
+
+
ExternalReference ExternalReference::page_flags(Page* page) {
return ExternalReference(reinterpret_cast<Address>(page) +
MemoryChunk::kFlagsOffset);
}
+ExternalReference ExternalReference::ForDeoptEntry(Address entry) {
+ return ExternalReference(entry);
+}
+
+
+double power_helper(double x, double y) {
+ int y_int = static_cast<int>(y);
+ if (y == y_int) {
+ return power_double_int(x, y_int); // Returns 1 if exponent is 0.
+ }
+ if (y == 0.5) {
+ return (std::isinf(x)) ? V8_INFINITY
+ : fast_sqrt(x + 0.0); // Convert -0 to +0.
+ }
+ if (y == -0.5) {
+ return (std::isinf(x)) ? 0 : 1.0 / fast_sqrt(x + 0.0); // Convert -0 to +0.
+ }
+ return power_double_double(x, y);
+}
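
power_helper routes y = +/-0.5 through fast_sqrt; the x + 0.0 in those branches converts -0 to +0 (a bare sqrt would preserve the sign of zero), and the std::isinf checks cover x = -Infinity, where sqrt would produce NaN but pow must yield +Infinity (or +0 for y = -0.5). The observable contract, stated in plain JavaScript:

    1 / Math.pow(-0, 0.5) === Infinity;     // result is +0, not the -0 that sqrt(-0) gives
    Math.pow(-0, -0.5) === Infinity;        // 1 / +0, thanks to the "x + 0.0" conversion
    Math.pow(-Infinity, 0.5) === Infinity;  // sqrt(-Infinity) alone would be NaN
    Math.pow(-Infinity, -0.5) === 0;        // +0
    Math.pow(NaN, 0) === 1;                 // "Returns 1 if exponent is 0"
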
+
+
// Helper function to compute x^y, where y is known to be an
// integer. Uses binary decomposition to limit the number of
// multiplications; see the discussion in "Hacker's Delight" by Henry
@@ -1212,10 +1552,11 @@ double power_double_int(double x, int y) {
double power_double_double(double x, double y) {
-#ifdef __MINGW64_VERSION_MAJOR
+#if defined(__MINGW64_VERSION_MAJOR) && \
+ (!defined(__MINGW64_VERSION_RC) || __MINGW64_VERSION_RC < 1)
// MinGW64 has a custom implementation for pow. This handles certain
// special cases that are different.
- if ((x == 0.0 || isinf(x)) && isfinite(y)) {
+ if ((x == 0.0 || std::isinf(x)) && std::isfinite(y)) {
double f;
if (modf(y, &f) != 0.0) return ((x == 0.0) ^ (y > 0)) ? V8_INFINITY : 0;
}
@@ -1228,7 +1569,9 @@ double power_double_double(double x, double y) {
// The checks for special cases can be dropped in ia32 because it has already
// been done in generated code before bailing out here.
- if (isnan(y) || ((x == 1 || x == -1) && isinf(y))) return OS::nan_value();
+ if (std::isnan(y) || ((x == 1 || x == -1) && std::isinf(y))) {
+ return OS::nan_value();
+ }
return pow(x, y);
}
@@ -1330,6 +1673,10 @@ void PositionsRecorder::RecordPosition(int pos) {
gdbjit_lineinfo_->SetPosition(assembler_->pc_offset(), pos, false);
}
#endif
+ LOG_CODE_EVENT(assembler_->isolate(),
+ CodeLinePosInfoAddPositionEvent(jit_handler_data_,
+ assembler_->pc_offset(),
+ pos));
}
@@ -1342,6 +1689,11 @@ void PositionsRecorder::RecordStatementPosition(int pos) {
gdbjit_lineinfo_->SetPosition(assembler_->pc_offset(), pos, true);
}
#endif
+ LOG_CODE_EVENT(assembler_->isolate(),
+ CodeLinePosInfoAddStatementPositionEvent(
+ jit_handler_data_,
+ assembler_->pc_offset(),
+ pos));
}
diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h
index a0e55cc81..f0b7fed90 100644
--- a/deps/v8/src/assembler.h
+++ b/deps/v8/src/assembler.h
@@ -50,24 +50,99 @@ class ApiFunction;
namespace internal {
-struct StatsCounter;
+class StatsCounter;
// -----------------------------------------------------------------------------
// Platform independent assembler base class.
class AssemblerBase: public Malloced {
public:
- explicit AssemblerBase(Isolate* isolate);
+ AssemblerBase(Isolate* isolate, void* buffer, int buffer_size);
+ virtual ~AssemblerBase();
Isolate* isolate() const { return isolate_; }
- int jit_cookie() { return jit_cookie_; }
+ int jit_cookie() const { return jit_cookie_; }
+
+ bool emit_debug_code() const { return emit_debug_code_; }
+ void set_emit_debug_code(bool value) { emit_debug_code_ = value; }
+
+ bool predictable_code_size() const { return predictable_code_size_; }
+ void set_predictable_code_size(bool value) { predictable_code_size_ = value; }
+
+ uint64_t enabled_cpu_features() const { return enabled_cpu_features_; }
+ void set_enabled_cpu_features(uint64_t features) {
+ enabled_cpu_features_ = features;
+ }
+ bool IsEnabled(CpuFeature f) {
+ return (enabled_cpu_features_ & (static_cast<uint64_t>(1) << f)) != 0;
+ }
// Overwrite a host NaN with a quiet target NaN. Used by mksnapshot for
// cross-snapshotting.
static void QuietNaN(HeapObject* nan) { }
+ int pc_offset() const { return static_cast<int>(pc_ - buffer_); }
+
+ static const int kMinimalBufferSize = 4*KB;
+
+ protected:
+ // The buffer into which code and relocation info are generated. It could
+ // either be owned by the assembler or be provided externally.
+ byte* buffer_;
+ int buffer_size_;
+ bool own_buffer_;
+
+ // The program counter, which points into the buffer above and moves forward.
+ byte* pc_;
+
private:
Isolate* isolate_;
int jit_cookie_;
+ uint64_t enabled_cpu_features_;
+ bool emit_debug_code_;
+ bool predictable_code_size_;
+};
+
+
+// Avoids using instructions that vary in size in unpredictable ways between the
+// snapshot and the running VM.
+class PredictableCodeSizeScope {
+ public:
+ PredictableCodeSizeScope(AssemblerBase* assembler, int expected_size);
+ ~PredictableCodeSizeScope();
+
+ private:
+ AssemblerBase* assembler_;
+ int expected_size_;
+ int start_offset_;
+ bool old_value_;
+};
+
+
+// Enable a specified feature within a scope.
+class CpuFeatureScope BASE_EMBEDDED {
+ public:
+#ifdef DEBUG
+ CpuFeatureScope(AssemblerBase* assembler, CpuFeature f);
+ ~CpuFeatureScope();
+
+ private:
+ AssemblerBase* assembler_;
+ uint64_t old_enabled_;
+#else
+ CpuFeatureScope(AssemblerBase* assembler, CpuFeature f) {}
+#endif
+};
+
+
+// Enable an unsupported feature within a scope when cross-compiling for a
+// different CPU.
+class PlatformFeatureScope BASE_EMBEDDED {
+ public:
+ explicit PlatformFeatureScope(CpuFeature f);
+ ~PlatformFeatureScope();
+
+ private:
+ uint64_t old_cross_compile_;
};
@@ -133,7 +208,6 @@ class Label BASE_EMBEDDED {
}
friend class Assembler;
- friend class RegexpAssembler;
friend class Displacement;
friend class RegExpMacroAssemblerIrregexp;
};
@@ -191,7 +265,7 @@ class RelocInfo BASE_EMBEDDED {
CODE_TARGET_CONTEXT, // Code target used for contextual loads and stores.
DEBUG_BREAK, // Code target for the debugger statement.
EMBEDDED_OBJECT,
- GLOBAL_PROPERTY_CELL,
+ CELL,
// Everything after runtime_entry (inclusive) is not GC'ed.
RUNTIME_ENTRY,
@@ -210,9 +284,16 @@ class RelocInfo BASE_EMBEDDED {
// add more as needed
// Pseudo-types
NUMBER_OF_MODES, // There are at most 15 modes with noncompact encoding.
- NONE, // never recorded
+ NONE32, // never recorded 32-bit value
+ NONE64, // never recorded 64-bit value
+    CODE_AGE_SEQUENCE, // Not stored in RelocInfo array, used explicitly by
+ // code aging.
+ FIRST_REAL_RELOC_MODE = CODE_TARGET,
+ LAST_REAL_RELOC_MODE = CONST_POOL,
+ FIRST_PSEUDO_RELOC_MODE = CODE_AGE_SEQUENCE,
+ LAST_PSEUDO_RELOC_MODE = CODE_AGE_SEQUENCE,
LAST_CODE_ENUM = DEBUG_BREAK,
- LAST_GCED_ENUM = GLOBAL_PROPERTY_CELL,
+ LAST_GCED_ENUM = CELL,
// Modes <= LAST_COMPACT_ENUM are guaranteed to have compact encoding.
LAST_COMPACT_ENUM = CODE_TARGET_WITH_ID,
LAST_STANDARD_NONCOMPACT_ENUM = INTERNAL_REFERENCE
@@ -224,7 +305,19 @@ class RelocInfo BASE_EMBEDDED {
RelocInfo(byte* pc, Mode rmode, intptr_t data, Code* host)
: pc_(pc), rmode_(rmode), data_(data), host_(host) {
}
+ RelocInfo(byte* pc, double data64)
+ : pc_(pc), rmode_(NONE64), data64_(data64), host_(NULL) {
+ }
+ static inline bool IsRealRelocMode(Mode mode) {
+ return mode >= FIRST_REAL_RELOC_MODE &&
+ mode <= LAST_REAL_RELOC_MODE;
+ }
+ static inline bool IsPseudoRelocMode(Mode mode) {
+ ASSERT(!IsRealRelocMode(mode));
+ return mode >= FIRST_PSEUDO_RELOC_MODE &&
+ mode <= LAST_PSEUDO_RELOC_MODE;
+ }
static inline bool IsConstructCall(Mode mode) {
return mode == CONSTRUCT_CALL;
}
@@ -234,6 +327,9 @@ class RelocInfo BASE_EMBEDDED {
static inline bool IsEmbeddedObject(Mode mode) {
return mode == EMBEDDED_OBJECT;
}
+ static inline bool IsRuntimeEntry(Mode mode) {
+ return mode == RUNTIME_ENTRY;
+ }
// Is the relocation mode affected by GC?
static inline bool IsGCRelocMode(Mode mode) {
return mode <= LAST_GCED_ENUM;
@@ -262,6 +358,12 @@ class RelocInfo BASE_EMBEDDED {
static inline bool IsDebugBreakSlot(Mode mode) {
return mode == DEBUG_BREAK_SLOT;
}
+ static inline bool IsNone(Mode mode) {
+ return mode == NONE32 || mode == NONE64;
+ }
+ static inline bool IsCodeAgeSequence(Mode mode) {
+ return mode == CODE_AGE_SEQUENCE;
+ }
static inline int ModeMask(Mode mode) { return 1 << mode; }
// Accessors
@@ -269,6 +371,7 @@ class RelocInfo BASE_EMBEDDED {
void set_pc(byte* pc) { pc_ = pc; }
Mode rmode() const { return rmode_; }
intptr_t data() const { return data_; }
+ double data64() const { return data64_; }
Code* host() const { return host_; }
// Apply a relocation by delta bytes
@@ -281,7 +384,7 @@ class RelocInfo BASE_EMBEDDED {
// Read/modify the code target in the branch/call instruction
// this relocation applies to;
- // can only be called if IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY
+ // can only be called if IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
INLINE(Address target_address());
INLINE(void set_target_address(Address target,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER));
@@ -290,11 +393,17 @@ class RelocInfo BASE_EMBEDDED {
INLINE(Object** target_object_address());
INLINE(void set_target_object(Object* target,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER));
- INLINE(JSGlobalPropertyCell* target_cell());
- INLINE(Handle<JSGlobalPropertyCell> target_cell_handle());
- INLINE(void set_target_cell(JSGlobalPropertyCell* cell,
+ INLINE(Address target_runtime_entry(Assembler* origin));
+ INLINE(void set_target_runtime_entry(Address target,
+ WriteBarrierMode mode =
+ UPDATE_WRITE_BARRIER));
+ INLINE(Cell* target_cell());
+ INLINE(Handle<Cell> target_cell_handle());
+ INLINE(void set_target_cell(Cell* cell,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER));
-
+ INLINE(Handle<Object> code_age_stub_handle(Assembler* origin));
+ INLINE(Code* code_age_stub());
+ INLINE(void set_code_age_stub(Code* stub));
// Read the address of the word containing the target_address in an
// instruction stream. What this means exactly is architecture-independent.
@@ -328,7 +437,7 @@ class RelocInfo BASE_EMBEDDED {
INLINE(Object** call_object_address());
template<typename StaticVisitor> inline void Visit(Heap* heap);
- inline void Visit(ObjectVisitor* v);
+ inline void Visit(Isolate* isolate, ObjectVisitor* v);
// Patch the code with some other code.
void PatchCode(byte* instructions, int instruction_count);
@@ -344,10 +453,16 @@ class RelocInfo BASE_EMBEDDED {
// debugger.
INLINE(bool IsPatchedDebugBreakSlotSequence());
+#ifdef DEBUG
+ // Check whether the given code contains relocation information that
+ // either is position-relative or movable by the garbage collector.
+ static bool RequiresRelocation(const CodeDesc& desc);
+#endif
+
#ifdef ENABLE_DISASSEMBLER
// Printing
static const char* RelocModeName(Mode rmode);
- void Print(FILE* out);
+ void Print(Isolate* isolate, FILE* out);
#endif // ENABLE_DISASSEMBLER
#ifdef VERIFY_HEAP
void Verify();
@@ -366,7 +481,10 @@ class RelocInfo BASE_EMBEDDED {
// comment).
byte* pc_;
Mode rmode_;
- intptr_t data_;
+ union {
+ intptr_t data_;
+ double data64_;
+ };
Code* host_;
// Code and Embedded Object pointers on some platforms are stored split
// across two consecutive 32-bit instructions. Heap management
@@ -487,6 +605,7 @@ class RelocIterator: public Malloced {
byte* pos_;
byte* end_;
+ byte* code_age_sequence_;
RelocInfo rinfo_;
bool done_;
int mode_mask_;
@@ -537,18 +656,31 @@ class ExternalReference BASE_EMBEDDED {
BUILTIN_FP_INT_CALL,
// Direct call to API function callback.
- // Handle<Value> f(v8::Arguments&)
+ // void f(v8::FunctionCallbackInfo&)
DIRECT_API_CALL,
+ // Call to function callback via InvokeFunctionCallback.
+ // void f(v8::FunctionCallbackInfo&, v8::FunctionCallback)
+ PROFILING_API_CALL,
+
// Direct call to accessor getter callback.
- // Handle<value> f(Local<String> property, AccessorInfo& info)
- DIRECT_GETTER_CALL
+ // void f(Local<String> property, PropertyCallbackInfo& info)
+ DIRECT_GETTER_CALL,
+
+ // Call to accessor getter callback via InvokeAccessorGetterCallback.
+ // void f(Local<String> property, PropertyCallbackInfo& info,
+ // AccessorGetterCallback callback)
+ PROFILING_GETTER_CALL
};
static void SetUp();
+ static void InitializeMathExpData();
+ static void TearDownMathExpData();
typedef void* ExternalReferenceRedirector(void* original, Type type);
+ ExternalReference() : address_(NULL) {}
+
ExternalReference(Builtins::CFunctionId id, Isolate* isolate);
ExternalReference(ApiFunction* ptr, Type type, Isolate* isolate);
@@ -571,8 +703,8 @@ class ExternalReference BASE_EMBEDDED {
explicit ExternalReference(const SCTableReference& table_ref);
- // Isolate::Current() as an external reference.
- static ExternalReference isolate_address();
+ // Isolate as an external reference.
+ static ExternalReference isolate_address(Isolate* isolate);
// One-of-a-kind references. These references are not part of a general
// pattern. This means that they have to be added to the
@@ -595,10 +727,20 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference get_date_field_function(Isolate* isolate);
static ExternalReference date_cache_stamp(Isolate* isolate);
+ static ExternalReference get_make_code_young_function(Isolate* isolate);
+ static ExternalReference get_mark_code_as_executed_function(Isolate* isolate);
+
+ // New heap objects tracking support.
+ static ExternalReference record_object_allocation_function(Isolate* isolate);
+
// Deoptimization support.
static ExternalReference new_deoptimizer_function(Isolate* isolate);
static ExternalReference compute_output_frames_function(Isolate* isolate);
+ // Log support.
+ static ExternalReference log_enter_external_function(Isolate* isolate);
+ static ExternalReference log_leave_external_function(Isolate* isolate);
+
// Static data in the keyed lookup cache.
static ExternalReference keyed_lookup_cache_keys(Isolate* isolate);
static ExternalReference keyed_lookup_cache_field_offsets(Isolate* isolate);
@@ -606,6 +748,9 @@ class ExternalReference BASE_EMBEDDED {
// Static variable Heap::roots_array_start()
static ExternalReference roots_array_start(Isolate* isolate);
+ // Static variable Heap::allocation_sites_list_address()
+ static ExternalReference allocation_sites_list_address(Isolate* isolate);
+
// Static variable StackGuard::address_of_jslimit()
static ExternalReference address_of_stack_limit(Isolate* isolate);
@@ -634,6 +779,16 @@ class ExternalReference BASE_EMBEDDED {
// Used for fast allocation in generated code.
static ExternalReference new_space_allocation_top_address(Isolate* isolate);
static ExternalReference new_space_allocation_limit_address(Isolate* isolate);
+ static ExternalReference old_pointer_space_allocation_top_address(
+ Isolate* isolate);
+ static ExternalReference old_pointer_space_allocation_limit_address(
+ Isolate* isolate);
+ static ExternalReference old_data_space_allocation_top_address(
+ Isolate* isolate);
+ static ExternalReference old_data_space_allocation_limit_address(
+ Isolate* isolate);
+ static ExternalReference new_space_high_promotion_mode_active_address(
+ Isolate* isolate);
static ExternalReference double_fp_operation(Token::Value operation,
Isolate* isolate);
@@ -641,9 +796,9 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference power_double_double_function(Isolate* isolate);
static ExternalReference power_double_int_function(Isolate* isolate);
- static ExternalReference handle_scope_next_address();
- static ExternalReference handle_scope_limit_address();
- static ExternalReference handle_scope_level_address();
+ static ExternalReference handle_scope_next_address(Isolate* isolate);
+ static ExternalReference handle_scope_limit_address(Isolate* isolate);
+ static ExternalReference handle_scope_level_address(Isolate* isolate);
static ExternalReference scheduled_exception_address(Isolate* isolate);
static ExternalReference address_of_pending_message_obj(Isolate* isolate);
@@ -653,21 +808,30 @@ class ExternalReference BASE_EMBEDDED {
// Static variables containing common double constants.
static ExternalReference address_of_min_int();
static ExternalReference address_of_one_half();
+ static ExternalReference address_of_minus_one_half();
static ExternalReference address_of_minus_zero();
static ExternalReference address_of_zero();
static ExternalReference address_of_uint8_max_value();
static ExternalReference address_of_negative_infinity();
static ExternalReference address_of_canonical_non_hole_nan();
static ExternalReference address_of_the_hole_nan();
+ static ExternalReference address_of_uint32_bias();
static ExternalReference math_sin_double_function(Isolate* isolate);
static ExternalReference math_cos_double_function(Isolate* isolate);
static ExternalReference math_tan_double_function(Isolate* isolate);
static ExternalReference math_log_double_function(Isolate* isolate);
+ static ExternalReference math_exp_constants(int constant_index);
+ static ExternalReference math_exp_log_table();
+
static ExternalReference page_flags(Page* page);
- Address address() const {return reinterpret_cast<Address>(address_);}
+ static ExternalReference ForDeoptEntry(Address entry);
+
+ static ExternalReference cpu_features();
+
+ Address address() const { return reinterpret_cast<Address>(address_); }
#ifdef ENABLE_DEBUGGER_SUPPORT
// Function Debug::Break()
@@ -704,6 +868,16 @@ class ExternalReference BASE_EMBEDDED {
reinterpret_cast<ExternalReferenceRedirectorPointer*>(redirector));
}
+ static ExternalReference stress_deopt_count(Isolate* isolate);
+
+ bool operator==(const ExternalReference& other) const {
+ return address_ == other.address_;
+ }
+
+ bool operator!=(const ExternalReference& other) const {
+ return !(*this == other);
+ }
+
private:
explicit ExternalReference(void* address)
: address_(address) {}
@@ -760,6 +934,7 @@ class PositionsRecorder BASE_EMBEDDED {
#ifdef ENABLE_GDB_JIT_INTERFACE
gdbjit_lineinfo_ = NULL;
#endif
+ jit_handler_data_ = NULL;
}
#ifdef ENABLE_GDB_JIT_INTERFACE
@@ -779,7 +954,15 @@ class PositionsRecorder BASE_EMBEDDED {
return lineinfo;
}
#endif
+ void AttachJITHandlerData(void* user_data) {
+ jit_handler_data_ = user_data;
+ }
+ void* DetachJITHandlerData() {
+ void* old_data = jit_handler_data_;
+ jit_handler_data_ = NULL;
+ return old_data;
+ }
// Set current position to pos.
void RecordPosition(int pos);
@@ -802,6 +985,9 @@ class PositionsRecorder BASE_EMBEDDED {
GDBJITLineInfo* gdbjit_lineinfo_;
#endif
+ // Currently jit_handler_data_ is used to store JITHandler-specific data
+ // over the lifetime of a PositionsRecorder
+ void* jit_handler_data_;
friend class PreservePositionScope;
DISALLOW_COPY_AND_ASSIGN(PositionsRecorder);
@@ -866,6 +1052,7 @@ inline int NumberOfBitsSet(uint32_t x) {
bool EvalComparison(Token::Value op, double op1, double op2);
// Computes pow(x, y) with the special cases in the spec for Math.pow.
+double power_helper(double x, double y);
double power_double_int(double x, int y);
double power_double_double(double x, double y);
diff --git a/deps/v8/src/assert-scope.h b/deps/v8/src/assert-scope.h
new file mode 100644
index 000000000..269b280d0
--- /dev/null
+++ b/deps/v8/src/assert-scope.h
@@ -0,0 +1,184 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ASSERT_SCOPE_H_
+#define V8_ASSERT_SCOPE_H_
+
+#include "allocation.h"
+#include "platform.h"
+
+namespace v8 {
+namespace internal {
+
+class Isolate;
+
+enum PerThreadAssertType {
+ HEAP_ALLOCATION_ASSERT,
+ HANDLE_ALLOCATION_ASSERT,
+ HANDLE_DEREFERENCE_ASSERT,
+ DEFERRED_HANDLE_DEREFERENCE_ASSERT,
+ CODE_DEPENDENCY_CHANGE_ASSERT,
+ LAST_PER_THREAD_ASSERT_TYPE
+};
+
+
+#ifdef DEBUG
+class PerThreadAssertData {
+ public:
+ PerThreadAssertData() : nesting_level_(0) {
+ for (int i = 0; i < LAST_PER_THREAD_ASSERT_TYPE; i++) {
+ assert_states_[i] = true;
+ }
+ }
+
+ void set(PerThreadAssertType type, bool allow) {
+ assert_states_[type] = allow;
+ }
+
+ bool get(PerThreadAssertType type) const {
+ return assert_states_[type];
+ }
+
+ void increment_level() { ++nesting_level_; }
+ bool decrement_level() { return --nesting_level_ == 0; }
+
+ private:
+ bool assert_states_[LAST_PER_THREAD_ASSERT_TYPE];
+ int nesting_level_;
+
+ DISALLOW_COPY_AND_ASSIGN(PerThreadAssertData);
+};
+#endif // DEBUG
+
+
+class PerThreadAssertScopeBase {
+#ifdef DEBUG
+
+ protected:
+ PerThreadAssertScopeBase() {
+ data_ = GetAssertData();
+ if (data_ == NULL) {
+ data_ = new PerThreadAssertData();
+ SetThreadLocalData(data_);
+ }
+ data_->increment_level();
+ }
+
+ ~PerThreadAssertScopeBase() {
+ if (!data_->decrement_level()) return;
+ for (int i = 0; i < LAST_PER_THREAD_ASSERT_TYPE; i++) {
+ ASSERT(data_->get(static_cast<PerThreadAssertType>(i)));
+ }
+ delete data_;
+ SetThreadLocalData(NULL);
+ }
+
+ static PerThreadAssertData* GetAssertData() {
+ return reinterpret_cast<PerThreadAssertData*>(
+ Thread::GetThreadLocal(thread_local_key));
+ }
+
+ static Thread::LocalStorageKey thread_local_key;
+ PerThreadAssertData* data_;
+ friend class Isolate;
+
+ private:
+ static void SetThreadLocalData(PerThreadAssertData* data) {
+ Thread::SetThreadLocal(thread_local_key, data);
+ }
+#endif // DEBUG
+};
+
+
+
+template <PerThreadAssertType type, bool allow>
+class PerThreadAssertScope : public PerThreadAssertScopeBase {
+ public:
+#ifndef DEBUG
+ PerThreadAssertScope() { }
+ static void SetIsAllowed(bool is_allowed) { }
+#else
+ PerThreadAssertScope() {
+ old_state_ = data_->get(type);
+ data_->set(type, allow);
+ }
+
+ ~PerThreadAssertScope() { data_->set(type, old_state_); }
+
+ static bool IsAllowed() {
+ PerThreadAssertData* data = GetAssertData();
+ return data == NULL || data->get(type);
+ }
+
+ private:
+ bool old_state_;
+#endif
+};
+
+// Scope to document where we do not expect handles to be created.
+typedef PerThreadAssertScope<HANDLE_ALLOCATION_ASSERT, false>
+ DisallowHandleAllocation;
+
+// Scope to introduce an exception to DisallowHandleAllocation.
+typedef PerThreadAssertScope<HANDLE_ALLOCATION_ASSERT, true>
+ AllowHandleAllocation;
+
+// Scope to document where we do not expect any allocation and GC.
+typedef PerThreadAssertScope<HEAP_ALLOCATION_ASSERT, false>
+ DisallowHeapAllocation;
+
+// Scope to introduce an exception to DisallowHeapAllocation.
+typedef PerThreadAssertScope<HEAP_ALLOCATION_ASSERT, true>
+ AllowHeapAllocation;
+
+// Scope to document where we do not expect any handle dereferences.
+typedef PerThreadAssertScope<HANDLE_DEREFERENCE_ASSERT, false>
+ DisallowHandleDereference;
+
+// Scope to introduce an exception to DisallowHandleDereference.
+typedef PerThreadAssertScope<HANDLE_DEREFERENCE_ASSERT, true>
+ AllowHandleDereference;
+
+// Scope to document where we do not expect deferred handles to be dereferenced.
+typedef PerThreadAssertScope<DEFERRED_HANDLE_DEREFERENCE_ASSERT, false>
+ DisallowDeferredHandleDereference;
+
+// Scope to introduce an exception to DisallowDeferredHandleDereference.
+typedef PerThreadAssertScope<DEFERRED_HANDLE_DEREFERENCE_ASSERT, true>
+ AllowDeferredHandleDereference;
+
+// Scope to document where we do not expect code dependencies to change.
+typedef PerThreadAssertScope<CODE_DEPENDENCY_CHANGE_ASSERT, false>
+ DisallowCodeDependencyChange;
+
+// Scope to introduce an exception to DisallowCodeDependencyChange.
+typedef PerThreadAssertScope<CODE_DEPENDENCY_CHANGE_ASSERT, true>
+ AllowCodeDependencyChange;
+
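+
+// Example (hypothetical code): these scopes are used as stack-allocated
+// guards; the constructor records the previous per-thread state and the
+// destructor restores it, so Allow* exceptions can be nested inside
+// Disallow* prohibitions.
+//
+//   void WalkHeapObject(HeapObject* obj) {
+//     DisallowHeapAllocation no_gc;      // no allocation/GC in this scope
+//     // ... raw-pointer access to |obj| is safe here ...
+//     {
+//       AllowHeapAllocation allow_here;  // nested exception to the rule
+//       // ... allocation is permitted again inside this inner scope ...
+//     }
+//   }
+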
+} } // namespace v8::internal
+
+#endif // V8_ASSERT_SCOPE_H_
diff --git a/deps/v8/src/ast.cc b/deps/v8/src/ast.cc
index 52990b8fe..843f8c896 100644
--- a/deps/v8/src/ast.cc
+++ b/deps/v8/src/ast.cc
@@ -27,8 +27,10 @@
#include "ast.h"
-#include <math.h> // For isfinite.
+#include <cmath> // For isfinite.
#include "builtins.h"
+#include "code-stubs.h"
+#include "contexts.h"
#include "conversions.h"
#include "hashmap.h"
#include "parser.h"
@@ -55,28 +57,38 @@ AST_NODE_LIST(DECL_ACCEPT)
bool Expression::IsSmiLiteral() {
- return AsLiteral() != NULL && AsLiteral()->handle()->IsSmi();
+ return AsLiteral() != NULL && AsLiteral()->value()->IsSmi();
}
bool Expression::IsStringLiteral() {
- return AsLiteral() != NULL && AsLiteral()->handle()->IsString();
+ return AsLiteral() != NULL && AsLiteral()->value()->IsString();
}
bool Expression::IsNullLiteral() {
- return AsLiteral() != NULL && AsLiteral()->handle()->IsNull();
+ return AsLiteral() != NULL && AsLiteral()->value()->IsNull();
}
-VariableProxy::VariableProxy(Isolate* isolate, Variable* var)
- : Expression(isolate),
+bool Expression::IsUndefinedLiteral(Isolate* isolate) {
+ VariableProxy* var_proxy = AsVariableProxy();
+ if (var_proxy == NULL) return false;
+ Variable* var = var_proxy->var();
+ // The global identifier "undefined" is immutable. Everything
+ // else could be reassigned.
+ return var != NULL && var->location() == Variable::UNALLOCATED &&
+ var_proxy->name()->Equals(isolate->heap()->undefined_string());
+}
+
+
+VariableProxy::VariableProxy(Isolate* isolate, Variable* var, int position)
+ : Expression(isolate, position),
name_(var->name()),
var_(NULL), // Will be set by the call to BindTo.
is_this_(var->is_this()),
is_trivial_(false),
is_lvalue_(false),
- position_(RelocInfo::kNoPosition),
interface_(var->interface()) {
BindTo(var);
}
@@ -87,22 +99,22 @@ VariableProxy::VariableProxy(Isolate* isolate,
bool is_this,
Interface* interface,
int position)
- : Expression(isolate),
+ : Expression(isolate, position),
name_(name),
var_(NULL),
is_this_(is_this),
is_trivial_(false),
is_lvalue_(false),
- position_(position),
interface_(interface) {
// Names must be canonicalized for fast equality checks.
- ASSERT(name->IsSymbol());
+ ASSERT(name->IsInternalizedString());
}
void VariableProxy::BindTo(Variable* var) {
ASSERT(var_ == NULL); // must be bound only once
ASSERT(var != NULL); // must bind
+ ASSERT(!FLAG_harmony_modules || interface_->IsUnified(var->interface()));
ASSERT((is_this() && var->is_this()) || name_.is_identical_to(var->name()));
// Ideally CONST-ness should match. However, this is very hard to achieve
// because we don't know the exact semantics of conflicting (const and
@@ -119,14 +131,16 @@ Assignment::Assignment(Isolate* isolate,
Expression* target,
Expression* value,
int pos)
- : Expression(isolate),
+ : Expression(isolate, pos),
op_(op),
target_(target),
value_(value),
- pos_(pos),
binary_operation_(NULL),
assignment_id_(GetNextId(isolate)),
- is_monomorphic_(false) { }
+ is_monomorphic_(false),
+ is_uninitialized_(false),
+ is_pre_monomorphic_(false),
+ store_mode_(STANDARD_STORE) { }
Token::Value Assignment::binary_op() const {
@@ -173,15 +187,15 @@ LanguageMode FunctionLiteral::language_mode() const {
}
-ObjectLiteral::Property::Property(Literal* key,
- Expression* value,
- Isolate* isolate) {
+ObjectLiteralProperty::ObjectLiteralProperty(Literal* key,
+ Expression* value,
+ Isolate* isolate) {
emit_store_ = true;
key_ = key;
value_ = value;
- Object* k = *key->handle();
- if (k->IsSymbol() &&
- isolate->heap()->Proto_symbol()->Equals(String::cast(k))) {
+ Object* k = *key->value();
+ if (k->IsInternalizedString() &&
+ isolate->heap()->proto_string()->Equals(String::cast(k))) {
kind_ = PROTOTYPE;
} else if (value_->AsMaterializedLiteral() != NULL) {
kind_ = MATERIALIZED_LITERAL;
@@ -193,7 +207,8 @@ ObjectLiteral::Property::Property(Literal* key,
}
-ObjectLiteral::Property::Property(bool is_getter, FunctionLiteral* value) {
+ObjectLiteralProperty::ObjectLiteralProperty(bool is_getter,
+ FunctionLiteral* value) {
emit_store_ = true;
value_ = value;
kind_ = is_getter ? GETTER : SETTER;
@@ -217,33 +232,6 @@ bool ObjectLiteral::Property::emit_store() {
}
-bool IsEqualString(void* first, void* second) {
- ASSERT((*reinterpret_cast<String**>(first))->IsString());
- ASSERT((*reinterpret_cast<String**>(second))->IsString());
- Handle<String> h1(reinterpret_cast<String**>(first));
- Handle<String> h2(reinterpret_cast<String**>(second));
- return (*h1)->Equals(*h2);
-}
-
-
-bool IsEqualNumber(void* first, void* second) {
- ASSERT((*reinterpret_cast<Object**>(first))->IsNumber());
- ASSERT((*reinterpret_cast<Object**>(second))->IsNumber());
-
- Handle<Object> h1(reinterpret_cast<Object**>(first));
- Handle<Object> h2(reinterpret_cast<Object**>(second));
- if (h1->IsSmi()) {
- return h2->IsSmi() && *h1 == *h2;
- }
- if (h2->IsSmi()) return false;
- Handle<HeapNumber> n1 = Handle<HeapNumber>::cast(h1);
- Handle<HeapNumber> n2 = Handle<HeapNumber>::cast(h2);
- ASSERT(isfinite(n1->value()));
- ASSERT(isfinite(n2->value()));
- return n1->value() == n2->value();
-}
-
-
void ObjectLiteral::CalculateEmitStore(Zone* zone) {
ZoneAllocationPolicy allocator(zone);
@@ -252,11 +240,12 @@ void ObjectLiteral::CalculateEmitStore(Zone* zone) {
for (int i = properties()->length() - 1; i >= 0; i--) {
ObjectLiteral::Property* property = properties()->at(i);
Literal* literal = property->key();
- if (literal->handle()->IsNull()) continue;
+ if (literal->value()->IsNull()) continue;
uint32_t hash = literal->Hash();
// If the key of a computed property is in the table, do not emit
// a store for the property later.
- if (property->kind() == ObjectLiteral::Property::COMPUTED &&
+ if ((property->kind() == ObjectLiteral::Property::MATERIALIZED_LITERAL ||
+ property->kind() == ObjectLiteral::Property::COMPUTED) &&
table.Lookup(literal, hash, false, allocator) != NULL) {
property->set_emit_store(false);
} else {
@@ -277,14 +266,23 @@ void TargetCollector::AddTarget(Label* target, Zone* zone) {
}
-bool UnaryOperation::ResultOverwriteAllowed() {
- switch (op_) {
- case Token::BIT_NOT:
- case Token::SUB:
- return true;
- default:
- return false;
- }
+void UnaryOperation::RecordToBooleanTypeFeedback(TypeFeedbackOracle* oracle) {
+ // TODO(olivf) If this Operation is used in a test context, then the
+ // expression has a ToBoolean stub and we want to collect the type
+ // information. However the GraphBuilder expects it to be on the instruction
+ // corresponding to the TestContext, therefore we have to store it here and
+ // not on the operand.
+ set_to_boolean_types(oracle->ToBooleanTypes(expression()->test_id()));
+}
+
+
+void BinaryOperation::RecordToBooleanTypeFeedback(TypeFeedbackOracle* oracle) {
+ // TODO(olivf) If this Operation is used in a test context, then the right
+ // hand side has a ToBoolean stub and we want to collect the type information.
+ // However the GraphBuilder expects it to be on the instruction corresponding
+ // to the TestContext, therefore we have to store it here and not on the
+ // right hand operand.
+ set_to_boolean_types(oracle->ToBooleanTypes(right()->test_id()));
}
@@ -327,7 +325,7 @@ static bool MatchLiteralCompareTypeof(Expression* left,
Handle<String>* check) {
if (IsTypeof(left) && right->IsStringLiteral() && Token::IsEqualityOp(op)) {
*expr = left->AsUnaryOperation()->expression();
- *check = Handle<String>::cast(right->AsLiteral()->handle());
+ *check = Handle<String>::cast(right->AsLiteral()->value());
return true;
}
return false;
@@ -349,22 +347,29 @@ static bool IsVoidOfLiteral(Expression* expr) {
}
-// Check for the pattern: void <literal> equals <expression>
+// Check for the pattern: void <literal> equals <expression> or
+// undefined equals <expression>
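+// (e.g. both "void 0 == x" and "x === undefined" would match, assuming the
+// global "undefined" has not been shadowed).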
static bool MatchLiteralCompareUndefined(Expression* left,
Token::Value op,
Expression* right,
- Expression** expr) {
+ Expression** expr,
+ Isolate* isolate) {
if (IsVoidOfLiteral(left) && Token::IsEqualityOp(op)) {
*expr = right;
return true;
}
+ if (left->IsUndefinedLiteral(isolate) && Token::IsEqualityOp(op)) {
+ *expr = right;
+ return true;
+ }
return false;
}
-bool CompareOperation::IsLiteralCompareUndefined(Expression** expr) {
- return MatchLiteralCompareUndefined(left_, op_, right_, expr) ||
- MatchLiteralCompareUndefined(right_, op_, left_, expr);
+bool CompareOperation::IsLiteralCompareUndefined(
+ Expression** expr, Isolate* isolate) {
+ return MatchLiteralCompareUndefined(left_, op_, right_, expr, isolate) ||
+ MatchLiteralCompareUndefined(right_, op_, left_, expr, isolate);
}
@@ -402,34 +407,45 @@ bool FunctionDeclaration::IsInlineable() const {
// ----------------------------------------------------------------------------
// Recording of type feedback
+// TODO(rossberg): all RecordTypeFeedback functions should disappear
+// once we use the common type field in the AST consistently.
+
+
+void ForInStatement::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
+ for_in_type_ = static_cast<ForInType>(oracle->ForInType(this));
+}
+
+
+void Expression::RecordToBooleanTypeFeedback(TypeFeedbackOracle* oracle) {
+ to_boolean_types_ = oracle->ToBooleanTypes(test_id());
+}
+
+
void Property::RecordTypeFeedback(TypeFeedbackOracle* oracle,
Zone* zone) {
// Record type feedback from the oracle in the AST.
is_uninitialized_ = oracle->LoadIsUninitialized(this);
if (is_uninitialized_) return;
+ is_pre_monomorphic_ = oracle->LoadIsPreMonomorphic(this);
is_monomorphic_ = oracle->LoadIsMonomorphicNormal(this);
+ ASSERT(!is_pre_monomorphic_ || !is_monomorphic_);
receiver_types_.Clear();
if (key()->IsPropertyName()) {
- if (oracle->LoadIsBuiltin(this, Builtins::kLoadIC_ArrayLength)) {
- is_array_length_ = true;
- } else if (oracle->LoadIsBuiltin(this, Builtins::kLoadIC_StringLength)) {
- is_string_length_ = true;
- } else if (oracle->LoadIsBuiltin(this,
- Builtins::kLoadIC_FunctionPrototype)) {
+ FunctionPrototypeStub proto_stub(Code::LOAD_IC);
+ if (oracle->LoadIsStub(this, &proto_stub)) {
is_function_prototype_ = true;
} else {
Literal* lit_key = key()->AsLiteral();
- ASSERT(lit_key != NULL && lit_key->handle()->IsString());
- Handle<String> name = Handle<String>::cast(lit_key->handle());
+ ASSERT(lit_key != NULL && lit_key->value()->IsString());
+ Handle<String> name = Handle<String>::cast(lit_key->value());
oracle->LoadReceiverTypes(this, name, &receiver_types_);
}
} else if (oracle->LoadIsBuiltin(this, Builtins::kKeyedLoadIC_String)) {
is_string_access_ = true;
} else if (is_monomorphic_) {
- receiver_types_.Add(oracle->LoadMonomorphicReceiverType(this),
- zone);
- } else if (oracle->LoadIsMegamorphicWithTypeInfo(this)) {
+ receiver_types_.Add(oracle->LoadMonomorphicReceiverType(this), zone);
+ } else if (oracle->LoadIsPolymorphic(this)) {
receiver_types_.Reserve(kMaxKeyedPolymorphism, zone);
oracle->CollectKeyedReceiverTypes(PropertyFeedbackId(), &receiver_types_);
}
@@ -441,19 +457,26 @@ void Assignment::RecordTypeFeedback(TypeFeedbackOracle* oracle,
Property* prop = target()->AsProperty();
ASSERT(prop != NULL);
TypeFeedbackId id = AssignmentFeedbackId();
+ is_uninitialized_ = oracle->StoreIsUninitialized(id);
+ if (is_uninitialized_) return;
+
+ is_pre_monomorphic_ = oracle->StoreIsPreMonomorphic(id);
is_monomorphic_ = oracle->StoreIsMonomorphicNormal(id);
+ ASSERT(!is_pre_monomorphic_ || !is_monomorphic_);
receiver_types_.Clear();
if (prop->key()->IsPropertyName()) {
Literal* lit_key = prop->key()->AsLiteral();
- ASSERT(lit_key != NULL && lit_key->handle()->IsString());
- Handle<String> name = Handle<String>::cast(lit_key->handle());
+ ASSERT(lit_key != NULL && lit_key->value()->IsString());
+ Handle<String> name = Handle<String>::cast(lit_key->value());
oracle->StoreReceiverTypes(this, name, &receiver_types_);
} else if (is_monomorphic_) {
// Record receiver type for monomorphic keyed stores.
receiver_types_.Add(oracle->StoreMonomorphicReceiverType(id), zone);
- } else if (oracle->StoreIsMegamorphicWithTypeInfo(id)) {
+ store_mode_ = oracle->GetStoreMode(id);
+ } else if (oracle->StoreIsKeyedPolymorphic(id)) {
receiver_types_.Reserve(kMaxKeyedPolymorphism, zone);
oracle->CollectKeyedReceiverTypes(id, &receiver_types_);
+ store_mode_ = oracle->GetStoreMode(id);
}
}
@@ -467,26 +490,19 @@ void CountOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle,
// Record receiver type for monomorphic keyed stores.
receiver_types_.Add(
oracle->StoreMonomorphicReceiverType(id), zone);
- } else if (oracle->StoreIsMegamorphicWithTypeInfo(id)) {
+ } else if (oracle->StoreIsKeyedPolymorphic(id)) {
receiver_types_.Reserve(kMaxKeyedPolymorphism, zone);
oracle->CollectKeyedReceiverTypes(id, &receiver_types_);
+ } else {
+ oracle->CollectPolymorphicStoreReceiverTypes(id, &receiver_types_);
}
+ store_mode_ = oracle->GetStoreMode(id);
+ type_ = oracle->IncrementType(this);
}
void CaseClause::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
- TypeInfo info = oracle->SwitchType(this);
- if (info.IsSmi()) {
- compare_type_ = SMI_ONLY;
- } else if (info.IsSymbol()) {
- compare_type_ = SYMBOL_ONLY;
- } else if (info.IsNonSymbol()) {
- compare_type_ = STRING_ONLY;
- } else if (info.IsNonPrimitive()) {
- compare_type_ = OBJECT_ONLY;
- } else {
- ASSERT(compare_type_ == NONE);
- }
+ compare_type_ = oracle->ClauseType(CompareId());
}
@@ -503,14 +519,24 @@ bool Call::ComputeTarget(Handle<Map> type, Handle<String> name) {
}
LookupResult lookup(type->GetIsolate());
while (true) {
+ // If a dictionary map is found in the prototype chain before the actual
+ // target, a new target can always appear. In that case, bail out.
+ // TODO(verwaest): Alternatively a runtime negative lookup on the normal
+ // receiver or prototype could be added.
+ if (type->is_dictionary_map()) return false;
type->LookupDescriptor(NULL, *name, &lookup);
if (lookup.IsFound()) {
switch (lookup.type()) {
- case CONSTANT_FUNCTION:
+ case CONSTANT: {
// We surely know the target for a constant function.
- target_ =
- Handle<JSFunction>(lookup.GetConstantFunctionFromMap(*type));
- return true;
+ Handle<Object> constant(lookup.GetConstantFromMap(*type),
+ type->GetIsolate());
+ if (constant->IsJSFunction()) {
+ target_ = Handle<JSFunction>::cast(constant);
+ return true;
+ }
+ // Fall through.
+ }
case NORMAL:
case FIELD:
case CALLBACKS:
@@ -528,7 +554,6 @@ bool Call::ComputeTarget(Handle<Map> type, Handle<String> name) {
if (!type->prototype()->IsJSObject()) return false;
// Go up the prototype chain, recording where we are currently.
holder_ = Handle<JSObject>(JSObject::cast(type->prototype()));
- if (!holder_->HasFastProperties()) return false;
type = Handle<Map>(holder()->map());
}
}
@@ -537,16 +562,16 @@ bool Call::ComputeTarget(Handle<Map> type, Handle<String> name) {
bool Call::ComputeGlobalTarget(Handle<GlobalObject> global,
LookupResult* lookup) {
target_ = Handle<JSFunction>::null();
- cell_ = Handle<JSGlobalPropertyCell>::null();
+ cell_ = Handle<Cell>::null();
ASSERT(lookup->IsFound() &&
lookup->type() == NORMAL &&
lookup->holder() == *global);
- cell_ = Handle<JSGlobalPropertyCell>(global->GetPropertyCell(lookup));
+ cell_ = Handle<Cell>(global->GetPropertyCell(lookup));
if (cell_->value()->IsJSFunction()) {
Handle<JSFunction> candidate(JSFunction::cast(cell_->value()));
// If the function is in new space we assume it's more likely to
// change and thus prefer the general IC code.
- if (!HEAP->InNewSpace(*candidate)) {
+ if (!lookup->isolate()->heap()->InNewSpace(*candidate)) {
target_ = candidate;
return true;
}
@@ -555,6 +580,32 @@ bool Call::ComputeGlobalTarget(Handle<GlobalObject> global,
}
+Handle<JSObject> Call::GetPrototypeForPrimitiveCheck(
+ CheckType check, Isolate* isolate) {
+ v8::internal::Context* native_context = isolate->context()->native_context();
+ JSFunction* function = NULL;
+ switch (check) {
+ case RECEIVER_MAP_CHECK:
+ UNREACHABLE();
+ break;
+ case STRING_CHECK:
+ function = native_context->string_function();
+ break;
+ case SYMBOL_CHECK:
+ function = native_context->symbol_function();
+ break;
+ case NUMBER_CHECK:
+ function = native_context->number_function();
+ break;
+ case BOOLEAN_CHECK:
+ function = native_context->boolean_function();
+ break;
+ }
+ ASSERT(function != NULL);
+ return Handle<JSObject>(JSObject::cast(function->instance_prototype()));
+}
+
+
void Call::RecordTypeFeedback(TypeFeedbackOracle* oracle,
CallKind call_kind) {
is_monomorphic_ = oracle->CallIsMonomorphic(this);
@@ -565,11 +616,18 @@ void Call::RecordTypeFeedback(TypeFeedbackOracle* oracle,
} else {
// Method call. Specialize for the receiver types seen at runtime.
Literal* key = property->key()->AsLiteral();
- ASSERT(key != NULL && key->handle()->IsString());
- Handle<String> name = Handle<String>::cast(key->handle());
+ ASSERT(key != NULL && key->value()->IsString());
+ Handle<String> name = Handle<String>::cast(key->value());
+ check_type_ = oracle->GetCallCheckType(this);
receiver_types_.Clear();
- oracle->CallReceiverTypes(this, name, call_kind, &receiver_types_);
-#ifdef DEBUG
+ if (check_type_ == RECEIVER_MAP_CHECK) {
+ oracle->CallReceiverTypes(this, name, call_kind, &receiver_types_);
+ is_monomorphic_ = is_monomorphic_ && receiver_types_.length() > 0;
+ } else {
+ holder_ = GetPrototypeForPrimitiveCheck(check_type_, oracle->isolate());
+ receiver_types_.Add(handle(holder_->map()), oracle->zone());
+ }
+#ifdef ENABLE_SLOW_ASSERTS
if (FLAG_enable_slow_asserts) {
int length = receiver_types_.length();
for (int i = 0; i < length; i++) {
@@ -578,18 +636,8 @@ void Call::RecordTypeFeedback(TypeFeedbackOracle* oracle,
}
}
#endif
- check_type_ = oracle->GetCallCheckType(this);
if (is_monomorphic_) {
- Handle<Map> map;
- if (receiver_types_.length() > 0) {
- ASSERT(check_type_ == RECEIVER_MAP_CHECK);
- map = receiver_types_.at(0);
- } else {
- ASSERT(check_type_ != RECEIVER_MAP_CHECK);
- holder_ = Handle<JSObject>(
- oracle->GetPrototypeForPrimitiveCheck(check_type_));
- map = Handle<Map>(holder_->map());
- }
+ Handle<Map> map = receiver_types_.first();
is_monomorphic_ = ComputeTarget(map, name);
}
}
@@ -597,21 +645,16 @@ void Call::RecordTypeFeedback(TypeFeedbackOracle* oracle,
void CallNew::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
+ allocation_info_cell_ = oracle->GetCallNewAllocationInfoCell(this);
is_monomorphic_ = oracle->CallNewIsMonomorphic(this);
if (is_monomorphic_) {
target_ = oracle->GetCallNewTarget(this);
- }
-}
-
-
-void CompareOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
- TypeInfo info = oracle->CompareType(this);
- if (info.IsSmi()) {
- compare_type_ = SMI_ONLY;
- } else if (info.IsNonPrimitive()) {
- compare_type_ = OBJECT_ONLY;
- } else {
- ASSERT(compare_type_ == NONE);
+ Object* value = allocation_info_cell_->value();
+ ASSERT(!value->IsTheHole());
+ if (value->IsAllocationSite()) {
+ AllocationSite* site = AllocationSite::cast(value);
+ elements_kind_ = site->GetElementsKind();
+ }
}
}
@@ -626,14 +669,6 @@ void ObjectLiteral::Property::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
// ----------------------------------------------------------------------------
// Implementation of AstVisitor
-bool AstVisitor::CheckStackOverflow() {
- if (stack_overflow_) return true;
- StackLimitCheck check(isolate_);
- if (!check.HasOverflowed()) return false;
- return (stack_overflow_ = true);
-}
-
-
void AstVisitor::VisitDeclarations(ZoneList<Declaration*>* declarations) {
for (int i = 0; i < declarations->length(); i++) {
Visit(declarations->at(i));
@@ -643,7 +678,9 @@ void AstVisitor::VisitDeclarations(ZoneList<Declaration*>* declarations) {
void AstVisitor::VisitStatements(ZoneList<Statement*>* statements) {
for (int i = 0; i < statements->length(); i++) {
- Visit(statements->at(i));
+ Statement* stmt = statements->at(i);
+ Visit(stmt);
+ if (stmt->IsJump()) break;
}
}
@@ -722,12 +759,12 @@ Interval RegExpQuantifier::CaptureRegisters() {
bool RegExpAssertion::IsAnchoredAtStart() {
- return type() == RegExpAssertion::START_OF_INPUT;
+ return assertion_type() == RegExpAssertion::START_OF_INPUT;
}
bool RegExpAssertion::IsAnchoredAtEnd() {
- return type() == RegExpAssertion::END_OF_INPUT;
+ return assertion_type() == RegExpAssertion::END_OF_INPUT;
}
@@ -793,12 +830,13 @@ bool RegExpCapture::IsAnchoredAtEnd() {
// in as many cases as possible, to make it more difficult for incorrect
// parses to look as correct ones which is likely if the input and
// output formats are alike.
-class RegExpUnparser: public RegExpVisitor {
+class RegExpUnparser V8_FINAL : public RegExpVisitor {
public:
explicit RegExpUnparser(Zone* zone);
void VisitCharacterRange(CharacterRange that);
SmartArrayPointer<const char> ToString() { return stream_.ToCString(); }
-#define MAKE_CASE(Name) virtual void* Visit##Name(RegExp##Name*, void* data);
+#define MAKE_CASE(Name) virtual void* Visit##Name(RegExp##Name*, \
+ void* data) V8_OVERRIDE;
FOR_EACH_REG_EXP_TREE_TYPE(MAKE_CASE)
#undef MAKE_CASE
private:
@@ -859,7 +897,7 @@ void* RegExpUnparser::VisitCharacterClass(RegExpCharacterClass* that,
void* RegExpUnparser::VisitAssertion(RegExpAssertion* that, void* data) {
- switch (that->type()) {
+ switch (that->assertion_type()) {
case RegExpAssertion::START_OF_INPUT:
stream()->Add("@^i");
break;
@@ -896,12 +934,12 @@ void* RegExpUnparser::VisitAtom(RegExpAtom* that, void* data) {
void* RegExpUnparser::VisitText(RegExpText* that, void* data) {
if (that->elements()->length() == 1) {
- that->elements()->at(0).data.u_atom->Accept(this, data);
+ that->elements()->at(0).tree()->Accept(this, data);
} else {
stream()->Add("(!");
for (int i = 0; i < that->elements()->length(); i++) {
stream()->Add(" ");
- that->elements()->at(i).data.u_atom->Accept(this, data);
+ that->elements()->at(i).tree()->Accept(this, data);
}
stream()->Add(")");
}
@@ -1001,10 +1039,10 @@ CaseClause::CaseClause(Isolate* isolate,
Expression* label,
ZoneList<Statement*>* statements,
int pos)
- : label_(label),
+ : AstNode(pos),
+ label_(label),
statements_(statements),
- position_(pos),
- compare_type_(NONE),
+ compare_type_(Type::None(), isolate),
compare_id_(AstNode::GetNextId(isolate)),
entry_id_(AstNode::GetNextId(isolate)) {
}
@@ -1017,15 +1055,10 @@ CaseClause::CaseClause(Isolate* isolate,
#define DONT_OPTIMIZE_NODE(NodeType) \
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
increase_node_count(); \
- add_flag(kDontOptimize); \
+ set_dont_optimize_reason(k##NodeType); \
add_flag(kDontInline); \
add_flag(kDontSelfOptimize); \
}
-#define DONT_INLINE_NODE(NodeType) \
- void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
- increase_node_count(); \
- add_flag(kDontInline); \
- }
#define DONT_SELFOPTIMIZE_NODE(NodeType) \
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
increase_node_count(); \
@@ -1034,7 +1067,7 @@ CaseClause::CaseClause(Isolate* isolate,
#define DONT_CACHE_NODE(NodeType) \
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
increase_node_count(); \
- add_flag(kDontOptimize); \
+ set_dont_optimize_reason(k##NodeType); \
add_flag(kDontInline); \
add_flag(kDontSelfOptimize); \
add_flag(kDontCache); \
@@ -1050,10 +1083,13 @@ REGULAR_NODE(ContinueStatement)
REGULAR_NODE(BreakStatement)
REGULAR_NODE(ReturnStatement)
REGULAR_NODE(SwitchStatement)
+REGULAR_NODE(CaseClause)
REGULAR_NODE(Conditional)
REGULAR_NODE(Literal)
+REGULAR_NODE(ArrayLiteral)
REGULAR_NODE(ObjectLiteral)
REGULAR_NODE(RegExpLiteral)
+REGULAR_NODE(FunctionLiteral)
REGULAR_NODE(Assignment)
REGULAR_NODE(Throw)
REGULAR_NODE(Property)
@@ -1070,29 +1106,26 @@ REGULAR_NODE(CallNew)
// LOOKUP variables only result from constructs that cannot be inlined anyway.
REGULAR_NODE(VariableProxy)
-// We currently do not optimize any modules. Note in particular, that module
-// instance objects associated with ModuleLiterals are allocated during
-// scope resolution, and references to them are embedded into the code.
-// That code may hence neither be cached nor re-compiled.
+// We currently do not optimize any modules.
DONT_OPTIMIZE_NODE(ModuleDeclaration)
DONT_OPTIMIZE_NODE(ImportDeclaration)
DONT_OPTIMIZE_NODE(ExportDeclaration)
DONT_OPTIMIZE_NODE(ModuleVariable)
DONT_OPTIMIZE_NODE(ModulePath)
DONT_OPTIMIZE_NODE(ModuleUrl)
+DONT_OPTIMIZE_NODE(ModuleStatement)
+DONT_OPTIMIZE_NODE(Yield)
DONT_OPTIMIZE_NODE(WithStatement)
DONT_OPTIMIZE_NODE(TryCatchStatement)
DONT_OPTIMIZE_NODE(TryFinallyStatement)
DONT_OPTIMIZE_NODE(DebuggerStatement)
-DONT_OPTIMIZE_NODE(SharedFunctionInfoLiteral)
-
-DONT_INLINE_NODE(ArrayLiteral) // TODO(1322): Allow materialized literals.
-DONT_INLINE_NODE(FunctionLiteral)
+DONT_OPTIMIZE_NODE(NativeFunctionLiteral)
DONT_SELFOPTIMIZE_NODE(DoWhileStatement)
DONT_SELFOPTIMIZE_NODE(WhileStatement)
DONT_SELFOPTIMIZE_NODE(ForStatement)
DONT_SELFOPTIMIZE_NODE(ForInStatement)
+DONT_SELFOPTIMIZE_NODE(ForOfStatement)
DONT_CACHE_NODE(ModuleLiteral)
@@ -1103,8 +1136,9 @@ void AstConstructionVisitor::VisitCallRuntime(CallRuntime* node) {
// optimize them.
add_flag(kDontInline);
} else if (node->function()->intrinsic_type == Runtime::INLINE &&
- (node->name()->IsEqualTo(CStrVector("_ArgumentsLength")) ||
- node->name()->IsEqualTo(CStrVector("_Arguments")))) {
+ (node->name()->IsOneByteEqualTo(
+ STATIC_ASCII_VECTOR("_ArgumentsLength")) ||
+ node->name()->IsOneByteEqualTo(STATIC_ASCII_VECTOR("_Arguments")))) {
// Don't inline the %_ArgumentsLength or %_Arguments because their
// implementation will not work. There is no stack frame to get them
// from.
@@ -1114,25 +1148,24 @@ void AstConstructionVisitor::VisitCallRuntime(CallRuntime* node) {
#undef REGULAR_NODE
#undef DONT_OPTIMIZE_NODE
-#undef DONT_INLINE_NODE
#undef DONT_SELFOPTIMIZE_NODE
#undef DONT_CACHE_NODE
Handle<String> Literal::ToString() {
- if (handle_->IsString()) return Handle<String>::cast(handle_);
- ASSERT(handle_->IsNumber());
+ if (value_->IsString()) return Handle<String>::cast(value_);
+ ASSERT(value_->IsNumber());
char arr[100];
Vector<char> buffer(arr, ARRAY_SIZE(arr));
const char* str;
- if (handle_->IsSmi()) {
+ if (value_->IsSmi()) {
// Optimization only, the heap number case would subsume this.
- OS::SNPrintF(buffer, "%d", Smi::cast(*handle_)->value());
+ OS::SNPrintF(buffer, "%d", Smi::cast(*value_)->value());
str = arr;
} else {
- str = DoubleToCString(handle_->Number(), buffer);
+ str = DoubleToCString(value_->Number(), buffer);
}
- return FACTORY->NewStringFromAscii(CStrVector(str));
+ return isolate_->factory()->NewStringFromAscii(CStrVector(str));
}
diff --git a/deps/v8/src/ast.h b/deps/v8/src/ast.h
index 802ac6596..b4f7348ee 100644
--- a/deps/v8/src/ast.h
+++ b/deps/v8/src/ast.h
@@ -39,6 +39,8 @@
#include "small-pointer-list.h"
#include "smart-pointers.h"
#include "token.h"
+#include "type-info.h" // TODO(rossberg): this should eventually be removed
+#include "types.h"
#include "utils.h"
#include "variables.h"
#include "interface.h"
@@ -75,6 +77,7 @@ namespace internal {
#define STATEMENT_NODE_LIST(V) \
V(Block) \
+ V(ModuleStatement) \
V(ExpressionStatement) \
V(EmptyStatement) \
V(IfStatement) \
@@ -87,13 +90,14 @@ namespace internal {
V(WhileStatement) \
V(ForStatement) \
V(ForInStatement) \
+ V(ForOfStatement) \
V(TryCatchStatement) \
V(TryFinallyStatement) \
V(DebuggerStatement)
#define EXPRESSION_NODE_LIST(V) \
V(FunctionLiteral) \
- V(SharedFunctionInfoLiteral) \
+ V(NativeFunctionLiteral) \
V(Conditional) \
V(VariableProxy) \
V(Literal) \
@@ -101,6 +105,7 @@ namespace internal {
V(ObjectLiteral) \
V(ArrayLiteral) \
V(Assignment) \
+ V(Yield) \
V(Throw) \
V(Property) \
V(Call) \
@@ -112,11 +117,15 @@ namespace internal {
V(CompareOperation) \
V(ThisFunction)
+#define AUXILIARY_NODE_LIST(V) \
+ V(CaseClause)
+
#define AST_NODE_LIST(V) \
DECLARATION_NODE_LIST(V) \
MODULE_NODE_LIST(V) \
STATEMENT_NODE_LIST(V) \
- EXPRESSION_NODE_LIST(V)
+ EXPRESSION_NODE_LIST(V) \
+ AUXILIARY_NODE_LIST(V)
// Forward declarations
class AstConstructionVisitor;
@@ -156,22 +165,23 @@ typedef ZoneList<Handle<String> > ZoneStringList;
typedef ZoneList<Handle<Object> > ZoneObjectList;
-#define DECLARE_NODE_TYPE(type) \
- virtual void Accept(AstVisitor* v); \
- virtual AstNode::Type node_type() const { return AstNode::k##type; } \
+#define DECLARE_NODE_TYPE(type) \
+ virtual void Accept(AstVisitor* v) V8_OVERRIDE; \
+ virtual AstNode::NodeType node_type() const V8_FINAL V8_OVERRIDE { \
+ return AstNode::k##type; \
+ } \
template<class> friend class AstNodeFactory;
enum AstPropertiesFlag {
kDontInline,
- kDontOptimize,
kDontSelfOptimize,
kDontSoftInline,
kDontCache
};
-class AstProperties BASE_EMBEDDED {
+class AstProperties V8_FINAL BASE_EMBEDDED {
public:
class Flags : public EnumSet<AstPropertiesFlag, int> {};
@@ -190,7 +200,7 @@ class AstProperties BASE_EMBEDDED {
class AstNode: public ZoneObject {
public:
#define DECLARE_TYPE_ENUM(type) k##type,
- enum Type {
+ enum NodeType {
AST_NODE_LIST(DECLARE_TYPE_ENUM)
kInvalid = -1
};
@@ -200,12 +210,12 @@ class AstNode: public ZoneObject {
return zone->New(static_cast<int>(size));
}
- AstNode() { }
-
- virtual ~AstNode() { }
+ explicit AstNode(int position): position_(position) {}
+ virtual ~AstNode() {}
virtual void Accept(AstVisitor* v) = 0;
- virtual Type node_type() const = 0;
+ virtual NodeType node_type() const = 0;
+ int position() const { return position_; }
// Type testing & conversion functions overridden by concrete subclasses.
#define DECLARE_NODE_FUNCTIONS(type) \
@@ -242,24 +252,21 @@ class AstNode: public ZoneObject {
void* operator new(size_t size);
friend class CaseClause; // Generates AST IDs.
+
+ int position_;
};
-class Statement: public AstNode {
+class Statement : public AstNode {
public:
- Statement() : statement_pos_(RelocInfo::kNoPosition) {}
+ explicit Statement(int position) : AstNode(position) {}
bool IsEmpty() { return AsEmptyStatement() != NULL; }
-
- void set_statement_pos(int statement_pos) { statement_pos_ = statement_pos; }
- int statement_pos() const { return statement_pos_; }
-
- private:
- int statement_pos_;
+ virtual bool IsJump() const { return false; }
};
-class SmallMapList {
+class SmallMapList V8_FINAL {
public:
SmallMapList() {}
SmallMapList(int capacity, Zone* zone) : list_(capacity, zone) {}
@@ -271,6 +278,24 @@ class SmallMapList {
bool is_empty() const { return list_.is_empty(); }
int length() const { return list_.length(); }
+ void AddMapIfMissing(Handle<Map> map, Zone* zone) {
+ Map* updated = map->CurrentMapForDeprecated();
+ if (updated == NULL) return;
+ map = Handle<Map>(updated);
+ for (int i = 0; i < length(); ++i) {
+ if (at(i).is_identical_to(map)) return;
+ }
+ Add(map, zone);
+ }
+
+ void FilterForPossibleTransitions(Map* root_map) {
+ for (int i = list_.length() - 1; i >= 0; i--) {
+ if (at(i)->FindRootMap() != root_map) {
+ list_.RemoveElement(list_.at(i));
+ }
+ }
+ }
+
void Add(Handle<Map> handle, Zone* zone) {
list_.Add(handle.location(), zone);
}
@@ -290,7 +315,7 @@ class SmallMapList {
};
-class Expression: public AstNode {
+class Expression : public AstNode {
public:
enum Context {
// Not assigned a context yet, or else will not be visited during
@@ -304,11 +329,6 @@ class Expression: public AstNode {
kTest
};
- virtual int position() const {
- UNREACHABLE();
- return 0;
- }
-
virtual bool IsValidLeftHandSide() { return false; }
// Helpers for ToBoolean conversion.
@@ -333,6 +353,13 @@ class Expression: public AstNode {
// True iff the expression is the null literal.
bool IsNullLiteral();
+ // True if we can prove that the expression is the undefined literal.
+ bool IsUndefinedLiteral(Isolate* isolate);
+
+ // Expression type bounds
+ Bounds bounds() { return bounds_; }
+ void set_bounds(Bounds bounds) { bounds_ = bounds; }
+
// Type feedback information for assignments and properties.
virtual bool IsMonomorphic() {
UNREACHABLE();
@@ -342,30 +369,38 @@ class Expression: public AstNode {
UNREACHABLE();
return NULL;
}
- Handle<Map> GetMonomorphicReceiverType() {
- ASSERT(IsMonomorphic());
- SmallMapList* types = GetReceiverTypes();
- ASSERT(types != NULL && types->length() == 1);
- return types->at(0);
+ virtual KeyedAccessStoreMode GetStoreMode() {
+ UNREACHABLE();
+ return STANDARD_STORE;
}
+ // TODO(rossberg): this should move to its own AST node eventually.
+ virtual void RecordToBooleanTypeFeedback(TypeFeedbackOracle* oracle);
+ byte to_boolean_types() const { return to_boolean_types_; }
+
BailoutId id() const { return id_; }
TypeFeedbackId test_id() const { return test_id_; }
protected:
- explicit Expression(Isolate* isolate)
- : id_(GetNextId(isolate)),
+ Expression(Isolate* isolate, int pos)
+ : AstNode(pos),
+ bounds_(Bounds::Unbounded(isolate)),
+ id_(GetNextId(isolate)),
test_id_(GetNextId(isolate)) {}
+ void set_to_boolean_types(byte types) { to_boolean_types_ = types; }
private:
+ Bounds bounds_;
+ byte to_boolean_types_;
+
const BailoutId id_;
const TypeFeedbackId test_id_;
};
-class BreakableStatement: public Statement {
+class BreakableStatement : public Statement {
public:
- enum Type {
+ enum BreakableType {
TARGET_FOR_ANONYMOUS,
TARGET_FOR_NAMED_ONLY
};
@@ -375,21 +410,28 @@ class BreakableStatement: public Statement {
ZoneStringList* labels() const { return labels_; }
// Type testing & conversion.
- virtual BreakableStatement* AsBreakableStatement() { return this; }
+ virtual BreakableStatement* AsBreakableStatement() V8_FINAL V8_OVERRIDE {
+ return this;
+ }
// Code generation
Label* break_target() { return &break_target_; }
// Testers.
- bool is_target_for_anonymous() const { return type_ == TARGET_FOR_ANONYMOUS; }
+ bool is_target_for_anonymous() const {
+ return breakable_type_ == TARGET_FOR_ANONYMOUS;
+ }
BailoutId EntryId() const { return entry_id_; }
BailoutId ExitId() const { return exit_id_; }
protected:
- BreakableStatement(Isolate* isolate, ZoneStringList* labels, Type type)
- : labels_(labels),
- type_(type),
+ BreakableStatement(
+ Isolate* isolate, ZoneStringList* labels,
+ BreakableType breakable_type, int position)
+ : Statement(position),
+ labels_(labels),
+ breakable_type_(breakable_type),
entry_id_(GetNextId(isolate)),
exit_id_(GetNextId(isolate)) {
ASSERT(labels == NULL || labels->length() > 0);
@@ -398,14 +440,14 @@ class BreakableStatement: public Statement {
private:
ZoneStringList* labels_;
- Type type_;
+ BreakableType breakable_type_;
Label break_target_;
const BailoutId entry_id_;
const BailoutId exit_id_;
};
-class Block: public BreakableStatement {
+class Block V8_FINAL : public BreakableStatement {
public:
DECLARE_NODE_TYPE(Block)
@@ -416,6 +458,11 @@ class Block: public BreakableStatement {
ZoneList<Statement*>* statements() { return &statements_; }
bool is_initializer_block() const { return is_initializer_block_; }
+ virtual bool IsJump() const V8_OVERRIDE {
+ return !statements_.is_empty() && statements_.last()->IsJump()
+ && labels() == NULL; // Good enough as an approximation...
+ }
+
Scope* scope() const { return scope_; }
void set_scope(Scope* scope) { scope_ = scope; }
@@ -424,8 +471,9 @@ class Block: public BreakableStatement {
ZoneStringList* labels,
int capacity,
bool is_initializer_block,
+ int pos,
Zone* zone)
- : BreakableStatement(isolate, labels, TARGET_FOR_NAMED_ONLY),
+ : BreakableStatement(isolate, labels, TARGET_FOR_NAMED_ONLY, pos),
statements_(capacity, zone),
is_initializer_block_(is_initializer_block),
scope_(NULL) {
@@ -438,7 +486,7 @@ class Block: public BreakableStatement {
};
-class Declaration: public AstNode {
+class Declaration : public AstNode {
public:
VariableProxy* proxy() const { return proxy_; }
VariableMode mode() const { return mode_; }
@@ -449,8 +497,10 @@ class Declaration: public AstNode {
protected:
Declaration(VariableProxy* proxy,
VariableMode mode,
- Scope* scope)
- : proxy_(proxy),
+ Scope* scope,
+ int pos)
+ : AstNode(pos),
+ proxy_(proxy),
mode_(mode),
scope_(scope) {
ASSERT(IsDeclaredVariableMode(mode));
@@ -465,39 +515,41 @@ class Declaration: public AstNode {
};
-class VariableDeclaration: public Declaration {
+class VariableDeclaration V8_FINAL : public Declaration {
public:
DECLARE_NODE_TYPE(VariableDeclaration)
- virtual InitializationFlag initialization() const {
+ virtual InitializationFlag initialization() const V8_OVERRIDE {
return mode() == VAR ? kCreatedInitialized : kNeedsInitialization;
}
protected:
VariableDeclaration(VariableProxy* proxy,
VariableMode mode,
- Scope* scope)
- : Declaration(proxy, mode, scope) {
+ Scope* scope,
+ int pos)
+ : Declaration(proxy, mode, scope, pos) {
}
};
-class FunctionDeclaration: public Declaration {
+class FunctionDeclaration V8_FINAL : public Declaration {
public:
DECLARE_NODE_TYPE(FunctionDeclaration)
FunctionLiteral* fun() const { return fun_; }
- virtual InitializationFlag initialization() const {
+ virtual InitializationFlag initialization() const V8_OVERRIDE {
return kCreatedInitialized;
}
- virtual bool IsInlineable() const;
+ virtual bool IsInlineable() const V8_OVERRIDE;
protected:
FunctionDeclaration(VariableProxy* proxy,
VariableMode mode,
FunctionLiteral* fun,
- Scope* scope)
- : Declaration(proxy, mode, scope),
+ Scope* scope,
+ int pos)
+ : Declaration(proxy, mode, scope, pos),
fun_(fun) {
// At the moment there are no "const functions" in JavaScript...
ASSERT(mode == VAR || mode == LET);
@@ -509,20 +561,21 @@ class FunctionDeclaration: public Declaration {
};
-class ModuleDeclaration: public Declaration {
+class ModuleDeclaration V8_FINAL : public Declaration {
public:
DECLARE_NODE_TYPE(ModuleDeclaration)
Module* module() const { return module_; }
- virtual InitializationFlag initialization() const {
+ virtual InitializationFlag initialization() const V8_OVERRIDE {
return kCreatedInitialized;
}
protected:
ModuleDeclaration(VariableProxy* proxy,
Module* module,
- Scope* scope)
- : Declaration(proxy, LET, scope),
+ Scope* scope,
+ int pos)
+ : Declaration(proxy, MODULE, scope, pos),
module_(module) {
}
@@ -531,20 +584,21 @@ class ModuleDeclaration: public Declaration {
};
-class ImportDeclaration: public Declaration {
+class ImportDeclaration V8_FINAL : public Declaration {
public:
DECLARE_NODE_TYPE(ImportDeclaration)
Module* module() const { return module_; }
- virtual InitializationFlag initialization() const {
+ virtual InitializationFlag initialization() const V8_OVERRIDE {
return kCreatedInitialized;
}
protected:
ImportDeclaration(VariableProxy* proxy,
Module* module,
- Scope* scope)
- : Declaration(proxy, LET, scope),
+ Scope* scope,
+ int pos)
+ : Declaration(proxy, LET, scope, pos),
module_(module) {
}
@@ -553,31 +607,33 @@ class ImportDeclaration: public Declaration {
};
-class ExportDeclaration: public Declaration {
+class ExportDeclaration V8_FINAL : public Declaration {
public:
DECLARE_NODE_TYPE(ExportDeclaration)
- virtual InitializationFlag initialization() const {
+ virtual InitializationFlag initialization() const V8_OVERRIDE {
return kCreatedInitialized;
}
protected:
- ExportDeclaration(VariableProxy* proxy, Scope* scope)
- : Declaration(proxy, LET, scope) {}
+ ExportDeclaration(VariableProxy* proxy, Scope* scope, int pos)
+ : Declaration(proxy, LET, scope, pos) {}
};
-class Module: public AstNode {
+class Module : public AstNode {
public:
Interface* interface() const { return interface_; }
Block* body() const { return body_; }
protected:
- explicit Module(Zone* zone)
- : interface_(Interface::NewModule(zone)),
+ Module(Zone* zone, int pos)
+ : AstNode(pos),
+ interface_(Interface::NewModule(zone)),
body_(NULL) {}
- explicit Module(Interface* interface, Block* body = NULL)
- : interface_(interface),
+ Module(Interface* interface, int pos, Block* body = NULL)
+ : AstNode(pos),
+ interface_(interface),
body_(body) {}
private:
@@ -586,30 +642,31 @@ class Module: public AstNode {
};
-class ModuleLiteral: public Module {
+class ModuleLiteral V8_FINAL : public Module {
public:
DECLARE_NODE_TYPE(ModuleLiteral)
protected:
- ModuleLiteral(Block* body, Interface* interface) : Module(interface, body) {}
+ ModuleLiteral(Block* body, Interface* interface, int pos)
+ : Module(interface, pos, body) {}
};
-class ModuleVariable: public Module {
+class ModuleVariable V8_FINAL : public Module {
public:
DECLARE_NODE_TYPE(ModuleVariable)
VariableProxy* proxy() const { return proxy_; }
protected:
- inline explicit ModuleVariable(VariableProxy* proxy);
+ inline ModuleVariable(VariableProxy* proxy, int pos);
private:
VariableProxy* proxy_;
};
-class ModulePath: public Module {
+class ModulePath V8_FINAL : public Module {
public:
DECLARE_NODE_TYPE(ModulePath)
@@ -617,8 +674,8 @@ class ModulePath: public Module {
Handle<String> name() const { return name_; }
protected:
- ModulePath(Module* module, Handle<String> name, Zone* zone)
- : Module(zone),
+ ModulePath(Module* module, Handle<String> name, Zone* zone, int pos)
+ : Module(zone, pos),
module_(module),
name_(name) {
}
@@ -629,15 +686,15 @@ class ModulePath: public Module {
};
-class ModuleUrl: public Module {
+class ModuleUrl V8_FINAL : public Module {
public:
DECLARE_NODE_TYPE(ModuleUrl)
Handle<String> url() const { return url_; }
protected:
- ModuleUrl(Handle<String> url, Zone* zone)
- : Module(zone), url_(url) {
+ ModuleUrl(Handle<String> url, Zone* zone, int pos)
+ : Module(zone, pos), url_(url) {
}
private:
@@ -645,10 +702,32 @@ class ModuleUrl: public Module {
};
-class IterationStatement: public BreakableStatement {
+class ModuleStatement V8_FINAL : public Statement {
+ public:
+ DECLARE_NODE_TYPE(ModuleStatement)
+
+ VariableProxy* proxy() const { return proxy_; }
+ Block* body() const { return body_; }
+
+ protected:
+ ModuleStatement(VariableProxy* proxy, Block* body, int pos)
+ : Statement(pos),
+ proxy_(proxy),
+ body_(body) {
+ }
+
+ private:
+ VariableProxy* proxy_;
+ Block* body_;
+};
+
+
+class IterationStatement : public BreakableStatement {
public:
// Type testing & conversion.
- virtual IterationStatement* AsIterationStatement() { return this; }
+ virtual IterationStatement* AsIterationStatement() V8_FINAL V8_OVERRIDE {
+ return this;
+ }
Statement* body() const { return body_; }
@@ -660,8 +739,8 @@ class IterationStatement: public BreakableStatement {
Label* continue_target() { return &continue_target_; }
protected:
- IterationStatement(Isolate* isolate, ZoneStringList* labels)
- : BreakableStatement(isolate, labels, TARGET_FOR_ANONYMOUS),
+ IterationStatement(Isolate* isolate, ZoneStringList* labels, int pos)
+ : BreakableStatement(isolate, labels, TARGET_FOR_ANONYMOUS, pos),
body_(NULL),
osr_entry_id_(GetNextId(isolate)) {
}
@@ -673,11 +752,12 @@ class IterationStatement: public BreakableStatement {
private:
Statement* body_;
Label continue_target_;
+
const BailoutId osr_entry_id_;
};
-class DoWhileStatement: public IterationStatement {
+class DoWhileStatement V8_FINAL : public IterationStatement {
public:
DECLARE_NODE_TYPE(DoWhileStatement)
@@ -688,33 +768,27 @@ class DoWhileStatement: public IterationStatement {
Expression* cond() const { return cond_; }
- // Position where condition expression starts. We need it to make
- // the loop's condition a breakable location.
- int condition_position() { return condition_position_; }
- void set_condition_position(int pos) { condition_position_ = pos; }
-
- virtual BailoutId ContinueId() const { return continue_id_; }
- virtual BailoutId StackCheckId() const { return back_edge_id_; }
+ virtual BailoutId ContinueId() const V8_OVERRIDE { return continue_id_; }
+ virtual BailoutId StackCheckId() const V8_OVERRIDE { return back_edge_id_; }
BailoutId BackEdgeId() const { return back_edge_id_; }
protected:
- DoWhileStatement(Isolate* isolate, ZoneStringList* labels)
- : IterationStatement(isolate, labels),
+ DoWhileStatement(Isolate* isolate, ZoneStringList* labels, int pos)
+ : IterationStatement(isolate, labels, pos),
cond_(NULL),
- condition_position_(-1),
continue_id_(GetNextId(isolate)),
back_edge_id_(GetNextId(isolate)) {
}
private:
Expression* cond_;
- int condition_position_;
+
const BailoutId continue_id_;
const BailoutId back_edge_id_;
};
-class WhileStatement: public IterationStatement {
+class WhileStatement V8_FINAL : public IterationStatement {
public:
DECLARE_NODE_TYPE(WhileStatement)
@@ -731,13 +805,13 @@ class WhileStatement: public IterationStatement {
may_have_function_literal_ = value;
}
- virtual BailoutId ContinueId() const { return EntryId(); }
- virtual BailoutId StackCheckId() const { return body_id_; }
+ virtual BailoutId ContinueId() const V8_OVERRIDE { return EntryId(); }
+ virtual BailoutId StackCheckId() const V8_OVERRIDE { return body_id_; }
BailoutId BodyId() const { return body_id_; }
protected:
- WhileStatement(Isolate* isolate, ZoneStringList* labels)
- : IterationStatement(isolate, labels),
+ WhileStatement(Isolate* isolate, ZoneStringList* labels, int pos)
+ : IterationStatement(isolate, labels, pos),
cond_(NULL),
may_have_function_literal_(true),
body_id_(GetNextId(isolate)) {
@@ -745,13 +819,15 @@ class WhileStatement: public IterationStatement {
private:
Expression* cond_;
+
// True if there is a function literal subexpression in the condition.
bool may_have_function_literal_;
+
const BailoutId body_id_;
};
-class ForStatement: public IterationStatement {
+class ForStatement V8_FINAL : public IterationStatement {
public:
DECLARE_NODE_TYPE(ForStatement)
@@ -776,8 +852,8 @@ class ForStatement: public IterationStatement {
may_have_function_literal_ = value;
}
- virtual BailoutId ContinueId() const { return continue_id_; }
- virtual BailoutId StackCheckId() const { return body_id_; }
+ virtual BailoutId ContinueId() const V8_OVERRIDE { return continue_id_; }
+ virtual BailoutId StackCheckId() const V8_OVERRIDE { return body_id_; }
BailoutId BodyId() const { return body_id_; }
bool is_fast_smi_loop() { return loop_variable_ != NULL; }
@@ -785,8 +861,8 @@ class ForStatement: public IterationStatement {
void set_loop_variable(Variable* var) { loop_variable_ = var; }
protected:
- ForStatement(Isolate* isolate, ZoneStringList* labels)
- : IterationStatement(isolate, labels),
+ ForStatement(Isolate* isolate, ZoneStringList* labels, int pos)
+ : IterationStatement(isolate, labels, pos),
init_(NULL),
cond_(NULL),
next_(NULL),
@@ -800,136 +876,239 @@ class ForStatement: public IterationStatement {
Statement* init_;
Expression* cond_;
Statement* next_;
+
// True if there is a function literal subexpression in the condition.
bool may_have_function_literal_;
Variable* loop_variable_;
+
const BailoutId continue_id_;
const BailoutId body_id_;
};
-class ForInStatement: public IterationStatement {
+class ForEachStatement : public IterationStatement {
public:
- DECLARE_NODE_TYPE(ForInStatement)
+ enum VisitMode {
+ ENUMERATE, // for (each in subject) body;
+ ITERATE // for (each of subject) body;
+ };
- void Initialize(Expression* each, Expression* enumerable, Statement* body) {
+ void Initialize(Expression* each, Expression* subject, Statement* body) {
IterationStatement::Initialize(body);
each_ = each;
- enumerable_ = enumerable;
+ subject_ = subject;
}
Expression* each() const { return each_; }
- Expression* enumerable() const { return enumerable_; }
+ Expression* subject() const { return subject_; }
+
+ protected:
+ ForEachStatement(Isolate* isolate, ZoneStringList* labels, int pos)
+ : IterationStatement(isolate, labels, pos),
+ each_(NULL),
+ subject_(NULL) {
+ }
+
+ private:
+ Expression* each_;
+ Expression* subject_;
+};
- virtual BailoutId ContinueId() const { return EntryId(); }
- virtual BailoutId StackCheckId() const { return body_id_; }
- BailoutId BodyId() const { return body_id_; }
- BailoutId PrepareId() const { return prepare_id_; }
+
+class ForInStatement V8_FINAL : public ForEachStatement {
+ public:
+ DECLARE_NODE_TYPE(ForInStatement)
+
+ Expression* enumerable() const {
+ return subject();
+ }
TypeFeedbackId ForInFeedbackId() const { return reuse(PrepareId()); }
+ void RecordTypeFeedback(TypeFeedbackOracle* oracle);
+ enum ForInType { FAST_FOR_IN, SLOW_FOR_IN };
+ ForInType for_in_type() const { return for_in_type_; }
+
+ BailoutId BodyId() const { return body_id_; }
+ BailoutId PrepareId() const { return prepare_id_; }
+ virtual BailoutId ContinueId() const V8_OVERRIDE { return EntryId(); }
+ virtual BailoutId StackCheckId() const V8_OVERRIDE { return body_id_; }
protected:
- ForInStatement(Isolate* isolate, ZoneStringList* labels)
- : IterationStatement(isolate, labels),
- each_(NULL),
- enumerable_(NULL),
+ ForInStatement(Isolate* isolate, ZoneStringList* labels, int pos)
+ : ForEachStatement(isolate, labels, pos),
+ for_in_type_(SLOW_FOR_IN),
body_id_(GetNextId(isolate)),
prepare_id_(GetNextId(isolate)) {
}
- private:
- Expression* each_;
- Expression* enumerable_;
+ ForInType for_in_type_;
const BailoutId body_id_;
const BailoutId prepare_id_;
};
-class ExpressionStatement: public Statement {
+class ForOfStatement V8_FINAL : public ForEachStatement {
+ public:
+ DECLARE_NODE_TYPE(ForOfStatement)
+
+ void Initialize(Expression* each,
+ Expression* subject,
+ Statement* body,
+ Expression* assign_iterator,
+ Expression* next_result,
+ Expression* result_done,
+ Expression* assign_each) {
+ ForEachStatement::Initialize(each, subject, body);
+ assign_iterator_ = assign_iterator;
+ next_result_ = next_result;
+ result_done_ = result_done;
+ assign_each_ = assign_each;
+ }
+
+ Expression* iterable() const {
+ return subject();
+ }
+
+ // var iterator = iterable;
+ Expression* assign_iterator() const {
+ return assign_iterator_;
+ }
+
+ // var result = iterator.next();
+ Expression* next_result() const {
+ return next_result_;
+ }
+
+ // result.done
+ Expression* result_done() const {
+ return result_done_;
+ }
+
+ // each = result.value
+ Expression* assign_each() const {
+ return assign_each_;
+ }
+
+ virtual BailoutId ContinueId() const V8_OVERRIDE { return EntryId(); }
+ virtual BailoutId StackCheckId() const V8_OVERRIDE { return BackEdgeId(); }
+
+ BailoutId BackEdgeId() const { return back_edge_id_; }
+
+ protected:
+ ForOfStatement(Isolate* isolate, ZoneStringList* labels, int pos)
+ : ForEachStatement(isolate, labels, pos),
+ assign_iterator_(NULL),
+ next_result_(NULL),
+ result_done_(NULL),
+ assign_each_(NULL),
+ back_edge_id_(GetNextId(isolate)) {
+ }
+
+ Expression* assign_iterator_;
+ Expression* next_result_;
+ Expression* result_done_;
+ Expression* assign_each_;
+ const BailoutId back_edge_id_;
+};
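+
+// Example (hypothetical JavaScript): the accessors above correspond to a
+// desugaring of "for (each of iterable) body" roughly of the form
+//
+//   var iterator = iterable;          // assign_iterator()
+//   while (true) {
+//     var result = iterator.next();   // next_result()
+//     if (result.done) break;         // result_done()
+//     each = result.value;            // assign_each()
+//     body;
+//   }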
+
+
+class ExpressionStatement V8_FINAL : public Statement {
public:
DECLARE_NODE_TYPE(ExpressionStatement)
void set_expression(Expression* e) { expression_ = e; }
Expression* expression() const { return expression_; }
+ virtual bool IsJump() const V8_OVERRIDE { return expression_->IsThrow(); }
protected:
- explicit ExpressionStatement(Expression* expression)
- : expression_(expression) { }
+ ExpressionStatement(Expression* expression, int pos)
+ : Statement(pos), expression_(expression) { }
private:
Expression* expression_;
};
-class ContinueStatement: public Statement {
+class JumpStatement : public Statement {
+ public:
+ virtual bool IsJump() const V8_FINAL V8_OVERRIDE { return true; }
+
+ protected:
+ explicit JumpStatement(int pos) : Statement(pos) {}
+};
+
+
+class ContinueStatement V8_FINAL : public JumpStatement {
public:
DECLARE_NODE_TYPE(ContinueStatement)
IterationStatement* target() const { return target_; }
protected:
- explicit ContinueStatement(IterationStatement* target)
- : target_(target) { }
+ explicit ContinueStatement(IterationStatement* target, int pos)
+ : JumpStatement(pos), target_(target) { }
private:
IterationStatement* target_;
};
-class BreakStatement: public Statement {
+class BreakStatement V8_FINAL : public JumpStatement {
public:
DECLARE_NODE_TYPE(BreakStatement)
BreakableStatement* target() const { return target_; }
protected:
- explicit BreakStatement(BreakableStatement* target)
- : target_(target) { }
+ explicit BreakStatement(BreakableStatement* target, int pos)
+ : JumpStatement(pos), target_(target) { }
private:
BreakableStatement* target_;
};
-class ReturnStatement: public Statement {
+class ReturnStatement V8_FINAL : public JumpStatement {
public:
DECLARE_NODE_TYPE(ReturnStatement)
Expression* expression() const { return expression_; }
protected:
- explicit ReturnStatement(Expression* expression)
- : expression_(expression) { }
+ explicit ReturnStatement(Expression* expression, int pos)
+ : JumpStatement(pos), expression_(expression) { }
private:
Expression* expression_;
};
-class WithStatement: public Statement {
+class WithStatement V8_FINAL : public Statement {
public:
DECLARE_NODE_TYPE(WithStatement)
+ Scope* scope() { return scope_; }
Expression* expression() const { return expression_; }
Statement* statement() const { return statement_; }
protected:
- WithStatement(Expression* expression, Statement* statement)
- : expression_(expression),
+ WithStatement(
+ Scope* scope, Expression* expression, Statement* statement, int pos)
+ : Statement(pos),
+ scope_(scope),
+ expression_(expression),
statement_(statement) { }
private:
+ Scope* scope_;
Expression* expression_;
Statement* statement_;
};
-class CaseClause: public ZoneObject {
+class CaseClause V8_FINAL : public AstNode {
public:
- CaseClause(Isolate* isolate,
- Expression* label,
- ZoneList<Statement*>* statements,
- int pos);
+ DECLARE_NODE_TYPE(CaseClause)
bool is_default() const { return label_ == NULL; }
Expression* label() const {
@@ -939,58 +1118,56 @@ class CaseClause: public ZoneObject {
Label* body_target() { return &body_target_; }
ZoneList<Statement*>* statements() const { return statements_; }
- int position() const { return position_; }
- void set_position(int pos) { position_ = pos; }
-
BailoutId EntryId() const { return entry_id_; }
// Type feedback information.
TypeFeedbackId CompareId() { return compare_id_; }
void RecordTypeFeedback(TypeFeedbackOracle* oracle);
- bool IsSmiCompare() { return compare_type_ == SMI_ONLY; }
- bool IsSymbolCompare() { return compare_type_ == SYMBOL_ONLY; }
- bool IsStringCompare() { return compare_type_ == STRING_ONLY; }
- bool IsObjectCompare() { return compare_type_ == OBJECT_ONLY; }
+ Handle<Type> compare_type() { return compare_type_; }
private:
+ CaseClause(Isolate* isolate,
+ Expression* label,
+ ZoneList<Statement*>* statements,
+ int pos);
+
Expression* label_;
Label body_target_;
ZoneList<Statement*>* statements_;
- int position_;
- enum CompareTypeFeedback {
- NONE,
- SMI_ONLY,
- SYMBOL_ONLY,
- STRING_ONLY,
- OBJECT_ONLY
- };
- CompareTypeFeedback compare_type_;
+ Handle<Type> compare_type_;
+
const TypeFeedbackId compare_id_;
const BailoutId entry_id_;
};
-class SwitchStatement: public BreakableStatement {
+class SwitchStatement V8_FINAL : public BreakableStatement {
public:
DECLARE_NODE_TYPE(SwitchStatement)
void Initialize(Expression* tag, ZoneList<CaseClause*>* cases) {
tag_ = tag;
cases_ = cases;
+ switch_type_ = UNKNOWN_SWITCH;
}
Expression* tag() const { return tag_; }
ZoneList<CaseClause*>* cases() const { return cases_; }
+ enum SwitchType { UNKNOWN_SWITCH, SMI_SWITCH, STRING_SWITCH, GENERIC_SWITCH };
+ SwitchType switch_type() const { return switch_type_; }
+ void set_switch_type(SwitchType switch_type) { switch_type_ = switch_type; }
+
protected:
- SwitchStatement(Isolate* isolate, ZoneStringList* labels)
- : BreakableStatement(isolate, labels, TARGET_FOR_ANONYMOUS),
+ SwitchStatement(Isolate* isolate, ZoneStringList* labels, int pos)
+ : BreakableStatement(isolate, labels, TARGET_FOR_ANONYMOUS, pos),
tag_(NULL),
cases_(NULL) { }
private:
Expression* tag_;
ZoneList<CaseClause*>* cases_;
+ SwitchType switch_type_;
};
@@ -999,7 +1176,7 @@ class SwitchStatement: public BreakableStatement {
// the parser implicitly creates an empty statement. Use the
// HasThenStatement() and HasElseStatement() functions to check if a
// given if-statement has a then- or an else-part containing code.
-class IfStatement: public Statement {
+class IfStatement V8_FINAL : public Statement {
public:
DECLARE_NODE_TYPE(IfStatement)
@@ -1010,6 +1187,11 @@ class IfStatement: public Statement {
Statement* then_statement() const { return then_statement_; }
Statement* else_statement() const { return else_statement_; }
+ virtual bool IsJump() const V8_OVERRIDE {
+ return HasThenStatement() && then_statement()->IsJump()
+ && HasElseStatement() && else_statement()->IsJump();
+ }
+
BailoutId IfId() const { return if_id_; }
BailoutId ThenId() const { return then_id_; }
BailoutId ElseId() const { return else_id_; }
@@ -1018,8 +1200,10 @@ class IfStatement: public Statement {
IfStatement(Isolate* isolate,
Expression* condition,
Statement* then_statement,
- Statement* else_statement)
- : condition_(condition),
+ Statement* else_statement,
+ int pos)
+ : Statement(pos),
+ condition_(condition),
then_statement_(then_statement),
else_statement_(else_statement),
if_id_(GetNextId(isolate)),
@@ -1039,9 +1223,10 @@ class IfStatement: public Statement {
// NOTE: TargetCollectors are represented as nodes to fit in the target
// stack in the compiler; this should probably be reworked.
-class TargetCollector: public AstNode {
+class TargetCollector V8_FINAL : public AstNode {
public:
- explicit TargetCollector(Zone* zone) : targets_(0, zone) { }
+ explicit TargetCollector(Zone* zone)
+ : AstNode(RelocInfo::kNoPosition), targets_(0, zone) { }
// Adds a jump target to the collector. The collector stores a pointer not
// a copy of the target to make binding work, so make sure not to pass in
@@ -1049,9 +1234,9 @@ class TargetCollector: public AstNode {
void AddTarget(Label* target, Zone* zone);
// Virtual behaviour. TargetCollectors are never part of the AST.
- virtual void Accept(AstVisitor* v) { UNREACHABLE(); }
- virtual Type node_type() const { return kInvalid; }
- virtual TargetCollector* AsTargetCollector() { return this; }
+ virtual void Accept(AstVisitor* v) V8_OVERRIDE { UNREACHABLE(); }
+ virtual NodeType node_type() const V8_OVERRIDE { return kInvalid; }
+ virtual TargetCollector* AsTargetCollector() V8_OVERRIDE { return this; }
ZoneList<Label*>* targets() { return &targets_; }
@@ -1060,7 +1245,7 @@ class TargetCollector: public AstNode {
};
-class TryStatement: public Statement {
+class TryStatement : public Statement {
public:
void set_escaping_targets(ZoneList<Label*>* targets) {
escaping_targets_ = targets;
@@ -1071,8 +1256,9 @@ class TryStatement: public Statement {
ZoneList<Label*>* escaping_targets() const { return escaping_targets_; }
protected:
- TryStatement(int index, Block* try_block)
- : index_(index),
+ TryStatement(int index, Block* try_block, int pos)
+ : Statement(pos),
+ index_(index),
try_block_(try_block),
escaping_targets_(NULL) { }
@@ -1085,7 +1271,7 @@ class TryStatement: public Statement {
};
-class TryCatchStatement: public TryStatement {
+class TryCatchStatement V8_FINAL : public TryStatement {
public:
DECLARE_NODE_TYPE(TryCatchStatement)
@@ -1098,8 +1284,9 @@ class TryCatchStatement: public TryStatement {
Block* try_block,
Scope* scope,
Variable* variable,
- Block* catch_block)
- : TryStatement(index, try_block),
+ Block* catch_block,
+ int pos)
+ : TryStatement(index, try_block, pos),
scope_(scope),
variable_(variable),
catch_block_(catch_block) {
@@ -1112,15 +1299,16 @@ class TryCatchStatement: public TryStatement {
};
-class TryFinallyStatement: public TryStatement {
+class TryFinallyStatement V8_FINAL : public TryStatement {
public:
DECLARE_NODE_TYPE(TryFinallyStatement)
Block* finally_block() const { return finally_block_; }
protected:
- TryFinallyStatement(int index, Block* try_block, Block* finally_block)
- : TryStatement(index, try_block),
+ TryFinallyStatement(
+ int index, Block* try_block, Block* finally_block, int pos)
+ : TryStatement(index, try_block, pos),
finally_block_(finally_block) { }
private:
@@ -1128,59 +1316,63 @@ class TryFinallyStatement: public TryStatement {
};
-class DebuggerStatement: public Statement {
+class DebuggerStatement V8_FINAL : public Statement {
public:
DECLARE_NODE_TYPE(DebuggerStatement)
protected:
- DebuggerStatement() {}
+ explicit DebuggerStatement(int pos): Statement(pos) {}
};
-class EmptyStatement: public Statement {
+class EmptyStatement V8_FINAL : public Statement {
public:
DECLARE_NODE_TYPE(EmptyStatement)
protected:
- EmptyStatement() {}
+ explicit EmptyStatement(int pos): Statement(pos) {}
};
-class Literal: public Expression {
+class Literal V8_FINAL : public Expression {
public:
DECLARE_NODE_TYPE(Literal)
- virtual bool IsPropertyName() {
- if (handle_->IsSymbol()) {
+ virtual bool IsPropertyName() V8_OVERRIDE {
+ if (value_->IsInternalizedString()) {
uint32_t ignored;
- return !String::cast(*handle_)->AsArrayIndex(&ignored);
+ return !String::cast(*value_)->AsArrayIndex(&ignored);
}
return false;
}
Handle<String> AsPropertyName() {
ASSERT(IsPropertyName());
- return Handle<String>::cast(handle_);
+ return Handle<String>::cast(value_);
}
- virtual bool ToBooleanIsTrue() { return handle_->ToBoolean()->IsTrue(); }
- virtual bool ToBooleanIsFalse() { return handle_->ToBoolean()->IsFalse(); }
+ virtual bool ToBooleanIsTrue() V8_OVERRIDE {
+ return value_->BooleanValue();
+ }
+ virtual bool ToBooleanIsFalse() V8_OVERRIDE {
+ return !value_->BooleanValue();
+ }
// Identity testers.
bool IsNull() const {
- ASSERT(!handle_.is_null());
- return handle_->IsNull();
+ ASSERT(!value_.is_null());
+ return value_->IsNull();
}
bool IsTrue() const {
- ASSERT(!handle_.is_null());
- return handle_->IsTrue();
+ ASSERT(!value_.is_null());
+ return value_->IsTrue();
}
bool IsFalse() const {
- ASSERT(!handle_.is_null());
- return handle_->IsFalse();
+ ASSERT(!value_.is_null());
+ return value_->IsFalse();
}
- Handle<Object> handle() const { return handle_; }
+ Handle<Object> value() const { return value_; }
// Support for using Literal as a HashMap key. NOTE: Currently, this works
// only for string and number literals!
@@ -1195,19 +1387,23 @@ class Literal: public Expression {
TypeFeedbackId LiteralFeedbackId() const { return reuse(id()); }
protected:
- Literal(Isolate* isolate, Handle<Object> handle)
- : Expression(isolate),
- handle_(handle) { }
+ Literal(
+ Isolate* isolate, Handle<Object> value, int position)
+ : Expression(isolate, position),
+ value_(value),
+ isolate_(isolate) { }
private:
Handle<String> ToString();
- Handle<Object> handle_;
+ Handle<Object> value_;
+ // TODO(dcarney): remove. this is only needed for Match and Hash.
+ Isolate* isolate_;
};
// Base class for literals that needs space in the corresponding JSFunction.
-class MaterializedLiteral: public Expression {
+class MaterializedLiteral : public Expression {
public:
virtual MaterializedLiteral* AsMaterializedLiteral() { return this; }
@@ -1223,8 +1419,9 @@ class MaterializedLiteral: public Expression {
MaterializedLiteral(Isolate* isolate,
int literal_index,
bool is_simple,
- int depth)
- : Expression(isolate),
+ int depth,
+ int pos)
+ : Expression(isolate, pos),
literal_index_(literal_index),
is_simple_(is_simple),
depth_(depth) {}
@@ -1236,52 +1433,55 @@ class MaterializedLiteral: public Expression {
};
+// Property is used for passing information
+// about an object literal's properties from the parser
+// to the code generator.
+class ObjectLiteralProperty V8_FINAL : public ZoneObject {
+ public:
+ enum Kind {
+ CONSTANT, // Property with constant value (compile time).
+ COMPUTED, // Property with computed value (execution time).
+ MATERIALIZED_LITERAL, // Property value is a materialized literal.
+ GETTER, SETTER, // Property is an accessor function.
+ PROTOTYPE // Property is __proto__.
+ };
+
+ ObjectLiteralProperty(Literal* key, Expression* value, Isolate* isolate);
+
+ Literal* key() { return key_; }
+ Expression* value() { return value_; }
+ Kind kind() { return kind_; }
+
+ // Type feedback information.
+ void RecordTypeFeedback(TypeFeedbackOracle* oracle);
+ bool IsMonomorphic() { return !receiver_type_.is_null(); }
+ Handle<Map> GetReceiverType() { return receiver_type_; }
+
+ bool IsCompileTimeValue();
+
+ void set_emit_store(bool emit_store);
+ bool emit_store();
+
+ protected:
+ template<class> friend class AstNodeFactory;
+
+ ObjectLiteralProperty(bool is_getter, FunctionLiteral* value);
+ void set_key(Literal* key) { key_ = key; }
+
+ private:
+ Literal* key_;
+ Expression* value_;
+ Kind kind_;
+ bool emit_store_;
+ Handle<Map> receiver_type_;
+};
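// Illustrative sketch (not from the patch): roughly how a parser-side caller could
// build one of these properties now that the class lives at namespace scope. It
// assumes a Zone* zone, an Isolate* isolate, a Literal* key and an Expression*
// value are already in scope (hypothetical names), and that ZoneObject's
// placement-new into a Zone is used as elsewhere in the AST.
ObjectLiteralProperty* prop =
    new(zone) ObjectLiteralProperty(key, value, isolate);
if (prop->IsCompileTimeValue()) {
  // CONSTANT and MATERIALIZED_LITERAL values can be folded into the boilerplate
  // object; COMPUTED values need an explicit store when the literal is built.
}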
+
+
// An object literal has a boilerplate object that is used
// for minimizing the work when constructing it at runtime.
-class ObjectLiteral: public MaterializedLiteral {
- public:
- // Property is used for passing information
- // about an object literal's properties from the parser
- // to the code generator.
- class Property: public ZoneObject {
- public:
- enum Kind {
- CONSTANT, // Property with constant value (compile time).
- COMPUTED, // Property with computed value (execution time).
- MATERIALIZED_LITERAL, // Property value is a materialized literal.
- GETTER, SETTER, // Property is an accessor function.
- PROTOTYPE // Property is __proto__.
- };
-
- Property(Literal* key, Expression* value, Isolate* isolate);
-
- Literal* key() { return key_; }
- Expression* value() { return value_; }
- Kind kind() { return kind_; }
-
- // Type feedback information.
- void RecordTypeFeedback(TypeFeedbackOracle* oracle);
- bool IsMonomorphic() { return !receiver_type_.is_null(); }
- Handle<Map> GetReceiverType() { return receiver_type_; }
-
- bool IsCompileTimeValue();
-
- void set_emit_store(bool emit_store);
- bool emit_store();
-
- protected:
- template<class> friend class AstNodeFactory;
-
- Property(bool is_getter, FunctionLiteral* value);
- void set_key(Literal* key) { key_ = key; }
-
- private:
- Literal* key_;
- Expression* value_;
- Kind kind_;
- bool emit_store_;
- Handle<Map> receiver_type_;
- };
+class ObjectLiteral V8_FINAL : public MaterializedLiteral {
+ public:
+ typedef ObjectLiteralProperty Property;
DECLARE_NODE_TYPE(ObjectLiteral)
@@ -1289,10 +1489,9 @@ class ObjectLiteral: public MaterializedLiteral {
return constant_properties_;
}
ZoneList<Property*>* properties() const { return properties_; }
-
bool fast_elements() const { return fast_elements_; }
-
- bool has_function() { return has_function_; }
+ bool may_store_doubles() const { return may_store_doubles_; }
+ bool has_function() const { return has_function_; }
// Mark all computed expressions that are bound to a key that
// is shadowed by a later occurrence of the same key. For the
@@ -1319,23 +1518,27 @@ class ObjectLiteral: public MaterializedLiteral {
bool is_simple,
bool fast_elements,
int depth,
- bool has_function)
- : MaterializedLiteral(isolate, literal_index, is_simple, depth),
+ bool may_store_doubles,
+ bool has_function,
+ int pos)
+ : MaterializedLiteral(isolate, literal_index, is_simple, depth, pos),
constant_properties_(constant_properties),
properties_(properties),
fast_elements_(fast_elements),
+ may_store_doubles_(may_store_doubles),
has_function_(has_function) {}
private:
Handle<FixedArray> constant_properties_;
ZoneList<Property*>* properties_;
bool fast_elements_;
+ bool may_store_doubles_;
bool has_function_;
};
// Node for capturing a regexp literal.
-class RegExpLiteral: public MaterializedLiteral {
+class RegExpLiteral V8_FINAL : public MaterializedLiteral {
public:
DECLARE_NODE_TYPE(RegExpLiteral)
@@ -1346,8 +1549,9 @@ class RegExpLiteral: public MaterializedLiteral {
RegExpLiteral(Isolate* isolate,
Handle<String> pattern,
Handle<String> flags,
- int literal_index)
- : MaterializedLiteral(isolate, literal_index, false, 1),
+ int literal_index,
+ int pos)
+ : MaterializedLiteral(isolate, literal_index, false, 1, pos),
pattern_(pattern),
flags_(flags) {}
@@ -1356,9 +1560,10 @@ class RegExpLiteral: public MaterializedLiteral {
Handle<String> flags_;
};
+
// An array literal has a literals object that is used
// for minimizing the work when constructing it at runtime.
-class ArrayLiteral: public MaterializedLiteral {
+class ArrayLiteral V8_FINAL : public MaterializedLiteral {
public:
DECLARE_NODE_TYPE(ArrayLiteral)
@@ -1376,8 +1581,9 @@ class ArrayLiteral: public MaterializedLiteral {
ZoneList<Expression*>* values,
int literal_index,
bool is_simple,
- int depth)
- : MaterializedLiteral(isolate, literal_index, is_simple, depth),
+ int depth,
+ int pos)
+ : MaterializedLiteral(isolate, literal_index, is_simple, depth, pos),
constant_elements_(constant_elements),
values_(values),
first_element_id_(ReserveIdRange(isolate, values->length())) {}
@@ -1389,11 +1595,11 @@ class ArrayLiteral: public MaterializedLiteral {
};
-class VariableProxy: public Expression {
+class VariableProxy V8_FINAL : public Expression {
public:
DECLARE_NODE_TYPE(VariableProxy)
- virtual bool IsValidLeftHandSide() {
+ virtual bool IsValidLeftHandSide() V8_OVERRIDE {
return var_ == NULL ? true : var_->IsValidLeftHandSide();
}
@@ -1410,18 +1616,17 @@ class VariableProxy: public Expression {
Handle<String> name() const { return name_; }
Variable* var() const { return var_; }
bool is_this() const { return is_this_; }
- int position() const { return position_; }
Interface* interface() const { return interface_; }
void MarkAsTrivial() { is_trivial_ = true; }
void MarkAsLValue() { is_lvalue_ = true; }
- // Bind this proxy to the variable var.
+ // Bind this proxy to the variable var. Interfaces must match.
void BindTo(Variable* var);
protected:
- VariableProxy(Isolate* isolate, Variable* var);
+ VariableProxy(Isolate* isolate, Variable* var, int position);
VariableProxy(Isolate* isolate,
Handle<String> name,
@@ -1436,33 +1641,38 @@ class VariableProxy: public Expression {
// True if this variable proxy is being used in an assignment
// or with a increment/decrement operator.
bool is_lvalue_;
- int position_;
Interface* interface_;
};
-class Property: public Expression {
+class Property V8_FINAL : public Expression {
public:
DECLARE_NODE_TYPE(Property)
- virtual bool IsValidLeftHandSide() { return true; }
+ virtual bool IsValidLeftHandSide() V8_OVERRIDE { return true; }
Expression* obj() const { return obj_; }
Expression* key() const { return key_; }
- virtual int position() const { return pos_; }
BailoutId LoadId() const { return load_id_; }
- bool IsStringLength() const { return is_string_length_; }
bool IsStringAccess() const { return is_string_access_; }
bool IsFunctionPrototype() const { return is_function_prototype_; }
// Type feedback information.
void RecordTypeFeedback(TypeFeedbackOracle* oracle, Zone* zone);
- virtual bool IsMonomorphic() { return is_monomorphic_; }
- virtual SmallMapList* GetReceiverTypes() { return &receiver_types_; }
- bool IsArrayLength() { return is_array_length_; }
+ virtual bool IsMonomorphic() V8_OVERRIDE { return is_monomorphic_; }
+ virtual SmallMapList* GetReceiverTypes() V8_OVERRIDE {
+ return &receiver_types_;
+ }
+ virtual KeyedAccessStoreMode GetStoreMode() V8_OVERRIDE {
+ return STANDARD_STORE;
+ }
bool IsUninitialized() { return is_uninitialized_; }
+ bool IsPreMonomorphic() { return is_pre_monomorphic_; }
+ bool HasNoTypeInformation() {
+ return is_uninitialized_ || is_pre_monomorphic_;
+ }
TypeFeedbackId PropertyFeedbackId() { return reuse(id()); }
protected:
@@ -1470,48 +1680,61 @@ class Property: public Expression {
Expression* obj,
Expression* key,
int pos)
- : Expression(isolate),
+ : Expression(isolate, pos),
obj_(obj),
key_(key),
- pos_(pos),
load_id_(GetNextId(isolate)),
is_monomorphic_(false),
+ is_pre_monomorphic_(false),
is_uninitialized_(false),
- is_array_length_(false),
- is_string_length_(false),
is_string_access_(false),
is_function_prototype_(false) { }
private:
Expression* obj_;
Expression* key_;
- int pos_;
const BailoutId load_id_;
SmallMapList receiver_types_;
bool is_monomorphic_ : 1;
+ bool is_pre_monomorphic_ : 1;
bool is_uninitialized_ : 1;
- bool is_array_length_ : 1;
- bool is_string_length_ : 1;
bool is_string_access_ : 1;
bool is_function_prototype_ : 1;
};
-class Call: public Expression {
+class Call V8_FINAL : public Expression {
public:
DECLARE_NODE_TYPE(Call)
Expression* expression() const { return expression_; }
ZoneList<Expression*>* arguments() const { return arguments_; }
- virtual int position() const { return pos_; }
// Type feedback information.
TypeFeedbackId CallFeedbackId() const { return reuse(id()); }
void RecordTypeFeedback(TypeFeedbackOracle* oracle, CallKind call_kind);
- virtual SmallMapList* GetReceiverTypes() { return &receiver_types_; }
- virtual bool IsMonomorphic() { return is_monomorphic_; }
+ virtual SmallMapList* GetReceiverTypes() V8_OVERRIDE {
+ return &receiver_types_;
+ }
+ virtual bool IsMonomorphic() V8_OVERRIDE { return is_monomorphic_; }
CheckType check_type() const { return check_type_; }
+
+ void set_string_check(Handle<JSObject> holder) {
+ holder_ = holder;
+ check_type_ = STRING_CHECK;
+ }
+
+ void set_number_check(Handle<JSObject> holder) {
+ holder_ = holder;
+ check_type_ = NUMBER_CHECK;
+ }
+
+ void set_map_check() {
+ holder_ = Handle<JSObject>::null();
+ check_type_ = RECEIVER_MAP_CHECK;
+ }
+
Handle<JSFunction> target() { return target_; }
// A cache for the holder, set as a side effect of computing the target of the
@@ -1519,13 +1742,18 @@ class Call: public Expression {
// as the holder!
Handle<JSObject> holder() { return holder_; }
- Handle<JSGlobalPropertyCell> cell() { return cell_; }
+ Handle<Cell> cell() { return cell_; }
bool ComputeTarget(Handle<Map> type, Handle<String> name);
bool ComputeGlobalTarget(Handle<GlobalObject> global, LookupResult* lookup);
BailoutId ReturnId() const { return return_id_; }
+ // TODO(rossberg): this should really move somewhere else (and be merged with
+ // various similar methods in objects.cc), but for now...
+ static Handle<JSObject> GetPrototypeForPrimitiveCheck(
+ CheckType check, Isolate* isolate);
+
#ifdef DEBUG
// Used to assert that the FullCodeGenerator records the return site.
bool return_is_recorded_;
@@ -1536,10 +1764,9 @@ class Call: public Expression {
Expression* expression,
ZoneList<Expression*>* arguments,
int pos)
- : Expression(isolate),
+ : Expression(isolate, pos),
expression_(expression),
arguments_(arguments),
- pos_(pos),
is_monomorphic_(false),
check_type_(RECEIVER_MAP_CHECK),
return_id_(GetNextId(isolate)) { }
@@ -1547,32 +1774,34 @@ class Call: public Expression {
private:
Expression* expression_;
ZoneList<Expression*>* arguments_;
- int pos_;
bool is_monomorphic_;
CheckType check_type_;
SmallMapList receiver_types_;
Handle<JSFunction> target_;
Handle<JSObject> holder_;
- Handle<JSGlobalPropertyCell> cell_;
+ Handle<Cell> cell_;
const BailoutId return_id_;
};
-class CallNew: public Expression {
+class CallNew V8_FINAL : public Expression {
public:
DECLARE_NODE_TYPE(CallNew)
Expression* expression() const { return expression_; }
ZoneList<Expression*>* arguments() const { return arguments_; }
- virtual int position() const { return pos_; }
// Type feedback information.
TypeFeedbackId CallNewFeedbackId() const { return reuse(id()); }
void RecordTypeFeedback(TypeFeedbackOracle* oracle);
- virtual bool IsMonomorphic() { return is_monomorphic_; }
- Handle<JSFunction> target() { return target_; }
+ virtual bool IsMonomorphic() V8_OVERRIDE { return is_monomorphic_; }
+ Handle<JSFunction> target() const { return target_; }
+ ElementsKind elements_kind() const { return elements_kind_; }
+ Handle<Cell> allocation_info_cell() const {
+ return allocation_info_cell_;
+ }
BailoutId ReturnId() const { return return_id_; }
@@ -1581,20 +1810,21 @@ class CallNew: public Expression {
Expression* expression,
ZoneList<Expression*>* arguments,
int pos)
- : Expression(isolate),
+ : Expression(isolate, pos),
expression_(expression),
arguments_(arguments),
- pos_(pos),
is_monomorphic_(false),
+ elements_kind_(GetInitialFastElementsKind()),
return_id_(GetNextId(isolate)) { }
private:
Expression* expression_;
ZoneList<Expression*>* arguments_;
- int pos_;
bool is_monomorphic_;
Handle<JSFunction> target_;
+ ElementsKind elements_kind_;
+ Handle<Cell> allocation_info_cell_;
const BailoutId return_id_;
};
@@ -1604,7 +1834,7 @@ class CallNew: public Expression {
// language construct. Instead it is used to call a C or JS function
// with a set of arguments. This is used from the builtins that are
// implemented in JavaScript (see "v8natives.js").
-class CallRuntime: public Expression {
+class CallRuntime V8_FINAL : public Expression {
public:
DECLARE_NODE_TYPE(CallRuntime)
@@ -1619,8 +1849,9 @@ class CallRuntime: public Expression {
CallRuntime(Isolate* isolate,
Handle<String> name,
const Runtime::Function* function,
- ZoneList<Expression*>* arguments)
- : Expression(isolate),
+ ZoneList<Expression*>* arguments,
+ int pos)
+ : Expression(isolate, pos),
name_(name),
function_(function),
arguments_(arguments) { }
@@ -1632,30 +1863,27 @@ class CallRuntime: public Expression {
};
-class UnaryOperation: public Expression {
+class UnaryOperation V8_FINAL : public Expression {
public:
DECLARE_NODE_TYPE(UnaryOperation)
- virtual bool ResultOverwriteAllowed();
-
Token::Value op() const { return op_; }
Expression* expression() const { return expression_; }
- virtual int position() const { return pos_; }
BailoutId MaterializeTrueId() { return materialize_true_id_; }
BailoutId MaterializeFalseId() { return materialize_false_id_; }
- TypeFeedbackId UnaryOperationFeedbackId() const { return reuse(id()); }
+ virtual void RecordToBooleanTypeFeedback(
+ TypeFeedbackOracle* oracle) V8_OVERRIDE;
protected:
UnaryOperation(Isolate* isolate,
Token::Value op,
Expression* expression,
int pos)
- : Expression(isolate),
+ : Expression(isolate, pos),
op_(op),
expression_(expression),
- pos_(pos),
materialize_true_id_(GetNextId(isolate)),
materialize_false_id_(GetNextId(isolate)) {
ASSERT(Token::IsUnaryOp(op));
@@ -1664,7 +1892,6 @@ class UnaryOperation: public Expression {
private:
Token::Value op_;
Expression* expression_;
- int pos_;
// For unary not (Token::NOT), the AST ids where true and false will
// actually be materialized, respectively.
@@ -1673,7 +1900,7 @@ class UnaryOperation: public Expression {
};
-class BinaryOperation: public Expression {
+class BinaryOperation V8_FINAL : public Expression {
public:
DECLARE_NODE_TYPE(BinaryOperation)
@@ -1682,11 +1909,15 @@ class BinaryOperation: public Expression {
Token::Value op() const { return op_; }
Expression* left() const { return left_; }
Expression* right() const { return right_; }
- virtual int position() const { return pos_; }
BailoutId RightId() const { return right_id_; }
TypeFeedbackId BinaryOperationFeedbackId() const { return reuse(id()); }
+ Maybe<int> fixed_right_arg() const { return fixed_right_arg_; }
+ void set_fixed_right_arg(Maybe<int> arg) { fixed_right_arg_ = arg; }
+
+ virtual void RecordToBooleanTypeFeedback(
+ TypeFeedbackOracle* oracle) V8_OVERRIDE;
protected:
BinaryOperation(Isolate* isolate,
@@ -1694,11 +1925,10 @@ class BinaryOperation: public Expression {
Expression* left,
Expression* right,
int pos)
- : Expression(isolate),
+ : Expression(isolate, pos),
op_(op),
left_(left),
right_(right),
- pos_(pos),
right_id_(GetNextId(isolate)) {
ASSERT(Token::IsBinaryOp(op));
}
@@ -1707,14 +1937,18 @@ class BinaryOperation: public Expression {
Token::Value op_;
Expression* left_;
Expression* right_;
- int pos_;
+
+ // TODO(rossberg): the fixed arg should probably be represented as a Constant
+ // type for the RHS.
+ Maybe<int> fixed_right_arg_;
+
// The short-circuit logical operations need an AST ID for their
// right-hand subexpression.
const BailoutId right_id_;
};
-class CountOperation: public Expression {
+class CountOperation V8_FINAL : public Expression {
public:
DECLARE_NODE_TYPE(CountOperation)
@@ -1727,13 +1961,16 @@ class CountOperation: public Expression {
}
Expression* expression() const { return expression_; }
- virtual int position() const { return pos_; }
-
- virtual void MarkAsStatement() { is_prefix_ = true; }
- void RecordTypeFeedback(TypeFeedbackOracle* oracle, Zone* znoe);
- virtual bool IsMonomorphic() { return is_monomorphic_; }
- virtual SmallMapList* GetReceiverTypes() { return &receiver_types_; }
+ void RecordTypeFeedback(TypeFeedbackOracle* oracle, Zone* zone);
+ virtual bool IsMonomorphic() V8_OVERRIDE { return is_monomorphic_; }
+ virtual SmallMapList* GetReceiverTypes() V8_OVERRIDE {
+ return &receiver_types_;
+ }
+ virtual KeyedAccessStoreMode GetStoreMode() V8_OVERRIDE {
+ return store_mode_;
+ }
+ Handle<Type> type() const { return type_; }
BailoutId AssignmentId() const { return assignment_id_; }
@@ -1746,44 +1983,46 @@ class CountOperation: public Expression {
bool is_prefix,
Expression* expr,
int pos)
- : Expression(isolate),
+ : Expression(isolate, pos),
op_(op),
is_prefix_(is_prefix),
+ is_monomorphic_(false),
+ store_mode_(STANDARD_STORE),
expression_(expr),
- pos_(pos),
assignment_id_(GetNextId(isolate)),
count_id_(GetNextId(isolate)) {}
private:
Token::Value op_;
- bool is_prefix_;
- bool is_monomorphic_;
+ bool is_prefix_ : 1;
+ bool is_monomorphic_ : 1;
+ KeyedAccessStoreMode store_mode_ : 5; // Windows treats as signed,
+ // must have extra bit.
+ Handle<Type> type_;
+
Expression* expression_;
- int pos_;
const BailoutId assignment_id_;
const TypeFeedbackId count_id_;
SmallMapList receiver_types_;
};
-class CompareOperation: public Expression {
+class CompareOperation V8_FINAL : public Expression {
public:
DECLARE_NODE_TYPE(CompareOperation)
Token::Value op() const { return op_; }
Expression* left() const { return left_; }
Expression* right() const { return right_; }
- virtual int position() const { return pos_; }
// Type feedback information.
TypeFeedbackId CompareOperationFeedbackId() const { return reuse(id()); }
- void RecordTypeFeedback(TypeFeedbackOracle* oracle);
- bool IsSmiCompare() { return compare_type_ == SMI_ONLY; }
- bool IsObjectCompare() { return compare_type_ == OBJECT_ONLY; }
+ Handle<Type> combined_type() const { return combined_type_; }
+ void set_combined_type(Handle<Type> type) { combined_type_ = type; }
// Match special cases.
bool IsLiteralCompareTypeof(Expression** expr, Handle<String>* check);
- bool IsLiteralCompareUndefined(Expression** expr);
+ bool IsLiteralCompareUndefined(Expression** expr, Isolate* isolate);
bool IsLiteralCompareNull(Expression** expr);
protected:
@@ -1792,12 +2031,11 @@ class CompareOperation: public Expression {
Expression* left,
Expression* right,
int pos)
- : Expression(isolate),
+ : Expression(isolate, pos),
op_(op),
left_(left),
right_(right),
- pos_(pos),
- compare_type_(NONE) {
+ combined_type_(Type::None(), isolate) {
ASSERT(Token::IsCompareOp(op));
}
@@ -1805,14 +2043,12 @@ class CompareOperation: public Expression {
Token::Value op_;
Expression* left_;
Expression* right_;
- int pos_;
- enum CompareTypeFeedback { NONE, SMI_ONLY, OBJECT_ONLY };
- CompareTypeFeedback compare_type_;
+ Handle<Type> combined_type_;
};
-class Conditional: public Expression {
+class Conditional V8_FINAL : public Expression {
public:
DECLARE_NODE_TYPE(Conditional)
@@ -1820,9 +2056,6 @@ class Conditional: public Expression {
Expression* then_expression() const { return then_expression_; }
Expression* else_expression() const { return else_expression_; }
- int then_expression_position() const { return then_expression_position_; }
- int else_expression_position() const { return else_expression_position_; }
-
BailoutId ThenId() const { return then_id_; }
BailoutId ElseId() const { return else_id_; }
@@ -1831,14 +2064,11 @@ class Conditional: public Expression {
Expression* condition,
Expression* then_expression,
Expression* else_expression,
- int then_expression_position,
- int else_expression_position)
- : Expression(isolate),
+ int position)
+ : Expression(isolate, position),
condition_(condition),
then_expression_(then_expression),
else_expression_(else_expression),
- then_expression_position_(then_expression_position),
- else_expression_position_(else_expression_position),
then_id_(GetNextId(isolate)),
else_id_(GetNextId(isolate)) { }
@@ -1846,14 +2076,12 @@ class Conditional: public Expression {
Expression* condition_;
Expression* then_expression_;
Expression* else_expression_;
- int then_expression_position_;
- int else_expression_position_;
const BailoutId then_id_;
const BailoutId else_id_;
};
-class Assignment: public Expression {
+class Assignment V8_FINAL : public Expression {
public:
DECLARE_NODE_TYPE(Assignment)
@@ -1864,7 +2092,6 @@ class Assignment: public Expression {
Token::Value op() const { return op_; }
Expression* target() const { return target_; }
Expression* value() const { return value_; }
- virtual int position() const { return pos_; }
BinaryOperation* binary_operation() const { return binary_operation_; }
// This check relies on the definition order of token in token.h.
@@ -1875,8 +2102,18 @@ class Assignment: public Expression {
// Type feedback information.
TypeFeedbackId AssignmentFeedbackId() { return reuse(id()); }
void RecordTypeFeedback(TypeFeedbackOracle* oracle, Zone* zone);
- virtual bool IsMonomorphic() { return is_monomorphic_; }
- virtual SmallMapList* GetReceiverTypes() { return &receiver_types_; }
+ virtual bool IsMonomorphic() V8_OVERRIDE { return is_monomorphic_; }
+ bool IsUninitialized() { return is_uninitialized_; }
+ bool IsPreMonomorphic() { return is_pre_monomorphic_; }
+ bool HasNoTypeInformation() {
+ return is_uninitialized_ || is_pre_monomorphic_;
+ }
+ virtual SmallMapList* GetReceiverTypes() V8_OVERRIDE {
+ return &receiver_types_;
+ }
+ virtual KeyedAccessStoreMode GetStoreMode() V8_OVERRIDE {
+ return store_mode_;
+ }
protected:
Assignment(Isolate* isolate,
@@ -1889,8 +2126,8 @@ class Assignment: public Expression {
void Init(Isolate* isolate, AstNodeFactory<Visitor>* factory) {
ASSERT(Token::IsAssignmentOp(op_));
if (is_compound()) {
- binary_operation_ =
- factory->NewBinaryOperation(binary_op(), target_, value_, pos_ + 1);
+ binary_operation_ = factory->NewBinaryOperation(
+ binary_op(), target_, value_, position() + 1);
}
}
@@ -1898,35 +2135,83 @@ class Assignment: public Expression {
Token::Value op_;
Expression* target_;
Expression* value_;
- int pos_;
BinaryOperation* binary_operation_;
const BailoutId assignment_id_;
- bool is_monomorphic_;
+ bool is_monomorphic_ : 1;
+ bool is_uninitialized_ : 1;
+ bool is_pre_monomorphic_ : 1;
+ KeyedAccessStoreMode store_mode_ : 5; // Windows treats as signed,
+ // must have extra bit.
SmallMapList receiver_types_;
};
-class Throw: public Expression {
+class Yield V8_FINAL : public Expression {
+ public:
+ DECLARE_NODE_TYPE(Yield)
+
+ enum Kind {
+ INITIAL, // The initial yield that returns the unboxed generator object.
+ SUSPEND, // A normal yield: { value: EXPRESSION, done: false }
+ DELEGATING, // A yield*.
+ FINAL // A return: { value: EXPRESSION, done: true }
+ };
+
+ Expression* generator_object() const { return generator_object_; }
+ Expression* expression() const { return expression_; }
+ Kind yield_kind() const { return yield_kind_; }
+
+ // Delegating yield surrounds the "yield" in a "try/catch". This index
+ // locates the catch handler in the handler table, and is equivalent to
+ // TryCatchStatement::index().
+ int index() const {
+ ASSERT(yield_kind() == DELEGATING);
+ return index_;
+ }
+ void set_index(int index) {
+ ASSERT(yield_kind() == DELEGATING);
+ index_ = index;
+ }
+
+ protected:
+ Yield(Isolate* isolate,
+ Expression* generator_object,
+ Expression* expression,
+ Kind yield_kind,
+ int pos)
+ : Expression(isolate, pos),
+ generator_object_(generator_object),
+ expression_(expression),
+ yield_kind_(yield_kind),
+ index_(-1) { }
+
+ private:
+ Expression* generator_object_;
+ Expression* expression_;
+ Kind yield_kind_;
+ int index_;
+};
+
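// Illustrative sketch (not from the patch): building a plain "yield EXPR" node.
// It assumes the AstNodeFactory exposes a matching NewYield(generator_object,
// expression, yield_kind, pos) helper (not visible in this hunk), and that
// factory, generator_object, operand and pos are already in scope
// (hypothetical names).
Yield* yield_expr = factory->NewYield(generator_object,  // the generator state proxy
                                      operand,           // the yielded expression
                                      Yield::SUSPEND,     // { value: operand, done: false }
                                      pos);
ASSERT(yield_expr->yield_kind() == Yield::SUSPEND);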
+
+class Throw V8_FINAL : public Expression {
public:
DECLARE_NODE_TYPE(Throw)
Expression* exception() const { return exception_; }
- virtual int position() const { return pos_; }
protected:
Throw(Isolate* isolate, Expression* exception, int pos)
- : Expression(isolate), exception_(exception), pos_(pos) {}
+ : Expression(isolate, pos), exception_(exception) {}
private:
Expression* exception_;
- int pos_;
};
-class FunctionLiteral: public Expression {
+class FunctionLiteral V8_FINAL : public Expression {
public:
- enum Type {
+ enum FunctionType {
ANONYMOUS_EXPRESSION,
NAMED_EXPRESSION,
DECLARATION
@@ -1947,6 +2232,11 @@ class FunctionLiteral: public Expression {
kNotParenthesized
};
+ enum IsGeneratorFlag {
+ kIsGenerator,
+ kNotGenerator
+ };
+
DECLARE_NODE_TYPE(FunctionLiteral)
Handle<String> name() const { return name_; }
@@ -1965,12 +2255,6 @@ class FunctionLiteral: public Expression {
int materialized_literal_count() { return materialized_literal_count_; }
int expected_property_count() { return expected_property_count_; }
int handler_count() { return handler_count_; }
- bool has_only_simple_this_property_assignments() {
- return HasOnlySimpleThisPropertyAssignments::decode(bitfield_);
- }
- Handle<FixedArray> this_property_assignments() {
- return this_property_assignments_;
- }
int parameter_count() { return parameter_count_; }
bool AllowsLazyCompilation();
@@ -2007,12 +2291,22 @@ class FunctionLiteral: public Expression {
bitfield_ = IsParenthesized::update(bitfield_, kIsParenthesized);
}
+ bool is_generator() {
+ return IsGenerator::decode(bitfield_) == kIsGenerator;
+ }
+
int ast_node_count() { return ast_properties_.node_count(); }
AstProperties::Flags* flags() { return ast_properties_.flags(); }
void set_ast_properties(AstProperties* ast_properties) {
ast_properties_ = *ast_properties;
}
+ bool dont_optimize() { return dont_optimize_reason_ != kNoReason; }
+ BailoutReason dont_optimize_reason() { return dont_optimize_reason_; }
+ void set_dont_optimize_reason(BailoutReason reason) {
+ dont_optimize_reason_ = reason;
+ }
+
protected:
FunctionLiteral(Isolate* isolate,
Handle<String> name,
@@ -2021,42 +2315,41 @@ class FunctionLiteral: public Expression {
int materialized_literal_count,
int expected_property_count,
int handler_count,
- bool has_only_simple_this_property_assignments,
- Handle<FixedArray> this_property_assignments,
int parameter_count,
- Type type,
+ FunctionType function_type,
ParameterFlag has_duplicate_parameters,
IsFunctionFlag is_function,
- IsParenthesizedFlag is_parenthesized)
- : Expression(isolate),
+ IsParenthesizedFlag is_parenthesized,
+ IsGeneratorFlag is_generator,
+ int position)
+ : Expression(isolate, position),
name_(name),
scope_(scope),
body_(body),
- this_property_assignments_(this_property_assignments),
inferred_name_(isolate->factory()->empty_string()),
+ dont_optimize_reason_(kNoReason),
materialized_literal_count_(materialized_literal_count),
expected_property_count_(expected_property_count),
handler_count_(handler_count),
parameter_count_(parameter_count),
function_token_position_(RelocInfo::kNoPosition) {
bitfield_ =
- HasOnlySimpleThisPropertyAssignments::encode(
- has_only_simple_this_property_assignments) |
- IsExpression::encode(type != DECLARATION) |
- IsAnonymous::encode(type == ANONYMOUS_EXPRESSION) |
+ IsExpression::encode(function_type != DECLARATION) |
+ IsAnonymous::encode(function_type == ANONYMOUS_EXPRESSION) |
Pretenure::encode(false) |
HasDuplicateParameters::encode(has_duplicate_parameters) |
IsFunction::encode(is_function) |
- IsParenthesized::encode(is_parenthesized);
+ IsParenthesized::encode(is_parenthesized) |
+ IsGenerator::encode(is_generator);
}
private:
Handle<String> name_;
Scope* scope_;
ZoneList<Statement*>* body_;
- Handle<FixedArray> this_property_assignments_;
Handle<String> inferred_name_;
AstProperties ast_properties_;
+ BailoutReason dont_optimize_reason_;
int materialized_literal_count_;
int expected_property_count_;
@@ -2065,42 +2358,40 @@ class FunctionLiteral: public Expression {
int function_token_position_;
unsigned bitfield_;
- class HasOnlySimpleThisPropertyAssignments: public BitField<bool, 0, 1> {};
- class IsExpression: public BitField<bool, 1, 1> {};
- class IsAnonymous: public BitField<bool, 2, 1> {};
- class Pretenure: public BitField<bool, 3, 1> {};
- class HasDuplicateParameters: public BitField<ParameterFlag, 4, 1> {};
- class IsFunction: public BitField<IsFunctionFlag, 5, 1> {};
- class IsParenthesized: public BitField<IsParenthesizedFlag, 6, 1> {};
+ class IsExpression: public BitField<bool, 0, 1> {};
+ class IsAnonymous: public BitField<bool, 1, 1> {};
+ class Pretenure: public BitField<bool, 2, 1> {};
+ class HasDuplicateParameters: public BitField<ParameterFlag, 3, 1> {};
+ class IsFunction: public BitField<IsFunctionFlag, 4, 1> {};
+ class IsParenthesized: public BitField<IsParenthesizedFlag, 5, 1> {};
+ class IsGenerator: public BitField<IsGeneratorFlag, 6, 1> {};
};
-class SharedFunctionInfoLiteral: public Expression {
+class NativeFunctionLiteral V8_FINAL : public Expression {
public:
- DECLARE_NODE_TYPE(SharedFunctionInfoLiteral)
+ DECLARE_NODE_TYPE(NativeFunctionLiteral)
- Handle<SharedFunctionInfo> shared_function_info() const {
- return shared_function_info_;
- }
+ Handle<String> name() const { return name_; }
+ v8::Extension* extension() const { return extension_; }
protected:
- SharedFunctionInfoLiteral(
- Isolate* isolate,
- Handle<SharedFunctionInfo> shared_function_info)
- : Expression(isolate),
- shared_function_info_(shared_function_info) { }
+ NativeFunctionLiteral(
+ Isolate* isolate, Handle<String> name, v8::Extension* extension, int pos)
+ : Expression(isolate, pos), name_(name), extension_(extension) {}
private:
- Handle<SharedFunctionInfo> shared_function_info_;
+ Handle<String> name_;
+ v8::Extension* extension_;
};
-class ThisFunction: public Expression {
+class ThisFunction V8_FINAL : public Expression {
public:
DECLARE_NODE_TYPE(ThisFunction)
protected:
- explicit ThisFunction(Isolate* isolate): Expression(isolate) {}
+ explicit ThisFunction(Isolate* isolate, int pos): Expression(isolate, pos) {}
};
#undef DECLARE_NODE_TYPE
@@ -2120,10 +2411,10 @@ class RegExpVisitor BASE_EMBEDDED {
};
-class RegExpTree: public ZoneObject {
+class RegExpTree : public ZoneObject {
public:
static const int kInfinity = kMaxInt;
- virtual ~RegExpTree() { }
+ virtual ~RegExpTree() {}
virtual void* Accept(RegExpVisitor* visitor, void* data) = 0;
virtual RegExpNode* ToNode(RegExpCompiler* compiler,
RegExpNode* on_success) = 0;
@@ -2145,19 +2436,19 @@ class RegExpTree: public ZoneObject {
};
-class RegExpDisjunction: public RegExpTree {
+class RegExpDisjunction V8_FINAL : public RegExpTree {
public:
explicit RegExpDisjunction(ZoneList<RegExpTree*>* alternatives);
- virtual void* Accept(RegExpVisitor* visitor, void* data);
+ virtual void* Accept(RegExpVisitor* visitor, void* data) V8_OVERRIDE;
virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success);
- virtual RegExpDisjunction* AsDisjunction();
- virtual Interval CaptureRegisters();
- virtual bool IsDisjunction();
- virtual bool IsAnchoredAtStart();
- virtual bool IsAnchoredAtEnd();
- virtual int min_match() { return min_match_; }
- virtual int max_match() { return max_match_; }
+ RegExpNode* on_success) V8_OVERRIDE;
+ virtual RegExpDisjunction* AsDisjunction() V8_OVERRIDE;
+ virtual Interval CaptureRegisters() V8_OVERRIDE;
+ virtual bool IsDisjunction() V8_OVERRIDE;
+ virtual bool IsAnchoredAtStart() V8_OVERRIDE;
+ virtual bool IsAnchoredAtEnd() V8_OVERRIDE;
+ virtual int min_match() V8_OVERRIDE { return min_match_; }
+ virtual int max_match() V8_OVERRIDE { return max_match_; }
ZoneList<RegExpTree*>* alternatives() { return alternatives_; }
private:
ZoneList<RegExpTree*>* alternatives_;
@@ -2166,19 +2457,19 @@ class RegExpDisjunction: public RegExpTree {
};
-class RegExpAlternative: public RegExpTree {
+class RegExpAlternative V8_FINAL : public RegExpTree {
public:
explicit RegExpAlternative(ZoneList<RegExpTree*>* nodes);
- virtual void* Accept(RegExpVisitor* visitor, void* data);
+ virtual void* Accept(RegExpVisitor* visitor, void* data) V8_OVERRIDE;
virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success);
- virtual RegExpAlternative* AsAlternative();
- virtual Interval CaptureRegisters();
- virtual bool IsAlternative();
- virtual bool IsAnchoredAtStart();
- virtual bool IsAnchoredAtEnd();
- virtual int min_match() { return min_match_; }
- virtual int max_match() { return max_match_; }
+ RegExpNode* on_success) V8_OVERRIDE;
+ virtual RegExpAlternative* AsAlternative() V8_OVERRIDE;
+ virtual Interval CaptureRegisters() V8_OVERRIDE;
+ virtual bool IsAlternative() V8_OVERRIDE;
+ virtual bool IsAnchoredAtStart() V8_OVERRIDE;
+ virtual bool IsAnchoredAtEnd() V8_OVERRIDE;
+ virtual int min_match() V8_OVERRIDE { return min_match_; }
+ virtual int max_match() V8_OVERRIDE { return max_match_; }
ZoneList<RegExpTree*>* nodes() { return nodes_; }
private:
ZoneList<RegExpTree*>* nodes_;
@@ -2187,9 +2478,9 @@ class RegExpAlternative: public RegExpTree {
};
-class RegExpAssertion: public RegExpTree {
+class RegExpAssertion V8_FINAL : public RegExpTree {
public:
- enum Type {
+ enum AssertionType {
START_OF_LINE,
START_OF_INPUT,
END_OF_LINE,
@@ -2197,23 +2488,23 @@ class RegExpAssertion: public RegExpTree {
BOUNDARY,
NON_BOUNDARY
};
- explicit RegExpAssertion(Type type) : type_(type) { }
- virtual void* Accept(RegExpVisitor* visitor, void* data);
+ explicit RegExpAssertion(AssertionType type) : assertion_type_(type) { }
+ virtual void* Accept(RegExpVisitor* visitor, void* data) V8_OVERRIDE;
virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success);
- virtual RegExpAssertion* AsAssertion();
- virtual bool IsAssertion();
- virtual bool IsAnchoredAtStart();
- virtual bool IsAnchoredAtEnd();
- virtual int min_match() { return 0; }
- virtual int max_match() { return 0; }
- Type type() { return type_; }
+ RegExpNode* on_success) V8_OVERRIDE;
+ virtual RegExpAssertion* AsAssertion() V8_OVERRIDE;
+ virtual bool IsAssertion() V8_OVERRIDE;
+ virtual bool IsAnchoredAtStart() V8_OVERRIDE;
+ virtual bool IsAnchoredAtEnd() V8_OVERRIDE;
+ virtual int min_match() V8_OVERRIDE { return 0; }
+ virtual int max_match() V8_OVERRIDE { return 0; }
+ AssertionType assertion_type() { return assertion_type_; }
private:
- Type type_;
+ AssertionType assertion_type_;
};
-class CharacterSet BASE_EMBEDDED {
+class CharacterSet V8_FINAL BASE_EMBEDDED {
public:
explicit CharacterSet(uc16 standard_set_type)
: ranges_(NULL),
@@ -2236,7 +2527,7 @@ class CharacterSet BASE_EMBEDDED {
};
-class RegExpCharacterClass: public RegExpTree {
+class RegExpCharacterClass V8_FINAL : public RegExpTree {
public:
RegExpCharacterClass(ZoneList<CharacterRange>* ranges, bool is_negated)
: set_(ranges),
@@ -2244,15 +2535,15 @@ class RegExpCharacterClass: public RegExpTree {
explicit RegExpCharacterClass(uc16 type)
: set_(type),
is_negated_(false) { }
- virtual void* Accept(RegExpVisitor* visitor, void* data);
+ virtual void* Accept(RegExpVisitor* visitor, void* data) V8_OVERRIDE;
virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success);
- virtual RegExpCharacterClass* AsCharacterClass();
- virtual bool IsCharacterClass();
- virtual bool IsTextElement() { return true; }
- virtual int min_match() { return 1; }
- virtual int max_match() { return 1; }
- virtual void AppendToText(RegExpText* text, Zone* zone);
+ RegExpNode* on_success) V8_OVERRIDE;
+ virtual RegExpCharacterClass* AsCharacterClass() V8_OVERRIDE;
+ virtual bool IsCharacterClass() V8_OVERRIDE;
+ virtual bool IsTextElement() V8_OVERRIDE { return true; }
+ virtual int min_match() V8_OVERRIDE { return 1; }
+ virtual int max_match() V8_OVERRIDE { return 1; }
+ virtual void AppendToText(RegExpText* text, Zone* zone) V8_OVERRIDE;
CharacterSet character_set() { return set_; }
// TODO(lrn): Remove need for complex version if is_standard that
// recognizes a mangled standard set and just do { return set_.is_special(); }
@@ -2278,18 +2569,18 @@ class RegExpCharacterClass: public RegExpTree {
};
-class RegExpAtom: public RegExpTree {
+class RegExpAtom V8_FINAL : public RegExpTree {
public:
explicit RegExpAtom(Vector<const uc16> data) : data_(data) { }
- virtual void* Accept(RegExpVisitor* visitor, void* data);
+ virtual void* Accept(RegExpVisitor* visitor, void* data) V8_OVERRIDE;
virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success);
- virtual RegExpAtom* AsAtom();
- virtual bool IsAtom();
- virtual bool IsTextElement() { return true; }
- virtual int min_match() { return data_.length(); }
- virtual int max_match() { return data_.length(); }
- virtual void AppendToText(RegExpText* text, Zone* zone);
+ RegExpNode* on_success) V8_OVERRIDE;
+ virtual RegExpAtom* AsAtom() V8_OVERRIDE;
+ virtual bool IsAtom() V8_OVERRIDE;
+ virtual bool IsTextElement() V8_OVERRIDE { return true; }
+ virtual int min_match() V8_OVERRIDE { return data_.length(); }
+ virtual int max_match() V8_OVERRIDE { return data_.length(); }
+ virtual void AppendToText(RegExpText* text, Zone* zone) V8_OVERRIDE;
Vector<const uc16> data() { return data_; }
int length() { return data_.length(); }
private:
@@ -2297,18 +2588,18 @@ class RegExpAtom: public RegExpTree {
};
-class RegExpText: public RegExpTree {
+class RegExpText V8_FINAL : public RegExpTree {
public:
explicit RegExpText(Zone* zone) : elements_(2, zone), length_(0) {}
- virtual void* Accept(RegExpVisitor* visitor, void* data);
+ virtual void* Accept(RegExpVisitor* visitor, void* data) V8_OVERRIDE;
virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success);
- virtual RegExpText* AsText();
- virtual bool IsText();
- virtual bool IsTextElement() { return true; }
- virtual int min_match() { return length_; }
- virtual int max_match() { return length_; }
- virtual void AppendToText(RegExpText* text, Zone* zone);
+ RegExpNode* on_success) V8_OVERRIDE;
+ virtual RegExpText* AsText() V8_OVERRIDE;
+ virtual bool IsText() V8_OVERRIDE;
+ virtual bool IsTextElement() V8_OVERRIDE { return true; }
+ virtual int min_match() V8_OVERRIDE { return length_; }
+ virtual int max_match() V8_OVERRIDE { return length_; }
+ virtual void AppendToText(RegExpText* text, Zone* zone) V8_OVERRIDE;
void AddElement(TextElement elm, Zone* zone) {
elements_.Add(elm, zone);
length_ += elm.length();
@@ -2320,24 +2611,24 @@ class RegExpText: public RegExpTree {
};
-class RegExpQuantifier: public RegExpTree {
+class RegExpQuantifier V8_FINAL : public RegExpTree {
public:
- enum Type { GREEDY, NON_GREEDY, POSSESSIVE };
- RegExpQuantifier(int min, int max, Type type, RegExpTree* body)
+ enum QuantifierType { GREEDY, NON_GREEDY, POSSESSIVE };
+ RegExpQuantifier(int min, int max, QuantifierType type, RegExpTree* body)
: body_(body),
min_(min),
max_(max),
min_match_(min * body->min_match()),
- type_(type) {
+ quantifier_type_(type) {
if (max > 0 && body->max_match() > kInfinity / max) {
max_match_ = kInfinity;
} else {
max_match_ = max * body->max_match();
}
}
- virtual void* Accept(RegExpVisitor* visitor, void* data);
+ virtual void* Accept(RegExpVisitor* visitor, void* data) V8_OVERRIDE;
virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success);
+ RegExpNode* on_success) V8_OVERRIDE;
static RegExpNode* ToNode(int min,
int max,
bool is_greedy,
@@ -2345,16 +2636,16 @@ class RegExpQuantifier: public RegExpTree {
RegExpCompiler* compiler,
RegExpNode* on_success,
bool not_at_start = false);
- virtual RegExpQuantifier* AsQuantifier();
- virtual Interval CaptureRegisters();
- virtual bool IsQuantifier();
- virtual int min_match() { return min_match_; }
- virtual int max_match() { return max_match_; }
+ virtual RegExpQuantifier* AsQuantifier() V8_OVERRIDE;
+ virtual Interval CaptureRegisters() V8_OVERRIDE;
+ virtual bool IsQuantifier() V8_OVERRIDE;
+ virtual int min_match() V8_OVERRIDE { return min_match_; }
+ virtual int max_match() V8_OVERRIDE { return max_match_; }
int min() { return min_; }
int max() { return max_; }
- bool is_possessive() { return type_ == POSSESSIVE; }
- bool is_non_greedy() { return type_ == NON_GREEDY; }
- bool is_greedy() { return type_ == GREEDY; }
+ bool is_possessive() { return quantifier_type_ == POSSESSIVE; }
+ bool is_non_greedy() { return quantifier_type_ == NON_GREEDY; }
+ bool is_greedy() { return quantifier_type_ == GREEDY; }
RegExpTree* body() { return body_; }
private:
@@ -2363,28 +2654,28 @@ class RegExpQuantifier: public RegExpTree {
int max_;
int min_match_;
int max_match_;
- Type type_;
+ QuantifierType quantifier_type_;
};
-class RegExpCapture: public RegExpTree {
+class RegExpCapture V8_FINAL : public RegExpTree {
public:
explicit RegExpCapture(RegExpTree* body, int index)
: body_(body), index_(index) { }
- virtual void* Accept(RegExpVisitor* visitor, void* data);
+ virtual void* Accept(RegExpVisitor* visitor, void* data) V8_OVERRIDE;
virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success);
+ RegExpNode* on_success) V8_OVERRIDE;
static RegExpNode* ToNode(RegExpTree* body,
int index,
RegExpCompiler* compiler,
RegExpNode* on_success);
- virtual RegExpCapture* AsCapture();
- virtual bool IsAnchoredAtStart();
- virtual bool IsAnchoredAtEnd();
- virtual Interval CaptureRegisters();
- virtual bool IsCapture();
- virtual int min_match() { return body_->min_match(); }
- virtual int max_match() { return body_->max_match(); }
+ virtual RegExpCapture* AsCapture() V8_OVERRIDE;
+ virtual bool IsAnchoredAtStart() V8_OVERRIDE;
+ virtual bool IsAnchoredAtEnd() V8_OVERRIDE;
+ virtual Interval CaptureRegisters() V8_OVERRIDE;
+ virtual bool IsCapture() V8_OVERRIDE;
+ virtual int min_match() V8_OVERRIDE { return body_->min_match(); }
+ virtual int max_match() V8_OVERRIDE { return body_->max_match(); }
RegExpTree* body() { return body_; }
int index() { return index_; }
static int StartRegister(int index) { return index * 2; }
@@ -2396,7 +2687,7 @@ class RegExpCapture: public RegExpTree {
};
-class RegExpLookahead: public RegExpTree {
+class RegExpLookahead V8_FINAL : public RegExpTree {
public:
RegExpLookahead(RegExpTree* body,
bool is_positive,
@@ -2407,15 +2698,15 @@ class RegExpLookahead: public RegExpTree {
capture_count_(capture_count),
capture_from_(capture_from) { }
- virtual void* Accept(RegExpVisitor* visitor, void* data);
+ virtual void* Accept(RegExpVisitor* visitor, void* data) V8_OVERRIDE;
virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success);
- virtual RegExpLookahead* AsLookahead();
- virtual Interval CaptureRegisters();
- virtual bool IsLookahead();
- virtual bool IsAnchoredAtStart();
- virtual int min_match() { return 0; }
- virtual int max_match() { return 0; }
+ RegExpNode* on_success) V8_OVERRIDE;
+ virtual RegExpLookahead* AsLookahead() V8_OVERRIDE;
+ virtual Interval CaptureRegisters() V8_OVERRIDE;
+ virtual bool IsLookahead() V8_OVERRIDE;
+ virtual bool IsAnchoredAtStart() V8_OVERRIDE;
+ virtual int min_match() V8_OVERRIDE { return 0; }
+ virtual int max_match() V8_OVERRIDE { return 0; }
RegExpTree* body() { return body_; }
bool is_positive() { return is_positive_; }
int capture_count() { return capture_count_; }
@@ -2429,17 +2720,17 @@ class RegExpLookahead: public RegExpTree {
};
-class RegExpBackReference: public RegExpTree {
+class RegExpBackReference V8_FINAL : public RegExpTree {
public:
explicit RegExpBackReference(RegExpCapture* capture)
: capture_(capture) { }
- virtual void* Accept(RegExpVisitor* visitor, void* data);
+ virtual void* Accept(RegExpVisitor* visitor, void* data) V8_OVERRIDE;
virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success);
- virtual RegExpBackReference* AsBackReference();
- virtual bool IsBackReference();
- virtual int min_match() { return 0; }
- virtual int max_match() { return capture_->max_match(); }
+ RegExpNode* on_success) V8_OVERRIDE;
+ virtual RegExpBackReference* AsBackReference() V8_OVERRIDE;
+ virtual bool IsBackReference() V8_OVERRIDE;
+ virtual int min_match() V8_OVERRIDE { return 0; }
+ virtual int max_match() V8_OVERRIDE { return capture_->max_match(); }
int index() { return capture_->index(); }
RegExpCapture* capture() { return capture_; }
private:
@@ -2447,16 +2738,16 @@ class RegExpBackReference: public RegExpTree {
};
-class RegExpEmpty: public RegExpTree {
+class RegExpEmpty V8_FINAL : public RegExpTree {
public:
RegExpEmpty() { }
- virtual void* Accept(RegExpVisitor* visitor, void* data);
+ virtual void* Accept(RegExpVisitor* visitor, void* data) V8_OVERRIDE;
virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success);
- virtual RegExpEmpty* AsEmpty();
- virtual bool IsEmpty();
- virtual int min_match() { return 0; }
- virtual int max_match() { return 0; }
+ RegExpNode* on_success) V8_OVERRIDE;
+ virtual RegExpEmpty* AsEmpty() V8_OVERRIDE;
+ virtual bool IsEmpty() V8_OVERRIDE;
+ virtual int min_match() V8_OVERRIDE { return 0; }
+ virtual int max_match() V8_OVERRIDE { return 0; }
static RegExpEmpty* GetInstance() {
static RegExpEmpty* instance = ::new RegExpEmpty();
return instance;
@@ -2467,8 +2758,8 @@ class RegExpEmpty: public RegExpTree {
// ----------------------------------------------------------------------------
// Out-of-line inline constructors (to side-step cyclic dependencies).
-inline ModuleVariable::ModuleVariable(VariableProxy* proxy)
- : Module(proxy->interface()),
+inline ModuleVariable::ModuleVariable(VariableProxy* proxy, int pos)
+ : Module(proxy->interface(), pos),
proxy_(proxy) {
}
@@ -2479,40 +2770,51 @@ inline ModuleVariable::ModuleVariable(VariableProxy* proxy)
class AstVisitor BASE_EMBEDDED {
public:
- AstVisitor() : isolate_(Isolate::Current()), stack_overflow_(false) { }
- virtual ~AstVisitor() { }
+ AstVisitor() {}
+ virtual ~AstVisitor() {}
// Stack overflow check and dynamic dispatch.
- void Visit(AstNode* node) { if (!CheckStackOverflow()) node->Accept(this); }
+ virtual void Visit(AstNode* node) = 0;
// Iteration left-to-right.
virtual void VisitDeclarations(ZoneList<Declaration*>* declarations);
virtual void VisitStatements(ZoneList<Statement*>* statements);
virtual void VisitExpressions(ZoneList<Expression*>* expressions);
- // Stack overflow tracking support.
- bool HasStackOverflow() const { return stack_overflow_; }
- bool CheckStackOverflow();
-
- // If a stack-overflow exception is encountered when visiting a
- // node, calling SetStackOverflow will make sure that the visitor
- // bails out without visiting more nodes.
- void SetStackOverflow() { stack_overflow_ = true; }
- void ClearStackOverflow() { stack_overflow_ = false; }
-
// Individual AST nodes.
#define DEF_VISIT(type) \
virtual void Visit##type(type* node) = 0;
AST_NODE_LIST(DEF_VISIT)
#undef DEF_VISIT
+};
- protected:
- Isolate* isolate() { return isolate_; }
- private:
- Isolate* isolate_;
- bool stack_overflow_;
-};
+#define DEFINE_AST_VISITOR_SUBCLASS_MEMBERS() \
+public: \
+ virtual void Visit(AstNode* node) V8_FINAL V8_OVERRIDE { \
+ if (!CheckStackOverflow()) node->Accept(this); \
+ } \
+ \
+ void SetStackOverflow() { stack_overflow_ = true; } \
+ void ClearStackOverflow() { stack_overflow_ = false; } \
+ bool HasStackOverflow() const { return stack_overflow_; } \
+ \
+ bool CheckStackOverflow() { \
+ if (stack_overflow_) return true; \
+ StackLimitCheck check(isolate_); \
+ if (!check.HasOverflowed()) return false; \
+ return (stack_overflow_ = true); \
+ } \
+ \
+private: \
+ void InitializeAstVisitor(Isolate* isolate) { \
+ isolate_ = isolate; \
+ stack_overflow_ = false; \
+ } \
+ Isolate* isolate() { return isolate_; } \
+ \
+ Isolate* isolate_; \
+ bool stack_overflow_
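// Illustrative sketch (not from the patch): the shape a concrete visitor takes after
// this refactoring. It derives from AstVisitor, overrides the per-node Visit##type
// methods, and pulls in the Visit()/stack-overflow plumbing via the macro above.
// "NodeCounter" is a hypothetical name; it counts only the nodes it is handed and
// does not recurse into children.
class NodeCounter V8_FINAL : public AstVisitor {
 public:
  explicit NodeCounter(Isolate* isolate) : count_(0) {
    InitializeAstVisitor(isolate);  // supplied by DEFINE_AST_VISITOR_SUBCLASS_MEMBERS
  }
  int count() const { return count_; }

#define DEF_VISIT(type) \
  virtual void Visit##type(type* node) V8_OVERRIDE { count_++; }
  AST_NODE_LIST(DEF_VISIT)
#undef DEF_VISIT

 private:
  int count_;
  DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
};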
// ----------------------------------------------------------------------------
@@ -2520,9 +2822,10 @@ class AstVisitor BASE_EMBEDDED {
class AstConstructionVisitor BASE_EMBEDDED {
public:
- AstConstructionVisitor() { }
+ AstConstructionVisitor() : dont_optimize_reason_(kNoReason) { }
AstProperties* ast_properties() { return &properties_; }
+ BailoutReason dont_optimize_reason() { return dont_optimize_reason_; }
private:
template<class> friend class AstNodeFactory;
@@ -2535,8 +2838,12 @@ class AstConstructionVisitor BASE_EMBEDDED {
void increase_node_count() { properties_.add_node_count(1); }
void add_flag(AstPropertiesFlag flag) { properties_.flags()->Add(flag); }
+ void set_dont_optimize_reason(BailoutReason reason) {
+ dont_optimize_reason_ = reason;
+ }
AstProperties properties_;
+ BailoutReason dont_optimize_reason_;
};
@@ -2555,7 +2862,7 @@ class AstNullVisitor BASE_EMBEDDED {
// AstNode factory
template<class Visitor>
-class AstNodeFactory BASE_EMBEDDED {
+class AstNodeFactory V8_FINAL BASE_EMBEDDED {
public:
AstNodeFactory(Isolate* isolate, Zone* zone)
: isolate_(isolate),
@@ -2569,115 +2876,147 @@ class AstNodeFactory BASE_EMBEDDED {
VariableDeclaration* NewVariableDeclaration(VariableProxy* proxy,
VariableMode mode,
- Scope* scope) {
+ Scope* scope,
+ int pos) {
VariableDeclaration* decl =
- new(zone_) VariableDeclaration(proxy, mode, scope);
+ new(zone_) VariableDeclaration(proxy, mode, scope, pos);
VISIT_AND_RETURN(VariableDeclaration, decl)
}
FunctionDeclaration* NewFunctionDeclaration(VariableProxy* proxy,
VariableMode mode,
FunctionLiteral* fun,
- Scope* scope) {
+ Scope* scope,
+ int pos) {
FunctionDeclaration* decl =
- new(zone_) FunctionDeclaration(proxy, mode, fun, scope);
+ new(zone_) FunctionDeclaration(proxy, mode, fun, scope, pos);
VISIT_AND_RETURN(FunctionDeclaration, decl)
}
ModuleDeclaration* NewModuleDeclaration(VariableProxy* proxy,
Module* module,
- Scope* scope) {
+ Scope* scope,
+ int pos) {
ModuleDeclaration* decl =
- new(zone_) ModuleDeclaration(proxy, module, scope);
+ new(zone_) ModuleDeclaration(proxy, module, scope, pos);
VISIT_AND_RETURN(ModuleDeclaration, decl)
}
ImportDeclaration* NewImportDeclaration(VariableProxy* proxy,
Module* module,
- Scope* scope) {
+ Scope* scope,
+ int pos) {
ImportDeclaration* decl =
- new(zone_) ImportDeclaration(proxy, module, scope);
+ new(zone_) ImportDeclaration(proxy, module, scope, pos);
VISIT_AND_RETURN(ImportDeclaration, decl)
}
ExportDeclaration* NewExportDeclaration(VariableProxy* proxy,
- Scope* scope) {
+ Scope* scope,
+ int pos) {
ExportDeclaration* decl =
- new(zone_) ExportDeclaration(proxy, scope);
+ new(zone_) ExportDeclaration(proxy, scope, pos);
VISIT_AND_RETURN(ExportDeclaration, decl)
}
- ModuleLiteral* NewModuleLiteral(Block* body, Interface* interface) {
- ModuleLiteral* module = new(zone_) ModuleLiteral(body, interface);
+ ModuleLiteral* NewModuleLiteral(Block* body, Interface* interface, int pos) {
+ ModuleLiteral* module = new(zone_) ModuleLiteral(body, interface, pos);
VISIT_AND_RETURN(ModuleLiteral, module)
}
- ModuleVariable* NewModuleVariable(VariableProxy* proxy) {
- ModuleVariable* module = new(zone_) ModuleVariable(proxy);
+ ModuleVariable* NewModuleVariable(VariableProxy* proxy, int pos) {
+ ModuleVariable* module = new(zone_) ModuleVariable(proxy, pos);
VISIT_AND_RETURN(ModuleVariable, module)
}
- ModulePath* NewModulePath(Module* origin, Handle<String> name) {
- ModulePath* module = new(zone_) ModulePath(origin, name, zone_);
+ ModulePath* NewModulePath(Module* origin, Handle<String> name, int pos) {
+ ModulePath* module = new(zone_) ModulePath(origin, name, zone_, pos);
VISIT_AND_RETURN(ModulePath, module)
}
- ModuleUrl* NewModuleUrl(Handle<String> url) {
- ModuleUrl* module = new(zone_) ModuleUrl(url, zone_);
+ ModuleUrl* NewModuleUrl(Handle<String> url, int pos) {
+ ModuleUrl* module = new(zone_) ModuleUrl(url, zone_, pos);
VISIT_AND_RETURN(ModuleUrl, module)
}
Block* NewBlock(ZoneStringList* labels,
int capacity,
- bool is_initializer_block) {
+ bool is_initializer_block,
+ int pos) {
Block* block = new(zone_) Block(
- isolate_, labels, capacity, is_initializer_block, zone_);
+ isolate_, labels, capacity, is_initializer_block, pos, zone_);
VISIT_AND_RETURN(Block, block)
}
#define STATEMENT_WITH_LABELS(NodeType) \
- NodeType* New##NodeType(ZoneStringList* labels) { \
- NodeType* stmt = new(zone_) NodeType(isolate_, labels); \
+ NodeType* New##NodeType(ZoneStringList* labels, int pos) { \
+ NodeType* stmt = new(zone_) NodeType(isolate_, labels, pos); \
VISIT_AND_RETURN(NodeType, stmt); \
}
STATEMENT_WITH_LABELS(DoWhileStatement)
STATEMENT_WITH_LABELS(WhileStatement)
STATEMENT_WITH_LABELS(ForStatement)
- STATEMENT_WITH_LABELS(ForInStatement)
STATEMENT_WITH_LABELS(SwitchStatement)
#undef STATEMENT_WITH_LABELS
- ExpressionStatement* NewExpressionStatement(Expression* expression) {
- ExpressionStatement* stmt = new(zone_) ExpressionStatement(expression);
+ ForEachStatement* NewForEachStatement(ForEachStatement::VisitMode visit_mode,
+ ZoneStringList* labels,
+ int pos) {
+ switch (visit_mode) {
+ case ForEachStatement::ENUMERATE: {
+ ForInStatement* stmt = new(zone_) ForInStatement(isolate_, labels, pos);
+ VISIT_AND_RETURN(ForInStatement, stmt);
+ }
+ case ForEachStatement::ITERATE: {
+ ForOfStatement* stmt = new(zone_) ForOfStatement(isolate_, labels, pos);
+ VISIT_AND_RETURN(ForOfStatement, stmt);
+ }
+ }
+ UNREACHABLE();
+ return NULL;
+ }
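A hypothetical call site for the new entry point (the parser-side variables below are assumed to be in scope): one factory method covers both loop flavours and returns the concrete ForInStatement or ForOfStatement node.

ForEachStatement* loop = factory()->NewForEachStatement(
    for_of_seen ? ForEachStatement::ITERATE : ForEachStatement::ENUMERATE,
    labels, position);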
+
+ ModuleStatement* NewModuleStatement(
+ VariableProxy* proxy, Block* body, int pos) {
+ ModuleStatement* stmt = new(zone_) ModuleStatement(proxy, body, pos);
+ VISIT_AND_RETURN(ModuleStatement, stmt)
+ }
+
+ ExpressionStatement* NewExpressionStatement(Expression* expression, int pos) {
+ ExpressionStatement* stmt = new(zone_) ExpressionStatement(expression, pos);
VISIT_AND_RETURN(ExpressionStatement, stmt)
}
- ContinueStatement* NewContinueStatement(IterationStatement* target) {
- ContinueStatement* stmt = new(zone_) ContinueStatement(target);
+ ContinueStatement* NewContinueStatement(IterationStatement* target, int pos) {
+ ContinueStatement* stmt = new(zone_) ContinueStatement(target, pos);
VISIT_AND_RETURN(ContinueStatement, stmt)
}
- BreakStatement* NewBreakStatement(BreakableStatement* target) {
- BreakStatement* stmt = new(zone_) BreakStatement(target);
+ BreakStatement* NewBreakStatement(BreakableStatement* target, int pos) {
+ BreakStatement* stmt = new(zone_) BreakStatement(target, pos);
VISIT_AND_RETURN(BreakStatement, stmt)
}
- ReturnStatement* NewReturnStatement(Expression* expression) {
- ReturnStatement* stmt = new(zone_) ReturnStatement(expression);
+ ReturnStatement* NewReturnStatement(Expression* expression, int pos) {
+ ReturnStatement* stmt = new(zone_) ReturnStatement(expression, pos);
VISIT_AND_RETURN(ReturnStatement, stmt)
}
- WithStatement* NewWithStatement(Expression* expression,
- Statement* statement) {
- WithStatement* stmt = new(zone_) WithStatement(expression, statement);
+ WithStatement* NewWithStatement(Scope* scope,
+ Expression* expression,
+ Statement* statement,
+ int pos) {
+ WithStatement* stmt = new(zone_) WithStatement(
+ scope, expression, statement, pos);
VISIT_AND_RETURN(WithStatement, stmt)
}
IfStatement* NewIfStatement(Expression* condition,
Statement* then_statement,
- Statement* else_statement) {
+ Statement* else_statement,
+ int pos) {
IfStatement* stmt = new(zone_) IfStatement(
- isolate_, condition, then_statement, else_statement);
+ isolate_, condition, then_statement, else_statement, pos);
VISIT_AND_RETURN(IfStatement, stmt)
}
@@ -2685,36 +3024,45 @@ class AstNodeFactory BASE_EMBEDDED {
Block* try_block,
Scope* scope,
Variable* variable,
- Block* catch_block) {
+ Block* catch_block,
+ int pos) {
TryCatchStatement* stmt = new(zone_) TryCatchStatement(
- index, try_block, scope, variable, catch_block);
+ index, try_block, scope, variable, catch_block, pos);
VISIT_AND_RETURN(TryCatchStatement, stmt)
}
TryFinallyStatement* NewTryFinallyStatement(int index,
Block* try_block,
- Block* finally_block) {
+ Block* finally_block,
+ int pos) {
TryFinallyStatement* stmt =
- new(zone_) TryFinallyStatement(index, try_block, finally_block);
+ new(zone_) TryFinallyStatement(index, try_block, finally_block, pos);
VISIT_AND_RETURN(TryFinallyStatement, stmt)
}
- DebuggerStatement* NewDebuggerStatement() {
- DebuggerStatement* stmt = new(zone_) DebuggerStatement();
+ DebuggerStatement* NewDebuggerStatement(int pos) {
+ DebuggerStatement* stmt = new(zone_) DebuggerStatement(pos);
VISIT_AND_RETURN(DebuggerStatement, stmt)
}
- EmptyStatement* NewEmptyStatement() {
- return new(zone_) EmptyStatement();
+ EmptyStatement* NewEmptyStatement(int pos) {
+ return new(zone_) EmptyStatement(pos);
}
- Literal* NewLiteral(Handle<Object> handle) {
- Literal* lit = new(zone_) Literal(isolate_, handle);
+ CaseClause* NewCaseClause(
+ Expression* label, ZoneList<Statement*>* statements, int pos) {
+ CaseClause* clause =
+ new(zone_) CaseClause(isolate_, label, statements, pos);
+ VISIT_AND_RETURN(CaseClause, clause)
+ }
+
+ Literal* NewLiteral(Handle<Object> handle, int pos) {
+ Literal* lit = new(zone_) Literal(isolate_, handle, pos);
VISIT_AND_RETURN(Literal, lit)
}
- Literal* NewNumberLiteral(double number) {
- return NewLiteral(isolate_->factory()->NewNumber(number, TENURED));
+ Literal* NewNumberLiteral(double number, int pos) {
+ return NewLiteral(isolate_->factory()->NewNumber(number, TENURED), pos);
}
ObjectLiteral* NewObjectLiteral(
@@ -2724,26 +3072,30 @@ class AstNodeFactory BASE_EMBEDDED {
bool is_simple,
bool fast_elements,
int depth,
- bool has_function) {
+ bool may_store_doubles,
+ bool has_function,
+ int pos) {
ObjectLiteral* lit = new(zone_) ObjectLiteral(
isolate_, constant_properties, properties, literal_index,
- is_simple, fast_elements, depth, has_function);
+ is_simple, fast_elements, depth, may_store_doubles, has_function, pos);
VISIT_AND_RETURN(ObjectLiteral, lit)
}
ObjectLiteral::Property* NewObjectLiteralProperty(bool is_getter,
- FunctionLiteral* value) {
+ FunctionLiteral* value,
+ int pos) {
ObjectLiteral::Property* prop =
new(zone_) ObjectLiteral::Property(is_getter, value);
- prop->set_key(NewLiteral(value->name()));
+ prop->set_key(NewLiteral(value->name(), pos));
return prop; // Not an AST node, will not be visited.
}
RegExpLiteral* NewRegExpLiteral(Handle<String> pattern,
Handle<String> flags,
- int literal_index) {
+ int literal_index,
+ int pos) {
RegExpLiteral* lit =
- new(zone_) RegExpLiteral(isolate_, pattern, flags, literal_index);
+ new(zone_) RegExpLiteral(isolate_, pattern, flags, literal_index, pos);
VISIT_AND_RETURN(RegExpLiteral, lit);
}
@@ -2751,14 +3103,17 @@ class AstNodeFactory BASE_EMBEDDED {
ZoneList<Expression*>* values,
int literal_index,
bool is_simple,
- int depth) {
+ int depth,
+ int pos) {
ArrayLiteral* lit = new(zone_) ArrayLiteral(
- isolate_, constant_elements, values, literal_index, is_simple, depth);
+ isolate_, constant_elements, values, literal_index, is_simple,
+ depth, pos);
VISIT_AND_RETURN(ArrayLiteral, lit)
}
- VariableProxy* NewVariableProxy(Variable* var) {
- VariableProxy* proxy = new(zone_) VariableProxy(isolate_, var);
+ VariableProxy* NewVariableProxy(Variable* var,
+ int pos = RelocInfo::kNoPosition) {
+ VariableProxy* proxy = new(zone_) VariableProxy(isolate_, var, pos);
VISIT_AND_RETURN(VariableProxy, proxy)
}
@@ -2792,9 +3147,10 @@ class AstNodeFactory BASE_EMBEDDED {
CallRuntime* NewCallRuntime(Handle<String> name,
const Runtime::Function* function,
- ZoneList<Expression*>* arguments) {
+ ZoneList<Expression*>* arguments,
+ int pos) {
CallRuntime* call =
- new(zone_) CallRuntime(isolate_, name, function, arguments);
+ new(zone_) CallRuntime(isolate_, name, function, arguments, pos);
VISIT_AND_RETURN(CallRuntime, call)
}
@@ -2836,11 +3192,9 @@ class AstNodeFactory BASE_EMBEDDED {
Conditional* NewConditional(Expression* condition,
Expression* then_expression,
Expression* else_expression,
- int then_expression_position,
- int else_expression_position) {
+ int position) {
Conditional* cond = new(zone_) Conditional(
- isolate_, condition, then_expression, else_expression,
- then_expression_position, else_expression_position);
+ isolate_, condition, then_expression, else_expression, position);
VISIT_AND_RETURN(Conditional, cond)
}
@@ -2854,6 +3208,15 @@ class AstNodeFactory BASE_EMBEDDED {
VISIT_AND_RETURN(Assignment, assign)
}
+  Yield* NewYield(Expression* generator_object,
+  Yield* NewYield(Expression* generator_object,
+ Expression* expression,
+ Yield::Kind yield_kind,
+ int pos) {
+ Yield* yield = new(zone_) Yield(
+ isolate_, generator_object, expression, yield_kind, pos);
+ VISIT_AND_RETURN(Yield, yield)
+ }
+
Throw* NewThrow(Expression* exception, int pos) {
Throw* t = new(zone_) Throw(isolate_, exception, pos);
VISIT_AND_RETURN(Throw, t)
@@ -2866,19 +3229,18 @@ class AstNodeFactory BASE_EMBEDDED {
int materialized_literal_count,
int expected_property_count,
int handler_count,
- bool has_only_simple_this_property_assignments,
- Handle<FixedArray> this_property_assignments,
int parameter_count,
FunctionLiteral::ParameterFlag has_duplicate_parameters,
- FunctionLiteral::Type type,
+ FunctionLiteral::FunctionType function_type,
FunctionLiteral::IsFunctionFlag is_function,
- FunctionLiteral::IsParenthesizedFlag is_parenthesized) {
+ FunctionLiteral::IsParenthesizedFlag is_parenthesized,
+ FunctionLiteral::IsGeneratorFlag is_generator,
+ int position) {
FunctionLiteral* lit = new(zone_) FunctionLiteral(
isolate_, name, scope, body,
materialized_literal_count, expected_property_count, handler_count,
- has_only_simple_this_property_assignments, this_property_assignments,
- parameter_count, type, has_duplicate_parameters, is_function,
- is_parenthesized);
+ parameter_count, function_type, has_duplicate_parameters, is_function,
+ is_parenthesized, is_generator, position);
// Top-level literal doesn't count for the AST's properties.
if (is_function == FunctionLiteral::kIsFunction) {
visitor_.VisitFunctionLiteral(lit);
@@ -2886,15 +3248,15 @@ class AstNodeFactory BASE_EMBEDDED {
return lit;
}
- SharedFunctionInfoLiteral* NewSharedFunctionInfoLiteral(
- Handle<SharedFunctionInfo> shared_function_info) {
- SharedFunctionInfoLiteral* lit =
- new(zone_) SharedFunctionInfoLiteral(isolate_, shared_function_info);
- VISIT_AND_RETURN(SharedFunctionInfoLiteral, lit)
+ NativeFunctionLiteral* NewNativeFunctionLiteral(
+ Handle<String> name, v8::Extension* extension, int pos) {
+ NativeFunctionLiteral* lit =
+ new(zone_) NativeFunctionLiteral(isolate_, name, extension, pos);
+ VISIT_AND_RETURN(NativeFunctionLiteral, lit)
}
- ThisFunction* NewThisFunction() {
- ThisFunction* fun = new(zone_) ThisFunction(isolate_);
+ ThisFunction* NewThisFunction(int pos) {
+ ThisFunction* fun = new(zone_) ThisFunction(isolate_, pos);
VISIT_AND_RETURN(ThisFunction, fun)
}
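A hedged illustration of the position threading introduced throughout this factory: every method now receives the source position explicitly instead of the node deriving it elsewhere. The surrounding names (factory(), scanner(), and the statement being built) are assumed, e.g. inside the parser.

int pos = scanner()->location().beg_pos;               // assumed accessor
Expression* expr = factory()->NewNumberLiteral(1.0, pos);
ReturnStatement* ret = factory()->NewReturnStatement(expr, pos);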
diff --git a/deps/v8/src/atomicops.h b/deps/v8/src/atomicops.h
index 1f0c44a67..789721edf 100644
--- a/deps/v8/src/atomicops.h
+++ b/deps/v8/src/atomicops.h
@@ -58,7 +58,7 @@ typedef int32_t Atomic32;
#ifdef V8_HOST_ARCH_64_BIT
// We need to be able to go between Atomic64 and AtomicWord implicitly. This
// means Atomic64 and AtomicWord should be the same type on 64-bit.
-#if defined(__APPLE__)
+#if defined(__ILP32__) || defined(__APPLE__)
// MacOS is an exception to the implicit conversion rule above,
// because it uses long for intptr_t.
typedef int64_t Atomic64;
@@ -151,18 +151,17 @@ Atomic64 Release_Load(volatile const Atomic64* ptr);
} } // namespace v8::internal
// Include our platform specific implementation.
-#if defined(_MSC_VER) && \
- (defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_X64))
+#if defined(THREAD_SANITIZER)
+#include "atomicops_internals_tsan.h"
+#elif defined(_MSC_VER) && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
#include "atomicops_internals_x86_msvc.h"
-#elif defined(__APPLE__) && \
- (defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_X64))
+#elif defined(__APPLE__) && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
#include "atomicops_internals_x86_macosx.h"
-#elif defined(__GNUC__) && \
- (defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_X64))
+#elif defined(__GNUC__) && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
#include "atomicops_internals_x86_gcc.h"
-#elif defined(__GNUC__) && defined(V8_HOST_ARCH_ARM)
+#elif defined(__GNUC__) && V8_HOST_ARCH_ARM
#include "atomicops_internals_arm_gcc.h"
-#elif defined(__GNUC__) && defined(V8_HOST_ARCH_MIPS)
+#elif defined(__GNUC__) && V8_HOST_ARCH_MIPS
#include "atomicops_internals_mips_gcc.h"
#else
#error "Atomic operations are not supported on your platform"
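The dropped defined() checks rely on a convention that is assumed here: the build configuration (globals.h at this revision) defines exactly one V8_HOST_ARCH_* macro to 1 and leaves the others undefined, and an undefined identifier evaluates to 0 inside #if/#elif, so the extra defined() wrapper adds nothing. A short sketch of the pattern:

// Exactly one of these is defined to 1 by the host-architecture detection.
#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
// x86-family host: the macro expands to 1, the branch is taken.
#elif V8_HOST_ARCH_ARM
// ARM host; on other hosts the identifier is undefined and evaluates to 0.
#endif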
diff --git a/deps/v8/src/atomicops_internals_mips_gcc.h b/deps/v8/src/atomicops_internals_mips_gcc.h
index 9498fd76e..cb8f8b9d9 100644
--- a/deps/v8/src/atomicops_internals_mips_gcc.h
+++ b/deps/v8/src/atomicops_internals_mips_gcc.h
@@ -30,8 +30,6 @@
#ifndef V8_ATOMICOPS_INTERNALS_MIPS_GCC_H_
#define V8_ATOMICOPS_INTERNALS_MIPS_GCC_H_
-#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
-
namespace v8 {
namespace internal {
@@ -111,9 +109,9 @@ inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
- ATOMICOPS_COMPILER_BARRIER();
+ MemoryBarrier();
Atomic32 res = NoBarrier_AtomicIncrement(ptr, increment);
- ATOMICOPS_COMPILER_BARRIER();
+ MemoryBarrier();
return res;
}
@@ -126,19 +124,16 @@ inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
- ATOMICOPS_COMPILER_BARRIER();
Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
- ATOMICOPS_COMPILER_BARRIER();
+ MemoryBarrier();
return res;
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
- ATOMICOPS_COMPILER_BARRIER();
- Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
- ATOMICOPS_COMPILER_BARRIER();
- return res;
+ MemoryBarrier();
+ return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
@@ -176,6 +171,4 @@ inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
} } // namespace v8::internal
-#undef ATOMICOPS_COMPILER_BARRIER
-
#endif // V8_ATOMICOPS_INTERNALS_MIPS_GCC_H_
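A minimal usage sketch (assuming, as the names suggest, that these helpers provide acquire and release compare-and-swap semantics) showing the kind of caller that depends on the reordered barriers above, for example a simple spinlock:

// assumes #include "atomicops.h"
void Lock(volatile v8::internal::Atomic32* lock_word) {
  // Acquire semantics: the critical section cannot be reordered before the CAS.
  while (v8::internal::Acquire_CompareAndSwap(lock_word, 0, 1) != 0) {
    // spin until the previous value was 0, i.e. the lock was free
  }
}

void Unlock(volatile v8::internal::Atomic32* lock_word) {
  // Release semantics: writes inside the critical section become visible
  // before the lock word is cleared.
  v8::internal::Release_CompareAndSwap(lock_word, 1, 0);
}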
diff --git a/deps/v8/src/atomicops_internals_tsan.h b/deps/v8/src/atomicops_internals_tsan.h
new file mode 100644
index 000000000..b5162bad9
--- /dev/null
+++ b/deps/v8/src/atomicops_internals_tsan.h
@@ -0,0 +1,400 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+// This file is an internal atomic implementation for compiler-based
+// ThreadSanitizer. Use base/atomicops.h instead.
+
+#ifndef V8_ATOMICOPS_INTERNALS_TSAN_H_
+#define V8_ATOMICOPS_INTERNALS_TSAN_H_
+
+namespace v8 {
+namespace internal {
+
+#ifndef TSAN_INTERFACE_ATOMIC_H
+#define TSAN_INTERFACE_ATOMIC_H
+
+// This struct is not part of the public API of this module; clients may not
+// use it. (However, it's exported via BASE_EXPORT because clients implicitly
+// do use it at link time by inlining these functions.)
+// Features of the host x86 CPU. Values may not be correct before main() is
+// run, but are set conservatively.
+struct AtomicOps_x86CPUFeatureStruct {
+ bool has_amd_lock_mb_bug; // Processor has AMD memory-barrier bug; do lfence
+ // after acquire compare-and-swap.
+ bool has_sse2; // Processor has SSE2.
+};
+extern struct AtomicOps_x86CPUFeatureStruct
+ AtomicOps_Internalx86CPUFeatures;
+
+#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef char __tsan_atomic8;
+typedef short __tsan_atomic16; // NOLINT
+typedef int __tsan_atomic32;
+typedef long __tsan_atomic64; // NOLINT
+
+#if defined(__SIZEOF_INT128__) \
+ || (__clang_major__ * 100 + __clang_minor__ >= 302)
+typedef __int128 __tsan_atomic128;
+#define __TSAN_HAS_INT128 1
+#else
+typedef char __tsan_atomic128;
+#define __TSAN_HAS_INT128 0
+#endif
+
+typedef enum {
+ __tsan_memory_order_relaxed,
+ __tsan_memory_order_consume,
+ __tsan_memory_order_acquire,
+ __tsan_memory_order_release,
+ __tsan_memory_order_acq_rel,
+ __tsan_memory_order_seq_cst,
+} __tsan_memory_order;
+
+__tsan_atomic8 __tsan_atomic8_load(const volatile __tsan_atomic8 *a,
+ __tsan_memory_order mo);
+__tsan_atomic16 __tsan_atomic16_load(const volatile __tsan_atomic16 *a,
+ __tsan_memory_order mo);
+__tsan_atomic32 __tsan_atomic32_load(const volatile __tsan_atomic32 *a,
+ __tsan_memory_order mo);
+__tsan_atomic64 __tsan_atomic64_load(const volatile __tsan_atomic64 *a,
+ __tsan_memory_order mo);
+__tsan_atomic128 __tsan_atomic128_load(const volatile __tsan_atomic128 *a,
+ __tsan_memory_order mo);
+
+void __tsan_atomic8_store(volatile __tsan_atomic8 *a, __tsan_atomic8 v,
+ __tsan_memory_order mo);
+void __tsan_atomic16_store(volatile __tsan_atomic16 *a, __tsan_atomic16 v,
+ __tsan_memory_order mo);
+void __tsan_atomic32_store(volatile __tsan_atomic32 *a, __tsan_atomic32 v,
+ __tsan_memory_order mo);
+void __tsan_atomic64_store(volatile __tsan_atomic64 *a, __tsan_atomic64 v,
+ __tsan_memory_order mo);
+void __tsan_atomic128_store(volatile __tsan_atomic128 *a, __tsan_atomic128 v,
+ __tsan_memory_order mo);
+
+__tsan_atomic8 __tsan_atomic8_exchange(volatile __tsan_atomic8 *a,
+ __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic16 __tsan_atomic16_exchange(volatile __tsan_atomic16 *a,
+ __tsan_atomic16 v, __tsan_memory_order mo);
+__tsan_atomic32 __tsan_atomic32_exchange(volatile __tsan_atomic32 *a,
+ __tsan_atomic32 v, __tsan_memory_order mo);
+__tsan_atomic64 __tsan_atomic64_exchange(volatile __tsan_atomic64 *a,
+ __tsan_atomic64 v, __tsan_memory_order mo);
+__tsan_atomic128 __tsan_atomic128_exchange(volatile __tsan_atomic128 *a,
+ __tsan_atomic128 v, __tsan_memory_order mo);
+
+__tsan_atomic8 __tsan_atomic8_fetch_add(volatile __tsan_atomic8 *a,
+ __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic16 __tsan_atomic16_fetch_add(volatile __tsan_atomic16 *a,
+ __tsan_atomic16 v, __tsan_memory_order mo);
+__tsan_atomic32 __tsan_atomic32_fetch_add(volatile __tsan_atomic32 *a,
+ __tsan_atomic32 v, __tsan_memory_order mo);
+__tsan_atomic64 __tsan_atomic64_fetch_add(volatile __tsan_atomic64 *a,
+ __tsan_atomic64 v, __tsan_memory_order mo);
+__tsan_atomic128 __tsan_atomic128_fetch_add(volatile __tsan_atomic128 *a,
+ __tsan_atomic128 v, __tsan_memory_order mo);
+
+__tsan_atomic8 __tsan_atomic8_fetch_and(volatile __tsan_atomic8 *a,
+ __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic16 __tsan_atomic16_fetch_and(volatile __tsan_atomic16 *a,
+ __tsan_atomic16 v, __tsan_memory_order mo);
+__tsan_atomic32 __tsan_atomic32_fetch_and(volatile __tsan_atomic32 *a,
+ __tsan_atomic32 v, __tsan_memory_order mo);
+__tsan_atomic64 __tsan_atomic64_fetch_and(volatile __tsan_atomic64 *a,
+ __tsan_atomic64 v, __tsan_memory_order mo);
+__tsan_atomic128 __tsan_atomic128_fetch_and(volatile __tsan_atomic128 *a,
+ __tsan_atomic128 v, __tsan_memory_order mo);
+
+__tsan_atomic8 __tsan_atomic8_fetch_or(volatile __tsan_atomic8 *a,
+ __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic16 __tsan_atomic16_fetch_or(volatile __tsan_atomic16 *a,
+ __tsan_atomic16 v, __tsan_memory_order mo);
+__tsan_atomic32 __tsan_atomic32_fetch_or(volatile __tsan_atomic32 *a,
+ __tsan_atomic32 v, __tsan_memory_order mo);
+__tsan_atomic64 __tsan_atomic64_fetch_or(volatile __tsan_atomic64 *a,
+ __tsan_atomic64 v, __tsan_memory_order mo);
+__tsan_atomic128 __tsan_atomic128_fetch_or(volatile __tsan_atomic128 *a,
+ __tsan_atomic128 v, __tsan_memory_order mo);
+
+__tsan_atomic8 __tsan_atomic8_fetch_xor(volatile __tsan_atomic8 *a,
+ __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic16 __tsan_atomic16_fetch_xor(volatile __tsan_atomic16 *a,
+ __tsan_atomic16 v, __tsan_memory_order mo);
+__tsan_atomic32 __tsan_atomic32_fetch_xor(volatile __tsan_atomic32 *a,
+ __tsan_atomic32 v, __tsan_memory_order mo);
+__tsan_atomic64 __tsan_atomic64_fetch_xor(volatile __tsan_atomic64 *a,
+ __tsan_atomic64 v, __tsan_memory_order mo);
+__tsan_atomic128 __tsan_atomic128_fetch_xor(volatile __tsan_atomic128 *a,
+ __tsan_atomic128 v, __tsan_memory_order mo);
+
+__tsan_atomic8 __tsan_atomic8_fetch_nand(volatile __tsan_atomic8 *a,
+ __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic16 __tsan_atomic16_fetch_nand(volatile __tsan_atomic16 *a,
+ __tsan_atomic16 v, __tsan_memory_order mo);
+__tsan_atomic32 __tsan_atomic32_fetch_nand(volatile __tsan_atomic32 *a,
+ __tsan_atomic32 v, __tsan_memory_order mo);
+__tsan_atomic64 __tsan_atomic64_fetch_nand(volatile __tsan_atomic64 *a,
+ __tsan_atomic64 v, __tsan_memory_order mo);
+__tsan_atomic128 __tsan_atomic128_fetch_nand(volatile __tsan_atomic128 *a,
+    __tsan_atomic128 v, __tsan_memory_order mo);
+
+int __tsan_atomic8_compare_exchange_weak(volatile __tsan_atomic8 *a,
+ __tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo,
+ __tsan_memory_order fail_mo);
+int __tsan_atomic16_compare_exchange_weak(volatile __tsan_atomic16 *a,
+ __tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo,
+ __tsan_memory_order fail_mo);
+int __tsan_atomic32_compare_exchange_weak(volatile __tsan_atomic32 *a,
+ __tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo,
+ __tsan_memory_order fail_mo);
+int __tsan_atomic64_compare_exchange_weak(volatile __tsan_atomic64 *a,
+ __tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo,
+ __tsan_memory_order fail_mo);
+int __tsan_atomic128_compare_exchange_weak(volatile __tsan_atomic128 *a,
+ __tsan_atomic128 *c, __tsan_atomic128 v, __tsan_memory_order mo,
+ __tsan_memory_order fail_mo);
+
+int __tsan_atomic8_compare_exchange_strong(volatile __tsan_atomic8 *a,
+ __tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo,
+ __tsan_memory_order fail_mo);
+int __tsan_atomic16_compare_exchange_strong(volatile __tsan_atomic16 *a,
+ __tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo,
+ __tsan_memory_order fail_mo);
+int __tsan_atomic32_compare_exchange_strong(volatile __tsan_atomic32 *a,
+ __tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo,
+ __tsan_memory_order fail_mo);
+int __tsan_atomic64_compare_exchange_strong(volatile __tsan_atomic64 *a,
+ __tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo,
+ __tsan_memory_order fail_mo);
+int __tsan_atomic128_compare_exchange_strong(volatile __tsan_atomic128 *a,
+ __tsan_atomic128 *c, __tsan_atomic128 v, __tsan_memory_order mo,
+ __tsan_memory_order fail_mo);
+
+__tsan_atomic8 __tsan_atomic8_compare_exchange_val(
+ volatile __tsan_atomic8 *a, __tsan_atomic8 c, __tsan_atomic8 v,
+ __tsan_memory_order mo, __tsan_memory_order fail_mo);
+__tsan_atomic16 __tsan_atomic16_compare_exchange_val(
+ volatile __tsan_atomic16 *a, __tsan_atomic16 c, __tsan_atomic16 v,
+ __tsan_memory_order mo, __tsan_memory_order fail_mo);
+__tsan_atomic32 __tsan_atomic32_compare_exchange_val(
+ volatile __tsan_atomic32 *a, __tsan_atomic32 c, __tsan_atomic32 v,
+ __tsan_memory_order mo, __tsan_memory_order fail_mo);
+__tsan_atomic64 __tsan_atomic64_compare_exchange_val(
+ volatile __tsan_atomic64 *a, __tsan_atomic64 c, __tsan_atomic64 v,
+ __tsan_memory_order mo, __tsan_memory_order fail_mo);
+__tsan_atomic128 __tsan_atomic128_compare_exchange_val(
+ volatile __tsan_atomic128 *a, __tsan_atomic128 c, __tsan_atomic128 v,
+ __tsan_memory_order mo, __tsan_memory_order fail_mo);
+
+void __tsan_atomic_thread_fence(__tsan_memory_order mo);
+void __tsan_atomic_signal_fence(__tsan_memory_order mo);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // #ifndef TSAN_INTERFACE_ATOMIC_H
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32 *ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ Atomic32 cmp = old_value;
+ __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
+ __tsan_memory_order_relaxed, __tsan_memory_order_relaxed);
+ return cmp;
+}
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32 *ptr,
+ Atomic32 new_value) {
+ return __tsan_atomic32_exchange(ptr, new_value,
+ __tsan_memory_order_relaxed);
+}
+
+inline Atomic32 Acquire_AtomicExchange(volatile Atomic32 *ptr,
+ Atomic32 new_value) {
+ return __tsan_atomic32_exchange(ptr, new_value,
+ __tsan_memory_order_acquire);
+}
+
+inline Atomic32 Release_AtomicExchange(volatile Atomic32 *ptr,
+ Atomic32 new_value) {
+ return __tsan_atomic32_exchange(ptr, new_value,
+ __tsan_memory_order_release);
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32 *ptr,
+ Atomic32 increment) {
+ return increment + __tsan_atomic32_fetch_add(ptr, increment,
+ __tsan_memory_order_relaxed);
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32 *ptr,
+ Atomic32 increment) {
+ return increment + __tsan_atomic32_fetch_add(ptr, increment,
+ __tsan_memory_order_acq_rel);
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32 *ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ Atomic32 cmp = old_value;
+ __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
+ __tsan_memory_order_acquire, __tsan_memory_order_acquire);
+ return cmp;
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32 *ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ Atomic32 cmp = old_value;
+ __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
+ __tsan_memory_order_release, __tsan_memory_order_relaxed);
+ return cmp;
+}
+
+inline void NoBarrier_Store(volatile Atomic32 *ptr, Atomic32 value) {
+ __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
+}
+
+inline void Acquire_Store(volatile Atomic32 *ptr, Atomic32 value) {
+ __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
+ __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
+}
+
+inline void Release_Store(volatile Atomic32 *ptr, Atomic32 value) {
+ __tsan_atomic32_store(ptr, value, __tsan_memory_order_release);
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32 *ptr) {
+ return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
+}
+
+inline Atomic32 Acquire_Load(volatile const Atomic32 *ptr) {
+ return __tsan_atomic32_load(ptr, __tsan_memory_order_acquire);
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32 *ptr) {
+ __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
+ return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
+}
+
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64 *ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ Atomic64 cmp = old_value;
+ __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
+ __tsan_memory_order_relaxed, __tsan_memory_order_relaxed);
+ return cmp;
+}
+
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64 *ptr,
+ Atomic64 new_value) {
+ return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_relaxed);
+}
+
+inline Atomic64 Acquire_AtomicExchange(volatile Atomic64 *ptr,
+ Atomic64 new_value) {
+ return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_acquire);
+}
+
+inline Atomic64 Release_AtomicExchange(volatile Atomic64 *ptr,
+ Atomic64 new_value) {
+ return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_release);
+}
+
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64 *ptr,
+ Atomic64 increment) {
+ return increment + __tsan_atomic64_fetch_add(ptr, increment,
+ __tsan_memory_order_relaxed);
+}
+
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64 *ptr,
+ Atomic64 increment) {
+ return increment + __tsan_atomic64_fetch_add(ptr, increment,
+ __tsan_memory_order_acq_rel);
+}
+
+inline void NoBarrier_Store(volatile Atomic64 *ptr, Atomic64 value) {
+ __tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
+}
+
+inline void Acquire_Store(volatile Atomic64 *ptr, Atomic64 value) {
+ __tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
+ __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
+}
+
+inline void Release_Store(volatile Atomic64 *ptr, Atomic64 value) {
+ __tsan_atomic64_store(ptr, value, __tsan_memory_order_release);
+}
+
+inline Atomic64 NoBarrier_Load(volatile const Atomic64 *ptr) {
+ return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
+}
+
+inline Atomic64 Acquire_Load(volatile const Atomic64 *ptr) {
+ return __tsan_atomic64_load(ptr, __tsan_memory_order_acquire);
+}
+
+inline Atomic64 Release_Load(volatile const Atomic64 *ptr) {
+ __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
+ return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
+}
+
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64 *ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ Atomic64 cmp = old_value;
+ __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
+ __tsan_memory_order_acquire, __tsan_memory_order_acquire);
+ return cmp;
+}
+
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64 *ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ Atomic64 cmp = old_value;
+ __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
+ __tsan_memory_order_release, __tsan_memory_order_relaxed);
+ return cmp;
+}
+
+inline void MemoryBarrier() {
+ __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
+}
+
+} // namespace internal
+} // namespace v8
+
+#undef ATOMICOPS_COMPILER_BARRIER
+
+#endif // V8_ATOMICOPS_INTERNALS_TSAN_H_
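Every per-platform file, including this new TSan one, implements the same informal acquire/release contract; a minimal usage sketch follows (the flag protocol and names are illustrative, not taken from the diff).

// assumes #include "atomicops.h"
static v8::internal::Atomic32 payload = 0;
static v8::internal::Atomic32 ready = 0;

// Writer thread: publish the payload, then release-store the flag.
void Publish() {
  v8::internal::NoBarrier_Store(&payload, 42);
  v8::internal::Release_Store(&ready, 1);
}

// Reader thread: once the flag is seen via an acquire load, the payload
// written before the Release_Store is guaranteed to be visible.
bool TryConsume(v8::internal::Atomic32* out) {
  if (v8::internal::Acquire_Load(&ready) == 0) return false;
  *out = v8::internal::NoBarrier_Load(&payload);
  return true;
}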
diff --git a/deps/v8/src/atomicops_internals_x86_gcc.cc b/deps/v8/src/atomicops_internals_x86_gcc.cc
index 181c20247..950b423f4 100644
--- a/deps/v8/src/atomicops_internals_x86_gcc.cc
+++ b/deps/v8/src/atomicops_internals_x86_gcc.cc
@@ -31,6 +31,7 @@
#include <string.h>
#include "atomicops.h"
+#include "platform.h"
// This file only makes sense with atomicops_internals_x86_gcc.h -- it
// depends on structs that are defined in that file. If atomicops.h
@@ -84,9 +85,9 @@ void AtomicOps_Internalx86CPUFeaturesInit() {
// Get vendor string (issue CPUID with eax = 0)
cpuid(eax, ebx, ecx, edx, 0);
char vendor[13];
- memcpy(vendor, &ebx, 4);
- memcpy(vendor + 4, &edx, 4);
- memcpy(vendor + 8, &ecx, 4);
+ v8::internal::OS::MemCopy(vendor, &ebx, 4);
+ v8::internal::OS::MemCopy(vendor + 4, &edx, 4);
+ v8::internal::OS::MemCopy(vendor + 8, &ecx, 4);
vendor[12] = 0;
// get feature flags in ecx/edx, and family/model in eax
@@ -123,6 +124,7 @@ class AtomicOpsx86Initializer {
}
};
+
// A global to get us initialized on startup via static initialization :/
AtomicOpsx86Initializer g_initer;
diff --git a/deps/v8/src/atomicops_internals_x86_gcc.h b/deps/v8/src/atomicops_internals_x86_gcc.h
index 6e55b5018..e58d598fb 100644
--- a/deps/v8/src/atomicops_internals_x86_gcc.h
+++ b/deps/v8/src/atomicops_internals_x86_gcc.h
@@ -168,7 +168,7 @@ inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
return *ptr;
}
-#if defined(__x86_64__)
+#if defined(__x86_64__) && defined(V8_HOST_ARCH_64_BIT)
// 64-bit low-level operations on 64-bit platform.
diff --git a/deps/v8/src/bignum-dtoa.cc b/deps/v8/src/bignum-dtoa.cc
index a9616909d..c5ad4420c 100644
--- a/deps/v8/src/bignum-dtoa.cc
+++ b/deps/v8/src/bignum-dtoa.cc
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include <math.h>
+#include <cmath>
#include "../include/v8stdint.h"
#include "checks.h"
diff --git a/deps/v8/src/bignum.cc b/deps/v8/src/bignum.cc
index 9436322ed..af0edde6d 100644
--- a/deps/v8/src/bignum.cc
+++ b/deps/v8/src/bignum.cc
@@ -45,6 +45,7 @@ static int BitSize(S value) {
return 8 * sizeof(value);
}
+
// Guaranteed to lie in one Bigit.
void Bignum::AssignUInt16(uint16_t value) {
ASSERT(kBigitSize >= BitSize(value));
@@ -735,6 +736,13 @@ void Bignum::BigitsShiftLeft(int shift_amount) {
void Bignum::SubtractTimes(const Bignum& other, int factor) {
+#ifdef DEBUG
+ Bignum a, b;
+ a.AssignBignum(*this);
+ b.AssignBignum(other);
+ b.MultiplyByUInt32(factor);
+ a.SubtractBignum(b);
+#endif
ASSERT(exponent_ <= other.exponent_);
if (factor < 3) {
for (int i = 0; i < factor; ++i) {
@@ -758,9 +766,9 @@ void Bignum::SubtractTimes(const Bignum& other, int factor) {
Chunk difference = bigits_[i] - borrow;
bigits_[i] = difference & kBigitMask;
borrow = difference >> (kChunkSize - 1);
- ++i;
}
Clamp();
+ ASSERT(Bignum::Equal(a, *this));
}
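The new DEBUG block records a reference result so the final ASSERT can check the optimized in-place loop; a hedged restatement of the invariant using the same Bignum calls, where x and y stand for *this and other before the call:

Bignum a, b;
a.AssignBignum(x);            // a = x
b.AssignBignum(y);            // b = y
b.MultiplyByUInt32(factor);   // b = y * factor
a.SubtractBignum(b);          // a = x - y * factor
// After SubtractTimes(y, factor) runs on x, ASSERT(Bignum::Equal(a, x))
// requires the in-place result to match this reference bigit for bigit.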
diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc
index a368eefe7..234a2118b 100644
--- a/deps/v8/src/bootstrapper.cc
+++ b/deps/v8/src/bootstrapper.cc
@@ -43,6 +43,7 @@
#include "extensions/externalize-string-extension.h"
#include "extensions/gc-extension.h"
#include "extensions/statistics-extension.h"
+#include "code-stubs.h"
namespace v8 {
namespace internal {
@@ -63,8 +64,9 @@ NativesExternalStringResource::NativesExternalStringResource(
}
-Bootstrapper::Bootstrapper()
- : nesting_(0),
+Bootstrapper::Bootstrapper(Isolate* isolate)
+ : isolate_(isolate),
+ nesting_(0),
extensions_cache_(Script::TYPE_EXTENSION),
delete_these_non_arrays_on_tear_down_(NULL),
delete_these_arrays_on_tear_down_(NULL) {
@@ -73,9 +75,7 @@ Bootstrapper::Bootstrapper()
Handle<String> Bootstrapper::NativesSourceLookup(int index) {
ASSERT(0 <= index && index < Natives::GetBuiltinsCount());
- Isolate* isolate = Isolate::Current();
- Factory* factory = isolate->factory();
- Heap* heap = isolate->heap();
+ Heap* heap = isolate_->heap();
if (heap->natives_source_cache()->get(index)->IsUndefined()) {
// We can use external strings for the natives.
Vector<const char> source = Natives::GetRawScriptSource(index);
@@ -84,16 +84,21 @@ Handle<String> Bootstrapper::NativesSourceLookup(int index) {
source.start(),
source.length());
Handle<String> source_code =
- factory->NewExternalStringFromAscii(resource);
+ isolate_->factory()->NewExternalStringFromAscii(resource);
heap->natives_source_cache()->set(index, *source_code);
}
- Handle<Object> cached_source(heap->natives_source_cache()->get(index));
+ Handle<Object> cached_source(heap->natives_source_cache()->get(index),
+ isolate_);
return Handle<String>::cast(cached_source);
}
void Bootstrapper::Initialize(bool create_heap_objects) {
- extensions_cache_.Initialize(create_heap_objects);
+ extensions_cache_.Initialize(isolate_, create_heap_objects);
+}
+
+
+void Bootstrapper::InitializeOncePerProcess() {
GCExtension::Register();
ExternalizeStringExtension::Register();
StatisticsExtension::Register();
@@ -135,7 +140,7 @@ void Bootstrapper::TearDown() {
delete_these_arrays_on_tear_down_ = NULL;
}
- extensions_cache_.Initialize(false); // Yes, symmetrical
+ extensions_cache_.Initialize(isolate_, false); // Yes, symmetrical
}
@@ -147,23 +152,13 @@ class Genesis BASE_EMBEDDED {
v8::ExtensionConfiguration* extensions);
~Genesis() { }
- Handle<Context> result() { return result_; }
-
- Genesis* previous() { return previous_; }
-
Isolate* isolate() const { return isolate_; }
Factory* factory() const { return isolate_->factory(); }
Heap* heap() const { return isolate_->heap(); }
- private:
- Handle<Context> native_context_;
- Isolate* isolate_;
-
- // There may be more than one active genesis object: When GC is
- // triggered during environment creation there may be weak handle
- // processing callbacks which may create new environments.
- Genesis* previous_;
+ Handle<Context> result() { return result_; }
+ private:
Handle<Context> native_context() { return native_context_; }
// Creates some basic objects. Used for creating a context from scratch.
@@ -198,14 +193,20 @@ class Genesis BASE_EMBEDDED {
// detached from the other objects in the snapshot.
void HookUpInnerGlobal(Handle<GlobalObject> inner_global);
// New context initialization. Used for creating a context from scratch.
- bool InitializeGlobal(Handle<GlobalObject> inner_global,
+ void InitializeGlobal(Handle<GlobalObject> inner_global,
Handle<JSFunction> empty_function);
void InitializeExperimentalGlobal();
// Installs the contents of the native .js files on the global objects.
// Used for creating a context from scratch.
void InstallNativeFunctions();
void InstallExperimentalNativeFunctions();
+ Handle<JSFunction> InstallInternalArray(Handle<JSBuiltinsObject> builtins,
+ const char* name,
+ ElementsKind elements_kind);
bool InstallNatives();
+
+ Handle<JSFunction> InstallTypedArray(const char* name,
+ ElementsKind elementsKind);
bool InstallExperimentalNatives();
void InstallBuiltinFunctionIds();
void InstallJSFunctionResultCaches();
@@ -230,9 +231,11 @@ class Genesis BASE_EMBEDDED {
// provided.
static bool InstallExtensions(Handle<Context> native_context,
v8::ExtensionConfiguration* extensions);
- static bool InstallExtension(const char* name,
+ static bool InstallExtension(Isolate* isolate,
+ const char* name,
ExtensionStates* extension_states);
- static bool InstallExtension(v8::RegisteredExtension* current,
+ static bool InstallExtension(Isolate* isolate,
+ v8::RegisteredExtension* current,
ExtensionStates* extension_states);
static void InstallSpecialObjects(Handle<Context> native_context);
bool InstallJSBuiltins(Handle<JSBuiltinsObject> builtins);
@@ -268,22 +271,27 @@ class Genesis BASE_EMBEDDED {
static bool CompileBuiltin(Isolate* isolate, int index);
static bool CompileExperimentalBuiltin(Isolate* isolate, int index);
- static bool CompileNative(Vector<const char> name, Handle<String> source);
- static bool CompileScriptCached(Vector<const char> name,
+ static bool CompileNative(Isolate* isolate,
+ Vector<const char> name,
+ Handle<String> source);
+ static bool CompileScriptCached(Isolate* isolate,
+ Vector<const char> name,
Handle<String> source,
SourceCodeCache* cache,
v8::Extension* extension,
Handle<Context> top_context,
bool use_runtime_context);
+ Isolate* isolate_;
Handle<Context> result_;
+ Handle<Context> native_context_;
- // Function instance maps. Function literal maps are created initially with
- // a read only prototype for the processing of JS builtins. Later the function
- // instance maps are replaced in order to make prototype writable.
- // These are the final, writable prototype, maps.
- Handle<Map> function_instance_map_writable_prototype_;
- Handle<Map> strict_mode_function_instance_map_writable_prototype_;
+  // Function maps. Function maps are created initially with a read-only
+  // prototype for the processing of JS builtins. Later the function maps are
+  // replaced in order to make the prototype writable. These are the final
+  // maps, with a writable prototype.
+ Handle<Map> function_map_writable_prototype_;
+ Handle<Map> strict_mode_function_map_writable_prototype_;
Handle<JSFunction> throw_type_error_function;
BootstrapperActive active_;
@@ -298,20 +306,16 @@ void Bootstrapper::Iterate(ObjectVisitor* v) {
Handle<Context> Bootstrapper::CreateEnvironment(
- Isolate* isolate,
Handle<Object> global_object,
v8::Handle<v8::ObjectTemplate> global_template,
v8::ExtensionConfiguration* extensions) {
- HandleScope scope;
- Handle<Context> env;
- Genesis genesis(isolate, global_object, global_template, extensions);
- env = genesis.result();
- if (!env.is_null()) {
- if (InstallExtensions(env, extensions)) {
- return env;
- }
+ HandleScope scope(isolate_);
+ Genesis genesis(isolate_, global_object, global_template, extensions);
+ Handle<Context> env = genesis.result();
+ if (env.is_null() || !InstallExtensions(env, extensions)) {
+ return Handle<Context>();
}
- return Handle<Context>();
+ return scope.CloseAndEscape(env);
}
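A hedged sketch of the handle-scope idiom the rewritten CreateEnvironment now follows: locals allocated inside the scope are reclaimed when it closes, and the one handle that must outlive it is promoted with CloseAndEscape. BuildContext below is a hypothetical helper standing in for Genesis.

Handle<Context> MakeEnvironment(Isolate* isolate) {
  HandleScope scope(isolate);
  Handle<Context> env = BuildContext(isolate);   // hypothetical helper
  if (env.is_null()) return Handle<Context>();   // empty handle signals failure
  return scope.CloseAndEscape(env);              // re-open env in caller's scope
}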
@@ -350,19 +354,20 @@ static Handle<JSFunction> InstallFunction(Handle<JSObject> target,
int instance_size,
Handle<JSObject> prototype,
Builtins::Name call,
- bool is_ecma_native) {
+ bool install_initial_map,
+ bool set_instance_class_name) {
Isolate* isolate = target->GetIsolate();
Factory* factory = isolate->factory();
- Handle<String> symbol = factory->LookupAsciiSymbol(name);
+ Handle<String> internalized_name = factory->InternalizeUtf8String(name);
Handle<Code> call_code = Handle<Code>(isolate->builtins()->builtin(call));
Handle<JSFunction> function = prototype.is_null() ?
- factory->NewFunctionWithoutPrototype(symbol, call_code) :
- factory->NewFunctionWithPrototype(symbol,
+ factory->NewFunctionWithoutPrototype(internalized_name, call_code) :
+ factory->NewFunctionWithPrototype(internalized_name,
type,
instance_size,
prototype,
call_code,
- is_ecma_native);
+ install_initial_map);
PropertyAttributes attributes;
if (target->IsJSBuiltinsObject()) {
attributes =
@@ -372,9 +377,9 @@ static Handle<JSFunction> InstallFunction(Handle<JSObject> target,
}
CHECK_NOT_EMPTY_HANDLE(isolate,
JSObject::SetLocalPropertyIgnoreAttributes(
- target, symbol, function, attributes));
- if (is_ecma_native) {
- function->shared()->set_instance_class_name(*symbol);
+ target, internalized_name, function, attributes));
+ if (set_instance_class_name) {
+ function->shared()->set_instance_class_name(*internalized_name);
}
function->shared()->set_native(true);
return function;
@@ -400,19 +405,19 @@ void Genesis::SetFunctionInstanceDescriptor(
map->set_instance_descriptors(*descriptors);
{ // Add length.
- CallbacksDescriptor d(*factory()->length_symbol(), *length, attribs);
+ CallbacksDescriptor d(*factory()->length_string(), *length, attribs);
map->AppendDescriptor(&d, witness);
}
{ // Add name.
- CallbacksDescriptor d(*factory()->name_symbol(), *name, attribs);
+ CallbacksDescriptor d(*factory()->name_string(), *name, attribs);
map->AppendDescriptor(&d, witness);
}
{ // Add arguments.
- CallbacksDescriptor d(*factory()->arguments_symbol(), *args, attribs);
+ CallbacksDescriptor d(*factory()->arguments_string(), *args, attribs);
map->AppendDescriptor(&d, witness);
}
{ // Add caller.
- CallbacksDescriptor d(*factory()->caller_symbol(), *caller, attribs);
+ CallbacksDescriptor d(*factory()->caller_string(), *caller, attribs);
map->AppendDescriptor(&d, witness);
}
if (prototypeMode != DONT_ADD_PROTOTYPE) {
@@ -420,7 +425,7 @@ void Genesis::SetFunctionInstanceDescriptor(
if (prototypeMode == ADD_WRITEABLE_PROTOTYPE) {
attribs = static_cast<PropertyAttributes>(attribs & ~READ_ONLY);
}
- CallbacksDescriptor d(*factory()->prototype_symbol(), *prototype, attribs);
+ CallbacksDescriptor d(*factory()->prototype_string(), *prototype, attribs);
map->AppendDescriptor(&d, witness);
}
}
@@ -438,12 +443,6 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
// Allocate the map for function instances. Maps are allocated first and their
// prototypes patched later, once empty function is created.
- // Please note that the prototype property for function instances must be
- // writable.
- Handle<Map> function_instance_map =
- CreateFunctionMap(ADD_WRITEABLE_PROTOTYPE);
- native_context()->set_function_instance_map(*function_instance_map);
-
// Functions with this map will not have a 'prototype' property, and
// can not be used as constructors.
Handle<Map> function_without_prototype_map =
@@ -459,13 +458,11 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
// The final map for functions. Writeable prototype.
// This map is installed in MakeFunctionInstancePrototypeWritable.
- function_instance_map_writable_prototype_ =
- CreateFunctionMap(ADD_WRITEABLE_PROTOTYPE);
+ function_map_writable_prototype_ = CreateFunctionMap(ADD_WRITEABLE_PROTOTYPE);
Factory* factory = isolate->factory();
- Heap* heap = isolate->heap();
- Handle<String> object_name = Handle<String>(heap->Object_symbol());
+ Handle<String> object_name = factory->Object_string();
{ // --- O b j e c t ---
Handle<JSFunction> object_fun =
@@ -483,14 +480,19 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
TENURED);
native_context()->set_initial_object_prototype(*prototype);
- SetPrototype(object_fun, prototype);
+    // For bootstrapping, set the array prototype to be the same as the
+    // object prototype; otherwise the missing initial_array_prototype will
+    // cause assertions during startup.
+ native_context()->set_initial_array_prototype(*prototype);
+ Accessors::FunctionSetPrototype(object_fun, prototype);
}
// Allocate the empty function as the prototype for function ECMAScript
// 262 15.3.4.
- Handle<String> symbol = factory->LookupAsciiSymbol("Empty");
+ Handle<String> empty_string =
+ factory->InternalizeOneByteString(STATIC_ASCII_VECTOR("Empty"));
Handle<JSFunction> empty_function =
- factory->NewFunctionWithoutPrototype(symbol, CLASSIC_MODE);
+ factory->NewFunctionWithoutPrototype(empty_string, CLASSIC_MODE);
// --- E m p t y ---
Handle<Code> code =
@@ -498,7 +500,8 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
Builtins::kEmptyFunction));
empty_function->set_code(*code);
empty_function->shared()->set_code(*code);
- Handle<String> source = factory->NewStringFromAscii(CStrVector("() {}"));
+ Handle<String> source =
+ factory->NewStringFromOneByte(STATIC_ASCII_VECTOR("() {}"));
Handle<Script> script = factory->NewScript(source);
script->set_type(Smi::FromInt(Script::TYPE_NATIVE));
empty_function->shared()->set_script(*script);
@@ -508,10 +511,9 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
// Set prototypes for the function maps.
native_context()->function_map()->set_prototype(*empty_function);
- native_context()->function_instance_map()->set_prototype(*empty_function);
native_context()->function_without_prototype_map()->
set_prototype(*empty_function);
- function_instance_map_writable_prototype_->set_prototype(*empty_function);
+ function_map_writable_prototype_->set_prototype(*empty_function);
// Allocate the function map first and then patch the prototype later
Handle<Map> empty_function_map = CreateFunctionMap(DONT_ADD_PROTOTYPE);
@@ -536,32 +538,34 @@ void Genesis::SetStrictFunctionInstanceDescriptor(
if (prototypeMode != DONT_ADD_PROTOTYPE) {
prototype = factory()->NewForeign(&Accessors::FunctionPrototype);
}
- PropertyAttributes attribs = static_cast<PropertyAttributes>(
- DONT_ENUM | DONT_DELETE);
+ PropertyAttributes rw_attribs =
+ static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE);
+ PropertyAttributes ro_attribs =
+ static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
map->set_instance_descriptors(*descriptors);
{ // Add length.
- CallbacksDescriptor d(*factory()->length_symbol(), *length, attribs);
+ CallbacksDescriptor d(*factory()->length_string(), *length, ro_attribs);
map->AppendDescriptor(&d, witness);
}
{ // Add name.
- CallbacksDescriptor d(*factory()->name_symbol(), *name, attribs);
+ CallbacksDescriptor d(*factory()->name_string(), *name, rw_attribs);
map->AppendDescriptor(&d, witness);
}
{ // Add arguments.
- CallbacksDescriptor d(*factory()->arguments_symbol(), *arguments, attribs);
+ CallbacksDescriptor d(*factory()->arguments_string(), *arguments,
+ rw_attribs);
map->AppendDescriptor(&d, witness);
}
{ // Add caller.
- CallbacksDescriptor d(*factory()->caller_symbol(), *caller, attribs);
+ CallbacksDescriptor d(*factory()->caller_string(), *caller, rw_attribs);
map->AppendDescriptor(&d, witness);
}
if (prototypeMode != DONT_ADD_PROTOTYPE) {
// Add prototype.
- if (prototypeMode != ADD_WRITEABLE_PROTOTYPE) {
- attribs = static_cast<PropertyAttributes>(attribs | READ_ONLY);
- }
- CallbacksDescriptor d(*factory()->prototype_symbol(), *prototype, attribs);
+ PropertyAttributes attribs =
+ prototypeMode == ADD_WRITEABLE_PROTOTYPE ? rw_attribs : ro_attribs;
+ CallbacksDescriptor d(*factory()->prototype_string(), *prototype, attribs);
map->AppendDescriptor(&d, witness);
}
}
@@ -570,7 +574,8 @@ void Genesis::SetStrictFunctionInstanceDescriptor(
// ECMAScript 5th Edition, 13.2.3
Handle<JSFunction> Genesis::GetThrowTypeErrorFunction() {
if (throw_type_error_function.is_null()) {
- Handle<String> name = factory()->LookupAsciiSymbol("ThrowTypeError");
+ Handle<String> name = factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("ThrowTypeError"));
throw_type_error_function =
factory()->NewFunctionWithoutPrototype(name, CLASSIC_MODE);
Handle<Code> code(isolate()->builtins()->builtin(
@@ -599,12 +604,6 @@ Handle<Map> Genesis::CreateStrictModeFunctionMap(
void Genesis::CreateStrictModeFunctionMaps(Handle<JSFunction> empty) {
- // Allocate map for the strict mode function instances.
- Handle<Map> strict_mode_function_instance_map =
- CreateStrictModeFunctionMap(ADD_WRITEABLE_PROTOTYPE, empty);
- native_context()->set_strict_mode_function_instance_map(
- *strict_mode_function_instance_map);
-
// Allocate map for the prototype-less strict mode instances.
Handle<Map> strict_mode_function_without_prototype_map =
CreateStrictModeFunctionMap(DONT_ADD_PROTOTYPE, empty);
@@ -621,15 +620,13 @@ void Genesis::CreateStrictModeFunctionMaps(Handle<JSFunction> empty) {
// The final map for the strict mode functions. Writeable prototype.
// This map is installed in MakeFunctionInstancePrototypeWritable.
- strict_mode_function_instance_map_writable_prototype_ =
+ strict_mode_function_map_writable_prototype_ =
CreateStrictModeFunctionMap(ADD_WRITEABLE_PROTOTYPE, empty);
// Complete the callbacks.
- PoisonArgumentsAndCaller(strict_mode_function_instance_map);
PoisonArgumentsAndCaller(strict_mode_function_without_prototype_map);
PoisonArgumentsAndCaller(strict_mode_function_map);
- PoisonArgumentsAndCaller(
- strict_mode_function_instance_map_writable_prototype_);
+ PoisonArgumentsAndCaller(strict_mode_function_map_writable_prototype_);
}
@@ -645,8 +642,8 @@ static void SetAccessors(Handle<Map> map,
void Genesis::PoisonArgumentsAndCaller(Handle<Map> map) {
- SetAccessors(map, factory()->arguments_symbol(), GetThrowTypeErrorFunction());
- SetAccessors(map, factory()->caller_symbol(), GetThrowTypeErrorFunction());
+ SetAccessors(map, factory()->arguments_string(), GetThrowTypeErrorFunction());
+ SetAccessors(map, factory()->caller_string(), GetThrowTypeErrorFunction());
}
@@ -674,9 +671,8 @@ void Genesis::CreateRoots() {
// closure and extension object later (we need the empty function
// and the global object, but in order to create those, we need the
// native context).
- native_context_ = Handle<Context>::cast(isolate()->global_handles()->Create(
- *factory()->NewNativeContext()));
- AddToWeakNativeContextList(*native_context_);
+ native_context_ = factory()->NewNativeContext();
+ AddToWeakNativeContextList(*native_context());
isolate()->set_context(*native_context());
// Allocate the message listeners object.
@@ -713,7 +709,8 @@ Handle<JSGlobalProxy> Genesis::CreateNewGlobals(
Handle<FunctionTemplateInfo> global_constructor =
Handle<FunctionTemplateInfo>(
FunctionTemplateInfo::cast(data->constructor()));
- Handle<Object> proto_template(global_constructor->prototype_template());
+ Handle<Object> proto_template(global_constructor->prototype_template(),
+ isolate());
if (!proto_template->IsUndefined()) {
js_global_template =
Handle<ObjectTemplateInfo>::cast(proto_template);
@@ -721,7 +718,7 @@ Handle<JSGlobalProxy> Genesis::CreateNewGlobals(
}
if (js_global_template.is_null()) {
- Handle<String> name = Handle<String>(heap()->empty_symbol());
+ Handle<String> name = Handle<String>(heap()->empty_string());
Handle<Code> code = Handle<Code>(isolate()->builtins()->builtin(
Builtins::kIllegal));
js_global_function =
@@ -734,7 +731,7 @@ Handle<JSGlobalProxy> Genesis::CreateNewGlobals(
JSObject::cast(js_global_function->instance_prototype()));
CHECK_NOT_EMPTY_HANDLE(isolate(),
JSObject::SetLocalPropertyIgnoreAttributes(
- prototype, factory()->constructor_symbol(),
+ prototype, factory()->constructor_string(),
isolate()->object_function(), NONE));
} else {
Handle<FunctionTemplateInfo> js_global_constructor(
@@ -755,7 +752,7 @@ Handle<JSGlobalProxy> Genesis::CreateNewGlobals(
// Step 2: create or re-initialize the global proxy object.
Handle<JSFunction> global_proxy_function;
if (global_template.IsEmpty()) {
- Handle<String> name = Handle<String>(heap()->empty_symbol());
+ Handle<String> name = Handle<String>(heap()->empty_string());
Handle<Code> code = Handle<Code>(isolate()->builtins()->builtin(
Builtins::kIllegal));
global_proxy_function =
@@ -771,7 +768,8 @@ Handle<JSGlobalProxy> Genesis::CreateNewGlobals(
factory()->OuterGlobalObject);
}
- Handle<String> global_name = factory()->LookupAsciiSymbol("global");
+ Handle<String> global_name = factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("global"));
global_proxy_function->shared()->set_instance_class_name(*global_name);
global_proxy_function->initial_map()->set_is_access_check_needed(true);
@@ -803,15 +801,16 @@ void Genesis::HookUpGlobalProxy(Handle<GlobalObject> inner_global,
void Genesis::HookUpInnerGlobal(Handle<GlobalObject> inner_global) {
Handle<GlobalObject> inner_global_from_snapshot(
- GlobalObject::cast(native_context_->extension()));
- Handle<JSBuiltinsObject> builtins_global(native_context_->builtins());
- native_context_->set_extension(*inner_global);
- native_context_->set_global_object(*inner_global);
- native_context_->set_security_token(*inner_global);
+ GlobalObject::cast(native_context()->extension()));
+ Handle<JSBuiltinsObject> builtins_global(native_context()->builtins());
+ native_context()->set_extension(*inner_global);
+ native_context()->set_global_object(*inner_global);
+ native_context()->set_security_token(*inner_global);
static const PropertyAttributes attributes =
static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
ForceSetProperty(builtins_global,
- factory()->LookupAsciiSymbol("global"),
+ factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("global")),
inner_global,
attributes);
// Set up the reference from the global object to the builtins object.
@@ -823,9 +822,9 @@ void Genesis::HookUpInnerGlobal(Handle<GlobalObject> inner_global) {
// This is only called if we are not using snapshots. The equivalent
// work in the snapshot case is done in HookUpInnerGlobal.
-bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
+void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
Handle<JSFunction> empty_function) {
- // --- G l o b a l C o n t e x t ---
+ // --- N a t i v e C o n t e x t ---
// Use the empty function as closure (no scope info).
native_context()->set_closure(*empty_function);
native_context()->set_previous(NULL);
@@ -842,7 +841,7 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
Factory* factory = isolate->factory();
Heap* heap = isolate->heap();
- Handle<String> object_name = Handle<String>(heap->Object_symbol());
+ Handle<String> object_name = factory->Object_string();
CHECK_NOT_EMPTY_HANDLE(isolate,
JSObject::SetLocalPropertyIgnoreAttributes(
inner_global, object_name,
@@ -852,22 +851,26 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
// Install global Function object
InstallFunction(global, "Function", JS_FUNCTION_TYPE, JSFunction::kSize,
- empty_function, Builtins::kIllegal, true); // ECMA native.
+ empty_function, Builtins::kIllegal, true, true);
{ // --- A r r a y ---
Handle<JSFunction> array_function =
InstallFunction(global, "Array", JS_ARRAY_TYPE, JSArray::kSize,
isolate->initial_object_prototype(),
- Builtins::kArrayCode, true);
- array_function->shared()->set_construct_stub(
- isolate->builtins()->builtin(Builtins::kArrayConstructCode));
+ Builtins::kArrayCode, true, true);
array_function->shared()->DontAdaptArguments();
+ array_function->shared()->set_function_data(Smi::FromInt(kArrayCode));
// This seems a bit hackish, but we need to make sure Array.length
// is 1.
array_function->shared()->set_length(1);
Handle<Map> initial_map(array_function->initial_map());
+
+ // This assert protects an optimization in
+ // HGraphBuilder::JSArrayBuilder::EmitMapCode()
+ ASSERT(initial_map->elements_kind() == GetInitialFastElementsKind());
+
Handle<DescriptorArray> array_descriptors(
factory->NewDescriptorArray(0, 1));
DescriptorArray::WhitenessWitness witness(*array_descriptors);
@@ -878,7 +881,7 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
initial_map->set_instance_descriptors(*array_descriptors);
{ // Add length.
- CallbacksDescriptor d(*factory->length_symbol(), *array_length, attribs);
+ CallbacksDescriptor d(*factory->length_string(), *array_length, attribs);
array_function->initial_map()->AppendDescriptor(&d, witness);
}
@@ -887,13 +890,19 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
// as the constructor. 'Array' property on a global object can be
// overwritten by JS code.
native_context()->set_array_function(*array_function);
+
+ // Cache the array maps, needed by ArrayConstructorStub
+ CacheInitialJSArrayMaps(native_context(), initial_map);
+ ArrayConstructorStub array_constructor_stub(isolate);
+ Handle<Code> code = array_constructor_stub.GetCode(isolate);
+ array_function->shared()->set_construct_stub(*code);
}
{ // --- N u m b e r ---
Handle<JSFunction> number_fun =
InstallFunction(global, "Number", JS_VALUE_TYPE, JSValue::kSize,
isolate->initial_object_prototype(),
- Builtins::kIllegal, true);
+ Builtins::kIllegal, true, true);
native_context()->set_number_function(*number_fun);
}
@@ -901,7 +910,7 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
Handle<JSFunction> boolean_fun =
InstallFunction(global, "Boolean", JS_VALUE_TYPE, JSValue::kSize,
isolate->initial_object_prototype(),
- Builtins::kIllegal, true);
+ Builtins::kIllegal, true, true);
native_context()->set_boolean_function(*boolean_fun);
}
@@ -909,7 +918,7 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
Handle<JSFunction> string_fun =
InstallFunction(global, "String", JS_VALUE_TYPE, JSValue::kSize,
isolate->initial_object_prototype(),
- Builtins::kIllegal, true);
+ Builtins::kIllegal, true, true);
string_fun->shared()->set_construct_stub(
isolate->builtins()->builtin(Builtins::kStringConstructCode));
native_context()->set_string_function(*string_fun);
@@ -927,7 +936,7 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
string_map->set_instance_descriptors(*string_descriptors);
{ // Add length.
- CallbacksDescriptor d(*factory->length_symbol(), *string_length, attribs);
+ CallbacksDescriptor d(*factory->length_string(), *string_length, attribs);
string_map->AppendDescriptor(&d, witness);
}
}
@@ -937,7 +946,7 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
Handle<JSFunction> date_fun =
InstallFunction(global, "Date", JS_DATE_TYPE, JSDate::kSize,
isolate->initial_object_prototype(),
- Builtins::kIllegal, true);
+ Builtins::kIllegal, true, true);
native_context()->set_date_function(*date_fun);
}
@@ -948,7 +957,7 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
Handle<JSFunction> regexp_fun =
InstallFunction(global, "RegExp", JS_REGEXP_TYPE, JSRegExp::kSize,
isolate->initial_object_prototype(),
- Builtins::kIllegal, true);
+ Builtins::kIllegal, true, true);
native_context()->set_regexp_function(*regexp_fun);
ASSERT(regexp_fun->has_initial_map());
@@ -964,39 +973,44 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
{
// ECMA-262, section 15.10.7.1.
- FieldDescriptor field(heap->source_symbol(),
+ FieldDescriptor field(heap->source_string(),
JSRegExp::kSourceFieldIndex,
- final);
+ final,
+ Representation::Tagged());
initial_map->AppendDescriptor(&field, witness);
}
{
// ECMA-262, section 15.10.7.2.
- FieldDescriptor field(heap->global_symbol(),
+ FieldDescriptor field(heap->global_string(),
JSRegExp::kGlobalFieldIndex,
- final);
+ final,
+ Representation::Tagged());
initial_map->AppendDescriptor(&field, witness);
}
{
// ECMA-262, section 15.10.7.3.
- FieldDescriptor field(heap->ignore_case_symbol(),
+ FieldDescriptor field(heap->ignore_case_string(),
JSRegExp::kIgnoreCaseFieldIndex,
- final);
+ final,
+ Representation::Tagged());
initial_map->AppendDescriptor(&field, witness);
}
{
// ECMA-262, section 15.10.7.4.
- FieldDescriptor field(heap->multiline_symbol(),
+ FieldDescriptor field(heap->multiline_string(),
JSRegExp::kMultilineFieldIndex,
- final);
+ final,
+ Representation::Tagged());
initial_map->AppendDescriptor(&field, witness);
}
{
// ECMA-262, section 15.10.7.5.
PropertyAttributes writable =
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE);
- FieldDescriptor field(heap->last_index_symbol(),
+ FieldDescriptor field(heap->last_index_string(),
JSRegExp::kLastIndexFieldIndex,
- writable);
+ writable,
+ Representation::Tagged());
initial_map->AppendDescriptor(&field, witness);
}
@@ -1012,7 +1026,7 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
proto_map->set_prototype(native_context()->initial_object_prototype());
Handle<JSObject> proto = factory->NewJSObjectFromMap(proto_map);
proto->InObjectPropertyAtPut(JSRegExp::kSourceFieldIndex,
- heap->query_colon_symbol());
+ heap->query_colon_string());
proto->InObjectPropertyAtPut(JSRegExp::kGlobalFieldIndex,
heap->false_value());
proto->InObjectPropertyAtPut(JSRegExp::kIgnoreCaseFieldIndex,
@@ -1029,13 +1043,11 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
}
{ // -- J S O N
- Handle<String> name = factory->NewStringFromAscii(CStrVector("JSON"));
+ Handle<String> name = factory->InternalizeUtf8String("JSON");
Handle<JSFunction> cons = factory->NewFunction(name,
factory->the_hole_value());
- { MaybeObject* result = cons->SetInstancePrototype(
- native_context()->initial_object_prototype());
- if (result->IsFailure()) return false;
- }
+ JSFunction::SetInstancePrototype(cons,
+ Handle<Object>(native_context()->initial_object_prototype(), isolate));
cons->SetInstanceClassName(*name);
Handle<JSObject> json_object = factory->NewJSObject(cons, TENURED);
ASSERT(json_object->IsJSObject());
@@ -1045,11 +1057,60 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
native_context()->set_json_object(*json_object);
}
+ { // -- A r r a y B u f f e r
+ Handle<JSFunction> array_buffer_fun =
+ InstallFunction(
+ global, "ArrayBuffer", JS_ARRAY_BUFFER_TYPE,
+ JSArrayBuffer::kSizeWithInternalFields,
+ isolate->initial_object_prototype(),
+ Builtins::kIllegal, true, true);
+ native_context()->set_array_buffer_fun(*array_buffer_fun);
+ }
+
+ { // -- T y p e d A r r a y s
+ Handle<JSFunction> int8_fun = InstallTypedArray("Int8Array",
+ EXTERNAL_BYTE_ELEMENTS);
+ native_context()->set_int8_array_fun(*int8_fun);
+ Handle<JSFunction> uint8_fun = InstallTypedArray("Uint8Array",
+ EXTERNAL_UNSIGNED_BYTE_ELEMENTS);
+ native_context()->set_uint8_array_fun(*uint8_fun);
+ Handle<JSFunction> int16_fun = InstallTypedArray("Int16Array",
+ EXTERNAL_SHORT_ELEMENTS);
+ native_context()->set_int16_array_fun(*int16_fun);
+ Handle<JSFunction> uint16_fun = InstallTypedArray("Uint16Array",
+ EXTERNAL_UNSIGNED_SHORT_ELEMENTS);
+ native_context()->set_uint16_array_fun(*uint16_fun);
+ Handle<JSFunction> int32_fun = InstallTypedArray("Int32Array",
+ EXTERNAL_INT_ELEMENTS);
+ native_context()->set_int32_array_fun(*int32_fun);
+ Handle<JSFunction> uint32_fun = InstallTypedArray("Uint32Array",
+ EXTERNAL_UNSIGNED_INT_ELEMENTS);
+ native_context()->set_uint32_array_fun(*uint32_fun);
+ Handle<JSFunction> float_fun = InstallTypedArray("Float32Array",
+ EXTERNAL_FLOAT_ELEMENTS);
+ native_context()->set_float_array_fun(*float_fun);
+ Handle<JSFunction> double_fun = InstallTypedArray("Float64Array",
+ EXTERNAL_DOUBLE_ELEMENTS);
+ native_context()->set_double_array_fun(*double_fun);
+ Handle<JSFunction> uint8c_fun = InstallTypedArray("Uint8ClampedArray",
+ EXTERNAL_PIXEL_ELEMENTS);
+ native_context()->set_uint8c_array_fun(*uint8c_fun);
+
+ Handle<JSFunction> data_view_fun =
+ InstallFunction(
+ global, "DataView", JS_DATA_VIEW_TYPE,
+ JSDataView::kSizeWithInternalFields,
+ isolate->initial_object_prototype(),
+ Builtins::kIllegal, true, true);
+ native_context()->set_data_view_fun(*data_view_fun);
+ }
+
{ // --- arguments_boilerplate_
// Make sure we can recognize argument objects at runtime.
// This is done by introducing an anonymous function with
// class_name equals 'Arguments'.
- Handle<String> symbol = factory->LookupAsciiSymbol("Arguments");
+ Handle<String> arguments_string = factory->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("Arguments"));
Handle<Code> code = Handle<Code>(
isolate->builtins()->builtin(Builtins::kIllegal));
Handle<JSObject> prototype =
@@ -1057,14 +1118,14 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
JSObject::cast(native_context()->object_function()->prototype()));
Handle<JSFunction> function =
- factory->NewFunctionWithPrototype(symbol,
+ factory->NewFunctionWithPrototype(arguments_string,
JS_OBJECT_TYPE,
JSObject::kHeaderSize,
prototype,
code,
false);
ASSERT(!function->has_initial_map());
- function->shared()->set_instance_class_name(*symbol);
+ function->shared()->set_instance_class_name(*arguments_string);
function->shared()->set_expected_nof_properties(2);
Handle<JSObject> result = factory->NewJSObject(function);
@@ -1073,22 +1134,24 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
// callee must be added as the second property.
CHECK_NOT_EMPTY_HANDLE(isolate,
JSObject::SetLocalPropertyIgnoreAttributes(
- result, factory->length_symbol(),
- factory->undefined_value(), DONT_ENUM));
+ result, factory->length_string(),
+ factory->undefined_value(), DONT_ENUM,
+ Object::FORCE_TAGGED, FORCE_FIELD));
CHECK_NOT_EMPTY_HANDLE(isolate,
JSObject::SetLocalPropertyIgnoreAttributes(
- result, factory->callee_symbol(),
- factory->undefined_value(), DONT_ENUM));
+ result, factory->callee_string(),
+ factory->undefined_value(), DONT_ENUM,
+ Object::FORCE_TAGGED, FORCE_FIELD));
#ifdef DEBUG
LookupResult lookup(isolate);
- result->LocalLookup(heap->callee_symbol(), &lookup);
+ result->LocalLookup(heap->callee_string(), &lookup);
ASSERT(lookup.IsField());
- ASSERT(lookup.GetFieldIndex() == Heap::kArgumentsCalleeIndex);
+ ASSERT(lookup.GetFieldIndex().field_index() == Heap::kArgumentsCalleeIndex);
- result->LocalLookup(heap->length_symbol(), &lookup);
+ result->LocalLookup(heap->length_string(), &lookup);
ASSERT(lookup.IsField());
- ASSERT(lookup.GetFieldIndex() == Heap::kArgumentsLengthIndex);
+ ASSERT(lookup.GetFieldIndex().field_index() == Heap::kArgumentsLengthIndex);
ASSERT(result->map()->inobject_properties() > Heap::kArgumentsCalleeIndex);
ASSERT(result->map()->inobject_properties() > Heap::kArgumentsLengthIndex);
@@ -1147,17 +1210,18 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
map->set_instance_descriptors(*descriptors);
{ // length
- FieldDescriptor d(*factory->length_symbol(), 0, DONT_ENUM);
+ FieldDescriptor d(
+ *factory->length_string(), 0, DONT_ENUM, Representation::Tagged());
map->AppendDescriptor(&d, witness);
}
{ // callee
- CallbacksDescriptor d(*factory->callee_symbol(),
+ CallbacksDescriptor d(*factory->callee_string(),
*callee,
attributes);
map->AppendDescriptor(&d, witness);
}
{ // caller
- CallbacksDescriptor d(*factory->caller_symbol(),
+ CallbacksDescriptor d(*factory->caller_string(),
*caller,
attributes);
map->AppendDescriptor(&d, witness);
@@ -1179,14 +1243,14 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
// Add length property only for strict mode boilerplate.
CHECK_NOT_EMPTY_HANDLE(isolate,
JSObject::SetLocalPropertyIgnoreAttributes(
- result, factory->length_symbol(),
+ result, factory->length_string(),
factory->undefined_value(), DONT_ENUM));
#ifdef DEBUG
LookupResult lookup(isolate);
- result->LocalLookup(heap->length_symbol(), &lookup);
+ result->LocalLookup(heap->length_string(), &lookup);
ASSERT(lookup.IsField());
- ASSERT(lookup.GetFieldIndex() == Heap::kArgumentsLengthIndex);
+ ASSERT(lookup.GetFieldIndex().field_index() == Heap::kArgumentsLengthIndex);
ASSERT(result->map()->inobject_properties() > Heap::kArgumentsLengthIndex);
@@ -1201,13 +1265,14 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
Handle<Code> code = Handle<Code>(
isolate->builtins()->builtin(Builtins::kIllegal));
Handle<JSFunction> context_extension_fun =
- factory->NewFunction(factory->empty_symbol(),
+ factory->NewFunction(factory->empty_string(),
JS_CONTEXT_EXTENSION_OBJECT_TYPE,
JSObject::kHeaderSize,
code,
true);
- Handle<String> name = factory->LookupAsciiSymbol("context_extension");
+ Handle<String> name = factory->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("context_extension"));
context_extension_fun->shared()->set_instance_class_name(*name);
native_context()->set_context_extension_function(*context_extension_fun);
}
@@ -1219,7 +1284,7 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
Handle<Code>(isolate->builtins()->builtin(
Builtins::kHandleApiCallAsFunction));
Handle<JSFunction> delegate =
- factory->NewFunction(factory->empty_symbol(), JS_OBJECT_TYPE,
+ factory->NewFunction(factory->empty_string(), JS_OBJECT_TYPE,
JSObject::kHeaderSize, code, true);
native_context()->set_call_as_function_delegate(*delegate);
delegate->shared()->DontAdaptArguments();
@@ -1231,7 +1296,7 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
Handle<Code>(isolate->builtins()->builtin(
Builtins::kHandleApiCallAsConstructor));
Handle<JSFunction> delegate =
- factory->NewFunction(factory->empty_symbol(), JS_OBJECT_TYPE,
+ factory->NewFunction(factory->empty_string(), JS_OBJECT_TYPE,
JSObject::kHeaderSize, code, true);
native_context()->set_call_as_constructor_delegate(*delegate);
delegate->shared()->DontAdaptArguments();
@@ -1240,17 +1305,28 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
// Initialize the out of memory slot.
native_context()->set_out_of_memory(heap->false_value());
- // Initialize the data slot.
- native_context()->set_data(heap->undefined_value());
+ // Initialize the embedder data slot.
+ Handle<FixedArray> embedder_data = factory->NewFixedArray(2);
+ native_context()->set_embedder_data(*embedder_data);
- {
- // Initialize the random seed slot.
- Handle<ByteArray> zeroed_byte_array(
- factory->NewByteArray(kRandomStateSize));
- native_context()->set_random_seed(*zeroed_byte_array);
- memset(zeroed_byte_array->GetDataStartAddress(), 0, kRandomStateSize);
- }
- return true;
+ // Allocate the random seed slot.
+ Handle<ByteArray> random_seed = factory->NewByteArray(kRandomStateSize);
+ native_context()->set_random_seed(*random_seed);
+}
+
+
+Handle<JSFunction> Genesis::InstallTypedArray(
+ const char* name, ElementsKind elementsKind) {
+ Handle<JSObject> global = Handle<JSObject>(native_context()->global_object());
+ Handle<JSFunction> result = InstallFunction(global, name, JS_TYPED_ARRAY_TYPE,
+ JSTypedArray::kSize, isolate()->initial_object_prototype(),
+ Builtins::kIllegal, false, true);
+
+ Handle<Map> initial_map = isolate()->factory()->NewMap(
+ JS_TYPED_ARRAY_TYPE, JSTypedArray::kSizeWithInternalFields, elementsKind);
+ result->set_initial_map(*initial_map);
+ initial_map->set_constructor(*result);
+ return result;
}
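
Note: the typed array constructors installed earlier in this patch (Int8Array through Uint8ClampedArray) differ only in the external elements kind handed to InstallTypedArray, and each kind fixes the per-element width of the backing store. A rough standalone illustration of that name-to-width mapping (plain C++, not V8 internals; the table below is only the widths those kinds imply):

#include <cstdio>

// Stand-in table: V8's ElementsKind values are not reproduced here, only the
// constructor names from the patch and the per-element byte width each
// external kind implies.
struct TypedArrayKind { const char* name; int element_size; };

static const TypedArrayKind kTypedArrays[] = {
    {"Int8Array", 1},     {"Uint8Array", 1},   {"Int16Array", 2},
    {"Uint16Array", 2},   {"Int32Array", 4},   {"Uint32Array", 4},
    {"Float32Array", 4},  {"Float64Array", 8}, {"Uint8ClampedArray", 1}};

int main() {
  for (const TypedArrayKind& kind : kTypedArrays) {
    std::printf("%s: %d byte(s) per element\n", kind.name, kind.element_size);
  }
  return 0;
}
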
@@ -1258,26 +1334,112 @@ void Genesis::InitializeExperimentalGlobal() {
Handle<JSObject> global = Handle<JSObject>(native_context()->global_object());
// TODO(mstarzinger): Move this into Genesis::InitializeGlobal once we no
- // longer need to live behind a flag, so functions get added to the snapshot.
+ // longer need to live behind flags, so functions get added to the snapshot.
+
+ if (FLAG_harmony_symbols) {
+ // --- S y m b o l ---
+ Handle<JSFunction> symbol_fun =
+ InstallFunction(global, "Symbol", JS_VALUE_TYPE, JSValue::kSize,
+ isolate()->initial_object_prototype(),
+ Builtins::kIllegal, true, true);
+ native_context()->set_symbol_function(*symbol_fun);
+ }
+
if (FLAG_harmony_collections) {
{ // -- S e t
- Handle<JSObject> prototype =
- factory()->NewJSObject(isolate()->object_function(), TENURED);
InstallFunction(global, "Set", JS_SET_TYPE, JSSet::kSize,
- prototype, Builtins::kIllegal, true);
+ isolate()->initial_object_prototype(),
+ Builtins::kIllegal, true, true);
}
{ // -- M a p
- Handle<JSObject> prototype =
- factory()->NewJSObject(isolate()->object_function(), TENURED);
InstallFunction(global, "Map", JS_MAP_TYPE, JSMap::kSize,
- prototype, Builtins::kIllegal, true);
+ isolate()->initial_object_prototype(),
+ Builtins::kIllegal, true, true);
}
{ // -- W e a k M a p
- Handle<JSObject> prototype =
- factory()->NewJSObject(isolate()->object_function(), TENURED);
InstallFunction(global, "WeakMap", JS_WEAK_MAP_TYPE, JSWeakMap::kSize,
- prototype, Builtins::kIllegal, true);
+ isolate()->initial_object_prototype(),
+ Builtins::kIllegal, true, true);
}
+ { // -- W e a k S e t
+ InstallFunction(global, "WeakSet", JS_WEAK_SET_TYPE, JSWeakSet::kSize,
+ isolate()->initial_object_prototype(),
+ Builtins::kIllegal, true, true);
+ }
+ }
+
+ if (FLAG_harmony_generators) {
+ // Create generator meta-objects and install them on the builtins object.
+ Handle<JSObject> builtins(native_context()->builtins());
+ Handle<JSObject> generator_object_prototype =
+ factory()->NewJSObject(isolate()->object_function(), TENURED);
+ Handle<JSFunction> generator_function_prototype =
+ InstallFunction(builtins, "GeneratorFunctionPrototype",
+ JS_FUNCTION_TYPE, JSFunction::kHeaderSize,
+ generator_object_prototype, Builtins::kIllegal,
+ false, false);
+ InstallFunction(builtins, "GeneratorFunction",
+ JS_FUNCTION_TYPE, JSFunction::kSize,
+ generator_function_prototype, Builtins::kIllegal,
+ false, false);
+
+ // Create maps for generator functions and their prototypes. Store those
+ // maps in the native context.
+ Handle<Map> function_map(native_context()->function_map());
+ Handle<Map> generator_function_map = factory()->CopyMap(function_map);
+ generator_function_map->set_prototype(*generator_function_prototype);
+ native_context()->set_generator_function_map(*generator_function_map);
+
+ Handle<Map> strict_mode_function_map(
+ native_context()->strict_mode_function_map());
+ Handle<Map> strict_mode_generator_function_map = factory()->CopyMap(
+ strict_mode_function_map);
+ strict_mode_generator_function_map->set_prototype(
+ *generator_function_prototype);
+ native_context()->set_strict_mode_generator_function_map(
+ *strict_mode_generator_function_map);
+
+ Handle<Map> object_map(native_context()->object_function()->initial_map());
+ Handle<Map> generator_object_prototype_map = factory()->CopyMap(
+ object_map, 0);
+ generator_object_prototype_map->set_prototype(
+ *generator_object_prototype);
+ native_context()->set_generator_object_prototype_map(
+ *generator_object_prototype_map);
+
+ // Create a map for generator result objects.
+ ASSERT(object_map->inobject_properties() == 0);
+ STATIC_ASSERT(JSGeneratorObject::kResultPropertyCount == 2);
+ Handle<Map> generator_result_map = factory()->CopyMap(object_map,
+ JSGeneratorObject::kResultPropertyCount);
+ ASSERT(generator_result_map->inobject_properties() ==
+ JSGeneratorObject::kResultPropertyCount);
+
+ Handle<DescriptorArray> descriptors = factory()->NewDescriptorArray(0,
+ JSGeneratorObject::kResultPropertyCount);
+ DescriptorArray::WhitenessWitness witness(*descriptors);
+ generator_result_map->set_instance_descriptors(*descriptors);
+
+ Handle<String> value_string = factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("value"));
+ FieldDescriptor value_descr(*value_string,
+ JSGeneratorObject::kResultValuePropertyIndex,
+ NONE,
+ Representation::Tagged());
+ generator_result_map->AppendDescriptor(&value_descr, witness);
+
+ Handle<String> done_string = factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("done"));
+ FieldDescriptor done_descr(*done_string,
+ JSGeneratorObject::kResultDonePropertyIndex,
+ NONE,
+ Representation::Tagged());
+ generator_result_map->AppendDescriptor(&done_descr, witness);
+
+ generator_result_map->set_unused_property_fields(0);
+ ASSERT_EQ(JSGeneratorObject::kResultSize,
+ generator_result_map->instance_size());
+ native_context()->set_generator_result_map(*generator_result_map);
}
}
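
Note: the generator result map created above reserves exactly two in-object fields so that every {value, done} result can be allocated with a fixed layout and no out-of-object properties. A loose standalone analogue of that fixed two-slot shape (plain C++ struct; the field names follow the descriptors added above, everything else is illustrative):

#include <iostream>

// Fixed two-slot result shape, mirroring the "value" and "done" fields the
// generator_result_map reserves; the struct itself is only an analogy.
struct IterationResult {
  int value;   // Stands in for the tagged value slot.
  bool done;   // Stands in for the done slot.
};

// Toy "generator" that yields 0, 1, 2 and then reports completion.
static IterationResult Next(int* state) {
  if (*state >= 3) return {0, true};
  return {(*state)++, false};
}

int main() {
  int state = 0;
  for (IterationResult r = Next(&state); !r.done; r = Next(&state)) {
    std::cout << r.value << "\n";
  }
  return 0;
}
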
@@ -1286,7 +1448,7 @@ bool Genesis::CompileBuiltin(Isolate* isolate, int index) {
Vector<const char> name = Natives::GetScriptName(index);
Handle<String> source_code =
isolate->bootstrapper()->NativesSourceLookup(index);
- return CompileNative(name, source_code);
+ return CompileNative(isolate, name, source_code);
}
@@ -1296,23 +1458,25 @@ bool Genesis::CompileExperimentalBuiltin(Isolate* isolate, int index) {
Handle<String> source_code =
factory->NewStringFromAscii(
ExperimentalNatives::GetRawScriptSource(index));
- return CompileNative(name, source_code);
+ return CompileNative(isolate, name, source_code);
}
-bool Genesis::CompileNative(Vector<const char> name, Handle<String> source) {
- HandleScope scope;
- Isolate* isolate = source->GetIsolate();
+bool Genesis::CompileNative(Isolate* isolate,
+ Vector<const char> name,
+ Handle<String> source) {
+ HandleScope scope(isolate);
#ifdef ENABLE_DEBUGGER_SUPPORT
isolate->debugger()->set_compiling_natives(true);
#endif
// During genesis, the boilerplate for stack overflow won't work until the
// environment has been at least partially initialized. Add a stack check
// before entering JS code to catch overflow early.
- StackLimitCheck check(Isolate::Current());
+ StackLimitCheck check(isolate);
if (check.HasOverflowed()) return false;
- bool result = CompileScriptCached(name,
+ bool result = CompileScriptCached(isolate,
+ name,
source,
NULL,
NULL,
@@ -1327,26 +1491,28 @@ bool Genesis::CompileNative(Vector<const char> name, Handle<String> source) {
}
-bool Genesis::CompileScriptCached(Vector<const char> name,
+bool Genesis::CompileScriptCached(Isolate* isolate,
+ Vector<const char> name,
Handle<String> source,
SourceCodeCache* cache,
v8::Extension* extension,
Handle<Context> top_context,
bool use_runtime_context) {
- Factory* factory = source->GetIsolate()->factory();
- HandleScope scope;
+ Factory* factory = isolate->factory();
+ HandleScope scope(isolate);
Handle<SharedFunctionInfo> function_info;
// If we can't find the function in the cache, we compile a new
// function and insert it into the cache.
if (cache == NULL || !cache->Lookup(name, &function_info)) {
- ASSERT(source->IsAsciiRepresentation());
+ ASSERT(source->IsOneByteRepresentation());
Handle<String> script_name = factory->NewStringFromUtf8(name);
function_info = Compiler::Compile(
source,
script_name,
0,
0,
+ false,
top_context,
extension,
NULL,
@@ -1372,24 +1538,26 @@ bool Genesis::CompileScriptCached(Vector<const char> name,
Handle<Object> receiver =
Handle<Object>(use_runtime_context
? top_context->builtins()
- : top_context->global_object());
+ : top_context->global_object(),
+ isolate);
bool has_pending_exception;
- Execution::Call(fun, receiver, 0, NULL, &has_pending_exception);
+ Execution::Call(isolate, fun, receiver, 0, NULL, &has_pending_exception);
if (has_pending_exception) return false;
return true;
}
-#define INSTALL_NATIVE(Type, name, var) \
- Handle<String> var##_name = factory()->LookupAsciiSymbol(name); \
- Object* var##_native = \
- native_context()->builtins()->GetPropertyNoExceptionThrown( \
- *var##_name); \
+#define INSTALL_NATIVE(Type, name, var) \
+ Handle<String> var##_name = \
+ factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR(name)); \
+ Object* var##_native = \
+ native_context()->builtins()->GetPropertyNoExceptionThrown( \
+ *var##_name); \
native_context()->set_##var(Type::cast(var##_native));
void Genesis::InstallNativeFunctions() {
- HandleScope scope;
+ HandleScope scope(isolate());
INSTALL_NATIVE(JSFunction, "CreateDate", create_date_fun);
INSTALL_NATIVE(JSFunction, "ToNumber", to_number_fun);
INSTALL_NATIVE(JSFunction, "ToString", to_string_fun);
@@ -1408,6 +1576,7 @@ void Genesis::InstallNativeFunctions() {
to_complete_property_descriptor);
}
+
void Genesis::InstallExperimentalNativeFunctions() {
if (FLAG_harmony_proxies) {
INSTALL_NATIVE(JSFunction, "DerivedHasTrap", derived_has_trap);
@@ -1415,13 +1584,76 @@ void Genesis::InstallExperimentalNativeFunctions() {
INSTALL_NATIVE(JSFunction, "DerivedSetTrap", derived_set_trap);
INSTALL_NATIVE(JSFunction, "ProxyEnumerate", proxy_enumerate);
}
+ if (FLAG_harmony_observation) {
+ INSTALL_NATIVE(JSFunction, "NotifyChange", observers_notify_change);
+ INSTALL_NATIVE(JSFunction, "EnqueueSpliceRecord", observers_enqueue_splice);
+ INSTALL_NATIVE(JSFunction, "BeginPerformSplice",
+ observers_begin_perform_splice);
+ INSTALL_NATIVE(JSFunction, "EndPerformSplice",
+ observers_end_perform_splice);
+ INSTALL_NATIVE(JSFunction, "DeliverChangeRecords",
+ observers_deliver_changes);
+ }
}
#undef INSTALL_NATIVE
+Handle<JSFunction> Genesis::InstallInternalArray(
+ Handle<JSBuiltinsObject> builtins,
+ const char* name,
+ ElementsKind elements_kind) {
+ // --- I n t e r n a l A r r a y ---
+ // An array constructor on the builtins object that works like
+ // the public Array constructor, except that its prototype
+ // doesn't inherit from Object.prototype.
+ // To be used only for internal work by builtins. Instances
+ // must not be leaked to user code.
+ Handle<JSFunction> array_function =
+ InstallFunction(builtins,
+ name,
+ JS_ARRAY_TYPE,
+ JSArray::kSize,
+ isolate()->initial_object_prototype(),
+ Builtins::kInternalArrayCode,
+ true, true);
+ Handle<JSObject> prototype =
+ factory()->NewJSObject(isolate()->object_function(), TENURED);
+ Accessors::FunctionSetPrototype(array_function, prototype);
+
+ InternalArrayConstructorStub internal_array_constructor_stub(isolate());
+ Handle<Code> code = internal_array_constructor_stub.GetCode(isolate());
+ array_function->shared()->set_construct_stub(*code);
+ array_function->shared()->DontAdaptArguments();
+
+ Handle<Map> original_map(array_function->initial_map());
+ Handle<Map> initial_map = factory()->CopyMap(original_map);
+ initial_map->set_elements_kind(elements_kind);
+ array_function->set_initial_map(*initial_map);
+
+ // Make "length" magic on instances.
+ Handle<DescriptorArray> array_descriptors(
+ factory()->NewDescriptorArray(0, 1));
+ DescriptorArray::WhitenessWitness witness(*array_descriptors);
+
+ Handle<Foreign> array_length(factory()->NewForeign(
+ &Accessors::ArrayLength));
+ PropertyAttributes attribs = static_cast<PropertyAttributes>(
+ DONT_ENUM | DONT_DELETE);
+ initial_map->set_instance_descriptors(*array_descriptors);
+
+ { // Add length.
+ CallbacksDescriptor d(
+ *factory()->length_string(), *array_length, attribs);
+ array_function->initial_map()->AppendDescriptor(&d, witness);
+ }
+
+ return array_function;
+}
+
+
bool Genesis::InstallNatives() {
- HandleScope scope;
+ HandleScope scope(isolate());
// Create a function for the builtins object. Allocate space for the
// JavaScript builtins, a reference to the builtins object
@@ -1429,11 +1661,12 @@ bool Genesis::InstallNatives() {
Handle<Code> code = Handle<Code>(
isolate()->builtins()->builtin(Builtins::kIllegal));
Handle<JSFunction> builtins_fun =
- factory()->NewFunction(factory()->empty_symbol(),
+ factory()->NewFunction(factory()->empty_string(),
JS_BUILTINS_OBJECT_TYPE,
JSBuiltinsObject::kSize, code, true);
- Handle<String> name = factory()->LookupAsciiSymbol("builtins");
+ Handle<String> name =
+ factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("builtins"));
builtins_fun->shared()->set_instance_class_name(*name);
builtins_fun->initial_map()->set_dictionary_map(true);
builtins_fun->initial_map()->set_prototype(heap()->null_value());
@@ -1452,11 +1685,12 @@ bool Genesis::InstallNatives() {
// global object.
static const PropertyAttributes attributes =
static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
- Handle<String> global_symbol = factory()->LookupAsciiSymbol("global");
- Handle<Object> global_obj(native_context()->global_object());
+ Handle<String> global_string =
+ factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("global"));
+ Handle<Object> global_obj(native_context()->global_object(), isolate());
CHECK_NOT_EMPTY_HANDLE(isolate(),
JSObject::SetLocalPropertyIgnoreAttributes(
- builtins, global_symbol, global_obj, attributes));
+ builtins, global_string, global_obj, attributes));
// Set up the reference from the global object to the builtins object.
JSGlobalObject::cast(native_context()->global_object())->
@@ -1464,7 +1698,7 @@ bool Genesis::InstallNatives() {
// Create a bridge function that has context in the native context.
Handle<JSFunction> bridge =
- factory()->NewFunction(factory()->empty_symbol(),
+ factory()->NewFunction(factory()->empty_string(),
factory()->undefined_value());
ASSERT(bridge->context() == *isolate()->native_context());
@@ -1480,10 +1714,10 @@ bool Genesis::InstallNatives() {
Handle<JSFunction> script_fun =
InstallFunction(builtins, "Script", JS_VALUE_TYPE, JSValue::kSize,
isolate()->initial_object_prototype(),
- Builtins::kIllegal, false);
+ Builtins::kIllegal, false, false);
Handle<JSObject> prototype =
factory()->NewJSObject(isolate()->object_function(), TENURED);
- SetPrototype(script_fun, prototype);
+ Accessors::FunctionSetPrototype(script_fun, prototype);
native_context()->set_script_function(*script_fun);
Handle<Map> script_map = Handle<Map>(script_fun->initial_map());
@@ -1495,41 +1729,52 @@ bool Genesis::InstallNatives() {
Handle<Foreign> script_source(
factory()->NewForeign(&Accessors::ScriptSource));
Handle<Foreign> script_name(factory()->NewForeign(&Accessors::ScriptName));
- Handle<String> id_symbol(factory()->LookupAsciiSymbol("id"));
+ Handle<String> id_string(factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("id")));
Handle<Foreign> script_id(factory()->NewForeign(&Accessors::ScriptId));
- Handle<String> line_offset_symbol(
- factory()->LookupAsciiSymbol("line_offset"));
+ Handle<String> line_offset_string(
+ factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("line_offset")));
Handle<Foreign> script_line_offset(
factory()->NewForeign(&Accessors::ScriptLineOffset));
- Handle<String> column_offset_symbol(
- factory()->LookupAsciiSymbol("column_offset"));
+ Handle<String> column_offset_string(
+ factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("column_offset")));
Handle<Foreign> script_column_offset(
factory()->NewForeign(&Accessors::ScriptColumnOffset));
- Handle<String> data_symbol(factory()->LookupAsciiSymbol("data"));
+ Handle<String> data_string(factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("data")));
Handle<Foreign> script_data(factory()->NewForeign(&Accessors::ScriptData));
- Handle<String> type_symbol(factory()->LookupAsciiSymbol("type"));
+ Handle<String> type_string(factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("type")));
Handle<Foreign> script_type(factory()->NewForeign(&Accessors::ScriptType));
- Handle<String> compilation_type_symbol(
- factory()->LookupAsciiSymbol("compilation_type"));
+ Handle<String> compilation_type_string(
+ factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("compilation_type")));
Handle<Foreign> script_compilation_type(
factory()->NewForeign(&Accessors::ScriptCompilationType));
- Handle<String> line_ends_symbol(factory()->LookupAsciiSymbol("line_ends"));
+ Handle<String> line_ends_string(factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("line_ends")));
Handle<Foreign> script_line_ends(
factory()->NewForeign(&Accessors::ScriptLineEnds));
- Handle<String> context_data_symbol(
- factory()->LookupAsciiSymbol("context_data"));
+ Handle<String> context_data_string(
+ factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("context_data")));
Handle<Foreign> script_context_data(
factory()->NewForeign(&Accessors::ScriptContextData));
- Handle<String> eval_from_script_symbol(
- factory()->LookupAsciiSymbol("eval_from_script"));
+ Handle<String> eval_from_script_string(
+ factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("eval_from_script")));
Handle<Foreign> script_eval_from_script(
factory()->NewForeign(&Accessors::ScriptEvalFromScript));
- Handle<String> eval_from_script_position_symbol(
- factory()->LookupAsciiSymbol("eval_from_script_position"));
+ Handle<String> eval_from_script_position_string(
+ factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("eval_from_script_position")));
Handle<Foreign> script_eval_from_script_position(
factory()->NewForeign(&Accessors::ScriptEvalFromScriptPosition));
- Handle<String> eval_from_function_name_symbol(
- factory()->LookupAsciiSymbol("eval_from_function_name"));
+ Handle<String> eval_from_function_name_string(
+ factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("eval_from_function_name")));
Handle<Foreign> script_eval_from_function_name(
factory()->NewForeign(&Accessors::ScriptEvalFromFunctionName));
PropertyAttributes attribs =
@@ -1538,67 +1783,67 @@ bool Genesis::InstallNatives() {
{
CallbacksDescriptor d(
- *factory()->source_symbol(), *script_source, attribs);
+ *factory()->source_string(), *script_source, attribs);
script_map->AppendDescriptor(&d, witness);
}
{
- CallbacksDescriptor d(*factory()->name_symbol(), *script_name, attribs);
+ CallbacksDescriptor d(*factory()->name_string(), *script_name, attribs);
script_map->AppendDescriptor(&d, witness);
}
{
- CallbacksDescriptor d(*id_symbol, *script_id, attribs);
+ CallbacksDescriptor d(*id_string, *script_id, attribs);
script_map->AppendDescriptor(&d, witness);
}
{
- CallbacksDescriptor d(*line_offset_symbol, *script_line_offset, attribs);
+ CallbacksDescriptor d(*line_offset_string, *script_line_offset, attribs);
script_map->AppendDescriptor(&d, witness);
}
{
CallbacksDescriptor d(
- *column_offset_symbol, *script_column_offset, attribs);
+ *column_offset_string, *script_column_offset, attribs);
script_map->AppendDescriptor(&d, witness);
}
{
- CallbacksDescriptor d(*data_symbol, *script_data, attribs);
+ CallbacksDescriptor d(*data_string, *script_data, attribs);
script_map->AppendDescriptor(&d, witness);
}
{
- CallbacksDescriptor d(*type_symbol, *script_type, attribs);
+ CallbacksDescriptor d(*type_string, *script_type, attribs);
script_map->AppendDescriptor(&d, witness);
}
{
CallbacksDescriptor d(
- *compilation_type_symbol, *script_compilation_type, attribs);
+ *compilation_type_string, *script_compilation_type, attribs);
script_map->AppendDescriptor(&d, witness);
}
{
- CallbacksDescriptor d(*line_ends_symbol, *script_line_ends, attribs);
+ CallbacksDescriptor d(*line_ends_string, *script_line_ends, attribs);
script_map->AppendDescriptor(&d, witness);
}
{
CallbacksDescriptor d(
- *context_data_symbol, *script_context_data, attribs);
+ *context_data_string, *script_context_data, attribs);
script_map->AppendDescriptor(&d, witness);
}
{
CallbacksDescriptor d(
- *eval_from_script_symbol, *script_eval_from_script, attribs);
+ *eval_from_script_string, *script_eval_from_script, attribs);
script_map->AppendDescriptor(&d, witness);
}
{
CallbacksDescriptor d(
- *eval_from_script_position_symbol,
+ *eval_from_script_position_string,
*script_eval_from_script_position,
attribs);
script_map->AppendDescriptor(&d, witness);
@@ -1606,7 +1851,7 @@ bool Genesis::InstallNatives() {
{
CallbacksDescriptor d(
- *eval_from_function_name_symbol,
+ *eval_from_function_name_string,
*script_eval_from_function_name,
attribs);
script_map->AppendDescriptor(&d, witness);
@@ -1625,67 +1870,28 @@ bool Genesis::InstallNatives() {
InstallFunction(builtins, "OpaqueReference", JS_VALUE_TYPE,
JSValue::kSize,
isolate()->initial_object_prototype(),
- Builtins::kIllegal, false);
+ Builtins::kIllegal, false, false);
Handle<JSObject> prototype =
factory()->NewJSObject(isolate()->object_function(), TENURED);
- SetPrototype(opaque_reference_fun, prototype);
+ Accessors::FunctionSetPrototype(opaque_reference_fun, prototype);
native_context()->set_opaque_reference_function(*opaque_reference_fun);
}
- { // --- I n t e r n a l A r r a y ---
- // An array constructor on the builtins object that works like
- // the public Array constructor, except that its prototype
- // doesn't inherit from Object.prototype.
- // To be used only for internal work by builtins. Instances
- // must not be leaked to user code.
+ // InternalArrays should not use Smi-Only array optimizations. There are too
+ // many places in the C++ runtime code (e.g. RegEx) that assume that
+ // elements in InternalArrays can be set to non-Smi values without going
+ // through a common bottleneck that would make the SMI_ONLY -> FAST_ELEMENT
+ // transition easy to trap. Moreover, they rarely are smi-only.
+ {
Handle<JSFunction> array_function =
- InstallFunction(builtins,
- "InternalArray",
- JS_ARRAY_TYPE,
- JSArray::kSize,
- isolate()->initial_object_prototype(),
- Builtins::kInternalArrayCode,
- true);
- Handle<JSObject> prototype =
- factory()->NewJSObject(isolate()->object_function(), TENURED);
- SetPrototype(array_function, prototype);
-
- array_function->shared()->set_construct_stub(
- isolate()->builtins()->builtin(Builtins::kArrayConstructCode));
- array_function->shared()->DontAdaptArguments();
-
- // InternalArrays should not use Smi-Only array optimizations. There are too
- // many places in the C++ runtime code (e.g. RegEx) that assume that
- // elements in InternalArrays can be set to non-Smi values without going
- // through a common bottleneck that would make the SMI_ONLY -> FAST_ELEMENT
- // transition easy to trap. Moreover, they rarely are smi-only.
- MaybeObject* maybe_map = array_function->initial_map()->Copy();
- Map* new_map;
- if (!maybe_map->To(&new_map)) return false;
- new_map->set_elements_kind(FAST_HOLEY_ELEMENTS);
- array_function->set_initial_map(new_map);
-
- // Make "length" magic on instances.
- Handle<Map> initial_map(array_function->initial_map());
- Handle<DescriptorArray> array_descriptors(
- factory()->NewDescriptorArray(0, 1));
- DescriptorArray::WhitenessWitness witness(*array_descriptors);
-
- Handle<Foreign> array_length(factory()->NewForeign(
- &Accessors::ArrayLength));
- PropertyAttributes attribs = static_cast<PropertyAttributes>(
- DONT_ENUM | DONT_DELETE);
- initial_map->set_instance_descriptors(*array_descriptors);
-
- { // Add length.
- CallbacksDescriptor d(
- *factory()->length_symbol(), *array_length, attribs);
- array_function->initial_map()->AppendDescriptor(&d, witness);
- }
-
+ InstallInternalArray(builtins, "InternalArray", FAST_HOLEY_ELEMENTS);
native_context()->set_internal_array_function(*array_function);
}
+ {
+ InstallInternalArray(builtins, "InternalPackedArray", FAST_ELEMENTS);
+ }
+
if (FLAG_disable_native_files) {
PrintF("Warning: Running without installed natives!\n");
return true;
@@ -1713,9 +1919,10 @@ bool Genesis::InstallNatives() {
HeapObject::cast(string_function->initial_map()->prototype())->map());
// Install Function.prototype.call and apply.
- { Handle<String> key = factory()->function_class_symbol();
+ { Handle<String> key = factory()->function_class_string();
Handle<JSFunction> function =
- Handle<JSFunction>::cast(GetProperty(isolate()->global_object(), key));
+ Handle<JSFunction>::cast(
+ GetProperty(isolate(), isolate()->global_object(), key));
Handle<JSObject> proto =
Handle<JSObject>(JSObject::cast(function->instance_prototype()));
@@ -1724,12 +1931,12 @@ bool Genesis::InstallNatives() {
InstallFunction(proto, "call", JS_OBJECT_TYPE, JSObject::kHeaderSize,
Handle<JSObject>::null(),
Builtins::kFunctionCall,
- false);
+ false, false);
Handle<JSFunction> apply =
InstallFunction(proto, "apply", JS_OBJECT_TYPE, JSObject::kHeaderSize,
Handle<JSObject>::null(),
Builtins::kFunctionApply,
- false);
+ false, false);
// Make sure that Function.prototype.call appears to be compiled.
// The code will never be called, but inline caching for call will
@@ -1776,7 +1983,7 @@ bool Genesis::InstallNatives() {
JSFunction* array_function = native_context()->array_function();
Handle<DescriptorArray> array_descriptors(
array_function->initial_map()->instance_descriptors());
- String* length = heap()->length_symbol();
+ String* length = heap()->length_string();
int old = array_descriptors->SearchWithCache(
length, array_function->initial_map());
ASSERT(old != DescriptorArray::kNotFound);
@@ -1786,16 +1993,18 @@ bool Genesis::InstallNatives() {
initial_map->AppendDescriptor(&desc, witness);
}
{
- FieldDescriptor index_field(heap()->index_symbol(),
+ FieldDescriptor index_field(heap()->index_string(),
JSRegExpResult::kIndexIndex,
- NONE);
+ NONE,
+ Representation::Tagged());
initial_map->AppendDescriptor(&index_field, witness);
}
{
- FieldDescriptor input_field(heap()->input_symbol(),
+ FieldDescriptor input_field(heap()->input_string(),
JSRegExpResult::kInputIndex,
- NONE);
+ NONE,
+ Representation::Tagged());
initial_map->AppendDescriptor(&input_field, witness);
}
@@ -1818,6 +2027,11 @@ bool Genesis::InstallExperimentalNatives() {
for (int i = ExperimentalNatives::GetDebuggerCount();
i < ExperimentalNatives::GetBuiltinsCount();
i++) {
+ if (FLAG_harmony_symbols &&
+ strcmp(ExperimentalNatives::GetScriptName(i).start(),
+ "native symbol.js") == 0) {
+ if (!CompileExperimentalBuiltin(isolate(), i)) return false;
+ }
if (FLAG_harmony_proxies &&
strcmp(ExperimentalNatives::GetScriptName(i).start(),
"native proxy.js") == 0) {
@@ -1828,6 +2042,36 @@ bool Genesis::InstallExperimentalNatives() {
"native collection.js") == 0) {
if (!CompileExperimentalBuiltin(isolate(), i)) return false;
}
+ if (FLAG_harmony_observation &&
+ strcmp(ExperimentalNatives::GetScriptName(i).start(),
+ "native object-observe.js") == 0) {
+ if (!CompileExperimentalBuiltin(isolate(), i)) return false;
+ }
+ if (FLAG_harmony_generators &&
+ strcmp(ExperimentalNatives::GetScriptName(i).start(),
+ "native generator.js") == 0) {
+ if (!CompileExperimentalBuiltin(isolate(), i)) return false;
+ }
+ if (FLAG_harmony_iteration &&
+ strcmp(ExperimentalNatives::GetScriptName(i).start(),
+ "native array-iterator.js") == 0) {
+ if (!CompileExperimentalBuiltin(isolate(), i)) return false;
+ }
+ if (FLAG_harmony_strings &&
+ strcmp(ExperimentalNatives::GetScriptName(i).start(),
+ "native harmony-string.js") == 0) {
+ if (!CompileExperimentalBuiltin(isolate(), i)) return false;
+ }
+ if (FLAG_harmony_arrays &&
+ strcmp(ExperimentalNatives::GetScriptName(i).start(),
+ "native harmony-array.js") == 0) {
+ if (!CompileExperimentalBuiltin(isolate(), i)) return false;
+ }
+ if (FLAG_harmony_maths &&
+ strcmp(ExperimentalNatives::GetScriptName(i).start(),
+ "native harmony-math.js") == 0) {
+ if (!CompileExperimentalBuiltin(isolate(), i)) return false;
+ }
}
InstallExperimentalNativeFunctions();
@@ -1839,18 +2083,19 @@ bool Genesis::InstallExperimentalNatives() {
static Handle<JSObject> ResolveBuiltinIdHolder(
Handle<Context> native_context,
const char* holder_expr) {
- Factory* factory = native_context->GetIsolate()->factory();
+ Isolate* isolate = native_context->GetIsolate();
+ Factory* factory = isolate->factory();
Handle<GlobalObject> global(native_context->global_object());
const char* period_pos = strchr(holder_expr, '.');
if (period_pos == NULL) {
- return Handle<JSObject>::cast(
- GetProperty(global, factory->LookupAsciiSymbol(holder_expr)));
+ return Handle<JSObject>::cast(GetProperty(
+ isolate, global, factory->InternalizeUtf8String(holder_expr)));
}
ASSERT_EQ(".prototype", period_pos);
Vector<const char> property(holder_expr,
static_cast<int>(period_pos - holder_expr));
Handle<JSFunction> function = Handle<JSFunction>::cast(
- GetProperty(global, factory->LookupSymbol(property)));
+ GetProperty(isolate, global, factory->InternalizeUtf8String(property)));
return Handle<JSObject>(JSObject::cast(function->prototype()));
}
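
Note: ResolveBuiltinIdHolder accepts either a bare holder name ("Math") or a "Constructor.prototype" expression, splitting on the first period with strchr and asserting that the suffix is exactly ".prototype". A standalone sketch of just that parsing step (the SplitHolderExpr helper below is hypothetical; the V8 handle lookups are omitted):

#include <cstring>
#include <iostream>
#include <string>
#include <utility>

// Returns {object_name, true} when holder_expr is "Name.prototype",
// or {holder_expr, false} when it names the holder directly.
static std::pair<std::string, bool> SplitHolderExpr(const char* holder_expr) {
  const char* period_pos = std::strchr(holder_expr, '.');
  if (period_pos == nullptr) return {holder_expr, false};
  // The bootstrapper asserts the suffix is exactly ".prototype".
  if (std::strcmp(period_pos, ".prototype") != 0) return {holder_expr, false};
  return {std::string(holder_expr, period_pos - holder_expr), true};
}

int main() {
  auto math = SplitHolderExpr("Math");              // {"Math", false}
  auto array = SplitHolderExpr("Array.prototype");  // {"Array", true}
  std::cout << math.first << " " << array.first << "\n";
  return 0;
}
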
@@ -1859,7 +2104,7 @@ static void InstallBuiltinFunctionId(Handle<JSObject> holder,
const char* function_name,
BuiltinFunctionId id) {
Factory* factory = holder->GetIsolate()->factory();
- Handle<String> name = factory->LookupAsciiSymbol(function_name);
+ Handle<String> name = factory->InternalizeUtf8String(function_name);
Object* function_object = holder->GetProperty(*name)->ToObjectUnchecked();
Handle<JSFunction> function(JSFunction::cast(function_object));
function->shared()->set_function_data(Smi::FromInt(id));
@@ -1867,7 +2112,7 @@ static void InstallBuiltinFunctionId(Handle<JSObject> holder,
void Genesis::InstallBuiltinFunctionIds() {
- HandleScope scope;
+ HandleScope scope(isolate());
#define INSTALL_BUILTIN_ID(holder_expr, fun_name, name) \
{ \
Handle<JSObject> holder = ResolveBuiltinIdHolder( \
@@ -1906,7 +2151,8 @@ void Genesis::InstallJSFunctionResultCaches() {
#undef F
;
- Handle<FixedArray> caches = FACTORY->NewFixedArray(kNumberOfCaches, TENURED);
+ Handle<FixedArray> caches =
+ factory()->NewFixedArray(kNumberOfCaches, TENURED);
int index = 0;
@@ -1925,17 +2171,16 @@ void Genesis::InstallJSFunctionResultCaches() {
void Genesis::InitializeNormalizedMapCaches() {
Handle<FixedArray> array(
- FACTORY->NewFixedArray(NormalizedMapCache::kEntries, TENURED));
+ factory()->NewFixedArray(NormalizedMapCache::kEntries, TENURED));
native_context()->set_normalized_map_cache(NormalizedMapCache::cast(*array));
}
bool Bootstrapper::InstallExtensions(Handle<Context> native_context,
v8::ExtensionConfiguration* extensions) {
- Isolate* isolate = native_context->GetIsolate();
- BootstrapperActive active;
- SaveContext saved_context(isolate);
- isolate->set_context(*native_context);
+ BootstrapperActive active(this);
+ SaveContext saved_context(isolate_);
+ isolate_->set_context(*native_context);
if (!Genesis::InstallExtensions(native_context, extensions)) return false;
Genesis::InstallSpecialObjects(native_context);
return true;
@@ -1945,12 +2190,13 @@ bool Bootstrapper::InstallExtensions(Handle<Context> native_context,
void Genesis::InstallSpecialObjects(Handle<Context> native_context) {
Isolate* isolate = native_context->GetIsolate();
Factory* factory = isolate->factory();
- HandleScope scope;
+ HandleScope scope(isolate);
Handle<JSGlobalObject> global(JSGlobalObject::cast(
native_context->global_object()));
// Expose the natives in global if a name for it is specified.
if (FLAG_expose_natives_as != NULL && strlen(FLAG_expose_natives_as) != 0) {
- Handle<String> natives = factory->LookupAsciiSymbol(FLAG_expose_natives_as);
+ Handle<String> natives =
+ factory->InternalizeUtf8String(FLAG_expose_natives_as);
CHECK_NOT_EMPTY_HANDLE(isolate,
JSObject::SetLocalPropertyIgnoreAttributes(
global, natives,
@@ -1960,8 +2206,10 @@ void Genesis::InstallSpecialObjects(Handle<Context> native_context) {
Handle<Object> Error = GetProperty(global, "Error");
if (Error->IsJSObject()) {
- Handle<String> name = factory->LookupAsciiSymbol("stackTraceLimit");
- Handle<Smi> stack_trace_limit(Smi::FromInt(FLAG_stack_trace_limit));
+ Handle<String> name = factory->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("stackTraceLimit"));
+ Handle<Smi> stack_trace_limit(
+ Smi::FromInt(FLAG_stack_trace_limit), isolate);
CHECK_NOT_EMPTY_HANDLE(isolate,
JSObject::SetLocalPropertyIgnoreAttributes(
Handle<JSObject>::cast(Error), name,
@@ -1971,7 +2219,7 @@ void Genesis::InstallSpecialObjects(Handle<Context> native_context) {
#ifdef ENABLE_DEBUGGER_SUPPORT
// Expose the debug global object in global if a name for it is specified.
if (FLAG_expose_debug_as != NULL && strlen(FLAG_expose_debug_as) != 0) {
- Debug* debug = Isolate::Current()->debug();
+ Debug* debug = isolate->debug();
// If loading fails we just bail out without installing the
// debugger but without tanking the whole context.
if (!debug->Load()) return;
@@ -1982,8 +2230,9 @@ void Genesis::InstallSpecialObjects(Handle<Context> native_context) {
native_context->security_token());
Handle<String> debug_string =
- factory->LookupAsciiSymbol(FLAG_expose_debug_as);
- Handle<Object> global_proxy(debug->debug_context()->global_proxy());
+ factory->InternalizeUtf8String(FLAG_expose_debug_as);
+ Handle<Object> global_proxy(
+ debug->debug_context()->global_proxy(), isolate);
CHECK_NOT_EMPTY_HANDLE(isolate,
JSObject::SetLocalPropertyIgnoreAttributes(
global, debug_string, global_proxy, DONT_ENUM));
@@ -1991,10 +2240,12 @@ void Genesis::InstallSpecialObjects(Handle<Context> native_context) {
#endif
}
+
static uint32_t Hash(RegisteredExtension* extension) {
return v8::internal::ComputePointerHash(extension);
}
+
static bool MatchRegisteredExtensions(void* key1, void* key2) {
return key1 == key2;
}
@@ -2020,26 +2271,22 @@ void Genesis::ExtensionStates::set_state(RegisteredExtension* extension,
bool Genesis::InstallExtensions(Handle<Context> native_context,
v8::ExtensionConfiguration* extensions) {
- // TODO(isolates): Extensions on multiple isolates may take a little more
- // effort. (The external API reads 'ignore'-- does that mean
- // we can break the interface?)
-
-
+ Isolate* isolate = native_context->GetIsolate();
ExtensionStates extension_states; // All extensions have state UNVISITED.
// Install auto extensions.
v8::RegisteredExtension* current = v8::RegisteredExtension::first_extension();
while (current != NULL) {
if (current->extension()->auto_enable())
- InstallExtension(current, &extension_states);
+ InstallExtension(isolate, current, &extension_states);
current = current->next();
}
- if (FLAG_expose_gc) InstallExtension("v8/gc", &extension_states);
+ if (FLAG_expose_gc) InstallExtension(isolate, "v8/gc", &extension_states);
if (FLAG_expose_externalize_string) {
- InstallExtension("v8/externalize", &extension_states);
+ InstallExtension(isolate, "v8/externalize", &extension_states);
}
if (FLAG_track_gc_object_stats) {
- InstallExtension("v8/statistics", &extension_states);
+ InstallExtension(isolate, "v8/statistics", &extension_states);
}
if (extensions == NULL) return true;
@@ -2047,7 +2294,7 @@ bool Genesis::InstallExtensions(Handle<Context> native_context,
int count = v8::ImplementationUtilities::GetNameCount(extensions);
const char** names = v8::ImplementationUtilities::GetNames(extensions);
for (int i = 0; i < count; i++) {
- if (!InstallExtension(names[i], &extension_states))
+ if (!InstallExtension(isolate, names[i], &extension_states))
return false;
}
@@ -2057,7 +2304,8 @@ bool Genesis::InstallExtensions(Handle<Context> native_context,
// Installs a named extension. This method is unoptimized and does
// not scale well if we want to support a large number of extensions.
-bool Genesis::InstallExtension(const char* name,
+bool Genesis::InstallExtension(Isolate* isolate,
+ const char* name,
ExtensionStates* extension_states) {
v8::RegisteredExtension* current = v8::RegisteredExtension::first_extension();
// Loop until we find the relevant extension
@@ -2071,13 +2319,14 @@ bool Genesis::InstallExtension(const char* name,
"v8::Context::New()", "Cannot find required extension");
return false;
}
- return InstallExtension(current, extension_states);
+ return InstallExtension(isolate, current, extension_states);
}
-bool Genesis::InstallExtension(v8::RegisteredExtension* current,
+bool Genesis::InstallExtension(Isolate* isolate,
+ v8::RegisteredExtension* current,
ExtensionStates* extension_states) {
- HandleScope scope;
+ HandleScope scope(isolate);
if (extension_states->get_state(current) == INSTALLED) return true;
// The current node has already been visited so there must be a
@@ -2092,19 +2341,21 @@ bool Genesis::InstallExtension(v8::RegisteredExtension* current,
v8::Extension* extension = current->extension();
// Install the extension's dependencies
for (int i = 0; i < extension->dependency_count(); i++) {
- if (!InstallExtension(extension->dependencies()[i], extension_states))
+ if (!InstallExtension(isolate,
+ extension->dependencies()[i],
+ extension_states)) {
return false;
+ }
}
- Isolate* isolate = Isolate::Current();
Handle<String> source_code =
isolate->factory()->NewExternalStringFromAscii(extension->source());
- bool result = CompileScriptCached(
- CStrVector(extension->name()),
- source_code,
- isolate->bootstrapper()->extensions_cache(),
- extension,
- Handle<Context>(isolate->context()),
- false);
+ bool result = CompileScriptCached(isolate,
+ CStrVector(extension->name()),
+ source_code,
+ isolate->bootstrapper()->extensions_cache(),
+ extension,
+ Handle<Context>(isolate->context()),
+ false);
ASSERT(isolate->has_pending_exception() != result);
if (!result) {
// We print out the name of the extension that failed to install.
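
Note: InstallExtension is a depth-first walk over the extension dependency graph: the per-extension state lets it return immediately for already-installed extensions and report an error when it re-enters a node that is still being installed (a cycle). A minimal standalone sketch of that traversal over a toy dependency map (the names and the State enum below are illustrative, not V8's):

#include <iostream>
#include <map>
#include <string>
#include <vector>

enum State { UNVISITED, VISITED, INSTALLED };

static std::map<std::string, std::vector<std::string>> deps = {
    {"v8/statistics", {}},
    {"my/ext", {"v8/statistics"}}};
static std::map<std::string, State> state;

// Returns false for a missing extension or a dependency cycle.
static bool Install(const std::string& name) {
  auto it = deps.find(name);
  if (it == deps.end()) return false;
  if (state[name] == INSTALLED) return true;   // Already done.
  if (state[name] == VISITED) return false;    // Cycle: still being installed.
  state[name] = VISITED;
  for (const auto& dep : it->second) {
    if (!Install(dep)) return false;           // Install dependencies first.
  }
  std::cout << "installed " << name << "\n";
  state[name] = INSTALLED;
  return true;
}

int main() { return Install("my/ext") ? 0 : 1; }
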
@@ -2122,11 +2373,11 @@ bool Genesis::InstallExtension(v8::RegisteredExtension* current,
bool Genesis::InstallJSBuiltins(Handle<JSBuiltinsObject> builtins) {
- HandleScope scope;
- Factory* factory = builtins->GetIsolate()->factory();
+ HandleScope scope(isolate());
for (int i = 0; i < Builtins::NumberOfJavaScriptBuiltins(); i++) {
Builtins::JavaScript id = static_cast<Builtins::JavaScript>(i);
- Handle<String> name = factory->LookupAsciiSymbol(Builtins::GetName(id));
+ Handle<String> name =
+ factory()->InternalizeUtf8String(Builtins::GetName(id));
Object* function_object = builtins->GetPropertyNoExceptionThrown(*name);
Handle<JSFunction> function
= Handle<JSFunction>(JSFunction::cast(function_object));
@@ -2164,6 +2415,10 @@ bool Genesis::ConfigureGlobalObjects(
}
SetObjectPrototype(global_proxy, inner_global);
+
+ native_context()->set_initial_array_prototype(
+ JSArray::cast(native_context()->array_function()->prototype()));
+
return true;
}
@@ -2192,27 +2447,28 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
if (from->HasFastProperties()) {
Handle<DescriptorArray> descs =
Handle<DescriptorArray>(from->map()->instance_descriptors());
- for (int i = 0; i < descs->number_of_descriptors(); i++) {
+ for (int i = 0; i < from->map()->NumberOfOwnDescriptors(); i++) {
PropertyDetails details = descs->GetDetails(i);
switch (details.type()) {
case FIELD: {
- HandleScope inner;
- Handle<String> key = Handle<String>(descs->GetKey(i));
+ HandleScope inner(isolate());
+ Handle<Name> key = Handle<Name>(descs->GetKey(i));
int index = descs->GetFieldIndex(i);
- Handle<Object> value = Handle<Object>(from->FastPropertyAt(index));
- CHECK_NOT_EMPTY_HANDLE(to->GetIsolate(),
+ ASSERT(!descs->GetDetails(i).representation().IsDouble());
+ Handle<Object> value = Handle<Object>(from->RawFastPropertyAt(index),
+ isolate());
+ CHECK_NOT_EMPTY_HANDLE(isolate(),
JSObject::SetLocalPropertyIgnoreAttributes(
to, key, value, details.attributes()));
break;
}
- case CONSTANT_FUNCTION: {
- HandleScope inner;
- Handle<String> key = Handle<String>(descs->GetKey(i));
- Handle<JSFunction> fun =
- Handle<JSFunction>(descs->GetConstantFunction(i));
- CHECK_NOT_EMPTY_HANDLE(to->GetIsolate(),
+ case CONSTANT: {
+ HandleScope inner(isolate());
+ Handle<Name> key = Handle<Name>(descs->GetKey(i));
+ Handle<Object> constant(descs->GetConstant(i), isolate());
+ CHECK_NOT_EMPTY_HANDLE(isolate(),
JSObject::SetLocalPropertyIgnoreAttributes(
- to, key, fun, details.attributes()));
+ to, key, constant, details.attributes()));
break;
}
case CALLBACKS: {
@@ -2220,14 +2476,13 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
to->LocalLookup(descs->GetKey(i), &result);
// If the property is already there we skip it
if (result.IsFound()) continue;
- HandleScope inner;
+ HandleScope inner(isolate());
ASSERT(!to->HasFastProperties());
// Add to dictionary.
- Handle<String> key = Handle<String>(descs->GetKey(i));
- Handle<Object> callbacks(descs->GetCallbacksObject(i));
- PropertyDetails d = PropertyDetails(details.attributes(),
- CALLBACKS,
- details.descriptor_index());
+ Handle<Name> key = Handle<Name>(descs->GetKey(i));
+ Handle<Object> callbacks(descs->GetCallbacksObject(i), isolate());
+ PropertyDetails d = PropertyDetails(
+ details.attributes(), CALLBACKS, i + 1);
JSObject::SetNormalizedProperty(to, key, callbacks, d);
break;
}
@@ -2243,25 +2498,28 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
}
}
} else {
- Handle<StringDictionary> properties =
- Handle<StringDictionary>(from->property_dictionary());
+ Handle<NameDictionary> properties =
+ Handle<NameDictionary>(from->property_dictionary());
int capacity = properties->Capacity();
for (int i = 0; i < capacity; i++) {
Object* raw_key(properties->KeyAt(i));
if (properties->IsKey(raw_key)) {
- ASSERT(raw_key->IsString());
+ ASSERT(raw_key->IsName());
// If the property is already there we skip it.
LookupResult result(isolate());
- to->LocalLookup(String::cast(raw_key), &result);
+ to->LocalLookup(Name::cast(raw_key), &result);
if (result.IsFound()) continue;
// Set the property.
- Handle<String> key = Handle<String>(String::cast(raw_key));
- Handle<Object> value = Handle<Object>(properties->ValueAt(i));
- if (value->IsJSGlobalPropertyCell()) {
- value = Handle<Object>(JSGlobalPropertyCell::cast(*value)->value());
+ Handle<Name> key = Handle<Name>(Name::cast(raw_key));
+ Handle<Object> value = Handle<Object>(properties->ValueAt(i),
+ isolate());
+ ASSERT(!value->IsCell());
+ if (value->IsPropertyCell()) {
+ value = Handle<Object>(PropertyCell::cast(*value)->value(),
+ isolate());
}
PropertyDetails details = properties->DetailsAt(i);
- CHECK_NOT_EMPTY_HANDLE(to->GetIsolate(),
+ CHECK_NOT_EMPTY_HANDLE(isolate(),
JSObject::SetLocalPropertyIgnoreAttributes(
to, key, value, details.attributes()));
}
@@ -2275,14 +2533,13 @@ void Genesis::TransferIndexedProperties(Handle<JSObject> from,
// Cloning the elements array is sufficient.
Handle<FixedArray> from_elements =
Handle<FixedArray>(FixedArray::cast(from->elements()));
- Handle<FixedArray> to_elements = FACTORY->CopyFixedArray(from_elements);
+ Handle<FixedArray> to_elements = factory()->CopyFixedArray(from_elements);
to->set_elements(*to_elements);
}
void Genesis::TransferObject(Handle<JSObject> from, Handle<JSObject> to) {
- HandleScope outer;
- Factory* factory = from->GetIsolate()->factory();
+ HandleScope outer(isolate());
ASSERT(!from->IsJSArray());
ASSERT(!to->IsJSArray());
@@ -2292,7 +2549,7 @@ void Genesis::TransferObject(Handle<JSObject> from, Handle<JSObject> to) {
// Transfer the prototype (new map is needed).
Handle<Map> old_to_map = Handle<Map>(to->map());
- Handle<Map> new_to_map = factory->CopyMap(old_to_map);
+ Handle<Map> new_to_map = factory()->CopyMap(old_to_map);
new_to_map->set_prototype(from->map()->prototype());
to->set_map(*new_to_map);
}
@@ -2302,42 +2559,47 @@ void Genesis::MakeFunctionInstancePrototypeWritable() {
// The maps with writable prototype are created in CreateEmptyFunction
// and CreateStrictModeFunctionMaps respectively. Initially the maps are
// created with read-only prototype for JS builtins processing.
- ASSERT(!function_instance_map_writable_prototype_.is_null());
- ASSERT(!strict_mode_function_instance_map_writable_prototype_.is_null());
+ ASSERT(!function_map_writable_prototype_.is_null());
+ ASSERT(!strict_mode_function_map_writable_prototype_.is_null());
// Replace function instance maps to make prototype writable.
- native_context()->set_function_map(
- *function_instance_map_writable_prototype_);
+ native_context()->set_function_map(*function_map_writable_prototype_);
native_context()->set_strict_mode_function_map(
- *strict_mode_function_instance_map_writable_prototype_);
+ *strict_mode_function_map_writable_prototype_);
}
Genesis::Genesis(Isolate* isolate,
Handle<Object> global_object,
v8::Handle<v8::ObjectTemplate> global_template,
- v8::ExtensionConfiguration* extensions) : isolate_(isolate) {
+ v8::ExtensionConfiguration* extensions)
+ : isolate_(isolate),
+ active_(isolate->bootstrapper()) {
result_ = Handle<Context>::null();
- // If V8 isn't running and cannot be initialized, just return.
- if (!V8::IsRunning() && !V8::Initialize(NULL)) return;
+ // If V8 cannot be initialized, just return.
+ if (!V8::Initialize(NULL)) return;
// Before creating the roots we must save the context and restore it
// on all function exits.
- HandleScope scope;
SaveContext saved_context(isolate);
// During genesis, the boilerplate for stack overflow won't work until the
// environment has been at least partially initialized. Add a stack check
// before entering JS code to catch overflow early.
- StackLimitCheck check(Isolate::Current());
+ StackLimitCheck check(isolate);
if (check.HasOverflowed()) return;
- Handle<Context> new_context = Snapshot::NewContextFromSnapshot();
- if (!new_context.is_null()) {
- native_context_ =
- Handle<Context>::cast(isolate->global_handles()->Create(*new_context));
- AddToWeakNativeContextList(*native_context_);
- isolate->set_context(*native_context_);
+ // We can only de-serialize a context if the isolate was initialized from
+ // a snapshot. Otherwise we have to build the context from scratch.
+ if (isolate->initialized_from_snapshot()) {
+ native_context_ = Snapshot::NewContextFromSnapshot(isolate);
+ } else {
+ native_context_ = Handle<Context>();
+ }
+
+ if (!native_context().is_null()) {
+ AddToWeakNativeContextList(*native_context());
+ isolate->set_context(*native_context());
isolate->counters()->contexts_created_by_snapshot()->Increment();
Handle<GlobalObject> inner_global;
Handle<JSGlobalProxy> global_proxy =
@@ -2358,7 +2620,7 @@ Genesis::Genesis(Isolate* isolate,
Handle<JSGlobalProxy> global_proxy =
CreateNewGlobals(global_template, global_object, &inner_global);
HookUpGlobalProxy(inner_global, global_proxy);
- if (!InitializeGlobal(inner_global, empty_function)) return;
+ InitializeGlobal(inner_global, empty_function);
InstallJSFunctionResultCaches();
InitializeNormalizedMapCaches();
if (!InstallNatives()) return;
@@ -2373,7 +2635,15 @@ Genesis::Genesis(Isolate* isolate,
InitializeExperimentalGlobal();
if (!InstallExperimentalNatives()) return;
- result_ = native_context_;
+ // Initially seed the per-context random number generator
+ // using the per-isolate random number generator.
+ uint32_t* state = reinterpret_cast<uint32_t*>(
+ native_context()->random_seed()->GetDataStartAddress());
+ do {
+ isolate->random_number_generator()->NextBytes(state, kRandomStateSize);
+ } while (state[0] == 0 || state[1] == 0);
+
+ result_ = native_context();
}
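
The do/while above retries until neither state word is zero, which keeps the per-context generator out of a degenerate zero-word seed. A minimal stand-alone sketch of the same retry pattern, with std::mt19937 standing in for the per-isolate generator (the names below are illustrative, not V8 API):

    #include <cstdint>
    #include <random>

    // Illustrative sketch only (not V8 API): fill a two-word state and retry
    // until neither word is zero, mirroring the do/while loop in the diff.
    // std::mt19937 stands in for the per-isolate random number generator.
    void SeedContextRandomState(uint32_t state[2], std::mt19937& isolate_rng) {
      do {
        state[0] = static_cast<uint32_t>(isolate_rng());
        state[1] = static_cast<uint32_t>(isolate_rng());
      } while (state[0] == 0 || state[1] == 0);
    }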
diff --git a/deps/v8/src/bootstrapper.h b/deps/v8/src/bootstrapper.h
index 179e65c35..bac9f4037 100644
--- a/deps/v8/src/bootstrapper.h
+++ b/deps/v8/src/bootstrapper.h
@@ -44,8 +44,8 @@ class SourceCodeCache BASE_EMBEDDED {
public:
explicit SourceCodeCache(Script::Type type): type_(type), cache_(NULL) { }
- void Initialize(bool create_heap_objects) {
- cache_ = create_heap_objects ? HEAP->empty_fixed_array() : NULL;
+ void Initialize(Isolate* isolate, bool create_heap_objects) {
+ cache_ = create_heap_objects ? isolate->heap()->empty_fixed_array() : NULL;
}
void Iterate(ObjectVisitor* v) {
@@ -54,8 +54,8 @@ class SourceCodeCache BASE_EMBEDDED {
bool Lookup(Vector<const char> name, Handle<SharedFunctionInfo>* handle) {
for (int i = 0; i < cache_->length(); i+=2) {
- SeqAsciiString* str = SeqAsciiString::cast(cache_->get(i));
- if (str->IsEqualTo(name)) {
+ SeqOneByteString* str = SeqOneByteString::cast(cache_->get(i));
+ if (str->IsUtf8EqualTo(name)) {
*handle = Handle<SharedFunctionInfo>(
SharedFunctionInfo::cast(cache_->get(i + 1)));
return true;
@@ -65,13 +65,14 @@ class SourceCodeCache BASE_EMBEDDED {
}
void Add(Vector<const char> name, Handle<SharedFunctionInfo> shared) {
- HandleScope scope;
+ Isolate* isolate = shared->GetIsolate();
+ Factory* factory = isolate->factory();
+ HandleScope scope(isolate);
int length = cache_->length();
- Handle<FixedArray> new_array =
- FACTORY->NewFixedArray(length + 2, TENURED);
+ Handle<FixedArray> new_array = factory->NewFixedArray(length + 2, TENURED);
cache_->CopyTo(0, *new_array, 0, cache_->length());
cache_ = *new_array;
- Handle<String> str = FACTORY->NewStringFromAscii(name, TENURED);
+ Handle<String> str = factory->NewStringFromAscii(name, TENURED);
cache_->set(length, *str);
cache_->set(length + 1, *shared);
Script::cast(shared->script())->set_type(Smi::FromInt(type_));
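
For reference, the cache touched here is one flat array in which names sit at even slots and the matching SharedFunctionInfo at the following odd slot; Add grows it by two slots per entry and Lookup scans it linearly. A rough stand-alone sketch of that layout, using std::vector and strings instead of the heap-allocated V8 types:

    #include <optional>
    #include <string>
    #include <utility>
    #include <vector>

    // Illustrative sketch only: (name, value) pairs stored flat, names at even
    // indices and values at the odd index right after them.
    class FlatPairCache {
     public:
      std::optional<std::string> Lookup(const std::string& name) const {
        for (size_t i = 0; i + 1 < entries_.size(); i += 2) {
          if (entries_[i] == name) return entries_[i + 1];
        }
        return std::nullopt;
      }
      void Add(std::string name, std::string value) {
        entries_.push_back(std::move(name));   // two slots per entry
        entries_.push_back(std::move(value));
      }
     private:
      std::vector<std::string> entries_;
    };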
@@ -88,6 +89,8 @@ class SourceCodeCache BASE_EMBEDDED {
// context.
class Bootstrapper {
public:
+ static void InitializeOncePerProcess();
+
// Requires: Heap::SetUp has been called.
void Initialize(bool create_heap_objects);
void TearDown();
@@ -95,7 +98,6 @@ class Bootstrapper {
// Creates a JavaScript Global Context with initial object graph.
// The returned value is a global handle cast to V8Environment*.
Handle<Context> CreateEnvironment(
- Isolate* isolate,
Handle<Object> global_object,
v8::Handle<v8::ObjectTemplate> global_template,
v8::ExtensionConfiguration* extensions);
@@ -132,6 +134,7 @@ class Bootstrapper {
SourceCodeCache* extensions_cache() { return &extensions_cache_; }
private:
+ Isolate* isolate_;
typedef int NestingCounterType;
NestingCounterType nesting_;
SourceCodeCache extensions_cache_;
@@ -144,7 +147,7 @@ class Bootstrapper {
friend class Isolate;
friend class NativesExternalStringResource;
- Bootstrapper();
+ explicit Bootstrapper(Isolate* isolate);
DISALLOW_COPY_AND_ASSIGN(Bootstrapper);
};
@@ -152,15 +155,18 @@ class Bootstrapper {
class BootstrapperActive BASE_EMBEDDED {
public:
- BootstrapperActive() {
- ++Isolate::Current()->bootstrapper()->nesting_;
+ explicit BootstrapperActive(Bootstrapper* bootstrapper)
+ : bootstrapper_(bootstrapper) {
+ ++bootstrapper_->nesting_;
}
~BootstrapperActive() {
- --Isolate::Current()->bootstrapper()->nesting_;
+ --bootstrapper_->nesting_;
}
private:
+ Bootstrapper* bootstrapper_;
+
DISALLOW_COPY_AND_ASSIGN(BootstrapperActive);
};
diff --git a/deps/v8/src/builtins.cc b/deps/v8/src/builtins.cc
index df70cd4fc..b614904c9 100644
--- a/deps/v8/src/builtins.cc
+++ b/deps/v8/src/builtins.cc
@@ -31,6 +31,7 @@
#include "arguments.h"
#include "bootstrapper.h"
#include "builtins.h"
+#include "cpu-profiler.h"
#include "gdb-jit.h"
#include "ic-inl.h"
#include "heap-profiler.h"
@@ -125,23 +126,30 @@ BUILTIN_LIST_C(DEF_ARG_TYPE)
#ifdef DEBUG
-#define BUILTIN(name) \
- MUST_USE_RESULT static MaybeObject* Builtin_Impl_##name( \
- name##ArgumentsType args, Isolate* isolate); \
- MUST_USE_RESULT static MaybeObject* Builtin_##name( \
- name##ArgumentsType args, Isolate* isolate) { \
- ASSERT(isolate == Isolate::Current()); \
- args.Verify(); \
- return Builtin_Impl_##name(args, isolate); \
- } \
- MUST_USE_RESULT static MaybeObject* Builtin_Impl_##name( \
+#define BUILTIN(name) \
+ MUST_USE_RESULT static MaybeObject* Builtin_Impl_##name( \
+ name##ArgumentsType args, Isolate* isolate); \
+ MUST_USE_RESULT static MaybeObject* Builtin_##name( \
+ int args_length, Object** args_object, Isolate* isolate) { \
+ name##ArgumentsType args(args_length, args_object); \
+ args.Verify(); \
+ return Builtin_Impl_##name(args, isolate); \
+ } \
+ MUST_USE_RESULT static MaybeObject* Builtin_Impl_##name( \
name##ArgumentsType args, Isolate* isolate)
#else // For release mode.
-#define BUILTIN(name) \
- static MaybeObject* Builtin_##name(name##ArgumentsType args, Isolate* isolate)
-
+#define BUILTIN(name) \
+ static MaybeObject* Builtin_impl##name( \
+ name##ArgumentsType args, Isolate* isolate); \
+ static MaybeObject* Builtin_##name( \
+ int args_length, Object** args_object, Isolate* isolate) { \
+ name##ArgumentsType args(args_length, args_object); \
+ return Builtin_impl##name(args, isolate); \
+ } \
+ static MaybeObject* Builtin_impl##name( \
+ name##ArgumentsType args, Isolate* isolate)
#endif
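
Both variants of the rewritten BUILTIN macro now generate a small trampoline that accepts the raw (length, argument array, isolate) triple, reconstructs the typed arguments wrapper, and forwards to the real body; the DEBUG flavor additionally calls args.Verify(). A stripped-down sketch of the pattern with placeholder types (not the actual V8 signatures):

    // Illustrative sketch only: a macro that emits a raw-argument trampoline
    // plus the typed implementation it forwards to.
    struct Isolate;
    struct Object;

    struct BuiltinArguments {
      BuiltinArguments(int length, Object** args) : length(length), args(args) {}
      int length;
      Object** args;
    };

    #define SKETCH_BUILTIN(name)                                             \
      static Object* Impl_##name(BuiltinArguments args, Isolate* isolate);  \
      static Object* name(int args_length, Object** args_object,            \
                          Isolate* isolate) {                                \
        BuiltinArguments args(args_length, args_object);                     \
        return Impl_##name(args, isolate); /* forward to the typed body */   \
      }                                                                       \
      static Object* Impl_##name(BuiltinArguments args, Isolate* isolate)

    SKETCH_BUILTIN(Illegal) {
      (void)args;
      (void)isolate;
      return nullptr;  // the real builtin raises an error here instead
    }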
@@ -150,7 +158,7 @@ static inline bool CalledAsConstructor(Isolate* isolate) {
// Calculate the result using a full stack frame iterator and check
// that the state of the stack is as we assume it to be in the
// code below.
- StackFrameIterator it;
+ StackFrameIterator it(isolate);
ASSERT(it.frame()->is_exit());
it.Advance();
StackFrame* frame = it.frame();
@@ -173,6 +181,7 @@ static inline bool CalledAsConstructor(Isolate* isolate) {
return result;
}
+
// ----------------------------------------------------------------------------
BUILTIN(Illegal) {
@@ -186,162 +195,15 @@ BUILTIN(EmptyFunction) {
}
-static MaybeObject* ArrayCodeGenericCommon(Arguments* args,
- Isolate* isolate,
- JSFunction* constructor) {
- Heap* heap = isolate->heap();
- isolate->counters()->array_function_runtime()->Increment();
-
- JSArray* array;
- if (CalledAsConstructor(isolate)) {
- array = JSArray::cast((*args)[0]);
- // Initialize elements and length in case later allocations fail so that the
- // array object is initialized in a valid state.
- array->set_length(Smi::FromInt(0));
- array->set_elements(heap->empty_fixed_array());
- if (!FLAG_smi_only_arrays) {
- Context* native_context = isolate->context()->native_context();
- if (array->GetElementsKind() == GetInitialFastElementsKind() &&
- !native_context->js_array_maps()->IsUndefined()) {
- FixedArray* map_array =
- FixedArray::cast(native_context->js_array_maps());
- array->set_map(Map::cast(map_array->
- get(TERMINAL_FAST_ELEMENTS_KIND)));
- }
- }
- } else {
- // Allocate the JS Array
- MaybeObject* maybe_obj = heap->AllocateJSObject(constructor);
- if (!maybe_obj->To(&array)) return maybe_obj;
- }
-
- // Optimize the case where there is one argument and the argument is a
- // small smi.
- if (args->length() == 2) {
- Object* obj = (*args)[1];
- if (obj->IsSmi()) {
- int len = Smi::cast(obj)->value();
- if (len >= 0 && len < JSObject::kInitialMaxFastElementArray) {
- Object* fixed_array;
- { MaybeObject* maybe_obj = heap->AllocateFixedArrayWithHoles(len);
- if (!maybe_obj->ToObject(&fixed_array)) return maybe_obj;
- }
- ElementsKind elements_kind = array->GetElementsKind();
- if (!IsFastHoleyElementsKind(elements_kind)) {
- elements_kind = GetHoleyElementsKind(elements_kind);
- MaybeObject* maybe_array =
- array->TransitionElementsKind(elements_kind);
- if (maybe_array->IsFailure()) return maybe_array;
- }
- // We do not use SetContent to skip the unnecessary elements type check.
- array->set_elements(FixedArray::cast(fixed_array));
- array->set_length(Smi::cast(obj));
- return array;
- }
- }
- // Take the argument as the length.
- { MaybeObject* maybe_obj = array->Initialize(0);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- return array->SetElementsLength((*args)[1]);
- }
-
- // Optimize the case where there are no parameters passed.
- if (args->length() == 1) {
- return array->Initialize(JSArray::kPreallocatedArrayElements);
- }
-
- // Set length and elements on the array.
- int number_of_elements = args->length() - 1;
- MaybeObject* maybe_object =
- array->EnsureCanContainElements(args, 1, number_of_elements,
- ALLOW_CONVERTED_DOUBLE_ELEMENTS);
- if (maybe_object->IsFailure()) return maybe_object;
-
- // Allocate an appropriately typed elements array.
- MaybeObject* maybe_elms;
- ElementsKind elements_kind = array->GetElementsKind();
- if (IsFastDoubleElementsKind(elements_kind)) {
- maybe_elms = heap->AllocateUninitializedFixedDoubleArray(
- number_of_elements);
- } else {
- maybe_elms = heap->AllocateFixedArrayWithHoles(number_of_elements);
- }
- FixedArrayBase* elms;
- if (!maybe_elms->To<FixedArrayBase>(&elms)) return maybe_elms;
-
- // Fill in the content
- switch (array->GetElementsKind()) {
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_SMI_ELEMENTS: {
- FixedArray* smi_elms = FixedArray::cast(elms);
- for (int index = 0; index < number_of_elements; index++) {
- smi_elms->set(index, (*args)[index+1], SKIP_WRITE_BARRIER);
- }
- break;
- }
- case FAST_HOLEY_ELEMENTS:
- case FAST_ELEMENTS: {
- AssertNoAllocation no_gc;
- WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
- FixedArray* object_elms = FixedArray::cast(elms);
- for (int index = 0; index < number_of_elements; index++) {
- object_elms->set(index, (*args)[index+1], mode);
- }
- break;
- }
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS: {
- FixedDoubleArray* double_elms = FixedDoubleArray::cast(elms);
- for (int index = 0; index < number_of_elements; index++) {
- double_elms->set(index, (*args)[index+1]->Number());
- }
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
-
- array->set_elements(elms);
- array->set_length(Smi::FromInt(number_of_elements));
- return array;
-}
-
-
-BUILTIN(InternalArrayCodeGeneric) {
- return ArrayCodeGenericCommon(
- &args,
- isolate,
- isolate->context()->native_context()->internal_array_function());
-}
-
-
-BUILTIN(ArrayCodeGeneric) {
- return ArrayCodeGenericCommon(
- &args,
- isolate,
- isolate->context()->native_context()->array_function());
-}
-
-
-static void MoveElements(Heap* heap,
- AssertNoAllocation* no_gc,
- FixedArray* dst,
- int dst_index,
- FixedArray* src,
- int src_index,
- int len) {
+static void MoveDoubleElements(FixedDoubleArray* dst,
+ int dst_index,
+ FixedDoubleArray* src,
+ int src_index,
+ int len) {
if (len == 0) return;
- ASSERT(dst->map() != HEAP->fixed_cow_array_map());
- memmove(dst->data_start() + dst_index,
- src->data_start() + src_index,
- len * kPointerSize);
- WriteBarrierMode mode = dst->GetWriteBarrierMode(*no_gc);
- if (mode == UPDATE_WRITE_BARRIER) {
- heap->RecordWrites(dst->address(), dst->OffsetOfElementAt(dst_index), len);
- }
- heap->incremental_marking()->RecordWrites(dst);
+ OS::MemMove(dst->data_start() + dst_index,
+ src->data_start() + src_index,
+ len * kDoubleSize);
}
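
This helper replaces the old pointer-array MoveElements for the double case: unboxed doubles contain no heap pointers, so the move is a plain byte copy and needs neither a write barrier nor incremental-marking bookkeeping. A trivial stand-alone equivalent:

    #include <cstring>

    // Illustrative sketch only: moving raw doubles is just an overlapping-safe
    // byte move; there are no pointers for the GC to track.
    static void MoveDoubles(double* dst, const double* src, int len) {
      if (len == 0) return;
      std::memmove(dst, src, len * sizeof(double));
    }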
@@ -351,24 +213,39 @@ static void FillWithHoles(Heap* heap, FixedArray* dst, int from, int to) {
}
-static FixedArray* LeftTrimFixedArray(Heap* heap,
- FixedArray* elms,
- int to_trim) {
- ASSERT(elms->map() != HEAP->fixed_cow_array_map());
+static void FillWithHoles(FixedDoubleArray* dst, int from, int to) {
+ for (int i = from; i < to; i++) {
+ dst->set_the_hole(i);
+ }
+}
+
+
+static FixedArrayBase* LeftTrimFixedArray(Heap* heap,
+ FixedArrayBase* elms,
+ int to_trim) {
+ Map* map = elms->map();
+ int entry_size;
+ if (elms->IsFixedArray()) {
+ entry_size = kPointerSize;
+ } else {
+ entry_size = kDoubleSize;
+ }
+ ASSERT(elms->map() != heap->fixed_cow_array_map());
// For now this trick is only applied to fixed arrays in new and paged space.
// In large object space the object's start must coincide with the chunk
// and thus the trick is just not applicable.
- ASSERT(!HEAP->lo_space()->Contains(elms));
+ ASSERT(!heap->lo_space()->Contains(elms));
- STATIC_ASSERT(FixedArray::kMapOffset == 0);
- STATIC_ASSERT(FixedArray::kLengthOffset == kPointerSize);
- STATIC_ASSERT(FixedArray::kHeaderSize == 2 * kPointerSize);
+ STATIC_ASSERT(FixedArrayBase::kMapOffset == 0);
+ STATIC_ASSERT(FixedArrayBase::kLengthOffset == kPointerSize);
+ STATIC_ASSERT(FixedArrayBase::kHeaderSize == 2 * kPointerSize);
Object** former_start = HeapObject::RawField(elms, 0);
const int len = elms->length();
- if (to_trim > FixedArray::kHeaderSize / kPointerSize &&
+ if (to_trim * entry_size > FixedArrayBase::kHeaderSize &&
+ elms->IsFixedArray() &&
!heap->new_space()->Contains(elms)) {
// If we are doing a big trim in old space then we zap the space that was
// formerly part of the array so that the GC (aided by the card-based
@@ -382,23 +259,34 @@ static FixedArray* LeftTrimFixedArray(Heap* heap,
// Technically in new space this write might be omitted (except for
// debug mode which iterates through the heap), but to play safer
// we still do it.
- heap->CreateFillerObjectAt(elms->address(), to_trim * kPointerSize);
+ heap->CreateFillerObjectAt(elms->address(), to_trim * entry_size);
- former_start[to_trim] = heap->fixed_array_map();
- former_start[to_trim + 1] = Smi::FromInt(len - to_trim);
+ int new_start_index = to_trim * (entry_size / kPointerSize);
+ former_start[new_start_index] = map;
+ former_start[new_start_index + 1] = Smi::FromInt(len - to_trim);
// Maintain marking consistency for HeapObjectIterator and
// IncrementalMarking.
- int size_delta = to_trim * kPointerSize;
+ int size_delta = to_trim * entry_size;
if (heap->marking()->TransferMark(elms->address(),
elms->address() + size_delta)) {
MemoryChunk::IncrementLiveBytesFromMutator(elms->address(), -size_delta);
}
- HEAP_PROFILE(heap, ObjectMoveEvent(elms->address(),
- elms->address() + size_delta));
- return FixedArray::cast(HeapObject::FromAddress(
- elms->address() + to_trim * kPointerSize));
+ FixedArrayBase* new_elms = FixedArrayBase::cast(HeapObject::FromAddress(
+ elms->address() + size_delta));
+ HeapProfiler* profiler = heap->isolate()->heap_profiler();
+ if (profiler->is_profiling()) {
+ profiler->ObjectMoveEvent(elms->address(),
+ new_elms->address(),
+ new_elms->Size());
+ if (profiler->is_tracking_allocations()) {
+ // Report filler object as a new allocation.
+ // Otherwise it will become an untracked object.
+ profiler->NewObjectEvent(elms->address(), elms->Size());
+ }
+ }
+ return new_elms;
}
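
LeftTrimFixedArray never copies the surviving elements: it writes a fresh map and length word immediately in front of them, turns the vacated prefix into a filler so heap iteration stays valid, and returns the bumped address, now handling either pointer-sized or double-sized entries. A much-simplified sketch of the pointer-bumping idea, using plain words instead of tagged heap objects and skipping the filler/GC bookkeeping:

    #include <cassert>
    #include <cstddef>

    // Illustrative sketch only: an array stored as [length][elem 0][elem 1]...
    // Trimming writes a new length word just before the surviving elements and
    // returns that address; the elements themselves never move.
    static size_t* LeftTrimInPlace(size_t* array, size_t to_trim) {
      size_t len = array[0];
      assert(to_trim <= len);
      size_t* new_start = array + to_trim;  // old header + trimmed slots go dead
      new_start[0] = len - to_trim;         // fresh header for the shorter array
      return new_start;
    }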
@@ -427,19 +315,14 @@ static inline MaybeObject* EnsureJSArrayWithWritableFastElements(
Map* map = elms->map();
if (map == heap->fixed_array_map()) {
if (args == NULL || array->HasFastObjectElements()) return elms;
- if (array->HasFastDoubleElements()) {
- ASSERT(elms == heap->empty_fixed_array());
- MaybeObject* maybe_transition =
- array->TransitionElementsKind(FAST_ELEMENTS);
- if (maybe_transition->IsFailure()) return maybe_transition;
- return elms;
- }
} else if (map == heap->fixed_cow_array_map()) {
MaybeObject* maybe_writable_result = array->EnsureWritableFastElements();
if (args == NULL || array->HasFastObjectElements() ||
- maybe_writable_result->IsFailure()) {
+ !maybe_writable_result->To(&elms)) {
return maybe_writable_result;
}
+ } else if (map == heap->fixed_double_array_map()) {
+ if (args == NULL) return elms;
} else {
return NULL;
}
@@ -449,13 +332,28 @@ static inline MaybeObject* EnsureJSArrayWithWritableFastElements(
int args_length = args->length();
if (first_added_arg >= args_length) return array->elements();
- MaybeObject* maybe_array = array->EnsureCanContainElements(
- args,
- first_added_arg,
- args_length - first_added_arg,
- DONT_ALLOW_DOUBLE_ELEMENTS);
- if (maybe_array->IsFailure()) return maybe_array;
- return array->elements();
+ ElementsKind origin_kind = array->map()->elements_kind();
+ ASSERT(!IsFastObjectElementsKind(origin_kind));
+ ElementsKind target_kind = origin_kind;
+ int arg_count = args->length() - first_added_arg;
+ Object** arguments = args->arguments() - first_added_arg - (arg_count - 1);
+ for (int i = 0; i < arg_count; i++) {
+ Object* arg = arguments[i];
+ if (arg->IsHeapObject()) {
+ if (arg->IsHeapNumber()) {
+ target_kind = FAST_DOUBLE_ELEMENTS;
+ } else {
+ target_kind = FAST_ELEMENTS;
+ break;
+ }
+ }
+ }
+ if (target_kind != origin_kind) {
+ MaybeObject* maybe_failure = array->TransitionElementsKind(target_kind);
+ if (maybe_failure->IsFailure()) return maybe_failure;
+ return array->elements();
+ }
+ return elms;
}
@@ -486,7 +384,8 @@ MUST_USE_RESULT static MaybeObject* CallJsBuiltin(
argv[i] = args.at<Object>(i + 1);
}
bool pending_exception;
- Handle<Object> result = Execution::Call(function,
+ Handle<Object> result = Execution::Call(isolate,
+ function,
args.receiver(),
argc,
argv.start(),
@@ -499,127 +398,191 @@ MUST_USE_RESULT static MaybeObject* CallJsBuiltin(
BUILTIN(ArrayPush) {
Heap* heap = isolate->heap();
Object* receiver = *args.receiver();
- Object* elms_obj;
- { MaybeObject* maybe_elms_obj =
- EnsureJSArrayWithWritableFastElements(heap, receiver, &args, 1);
- if (maybe_elms_obj == NULL) {
- return CallJsBuiltin(isolate, "ArrayPush", args);
- }
- if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
+ FixedArrayBase* elms_obj;
+ MaybeObject* maybe_elms_obj =
+ EnsureJSArrayWithWritableFastElements(heap, receiver, &args, 1);
+ if (maybe_elms_obj == NULL) {
+ return CallJsBuiltin(isolate, "ArrayPush", args);
}
- FixedArray* elms = FixedArray::cast(elms_obj);
+ if (!maybe_elms_obj->To(&elms_obj)) return maybe_elms_obj;
+
JSArray* array = JSArray::cast(receiver);
+ ASSERT(!array->map()->is_observed());
- int len = Smi::cast(array->length())->value();
- int to_add = args.length() - 1;
- if (to_add == 0) {
- return Smi::FromInt(len);
- }
- // Currently fixed arrays cannot grow too big, so
- // we should never hit this case.
- ASSERT(to_add <= (Smi::kMaxValue - len));
+ ElementsKind kind = array->GetElementsKind();
- int new_length = len + to_add;
+ if (IsFastSmiOrObjectElementsKind(kind)) {
+ FixedArray* elms = FixedArray::cast(elms_obj);
- if (new_length > elms->length()) {
- // New backing storage is needed.
- int capacity = new_length + (new_length >> 1) + 16;
- Object* obj;
- { MaybeObject* maybe_obj = heap->AllocateUninitializedFixedArray(capacity);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+ int len = Smi::cast(array->length())->value();
+ int to_add = args.length() - 1;
+ if (to_add == 0) {
+ return Smi::FromInt(len);
+ }
+ // Currently fixed arrays cannot grow too big, so
+ // we should never hit this case.
+ ASSERT(to_add <= (Smi::kMaxValue - len));
+
+ int new_length = len + to_add;
+
+ if (new_length > elms->length()) {
+ // New backing storage is needed.
+ int capacity = new_length + (new_length >> 1) + 16;
+ FixedArray* new_elms;
+ MaybeObject* maybe_obj = heap->AllocateUninitializedFixedArray(capacity);
+ if (!maybe_obj->To(&new_elms)) return maybe_obj;
+
+ ElementsAccessor* accessor = array->GetElementsAccessor();
+ MaybeObject* maybe_failure = accessor->CopyElements(
+ NULL, 0, kind, new_elms, 0,
+ ElementsAccessor::kCopyToEndAndInitializeToHole, elms_obj);
+ ASSERT(!maybe_failure->IsFailure());
+ USE(maybe_failure);
+
+ elms = new_elms;
}
- FixedArray* new_elms = FixedArray::cast(obj);
- ElementsKind kind = array->GetElementsKind();
- CopyObjectToObjectElements(elms, kind, 0, new_elms, kind, 0, len);
- FillWithHoles(heap, new_elms, new_length, capacity);
+ // Add the provided values.
+ DisallowHeapAllocation no_gc;
+ WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
+ for (int index = 0; index < to_add; index++) {
+ elms->set(index + len, args[index + 1], mode);
+ }
- elms = new_elms;
- }
+ if (elms != array->elements()) {
+ array->set_elements(elms);
+ }
- // Add the provided values.
- AssertNoAllocation no_gc;
- WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
- for (int index = 0; index < to_add; index++) {
- elms->set(index + len, args[index + 1], mode);
- }
+ // Set the length.
+ array->set_length(Smi::FromInt(new_length));
+ return Smi::FromInt(new_length);
+ } else {
+ int len = Smi::cast(array->length())->value();
+ int elms_len = elms_obj->length();
- if (elms != array->elements()) {
- array->set_elements(elms);
- }
+ int to_add = args.length() - 1;
+ if (to_add == 0) {
+ return Smi::FromInt(len);
+ }
+ // Currently fixed arrays cannot grow too big, so
+ // we should never hit this case.
+ ASSERT(to_add <= (Smi::kMaxValue - len));
- // Set the length.
- array->set_length(Smi::FromInt(new_length));
- return Smi::FromInt(new_length);
+ int new_length = len + to_add;
+
+ FixedDoubleArray* new_elms;
+
+ if (new_length > elms_len) {
+ // New backing storage is needed.
+ int capacity = new_length + (new_length >> 1) + 16;
+ MaybeObject* maybe_obj =
+ heap->AllocateUninitializedFixedDoubleArray(capacity);
+ if (!maybe_obj->To(&new_elms)) return maybe_obj;
+
+ ElementsAccessor* accessor = array->GetElementsAccessor();
+ MaybeObject* maybe_failure = accessor->CopyElements(
+ NULL, 0, kind, new_elms, 0,
+ ElementsAccessor::kCopyToEndAndInitializeToHole, elms_obj);
+ ASSERT(!maybe_failure->IsFailure());
+ USE(maybe_failure);
+ } else {
+ // to_add is > 0 and new_length <= elms_len, so elms_obj cannot be the
+ // empty_fixed_array.
+ new_elms = FixedDoubleArray::cast(elms_obj);
+ }
+
+ // Add the provided values.
+ DisallowHeapAllocation no_gc;
+ int index;
+ for (index = 0; index < to_add; index++) {
+ Object* arg = args[index + 1];
+ new_elms->set(index + len, arg->Number());
+ }
+
+ if (new_elms != array->elements()) {
+ array->set_elements(new_elms);
+ }
+
+ // Set the length.
+ array->set_length(Smi::FromInt(new_length));
+ return Smi::FromInt(new_length);
+ }
}
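
Both branches of ArrayPush grow a full backing store with the same rule: roughly one and a half times the needed length plus a fixed slack of 16 slots, which keeps repeated pushes amortized O(1). The constant below is taken straight from the code above:

    // Illustrative sketch only: the capacity chosen when the backing store is
    // too small for the pushed elements.
    static int NewCapacity(int new_length) {
      return new_length + (new_length >> 1) + 16;
    }
    // Example: pushing one element past a full 100-slot store requests
    // NewCapacity(101) == 101 + 50 + 16 == 167 slots.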
BUILTIN(ArrayPop) {
Heap* heap = isolate->heap();
Object* receiver = *args.receiver();
- Object* elms_obj;
- { MaybeObject* maybe_elms_obj =
- EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0);
- if (maybe_elms_obj == NULL) return CallJsBuiltin(isolate, "ArrayPop", args);
- if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
- }
- FixedArray* elms = FixedArray::cast(elms_obj);
+ FixedArrayBase* elms_obj;
+ MaybeObject* maybe_elms =
+ EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0);
+ if (maybe_elms == NULL) return CallJsBuiltin(isolate, "ArrayPop", args);
+ if (!maybe_elms->To(&elms_obj)) return maybe_elms;
+
JSArray* array = JSArray::cast(receiver);
+ ASSERT(!array->map()->is_observed());
int len = Smi::cast(array->length())->value();
if (len == 0) return heap->undefined_value();
- // Get top element
- MaybeObject* top = elms->get(len - 1);
-
- // Set the length.
- array->set_length(Smi::FromInt(len - 1));
-
- if (!top->IsTheHole()) {
- // Delete the top element.
- elms->set_the_hole(len - 1);
- return top;
+ ElementsAccessor* accessor = array->GetElementsAccessor();
+ int new_length = len - 1;
+ MaybeObject* maybe_result;
+ if (accessor->HasElement(array, array, new_length, elms_obj)) {
+ maybe_result = accessor->Get(array, array, new_length, elms_obj);
+ } else {
+ maybe_result = array->GetPrototype()->GetElement(isolate, len - 1);
}
-
- top = array->GetPrototype()->GetElement(len - 1);
-
- return top;
+ if (maybe_result->IsFailure()) return maybe_result;
+ MaybeObject* maybe_failure =
+ accessor->SetLength(array, Smi::FromInt(new_length));
+ if (maybe_failure->IsFailure()) return maybe_failure;
+ return maybe_result;
}
BUILTIN(ArrayShift) {
Heap* heap = isolate->heap();
Object* receiver = *args.receiver();
- Object* elms_obj;
- { MaybeObject* maybe_elms_obj =
- EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0);
- if (maybe_elms_obj == NULL)
- return CallJsBuiltin(isolate, "ArrayShift", args);
- if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
- }
+ FixedArrayBase* elms_obj;
+ MaybeObject* maybe_elms_obj =
+ EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0);
+ if (maybe_elms_obj == NULL)
+ return CallJsBuiltin(isolate, "ArrayShift", args);
+ if (!maybe_elms_obj->To(&elms_obj)) return maybe_elms_obj;
+
if (!IsJSArrayFastElementMovingAllowed(heap, JSArray::cast(receiver))) {
return CallJsBuiltin(isolate, "ArrayShift", args);
}
- FixedArray* elms = FixedArray::cast(elms_obj);
JSArray* array = JSArray::cast(receiver);
- ASSERT(array->HasFastSmiOrObjectElements());
+ ASSERT(!array->map()->is_observed());
int len = Smi::cast(array->length())->value();
if (len == 0) return heap->undefined_value();
// Get first element
- Object* first = elms->get(0);
+ ElementsAccessor* accessor = array->GetElementsAccessor();
+ Object* first;
+ MaybeObject* maybe_first = accessor->Get(receiver, array, 0, elms_obj);
+ if (!maybe_first->To(&first)) return maybe_first;
if (first->IsTheHole()) {
first = heap->undefined_value();
}
- if (!heap->lo_space()->Contains(elms)) {
- array->set_elements(LeftTrimFixedArray(heap, elms, 1));
+ if (!heap->lo_space()->Contains(elms_obj)) {
+ array->set_elements(LeftTrimFixedArray(heap, elms_obj, 1));
} else {
// Shift the elements.
- AssertNoAllocation no_gc;
- MoveElements(heap, &no_gc, elms, 0, elms, 1, len - 1);
- elms->set(len - 1, heap->the_hole_value());
+ if (elms_obj->IsFixedArray()) {
+ FixedArray* elms = FixedArray::cast(elms_obj);
+ DisallowHeapAllocation no_gc;
+ heap->MoveElements(elms, 0, 1, len - 1);
+ elms->set(len - 1, heap->the_hole_value());
+ } else {
+ FixedDoubleArray* elms = FixedDoubleArray::cast(elms_obj);
+ MoveDoubleElements(elms, 0, elms, 1, len - 1);
+ elms->set_the_hole(len - 1);
+ }
}
// Set the length.
@@ -632,19 +595,22 @@ BUILTIN(ArrayShift) {
BUILTIN(ArrayUnshift) {
Heap* heap = isolate->heap();
Object* receiver = *args.receiver();
- Object* elms_obj;
- { MaybeObject* maybe_elms_obj =
- EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0);
- if (maybe_elms_obj == NULL)
- return CallJsBuiltin(isolate, "ArrayUnshift", args);
- if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
- }
+ FixedArrayBase* elms_obj;
+ MaybeObject* maybe_elms_obj =
+ EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0);
+ if (maybe_elms_obj == NULL)
+ return CallJsBuiltin(isolate, "ArrayUnshift", args);
+ if (!maybe_elms_obj->To(&elms_obj)) return maybe_elms_obj;
+
if (!IsJSArrayFastElementMovingAllowed(heap, JSArray::cast(receiver))) {
return CallJsBuiltin(isolate, "ArrayUnshift", args);
}
- FixedArray* elms = FixedArray::cast(elms_obj);
JSArray* array = JSArray::cast(receiver);
- ASSERT(array->HasFastSmiOrObjectElements());
+ ASSERT(!array->map()->is_observed());
+ if (!array->HasFastSmiOrObjectElements()) {
+ return CallJsBuiltin(isolate, "ArrayUnshift", args);
+ }
+ FixedArray* elms = FixedArray::cast(elms_obj);
int len = Smi::cast(array->length())->value();
int to_add = args.length() - 1;
@@ -661,23 +627,27 @@ BUILTIN(ArrayUnshift) {
if (new_length > elms->length()) {
// New backing storage is needed.
int capacity = new_length + (new_length >> 1) + 16;
- Object* obj;
- { MaybeObject* maybe_obj = heap->AllocateUninitializedFixedArray(capacity);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- FixedArray* new_elms = FixedArray::cast(obj);
+ FixedArray* new_elms;
+ MaybeObject* maybe_elms = heap->AllocateUninitializedFixedArray(capacity);
+ if (!maybe_elms->To(&new_elms)) return maybe_elms;
+
ElementsKind kind = array->GetElementsKind();
- CopyObjectToObjectElements(elms, kind, 0, new_elms, kind, to_add, len);
- FillWithHoles(heap, new_elms, new_length, capacity);
+ ElementsAccessor* accessor = array->GetElementsAccessor();
+ MaybeObject* maybe_failure = accessor->CopyElements(
+ NULL, 0, kind, new_elms, to_add,
+ ElementsAccessor::kCopyToEndAndInitializeToHole, elms);
+ ASSERT(!maybe_failure->IsFailure());
+ USE(maybe_failure);
+
elms = new_elms;
array->set_elements(elms);
} else {
- AssertNoAllocation no_gc;
- MoveElements(heap, &no_gc, elms, to_add, elms, 0, len);
+ DisallowHeapAllocation no_gc;
+ heap->MoveElements(elms, to_add, 0, len);
}
// Add the provided values.
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
for (int i = 0; i < to_add; i++) {
elms->set(i, args[i + 1], mode);
@@ -692,16 +662,20 @@ BUILTIN(ArrayUnshift) {
BUILTIN(ArraySlice) {
Heap* heap = isolate->heap();
Object* receiver = *args.receiver();
- FixedArray* elms;
+ FixedArrayBase* elms;
int len = -1;
if (receiver->IsJSArray()) {
JSArray* array = JSArray::cast(receiver);
- if (!array->HasFastSmiOrObjectElements() ||
- !IsJSArrayFastElementMovingAllowed(heap, array)) {
+ if (!IsJSArrayFastElementMovingAllowed(heap, array)) {
+ return CallJsBuiltin(isolate, "ArraySlice", args);
+ }
+
+ if (array->HasFastElements()) {
+ elms = array->elements();
+ } else {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
- elms = FixedArray::cast(array->elements());
len = Smi::cast(array->length())->value();
} else {
// Array.slice(arguments, ...) is quite a common idiom (notably more
@@ -710,15 +684,19 @@ BUILTIN(ArraySlice) {
isolate->context()->native_context()->arguments_boilerplate()->map();
bool is_arguments_object_with_fast_elements =
- receiver->IsJSObject()
- && JSObject::cast(receiver)->map() == arguments_map
- && JSObject::cast(receiver)->HasFastSmiOrObjectElements();
+ receiver->IsJSObject() &&
+ JSObject::cast(receiver)->map() == arguments_map;
if (!is_arguments_object_with_fast_elements) {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
- elms = FixedArray::cast(JSObject::cast(receiver)->elements());
- Object* len_obj = JSObject::cast(receiver)
- ->InObjectPropertyAt(Heap::kArgumentsLengthIndex);
+ JSObject* object = JSObject::cast(receiver);
+
+ if (object->HasFastElements()) {
+ elms = object->elements();
+ } else {
+ return CallJsBuiltin(isolate, "ArraySlice", args);
+ }
+ Object* len_obj = object->InObjectPropertyAt(Heap::kArgumentsLengthIndex);
if (!len_obj->IsSmi()) {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
@@ -726,12 +704,10 @@ BUILTIN(ArraySlice) {
if (len > elms->length()) {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
- for (int i = 0; i < len; i++) {
- if (elms->get(i) == heap->the_hole_value()) {
- return CallJsBuiltin(isolate, "ArraySlice", args);
- }
- }
}
+
+ JSObject* object = JSObject::cast(receiver);
+
ASSERT(len >= 0);
int n_arguments = args.length() - 1;
@@ -744,6 +720,12 @@ BUILTIN(ArraySlice) {
Object* arg1 = args[1];
if (arg1->IsSmi()) {
relative_start = Smi::cast(arg1)->value();
+ } else if (arg1->IsHeapNumber()) {
+ double start = HeapNumber::cast(arg1)->value();
+ if (start < kMinInt || start > kMaxInt) {
+ return CallJsBuiltin(isolate, "ArraySlice", args);
+ }
+ relative_start = std::isnan(start) ? 0 : static_cast<int>(start);
} else if (!arg1->IsUndefined()) {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
@@ -751,6 +733,12 @@ BUILTIN(ArraySlice) {
Object* arg2 = args[2];
if (arg2->IsSmi()) {
relative_end = Smi::cast(arg2)->value();
+ } else if (arg2->IsHeapNumber()) {
+ double end = HeapNumber::cast(arg2)->value();
+ if (end < kMinInt || end > kMaxInt) {
+ return CallJsBuiltin(isolate, "ArraySlice", args);
+ }
+ relative_end = std::isnan(end) ? 0 : static_cast<int>(end);
} else if (!arg2->IsUndefined()) {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
@@ -765,21 +753,40 @@ BUILTIN(ArraySlice) {
int final = (relative_end < 0) ? Max(len + relative_end, 0)
: Min(relative_end, len);
- ElementsKind elements_kind = JSObject::cast(receiver)->GetElementsKind();
-
// Calculate the length of result array.
int result_len = Max(final - k, 0);
- MaybeObject* maybe_array =
- heap->AllocateJSArrayAndStorage(elements_kind,
- result_len,
- result_len);
+ ElementsKind kind = object->GetElementsKind();
+ if (IsHoleyElementsKind(kind)) {
+ bool packed = true;
+ ElementsAccessor* accessor = ElementsAccessor::ForKind(kind);
+ for (int i = k; i < final; i++) {
+ if (!accessor->HasElement(object, object, i, elms)) {
+ packed = false;
+ break;
+ }
+ }
+ if (packed) {
+ kind = GetPackedElementsKind(kind);
+ } else if (!receiver->IsJSArray()) {
+ return CallJsBuiltin(isolate, "ArraySlice", args);
+ }
+ }
+
JSArray* result_array;
+ MaybeObject* maybe_array = heap->AllocateJSArrayAndStorage(kind,
+ result_len,
+ result_len);
+
+ DisallowHeapAllocation no_gc;
+ if (result_len == 0) return maybe_array;
if (!maybe_array->To(&result_array)) return maybe_array;
- CopyObjectToObjectElements(elms, elements_kind, k,
- FixedArray::cast(result_array->elements()),
- elements_kind, 0, result_len);
+ ElementsAccessor* accessor = object->GetElementsAccessor();
+ MaybeObject* maybe_failure = accessor->CopyElements(
+ NULL, k, kind, result_array->elements(), 0, result_len, elms);
+ ASSERT(!maybe_failure->IsFailure());
+ USE(maybe_failure);
return result_array;
}
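
ArraySlice now accepts HeapNumber start/end arguments as well as Smis: values outside int range fall back to the JavaScript builtin, NaN becomes 0, and the resulting relative index is folded into [0, len] with the usual negative-from-the-end rule. A small sketch of that normalization (the out-of-range bailout is omitted here):

    #include <algorithm>
    #include <cmath>

    // Illustrative sketch only: fold a relative slice index into [0, len].
    static int ClampSliceIndex(double relative, int len) {
      if (std::isnan(relative)) relative = 0;          // NaN counts as 0
      int value = static_cast<int>(relative);
      return (value < 0) ? std::max(len + value, 0)    // count from the end
                         : std::min(value, len);
    }
    // ClampSliceIndex(-2, 5) == 3, ClampSliceIndex(9, 5) == 5.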
@@ -788,19 +795,19 @@ BUILTIN(ArraySlice) {
BUILTIN(ArraySplice) {
Heap* heap = isolate->heap();
Object* receiver = *args.receiver();
- Object* elms_obj;
- { MaybeObject* maybe_elms_obj =
- EnsureJSArrayWithWritableFastElements(heap, receiver, &args, 3);
- if (maybe_elms_obj == NULL)
- return CallJsBuiltin(isolate, "ArraySplice", args);
- if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
+ FixedArrayBase* elms_obj;
+ MaybeObject* maybe_elms =
+ EnsureJSArrayWithWritableFastElements(heap, receiver, &args, 3);
+ if (maybe_elms == NULL) {
+ return CallJsBuiltin(isolate, "ArraySplice", args);
}
+ if (!maybe_elms->To(&elms_obj)) return maybe_elms;
+
if (!IsJSArrayFastElementMovingAllowed(heap, JSArray::cast(receiver))) {
return CallJsBuiltin(isolate, "ArraySplice", args);
}
- FixedArray* elms = FixedArray::cast(elms_obj);
JSArray* array = JSArray::cast(receiver);
- ASSERT(array->HasFastSmiOrObjectElements());
+ ASSERT(!array->map()->is_observed());
int len = Smi::cast(array->length())->value();
@@ -811,6 +818,12 @@ BUILTIN(ArraySplice) {
Object* arg1 = args[1];
if (arg1->IsSmi()) {
relative_start = Smi::cast(arg1)->value();
+ } else if (arg1->IsHeapNumber()) {
+ double start = HeapNumber::cast(arg1)->value();
+ if (start < kMinInt || start > kMaxInt) {
+ return CallJsBuiltin(isolate, "ArraySplice", args);
+ }
+ relative_start = std::isnan(start) ? 0 : static_cast<int>(start);
} else if (!arg1->IsUndefined()) {
return CallJsBuiltin(isolate, "ArraySplice", args);
}
@@ -840,51 +853,83 @@ BUILTIN(ArraySplice) {
actual_delete_count = Min(Max(value, 0), len - actual_start);
}
+ ElementsKind elements_kind = array->GetElementsKind();
+
+ int item_count = (n_arguments > 1) ? (n_arguments - 2) : 0;
+ int new_length = len - actual_delete_count + item_count;
+
+ // For double mode we do not support changing the length.
+ if (new_length > len && IsFastDoubleElementsKind(elements_kind)) {
+ return CallJsBuiltin(isolate, "ArraySplice", args);
+ }
+
+ if (new_length == 0) {
+ MaybeObject* maybe_array = heap->AllocateJSArrayWithElements(
+ elms_obj, elements_kind, actual_delete_count);
+ if (maybe_array->IsFailure()) return maybe_array;
+ array->set_elements(heap->empty_fixed_array());
+ array->set_length(Smi::FromInt(0));
+ return maybe_array;
+ }
+
JSArray* result_array = NULL;
- ElementsKind elements_kind =
- JSObject::cast(receiver)->GetElementsKind();
MaybeObject* maybe_array =
heap->AllocateJSArrayAndStorage(elements_kind,
actual_delete_count,
actual_delete_count);
if (!maybe_array->To(&result_array)) return maybe_array;
- {
- // Fill newly created array.
- CopyObjectToObjectElements(elms, elements_kind, actual_start,
- FixedArray::cast(result_array->elements()),
- elements_kind, 0, actual_delete_count);
+ if (actual_delete_count > 0) {
+ DisallowHeapAllocation no_gc;
+ ElementsAccessor* accessor = array->GetElementsAccessor();
+ MaybeObject* maybe_failure = accessor->CopyElements(
+ NULL, actual_start, elements_kind, result_array->elements(),
+ 0, actual_delete_count, elms_obj);
+ // Cannot fail since the origin and target arrays are of the same elements
+ // kind.
+ ASSERT(!maybe_failure->IsFailure());
+ USE(maybe_failure);
}
- int item_count = (n_arguments > 1) ? (n_arguments - 2) : 0;
- int new_length = len - actual_delete_count + item_count;
-
bool elms_changed = false;
if (item_count < actual_delete_count) {
// Shrink the array.
- const bool trim_array = !heap->lo_space()->Contains(elms) &&
+ const bool trim_array = !heap->lo_space()->Contains(elms_obj) &&
((actual_start + item_count) <
(len - actual_delete_count - actual_start));
if (trim_array) {
const int delta = actual_delete_count - item_count;
- {
- AssertNoAllocation no_gc;
- MoveElements(heap, &no_gc, elms, delta, elms, 0, actual_start);
+ if (elms_obj->IsFixedDoubleArray()) {
+ FixedDoubleArray* elms = FixedDoubleArray::cast(elms_obj);
+ MoveDoubleElements(elms, delta, elms, 0, actual_start);
+ } else {
+ FixedArray* elms = FixedArray::cast(elms_obj);
+ DisallowHeapAllocation no_gc;
+ heap->MoveElements(elms, delta, 0, actual_start);
}
- elms = LeftTrimFixedArray(heap, elms, delta);
+ elms_obj = LeftTrimFixedArray(heap, elms_obj, delta);
elms_changed = true;
} else {
- AssertNoAllocation no_gc;
- MoveElements(heap, &no_gc,
- elms, actual_start + item_count,
- elms, actual_start + actual_delete_count,
- (len - actual_delete_count - actual_start));
- FillWithHoles(heap, elms, new_length, len);
+ if (elms_obj->IsFixedDoubleArray()) {
+ FixedDoubleArray* elms = FixedDoubleArray::cast(elms_obj);
+ MoveDoubleElements(elms, actual_start + item_count,
+ elms, actual_start + actual_delete_count,
+ (len - actual_delete_count - actual_start));
+ FillWithHoles(elms, new_length, len);
+ } else {
+ FixedArray* elms = FixedArray::cast(elms_obj);
+ DisallowHeapAllocation no_gc;
+ heap->MoveElements(elms, actual_start + item_count,
+ actual_start + actual_delete_count,
+ (len - actual_delete_count - actual_start));
+ FillWithHoles(heap, elms, new_length, len);
+ }
}
} else if (item_count > actual_delete_count) {
+ FixedArray* elms = FixedArray::cast(elms_obj);
// Currently fixed arrays cannot grow too big, so
// we should never hit this case.
ASSERT((item_count - actual_delete_count) <= (Smi::kMaxValue - len));
@@ -893,48 +938,60 @@ BUILTIN(ArraySplice) {
if (new_length > elms->length()) {
// New backing storage is needed.
int capacity = new_length + (new_length >> 1) + 16;
- Object* obj;
- { MaybeObject* maybe_obj =
- heap->AllocateUninitializedFixedArray(capacity);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- FixedArray* new_elms = FixedArray::cast(obj);
+ FixedArray* new_elms;
+ MaybeObject* maybe_obj = heap->AllocateUninitializedFixedArray(capacity);
+ if (!maybe_obj->To(&new_elms)) return maybe_obj;
- {
+ DisallowHeapAllocation no_gc;
+
+ ElementsKind kind = array->GetElementsKind();
+ ElementsAccessor* accessor = array->GetElementsAccessor();
+ if (actual_start > 0) {
// Copy the part before actual_start as is.
- ElementsKind kind = array->GetElementsKind();
- CopyObjectToObjectElements(elms, kind, 0,
- new_elms, kind, 0, actual_start);
- const int to_copy = len - actual_delete_count - actual_start;
- CopyObjectToObjectElements(elms, kind,
- actual_start + actual_delete_count,
- new_elms, kind,
- actual_start + item_count, to_copy);
+ MaybeObject* maybe_failure = accessor->CopyElements(
+ NULL, 0, kind, new_elms, 0, actual_start, elms);
+ ASSERT(!maybe_failure->IsFailure());
+ USE(maybe_failure);
}
-
- FillWithHoles(heap, new_elms, new_length, capacity);
-
- elms = new_elms;
+ MaybeObject* maybe_failure = accessor->CopyElements(
+ NULL, actual_start + actual_delete_count, kind, new_elms,
+ actual_start + item_count,
+ ElementsAccessor::kCopyToEndAndInitializeToHole, elms);
+ ASSERT(!maybe_failure->IsFailure());
+ USE(maybe_failure);
+
+ elms_obj = new_elms;
elms_changed = true;
} else {
- AssertNoAllocation no_gc;
- MoveElements(heap, &no_gc,
- elms, actual_start + item_count,
- elms, actual_start + actual_delete_count,
- (len - actual_delete_count - actual_start));
+ DisallowHeapAllocation no_gc;
+ heap->MoveElements(elms, actual_start + item_count,
+ actual_start + actual_delete_count,
+ (len - actual_delete_count - actual_start));
}
}
- AssertNoAllocation no_gc;
- WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
- for (int k = actual_start; k < actual_start + item_count; k++) {
- elms->set(k, args[3 + k - actual_start], mode);
+ if (IsFastDoubleElementsKind(elements_kind)) {
+ FixedDoubleArray* elms = FixedDoubleArray::cast(elms_obj);
+ for (int k = actual_start; k < actual_start + item_count; k++) {
+ Object* arg = args[3 + k - actual_start];
+ if (arg->IsSmi()) {
+ elms->set(k, Smi::cast(arg)->value());
+ } else {
+ elms->set(k, HeapNumber::cast(arg)->value());
+ }
+ }
+ } else {
+ FixedArray* elms = FixedArray::cast(elms_obj);
+ DisallowHeapAllocation no_gc;
+ WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
+ for (int k = actual_start; k < actual_start + item_count; k++) {
+ elms->set(k, args[3 + k - actual_start], mode);
+ }
}
if (elms_changed) {
- array->set_elements(elms);
+ array->set_elements(elms_obj);
}
-
// Set the length.
array->set_length(Smi::FromInt(new_length));
@@ -956,14 +1013,15 @@ BUILTIN(ArrayConcat) {
int n_arguments = args.length();
int result_len = 0;
ElementsKind elements_kind = GetInitialFastElementsKind();
+ bool has_double = false;
+ bool is_holey = false;
for (int i = 0; i < n_arguments; i++) {
Object* arg = args[i];
if (!arg->IsJSArray() ||
- !JSArray::cast(arg)->HasFastSmiOrObjectElements() ||
+ !JSArray::cast(arg)->HasFastElements() ||
JSArray::cast(arg)->GetPrototype() != array_proto) {
return CallJsBuiltin(isolate, "ArrayConcat", args);
}
-
int len = Smi::cast(JSArray::cast(arg)->length())->value();
// We shouldn't overflow when adding another len.
@@ -973,47 +1031,52 @@ BUILTIN(ArrayConcat) {
result_len += len;
ASSERT(result_len >= 0);
- if (result_len > FixedArray::kMaxLength) {
+ if (result_len > FixedDoubleArray::kMaxLength) {
return CallJsBuiltin(isolate, "ArrayConcat", args);
}
- if (!JSArray::cast(arg)->HasFastSmiElements()) {
- if (IsFastSmiElementsKind(elements_kind)) {
- if (IsFastHoleyElementsKind(elements_kind)) {
- elements_kind = FAST_HOLEY_ELEMENTS;
- } else {
- elements_kind = FAST_ELEMENTS;
- }
- }
- }
-
- if (JSArray::cast(arg)->HasFastHoleyElements()) {
- elements_kind = GetHoleyElementsKind(elements_kind);
+ ElementsKind arg_kind = JSArray::cast(arg)->map()->elements_kind();
+ has_double = has_double || IsFastDoubleElementsKind(arg_kind);
+ is_holey = is_holey || IsFastHoleyElementsKind(arg_kind);
+ if (IsMoreGeneralElementsKindTransition(elements_kind, arg_kind)) {
+ elements_kind = arg_kind;
}
}
- // Allocate result.
+ if (is_holey) elements_kind = GetHoleyElementsKind(elements_kind);
+
+ // If a double array is concatted into a fast elements array, the fast
+ // elements array needs to be initialized to contain proper holes, since
+ // boxing doubles may cause incremental marking.
+ ArrayStorageAllocationMode mode =
+ has_double && IsFastObjectElementsKind(elements_kind)
+ ? INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE : DONT_INITIALIZE_ARRAY_ELEMENTS;
JSArray* result_array;
+ // Allocate result.
MaybeObject* maybe_array =
heap->AllocateJSArrayAndStorage(elements_kind,
result_len,
- result_len);
+ result_len,
+ mode);
if (!maybe_array->To(&result_array)) return maybe_array;
if (result_len == 0) return result_array;
- // Copy data.
- int start_pos = 0;
- FixedArray* result_elms(FixedArray::cast(result_array->elements()));
+ int j = 0;
+ FixedArrayBase* storage = result_array->elements();
+ ElementsAccessor* accessor = ElementsAccessor::ForKind(elements_kind);
for (int i = 0; i < n_arguments; i++) {
JSArray* array = JSArray::cast(args[i]);
int len = Smi::cast(array->length())->value();
- FixedArray* elms = FixedArray::cast(array->elements());
- CopyObjectToObjectElements(elms, elements_kind, 0,
- result_elms, elements_kind,
- start_pos, len);
- start_pos += len;
+ ElementsKind from_kind = array->GetElementsKind();
+ if (len > 0) {
+ MaybeObject* maybe_failure =
+ accessor->CopyElements(array, 0, from_kind, storage, j, len);
+ if (maybe_failure->IsFailure()) return maybe_failure;
+ j += len;
+ }
}
- ASSERT(start_pos == result_len);
+
+ ASSERT(j == result_len);
return result_array;
}
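
ArrayConcat now picks the result's elements kind by widening across all inputs: it keeps the most general kind seen, remembers separately whether any input was holey or double-based, and only then applies the holey variant. A toy version of that widening step (real elements kinds form a richer transition lattice than this three-value enum):

    // Illustrative sketch only: widen to the most general representation seen,
    // then apply holeyness as a separate flag, as the builtin does above.
    enum Kind { kSmi = 0, kDouble = 1, kObject = 2 };

    struct ResultKind {
      Kind kind;
      bool holey;
    };

    static ResultKind WidenForConcat(const ResultKind* inputs, int count) {
      ResultKind result = {kSmi, false};
      for (int i = 0; i < count; i++) {
        if (inputs[i].kind > result.kind) result.kind = inputs[i].kind;
        result.holey = result.holey || inputs[i].holey;
      }
      return result;
    }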
@@ -1024,21 +1087,38 @@ BUILTIN(ArrayConcat) {
BUILTIN(StrictModePoisonPill) {
- HandleScope scope;
+ HandleScope scope(isolate);
return isolate->Throw(*isolate->factory()->NewTypeError(
"strict_poison_pill", HandleVector<Object>(NULL, 0)));
}
+
// -----------------------------------------------------------------------------
//
+// Searches the hidden prototype chain of the given object for the first
+// object that is an instance of the given type. If no such object can
+// be found then Heap::null_value() is returned.
+static inline Object* FindHidden(Heap* heap,
+ Object* object,
+ FunctionTemplateInfo* type) {
+ if (object->IsInstanceOf(type)) return object;
+ Object* proto = object->GetPrototype(heap->isolate());
+ if (proto->IsJSObject() &&
+ JSObject::cast(proto)->map()->is_hidden_prototype()) {
+ return FindHidden(heap, proto, type);
+ }
+ return heap->null_value();
+}
+
+
// Returns the holder JSObject if the function can legally be called
// with this receiver. Returns Heap::null_value() if the call is
// illegal. Any arguments that don't fit the expected type are
-// overwritten with undefined. Arguments that do fit the expected
-// type is overwritten with the object in the prototype chain that
-// actually has that type.
+// overwritten with undefined. Note that holder and the arguments are
+// implicitly rewritten with the first object in the hidden prototype
+// chain that actually has the expected type.
static inline Object* TypeCheck(Heap* heap,
int argc,
Object** argv,
@@ -1051,15 +1131,10 @@ static inline Object* TypeCheck(Heap* heap,
SignatureInfo* sig = SignatureInfo::cast(sig_obj);
// If necessary, check the receiver
Object* recv_type = sig->receiver();
-
Object* holder = recv;
if (!recv_type->IsUndefined()) {
- for (; holder != heap->null_value(); holder = holder->GetPrototype()) {
- if (holder->IsInstanceOf(FunctionTemplateInfo::cast(recv_type))) {
- break;
- }
- }
- if (holder == heap->null_value()) return holder;
+ holder = FindHidden(heap, holder, FunctionTemplateInfo::cast(recv_type));
+ if (holder == heap->null_value()) return heap->null_value();
}
Object* args_obj = sig->args();
// If there is no argument signature we're done
@@ -1072,13 +1147,9 @@ static inline Object* TypeCheck(Heap* heap,
if (argtype->IsUndefined()) continue;
Object** arg = &argv[-1 - i];
Object* current = *arg;
- for (; current != heap->null_value(); current = current->GetPrototype()) {
- if (current->IsInstanceOf(FunctionTemplateInfo::cast(argtype))) {
- *arg = current;
- break;
- }
- }
- if (current == heap->null_value()) *arg = heap->undefined_value();
+ current = FindHidden(heap, current, FunctionTemplateInfo::cast(argtype));
+ if (current == heap->null_value()) current = heap->undefined_value();
+ *arg = current;
}
return holder;
}
@@ -1119,32 +1190,23 @@ MUST_USE_RESULT static MaybeObject* HandleApiCallHelper(
if (!raw_call_data->IsUndefined()) {
CallHandlerInfo* call_data = CallHandlerInfo::cast(raw_call_data);
Object* callback_obj = call_data->callback();
- v8::InvocationCallback callback =
- v8::ToCData<v8::InvocationCallback>(callback_obj);
+ v8::FunctionCallback callback =
+ v8::ToCData<v8::FunctionCallback>(callback_obj);
Object* data_obj = call_data->data();
Object* result;
LOG(isolate, ApiObjectAccess("call", JSObject::cast(*args.receiver())));
ASSERT(raw_holder->IsJSObject());
- CustomArguments custom(isolate);
- v8::ImplementationUtilities::PrepareArgumentsData(custom.end(),
- isolate, data_obj, *function, raw_holder);
-
- v8::Arguments new_args = v8::ImplementationUtilities::NewArguments(
- custom.end(),
- &args[0] - 1,
- args.length() - 1,
- is_construct);
-
- v8::Handle<v8::Value> value;
- {
- // Leaving JavaScript.
- VMState state(isolate, EXTERNAL);
- ExternalCallbackScope call_scope(isolate,
- v8::ToCData<Address>(callback_obj));
- value = callback(new_args);
- }
+ FunctionCallbackArguments custom(isolate,
+ data_obj,
+ *function,
+ raw_holder,
+ &args[0] - 1,
+ args.length() - 1,
+ is_construct);
+
+ v8::Handle<v8::Value> value = custom.Call(callback);
if (value.IsEmpty()) {
result = heap->undefined_value();
} else {
@@ -1197,8 +1259,8 @@ MUST_USE_RESULT static MaybeObject* HandleApiCallAsFunctionOrConstructor(
ASSERT(!handler->IsUndefined());
CallHandlerInfo* call_data = CallHandlerInfo::cast(handler);
Object* callback_obj = call_data->callback();
- v8::InvocationCallback callback =
- v8::ToCData<v8::InvocationCallback>(callback_obj);
+ v8::FunctionCallback callback =
+ v8::ToCData<v8::FunctionCallback>(callback_obj);
// Get the data for the call and perform the callback.
Object* result;
@@ -1206,22 +1268,14 @@ MUST_USE_RESULT static MaybeObject* HandleApiCallAsFunctionOrConstructor(
HandleScope scope(isolate);
LOG(isolate, ApiObjectAccess("call non-function", obj));
- CustomArguments custom(isolate);
- v8::ImplementationUtilities::PrepareArgumentsData(custom.end(),
- isolate, call_data->data(), constructor, obj);
- v8::Arguments new_args = v8::ImplementationUtilities::NewArguments(
- custom.end(),
- &args[0] - 1,
- args.length() - 1,
- is_construct_call);
- v8::Handle<v8::Value> value;
- {
- // Leaving JavaScript.
- VMState state(isolate, EXTERNAL);
- ExternalCallbackScope call_scope(isolate,
- v8::ToCData<Address>(callback_obj));
- value = callback(new_args);
- }
+ FunctionCallbackArguments custom(isolate,
+ call_data->data(),
+ constructor,
+ obj,
+ &args[0] - 1,
+ args.length() - 1,
+ is_construct_call);
+ v8::Handle<v8::Value> value = custom.Call(callback);
if (value.IsEmpty()) {
result = heap->undefined_value();
} else {
@@ -1249,26 +1303,6 @@ BUILTIN(HandleApiCallAsConstructor) {
}
-static void Generate_LoadIC_ArrayLength(MacroAssembler* masm) {
- LoadIC::GenerateArrayLength(masm);
-}
-
-
-static void Generate_LoadIC_StringLength(MacroAssembler* masm) {
- LoadIC::GenerateStringLength(masm, false);
-}
-
-
-static void Generate_LoadIC_StringWrapperLength(MacroAssembler* masm) {
- LoadIC::GenerateStringLength(masm, true);
-}
-
-
-static void Generate_LoadIC_FunctionPrototype(MacroAssembler* masm) {
- LoadIC::GenerateFunctionPrototype(masm);
-}
-
-
static void Generate_LoadIC_Initialize(MacroAssembler* masm) {
LoadIC::GenerateInitialize(masm);
}
@@ -1295,7 +1329,13 @@ static void Generate_LoadIC_Normal(MacroAssembler* masm) {
static void Generate_LoadIC_Getter_ForDeopt(MacroAssembler* masm) {
- LoadStubCompiler::GenerateLoadViaGetter(masm, Handle<JSFunction>());
+ LoadStubCompiler::GenerateLoadViaGetter(
+ masm, LoadStubCompiler::registers()[0], Handle<JSFunction>());
+}
+
+
+static void Generate_LoadIC_Slow(MacroAssembler* masm) {
+ LoadIC::GenerateRuntimeGetProperty(masm);
}
@@ -1310,12 +1350,12 @@ static void Generate_KeyedLoadIC_Slow(MacroAssembler* masm) {
static void Generate_KeyedLoadIC_Miss(MacroAssembler* masm) {
- KeyedLoadIC::GenerateMiss(masm, false);
+ KeyedLoadIC::GenerateMiss(masm, MISS);
}
static void Generate_KeyedLoadIC_MissForceGeneric(MacroAssembler* masm) {
- KeyedLoadIC::GenerateMiss(masm, true);
+ KeyedLoadIC::GenerateMiss(masm, MISS_FORCE_GENERIC);
}
@@ -1333,14 +1373,27 @@ static void Generate_KeyedLoadIC_PreMonomorphic(MacroAssembler* masm) {
KeyedLoadIC::GeneratePreMonomorphic(masm);
}
+
static void Generate_KeyedLoadIC_IndexedInterceptor(MacroAssembler* masm) {
KeyedLoadIC::GenerateIndexedInterceptor(masm);
}
+
static void Generate_KeyedLoadIC_NonStrictArguments(MacroAssembler* masm) {
KeyedLoadIC::GenerateNonStrictArguments(masm);
}
+
+static void Generate_StoreIC_Slow(MacroAssembler* masm) {
+ StoreIC::GenerateSlow(masm);
+}
+
+
+static void Generate_StoreIC_Slow_Strict(MacroAssembler* masm) {
+ StoreIC::GenerateSlow(masm);
+}
+
+
static void Generate_StoreIC_Initialize(MacroAssembler* masm) {
StoreIC::GenerateInitialize(masm);
}
@@ -1351,6 +1404,16 @@ static void Generate_StoreIC_Initialize_Strict(MacroAssembler* masm) {
}
+static void Generate_StoreIC_PreMonomorphic(MacroAssembler* masm) {
+ StoreIC::GeneratePreMonomorphic(masm);
+}
+
+
+static void Generate_StoreIC_PreMonomorphic_Strict(MacroAssembler* masm) {
+ StoreIC::GeneratePreMonomorphic(masm);
+}
+
+
static void Generate_StoreIC_Miss(MacroAssembler* masm) {
StoreIC::GenerateMiss(masm);
}
@@ -1376,28 +1439,28 @@ static void Generate_StoreIC_Megamorphic_Strict(MacroAssembler* masm) {
}
-static void Generate_StoreIC_ArrayLength(MacroAssembler* masm) {
- StoreIC::GenerateArrayLength(masm);
+static void Generate_StoreIC_GlobalProxy(MacroAssembler* masm) {
+ StoreIC::GenerateRuntimeSetProperty(masm, kNonStrictMode);
}
-static void Generate_StoreIC_ArrayLength_Strict(MacroAssembler* masm) {
- StoreIC::GenerateArrayLength(masm);
+static void Generate_StoreIC_GlobalProxy_Strict(MacroAssembler* masm) {
+ StoreIC::GenerateRuntimeSetProperty(masm, kStrictMode);
}
-static void Generate_StoreIC_GlobalProxy(MacroAssembler* masm) {
- StoreIC::GenerateGlobalProxy(masm, kNonStrictMode);
+static void Generate_StoreIC_Setter_ForDeopt(MacroAssembler* masm) {
+ StoreStubCompiler::GenerateStoreViaSetter(masm, Handle<JSFunction>());
}
-static void Generate_StoreIC_GlobalProxy_Strict(MacroAssembler* masm) {
- StoreIC::GenerateGlobalProxy(masm, kStrictMode);
+static void Generate_StoreIC_Generic(MacroAssembler* masm) {
+ StoreIC::GenerateRuntimeSetProperty(masm, kNonStrictMode);
}
-static void Generate_StoreIC_Setter_ForDeopt(MacroAssembler* masm) {
- StoreStubCompiler::GenerateStoreViaSetter(masm, Handle<JSFunction>());
+static void Generate_StoreIC_Generic_Strict(MacroAssembler* masm) {
+ StoreIC::GenerateRuntimeSetProperty(masm, kStrictMode);
}
@@ -1412,12 +1475,12 @@ static void Generate_KeyedStoreIC_Generic_Strict(MacroAssembler* masm) {
static void Generate_KeyedStoreIC_Miss(MacroAssembler* masm) {
- KeyedStoreIC::GenerateMiss(masm, false);
+ KeyedStoreIC::GenerateMiss(masm, MISS);
}
static void Generate_KeyedStoreIC_MissForceGeneric(MacroAssembler* masm) {
- KeyedStoreIC::GenerateMiss(masm, true);
+ KeyedStoreIC::GenerateMiss(masm, MISS_FORCE_GENERIC);
}
@@ -1426,6 +1489,11 @@ static void Generate_KeyedStoreIC_Slow(MacroAssembler* masm) {
}
+static void Generate_KeyedStoreIC_Slow_Strict(MacroAssembler* masm) {
+ KeyedStoreIC::GenerateSlow(masm);
+}
+
+
static void Generate_KeyedStoreIC_Initialize(MacroAssembler* masm) {
KeyedStoreIC::GenerateInitialize(masm);
}
@@ -1435,18 +1503,22 @@ static void Generate_KeyedStoreIC_Initialize_Strict(MacroAssembler* masm) {
KeyedStoreIC::GenerateInitialize(masm);
}
-static void Generate_KeyedStoreIC_NonStrictArguments(MacroAssembler* masm) {
- KeyedStoreIC::GenerateNonStrictArguments(masm);
+
+static void Generate_KeyedStoreIC_PreMonomorphic(MacroAssembler* masm) {
+ KeyedStoreIC::GeneratePreMonomorphic(masm);
}
-static void Generate_TransitionElementsSmiToDouble(MacroAssembler* masm) {
- KeyedStoreIC::GenerateTransitionElementsSmiToDouble(masm);
+
+static void Generate_KeyedStoreIC_PreMonomorphic_Strict(MacroAssembler* masm) {
+ KeyedStoreIC::GeneratePreMonomorphic(masm);
}
-static void Generate_TransitionElementsDoubleToObject(MacroAssembler* masm) {
- KeyedStoreIC::GenerateTransitionElementsDoubleToObject(masm);
+
+static void Generate_KeyedStoreIC_NonStrictArguments(MacroAssembler* masm) {
+ KeyedStoreIC::GenerateNonStrictArguments(masm);
}
+
#ifdef ENABLE_DEBUGGER_SUPPORT
static void Generate_LoadIC_DebugBreak(MacroAssembler* masm) {
Debug::GenerateLoadICDebugBreak(masm);
@@ -1468,6 +1540,11 @@ static void Generate_KeyedStoreIC_DebugBreak(MacroAssembler* masm) {
}
+static void Generate_CompareNilIC_DebugBreak(MacroAssembler* masm) {
+ Debug::GenerateCompareNilICDebugBreak(masm);
+}
+
+
static void Generate_Return_DebugBreak(MacroAssembler* masm) {
Debug::GenerateReturnDebugBreak(masm);
}
@@ -1599,17 +1676,28 @@ void Builtins::InitBuiltinFunctionTable() {
functions->extra_args = NO_EXTRA_ARGUMENTS; \
++functions;
+#define DEF_FUNCTION_PTR_H(aname, kind, extra) \
+ functions->generator = FUNCTION_ADDR(Generate_##aname); \
+ functions->c_code = NULL; \
+ functions->s_name = #aname; \
+ functions->name = k##aname; \
+ functions->flags = Code::ComputeFlags( \
+ Code::HANDLER, MONOMORPHIC, extra, Code::NORMAL, Code::kind); \
+ functions->extra_args = NO_EXTRA_ARGUMENTS; \
+ ++functions;
+
BUILTIN_LIST_C(DEF_FUNCTION_PTR_C)
BUILTIN_LIST_A(DEF_FUNCTION_PTR_A)
+ BUILTIN_LIST_H(DEF_FUNCTION_PTR_H)
BUILTIN_LIST_DEBUG_A(DEF_FUNCTION_PTR_A)
#undef DEF_FUNCTION_PTR_C
#undef DEF_FUNCTION_PTR_A
}
-void Builtins::SetUp(bool create_heap_objects) {
+
+void Builtins::SetUp(Isolate* isolate, bool create_heap_objects) {
ASSERT(!initialized_);
- Isolate* isolate = Isolate::Current();
Heap* heap = isolate->heap();
// Create a scope for the handles in the builtins.
@@ -1703,6 +1791,16 @@ const char* Builtins::Lookup(byte* pc) {
}
+void Builtins::Generate_InterruptCheck(MacroAssembler* masm) {
+ masm->TailCallRuntime(Runtime::kInterrupt, 0, 1);
+}
+
+
+void Builtins::Generate_StackCheck(MacroAssembler* masm) {
+ masm->TailCallRuntime(Runtime::kStackGuard, 0, 1);
+}
+
+
#define DEFINE_BUILTIN_ACCESSOR_C(name, ignore) \
Handle<Code> Builtins::name() { \
Code** code_address = \
@@ -1715,8 +1813,15 @@ Handle<Code> Builtins::name() { \
reinterpret_cast<Code**>(builtin_address(k##name)); \
return Handle<Code>(code_address); \
}
+#define DEFINE_BUILTIN_ACCESSOR_H(name, kind, extra) \
+Handle<Code> Builtins::name() { \
+ Code** code_address = \
+ reinterpret_cast<Code**>(builtin_address(k##name)); \
+ return Handle<Code>(code_address); \
+}
BUILTIN_LIST_C(DEFINE_BUILTIN_ACCESSOR_C)
BUILTIN_LIST_A(DEFINE_BUILTIN_ACCESSOR_A)
+BUILTIN_LIST_H(DEFINE_BUILTIN_ACCESSOR_H)
BUILTIN_LIST_DEBUG_A(DEFINE_BUILTIN_ACCESSOR_A)
#undef DEFINE_BUILTIN_ACCESSOR_C
#undef DEFINE_BUILTIN_ACCESSOR_A
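
For illustration only: a standalone sketch of the pattern introduced above, where the explicit PrepareArgumentsData/NewArguments/VMState sequence is folded into a wrapper whose Call() owns the "leaving JavaScript" scope. The class and scope names below are placeholders, not V8's actual FunctionCallbackArguments implementation.

#include <cstdio>
#include <functional>

class ExternalScope {                     // stands in for VMState/ExternalCallbackScope
 public:
  ExternalScope()  { std::puts("enter EXTERNAL"); }
  ~ExternalScope() { std::puts("leave EXTERNAL"); }
};

class CallbackArguments {                 // stands in for FunctionCallbackArguments
 public:
  explicit CallbackArguments(int argc) : argc_(argc) {}
  int Call(const std::function<int(int)>& callback) {
    ExternalScope scope;                  // the scoping now lives in one place
    return callback(argc_);
  }
 private:
  int argc_;
};

int main() {
  CallbackArguments custom(3);
  return custom.Call([](int argc) { return argc; }) == 3 ? 0 : 1;
}
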
diff --git a/deps/v8/src/builtins.h b/deps/v8/src/builtins.h
index ca70ae540..9b589d843 100644
--- a/deps/v8/src/builtins.h
+++ b/deps/v8/src/builtins.h
@@ -38,15 +38,35 @@ enum BuiltinExtraArguments {
};
+#define CODE_AGE_LIST_WITH_ARG(V, A) \
+ V(Quadragenarian, A) \
+ V(Quinquagenarian, A) \
+ V(Sexagenarian, A) \
+ V(Septuagenarian, A) \
+ V(Octogenarian, A)
+
+#define CODE_AGE_LIST_IGNORE_ARG(X, V) V(X)
+
+#define CODE_AGE_LIST(V) \
+ CODE_AGE_LIST_WITH_ARG(CODE_AGE_LIST_IGNORE_ARG, V)
+
+#define CODE_AGE_LIST_WITH_NO_AGE(V) \
+ V(NoAge) \
+ CODE_AGE_LIST_WITH_ARG(CODE_AGE_LIST_IGNORE_ARG, V)
+
+#define DECLARE_CODE_AGE_BUILTIN(C, V) \
+ V(Make##C##CodeYoungAgainOddMarking, BUILTIN, \
+ UNINITIALIZED, Code::kNoExtraICState) \
+ V(Make##C##CodeYoungAgainEvenMarking, BUILTIN, \
+ UNINITIALIZED, Code::kNoExtraICState)
+
+
// Define list of builtins implemented in C++.
#define BUILTIN_LIST_C(V) \
V(Illegal, NO_EXTRA_ARGUMENTS) \
\
V(EmptyFunction, NO_EXTRA_ARGUMENTS) \
\
- V(InternalArrayCodeGeneric, NO_EXTRA_ARGUMENTS) \
- V(ArrayCodeGeneric, NO_EXTRA_ARGUMENTS) \
- \
V(ArrayPush, NO_EXTRA_ARGUMENTS) \
V(ArrayPop, NO_EXTRA_ARGUMENTS) \
V(ArrayShift, NO_EXTRA_ARGUMENTS) \
@@ -82,13 +102,15 @@ enum BuiltinExtraArguments {
Code::kNoExtraICState) \
V(LazyRecompile, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
- V(ParallelRecompile, BUILTIN, UNINITIALIZED, \
+ V(ConcurrentRecompile, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(NotifyDeoptimized, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
+ V(NotifySoftDeoptimized, BUILTIN, UNINITIALIZED, \
+ Code::kNoExtraICState) \
V(NotifyLazyDeoptimized, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
- V(NotifyOSR, BUILTIN, UNINITIALIZED, \
+ V(NotifyStubFailure, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
\
V(LoadIC_Miss, BUILTIN, UNINITIALIZED, \
@@ -97,30 +119,16 @@ enum BuiltinExtraArguments {
Code::kNoExtraICState) \
V(KeyedLoadIC_MissForceGeneric, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
- V(KeyedLoadIC_Slow, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
V(StoreIC_Miss, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(KeyedStoreIC_Miss, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(KeyedStoreIC_MissForceGeneric, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
- V(KeyedStoreIC_Slow, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
V(LoadIC_Initialize, LOAD_IC, UNINITIALIZED, \
Code::kNoExtraICState) \
V(LoadIC_PreMonomorphic, LOAD_IC, PREMONOMORPHIC, \
Code::kNoExtraICState) \
- V(LoadIC_Normal, LOAD_IC, MONOMORPHIC, \
- Code::kNoExtraICState) \
- V(LoadIC_ArrayLength, LOAD_IC, MONOMORPHIC, \
- Code::kNoExtraICState) \
- V(LoadIC_StringLength, LOAD_IC, MONOMORPHIC, \
- Code::kNoExtraICState) \
- V(LoadIC_StringWrapperLength, LOAD_IC, MONOMORPHIC, \
- Code::kNoExtraICState) \
- V(LoadIC_FunctionPrototype, LOAD_IC, MONOMORPHIC, \
- Code::kNoExtraICState) \
V(LoadIC_Megamorphic, LOAD_IC, MEGAMORPHIC, \
Code::kNoExtraICState) \
V(LoadIC_Getter_ForDeopt, LOAD_IC, MONOMORPHIC, \
@@ -130,53 +138,53 @@ enum BuiltinExtraArguments {
Code::kNoExtraICState) \
V(KeyedLoadIC_PreMonomorphic, KEYED_LOAD_IC, PREMONOMORPHIC, \
Code::kNoExtraICState) \
- V(KeyedLoadIC_Generic, KEYED_LOAD_IC, MEGAMORPHIC, \
+ V(KeyedLoadIC_Generic, KEYED_LOAD_IC, GENERIC, \
Code::kNoExtraICState) \
V(KeyedLoadIC_String, KEYED_LOAD_IC, MEGAMORPHIC, \
Code::kNoExtraICState) \
- V(KeyedLoadIC_IndexedInterceptor, KEYED_LOAD_IC, MEGAMORPHIC, \
+ V(KeyedLoadIC_IndexedInterceptor, KEYED_LOAD_IC, MONOMORPHIC, \
Code::kNoExtraICState) \
- V(KeyedLoadIC_NonStrictArguments, KEYED_LOAD_IC, MEGAMORPHIC, \
+ V(KeyedLoadIC_NonStrictArguments, KEYED_LOAD_IC, MONOMORPHIC, \
Code::kNoExtraICState) \
\
V(StoreIC_Initialize, STORE_IC, UNINITIALIZED, \
Code::kNoExtraICState) \
- V(StoreIC_ArrayLength, STORE_IC, MONOMORPHIC, \
- Code::kNoExtraICState) \
- V(StoreIC_Normal, STORE_IC, MONOMORPHIC, \
+ V(StoreIC_PreMonomorphic, STORE_IC, PREMONOMORPHIC, \
Code::kNoExtraICState) \
V(StoreIC_Megamorphic, STORE_IC, MEGAMORPHIC, \
Code::kNoExtraICState) \
- V(StoreIC_GlobalProxy, STORE_IC, MEGAMORPHIC, \
+ V(StoreIC_Generic, STORE_IC, GENERIC, \
Code::kNoExtraICState) \
- V(StoreIC_Initialize_Strict, STORE_IC, UNINITIALIZED, \
+ V(StoreIC_Generic_Strict, STORE_IC, GENERIC, \
kStrictMode) \
- V(StoreIC_ArrayLength_Strict, STORE_IC, MONOMORPHIC, \
+ V(StoreIC_GlobalProxy, STORE_IC, GENERIC, \
+ Code::kNoExtraICState) \
+ V(StoreIC_Initialize_Strict, STORE_IC, UNINITIALIZED, \
kStrictMode) \
- V(StoreIC_Normal_Strict, STORE_IC, MONOMORPHIC, \
+ V(StoreIC_PreMonomorphic_Strict, STORE_IC, PREMONOMORPHIC, \
kStrictMode) \
V(StoreIC_Megamorphic_Strict, STORE_IC, MEGAMORPHIC, \
kStrictMode) \
- V(StoreIC_GlobalProxy_Strict, STORE_IC, MEGAMORPHIC, \
+ V(StoreIC_GlobalProxy_Strict, STORE_IC, GENERIC, \
kStrictMode) \
V(StoreIC_Setter_ForDeopt, STORE_IC, MONOMORPHIC, \
kStrictMode) \
\
V(KeyedStoreIC_Initialize, KEYED_STORE_IC, UNINITIALIZED, \
Code::kNoExtraICState) \
- V(KeyedStoreIC_Generic, KEYED_STORE_IC, MEGAMORPHIC, \
+ V(KeyedStoreIC_PreMonomorphic, KEYED_STORE_IC, PREMONOMORPHIC, \
+ Code::kNoExtraICState) \
+ V(KeyedStoreIC_Generic, KEYED_STORE_IC, GENERIC, \
Code::kNoExtraICState) \
\
V(KeyedStoreIC_Initialize_Strict, KEYED_STORE_IC, UNINITIALIZED, \
kStrictMode) \
- V(KeyedStoreIC_Generic_Strict, KEYED_STORE_IC, MEGAMORPHIC, \
+ V(KeyedStoreIC_PreMonomorphic_Strict, KEYED_STORE_IC, PREMONOMORPHIC, \
kStrictMode) \
- V(KeyedStoreIC_NonStrictArguments, KEYED_STORE_IC, MEGAMORPHIC, \
+ V(KeyedStoreIC_Generic_Strict, KEYED_STORE_IC, GENERIC, \
+ kStrictMode) \
+ V(KeyedStoreIC_NonStrictArguments, KEYED_STORE_IC, MONOMORPHIC, \
Code::kNoExtraICState) \
- V(TransitionElementsSmiToDouble, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(TransitionElementsDoubleToObject, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
\
/* Uses KeyedLoadIC_Initialize; must be after in list. */ \
V(FunctionCall, BUILTIN, UNINITIALIZED, \
@@ -188,43 +196,66 @@ enum BuiltinExtraArguments {
Code::kNoExtraICState) \
V(ArrayCode, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
- V(ArrayConstructCode, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
\
V(StringConstructCode, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
\
V(OnStackReplacement, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState)
+ Code::kNoExtraICState) \
+ V(InterruptCheck, BUILTIN, UNINITIALIZED, \
+ Code::kNoExtraICState) \
+ V(OsrAfterStackCheck, BUILTIN, UNINITIALIZED, \
+ Code::kNoExtraICState) \
+ V(StackCheck, BUILTIN, UNINITIALIZED, \
+ Code::kNoExtraICState) \
+ \
+ V(MarkCodeAsExecutedOnce, BUILTIN, UNINITIALIZED, \
+ Code::kNoExtraICState) \
+ V(MarkCodeAsExecutedTwice, BUILTIN, UNINITIALIZED, \
+ Code::kNoExtraICState) \
+ CODE_AGE_LIST_WITH_ARG(DECLARE_CODE_AGE_BUILTIN, V)
+// Define list of builtin handlers implemented in assembly.
+#define BUILTIN_LIST_H(V) \
+ V(LoadIC_Slow, LOAD_IC, Code::kNoExtraICState) \
+ V(KeyedLoadIC_Slow, KEYED_LOAD_IC, Code::kNoExtraICState) \
+ V(StoreIC_Slow, STORE_IC, Code::kNoExtraICState) \
+ V(StoreIC_Slow_Strict, STORE_IC, kStrictMode) \
+ V(KeyedStoreIC_Slow, KEYED_STORE_IC, Code::kNoExtraICState)\
+ V(KeyedStoreIC_Slow_Strict, KEYED_STORE_IC, kStrictMode) \
+ V(LoadIC_Normal, LOAD_IC, Code::kNoExtraICState) \
+ V(StoreIC_Normal, STORE_IC, Code::kNoExtraICState) \
+ V(StoreIC_Normal_Strict, STORE_IC, kStrictMode)
#ifdef ENABLE_DEBUGGER_SUPPORT
// Define list of builtins used by the debugger implemented in assembly.
-#define BUILTIN_LIST_DEBUG_A(V) \
- V(Return_DebugBreak, BUILTIN, DEBUG_BREAK, \
- Code::kNoExtraICState) \
- V(CallFunctionStub_DebugBreak, BUILTIN, DEBUG_BREAK, \
- Code::kNoExtraICState) \
- V(CallFunctionStub_Recording_DebugBreak, BUILTIN, DEBUG_BREAK, \
- Code::kNoExtraICState) \
- V(CallConstructStub_DebugBreak, BUILTIN, DEBUG_BREAK, \
- Code::kNoExtraICState) \
- V(CallConstructStub_Recording_DebugBreak, BUILTIN, DEBUG_BREAK, \
- Code::kNoExtraICState) \
- V(LoadIC_DebugBreak, LOAD_IC, DEBUG_BREAK, \
- Code::kNoExtraICState) \
- V(KeyedLoadIC_DebugBreak, KEYED_LOAD_IC, DEBUG_BREAK, \
- Code::kNoExtraICState) \
- V(StoreIC_DebugBreak, STORE_IC, DEBUG_BREAK, \
- Code::kNoExtraICState) \
- V(KeyedStoreIC_DebugBreak, KEYED_STORE_IC, DEBUG_BREAK, \
- Code::kNoExtraICState) \
- V(Slot_DebugBreak, BUILTIN, DEBUG_BREAK, \
- Code::kNoExtraICState) \
- V(PlainReturn_LiveEdit, BUILTIN, DEBUG_BREAK, \
- Code::kNoExtraICState) \
- V(FrameDropper_LiveEdit, BUILTIN, DEBUG_BREAK, \
- Code::kNoExtraICState)
+#define BUILTIN_LIST_DEBUG_A(V) \
+ V(Return_DebugBreak, BUILTIN, DEBUG_STUB, \
+ DEBUG_BREAK) \
+ V(CallFunctionStub_DebugBreak, BUILTIN, DEBUG_STUB, \
+ DEBUG_BREAK) \
+ V(CallFunctionStub_Recording_DebugBreak, BUILTIN, DEBUG_STUB, \
+ DEBUG_BREAK) \
+ V(CallConstructStub_DebugBreak, BUILTIN, DEBUG_STUB, \
+ DEBUG_BREAK) \
+ V(CallConstructStub_Recording_DebugBreak, BUILTIN, DEBUG_STUB, \
+ DEBUG_BREAK) \
+ V(LoadIC_DebugBreak, LOAD_IC, DEBUG_STUB, \
+ DEBUG_BREAK) \
+ V(KeyedLoadIC_DebugBreak, KEYED_LOAD_IC, DEBUG_STUB, \
+ DEBUG_BREAK) \
+ V(StoreIC_DebugBreak, STORE_IC, DEBUG_STUB, \
+ DEBUG_BREAK) \
+ V(KeyedStoreIC_DebugBreak, KEYED_STORE_IC, DEBUG_STUB, \
+ DEBUG_BREAK) \
+ V(CompareNilIC_DebugBreak, COMPARE_NIL_IC, DEBUG_STUB, \
+ DEBUG_BREAK) \
+ V(Slot_DebugBreak, BUILTIN, DEBUG_STUB, \
+ DEBUG_BREAK) \
+ V(PlainReturn_LiveEdit, BUILTIN, DEBUG_STUB, \
+ DEBUG_BREAK) \
+ V(FrameDropper_LiveEdit, BUILTIN, DEBUG_STUB, \
+ DEBUG_BREAK)
#else
#define BUILTIN_LIST_DEBUG_A(V)
#endif
@@ -242,8 +273,6 @@ enum BuiltinExtraArguments {
V(BIT_OR, 1) \
V(BIT_AND, 1) \
V(BIT_XOR, 1) \
- V(UNARY_MINUS, 0) \
- V(BIT_NOT, 0) \
V(SHL, 1) \
V(SAR, 1) \
V(SHR, 1) \
@@ -263,7 +292,6 @@ enum BuiltinExtraArguments {
V(APPLY_PREPARE, 1) \
V(APPLY_OVERFLOW, 1)
-
class BuiltinFunctionTable;
class ObjectVisitor;
@@ -274,7 +302,7 @@ class Builtins {
// Generate all builtin code objects. Should be called once during
// isolate initialization.
- void SetUp(bool create_heap_objects);
+ void SetUp(Isolate* isolate, bool create_heap_objects);
void TearDown();
// Garbage collection support.
@@ -286,8 +314,10 @@ class Builtins {
enum Name {
#define DEF_ENUM_C(name, ignore) k##name,
#define DEF_ENUM_A(name, kind, state, extra) k##name,
+#define DEF_ENUM_H(name, kind, extra) k##name,
BUILTIN_LIST_C(DEF_ENUM_C)
BUILTIN_LIST_A(DEF_ENUM_A)
+ BUILTIN_LIST_H(DEF_ENUM_H)
BUILTIN_LIST_DEBUG_A(DEF_ENUM_A)
#undef DEF_ENUM_C
#undef DEF_ENUM_A
@@ -311,8 +341,10 @@ class Builtins {
#define DECLARE_BUILTIN_ACCESSOR_C(name, ignore) Handle<Code> name();
#define DECLARE_BUILTIN_ACCESSOR_A(name, kind, state, extra) \
Handle<Code> name();
+#define DECLARE_BUILTIN_ACCESSOR_H(name, kind, extra) Handle<Code> name();
BUILTIN_LIST_C(DECLARE_BUILTIN_ACCESSOR_C)
BUILTIN_LIST_A(DECLARE_BUILTIN_ACCESSOR_A)
+ BUILTIN_LIST_H(DECLARE_BUILTIN_ACCESSOR_H)
BUILTIN_LIST_DEBUG_A(DECLARE_BUILTIN_ACCESSOR_A)
#undef DECLARE_BUILTIN_ACCESSOR_C
#undef DECLARE_BUILTIN_ACCESSOR_A
@@ -356,7 +388,7 @@ class Builtins {
CFunctionId id,
BuiltinExtraArguments extra_args);
static void Generate_InRecompileQueue(MacroAssembler* masm);
- static void Generate_ParallelRecompile(MacroAssembler* masm);
+ static void Generate_ConcurrentRecompile(MacroAssembler* masm);
static void Generate_JSConstructStubCountdown(MacroAssembler* masm);
static void Generate_JSConstructStubGeneric(MacroAssembler* masm);
static void Generate_JSConstructStubApi(MacroAssembler* masm);
@@ -365,8 +397,9 @@ class Builtins {
static void Generate_LazyCompile(MacroAssembler* masm);
static void Generate_LazyRecompile(MacroAssembler* masm);
static void Generate_NotifyDeoptimized(MacroAssembler* masm);
+ static void Generate_NotifySoftDeoptimized(MacroAssembler* masm);
static void Generate_NotifyLazyDeoptimized(MacroAssembler* masm);
- static void Generate_NotifyOSR(MacroAssembler* masm);
+ static void Generate_NotifyStubFailure(MacroAssembler* masm);
static void Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm);
static void Generate_FunctionCall(MacroAssembler* masm);
@@ -374,10 +407,23 @@ class Builtins {
static void Generate_InternalArrayCode(MacroAssembler* masm);
static void Generate_ArrayCode(MacroAssembler* masm);
- static void Generate_ArrayConstructCode(MacroAssembler* masm);
static void Generate_StringConstructCode(MacroAssembler* masm);
static void Generate_OnStackReplacement(MacroAssembler* masm);
+ static void Generate_OsrAfterStackCheck(MacroAssembler* masm);
+ static void Generate_InterruptCheck(MacroAssembler* masm);
+ static void Generate_StackCheck(MacroAssembler* masm);
+
+#define DECLARE_CODE_AGE_BUILTIN_GENERATOR(C) \
+ static void Generate_Make##C##CodeYoungAgainEvenMarking( \
+ MacroAssembler* masm); \
+ static void Generate_Make##C##CodeYoungAgainOddMarking( \
+ MacroAssembler* masm);
+ CODE_AGE_LIST(DECLARE_CODE_AGE_BUILTIN_GENERATOR)
+#undef DECLARE_CODE_AGE_BUILTIN_GENERATOR
+
+ static void Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm);
+ static void Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm);
static void InitBuiltinFunctionTable();
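
For illustration only: a minimal, self-contained demonstration of the X-macro pattern that BUILTIN_LIST_H, CODE_AGE_LIST and the DEF_ENUM_H/DEFINE_BUILTIN_ACCESSOR_H macros above rely on; one list macro is expanded several times to generate an enum and a matching name table. The list entries are borrowed from CODE_AGE_LIST; everything else here is hypothetical.

#include <cstdio>

#define AGE_LIST(V)      \
  V(Quadragenarian)      \
  V(Quinquagenarian)     \
  V(Sexagenarian)

#define DEF_ENUM(name) k##name,
enum Age { AGE_LIST(DEF_ENUM) kAgeCount };
#undef DEF_ENUM

#define DEF_NAME(name) #name,
static const char* kAgeNames[] = { AGE_LIST(DEF_NAME) };
#undef DEF_NAME

int main() {
  // Prints one line per list entry, e.g. "0: Quadragenarian".
  for (int i = 0; i < kAgeCount; ++i) std::printf("%d: %s\n", i, kAgeNames[i]);
  return 0;
}
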
diff --git a/deps/v8/src/cached-powers.cc b/deps/v8/src/cached-powers.cc
index 9241d2658..fbfaf2615 100644
--- a/deps/v8/src/cached-powers.cc
+++ b/deps/v8/src/cached-powers.cc
@@ -26,8 +26,8 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <stdarg.h>
-#include <math.h>
#include <limits.h>
+#include <cmath>
#include "../include/v8stdint.h"
#include "globals.h"
diff --git a/deps/v8/src/char-predicates-inl.h b/deps/v8/src/char-predicates-inl.h
index 1a89ef3b1..dee9ccd38 100644
--- a/deps/v8/src/char-predicates-inl.h
+++ b/deps/v8/src/char-predicates-inl.h
@@ -71,6 +71,18 @@ inline bool IsHexDigit(uc32 c) {
}
+inline bool IsOctalDigit(uc32 c) {
+ // ECMA-262, 6th, 7.8.3
+ return IsInRange(c, '0', '7');
+}
+
+
+inline bool IsBinaryDigit(uc32 c) {
+ // ECMA-262, 6th, 7.8.3
+ return c == '0' || c == '1';
+}
+
+
inline bool IsRegExpWord(uc16 c) {
return IsInRange(AsciiAlphaToLower(c), 'a', 'z')
|| IsDecimalDigit(c)
diff --git a/deps/v8/src/char-predicates.h b/deps/v8/src/char-predicates.h
index b97191f5c..767ad6513 100644
--- a/deps/v8/src/char-predicates.h
+++ b/deps/v8/src/char-predicates.h
@@ -40,6 +40,8 @@ inline bool IsCarriageReturn(uc32 c);
inline bool IsLineFeed(uc32 c);
inline bool IsDecimalDigit(uc32 c);
inline bool IsHexDigit(uc32 c);
+inline bool IsOctalDigit(uc32 c);
+inline bool IsBinaryDigit(uc32 c);
inline bool IsRegExpWord(uc32 c);
inline bool IsRegExpNewline(uc32 c);
diff --git a/deps/v8/src/checks.cc b/deps/v8/src/checks.cc
index 320fd6b5e..e08cd7c68 100644
--- a/deps/v8/src/checks.cc
+++ b/deps/v8/src/checks.cc
@@ -25,36 +25,63 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include <stdarg.h>
+#include "checks.h"
-#include "v8.h"
+#if V8_LIBC_GLIBC || V8_OS_BSD
+# include <cxxabi.h>
+# include <execinfo.h>
+#endif // V8_LIBC_GLIBC || V8_OS_BSD
+#include <stdio.h>
#include "platform.h"
+#include "v8.h"
+
+
+// Attempts to dump a backtrace (if supported).
+static V8_INLINE void DumpBacktrace() {
+#if V8_LIBC_GLIBC || V8_OS_BSD
+ void* trace[100];
+ int size = backtrace(trace, ARRAY_SIZE(trace));
+ char** symbols = backtrace_symbols(trace, size);
+ i::OS::PrintError("\n==== C stack trace ===============================\n\n");
+ if (size == 0) {
+ i::OS::PrintError("(empty)\n");
+ } else if (symbols == NULL) {
+ i::OS::PrintError("(no symbols)\n");
+ } else {
+ for (int i = 1; i < size; ++i) {
+ i::OS::PrintError("%2d: ", i);
+ char mangled[201];
+ if (sscanf(symbols[i], "%*[^(]%*[(]%200[^)+]", mangled) == 1) { // NOLINT
+ int status;
+ size_t length;
+ char* demangled = abi::__cxa_demangle(mangled, NULL, &length, &status);
+ i::OS::PrintError("%s\n", demangled != NULL ? demangled : mangled);
+ free(demangled);
+ } else {
+ i::OS::PrintError("??\n");
+ }
+ }
+ }
+ free(symbols);
+#endif // V8_LIBC_GLIBC || V8_OS_BSD
+}
-// TODO(isolates): is it necessary to lift this?
-static int fatal_error_handler_nesting_depth = 0;
// Contains protection against recursive calls (faults while handling faults).
extern "C" void V8_Fatal(const char* file, int line, const char* format, ...) {
+ i::AllowHandleDereference allow_deref;
+ i::AllowDeferredHandleDereference allow_deferred_deref;
fflush(stdout);
fflush(stderr);
- fatal_error_handler_nesting_depth++;
- // First time we try to print an error message
- if (fatal_error_handler_nesting_depth < 2) {
- i::OS::PrintError("\n\n#\n# Fatal error in %s, line %d\n# ", file, line);
- va_list arguments;
- va_start(arguments, format);
- i::OS::VPrintError(format, arguments);
- va_end(arguments);
- i::OS::PrintError("\n#\n\n");
- }
- // First two times we may try to print a stack dump.
- if (fatal_error_handler_nesting_depth < 3) {
- if (i::FLAG_stack_trace_on_abort) {
- // Call this one twice on double fault
- i::Isolate::Current()->PrintStack();
- }
- }
+ i::OS::PrintError("\n\n#\n# Fatal error in %s, line %d\n# ", file, line);
+ va_list arguments;
+ va_start(arguments, format);
+ i::OS::VPrintError(format, arguments);
+ va_end(arguments);
+ i::OS::PrintError("\n#\n");
+ DumpBacktrace();
+ fflush(stderr);
i::OS::Abort();
}
@@ -102,8 +129,6 @@ void API_Fatal(const char* location, const char* format, ...) {
namespace v8 { namespace internal {
- bool EnableSlowAsserts() { return FLAG_enable_slow_asserts; }
-
intptr_t HeapObjectTagMask() { return kHeapObjectTagMask; }
} } // namespace v8::internal
diff --git a/deps/v8/src/checks.h b/deps/v8/src/checks.h
index d0a0c2b5a..9d2db28d8 100644
--- a/deps/v8/src/checks.h
+++ b/deps/v8/src/checks.h
@@ -31,6 +31,7 @@
#include <string.h>
#include "../include/v8stdint.h"
+
extern "C" void V8_Fatal(const char* file, int line, const char* format, ...);
// The FATAL, UNREACHABLE and UNIMPLEMENTED macros are useful during
@@ -196,6 +197,20 @@ inline void CheckEqualsHelper(const char* file,
inline void CheckNonEqualsHelper(const char* file,
+ int line,
+ const char* expected_source,
+ int64_t expected,
+ const char* value_source,
+ int64_t value) {
+ if (expected == value) {
+ V8_Fatal(file, line,
+ "CHECK_NE(%s, %s) failed\n# Value: %lld",
+ expected_source, value_source, static_cast<long long>(value));
+ }
+}
+
+
+inline void CheckNonEqualsHelper(const char* file,
int line,
const char* expected_source,
double expected,
@@ -230,6 +245,11 @@ inline void CheckNonEqualsHelper(const char* file,
#define CHECK_LE(a, b) CHECK((a) <= (b))
+// Use C++11 static_assert if possible, which gives error
+// messages that are easier to understand at first sight.
+#if V8_HAS_CXX11_STATIC_ASSERT
+#define STATIC_CHECK(test) static_assert(test, #test)
+#else
// This is inspired by the static assertion facility in boost. This
// is pretty magical. If it causes you trouble on a platform you may
// find a fix in the boost code.
@@ -249,9 +269,27 @@ template <int> class StaticAssertionHelper { };
typedef \
StaticAssertionHelper<sizeof(StaticAssertion<static_cast<bool>((test))>)> \
SEMI_STATIC_JOIN(__StaticAssertTypedef__, __LINE__)
+#endif
+
+#ifdef DEBUG
+#ifndef OPTIMIZED_DEBUG
+#define ENABLE_SLOW_ASSERTS 1
+#endif
+#endif
+namespace v8 {
+namespace internal {
+#ifdef ENABLE_SLOW_ASSERTS
+#define SLOW_ASSERT(condition) \
+ CHECK(!v8::internal::FLAG_enable_slow_asserts || (condition))
extern bool FLAG_enable_slow_asserts;
+#else
+#define SLOW_ASSERT(condition) ((void) 0)
+const bool FLAG_enable_slow_asserts = false;
+#endif
+} // namespace internal
+} // namespace v8
// The ASSERT macro is equivalent to CHECK except that it only
@@ -264,7 +302,6 @@ extern bool FLAG_enable_slow_asserts;
#define ASSERT_GE(v1, v2) CHECK_GE(v1, v2)
#define ASSERT_LT(v1, v2) CHECK_LT(v1, v2)
#define ASSERT_LE(v1, v2) CHECK_LE(v1, v2)
-#define SLOW_ASSERT(condition) CHECK(!FLAG_enable_slow_asserts || (condition))
#else
#define ASSERT_RESULT(expr) (expr)
#define ASSERT(condition) ((void) 0)
@@ -273,7 +310,6 @@ extern bool FLAG_enable_slow_asserts;
#define ASSERT_GE(v1, v2) ((void) 0)
#define ASSERT_LT(v1, v2) ((void) 0)
#define ASSERT_LE(v1, v2) ((void) 0)
-#define SLOW_ASSERT(condition) ((void) 0)
#endif
// Static asserts has no impact on runtime performance, so they can be
// safely enabled in release mode. Moreover, the ((void) 0) expression
diff --git a/deps/v8/src/circular-queue-inl.h b/deps/v8/src/circular-queue-inl.h
index 373bf6092..dfb703157 100644
--- a/deps/v8/src/circular-queue-inl.h
+++ b/deps/v8/src/circular-queue-inl.h
@@ -33,21 +33,60 @@
namespace v8 {
namespace internal {
+template<typename T, unsigned L>
+SamplingCircularQueue<T, L>::SamplingCircularQueue()
+ : enqueue_pos_(buffer_),
+ dequeue_pos_(buffer_) {
+}
+
-void* SamplingCircularQueue::Enqueue() {
- WrapPositionIfNeeded(&producer_pos_->enqueue_pos);
- void* result = producer_pos_->enqueue_pos;
- producer_pos_->enqueue_pos += record_size_;
- return result;
+template<typename T, unsigned L>
+SamplingCircularQueue<T, L>::~SamplingCircularQueue() {
}
-void SamplingCircularQueue::WrapPositionIfNeeded(
- SamplingCircularQueue::Cell** pos) {
- if (**pos == kEnd) *pos = buffer_;
+template<typename T, unsigned L>
+T* SamplingCircularQueue<T, L>::Peek() {
+ MemoryBarrier();
+ if (Acquire_Load(&dequeue_pos_->marker) == kFull) {
+ return &dequeue_pos_->record;
+ }
+ return NULL;
}
+template<typename T, unsigned L>
+void SamplingCircularQueue<T, L>::Remove() {
+ Release_Store(&dequeue_pos_->marker, kEmpty);
+ dequeue_pos_ = Next(dequeue_pos_);
+}
+
+
+template<typename T, unsigned L>
+T* SamplingCircularQueue<T, L>::StartEnqueue() {
+ MemoryBarrier();
+ if (Acquire_Load(&enqueue_pos_->marker) == kEmpty) {
+ return &enqueue_pos_->record;
+ }
+ return NULL;
+}
+
+
+template<typename T, unsigned L>
+void SamplingCircularQueue<T, L>::FinishEnqueue() {
+ Release_Store(&enqueue_pos_->marker, kFull);
+ enqueue_pos_ = Next(enqueue_pos_);
+}
+
+
+template<typename T, unsigned L>
+typename SamplingCircularQueue<T, L>::Entry* SamplingCircularQueue<T, L>::Next(
+ Entry* entry) {
+ Entry* next = entry + 1;
+ if (next == &buffer_[L]) return buffer_;
+ return next;
+}
+
} } // namespace v8::internal
#endif // V8_CIRCULAR_QUEUE_INL_H_
diff --git a/deps/v8/src/circular-queue.cc b/deps/v8/src/circular-queue.cc
deleted file mode 100644
index 928c3f0c0..000000000
--- a/deps/v8/src/circular-queue.cc
+++ /dev/null
@@ -1,122 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "circular-queue-inl.h"
-
-namespace v8 {
-namespace internal {
-
-
-SamplingCircularQueue::SamplingCircularQueue(int record_size_in_bytes,
- int desired_chunk_size_in_bytes,
- int buffer_size_in_chunks)
- : record_size_(record_size_in_bytes / sizeof(Cell)),
- chunk_size_in_bytes_(desired_chunk_size_in_bytes / record_size_in_bytes *
- record_size_in_bytes),
- chunk_size_(chunk_size_in_bytes_ / sizeof(Cell)),
- buffer_size_(chunk_size_ * buffer_size_in_chunks),
- // The distance ensures that producer and consumer never step on
- // each other's chunks and helps eviction of produced data from
- // the CPU cache (having that chunk size is bigger than the cache.)
- producer_consumer_distance_(2 * chunk_size_),
- buffer_(NewArray<Cell>(buffer_size_ + 1)) {
- ASSERT(buffer_size_in_chunks > 2);
- // Clean up the whole buffer to avoid encountering a random kEnd
- // while enqueuing.
- for (int i = 0; i < buffer_size_; ++i) {
- buffer_[i] = kClear;
- }
- buffer_[buffer_size_] = kEnd;
-
- // Layout producer and consumer position pointers each on their own
- // cache lines to avoid cache lines thrashing due to simultaneous
- // updates of positions by different processor cores.
- const int positions_size =
- RoundUp(1, kProcessorCacheLineSize) +
- RoundUp(static_cast<int>(sizeof(ProducerPosition)),
- kProcessorCacheLineSize) +
- RoundUp(static_cast<int>(sizeof(ConsumerPosition)),
- kProcessorCacheLineSize);
- positions_ = NewArray<byte>(positions_size);
-
- producer_pos_ = reinterpret_cast<ProducerPosition*>(
- RoundUp(positions_, kProcessorCacheLineSize));
- producer_pos_->enqueue_pos = buffer_;
-
- consumer_pos_ = reinterpret_cast<ConsumerPosition*>(
- reinterpret_cast<byte*>(producer_pos_) + kProcessorCacheLineSize);
- ASSERT(reinterpret_cast<byte*>(consumer_pos_ + 1) <=
- positions_ + positions_size);
- consumer_pos_->dequeue_chunk_pos = buffer_;
- consumer_pos_->dequeue_chunk_poll_pos = buffer_ + producer_consumer_distance_;
- consumer_pos_->dequeue_pos = NULL;
-}
-
-
-SamplingCircularQueue::~SamplingCircularQueue() {
- DeleteArray(positions_);
- DeleteArray(buffer_);
-}
-
-
-void* SamplingCircularQueue::StartDequeue() {
- if (consumer_pos_->dequeue_pos != NULL) {
- return consumer_pos_->dequeue_pos;
- } else {
- if (*consumer_pos_->dequeue_chunk_poll_pos != kClear) {
- consumer_pos_->dequeue_pos = consumer_pos_->dequeue_chunk_pos;
- consumer_pos_->dequeue_end_pos = consumer_pos_->dequeue_pos + chunk_size_;
- return consumer_pos_->dequeue_pos;
- } else {
- return NULL;
- }
- }
-}
-
-
-void SamplingCircularQueue::FinishDequeue() {
- consumer_pos_->dequeue_pos += record_size_;
- if (consumer_pos_->dequeue_pos < consumer_pos_->dequeue_end_pos) return;
- // Move to next chunk.
- consumer_pos_->dequeue_pos = NULL;
- *consumer_pos_->dequeue_chunk_pos = kClear;
- consumer_pos_->dequeue_chunk_pos += chunk_size_;
- WrapPositionIfNeeded(&consumer_pos_->dequeue_chunk_pos);
- consumer_pos_->dequeue_chunk_poll_pos += chunk_size_;
- WrapPositionIfNeeded(&consumer_pos_->dequeue_chunk_poll_pos);
-}
-
-
-void SamplingCircularQueue::FlushResidualRecords() {
- // Eliminate producer / consumer distance.
- consumer_pos_->dequeue_chunk_poll_pos = consumer_pos_->dequeue_chunk_pos;
-}
-
-
-} } // namespace v8::internal
diff --git a/deps/v8/src/circular-queue.h b/deps/v8/src/circular-queue.h
index 73afc6831..94bc89e7d 100644
--- a/deps/v8/src/circular-queue.h
+++ b/deps/v8/src/circular-queue.h
@@ -28,6 +28,8 @@
#ifndef V8_CIRCULAR_QUEUE_H_
#define V8_CIRCULAR_QUEUE_H_
+#include "v8globals.h"
+
namespace v8 {
namespace internal {
@@ -35,64 +37,49 @@ namespace internal {
// Lock-free cache-friendly sampling circular queue for large
// records. Intended for fast transfer of large records between a
// single producer and a single consumer. If the queue is full,
-// previous unread records are overwritten. The queue is designed with
+// StartEnqueue will return NULL. The queue is designed with
// a goal in mind to evade cache lines thrashing by preventing
// simultaneous reads and writes to adjacent memory locations.
-//
-// IMPORTANT: as a producer never checks for chunks cleanness, it is
-// possible that it can catch up and overwrite a chunk that a consumer
-// is currently reading, resulting in a corrupt record being read.
+template<typename T, unsigned Length>
class SamplingCircularQueue {
public:
// Executed on the application thread.
- SamplingCircularQueue(int record_size_in_bytes,
- int desired_chunk_size_in_bytes,
- int buffer_size_in_chunks);
+ SamplingCircularQueue();
~SamplingCircularQueue();
- // Enqueue returns a pointer to a memory location for storing the next
- // record.
- INLINE(void* Enqueue());
+ // StartEnqueue returns a pointer to a memory location for storing the next
+ // record or NULL if all entries are full at the moment.
+ T* StartEnqueue();
+ // Notifies the queue that the producer has completed writing data into the
+ // memory returned by StartEnqueue and it can be passed to the consumer.
+ void FinishEnqueue();
// Executed on the consumer (analyzer) thread.
- // StartDequeue returns a pointer to a memory location for retrieving
- // the next record. After the record had been read by a consumer,
- // FinishDequeue must be called. Until that moment, subsequent calls
- // to StartDequeue will return the same pointer.
- void* StartDequeue();
- void FinishDequeue();
- // Due to a presence of slipping between the producer and the consumer,
- // the queue must be notified whether producing has been finished in order
- // to process remaining records from the buffer.
- void FlushResidualRecords();
-
- typedef AtomicWord Cell;
- // Reserved values for the first cell of a record.
- static const Cell kClear = 0; // Marks clean (processed) chunks.
- static const Cell kEnd = -1; // Marks the end of the buffer.
+ // Retrieves, but does not remove, the head of this queue, returning NULL
+ // if this queue is empty. After the record has been read by a consumer,
+ // Remove must be called.
+ T* Peek();
+ void Remove();
private:
- struct ProducerPosition {
- Cell* enqueue_pos;
+ // Reserved values for the entry marker.
+ enum {
+ kEmpty, // Marks clean (processed) entries.
+ kFull // Marks entries already filled by the producer but not yet
+ // completely processed by the consumer.
};
- struct ConsumerPosition {
- Cell* dequeue_chunk_pos;
- Cell* dequeue_chunk_poll_pos;
- Cell* dequeue_pos;
- Cell* dequeue_end_pos;
+
+ struct V8_ALIGNED(PROCESSOR_CACHE_LINE_SIZE) Entry {
+ Entry() : marker(kEmpty) {}
+ T record;
+ Atomic32 marker;
};
- INLINE(void WrapPositionIfNeeded(Cell** pos));
+ Entry* Next(Entry* entry);
- const int record_size_;
- const int chunk_size_in_bytes_;
- const int chunk_size_;
- const int buffer_size_;
- const int producer_consumer_distance_;
- Cell* buffer_;
- byte* positions_;
- ProducerPosition* producer_pos_;
- ConsumerPosition* consumer_pos_;
+ Entry buffer_[Length];
+ V8_ALIGNED(PROCESSOR_CACHE_LINE_SIZE) Entry* enqueue_pos_;
+ V8_ALIGNED(PROCESSOR_CACHE_LINE_SIZE) Entry* dequeue_pos_;
DISALLOW_COPY_AND_ASSIGN(SamplingCircularQueue);
};
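
For illustration only: a single-threaded, self-contained sketch of the new fixed-length queue protocol shown above, with per-entry kEmpty/kFull markers, StartEnqueue() returning NULL when the next slot is still full, and Peek()/Remove() on the consumer side. The atomics, memory barriers and cache-line alignment of the real SamplingCircularQueue are deliberately omitted.

#include <cstdio>

template <typename T, unsigned Length>
class TinySamplingQueue {
 public:
  T* StartEnqueue() {
    return buffer_[enqueue_].marker == kEmpty ? &buffer_[enqueue_].record : nullptr;
  }
  void FinishEnqueue() {
    buffer_[enqueue_].marker = kFull;
    enqueue_ = (enqueue_ + 1) % Length;
  }
  T* Peek() {
    return buffer_[dequeue_].marker == kFull ? &buffer_[dequeue_].record : nullptr;
  }
  void Remove() {
    buffer_[dequeue_].marker = kEmpty;
    dequeue_ = (dequeue_ + 1) % Length;
  }

 private:
  enum Marker { kEmpty, kFull };
  struct Entry { T record; Marker marker = kEmpty; };
  Entry buffer_[Length];
  unsigned enqueue_ = 0;
  unsigned dequeue_ = 0;
};

int main() {
  TinySamplingQueue<int, 4> queue;
  for (int i = 0; i < 6; ++i) {            // producer: samples 4 and 5 are dropped
    if (int* slot = queue.StartEnqueue()) { *slot = i; queue.FinishEnqueue(); }
  }
  while (int* record = queue.Peek()) {      // consumer: prints 0 1 2 3
    std::printf("%d ", *record);
    queue.Remove();
  }
  std::printf("\n");
  return 0;
}
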
diff --git a/deps/v8/src/code-stubs-hydrogen.cc b/deps/v8/src/code-stubs-hydrogen.cc
new file mode 100644
index 000000000..dfa5ecd8c
--- /dev/null
+++ b/deps/v8/src/code-stubs-hydrogen.cc
@@ -0,0 +1,1266 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "code-stubs.h"
+#include "hydrogen.h"
+#include "lithium.h"
+
+namespace v8 {
+namespace internal {
+
+
+static LChunk* OptimizeGraph(HGraph* graph) {
+ DisallowHeapAllocation no_allocation;
+ DisallowHandleAllocation no_handles;
+ DisallowHandleDereference no_deref;
+
+ ASSERT(graph != NULL);
+ BailoutReason bailout_reason = kNoReason;
+ if (!graph->Optimize(&bailout_reason)) {
+ FATAL(GetBailoutReason(bailout_reason));
+ }
+ LChunk* chunk = LChunk::NewChunk(graph);
+ if (chunk == NULL) {
+ FATAL(GetBailoutReason(graph->info()->bailout_reason()));
+ }
+ return chunk;
+}
+
+
+class CodeStubGraphBuilderBase : public HGraphBuilder {
+ public:
+ CodeStubGraphBuilderBase(Isolate* isolate, HydrogenCodeStub* stub)
+ : HGraphBuilder(&info_),
+ arguments_length_(NULL),
+ info_(stub, isolate),
+ context_(NULL) {
+ descriptor_ = stub->GetInterfaceDescriptor(isolate);
+ parameters_.Reset(new HParameter*[descriptor_->register_param_count_]);
+ }
+ virtual bool BuildGraph();
+
+ protected:
+ virtual HValue* BuildCodeStub() = 0;
+ HParameter* GetParameter(int parameter) {
+ ASSERT(parameter < descriptor_->register_param_count_);
+ return parameters_[parameter];
+ }
+ HValue* GetArgumentsLength() {
+ // This is initialized in BuildGraph()
+ ASSERT(arguments_length_ != NULL);
+ return arguments_length_;
+ }
+ CompilationInfo* info() { return &info_; }
+ HydrogenCodeStub* stub() { return info_.code_stub(); }
+ HContext* context() { return context_; }
+ Isolate* isolate() { return info_.isolate(); }
+
+ class ArrayContextChecker {
+ public:
+ ArrayContextChecker(HGraphBuilder* builder, HValue* constructor,
+ HValue* array_function)
+ : checker_(builder) {
+ checker_.If<HCompareObjectEqAndBranch, HValue*>(constructor,
+ array_function);
+ checker_.Then();
+ }
+
+ ~ArrayContextChecker() {
+ checker_.ElseDeopt("Array constructor called from different context");
+ checker_.End();
+ }
+ private:
+ IfBuilder checker_;
+ };
+
+ enum ArgumentClass {
+ NONE,
+ SINGLE,
+ MULTIPLE
+ };
+
+ HValue* BuildArrayConstructor(ElementsKind kind,
+ ContextCheckMode context_mode,
+ AllocationSiteOverrideMode override_mode,
+ ArgumentClass argument_class);
+ HValue* BuildInternalArrayConstructor(ElementsKind kind,
+ ArgumentClass argument_class);
+
+ void BuildInstallOptimizedCode(HValue* js_function, HValue* native_context,
+ HValue* code_object);
+ void BuildInstallCode(HValue* js_function, HValue* shared_info);
+ void BuildInstallFromOptimizedCodeMap(HValue* js_function,
+ HValue* shared_info,
+ HValue* native_context);
+
+ private:
+ HValue* BuildArraySingleArgumentConstructor(JSArrayBuilder* builder);
+ HValue* BuildArrayNArgumentsConstructor(JSArrayBuilder* builder,
+ ElementsKind kind);
+
+ SmartArrayPointer<HParameter*> parameters_;
+ HValue* arguments_length_;
+ CompilationInfoWithZone info_;
+ CodeStubInterfaceDescriptor* descriptor_;
+ HContext* context_;
+};
+
+
+bool CodeStubGraphBuilderBase::BuildGraph() {
+ // Update the static counter each time a new code stub is generated.
+ isolate()->counters()->code_stubs()->Increment();
+
+ if (FLAG_trace_hydrogen_stubs) {
+ const char* name = CodeStub::MajorName(stub()->MajorKey(), false);
+ PrintF("-----------------------------------------------------------\n");
+ PrintF("Compiling stub %s using hydrogen\n", name);
+ isolate()->GetHTracer()->TraceCompilation(&info_);
+ }
+
+ int param_count = descriptor_->register_param_count_;
+ HEnvironment* start_environment = graph()->start_environment();
+ HBasicBlock* next_block = CreateBasicBlock(start_environment);
+ Goto(next_block);
+ next_block->SetJoinId(BailoutId::StubEntry());
+ set_current_block(next_block);
+
+ for (int i = 0; i < param_count; ++i) {
+ HParameter* param =
+ Add<HParameter>(i, HParameter::REGISTER_PARAMETER);
+ start_environment->Bind(i, param);
+ parameters_[i] = param;
+ }
+
+ HInstruction* stack_parameter_count;
+ if (descriptor_->stack_parameter_count_.is_valid()) {
+ ASSERT(descriptor_->environment_length() == (param_count + 1));
+ stack_parameter_count = New<HParameter>(param_count,
+ HParameter::REGISTER_PARAMETER,
+ Representation::Integer32());
+ stack_parameter_count->set_type(HType::Smi());
+ // It's essential to bind this value to the environment in case of deopt.
+ AddInstruction(stack_parameter_count);
+ start_environment->Bind(param_count, stack_parameter_count);
+ arguments_length_ = stack_parameter_count;
+ } else {
+ ASSERT(descriptor_->environment_length() == param_count);
+ stack_parameter_count = graph()->GetConstantMinus1();
+ arguments_length_ = graph()->GetConstant0();
+ }
+
+ context_ = Add<HContext>();
+ start_environment->BindContext(context_);
+
+ Add<HSimulate>(BailoutId::StubEntry());
+
+ NoObservableSideEffectsScope no_effects(this);
+
+ HValue* return_value = BuildCodeStub();
+
+ // We might have extra expressions to pop from the stack in addition to the
+ // arguments above.
+ HInstruction* stack_pop_count = stack_parameter_count;
+ if (descriptor_->function_mode_ == JS_FUNCTION_STUB_MODE) {
+ if (!stack_parameter_count->IsConstant() &&
+ descriptor_->hint_stack_parameter_count_ < 0) {
+ HInstruction* amount = graph()->GetConstant1();
+ stack_pop_count = Add<HAdd>(stack_parameter_count, amount);
+ stack_pop_count->ChangeRepresentation(Representation::Integer32());
+ stack_pop_count->ClearFlag(HValue::kCanOverflow);
+ } else {
+ int count = descriptor_->hint_stack_parameter_count_;
+ stack_pop_count = Add<HConstant>(count);
+ }
+ }
+
+ if (current_block() != NULL) {
+ HReturn* hreturn_instruction = New<HReturn>(return_value,
+ stack_pop_count);
+ FinishCurrentBlock(hreturn_instruction);
+ }
+ return true;
+}
+
+
+template <class Stub>
+class CodeStubGraphBuilder: public CodeStubGraphBuilderBase {
+ public:
+ explicit CodeStubGraphBuilder(Isolate* isolate, Stub* stub)
+ : CodeStubGraphBuilderBase(isolate, stub) {}
+
+ protected:
+ virtual HValue* BuildCodeStub() {
+ if (casted_stub()->IsUninitialized()) {
+ return BuildCodeUninitializedStub();
+ } else {
+ return BuildCodeInitializedStub();
+ }
+ }
+
+ virtual HValue* BuildCodeInitializedStub() {
+ UNIMPLEMENTED();
+ return NULL;
+ }
+
+ virtual HValue* BuildCodeUninitializedStub() {
+ // Force a deopt that falls back to the runtime.
+ HValue* undefined = graph()->GetConstantUndefined();
+ IfBuilder builder(this);
+ builder.IfNot<HCompareObjectEqAndBranch, HValue*>(undefined, undefined);
+ builder.Then();
+ builder.ElseDeopt("Forced deopt to runtime");
+ return undefined;
+ }
+
+ Stub* casted_stub() { return static_cast<Stub*>(stub()); }
+};
+
+
+Handle<Code> HydrogenCodeStub::GenerateLightweightMissCode(Isolate* isolate) {
+ Factory* factory = isolate->factory();
+
+ // Generate the new code.
+ MacroAssembler masm(isolate, NULL, 256);
+
+ {
+ // Update the static counter each time a new code stub is generated.
+ isolate->counters()->code_stubs()->Increment();
+
+ // Nested stubs are not allowed for leaves.
+ AllowStubCallsScope allow_scope(&masm, false);
+
+ // Generate the code for the stub.
+ masm.set_generating_stub(true);
+ NoCurrentFrameScope scope(&masm);
+ GenerateLightweightMiss(&masm);
+ }
+
+ // Create the code object.
+ CodeDesc desc;
+ masm.GetCode(&desc);
+
+ // Copy the generated code into a heap object.
+ Code::Flags flags = Code::ComputeFlags(
+ GetCodeKind(),
+ GetICState(),
+ GetExtraICState(),
+ GetStubType(),
+ GetStubFlags());
+ Handle<Code> new_object = factory->NewCode(
+ desc, flags, masm.CodeObject(), NeedsImmovableCode());
+ return new_object;
+}
+
+
+template <class Stub>
+static Handle<Code> DoGenerateCode(Isolate* isolate, Stub* stub) {
+ CodeStub::Major major_key =
+ static_cast<HydrogenCodeStub*>(stub)->MajorKey();
+ CodeStubInterfaceDescriptor* descriptor =
+ isolate->code_stub_interface_descriptor(major_key);
+ if (descriptor->register_param_count_ < 0) {
+ stub->InitializeInterfaceDescriptor(isolate, descriptor);
+ }
+
+ // If we are uninitialized we can use a light-weight stub to enter
+ // the runtime that is significantly faster than using the standard
+ // stub-failure deopt mechanism.
+ if (stub->IsUninitialized() && descriptor->has_miss_handler()) {
+ ASSERT(!descriptor->stack_parameter_count_.is_valid());
+ return stub->GenerateLightweightMissCode(isolate);
+ }
+ ElapsedTimer timer;
+ if (FLAG_profile_hydrogen_code_stub_compilation) {
+ timer.Start();
+ }
+ CodeStubGraphBuilder<Stub> builder(isolate, stub);
+ LChunk* chunk = OptimizeGraph(builder.CreateGraph());
+ Handle<Code> code = chunk->Codegen();
+ if (FLAG_profile_hydrogen_code_stub_compilation) {
+ double ms = timer.Elapsed().InMillisecondsF();
+ PrintF("[Lazy compilation of %s took %0.3f ms]\n", *stub->GetName(), ms);
+ }
+ return code;
+}
+
+
+template <>
+HValue* CodeStubGraphBuilder<ToNumberStub>::BuildCodeStub() {
+ HValue* value = GetParameter(0);
+
+ // Check if the parameter is already a SMI or heap number.
+ IfBuilder if_number(this);
+ if_number.If<HIsSmiAndBranch>(value);
+ if_number.OrIf<HCompareMap>(value, isolate()->factory()->heap_number_map());
+ if_number.Then();
+
+ // Return the number.
+ Push(value);
+
+ if_number.Else();
+
+ // Convert the parameter to number using the builtin.
+ HValue* function = AddLoadJSBuiltin(Builtins::TO_NUMBER);
+ Add<HPushArgument>(value);
+ Push(Add<HInvokeFunction>(function, 1));
+
+ if_number.End();
+
+ return Pop();
+}
+
+
+Handle<Code> ToNumberStub::GenerateCode(Isolate* isolate) {
+ return DoGenerateCode(isolate, this);
+}
+
+
+template <>
+HValue* CodeStubGraphBuilder<NumberToStringStub>::BuildCodeStub() {
+ info()->MarkAsSavesCallerDoubles();
+ HValue* number = GetParameter(NumberToStringStub::kNumber);
+ return BuildNumberToString(number, handle(Type::Number(), isolate()));
+}
+
+
+Handle<Code> NumberToStringStub::GenerateCode(Isolate* isolate) {
+ return DoGenerateCode(isolate, this);
+}
+
+
+template <>
+HValue* CodeStubGraphBuilder<FastCloneShallowArrayStub>::BuildCodeStub() {
+ Factory* factory = isolate()->factory();
+ HValue* undefined = graph()->GetConstantUndefined();
+ AllocationSiteMode alloc_site_mode = casted_stub()->allocation_site_mode();
+ FastCloneShallowArrayStub::Mode mode = casted_stub()->mode();
+ int length = casted_stub()->length();
+
+ HInstruction* allocation_site = Add<HLoadKeyed>(GetParameter(0),
+ GetParameter(1),
+ static_cast<HValue*>(NULL),
+ FAST_ELEMENTS);
+ IfBuilder checker(this);
+ checker.IfNot<HCompareObjectEqAndBranch, HValue*>(allocation_site,
+ undefined);
+ checker.Then();
+
+ HObjectAccess access = HObjectAccess::ForAllocationSiteOffset(
+ AllocationSite::kTransitionInfoOffset);
+ HInstruction* boilerplate = Add<HLoadNamedField>(allocation_site, access);
+ HValue* push_value;
+ if (mode == FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS) {
+ HValue* elements = AddLoadElements(boilerplate);
+
+ IfBuilder if_fixed_cow(this);
+ if_fixed_cow.If<HCompareMap>(elements, factory->fixed_cow_array_map());
+ if_fixed_cow.Then();
+ push_value = BuildCloneShallowArray(boilerplate,
+ allocation_site,
+ alloc_site_mode,
+ FAST_ELEMENTS,
+ 0/*copy-on-write*/);
+ environment()->Push(push_value);
+ if_fixed_cow.Else();
+
+ IfBuilder if_fixed(this);
+ if_fixed.If<HCompareMap>(elements, factory->fixed_array_map());
+ if_fixed.Then();
+ push_value = BuildCloneShallowArray(boilerplate,
+ allocation_site,
+ alloc_site_mode,
+ FAST_ELEMENTS,
+ length);
+ environment()->Push(push_value);
+ if_fixed.Else();
+ push_value = BuildCloneShallowArray(boilerplate,
+ allocation_site,
+ alloc_site_mode,
+ FAST_DOUBLE_ELEMENTS,
+ length);
+ environment()->Push(push_value);
+ } else {
+ ElementsKind elements_kind = casted_stub()->ComputeElementsKind();
+ push_value = BuildCloneShallowArray(boilerplate,
+ allocation_site,
+ alloc_site_mode,
+ elements_kind,
+ length);
+ environment()->Push(push_value);
+ }
+
+ checker.ElseDeopt("Uninitialized boilerplate literals");
+ checker.End();
+
+ return environment()->Pop();
+}
+
+
+Handle<Code> FastCloneShallowArrayStub::GenerateCode(Isolate* isolate) {
+ return DoGenerateCode(isolate, this);
+}
+
+
+template <>
+HValue* CodeStubGraphBuilder<FastCloneShallowObjectStub>::BuildCodeStub() {
+ HValue* undefined = graph()->GetConstantUndefined();
+
+ HInstruction* allocation_site = Add<HLoadKeyed>(GetParameter(0),
+ GetParameter(1),
+ static_cast<HValue*>(NULL),
+ FAST_ELEMENTS);
+
+ IfBuilder checker(this);
+ checker.IfNot<HCompareObjectEqAndBranch, HValue*>(allocation_site,
+ undefined);
+ checker.And();
+
+ HObjectAccess access = HObjectAccess::ForAllocationSiteOffset(
+ AllocationSite::kTransitionInfoOffset);
+ HInstruction* boilerplate = Add<HLoadNamedField>(allocation_site, access);
+
+ int size = JSObject::kHeaderSize + casted_stub()->length() * kPointerSize;
+ int object_size = size;
+ if (FLAG_allocation_site_pretenuring) {
+ size += AllocationMemento::kSize;
+ }
+
+ HValue* boilerplate_map = Add<HLoadNamedField>(
+ boilerplate, HObjectAccess::ForMap());
+ HValue* boilerplate_size = Add<HLoadNamedField>(
+ boilerplate_map, HObjectAccess::ForMapInstanceSize());
+ HValue* size_in_words = Add<HConstant>(object_size >> kPointerSizeLog2);
+ checker.If<HCompareNumericAndBranch>(boilerplate_size,
+ size_in_words, Token::EQ);
+ checker.Then();
+
+ HValue* size_in_bytes = Add<HConstant>(size);
+
+ HInstruction* object = Add<HAllocate>(size_in_bytes, HType::JSObject(),
+ isolate()->heap()->GetPretenureMode(), JS_OBJECT_TYPE);
+
+ for (int i = 0; i < object_size; i += kPointerSize) {
+ HObjectAccess access = HObjectAccess::ForJSObjectOffset(i);
+ Add<HStoreNamedField>(object, access,
+ Add<HLoadNamedField>(boilerplate, access));
+ }
+
+ ASSERT(FLAG_allocation_site_pretenuring || (size == object_size));
+ if (FLAG_allocation_site_pretenuring) {
+ BuildCreateAllocationMemento(object, object_size, allocation_site);
+ }
+
+ environment()->Push(object);
+ checker.ElseDeopt("Uninitialized boilerplate in fast clone");
+ checker.End();
+
+ return environment()->Pop();
+}
+
+
+Handle<Code> FastCloneShallowObjectStub::GenerateCode(Isolate* isolate) {
+ return DoGenerateCode(isolate, this);
+}
+
+
+template <>
+HValue* CodeStubGraphBuilder<CreateAllocationSiteStub>::BuildCodeStub() {
+ HValue* size = Add<HConstant>(AllocationSite::kSize);
+ HInstruction* object = Add<HAllocate>(size, HType::JSObject(), TENURED,
+ JS_OBJECT_TYPE);
+
+ // Store the map
+ Handle<Map> allocation_site_map = isolate()->factory()->allocation_site_map();
+ AddStoreMapConstant(object, allocation_site_map);
+
+ // Store the payload (smi elements kind)
+ HValue* initial_elements_kind = Add<HConstant>(GetInitialFastElementsKind());
+ Add<HStoreNamedField>(object,
+ HObjectAccess::ForAllocationSiteOffset(
+ AllocationSite::kTransitionInfoOffset),
+ initial_elements_kind);
+
+ // Unlike literals, constructed arrays don't have nested sites
+ Add<HStoreNamedField>(object,
+ HObjectAccess::ForAllocationSiteOffset(
+ AllocationSite::kNestedSiteOffset),
+ graph()->GetConstant0());
+
+ // Store an empty fixed array for the code dependency.
+ HConstant* empty_fixed_array =
+ Add<HConstant>(isolate()->factory()->empty_fixed_array());
+ HStoreNamedField* store = Add<HStoreNamedField>(
+ object,
+ HObjectAccess::ForAllocationSiteOffset(
+ AllocationSite::kDependentCodeOffset),
+ empty_fixed_array);
+
+ // Link the object to the allocation site list
+ HValue* site_list = Add<HConstant>(
+ ExternalReference::allocation_sites_list_address(isolate()));
+ HValue* site = Add<HLoadNamedField>(site_list,
+ HObjectAccess::ForAllocationSiteList());
+ store = Add<HStoreNamedField>(object,
+ HObjectAccess::ForAllocationSiteOffset(AllocationSite::kWeakNextOffset),
+ site);
+ store->SkipWriteBarrier();
+ Add<HStoreNamedField>(site_list, HObjectAccess::ForAllocationSiteList(),
+ object);
+
+ // We use a hammer (SkipWriteBarrier()) to indicate that we know the input
+ // cell is really a Cell, and so no write barrier is needed.
+ // TODO(mvstanton): Add a debug_code check to verify the input cell is really
+ // a cell. (perhaps with a new instruction, HAssert).
+ HInstruction* cell = GetParameter(0);
+ HObjectAccess access = HObjectAccess::ForCellValue();
+ store = Add<HStoreNamedField>(cell, access, object);
+ store->SkipWriteBarrier();
+ return cell;
+}
+
+
+Handle<Code> CreateAllocationSiteStub::GenerateCode(Isolate* isolate) {
+ return DoGenerateCode(isolate, this);
+}
+
+
+template <>
+HValue* CodeStubGraphBuilder<KeyedLoadFastElementStub>::BuildCodeStub() {
+ HInstruction* load = BuildUncheckedMonomorphicElementAccess(
+ GetParameter(0), GetParameter(1), NULL,
+ casted_stub()->is_js_array(), casted_stub()->elements_kind(),
+ false, NEVER_RETURN_HOLE, STANDARD_STORE);
+ return load;
+}
+
+
+Handle<Code> KeyedLoadFastElementStub::GenerateCode(Isolate* isolate) {
+ return DoGenerateCode(isolate, this);
+}
+
+
+template<>
+HValue* CodeStubGraphBuilder<LoadFieldStub>::BuildCodeStub() {
+ Representation rep = casted_stub()->representation();
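+ // In-object fields live inside the JSObject itself; the rest live in the
+ // out-of-line properties backing store.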
+ HObjectAccess access = casted_stub()->is_inobject() ?
+ HObjectAccess::ForJSObjectOffset(casted_stub()->offset(), rep) :
+ HObjectAccess::ForBackingStoreOffset(casted_stub()->offset(), rep);
+ return AddLoadNamedField(GetParameter(0), access);
+}
+
+
+Handle<Code> LoadFieldStub::GenerateCode(Isolate* isolate) {
+ return DoGenerateCode(isolate, this);
+}
+
+
+template<>
+HValue* CodeStubGraphBuilder<KeyedLoadFieldStub>::BuildCodeStub() {
+ Representation rep = casted_stub()->representation();
+ HObjectAccess access = casted_stub()->is_inobject() ?
+ HObjectAccess::ForJSObjectOffset(casted_stub()->offset(), rep) :
+ HObjectAccess::ForBackingStoreOffset(casted_stub()->offset(), rep);
+ return AddLoadNamedField(GetParameter(0), access);
+}
+
+
+Handle<Code> KeyedLoadFieldStub::GenerateCode(Isolate* isolate) {
+ return DoGenerateCode(isolate, this);
+}
+
+
+template <>
+HValue* CodeStubGraphBuilder<KeyedStoreFastElementStub>::BuildCodeStub() {
+ BuildUncheckedMonomorphicElementAccess(
+ GetParameter(0), GetParameter(1), GetParameter(2),
+ casted_stub()->is_js_array(), casted_stub()->elements_kind(),
+ true, NEVER_RETURN_HOLE, casted_stub()->store_mode());
+
+ return GetParameter(2);
+}
+
+
+Handle<Code> KeyedStoreFastElementStub::GenerateCode(Isolate* isolate) {
+ return DoGenerateCode(isolate, this);
+}
+
+
+template <>
+HValue* CodeStubGraphBuilder<TransitionElementsKindStub>::BuildCodeStub() {
+ info()->MarkAsSavesCallerDoubles();
+
+ BuildTransitionElementsKind(GetParameter(0),
+ GetParameter(1),
+ casted_stub()->from_kind(),
+ casted_stub()->to_kind(),
+ true);
+
+ return GetParameter(0);
+}
+
+
+Handle<Code> TransitionElementsKindStub::GenerateCode(Isolate* isolate) {
+ return DoGenerateCode(isolate, this);
+}
+
+HValue* CodeStubGraphBuilderBase::BuildArrayConstructor(
+ ElementsKind kind,
+ ContextCheckMode context_mode,
+ AllocationSiteOverrideMode override_mode,
+ ArgumentClass argument_class) {
+ HValue* constructor = GetParameter(ArrayConstructorStubBase::kConstructor);
+ if (context_mode == CONTEXT_CHECK_REQUIRED) {
+ HInstruction* array_function = BuildGetArrayFunction();
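+ // The checker deoptimizes unless the constructor being called is the Array
+ // function of the current native context.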
+ ArrayContextChecker checker(this, constructor, array_function);
+ }
+
+ HValue* property_cell = GetParameter(ArrayConstructorStubBase::kPropertyCell);
+ // Walk through the property cell to the AllocationSite
+ HValue* alloc_site = Add<HLoadNamedField>(property_cell,
+ HObjectAccess::ForCellValue());
+ JSArrayBuilder array_builder(this, kind, alloc_site, constructor,
+ override_mode);
+ HValue* result = NULL;
+ switch (argument_class) {
+ case NONE:
+ result = array_builder.AllocateEmptyArray();
+ break;
+ case SINGLE:
+ result = BuildArraySingleArgumentConstructor(&array_builder);
+ break;
+ case MULTIPLE:
+ result = BuildArrayNArgumentsConstructor(&array_builder, kind);
+ break;
+ }
+
+ return result;
+}
+
+
+HValue* CodeStubGraphBuilderBase::BuildInternalArrayConstructor(
+ ElementsKind kind, ArgumentClass argument_class) {
+ HValue* constructor = GetParameter(
+ InternalArrayConstructorStubBase::kConstructor);
+ JSArrayBuilder array_builder(this, kind, constructor);
+
+ HValue* result = NULL;
+ switch (argument_class) {
+ case NONE:
+ result = array_builder.AllocateEmptyArray();
+ break;
+ case SINGLE:
+ result = BuildArraySingleArgumentConstructor(&array_builder);
+ break;
+ case MULTIPLE:
+ result = BuildArrayNArgumentsConstructor(&array_builder, kind);
+ break;
+ }
+ return result;
+}
+
+
+HValue* CodeStubGraphBuilderBase::BuildArraySingleArgumentConstructor(
+ JSArrayBuilder* array_builder) {
+ // Smi check and range check on the input arg.
+ HValue* constant_one = graph()->GetConstant1();
+ HValue* constant_zero = graph()->GetConstant0();
+
+ HInstruction* elements = Add<HArgumentsElements>(false);
+ HInstruction* argument = Add<HAccessArgumentsAt>(
+ elements, constant_one, constant_zero);
+
+ HConstant* max_alloc_length =
+ Add<HConstant>(JSObject::kInitialMaxFastElementArray);
+ const int initial_capacity = JSArray::kPreallocatedArrayElements;
+ HConstant* initial_capacity_node = Add<HConstant>(initial_capacity);
+
+ HInstruction* checked_arg = Add<HBoundsCheck>(argument, max_alloc_length);
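+ // A zero-length request gets the default preallocated capacity; otherwise
+ // the (bounds-checked) argument serves as both capacity and length.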
+ IfBuilder if_builder(this);
+ if_builder.If<HCompareNumericAndBranch>(checked_arg, constant_zero,
+ Token::EQ);
+ if_builder.Then();
+ Push(initial_capacity_node); // capacity
+ Push(constant_zero); // length
+ if_builder.Else();
+ Push(checked_arg); // capacity
+ Push(checked_arg); // length
+ if_builder.End();
+
+ // Figure out total size
+ HValue* length = Pop();
+ HValue* capacity = Pop();
+ return array_builder->AllocateArray(capacity, length, true);
+}
+
+
+HValue* CodeStubGraphBuilderBase::BuildArrayNArgumentsConstructor(
+ JSArrayBuilder* array_builder, ElementsKind kind) {
+ // In the multi-argument case we need to fill a smi array with the hole,
+ // because we might have to bail out while copying arguments into the array
+ // if they are not compatible with a smi array.
+ // If it's a double array there is no problem, and if it's a fast (tagged)
+ // array there is no problem either, because doubles are boxed.
+ HValue* length = GetArgumentsLength();
+ bool fill_with_hole = IsFastSmiElementsKind(kind);
+ HValue* new_object = array_builder->AllocateArray(length,
+ length,
+ fill_with_hole);
+ HValue* elements = array_builder->GetElementsLocation();
+ ASSERT(elements != NULL);
+
+ // Now populate the elements correctly.
+ LoopBuilder builder(this,
+ context(),
+ LoopBuilder::kPostIncrement);
+ HValue* start = graph()->GetConstant0();
+ HValue* key = builder.BeginBody(start, length, Token::LT);
+ HInstruction* argument_elements = Add<HArgumentsElements>(false);
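+ // Fetch the key-th constructor argument and store it into the new elements.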
+ HInstruction* argument = Add<HAccessArgumentsAt>(
+ argument_elements, length, key);
+
+ Add<HStoreKeyed>(elements, key, argument, kind);
+ builder.EndBody();
+ return new_object;
+}
+
+
+template <>
+HValue* CodeStubGraphBuilder<ArrayNoArgumentConstructorStub>::BuildCodeStub() {
+ ElementsKind kind = casted_stub()->elements_kind();
+ ContextCheckMode context_mode = casted_stub()->context_mode();
+ AllocationSiteOverrideMode override_mode = casted_stub()->override_mode();
+ return BuildArrayConstructor(kind, context_mode, override_mode, NONE);
+}
+
+
+Handle<Code> ArrayNoArgumentConstructorStub::GenerateCode(Isolate* isolate) {
+ return DoGenerateCode(isolate, this);
+}
+
+
+template <>
+HValue* CodeStubGraphBuilder<ArraySingleArgumentConstructorStub>::
+ BuildCodeStub() {
+ ElementsKind kind = casted_stub()->elements_kind();
+ ContextCheckMode context_mode = casted_stub()->context_mode();
+ AllocationSiteOverrideMode override_mode = casted_stub()->override_mode();
+ return BuildArrayConstructor(kind, context_mode, override_mode, SINGLE);
+}
+
+
+Handle<Code> ArraySingleArgumentConstructorStub::GenerateCode(
+ Isolate* isolate) {
+ return DoGenerateCode(isolate, this);
+}
+
+
+template <>
+HValue* CodeStubGraphBuilder<ArrayNArgumentsConstructorStub>::BuildCodeStub() {
+ ElementsKind kind = casted_stub()->elements_kind();
+ ContextCheckMode context_mode = casted_stub()->context_mode();
+ AllocationSiteOverrideMode override_mode = casted_stub()->override_mode();
+ return BuildArrayConstructor(kind, context_mode, override_mode, MULTIPLE);
+}
+
+
+Handle<Code> ArrayNArgumentsConstructorStub::GenerateCode(Isolate* isolate) {
+ return DoGenerateCode(isolate, this);
+}
+
+
+template <>
+HValue* CodeStubGraphBuilder<InternalArrayNoArgumentConstructorStub>::
+ BuildCodeStub() {
+ ElementsKind kind = casted_stub()->elements_kind();
+ return BuildInternalArrayConstructor(kind, NONE);
+}
+
+
+Handle<Code> InternalArrayNoArgumentConstructorStub::GenerateCode(
+ Isolate* isolate) {
+ return DoGenerateCode(isolate, this);
+}
+
+
+template <>
+HValue* CodeStubGraphBuilder<InternalArraySingleArgumentConstructorStub>::
+ BuildCodeStub() {
+ ElementsKind kind = casted_stub()->elements_kind();
+ return BuildInternalArrayConstructor(kind, SINGLE);
+}
+
+
+Handle<Code> InternalArraySingleArgumentConstructorStub::GenerateCode(
+ Isolate* isolate) {
+ return DoGenerateCode(isolate, this);
+}
+
+
+template <>
+HValue* CodeStubGraphBuilder<InternalArrayNArgumentsConstructorStub>::
+ BuildCodeStub() {
+ ElementsKind kind = casted_stub()->elements_kind();
+ return BuildInternalArrayConstructor(kind, MULTIPLE);
+}
+
+
+Handle<Code> InternalArrayNArgumentsConstructorStub::GenerateCode(
+ Isolate* isolate) {
+ return DoGenerateCode(isolate, this);
+}
+
+
+template <>
+HValue* CodeStubGraphBuilder<CompareNilICStub>::BuildCodeInitializedStub() {
+ Isolate* isolate = graph()->isolate();
+ CompareNilICStub* stub = casted_stub();
+ HIfContinuation continuation;
+ Handle<Map> sentinel_map(isolate->heap()->meta_map());
+ Handle<Type> type = stub->GetType(isolate, sentinel_map);
+ BuildCompareNil(GetParameter(0), type, &continuation);
+ IfBuilder if_nil(this, &continuation);
+ if_nil.Then();
+ if (continuation.IsFalseReachable()) {
+ if_nil.Else();
+ if_nil.Return(graph()->GetConstant0());
+ }
+ if_nil.End();
+ return continuation.IsTrueReachable()
+ ? graph()->GetConstant1()
+ : graph()->GetConstantUndefined();
+}
+
+
+Handle<Code> CompareNilICStub::GenerateCode(Isolate* isolate) {
+ return DoGenerateCode(isolate, this);
+}
+
+
+template <>
+HValue* CodeStubGraphBuilder<BinaryOpStub>::BuildCodeInitializedStub() {
+ BinaryOpStub* stub = casted_stub();
+ HValue* left = GetParameter(0);
+ HValue* right = GetParameter(1);
+
+ Handle<Type> left_type = stub->GetLeftType(isolate());
+ Handle<Type> right_type = stub->GetRightType(isolate());
+ Handle<Type> result_type = stub->GetResultType(isolate());
+
+ ASSERT(!left_type->Is(Type::None()) && !right_type->Is(Type::None()) &&
+ (stub->HasSideEffects(isolate()) || !result_type->Is(Type::None())));
+
+ HValue* result = NULL;
+ if (stub->operation() == Token::ADD &&
+ (left_type->Maybe(Type::String()) || right_type->Maybe(Type::String())) &&
+ !left_type->Is(Type::String()) && !right_type->Is(Type::String())) {
+ // For the generic add stub, a fast case for string addition is performance
+ // critical.
+ if (left_type->Maybe(Type::String())) {
+ IfBuilder if_leftisstring(this);
+ if_leftisstring.If<HIsStringAndBranch>(left);
+ if_leftisstring.Then();
+ {
+ Push(AddInstruction(BuildBinaryOperation(
+ stub->operation(), left, right,
+ handle(Type::String(), isolate()), right_type,
+ result_type, stub->fixed_right_arg(), true)));
+ }
+ if_leftisstring.Else();
+ {
+ Push(AddInstruction(BuildBinaryOperation(
+ stub->operation(), left, right,
+ left_type, right_type, result_type,
+ stub->fixed_right_arg(), true)));
+ }
+ if_leftisstring.End();
+ result = Pop();
+ } else {
+ IfBuilder if_rightisstring(this);
+ if_rightisstring.If<HIsStringAndBranch>(right);
+ if_rightisstring.Then();
+ {
+ Push(AddInstruction(BuildBinaryOperation(
+ stub->operation(), left, right,
+ left_type, handle(Type::String(), isolate()),
+ result_type, stub->fixed_right_arg(), true)));
+ }
+ if_rightisstring.Else();
+ {
+ Push(AddInstruction(BuildBinaryOperation(
+ stub->operation(), left, right,
+ left_type, right_type, result_type,
+ stub->fixed_right_arg(), true)));
+ }
+ if_rightisstring.End();
+ result = Pop();
+ }
+ } else {
+ result = AddInstruction(BuildBinaryOperation(
+ stub->operation(), left, right,
+ left_type, right_type, result_type,
+ stub->fixed_right_arg(), true));
+ }
+
+ // If we encounter a generic argument, the number conversion is observable;
+ // thus we cannot afford to bail out after the fact.
+ if (!stub->HasSideEffects(isolate())) {
+ if (result_type->Is(Type::Smi())) {
+ if (stub->operation() == Token::SHR) {
+ // TODO(olivf) Replace this by a SmiTagU Instruction.
+ // 0x40000000 (0x80000000 when Smi values are 32 bits wide): this value
+ // would become negative when the register is interpreted as a signed Smi.
+ IfBuilder if_of(this);
+ if_of.IfNot<HCompareNumericAndBranch>(result,
+ Add<HConstant>(static_cast<int>(SmiValuesAre32Bits()
+ ? 0x80000000 : 0x40000000)), Token::EQ_STRICT);
+ if_of.Then();
+ if_of.ElseDeopt("UInt->Smi oveflow");
+ if_of.End();
+ }
+ }
+ result = EnforceNumberType(result, result_type);
+ }
+
+ // Reuse the double box of one of the operands if we are allowed to (i.e.
+ // chained binops).
+ if (stub->CanReuseDoubleBox()) {
+ HValue* operand = (stub->mode() == OVERWRITE_LEFT) ? left : right;
+ IfBuilder if_heap_number(this);
+ if_heap_number.IfNot<HIsSmiAndBranch>(operand);
+ if_heap_number.Then();
+ Add<HStoreNamedField>(operand, HObjectAccess::ForHeapNumberValue(), result);
+ Push(operand);
+ if_heap_number.Else();
+ Push(result);
+ if_heap_number.End();
+ result = Pop();
+ }
+
+ return result;
+}
+
+
+Handle<Code> BinaryOpStub::GenerateCode(Isolate* isolate) {
+ return DoGenerateCode(isolate, this);
+}
+
+
+template <>
+HValue* CodeStubGraphBuilder<ToBooleanStub>::BuildCodeInitializedStub() {
+ ToBooleanStub* stub = casted_stub();
+
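+ // Branch on the stub's recorded type feedback: truthy inputs return 1,
+ // everything else falls through and returns 0.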
+ IfBuilder if_true(this);
+ if_true.If<HBranch>(GetParameter(0), stub->GetTypes());
+ if_true.Then();
+ if_true.Return(graph()->GetConstant1());
+ if_true.Else();
+ if_true.End();
+ return graph()->GetConstant0();
+}
+
+
+Handle<Code> ToBooleanStub::GenerateCode(Isolate* isolate) {
+ return DoGenerateCode(isolate, this);
+}
+
+
+template <>
+HValue* CodeStubGraphBuilder<StoreGlobalStub>::BuildCodeInitializedStub() {
+ StoreGlobalStub* stub = casted_stub();
+ Handle<Object> hole(isolate()->heap()->the_hole_value(), isolate());
+ Handle<Object> placeholder_value(Smi::FromInt(0), isolate());
+ Handle<PropertyCell> placeholder_cell =
+ isolate()->factory()->NewPropertyCell(placeholder_value);
+
+ HParameter* receiver = GetParameter(0);
+ HParameter* value = GetParameter(2);
+
+ // Check that the map of the global has not changed: use a placeholder map
+ // that will be replaced later with the global object's map.
+ Handle<Map> placeholder_map = isolate()->factory()->meta_map();
+ Add<HCheckMaps>(receiver, placeholder_map, top_info());
+
+ HValue* cell = Add<HConstant>(placeholder_cell);
+ HObjectAccess access(HObjectAccess::ForCellPayload(isolate()));
+ HValue* cell_contents = Add<HLoadNamedField>(cell, access);
+
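+ // For constant cells the store must not actually change the value; anything
+ // else deoptimizes so the runtime can handle it.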
+ if (stub->is_constant()) {
+ IfBuilder builder(this);
+ builder.If<HCompareObjectEqAndBranch>(cell_contents, value);
+ builder.Then();
+ builder.ElseDeopt("Unexpected cell contents in constant global store");
+ builder.End();
+ } else {
+ // Load the payload of the global parameter cell. A hole indicates that the
+ // property has been deleted and that the store must be handled by the
+ // runtime.
+ IfBuilder builder(this);
+ HValue* hole_value = Add<HConstant>(hole);
+ builder.If<HCompareObjectEqAndBranch>(cell_contents, hole_value);
+ builder.Then();
+ builder.Deopt("Unexpected cell contents in global store");
+ builder.Else();
+ Add<HStoreNamedField>(cell, access, value);
+ builder.End();
+ }
+
+ return value;
+}
+
+
+Handle<Code> StoreGlobalStub::GenerateCode(Isolate* isolate) {
+ return DoGenerateCode(isolate, this);
+}
+
+
+template<>
+HValue* CodeStubGraphBuilder<ElementsTransitionAndStoreStub>::BuildCodeStub() {
+ HValue* value = GetParameter(0);
+ HValue* map = GetParameter(1);
+ HValue* key = GetParameter(2);
+ HValue* object = GetParameter(3);
+
+ if (FLAG_trace_elements_transitions) {
+ // Tracing elements transitions is the job of the runtime.
+ Add<HDeoptimize>("Tracing elements transitions", Deoptimizer::EAGER);
+ } else {
+ info()->MarkAsSavesCallerDoubles();
+
+ BuildTransitionElementsKind(object, map,
+ casted_stub()->from_kind(),
+ casted_stub()->to_kind(),
+ casted_stub()->is_jsarray());
+
+ BuildUncheckedMonomorphicElementAccess(object, key, value,
+ casted_stub()->is_jsarray(),
+ casted_stub()->to_kind(),
+ true, ALLOW_RETURN_HOLE,
+ casted_stub()->store_mode());
+ }
+
+ return value;
+}
+
+
+Handle<Code> ElementsTransitionAndStoreStub::GenerateCode(Isolate* isolate) {
+ return DoGenerateCode(isolate, this);
+}
+
+
+void CodeStubGraphBuilderBase::BuildInstallOptimizedCode(
+ HValue* js_function,
+ HValue* native_context,
+ HValue* code_object) {
+ Counters* counters = isolate()->counters();
+ AddIncrementCounter(counters->fast_new_closure_install_optimized());
+
+ // TODO(fschneider): Idea: store proper code pointers in the optimized code
+ // map and either unmangle them on marking or do nothing as the whole map is
+ // discarded on major GC anyway.
+ Add<HStoreCodeEntry>(js_function, code_object);
+
+ // Now link the function into the native context's list of optimized functions.
+ HValue* optimized_functions_list = Add<HLoadNamedField>(native_context,
+ HObjectAccess::ForContextSlot(Context::OPTIMIZED_FUNCTIONS_LIST));
+ Add<HStoreNamedField>(js_function,
+ HObjectAccess::ForNextFunctionLinkPointer(),
+ optimized_functions_list);
+
+ // This store is the only one that should have a write barrier.
+ Add<HStoreNamedField>(native_context,
+ HObjectAccess::ForContextSlot(Context::OPTIMIZED_FUNCTIONS_LIST),
+ js_function);
+}
+
+
+void CodeStubGraphBuilderBase::BuildInstallCode(HValue* js_function,
+ HValue* shared_info) {
+ Add<HStoreNamedField>(js_function,
+ HObjectAccess::ForNextFunctionLinkPointer(),
+ graph()->GetConstantUndefined());
+ HValue* code_object = Add<HLoadNamedField>(shared_info,
+ HObjectAccess::ForCodeOffset());
+ Add<HStoreCodeEntry>(js_function, code_object);
+}
+
+
+void CodeStubGraphBuilderBase::BuildInstallFromOptimizedCodeMap(
+ HValue* js_function,
+ HValue* shared_info,
+ HValue* native_context) {
+ Counters* counters = isolate()->counters();
+ IfBuilder is_optimized(this);
+ HInstruction* optimized_map = Add<HLoadNamedField>(shared_info,
+ HObjectAccess::ForOptimizedCodeMap());
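+ // A SharedFunctionInfo without an optimized code map holds the Smi zero in
+ // this field, hence the comparison against 0 below.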
+ HValue* null_constant = Add<HConstant>(0);
+ is_optimized.If<HCompareObjectEqAndBranch>(optimized_map, null_constant);
+ is_optimized.Then();
+ {
+ BuildInstallCode(js_function, shared_info);
+ }
+ is_optimized.Else();
+ {
+ AddIncrementCounter(counters->fast_new_closure_try_optimized());
+ // optimized_map points to a fixed array of 3-element entries
+ // (native context, optimized code, literals).
+ // The map must never be empty, so check the first entry.
+ Label install_optimized;
+ HValue* first_context_slot = Add<HLoadNamedField>(optimized_map,
+ HObjectAccess::ForFirstContextSlot());
+ IfBuilder already_in(this);
+ already_in.If<HCompareObjectEqAndBranch>(native_context,
+ first_context_slot);
+ already_in.Then();
+ {
+ HValue* code_object = Add<HLoadNamedField>(optimized_map,
+ HObjectAccess::ForFirstCodeSlot());
+ BuildInstallOptimizedCode(js_function, native_context, code_object);
+ }
+ already_in.Else();
+ {
+ HValue* shared_function_entry_length =
+ Add<HConstant>(SharedFunctionInfo::kEntryLength);
+ LoopBuilder loop_builder(this,
+ context(),
+ LoopBuilder::kPostDecrement,
+ shared_function_entry_length);
+ HValue* array_length = Add<HLoadNamedField>(optimized_map,
+ HObjectAccess::ForFixedArrayLength());
+ HValue* key = loop_builder.BeginBody(array_length,
+ graph()->GetConstant0(),
+ Token::GT);
+ {
+ // Iterate through the rest of the map backwards.
+ // Do not re-check the first entry.
+ HValue* second_entry_index =
+ Add<HConstant>(SharedFunctionInfo::kSecondEntryIndex);
+ IfBuilder restore_check(this);
+ restore_check.If<HCompareNumericAndBranch>(key, second_entry_index,
+ Token::EQ);
+ restore_check.Then();
+ {
+ // Store the unoptimized code
+ BuildInstallCode(js_function, shared_info);
+ loop_builder.Break();
+ }
+ restore_check.Else();
+ {
+ HValue* keyed_minus = AddUncasted<HSub>(
+ key, shared_function_entry_length);
+ HInstruction* keyed_lookup = Add<HLoadKeyed>(optimized_map,
+ keyed_minus, static_cast<HValue*>(NULL), FAST_ELEMENTS);
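+ // keyed_minus indexes the native-context slot of the entry ending at 'key';
+ // the slot right after it holds the corresponding optimized code.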
+ IfBuilder done_check(this);
+ done_check.If<HCompareObjectEqAndBranch>(native_context,
+ keyed_lookup);
+ done_check.Then();
+ {
+ // Hit: fetch the optimized code.
+ HValue* keyed_plus = AddUncasted<HAdd>(
+ keyed_minus, graph()->GetConstant1());
+ HValue* code_object = Add<HLoadKeyed>(optimized_map,
+ keyed_plus, static_cast<HValue*>(NULL), FAST_ELEMENTS);
+ BuildInstallOptimizedCode(js_function, native_context, code_object);
+
+ // Fall out of the loop
+ loop_builder.Break();
+ }
+ done_check.Else();
+ done_check.End();
+ }
+ restore_check.End();
+ }
+ loop_builder.EndBody();
+ }
+ already_in.End();
+ }
+ is_optimized.End();
+}
+
+
+template<>
+HValue* CodeStubGraphBuilder<FastNewClosureStub>::BuildCodeStub() {
+ Counters* counters = isolate()->counters();
+ Factory* factory = isolate()->factory();
+ HInstruction* empty_fixed_array =
+ Add<HConstant>(factory->empty_fixed_array());
+ HValue* shared_info = GetParameter(0);
+
+ AddIncrementCounter(counters->fast_new_closure_total());
+
+ // Create a new closure from the given function info in new space
+ HValue* size = Add<HConstant>(JSFunction::kSize);
+ HInstruction* js_function = Add<HAllocate>(size, HType::JSObject(),
+ NOT_TENURED, JS_FUNCTION_TYPE);
+
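+ // The closure's map depends on the language mode and on whether the closure
+ // is a generator; both are baked into the stub.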
+ int map_index = Context::FunctionMapIndex(casted_stub()->language_mode(),
+ casted_stub()->is_generator());
+
+ // Compute the function map in the current native context and set that
+ // as the map of the allocated object.
+ HInstruction* native_context = BuildGetNativeContext();
+ HInstruction* map_slot_value = Add<HLoadNamedField>(native_context,
+ HObjectAccess::ForContextSlot(map_index));
+ Add<HStoreNamedField>(js_function, HObjectAccess::ForMap(), map_slot_value);
+
+ // Initialize the rest of the function.
+ Add<HStoreNamedField>(js_function, HObjectAccess::ForPropertiesPointer(),
+ empty_fixed_array);
+ Add<HStoreNamedField>(js_function, HObjectAccess::ForElementsPointer(),
+ empty_fixed_array);
+ Add<HStoreNamedField>(js_function, HObjectAccess::ForLiteralsPointer(),
+ empty_fixed_array);
+ Add<HStoreNamedField>(js_function, HObjectAccess::ForPrototypeOrInitialMap(),
+ graph()->GetConstantHole());
+ Add<HStoreNamedField>(js_function,
+ HObjectAccess::ForSharedFunctionInfoPointer(),
+ shared_info);
+ Add<HStoreNamedField>(js_function, HObjectAccess::ForFunctionContextPointer(),
+ context());
+
+ // Initialize the code pointer in the function to be the one
+ // found in the shared function info object.
+ // But first check if there is an optimized version for our context.
+ if (FLAG_cache_optimized_code) {
+ BuildInstallFromOptimizedCodeMap(js_function, shared_info, native_context);
+ } else {
+ BuildInstallCode(js_function, shared_info);
+ }
+
+ return js_function;
+}
+
+
+Handle<Code> FastNewClosureStub::GenerateCode(Isolate* isolate) {
+ return DoGenerateCode(isolate, this);
+}
+
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/code-stubs.cc b/deps/v8/src/code-stubs.cc
index 7a720592d..e68a5dd0c 100644
--- a/deps/v8/src/code-stubs.cc
+++ b/deps/v8/src/code-stubs.cc
@@ -29,6 +29,7 @@
#include "bootstrapper.h"
#include "code-stubs.h"
+#include "cpu-profiler.h"
#include "stub-cache.h"
#include "factory.h"
#include "gdb-jit.h"
@@ -37,31 +38,29 @@
namespace v8 {
namespace internal {
-bool CodeStub::FindCodeInCache(Code** code_out) {
- Heap* heap = Isolate::Current()->heap();
- int index = heap->code_stubs()->FindEntry(GetKey());
+
+CodeStubInterfaceDescriptor::CodeStubInterfaceDescriptor()
+ : register_param_count_(-1),
+ stack_parameter_count_(no_reg),
+ hint_stack_parameter_count_(-1),
+ function_mode_(NOT_JS_FUNCTION_STUB_MODE),
+ register_params_(NULL),
+ deoptimization_handler_(NULL),
+ miss_handler_(),
+ has_miss_handler_(false) { }
+
+
+bool CodeStub::FindCodeInCache(Code** code_out, Isolate* isolate) {
+ UnseededNumberDictionary* stubs = isolate->heap()->code_stubs();
+ int index = stubs->FindEntry(GetKey());
if (index != UnseededNumberDictionary::kNotFound) {
- *code_out = Code::cast(heap->code_stubs()->ValueAt(index));
+ *code_out = Code::cast(stubs->ValueAt(index));
return true;
}
return false;
}
-void CodeStub::GenerateCode(MacroAssembler* masm) {
- // Update the static counter each time a new code stub is generated.
- masm->isolate()->counters()->code_stubs()->Increment();
-
- // Nested stubs are not allowed for leaves.
- AllowStubCallsScope allow_scope(masm, false);
-
- // Generate the code for the stub.
- masm->set_generating_stub(true);
- NoCurrentFrameScope scope(masm);
- Generate(masm);
-}
-
-
SmartArrayPointer<const char> CodeStub::GetName() {
char buffer[100];
NoAllocationStringAllocator allocator(buffer,
@@ -72,8 +71,7 @@ SmartArrayPointer<const char> CodeStub::GetName() {
}
-void CodeStub::RecordCodeGeneration(Code* code, MacroAssembler* masm) {
- Isolate* isolate = masm->isolate();
+void CodeStub::RecordCodeGeneration(Code* code, Isolate* isolate) {
SmartArrayPointer<const char> name = GetName();
PROFILE(isolate, CodeCreateEvent(Logger::STUB_TAG, code, *name));
GDBJIT(AddCode(GDBJITInterface::STUB, *name, code));
@@ -82,43 +80,83 @@ void CodeStub::RecordCodeGeneration(Code* code, MacroAssembler* masm) {
}
-int CodeStub::GetCodeKind() {
+Code::Kind CodeStub::GetCodeKind() const {
return Code::STUB;
}
-Handle<Code> CodeStub::GetCode() {
- Isolate* isolate = Isolate::Current();
+Handle<Code> CodeStub::GetCodeCopyFromTemplate(Isolate* isolate) {
+ Handle<Code> ic = GetCode(isolate);
+ ic = isolate->factory()->CopyCode(ic);
+ RecordCodeGeneration(*ic, isolate);
+ return ic;
+}
+
+
+Handle<Code> PlatformCodeStub::GenerateCode(Isolate* isolate) {
+ Factory* factory = isolate->factory();
+
+ // Generate the new code.
+ MacroAssembler masm(isolate, NULL, 256);
+
+ {
+ // Update the static counter each time a new code stub is generated.
+ isolate->counters()->code_stubs()->Increment();
+
+ // Nested stubs are not allowed for leaves.
+ AllowStubCallsScope allow_scope(&masm, false);
+
+ // Generate the code for the stub.
+ masm.set_generating_stub(true);
+ NoCurrentFrameScope scope(&masm);
+ Generate(&masm);
+ }
+
+ // Create the code object.
+ CodeDesc desc;
+ masm.GetCode(&desc);
+
+ // Copy the generated code into a heap object.
+ Code::Flags flags = Code::ComputeFlags(
+ GetCodeKind(),
+ GetICState(),
+ GetExtraICState(),
+ GetStubType(),
+ GetStubFlags());
+ Handle<Code> new_object = factory->NewCode(
+ desc, flags, masm.CodeObject(), NeedsImmovableCode());
+ return new_object;
+}
+
+
+void CodeStub::VerifyPlatformFeatures(Isolate* isolate) {
+ ASSERT(CpuFeatures::VerifyCrossCompiling());
+}
+
+
+Handle<Code> CodeStub::GetCode(Isolate* isolate) {
Factory* factory = isolate->factory();
Heap* heap = isolate->heap();
Code* code;
if (UseSpecialCache()
- ? FindCodeInSpecialCache(&code)
- : FindCodeInCache(&code)) {
- ASSERT(IsPregenerated() == code->is_pregenerated());
+ ? FindCodeInSpecialCache(&code, isolate)
+ : FindCodeInCache(&code, isolate)) {
+ ASSERT(IsPregenerated(isolate) == code->is_pregenerated());
+ ASSERT(GetCodeKind() == code->kind());
return Handle<Code>(code);
}
+#ifdef DEBUG
+ VerifyPlatformFeatures(isolate);
+#endif
+
{
HandleScope scope(isolate);
- // Generate the new code.
- MacroAssembler masm(isolate, NULL, 256);
- GenerateCode(&masm);
-
- // Create the code object.
- CodeDesc desc;
- masm.GetCode(&desc);
-
- // Copy the generated code into a heap object.
- Code::Flags flags = Code::ComputeFlags(
- static_cast<Code::Kind>(GetCodeKind()),
- GetICState());
- Handle<Code> new_object = factory->NewCode(
- desc, flags, masm.CodeObject(), NeedsImmovableCode());
+ Handle<Code> new_object = GenerateCode(isolate);
new_object->set_major_key(MajorKey());
FinishCode(new_object);
- RecordCodeGeneration(*new_object, &masm);
+ RecordCodeGeneration(*new_object, isolate);
#ifdef ENABLE_DISASSEMBLER
if (FLAG_print_code_stubs) {
@@ -164,39 +202,539 @@ const char* CodeStub::MajorName(CodeStub::Major major_key,
}
-void CodeStub::PrintName(StringStream* stream) {
+void CodeStub::PrintBaseName(StringStream* stream) {
stream->Add("%s", MajorName(MajorKey(), false));
}
+void CodeStub::PrintName(StringStream* stream) {
+ PrintBaseName(stream);
+ PrintState(stream);
+}
+
+
+void BinaryOpStub::PrintBaseName(StringStream* stream) {
+ const char* op_name = Token::Name(op_);
+ const char* ovr = "";
+ if (mode_ == OVERWRITE_LEFT) ovr = "_ReuseLeft";
+ if (mode_ == OVERWRITE_RIGHT) ovr = "_ReuseRight";
+ stream->Add("BinaryOpStub_%s%s", op_name, ovr);
+}
+
+
+void BinaryOpStub::PrintState(StringStream* stream) {
+ stream->Add("(");
+ stream->Add(StateToName(left_state_));
+ stream->Add("*");
+ if (fixed_right_arg_.has_value) {
+ stream->Add("%d", fixed_right_arg_.value);
+ } else {
+ stream->Add(StateToName(right_state_));
+ }
+ stream->Add("->");
+ stream->Add(StateToName(result_state_));
+ stream->Add(")");
+}
+
+
+Maybe<Handle<Object> > BinaryOpStub::Result(Handle<Object> left,
+ Handle<Object> right,
+ Isolate* isolate) {
+ Handle<JSBuiltinsObject> builtins(isolate->js_builtins_object());
+ Builtins::JavaScript func = BinaryOpIC::TokenToJSBuiltin(op_);
+ Object* builtin = builtins->javascript_builtin(func);
+ Handle<JSFunction> builtin_function =
+ Handle<JSFunction>(JSFunction::cast(builtin), isolate);
+ bool caught_exception;
+ Handle<Object> result = Execution::Call(isolate, builtin_function, left,
+ 1, &right, &caught_exception);
+ return Maybe<Handle<Object> >(!caught_exception, result);
+}
+
+
+void BinaryOpStub::Initialize() {
+ fixed_right_arg_.has_value = false;
+ left_state_ = right_state_ = result_state_ = NONE;
+}
+
+
+void BinaryOpStub::Generate(Token::Value op,
+ State left,
+ State right,
+ State result,
+ OverwriteMode mode,
+ Isolate* isolate) {
+ BinaryOpStub stub(INITIALIZED);
+ stub.op_ = op;
+ stub.left_state_ = left;
+ stub.right_state_ = right;
+ stub.result_state_ = result;
+ stub.mode_ = mode;
+ stub.GetCode(isolate);
+}
+
+
+void BinaryOpStub::Generate(Token::Value op,
+ State left,
+ int right,
+ State result,
+ OverwriteMode mode,
+ Isolate* isolate) {
+ BinaryOpStub stub(INITIALIZED);
+ stub.op_ = op;
+ stub.left_state_ = left;
+ stub.fixed_right_arg_.has_value = true;
+ stub.fixed_right_arg_.value = right;
+ stub.right_state_ = SMI;
+ stub.result_state_ = result;
+ stub.mode_ = mode;
+ stub.GetCode(isolate);
+}
+
+
+void BinaryOpStub::GenerateAheadOfTime(Isolate* isolate) {
+ Token::Value binop[] = {Token::SUB, Token::MOD, Token::DIV, Token::MUL,
+ Token::ADD, Token::SAR, Token::BIT_OR, Token::BIT_AND,
+ Token::BIT_XOR, Token::SHL, Token::SHR};
+ for (unsigned i = 0; i < ARRAY_SIZE(binop); i++) {
+ BinaryOpStub stub(UNINITIALIZED);
+ stub.op_ = binop[i];
+ stub.GetCode(isolate);
+ }
+
+ // TODO(olivf) We should investigate why adding stubs to the snapshot is so
+ // expensive at runtime. When solved we should be able to add most binops to
+ // the snapshot instead of hand-picking them.
+ // Generated list of commonly used stubs
+ Generate(Token::ADD, INT32, INT32, INT32, NO_OVERWRITE, isolate);
+ Generate(Token::ADD, INT32, INT32, INT32, OVERWRITE_LEFT, isolate);
+ Generate(Token::ADD, INT32, INT32, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::ADD, INT32, INT32, NUMBER, OVERWRITE_LEFT, isolate);
+ Generate(Token::ADD, INT32, NUMBER, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::ADD, INT32, NUMBER, NUMBER, OVERWRITE_LEFT, isolate);
+ Generate(Token::ADD, INT32, NUMBER, NUMBER, OVERWRITE_RIGHT, isolate);
+ Generate(Token::ADD, INT32, SMI, INT32, NO_OVERWRITE, isolate);
+ Generate(Token::ADD, INT32, SMI, INT32, OVERWRITE_LEFT, isolate);
+ Generate(Token::ADD, INT32, SMI, INT32, OVERWRITE_RIGHT, isolate);
+ Generate(Token::ADD, NUMBER, INT32, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::ADD, NUMBER, INT32, NUMBER, OVERWRITE_LEFT, isolate);
+ Generate(Token::ADD, NUMBER, INT32, NUMBER, OVERWRITE_RIGHT, isolate);
+ Generate(Token::ADD, NUMBER, NUMBER, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::ADD, NUMBER, NUMBER, NUMBER, OVERWRITE_LEFT, isolate);
+ Generate(Token::ADD, NUMBER, NUMBER, NUMBER, OVERWRITE_RIGHT, isolate);
+ Generate(Token::ADD, NUMBER, SMI, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::ADD, NUMBER, SMI, NUMBER, OVERWRITE_LEFT, isolate);
+ Generate(Token::ADD, NUMBER, SMI, NUMBER, OVERWRITE_RIGHT, isolate);
+ Generate(Token::ADD, SMI, INT32, INT32, NO_OVERWRITE, isolate);
+ Generate(Token::ADD, SMI, INT32, INT32, OVERWRITE_LEFT, isolate);
+ Generate(Token::ADD, SMI, INT32, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::ADD, SMI, NUMBER, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::ADD, SMI, NUMBER, NUMBER, OVERWRITE_LEFT, isolate);
+ Generate(Token::ADD, SMI, NUMBER, NUMBER, OVERWRITE_RIGHT, isolate);
+ Generate(Token::ADD, SMI, SMI, INT32, OVERWRITE_LEFT, isolate);
+ Generate(Token::ADD, SMI, SMI, SMI, OVERWRITE_RIGHT, isolate);
+ Generate(Token::BIT_AND, INT32, INT32, INT32, NO_OVERWRITE, isolate);
+ Generate(Token::BIT_AND, INT32, INT32, INT32, OVERWRITE_LEFT, isolate);
+ Generate(Token::BIT_AND, INT32, INT32, INT32, OVERWRITE_RIGHT, isolate);
+ Generate(Token::BIT_AND, INT32, INT32, SMI, NO_OVERWRITE, isolate);
+ Generate(Token::BIT_AND, INT32, INT32, SMI, OVERWRITE_RIGHT, isolate);
+ Generate(Token::BIT_AND, INT32, SMI, INT32, NO_OVERWRITE, isolate);
+ Generate(Token::BIT_AND, INT32, SMI, INT32, OVERWRITE_RIGHT, isolate);
+ Generate(Token::BIT_AND, INT32, SMI, SMI, NO_OVERWRITE, isolate);
+ Generate(Token::BIT_AND, INT32, SMI, SMI, OVERWRITE_LEFT, isolate);
+ Generate(Token::BIT_AND, INT32, SMI, SMI, OVERWRITE_RIGHT, isolate);
+ Generate(Token::BIT_AND, NUMBER, INT32, INT32, OVERWRITE_RIGHT, isolate);
+ Generate(Token::BIT_AND, NUMBER, SMI, SMI, NO_OVERWRITE, isolate);
+ Generate(Token::BIT_AND, NUMBER, SMI, SMI, OVERWRITE_RIGHT, isolate);
+ Generate(Token::BIT_AND, SMI, INT32, INT32, NO_OVERWRITE, isolate);
+ Generate(Token::BIT_AND, SMI, INT32, SMI, OVERWRITE_RIGHT, isolate);
+ Generate(Token::BIT_AND, SMI, NUMBER, SMI, OVERWRITE_RIGHT, isolate);
+ Generate(Token::BIT_AND, SMI, SMI, SMI, NO_OVERWRITE, isolate);
+ Generate(Token::BIT_AND, SMI, SMI, SMI, OVERWRITE_LEFT, isolate);
+ Generate(Token::BIT_AND, SMI, SMI, SMI, OVERWRITE_RIGHT, isolate);
+ Generate(Token::BIT_OR, INT32, INT32, INT32, OVERWRITE_LEFT, isolate);
+ Generate(Token::BIT_OR, INT32, INT32, INT32, OVERWRITE_RIGHT, isolate);
+ Generate(Token::BIT_OR, INT32, INT32, SMI, OVERWRITE_LEFT, isolate);
+ Generate(Token::BIT_OR, INT32, SMI, INT32, NO_OVERWRITE, isolate);
+ Generate(Token::BIT_OR, INT32, SMI, INT32, OVERWRITE_LEFT, isolate);
+ Generate(Token::BIT_OR, INT32, SMI, INT32, OVERWRITE_RIGHT, isolate);
+ Generate(Token::BIT_OR, INT32, SMI, SMI, NO_OVERWRITE, isolate);
+ Generate(Token::BIT_OR, INT32, SMI, SMI, OVERWRITE_RIGHT, isolate);
+ Generate(Token::BIT_OR, NUMBER, SMI, INT32, NO_OVERWRITE, isolate);
+ Generate(Token::BIT_OR, NUMBER, SMI, INT32, OVERWRITE_LEFT, isolate);
+ Generate(Token::BIT_OR, NUMBER, SMI, INT32, OVERWRITE_RIGHT, isolate);
+ Generate(Token::BIT_OR, NUMBER, SMI, SMI, NO_OVERWRITE, isolate);
+ Generate(Token::BIT_OR, NUMBER, SMI, SMI, OVERWRITE_LEFT, isolate);
+ Generate(Token::BIT_OR, SMI, INT32, INT32, OVERWRITE_LEFT, isolate);
+ Generate(Token::BIT_OR, SMI, INT32, INT32, OVERWRITE_RIGHT, isolate);
+ Generate(Token::BIT_OR, SMI, INT32, SMI, OVERWRITE_RIGHT, isolate);
+ Generate(Token::BIT_OR, SMI, SMI, SMI, OVERWRITE_LEFT, isolate);
+ Generate(Token::BIT_OR, SMI, SMI, SMI, OVERWRITE_RIGHT, isolate);
+ Generate(Token::BIT_XOR, INT32, INT32, INT32, NO_OVERWRITE, isolate);
+ Generate(Token::BIT_XOR, INT32, INT32, INT32, OVERWRITE_LEFT, isolate);
+ Generate(Token::BIT_XOR, INT32, INT32, INT32, OVERWRITE_RIGHT, isolate);
+ Generate(Token::BIT_XOR, INT32, INT32, SMI, NO_OVERWRITE, isolate);
+ Generate(Token::BIT_XOR, INT32, INT32, SMI, OVERWRITE_LEFT, isolate);
+ Generate(Token::BIT_XOR, INT32, NUMBER, SMI, NO_OVERWRITE, isolate);
+ Generate(Token::BIT_XOR, INT32, SMI, INT32, NO_OVERWRITE, isolate);
+ Generate(Token::BIT_XOR, INT32, SMI, INT32, OVERWRITE_LEFT, isolate);
+ Generate(Token::BIT_XOR, INT32, SMI, INT32, OVERWRITE_RIGHT, isolate);
+ Generate(Token::BIT_XOR, NUMBER, INT32, INT32, NO_OVERWRITE, isolate);
+ Generate(Token::BIT_XOR, NUMBER, SMI, INT32, NO_OVERWRITE, isolate);
+ Generate(Token::BIT_XOR, NUMBER, SMI, SMI, NO_OVERWRITE, isolate);
+ Generate(Token::BIT_XOR, SMI, INT32, INT32, NO_OVERWRITE, isolate);
+ Generate(Token::BIT_XOR, SMI, INT32, INT32, OVERWRITE_LEFT, isolate);
+ Generate(Token::BIT_XOR, SMI, INT32, SMI, OVERWRITE_LEFT, isolate);
+ Generate(Token::BIT_XOR, SMI, SMI, SMI, NO_OVERWRITE, isolate);
+ Generate(Token::BIT_XOR, SMI, SMI, SMI, OVERWRITE_LEFT, isolate);
+ Generate(Token::BIT_XOR, SMI, SMI, SMI, OVERWRITE_RIGHT, isolate);
+ Generate(Token::DIV, INT32, INT32, INT32, NO_OVERWRITE, isolate);
+ Generate(Token::DIV, INT32, INT32, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::DIV, INT32, NUMBER, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::DIV, INT32, NUMBER, NUMBER, OVERWRITE_LEFT, isolate);
+ Generate(Token::DIV, INT32, SMI, INT32, NO_OVERWRITE, isolate);
+ Generate(Token::DIV, INT32, SMI, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::DIV, NUMBER, INT32, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::DIV, NUMBER, INT32, NUMBER, OVERWRITE_LEFT, isolate);
+ Generate(Token::DIV, NUMBER, NUMBER, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::DIV, NUMBER, NUMBER, NUMBER, OVERWRITE_LEFT, isolate);
+ Generate(Token::DIV, NUMBER, NUMBER, NUMBER, OVERWRITE_RIGHT, isolate);
+ Generate(Token::DIV, NUMBER, SMI, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::DIV, NUMBER, SMI, NUMBER, OVERWRITE_LEFT, isolate);
+ Generate(Token::DIV, SMI, INT32, INT32, NO_OVERWRITE, isolate);
+ Generate(Token::DIV, SMI, INT32, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::DIV, SMI, INT32, NUMBER, OVERWRITE_LEFT, isolate);
+ Generate(Token::DIV, SMI, NUMBER, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::DIV, SMI, NUMBER, NUMBER, OVERWRITE_LEFT, isolate);
+ Generate(Token::DIV, SMI, NUMBER, NUMBER, OVERWRITE_RIGHT, isolate);
+ Generate(Token::DIV, SMI, SMI, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::DIV, SMI, SMI, NUMBER, OVERWRITE_LEFT, isolate);
+ Generate(Token::DIV, SMI, SMI, NUMBER, OVERWRITE_RIGHT, isolate);
+ Generate(Token::DIV, SMI, SMI, SMI, NO_OVERWRITE, isolate);
+ Generate(Token::DIV, SMI, SMI, SMI, OVERWRITE_LEFT, isolate);
+ Generate(Token::DIV, SMI, SMI, SMI, OVERWRITE_RIGHT, isolate);
+ Generate(Token::MOD, NUMBER, SMI, NUMBER, OVERWRITE_LEFT, isolate);
+ Generate(Token::MOD, SMI, 16, SMI, OVERWRITE_LEFT, isolate);
+ Generate(Token::MOD, SMI, 2, SMI, NO_OVERWRITE, isolate);
+ Generate(Token::MOD, SMI, 2048, SMI, NO_OVERWRITE, isolate);
+ Generate(Token::MOD, SMI, 32, SMI, NO_OVERWRITE, isolate);
+ Generate(Token::MOD, SMI, 4, SMI, NO_OVERWRITE, isolate);
+ Generate(Token::MOD, SMI, 4, SMI, OVERWRITE_LEFT, isolate);
+ Generate(Token::MOD, SMI, 8, SMI, NO_OVERWRITE, isolate);
+ Generate(Token::MOD, SMI, SMI, SMI, NO_OVERWRITE, isolate);
+ Generate(Token::MOD, SMI, SMI, SMI, OVERWRITE_LEFT, isolate);
+ Generate(Token::MUL, INT32, INT32, INT32, NO_OVERWRITE, isolate);
+ Generate(Token::MUL, INT32, INT32, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::MUL, INT32, NUMBER, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::MUL, INT32, NUMBER, NUMBER, OVERWRITE_LEFT, isolate);
+ Generate(Token::MUL, INT32, SMI, INT32, NO_OVERWRITE, isolate);
+ Generate(Token::MUL, INT32, SMI, INT32, OVERWRITE_LEFT, isolate);
+ Generate(Token::MUL, INT32, SMI, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::MUL, NUMBER, INT32, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::MUL, NUMBER, INT32, NUMBER, OVERWRITE_LEFT, isolate);
+ Generate(Token::MUL, NUMBER, INT32, NUMBER, OVERWRITE_RIGHT, isolate);
+ Generate(Token::MUL, NUMBER, NUMBER, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::MUL, NUMBER, NUMBER, NUMBER, OVERWRITE_LEFT, isolate);
+ Generate(Token::MUL, NUMBER, SMI, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::MUL, NUMBER, SMI, NUMBER, OVERWRITE_LEFT, isolate);
+ Generate(Token::MUL, NUMBER, SMI, NUMBER, OVERWRITE_RIGHT, isolate);
+ Generate(Token::MUL, SMI, INT32, INT32, NO_OVERWRITE, isolate);
+ Generate(Token::MUL, SMI, INT32, INT32, OVERWRITE_LEFT, isolate);
+ Generate(Token::MUL, SMI, INT32, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::MUL, SMI, NUMBER, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::MUL, SMI, NUMBER, NUMBER, OVERWRITE_LEFT, isolate);
+ Generate(Token::MUL, SMI, NUMBER, NUMBER, OVERWRITE_RIGHT, isolate);
+ Generate(Token::MUL, SMI, SMI, INT32, NO_OVERWRITE, isolate);
+ Generate(Token::MUL, SMI, SMI, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::MUL, SMI, SMI, NUMBER, OVERWRITE_LEFT, isolate);
+ Generate(Token::MUL, SMI, SMI, SMI, NO_OVERWRITE, isolate);
+ Generate(Token::MUL, SMI, SMI, SMI, OVERWRITE_LEFT, isolate);
+ Generate(Token::MUL, SMI, SMI, SMI, OVERWRITE_RIGHT, isolate);
+ Generate(Token::SAR, INT32, SMI, INT32, OVERWRITE_RIGHT, isolate);
+ Generate(Token::SAR, INT32, SMI, SMI, NO_OVERWRITE, isolate);
+ Generate(Token::SAR, INT32, SMI, SMI, OVERWRITE_RIGHT, isolate);
+ Generate(Token::SAR, NUMBER, SMI, SMI, NO_OVERWRITE, isolate);
+ Generate(Token::SAR, NUMBER, SMI, SMI, OVERWRITE_RIGHT, isolate);
+ Generate(Token::SAR, SMI, SMI, SMI, OVERWRITE_LEFT, isolate);
+ Generate(Token::SAR, SMI, SMI, SMI, OVERWRITE_RIGHT, isolate);
+ Generate(Token::SHL, INT32, SMI, INT32, NO_OVERWRITE, isolate);
+ Generate(Token::SHL, INT32, SMI, INT32, OVERWRITE_RIGHT, isolate);
+ Generate(Token::SHL, INT32, SMI, SMI, NO_OVERWRITE, isolate);
+ Generate(Token::SHL, INT32, SMI, SMI, OVERWRITE_RIGHT, isolate);
+ Generate(Token::SHL, NUMBER, SMI, SMI, OVERWRITE_RIGHT, isolate);
+ Generate(Token::SHL, SMI, SMI, INT32, NO_OVERWRITE, isolate);
+ Generate(Token::SHL, SMI, SMI, INT32, OVERWRITE_LEFT, isolate);
+ Generate(Token::SHL, SMI, SMI, INT32, OVERWRITE_RIGHT, isolate);
+ Generate(Token::SHL, SMI, SMI, SMI, NO_OVERWRITE, isolate);
+ Generate(Token::SHL, SMI, SMI, SMI, OVERWRITE_LEFT, isolate);
+ Generate(Token::SHL, SMI, SMI, SMI, OVERWRITE_RIGHT, isolate);
+ Generate(Token::SHR, INT32, SMI, SMI, NO_OVERWRITE, isolate);
+ Generate(Token::SHR, INT32, SMI, SMI, OVERWRITE_LEFT, isolate);
+ Generate(Token::SHR, INT32, SMI, SMI, OVERWRITE_RIGHT, isolate);
+ Generate(Token::SHR, NUMBER, SMI, SMI, NO_OVERWRITE, isolate);
+ Generate(Token::SHR, NUMBER, SMI, SMI, OVERWRITE_LEFT, isolate);
+ Generate(Token::SHR, NUMBER, SMI, INT32, OVERWRITE_RIGHT, isolate);
+ Generate(Token::SHR, SMI, SMI, SMI, NO_OVERWRITE, isolate);
+ Generate(Token::SHR, SMI, SMI, SMI, OVERWRITE_LEFT, isolate);
+ Generate(Token::SHR, SMI, SMI, SMI, OVERWRITE_RIGHT, isolate);
+ Generate(Token::SUB, INT32, INT32, INT32, NO_OVERWRITE, isolate);
+ Generate(Token::SUB, INT32, INT32, INT32, OVERWRITE_LEFT, isolate);
+ Generate(Token::SUB, INT32, NUMBER, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::SUB, INT32, NUMBER, NUMBER, OVERWRITE_RIGHT, isolate);
+ Generate(Token::SUB, INT32, SMI, INT32, OVERWRITE_LEFT, isolate);
+ Generate(Token::SUB, INT32, SMI, INT32, OVERWRITE_RIGHT, isolate);
+ Generate(Token::SUB, NUMBER, INT32, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::SUB, NUMBER, INT32, NUMBER, OVERWRITE_LEFT, isolate);
+ Generate(Token::SUB, NUMBER, NUMBER, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::SUB, NUMBER, NUMBER, NUMBER, OVERWRITE_LEFT, isolate);
+ Generate(Token::SUB, NUMBER, NUMBER, NUMBER, OVERWRITE_RIGHT, isolate);
+ Generate(Token::SUB, NUMBER, SMI, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::SUB, NUMBER, SMI, NUMBER, OVERWRITE_LEFT, isolate);
+ Generate(Token::SUB, NUMBER, SMI, NUMBER, OVERWRITE_RIGHT, isolate);
+ Generate(Token::SUB, SMI, INT32, INT32, NO_OVERWRITE, isolate);
+ Generate(Token::SUB, SMI, NUMBER, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::SUB, SMI, NUMBER, NUMBER, OVERWRITE_LEFT, isolate);
+ Generate(Token::SUB, SMI, NUMBER, NUMBER, OVERWRITE_RIGHT, isolate);
+ Generate(Token::SUB, SMI, SMI, SMI, NO_OVERWRITE, isolate);
+ Generate(Token::SUB, SMI, SMI, SMI, OVERWRITE_LEFT, isolate);
+ Generate(Token::SUB, SMI, SMI, SMI, OVERWRITE_RIGHT, isolate);
+}
+
+
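+// Only MOD with a positive power-of-two right operand can encode that operand
+// in the minor key: e.g. 2048 encodes as WhichPowerOf2(2048) == 11 and decodes
+// back as 1 << 11.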
+bool BinaryOpStub::can_encode_arg_value(int32_t value) const {
+ return op_ == Token::MOD && value > 0 && IsPowerOf2(value) &&
+ FixedRightArgValueBits::is_valid(WhichPowerOf2(value));
+}
+
+
+int BinaryOpStub::encode_arg_value(int32_t value) const {
+ ASSERT(can_encode_arg_value(value));
+ return WhichPowerOf2(value);
+}
+
+
+int32_t BinaryOpStub::decode_arg_value(int value) const {
+ return 1 << value;
+}
+
+
+int BinaryOpStub::encode_token(Token::Value op) const {
+ ASSERT(op >= FIRST_TOKEN && op <= LAST_TOKEN);
+ return op - FIRST_TOKEN;
+}
+
+
+Token::Value BinaryOpStub::decode_token(int op) const {
+ int res = op + FIRST_TOKEN;
+ ASSERT(res >= FIRST_TOKEN && res <= LAST_TOKEN);
+ return static_cast<Token::Value>(res);
+}
+
+
+const char* BinaryOpStub::StateToName(State state) {
+ switch (state) {
+ case NONE:
+ return "None";
+ case SMI:
+ return "Smi";
+ case INT32:
+ return "Int32";
+ case NUMBER:
+ return "Number";
+ case STRING:
+ return "String";
+ case GENERIC:
+ return "Generic";
+ }
+ return "";
+}
+
+
+void BinaryOpStub::UpdateStatus(Handle<Object> left,
+ Handle<Object> right,
+ Maybe<Handle<Object> > result) {
+ int old_state = GetExtraICState();
+
+ UpdateStatus(left, &left_state_);
+ UpdateStatus(right, &right_state_);
+
+ int32_t value;
+ bool new_has_fixed_right_arg =
+ right->ToInt32(&value) && can_encode_arg_value(value) &&
+ (left_state_ == SMI || left_state_ == INT32) &&
+ (result_state_ == NONE || !fixed_right_arg_.has_value);
+
+ fixed_right_arg_ = Maybe<int32_t>(new_has_fixed_right_arg, value);
+
+ if (result.has_value) UpdateStatus(result.value, &result_state_);
+
+ State max_input = Max(left_state_, right_state_);
+
+ if (!has_int_result() && op_ != Token::SHR &&
+ max_input <= NUMBER && max_input > result_state_) {
+ result_state_ = max_input;
+ }
+
+ ASSERT(result_state_ <= (has_int_result() ? INT32 : NUMBER) ||
+ op_ == Token::ADD);
+
+ if (old_state == GetExtraICState()) {
+ // Tagged operations can lead to non-truncating HChanges
+ if (left->IsUndefined() || left->IsBoolean()) {
+ left_state_ = GENERIC;
+ } else if (right->IsUndefined() || right->IsBoolean()) {
+ right_state_ = GENERIC;
+ } else {
+ // Since the FPU is too precise, we might bail out on numbers which would
+ // actually truncate with 64-bit precision.
+ ASSERT(!CpuFeatures::IsSupported(SSE2) &&
+ result_state_ <= INT32);
+ result_state_ = NUMBER;
+ }
+ }
+}
+
+
+void BinaryOpStub::UpdateStatus(Handle<Object> object,
+ State* state) {
+ bool is_truncating = (op_ == Token::BIT_AND || op_ == Token::BIT_OR ||
+ op_ == Token::BIT_XOR || op_ == Token::SAR ||
+ op_ == Token::SHL || op_ == Token::SHR);
+ v8::internal::TypeInfo type = v8::internal::TypeInfo::FromValue(object);
+ if (object->IsBoolean() && is_truncating) {
+ // Booleans are converted by truncation in HChange.
+ type = TypeInfo::Integer32();
+ }
+ if (object->IsUndefined()) {
+ // Undefined will be automatically truncated for us by HChange.
+ type = is_truncating ? TypeInfo::Integer32() : TypeInfo::Double();
+ }
+ State int_state = SmiValuesAre32Bits() ? NUMBER : INT32;
+ State new_state = NONE;
+ if (type.IsSmi()) {
+ new_state = SMI;
+ } else if (type.IsInteger32()) {
+ new_state = int_state;
+ } else if (type.IsNumber()) {
+ new_state = NUMBER;
+ } else if (object->IsString() && operation() == Token::ADD) {
+ new_state = STRING;
+ } else {
+ new_state = GENERIC;
+ }
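+ // Mixing numeric and non-numeric feedback (String/Generic) collapses the
+ // new state to GENERIC.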
+ if ((new_state <= NUMBER && *state > NUMBER) ||
+ (new_state > NUMBER && *state <= NUMBER && *state != NONE)) {
+ new_state = GENERIC;
+ }
+ *state = Max(*state, new_state);
+}
+
+
+Handle<Type> BinaryOpStub::StateToType(State state,
+ Isolate* isolate) {
+ Handle<Type> t = handle(Type::None(), isolate);
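+ // Each case unions in its own type and, via fall-through, every smaller
+ // numeric state.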
+ switch (state) {
+ case NUMBER:
+ t = handle(Type::Union(t, handle(Type::Double(), isolate)), isolate);
+ // Fall through.
+ case INT32:
+ t = handle(Type::Union(t, handle(Type::Signed32(), isolate)), isolate);
+ // Fall through.
+ case SMI:
+ t = handle(Type::Union(t, handle(Type::Smi(), isolate)), isolate);
+ break;
+
+ case STRING:
+ t = handle(Type::Union(t, handle(Type::String(), isolate)), isolate);
+ break;
+ case GENERIC:
+ return handle(Type::Any(), isolate);
+ case NONE:
+ break;
+ }
+ return t;
+}
+
+
+Handle<Type> BinaryOpStub::GetLeftType(Isolate* isolate) const {
+ return StateToType(left_state_, isolate);
+}
+
+
+Handle<Type> BinaryOpStub::GetRightType(Isolate* isolate) const {
+ return StateToType(right_state_, isolate);
+}
+
+
+Handle<Type> BinaryOpStub::GetResultType(Isolate* isolate) const {
+ if (HasSideEffects(isolate)) return StateToType(NONE, isolate);
+ if (result_state_ == GENERIC && op_ == Token::ADD) {
+ return handle(Type::Union(handle(Type::Number(), isolate),
+ handle(Type::String(), isolate)), isolate);
+ }
+ ASSERT(result_state_ != GENERIC);
+ if (result_state_ == NUMBER && op_ == Token::SHR) {
+ return handle(Type::Unsigned32(), isolate);
+ }
+ return StateToType(result_state_, isolate);
+}
+
+
+InlineCacheState ICCompareStub::GetICState() {
+ CompareIC::State state = Max(left_, right_);
+ switch (state) {
+ case CompareIC::UNINITIALIZED:
+ return ::v8::internal::UNINITIALIZED;
+ case CompareIC::SMI:
+ case CompareIC::NUMBER:
+ case CompareIC::INTERNALIZED_STRING:
+ case CompareIC::STRING:
+ case CompareIC::UNIQUE_NAME:
+ case CompareIC::OBJECT:
+ case CompareIC::KNOWN_OBJECT:
+ return MONOMORPHIC;
+ case CompareIC::GENERIC:
+ return ::v8::internal::GENERIC;
+ }
+ UNREACHABLE();
+ return ::v8::internal::UNINITIALIZED;
+}
+
+
void ICCompareStub::AddToSpecialCache(Handle<Code> new_object) {
ASSERT(*known_map_ != NULL);
Isolate* isolate = new_object->GetIsolate();
Factory* factory = isolate->factory();
return Map::UpdateCodeCache(known_map_,
strict() ?
- factory->strict_compare_ic_symbol() :
- factory->compare_ic_symbol(),
+ factory->strict_compare_ic_string() :
+ factory->compare_ic_string(),
new_object);
}
-bool ICCompareStub::FindCodeInSpecialCache(Code** code_out) {
- Isolate* isolate = known_map_->GetIsolate();
+bool ICCompareStub::FindCodeInSpecialCache(Code** code_out, Isolate* isolate) {
Factory* factory = isolate->factory();
Code::Flags flags = Code::ComputeFlags(
- static_cast<Code::Kind>(GetCodeKind()),
+ GetCodeKind(),
UNINITIALIZED);
ASSERT(op_ == Token::EQ || op_ == Token::EQ_STRICT);
Handle<Object> probe(
known_map_->FindInCodeCache(
strict() ?
- *factory->strict_compare_ic_symbol() :
- *factory->compare_ic_symbol(),
- flags));
+ *factory->strict_compare_ic_string() :
+ *factory->compare_ic_string(),
+ flags),
+ isolate);
if (probe->IsCode()) {
*code_out = Code::cast(*probe);
- ASSERT(op_ == (*code_out)->compare_operation() + Token::EQ);
+#ifdef DEBUG
+ Token::Value cached_op;
+ ICCompareStub::DecodeMinorKey((*code_out)->stub_info(), NULL, NULL, NULL,
+ &cached_op);
+ ASSERT(op_ == cached_op);
+#endif
return true;
}
return false;
@@ -204,7 +742,33 @@ bool ICCompareStub::FindCodeInSpecialCache(Code** code_out) {
int ICCompareStub::MinorKey() {
- return OpField::encode(op_ - Token::EQ) | StateField::encode(state_);
+ return OpField::encode(op_ - Token::EQ) |
+ LeftStateField::encode(left_) |
+ RightStateField::encode(right_) |
+ HandlerStateField::encode(state_);
+}
+
+
+void ICCompareStub::DecodeMinorKey(int minor_key,
+ CompareIC::State* left_state,
+ CompareIC::State* right_state,
+ CompareIC::State* handler_state,
+ Token::Value* op) {
+ if (left_state) {
+ *left_state =
+ static_cast<CompareIC::State>(LeftStateField::decode(minor_key));
+ }
+ if (right_state) {
+ *right_state =
+ static_cast<CompareIC::State>(RightStateField::decode(minor_key));
+ }
+ if (handler_state) {
+ *handler_state =
+ static_cast<CompareIC::State>(HandlerStateField::decode(minor_key));
+ }
+ if (op) {
+ *op = static_cast<Token::Value>(OpField::decode(minor_key) + Token::EQ);
+ }
}
@@ -213,31 +777,139 @@ void ICCompareStub::Generate(MacroAssembler* masm) {
case CompareIC::UNINITIALIZED:
GenerateMiss(masm);
break;
- case CompareIC::SMIS:
+ case CompareIC::SMI:
GenerateSmis(masm);
break;
- case CompareIC::HEAP_NUMBERS:
- GenerateHeapNumbers(masm);
+ case CompareIC::NUMBER:
+ GenerateNumbers(masm);
break;
- case CompareIC::STRINGS:
+ case CompareIC::STRING:
GenerateStrings(masm);
break;
- case CompareIC::SYMBOLS:
- GenerateSymbols(masm);
+ case CompareIC::INTERNALIZED_STRING:
+ GenerateInternalizedStrings(masm);
break;
- case CompareIC::OBJECTS:
+ case CompareIC::UNIQUE_NAME:
+ GenerateUniqueNames(masm);
+ break;
+ case CompareIC::OBJECT:
GenerateObjects(masm);
break;
- case CompareIC::KNOWN_OBJECTS:
+ case CompareIC::KNOWN_OBJECT:
ASSERT(*known_map_ != NULL);
GenerateKnownObjects(masm);
break;
- default:
- UNREACHABLE();
+ case CompareIC::GENERIC:
+ GenerateGeneric(masm);
+ break;
}
}
+void CompareNilICStub::UpdateStatus(Handle<Object> object) {
+ ASSERT(!state_.Contains(GENERIC));
+ State old_state(state_);
+ if (object->IsNull()) {
+ state_.Add(NULL_TYPE);
+ } else if (object->IsUndefined()) {
+ state_.Add(UNDEFINED);
+ } else if (object->IsUndetectableObject() ||
+ object->IsOddball() ||
+ !object->IsHeapObject()) {
+ state_.RemoveAll();
+ state_.Add(GENERIC);
+ } else if (IsMonomorphic()) {
+ state_.RemoveAll();
+ state_.Add(GENERIC);
+ } else {
+ state_.Add(MONOMORPHIC_MAP);
+ }
+ TraceTransition(old_state, state_);
+}
+
+
+template<class StateType>
+void HydrogenCodeStub::TraceTransition(StateType from, StateType to) {
+ // Note: Although a no-op transition is semantically OK, it is hinting at a
+ // bug somewhere in our state transition machinery.
+ ASSERT(from != to);
+ #ifdef DEBUG
+ if (!FLAG_trace_ic) return;
+ char buffer[100];
+ NoAllocationStringAllocator allocator(buffer,
+ static_cast<unsigned>(sizeof(buffer)));
+ StringStream stream(&allocator);
+ stream.Add("[");
+ PrintBaseName(&stream);
+ stream.Add(": ");
+ from.Print(&stream);
+ stream.Add("=>");
+ to.Print(&stream);
+ stream.Add("]\n");
+ stream.OutputToStdOut();
+ #endif
+}
+
+
+void CompareNilICStub::PrintBaseName(StringStream* stream) {
+ CodeStub::PrintBaseName(stream);
+ stream->Add((nil_value_ == kNullValue) ? "(NullValue)":
+ "(UndefinedValue)");
+}
+
+
+void CompareNilICStub::PrintState(StringStream* stream) {
+ state_.Print(stream);
+}
+
+
+void CompareNilICStub::State::Print(StringStream* stream) const {
+ stream->Add("(");
+ SimpleListPrinter printer(stream);
+ if (IsEmpty()) printer.Add("None");
+ if (Contains(UNDEFINED)) printer.Add("Undefined");
+ if (Contains(NULL_TYPE)) printer.Add("Null");
+ if (Contains(MONOMORPHIC_MAP)) printer.Add("MonomorphicMap");
+ if (Contains(GENERIC)) printer.Add("Generic");
+ stream->Add(")");
+}
+
+
+Handle<Type> CompareNilICStub::GetType(
+ Isolate* isolate,
+ Handle<Map> map) {
+ if (state_.Contains(CompareNilICStub::GENERIC)) {
+ return handle(Type::Any(), isolate);
+ }
+
+ Handle<Type> result(Type::None(), isolate);
+ if (state_.Contains(CompareNilICStub::UNDEFINED)) {
+ result = handle(Type::Union(result, handle(Type::Undefined(), isolate)),
+ isolate);
+ }
+ if (state_.Contains(CompareNilICStub::NULL_TYPE)) {
+ result = handle(Type::Union(result, handle(Type::Null(), isolate)),
+ isolate);
+ }
+ if (state_.Contains(CompareNilICStub::MONOMORPHIC_MAP)) {
+ Type* type = map.is_null() ? Type::Detectable() : Type::Class(map);
+ result = handle(Type::Union(result, handle(type, isolate)), isolate);
+ }
+
+ return result;
+}
+
+
+Handle<Type> CompareNilICStub::GetInputType(
+ Isolate* isolate,
+ Handle<Map> map) {
+ Handle<Type> output_type = GetType(isolate, map);
+ Handle<Type> nil_type = handle(nil_value_ == kNullValue
+ ? Type::Null() : Type::Undefined(), isolate);
+ return handle(Type::Union(output_type, nil_type), isolate);
+}
+
+
void InstanceofStub::PrintName(StringStream* stream) {
const char* args = "";
if (HasArgsInRegisters()) {
@@ -269,36 +941,14 @@ void JSEntryStub::FinishCode(Handle<Code> code) {
}
-void KeyedLoadElementStub::Generate(MacroAssembler* masm) {
- switch (elements_kind_) {
- case FAST_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- KeyedLoadStubCompiler::GenerateLoadFastElement(masm);
- break;
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(masm);
- break;
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case EXTERNAL_PIXEL_ELEMENTS:
- KeyedLoadStubCompiler::GenerateLoadExternalArray(masm, elements_kind_);
- break;
- case DICTIONARY_ELEMENTS:
- KeyedLoadStubCompiler::GenerateLoadDictionaryElement(masm);
- break;
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
+void KeyedLoadDictionaryElementStub::Generate(MacroAssembler* masm) {
+ KeyedLoadStubCompiler::GenerateLoadDictionaryElement(masm);
+}
+
+
+void CreateAllocationSiteStub::GenerateAheadOfTime(Isolate* isolate) {
+ CreateAllocationSiteStub stub;
+ stub.GetCode(isolate)->set_is_pregenerated(true);
}
@@ -307,19 +957,9 @@ void KeyedStoreElementStub::Generate(MacroAssembler* masm) {
case FAST_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_SMI_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS: {
- KeyedStoreStubCompiler::GenerateStoreFastElement(masm,
- is_js_array_,
- elements_kind_,
- grow_mode_);
- }
- break;
+ case FAST_HOLEY_SMI_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
- KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(masm,
- is_js_array_,
- grow_mode_);
- break;
case EXTERNAL_BYTE_ELEMENTS:
case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
case EXTERNAL_SHORT_ELEMENTS:
@@ -329,7 +969,7 @@ void KeyedStoreElementStub::Generate(MacroAssembler* masm) {
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case EXTERNAL_PIXEL_ELEMENTS:
- KeyedStoreStubCompiler::GenerateStoreExternalArray(masm, elements_kind_);
+ UNREACHABLE();
break;
case DICTIONARY_ELEMENTS:
KeyedStoreStubCompiler::GenerateStoreDictionaryElement(masm);
@@ -365,40 +1005,36 @@ void CallConstructStub::PrintName(StringStream* stream) {
}
-void ToBooleanStub::PrintName(StringStream* stream) {
- stream->Add("ToBooleanStub_");
- types_.Print(stream);
+bool ToBooleanStub::UpdateStatus(Handle<Object> object) {
+ Types old_types(types_);
+ bool to_boolean_value = types_.UpdateStatus(object);
+ TraceTransition(old_types, types_);
+ return to_boolean_value;
}
-void ToBooleanStub::Types::Print(StringStream* stream) const {
- if (IsEmpty()) stream->Add("None");
- if (Contains(UNDEFINED)) stream->Add("Undefined");
- if (Contains(BOOLEAN)) stream->Add("Bool");
- if (Contains(NULL_TYPE)) stream->Add("Null");
- if (Contains(SMI)) stream->Add("Smi");
- if (Contains(SPEC_OBJECT)) stream->Add("SpecObject");
- if (Contains(STRING)) stream->Add("String");
- if (Contains(HEAP_NUMBER)) stream->Add("HeapNumber");
+void ToBooleanStub::PrintState(StringStream* stream) {
+ types_.Print(stream);
}
-void ToBooleanStub::Types::TraceTransition(Types to) const {
- if (!FLAG_trace_ic) return;
- char buffer[100];
- NoAllocationStringAllocator allocator(buffer,
- static_cast<unsigned>(sizeof(buffer)));
- StringStream stream(&allocator);
- stream.Add("[ToBooleanIC (");
- Print(&stream);
- stream.Add("->");
- to.Print(&stream);
- stream.Add(")]\n");
- stream.OutputToStdOut();
+void ToBooleanStub::Types::Print(StringStream* stream) const {
+ stream->Add("(");
+ SimpleListPrinter printer(stream);
+ if (IsEmpty()) printer.Add("None");
+ if (Contains(UNDEFINED)) printer.Add("Undefined");
+ if (Contains(BOOLEAN)) printer.Add("Bool");
+ if (Contains(NULL_TYPE)) printer.Add("Null");
+ if (Contains(SMI)) printer.Add("Smi");
+ if (Contains(SPEC_OBJECT)) printer.Add("SpecObject");
+ if (Contains(STRING)) printer.Add("String");
+ if (Contains(SYMBOL)) printer.Add("Symbol");
+ if (Contains(HEAP_NUMBER)) printer.Add("HeapNumber");
+ stream->Add(")");
}
-bool ToBooleanStub::Types::Record(Handle<Object> object) {
+bool ToBooleanStub::Types::UpdateStatus(Handle<Object> object) {
if (object->IsUndefined()) {
Add(UNDEFINED);
return false;
@@ -418,11 +1054,14 @@ bool ToBooleanStub::Types::Record(Handle<Object> object) {
Add(STRING);
return !object->IsUndetectableObject() &&
String::cast(*object)->length() != 0;
+ } else if (object->IsSymbol()) {
+ Add(SYMBOL);
+ return true;
} else if (object->IsHeapNumber()) {
ASSERT(!object->IsUndetectableObject());
Add(HEAP_NUMBER);
double value = HeapNumber::cast(*object)->value();
- return value != 0 && !isnan(value);
+ return value != 0 && !std::isnan(value);
} else {
// We should never see an internal object at runtime here!
UNREACHABLE();
@@ -434,6 +1073,7 @@ bool ToBooleanStub::Types::Record(Handle<Object> object) {
bool ToBooleanStub::Types::NeedsMap() const {
return Contains(ToBooleanStub::SPEC_OBJECT)
|| Contains(ToBooleanStub::STRING)
+ || Contains(ToBooleanStub::SYMBOL)
|| Contains(ToBooleanStub::HEAP_NUMBER);
}
@@ -444,61 +1084,88 @@ bool ToBooleanStub::Types::CanBeUndetectable() const {
}
-void ElementsTransitionAndStoreStub::Generate(MacroAssembler* masm) {
- Label fail;
- ASSERT(!IsFastHoleyElementsKind(from_) || IsFastHoleyElementsKind(to_));
- if (!FLAG_trace_elements_transitions) {
- if (IsFastSmiOrObjectElementsKind(to_)) {
- if (IsFastSmiOrObjectElementsKind(from_)) {
- ElementsTransitionGenerator::
- GenerateMapChangeElementsTransition(masm);
- } else if (IsFastDoubleElementsKind(from_)) {
- ASSERT(!IsFastSmiElementsKind(to_));
- ElementsTransitionGenerator::GenerateDoubleToObject(masm, &fail);
- } else {
- UNREACHABLE();
- }
- KeyedStoreStubCompiler::GenerateStoreFastElement(masm,
- is_jsarray_,
- to_,
- grow_mode_);
- } else if (IsFastSmiElementsKind(from_) &&
- IsFastDoubleElementsKind(to_)) {
- ElementsTransitionGenerator::GenerateSmiToDouble(masm, &fail);
- KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(masm,
- is_jsarray_,
- grow_mode_);
- } else if (IsFastDoubleElementsKind(from_)) {
- ASSERT(to_ == FAST_HOLEY_DOUBLE_ELEMENTS);
- ElementsTransitionGenerator::
- GenerateMapChangeElementsTransition(masm);
- } else {
- UNREACHABLE();
- }
+void StubFailureTrampolineStub::GenerateAheadOfTime(Isolate* isolate) {
+ StubFailureTrampolineStub stub1(NOT_JS_FUNCTION_STUB_MODE);
+ StubFailureTrampolineStub stub2(JS_FUNCTION_STUB_MODE);
+ stub1.GetCode(isolate)->set_is_pregenerated(true);
+ stub2.GetCode(isolate)->set_is_pregenerated(true);
+}
+
+
+void ProfileEntryHookStub::EntryHookTrampoline(intptr_t function,
+ intptr_t stack_pointer,
+ Isolate* isolate) {
+ FunctionEntryHook entry_hook = isolate->function_entry_hook();
+ ASSERT(entry_hook != NULL);
+ entry_hook(function, stack_pointer);
+}
+
+
+static void InstallDescriptor(Isolate* isolate, HydrogenCodeStub* stub) {
+ int major_key = stub->MajorKey();
+ CodeStubInterfaceDescriptor* descriptor =
+ isolate->code_stub_interface_descriptor(major_key);
+ if (!descriptor->initialized()) {
+ stub->InitializeInterfaceDescriptor(isolate, descriptor);
}
- masm->bind(&fail);
- KeyedStoreIC::GenerateRuntimeSetProperty(masm, strict_mode_);
}
-FunctionEntryHook ProfileEntryHookStub::entry_hook_ = NULL;
+void ArrayConstructorStubBase::InstallDescriptors(Isolate* isolate) {
+ ArrayNoArgumentConstructorStub stub1(GetInitialFastElementsKind());
+ InstallDescriptor(isolate, &stub1);
+ ArraySingleArgumentConstructorStub stub2(GetInitialFastElementsKind());
+ InstallDescriptor(isolate, &stub2);
+ ArrayNArgumentsConstructorStub stub3(GetInitialFastElementsKind());
+ InstallDescriptor(isolate, &stub3);
+}
-void ProfileEntryHookStub::EntryHookTrampoline(intptr_t function,
- intptr_t stack_pointer) {
- if (entry_hook_ != NULL)
- entry_hook_(function, stack_pointer);
+void NumberToStringStub::InstallDescriptors(Isolate* isolate) {
+ NumberToStringStub stub;
+ InstallDescriptor(isolate, &stub);
}
-bool ProfileEntryHookStub::SetFunctionEntryHook(FunctionEntryHook entry_hook) {
- // We don't allow setting a new entry hook over one that's
- // already active, as the hooks won't stack.
- if (entry_hook != 0 && entry_hook_ != 0)
- return false;
+void FastNewClosureStub::InstallDescriptors(Isolate* isolate) {
+ FastNewClosureStub stub(STRICT_MODE, false);
+ InstallDescriptor(isolate, &stub);
+}
+
+
+ArrayConstructorStub::ArrayConstructorStub(Isolate* isolate)
+ : argument_count_(ANY) {
+ ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
+}
+
+
+ArrayConstructorStub::ArrayConstructorStub(Isolate* isolate,
+ int argument_count) {
+ if (argument_count == 0) {
+ argument_count_ = NONE;
+ } else if (argument_count == 1) {
+ argument_count_ = ONE;
+ } else if (argument_count >= 2) {
+ argument_count_ = MORE_THAN_ONE;
+ } else {
+ UNREACHABLE();
+ }
+ ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
+}
+
+
+void InternalArrayConstructorStubBase::InstallDescriptors(Isolate* isolate) {
+ InternalArrayNoArgumentConstructorStub stub1(FAST_ELEMENTS);
+ InstallDescriptor(isolate, &stub1);
+ InternalArraySingleArgumentConstructorStub stub2(FAST_ELEMENTS);
+ InstallDescriptor(isolate, &stub2);
+ InternalArrayNArgumentsConstructorStub stub3(FAST_ELEMENTS);
+ InstallDescriptor(isolate, &stub3);
+}
- entry_hook_ = entry_hook;
- return true;
+InternalArrayConstructorStub::InternalArrayConstructorStub(
+ Isolate* isolate) {
+ InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
}
diff --git a/deps/v8/src/code-stubs.h b/deps/v8/src/code-stubs.h
index a84384172..80d99d8b6 100644
--- a/deps/v8/src/code-stubs.h
+++ b/deps/v8/src/code-stubs.h
@@ -29,8 +29,10 @@
#define V8_CODE_STUBS_H_
#include "allocation.h"
-#include "globals.h"
+#include "assembler.h"
#include "codegen.h"
+#include "globals.h"
+#include "macro-assembler.h"
namespace v8 {
namespace internal {
@@ -39,14 +41,17 @@ namespace internal {
#define CODE_STUB_LIST_ALL_PLATFORMS(V) \
V(CallFunction) \
V(CallConstruct) \
- V(UnaryOp) \
V(BinaryOp) \
V(StringAdd) \
V(SubString) \
V(StringCompare) \
V(Compare) \
V(CompareIC) \
+ V(CompareNilIC) \
V(MathPow) \
+ V(StringLength) \
+ V(FunctionPrototype) \
+ V(StoreArrayLength) \
V(RecordWrite) \
V(StoreBufferOverflow) \
V(RegExpExec) \
@@ -61,35 +66,50 @@ namespace internal {
V(FastNewBlockContext) \
V(FastCloneShallowArray) \
V(FastCloneShallowObject) \
+ V(CreateAllocationSite) \
V(ToBoolean) \
V(ToNumber) \
V(ArgumentsAccess) \
V(RegExpConstructResult) \
V(NumberToString) \
+ V(DoubleToI) \
V(CEntry) \
V(JSEntry) \
V(KeyedLoadElement) \
+ V(ArrayNoArgumentConstructor) \
+ V(ArraySingleArgumentConstructor) \
+ V(ArrayNArgumentsConstructor) \
+ V(InternalArrayNoArgumentConstructor) \
+ V(InternalArraySingleArgumentConstructor) \
+ V(InternalArrayNArgumentsConstructor) \
V(KeyedStoreElement) \
V(DebuggerStatement) \
- V(StringDictionaryLookup) \
+ V(NameDictionaryLookup) \
V(ElementsTransitionAndStore) \
+ V(TransitionElementsKind) \
V(StoreArrayLiteralElement) \
- V(ProfileEntryHook)
+ V(StubFailureTrampoline) \
+ V(ArrayConstructor) \
+ V(InternalArrayConstructor) \
+ V(ProfileEntryHook) \
+ V(StoreGlobal) \
+ /* IC Handler stubs */ \
+ V(LoadField) \
+ V(KeyedLoadField)
// List of code stubs only used on ARM platforms.
-#ifdef V8_TARGET_ARCH_ARM
+#if V8_TARGET_ARCH_ARM
#define CODE_STUB_LIST_ARM(V) \
V(GetProperty) \
V(SetProperty) \
V(InvokeBuiltin) \
- V(RegExpCEntry) \
V(DirectCEntry)
#else
#define CODE_STUB_LIST_ARM(V)
#endif
// List of code stubs only used on MIPS platforms.
-#ifdef V8_TARGET_ARCH_MIPS
+#if V8_TARGET_ARCH_MIPS
#define CODE_STUB_LIST_MIPS(V) \
V(RegExpCEntry) \
V(DirectCEntry)
@@ -105,8 +125,6 @@ namespace internal {
// Mode to overwrite BinaryExpression values.
enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };
-enum UnaryOverwriteMode { UNARY_OVERWRITE, UNARY_NO_OVERWRITE };
-
 // CodeStub is the base class of all stubs.
class CodeStub BASE_EMBEDDED {
@@ -120,8 +138,10 @@ class CodeStub BASE_EMBEDDED {
};
// Retrieve the code for the stub. Generate the code if needed.
- Handle<Code> GetCode();
+ Handle<Code> GetCode(Isolate* isolate);
+ // Retrieve the code for the stub, make and return a copy of the code.
+ Handle<Code> GetCodeCopyFromTemplate(Isolate* isolate);
static Major MajorKeyFromKey(uint32_t key) {
return static_cast<Major>(MajorKeyBits::decode(key));
}
@@ -138,18 +158,18 @@ class CodeStub BASE_EMBEDDED {
virtual ~CodeStub() {}
- bool CompilingCallsToThisStubIsGCSafe() {
- bool is_pregenerated = IsPregenerated();
+ bool CompilingCallsToThisStubIsGCSafe(Isolate* isolate) {
+ bool is_pregenerated = IsPregenerated(isolate);
Code* code = NULL;
- CHECK(!is_pregenerated || FindCodeInCache(&code));
+ CHECK(!is_pregenerated || FindCodeInCache(&code, isolate));
return is_pregenerated;
}
// See comment above, where Instanceof is defined.
- virtual bool IsPregenerated() { return false; }
+ virtual bool IsPregenerated(Isolate* isolate) { return false; }
- static void GenerateStubsAheadOfTime();
- static void GenerateFPStubs();
+ static void GenerateStubsAheadOfTime(Isolate* isolate);
+ static void GenerateFPStubs(Isolate* isolate);
// Some stubs put untagged junk on the stack that cannot be scanned by the
// GC. This means that we must be statically sure that no GC can occur while
@@ -160,22 +180,49 @@ class CodeStub BASE_EMBEDDED {
virtual bool SometimesSetsUpAFrame() { return true; }
// Lookup the code in the (possibly custom) cache.
- bool FindCodeInCache(Code** code_out);
+ bool FindCodeInCache(Code** code_out, Isolate* isolate);
+
+ // Returns information for computing the number key.
+ virtual Major MajorKey() = 0;
+ virtual int MinorKey() = 0;
+
+ virtual InlineCacheState GetICState() {
+ return UNINITIALIZED;
+ }
+ virtual Code::ExtraICState GetExtraICState() {
+ return Code::kNoExtraICState;
+ }
+ virtual Code::StubType GetStubType() {
+ return Code::NORMAL;
+ }
+ virtual int GetStubFlags() {
+ return -1;
+ }
+
+ virtual void PrintName(StringStream* stream);
+
+ // Returns a name for logging/debugging purposes.
+ SmartArrayPointer<const char> GetName();
protected:
static bool CanUseFPRegisters();
- private:
- // Nonvirtual wrapper around the stub-specific Generate function. Call
- // this function to set up the macro assembler and generate the code.
- void GenerateCode(MacroAssembler* masm);
-
// Generates the assembler code for the stub.
- virtual void Generate(MacroAssembler* masm) = 0;
+ virtual Handle<Code> GenerateCode(Isolate* isolate) = 0;
+
+ virtual void VerifyPlatformFeatures(Isolate* isolate);
+
+ // Returns whether the code generated for this stub needs to be allocated as
+ // a fixed (non-moveable) code object.
+ virtual bool NeedsImmovableCode() { return false; }
+ virtual void PrintBaseName(StringStream* stream);
+ virtual void PrintState(StringStream* stream) { }
+
+ private:
// Perform bookkeeping required after code generation when stub code is
// initially generated.
- void RecordCodeGeneration(Code* code, MacroAssembler* masm);
+ void RecordCodeGeneration(Code* code, Isolate* isolate);
// Finish the code object after it has been generated.
virtual void FinishCode(Handle<Code> code) { }
@@ -184,17 +231,8 @@ class CodeStub BASE_EMBEDDED {
// registering stub in the stub cache.
virtual void Activate(Code* code) { }
- // Returns information for computing the number key.
- virtual Major MajorKey() = 0;
- virtual int MinorKey() = 0;
-
// BinaryOpStub needs to override this.
- virtual int GetCodeKind();
-
- // BinaryOpStub needs to override this.
- virtual InlineCacheState GetICState() {
- return UNINITIALIZED;
- }
+ virtual Code::Kind GetCodeKind() const;
// Add the code to a specialized cache, specific to an individual
// stub type. Please note, this method must add the code object to a
@@ -202,19 +240,13 @@ class CodeStub BASE_EMBEDDED {
virtual void AddToSpecialCache(Handle<Code> new_object) { }
// Find code in a specialized cache, work is delegated to the specific stub.
- virtual bool FindCodeInSpecialCache(Code** code_out) { return false; }
+ virtual bool FindCodeInSpecialCache(Code** code_out, Isolate* isolate) {
+ return false;
+ }
// If a stub uses a special cache override this.
virtual bool UseSpecialCache() { return false; }
- // Returns a name for logging/debugging purposes.
- SmartArrayPointer<const char> GetName();
- virtual void PrintName(StringStream* stream);
-
- // Returns whether the code generated for this stub needs to be allocated as
- // a fixed (non-moveable) code object.
- virtual bool NeedsImmovableCode() { return false; }
-
// Computes the key based on major and minor.
uint32_t GetKey() {
ASSERT(static_cast<int>(MajorKey()) < NUMBER_OF_IDS);
@@ -230,6 +262,122 @@ class CodeStub BASE_EMBEDDED {
};
+class PlatformCodeStub : public CodeStub {
+ public:
+ // Retrieve the code for the stub. Generate the code if needed.
+ virtual Handle<Code> GenerateCode(Isolate* isolate);
+
+ virtual Code::Kind GetCodeKind() const { return Code::STUB; }
+
+ protected:
+ // Generates the assembler code for the stub.
+ virtual void Generate(MacroAssembler* masm) = 0;
+};
+
+
+enum StubFunctionMode { NOT_JS_FUNCTION_STUB_MODE, JS_FUNCTION_STUB_MODE };
+
+
+struct CodeStubInterfaceDescriptor {
+ CodeStubInterfaceDescriptor();
+ int register_param_count_;
+ Register stack_parameter_count_;
+ // if hint_stack_parameter_count_ > 0, the code stub can optimize the
+ // return sequence. Default value is -1, which means it is ignored.
+ int hint_stack_parameter_count_;
+ StubFunctionMode function_mode_;
+ Register* register_params_;
+ Address deoptimization_handler_;
+
+ int environment_length() const {
+ if (stack_parameter_count_.is_valid()) {
+ return register_param_count_ + 1;
+ }
+ return register_param_count_;
+ }
+
+ bool initialized() const { return register_param_count_ >= 0; }
+
+ void SetMissHandler(ExternalReference handler) {
+ miss_handler_ = handler;
+ has_miss_handler_ = true;
+ }
+
+ ExternalReference miss_handler() {
+ ASSERT(has_miss_handler_);
+ return miss_handler_;
+ }
+
+ bool has_miss_handler() {
+ return has_miss_handler_;
+ }
+
+ private:
+ ExternalReference miss_handler_;
+ bool has_miss_handler_;
+};
+
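// A minimal sketch of how a platform port might fill in this descriptor for a
// single-register stub (the ia32 register name and the NULL deoptimization
// handler are illustrative assumptions; the real implementations live in the
// per-platform code-stubs-*.cc files):
//
//   void ToNumberStub::InitializeInterfaceDescriptor(
//       Isolate* isolate,
//       CodeStubInterfaceDescriptor* descriptor) {
//     static Register registers[] = { eax };  // the value to convert
//     descriptor->register_param_count_ = 1;
//     descriptor->register_params_ = registers;
//     descriptor->deoptimization_handler_ = NULL;
//   }
//
// Setting register_param_count_ to a non-negative value is what makes
// initialized() return true, so InstallDescriptor() only runs this once.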
+// A helper to make up for the fact that type Register is not fully
+// defined outside of the platform directories
+#define DESCRIPTOR_GET_PARAMETER_REGISTER(descriptor, index) \
+ ((index) == (descriptor)->register_param_count_) \
+ ? (descriptor)->stack_parameter_count_ \
+ : (descriptor)->register_params_[(index)]
+
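// Typical use (illustrative): walking all parameters of a descriptor. For
// stubs with a dynamic argument count, the index equal to
// register_param_count_ yields stack_parameter_count_:
//
//   for (int i = 0; i < descriptor->environment_length(); ++i) {
//     Register reg = DESCRIPTOR_GET_PARAMETER_REGISTER(descriptor, i);
//     // ... use reg ...
//   }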
+
+class HydrogenCodeStub : public CodeStub {
+ public:
+ enum InitializationState {
+ UNINITIALIZED,
+ INITIALIZED
+ };
+
+ explicit HydrogenCodeStub(InitializationState state = INITIALIZED) {
+ is_uninitialized_ = (state == UNINITIALIZED);
+ }
+
+ virtual Code::Kind GetCodeKind() const { return Code::STUB; }
+
+ CodeStubInterfaceDescriptor* GetInterfaceDescriptor(Isolate* isolate) {
+ return isolate->code_stub_interface_descriptor(MajorKey());
+ }
+
+ bool IsUninitialized() { return is_uninitialized_; }
+
+ template<class SubClass>
+ static Handle<Code> GetUninitialized(Isolate* isolate) {
+ SubClass::GenerateAheadOfTime(isolate);
+ return SubClass().GetCode(isolate);
+ }
+
+ virtual void InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) = 0;
+
+ // Retrieve the code for the stub. Generate the code if needed.
+ virtual Handle<Code> GenerateCode(Isolate* isolate) = 0;
+
+ virtual int NotMissMinorKey() = 0;
+
+ Handle<Code> GenerateLightweightMissCode(Isolate* isolate);
+
+ template<class StateType>
+ void TraceTransition(StateType from, StateType to);
+
+ private:
+ class MinorKeyBits: public BitField<int, 0, kStubMinorKeyBits - 1> {};
+ class IsMissBits: public BitField<bool, kStubMinorKeyBits - 1, 1> {};
+
+ void GenerateLightweightMiss(MacroAssembler* masm);
+ virtual int MinorKey() {
+ return IsMissBits::encode(is_uninitialized_) |
+ MinorKeyBits::encode(NotMissMinorKey());
+ }
+
+ bool is_uninitialized_;
+};
+
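// Sketch of the resulting key layout (bit widths follow from the BitField
// declarations above; kStubMinorKeyBits itself is defined elsewhere):
//
//   minor key = [ is-miss : 1 bit (top) | NotMissMinorKey() : kStubMinorKeyBits - 1 ]
//
// so the uninitialized (miss) variant and the initialized variant of the same
// stub always map to distinct entries in the code cache.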
+
// Helper interface to prepare to/restore after making runtime calls.
class RuntimeCallHelper {
public:
@@ -246,6 +394,20 @@ class RuntimeCallHelper {
DISALLOW_COPY_AND_ASSIGN(RuntimeCallHelper);
};
+
+// TODO(bmeurer): Move to the StringAddStub declaration once we're
+// done with the translation to a hydrogen code stub.
+enum StringAddFlags {
+ // Omit both parameter checks.
+ STRING_ADD_CHECK_NONE = 0,
+ // Check left parameter.
+ STRING_ADD_CHECK_LEFT = 1 << 0,
+ // Check right parameter.
+ STRING_ADD_CHECK_RIGHT = 1 << 1,
+ // Check both parameters.
+ STRING_ADD_CHECK_BOTH = STRING_ADD_CHECK_LEFT | STRING_ADD_CHECK_RIGHT
+};
+
} } // namespace v8::internal
#if V8_TARGET_ARCH_IA32
@@ -287,59 +449,76 @@ class NopRuntimeCallHelper : public RuntimeCallHelper {
};
-class StackCheckStub : public CodeStub {
+class ToNumberStub: public HydrogenCodeStub {
public:
- StackCheckStub() { }
+ ToNumberStub() { }
- void Generate(MacroAssembler* masm);
+ virtual Handle<Code> GenerateCode(Isolate* isolate);
+
+ virtual void InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor);
private:
- Major MajorKey() { return StackCheck; }
- int MinorKey() { return 0; }
+ Major MajorKey() { return ToNumber; }
+ int NotMissMinorKey() { return 0; }
};
-class InterruptStub : public CodeStub {
+class NumberToStringStub V8_FINAL : public HydrogenCodeStub {
public:
- InterruptStub() { }
+ NumberToStringStub() {}
- void Generate(MacroAssembler* masm);
+ virtual Handle<Code> GenerateCode(Isolate* isolate) V8_OVERRIDE;
- private:
- Major MajorKey() { return Interrupt; }
- int MinorKey() { return 0; }
-};
+ virtual void InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
+ static void InstallDescriptors(Isolate* isolate);
-class ToNumberStub: public CodeStub {
- public:
- ToNumberStub() { }
-
- void Generate(MacroAssembler* masm);
+ // Parameters accessed via CodeStubGraphBuilder::GetParameter()
+ static const int kNumber = 0;
private:
- Major MajorKey() { return ToNumber; }
- int MinorKey() { return 0; }
+ virtual Major MajorKey() V8_OVERRIDE { return NumberToString; }
+ virtual int NotMissMinorKey() V8_OVERRIDE { return 0; }
};
-class FastNewClosureStub : public CodeStub {
+class FastNewClosureStub : public HydrogenCodeStub {
public:
- explicit FastNewClosureStub(LanguageMode language_mode)
- : language_mode_(language_mode) { }
+ explicit FastNewClosureStub(LanguageMode language_mode, bool is_generator)
+ : language_mode_(language_mode),
+ is_generator_(is_generator) { }
- void Generate(MacroAssembler* masm);
+ virtual Handle<Code> GenerateCode(Isolate* isolate);
+
+ virtual void InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor);
+
+ static void InstallDescriptors(Isolate* isolate);
+
+ LanguageMode language_mode() const { return language_mode_; }
+ bool is_generator() const { return is_generator_; }
private:
+ class StrictModeBits: public BitField<bool, 0, 1> {};
+ class IsGeneratorBits: public BitField<bool, 1, 1> {};
+
Major MajorKey() { return FastNewClosure; }
- int MinorKey() { return language_mode_ == CLASSIC_MODE
- ? kNonStrictMode : kStrictMode; }
+ int NotMissMinorKey() {
+ return StrictModeBits::encode(language_mode_ != CLASSIC_MODE) |
+ IsGeneratorBits::encode(is_generator_);
+ }
LanguageMode language_mode_;
+ bool is_generator_;
};
-class FastNewContextStub : public CodeStub {
+class FastNewContextStub : public PlatformCodeStub {
public:
static const int kMaximumSlots = 64;
@@ -357,7 +536,7 @@ class FastNewContextStub : public CodeStub {
};
-class FastNewBlockContextStub : public CodeStub {
+class FastNewBlockContextStub : public PlatformCodeStub {
public:
static const int kMaximumSlots = 64;
@@ -374,61 +553,174 @@ class FastNewBlockContextStub : public CodeStub {
int MinorKey() { return slots_; }
};
+class StoreGlobalStub : public HydrogenCodeStub {
+ public:
+ StoreGlobalStub(StrictModeFlag strict_mode, bool is_constant) {
+ bit_field_ = StrictModeBits::encode(strict_mode) |
+ IsConstantBits::encode(is_constant);
+ }
+
+ virtual Handle<Code> GenerateCode(Isolate* isolate);
-class FastCloneShallowArrayStub : public CodeStub {
+ virtual void InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor);
+
+ virtual Code::Kind GetCodeKind() const { return Code::STORE_IC; }
+ virtual InlineCacheState GetICState() { return MONOMORPHIC; }
+ virtual Code::ExtraICState GetExtraICState() { return bit_field_; }
+
+ bool is_constant() {
+ return IsConstantBits::decode(bit_field_);
+ }
+ void set_is_constant(bool value) {
+ bit_field_ = IsConstantBits::update(bit_field_, value);
+ }
+
+ Representation representation() {
+ return Representation::FromKind(RepresentationBits::decode(bit_field_));
+ }
+ void set_representation(Representation r) {
+ bit_field_ = RepresentationBits::update(bit_field_, r.kind());
+ }
+
+ private:
+ virtual int NotMissMinorKey() { return GetExtraICState(); }
+ Major MajorKey() { return StoreGlobal; }
+
+ class StrictModeBits: public BitField<StrictModeFlag, 0, 1> {};
+ class IsConstantBits: public BitField<bool, 1, 1> {};
+ class RepresentationBits: public BitField<Representation::Kind, 2, 8> {};
+
+ int bit_field_;
+
+ DISALLOW_COPY_AND_ASSIGN(StoreGlobalStub);
+};
+
+
+class FastCloneShallowArrayStub : public HydrogenCodeStub {
public:
// Maximum length of copied elements array.
static const int kMaximumClonedLength = 8;
-
enum Mode {
CLONE_ELEMENTS,
CLONE_DOUBLE_ELEMENTS,
COPY_ON_WRITE_ELEMENTS,
- CLONE_ANY_ELEMENTS
+ CLONE_ANY_ELEMENTS,
+ LAST_CLONE_MODE = CLONE_ANY_ELEMENTS
};
- FastCloneShallowArrayStub(Mode mode, int length)
+ static const int kFastCloneModeCount = LAST_CLONE_MODE + 1;
+
+ FastCloneShallowArrayStub(Mode mode,
+ AllocationSiteMode allocation_site_mode,
+ int length)
: mode_(mode),
+ allocation_site_mode_(allocation_site_mode),
length_((mode == COPY_ON_WRITE_ELEMENTS) ? 0 : length) {
ASSERT_GE(length_, 0);
ASSERT_LE(length_, kMaximumClonedLength);
}
- void Generate(MacroAssembler* masm);
+ Mode mode() const { return mode_; }
+ int length() const { return length_; }
+ AllocationSiteMode allocation_site_mode() const {
+ return allocation_site_mode_;
+ }
+
+ ElementsKind ComputeElementsKind() const {
+ switch (mode()) {
+ case CLONE_ELEMENTS:
+ case COPY_ON_WRITE_ELEMENTS:
+ return FAST_ELEMENTS;
+ case CLONE_DOUBLE_ELEMENTS:
+ return FAST_DOUBLE_ELEMENTS;
+ case CLONE_ANY_ELEMENTS:
+ /*fall-through*/;
+ }
+ UNREACHABLE();
+ return LAST_ELEMENTS_KIND;
+ }
+
+ virtual Handle<Code> GenerateCode(Isolate* isolate);
+
+ virtual void InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor);
private:
Mode mode_;
+ AllocationSiteMode allocation_site_mode_;
int length_;
+ class AllocationSiteModeBits: public BitField<AllocationSiteMode, 0, 1> {};
+ class ModeBits: public BitField<Mode, 1, 4> {};
+ class LengthBits: public BitField<int, 5, 4> {};
+ // Ensure data fits within available bits.
+ STATIC_ASSERT(LAST_ALLOCATION_SITE_MODE == 1);
+ STATIC_ASSERT(kFastCloneModeCount < 16);
+ STATIC_ASSERT(kMaximumClonedLength < 16);
Major MajorKey() { return FastCloneShallowArray; }
- int MinorKey() {
- ASSERT(mode_ == 0 || mode_ == 1 || mode_ == 2 || mode_ == 3);
- return length_ * 4 + mode_;
+ int NotMissMinorKey() {
+ return AllocationSiteModeBits::encode(allocation_site_mode_)
+ | ModeBits::encode(mode_)
+ | LengthBits::encode(length_);
}
};
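// Key layout implied by the bit fields and STATIC_ASSERTs above: 1 bit of
// allocation-site mode + 4 bits of clone mode + 4 bits of length = 9 bits,
// which is why kFastCloneModeCount and kMaximumClonedLength must both stay
// below 16.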
-class FastCloneShallowObjectStub : public CodeStub {
+class FastCloneShallowObjectStub : public HydrogenCodeStub {
public:
// Maximum number of properties in copied object.
static const int kMaximumClonedProperties = 6;
- explicit FastCloneShallowObjectStub(int length) : length_(length) {
+ explicit FastCloneShallowObjectStub(int length)
+ : length_(length) {
ASSERT_GE(length_, 0);
ASSERT_LE(length_, kMaximumClonedProperties);
}
- void Generate(MacroAssembler* masm);
+ int length() const { return length_; }
+
+ virtual Handle<Code> GenerateCode(Isolate* isolate);
+
+ virtual void InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor);
private:
int length_;
Major MajorKey() { return FastCloneShallowObject; }
- int MinorKey() { return length_; }
+ int NotMissMinorKey() { return length_; }
+
+ DISALLOW_COPY_AND_ASSIGN(FastCloneShallowObjectStub);
+};
+
+
+class CreateAllocationSiteStub : public HydrogenCodeStub {
+ public:
+ explicit CreateAllocationSiteStub() { }
+
+ virtual Handle<Code> GenerateCode(Isolate* isolate);
+
+ virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE { return true; }
+
+ static void GenerateAheadOfTime(Isolate* isolate);
+
+ virtual void InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor);
+
+ private:
+ Major MajorKey() { return CreateAllocationSite; }
+ int NotMissMinorKey() { return 0; }
+
+ DISALLOW_COPY_AND_ASSIGN(CreateAllocationSiteStub);
};
-class InstanceofStub: public CodeStub {
+class InstanceofStub: public PlatformCodeStub {
public:
enum Flags {
kNoFlags = 0,
@@ -466,9 +758,49 @@ class InstanceofStub: public CodeStub {
};
-class MathPowStub: public CodeStub {
+enum AllocationSiteOverrideMode {
+ DONT_OVERRIDE,
+ DISABLE_ALLOCATION_SITES,
+ LAST_ALLOCATION_SITE_OVERRIDE_MODE = DISABLE_ALLOCATION_SITES
+};
+
+
+class ArrayConstructorStub: public PlatformCodeStub {
+ public:
+ enum ArgumentCountKey { ANY, NONE, ONE, MORE_THAN_ONE };
+ ArrayConstructorStub(Isolate* isolate, int argument_count);
+ explicit ArrayConstructorStub(Isolate* isolate);
+
+ void Generate(MacroAssembler* masm);
+
+ private:
+ void GenerateDispatchToArrayStub(MacroAssembler* masm,
+ AllocationSiteOverrideMode mode);
+
+ virtual CodeStub::Major MajorKey() { return ArrayConstructor; }
+ virtual int MinorKey() { return argument_count_; }
+
+ ArgumentCountKey argument_count_;
+};
+
+
+class InternalArrayConstructorStub: public PlatformCodeStub {
public:
- enum ExponentType { INTEGER, DOUBLE, TAGGED, ON_STACK};
+ explicit InternalArrayConstructorStub(Isolate* isolate);
+
+ void Generate(MacroAssembler* masm);
+
+ private:
+ virtual CodeStub::Major MajorKey() { return InternalArrayConstructor; }
+ virtual int MinorKey() { return 0; }
+
+ void GenerateCase(MacroAssembler* masm, ElementsKind kind);
+};
+
+
+class MathPowStub: public PlatformCodeStub {
+ public:
+ enum ExponentType { INTEGER, DOUBLE, TAGGED, ON_STACK };
explicit MathPowStub(ExponentType exponent_type)
: exponent_type_(exponent_type) { }
@@ -482,10 +814,374 @@ class MathPowStub: public CodeStub {
};
-class ICCompareStub: public CodeStub {
+class ICStub: public PlatformCodeStub {
+ public:
+ explicit ICStub(Code::Kind kind) : kind_(kind) { }
+ virtual Code::Kind GetCodeKind() const { return kind_; }
+ virtual InlineCacheState GetICState() { return MONOMORPHIC; }
+
+ bool Describes(Code* code) {
+ return GetMajorKey(code) == MajorKey() && code->stub_info() == MinorKey();
+ }
+
+ protected:
+ class KindBits: public BitField<Code::Kind, 0, 4> {};
+ virtual void FinishCode(Handle<Code> code) {
+ code->set_stub_info(MinorKey());
+ }
+ Code::Kind kind() { return kind_; }
+
+ virtual int MinorKey() {
+ return KindBits::encode(kind_);
+ }
+
+ private:
+ Code::Kind kind_;
+};
+
+
+class FunctionPrototypeStub: public ICStub {
+ public:
+ explicit FunctionPrototypeStub(Code::Kind kind) : ICStub(kind) { }
+ virtual void Generate(MacroAssembler* masm);
+
+ private:
+ virtual CodeStub::Major MajorKey() { return FunctionPrototype; }
+};
+
+
+class StringLengthStub: public ICStub {
+ public:
+ explicit StringLengthStub(Code::Kind kind) : ICStub(kind) { }
+ virtual void Generate(MacroAssembler* masm);
+
+ private:
+ STATIC_ASSERT(KindBits::kSize == 4);
+ virtual CodeStub::Major MajorKey() { return StringLength; }
+};
+
+
+class StoreICStub: public ICStub {
+ public:
+ StoreICStub(Code::Kind kind, StrictModeFlag strict_mode)
+ : ICStub(kind), strict_mode_(strict_mode) { }
+
+ protected:
+ virtual Code::ExtraICState GetExtraICState() {
+ return strict_mode_;
+ }
+
+ private:
+ STATIC_ASSERT(KindBits::kSize == 4);
+ class StrictModeBits: public BitField<bool, 4, 1> {};
+ virtual int MinorKey() {
+ return KindBits::encode(kind()) | StrictModeBits::encode(strict_mode_);
+ }
+
+ StrictModeFlag strict_mode_;
+};
+
+
+class StoreArrayLengthStub: public StoreICStub {
+ public:
+ explicit StoreArrayLengthStub(Code::Kind kind, StrictModeFlag strict_mode)
+ : StoreICStub(kind, strict_mode) { }
+ virtual void Generate(MacroAssembler* masm);
+
+ private:
+ virtual CodeStub::Major MajorKey() { return StoreArrayLength; }
+};
+
+
+class HICStub: public HydrogenCodeStub {
+ public:
+ virtual Code::Kind GetCodeKind() const { return kind(); }
+ virtual InlineCacheState GetICState() { return MONOMORPHIC; }
+
+ protected:
+ HICStub() { }
+ class KindBits: public BitField<Code::Kind, 0, 4> {};
+ virtual Code::Kind kind() const = 0;
+};
+
+
+class HandlerStub: public HICStub {
+ public:
+ virtual Code::Kind GetCodeKind() const { return Code::HANDLER; }
+ virtual int GetStubFlags() { return kind(); }
+
+ protected:
+ HandlerStub() : HICStub() { }
+};
+
+
+class LoadFieldStub: public HandlerStub {
+ public:
+ LoadFieldStub(bool inobject, int index, Representation representation)
+ : HandlerStub() {
+ Initialize(Code::LOAD_IC, inobject, index, representation);
+ }
+
+ virtual Handle<Code> GenerateCode(Isolate* isolate);
+
+ virtual void InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor);
+
+ Representation representation() {
+ if (unboxed_double()) return Representation::Double();
+ return Representation::Tagged();
+ }
+
+ virtual Code::Kind kind() const {
+ return KindBits::decode(bit_field_);
+ }
+
+ bool is_inobject() {
+ return InobjectBits::decode(bit_field_);
+ }
+
+ int offset() {
+ int index = IndexBits::decode(bit_field_);
+ int offset = index * kPointerSize;
+ if (is_inobject()) return offset;
+ return FixedArray::kHeaderSize + offset;
+ }
+
+ bool unboxed_double() {
+ return UnboxedDoubleBits::decode(bit_field_);
+ }
+
+ virtual Code::StubType GetStubType() { return Code::FIELD; }
+
+ protected:
+ LoadFieldStub() : HandlerStub() { }
+
+ void Initialize(Code::Kind kind,
+ bool inobject,
+ int index,
+ Representation representation) {
+ bool unboxed_double = FLAG_track_double_fields && representation.IsDouble();
+ bit_field_ = KindBits::encode(kind)
+ | InobjectBits::encode(inobject)
+ | IndexBits::encode(index)
+ | UnboxedDoubleBits::encode(unboxed_double);
+ }
+
+ private:
+ STATIC_ASSERT(KindBits::kSize == 4);
+ class InobjectBits: public BitField<bool, 4, 1> {};
+ class IndexBits: public BitField<int, 5, 11> {};
+ class UnboxedDoubleBits: public BitField<bool, 16, 1> {};
+ virtual CodeStub::Major MajorKey() { return LoadField; }
+ virtual int NotMissMinorKey() { return bit_field_; }
+
+ int bit_field_;
+};
+
+
+class KeyedLoadFieldStub: public LoadFieldStub {
+ public:
+ KeyedLoadFieldStub(bool inobject, int index, Representation representation)
+ : LoadFieldStub() {
+ Initialize(Code::KEYED_LOAD_IC, inobject, index, representation);
+ }
+
+ virtual void InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor);
+
+ virtual Handle<Code> GenerateCode(Isolate* isolate);
+
+ private:
+ virtual CodeStub::Major MajorKey() { return KeyedLoadField; }
+};
+
+
+class BinaryOpStub: public HydrogenCodeStub {
+ public:
+ BinaryOpStub(Token::Value op, OverwriteMode mode)
+ : HydrogenCodeStub(UNINITIALIZED), op_(op), mode_(mode) {
+ ASSERT(op <= LAST_TOKEN && op >= FIRST_TOKEN);
+ Initialize();
+ }
+
+ explicit BinaryOpStub(Code::ExtraICState state)
+ : op_(decode_token(OpBits::decode(state))),
+ mode_(OverwriteModeField::decode(state)),
+ fixed_right_arg_(
+ Maybe<int>(HasFixedRightArgBits::decode(state),
+ decode_arg_value(FixedRightArgValueBits::decode(state)))),
+ left_state_(LeftStateField::decode(state)),
+ right_state_(fixed_right_arg_.has_value
+ ? ((fixed_right_arg_.value <= Smi::kMaxValue) ? SMI : INT32)
+ : RightStateField::decode(state)),
+ result_state_(ResultStateField::decode(state)) {
+ // We don't deserialize the SSE2 Field, since this is only used to be able
+ // to include SSE2 as well as non-SSE2 versions in the snapshot. For code
+ // generation we always want it to reflect the current state.
+ ASSERT(!fixed_right_arg_.has_value ||
+ can_encode_arg_value(fixed_right_arg_.value));
+ }
+
+ static const int FIRST_TOKEN = Token::BIT_OR;
+ static const int LAST_TOKEN = Token::MOD;
+
+ static void GenerateAheadOfTime(Isolate* isolate);
+ virtual void InitializeInterfaceDescriptor(
+ Isolate* isolate, CodeStubInterfaceDescriptor* descriptor);
+ static void InitializeForIsolate(Isolate* isolate) {
+ BinaryOpStub binopStub(UNINITIALIZED);
+ binopStub.InitializeInterfaceDescriptor(
+ isolate, isolate->code_stub_interface_descriptor(CodeStub::BinaryOp));
+ }
+
+ virtual Code::Kind GetCodeKind() const { return Code::BINARY_OP_IC; }
+ virtual InlineCacheState GetICState() {
+ if (Max(left_state_, right_state_) == NONE) {
+ return ::v8::internal::UNINITIALIZED;
+ }
+ if (Max(left_state_, right_state_) == GENERIC) return MEGAMORPHIC;
+ return MONOMORPHIC;
+ }
+
+ virtual void VerifyPlatformFeatures(Isolate* isolate) V8_OVERRIDE {
+ ASSERT(CpuFeatures::VerifyCrossCompiling(SSE2));
+ }
+
+ virtual Code::ExtraICState GetExtraICState() {
+ bool sse_field = Max(result_state_, Max(left_state_, right_state_)) > SMI &&
+ CpuFeatures::IsSafeForSnapshot(SSE2);
+
+ return OpBits::encode(encode_token(op_))
+ | LeftStateField::encode(left_state_)
+ | RightStateField::encode(fixed_right_arg_.has_value
+ ? NONE : right_state_)
+ | ResultStateField::encode(result_state_)
+ | HasFixedRightArgBits::encode(fixed_right_arg_.has_value)
+ | FixedRightArgValueBits::encode(fixed_right_arg_.has_value
+ ? encode_arg_value(
+ fixed_right_arg_.value)
+ : 0)
+ | SSE2Field::encode(sse_field)
+ | OverwriteModeField::encode(mode_);
+ }
+
+ bool CanReuseDoubleBox() {
+ return result_state_ <= NUMBER && result_state_ > SMI &&
+ ((left_state_ > SMI && left_state_ <= NUMBER &&
+ mode_ == OVERWRITE_LEFT) ||
+ (right_state_ > SMI && right_state_ <= NUMBER &&
+ mode_ == OVERWRITE_RIGHT));
+ }
+
+ bool HasSideEffects(Isolate* isolate) const {
+ Handle<Type> left = GetLeftType(isolate);
+ Handle<Type> right = GetRightType(isolate);
+ return left->Maybe(Type::Receiver()) || right->Maybe(Type::Receiver());
+ }
+
+ virtual Handle<Code> GenerateCode(Isolate* isolate);
+
+ Maybe<Handle<Object> > Result(Handle<Object> left,
+ Handle<Object> right,
+ Isolate* isolate);
+
+ Token::Value operation() const { return op_; }
+ OverwriteMode mode() const { return mode_; }
+ Maybe<int> fixed_right_arg() const { return fixed_right_arg_; }
+
+ Handle<Type> GetLeftType(Isolate* isolate) const;
+ Handle<Type> GetRightType(Isolate* isolate) const;
+ Handle<Type> GetResultType(Isolate* isolate) const;
+
+ void UpdateStatus(Handle<Object> left,
+ Handle<Object> right,
+ Maybe<Handle<Object> > result);
+
+ void PrintState(StringStream* stream);
+
+ private:
+ explicit BinaryOpStub(InitializationState state) : HydrogenCodeStub(state),
+ op_(Token::ADD),
+ mode_(NO_OVERWRITE) {
+ Initialize();
+ }
+ void Initialize();
+
+ enum State { NONE, SMI, INT32, NUMBER, STRING, GENERIC };
+
+ // We truncate the last bit of the token.
+ STATIC_ASSERT(LAST_TOKEN - FIRST_TOKEN < (1 << 5));
+ class LeftStateField: public BitField<State, 0, 3> {};
+ // When fixed right arg is set, we don't need to store the right state.
+ // Thus the two fields can overlap.
+ class HasFixedRightArgBits: public BitField<bool, 4, 1> {};
+ class FixedRightArgValueBits: public BitField<int, 5, 4> {};
+ class RightStateField: public BitField<State, 5, 3> {};
+ class ResultStateField: public BitField<State, 9, 3> {};
+ class SSE2Field: public BitField<bool, 12, 1> {};
+ class OverwriteModeField: public BitField<OverwriteMode, 13, 2> {};
+ class OpBits: public BitField<int, 15, 5> {};
+
+ virtual CodeStub::Major MajorKey() { return BinaryOp; }
+ virtual int NotMissMinorKey() { return GetExtraICState(); }
+
+ static Handle<Type> StateToType(State state,
+ Isolate* isolate);
+
+ static void Generate(Token::Value op,
+ State left,
+ int right,
+ State result,
+ OverwriteMode mode,
+ Isolate* isolate);
+
+ static void Generate(Token::Value op,
+ State left,
+ State right,
+ State result,
+ OverwriteMode mode,
+ Isolate* isolate);
+
+ void UpdateStatus(Handle<Object> object,
+ State* state);
+
+ bool can_encode_arg_value(int32_t value) const;
+ int encode_arg_value(int32_t value) const;
+ int32_t decode_arg_value(int value) const;
+ int encode_token(Token::Value op) const;
+ Token::Value decode_token(int op) const;
+
+ bool has_int_result() const {
+ return op_ == Token::BIT_XOR || op_ == Token::BIT_AND ||
+ op_ == Token::BIT_OR || op_ == Token::SAR || op_ == Token::SHL;
+ }
+
+ const char* StateToName(State state);
+
+ void PrintBaseName(StringStream* stream);
+
+ Token::Value op_;
+ OverwriteMode mode_;
+
+ Maybe<int> fixed_right_arg_;
+ State left_state_;
+ State right_state_;
+ State result_state_;
+};
+
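// How the overlapping right-operand fields above round-trip (NONE is the
// first State enumerator, so it encodes as zero):
//
//   encode: when fixed_right_arg_.has_value, GetExtraICState() writes NONE
//           into RightStateField (bits 5..7) and the encoded argument value
//           into FixedRightArgValueBits (bits 5..8), so only the value is
//           actually stored;
//   decode: the ExtraICState constructor checks HasFixedRightArgBits first
//           and, when it is set, reconstructs right_state_ as SMI or INT32
//           from the decoded argument value instead of RightStateField.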
+
+class ICCompareStub: public PlatformCodeStub {
public:
- ICCompareStub(Token::Value op, CompareIC::State state)
- : op_(op), state_(state) {
+ ICCompareStub(Token::Value op,
+ CompareIC::State left,
+ CompareIC::State right,
+ CompareIC::State handler)
+ : op_(op),
+ left_(left),
+ right_(right),
+ state_(handler) {
ASSERT(Token::IsCompareOp(op));
}
@@ -493,139 +1189,157 @@ class ICCompareStub: public CodeStub {
void set_known_map(Handle<Map> map) { known_map_ = map; }
+ static void DecodeMinorKey(int minor_key,
+ CompareIC::State* left_state,
+ CompareIC::State* right_state,
+ CompareIC::State* handler_state,
+ Token::Value* op);
+
+ static CompareIC::State CompareState(int minor_key) {
+ return static_cast<CompareIC::State>(HandlerStateField::decode(minor_key));
+ }
+
+ virtual InlineCacheState GetICState();
+
private:
class OpField: public BitField<int, 0, 3> { };
- class StateField: public BitField<int, 3, 5> { };
+ class LeftStateField: public BitField<int, 3, 4> { };
+ class RightStateField: public BitField<int, 7, 4> { };
+ class HandlerStateField: public BitField<int, 11, 4> { };
virtual void FinishCode(Handle<Code> code) {
- code->set_compare_state(state_);
- code->set_compare_operation(op_ - Token::EQ);
+ code->set_stub_info(MinorKey());
}
virtual CodeStub::Major MajorKey() { return CompareIC; }
virtual int MinorKey();
- virtual int GetCodeKind() { return Code::COMPARE_IC; }
+ virtual Code::Kind GetCodeKind() const { return Code::COMPARE_IC; }
void GenerateSmis(MacroAssembler* masm);
- void GenerateHeapNumbers(MacroAssembler* masm);
- void GenerateSymbols(MacroAssembler* masm);
+ void GenerateNumbers(MacroAssembler* masm);
+ void GenerateInternalizedStrings(MacroAssembler* masm);
void GenerateStrings(MacroAssembler* masm);
+ void GenerateUniqueNames(MacroAssembler* masm);
void GenerateObjects(MacroAssembler* masm);
void GenerateMiss(MacroAssembler* masm);
void GenerateKnownObjects(MacroAssembler* masm);
+ void GenerateGeneric(MacroAssembler* masm);
bool strict() const { return op_ == Token::EQ_STRICT; }
Condition GetCondition() const { return CompareIC::ComputeCondition(op_); }
virtual void AddToSpecialCache(Handle<Code> new_object);
- virtual bool FindCodeInSpecialCache(Code** code_out);
- virtual bool UseSpecialCache() { return state_ == CompareIC::KNOWN_OBJECTS; }
+ virtual bool FindCodeInSpecialCache(Code** code_out, Isolate* isolate);
+ virtual bool UseSpecialCache() { return state_ == CompareIC::KNOWN_OBJECT; }
Token::Value op_;
+ CompareIC::State left_;
+ CompareIC::State right_;
CompareIC::State state_;
Handle<Map> known_map_;
};
-// Flags that control the compare stub code generation.
-enum CompareFlags {
- NO_COMPARE_FLAGS = 0,
- NO_SMI_COMPARE_IN_STUB = 1 << 0,
- NO_NUMBER_COMPARE_IN_STUB = 1 << 1,
- CANT_BOTH_BE_NAN = 1 << 2
-};
+class CompareNilICStub : public HydrogenCodeStub {
+ public:
+ Handle<Type> GetType(Isolate* isolate, Handle<Map> map = Handle<Map>());
+ Handle<Type> GetInputType(Isolate* isolate, Handle<Map> map);
+ explicit CompareNilICStub(NilValue nil) : nil_value_(nil) { }
-enum NaNInformation {
- kBothCouldBeNaN,
- kCantBothBeNaN
-};
+ CompareNilICStub(Code::ExtraICState ic_state,
+ InitializationState init_state = INITIALIZED)
+ : HydrogenCodeStub(init_state),
+ nil_value_(NilValueField::decode(ic_state)),
+ state_(State(TypesField::decode(ic_state))) {
+ }
+ static Handle<Code> GetUninitialized(Isolate* isolate,
+ NilValue nil) {
+ return CompareNilICStub(nil, UNINITIALIZED).GetCode(isolate);
+ }
-class CompareStub: public CodeStub {
- public:
- CompareStub(Condition cc,
- bool strict,
- CompareFlags flags,
- Register lhs,
- Register rhs) :
- cc_(cc),
- strict_(strict),
- never_nan_nan_((flags & CANT_BOTH_BE_NAN) != 0),
- include_number_compare_((flags & NO_NUMBER_COMPARE_IN_STUB) == 0),
- include_smi_compare_((flags & NO_SMI_COMPARE_IN_STUB) == 0),
- lhs_(lhs),
- rhs_(rhs) { }
-
- CompareStub(Condition cc,
- bool strict,
- CompareFlags flags) :
- cc_(cc),
- strict_(strict),
- never_nan_nan_((flags & CANT_BOTH_BE_NAN) != 0),
- include_number_compare_((flags & NO_NUMBER_COMPARE_IN_STUB) == 0),
- include_smi_compare_((flags & NO_SMI_COMPARE_IN_STUB) == 0),
- lhs_(no_reg),
- rhs_(no_reg) { }
+ virtual void InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor);
- void Generate(MacroAssembler* masm);
+ static void InitializeForIsolate(Isolate* isolate) {
+ CompareNilICStub compare_stub(kNullValue, UNINITIALIZED);
+ compare_stub.InitializeInterfaceDescriptor(
+ isolate,
+ isolate->code_stub_interface_descriptor(CodeStub::CompareNilIC));
+ }
- private:
- Condition cc_;
- bool strict_;
- // Only used for 'equal' comparisons. Tells the stub that we already know
- // that at least one side of the comparison is not NaN. This allows the
- // stub to use object identity in the positive case. We ignore it when
- // generating the minor key for other comparisons to avoid creating more
- // stubs.
- bool never_nan_nan_;
- // Do generate the number comparison code in the stub. Stubs without number
- // comparison code is used when the number comparison has been inlined, and
- // the stub will be called if one of the operands is not a number.
- bool include_number_compare_;
-
- // Generate the comparison code for two smi operands in the stub.
- bool include_smi_compare_;
-
- // Register holding the left hand side of the comparison if the stub gives
- // a choice, no_reg otherwise.
-
- Register lhs_;
- // Register holding the right hand side of the comparison if the stub gives
- // a choice, no_reg otherwise.
- Register rhs_;
-
- // Encoding of the minor key in 16 bits.
- class StrictField: public BitField<bool, 0, 1> {};
- class NeverNanNanField: public BitField<bool, 1, 1> {};
- class IncludeNumberCompareField: public BitField<bool, 2, 1> {};
- class IncludeSmiCompareField: public BitField<bool, 3, 1> {};
- class RegisterField: public BitField<bool, 4, 1> {};
- class ConditionField: public BitField<int, 5, 11> {};
-
- Major MajorKey() { return Compare; }
+ virtual InlineCacheState GetICState() {
+ if (state_.Contains(GENERIC)) {
+ return MEGAMORPHIC;
+ } else if (state_.Contains(MONOMORPHIC_MAP)) {
+ return MONOMORPHIC;
+ } else {
+ return PREMONOMORPHIC;
+ }
+ }
- int MinorKey();
+ virtual Code::Kind GetCodeKind() const { return Code::COMPARE_NIL_IC; }
- virtual int GetCodeKind() { return Code::COMPARE_IC; }
- virtual void FinishCode(Handle<Code> code) {
- code->set_compare_state(CompareIC::GENERIC);
+ virtual Handle<Code> GenerateCode(Isolate* isolate);
+
+ virtual Code::ExtraICState GetExtraICState() {
+ return NilValueField::encode(nil_value_) |
+ TypesField::encode(state_.ToIntegral());
}
- // Branch to the label if the given object isn't a symbol.
- void BranchIfNonSymbol(MacroAssembler* masm,
- Label* label,
- Register object,
- Register scratch);
+ void UpdateStatus(Handle<Object> object);
- // Unfortunately you have to run without snapshots to see most of these
- // names in the profile since most compare stubs end up in the snapshot.
- virtual void PrintName(StringStream* stream);
+ bool IsMonomorphic() const { return state_.Contains(MONOMORPHIC_MAP); }
+ NilValue GetNilValue() const { return nil_value_; }
+ void ClearState() { state_.RemoveAll(); }
+
+ virtual void PrintState(StringStream* stream);
+ virtual void PrintBaseName(StringStream* stream);
+
+ private:
+ friend class CompareNilIC;
+
+ enum CompareNilType {
+ UNDEFINED,
+ NULL_TYPE,
+ MONOMORPHIC_MAP,
+ GENERIC,
+ NUMBER_OF_TYPES
+ };
+
+ // At most 6 different types can be distinguished, because the Code object
+ // only has room for a single byte to hold a set and there are two more
+ // boolean flags we need to store. :-P
+ STATIC_ASSERT(NUMBER_OF_TYPES <= 6);
+
+ class State : public EnumSet<CompareNilType, byte> {
+ public:
+ State() : EnumSet<CompareNilType, byte>(0) { }
+ explicit State(byte bits) : EnumSet<CompareNilType, byte>(bits) { }
+
+ void Print(StringStream* stream) const;
+ };
+
+ CompareNilICStub(NilValue nil, InitializationState init_state)
+ : HydrogenCodeStub(init_state), nil_value_(nil) { }
+
+ class NilValueField : public BitField<NilValue, 0, 1> {};
+ class TypesField : public BitField<byte, 1, NUMBER_OF_TYPES> {};
+
+ virtual CodeStub::Major MajorKey() { return CompareNilIC; }
+ virtual int NotMissMinorKey() { return GetExtraICState(); }
+
+ NilValue nil_value_;
+ State state_;
+
+ DISALLOW_COPY_AND_ASSIGN(CompareNilICStub);
};
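// Worked example of the ExtraICState packing above: bit 0 holds the nil value
// and bits 1..4 hold the type set (assuming the usual EnumSet encoding of one
// bit per CompareNilType). A stub that has seen null plus one monomorphic map
// has state {NULL_TYPE, MONOMORPHIC_MAP} = 0b0110, giving
// TypesField::encode(6) | NilValueField::encode(nil_value_).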
-class CEntryStub : public CodeStub {
+class CEntryStub : public PlatformCodeStub {
public:
explicit CEntryStub(int result_size,
SaveFPRegsMode save_doubles = kDontSaveFPRegs)
@@ -637,8 +1351,13 @@ class CEntryStub : public CodeStub {
// time, so it's OK to call it from other stubs that can't cope with GC during
// their code generation. On machines that always have gp registers (x64) we
// can generate both variants ahead of time.
- virtual bool IsPregenerated();
- static void GenerateAheadOfTime();
+ virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE;
+ static void GenerateAheadOfTime(Isolate* isolate);
+
+ protected:
+ virtual void VerifyPlatformFeatures(Isolate* isolate) V8_OVERRIDE {
+ ASSERT(CpuFeatures::VerifyCrossCompiling(SSE2));
+ };
private:
void GenerateCore(MacroAssembler* masm,
@@ -649,6 +1368,7 @@ class CEntryStub : public CodeStub {
bool always_allocate_scope);
// Number of pointers/values returned.
+ Isolate* isolate_;
const int result_size_;
SaveFPRegsMode save_doubles_;
@@ -659,7 +1379,7 @@ class CEntryStub : public CodeStub {
};
-class JSEntryStub : public CodeStub {
+class JSEntryStub : public PlatformCodeStub {
public:
JSEntryStub() { }
@@ -693,7 +1413,7 @@ class JSConstructEntryStub : public JSEntryStub {
};
-class ArgumentsAccessStub: public CodeStub {
+class ArgumentsAccessStub: public PlatformCodeStub {
public:
enum Type {
READ_ELEMENT,
@@ -720,7 +1440,7 @@ class ArgumentsAccessStub: public CodeStub {
};
-class RegExpExecStub: public CodeStub {
+class RegExpExecStub: public PlatformCodeStub {
public:
RegExpExecStub() { }
@@ -732,7 +1452,7 @@ class RegExpExecStub: public CodeStub {
};
-class RegExpConstructResultStub: public CodeStub {
+class RegExpConstructResultStub: public PlatformCodeStub {
public:
RegExpConstructResultStub() { }
@@ -744,7 +1464,7 @@ class RegExpConstructResultStub: public CodeStub {
};
-class CallFunctionStub: public CodeStub {
+class CallFunctionStub: public PlatformCodeStub {
public:
CallFunctionStub(int argc, CallFunctionFlags flags)
: argc_(argc), flags_(flags) { }
@@ -785,7 +1505,7 @@ class CallFunctionStub: public CodeStub {
};
-class CallConstructStub: public CodeStub {
+class CallConstructStub: public PlatformCodeStub {
public:
explicit CallConstructStub(CallFunctionFlags flags) : flags_(flags) {}
@@ -860,6 +1580,13 @@ class StringCharCodeAtGenerator {
void GenerateSlow(MacroAssembler* masm,
const RuntimeCallHelper& call_helper);
+ // Skip handling slow case and directly jump to bailout.
+ void SkipSlow(MacroAssembler* masm, Label* bailout) {
+ masm->bind(&index_not_smi_);
+ masm->bind(&call_runtime_);
+ masm->jmp(bailout);
+ }
+
private:
Register object_;
Register index_;
@@ -900,6 +1627,12 @@ class StringCharFromCodeGenerator {
void GenerateSlow(MacroAssembler* masm,
const RuntimeCallHelper& call_helper);
+ // Skip handling slow case and directly jump to bailout.
+ void SkipSlow(MacroAssembler* masm, Label* bailout) {
+ masm->bind(&slow_case_);
+ masm->jmp(bailout);
+ }
+
private:
Register code_;
Register result_;
@@ -942,13 +1675,25 @@ class StringCharAtGenerator {
// Generates the fast case code. On the fallthrough path |result|
// register contains the result.
- void GenerateFast(MacroAssembler* masm);
+ void GenerateFast(MacroAssembler* masm) {
+ char_code_at_generator_.GenerateFast(masm);
+ char_from_code_generator_.GenerateFast(masm);
+ }
// Generates the slow case code. Must not be naturally
// reachable. Expected to be put after a ret instruction (e.g., in
// deferred code). Always jumps back to the fast case.
void GenerateSlow(MacroAssembler* masm,
- const RuntimeCallHelper& call_helper);
+ const RuntimeCallHelper& call_helper) {
+ char_code_at_generator_.GenerateSlow(masm, call_helper);
+ char_from_code_generator_.GenerateSlow(masm, call_helper);
+ }
+
+ // Skip handling slow case and directly jump to bailout.
+ void SkipSlow(MacroAssembler* masm, Label* bailout) {
+ char_code_at_generator_.SkipSlow(masm, bailout);
+ char_from_code_generator_.SkipSlow(masm, bailout);
+ }
private:
StringCharCodeAtGenerator char_code_at_generator_;
@@ -976,60 +1721,447 @@ class AllowStubCallsScope {
};
-class KeyedLoadElementStub : public CodeStub {
+class KeyedLoadDictionaryElementStub : public PlatformCodeStub {
public:
- explicit KeyedLoadElementStub(ElementsKind elements_kind)
- : elements_kind_(elements_kind)
- { }
+ KeyedLoadDictionaryElementStub() {}
+
+ void Generate(MacroAssembler* masm);
+ private:
Major MajorKey() { return KeyedLoadElement; }
- int MinorKey() { return elements_kind_; }
+ int MinorKey() { return DICTIONARY_ELEMENTS; }
+
+ DISALLOW_COPY_AND_ASSIGN(KeyedLoadDictionaryElementStub);
+};
+
+
+class DoubleToIStub : public PlatformCodeStub {
+ public:
+ DoubleToIStub(Register source,
+ Register destination,
+ int offset,
+ bool is_truncating,
+ bool skip_fastpath = false) : bit_field_(0) {
+ bit_field_ = SourceRegisterBits::encode(source.code_) |
+ DestinationRegisterBits::encode(destination.code_) |
+ OffsetBits::encode(offset) |
+ IsTruncatingBits::encode(is_truncating) |
+ SkipFastPathBits::encode(skip_fastpath) |
+ SSEBits::encode(CpuFeatures::IsSafeForSnapshot(SSE2) ?
+ CpuFeatures::IsSafeForSnapshot(SSE3) ? 2 : 1 : 0);
+ }
+
+ Register source() {
+ Register result = { SourceRegisterBits::decode(bit_field_) };
+ return result;
+ }
+
+ Register destination() {
+ Register result = { DestinationRegisterBits::decode(bit_field_) };
+ return result;
+ }
+
+ bool is_truncating() {
+ return IsTruncatingBits::decode(bit_field_);
+ }
+
+ bool skip_fastpath() {
+ return SkipFastPathBits::decode(bit_field_);
+ }
+
+ int offset() {
+ return OffsetBits::decode(bit_field_);
+ }
void Generate(MacroAssembler* masm);
+ virtual bool SometimesSetsUpAFrame() { return false; }
+
+ protected:
+ virtual void VerifyPlatformFeatures(Isolate* isolate) V8_OVERRIDE {
+ ASSERT(CpuFeatures::VerifyCrossCompiling(SSE2));
+ }
+
private:
- ElementsKind elements_kind_;
+ static const int kBitsPerRegisterNumber = 6;
+ STATIC_ASSERT((1L << kBitsPerRegisterNumber) >= Register::kNumRegisters);
+ class SourceRegisterBits:
+ public BitField<int, 0, kBitsPerRegisterNumber> {}; // NOLINT
+ class DestinationRegisterBits:
+ public BitField<int, kBitsPerRegisterNumber,
+ kBitsPerRegisterNumber> {}; // NOLINT
+ class IsTruncatingBits:
+ public BitField<bool, 2 * kBitsPerRegisterNumber, 1> {}; // NOLINT
+ class OffsetBits:
+ public BitField<int, 2 * kBitsPerRegisterNumber + 1, 3> {}; // NOLINT
+ class SkipFastPathBits:
+ public BitField<int, 2 * kBitsPerRegisterNumber + 4, 1> {}; // NOLINT
+ class SSEBits:
+ public BitField<int, 2 * kBitsPerRegisterNumber + 5, 2> {}; // NOLINT
+
+ Major MajorKey() { return DoubleToI; }
+ int MinorKey() { return bit_field_; }
+
+ int bit_field_;
+
+ DISALLOW_COPY_AND_ASSIGN(DoubleToIStub);
+};
+
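// Bit budget of the minor key above: 2 * 6 register-number bits + 1
// truncating bit + 3 offset bits + 1 skip-fastpath bit + 2 SSE bits = 19
// bits. The SSE field records 0, 1 or 2 for "no SSE2", "SSE2 only" and
// "SSE3 available", matching the nested conditional in the constructor.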
+
+class KeyedLoadFastElementStub : public HydrogenCodeStub {
+ public:
+ KeyedLoadFastElementStub(bool is_js_array, ElementsKind elements_kind) {
+ bit_field_ = ElementsKindBits::encode(elements_kind) |
+ IsJSArrayBits::encode(is_js_array);
+ }
+
+ bool is_js_array() const {
+ return IsJSArrayBits::decode(bit_field_);
+ }
+
+ ElementsKind elements_kind() const {
+ return ElementsKindBits::decode(bit_field_);
+ }
+
+ virtual Handle<Code> GenerateCode(Isolate* isolate);
+
+ virtual void InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor);
+
+ private:
+ class ElementsKindBits: public BitField<ElementsKind, 0, 8> {};
+ class IsJSArrayBits: public BitField<bool, 8, 1> {};
+ uint32_t bit_field_;
+
+ Major MajorKey() { return KeyedLoadElement; }
+ int NotMissMinorKey() { return bit_field_; }
+
+ DISALLOW_COPY_AND_ASSIGN(KeyedLoadFastElementStub);
+};
+
+
+class KeyedStoreFastElementStub : public HydrogenCodeStub {
+ public:
+ KeyedStoreFastElementStub(bool is_js_array,
+ ElementsKind elements_kind,
+ KeyedAccessStoreMode mode) {
+ bit_field_ = ElementsKindBits::encode(elements_kind) |
+ IsJSArrayBits::encode(is_js_array) |
+ StoreModeBits::encode(mode);
+ }
+
+ bool is_js_array() const {
+ return IsJSArrayBits::decode(bit_field_);
+ }
+
+ ElementsKind elements_kind() const {
+ return ElementsKindBits::decode(bit_field_);
+ }
+
+ KeyedAccessStoreMode store_mode() const {
+ return StoreModeBits::decode(bit_field_);
+ }
+
+ virtual Handle<Code> GenerateCode(Isolate* isolate);
+
+ virtual void InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor);
+
+ private:
+ class ElementsKindBits: public BitField<ElementsKind, 0, 8> {};
+ class StoreModeBits: public BitField<KeyedAccessStoreMode, 8, 4> {};
+ class IsJSArrayBits: public BitField<bool, 12, 1> {};
+ uint32_t bit_field_;
+
+ Major MajorKey() { return KeyedStoreElement; }
+ int NotMissMinorKey() { return bit_field_; }
+
+ DISALLOW_COPY_AND_ASSIGN(KeyedStoreFastElementStub);
+};
+
+
+class TransitionElementsKindStub : public HydrogenCodeStub {
+ public:
+ TransitionElementsKindStub(ElementsKind from_kind,
+ ElementsKind to_kind) {
+ bit_field_ = FromKindBits::encode(from_kind) |
+ ToKindBits::encode(to_kind);
+ }
+
+ ElementsKind from_kind() const {
+ return FromKindBits::decode(bit_field_);
+ }
+
+ ElementsKind to_kind() const {
+ return ToKindBits::decode(bit_field_);
+ }
+
+ virtual Handle<Code> GenerateCode(Isolate* isolate);
+
+ virtual void InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor);
+
+ private:
+ class FromKindBits: public BitField<ElementsKind, 8, 8> {};
+ class ToKindBits: public BitField<ElementsKind, 0, 8> {};
+ uint32_t bit_field_;
+
+ Major MajorKey() { return TransitionElementsKind; }
+ int NotMissMinorKey() { return bit_field_; }
+
+ DISALLOW_COPY_AND_ASSIGN(TransitionElementsKindStub);
+};
+
+
+enum ContextCheckMode {
+ CONTEXT_CHECK_REQUIRED,
+ CONTEXT_CHECK_NOT_REQUIRED,
+ LAST_CONTEXT_CHECK_MODE = CONTEXT_CHECK_NOT_REQUIRED
+};
+
+
+class ArrayConstructorStubBase : public HydrogenCodeStub {
+ public:
+ ArrayConstructorStubBase(ElementsKind kind, ContextCheckMode context_mode,
+ AllocationSiteOverrideMode override_mode) {
+ // It only makes sense to override local allocation site behavior
+ // if there is a difference between the global allocation site policy
+ // for an ElementsKind and the desired usage of the stub.
+ ASSERT(!(FLAG_track_allocation_sites &&
+ override_mode == DISABLE_ALLOCATION_SITES) ||
+ AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE);
+ bit_field_ = ElementsKindBits::encode(kind) |
+ AllocationSiteOverrideModeBits::encode(override_mode) |
+ ContextCheckModeBits::encode(context_mode);
+ }
+
+ ElementsKind elements_kind() const {
+ return ElementsKindBits::decode(bit_field_);
+ }
+
+ AllocationSiteOverrideMode override_mode() const {
+ return AllocationSiteOverrideModeBits::decode(bit_field_);
+ }
+
+ ContextCheckMode context_mode() const {
+ return ContextCheckModeBits::decode(bit_field_);
+ }
+
+ virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE {
+ // We only pre-generate stubs that verify correct context
+ return context_mode() == CONTEXT_CHECK_REQUIRED;
+ }
+
+ static void GenerateStubsAheadOfTime(Isolate* isolate);
+ static void InstallDescriptors(Isolate* isolate);
+
+ // Parameters accessed via CodeStubGraphBuilder::GetParameter()
+ static const int kConstructor = 0;
+ static const int kPropertyCell = 1;
+
+ private:
+ int NotMissMinorKey() { return bit_field_; }
+
+ // Ensure data fits within available bits.
+ STATIC_ASSERT(LAST_ALLOCATION_SITE_OVERRIDE_MODE == 1);
+ STATIC_ASSERT(LAST_CONTEXT_CHECK_MODE == 1);
+
+ class ElementsKindBits: public BitField<ElementsKind, 0, 8> {};
+ class AllocationSiteOverrideModeBits: public
+ BitField<AllocationSiteOverrideMode, 8, 1> {}; // NOLINT
+ class ContextCheckModeBits: public BitField<ContextCheckMode, 9, 1> {};
+ uint32_t bit_field_;
+
+ DISALLOW_COPY_AND_ASSIGN(ArrayConstructorStubBase);
+};
+
+
+class ArrayNoArgumentConstructorStub : public ArrayConstructorStubBase {
+ public:
+ ArrayNoArgumentConstructorStub(
+ ElementsKind kind,
+ ContextCheckMode context_mode = CONTEXT_CHECK_REQUIRED,
+ AllocationSiteOverrideMode override_mode = DONT_OVERRIDE)
+ : ArrayConstructorStubBase(kind, context_mode, override_mode) {
+ }
+
+ virtual Handle<Code> GenerateCode(Isolate* isolate);
+
+ virtual void InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor);
+
+ private:
+ Major MajorKey() { return ArrayNoArgumentConstructor; }
+
+ DISALLOW_COPY_AND_ASSIGN(ArrayNoArgumentConstructorStub);
+};
+
+
+class ArraySingleArgumentConstructorStub : public ArrayConstructorStubBase {
+ public:
+ ArraySingleArgumentConstructorStub(
+ ElementsKind kind,
+ ContextCheckMode context_mode = CONTEXT_CHECK_REQUIRED,
+ AllocationSiteOverrideMode override_mode = DONT_OVERRIDE)
+ : ArrayConstructorStubBase(kind, context_mode, override_mode) {
+ }
+
+ virtual Handle<Code> GenerateCode(Isolate* isolate);
+
+ virtual void InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor);
+
+ private:
+ Major MajorKey() { return ArraySingleArgumentConstructor; }
+
+ DISALLOW_COPY_AND_ASSIGN(ArraySingleArgumentConstructorStub);
+};
+
+
+class ArrayNArgumentsConstructorStub : public ArrayConstructorStubBase {
+ public:
+ ArrayNArgumentsConstructorStub(
+ ElementsKind kind,
+ ContextCheckMode context_mode = CONTEXT_CHECK_REQUIRED,
+ AllocationSiteOverrideMode override_mode = DONT_OVERRIDE)
+ : ArrayConstructorStubBase(kind, context_mode, override_mode) {
+ }
+
+ virtual Handle<Code> GenerateCode(Isolate* isolate);
+
+ virtual void InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor);
+
+ private:
+ Major MajorKey() { return ArrayNArgumentsConstructor; }
+
+ DISALLOW_COPY_AND_ASSIGN(ArrayNArgumentsConstructorStub);
+};
+
+
+class InternalArrayConstructorStubBase : public HydrogenCodeStub {
+ public:
+ explicit InternalArrayConstructorStubBase(ElementsKind kind) {
+ kind_ = kind;
+ }
+
+ virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE { return true; }
+ static void GenerateStubsAheadOfTime(Isolate* isolate);
+ static void InstallDescriptors(Isolate* isolate);
+
+ // Parameters accessed via CodeStubGraphBuilder::GetParameter()
+ static const int kConstructor = 0;
- DISALLOW_COPY_AND_ASSIGN(KeyedLoadElementStub);
+ ElementsKind elements_kind() const { return kind_; }
+
+ private:
+ int NotMissMinorKey() { return kind_; }
+
+ ElementsKind kind_;
+
+ DISALLOW_COPY_AND_ASSIGN(InternalArrayConstructorStubBase);
+};
+
+
+class InternalArrayNoArgumentConstructorStub : public
+ InternalArrayConstructorStubBase {
+ public:
+ explicit InternalArrayNoArgumentConstructorStub(ElementsKind kind)
+ : InternalArrayConstructorStubBase(kind) { }
+
+ virtual Handle<Code> GenerateCode(Isolate* isolate);
+
+ virtual void InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor);
+
+ private:
+ Major MajorKey() { return InternalArrayNoArgumentConstructor; }
+
+ DISALLOW_COPY_AND_ASSIGN(InternalArrayNoArgumentConstructorStub);
+};
+
+
+class InternalArraySingleArgumentConstructorStub : public
+ InternalArrayConstructorStubBase {
+ public:
+ explicit InternalArraySingleArgumentConstructorStub(ElementsKind kind)
+ : InternalArrayConstructorStubBase(kind) { }
+
+ virtual Handle<Code> GenerateCode(Isolate* isolate);
+
+ virtual void InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor);
+
+ private:
+ Major MajorKey() { return InternalArraySingleArgumentConstructor; }
+
+ DISALLOW_COPY_AND_ASSIGN(InternalArraySingleArgumentConstructorStub);
+};
+
+
+class InternalArrayNArgumentsConstructorStub : public
+ InternalArrayConstructorStubBase {
+ public:
+ explicit InternalArrayNArgumentsConstructorStub(ElementsKind kind)
+ : InternalArrayConstructorStubBase(kind) { }
+
+ virtual Handle<Code> GenerateCode(Isolate* isolate);
+
+ virtual void InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor);
+
+ private:
+ Major MajorKey() { return InternalArrayNArgumentsConstructor; }
+
+ DISALLOW_COPY_AND_ASSIGN(InternalArrayNArgumentsConstructorStub);
};
-class KeyedStoreElementStub : public CodeStub {
+class KeyedStoreElementStub : public PlatformCodeStub {
public:
KeyedStoreElementStub(bool is_js_array,
ElementsKind elements_kind,
- KeyedAccessGrowMode grow_mode)
+ KeyedAccessStoreMode store_mode)
: is_js_array_(is_js_array),
elements_kind_(elements_kind),
- grow_mode_(grow_mode),
+ store_mode_(store_mode),
fp_registers_(CanUseFPRegisters()) { }
Major MajorKey() { return KeyedStoreElement; }
int MinorKey() {
return ElementsKindBits::encode(elements_kind_) |
IsJSArrayBits::encode(is_js_array_) |
- GrowModeBits::encode(grow_mode_) |
+ StoreModeBits::encode(store_mode_) |
FPRegisters::encode(fp_registers_);
}
void Generate(MacroAssembler* masm);
private:
- class ElementsKindBits: public BitField<ElementsKind, 0, 8> {};
- class GrowModeBits: public BitField<KeyedAccessGrowMode, 8, 1> {};
- class IsJSArrayBits: public BitField<bool, 9, 1> {};
- class FPRegisters: public BitField<bool, 10, 1> {};
+ class ElementsKindBits: public BitField<ElementsKind, 0, 8> {};
+ class StoreModeBits: public BitField<KeyedAccessStoreMode, 8, 4> {};
+ class IsJSArrayBits: public BitField<bool, 12, 1> {};
+ class FPRegisters: public BitField<bool, 13, 1> {};
bool is_js_array_;
ElementsKind elements_kind_;
- KeyedAccessGrowMode grow_mode_;
+ KeyedAccessStoreMode store_mode_;
bool fp_registers_;
DISALLOW_COPY_AND_ASSIGN(KeyedStoreElementStub);
};
-class ToBooleanStub: public CodeStub {
+class ToBooleanStub: public HydrogenCodeStub {
public:
enum Type {
UNDEFINED,
@@ -1038,6 +2170,7 @@ class ToBooleanStub: public CodeStub {
SMI,
SPEC_OBJECT,
STRING,
+ SYMBOL,
HEAP_NUMBER,
NUMBER_OF_TYPES
};
@@ -1046,98 +2179,119 @@ class ToBooleanStub: public CodeStub {
// only has room for a single byte to hold a set of these types. :-P
STATIC_ASSERT(NUMBER_OF_TYPES <= 8);
- class Types {
+ class Types : public EnumSet<Type, byte> {
public:
- Types() {}
- explicit Types(byte bits) : set_(bits) {}
+ Types() : EnumSet<Type, byte>(0) {}
+ explicit Types(byte bits) : EnumSet<Type, byte>(bits) {}
- bool IsEmpty() const { return set_.IsEmpty(); }
- bool Contains(Type type) const { return set_.Contains(type); }
- void Add(Type type) { set_.Add(type); }
- byte ToByte() const { return set_.ToIntegral(); }
+ byte ToByte() const { return ToIntegral(); }
void Print(StringStream* stream) const;
- void TraceTransition(Types to) const;
- bool Record(Handle<Object> object);
+ bool UpdateStatus(Handle<Object> object);
bool NeedsMap() const;
bool CanBeUndetectable() const;
+ bool IsGeneric() const { return ToIntegral() == Generic().ToIntegral(); }
- private:
- EnumSet<Type, byte> set_;
+ static Types Generic() { return Types((1 << NUMBER_OF_TYPES) - 1); }
};
- static Types no_types() { return Types(); }
- static Types all_types() { return Types((1 << NUMBER_OF_TYPES) - 1); }
+ explicit ToBooleanStub(Types types = Types())
+ : types_(types) { }
+ explicit ToBooleanStub(Code::ExtraICState state)
+ : types_(static_cast<byte>(state)) { }
- explicit ToBooleanStub(Register tos, Types types = Types())
- : tos_(tos), types_(types) { }
+ bool UpdateStatus(Handle<Object> object);
+ Types GetTypes() { return types_; }
- void Generate(MacroAssembler* masm);
- virtual int GetCodeKind() { return Code::TO_BOOLEAN_IC; }
- virtual void PrintName(StringStream* stream);
+ virtual Handle<Code> GenerateCode(Isolate* isolate);
+ virtual void InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor);
+
+ virtual Code::Kind GetCodeKind() const { return Code::TO_BOOLEAN_IC; }
+ virtual void PrintState(StringStream* stream);
virtual bool SometimesSetsUpAFrame() { return false; }
- private:
- Major MajorKey() { return ToBoolean; }
- int MinorKey() { return (tos_.code() << NUMBER_OF_TYPES) | types_.ToByte(); }
+ static void InitializeForIsolate(Isolate* isolate) {
+ ToBooleanStub stub;
+ stub.InitializeInterfaceDescriptor(
+ isolate,
+ isolate->code_stub_interface_descriptor(CodeStub::ToBoolean));
+ }
- virtual void FinishCode(Handle<Code> code) {
- code->set_to_boolean_state(types_.ToByte());
+ static Handle<Code> GetUninitialized(Isolate* isolate) {
+ return ToBooleanStub(UNINITIALIZED).GetCode(isolate);
}
- void CheckOddball(MacroAssembler* masm,
- Type type,
- Heap::RootListIndex value,
- bool result);
- void GenerateTypeTransition(MacroAssembler* masm);
+ virtual Code::ExtraICState GetExtraICState() {
+ return types_.ToIntegral();
+ }
+
+ virtual InlineCacheState GetICState() {
+ if (types_.IsEmpty()) {
+ return ::v8::internal::UNINITIALIZED;
+ } else {
+ return MONOMORPHIC;
+ }
+ }
+
+ private:
+ Major MajorKey() { return ToBoolean; }
+ int NotMissMinorKey() { return GetExtraICState(); }
+
+ explicit ToBooleanStub(InitializationState init_state) :
+ HydrogenCodeStub(init_state) {}
- Register tos_;
Types types_;
};
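ToBooleanStub::Types is now an EnumSet<Type, byte> that records which value kinds a call site has actually observed; once every bit is set the stub is "generic". The following stand-alone sketch shows that record-and-check pattern under stated assumptions: the byte-backed set and the enumerator NULL_TYPE (renamed from NULL to stay compilable) are simplifications, not V8's EnumSet.

#include <cassert>
#include <cstdint>

enum Type { UNDEFINED, BOOLEAN, NULL_TYPE, SMI, SPEC_OBJECT, STRING,
            SYMBOL, HEAP_NUMBER, NUMBER_OF_TYPES };

// Simplified byte-backed type set, assumed for illustration only.
class Types {
 public:
  Types() : bits_(0) {}
  void Add(Type t) { bits_ |= static_cast<uint8_t>(1u << t); }
  bool Contains(Type t) const { return (bits_ & (1u << t)) != 0; }
  uint8_t ToByte() const { return bits_; }
  bool IsGeneric() const { return bits_ == GenericBits(); }
  static uint8_t GenericBits() { return (1u << NUMBER_OF_TYPES) - 1; }
 private:
  uint8_t bits_;
};

int main() {
  Types types;
  types.Add(SMI);  // the call site has only ever seen Smis
  assert(types.Contains(SMI) && !types.Contains(STRING));
  assert(!types.IsGeneric());
  return 0;
}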
-class ElementsTransitionAndStoreStub : public CodeStub {
+class ElementsTransitionAndStoreStub : public HydrogenCodeStub {
public:
- ElementsTransitionAndStoreStub(ElementsKind from,
- ElementsKind to,
+ ElementsTransitionAndStoreStub(ElementsKind from_kind,
+ ElementsKind to_kind,
bool is_jsarray,
- StrictModeFlag strict_mode,
- KeyedAccessGrowMode grow_mode)
- : from_(from),
- to_(to),
+ KeyedAccessStoreMode store_mode)
+ : from_kind_(from_kind),
+ to_kind_(to_kind),
is_jsarray_(is_jsarray),
- strict_mode_(strict_mode),
- grow_mode_(grow_mode) {}
+ store_mode_(store_mode) {}
+
+ ElementsKind from_kind() const { return from_kind_; }
+ ElementsKind to_kind() const { return to_kind_; }
+ bool is_jsarray() const { return is_jsarray_; }
+ KeyedAccessStoreMode store_mode() const { return store_mode_; }
+
+ virtual Handle<Code> GenerateCode(Isolate* isolate);
+
+ void InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor);
private:
- class FromBits: public BitField<ElementsKind, 0, 8> {};
- class ToBits: public BitField<ElementsKind, 8, 8> {};
- class IsJSArrayBits: public BitField<bool, 16, 1> {};
- class StrictModeBits: public BitField<StrictModeFlag, 17, 1> {};
- class GrowModeBits: public BitField<KeyedAccessGrowMode, 18, 1> {};
+ class FromBits: public BitField<ElementsKind, 0, 8> {};
+ class ToBits: public BitField<ElementsKind, 8, 8> {};
+ class IsJSArrayBits: public BitField<bool, 16, 1> {};
+ class StoreModeBits: public BitField<KeyedAccessStoreMode, 17, 4> {};
Major MajorKey() { return ElementsTransitionAndStore; }
- int MinorKey() {
- return FromBits::encode(from_) |
- ToBits::encode(to_) |
+ int NotMissMinorKey() {
+ return FromBits::encode(from_kind_) |
+ ToBits::encode(to_kind_) |
IsJSArrayBits::encode(is_jsarray_) |
- StrictModeBits::encode(strict_mode_) |
- GrowModeBits::encode(grow_mode_);
+ StoreModeBits::encode(store_mode_);
}
- void Generate(MacroAssembler* masm);
-
- ElementsKind from_;
- ElementsKind to_;
+ ElementsKind from_kind_;
+ ElementsKind to_kind_;
bool is_jsarray_;
- StrictModeFlag strict_mode_;
- KeyedAccessGrowMode grow_mode_;
+ KeyedAccessStoreMode store_mode_;
DISALLOW_COPY_AND_ASSIGN(ElementsTransitionAndStoreStub);
};
-class StoreArrayLiteralElementStub : public CodeStub {
+class StoreArrayLiteralElementStub : public PlatformCodeStub {
public:
StoreArrayLiteralElementStub()
: fp_registers_(CanUseFPRegisters()) { }
@@ -1156,7 +2310,35 @@ class StoreArrayLiteralElementStub : public CodeStub {
};
-class ProfileEntryHookStub : public CodeStub {
+class StubFailureTrampolineStub : public PlatformCodeStub {
+ public:
+ explicit StubFailureTrampolineStub(StubFunctionMode function_mode)
+ : fp_registers_(CanUseFPRegisters()), function_mode_(function_mode) {}
+
+ virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE { return true; }
+
+ static void GenerateAheadOfTime(Isolate* isolate);
+
+ private:
+ class FPRegisters: public BitField<bool, 0, 1> {};
+ class FunctionModeField: public BitField<StubFunctionMode, 1, 1> {};
+
+ Major MajorKey() { return StubFailureTrampoline; }
+ int MinorKey() {
+ return FPRegisters::encode(fp_registers_) |
+ FunctionModeField::encode(function_mode_);
+ }
+
+ void Generate(MacroAssembler* masm);
+
+ bool fp_registers_;
+ StubFunctionMode function_mode_;
+
+ DISALLOW_COPY_AND_ASSIGN(StubFailureTrampolineStub);
+};
+
+
+class ProfileEntryHookStub : public PlatformCodeStub {
public:
explicit ProfileEntryHookStub() {}
@@ -1166,23 +2348,16 @@ class ProfileEntryHookStub : public CodeStub {
// Generates a call to the entry hook if it's enabled.
static void MaybeCallEntryHook(MacroAssembler* masm);
- // Sets or unsets the entry hook function. Returns true on success,
- // false on an attempt to replace a non-NULL entry hook with another
- // non-NULL hook.
- static bool SetFunctionEntryHook(FunctionEntryHook entry_hook);
-
private:
static void EntryHookTrampoline(intptr_t function,
- intptr_t stack_pointer);
+ intptr_t stack_pointer,
+ Isolate* isolate);
Major MajorKey() { return ProfileEntryHook; }
int MinorKey() { return 0; }
void Generate(MacroAssembler* masm);
- // The current function entry hook.
- static FunctionEntryHook entry_hook_;
-
DISALLOW_COPY_AND_ASSIGN(ProfileEntryHookStub);
};
diff --git a/deps/v8/src/code.h b/deps/v8/src/code.h
index 766c932e0..791420cf3 100644
--- a/deps/v8/src/code.h
+++ b/deps/v8/src/code.h
@@ -29,6 +29,8 @@
#define V8_CODE_H_
#include "allocation.h"
+#include "handles.h"
+#include "objects.h"
namespace v8 {
namespace internal {
@@ -44,6 +46,8 @@ class ParameterCount BASE_EMBEDDED {
: reg_(reg), immediate_(0) { }
explicit ParameterCount(int immediate)
: reg_(no_reg), immediate_(immediate) { }
+ explicit ParameterCount(Handle<JSFunction> f)
+ : reg_(no_reg), immediate_(f->shared()->formal_parameter_count()) { }
bool is_reg() const { return !reg_.is(no_reg); }
bool is_immediate() const { return !is_reg(); }
diff --git a/deps/v8/src/codegen.cc b/deps/v8/src/codegen.cc
index 0163580e9..573ddc6ce 100644
--- a/deps/v8/src/codegen.cc
+++ b/deps/v8/src/codegen.cc
@@ -30,6 +30,7 @@
#include "bootstrapper.h"
#include "codegen.h"
#include "compiler.h"
+#include "cpu-profiler.h"
#include "debug.h"
#include "prettyprinter.h"
#include "rewriter.h"
@@ -58,13 +59,12 @@ Comment::~Comment() {
#undef __
-void CodeGenerator::MakeCodePrologue(CompilationInfo* info) {
-#ifdef DEBUG
+void CodeGenerator::MakeCodePrologue(CompilationInfo* info, const char* kind) {
bool print_source = false;
bool print_ast = false;
const char* ftype;
- if (Isolate::Current()->bootstrapper()->IsActive()) {
+ if (info->isolate()->bootstrapper()->IsActive()) {
print_source = FLAG_print_builtin_source;
print_ast = FLAG_print_builtin_ast;
ftype = "builtin";
@@ -75,19 +75,26 @@ void CodeGenerator::MakeCodePrologue(CompilationInfo* info) {
}
if (FLAG_trace_codegen || print_source || print_ast) {
- PrintF("*** Generate code for %s function: ", ftype);
- info->function()->name()->ShortPrint();
- PrintF(" ***\n");
+ PrintF("[generating %s code for %s function: ", kind, ftype);
+ if (info->IsStub()) {
+ const char* name =
+ CodeStub::MajorName(info->code_stub()->MajorKey(), true);
+ PrintF("%s", name == NULL ? "<unknown>" : name);
+ } else {
+ PrintF("%s", *info->function()->debug_name()->ToCString());
+ }
+ PrintF("]\n");
}
- if (print_source) {
+#ifdef DEBUG
+ if (!info->IsStub() && print_source) {
PrintF("--- Source from AST ---\n%s\n",
- PrettyPrinter().PrintProgram(info->function()));
+ PrettyPrinter(info->isolate()).PrintProgram(info->function()));
}
- if (print_ast) {
+ if (!info->IsStub() && print_ast) {
PrintF("--- AST ---\n%s\n",
- AstPrinter().PrintProgram(info->function()));
+ AstPrinter(info->isolate()).PrintProgram(info->function()));
}
#endif // DEBUG
}
@@ -100,39 +107,52 @@ Handle<Code> CodeGenerator::MakeCodeEpilogue(MacroAssembler* masm,
// Allocate and install the code.
CodeDesc desc;
+ bool is_crankshafted =
+ Code::ExtractKindFromFlags(flags) == Code::OPTIMIZED_FUNCTION ||
+ info->IsStub();
masm->GetCode(&desc);
Handle<Code> code =
- isolate->factory()->NewCode(desc, flags, masm->CodeObject());
-
- if (!code.is_null()) {
- isolate->counters()->total_compiled_code_size()->Increment(
- code->instruction_size());
- }
+ isolate->factory()->NewCode(desc, flags, masm->CodeObject(),
+ false, is_crankshafted,
+ info->prologue_offset());
+ isolate->counters()->total_compiled_code_size()->Increment(
+ code->instruction_size());
+ isolate->heap()->IncrementCodeGeneratedBytes(is_crankshafted,
+ code->instruction_size());
return code;
}
void CodeGenerator::PrintCode(Handle<Code> code, CompilationInfo* info) {
#ifdef ENABLE_DISASSEMBLER
- bool print_code = Isolate::Current()->bootstrapper()->IsActive()
+ AllowDeferredHandleDereference allow_dereference_for_print_code;
+ bool print_code = info->isolate()->bootstrapper()->IsActive()
? FLAG_print_builtin_code
- : (FLAG_print_code || (info->IsOptimizing() && FLAG_print_opt_code));
+ : (FLAG_print_code ||
+ (info->IsStub() && FLAG_print_code_stubs) ||
+ (info->IsOptimizing() && FLAG_print_opt_code));
if (print_code) {
// Print the source code if available.
FunctionLiteral* function = info->function();
- Handle<Script> script = info->script();
- if (!script->IsUndefined() && !script->source()->IsUndefined()) {
- PrintF("--- Raw source ---\n");
- StringInputBuffer stream(String::cast(script->source()));
- stream.Seek(function->start_position());
- // fun->end_position() points to the last character in the stream. We
- // need to compensate by adding one to calculate the length.
- int source_len =
- function->end_position() - function->start_position() + 1;
- for (int i = 0; i < source_len; i++) {
- if (stream.has_more()) PrintF("%c", stream.GetNext());
+ bool print_source = code->kind() == Code::OPTIMIZED_FUNCTION ||
+ code->kind() == Code::FUNCTION;
+ if (print_source) {
+ Handle<Script> script = info->script();
+ if (!script->IsUndefined() && !script->source()->IsUndefined()) {
+ PrintF("--- Raw source ---\n");
+ ConsStringIteratorOp op;
+ StringCharacterStream stream(String::cast(script->source()),
+ &op,
+ function->start_position());
+ // fun->end_position() points to the last character in the stream. We
+ // need to compensate by adding one to calculate the length.
+ int source_len =
+ function->end_position() - function->start_position() + 1;
+ for (int i = 0; i < source_len; i++) {
+ if (stream.HasMore()) PrintF("%c", stream.GetNext());
+ }
+ PrintF("\n\n");
}
- PrintF("\n\n");
}
if (info->IsOptimizing()) {
if (FLAG_print_unopt_code) {
@@ -144,21 +164,30 @@ void CodeGenerator::PrintCode(Handle<Code> code, CompilationInfo* info) {
} else {
PrintF("--- Code ---\n");
}
- code->Disassemble(*function->debug_name()->ToCString());
+ if (print_source) {
+ PrintF("source_position = %d\n", function->start_position());
+ }
+ if (info->IsStub()) {
+ CodeStub::Major major_key = info->code_stub()->MajorKey();
+ code->Disassemble(CodeStub::MajorName(major_key, false));
+ } else {
+ code->Disassemble(*function->debug_name()->ToCString());
+ }
+ PrintF("--- End code ---\n");
}
#endif // ENABLE_DISASSEMBLER
}
-bool CodeGenerator::ShouldGenerateLog(Expression* type) {
+bool CodeGenerator::ShouldGenerateLog(Isolate* isolate, Expression* type) {
ASSERT(type != NULL);
- Isolate* isolate = Isolate::Current();
- if (!isolate->logger()->is_logging() && !CpuProfiler::is_profiling(isolate)) {
+ if (!isolate->logger()->is_logging() &&
+ !isolate->cpu_profiler()->is_profiling()) {
return false;
}
- Handle<String> name = Handle<String>::cast(type->AsLiteral()->handle());
+ Handle<String> name = Handle<String>::cast(type->AsLiteral()->value());
if (FLAG_log_regexp) {
- if (name->IsEqualTo(CStrVector("regexp")))
+ if (name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("regexp")))
return true;
}
return false;
diff --git a/deps/v8/src/codegen.h b/deps/v8/src/codegen.h
index 08a777f2a..ea2029691 100644
--- a/deps/v8/src/codegen.h
+++ b/deps/v8/src/codegen.h
@@ -90,19 +90,29 @@ namespace internal {
typedef double (*UnaryMathFunction)(double x);
UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type);
+UnaryMathFunction CreateExpFunction();
UnaryMathFunction CreateSqrtFunction();
class ElementsTransitionGenerator : public AllStatic {
public:
- static void GenerateMapChangeElementsTransition(MacroAssembler* masm);
- static void GenerateSmiToDouble(MacroAssembler* masm, Label* fail);
- static void GenerateDoubleToObject(MacroAssembler* masm, Label* fail);
+ // If |mode| is set to DONT_TRACK_ALLOCATION_SITE,
+ // |allocation_memento_found| may be NULL.
+ static void GenerateMapChangeElementsTransition(MacroAssembler* masm,
+ AllocationSiteMode mode,
+ Label* allocation_memento_found);
+ static void GenerateSmiToDouble(MacroAssembler* masm,
+ AllocationSiteMode mode,
+ Label* fail);
+ static void GenerateDoubleToObject(MacroAssembler* masm,
+ AllocationSiteMode mode,
+ Label* fail);
private:
DISALLOW_COPY_AND_ASSIGN(ElementsTransitionGenerator);
};
+
} } // namespace v8::internal
#endif // V8_CODEGEN_H_
diff --git a/deps/v8/src/collection.js b/deps/v8/src/collection.js
index d36fe18fa..01537e87b 100644
--- a/deps/v8/src/collection.js
+++ b/deps/v8/src/collection.js
@@ -27,22 +27,27 @@
"use strict";
+// This file relies on the fact that the following declaration has been made
+// in runtime.js:
+// var $Array = global.Array;
+
var $Set = global.Set;
var $Map = global.Map;
var $WeakMap = global.WeakMap;
-
-//-------------------------------------------------------------------
+var $WeakSet = global.WeakSet;
// Global sentinel to be used instead of undefined keys, which are not
// supported internally but required for Harmony sets and maps.
var undefined_sentinel = {};
+// -------------------------------------------------------------------
+// Harmony Set
function SetConstructor() {
if (%_IsConstructCall()) {
%SetInitialize(this);
} else {
- return new $Set();
+ throw MakeTypeError('constructor_not_function', ['Set']);
}
}
@@ -88,11 +93,55 @@ function SetDelete(key) {
}
+function SetGetSize() {
+ if (!IS_SET(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['Set.prototype.size', this]);
+ }
+ return %SetGetSize(this);
+}
+
+
+function SetClear() {
+ if (!IS_SET(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['Set.prototype.clear', this]);
+ }
+ // Replace the internal table with a new empty table.
+ %SetInitialize(this);
+}
+
+
+// -------------------------------------------------------------------
+
+function SetUpSet() {
+ %CheckIsBootstrapping();
+
+ %SetCode($Set, SetConstructor);
+ %FunctionSetPrototype($Set, new $Object());
+ %SetProperty($Set.prototype, "constructor", $Set, DONT_ENUM);
+
+ // Set up the non-enumerable functions on the Set prototype object.
+ InstallGetter($Set.prototype, "size", SetGetSize);
+ InstallFunctions($Set.prototype, DONT_ENUM, $Array(
+ "add", SetAdd,
+ "has", SetHas,
+ "delete", SetDelete,
+ "clear", SetClear
+ ));
+}
+
+SetUpSet();
+
+
+// -------------------------------------------------------------------
+// Harmony Map
+
function MapConstructor() {
if (%_IsConstructCall()) {
%MapInitialize(this);
} else {
- return new $Map();
+ throw MakeTypeError('constructor_not_function', ['Map']);
}
}
@@ -145,11 +194,56 @@ function MapDelete(key) {
}
+function MapGetSize() {
+ if (!IS_MAP(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['Map.prototype.size', this]);
+ }
+ return %MapGetSize(this);
+}
+
+
+function MapClear() {
+ if (!IS_MAP(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['Map.prototype.clear', this]);
+ }
+ // Replace the internal table with a new empty table.
+ %MapInitialize(this);
+}
+
+
+// -------------------------------------------------------------------
+
+function SetUpMap() {
+ %CheckIsBootstrapping();
+
+ %SetCode($Map, MapConstructor);
+ %FunctionSetPrototype($Map, new $Object());
+ %SetProperty($Map.prototype, "constructor", $Map, DONT_ENUM);
+
+ // Set up the non-enumerable functions on the Map prototype object.
+ InstallGetter($Map.prototype, "size", MapGetSize);
+ InstallFunctions($Map.prototype, DONT_ENUM, $Array(
+ "get", MapGet,
+ "set", MapSet,
+ "has", MapHas,
+ "delete", MapDelete,
+ "clear", MapClear
+ ));
+}
+
+SetUpMap();
+
+
+// -------------------------------------------------------------------
+// Harmony WeakMap
+
function WeakMapConstructor() {
if (%_IsConstructCall()) {
- %WeakMapInitialize(this);
+ %WeakCollectionInitialize(this);
} else {
- return new $WeakMap();
+ throw MakeTypeError('constructor_not_function', ['WeakMap']);
}
}
@@ -159,10 +253,10 @@ function WeakMapGet(key) {
throw MakeTypeError('incompatible_method_receiver',
['WeakMap.prototype.get', this]);
}
- if (!IS_SPEC_OBJECT(key)) {
+ if (!(IS_SPEC_OBJECT(key) || IS_SYMBOL(key))) {
throw %MakeTypeError('invalid_weakmap_key', [this, key]);
}
- return %WeakMapGet(this, key);
+ return %WeakCollectionGet(this, key);
}
@@ -171,10 +265,10 @@ function WeakMapSet(key, value) {
throw MakeTypeError('incompatible_method_receiver',
['WeakMap.prototype.set', this]);
}
- if (!IS_SPEC_OBJECT(key)) {
+ if (!(IS_SPEC_OBJECT(key) || IS_SYMBOL(key))) {
throw %MakeTypeError('invalid_weakmap_key', [this, key]);
}
- return %WeakMapSet(this, key, value);
+ return %WeakCollectionSet(this, key, value);
}
@@ -183,10 +277,10 @@ function WeakMapHas(key) {
throw MakeTypeError('incompatible_method_receiver',
['WeakMap.prototype.has', this]);
}
- if (!IS_SPEC_OBJECT(key)) {
+ if (!(IS_SPEC_OBJECT(key) || IS_SYMBOL(key))) {
throw %MakeTypeError('invalid_weakmap_key', [this, key]);
}
- return %WeakMapHas(this, key);
+ return %WeakCollectionHas(this, key);
}
@@ -195,44 +289,30 @@ function WeakMapDelete(key) {
throw MakeTypeError('incompatible_method_receiver',
['WeakMap.prototype.delete', this]);
}
- if (!IS_SPEC_OBJECT(key)) {
+ if (!(IS_SPEC_OBJECT(key) || IS_SYMBOL(key))) {
throw %MakeTypeError('invalid_weakmap_key', [this, key]);
}
- return %WeakMapDelete(this, key);
+ return %WeakCollectionDelete(this, key);
}
-// -------------------------------------------------------------------
-
-(function () {
- %CheckIsBootstrapping();
- // Set up the Set and Map constructor function.
- %SetCode($Set, SetConstructor);
- %SetCode($Map, MapConstructor);
+function WeakMapClear() {
+ if (!IS_WEAKMAP(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['WeakMap.prototype.clear', this]);
+ }
+ // Replace the internal table with a new empty table.
+ %WeakCollectionInitialize(this);
+}
- // Set up the constructor property on the Set and Map prototype object.
- %SetProperty($Set.prototype, "constructor", $Set, DONT_ENUM);
- %SetProperty($Map.prototype, "constructor", $Map, DONT_ENUM);
- // Set up the non-enumerable functions on the Set prototype object.
- InstallFunctions($Set.prototype, DONT_ENUM, $Array(
- "add", SetAdd,
- "has", SetHas,
- "delete", SetDelete
- ));
+// -------------------------------------------------------------------
- // Set up the non-enumerable functions on the Map prototype object.
- InstallFunctions($Map.prototype, DONT_ENUM, $Array(
- "get", MapGet,
- "set", MapSet,
- "has", MapHas,
- "delete", MapDelete
- ));
+function SetUpWeakMap() {
+ %CheckIsBootstrapping();
- // Set up the WeakMap constructor function.
%SetCode($WeakMap, WeakMapConstructor);
-
- // Set up the constructor property on the WeakMap prototype object.
+ %FunctionSetPrototype($WeakMap, new $Object());
%SetProperty($WeakMap.prototype, "constructor", $WeakMap, DONT_ENUM);
// Set up the non-enumerable functions on the WeakMap prototype object.
@@ -240,6 +320,88 @@ function WeakMapDelete(key) {
"get", WeakMapGet,
"set", WeakMapSet,
"has", WeakMapHas,
- "delete", WeakMapDelete
+ "delete", WeakMapDelete,
+ "clear", WeakMapClear
+ ));
+}
+
+SetUpWeakMap();
+
+
+// -------------------------------------------------------------------
+// Harmony WeakSet
+
+function WeakSetConstructor() {
+ if (%_IsConstructCall()) {
+ %WeakCollectionInitialize(this);
+ } else {
+ throw MakeTypeError('constructor_not_function', ['WeakSet']);
+ }
+}
+
+
+function WeakSetAdd(value) {
+ if (!IS_WEAKSET(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['WeakSet.prototype.add', this]);
+ }
+ if (!(IS_SPEC_OBJECT(value) || IS_SYMBOL(value))) {
+ throw %MakeTypeError('invalid_weakset_value', [this, value]);
+ }
+ return %WeakCollectionSet(this, value, true);
+}
+
+
+function WeakSetHas(value) {
+ if (!IS_WEAKSET(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['WeakSet.prototype.has', this]);
+ }
+ if (!(IS_SPEC_OBJECT(value) || IS_SYMBOL(value))) {
+ throw %MakeTypeError('invalid_weakset_value', [this, value]);
+ }
+ return %WeakCollectionHas(this, value);
+}
+
+
+function WeakSetDelete(value) {
+ if (!IS_WEAKSET(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['WeakSet.prototype.delete', this]);
+ }
+ if (!(IS_SPEC_OBJECT(value) || IS_SYMBOL(value))) {
+ throw %MakeTypeError('invalid_weakset_value', [this, value]);
+ }
+ return %WeakCollectionDelete(this, value);
+}
+
+
+function WeakSetClear() {
+ if (!IS_WEAKSET(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['WeakSet.prototype.clear', this]);
+ }
+ // Replace the internal table with a new empty table.
+ %WeakCollectionInitialize(this);
+}
+
+
+// -------------------------------------------------------------------
+
+function SetUpWeakSet() {
+ %CheckIsBootstrapping();
+
+ %SetCode($WeakSet, WeakSetConstructor);
+ %FunctionSetPrototype($WeakSet, new $Object());
+ %SetProperty($WeakSet.prototype, "constructor", $WeakSet, DONT_ENUM);
+
+ // Set up the non-enumerable functions on the WeakSet prototype object.
+ InstallFunctions($WeakSet.prototype, DONT_ENUM, $Array(
+ "add", WeakSetAdd,
+ "has", WeakSetHas,
+ "delete", WeakSetDelete,
+ "clear", WeakSetClear
));
-})();
+}
+
+SetUpWeakSet();
diff --git a/deps/v8/src/compilation-cache.cc b/deps/v8/src/compilation-cache.cc
index c0645760b..fffe5da71 100644
--- a/deps/v8/src/compilation-cache.cc
+++ b/deps/v8/src/compilation-cache.cc
@@ -67,7 +67,7 @@ CompilationCache::~CompilationCache() {}
static Handle<CompilationCacheTable> AllocateTable(Isolate* isolate, int size) {
CALL_HEAP_FUNCTION(isolate,
- CompilationCacheTable::Allocate(size),
+ CompilationCacheTable::Allocate(isolate->heap(), size),
CompilationCacheTable);
}
@@ -86,6 +86,7 @@ Handle<CompilationCacheTable> CompilationSubCache::GetTable(int generation) {
return result;
}
+
void CompilationSubCache::Age() {
// Age the generations implicitly killing off the oldest.
for (int i = generations_ - 1; i > 0; i--) {
@@ -98,7 +99,7 @@ void CompilationSubCache::Age() {
void CompilationSubCache::IterateFunctions(ObjectVisitor* v) {
- Object* undefined = isolate()->heap()->raw_unchecked_undefined_value();
+ Object* undefined = isolate()->heap()->undefined_value();
for (int i = 0; i < generations_; i++) {
if (tables_[i] != undefined) {
reinterpret_cast<CompilationCacheTable*>(tables_[i])->IterateElements(v);
@@ -143,7 +144,8 @@ bool CompilationCacheScript::HasOrigin(
Handle<SharedFunctionInfo> function_info,
Handle<Object> name,
int line_offset,
- int column_offset) {
+ int column_offset,
+ bool is_shared_cross_origin) {
Handle<Script> script =
Handle<Script>(Script::cast(function_info->script()), isolate());
// If the script name isn't set, the boilerplate script should have
@@ -156,6 +158,8 @@ bool CompilationCacheScript::HasOrigin(
if (column_offset != script->column_offset()->value()) return false;
// Check that both names are strings. If not, no match.
if (!name->IsString() || !script->name()->IsString()) return false;
+ // Were both scripts tagged by the embedder as being shared cross-origin?
+ if (is_shared_cross_origin != script->is_shared_cross_origin()) return false;
// Compare the two name strings for equality.
return String::cast(*name)->Equals(String::cast(script->name()));
}
@@ -170,6 +174,7 @@ Handle<SharedFunctionInfo> CompilationCacheScript::Lookup(
Handle<Object> name,
int line_offset,
int column_offset,
+ bool is_shared_cross_origin,
Handle<Context> context) {
Object* result = NULL;
int generation;
@@ -185,7 +190,11 @@ Handle<SharedFunctionInfo> CompilationCacheScript::Lookup(
Handle<SharedFunctionInfo>::cast(probe);
// Break when we've found a suitable shared function info that
// matches the origin.
- if (HasOrigin(function_info, name, line_offset, column_offset)) {
+ if (HasOrigin(function_info,
+ name,
+ line_offset,
+ column_offset,
+ is_shared_cross_origin)) {
result = *function_info;
break;
}
@@ -213,7 +222,11 @@ Handle<SharedFunctionInfo> CompilationCacheScript::Lookup(
if (result != NULL) {
Handle<SharedFunctionInfo> shared(SharedFunctionInfo::cast(result),
isolate());
- ASSERT(HasOrigin(shared, name, line_offset, column_offset));
+ ASSERT(HasOrigin(shared,
+ name,
+ line_offset,
+ column_offset,
+ is_shared_cross_origin));
// If the script was found in a later generation, we promote it to
// the first generation to let it survive longer in the cache.
if (generation != 0) Put(source, context, shared);
@@ -390,12 +403,18 @@ Handle<SharedFunctionInfo> CompilationCache::LookupScript(
Handle<Object> name,
int line_offset,
int column_offset,
+ bool is_shared_cross_origin,
Handle<Context> context) {
if (!IsEnabled()) {
return Handle<SharedFunctionInfo>::null();
}
- return script_.Lookup(source, name, line_offset, column_offset, context);
+ return script_.Lookup(source,
+ name,
+ line_offset,
+ column_offset,
+ is_shared_cross_origin,
+ context);
}
diff --git a/deps/v8/src/compilation-cache.h b/deps/v8/src/compilation-cache.h
index 7a236e8fb..414e09e65 100644
--- a/deps/v8/src/compilation-cache.h
+++ b/deps/v8/src/compilation-cache.h
@@ -99,6 +99,7 @@ class CompilationCacheScript : public CompilationSubCache {
Handle<Object> name,
int line_offset,
int column_offset,
+ bool is_shared_cross_origin,
Handle<Context> context);
void Put(Handle<String> source,
Handle<Context> context,
@@ -119,7 +120,8 @@ class CompilationCacheScript : public CompilationSubCache {
bool HasOrigin(Handle<SharedFunctionInfo> function_info,
Handle<Object> name,
int line_offset,
- int column_offset);
+ int column_offset,
+ bool is_shared_cross_origin);
void* script_histogram_;
bool script_histogram_initialized_;
@@ -212,6 +214,7 @@ class CompilationCache {
Handle<Object> name,
int line_offset,
int column_offset,
+ bool is_shared_cross_origin,
Handle<Context> context);
// Finds the shared function info for a source string for eval in a
diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc
index 86374371e..ed0a0c8e6 100644
--- a/deps/v8/src/compiler.cc
+++ b/deps/v8/src/compiler.cc
@@ -32,9 +32,12 @@
#include "bootstrapper.h"
#include "codegen.h"
#include "compilation-cache.h"
+#include "cpu-profiler.h"
#include "debug.h"
+#include "deoptimizer.h"
#include "full-codegen.h"
#include "gdb-jit.h"
+#include "typing.h"
#include "hydrogen.h"
#include "isolate-inl.h"
#include "lithium.h"
@@ -51,63 +54,160 @@ namespace v8 {
namespace internal {
-CompilationInfo::CompilationInfo(Handle<Script> script, Zone* zone)
- : isolate_(script->GetIsolate()),
- flags_(LanguageModeField::encode(CLASSIC_MODE)),
- function_(NULL),
- scope_(NULL),
- global_scope_(NULL),
+CompilationInfo::CompilationInfo(Handle<Script> script,
+ Zone* zone)
+ : flags_(LanguageModeField::encode(CLASSIC_MODE)),
script_(script),
- extension_(NULL),
- pre_parse_data_(NULL),
osr_ast_id_(BailoutId::None()),
- zone_(zone),
- deferred_handles_(NULL) {
- Initialize(BASE);
+ osr_pc_offset_(0) {
+ Initialize(script->GetIsolate(), BASE, zone);
}
CompilationInfo::CompilationInfo(Handle<SharedFunctionInfo> shared_info,
Zone* zone)
- : isolate_(shared_info->GetIsolate()),
- flags_(LanguageModeField::encode(CLASSIC_MODE) |
- IsLazy::encode(true)),
- function_(NULL),
- scope_(NULL),
- global_scope_(NULL),
+ : flags_(LanguageModeField::encode(CLASSIC_MODE) | IsLazy::encode(true)),
shared_info_(shared_info),
script_(Handle<Script>(Script::cast(shared_info->script()))),
- extension_(NULL),
- pre_parse_data_(NULL),
osr_ast_id_(BailoutId::None()),
- zone_(zone),
- deferred_handles_(NULL) {
- Initialize(BASE);
+ osr_pc_offset_(0) {
+ Initialize(script_->GetIsolate(), BASE, zone);
}
-CompilationInfo::CompilationInfo(Handle<JSFunction> closure, Zone* zone)
- : isolate_(closure->GetIsolate()),
- flags_(LanguageModeField::encode(CLASSIC_MODE) |
- IsLazy::encode(true)),
- function_(NULL),
- scope_(NULL),
- global_scope_(NULL),
+CompilationInfo::CompilationInfo(Handle<JSFunction> closure,
+ Zone* zone)
+ : flags_(LanguageModeField::encode(CLASSIC_MODE) | IsLazy::encode(true)),
closure_(closure),
shared_info_(Handle<SharedFunctionInfo>(closure->shared())),
script_(Handle<Script>(Script::cast(shared_info_->script()))),
- extension_(NULL),
- pre_parse_data_(NULL),
context_(closure->context()),
osr_ast_id_(BailoutId::None()),
- zone_(zone),
- deferred_handles_(NULL) {
- Initialize(BASE);
+ osr_pc_offset_(0) {
+ Initialize(script_->GetIsolate(), BASE, zone);
+}
+
+
+CompilationInfo::CompilationInfo(HydrogenCodeStub* stub,
+ Isolate* isolate,
+ Zone* zone)
+ : flags_(LanguageModeField::encode(CLASSIC_MODE) |
+ IsLazy::encode(true)),
+ osr_ast_id_(BailoutId::None()),
+ osr_pc_offset_(0) {
+ Initialize(isolate, STUB, zone);
+ code_stub_ = stub;
+}
+
+
+void CompilationInfo::Initialize(Isolate* isolate,
+ Mode mode,
+ Zone* zone) {
+ isolate_ = isolate;
+ function_ = NULL;
+ scope_ = NULL;
+ global_scope_ = NULL;
+ extension_ = NULL;
+ pre_parse_data_ = NULL;
+ zone_ = zone;
+ deferred_handles_ = NULL;
+ code_stub_ = NULL;
+ prologue_offset_ = Code::kPrologueOffsetNotSet;
+ opt_count_ = shared_info().is_null() ? 0 : shared_info()->opt_count();
+ no_frame_ranges_ = isolate->cpu_profiler()->is_profiling()
+ ? new List<OffsetRange>(2) : NULL;
+ for (int i = 0; i < DependentCode::kGroupCount; i++) {
+ dependencies_[i] = NULL;
+ }
+ if (mode == STUB) {
+ mode_ = STUB;
+ return;
+ }
+ mode_ = mode;
+ abort_due_to_dependency_ = false;
+ if (script_->type()->value() == Script::TYPE_NATIVE) {
+ MarkAsNative();
+ }
+ if (!shared_info_.is_null()) {
+ ASSERT(language_mode() == CLASSIC_MODE);
+ SetLanguageMode(shared_info_->language_mode());
+ }
+ set_bailout_reason(kUnknown);
}
CompilationInfo::~CompilationInfo() {
delete deferred_handles_;
+ delete no_frame_ranges_;
+#ifdef DEBUG
+ // Check that no dependent maps have been added, or that any added
+ // dependent maps have been rolled back or committed.
+ for (int i = 0; i < DependentCode::kGroupCount; i++) {
+ ASSERT_EQ(NULL, dependencies_[i]);
+ }
+#endif // DEBUG
+}
+
+
+void CompilationInfo::CommitDependencies(Handle<Code> code) {
+ for (int i = 0; i < DependentCode::kGroupCount; i++) {
+ ZoneList<Handle<HeapObject> >* group_objects = dependencies_[i];
+ if (group_objects == NULL) continue;
+ ASSERT(!object_wrapper_.is_null());
+ for (int j = 0; j < group_objects->length(); j++) {
+ DependentCode::DependencyGroup group =
+ static_cast<DependentCode::DependencyGroup>(i);
+ DependentCode* dependent_code =
+ DependentCode::ForObject(group_objects->at(j), group);
+ dependent_code->UpdateToFinishedCode(group, this, *code);
+ }
+ dependencies_[i] = NULL; // Zone-allocated, no need to delete.
+ }
+}
+
+
+void CompilationInfo::RollbackDependencies() {
+ // Unregister from all dependent maps if not yet committed.
+ for (int i = 0; i < DependentCode::kGroupCount; i++) {
+ ZoneList<Handle<HeapObject> >* group_objects = dependencies_[i];
+ if (group_objects == NULL) continue;
+ for (int j = 0; j < group_objects->length(); j++) {
+ DependentCode::DependencyGroup group =
+ static_cast<DependentCode::DependencyGroup>(i);
+ DependentCode* dependent_code =
+ DependentCode::ForObject(group_objects->at(j), group);
+ dependent_code->RemoveCompilationInfo(group, this);
+ }
+ dependencies_[i] = NULL; // Zone-allocated, no need to delete.
+ }
+}
+
+
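CommitDependencies and RollbackDependencies above implement a register-then-commit-or-undo pattern: while the optimized code is being built, the compilation registers itself with every object it depends on; on success the registrations become references to the finished code, otherwise they are all removed. A minimal stand-alone sketch of that pattern follows; every name in it is illustrative and none of it is V8's actual API.

#include <cstdio>
#include <set>
#include <string>
#include <utility>
#include <vector>

// A dependency target that tracks which compilations depend on it.
struct DependencyGroup {
  std::set<std::string> pending;            // compilations still in flight
  std::vector<std::string> committed_code;  // installed code objects
};

class Compilation {
 public:
  explicit Compilation(std::string name) : name_(std::move(name)) {}

  void Register(DependencyGroup* group) {
    group->pending.insert(name_);
    groups_.push_back(group);
  }

  // On success: turn every pending registration into a committed one.
  void CommitDependencies(const std::string& code) {
    for (DependencyGroup* g : groups_) {
      g->pending.erase(name_);
      g->committed_code.push_back(code);
    }
    groups_.clear();
  }

  // On failure: simply drop the pending registrations.
  void RollbackDependencies() {
    for (DependencyGroup* g : groups_) g->pending.erase(name_);
    groups_.clear();
  }

 private:
  std::string name_;
  std::vector<DependencyGroup*> groups_;
};

int main() {
  DependencyGroup transitions;
  Compilation job("opt#1");
  job.Register(&transitions);
  job.CommitDependencies("code-object");
  std::printf("pending=%zu committed=%zu\n",
              transitions.pending.size(), transitions.committed_code.size());
  return 0;
}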
+int CompilationInfo::num_parameters() const {
+ ASSERT(!IsStub());
+ return scope()->num_parameters();
+}
+
+
+int CompilationInfo::num_heap_slots() const {
+ if (IsStub()) {
+ return 0;
+ } else {
+ return scope()->num_heap_slots();
+ }
+}
+
+
+Code::Flags CompilationInfo::flags() const {
+ if (IsStub()) {
+ return Code::ComputeFlags(code_stub()->GetCodeKind(),
+ code_stub()->GetICState(),
+ code_stub()->GetExtraICState(),
+ code_stub()->GetStubType(),
+ code_stub()->GetStubFlags());
+ } else {
+ return Code::ComputeFlags(Code::OPTIMIZED_FUNCTION);
+ }
}
@@ -130,18 +230,12 @@ bool CompilationInfo::ShouldSelfOptimize() {
return FLAG_self_optimization &&
FLAG_crankshaft &&
!function()->flags()->Contains(kDontSelfOptimize) &&
- !function()->flags()->Contains(kDontOptimize) &&
+ !function()->dont_optimize() &&
function()->scope()->AllowsLazyCompilation() &&
(shared_info().is_null() || !shared_info()->optimization_disabled());
}
-void CompilationInfo::AbortOptimization() {
- Handle<Code> code(shared_info()->code());
- SetCode(code);
-}
-
-
// Determine whether to use the full compiler for all code. If the flag
// --always-full-compiler is specified this is the case. For the virtual frame
// based compiler the full compiler is also used if a debugger is connected, as
@@ -152,7 +246,7 @@ void CompilationInfo::AbortOptimization() {
// break points has actually been set.
static bool IsDebuggerActive(Isolate* isolate) {
#ifdef ENABLE_DEBUGGER_SUPPORT
- return V8::UseCrankshaft() ?
+ return isolate->use_crankshaft() ?
isolate->debug()->has_break_points() :
isolate->debugger()->IsDebuggerActive();
#else
@@ -166,18 +260,16 @@ static bool AlwaysFullCompiler(Isolate* isolate) {
}
-void OptimizingCompiler::RecordOptimizationStats() {
+void RecompileJob::RecordOptimizationStats() {
Handle<JSFunction> function = info()->closure();
int opt_count = function->shared()->opt_count();
function->shared()->set_opt_count(opt_count + 1);
- double ms_creategraph =
- static_cast<double>(time_taken_to_create_graph_) / 1000;
- double ms_optimize = static_cast<double>(time_taken_to_optimize_) / 1000;
- double ms_codegen = static_cast<double>(time_taken_to_codegen_) / 1000;
+ double ms_creategraph = time_taken_to_create_graph_.InMillisecondsF();
+ double ms_optimize = time_taken_to_optimize_.InMillisecondsF();
+ double ms_codegen = time_taken_to_codegen_.InMillisecondsF();
if (FLAG_trace_opt) {
- PrintF("[optimizing: ");
- function->PrintName();
- PrintF(" / %" V8PRIxPTR, reinterpret_cast<intptr_t>(*function));
+ PrintF("[optimizing ");
+ function->ShortPrint();
PrintF(" - took %0.3f, %0.3f, %0.3f ms]\n", ms_creategraph, ms_optimize,
ms_codegen);
}
@@ -194,37 +286,78 @@ void OptimizingCompiler::RecordOptimizationStats() {
code_size,
compilation_time);
}
+ if (FLAG_hydrogen_stats) {
+ isolate()->GetHStatistics()->IncrementSubtotals(time_taken_to_create_graph_,
+ time_taken_to_optimize_,
+ time_taken_to_codegen_);
+ }
}
// A return value of true indicates the compilation pipeline is still
// going, not necessarily that we optimized the code.
static bool MakeCrankshaftCode(CompilationInfo* info) {
- OptimizingCompiler compiler(info);
- OptimizingCompiler::Status status = compiler.CreateGraph();
+ RecompileJob job(info);
+ RecompileJob::Status status = job.CreateGraph();
- if (status != OptimizingCompiler::SUCCEEDED) {
- return status != OptimizingCompiler::FAILED;
+ if (status != RecompileJob::SUCCEEDED) {
+ return status != RecompileJob::FAILED;
}
- status = compiler.OptimizeGraph();
- if (status != OptimizingCompiler::SUCCEEDED) {
- status = compiler.AbortOptimization();
- return status != OptimizingCompiler::FAILED;
+ status = job.OptimizeGraph();
+ if (status != RecompileJob::SUCCEEDED) {
+ status = job.AbortOptimization();
+ return status != RecompileJob::FAILED;
}
- status = compiler.GenerateAndInstallCode();
- return status != OptimizingCompiler::FAILED;
+ status = job.GenerateAndInstallCode();
+ return status != RecompileJob::FAILED;
}
-OptimizingCompiler::Status OptimizingCompiler::CreateGraph() {
- ASSERT(V8::UseCrankshaft());
+class HOptimizedGraphBuilderWithPositions: public HOptimizedGraphBuilder {
+ public:
+ explicit HOptimizedGraphBuilderWithPositions(CompilationInfo* info)
+ : HOptimizedGraphBuilder(info) {
+ }
+
+#define DEF_VISIT(type) \
+ virtual void Visit##type(type* node) V8_OVERRIDE { \
+ if (node->position() != RelocInfo::kNoPosition) { \
+ SetSourcePosition(node->position()); \
+ } \
+ HOptimizedGraphBuilder::Visit##type(node); \
+ }
+ EXPRESSION_NODE_LIST(DEF_VISIT)
+#undef DEF_VISIT
+
+#define DEF_VISIT(type) \
+ virtual void Visit##type(type* node) V8_OVERRIDE { \
+ if (node->position() != RelocInfo::kNoPosition) { \
+ SetSourcePosition(node->position()); \
+ } \
+ HOptimizedGraphBuilder::Visit##type(node); \
+ }
+ STATEMENT_NODE_LIST(DEF_VISIT)
+#undef DEF_VISIT
+
+#define DEF_VISIT(type) \
+ virtual void Visit##type(type* node) V8_OVERRIDE { \
+ HOptimizedGraphBuilder::Visit##type(node); \
+ }
+ MODULE_NODE_LIST(DEF_VISIT)
+ DECLARATION_NODE_LIST(DEF_VISIT)
+ AUXILIARY_NODE_LIST(DEF_VISIT)
+#undef DEF_VISIT
+};
+
+
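HOptimizedGraphBuilderWithPositions above uses the AST node-list macros to stamp out one Visit##type override per node kind, each recording the node's source position before delegating to the base builder. Here is a small sketch of that macro-generated visitor-override pattern; the NODE_LIST macro, node structs, and GraphBuilder classes are invented for this example and do not mirror V8's real AST.

#include <cstdio>

// Hypothetical node list in the style of EXPRESSION_NODE_LIST.
#define NODE_LIST(V) V(Literal) V(BinaryOperation)

struct Literal { int position() const { return 10; } };
struct BinaryOperation { int position() const { return 42; } };

class GraphBuilder {
 public:
  virtual ~GraphBuilder() {}
#define DECLARE_VISIT(type) \
  virtual void Visit##type(type* node) { (void)node; }
  NODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT
};

// Derived builder that records the source position, then delegates.
class GraphBuilderWithPositions : public GraphBuilder {
 public:
#define DEF_VISIT(type)                         \
  virtual void Visit##type(type* node) {        \
    SetSourcePosition(node->position());        \
    GraphBuilder::Visit##type(node);            \
  }
  NODE_LIST(DEF_VISIT)
#undef DEF_VISIT

 private:
  void SetSourcePosition(int pos) { std::printf("position %d\n", pos); }
};

int main() {
  Literal lit;
  GraphBuilderWithPositions builder;
  builder.VisitLiteral(&lit);  // prints "position 10"
  return 0;
}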
+RecompileJob::Status RecompileJob::CreateGraph() {
+ ASSERT(isolate()->use_crankshaft());
ASSERT(info()->IsOptimizing());
ASSERT(!info()->IsCompilingForDebugging());
// We should never arrive here if there is no code object on the
// shared function object.
- Handle<Code> code(info()->shared_info()->code());
- ASSERT(code->kind() == Code::FUNCTION);
+ ASSERT(info()->shared_info()->code()->kind() == Code::FUNCTION);
// We should never arrive here if optimization has been disabled on the
// shared function info.
@@ -233,8 +366,8 @@ OptimizingCompiler::Status OptimizingCompiler::CreateGraph() {
// Fall back to using the full code generator if it's not possible
// to use the Hydrogen-based optimizing compiler. We already have
// generated code for this from the shared function object.
- if (AlwaysFullCompiler(info()->isolate())) {
- info()->SetCode(code);
+ if (AlwaysFullCompiler(isolate())) {
+ info()->AbortOptimization();
return SetLastStatus(BAILED_OUT);
}
@@ -242,8 +375,8 @@ OptimizingCompiler::Status OptimizingCompiler::CreateGraph() {
// the optimizing compiler.
const int kMaxOptCount =
FLAG_deopt_every_n_times == 0 ? FLAG_max_opt_count : 1000;
- if (info()->shared_info()->opt_count() > kMaxOptCount) {
- info()->set_bailout_reason("optimized too many times");
+ if (info()->opt_count() > kMaxOptCount) {
+ info()->set_bailout_reason(kOptimizedTooManyTimes);
return AbortOptimization();
}
@@ -254,40 +387,36 @@ OptimizingCompiler::Status OptimizingCompiler::CreateGraph() {
//
// The encoding is as a signed value, with parameters and receiver using
// the negative indices and locals the non-negative ones.
- const int parameter_limit = -LUnallocated::kMinFixedIndex;
+ const int parameter_limit = -LUnallocated::kMinFixedSlotIndex;
Scope* scope = info()->scope();
if ((scope->num_parameters() + 1) > parameter_limit) {
- info()->set_bailout_reason("too many parameters");
+ info()->set_bailout_reason(kTooManyParameters);
return AbortOptimization();
}
- const int locals_limit = LUnallocated::kMaxFixedIndex;
- if (!info()->osr_ast_id().IsNone() &&
+ const int locals_limit = LUnallocated::kMaxFixedSlotIndex;
+ if (info()->is_osr() &&
scope->num_parameters() + 1 + scope->num_stack_slots() > locals_limit) {
- info()->set_bailout_reason("too many parameters/locals");
+ info()->set_bailout_reason(kTooManyParametersLocals);
return AbortOptimization();
}
// Take --hydrogen-filter into account.
- Handle<String> name = info()->function()->debug_name();
- if (*FLAG_hydrogen_filter != '\0') {
- Vector<const char> filter = CStrVector(FLAG_hydrogen_filter);
- if ((filter[0] == '-'
- && name->IsEqualTo(filter.SubVector(1, filter.length())))
- || (filter[0] != '-' && !name->IsEqualTo(filter))) {
- info()->SetCode(code);
- return SetLastStatus(BAILED_OUT);
- }
+ if (!info()->closure()->PassesFilter(FLAG_hydrogen_filter)) {
+ info()->AbortOptimization();
+ return SetLastStatus(BAILED_OUT);
}
// Recompile the unoptimized version of the code if the current version
// doesn't have deoptimization support. Alternatively, we may decide to
// run the full code generator to get a baseline for the compile-time
// performance of the hydrogen-based compiler.
- Timer t(this, &time_taken_to_create_graph_);
bool should_recompile = !info()->shared_info()->has_deoptimization_support();
if (should_recompile || FLAG_hydrogen_stats) {
- HPhase phase(HPhase::kFullCodeGen);
+ ElapsedTimer timer;
+ if (FLAG_hydrogen_stats) {
+ timer.Start();
+ }
CompilationInfoWithZone unoptimized(info()->shared_info());
// Note that we use the same AST that we will use for generating the
// optimized code.
@@ -304,6 +433,9 @@ OptimizingCompiler::Status OptimizingCompiler::CreateGraph() {
Compiler::RecordFunctionCompilation(
Logger::LAZY_COMPILE_TAG, &unoptimized, shared);
}
+ if (FLAG_hydrogen_stats) {
+ isolate()->GetHStatistics()->IncrementFullCodeGen(timer.Elapsed());
+ }
}
// Check that the unoptimized, shared code is ready for
@@ -311,23 +443,27 @@ OptimizingCompiler::Status OptimizingCompiler::CreateGraph() {
// optimizable marker in the code object and optimize anyway. This
// is safe as long as the unoptimized code has deoptimization
// support.
- ASSERT(FLAG_always_opt || code->optimizable());
+ ASSERT(FLAG_always_opt || info()->shared_info()->code()->optimizable());
ASSERT(info()->shared_info()->has_deoptimization_support());
if (FLAG_trace_hydrogen) {
+ Handle<String> name = info()->function()->debug_name();
PrintF("-----------------------------------------------------------\n");
PrintF("Compiling method %s using hydrogen\n", *name->ToCString());
- HTracer::Instance()->TraceCompilation(info()->function());
+ isolate()->GetHTracer()->TraceCompilation(info());
}
- Handle<Context> native_context(
- info()->closure()->context()->native_context());
- oracle_ = new(info()->zone()) TypeFeedbackOracle(
- code, native_context, info()->isolate(), info()->zone());
- graph_builder_ = new(info()->zone()) HGraphBuilder(info(), oracle_);
- HPhase phase(HPhase::kTotal);
+
+ // Type-check the function.
+ AstTyper::Run(info());
+
+ graph_builder_ = FLAG_emit_opt_code_positions
+ ? new(info()->zone()) HOptimizedGraphBuilderWithPositions(info())
+ : new(info()->zone()) HOptimizedGraphBuilder(info());
+
+ Timer t(this, &time_taken_to_create_graph_);
graph_ = graph_builder_->CreateGraph();
- if (info()->isolate()->has_pending_exception()) {
+ if (isolate()->has_pending_exception()) {
info()->SetCode(Handle<Code>::null());
return SetLastStatus(FAILED);
}
@@ -345,19 +481,28 @@ OptimizingCompiler::Status OptimizingCompiler::CreateGraph() {
}
}
+ if (info()->HasAbortedDueToDependencyChange()) {
+ info_->set_bailout_reason(kBailedOutDueToDependencyChange);
+ info_->AbortOptimization();
+ return SetLastStatus(BAILED_OUT);
+ }
+
return SetLastStatus(SUCCEEDED);
}
-OptimizingCompiler::Status OptimizingCompiler::OptimizeGraph() {
- AssertNoAllocation no_gc;
- NoHandleAllocation no_handles;
+
+RecompileJob::Status RecompileJob::OptimizeGraph() {
+ DisallowHeapAllocation no_allocation;
+ DisallowHandleAllocation no_handles;
+ DisallowHandleDereference no_deref;
+ DisallowCodeDependencyChange no_dependency_change;
ASSERT(last_status() == SUCCEEDED);
Timer t(this, &time_taken_to_optimize_);
ASSERT(graph_ != NULL);
- SmartArrayPointer<char> bailout_reason;
+ BailoutReason bailout_reason = kNoReason;
if (!graph_->Optimize(&bailout_reason)) {
- if (!bailout_reason.is_empty()) graph_builder_->Bailout(*bailout_reason);
+ if (bailout_reason != kNoReason) graph_builder_->Bailout(bailout_reason);
return SetLastStatus(BAILED_OUT);
} else {
chunk_ = LChunk::NewChunk(graph_);
@@ -369,27 +514,42 @@ OptimizingCompiler::Status OptimizingCompiler::OptimizeGraph() {
}
-OptimizingCompiler::Status OptimizingCompiler::GenerateAndInstallCode() {
+RecompileJob::Status RecompileJob::GenerateAndInstallCode() {
ASSERT(last_status() == SUCCEEDED);
- Timer timer(this, &time_taken_to_codegen_);
- ASSERT(chunk_ != NULL);
- ASSERT(graph_ != NULL);
- Handle<Code> optimized_code = chunk_->Codegen();
- if (optimized_code.is_null()) {
- info()->set_bailout_reason("code generation failed");
- return AbortOptimization();
+ ASSERT(!info()->HasAbortedDueToDependencyChange());
+ DisallowCodeDependencyChange no_dependency_change;
+ { // Scope for timer.
+ Timer timer(this, &time_taken_to_codegen_);
+ ASSERT(chunk_ != NULL);
+ ASSERT(graph_ != NULL);
+ // Deferred handles reference objects that were accessible during
+ // graph creation. To make sure that we don't encounter inconsistencies
+ // between graph creation and code generation, we disallow accessing
+ // objects through deferred handles during the latter, with exceptions.
+ DisallowDeferredHandleDereference no_deferred_handle_deref;
+ Handle<Code> optimized_code = chunk_->Codegen();
+ if (optimized_code.is_null()) {
+ if (info()->bailout_reason() == kNoReason) {
+ info()->set_bailout_reason(kCodeGenerationFailed);
+ }
+ return AbortOptimization();
+ }
+ info()->SetCode(optimized_code);
}
- info()->SetCode(optimized_code);
RecordOptimizationStats();
+ // Add to the weak list of optimized code objects.
+ info()->context()->native_context()->AddOptimizedCode(*info()->code());
return SetLastStatus(SUCCEEDED);
}
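The comment above motivates DisallowDeferredHandleDereference: handles captured while the graph was built must not be dereferenced while code is being generated. As a rough illustration of how such scoped-disallow guards can be built, here is a minimal standalone sketch (thread-local counter plus RAII scope); V8's real assert scopes are per-isolate and more elaborate, so treat this only as the general shape of the idiom.

// Editor's sketch, not V8 code: a scoped "disallow" guard.
#include <cassert>

namespace sketch {

thread_local int disallow_depth = 0;  // > 0 while the operation is forbidden

class DisallowScope {
 public:
  DisallowScope() { ++disallow_depth; }
  ~DisallowScope() { --disallow_depth; }
  DisallowScope(const DisallowScope&) = delete;
  DisallowScope& operator=(const DisallowScope&) = delete;
};

inline void AssertDereferenceAllowed() {
  // Fails if any DisallowScope is active on this thread.
  assert(disallow_depth == 0 && "deferred handle dereferenced during codegen");
}

}  // namespace sketch

int main() {
  sketch::AssertDereferenceAllowed();   // fine: no scope active
  {
    sketch::DisallowScope no_deref;     // mirrors DisallowDeferredHandleDereference
    // sketch::AssertDereferenceAllowed();  // would abort here
  }
  sketch::AssertDereferenceAllowed();   // fine again after the scope closes
  return 0;
}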
static bool GenerateCode(CompilationInfo* info) {
- bool is_optimizing = V8::UseCrankshaft() &&
+ bool is_optimizing = info->isolate()->use_crankshaft() &&
!info->IsCompilingForDebugging() &&
info->IsOptimizing();
if (is_optimizing) {
+ Logger::TimerEventScope timer(
+ info->isolate(), Logger::TimerEventScope::v8_recompile_synchronous);
return MakeCrankshaftCode(info);
} else {
if (info->IsOptimizing()) {
@@ -397,6 +557,8 @@ static bool GenerateCode(CompilationInfo* info) {
// BASE or NONOPT.
info->DisableOptimization();
}
+ Logger::TimerEventScope timer(
+ info->isolate(), Logger::TimerEventScope::v8_compile_full_code);
return FullCodeGenerator::MakeCode(info);
}
}
@@ -425,26 +587,59 @@ bool Compiler::MakeCodeForLiveEdit(CompilationInfo* info) {
#endif
+static bool DebuggerWantsEagerCompilation(CompilationInfo* info,
+ bool allow_lazy_without_ctx = false) {
+ return LiveEditFunctionTracker::IsActive(info->isolate()) ||
+ (info->isolate()->DebuggerHasBreakPoints() && !allow_lazy_without_ctx);
+}
+
+
+// Sets the expected number of properties based on estimate from compiler.
+void SetExpectedNofPropertiesFromEstimate(Handle<SharedFunctionInfo> shared,
+ int estimate) {
+ // See the comment in SetExpectedNofProperties.
+ if (shared->live_objects_may_exist()) return;
+
+ // If no properties are added in the constructor, they are more likely
+ // to be added later.
+ if (estimate == 0) estimate = 2;
+
+ // TODO(yangguo): check whether those heuristics are still up-to-date.
+ // We do not shrink objects that go into a snapshot (yet), so we adjust
+ // the estimate conservatively.
+ if (Serializer::enabled()) {
+ estimate += 2;
+ } else if (FLAG_clever_optimizations) {
+ // Inobject slack tracking will reclaim redundant inobject space later,
+ // so we can afford to adjust the estimate generously.
+ estimate += 8;
+ } else {
+ estimate += 3;
+ }
+
+ shared->set_expected_nof_properties(estimate);
+}
+
+
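The heuristic above can be read as a small pure function of the parser's estimate and two configuration bits. A standalone sketch of that adjustment follows; the boolean parameters are stand-ins for the snapshot and FLAG_clever_optimizations checks, not V8's actual flag definitions.

// Editor's sketch of the estimate adjustment, with made-up parameter names.
#include <cstdio>

static int AdjustPropertyEstimate(int estimate,
                                  bool building_snapshot,
                                  bool clever_optimizations) {
  if (estimate == 0) estimate = 2;               // properties likely added later
  if (building_snapshot) return estimate + 2;    // conservative: no shrinking yet
  if (clever_optimizations) return estimate + 8; // slack tracking reclaims excess
  return estimate + 3;
}

int main() {
  std::printf("%d\n", AdjustPropertyEstimate(0, false, true));   // prints 10
  std::printf("%d\n", AdjustPropertyEstimate(4, true, false));   // prints 6
  return 0;
}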
static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
Isolate* isolate = info->isolate();
- ZoneScope zone_scope(info->zone(), DELETE_ON_EXIT);
PostponeInterruptsScope postpone(isolate);
ASSERT(!isolate->native_context().is_null());
Handle<Script> script = info->script();
- script->set_context_data((*isolate->native_context())->data());
+ // TODO(svenpanne) Obscure place for this, perhaps move to OnBeforeCompile?
+ FixedArray* array = isolate->native_context()->embedder_data();
+ script->set_context_data(array->get(0));
#ifdef ENABLE_DEBUGGER_SUPPORT
if (info->is_eval()) {
- Script::CompilationType compilation_type = Script::COMPILATION_TYPE_EVAL;
- script->set_compilation_type(Smi::FromInt(compilation_type));
+ script->set_compilation_type(Script::COMPILATION_TYPE_EVAL);
// For eval scripts add information on the function from which eval was
// called.
if (info->is_eval()) {
StackTraceFrameIterator it(isolate);
if (!it.done()) {
- script->set_eval_from_shared(
- JSFunction::cast(it.frame()->function())->shared());
+ script->set_eval_from_shared(it.frame()->function()->shared());
Code* code = it.frame()->LookupCode();
int offset = static_cast<int>(
it.frame()->pc() - code->instruction_start());
@@ -459,73 +654,81 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
// Only allow non-global compiles for eval.
ASSERT(info->is_eval() || info->is_global());
- ParsingFlags flags = kNoParsingFlags;
- if (info->pre_parse_data() != NULL ||
- String::cast(script->source())->length() > FLAG_min_preparse_length) {
- flags = kAllowLazy;
- }
- if (!ParserApi::Parse(info, flags)) {
- return Handle<SharedFunctionInfo>::null();
+ {
+ Parser parser(info);
+ if ((info->pre_parse_data() != NULL ||
+ String::cast(script->source())->length() > FLAG_min_preparse_length) &&
+ !DebuggerWantsEagerCompilation(info))
+ parser.set_allow_lazy(true);
+ if (!parser.Parse()) {
+ return Handle<SharedFunctionInfo>::null();
+ }
}
- // Measure how long it takes to do the compilation; only take the
- // rest of the function into account to avoid overlap with the
- // parsing statistics.
- HistogramTimer* rate = info->is_eval()
- ? info->isolate()->counters()->compile_eval()
- : info->isolate()->counters()->compile();
- HistogramTimerScope timer(rate);
-
- // Compile the code.
FunctionLiteral* lit = info->function();
LiveEditFunctionTracker live_edit_tracker(isolate, lit);
- if (!MakeCode(info)) {
- if (!isolate->has_pending_exception()) isolate->StackOverflow();
- return Handle<SharedFunctionInfo>::null();
- }
+ Handle<SharedFunctionInfo> result;
+ {
+ // Measure how long it takes to do the compilation; only take the
+ // rest of the function into account to avoid overlap with the
+ // parsing statistics.
+ HistogramTimer* rate = info->is_eval()
+ ? info->isolate()->counters()->compile_eval()
+ : info->isolate()->counters()->compile();
+ HistogramTimerScope timer(rate);
- // Allocate function.
- ASSERT(!info->code().is_null());
- Handle<SharedFunctionInfo> result =
- isolate->factory()->NewSharedFunctionInfo(
- lit->name(),
- lit->materialized_literal_count(),
- info->code(),
- ScopeInfo::Create(info->scope(), info->zone()));
-
- ASSERT_EQ(RelocInfo::kNoPosition, lit->function_token_position());
- Compiler::SetFunctionInfo(result, lit, true, script);
-
- if (script->name()->IsString()) {
- PROFILE(isolate, CodeCreateEvent(
- info->is_eval()
- ? Logger::EVAL_TAG
- : Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script),
- *info->code(),
- *result,
- String::cast(script->name())));
- GDBJIT(AddCode(Handle<String>(String::cast(script->name())),
- script,
- info->code(),
- info));
- } else {
- PROFILE(isolate, CodeCreateEvent(
- info->is_eval()
- ? Logger::EVAL_TAG
- : Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script),
- *info->code(),
- *result,
- isolate->heap()->empty_string()));
- GDBJIT(AddCode(Handle<String>(), script, info->code(), info));
- }
+ // Compile the code.
+ if (!MakeCode(info)) {
+ if (!isolate->has_pending_exception()) isolate->StackOverflow();
+ return Handle<SharedFunctionInfo>::null();
+ }
+
+ // Allocate function.
+ ASSERT(!info->code().is_null());
+ result =
+ isolate->factory()->NewSharedFunctionInfo(
+ lit->name(),
+ lit->materialized_literal_count(),
+ lit->is_generator(),
+ info->code(),
+ ScopeInfo::Create(info->scope(), info->zone()));
- // Hint to the runtime system used when allocating space for initial
- // property space by setting the expected number of properties for
- // the instances of the function.
- SetExpectedNofPropertiesFromEstimate(result, lit->expected_property_count());
+ ASSERT_EQ(RelocInfo::kNoPosition, lit->function_token_position());
+ Compiler::SetFunctionInfo(result, lit, true, script);
- script->set_compilation_state(
- Smi::FromInt(Script::COMPILATION_STATE_COMPILED));
+ if (script->name()->IsString()) {
+ PROFILE(isolate, CodeCreateEvent(
+ info->is_eval()
+ ? Logger::EVAL_TAG
+ : Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script),
+ *info->code(),
+ *result,
+ info,
+ String::cast(script->name())));
+ GDBJIT(AddCode(Handle<String>(String::cast(script->name())),
+ script,
+ info->code(),
+ info));
+ } else {
+ PROFILE(isolate, CodeCreateEvent(
+ info->is_eval()
+ ? Logger::EVAL_TAG
+ : Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script),
+ *info->code(),
+ *result,
+ info,
+ isolate->heap()->empty_string()));
+ GDBJIT(AddCode(Handle<String>(), script, info->code(), info));
+ }
+
+ // Hint to the runtime system used when allocating space for initial
+ // property space by setting the expected number of properties for
+ // the instances of the function.
+ SetExpectedNofPropertiesFromEstimate(result,
+ lit->expected_property_count());
+
+ script->set_compilation_state(Script::COMPILATION_STATE_COMPILED);
+ }
#ifdef ENABLE_DEBUGGER_SUPPORT
// Notify debugger
@@ -543,6 +746,7 @@ Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source,
Handle<Object> script_name,
int line_offset,
int column_offset,
+ bool is_shared_cross_origin,
Handle<Context> context,
v8::Extension* extension,
ScriptDataImpl* pre_data,
@@ -554,7 +758,7 @@ Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source,
isolate->counters()->total_compile_size()->Increment(source_length);
// The VM is in the COMPILER state until exiting this function.
- VMState state(isolate, COMPILER);
+ VMState<COMPILER> state(isolate);
CompilationCache* compilation_cache = isolate->compilation_cache();
@@ -565,6 +769,7 @@ Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source,
script_name,
line_offset,
column_offset,
+ is_shared_cross_origin,
context);
}
@@ -579,7 +784,7 @@ Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source,
// in that case too.
// Create a script object describing the script to be compiled.
- Handle<Script> script = FACTORY->NewScript(source);
+ Handle<Script> script = isolate->factory()->NewScript(source);
if (natives == NATIVES_CODE) {
script->set_type(Smi::FromInt(Script::TYPE_NATIVE));
}
@@ -588,8 +793,9 @@ Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source,
script->set_line_offset(Smi::FromInt(line_offset));
script->set_column_offset(Smi::FromInt(column_offset));
}
+ script->set_is_shared_cross_origin(is_shared_cross_origin);
- script->set_data(script_data.is_null() ? HEAP->undefined_value()
+ script->set_data(script_data.is_null() ? isolate->heap()->undefined_value()
: *script_data);
// Compile the function and add it to the cache.
@@ -606,8 +812,8 @@ Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source,
compilation_cache->PutScript(source, context, result);
}
} else {
- if (result->ic_age() != HEAP->global_ic_age()) {
- result->ResetForNewContext(HEAP->global_ic_age());
+ if (result->ic_age() != isolate->heap()->global_ic_age()) {
+ result->ResetForNewContext(isolate->heap()->global_ic_age());
}
}
@@ -620,6 +826,7 @@ Handle<SharedFunctionInfo> Compiler::CompileEval(Handle<String> source,
Handle<Context> context,
bool is_global,
LanguageMode language_mode,
+ ParseRestriction restriction,
int scope_position) {
Isolate* isolate = source->GetIsolate();
int source_length = source->length();
@@ -627,7 +834,7 @@ Handle<SharedFunctionInfo> Compiler::CompileEval(Handle<String> source,
isolate->counters()->total_compile_size()->Increment(source_length);
// The VM is in the COMPILER state until exiting this function.
- VMState state(isolate, COMPILER);
+ VMState<COMPILER> state(isolate);
// Do a lookup in the compilation cache; if the entry is not there, invoke
// the compiler and add the result to the cache.
@@ -646,12 +853,13 @@ Handle<SharedFunctionInfo> Compiler::CompileEval(Handle<String> source,
info.MarkAsEval();
if (is_global) info.MarkAsGlobal();
info.SetLanguageMode(language_mode);
+ info.SetParseRestriction(restriction);
info.SetContext(context);
result = MakeFunctionInfo(&info);
if (!result.is_null()) {
// Explicitly disable optimization for eval code. We're not yet prepared
// to handle eval-code in the optimizing compiler.
- result->DisableOptimization("eval");
+ result->DisableOptimization(kEval);
// If caller is strict mode, the result must be in strict mode or
// extended mode as well, but not the other way around. Consider:
@@ -667,8 +875,8 @@ Handle<SharedFunctionInfo> Compiler::CompileEval(Handle<String> source,
}
}
} else {
- if (result->ic_age() != HEAP->global_ic_age()) {
- result->ResetForNewContext(HEAP->global_ic_age());
+ if (result->ic_age() != isolate->heap()->global_ic_age()) {
+ result->ResetForNewContext(isolate->heap()->global_ic_age());
}
}
@@ -684,11 +892,12 @@ static bool InstallFullCode(CompilationInfo* info) {
// was flushed. By setting the code object last we avoid this.
Handle<SharedFunctionInfo> shared = info->shared_info();
Handle<Code> code = info->code();
+ CHECK(code->kind() == Code::FUNCTION);
Handle<JSFunction> function = info->closure();
Handle<ScopeInfo> scope_info =
ScopeInfo::Create(info->scope(), info->zone());
shared->set_scope_info(*scope_info);
- shared->set_code(*code);
+ shared->ReplaceCode(*code);
if (!function.is_null()) {
function->ReplaceCode(*code);
ASSERT(!function->IsOptimized());
@@ -699,28 +908,20 @@ static bool InstallFullCode(CompilationInfo* info) {
int expected = lit->expected_property_count();
SetExpectedNofPropertiesFromEstimate(shared, expected);
- // Set the optimization hints after performing lazy compilation, as
- // these are not set when the function is set up as a lazily
- // compiled function.
- shared->SetThisPropertyAssignmentsInfo(
- lit->has_only_simple_this_property_assignments(),
- *lit->this_property_assignments());
-
// Check the function has compiled code.
ASSERT(shared->is_compiled());
- shared->set_code_age(0);
- shared->set_dont_optimize(lit->flags()->Contains(kDontOptimize));
+ shared->set_dont_optimize_reason(lit->dont_optimize_reason());
shared->set_dont_inline(lit->flags()->Contains(kDontInline));
shared->set_ast_node_count(lit->ast_node_count());
- if (V8::UseCrankshaft() &&
+ if (info->isolate()->use_crankshaft() &&
!function.is_null() &&
!shared->optimization_disabled()) {
// If we're asked to always optimize, we compile the optimized
// version of the function right away - unless the debugger is
// active as it makes no sense to compile optimized code then.
if (FLAG_always_opt &&
- !Isolate::Current()->DebuggerHasBreakPoints()) {
+ !info->isolate()->DebuggerHasBreakPoints()) {
CompilationInfoWithZone optimized(function);
optimized.SetOptimizing(BailoutId::None());
return Compiler::CompileLazy(&optimized);
@@ -740,15 +941,20 @@ static void InstallCodeCommon(CompilationInfo* info) {
// reset this bit when lazy compiling the code again.
if (shared->optimization_disabled()) code->set_optimizable(false);
+ if (shared->code() == *code) {
+ // Do not send compilation event for the same code twice.
+ return;
+ }
Compiler::RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, info, shared);
}
static void InsertCodeIntoOptimizedCodeMap(CompilationInfo* info) {
Handle<Code> code = info->code();
- if (FLAG_cache_optimized_code &&
- info->osr_ast_id().IsNone() &&
- code->kind() == Code::OPTIMIZED_FUNCTION) {
+ if (code->kind() != Code::OPTIMIZED_FUNCTION) return; // Nothing to do.
+
+ // Cache non-OSR optimized code.
+ if (FLAG_cache_optimized_code && !info->is_osr()) {
Handle<JSFunction> function = info->closure();
Handle<SharedFunctionInfo> shared(function->shared());
Handle<FixedArray> literals(function->literals());
@@ -760,9 +966,10 @@ static void InsertCodeIntoOptimizedCodeMap(CompilationInfo* info) {
static bool InstallCodeFromOptimizedCodeMap(CompilationInfo* info) {
- if (FLAG_cache_optimized_code &&
- info->osr_ast_id().IsNone() &&
- info->IsOptimizing()) {
+ if (!info->IsOptimizing()) return false; // Nothing to look up.
+
+ // Lookup non-OSR optimized code.
+ if (FLAG_cache_optimized_code && !info->is_osr()) {
Handle<SharedFunctionInfo> shared = info->shared_info();
Handle<JSFunction> function = info->closure();
ASSERT(!function.is_null());
@@ -770,9 +977,9 @@ static bool InstallCodeFromOptimizedCodeMap(CompilationInfo* info) {
int index = shared->SearchOptimizedCodeMap(*native_context);
if (index > 0) {
if (FLAG_trace_opt) {
- PrintF("[found optimized code for: ");
- function->PrintName();
- PrintF(" / %" V8PRIxPTR "]\n", reinterpret_cast<intptr_t>(*function));
+ PrintF("[found optimized code for ");
+ function->ShortPrint();
+ PrintF("]\n");
}
// Caching of optimized code enabled and optimized code found.
shared->InstallFromOptimizedCodeMap(*function, index);
@@ -786,10 +993,8 @@ static bool InstallCodeFromOptimizedCodeMap(CompilationInfo* info) {
bool Compiler::CompileLazy(CompilationInfo* info) {
Isolate* isolate = info->isolate();
- ZoneScope zone_scope(info->zone(), DELETE_ON_EXIT);
-
// The VM is in the COMPILER state until exiting this function.
- VMState state(isolate, COMPILER);
+ VMState<COMPILER> state(isolate);
PostponeInterruptsScope postpone(isolate);
@@ -800,7 +1005,7 @@ bool Compiler::CompileLazy(CompilationInfo* info) {
if (InstallCodeFromOptimizedCodeMap(info)) return true;
// Generate the AST for the lazily compiled function.
- if (ParserApi::Parse(info, kNoParsingFlags)) {
+ if (Parser::Parse(info)) {
// Measure how long it takes to do the lazy compilation; only take the
// rest of the function into account to avoid overlap with the lazy
// parsing statistics.
@@ -820,12 +1025,15 @@ bool Compiler::CompileLazy(CompilationInfo* info) {
InstallCodeCommon(info);
if (info->IsOptimizing()) {
+ // Optimized code successfully created.
Handle<Code> code = info->code();
- ASSERT(shared->scope_info() != ScopeInfo::Empty());
+ ASSERT(shared->scope_info() != ScopeInfo::Empty(isolate));
+ // TODO(titzer): Only replace the code if it was not an OSR compile.
info->closure()->ReplaceCode(*code);
InsertCodeIntoOptimizedCodeMap(info);
return true;
- } else {
+ } else if (!info->is_osr()) {
+ // Compilation failed. Replace with full code if not OSR compile.
return InstallFullCode(info);
}
}
@@ -836,49 +1044,73 @@ bool Compiler::CompileLazy(CompilationInfo* info) {
}
-void Compiler::RecompileParallel(Handle<JSFunction> closure) {
- if (closure->IsInRecompileQueue()) return;
- ASSERT(closure->IsMarkedForParallelRecompilation());
+bool Compiler::RecompileConcurrent(Handle<JSFunction> closure,
+ uint32_t osr_pc_offset) {
+ bool compiling_for_osr = (osr_pc_offset != 0);
Isolate* isolate = closure->GetIsolate();
+ // Here we prepare compile data for the concurrent recompilation thread, but
+ // this still happens synchronously and interrupts execution.
+ Logger::TimerEventScope timer(
+ isolate, Logger::TimerEventScope::v8_recompile_synchronous);
+
if (!isolate->optimizing_compiler_thread()->IsQueueAvailable()) {
- if (FLAG_trace_parallel_recompilation) {
- PrintF(" ** Compilation queue, will retry opting on next run.\n");
+ if (FLAG_trace_concurrent_recompilation) {
+ PrintF(" ** Compilation queue full, will retry optimizing ");
+ closure->PrintName();
+ PrintF(" on next run.\n");
}
- return;
+ return false;
}
SmartPointer<CompilationInfo> info(new CompilationInfoWithZone(closure));
- VMState state(isolate, PARALLEL_COMPILER_PROLOGUE);
+ Handle<SharedFunctionInfo> shared = info->shared_info();
+
+ if (compiling_for_osr) {
+ BailoutId osr_ast_id =
+ shared->code()->TranslatePcOffsetToAstId(osr_pc_offset);
+ ASSERT(!osr_ast_id.IsNone());
+ info->SetOptimizing(osr_ast_id);
+ info->set_osr_pc_offset(osr_pc_offset);
+
+ if (FLAG_trace_osr) {
+ PrintF("[COSR - attempt to queue ");
+ closure->PrintName();
+ PrintF(" at AST id %d]\n", osr_ast_id.ToInt());
+ }
+ } else {
+ info->SetOptimizing(BailoutId::None());
+ }
+
+ VMState<COMPILER> state(isolate);
PostponeInterruptsScope postpone(isolate);
- Handle<SharedFunctionInfo> shared = info->shared_info();
int compiled_size = shared->end_position() - shared->start_position();
isolate->counters()->total_compile_size()->Increment(compiled_size);
- info->SetOptimizing(BailoutId::None());
{
CompilationHandleScope handle_scope(*info);
- if (InstallCodeFromOptimizedCodeMap(*info)) return;
+ if (!compiling_for_osr && InstallCodeFromOptimizedCodeMap(*info)) {
+ return true;
+ }
- if (ParserApi::Parse(*info, kNoParsingFlags)) {
+ if (Parser::Parse(*info)) {
LanguageMode language_mode = info->function()->language_mode();
info->SetLanguageMode(language_mode);
shared->set_language_mode(language_mode);
info->SaveHandles();
if (Rewriter::Rewrite(*info) && Scope::Analyze(*info)) {
- OptimizingCompiler* compiler =
- new(info->zone()) OptimizingCompiler(*info);
- OptimizingCompiler::Status status = compiler->CreateGraph();
- if (status == OptimizingCompiler::SUCCEEDED) {
- isolate->optimizing_compiler_thread()->QueueForOptimization(compiler);
- shared->code()->set_profiler_ticks(0);
- closure->ReplaceCode(isolate->builtins()->builtin(
- Builtins::kInRecompileQueue));
+ RecompileJob* job = new(info->zone()) RecompileJob(*info);
+ RecompileJob::Status status = job->CreateGraph();
+ if (status == RecompileJob::SUCCEEDED) {
info.Detach();
- } else if (status == OptimizingCompiler::BAILED_OUT) {
+ shared->code()->set_profiler_ticks(0);
+ isolate->optimizing_compiler_thread()->QueueForOptimization(job);
+ ASSERT(!isolate->has_pending_exception());
+ return true;
+ } else if (status == RecompileJob::BAILED_OUT) {
isolate->clear_pending_exception();
InstallFullCode(*info);
}
@@ -886,40 +1118,73 @@ void Compiler::RecompileParallel(Handle<JSFunction> closure) {
}
}
- if (isolate->has_pending_exception()) {
- isolate->clear_pending_exception();
- }
+ if (isolate->has_pending_exception()) isolate->clear_pending_exception();
+ return false;
}
-void Compiler::InstallOptimizedCode(OptimizingCompiler* optimizing_compiler) {
- SmartPointer<CompilationInfo> info(optimizing_compiler->info());
+Handle<Code> Compiler::InstallOptimizedCode(RecompileJob* job) {
+ SmartPointer<CompilationInfo> info(job->info());
+ // The function may have already been optimized by OSR. Simply continue.
+ // Except when OSR already disabled optimization for some reason.
+ if (info->shared_info()->optimization_disabled()) {
+ info->AbortOptimization();
+ InstallFullCode(*info);
+ if (FLAG_trace_concurrent_recompilation) {
+ PrintF(" ** aborting optimization for ");
+ info->closure()->PrintName();
+ PrintF(" as it has been disabled.\n");
+ }
+ ASSERT(!info->closure()->IsInRecompileQueue());
+ return Handle<Code>::null();
+ }
+
+ Isolate* isolate = info->isolate();
+ VMState<COMPILER> state(isolate);
+ Logger::TimerEventScope timer(
+ isolate, Logger::TimerEventScope::v8_recompile_synchronous);
// If crankshaft succeeded, install the optimized code else install
// the unoptimized code.
- OptimizingCompiler::Status status = optimizing_compiler->last_status();
- if (status != OptimizingCompiler::SUCCEEDED) {
- optimizing_compiler->info()->set_bailout_reason(
- "failed/bailed out last time");
- status = optimizing_compiler->AbortOptimization();
+ RecompileJob::Status status = job->last_status();
+ if (info->HasAbortedDueToDependencyChange()) {
+ info->set_bailout_reason(kBailedOutDueToDependencyChange);
+ status = job->AbortOptimization();
+ } else if (status != RecompileJob::SUCCEEDED) {
+ info->set_bailout_reason(kFailedBailedOutLastTime);
+ status = job->AbortOptimization();
+ } else if (isolate->DebuggerHasBreakPoints()) {
+ info->set_bailout_reason(kDebuggerIsActive);
+ status = job->AbortOptimization();
} else {
- status = optimizing_compiler->GenerateAndInstallCode();
- ASSERT(status == OptimizingCompiler::SUCCEEDED ||
- status == OptimizingCompiler::BAILED_OUT);
+ status = job->GenerateAndInstallCode();
+ ASSERT(status == RecompileJob::SUCCEEDED ||
+ status == RecompileJob::BAILED_OUT);
}
InstallCodeCommon(*info);
- if (status == OptimizingCompiler::SUCCEEDED) {
+ if (status == RecompileJob::SUCCEEDED) {
Handle<Code> code = info->code();
- ASSERT(info->shared_info()->scope_info() != ScopeInfo::Empty());
+ ASSERT(info->shared_info()->scope_info() != ScopeInfo::Empty(isolate));
info->closure()->ReplaceCode(*code);
if (info->shared_info()->SearchOptimizedCodeMap(
info->closure()->context()->native_context()) == -1) {
InsertCodeIntoOptimizedCodeMap(*info);
}
+ if (FLAG_trace_concurrent_recompilation) {
+ PrintF(" ** Optimized code for ");
+ info->closure()->PrintName();
+ PrintF(" installed.\n");
+ }
} else {
- info->SetCode(Handle<Code>(info->shared_info()->code()));
+ info->AbortOptimization();
InstallFullCode(*info);
}
+ // Optimized code is finally replacing unoptimized code. Reset the latter's
+ // profiler ticks to prevent too soon re-opt after a deopt.
+ info->shared_info()->code()->set_profiler_ticks(0);
+ ASSERT(!info->closure()->IsInRecompileQueue());
+ return (status == RecompileJob::SUCCEEDED) ? info->code()
+ : Handle<Code>::null();
}
@@ -931,7 +1196,9 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
info.SetScope(literal->scope());
info.SetLanguageMode(literal->scope()->language_mode());
- LiveEditFunctionTracker live_edit_tracker(info.isolate(), literal);
+ Isolate* isolate = info.isolate();
+ Factory* factory = isolate->factory();
+ LiveEditFunctionTracker live_edit_tracker(isolate, literal);
// Determine if the function can be lazily compiled. This is necessary to
// allow some of our builtin JS files to be lazily compiled. These
// builtins cannot be handled lazily by the parser, since we have to know
@@ -943,14 +1210,13 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
// Debug::FindSharedFunctionInfoInScript.
bool allow_lazy_without_ctx = literal->AllowsLazyCompilationWithoutContext();
bool allow_lazy = literal->AllowsLazyCompilation() &&
- !LiveEditFunctionTracker::IsActive(info.isolate()) &&
- (!info.isolate()->DebuggerHasBreakPoints() || allow_lazy_without_ctx);
+ !DebuggerWantsEagerCompilation(&info, allow_lazy_without_ctx);
- Handle<ScopeInfo> scope_info(ScopeInfo::Empty());
+ Handle<ScopeInfo> scope_info(ScopeInfo::Empty(isolate));
// Generate code
if (FLAG_lazy && allow_lazy && !literal->is_parenthesized()) {
- Handle<Code> code = info.isolate()->builtins()->LazyCompile();
+ Handle<Code> code = isolate->builtins()->LazyCompile();
info.SetCode(code);
} else if (GenerateCode(&info)) {
ASSERT(!info.code().is_null());
@@ -961,8 +1227,9 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
// Create a shared function info object.
Handle<SharedFunctionInfo> result =
- FACTORY->NewSharedFunctionInfo(literal->name(),
+ factory->NewSharedFunctionInfo(literal->name(),
literal->materialized_literal_count(),
+ literal->is_generator(),
info.code(),
scope_info);
SetFunctionInfo(result, literal, false, script);
@@ -997,9 +1264,6 @@ void Compiler::SetFunctionInfo(Handle<SharedFunctionInfo> function_info,
function_info->set_is_anonymous(lit->is_anonymous());
function_info->set_is_toplevel(is_toplevel);
function_info->set_inferred_name(*lit->inferred_name());
- function_info->SetThisPropertyAssignmentsInfo(
- lit->has_only_simple_this_property_assignments(),
- *lit->this_property_assignments());
function_info->set_allows_lazy_compilation(lit->AllowsLazyCompilation());
function_info->set_allows_lazy_compilation_without_context(
lit->AllowsLazyCompilationWithoutContext());
@@ -1008,9 +1272,10 @@ void Compiler::SetFunctionInfo(Handle<SharedFunctionInfo> function_info,
function_info->set_has_duplicate_parameters(lit->has_duplicate_parameters());
function_info->set_ast_node_count(lit->ast_node_count());
function_info->set_is_function(lit->is_function());
- function_info->set_dont_optimize(lit->flags()->Contains(kDontOptimize));
+ function_info->set_dont_optimize_reason(lit->dont_optimize_reason());
function_info->set_dont_inline(lit->flags()->Contains(kDontInline));
function_info->set_dont_cache(lit->flags()->Contains(kDontCache));
+ function_info->set_is_generator(lit->is_generator());
}
@@ -1024,26 +1289,33 @@ void Compiler::RecordFunctionCompilation(Logger::LogEventsAndTags tag,
// script name and line number. Check explicitly whether logging is
// enabled as finding the line number is not free.
if (info->isolate()->logger()->is_logging_code_events() ||
- CpuProfiler::is_profiling(info->isolate())) {
+ info->isolate()->cpu_profiler()->is_profiling()) {
Handle<Script> script = info->script();
Handle<Code> code = info->code();
if (*code == info->isolate()->builtins()->builtin(Builtins::kLazyCompile))
return;
+ int line_num = GetScriptLineNumber(script, shared->start_position()) + 1;
+ int column_num =
+ GetScriptColumnNumber(script, shared->start_position()) + 1;
+ USE(line_num);
if (script->name()->IsString()) {
- int line_num = GetScriptLineNumber(script, shared->start_position()) + 1;
- USE(line_num);
PROFILE(info->isolate(),
CodeCreateEvent(Logger::ToNativeByScript(tag, *script),
*code,
*shared,
+ info,
String::cast(script->name()),
- line_num));
+ line_num,
+ column_num));
} else {
PROFILE(info->isolate(),
CodeCreateEvent(Logger::ToNativeByScript(tag, *script),
*code,
*shared,
- shared->DebugName()));
+ info,
+ info->isolate()->heap()->empty_string(),
+ line_num,
+ column_num));
}
}
@@ -1053,4 +1325,35 @@ void Compiler::RecordFunctionCompilation(Logger::LogEventsAndTags tag,
info));
}
+
+CompilationPhase::CompilationPhase(const char* name, CompilationInfo* info)
+ : name_(name), info_(info), zone_(info->isolate()) {
+ if (FLAG_hydrogen_stats) {
+ info_zone_start_allocation_size_ = info->zone()->allocation_size();
+ timer_.Start();
+ }
+}
+
+
+CompilationPhase::~CompilationPhase() {
+ if (FLAG_hydrogen_stats) {
+ unsigned size = zone()->allocation_size();
+ size += info_->zone()->allocation_size() - info_zone_start_allocation_size_;
+ isolate()->GetHStatistics()->SaveTiming(name_, timer_.Elapsed(), size);
+ }
+}
+
+
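CompilationPhase pairs a wall-clock timer with a zone-allocation delta and reports both when the phase ends. A standalone sketch of the same accumulate-on-destruction shape, using std::chrono and a dummy allocation counter in place of V8's Zone and HStatistics, is shown below; the names Arena and Phase are invented for the sketch.

// Editor's sketch, not V8 code: per-phase time and allocation reporting.
#include <chrono>
#include <cstdio>
#include <string>

namespace sketch {

struct Arena { size_t allocated = 0; };  // stand-in for Zone::allocation_size()

class Phase {
 public:
  Phase(std::string name, Arena* arena)
      : name_(std::move(name)),
        arena_(arena),
        start_allocated_(arena->allocated),
        start_(std::chrono::steady_clock::now()) {}

  ~Phase() {
    auto elapsed = std::chrono::steady_clock::now() - start_;
    size_t grew = arena_->allocated - start_allocated_;
    std::printf("%s: %lld us, %zu bytes\n", name_.c_str(),
                (long long)std::chrono::duration_cast<
                    std::chrono::microseconds>(elapsed).count(),
                grew);
  }

 private:
  std::string name_;
  Arena* arena_;
  size_t start_allocated_;
  std::chrono::steady_clock::time_point start_;
};

}  // namespace sketch

int main() {
  sketch::Arena arena;
  {
    sketch::Phase phase("H_Example phase", &arena);
    arena.allocated += 4096;  // pretend the phase allocated something
  }
  return 0;
}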
+bool CompilationPhase::ShouldProduceTraceOutput() const {
+ // Trace if the appropriate trace flag is set and the phase name's first
+ // character is in the FLAG_trace_phase command line parameter.
+ AllowHandleDereference allow_deref;
+ bool tracing_on = info()->IsStub()
+ ? FLAG_trace_hydrogen_stubs
+ : (FLAG_trace_hydrogen &&
+ info()->closure()->PassesFilter(FLAG_trace_hydrogen_filter));
+ return (tracing_on &&
+ OS::StrChr(const_cast<char*>(FLAG_trace_phase), name_[0]) != NULL);
+}
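The first-character filter used above can be tried out in isolation. A small standalone sketch with an invented stand-in for the FLAG_trace_phase string:

// Editor's sketch: select phase families by the first letter of their name.
#include <cstring>
#include <cstdio>

static const char* trace_phase_flag = "HL";  // stand-in for FLAG_trace_phase

static bool ShouldTracePhase(const char* phase_name) {
  return std::strchr(trace_phase_flag, phase_name[0]) != nullptr;
}

int main() {
  std::printf("%d\n", ShouldTracePhase("H_Canonicalize"));  // 1
  std::printf("%d\n", ShouldTracePhase("Z_Other"));         // 0
  return 0;
}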
+
} } // namespace v8::internal
diff --git a/deps/v8/src/compiler.h b/deps/v8/src/compiler.h
index af9459566..2d9e52a8e 100644
--- a/deps/v8/src/compiler.h
+++ b/deps/v8/src/compiler.h
@@ -36,24 +36,33 @@ namespace v8 {
namespace internal {
class ScriptDataImpl;
+class HydrogenCodeStub;
+
+// ParseRestriction is used to restrict the set of valid statements in a
+// unit of compilation. Restriction violations cause a syntax error.
+enum ParseRestriction {
+ NO_PARSE_RESTRICTION, // All expressions are allowed.
+ ONLY_SINGLE_FUNCTION_LITERAL // Only a single FunctionLiteral expression.
+};
+
+struct OffsetRange {
+ OffsetRange(int from, int to) : from(from), to(to) {}
+ int from;
+ int to;
+};
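OffsetRange is consumed further down via AddNoFrameRange/ReleaseNoFrameRanges: the CPU profiler uses the recorded [from, to) ranges to recognize samples taken while the frame pointer is not yet (or no longer) valid. A minimal standalone sketch of that consumer-side check; the real profiler bookkeeping lives elsewhere in V8.

// Editor's sketch: membership test against recorded no-frame ranges.
#include <cstdio>
#include <vector>

struct OffsetRange {            // same shape as the struct above
  OffsetRange(int from, int to) : from(from), to(to) {}
  int from;
  int to;
};

// True if a sampled pc offset falls into any [from, to) range, i.e. the
// sample was taken before the frame was fully set up (or after teardown).
static bool InNoFrameRange(const std::vector<OffsetRange>& ranges, int offset) {
  for (const OffsetRange& r : ranges) {
    if (offset >= r.from && offset < r.to) return true;
  }
  return false;
}

int main() {
  std::vector<OffsetRange> ranges;
  ranges.emplace_back(0, 12);    // e.g. prologue before the frame exists
  ranges.emplace_back(40, 48);   // e.g. region after the frame is torn down
  std::printf("%d %d\n", InNoFrameRange(ranges, 8), InNoFrameRange(ranges, 20));
  return 0;
}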
// CompilationInfo encapsulates some information known at compile time. It
// is constructed based on the resources available at compile-time.
class CompilationInfo {
public:
- CompilationInfo(Handle<Script> script, Zone* zone);
- CompilationInfo(Handle<SharedFunctionInfo> shared_info, Zone* zone);
CompilationInfo(Handle<JSFunction> closure, Zone* zone);
-
virtual ~CompilationInfo();
- Isolate* isolate() {
- ASSERT(Isolate::Current() == isolate_);
+ Isolate* isolate() const {
return isolate_;
}
- Zone* zone() {
- return zone_;
- }
+ Zone* zone() { return zone_; }
+ bool is_osr() const { return !osr_ast_id_.IsNone(); }
bool is_lazy() const { return IsLazy::decode(flags_); }
bool is_eval() const { return IsEval::decode(flags_); }
bool is_global() const { return IsGlobal::decode(flags_); }
@@ -70,10 +79,16 @@ class CompilationInfo {
Handle<JSFunction> closure() const { return closure_; }
Handle<SharedFunctionInfo> shared_info() const { return shared_info_; }
Handle<Script> script() const { return script_; }
+  HydrogenCodeStub* code_stub() const { return code_stub_; }
v8::Extension* extension() const { return extension_; }
ScriptDataImpl* pre_parse_data() const { return pre_parse_data_; }
Handle<Context> context() const { return context_; }
BailoutId osr_ast_id() const { return osr_ast_id_; }
+ uint32_t osr_pc_offset() const { return osr_pc_offset_; }
+ int opt_count() const { return opt_count_; }
+ int num_parameters() const;
+ int num_heap_slots() const;
+ Code::Flags flags() const;
void MarkAsEval() {
ASSERT(!is_lazy());
@@ -96,9 +111,55 @@ class CompilationInfo {
void MarkAsNative() {
flags_ |= IsNative::encode(true);
}
+
bool is_native() const {
return IsNative::decode(flags_);
}
+
+ bool is_calling() const {
+ return is_deferred_calling() || is_non_deferred_calling();
+ }
+
+ void MarkAsDeferredCalling() {
+ flags_ |= IsDeferredCalling::encode(true);
+ }
+
+ bool is_deferred_calling() const {
+ return IsDeferredCalling::decode(flags_);
+ }
+
+ void MarkAsNonDeferredCalling() {
+ flags_ |= IsNonDeferredCalling::encode(true);
+ }
+
+ bool is_non_deferred_calling() const {
+ return IsNonDeferredCalling::decode(flags_);
+ }
+
+ void MarkAsSavesCallerDoubles() {
+ flags_ |= SavesCallerDoubles::encode(true);
+ }
+
+ bool saves_caller_doubles() const {
+ return SavesCallerDoubles::decode(flags_);
+ }
+
+ void MarkAsRequiresFrame() {
+ flags_ |= RequiresFrame::encode(true);
+ }
+
+ bool requires_frame() const {
+ return RequiresFrame::decode(flags_);
+ }
+
+ void SetParseRestriction(ParseRestriction restriction) {
+ flags_ = ParseRestricitonField::update(flags_, restriction);
+ }
+
+ ParseRestriction parse_restriction() const {
+ return ParseRestricitonField::decode(flags_);
+ }
+
void SetFunction(FunctionLiteral* literal) {
ASSERT(function_ == NULL);
function_ = literal;
@@ -137,6 +198,11 @@ class CompilationInfo {
return IsCompilingForDebugging::decode(flags_);
}
+ bool ShouldTrapOnDeopt() const {
+ return (FLAG_trap_on_deopt && IsOptimizing()) ||
+ (FLAG_trap_on_stub_deopt && IsStub());
+ }
+
bool has_global_object() const {
return !closure().is_null() &&
(closure()->context()->global_object() != NULL);
@@ -149,6 +215,7 @@ class CompilationInfo {
// Accessors for the different compilation modes.
bool IsOptimizing() const { return mode_ == OPTIMIZE; }
bool IsOptimizable() const { return mode_ == BASE; }
+ bool IsStub() const { return mode_ == STUB; }
void SetOptimizing(BailoutId osr_ast_id) {
SetMode(OPTIMIZE);
osr_ast_id_ = osr_ast_id;
@@ -167,15 +234,28 @@ class CompilationInfo {
// Determines whether or not to insert a self-optimization header.
bool ShouldSelfOptimize();
- // Disable all optimization attempts of this info for the rest of the
- // current compilation pipeline.
- void AbortOptimization();
+ // Reset code to the unoptimized version when optimization is aborted.
+ void AbortOptimization() {
+ SetCode(handle(shared_info()->code()));
+ }
void set_deferred_handles(DeferredHandles* deferred_handles) {
ASSERT(deferred_handles_ == NULL);
deferred_handles_ = deferred_handles;
}
+ ZoneList<Handle<HeapObject> >* dependencies(
+ DependentCode::DependencyGroup group) {
+ if (dependencies_[group] == NULL) {
+ dependencies_[group] = new(zone_) ZoneList<Handle<HeapObject> >(2, zone_);
+ }
+ return dependencies_[group];
+ }
+
+ void CommitDependencies(Handle<Code> code);
+
+ void RollbackDependencies();
+
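dependencies() lazily materializes one list per DependentCode::DependencyGroup; CommitDependencies and RollbackDependencies then either register the compiled code with every collected object or discard the lists. A rough standalone sketch of that per-group registry follows, with invented group names and std::vector standing in for ZoneList and Handle; it shows only the collect/commit/rollback shape, not V8's DependentCode machinery.

// Editor's sketch: per-group dependency collection with commit/rollback.
#include <array>
#include <cstdio>
#include <string>
#include <vector>

namespace sketch {

enum DependencyGroup { kTransitionGroup, kPrototypeGroup, kGroupCount };

class DependencyRegistry {
 public:
  // Mirrors dependencies(group): hand out the list for one group.
  std::vector<std::string>* dependencies(DependencyGroup group) {
    return &groups_[group];
  }

  // On success: record the dependency on each collected object.
  void Commit(const std::string& code_name) {
    for (auto& list : groups_) {
      for (auto& object : list) {
        std::printf("register %s as dependent on %s\n",
                    code_name.c_str(), object.c_str());
      }
      list.clear();
    }
  }

  // On abort: forget everything that was collected.
  void Rollback() {
    for (auto& list : groups_) list.clear();
  }

 private:
  std::array<std::vector<std::string>, kGroupCount> groups_;
};

}  // namespace sketch

int main() {
  sketch::DependencyRegistry deps;
  deps.dependencies(sketch::kTransitionGroup)->push_back("some_map");
  deps.Rollback();   // compilation bailed out; nothing is registered
  return 0;
}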
void SaveHandles() {
SaveHandle(&closure_);
SaveHandle(&shared_info_);
@@ -183,8 +263,66 @@ class CompilationInfo {
SaveHandle(&script_);
}
- const char* bailout_reason() const { return bailout_reason_; }
- void set_bailout_reason(const char* reason) { bailout_reason_ = reason; }
+ BailoutReason bailout_reason() const { return bailout_reason_; }
+ void set_bailout_reason(BailoutReason reason) { bailout_reason_ = reason; }
+
+ int prologue_offset() const {
+ ASSERT_NE(Code::kPrologueOffsetNotSet, prologue_offset_);
+ return prologue_offset_;
+ }
+
+ void set_prologue_offset(int prologue_offset) {
+ ASSERT_EQ(Code::kPrologueOffsetNotSet, prologue_offset_);
+ prologue_offset_ = prologue_offset;
+ }
+
+ // Adds offset range [from, to) where fp register does not point
+ // to the current frame base. Used in CPU profiler to detect stack
+ // samples where top frame is not set up.
+ inline void AddNoFrameRange(int from, int to) {
+ if (no_frame_ranges_) no_frame_ranges_->Add(OffsetRange(from, to));
+ }
+
+ List<OffsetRange>* ReleaseNoFrameRanges() {
+ List<OffsetRange>* result = no_frame_ranges_;
+ no_frame_ranges_ = NULL;
+ return result;
+ }
+
+ Handle<Foreign> object_wrapper() {
+ if (object_wrapper_.is_null()) {
+ object_wrapper_ =
+ isolate()->factory()->NewForeign(reinterpret_cast<Address>(this));
+ }
+ return object_wrapper_;
+ }
+
+ void AbortDueToDependencyChange() {
+ ASSERT(!isolate()->optimizing_compiler_thread()->IsOptimizerThread());
+ abort_due_to_dependency_ = true;
+ }
+
+ bool HasAbortedDueToDependencyChange() {
+ ASSERT(!isolate()->optimizing_compiler_thread()->IsOptimizerThread());
+ return abort_due_to_dependency_;
+ }
+
+ void set_osr_pc_offset(uint32_t pc_offset) {
+ osr_pc_offset_ = pc_offset;
+ }
+
+ bool HasSameOsrEntry(Handle<JSFunction> function, uint32_t pc_offset) {
+ return osr_pc_offset_ == pc_offset && function.is_identical_to(closure_);
+ }
+
+ protected:
+ CompilationInfo(Handle<Script> script,
+ Zone* zone);
+ CompilationInfo(Handle<SharedFunctionInfo> shared_info,
+ Zone* zone);
+ CompilationInfo(HydrogenCodeStub* stub,
+ Isolate* isolate,
+ Zone* zone);
private:
Isolate* isolate_;
@@ -197,24 +335,14 @@ class CompilationInfo {
enum Mode {
BASE,
OPTIMIZE,
- NONOPT
+ NONOPT,
+ STUB
};
- void Initialize(Mode mode) {
- mode_ = V8::UseCrankshaft() ? mode : NONOPT;
- ASSERT(!script_.is_null());
- if (script_->type()->value() == Script::TYPE_NATIVE) {
- MarkAsNative();
- }
- if (!shared_info_.is_null()) {
- ASSERT(language_mode() == CLASSIC_MODE);
- SetLanguageMode(shared_info_->language_mode());
- }
- set_bailout_reason("unknown");
- }
+ void Initialize(Isolate* isolate, Mode mode, Zone* zone);
void SetMode(Mode mode) {
- ASSERT(V8::UseCrankshaft());
+ ASSERT(isolate()->use_crankshaft());
mode_ = mode;
}
@@ -237,7 +365,18 @@ class CompilationInfo {
// If compiling for debugging produce just full code matching the
// initial mode setting.
class IsCompilingForDebugging: public BitField<bool, 8, 1> {};
-
+ // If the compiled code contains calls that require building a frame
+ class IsCalling: public BitField<bool, 9, 1> {};
+ // If the compiled code contains calls that require building a frame
+ class IsDeferredCalling: public BitField<bool, 10, 1> {};
+ // If the compiled code contains calls that require building a frame
+ class IsNonDeferredCalling: public BitField<bool, 11, 1> {};
+ // If the compiled code saves double caller registers that it clobbers.
+ class SavesCallerDoubles: public BitField<bool, 12, 1> {};
+ // If the set of valid statements is restricted.
+ class ParseRestricitonField: public BitField<ParseRestriction, 13, 1> {};
+ // If the function requires a frame (for unspecified reasons)
+ class RequiresFrame: public BitField<bool, 14, 1> {};
unsigned flags_;
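All of these flags share the single unsigned flags_ word through V8's BitField<type, shift, size> template (encode/decode/update). The following is a minimal standalone re-implementation of that packing, just enough to show how a one-bit boolean field and an enum field coexist in one word; the field positions and the Restriction enum are illustrative, not the exact layout above.

// Editor's sketch of BitField-style flag packing.
#include <cassert>
#include <cstdint>

template <typename T, int shift, int size>
struct BitField {
  static constexpr uint32_t kMask = ((1u << size) - 1u) << shift;
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t word) {
    return static_cast<T>((word & kMask) >> shift);
  }
  static uint32_t update(uint32_t word, T value) {
    return (word & ~kMask) | encode(value);
  }
};

enum Restriction { NO_RESTRICTION, SINGLE_FUNCTION_ONLY };

// Positions loosely modeled on the flags above.
using SavesDoubles = BitField<bool, 12, 1>;
using RestrictionField = BitField<Restriction, 13, 1>;

int main() {
  uint32_t flags = 0;
  flags |= SavesDoubles::encode(true);
  flags = RestrictionField::update(flags, SINGLE_FUNCTION_ONLY);
  assert(SavesDoubles::decode(flags) == true);
  assert(RestrictionField::decode(flags) == SINGLE_FUNCTION_ONLY);
  return 0;
}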
@@ -249,6 +388,8 @@ class CompilationInfo {
Scope* scope_;
// The global scope provided as a convenience.
Scope* global_scope_;
+ // For compiled stubs, the stub object
+ HydrogenCodeStub* code_stub_;
// The compiled code.
Handle<Code> code_;
@@ -268,6 +409,12 @@ class CompilationInfo {
// Compilation mode flag and whether deoptimization is allowed.
Mode mode_;
BailoutId osr_ast_id_;
+ // The pc_offset corresponding to osr_ast_id_ in unoptimized code.
+ // We can look this up in the back edge table, but cache it for quick access.
+ uint32_t osr_pc_offset_;
+
+ // Flag whether compilation needs to be aborted due to dependency change.
+ bool abort_due_to_dependency_;
// The zone from which the compilation pipeline working on this
// CompilationInfo allocates.
@@ -275,6 +422,8 @@ class CompilationInfo {
DeferredHandles* deferred_handles_;
+ ZoneList<Handle<HeapObject> >* dependencies_[DependentCode::kGroupCount];
+
template<typename T>
void SaveHandle(Handle<T> *object) {
if (!object->is_null()) {
@@ -283,7 +432,17 @@ class CompilationInfo {
}
}
- const char* bailout_reason_;
+ BailoutReason bailout_reason_;
+
+ int prologue_offset_;
+
+ List<OffsetRange>* no_frame_ranges_;
+
+ // A copy of shared_info()->opt_count() to avoid handle deref
+ // during graph optimization.
+ int opt_count_;
+
+ Handle<Foreign> object_wrapper_;
DISALLOW_COPY_AND_ASSIGN(CompilationInfo);
};
@@ -295,20 +454,26 @@ class CompilationInfoWithZone: public CompilationInfo {
public:
explicit CompilationInfoWithZone(Handle<Script> script)
: CompilationInfo(script, &zone_),
- zone_(script->GetIsolate()),
- zone_scope_(&zone_, DELETE_ON_EXIT) {}
+ zone_(script->GetIsolate()) {}
explicit CompilationInfoWithZone(Handle<SharedFunctionInfo> shared_info)
: CompilationInfo(shared_info, &zone_),
- zone_(shared_info->GetIsolate()),
- zone_scope_(&zone_, DELETE_ON_EXIT) {}
+ zone_(shared_info->GetIsolate()) {}
explicit CompilationInfoWithZone(Handle<JSFunction> closure)
: CompilationInfo(closure, &zone_),
- zone_(closure->GetIsolate()),
- zone_scope_(&zone_, DELETE_ON_EXIT) {}
+ zone_(closure->GetIsolate()) {}
+ CompilationInfoWithZone(HydrogenCodeStub* stub, Isolate* isolate)
+ : CompilationInfo(stub, isolate, &zone_),
+ zone_(isolate) {}
+
+ // Virtual destructor because a CompilationInfoWithZone has to exit the
+ // zone scope and get rid of dependent maps even when the destructor is
+ // called when cast as a CompilationInfo.
+ virtual ~CompilationInfoWithZone() {
+ RollbackDependencies();
+ }
private:
Zone zone_;
- ZoneScope zone_scope_;
};
@@ -330,7 +495,7 @@ class CompilationHandleScope BASE_EMBEDDED {
class HGraph;
-class HGraphBuilder;
+class HOptimizedGraphBuilder;
class LChunk;
// A helper class that calls the three compilation phases in
@@ -339,18 +504,15 @@ class LChunk;
// fail, bail-out to the full code generator or succeed. Apart from
// their return value, the status of the phase last run can be checked
// using last_status().
-class OptimizingCompiler: public ZoneObject {
+class RecompileJob: public ZoneObject {
public:
- explicit OptimizingCompiler(CompilationInfo* info)
+ explicit RecompileJob(CompilationInfo* info)
: info_(info),
- oracle_(NULL),
graph_builder_(NULL),
graph_(NULL),
chunk_(NULL),
- time_taken_to_create_graph_(0),
- time_taken_to_optimize_(0),
- time_taken_to_codegen_(0),
- last_status_(FAILED) { }
+ last_status_(FAILED),
+ awaiting_install_(false) { }
enum Status {
FAILED, BAILED_OUT, SUCCEEDED
@@ -362,6 +524,7 @@ class OptimizingCompiler: public ZoneObject {
Status last_status() const { return last_status_; }
CompilationInfo* info() const { return info_; }
+ Isolate* isolate() const { return info()->isolate(); }
MUST_USE_RESULT Status AbortOptimization() {
info_->AbortOptimization();
@@ -369,16 +532,23 @@ class OptimizingCompiler: public ZoneObject {
return SetLastStatus(BAILED_OUT);
}
+ void WaitForInstall() {
+ ASSERT(info_->is_osr());
+ awaiting_install_ = true;
+ }
+
+ bool IsWaitingForInstall() { return awaiting_install_; }
+
private:
CompilationInfo* info_;
- TypeFeedbackOracle* oracle_;
- HGraphBuilder* graph_builder_;
+ HOptimizedGraphBuilder* graph_builder_;
HGraph* graph_;
LChunk* chunk_;
- int64_t time_taken_to_create_graph_;
- int64_t time_taken_to_optimize_;
- int64_t time_taken_to_codegen_;
+ TimeDelta time_taken_to_create_graph_;
+ TimeDelta time_taken_to_optimize_;
+ TimeDelta time_taken_to_codegen_;
Status last_status_;
+ bool awaiting_install_;
MUST_USE_RESULT Status SetLastStatus(Status status) {
last_status_ = status;
@@ -387,18 +557,19 @@ class OptimizingCompiler: public ZoneObject {
void RecordOptimizationStats();
struct Timer {
- Timer(OptimizingCompiler* compiler, int64_t* location)
- : compiler_(compiler),
- start_(OS::Ticks()),
- location_(location) { }
+ Timer(RecompileJob* job, TimeDelta* location)
+ : job_(job), location_(location) {
+ ASSERT(location_ != NULL);
+ timer_.Start();
+ }
~Timer() {
- *location_ += (OS::Ticks() - start_);
+ *location_ += timer_.Elapsed();
}
- OptimizingCompiler* compiler_;
- int64_t start_;
- int64_t* location_;
+ RecompileJob* job_;
+ ElapsedTimer timer_;
+ TimeDelta* location_;
};
};
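The reworked Timer adds an ElapsedTimer reading into a TimeDelta slot when it goes out of scope, so each phase of the job accumulates into its own counter across multiple scopes. A standalone sketch of that accumulate-on-destruction pattern, with std::chrono standing in for V8's time types:

// Editor's sketch, not V8 code: an RAII timer that accumulates elapsed time.
#include <chrono>
#include <cstdio>
#include <thread>

namespace sketch {

using TimeDelta = std::chrono::steady_clock::duration;

class ScopedAccumulatingTimer {
 public:
  explicit ScopedAccumulatingTimer(TimeDelta* location)
      : location_(location), start_(std::chrono::steady_clock::now()) {}
  ~ScopedAccumulatingTimer() {
    // Add this scope's elapsed time to the caller-owned counter.
    *location_ += std::chrono::steady_clock::now() - start_;
  }

 private:
  TimeDelta* location_;
  std::chrono::steady_clock::time_point start_;
};

}  // namespace sketch

int main() {
  sketch::TimeDelta time_taken_to_optimize{};
  for (int i = 0; i < 3; ++i) {
    sketch::ScopedAccumulatingTimer t(&time_taken_to_optimize);
    std::this_thread::sleep_for(std::chrono::milliseconds(1));
  }
  std::printf("%lld us\n",
              (long long)std::chrono::duration_cast<std::chrono::microseconds>(
                  time_taken_to_optimize).count());
  return 0;
}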
@@ -416,8 +587,6 @@ class OptimizingCompiler: public ZoneObject {
class Compiler : public AllStatic {
public:
- static const int kMaxInliningLevels = 3;
-
// Call count before primitive functions trigger their own optimization.
static const int kCallsUntilPrimitiveOpt = 200;
@@ -430,6 +599,7 @@ class Compiler : public AllStatic {
Handle<Object> script_name,
int line_offset,
int column_offset,
+ bool is_shared_cross_origin,
Handle<Context> context,
v8::Extension* extension,
ScriptDataImpl* pre_data,
@@ -441,13 +611,15 @@ class Compiler : public AllStatic {
Handle<Context> context,
bool is_global,
LanguageMode language_mode,
+ ParseRestriction restriction,
int scope_position);
// Compile from function info (used for lazy compilation). Returns true on
// success and false if the compilation resulted in a stack overflow.
static bool CompileLazy(CompilationInfo* info);
- static void RecompileParallel(Handle<JSFunction> function);
+ static bool RecompileConcurrent(Handle<JSFunction> function,
+ uint32_t osr_pc_offset = 0);
// Compile a shared function info object (the function is possibly lazily
// compiled).
@@ -460,7 +632,7 @@ class Compiler : public AllStatic {
bool is_toplevel,
Handle<Script> script);
- static void InstallOptimizedCode(OptimizingCompiler* info);
+ static Handle<Code> InstallOptimizedCode(RecompileJob* job);
#ifdef ENABLE_DEBUGGER_SUPPORT
static bool MakeCodeForLiveEdit(CompilationInfo* info);
@@ -472,6 +644,30 @@ class Compiler : public AllStatic {
};
+class CompilationPhase BASE_EMBEDDED {
+ public:
+ CompilationPhase(const char* name, CompilationInfo* info);
+ ~CompilationPhase();
+
+ protected:
+ bool ShouldProduceTraceOutput() const;
+
+ const char* name() const { return name_; }
+ CompilationInfo* info() const { return info_; }
+ Isolate* isolate() const { return info()->isolate(); }
+ Zone* zone() { return &zone_; }
+
+ private:
+ const char* name_;
+ CompilationInfo* info_;
+ Zone zone_;
+ unsigned info_zone_start_allocation_size_;
+ ElapsedTimer timer_;
+
+ DISALLOW_COPY_AND_ASSIGN(CompilationPhase);
+};
+
+
} } // namespace v8::internal
#endif // V8_COMPILER_H_
diff --git a/deps/v8/src/contexts.cc b/deps/v8/src/contexts.cc
index 93c979540..710d30aa8 100644
--- a/deps/v8/src/contexts.cc
+++ b/deps/v8/src/contexts.cc
@@ -55,6 +55,15 @@ JSBuiltinsObject* Context::builtins() {
}
+Context* Context::global_context() {
+ Context* current = this;
+ while (!current->IsGlobalContext()) {
+ current = current->previous();
+ }
+ return current;
+}
+
+
Context* Context::native_context() {
// Fast case: the global object for this context has been set. In
// that case, the global object has a direct pointer to the global
@@ -65,7 +74,7 @@ Context* Context::native_context() {
// During bootstrapping, the global object might not be set and we
// have to search the context chain to find the native context.
- ASSERT(Isolate::Current()->bootstrapper()->IsActive());
+ ASSERT(this->GetIsolate()->bootstrapper()->IsActive());
Context* current = this;
while (!current->IsNativeContext()) {
JSFunction* closure = JSFunction::cast(current->closure());
@@ -79,6 +88,7 @@ JSObject* Context::global_proxy() {
return native_context()->global_proxy_object();
}
+
void Context::set_global_proxy(JSObject* object) {
native_context()->set_global_proxy_object(object);
}
@@ -114,7 +124,8 @@ Handle<Object> Context::Lookup(Handle<String> name,
if (context->IsNativeContext() ||
context->IsWithContext() ||
(context->IsFunctionContext() && context->has_extension())) {
- Handle<JSObject> object(JSObject::cast(context->extension()), isolate);
+ Handle<JSReceiver> object(
+ JSReceiver::cast(context->extension()), isolate);
// Context extension objects needs to behave as if they have no
// prototype. So even if we want to follow prototype chains, we need
// to only do a local lookup for context extension objects.
@@ -124,6 +135,8 @@ Handle<Object> Context::Lookup(Handle<String> name,
} else {
*attributes = object->GetPropertyAttribute(*name);
}
+ if (isolate->has_pending_exception()) return Handle<Object>();
+
if (*attributes != ABSENT) {
if (FLAG_trace_contexts) {
PrintF("=> found property in context object %p\n",
@@ -183,6 +196,10 @@ Handle<Object> Context::Lookup(Handle<String> name,
? IMMUTABLE_CHECK_INITIALIZED_HARMONY :
IMMUTABLE_IS_INITIALIZED_HARMONY;
break;
+ case MODULE:
+ *attributes = READ_ONLY;
+ *binding_flags = IMMUTABLE_IS_INITIALIZED_HARMONY;
+ break;
case DYNAMIC:
case DYNAMIC_GLOBAL:
case DYNAMIC_LOCAL:
@@ -242,7 +259,7 @@ Handle<Object> Context::Lookup(Handle<String> name,
void Context::AddOptimizedFunction(JSFunction* function) {
ASSERT(IsNativeContext());
-#ifdef DEBUG
+#ifdef ENABLE_SLOW_ASSERTS
if (FLAG_enable_slow_asserts) {
Object* element = get(OPTIMIZED_FUNCTIONS_LIST);
while (!element->IsUndefined()) {
@@ -251,8 +268,6 @@ void Context::AddOptimizedFunction(JSFunction* function) {
}
}
- CHECK(function->next_function_link()->IsUndefined());
-
// Check that the context belongs to the weak native contexts list.
bool found = false;
Object* context = GetHeap()->native_contexts_list();
@@ -265,6 +280,16 @@ void Context::AddOptimizedFunction(JSFunction* function) {
}
CHECK(found);
#endif
+
+ // If the function link field is already used then the function was
+ // enqueued as a code flushing candidate and we remove it now.
+ if (!function->next_function_link()->IsUndefined()) {
+ CodeFlusher* flusher = GetHeap()->mark_compact_collector()->code_flusher();
+ flusher->EvictCandidate(function);
+ }
+
+ ASSERT(function->next_function_link()->IsUndefined());
+
function->set_next_function_link(get(OPTIMIZED_FUNCTIONS_LIST));
set(OPTIMIZED_FUNCTIONS_LIST, function);
}
@@ -294,26 +319,57 @@ void Context::RemoveOptimizedFunction(JSFunction* function) {
}
+void Context::SetOptimizedFunctionsListHead(Object* head) {
+ ASSERT(IsNativeContext());
+ set(OPTIMIZED_FUNCTIONS_LIST, head);
+}
+
+
Object* Context::OptimizedFunctionsListHead() {
ASSERT(IsNativeContext());
return get(OPTIMIZED_FUNCTIONS_LIST);
}
-void Context::ClearOptimizedFunctions() {
- set(OPTIMIZED_FUNCTIONS_LIST, GetHeap()->undefined_value());
+void Context::AddOptimizedCode(Code* code) {
+ ASSERT(IsNativeContext());
+ ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
+ ASSERT(code->next_code_link()->IsUndefined());
+ code->set_next_code_link(get(OPTIMIZED_CODE_LIST));
+ set(OPTIMIZED_CODE_LIST, code);
+}
+
+
+void Context::SetOptimizedCodeListHead(Object* head) {
+ ASSERT(IsNativeContext());
+ set(OPTIMIZED_CODE_LIST, head);
+}
+
+
+Object* Context::OptimizedCodeListHead() {
+ ASSERT(IsNativeContext());
+ return get(OPTIMIZED_CODE_LIST);
+}
+
+
+void Context::SetDeoptimizedCodeListHead(Object* head) {
+ ASSERT(IsNativeContext());
+ set(DEOPTIMIZED_CODE_LIST, head);
+}
+
+
+Object* Context::DeoptimizedCodeListHead() {
+ ASSERT(IsNativeContext());
+ return get(DEOPTIMIZED_CODE_LIST);
}
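AddOptimizedCode threads Code objects into a singly linked list through their next_code_link field, with the head kept in a native-context slot; the deoptimizer later walks and re-links these lists. A standalone sketch of that intrusive-list idiom (a next pointer embedded in the element, the head stored elsewhere); the struct names here are invented for the sketch.

// Editor's sketch: intrusive singly linked list, head stored in a context slot.
#include <cstdio>

namespace sketch {

struct CodeObject {
  int id;
  CodeObject* next_code_link;  // embedded link, like next_code_link()
};

struct NativeContextSlot {
  CodeObject* optimized_code_list = nullptr;  // like OPTIMIZED_CODE_LIST
};

// Mirrors AddOptimizedCode: push onto the head of the intrusive list.
void AddOptimizedCode(NativeContextSlot* context, CodeObject* code) {
  code->next_code_link = context->optimized_code_list;
  context->optimized_code_list = code;
}

}  // namespace sketch

int main() {
  sketch::NativeContextSlot context;
  sketch::CodeObject a{1, nullptr}, b{2, nullptr};
  sketch::AddOptimizedCode(&context, &a);
  sketch::AddOptimizedCode(&context, &b);
  // Walk the list, newest first, the way the deoptimizer would visit it.
  for (sketch::CodeObject* c = context.optimized_code_list; c != nullptr;
       c = c->next_code_link) {
    std::printf("code %d\n", c->id);
  }
  return 0;
}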
Handle<Object> Context::ErrorMessageForCodeGenerationFromStrings() {
- Handle<Object> result(error_message_for_code_gen_from_strings());
- if (result->IsUndefined()) {
- const char* error =
- "Code generation from strings disallowed for this context";
- Isolate* isolate = Isolate::Current();
- result = isolate->factory()->NewStringFromAscii(i::CStrVector(error));
- }
- return result;
+ Handle<Object> result(error_message_for_code_gen_from_strings(),
+ GetIsolate());
+ if (!result->IsUndefined()) return result;
+ return GetIsolate()->factory()->NewStringFromAscii(i::CStrVector(
+ "Code generation from strings disallowed for this context"));
}
@@ -322,7 +378,7 @@ bool Context::IsBootstrappingOrValidParentContext(
Object* object, Context* child) {
// During bootstrapping we allow all objects to pass as
// contexts. This is necessary to fix circular dependencies.
- if (Isolate::Current()->bootstrapper()->IsActive()) return true;
+ if (child->GetIsolate()->bootstrapper()->IsActive()) return true;
if (!object->IsContext()) return false;
Context* context = Context::cast(object);
return context->IsNativeContext() || context->IsGlobalContext() ||
@@ -330,10 +386,9 @@ bool Context::IsBootstrappingOrValidParentContext(
}
-bool Context::IsBootstrappingOrGlobalObject(Object* object) {
+bool Context::IsBootstrappingOrGlobalObject(Isolate* isolate, Object* object) {
// During bootstrapping we allow all objects to pass as global
// objects. This is necessary to fix circular dependencies.
- Isolate* isolate = Isolate::Current();
return isolate->heap()->gc_state() != Heap::NOT_IN_GC ||
isolate->bootstrapper()->IsActive() ||
object->IsGlobalObject();
diff --git a/deps/v8/src/contexts.h b/deps/v8/src/contexts.h
index 378185f94..189c215e6 100644
--- a/deps/v8/src/contexts.h
+++ b/deps/v8/src/contexts.h
@@ -103,6 +103,7 @@ enum BindingFlags {
V(NUMBER_FUNCTION_INDEX, JSFunction, number_function) \
V(STRING_FUNCTION_INDEX, JSFunction, string_function) \
V(STRING_FUNCTION_PROTOTYPE_MAP_INDEX, Map, string_function_prototype_map) \
+ V(SYMBOL_FUNCTION_INDEX, JSFunction, symbol_function) \
V(OBJECT_FUNCTION_INDEX, JSFunction, object_function) \
V(INTERNAL_ARRAY_FUNCTION_INDEX, JSFunction, internal_array_function) \
V(ARRAY_FUNCTION_INDEX, JSFunction, array_function) \
@@ -111,6 +112,7 @@ enum BindingFlags {
V(JSON_OBJECT_INDEX, JSObject, json_object) \
V(REGEXP_FUNCTION_INDEX, JSFunction, regexp_function) \
V(INITIAL_OBJECT_PROTOTYPE_INDEX, JSObject, initial_object_prototype) \
+ V(INITIAL_ARRAY_PROTOTYPE_INDEX, JSObject, initial_array_prototype) \
V(CREATE_DATE_FUN_INDEX, JSFunction, create_date_fun) \
V(TO_NUMBER_FUN_INDEX, JSFunction, to_number_fun) \
V(TO_STRING_FUN_INDEX, JSFunction, to_string_fun) \
@@ -122,14 +124,22 @@ enum BindingFlags {
V(GLOBAL_EVAL_FUN_INDEX, JSFunction, global_eval_fun) \
V(INSTANTIATE_FUN_INDEX, JSFunction, instantiate_fun) \
V(CONFIGURE_INSTANCE_FUN_INDEX, JSFunction, configure_instance_fun) \
+ V(ARRAY_BUFFER_FUN_INDEX, JSFunction, array_buffer_fun) \
+ V(UINT8_ARRAY_FUN_INDEX, JSFunction, uint8_array_fun) \
+ V(INT8_ARRAY_FUN_INDEX, JSFunction, int8_array_fun) \
+ V(UINT16_ARRAY_FUN_INDEX, JSFunction, uint16_array_fun) \
+ V(INT16_ARRAY_FUN_INDEX, JSFunction, int16_array_fun) \
+ V(UINT32_ARRAY_FUN_INDEX, JSFunction, uint32_array_fun) \
+ V(INT32_ARRAY_FUN_INDEX, JSFunction, int32_array_fun) \
+ V(FLOAT_ARRAY_FUN_INDEX, JSFunction, float_array_fun) \
+ V(DOUBLE_ARRAY_FUN_INDEX, JSFunction, double_array_fun) \
+ V(UINT8C_ARRAY_FUN_INDEX, JSFunction, uint8c_array_fun) \
+ V(DATA_VIEW_FUN_INDEX, JSFunction, data_view_fun) \
V(FUNCTION_MAP_INDEX, Map, function_map) \
V(STRICT_MODE_FUNCTION_MAP_INDEX, Map, strict_mode_function_map) \
V(FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX, Map, function_without_prototype_map) \
V(STRICT_MODE_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX, Map, \
strict_mode_function_without_prototype_map) \
- V(FUNCTION_INSTANCE_MAP_INDEX, Map, function_instance_map) \
- V(STRICT_MODE_FUNCTION_INSTANCE_MAP_INDEX, Map, \
- strict_mode_function_instance_map) \
V(REGEXP_RESULT_MAP_INDEX, Map, regexp_result_map)\
V(ARGUMENTS_BOILERPLATE_INDEX, JSObject, arguments_boilerplate) \
V(ALIASED_ARGUMENTS_BOILERPLATE_INDEX, JSObject, \
@@ -152,7 +162,7 @@ enum BindingFlags {
V(CONTEXT_EXTENSION_FUNCTION_INDEX, JSFunction, context_extension_function) \
V(OUT_OF_MEMORY_INDEX, Object, out_of_memory) \
V(MAP_CACHE_INDEX, Object, map_cache) \
- V(CONTEXT_DATA_INDEX, Object, data) \
+ V(EMBEDDER_DATA_INDEX, FixedArray, embedder_data) \
V(ALLOW_CODE_GEN_FROM_STRINGS_INDEX, Object, allow_code_gen_from_strings) \
V(ERROR_MESSAGE_FOR_CODE_GEN_FROM_STRINGS_INDEX, Object, \
error_message_for_code_gen_from_strings) \
@@ -161,7 +171,20 @@ enum BindingFlags {
V(DERIVED_HAS_TRAP_INDEX, JSFunction, derived_has_trap) \
V(DERIVED_GET_TRAP_INDEX, JSFunction, derived_get_trap) \
V(DERIVED_SET_TRAP_INDEX, JSFunction, derived_set_trap) \
- V(PROXY_ENUMERATE, JSFunction, proxy_enumerate) \
+ V(PROXY_ENUMERATE_INDEX, JSFunction, proxy_enumerate) \
+ V(OBSERVERS_NOTIFY_CHANGE_INDEX, JSFunction, observers_notify_change) \
+ V(OBSERVERS_ENQUEUE_SPLICE_INDEX, JSFunction, observers_enqueue_splice) \
+ V(OBSERVERS_BEGIN_SPLICE_INDEX, JSFunction, \
+ observers_begin_perform_splice) \
+ V(OBSERVERS_END_SPLICE_INDEX, JSFunction, \
+ observers_end_perform_splice) \
+ V(OBSERVERS_DELIVER_CHANGES_INDEX, JSFunction, observers_deliver_changes) \
+ V(GENERATOR_FUNCTION_MAP_INDEX, Map, generator_function_map) \
+ V(STRICT_MODE_GENERATOR_FUNCTION_MAP_INDEX, Map, \
+ strict_mode_generator_function_map) \
+ V(GENERATOR_OBJECT_PROTOTYPE_MAP_INDEX, Map, \
+ generator_object_prototype_map) \
+ V(GENERATOR_RESULT_MAP_INDEX, Map, generator_result_map) \
V(RANDOM_SEED_INDEX, ByteArray, random_seed)
// JSFunctions are pairs (context, function code), sometimes also called
@@ -241,13 +264,13 @@ class Context: public FixedArray {
STRICT_MODE_FUNCTION_MAP_INDEX,
FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX,
STRICT_MODE_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX,
- FUNCTION_INSTANCE_MAP_INDEX,
- STRICT_MODE_FUNCTION_INSTANCE_MAP_INDEX,
INITIAL_OBJECT_PROTOTYPE_INDEX,
+ INITIAL_ARRAY_PROTOTYPE_INDEX,
BOOLEAN_FUNCTION_INDEX,
NUMBER_FUNCTION_INDEX,
STRING_FUNCTION_INDEX,
STRING_FUNCTION_PROTOTYPE_MAP_INDEX,
+ SYMBOL_FUNCTION_INDEX,
OBJECT_FUNCTION_INDEX,
INTERNAL_ARRAY_FUNCTION_INDEX,
ARRAY_FUNCTION_INDEX,
@@ -267,6 +290,17 @@ class Context: public FixedArray {
GLOBAL_EVAL_FUN_INDEX,
INSTANTIATE_FUN_INDEX,
CONFIGURE_INSTANCE_FUN_INDEX,
+ ARRAY_BUFFER_FUN_INDEX,
+ UINT8_ARRAY_FUN_INDEX,
+ INT8_ARRAY_FUN_INDEX,
+ UINT16_ARRAY_FUN_INDEX,
+ INT16_ARRAY_FUN_INDEX,
+ UINT32_ARRAY_FUN_INDEX,
+ INT32_ARRAY_FUN_INDEX,
+ FLOAT_ARRAY_FUN_INDEX,
+ DOUBLE_ARRAY_FUN_INDEX,
+ UINT8C_ARRAY_FUN_INDEX,
+ DATA_VIEW_FUN_INDEX,
MESSAGE_LISTENERS_INDEX,
MAKE_MESSAGE_FUN_INDEX,
GET_STACK_TRACE_LINE_INDEX,
@@ -281,21 +315,32 @@ class Context: public FixedArray {
OPAQUE_REFERENCE_FUNCTION_INDEX,
CONTEXT_EXTENSION_FUNCTION_INDEX,
OUT_OF_MEMORY_INDEX,
- CONTEXT_DATA_INDEX,
+ EMBEDDER_DATA_INDEX,
ALLOW_CODE_GEN_FROM_STRINGS_INDEX,
ERROR_MESSAGE_FOR_CODE_GEN_FROM_STRINGS_INDEX,
TO_COMPLETE_PROPERTY_DESCRIPTOR_INDEX,
DERIVED_HAS_TRAP_INDEX,
DERIVED_GET_TRAP_INDEX,
DERIVED_SET_TRAP_INDEX,
- PROXY_ENUMERATE,
+ PROXY_ENUMERATE_INDEX,
+ OBSERVERS_NOTIFY_CHANGE_INDEX,
+ OBSERVERS_ENQUEUE_SPLICE_INDEX,
+ OBSERVERS_BEGIN_SPLICE_INDEX,
+ OBSERVERS_END_SPLICE_INDEX,
+ OBSERVERS_DELIVER_CHANGES_INDEX,
+ GENERATOR_FUNCTION_MAP_INDEX,
+ STRICT_MODE_GENERATOR_FUNCTION_MAP_INDEX,
+ GENERATOR_OBJECT_PROTOTYPE_MAP_INDEX,
+ GENERATOR_RESULT_MAP_INDEX,
RANDOM_SEED_INDEX,
// Properties from here are treated as weak references by the full GC.
// Scavenge treats them as strong references.
OPTIMIZED_FUNCTIONS_LIST, // Weak.
- MAP_CACHE_INDEX, // Weak.
- NEXT_CONTEXT_LINK, // Weak.
+ OPTIMIZED_CODE_LIST, // Weak.
+ DEOPTIMIZED_CODE_LIST, // Weak.
+ MAP_CACHE_INDEX, // Weak.
+ NEXT_CONTEXT_LINK, // Weak.
// Total number of slots.
NATIVE_CONTEXT_SLOTS,
@@ -327,7 +372,7 @@ class Context: public FixedArray {
GlobalObject* global_object() {
Object* result = get(GLOBAL_OBJECT_INDEX);
- ASSERT(IsBootstrappingOrGlobalObject(result));
+ ASSERT(IsBootstrappingOrGlobalObject(this->GetIsolate(), result));
return reinterpret_cast<GlobalObject*>(result);
}
void set_global_object(GlobalObject* object) {
@@ -341,6 +386,9 @@ class Context: public FixedArray {
// The builtins object.
JSBuiltinsObject* builtins();
+ // Get the innermost global context by traversing the context chain.
+ Context* global_context();
+
// Compute the native context by traversing the context chain.
Context* native_context();
@@ -382,11 +430,19 @@ class Context: public FixedArray {
// Mark the native context with out of memory.
inline void mark_out_of_memory();
- // A native context hold a list of all functions which have been optimized.
+ // A native context holds a list of all functions with optimized code.
void AddOptimizedFunction(JSFunction* function);
void RemoveOptimizedFunction(JSFunction* function);
+ void SetOptimizedFunctionsListHead(Object* head);
Object* OptimizedFunctionsListHead();
- void ClearOptimizedFunctions();
+
+ // The native context also stores a list of all optimized code and a
+ // list of all deoptimized code, which are needed by the deoptimizer.
+ void AddOptimizedCode(Code* code);
+ void SetOptimizedCodeListHead(Object* head);
+ Object* OptimizedCodeListHead();
+ void SetDeoptimizedCodeListHead(Object* head);
+ Object* DeoptimizedCodeListHead();
Handle<Object> ErrorMessageForCodeGenerationFromStrings();
@@ -395,6 +451,10 @@ class Context: public FixedArray {
ASSERT(IsNativeContext()); \
set(index, value); \
} \
+ bool is_##name(type* value) { \
+ ASSERT(IsNativeContext()); \
+ return type::cast(get(index)) == value; \
+ } \
type* name() { \
ASSERT(IsNativeContext()); \
return type::cast(get(index)); \
@@ -430,6 +490,16 @@ class Context: public FixedArray {
return kHeaderSize + index * kPointerSize - kHeapObjectTag;
}
+ static int FunctionMapIndex(LanguageMode language_mode, bool is_generator) {
+ return is_generator
+ ? (language_mode == CLASSIC_MODE
+ ? GENERATOR_FUNCTION_MAP_INDEX
+ : STRICT_MODE_GENERATOR_FUNCTION_MAP_INDEX)
+ : (language_mode == CLASSIC_MODE
+ ? FUNCTION_MAP_INDEX
+ : STRICT_MODE_FUNCTION_MAP_INDEX);
+ }
+
static const int kSize = kHeaderSize + NATIVE_CONTEXT_SLOTS * kPointerSize;
// GC support.
@@ -448,8 +518,11 @@ class Context: public FixedArray {
#ifdef DEBUG
// Bootstrapping-aware type checks.
static bool IsBootstrappingOrValidParentContext(Object* object, Context* kid);
- static bool IsBootstrappingOrGlobalObject(Object* object);
+ static bool IsBootstrappingOrGlobalObject(Isolate* isolate, Object* object);
#endif
+
+ STATIC_CHECK(kHeaderSize == Internals::kContextHeaderSize);
+ STATIC_CHECK(EMBEDDER_DATA_INDEX == Internals::kContextEmbedderDataIndex);
};
} } // namespace v8::internal
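The accessor macro near the end of the contexts.h hunk above now emits a third member per native-context field, an is_<name>() identity check, and the new FunctionMapIndex() helper picks the function map for generators versus ordinary functions. A rough sketch of what the macro expands to for one of the entries added in this patch (the setter half of the expansion lies outside the hunk and is elided); names follow the patch, the surrounding class is v8::internal::Context:

    // Expansion sketch for V(DATA_VIEW_FUN_INDEX, JSFunction, data_view_fun);
    // both members assert that the receiver is the native context.
    bool is_data_view_fun(JSFunction* value) {
      ASSERT(IsNativeContext());
      return JSFunction::cast(get(DATA_VIEW_FUN_INDEX)) == value;
    }
    JSFunction* data_view_fun() {
      ASSERT(IsNativeContext());
      return JSFunction::cast(get(DATA_VIEW_FUN_INDEX));
    }

    // Map selection sketch: a classic-mode generator literal resolves to
    // GENERATOR_FUNCTION_MAP_INDEX; non-generators keep using the existing
    // function / strict-mode function maps.
    int index = Context::FunctionMapIndex(CLASSIC_MODE, true /* is_generator */);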
diff --git a/deps/v8/src/conversions-inl.h b/deps/v8/src/conversions-inl.h
index e272fe6c0..7ba19ba0f 100644
--- a/deps/v8/src/conversions-inl.h
+++ b/deps/v8/src/conversions-inl.h
@@ -29,9 +29,9 @@
#define V8_CONVERSIONS_INL_H_
#include <limits.h> // Required for INT_MAX etc.
-#include <math.h>
#include <float.h> // Required for DBL_MAX and on Win32 for finite()
#include <stdarg.h>
+#include <cmath>
#include "globals.h" // Required for V8_INFINITY
// ----------------------------------------------------------------------------
@@ -77,7 +77,7 @@ inline unsigned int FastD2UI(double x) {
uint32_t result;
Address mantissa_ptr = reinterpret_cast<Address>(&x);
// Copy least significant 32 bits of mantissa.
- memcpy(&result, mantissa_ptr, sizeof(result));
+ OS::MemCopy(&result, mantissa_ptr, sizeof(result));
return negative ? ~result + 1 : result;
}
// Large number (outside uint32 range), Infinity or NaN.
@@ -86,8 +86,8 @@ inline unsigned int FastD2UI(double x) {
inline double DoubleToInteger(double x) {
- if (isnan(x)) return 0;
- if (!isfinite(x) || x == 0) return x;
+ if (std::isnan(x)) return 0;
+ if (!std::isfinite(x) || x == 0) return x;
return (x >= 0) ? floor(x) : ceil(x);
}
@@ -212,7 +212,7 @@ double InternalStringToIntDouble(UnicodeCache* unicode_cache,
}
// Rounding up may cause overflow.
- if ((number & ((int64_t)1 << 53)) != 0) {
+ if ((number & (static_cast<int64_t>(1) << 53)) != 0) {
exponent++;
number >>= 1;
}
@@ -355,7 +355,7 @@ double InternalStringToInt(UnicodeCache* unicode_cache,
return JunkStringValue();
}
- ASSERT(buffer_pos < kBufferSize);
+ SLOW_ASSERT(buffer_pos < kBufferSize);
buffer[buffer_pos] = '\0';
Vector<const char> buffer_vector(buffer, buffer_pos);
return negative ? -Strtod(buffer_vector, 0) : Strtod(buffer_vector, 0);
@@ -481,9 +481,9 @@ double InternalStringToDouble(UnicodeCache* unicode_cache,
sign = NEGATIVE;
}
- static const char kInfinitySymbol[] = "Infinity";
- if (*current == kInfinitySymbol[0]) {
- if (!SubStringEquals(&current, end, kInfinitySymbol)) {
+ static const char kInfinityString[] = "Infinity";
+ if (*current == kInfinityString[0]) {
+ if (!SubStringEquals(&current, end, kInfinityString)) {
return JunkStringValue();
}
@@ -515,6 +515,32 @@ double InternalStringToDouble(UnicodeCache* unicode_cache,
end,
false,
allow_trailing_junk);
+
+ // It could be an explicit octal value.
+ } else if ((flags & ALLOW_OCTAL) && (*current == 'o' || *current == 'O')) {
+ ++current;
+ if (current == end || !isDigit(*current, 8) || sign != NONE) {
+ return JunkStringValue(); // "0o".
+ }
+
+ return InternalStringToIntDouble<3>(unicode_cache,
+ current,
+ end,
+ false,
+ allow_trailing_junk);
+
+ // It could be a binary value.
+ } else if ((flags & ALLOW_BINARY) && (*current == 'b' || *current == 'B')) {
+ ++current;
+ if (current == end || !isBinaryDigit(*current) || sign != NONE) {
+ return JunkStringValue(); // "0b".
+ }
+
+ return InternalStringToIntDouble<1>(unicode_cache,
+ current,
+ end,
+ false,
+ allow_trailing_junk);
}
// Ignore leading zeros in the integer part.
@@ -524,7 +550,7 @@ double InternalStringToDouble(UnicodeCache* unicode_cache,
}
}
- bool octal = leading_zero && (flags & ALLOW_OCTALS) != 0;
+ bool octal = leading_zero && (flags & ALLOW_IMPLICIT_OCTAL) != 0;
// Copy significant digits of the integer part (if any) to the buffer.
while (*current >= '0' && *current <= '9') {
@@ -666,7 +692,7 @@ double InternalStringToDouble(UnicodeCache* unicode_cache,
exponent--;
}
- ASSERT(buffer_pos < kBufferSize);
+ SLOW_ASSERT(buffer_pos < kBufferSize);
buffer[buffer_pos] = '\0';
double converted = Strtod(Vector<const char>(buffer, buffer_pos), exponent);
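The two new branches above teach InternalStringToDouble() the ES6-style explicit prefixes: "0o"/"0O" dispatches to InternalStringToIntDouble<3> (three bits per digit, i.e. octal) and "0b"/"0B" to InternalStringToIntDouble<1> (binary), each rejecting a preceding sign or a prefix with no digits. A small usage sketch through the public StringToDouble() wrapper from conversions.cc below, assuming a UnicodeCache instance is at hand:

    UnicodeCache cache;
    // "0o17" -> 15.0 when explicit octal is allowed.
    double a = StringToDouble(&cache, "0o17", ALLOW_OCTAL, 0.0);
    // "0b1010" -> 10.0 when binary is allowed.
    double b = StringToDouble(&cache, "0b1010", ALLOW_BINARY, 0.0);
    // "0b" with no digits, "0b2", or "-0b1" (sign before the prefix) all fall
    // through to JunkStringValue(), i.e. NaN.
    double c = StringToDouble(&cache, "-0b1", ALLOW_BINARY, 0.0);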
diff --git a/deps/v8/src/conversions.cc b/deps/v8/src/conversions.cc
index 5bfddd04c..5f1219eea 100644
--- a/deps/v8/src/conversions.cc
+++ b/deps/v8/src/conversions.cc
@@ -26,22 +26,31 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <stdarg.h>
-#include <math.h>
#include <limits.h>
+#include <cmath>
#include "conversions-inl.h"
#include "dtoa.h"
+#include "list-inl.h"
#include "strtod.h"
#include "utils.h"
+#ifndef _STLP_VENDOR_CSTD
+// STLPort doesn't import fpclassify into the std namespace.
+using std::fpclassify;
+#endif
+
namespace v8 {
namespace internal {
double StringToDouble(UnicodeCache* unicode_cache,
const char* str, int flags, double empty_string_val) {
- const char* end = str + StrLength(str);
- return InternalStringToDouble(unicode_cache, str, end, flags,
+ // We cast to const uint8_t* here to avoid instantiating the
+ // InternalStringToDouble() template for const char* as well.
+ const uint8_t* start = reinterpret_cast<const uint8_t*>(str);
+ const uint8_t* end = start + StrLength(str);
+ return InternalStringToDouble(unicode_cache, start, end, flags,
empty_string_val);
}
@@ -50,11 +59,15 @@ double StringToDouble(UnicodeCache* unicode_cache,
Vector<const char> str,
int flags,
double empty_string_val) {
- const char* end = str.start() + str.length();
- return InternalStringToDouble(unicode_cache, str.start(), end, flags,
+ // We cast to const uint8_t* here to avoid instantiating the
+ // InternalStringToDouble() template for const char* as well.
+ const uint8_t* start = reinterpret_cast<const uint8_t*>(str.start());
+ const uint8_t* end = start + str.length();
+ return InternalStringToDouble(unicode_cache, start, end, flags,
empty_string_val);
}
+
double StringToDouble(UnicodeCache* unicode_cache,
Vector<const uc16> str,
int flags,
diff --git a/deps/v8/src/conversions.h b/deps/v8/src/conversions.h
index 1fbb5f118..7aa2d3fb3 100644
--- a/deps/v8/src/conversions.h
+++ b/deps/v8/src/conversions.h
@@ -52,6 +52,11 @@ inline bool isDigit(int x, int radix) {
}
+inline bool isBinaryDigit(int x) {
+ return x == '0' || x == '1';
+}
+
+
// The fast double-to-(unsigned-)int conversion routine does not guarantee
// rounding towards zero.
// For NaN and values outside the int range, return INT_MIN or INT_MAX.
@@ -108,8 +113,10 @@ inline uint32_t DoubleToUint32(double x) {
enum ConversionFlags {
NO_FLAGS = 0,
ALLOW_HEX = 1,
- ALLOW_OCTALS = 2,
- ALLOW_TRAILING_JUNK = 4
+ ALLOW_OCTAL = 2,
+ ALLOW_IMPLICIT_OCTAL = 4,
+ ALLOW_BINARY = 8,
+ ALLOW_TRAILING_JUNK = 16
};
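The flag enum is reshuffled into distinct bits so callers can state exactly which literal syntaxes they accept: the old ALLOW_OCTALS splits into ALLOW_OCTAL (explicit "0o" prefix) and ALLOW_IMPLICIT_OCTAL (legacy leading-zero octal), ALLOW_BINARY is new, and ALLOW_TRAILING_JUNK moves from value 4 to 16. An illustrative composition only; the real call sites are outside this hunk:

    // ES6-style numeric literals: 0x.., 0o.., 0b.., but no legacy octal.
    int es6_literal_flags = ALLOW_HEX | ALLOW_OCTAL | ALLOW_BINARY;
    // Legacy-tolerant parsing: 0x.. plus leading-zero octal.
    int legacy_flags = ALLOW_HEX | ALLOW_IMPLICIT_OCTAL;
    // Either of the above, plus permission to stop at the first junk character.
    int lenient_flags = es6_literal_flags | ALLOW_TRAILING_JUNK;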
diff --git a/deps/v8/src/counters.cc b/deps/v8/src/counters.cc
index 811c0aa2e..e0a6a60a0 100644
--- a/deps/v8/src/counters.cc
+++ b/deps/v8/src/counters.cc
@@ -41,56 +41,42 @@ StatsTable::StatsTable()
int* StatsCounter::FindLocationInStatsTable() const {
- return Isolate::Current()->stats_table()->FindLocation(name_);
+ return isolate_->stats_table()->FindLocation(name_);
}
-// Start the timer.
-void StatsCounterTimer::Start() {
- if (!counter_.Enabled())
- return;
- stop_time_ = 0;
- start_time_ = OS::Ticks();
-}
-
-// Stop the timer and record the results.
-void StatsCounterTimer::Stop() {
- if (!counter_.Enabled())
- return;
- stop_time_ = OS::Ticks();
-
- // Compute the delta between start and stop, in milliseconds.
- int milliseconds = static_cast<int>(stop_time_ - start_time_) / 1000;
- counter_.Increment(milliseconds);
-}
-
void Histogram::AddSample(int sample) {
if (Enabled()) {
- Isolate::Current()->stats_table()->AddHistogramSample(histogram_, sample);
+ isolate()->stats_table()->AddHistogramSample(histogram_, sample);
}
}
void* Histogram::CreateHistogram() const {
- return Isolate::Current()->stats_table()->
+ return isolate()->stats_table()->
CreateHistogram(name_, min_, max_, num_buckets_);
}
+
// Start the timer.
void HistogramTimer::Start() {
- if (histogram_.Enabled()) {
- stop_time_ = 0;
- start_time_ = OS::Ticks();
+ if (Enabled()) {
+ timer_.Start();
+ }
+ if (FLAG_log_internal_timer_events) {
+ LOG(isolate(), TimerEvent(Logger::START, name()));
}
}
+
// Stop the timer and record the results.
void HistogramTimer::Stop() {
- if (histogram_.Enabled()) {
- stop_time_ = OS::Ticks();
-
+ if (Enabled()) {
// Compute the delta between start and stop, in milliseconds.
- int milliseconds = static_cast<int>(stop_time_ - start_time_) / 1000;
- histogram_.AddSample(milliseconds);
+ AddSample(static_cast<int>(timer_.Elapsed().InMilliseconds()));
+ timer_.Stop();
+ }
+ if (FLAG_log_internal_timer_events) {
+ LOG(isolate(), TimerEvent(Logger::END, name()));
}
}
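With StatsCounterTimer gone, HistogramTimer carries its own ElapsedTimer, reports elapsed milliseconds straight into the histogram, and optionally emits START/END entries to the internal-timer-events log. A minimal usage sketch under the new constructors declared in counters.h below; the histogram name is an invented example:

    // Constructor arguments: name, min, max, num_buckets, isolate.
    HistogramTimer timer("V8.ExamplePhase", 0, 10000, 50, isolate);

    timer.Start();   // starts the embedded ElapsedTimer (only if the histogram
                     // is enabled) and logs Logger::START when
                     // --log-internal-timer-events is set
    // ... timed work ...
    timer.Stop();    // records the elapsed milliseconds as a sample and logs
                     // the matching Logger::END event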
diff --git a/deps/v8/src/counters.h b/deps/v8/src/counters.h
index 577280f44..821c25f8c 100644
--- a/deps/v8/src/counters.h
+++ b/deps/v8/src/counters.h
@@ -113,14 +113,11 @@ class StatsTable {
// The row has a 32bit value for each process/thread in the table and also
// a name (stored in the table metadata). Since the storage location can be
// thread-specific, this class cannot be shared across threads.
-//
-// This class is designed to be POD initialized. It will be registered with
-// the counter system on first use. For example:
-// StatsCounter c = { "c:myctr", NULL, false };
-struct StatsCounter {
- const char* name_;
- int* ptr_;
- bool lookup_done_;
+class StatsCounter {
+ public:
+ StatsCounter() { }
+ explicit StatsCounter(Isolate* isolate, const char* name)
+ : isolate_(isolate), name_(name), ptr_(NULL), lookup_done_(false) { }
// Sets the counter to a specific value.
void Set(int value) {
@@ -177,39 +174,30 @@ struct StatsCounter {
private:
int* FindLocationInStatsTable() const;
-};
-
-// StatsCounterTimer t = { { L"t:foo", NULL, false }, 0, 0 };
-struct StatsCounterTimer {
- StatsCounter counter_;
-
- int64_t start_time_;
- int64_t stop_time_;
-
- // Start the timer.
- void Start();
- // Stop the timer and record the results.
- void Stop();
-
- // Returns true if the timer is running.
- bool Running() {
- return counter_.Enabled() && start_time_ != 0 && stop_time_ == 0;
- }
+ Isolate* isolate_;
+ const char* name_;
+ int* ptr_;
+ bool lookup_done_;
};
// A Histogram represents a dynamically created histogram in the StatsTable.
-//
-// This class is designed to be POD initialized. It will be registered with
-// the histogram system on first use. For example:
-// Histogram h = { "myhist", 0, 10000, 50, NULL, false };
-struct Histogram {
- const char* name_;
- int min_;
- int max_;
- int num_buckets_;
- void* histogram_;
- bool lookup_done_;
+// It will be registered with the histogram system on first use.
+class Histogram {
+ public:
+ Histogram() { }
+ Histogram(const char* name,
+ int min,
+ int max,
+ int num_buckets,
+ Isolate* isolate)
+ : name_(name),
+ min_(min),
+ max_(max),
+ num_buckets_(num_buckets),
+ histogram_(NULL),
+ lookup_done_(false),
+ isolate_(isolate) { }
// Add a single sample to this histogram.
void AddSample(int sample);
@@ -234,17 +222,31 @@ struct Histogram {
return histogram_;
}
+ const char* name() { return name_; }
+ Isolate* isolate() const { return isolate_; }
+
private:
void* CreateHistogram() const;
-};
-// A HistogramTimer allows distributions of results to be created
-// HistogramTimer t = { {L"foo", 0, 10000, 50, NULL, false}, 0, 0 };
-struct HistogramTimer {
- Histogram histogram_;
+ const char* name_;
+ int min_;
+ int max_;
+ int num_buckets_;
+ void* histogram_;
+ bool lookup_done_;
+ Isolate* isolate_;
+};
- int64_t start_time_;
- int64_t stop_time_;
+// A HistogramTimer allows distributions of results to be created.
+class HistogramTimer : public Histogram {
+ public:
+ HistogramTimer() { }
+ HistogramTimer(const char* name,
+ int min,
+ int max,
+ int num_buckets,
+ Isolate* isolate)
+ : Histogram(name, min, max, num_buckets, isolate) {}
// Start the timer.
void Start();
@@ -254,26 +256,54 @@ struct HistogramTimer {
// Returns true if the timer is running.
bool Running() {
- return histogram_.Enabled() && (start_time_ != 0) && (stop_time_ == 0);
+ return Enabled() && timer_.IsStarted();
}
- void Reset() {
- histogram_.Reset();
- }
+ // TODO(bmeurer): Remove this when HistogramTimerScope is fixed.
+#ifdef DEBUG
+ ElapsedTimer* timer() { return &timer_; }
+#endif
+
+ private:
+ ElapsedTimer timer_;
};
// Helper class for scoping a HistogramTimer.
+// TODO(bmeurer): The ifdeffery is an ugly hack around the fact that the
+// Parser is currently reentrant (when it throws an error, we call back
+// into JavaScript and all bets are off), but ElapsedTimer is not
+// reentry-safe. Fix this properly and remove |allow_nesting|.
class HistogramTimerScope BASE_EMBEDDED {
public:
- explicit HistogramTimerScope(HistogramTimer* timer) :
- timer_(timer) {
+ explicit HistogramTimerScope(HistogramTimer* timer,
+ bool allow_nesting = false)
+#ifdef DEBUG
+ : timer_(timer),
+ skipped_timer_start_(false) {
+ if (timer_->timer()->IsStarted() && allow_nesting) {
+ skipped_timer_start_ = true;
+ } else {
+ timer_->Start();
+ }
+#else
+ : timer_(timer) {
timer_->Start();
+#endif
}
~HistogramTimerScope() {
+#ifdef DEBUG
+ if (!skipped_timer_start_) {
+ timer_->Stop();
+ }
+#else
timer_->Stop();
+#endif
}
private:
HistogramTimer* timer_;
+#ifdef DEBUG
+ bool skipped_timer_start_;
+#endif
};
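HistogramTimerScope keeps its RAII shape but, in debug builds, gains the allow_nesting escape hatch for the reentrant-parser case the TODO describes: if the timer is already running and nesting is allowed, the scope neither starts nor stops it. A sketch of a call site, with the timer passed in standing for whichever HistogramTimer the caller owns:

    void TimedPhase(HistogramTimer* timer) {
      // allow_nesting = true: in a debug build a reentrant invocation skips
      // Start()/Stop() instead of tripping over an already-started ElapsedTimer.
      HistogramTimerScope scope(timer, true);
      // ... work being timed ...
    }   // scope destructor calls Stop() only if this scope actually called Start()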
diff --git a/deps/v8/src/cpu-profiler-inl.h b/deps/v8/src/cpu-profiler-inl.h
index 4982197ca..7bfbf5c57 100644
--- a/deps/v8/src/cpu-profiler-inl.h
+++ b/deps/v8/src/cpu-profiler-inl.h
@@ -56,22 +56,39 @@ void SharedFunctionInfoMoveEventRecord::UpdateCodeMap(CodeMap* code_map) {
}
-TickSample* ProfilerEventsProcessor::TickSampleEvent() {
- generator_->Tick();
+void ReportBuiltinEventRecord::UpdateCodeMap(CodeMap* code_map) {
+ CodeEntry* entry = code_map->FindEntry(start);
+ if (!entry) {
+ // Code objects for builtins should already have been added to the map but
+ // some of them have been filtered out by CpuProfiler.
+ return;
+ }
+ entry->SetBuiltinId(builtin_id);
+}
+
+
+TickSample* CpuProfiler::StartTickSample() {
+ if (is_profiling_) return processor_->StartTickSample();
+ return NULL;
+}
+
+
+void CpuProfiler::FinishTickSample() {
+ processor_->FinishTickSample();
+}
+
+
+TickSample* ProfilerEventsProcessor::StartTickSample() {
+ void* address = ticks_buffer_.StartEnqueue();
+ if (address == NULL) return NULL;
TickSampleEventRecord* evt =
- new(ticks_buffer_.Enqueue()) TickSampleEventRecord(enqueue_order_);
+ new(address) TickSampleEventRecord(last_code_event_id_);
return &evt->sample;
}
-bool ProfilerEventsProcessor::FilterOutCodeCreateEvent(
- Logger::LogEventsAndTags tag) {
- return FLAG_prof_browser_mode
- && (tag != Logger::CALLBACK_TAG
- && tag != Logger::FUNCTION_TAG
- && tag != Logger::LAZY_COMPILE_TAG
- && tag != Logger::REG_EXP_TAG
- && tag != Logger::SCRIPT_TAG);
+void ProfilerEventsProcessor::FinishTickSample() {
+ ticks_buffer_.FinishEnqueue();
}
} } // namespace v8::internal
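TickSampleEvent() is replaced by a two-phase StartTickSample()/FinishTickSample() protocol over the lock-free SamplingCircularQueue: reserve a slot, fill it in place, then publish it. A sketch of the sampler-side contract, assuming only the members shown in this patch (the platform sampler itself lives outside this hunk):

    CpuProfiler* profiler = isolate->cpu_profiler();
    TickSample* sample = profiler->StartTickSample();
    if (sample != NULL) {            // NULL when not profiling or the queue is full
      RegisterState regs;            // pc/sp/fp of the interrupted thread
      // ... fill regs from the captured thread context ...
      sample->Init(isolate, regs);   // write the sample directly into the queue slot
      profiler->FinishTickSample();  // make it visible to ProfilerEventsProcessor
    }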
diff --git a/deps/v8/src/cpu-profiler.cc b/deps/v8/src/cpu-profiler.cc
index 3cbac7785..b1af621cc 100644
--- a/deps/v8/src/cpu-profiler.cc
+++ b/deps/v8/src/cpu-profiler.cc
@@ -29,6 +29,7 @@
#include "cpu-profiler-inl.h"
+#include "compiler.h"
#include "frames-inl.h"
#include "hashmap.h"
#include "log-inl.h"
@@ -39,155 +40,53 @@
namespace v8 {
namespace internal {
-static const int kEventsBufferSize = 256 * KB;
-static const int kTickSamplesBufferChunkSize = 64 * KB;
-static const int kTickSamplesBufferChunksCount = 16;
static const int kProfilerStackSize = 64 * KB;
-ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator)
+ProfilerEventsProcessor::ProfilerEventsProcessor(
+ ProfileGenerator* generator,
+ Sampler* sampler,
+ TimeDelta period)
: Thread(Thread::Options("v8:ProfEvntProc", kProfilerStackSize)),
generator_(generator),
+ sampler_(sampler),
running_(true),
- ticks_buffer_(sizeof(TickSampleEventRecord),
- kTickSamplesBufferChunkSize,
- kTickSamplesBufferChunksCount),
- enqueue_order_(0) {
+ period_(period),
+ last_code_event_id_(0), last_processed_code_event_id_(0) {
}
-void ProfilerEventsProcessor::CallbackCreateEvent(Logger::LogEventsAndTags tag,
- const char* prefix,
- String* name,
- Address start) {
- if (FilterOutCodeCreateEvent(tag)) return;
- CodeEventsContainer evt_rec;
- CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
- rec->type = CodeEventRecord::CODE_CREATION;
- rec->order = ++enqueue_order_;
- rec->start = start;
- rec->entry = generator_->NewCodeEntry(tag, prefix, name);
- rec->size = 1;
- rec->shared = NULL;
- events_buffer_.Enqueue(evt_rec);
-}
-
-
-void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag,
- String* name,
- String* resource_name,
- int line_number,
- Address start,
- unsigned size,
- Address shared) {
- if (FilterOutCodeCreateEvent(tag)) return;
- CodeEventsContainer evt_rec;
- CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
- rec->type = CodeEventRecord::CODE_CREATION;
- rec->order = ++enqueue_order_;
- rec->start = start;
- rec->entry = generator_->NewCodeEntry(tag, name, resource_name, line_number);
- rec->size = size;
- rec->shared = shared;
- events_buffer_.Enqueue(evt_rec);
-}
-
-
-void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag,
- const char* name,
- Address start,
- unsigned size) {
- if (FilterOutCodeCreateEvent(tag)) return;
- CodeEventsContainer evt_rec;
- CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
- rec->type = CodeEventRecord::CODE_CREATION;
- rec->order = ++enqueue_order_;
- rec->start = start;
- rec->entry = generator_->NewCodeEntry(tag, name);
- rec->size = size;
- rec->shared = NULL;
- events_buffer_.Enqueue(evt_rec);
+void ProfilerEventsProcessor::Enqueue(const CodeEventsContainer& event) {
+ event.generic.order = ++last_code_event_id_;
+ events_buffer_.Enqueue(event);
}
-void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag,
- int args_count,
- Address start,
- unsigned size) {
- if (FilterOutCodeCreateEvent(tag)) return;
- CodeEventsContainer evt_rec;
- CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
- rec->type = CodeEventRecord::CODE_CREATION;
- rec->order = ++enqueue_order_;
- rec->start = start;
- rec->entry = generator_->NewCodeEntry(tag, args_count);
- rec->size = size;
- rec->shared = NULL;
- events_buffer_.Enqueue(evt_rec);
-}
-
-
-void ProfilerEventsProcessor::CodeMoveEvent(Address from, Address to) {
- CodeEventsContainer evt_rec;
- CodeMoveEventRecord* rec = &evt_rec.CodeMoveEventRecord_;
- rec->type = CodeEventRecord::CODE_MOVE;
- rec->order = ++enqueue_order_;
- rec->from = from;
- rec->to = to;
- events_buffer_.Enqueue(evt_rec);
-}
-
-
-void ProfilerEventsProcessor::SharedFunctionInfoMoveEvent(Address from,
- Address to) {
- CodeEventsContainer evt_rec;
- SharedFunctionInfoMoveEventRecord* rec =
- &evt_rec.SharedFunctionInfoMoveEventRecord_;
- rec->type = CodeEventRecord::SHARED_FUNC_MOVE;
- rec->order = ++enqueue_order_;
- rec->from = from;
- rec->to = to;
- events_buffer_.Enqueue(evt_rec);
+void ProfilerEventsProcessor::AddCurrentStack(Isolate* isolate) {
+ TickSampleEventRecord record(last_code_event_id_);
+ RegisterState regs;
+ StackFrameIterator it(isolate);
+ if (!it.done()) {
+ StackFrame* frame = it.frame();
+ regs.sp = frame->sp();
+ regs.fp = frame->fp();
+ regs.pc = frame->pc();
+ }
+ record.sample.Init(isolate, regs);
+ ticks_from_vm_buffer_.Enqueue(record);
}
-void ProfilerEventsProcessor::RegExpCodeCreateEvent(
- Logger::LogEventsAndTags tag,
- const char* prefix,
- String* name,
- Address start,
- unsigned size) {
- if (FilterOutCodeCreateEvent(tag)) return;
- CodeEventsContainer evt_rec;
- CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
- rec->type = CodeEventRecord::CODE_CREATION;
- rec->order = ++enqueue_order_;
- rec->start = start;
- rec->entry = generator_->NewCodeEntry(tag, prefix, name);
- rec->size = size;
- events_buffer_.Enqueue(evt_rec);
-}
-
-
-void ProfilerEventsProcessor::AddCurrentStack() {
- TickSampleEventRecord record(enqueue_order_);
- TickSample* sample = &record.sample;
- Isolate* isolate = Isolate::Current();
- sample->state = isolate->current_vm_state();
- sample->pc = reinterpret_cast<Address>(sample); // Not NULL.
- for (StackTraceFrameIterator it(isolate);
- !it.done() && sample->frames_count < TickSample::kMaxFramesCount;
- it.Advance()) {
- sample->stack[sample->frames_count++] = it.frame()->pc();
- }
- ticks_from_vm_buffer_.Enqueue(record);
+void ProfilerEventsProcessor::StopSynchronously() {
+ if (!running_) return;
+ running_ = false;
+ Join();
}
-bool ProfilerEventsProcessor::ProcessCodeEvent(unsigned* dequeue_order) {
- if (!events_buffer_.IsEmpty()) {
- CodeEventsContainer record;
- events_buffer_.Dequeue(&record);
+bool ProfilerEventsProcessor::ProcessCodeEvent() {
+ CodeEventsContainer record;
+ if (events_buffer_.Dequeue(&record)) {
switch (record.generic.type) {
#define PROFILER_TYPE_CASE(type, clss) \
case CodeEventRecord::type: \
@@ -199,219 +98,218 @@ bool ProfilerEventsProcessor::ProcessCodeEvent(unsigned* dequeue_order) {
#undef PROFILER_TYPE_CASE
default: return true; // Skip record.
}
- *dequeue_order = record.generic.order;
+ last_processed_code_event_id_ = record.generic.order;
return true;
}
return false;
}
+ProfilerEventsProcessor::SampleProcessingResult
+ ProfilerEventsProcessor::ProcessOneSample() {
+ if (!ticks_from_vm_buffer_.IsEmpty()
+ && ticks_from_vm_buffer_.Peek()->order ==
+ last_processed_code_event_id_) {
+ TickSampleEventRecord record;
+ ticks_from_vm_buffer_.Dequeue(&record);
+ generator_->RecordTickSample(record.sample);
+ return OneSampleProcessed;
+ }
-bool ProfilerEventsProcessor::ProcessTicks(unsigned dequeue_order) {
- while (true) {
- if (!ticks_from_vm_buffer_.IsEmpty()
- && ticks_from_vm_buffer_.Peek()->order == dequeue_order) {
- TickSampleEventRecord record;
- ticks_from_vm_buffer_.Dequeue(&record);
- generator_->RecordTickSample(record.sample);
- }
-
- const TickSampleEventRecord* rec =
- TickSampleEventRecord::cast(ticks_buffer_.StartDequeue());
- if (rec == NULL) return !ticks_from_vm_buffer_.IsEmpty();
- // Make a local copy of tick sample record to ensure that it won't
- // be modified as we are processing it. This is possible as the
- // sampler writes w/o any sync to the queue, so if the processor
- // will get far behind, a record may be modified right under its
- // feet.
- TickSampleEventRecord record = *rec;
- if (record.order == dequeue_order) {
- // A paranoid check to make sure that we don't get a memory overrun
- // in case of frames_count having a wild value.
- if (record.sample.frames_count < 0
- || record.sample.frames_count > TickSample::kMaxFramesCount)
- record.sample.frames_count = 0;
- generator_->RecordTickSample(record.sample);
- ticks_buffer_.FinishDequeue();
- } else {
- return true;
- }
+ const TickSampleEventRecord* record = ticks_buffer_.Peek();
+ if (record == NULL) {
+ if (ticks_from_vm_buffer_.IsEmpty()) return NoSamplesInQueue;
+ return FoundSampleForNextCodeEvent;
}
+ if (record->order != last_processed_code_event_id_) {
+ return FoundSampleForNextCodeEvent;
+ }
+ generator_->RecordTickSample(record->sample);
+ ticks_buffer_.Remove();
+ return OneSampleProcessed;
}
void ProfilerEventsProcessor::Run() {
- unsigned dequeue_order = 0;
-
while (running_) {
- // Process ticks until we have any.
- if (ProcessTicks(dequeue_order)) {
- // All ticks of the current dequeue_order are processed,
- // proceed to the next code event.
- ProcessCodeEvent(&dequeue_order);
- }
- YieldCPU();
+ ElapsedTimer timer;
+ timer.Start();
+ // Keep processing existing events until it is time to take the next sample.
+ do {
+ if (FoundSampleForNextCodeEvent == ProcessOneSample()) {
+ // All ticks of the current last_processed_code_event_id_ are
+ // processed, proceed to the next code event.
+ ProcessCodeEvent();
+ }
+ } while (!timer.HasExpired(period_));
+
+ // Schedule next sample. sampler_ is NULL in tests.
+ if (sampler_) sampler_->DoSample();
}
// Process remaining tick events.
- ticks_buffer_.FlushResidualRecords();
- // Perform processing until we have tick events, skip remaining code events.
- while (ProcessTicks(dequeue_order) && ProcessCodeEvent(&dequeue_order)) { }
-}
-
-
-void CpuProfiler::StartProfiling(const char* title) {
- ASSERT(Isolate::Current()->cpu_profiler() != NULL);
- Isolate::Current()->cpu_profiler()->StartCollectingProfile(title);
-}
-
-
-void CpuProfiler::StartProfiling(String* title) {
- ASSERT(Isolate::Current()->cpu_profiler() != NULL);
- Isolate::Current()->cpu_profiler()->StartCollectingProfile(title);
-}
-
-
-CpuProfile* CpuProfiler::StopProfiling(const char* title) {
- Isolate* isolate = Isolate::Current();
- return is_profiling(isolate) ?
- isolate->cpu_profiler()->StopCollectingProfile(title) : NULL;
-}
-
-
-CpuProfile* CpuProfiler::StopProfiling(Object* security_token, String* title) {
- Isolate* isolate = Isolate::Current();
- return is_profiling(isolate) ?
- isolate->cpu_profiler()->StopCollectingProfile(
- security_token, title) : NULL;
+ do {
+ SampleProcessingResult result;
+ do {
+ result = ProcessOneSample();
+ } while (result == OneSampleProcessed);
+ } while (ProcessCodeEvent());
}
int CpuProfiler::GetProfilesCount() {
- ASSERT(Isolate::Current()->cpu_profiler() != NULL);
// The count of profiles doesn't depend on a security token.
- return Isolate::Current()->cpu_profiler()->profiles_->Profiles(
- TokenEnumerator::kNoSecurityToken)->length();
-}
-
-
-CpuProfile* CpuProfiler::GetProfile(Object* security_token, int index) {
- ASSERT(Isolate::Current()->cpu_profiler() != NULL);
- CpuProfiler* profiler = Isolate::Current()->cpu_profiler();
- const int token = profiler->token_enumerator_->GetTokenId(security_token);
- return profiler->profiles_->Profiles(token)->at(index);
+ return profiles_->profiles()->length();
}
-CpuProfile* CpuProfiler::FindProfile(Object* security_token, unsigned uid) {
- ASSERT(Isolate::Current()->cpu_profiler() != NULL);
- CpuProfiler* profiler = Isolate::Current()->cpu_profiler();
- const int token = profiler->token_enumerator_->GetTokenId(security_token);
- return profiler->profiles_->GetProfile(token, uid);
-}
-
-
-TickSample* CpuProfiler::TickSampleEvent(Isolate* isolate) {
- if (CpuProfiler::is_profiling(isolate)) {
- return isolate->cpu_profiler()->processor_->TickSampleEvent();
- } else {
- return NULL;
- }
+CpuProfile* CpuProfiler::GetProfile(int index) {
+ return profiles_->profiles()->at(index);
}
void CpuProfiler::DeleteAllProfiles() {
- Isolate* isolate = Isolate::Current();
- ASSERT(isolate->cpu_profiler() != NULL);
- if (is_profiling(isolate)) {
- isolate->cpu_profiler()->StopProcessor();
- }
- isolate->cpu_profiler()->ResetProfiles();
+ if (is_profiling_) StopProcessor();
+ ResetProfiles();
}
void CpuProfiler::DeleteProfile(CpuProfile* profile) {
- ASSERT(Isolate::Current()->cpu_profiler() != NULL);
- Isolate::Current()->cpu_profiler()->profiles_->RemoveProfile(profile);
+ profiles_->RemoveProfile(profile);
delete profile;
}
-bool CpuProfiler::HasDetachedProfiles() {
- ASSERT(Isolate::Current()->cpu_profiler() != NULL);
- return Isolate::Current()->cpu_profiler()->profiles_->HasDetachedProfiles();
+static bool FilterOutCodeCreateEvent(Logger::LogEventsAndTags tag) {
+ return FLAG_prof_browser_mode
+ && (tag != Logger::CALLBACK_TAG
+ && tag != Logger::FUNCTION_TAG
+ && tag != Logger::LAZY_COMPILE_TAG
+ && tag != Logger::REG_EXP_TAG
+ && tag != Logger::SCRIPT_TAG);
}
-void CpuProfiler::CallbackEvent(String* name, Address entry_point) {
- Isolate::Current()->cpu_profiler()->processor_->CallbackCreateEvent(
- Logger::CALLBACK_TAG, CodeEntry::kEmptyNamePrefix, name, entry_point);
+void CpuProfiler::CallbackEvent(Name* name, Address entry_point) {
+ if (FilterOutCodeCreateEvent(Logger::CALLBACK_TAG)) return;
+ CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
+ CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
+ rec->start = entry_point;
+ rec->entry = profiles_->NewCodeEntry(
+ Logger::CALLBACK_TAG,
+ profiles_->GetName(name));
+ rec->size = 1;
+ rec->shared = NULL;
+ processor_->Enqueue(evt_rec);
}
void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
- Code* code, const char* comment) {
- Isolate::Current()->cpu_profiler()->processor_->CodeCreateEvent(
- tag, comment, code->address(), code->ExecutableSize());
+ Code* code,
+ const char* name) {
+ if (FilterOutCodeCreateEvent(tag)) return;
+ CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
+ CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
+ rec->start = code->address();
+ rec->entry = profiles_->NewCodeEntry(tag, profiles_->GetFunctionName(name));
+ rec->size = code->ExecutableSize();
+ rec->shared = NULL;
+ processor_->Enqueue(evt_rec);
}
void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
- Code* code, String* name) {
- Isolate* isolate = Isolate::Current();
- isolate->cpu_profiler()->processor_->CodeCreateEvent(
- tag,
- name,
- isolate->heap()->empty_string(),
- v8::CpuProfileNode::kNoLineNumberInfo,
- code->address(),
- code->ExecutableSize(),
- NULL);
+ Code* code,
+ Name* name) {
+ if (FilterOutCodeCreateEvent(tag)) return;
+ CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
+ CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
+ rec->start = code->address();
+ rec->entry = profiles_->NewCodeEntry(tag, profiles_->GetFunctionName(name));
+ rec->size = code->ExecutableSize();
+ rec->shared = NULL;
+ processor_->Enqueue(evt_rec);
}
void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
Code* code,
SharedFunctionInfo* shared,
- String* name) {
- Isolate* isolate = Isolate::Current();
- isolate->cpu_profiler()->processor_->CodeCreateEvent(
- tag,
- name,
- isolate->heap()->empty_string(),
- v8::CpuProfileNode::kNoLineNumberInfo,
- code->address(),
- code->ExecutableSize(),
- shared->address());
+ CompilationInfo* info,
+ Name* name) {
+ if (FilterOutCodeCreateEvent(tag)) return;
+ CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
+ CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
+ rec->start = code->address();
+ rec->entry = profiles_->NewCodeEntry(tag, profiles_->GetFunctionName(name));
+ if (info) {
+ rec->entry->set_no_frame_ranges(info->ReleaseNoFrameRanges());
+ }
+ if (shared->script()->IsScript()) {
+ ASSERT(Script::cast(shared->script()));
+ Script* script = Script::cast(shared->script());
+ rec->entry->set_script_id(script->id()->value());
+ rec->entry->set_bailout_reason(
+ GetBailoutReason(shared->DisableOptimizationReason()));
+ }
+ rec->size = code->ExecutableSize();
+ rec->shared = shared->address();
+ processor_->Enqueue(evt_rec);
}
void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
Code* code,
SharedFunctionInfo* shared,
- String* source, int line) {
- Isolate::Current()->cpu_profiler()->processor_->CodeCreateEvent(
+ CompilationInfo* info,
+ Name* source, int line, int column) {
+ if (FilterOutCodeCreateEvent(tag)) return;
+ CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
+ CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
+ rec->start = code->address();
+ rec->entry = profiles_->NewCodeEntry(
tag,
- shared->DebugName(),
- source,
+ profiles_->GetFunctionName(shared->DebugName()),
+ CodeEntry::kEmptyNamePrefix,
+ profiles_->GetName(source),
line,
- code->address(),
- code->ExecutableSize(),
- shared->address());
+ column);
+ if (info) {
+ rec->entry->set_no_frame_ranges(info->ReleaseNoFrameRanges());
+ }
+ ASSERT(Script::cast(shared->script()));
+ Script* script = Script::cast(shared->script());
+ rec->entry->set_script_id(script->id()->value());
+ rec->size = code->ExecutableSize();
+ rec->shared = shared->address();
+ rec->entry->set_bailout_reason(
+ GetBailoutReason(shared->DisableOptimizationReason()));
+ processor_->Enqueue(evt_rec);
}
void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
- Code* code, int args_count) {
- Isolate::Current()->cpu_profiler()->processor_->CodeCreateEvent(
+ Code* code,
+ int args_count) {
+ if (FilterOutCodeCreateEvent(tag)) return;
+ CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
+ CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
+ rec->start = code->address();
+ rec->entry = profiles_->NewCodeEntry(
tag,
- args_count,
- code->address(),
- code->ExecutableSize());
+ profiles_->GetName(args_count),
+ "args_count: ");
+ rec->size = code->ExecutableSize();
+ rec->shared = NULL;
+ processor_->Enqueue(evt_rec);
}
void CpuProfiler::CodeMoveEvent(Address from, Address to) {
- Isolate::Current()->cpu_profiler()->processor_->CodeMoveEvent(from, to);
+ CodeEventsContainer evt_rec(CodeEventRecord::CODE_MOVE);
+ CodeMoveEventRecord* rec = &evt_rec.CodeMoveEventRecord_;
+ rec->from = from;
+ rec->to = to;
+ processor_->Enqueue(evt_rec);
}
@@ -420,108 +318,158 @@ void CpuProfiler::CodeDeleteEvent(Address from) {
void CpuProfiler::SharedFunctionInfoMoveEvent(Address from, Address to) {
- CpuProfiler* profiler = Isolate::Current()->cpu_profiler();
- profiler->processor_->SharedFunctionInfoMoveEvent(from, to);
+ CodeEventsContainer evt_rec(CodeEventRecord::SHARED_FUNC_MOVE);
+ SharedFunctionInfoMoveEventRecord* rec =
+ &evt_rec.SharedFunctionInfoMoveEventRecord_;
+ rec->from = from;
+ rec->to = to;
+ processor_->Enqueue(evt_rec);
}
-void CpuProfiler::GetterCallbackEvent(String* name, Address entry_point) {
- Isolate::Current()->cpu_profiler()->processor_->CallbackCreateEvent(
- Logger::CALLBACK_TAG, "get ", name, entry_point);
+void CpuProfiler::GetterCallbackEvent(Name* name, Address entry_point) {
+ if (FilterOutCodeCreateEvent(Logger::CALLBACK_TAG)) return;
+ CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
+ CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
+ rec->start = entry_point;
+ rec->entry = profiles_->NewCodeEntry(
+ Logger::CALLBACK_TAG,
+ profiles_->GetName(name),
+ "get ");
+ rec->size = 1;
+ rec->shared = NULL;
+ processor_->Enqueue(evt_rec);
}
void CpuProfiler::RegExpCodeCreateEvent(Code* code, String* source) {
- Isolate::Current()->cpu_profiler()->processor_->RegExpCodeCreateEvent(
+ if (FilterOutCodeCreateEvent(Logger::REG_EXP_TAG)) return;
+ CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
+ CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
+ rec->start = code->address();
+ rec->entry = profiles_->NewCodeEntry(
Logger::REG_EXP_TAG,
- "RegExp: ",
- source,
- code->address(),
- code->ExecutableSize());
+ profiles_->GetName(source),
+ "RegExp: ");
+ rec->size = code->ExecutableSize();
+ processor_->Enqueue(evt_rec);
}
-void CpuProfiler::SetterCallbackEvent(String* name, Address entry_point) {
- Isolate::Current()->cpu_profiler()->processor_->CallbackCreateEvent(
- Logger::CALLBACK_TAG, "set ", name, entry_point);
+void CpuProfiler::SetterCallbackEvent(Name* name, Address entry_point) {
+ if (FilterOutCodeCreateEvent(Logger::CALLBACK_TAG)) return;
+ CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
+ CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
+ rec->start = entry_point;
+ rec->entry = profiles_->NewCodeEntry(
+ Logger::CALLBACK_TAG,
+ profiles_->GetName(name),
+ "set ");
+ rec->size = 1;
+ rec->shared = NULL;
+ processor_->Enqueue(evt_rec);
}
-CpuProfiler::CpuProfiler()
- : profiles_(new CpuProfilesCollection()),
+CpuProfiler::CpuProfiler(Isolate* isolate)
+ : isolate_(isolate),
+ sampling_interval_(TimeDelta::FromMicroseconds(
+ FLAG_cpu_profiler_sampling_interval)),
+ profiles_(new CpuProfilesCollection(isolate->heap())),
next_profile_uid_(1),
- token_enumerator_(new TokenEnumerator()),
generator_(NULL),
processor_(NULL),
- need_to_stop_sampler_(false),
+ is_profiling_(false) {
+}
+
+
+CpuProfiler::CpuProfiler(Isolate* isolate,
+ CpuProfilesCollection* test_profiles,
+ ProfileGenerator* test_generator,
+ ProfilerEventsProcessor* test_processor)
+ : isolate_(isolate),
+ sampling_interval_(TimeDelta::FromMicroseconds(
+ FLAG_cpu_profiler_sampling_interval)),
+ profiles_(test_profiles),
+ next_profile_uid_(1),
+ generator_(test_generator),
+ processor_(test_processor),
is_profiling_(false) {
}
CpuProfiler::~CpuProfiler() {
- delete token_enumerator_;
+ ASSERT(!is_profiling_);
delete profiles_;
}
+void CpuProfiler::set_sampling_interval(TimeDelta value) {
+ ASSERT(!is_profiling_);
+ sampling_interval_ = value;
+}
+
+
void CpuProfiler::ResetProfiles() {
delete profiles_;
- profiles_ = new CpuProfilesCollection();
+ profiles_ = new CpuProfilesCollection(isolate()->heap());
}
-void CpuProfiler::StartCollectingProfile(const char* title) {
- if (profiles_->StartProfiling(title, next_profile_uid_++)) {
+
+void CpuProfiler::StartProfiling(const char* title, bool record_samples) {
+ if (profiles_->StartProfiling(title, next_profile_uid_++, record_samples)) {
StartProcessorIfNotStarted();
}
- processor_->AddCurrentStack();
+ processor_->AddCurrentStack(isolate_);
}
-void CpuProfiler::StartCollectingProfile(String* title) {
- StartCollectingProfile(profiles_->GetName(title));
+void CpuProfiler::StartProfiling(String* title, bool record_samples) {
+ StartProfiling(profiles_->GetName(title), record_samples);
}
void CpuProfiler::StartProcessorIfNotStarted() {
if (processor_ == NULL) {
- Isolate* isolate = Isolate::Current();
-
+ Logger* logger = isolate_->logger();
// Disable logging when using the new implementation.
- saved_logging_nesting_ = isolate->logger()->logging_nesting_;
- isolate->logger()->logging_nesting_ = 0;
+ saved_is_logging_ = logger->is_logging_;
+ logger->is_logging_ = false;
generator_ = new ProfileGenerator(profiles_);
- processor_ = new ProfilerEventsProcessor(generator_);
- NoBarrier_Store(&is_profiling_, true);
- processor_->Start();
+ Sampler* sampler = logger->sampler();
+#if V8_CC_MSVC && (_MSC_VER >= 1800)
+ // VS2013 reports "warning C4316: 'v8::internal::ProfilerEventsProcessor'
+ // : object allocated on the heap may not be aligned 64". We need to
+ // figure out if this is a legitimate warning or a compiler bug.
+ #pragma warning(push)
+ #pragma warning(disable:4316)
+#endif
+ processor_ = new ProfilerEventsProcessor(
+ generator_, sampler, sampling_interval_);
+#if V8_CC_MSVC && (_MSC_VER >= 1800)
+ #pragma warning(pop)
+#endif
+ is_profiling_ = true;
// Enumerate stuff we already have in the heap.
- if (isolate->heap()->HasBeenSetUp()) {
- if (!FLAG_prof_browser_mode) {
- bool saved_log_code_flag = FLAG_log_code;
- FLAG_log_code = true;
- isolate->logger()->LogCodeObjects();
- FLAG_log_code = saved_log_code_flag;
- }
- isolate->logger()->LogCompiledFunctions();
- isolate->logger()->LogAccessorCallbacks();
+ ASSERT(isolate_->heap()->HasBeenSetUp());
+ if (!FLAG_prof_browser_mode) {
+ logger->LogCodeObjects();
}
+ logger->LogCompiledFunctions();
+ logger->LogAccessorCallbacks();
+ LogBuiltins();
// Enable stack sampling.
- Sampler* sampler = reinterpret_cast<Sampler*>(isolate->logger()->ticker_);
- if (!sampler->IsActive()) {
- sampler->Start();
- need_to_stop_sampler_ = true;
- }
+ sampler->SetHasProcessingThread(true);
sampler->IncreaseProfilingDepth();
+ processor_->StartSynchronously();
}
}
-CpuProfile* CpuProfiler::StopCollectingProfile(const char* title) {
- const double actual_sampling_rate = generator_->actual_sampling_rate();
+CpuProfile* CpuProfiler::StopProfiling(const char* title) {
+ if (!is_profiling_) return NULL;
StopProcessorIfLastProfile(title);
- CpuProfile* result =
- profiles_->StopProfiling(TokenEnumerator::kNoSecurityToken,
- title,
- actual_sampling_rate);
+ CpuProfile* result = profiles_->StopProfiling(title);
if (result != NULL) {
result->Print();
}
@@ -529,13 +477,11 @@ CpuProfile* CpuProfiler::StopCollectingProfile(const char* title) {
}
-CpuProfile* CpuProfiler::StopCollectingProfile(Object* security_token,
- String* title) {
- const double actual_sampling_rate = generator_->actual_sampling_rate();
+CpuProfile* CpuProfiler::StopProfiling(String* title) {
+ if (!is_profiling_) return NULL;
const char* profile_title = profiles_->GetName(title);
StopProcessorIfLastProfile(profile_title);
- int token = token_enumerator_->GetTokenId(security_token);
- return profiles_->StopProfiling(token, profile_title, actual_sampling_rate);
+ return profiles_->StopProfiling(profile_title);
}
@@ -545,38 +491,32 @@ void CpuProfiler::StopProcessorIfLastProfile(const char* title) {
void CpuProfiler::StopProcessor() {
- Logger* logger = Isolate::Current()->logger();
+ Logger* logger = isolate_->logger();
Sampler* sampler = reinterpret_cast<Sampler*>(logger->ticker_);
- sampler->DecreaseProfilingDepth();
- if (need_to_stop_sampler_) {
- sampler->Stop();
- need_to_stop_sampler_ = false;
- }
- NoBarrier_Store(&is_profiling_, false);
- processor_->Stop();
- processor_->Join();
+ is_profiling_ = false;
+ processor_->StopSynchronously();
delete processor_;
delete generator_;
processor_ = NULL;
generator_ = NULL;
- logger->logging_nesting_ = saved_logging_nesting_;
+ sampler->SetHasProcessingThread(false);
+ sampler->DecreaseProfilingDepth();
+ logger->is_logging_ = saved_is_logging_;
}
-void CpuProfiler::SetUp() {
- Isolate* isolate = Isolate::Current();
- if (isolate->cpu_profiler() == NULL) {
- isolate->set_cpu_profiler(new CpuProfiler());
+void CpuProfiler::LogBuiltins() {
+ Builtins* builtins = isolate_->builtins();
+ ASSERT(builtins->is_initialized());
+ for (int i = 0; i < Builtins::builtin_count; i++) {
+ CodeEventsContainer evt_rec(CodeEventRecord::REPORT_BUILTIN);
+ ReportBuiltinEventRecord* rec = &evt_rec.ReportBuiltinEventRecord_;
+ Builtins::Name id = static_cast<Builtins::Name>(i);
+ rec->start = builtins->builtin(id)->address();
+ rec->builtin_id = id;
+ processor_->Enqueue(evt_rec);
}
}
-void CpuProfiler::TearDown() {
- Isolate* isolate = Isolate::Current();
- if (isolate->cpu_profiler() != NULL) {
- delete isolate->cpu_profiler();
- }
- isolate->set_cpu_profiler(NULL);
-}
-
} } // namespace v8::internal
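The profiler itself becomes a per-isolate object: construction takes the Isolate, StartCollectingProfile/StopCollectingProfile become StartProfiling/StopProfiling with an explicit record_samples flag, and the security-token plumbing disappears. A minimal usage sketch against the new instance API; the profile title is an invented example:

    CpuProfiler* profiler = isolate->cpu_profiler();
    // Must be set while no profile is being recorded (asserted in the setter).
    profiler->set_sampling_interval(TimeDelta::FromMicroseconds(500));

    profiler->StartProfiling("startup", true /* record_samples */);
    // ... run the JavaScript being measured ...
    CpuProfile* profile = profiler->StopProfiling("startup");
    if (profile != NULL) {
      // ... inspect or serialize the profile ...
      profiler->DeleteProfile(profile);   // also removes it from the collection
    }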
diff --git a/deps/v8/src/cpu-profiler.h b/deps/v8/src/cpu-profiler.h
index 9cd448420..fcb9a67dd 100644
--- a/deps/v8/src/cpu-profiler.h
+++ b/deps/v8/src/cpu-profiler.h
@@ -31,6 +31,8 @@
#include "allocation.h"
#include "atomicops.h"
#include "circular-queue.h"
+#include "platform/time.h"
+#include "sampler.h"
#include "unbound-queue.h"
namespace v8 {
@@ -39,15 +41,16 @@ namespace internal {
// Forward declarations.
class CodeEntry;
class CodeMap;
+class CompilationInfo;
class CpuProfile;
class CpuProfilesCollection;
class ProfileGenerator;
-class TokenEnumerator;
#define CODE_EVENTS_TYPE_LIST(V) \
V(CODE_CREATION, CodeCreateEventRecord) \
V(CODE_MOVE, CodeMoveEventRecord) \
- V(SHARED_FUNC_MOVE, SharedFunctionInfoMoveEventRecord)
+ V(SHARED_FUNC_MOVE, SharedFunctionInfoMoveEventRecord) \
+ V(REPORT_BUILTIN, ReportBuiltinEventRecord)
class CodeEventRecord {
@@ -61,7 +64,7 @@ class CodeEventRecord {
#undef DECLARE_TYPE
Type type;
- unsigned order;
+ mutable unsigned order;
};
@@ -94,29 +97,39 @@ class SharedFunctionInfoMoveEventRecord : public CodeEventRecord {
};
+class ReportBuiltinEventRecord : public CodeEventRecord {
+ public:
+ Address start;
+ Builtins::Name builtin_id;
+
+ INLINE(void UpdateCodeMap(CodeMap* code_map));
+};
+
+
class TickSampleEventRecord {
public:
// The parameterless constructor is used when we dequeue data from
// the ticks buffer.
TickSampleEventRecord() { }
- explicit TickSampleEventRecord(unsigned order)
- : filler(1),
- order(order) {
- ASSERT(filler != SamplingCircularQueue::kClear);
- }
+ explicit TickSampleEventRecord(unsigned order) : order(order) { }
- // The first machine word of a TickSampleEventRecord must not ever
- // become equal to SamplingCircularQueue::kClear. As both order and
- // TickSample's first field are not reliable in this sense (order
- // can overflow, TickSample can have all fields reset), we are
- // forced to use an artificial filler field.
- int filler;
unsigned order;
TickSample sample;
+};
+
- static TickSampleEventRecord* cast(void* value) {
- return reinterpret_cast<TickSampleEventRecord*>(value);
+class CodeEventsContainer {
+ public:
+ explicit CodeEventsContainer(
+ CodeEventRecord::Type type = CodeEventRecord::NONE) {
+ generic.type = type;
}
+ union {
+ CodeEventRecord generic;
+#define DECLARE_CLASS(ignore, type) type type##_;
+ CODE_EVENTS_TYPE_LIST(DECLARE_CLASS)
+#undef DECLARE_CLASS
+ };
};
@@ -124,156 +137,143 @@ class TickSampleEventRecord {
// methods called by event producers: VM and stack sampler threads.
class ProfilerEventsProcessor : public Thread {
public:
- explicit ProfilerEventsProcessor(ProfileGenerator* generator);
+ ProfilerEventsProcessor(ProfileGenerator* generator,
+ Sampler* sampler,
+ TimeDelta period);
virtual ~ProfilerEventsProcessor() {}
// Thread control.
virtual void Run();
- inline void Stop() { running_ = false; }
+ void StopSynchronously();
INLINE(bool running()) { return running_; }
+ void Enqueue(const CodeEventsContainer& event);
- // Events adding methods. Called by VM threads.
- void CallbackCreateEvent(Logger::LogEventsAndTags tag,
- const char* prefix, String* name,
- Address start);
- void CodeCreateEvent(Logger::LogEventsAndTags tag,
- String* name,
- String* resource_name, int line_number,
- Address start, unsigned size,
- Address shared);
- void CodeCreateEvent(Logger::LogEventsAndTags tag,
- const char* name,
- Address start, unsigned size);
- void CodeCreateEvent(Logger::LogEventsAndTags tag,
- int args_count,
- Address start, unsigned size);
- void CodeMoveEvent(Address from, Address to);
- void CodeDeleteEvent(Address from);
- void SharedFunctionInfoMoveEvent(Address from, Address to);
- void RegExpCodeCreateEvent(Logger::LogEventsAndTags tag,
- const char* prefix, String* name,
- Address start, unsigned size);
// Puts current stack into tick sample events buffer.
- void AddCurrentStack();
+ void AddCurrentStack(Isolate* isolate);
// Tick sample events are filled directly in the buffer of the circular
// queue (because the structure is of fixed width, but usually not all
// stack frame entries are filled.) This method returns a pointer to the
// next record of the buffer.
- INLINE(TickSample* TickSampleEvent());
+ inline TickSample* StartTickSample();
+ inline void FinishTickSample();
private:
- union CodeEventsContainer {
- CodeEventRecord generic;
-#define DECLARE_CLASS(ignore, type) type type##_;
- CODE_EVENTS_TYPE_LIST(DECLARE_CLASS)
-#undef DECLARE_TYPE
- };
-
// Called from events processing thread (Run() method.)
- bool ProcessCodeEvent(unsigned* dequeue_order);
- bool ProcessTicks(unsigned dequeue_order);
+ bool ProcessCodeEvent();
- INLINE(static bool FilterOutCodeCreateEvent(Logger::LogEventsAndTags tag));
+ enum SampleProcessingResult {
+ OneSampleProcessed,
+ FoundSampleForNextCodeEvent,
+ NoSamplesInQueue
+ };
+ SampleProcessingResult ProcessOneSample();
ProfileGenerator* generator_;
+ Sampler* sampler_;
bool running_;
+ // Sampling period in microseconds.
+ const TimeDelta period_;
UnboundQueue<CodeEventsContainer> events_buffer_;
- SamplingCircularQueue ticks_buffer_;
+ static const size_t kTickSampleBufferSize = 1 * MB;
+ static const size_t kTickSampleQueueLength =
+ kTickSampleBufferSize / sizeof(TickSampleEventRecord);
+ SamplingCircularQueue<TickSampleEventRecord,
+ kTickSampleQueueLength> ticks_buffer_;
UnboundQueue<TickSampleEventRecord> ticks_from_vm_buffer_;
- unsigned enqueue_order_;
+ unsigned last_code_event_id_;
+ unsigned last_processed_code_event_id_;
};
-} } // namespace v8::internal
-
-#define PROFILE(isolate, Call) \
- LOG_CODE_EVENT(isolate, Call); \
- do { \
- if (v8::internal::CpuProfiler::is_profiling(isolate)) { \
- v8::internal::CpuProfiler::Call; \
- } \
+#define PROFILE(IsolateGetter, Call) \
+ do { \
+ Isolate* cpu_profiler_isolate = (IsolateGetter); \
+ v8::internal::Logger* logger = cpu_profiler_isolate->logger(); \
+ CpuProfiler* cpu_profiler = cpu_profiler_isolate->cpu_profiler(); \
+ if (logger->is_logging_code_events() || cpu_profiler->is_profiling()) { \
+ logger->Call; \
+ } \
} while (false)
-namespace v8 {
-namespace internal {
+class CpuProfiler : public CodeEventListener {
+ public:
+ explicit CpuProfiler(Isolate* isolate);
+ CpuProfiler(Isolate* isolate,
+ CpuProfilesCollection* test_collection,
+ ProfileGenerator* test_generator,
+ ProfilerEventsProcessor* test_processor);
-// TODO(isolates): isolatify this class.
-class CpuProfiler {
- public:
- static void SetUp();
- static void TearDown();
-
- static void StartProfiling(const char* title);
- static void StartProfiling(String* title);
- static CpuProfile* StopProfiling(const char* title);
- static CpuProfile* StopProfiling(Object* security_token, String* title);
- static int GetProfilesCount();
- static CpuProfile* GetProfile(Object* security_token, int index);
- static CpuProfile* FindProfile(Object* security_token, unsigned uid);
- static void DeleteAllProfiles();
- static void DeleteProfile(CpuProfile* profile);
- static bool HasDetachedProfiles();
+ virtual ~CpuProfiler();
+
+ void set_sampling_interval(TimeDelta value);
+ void StartProfiling(const char* title, bool record_samples = false);
+ void StartProfiling(String* title, bool record_samples);
+ CpuProfile* StopProfiling(const char* title);
+ CpuProfile* StopProfiling(String* title);
+ int GetProfilesCount();
+ CpuProfile* GetProfile(int index);
+ void DeleteAllProfiles();
+ void DeleteProfile(CpuProfile* profile);
// Invoked from stack sampler (thread or signal handler.)
- static TickSample* TickSampleEvent(Isolate* isolate);
+ inline TickSample* StartTickSample();
+ inline void FinishTickSample();
// Must be called via PROFILE macro, otherwise will crash when
// profiling is not enabled.
- static void CallbackEvent(String* name, Address entry_point);
- static void CodeCreateEvent(Logger::LogEventsAndTags tag,
- Code* code, const char* comment);
- static void CodeCreateEvent(Logger::LogEventsAndTags tag,
- Code* code, String* name);
- static void CodeCreateEvent(Logger::LogEventsAndTags tag,
- Code* code,
- SharedFunctionInfo* shared,
- String* name);
- static void CodeCreateEvent(Logger::LogEventsAndTags tag,
- Code* code,
- SharedFunctionInfo* shared,
- String* source, int line);
- static void CodeCreateEvent(Logger::LogEventsAndTags tag,
- Code* code, int args_count);
- static void CodeMovingGCEvent() {}
- static void CodeMoveEvent(Address from, Address to);
- static void CodeDeleteEvent(Address from);
- static void GetterCallbackEvent(String* name, Address entry_point);
- static void RegExpCodeCreateEvent(Code* code, String* source);
- static void SetterCallbackEvent(String* name, Address entry_point);
- static void SharedFunctionInfoMoveEvent(Address from, Address to);
-
- // TODO(isolates): this doesn't have to use atomics anymore.
-
- static INLINE(bool is_profiling(Isolate* isolate)) {
- CpuProfiler* profiler = isolate->cpu_profiler();
- return profiler != NULL && NoBarrier_Load(&profiler->is_profiling_);
+ virtual void CallbackEvent(Name* name, Address entry_point);
+ virtual void CodeCreateEvent(Logger::LogEventsAndTags tag,
+ Code* code, const char* comment);
+ virtual void CodeCreateEvent(Logger::LogEventsAndTags tag,
+ Code* code, Name* name);
+ virtual void CodeCreateEvent(Logger::LogEventsAndTags tag,
+ Code* code,
+ SharedFunctionInfo* shared,
+ CompilationInfo* info,
+ Name* name);
+ virtual void CodeCreateEvent(Logger::LogEventsAndTags tag,
+ Code* code,
+ SharedFunctionInfo* shared,
+ CompilationInfo* info,
+ Name* source, int line, int column);
+ virtual void CodeCreateEvent(Logger::LogEventsAndTags tag,
+ Code* code, int args_count);
+ virtual void CodeMovingGCEvent() {}
+ virtual void CodeMoveEvent(Address from, Address to);
+ virtual void CodeDeleteEvent(Address from);
+ virtual void GetterCallbackEvent(Name* name, Address entry_point);
+ virtual void RegExpCodeCreateEvent(Code* code, String* source);
+ virtual void SetterCallbackEvent(Name* name, Address entry_point);
+ virtual void SharedFunctionInfoMoveEvent(Address from, Address to);
+
+ INLINE(bool is_profiling() const) { return is_profiling_; }
+ bool* is_profiling_address() {
+ return &is_profiling_;
}
+ ProfileGenerator* generator() const { return generator_; }
+ ProfilerEventsProcessor* processor() const { return processor_; }
+ Isolate* isolate() const { return isolate_; }
+
private:
- CpuProfiler();
- ~CpuProfiler();
- void StartCollectingProfile(const char* title);
- void StartCollectingProfile(String* title);
void StartProcessorIfNotStarted();
- CpuProfile* StopCollectingProfile(const char* title);
- CpuProfile* StopCollectingProfile(Object* security_token, String* title);
void StopProcessorIfLastProfile(const char* title);
void StopProcessor();
void ResetProfiles();
+ void LogBuiltins();
+ Isolate* isolate_;
+ TimeDelta sampling_interval_;
CpuProfilesCollection* profiles_;
unsigned next_profile_uid_;
- TokenEnumerator* token_enumerator_;
ProfileGenerator* generator_;
ProfilerEventsProcessor* processor_;
- int saved_logging_nesting_;
- bool need_to_stop_sampler_;
- Atomic32 is_profiling_;
+ bool saved_is_logging_;
+ bool is_profiling_;
- private:
DISALLOW_COPY_AND_ASSIGN(CpuProfiler);
};
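The PROFILE macro no longer calls static CpuProfiler methods; it resolves the isolate once, then forwards the event to the Logger whenever either code-event logging or profiling is active, and the Logger fans it out to listeners such as this CpuProfiler (now a CodeEventListener). Roughly how a call site behaves under the new macro; the particular event is illustrative:

    PROFILE(isolate, CallbackEvent(name, entry_point));
    // ... expands to approximately:
    //   Isolate* cpu_profiler_isolate = isolate;
    //   Logger* logger = cpu_profiler_isolate->logger();
    //   CpuProfiler* cpu_profiler = cpu_profiler_isolate->cpu_profiler();
    //   if (logger->is_logging_code_events() || cpu_profiler->is_profiling()) {
    //     logger->CallbackEvent(name, entry_point);
    //   }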
diff --git a/deps/v8/src/cpu.cc b/deps/v8/src/cpu.cc
new file mode 100644
index 000000000..2bf51a7f6
--- /dev/null
+++ b/deps/v8/src/cpu.cc
@@ -0,0 +1,466 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "cpu.h"
+
+#if V8_CC_MSVC
+#include <intrin.h> // __cpuid()
+#endif
+#if V8_OS_POSIX
+#include <unistd.h> // sysconf()
+#endif
+
+#include <algorithm>
+#include <cctype>
+#include <climits>
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+
+#include "checks.h"
+#if V8_OS_WIN
+#include "win32-headers.h"
+#endif
+
+namespace v8 {
+namespace internal {
+
+#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
+
+// Define __cpuid() for non-MSVC compilers.
+#if !V8_CC_MSVC
+
+static V8_INLINE void __cpuid(int cpu_info[4], int info_type) {
+#if defined(__i386__) && defined(__pic__)
+ // Make sure to preserve ebx, which contains the pointer
+ // to the GOT in case we're generating PIC.
+ __asm__ volatile (
+ "mov %%ebx, %%edi\n\t"
+ "cpuid\n\t"
+ "xchg %%edi, %%ebx\n\t"
+ : "=a"(cpu_info[0]), "=D"(cpu_info[1]), "=c"(cpu_info[2]), "=d"(cpu_info[3])
+ : "a"(info_type)
+ );
+#else
+ __asm__ volatile (
+ "cpuid \n\t"
+ : "=a"(cpu_info[0]), "=b"(cpu_info[1]), "=c"(cpu_info[2]), "=d"(cpu_info[3])
+ : "a"(info_type)
+ );
+#endif // defined(__i386__) && defined(__pic__)
+}
+
+#endif // !V8_CC_MSVC
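+
+// A minimal usage sketch of the wrapper above (illustrative only): leaf 0
+// reports the highest supported basic leaf in cpu_info[0] and the 12-byte
+// vendor string split across EBX/EDX/ECX, which is how CPU::CPU() below
+// assembles vendor_:
+//
+//   int cpu_info[4];
+//   __cpuid(cpu_info, 0);
+//   char vendor[13];
+//   memcpy(vendor + 0, &cpu_info[1], 4);  // EBX, e.g. "Genu"
+//   memcpy(vendor + 4, &cpu_info[3], 4);  // EDX, e.g. "ineI"
+//   memcpy(vendor + 8, &cpu_info[2], 4);  // ECX, e.g. "ntel"
+//   vendor[12] = '\0';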
+
+#elif V8_HOST_ARCH_ARM || V8_HOST_ARCH_MIPS
+
+#if V8_HOST_ARCH_ARM
+
+// See <uapi/asm/hwcap.h> kernel header.
+/*
+ * HWCAP flags - for elf_hwcap (in kernel) and AT_HWCAP
+ */
+#define HWCAP_SWP (1 << 0)
+#define HWCAP_HALF (1 << 1)
+#define HWCAP_THUMB (1 << 2)
+#define HWCAP_26BIT (1 << 3) /* Play it safe */
+#define HWCAP_FAST_MULT (1 << 4)
+#define HWCAP_FPA (1 << 5)
+#define HWCAP_VFP (1 << 6)
+#define HWCAP_EDSP (1 << 7)
+#define HWCAP_JAVA (1 << 8)
+#define HWCAP_IWMMXT (1 << 9)
+#define HWCAP_CRUNCH (1 << 10)
+#define HWCAP_THUMBEE (1 << 11)
+#define HWCAP_NEON (1 << 12)
+#define HWCAP_VFPv3 (1 << 13)
+#define HWCAP_VFPv3D16 (1 << 14) /* also set for VFPv4-D16 */
+#define HWCAP_TLS (1 << 15)
+#define HWCAP_VFPv4 (1 << 16)
+#define HWCAP_IDIVA (1 << 17)
+#define HWCAP_IDIVT (1 << 18)
+#define HWCAP_VFPD32 (1 << 19) /* set if VFP has 32 regs (not 16) */
+#define HWCAP_IDIV (HWCAP_IDIVA | HWCAP_IDIVT)
+#define HWCAP_LPAE (1 << 20)
+
+#define AT_HWCAP 16
+
+// Read the ELF HWCAP flags by parsing /proc/self/auxv.
+static uint32_t ReadELFHWCaps() {
+ uint32_t result = 0;
+ FILE* fp = fopen("/proc/self/auxv", "r");
+ if (fp != NULL) {
+ struct { uint32_t tag; uint32_t value; } entry;
+ for (;;) {
+ size_t n = fread(&entry, sizeof(entry), 1, fp);
+ if (n == 0 || (entry.tag == 0 && entry.value == 0)) {
+ break;
+ }
+ if (entry.tag == AT_HWCAP) {
+ result = entry.value;
+ break;
+ }
+ }
+ fclose(fp);
+ }
+ return result;
+}
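+
+// Minimal usage sketch (illustrative only): the returned bitmask is meant
+// to be tested against the HWCAP_* flags defined above, e.g.
+//
+//   uint32_t hwcaps = ReadELFHWCaps();
+//   bool has_neon = (hwcaps & HWCAP_NEON) != 0;
+//   bool has_idiv = (hwcaps & HWCAP_IDIVA) != 0;
+//
+// A zero result means the auxiliary vector could not be read (or AT_HWCAP
+// was absent), in which case CPU::CPU() below falls back to the "Features"
+// field of /proc/cpuinfo.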
+
+#endif // V8_HOST_ARCH_ARM
+
+// Extract the information exposed by the kernel via /proc/cpuinfo.
+class CPUInfo V8_FINAL BASE_EMBEDDED {
+ public:
+ CPUInfo() : datalen_(0) {
+ // Get the size of the cpuinfo file by reading it until the end. This is
+ // required because files under /proc do not always return a valid size
+    // when using fseek(0, SEEK_END) + ftell(). Nor can they be mmap()-ed.
+ static const char PATHNAME[] = "/proc/cpuinfo";
+ FILE* fp = fopen(PATHNAME, "r");
+ if (fp != NULL) {
+ for (;;) {
+ char buffer[256];
+ size_t n = fread(buffer, 1, sizeof(buffer), fp);
+ if (n == 0) {
+ break;
+ }
+ datalen_ += n;
+ }
+ fclose(fp);
+ }
+
+ // Read the contents of the cpuinfo file.
+ data_ = new char[datalen_ + 1];
+ fp = fopen(PATHNAME, "r");
+ if (fp != NULL) {
+ for (size_t offset = 0; offset < datalen_; ) {
+ size_t n = fread(data_ + offset, 1, datalen_ - offset, fp);
+ if (n == 0) {
+ break;
+ }
+ offset += n;
+ }
+ fclose(fp);
+ }
+
+ // Zero-terminate the data.
+ data_[datalen_] = '\0';
+ }
+
+ ~CPUInfo() {
+ delete[] data_;
+ }
+
+  // Extract the content of the first occurrence of a given field in
+ // the content of the cpuinfo file and return it as a heap-allocated
+ // string that must be freed by the caller using delete[].
+ // Return NULL if not found.
+ char* ExtractField(const char* field) const {
+ ASSERT(field != NULL);
+
+    // Look for the first field occurrence, and ensure it starts the line.
+ size_t fieldlen = strlen(field);
+ char* p = data_;
+ for (;;) {
+ p = strstr(p, field);
+ if (p == NULL) {
+ return NULL;
+ }
+ if (p == data_ || p[-1] == '\n') {
+ break;
+ }
+ p += fieldlen;
+ }
+
+ // Skip to the first colon followed by a space.
+ p = strchr(p + fieldlen, ':');
+ if (p == NULL || !isspace(p[1])) {
+ return NULL;
+ }
+ p += 2;
+
+ // Find the end of the line.
+ char* q = strchr(p, '\n');
+ if (q == NULL) {
+ q = data_ + datalen_;
+ }
+
+ // Copy the line into a heap-allocated buffer.
+ size_t len = q - p;
+ char* result = new char[len + 1];
+ if (result != NULL) {
+ memcpy(result, p, len);
+ result[len] = '\0';
+ }
+ return result;
+ }
+
+ private:
+ char* data_;
+ size_t datalen_;
+};
+
+
+// Checks that a space-separated list of items contains one given 'item'.
+static bool HasListItem(const char* list, const char* item) {
+ ssize_t item_len = strlen(item);
+ const char* p = list;
+ if (p != NULL) {
+ while (*p != '\0') {
+ // Skip whitespace.
+ while (isspace(*p)) ++p;
+
+ // Find end of current list item.
+ const char* q = p;
+ while (*q != '\0' && !isspace(*q)) ++q;
+
+ if (item_len == q - p && memcmp(p, item, item_len) == 0) {
+ return true;
+ }
+
+ // Skip to next item.
+ p = q;
+ }
+ }
+ return false;
+}
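+
+// Minimal usage sketch (illustrative only): ExtractField() returns a
+// heap-allocated copy of the field value (or NULL if absent) that the
+// caller must delete[], and HasListItem() tolerates a NULL list:
+//
+//   CPUInfo cpu_info;
+//   char* features = cpu_info.ExtractField("Features");
+//   bool has_neon = HasListItem(features, "neon");
+//   delete[] features;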
+
+#endif // V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
+
+CPU::CPU() : stepping_(0),
+ model_(0),
+ ext_model_(0),
+ family_(0),
+ ext_family_(0),
+ type_(0),
+ implementer_(0),
+ architecture_(0),
+ part_(0),
+ has_fpu_(false),
+ has_cmov_(false),
+ has_sahf_(false),
+ has_mmx_(false),
+ has_sse_(false),
+ has_sse2_(false),
+ has_sse3_(false),
+ has_ssse3_(false),
+ has_sse41_(false),
+ has_sse42_(false),
+ has_idiva_(false),
+ has_neon_(false),
+ has_thumbee_(false),
+ has_vfp_(false),
+ has_vfp3_(false),
+ has_vfp3_d32_(false) {
+ memcpy(vendor_, "Unknown", 8);
+#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
+ int cpu_info[4];
+
+ // __cpuid with an InfoType argument of 0 returns the number of
+ // valid Ids in CPUInfo[0] and the CPU identification string in
+ // the other three array elements. The CPU identification string is
+ // not in linear order. The code below arranges the information
+ // in a human readable form. The human readable order is CPUInfo[1] |
+ // CPUInfo[3] | CPUInfo[2]. CPUInfo[2] and CPUInfo[3] are swapped
+  // before using memcpy to copy these three array elements to vendor_.
+ __cpuid(cpu_info, 0);
+ unsigned num_ids = cpu_info[0];
+ std::swap(cpu_info[2], cpu_info[3]);
+ memcpy(vendor_, cpu_info + 1, 12);
+ vendor_[12] = '\0';
+
+ // Interpret CPU feature information.
+ if (num_ids > 0) {
+ __cpuid(cpu_info, 1);
+ stepping_ = cpu_info[0] & 0xf;
+ model_ = ((cpu_info[0] >> 4) & 0xf) + ((cpu_info[0] >> 12) & 0xf0);
+ family_ = (cpu_info[0] >> 8) & 0xf;
+ type_ = (cpu_info[0] >> 12) & 0x3;
+ ext_model_ = (cpu_info[0] >> 16) & 0xf;
+ ext_family_ = (cpu_info[0] >> 20) & 0xff;
+ has_fpu_ = (cpu_info[3] & 0x00000001) != 0;
+ has_cmov_ = (cpu_info[3] & 0x00008000) != 0;
+ has_mmx_ = (cpu_info[3] & 0x00800000) != 0;
+ has_sse_ = (cpu_info[3] & 0x02000000) != 0;
+ has_sse2_ = (cpu_info[3] & 0x04000000) != 0;
+ has_sse3_ = (cpu_info[2] & 0x00000001) != 0;
+ has_ssse3_ = (cpu_info[2] & 0x00000200) != 0;
+ has_sse41_ = (cpu_info[2] & 0x00080000) != 0;
+ has_sse42_ = (cpu_info[2] & 0x00100000) != 0;
+ }
+
+ // Query extended IDs.
+ __cpuid(cpu_info, 0x80000000);
+ unsigned num_ext_ids = cpu_info[0];
+
+ // Interpret extended CPU feature information.
+ if (num_ext_ids > 0x80000000) {
+ __cpuid(cpu_info, 0x80000001);
+ // SAHF is always available in compat/legacy mode,
+ // but must be probed in long mode.
+#if V8_HOST_ARCH_IA32
+ has_sahf_ = true;
+#else
+ has_sahf_ = (cpu_info[2] & 0x00000001) != 0;
+#endif
+ }
+#elif V8_HOST_ARCH_ARM
+ CPUInfo cpu_info;
+
+  // Extract implementer from the "CPU implementer" field.
+ char* implementer = cpu_info.ExtractField("CPU implementer");
+ if (implementer != NULL) {
+    char* end;
+ implementer_ = strtol(implementer, &end, 0);
+ if (end == implementer) {
+ implementer_ = 0;
+ }
+ delete[] implementer;
+ }
+
+ // Extract part number from the "CPU part" field.
+ char* part = cpu_info.ExtractField("CPU part");
+ if (part != NULL) {
+    char* end;
+ part_ = strtol(part, &end, 0);
+ if (end == part) {
+ part_ = 0;
+ }
+ delete[] part;
+ }
+
+  // Extract architecture from the "CPU architecture" field.
+  // The list is well-known, unlike the output of
+  // the 'Processor' field, which can vary greatly.
+  // See the definition of the 'proc_arch' array in
+  // $KERNEL/arch/arm/kernel/setup.c and the 'c_show' function in
+  // the same file.
+ char* architecture = cpu_info.ExtractField("CPU architecture");
+ if (architecture != NULL) {
+ char* end;
+ architecture_ = strtol(architecture, &end, 10);
+ if (end == architecture) {
+ architecture_ = 0;
+ }
+ delete[] architecture;
+
+ // Unfortunately, it seems that certain ARMv6-based CPUs
+ // report an incorrect architecture number of 7!
+ //
+ // See http://code.google.com/p/android/issues/detail?id=10812
+ //
+ // We try to correct this by looking at the 'elf_format'
+    // field reported in the 'Processor' field, which is of the form
+    // "(v7l)" for an ARMv7-based CPU and "(v6l)" for an ARMv6 one.
+    // For example, the Raspberry Pi is one popular ARMv6 device that
+    // reports architecture 7.
+ if (architecture_ == 7) {
+ char* processor = cpu_info.ExtractField("Processor");
+ if (HasListItem(processor, "(v6l)")) {
+ architecture_ = 6;
+ }
+ delete[] processor;
+ }
+ }
+
+ // Try to extract the list of CPU features from ELF hwcaps.
+ uint32_t hwcaps = ReadELFHWCaps();
+ if (hwcaps != 0) {
+ has_idiva_ = (hwcaps & HWCAP_IDIVA) != 0;
+ has_neon_ = (hwcaps & HWCAP_NEON) != 0;
+ has_thumbee_ = (hwcaps & HWCAP_THUMBEE) != 0;
+ has_vfp_ = (hwcaps & HWCAP_VFP) != 0;
+ has_vfp3_ = (hwcaps & (HWCAP_VFPv3 | HWCAP_VFPv3D16 | HWCAP_VFPv4)) != 0;
+ has_vfp3_d32_ = (has_vfp3_ && ((hwcaps & HWCAP_VFPv3D16) == 0 ||
+ (hwcaps & HWCAP_VFPD32) != 0));
+ } else {
+    // Try to fall back to the "Features" field of /proc/cpuinfo.
+ char* features = cpu_info.ExtractField("Features");
+ has_idiva_ = HasListItem(features, "idiva");
+ has_neon_ = HasListItem(features, "neon");
+ has_thumbee_ = HasListItem(features, "thumbee");
+ has_vfp_ = HasListItem(features, "vfp");
+ if (HasListItem(features, "vfpv3")) {
+ has_vfp3_ = true;
+ has_vfp3_d32_ = true;
+ } else if (HasListItem(features, "vfpv3d16")) {
+ has_vfp3_ = true;
+ }
+ delete[] features;
+ }
+
+ // Some old kernels will report vfp not vfpv3. Here we make an attempt
+ // to detect vfpv3 by checking for vfp *and* neon, since neon is only
+ // available on architectures with vfpv3. Checking neon on its own is
+ // not enough as it is possible to have neon without vfp.
+ if (has_vfp_ && has_neon_) {
+ has_vfp3_ = true;
+ }
+
+ // VFPv3 implies ARMv7, see ARM DDI 0406B, page A1-6.
+ if (architecture_ < 7 && has_vfp3_) {
+ architecture_ = 7;
+ }
+
+ // ARMv7 implies ThumbEE.
+ if (architecture_ >= 7) {
+ has_thumbee_ = true;
+ }
+
+ // The earliest architecture with ThumbEE is ARMv6T2.
+ if (has_thumbee_ && architecture_ < 6) {
+ architecture_ = 6;
+ }
+
+ // We don't support any FPUs other than VFP.
+ has_fpu_ = has_vfp_;
+#elif V8_HOST_ARCH_MIPS
+ // Simple detection of FPU at runtime for Linux.
+ // It is based on /proc/cpuinfo, which reveals hardware configuration
+ // to user-space applications. According to MIPS (early 2010), no similar
+ // facility is universally available on the MIPS architectures,
+ // so it's up to individual OSes to provide such.
+ CPUInfo cpu_info;
+ char* cpu_model = cpu_info.ExtractField("cpu model");
+ has_fpu_ = HasListItem(cpu_model, "FPU");
+ delete[] cpu_model;
+#endif
+}
+
+
+// static
+int CPU::NumberOfProcessorsOnline() {
+#if V8_OS_WIN
+ SYSTEM_INFO info;
+ GetSystemInfo(&info);
+ return info.dwNumberOfProcessors;
+#else
+ return static_cast<int>(sysconf(_SC_NPROCESSORS_ONLN));
+#endif
+}
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/cpu.h b/deps/v8/src/cpu.h
index 247af71aa..b2e9f7da7 100644
--- a/deps/v8/src/cpu.h
+++ b/deps/v8/src/cpu.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2006-2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -44,14 +44,64 @@ namespace internal {
// ----------------------------------------------------------------------------
// CPU
//
-// This class has static methods for the architecture specific functions. Add
-// methods here to cope with differences between the supported architectures.
+// Query information about the processor.
//
-// For each architecture the file cpu_<arch>.cc contains the implementation of
-// these functions.
+// This class also has static methods for the architecture specific functions.
+// Add methods here to cope with differences between the supported
+// architectures. For each architecture the file cpu_<arch>.cc contains the
+// implementation of these static functions.
-class CPU : public AllStatic {
+class CPU V8_FINAL BASE_EMBEDDED {
public:
+ CPU();
+
+ // x86 CPUID information
+ const char* vendor() const { return vendor_; }
+ int stepping() const { return stepping_; }
+ int model() const { return model_; }
+ int ext_model() const { return ext_model_; }
+ int family() const { return family_; }
+ int ext_family() const { return ext_family_; }
+ int type() const { return type_; }
+
+ // arm implementer/part information
+ int implementer() const { return implementer_; }
+ static const int ARM = 0x41;
+ static const int QUALCOMM = 0x51;
+ int architecture() const { return architecture_; }
+ int part() const { return part_; }
+ static const int ARM_CORTEX_A5 = 0xc05;
+ static const int ARM_CORTEX_A7 = 0xc07;
+ static const int ARM_CORTEX_A8 = 0xc08;
+ static const int ARM_CORTEX_A9 = 0xc09;
+ static const int ARM_CORTEX_A12 = 0xc0c;
+ static const int ARM_CORTEX_A15 = 0xc0f;
+
+ // General features
+ bool has_fpu() const { return has_fpu_; }
+
+ // x86 features
+ bool has_cmov() const { return has_cmov_; }
+ bool has_sahf() const { return has_sahf_; }
+ bool has_mmx() const { return has_mmx_; }
+ bool has_sse() const { return has_sse_; }
+ bool has_sse2() const { return has_sse2_; }
+ bool has_sse3() const { return has_sse3_; }
+ bool has_ssse3() const { return has_ssse3_; }
+ bool has_sse41() const { return has_sse41_; }
+ bool has_sse42() const { return has_sse42_; }
+
+ // arm features
+ bool has_idiva() const { return has_idiva_; }
+ bool has_neon() const { return has_neon_; }
+ bool has_thumbee() const { return has_thumbee_; }
+ bool has_vfp() const { return has_vfp_; }
+ bool has_vfp3() const { return has_vfp3_; }
+ bool has_vfp3_d32() const { return has_vfp3_d32_; }
+
+ // Returns the number of processors online.
+ static int NumberOfProcessorsOnline();
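+
+  // Minimal usage sketch (illustrative only): construction probes the host
+  // processor once, after which the accessors above are cheap getters:
+  //
+  //   CPU cpu;
+  //   if (cpu.has_sse2()) { /* safe to emit SSE2 code */ }
+  //   int cores = CPU::NumberOfProcessorsOnline();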
+
// Initializes the cpu architecture support. Called once at VM startup.
static void SetUp();
@@ -60,8 +110,33 @@ class CPU : public AllStatic {
// Flush instruction cache.
static void FlushICache(void* start, size_t size);
- // Try to activate a system level debugger.
- static void DebugBreak();
+ private:
+ char vendor_[13];
+ int stepping_;
+ int model_;
+ int ext_model_;
+ int family_;
+ int ext_family_;
+ int type_;
+ int implementer_;
+ int architecture_;
+ int part_;
+ bool has_fpu_;
+ bool has_cmov_;
+ bool has_sahf_;
+ bool has_mmx_;
+ bool has_sse_;
+ bool has_sse2_;
+ bool has_sse3_;
+ bool has_ssse3_;
+ bool has_sse41_;
+ bool has_sse42_;
+ bool has_idiva_;
+ bool has_neon_;
+ bool has_thumbee_;
+ bool has_vfp_;
+ bool has_vfp3_;
+ bool has_vfp3_d32_;
};
} } // namespace v8::internal
diff --git a/deps/v8/src/d8-debug.cc b/deps/v8/src/d8-debug.cc
index de0faa8ae..379631cb7 100644
--- a/deps/v8/src/d8-debug.cc
+++ b/deps/v8/src/d8-debug.cc
@@ -29,7 +29,6 @@
#include "d8.h"
#include "d8-debug.h"
-#include "platform.h"
#include "debug-agent.h"
@@ -50,12 +49,12 @@ void PrintPrompt() {
}
-void HandleDebugEvent(DebugEvent event,
- Handle<Object> exec_state,
- Handle<Object> event_data,
- Handle<Value> data) {
- HandleScope scope;
+void HandleDebugEvent(const Debug::EventDetails& event_details) {
+ // TODO(svenpanne) There should be a way to retrieve this in the callback.
+ Isolate* isolate = Isolate::GetCurrent();
+ HandleScope scope(isolate);
+ DebugEvent event = event_details.GetEvent();
// Check for handled event.
if (event != Break && event != Exception && event != AfterCompile) {
return;
@@ -65,19 +64,20 @@ void HandleDebugEvent(DebugEvent event,
// Get the toJSONProtocol function on the event and get the JSON format.
Local<String> to_json_fun_name = String::New("toJSONProtocol");
+ Handle<Object> event_data = event_details.GetEventData();
Local<Function> to_json_fun =
- Function::Cast(*event_data->Get(to_json_fun_name));
+ Local<Function>::Cast(event_data->Get(to_json_fun_name));
Local<Value> event_json = to_json_fun->Call(event_data, 0, NULL);
if (try_catch.HasCaught()) {
- Shell::ReportException(&try_catch);
+ Shell::ReportException(isolate, &try_catch);
return;
}
// Print the event details.
Handle<Object> details =
- Shell::DebugMessageDetails(Handle<String>::Cast(event_json));
+ Shell::DebugMessageDetails(isolate, Handle<String>::Cast(event_json));
if (try_catch.HasCaught()) {
- Shell::ReportException(&try_catch);
+ Shell::ReportException(isolate, &try_catch);
return;
}
String::Utf8Value str(details->Get(String::New("text")));
@@ -89,11 +89,12 @@ void HandleDebugEvent(DebugEvent event,
// Get the debug command processor.
Local<String> fun_name = String::New("debugCommandProcessor");
- Local<Function> fun = Function::Cast(*exec_state->Get(fun_name));
+ Handle<Object> exec_state = event_details.GetExecutionState();
+ Local<Function> fun = Local<Function>::Cast(exec_state->Get(fun_name));
Local<Object> cmd_processor =
- Object::Cast(*fun->Call(exec_state, 0, NULL));
+ Local<Object>::Cast(fun->Call(exec_state, 0, NULL));
if (try_catch.HasCaught()) {
- Shell::ReportException(&try_catch);
+ Shell::ReportException(isolate, &try_catch);
return;
}
@@ -112,9 +113,9 @@ void HandleDebugEvent(DebugEvent event,
// Convert the debugger command to a JSON debugger request.
Handle<Value> request =
- Shell::DebugCommandToJSONRequest(String::New(command));
+ Shell::DebugCommandToJSONRequest(isolate, String::New(command));
if (try_catch.HasCaught()) {
- Shell::ReportException(&try_catch);
+ Shell::ReportException(isolate, &try_catch);
continue;
}
@@ -138,15 +139,16 @@ void HandleDebugEvent(DebugEvent event,
args[0] = request;
Handle<Value> response_val = fun->Call(cmd_processor, kArgc, args);
if (try_catch.HasCaught()) {
- Shell::ReportException(&try_catch);
+ Shell::ReportException(isolate, &try_catch);
continue;
}
Handle<String> response = Handle<String>::Cast(response_val);
// Convert the debugger response into text details and the running state.
- Handle<Object> response_details = Shell::DebugMessageDetails(response);
+ Handle<Object> response_details =
+ Shell::DebugMessageDetails(isolate, response);
if (try_catch.HasCaught()) {
- Shell::ReportException(&try_catch);
+ Shell::ReportException(isolate, &try_catch);
continue;
}
String::Utf8Value text_str(response_details->Get(String::New("text")));
@@ -159,8 +161,8 @@ void HandleDebugEvent(DebugEvent event,
}
-void RunRemoteDebugger(int port) {
- RemoteDebugger debugger(port);
+void RunRemoteDebugger(Isolate* isolate, int port) {
+ RemoteDebugger debugger(isolate, port);
debugger.Run();
}
@@ -168,21 +170,14 @@ void RunRemoteDebugger(int port) {
void RemoteDebugger::Run() {
bool ok;
- // Make sure that socket support is initialized.
- ok = i::Socket::SetUp();
- if (!ok) {
- printf("Unable to initialize socket support %d\n", i::Socket::LastError());
- return;
- }
-
// Connect to the debugger agent.
- conn_ = i::OS::CreateSocket();
+ conn_ = new i::Socket;
static const int kPortStrSize = 6;
char port_str[kPortStrSize];
i::OS::SNPrintF(i::Vector<char>(port_str, kPortStrSize), "%d", port_);
ok = conn_->Connect("localhost", port_str);
if (!ok) {
- printf("Unable to connect to debug agent %d\n", i::Socket::LastError());
+ printf("Unable to connect to debug agent %d\n", i::Socket::GetLastError());
return;
}
@@ -198,7 +193,7 @@ void RemoteDebugger::Run() {
// Process events received from debugged VM and from the keyboard.
bool terminate = false;
while (!terminate) {
- event_available_->Wait();
+ event_available_.Wait();
RemoteDebuggerEvent* event = GetEvent();
switch (event->type()) {
case RemoteDebuggerEvent::kMessage:
@@ -245,7 +240,7 @@ void RemoteDebugger::ConnectionClosed() {
void RemoteDebugger::AddEvent(RemoteDebuggerEvent* event) {
- i::ScopedLock lock(event_access_);
+ i::LockGuard<i::Mutex> lock_guard(&event_access_);
if (head_ == NULL) {
ASSERT(tail_ == NULL);
head_ = event;
@@ -255,12 +250,12 @@ void RemoteDebugger::AddEvent(RemoteDebuggerEvent* event) {
tail_->set_next(event);
tail_ = event;
}
- event_available_->Signal();
+ event_available_.Signal();
}
RemoteDebuggerEvent* RemoteDebugger::GetEvent() {
- i::ScopedLock lock(event_access_);
+ i::LockGuard<i::Mutex> lock_guard(&event_access_);
ASSERT(head_ != NULL);
RemoteDebuggerEvent* result = head_;
head_ = head_->next();
@@ -273,15 +268,16 @@ RemoteDebuggerEvent* RemoteDebugger::GetEvent() {
void RemoteDebugger::HandleMessageReceived(char* message) {
- Locker lock;
- HandleScope scope;
+ Locker lock(isolate_);
+ HandleScope scope(isolate_);
// Print the event details.
TryCatch try_catch;
Handle<Object> details =
- Shell::DebugMessageDetails(Handle<String>::Cast(String::New(message)));
+ Shell::DebugMessageDetails(isolate_,
+ Handle<String>::Cast(String::New(message)));
if (try_catch.HasCaught()) {
- Shell::ReportException(&try_catch);
+ Shell::ReportException(isolate_, &try_catch);
PrintPrompt();
return;
}
@@ -302,15 +298,15 @@ void RemoteDebugger::HandleMessageReceived(char* message) {
void RemoteDebugger::HandleKeyboardCommand(char* command) {
- Locker lock;
- HandleScope scope;
+ Locker lock(isolate_);
+ HandleScope scope(isolate_);
// Convert the debugger command to a JSON debugger request.
TryCatch try_catch;
Handle<Value> request =
- Shell::DebugCommandToJSONRequest(String::New(command));
+ Shell::DebugCommandToJSONRequest(isolate_, String::New(command));
if (try_catch.HasCaught()) {
- Shell::ReportException(&try_catch);
+ Shell::ReportException(isolate_, &try_catch);
PrintPrompt();
return;
}
diff --git a/deps/v8/src/d8-debug.h b/deps/v8/src/d8-debug.h
index aeff3c121..55876229a 100644
--- a/deps/v8/src/d8-debug.h
+++ b/deps/v8/src/d8-debug.h
@@ -36,14 +36,11 @@
namespace v8 {
-void HandleDebugEvent(DebugEvent event,
- Handle<Object> exec_state,
- Handle<Object> event_data,
- Handle<Value> data);
+void HandleDebugEvent(const Debug::EventDetails& event_details);
// Start the remote debugger, connecting to a V8 debugger agent on the specified
// port.
-void RunRemoteDebugger(int port);
+void RunRemoteDebugger(Isolate* isolate, int port);
// Forward declarations.
class RemoteDebuggerEvent;
@@ -53,10 +50,10 @@ class ReceiverThread;
// Remote debugging class.
class RemoteDebugger {
public:
- explicit RemoteDebugger(int port)
- : port_(port),
- event_access_(i::OS::CreateMutex()),
- event_available_(i::OS::CreateSemaphore(0)),
+ explicit RemoteDebugger(Isolate* isolate, int port)
+ : isolate_(isolate),
+ port_(port),
+ event_available_(0),
head_(NULL), tail_(NULL) {}
void Run();
@@ -79,14 +76,15 @@ class RemoteDebugger {
// Get connection to agent in debugged V8.
i::Socket* conn() { return conn_; }
+ Isolate* isolate_;
int port_; // Port used to connect to debugger V8.
i::Socket* conn_; // Connection to debugger agent in debugged V8.
// Linked list of events from debugged V8 and from keyboard input. Access to
// the list is guarded by a mutex and a semaphore signals new items in the
// list.
- i::Mutex* event_access_;
- i::Semaphore* event_available_;
+ i::Mutex event_access_;
+ i::Semaphore event_available_;
RemoteDebuggerEvent* head_;
RemoteDebuggerEvent* tail_;
diff --git a/deps/v8/src/d8-posix.cc b/deps/v8/src/d8-posix.cc
index 8a278e4e4..81c15ae74 100644
--- a/deps/v8/src/d8-posix.cc
+++ b/deps/v8/src/d8-posix.cc
@@ -238,14 +238,15 @@ class ExecArgs {
// Gets the optional timeouts from the arguments to the system() call.
-static bool GetTimeouts(const Arguments& args,
+static bool GetTimeouts(const v8::FunctionCallbackInfo<v8::Value>& args,
int* read_timeout,
int* total_timeout) {
if (args.Length() > 3) {
if (args[3]->IsNumber()) {
*total_timeout = args[3]->Int32Value();
} else {
- ThrowException(String::New("system: Argument 4 must be a number"));
+ args.GetIsolate()->ThrowException(
+ String::New("system: Argument 4 must be a number"));
return false;
}
}
@@ -253,7 +254,8 @@ static bool GetTimeouts(const Arguments& args,
if (args[2]->IsNumber()) {
*read_timeout = args[2]->Int32Value();
} else {
- ThrowException(String::New("system: Argument 3 must be a number"));
+ args.GetIsolate()->ThrowException(
+ String::New("system: Argument 3 must be a number"));
return false;
}
}
@@ -448,25 +450,31 @@ static bool WaitForChild(int pid,
// Implementation of the system() function (see d8.h for details).
-Handle<Value> Shell::System(const Arguments& args) {
- HandleScope scope;
+void Shell::System(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ HandleScope scope(args.GetIsolate());
int read_timeout = -1;
int total_timeout = -1;
- if (!GetTimeouts(args, &read_timeout, &total_timeout)) return v8::Undefined();
+ if (!GetTimeouts(args, &read_timeout, &total_timeout)) return;
Handle<Array> command_args;
if (args.Length() > 1) {
if (!args[1]->IsArray()) {
- return ThrowException(String::New("system: Argument 2 must be an array"));
+ args.GetIsolate()->ThrowException(
+ String::New("system: Argument 2 must be an array"));
+ return;
}
command_args = Handle<Array>::Cast(args[1]);
} else {
command_args = Array::New(0);
}
if (command_args->Length() > ExecArgs::kMaxArgs) {
- return ThrowException(String::New("Too many arguments to system()"));
+ args.GetIsolate()->ThrowException(
+ String::New("Too many arguments to system()"));
+ return;
}
if (args.Length() < 1) {
- return ThrowException(String::New("Too few arguments to system()"));
+ args.GetIsolate()->ThrowException(
+ String::New("Too few arguments to system()"));
+ return;
}
struct timeval start_time;
@@ -474,16 +482,20 @@ Handle<Value> Shell::System(const Arguments& args) {
ExecArgs exec_args;
if (!exec_args.Init(args[0], command_args)) {
- return v8::Undefined();
+ return;
}
int exec_error_fds[2];
int stdout_fds[2];
if (pipe(exec_error_fds) != 0) {
- return ThrowException(String::New("pipe syscall failed."));
+ args.GetIsolate()->ThrowException(
+ String::New("pipe syscall failed."));
+ return;
}
if (pipe(stdout_fds) != 0) {
- return ThrowException(String::New("pipe syscall failed."));
+ args.GetIsolate()->ThrowException(
+ String::New("pipe syscall failed."));
+ return;
}
pid_t pid = fork();
@@ -499,7 +511,7 @@ Handle<Value> Shell::System(const Arguments& args) {
OpenFDCloser error_read_closer(exec_error_fds[kReadFD]);
OpenFDCloser stdout_read_closer(stdout_fds[kReadFD]);
- if (!ChildLaunchedOK(exec_error_fds)) return v8::Undefined();
+ if (!ChildLaunchedOK(exec_error_fds)) return;
Handle<Value> accumulator = GetStdout(stdout_fds[kReadFD],
start_time,
@@ -507,7 +519,8 @@ Handle<Value> Shell::System(const Arguments& args) {
total_timeout);
if (accumulator->IsUndefined()) {
kill(pid, SIGINT); // On timeout, kill the subprocess.
- return accumulator;
+ args.GetReturnValue().Set(accumulator);
+ return;
}
if (!WaitForChild(pid,
@@ -515,42 +528,47 @@ Handle<Value> Shell::System(const Arguments& args) {
start_time,
read_timeout,
total_timeout)) {
- return v8::Undefined();
+ return;
}
- return scope.Close(accumulator);
+ args.GetReturnValue().Set(accumulator);
}
-Handle<Value> Shell::ChangeDirectory(const Arguments& args) {
+void Shell::ChangeDirectory(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1) {
const char* message = "chdir() takes one argument";
- return ThrowException(String::New(message));
+ args.GetIsolate()->ThrowException(String::New(message));
+ return;
}
String::Utf8Value directory(args[0]);
if (*directory == NULL) {
const char* message = "os.chdir(): String conversion of argument failed.";
- return ThrowException(String::New(message));
+ args.GetIsolate()->ThrowException(String::New(message));
+ return;
}
if (chdir(*directory) != 0) {
- return ThrowException(String::New(strerror(errno)));
+ args.GetIsolate()->ThrowException(String::New(strerror(errno)));
+ return;
}
- return v8::Undefined();
}
-Handle<Value> Shell::SetUMask(const Arguments& args) {
+void Shell::SetUMask(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1) {
const char* message = "umask() takes one argument";
- return ThrowException(String::New(message));
+ args.GetIsolate()->ThrowException(String::New(message));
+ return;
}
if (args[0]->IsNumber()) {
mode_t mask = args[0]->Int32Value();
int previous = umask(mask);
- return Number::New(previous);
+ args.GetReturnValue().Set(previous);
+ return;
} else {
const char* message = "umask() argument must be numeric";
- return ThrowException(String::New(message));
+ args.GetIsolate()->ThrowException(String::New(message));
+ return;
}
}
@@ -598,79 +616,85 @@ static bool mkdirp(char* directory, mode_t mask) {
}
-Handle<Value> Shell::MakeDirectory(const Arguments& args) {
+void Shell::MakeDirectory(const v8::FunctionCallbackInfo<v8::Value>& args) {
mode_t mask = 0777;
if (args.Length() == 2) {
if (args[1]->IsNumber()) {
mask = args[1]->Int32Value();
} else {
const char* message = "mkdirp() second argument must be numeric";
- return ThrowException(String::New(message));
+ args.GetIsolate()->ThrowException(String::New(message));
+ return;
}
} else if (args.Length() != 1) {
const char* message = "mkdirp() takes one or two arguments";
- return ThrowException(String::New(message));
+ args.GetIsolate()->ThrowException(String::New(message));
+ return;
}
String::Utf8Value directory(args[0]);
if (*directory == NULL) {
const char* message = "os.mkdirp(): String conversion of argument failed.";
- return ThrowException(String::New(message));
+ args.GetIsolate()->ThrowException(String::New(message));
+ return;
}
mkdirp(*directory, mask);
- return v8::Undefined();
}
-Handle<Value> Shell::RemoveDirectory(const Arguments& args) {
+void Shell::RemoveDirectory(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1) {
const char* message = "rmdir() takes one or two arguments";
- return ThrowException(String::New(message));
+ args.GetIsolate()->ThrowException(String::New(message));
+ return;
}
String::Utf8Value directory(args[0]);
if (*directory == NULL) {
const char* message = "os.rmdir(): String conversion of argument failed.";
- return ThrowException(String::New(message));
+ args.GetIsolate()->ThrowException(String::New(message));
+ return;
}
rmdir(*directory);
- return v8::Undefined();
}
-Handle<Value> Shell::SetEnvironment(const Arguments& args) {
+void Shell::SetEnvironment(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 2) {
const char* message = "setenv() takes two arguments";
- return ThrowException(String::New(message));
+ args.GetIsolate()->ThrowException(String::New(message));
+ return;
}
String::Utf8Value var(args[0]);
String::Utf8Value value(args[1]);
if (*var == NULL) {
const char* message =
"os.setenv(): String conversion of variable name failed.";
- return ThrowException(String::New(message));
+ args.GetIsolate()->ThrowException(String::New(message));
+ return;
}
if (*value == NULL) {
const char* message =
"os.setenv(): String conversion of variable contents failed.";
- return ThrowException(String::New(message));
+ args.GetIsolate()->ThrowException(String::New(message));
+ return;
}
setenv(*var, *value, 1);
- return v8::Undefined();
}
-Handle<Value> Shell::UnsetEnvironment(const Arguments& args) {
+void Shell::UnsetEnvironment(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1) {
const char* message = "unsetenv() takes one argument";
- return ThrowException(String::New(message));
+ args.GetIsolate()->ThrowException(String::New(message));
+ return;
}
String::Utf8Value var(args[0]);
if (*var == NULL) {
const char* message =
"os.setenv(): String conversion of variable name failed.";
- return ThrowException(String::New(message));
+ args.GetIsolate()->ThrowException(String::New(message));
+ return;
}
unsetenv(*var);
- return v8::Undefined();
}
diff --git a/deps/v8/src/d8-readline.cc b/deps/v8/src/d8-readline.cc
index ed7721c51..0226f31c0 100644
--- a/deps/v8/src/d8-readline.cc
+++ b/deps/v8/src/d8-readline.cc
@@ -25,8 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
#include <cstdio> // NOLINT
+#include <string.h> // NOLINT
#include <readline/readline.h> // NOLINT
#include <readline/history.h> // NOLINT
@@ -35,7 +35,6 @@
#include "d8.h"
-
// There are incompatibilities between different versions and different
// implementations of readline. This smooths out one known incompatibility.
#if RL_READLINE_VERSION >= 0x0500
@@ -50,7 +49,7 @@ class ReadLineEditor: public LineEditor {
public:
ReadLineEditor() : LineEditor(LineEditor::READLINE, "readline") { }
virtual Handle<String> Prompt(const char* prompt);
- virtual bool Open();
+ virtual bool Open(Isolate* isolate);
virtual bool Close();
virtual void AddHistory(const char* str);
@@ -58,9 +57,13 @@ class ReadLineEditor: public LineEditor {
static const int kMaxHistoryEntries;
private:
+#ifndef V8_SHARED
static char** AttemptedCompletion(const char* text, int start, int end);
static char* CompletionGenerator(const char* text, int state);
+#endif // V8_SHARED
static char kWordBreakCharacters[];
+
+ Isolate* isolate_;
};
@@ -74,9 +77,19 @@ const char* ReadLineEditor::kHistoryFileName = ".d8_history";
const int ReadLineEditor::kMaxHistoryEntries = 1000;
-bool ReadLineEditor::Open() {
+bool ReadLineEditor::Open(Isolate* isolate) {
+ isolate_ = isolate;
+
rl_initialize();
+
+#ifdef V8_SHARED
+  // Don't do completion in shared library mode
+ // http://cnswww.cns.cwru.edu/php/chet/readline/readline.html#SEC24
+ rl_bind_key('\t', rl_insert);
+#else
rl_attempted_completion_function = AttemptedCompletion;
+#endif // V8_SHARED
+
rl_completer_word_break_characters = kWordBreakCharacters;
rl_bind_key('\t', rl_complete);
using_history();
@@ -122,6 +135,7 @@ void ReadLineEditor::AddHistory(const char* str) {
}
+#ifndef V8_SHARED
char** ReadLineEditor::AttemptedCompletion(const char* text,
int start,
int end) {
@@ -134,27 +148,30 @@ char** ReadLineEditor::AttemptedCompletion(const char* text,
char* ReadLineEditor::CompletionGenerator(const char* text, int state) {
static unsigned current_index;
static Persistent<Array> current_completions;
+ Isolate* isolate = read_line_editor.isolate_;
+ Locker lock(isolate);
+ HandleScope scope(isolate);
+ Handle<Array> completions;
if (state == 0) {
- HandleScope scope;
Local<String> full_text = String::New(rl_line_buffer, rl_point);
- Handle<Array> completions =
- Shell::GetCompletions(String::New(text), full_text);
- current_completions = Persistent<Array>::New(completions);
+ completions = Shell::GetCompletions(isolate, String::New(text), full_text);
+ current_completions.Reset(isolate, completions);
current_index = 0;
+ } else {
+ completions = Local<Array>::New(isolate, current_completions);
}
- if (current_index < current_completions->Length()) {
- HandleScope scope;
+ if (current_index < completions->Length()) {
Handle<Integer> index = Integer::New(current_index);
- Handle<Value> str_obj = current_completions->Get(index);
+ Handle<Value> str_obj = completions->Get(index);
current_index++;
String::Utf8Value str(str_obj);
return strdup(*str);
} else {
- current_completions.Dispose();
- current_completions.Clear();
+ current_completions.Reset();
return NULL;
}
}
+#endif // V8_SHARED
} // namespace v8
diff --git a/deps/v8/src/d8.cc b/deps/v8/src/d8.cc
index b3b1bb8a1..357c8a489 100644
--- a/deps/v8/src/d8.cc
+++ b/deps/v8/src/d8.cc
@@ -42,9 +42,21 @@
#ifdef V8_SHARED
#include <assert.h>
+#endif // V8_SHARED
+
+#ifndef V8_SHARED
+#include <algorithm>
+#endif // !V8_SHARED
+
+#ifdef V8_SHARED
+#include "../include/v8-defaults.h"
#include "../include/v8-testing.h"
#endif // V8_SHARED
+#ifdef ENABLE_VTUNE_JIT_INTERFACE
+#include "third_party/vtune/v8-vtune.h"
+#endif
+
#include "d8.h"
#ifndef V8_SHARED
@@ -55,6 +67,7 @@
#include "natives.h"
#include "platform.h"
#include "v8.h"
+#include "v8-defaults.h"
#endif // V8_SHARED
#if !defined(_WIN32) && !defined(_WIN64)
@@ -67,42 +80,77 @@
namespace v8 {
-LineEditor *LineEditor::first_ = NULL;
-
-LineEditor::LineEditor(Type type, const char* name)
- : type_(type),
- name_(name),
- next_(first_) {
- first_ = this;
+static Handle<Value> Throw(const char* message) {
+ return ThrowException(String::New(message));
}
-LineEditor* LineEditor::Get() {
- LineEditor* current = first_;
- LineEditor* best = current;
- while (current != NULL) {
- if (current->type_ > best->type_)
- best = current;
- current = current->next_;
+
+class PerIsolateData {
+ public:
+ explicit PerIsolateData(Isolate* isolate) : isolate_(isolate), realms_(NULL) {
+ HandleScope scope(isolate);
+ isolate->SetData(this);
+ }
+
+ ~PerIsolateData() {
+ isolate_->SetData(NULL); // Not really needed, just to be sure...
+ }
+
+ inline static PerIsolateData* Get(Isolate* isolate) {
+ return reinterpret_cast<PerIsolateData*>(isolate->GetData());
}
- return best;
+
+ class RealmScope {
+ public:
+ explicit RealmScope(PerIsolateData* data);
+ ~RealmScope();
+ private:
+ PerIsolateData* data_;
+ };
+
+ private:
+ friend class Shell;
+ friend class RealmScope;
+ Isolate* isolate_;
+ int realm_count_;
+ int realm_current_;
+ int realm_switch_;
+ Persistent<Context>* realms_;
+ Persistent<Value> realm_shared_;
+
+ int RealmFind(Handle<Context> context);
+};
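+
+// Minimal sketch (illustrative only): the isolate's embedder data slot acts
+// as a back-pointer, so a callback that only receives an Isolate* can
+// recover the shell's per-isolate state:
+//
+//   PerIsolateData* data = PerIsolateData::Get(args.GetIsolate());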
+
+
+LineEditor *LineEditor::current_ = NULL;
+
+
+LineEditor::LineEditor(Type type, const char* name)
+ : type_(type), name_(name) {
+ if (current_ == NULL || current_->type_ < type) current_ = this;
}
class DumbLineEditor: public LineEditor {
public:
- DumbLineEditor() : LineEditor(LineEditor::DUMB, "dumb") { }
+ explicit DumbLineEditor(Isolate* isolate)
+ : LineEditor(LineEditor::DUMB, "dumb"), isolate_(isolate) { }
virtual Handle<String> Prompt(const char* prompt);
+ private:
+ Isolate* isolate_;
};
-static DumbLineEditor dumb_line_editor;
-
-
Handle<String> DumbLineEditor::Prompt(const char* prompt) {
printf("%s", prompt);
- return Shell::ReadFromStdin();
+#if defined(__native_client__)
+ // Native Client libc is used to being embedded in Chrome and
+ // has trouble recognizing when to flush.
+ fflush(stdout);
+#endif
+ return Shell::ReadFromStdin(isolate_);
}
@@ -111,11 +159,11 @@ CounterMap* Shell::counter_map_;
i::OS::MemoryMappedFile* Shell::counters_file_ = NULL;
CounterCollection Shell::local_counters_;
CounterCollection* Shell::counters_ = &local_counters_;
-i::Mutex* Shell::context_mutex_(i::OS::CreateMutex());
+i::Mutex Shell::context_mutex_;
+const i::TimeTicks Shell::kInitialTicks = i::TimeTicks::HighResolutionNow();
Persistent<Context> Shell::utility_context_;
#endif // V8_SHARED
-LineEditor* Shell::console = NULL;
Persistent<Context> Shell::evaluation_context_;
ShellOptions Shell::options;
const char* Shell::kPrompt = "d8> ";
@@ -140,7 +188,8 @@ const char* Shell::ToCString(const v8::String::Utf8Value& value) {
// Executes a string within the current v8 context.
-bool Shell::ExecuteString(Handle<String> source,
+bool Shell::ExecuteString(Isolate* isolate,
+ Handle<String> source,
Handle<Value> name,
bool print_result,
bool report_exceptions) {
@@ -149,36 +198,62 @@ bool Shell::ExecuteString(Handle<String> source,
#else
bool FLAG_debugger = false;
#endif // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT
- HandleScope handle_scope;
+ HandleScope handle_scope(isolate);
TryCatch try_catch;
options.script_executed = true;
if (FLAG_debugger) {
// When debugging make exceptions appear to be uncaught.
try_catch.SetVerbose(true);
}
- Handle<Script> script = Script::Compile(source, name);
+ Handle<Script> script = Script::New(source, name);
if (script.IsEmpty()) {
// Print errors that happened during compilation.
if (report_exceptions && !FLAG_debugger)
- ReportException(&try_catch);
+ ReportException(isolate, &try_catch);
return false;
} else {
+ PerIsolateData* data = PerIsolateData::Get(isolate);
+ Local<Context> realm =
+ Local<Context>::New(isolate, data->realms_[data->realm_current_]);
+ realm->Enter();
Handle<Value> result = script->Run();
+ realm->Exit();
+ data->realm_current_ = data->realm_switch_;
if (result.IsEmpty()) {
ASSERT(try_catch.HasCaught());
// Print errors that happened during execution.
if (report_exceptions && !FLAG_debugger)
- ReportException(&try_catch);
+ ReportException(isolate, &try_catch);
return false;
} else {
ASSERT(!try_catch.HasCaught());
- if (print_result && !result->IsUndefined()) {
- // If all went well and the result wasn't undefined then print
- // the returned value.
- v8::String::Utf8Value str(result);
- size_t count = fwrite(*str, sizeof(**str), str.length(), stdout);
- (void) count; // Silence GCC-4.5.x "unused result" warning.
- printf("\n");
+ if (print_result) {
+#if !defined(V8_SHARED)
+ if (options.test_shell) {
+#endif
+ if (!result->IsUndefined()) {
+ // If all went well and the result wasn't undefined then print
+ // the returned value.
+ v8::String::Utf8Value str(result);
+ fwrite(*str, sizeof(**str), str.length(), stdout);
+ printf("\n");
+ }
+#if !defined(V8_SHARED)
+ } else {
+ v8::TryCatch try_catch;
+ v8::Local<v8::Context> context =
+ v8::Local<v8::Context>::New(isolate, utility_context_);
+ v8::Context::Scope context_scope(context);
+ Handle<Object> global = context->Global();
+ Handle<Value> fun = global->Get(String::New("Stringify"));
+ Handle<Value> argv[1] = { result };
+ Handle<Value> s = Handle<Function>::Cast(fun)->Call(global, 1, argv);
+ if (try_catch.HasCaught()) return true;
+ v8::String::Utf8Value str(s);
+ fwrite(*str, sizeof(**str), str.length(), stdout);
+ printf("\n");
+ }
+#endif
}
return true;
}
@@ -186,680 +261,306 @@ bool Shell::ExecuteString(Handle<String> source,
}
-Handle<Value> Shell::Print(const Arguments& args) {
- Handle<Value> val = Write(args);
- printf("\n");
- fflush(stdout);
- return val;
+PerIsolateData::RealmScope::RealmScope(PerIsolateData* data) : data_(data) {
+ data_->realm_count_ = 1;
+ data_->realm_current_ = 0;
+ data_->realm_switch_ = 0;
+ data_->realms_ = new Persistent<Context>[1];
+ data_->realms_[0].Reset(data_->isolate_,
+ data_->isolate_->GetEnteredContext());
+ data_->realm_shared_.Clear();
}
-Handle<Value> Shell::Write(const Arguments& args) {
- for (int i = 0; i < args.Length(); i++) {
- HandleScope handle_scope;
- if (i != 0) {
- printf(" ");
- }
+PerIsolateData::RealmScope::~RealmScope() {
+ // Drop realms to avoid keeping them alive.
+ for (int i = 0; i < data_->realm_count_; ++i)
+ data_->realms_[i].Dispose();
+ delete[] data_->realms_;
+ if (!data_->realm_shared_.IsEmpty())
+ data_->realm_shared_.Dispose();
+}
- // Explicitly catch potential exceptions in toString().
- v8::TryCatch try_catch;
- Handle<String> str_obj = args[i]->ToString();
- if (try_catch.HasCaught()) return try_catch.ReThrow();
- v8::String::Utf8Value str(str_obj);
- int n = static_cast<int>(fwrite(*str, sizeof(**str), str.length(), stdout));
- if (n != str.length()) {
- printf("Error in fwrite\n");
- Exit(1);
- }
+int PerIsolateData::RealmFind(Handle<Context> context) {
+ for (int i = 0; i < realm_count_; ++i) {
+ if (realms_[i] == context) return i;
}
- return Undefined();
+ return -1;
}
-Handle<Value> Shell::EnableProfiler(const Arguments& args) {
- V8::ResumeProfiler();
- return Undefined();
+#ifndef V8_SHARED
+// performance.now() returns a time stamp as a double, measured in milliseconds.
+void Shell::PerformanceNow(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ i::TimeDelta delta = i::TimeTicks::HighResolutionNow() - kInitialTicks;
+ args.GetReturnValue().Set(delta.InMillisecondsF());
}
+#endif // V8_SHARED
-Handle<Value> Shell::DisableProfiler(const Arguments& args) {
- V8::PauseProfiler();
- return Undefined();
+// Realm.current() returns the index of the currently active realm.
+void Shell::RealmCurrent(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ Isolate* isolate = args.GetIsolate();
+ PerIsolateData* data = PerIsolateData::Get(isolate);
+ int index = data->RealmFind(isolate->GetEnteredContext());
+ if (index == -1) return;
+ args.GetReturnValue().Set(index);
}
-Handle<Value> Shell::Read(const Arguments& args) {
- String::Utf8Value file(args[0]);
- if (*file == NULL) {
- return ThrowException(String::New("Error loading file"));
- }
- Handle<String> source = ReadFile(*file);
- if (source.IsEmpty()) {
- return ThrowException(String::New("Error loading file"));
+// Realm.owner(o) returns the index of the realm that created o.
+void Shell::RealmOwner(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ Isolate* isolate = args.GetIsolate();
+ PerIsolateData* data = PerIsolateData::Get(isolate);
+ if (args.Length() < 1 || !args[0]->IsObject()) {
+ Throw("Invalid argument");
+ return;
}
- return source;
+ int index = data->RealmFind(args[0]->ToObject()->CreationContext());
+ if (index == -1) return;
+ args.GetReturnValue().Set(index);
}
-Handle<String> Shell::ReadFromStdin() {
- static const int kBufferSize = 256;
- char buffer[kBufferSize];
- Handle<String> accumulator = String::New("");
- int length;
- while (true) {
- // Continue reading if the line ends with an escape '\\' or the line has
- // not been fully read into the buffer yet (does not end with '\n').
- // If fgets gets an error, just give up.
- char* input = NULL;
- { // Release lock for blocking input.
- Unlocker unlock(Isolate::GetCurrent());
- input = fgets(buffer, kBufferSize, stdin);
- }
- if (input == NULL) return Handle<String>();
- length = static_cast<int>(strlen(buffer));
- if (length == 0) {
- return accumulator;
- } else if (buffer[length-1] != '\n') {
- accumulator = String::Concat(accumulator, String::New(buffer, length));
- } else if (length > 1 && buffer[length-2] == '\\') {
- buffer[length-2] = '\n';
- accumulator = String::Concat(accumulator, String::New(buffer, length-1));
- } else {
- return String::Concat(accumulator, String::New(buffer, length-1));
- }
+// Realm.global(i) returns the global object of realm i.
+// (Note that properties of global objects cannot be read/written cross-realm.)
+void Shell::RealmGlobal(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ PerIsolateData* data = PerIsolateData::Get(args.GetIsolate());
+ if (args.Length() < 1 || !args[0]->IsNumber()) {
+ Throw("Invalid argument");
+ return;
}
-}
-
-
-Handle<Value> Shell::Load(const Arguments& args) {
- for (int i = 0; i < args.Length(); i++) {
- HandleScope handle_scope;
- String::Utf8Value file(args[i]);
- if (*file == NULL) {
- return ThrowException(String::New("Error loading file"));
- }
- Handle<String> source = ReadFile(*file);
- if (source.IsEmpty()) {
- return ThrowException(String::New("Error loading file"));
- }
- if (!ExecuteString(source, String::New(*file), false, true)) {
- return ThrowException(String::New("Error executing file"));
- }
+ int index = args[0]->Uint32Value();
+ if (index >= data->realm_count_ || data->realms_[index].IsEmpty()) {
+ Throw("Invalid realm index");
+ return;
}
- return Undefined();
+ args.GetReturnValue().Set(
+ Local<Context>::New(args.GetIsolate(), data->realms_[index])->Global());
}
-static int32_t convertToInt(Local<Value> value_in, TryCatch* try_catch) {
- if (value_in->IsInt32()) {
- return value_in->Int32Value();
- }
-
- Local<Value> number = value_in->ToNumber();
- if (try_catch->HasCaught()) return 0;
-
- ASSERT(number->IsNumber());
- Local<Int32> int32 = number->ToInt32();
- if (try_catch->HasCaught() || int32.IsEmpty()) return 0;
- int32_t value = int32->Int32Value();
- if (try_catch->HasCaught()) return 0;
-
- return value;
+// Realm.create() creates a new realm and returns its index.
+void Shell::RealmCreate(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ Isolate* isolate = args.GetIsolate();
+ PerIsolateData* data = PerIsolateData::Get(isolate);
+ Persistent<Context>* old_realms = data->realms_;
+ int index = data->realm_count_;
+ data->realms_ = new Persistent<Context>[++data->realm_count_];
+ for (int i = 0; i < index; ++i) {
+ data->realms_[i].Reset(isolate, old_realms[i]);
+ }
+ delete[] old_realms;
+ Handle<ObjectTemplate> global_template = CreateGlobalTemplate(isolate);
+ data->realms_[index].Reset(
+ isolate, Context::New(isolate, NULL, global_template));
+ args.GetReturnValue().Set(index);
}
-static int32_t convertToUint(Local<Value> value_in, TryCatch* try_catch) {
- int32_t raw_value = convertToInt(value_in, try_catch);
- if (try_catch->HasCaught()) return 0;
-
- if (raw_value < 0) {
- ThrowException(String::New("Array length must not be negative."));
- return 0;
+// Realm.dispose(i) disposes the reference to the realm i.
+void Shell::RealmDispose(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ Isolate* isolate = args.GetIsolate();
+ PerIsolateData* data = PerIsolateData::Get(isolate);
+ if (args.Length() < 1 || !args[0]->IsNumber()) {
+ Throw("Invalid argument");
+ return;
}
-
- static const int kMaxLength = 0x3fffffff;
-#ifndef V8_SHARED
- ASSERT(kMaxLength == i::ExternalArray::kMaxLength);
-#endif // V8_SHARED
- if (raw_value > static_cast<int32_t>(kMaxLength)) {
- ThrowException(
- String::New("Array length exceeds maximum length."));
+ int index = args[0]->Uint32Value();
+ if (index >= data->realm_count_ || data->realms_[index].IsEmpty() ||
+ index == 0 ||
+ index == data->realm_current_ || index == data->realm_switch_) {
+ Throw("Invalid realm index");
+ return;
}
- return raw_value;
+ data->realms_[index].Dispose();
+ data->realms_[index].Clear();
}
-// TODO(rossberg): should replace these by proper uses of HasInstance,
-// once we figure out a good way to make the templates global.
-const char kArrayBufferMarkerPropName[] = "d8::_is_array_buffer_";
-const char kArrayMarkerPropName[] = "d8::_is_typed_array_";
-
-
-Handle<Value> Shell::CreateExternalArrayBuffer(Handle<Object> buffer,
- int32_t length) {
- static const int32_t kMaxSize = 0x7fffffff;
- // Make sure the total size fits into a (signed) int.
- if (length < 0 || length > kMaxSize) {
- return ThrowException(String::New("ArrayBuffer exceeds maximum size (2G)"));
+// Realm.switch(i) switches to the realm i for consecutive interactive inputs.
+void Shell::RealmSwitch(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ Isolate* isolate = args.GetIsolate();
+ PerIsolateData* data = PerIsolateData::Get(isolate);
+ if (args.Length() < 1 || !args[0]->IsNumber()) {
+ Throw("Invalid argument");
+ return;
}
- uint8_t* data = new uint8_t[length];
- if (data == NULL) {
- return ThrowException(String::New("Memory allocation failed"));
+ int index = args[0]->Uint32Value();
+ if (index >= data->realm_count_ || data->realms_[index].IsEmpty()) {
+ Throw("Invalid realm index");
+ return;
}
- memset(data, 0, length);
-
- buffer->SetHiddenValue(String::New(kArrayBufferMarkerPropName), True());
- Persistent<Object> persistent_array = Persistent<Object>::New(buffer);
- persistent_array.MakeWeak(data, ExternalArrayWeakCallback);
- persistent_array.MarkIndependent();
- V8::AdjustAmountOfExternalAllocatedMemory(length);
-
- buffer->SetIndexedPropertiesToExternalArrayData(
- data, v8::kExternalByteArray, length);
- buffer->Set(String::New("byteLength"), Int32::New(length), ReadOnly);
-
- return buffer;
+ data->realm_switch_ = index;
}
-Handle<Value> Shell::ArrayBuffer(const Arguments& args) {
- if (!args.IsConstructCall()) {
- Handle<Value>* rec_args = new Handle<Value>[args.Length()];
- for (int i = 0; i < args.Length(); ++i) rec_args[i] = args[i];
- Handle<Value> result = args.Callee()->NewInstance(args.Length(), rec_args);
- delete[] rec_args;
- return result;
+// Realm.eval(i, s) evaluates s in realm i and returns the result.
+void Shell::RealmEval(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ Isolate* isolate = args.GetIsolate();
+ PerIsolateData* data = PerIsolateData::Get(isolate);
+ if (args.Length() < 2 || !args[0]->IsNumber() || !args[1]->IsString()) {
+ Throw("Invalid argument");
+ return;
}
-
- if (args.Length() == 0) {
- return ThrowException(
- String::New("ArrayBuffer constructor must have one argument"));
+ int index = args[0]->Uint32Value();
+ if (index >= data->realm_count_ || data->realms_[index].IsEmpty()) {
+ Throw("Invalid realm index");
+ return;
}
- TryCatch try_catch;
- int32_t length = convertToUint(args[0], &try_catch);
- if (try_catch.HasCaught()) return try_catch.ReThrow();
-
- return CreateExternalArrayBuffer(args.This(), length);
+ Handle<Script> script = Script::New(args[1]->ToString());
+ if (script.IsEmpty()) return;
+ Local<Context> realm = Local<Context>::New(isolate, data->realms_[index]);
+ realm->Enter();
+ Handle<Value> result = script->Run();
+ realm->Exit();
+ args.GetReturnValue().Set(result);
}
-Handle<Object> Shell::CreateExternalArray(Handle<Object> array,
- Handle<Object> buffer,
- ExternalArrayType type,
- int32_t length,
- int32_t byteLength,
- int32_t byteOffset,
- int32_t element_size) {
- ASSERT(element_size == 1 || element_size == 2 ||
- element_size == 4 || element_size == 8);
- ASSERT(byteLength == length * element_size);
+// Realm.shared is an accessor for a single shared value across realms.
+void Shell::RealmSharedGet(Local<String> property,
+ const PropertyCallbackInfo<Value>& info) {
+ Isolate* isolate = info.GetIsolate();
+ PerIsolateData* data = PerIsolateData::Get(isolate);
+ if (data->realm_shared_.IsEmpty()) return;
+ info.GetReturnValue().Set(data->realm_shared_);
+}
- void* data = buffer->GetIndexedPropertiesExternalArrayData();
- ASSERT(data != NULL);
+void Shell::RealmSharedSet(Local<String> property,
+ Local<Value> value,
+ const PropertyCallbackInfo<void>& info) {
+ Isolate* isolate = info.GetIsolate();
+ PerIsolateData* data = PerIsolateData::Get(isolate);
+ if (!data->realm_shared_.IsEmpty()) data->realm_shared_.Dispose();
+ data->realm_shared_.Reset(isolate, value);
+}
- array->SetIndexedPropertiesToExternalArrayData(
- static_cast<uint8_t*>(data) + byteOffset, type, length);
- array->SetHiddenValue(String::New(kArrayMarkerPropName), Int32::New(type));
- array->Set(String::New("byteLength"), Int32::New(byteLength), ReadOnly);
- array->Set(String::New("byteOffset"), Int32::New(byteOffset), ReadOnly);
- array->Set(String::New("length"), Int32::New(length), ReadOnly);
- array->Set(String::New("BYTES_PER_ELEMENT"), Int32::New(element_size));
- array->Set(String::New("buffer"), buffer, ReadOnly);
- return array;
+void Shell::Print(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ Write(args);
+ printf("\n");
+ fflush(stdout);
}
-Handle<Value> Shell::CreateExternalArray(const Arguments& args,
- ExternalArrayType type,
- int32_t element_size) {
- if (!args.IsConstructCall()) {
- Handle<Value>* rec_args = new Handle<Value>[args.Length()];
- for (int i = 0; i < args.Length(); ++i) rec_args[i] = args[i];
- Handle<Value> result = args.Callee()->NewInstance(args.Length(), rec_args);
- delete[] rec_args;
- return result;
- }
-
- TryCatch try_catch;
- ASSERT(element_size == 1 || element_size == 2 ||
- element_size == 4 || element_size == 8);
-
- // All of the following constructors are supported:
- // TypedArray(unsigned long length)
- // TypedArray(type[] array)
- // TypedArray(TypedArray array)
- // TypedArray(ArrayBuffer buffer,
- // optional unsigned long byteOffset,
- // optional unsigned long length)
- Handle<Object> buffer;
- int32_t length;
- int32_t byteLength;
- int32_t byteOffset;
- bool init_from_array = false;
- if (args.Length() == 0) {
- return ThrowException(
- String::New("Array constructor must have at least one argument"));
- }
- if (args[0]->IsObject() &&
- !args[0]->ToObject()->GetHiddenValue(
- String::New(kArrayBufferMarkerPropName)).IsEmpty()) {
- // Construct from ArrayBuffer.
- buffer = args[0]->ToObject();
- int32_t bufferLength =
- convertToUint(buffer->Get(String::New("byteLength")), &try_catch);
- if (try_catch.HasCaught()) return try_catch.ReThrow();
-
- if (args.Length() < 2 || args[1]->IsUndefined()) {
- byteOffset = 0;
- } else {
- byteOffset = convertToUint(args[1], &try_catch);
- if (try_catch.HasCaught()) return try_catch.ReThrow();
- if (byteOffset > bufferLength) {
- return ThrowException(String::New("byteOffset out of bounds"));
- }
- if (byteOffset % element_size != 0) {
- return ThrowException(
- String::New("byteOffset must be multiple of element size"));
- }
+void Shell::Write(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ for (int i = 0; i < args.Length(); i++) {
+ HandleScope handle_scope(args.GetIsolate());
+ if (i != 0) {
+ printf(" ");
}
- if (args.Length() < 3 || args[2]->IsUndefined()) {
- byteLength = bufferLength - byteOffset;
- length = byteLength / element_size;
- if (byteLength % element_size != 0) {
- return ThrowException(
- String::New("buffer size must be multiple of element size"));
- }
- } else {
- length = convertToUint(args[2], &try_catch);
- if (try_catch.HasCaught()) return try_catch.ReThrow();
- byteLength = length * element_size;
- if (byteOffset + byteLength > bufferLength) {
- return ThrowException(String::New("length out of bounds"));
- }
- }
- } else {
- if (args[0]->IsObject() &&
- args[0]->ToObject()->Has(String::New("length"))) {
- // Construct from array.
- length = convertToUint(
- args[0]->ToObject()->Get(String::New("length")), &try_catch);
- if (try_catch.HasCaught()) return try_catch.ReThrow();
- init_from_array = true;
- } else {
- // Construct from size.
- length = convertToUint(args[0], &try_catch);
- if (try_catch.HasCaught()) return try_catch.ReThrow();
+ // Explicitly catch potential exceptions in toString().
+ v8::TryCatch try_catch;
+ Handle<String> str_obj = args[i]->ToString();
+ if (try_catch.HasCaught()) {
+ try_catch.ReThrow();
+ return;
}
- byteLength = length * element_size;
- byteOffset = 0;
-
- Handle<Object> global = Context::GetCurrent()->Global();
- Handle<Value> array_buffer = global->Get(String::New("ArrayBuffer"));
- ASSERT(!try_catch.HasCaught() && array_buffer->IsFunction());
- Handle<Value> buffer_args[] = { Uint32::New(byteLength) };
- Handle<Value> result = Handle<Function>::Cast(array_buffer)->NewInstance(
- 1, buffer_args);
- if (try_catch.HasCaught()) return result;
- buffer = result->ToObject();
- }
-
- Handle<Object> array = CreateExternalArray(
- args.This(), buffer, type, length, byteLength, byteOffset, element_size);
-
- if (init_from_array) {
- Handle<Object> init = args[0]->ToObject();
- for (int i = 0; i < length; ++i) array->Set(i, init->Get(i));
- }
-
- return array;
-}
-
-
-Handle<Value> Shell::ArrayBufferSlice(const Arguments& args) {
- TryCatch try_catch;
-
- if (!args.This()->IsObject()) {
- return ThrowException(
- String::New("'slice' invoked on non-object receiver"));
- }
- Local<Object> self = args.This();
- Local<Value> marker =
- self->GetHiddenValue(String::New(kArrayBufferMarkerPropName));
- if (marker.IsEmpty()) {
- return ThrowException(
- String::New("'slice' invoked on wrong receiver type"));
- }
-
- int32_t length =
- convertToUint(self->Get(String::New("byteLength")), &try_catch);
- if (try_catch.HasCaught()) return try_catch.ReThrow();
-
- if (args.Length() == 0) {
- return ThrowException(
- String::New("'slice' must have at least one argument"));
- }
- int32_t begin = convertToInt(args[0], &try_catch);
- if (try_catch.HasCaught()) return try_catch.ReThrow();
- if (begin < 0) begin += length;
- if (begin < 0) begin = 0;
- if (begin > length) begin = length;
-
- int32_t end;
- if (args.Length() < 2 || args[1]->IsUndefined()) {
- end = length;
- } else {
- end = convertToInt(args[1], &try_catch);
- if (try_catch.HasCaught()) return try_catch.ReThrow();
- if (end < 0) end += length;
- if (end < 0) end = 0;
- if (end > length) end = length;
- if (end < begin) end = begin;
+ v8::String::Utf8Value str(str_obj);
+ int n = static_cast<int>(fwrite(*str, sizeof(**str), str.length(), stdout));
+ if (n != str.length()) {
+ printf("Error in fwrite\n");
+ Exit(1);
+ }
}
-
- Local<Function> constructor = Local<Function>::Cast(self->GetConstructor());
- Handle<Value> new_args[] = { Uint32::New(end - begin) };
- Handle<Value> result = constructor->NewInstance(1, new_args);
- if (try_catch.HasCaught()) return result;
- Handle<Object> buffer = result->ToObject();
- uint8_t* dest =
- static_cast<uint8_t*>(buffer->GetIndexedPropertiesExternalArrayData());
- uint8_t* src = begin + static_cast<uint8_t*>(
- self->GetIndexedPropertiesExternalArrayData());
- memcpy(dest, src, end - begin);
-
- return buffer;
}
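
The per-argument conversion in Write() above also shows the exception-propagation idiom the converted callbacks use: a possibly-throwing conversion is wrapped in a local TryCatch and re-raised with ReThrow() so the pending exception reaches the calling JavaScript frame. Condensed sketch for a hypothetical argument value v inside such a callback:

v8::TryCatch try_catch;
Handle<String> str = v->ToString();  // may invoke user toString() and throw
if (try_catch.HasCaught()) {
  try_catch.ReThrow();               // re-raise for the calling JS frame
  return;                            // leave the callback early
}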
-Handle<Value> Shell::ArraySubArray(const Arguments& args) {
- TryCatch try_catch;
-
- if (!args.This()->IsObject()) {
- return ThrowException(
- String::New("'subarray' invoked on non-object receiver"));
- }
-
- Local<Object> self = args.This();
- Local<Value> marker = self->GetHiddenValue(String::New(kArrayMarkerPropName));
- if (marker.IsEmpty()) {
- return ThrowException(
- String::New("'subarray' invoked on wrong receiver type"));
- }
-
- Handle<Object> buffer = self->Get(String::New("buffer"))->ToObject();
- if (try_catch.HasCaught()) return try_catch.ReThrow();
- int32_t length =
- convertToUint(self->Get(String::New("length")), &try_catch);
- if (try_catch.HasCaught()) return try_catch.ReThrow();
- int32_t byteOffset =
- convertToUint(self->Get(String::New("byteOffset")), &try_catch);
- if (try_catch.HasCaught()) return try_catch.ReThrow();
- int32_t element_size =
- convertToUint(self->Get(String::New("BYTES_PER_ELEMENT")), &try_catch);
- if (try_catch.HasCaught()) return try_catch.ReThrow();
-
- if (args.Length() == 0) {
- return ThrowException(
- String::New("'subarray' must have at least one argument"));
+void Shell::Read(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ String::Utf8Value file(args[0]);
+ if (*file == NULL) {
+ Throw("Error loading file");
+ return;
}
- int32_t begin = convertToInt(args[0], &try_catch);
- if (try_catch.HasCaught()) return try_catch.ReThrow();
- if (begin < 0) begin += length;
- if (begin < 0) begin = 0;
- if (begin > length) begin = length;
-
- int32_t end;
- if (args.Length() < 2 || args[1]->IsUndefined()) {
- end = length;
- } else {
- end = convertToInt(args[1], &try_catch);
- if (try_catch.HasCaught()) return try_catch.ReThrow();
- if (end < 0) end += length;
- if (end < 0) end = 0;
- if (end > length) end = length;
- if (end < begin) end = begin;
+ Handle<String> source = ReadFile(args.GetIsolate(), *file);
+ if (source.IsEmpty()) {
+ Throw("Error loading file");
+ return;
}
-
- length = end - begin;
- byteOffset += begin * element_size;
-
- Local<Function> constructor = Local<Function>::Cast(self->GetConstructor());
- Handle<Value> construct_args[] = {
- buffer, Uint32::New(byteOffset), Uint32::New(length)
- };
- return constructor->NewInstance(3, construct_args);
+ args.GetReturnValue().Set(source);
}
-Handle<Value> Shell::ArraySet(const Arguments& args) {
- TryCatch try_catch;
-
- if (!args.This()->IsObject()) {
- return ThrowException(
- String::New("'set' invoked on non-object receiver"));
- }
-
- Local<Object> self = args.This();
- Local<Value> marker = self->GetHiddenValue(String::New(kArrayMarkerPropName));
- if (marker.IsEmpty()) {
- return ThrowException(
- String::New("'set' invoked on wrong receiver type"));
- }
- int32_t length =
- convertToUint(self->Get(String::New("length")), &try_catch);
- if (try_catch.HasCaught()) return try_catch.ReThrow();
- int32_t element_size =
- convertToUint(self->Get(String::New("BYTES_PER_ELEMENT")), &try_catch);
- if (try_catch.HasCaught()) return try_catch.ReThrow();
-
- if (args.Length() == 0) {
- return ThrowException(
- String::New("'set' must have at least one argument"));
- }
- if (!args[0]->IsObject() ||
- !args[0]->ToObject()->Has(String::New("length"))) {
- return ThrowException(
- String::New("'set' invoked with non-array argument"));
- }
- Handle<Object> source = args[0]->ToObject();
- int32_t source_length =
- convertToUint(source->Get(String::New("length")), &try_catch);
- if (try_catch.HasCaught()) return try_catch.ReThrow();
-
- int32_t offset;
- if (args.Length() < 2 || args[1]->IsUndefined()) {
- offset = 0;
- } else {
- offset = convertToUint(args[1], &try_catch);
- if (try_catch.HasCaught()) return try_catch.ReThrow();
- }
- if (offset + source_length > length) {
- return ThrowException(String::New("offset or source length out of bounds"));
- }
-
- int32_t source_element_size;
- if (source->GetHiddenValue(String::New(kArrayMarkerPropName)).IsEmpty()) {
- source_element_size = 0;
- } else {
- source_element_size =
- convertToUint(source->Get(String::New("BYTES_PER_ELEMENT")), &try_catch);
- if (try_catch.HasCaught()) return try_catch.ReThrow();
- }
-
- if (element_size == source_element_size &&
- self->GetConstructor()->StrictEquals(source->GetConstructor())) {
- // Use memmove on the array buffers.
- Handle<Object> buffer = self->Get(String::New("buffer"))->ToObject();
- if (try_catch.HasCaught()) return try_catch.ReThrow();
- Handle<Object> source_buffer =
- source->Get(String::New("buffer"))->ToObject();
- if (try_catch.HasCaught()) return try_catch.ReThrow();
- int32_t byteOffset =
- convertToUint(self->Get(String::New("byteOffset")), &try_catch);
- if (try_catch.HasCaught()) return try_catch.ReThrow();
- int32_t source_byteOffset =
- convertToUint(source->Get(String::New("byteOffset")), &try_catch);
- if (try_catch.HasCaught()) return try_catch.ReThrow();
-
- uint8_t* dest = byteOffset + offset * element_size + static_cast<uint8_t*>(
- buffer->GetIndexedPropertiesExternalArrayData());
- uint8_t* src = source_byteOffset + static_cast<uint8_t*>(
- source_buffer->GetIndexedPropertiesExternalArrayData());
- memmove(dest, src, source_length * element_size);
- } else if (source_element_size == 0) {
- // Source is not a typed array, copy element-wise sequentially.
- for (int i = 0; i < source_length; ++i) {
- self->Set(offset + i, source->Get(i));
- if (try_catch.HasCaught()) return try_catch.ReThrow();
+Handle<String> Shell::ReadFromStdin(Isolate* isolate) {
+ static const int kBufferSize = 256;
+ char buffer[kBufferSize];
+ Handle<String> accumulator = String::New("");
+ int length;
+ while (true) {
+ // Continue reading if the line ends with an escape '\\' or the line has
+ // not been fully read into the buffer yet (does not end with '\n').
+ // If fgets gets an error, just give up.
+ char* input = NULL;
+ { // Release lock for blocking input.
+ Unlocker unlock(isolate);
+ input = fgets(buffer, kBufferSize, stdin);
}
- } else {
- // Need to copy element-wise to make the right conversions.
- Handle<Object> buffer = self->Get(String::New("buffer"))->ToObject();
- if (try_catch.HasCaught()) return try_catch.ReThrow();
- Handle<Object> source_buffer =
- source->Get(String::New("buffer"))->ToObject();
- if (try_catch.HasCaught()) return try_catch.ReThrow();
-
- if (buffer->StrictEquals(source_buffer)) {
- // Same backing store, need to handle overlap correctly.
- // This gets a bit tricky in the case of different element sizes
- // (which, of course, is extremely unlikely to ever occur in practice).
- int32_t byteOffset =
- convertToUint(self->Get(String::New("byteOffset")), &try_catch);
- if (try_catch.HasCaught()) return try_catch.ReThrow();
- int32_t source_byteOffset =
- convertToUint(source->Get(String::New("byteOffset")), &try_catch);
- if (try_catch.HasCaught()) return try_catch.ReThrow();
-
- // Copy as much as we can from left to right.
- int i = 0;
- int32_t next_dest_offset = byteOffset + (offset + 1) * element_size;
- int32_t next_src_offset = source_byteOffset + source_element_size;
- while (i < length && next_dest_offset <= next_src_offset) {
- self->Set(offset + i, source->Get(i));
- ++i;
- next_dest_offset += element_size;
- next_src_offset += source_element_size;
- }
- // Of what's left, copy as much as we can from right to left.
- int j = length - 1;
- int32_t dest_offset = byteOffset + (offset + j) * element_size;
- int32_t src_offset = source_byteOffset + j * source_element_size;
- while (j >= i && dest_offset >= src_offset) {
- self->Set(offset + j, source->Get(j));
- --j;
- dest_offset -= element_size;
- src_offset -= source_element_size;
- }
- // There can be at most 8 entries left in the middle that need buffering
- // (because the largest element_size is 8 times the smallest).
- ASSERT(j+1 - i <= 8);
- Handle<Value> temp[8];
- for (int k = i; k <= j; ++k) {
- temp[k - i] = source->Get(k);
- }
- for (int k = i; k <= j; ++k) {
- self->Set(offset + k, temp[k - i]);
- }
+ if (input == NULL) return Handle<String>();
+ length = static_cast<int>(strlen(buffer));
+ if (length == 0) {
+ return accumulator;
+ } else if (buffer[length-1] != '\n') {
+ accumulator = String::Concat(accumulator, String::New(buffer, length));
+ } else if (length > 1 && buffer[length-2] == '\\') {
+ buffer[length-2] = '\n';
+ accumulator = String::Concat(accumulator, String::New(buffer, length-1));
} else {
- // Different backing stores, safe to copy element-wise sequentially.
- for (int i = 0; i < source_length; ++i)
- self->Set(offset + i, source->Get(i));
+ return String::Concat(accumulator, String::New(buffer, length-1));
}
}
-
- return Undefined();
-}
-
-
-void Shell::ExternalArrayWeakCallback(Persistent<Value> object, void* data) {
- HandleScope scope;
- int32_t length =
- object->ToObject()->Get(String::New("byteLength"))->Uint32Value();
- V8::AdjustAmountOfExternalAllocatedMemory(-length);
- delete[] static_cast<uint8_t*>(data);
- object.Dispose();
}
-Handle<Value> Shell::Int8Array(const Arguments& args) {
- return CreateExternalArray(args, v8::kExternalByteArray, sizeof(int8_t));
-}
-
-
-Handle<Value> Shell::Uint8Array(const Arguments& args) {
- return CreateExternalArray(args, kExternalUnsignedByteArray, sizeof(uint8_t));
-}
-
-
-Handle<Value> Shell::Int16Array(const Arguments& args) {
- return CreateExternalArray(args, kExternalShortArray, sizeof(int16_t));
-}
-
-
-Handle<Value> Shell::Uint16Array(const Arguments& args) {
- return CreateExternalArray(
- args, kExternalUnsignedShortArray, sizeof(uint16_t));
-}
-
-
-Handle<Value> Shell::Int32Array(const Arguments& args) {
- return CreateExternalArray(args, kExternalIntArray, sizeof(int32_t));
-}
-
-
-Handle<Value> Shell::Uint32Array(const Arguments& args) {
- return CreateExternalArray(args, kExternalUnsignedIntArray, sizeof(uint32_t));
-}
-
-
-Handle<Value> Shell::Float32Array(const Arguments& args) {
- return CreateExternalArray(
- args, kExternalFloatArray, sizeof(float)); // NOLINT
-}
-
-
-Handle<Value> Shell::Float64Array(const Arguments& args) {
- return CreateExternalArray(
- args, kExternalDoubleArray, sizeof(double)); // NOLINT
-}
-
-
-Handle<Value> Shell::Uint8ClampedArray(const Arguments& args) {
- return CreateExternalArray(args, kExternalPixelArray, sizeof(uint8_t));
-}
-
-
-Handle<Value> Shell::Yield(const Arguments& args) {
- v8::Unlocker unlocker;
- return Undefined();
+void Shell::Load(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ for (int i = 0; i < args.Length(); i++) {
+ HandleScope handle_scope(args.GetIsolate());
+ String::Utf8Value file(args[i]);
+ if (*file == NULL) {
+ Throw("Error loading file");
+ return;
+ }
+ Handle<String> source = ReadFile(args.GetIsolate(), *file);
+ if (source.IsEmpty()) {
+ Throw("Error loading file");
+ return;
+ }
+ if (!ExecuteString(args.GetIsolate(),
+ source,
+ String::New(*file),
+ false,
+ true)) {
+ Throw("Error executing file");
+ return;
+ }
+ }
}
-Handle<Value> Shell::Quit(const Arguments& args) {
+void Shell::Quit(const v8::FunctionCallbackInfo<v8::Value>& args) {
int exit_code = args[0]->Int32Value();
-#ifndef V8_SHARED
OnExit();
-#endif // V8_SHARED
exit(exit_code);
- return Undefined();
}
-Handle<Value> Shell::Version(const Arguments& args) {
- return String::New(V8::GetVersion());
+void Shell::Version(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ args.GetReturnValue().Set(String::New(V8::GetVersion()));
}
-void Shell::ReportException(v8::TryCatch* try_catch) {
- HandleScope handle_scope;
+void Shell::ReportException(Isolate* isolate, v8::TryCatch* try_catch) {
+ HandleScope handle_scope(isolate);
#if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT)
+ Handle<Context> utility_context;
bool enter_context = !Context::InContext();
- if (enter_context) utility_context_->Enter();
+ if (enter_context) {
+ utility_context = Local<Context>::New(isolate, utility_context_);
+ utility_context->Enter();
+ }
#endif // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT
v8::String::Utf8Value exception(try_catch->Exception());
const char* exception_string = ToCString(exception);
@@ -896,28 +597,38 @@ void Shell::ReportException(v8::TryCatch* try_catch) {
}
printf("\n");
#if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT)
- if (enter_context) utility_context_->Exit();
+ if (enter_context) utility_context->Exit();
#endif // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT
}
#ifndef V8_SHARED
-Handle<Array> Shell::GetCompletions(Handle<String> text, Handle<String> full) {
- HandleScope handle_scope;
- Context::Scope context_scope(utility_context_);
- Handle<Object> global = utility_context_->Global();
+Handle<Array> Shell::GetCompletions(Isolate* isolate,
+ Handle<String> text,
+ Handle<String> full) {
+ HandleScope handle_scope(isolate);
+ v8::Local<v8::Context> utility_context =
+ v8::Local<v8::Context>::New(isolate, utility_context_);
+ v8::Context::Scope context_scope(utility_context);
+ Handle<Object> global = utility_context->Global();
Handle<Value> fun = global->Get(String::New("GetCompletions"));
static const int kArgc = 3;
- Handle<Value> argv[kArgc] = { evaluation_context_->Global(), text, full };
+ v8::Local<v8::Context> evaluation_context =
+ v8::Local<v8::Context>::New(isolate, evaluation_context_);
+ Handle<Value> argv[kArgc] = { evaluation_context->Global(), text, full };
Handle<Value> val = Handle<Function>::Cast(fun)->Call(global, kArgc, argv);
return handle_scope.Close(Handle<Array>::Cast(val));
}
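
GetCompletions() above applies the handle-hygiene recipe the patch repeats wherever a persistent context member is needed: open a HandleScope on the explicit isolate, materialize a Local from the Persistent, then enter it. Condensed sketch, with persistent_context_ as a stand-in name for members such as utility_context_ or evaluation_context_:

HandleScope handle_scope(isolate);
Local<Context> context = Local<Context>::New(isolate, persistent_context_);
Context::Scope context_scope(context);
// locals created here are owned by handle_scope and run against 'context'
Handle<Object> global = context->Global();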
#ifdef ENABLE_DEBUGGER_SUPPORT
-Handle<Object> Shell::DebugMessageDetails(Handle<String> message) {
- Context::Scope context_scope(utility_context_);
- Handle<Object> global = utility_context_->Global();
+Handle<Object> Shell::DebugMessageDetails(Isolate* isolate,
+ Handle<String> message) {
+ HandleScope handle_scope(isolate);
+ v8::Local<v8::Context> context =
+ v8::Local<v8::Context>::New(isolate, utility_context_);
+ v8::Context::Scope context_scope(context);
+ Handle<Object> global = context->Global();
Handle<Value> fun = global->Get(String::New("DebugMessageDetails"));
static const int kArgc = 1;
Handle<Value> argv[kArgc] = { message };
@@ -926,9 +637,13 @@ Handle<Object> Shell::DebugMessageDetails(Handle<String> message) {
}
-Handle<Value> Shell::DebugCommandToJSONRequest(Handle<String> command) {
- Context::Scope context_scope(utility_context_);
- Handle<Object> global = utility_context_->Global();
+Handle<Value> Shell::DebugCommandToJSONRequest(Isolate* isolate,
+ Handle<String> command) {
+ HandleScope handle_scope(isolate);
+ v8::Local<v8::Context> context =
+ v8::Local<v8::Context>::New(isolate, utility_context_);
+ v8::Context::Scope context_scope(context);
+ Handle<Object> global = context->Global();
Handle<Value> fun = global->Get(String::New("DebugCommandToJSONRequest"));
static const int kArgc = 1;
Handle<Value> argv[kArgc] = { command };
@@ -938,7 +653,11 @@ Handle<Value> Shell::DebugCommandToJSONRequest(Handle<String> command) {
void Shell::DispatchDebugMessages() {
- v8::Context::Scope scope(Shell::evaluation_context_);
+ Isolate* isolate = v8::Isolate::GetCurrent();
+ HandleScope handle_scope(isolate);
+ v8::Local<v8::Context> context =
+ v8::Local<v8::Context>::New(isolate, Shell::evaluation_context_);
+ v8::Context::Scope context_scope(context);
v8::Debug::ProcessDebugMessages();
}
#endif // ENABLE_DEBUGGER_SUPPORT
@@ -1044,25 +763,30 @@ void Shell::AddHistogramSample(void* histogram, int sample) {
}
-void Shell::InstallUtilityScript() {
- Locker lock;
- HandleScope scope;
+void Shell::InstallUtilityScript(Isolate* isolate) {
+ Locker lock(isolate);
+ HandleScope scope(isolate);
// If we use the utility context, we have to set the security tokens so that
// utility, evaluation and debug context can all access each other.
- utility_context_->SetSecurityToken(Undefined());
- evaluation_context_->SetSecurityToken(Undefined());
- Context::Scope utility_scope(utility_context_);
+ v8::Local<v8::Context> utility_context =
+ v8::Local<v8::Context>::New(isolate, utility_context_);
+ v8::Local<v8::Context> evaluation_context =
+ v8::Local<v8::Context>::New(isolate, evaluation_context_);
+ utility_context->SetSecurityToken(Undefined(isolate));
+ evaluation_context->SetSecurityToken(Undefined(isolate));
+ v8::Context::Scope context_scope(utility_context);
#ifdef ENABLE_DEBUGGER_SUPPORT
if (i::FLAG_debugger) printf("JavaScript debugger enabled\n");
// Install the debugger object in the utility scope
- i::Debug* debug = i::Isolate::Current()->debug();
+ i::Debug* debug = reinterpret_cast<i::Isolate*>(isolate)->debug();
debug->Load();
i::Handle<i::JSObject> js_debug
= i::Handle<i::JSObject>(debug->debug_context()->global_object());
- utility_context_->Global()->Set(String::New("$debug"),
+ utility_context->Global()->Set(String::New("$debug"),
Utils::ToLocal(js_debug));
- debug->debug_context()->set_security_token(HEAP->undefined_value());
+ debug->debug_context()->set_security_token(
+ reinterpret_cast<i::Isolate*>(isolate)->heap()->undefined_value());
#endif // ENABLE_DEBUGGER_SUPPORT
// Run the d8 shell utility script in the utility context
@@ -1090,7 +814,7 @@ void Shell::InstallUtilityScript() {
#ifdef ENABLE_DEBUGGER_SUPPORT
// Start the in-process debugger if requested.
if (i::FLAG_debugger && !i::FLAG_debugger_agent) {
- v8::Debug::SetDebugEventListener(HandleDebugEvent);
+ v8::Debug::SetDebugEventListener2(HandleDebugEvent);
}
#endif // ENABLE_DEBUGGER_SUPPORT
}
@@ -1125,27 +849,7 @@ class BZip2Decompressor : public v8::StartupDataDecompressor {
#endif
-Handle<FunctionTemplate> Shell::CreateArrayBufferTemplate(
- InvocationCallback fun) {
- Handle<FunctionTemplate> buffer_template = FunctionTemplate::New(fun);
- Local<Template> proto_template = buffer_template->PrototypeTemplate();
- proto_template->Set(String::New("slice"),
- FunctionTemplate::New(ArrayBufferSlice));
- return buffer_template;
-}
-
-
-Handle<FunctionTemplate> Shell::CreateArrayTemplate(InvocationCallback fun) {
- Handle<FunctionTemplate> array_template = FunctionTemplate::New(fun);
- Local<Template> proto_template = array_template->PrototypeTemplate();
- proto_template->Set(String::New("set"), FunctionTemplate::New(ArraySet));
- proto_template->Set(String::New("subarray"),
- FunctionTemplate::New(ArraySubArray));
- return array_template;
-}
-
-
-Handle<ObjectTemplate> Shell::CreateGlobalTemplate() {
+Handle<ObjectTemplate> Shell::CreateGlobalTemplate(Isolate* isolate) {
Handle<ObjectTemplate> global_template = ObjectTemplate::New();
global_template->Set(String::New("print"), FunctionTemplate::New(Print));
global_template->Set(String::New("write"), FunctionTemplate::New(Write));
@@ -1157,40 +861,33 @@ Handle<ObjectTemplate> Shell::CreateGlobalTemplate() {
global_template->Set(String::New("load"), FunctionTemplate::New(Load));
global_template->Set(String::New("quit"), FunctionTemplate::New(Quit));
global_template->Set(String::New("version"), FunctionTemplate::New(Version));
- global_template->Set(String::New("enableProfiler"),
- FunctionTemplate::New(EnableProfiler));
- global_template->Set(String::New("disableProfiler"),
- FunctionTemplate::New(DisableProfiler));
-
- // Bind the handlers for external arrays.
- PropertyAttribute attr =
- static_cast<PropertyAttribute>(ReadOnly | DontDelete);
- global_template->Set(String::New("ArrayBuffer"),
- CreateArrayBufferTemplate(ArrayBuffer), attr);
- global_template->Set(String::New("Int8Array"),
- CreateArrayTemplate(Int8Array), attr);
- global_template->Set(String::New("Uint8Array"),
- CreateArrayTemplate(Uint8Array), attr);
- global_template->Set(String::New("Int16Array"),
- CreateArrayTemplate(Int16Array), attr);
- global_template->Set(String::New("Uint16Array"),
- CreateArrayTemplate(Uint16Array), attr);
- global_template->Set(String::New("Int32Array"),
- CreateArrayTemplate(Int32Array), attr);
- global_template->Set(String::New("Uint32Array"),
- CreateArrayTemplate(Uint32Array), attr);
- global_template->Set(String::New("Float32Array"),
- CreateArrayTemplate(Float32Array), attr);
- global_template->Set(String::New("Float64Array"),
- CreateArrayTemplate(Float64Array), attr);
- global_template->Set(String::New("Uint8ClampedArray"),
- CreateArrayTemplate(Uint8ClampedArray), attr);
-
-#ifdef LIVE_OBJECT_LIST
- global_template->Set(String::New("lol_is_enabled"), True());
-#else
- global_template->Set(String::New("lol_is_enabled"), False());
-#endif
+
+ // Bind the Realm object.
+ Handle<ObjectTemplate> realm_template = ObjectTemplate::New();
+ realm_template->Set(String::New("current"),
+ FunctionTemplate::New(RealmCurrent));
+ realm_template->Set(String::New("owner"),
+ FunctionTemplate::New(RealmOwner));
+ realm_template->Set(String::New("global"),
+ FunctionTemplate::New(RealmGlobal));
+ realm_template->Set(String::New("create"),
+ FunctionTemplate::New(RealmCreate));
+ realm_template->Set(String::New("dispose"),
+ FunctionTemplate::New(RealmDispose));
+ realm_template->Set(String::New("switch"),
+ FunctionTemplate::New(RealmSwitch));
+ realm_template->Set(String::New("eval"),
+ FunctionTemplate::New(RealmEval));
+ realm_template->SetAccessor(String::New("shared"),
+ RealmSharedGet, RealmSharedSet);
+ global_template->Set(String::New("Realm"), realm_template);
+
+#ifndef V8_SHARED
+ Handle<ObjectTemplate> performance_template = ObjectTemplate::New();
+ performance_template->Set(String::New("now"),
+ FunctionTemplate::New(PerformanceNow));
+ global_template->Set(String::New("performance"), performance_template);
+#endif // V8_SHARED
#if !defined(V8_SHARED) && !defined(_WIN32) && !defined(_WIN64)
Handle<ObjectTemplate> os_templ = ObjectTemplate::New();
@@ -1202,7 +899,7 @@ Handle<ObjectTemplate> Shell::CreateGlobalTemplate() {
}
-void Shell::Initialize() {
+void Shell::Initialize(Isolate* isolate) {
#ifdef COMPRESS_STARTUP_DATA_BZ2
BZip2Decompressor startup_data_decompressor;
int bz2_result = startup_data_decompressor.Decompress();
@@ -1223,13 +920,17 @@ void Shell::Initialize() {
V8::SetAddHistogramSampleFunction(AddHistogramSample);
}
#endif // V8_SHARED
- if (options.test_shell) return;
+}
+
+void Shell::InitializeDebugger(Isolate* isolate) {
+ if (options.test_shell) return;
#ifndef V8_SHARED
- Locker lock;
- HandleScope scope;
- Handle<ObjectTemplate> global_template = CreateGlobalTemplate();
- utility_context_ = Context::New(NULL, global_template);
+ Locker lock(isolate);
+ HandleScope scope(isolate);
+ Handle<ObjectTemplate> global_template = CreateGlobalTemplate(isolate);
+ utility_context_.Reset(isolate,
+ Context::New(isolate, NULL, global_template));
#ifdef ENABLE_DEBUGGER_SUPPORT
// Start the debugger agent if requested.
@@ -1242,32 +943,34 @@ void Shell::Initialize() {
}
-Persistent<Context> Shell::CreateEvaluationContext() {
+Local<Context> Shell::CreateEvaluationContext(Isolate* isolate) {
#ifndef V8_SHARED
// This needs to be a critical section since this is not thread-safe
- i::ScopedLock lock(context_mutex_);
+ i::LockGuard<i::Mutex> lock_guard(&context_mutex_);
#endif // V8_SHARED
// Initialize the global objects
- Handle<ObjectTemplate> global_template = CreateGlobalTemplate();
- Persistent<Context> context = Context::New(NULL, global_template);
+ Handle<ObjectTemplate> global_template = CreateGlobalTemplate(isolate);
+ HandleScope handle_scope(isolate);
+ Local<Context> context = Context::New(isolate, NULL, global_template);
ASSERT(!context.IsEmpty());
Context::Scope scope(context);
#ifndef V8_SHARED
+ i::Factory* factory = reinterpret_cast<i::Isolate*>(isolate)->factory();
i::JSArguments js_args = i::FLAG_js_arguments;
i::Handle<i::FixedArray> arguments_array =
- FACTORY->NewFixedArray(js_args.argc());
- for (int j = 0; j < js_args.argc(); j++) {
+ factory->NewFixedArray(js_args.argc);
+ for (int j = 0; j < js_args.argc; j++) {
i::Handle<i::String> arg =
- FACTORY->NewStringFromUtf8(i::CStrVector(js_args[j]));
+ factory->NewStringFromUtf8(i::CStrVector(js_args[j]));
arguments_array->set(j, *arg);
}
i::Handle<i::JSArray> arguments_jsarray =
- FACTORY->NewJSArrayWithElements(arguments_array);
+ factory->NewJSArrayWithElements(arguments_array);
context->Global()->Set(String::New("arguments"),
Utils::ToLocal(arguments_jsarray));
#endif // V8_SHARED
- return context;
+ return handle_scope.Close(context);
}
@@ -1287,14 +990,16 @@ struct CounterAndKey {
};
-int CompareKeys(const void* a, const void* b) {
- return strcmp(static_cast<const CounterAndKey*>(a)->key,
- static_cast<const CounterAndKey*>(b)->key);
+inline bool operator<(const CounterAndKey& lhs, const CounterAndKey& rhs) {
+ return strcmp(lhs.key, rhs.key) < 0;
}
+#endif // V8_SHARED
void Shell::OnExit() {
- if (console != NULL) console->Close();
+ LineEditor* line_editor = LineEditor::Get();
+ if (line_editor) line_editor->Close();
+#ifndef V8_SHARED
if (i::FLAG_dump_counters) {
int number_of_counters = 0;
for (CounterMap::Iterator i(counter_map_); i.More(); i.Next()) {
@@ -1306,7 +1011,7 @@ void Shell::OnExit() {
counters[j].counter = i.CurrentValue();
counters[j].key = i.CurrentKey();
}
- qsort(counters, number_of_counters, sizeof(counters[0]), CompareKeys);
+ std::sort(counters, counters + number_of_counters);
printf("+----------------------------------------------------------------+"
"-------------+\n");
printf("| Name |"
@@ -1329,8 +1034,9 @@ void Shell::OnExit() {
}
delete counters_file_;
delete counter_map_;
-}
#endif // V8_SHARED
+}
+
static FILE* FOpen(const char* path, const char* mode) {
@@ -1354,9 +1060,9 @@ static FILE* FOpen(const char* path, const char* mode) {
}
-static char* ReadChars(const char* name, int* size_out) {
+static char* ReadChars(Isolate* isolate, const char* name, int* size_out) {
// Release the V8 lock while reading files.
- v8::Unlocker unlocker(Isolate::GetCurrent());
+ v8::Unlocker unlocker(isolate);
FILE* file = FOpen(name, "rb");
if (file == NULL) return NULL;
@@ -1375,31 +1081,42 @@ static char* ReadChars(const char* name, int* size_out) {
return chars;
}
+static void ReadBufferWeakCallback(v8::Isolate* isolate,
+ Persistent<ArrayBuffer>* array_buffer,
+ uint8_t* data) {
+ size_t byte_length =
+ Local<ArrayBuffer>::New(isolate, *array_buffer)->ByteLength();
+ isolate->AdjustAmountOfExternalAllocatedMemory(
+ -static_cast<intptr_t>(byte_length));
-Handle<Value> Shell::ReadBuffer(const Arguments& args) {
+ delete[] data;
+ array_buffer->Dispose();
+}
+
+
+void Shell::ReadBuffer(const v8::FunctionCallbackInfo<v8::Value>& args) {
ASSERT(sizeof(char) == sizeof(uint8_t)); // NOLINT
String::Utf8Value filename(args[0]);
int length;
if (*filename == NULL) {
- return ThrowException(String::New("Error loading file"));
+ Throw("Error loading file");
+ return;
}
- uint8_t* data = reinterpret_cast<uint8_t*>(ReadChars(*filename, &length));
+ Isolate* isolate = args.GetIsolate();
+ uint8_t* data = reinterpret_cast<uint8_t*>(
+ ReadChars(args.GetIsolate(), *filename, &length));
if (data == NULL) {
- return ThrowException(String::New("Error reading file"));
+ Throw("Error reading file");
+ return;
}
- Handle<Object> buffer = Object::New();
- buffer->SetHiddenValue(String::New(kArrayBufferMarkerPropName), True());
- Persistent<Object> persistent_buffer = Persistent<Object>::New(buffer);
- persistent_buffer.MakeWeak(data, ExternalArrayWeakCallback);
- persistent_buffer.MarkIndependent();
- V8::AdjustAmountOfExternalAllocatedMemory(length);
+ Handle<v8::ArrayBuffer> buffer = ArrayBuffer::New(data, length);
+ v8::Persistent<v8::ArrayBuffer> weak_handle(isolate, buffer);
+ weak_handle.MakeWeak(data, ReadBufferWeakCallback);
+ weak_handle.MarkIndependent();
+ isolate->AdjustAmountOfExternalAllocatedMemory(length);
- buffer->SetIndexedPropertiesToExternalArrayData(
- data, kExternalUnsignedByteArray, length);
- buffer->Set(String::New("byteLength"),
- Int32::New(static_cast<int32_t>(length)), ReadOnly);
- return buffer;
+ args.GetReturnValue().Set(buffer);
}
@@ -1427,29 +1144,32 @@ static char* ReadWord(char* data) {
// Reads a file into a v8 string.
-Handle<String> Shell::ReadFile(const char* name) {
+Handle<String> Shell::ReadFile(Isolate* isolate, const char* name) {
int size = 0;
- char* chars = ReadChars(name, &size);
+ char* chars = ReadChars(isolate, name, &size);
if (chars == NULL) return Handle<String>();
- Handle<String> result = String::New(chars);
+ Handle<String> result = String::New(chars, size);
delete[] chars;
return result;
}
-void Shell::RunShell() {
- Locker locker;
- Context::Scope context_scope(evaluation_context_);
- HandleScope outer_scope;
+void Shell::RunShell(Isolate* isolate) {
+ Locker locker(isolate);
+ HandleScope outer_scope(isolate);
+ v8::Local<v8::Context> context =
+ v8::Local<v8::Context>::New(isolate, evaluation_context_);
+ v8::Context::Scope context_scope(context);
+ PerIsolateData::RealmScope realm_scope(PerIsolateData::Get(isolate));
Handle<String> name = String::New("(d8)");
- console = LineEditor::Get();
+ LineEditor* console = LineEditor::Get();
printf("V8 version %s [console: %s]\n", V8::GetVersion(), console->name());
- console->Open();
+ console->Open(isolate);
while (true) {
- HandleScope inner_scope;
+ HandleScope inner_scope(isolate);
Handle<String> input = console->Prompt(Shell::kPrompt);
if (input.IsEmpty()) break;
- ExecuteString(input, name, true, true);
+ ExecuteString(isolate, input, name, true, true);
}
printf("\n");
}
@@ -1459,9 +1179,9 @@ void Shell::RunShell() {
class ShellThread : public i::Thread {
public:
// Takes ownership of the underlying char array of |files|.
- ShellThread(int no, char* files)
+ ShellThread(Isolate* isolate, char* files)
: Thread("d8:ShellThread"),
- no_(no), files_(files) { }
+ isolate_(isolate), files_(files) { }
~ShellThread() {
delete[] files_;
@@ -1469,7 +1189,7 @@ class ShellThread : public i::Thread {
virtual void Run();
private:
- int no_;
+ Isolate* isolate_;
char* files_;
};
@@ -1487,13 +1207,15 @@ void ShellThread::Run() {
}
// Prepare the context for this thread.
- Locker locker;
- HandleScope outer_scope;
- Persistent<Context> thread_context = Shell::CreateEvaluationContext();
+ Locker locker(isolate_);
+ HandleScope outer_scope(isolate_);
+ Local<Context> thread_context =
+ Shell::CreateEvaluationContext(isolate_);
Context::Scope context_scope(thread_context);
+ PerIsolateData::RealmScope realm_scope(PerIsolateData::Get(isolate_));
while ((ptr != NULL) && (*ptr != '\0')) {
- HandleScope inner_scope;
+ HandleScope inner_scope(isolate_);
char* filename = ptr;
ptr = ReadWord(ptr);
@@ -1502,16 +1224,15 @@ void ShellThread::Run() {
continue;
}
- Handle<String> str = Shell::ReadFile(filename);
+ Handle<String> str = Shell::ReadFile(isolate_, filename);
if (str.IsEmpty()) {
printf("File '%s' not found\n", filename);
Shell::Exit(1);
}
- Shell::ExecuteString(str, String::New(filename), false, false);
+ Shell::ExecuteString(isolate_, str, String::New(filename), false, false);
}
- thread_context.Dispose();
ptr = next_line;
}
}
@@ -1520,50 +1241,52 @@ void ShellThread::Run() {
SourceGroup::~SourceGroup() {
#ifndef V8_SHARED
- delete next_semaphore_;
- next_semaphore_ = NULL;
- delete done_semaphore_;
- done_semaphore_ = NULL;
delete thread_;
thread_ = NULL;
#endif // V8_SHARED
}
-void SourceGroup::Execute() {
+void SourceGroup::Execute(Isolate* isolate) {
+ bool exception_was_thrown = false;
for (int i = begin_offset_; i < end_offset_; ++i) {
const char* arg = argv_[i];
if (strcmp(arg, "-e") == 0 && i + 1 < end_offset_) {
// Execute argument given to -e option directly.
- HandleScope handle_scope;
+ HandleScope handle_scope(isolate);
Handle<String> file_name = String::New("unnamed");
Handle<String> source = String::New(argv_[i + 1]);
- if (!Shell::ExecuteString(source, file_name, false, true)) {
- Shell::Exit(1);
+ if (!Shell::ExecuteString(isolate, source, file_name, false, true)) {
+ exception_was_thrown = true;
+ break;
}
++i;
} else if (arg[0] == '-') {
// Ignore other options. They have been parsed already.
} else {
// Use all other arguments as names of files to load and run.
- HandleScope handle_scope;
+ HandleScope handle_scope(isolate);
Handle<String> file_name = String::New(arg);
- Handle<String> source = ReadFile(arg);
+ Handle<String> source = ReadFile(isolate, arg);
if (source.IsEmpty()) {
printf("Error reading '%s'\n", arg);
Shell::Exit(1);
}
- if (!Shell::ExecuteString(source, file_name, false, true)) {
- Shell::Exit(1);
+ if (!Shell::ExecuteString(isolate, source, file_name, false, true)) {
+ exception_was_thrown = true;
+ break;
}
}
}
+ if (exception_was_thrown != Shell::options.expected_to_throw) {
+ Shell::Exit(1);
+ }
}
-Handle<String> SourceGroup::ReadFile(const char* name) {
+Handle<String> SourceGroup::ReadFile(Isolate* isolate, const char* name) {
int size;
- char* chars = ReadChars(name, &size);
+ char* chars = ReadChars(isolate, name, &size);
if (chars == NULL) return Handle<String>();
Handle<String> result = String::New(chars, size);
delete[] chars;
@@ -1584,24 +1307,27 @@ i::Thread::Options SourceGroup::GetThreadOptions() {
void SourceGroup::ExecuteInThread() {
Isolate* isolate = Isolate::New();
do {
- if (next_semaphore_ != NULL) next_semaphore_->Wait();
+ next_semaphore_.Wait();
{
Isolate::Scope iscope(isolate);
Locker lock(isolate);
- HandleScope scope;
- Persistent<Context> context = Shell::CreateEvaluationContext();
{
- Context::Scope cscope(context);
- Execute();
+ HandleScope scope(isolate);
+ PerIsolateData data(isolate);
+ Local<Context> context = Shell::CreateEvaluationContext(isolate);
+ {
+ Context::Scope cscope(context);
+ PerIsolateData::RealmScope realm_scope(PerIsolateData::Get(isolate));
+ Execute(isolate);
+ }
}
- context.Dispose();
if (Shell::options.send_idle_notification) {
const int kLongIdlePauseInMs = 1000;
V8::ContextDisposedNotification();
V8::IdleNotification(kLongIdlePauseInMs);
}
}
- if (done_semaphore_ != NULL) done_semaphore_->Signal();
+ done_semaphore_.Signal();
} while (!Shell::options.last_run);
isolate->Dispose();
}
@@ -1612,7 +1338,7 @@ void SourceGroup::StartExecuteInThread() {
thread_ = new IsolateThread(this);
thread_->Start();
}
- next_semaphore_->Signal();
+ next_semaphore_.Signal();
}
@@ -1621,7 +1347,7 @@ void SourceGroup::WaitForThread() {
if (Shell::options.last_run) {
thread_->Join();
} else {
- done_semaphore_->Wait();
+ done_semaphore_.Wait();
}
}
#endif // V8_SHARED
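
Only the storage of the two semaphores changes here (heap-allocated i::Semaphore* becomes a value member constructed with count 0; see the d8.h hunk further below). The handshake between the main thread and the per-isolate worker is unchanged; a condensed sketch of that protocol:

// main thread (StartExecuteInThread / WaitForThread):
//   next_semaphore_.Signal();   // hand the next run to the worker
//   ...
//   done_semaphore_.Wait();     // block until the worker reports completion
//
// worker thread (ExecuteInThread loop):
//   next_semaphore_.Wait();     // sleep until work is signalled
//   ...create context, Execute(isolate)...
//   done_semaphore_.Signal();   // wake the waiting main thread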
@@ -1702,6 +1428,17 @@ bool Shell::SetOptions(int argc, char* argv[]) {
#else
options.num_parallel_files++;
#endif // V8_SHARED
+ } else if (strcmp(argv[i], "--dump-heap-constants") == 0) {
+#ifdef V8_SHARED
+ printf("D8 with shared library does not support constant dumping\n");
+ return false;
+#else
+ options.dump_heap_constants = true;
+ argv[i] = NULL;
+#endif
+ } else if (strcmp(argv[i], "--throws") == 0) {
+ options.expected_to_throw = true;
+ argv[i] = NULL;
}
#ifdef V8_SHARED
else if (strcmp(argv[i], "--dump-counters") == 0) {
@@ -1760,21 +1497,21 @@ bool Shell::SetOptions(int argc, char* argv[]) {
}
-int Shell::RunMain(int argc, char* argv[]) {
+int Shell::RunMain(Isolate* isolate, int argc, char* argv[]) {
#ifndef V8_SHARED
i::List<i::Thread*> threads(1);
if (options.parallel_files != NULL) {
for (int i = 0; i < options.num_parallel_files; i++) {
char* files = NULL;
- { Locker lock(Isolate::GetCurrent());
+ { Locker lock(isolate);
int size = 0;
- files = ReadChars(options.parallel_files[i], &size);
+ files = ReadChars(isolate, options.parallel_files[i], &size);
}
if (files == NULL) {
printf("File list '%s' not found\n", options.parallel_files[i]);
Exit(1);
}
- ShellThread* thread = new ShellThread(threads.length(), files);
+ ShellThread* thread = new ShellThread(isolate, files);
thread->Start();
threads.Add(thread);
}
@@ -1784,26 +1521,28 @@ int Shell::RunMain(int argc, char* argv[]) {
}
#endif // V8_SHARED
{ // NOLINT
- Locker lock;
- HandleScope scope;
- Persistent<Context> context = CreateEvaluationContext();
- if (options.last_run) {
- // Keep using the same context in the interactive shell.
- evaluation_context_ = context;
+ Locker lock(isolate);
+ {
+ HandleScope scope(isolate);
+ Local<Context> context = CreateEvaluationContext(isolate);
+ if (options.last_run) {
+ // Keep using the same context in the interactive shell.
+ evaluation_context_.Reset(isolate, context);
#if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT)
- // If the interactive debugger is enabled make sure to activate
- // it before running the files passed on the command line.
- if (i::FLAG_debugger) {
- InstallUtilityScript();
- }
+ // If the interactive debugger is enabled make sure to activate
+ // it before running the files passed on the command line.
+ if (i::FLAG_debugger) {
+ InstallUtilityScript(isolate);
+ }
#endif // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT
- }
- {
- Context::Scope cscope(context);
- options.isolate_sources[0].Execute();
+ }
+ {
+ Context::Scope cscope(context);
+ PerIsolateData::RealmScope realm_scope(PerIsolateData::Get(isolate));
+ options.isolate_sources[0].Execute(isolate);
+ }
}
if (!options.last_run) {
- context.Dispose();
if (options.send_idle_notification) {
const int kLongIdlePauseInMs = 1000;
V8::ContextDisposedNotification();
@@ -1815,7 +1554,7 @@ int Shell::RunMain(int argc, char* argv[]) {
// Start preemption if threads have been created and preemption is enabled.
if (threads.length() > 0
&& options.use_preemption) {
- Locker::StartPreemption(options.preemption_interval);
+ Locker::StartPreemption(isolate, options.preemption_interval);
}
#endif // V8_SHARED
}
@@ -1832,74 +1571,185 @@ int Shell::RunMain(int argc, char* argv[]) {
}
if (threads.length() > 0 && options.use_preemption) {
- Locker lock;
- Locker::StopPreemption();
+ Locker lock(isolate);
+ Locker::StopPreemption(isolate);
}
#endif // V8_SHARED
return 0;
}
+#ifdef V8_SHARED
+static void SetStandaloneFlagsViaCommandLine() {
+ int fake_argc = 2;
+ char **fake_argv = new char*[2];
+ fake_argv[0] = NULL;
+ fake_argv[1] = strdup("--trace-hydrogen-file=hydrogen.cfg");
+ v8::V8::SetFlagsFromCommandLine(&fake_argc, fake_argv, false);
+ free(fake_argv[1]);
+ delete[] fake_argv;
+}
+#endif
+
+
+#ifndef V8_SHARED
+static void DumpHeapConstants(i::Isolate* isolate) {
+ i::Heap* heap = isolate->heap();
+
+ // Dump the INSTANCE_TYPES table to the console.
+ printf("# List of known V8 instance types.\n");
+#define DUMP_TYPE(T) printf(" %d: \"%s\",\n", i::T, #T);
+ printf("INSTANCE_TYPES = {\n");
+ INSTANCE_TYPE_LIST(DUMP_TYPE)
+ printf("}\n");
+#undef DUMP_TYPE
+
+ // Dump the KNOWN_MAP table to the console.
+ printf("\n# List of known V8 maps.\n");
+#define ROOT_LIST_CASE(type, name, camel_name) \
+ if (n == NULL && o == heap->name()) n = #camel_name;
+#define STRUCT_LIST_CASE(upper_name, camel_name, name) \
+ if (n == NULL && o == heap->name##_map()) n = #camel_name "Map";
+ i::HeapObjectIterator it(heap->map_space());
+ printf("KNOWN_MAPS = {\n");
+ for (i::Object* o = it.Next(); o != NULL; o = it.Next()) {
+ i::Map* m = i::Map::cast(o);
+ const char* n = NULL;
+ intptr_t p = reinterpret_cast<intptr_t>(m) & 0xfffff;
+ int t = m->instance_type();
+ ROOT_LIST(ROOT_LIST_CASE)
+ STRUCT_LIST(STRUCT_LIST_CASE)
+ if (n == NULL) continue;
+ printf(" 0x%05" V8PRIxPTR ": (%d, \"%s\"),\n", p, t, n);
+ }
+ printf("}\n");
+#undef STRUCT_LIST_CASE
+#undef ROOT_LIST_CASE
+
+ // Dump the KNOWN_OBJECTS table to the console.
+ printf("\n# List of known V8 objects.\n");
+#define ROOT_LIST_CASE(type, name, camel_name) \
+ if (n == NULL && o == heap->name()) n = #camel_name;
+ i::OldSpaces spit(heap);
+ printf("KNOWN_OBJECTS = {\n");
+ for (i::PagedSpace* s = spit.next(); s != NULL; s = spit.next()) {
+ i::HeapObjectIterator it(s);
+ const char* sname = AllocationSpaceName(s->identity());
+ for (i::Object* o = it.Next(); o != NULL; o = it.Next()) {
+ const char* n = NULL;
+ intptr_t p = reinterpret_cast<intptr_t>(o) & 0xfffff;
+ ROOT_LIST(ROOT_LIST_CASE)
+ if (n == NULL) continue;
+ printf(" (\"%s\", 0x%05" V8PRIxPTR "): \"%s\",\n", sname, p, n);
+ }
+ }
+ printf("}\n");
+#undef ROOT_LIST_CASE
+}
+#endif // V8_SHARED
+
+
+class ShellArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
+ public:
+ virtual void* Allocate(size_t length) {
+ void* result = malloc(length);
+ memset(result, 0, length);
+ return result;
+ }
+ virtual void* AllocateUninitialized(size_t length) {
+ return malloc(length);
+ }
+ virtual void Free(void* data, size_t) { free(data); }
+ // TODO(dslomov): Remove when v8:2823 is fixed.
+ virtual void Free(void* data) {
+#ifndef V8_SHARED
+ UNREACHABLE();
+#endif
+ }
+};
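
ShellArrayBufferAllocator is the embedder-supplied backing-store allocator that the real ArrayBuffer code path requires; Shell::Main below registers it before any isolate work happens. A condensed sketch of the wiring, assuming the one-argument ArrayBuffer::New overload of this API generation:

ShellArrayBufferAllocator array_buffer_allocator;
v8::V8::SetArrayBufferAllocator(&array_buffer_allocator);  // must precede any ArrayBuffer use
// ... later, inside an entered context ...
Local<ArrayBuffer> ab = ArrayBuffer::New(1024);  // backing store comes from Allocate(),
                                                 // which zero-fills via the memset above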
+
+
int Shell::Main(int argc, char* argv[]) {
if (!SetOptions(argc, argv)) return 1;
- Initialize();
-
+ v8::V8::InitializeICU();
+#ifndef V8_SHARED
+ i::FLAG_trace_hydrogen_file = "hydrogen.cfg";
+#else
+ SetStandaloneFlagsViaCommandLine();
+#endif
+ v8::SetDefaultResourceConstraintsForCurrentPlatform();
+ ShellArrayBufferAllocator array_buffer_allocator;
+ v8::V8::SetArrayBufferAllocator(&array_buffer_allocator);
int result = 0;
- if (options.stress_opt || options.stress_deopt) {
- Testing::SetStressRunType(
- options.stress_opt ? Testing::kStressTypeOpt
- : Testing::kStressTypeDeopt);
- int stress_runs = Testing::GetStressRuns();
- for (int i = 0; i < stress_runs && result == 0; i++) {
- printf("============ Stress %d/%d ============\n", i + 1, stress_runs);
- Testing::PrepareStressRun(i);
- options.last_run = (i == stress_runs - 1);
- result = RunMain(argc, argv);
+ Isolate* isolate = Isolate::GetCurrent();
+ DumbLineEditor dumb_line_editor(isolate);
+ {
+ Initialize(isolate);
+#ifdef ENABLE_VTUNE_JIT_INTERFACE
+ vTune::InitializeVtuneForV8();
+#endif
+ PerIsolateData data(isolate);
+ InitializeDebugger(isolate);
+
+#ifndef V8_SHARED
+ if (options.dump_heap_constants) {
+ DumpHeapConstants(reinterpret_cast<i::Isolate*>(isolate));
+ return 0;
}
- printf("======== Full Deoptimization =======\n");
- Testing::DeoptimizeAll();
+#endif
+
+ if (options.stress_opt || options.stress_deopt) {
+ Testing::SetStressRunType(options.stress_opt
+ ? Testing::kStressTypeOpt
+ : Testing::kStressTypeDeopt);
+ int stress_runs = Testing::GetStressRuns();
+ for (int i = 0; i < stress_runs && result == 0; i++) {
+ printf("============ Stress %d/%d ============\n", i + 1, stress_runs);
+ Testing::PrepareStressRun(i);
+ options.last_run = (i == stress_runs - 1);
+ result = RunMain(isolate, argc, argv);
+ }
+ printf("======== Full Deoptimization =======\n");
+ Testing::DeoptimizeAll();
#if !defined(V8_SHARED)
- } else if (i::FLAG_stress_runs > 0) {
- int stress_runs = i::FLAG_stress_runs;
- for (int i = 0; i < stress_runs && result == 0; i++) {
- printf("============ Run %d/%d ============\n", i + 1, stress_runs);
- options.last_run = (i == stress_runs - 1);
- result = RunMain(argc, argv);
- }
+ } else if (i::FLAG_stress_runs > 0) {
+ int stress_runs = i::FLAG_stress_runs;
+ for (int i = 0; i < stress_runs && result == 0; i++) {
+ printf("============ Run %d/%d ============\n", i + 1, stress_runs);
+ options.last_run = (i == stress_runs - 1);
+ result = RunMain(isolate, argc, argv);
+ }
#endif
- } else {
- result = RunMain(argc, argv);
- }
+ } else {
+ result = RunMain(isolate, argc, argv);
+ }
#if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT)
- // Run remote debugger if requested, but never on --test
- if (i::FLAG_remote_debugger && !options.test_shell) {
- InstallUtilityScript();
- RunRemoteDebugger(i::FLAG_debugger_port);
- return 0;
- }
+ // Run remote debugger if requested, but never on --test
+ if (i::FLAG_remote_debugger && !options.test_shell) {
+ InstallUtilityScript(isolate);
+ RunRemoteDebugger(isolate, i::FLAG_debugger_port);
+ return 0;
+ }
#endif // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT
- // Run interactive shell if explicitly requested or if no script has been
- // executed, but never on --test
+ // Run interactive shell if explicitly requested or if no script has been
+ // executed, but never on --test
- if (( options.interactive_shell
- || !options.script_executed )
- && !options.test_shell ) {
+ if (( options.interactive_shell || !options.script_executed )
+ && !options.test_shell ) {
#if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT)
- if (!i::FLAG_debugger) {
- InstallUtilityScript();
- }
+ if (!i::FLAG_debugger) {
+ InstallUtilityScript(isolate);
+ }
#endif // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT
- RunShell();
+ RunShell(isolate);
+ }
}
-
V8::Dispose();
-#ifndef V8_SHARED
OnExit();
-#endif // V8_SHARED
return result;
}
diff --git a/deps/v8/src/d8.gyp b/deps/v8/src/d8.gyp
index a8361e6b4..097abc046 100644
--- a/deps/v8/src/d8.gyp
+++ b/deps/v8/src/d8.gyp
@@ -26,10 +26,14 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
{
- 'includes': ['../build/common.gypi'],
'variables': {
+ 'v8_code': 1,
'console%': '',
+ # Enable support for Intel VTune. Supported on ia32/x64 only
+ 'v8_enable_vtunejit%': 0,
+ 'v8_enable_i18n_support%': 1,
},
+ 'includes': ['../build/toolchain.gypi', '../build/features.gypi'],
'targets': [
{
'target_name': 'd8',
@@ -45,6 +49,10 @@
'd8.cc',
],
'conditions': [
+ [ 'console=="readline"', {
+ 'libraries': [ '-lreadline', ],
+ 'sources': [ 'd8-readline.cc' ],
+ }],
[ 'component!="shared_library"', {
'sources': [ 'd8-debug.cc', '<(SHARED_INTERMEDIATE_DIR)/d8-js.cc', ],
'conditions': [
@@ -57,10 +65,6 @@
'd8_js2c',
],
}],
- [ 'console=="readline"', {
- 'libraries': [ '-lreadline', ],
- 'sources': [ 'd8-readline.cc' ],
- }],
['(OS=="linux" or OS=="mac" or OS=="freebsd" or OS=="netbsd" \
or OS=="openbsd" or OS=="solaris" or OS=="android")', {
'sources': [ 'd8-posix.cc', ]
@@ -70,6 +74,22 @@
}],
],
}],
+ ['v8_enable_vtunejit==1', {
+ 'dependencies': [
+ '../src/third_party/vtune/v8vtune.gyp:v8_vtune',
+ ],
+ }],
+ ['v8_enable_i18n_support==1', {
+ 'dependencies': [
+ '<(icu_gyp_path):icui18n',
+ '<(icu_gyp_path):icuuc',
+ ],
+ }],
+ ['OS=="win" and v8_enable_i18n_support==1', {
+ 'dependencies': [
+ '<(icu_gyp_path):icudata',
+ ],
+ }],
],
},
{
diff --git a/deps/v8/src/d8.h b/deps/v8/src/d8.h
index a62a81fd9..411dfdda3 100644
--- a/deps/v8/src/d8.h
+++ b/deps/v8/src/d8.h
@@ -123,17 +123,16 @@ class LineEditor {
virtual ~LineEditor() { }
virtual Handle<String> Prompt(const char* prompt) = 0;
- virtual bool Open() { return true; }
+ virtual bool Open(Isolate* isolate) { return true; }
virtual bool Close() { return true; }
virtual void AddHistory(const char* str) { }
const char* name() { return name_; }
- static LineEditor* Get();
+ static LineEditor* Get() { return current_; }
private:
Type type_;
const char* name_;
- LineEditor* next_;
- static LineEditor* first_;
+ static LineEditor* current_;
};
@@ -141,8 +140,8 @@ class SourceGroup {
public:
SourceGroup() :
#ifndef V8_SHARED
- next_semaphore_(v8::internal::OS::CreateSemaphore(0)),
- done_semaphore_(v8::internal::OS::CreateSemaphore(0)),
+ next_semaphore_(0),
+ done_semaphore_(0),
thread_(NULL),
#endif // V8_SHARED
argv_(NULL),
@@ -158,7 +157,7 @@ class SourceGroup {
void End(int offset) { end_offset_ = offset; }
- void Execute();
+ void Execute(Isolate* isolate);
#ifndef V8_SHARED
void StartExecuteInThread();
@@ -181,13 +180,13 @@ class SourceGroup {
static i::Thread::Options GetThreadOptions();
void ExecuteInThread();
- i::Semaphore* next_semaphore_;
- i::Semaphore* done_semaphore_;
+ i::Semaphore next_semaphore_;
+ i::Semaphore done_semaphore_;
i::Thread* thread_;
#endif // V8_SHARED
void ExitShell(int exit_code);
- Handle<String> ReadFile(const char* name);
+ Handle<String> ReadFile(Isolate* isolate, const char* name);
const char** argv_;
int begin_offset_;
@@ -232,6 +231,8 @@ class ShellOptions {
stress_deopt(false),
interactive_shell(false),
test_shell(false),
+ dump_heap_constants(false),
+ expected_to_throw(false),
num_isolates(1),
isolate_sources(NULL) { }
@@ -255,6 +256,8 @@ class ShellOptions {
bool stress_deopt;
bool interactive_shell;
bool test_shell;
+ bool dump_heap_constants;
+ bool expected_to_throw;
int num_isolates;
SourceGroup* isolate_sources;
};
@@ -266,22 +269,24 @@ class Shell : public i::AllStatic {
#endif // V8_SHARED
public:
- static bool ExecuteString(Handle<String> source,
+ static bool ExecuteString(Isolate* isolate,
+ Handle<String> source,
Handle<Value> name,
bool print_result,
bool report_exceptions);
static const char* ToCString(const v8::String::Utf8Value& value);
- static void ReportException(TryCatch* try_catch);
- static Handle<String> ReadFile(const char* name);
- static Persistent<Context> CreateEvaluationContext();
- static int RunMain(int argc, char* argv[]);
+ static void ReportException(Isolate* isolate, TryCatch* try_catch);
+ static Handle<String> ReadFile(Isolate* isolate, const char* name);
+ static Local<Context> CreateEvaluationContext(Isolate* isolate);
+ static int RunMain(Isolate* isolate, int argc, char* argv[]);
static int Main(int argc, char* argv[]);
static void Exit(int exit_code);
+ static void OnExit();
#ifndef V8_SHARED
- static Handle<Array> GetCompletions(Handle<String> text,
+ static Handle<Array> GetCompletions(Isolate* isolate,
+ Handle<String> text,
Handle<String> full);
- static void OnExit();
static int* LookupCounter(const char* name);
static void* CreateHistogram(const char* name,
int min,
@@ -291,43 +296,54 @@ class Shell : public i::AllStatic {
static void MapCounters(const char* name);
#ifdef ENABLE_DEBUGGER_SUPPORT
- static Handle<Object> DebugMessageDetails(Handle<String> message);
- static Handle<Value> DebugCommandToJSONRequest(Handle<String> command);
+ static Handle<Object> DebugMessageDetails(Isolate* isolate,
+ Handle<String> message);
+ static Handle<Value> DebugCommandToJSONRequest(Isolate* isolate,
+ Handle<String> command);
static void DispatchDebugMessages();
#endif // ENABLE_DEBUGGER_SUPPORT
+
+ static void PerformanceNow(const v8::FunctionCallbackInfo<v8::Value>& args);
#endif // V8_SHARED
-#ifdef WIN32
-#undef Yield
-#endif
-
- static Handle<Value> Print(const Arguments& args);
- static Handle<Value> Write(const Arguments& args);
- static Handle<Value> Yield(const Arguments& args);
- static Handle<Value> Quit(const Arguments& args);
- static Handle<Value> Version(const Arguments& args);
- static Handle<Value> EnableProfiler(const Arguments& args);
- static Handle<Value> DisableProfiler(const Arguments& args);
- static Handle<Value> Read(const Arguments& args);
- static Handle<Value> ReadBuffer(const Arguments& args);
- static Handle<String> ReadFromStdin();
- static Handle<Value> ReadLine(const Arguments& args) {
- return ReadFromStdin();
+ static void RealmCurrent(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void RealmOwner(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void RealmGlobal(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void RealmCreate(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void RealmDispose(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void RealmSwitch(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void RealmEval(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void RealmSharedGet(Local<String> property,
+ const PropertyCallbackInfo<Value>& info);
+ static void RealmSharedSet(Local<String> property,
+ Local<Value> value,
+ const PropertyCallbackInfo<void>& info);
+
+ static void Print(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void Write(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void Quit(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void Version(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void Read(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void ReadBuffer(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static Handle<String> ReadFromStdin(Isolate* isolate);
+ static void ReadLine(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ args.GetReturnValue().Set(ReadFromStdin(args.GetIsolate()));
}
- static Handle<Value> Load(const Arguments& args);
- static Handle<Value> ArrayBuffer(const Arguments& args);
- static Handle<Value> Int8Array(const Arguments& args);
- static Handle<Value> Uint8Array(const Arguments& args);
- static Handle<Value> Int16Array(const Arguments& args);
- static Handle<Value> Uint16Array(const Arguments& args);
- static Handle<Value> Int32Array(const Arguments& args);
- static Handle<Value> Uint32Array(const Arguments& args);
- static Handle<Value> Float32Array(const Arguments& args);
- static Handle<Value> Float64Array(const Arguments& args);
- static Handle<Value> Uint8ClampedArray(const Arguments& args);
- static Handle<Value> ArrayBufferSlice(const Arguments& args);
- static Handle<Value> ArraySubArray(const Arguments& args);
- static Handle<Value> ArraySet(const Arguments& args);
+ static void Load(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void ArrayBuffer(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void Int8Array(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void Uint8Array(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void Int16Array(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void Uint16Array(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void Int32Array(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void Uint32Array(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void Float32Array(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void Float64Array(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void Uint8ClampedArray(
+ const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void ArrayBufferSlice(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void ArraySubArray(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void ArraySet(const v8::FunctionCallbackInfo<v8::Value>& args);
// The OS object on the global object contains methods for performing
// operating system calls:
//
@@ -354,18 +370,17 @@ class Shell : public i::AllStatic {
// with the current umask. Intermediate directories are created if necessary.
// An exception is not thrown if the directory already exists. Analogous to
// the "mkdir -p" command.
- static Handle<Value> OSObject(const Arguments& args);
- static Handle<Value> System(const Arguments& args);
- static Handle<Value> ChangeDirectory(const Arguments& args);
- static Handle<Value> SetEnvironment(const Arguments& args);
- static Handle<Value> UnsetEnvironment(const Arguments& args);
- static Handle<Value> SetUMask(const Arguments& args);
- static Handle<Value> MakeDirectory(const Arguments& args);
- static Handle<Value> RemoveDirectory(const Arguments& args);
+ static void OSObject(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void System(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void ChangeDirectory(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void SetEnvironment(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void UnsetEnvironment(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void SetUMask(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void MakeDirectory(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void RemoveDirectory(const v8::FunctionCallbackInfo<v8::Value>& args);
static void AddOSMethods(Handle<ObjectTemplate> os_template);
- static LineEditor* console;
static const char* kPrompt;
static ShellOptions options;
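A minimal d8 usage sketch for the os object described in the comment above (the JS-level names os.system, os.mkdirp and os.rmdir are assumptions; this header only declares the C++ callbacks backing them):

    // Run an external command, then create and remove a directory tree.
    var out = os.system("echo", ["hello"]);   // backed by Shell::System
    os.mkdirp("tmp/a/b");                     // Shell::MakeDirectory, "mkdir -p" semantics
    os.rmdir("tmp/a/b");                      // Shell::RemoveDirectory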
@@ -379,30 +394,37 @@ class Shell : public i::AllStatic {
static CounterCollection local_counters_;
static CounterCollection* counters_;
static i::OS::MemoryMappedFile* counters_file_;
- static i::Mutex* context_mutex_;
+ static i::Mutex context_mutex_;
+ static const i::TimeTicks kInitialTicks;
static Counter* GetCounter(const char* name, bool is_histogram);
- static void InstallUtilityScript();
+ static void InstallUtilityScript(Isolate* isolate);
#endif // V8_SHARED
- static void Initialize();
- static void RunShell();
+ static void Initialize(Isolate* isolate);
+ static void InitializeDebugger(Isolate* isolate);
+ static void RunShell(Isolate* isolate);
static bool SetOptions(int argc, char* argv[]);
- static Handle<ObjectTemplate> CreateGlobalTemplate();
- static Handle<FunctionTemplate> CreateArrayBufferTemplate(InvocationCallback);
- static Handle<FunctionTemplate> CreateArrayTemplate(InvocationCallback);
- static Handle<Value> CreateExternalArrayBuffer(Handle<Object> buffer,
+ static Handle<ObjectTemplate> CreateGlobalTemplate(Isolate* isolate);
+ static Handle<FunctionTemplate> CreateArrayBufferTemplate(FunctionCallback);
+ static Handle<FunctionTemplate> CreateArrayTemplate(FunctionCallback);
+ static Handle<Value> CreateExternalArrayBuffer(Isolate* isolate,
+ Handle<Object> buffer,
int32_t size);
- static Handle<Object> CreateExternalArray(Handle<Object> array,
+ static Handle<Object> CreateExternalArray(Isolate* isolate,
+ Handle<Object> array,
Handle<Object> buffer,
ExternalArrayType type,
int32_t length,
int32_t byteLength,
int32_t byteOffset,
int32_t element_size);
- static Handle<Value> CreateExternalArray(const Arguments& args,
- ExternalArrayType type,
- int32_t element_size);
- static void ExternalArrayWeakCallback(Persistent<Value> object, void* data);
+ static void CreateExternalArray(
+ const v8::FunctionCallbackInfo<v8::Value>& args,
+ ExternalArrayType type,
+ int32_t element_size);
+ static void ExternalArrayWeakCallback(Isolate* isolate,
+ Persistent<Object>* object,
+ uint8_t* data);
};
diff --git a/deps/v8/src/d8.js b/deps/v8/src/d8.js
index 819135add..35b61d54e 100644
--- a/deps/v8/src/d8.js
+++ b/deps/v8/src/d8.js
@@ -40,7 +40,7 @@ function log10(num) {
function ToInspectableObject(obj) {
if (!obj && typeof obj === 'object') {
- return void 0;
+ return UNDEFINED;
} else {
return Object(obj);
}
@@ -71,7 +71,7 @@ function GetCompletions(global, last, full) {
result.push(name);
}
}
- current = ToInspectableObject(current.__proto__);
+ current = ToInspectableObject(Object.getPrototypeOf(current));
}
return result;
}
@@ -123,10 +123,6 @@ Debug.State = {
var trace_compile = false; // Tracing all compile events?
var trace_debug_json = false; // Tracing all debug json packets?
var last_cmd = '';
-//var lol_is_enabled; // Set to true in d8.cc if LIVE_OBJECT_LIST is defined.
-var lol_next_dump_index = 0;
-var kDefaultLolLinesToPrintAtATime = 10;
-var kMaxLolLinesToPrintAtATime = 1000;
var repeat_cmd_line = '';
var is_running = true;
// Global variable used to store whether a handle was requested.
@@ -337,7 +333,7 @@ function DebugRequest(cmd_line) {
}
if ((cmd === undefined) || !cmd) {
- this.request_ = void 0;
+ this.request_ = UNDEFINED;
return;
}
@@ -496,7 +492,7 @@ function DebugRequest(cmd_line) {
case 'trace':
case 'tr':
// Return undefined to indicate command handled internally (no JSON).
- this.request_ = void 0;
+ this.request_ = UNDEFINED;
this.traceCommand_(args);
break;
@@ -504,16 +500,9 @@ function DebugRequest(cmd_line) {
case '?':
this.helpCommand_(args);
// Return undefined to indicate command handled internally (no JSON).
- this.request_ = void 0;
+ this.request_ = UNDEFINED;
break;
- case 'liveobjectlist':
- case 'lol':
- if (lol_is_enabled) {
- this.request_ = this.lolToJSONRequest_(args, is_repeating);
- break;
- }
-
default:
throw new Error('Unknown command "' + cmd + '"');
}
@@ -558,53 +547,10 @@ DebugRequest.prototype.createRequest = function(command) {
};
-// Note: we use detected command repetition as a signal for continuation here.
-DebugRequest.prototype.createLOLRequest = function(command,
- start_index,
- lines_to_dump,
- is_continuation) {
- if (is_continuation) {
- start_index = lol_next_dump_index;
- }
-
- if (lines_to_dump) {
- lines_to_dump = parseInt(lines_to_dump);
- } else {
- lines_to_dump = kDefaultLolLinesToPrintAtATime;
- }
- if (lines_to_dump > kMaxLolLinesToPrintAtATime) {
- lines_to_dump = kMaxLolLinesToPrintAtATime;
- }
-
- // Save the next start_index to dump from:
- lol_next_dump_index = start_index + lines_to_dump;
-
- var request = this.createRequest(command);
- request.arguments = {};
- request.arguments.start = start_index;
- request.arguments.count = lines_to_dump;
-
- return request;
-};
-
-
// Create a JSON request for the evaluation command.
DebugRequest.prototype.makeEvaluateJSONRequest_ = function(expression) {
lookup_handle = null;
- if (lol_is_enabled) {
- // Check if the expression is a obj id in the form @<obj id>.
- var obj_id_match = expression.match(/^@([0-9]+)$/);
- if (obj_id_match) {
- var obj_id = parseInt(obj_id_match[1]);
- // Build a dump request.
- var request = this.createRequest('getobj');
- request.arguments = {};
- request.arguments.obj_id = obj_id;
- return request.toJSONProtocol();
- }
- }
-
// Check if the expression is a handle id in the form #<handle>#.
var handle_match = expression.match(/^#([0-9]*)#$/);
if (handle_match) {
@@ -1074,7 +1020,7 @@ DebugRequest.prototype.changeBreakpointCommandToJSONRequest_ =
args.substring(nextPos + 1, args.length) : 'all';
if (!arg2) {
arg2 = 'all'; // if unspecified, set for all.
- } if (arg2 == 'unc') { // check for short cut.
+ } else if (arg2 == 'unc') { // check for short cut.
arg2 = 'uncaught';
}
excType = arg2;
@@ -1170,10 +1116,6 @@ DebugRequest.prototype.infoCommandToJSONRequest_ = function(args) {
// Build a evaluate request from the text command.
request = this.createRequest('frame');
last_cmd = 'info args';
- } else if (lol_is_enabled &&
- args && (args == 'liveobjectlist' || args == 'lol')) {
- // Build a evaluate request from the text command.
- return this.liveObjectListToJSONRequest_(null);
} else {
throw new Error('Invalid info arguments.');
}
@@ -1224,262 +1166,6 @@ DebugRequest.prototype.gcToJSONRequest_ = function(args) {
};
-// Args: [v[erbose]] [<N>] [i[ndex] <i>] [t[ype] <type>] [sp[ace] <space>]
-DebugRequest.prototype.lolMakeListRequest =
- function(cmd, args, first_arg_index, is_repeating) {
-
- var request;
- var start_index = 0;
- var dump_limit = void 0;
- var type_filter = void 0;
- var space_filter = void 0;
- var prop_filter = void 0;
- var is_verbose = false;
- var i;
-
- for (i = first_arg_index; i < args.length; i++) {
- var arg = args[i];
- // Check for [v[erbose]]:
- if (arg === 'verbose' || arg === 'v') {
- // Nothing to do. This is already implied by args.length > 3.
- is_verbose = true;
-
- // Check for [<N>]:
- } else if (arg.match(/^[0-9]+$/)) {
- dump_limit = arg;
- is_verbose = true;
-
- // Check for i[ndex] <i>:
- } else if (arg === 'index' || arg === 'i') {
- i++;
- if (args.length < i) {
- throw new Error('Missing index after ' + arg + '.');
- }
- start_index = parseInt(args[i]);
- // The user input start index starts at 1:
- if (start_index <= 0) {
- throw new Error('Invalid index ' + args[i] + '.');
- }
- start_index -= 1;
- is_verbose = true;
-
- // Check for t[ype] <type>:
- } else if (arg === 'type' || arg === 't') {
- i++;
- if (args.length < i) {
- throw new Error('Missing type after ' + arg + '.');
- }
- type_filter = args[i];
-
- // Check for space <heap space name>:
- } else if (arg === 'space' || arg === 'sp') {
- i++;
- if (args.length < i) {
- throw new Error('Missing space name after ' + arg + '.');
- }
- space_filter = args[i];
-
- // Check for property <prop name>:
- } else if (arg === 'property' || arg === 'prop') {
- i++;
- if (args.length < i) {
- throw new Error('Missing property name after ' + arg + '.');
- }
- prop_filter = args[i];
-
- } else {
- throw new Error('Unknown args at ' + arg + '.');
- }
- }
-
- // Build the verbose request:
- if (is_verbose) {
- request = this.createLOLRequest('lol-'+cmd,
- start_index,
- dump_limit,
- is_repeating);
- request.arguments.verbose = true;
- } else {
- request = this.createRequest('lol-'+cmd);
- request.arguments = {};
- }
-
- request.arguments.filter = {};
- if (type_filter) {
- request.arguments.filter.type = type_filter;
- }
- if (space_filter) {
- request.arguments.filter.space = space_filter;
- }
- if (prop_filter) {
- request.arguments.filter.prop = prop_filter;
- }
-
- return request;
-};
-
-
-function extractObjId(args) {
- var id = args;
- id = id.match(/^@([0-9]+)$/);
- if (id) {
- id = id[1];
- } else {
- throw new Error('Invalid obj id ' + args + '.');
- }
- return parseInt(id);
-}
-
-
-DebugRequest.prototype.lolToJSONRequest_ = function(args, is_repeating) {
- var request;
- // Use default command if one is not specified:
- if (!args) {
- args = 'info';
- }
-
- var orig_args = args;
- var first_arg_index;
-
- var arg, i;
- var args = args.split(/\s+/g);
- var cmd = args[0];
- var id;
-
- // Command: <id> [v[erbose]] ...
- if (cmd.match(/^[0-9]+$/)) {
- // Convert to the padded list command:
- // Command: l[ist] <dummy> <id> [v[erbose]] ...
-
- // Insert the implicit 'list' in front and process as normal:
- cmd = 'list';
- args.unshift(cmd);
- }
-
- switch(cmd) {
- // Command: c[apture]
- case 'capture':
- case 'c':
- request = this.createRequest('lol-capture');
- break;
-
- // Command: clear|d[elete] <id>|all
- case 'clear':
- case 'delete':
- case 'del': {
- if (args.length < 2) {
- throw new Error('Missing argument after ' + cmd + '.');
- } else if (args.length > 2) {
- throw new Error('Too many arguments after ' + cmd + '.');
- }
- id = args[1];
- if (id.match(/^[0-9]+$/)) {
- // Delete a specific lol record:
- request = this.createRequest('lol-delete');
- request.arguments = {};
- request.arguments.id = parseInt(id);
- } else if (id === 'all') {
- // Delete all:
- request = this.createRequest('lol-reset');
- } else {
- throw new Error('Invalid argument after ' + cmd + '.');
- }
- break;
- }
-
- // Command: diff <id1> <id2> [<dump options>]
- case 'diff':
- first_arg_index = 3;
-
- // Command: list <dummy> <id> [<dump options>]
- case 'list':
-
- // Command: ret[ainers] <obj id> [<dump options>]
- case 'retainers':
- case 'ret':
- case 'retaining-paths':
- case 'rp': {
- if (cmd === 'ret') cmd = 'retainers';
- else if (cmd === 'rp') cmd = 'retaining-paths';
-
- if (!first_arg_index) first_arg_index = 2;
-
- if (args.length < first_arg_index) {
- throw new Error('Too few arguments after ' + cmd + '.');
- }
-
- var request_cmd = (cmd === 'list') ? 'diff':cmd;
- request = this.lolMakeListRequest(request_cmd,
- args,
- first_arg_index,
- is_repeating);
-
- if (cmd === 'diff') {
- request.arguments.id1 = parseInt(args[1]);
- request.arguments.id2 = parseInt(args[2]);
- } else if (cmd == 'list') {
- request.arguments.id1 = 0;
- request.arguments.id2 = parseInt(args[1]);
- } else {
- request.arguments.id = extractObjId(args[1]);
- }
- break;
- }
-
- // Command: getid
- case 'getid': {
- request = this.createRequest('lol-getid');
- request.arguments = {};
- request.arguments.address = args[1];
- break;
- }
-
- // Command: inf[o] [<N>]
- case 'info':
- case 'inf': {
- if (args.length > 2) {
- throw new Error('Too many arguments after ' + cmd + '.');
- }
- // Built the info request:
- request = this.createLOLRequest('lol-info', 0, args[1], is_repeating);
- break;
- }
-
- // Command: path <obj id 1> <obj id 2>
- case 'path': {
- request = this.createRequest('lol-path');
- request.arguments = {};
- if (args.length > 2) {
- request.arguments.id1 = extractObjId(args[1]);
- request.arguments.id2 = extractObjId(args[2]);
- } else {
- request.arguments.id1 = 0;
- request.arguments.id2 = extractObjId(args[1]);
- }
- break;
- }
-
- // Command: print
- case 'print': {
- request = this.createRequest('lol-print');
- request.arguments = {};
- request.arguments.id = extractObjId(args[1]);
- break;
- }
-
- // Command: reset
- case 'reset': {
- request = this.createRequest('lol-reset');
- break;
- }
-
- default:
- throw new Error('Invalid arguments.');
- }
- return request.toJSONProtocol();
-};
-
-
// Create a JSON request for the threads command.
DebugRequest.prototype.threadsCommandToJSONRequest_ = function(args) {
// Build a threads request from the text command.
@@ -1545,7 +1231,6 @@ DebugRequest.prototype.helpCommand_ = function(args) {
print('inf[o] br[eak] - prints info about breakpoints in use');
print('inf[o] ar[gs] - prints info about arguments of the current function');
print('inf[o] lo[cals] - prints info about locals in the current function');
- print('inf[o] liveobjectlist|lol - same as \'lol info\'');
print('');
print('step [in | next | out| min [step count]]');
print('c[ontinue] - continue executing after a breakpoint');
@@ -1566,49 +1251,6 @@ DebugRequest.prototype.helpCommand_ = function(args) {
print('');
print('gc - runs the garbage collector');
print('');
-
- if (lol_is_enabled) {
- print('liveobjectlist|lol <command> - live object list tracking.');
- print(' where <command> can be:');
- print(' c[apture] - captures a LOL list.');
- print(' clear|del[ete] <id>|all - clears LOL of id <id>.');
- print(' If \'all\' is unspecified instead, will clear all.');
- print(' diff <id1> <id2> [<dump options>]');
- print(' - prints the diff between LOLs id1 and id2.');
- print(' - also see <dump options> below.');
- print(' getid <address> - gets the obj id for the specified address if available.');
- print(' The address must be in hex form prefixed with 0x.');
- print(' inf[o] [<N>] - lists summary info of all LOL lists.');
- print(' If N is specified, will print N items at a time.');
- print(' [l[ist]] <id> [<dump options>]');
- print(' - prints the listing of objects in LOL id.');
- print(' - also see <dump options> below.');
- print(' reset - clears all LOL lists.');
- print(' ret[ainers] <id> [<dump options>]');
- print(' - prints the list of retainers of obj id.');
- print(' - also see <dump options> below.');
- print(' path <id1> <id2> - prints the retaining path from obj id1 to id2.');
- print(' If only one id is specified, will print the path from');
- print(' roots to the specified object if available.');
- print(' print <id> - prints the obj for the specified obj id if available.');
- print('');
- print(' <dump options> includes:');
- print(' [v[erbose]] - do verbose dump.');
- print(' [<N>] - dump N items at a time. Implies verbose dump.');
- print(' If unspecified, N will default to '+
- kDefaultLolLinesToPrintAtATime+'. Max N is '+
- kMaxLolLinesToPrintAtATime+'.');
- print(' [i[ndex] <i>] - start dump from index i. Implies verbose dump.');
- print(' [t[ype] <type>] - filter by type.');
- print(' [sp[ace] <space name>] - filter by heap space where <space name> is one of');
- print(' { cell, code, lo, map, new, old-data, old-pointer }.');
- print('');
- print(' If the verbose option, or an option that implies a verbose dump');
- print(' is specified, then a verbose dump will requested. Else, a summary dump');
- print(' will be requested.');
- print('');
- }
-
print('trace compile');
// hidden command: trace debug json - toggles tracing of debug json packets
print('');
@@ -1709,237 +1351,6 @@ function refObjectToString_(protocolPackage, handle) {
}
-function decodeLolCaptureResponse(body) {
- var result;
- result = 'Captured live object list '+ body.id +
- ': count '+ body.count + ' size ' + body.size;
- return result;
-}
-
-
-function decodeLolDeleteResponse(body) {
- var result;
- result = 'Deleted live object list '+ body.id;
- return result;
-}
-
-
-function digitsIn(value) {
- var digits = 0;
- if (value === 0) value = 1;
- while (value >= 1) {
- digits++;
- value /= 10;
- }
- return digits;
-}
-
-
-function padding(value, max_digits) {
- var padding_digits = max_digits - digitsIn(value);
- var padding = '';
- while (padding_digits > 0) {
- padding += ' ';
- padding_digits--;
- }
- return padding;
-}
-
-
-function decodeLolInfoResponse(body) {
- var result;
- var lists = body.lists;
- var length = lists.length;
- var first_index = body.first_index + 1;
- var has_more = ((first_index + length) <= body.count);
- result = 'captured live object lists';
- if (has_more || (first_index != 1)) {
- result += ' ['+ length +' of '+ body.count +
- ': starting from '+ first_index +']';
- }
- result += ':\n';
- var max_digits = digitsIn(body.count);
- var last_count = 0;
- var last_size = 0;
- for (var i = 0; i < length; i++) {
- var entry = lists[i];
- var count = entry.count;
- var size = entry.size;
- var index = first_index + i;
- result += ' [' + padding(index, max_digits) + index + '] id '+ entry.id +
- ': count '+ count;
- if (last_count > 0) {
- result += '(+' + (count - last_count) + ')';
- }
- result += ' size '+ size;
- if (last_size > 0) {
- result += '(+' + (size - last_size) + ')';
- }
- result += '\n';
- last_count = count;
- last_size = size;
- }
- result += ' total: '+length+' lists\n';
- if (has_more) {
- result += ' -- press <enter> for more --\n';
- } else {
- repeat_cmd_line = '';
- }
- if (length === 0) result += ' none\n';
-
- return result;
-}
-
-
-function decodeLolListResponse(body, title) {
-
- var result;
- var total_count = body.count;
- var total_size = body.size;
- var length;
- var max_digits;
- var i;
- var entry;
- var index;
-
- var max_count_digits = digitsIn(total_count);
- var max_size_digits;
-
- var summary = body.summary;
- if (summary) {
-
- var roots_count = 0;
- var found_root = body.found_root || 0;
- var found_weak_root = body.found_weak_root || 0;
-
- // Print the summary result:
- result = 'summary of objects:\n';
- length = summary.length;
- if (found_root !== 0) {
- roots_count++;
- }
- if (found_weak_root !== 0) {
- roots_count++;
- }
- max_digits = digitsIn(length + roots_count);
- max_size_digits = digitsIn(total_size);
-
- index = 1;
- if (found_root !== 0) {
- result += ' [' + padding(index, max_digits) + index + '] ' +
- ' count '+ 1 + padding(0, max_count_digits) +
- ' '+ padding(0, max_size_digits+1) +
- ' : <root>\n';
- index++;
- }
- if (found_weak_root !== 0) {
- result += ' [' + padding(index, max_digits) + index + '] ' +
- ' count '+ 1 + padding(0, max_count_digits) +
- ' '+ padding(0, max_size_digits+1) +
- ' : <weak root>\n';
- index++;
- }
-
- for (i = 0; i < length; i++) {
- entry = summary[i];
- var count = entry.count;
- var size = entry.size;
- result += ' [' + padding(index, max_digits) + index + '] ' +
- ' count '+ count + padding(count, max_count_digits) +
- ' size '+ size + padding(size, max_size_digits) +
- ' : <' + entry.desc + '>\n';
- index++;
- }
- result += '\n total count: '+(total_count+roots_count)+'\n';
- if (body.size) {
- result += ' total size: '+body.size+'\n';
- }
-
- } else {
- // Print the full dump result:
- var first_index = body.first_index + 1;
- var elements = body.elements;
- length = elements.length;
- var has_more = ((first_index + length) <= total_count);
- result = title;
- if (has_more || (first_index != 1)) {
- result += ' ['+ length +' of '+ total_count +
- ': starting from '+ first_index +']';
- }
- result += ':\n';
- if (length === 0) result += ' none\n';
- max_digits = digitsIn(length);
-
- var max_id = 0;
- var max_size = 0;
- for (i = 0; i < length; i++) {
- entry = elements[i];
- if (entry.id > max_id) max_id = entry.id;
- if (entry.size > max_size) max_size = entry.size;
- }
- var max_id_digits = digitsIn(max_id);
- max_size_digits = digitsIn(max_size);
-
- for (i = 0; i < length; i++) {
- entry = elements[i];
- index = first_index + i;
- result += ' ['+ padding(index, max_digits) + index +']';
- if (entry.id !== 0) {
- result += ' @' + entry.id + padding(entry.id, max_id_digits) +
- ': size ' + entry.size + ', ' +
- padding(entry.size, max_size_digits) + entry.desc + '\n';
- } else {
- // Must be a root or weak root:
- result += ' ' + entry.desc + '\n';
- }
- }
- if (has_more) {
- result += ' -- press <enter> for more --\n';
- } else {
- repeat_cmd_line = '';
- }
- if (length === 0) result += ' none\n';
- }
-
- return result;
-}
-
-
-function decodeLolDiffResponse(body) {
- var title = 'objects';
- return decodeLolListResponse(body, title);
-}
-
-
-function decodeLolRetainersResponse(body) {
- var title = 'retainers for @' + body.id;
- return decodeLolListResponse(body, title);
-}
-
-
-function decodeLolPathResponse(body) {
- return body.path;
-}
-
-
-function decodeLolResetResponse(body) {
- return 'Reset all live object lists.';
-}
-
-
-function decodeLolGetIdResponse(body) {
- if (body.id == 0) {
- return 'Address is invalid, or object has been moved or collected';
- }
- return 'obj id is @' + body.id;
-}
-
-
-function decodeLolPrintResponse(body) {
- return body.dump;
-}
-
-
// Rounds number 'num' to 'length' decimal places.
function roundNumber(num, length) {
var factor = Math.pow(10, length);
@@ -2276,34 +1687,6 @@ function DebugResponseDetails(response) {
}
break;
- case 'lol-capture':
- details.text = decodeLolCaptureResponse(body);
- break;
- case 'lol-delete':
- details.text = decodeLolDeleteResponse(body);
- break;
- case 'lol-diff':
- details.text = decodeLolDiffResponse(body);
- break;
- case 'lol-getid':
- details.text = decodeLolGetIdResponse(body);
- break;
- case 'lol-info':
- details.text = decodeLolInfoResponse(body);
- break;
- case 'lol-print':
- details.text = decodeLolPrintResponse(body);
- break;
- case 'lol-reset':
- details.text = decodeLolResetResponse(body);
- break;
- case 'lol-retainers':
- details.text = decodeLolRetainersResponse(body);
- break;
- case 'lol-path':
- details.text = decodeLolPathResponse(body);
- break;
-
default:
details.text =
'Response for unknown command \'' + response.command() + '\'' +
@@ -2741,7 +2124,7 @@ function SimpleObjectToJSON_(object) {
var property_value_json;
switch (typeof property_value) {
case 'object':
- if (property_value === null) {
+ if (IS_NULL(property_value)) {
property_value_json = 'null';
} else if (typeof property_value.toJSONProtocol == 'function') {
property_value_json = property_value.toJSONProtocol(true);
@@ -2811,3 +2194,60 @@ function SimpleArrayToJSON_(array) {
json += ']';
return json;
}
+
+
+// A more universal stringify that supports more types than JSON.
+// Used by the d8 shell to output results.
+var stringifyDepthLimit = 4; // To avoid crashing on cyclic objects
+
+function Stringify(x, depth) {
+ if (depth === undefined)
+ depth = stringifyDepthLimit;
+ else if (depth === 0)
+ return "*";
+ switch (typeof x) {
+ case "undefined":
+ return "undefined";
+ case "boolean":
+ case "number":
+ case "function":
+ return x.toString();
+ case "string":
+ return "\"" + x.toString() + "\"";
+ case "symbol":
+ return "Symbol(" + (x.name ? Stringify(x.name, depth) : "") + ")"
+ case "object":
+ if (IS_NULL(x)) return "null";
+ if (x.constructor && x.constructor.name === "Array") {
+ var elems = [];
+ for (var i = 0; i < x.length; ++i) {
+ elems.push(
+ {}.hasOwnProperty.call(x, i) ? Stringify(x[i], depth - 1) : "");
+ }
+ return "[" + elems.join(", ") + "]";
+ }
+ try {
+ var string = String(x);
+ if (string && string !== "[object Object]") return string;
+ } catch(e) {}
+ var props = [];
+ for (var name in x) {
+ var desc = Object.getOwnPropertyDescriptor(x, name);
+ if (IS_UNDEFINED(desc)) continue;
+ if ("value" in desc) {
+ props.push(name + ": " + Stringify(desc.value, depth - 1));
+ }
+ if ("get" in desc) {
+ var getter = desc.get.toString();
+ props.push("get " + name + getter.slice(getter.indexOf('(')));
+ }
+ if ("set" in desc) {
+ var setter = desc.set.toString();
+ props.push("set " + name + setter.slice(setter.indexOf('(')));
+ }
+ }
+ return "{" + props.join(", ") + "}";
+ default:
+ return "[crazy non-standard shit]";
+ }
+}
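A rough sketch of what the Stringify helper above produces when d8 prints evaluation results (outputs are illustrative, not verbatim):

    Stringify([1, "abc", {x: 1}]);   // -> [1, "abc", {x: 1}]
    var o = {}; o.self = o;
    Stringify(o);                    // cycles bottom out as "*" once stringifyDepthLimit is reached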
diff --git a/deps/v8/src/data-flow.h b/deps/v8/src/data-flow.h
index 71f56e718..8ceccf67c 100644
--- a/deps/v8/src/data-flow.h
+++ b/deps/v8/src/data-flow.h
@@ -199,6 +199,63 @@ class BitVector: public ZoneObject {
uint32_t* data_;
};
+class GrowableBitVector BASE_EMBEDDED {
+ public:
+ class Iterator BASE_EMBEDDED {
+ public:
+ Iterator(const GrowableBitVector* target, Zone* zone)
+ : it_(target->bits_ == NULL
+ ? new(zone) BitVector(1, zone)
+ : target->bits_) { }
+ bool Done() const { return it_.Done(); }
+ void Advance() { it_.Advance(); }
+ int Current() const { return it_.Current(); }
+ private:
+ BitVector::Iterator it_;
+ };
+
+ GrowableBitVector() : bits_(NULL) { }
+ GrowableBitVector(int length, Zone* zone)
+ : bits_(new(zone) BitVector(length, zone)) { }
+
+ bool Contains(int value) const {
+ if (!InBitsRange(value)) return false;
+ return bits_->Contains(value);
+ }
+
+ void Add(int value, Zone* zone) {
+ EnsureCapacity(value, zone);
+ bits_->Add(value);
+ }
+
+ void Union(const GrowableBitVector& other, Zone* zone) {
+ for (Iterator it(&other, zone); !it.Done(); it.Advance()) {
+ Add(it.Current(), zone);
+ }
+ }
+
+ void Clear() { if (bits_ != NULL) bits_->Clear(); }
+
+ private:
+ static const int kInitialLength = 1024;
+
+ bool InBitsRange(int value) const {
+ return bits_ != NULL && bits_->length() > value;
+ }
+
+ void EnsureCapacity(int value, Zone* zone) {
+ if (InBitsRange(value)) return;
+ int new_length = bits_ == NULL ? kInitialLength : bits_->length();
+ while (new_length <= value) new_length *= 2;
+ BitVector* new_bits = new(zone) BitVector(new_length, zone);
+ if (bits_ != NULL) new_bits->CopyFrom(*bits_);
+ bits_ = new_bits;
+ }
+
+ BitVector* bits_;
+};
+
+
} } // namespace v8::internal
diff --git a/deps/v8/src/date.js b/deps/v8/src/date.js
index a54cb238c..1b128c3a0 100644
--- a/deps/v8/src/date.js
+++ b/deps/v8/src/date.js
@@ -25,27 +25,23 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
// This file relies on the fact that the following declarations have been made
// in v8natives.js:
// var $isFinite = GlobalIsFinite;
+var $Date = global.Date;
+
// -------------------------------------------------------------------
// This file contains date support implemented in JavaScript.
-// Keep reference to original values of some global properties. This
-// has the added benefit that the code in this file is isolated from
-// changes to these properties.
-var $Date = global.Date;
-
// Helper function to throw error.
function ThrowDateTypeError() {
throw new $TypeError('this is not a Date object.');
}
-var timezone_cache_time = $NaN;
+var timezone_cache_time = NAN;
var timezone_cache_timezone;
function LocalTimezone(t) {
@@ -70,10 +66,10 @@ function UTC(time) {
// ECMA 262 - 15.9.1.11
function MakeTime(hour, min, sec, ms) {
- if (!$isFinite(hour)) return $NaN;
- if (!$isFinite(min)) return $NaN;
- if (!$isFinite(sec)) return $NaN;
- if (!$isFinite(ms)) return $NaN;
+ if (!$isFinite(hour)) return NAN;
+ if (!$isFinite(min)) return NAN;
+ if (!$isFinite(sec)) return NAN;
+ if (!$isFinite(ms)) return NAN;
return TO_INTEGER(hour) * msPerHour
+ TO_INTEGER(min) * msPerMinute
+ TO_INTEGER(sec) * msPerSecond
@@ -94,7 +90,7 @@ function TimeInYear(year) {
// MakeDay(2007, -33, 1) --> MakeDay(2004, 3, 1)
// MakeDay(2007, 14, -50) --> MakeDay(2007, 8, 11)
function MakeDay(year, month, date) {
- if (!$isFinite(year) || !$isFinite(month) || !$isFinite(date)) return $NaN;
+ if (!$isFinite(year) || !$isFinite(month) || !$isFinite(date)) return NAN;
// Convert to integer and map -0 to 0.
year = TO_INTEGER_MAP_MINUS_ZERO(year);
@@ -103,11 +99,11 @@ function MakeDay(year, month, date) {
if (year < kMinYear || year > kMaxYear ||
month < kMinMonth || month > kMaxMonth) {
- return $NaN;
+ return NAN;
}
// Now we rely on year and month being SMIs.
- return %DateMakeDay(year, month) + date - 1;
+ return %DateMakeDay(year | 0, month | 0) + date - 1;
}
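A quick worked example of the time arithmetic above, assuming the usual millisecond constants from the date macros (msPerHour = 3600000, msPerMinute = 60000, msPerSecond = 1000):

    // MakeTime(2, 30, 5, 0)
    //   = 2 * 3600000 + 30 * 60000 + 5 * 1000 + 0
    //   = 7200000 + 1800000 + 5000
    //   = 9005000 ms into the day; any non-finite component yields NAN instead.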
@@ -119,15 +115,15 @@ function MakeDate(day, time) {
// is no way that the time can be within range even after UTC
// conversion we return NaN immediately instead of relying on
// TimeClip to do it.
- if ($abs(time) > MAX_TIME_BEFORE_UTC) return $NaN;
+ if ($abs(time) > MAX_TIME_BEFORE_UTC) return NAN;
return time;
}
// ECMA 262 - 15.9.1.14
function TimeClip(time) {
- if (!$isFinite(time)) return $NaN;
- if ($abs(time) > MAX_TIME_MS) return $NaN;
+ if (!$isFinite(time)) return NAN;
+ if ($abs(time) > MAX_TIME_MS) return NAN;
return TO_INTEGER(time);
}
@@ -136,13 +132,13 @@ function TimeClip(time) {
// strings over and over again.
var Date_cache = {
// Cached time value.
- time: $NaN,
+ time: NAN,
// String input for which the cached time is valid.
string: null
};
-%SetCode($Date, function(year, month, date, hours, minutes, seconds, ms) {
+function DateConstructor(year, month, date, hours, minutes, seconds, ms) {
if (!%_IsConstructCall()) {
// ECMA 262 - 15.9.2
return (new $Date()).toString();
@@ -199,10 +195,7 @@ var Date_cache = {
value = MakeDate(day, time);
SET_LOCAL_DATE_VALUE(this, value);
}
-});
-
-
-%FunctionSetPrototype($Date, new $Date($NaN));
+}
var WeekDays = ['Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat'];
@@ -276,7 +269,7 @@ var parse_buffer = $Array(8);
// ECMA 262 - 15.9.4.2
function DateParse(string) {
var arr = %DateParseString(ToString(string), parse_buffer);
- if (IS_NULL(arr)) return $NaN;
+ if (IS_NULL(arr)) return NAN;
var day = MakeDay(arr[0], arr[1], arr[2]);
var time = MakeTime(arr[3], arr[4], arr[5], arr[6]);
@@ -678,7 +671,7 @@ function DateGetYear() {
function DateSetYear(year) {
CHECK_DATE(this);
year = ToNumber(year);
- if (NUMBER_IS_NAN(year)) return SET_UTC_DATE_VALUE(this, $NaN);
+ if (NUMBER_IS_NAN(year)) return SET_UTC_DATE_VALUE(this, NAN);
year = (0 <= TO_INTEGER(year) && TO_INTEGER(year) <= 99)
? 1900 + TO_INTEGER(year) : year;
var t = LOCAL_DATE_VALUE(this);
@@ -753,12 +746,12 @@ function DateToJSON(key) {
function ResetDateCache() {
// Reset the timezone cache:
- timezone_cache_time = $NaN;
+ timezone_cache_time = NAN;
timezone_cache_timezone = undefined;
// Reset the date cache:
cache = Date_cache;
- cache.time = $NaN;
+ cache.time = NAN;
cache.string = null;
}
@@ -767,6 +760,10 @@ function ResetDateCache() {
function SetUpDate() {
%CheckIsBootstrapping();
+
+ %SetCode($Date, DateConstructor);
+ %FunctionSetPrototype($Date, new $Date(NAN));
+
// Set up non-enumerable properties of the Date object itself.
InstallFunctions($Date, DONT_ENUM, $Array(
"UTC", DateUTC,
diff --git a/deps/v8/src/dateparser.cc b/deps/v8/src/dateparser.cc
index 4a0721fe8..3964e8117 100644
--- a/deps/v8/src/dateparser.cc
+++ b/deps/v8/src/dateparser.cc
@@ -112,6 +112,7 @@ bool DateParser::TimeComposer::Write(FixedArray* output) {
return true;
}
+
bool DateParser::TimeZoneComposer::Write(FixedArray* output) {
if (sign_ != kNone) {
if (hour_ == kNone) hour_ = 0;
diff --git a/deps/v8/src/debug-agent.cc b/deps/v8/src/debug-agent.cc
index e85622277..51823aaf2 100644
--- a/deps/v8/src/debug-agent.cc
+++ b/deps/v8/src/debug-agent.cc
@@ -25,12 +25,12 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#ifdef ENABLE_DEBUGGER_SUPPORT
#include "v8.h"
#include "debug.h"
#include "debug-agent.h"
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
+#include "platform/socket.h"
namespace v8 {
namespace internal {
@@ -38,16 +38,36 @@ namespace internal {
// Public V8 debugger API message handler function. This function just delegates
 // to the debugger agent through its data parameter.
void DebuggerAgentMessageHandler(const v8::Debug::Message& message) {
- DebuggerAgent* agent = Isolate::Current()->debugger_agent_instance();
+ Isolate* isolate = reinterpret_cast<Isolate*>(message.GetIsolate());
+ DebuggerAgent* agent = isolate->debugger_agent_instance();
ASSERT(agent != NULL);
agent->DebuggerMessage(message);
}
+DebuggerAgent::DebuggerAgent(Isolate* isolate, const char* name, int port)
+ : Thread(name),
+ isolate_(isolate),
+ name_(StrDup(name)),
+ port_(port),
+ server_(new Socket),
+ terminate_(false),
+ session_(NULL),
+ terminate_now_(0),
+ listening_(0) {
+ ASSERT(isolate_->debugger_agent_instance() == NULL);
+ isolate_->set_debugger_agent_instance(this);
+}
+
+
+DebuggerAgent::~DebuggerAgent() {
+ isolate_->set_debugger_agent_instance(NULL);
+ delete server_;
+}
+
+
// Debugger agent main thread.
void DebuggerAgent::Run() {
- const int kOneSecondInMicros = 1000000;
-
// Allow this socket to reuse port even if still in TIME_WAIT.
server_->SetReuseAddress(true);
@@ -60,16 +80,20 @@ void DebuggerAgent::Run() {
// would be that the port is already in use so this avoids a busy loop and
 // makes the agent take over the port when it becomes free.
if (!bound) {
+ const TimeDelta kTimeout = TimeDelta::FromSeconds(1);
PrintF("Failed to open socket on port %d, "
- "waiting %d ms before retrying\n", port_, kOneSecondInMicros / 1000);
- terminate_now_->Wait(kOneSecondInMicros);
+ "waiting %d ms before retrying\n", port_,
+ static_cast<int>(kTimeout.InMilliseconds()));
+ if (!terminate_now_.WaitFor(kTimeout)) {
+ if (terminate_) return;
+ }
}
}
// Accept connections on the bound port.
while (!terminate_) {
bool ok = server_->Listen(1);
- listening_->Signal();
+ listening_.Signal();
if (ok) {
// Accept the new connection.
Socket* client = server_->Accept();
@@ -89,7 +113,7 @@ void DebuggerAgent::Shutdown() {
// Signal termination and make the server exit either its listen call or its
// binding loop. This makes sure that no new sessions can be established.
- terminate_now_->Signal();
+ terminate_now_.Signal();
server_->Shutdown();
Join();
@@ -99,19 +123,21 @@ void DebuggerAgent::Shutdown() {
void DebuggerAgent::WaitUntilListening() {
- listening_->Wait();
+ listening_.Wait();
}
static const char* kCreateSessionMessage =
"Remote debugging session already active\r\n";
void DebuggerAgent::CreateSession(Socket* client) {
- ScopedLock with(session_access_);
+ LockGuard<RecursiveMutex> session_access_guard(&session_access_);
// If another session is already established terminate this one.
if (session_ != NULL) {
- client->Send(kCreateSessionMessage, StrLength(kCreateSessionMessage));
+ int len = StrLength(kCreateSessionMessage);
+ int res = client->Send(kCreateSessionMessage, len);
delete client;
+ USE(res);
return;
}
@@ -123,7 +149,7 @@ void DebuggerAgent::CreateSession(Socket* client) {
void DebuggerAgent::CloseSession() {
- ScopedLock with(session_access_);
+ LockGuard<RecursiveMutex> session_access_guard(&session_access_);
// Terminate the session.
if (session_ != NULL) {
@@ -136,7 +162,7 @@ void DebuggerAgent::CloseSession() {
void DebuggerAgent::DebuggerMessage(const v8::Debug::Message& message) {
- ScopedLock with(session_access_);
+ LockGuard<RecursiveMutex> session_access_guard(&session_access_);
// Forward the message handling to the session.
if (session_ != NULL) {
@@ -154,7 +180,7 @@ void DebuggerAgent::OnSessionClosed(DebuggerAgentSession* session) {
}
// Terminate the session.
- ScopedLock with(session_access_);
+ LockGuard<RecursiveMutex> session_access_guard(&session_access_);
ASSERT(session == session_);
if (session == session_) {
session_->Shutdown();
@@ -192,21 +218,14 @@ void DebuggerAgentSession::Run() {
}
// Convert UTF-8 to UTF-16.
- unibrow::Utf8InputBuffer<> buf(msg, StrLength(msg));
- int len = 0;
- while (buf.has_more()) {
- buf.GetNext();
- len++;
- }
- ScopedVector<int16_t> temp(len + 1);
- buf.Reset(msg, StrLength(msg));
- for (int i = 0; i < len; i++) {
- temp[i] = buf.GetNext();
- }
+ unibrow::Utf8Decoder<128> decoder(msg, StrLength(msg));
+ int utf16_length = decoder.Utf16Length();
+ ScopedVector<uint16_t> temp(utf16_length + 1);
+ decoder.WriteUtf16(temp.start(), utf16_length);
// Send the request received to the debugger.
- v8::Debug::SendCommand(reinterpret_cast<const uint16_t *>(temp.start()),
- len,
+ v8::Debug::SendCommand(temp.start(),
+ utf16_length,
NULL,
reinterpret_cast<v8::Isolate*>(agent_->isolate()));
@@ -233,7 +252,7 @@ void DebuggerAgentSession::Shutdown() {
const char* const DebuggerAgentUtil::kContentLength = "Content-Length";
-SmartArrayPointer<char> DebuggerAgentUtil::ReceiveMessage(const Socket* conn) {
+SmartArrayPointer<char> DebuggerAgentUtil::ReceiveMessage(Socket* conn) {
int received;
// Read header.
@@ -250,7 +269,7 @@ SmartArrayPointer<char> DebuggerAgentUtil::ReceiveMessage(const Socket* conn) {
prev_c = c;
received = conn->Receive(&c, 1);
if (received == 0) {
- PrintF("Error %d\n", Socket::LastError());
+ PrintF("Error %d\n", Socket::GetLastError());
return SmartArrayPointer<char>();
}
@@ -312,7 +331,7 @@ SmartArrayPointer<char> DebuggerAgentUtil::ReceiveMessage(const Socket* conn) {
char* buffer = NewArray<char>(content_length + 1);
received = ReceiveAll(conn, buffer, content_length);
if (received < content_length) {
- PrintF("Error %d\n", Socket::LastError());
+ PrintF("Error %d\n", Socket::GetLastError());
return SmartArrayPointer<char>();
}
buffer[content_length] = '\0';
@@ -321,7 +340,7 @@ SmartArrayPointer<char> DebuggerAgentUtil::ReceiveMessage(const Socket* conn) {
}
-bool DebuggerAgentUtil::SendConnectMessage(const Socket* conn,
+bool DebuggerAgentUtil::SendConnectMessage(Socket* conn,
const char* embedding_host) {
static const int kBufferSize = 80;
char buffer[kBufferSize]; // Sending buffer.
@@ -367,7 +386,7 @@ bool DebuggerAgentUtil::SendConnectMessage(const Socket* conn,
}
-bool DebuggerAgentUtil::SendMessage(const Socket* conn,
+bool DebuggerAgentUtil::SendMessage(Socket* conn,
const Vector<uint16_t> message) {
static const int kBufferSize = 80;
char buffer[kBufferSize]; // Sending buffer both for header and body.
@@ -382,14 +401,17 @@ bool DebuggerAgentUtil::SendMessage(const Socket* conn,
}
// Send the header.
- int len;
- len = OS::SNPrintF(Vector<char>(buffer, kBufferSize),
- "%s: %d\r\n", kContentLength, utf8_len);
- conn->Send(buffer, len);
+ int len = OS::SNPrintF(Vector<char>(buffer, kBufferSize),
+ "%s: %d\r\n", kContentLength, utf8_len);
+ if (conn->Send(buffer, len) < len) {
+ return false;
+ }
// Terminate header with empty line.
len = OS::SNPrintF(Vector<char>(buffer, kBufferSize), "\r\n");
- conn->Send(buffer, len);
+ if (conn->Send(buffer, len) < len) {
+ return false;
+ }
// Send message body as UTF-8.
int buffer_position = 0; // Current buffer position.
@@ -409,13 +431,19 @@ bool DebuggerAgentUtil::SendMessage(const Socket* conn,
const int kEncodedSurrogateLength =
unibrow::Utf16::kUtf8BytesToCodeASurrogate;
ASSERT(buffer_position >= kEncodedSurrogateLength);
- conn->Send(buffer, buffer_position - kEncodedSurrogateLength);
+ len = buffer_position - kEncodedSurrogateLength;
+ if (conn->Send(buffer, len) < len) {
+ return false;
+ }
for (int i = 0; i < kEncodedSurrogateLength; i++) {
buffer[i] = buffer[buffer_position + i];
}
buffer_position = kEncodedSurrogateLength;
} else {
- conn->Send(buffer, buffer_position);
+ len = buffer_position;
+ if (conn->Send(buffer, len) < len) {
+ return false;
+ }
buffer_position = 0;
}
}
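For orientation, the framing these SendMessage overloads write to the socket is just a Content-Length header, a blank line, and the UTF-8 body; a client-side sketch of the same framing (request shape is illustrative):

    var body = JSON.stringify({seq: 1, type: "request", command: "version"});
    var framed = "Content-Length: " + body.length + "\r\n" +  // header; byte length for ASCII bodies
                 "\r\n" +                                      // blank line terminates the header
                 body;                                         // message body sent as UTF-8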
@@ -426,7 +454,7 @@ bool DebuggerAgentUtil::SendMessage(const Socket* conn,
}
-bool DebuggerAgentUtil::SendMessage(const Socket* conn,
+bool DebuggerAgentUtil::SendMessage(Socket* conn,
const v8::Handle<v8::String> request) {
static const int kBufferSize = 80;
char buffer[kBufferSize]; // Sending buffer both for header and body.
@@ -435,24 +463,30 @@ bool DebuggerAgentUtil::SendMessage(const Socket* conn,
v8::String::Utf8Value utf8_request(request);
// Send the header.
- int len;
- len = OS::SNPrintF(Vector<char>(buffer, kBufferSize),
- "Content-Length: %d\r\n", utf8_request.length());
- conn->Send(buffer, len);
+ int len = OS::SNPrintF(Vector<char>(buffer, kBufferSize),
+ "Content-Length: %d\r\n", utf8_request.length());
+ if (conn->Send(buffer, len) < len) {
+ return false;
+ }
// Terminate header with empty line.
len = OS::SNPrintF(Vector<char>(buffer, kBufferSize), "\r\n");
- conn->Send(buffer, len);
+ if (conn->Send(buffer, len) < len) {
+ return false;
+ }
// Send message body as UTF-8.
- conn->Send(*utf8_request, utf8_request.length());
+ len = utf8_request.length();
+ if (conn->Send(*utf8_request, len) < len) {
+ return false;
+ }
return true;
}
 // Receive the full buffer before returning unless an error occurs.
-int DebuggerAgentUtil::ReceiveAll(const Socket* conn, char* data, int len) {
+int DebuggerAgentUtil::ReceiveAll(Socket* conn, char* data, int len) {
int total_received = 0;
while (total_received < len) {
int received = conn->Receive(data + total_received, len - total_received);
diff --git a/deps/v8/src/debug-agent.h b/deps/v8/src/debug-agent.h
index 61151900f..138e51acc 100644
--- a/deps/v8/src/debug-agent.h
+++ b/deps/v8/src/debug-agent.h
@@ -37,27 +37,15 @@ namespace internal {
 // Forward declarations.
class DebuggerAgentSession;
+class Socket;
// Debugger agent which starts a socket listener on the debugger port and
// handles connection from a remote debugger.
class DebuggerAgent: public Thread {
public:
- DebuggerAgent(const char* name, int port)
- : Thread(name),
- isolate_(Isolate::Current()),
- name_(StrDup(name)), port_(port),
- server_(OS::CreateSocket()), terminate_(false),
- session_access_(OS::CreateMutex()), session_(NULL),
- terminate_now_(OS::CreateSemaphore(0)),
- listening_(OS::CreateSemaphore(0)) {
- ASSERT(isolate_->debugger_agent_instance() == NULL);
- isolate_->set_debugger_agent_instance(this);
- }
- ~DebuggerAgent() {
- isolate_->set_debugger_agent_instance(NULL);
- delete server_;
- }
+ DebuggerAgent(Isolate* isolate, const char* name, int port);
+ ~DebuggerAgent();
void Shutdown();
void WaitUntilListening();
@@ -76,10 +64,10 @@ class DebuggerAgent: public Thread {
int port_; // Port to use for the agent.
Socket* server_; // Server socket for listen/accept.
bool terminate_; // Termination flag.
- Mutex* session_access_; // Mutex guarging access to session_.
+ RecursiveMutex session_access_; // Mutex guarding access to session_.
DebuggerAgentSession* session_; // Current active session if any.
- Semaphore* terminate_now_; // Semaphore to signal termination.
- Semaphore* listening_;
+ Semaphore terminate_now_; // Semaphore to signal termination.
+ Semaphore listening_;
friend class DebuggerAgentSession;
friend void DebuggerAgentMessageHandler(const v8::Debug::Message& message);
@@ -116,13 +104,11 @@ class DebuggerAgentUtil {
public:
static const char* const kContentLength;
- static SmartArrayPointer<char> ReceiveMessage(const Socket* conn);
- static bool SendConnectMessage(const Socket* conn,
- const char* embedding_host);
- static bool SendMessage(const Socket* conn, const Vector<uint16_t> message);
- static bool SendMessage(const Socket* conn,
- const v8::Handle<v8::String> message);
- static int ReceiveAll(const Socket* conn, char* data, int len);
+ static SmartArrayPointer<char> ReceiveMessage(Socket* conn);
+ static bool SendConnectMessage(Socket* conn, const char* embedding_host);
+ static bool SendMessage(Socket* conn, const Vector<uint16_t> message);
+ static bool SendMessage(Socket* conn, const v8::Handle<v8::String> message);
+ static int ReceiveAll(Socket* conn, char* data, int len);
};
} } // namespace v8::internal
diff --git a/deps/v8/src/debug-debugger.js b/deps/v8/src/debug-debugger.js
index dfad902d7..b159ae3b2 100644
--- a/deps/v8/src/debug-debugger.js
+++ b/deps/v8/src/debug-debugger.js
@@ -71,6 +71,13 @@ Debug.ScriptBreakPointType = { ScriptId: 0,
ScriptName: 1,
ScriptRegExp: 2 };
+// The different types of breakpoint position alignments.
+// Must match BreakPositionAlignment in debug.h.
+Debug.BreakPositionAlignment = {
+ Statement: 0,
+ BreakPosition: 1
+};
+
function ScriptTypeFlag(type) {
return (1 << type);
}
@@ -110,7 +117,6 @@ var debugger_flags = {
}
},
};
-var lol_is_enabled = %HasLOLEnabled();
// Create a new break point object and add it to the list of break points.
@@ -252,7 +258,7 @@ function IsBreakPointTriggered(break_id, break_point) {
// script name or script id and the break point is represented as line and
// column.
function ScriptBreakPoint(type, script_id_or_name, opt_line, opt_column,
- opt_groupId) {
+ opt_groupId, opt_position_alignment) {
this.type_ = type;
if (type == Debug.ScriptBreakPointType.ScriptId) {
this.script_id_ = script_id_or_name;
@@ -266,6 +272,8 @@ function ScriptBreakPoint(type, script_id_or_name, opt_line, opt_column,
this.line_ = opt_line || 0;
this.column_ = opt_column;
this.groupId_ = opt_groupId;
+ this.position_alignment_ = IS_UNDEFINED(opt_position_alignment)
+ ? Debug.BreakPositionAlignment.Statement : opt_position_alignment;
this.hit_count_ = 0;
this.active_ = true;
this.condition_ = null;
@@ -277,7 +285,8 @@ function ScriptBreakPoint(type, script_id_or_name, opt_line, opt_column,
 // Creates a clone of the script break point that is linked to another script.
ScriptBreakPoint.prototype.cloneForOtherScript = function (other_script) {
var copy = new ScriptBreakPoint(Debug.ScriptBreakPointType.ScriptId,
- other_script.id, this.line_, this.column_, this.groupId_);
+ other_script.id, this.line_, this.column_, this.groupId_,
+ this.position_alignment_);
copy.number_ = next_break_point_number++;
script_break_points.push(copy);
@@ -439,12 +448,14 @@ ScriptBreakPoint.prototype.set = function (script) {
// If the position is not found in the script (the script might be shorter
// than it used to be) just ignore it.
- if (position === null) return;
+ if (IS_NULL(position)) return;
// Create a break point object and set the break point.
break_point = MakeBreakPoint(position, this);
break_point.setIgnoreCount(this.ignoreCount());
- var actual_position = %SetScriptBreakPoint(script, position, break_point);
+ var actual_position = %SetScriptBreakPoint(script, position,
+ this.position_alignment_,
+ break_point);
if (IS_UNDEFINED(actual_position)) {
actual_position = position;
}
@@ -510,9 +521,11 @@ Debug.breakExecution = function(f) {
%Break();
};
-Debug.breakLocations = function(f) {
+Debug.breakLocations = function(f, opt_position_aligment) {
if (!IS_FUNCTION(f)) throw new Error('Parameters have wrong types.');
- return %GetBreakLocations(f);
+ var position_aligment = IS_UNDEFINED(opt_position_aligment)
+ ? Debug.BreakPositionAlignment.Statement : opt_position_aligment;
+ return %GetBreakLocations(f, position_aligment);
};
// Returns a Script object. If the parameter is a function the return value
@@ -675,7 +688,8 @@ Debug.setBreakPoint = function(func, opt_line, opt_column, opt_condition) {
Debug.setBreakPointByScriptIdAndPosition = function(script_id, position,
- condition, enabled)
+ condition, enabled,
+ opt_position_alignment)
{
break_point = MakeBreakPoint(position);
break_point.setCondition(condition);
@@ -683,10 +697,12 @@ Debug.setBreakPointByScriptIdAndPosition = function(script_id, position,
break_point.disable();
}
var scripts = this.scripts();
+ var position_alignment = IS_UNDEFINED(opt_position_alignment)
+ ? Debug.BreakPositionAlignment.Statement : opt_position_alignment;
for (var i = 0; i < scripts.length; i++) {
if (script_id == scripts[i].id) {
break_point.actual_position = %SetScriptBreakPoint(scripts[i], position,
- break_point);
+ position_alignment, break_point);
break;
}
}
@@ -781,11 +797,11 @@ Debug.findScriptBreakPoint = function(break_point_number, remove) {
// specified source line and column within that line.
Debug.setScriptBreakPoint = function(type, script_id_or_name,
opt_line, opt_column, opt_condition,
- opt_groupId) {
+ opt_groupId, opt_position_alignment) {
// Create script break point object.
var script_break_point =
new ScriptBreakPoint(type, script_id_or_name, opt_line, opt_column,
- opt_groupId);
+ opt_groupId, opt_position_alignment);
// Assign number to the new script break point and add it.
script_break_point.number_ = next_break_point_number++;
@@ -807,10 +823,12 @@ Debug.setScriptBreakPoint = function(type, script_id_or_name,
Debug.setScriptBreakPointById = function(script_id,
opt_line, opt_column,
- opt_condition, opt_groupId) {
+ opt_condition, opt_groupId,
+ opt_position_alignment) {
return this.setScriptBreakPoint(Debug.ScriptBreakPointType.ScriptId,
script_id, opt_line, opt_column,
- opt_condition, opt_groupId);
+ opt_condition, opt_groupId,
+ opt_position_alignment);
};
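With opt_position_alignment now threaded through, a caller can pin a break point to an exact break position instead of the enclosing statement; a hedged usage sketch:

    // Break exactly at the given position rather than snapping to the statement start.
    Debug.setScriptBreakPointById(script.id, 10, 0,
        undefined, undefined,                       // no condition, no group id
        Debug.BreakPositionAlignment.BreakPosition);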
@@ -894,11 +912,11 @@ Debug.isBreakOnUncaughtException = function() {
return !!%IsBreakOnException(Debug.ExceptionBreak.Uncaught);
};
-Debug.showBreakPoints = function(f, full) {
+Debug.showBreakPoints = function(f, full, opt_position_alignment) {
if (!IS_FUNCTION(f)) throw new Error('Parameters have wrong types.');
var source = full ? this.scriptSource(f) : this.source(f);
var offset = full ? this.sourcePosition(f) : 0;
- var locations = this.breakLocations(f);
+ var locations = this.breakLocations(f, opt_position_alignment);
if (!locations) return source;
locations.sort(function(x, y) { return x - y; });
var result = "";
@@ -939,12 +957,17 @@ function ExecutionState(break_id) {
this.selected_frame = 0;
}
-ExecutionState.prototype.prepareStep = function(opt_action, opt_count) {
+ExecutionState.prototype.prepareStep = function(opt_action, opt_count,
+ opt_callframe) {
var action = Debug.StepAction.StepIn;
if (!IS_UNDEFINED(opt_action)) action = %ToNumber(opt_action);
var count = opt_count ? %ToNumber(opt_count) : 1;
+ var callFrameId = 0;
+ if (!IS_UNDEFINED(opt_callframe)) {
+ callFrameId = opt_callframe.details_.frameId();
+ }
- return %PrepareStep(this.break_id, action, count);
+ return %PrepareStep(this.break_id, action, count, callFrameId);
};
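prepareStep can now be scoped to a particular frame via the new opt_callframe parameter; a hedged sketch from inside a debug event handler:

    // Step over the next statement relative to the top frame.
    var frame = exec_state.frame(0);
    exec_state.prepareStep(Debug.StepAction.StepNext, 1, frame);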
ExecutionState.prototype.evaluateGlobal = function(source, disable_break,
@@ -1306,9 +1329,12 @@ ProtocolMessage.prototype.setOption = function(name, value) {
};
-ProtocolMessage.prototype.failed = function(message) {
+ProtocolMessage.prototype.failed = function(message, opt_details) {
this.success = false;
this.message = message;
+ if (IS_OBJECT(opt_details)) {
+ this.error_details = opt_details;
+ }
};
@@ -1355,6 +1381,9 @@ ProtocolMessage.prototype.toJSONProtocol = function() {
if (this.message) {
json.message = this.message;
}
+ if (this.error_details) {
+ json.error_details = this.error_details;
+ }
json.running = this.running;
return JSON.stringify(json);
};
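When failed() is given a details object, toJSONProtocol now surfaces it next to the message; an illustrative failed changelive response (field contents are assumptions, only error_details is introduced by this change):

    {
      "seq": 12, "request_seq": 7, "type": "response", "command": "changelive",
      "success": false,
      "message": "LiveEdit failure",
      "error_details": { ... },   // whatever Debug.LiveEdit.Failure carried in e.details
      "running": true
    }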
@@ -1431,8 +1460,6 @@ DebugCommandProcessor.prototype.processDebugJSONRequest = function(
this.setVariableValueRequest_(request, response);
} else if (request.command == 'evaluate') {
this.evaluateRequest_(request, response);
- } else if (lol_is_enabled && request.command == 'getobj') {
- this.getobjRequest_(request, response);
} else if (request.command == 'lookup') {
this.lookupRequest_(request, response);
} else if (request.command == 'references') {
@@ -1447,8 +1474,6 @@ DebugCommandProcessor.prototype.processDebugJSONRequest = function(
this.suspendRequest_(request, response);
} else if (request.command == 'version') {
this.versionRequest_(request, response);
- } else if (request.command == 'profile') {
- this.profileRequest_(request, response);
} else if (request.command == 'changelive') {
this.changeLiveRequest_(request, response);
} else if (request.command == 'restartframe') {
@@ -1462,28 +1487,6 @@ DebugCommandProcessor.prototype.processDebugJSONRequest = function(
} else if (request.command == 'gc') {
this.gcRequest_(request, response);
- // LiveObjectList tools:
- } else if (lol_is_enabled && request.command == 'lol-capture') {
- this.lolCaptureRequest_(request, response);
- } else if (lol_is_enabled && request.command == 'lol-delete') {
- this.lolDeleteRequest_(request, response);
- } else if (lol_is_enabled && request.command == 'lol-diff') {
- this.lolDiffRequest_(request, response);
- } else if (lol_is_enabled && request.command == 'lol-getid') {
- this.lolGetIdRequest_(request, response);
- } else if (lol_is_enabled && request.command == 'lol-info') {
- this.lolInfoRequest_(request, response);
- } else if (lol_is_enabled && request.command == 'lol-reset') {
- this.lolResetRequest_(request, response);
- } else if (lol_is_enabled && request.command == 'lol-retainers') {
- this.lolRetainersRequest_(request, response);
- } else if (lol_is_enabled && request.command == 'lol-path') {
- this.lolPathRequest_(request, response);
- } else if (lol_is_enabled && request.command == 'lol-print') {
- this.lolPrintRequest_(request, response);
- } else if (lol_is_enabled && request.command == 'lol-stats') {
- this.lolStatsRequest_(request, response);
-
} else {
throw new Error('Unknown command "' + request.command + '" in request');
}
@@ -2061,7 +2064,7 @@ DebugCommandProcessor.resolveValue_ = function(value_description) {
} else if ("value" in value_description) {
return value_description.value;
} else if (value_description.type == UNDEFINED_TYPE) {
- return void 0;
+ return UNDEFINED;
} else if (value_description.type == NULL_TYPE) {
return null;
} else {
@@ -2137,16 +2140,14 @@ DebugCommandProcessor.prototype.evaluateRequest_ = function(request, response) {
additional_context_object = {};
for (var i = 0; i < additional_context.length; i++) {
var mapping = additional_context[i];
- if (!IS_STRING(mapping.name) || !IS_NUMBER(mapping.handle)) {
+
+ if (!IS_STRING(mapping.name)) {
return response.failed("Context element #" + i +
- " must contain name:string and handle:number");
- }
- var context_value_mirror = LookupMirror(mapping.handle);
- if (!context_value_mirror) {
- return response.failed("Context object '" + mapping.name +
- "' #" + mapping.handle + "# not found");
+ " doesn't contain name:string property");
}
- additional_context_object[mapping.name] = context_value_mirror.value();
+
+ var raw_value = DebugCommandProcessor.resolveValue_(mapping);
+ additional_context_object[mapping.name] = raw_value;
}
}
@@ -2187,24 +2188,6 @@ DebugCommandProcessor.prototype.evaluateRequest_ = function(request, response) {
};
-DebugCommandProcessor.prototype.getobjRequest_ = function(request, response) {
- if (!request.arguments) {
- return response.failed('Missing arguments');
- }
-
- // Pull out arguments.
- var obj_id = request.arguments.obj_id;
-
- // Check for legal arguments.
- if (IS_UNDEFINED(obj_id)) {
- return response.failed('Argument "obj_id" missing');
- }
-
- // Dump the object.
- response.body = MakeMirror(%GetLOLObj(obj_id));
-};
-
-
DebugCommandProcessor.prototype.lookupRequest_ = function(request, response) {
if (!request.arguments) {
return response.failed('Missing arguments');
@@ -2420,18 +2403,6 @@ DebugCommandProcessor.prototype.versionRequest_ = function(request, response) {
};
-DebugCommandProcessor.prototype.profileRequest_ = function(request, response) {
- if (request.arguments.command == 'resume') {
- %ProfilerResume();
- } else if (request.arguments.command == 'pause') {
- %ProfilerPause();
- } else {
- return response.failed('Unknown command');
- }
- response.body = {};
-};
-
-
DebugCommandProcessor.prototype.changeLiveRequest_ = function(
request, response) {
if (!request.arguments) {
@@ -2461,8 +2432,17 @@ DebugCommandProcessor.prototype.changeLiveRequest_ = function(
var new_source = request.arguments.new_source;
- var result_description = Debug.LiveEdit.SetScriptSource(the_script,
- new_source, preview_only, change_log);
+ var result_description;
+ try {
+ result_description = Debug.LiveEdit.SetScriptSource(the_script,
+ new_source, preview_only, change_log);
+ } catch (e) {
+ if (e instanceof Debug.LiveEdit.Failure && "details" in e) {
+ response.failed(e.message, e.details);
+ return;
+ }
+ throw e;
+ }
response.body = {change_log: change_log, result: result_description};
if (!preview_only && !this.running_ && result_description.stack_modified) {
@@ -2554,86 +2534,6 @@ DebugCommandProcessor.prototype.gcRequest_ = function(request, response) {
};
-DebugCommandProcessor.prototype.lolCaptureRequest_ =
- function(request, response) {
- response.body = %CaptureLOL();
-};
-
-
-DebugCommandProcessor.prototype.lolDeleteRequest_ =
- function(request, response) {
- var id = request.arguments.id;
- var result = %DeleteLOL(id);
- if (result) {
- response.body = { id: id };
- } else {
- response.failed('Failed to delete: live object list ' + id + ' not found.');
- }
-};
-
-
-DebugCommandProcessor.prototype.lolDiffRequest_ = function(request, response) {
- var id1 = request.arguments.id1;
- var id2 = request.arguments.id2;
- var verbose = request.arguments.verbose;
- var filter = request.arguments.filter;
- if (verbose === true) {
- var start = request.arguments.start;
- var count = request.arguments.count;
- response.body = %DumpLOL(id1, id2, start, count, filter);
- } else {
- response.body = %SummarizeLOL(id1, id2, filter);
- }
-};
-
-
-DebugCommandProcessor.prototype.lolGetIdRequest_ = function(request, response) {
- var address = request.arguments.address;
- response.body = {};
- response.body.id = %GetLOLObjId(address);
-};
-
-
-DebugCommandProcessor.prototype.lolInfoRequest_ = function(request, response) {
- var start = request.arguments.start;
- var count = request.arguments.count;
- response.body = %InfoLOL(start, count);
-};
-
-
-DebugCommandProcessor.prototype.lolResetRequest_ = function(request, response) {
- %ResetLOL();
-};
-
-
-DebugCommandProcessor.prototype.lolRetainersRequest_ =
- function(request, response) {
- var id = request.arguments.id;
- var verbose = request.arguments.verbose;
- var start = request.arguments.start;
- var count = request.arguments.count;
- var filter = request.arguments.filter;
-
- response.body = %GetLOLObjRetainers(id, Mirror.prototype, verbose,
- start, count, filter);
-};
-
-
-DebugCommandProcessor.prototype.lolPathRequest_ = function(request, response) {
- var id1 = request.arguments.id1;
- var id2 = request.arguments.id2;
- response.body = {};
- response.body.path = %GetLOLPath(id1, id2, Mirror.prototype);
-};
-
-
-DebugCommandProcessor.prototype.lolPrintRequest_ = function(request, response) {
- var id = request.arguments.id;
- response.body = {};
- response.body.dump = %PrintLOLObj(id);
-};
-
-
// Check whether the previously processed command caused the VM to become
// running.
DebugCommandProcessor.prototype.isRunning = function() {
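The debugger-script changes above have two client-visible effects: failed responses can now carry a structured error_details member (populated from Debug.LiveEdit.Failure details by the changelive handler), and evaluate requests can pass additional_context entries as plain value descriptions, which DebugCommandProcessor.resolveValue_ turns into raw values instead of requiring a pre-existing mirror handle. A minimal sketch of both follows; it is illustrative only, and the protocol framing fields not shown in the diff (seq, request_seq, type) are assumed from the existing V8 debug JSON protocol.

// Illustrative only -- not part of the patch. Framing fields (seq,
// request_seq, type) are assumptions based on the existing debug protocol.

// 'evaluate' with inline context values: each additional_context entry is
// resolved via DebugCommandProcessor.resolveValue_, so either a literal
// value description or an existing mirror handle works.
var evaluateRequest = {
  seq: 117, type: 'request', command: 'evaluate',
  arguments: {
    expression: 'a + b',
    additional_context: [
      { name: 'a', value: 1 },    // literal value description
      { name: 'b', handle: 42 }   // mirror handle, as before
    ]
  }
};

// A failed 'changelive' response now forwards the LiveEdit failure details
// through the new error_details member emitted by toJSONProtocol.
var changeLiveFailure = {
  seq: 118, request_seq: 42, type: 'response', command: 'changelive',
  success: false,
  message: 'LiveEdit failure',                 // e.message (illustrative text)
  error_details: { /* e.details, passed through unchanged */ },
  running: true
};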
diff --git a/deps/v8/src/debug.cc b/deps/v8/src/debug.cc
index 48c5519f7..35970e5ee 100644
--- a/deps/v8/src/debug.cc
+++ b/deps/v8/src/debug.cc
@@ -76,18 +76,19 @@ Debug::~Debug() {
static void PrintLn(v8::Local<v8::Value> value) {
v8::Local<v8::String> s = value->ToString();
- ScopedVector<char> data(s->Length() + 1);
+ ScopedVector<char> data(s->Utf8Length() + 1);
if (data.start() == NULL) {
V8::FatalProcessOutOfMemory("PrintLn");
return;
}
- s->WriteAscii(data.start());
+ s->WriteUtf8(data.start());
PrintF("%s\n", data.start());
}
-static Handle<Code> ComputeCallDebugPrepareStepIn(int argc, Code::Kind kind) {
- Isolate* isolate = Isolate::Current();
+static Handle<Code> ComputeCallDebugPrepareStepIn(Isolate* isolate,
+ int argc,
+ Code::Kind kind) {
return isolate->stub_cache()->ComputeCallDebugPrepareStepIn(argc, kind);
}
@@ -121,7 +122,7 @@ BreakLocationIterator::~BreakLocationIterator() {
void BreakLocationIterator::Next() {
- AssertNoAllocation nogc;
+ DisallowHeapAllocation no_gc;
ASSERT(!RinfoDone());
// Iterate through reloc info for code and original code stopping at each
@@ -159,7 +160,6 @@ void BreakLocationIterator::Next() {
Code* code = Code::GetCodeFromTargetAddress(target);
if ((code->is_inline_cache_stub() &&
!code->is_binary_op_stub() &&
- !code->is_unary_op_stub() &&
!code->is_compare_ic_stub() &&
!code->is_to_boolean_ic_stub()) ||
RelocInfo::IsConstructCall(rmode())) {
@@ -211,14 +211,15 @@ void BreakLocationIterator::Next(int count) {
}
-// Find the break point closest to the supplied address.
+// Find the break point at the supplied address, or the closest one before
+// the address.
void BreakLocationIterator::FindBreakLocationFromAddress(Address pc) {
// Run through all break points to locate the one closest to the address.
int closest_break_point = 0;
int distance = kMaxInt;
while (!Done()) {
    // Check if this break point is closer than what was previously found.
- if (this->pc() < pc && pc - this->pc() < distance) {
+ if (this->pc() <= pc && pc - this->pc() < distance) {
closest_break_point = break_point();
distance = static_cast<int>(pc - this->pc());
// Check whether we can't get any closer.
@@ -234,17 +235,30 @@ void BreakLocationIterator::FindBreakLocationFromAddress(Address pc) {
// Find the break point closest to the supplied source position.
-void BreakLocationIterator::FindBreakLocationFromPosition(int position) {
+void BreakLocationIterator::FindBreakLocationFromPosition(int position,
+ BreakPositionAlignment alignment) {
// Run through all break points to locate the one closest to the source
// position.
int closest_break_point = 0;
int distance = kMaxInt;
+
while (!Done()) {
+ int next_position;
+ switch (alignment) {
+ case STATEMENT_ALIGNED:
+ next_position = this->statement_position();
+ break;
+ case BREAK_POSITION_ALIGNED:
+ next_position = this->position();
+ break;
+ default:
+ UNREACHABLE();
+ next_position = this->statement_position();
+ }
    // Check if this break point is closer than what was previously found.
- if (position <= statement_position() &&
- statement_position() - position < distance) {
+ if (position <= next_position && next_position - position < distance) {
closest_break_point = break_point();
- distance = statement_position() - position;
+ distance = next_position - position;
// Check whether we can't get any closer.
if (distance == 0) break;
}
@@ -261,8 +275,12 @@ void BreakLocationIterator::Reset() {
// Create relocation iterators for the two code objects.
if (reloc_iterator_ != NULL) delete reloc_iterator_;
if (reloc_iterator_original_ != NULL) delete reloc_iterator_original_;
- reloc_iterator_ = new RelocIterator(debug_info_->code());
- reloc_iterator_original_ = new RelocIterator(debug_info_->original_code());
+ reloc_iterator_ = new RelocIterator(
+ debug_info_->code(),
+ ~RelocInfo::ModeMask(RelocInfo::CODE_AGE_SEQUENCE));
+ reloc_iterator_original_ = new RelocIterator(
+ debug_info_->original_code(),
+ ~RelocInfo::ModeMask(RelocInfo::CODE_AGE_SEQUENCE));
// Position at the first break point.
break_point_ = -1;
@@ -385,8 +403,25 @@ void BreakLocationIterator::ClearDebugBreak() {
}
-void BreakLocationIterator::PrepareStepIn() {
- HandleScope scope;
+bool BreakLocationIterator::IsStepInLocation(Isolate* isolate) {
+ if (RelocInfo::IsConstructCall(original_rmode())) {
+ return true;
+ } else if (RelocInfo::IsCodeTarget(rmode())) {
+ HandleScope scope(debug_info_->GetIsolate());
+ Address target = original_rinfo()->target_address();
+ Handle<Code> target_code(Code::GetCodeFromTargetAddress(target));
+ if (target_code->kind() == Code::STUB) {
+ return target_code->major_key() == CodeStub::CallFunction;
+ }
+ return target_code->is_call_stub() || target_code->is_keyed_call_stub();
+ } else {
+ return false;
+ }
+}
+
+
+void BreakLocationIterator::PrepareStepIn(Isolate* isolate) {
+ HandleScope scope(isolate);
// Step in can only be prepared if currently positioned on an IC call,
// construct call or CallFunction stub call.
@@ -399,7 +434,7 @@ void BreakLocationIterator::PrepareStepIn() {
// the call in the original code as it is the code there that will be
// executed in place of the debug break call.
Handle<Code> stub = ComputeCallDebugPrepareStepIn(
- target_code->arguments_count(), target_code->kind());
+ isolate, target_code->arguments_count(), target_code->kind());
if (IsDebugBreak()) {
original_rinfo()->set_target_address(stub->entry());
} else {
@@ -547,9 +582,9 @@ void Debug::ThreadInit() {
char* Debug::ArchiveDebug(char* storage) {
char* to = storage;
- memcpy(to, reinterpret_cast<char*>(&thread_local_), sizeof(ThreadLocal));
+ OS::MemCopy(to, reinterpret_cast<char*>(&thread_local_), sizeof(ThreadLocal));
to += sizeof(ThreadLocal);
- memcpy(to, reinterpret_cast<char*>(&registers_), sizeof(registers_));
+ OS::MemCopy(to, reinterpret_cast<char*>(&registers_), sizeof(registers_));
ThreadInit();
ASSERT(to <= storage + ArchiveSpacePerThread());
return storage + ArchiveSpacePerThread();
@@ -558,9 +593,10 @@ char* Debug::ArchiveDebug(char* storage) {
char* Debug::RestoreDebug(char* storage) {
char* from = storage;
- memcpy(reinterpret_cast<char*>(&thread_local_), from, sizeof(ThreadLocal));
+ OS::MemCopy(
+ reinterpret_cast<char*>(&thread_local_), from, sizeof(ThreadLocal));
from += sizeof(ThreadLocal);
- memcpy(reinterpret_cast<char*>(&registers_), from, sizeof(registers_));
+ OS::MemCopy(reinterpret_cast<char*>(&registers_), from, sizeof(registers_));
ASSERT(from <= storage + ArchiveSpacePerThread());
return storage + ArchiveSpacePerThread();
}
@@ -598,31 +634,30 @@ const int Debug::kFrameDropperFrameSize = 4;
void ScriptCache::Add(Handle<Script> script) {
- GlobalHandles* global_handles = Isolate::Current()->global_handles();
+ GlobalHandles* global_handles = isolate_->global_handles();
// Create an entry in the hash map for the script.
- int id = Smi::cast(script->id())->value();
+ int id = script->id()->value();
HashMap::Entry* entry =
HashMap::Lookup(reinterpret_cast<void*>(id), Hash(id), true);
if (entry->value != NULL) {
ASSERT(*script == *reinterpret_cast<Script**>(entry->value));
return;
}
-
// Globalize the script object, make it weak and use the location of the
// global handle as the value in the hash map.
Handle<Script> script_ =
Handle<Script>::cast(
(global_handles->Create(*script)));
- global_handles->MakeWeak(
- reinterpret_cast<Object**>(script_.location()),
- this,
- ScriptCache::HandleWeakScript);
+ global_handles->MakeWeak(reinterpret_cast<Object**>(script_.location()),
+ this,
+ ScriptCache::HandleWeakScript);
entry->value = script_.location();
}
Handle<FixedArray> ScriptCache::GetScripts() {
- Handle<FixedArray> instances = FACTORY->NewFixedArray(occupancy());
+ Factory* factory = isolate_->factory();
+ Handle<FixedArray> instances = factory->NewFixedArray(occupancy());
int count = 0;
for (HashMap::Entry* entry = Start(); entry != NULL; entry = Next(entry)) {
ASSERT(entry->value != NULL);
@@ -636,7 +671,7 @@ Handle<FixedArray> ScriptCache::GetScripts() {
void ScriptCache::ProcessCollectedScripts() {
- Debugger* debugger = Isolate::Current()->debugger();
+ Debugger* debugger = isolate_->debugger();
for (int i = 0; i < collected_scripts_.length(); i++) {
debugger->OnScriptCollected(collected_scripts_[i]);
}
@@ -645,7 +680,7 @@ void ScriptCache::ProcessCollectedScripts() {
void ScriptCache::Clear() {
- GlobalHandles* global_handles = Isolate::Current()->global_handles();
+ GlobalHandles* global_handles = isolate_->global_handles();
// Iterate the script cache to get rid of all the weak handles.
for (HashMap::Entry* entry = Start(); entry != NULL; entry = Next(entry)) {
ASSERT(entry != NULL);
@@ -659,21 +694,22 @@ void ScriptCache::Clear() {
}
-void ScriptCache::HandleWeakScript(v8::Persistent<v8::Value> obj, void* data) {
+void ScriptCache::HandleWeakScript(v8::Isolate* isolate,
+ v8::Persistent<v8::Value>* obj,
+ void* data) {
ScriptCache* script_cache = reinterpret_cast<ScriptCache*>(data);
// Find the location of the global handle.
Script** location =
- reinterpret_cast<Script**>(Utils::OpenHandle(*obj).location());
+ reinterpret_cast<Script**>(Utils::OpenPersistent(*obj).location());
ASSERT((*location)->IsScript());
// Remove the entry from the cache.
- int id = Smi::cast((*location)->id())->value();
+ int id = (*location)->id()->value();
script_cache->Remove(reinterpret_cast<void*>(id), Hash(id));
script_cache->collected_scripts_.Add(id);
// Clear the weak handle.
- obj.Dispose();
- obj.Clear();
+ obj->Dispose();
}
@@ -692,8 +728,10 @@ void Debug::SetUp(bool create_heap_objects) {
}
-void Debug::HandleWeakDebugInfo(v8::Persistent<v8::Value> obj, void* data) {
- Debug* debug = Isolate::Current()->debug();
+void Debug::HandleWeakDebugInfo(v8::Isolate* isolate,
+ v8::Persistent<v8::Value>* obj,
+ void* data) {
+ Debug* debug = reinterpret_cast<Isolate*>(isolate)->debug();
DebugInfoListNode* node = reinterpret_cast<DebugInfoListNode*>(data);
// We need to clear all breakpoints associated with the function to restore
// original code and avoid patching the code twice later because
@@ -713,25 +751,23 @@ void Debug::HandleWeakDebugInfo(v8::Persistent<v8::Value> obj, void* data) {
DebugInfoListNode::DebugInfoListNode(DebugInfo* debug_info): next_(NULL) {
- GlobalHandles* global_handles = Isolate::Current()->global_handles();
+ GlobalHandles* global_handles = debug_info->GetIsolate()->global_handles();
// Globalize the request debug info object and make it weak.
debug_info_ = Handle<DebugInfo>::cast(
(global_handles->Create(debug_info)));
- global_handles->MakeWeak(
- reinterpret_cast<Object**>(debug_info_.location()),
- this,
- Debug::HandleWeakDebugInfo);
+ global_handles->MakeWeak(reinterpret_cast<Object**>(debug_info_.location()),
+ this,
+ Debug::HandleWeakDebugInfo);
}
DebugInfoListNode::~DebugInfoListNode() {
- Isolate::Current()->global_handles()->Destroy(
+ debug_info_->GetIsolate()->global_handles()->Destroy(
reinterpret_cast<Object**>(debug_info_.location()));
}
-bool Debug::CompileDebuggerScript(int index) {
- Isolate* isolate = Isolate::Current();
+bool Debug::CompileDebuggerScript(Isolate* isolate, int index) {
Factory* factory = isolate->factory();
HandleScope scope(isolate);
@@ -752,6 +788,7 @@ bool Debug::CompileDebuggerScript(int index) {
function_info = Compiler::Compile(source_code,
script_name,
0, 0,
+ false,
context,
NULL, NULL,
Handle<String>::null(),
@@ -770,8 +807,11 @@ bool Debug::CompileDebuggerScript(int index) {
factory->NewFunctionFromSharedFunctionInfo(function_info, context);
Handle<Object> exception =
- Execution::TryCall(function, Handle<Object>(context->global_object()),
- 0, NULL, &caught_exception);
+ Execution::TryCall(function,
+ Handle<Object>(context->global_object(), isolate),
+ 0,
+ NULL,
+ &caught_exception);
// Check for caught exceptions.
if (caught_exception) {
@@ -779,12 +819,14 @@ bool Debug::CompileDebuggerScript(int index) {
MessageLocation computed_location;
isolate->ComputeLocation(&computed_location);
Handle<Object> message = MessageHandler::MakeMessageObject(
- "error_loading_debugger", &computed_location,
+ isolate, "error_loading_debugger", &computed_location,
Vector<Handle<Object> >::empty(), Handle<String>(), Handle<JSArray>());
ASSERT(!isolate->has_pending_exception());
- isolate->set_pending_exception(*exception);
- MessageHandler::ReportMessage(Isolate::Current(), NULL, message);
- isolate->clear_pending_exception();
+ if (!exception.is_null()) {
+ isolate->set_pending_exception(*exception);
+ MessageHandler::ReportMessage(isolate, NULL, message);
+ isolate->clear_pending_exception();
+ }
return false;
}
@@ -810,14 +852,13 @@ bool Debug::Load() {
// Disable breakpoints and interrupts while compiling and running the
// debugger scripts including the context creation code.
- DisableBreak disable(true);
+ DisableBreak disable(isolate_, true);
PostponeInterruptsScope postpone(isolate_);
// Create the debugger context.
HandleScope scope(isolate_);
Handle<Context> context =
isolate_->bootstrapper()->CreateEnvironment(
- isolate_,
Handle<Object>::null(),
v8::Handle<ObjectTemplate>(),
NULL);
@@ -830,23 +871,27 @@ bool Debug::Load() {
isolate_->set_context(*context);
// Expose the builtins object in the debugger context.
- Handle<String> key = isolate_->factory()->LookupAsciiSymbol("builtins");
+ Handle<String> key = isolate_->factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("builtins"));
Handle<GlobalObject> global = Handle<GlobalObject>(context->global_object());
RETURN_IF_EMPTY_HANDLE_VALUE(
isolate_,
- JSReceiver::SetProperty(global, key, Handle<Object>(global->builtins()),
- NONE, kNonStrictMode),
+ JSReceiver::SetProperty(global,
+ key,
+ Handle<Object>(global->builtins(), isolate_),
+ NONE,
+ kNonStrictMode),
false);
// Compile the JavaScript for the debugger in the debugger context.
debugger->set_compiling_natives(true);
bool caught_exception =
- !CompileDebuggerScript(Natives::GetIndex("mirror")) ||
- !CompileDebuggerScript(Natives::GetIndex("debug"));
+ !CompileDebuggerScript(isolate_, Natives::GetIndex("mirror")) ||
+ !CompileDebuggerScript(isolate_, Natives::GetIndex("debug"));
if (FLAG_enable_liveedit) {
caught_exception = caught_exception ||
- !CompileDebuggerScript(Natives::GetIndex("liveedit"));
+ !CompileDebuggerScript(isolate_, Natives::GetIndex("liveedit"));
}
debugger->set_compiling_natives(false);
@@ -858,8 +903,9 @@ bool Debug::Load() {
// Check for caught exceptions.
if (caught_exception) return false;
- // Debugger loaded.
- debug_context_ = context;
+ // Debugger loaded, create debugger context global handle.
+ debug_context_ = Handle<Context>::cast(
+ isolate_->global_handles()->Create(*context));
return true;
}
@@ -875,7 +921,7 @@ void Debug::Unload() {
DestroyScriptCache();
// Clear debugger context global handle.
- Isolate::Current()->global_handles()->Destroy(
+ isolate_->global_handles()->Destroy(
reinterpret_cast<Object**>(debug_context_.location()));
debug_context_ = Handle<Context>();
}
@@ -912,7 +958,7 @@ Object* Debug::Break(Arguments args) {
}
// Enter the debugger.
- EnterDebugger debugger;
+ EnterDebugger debugger(isolate_);
if (debugger.FailedToEnter()) {
return heap->undefined_value();
}
@@ -922,13 +968,15 @@ Object* Debug::Break(Arguments args) {
// Get the debug info (create it if it does not exist).
Handle<SharedFunctionInfo> shared =
- Handle<SharedFunctionInfo>(JSFunction::cast(frame->function())->shared());
+ Handle<SharedFunctionInfo>(frame->function()->shared());
Handle<DebugInfo> debug_info = GetDebugInfo(shared);
// Find the break point where execution has stopped.
BreakLocationIterator break_location_iterator(debug_info,
ALL_BREAK_LOCATIONS);
- break_location_iterator.FindBreakLocationFromAddress(frame->pc());
+  // pc points to the instruction after the current one, possibly a break
+  // location as well, so subtract 1 to exclude it from the search.
+ break_location_iterator.FindBreakLocationFromAddress(frame->pc() - 1);
// Check whether step next reached a new statement.
if (!StepNextContinue(&break_location_iterator, frame)) {
@@ -940,10 +988,10 @@ Object* Debug::Break(Arguments args) {
// If there is one or more real break points check whether any of these are
// triggered.
- Handle<Object> break_points_hit(heap->undefined_value());
+ Handle<Object> break_points_hit(heap->undefined_value(), isolate_);
if (break_location_iterator.HasBreakPoint()) {
Handle<Object> break_point_objects =
- Handle<Object>(break_location_iterator.BreakPointObjects());
+ Handle<Object>(break_location_iterator.BreakPointObjects(), isolate_);
break_points_hit = CheckBreakPoints(break_point_objects);
}
@@ -969,7 +1017,7 @@ Object* Debug::Break(Arguments args) {
// Clear queue
thread_local_.queued_step_count_ = 0;
- PrepareStep(StepNext, step_count);
+ PrepareStep(StepNext, step_count, StackFrame::NO_ID);
} else {
// Notify the debug event listeners.
isolate_->debugger()->OnDebugBreak(break_points_hit, false);
@@ -1007,7 +1055,7 @@ Object* Debug::Break(Arguments args) {
ClearStepping();
// Set up for the remaining steps.
- PrepareStep(step_action, step_count);
+ PrepareStep(step_action, step_count, StackFrame::NO_ID);
}
if (thread_local_.frame_drop_mode_ == FRAMES_UNTOUCHED) {
@@ -1061,7 +1109,7 @@ Handle<Object> Debug::CheckBreakPoints(Handle<Object> break_point_objects) {
Handle<FixedArray> array(FixedArray::cast(*break_point_objects));
break_points_hit = factory->NewFixedArray(array->length());
for (int i = 0; i < array->length(); i++) {
- Handle<Object> o(array->get(i));
+ Handle<Object> o(array->get(i), isolate_);
if (CheckBreakPoint(o)) {
break_points_hit->set(break_points_hit_count++, *o);
}
@@ -1093,12 +1141,13 @@ bool Debug::CheckBreakPoint(Handle<Object> break_point_object) {
if (!break_point_object->IsJSObject()) return true;
// Get the function IsBreakPointTriggered (defined in debug-debugger.js).
- Handle<String> is_break_point_triggered_symbol =
- factory->LookupAsciiSymbol("IsBreakPointTriggered");
+ Handle<String> is_break_point_triggered_string =
+ factory->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("IsBreakPointTriggered"));
Handle<JSFunction> check_break_point =
Handle<JSFunction>(JSFunction::cast(
debug_context()->global_object()->GetPropertyNoExceptionThrown(
- *is_break_point_triggered_symbol)));
+ *is_break_point_triggered_string)));
// Get the break id as an object.
Handle<Object> break_id = factory->NewNumberFromInt(Debug::break_id());
@@ -1157,7 +1206,7 @@ void Debug::SetBreakPoint(Handle<JSFunction> function,
// Find the break point and change it.
BreakLocationIterator it(debug_info, SOURCE_BREAK_LOCATIONS);
- it.FindBreakLocationFromPosition(*source_position);
+ it.FindBreakLocationFromPosition(*source_position, STATEMENT_ALIGNED);
it.SetBreakPoint(break_point_object);
*source_position = it.position();
@@ -1169,7 +1218,8 @@ void Debug::SetBreakPoint(Handle<JSFunction> function,
bool Debug::SetBreakPointForScript(Handle<Script> script,
Handle<Object> break_point_object,
- int* source_position) {
+ int* source_position,
+ BreakPositionAlignment alignment) {
HandleScope scope(isolate_);
PrepareForBreakPoints();
@@ -1200,7 +1250,7 @@ bool Debug::SetBreakPointForScript(Handle<Script> script,
// Find the break point and change it.
BreakLocationIterator it(debug_info, SOURCE_BREAK_LOCATIONS);
- it.FindBreakLocationFromPosition(position);
+ it.FindBreakLocationFromPosition(position, alignment);
it.SetBreakPoint(break_point_object);
*source_position = it.position() + shared->start_position();
@@ -1222,15 +1272,11 @@ void Debug::ClearBreakPoint(Handle<Object> break_point_object) {
// Get information in the break point.
BreakPointInfo* break_point_info = BreakPointInfo::cast(result);
Handle<DebugInfo> debug_info = node->debug_info();
- Handle<SharedFunctionInfo> shared(debug_info->shared());
- int source_position = break_point_info->statement_position()->value();
-
- // Source positions starts with zero.
- ASSERT(source_position >= 0);
// Find the break point and clear it.
BreakLocationIterator it(debug_info, SOURCE_BREAK_LOCATIONS);
- it.FindBreakLocationFromPosition(source_position);
+ it.FindBreakLocationFromAddress(debug_info->code()->entry() +
+ break_point_info->code_position()->value());
it.ClearBreakPoint(break_point_object);
// If there are no more break points left remove the debug info for this
@@ -1283,7 +1329,8 @@ void Debug::FloodWithOneShot(Handle<JSFunction> function) {
void Debug::FloodBoundFunctionWithOneShot(Handle<JSFunction> function) {
Handle<FixedArray> new_bindings(function->function_bindings());
- Handle<Object> bindee(new_bindings->get(JSFunction::kBoundFunctionIndex));
+ Handle<Object> bindee(new_bindings->get(JSFunction::kBoundFunctionIndex),
+ isolate_);
if (!bindee.is_null() && bindee->IsJSFunction() &&
!JSFunction::cast(*bindee)->IsBuiltin()) {
@@ -1304,8 +1351,7 @@ void Debug::FloodHandlerWithOneShot() {
JavaScriptFrame* frame = it.frame();
if (frame->HasHandler()) {
// Flood the function with the catch block with break points
- JSFunction* function = JSFunction::cast(frame->function());
- FloodWithOneShot(Handle<JSFunction>(function));
+ FloodWithOneShot(Handle<JSFunction>(frame->function()));
return;
}
}
@@ -1330,7 +1376,9 @@ bool Debug::IsBreakOnException(ExceptionBreakType type) {
}
-void Debug::PrepareStep(StepAction step_action, int step_count) {
+void Debug::PrepareStep(StepAction step_action,
+ int step_count,
+ StackFrame::Id frame_id) {
HandleScope scope(isolate_);
PrepareForBreakPoints();
@@ -1356,6 +1404,9 @@ void Debug::PrepareStep(StepAction step_action, int step_count) {
// If there is no JavaScript stack don't do anything.
return;
}
+ if (frame_id != StackFrame::NO_ID) {
+ id = frame_id;
+ }
JavaScriptFrameIterator frames_it(isolate_, id);
JavaScriptFrame* frame = frames_it.frame();
@@ -1371,13 +1422,13 @@ void Debug::PrepareStep(StepAction step_action, int step_count) {
// breakpoints.
frames_it.Advance();
// Fill the function to return to with one-shot break points.
- JSFunction* function = JSFunction::cast(frames_it.frame()->function());
+ JSFunction* function = frames_it.frame()->function();
FloodWithOneShot(Handle<JSFunction>(function));
return;
}
// Get the debug info (create it if it does not exist).
- Handle<JSFunction> function(JSFunction::cast(frame->function()));
+ Handle<JSFunction> function(frame->function());
Handle<SharedFunctionInfo> shared(function->shared());
if (!EnsureDebugInfo(shared, function)) {
// Return if ensuring debug info failed.
@@ -1387,7 +1438,9 @@ void Debug::PrepareStep(StepAction step_action, int step_count) {
// Find the break location where execution has stopped.
BreakLocationIterator it(debug_info, ALL_BREAK_LOCATIONS);
- it.FindBreakLocationFromAddress(frame->pc());
+  // pc points to the instruction after the current one, possibly a break
+  // location as well, so subtract 1 to exclude it from the search.
+ it.FindBreakLocationFromAddress(frame->pc() - 1);
// Compute whether or not the target is a call target.
bool is_load_or_store = false;
@@ -1440,15 +1493,14 @@ void Debug::PrepareStep(StepAction step_action, int step_count) {
frames_it.Advance();
}
// Skip builtin functions on the stack.
- while (!frames_it.done() &&
- JSFunction::cast(frames_it.frame()->function())->IsBuiltin()) {
+ while (!frames_it.done() && frames_it.frame()->function()->IsBuiltin()) {
frames_it.Advance();
}
// Step out: If there is a JavaScript caller frame, we need to
// flood it with breakpoints.
if (!frames_it.done()) {
// Fill the function to return to with one-shot break points.
- JSFunction* function = JSFunction::cast(frames_it.frame()->function());
+ JSFunction* function = frames_it.frame()->function();
FloodWithOneShot(Handle<JSFunction>(function));
// Set target frame pointer.
ActivateStepOut(frames_it.frame());
@@ -1481,7 +1533,8 @@ void Debug::PrepareStep(StepAction step_action, int step_count) {
// from the code object.
Handle<Object> obj(
isolate_->heap()->code_stubs()->SlowReverseLookup(
- *call_function_stub));
+ *call_function_stub),
+ isolate_);
ASSERT(!obj.is_null());
ASSERT(!(*obj)->IsUndefined());
ASSERT(obj->IsSmi());
@@ -1535,7 +1588,7 @@ void Debug::PrepareStep(StepAction step_action, int step_count) {
}
// Step in or Step in min
- it.PrepareStepIn();
+ it.PrepareStepIn(isolate_);
ActivateStepIn(frame);
}
}
@@ -1579,7 +1632,7 @@ bool Debug::StepNextContinue(BreakLocationIterator* break_location_iterator,
// object.
bool Debug::IsDebugBreak(Address addr) {
Code* code = Code::GetCodeFromTargetAddress(addr);
- return code->ic_state() == DEBUG_BREAK;
+ return code->is_debug_stub() && code->extra_ic_state() == DEBUG_BREAK;
}
@@ -1601,7 +1654,7 @@ bool Debug::IsBreakStub(Code* code) {
// Find the builtin to use for invoking the debug break
Handle<Code> Debug::FindDebugBreak(Handle<Code> code, RelocInfo::Mode mode) {
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = code->GetIsolate();
// Find the builtin debug break function matching the calling convention
// used by the call site.
@@ -1624,6 +1677,9 @@ Handle<Code> Debug::FindDebugBreak(Handle<Code> code, RelocInfo::Mode mode) {
case Code::KEYED_STORE_IC:
return isolate->builtins()->KeyedStoreIC_DebugBreak();
+ case Code::COMPARE_NIL_IC:
+ return isolate->builtins()->CompareNilIC_DebugBreak();
+
default:
UNREACHABLE();
}
@@ -1651,13 +1707,16 @@ Handle<Code> Debug::FindDebugBreak(Handle<Code> code, RelocInfo::Mode mode) {
// Simple function for returning the source positions for active break points.
Handle<Object> Debug::GetSourceBreakLocations(
- Handle<SharedFunctionInfo> shared) {
- Isolate* isolate = Isolate::Current();
+ Handle<SharedFunctionInfo> shared,
+ BreakPositionAlignment position_alignment) {
+ Isolate* isolate = shared->GetIsolate();
Heap* heap = isolate->heap();
- if (!HasDebugInfo(shared)) return Handle<Object>(heap->undefined_value());
+ if (!HasDebugInfo(shared)) {
+ return Handle<Object>(heap->undefined_value(), isolate);
+ }
Handle<DebugInfo> debug_info = GetDebugInfo(shared);
if (debug_info->GetBreakPointCount() == 0) {
- return Handle<Object>(heap->undefined_value());
+ return Handle<Object>(heap->undefined_value(), isolate);
}
Handle<FixedArray> locations =
isolate->factory()->NewFixedArray(debug_info->GetBreakPointCount());
@@ -1667,7 +1726,20 @@ Handle<Object> Debug::GetSourceBreakLocations(
BreakPointInfo* break_point_info =
BreakPointInfo::cast(debug_info->break_points()->get(i));
if (break_point_info->GetBreakPointCount() > 0) {
- locations->set(count++, break_point_info->statement_position());
+ Smi* position;
+ switch (position_alignment) {
+ case STATEMENT_ALIGNED:
+ position = break_point_info->statement_position();
+ break;
+ case BREAK_POSITION_ALIGNED:
+ position = break_point_info->source_position();
+ break;
+ default:
+ UNREACHABLE();
+ position = break_point_info->statement_position();
+ }
+
+ locations->set(count++, position);
}
}
}
@@ -1692,9 +1764,10 @@ void Debug::HandleStepIn(Handle<JSFunction> function,
Handle<Object> holder,
Address fp,
bool is_constructor) {
+ Isolate* isolate = function->GetIsolate();
// If the frame pointer is not supplied by the caller find it.
if (fp == 0) {
- StackFrameIterator it;
+ StackFrameIterator it(isolate);
it.Advance();
// For constructor functions skip another frame.
if (is_constructor) {
@@ -1713,17 +1786,21 @@ void Debug::HandleStepIn(Handle<JSFunction> function,
} else if (!function->IsBuiltin()) {
// Don't allow step into functions in the native context.
if (function->shared()->code() ==
- Isolate::Current()->builtins()->builtin(Builtins::kFunctionApply) ||
+ isolate->builtins()->builtin(Builtins::kFunctionApply) ||
function->shared()->code() ==
- Isolate::Current()->builtins()->builtin(Builtins::kFunctionCall)) {
+ isolate->builtins()->builtin(Builtins::kFunctionCall)) {
// Handle function.apply and function.call separately to flood the
// function to be called and not the code for Builtins::FunctionApply or
// Builtins::FunctionCall. The receiver of call/apply is the target
// function.
- if (!holder.is_null() && holder->IsJSFunction() &&
- !JSFunction::cast(*holder)->IsBuiltin()) {
+ if (!holder.is_null() && holder->IsJSFunction()) {
Handle<JSFunction> js_function = Handle<JSFunction>::cast(holder);
- Debug::FloodWithOneShot(js_function);
+ if (!js_function->IsBuiltin()) {
+ Debug::FloodWithOneShot(js_function);
+ } else if (js_function->shared()->bound()) {
+ // Handle Function.prototype.bind
+ Debug::FloodBoundFunctionWithOneShot(js_function);
+ }
}
} else {
Debug::FloodWithOneShot(function);
@@ -1744,6 +1821,7 @@ void Debug::ClearStepping() {
thread_local_.step_count_ = 0;
}
+
// Clears all the one-shot break points that are currently set. Normally this
// function is called each time a break point is hit as one shot break points
// are used to support stepping.
@@ -1814,7 +1892,7 @@ static bool CompileFullCodeForDebugging(Handle<JSFunction> function,
// Use compile lazy which will end up compiling the full code in the
// configuration configured above.
bool result = Compiler::CompileLazy(&info);
- ASSERT(result != Isolate::Current()->has_pending_exception());
+ ASSERT(result != info.isolate()->has_pending_exception());
info.isolate()->clear_pending_exception();
#if DEBUG
if (result) {
@@ -1840,7 +1918,7 @@ static void CollectActiveFunctionsFromThread(
for (JavaScriptFrameIterator it(isolate, top); !it.done(); it.Advance()) {
JavaScriptFrame* frame = it.frame();
if (frame->is_optimized()) {
- List<JSFunction*> functions(Compiler::kMaxInliningLevels + 1);
+ List<JSFunction*> functions(FLAG_max_inlining_levels + 1);
frame->GetFunctions(&functions);
for (int i = 0; i < functions.length(); i++) {
JSFunction* function = functions[i];
@@ -1848,7 +1926,7 @@ static void CollectActiveFunctionsFromThread(
function->shared()->code()->set_gc_metadata(active_code_marker);
}
} else if (frame->function()->IsJSFunction()) {
- JSFunction* function = JSFunction::cast(frame->function());
+ JSFunction* function = frame->function();
ASSERT(frame->LookupCode()->kind() == Code::FUNCTION);
active_functions->Add(Handle<JSFunction>(function));
function->shared()->code()->set_gc_metadata(active_code_marker);
@@ -1865,7 +1943,7 @@ static void RedirectActivationsToRecompiledCodeOnThread(
if (frame->is_optimized() || !frame->function()->IsJSFunction()) continue;
- JSFunction* function = JSFunction::cast(frame->function());
+ JSFunction* function = frame->function();
ASSERT(frame->LookupCode()->kind() == Code::FUNCTION);
@@ -1978,7 +2056,11 @@ void Debug::PrepareForBreakPoints() {
// If preparing for the first break point make sure to deoptimize all
// functions as debugging does not work with optimized code.
if (!has_break_points_) {
- Deoptimizer::DeoptimizeAll();
+ if (FLAG_concurrent_recompilation) {
+ isolate_->optimizing_compiler_thread()->Flush();
+ }
+
+ Deoptimizer::DeoptimizeAll(isolate_);
Handle<Code> lazy_compile =
Handle<Code>(isolate_->builtins()->builtin(Builtins::kLazyCompile));
@@ -1993,14 +2075,15 @@ void Debug::PrepareForBreakPoints() {
{
// We are going to iterate heap to find all functions without
// debug break slots.
- isolate_->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask,
- "preparing for breakpoints");
+ Heap* heap = isolate_->heap();
+ heap->CollectAllGarbage(Heap::kMakeHeapIterableMask,
+ "preparing for breakpoints");
// Ensure no GC in this scope as we are going to use gc_metadata
// field in the Code object to mark active functions.
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_allocation;
- Object* active_code_marker = isolate_->heap()->the_hole_value();
+ Object* active_code_marker = heap->the_hole_value();
CollectActiveFunctionsFromThread(isolate_,
isolate_->thread_local_top(),
@@ -2014,19 +2097,36 @@ void Debug::PrepareForBreakPoints() {
// Scan the heap for all non-optimized functions which have no
// debug break slots and are not active or inlined into an active
// function and mark them for lazy compilation.
- HeapIterator iterator;
+ HeapIterator iterator(heap);
HeapObject* obj = NULL;
while (((obj = iterator.next()) != NULL)) {
if (obj->IsJSFunction()) {
JSFunction* function = JSFunction::cast(obj);
SharedFunctionInfo* shared = function->shared();
- if (shared->allows_lazy_compilation() &&
- shared->script()->IsScript() &&
- function->code()->kind() == Code::FUNCTION &&
- !function->code()->has_debug_break_slots() &&
- shared->code()->gc_metadata() != active_code_marker) {
+
+ if (!shared->allows_lazy_compilation()) continue;
+ if (!shared->script()->IsScript()) continue;
+ if (function->IsBuiltin()) continue;
+ if (shared->code()->gc_metadata() == active_code_marker) continue;
+
+ Code::Kind kind = function->code()->kind();
+ if (kind == Code::FUNCTION &&
+ !function->code()->has_debug_break_slots()) {
function->set_code(*lazy_compile);
function->shared()->set_code(*lazy_compile);
+ } else if (kind == Code::BUILTIN &&
+ (function->IsInRecompileQueue() ||
+ function->IsMarkedForLazyRecompilation() ||
+ function->IsMarkedForConcurrentRecompilation())) {
+ // Abort in-flight compilation.
+ Code* shared_code = function->shared()->code();
+ if (shared_code->kind() == Code::FUNCTION &&
+ shared_code->has_debug_break_slots()) {
+ function->set_code(shared_code);
+ } else {
+ function->set_code(*lazy_compile);
+ function->shared()->set_code(*lazy_compile);
+ }
}
}
}
@@ -2109,11 +2209,12 @@ Object* Debug::FindSharedFunctionInfoInScript(Handle<Script> script,
int target_start_position = RelocInfo::kNoPosition;
Handle<JSFunction> target_function;
Handle<SharedFunctionInfo> target;
+ Heap* heap = isolate_->heap();
while (!done) {
{ // Extra scope for iterator and no-allocation.
- isolate_->heap()->EnsureHeapIsIterable();
- AssertNoAllocation no_alloc_during_heap_iteration;
- HeapIterator iterator;
+ heap->EnsureHeapIsIterable();
+ DisallowHeapAllocation no_alloc_during_heap_iteration;
+ HeapIterator iterator(heap);
for (HeapObject* obj = iterator.next();
obj != NULL; obj = iterator.next()) {
bool found_next_candidate = false;
@@ -2173,9 +2274,7 @@ Object* Debug::FindSharedFunctionInfoInScript(Handle<Script> script,
} // End for loop.
} // End no-allocation scope.
- if (target.is_null()) {
- return isolate_->heap()->undefined_value();
- }
+ if (target.is_null()) return heap->undefined_value();
// There will be at least one break point when we are done.
has_break_points_ = true;
@@ -2203,6 +2302,8 @@ Object* Debug::FindSharedFunctionInfoInScript(Handle<Script> script,
// Ensures the debug information is present for shared.
bool Debug::EnsureDebugInfo(Handle<SharedFunctionInfo> shared,
Handle<JSFunction> function) {
+ Isolate* isolate = shared->GetIsolate();
+
// Return if we already have the debug info for shared.
if (HasDebugInfo(shared)) {
ASSERT(shared->is_compiled());
@@ -2219,7 +2320,7 @@ bool Debug::EnsureDebugInfo(Handle<SharedFunctionInfo> shared,
}
// Create the debug info object.
- Handle<DebugInfo> debug_info = FACTORY->NewDebugInfo(shared);
+ Handle<DebugInfo> debug_info = isolate->factory()->NewDebugInfo(shared);
// Add debug info to the list.
DebugInfoListNode* node = new DebugInfoListNode(*debug_info);
@@ -2419,11 +2520,11 @@ void Debug::ClearMirrorCache() {
ASSERT(isolate_->context() == *Debug::debug_context());
// Clear the mirror cache.
- Handle<String> function_name =
- isolate_->factory()->LookupSymbol(CStrVector("ClearMirrorCache"));
+ Handle<String> function_name = isolate_->factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("ClearMirrorCache"));
Handle<Object> fun(
- Isolate::Current()->global_object()->GetPropertyNoExceptionThrown(
- *function_name));
+ isolate_->global_object()->GetPropertyNoExceptionThrown(*function_name),
+ isolate_);
ASSERT(fun->IsJSFunction());
bool caught_exception;
Execution::TryCall(Handle<JSFunction>::cast(fun),
@@ -2445,12 +2546,12 @@ void Debug::CreateScriptCache() {
"Debug::CreateScriptCache");
ASSERT(script_cache_ == NULL);
- script_cache_ = new ScriptCache();
+ script_cache_ = new ScriptCache(isolate_);
// Scan heap for Script objects.
int count = 0;
- HeapIterator iterator;
- AssertNoAllocation no_allocation;
+ HeapIterator iterator(heap);
+ DisallowHeapAllocation no_allocation;
for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
if (obj->IsScript() && Script::cast(obj)->HasValidSource()) {
@@ -2520,24 +2621,18 @@ Debugger::Debugger(Isolate* isolate)
message_handler_(NULL),
debugger_unload_pending_(false),
host_dispatch_handler_(NULL),
- dispatch_handler_access_(OS::CreateMutex()),
debug_message_dispatch_handler_(NULL),
message_dispatch_helper_thread_(NULL),
- host_dispatch_micros_(100 * 1000),
+ host_dispatch_period_(TimeDelta::FromMilliseconds(100)),
agent_(NULL),
command_queue_(isolate->logger(), kQueueInitialSize),
- command_received_(OS::CreateSemaphore(0)),
+ command_received_(0),
event_command_queue_(isolate->logger(), kQueueInitialSize),
isolate_(isolate) {
}
-Debugger::~Debugger() {
- delete dispatch_handler_access_;
- dispatch_handler_access_ = 0;
- delete command_received_;
- command_received_ = 0;
-}
+Debugger::~Debugger() {}
Handle<Object> Debugger::MakeJSObject(Vector<const char> constructor_name,
@@ -2548,10 +2643,10 @@ Handle<Object> Debugger::MakeJSObject(Vector<const char> constructor_name,
// Create the execution state object.
Handle<String> constructor_str =
- isolate_->factory()->LookupSymbol(constructor_name);
+ isolate_->factory()->InternalizeUtf8String(constructor_name);
Handle<Object> constructor(
- isolate_->global_object()->GetPropertyNoExceptionThrown(
- *constructor_str));
+ isolate_->global_object()->GetPropertyNoExceptionThrown(*constructor_str),
+ isolate_);
ASSERT(constructor->IsJSFunction());
if (!constructor->IsJSFunction()) {
*caught_exception = true;
@@ -2639,7 +2734,7 @@ Handle<Object> Debugger::MakeScriptCollectedEvent(int id,
bool* caught_exception) {
// Create the script collected event object.
Handle<Object> exec_state = MakeExecutionState(caught_exception);
- Handle<Object> id_object = Handle<Smi>(Smi::FromInt(id));
+ Handle<Object> id_object = Handle<Smi>(Smi::FromInt(id), isolate_);
Handle<Object> argv[] = { exec_state, id_object };
return MakeJSObject(CStrVector("MakeScriptCollectedEvent"),
@@ -2668,7 +2763,7 @@ void Debugger::OnException(Handle<Object> exception, bool uncaught) {
}
// Enter the debugger.
- EnterDebugger debugger;
+ EnterDebugger debugger(isolate_);
if (debugger.FailedToEnter()) return;
// Clear all current stepping setup.
@@ -2734,7 +2829,7 @@ void Debugger::OnBeforeCompile(Handle<Script> script) {
if (!EventActive(v8::BeforeCompile)) return;
// Enter the debugger.
- EnterDebugger debugger;
+ EnterDebugger debugger(isolate_);
if (debugger.FailedToEnter()) return;
// Create the event data object.
@@ -2771,18 +2866,21 @@ void Debugger::OnAfterCompile(Handle<Script> script,
bool in_debugger = debug->InDebugger();
// Enter the debugger.
- EnterDebugger debugger;
+ EnterDebugger debugger(isolate_);
if (debugger.FailedToEnter()) return;
// If debugging there might be script break points registered for this
// script. Make sure that these break points are set.
// Get the function UpdateScriptBreakPoints (defined in debug-debugger.js).
- Handle<String> update_script_break_points_symbol =
- isolate_->factory()->LookupAsciiSymbol("UpdateScriptBreakPoints");
+ Handle<String> update_script_break_points_string =
+ isolate_->factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("UpdateScriptBreakPoints"));
Handle<Object> update_script_break_points =
- Handle<Object>(debug->debug_context()->global_object()->
- GetPropertyNoExceptionThrown(*update_script_break_points_symbol));
+ Handle<Object>(
+ debug->debug_context()->global_object()->GetPropertyNoExceptionThrown(
+ *update_script_break_points_string),
+ isolate_);
if (!update_script_break_points->IsJSFunction()) {
return;
}
@@ -2796,7 +2894,7 @@ void Debugger::OnAfterCompile(Handle<Script> script,
bool caught_exception;
Handle<Object> argv[] = { wrapper };
Execution::TryCall(Handle<JSFunction>::cast(update_script_break_points),
- Isolate::Current()->js_builtins_object(),
+ isolate_->js_builtins_object(),
ARRAY_SIZE(argv),
argv,
&caught_exception);
@@ -2831,7 +2929,7 @@ void Debugger::OnScriptCollected(int id) {
if (!Debugger::EventActive(v8::ScriptCollected)) return;
// Enter the debugger.
- EnterDebugger debugger;
+ EnterDebugger debugger(isolate_);
if (debugger.FailedToEnter()) return;
// Create the script collected state object.
@@ -2932,7 +3030,7 @@ void Debugger::CallJSEventCallback(v8::DebugEvent event,
Handle<JSFunction> fun(Handle<JSFunction>::cast(event_listener_));
// Invoke the JavaScript debug event listener.
- Handle<Object> argv[] = { Handle<Object>(Smi::FromInt(event)),
+ Handle<Object> argv[] = { Handle<Object>(Smi::FromInt(event), isolate_),
exec_state,
event_data,
event_listener_data_ };
@@ -2948,7 +3046,7 @@ void Debugger::CallJSEventCallback(v8::DebugEvent event,
Handle<Context> Debugger::GetDebugContext() {
never_unload_debugger_ = true;
- EnterDebugger debugger;
+ EnterDebugger debugger(isolate_);
return isolate_->debug()->debug_context();
}
@@ -3036,13 +3134,13 @@ void Debugger::NotifyMessageHandler(v8::DebugEvent event,
v8::Local<v8::String> fun_name =
v8::String::New("debugCommandProcessor");
v8::Local<v8::Function> fun =
- v8::Function::Cast(*api_exec_state->Get(fun_name));
+ v8::Local<v8::Function>::Cast(api_exec_state->Get(fun_name));
- v8::Handle<v8::Boolean> running =
- auto_continue ? v8::True() : v8::False();
+ v8::Handle<v8::Boolean> running = v8::Boolean::New(auto_continue);
static const int kArgc = 1;
v8::Handle<Value> argv[kArgc] = { running };
- cmd_processor = v8::Object::Cast(*fun->Call(api_exec_state, kArgc, argv));
+ cmd_processor = v8::Local<v8::Object>::Cast(
+ fun->Call(api_exec_state, kArgc, argv));
if (try_catch.HasCaught()) {
PrintLn(try_catch.Exception());
return;
@@ -3056,14 +3154,14 @@ void Debugger::NotifyMessageHandler(v8::DebugEvent event,
// Wait for new command in the queue.
if (Debugger::host_dispatch_handler_) {
// In case there is a host dispatch - do periodic dispatches.
- if (!command_received_->Wait(host_dispatch_micros_)) {
+ if (!command_received_.WaitFor(host_dispatch_period_)) {
// Timout expired, do the dispatch.
Debugger::host_dispatch_handler_();
continue;
}
} else {
// In case there is no host dispatch - just wait.
- command_received_->Wait();
+ command_received_.Wait();
}
// Get the command from the queue.
@@ -3082,7 +3180,7 @@ void Debugger::NotifyMessageHandler(v8::DebugEvent event,
v8::Local<v8::Value> request;
v8::TryCatch try_catch;
fun_name = v8::String::New("processDebugRequest");
- fun = v8::Function::Cast(*cmd_processor->Get(fun_name));
+ fun = v8::Local<v8::Function>::Cast(cmd_processor->Get(fun_name));
request = v8::String::New(command.text().start(),
command.text().length());
@@ -3095,7 +3193,7 @@ void Debugger::NotifyMessageHandler(v8::DebugEvent event,
if (!try_catch.HasCaught()) {
// Get response string.
if (!response_val->IsUndefined()) {
- response = v8::String::Cast(*response_val);
+ response = v8::Local<v8::String>::Cast(response_val);
} else {
response = v8::String::New("");
}
@@ -3108,7 +3206,7 @@ void Debugger::NotifyMessageHandler(v8::DebugEvent event,
// Get the running state.
fun_name = v8::String::New("isRunning");
- fun = v8::Function::Cast(*cmd_processor->Get(fun_name));
+ fun = v8::Local<v8::Function>::Cast(cmd_processor->Get(fun_name));
static const int kArgc = 1;
v8::Handle<Value> argv[kArgc] = { response };
v8::Local<v8::Value> running_val = fun->Call(cmd_processor, kArgc, argv);
@@ -3176,7 +3274,7 @@ void Debugger::SetEventListener(Handle<Object> callback,
void Debugger::SetMessageHandler(v8::Debug::MessageHandler2 handler) {
- ScopedLock with(debugger_access_);
+ LockGuard<RecursiveMutex> with(debugger_access_);
message_handler_ = handler;
ListenersChanged();
@@ -3205,15 +3303,15 @@ void Debugger::ListenersChanged() {
void Debugger::SetHostDispatchHandler(v8::Debug::HostDispatchHandler handler,
- int period) {
+ TimeDelta period) {
host_dispatch_handler_ = handler;
- host_dispatch_micros_ = period * 1000;
+ host_dispatch_period_ = period;
}
void Debugger::SetDebugMessageDispatchHandler(
v8::Debug::DebugMessageDispatchHandler handler, bool provide_locker) {
- ScopedLock with(dispatch_handler_access_);
+ LockGuard<Mutex> lock_guard(&dispatch_handler_access_);
debug_message_dispatch_handler_ = handler;
if (provide_locker && message_dispatch_helper_thread_ == NULL) {
@@ -3226,7 +3324,7 @@ void Debugger::SetDebugMessageDispatchHandler(
// Calls the registered debug message handler. This callback is part of the
// public API.
void Debugger::InvokeMessageHandler(MessageImpl message) {
- ScopedLock with(debugger_access_);
+ LockGuard<RecursiveMutex> with(debugger_access_);
if (message_handler_ != NULL) {
message_handler_(message);
@@ -3247,7 +3345,7 @@ void Debugger::ProcessCommand(Vector<const uint16_t> command,
client_data);
isolate_->logger()->DebugTag("Put command on command_queue.");
command_queue_.Put(message);
- command_received_->Signal();
+ command_received_.Signal();
// Set the debug command break flag to have the command processed.
if (!isolate_->debug()->InDebugger()) {
@@ -3256,7 +3354,7 @@ void Debugger::ProcessCommand(Vector<const uint16_t> command,
MessageDispatchHelperThread* dispatch_thread;
{
- ScopedLock with(dispatch_handler_access_);
+ LockGuard<Mutex> lock_guard(&dispatch_handler_access_);
dispatch_thread = message_dispatch_helper_thread_;
}
@@ -3285,7 +3383,7 @@ void Debugger::EnqueueDebugCommand(v8::Debug::ClientData* client_data) {
bool Debugger::IsDebuggerActive() {
- ScopedLock with(debugger_access_);
+ LockGuard<RecursiveMutex> with(debugger_access_);
return message_handler_ != NULL ||
!event_listener_.is_null() ||
@@ -3300,7 +3398,7 @@ Handle<Object> Debugger::Call(Handle<JSFunction> fun,
Debugger::never_unload_debugger_ = true;
// Enter the debugger.
- EnterDebugger debugger;
+ EnterDebugger debugger(isolate_);
if (debugger.FailedToEnter()) {
return isolate_->factory()->undefined_value();
}
@@ -3314,8 +3412,10 @@ Handle<Object> Debugger::Call(Handle<JSFunction> fun,
Handle<Object> argv[] = { exec_state, data };
Handle<Object> result = Execution::Call(
+ isolate_,
fun,
- Handle<Object>(isolate_->debug()->debug_context_->global_proxy()),
+ Handle<Object>(isolate_->debug()->debug_context_->global_proxy(),
+ isolate_),
ARRAY_SIZE(argv),
argv,
pending_exception);
@@ -3330,7 +3430,6 @@ static void StubMessageHandler2(const v8::Debug::Message& message) {
bool Debugger::StartAgent(const char* name, int port,
bool wait_for_connection) {
- ASSERT(Isolate::Current() == isolate_);
if (wait_for_connection) {
// Suspend V8 if it is already running or set V8 to suspend whenever
// it starts.
@@ -3342,20 +3441,15 @@ bool Debugger::StartAgent(const char* name, int port,
v8::Debug::DebugBreak();
}
- if (Socket::SetUp()) {
- if (agent_ == NULL) {
- agent_ = new DebuggerAgent(name, port);
- agent_->Start();
- }
- return true;
+ if (agent_ == NULL) {
+ agent_ = new DebuggerAgent(isolate_, name, port);
+ agent_->Start();
}
-
- return false;
+ return true;
}
void Debugger::StopAgent() {
- ASSERT(Isolate::Current() == isolate_);
if (agent_ != NULL) {
agent_->Shutdown();
agent_->Join();
@@ -3366,7 +3460,6 @@ void Debugger::StopAgent() {
void Debugger::WaitForAgent() {
- ASSERT(Isolate::Current() == isolate_);
if (agent_ != NULL)
agent_->WaitUntilListening();
}
@@ -3375,7 +3468,7 @@ void Debugger::WaitForAgent() {
void Debugger::CallMessageDispatchHandler() {
v8::Debug::DebugMessageDispatchHandler handler;
{
- ScopedLock with(dispatch_handler_access_);
+ LockGuard<Mutex> lock_guard(&dispatch_handler_access_);
handler = Debugger::debug_message_dispatch_handler_;
}
if (handler != NULL) {
@@ -3384,8 +3477,8 @@ void Debugger::CallMessageDispatchHandler() {
}
-EnterDebugger::EnterDebugger()
- : isolate_(Isolate::Current()),
+EnterDebugger::EnterDebugger(Isolate* isolate)
+ : isolate_(isolate),
prev_(isolate_->debug()->debugger_entry()),
it_(isolate_),
has_js_frames_(!it_.done()),
@@ -3420,7 +3513,6 @@ EnterDebugger::EnterDebugger()
EnterDebugger::~EnterDebugger() {
- ASSERT(Isolate::Current() == isolate_);
Debug* debug = isolate_->debug();
// Restore to the previous break state.
@@ -3535,13 +3627,19 @@ v8::Handle<v8::Object> MessageImpl::GetExecutionState() const {
}
+v8::Isolate* MessageImpl::GetIsolate() const {
+ return reinterpret_cast<v8::Isolate*>(exec_state_->GetIsolate());
+}
+
+
v8::Handle<v8::Object> MessageImpl::GetEventData() const {
return v8::Utils::ToLocal(event_data_);
}
v8::Handle<v8::String> MessageImpl::GetJSON() const {
- v8::HandleScope scope;
+ v8::HandleScope scope(
+ reinterpret_cast<v8::Isolate*>(event_data_->GetIsolate()));
if (IsEvent()) {
// Call toJSONProtocol on the debug event object.
@@ -3564,7 +3662,7 @@ v8::Handle<v8::String> MessageImpl::GetJSON() const {
v8::Handle<v8::Context> MessageImpl::GetEventContext() const {
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = event_data_->GetIsolate();
v8::Handle<v8::Context> context = GetDebugEventContext(isolate);
  // Isolate::context() may be NULL when "script collected" event occurs.
ASSERT(!context.IsEmpty() || event_ == v8::ScriptCollected);
@@ -3605,7 +3703,7 @@ v8::Handle<v8::Object> EventDetailsImpl::GetEventData() const {
v8::Handle<v8::Context> EventDetailsImpl::GetEventContext() const {
- return GetDebugEventContext(Isolate::Current());
+ return GetDebugEventContext(exec_state_->GetIsolate());
}
@@ -3695,24 +3793,17 @@ void CommandMessageQueue::Expand() {
LockingCommandMessageQueue::LockingCommandMessageQueue(Logger* logger, int size)
- : logger_(logger), queue_(size) {
- lock_ = OS::CreateMutex();
-}
-
-
-LockingCommandMessageQueue::~LockingCommandMessageQueue() {
- delete lock_;
-}
+ : logger_(logger), queue_(size) {}
bool LockingCommandMessageQueue::IsEmpty() const {
- ScopedLock sl(lock_);
+ LockGuard<Mutex> lock_guard(&mutex_);
return queue_.IsEmpty();
}
CommandMessage LockingCommandMessageQueue::Get() {
- ScopedLock sl(lock_);
+ LockGuard<Mutex> lock_guard(&mutex_);
CommandMessage result = queue_.Get();
logger_->DebugEvent("Get", result.text());
return result;
@@ -3720,53 +3811,47 @@ CommandMessage LockingCommandMessageQueue::Get() {
void LockingCommandMessageQueue::Put(const CommandMessage& message) {
- ScopedLock sl(lock_);
+ LockGuard<Mutex> lock_guard(&mutex_);
queue_.Put(message);
logger_->DebugEvent("Put", message.text());
}
void LockingCommandMessageQueue::Clear() {
- ScopedLock sl(lock_);
+ LockGuard<Mutex> lock_guard(&mutex_);
queue_.Clear();
}
MessageDispatchHelperThread::MessageDispatchHelperThread(Isolate* isolate)
: Thread("v8:MsgDispHelpr"),
- sem_(OS::CreateSemaphore(0)), mutex_(OS::CreateMutex()),
+ isolate_(isolate), sem_(0),
already_signalled_(false) {
}
-MessageDispatchHelperThread::~MessageDispatchHelperThread() {
- delete mutex_;
- delete sem_;
-}
-
-
void MessageDispatchHelperThread::Schedule() {
{
- ScopedLock lock(mutex_);
+ LockGuard<Mutex> lock_guard(&mutex_);
if (already_signalled_) {
return;
}
already_signalled_ = true;
}
- sem_->Signal();
+ sem_.Signal();
}
void MessageDispatchHelperThread::Run() {
while (true) {
- sem_->Wait();
+ sem_.Wait();
{
- ScopedLock lock(mutex_);
+ LockGuard<Mutex> lock_guard(&mutex_);
already_signalled_ = false;
}
{
- Locker locker;
- Isolate::Current()->debugger()->CallMessageDispatchHandler();
+ Locker locker(reinterpret_cast<v8::Isolate*>(isolate_));
+ isolate_->debugger()->CallMessageDispatchHandler();
}
}
}
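On the C++ side, the key additions are the BreakPositionAlignment parameter threaded through FindBreakLocationFromPosition, SetBreakPointForScript and GetSourceBreakLocations, the explicit Isolate plumbing that replaces Isolate::Current(), and searching break locations from frame->pc() - 1 because pc points past the current instruction. The header notes that the new enum must match Debug.BreakPositionAlignment on the script side; below is a minimal sketch of that mirror and of the position selection, with names chosen here for illustration only, not code from the patch.

// Illustrative mirror of the new enum; the numeric values must line up with
// debug.h (STATEMENT_ALIGNED = 0, BREAK_POSITION_ALIGNED = 1).
var BreakPositionAlignment = { Statement: 0, BreakPosition: 1 };

// Stand-in for the switch now used in FindBreakLocationFromPosition and
// GetSourceBreakLocations: statement alignment snaps to the enclosing
// statement, break-position alignment keeps the exact break location.
function alignedPosition(breakPointInfo, alignment) {
  switch (alignment) {
    case BreakPositionAlignment.Statement:
      return breakPointInfo.statement_position;
    case BreakPositionAlignment.BreakPosition:
      return breakPointInfo.source_position;
    default:
      throw new Error('Unknown break position alignment: ' + alignment);
  }
}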
diff --git a/deps/v8/src/debug.h b/deps/v8/src/debug.h
index 150e29e30..8e71ea670 100644
--- a/deps/v8/src/debug.h
+++ b/deps/v8/src/debug.h
@@ -38,6 +38,7 @@
#include "frames-inl.h"
#include "hashmap.h"
#include "platform.h"
+#include "platform/socket.h"
#include "string-stream.h"
#include "v8threads.h"
@@ -79,6 +80,14 @@ enum BreakLocatorType {
};
+// The different types of breakpoint position alignments.
+// Must match Debug.BreakPositionAlignment in debug-debugger.js
+enum BreakPositionAlignment {
+ STATEMENT_ALIGNED = 0,
+ BREAK_POSITION_ALIGNED = 1
+};
+
+
// Class for iterating through the break points in a function and changing
// them.
class BreakLocationIterator {
@@ -90,14 +99,16 @@ class BreakLocationIterator {
void Next();
void Next(int count);
void FindBreakLocationFromAddress(Address pc);
- void FindBreakLocationFromPosition(int position);
+ void FindBreakLocationFromPosition(int position,
+ BreakPositionAlignment alignment);
void Reset();
bool Done() const;
void SetBreakPoint(Handle<Object> break_point_object);
void ClearBreakPoint(Handle<Object> break_point_object);
void SetOneShot();
void ClearOneShot();
- void PrepareStepIn();
+ bool IsStepInLocation(Isolate* isolate);
+ void PrepareStepIn(Isolate* isolate);
bool IsExit() const;
bool HasBreakPoint();
bool IsDebugBreak();
@@ -164,7 +175,8 @@ class BreakLocationIterator {
// the cache is the script id.
class ScriptCache : private HashMap {
public:
- ScriptCache() : HashMap(ScriptMatch), collected_scripts_(10) {}
+ explicit ScriptCache(Isolate* isolate)
+ : HashMap(ScriptMatch), isolate_(isolate), collected_scripts_(10) {}
virtual ~ScriptCache() { Clear(); }
// Add script to the cache.
@@ -189,8 +201,11 @@ class ScriptCache : private HashMap {
void Clear();
// Weak handle callback for scripts in the cache.
- static void HandleWeakScript(v8::Persistent<v8::Value> obj, void* data);
+ static void HandleWeakScript(v8::Isolate* isolate,
+ v8::Persistent<v8::Value>* obj,
+ void* data);
+ Isolate* isolate_;
  // List used during GC to temporarily store ids of collected scripts.
List<int> collected_scripts_;
};
@@ -238,7 +253,8 @@ class Debug {
int* source_position);
bool SetBreakPointForScript(Handle<Script> script,
Handle<Object> break_point_object,
- int* source_position);
+ int* source_position,
+ BreakPositionAlignment alignment);
void ClearBreakPoint(Handle<Object> break_point_object);
void ClearAllBreakPoints();
void FloodWithOneShot(Handle<JSFunction> function);
@@ -246,7 +262,9 @@ class Debug {
void FloodHandlerWithOneShot();
void ChangeBreakOnException(ExceptionBreakType type, bool enable);
bool IsBreakOnException(ExceptionBreakType type);
- void PrepareStep(StepAction step_action, int step_count);
+ void PrepareStep(StepAction step_action,
+ int step_count,
+ StackFrame::Id frame_id);
void ClearStepping();
void ClearStepOut();
bool IsStepping() { return thread_local_.step_count_ > 0; }
@@ -281,7 +299,8 @@ class Debug {
static Handle<Code> FindDebugBreak(Handle<Code> code, RelocInfo::Mode mode);
static Handle<Object> GetSourceBreakLocations(
- Handle<SharedFunctionInfo> shared);
+ Handle<SharedFunctionInfo> shared,
+      BreakPositionAlignment position_alignment);
// Getter for the debug_context.
inline Handle<Context> debug_context() { return debug_context_; }
@@ -384,7 +403,9 @@ class Debug {
static const int kEstimatedNofBreakPointsInFunction = 16;
// Passed to MakeWeak.
- static void HandleWeakDebugInfo(v8::Persistent<v8::Value> obj, void* data);
+ static void HandleWeakDebugInfo(v8::Isolate* isolate,
+ v8::Persistent<v8::Value>* obj,
+ void* data);
friend class Debugger;
friend Handle<FixedArray> GetDebuggedFunctions(); // In test-debug.cc
@@ -414,6 +435,7 @@ class Debug {
static void GenerateStoreICDebugBreak(MacroAssembler* masm);
static void GenerateKeyedLoadICDebugBreak(MacroAssembler* masm);
static void GenerateKeyedStoreICDebugBreak(MacroAssembler* masm);
+ static void GenerateCompareNilICDebugBreak(MacroAssembler* masm);
static void GenerateReturnDebugBreak(MacroAssembler* masm);
static void GenerateCallFunctionStubDebugBreak(MacroAssembler* masm);
static void GenerateCallFunctionStubRecordDebugBreak(MacroAssembler* masm);
@@ -515,7 +537,7 @@ class Debug {
explicit Debug(Isolate* isolate);
~Debug();
- static bool CompileDebuggerScript(int index);
+ static bool CompileDebuggerScript(Isolate* isolate, int index);
void ClearOneShot();
void ActivateStepIn(StackFrame* frame);
void ClearStepIn();
@@ -647,6 +669,7 @@ class MessageImpl: public v8::Debug::Message {
virtual v8::Handle<v8::String> GetJSON() const;
virtual v8::Handle<v8::Context> GetEventContext() const;
virtual v8::Debug::ClientData* GetClientData() const;
+ virtual v8::Isolate* GetIsolate() const;
private:
MessageImpl(bool is_event,
@@ -745,7 +768,6 @@ class MessageDispatchHelperThread;
class LockingCommandMessageQueue BASE_EMBEDDED {
public:
LockingCommandMessageQueue(Logger* logger, int size);
- ~LockingCommandMessageQueue();
bool IsEmpty() const;
CommandMessage Get();
void Put(const CommandMessage& message);
@@ -753,7 +775,7 @@ class LockingCommandMessageQueue BASE_EMBEDDED {
private:
Logger* logger_;
CommandMessageQueue queue_;
- Mutex* lock_;
+ mutable Mutex mutex_;
DISALLOW_COPY_AND_ASSIGN(LockingCommandMessageQueue);
};
@@ -804,7 +826,7 @@ class Debugger {
void SetEventListener(Handle<Object> callback, Handle<Object> data);
void SetMessageHandler(v8::Debug::MessageHandler2 handler);
void SetHostDispatchHandler(v8::Debug::HostDispatchHandler handler,
- int period);
+ TimeDelta period);
void SetDebugMessageDispatchHandler(
v8::Debug::DebugMessageDispatchHandler handler,
bool provide_locker);
@@ -846,7 +868,7 @@ class Debugger {
friend void ForceUnloadDebugger(); // In test-debug.cc
inline bool EventActive(v8::DebugEvent event) {
- ScopedLock with(debugger_access_);
+ LockGuard<RecursiveMutex> lock_guard(debugger_access_);
    // Check whether the message handler has been cleared.
if (debugger_unload_pending_) {
@@ -875,7 +897,9 @@ class Debugger {
void set_loading_debugger(bool v) { is_loading_debugger_ = v; }
bool is_loading_debugger() const { return is_loading_debugger_; }
void set_live_edit_enabled(bool v) { live_edit_enabled_ = v; }
- bool live_edit_enabled() const { return live_edit_enabled_; }
+ bool live_edit_enabled() const {
+    return FLAG_enable_liveedit && live_edit_enabled_;
+ }
void set_force_debugger_active(bool force_debugger_active) {
force_debugger_active_ = force_debugger_active;
}
@@ -899,7 +923,7 @@ class Debugger {
Handle<Object> event_data);
void ListenersChanged();
- Mutex* debugger_access_; // Mutex guarding debugger variables.
+ RecursiveMutex* debugger_access_; // Mutex guarding debugger variables.
Handle<Object> event_listener_; // Global handle to listener.
Handle<Object> event_listener_data_;
bool compiling_natives_; // Are we compiling natives?
@@ -910,16 +934,16 @@ class Debugger {
v8::Debug::MessageHandler2 message_handler_;
bool debugger_unload_pending_; // Was message handler cleared?
v8::Debug::HostDispatchHandler host_dispatch_handler_;
- Mutex* dispatch_handler_access_; // Mutex guarding dispatch handler.
+ Mutex dispatch_handler_access_; // Mutex guarding dispatch handler.
v8::Debug::DebugMessageDispatchHandler debug_message_dispatch_handler_;
MessageDispatchHelperThread* message_dispatch_helper_thread_;
- int host_dispatch_micros_;
+ TimeDelta host_dispatch_period_;
DebuggerAgent* agent_;
static const int kQueueInitialSize = 4;
LockingCommandMessageQueue command_queue_;
- Semaphore* command_received_; // Signaled for each command received.
+ Semaphore command_received_; // Signaled for each command received.
LockingCommandMessageQueue event_command_queue_;
Isolate* isolate_;
@@ -937,7 +961,7 @@ class Debugger {
// some reason could not be entered FailedToEnter will return true.
class EnterDebugger BASE_EMBEDDED {
public:
- EnterDebugger();
+ explicit EnterDebugger(Isolate* isolate);
~EnterDebugger();
// Check whether the debugger could be entered.
@@ -964,12 +988,12 @@ class EnterDebugger BASE_EMBEDDED {
// Stack allocated class for disabling break.
class DisableBreak BASE_EMBEDDED {
public:
- explicit DisableBreak(bool disable_break) : isolate_(Isolate::Current()) {
+ explicit DisableBreak(Isolate* isolate, bool disable_break)
+ : isolate_(isolate) {
prev_disable_break_ = isolate_->debug()->disable_break();
isolate_->debug()->set_disable_break(disable_break);
}
~DisableBreak() {
- ASSERT(Isolate::Current() == isolate_);
isolate_->debug()->set_disable_break(prev_disable_break_);
}
@@ -1028,15 +1052,16 @@ class Debug_Address {
class MessageDispatchHelperThread: public Thread {
public:
explicit MessageDispatchHelperThread(Isolate* isolate);
- ~MessageDispatchHelperThread();
+ ~MessageDispatchHelperThread() {}
void Schedule();
private:
void Run();
- Semaphore* const sem_;
- Mutex* const mutex_;
+ Isolate* isolate_;
+ Semaphore sem_;
+ Mutex mutex_;
bool already_signalled_;
DISALLOW_COPY_AND_ASSIGN(MessageDispatchHelperThread);
diff --git a/deps/v8/src/defaults.cc b/deps/v8/src/defaults.cc
new file mode 100644
index 000000000..a03cf69b0
--- /dev/null
+++ b/deps/v8/src/defaults.cc
@@ -0,0 +1,70 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The GYP-based build ends up defining USING_V8_SHARED when compiling this
+// file.
+#undef USING_V8_SHARED
+#include "../include/v8-defaults.h"
+
+#include "platform.h"
+#include "globals.h"
+#include "v8.h"
+
+namespace v8 {
+
+
+bool ConfigureResourceConstraintsForCurrentPlatform(
+ ResourceConstraints* constraints) {
+ if (constraints == NULL) {
+ return false;
+ }
+
+ int lump_of_memory = (i::kPointerSize / 4) * i::MB;
+
+ // The young_space_size should be a power of 2 and old_generation_size should
+ // be a multiple of Page::kPageSize.
+#if V8_OS_ANDROID
+ constraints->set_max_young_space_size(8 * lump_of_memory);
+ constraints->set_max_old_space_size(256 * lump_of_memory);
+ constraints->set_max_executable_size(192 * lump_of_memory);
+#else
+ constraints->set_max_young_space_size(16 * lump_of_memory);
+ constraints->set_max_old_space_size(700 * lump_of_memory);
+ constraints->set_max_executable_size(256 * lump_of_memory);
+#endif
+ return true;
+}
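+// For example, on a 64-bit build (i::kPointerSize == 8) lump_of_memory is 2 MB,
+// so the non-Android limits above work out to a 32 MB young space, a 1400 MB old
+// space and a 512 MB executable-code limit; a 32-bit build gets half of each.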
+
+
+bool SetDefaultResourceConstraintsForCurrentPlatform() {
+ ResourceConstraints constraints;
+ if (!ConfigureResourceConstraintsForCurrentPlatform(&constraints))
+ return false;
+ return SetResourceConstraints(&constraints);
+}
+
+} // namespace v8
diff --git a/deps/v8/src/deoptimizer.cc b/deps/v8/src/deoptimizer.cc
index ad893b3b4..84e80b9d9 100644
--- a/deps/v8/src/deoptimizer.cc
+++ b/deps/v8/src/deoptimizer.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -40,27 +40,37 @@
namespace v8 {
namespace internal {
-DeoptimizerData::DeoptimizerData() {
- eager_deoptimization_entry_code_ = NULL;
- lazy_deoptimization_entry_code_ = NULL;
- current_ = NULL;
- deoptimizing_code_list_ = NULL;
+static MemoryChunk* AllocateCodeChunk(MemoryAllocator* allocator) {
+ return allocator->AllocateChunk(Deoptimizer::GetMaxDeoptTableSize(),
+ OS::CommitPageSize(),
+#if defined(__native_client__)
+ // The Native Client port of V8 uses an interpreter,
+ // so code pages don't need PROT_EXEC.
+ NOT_EXECUTABLE,
+#else
+ EXECUTABLE,
+#endif
+ NULL);
+}
+
+
+DeoptimizerData::DeoptimizerData(MemoryAllocator* allocator)
+ : allocator_(allocator),
#ifdef ENABLE_DEBUGGER_SUPPORT
- deoptimized_frame_info_ = NULL;
+ deoptimized_frame_info_(NULL),
#endif
+ current_(NULL) {
+ for (int i = 0; i < Deoptimizer::kBailoutTypesWithCodeEntry; ++i) {
+ deopt_entry_code_entries_[i] = -1;
+ deopt_entry_code_[i] = AllocateCodeChunk(allocator);
+ }
}
DeoptimizerData::~DeoptimizerData() {
- if (eager_deoptimization_entry_code_ != NULL) {
- Isolate::Current()->memory_allocator()->Free(
- eager_deoptimization_entry_code_);
- eager_deoptimization_entry_code_ = NULL;
- }
- if (lazy_deoptimization_entry_code_ != NULL) {
- Isolate::Current()->memory_allocator()->Free(
- lazy_deoptimization_entry_code_);
- lazy_deoptimization_entry_code_ = NULL;
+ for (int i = 0; i < Deoptimizer::kBailoutTypesWithCodeEntry; ++i) {
+ allocator_->Free(deopt_entry_code_[i]);
+ deopt_entry_code_[i] = NULL;
}
}
@@ -74,6 +84,22 @@ void DeoptimizerData::Iterate(ObjectVisitor* v) {
#endif
+Code* Deoptimizer::FindDeoptimizingCode(Address addr) {
+ if (function_->IsHeapObject()) {
+ // Search all deoptimizing code in the native context of the function.
+ Context* native_context = function_->context()->native_context();
+ Object* element = native_context->DeoptimizedCodeListHead();
+ while (!element->IsUndefined()) {
+ Code* code = Code::cast(element);
+ ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
+ if (code->contains(addr)) return code;
+ element = code->next_code_link();
+ }
+ }
+ return NULL;
+}
+
+
// We rely on this function not causing a GC. It is called from generated code
// without having a real stack frame in place.
Deoptimizer* Deoptimizer::New(JSFunction* function,
@@ -82,7 +108,6 @@ Deoptimizer* Deoptimizer::New(JSFunction* function,
Address from,
int fp_to_sp_delta,
Isolate* isolate) {
- ASSERT(isolate == Isolate::Current());
Deoptimizer* deoptimizer = new Deoptimizer(isolate,
function,
type,
@@ -96,8 +121,21 @@ Deoptimizer* Deoptimizer::New(JSFunction* function,
}
+// No larger than 2K on all platforms.
+static const int kDeoptTableMaxEpilogueCodeSize = 2 * KB;
+
+
+size_t Deoptimizer::GetMaxDeoptTableSize() {
+ int entries_size =
+ Deoptimizer::kMaxNumberOfEntries * Deoptimizer::table_entry_size_;
+ int commit_page_size = static_cast<int>(OS::CommitPageSize());
+ int page_count = ((kDeoptTableMaxEpilogueCodeSize + entries_size - 1) /
+ commit_page_size) + 1;
+ return static_cast<size_t>(commit_page_size * page_count);
+}
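+// As a hypothetical example: with a 64 KB entry table and a 4 KB commit page
+// size, the computation above yields ((2 KB + 64 KB - 1) / 4 KB) + 1 = 17 pages,
+// i.e. 68 KB reserved for each bailout type's entry chunk.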
+
+
Deoptimizer* Deoptimizer::Grab(Isolate* isolate) {
- ASSERT(isolate == Isolate::Current());
Deoptimizer* result = isolate->deoptimizer_data()->current_;
ASSERT(result != NULL);
result->DeleteFrameDescriptions();
@@ -127,12 +165,11 @@ DeoptimizedFrameInfo* Deoptimizer::DebuggerInspectableFrame(
JavaScriptFrame* frame,
int jsframe_index,
Isolate* isolate) {
- ASSERT(isolate == Isolate::Current());
ASSERT(frame->is_optimized());
ASSERT(isolate->deoptimizer_data()->deoptimized_frame_info_ == NULL);
// Get the function and code from the frame.
- JSFunction* function = JSFunction::cast(frame->function());
+ JSFunction* function = frame->function();
Code* code = frame->LookupCode();
// Locate the deoptimization point in the code. As we are at a call the
@@ -213,7 +250,6 @@ DeoptimizedFrameInfo* Deoptimizer::DebuggerInspectableFrame(
void Deoptimizer::DeleteDebuggerInspectableFrame(DeoptimizedFrameInfo* info,
Isolate* isolate) {
- ASSERT(isolate == Isolate::Current());
ASSERT(isolate->deoptimizer_data()->deoptimized_frame_info_ == info);
delete info;
isolate->deoptimizer_data()->deoptimized_frame_info_ = NULL;
@@ -228,121 +264,219 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
}
-class DeoptimizingVisitor : public OptimizedFunctionVisitor {
- public:
- virtual void EnterContext(Context* context) {
- if (FLAG_trace_deopt) {
- PrintF("[deoptimize context: %" V8PRIxPTR "]\n",
- reinterpret_cast<intptr_t>(context));
- }
- }
+void Deoptimizer::VisitAllOptimizedFunctionsForContext(
+ Context* context, OptimizedFunctionVisitor* visitor) {
+ DisallowHeapAllocation no_allocation;
- virtual void VisitFunction(JSFunction* function) {
- Deoptimizer::DeoptimizeFunction(function);
- }
+ ASSERT(context->IsNativeContext());
+
+ visitor->EnterContext(context);
- virtual void LeaveContext(Context* context) {
- context->ClearOptimizedFunctions();
+ // Visit the list of optimized functions, removing elements that
+ // no longer refer to optimized code.
+ JSFunction* prev = NULL;
+ Object* element = context->OptimizedFunctionsListHead();
+ while (!element->IsUndefined()) {
+ JSFunction* function = JSFunction::cast(element);
+ Object* next = function->next_function_link();
+ if (function->code()->kind() != Code::OPTIMIZED_FUNCTION ||
+ (visitor->VisitFunction(function),
+ function->code()->kind() != Code::OPTIMIZED_FUNCTION)) {
+      // The function no longer refers to optimized code, or the visitor
+      // changed the code it refers to so that it is no longer optimized code.
+      // Remove the function from this list.
+ if (prev != NULL) {
+ prev->set_next_function_link(next);
+ } else {
+ context->SetOptimizedFunctionsListHead(next);
+ }
+ // The visitor should not alter the link directly.
+ ASSERT(function->next_function_link() == next);
+ // Set the next function link to undefined to indicate it is no longer
+ // in the optimized functions list.
+ function->set_next_function_link(context->GetHeap()->undefined_value());
+ } else {
+ // The visitor should not alter the link directly.
+ ASSERT(function->next_function_link() == next);
+      // Preserve this element.
+ prev = function;
+ }
+ element = next;
}
-};
+ visitor->LeaveContext(context);
+}
-void Deoptimizer::DeoptimizeAll() {
- AssertNoAllocation no_allocation;
- if (FLAG_trace_deopt) {
- PrintF("[deoptimize all contexts]\n");
- }
+void Deoptimizer::VisitAllOptimizedFunctions(
+ Isolate* isolate,
+ OptimizedFunctionVisitor* visitor) {
+ DisallowHeapAllocation no_allocation;
- DeoptimizingVisitor visitor;
- VisitAllOptimizedFunctions(&visitor);
+ // Run through the list of all native contexts.
+ Object* context = isolate->heap()->native_contexts_list();
+ while (!context->IsUndefined()) {
+ VisitAllOptimizedFunctionsForContext(Context::cast(context), visitor);
+ context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
+ }
}
-void Deoptimizer::DeoptimizeGlobalObject(JSObject* object) {
- AssertNoAllocation no_allocation;
+// Unlink functions referring to code marked for deoptimization, then move
+// marked code from the optimized code list to the deoptimized code list,
+// and patch code for lazy deopt.
+void Deoptimizer::DeoptimizeMarkedCodeForContext(Context* context) {
+ DisallowHeapAllocation no_allocation;
- DeoptimizingVisitor visitor;
- VisitAllOptimizedFunctionsForGlobalObject(object, &visitor);
-}
+ // A "closure" that unlinks optimized code that is going to be
+ // deoptimized from the functions that refer to it.
+ class SelectedCodeUnlinker: public OptimizedFunctionVisitor {
+ public:
+ virtual void EnterContext(Context* context) { } // Don't care.
+ virtual void LeaveContext(Context* context) { } // Don't care.
+ virtual void VisitFunction(JSFunction* function) {
+ Code* code = function->code();
+ if (!code->marked_for_deoptimization()) return;
+ // Unlink this function and evict from optimized code map.
+ SharedFunctionInfo* shared = function->shared();
+ function->set_code(shared->code());
+ shared->EvictFromOptimizedCodeMap(code, "deoptimized function");
-void Deoptimizer::VisitAllOptimizedFunctionsForContext(
- Context* context, OptimizedFunctionVisitor* visitor) {
- Isolate* isolate = context->GetIsolate();
- ZoneScope zone_scope(isolate->runtime_zone(), DELETE_ON_EXIT);
- AssertNoAllocation no_allocation;
+ if (FLAG_trace_deopt) {
+ PrintF("[deoptimizer unlinked: ");
+ function->PrintName();
+ PrintF(" / %" V8PRIxPTR "]\n", reinterpret_cast<intptr_t>(function));
+ }
+ }
+ };
- ASSERT(context->IsNativeContext());
+ // Unlink all functions that refer to marked code.
+ SelectedCodeUnlinker unlinker;
+ VisitAllOptimizedFunctionsForContext(context, &unlinker);
- visitor->EnterContext(context);
+ // Move marked code from the optimized code list to the deoptimized
+ // code list, collecting them into a ZoneList.
+ Isolate* isolate = context->GetHeap()->isolate();
+ Zone zone(isolate);
+ ZoneList<Code*> codes(10, &zone);
- // Create a snapshot of the optimized functions list. This is needed because
- // visitors might remove more than one link from the list at once.
- ZoneList<JSFunction*> snapshot(1, isolate->runtime_zone());
- Object* element = context->OptimizedFunctionsListHead();
+ // Walk over all optimized code objects in this native context.
+ Code* prev = NULL;
+ Object* element = context->OptimizedCodeListHead();
while (!element->IsUndefined()) {
- JSFunction* element_function = JSFunction::cast(element);
- snapshot.Add(element_function, isolate->runtime_zone());
- element = element_function->next_function_link();
+ Code* code = Code::cast(element);
+ ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
+ Object* next = code->next_code_link();
+ if (code->marked_for_deoptimization()) {
+ // Put the code into the list for later patching.
+ codes.Add(code, &zone);
+
+ if (prev != NULL) {
+ // Skip this code in the optimized code list.
+ prev->set_next_code_link(next);
+ } else {
+ // There was no previous node, the next node is the new head.
+ context->SetOptimizedCodeListHead(next);
+ }
+
+ // Move the code to the _deoptimized_ code list.
+ code->set_next_code_link(context->DeoptimizedCodeListHead());
+ context->SetDeoptimizedCodeListHead(code);
+ } else {
+ // Not marked; preserve this element.
+ prev = code;
+ }
+ element = next;
}
- // Run through the snapshot of optimized functions and visit them.
- for (int i = 0; i < snapshot.length(); ++i) {
- visitor->VisitFunction(snapshot.at(i));
+ // TODO(titzer): we need a handle scope only because of the macro assembler,
+ // which is only used in EnsureCodeForDeoptimizationEntry.
+ HandleScope scope(isolate);
+ // Now patch all the codes for deoptimization.
+ for (int i = 0; i < codes.length(); i++) {
+ // It is finally time to die, code object.
+ // Do platform-specific patching to force any activations to lazy deopt.
+ PatchCodeForDeoptimization(isolate, codes[i]);
+
+ // We might be in the middle of incremental marking with compaction.
+ // Tell collector to treat this code object in a special way and
+ // ignore all slots that might have been recorded on it.
+ isolate->heap()->mark_compact_collector()->InvalidateCode(codes[i]);
}
+}
- visitor->LeaveContext(context);
+
+void Deoptimizer::DeoptimizeAll(Isolate* isolate) {
+ if (FLAG_trace_deopt) {
+ PrintF("[deoptimize all code in all contexts]\n");
+ }
+ DisallowHeapAllocation no_allocation;
+ // For all contexts, mark all code, then deoptimize.
+ Object* context = isolate->heap()->native_contexts_list();
+ while (!context->IsUndefined()) {
+ Context* native_context = Context::cast(context);
+ MarkAllCodeForContext(native_context);
+ DeoptimizeMarkedCodeForContext(native_context);
+ context = native_context->get(Context::NEXT_CONTEXT_LINK);
+ }
}
-void Deoptimizer::VisitAllOptimizedFunctionsForGlobalObject(
- JSObject* object, OptimizedFunctionVisitor* visitor) {
- AssertNoAllocation no_allocation;
+void Deoptimizer::DeoptimizeMarkedCode(Isolate* isolate) {
+ if (FLAG_trace_deopt) {
+ PrintF("[deoptimize marked code in all contexts]\n");
+ }
+ DisallowHeapAllocation no_allocation;
+ // For all contexts, deoptimize code already marked.
+ Object* context = isolate->heap()->native_contexts_list();
+ while (!context->IsUndefined()) {
+ Context* native_context = Context::cast(context);
+ DeoptimizeMarkedCodeForContext(native_context);
+ context = native_context->get(Context::NEXT_CONTEXT_LINK);
+ }
+}
+
+void Deoptimizer::DeoptimizeGlobalObject(JSObject* object) {
+ if (FLAG_trace_deopt) {
+ PrintF("[deoptimize global object @ 0x%08" V8PRIxPTR "]\n",
+ reinterpret_cast<intptr_t>(object));
+ }
if (object->IsJSGlobalProxy()) {
Object* proto = object->GetPrototype();
ASSERT(proto->IsJSGlobalObject());
- VisitAllOptimizedFunctionsForContext(
- GlobalObject::cast(proto)->native_context(), visitor);
+ Context* native_context = GlobalObject::cast(proto)->native_context();
+ MarkAllCodeForContext(native_context);
+ DeoptimizeMarkedCodeForContext(native_context);
} else if (object->IsGlobalObject()) {
- VisitAllOptimizedFunctionsForContext(
- GlobalObject::cast(object)->native_context(), visitor);
+ Context* native_context = GlobalObject::cast(object)->native_context();
+ MarkAllCodeForContext(native_context);
+ DeoptimizeMarkedCodeForContext(native_context);
}
}
-void Deoptimizer::VisitAllOptimizedFunctions(
- OptimizedFunctionVisitor* visitor) {
- AssertNoAllocation no_allocation;
-
- // Run through the list of all native contexts and deoptimize.
- Object* context = Isolate::Current()->heap()->native_contexts_list();
- while (!context->IsUndefined()) {
- // GC can happen when the context is not fully initialized,
- // so the global field of the context can be undefined.
- Object* global = Context::cast(context)->get(Context::GLOBAL_OBJECT_INDEX);
- if (!global->IsUndefined()) {
- VisitAllOptimizedFunctionsForGlobalObject(JSObject::cast(global),
- visitor);
- }
- context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
+void Deoptimizer::MarkAllCodeForContext(Context* context) {
+ Object* element = context->OptimizedCodeListHead();
+ while (!element->IsUndefined()) {
+ Code* code = Code::cast(element);
+ ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
+ code->set_marked_for_deoptimization(true);
+ element = code->next_code_link();
}
}
-void Deoptimizer::HandleWeakDeoptimizedCode(
- v8::Persistent<v8::Value> obj, void* data) {
- DeoptimizingCodeListNode* node =
- reinterpret_cast<DeoptimizingCodeListNode*>(data);
- RemoveDeoptimizingCode(*node->code());
-#ifdef DEBUG
- node = Isolate::Current()->deoptimizer_data()->deoptimizing_code_list_;
- while (node != NULL) {
- ASSERT(node != reinterpret_cast<DeoptimizingCodeListNode*>(data));
- node = node->next();
+void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
+ Code* code = function->code();
+ if (code->kind() == Code::OPTIMIZED_FUNCTION) {
+ // Mark the code for deoptimization and unlink any functions that also
+ // refer to that code. The code cannot be shared across native contexts,
+ // so we only need to search one.
+ code->set_marked_for_deoptimization(true);
+ DeoptimizeMarkedCodeForContext(function->context()->native_context());
}
-#endif
}
@@ -351,6 +485,34 @@ void Deoptimizer::ComputeOutputFrames(Deoptimizer* deoptimizer) {
}
+bool Deoptimizer::TraceEnabledFor(BailoutType deopt_type,
+ StackFrame::Type frame_type) {
+ switch (deopt_type) {
+ case EAGER:
+ case SOFT:
+ case LAZY:
+ case DEBUGGER:
+ return (frame_type == StackFrame::STUB)
+ ? FLAG_trace_stub_failures
+ : FLAG_trace_deopt;
+ }
+ UNREACHABLE();
+ return false;
+}
+
+
+const char* Deoptimizer::MessageFor(BailoutType type) {
+ switch (type) {
+ case EAGER: return "eager";
+ case SOFT: return "soft";
+ case LAZY: return "lazy";
+ case DEBUGGER: return "debugger";
+ }
+ UNREACHABLE();
+ return NULL;
+}
+
+
Deoptimizer::Deoptimizer(Isolate* isolate,
JSFunction* function,
BailoutType type,
@@ -369,76 +531,89 @@ Deoptimizer::Deoptimizer(Isolate* isolate,
output_count_(0),
jsframe_count_(0),
output_(NULL),
- deferred_arguments_objects_values_(0),
- deferred_arguments_objects_(0),
- deferred_heap_numbers_(0) {
- if (FLAG_trace_deopt && type != OSR) {
- if (type == DEBUGGER) {
- PrintF("**** DEOPT FOR DEBUGGER: ");
- } else {
- PrintF("**** DEOPT: ");
- }
- function->PrintName();
- PrintF(" at bailout #%u, address 0x%" V8PRIxPTR ", frame size %d\n",
- bailout_id,
- reinterpret_cast<intptr_t>(from),
- fp_to_sp_delta - (2 * kPointerSize));
- } else if (FLAG_trace_osr && type == OSR) {
- PrintF("**** OSR: ");
- function->PrintName();
- PrintF(" at ast id #%u, address 0x%" V8PRIxPTR ", frame size %d\n",
- bailout_id,
- reinterpret_cast<intptr_t>(from),
- fp_to_sp_delta - (2 * kPointerSize));
- }
- function->shared()->increment_deopt_count();
- // Find the optimized code.
- if (type == EAGER) {
- ASSERT(from == NULL);
- optimized_code_ = function_->code();
- if (FLAG_trace_deopt && FLAG_code_comments) {
- // Print instruction associated with this bailout.
- const char* last_comment = NULL;
- int mask = RelocInfo::ModeMask(RelocInfo::COMMENT)
- | RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
- for (RelocIterator it(optimized_code_, mask); !it.done(); it.next()) {
- RelocInfo* info = it.rinfo();
- if (info->rmode() == RelocInfo::COMMENT) {
- last_comment = reinterpret_cast<const char*>(info->data());
- }
- if (info->rmode() == RelocInfo::RUNTIME_ENTRY) {
- unsigned id = Deoptimizer::GetDeoptimizationId(
- info->target_address(), Deoptimizer::EAGER);
- if (id == bailout_id && last_comment != NULL) {
- PrintF(" %s\n", last_comment);
- break;
- }
- }
- }
+ deferred_objects_tagged_values_(0),
+ deferred_objects_double_values_(0),
+ deferred_objects_(0),
+ deferred_heap_numbers_(0),
+ jsframe_functions_(0),
+ jsframe_has_adapted_arguments_(0),
+ materialized_values_(NULL),
+ materialized_objects_(NULL),
+ materialization_value_index_(0),
+ materialization_object_index_(0),
+ trace_(false) {
+ // For COMPILED_STUBs called from builtins, the function pointer is a SMI
+ // indicating an internal frame.
+ if (function->IsSmi()) {
+ function = NULL;
+ }
+ ASSERT(from != NULL);
+ if (function != NULL && function->IsOptimized()) {
+ function->shared()->increment_deopt_count();
+ if (bailout_type_ == Deoptimizer::SOFT) {
+ isolate->counters()->soft_deopts_executed()->Increment();
+ // Soft deopts shouldn't count against the overall re-optimization count
+ // that can eventually lead to disabling optimization for a function.
+ int opt_count = function->shared()->opt_count();
+ if (opt_count > 0) opt_count--;
+ function->shared()->set_opt_count(opt_count);
}
- } else if (type == LAZY) {
- optimized_code_ = FindDeoptimizingCodeFromAddress(from);
- ASSERT(optimized_code_ != NULL);
- } else if (type == OSR) {
- // The function has already been optimized and we're transitioning
- // from the unoptimized shared version to the optimized one in the
- // function. The return address (from) points to unoptimized code.
- optimized_code_ = function_->code();
- ASSERT(optimized_code_->kind() == Code::OPTIMIZED_FUNCTION);
- ASSERT(!optimized_code_->contains(from));
- } else if (type == DEBUGGER) {
- optimized_code_ = optimized_code;
- ASSERT(optimized_code_->contains(from));
- }
- ASSERT(HEAP->allow_allocation(false));
+ }
+ compiled_code_ = FindOptimizedCode(function, optimized_code);
+
+#if DEBUG
+ ASSERT(compiled_code_ != NULL);
+ if (type == EAGER || type == SOFT || type == LAZY) {
+ ASSERT(compiled_code_->kind() != Code::FUNCTION);
+ }
+#endif
+
+ StackFrame::Type frame_type = function == NULL
+ ? StackFrame::STUB
+ : StackFrame::JAVA_SCRIPT;
+ trace_ = TraceEnabledFor(type, frame_type);
+#ifdef DEBUG
+ CHECK(AllowHeapAllocation::IsAllowed());
+ disallow_heap_allocation_ = new DisallowHeapAllocation();
+#endif // DEBUG
unsigned size = ComputeInputFrameSize();
input_ = new(size) FrameDescription(size, function);
- input_->SetFrameType(StackFrame::JAVA_SCRIPT);
+ input_->SetFrameType(frame_type);
+}
+
+
+Code* Deoptimizer::FindOptimizedCode(JSFunction* function,
+ Code* optimized_code) {
+ switch (bailout_type_) {
+ case Deoptimizer::SOFT:
+ case Deoptimizer::EAGER:
+ case Deoptimizer::LAZY: {
+ Code* compiled_code = FindDeoptimizingCode(from_);
+ return (compiled_code == NULL)
+ ? static_cast<Code*>(isolate_->FindCodeObject(from_))
+ : compiled_code;
+ }
+ case Deoptimizer::DEBUGGER:
+ ASSERT(optimized_code->contains(from_));
+ return optimized_code;
+ }
+ UNREACHABLE();
+ return NULL;
+}
+
+
+void Deoptimizer::PrintFunctionName() {
+ if (function_->IsJSFunction()) {
+ function_->PrintName();
+ } else {
+ PrintF("%s", Code::Kind2String(compiled_code_->kind()));
+ }
}
Deoptimizer::~Deoptimizer() {
ASSERT(input_ == NULL && output_ == NULL);
+ ASSERT(disallow_heap_allocation_ == NULL);
}
@@ -450,48 +625,47 @@ void Deoptimizer::DeleteFrameDescriptions() {
delete[] output_;
input_ = NULL;
output_ = NULL;
- ASSERT(!HEAP->allow_allocation(true));
+#ifdef DEBUG
+ CHECK(!AllowHeapAllocation::IsAllowed());
+ CHECK(disallow_heap_allocation_ != NULL);
+ delete disallow_heap_allocation_;
+ disallow_heap_allocation_ = NULL;
+#endif // DEBUG
}
-Address Deoptimizer::GetDeoptimizationEntry(int id, BailoutType type) {
+Address Deoptimizer::GetDeoptimizationEntry(Isolate* isolate,
+ int id,
+ BailoutType type,
+ GetEntryMode mode) {
ASSERT(id >= 0);
- if (id >= kNumberOfEntries) return NULL;
- MemoryChunk* base = NULL;
- DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
- if (type == EAGER) {
- if (data->eager_deoptimization_entry_code_ == NULL) {
- data->eager_deoptimization_entry_code_ = CreateCode(type);
- }
- base = data->eager_deoptimization_entry_code_;
+ if (id >= kMaxNumberOfEntries) return NULL;
+ if (mode == ENSURE_ENTRY_CODE) {
+ EnsureCodeForDeoptimizationEntry(isolate, type, id);
} else {
- if (data->lazy_deoptimization_entry_code_ == NULL) {
- data->lazy_deoptimization_entry_code_ = CreateCode(type);
- }
- base = data->lazy_deoptimization_entry_code_;
+ ASSERT(mode == CALCULATE_ENTRY_ADDRESS);
}
- return
- static_cast<Address>(base->area_start()) + (id * table_entry_size_);
+ DeoptimizerData* data = isolate->deoptimizer_data();
+ ASSERT(type < kBailoutTypesWithCodeEntry);
+ MemoryChunk* base = data->deopt_entry_code_[type];
+ return base->area_start() + (id * table_entry_size_);
}
-int Deoptimizer::GetDeoptimizationId(Address addr, BailoutType type) {
- MemoryChunk* base = NULL;
- DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
- if (type == EAGER) {
- base = data->eager_deoptimization_entry_code_;
- } else {
- base = data->lazy_deoptimization_entry_code_;
- }
+int Deoptimizer::GetDeoptimizationId(Isolate* isolate,
+ Address addr,
+ BailoutType type) {
+ DeoptimizerData* data = isolate->deoptimizer_data();
+ MemoryChunk* base = data->deopt_entry_code_[type];
+ Address start = base->area_start();
if (base == NULL ||
- addr < base->area_start() ||
- addr >= base->area_start() +
- (kNumberOfEntries * table_entry_size_)) {
+ addr < start ||
+ addr >= start + (kMaxNumberOfEntries * table_entry_size_)) {
return kNotDeoptimizationEntry;
}
ASSERT_EQ(0,
- static_cast<int>(addr - base->area_start()) % table_entry_size_);
- return static_cast<int>(addr - base->area_start()) / table_entry_size_;
+ static_cast<int>(addr - start) % table_entry_size_);
+ return static_cast<int>(addr - start) / table_entry_size_;
}
@@ -515,18 +689,25 @@ int Deoptimizer::GetOutputInfo(DeoptimizationOutputData* data,
shared->SourceCodePrint(&stream, -1);
PrintF("[source:\n%s\n]", *stream.ToCString());
- UNREACHABLE();
+ FATAL("unable to find pc offset during deoptimization");
return -1;
}
int Deoptimizer::GetDeoptimizedCodeCount(Isolate* isolate) {
int length = 0;
- DeoptimizingCodeListNode* node =
- isolate->deoptimizer_data()->deoptimizing_code_list_;
- while (node != NULL) {
- length++;
- node = node->next();
+ // Count all entries in the deoptimizing code list of every context.
+ Object* context = isolate->heap()->native_contexts_list();
+ while (!context->IsUndefined()) {
+ Context* native_context = Context::cast(context);
+ Object* element = native_context->DeoptimizedCodeListHead();
+ while (!element->IsUndefined()) {
+ Code* code = Code::cast(element);
+ ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
+ length++;
+ element = code->next_code_link();
+ }
+ context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
}
return length;
}
@@ -535,25 +716,28 @@ int Deoptimizer::GetDeoptimizedCodeCount(Isolate* isolate) {
// We rely on this function not causing a GC. It is called from generated code
// without having a real stack frame in place.
void Deoptimizer::DoComputeOutputFrames() {
- if (bailout_type_ == OSR) {
- DoComputeOsrOutputFrame();
- return;
- }
-
// Print some helpful diagnostic information.
- int64_t start = OS::Ticks();
- if (FLAG_trace_deopt) {
- PrintF("[deoptimizing%s: begin 0x%08" V8PRIxPTR " ",
- (bailout_type_ == LAZY ? " (lazy)" : ""),
+ if (FLAG_log_timer_events &&
+ compiled_code_->kind() == Code::OPTIMIZED_FUNCTION) {
+ LOG(isolate(), CodeDeoptEvent(compiled_code_));
+ }
+ ElapsedTimer timer;
+ if (trace_) {
+ timer.Start();
+ PrintF("[deoptimizing (DEOPT %s): begin 0x%08" V8PRIxPTR " ",
+ MessageFor(bailout_type_),
reinterpret_cast<intptr_t>(function_));
- function_->PrintName();
- PrintF(" @%d]\n", bailout_id_);
+ PrintFunctionName();
+ PrintF(" @%d, FP to SP delta: %d]\n", bailout_id_, fp_to_sp_delta_);
+ if (bailout_type_ == EAGER || bailout_type_ == SOFT) {
+ compiled_code_->PrintDeoptLocation(bailout_id_);
+ }
}
// Determine basic deoptimization information. The optimized frame is
// described by the input data.
DeoptimizationInputData* input_data =
- DeoptimizationInputData::cast(optimized_code_->deoptimization_data());
+ DeoptimizationInputData::cast(compiled_code_->deoptimization_data());
BailoutId node_id = input_data->AstId(bailout_id_);
ByteArray* translations = input_data->TranslationByteArray();
unsigned translation_index =
@@ -598,6 +782,9 @@ void Deoptimizer::DoComputeOutputFrames() {
case Translation::SETTER_STUB_FRAME:
DoComputeAccessorStubFrame(&iterator, i, true);
break;
+ case Translation::COMPILED_STUB_FRAME:
+ DoComputeCompiledStubFrame(&iterator, i);
+ break;
case Translation::BEGIN:
case Translation::REGISTER:
case Translation::INT32_REGISTER:
@@ -609,22 +796,24 @@ void Deoptimizer::DoComputeOutputFrames() {
case Translation::DOUBLE_STACK_SLOT:
case Translation::LITERAL:
case Translation::ARGUMENTS_OBJECT:
- case Translation::DUPLICATE:
+ default:
UNREACHABLE();
break;
}
}
// Print some helpful diagnostic information.
- if (FLAG_trace_deopt) {
- double ms = static_cast<double>(OS::Ticks() - start) / 1000;
+ if (trace_) {
+ double ms = timer.Elapsed().InMillisecondsF();
int index = output_count_ - 1; // Index of the topmost frame.
JSFunction* function = output_[index]->GetFunction();
- PrintF("[deoptimizing: end 0x%08" V8PRIxPTR " ",
+ PrintF("[deoptimizing (%s): end 0x%08" V8PRIxPTR " ",
+ MessageFor(bailout_type_),
reinterpret_cast<intptr_t>(function));
- function->PrintName();
- PrintF(" => node=%d, pc=0x%08" V8PRIxPTR ", state=%s, alignment=%s,"
+ PrintFunctionName();
+ PrintF(" @%d => node=%d, pc=0x%08" V8PRIxPTR ", state=%s, alignment=%s,"
" took %0.3f ms]\n",
+ bailout_id_,
node_id.ToInt(),
output_[index]->GetPc(),
FullCodeGenerator::State2String(
@@ -636,78 +825,941 @@ void Deoptimizer::DoComputeOutputFrames() {
}
+void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
+ int frame_index) {
+ BailoutId node_id = BailoutId(iterator->Next());
+ JSFunction* function;
+ if (frame_index != 0) {
+ function = JSFunction::cast(ComputeLiteral(iterator->Next()));
+ } else {
+ int closure_id = iterator->Next();
+ USE(closure_id);
+ ASSERT_EQ(Translation::kSelfLiteralId, closure_id);
+ function = function_;
+ }
+ unsigned height = iterator->Next();
+ unsigned height_in_bytes = height * kPointerSize;
+ if (trace_) {
+ PrintF(" translating ");
+ function->PrintName();
+ PrintF(" => node=%d, height=%d\n", node_id.ToInt(), height_in_bytes);
+ }
+
+ // The 'fixed' part of the frame consists of the incoming parameters and
+ // the part described by JavaScriptFrameConstants.
+ unsigned fixed_frame_size = ComputeFixedSize(function);
+ unsigned input_frame_size = input_->GetFrameSize();
+ unsigned output_frame_size = height_in_bytes + fixed_frame_size;
+
+ // Allocate and store the output frame description.
+ FrameDescription* output_frame =
+ new(output_frame_size) FrameDescription(output_frame_size, function);
+ output_frame->SetFrameType(StackFrame::JAVA_SCRIPT);
+
+ bool is_bottommost = (0 == frame_index);
+ bool is_topmost = (output_count_ - 1 == frame_index);
+ ASSERT(frame_index >= 0 && frame_index < output_count_);
+ ASSERT(output_[frame_index] == NULL);
+ output_[frame_index] = output_frame;
+
+ // The top address for the bottommost output frame can be computed from
+ // the input frame pointer and the output frame's height. For all
+ // subsequent output frames, it can be computed from the previous one's
+ // top address and the current frame's size.
+ Register fp_reg = JavaScriptFrame::fp_register();
+ intptr_t top_address;
+ if (is_bottommost) {
+ // Determine whether the input frame contains alignment padding.
+ has_alignment_padding_ = HasAlignmentPadding(function) ? 1 : 0;
+ // 2 = context and function in the frame.
+ // If the optimized frame had alignment padding, adjust the frame pointer
+ // to point to the new position of the old frame pointer after padding
+ // is removed. Subtract 2 * kPointerSize for the context and function slots.
+ top_address = input_->GetRegister(fp_reg.code()) - (2 * kPointerSize) -
+ height_in_bytes + has_alignment_padding_ * kPointerSize;
+ } else {
+ top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
+ }
+ output_frame->SetTop(top_address);
+
+ // Compute the incoming parameter translation.
+ int parameter_count = function->shared()->formal_parameter_count() + 1;
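+  // (The extra slot beyond formal_parameter_count() is the implicit receiver.)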
+ unsigned output_offset = output_frame_size;
+ unsigned input_offset = input_frame_size;
+ for (int i = 0; i < parameter_count; ++i) {
+ output_offset -= kPointerSize;
+ DoTranslateCommand(iterator, frame_index, output_offset);
+ }
+ input_offset -= (parameter_count * kPointerSize);
+
+ // There are no translation commands for the caller's pc and fp, the
+ // context, and the function. Synthesize their values and set them up
+ // explicitly.
+ //
+ // The caller's pc for the bottommost output frame is the same as in the
+ // input frame. For all subsequent output frames, it can be read from the
+ // previous one. This frame's pc can be computed from the non-optimized
+ // function code and AST id of the bailout.
+ output_offset -= kPCOnStackSize;
+ input_offset -= kPCOnStackSize;
+ intptr_t value;
+ if (is_bottommost) {
+ value = input_->GetFrameSlot(input_offset);
+ } else {
+ value = output_[frame_index - 1]->GetPc();
+ }
+ output_frame->SetCallerPc(output_offset, value);
+ if (trace_) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ V8PRIxPTR " ; caller's pc\n",
+ top_address + output_offset, output_offset, value);
+ }
+
+ // The caller's frame pointer for the bottommost output frame is the same
+ // as in the input frame. For all subsequent output frames, it can be
+ // read from the previous one. Also compute and set this frame's frame
+ // pointer.
+ output_offset -= kFPOnStackSize;
+ input_offset -= kFPOnStackSize;
+ if (is_bottommost) {
+ value = input_->GetFrameSlot(input_offset);
+ } else {
+ value = output_[frame_index - 1]->GetFp();
+ }
+ output_frame->SetCallerFp(output_offset, value);
+ intptr_t fp_value = top_address + output_offset;
+ ASSERT(!is_bottommost || (input_->GetRegister(fp_reg.code()) +
+ has_alignment_padding_ * kPointerSize) == fp_value);
+ output_frame->SetFp(fp_value);
+ if (is_topmost) output_frame->SetRegister(fp_reg.code(), fp_value);
+ if (trace_) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ V8PRIxPTR " ; caller's fp\n",
+ fp_value, output_offset, value);
+ }
+ ASSERT(!is_bottommost || !has_alignment_padding_ ||
+ (fp_value & kPointerSize) != 0);
+
+ // For the bottommost output frame the context can be gotten from the input
+ // frame. For all subsequent output frames it can be gotten from the function
+ // so long as we don't inline functions that need local contexts.
+ Register context_reg = JavaScriptFrame::context_register();
+ output_offset -= kPointerSize;
+ input_offset -= kPointerSize;
+ if (is_bottommost) {
+ value = input_->GetFrameSlot(input_offset);
+ } else {
+ value = reinterpret_cast<intptr_t>(function->context());
+ }
+ output_frame->SetFrameSlot(output_offset, value);
+ output_frame->SetContext(value);
+ if (is_topmost) output_frame->SetRegister(context_reg.code(), value);
+ if (trace_) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ V8PRIxPTR "; context\n",
+ top_address + output_offset, output_offset, value);
+ }
+
+ // The function was mentioned explicitly in the BEGIN_FRAME.
+ output_offset -= kPointerSize;
+ input_offset -= kPointerSize;
+ value = reinterpret_cast<intptr_t>(function);
+ // The function for the bottommost output frame should also agree with the
+ // input frame.
+ ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
+ output_frame->SetFrameSlot(output_offset, value);
+ if (trace_) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ V8PRIxPTR "; function\n",
+ top_address + output_offset, output_offset, value);
+ }
+
+ // Translate the rest of the frame.
+ for (unsigned i = 0; i < height; ++i) {
+ output_offset -= kPointerSize;
+ DoTranslateCommand(iterator, frame_index, output_offset);
+ }
+ ASSERT(0 == output_offset);
+
+ // Compute this frame's PC, state, and continuation.
+ Code* non_optimized_code = function->shared()->code();
+ FixedArray* raw_data = non_optimized_code->deoptimization_data();
+ DeoptimizationOutputData* data = DeoptimizationOutputData::cast(raw_data);
+ Address start = non_optimized_code->instruction_start();
+ unsigned pc_and_state = GetOutputInfo(data, node_id, function->shared());
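+  // GetOutputInfo returns the PC offset into the unoptimized code and the
+  // FullCodeGenerator state packed as bit fields of one unsigned value.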
+ unsigned pc_offset = FullCodeGenerator::PcField::decode(pc_and_state);
+ intptr_t pc_value = reinterpret_cast<intptr_t>(start + pc_offset);
+ output_frame->SetPc(pc_value);
+
+ FullCodeGenerator::State state =
+ FullCodeGenerator::StateField::decode(pc_and_state);
+ output_frame->SetState(Smi::FromInt(state));
+
+ // Set the continuation for the topmost frame.
+ if (is_topmost && bailout_type_ != DEBUGGER) {
+ Builtins* builtins = isolate_->builtins();
+ Code* continuation = builtins->builtin(Builtins::kNotifyDeoptimized);
+ if (bailout_type_ == LAZY) {
+ continuation = builtins->builtin(Builtins::kNotifyLazyDeoptimized);
+ } else if (bailout_type_ == SOFT) {
+ continuation = builtins->builtin(Builtins::kNotifySoftDeoptimized);
+ } else {
+ ASSERT(bailout_type_ == EAGER);
+ }
+ output_frame->SetContinuation(
+ reinterpret_cast<intptr_t>(continuation->entry()));
+ }
+}
+
+
+void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
+ int frame_index) {
+ JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
+ unsigned height = iterator->Next();
+ unsigned height_in_bytes = height * kPointerSize;
+ if (trace_) {
+ PrintF(" translating arguments adaptor => height=%d\n", height_in_bytes);
+ }
+
+ unsigned fixed_frame_size = ArgumentsAdaptorFrameConstants::kFrameSize;
+ unsigned output_frame_size = height_in_bytes + fixed_frame_size;
+
+ // Allocate and store the output frame description.
+ FrameDescription* output_frame =
+ new(output_frame_size) FrameDescription(output_frame_size, function);
+ output_frame->SetFrameType(StackFrame::ARGUMENTS_ADAPTOR);
+
+  // Arguments adaptor cannot be topmost or bottommost.
+ ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
+ ASSERT(output_[frame_index] == NULL);
+ output_[frame_index] = output_frame;
+
+ // The top address of the frame is computed from the previous
+ // frame's top and this frame's size.
+ intptr_t top_address;
+ top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
+ output_frame->SetTop(top_address);
+
+ // Compute the incoming parameter translation.
+ int parameter_count = height;
+ unsigned output_offset = output_frame_size;
+ for (int i = 0; i < parameter_count; ++i) {
+ output_offset -= kPointerSize;
+ DoTranslateCommand(iterator, frame_index, output_offset);
+ }
+
+ // Read caller's PC from the previous frame.
+ output_offset -= kPCOnStackSize;
+ intptr_t callers_pc = output_[frame_index - 1]->GetPc();
+ output_frame->SetCallerPc(output_offset, callers_pc);
+ if (trace_) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ V8PRIxPTR " ; caller's pc\n",
+ top_address + output_offset, output_offset, callers_pc);
+ }
+
+ // Read caller's FP from the previous frame, and set this frame's FP.
+ output_offset -= kFPOnStackSize;
+ intptr_t value = output_[frame_index - 1]->GetFp();
+ output_frame->SetCallerFp(output_offset, value);
+ intptr_t fp_value = top_address + output_offset;
+ output_frame->SetFp(fp_value);
+ if (trace_) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ V8PRIxPTR " ; caller's fp\n",
+ fp_value, output_offset, value);
+ }
+
+ // A marker value is used in place of the context.
+ output_offset -= kPointerSize;
+ intptr_t context = reinterpret_cast<intptr_t>(
+ Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ output_frame->SetFrameSlot(output_offset, context);
+ if (trace_) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ V8PRIxPTR " ; context (adaptor sentinel)\n",
+ top_address + output_offset, output_offset, context);
+ }
+
+ // The function was mentioned explicitly in the ARGUMENTS_ADAPTOR_FRAME.
+ output_offset -= kPointerSize;
+ value = reinterpret_cast<intptr_t>(function);
+ output_frame->SetFrameSlot(output_offset, value);
+ if (trace_) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ V8PRIxPTR " ; function\n",
+ top_address + output_offset, output_offset, value);
+ }
+
+ // Number of incoming arguments.
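+  // (The translated height includes the receiver, so argc is height - 1.)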
+ output_offset -= kPointerSize;
+ value = reinterpret_cast<intptr_t>(Smi::FromInt(height - 1));
+ output_frame->SetFrameSlot(output_offset, value);
+ if (trace_) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ V8PRIxPTR " ; argc (%d)\n",
+ top_address + output_offset, output_offset, value, height - 1);
+ }
+
+ ASSERT(0 == output_offset);
+
+ Builtins* builtins = isolate_->builtins();
+ Code* adaptor_trampoline =
+ builtins->builtin(Builtins::kArgumentsAdaptorTrampoline);
+ intptr_t pc_value = reinterpret_cast<intptr_t>(
+ adaptor_trampoline->instruction_start() +
+ isolate_->heap()->arguments_adaptor_deopt_pc_offset()->value());
+ output_frame->SetPc(pc_value);
+}
+
+
+void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
+ int frame_index) {
+ Builtins* builtins = isolate_->builtins();
+ Code* construct_stub = builtins->builtin(Builtins::kJSConstructStubGeneric);
+ JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
+ unsigned height = iterator->Next();
+ unsigned height_in_bytes = height * kPointerSize;
+ if (trace_) {
+ PrintF(" translating construct stub => height=%d\n", height_in_bytes);
+ }
+
+ unsigned fixed_frame_size = ConstructFrameConstants::kFrameSize;
+ unsigned output_frame_size = height_in_bytes + fixed_frame_size;
+
+ // Allocate and store the output frame description.
+ FrameDescription* output_frame =
+ new(output_frame_size) FrameDescription(output_frame_size, function);
+ output_frame->SetFrameType(StackFrame::CONSTRUCT);
+
+  // Construct stub cannot be topmost or bottommost.
+ ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
+ ASSERT(output_[frame_index] == NULL);
+ output_[frame_index] = output_frame;
+
+ // The top address of the frame is computed from the previous
+ // frame's top and this frame's size.
+ intptr_t top_address;
+ top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
+ output_frame->SetTop(top_address);
+
+ // Compute the incoming parameter translation.
+ int parameter_count = height;
+ unsigned output_offset = output_frame_size;
+ for (int i = 0; i < parameter_count; ++i) {
+ output_offset -= kPointerSize;
+ int deferred_object_index = deferred_objects_.length();
+ DoTranslateCommand(iterator, frame_index, output_offset);
+ // The allocated receiver of a construct stub frame is passed as the
+ // receiver parameter through the translation. It might be encoding
+ // a captured object, patch the slot address for a captured object.
+ if (i == 0 && deferred_objects_.length() > deferred_object_index) {
+ ASSERT(!deferred_objects_[deferred_object_index].is_arguments());
+ deferred_objects_[deferred_object_index].patch_slot_address(top_address);
+ }
+ }
+
+ // Read caller's PC from the previous frame.
+ output_offset -= kPCOnStackSize;
+ intptr_t callers_pc = output_[frame_index - 1]->GetPc();
+ output_frame->SetCallerPc(output_offset, callers_pc);
+ if (trace_) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ V8PRIxPTR " ; caller's pc\n",
+ top_address + output_offset, output_offset, callers_pc);
+ }
+
+ // Read caller's FP from the previous frame, and set this frame's FP.
+ output_offset -= kFPOnStackSize;
+ intptr_t value = output_[frame_index - 1]->GetFp();
+ output_frame->SetCallerFp(output_offset, value);
+ intptr_t fp_value = top_address + output_offset;
+ output_frame->SetFp(fp_value);
+ if (trace_) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ V8PRIxPTR " ; caller's fp\n",
+ fp_value, output_offset, value);
+ }
+
+ // The context can be gotten from the previous frame.
+ output_offset -= kPointerSize;
+ value = output_[frame_index - 1]->GetContext();
+ output_frame->SetFrameSlot(output_offset, value);
+ if (trace_) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ V8PRIxPTR " ; context\n",
+ top_address + output_offset, output_offset, value);
+ }
+
+ // A marker value is used in place of the function.
+ output_offset -= kPointerSize;
+ value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::CONSTRUCT));
+ output_frame->SetFrameSlot(output_offset, value);
+ if (trace_) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ V8PRIxPTR " ; function (construct sentinel)\n",
+ top_address + output_offset, output_offset, value);
+ }
+
+ // The output frame reflects a JSConstructStubGeneric frame.
+ output_offset -= kPointerSize;
+ value = reinterpret_cast<intptr_t>(construct_stub);
+ output_frame->SetFrameSlot(output_offset, value);
+ if (trace_) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ V8PRIxPTR " ; code object\n",
+ top_address + output_offset, output_offset, value);
+ }
+
+ // Number of incoming arguments.
+ output_offset -= kPointerSize;
+ value = reinterpret_cast<intptr_t>(Smi::FromInt(height - 1));
+ output_frame->SetFrameSlot(output_offset, value);
+ if (trace_) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ V8PRIxPTR " ; argc (%d)\n",
+ top_address + output_offset, output_offset, value, height - 1);
+ }
+
+ // Constructor function being invoked by the stub (only present on some
+ // architectures, indicated by kConstructorOffset).
+ if (ConstructFrameConstants::kConstructorOffset != kMinInt) {
+ output_offset -= kPointerSize;
+ value = reinterpret_cast<intptr_t>(function);
+ output_frame->SetFrameSlot(output_offset, value);
+ if (trace_) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ V8PRIxPTR " ; constructor function\n",
+ top_address + output_offset, output_offset, value);
+ }
+ }
+
+ // The newly allocated object was passed as receiver in the artificial
+ // constructor stub environment created by HEnvironment::CopyForInlining().
+ output_offset -= kPointerSize;
+ value = output_frame->GetFrameSlot(output_frame_size - kPointerSize);
+ output_frame->SetFrameSlot(output_offset, value);
+ if (trace_) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ V8PRIxPTR " ; allocated receiver\n",
+ top_address + output_offset, output_offset, value);
+ }
+
+ ASSERT(0 == output_offset);
+
+ intptr_t pc = reinterpret_cast<intptr_t>(
+ construct_stub->instruction_start() +
+ isolate_->heap()->construct_stub_deopt_pc_offset()->value());
+ output_frame->SetPc(pc);
+}
+
+
+void Deoptimizer::DoComputeAccessorStubFrame(TranslationIterator* iterator,
+ int frame_index,
+ bool is_setter_stub_frame) {
+ JSFunction* accessor = JSFunction::cast(ComputeLiteral(iterator->Next()));
+ // The receiver (and the implicit return value, if any) are expected in
+ // registers by the LoadIC/StoreIC, so they don't belong to the output stack
+ // frame. This means that we have to use a height of 0.
+ unsigned height = 0;
+ unsigned height_in_bytes = height * kPointerSize;
+ const char* kind = is_setter_stub_frame ? "setter" : "getter";
+ if (trace_) {
+ PrintF(" translating %s stub => height=%u\n", kind, height_in_bytes);
+ }
+
+ // We need 1 stack entry for the return address + 4 stack entries from
+ // StackFrame::INTERNAL (FP, context, frame type, code object, see
+ // MacroAssembler::EnterFrame). For a setter stub frame we need one additional
+ // entry for the implicit return value, see
+ // StoreStubCompiler::CompileStoreViaSetter.
+ unsigned fixed_frame_entries = (kPCOnStackSize / kPointerSize) +
+ (kFPOnStackSize / kPointerSize) + 3 +
+ (is_setter_stub_frame ? 1 : 0);
+ unsigned fixed_frame_size = fixed_frame_entries * kPointerSize;
+ unsigned output_frame_size = height_in_bytes + fixed_frame_size;
+
+ // Allocate and store the output frame description.
+ FrameDescription* output_frame =
+ new(output_frame_size) FrameDescription(output_frame_size, accessor);
+ output_frame->SetFrameType(StackFrame::INTERNAL);
+
+ // A frame for an accessor stub cannot be the topmost or bottommost one.
+ ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
+ ASSERT(output_[frame_index] == NULL);
+ output_[frame_index] = output_frame;
+
+ // The top address of the frame is computed from the previous frame's top and
+ // this frame's size.
+ intptr_t top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
+ output_frame->SetTop(top_address);
+
+ unsigned output_offset = output_frame_size;
+
+ // Read caller's PC from the previous frame.
+ output_offset -= kPCOnStackSize;
+ intptr_t callers_pc = output_[frame_index - 1]->GetPc();
+ output_frame->SetCallerPc(output_offset, callers_pc);
+ if (trace_) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
+ " ; caller's pc\n",
+ top_address + output_offset, output_offset, callers_pc);
+ }
+
+ // Read caller's FP from the previous frame, and set this frame's FP.
+ output_offset -= kFPOnStackSize;
+ intptr_t value = output_[frame_index - 1]->GetFp();
+ output_frame->SetCallerFp(output_offset, value);
+ intptr_t fp_value = top_address + output_offset;
+ output_frame->SetFp(fp_value);
+ if (trace_) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
+ " ; caller's fp\n",
+ fp_value, output_offset, value);
+ }
+
+ // The context can be gotten from the previous frame.
+ output_offset -= kPointerSize;
+ value = output_[frame_index - 1]->GetContext();
+ output_frame->SetFrameSlot(output_offset, value);
+ if (trace_) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
+ " ; context\n",
+ top_address + output_offset, output_offset, value);
+ }
+
+ // A marker value is used in place of the function.
+ output_offset -= kPointerSize;
+ value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::INTERNAL));
+ output_frame->SetFrameSlot(output_offset, value);
+ if (trace_) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
+ " ; function (%s sentinel)\n",
+ top_address + output_offset, output_offset, value, kind);
+ }
+
+ // Get Code object from accessor stub.
+ output_offset -= kPointerSize;
+ Builtins::Name name = is_setter_stub_frame ?
+ Builtins::kStoreIC_Setter_ForDeopt :
+ Builtins::kLoadIC_Getter_ForDeopt;
+ Code* accessor_stub = isolate_->builtins()->builtin(name);
+ value = reinterpret_cast<intptr_t>(accessor_stub);
+ output_frame->SetFrameSlot(output_offset, value);
+ if (trace_) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
+ " ; code object\n",
+ top_address + output_offset, output_offset, value);
+ }
+
+ // Skip receiver.
+ Translation::Opcode opcode =
+ static_cast<Translation::Opcode>(iterator->Next());
+ iterator->Skip(Translation::NumberOfOperandsFor(opcode));
+
+ if (is_setter_stub_frame) {
+ // The implicit return value was part of the artificial setter stub
+ // environment.
+ output_offset -= kPointerSize;
+ DoTranslateCommand(iterator, frame_index, output_offset);
+ }
+
+ ASSERT(0 == output_offset);
+
+ Smi* offset = is_setter_stub_frame ?
+ isolate_->heap()->setter_stub_deopt_pc_offset() :
+ isolate_->heap()->getter_stub_deopt_pc_offset();
+ intptr_t pc = reinterpret_cast<intptr_t>(
+ accessor_stub->instruction_start() + offset->value());
+ output_frame->SetPc(pc);
+}
+
+
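As a side note, the fixed-size portion of the accessor stub frame built above can be checked with a little arithmetic. The sketch below is illustrative only and not part of this patch; it assumes a 32-bit target where kPointerSize is 4 and the PC and FP each occupy one pointer-sized slot.

#include <cstdio>

int main() {
  const unsigned kPointerSize = 4;             // assumed 32-bit target
  const unsigned kPCOnStackSize = kPointerSize;
  const unsigned kFPOnStackSize = kPointerSize;
  for (int is_setter = 0; is_setter <= 1; ++is_setter) {
    // return address + saved FP + {context, frame-type marker, code object},
    // plus one extra slot for the implicit return value of a setter.
    unsigned entries = (kPCOnStackSize / kPointerSize) +
                       (kFPOnStackSize / kPointerSize) + 3 + is_setter;
    std::printf("%s stub frame: %u slots (%u bytes)\n",
                is_setter ? "setter" : "getter",
                entries, entries * kPointerSize);
  }
  return 0;
}

With these assumed constants the getter frame comes out at 5 slots (20 bytes) and the setter frame at 6 slots (24 bytes), matching the fixed_frame_entries computation above.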
+void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
+ int frame_index) {
+ //
+ //               FROM                                  TO
+ //    |          ....           |          |          ....           |
+ //    +-------------------------+          +-------------------------+
+ //    | JSFunction continuation |          | JSFunction continuation |
+ //    +-------------------------+          +-------------------------+
+ // |  |    saved frame (FP)     |          |    saved frame (FP)     |
+ // |  +=========================+<-fpreg   +=========================+<-fpreg
+ // |  |   JSFunction context    |          |   JSFunction context    |
+ // v  +-------------------------+          +-------------------------|
+ //    |   COMPILED_STUB marker  |          |   STUB_FAILURE marker   |
+ //    +-------------------------+          +-------------------------+
+ //    |                         |          |  caller args.arguments_ |
+ //    | ...                     |          +-------------------------+
+ //    |                         |          |  caller args.length_    |
+ //    |-------------------------|<-spreg   +-------------------------+
+ //                                         | caller args pointer     |
+ //                                         +-------------------------+
+ //                                         | caller stack param 1    |
+ //      parameters in registers            +-------------------------+
+ //       and spilled to stack              |           ....          |
+ //                                         +-------------------------+
+ //                                         | caller stack param n    |
+ //                                         +-------------------------+<-spreg
+ //                                         reg = number of parameters
+ //                                         reg = failure handler address
+ //                                         reg = saved frame
+ //                                         reg = JSFunction context
+ //
+
+ ASSERT(compiled_code_->is_crankshafted() &&
+ compiled_code_->kind() != Code::OPTIMIZED_FUNCTION);
+ int major_key = compiled_code_->major_key();
+ CodeStubInterfaceDescriptor* descriptor =
+ isolate_->code_stub_interface_descriptor(major_key);
+
+ // The output frame must have room for all pushed register parameters
+ // and the standard stack frame slots. Include space for an argument
+ // object to the callee and optionally the space to pass the argument
+ // object to the stub failure handler.
+ ASSERT(descriptor->register_param_count_ >= 0);
+ int height_in_bytes = kPointerSize * descriptor->register_param_count_ +
+ sizeof(Arguments) + kPointerSize;
+ int fixed_frame_size = StandardFrameConstants::kFixedFrameSize;
+ int input_frame_size = input_->GetFrameSize();
+ int output_frame_size = height_in_bytes + fixed_frame_size;
+ if (trace_) {
+ PrintF(" translating %s => StubFailureTrampolineStub, height=%d\n",
+ CodeStub::MajorName(static_cast<CodeStub::Major>(major_key), false),
+ height_in_bytes);
+ }
+
+ // The stub failure trampoline is a single frame.
+ FrameDescription* output_frame =
+ new(output_frame_size) FrameDescription(output_frame_size, NULL);
+ output_frame->SetFrameType(StackFrame::STUB_FAILURE_TRAMPOLINE);
+ ASSERT(frame_index == 0);
+ output_[frame_index] = output_frame;
+
+ // The top address for the output frame can be computed from the input
+ // frame pointer and the output frame's height. Subtract space for the
+ // context and function slots.
+ Register fp_reg = StubFailureTrampolineFrame::fp_register();
+ intptr_t top_address = input_->GetRegister(fp_reg.code()) -
+ (2 * kPointerSize) - height_in_bytes;
+ output_frame->SetTop(top_address);
+
+ // Read caller's PC (JSFunction continuation) from the input frame.
+ unsigned input_frame_offset = input_frame_size - kPCOnStackSize;
+ unsigned output_frame_offset = output_frame_size - kFPOnStackSize;
+ intptr_t value = input_->GetFrameSlot(input_frame_offset);
+ output_frame->SetCallerPc(output_frame_offset, value);
+ if (trace_) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ V8PRIxPTR " ; caller's pc\n",
+ top_address + output_frame_offset, output_frame_offset, value);
+ }
+
+ // Read caller's FP from the input frame, and set this frame's FP.
+ input_frame_offset -= kFPOnStackSize;
+ value = input_->GetFrameSlot(input_frame_offset);
+ output_frame_offset -= kFPOnStackSize;
+ output_frame->SetCallerFp(output_frame_offset, value);
+ intptr_t frame_ptr = input_->GetRegister(fp_reg.code());
+ output_frame->SetRegister(fp_reg.code(), frame_ptr);
+ output_frame->SetFp(frame_ptr);
+ if (trace_) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ V8PRIxPTR " ; caller's fp\n",
+ top_address + output_frame_offset, output_frame_offset, value);
+ }
+
+ // The context can be gotten from the input frame.
+ Register context_reg = StubFailureTrampolineFrame::context_register();
+ input_frame_offset -= kPointerSize;
+ value = input_->GetFrameSlot(input_frame_offset);
+ output_frame->SetRegister(context_reg.code(), value);
+ output_frame_offset -= kPointerSize;
+ output_frame->SetFrameSlot(output_frame_offset, value);
+ if (trace_) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ V8PRIxPTR " ; context\n",
+ top_address + output_frame_offset, output_frame_offset, value);
+ }
+
+ // A marker value is used in place of the function.
+ output_frame_offset -= kPointerSize;
+ value = reinterpret_cast<intptr_t>(
+ Smi::FromInt(StackFrame::STUB_FAILURE_TRAMPOLINE));
+ output_frame->SetFrameSlot(output_frame_offset, value);
+ if (trace_) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ V8PRIxPTR " ; function (stub failure sentinel)\n",
+ top_address + output_frame_offset, output_frame_offset, value);
+ }
+
+ intptr_t caller_arg_count = 0;
+ bool arg_count_known = !descriptor->stack_parameter_count_.is_valid();
+
+ // Build the Arguments object for the caller's parameters and a pointer to it.
+ output_frame_offset -= kPointerSize;
+ int args_arguments_offset = output_frame_offset;
+ intptr_t the_hole = reinterpret_cast<intptr_t>(
+ isolate_->heap()->the_hole_value());
+ if (arg_count_known) {
+ value = frame_ptr + StandardFrameConstants::kCallerSPOffset +
+ (caller_arg_count - 1) * kPointerSize;
+ } else {
+ value = the_hole;
+ }
+
+ output_frame->SetFrameSlot(args_arguments_offset, value);
+ if (trace_) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ V8PRIxPTR " ; args.arguments %s\n",
+ top_address + args_arguments_offset, args_arguments_offset, value,
+ arg_count_known ? "" : "(the hole)");
+ }
+
+ output_frame_offset -= kPointerSize;
+ int length_frame_offset = output_frame_offset;
+ value = arg_count_known ? caller_arg_count : the_hole;
+ output_frame->SetFrameSlot(length_frame_offset, value);
+ if (trace_) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ V8PRIxPTR " ; args.length %s\n",
+ top_address + length_frame_offset, length_frame_offset, value,
+ arg_count_known ? "" : "(the hole)");
+ }
+
+ output_frame_offset -= kPointerSize;
+ value = frame_ptr + StandardFrameConstants::kCallerSPOffset -
+ (output_frame_size - output_frame_offset) + kPointerSize;
+ output_frame->SetFrameSlot(output_frame_offset, value);
+ if (trace_) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ V8PRIxPTR " ; args*\n",
+ top_address + output_frame_offset, output_frame_offset, value);
+ }
+
+ // Copy the register parameters to the failure frame.
+ for (int i = 0; i < descriptor->register_param_count_; ++i) {
+ output_frame_offset -= kPointerSize;
+ DoTranslateCommand(iterator, 0, output_frame_offset);
+ }
+
+ if (!arg_count_known) {
+ DoTranslateCommand(iterator, 0, length_frame_offset,
+ TRANSLATED_VALUE_IS_NATIVE);
+ caller_arg_count = output_frame->GetFrameSlot(length_frame_offset);
+ value = frame_ptr + StandardFrameConstants::kCallerSPOffset +
+ (caller_arg_count - 1) * kPointerSize;
+ output_frame->SetFrameSlot(args_arguments_offset, value);
+ if (trace_) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ V8PRIxPTR " ; args.arguments\n",
+ top_address + args_arguments_offset, args_arguments_offset, value);
+ }
+ }
+
+ ASSERT(0 == output_frame_offset);
+
+ // Copy the double registers from the input into the output frame.
+ CopyDoubleRegisters(output_frame);
+
+ // Fill registers containing handler and number of parameters.
+ SetPlatformCompiledStubRegisters(output_frame, descriptor);
+
+ // Compute this frame's PC, state, and continuation.
+ Code* trampoline = NULL;
+ StubFunctionMode function_mode = descriptor->function_mode_;
+ StubFailureTrampolineStub(function_mode).FindCodeInCache(&trampoline,
+ isolate_);
+ ASSERT(trampoline != NULL);
+ output_frame->SetPc(reinterpret_cast<intptr_t>(
+ trampoline->instruction_start()));
+ output_frame->SetState(Smi::FromInt(FullCodeGenerator::NO_REGISTERS));
+ Code* notify_failure =
+ isolate_->builtins()->builtin(Builtins::kNotifyStubFailure);
+ output_frame->SetContinuation(
+ reinterpret_cast<intptr_t>(notify_failure->entry()));
+}
+
+
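For orientation, the height computation at the top of DoComputeCompiledStubFrame can be reproduced in isolation. The sketch below is not taken from the patch: it assumes a 32-bit target where the Arguments object occupies two words (length_ and arguments_), and the register parameter count is an arbitrary example value.

#include <cstdio>

int main() {
  const int kPointerSize = 4;                   // assumed 32-bit target
  const int kArgumentsSize = 2 * kPointerSize;  // length_ + arguments_
  const int register_param_count = 3;           // example stub descriptor
  int height_in_bytes = kPointerSize * register_param_count +
                        kArgumentsSize + kPointerSize;  // + caller args pointer
  std::printf("height = %d bytes (%d slots)\n",
              height_in_bytes, height_in_bytes / kPointerSize);
  return 0;
}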
+Handle<Object> Deoptimizer::MaterializeNextHeapObject() {
+ int object_index = materialization_object_index_++;
+ ObjectMaterializationDescriptor desc = deferred_objects_[object_index];
+ const int length = desc.object_length();
+
+ if (desc.duplicate_object() >= 0) {
+ // Found a previously materialized object by de-duplication.
+ object_index = desc.duplicate_object();
+ materialized_objects_->Add(Handle<Object>());
+ } else if (desc.is_arguments() && ArgumentsObjectIsAdapted(object_index)) {
+ // Use the arguments adapter frame we just built to materialize the
+ // arguments object. FunctionGetArguments can't throw an exception.
+ Handle<JSFunction> function = ArgumentsObjectFunction(object_index);
+ Handle<JSObject> arguments = Handle<JSObject>::cast(
+ Accessors::FunctionGetArguments(function));
+ materialized_objects_->Add(arguments);
+ materialization_value_index_ += length;
+ } else if (desc.is_arguments()) {
+ // Construct an arguments object and copy the parameters to a newly
+ // allocated arguments object backing store.
+ Handle<JSFunction> function = ArgumentsObjectFunction(object_index);
+ Handle<JSObject> arguments =
+ isolate_->factory()->NewArgumentsObject(function, length);
+ Handle<FixedArray> array = isolate_->factory()->NewFixedArray(length);
+ ASSERT(array->length() == length);
+ arguments->set_elements(*array);
+ materialized_objects_->Add(arguments);
+ for (int i = 0; i < length; ++i) {
+ Handle<Object> value = MaterializeNextValue();
+ array->set(i, *value);
+ }
+ } else {
+ // Dispatch on the instance type of the object to be materialized.
+ // We also need to make sure that the representation of all fields
+ // in the given object are general enough to hold a tagged value.
+ Handle<Map> map = Map::GeneralizeAllFieldRepresentations(
+ Handle<Map>::cast(MaterializeNextValue()), Representation::Tagged());
+ switch (map->instance_type()) {
+ case HEAP_NUMBER_TYPE: {
+ Handle<HeapNumber> object = isolate_->factory()->NewHeapNumber(0.0);
+ materialized_objects_->Add(object);
+ Handle<Object> number = MaterializeNextValue();
+ object->set_value(number->Number());
+ materialization_value_index_ += kDoubleSize / kPointerSize - 1;
+ break;
+ }
+ case JS_OBJECT_TYPE: {
+ Handle<JSObject> object =
+ isolate_->factory()->NewJSObjectFromMap(map, NOT_TENURED, false);
+ materialized_objects_->Add(object);
+ Handle<Object> properties = MaterializeNextValue();
+ Handle<Object> elements = MaterializeNextValue();
+ object->set_properties(FixedArray::cast(*properties));
+ object->set_elements(FixedArrayBase::cast(*elements));
+ for (int i = 0; i < length - 3; ++i) {
+ Handle<Object> value = MaterializeNextValue();
+ object->FastPropertyAtPut(i, *value);
+ }
+ break;
+ }
+ case JS_ARRAY_TYPE: {
+ Handle<JSArray> object =
+ isolate_->factory()->NewJSArray(0, map->elements_kind());
+ materialized_objects_->Add(object);
+ Handle<Object> properties = MaterializeNextValue();
+ Handle<Object> elements = MaterializeNextValue();
+ Handle<Object> length = MaterializeNextValue();
+ object->set_properties(FixedArray::cast(*properties));
+ object->set_elements(FixedArrayBase::cast(*elements));
+ object->set_length(*length);
+ break;
+ }
+ default:
+ PrintF("[couldn't handle instance type %d]\n", map->instance_type());
+ UNREACHABLE();
+ }
+ }
+
+ return materialized_objects_->at(object_index);
+}
+
+
+Handle<Object> Deoptimizer::MaterializeNextValue() {
+ int value_index = materialization_value_index_++;
+ Handle<Object> value = materialized_values_->at(value_index);
+ if (*value == isolate_->heap()->arguments_marker()) {
+ value = MaterializeNextHeapObject();
+ }
+ return value;
+}
+
+
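The two functions above cooperate on a single flattened value stream: MaterializeNextValue() consumes one value and, when it hits the arguments marker, recurses into MaterializeNextHeapObject() to build the nested object first. A reduced sketch of that traversal, with all names invented and the de-duplication and arguments cases left out, might look like this:

#include <cstdio>
#include <vector>

// One entry of the flattened stream: either a plain number or a marker that
// says "a nested object with 'length' fields starts here".
struct Slot {
  bool is_object_marker;
  int length;
  double number;
};

static double MaterializeNext(const std::vector<Slot>& stream, size_t* index);

// Builds a nested "object" by consuming its fields, mirroring how
// MaterializeNextHeapObject() pulls field values via MaterializeNextValue().
static double MaterializeObject(const std::vector<Slot>& stream, size_t* index,
                                int length) {
  double sum = 0;  // stand-in for constructing the real object from its fields
  for (int i = 0; i < length; ++i) sum += MaterializeNext(stream, index);
  return sum;
}

static double MaterializeNext(const std::vector<Slot>& stream, size_t* index) {
  const Slot& slot = stream[(*index)++];
  if (slot.is_object_marker) {
    return MaterializeObject(stream, index, slot.length);
  }
  return slot.number;
}

int main() {
  // A two-field object whose second field is itself a one-field object.
  std::vector<Slot> stream = {
      {true, 2, 0.0}, {false, 0, 1.5}, {true, 1, 0.0}, {false, 0, 2.5}};
  size_t index = 0;
  std::printf("%g\n", MaterializeNext(stream, &index));  // prints 4
  return 0;
}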
void Deoptimizer::MaterializeHeapObjects(JavaScriptFrameIterator* it) {
ASSERT_NE(DEBUGGER, bailout_type_);
- // Handlify all argument object values before triggering any allocation.
- List<Handle<Object> > values(deferred_arguments_objects_values_.length());
- for (int i = 0; i < deferred_arguments_objects_values_.length(); ++i) {
- values.Add(Handle<Object>(deferred_arguments_objects_values_[i]));
+ // Walk all JavaScript output frames with the given frame iterator.
+ for (int frame_index = 0; frame_index < jsframe_count(); ++frame_index) {
+ if (frame_index != 0) it->Advance();
+ JavaScriptFrame* frame = it->frame();
+ jsframe_functions_.Add(handle(frame->function(), isolate_));
+ jsframe_has_adapted_arguments_.Add(frame->has_adapted_arguments());
+ }
+
+ // Handlify all tagged object values before triggering any allocation.
+ List<Handle<Object> > values(deferred_objects_tagged_values_.length());
+ for (int i = 0; i < deferred_objects_tagged_values_.length(); ++i) {
+ values.Add(Handle<Object>(deferred_objects_tagged_values_[i], isolate_));
}
// Play it safe and clear all unhandlified values before we continue.
- deferred_arguments_objects_values_.Clear();
+ deferred_objects_tagged_values_.Clear();
// Materialize all heap numbers before looking at arguments because when the
// output frames are used to materialize arguments objects later on they need
// to already contain valid heap numbers.
for (int i = 0; i < deferred_heap_numbers_.length(); i++) {
- HeapNumberMaterializationDescriptor d = deferred_heap_numbers_[i];
+ HeapNumberMaterializationDescriptor<Address> d = deferred_heap_numbers_[i];
Handle<Object> num = isolate_->factory()->NewNumber(d.value());
- if (FLAG_trace_deopt) {
- PrintF("Materializing a new heap number %p [%e] in slot %p\n",
+ if (trace_) {
+ PrintF("Materialized a new heap number %p [%e] in slot %p\n",
reinterpret_cast<void*>(*num),
d.value(),
- d.slot_address());
+ d.destination());
}
- Memory::Object_at(d.slot_address()) = *num;
+ Memory::Object_at(d.destination()) = *num;
}
- // Materialize arguments objects one frame at a time.
- for (int frame_index = 0; frame_index < jsframe_count(); ++frame_index) {
- if (frame_index != 0) it->Advance();
- JavaScriptFrame* frame = it->frame();
- Handle<JSFunction> function(JSFunction::cast(frame->function()), isolate_);
- Handle<JSObject> arguments;
- for (int i = frame->ComputeExpressionsCount() - 1; i >= 0; --i) {
- if (frame->GetExpression(i) == isolate_->heap()->arguments_marker()) {
- ArgumentsObjectMaterializationDescriptor descriptor =
- deferred_arguments_objects_.RemoveLast();
- const int length = descriptor.arguments_length();
- if (arguments.is_null()) {
- if (frame->has_adapted_arguments()) {
- // Use the arguments adapter frame we just built to materialize the
- // arguments object. FunctionGetArguments can't throw an exception,
- // so cast away the doubt with an assert.
- arguments = Handle<JSObject>(JSObject::cast(
- Accessors::FunctionGetArguments(*function,
- NULL)->ToObjectUnchecked()));
- values.RewindBy(length);
- } else {
- // Construct an arguments object and copy the parameters to a newly
- // allocated arguments object backing store.
- arguments =
- isolate_->factory()->NewArgumentsObject(function, length);
- Handle<FixedArray> array =
- isolate_->factory()->NewFixedArray(length);
- ASSERT(array->length() == length);
- for (int i = length - 1; i >= 0 ; --i) {
- array->set(i, *values.RemoveLast());
- }
- arguments->set_elements(*array);
- }
- }
- frame->SetExpression(i, *arguments);
- ASSERT_EQ(Memory::Object_at(descriptor.slot_address()), *arguments);
- if (FLAG_trace_deopt) {
- PrintF("Materializing %sarguments object for %p: ",
- frame->has_adapted_arguments() ? "(adapted) " : "",
+ // Materialize all heap numbers required for arguments/captured objects.
+ for (int i = 0; i < deferred_objects_double_values_.length(); i++) {
+ HeapNumberMaterializationDescriptor<int> d =
+ deferred_objects_double_values_[i];
+ Handle<Object> num = isolate_->factory()->NewNumber(d.value());
+ if (trace_) {
+ PrintF("Materialized a new heap number %p [%e] for object at %d\n",
+ reinterpret_cast<void*>(*num),
+ d.value(),
+ d.destination());
+ }
+ ASSERT(values.at(d.destination())->IsTheHole());
+ values.Set(d.destination(), num);
+ }
+
+ // Play it safe and clear all object double values before we continue.
+ deferred_objects_double_values_.Clear();
+
+ // Materialize arguments/captured objects.
+ if (!deferred_objects_.is_empty()) {
+ List<Handle<Object> > materialized_objects(deferred_objects_.length());
+ materialized_objects_ = &materialized_objects;
+ materialized_values_ = &values;
+
+ while (materialization_object_index_ < deferred_objects_.length()) {
+ int object_index = materialization_object_index_;
+ ObjectMaterializationDescriptor descriptor =
+ deferred_objects_.at(object_index);
+
+ // Find a previously materialized object by de-duplication or
+ // materialize a new instance of the object if necessary. Store
+ // the materialized object into the frame slot.
+ Handle<Object> object = MaterializeNextHeapObject();
+ Memory::Object_at(descriptor.slot_address()) = *object;
+ if (trace_) {
+ if (descriptor.is_arguments()) {
+ PrintF("Materialized %sarguments object of length %d for %p: ",
+ ArgumentsObjectIsAdapted(object_index) ? "(adapted) " : "",
+ Handle<JSObject>::cast(object)->elements()->length(),
+ reinterpret_cast<void*>(descriptor.slot_address()));
+ } else {
+ PrintF("Materialized captured object of size %d for %p: ",
+ Handle<HeapObject>::cast(object)->Size(),
reinterpret_cast<void*>(descriptor.slot_address()));
- arguments->ShortPrint();
- PrintF("\n");
}
+ object->ShortPrint();
+ PrintF("\n");
}
}
+
+ ASSERT(materialization_object_index_ == materialized_objects_->length());
+ ASSERT(materialization_value_index_ == materialized_values_->length());
}
}
@@ -723,23 +1775,23 @@ void Deoptimizer::MaterializeHeapNumbersForDebuggerInspectableFrame(
Address parameters_bottom = parameters_top + parameters_size;
Address expressions_bottom = expressions_top + expressions_size;
for (int i = 0; i < deferred_heap_numbers_.length(); i++) {
- HeapNumberMaterializationDescriptor d = deferred_heap_numbers_[i];
+ HeapNumberMaterializationDescriptor<Address> d = deferred_heap_numbers_[i];
// Check whether the heap number to materialize actually belongs to the frame
// being extracted.
- Address slot = d.slot_address();
+ Address slot = d.destination();
if (parameters_top <= slot && slot < parameters_bottom) {
Handle<Object> num = isolate_->factory()->NewNumber(d.value());
int index = (info->parameters_count() - 1) -
static_cast<int>(slot - parameters_top) / kPointerSize;
- if (FLAG_trace_deopt) {
+ if (trace_) {
PrintF("Materializing a new heap number %p [%e] in slot %p"
"for parameter slot #%d\n",
reinterpret_cast<void*>(*num),
d.value(),
- d.slot_address(),
+ d.destination(),
index);
}
@@ -750,12 +1802,12 @@ void Deoptimizer::MaterializeHeapNumbersForDebuggerInspectableFrame(
int index = info->expression_count() - 1 -
static_cast<int>(slot - expressions_top) / kPointerSize;
- if (FLAG_trace_deopt) {
+ if (trace_) {
PrintF("Materializing a new heap number %p [%e] in slot %p"
"for expression slot #%d\n",
reinterpret_cast<void*>(*num),
d.value(),
- d.slot_address(),
+ d.destination(),
index);
}
@@ -766,21 +1818,262 @@ void Deoptimizer::MaterializeHeapNumbersForDebuggerInspectableFrame(
#endif
+static const char* TraceValueType(bool is_smi, bool is_native = false) {
+ if (is_native) {
+ return "native";
+ } else if (is_smi) {
+ return "smi";
+ }
+
+ return "heap number";
+}
+
+
+void Deoptimizer::DoTranslateObject(TranslationIterator* iterator,
+ int object_index,
+ int field_index) {
+ disasm::NameConverter converter;
+ Address object_slot = deferred_objects_[object_index].slot_address();
+
+ Translation::Opcode opcode =
+ static_cast<Translation::Opcode>(iterator->Next());
+
+ switch (opcode) {
+ case Translation::BEGIN:
+ case Translation::JS_FRAME:
+ case Translation::ARGUMENTS_ADAPTOR_FRAME:
+ case Translation::CONSTRUCT_STUB_FRAME:
+ case Translation::GETTER_STUB_FRAME:
+ case Translation::SETTER_STUB_FRAME:
+ case Translation::COMPILED_STUB_FRAME:
+ UNREACHABLE();
+ return;
+
+ case Translation::REGISTER: {
+ int input_reg = iterator->Next();
+ intptr_t input_value = input_->GetRegister(input_reg);
+ if (trace_) {
+ PrintF(" object @0x%08" V8PRIxPTR ": [field #%d] <- ",
+ reinterpret_cast<intptr_t>(object_slot),
+ field_index);
+ PrintF("0x%08" V8PRIxPTR " ; %s ", input_value,
+ converter.NameOfCPURegister(input_reg));
+ reinterpret_cast<Object*>(input_value)->ShortPrint();
+ PrintF("\n");
+ }
+ AddObjectTaggedValue(input_value);
+ return;
+ }
+
+ case Translation::INT32_REGISTER: {
+ int input_reg = iterator->Next();
+ intptr_t value = input_->GetRegister(input_reg);
+ bool is_smi = Smi::IsValid(value);
+ if (trace_) {
+ PrintF(" object @0x%08" V8PRIxPTR ": [field #%d] <- ",
+ reinterpret_cast<intptr_t>(object_slot),
+ field_index);
+ PrintF("%" V8PRIdPTR " ; %s (%s)\n", value,
+ converter.NameOfCPURegister(input_reg),
+ TraceValueType(is_smi));
+ }
+ if (is_smi) {
+ intptr_t tagged_value =
+ reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(value)));
+ AddObjectTaggedValue(tagged_value);
+ } else {
+ double double_value = static_cast<double>(static_cast<int32_t>(value));
+ AddObjectDoubleValue(double_value);
+ }
+ return;
+ }
+
+ case Translation::UINT32_REGISTER: {
+ int input_reg = iterator->Next();
+ uintptr_t value = static_cast<uintptr_t>(input_->GetRegister(input_reg));
+ bool is_smi = (value <= static_cast<uintptr_t>(Smi::kMaxValue));
+ if (trace_) {
+ PrintF(" object @0x%08" V8PRIxPTR ": [field #%d] <- ",
+ reinterpret_cast<intptr_t>(object_slot),
+ field_index);
+ PrintF("%" V8PRIdPTR " ; uint %s (%s)\n", value,
+ converter.NameOfCPURegister(input_reg),
+ TraceValueType(is_smi));
+ }
+ if (is_smi) {
+ intptr_t tagged_value =
+ reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(value)));
+ AddObjectTaggedValue(tagged_value);
+ } else {
+ double double_value = static_cast<double>(static_cast<uint32_t>(value));
+ AddObjectDoubleValue(double_value);
+ }
+ return;
+ }
+
+ case Translation::DOUBLE_REGISTER: {
+ int input_reg = iterator->Next();
+ double value = input_->GetDoubleRegister(input_reg);
+ if (trace_) {
+ PrintF(" object @0x%08" V8PRIxPTR ": [field #%d] <- ",
+ reinterpret_cast<intptr_t>(object_slot),
+ field_index);
+ PrintF("%e ; %s\n", value,
+ DoubleRegister::AllocationIndexToString(input_reg));
+ }
+ AddObjectDoubleValue(value);
+ return;
+ }
+
+ case Translation::STACK_SLOT: {
+ int input_slot_index = iterator->Next();
+ unsigned input_offset = input_->GetOffsetFromSlotIndex(input_slot_index);
+ intptr_t input_value = input_->GetFrameSlot(input_offset);
+ if (trace_) {
+ PrintF(" object @0x%08" V8PRIxPTR ": [field #%d] <- ",
+ reinterpret_cast<intptr_t>(object_slot),
+ field_index);
+ PrintF("0x%08" V8PRIxPTR " ; [sp + %d] ", input_value, input_offset);
+ reinterpret_cast<Object*>(input_value)->ShortPrint();
+ PrintF("\n");
+ }
+ AddObjectTaggedValue(input_value);
+ return;
+ }
+
+ case Translation::INT32_STACK_SLOT: {
+ int input_slot_index = iterator->Next();
+ unsigned input_offset = input_->GetOffsetFromSlotIndex(input_slot_index);
+ intptr_t value = input_->GetFrameSlot(input_offset);
+ bool is_smi = Smi::IsValid(value);
+ if (trace_) {
+ PrintF(" object @0x%08" V8PRIxPTR ": [field #%d] <- ",
+ reinterpret_cast<intptr_t>(object_slot),
+ field_index);
+ PrintF("%" V8PRIdPTR " ; [sp + %d] (%s)\n",
+ value, input_offset, TraceValueType(is_smi));
+ }
+ if (is_smi) {
+ intptr_t tagged_value =
+ reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(value)));
+ AddObjectTaggedValue(tagged_value);
+ } else {
+ double double_value = static_cast<double>(static_cast<int32_t>(value));
+ AddObjectDoubleValue(double_value);
+ }
+ return;
+ }
+
+ case Translation::UINT32_STACK_SLOT: {
+ int input_slot_index = iterator->Next();
+ unsigned input_offset = input_->GetOffsetFromSlotIndex(input_slot_index);
+ uintptr_t value =
+ static_cast<uintptr_t>(input_->GetFrameSlot(input_offset));
+ bool is_smi = (value <= static_cast<uintptr_t>(Smi::kMaxValue));
+ if (trace_) {
+ PrintF(" object @0x%08" V8PRIxPTR ": [field #%d] <- ",
+ reinterpret_cast<intptr_t>(object_slot),
+ field_index);
+ PrintF("%" V8PRIdPTR " ; [sp + %d] (uint %s)\n",
+ value, input_offset, TraceValueType(is_smi));
+ }
+ if (is_smi) {
+ intptr_t tagged_value =
+ reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(value)));
+ AddObjectTaggedValue(tagged_value);
+ } else {
+ double double_value = static_cast<double>(static_cast<uint32_t>(value));
+ AddObjectDoubleValue(double_value);
+ }
+ return;
+ }
+
+ case Translation::DOUBLE_STACK_SLOT: {
+ int input_slot_index = iterator->Next();
+ unsigned input_offset = input_->GetOffsetFromSlotIndex(input_slot_index);
+ double value = input_->GetDoubleFrameSlot(input_offset);
+ if (trace_) {
+ PrintF(" object @0x%08" V8PRIxPTR ": [field #%d] <- ",
+ reinterpret_cast<intptr_t>(object_slot),
+ field_index);
+ PrintF("%e ; [sp + %d]\n", value, input_offset);
+ }
+ AddObjectDoubleValue(value);
+ return;
+ }
+
+ case Translation::LITERAL: {
+ Object* literal = ComputeLiteral(iterator->Next());
+ if (trace_) {
+ PrintF(" object @0x%08" V8PRIxPTR ": [field #%d] <- ",
+ reinterpret_cast<intptr_t>(object_slot),
+ field_index);
+ literal->ShortPrint();
+ PrintF(" ; literal\n");
+ }
+ intptr_t value = reinterpret_cast<intptr_t>(literal);
+ AddObjectTaggedValue(value);
+ return;
+ }
+
+ case Translation::DUPLICATED_OBJECT: {
+ int object_index = iterator->Next();
+ if (trace_) {
+ PrintF(" nested @0x%08" V8PRIxPTR ": [field #%d] <- ",
+ reinterpret_cast<intptr_t>(object_slot),
+ field_index);
+ isolate_->heap()->arguments_marker()->ShortPrint();
+ PrintF(" ; duplicate of object #%d\n", object_index);
+ }
+ // Use the materialization marker value as a sentinel and fill in
+ // the object after the deoptimized frame is built.
+ intptr_t value = reinterpret_cast<intptr_t>(
+ isolate_->heap()->arguments_marker());
+ AddObjectDuplication(0, object_index);
+ AddObjectTaggedValue(value);
+ return;
+ }
+
+ case Translation::ARGUMENTS_OBJECT:
+ case Translation::CAPTURED_OBJECT: {
+ int length = iterator->Next();
+ bool is_args = opcode == Translation::ARGUMENTS_OBJECT;
+ if (trace_) {
+ PrintF(" nested @0x%08" V8PRIxPTR ": [field #%d] <- ",
+ reinterpret_cast<intptr_t>(object_slot),
+ field_index);
+ isolate_->heap()->arguments_marker()->ShortPrint();
+ PrintF(" ; object (length = %d, is_args = %d)\n", length, is_args);
+ }
+ // Use the materialization marker value as a sentinel and fill in
+ // the object after the deoptimized frame is built.
+ intptr_t value = reinterpret_cast<intptr_t>(
+ isolate_->heap()->arguments_marker());
+ AddObjectStart(0, length, is_args);
+ AddObjectTaggedValue(value);
+ // We save the object values on the side and materialize the actual
+ // object after the deoptimized frame is built.
+ int object_index = deferred_objects_.length() - 1;
+ for (int i = 0; i < length; i++) {
+ DoTranslateObject(iterator, object_index, i);
+ }
+ return;
+ }
+ }
+}
+
+
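Throughout DoTranslateObject (and DoTranslateCommand below), an integer value is kept as a tagged smi only if it fits the smi range; otherwise it is routed through a heap-number materialization. A minimal standalone sketch of that classification, assuming the 31-bit smi range of a 32-bit tagged layout rather than using the real Smi::IsValid, is:

#include <cstdint>
#include <cstdio>

// Assumed 31-bit smi range of a 32-bit tagged layout: [-2^30, 2^30 - 1].
static bool FitsInSmi(intptr_t value) {
  const intptr_t kSmiMax = (static_cast<intptr_t>(1) << 30) - 1;
  const intptr_t kSmiMin = -(static_cast<intptr_t>(1) << 30);
  return value >= kSmiMin && value <= kSmiMax;
}

int main() {
  const intptr_t samples[] = {42, -7, static_cast<intptr_t>(1) << 30};
  for (intptr_t v : samples) {
    std::printf("%lld -> %s\n", static_cast<long long>(v),
                FitsInSmi(v) ? "smi" : "heap number");
  }
  return 0;
}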
void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
- int frame_index,
- unsigned output_offset) {
+ int frame_index,
+ unsigned output_offset,
+ DeoptimizerTranslatedValueType value_type) {
disasm::NameConverter converter;
// A GC-safe temporary placeholder that we can put in the output frame.
const intptr_t kPlaceholder = reinterpret_cast<intptr_t>(Smi::FromInt(0));
+ bool is_native = value_type == TRANSLATED_VALUE_IS_NATIVE;
- // Ignore commands marked as duplicate and act on the first non-duplicate.
Translation::Opcode opcode =
static_cast<Translation::Opcode>(iterator->Next());
- while (opcode == Translation::DUPLICATE) {
- opcode = static_cast<Translation::Opcode>(iterator->Next());
- iterator->Skip(Translation::NumberOfOperandsFor(opcode));
- opcode = static_cast<Translation::Opcode>(iterator->Next());
- }
switch (opcode) {
case Translation::BEGIN:
@@ -789,14 +2082,14 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
case Translation::CONSTRUCT_STUB_FRAME:
case Translation::GETTER_STUB_FRAME:
case Translation::SETTER_STUB_FRAME:
- case Translation::DUPLICATE:
+ case Translation::COMPILED_STUB_FRAME:
UNREACHABLE();
return;
case Translation::REGISTER: {
int input_reg = iterator->Next();
intptr_t input_value = input_->GetRegister(input_reg);
- if (FLAG_trace_deopt) {
+ if (trace_) {
PrintF(
" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" V8PRIxPTR " ; %s ",
output_[frame_index]->GetTop() + output_offset,
@@ -813,23 +2106,27 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
case Translation::INT32_REGISTER: {
int input_reg = iterator->Next();
intptr_t value = input_->GetRegister(input_reg);
- bool is_smi = Smi::IsValid(value);
- if (FLAG_trace_deopt) {
+ bool is_smi = (value_type == TRANSLATED_VALUE_IS_TAGGED) &&
+ Smi::IsValid(value);
+ if (trace_) {
PrintF(
" 0x%08" V8PRIxPTR ": [top + %d] <- %" V8PRIdPTR " ; %s (%s)\n",
output_[frame_index]->GetTop() + output_offset,
output_offset,
value,
converter.NameOfCPURegister(input_reg),
- is_smi ? "smi" : "heap number");
+ TraceValueType(is_smi, is_native));
}
if (is_smi) {
intptr_t tagged_value =
reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(value)));
output_[frame_index]->SetFrameSlot(output_offset, tagged_value);
+ } else if (value_type == TRANSLATED_VALUE_IS_NATIVE) {
+ output_[frame_index]->SetFrameSlot(output_offset, value);
} else {
// We save the untagged value on the side and store a GC-safe
// temporary placeholder in the frame.
+ ASSERT(value_type == TRANSLATED_VALUE_IS_TAGGED);
AddDoubleValue(output_[frame_index]->GetTop() + output_offset,
static_cast<double>(static_cast<int32_t>(value)));
output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
@@ -840,8 +2137,9 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
case Translation::UINT32_REGISTER: {
int input_reg = iterator->Next();
uintptr_t value = static_cast<uintptr_t>(input_->GetRegister(input_reg));
- bool is_smi = (value <= static_cast<uintptr_t>(Smi::kMaxValue));
- if (FLAG_trace_deopt) {
+ bool is_smi = (value_type == TRANSLATED_VALUE_IS_TAGGED) &&
+ (value <= static_cast<uintptr_t>(Smi::kMaxValue));
+ if (trace_) {
PrintF(
" 0x%08" V8PRIxPTR ": [top + %d] <- %" V8PRIuPTR
" ; uint %s (%s)\n",
@@ -849,15 +2147,18 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
output_offset,
value,
converter.NameOfCPURegister(input_reg),
- is_smi ? "smi" : "heap number");
+ TraceValueType(is_smi, is_native));
}
if (is_smi) {
intptr_t tagged_value =
reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(value)));
output_[frame_index]->SetFrameSlot(output_offset, tagged_value);
+ } else if (value_type == TRANSLATED_VALUE_IS_NATIVE) {
+ output_[frame_index]->SetFrameSlot(output_offset, value);
} else {
// We save the untagged value on the side and store a GC-safe
// temporary placeholder in the frame.
+ ASSERT(value_type == TRANSLATED_VALUE_IS_TAGGED);
AddDoubleValue(output_[frame_index]->GetTop() + output_offset,
static_cast<double>(static_cast<uint32_t>(value)));
output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
@@ -868,7 +2169,7 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
case Translation::DOUBLE_REGISTER: {
int input_reg = iterator->Next();
double value = input_->GetDoubleRegister(input_reg);
- if (FLAG_trace_deopt) {
+ if (trace_) {
PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- %e ; %s\n",
output_[frame_index]->GetTop() + output_offset,
output_offset,
@@ -884,10 +2185,9 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
case Translation::STACK_SLOT: {
int input_slot_index = iterator->Next();
- unsigned input_offset =
- input_->GetOffsetFromSlotIndex(input_slot_index);
+ unsigned input_offset = input_->GetOffsetFromSlotIndex(input_slot_index);
intptr_t input_value = input_->GetFrameSlot(input_offset);
- if (FLAG_trace_deopt) {
+ if (trace_) {
PrintF(" 0x%08" V8PRIxPTR ": ",
output_[frame_index]->GetTop() + output_offset);
PrintF("[top + %d] <- 0x%08" V8PRIxPTR " ; [sp + %d] ",
@@ -903,26 +2203,29 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
case Translation::INT32_STACK_SLOT: {
int input_slot_index = iterator->Next();
- unsigned input_offset =
- input_->GetOffsetFromSlotIndex(input_slot_index);
+ unsigned input_offset = input_->GetOffsetFromSlotIndex(input_slot_index);
intptr_t value = input_->GetFrameSlot(input_offset);
- bool is_smi = Smi::IsValid(value);
- if (FLAG_trace_deopt) {
+ bool is_smi = (value_type == TRANSLATED_VALUE_IS_TAGGED) &&
+ Smi::IsValid(value);
+ if (trace_) {
PrintF(" 0x%08" V8PRIxPTR ": ",
output_[frame_index]->GetTop() + output_offset);
PrintF("[top + %d] <- %" V8PRIdPTR " ; [sp + %d] (%s)\n",
output_offset,
value,
input_offset,
- is_smi ? "smi" : "heap number");
+ TraceValueType(is_smi, is_native));
}
if (is_smi) {
intptr_t tagged_value =
reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(value)));
output_[frame_index]->SetFrameSlot(output_offset, tagged_value);
+ } else if (value_type == TRANSLATED_VALUE_IS_NATIVE) {
+ output_[frame_index]->SetFrameSlot(output_offset, value);
} else {
// We save the untagged value on the side and store a GC-safe
// temporary placeholder in the frame.
+ ASSERT(value_type == TRANSLATED_VALUE_IS_TAGGED);
AddDoubleValue(output_[frame_index]->GetTop() + output_offset,
static_cast<double>(static_cast<int32_t>(value)));
output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
@@ -932,27 +2235,30 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
case Translation::UINT32_STACK_SLOT: {
int input_slot_index = iterator->Next();
- unsigned input_offset =
- input_->GetOffsetFromSlotIndex(input_slot_index);
+ unsigned input_offset = input_->GetOffsetFromSlotIndex(input_slot_index);
uintptr_t value =
static_cast<uintptr_t>(input_->GetFrameSlot(input_offset));
- bool is_smi = (value <= static_cast<uintptr_t>(Smi::kMaxValue));
- if (FLAG_trace_deopt) {
+ bool is_smi = (value_type == TRANSLATED_VALUE_IS_TAGGED) &&
+ (value <= static_cast<uintptr_t>(Smi::kMaxValue));
+ if (trace_) {
PrintF(" 0x%08" V8PRIxPTR ": ",
output_[frame_index]->GetTop() + output_offset);
PrintF("[top + %d] <- %" V8PRIuPTR " ; [sp + %d] (uint32 %s)\n",
output_offset,
value,
input_offset,
- is_smi ? "smi" : "heap number");
+ TraceValueType(is_smi, is_native));
}
if (is_smi) {
intptr_t tagged_value =
reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(value)));
output_[frame_index]->SetFrameSlot(output_offset, tagged_value);
+ } else if (value_type == TRANSLATED_VALUE_IS_NATIVE) {
+ output_[frame_index]->SetFrameSlot(output_offset, value);
} else {
// We save the untagged value on the side and store a GC-safe
// temporary placeholder in the frame.
+ ASSERT(value_type == TRANSLATED_VALUE_IS_TAGGED);
AddDoubleValue(output_[frame_index]->GetTop() + output_offset,
static_cast<double>(static_cast<uint32_t>(value)));
output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
@@ -962,10 +2268,9 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
case Translation::DOUBLE_STACK_SLOT: {
int input_slot_index = iterator->Next();
- unsigned input_offset =
- input_->GetOffsetFromSlotIndex(input_slot_index);
+ unsigned input_offset = input_->GetOffsetFromSlotIndex(input_slot_index);
double value = input_->GetDoubleFrameSlot(input_offset);
- if (FLAG_trace_deopt) {
+ if (trace_) {
PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- %e ; [sp + %d]\n",
output_[frame_index]->GetTop() + output_offset,
output_offset,
@@ -981,7 +2286,7 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
case Translation::LITERAL: {
Object* literal = ComputeLiteral(iterator->Next());
- if (FLAG_trace_deopt) {
+ if (trace_) {
PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- ",
output_[frame_index]->GetTop() + output_offset,
output_offset);
@@ -993,315 +2298,52 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
return;
}
- case Translation::ARGUMENTS_OBJECT: {
- int args_index = iterator->Next() + 1; // Skip receiver.
- int args_length = iterator->Next() - 1; // Skip receiver.
- if (FLAG_trace_deopt) {
+ case Translation::DUPLICATED_OBJECT: {
+ int object_index = iterator->Next();
+ if (trace_) {
PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- ",
output_[frame_index]->GetTop() + output_offset,
output_offset);
isolate_->heap()->arguments_marker()->ShortPrint();
- PrintF(" ; arguments object\n");
+ PrintF(" ; duplicate of object #%d\n", object_index);
}
- // Use the arguments marker value as a sentinel and fill in the arguments
- // object after the deoptimized frame is built.
+ // Use the materialization marker value as a sentinel and fill in
+ // the object after the deoptimized frame is built.
intptr_t value = reinterpret_cast<intptr_t>(
isolate_->heap()->arguments_marker());
- AddArgumentsObject(
- output_[frame_index]->GetTop() + output_offset, args_length);
+ AddObjectDuplication(output_[frame_index]->GetTop() + output_offset,
+ object_index);
output_[frame_index]->SetFrameSlot(output_offset, value);
- // We save the tagged argument values on the side and materialize the
- // actual arguments object after the deoptimized frame is built.
- for (int i = 0; i < args_length; i++) {
- unsigned input_offset = input_->GetOffsetFromSlotIndex(args_index + i);
- intptr_t input_value = input_->GetFrameSlot(input_offset);
- AddArgumentsObjectValue(input_value);
- }
return;
}
- }
-}
-
-
-static bool ObjectToInt32(Object* obj, int32_t* value) {
- if (obj->IsSmi()) {
- *value = Smi::cast(obj)->value();
- return true;
- }
-
- if (obj->IsHeapNumber()) {
- double num = HeapNumber::cast(obj)->value();
- if (FastI2D(FastD2I(num)) != num) {
- if (FLAG_trace_osr) {
- PrintF("**** %g could not be converted to int32 ****\n",
- HeapNumber::cast(obj)->value());
- }
- return false;
- }
-
- *value = FastD2I(num);
- return true;
- }
-
- return false;
-}
-
-
-static bool ObjectToUint32(Object* obj, uint32_t* value) {
- if (obj->IsSmi()) {
- if (Smi::cast(obj)->value() < 0) return false;
-
- *value = static_cast<uint32_t>(Smi::cast(obj)->value());
- return true;
- }
-
- if (obj->IsHeapNumber()) {
- double num = HeapNumber::cast(obj)->value();
- if ((num < 0) || (FastUI2D(FastD2UI(num)) != num)) {
- if (FLAG_trace_osr) {
- PrintF("**** %g could not be converted to uint32 ****\n",
- HeapNumber::cast(obj)->value());
- }
- return false;
- }
-
- *value = FastD2UI(num);
- return true;
- }
-
- return false;
-}
-
-
-bool Deoptimizer::DoOsrTranslateCommand(TranslationIterator* iterator,
- int* input_offset) {
- disasm::NameConverter converter;
- FrameDescription* output = output_[0];
-
- // The input values are all part of the unoptimized frame so they
- // are all tagged pointers.
- uintptr_t input_value = input_->GetFrameSlot(*input_offset);
- Object* input_object = reinterpret_cast<Object*>(input_value);
-
- Translation::Opcode opcode =
- static_cast<Translation::Opcode>(iterator->Next());
- bool duplicate = (opcode == Translation::DUPLICATE);
- if (duplicate) {
- opcode = static_cast<Translation::Opcode>(iterator->Next());
- }
-
- switch (opcode) {
- case Translation::BEGIN:
- case Translation::JS_FRAME:
- case Translation::ARGUMENTS_ADAPTOR_FRAME:
- case Translation::CONSTRUCT_STUB_FRAME:
- case Translation::GETTER_STUB_FRAME:
- case Translation::SETTER_STUB_FRAME:
- case Translation::DUPLICATE:
- UNREACHABLE(); // Malformed input.
- return false;
-
- case Translation::REGISTER: {
- int output_reg = iterator->Next();
- if (FLAG_trace_osr) {
- PrintF(" %s <- 0x%08" V8PRIxPTR " ; [sp + %d]\n",
- converter.NameOfCPURegister(output_reg),
- input_value,
- *input_offset);
- }
- output->SetRegister(output_reg, input_value);
- break;
- }
-
- case Translation::INT32_REGISTER: {
- int32_t int32_value = 0;
- if (!ObjectToInt32(input_object, &int32_value)) return false;
-
- int output_reg = iterator->Next();
- if (FLAG_trace_osr) {
- PrintF(" %s <- %d (int32) ; [sp + %d]\n",
- converter.NameOfCPURegister(output_reg),
- int32_value,
- *input_offset);
- }
- output->SetRegister(output_reg, int32_value);
- break;
- }
-
- case Translation::UINT32_REGISTER: {
- uint32_t uint32_value = 0;
- if (!ObjectToUint32(input_object, &uint32_value)) return false;
-
- int output_reg = iterator->Next();
- if (FLAG_trace_osr) {
- PrintF(" %s <- %u (uint32) ; [sp + %d]\n",
- converter.NameOfCPURegister(output_reg),
- uint32_value,
- *input_offset);
- }
- output->SetRegister(output_reg, static_cast<int32_t>(uint32_value));
- }
-
-
- case Translation::DOUBLE_REGISTER: {
- // Abort OSR if we don't have a number.
- if (!input_object->IsNumber()) return false;
-
- int output_reg = iterator->Next();
- double double_value = input_object->Number();
- if (FLAG_trace_osr) {
- PrintF(" %s <- %g (double) ; [sp + %d]\n",
- DoubleRegister::AllocationIndexToString(output_reg),
- double_value,
- *input_offset);
- }
- output->SetDoubleRegister(output_reg, double_value);
- break;
- }
-
- case Translation::STACK_SLOT: {
- int output_index = iterator->Next();
- unsigned output_offset =
- output->GetOffsetFromSlotIndex(output_index);
- if (FLAG_trace_osr) {
- PrintF(" [sp + %d] <- 0x%08" V8PRIxPTR " ; [sp + %d] ",
- output_offset,
- input_value,
- *input_offset);
- reinterpret_cast<Object*>(input_value)->ShortPrint();
- PrintF("\n");
- }
- output->SetFrameSlot(output_offset, input_value);
- break;
- }
-
- case Translation::INT32_STACK_SLOT: {
- int32_t int32_value = 0;
- if (!ObjectToInt32(input_object, &int32_value)) return false;
-
- int output_index = iterator->Next();
- unsigned output_offset =
- output->GetOffsetFromSlotIndex(output_index);
- if (FLAG_trace_osr) {
- PrintF(" [sp + %d] <- %d (int32) ; [sp + %d]\n",
- output_offset,
- int32_value,
- *input_offset);
- }
- output->SetFrameSlot(output_offset, int32_value);
- break;
- }
- case Translation::UINT32_STACK_SLOT: {
- uint32_t uint32_value = 0;
- if (!ObjectToUint32(input_object, &uint32_value)) return false;
-
- int output_index = iterator->Next();
- unsigned output_offset =
- output->GetOffsetFromSlotIndex(output_index);
- if (FLAG_trace_osr) {
- PrintF(" [sp + %d] <- %u (uint32) ; [sp + %d]\n",
- output_offset,
- uint32_value,
- *input_offset);
+ case Translation::ARGUMENTS_OBJECT:
+ case Translation::CAPTURED_OBJECT: {
+ int length = iterator->Next();
+ bool is_args = opcode == Translation::ARGUMENTS_OBJECT;
+ if (trace_) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- ",
+ output_[frame_index]->GetTop() + output_offset,
+ output_offset);
+ isolate_->heap()->arguments_marker()->ShortPrint();
+ PrintF(" ; object (length = %d, is_args = %d)\n", length, is_args);
}
- output->SetFrameSlot(output_offset, static_cast<int32_t>(uint32_value));
- break;
- }
-
- case Translation::DOUBLE_STACK_SLOT: {
- static const int kLowerOffset = 0 * kPointerSize;
- static const int kUpperOffset = 1 * kPointerSize;
-
- // Abort OSR if we don't have a number.
- if (!input_object->IsNumber()) return false;
-
- int output_index = iterator->Next();
- unsigned output_offset =
- output->GetOffsetFromSlotIndex(output_index);
- double double_value = input_object->Number();
- uint64_t int_value = BitCast<uint64_t, double>(double_value);
- int32_t lower = static_cast<int32_t>(int_value);
- int32_t upper = static_cast<int32_t>(int_value >> kBitsPerInt);
- if (FLAG_trace_osr) {
- PrintF(" [sp + %d] <- 0x%08x (upper bits of %g) ; [sp + %d]\n",
- output_offset + kUpperOffset,
- upper,
- double_value,
- *input_offset);
- PrintF(" [sp + %d] <- 0x%08x (lower bits of %g) ; [sp + %d]\n",
- output_offset + kLowerOffset,
- lower,
- double_value,
- *input_offset);
+ // Use the materialization marker value as a sentinel and fill in
+ // the object after the deoptimized frame is built.
+ intptr_t value = reinterpret_cast<intptr_t>(
+ isolate_->heap()->arguments_marker());
+ AddObjectStart(output_[frame_index]->GetTop() + output_offset,
+ length, is_args);
+ output_[frame_index]->SetFrameSlot(output_offset, value);
+ // We save the object values on the side and materialize the actual
+ // object after the deoptimized frame is built.
+ int object_index = deferred_objects_.length() - 1;
+ for (int i = 0; i < length; i++) {
+ DoTranslateObject(iterator, object_index, i);
}
- output->SetFrameSlot(output_offset + kLowerOffset, lower);
- output->SetFrameSlot(output_offset + kUpperOffset, upper);
- break;
- }
-
- case Translation::LITERAL: {
- // Just ignore non-materialized literals.
- iterator->Next();
- break;
- }
-
- case Translation::ARGUMENTS_OBJECT: {
- // Optimized code assumes that the argument object has not been
- // materialized and so bypasses it when doing arguments access.
- // We should have bailed out before starting the frame
- // translation.
- UNREACHABLE();
- return false;
+ return;
}
}
-
- if (!duplicate) *input_offset -= kPointerSize;
- return true;
-}
-
-
-void Deoptimizer::PatchStackCheckCode(Code* unoptimized_code,
- Code* check_code,
- Code* replacement_code) {
- // Iterate over the stack check table and patch every stack check
- // call to an unconditional call to the replacement code.
- ASSERT(unoptimized_code->kind() == Code::FUNCTION);
- Address stack_check_cursor = unoptimized_code->instruction_start() +
- unoptimized_code->stack_check_table_offset();
- uint32_t table_length = Memory::uint32_at(stack_check_cursor);
- stack_check_cursor += kIntSize;
- for (uint32_t i = 0; i < table_length; ++i) {
- uint32_t pc_offset = Memory::uint32_at(stack_check_cursor + kIntSize);
- Address pc_after = unoptimized_code->instruction_start() + pc_offset;
- PatchStackCheckCodeAt(unoptimized_code,
- pc_after,
- check_code,
- replacement_code);
- stack_check_cursor += 2 * kIntSize;
- }
-}
-
-
-void Deoptimizer::RevertStackCheckCode(Code* unoptimized_code,
- Code* check_code,
- Code* replacement_code) {
- // Iterate over the stack check table and revert the patched
- // stack check calls.
- ASSERT(unoptimized_code->kind() == Code::FUNCTION);
- Address stack_check_cursor = unoptimized_code->instruction_start() +
- unoptimized_code->stack_check_table_offset();
- uint32_t table_length = Memory::uint32_at(stack_check_cursor);
- stack_check_cursor += kIntSize;
- for (uint32_t i = 0; i < table_length; ++i) {
- uint32_t pc_offset = Memory::uint32_at(stack_check_cursor + kIntSize);
- Address pc_after = unoptimized_code->instruction_start() + pc_offset;
- RevertStackCheckCodeAt(unoptimized_code,
- pc_after,
- check_code,
- replacement_code);
- stack_check_cursor += 2 * kIntSize;
- }
}
@@ -1311,13 +2353,8 @@ unsigned Deoptimizer::ComputeInputFrameSize() const {
// into account so we have to avoid double counting them (-2).
unsigned result = fixed_size + fp_to_sp_delta_ - (2 * kPointerSize);
#ifdef DEBUG
- if (bailout_type_ == OSR) {
- // TODO(kasperl): It would be nice if we could verify that the
- // size matches with the stack height we can compute based on the
- // environment at the OSR entry. The code for that his built into
- // the DoComputeOsrOutputFrame function for now.
- } else {
- unsigned stack_slots = optimized_code_->stack_slots();
+ if (compiled_code_->kind() == Code::OPTIMIZED_FUNCTION) {
+ unsigned stack_slots = compiled_code_->stack_slots();
unsigned outgoing_size = ComputeOutgoingArgumentSize();
ASSERT(result == fixed_size + (stack_slots * kPointerSize) + outgoing_size);
}
@@ -1337,6 +2374,10 @@ unsigned Deoptimizer::ComputeFixedSize(JSFunction* function) const {
unsigned Deoptimizer::ComputeIncomingArgumentSize(JSFunction* function) const {
// The incoming arguments is the values for formal parameters and
// the receiver. Every slot contains a pointer.
+ if (function->IsSmi()) {
+ ASSERT(Smi::cast(function) == Smi::FromInt(StackFrame::STUB));
+ return 0;
+ }
unsigned arguments = function->shared()->formal_parameter_count() + 1;
return arguments * kPointerSize;
}
@@ -1344,7 +2385,7 @@ unsigned Deoptimizer::ComputeIncomingArgumentSize(JSFunction* function) const {
unsigned Deoptimizer::ComputeOutgoingArgumentSize() const {
DeoptimizationInputData* data = DeoptimizationInputData::cast(
- optimized_code_->deoptimization_data());
+ compiled_code_->deoptimization_data());
unsigned height = data->ArgumentsStackHeight(bailout_id_)->value();
return height * kPointerSize;
}
@@ -1352,142 +2393,77 @@ unsigned Deoptimizer::ComputeOutgoingArgumentSize() const {
Object* Deoptimizer::ComputeLiteral(int index) const {
DeoptimizationInputData* data = DeoptimizationInputData::cast(
- optimized_code_->deoptimization_data());
+ compiled_code_->deoptimization_data());
FixedArray* literals = data->LiteralArray();
return literals->get(index);
}
-void Deoptimizer::AddArgumentsObject(intptr_t slot_address, int argc) {
- ArgumentsObjectMaterializationDescriptor object_desc(
- reinterpret_cast<Address>(slot_address), argc);
- deferred_arguments_objects_.Add(object_desc);
+void Deoptimizer::AddObjectStart(intptr_t slot, int length, bool is_args) {
+ ObjectMaterializationDescriptor object_desc(
+ reinterpret_cast<Address>(slot), jsframe_count_, length, -1, is_args);
+ deferred_objects_.Add(object_desc);
+}
+
+
+void Deoptimizer::AddObjectDuplication(intptr_t slot, int object_index) {
+ ObjectMaterializationDescriptor object_desc(
+ reinterpret_cast<Address>(slot), jsframe_count_, -1, object_index, false);
+ deferred_objects_.Add(object_desc);
+}
+
+
+void Deoptimizer::AddObjectTaggedValue(intptr_t value) {
+ deferred_objects_tagged_values_.Add(reinterpret_cast<Object*>(value));
}
-void Deoptimizer::AddArgumentsObjectValue(intptr_t value) {
- deferred_arguments_objects_values_.Add(reinterpret_cast<Object*>(value));
+void Deoptimizer::AddObjectDoubleValue(double value) {
+ deferred_objects_tagged_values_.Add(isolate()->heap()->the_hole_value());
+ HeapNumberMaterializationDescriptor<int> value_desc(
+ deferred_objects_tagged_values_.length() - 1, value);
+ deferred_objects_double_values_.Add(value_desc);
}
void Deoptimizer::AddDoubleValue(intptr_t slot_address, double value) {
- HeapNumberMaterializationDescriptor value_desc(
+ HeapNumberMaterializationDescriptor<Address> value_desc(
reinterpret_cast<Address>(slot_address), value);
deferred_heap_numbers_.Add(value_desc);
}
-MemoryChunk* Deoptimizer::CreateCode(BailoutType type) {
+void Deoptimizer::EnsureCodeForDeoptimizationEntry(Isolate* isolate,
+ BailoutType type,
+ int max_entry_id) {
// We cannot run this if the serializer is enabled because this will
// cause us to emit relocation information for the external
// references. This is fine because the deoptimizer's code section
// isn't meant to be serialized at all.
- ASSERT(!Serializer::enabled());
-
- MacroAssembler masm(Isolate::Current(), NULL, 16 * KB);
+ ASSERT(type == EAGER || type == SOFT || type == LAZY);
+ DeoptimizerData* data = isolate->deoptimizer_data();
+ int entry_count = data->deopt_entry_code_entries_[type];
+ if (max_entry_id < entry_count) return;
+ entry_count = Max(entry_count, Deoptimizer::kMinNumberOfEntries);
+ while (max_entry_id >= entry_count) entry_count *= 2;
+ ASSERT(entry_count <= Deoptimizer::kMaxNumberOfEntries);
+
+ MacroAssembler masm(isolate, NULL, 16 * KB);
masm.set_emit_debug_code(false);
- GenerateDeoptimizationEntries(&masm, kNumberOfEntries, type);
+ GenerateDeoptimizationEntries(&masm, entry_count, type);
CodeDesc desc;
masm.GetCode(&desc);
- ASSERT(desc.reloc_size == 0);
-
- MemoryChunk* chunk =
- Isolate::Current()->memory_allocator()->AllocateChunk(desc.instr_size,
- EXECUTABLE,
- NULL);
- ASSERT(chunk->area_size() >= desc.instr_size);
- if (chunk == NULL) {
- V8::FatalProcessOutOfMemory("Not enough memory for deoptimization table");
- }
- memcpy(chunk->area_start(), desc.buffer, desc.instr_size);
+ ASSERT(!RelocInfo::RequiresRelocation(desc));
+
+ MemoryChunk* chunk = data->deopt_entry_code_[type];
+ ASSERT(static_cast<int>(Deoptimizer::GetMaxDeoptTableSize()) >=
+ desc.instr_size);
+ chunk->CommitArea(desc.instr_size);
+ CopyBytes(chunk->area_start(), desc.buffer,
+ static_cast<size_t>(desc.instr_size));
CPU::FlushICache(chunk->area_start(), desc.instr_size);
- return chunk;
-}
-
-
-Code* Deoptimizer::FindDeoptimizingCodeFromAddress(Address addr) {
- DeoptimizingCodeListNode* node =
- Isolate::Current()->deoptimizer_data()->deoptimizing_code_list_;
- while (node != NULL) {
- if (node->code()->contains(addr)) return *node->code();
- node = node->next();
- }
- return NULL;
-}
-
-
-void Deoptimizer::RemoveDeoptimizingCode(Code* code) {
- DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
- ASSERT(data->deoptimizing_code_list_ != NULL);
- // Run through the code objects to find this one and remove it.
- DeoptimizingCodeListNode* prev = NULL;
- DeoptimizingCodeListNode* current = data->deoptimizing_code_list_;
- while (current != NULL) {
- if (*current->code() == code) {
- // Unlink from list. If prev is NULL we are looking at the first element.
- if (prev == NULL) {
- data->deoptimizing_code_list_ = current->next();
- } else {
- prev->set_next(current->next());
- }
- delete current;
- return;
- }
- // Move to next in list.
- prev = current;
- current = current->next();
- }
- // Deoptimizing code is removed through weak callback. Each object is expected
- // to be removed once and only once.
- UNREACHABLE();
-}
-
-static Object* CutOutRelatedFunctionsList(Context* context,
- Code* code,
- Object* undefined) {
- Object* result_list_head = undefined;
- Object* head;
- Object* current;
- current = head = context->get(Context::OPTIMIZED_FUNCTIONS_LIST);
- JSFunction* prev = NULL;
- while (current != undefined) {
- JSFunction* func = JSFunction::cast(current);
- current = func->next_function_link();
- if (func->code() == code) {
- func->set_next_function_link(result_list_head);
- result_list_head = func;
- if (prev) {
- prev->set_next_function_link(current);
- } else {
- head = current;
- }
- } else {
- prev = func;
- }
- }
- if (head != context->get(Context::OPTIMIZED_FUNCTIONS_LIST)) {
- context->set(Context::OPTIMIZED_FUNCTIONS_LIST, head);
- }
- return result_list_head;
-}
-
-
-void Deoptimizer::ReplaceCodeForRelatedFunctions(JSFunction* function,
- Code* code) {
- Context* context = function->context()->native_context();
-
- SharedFunctionInfo* shared = function->shared();
-
- Object* undefined = Isolate::Current()->heap()->undefined_value();
- Object* current = CutOutRelatedFunctionsList(context, code, undefined);
-
- while (current != undefined) {
- JSFunction* func = JSFunction::cast(current);
- current = func->next_function_link();
- func->set_code(shared->code());
- func->set_next_function_link(undefined);
- }
+ data->deopt_entry_code_entries_[type] = entry_count;
}
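
The sizing policy above (start at kMinNumberOfEntries, double until the requested entry id fits, never exceed kMaxNumberOfEntries) can be read in isolation. A minimal standalone sketch, with a hypothetical helper name and constants mirroring deoptimizer.h:

#include <cassert>

// Illustrative only: mirrors the entry-count growth in
// Deoptimizer::EnsureCodeForDeoptimizationEntry above.
static int ComputeDeoptTableEntryCount(int current_entries, int max_entry_id) {
  const int kMinEntries = 64;     // Deoptimizer::kMinNumberOfEntries
  const int kMaxEntries = 16384;  // Deoptimizer::kMaxNumberOfEntries
  if (max_entry_id < current_entries) return current_entries;  // table already fits
  int entries = current_entries < kMinEntries ? kMinEntries : current_entries;
  while (max_entry_id >= entries) entries *= 2;  // grow by doubling
  assert(entries <= kMaxEntries);
  return entries;
}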
@@ -1541,6 +2517,8 @@ int FrameDescription::ComputeParametersCount() {
// Can't use GetExpression(0) because it would cause infinite recursion.
return reinterpret_cast<Smi*>(*GetFrameSlotPointer(0))->value();
}
+ case StackFrame::STUB:
+ return -1; // Minus receiver.
default:
UNREACHABLE();
return 0;
@@ -1603,11 +2581,11 @@ int32_t TranslationIterator::Next() {
}
-Handle<ByteArray> TranslationBuffer::CreateByteArray() {
+Handle<ByteArray> TranslationBuffer::CreateByteArray(Factory* factory) {
int length = contents_.length();
- Handle<ByteArray> result =
- Isolate::Current()->factory()->NewByteArray(length, TENURED);
- memcpy(result->GetDataStartAddress(), contents_.ToVector().start(), length);
+ Handle<ByteArray> result = factory->NewByteArray(length, TENURED);
+ OS::MemCopy(
+ result->GetDataStartAddress(), contents_.ToVector().start(), length);
return result;
}
@@ -1648,6 +2626,29 @@ void Translation::BeginJSFrame(BailoutId node_id,
}
+void Translation::BeginCompiledStubFrame() {
+ buffer_->Add(COMPILED_STUB_FRAME, zone());
+}
+
+
+void Translation::BeginArgumentsObject(int args_length) {
+ buffer_->Add(ARGUMENTS_OBJECT, zone());
+ buffer_->Add(args_length, zone());
+}
+
+
+void Translation::BeginCapturedObject(int length) {
+ buffer_->Add(CAPTURED_OBJECT, zone());
+ buffer_->Add(length, zone());
+}
+
+
+void Translation::DuplicateObject(int object_index) {
+ buffer_->Add(DUPLICATED_OBJECT, zone());
+ buffer_->Add(object_index, zone());
+}
+
+
void Translation::StoreRegister(Register reg) {
buffer_->Add(REGISTER, zone());
buffer_->Add(reg.code(), zone());
@@ -1702,24 +2703,23 @@ void Translation::StoreLiteral(int literal_id) {
}
-void Translation::StoreArgumentsObject(int args_index, int args_length) {
+void Translation::StoreArgumentsObject(bool args_known,
+ int args_index,
+ int args_length) {
buffer_->Add(ARGUMENTS_OBJECT, zone());
+ buffer_->Add(args_known, zone());
buffer_->Add(args_index, zone());
buffer_->Add(args_length, zone());
}
-void Translation::MarkDuplicate() {
- buffer_->Add(DUPLICATE, zone());
-}
-
-
int Translation::NumberOfOperandsFor(Opcode opcode) {
switch (opcode) {
- case DUPLICATE:
- return 0;
case GETTER_STUB_FRAME:
case SETTER_STUB_FRAME:
+ case DUPLICATED_OBJECT:
+ case ARGUMENTS_OBJECT:
+ case CAPTURED_OBJECT:
case REGISTER:
case INT32_REGISTER:
case UINT32_REGISTER:
@@ -1729,11 +2729,11 @@ int Translation::NumberOfOperandsFor(Opcode opcode) {
case UINT32_STACK_SLOT:
case DOUBLE_STACK_SLOT:
case LITERAL:
+ case COMPILED_STUB_FRAME:
return 1;
case BEGIN:
case ARGUMENTS_ADAPTOR_FRAME:
case CONSTRUCT_STUB_FRAME:
- case ARGUMENTS_OBJECT:
return 2;
case JS_FRAME:
return 3;
@@ -1746,42 +2746,11 @@ int Translation::NumberOfOperandsFor(Opcode opcode) {
#if defined(OBJECT_PRINT) || defined(ENABLE_DISASSEMBLER)
const char* Translation::StringFor(Opcode opcode) {
+#define TRANSLATION_OPCODE_CASE(item) case item: return #item;
switch (opcode) {
- case BEGIN:
- return "BEGIN";
- case JS_FRAME:
- return "JS_FRAME";
- case ARGUMENTS_ADAPTOR_FRAME:
- return "ARGUMENTS_ADAPTOR_FRAME";
- case CONSTRUCT_STUB_FRAME:
- return "CONSTRUCT_STUB_FRAME";
- case GETTER_STUB_FRAME:
- return "GETTER_STUB_FRAME";
- case SETTER_STUB_FRAME:
- return "SETTER_STUB_FRAME";
- case REGISTER:
- return "REGISTER";
- case INT32_REGISTER:
- return "INT32_REGISTER";
- case UINT32_REGISTER:
- return "UINT32_REGISTER";
- case DOUBLE_REGISTER:
- return "DOUBLE_REGISTER";
- case STACK_SLOT:
- return "STACK_SLOT";
- case INT32_STACK_SLOT:
- return "INT32_STACK_SLOT";
- case UINT32_STACK_SLOT:
- return "UINT32_STACK_SLOT";
- case DOUBLE_STACK_SLOT:
- return "DOUBLE_STACK_SLOT";
- case LITERAL:
- return "LITERAL";
- case ARGUMENTS_OBJECT:
- return "ARGUMENTS_OBJECT";
- case DUPLICATE:
- return "DUPLICATE";
+ TRANSLATION_OPCODE_LIST(TRANSLATION_OPCODE_CASE)
}
+#undef TRANSLATION_OPCODE_CASE
UNREACHABLE();
return "";
}
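
The rewrite above leans on the X-macro idiom: TRANSLATION_OPCODE_LIST (defined in deoptimizer.h later in this diff) expands once into the Opcode enum and once into these switch cases, so the enum and its string table cannot drift apart. A self-contained sketch of the same idiom, using hypothetical names (COLOR_LIST, ColorName):

#include <cstdio>

#define COLOR_LIST(V) \
  V(RED)              \
  V(GREEN)            \
  V(BLUE)

#define DECLARE_ENUM(item) item,
enum Color { COLOR_LIST(DECLARE_ENUM) LAST_COLOR = BLUE };
#undef DECLARE_ENUM

#define CASE_TO_STRING(item) case item: return #item;
static const char* ColorName(Color c) {
  switch (c) {
    COLOR_LIST(CASE_TO_STRING)
  }
  return "";
}
#undef CASE_TO_STRING

int main() {
  std::printf("%s\n", ColorName(GREEN));  // prints "GREEN"
  return 0;
}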
@@ -1789,22 +2758,6 @@ const char* Translation::StringFor(Opcode opcode) {
#endif
-DeoptimizingCodeListNode::DeoptimizingCodeListNode(Code* code): next_(NULL) {
- GlobalHandles* global_handles = Isolate::Current()->global_handles();
- // Globalize the code object and make it weak.
- code_ = Handle<Code>::cast(global_handles->Create(code));
- global_handles->MakeWeak(reinterpret_cast<Object**>(code_.location()),
- this,
- Deoptimizer::HandleWeakDeoptimizedCode);
-}
-
-
-DeoptimizingCodeListNode::~DeoptimizingCodeListNode() {
- GlobalHandles* global_handles = Isolate::Current()->global_handles();
- global_handles->Destroy(reinterpret_cast<Object**>(code_.location()));
-}
-
-
// We can't intermix stack decoding and allocations because
// deoptimization infrastructure is not GC safe.
// Thus we build a temporary structure in malloced space.
@@ -1824,7 +2777,9 @@ SlotRef SlotRef::ComputeSlotForNextArgument(TranslationIterator* iterator,
// Peeled off before getting here.
break;
+ case Translation::DUPLICATED_OBJECT:
case Translation::ARGUMENTS_OBJECT:
+ case Translation::CAPTURED_OBJECT:
      // This can only be emitted for local slots, not for argument slots.
break;
@@ -1832,7 +2787,6 @@ SlotRef SlotRef::ComputeSlotForNextArgument(TranslationIterator* iterator,
case Translation::INT32_REGISTER:
case Translation::UINT32_REGISTER:
case Translation::DOUBLE_REGISTER:
- case Translation::DUPLICATE:
      // We are at a safepoint which corresponds to a call. All registers are
      // saved by the caller, so there would be no live registers at this
// point. Thus these translation commands should not be used.
@@ -1864,8 +2818,13 @@ SlotRef SlotRef::ComputeSlotForNextArgument(TranslationIterator* iterator,
case Translation::LITERAL: {
int literal_index = iterator->Next();
- return SlotRef(data->LiteralArray()->get(literal_index));
+ return SlotRef(data->GetIsolate(),
+ data->LiteralArray()->get(literal_index));
}
+
+ case Translation::COMPILED_STUB_FRAME:
+ UNREACHABLE();
+ break;
}
UNREACHABLE();
@@ -1894,7 +2853,7 @@ Vector<SlotRef> SlotRef::ComputeSlotMappingForArguments(
JavaScriptFrame* frame,
int inlined_jsframe_index,
int formal_parameter_count) {
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
int deopt_index = Safepoint::kNoDeoptimizationIndex;
DeoptimizationInputData* data =
static_cast<OptimizedFrame*>(frame)->GetDeoptimizationData(&deopt_index);
@@ -1961,7 +2920,7 @@ DeoptimizedFrameInfo::DeoptimizedFrameInfo(Deoptimizer* deoptimizer,
expression_stack_ = new Object*[expression_count_];
// Get the source position using the unoptimized code.
Address pc = reinterpret_cast<Address>(output_frame->GetPc());
- Code* code = Code::cast(Isolate::Current()->heap()->FindCodeObject(pc));
+ Code* code = Code::cast(deoptimizer->isolate()->FindCodeObject(pc));
source_position_ = code->SourcePosition(pc);
for (int i = 0; i < expression_count_; i++) {
diff --git a/deps/v8/src/deoptimizer.h b/deps/v8/src/deoptimizer.h
index f67f986ba..4e9d281ea 100644
--- a/deps/v8/src/deoptimizer.h
+++ b/deps/v8/src/deoptimizer.h
@@ -38,36 +38,70 @@
namespace v8 {
namespace internal {
+
+static inline double read_double_value(Address p) {
+#ifdef V8_HOST_CAN_READ_UNALIGNED
+ return Memory::double_at(p);
+#else // V8_HOST_CAN_READ_UNALIGNED
+ // Prevent gcc from using load-double (mips ldc1) on (possibly)
+ // non-64-bit aligned address.
+ union conversion {
+ double d;
+ uint32_t u[2];
+ } c;
+ c.u[0] = *reinterpret_cast<uint32_t*>(p);
+ c.u[1] = *reinterpret_cast<uint32_t*>(p + 4);
+ return c.d;
+#endif // V8_HOST_CAN_READ_UNALIGNED
+}
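
The union above type-puns two aligned 32-bit loads into a double to avoid an unaligned ldc1 on MIPS. For comparison only, a strictly portable sketch of the same read using a byte copy (the function name is hypothetical, not part of this patch):

#include <cstdint>
#include <cstring>

static inline double read_double_value_portable(const uint8_t* p) {
  double d;
  std::memcpy(&d, p, sizeof(d));  // byte-wise copy: no alignment or aliasing assumptions
  return d;
}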
+
+
class FrameDescription;
class TranslationIterator;
-class DeoptimizingCodeListNode;
class DeoptimizedFrameInfo;
+template<typename T>
class HeapNumberMaterializationDescriptor BASE_EMBEDDED {
public:
- HeapNumberMaterializationDescriptor(Address slot_address, double val)
- : slot_address_(slot_address), val_(val) { }
+ HeapNumberMaterializationDescriptor(T destination, double value)
+ : destination_(destination), value_(value) { }
- Address slot_address() const { return slot_address_; }
- double value() const { return val_; }
+ T destination() const { return destination_; }
+ double value() const { return value_; }
private:
- Address slot_address_;
- double val_;
+ T destination_;
+ double value_;
};
-class ArgumentsObjectMaterializationDescriptor BASE_EMBEDDED {
+class ObjectMaterializationDescriptor BASE_EMBEDDED {
public:
- ArgumentsObjectMaterializationDescriptor(Address slot_address, int argc)
- : slot_address_(slot_address), arguments_length_(argc) { }
+ ObjectMaterializationDescriptor(
+ Address slot_address, int frame, int length, int duplicate, bool is_args)
+ : slot_address_(slot_address),
+ jsframe_index_(frame),
+ object_length_(length),
+ duplicate_object_(duplicate),
+ is_arguments_(is_args) { }
Address slot_address() const { return slot_address_; }
- int arguments_length() const { return arguments_length_; }
+ int jsframe_index() const { return jsframe_index_; }
+ int object_length() const { return object_length_; }
+ int duplicate_object() const { return duplicate_object_; }
+ bool is_arguments() const { return is_arguments_; }
+
+ // Only used for allocated receivers in DoComputeConstructStubFrame.
+ void patch_slot_address(intptr_t slot) {
+ slot_address_ = reinterpret_cast<Address>(slot);
+ }
private:
Address slot_address_;
- int arguments_length_;
+ int jsframe_index_;
+ int object_length_;
+ int duplicate_object_;
+ bool is_arguments_;
};
@@ -87,52 +121,43 @@ class OptimizedFunctionVisitor BASE_EMBEDDED {
};
-class Deoptimizer;
-
-
-class DeoptimizerData {
- public:
- DeoptimizerData();
- ~DeoptimizerData();
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- void Iterate(ObjectVisitor* v);
-#endif
-
- private:
- MemoryChunk* eager_deoptimization_entry_code_;
- MemoryChunk* lazy_deoptimization_entry_code_;
- Deoptimizer* current_;
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- DeoptimizedFrameInfo* deoptimized_frame_info_;
-#endif
-
- // List of deoptimized code which still have references from active stack
- // frames. These code objects are needed by the deoptimizer when deoptimizing
- // a frame for which the code object for the function function has been
- // changed from the code present when deoptimizing was done.
- DeoptimizingCodeListNode* deoptimizing_code_list_;
-
- friend class Deoptimizer;
-
- DISALLOW_COPY_AND_ASSIGN(DeoptimizerData);
-};
-
-
class Deoptimizer : public Malloced {
public:
enum BailoutType {
EAGER,
LAZY,
- OSR,
+ SOFT,
// This last bailout type is not really a bailout, but used by the
// debugger to deoptimize stack frames to allow inspection.
DEBUGGER
};
+ static const int kBailoutTypesWithCodeEntry = SOFT + 1;
+
+ struct JumpTableEntry {
+ inline JumpTableEntry(Address entry,
+ Deoptimizer::BailoutType type,
+ bool frame)
+ : label(),
+ address(entry),
+ bailout_type(type),
+ needs_frame(frame) { }
+ Label label;
+ Address address;
+ Deoptimizer::BailoutType bailout_type;
+ bool needs_frame;
+ };
+
+ static bool TraceEnabledFor(BailoutType deopt_type,
+ StackFrame::Type frame_type);
+ static const char* MessageFor(BailoutType type);
+
int output_count() const { return output_count_; }
+ Handle<JSFunction> function() const { return Handle<JSFunction>(function_); }
+ Handle<Code> compiled_code() const { return Handle<Code>(compiled_code_); }
+ BailoutType bailout_type() const { return bailout_type_; }
+
// Number of created JS frames. Not all created frames are necessarily JS.
int jsframe_count() const { return jsframe_count_; }
@@ -166,52 +191,24 @@ class Deoptimizer : public Malloced {
// execution returns.
static void DeoptimizeFunction(JSFunction* function);
- // Iterate over all the functions which share the same code object
- // and make them use unoptimized version.
- static void ReplaceCodeForRelatedFunctions(JSFunction* function, Code* code);
-
- // Deoptimize all functions in the heap.
- static void DeoptimizeAll();
+ // Deoptimize all code in the given isolate.
+ static void DeoptimizeAll(Isolate* isolate);
+ // Deoptimize code associated with the given global object.
static void DeoptimizeGlobalObject(JSObject* object);
- static void VisitAllOptimizedFunctionsForContext(
- Context* context, OptimizedFunctionVisitor* visitor);
-
- static void VisitAllOptimizedFunctionsForGlobalObject(
- JSObject* object, OptimizedFunctionVisitor* visitor);
+ // Deoptimizes all optimized code that has been previously marked
+ // (via code->set_marked_for_deoptimization) and unlinks all functions that
+ // refer to that code.
+ static void DeoptimizeMarkedCode(Isolate* isolate);
- static void VisitAllOptimizedFunctions(OptimizedFunctionVisitor* visitor);
+ // Visit all the known optimized functions in a given isolate.
+ static void VisitAllOptimizedFunctions(
+ Isolate* isolate, OptimizedFunctionVisitor* visitor);
// The size in bytes of the code required at a lazy deopt patch site.
static int patch_size();
- // Patch all stack guard checks in the unoptimized code to
- // unconditionally call replacement_code.
- static void PatchStackCheckCode(Code* unoptimized_code,
- Code* check_code,
- Code* replacement_code);
-
- // Patch stack guard check at instruction before pc_after in
- // the unoptimized code to unconditionally call replacement_code.
- static void PatchStackCheckCodeAt(Code* unoptimized_code,
- Address pc_after,
- Code* check_code,
- Code* replacement_code);
-
- // Change all patched stack guard checks in the unoptimized code
- // back to a normal stack guard check.
- static void RevertStackCheckCode(Code* unoptimized_code,
- Code* check_code,
- Code* replacement_code);
-
- // Change all patched stack guard checks in the unoptimized code
- // back to a normal stack guard check.
- static void RevertStackCheckCodeAt(Code* unoptimized_code,
- Address pc_after,
- Code* check_code,
- Code* replacement_code);
-
~Deoptimizer();
void MaterializeHeapObjects(JavaScriptFrameIterator* it);
@@ -226,8 +223,21 @@ class Deoptimizer : public Malloced {
static void ComputeOutputFrames(Deoptimizer* deoptimizer);
- static Address GetDeoptimizationEntry(int id, BailoutType type);
- static int GetDeoptimizationId(Address addr, BailoutType type);
+
+ enum GetEntryMode {
+ CALCULATE_ENTRY_ADDRESS,
+ ENSURE_ENTRY_CODE
+ };
+
+
+ static Address GetDeoptimizationEntry(
+ Isolate* isolate,
+ int id,
+ BailoutType type,
+ GetEntryMode mode = ENSURE_ENTRY_CODE);
+ static int GetDeoptimizationId(Isolate* isolate,
+ Address addr,
+ BailoutType type);
static int GetOutputInfo(DeoptimizationOutputData* data,
BailoutId node_id,
SharedFunctionInfo* shared);
@@ -259,6 +269,7 @@ class Deoptimizer : public Malloced {
protected:
MacroAssembler* masm() const { return masm_; }
BailoutType type() const { return type_; }
+ Isolate* isolate() const { return masm_->isolate(); }
virtual void GeneratePrologue() { }
@@ -283,8 +294,17 @@ class Deoptimizer : public Malloced {
int ConvertJSFrameIndexToFrameIndex(int jsframe_index);
+ static size_t GetMaxDeoptTableSize();
+
+ static void EnsureCodeForDeoptimizationEntry(Isolate* isolate,
+ BailoutType type,
+ int max_entry_id);
+
+ Isolate* isolate() const { return isolate_; }
+
private:
- static const int kNumberOfEntries = 16384;
+ static const int kMinNumberOfEntries = 64;
+ static const int kMaxNumberOfEntries = 16384;
Deoptimizer(Isolate* isolate,
JSFunction* function,
@@ -293,10 +313,11 @@ class Deoptimizer : public Malloced {
Address from,
int fp_to_sp_delta,
Code* optimized_code);
+ Code* FindOptimizedCode(JSFunction* function, Code* optimized_code);
+ void PrintFunctionName();
void DeleteFrameDescriptions();
void DoComputeOutputFrames();
- void DoComputeOsrOutputFrame();
void DoComputeJSFrame(TranslationIterator* iterator, int frame_index);
void DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
int frame_index);
@@ -305,15 +326,22 @@ class Deoptimizer : public Malloced {
void DoComputeAccessorStubFrame(TranslationIterator* iterator,
int frame_index,
bool is_setter_stub_frame);
+ void DoComputeCompiledStubFrame(TranslationIterator* iterator,
+ int frame_index);
+
+ void DoTranslateObject(TranslationIterator* iterator,
+ int object_index,
+ int field_index);
+
+ enum DeoptimizerTranslatedValueType {
+ TRANSLATED_VALUE_IS_NATIVE,
+ TRANSLATED_VALUE_IS_TAGGED
+ };
+
void DoTranslateCommand(TranslationIterator* iterator,
- int frame_index,
- unsigned output_offset);
- // Translate a command for OSR. Updates the input offset to be used for
- // the next command. Returns false if translation of the command failed
- // (e.g., a number conversion failed) and may or may not have updated the
- // input offset.
- bool DoOsrTranslateCommand(TranslationIterator* iterator,
- int* input_offset);
+ int frame_index,
+ unsigned output_offset,
+ DeoptimizerTranslatedValueType value_type = TRANSLATED_VALUE_IS_TAGGED);
unsigned ComputeInputFrameSize() const;
unsigned ComputeFixedSize(JSFunction* function) const;
@@ -323,28 +351,70 @@ class Deoptimizer : public Malloced {
Object* ComputeLiteral(int index) const;
- void AddArgumentsObject(intptr_t slot_address, int argc);
- void AddArgumentsObjectValue(intptr_t value);
+ void AddObjectStart(intptr_t slot_address, int argc, bool is_arguments);
+ void AddObjectDuplication(intptr_t slot, int object_index);
+ void AddObjectTaggedValue(intptr_t value);
+ void AddObjectDoubleValue(double value);
void AddDoubleValue(intptr_t slot_address, double value);
- static MemoryChunk* CreateCode(BailoutType type);
+ bool ArgumentsObjectIsAdapted(int object_index) {
+ ObjectMaterializationDescriptor desc = deferred_objects_.at(object_index);
+ int reverse_jsframe_index = jsframe_count_ - desc.jsframe_index() - 1;
+ return jsframe_has_adapted_arguments_[reverse_jsframe_index];
+ }
+
+ Handle<JSFunction> ArgumentsObjectFunction(int object_index) {
+ ObjectMaterializationDescriptor desc = deferred_objects_.at(object_index);
+ int reverse_jsframe_index = jsframe_count_ - desc.jsframe_index() - 1;
+ return jsframe_functions_[reverse_jsframe_index];
+ }
+
+ // Helper function for heap object materialization.
+ Handle<Object> MaterializeNextHeapObject();
+ Handle<Object> MaterializeNextValue();
+
static void GenerateDeoptimizationEntries(
MacroAssembler* masm, int count, BailoutType type);
- // Weak handle callback for deoptimizing code objects.
- static void HandleWeakDeoptimizedCode(
- v8::Persistent<v8::Value> obj, void* data);
- static Code* FindDeoptimizingCodeFromAddress(Address addr);
- static void RemoveDeoptimizingCode(Code* code);
+ // Marks all the code in the given context for deoptimization.
+ static void MarkAllCodeForContext(Context* native_context);
+
+ // Visit all the known optimized functions in a given context.
+ static void VisitAllOptimizedFunctionsForContext(
+ Context* context, OptimizedFunctionVisitor* visitor);
+
+ // Deoptimizes all code marked in the given context.
+ static void DeoptimizeMarkedCodeForContext(Context* native_context);
+
+ // Patch the given code so that it will deoptimize itself.
+ static void PatchCodeForDeoptimization(Isolate* isolate, Code* code);
+
+ // Searches the list of known deoptimizing code for a Code object
+ // containing the given address (which is supposedly faster than
+ // searching all code objects).
+ Code* FindDeoptimizingCode(Address addr);
  // Fill the input frame from a JavaScript frame. This is used when
// the debugger needs to inspect an optimized frame. For normal
// deoptimizations the input frame is filled in generated code.
void FillInputFrame(Address tos, JavaScriptFrame* frame);
+ // Fill the given output frame's registers to contain the failure handler
+ // address and the number of parameters for a stub failure trampoline.
+ void SetPlatformCompiledStubRegisters(FrameDescription* output_frame,
+ CodeStubInterfaceDescriptor* desc);
+
+ // Fill the given output frame's double registers with the original values
+ // from the input frame's double registers.
+ void CopyDoubleRegisters(FrameDescription* output_frame);
+
+ // Determines whether the input frame contains alignment padding by looking
+ // at the dynamic alignment state slot inside the frame.
+ bool HasAlignmentPadding(JSFunction* function);
+
Isolate* isolate_;
JSFunction* function_;
- Code* optimized_code_;
+ Code* compiled_code_;
unsigned bailout_id_;
BailoutType bailout_type_;
Address from_;
@@ -360,14 +430,32 @@ class Deoptimizer : public Malloced {
// Array of output frame descriptions.
FrameDescription** output_;
- List<Object*> deferred_arguments_objects_values_;
- List<ArgumentsObjectMaterializationDescriptor> deferred_arguments_objects_;
- List<HeapNumberMaterializationDescriptor> deferred_heap_numbers_;
+ // Deferred values to be materialized.
+ List<Object*> deferred_objects_tagged_values_;
+ List<HeapNumberMaterializationDescriptor<int> >
+ deferred_objects_double_values_;
+ List<ObjectMaterializationDescriptor> deferred_objects_;
+ List<HeapNumberMaterializationDescriptor<Address> > deferred_heap_numbers_;
+
+ // Output frame information. Only used during heap object materialization.
+ List<Handle<JSFunction> > jsframe_functions_;
+ List<bool> jsframe_has_adapted_arguments_;
+
+ // Materialized objects. Only used during heap object materialization.
+ List<Handle<Object> >* materialized_values_;
+ List<Handle<Object> >* materialized_objects_;
+ int materialization_value_index_;
+ int materialization_object_index_;
+
+#ifdef DEBUG
+ DisallowHeapAllocation* disallow_heap_allocation_;
+#endif // DEBUG
+
+ bool trace_;
static const int table_entry_size_;
friend class FrameDescription;
- friend class DeoptimizingCodeListNode;
friend class DeoptimizedFrameInfo;
};
@@ -406,27 +494,27 @@ class FrameDescription {
double GetDoubleFrameSlot(unsigned offset) {
intptr_t* ptr = GetFrameSlotPointer(offset);
-#if V8_TARGET_ARCH_MIPS
- // Prevent gcc from using load-double (mips ldc1) on (possibly)
- // non-64-bit aligned double. Uses two lwc1 instructions.
- union conversion {
- double d;
- uint32_t u[2];
- } c;
- c.u[0] = *reinterpret_cast<uint32_t*>(ptr);
- c.u[1] = *(reinterpret_cast<uint32_t*>(ptr) + 1);
- return c.d;
-#else
- return *reinterpret_cast<double*>(ptr);
-#endif
+ return read_double_value(reinterpret_cast<Address>(ptr));
}
void SetFrameSlot(unsigned offset, intptr_t value) {
*GetFrameSlotPointer(offset) = value;
}
+ void SetCallerPc(unsigned offset, intptr_t value);
+
+ void SetCallerFp(unsigned offset, intptr_t value);
+
intptr_t GetRegister(unsigned n) const {
- ASSERT(n < ARRAY_SIZE(registers_));
+#if DEBUG
+ // This convoluted ASSERT is needed to work around a gcc problem that
+ // improperly detects an array bounds overflow in optimized debug builds
+ // when using a plain ASSERT.
+ if (n >= ARRAY_SIZE(registers_)) {
+ ASSERT(false);
+ return 0;
+ }
+#endif
return registers_[n];
}
@@ -514,16 +602,13 @@ class FrameDescription {
uintptr_t frame_size_; // Number of bytes.
JSFunction* function_;
intptr_t registers_[Register::kNumRegisters];
- double double_registers_[DoubleRegister::kNumAllocatableRegisters];
+ double double_registers_[DoubleRegister::kMaxNumRegisters];
intptr_t top_;
intptr_t pc_;
intptr_t fp_;
intptr_t context_;
StackFrame::Type type_;
Smi* state_;
-#ifdef DEBUG
- Code::Kind kind_;
-#endif
// Continuation is the PC where the execution continues after
// deoptimizing.
@@ -543,6 +628,32 @@ class FrameDescription {
};
+class DeoptimizerData {
+ public:
+ explicit DeoptimizerData(MemoryAllocator* allocator);
+ ~DeoptimizerData();
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ void Iterate(ObjectVisitor* v);
+#endif
+
+ private:
+ MemoryAllocator* allocator_;
+ int deopt_entry_code_entries_[Deoptimizer::kBailoutTypesWithCodeEntry];
+ MemoryChunk* deopt_entry_code_[Deoptimizer::kBailoutTypesWithCodeEntry];
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ DeoptimizedFrameInfo* deoptimized_frame_info_;
+#endif
+
+ Deoptimizer* current_;
+
+ friend class Deoptimizer;
+
+ DISALLOW_COPY_AND_ASSIGN(DeoptimizerData);
+};
+
+
class TranslationBuffer BASE_EMBEDDED {
public:
explicit TranslationBuffer(Zone* zone) : contents_(256, zone) { }
@@ -550,7 +661,7 @@ class TranslationBuffer BASE_EMBEDDED {
int CurrentIndex() const { return contents_.length(); }
void Add(int32_t value, Zone* zone);
- Handle<ByteArray> CreateByteArray();
+ Handle<ByteArray> CreateByteArray(Factory* factory);
private:
ZoneList<uint8_t> contents_;
@@ -578,30 +689,36 @@ class TranslationIterator BASE_EMBEDDED {
};
+#define TRANSLATION_OPCODE_LIST(V) \
+ V(BEGIN) \
+ V(JS_FRAME) \
+ V(CONSTRUCT_STUB_FRAME) \
+ V(GETTER_STUB_FRAME) \
+ V(SETTER_STUB_FRAME) \
+ V(ARGUMENTS_ADAPTOR_FRAME) \
+ V(COMPILED_STUB_FRAME) \
+ V(DUPLICATED_OBJECT) \
+ V(ARGUMENTS_OBJECT) \
+ V(CAPTURED_OBJECT) \
+ V(REGISTER) \
+ V(INT32_REGISTER) \
+ V(UINT32_REGISTER) \
+ V(DOUBLE_REGISTER) \
+ V(STACK_SLOT) \
+ V(INT32_STACK_SLOT) \
+ V(UINT32_STACK_SLOT) \
+ V(DOUBLE_STACK_SLOT) \
+ V(LITERAL)
+
+
class Translation BASE_EMBEDDED {
public:
+#define DECLARE_TRANSLATION_OPCODE_ENUM(item) item,
enum Opcode {
- BEGIN,
- JS_FRAME,
- CONSTRUCT_STUB_FRAME,
- GETTER_STUB_FRAME,
- SETTER_STUB_FRAME,
- ARGUMENTS_ADAPTOR_FRAME,
- REGISTER,
- INT32_REGISTER,
- UINT32_REGISTER,
- DOUBLE_REGISTER,
- STACK_SLOT,
- INT32_STACK_SLOT,
- UINT32_STACK_SLOT,
- DOUBLE_STACK_SLOT,
- LITERAL,
- ARGUMENTS_OBJECT,
-
- // A prefix indicating that the next command is a duplicate of the one
- // that follows it.
- DUPLICATE
+ TRANSLATION_OPCODE_LIST(DECLARE_TRANSLATION_OPCODE_ENUM)
+ LAST = LITERAL
};
+#undef DECLARE_TRANSLATION_OPCODE_ENUM
Translation(TranslationBuffer* buffer, int frame_count, int jsframe_count,
Zone* zone)
@@ -617,10 +734,14 @@ class Translation BASE_EMBEDDED {
// Commands.
void BeginJSFrame(BailoutId node_id, int literal_id, unsigned height);
+ void BeginCompiledStubFrame();
void BeginArgumentsAdaptorFrame(int literal_id, unsigned height);
void BeginConstructStubFrame(int literal_id, unsigned height);
void BeginGetterStubFrame(int literal_id);
void BeginSetterStubFrame(int literal_id);
+ void BeginArgumentsObject(int args_length);
+ void BeginCapturedObject(int length);
+ void DuplicateObject(int object_index);
void StoreRegister(Register reg);
void StoreInt32Register(Register reg);
void StoreUint32Register(Register reg);
@@ -630,8 +751,7 @@ class Translation BASE_EMBEDDED {
void StoreUint32StackSlot(int index);
void StoreDoubleStackSlot(int index);
void StoreLiteral(int literal_id);
- void StoreArgumentsObject(int args_index, int args_length);
- void MarkDuplicate();
+ void StoreArgumentsObject(bool args_known, int args_index, int args_length);
Zone* zone() const { return zone_; }
@@ -651,26 +771,6 @@ class Translation BASE_EMBEDDED {
};
-// Linked list holding deoptimizing code objects. The deoptimizing code objects
-// are kept as weak handles until they are no longer activated on the stack.
-class DeoptimizingCodeListNode : public Malloced {
- public:
- explicit DeoptimizingCodeListNode(Code* code);
- ~DeoptimizingCodeListNode();
-
- DeoptimizingCodeListNode* next() const { return next_; }
- void set_next(DeoptimizingCodeListNode* next) { next_ = next; }
- Handle<Code> code() const { return code_; }
-
- private:
- // Global (weak) handle to the deoptimizing code object.
- Handle<Code> code_;
-
- // Next pointer for linked list.
- DeoptimizingCodeListNode* next_;
-};
-
-
class SlotRef BASE_EMBEDDED {
public:
enum SlotRepresentation {
@@ -688,36 +788,35 @@ class SlotRef BASE_EMBEDDED {
SlotRef(Address addr, SlotRepresentation representation)
: addr_(addr), representation_(representation) { }
- explicit SlotRef(Object* literal)
- : literal_(literal), representation_(LITERAL) { }
+ SlotRef(Isolate* isolate, Object* literal)
+ : literal_(literal, isolate), representation_(LITERAL) { }
- Handle<Object> GetValue() {
+ Handle<Object> GetValue(Isolate* isolate) {
switch (representation_) {
case TAGGED:
- return Handle<Object>(Memory::Object_at(addr_));
+ return Handle<Object>(Memory::Object_at(addr_), isolate);
case INT32: {
int value = Memory::int32_at(addr_);
if (Smi::IsValid(value)) {
- return Handle<Object>(Smi::FromInt(value));
+ return Handle<Object>(Smi::FromInt(value), isolate);
} else {
- return Isolate::Current()->factory()->NewNumberFromInt(value);
+ return isolate->factory()->NewNumberFromInt(value);
}
}
case UINT32: {
uint32_t value = Memory::uint32_at(addr_);
if (value <= static_cast<uint32_t>(Smi::kMaxValue)) {
- return Handle<Object>(Smi::FromInt(static_cast<int>(value)));
+ return Handle<Object>(Smi::FromInt(static_cast<int>(value)), isolate);
} else {
- return Isolate::Current()->factory()->NewNumber(
- static_cast<double>(value));
+ return isolate->factory()->NewNumber(static_cast<double>(value));
}
}
case DOUBLE: {
- double value = Memory::double_at(addr_);
- return Isolate::Current()->factory()->NewNumber(value);
+ double value = read_double_value(addr_);
+ return isolate->factory()->NewNumber(value);
}
case LITERAL:
diff --git a/deps/v8/src/disassembler.cc b/deps/v8/src/disassembler.cc
index 9f8b9a820..d7898ddcd 100644
--- a/deps/v8/src/disassembler.cc
+++ b/deps/v8/src/disassembler.cc
@@ -50,8 +50,8 @@ void Disassembler::Dump(FILE* f, byte* begin, byte* end) {
pc - begin,
*pc);
} else {
- fprintf(f, "%" V8PRIxPTR " %4" V8PRIdPTR " %02x\n",
- reinterpret_cast<uintptr_t>(pc), pc - begin, *pc);
+ PrintF(f, "%" V8PRIxPTR " %4" V8PRIdPTR " %02x\n",
+ reinterpret_cast<uintptr_t>(pc), pc - begin, *pc);
}
}
}
@@ -71,7 +71,7 @@ class V8NameConverter: public disasm::NameConverter {
const char* V8NameConverter::NameOfAddress(byte* pc) const {
- const char* name = Isolate::Current()->builtins()->Lookup(pc);
+ const char* name = code_->GetIsolate()->builtins()->Lookup(pc);
if (name != NULL) {
OS::SNPrintF(v8_buffer_, "%s (%p)", name, pc);
return v8_buffer_.start();
@@ -101,24 +101,24 @@ static void DumpBuffer(FILE* f, StringBuilder* out) {
if (f == NULL) {
PrintF("%s\n", out->Finalize());
} else {
- fprintf(f, "%s\n", out->Finalize());
+ PrintF(f, "%s\n", out->Finalize());
}
out->Reset();
}
-
static const int kOutBufferSize = 2048 + String::kMaxShortPrintLength;
static const int kRelocInfoPosition = 57;
-static int DecodeIt(FILE* f,
+static int DecodeIt(Isolate* isolate,
+ FILE* f,
const V8NameConverter& converter,
byte* begin,
byte* end) {
- NoHandleAllocation ha;
- AssertNoAllocation no_alloc;
- ExternalReferenceEncoder ref_encoder;
- Heap* heap = HEAP;
+ SealHandleScope shs(isolate);
+ DisallowHeapAllocation no_alloc;
+ ExternalReferenceEncoder ref_encoder(isolate);
+ Heap* heap = isolate->heap();
v8::internal::EmbeddedVector<char, 128> decode_buffer;
v8::internal::EmbeddedVector<char, kOutBufferSize> out_buffer;
@@ -250,7 +250,7 @@ static int DecodeIt(FILE* f,
if (kind == Code::CALL_IC || kind == Code::KEYED_CALL_IC) {
out.AddFormatted(", argc = %d", code->arguments_count());
}
- } else if (kind == Code::STUB) {
+ } else if (kind == Code::STUB || kind == Code::HANDLER) {
// Reverse lookup required as the minor key cannot be retrieved
// from the code object.
Object* obj = heap->code_stubs()->SlowReverseLookup(code);
@@ -281,13 +281,29 @@ static int DecodeIt(FILE* f,
if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
out.AddFormatted(" (id = %d)", static_cast<int>(relocinfo.data()));
}
- } else if (rmode == RelocInfo::RUNTIME_ENTRY &&
- Isolate::Current()->deoptimizer_data() != NULL) {
+ } else if (RelocInfo::IsRuntimeEntry(rmode) &&
+ isolate->deoptimizer_data() != NULL) {
      // A runtime entry relocinfo might be a deoptimization bailout.
Address addr = relocinfo.target_address();
- int id = Deoptimizer::GetDeoptimizationId(addr, Deoptimizer::EAGER);
+ int id = Deoptimizer::GetDeoptimizationId(isolate,
+ addr,
+ Deoptimizer::EAGER);
if (id == Deoptimizer::kNotDeoptimizationEntry) {
- out.AddFormatted(" ;; %s", RelocInfo::RelocModeName(rmode));
+ id = Deoptimizer::GetDeoptimizationId(isolate,
+ addr,
+ Deoptimizer::LAZY);
+ if (id == Deoptimizer::kNotDeoptimizationEntry) {
+ id = Deoptimizer::GetDeoptimizationId(isolate,
+ addr,
+ Deoptimizer::SOFT);
+ if (id == Deoptimizer::kNotDeoptimizationEntry) {
+ out.AddFormatted(" ;; %s", RelocInfo::RelocModeName(rmode));
+ } else {
+ out.AddFormatted(" ;; soft deoptimization bailout %d", id);
+ }
+ } else {
+ out.AddFormatted(" ;; lazy deoptimization bailout %d", id);
+ }
} else {
out.AddFormatted(" ;; deoptimization bailout %d", id);
}
@@ -314,33 +330,38 @@ static int DecodeIt(FILE* f,
}
-int Disassembler::Decode(FILE* f, byte* begin, byte* end) {
+int Disassembler::Decode(Isolate* isolate, FILE* f, byte* begin, byte* end) {
V8NameConverter defaultConverter(NULL);
- return DecodeIt(f, defaultConverter, begin, end);
+ return DecodeIt(isolate, f, defaultConverter, begin, end);
}
// Called by Code::CodePrint.
void Disassembler::Decode(FILE* f, Code* code) {
- int decode_size = (code->kind() == Code::OPTIMIZED_FUNCTION)
+ Isolate* isolate = code->GetIsolate();
+ int decode_size = code->is_crankshafted()
? static_cast<int>(code->safepoint_table_offset())
: code->instruction_size();
- // If there might be a stack check table, stop before reaching it.
+ // If there might be a back edge table, stop before reaching it.
if (code->kind() == Code::FUNCTION) {
decode_size =
- Min(decode_size, static_cast<int>(code->stack_check_table_offset()));
+ Min(decode_size, static_cast<int>(code->back_edge_table_offset()));
}
byte* begin = code->instruction_start();
byte* end = begin + decode_size;
V8NameConverter v8NameConverter(code);
- DecodeIt(f, v8NameConverter, begin, end);
+ DecodeIt(isolate, f, v8NameConverter, begin, end);
}
#else // ENABLE_DISASSEMBLER
void Disassembler::Dump(FILE* f, byte* begin, byte* end) {}
-int Disassembler::Decode(FILE* f, byte* begin, byte* end) { return 0; }
+int Disassembler::Decode(Isolate* isolate, FILE* f, byte* begin, byte* end) {
+ return 0;
+}
+
+
void Disassembler::Decode(FILE* f, Code* code) {}
#endif // ENABLE_DISASSEMBLER
diff --git a/deps/v8/src/disassembler.h b/deps/v8/src/disassembler.h
index 4a87dca67..878915003 100644
--- a/deps/v8/src/disassembler.h
+++ b/deps/v8/src/disassembler.h
@@ -41,7 +41,7 @@ class Disassembler : public AllStatic {
  // Decode instructions in the interval [begin, end) and print the
// code into f. Returns the number of bytes disassembled or 1 if no
// instruction could be decoded.
- static int Decode(FILE* f, byte* begin, byte* end);
+ static int Decode(Isolate* isolate, FILE* f, byte* begin, byte* end);
// Decode instructions in code.
static void Decode(FILE* f, Code* code);
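
A hedged usage sketch of the re-threaded entry point: it assumes a Code* code and its Isolate* are in scope inside V8 (so it is not compilable standalone), and mirrors how Decode(FILE*, Code*) derives the byte range above:

// Disassemble a whole code object to stdout with the explicit-isolate overload.
Isolate* isolate = code->GetIsolate();
byte* begin = code->instruction_start();
byte* end = begin + code->instruction_size();
Disassembler::Decode(isolate, stdout, begin, end);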
diff --git a/deps/v8/src/dtoa.cc b/deps/v8/src/dtoa.cc
index 00233a882..bda67205c 100644
--- a/deps/v8/src/dtoa.cc
+++ b/deps/v8/src/dtoa.cc
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include <math.h>
+#include <cmath>
#include "../include/v8stdint.h"
#include "checks.h"
diff --git a/deps/v8/src/effects.h b/deps/v8/src/effects.h
new file mode 100644
index 000000000..afb8f9e54
--- /dev/null
+++ b/deps/v8/src/effects.h
@@ -0,0 +1,361 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_EFFECTS_H_
+#define V8_EFFECTS_H_
+
+#include "v8.h"
+
+#include "types.h"
+
+namespace v8 {
+namespace internal {
+
+
+// A simple struct to represent (write) effects. A write is represented as a
+// modification of type bounds (e.g. of a variable).
+//
+// An effect can either be definite, if the write is known to have taken place,
+// or 'possible', if it was optional. The difference is relevant when composing
+// effects.
+//
+// There are two ways to compose effects: sequentially (they happen one after
+// the other) or alternatively (either one or the other happens). A definite
+// effect cancels out any previous effect upon sequencing. A possible effect
+// merges into a previous effect, i.e., type bounds are merged. Alternative
+// composition always merges bounds. It yields a possible effect if at least
+// one was only possible.
+struct Effect {
+ enum Modality { POSSIBLE, DEFINITE };
+
+ Modality modality;
+ Bounds bounds;
+
+ Effect() : modality(DEFINITE) {}
+ Effect(Bounds b, Modality m = DEFINITE) : modality(m), bounds(b) {}
+
+ // The unknown effect.
+ static Effect Unknown(Isolate* isolate) {
+ return Effect(Bounds::Unbounded(isolate), POSSIBLE);
+ }
+
+ static Effect Forget(Isolate* isolate) {
+ return Effect(Bounds::Unbounded(isolate), DEFINITE);
+ }
+
+ // Sequential composition, as in 'e1; e2'.
+ static Effect Seq(Effect e1, Effect e2, Isolate* isolate) {
+ if (e2.modality == DEFINITE) return e2;
+ return Effect(Bounds::Either(e1.bounds, e2.bounds, isolate), e1.modality);
+ }
+
+ // Alternative composition, as in 'cond ? e1 : e2'.
+ static Effect Alt(Effect e1, Effect e2, Isolate* isolate) {
+ return Effect(
+ Bounds::Either(e1.bounds, e2.bounds, isolate),
+ e1.modality == POSSIBLE ? POSSIBLE : e2.modality);
+ }
+};
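
To make the composition rules above concrete, a simplified standalone model follows; it replaces Bounds with a plain integer interval and drops the Isolate parameter, so the types (Interval, SimpleEffect) are stand-ins, not V8 code:

#include <algorithm>
#include <cassert>

// Stand-in for Bounds: a closed integer interval that can be merged.
struct Interval {
  int lo, hi;
  static Interval Either(Interval a, Interval b) {
    return Interval{std::min(a.lo, b.lo), std::max(a.hi, b.hi)};
  }
};

struct SimpleEffect {
  enum Modality { POSSIBLE, DEFINITE };
  Modality modality;
  Interval bounds;

  // 'e1; e2' -- a definite second write wins, otherwise bounds merge.
  static SimpleEffect Seq(SimpleEffect e1, SimpleEffect e2) {
    if (e2.modality == DEFINITE) return e2;
    return SimpleEffect{e1.modality, Interval::Either(e1.bounds, e2.bounds)};
  }

  // 'cond ? e1 : e2' -- always merge; possible if either branch was possible.
  static SimpleEffect Alt(SimpleEffect e1, SimpleEffect e2) {
    return SimpleEffect{
        e1.modality == POSSIBLE ? POSSIBLE : e2.modality,
        Interval::Either(e1.bounds, e2.bounds)};
  }
};

int main() {
  SimpleEffect a{SimpleEffect::DEFINITE, {0, 0}};  // x = 0
  SimpleEffect b{SimpleEffect::DEFINITE, {5, 5}};  // x = 5
  SimpleEffect seq = SimpleEffect::Seq(a, b);      // second write wins
  assert(seq.bounds.lo == 5 && seq.bounds.hi == 5);
  SimpleEffect alt = SimpleEffect::Alt(a, b);      // either branch may run
  assert(alt.bounds.lo == 0 && alt.bounds.hi == 5);
  assert(alt.modality == SimpleEffect::DEFINITE);  // both branches were definite
  return 0;
}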
+
+
+// Classes encapsulating sets of effects on variables.
+//
+// Effects maps variables to effects and supports sequential and alternative
+// composition.
+//
+// NestedEffects is an incremental representation that supports persistence
+// through functional extension. It represents the map as an adjoin of a list
+// of maps, whose tail can be shared.
+//
+// Both classes provide similar interfaces, implemented in parts through the
+// EffectsMixin below (using sandwich style, to work around the style guide's
+// MI restriction).
+//
+// We also (ab)use Effects/NestedEffects as a representation for abstract
+// store typings. In that case, only definite effects are of interest.
+
+template<class Var, class Base, class Effects>
+class EffectsMixin: public Base {
+ public:
+ explicit EffectsMixin(Zone* zone) : Base(zone) {}
+
+ Effect Lookup(Var var) {
+ Locator locator;
+ return this->Find(var, &locator)
+ ? locator.value() : Effect::Unknown(Base::isolate());
+ }
+
+ Bounds LookupBounds(Var var) {
+ Effect effect = Lookup(var);
+ return effect.modality == Effect::DEFINITE
+ ? effect.bounds : Bounds::Unbounded(Base::isolate());
+ }
+
+ // Sequential composition.
+ void Seq(Var var, Effect effect) {
+ Locator locator;
+ if (!this->Insert(var, &locator)) {
+ effect = Effect::Seq(locator.value(), effect, Base::isolate());
+ }
+ locator.set_value(effect);
+ }
+
+ void Seq(Effects that) {
+ SeqMerger<EffectsMixin> merge = { *this };
+ that.ForEach(&merge);
+ }
+
+ // Alternative composition.
+ void Alt(Var var, Effect effect) {
+ Locator locator;
+ if (!this->Insert(var, &locator)) {
+ effect = Effect::Alt(locator.value(), effect, Base::isolate());
+ }
+ locator.set_value(effect);
+ }
+
+ void Alt(Effects that) {
+ AltWeakener<EffectsMixin> weaken = { *this, that };
+ this->ForEach(&weaken);
+ AltMerger<EffectsMixin> merge = { *this };
+ that.ForEach(&merge);
+ }
+
+ // Invalidation.
+ void Forget() {
+ Overrider override = {
+ Effect::Forget(Base::isolate()), Effects(Base::zone()) };
+ this->ForEach(&override);
+ Seq(override.effects);
+ }
+
+ protected:
+ typedef typename Base::Locator Locator;
+
+ template<class Self>
+ struct SeqMerger {
+ void Call(Var var, Effect effect) { self.Seq(var, effect); }
+ Self self;
+ };
+
+ template<class Self>
+ struct AltMerger {
+ void Call(Var var, Effect effect) { self.Alt(var, effect); }
+ Self self;
+ };
+
+ template<class Self>
+ struct AltWeakener {
+ void Call(Var var, Effect effect) {
+ if (effect.modality == Effect::DEFINITE && !other.Contains(var)) {
+ effect.modality = Effect::POSSIBLE;
+ Locator locator;
+ self.Insert(var, &locator);
+ locator.set_value(effect);
+ }
+ }
+ Self self;
+ Effects other;
+ };
+
+ struct Overrider {
+ void Call(Var var, Effect effect) { effects.Seq(var, new_effect); }
+ Effect new_effect;
+ Effects effects;
+ };
+};
+
+
+template<class Var, Var kNoVar> class Effects;
+template<class Var, Var kNoVar> class NestedEffectsBase;
+
+template<class Var, Var kNoVar>
+class EffectsBase {
+ public:
+ explicit EffectsBase(Zone* zone) : map_(new(zone) Mapping(zone)) {}
+
+ bool IsEmpty() { return map_->is_empty(); }
+
+ protected:
+ friend class NestedEffectsBase<Var, kNoVar>;
+ friend class
+ EffectsMixin<Var, NestedEffectsBase<Var, kNoVar>, Effects<Var, kNoVar> >;
+
+ Zone* zone() { return map_->allocator().zone(); }
+ Isolate* isolate() { return zone()->isolate(); }
+
+ struct SplayTreeConfig {
+ typedef Var Key;
+ typedef Effect Value;
+ static const Var kNoKey = kNoVar;
+ static Effect NoValue() { return Effect(); }
+ static int Compare(int x, int y) { return y - x; }
+ };
+ typedef ZoneSplayTree<SplayTreeConfig> Mapping;
+ typedef typename Mapping::Locator Locator;
+
+ bool Contains(Var var) {
+ ASSERT(var != kNoVar);
+ return map_->Contains(var);
+ }
+ bool Find(Var var, Locator* locator) {
+ ASSERT(var != kNoVar);
+ return map_->Find(var, locator);
+ }
+ bool Insert(Var var, Locator* locator) {
+ ASSERT(var != kNoVar);
+ return map_->Insert(var, locator);
+ }
+
+ template<class Callback>
+ void ForEach(Callback* callback) {
+ return map_->ForEach(callback);
+ }
+
+ private:
+ Mapping* map_;
+};
+
+template<class Var, Var kNoVar>
+const Var EffectsBase<Var, kNoVar>::SplayTreeConfig::kNoKey;
+
+template<class Var, Var kNoVar>
+class Effects: public
+ EffectsMixin<Var, EffectsBase<Var, kNoVar>, Effects<Var, kNoVar> > {
+ public:
+ explicit Effects(Zone* zone)
+ : EffectsMixin<Var, EffectsBase<Var, kNoVar>, Effects<Var, kNoVar> >(zone)
+ {}
+};
+
+
+template<class Var, Var kNoVar>
+class NestedEffectsBase {
+ public:
+ explicit NestedEffectsBase(Zone* zone) : node_(new(zone) Node(zone)) {}
+
+ template<class Callback>
+ void ForEach(Callback* callback) {
+ if (node_->previous) NestedEffectsBase(node_->previous).ForEach(callback);
+ node_->effects.ForEach(callback);
+ }
+
+ Effects<Var, kNoVar> Top() { return node_->effects; }
+
+ bool IsEmpty() {
+ for (Node* node = node_; node != NULL; node = node->previous) {
+ if (!node->effects.IsEmpty()) return false;
+ }
+ return true;
+ }
+
+ protected:
+ typedef typename EffectsBase<Var, kNoVar>::Locator Locator;
+
+ Zone* zone() { return node_->zone; }
+ Isolate* isolate() { return zone()->isolate(); }
+
+ void push() { node_ = new(node_->zone) Node(node_->zone, node_); }
+ void pop() { node_ = node_->previous; }
+ bool is_empty() { return node_ == NULL; }
+
+ bool Contains(Var var) {
+ ASSERT(var != kNoVar);
+ for (Node* node = node_; node != NULL; node = node->previous) {
+ if (node->effects.Contains(var)) return true;
+ }
+ return false;
+ }
+
+ bool Find(Var var, Locator* locator) {
+ ASSERT(var != kNoVar);
+ for (Node* node = node_; node != NULL; node = node->previous) {
+ if (node->effects.Find(var, locator)) return true;
+ }
+ return false;
+ }
+
+ bool Insert(Var var, Locator* locator);
+
+ private:
+ struct Node: ZoneObject {
+ Zone* zone;
+ Effects<Var, kNoVar> effects;
+ Node* previous;
+ explicit Node(Zone* zone, Node* previous = NULL)
+ : zone(zone), effects(zone), previous(previous) {}
+ };
+
+ explicit NestedEffectsBase(Node* node) : node_(node) {}
+
+ Node* node_;
+};
+
+
+template<class Var, Var kNoVar>
+bool NestedEffectsBase<Var, kNoVar>::Insert(Var var, Locator* locator) {
+ ASSERT(var != kNoVar);
+ if (!node_->effects.Insert(var, locator)) return false;
+ Locator shadowed;
+ for (Node* node = node_->previous; node != NULL; node = node->previous) {
+ if (node->effects.Find(var, &shadowed)) {
+ // Initialize with shadowed entry.
+ locator->set_value(shadowed.value());
+ return false;
+ }
+ }
+ return true;
+}
+
+
+template<class Var, Var kNoVar>
+class NestedEffects: public
+ EffectsMixin<Var, NestedEffectsBase<Var, kNoVar>, Effects<Var, kNoVar> > {
+ public:
+ explicit NestedEffects(Zone* zone) :
+ EffectsMixin<Var, NestedEffectsBase<Var, kNoVar>, Effects<Var, kNoVar> >(
+ zone) {}
+
+ // Create an extension of the current effect set. The current set should not
+ // be modified while the extension is in use.
+ NestedEffects Push() {
+ NestedEffects result = *this;
+ result.push();
+ return result;
+ }
+
+ NestedEffects Pop() {
+ NestedEffects result = *this;
+ result.pop();
+ ASSERT(!this->is_empty());
+ return result;
+ }
+};
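
A hedged usage sketch of the nesting API declared above, assuming a Zone* zone and Isolate* isolate are available; using int variable ids with -1 as kNoVar is an illustrative choice, not something this patch mandates:

// Illustrative only: thread a store typing through one branch of a conditional.
typedef NestedEffects<int, -1> Store;

Store store(zone);
Store then_store = store.Push();                     // extension for the 'then' branch;
                                                     // 'store' must not change meanwhile
then_store.Seq(/* var id */ 1,
               Effect(Bounds::Unbounded(isolate)));  // definite write to variable 1
store.Alt(then_store.Top());                         // merge the branch effects back in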
+
+} } // namespace v8::internal
+
+#endif // V8_EFFECTS_H_
diff --git a/deps/v8/src/elements-kind.cc b/deps/v8/src/elements-kind.cc
index c93a602c0..8129051a6 100644
--- a/deps/v8/src/elements-kind.cc
+++ b/deps/v8/src/elements-kind.cc
@@ -65,9 +65,14 @@ int ElementsKindToShiftSize(ElementsKind elements_kind) {
}
-void PrintElementsKind(FILE* out, ElementsKind kind) {
+const char* ElementsKindToString(ElementsKind kind) {
ElementsAccessor* accessor = ElementsAccessor::ForKind(kind);
- PrintF(out, "%s", accessor->name());
+ return accessor->name();
+}
+
+
+void PrintElementsKind(FILE* out, ElementsKind kind) {
+ PrintF(out, "%s", ElementsKindToString(kind));
}
@@ -108,6 +113,7 @@ ElementsKind GetFastElementsKindFromSequenceIndex(int sequence_number) {
return fast_elements_kind_sequence.Get()[sequence_number];
}
+
int GetSequenceIndexFromFastElementsKind(ElementsKind elements_kind) {
for (int i = 0; i < kFastElementsKindCount; ++i) {
if (fast_elements_kind_sequence.Get()[i] == elements_kind) {
diff --git a/deps/v8/src/elements-kind.h b/deps/v8/src/elements-kind.h
index c5d9df86b..69b405781 100644
--- a/deps/v8/src/elements-kind.h
+++ b/deps/v8/src/elements-kind.h
@@ -78,7 +78,7 @@ const int kFastElementsKindCount = LAST_FAST_ELEMENTS_KIND -
FIRST_FAST_ELEMENTS_KIND + 1;
int ElementsKindToShiftSize(ElementsKind elements_kind);
-
+const char* ElementsKindToString(ElementsKind kind);
void PrintElementsKind(FILE* out, ElementsKind kind);
ElementsKind GetInitialFastElementsKind();
@@ -111,6 +111,18 @@ inline bool IsFastDoubleElementsKind(ElementsKind kind) {
}
+inline bool IsExternalFloatOrDoubleElementsKind(ElementsKind kind) {
+ return kind == EXTERNAL_DOUBLE_ELEMENTS ||
+ kind == EXTERNAL_FLOAT_ELEMENTS;
+}
+
+
+inline bool IsDoubleOrFloatElementsKind(ElementsKind kind) {
+ return IsFastDoubleElementsKind(kind) ||
+ IsExternalFloatOrDoubleElementsKind(kind);
+}
+
+
inline bool IsFastSmiOrObjectElementsKind(ElementsKind kind) {
return kind == FAST_SMI_ELEMENTS ||
kind == FAST_HOLEY_SMI_ELEMENTS ||
diff --git a/deps/v8/src/elements.cc b/deps/v8/src/elements.cc
index 6afbcc0ee..0b745c450 100644
--- a/deps/v8/src/elements.cc
+++ b/deps/v8/src/elements.cc
@@ -27,10 +27,11 @@
#include "v8.h"
+#include "arguments.h"
#include "objects.h"
#include "elements.h"
#include "utils.h"
-
+#include "v8conversions.h"
// Each concrete ElementsAccessor can handle exactly one ElementsKind;
// several abstract ElementsAccessor classes are used to allow sharing
@@ -146,40 +147,44 @@ static Failure* ThrowArrayLengthRangeError(Heap* heap) {
}
-void CopyObjectToObjectElements(FixedArray* from,
- ElementsKind from_kind,
- uint32_t from_start,
- FixedArray* to,
- ElementsKind to_kind,
- uint32_t to_start,
- int raw_copy_size) {
- ASSERT(to->map() != HEAP->fixed_cow_array_map());
+static void CopyObjectToObjectElements(FixedArrayBase* from_base,
+ ElementsKind from_kind,
+ uint32_t from_start,
+ FixedArrayBase* to_base,
+ ElementsKind to_kind,
+ uint32_t to_start,
+ int raw_copy_size) {
+ ASSERT(to_base->map() !=
+ from_base->GetIsolate()->heap()->fixed_cow_array_map());
+ DisallowHeapAllocation no_allocation;
int copy_size = raw_copy_size;
if (raw_copy_size < 0) {
ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
- copy_size = Min(from->length() - from_start,
- to->length() - to_start);
-#ifdef DEBUG
- // FAST_*_ELEMENTS arrays cannot be uninitialized. Ensure they are already
- // marked with the hole.
+ copy_size = Min(from_base->length() - from_start,
+ to_base->length() - to_start);
if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
- for (int i = to_start + copy_size; i < to->length(); ++i) {
- ASSERT(to->get(i)->IsTheHole());
+ int start = to_start + copy_size;
+ int length = to_base->length() - start;
+ if (length > 0) {
+ Heap* heap = from_base->GetHeap();
+ MemsetPointer(FixedArray::cast(to_base)->data_start() + start,
+ heap->the_hole_value(), length);
}
}
-#endif
}
- ASSERT((copy_size + static_cast<int>(to_start)) <= to->length() &&
- (copy_size + static_cast<int>(from_start)) <= from->length());
+ ASSERT((copy_size + static_cast<int>(to_start)) <= to_base->length() &&
+ (copy_size + static_cast<int>(from_start)) <= from_base->length());
if (copy_size == 0) return;
+ FixedArray* from = FixedArray::cast(from_base);
+ FixedArray* to = FixedArray::cast(to_base);
ASSERT(IsFastSmiOrObjectElementsKind(from_kind));
ASSERT(IsFastSmiOrObjectElementsKind(to_kind));
Address to_address = to->address() + FixedArray::kHeaderSize;
Address from_address = from->address() + FixedArray::kHeaderSize;
CopyWords(reinterpret_cast<Object**>(to_address) + to_start,
reinterpret_cast<Object**>(from_address) + from_start,
- copy_size);
+ static_cast<size_t>(copy_size));
if (IsFastObjectElementsKind(from_kind) &&
IsFastObjectElementsKind(to_kind)) {
Heap* heap = from->GetHeap();
@@ -193,31 +198,34 @@ void CopyObjectToObjectElements(FixedArray* from,
}
-static void CopyDictionaryToObjectElements(SeededNumberDictionary* from,
+static void CopyDictionaryToObjectElements(FixedArrayBase* from_base,
uint32_t from_start,
- FixedArray* to,
+ FixedArrayBase* to_base,
ElementsKind to_kind,
uint32_t to_start,
int raw_copy_size) {
+ SeededNumberDictionary* from = SeededNumberDictionary::cast(from_base);
+ DisallowHeapAllocation no_allocation;
int copy_size = raw_copy_size;
Heap* heap = from->GetHeap();
if (raw_copy_size < 0) {
ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
copy_size = from->max_number_key() + 1 - from_start;
-#ifdef DEBUG
- // Fast object arrays cannot be uninitialized. Ensure they are already
- // marked with the hole.
if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
- for (int i = to_start + copy_size; i < to->length(); ++i) {
- ASSERT(to->get(i)->IsTheHole());
+ int start = to_start + copy_size;
+ int length = to_base->length() - start;
+ if (length > 0) {
+ Heap* heap = from->GetHeap();
+ MemsetPointer(FixedArray::cast(to_base)->data_start() + start,
+ heap->the_hole_value(), length);
}
}
-#endif
}
- ASSERT(to != from);
+ ASSERT(to_base != from_base);
ASSERT(IsFastSmiOrObjectElementsKind(to_kind));
if (copy_size == 0) return;
+ FixedArray* to = FixedArray::cast(to_base);
uint32_t to_length = to->length();
if (to_start + copy_size > to_length) {
copy_size = to_length - to_start;
@@ -244,9 +252,9 @@ static void CopyDictionaryToObjectElements(SeededNumberDictionary* from,
MUST_USE_RESULT static MaybeObject* CopyDoubleToObjectElements(
- FixedDoubleArray* from,
+ FixedArrayBase* from_base,
uint32_t from_start,
- FixedArray* to,
+ FixedArrayBase* to_base,
ElementsKind to_kind,
uint32_t to_start,
int raw_copy_size) {
@@ -255,21 +263,26 @@ MUST_USE_RESULT static MaybeObject* CopyDoubleToObjectElements(
if (raw_copy_size < 0) {
ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
- copy_size = Min(from->length() - from_start,
- to->length() - to_start);
-#ifdef DEBUG
- // FAST_*_ELEMENTS arrays cannot be uninitialized. Ensure they are already
- // marked with the hole.
+ copy_size = Min(from_base->length() - from_start,
+ to_base->length() - to_start);
if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
- for (int i = to_start + copy_size; i < to->length(); ++i) {
- ASSERT(to->get(i)->IsTheHole());
+ // Also initialize the area that will be copied over since HeapNumber
+ // allocation below can cause an incremental marking step, requiring all
+      // existing heap objects to be properly initialized.
+ int start = to_start;
+ int length = to_base->length() - start;
+ if (length > 0) {
+ Heap* heap = from_base->GetHeap();
+ MemsetPointer(FixedArray::cast(to_base)->data_start() + start,
+ heap->the_hole_value(), length);
}
}
-#endif
}
- ASSERT((copy_size + static_cast<int>(to_start)) <= to->length() &&
- (copy_size + static_cast<int>(from_start)) <= from->length());
- if (copy_size == 0) return from;
+ ASSERT((copy_size + static_cast<int>(to_start)) <= to_base->length() &&
+ (copy_size + static_cast<int>(from_start)) <= from_base->length());
+ if (copy_size == 0) return from_base;
+ FixedDoubleArray* from = FixedDoubleArray::cast(from_base);
+ FixedArray* to = FixedArray::cast(to_base);
for (int i = 0; i < copy_size; ++i) {
if (IsFastSmiElementsKind(to_kind)) {
UNIMPLEMENTED();
@@ -298,26 +311,28 @@ MUST_USE_RESULT static MaybeObject* CopyDoubleToObjectElements(
}
-static void CopyDoubleToDoubleElements(FixedDoubleArray* from,
+static void CopyDoubleToDoubleElements(FixedArrayBase* from_base,
uint32_t from_start,
- FixedDoubleArray* to,
+ FixedArrayBase* to_base,
uint32_t to_start,
int raw_copy_size) {
int copy_size = raw_copy_size;
if (raw_copy_size < 0) {
ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
- copy_size = Min(from->length() - from_start,
- to->length() - to_start);
+ copy_size = Min(from_base->length() - from_start,
+ to_base->length() - to_start);
if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
- for (int i = to_start + copy_size; i < to->length(); ++i) {
- to->set_the_hole(i);
+ for (int i = to_start + copy_size; i < to_base->length(); ++i) {
+ FixedDoubleArray::cast(to_base)->set_the_hole(i);
}
}
}
- ASSERT((copy_size + static_cast<int>(to_start)) <= to->length() &&
- (copy_size + static_cast<int>(from_start)) <= from->length());
+ ASSERT((copy_size + static_cast<int>(to_start)) <= to_base->length() &&
+ (copy_size + static_cast<int>(from_start)) <= from_base->length());
if (copy_size == 0) return;
+ FixedDoubleArray* from = FixedDoubleArray::cast(from_base);
+ FixedDoubleArray* to = FixedDoubleArray::cast(to_base);
Address to_address = to->address() + FixedDoubleArray::kHeaderSize;
Address from_address = from->address() + FixedDoubleArray::kHeaderSize;
to_address += kDoubleSize * to_start;
@@ -325,29 +340,31 @@ static void CopyDoubleToDoubleElements(FixedDoubleArray* from,
int words_per_double = (kDoubleSize / kPointerSize);
CopyWords(reinterpret_cast<Object**>(to_address),
reinterpret_cast<Object**>(from_address),
- words_per_double * copy_size);
+ static_cast<size_t>(words_per_double * copy_size));
}
-static void CopySmiToDoubleElements(FixedArray* from,
+static void CopySmiToDoubleElements(FixedArrayBase* from_base,
uint32_t from_start,
- FixedDoubleArray* to,
+ FixedArrayBase* to_base,
uint32_t to_start,
int raw_copy_size) {
int copy_size = raw_copy_size;
if (raw_copy_size < 0) {
ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
- copy_size = from->length() - from_start;
+ copy_size = from_base->length() - from_start;
if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
- for (int i = to_start + copy_size; i < to->length(); ++i) {
- to->set_the_hole(i);
+ for (int i = to_start + copy_size; i < to_base->length(); ++i) {
+ FixedDoubleArray::cast(to_base)->set_the_hole(i);
}
}
}
- ASSERT((copy_size + static_cast<int>(to_start)) <= to->length() &&
- (copy_size + static_cast<int>(from_start)) <= from->length());
+ ASSERT((copy_size + static_cast<int>(to_start)) <= to_base->length() &&
+ (copy_size + static_cast<int>(from_start)) <= from_base->length());
if (copy_size == 0) return;
+ FixedArray* from = FixedArray::cast(from_base);
+ FixedDoubleArray* to = FixedDoubleArray::cast(to_base);
Object* the_hole = from->GetHeap()->the_hole_value();
for (uint32_t from_end = from_start + static_cast<uint32_t>(copy_size);
from_start < from_end; from_start++, to_start++) {
@@ -361,9 +378,9 @@ static void CopySmiToDoubleElements(FixedArray* from,
}
-static void CopyPackedSmiToDoubleElements(FixedArray* from,
+static void CopyPackedSmiToDoubleElements(FixedArrayBase* from_base,
uint32_t from_start,
- FixedDoubleArray* to,
+ FixedArrayBase* to_base,
uint32_t to_start,
int packed_size,
int raw_copy_size) {
@@ -372,52 +389,55 @@ static void CopyPackedSmiToDoubleElements(FixedArray* from,
if (raw_copy_size < 0) {
ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
- copy_size = from->length() - from_start;
+ copy_size = packed_size - from_start;
if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
- to_end = to->length();
+ to_end = to_base->length();
+ for (uint32_t i = to_start + copy_size; i < to_end; ++i) {
+ FixedDoubleArray::cast(to_base)->set_the_hole(i);
+ }
} else {
to_end = to_start + static_cast<uint32_t>(copy_size);
}
} else {
to_end = to_start + static_cast<uint32_t>(copy_size);
}
- ASSERT(static_cast<int>(to_end) <= to->length());
+ ASSERT(static_cast<int>(to_end) <= to_base->length());
ASSERT(packed_size >= 0 && packed_size <= copy_size);
- ASSERT((copy_size + static_cast<int>(to_start)) <= to->length() &&
- (copy_size + static_cast<int>(from_start)) <= from->length());
+ ASSERT((copy_size + static_cast<int>(to_start)) <= to_base->length() &&
+ (copy_size + static_cast<int>(from_start)) <= from_base->length());
if (copy_size == 0) return;
+ FixedArray* from = FixedArray::cast(from_base);
+ FixedDoubleArray* to = FixedDoubleArray::cast(to_base);
for (uint32_t from_end = from_start + static_cast<uint32_t>(packed_size);
from_start < from_end; from_start++, to_start++) {
Object* smi = from->get(from_start);
ASSERT(!smi->IsTheHole());
to->set(to_start, Smi::cast(smi)->value());
}
-
- while (to_start < to_end) {
- to->set_the_hole(to_start++);
- }
}
-static void CopyObjectToDoubleElements(FixedArray* from,
+static void CopyObjectToDoubleElements(FixedArrayBase* from_base,
uint32_t from_start,
- FixedDoubleArray* to,
+ FixedArrayBase* to_base,
uint32_t to_start,
int raw_copy_size) {
int copy_size = raw_copy_size;
if (raw_copy_size < 0) {
ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
- copy_size = from->length() - from_start;
+ copy_size = from_base->length() - from_start;
if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
- for (int i = to_start + copy_size; i < to->length(); ++i) {
- to->set_the_hole(i);
+ for (int i = to_start + copy_size; i < to_base->length(); ++i) {
+ FixedDoubleArray::cast(to_base)->set_the_hole(i);
}
}
}
- ASSERT((copy_size + static_cast<int>(to_start)) <= to->length() &&
- (copy_size + static_cast<int>(from_start)) <= from->length());
+ ASSERT((copy_size + static_cast<int>(to_start)) <= to_base->length() &&
+ (copy_size + static_cast<int>(from_start)) <= from_base->length());
if (copy_size == 0) return;
+ FixedArray* from = FixedArray::cast(from_base);
+ FixedDoubleArray* to = FixedDoubleArray::cast(to_base);
Object* the_hole = from->GetHeap()->the_hole_value();
for (uint32_t from_end = from_start + copy_size;
from_start < from_end; from_start++, to_start++) {
@@ -431,23 +451,25 @@ static void CopyObjectToDoubleElements(FixedArray* from,
}
-static void CopyDictionaryToDoubleElements(SeededNumberDictionary* from,
+static void CopyDictionaryToDoubleElements(FixedArrayBase* from_base,
uint32_t from_start,
- FixedDoubleArray* to,
+ FixedArrayBase* to_base,
uint32_t to_start,
int raw_copy_size) {
+ SeededNumberDictionary* from = SeededNumberDictionary::cast(from_base);
int copy_size = raw_copy_size;
if (copy_size < 0) {
ASSERT(copy_size == ElementsAccessor::kCopyToEnd ||
copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
copy_size = from->max_number_key() + 1 - from_start;
if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
- for (int i = to_start + copy_size; i < to->length(); ++i) {
- to->set_the_hole(i);
+ for (int i = to_start + copy_size; i < to_base->length(); ++i) {
+ FixedDoubleArray::cast(to_base)->set_the_hole(i);
}
}
}
if (copy_size == 0) return;
+ FixedDoubleArray* to = FixedDoubleArray::cast(to_base);
uint32_t to_length = to->length();
if (to_start + copy_size > to_length) {
copy_size = to_length - to_start;
@@ -463,6 +485,65 @@ static void CopyDictionaryToDoubleElements(SeededNumberDictionary* from,
}
+static void TraceTopFrame(Isolate* isolate) {
+ StackFrameIterator it(isolate);
+ if (it.done()) {
+ PrintF("unknown location (no JavaScript frames present)");
+ return;
+ }
+ StackFrame* raw_frame = it.frame();
+ if (raw_frame->is_internal()) {
+ Code* apply_builtin = isolate->builtins()->builtin(
+ Builtins::kFunctionApply);
+ if (raw_frame->unchecked_code() == apply_builtin) {
+ PrintF("apply from ");
+ it.Advance();
+ raw_frame = it.frame();
+ }
+ }
+ JavaScriptFrame::PrintTop(isolate, stdout, false, true);
+}
+
+
+void CheckArrayAbuse(JSObject* obj, const char* op, uint32_t key,
+ bool allow_appending) {
+ Object* raw_length = NULL;
+ const char* elements_type = "array";
+ if (obj->IsJSArray()) {
+ JSArray* array = JSArray::cast(obj);
+ raw_length = array->length();
+ } else {
+ raw_length = Smi::FromInt(obj->elements()->length());
+ elements_type = "object";
+ }
+
+ if (raw_length->IsNumber()) {
+ double n = raw_length->Number();
+ if (FastI2D(FastD2UI(n)) == n) {
+ int32_t int32_length = DoubleToInt32(n);
+ uint32_t compare_length = static_cast<uint32_t>(int32_length);
+ if (allow_appending) compare_length++;
+ if (key >= compare_length) {
+ PrintF("[OOB %s %s (%s length = %d, element accessed = %d) in ",
+ elements_type, op, elements_type,
+ static_cast<int>(int32_length),
+ static_cast<int>(key));
+ TraceTopFrame(obj->GetIsolate());
+ PrintF("]\n");
+ }
+ } else {
+ PrintF("[%s elements length not integer value in ", elements_type);
+ TraceTopFrame(obj->GetIsolate());
+ PrintF("]\n");
+ }
+ } else {
+ PrintF("[%s elements length not a number in ", elements_type);
+ TraceTopFrame(obj->GetIsolate());
+ PrintF("]\n");
+ }
+}
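// Editorial note, not part of the patch: a minimal sketch of the output this
// helper produces. Assuming FLAG_trace_js_array_abuse is set and a script
// reads past the end of a three-element array (e.g. var a = [1, 2, 3]; a[10];),
// the PrintF calls above would emit roughly:
//
//   [OOB array elements read (array length = 3, element accessed = 10) in <frame>]
//
// where <frame> is whatever TraceTopFrame() prints for the current top
// JavaScript frame.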
+
+
// Base class for element handler implementations. Contains the
// common logic for objects with different ElementsKinds.
// Subclasses must specialize methods for which the element
@@ -500,14 +581,8 @@ class ElementsAccessorBase : public ElementsAccessor {
    // When objects are first allocated, their elements are Failures.
if (fixed_array_base->IsFailure()) return;
if (!fixed_array_base->IsHeapObject()) return;
- Map* map = fixed_array_base->map();
// Arrays that have been shifted in place can't be verified.
- Heap* heap = holder->GetHeap();
- if (map == heap->raw_unchecked_one_pointer_filler_map() ||
- map == heap->raw_unchecked_two_pointer_filler_map() ||
- map == heap->free_space_map()) {
- return;
- }
+ if (fixed_array_base->IsFiller()) return;
int length = 0;
if (holder->IsJSArray()) {
Object* length_obj = JSArray::cast(holder)->length();
@@ -527,10 +602,9 @@ class ElementsAccessorBase : public ElementsAccessor {
static bool HasElementImpl(Object* receiver,
JSObject* holder,
uint32_t key,
- BackingStore* backing_store) {
- MaybeObject* element =
- ElementsAccessorSubclass::GetImpl(receiver, holder, key, backing_store);
- return !element->IsTheHole();
+ FixedArrayBase* backing_store) {
+ return ElementsAccessorSubclass::GetAttributesImpl(
+ receiver, holder, key, backing_store) != ABSENT;
}
virtual bool HasElement(Object* receiver,
@@ -541,7 +615,7 @@ class ElementsAccessorBase : public ElementsAccessor {
backing_store = holder->elements();
}
return ElementsAccessorSubclass::HasElementImpl(
- receiver, holder, key, BackingStore::cast(backing_store));
+ receiver, holder, key, backing_store);
}
MUST_USE_RESULT virtual MaybeObject* Get(Object* receiver,
@@ -551,29 +625,107 @@ class ElementsAccessorBase : public ElementsAccessor {
if (backing_store == NULL) {
backing_store = holder->elements();
}
+
+ if (!IsExternalArrayElementsKind(ElementsTraits::Kind) &&
+ FLAG_trace_js_array_abuse) {
+ CheckArrayAbuse(holder, "elements read", key);
+ }
+
+ if (IsExternalArrayElementsKind(ElementsTraits::Kind) &&
+ FLAG_trace_external_array_abuse) {
+ CheckArrayAbuse(holder, "external elements read", key);
+ }
+
return ElementsAccessorSubclass::GetImpl(
- receiver, holder, key, BackingStore::cast(backing_store));
+ receiver, holder, key, backing_store);
}
MUST_USE_RESULT static MaybeObject* GetImpl(Object* receiver,
JSObject* obj,
uint32_t key,
- BackingStore* backing_store) {
+ FixedArrayBase* backing_store) {
return (key < ElementsAccessorSubclass::GetCapacityImpl(backing_store))
- ? backing_store->get(key)
+ ? BackingStore::cast(backing_store)->get(key)
: backing_store->GetHeap()->the_hole_value();
}
+ MUST_USE_RESULT virtual PropertyAttributes GetAttributes(
+ Object* receiver,
+ JSObject* holder,
+ uint32_t key,
+ FixedArrayBase* backing_store) {
+ if (backing_store == NULL) {
+ backing_store = holder->elements();
+ }
+ return ElementsAccessorSubclass::GetAttributesImpl(
+ receiver, holder, key, backing_store);
+ }
+
+ MUST_USE_RESULT static PropertyAttributes GetAttributesImpl(
+ Object* receiver,
+ JSObject* obj,
+ uint32_t key,
+ FixedArrayBase* backing_store) {
+ if (key >= ElementsAccessorSubclass::GetCapacityImpl(backing_store)) {
+ return ABSENT;
+ }
+ return BackingStore::cast(backing_store)->is_the_hole(key) ? ABSENT : NONE;
+ }
+
+ MUST_USE_RESULT virtual PropertyType GetType(
+ Object* receiver,
+ JSObject* holder,
+ uint32_t key,
+ FixedArrayBase* backing_store) {
+ if (backing_store == NULL) {
+ backing_store = holder->elements();
+ }
+ return ElementsAccessorSubclass::GetTypeImpl(
+ receiver, holder, key, backing_store);
+ }
+
+ MUST_USE_RESULT static PropertyType GetTypeImpl(
+ Object* receiver,
+ JSObject* obj,
+ uint32_t key,
+ FixedArrayBase* backing_store) {
+ if (key >= ElementsAccessorSubclass::GetCapacityImpl(backing_store)) {
+ return NONEXISTENT;
+ }
+ return BackingStore::cast(backing_store)->is_the_hole(key)
+ ? NONEXISTENT : FIELD;
+ }
+
+ MUST_USE_RESULT virtual AccessorPair* GetAccessorPair(
+ Object* receiver,
+ JSObject* holder,
+ uint32_t key,
+ FixedArrayBase* backing_store) {
+ if (backing_store == NULL) {
+ backing_store = holder->elements();
+ }
+ return ElementsAccessorSubclass::GetAccessorPairImpl(
+ receiver, holder, key, backing_store);
+ }
+
+ MUST_USE_RESULT static AccessorPair* GetAccessorPairImpl(
+ Object* receiver,
+ JSObject* obj,
+ uint32_t key,
+ FixedArrayBase* backing_store) {
+ return NULL;
+ }
+
MUST_USE_RESULT virtual MaybeObject* SetLength(JSArray* array,
Object* length) {
return ElementsAccessorSubclass::SetLengthImpl(
- array, length, BackingStore::cast(array->elements()));
+ array, length, array->elements());
}
MUST_USE_RESULT static MaybeObject* SetLengthImpl(
JSObject* obj,
Object* length,
- BackingStore* backing_store);
+ FixedArrayBase* backing_store);
MUST_USE_RESULT virtual MaybeObject* SetCapacityAndLength(
JSArray* array,
@@ -600,7 +752,7 @@ class ElementsAccessorBase : public ElementsAccessor {
MUST_USE_RESULT static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
uint32_t from_start,
FixedArrayBase* to,
- ElementsKind to_kind,
+ ElementsKind from_kind,
uint32_t to_start,
int packed_size,
int copy_size) {
@@ -610,8 +762,8 @@ class ElementsAccessorBase : public ElementsAccessor {
MUST_USE_RESULT virtual MaybeObject* CopyElements(JSObject* from_holder,
uint32_t from_start,
+ ElementsKind from_kind,
FixedArrayBase* to,
- ElementsKind to_kind,
uint32_t to_start,
int copy_size,
FixedArrayBase* from) {
@@ -621,8 +773,7 @@ class ElementsAccessorBase : public ElementsAccessor {
}
if (from_holder) {
- ElementsKind elements_kind = from_holder->GetElementsKind();
- bool is_packed = IsFastPackedElementsKind(elements_kind) &&
+ bool is_packed = IsFastPackedElementsKind(from_kind) &&
from_holder->IsJSArray();
if (is_packed) {
packed_size = Smi::cast(JSArray::cast(from_holder)->length())->value();
@@ -631,11 +782,8 @@ class ElementsAccessorBase : public ElementsAccessor {
}
}
}
- if (from->length() == 0) {
- return from;
- }
return ElementsAccessorSubclass::CopyElementsImpl(
- from, from_start, to, to_kind, to_start, packed_size, copy_size);
+ from, from_start, to, from_kind, to_start, packed_size, copy_size);
}
MUST_USE_RESULT virtual MaybeObject* AddElementsToFixedArray(
@@ -644,7 +792,7 @@ class ElementsAccessorBase : public ElementsAccessor {
FixedArray* to,
FixedArrayBase* from) {
int len0 = to->length();
-#ifdef DEBUG
+#ifdef ENABLE_SLOW_ASSERTS
if (FLAG_enable_slow_asserts) {
for (int i = 0; i < len0; i++) {
ASSERT(!to->get(i)->IsTheHole());
@@ -654,25 +802,22 @@ class ElementsAccessorBase : public ElementsAccessor {
if (from == NULL) {
from = holder->elements();
}
- BackingStore* backing_store = BackingStore::cast(from);
- uint32_t len1 = ElementsAccessorSubclass::GetCapacityImpl(backing_store);
// Optimize if 'other' is empty.
// We cannot optimize if 'this' is empty, as other may have holes.
+ uint32_t len1 = ElementsAccessorSubclass::GetCapacityImpl(from);
if (len1 == 0) return to;
// Compute how many elements are not in other.
uint32_t extra = 0;
for (uint32_t y = 0; y < len1; y++) {
- uint32_t key =
- ElementsAccessorSubclass::GetKeyForIndexImpl(backing_store, y);
+ uint32_t key = ElementsAccessorSubclass::GetKeyForIndexImpl(from, y);
if (ElementsAccessorSubclass::HasElementImpl(
- receiver, holder, key, backing_store)) {
+ receiver, holder, key, from)) {
MaybeObject* maybe_value =
- ElementsAccessorSubclass::GetImpl(receiver, holder,
- key, backing_store);
+ ElementsAccessorSubclass::GetImpl(receiver, holder, key, from);
Object* value;
- if (!maybe_value->ToObject(&value)) return maybe_value;
+ if (!maybe_value->To(&value)) return maybe_value;
ASSERT(!value->IsTheHole());
if (!HasKey(to, value)) {
extra++;
@@ -684,13 +829,12 @@ class ElementsAccessorBase : public ElementsAccessor {
// Allocate the result
FixedArray* result;
- MaybeObject* maybe_obj =
- backing_store->GetHeap()->AllocateFixedArray(len0 + extra);
- if (!maybe_obj->To<FixedArray>(&result)) return maybe_obj;
+ MaybeObject* maybe_obj = from->GetHeap()->AllocateFixedArray(len0 + extra);
+ if (!maybe_obj->To(&result)) return maybe_obj;
// Fill in the content
{
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
for (int i = 0; i < len0; i++) {
Object* e = to->get(i);
@@ -702,14 +846,13 @@ class ElementsAccessorBase : public ElementsAccessor {
uint32_t index = 0;
for (uint32_t y = 0; y < len1; y++) {
uint32_t key =
- ElementsAccessorSubclass::GetKeyForIndexImpl(backing_store, y);
+ ElementsAccessorSubclass::GetKeyForIndexImpl(from, y);
if (ElementsAccessorSubclass::HasElementImpl(
- receiver, holder, key, backing_store)) {
+ receiver, holder, key, from)) {
MaybeObject* maybe_value =
- ElementsAccessorSubclass::GetImpl(receiver, holder,
- key, backing_store);
+ ElementsAccessorSubclass::GetImpl(receiver, holder, key, from);
Object* value;
- if (!maybe_value->ToObject(&value)) return maybe_value;
+ if (!maybe_value->To(&value)) return maybe_value;
if (!value->IsTheHole() && !HasKey(to, value)) {
result->set(len0 + index, value);
index++;
@@ -721,24 +864,22 @@ class ElementsAccessorBase : public ElementsAccessor {
}
protected:
- static uint32_t GetCapacityImpl(BackingStore* backing_store) {
+ static uint32_t GetCapacityImpl(FixedArrayBase* backing_store) {
return backing_store->length();
}
virtual uint32_t GetCapacity(FixedArrayBase* backing_store) {
- return ElementsAccessorSubclass::GetCapacityImpl(
- BackingStore::cast(backing_store));
+ return ElementsAccessorSubclass::GetCapacityImpl(backing_store);
}
- static uint32_t GetKeyForIndexImpl(BackingStore* backing_store,
+ static uint32_t GetKeyForIndexImpl(FixedArrayBase* backing_store,
uint32_t index) {
return index;
}
virtual uint32_t GetKeyForIndex(FixedArrayBase* backing_store,
uint32_t index) {
- return ElementsAccessorSubclass::GetKeyForIndexImpl(
- BackingStore::cast(backing_store), index);
+ return ElementsAccessorSubclass::GetKeyForIndexImpl(backing_store, index);
}
private:
@@ -764,17 +905,17 @@ class FastElementsAccessor
  // Adjusts the length of the fast backing store and returns the new length,
  // or undefined in case conversion to a slow backing store should be
  // performed.
- static MaybeObject* SetLengthWithoutNormalize(BackingStore* backing_store,
+ static MaybeObject* SetLengthWithoutNormalize(FixedArrayBase* backing_store,
JSArray* array,
Object* length_object,
uint32_t length) {
uint32_t old_capacity = backing_store->length();
Object* old_length = array->length();
- bool same_size = old_length->IsSmi() &&
- static_cast<uint32_t>(Smi::cast(old_length)->value()) == length;
+ bool same_or_smaller_size = old_length->IsSmi() &&
+ static_cast<uint32_t>(Smi::cast(old_length)->value()) >= length;
ElementsKind kind = array->GetElementsKind();
- if (!same_size && IsFastElementsKind(kind) &&
+ if (!same_or_smaller_size && IsFastElementsKind(kind) &&
!IsFastHoleyElementsKind(kind)) {
kind = GetHoleyElementsKind(kind);
MaybeObject* maybe_obj = array->TransitionElementsKind(kind);
@@ -802,7 +943,7 @@ class FastElementsAccessor
// Otherwise, fill the unused tail with holes.
int old_length = FastD2IChecked(array->length()->Number());
for (int i = length; i < old_length; i++) {
- backing_store->set_the_hole(i);
+ BackingStore::cast(backing_store)->set_the_hole(i);
}
}
return length_object;
@@ -829,32 +970,38 @@ class FastElementsAccessor
ASSERT(obj->HasFastSmiOrObjectElements() ||
obj->HasFastDoubleElements() ||
obj->HasFastArgumentsElements());
- typename KindTraits::BackingStore* backing_store =
- KindTraits::BackingStore::cast(obj->elements());
Heap* heap = obj->GetHeap();
- if (backing_store->map() == heap->non_strict_arguments_elements_map()) {
- backing_store =
- KindTraits::BackingStore::cast(
- FixedArray::cast(backing_store)->get(1));
- } else {
- ElementsKind kind = KindTraits::Kind;
- if (IsFastPackedElementsKind(kind)) {
- MaybeObject* transitioned =
- obj->TransitionElementsKind(GetHoleyElementsKind(kind));
- if (transitioned->IsFailure()) return transitioned;
- }
- if (IsFastSmiOrObjectElementsKind(KindTraits::Kind)) {
- Object* writable;
- MaybeObject* maybe = obj->EnsureWritableFastElements();
- if (!maybe->ToObject(&writable)) return maybe;
- backing_store = KindTraits::BackingStore::cast(writable);
- }
+ Object* elements = obj->elements();
+ if (elements == heap->empty_fixed_array()) {
+ return heap->true_value();
+ }
+ typename KindTraits::BackingStore* backing_store =
+ KindTraits::BackingStore::cast(elements);
+ bool is_non_strict_arguments_elements_map =
+ backing_store->map() == heap->non_strict_arguments_elements_map();
+ if (is_non_strict_arguments_elements_map) {
+ backing_store = KindTraits::BackingStore::cast(
+ FixedArray::cast(backing_store)->get(1));
}
uint32_t length = static_cast<uint32_t>(
obj->IsJSArray()
? Smi::cast(JSArray::cast(obj)->length())->value()
: backing_store->length());
if (key < length) {
+ if (!is_non_strict_arguments_elements_map) {
+ ElementsKind kind = KindTraits::Kind;
+ if (IsFastPackedElementsKind(kind)) {
+ MaybeObject* transitioned =
+ obj->TransitionElementsKind(GetHoleyElementsKind(kind));
+ if (transitioned->IsFailure()) return transitioned;
+ }
+ if (IsFastSmiOrObjectElementsKind(KindTraits::Kind)) {
+ Object* writable;
+ MaybeObject* maybe = obj->EnsureWritableFastElements();
+ if (!maybe->ToObject(&writable)) return maybe;
+ backing_store = KindTraits::BackingStore::cast(writable);
+ }
+ }
backing_store->set_the_hole(key);
// If an old space backing store is larger than a certain size and
// has too few used values, normalize it.
@@ -890,11 +1037,11 @@ class FastElementsAccessor
Object* receiver,
JSObject* holder,
uint32_t key,
- typename KindTraits::BackingStore* backing_store) {
+ FixedArrayBase* backing_store) {
if (key >= static_cast<uint32_t>(backing_store->length())) {
return false;
}
- return !backing_store->is_the_hole(key);
+ return !BackingStore::cast(backing_store)->is_the_hole(key);
}
static void ValidateContents(JSObject* holder, int length) {
@@ -921,6 +1068,41 @@ class FastElementsAccessor
};
+static inline ElementsKind ElementsKindForArray(FixedArrayBase* array) {
+ switch (array->map()->instance_type()) {
+ case FIXED_ARRAY_TYPE:
+ if (array->IsDictionary()) {
+ return DICTIONARY_ELEMENTS;
+ } else {
+ return FAST_HOLEY_ELEMENTS;
+ }
+ case FIXED_DOUBLE_ARRAY_TYPE:
+ return FAST_HOLEY_DOUBLE_ELEMENTS;
+ case EXTERNAL_BYTE_ARRAY_TYPE:
+ return EXTERNAL_BYTE_ELEMENTS;
+ case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
+ return EXTERNAL_UNSIGNED_BYTE_ELEMENTS;
+ case EXTERNAL_SHORT_ARRAY_TYPE:
+ return EXTERNAL_SHORT_ELEMENTS;
+ case EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE:
+ return EXTERNAL_UNSIGNED_SHORT_ELEMENTS;
+ case EXTERNAL_INT_ARRAY_TYPE:
+ return EXTERNAL_INT_ELEMENTS;
+ case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
+ return EXTERNAL_UNSIGNED_INT_ELEMENTS;
+ case EXTERNAL_FLOAT_ARRAY_TYPE:
+ return EXTERNAL_FLOAT_ELEMENTS;
+ case EXTERNAL_DOUBLE_ARRAY_TYPE:
+ return EXTERNAL_DOUBLE_ELEMENTS;
+ case EXTERNAL_PIXEL_ARRAY_TYPE:
+ return EXTERNAL_PIXEL_ELEMENTS;
+ default:
+ UNREACHABLE();
+ }
+ return FAST_HOLEY_ELEMENTS;
+}
+
+
template<typename FastElementsAccessorSubclass,
typename KindTraits>
class FastSmiOrObjectElementsAccessor
@@ -936,36 +1118,49 @@ class FastSmiOrObjectElementsAccessor
static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
uint32_t from_start,
FixedArrayBase* to,
- ElementsKind to_kind,
+ ElementsKind from_kind,
uint32_t to_start,
int packed_size,
int copy_size) {
- if (IsFastSmiOrObjectElementsKind(to_kind)) {
- CopyObjectToObjectElements(
- FixedArray::cast(from), KindTraits::Kind, from_start,
- FixedArray::cast(to), to_kind, to_start, copy_size);
- } else if (IsFastDoubleElementsKind(to_kind)) {
- if (IsFastSmiElementsKind(KindTraits::Kind)) {
- if (IsFastPackedElementsKind(KindTraits::Kind) &&
- packed_size != kPackedSizeNotKnown) {
- CopyPackedSmiToDoubleElements(
- FixedArray::cast(from), from_start,
- FixedDoubleArray::cast(to), to_start,
- packed_size, copy_size);
- } else {
- CopySmiToDoubleElements(
- FixedArray::cast(from), from_start,
- FixedDoubleArray::cast(to), to_start, copy_size);
- }
- } else {
- CopyObjectToDoubleElements(
- FixedArray::cast(from), from_start,
- FixedDoubleArray::cast(to), to_start, copy_size);
+ ElementsKind to_kind = KindTraits::Kind;
+ switch (from_kind) {
+ case FAST_SMI_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ CopyObjectToObjectElements(
+ from, from_kind, from_start, to, to_kind, to_start, copy_size);
+ return to->GetHeap()->undefined_value();
+ case FAST_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
+ return CopyDoubleToObjectElements(
+ from, from_start, to, to_kind, to_start, copy_size);
+ case DICTIONARY_ELEMENTS:
+ CopyDictionaryToObjectElements(
+ from, from_start, to, to_kind, to_start, copy_size);
+ return to->GetHeap()->undefined_value();
+ case NON_STRICT_ARGUMENTS_ELEMENTS: {
+ // TODO(verwaest): This is a temporary hack to support extending
+ // NON_STRICT_ARGUMENTS_ELEMENTS in SetFastElementsCapacityAndLength.
+ // This case should be UNREACHABLE().
+ FixedArray* parameter_map = FixedArray::cast(from);
+ FixedArrayBase* arguments = FixedArrayBase::cast(parameter_map->get(1));
+ ElementsKind from_kind = ElementsKindForArray(arguments);
+ return CopyElementsImpl(arguments, from_start, to, from_kind,
+ to_start, packed_size, copy_size);
}
- } else {
- UNREACHABLE();
+ case EXTERNAL_BYTE_ELEMENTS:
+ case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ case EXTERNAL_SHORT_ELEMENTS:
+ case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ case EXTERNAL_INT_ELEMENTS:
+ case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ case EXTERNAL_FLOAT_ELEMENTS:
+ case EXTERNAL_DOUBLE_ELEMENTS:
+ case EXTERNAL_PIXEL_ELEMENTS:
+ UNREACHABLE();
}
- return to->GetHeap()->undefined_value();
+ return NULL;
}
@@ -1054,25 +1249,40 @@ class FastDoubleElementsAccessor
static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
uint32_t from_start,
FixedArrayBase* to,
- ElementsKind to_kind,
+ ElementsKind from_kind,
uint32_t to_start,
int packed_size,
int copy_size) {
- switch (to_kind) {
+ switch (from_kind) {
case FAST_SMI_ELEMENTS:
- case FAST_ELEMENTS:
+ CopyPackedSmiToDoubleElements(
+ from, from_start, to, to_start, packed_size, copy_size);
+ break;
case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- return CopyDoubleToObjectElements(
- FixedDoubleArray::cast(from), from_start, FixedArray::cast(to),
- to_kind, to_start, copy_size);
+ CopySmiToDoubleElements(from, from_start, to, to_start, copy_size);
+ break;
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
- CopyDoubleToDoubleElements(FixedDoubleArray::cast(from), from_start,
- FixedDoubleArray::cast(to),
- to_start, copy_size);
- return from;
- default:
+ CopyDoubleToDoubleElements(from, from_start, to, to_start, copy_size);
+ break;
+ case FAST_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ CopyObjectToDoubleElements(from, from_start, to, to_start, copy_size);
+ break;
+ case DICTIONARY_ELEMENTS:
+ CopyDictionaryToDoubleElements(
+ from, from_start, to, to_start, copy_size);
+ break;
+ case NON_STRICT_ARGUMENTS_ELEMENTS:
+ case EXTERNAL_BYTE_ELEMENTS:
+ case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ case EXTERNAL_SHORT_ELEMENTS:
+ case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ case EXTERNAL_INT_ELEMENTS:
+ case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ case EXTERNAL_FLOAT_ELEMENTS:
+ case EXTERNAL_DOUBLE_ELEMENTS:
+ case EXTERNAL_PIXEL_ELEMENTS:
UNREACHABLE();
}
return to->GetHeap()->undefined_value();
@@ -1129,17 +1339,37 @@ class ExternalElementsAccessor
MUST_USE_RESULT static MaybeObject* GetImpl(Object* receiver,
JSObject* obj,
uint32_t key,
- BackingStore* backing_store) {
+ FixedArrayBase* backing_store) {
return
key < ExternalElementsAccessorSubclass::GetCapacityImpl(backing_store)
- ? backing_store->get(key)
+ ? BackingStore::cast(backing_store)->get(key)
: backing_store->GetHeap()->undefined_value();
}
+ MUST_USE_RESULT static PropertyAttributes GetAttributesImpl(
+ Object* receiver,
+ JSObject* obj,
+ uint32_t key,
+ FixedArrayBase* backing_store) {
+ return
+ key < ExternalElementsAccessorSubclass::GetCapacityImpl(backing_store)
+ ? NONE : ABSENT;
+ }
+
+ MUST_USE_RESULT static PropertyType GetTypeImpl(
+ Object* receiver,
+ JSObject* obj,
+ uint32_t key,
+ FixedArrayBase* backing_store) {
+ return
+ key < ExternalElementsAccessorSubclass::GetCapacityImpl(backing_store)
+ ? FIELD : NONEXISTENT;
+ }
+
MUST_USE_RESULT static MaybeObject* SetLengthImpl(
JSObject* obj,
Object* length,
- BackingStore* backing_store) {
+ FixedArrayBase* backing_store) {
// External arrays do not support changing their length.
UNREACHABLE();
return obj;
@@ -1155,7 +1385,7 @@ class ExternalElementsAccessor
static bool HasElementImpl(Object* receiver,
JSObject* holder,
uint32_t key,
- BackingStore* backing_store) {
+ FixedArrayBase* backing_store) {
uint32_t capacity =
ExternalElementsAccessorSubclass::GetCapacityImpl(backing_store);
return key < capacity;
@@ -1264,10 +1494,11 @@ class DictionaryElementsAccessor
// Adjusts the length of the dictionary backing store and returns the new
// length according to ES5 section 15.4.5.2 behavior.
MUST_USE_RESULT static MaybeObject* SetLengthWithoutNormalize(
- SeededNumberDictionary* dict,
+ FixedArrayBase* store,
JSArray* array,
Object* length_object,
uint32_t length) {
+ SeededNumberDictionary* dict = SeededNumberDictionary::cast(store);
Heap* heap = array->GetHeap();
int capacity = dict->Capacity();
uint32_t new_length = length;
@@ -1340,7 +1571,7 @@ class DictionaryElementsAccessor
if (mode == JSObject::STRICT_DELETION) {
// Deleting a non-configurable property in strict mode.
HandleScope scope(isolate);
- Handle<Object> holder(obj);
+ Handle<Object> holder(obj, isolate);
Handle<Object> name = isolate->factory()->NewNumberFromUint(key);
Handle<Object> args[2] = { name, holder };
Handle<Object> error =
@@ -1367,29 +1598,12 @@ class DictionaryElementsAccessor
MUST_USE_RESULT static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
uint32_t from_start,
FixedArrayBase* to,
- ElementsKind to_kind,
+ ElementsKind from_kind,
uint32_t to_start,
int packed_size,
int copy_size) {
- switch (to_kind) {
- case FAST_SMI_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- CopyDictionaryToObjectElements(
- SeededNumberDictionary::cast(from), from_start,
- FixedArray::cast(to), to_kind, to_start, copy_size);
- return from;
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- CopyDictionaryToDoubleElements(
- SeededNumberDictionary::cast(from), from_start,
- FixedDoubleArray::cast(to), to_start, copy_size);
- return from;
- default:
- UNREACHABLE();
- }
- return to->GetHeap()->undefined_value();
+ UNREACHABLE();
+ return NULL;
}
@@ -1407,7 +1621,8 @@ class DictionaryElementsAccessor
Object* receiver,
JSObject* obj,
uint32_t key,
- SeededNumberDictionary* backing_store) {
+ FixedArrayBase* store) {
+ SeededNumberDictionary* backing_store = SeededNumberDictionary::cast(store);
int entry = backing_store->FindEntry(key);
if (entry != SeededNumberDictionary::kNotFound) {
Object* element = backing_store->ValueAt(entry);
@@ -1424,16 +1639,59 @@ class DictionaryElementsAccessor
return obj->GetHeap()->the_hole_value();
}
+ MUST_USE_RESULT static PropertyAttributes GetAttributesImpl(
+ Object* receiver,
+ JSObject* obj,
+ uint32_t key,
+ FixedArrayBase* backing_store) {
+ SeededNumberDictionary* dictionary =
+ SeededNumberDictionary::cast(backing_store);
+ int entry = dictionary->FindEntry(key);
+ if (entry != SeededNumberDictionary::kNotFound) {
+ return dictionary->DetailsAt(entry).attributes();
+ }
+ return ABSENT;
+ }
+
+ MUST_USE_RESULT static PropertyType GetTypeImpl(
+ Object* receiver,
+ JSObject* obj,
+ uint32_t key,
+ FixedArrayBase* store) {
+ SeededNumberDictionary* backing_store = SeededNumberDictionary::cast(store);
+ int entry = backing_store->FindEntry(key);
+ if (entry != SeededNumberDictionary::kNotFound) {
+ return backing_store->DetailsAt(entry).type();
+ }
+ return NONEXISTENT;
+ }
+
+ MUST_USE_RESULT static AccessorPair* GetAccessorPairImpl(
+ Object* receiver,
+ JSObject* obj,
+ uint32_t key,
+ FixedArrayBase* store) {
+ SeededNumberDictionary* backing_store = SeededNumberDictionary::cast(store);
+ int entry = backing_store->FindEntry(key);
+ if (entry != SeededNumberDictionary::kNotFound &&
+ backing_store->DetailsAt(entry).type() == CALLBACKS &&
+ backing_store->ValueAt(entry)->IsAccessorPair()) {
+ return AccessorPair::cast(backing_store->ValueAt(entry));
+ }
+ return NULL;
+ }
+
static bool HasElementImpl(Object* receiver,
JSObject* holder,
uint32_t key,
- SeededNumberDictionary* backing_store) {
- return backing_store->FindEntry(key) !=
+ FixedArrayBase* backing_store) {
+ return SeededNumberDictionary::cast(backing_store)->FindEntry(key) !=
SeededNumberDictionary::kNotFound;
}
- static uint32_t GetKeyForIndexImpl(SeededNumberDictionary* dict,
+ static uint32_t GetKeyForIndexImpl(FixedArrayBase* store,
uint32_t index) {
+ SeededNumberDictionary* dict = SeededNumberDictionary::cast(store);
Object* key = dict->KeyAt(index);
return Smi::cast(key)->value();
}
@@ -1456,7 +1714,8 @@ class NonStrictArgumentsElementsAccessor : public ElementsAccessorBase<
MUST_USE_RESULT static MaybeObject* GetImpl(Object* receiver,
JSObject* obj,
uint32_t key,
- FixedArray* parameter_map) {
+ FixedArrayBase* parameters) {
+ FixedArray* parameter_map = FixedArray::cast(parameters);
Object* probe = GetParameterMapArg(obj, parameter_map, key);
if (!probe->IsTheHole()) {
Context* context = Context::cast(parameter_map->get(0));
@@ -1483,10 +1742,61 @@ class NonStrictArgumentsElementsAccessor : public ElementsAccessorBase<
}
}
+ MUST_USE_RESULT static PropertyAttributes GetAttributesImpl(
+ Object* receiver,
+ JSObject* obj,
+ uint32_t key,
+ FixedArrayBase* backing_store) {
+ FixedArray* parameter_map = FixedArray::cast(backing_store);
+ Object* probe = GetParameterMapArg(obj, parameter_map, key);
+ if (!probe->IsTheHole()) {
+ return NONE;
+ } else {
+ // If not aliased, check the arguments.
+ FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
+ return ElementsAccessor::ForArray(arguments)->GetAttributes(
+ receiver, obj, key, arguments);
+ }
+ }
+
+ MUST_USE_RESULT static PropertyType GetTypeImpl(
+ Object* receiver,
+ JSObject* obj,
+ uint32_t key,
+ FixedArrayBase* parameters) {
+ FixedArray* parameter_map = FixedArray::cast(parameters);
+ Object* probe = GetParameterMapArg(obj, parameter_map, key);
+ if (!probe->IsTheHole()) {
+ return FIELD;
+ } else {
+ // If not aliased, check the arguments.
+ FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
+ return ElementsAccessor::ForArray(arguments)->GetType(
+ receiver, obj, key, arguments);
+ }
+ }
+
+ MUST_USE_RESULT static AccessorPair* GetAccessorPairImpl(
+ Object* receiver,
+ JSObject* obj,
+ uint32_t key,
+ FixedArrayBase* parameters) {
+ FixedArray* parameter_map = FixedArray::cast(parameters);
+ Object* probe = GetParameterMapArg(obj, parameter_map, key);
+ if (!probe->IsTheHole()) {
+ return NULL;
+ } else {
+ // If not aliased, check the arguments.
+ FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
+ return ElementsAccessor::ForArray(arguments)->GetAccessorPair(
+ receiver, obj, key, arguments);
+ }
+ }
+
MUST_USE_RESULT static MaybeObject* SetLengthImpl(
JSObject* obj,
Object* length,
- FixedArray* parameter_map) {
+ FixedArrayBase* parameter_map) {
// TODO(mstarzinger): This was never implemented but will be used once we
// correctly implement [[DefineOwnProperty]] on arrays.
UNIMPLEMENTED();
@@ -1520,24 +1830,22 @@ class NonStrictArgumentsElementsAccessor : public ElementsAccessorBase<
MUST_USE_RESULT static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
uint32_t from_start,
FixedArrayBase* to,
- ElementsKind to_kind,
+ ElementsKind from_kind,
uint32_t to_start,
int packed_size,
int copy_size) {
- FixedArray* parameter_map = FixedArray::cast(from);
- FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
- ElementsAccessor* accessor = ElementsAccessor::ForArray(arguments);
- return accessor->CopyElements(NULL, from_start, to, to_kind,
- to_start, copy_size, arguments);
+ UNREACHABLE();
+ return NULL;
}
- static uint32_t GetCapacityImpl(FixedArray* parameter_map) {
+ static uint32_t GetCapacityImpl(FixedArrayBase* backing_store) {
+ FixedArray* parameter_map = FixedArray::cast(backing_store);
FixedArrayBase* arguments = FixedArrayBase::cast(parameter_map->get(1));
return Max(static_cast<uint32_t>(parameter_map->length() - 2),
ForArray(arguments)->GetCapacity(arguments));
}
- static uint32_t GetKeyForIndexImpl(FixedArray* dict,
+ static uint32_t GetKeyForIndexImpl(FixedArrayBase* dict,
uint32_t index) {
return index;
}
@@ -1545,12 +1853,14 @@ class NonStrictArgumentsElementsAccessor : public ElementsAccessorBase<
static bool HasElementImpl(Object* receiver,
JSObject* holder,
uint32_t key,
- FixedArray* parameter_map) {
+ FixedArrayBase* parameters) {
+ FixedArray* parameter_map = FixedArray::cast(parameters);
Object* probe = GetParameterMapArg(holder, parameter_map, key);
if (!probe->IsTheHole()) {
return true;
} else {
- FixedArrayBase* arguments = FixedArrayBase::cast(parameter_map->get(1));
+ FixedArrayBase* arguments =
+ FixedArrayBase::cast(FixedArray::cast(parameter_map)->get(1));
ElementsAccessor* accessor = ElementsAccessor::ForArray(arguments);
return !accessor->Get(receiver, holder, key, arguments)->IsTheHole();
}
@@ -1563,7 +1873,7 @@ class NonStrictArgumentsElementsAccessor : public ElementsAccessorBase<
uint32_t length = holder->IsJSArray()
? Smi::cast(JSArray::cast(holder)->length())->value()
: parameter_map->length();
- return key < (length - 2 )
+ return key < (length - 2)
? parameter_map->get(key + 2)
: parameter_map->GetHeap()->the_hole_value();
}
@@ -1571,35 +1881,7 @@ class NonStrictArgumentsElementsAccessor : public ElementsAccessorBase<
ElementsAccessor* ElementsAccessor::ForArray(FixedArrayBase* array) {
- switch (array->map()->instance_type()) {
- case FIXED_ARRAY_TYPE:
- if (array->IsDictionary()) {
- return elements_accessors_[DICTIONARY_ELEMENTS];
- } else {
- return elements_accessors_[FAST_HOLEY_ELEMENTS];
- }
- case EXTERNAL_BYTE_ARRAY_TYPE:
- return elements_accessors_[EXTERNAL_BYTE_ELEMENTS];
- case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
- return elements_accessors_[EXTERNAL_UNSIGNED_BYTE_ELEMENTS];
- case EXTERNAL_SHORT_ARRAY_TYPE:
- return elements_accessors_[EXTERNAL_SHORT_ELEMENTS];
- case EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE:
- return elements_accessors_[EXTERNAL_UNSIGNED_SHORT_ELEMENTS];
- case EXTERNAL_INT_ARRAY_TYPE:
- return elements_accessors_[EXTERNAL_INT_ELEMENTS];
- case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
- return elements_accessors_[EXTERNAL_UNSIGNED_INT_ELEMENTS];
- case EXTERNAL_FLOAT_ARRAY_TYPE:
- return elements_accessors_[EXTERNAL_FLOAT_ELEMENTS];
- case EXTERNAL_DOUBLE_ARRAY_TYPE:
- return elements_accessors_[EXTERNAL_DOUBLE_ELEMENTS];
- case EXTERNAL_PIXEL_ARRAY_TYPE:
- return elements_accessors_[EXTERNAL_PIXEL_ELEMENTS];
- default:
- UNREACHABLE();
- return NULL;
- }
+ return elements_accessors_[ElementsKindForArray(array)];
}
@@ -1630,7 +1912,7 @@ MUST_USE_RESULT MaybeObject* ElementsAccessorBase<ElementsAccessorSubclass,
ElementsKindTraits>::
SetLengthImpl(JSObject* obj,
Object* length,
- typename ElementsKindTraits::BackingStore* backing_store) {
+ FixedArrayBase* backing_store) {
JSArray* array = JSArray::cast(obj);
// Fast case: The new length fits into a Smi.
@@ -1686,4 +1968,100 @@ MUST_USE_RESULT MaybeObject* ElementsAccessorBase<ElementsAccessorSubclass,
}
+MUST_USE_RESULT MaybeObject* ArrayConstructInitializeElements(
+ JSArray* array, Arguments* args) {
+ Heap* heap = array->GetIsolate()->heap();
+
+ // Optimize the case where there is one argument and the argument is a
+ // small smi.
+ if (args->length() == 1) {
+ Object* obj = (*args)[0];
+ if (obj->IsSmi()) {
+ int len = Smi::cast(obj)->value();
+ if (len > 0 && len < JSObject::kInitialMaxFastElementArray) {
+ ElementsKind elements_kind = array->GetElementsKind();
+ MaybeObject* maybe_array = array->Initialize(len, len);
+ if (maybe_array->IsFailure()) return maybe_array;
+
+ if (!IsFastHoleyElementsKind(elements_kind)) {
+ elements_kind = GetHoleyElementsKind(elements_kind);
+ maybe_array = array->TransitionElementsKind(elements_kind);
+ if (maybe_array->IsFailure()) return maybe_array;
+ }
+
+ return array;
+ } else if (len == 0) {
+ return array->Initialize(JSArray::kPreallocatedArrayElements);
+ }
+ }
+
+ // Take the argument as the length.
+ MaybeObject* maybe_obj = array->Initialize(0);
+ if (!maybe_obj->To(&obj)) return maybe_obj;
+
+ return array->SetElementsLength((*args)[0]);
+ }
+
+ // Optimize the case where there are no parameters passed.
+ if (args->length() == 0) {
+ return array->Initialize(JSArray::kPreallocatedArrayElements);
+ }
+
+ // Set length and elements on the array.
+ int number_of_elements = args->length();
+ MaybeObject* maybe_object =
+ array->EnsureCanContainElements(args, 0, number_of_elements,
+ ALLOW_CONVERTED_DOUBLE_ELEMENTS);
+ if (maybe_object->IsFailure()) return maybe_object;
+
+ // Allocate an appropriately typed elements array.
+ MaybeObject* maybe_elms;
+ ElementsKind elements_kind = array->GetElementsKind();
+ if (IsFastDoubleElementsKind(elements_kind)) {
+ maybe_elms = heap->AllocateUninitializedFixedDoubleArray(
+ number_of_elements);
+ } else {
+ maybe_elms = heap->AllocateFixedArrayWithHoles(number_of_elements);
+ }
+ FixedArrayBase* elms;
+ if (!maybe_elms->To(&elms)) return maybe_elms;
+
+ // Fill in the content
+ switch (array->GetElementsKind()) {
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_SMI_ELEMENTS: {
+ FixedArray* smi_elms = FixedArray::cast(elms);
+ for (int index = 0; index < number_of_elements; index++) {
+ smi_elms->set(index, (*args)[index], SKIP_WRITE_BARRIER);
+ }
+ break;
+ }
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_ELEMENTS: {
+ DisallowHeapAllocation no_gc;
+ WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
+ FixedArray* object_elms = FixedArray::cast(elms);
+ for (int index = 0; index < number_of_elements; index++) {
+ object_elms->set(index, (*args)[index], mode);
+ }
+ break;
+ }
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS: {
+ FixedDoubleArray* double_elms = FixedDoubleArray::cast(elms);
+ for (int index = 0; index < number_of_elements; index++) {
+ double_elms->set(index, (*args)[index]->Number());
+ }
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+
+ array->set_elements(elms);
+ array->set_length(Smi::FromInt(number_of_elements));
+ return array;
+}
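// Editorial note, not part of the patch: a rough summary of how the helper
// above dispatches on the constructor arguments it receives (the argument
// shapes below are illustrative):
//
//   new Array()          -> Initialize(JSArray::kPreallocatedArrayElements)
//   new Array(5)         -> Initialize(5, 5), then transition to a holey kind
//   new Array(huge_smi)  -> Initialize(0), then SetElementsLength(arg)
//   new Array(1, 2, 3)   -> allocate a 3-element backing store of the
//                           appropriate ElementsKind and copy the arguments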
+
} } // namespace v8::internal
diff --git a/deps/v8/src/elements.h b/deps/v8/src/elements.h
index 822fca50e..6353aaecf 100644
--- a/deps/v8/src/elements.h
+++ b/deps/v8/src/elements.h
@@ -71,6 +71,39 @@ class ElementsAccessor {
uint32_t key,
FixedArrayBase* backing_store = NULL) = 0;
+ // Returns an element's attributes, or ABSENT if there is no such
+ // element. This method doesn't iterate up the prototype chain. The caller
+ // can optionally pass in the backing store to use for the check, which must
+ // be compatible with the ElementsKind of the ElementsAccessor. If
+ // backing_store is NULL, the holder->elements() is used as the backing store.
+ MUST_USE_RESULT virtual PropertyAttributes GetAttributes(
+ Object* receiver,
+ JSObject* holder,
+ uint32_t key,
+ FixedArrayBase* backing_store = NULL) = 0;
+
+ // Returns an element's type, or NONEXISTENT if there is no such
+ // element. This method doesn't iterate up the prototype chain. The caller
+ // can optionally pass in the backing store to use for the check, which must
+ // be compatible with the ElementsKind of the ElementsAccessor. If
+ // backing_store is NULL, the holder->elements() is used as the backing store.
+ MUST_USE_RESULT virtual PropertyType GetType(
+ Object* receiver,
+ JSObject* holder,
+ uint32_t key,
+ FixedArrayBase* backing_store = NULL) = 0;
+
+  // Returns an element's accessor pair, or NULL if the element does not exist
+  // or is a plain data value. This method doesn't iterate up the prototype
+  // chain. The caller
+ // can optionally pass in the backing store to use for the check, which must
+ // be compatible with the ElementsKind of the ElementsAccessor. If
+ // backing_store is NULL, the holder->elements() is used as the backing store.
+ MUST_USE_RESULT virtual AccessorPair* GetAccessorPair(
+ Object* receiver,
+ JSObject* holder,
+ uint32_t key,
+ FixedArrayBase* backing_store = NULL) = 0;
+
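  // Editorial sketch, not part of the patch: one way a caller might combine
  // the three introspection methods above, using ElementsAccessor::ForArray
  // to pick the accessor that matches a given backing store:
  //
  //   FixedArrayBase* store = holder->elements();
  //   ElementsAccessor* accessor = ElementsAccessor::ForArray(store);
  //   if (accessor->GetAttributes(receiver, holder, key, store) != ABSENT) {
  //     AccessorPair* pair =
  //         accessor->GetAccessorPair(receiver, holder, key, store);
  //     // NULL here means the element exists but is a plain data value.
  //   }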
// Modifies the length data property as specified for JSArrays and resizes the
// underlying backing store accordingly. The method honors the semantics of
// changing array sizes as defined in EcmaScript 5.1 15.4.5.2, i.e. array that
@@ -110,17 +143,17 @@ class ElementsAccessor {
MUST_USE_RESULT virtual MaybeObject* CopyElements(
JSObject* source_holder,
uint32_t source_start,
+ ElementsKind source_kind,
FixedArrayBase* destination,
- ElementsKind destination_kind,
uint32_t destination_start,
int copy_size,
FixedArrayBase* source = NULL) = 0;
MUST_USE_RESULT MaybeObject* CopyElements(JSObject* from_holder,
FixedArrayBase* to,
- ElementsKind to_kind,
+ ElementsKind from_kind,
FixedArrayBase* from = NULL) {
- return CopyElements(from_holder, 0, to, to_kind, 0,
+ return CopyElements(from_holder, 0, from_kind, to, 0,
kCopyToEndAndInitializeToHole, from);
}
@@ -164,15 +197,11 @@ class ElementsAccessor {
DISALLOW_COPY_AND_ASSIGN(ElementsAccessor);
};
+void CheckArrayAbuse(JSObject* obj, const char* op, uint32_t key,
+ bool allow_appending = false);
-void CopyObjectToObjectElements(FixedArray* from_obj,
- ElementsKind from_kind,
- uint32_t from_start,
- FixedArray* to_obj,
- ElementsKind to_kind,
- uint32_t to_start,
- int copy_size);
-
+MUST_USE_RESULT MaybeObject* ArrayConstructInitializeElements(
+ JSArray* array, Arguments* args);
} } // namespace v8::internal
diff --git a/deps/v8/src/execution.cc b/deps/v8/src/execution.cc
index 89091ba42..979641a9d 100644
--- a/deps/v8/src/execution.cc
+++ b/deps/v8/src/execution.cc
@@ -33,6 +33,7 @@
#include "bootstrapper.h"
#include "codegen.h"
#include "debug.h"
+#include "deoptimizer.h"
#include "isolate-inl.h"
#include "runtime-profiler.h"
#include "simulator.h"
@@ -75,7 +76,7 @@ static Handle<Object> Invoke(bool is_construct,
Isolate* isolate = function->GetIsolate();
// Entering JavaScript.
- VMState state(isolate, JS);
+ VMState<JS> state(isolate);
// Placeholder for return value.
MaybeObject* value = reinterpret_cast<Object*>(kZapValue);
@@ -106,7 +107,7 @@ static Handle<Object> Invoke(bool is_construct,
// Save and restore context around invocation and block the
// allocation of handles without explicit handle scopes.
SaveContext save(isolate);
- NoHandleAllocation na;
+ SealHandleScope shs(isolate);
JSEntryFunction stub_entry = FUNCTION_CAST<JSEntryFunction>(code->entry());
// Call the function through the right JS entry stub.
@@ -124,10 +125,10 @@ static Handle<Object> Invoke(bool is_construct,
// Update the pending exception flag and return the value.
*has_pending_exception = value->IsException();
- ASSERT(*has_pending_exception == Isolate::Current()->has_pending_exception());
+ ASSERT(*has_pending_exception == isolate->has_pending_exception());
if (*has_pending_exception) {
isolate->ReportPendingMessages();
- if (isolate->pending_exception() == Failure::OutOfMemoryException()) {
+ if (isolate->pending_exception()->IsOutOfMemory()) {
if (!isolate->ignore_out_of_memory()) {
V8::FatalProcessOutOfMemory("JS", true);
}
@@ -147,7 +148,8 @@ static Handle<Object> Invoke(bool is_construct,
}
-Handle<Object> Execution::Call(Handle<Object> callable,
+Handle<Object> Execution::Call(Isolate* isolate,
+ Handle<Object> callable,
Handle<Object> receiver,
int argc,
Handle<Object> argv[],
@@ -156,7 +158,7 @@ Handle<Object> Execution::Call(Handle<Object> callable,
*pending_exception = false;
if (!callable->IsJSFunction()) {
- callable = TryGetFunctionDelegate(callable, pending_exception);
+ callable = TryGetFunctionDelegate(isolate, callable, pending_exception);
if (*pending_exception) return callable;
}
Handle<JSFunction> func = Handle<JSFunction>::cast(callable);
@@ -169,9 +171,11 @@ Handle<Object> Execution::Call(Handle<Object> callable,
// Under some circumstances, 'global' can be the JSBuiltinsObject
// In that case, don't rewrite. (FWIW, the same holds for
// GetIsolate()->global_object()->global_receiver().)
- if (!global->IsJSBuiltinsObject()) receiver = Handle<Object>(global);
+ if (!global->IsJSBuiltinsObject()) {
+ receiver = Handle<Object>(global, func->GetIsolate());
+ }
} else {
- receiver = ToObject(receiver, pending_exception);
+ receiver = ToObject(isolate, receiver, pending_exception);
}
if (*pending_exception) return callable;
}
@@ -184,7 +188,7 @@ Handle<Object> Execution::New(Handle<JSFunction> func,
int argc,
Handle<Object> argv[],
bool* pending_exception) {
- return Invoke(true, func, Isolate::Current()->global_object(), argc, argv,
+ return Invoke(true, func, func->GetIsolate()->global_object(), argc, argv,
pending_exception);
}
@@ -203,14 +207,19 @@ Handle<Object> Execution::TryCall(Handle<JSFunction> func,
catcher.SetCaptureMessage(false);
*caught_exception = false;
+  // Get the isolate now, because the handle might be persistent
+  // and get destroyed in the next call.
+ Isolate* isolate = func->GetIsolate();
Handle<Object> result = Invoke(false, func, receiver, argc, args,
caught_exception);
if (*caught_exception) {
ASSERT(catcher.HasCaught());
- Isolate* isolate = Isolate::Current();
ASSERT(isolate->has_pending_exception());
ASSERT(isolate->external_caught_exception());
+ if (isolate->is_out_of_memory() && !isolate->ignore_out_of_memory()) {
+ V8::FatalProcessOutOfMemory("OOM during Execution::TryCall");
+ }
if (isolate->pending_exception() ==
isolate->heap()->termination_exception()) {
result = isolate->factory()->termination_exception();
@@ -220,15 +229,15 @@ Handle<Object> Execution::TryCall(Handle<JSFunction> func,
isolate->OptionalRescheduleException(true);
}
- ASSERT(!Isolate::Current()->has_pending_exception());
- ASSERT(!Isolate::Current()->external_caught_exception());
+ ASSERT(!isolate->has_pending_exception());
+ ASSERT(!isolate->external_caught_exception());
return result;
}
-Handle<Object> Execution::GetFunctionDelegate(Handle<Object> object) {
+Handle<Object> Execution::GetFunctionDelegate(Isolate* isolate,
+ Handle<Object> object) {
ASSERT(!object->IsJSFunction());
- Isolate* isolate = Isolate::Current();
Factory* factory = isolate->factory();
// If you return a function from here, it will be called when an
@@ -239,7 +248,7 @@ Handle<Object> Execution::GetFunctionDelegate(Handle<Object> object) {
while (fun->IsJSFunctionProxy()) {
fun = JSFunctionProxy::cast(fun)->call_trap();
}
- if (fun->IsJSFunction()) return Handle<Object>(fun);
+ if (fun->IsJSFunction()) return Handle<Object>(fun, isolate);
// Objects created through the API can have an instance-call handler
// that should be used when calling the object as a function.
@@ -253,17 +262,17 @@ Handle<Object> Execution::GetFunctionDelegate(Handle<Object> object) {
}
-Handle<Object> Execution::TryGetFunctionDelegate(Handle<Object> object,
+Handle<Object> Execution::TryGetFunctionDelegate(Isolate* isolate,
+ Handle<Object> object,
bool* has_pending_exception) {
ASSERT(!object->IsJSFunction());
- Isolate* isolate = Isolate::Current();
// If object is a function proxy, get its handler. Iterate if necessary.
Object* fun = *object;
while (fun->IsJSFunctionProxy()) {
fun = JSFunctionProxy::cast(fun)->call_trap();
}
- if (fun->IsJSFunction()) return Handle<Object>(fun);
+ if (fun->IsJSFunction()) return Handle<Object>(fun, isolate);
// Objects created through the API can have an instance-call handler
// that should be used when calling the object as a function.
@@ -284,9 +293,9 @@ Handle<Object> Execution::TryGetFunctionDelegate(Handle<Object> object,
}
-Handle<Object> Execution::GetConstructorDelegate(Handle<Object> object) {
+Handle<Object> Execution::GetConstructorDelegate(Isolate* isolate,
+ Handle<Object> object) {
ASSERT(!object->IsJSFunction());
- Isolate* isolate = Isolate::Current();
// If you return a function from here, it will be called when an
// attempt is made to call the given object as a constructor.
@@ -296,7 +305,7 @@ Handle<Object> Execution::GetConstructorDelegate(Handle<Object> object) {
while (fun->IsJSFunctionProxy()) {
fun = JSFunctionProxy::cast(fun)->call_trap();
}
- if (fun->IsJSFunction()) return Handle<Object>(fun);
+ if (fun->IsJSFunction()) return Handle<Object>(fun, isolate);
// Objects created through the API can have an instance-call handler
// that should be used when calling the object as a function.
@@ -311,10 +320,10 @@ Handle<Object> Execution::GetConstructorDelegate(Handle<Object> object) {
Handle<Object> Execution::TryGetConstructorDelegate(
+ Isolate* isolate,
Handle<Object> object,
bool* has_pending_exception) {
ASSERT(!object->IsJSFunction());
- Isolate* isolate = Isolate::Current();
// If you return a function from here, it will be called when an
// attempt is made to call the given object as a constructor.
@@ -324,7 +333,7 @@ Handle<Object> Execution::TryGetConstructorDelegate(
while (fun->IsJSFunctionProxy()) {
fun = JSFunctionProxy::cast(fun)->call_trap();
}
- if (fun->IsJSFunction()) return Handle<Object>(fun);
+ if (fun->IsJSFunction()) return Handle<Object>(fun, isolate);
// Objects created through the API can have an instance-call handler
// that should be used when calling the object as a function.
@@ -420,6 +429,13 @@ bool StackGuard::IsTerminateExecution() {
}
+void StackGuard::CancelTerminateExecution() {
+ ExecutionAccess access(isolate_);
+ Continue(TERMINATE);
+ isolate_->CancelTerminateExecution();
+}
+
+
void StackGuard::TerminateExecution() {
ExecutionAccess access(isolate_);
thread_local_.interrupt_flags_ |= TERMINATE;
@@ -427,57 +443,48 @@ void StackGuard::TerminateExecution() {
}
-bool StackGuard::IsRuntimeProfilerTick() {
+bool StackGuard::IsGCRequest() {
ExecutionAccess access(isolate_);
- return (thread_local_.interrupt_flags_ & RUNTIME_PROFILER_TICK) != 0;
+ return (thread_local_.interrupt_flags_ & GC_REQUEST) != 0;
}
-void StackGuard::RequestRuntimeProfilerTick() {
- // Ignore calls if we're not optimizing or if we can't get the lock.
- if (FLAG_opt && ExecutionAccess::TryLock(isolate_)) {
- thread_local_.interrupt_flags_ |= RUNTIME_PROFILER_TICK;
- if (thread_local_.postpone_interrupts_nesting_ == 0) {
- thread_local_.jslimit_ = thread_local_.climit_ = kInterruptLimit;
- isolate_->heap()->SetStackLimits();
- }
- ExecutionAccess::Unlock(isolate_);
+void StackGuard::RequestGC() {
+ ExecutionAccess access(isolate_);
+ thread_local_.interrupt_flags_ |= GC_REQUEST;
+ if (thread_local_.postpone_interrupts_nesting_ == 0) {
+ thread_local_.jslimit_ = thread_local_.climit_ = kInterruptLimit;
+ isolate_->heap()->SetStackLimits();
}
}
-void StackGuard::RequestCodeReadyEvent() {
- ASSERT(FLAG_parallel_recompilation);
- if (ExecutionAccess::TryLock(isolate_)) {
- thread_local_.interrupt_flags_ |= CODE_READY;
- if (thread_local_.postpone_interrupts_nesting_ == 0) {
- thread_local_.jslimit_ = thread_local_.climit_ = kInterruptLimit;
- isolate_->heap()->SetStackLimits();
- }
- ExecutionAccess::Unlock(isolate_);
- }
+bool StackGuard::IsInstallCodeRequest() {
+ ExecutionAccess access(isolate_);
+ return (thread_local_.interrupt_flags_ & INSTALL_CODE) != 0;
}
-bool StackGuard::IsCodeReadyEvent() {
+void StackGuard::RequestInstallCode() {
ExecutionAccess access(isolate_);
- return (thread_local_.interrupt_flags_ & CODE_READY) != 0;
+ thread_local_.interrupt_flags_ |= INSTALL_CODE;
+ if (thread_local_.postpone_interrupts_nesting_ == 0) {
+ thread_local_.jslimit_ = thread_local_.climit_ = kInterruptLimit;
+ isolate_->heap()->SetStackLimits();
+ }
}
-bool StackGuard::IsGCRequest() {
+bool StackGuard::IsFullDeopt() {
ExecutionAccess access(isolate_);
- return (thread_local_.interrupt_flags_ & GC_REQUEST) != 0;
+ return (thread_local_.interrupt_flags_ & FULL_DEOPT) != 0;
}
-void StackGuard::RequestGC() {
+void StackGuard::FullDeopt() {
ExecutionAccess access(isolate_);
- thread_local_.interrupt_flags_ |= GC_REQUEST;
- if (thread_local_.postpone_interrupts_nesting_ == 0) {
- thread_local_.jslimit_ = thread_local_.climit_ = kInterruptLimit;
- isolate_->heap()->SetStackLimits();
- }
+ thread_local_.interrupt_flags_ |= FULL_DEOPT;
+ set_interrupt_limits(access);
}
@@ -521,7 +528,7 @@ void StackGuard::Continue(InterruptFlag after_what) {
char* StackGuard::ArchiveStackGuard(char* to) {
ExecutionAccess access(isolate_);
- memcpy(to, reinterpret_cast<char*>(&thread_local_), sizeof(ThreadLocal));
+ OS::MemCopy(to, reinterpret_cast<char*>(&thread_local_), sizeof(ThreadLocal));
ThreadLocal blank;
// Set the stack limits using the old thread_local_.
@@ -538,7 +545,8 @@ char* StackGuard::ArchiveStackGuard(char* to) {
char* StackGuard::RestoreStackGuard(char* from) {
ExecutionAccess access(isolate_);
- memcpy(reinterpret_cast<char*>(&thread_local_), from, sizeof(ThreadLocal));
+ OS::MemCopy(
+ reinterpret_cast<char*>(&thread_local_), from, sizeof(ThreadLocal));
isolate_->heap()->SetStackLimits();
return from + sizeof(ThreadLocal);
}
@@ -605,70 +613,61 @@ void StackGuard::InitThread(const ExecutionAccess& lock) {
#define RETURN_NATIVE_CALL(name, args, has_pending_exception) \
do { \
- Isolate* isolate = Isolate::Current(); \
Handle<Object> argv[] = args; \
ASSERT(has_pending_exception != NULL); \
- return Call(isolate->name##_fun(), \
+ return Call(isolate, \
+ isolate->name##_fun(), \
isolate->js_builtins_object(), \
ARRAY_SIZE(argv), argv, \
has_pending_exception); \
} while (false)
-Handle<Object> Execution::ToBoolean(Handle<Object> obj) {
- // See the similar code in runtime.js:ToBoolean.
- if (obj->IsBoolean()) return obj;
- bool result = true;
- if (obj->IsString()) {
- result = Handle<String>::cast(obj)->length() != 0;
- } else if (obj->IsNull() || obj->IsUndefined()) {
- result = false;
- } else if (obj->IsNumber()) {
- double value = obj->Number();
- result = !((value == 0) || isnan(value));
- }
- return Handle<Object>(HEAP->ToBoolean(result));
-}
-
-
-Handle<Object> Execution::ToNumber(Handle<Object> obj, bool* exc) {
+Handle<Object> Execution::ToNumber(
+ Isolate* isolate, Handle<Object> obj, bool* exc) {
RETURN_NATIVE_CALL(to_number, { obj }, exc);
}
-Handle<Object> Execution::ToString(Handle<Object> obj, bool* exc) {
+Handle<Object> Execution::ToString(
+ Isolate* isolate, Handle<Object> obj, bool* exc) {
RETURN_NATIVE_CALL(to_string, { obj }, exc);
}
-Handle<Object> Execution::ToDetailString(Handle<Object> obj, bool* exc) {
+Handle<Object> Execution::ToDetailString(
+ Isolate* isolate, Handle<Object> obj, bool* exc) {
RETURN_NATIVE_CALL(to_detail_string, { obj }, exc);
}
-Handle<Object> Execution::ToObject(Handle<Object> obj, bool* exc) {
+Handle<Object> Execution::ToObject(
+ Isolate* isolate, Handle<Object> obj, bool* exc) {
if (obj->IsSpecObject()) return obj;
RETURN_NATIVE_CALL(to_object, { obj }, exc);
}
-Handle<Object> Execution::ToInteger(Handle<Object> obj, bool* exc) {
+Handle<Object> Execution::ToInteger(
+ Isolate* isolate, Handle<Object> obj, bool* exc) {
RETURN_NATIVE_CALL(to_integer, { obj }, exc);
}
-Handle<Object> Execution::ToUint32(Handle<Object> obj, bool* exc) {
+Handle<Object> Execution::ToUint32(
+ Isolate* isolate, Handle<Object> obj, bool* exc) {
RETURN_NATIVE_CALL(to_uint32, { obj }, exc);
}
-Handle<Object> Execution::ToInt32(Handle<Object> obj, bool* exc) {
+Handle<Object> Execution::ToInt32(
+ Isolate* isolate, Handle<Object> obj, bool* exc) {
RETURN_NATIVE_CALL(to_int32, { obj }, exc);
}
-Handle<Object> Execution::NewDate(double time, bool* exc) {
- Handle<Object> time_obj = FACTORY->NewNumber(time);
+Handle<Object> Execution::NewDate(Isolate* isolate, double time, bool* exc) {
+ Handle<Object> time_obj = isolate->factory()->NewNumber(time);
RETURN_NATIVE_CALL(create_date, { time_obj }, exc);
}
@@ -697,9 +696,8 @@ Handle<Object> Execution::CharAt(Handle<String> string, uint32_t index) {
return factory->undefined_value();
}
- Handle<Object> char_at =
- GetProperty(isolate->js_builtins_object(),
- factory->char_at_symbol());
+ Handle<Object> char_at = GetProperty(
+ isolate, isolate->js_builtins_object(), factory->char_at_string());
if (!char_at->IsJSFunction()) {
return factory->undefined_value();
}
@@ -723,15 +721,18 @@ Handle<JSFunction> Execution::InstantiateFunction(
Handle<FunctionTemplateInfo> data,
bool* exc) {
Isolate* isolate = data->GetIsolate();
- // Fast case: see if the function has already been instantiated
- int serial_number = Smi::cast(data->serial_number())->value();
- Object* elm =
- isolate->native_context()->function_cache()->
- GetElementNoExceptionThrown(serial_number);
- if (elm->IsJSFunction()) return Handle<JSFunction>(JSFunction::cast(elm));
+ if (!data->do_not_cache()) {
+ // Fast case: see if the function has already been instantiated
+ int serial_number = Smi::cast(data->serial_number())->value();
+ Object* elm =
+ isolate->native_context()->function_cache()->
+ GetElementNoExceptionThrown(isolate, serial_number);
+ if (elm->IsJSFunction()) return Handle<JSFunction>(JSFunction::cast(elm));
+ }
// The function has not yet been instantiated in this context; do it.
Handle<Object> args[] = { data };
- Handle<Object> result = Call(isolate->instantiate_fun(),
+ Handle<Object> result = Call(isolate,
+ isolate->instantiate_fun(),
isolate->js_builtins_object(),
ARRAY_SIZE(args),
args,
@@ -763,7 +764,8 @@ Handle<JSObject> Execution::InstantiateObject(Handle<ObjectTemplateInfo> data,
return Handle<JSObject>(JSObject::cast(result));
} else {
Handle<Object> args[] = { data };
- Handle<Object> result = Call(isolate->instantiate_fun(),
+ Handle<Object> result = Call(isolate,
+ isolate->instantiate_fun(),
isolate->js_builtins_object(),
ARRAY_SIZE(args),
args,
@@ -774,12 +776,13 @@ Handle<JSObject> Execution::InstantiateObject(Handle<ObjectTemplateInfo> data,
}
-void Execution::ConfigureInstance(Handle<Object> instance,
+void Execution::ConfigureInstance(Isolate* isolate,
+ Handle<Object> instance,
Handle<Object> instance_template,
bool* exc) {
- Isolate* isolate = Isolate::Current();
Handle<Object> args[] = { instance, instance_template };
- Execution::Call(isolate->configure_instance_fun(),
+ Execution::Call(isolate,
+ isolate->configure_instance_fun(),
isolate->js_builtins_object(),
ARRAY_SIZE(args),
args,
@@ -800,16 +803,14 @@ Handle<String> Execution::GetStackTraceLine(Handle<Object> recv,
args,
&caught_exception);
if (caught_exception || !result->IsString()) {
- return isolate->factory()->empty_symbol();
+ return isolate->factory()->empty_string();
}
return Handle<String>::cast(result);
}
-static Object* RuntimePreempt() {
- Isolate* isolate = Isolate::Current();
-
+static Object* RuntimePreempt(Isolate* isolate) {
// Clear the preempt request flag.
isolate->stack_guard()->Continue(PREEMPT);
@@ -838,9 +839,7 @@ static Object* RuntimePreempt() {
#ifdef ENABLE_DEBUGGER_SUPPORT
-Object* Execution::DebugBreakHelper() {
- Isolate* isolate = Isolate::Current();
-
+Object* Execution::DebugBreakHelper(Isolate* isolate) {
// Just continue if breaks are disabled.
if (isolate->debug()->disable_break()) {
return isolate->heap()->undefined_value();
@@ -886,14 +885,15 @@ Object* Execution::DebugBreakHelper() {
// Clear the debug break request flag.
isolate->stack_guard()->Continue(DEBUGBREAK);
- ProcessDebugMessages(debug_command_only);
+ ProcessDebugMessages(isolate, debug_command_only);
// Return to continue execution.
return isolate->heap()->undefined_value();
}
-void Execution::ProcessDebugMessages(bool debug_command_only) {
- Isolate* isolate = Isolate::Current();
+
+void Execution::ProcessDebugMessages(Isolate* isolate,
+ bool debug_command_only) {
// Clear the debug command request flag.
isolate->stack_guard()->Continue(DEBUGCOMMAND);
@@ -904,7 +904,7 @@ void Execution::ProcessDebugMessages(bool debug_command_only) {
HandleScope scope(isolate);
// Enter the debugger. Just continue if we fail to enter the debugger.
- EnterDebugger debugger;
+ EnterDebugger debugger(isolate);
if (debugger.FailedToEnter()) {
return;
}
@@ -930,31 +930,14 @@ MaybeObject* Execution::HandleStackGuardInterrupt(Isolate* isolate) {
stack_guard->Continue(GC_REQUEST);
}
- if (stack_guard->IsCodeReadyEvent()) {
- ASSERT(FLAG_parallel_recompilation);
- if (FLAG_trace_parallel_recompilation) {
- PrintF(" ** CODE_READY event received.\n");
- }
- stack_guard->Continue(CODE_READY);
- }
- if (!stack_guard->IsTerminateExecution()) {
- isolate->optimizing_compiler_thread()->InstallOptimizedFunctions();
- }
-
isolate->counters()->stack_interrupts()->Increment();
- // If FLAG_count_based_interrupts, every interrupt is a profiler interrupt.
- if (FLAG_count_based_interrupts ||
- stack_guard->IsRuntimeProfilerTick()) {
- isolate->counters()->runtime_profiler_ticks()->Increment();
- stack_guard->Continue(RUNTIME_PROFILER_TICK);
- isolate->runtime_profiler()->OptimizeNow();
- }
+ isolate->counters()->runtime_profiler_ticks()->Increment();
#ifdef ENABLE_DEBUGGER_SUPPORT
if (stack_guard->IsDebugBreak() || stack_guard->IsDebugCommand()) {
- DebugBreakHelper();
+ DebugBreakHelper(isolate);
}
#endif
- if (stack_guard->IsPreempted()) RuntimePreempt();
+ if (stack_guard->IsPreempted()) RuntimePreempt(isolate);
if (stack_guard->IsTerminateExecution()) {
stack_guard->Continue(TERMINATE);
return isolate->TerminateExecution();
@@ -963,6 +946,16 @@ MaybeObject* Execution::HandleStackGuardInterrupt(Isolate* isolate) {
stack_guard->Continue(INTERRUPT);
return isolate->StackOverflow();
}
+ if (stack_guard->IsFullDeopt()) {
+ stack_guard->Continue(FULL_DEOPT);
+ Deoptimizer::DeoptimizeAll(isolate);
+ }
+ if (stack_guard->IsInstallCodeRequest()) {
+ ASSERT(FLAG_concurrent_recompilation);
+ stack_guard->Continue(INSTALL_CODE);
+ isolate->optimizing_compiler_thread()->InstallOptimizedFunctions();
+ }
+ isolate->runtime_profiler()->OptimizeNow();
return isolate->heap()->undefined_value();
}
diff --git a/deps/v8/src/execution.h b/deps/v8/src/execution.h
index 9f5d9ff2c..371ea309d 100644
--- a/deps/v8/src/execution.h
+++ b/deps/v8/src/execution.h
@@ -41,9 +41,9 @@ enum InterruptFlag {
DEBUGCOMMAND = 1 << 2,
PREEMPT = 1 << 3,
TERMINATE = 1 << 4,
- RUNTIME_PROFILER_TICK = 1 << 5,
- GC_REQUEST = 1 << 6,
- CODE_READY = 1 << 7
+ GC_REQUEST = 1 << 5,
+ FULL_DEOPT = 1 << 6,
+ INSTALL_CODE = 1 << 7
};
@@ -63,7 +63,8 @@ class Execution : public AllStatic {
// and the function called is not in strict mode, receiver is converted to
// an object.
//
- static Handle<Object> Call(Handle<Object> callable,
+ static Handle<Object> Call(Isolate* isolate,
+ Handle<Object> callable,
Handle<Object> receiver,
int argc,
Handle<Object> argv[],
@@ -92,32 +93,37 @@ class Execution : public AllStatic {
Handle<Object> argv[],
bool* caught_exception);
- // ECMA-262 9.2
- static Handle<Object> ToBoolean(Handle<Object> obj);
-
// ECMA-262 9.3
- static Handle<Object> ToNumber(Handle<Object> obj, bool* exc);
+ static Handle<Object> ToNumber(
+ Isolate* isolate, Handle<Object> obj, bool* exc);
// ECMA-262 9.4
- static Handle<Object> ToInteger(Handle<Object> obj, bool* exc);
+ static Handle<Object> ToInteger(
+ Isolate* isolate, Handle<Object> obj, bool* exc);
// ECMA-262 9.5
- static Handle<Object> ToInt32(Handle<Object> obj, bool* exc);
+ static Handle<Object> ToInt32(
+ Isolate* isolate, Handle<Object> obj, bool* exc);
// ECMA-262 9.6
- static Handle<Object> ToUint32(Handle<Object> obj, bool* exc);
+ static Handle<Object> ToUint32(
+ Isolate* isolate, Handle<Object> obj, bool* exc);
// ECMA-262 9.8
- static Handle<Object> ToString(Handle<Object> obj, bool* exc);
+ static Handle<Object> ToString(
+ Isolate* isolate, Handle<Object> obj, bool* exc);
// ECMA-262 9.8
- static Handle<Object> ToDetailString(Handle<Object> obj, bool* exc);
+ static Handle<Object> ToDetailString(
+ Isolate* isolate, Handle<Object> obj, bool* exc);
// ECMA-262 9.9
- static Handle<Object> ToObject(Handle<Object> obj, bool* exc);
+ static Handle<Object> ToObject(
+ Isolate* isolate, Handle<Object> obj, bool* exc);
// Create a new date object from 'time'.
- static Handle<Object> NewDate(double time, bool* exc);
+ static Handle<Object> NewDate(
+ Isolate* isolate, double time, bool* exc);
// Create a new regular expression object from 'pattern' and 'flags'.
static Handle<JSRegExp> NewJSRegExp(Handle<String> pattern,
@@ -132,7 +138,8 @@ class Execution : public AllStatic {
Handle<FunctionTemplateInfo> data, bool* exc);
static Handle<JSObject> InstantiateObject(Handle<ObjectTemplateInfo> data,
bool* exc);
- static void ConfigureInstance(Handle<Object> instance,
+ static void ConfigureInstance(Isolate* isolate,
+ Handle<Object> instance,
Handle<Object> data,
bool* exc);
static Handle<String> GetStackTraceLine(Handle<Object> recv,
@@ -140,8 +147,8 @@ class Execution : public AllStatic {
Handle<Object> pos,
Handle<Object> is_global);
#ifdef ENABLE_DEBUGGER_SUPPORT
- static Object* DebugBreakHelper();
- static void ProcessDebugMessages(bool debug_command_only);
+ static Object* DebugBreakHelper(Isolate* isolate);
+ static void ProcessDebugMessages(Isolate* isolate, bool debug_command_only);
#endif
// If the stack guard is triggered, but it is not an actual
@@ -151,14 +158,18 @@ class Execution : public AllStatic {
// Get a function delegate (or undefined) for the given non-function
// object. Used for support calling objects as functions.
- static Handle<Object> GetFunctionDelegate(Handle<Object> object);
- static Handle<Object> TryGetFunctionDelegate(Handle<Object> object,
+ static Handle<Object> GetFunctionDelegate(Isolate* isolate,
+ Handle<Object> object);
+ static Handle<Object> TryGetFunctionDelegate(Isolate* isolate,
+ Handle<Object> object,
bool* has_pending_exception);
// Get a function delegate (or undefined) for the given non-function
// object. Used for support calling objects as constructors.
- static Handle<Object> GetConstructorDelegate(Handle<Object> object);
- static Handle<Object> TryGetConstructorDelegate(Handle<Object> object,
+ static Handle<Object> GetConstructorDelegate(Isolate* isolate,
+ Handle<Object> object);
+ static Handle<Object> TryGetConstructorDelegate(Isolate* isolate,
+ Handle<Object> object,
bool* has_pending_exception);
};
@@ -194,10 +205,7 @@ class StackGuard {
void Interrupt();
bool IsTerminateExecution();
void TerminateExecution();
- bool IsRuntimeProfilerTick();
- void RequestRuntimeProfilerTick();
- bool IsCodeReadyEvent();
- void RequestCodeReadyEvent();
+ void CancelTerminateExecution();
#ifdef ENABLE_DEBUGGER_SUPPORT
bool IsDebugBreak();
void DebugBreak();
@@ -206,6 +214,10 @@ class StackGuard {
#endif
bool IsGCRequest();
void RequestGC();
+ bool IsInstallCodeRequest();
+ void RequestInstallCode();
+ bool IsFullDeopt();
+ void FullDeopt();
void Continue(InterruptFlag after_what);
// This provides an asynchronous read of the stack limits for the current
@@ -258,7 +270,7 @@ class StackGuard {
void EnableInterrupts();
void DisableInterrupts();
-#ifdef V8_TARGET_ARCH_X64
+#if V8_TARGET_ARCH_X64
static const uintptr_t kInterruptLimit = V8_UINT64_C(0xfffffffffffffffe);
static const uintptr_t kIllegalLimit = V8_UINT64_C(0xfffffffffffffff8);
#else
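For orientation only (this block is not part of the patch): a minimal sketch of a call site using the re-threaded conversion helpers declared above. The function name ConvertToStringExample is hypothetical; it assumes code inside namespace v8::internal with an Isolate* and a Handle<Object> already in scope.

static void ConvertToStringExample(Isolate* isolate, Handle<Object> obj) {
  bool has_pending_exception = false;
  // The isolate is now passed explicitly; the old overloads that called
  // Isolate::Current() internally are gone.
  Handle<Object> number =
      Execution::ToNumber(isolate, obj, &has_pending_exception);
  if (has_pending_exception) return;  // Exception is left pending on the isolate.
  Execution::ToString(isolate, number, &has_pending_exception);
}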
diff --git a/deps/v8/src/extensions/externalize-string-extension.cc b/deps/v8/src/extensions/externalize-string-extension.cc
index 50d876136..9fdb194e4 100644
--- a/deps/v8/src/extensions/externalize-string-extension.cc
+++ b/deps/v8/src/extensions/externalize-string-extension.cc
@@ -63,44 +63,48 @@ const char* const ExternalizeStringExtension::kSource =
v8::Handle<v8::FunctionTemplate> ExternalizeStringExtension::GetNativeFunction(
v8::Handle<v8::String> str) {
- if (strcmp(*v8::String::AsciiValue(str), "externalizeString") == 0) {
+ if (strcmp(*v8::String::Utf8Value(str), "externalizeString") == 0) {
return v8::FunctionTemplate::New(ExternalizeStringExtension::Externalize);
} else {
- ASSERT(strcmp(*v8::String::AsciiValue(str), "isAsciiString") == 0);
+ ASSERT(strcmp(*v8::String::Utf8Value(str), "isAsciiString") == 0);
return v8::FunctionTemplate::New(ExternalizeStringExtension::IsAscii);
}
}
-v8::Handle<v8::Value> ExternalizeStringExtension::Externalize(
- const v8::Arguments& args) {
+void ExternalizeStringExtension::Externalize(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() < 1 || !args[0]->IsString()) {
- return v8::ThrowException(v8::String::New(
+ args.GetIsolate()->ThrowException(v8::String::New(
"First parameter to externalizeString() must be a string."));
+ return;
}
bool force_two_byte = false;
if (args.Length() >= 2) {
if (args[1]->IsBoolean()) {
force_two_byte = args[1]->BooleanValue();
} else {
- return v8::ThrowException(v8::String::New(
- "Second parameter to externalizeString() must be a boolean."));
+ args.GetIsolate()->ThrowException(v8::String::New(
+ "Second parameter to externalizeString() must be a boolean."));
+ return;
}
}
bool result = false;
Handle<String> string = Utils::OpenHandle(*args[0].As<v8::String>());
if (string->IsExternalString()) {
- return v8::ThrowException(v8::String::New(
+ args.GetIsolate()->ThrowException(v8::String::New(
"externalizeString() can't externalize twice."));
+ return;
}
- if (string->IsAsciiRepresentation() && !force_two_byte) {
- char* data = new char[string->length()];
+ if (string->IsOneByteRepresentation() && !force_two_byte) {
+ uint8_t* data = new uint8_t[string->length()];
String::WriteToFlat(*string, data, 0, string->length());
SimpleAsciiStringResource* resource = new SimpleAsciiStringResource(
- data, string->length());
+ reinterpret_cast<char*>(data), string->length());
result = string->MakeExternal(resource);
- if (result && !string->IsSymbol()) {
- HEAP->external_string_table()->AddString(*string);
+ if (result && !string->IsInternalizedString()) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(args.GetIsolate());
+ isolate->heap()->external_string_table()->AddString(*string);
}
if (!result) delete resource;
} else {
@@ -109,26 +113,30 @@ v8::Handle<v8::Value> ExternalizeStringExtension::Externalize(
SimpleTwoByteStringResource* resource = new SimpleTwoByteStringResource(
data, string->length());
result = string->MakeExternal(resource);
- if (result && !string->IsSymbol()) {
- HEAP->external_string_table()->AddString(*string);
+ if (result && !string->IsInternalizedString()) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(args.GetIsolate());
+ isolate->heap()->external_string_table()->AddString(*string);
}
if (!result) delete resource;
}
if (!result) {
- return v8::ThrowException(v8::String::New("externalizeString() failed."));
+ args.GetIsolate()->ThrowException(
+ v8::String::New("externalizeString() failed."));
+ return;
}
- return v8::Undefined();
}
-v8::Handle<v8::Value> ExternalizeStringExtension::IsAscii(
- const v8::Arguments& args) {
+void ExternalizeStringExtension::IsAscii(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1 || !args[0]->IsString()) {
- return v8::ThrowException(v8::String::New(
+ args.GetIsolate()->ThrowException(v8::String::New(
"isAsciiString() requires a single string argument."));
+ return;
}
- return Utils::OpenHandle(*args[0].As<v8::String>())->IsAsciiRepresentation() ?
- v8::True() : v8::False();
+ bool is_one_byte =
+ Utils::OpenHandle(*args[0].As<v8::String>())->IsOneByteRepresentation();
+ args.GetReturnValue().Set(is_one_byte);
}
diff --git a/deps/v8/src/extensions/externalize-string-extension.h b/deps/v8/src/extensions/externalize-string-extension.h
index b97b4962c..ecbc1cf44 100644
--- a/deps/v8/src/extensions/externalize-string-extension.h
+++ b/deps/v8/src/extensions/externalize-string-extension.h
@@ -38,8 +38,8 @@ class ExternalizeStringExtension : public v8::Extension {
ExternalizeStringExtension() : v8::Extension("v8/externalize", kSource) {}
virtual v8::Handle<v8::FunctionTemplate> GetNativeFunction(
v8::Handle<v8::String> name);
- static v8::Handle<v8::Value> Externalize(const v8::Arguments& args);
- static v8::Handle<v8::Value> IsAscii(const v8::Arguments& args);
+ static void Externalize(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void IsAscii(const v8::FunctionCallbackInfo<v8::Value>& args);
static void Register();
private:
static const char* const kSource;
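The signature change above follows the broader migration from v8::Arguments callbacks returning a v8::Handle<v8::Value> to void callbacks taking v8::FunctionCallbackInfo. A minimal sketch of the new shape with a hypothetical Echo callback (not from this patch): errors are raised via args.GetIsolate()->ThrowException() followed by a plain return, and results are delivered through args.GetReturnValue().

static void Echo(const v8::FunctionCallbackInfo<v8::Value>& args) {
  if (args.Length() < 1) {
    // Report the error on the isolate and return; the callback itself
    // no longer returns a value.
    args.GetIsolate()->ThrowException(
        v8::String::New("echo() requires at least one argument."));
    return;
  }
  // Hand the first argument straight back as the call's result.
  args.GetReturnValue().Set(args[0]);
}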
diff --git a/deps/v8/src/extensions/gc-extension.cc b/deps/v8/src/extensions/gc-extension.cc
index f921552aa..308879115 100644
--- a/deps/v8/src/extensions/gc-extension.cc
+++ b/deps/v8/src/extensions/gc-extension.cc
@@ -26,12 +26,11 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "gc-extension.h"
+#include "platform.h"
namespace v8 {
namespace internal {
-const char* const GCExtension::kSource = "native function gc();";
-
v8::Handle<v8::FunctionTemplate> GCExtension::GetNativeFunction(
v8::Handle<v8::String> str) {
@@ -39,14 +38,26 @@ v8::Handle<v8::FunctionTemplate> GCExtension::GetNativeFunction(
}
-v8::Handle<v8::Value> GCExtension::GC(const v8::Arguments& args) {
- HEAP->CollectAllGarbage(Heap::kNoGCFlags, "gc extension");
- return v8::Undefined();
+void GCExtension::GC(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(args.GetIsolate());
+ if (args[0]->BooleanValue()) {
+ isolate->heap()->CollectGarbage(NEW_SPACE, "gc extension");
+ } else {
+ isolate->heap()->CollectAllGarbage(Heap::kNoGCFlags, "gc extension");
+ }
}
void GCExtension::Register() {
- static GCExtension gc_extension;
+ static char buffer[50];
+ Vector<char> temp_vector(buffer, sizeof(buffer));
+ if (FLAG_expose_gc_as != NULL && strlen(FLAG_expose_gc_as) != 0) {
+ OS::SNPrintF(temp_vector, "native function %s();", FLAG_expose_gc_as);
+ } else {
+ OS::SNPrintF(temp_vector, "native function gc();");
+ }
+
+ static GCExtension gc_extension(buffer);
static v8::DeclareExtension declaration(&gc_extension);
}
diff --git a/deps/v8/src/extensions/gc-extension.h b/deps/v8/src/extensions/gc-extension.h
index 06ea4ed21..e412b92a4 100644
--- a/deps/v8/src/extensions/gc-extension.h
+++ b/deps/v8/src/extensions/gc-extension.h
@@ -35,13 +35,11 @@ namespace internal {
class GCExtension : public v8::Extension {
public:
- GCExtension() : v8::Extension("v8/gc", kSource) {}
+ explicit GCExtension(const char* source) : v8::Extension("v8/gc", source) {}
virtual v8::Handle<v8::FunctionTemplate> GetNativeFunction(
v8::Handle<v8::String> name);
- static v8::Handle<v8::Value> GC(const v8::Arguments& args);
+ static void GC(const v8::FunctionCallbackInfo<v8::Value>& args);
static void Register();
- private:
- static const char* const kSource;
};
} } // namespace v8::internal
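With kSource gone and the constructor taking an explicit source string, Register() above now decides the exposed function name at runtime: if the new FLAG_expose_gc_as flag holds a non-empty name, the declaration is built with OS::SNPrintF as "native function <name>();", otherwise it falls back to the plain gc(). Roughly, for a flag value of __forceGC (the name is only an example), the registered extension amounts to:

static GCExtension gc_extension("native function __forceGC();");
// JavaScript can then call __forceGC() where it previously called gc().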
diff --git a/deps/v8/src/extensions/statistics-extension.cc b/deps/v8/src/extensions/statistics-extension.cc
index 7ae090c98..651d99d45 100644
--- a/deps/v8/src/extensions/statistics-extension.cc
+++ b/deps/v8/src/extensions/statistics-extension.cc
@@ -36,7 +36,7 @@ const char* const StatisticsExtension::kSource =
v8::Handle<v8::FunctionTemplate> StatisticsExtension::GetNativeFunction(
v8::Handle<v8::String> str) {
- ASSERT(strcmp(*v8::String::AsciiValue(str), "getV8Statistics") == 0);
+ ASSERT(strcmp(*v8::String::Utf8Value(str), "getV8Statistics") == 0);
return v8::FunctionTemplate::New(StatisticsExtension::GetCounters);
}
@@ -58,9 +58,9 @@ static void AddNumber(v8::Local<v8::Object> object,
}
-v8::Handle<v8::Value> StatisticsExtension::GetCounters(
- const v8::Arguments& args) {
- Isolate* isolate = Isolate::Current();
+void StatisticsExtension::GetCounters(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ Isolate* isolate = reinterpret_cast<Isolate*>(args.GetIsolate());
Heap* heap = isolate->heap();
if (args.Length() > 0) { // GC if first argument evaluates to true.
@@ -133,6 +133,12 @@ v8::Handle<v8::Value> StatisticsExtension::GetCounters(
"cell_space_available_bytes");
AddNumber(result, heap->cell_space()->CommittedMemory(),
"cell_space_commited_bytes");
+ AddNumber(result, heap->property_cell_space()->Size(),
+ "property_cell_space_live_bytes");
+ AddNumber(result, heap->property_cell_space()->Available(),
+ "property_cell_space_available_bytes");
+ AddNumber(result, heap->property_cell_space()->CommittedMemory(),
+ "property_cell_space_commited_bytes");
AddNumber(result, heap->lo_space()->Size(),
"lo_space_live_bytes");
AddNumber(result, heap->lo_space()->Available(),
@@ -141,7 +147,7 @@ v8::Handle<v8::Value> StatisticsExtension::GetCounters(
"lo_space_commited_bytes");
AddNumber(result, heap->amount_of_external_allocated_memory(),
"amount_of_external_allocated_memory");
- return result;
+ args.GetReturnValue().Set(result);
}
diff --git a/deps/v8/src/extensions/statistics-extension.h b/deps/v8/src/extensions/statistics-extension.h
index 433c4cf68..bfd9c4134 100644
--- a/deps/v8/src/extensions/statistics-extension.h
+++ b/deps/v8/src/extensions/statistics-extension.h
@@ -38,7 +38,7 @@ class StatisticsExtension : public v8::Extension {
StatisticsExtension() : v8::Extension("v8/statistics", kSource) {}
virtual v8::Handle<v8::FunctionTemplate> GetNativeFunction(
v8::Handle<v8::String> name);
- static v8::Handle<v8::Value> GetCounters(const v8::Arguments& args);
+ static void GetCounters(const v8::FunctionCallbackInfo<v8::Value>& args);
static void Register();
private:
static const char* const kSource;
diff --git a/deps/v8/src/factory.cc b/deps/v8/src/factory.cc
index a2bb9391e..1dd246fc4 100644
--- a/deps/v8/src/factory.cc
+++ b/deps/v8/src/factory.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -31,6 +31,7 @@
#include "debug.h"
#include "execution.h"
#include "factory.h"
+#include "isolate-inl.h"
#include "macro-assembler.h"
#include "objects.h"
#include "objects-visiting.h"
@@ -41,6 +42,14 @@ namespace v8 {
namespace internal {
+Handle<Box> Factory::NewBox(Handle<Object> value, PretenureFlag pretenure) {
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateBox(*value, pretenure),
+ Box);
+}
+
+
Handle<FixedArray> Factory::NewFixedArray(int size, PretenureFlag pretenure) {
ASSERT(0 <= size);
CALL_HEAP_FUNCTION(
@@ -70,11 +79,27 @@ Handle<FixedDoubleArray> Factory::NewFixedDoubleArray(int size,
}
-Handle<StringDictionary> Factory::NewStringDictionary(int at_least_space_for) {
+Handle<ConstantPoolArray> Factory::NewConstantPoolArray(
+ int number_of_int64_entries,
+ int number_of_ptr_entries,
+ int number_of_int32_entries) {
+ ASSERT(number_of_int64_entries > 0 || number_of_ptr_entries > 0 ||
+ number_of_int32_entries > 0);
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateConstantPoolArray(number_of_int64_entries,
+ number_of_ptr_entries,
+ number_of_int32_entries),
+ ConstantPoolArray);
+}
+
+
+Handle<NameDictionary> Factory::NewNameDictionary(int at_least_space_for) {
ASSERT(0 <= at_least_space_for);
CALL_HEAP_FUNCTION(isolate(),
- StringDictionary::Allocate(at_least_space_for),
- StringDictionary);
+ NameDictionary::Allocate(isolate()->heap(),
+ at_least_space_for),
+ NameDictionary);
}
@@ -82,7 +107,8 @@ Handle<SeededNumberDictionary> Factory::NewSeededNumberDictionary(
int at_least_space_for) {
ASSERT(0 <= at_least_space_for);
CALL_HEAP_FUNCTION(isolate(),
- SeededNumberDictionary::Allocate(at_least_space_for),
+ SeededNumberDictionary::Allocate(isolate()->heap(),
+ at_least_space_for),
SeededNumberDictionary);
}
@@ -91,7 +117,8 @@ Handle<UnseededNumberDictionary> Factory::NewUnseededNumberDictionary(
int at_least_space_for) {
ASSERT(0 <= at_least_space_for);
CALL_HEAP_FUNCTION(isolate(),
- UnseededNumberDictionary::Allocate(at_least_space_for),
+ UnseededNumberDictionary::Allocate(isolate()->heap(),
+ at_least_space_for),
UnseededNumberDictionary);
}
@@ -99,7 +126,8 @@ Handle<UnseededNumberDictionary> Factory::NewUnseededNumberDictionary(
Handle<ObjectHashSet> Factory::NewObjectHashSet(int at_least_space_for) {
ASSERT(0 <= at_least_space_for);
CALL_HEAP_FUNCTION(isolate(),
- ObjectHashSet::Allocate(at_least_space_for),
+ ObjectHashSet::Allocate(isolate()->heap(),
+ at_least_space_for),
ObjectHashSet);
}
@@ -107,16 +135,30 @@ Handle<ObjectHashSet> Factory::NewObjectHashSet(int at_least_space_for) {
Handle<ObjectHashTable> Factory::NewObjectHashTable(int at_least_space_for) {
ASSERT(0 <= at_least_space_for);
CALL_HEAP_FUNCTION(isolate(),
- ObjectHashTable::Allocate(at_least_space_for),
+ ObjectHashTable::Allocate(isolate()->heap(),
+ at_least_space_for),
ObjectHashTable);
}
+Handle<WeakHashTable> Factory::NewWeakHashTable(int at_least_space_for) {
+ ASSERT(0 <= at_least_space_for);
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ WeakHashTable::Allocate(isolate()->heap(),
+ at_least_space_for,
+ WeakHashTable::USE_DEFAULT_MINIMUM_CAPACITY,
+ TENURED),
+ WeakHashTable);
+}
+
+
Handle<DescriptorArray> Factory::NewDescriptorArray(int number_of_descriptors,
int slack) {
ASSERT(0 <= number_of_descriptors);
CALL_HEAP_FUNCTION(isolate(),
- DescriptorArray::Allocate(number_of_descriptors, slack),
+ DescriptorArray::Allocate(
+ isolate(), number_of_descriptors, slack),
DescriptorArray);
}
@@ -126,7 +168,8 @@ Handle<DeoptimizationInputData> Factory::NewDeoptimizationInputData(
PretenureFlag pretenure) {
ASSERT(deopt_entry_count > 0);
CALL_HEAP_FUNCTION(isolate(),
- DeoptimizationInputData::Allocate(deopt_entry_count,
+ DeoptimizationInputData::Allocate(isolate(),
+ deopt_entry_count,
pretenure),
DeoptimizationInputData);
}
@@ -137,7 +180,8 @@ Handle<DeoptimizationOutputData> Factory::NewDeoptimizationOutputData(
PretenureFlag pretenure) {
ASSERT(deopt_entry_count > 0);
CALL_HEAP_FUNCTION(isolate(),
- DeoptimizationOutputData::Allocate(deopt_entry_count,
+ DeoptimizationOutputData::Allocate(isolate(),
+ deopt_entry_count,
pretenure),
DeoptimizationOutputData);
}
@@ -157,50 +201,50 @@ Handle<TypeFeedbackInfo> Factory::NewTypeFeedbackInfo() {
}
-// Symbols are created in the old generation (data space).
-Handle<String> Factory::LookupSymbol(Vector<const char> string) {
+// Internalized strings are created in the old generation (data space).
+Handle<String> Factory::InternalizeUtf8String(Vector<const char> string) {
CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->LookupSymbol(string),
+ isolate()->heap()->InternalizeUtf8String(string),
String);
}
-// Symbols are created in the old generation (data space).
-Handle<String> Factory::LookupSymbol(Handle<String> string) {
+
+// Internalized strings are created in the old generation (data space).
+Handle<String> Factory::InternalizeString(Handle<String> string) {
CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->LookupSymbol(*string),
+ isolate()->heap()->InternalizeString(*string),
String);
}
-Handle<String> Factory::LookupAsciiSymbol(Vector<const char> string) {
+
+Handle<String> Factory::InternalizeOneByteString(Vector<const uint8_t> string) {
CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->LookupAsciiSymbol(string),
+ isolate()->heap()->InternalizeOneByteString(string),
String);
}
-Handle<String> Factory::LookupAsciiSymbol(Handle<SeqAsciiString> string,
- int from,
- int length) {
+Handle<String> Factory::InternalizeOneByteString(
+ Handle<SeqOneByteString> string, int from, int length) {
CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->LookupAsciiSymbol(string,
- from,
- length),
+ isolate()->heap()->InternalizeOneByteString(
+ string, from, length),
String);
}
-Handle<String> Factory::LookupTwoByteSymbol(Vector<const uc16> string) {
+Handle<String> Factory::InternalizeTwoByteString(Vector<const uc16> string) {
CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->LookupTwoByteSymbol(string),
+ isolate()->heap()->InternalizeTwoByteString(string),
String);
}
-Handle<String> Factory::NewStringFromAscii(Vector<const char> string,
- PretenureFlag pretenure) {
+Handle<String> Factory::NewStringFromOneByte(Vector<const uint8_t> string,
+ PretenureFlag pretenure) {
CALL_HEAP_FUNCTION(
isolate(),
- isolate()->heap()->AllocateStringFromAscii(string, pretenure),
+ isolate()->heap()->AllocateStringFromOneByte(string, pretenure),
String);
}
@@ -222,12 +266,12 @@ Handle<String> Factory::NewStringFromTwoByte(Vector<const uc16> string,
}
-Handle<SeqAsciiString> Factory::NewRawAsciiString(int length,
+Handle<SeqOneByteString> Factory::NewRawOneByteString(int length,
PretenureFlag pretenure) {
CALL_HEAP_FUNCTION(
isolate(),
- isolate()->heap()->AllocateRawAsciiString(length, pretenure),
- SeqAsciiString);
+ isolate()->heap()->AllocateRawOneByteString(length, pretenure),
+ SeqOneByteString);
}
@@ -248,6 +292,32 @@ Handle<String> Factory::NewConsString(Handle<String> first,
}
+template<typename SinkChar, typename StringType>
+Handle<String> ConcatStringContent(Handle<StringType> result,
+ Handle<String> first,
+ Handle<String> second) {
+ DisallowHeapAllocation pointer_stays_valid;
+ SinkChar* sink = result->GetChars();
+ String::WriteToFlat(*first, sink, 0, first->length());
+ String::WriteToFlat(*second, sink + first->length(), 0, second->length());
+ return result;
+}
+
+
+Handle<String> Factory::NewFlatConcatString(Handle<String> first,
+ Handle<String> second) {
+ int total_length = first->length() + second->length();
+ if (first->IsOneByteRepresentationUnderneath() &&
+ second->IsOneByteRepresentationUnderneath()) {
+ return ConcatStringContent<uint8_t>(
+ NewRawOneByteString(total_length), first, second);
+ } else {
+ return ConcatStringContent<uc16>(
+ NewRawTwoByteString(total_length), first, second);
+ }
+}
+
+
Handle<String> Factory::NewSubString(Handle<String> str,
int begin,
int end) {
@@ -285,6 +355,14 @@ Handle<String> Factory::NewExternalStringFromTwoByte(
}
+Handle<Symbol> Factory::NewSymbol() {
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateSymbol(),
+ Symbol);
+}
+
+
Handle<Context> Factory::NewNativeContext() {
CALL_HEAP_FUNCTION(
isolate(),
@@ -363,9 +441,25 @@ Handle<Struct> Factory::NewStruct(InstanceType type) {
}
-Handle<AccessorInfo> Factory::NewAccessorInfo() {
- Handle<AccessorInfo> info =
- Handle<AccessorInfo>::cast(NewStruct(ACCESSOR_INFO_TYPE));
+Handle<DeclaredAccessorDescriptor> Factory::NewDeclaredAccessorDescriptor() {
+ return Handle<DeclaredAccessorDescriptor>::cast(
+ NewStruct(DECLARED_ACCESSOR_DESCRIPTOR_TYPE));
+}
+
+
+Handle<DeclaredAccessorInfo> Factory::NewDeclaredAccessorInfo() {
+ Handle<DeclaredAccessorInfo> info =
+ Handle<DeclaredAccessorInfo>::cast(
+ NewStruct(DECLARED_ACCESSOR_INFO_TYPE));
+ info->set_flag(0); // Must clear the flag, it was initialized as undefined.
+ return info;
+}
+
+
+Handle<ExecutableAccessorInfo> Factory::NewExecutableAccessorInfo() {
+ Handle<ExecutableAccessorInfo> info =
+ Handle<ExecutableAccessorInfo>::cast(
+ NewStruct(EXECUTABLE_ACCESSOR_INFO_TYPE));
info->set_flag(0); // Must clear the flag, it was initialized as undefined.
return info;
}
@@ -373,39 +467,27 @@ Handle<AccessorInfo> Factory::NewAccessorInfo() {
Handle<Script> Factory::NewScript(Handle<String> source) {
// Generate id for this script.
- int id;
Heap* heap = isolate()->heap();
- if (heap->last_script_id()->IsUndefined()) {
- // Script ids start from one.
- id = 1;
- } else {
- // Increment id, wrap when positive smi is exhausted.
- id = Smi::cast(heap->last_script_id())->value();
- id++;
- if (!Smi::IsValid(id)) {
- id = 0;
- }
- }
- heap->SetLastScriptId(Smi::FromInt(id));
+ int id = heap->last_script_id()->value() + 1;
+ if (!Smi::IsValid(id) || id < 0) id = 1;
+ heap->set_last_script_id(Smi::FromInt(id));
// Create and initialize script object.
Handle<Foreign> wrapper = NewForeign(0, TENURED);
Handle<Script> script = Handle<Script>::cast(NewStruct(SCRIPT_TYPE));
script->set_source(*source);
script->set_name(heap->undefined_value());
- script->set_id(heap->last_script_id());
+ script->set_id(Smi::FromInt(id));
script->set_line_offset(Smi::FromInt(0));
script->set_column_offset(Smi::FromInt(0));
script->set_data(heap->undefined_value());
script->set_context_data(heap->undefined_value());
script->set_type(Smi::FromInt(Script::TYPE_NORMAL));
- script->set_compilation_type(Smi::FromInt(Script::COMPILATION_TYPE_HOST));
- script->set_compilation_state(
- Smi::FromInt(Script::COMPILATION_STATE_INITIAL));
script->set_wrapper(*wrapper);
script->set_line_ends(heap->undefined_value());
script->set_eval_from_shared(heap->undefined_value());
script->set_eval_from_instructions_offset(Smi::FromInt(0));
+ script->set_flags(Smi::FromInt(0));
return script;
}
@@ -447,12 +529,36 @@ Handle<ExternalArray> Factory::NewExternalArray(int length,
}
-Handle<JSGlobalPropertyCell> Factory::NewJSGlobalPropertyCell(
- Handle<Object> value) {
+Handle<Cell> Factory::NewCell(Handle<Object> value) {
+ AllowDeferredHandleDereference convert_to_cell;
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateCell(*value),
+ Cell);
+}
+
+
+Handle<PropertyCell> Factory::NewPropertyCellWithHole() {
CALL_HEAP_FUNCTION(
isolate(),
- isolate()->heap()->AllocateJSGlobalPropertyCell(*value),
- JSGlobalPropertyCell);
+ isolate()->heap()->AllocatePropertyCell(),
+ PropertyCell);
+}
+
+
+Handle<PropertyCell> Factory::NewPropertyCell(Handle<Object> value) {
+ AllowDeferredHandleDereference convert_to_cell;
+ Handle<PropertyCell> cell = NewPropertyCellWithHole();
+ PropertyCell::SetValueInferType(cell, value);
+ return cell;
+}
+
+
+Handle<AllocationSite> Factory::NewAllocationSite() {
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateAllocationSite(),
+ AllocationSite);
}
@@ -525,12 +631,27 @@ Handle<FixedArray> Factory::CopyFixedArray(Handle<FixedArray> array) {
}
+Handle<FixedArray> Factory::CopySizeFixedArray(Handle<FixedArray> array,
+ int new_length,
+ PretenureFlag pretenure) {
+ CALL_HEAP_FUNCTION(isolate(),
+ array->CopySize(new_length, pretenure),
+ FixedArray);
+}
+
+
Handle<FixedDoubleArray> Factory::CopyFixedDoubleArray(
Handle<FixedDoubleArray> array) {
CALL_HEAP_FUNCTION(isolate(), array->Copy(), FixedDoubleArray);
}
+Handle<ConstantPoolArray> Factory::CopyConstantPoolArray(
+ Handle<ConstantPoolArray> array) {
+ CALL_HEAP_FUNCTION(isolate(), array->Copy(), ConstantPoolArray);
+}
+
+
Handle<JSFunction> Factory::BaseNewFunctionFromSharedFunctionInfo(
Handle<SharedFunctionInfo> function_info,
Handle<Map> function_map,
@@ -545,15 +666,22 @@ Handle<JSFunction> Factory::BaseNewFunctionFromSharedFunctionInfo(
}
+static Handle<Map> MapForNewFunction(Isolate* isolate,
+ Handle<SharedFunctionInfo> function_info) {
+ Context* context = isolate->context()->native_context();
+ int map_index = Context::FunctionMapIndex(function_info->language_mode(),
+ function_info->is_generator());
+ return Handle<Map>(Map::cast(context->get(map_index)));
+}
+
+
Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
Handle<SharedFunctionInfo> function_info,
Handle<Context> context,
PretenureFlag pretenure) {
Handle<JSFunction> result = BaseNewFunctionFromSharedFunctionInfo(
function_info,
- function_info->is_classic_mode()
- ? isolate()->function_map()
- : isolate()->strict_mode_function_map(),
+ MapForNewFunction(isolate(), function_info),
pretenure);
if (function_info->ic_age() != isolate()->heap()->global_ic_age()) {
@@ -582,12 +710,13 @@ Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
return result;
}
- if (V8::UseCrankshaft() &&
+ if (isolate()->use_crankshaft() &&
FLAG_always_opt &&
result->is_compiled() &&
!function_info->is_toplevel() &&
function_info->allows_lazy_compilation() &&
- !function_info->optimization_disabled()) {
+ !function_info->optimization_disabled() &&
+ !isolate()->DebuggerHasBreakPoints()) {
result->MarkForLazyRecompilation();
}
return result;
@@ -618,6 +747,14 @@ Handle<Object> Factory::NewNumberFromUint(uint32_t value,
}
+Handle<HeapNumber> Factory::NewHeapNumber(double value,
+ PretenureFlag pretenure) {
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateHeapNumber(value, pretenure), HeapNumber);
+}
+
+
Handle<JSObject> Factory::NewNeanderObject() {
CALL_HEAP_FUNCTION(
isolate(),
@@ -627,9 +764,9 @@ Handle<JSObject> Factory::NewNeanderObject() {
}
-Handle<Object> Factory::NewTypeError(const char* type,
+Handle<Object> Factory::NewTypeError(const char* message,
Vector< Handle<Object> > args) {
- return NewError("MakeTypeError", type, args);
+ return NewError("MakeTypeError", message, args);
}
@@ -638,9 +775,9 @@ Handle<Object> Factory::NewTypeError(Handle<String> message) {
}
-Handle<Object> Factory::NewRangeError(const char* type,
+Handle<Object> Factory::NewRangeError(const char* message,
Vector< Handle<Object> > args) {
- return NewError("MakeRangeError", type, args);
+ return NewError("MakeRangeError", message, args);
}
@@ -649,8 +786,9 @@ Handle<Object> Factory::NewRangeError(Handle<String> message) {
}
-Handle<Object> Factory::NewSyntaxError(const char* type, Handle<JSArray> args) {
- return NewError("MakeSyntaxError", type, args);
+Handle<Object> Factory::NewSyntaxError(const char* message,
+ Handle<JSArray> args) {
+ return NewError("MakeSyntaxError", message, args);
}
@@ -659,9 +797,9 @@ Handle<Object> Factory::NewSyntaxError(Handle<String> message) {
}
-Handle<Object> Factory::NewReferenceError(const char* type,
+Handle<Object> Factory::NewReferenceError(const char* message,
Vector< Handle<Object> > args) {
- return NewError("MakeReferenceError", type, args);
+ return NewError("MakeReferenceError", message, args);
}
@@ -670,32 +808,34 @@ Handle<Object> Factory::NewReferenceError(Handle<String> message) {
}
-Handle<Object> Factory::NewError(const char* maker, const char* type,
- Vector< Handle<Object> > args) {
- v8::HandleScope scope; // Instantiate a closeable HandleScope for EscapeFrom.
+Handle<Object> Factory::NewError(const char* maker,
+ const char* message,
+ Vector< Handle<Object> > args) {
+ // Instantiate a closeable HandleScope for EscapeFrom.
+ v8::HandleScope scope(reinterpret_cast<v8::Isolate*>(isolate()));
Handle<FixedArray> array = NewFixedArray(args.length());
for (int i = 0; i < args.length(); i++) {
array->set(i, *args[i]);
}
Handle<JSArray> object = NewJSArrayWithElements(array);
- Handle<Object> result = NewError(maker, type, object);
+ Handle<Object> result = NewError(maker, message, object);
return result.EscapeFrom(&scope);
}
-Handle<Object> Factory::NewEvalError(const char* type,
+Handle<Object> Factory::NewEvalError(const char* message,
Vector< Handle<Object> > args) {
- return NewError("MakeEvalError", type, args);
+ return NewError("MakeEvalError", message, args);
}
-Handle<Object> Factory::NewError(const char* type,
+Handle<Object> Factory::NewError(const char* message,
Vector< Handle<Object> > args) {
- return NewError("MakeError", type, args);
+ return NewError("MakeError", message, args);
}
-Handle<String> Factory::EmergencyNewError(const char* type,
+Handle<String> Factory::EmergencyNewError(const char* message,
Handle<JSArray> args) {
const int kBufferSize = 1000;
char buffer[kBufferSize];
@@ -703,8 +843,8 @@ Handle<String> Factory::EmergencyNewError(const char* type,
char* p = &buffer[0];
Vector<char> v(buffer, kBufferSize);
- OS::StrNCpy(v, type, space);
- space -= Min(space, strlen(type));
+ OS::StrNCpy(v, message, space);
+ space -= Min(space, strlen(message));
p = &buffer[kBufferSize] - space;
for (unsigned i = 0; i < ARRAY_SIZE(args); i++) {
@@ -712,7 +852,7 @@ Handle<String> Factory::EmergencyNewError(const char* type,
*p++ = ' ';
space--;
if (space > 0) {
- MaybeObject* maybe_arg = args->GetElement(i);
+ MaybeObject* maybe_arg = args->GetElement(isolate(), i);
Handle<String> arg_str(reinterpret_cast<String*>(maybe_arg));
const char* arg = *arg_str->ToCString();
Vector<char> v2(p, static_cast<int>(space));
@@ -733,19 +873,20 @@ Handle<String> Factory::EmergencyNewError(const char* type,
Handle<Object> Factory::NewError(const char* maker,
- const char* type,
+ const char* message,
Handle<JSArray> args) {
- Handle<String> make_str = LookupAsciiSymbol(maker);
+ Handle<String> make_str = InternalizeUtf8String(maker);
Handle<Object> fun_obj(
- isolate()->js_builtins_object()->GetPropertyNoExceptionThrown(*make_str));
+ isolate()->js_builtins_object()->GetPropertyNoExceptionThrown(*make_str),
+ isolate());
// If the builtins haven't been properly configured yet this error
// constructor may not have been defined. Bail out.
if (!fun_obj->IsJSFunction()) {
- return EmergencyNewError(type, args);
+ return EmergencyNewError(message, args);
}
Handle<JSFunction> fun = Handle<JSFunction>::cast(fun_obj);
- Handle<Object> type_obj = LookupAsciiSymbol(type);
- Handle<Object> argv[] = { type_obj, args };
+ Handle<Object> message_obj = InternalizeUtf8String(message);
+ Handle<Object> argv[] = { message_obj, args };
// Invoke the JavaScript factory method. If an exception is thrown while
// running the factory method, use the exception as the result.
@@ -766,7 +907,7 @@ Handle<Object> Factory::NewError(Handle<String> message) {
Handle<Object> Factory::NewError(const char* constructor,
Handle<String> message) {
- Handle<String> constr = LookupAsciiSymbol(constructor);
+ Handle<String> constr = InternalizeUtf8String(constructor);
Handle<JSFunction> fun = Handle<JSFunction>(
JSFunction::cast(isolate()->js_builtins_object()->
GetPropertyNoExceptionThrown(*constr)));
@@ -838,14 +979,7 @@ Handle<JSFunction> Factory::NewFunctionWithPrototype(Handle<String> name,
initial_map->set_constructor(*function);
}
- // Set function.prototype and give the prototype a constructor
- // property that refers to the function.
- SetPrototypeProperty(function, prototype);
- // Currently safe because it is only invoked from Genesis.
- CHECK_NOT_EMPTY_HANDLE(isolate(),
- JSObject::SetLocalPropertyIgnoreAttributes(
- prototype, constructor_symbol(),
- function, DONT_ENUM));
+ JSFunction::SetPrototype(function, prototype);
return function;
}
@@ -870,13 +1004,23 @@ Handle<ScopeInfo> Factory::NewScopeInfo(int length) {
}
+Handle<JSObject> Factory::NewExternal(void* value) {
+ CALL_HEAP_FUNCTION(isolate(),
+ isolate()->heap()->AllocateExternal(value),
+ JSObject);
+}
+
+
Handle<Code> Factory::NewCode(const CodeDesc& desc,
Code::Flags flags,
Handle<Object> self_ref,
- bool immovable) {
+ bool immovable,
+ bool crankshafted,
+ int prologue_offset) {
CALL_HEAP_FUNCTION(isolate(),
isolate()->heap()->CreateCode(
- desc, flags, self_ref, immovable),
+ desc, flags, self_ref, immovable, crankshafted,
+ prologue_offset),
Code);
}
@@ -895,9 +1039,9 @@ Handle<Code> Factory::CopyCode(Handle<Code> code, Vector<byte> reloc_info) {
}
-Handle<String> Factory::SymbolFromString(Handle<String> value) {
+Handle<String> Factory::InternalizedStringFromString(Handle<String> value) {
CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->LookupSymbol(*value), String);
+ isolate()->heap()->InternalizeString(*value), String);
}
@@ -917,19 +1061,86 @@ Handle<JSModule> Factory::NewJSModule(Handle<Context> context,
}
-Handle<GlobalObject> Factory::NewGlobalObject(
- Handle<JSFunction> constructor) {
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->AllocateGlobalObject(*constructor),
+// TODO(mstarzinger): Temporary wrapper until handlified.
+static Handle<NameDictionary> NameDictionaryAdd(Handle<NameDictionary> dict,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyDetails details) {
+ CALL_HEAP_FUNCTION(dict->GetIsolate(),
+ dict->Add(*name, *value, details),
+ NameDictionary);
+}
+
+
+static Handle<GlobalObject> NewGlobalObjectFromMap(Isolate* isolate,
+ Handle<Map> map) {
+ CALL_HEAP_FUNCTION(isolate,
+ isolate->heap()->Allocate(*map, OLD_POINTER_SPACE),
GlobalObject);
}
+Handle<GlobalObject> Factory::NewGlobalObject(Handle<JSFunction> constructor) {
+ ASSERT(constructor->has_initial_map());
+ Handle<Map> map(constructor->initial_map());
+ ASSERT(map->is_dictionary_map());
+
+ // Make sure no field properties are described in the initial map.
+ // This guarantees us that normalizing the properties does not
+ // require us to change property values to PropertyCells.
+ ASSERT(map->NextFreePropertyIndex() == 0);
+
+ // Make sure we don't have a ton of pre-allocated slots in the
+ // global objects. They will be unused once we normalize the object.
+ ASSERT(map->unused_property_fields() == 0);
+ ASSERT(map->inobject_properties() == 0);
+
+ // Initial size of the backing store to avoid resize of the storage during
+ // bootstrapping. The size differs between the JS global object and the
+ // builtins object.
+ int initial_size = map->instance_type() == JS_GLOBAL_OBJECT_TYPE ? 64 : 512;
+
+ // Allocate a dictionary object for backing storage.
+ int at_least_space_for = map->NumberOfOwnDescriptors() * 2 + initial_size;
+ Handle<NameDictionary> dictionary = NewNameDictionary(at_least_space_for);
+
+ // The global object might be created from an object template with accessors.
+ // Fill these accessors into the dictionary.
+ Handle<DescriptorArray> descs(map->instance_descriptors());
+ for (int i = 0; i < map->NumberOfOwnDescriptors(); i++) {
+ PropertyDetails details = descs->GetDetails(i);
+ ASSERT(details.type() == CALLBACKS); // Only accessors are expected.
+ PropertyDetails d = PropertyDetails(details.attributes(), CALLBACKS, i + 1);
+ Handle<Name> name(descs->GetKey(i));
+ Handle<Object> value(descs->GetCallbacksObject(i), isolate());
+ Handle<PropertyCell> cell = NewPropertyCell(value);
+ NameDictionaryAdd(dictionary, name, cell, d);
+ }
+
+ // Allocate the global object and initialize it with the backing store.
+ Handle<GlobalObject> global = NewGlobalObjectFromMap(isolate(), map);
+ isolate()->heap()->InitializeJSObjectFromMap(*global, *dictionary, *map);
+
+ // Create a new map for the global object.
+ Handle<Map> new_map = Map::CopyDropDescriptors(map);
+ new_map->set_dictionary_map(true);
-Handle<JSObject> Factory::NewJSObjectFromMap(Handle<Map> map) {
+ // Set up the global object as a normalized object.
+ global->set_map(*new_map);
+ global->set_properties(*dictionary);
+
+ // Make sure result is a global object with properties in dictionary.
+ ASSERT(global->IsGlobalObject() && !global->HasFastProperties());
+ return global;
+}
+
+
+Handle<JSObject> Factory::NewJSObjectFromMap(Handle<Map> map,
+ PretenureFlag pretenure,
+ bool alloc_props) {
CALL_HEAP_FUNCTION(
isolate(),
- isolate()->heap()->AllocateJSObjectFromMap(*map, NOT_TENURED),
+ isolate()->heap()->AllocateJSObjectFromMap(*map, pretenure, alloc_props),
JSObject);
}
@@ -937,6 +1148,9 @@ Handle<JSObject> Factory::NewJSObjectFromMap(Handle<Map> map) {
Handle<JSArray> Factory::NewJSArray(int capacity,
ElementsKind elements_kind,
PretenureFlag pretenure) {
+ if (capacity != 0) {
+ elements_kind = GetHoleyElementsKind(elements_kind);
+ }
CALL_HEAP_FUNCTION(isolate(),
isolate()->heap()->AllocateJSArrayAndStorage(
elements_kind,
@@ -955,6 +1169,7 @@ Handle<JSArray> Factory::NewJSArrayWithElements(Handle<FixedArrayBase> elements,
isolate(),
isolate()->heap()->AllocateJSArrayWithElements(*elements,
elements_kind,
+ elements->length(),
pretenure),
JSArray);
}
@@ -978,20 +1193,71 @@ void Factory::SetContent(Handle<JSArray> array,
}
-void Factory::EnsureCanContainHeapObjectElements(Handle<JSArray> array) {
- CALL_HEAP_FUNCTION_VOID(
+Handle<JSArrayBuffer> Factory::NewJSArrayBuffer() {
+ Handle<JSFunction> array_buffer_fun(
+ isolate()->context()->native_context()->array_buffer_fun());
+ CALL_HEAP_FUNCTION(
isolate(),
- array->EnsureCanContainHeapObjectElements());
+ isolate()->heap()->AllocateJSObject(*array_buffer_fun),
+ JSArrayBuffer);
}
-void Factory::EnsureCanContainElements(Handle<JSArray> array,
- Handle<FixedArrayBase> elements,
- uint32_t length,
- EnsureElementsMode mode) {
- CALL_HEAP_FUNCTION_VOID(
+Handle<JSDataView> Factory::NewJSDataView() {
+ Handle<JSFunction> data_view_fun(
+ isolate()->context()->native_context()->data_view_fun());
+ CALL_HEAP_FUNCTION(
isolate(),
- array->EnsureCanContainElements(*elements, length, mode));
+ isolate()->heap()->AllocateJSObject(*data_view_fun),
+ JSDataView);
+}
+
+
+static JSFunction* GetTypedArrayFun(ExternalArrayType type,
+ Isolate* isolate) {
+ Context* native_context = isolate->context()->native_context();
+ switch (type) {
+ case kExternalUnsignedByteArray:
+ return native_context->uint8_array_fun();
+
+ case kExternalByteArray:
+ return native_context->int8_array_fun();
+
+ case kExternalUnsignedShortArray:
+ return native_context->uint16_array_fun();
+
+ case kExternalShortArray:
+ return native_context->int16_array_fun();
+
+ case kExternalUnsignedIntArray:
+ return native_context->uint32_array_fun();
+
+ case kExternalIntArray:
+ return native_context->int32_array_fun();
+
+ case kExternalFloatArray:
+ return native_context->float_array_fun();
+
+ case kExternalDoubleArray:
+ return native_context->double_array_fun();
+
+ case kExternalPixelArray:
+ return native_context->uint8c_array_fun();
+
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+
+Handle<JSTypedArray> Factory::NewJSTypedArray(ExternalArrayType type) {
+ Handle<JSFunction> typed_array_fun_handle(GetTypedArrayFun(type, isolate()));
+
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateJSObject(*typed_array_fun_handle),
+ JSTypedArray);
}
@@ -1020,16 +1286,10 @@ void Factory::BecomeJSFunction(Handle<JSReceiver> object) {
}
-void Factory::SetIdentityHash(Handle<JSObject> object, Smi* hash) {
- CALL_HEAP_FUNCTION_VOID(
- isolate(),
- object->SetIdentityHash(hash, ALLOW_CREATION));
-}
-
-
Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
Handle<String> name,
int number_of_literals,
+ bool is_generator,
Handle<Code> code,
Handle<ScopeInfo> scope_info) {
Handle<SharedFunctionInfo> shared = NewSharedFunctionInfo(name);
@@ -1043,6 +1303,10 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
literals_array_size += JSFunction::kLiteralsPrefixSize;
}
shared->set_num_literals(literals_array_size);
+ if (is_generator) {
+ shared->set_instance_class_name(isolate()->heap()->Generator_string());
+ shared->DisableOptimization(kGenerator);
+ }
return shared;
}
@@ -1066,6 +1330,7 @@ Handle<JSMessageObject> Factory::NewJSMessageObject(
JSMessageObject);
}
+
Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(Handle<String> name) {
CALL_HEAP_FUNCTION(isolate(),
isolate()->heap()->AllocateSharedFunctionInfo(*name),
@@ -1152,7 +1417,7 @@ Handle<JSFunction> Factory::NewFunctionWithoutPrototype(
Handle<Object> Factory::ToObject(Handle<Object> object) {
- CALL_HEAP_FUNCTION(isolate(), object->ToObject(), Object);
+ CALL_HEAP_FUNCTION(isolate(), object->ToObject(isolate()), Object);
}
@@ -1217,8 +1482,10 @@ Handle<JSFunction> Factory::CreateApiFunction(
Smi::cast(instance_template->internal_field_count())->value();
}
+ // TODO(svenpanne) Kill ApiInstanceType and refactor things by generalizing
+ // JSObject::GetHeaderSize.
int instance_size = kPointerSize * internal_field_count;
- InstanceType type = INVALID_TYPE;
+ InstanceType type;
switch (instance_type) {
case JavaScriptObject:
type = JS_OBJECT_TYPE;
@@ -1233,18 +1500,23 @@ Handle<JSFunction> Factory::CreateApiFunction(
instance_size += JSGlobalProxy::kSize;
break;
default:
+ UNREACHABLE();
+ type = JS_OBJECT_TYPE; // Keep the compiler happy.
break;
}
- ASSERT(type != INVALID_TYPE);
Handle<JSFunction> result =
- NewFunction(Factory::empty_symbol(),
+ NewFunction(Factory::empty_string(),
type,
instance_size,
code,
true);
+
+ // Set length.
+ result->shared()->set_length(obj->length());
+
// Set class name.
- Handle<Object> class_name = Handle<Object>(obj->class_name());
+ Handle<Object> class_name = Handle<Object>(obj->class_name(), isolate());
if (class_name->IsString()) {
result->shared()->set_instance_class_name(*class_name);
result->shared()->set_name(*class_name);
@@ -1284,15 +1556,29 @@ Handle<JSFunction> Factory::CreateApiFunction(
result->shared()->set_construct_stub(*construct_stub);
result->shared()->DontAdaptArguments();
- // Recursively copy parent templates' accessors, 'data' may be modified.
+ // Recursively copy parent instance templates' accessors,
+ // 'data' may be modified.
int max_number_of_additional_properties = 0;
+ int max_number_of_static_properties = 0;
FunctionTemplateInfo* info = *obj;
while (true) {
- Object* props = info->property_accessors();
- if (!props->IsUndefined()) {
- Handle<Object> props_handle(props);
- NeanderArray props_array(props_handle);
- max_number_of_additional_properties += props_array.length();
+ if (!info->instance_template()->IsUndefined()) {
+ Object* props =
+ ObjectTemplateInfo::cast(
+ info->instance_template())->property_accessors();
+ if (!props->IsUndefined()) {
+ Handle<Object> props_handle(props, isolate());
+ NeanderArray props_array(props_handle);
+ max_number_of_additional_properties += props_array.length();
+ }
+ }
+ if (!info->property_accessors()->IsUndefined()) {
+ Object* props = info->property_accessors();
+ if (!props->IsUndefined()) {
+ Handle<Object> props_handle(props, isolate());
+ NeanderArray props_array(props_handle);
+ max_number_of_static_properties += props_array.length();
+ }
}
Object* parent = info->parent_template();
if (parent->IsUndefined()) break;
@@ -1301,16 +1587,44 @@ Handle<JSFunction> Factory::CreateApiFunction(
Map::EnsureDescriptorSlack(map, max_number_of_additional_properties);
+ // Use a temporary FixedArray to accumulate static accessors
+ int valid_descriptors = 0;
+ Handle<FixedArray> array;
+ if (max_number_of_static_properties > 0) {
+ array = NewFixedArray(max_number_of_static_properties);
+ }
+
while (true) {
- Handle<Object> props = Handle<Object>(obj->property_accessors());
- if (!props->IsUndefined()) {
- Map::AppendCallbackDescriptors(map, props);
+ // Install instance descriptors
+ if (!obj->instance_template()->IsUndefined()) {
+ Handle<ObjectTemplateInfo> instance =
+ Handle<ObjectTemplateInfo>(
+ ObjectTemplateInfo::cast(obj->instance_template()), isolate());
+ Handle<Object> props = Handle<Object>(instance->property_accessors(),
+ isolate());
+ if (!props->IsUndefined()) {
+ Map::AppendCallbackDescriptors(map, props);
+ }
+ }
+ // Accumulate static accessors
+ if (!obj->property_accessors()->IsUndefined()) {
+ Handle<Object> props = Handle<Object>(obj->property_accessors(),
+ isolate());
+ valid_descriptors =
+ AccessorInfo::AppendUnique(props, array, valid_descriptors);
}
- Handle<Object> parent = Handle<Object>(obj->parent_template());
+ // Climb parent chain
+ Handle<Object> parent = Handle<Object>(obj->parent_template(), isolate());
if (parent->IsUndefined()) break;
obj = Handle<FunctionTemplateInfo>::cast(parent);
}
+ // Install accumulated static accessors
+ for (int i = 0; i < valid_descriptors; i++) {
+ Handle<AccessorInfo> accessor(AccessorInfo::cast(array->get(i)));
+ JSObject::SetAccessor(result, accessor);
+ }
+
ASSERT(result->shared()->IsApiFunction());
return result;
}
@@ -1318,7 +1632,9 @@ Handle<JSFunction> Factory::CreateApiFunction(
Handle<MapCache> Factory::NewMapCache(int at_least_space_for) {
CALL_HEAP_FUNCTION(isolate(),
- MapCache::Allocate(at_least_space_for), MapCache);
+ MapCache::Allocate(isolate()->heap(),
+ at_least_space_for),
+ MapCache);
}
@@ -1353,7 +1669,7 @@ Handle<Map> Factory::ObjectLiteralMapFromCache(Handle<Context> context,
// Check to see whether there is a matching element in the cache.
Handle<MapCache> cache =
Handle<MapCache>(MapCache::cast(context->map_cache()));
- Handle<Object> result = Handle<Object>(cache->Lookup(*keys));
+ Handle<Object> result = Handle<Object>(cache->Lookup(*keys), isolate());
if (result->IsMap()) return Handle<Map>::cast(result);
// Create a new map and add it to the cache.
Handle<Map> map =
@@ -1405,9 +1721,10 @@ void Factory::ConfigureInstance(Handle<FunctionTemplateInfo> desc,
bool* pending_exception) {
// Configure the instance by adding the properties specified by the
// instance template.
- Handle<Object> instance_template = Handle<Object>(desc->instance_template());
+ Handle<Object> instance_template(desc->instance_template(), isolate());
if (!instance_template->IsUndefined()) {
- Execution::ConfigureInstance(instance,
+ Execution::ConfigureInstance(isolate(),
+ instance,
instance_template,
pending_exception);
} else {
@@ -1418,17 +1735,15 @@ void Factory::ConfigureInstance(Handle<FunctionTemplateInfo> desc,
Handle<Object> Factory::GlobalConstantFor(Handle<String> name) {
Heap* h = isolate()->heap();
- if (name->Equals(h->undefined_symbol())) return undefined_value();
- if (name->Equals(h->nan_symbol())) return nan_value();
- if (name->Equals(h->infinity_symbol())) return infinity_value();
+ if (name->Equals(h->undefined_string())) return undefined_value();
+ if (name->Equals(h->nan_string())) return nan_value();
+ if (name->Equals(h->infinity_string())) return infinity_value();
return Handle<Object>::null();
}
Handle<Object> Factory::ToBoolean(bool value) {
- return Handle<Object>(value
- ? isolate()->heap()->true_value()
- : isolate()->heap()->false_value());
+ return value ? true_value() : false_value();
}
diff --git a/deps/v8/src/factory.h b/deps/v8/src/factory.h
index 51065aac4..ee25bf23d 100644
--- a/deps/v8/src/factory.h
+++ b/deps/v8/src/factory.h
@@ -39,6 +39,11 @@ namespace internal {
class Factory {
public:
+ // Allocate a new boxed value.
+ Handle<Box> NewBox(
+ Handle<Object> value,
+ PretenureFlag pretenure = NOT_TENURED);
+
// Allocate a new uninitialized fixed array.
Handle<FixedArray> NewFixedArray(
int size,
@@ -54,18 +59,25 @@ class Factory {
int size,
PretenureFlag pretenure = NOT_TENURED);
+ Handle<ConstantPoolArray> NewConstantPoolArray(
+ int number_of_int64_entries,
+ int number_of_ptr_entries,
+ int number_of_int32_entries);
+
Handle<SeededNumberDictionary> NewSeededNumberDictionary(
int at_least_space_for);
Handle<UnseededNumberDictionary> NewUnseededNumberDictionary(
int at_least_space_for);
- Handle<StringDictionary> NewStringDictionary(int at_least_space_for);
+ Handle<NameDictionary> NewNameDictionary(int at_least_space_for);
Handle<ObjectHashSet> NewObjectHashSet(int at_least_space_for);
Handle<ObjectHashTable> NewObjectHashTable(int at_least_space_for);
+ Handle<WeakHashTable> NewWeakHashTable(int at_least_space_for);
+
Handle<DescriptorArray> NewDescriptorArray(int number_of_descriptors,
int slack = 0);
Handle<DeoptimizationInputData> NewDeoptimizationInputData(
@@ -79,16 +91,16 @@ class Factory {
Handle<TypeFeedbackInfo> NewTypeFeedbackInfo();
- Handle<String> LookupSymbol(Vector<const char> str);
- Handle<String> LookupSymbol(Handle<String> str);
- Handle<String> LookupAsciiSymbol(Vector<const char> str);
- Handle<String> LookupAsciiSymbol(Handle<SeqAsciiString>,
+ Handle<String> InternalizeUtf8String(Vector<const char> str);
+ Handle<String> InternalizeUtf8String(const char* str) {
+ return InternalizeUtf8String(CStrVector(str));
+ }
+ Handle<String> InternalizeString(Handle<String> str);
+ Handle<String> InternalizeOneByteString(Vector<const uint8_t> str);
+ Handle<String> InternalizeOneByteString(Handle<SeqOneByteString>,
int from,
int length);
- Handle<String> LookupTwoByteSymbol(Vector<const uc16> str);
- Handle<String> LookupAsciiSymbol(const char* str) {
- return LookupSymbol(CStrVector(str));
- }
+ Handle<String> InternalizeTwoByteString(Vector<const uc16> str);
// String creation functions. Most of the string creation functions take
@@ -113,9 +125,15 @@ class Factory {
// two byte.
//
// ASCII strings are pretenured when used as keys in the SourceCodeCache.
- Handle<String> NewStringFromAscii(
- Vector<const char> str,
+ Handle<String> NewStringFromOneByte(
+ Vector<const uint8_t> str,
PretenureFlag pretenure = NOT_TENURED);
+ // TODO(dcarney): remove this function.
+ inline Handle<String> NewStringFromAscii(
+ Vector<const char> str,
+ PretenureFlag pretenure = NOT_TENURED) {
+ return NewStringFromOneByte(Vector<const uint8_t>::cast(str), pretenure);
+ }
// UTF8 strings are pretenured when used for regexp literal patterns and
// flags in the parser.
@@ -130,7 +148,7 @@ class Factory {
// Allocates and partially initializes an ASCII or TwoByte String. The
// characters of the string are uninitialized. Currently used in regexp code
// only, where they are pretenured.
- Handle<SeqAsciiString> NewRawAsciiString(
+ Handle<SeqOneByteString> NewRawOneByteString(
int length,
PretenureFlag pretenure = NOT_TENURED);
Handle<SeqTwoByteString> NewRawTwoByteString(
@@ -141,6 +159,10 @@ class Factory {
Handle<String> NewConsString(Handle<String> first,
Handle<String> second);
+ // Create a new sequential string containing the concatenation of the inputs.
+ Handle<String> NewFlatConcatString(Handle<String> first,
+ Handle<String> second);
+
// Create a new string object which holds a substring of a string.
Handle<String> NewSubString(Handle<String> str,
int begin,
@@ -160,6 +182,9 @@ class Factory {
Handle<String> NewExternalStringFromTwoByte(
const ExternalTwoByteString::Resource* resource);
+ // Create a symbol.
+ Handle<Symbol> NewSymbol();
+
// Create a global (but otherwise uninitialized) context.
Handle<Context> NewNativeContext();
@@ -189,14 +214,18 @@ class Factory {
Handle<Context> previous,
Handle<ScopeInfo> scope_info);
- // Return the Symbol matching the passed in string.
- Handle<String> SymbolFromString(Handle<String> value);
+ // Return the internalized version of the passed in string.
+ Handle<String> InternalizedStringFromString(Handle<String> value);
// Allocate a new struct. The struct is pretenured (allocated directly in
// the old generation).
Handle<Struct> NewStruct(InstanceType type);
- Handle<AccessorInfo> NewAccessorInfo();
+ Handle<DeclaredAccessorDescriptor> NewDeclaredAccessorDescriptor();
+
+ Handle<DeclaredAccessorInfo> NewDeclaredAccessorInfo();
+
+ Handle<ExecutableAccessorInfo> NewExecutableAccessorInfo();
Handle<Script> NewScript(Handle<String> source);
@@ -217,8 +246,13 @@ class Factory {
void* external_pointer,
PretenureFlag pretenure = NOT_TENURED);
- Handle<JSGlobalPropertyCell> NewJSGlobalPropertyCell(
- Handle<Object> value);
+ Handle<Cell> NewCell(Handle<Object> value);
+
+ Handle<PropertyCell> NewPropertyCellWithHole();
+
+ Handle<PropertyCell> NewPropertyCell(Handle<Object> value);
+
+ Handle<AllocationSite> NewAllocationSite();
Handle<Map> NewMap(
InstanceType type,
@@ -239,9 +273,16 @@ class Factory {
Handle<FixedArray> CopyFixedArray(Handle<FixedArray> array);
+ Handle<FixedArray> CopySizeFixedArray(Handle<FixedArray> array,
+ int new_length,
+ PretenureFlag pretenure = NOT_TENURED);
+
Handle<FixedDoubleArray> CopyFixedDoubleArray(
Handle<FixedDoubleArray> array);
+ Handle<ConstantPoolArray> CopyConstantPoolArray(
+ Handle<ConstantPoolArray> array);
+
// Numbers (e.g. literals) are pretenured by the parser.
Handle<Object> NewNumber(double value,
PretenureFlag pretenure = NOT_TENURED);
@@ -250,6 +291,11 @@ class Factory {
PretenureFlag pretenure = NOT_TENURED);
Handle<Object> NewNumberFromUint(uint32_t value,
PretenureFlag pretenure = NOT_TENURED);
+ inline Handle<Object> NewNumberFromSize(size_t value,
+ PretenureFlag pretenure = NOT_TENURED);
+ Handle<HeapNumber> NewHeapNumber(double value,
+ PretenureFlag pretenure = NOT_TENURED);
+
// These objects are used by the api to create env-independent data
// structures in the heap.
@@ -262,12 +308,17 @@ class Factory {
Handle<JSObject> NewJSObject(Handle<JSFunction> constructor,
PretenureFlag pretenure = NOT_TENURED);
- // Global objects are pretenured.
+ // Global objects are pretenured and initialized based on a constructor.
Handle<GlobalObject> NewGlobalObject(Handle<JSFunction> constructor);
// JS objects are pretenured when allocated by the bootstrapper and
// runtime.
- Handle<JSObject> NewJSObjectFromMap(Handle<Map> map);
+ Handle<JSObject> NewJSObjectFromMap(Handle<Map> map,
+ PretenureFlag pretenure = NOT_TENURED,
+ bool allocate_properties = true);
+
+ Handle<JSObject> NewJSObjectFromMapForDeoptimizer(
+ Handle<Map> map, PretenureFlag pretenure = NOT_TENURED);
// JS modules are pretenured.
Handle<JSModule> NewJSModule(Handle<Context> context,
@@ -290,11 +341,11 @@ class Factory {
void SetContent(Handle<JSArray> array, Handle<FixedArrayBase> elements);
- void EnsureCanContainHeapObjectElements(Handle<JSArray> array);
- void EnsureCanContainElements(Handle<JSArray> array,
- Handle<FixedArrayBase> elements,
- uint32_t length,
- EnsureElementsMode mode);
+ Handle<JSArrayBuffer> NewJSArrayBuffer();
+
+ Handle<JSTypedArray> NewJSTypedArray(ExternalArrayType type);
+
+ Handle<JSDataView> NewJSDataView();
Handle<JSProxy> NewJSProxy(Handle<Object> handler, Handle<Object> prototype);
@@ -302,8 +353,6 @@ class Factory {
void BecomeJSObject(Handle<JSReceiver> object);
void BecomeJSFunction(Handle<JSReceiver> object);
- void SetIdentityHash(Handle<JSObject> object, Smi* hash);
-
Handle<JSFunction> NewFunction(Handle<String> name,
Handle<Object> prototype);
@@ -325,10 +374,14 @@ class Factory {
Handle<ScopeInfo> NewScopeInfo(int length);
+ Handle<JSObject> NewExternal(void* value);
+
Handle<Code> NewCode(const CodeDesc& desc,
Code::Flags flags,
Handle<Object> self_reference,
- bool immovable = false);
+ bool immovable = false,
+ bool crankshafted = false,
+ int prologue_offset = Code::kPrologueOffsetNotSet);
Handle<Code> CopyCode(Handle<Code> code);
@@ -340,33 +393,33 @@ class Factory {
// Interface for creating error objects.
- Handle<Object> NewError(const char* maker, const char* type,
+ Handle<Object> NewError(const char* maker, const char* message,
Handle<JSArray> args);
- Handle<String> EmergencyNewError(const char* type, Handle<JSArray> args);
- Handle<Object> NewError(const char* maker, const char* type,
+ Handle<String> EmergencyNewError(const char* message, Handle<JSArray> args);
+ Handle<Object> NewError(const char* maker, const char* message,
Vector< Handle<Object> > args);
- Handle<Object> NewError(const char* type,
+ Handle<Object> NewError(const char* message,
Vector< Handle<Object> > args);
Handle<Object> NewError(Handle<String> message);
Handle<Object> NewError(const char* constructor,
Handle<String> message);
- Handle<Object> NewTypeError(const char* type,
+ Handle<Object> NewTypeError(const char* message,
Vector< Handle<Object> > args);
Handle<Object> NewTypeError(Handle<String> message);
- Handle<Object> NewRangeError(const char* type,
+ Handle<Object> NewRangeError(const char* message,
Vector< Handle<Object> > args);
Handle<Object> NewRangeError(Handle<String> message);
- Handle<Object> NewSyntaxError(const char* type, Handle<JSArray> args);
+ Handle<Object> NewSyntaxError(const char* message, Handle<JSArray> args);
Handle<Object> NewSyntaxError(Handle<String> message);
- Handle<Object> NewReferenceError(const char* type,
+ Handle<Object> NewReferenceError(const char* message,
Vector< Handle<Object> > args);
Handle<Object> NewReferenceError(Handle<String> message);
- Handle<Object> NewEvalError(const char* type,
+ Handle<Object> NewEvalError(const char* message,
Vector< Handle<Object> > args);
@@ -418,23 +471,32 @@ class Factory {
&isolate()->heap()->roots_[Heap::k##camel_name##RootIndex])); \
}
ROOT_LIST(ROOT_ACCESSOR)
-#undef ROOT_ACCESSOR_ACCESSOR
+#undef ROOT_ACCESSOR
+
+#define STRUCT_MAP_ACCESSOR(NAME, Name, name) \
+ inline Handle<Map> name##_map() { \
+ return Handle<Map>(BitCast<Map**>( \
+ &isolate()->heap()->roots_[Heap::k##Name##MapRootIndex])); \
+ }
+ STRUCT_LIST(STRUCT_MAP_ACCESSOR)
+#undef STRUCT_MAP_ACCESSOR
-#define SYMBOL_ACCESSOR(name, str) \
+#define STRING_ACCESSOR(name, str) \
inline Handle<String> name() { \
return Handle<String>(BitCast<String**>( \
&isolate()->heap()->roots_[Heap::k##name##RootIndex])); \
}
- SYMBOL_LIST(SYMBOL_ACCESSOR)
-#undef SYMBOL_ACCESSOR
+ INTERNALIZED_STRING_LIST(STRING_ACCESSOR)
+#undef STRING_ACCESSOR
- Handle<String> hidden_symbol() {
- return Handle<String>(&isolate()->heap()->hidden_symbol_);
+ Handle<String> hidden_string() {
+ return Handle<String>(&isolate()->heap()->hidden_string_);
}
Handle<SharedFunctionInfo> NewSharedFunctionInfo(
Handle<String> name,
int number_of_literals,
+ bool is_generator,
Handle<Code> code,
Handle<ScopeInfo> scope_info);
Handle<SharedFunctionInfo> NewSharedFunctionInfo(Handle<String> name);
@@ -511,6 +573,112 @@ class Factory {
};
+Handle<Object> Factory::NewNumberFromSize(size_t value,
+ PretenureFlag pretenure) {
+ if (Smi::IsValid(static_cast<intptr_t>(value))) {
+ return Handle<Object>(Smi::FromIntptr(static_cast<intptr_t>(value)),
+ isolate());
+ } else {
+ return NewNumber(static_cast<double>(value), pretenure);
+ }
+}
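The inline above returns a Smi when the size_t value fits and otherwise falls back to a heap-allocated number. A standalone sketch of that decision, assuming a 31-bit Smi payload (the real limit comes from Smi::IsValid and is platform dependent):

#include <cstdint>
#include <cstdio>

// Illustrative only; kSmiMax stands in for V8's platform-specific Smi range.
static const intptr_t kSmiMax = (static_cast<intptr_t>(1) << 30) - 1;

static const char* NumberKindForSize(size_t value) {
  intptr_t as_int = static_cast<intptr_t>(value);
  return (as_int >= 0 && as_int <= kSmiMax) ? "Smi" : "HeapNumber";
}

int main() {
  std::printf("%s\n", NumberKindForSize(42));           // Smi
  std::printf("%s\n", NumberKindForSize(3000000000u));  // HeapNumber
  return 0;
}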
+
+
+// Used to "safely" transition from pointer-based runtime code to Handle-based
+// runtime code. When a GC happens during the called Handle-based code, a
+// failure object is returned to the pointer-based code to cause it to abort
+// and re-trigger a GC of its own. Since this double GC will cause the
+// Handle-based code to be called twice, it must be idempotent.
+class IdempotentPointerToHandleCodeTrampoline {
+ public:
+ explicit IdempotentPointerToHandleCodeTrampoline(Isolate* isolate)
+ : isolate_(isolate) {}
+
+ template<typename R>
+ MUST_USE_RESULT MaybeObject* Call(R (*function)()) {
+ int collections = isolate_->heap()->gc_count();
+ (*function)();
+ return (collections == isolate_->heap()->gc_count())
+ ? isolate_->heap()->true_value()
+ : reinterpret_cast<MaybeObject*>(Failure::RetryAfterGC());
+ }
+
+ template<typename R>
+ MUST_USE_RESULT MaybeObject* CallWithReturnValue(R (*function)()) {
+ int collections = isolate_->heap()->gc_count();
+ Object* result = (*function)();
+ return (collections == isolate_->heap()->gc_count())
+ ? result
+ : reinterpret_cast<MaybeObject*>(Failure::RetryAfterGC());
+ }
+
+ template<typename R, typename P1>
+ MUST_USE_RESULT MaybeObject* Call(R (*function)(P1), P1 p1) {
+ int collections = isolate_->heap()->gc_count();
+ (*function)(p1);
+ return (collections == isolate_->heap()->gc_count())
+ ? isolate_->heap()->true_value()
+ : reinterpret_cast<MaybeObject*>(Failure::RetryAfterGC());
+ }
+
+ template<typename R, typename P1>
+ MUST_USE_RESULT MaybeObject* CallWithReturnValue(
+ R (*function)(P1),
+ P1 p1) {
+ int collections = isolate_->heap()->gc_count();
+ Object* result = (*function)(p1);
+ return (collections == isolate_->heap()->gc_count())
+ ? result
+ : reinterpret_cast<MaybeObject*>(Failure::RetryAfterGC());
+ }
+
+ template<typename R, typename P1, typename P2>
+ MUST_USE_RESULT MaybeObject* Call(
+ R (*function)(P1, P2),
+ P1 p1,
+ P2 p2) {
+ int collections = isolate_->heap()->gc_count();
+ (*function)(p1, p2);
+ return (collections == isolate_->heap()->gc_count())
+ ? isolate_->heap()->true_value()
+ : reinterpret_cast<MaybeObject*>(Failure::RetryAfterGC());
+ }
+
+ template<typename R, typename P1, typename P2>
+ MUST_USE_RESULT MaybeObject* CallWithReturnValue(
+ R (*function)(P1, P2),
+ P1 p1,
+ P2 p2) {
+ int collections = isolate_->heap()->gc_count();
+ Object* result = (*function)(p1, p2);
+ return (collections == isolate_->heap()->gc_count())
+ ? result
+ : reinterpret_cast<MaybeObject*>(Failure::RetryAfterGC());
+ }
+
+ template<typename R, typename P1, typename P2, typename P3, typename P4,
+ typename P5, typename P6, typename P7>
+ MUST_USE_RESULT MaybeObject* CallWithReturnValue(
+ R (*function)(P1, P2, P3, P4, P5, P6, P7),
+ P1 p1,
+ P2 p2,
+ P3 p3,
+ P4 p4,
+ P5 p5,
+ P6 p6,
+ P7 p7) {
+ int collections = isolate_->heap()->gc_count();
+ Handle<Object> result = (*function)(p1, p2, p3, p4, p5, p6, p7);
+ return (collections == isolate_->heap()->gc_count())
+ ? *result
+ : reinterpret_cast<MaybeObject*>(Failure::RetryAfterGC());
+ }
+
+ private:
+ Isolate* isolate_;
+};
+
+
} } // namespace v8::internal
#endif // V8_FACTORY_H_
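A hedged sketch of how a pointer-based runtime entry point might use the trampoline declared above; the helper name is hypothetical and not part of this patch:

// Hypothetical handle-based helper wrapped for pointer-based callers (sketch).
static void GrowLookupCache(Isolate* isolate);  // assumed helper, not in the patch

MaybeObject* GrowLookupCacheWrapper(Isolate* isolate) {
  IdempotentPointerToHandleCodeTrampoline trampoline(isolate);
  MaybeObject* result = trampoline.Call(&GrowLookupCache, isolate);
  // If a GC ran inside the helper, result is Failure::RetryAfterGC(); the
  // caller collects garbage and re-enters, so the helper must tolerate being
  // run twice (the idempotence the comment above requires).
  return result;
}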
diff --git a/deps/v8/src/fixed-dtoa.cc b/deps/v8/src/fixed-dtoa.cc
index 1fd974c3e..fd90eca90 100644
--- a/deps/v8/src/fixed-dtoa.cc
+++ b/deps/v8/src/fixed-dtoa.cc
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include <math.h>
+#include <cmath>
#include "../include/v8stdint.h"
#include "checks.h"
diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h
index 0427e7d85..865413e70 100644
--- a/deps/v8/src/flag-definitions.h
+++ b/deps/v8/src/flag-definitions.h
@@ -41,15 +41,12 @@
extern ctype FLAG_##nam;
#define FLAG_READONLY(ftype, ctype, nam, def, cmt) \
static ctype const FLAG_##nam = def;
-#define DEFINE_implication(whenflag, thenflag)
// We want to supply the actual storage and value for the flag variable in the
// .cc file. We only do this for writable flags.
#elif defined(FLAG_MODE_DEFINE)
#define FLAG_FULL(ftype, ctype, nam, def, cmt) \
ctype FLAG_##nam = def;
-#define FLAG_READONLY(ftype, ctype, nam, def, cmt)
-#define DEFINE_implication(whenflag, thenflag)
// We need to define all of our default values so that the Flag structure can
// access them by pointer. These are just used internally inside of one .cc,
@@ -57,21 +54,18 @@
#elif defined(FLAG_MODE_DEFINE_DEFAULTS)
#define FLAG_FULL(ftype, ctype, nam, def, cmt) \
static ctype const FLAGDEFAULT_##nam = def;
-#define FLAG_READONLY(ftype, ctype, nam, def, cmt)
-#define DEFINE_implication(whenflag, thenflag)
// We want to write entries into our meta data table, for internal parsing and
// printing / etc in the flag parser code. We only do this for writable flags.
#elif defined(FLAG_MODE_META)
#define FLAG_FULL(ftype, ctype, nam, def, cmt) \
{ Flag::TYPE_##ftype, #nam, &FLAG_##nam, &FLAGDEFAULT_##nam, cmt, false },
-#define FLAG_READONLY(ftype, ctype, nam, def, cmt)
-#define DEFINE_implication(whenflag, thenflag)
+#define FLAG_ALIAS(ftype, ctype, alias, nam) \
+ { Flag::TYPE_##ftype, #alias, &FLAG_##nam, &FLAGDEFAULT_##nam, \
+ "alias for --"#nam, false },
// We produce the code to set flags when it is implied by another flag.
#elif defined(FLAG_MODE_DEFINE_IMPLICATIONS)
-#define FLAG_FULL(ftype, ctype, nam, def, cmt)
-#define FLAG_READONLY(ftype, ctype, nam, def, cmt)
#define DEFINE_implication(whenflag, thenflag) \
if (FLAG_##whenflag) FLAG_##thenflag = true;
@@ -79,51 +73,85 @@
#error No mode supplied when including flags.defs
#endif
+// Dummy defines for modes where they are not relevant.
+#ifndef FLAG_FULL
+#define FLAG_FULL(ftype, ctype, nam, def, cmt)
+#endif
+
+#ifndef FLAG_READONLY
+#define FLAG_READONLY(ftype, ctype, nam, def, cmt)
+#endif
+
+#ifndef FLAG_ALIAS
+#define FLAG_ALIAS(ftype, ctype, alias, nam)
+#endif
+
+#ifndef DEFINE_implication
+#define DEFINE_implication(whenflag, thenflag)
+#endif
+
+#define COMMA ,
+
#ifdef FLAG_MODE_DECLARE
// Structure used to hold a collection of arguments to the JavaScript code.
-#define JSARGUMENTS_INIT {{}}
struct JSArguments {
public:
- inline int argc() const {
- return static_cast<int>(storage_[0]);
- }
- inline const char** argv() const {
- return reinterpret_cast<const char**>(storage_[1]);
- }
inline const char*& operator[] (int idx) const {
- return argv()[idx];
- }
- inline JSArguments& operator=(JSArguments args) {
- set_argc(args.argc());
- set_argv(args.argv());
- return *this;
+ return argv[idx];
}
static JSArguments Create(int argc, const char** argv) {
JSArguments args;
- args.set_argc(argc);
- args.set_argv(argv);
+ args.argc = argc;
+ args.argv = argv;
return args;
}
-private:
- void set_argc(int argc) {
- storage_[0] = argc;
- }
- void set_argv(const char** argv) {
- storage_[1] = reinterpret_cast<AtomicWord>(argv);
+ int argc;
+ const char** argv;
+};
+
+struct MaybeBoolFlag {
+ static MaybeBoolFlag Create(bool has_value, bool value) {
+ MaybeBoolFlag flag;
+ flag.has_value = has_value;
+ flag.value = value;
+ return flag;
}
-public:
- // Contains argc and argv. Unfortunately we have to store these two fields
- // into a single one to avoid making the initialization macro (which would be
- // "{ 0, NULL }") contain a coma.
- AtomicWord storage_[2];
+ bool has_value;
+ bool value;
};
#endif
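MaybeBoolFlag gives boolean flags a third, unset state. A usage sketch with a hypothetical flag name (not defined by this patch):

// DEFINE_maybe_bool(use_widget, "enable widgets only when explicitly requested")
// Consumers can then distinguish "not given" from an explicit --use_widget or
// --no-use_widget on the command line:
static bool UseWidget(bool default_value) {
  return FLAG_use_widget.has_value ? FLAG_use_widget.value : default_value;
}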
-#define DEFINE_bool(nam, def, cmt) FLAG(BOOL, bool, nam, def, cmt)
-#define DEFINE_int(nam, def, cmt) FLAG(INT, int, nam, def, cmt)
-#define DEFINE_float(nam, def, cmt) FLAG(FLOAT, double, nam, def, cmt)
+#if (defined CAN_USE_VFP3_INSTRUCTIONS) || !(defined ARM_TEST)
+# define ENABLE_VFP3_DEFAULT true
+#else
+# define ENABLE_VFP3_DEFAULT false
+#endif
+#if (defined CAN_USE_ARMV7_INSTRUCTIONS) || !(defined ARM_TEST)
+# define ENABLE_ARMV7_DEFAULT true
+#else
+# define ENABLE_ARMV7_DEFAULT false
+#endif
+#if (defined CAN_USE_VFP32DREGS) || !(defined ARM_TEST)
+# define ENABLE_32DREGS_DEFAULT true
+#else
+# define ENABLE_32DREGS_DEFAULT false
+#endif
+
+#define DEFINE_bool(nam, def, cmt) FLAG(BOOL, bool, nam, def, cmt)
+#define DEFINE_maybe_bool(nam, cmt) FLAG(MAYBE_BOOL, MaybeBoolFlag, nam, \
+ { false COMMA false }, cmt)
+#define DEFINE_int(nam, def, cmt) FLAG(INT, int, nam, def, cmt)
+#define DEFINE_float(nam, def, cmt) FLAG(FLOAT, double, nam, def, cmt)
#define DEFINE_string(nam, def, cmt) FLAG(STRING, const char*, nam, def, cmt)
-#define DEFINE_args(nam, def, cmt) FLAG(ARGS, JSArguments, nam, def, cmt)
+#define DEFINE_args(nam, cmt) FLAG(ARGS, JSArguments, nam, \
+ { 0 COMMA NULL }, cmt)
+
+#define DEFINE_ALIAS_bool(alias, nam) FLAG_ALIAS(BOOL, bool, alias, nam)
+#define DEFINE_ALIAS_int(alias, nam) FLAG_ALIAS(INT, int, alias, nam)
+#define DEFINE_ALIAS_float(alias, nam) FLAG_ALIAS(FLOAT, double, alias, nam)
+#define DEFINE_ALIAS_string(alias, nam) \
+ FLAG_ALIAS(STRING, const char*, alias, nam)
+#define DEFINE_ALIAS_args(alias, nam) FLAG_ALIAS(ARGS, JSArguments, alias, nam)
//
// Flags in all modes.
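The COMMA macro above exists because the preprocessor splits macro arguments on commas and braces do not protect them, so a default such as { 0, NULL } cannot be passed straight through FLAG(...); the old JSArguments packed argc and argv into a single AtomicWord array for the same reason. A minimal standalone illustration of the trick (not V8 code):

#include <cstdio>

#define COMMA ,
// PRINT_PAIR({1, 2}) would be rejected as two macro arguments; spelling the
// comma as COMMA defers its expansion until after argument splitting.
#define PRINT_PAIR(pair) { int v[] = pair; std::printf("%d %d\n", v[0], v[1]); }

int main() {
  PRINT_PAIR({1 COMMA 2})  // expands to { int v[] = {1 , 2}; ... }
  return 0;
}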
@@ -141,22 +169,60 @@ DEFINE_bool(harmony_typeof, false, "enable harmony semantics for typeof")
DEFINE_bool(harmony_scoping, false, "enable harmony block scoping")
DEFINE_bool(harmony_modules, false,
"enable harmony modules (implies block scoping)")
+DEFINE_bool(harmony_symbols, false,
+ "enable harmony symbols (a.k.a. private names)")
DEFINE_bool(harmony_proxies, false, "enable harmony proxies")
DEFINE_bool(harmony_collections, false,
"enable harmony collections (sets, maps, and weak maps)")
+DEFINE_bool(harmony_observation, false,
+            "enable harmony object observation (implies harmony collections)")
+DEFINE_bool(harmony_generators, false, "enable harmony generators")
+DEFINE_bool(harmony_iteration, false, "enable harmony iteration (for-of)")
+DEFINE_bool(harmony_numeric_literals, false,
+ "enable harmony numeric literals (0o77, 0b11)")
+DEFINE_bool(harmony_strings, false, "enable harmony string")
+DEFINE_bool(harmony_arrays, false, "enable harmony arrays")
+DEFINE_bool(harmony_maths, false, "enable harmony math functions")
DEFINE_bool(harmony, false, "enable all harmony features (except typeof)")
DEFINE_implication(harmony, harmony_scoping)
DEFINE_implication(harmony, harmony_modules)
+DEFINE_implication(harmony, harmony_symbols)
DEFINE_implication(harmony, harmony_proxies)
DEFINE_implication(harmony, harmony_collections)
+DEFINE_implication(harmony, harmony_observation)
+DEFINE_implication(harmony, harmony_generators)
+DEFINE_implication(harmony, harmony_iteration)
+DEFINE_implication(harmony, harmony_numeric_literals)
+DEFINE_implication(harmony, harmony_strings)
+DEFINE_implication(harmony, harmony_arrays)
+DEFINE_implication(harmony, harmony_maths)
DEFINE_implication(harmony_modules, harmony_scoping)
+DEFINE_implication(harmony_observation, harmony_collections)
// Flags for experimental implementation features.
DEFINE_bool(packed_arrays, true, "optimizes arrays that have no holes")
DEFINE_bool(smi_only_arrays, true, "tracks arrays with only smi values")
-DEFINE_bool(clever_optimizations,
- true,
+DEFINE_bool(clever_optimizations, true,
"Optimize object size, Array shift, DOM strings and string +")
+DEFINE_bool(pretenuring, true, "allocate objects in old space")
+// TODO(hpayer): We will remove this flag as soon as we have pretenuring
+// support for specific allocation sites.
+DEFINE_bool(pretenuring_call_new, false, "pretenure call new")
+DEFINE_bool(allocation_site_pretenuring, false,
+ "pretenure with allocation sites")
+DEFINE_bool(track_fields, true, "track fields with only smi values")
+DEFINE_bool(track_double_fields, true, "track fields with double values")
+DEFINE_bool(track_heap_object_fields, true, "track fields with heap values")
+DEFINE_bool(track_computed_fields, true, "track computed boilerplate fields")
+DEFINE_implication(track_double_fields, track_fields)
+DEFINE_implication(track_heap_object_fields, track_fields)
+DEFINE_implication(track_computed_fields, track_fields)
+DEFINE_bool(smi_binop, true, "support smi representation in binary operations")
+
+// Flags for optimization types.
+DEFINE_bool(optimize_for_size, false,
+ "Enables optimizations which favor memory size over execution "
+ "speed.")
// Flags for data representation optimizations
DEFINE_bool(unbox_double_arrays, true, "automatically unbox arrays of doubles")
@@ -164,47 +230,73 @@ DEFINE_bool(string_slices, true, "use string slices")
// Flags for Crankshaft.
DEFINE_bool(crankshaft, true, "use crankshaft")
-DEFINE_string(hydrogen_filter, "", "optimization filter")
+DEFINE_string(hydrogen_filter, "*", "optimization filter")
DEFINE_bool(use_range, true, "use hydrogen range analysis")
-DEFINE_bool(eliminate_dead_phis, true, "eliminate dead phis")
DEFINE_bool(use_gvn, true, "use hydrogen global value numbering")
DEFINE_bool(use_canonicalizing, true, "use hydrogen instruction canonicalizing")
DEFINE_bool(use_inlining, true, "use function inlining")
+DEFINE_bool(use_escape_analysis, true, "use hydrogen escape analysis")
+DEFINE_bool(use_allocation_folding, true, "use allocation folding")
+DEFINE_int(max_inlining_levels, 5, "maximum number of inlining levels")
DEFINE_int(max_inlined_source_size, 600,
"maximum source size in bytes considered for a single inlining")
DEFINE_int(max_inlined_nodes, 196,
"maximum number of AST nodes considered for a single inlining")
-DEFINE_int(max_inlined_nodes_cumulative, 196,
+DEFINE_int(max_inlined_nodes_cumulative, 400,
"maximum cumulative number of AST nodes considered for inlining")
DEFINE_bool(loop_invariant_code_motion, true, "loop invariant code motion")
-DEFINE_bool(collect_megamorphic_maps_from_stub_cache,
- true,
+DEFINE_bool(fast_math, true, "faster (but maybe less accurate) math functions")
+DEFINE_bool(collect_megamorphic_maps_from_stub_cache, true,
"crankshaft harvests type feedback from stub cache")
DEFINE_bool(hydrogen_stats, false, "print statistics for hydrogen")
+DEFINE_bool(trace_check_elimination, false, "trace check elimination phase")
DEFINE_bool(trace_hydrogen, false, "trace generated hydrogen to file")
-DEFINE_string(trace_phase, "Z", "trace generated IR for specified phases")
+DEFINE_string(trace_hydrogen_filter, "*", "hydrogen tracing filter")
+DEFINE_bool(trace_hydrogen_stubs, false, "trace generated hydrogen for stubs")
+DEFINE_string(trace_hydrogen_file, NULL, "trace hydrogen to given file name")
+DEFINE_string(trace_phase, "HLZ", "trace generated IR for specified phases")
DEFINE_bool(trace_inlining, false, "trace inlining decisions")
+DEFINE_bool(trace_load_elimination, false, "trace load elimination")
DEFINE_bool(trace_alloc, false, "trace register allocator")
DEFINE_bool(trace_all_uses, false, "trace all use positions")
DEFINE_bool(trace_range, false, "trace range analysis")
DEFINE_bool(trace_gvn, false, "trace global value numbering")
DEFINE_bool(trace_representation, false, "trace representation types")
+DEFINE_bool(trace_escape_analysis, false, "trace hydrogen escape analysis")
+DEFINE_bool(trace_allocation_folding, false, "trace allocation folding")
+DEFINE_bool(trace_track_allocation_sites, false,
+ "trace the tracking of allocation sites")
+DEFINE_bool(trace_migration, false, "trace object migration")
+DEFINE_bool(trace_generalization, false, "trace map generalization")
DEFINE_bool(stress_pointer_maps, false, "pointer map for every instruction")
DEFINE_bool(stress_environments, false, "environment for every instruction")
-DEFINE_int(deopt_every_n_times,
- 0,
+DEFINE_int(deopt_every_n_times, 0,
"deoptimize every n times a deopt point is passed")
+DEFINE_int(deopt_every_n_garbage_collections, 0,
+ "deoptimize every n garbage collections")
+DEFINE_bool(print_deopt_stress, false, "print number of possible deopt points")
DEFINE_bool(trap_on_deopt, false, "put a break point before deoptimizing")
+DEFINE_bool(trap_on_stub_deopt, false,
+ "put a break point before deoptimizing a stub")
DEFINE_bool(deoptimize_uncommon_cases, true, "deoptimize uncommon cases")
DEFINE_bool(polymorphic_inlining, true, "polymorphic inlining")
DEFINE_bool(use_osr, true, "use on-stack replacement")
DEFINE_bool(array_bounds_checks_elimination, true,
"perform array bounds checks elimination")
+DEFINE_bool(array_bounds_checks_hoisting, false,
+ "perform array bounds checks hoisting")
DEFINE_bool(array_index_dehoisting, true,
"perform array index dehoisting")
+DEFINE_bool(analyze_environment_liveness, true,
+ "analyze liveness of environment slots and zap dead values")
+DEFINE_bool(load_elimination, false, "use load elimination")
+DEFINE_bool(check_elimination, false, "use check elimination")
DEFINE_bool(dead_code_elimination, true, "use dead code elimination")
+DEFINE_bool(fold_constants, true, "use constant folding")
DEFINE_bool(trace_dead_code_elimination, false, "trace dead code elimination")
-
+DEFINE_bool(unreachable_code_elimination, true, "eliminate unreachable code")
+DEFINE_bool(track_allocation_sites, true,
+ "Use allocation site info to reduce transitions")
DEFINE_bool(trace_osr, false, "trace on-stack replacement")
DEFINE_int(stress_runs, 0, "number of stress runs")
DEFINE_bool(optimize_closures, true, "optimize closures")
@@ -213,22 +305,38 @@ DEFINE_bool(lookup_sample_by_shared, true,
"info, not JSFunction itself")
DEFINE_bool(cache_optimized_code, true,
"cache optimized code for closures")
+DEFINE_bool(flush_optimized_code_cache, true,
+ "flushes the cache of optimized code for closures on every GC")
DEFINE_bool(inline_construct, true, "inline constructor calls")
DEFINE_bool(inline_arguments, true, "inline functions with arguments object")
DEFINE_bool(inline_accessors, true, "inline JavaScript accessors")
DEFINE_int(loop_weight, 1, "loop weight for representation inference")
+DEFINE_int(escape_analysis_iterations, 1,
+ "maximum number of escape analysis fix-point iterations")
DEFINE_bool(optimize_for_in, true,
"optimize functions containing for-in loops")
DEFINE_bool(opt_safe_uint32_operations, true,
- "allow uint32 values on optimize frames if they are used only in"
+ "allow uint32 values on optimize frames if they are used only in "
"safe operations")
-DEFINE_bool(parallel_recompilation, false,
+DEFINE_bool(concurrent_recompilation, true,
"optimizing hot functions asynchronously on a separate thread")
-DEFINE_bool(trace_parallel_recompilation, false, "track parallel recompilation")
-DEFINE_int(parallel_recompilation_queue_length, 2,
- "the length of the parallel compilation queue")
+DEFINE_bool(trace_concurrent_recompilation, false,
+ "track concurrent recompilation")
+DEFINE_int(concurrent_recompilation_queue_length, 8,
+ "the length of the concurrent compilation queue")
+DEFINE_int(concurrent_recompilation_delay, 0,
+ "artificial compilation delay in ms")
+DEFINE_bool(block_concurrent_recompilation, false,
+ "block queued jobs until released")
+DEFINE_bool(concurrent_osr, false,
+ "concurrent on-stack replacement")
+DEFINE_implication(concurrent_osr, concurrent_recompilation)
+
+DEFINE_bool(omit_map_checks_for_leaf_maps, true,
+ "do not emit check maps for constant values that have a leaf map, "
+ "deoptimize the optimized code if the layout of the maps changes.")
// Experimental profiler changes.
DEFINE_bool(experimental_profiler, true, "enable all profiler experiments")
@@ -239,8 +347,6 @@ DEFINE_bool(self_optimization, false,
DEFINE_bool(direct_self_opt, false,
"call recompile stub directly when self-optimizing")
DEFINE_bool(retry_self_opt, false, "re-try self-optimization if it failed")
-DEFINE_bool(count_based_interrupts, false,
- "trigger profiler ticks based on counting instead of timing")
DEFINE_bool(interrupt_at_exit, false,
"insert an interrupt check at function exit")
DEFINE_bool(weighted_back_edges, false,
@@ -248,7 +354,7 @@ DEFINE_bool(weighted_back_edges, false,
// 0x1700 fits in the immediate field of an ARM instruction.
DEFINE_int(interrupt_budget, 0x1700,
"execution budget before interrupt is triggered")
-DEFINE_int(type_info_threshold, 15,
+DEFINE_int(type_info_threshold, 25,
"percentage of ICs that must have type info to allow optimization")
DEFINE_int(self_opt_count, 130, "call count before self-optimization")
@@ -256,7 +362,6 @@ DEFINE_implication(experimental_profiler, watch_ic_patching)
DEFINE_implication(experimental_profiler, self_optimization)
// Not implying direct_self_opt here because it seems to be a bad idea.
DEFINE_implication(experimental_profiler, retry_self_opt)
-DEFINE_implication(experimental_profiler, count_based_interrupts)
DEFINE_implication(experimental_profiler, interrupt_at_exit)
DEFINE_implication(experimental_profiler, weighted_back_edges)
@@ -275,17 +380,14 @@ DEFINE_bool(enable_sse4_1, true,
"enable use of SSE4.1 instructions if available")
DEFINE_bool(enable_cmov, true,
"enable use of CMOV instruction if available")
-DEFINE_bool(enable_rdtsc, true,
- "enable use of RDTSC instruction if available")
DEFINE_bool(enable_sahf, true,
"enable use of SAHF instruction if available (X64 only)")
-DEFINE_bool(enable_vfp3, true,
- "enable use of VFP3 instructions if available - this implies "
- "enabling ARMv7 and VFP2 instructions (ARM only)")
-DEFINE_bool(enable_vfp2, true,
- "enable use of VFP2 instructions if available")
-DEFINE_bool(enable_armv7, true,
+DEFINE_bool(enable_vfp3, ENABLE_VFP3_DEFAULT,
+ "enable use of VFP3 instructions if available")
+DEFINE_bool(enable_armv7, ENABLE_ARMV7_DEFAULT,
"enable use of ARMv7 instructions if available (ARM only)")
+DEFINE_bool(enable_neon, true,
+ "enable use of NEON instructions if available (ARM only)")
DEFINE_bool(enable_sudiv, true,
"enable use of SDIV and UDIV instructions if available (ARM only)")
DEFINE_bool(enable_movw_movt, false,
@@ -293,13 +395,18 @@ DEFINE_bool(enable_movw_movt, false,
"instruction pairs (ARM only)")
DEFINE_bool(enable_unaligned_accesses, true,
"enable unaligned accesses for ARMv7 (ARM only)")
-DEFINE_bool(enable_fpu, true,
- "enable use of MIPS FPU instructions if available (MIPS only)")
+DEFINE_bool(enable_32dregs, ENABLE_32DREGS_DEFAULT,
+ "enable use of d16-d31 registers on ARM - this requires VFP3")
+DEFINE_bool(enable_vldr_imm, false,
+ "enable use of constant pools for double immediate (ARM only)")
// bootstrapper.cc
DEFINE_string(expose_natives_as, NULL, "expose natives in global object")
DEFINE_string(expose_debug_as, NULL, "expose debug in global object")
DEFINE_bool(expose_gc, false, "expose gc extension")
+DEFINE_string(expose_gc_as, NULL,
+ "expose gc extension under the specified name")
+DEFINE_implication(expose_gc_as, expose_gc)
DEFINE_bool(expose_externalize_string, false,
"expose externalize string extension")
DEFINE_int(stack_trace_limit, 10, "number of stack frames to capture")
@@ -315,9 +422,10 @@ DEFINE_bool(stack_trace_on_abort, true,
"print a stack trace if an assertion failure occurs")
// codegen-ia32.cc / codegen-arm.cc
+DEFINE_bool(trace_codegen, false,
+ "print name of functions for which code is generated")
DEFINE_bool(trace, false, "trace function calls")
-DEFINE_bool(mask_constants_with_cookie,
- true,
+DEFINE_bool(mask_constants_with_cookie, true,
"use random jit cookie to mask large constants")
// codegen.cc
@@ -326,8 +434,11 @@ DEFINE_bool(trace_opt, false, "trace lazy optimization")
DEFINE_bool(trace_opt_stats, false, "trace lazy optimization statistics")
DEFINE_bool(opt, true, "use adaptive optimizations")
DEFINE_bool(always_opt, false, "always try to optimize functions")
+DEFINE_bool(always_osr, false, "always try to OSR functions")
DEFINE_bool(prepare_always_opt, false, "prepare for turning on always opt")
-DEFINE_bool(trace_deopt, false, "trace deoptimization")
+DEFINE_bool(trace_deopt, false, "trace optimize function deoptimization")
+DEFINE_bool(trace_stub_failures, false,
+ "trace deoptimization of generated code stubs")
// compiler.cc
DEFINE_int(min_preparse_length, 1024,
@@ -342,8 +453,20 @@ DEFINE_bool(compilation_cache, true, "enable compilation cache")
DEFINE_bool(cache_prototype_transitions, true, "cache prototype transitions")
+// cpu-profiler.cc
+DEFINE_int(cpu_profiler_sampling_interval, 1000,
+ "CPU profiler sampling interval in microseconds")
+
// debug.cc
DEFINE_bool(trace_debug_json, false, "trace debugging JSON request/response")
+DEFINE_bool(trace_js_array_abuse, false,
+ "trace out-of-bounds accesses to JS arrays")
+DEFINE_bool(trace_external_array_abuse, false,
+ "trace out-of-bounds-accesses to external arrays")
+DEFINE_bool(trace_array_abuse, false,
+ "trace out-of-bounds accesses to all arrays")
+DEFINE_implication(trace_array_abuse, trace_js_array_abuse)
+DEFINE_implication(trace_array_abuse, trace_external_array_abuse)
DEFINE_bool(debugger_auto_break, true,
"automatically set the debug break flag when debugger commands are "
"in the queue")
@@ -388,30 +511,44 @@ DEFINE_bool(trace_external_memory, false,
"it is adjusted.")
DEFINE_bool(collect_maps, true,
"garbage collect maps from which no objects can be reached")
+DEFINE_bool(weak_embedded_maps_in_optimized_code, true,
+ "make maps embedded in optimized code weak")
+DEFINE_bool(weak_embedded_objects_in_optimized_code, true,
+ "make objects embedded in optimized code weak")
DEFINE_bool(flush_code, true,
- "flush code that we expect not to use again before full gc")
+ "flush code that we expect not to use again (during full gc)")
+DEFINE_bool(flush_code_incrementally, true,
+ "flush code that we expect not to use again (incrementally)")
+DEFINE_bool(trace_code_flushing, false, "trace code flushing progress")
+DEFINE_bool(age_code, true,
+ "track un-executed functions to age code and flush only "
+ "old code (required for code flushing)")
DEFINE_bool(incremental_marking, true, "use incremental marking")
DEFINE_bool(incremental_marking_steps, true, "do incremental marking steps")
DEFINE_bool(trace_incremental_marking, false,
"trace progress of the incremental marking")
DEFINE_bool(track_gc_object_stats, false,
"track object counts and memory usage")
+DEFINE_bool(parallel_sweeping, true, "enable parallel sweeping")
+DEFINE_bool(concurrent_sweeping, false, "enable concurrent sweeping")
+DEFINE_int(sweeper_threads, 0,
+ "number of parallel and concurrent sweeping threads")
#ifdef VERIFY_HEAP
DEFINE_bool(verify_heap, false, "verify heap pointers before and after GC")
#endif
+
+// heap-snapshot-generator.cc
+DEFINE_bool(heap_profiler_trace_objects, false,
+ "Dump heap object allocations/movements/size_updates")
+
+
// v8.cc
DEFINE_bool(use_idle_notification, true,
"Use idle notification to reduce memory footprint.")
// ic.cc
DEFINE_bool(use_ic, true, "use inline caching")
-#ifdef LIVE_OBJECT_LIST
-// liveobjectlist.cc
-DEFINE_string(lol_workdir, NULL, "path for lol temp files")
-DEFINE_bool(verify_lol, false, "perform debugging verification for lol")
-#endif
-
// macro-assembler-ia32.cc
DEFINE_bool(native_code_counters, false,
"generate extra code for manipulating stats counters")
@@ -429,6 +566,9 @@ DEFINE_bool(incremental_code_compaction, true,
DEFINE_bool(cleanup_code_caches_at_gc, true,
"Flush inline caches prior to mark compact collection and "
"flush code caches in maps during mark compact cycle.")
+DEFINE_bool(use_marking_progress_bar, true,
+ "Use a progress bar to scan large objects in increments when "
+ "incremental marking is active.")
DEFINE_int(random_seed, 0,
"Default seed for initializing random generator "
"(0, the default, means to use system random).")
@@ -455,15 +595,17 @@ DEFINE_bool(trace_exception, false,
"print stack trace when throwing exceptions")
DEFINE_bool(preallocate_message_memory, false,
"preallocate some memory to build stack traces.")
-DEFINE_bool(randomize_hashes,
- true,
+DEFINE_bool(randomize_hashes, true,
"randomize hashes to avoid predictable hash collisions "
"(with snapshots this option cannot override the baked-in seed)")
-DEFINE_int(hash_seed,
- 0,
+DEFINE_int(hash_seed, 0,
"Fixed seed to use to hash property keys (0 means random)"
"(with snapshots this option cannot override the baked-in seed)")
+// snapshot-common.cc
+DEFINE_bool(profile_deserialization, false,
+ "Print the time it takes to deserialize the snapshot.")
+
// v8.cc
DEFINE_bool(preemption, false,
"activate a 100ms timer that switches between V8 threads")
@@ -473,11 +615,12 @@ DEFINE_bool(regexp_optimization, true, "generate optimized regexp code")
// Testing flags test/cctest/test-{flags,api,serialization}.cc
DEFINE_bool(testing_bool_flag, true, "testing_bool_flag")
+DEFINE_maybe_bool(testing_maybe_bool_flag, "testing_maybe_bool_flag")
DEFINE_int(testing_int_flag, 13, "testing_int_flag")
DEFINE_float(testing_float_flag, 2.5, "float-flag")
DEFINE_string(testing_string_flag, "Hello, world!", "string-flag")
DEFINE_int(testing_prng_seed, 42, "Seed used for threading test randomness")
-#ifdef WIN32
+#ifdef _WIN32
DEFINE_string(testing_serialization_file, "C:\\Windows\\Temp\\serdes",
"file in which to testing_serialize heap")
#else
@@ -489,6 +632,10 @@ DEFINE_string(testing_serialization_file, "/tmp/serdes",
DEFINE_string(extra_code, NULL, "A filename with extra code to be included in"
" the snapshot (mksnapshot only)")
+// code-stubs-hydrogen.cc
+DEFINE_bool(profile_hydrogen_code_stub_compilation, false,
+ "Print the time it takes to lazily compile hydrogen code stubs.")
+
//
// Dev shell flags
//
@@ -505,7 +652,7 @@ DEFINE_int(debugger_port, 5858, "Port to use for remote debugging")
#endif // ENABLE_DEBUGGER_SUPPORT
DEFINE_string(map_counters, "", "Map counters to a file")
-DEFINE_args(js_arguments, JSARGUMENTS_INIT,
+DEFINE_args(js_arguments,
"Pass all remaining arguments to the script. Alias for \"--\".")
#if defined(WEBOS__)
@@ -549,18 +696,19 @@ DEFINE_bool(stress_compaction, false,
#endif
// checks.cc
+#ifdef ENABLE_SLOW_ASSERTS
DEFINE_bool(enable_slow_asserts, false,
"enable asserts that are slow to execute")
+#endif
-// codegen-ia32.cc / codegen-arm.cc
-DEFINE_bool(trace_codegen, false,
- "print name of functions for which code is generated")
+// codegen-ia32.cc / codegen-arm.cc / macro-assembler-*.cc
DEFINE_bool(print_source, false, "pretty print source code")
DEFINE_bool(print_builtin_source, false,
"pretty print source code for builtins")
DEFINE_bool(print_ast, false, "print source AST")
DEFINE_bool(print_builtin_ast, false, "print source AST for builtins")
DEFINE_string(stop_at, "", "function name where to insert a breakpoint")
+DEFINE_bool(trap_on_abort, false, "replace aborts by breakpoints")
// compiler.cc
DEFINE_bool(print_builtin_scopes, false, "print scopes for builtins")
@@ -588,8 +736,7 @@ DEFINE_bool(print_interface_details, false, "print interface inference details")
DEFINE_int(print_interface_depth, 5, "depth for printing interfaces")
// objects.cc
-DEFINE_bool(trace_normalization,
- false,
+DEFINE_bool(trace_normalization, false,
"prints when objects are turned into dictionaries.")
// runtime.cc
@@ -602,16 +749,11 @@ DEFINE_bool(collect_heap_spill_statistics, false,
DEFINE_bool(trace_isolates, false, "trace isolate state changes")
-// VM state
-DEFINE_bool(log_state_changes, false, "Log state changes.")
-
// Regexp
-DEFINE_bool(regexp_possessive_quantifier,
- false,
+DEFINE_bool(regexp_possessive_quantifier, false,
"enable possessive quantifier syntax for testing")
DEFINE_bool(trace_regexp_bytecodes, false, "trace regexp bytecode execution")
-DEFINE_bool(trace_regexp_assembler,
- false,
+DEFINE_bool(trace_regexp_assembler, false,
"trace regexp macro assembler calls.")
//
@@ -636,20 +778,19 @@ DEFINE_bool(log_snapshot_positions, false,
DEFINE_bool(log_suspect, false, "Log suspect operations.")
DEFINE_bool(prof, false,
"Log statistical profiling information (implies --log-code).")
-DEFINE_bool(prof_auto, true,
- "Used with --prof, starts profiling automatically")
-DEFINE_bool(prof_lazy, false,
- "Used with --prof, only does sampling and logging"
- " when profiler is active (implies --noprof_auto).")
DEFINE_bool(prof_browser_mode, true,
"Used with --prof, turns on browser-compatible mode for profiling.")
DEFINE_bool(log_regexp, false, "Log regular expression execution.")
-DEFINE_bool(sliding_state_window, false,
- "Update sliding state window counters.")
DEFINE_string(logfile, "v8.log", "Specify the name of the log file.")
+DEFINE_bool(logfile_per_isolate, true, "Separate log files for each isolate.")
DEFINE_bool(ll_prof, false, "Enable low-level linux profiler.")
DEFINE_string(gc_fake_mmap, "/tmp/__v8_gc__",
"Specify the name of the file for fake gc mmap used in ll_prof")
+DEFINE_bool(log_internal_timer_events, false, "Time internal events.")
+DEFINE_bool(log_timer_events, false,
+ "Time events including external callbacks.")
+DEFINE_implication(log_timer_events, log_internal_timer_events)
+DEFINE_implication(log_internal_timer_events, prof)
//
// Disassembler only flags
@@ -664,16 +805,18 @@ DEFINE_string(gc_fake_mmap, "/tmp/__v8_gc__",
// elements.cc
DEFINE_bool(trace_elements_transitions, false, "trace elements transitions")
+DEFINE_bool(trace_creation_allocation_sites, false,
+ "trace the creation of allocation sites")
+
// code-stubs.cc
DEFINE_bool(print_code_stubs, false, "print code stubs")
-DEFINE_bool(test_secondary_stub_cache,
- false,
+DEFINE_bool(test_secondary_stub_cache, false,
"test secondary stub cache by disabling the primary one")
-DEFINE_bool(test_primary_stub_cache,
- false,
+DEFINE_bool(test_primary_stub_cache, false,
"test primary stub cache by disabling the secondary one")
+
// codegen-ia32.cc / codegen-arm.cc
DEFINE_bool(print_code, false, "print generated code")
DEFINE_bool(print_opt_code, false, "print optimized code")
@@ -681,8 +824,19 @@ DEFINE_bool(print_unopt_code, false, "print unoptimized code before "
"printing optimized code based on it")
DEFINE_bool(print_code_verbose, false, "print more information for code")
DEFINE_bool(print_builtin_code, false, "print generated code for builtins")
+DEFINE_bool(emit_opt_code_positions, false,
+ "annotate optimize code with source code positions")
#ifdef ENABLE_DISASSEMBLER
+DEFINE_bool(sodium, false, "print generated code output suitable for use with "
+ "the Sodium code viewer")
+
+DEFINE_implication(sodium, print_code_stubs)
+DEFINE_implication(sodium, print_code)
+DEFINE_implication(sodium, print_opt_code)
+DEFINE_implication(sodium, emit_opt_code_positions)
+DEFINE_implication(sodium, code_comments)
+
DEFINE_bool(print_all_code, false, "enable all flags related to printing code")
DEFINE_implication(print_all_code, print_code)
DEFINE_implication(print_all_code, print_opt_code)
@@ -696,18 +850,39 @@ DEFINE_implication(print_all_code, trace_codegen)
#endif
#endif
+//
+// Read-only flags
+//
+#undef FLAG
+#define FLAG FLAG_READONLY
+
+// assembler-arm.h
+DEFINE_bool(enable_ool_constant_pool, false,
+ "enable use of out-of-line constant pools (ARM only)")
+
// Cleanup...
#undef FLAG_FULL
#undef FLAG_READONLY
#undef FLAG
+#undef FLAG_ALIAS
#undef DEFINE_bool
+#undef DEFINE_maybe_bool
#undef DEFINE_int
#undef DEFINE_string
+#undef DEFINE_float
+#undef DEFINE_args
#undef DEFINE_implication
+#undef DEFINE_ALIAS_bool
+#undef DEFINE_ALIAS_int
+#undef DEFINE_ALIAS_string
+#undef DEFINE_ALIAS_float
+#undef DEFINE_ALIAS_args
#undef FLAG_MODE_DECLARE
#undef FLAG_MODE_DEFINE
#undef FLAG_MODE_DEFINE_DEFAULTS
#undef FLAG_MODE_META
#undef FLAG_MODE_DEFINE_IMPLICATIONS
+
+#undef COMMA
diff --git a/deps/v8/src/flags.cc b/deps/v8/src/flags.cc
index bca0eff58..0c36aed33 100644
--- a/deps/v8/src/flags.cc
+++ b/deps/v8/src/flags.cc
@@ -34,6 +34,9 @@
#include "smart-pointers.h"
#include "string-stream.h"
+#if V8_TARGET_ARCH_ARM
+#include "arm/assembler-arm-inl.h"
+#endif
namespace v8 {
namespace internal {
@@ -52,7 +55,8 @@ namespace {
// to the actual flag, default value, comment, etc. This is designed to be POD
// initialized as to avoid requiring static constructors.
struct Flag {
- enum FlagType { TYPE_BOOL, TYPE_INT, TYPE_FLOAT, TYPE_STRING, TYPE_ARGS };
+ enum FlagType { TYPE_BOOL, TYPE_MAYBE_BOOL, TYPE_INT, TYPE_FLOAT,
+ TYPE_STRING, TYPE_ARGS };
FlagType type_; // What type of flag, bool, int, or string.
const char* name_; // Name of the flag, ex "my_flag".
@@ -72,6 +76,11 @@ struct Flag {
return reinterpret_cast<bool*>(valptr_);
}
+ MaybeBoolFlag* maybe_bool_variable() const {
+ ASSERT(type_ == TYPE_MAYBE_BOOL);
+ return reinterpret_cast<MaybeBoolFlag*>(valptr_);
+ }
+
int* int_variable() const {
ASSERT(type_ == TYPE_INT);
return reinterpret_cast<int*>(valptr_);
@@ -130,6 +139,8 @@ struct Flag {
switch (type_) {
case TYPE_BOOL:
return *bool_variable() == bool_default();
+ case TYPE_MAYBE_BOOL:
+ return maybe_bool_variable()->has_value == false;
case TYPE_INT:
return *int_variable() == int_default();
case TYPE_FLOAT:
@@ -142,7 +153,7 @@ struct Flag {
return strcmp(str1, str2) == 0;
}
case TYPE_ARGS:
- return args_variable()->argc() == 0;
+ return args_variable()->argc == 0;
}
UNREACHABLE();
return true;
@@ -154,6 +165,9 @@ struct Flag {
case TYPE_BOOL:
*bool_variable() = bool_default();
break;
+ case TYPE_MAYBE_BOOL:
+ *maybe_bool_variable() = MaybeBoolFlag::Create(false, false);
+ break;
case TYPE_INT:
*int_variable() = int_default();
break;
@@ -183,6 +197,7 @@ const size_t num_flags = sizeof(flags) / sizeof(*flags);
static const char* Type2String(Flag::FlagType type) {
switch (type) {
case Flag::TYPE_BOOL: return "bool";
+ case Flag::TYPE_MAYBE_BOOL: return "maybe_bool";
case Flag::TYPE_INT: return "int";
case Flag::TYPE_FLOAT: return "float";
case Flag::TYPE_STRING: return "string";
@@ -200,6 +215,11 @@ static SmartArrayPointer<const char> ToString(Flag* flag) {
case Flag::TYPE_BOOL:
buffer.Add("%s", (*flag->bool_variable() ? "true" : "false"));
break;
+ case Flag::TYPE_MAYBE_BOOL:
+ buffer.Add("%s", flag->maybe_bool_variable()->has_value
+ ? (flag->maybe_bool_variable()->value ? "true" : "false")
+ : "unset");
+ break;
case Flag::TYPE_INT:
buffer.Add("%d", *flag->int_variable());
break;
@@ -213,9 +233,9 @@ static SmartArrayPointer<const char> ToString(Flag* flag) {
}
case Flag::TYPE_ARGS: {
JSArguments args = *flag->args_variable();
- if (args.argc() > 0) {
+ if (args.argc > 0) {
buffer.Add("%s", args[0]);
- for (int i = 1; i < args.argc(); i++) {
+ for (int i = 1; i < args.argc; i++) {
buffer.Add(" %s", args[i]);
}
}
@@ -257,7 +277,7 @@ List<const char*>* FlagList::argv() {
buffer.Add("--%s", args_flag->name());
args->Add(buffer.ToCString().Detach());
JSArguments jsargs = *args_flag->args_variable();
- for (int j = 0; j < jsargs.argc(); j++) {
+ for (int j = 0; j < jsargs.argc; j++) {
args->Add(StrDup(jsargs[j]));
}
}
@@ -265,6 +285,11 @@ List<const char*>* FlagList::argv() {
}
+inline char NormalizeChar(char ch) {
+ return ch == '_' ? '-' : ch;
+}
+
+
// Helper function to parse flags: Takes an argument arg and splits it into
// a flag name and flag value (or NULL if they are missing). is_bool is set
// if the arg started with "-no" or "--no". The buffer may be used to NUL-
@@ -292,6 +317,7 @@ static void SplitArgument(const char* arg,
}
if (arg[0] == 'n' && arg[1] == 'o') {
arg += 2; // remove "no"
+ if (NormalizeChar(arg[0]) == '-') arg++; // remove dash after "no".
*is_bool = true;
}
*name = arg;
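The added dash-skip means a negated boolean flag now accepts a separator after "no", and NormalizeChar treats '_' like '-'. A standalone sketch of the accepted spellings (illustrative only; the real logic lives in SplitArgument):

#include <cstdio>

static char NormalizeChar(char ch) { return ch == '_' ? '-' : ch; }

static const char* StripNegation(const char* arg, bool* is_bool) {
  *is_bool = false;
  if (arg[0] == 'n' && arg[1] == 'o') {
    arg += 2;                                 // remove "no"
    if (NormalizeChar(arg[0]) == '-') arg++;  // remove '-' or '_' after "no"
    *is_bool = true;
  }
  return arg;
}

int main() {
  bool negated;
  // "nocrankshaft", "no-crankshaft" and "no_crankshaft" all yield "crankshaft".
  std::printf("%s %d\n", StripNegation("no-crankshaft", &negated), negated);
  std::printf("%s %d\n", StripNegation("no_crankshaft", &negated), negated);
  return 0;
}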
@@ -305,7 +331,7 @@ static void SplitArgument(const char* arg,
// make a copy so we can NUL-terminate flag name
size_t n = arg - *name;
CHECK(n < static_cast<size_t>(buffer_size)); // buffer is too small
- memcpy(buffer, *name, n);
+ OS::MemCopy(buffer, *name, n);
buffer[n] = '\0';
*name = buffer;
// get the value
@@ -315,11 +341,6 @@ static void SplitArgument(const char* arg,
}
-inline char NormalizeChar(char ch) {
- return ch == '_' ? '-' : ch;
-}
-
-
static bool EqualNames(const char* a, const char* b) {
for (int i = 0; NormalizeChar(a[i]) == NormalizeChar(b[i]); i++) {
if (a[i] == '\0') {
@@ -367,8 +388,8 @@ int FlagList::SetFlagsFromCommandLine(int* argc,
// sense there.
continue;
} else {
- fprintf(stderr, "Error: unrecognized flag %s\n"
- "Try --help for options\n", arg);
+ PrintF(stderr, "Error: unrecognized flag %s\n"
+ "Try --help for options\n", arg);
return_code = j;
break;
}
@@ -376,14 +397,15 @@ int FlagList::SetFlagsFromCommandLine(int* argc,
// if we still need a flag value, use the next argument if available
if (flag->type() != Flag::TYPE_BOOL &&
+ flag->type() != Flag::TYPE_MAYBE_BOOL &&
flag->type() != Flag::TYPE_ARGS &&
value == NULL) {
if (i < *argc) {
value = argv[i++];
} else {
- fprintf(stderr, "Error: missing value for flag %s of type %s\n"
- "Try --help for options\n",
- arg, Type2String(flag->type()));
+ PrintF(stderr, "Error: missing value for flag %s of type %s\n"
+ "Try --help for options\n",
+ arg, Type2String(flag->type()));
return_code = j;
break;
}
@@ -395,6 +417,9 @@ int FlagList::SetFlagsFromCommandLine(int* argc,
case Flag::TYPE_BOOL:
*flag->bool_variable() = !is_bool;
break;
+ case Flag::TYPE_MAYBE_BOOL:
+ *flag->maybe_bool_variable() = MaybeBoolFlag::Create(true, !is_bool);
+ break;
case Flag::TYPE_INT:
*flag->int_variable() = strtol(value, &endp, 10); // NOLINT
break;
@@ -421,12 +446,13 @@ int FlagList::SetFlagsFromCommandLine(int* argc,
}
// handle errors
- if ((flag->type() == Flag::TYPE_BOOL && value != NULL) ||
- (flag->type() != Flag::TYPE_BOOL && is_bool) ||
+ bool is_bool_type = flag->type() == Flag::TYPE_BOOL ||
+ flag->type() == Flag::TYPE_MAYBE_BOOL;
+ if ((is_bool_type && value != NULL) || (!is_bool_type && is_bool) ||
*endp != '\0') {
- fprintf(stderr, "Error: illegal value for flag %s of type %s\n"
- "Try --help for options\n",
- arg, Type2String(flag->type()));
+ PrintF(stderr, "Error: illegal value for flag %s of type %s\n"
+ "Try --help for options\n",
+ arg, Type2String(flag->type()));
return_code = j;
break;
}
@@ -475,7 +501,7 @@ static char* SkipBlackSpace(char* p) {
int FlagList::SetFlagsFromString(const char* str, int len) {
// make a 0-terminated copy of str
ScopedVector<char> copy0(len + 1);
- memcpy(copy0.start(), str, len);
+ OS::MemCopy(copy0.start(), str, len);
copy0[len] = '\0';
// strip leading white space
@@ -517,6 +543,12 @@ void FlagList::ResetAllFlags() {
// static
void FlagList::PrintHelp() {
+#if V8_TARGET_ARCH_ARM
+ CpuFeatures::PrintTarget();
+ CpuFeatures::Probe();
+ CpuFeatures::PrintFeatures();
+#endif // V8_TARGET_ARCH_ARM
+
printf("Usage:\n");
printf(" shell [options] -e string\n");
printf(" execute string in V8\n");
@@ -539,9 +571,11 @@ void FlagList::PrintHelp() {
}
+// static
void FlagList::EnforceFlagImplications() {
#define FLAG_MODE_DEFINE_IMPLICATIONS
#include "flag-definitions.h"
+#undef FLAG_MODE_DEFINE_IMPLICATIONS
}
} } // namespace v8::internal
diff --git a/deps/v8/src/flags.h b/deps/v8/src/flags.h
index f0b239b6f..fe182e522 100644
--- a/deps/v8/src/flags.h
+++ b/deps/v8/src/flags.h
@@ -24,9 +24,12 @@
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
#ifndef V8_FLAGS_H_
#define V8_FLAGS_H_
+#include "atomicops.h"
+
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/frames-inl.h b/deps/v8/src/frames-inl.h
index 27a526cef..2b15bfffa 100644
--- a/deps/v8/src/frames-inl.h
+++ b/deps/v8/src/frames-inl.h
@@ -98,6 +98,12 @@ inline StackHandler::Kind StackHandler::kind() const {
}
+inline unsigned StackHandler::index() const {
+ const int offset = StackHandlerConstants::kStateOffset;
+ return IndexField::decode(Memory::unsigned_at(address() + offset));
+}
+
+
inline Object** StackHandler::context_address() const {
const int offset = StackHandlerConstants::kContextOffset;
return reinterpret_cast<Object**>(address() + offset);
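
The new StackHandler::index() accessor reads the same on-stack state word that already encodes the handler kind; kind and index are packed into one unsigned value with bit-field helpers (the KindField/IndexField encode calls appear in StackHandler::Rewind in the frames.cc hunk further down). A sketch of the packing, with the exact bit widths left to the KindField/IndexField declarations in frames.h:

    // state = KindField::encode(kind) | IndexField::encode(index);
    // kind  = KindField::decode(state);
    // index = IndexField::decode(state);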
@@ -110,7 +116,7 @@ inline Object** StackHandler::code_address() const {
}
-inline StackFrame::StackFrame(StackFrameIterator* iterator)
+inline StackFrame::StackFrame(StackFrameIteratorBase* iterator)
: iterator_(iterator), isolate_(iterator_->isolate()) {
}
@@ -130,22 +136,34 @@ inline Code* StackFrame::GetContainingCode(Isolate* isolate, Address pc) {
}
-inline EntryFrame::EntryFrame(StackFrameIterator* iterator)
+inline Address* StackFrame::ResolveReturnAddressLocation(Address* pc_address) {
+ if (return_address_location_resolver_ == NULL) {
+ return pc_address;
+ } else {
+ return reinterpret_cast<Address*>(
+ return_address_location_resolver_(
+ reinterpret_cast<uintptr_t>(pc_address)));
+ }
+}
+
+
+inline EntryFrame::EntryFrame(StackFrameIteratorBase* iterator)
: StackFrame(iterator) {
}
-inline EntryConstructFrame::EntryConstructFrame(StackFrameIterator* iterator)
+inline EntryConstructFrame::EntryConstructFrame(
+ StackFrameIteratorBase* iterator)
: EntryFrame(iterator) {
}
-inline ExitFrame::ExitFrame(StackFrameIterator* iterator)
+inline ExitFrame::ExitFrame(StackFrameIteratorBase* iterator)
: StackFrame(iterator) {
}
-inline StandardFrame::StandardFrame(StackFrameIterator* iterator)
+inline StandardFrame::StandardFrame(StackFrameIteratorBase* iterator)
: StackFrame(iterator) {
}
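
ResolveReturnAddressLocation moves from a file-static helper in frames.cc into StackFrame itself, backed by the static return_address_location_resolver_ member introduced in the frames.cc hunk below. The hook lets an embedder redirect the slot that holds a frame's return address before the stack walker reads it. A hypothetical resolver, shown only to illustrate the callback's shape (identity is effectively what you get when no resolver is installed):

    // Hypothetical embedder callback matching the ReturnAddressLocationResolver
    // typedef: it receives and returns the address of the slot holding the pc.
    uintptr_t IdentityReturnAddressResolver(uintptr_t pc_slot_address) {
      return pc_slot_address;  // a real resolver could map through a shadow table
    }
    // Installed once, before any stack walking:
    //   StackFrame::SetReturnAddressLocationResolver(IdentityReturnAddressResolver);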
@@ -195,7 +213,7 @@ inline bool StandardFrame::IsConstructFrame(Address fp) {
}
-inline JavaScriptFrame::JavaScriptFrame(StackFrameIterator* iterator)
+inline JavaScriptFrame::JavaScriptFrame(StackFrameIteratorBase* iterator)
: StandardFrame(iterator) {
}
@@ -213,6 +231,34 @@ Object* JavaScriptFrame::GetParameter(int index) const {
}
+inline Address JavaScriptFrame::GetOperandSlot(int index) const {
+ Address base = fp() + JavaScriptFrameConstants::kLocal0Offset;
+ ASSERT(IsAddressAligned(base, kPointerSize));
+ ASSERT_EQ(type(), JAVA_SCRIPT);
+ ASSERT_LT(index, ComputeOperandsCount());
+ ASSERT_LE(0, index);
+ // Operand stack grows down.
+ return base - index * kPointerSize;
+}
+
+
+inline Object* JavaScriptFrame::GetOperand(int index) const {
+ return Memory::Object_at(GetOperandSlot(index));
+}
+
+
+inline int JavaScriptFrame::ComputeOperandsCount() const {
+ Address base = fp() + JavaScriptFrameConstants::kLocal0Offset;
+ // Base points to low address of first operand and stack grows down, so add
+ // kPointerSize to get the actual stack size.
+ intptr_t stack_size_in_bytes = (base + kPointerSize) - sp();
+ ASSERT(IsAligned(stack_size_in_bytes, kPointerSize));
+ ASSERT(type() == JAVA_SCRIPT);
+ ASSERT(stack_size_in_bytes >= 0);
+ return static_cast<int>(stack_size_in_bytes >> kPointerSizeLog2);
+}
+
+
inline Object* JavaScriptFrame::receiver() const {
return GetParameter(-1);
}
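
GetOperandSlot and ComputeOperandsCount describe the expression (operand) stack purely in terms of fp and sp. A worked example of the arithmetic, assuming a 64-bit target with kPointerSize == 8; the actual offsets come from JavaScriptFrameConstants:

    // base    = fp + JavaScriptFrameConstants::kLocal0Offset   // slot of operand 0
    // slot(i) = base - i * kPointerSize                        // stack grows down
    // count   = ((base + kPointerSize) - sp) >> kPointerSizeLog2
    //
    // Example: base == 0x7fff1000, sp == 0x7fff0fe8
    //   count   == (0x7fff1008 - 0x7fff0fe8) / 8 == 4 operands
    //   slot(0) == 0x7fff1000, slot(3) == 0x7fff0fe8 == sp (the top operand)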
@@ -228,51 +274,56 @@ inline bool JavaScriptFrame::has_adapted_arguments() const {
}
-inline Object* JavaScriptFrame::function() const {
- Object* result = function_slot_object();
- ASSERT(result->IsJSFunction());
- return result;
+inline JSFunction* JavaScriptFrame::function() const {
+ return JSFunction::cast(function_slot_object());
}
-inline OptimizedFrame::OptimizedFrame(StackFrameIterator* iterator)
+inline StubFrame::StubFrame(StackFrameIteratorBase* iterator)
+ : StandardFrame(iterator) {
+}
+
+
+inline OptimizedFrame::OptimizedFrame(StackFrameIteratorBase* iterator)
: JavaScriptFrame(iterator) {
}
inline ArgumentsAdaptorFrame::ArgumentsAdaptorFrame(
- StackFrameIterator* iterator) : JavaScriptFrame(iterator) {
+ StackFrameIteratorBase* iterator) : JavaScriptFrame(iterator) {
}
-inline InternalFrame::InternalFrame(StackFrameIterator* iterator)
+inline InternalFrame::InternalFrame(StackFrameIteratorBase* iterator)
: StandardFrame(iterator) {
}
-inline ConstructFrame::ConstructFrame(StackFrameIterator* iterator)
+inline StubFailureTrampolineFrame::StubFailureTrampolineFrame(
+ StackFrameIteratorBase* iterator) : StandardFrame(iterator) {
+}
+
+
+inline ConstructFrame::ConstructFrame(StackFrameIteratorBase* iterator)
: InternalFrame(iterator) {
}
-template<typename Iterator>
-inline JavaScriptFrameIteratorTemp<Iterator>::JavaScriptFrameIteratorTemp(
+inline JavaScriptFrameIterator::JavaScriptFrameIterator(
Isolate* isolate)
: iterator_(isolate) {
if (!done()) Advance();
}
-template<typename Iterator>
-inline JavaScriptFrameIteratorTemp<Iterator>::JavaScriptFrameIteratorTemp(
+inline JavaScriptFrameIterator::JavaScriptFrameIterator(
Isolate* isolate, ThreadLocalTop* top)
: iterator_(isolate, top) {
if (!done()) Advance();
}
-template<typename Iterator>
-inline JavaScriptFrame* JavaScriptFrameIteratorTemp<Iterator>::frame() const {
+inline JavaScriptFrame* JavaScriptFrameIterator::frame() const {
// TODO(1233797): The frame hierarchy needs to change. It's
// problematic that we can't use the safe-cast operator to cast to
// the JavaScript frame type, because we may encounter arguments
@@ -283,43 +334,10 @@ inline JavaScriptFrame* JavaScriptFrameIteratorTemp<Iterator>::frame() const {
}
-template<typename Iterator>
-JavaScriptFrameIteratorTemp<Iterator>::JavaScriptFrameIteratorTemp(
- Isolate* isolate, StackFrame::Id id)
- : iterator_(isolate) {
- AdvanceToId(id);
-}
-
-
-template<typename Iterator>
-void JavaScriptFrameIteratorTemp<Iterator>::Advance() {
- do {
- iterator_.Advance();
- } while (!iterator_.done() && !iterator_.frame()->is_java_script());
-}
-
-
-template<typename Iterator>
-void JavaScriptFrameIteratorTemp<Iterator>::AdvanceToArgumentsFrame() {
- if (!frame()->has_adapted_arguments()) return;
- iterator_.Advance();
- ASSERT(iterator_.frame()->is_arguments_adaptor());
-}
-
-
-template<typename Iterator>
-void JavaScriptFrameIteratorTemp<Iterator>::AdvanceToId(StackFrame::Id id) {
- while (!done()) {
- Advance();
- if (frame()->id() == id) return;
- }
-}
-
-
-template<typename Iterator>
-void JavaScriptFrameIteratorTemp<Iterator>::Reset() {
- iterator_.Reset();
- if (!done()) Advance();
+inline StackFrame* SafeStackFrameIterator::frame() const {
+ ASSERT(!done());
+ ASSERT(frame_->is_java_script() || frame_->is_exit());
+ return frame_;
}
diff --git a/deps/v8/src/frames.cc b/deps/v8/src/frames.cc
index 8af92b904..167277f79 100644
--- a/deps/v8/src/frames.cc
+++ b/deps/v8/src/frames.cc
@@ -36,6 +36,7 @@
#include "safepoint-table.h"
#include "scopeinfo.h"
#include "string-stream.h"
+#include "vm-state-inl.h"
#include "allocation-inl.h"
@@ -43,19 +44,8 @@ namespace v8 {
namespace internal {
-static ReturnAddressLocationResolver return_address_location_resolver = NULL;
-
-
-// Resolves pc_address through the resolution address function if one is set.
-static inline Address* ResolveReturnAddressLocation(Address* pc_address) {
- if (return_address_location_resolver == NULL) {
- return pc_address;
- } else {
- return reinterpret_cast<Address*>(
- return_address_location_resolver(
- reinterpret_cast<uintptr_t>(pc_address)));
- }
-}
+ReturnAddressLocationResolver
+ StackFrame::return_address_location_resolver_ = NULL;
// Iterator that supports traversing the stack handlers of a
@@ -88,47 +78,29 @@ class StackHandlerIterator BASE_EMBEDDED {
#define INITIALIZE_SINGLETON(type, field) field##_(this),
-StackFrameIterator::StackFrameIterator()
- : isolate_(Isolate::Current()),
+StackFrameIteratorBase::StackFrameIteratorBase(Isolate* isolate,
+ bool can_access_heap_objects)
+ : isolate_(isolate),
STACK_FRAME_TYPE_LIST(INITIALIZE_SINGLETON)
frame_(NULL), handler_(NULL),
- thread_(isolate_->thread_local_top()),
- fp_(NULL), sp_(NULL), advance_(&StackFrameIterator::AdvanceWithHandler) {
- Reset();
+ can_access_heap_objects_(can_access_heap_objects) {
}
+#undef INITIALIZE_SINGLETON
+
+
StackFrameIterator::StackFrameIterator(Isolate* isolate)
- : isolate_(isolate),
- STACK_FRAME_TYPE_LIST(INITIALIZE_SINGLETON)
- frame_(NULL), handler_(NULL),
- thread_(isolate_->thread_local_top()),
- fp_(NULL), sp_(NULL), advance_(&StackFrameIterator::AdvanceWithHandler) {
- Reset();
+ : StackFrameIteratorBase(isolate, true) {
+ Reset(isolate->thread_local_top());
}
+
+
StackFrameIterator::StackFrameIterator(Isolate* isolate, ThreadLocalTop* t)
- : isolate_(isolate),
- STACK_FRAME_TYPE_LIST(INITIALIZE_SINGLETON)
- frame_(NULL), handler_(NULL), thread_(t),
- fp_(NULL), sp_(NULL), advance_(&StackFrameIterator::AdvanceWithHandler) {
- Reset();
-}
-StackFrameIterator::StackFrameIterator(Isolate* isolate,
- bool use_top, Address fp, Address sp)
- : isolate_(isolate),
- STACK_FRAME_TYPE_LIST(INITIALIZE_SINGLETON)
- frame_(NULL), handler_(NULL),
- thread_(use_top ? isolate_->thread_local_top() : NULL),
- fp_(use_top ? NULL : fp), sp_(sp),
- advance_(use_top ? &StackFrameIterator::AdvanceWithHandler :
- &StackFrameIterator::AdvanceWithoutHandler) {
- if (use_top || fp != NULL) {
- Reset();
- }
+ : StackFrameIteratorBase(isolate, true) {
+ Reset(t);
}
-#undef INITIALIZE_SINGLETON
-
-void StackFrameIterator::AdvanceWithHandler() {
+void StackFrameIterator::Advance() {
ASSERT(!done());
// Compute the state of the calling frame before restoring
// callee-saved registers and unwinding handlers. This allows the
@@ -151,37 +123,17 @@ void StackFrameIterator::AdvanceWithHandler() {
}
-void StackFrameIterator::AdvanceWithoutHandler() {
- // A simpler version of Advance which doesn't care about handler.
- ASSERT(!done());
+void StackFrameIterator::Reset(ThreadLocalTop* top) {
StackFrame::State state;
- StackFrame::Type type = frame_->GetCallerState(&state);
- frame_ = SingletonFor(type, &state);
-}
-
-
-void StackFrameIterator::Reset() {
- StackFrame::State state;
- StackFrame::Type type;
- if (thread_ != NULL) {
- type = ExitFrame::GetStateForFramePointer(
- Isolate::c_entry_fp(thread_), &state);
- handler_ = StackHandler::FromAddress(
- Isolate::handler(thread_));
- } else {
- ASSERT(fp_ != NULL);
- state.fp = fp_;
- state.sp = sp_;
- state.pc_address = ResolveReturnAddressLocation(
- reinterpret_cast<Address*>(StandardFrame::ComputePCAddress(fp_)));
- type = StackFrame::ComputeType(isolate(), &state);
- }
+ StackFrame::Type type = ExitFrame::GetStateForFramePointer(
+ Isolate::c_entry_fp(top), &state);
+ handler_ = StackHandler::FromAddress(Isolate::handler(top));
if (SingletonFor(type) == NULL) return;
frame_ = SingletonFor(type, &state);
}
-StackFrame* StackFrameIterator::SingletonFor(StackFrame::Type type,
+StackFrame* StackFrameIteratorBase::SingletonFor(StackFrame::Type type,
StackFrame::State* state) {
if (type == StackFrame::NONE) return NULL;
StackFrame* result = SingletonFor(type);
@@ -191,7 +143,7 @@ StackFrame* StackFrameIterator::SingletonFor(StackFrame::Type type,
}
-StackFrame* StackFrameIterator::SingletonFor(StackFrame::Type type) {
+StackFrame* StackFrameIteratorBase::SingletonFor(StackFrame::Type type) {
#define FRAME_TYPE_CASE(type, field) \
case StackFrame::type: result = &field##_; break;
@@ -210,11 +162,33 @@ StackFrame* StackFrameIterator::SingletonFor(StackFrame::Type type) {
// -------------------------------------------------------------------------
-StackTraceFrameIterator::StackTraceFrameIterator() {
- if (!done() && !IsValidFrame()) Advance();
+JavaScriptFrameIterator::JavaScriptFrameIterator(
+ Isolate* isolate, StackFrame::Id id)
+ : iterator_(isolate) {
+ while (!done()) {
+ Advance();
+ if (frame()->id() == id) return;
+ }
+}
+
+
+void JavaScriptFrameIterator::Advance() {
+ do {
+ iterator_.Advance();
+ } while (!iterator_.done() && !iterator_.frame()->is_java_script());
+}
+
+
+void JavaScriptFrameIterator::AdvanceToArgumentsFrame() {
+ if (!frame()->has_adapted_arguments()) return;
+ iterator_.Advance();
+ ASSERT(iterator_.frame()->is_arguments_adaptor());
}
+// -------------------------------------------------------------------------
+
+
StackTraceFrameIterator::StackTraceFrameIterator(Isolate* isolate)
: JavaScriptFrameIterator(isolate) {
if (!done() && !IsValidFrame()) Advance();
@@ -229,9 +203,10 @@ void StackTraceFrameIterator::Advance() {
}
}
+
bool StackTraceFrameIterator::IsValidFrame() {
if (!frame()->function()->IsJSFunction()) return false;
- Object* script = JSFunction::cast(frame()->function())->shared()->script();
+ Object* script = frame()->function()->shared()->script();
// Don't show functions from native scripts to user.
return (script->IsScript() &&
Script::TYPE_NATIVE != Script::cast(script)->type()->value());
@@ -241,85 +216,93 @@ bool StackTraceFrameIterator::IsValidFrame() {
// -------------------------------------------------------------------------
-bool SafeStackFrameIterator::ExitFrameValidator::IsValidFP(Address fp) {
- if (!validator_.IsValid(fp)) return false;
- Address sp = ExitFrame::ComputeStackPointer(fp);
- if (!validator_.IsValid(sp)) return false;
+SafeStackFrameIterator::SafeStackFrameIterator(
+ Isolate* isolate,
+ Address fp, Address sp, Address js_entry_sp)
+ : StackFrameIteratorBase(isolate, false),
+ low_bound_(sp),
+ high_bound_(js_entry_sp),
+ top_frame_type_(StackFrame::NONE),
+ external_callback_scope_(isolate->external_callback_scope()) {
StackFrame::State state;
- ExitFrame::FillState(fp, sp, &state);
- if (!validator_.IsValid(reinterpret_cast<Address>(state.pc_address))) {
- return false;
+ StackFrame::Type type;
+ ThreadLocalTop* top = isolate->thread_local_top();
+ if (IsValidTop(top)) {
+ type = ExitFrame::GetStateForFramePointer(Isolate::c_entry_fp(top), &state);
+ top_frame_type_ = type;
+ } else if (IsValidStackAddress(fp)) {
+ ASSERT(fp != NULL);
+ state.fp = fp;
+ state.sp = sp;
+ state.pc_address = StackFrame::ResolveReturnAddressLocation(
+ reinterpret_cast<Address*>(StandardFrame::ComputePCAddress(fp)));
+ // StackFrame::ComputeType will read both kContextOffset and kMarkerOffset,
+ // we check only that kMarkerOffset is within the stack bounds and do
+ // compile time check that kContextOffset slot is pushed on the stack before
+ // kMarkerOffset.
+ STATIC_ASSERT(StandardFrameConstants::kMarkerOffset <
+ StandardFrameConstants::kContextOffset);
+ Address frame_marker = fp + StandardFrameConstants::kMarkerOffset;
+ if (IsValidStackAddress(frame_marker)) {
+ type = StackFrame::ComputeType(this, &state);
+ top_frame_type_ = type;
+ } else {
+ // Mark the frame as JAVA_SCRIPT if we cannot determine its type.
+ // The frame anyways will be skipped.
+ type = StackFrame::JAVA_SCRIPT;
+ // Top frame is incomplete so we cannot reliably determine its type.
+ top_frame_type_ = StackFrame::NONE;
+ }
+ } else {
+ return;
}
- return *state.pc_address != NULL;
-}
-
-
-SafeStackFrameIterator::ActiveCountMaintainer::ActiveCountMaintainer(
- Isolate* isolate)
- : isolate_(isolate) {
- isolate_->set_safe_stack_iterator_counter(
- isolate_->safe_stack_iterator_counter() + 1);
-}
-
-
-SafeStackFrameIterator::ActiveCountMaintainer::~ActiveCountMaintainer() {
- isolate_->set_safe_stack_iterator_counter(
- isolate_->safe_stack_iterator_counter() - 1);
-}
+ if (SingletonFor(type) == NULL) return;
+ frame_ = SingletonFor(type, &state);
+ if (frame_ == NULL) return;
+ Advance();
-SafeStackFrameIterator::SafeStackFrameIterator(
- Isolate* isolate,
- Address fp, Address sp, Address low_bound, Address high_bound) :
- maintainer_(isolate),
- stack_validator_(low_bound, high_bound),
- is_valid_top_(IsValidTop(isolate, low_bound, high_bound)),
- is_valid_fp_(IsWithinBounds(low_bound, high_bound, fp)),
- is_working_iterator_(is_valid_top_ || is_valid_fp_),
- iteration_done_(!is_working_iterator_),
- iterator_(isolate, is_valid_top_, is_valid_fp_ ? fp : NULL, sp) {
-}
-
-bool SafeStackFrameIterator::is_active(Isolate* isolate) {
- return isolate->safe_stack_iterator_counter() > 0;
+ if (frame_ != NULL && !frame_->is_exit() &&
+ external_callback_scope_ != NULL &&
+ external_callback_scope_->scope_address() < frame_->fp()) {
+ // Skip top ExternalCallbackScope if we already advanced to a JS frame
+ // under it. Sampler will anyways take this top external callback.
+ external_callback_scope_ = external_callback_scope_->previous();
+ }
}
-bool SafeStackFrameIterator::IsValidTop(Isolate* isolate,
- Address low_bound, Address high_bound) {
- ThreadLocalTop* top = isolate->thread_local_top();
- Address fp = Isolate::c_entry_fp(top);
- ExitFrameValidator validator(low_bound, high_bound);
- if (!validator.IsValidFP(fp)) return false;
- return Isolate::handler(top) != NULL;
+bool SafeStackFrameIterator::IsValidTop(ThreadLocalTop* top) const {
+ Address c_entry_fp = Isolate::c_entry_fp(top);
+ if (!IsValidExitFrame(c_entry_fp)) return false;
+ // There should be at least one JS_ENTRY stack handler.
+ Address handler = Isolate::handler(top);
+ if (handler == NULL) return false;
+ // Check that there are no js frames on top of the native frames.
+ return c_entry_fp < handler;
}
-void SafeStackFrameIterator::Advance() {
- ASSERT(is_working_iterator_);
+void SafeStackFrameIterator::AdvanceOneFrame() {
ASSERT(!done());
- StackFrame* last_frame = iterator_.frame();
+ StackFrame* last_frame = frame_;
Address last_sp = last_frame->sp(), last_fp = last_frame->fp();
- // Before advancing to the next stack frame, perform pointer validity tests
- iteration_done_ = !IsValidFrame(last_frame) ||
- !CanIterateHandles(last_frame, iterator_.handler()) ||
- !IsValidCaller(last_frame);
- if (iteration_done_) return;
-
- iterator_.Advance();
- if (iterator_.done()) return;
- // Check that we have actually moved to the previous frame in the stack
- StackFrame* prev_frame = iterator_.frame();
- iteration_done_ = prev_frame->sp() < last_sp || prev_frame->fp() < last_fp;
-}
+ // Before advancing to the next stack frame, perform pointer validity tests.
+ if (!IsValidFrame(last_frame) || !IsValidCaller(last_frame)) {
+ frame_ = NULL;
+ return;
+ }
+ // Advance to the previous frame.
+ StackFrame::State state;
+ StackFrame::Type type = frame_->GetCallerState(&state);
+ frame_ = SingletonFor(type, &state);
+ if (frame_ == NULL) return;
-bool SafeStackFrameIterator::CanIterateHandles(StackFrame* frame,
- StackHandler* handler) {
- // If StackIterator iterates over StackHandles, verify that
- // StackHandlerIterator can be instantiated (see StackHandlerIterator
- // constructor.)
- return !is_valid_top_ || (frame->sp() <= handler->address());
+ // Check that we have actually moved to the previous frame in the stack.
+ if (frame_->sp() < last_sp || frame_->fp() < last_fp) {
+ frame_ = NULL;
+ }
}
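
The rewritten SafeStackFrameIterator drops the StackAddressValidator/ExitFrameValidator helper classes and keeps a single inclusive bounds test over the [sp, js_entry_sp] window given to the constructor (IsValidStackAddress in the frames.h hunk below). Reduced to a free function, the predicate is simply:

    // Equivalent check, with low_bound == sp and high_bound == js_entry_sp.
    bool IsWithinStackBounds(Address low_bound, Address high_bound, Address addr) {
      return low_bound <= addr && addr <= high_bound;  // inclusive on both ends
    }

IsValidTop additionally requires that a stack handler exists and that c_entry_fp < handler, i.e. no JavaScript frames are sitting on top of the native frames, before it trusts the thread-local top.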
@@ -336,8 +319,7 @@ bool SafeStackFrameIterator::IsValidCaller(StackFrame* frame) {
// sure that caller FP address is valid.
Address caller_fp = Memory::Address_at(
frame->fp() + EntryFrameConstants::kCallerFPOffset);
- ExitFrameValidator validator(stack_validator_);
- if (!validator.IsValidFP(caller_fp)) return false;
+ if (!IsValidExitFrame(caller_fp)) return false;
} else if (frame->is_arguments_adaptor()) {
// See ArgumentsAdaptorFrame::GetCallerStackPointer. It assumes that
// the number of arguments is stored on stack as Smi. We need to check
@@ -350,38 +332,53 @@ bool SafeStackFrameIterator::IsValidCaller(StackFrame* frame) {
}
frame->ComputeCallerState(&state);
return IsValidStackAddress(state.sp) && IsValidStackAddress(state.fp) &&
- iterator_.SingletonFor(frame->GetCallerState(&state)) != NULL;
+ SingletonFor(frame->GetCallerState(&state)) != NULL;
}
-void SafeStackFrameIterator::Reset() {
- if (is_working_iterator_) {
- iterator_.Reset();
- iteration_done_ = false;
+bool SafeStackFrameIterator::IsValidExitFrame(Address fp) const {
+ if (!IsValidStackAddress(fp)) return false;
+ Address sp = ExitFrame::ComputeStackPointer(fp);
+ if (!IsValidStackAddress(sp)) return false;
+ StackFrame::State state;
+ ExitFrame::FillState(fp, sp, &state);
+ if (!IsValidStackAddress(reinterpret_cast<Address>(state.pc_address))) {
+ return false;
}
+ return *state.pc_address != NULL;
}
-// -------------------------------------------------------------------------
-
-
-SafeStackTraceFrameIterator::SafeStackTraceFrameIterator(
- Isolate* isolate,
- Address fp, Address sp, Address low_bound, Address high_bound) :
- SafeJavaScriptFrameIterator(isolate, fp, sp, low_bound, high_bound) {
- if (!done() && !frame()->is_java_script()) Advance();
-}
-
-
-void SafeStackTraceFrameIterator::Advance() {
+void SafeStackFrameIterator::Advance() {
while (true) {
- SafeJavaScriptFrameIterator::Advance();
+ AdvanceOneFrame();
if (done()) return;
- if (frame()->is_java_script()) return;
+ if (frame_->is_java_script()) return;
+ if (frame_->is_exit() && external_callback_scope_) {
+ // Some of the EXIT frames may have ExternalCallbackScope allocated on
+ // top of them. In that case the scope corresponds to the first EXIT
+ // frame beneath it. There may be other EXIT frames on top of the
+ // ExternalCallbackScope, just skip them as we cannot collect any useful
+ // information about them.
+ if (external_callback_scope_->scope_address() < frame_->fp()) {
+ Address* callback_address =
+ external_callback_scope_->callback_address();
+ if (*callback_address != NULL) {
+ frame_->state_.pc_address = callback_address;
+ }
+ external_callback_scope_ = external_callback_scope_->previous();
+ ASSERT(external_callback_scope_ == NULL ||
+ external_callback_scope_->scope_address() > frame_->fp());
+ return;
+ }
+ }
}
}
+// -------------------------------------------------------------------------
+
+
Code* StackFrame::GetSafepointData(Isolate* isolate,
Address inner_pointer,
SafepointEntry* safepoint_entry,
@@ -433,12 +430,13 @@ void StackFrame::IteratePc(ObjectVisitor* v,
void StackFrame::SetReturnAddressLocationResolver(
ReturnAddressLocationResolver resolver) {
- ASSERT(return_address_location_resolver == NULL);
- return_address_location_resolver = resolver;
+ ASSERT(return_address_location_resolver_ == NULL);
+ return_address_location_resolver_ = resolver;
}
-StackFrame::Type StackFrame::ComputeType(Isolate* isolate, State* state) {
+StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
+ State* state) {
ASSERT(state->fp != NULL);
if (StandardFrame::IsArgumentsAdaptorFrame(state->fp)) {
return ARGUMENTS_ADAPTOR;
@@ -453,8 +451,9 @@ StackFrame::Type StackFrame::ComputeType(Isolate* isolate, State* state) {
// frames as normal JavaScript frames to avoid having to look
// into the heap to determine the state. This is safe as long
// as nobody tries to GC...
- if (SafeStackFrameIterator::is_active(isolate)) return JAVA_SCRIPT;
- Code::Kind kind = GetContainingCode(isolate, *(state->pc_address))->kind();
+ if (!iterator->can_access_heap_objects_) return JAVA_SCRIPT;
+ Code::Kind kind = GetContainingCode(iterator->isolate(),
+ *(state->pc_address))->kind();
ASSERT(kind == Code::FUNCTION || kind == Code::OPTIMIZED_FUNCTION);
return (kind == Code::OPTIMIZED_FUNCTION) ? OPTIMIZED : JAVA_SCRIPT;
}
@@ -462,15 +461,21 @@ StackFrame::Type StackFrame::ComputeType(Isolate* isolate, State* state) {
}
+#ifdef DEBUG
+bool StackFrame::can_access_heap_objects() const {
+ return iterator_->can_access_heap_objects_;
+}
+#endif
+
StackFrame::Type StackFrame::GetCallerState(State* state) const {
ComputeCallerState(state);
- return ComputeType(isolate(), state);
+ return ComputeType(iterator_, state);
}
Address StackFrame::UnpaddedFP() const {
-#if defined(V8_TARGET_ARCH_IA32)
+#if V8_TARGET_ARCH_IA32
if (!is_optimized()) return fp();
int32_t alignment_state = Memory::int32_at(
fp() + JavaScriptFrameConstants::kDynamicAlignmentStateOffset);
@@ -484,7 +489,7 @@ Address StackFrame::UnpaddedFP() const {
Code* EntryFrame::unchecked_code() const {
- return HEAP->raw_unchecked_js_entry_code();
+ return isolate()->heap()->js_entry_code();
}
@@ -507,7 +512,7 @@ StackFrame::Type EntryFrame::GetCallerState(State* state) const {
Code* EntryConstructFrame::unchecked_code() const {
- return HEAP->raw_unchecked_js_construct_entry_code();
+ return isolate()->heap()->js_construct_entry_code();
}
@@ -558,11 +563,16 @@ StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
}
+Address ExitFrame::ComputeStackPointer(Address fp) {
+ return Memory::Address_at(fp + ExitFrameConstants::kSPOffset);
+}
+
+
void ExitFrame::FillState(Address fp, Address sp, State* state) {
state->sp = sp;
state->fp = fp;
state->pc_address = ResolveReturnAddressLocation(
- reinterpret_cast<Address*>(sp - 1 * kPointerSize));
+ reinterpret_cast<Address*>(sp - 1 * kPCOnStackSize));
}
@@ -617,16 +627,10 @@ bool StandardFrame::IsExpressionInsideHandler(int n) const {
}
-void OptimizedFrame::Iterate(ObjectVisitor* v) const {
-#ifdef DEBUG
- // Make sure that optimized frames do not contain any stack handlers.
- StackHandlerIterator it(this, top_handler());
- ASSERT(it.done());
-#endif
-
+void StandardFrame::IterateCompiledFrame(ObjectVisitor* v) const {
// Make sure that we're not doing "safe" stack frame iteration. We cannot
// possibly find pointers in optimized frames in that state.
- ASSERT(!SafeStackFrameIterator::is_active(isolate()));
+ ASSERT(can_access_heap_objects());
// Compute the safepoint information.
unsigned stack_slots = 0;
@@ -649,7 +653,9 @@ void OptimizedFrame::Iterate(ObjectVisitor* v) const {
// Skip saved double registers.
if (safepoint_entry.has_doubles()) {
- parameters_base += DoubleRegister::kNumAllocatableRegisters *
+ // Number of doubles not known at snapshot time.
+ ASSERT(!Serializer::enabled());
+ parameters_base += DoubleRegister::NumAllocatableRegisters() *
kDoubleSize / kPointerSize;
}
@@ -681,14 +687,46 @@ void OptimizedFrame::Iterate(ObjectVisitor* v) const {
}
}
- // Visit the context and the function.
+ // Visit the return address in the callee and incoming arguments.
+ IteratePc(v, pc_address(), code);
+
+ // Visit the context in stub frame and JavaScript frame.
+ // Visit the function in JavaScript frame.
Object** fixed_base = &Memory::Object_at(
- fp() + JavaScriptFrameConstants::kFunctionOffset);
+ fp() + StandardFrameConstants::kMarkerOffset);
Object** fixed_limit = &Memory::Object_at(fp());
v->VisitPointers(fixed_base, fixed_limit);
+}
- // Visit the return address in the callee and incoming arguments.
- IteratePc(v, pc_address(), code);
+
+void StubFrame::Iterate(ObjectVisitor* v) const {
+ IterateCompiledFrame(v);
+}
+
+
+Code* StubFrame::unchecked_code() const {
+ return static_cast<Code*>(isolate()->FindCodeObject(pc()));
+}
+
+
+Address StubFrame::GetCallerStackPointer() const {
+ return fp() + ExitFrameConstants::kCallerSPDisplacement;
+}
+
+
+int StubFrame::GetNumberOfIncomingArguments() const {
+ return 0;
+}
+
+
+void OptimizedFrame::Iterate(ObjectVisitor* v) const {
+#ifdef DEBUG
+ // Make sure that optimized frames do not contain any stack handlers.
+ StackHandlerIterator it(this, top_handler());
+ ASSERT(it.done());
+#endif
+
+ IterateCompiledFrame(v);
}
@@ -718,17 +756,15 @@ int JavaScriptFrame::GetArgumentsLength() const {
Code* JavaScriptFrame::unchecked_code() const {
- JSFunction* function = JSFunction::cast(this->function());
- return function->unchecked_code();
+ return function()->code();
}
int JavaScriptFrame::GetNumberOfIncomingArguments() const {
- ASSERT(!SafeStackFrameIterator::is_active(isolate()) &&
+ ASSERT(can_access_heap_objects() &&
isolate()->heap()->gc_state() == Heap::NOT_IN_GC);
- JSFunction* function = JSFunction::cast(this->function());
- return function->shared()->formal_parameter_count();
+ return function()->shared()->formal_parameter_count();
}
@@ -739,7 +775,7 @@ Address JavaScriptFrame::GetCallerStackPointer() const {
void JavaScriptFrame::GetFunctions(List<JSFunction*>* functions) {
ASSERT(functions->length() == 0);
- functions->Add(JSFunction::cast(function()));
+ functions->Add(function());
}
@@ -748,7 +784,7 @@ void JavaScriptFrame::Summarize(List<FrameSummary>* functions) {
Code* code_pointer = LookupCode();
int offset = static_cast<int>(pc() - code_pointer->address());
FrameSummary summary(receiver(),
- JSFunction::cast(function()),
+ function(),
code_pointer,
offset,
IsConstructor());
@@ -756,52 +792,47 @@ void JavaScriptFrame::Summarize(List<FrameSummary>* functions) {
}
-void JavaScriptFrame::PrintTop(FILE* file,
+void JavaScriptFrame::PrintTop(Isolate* isolate,
+ FILE* file,
bool print_args,
bool print_line_number) {
// constructor calls
- HandleScope scope;
- AssertNoAllocation no_allocation;
- JavaScriptFrameIterator it;
+ HandleScope scope(isolate);
+ DisallowHeapAllocation no_allocation;
+ JavaScriptFrameIterator it(isolate);
while (!it.done()) {
if (it.frame()->is_java_script()) {
JavaScriptFrame* frame = it.frame();
if (frame->IsConstructor()) PrintF(file, "new ");
// function name
- Object* maybe_fun = frame->function();
- if (maybe_fun->IsJSFunction()) {
- JSFunction* fun = JSFunction::cast(maybe_fun);
- fun->PrintName();
- Code* js_code = frame->unchecked_code();
- Address pc = frame->pc();
- int code_offset =
- static_cast<int>(pc - js_code->instruction_start());
- PrintF("+%d", code_offset);
- SharedFunctionInfo* shared = fun->shared();
- if (print_line_number) {
- Code* code = Code::cast(
- v8::internal::Isolate::Current()->heap()->FindCodeObject(pc));
- int source_pos = code->SourcePosition(pc);
- Object* maybe_script = shared->script();
- if (maybe_script->IsScript()) {
- Handle<Script> script(Script::cast(maybe_script));
- int line = GetScriptLineNumberSafe(script, source_pos) + 1;
- Object* script_name_raw = script->name();
- if (script_name_raw->IsString()) {
- String* script_name = String::cast(script->name());
- SmartArrayPointer<char> c_script_name =
- script_name->ToCString(DISALLOW_NULLS,
- ROBUST_STRING_TRAVERSAL);
- PrintF(file, " at %s:%d", *c_script_name, line);
- } else {
- PrintF(file, " at <unknown>:%d", line);
- }
+ JSFunction* fun = frame->function();
+ fun->PrintName();
+ Code* js_code = frame->unchecked_code();
+ Address pc = frame->pc();
+ int code_offset =
+ static_cast<int>(pc - js_code->instruction_start());
+ PrintF("+%d", code_offset);
+ SharedFunctionInfo* shared = fun->shared();
+ if (print_line_number) {
+ Code* code = Code::cast(isolate->FindCodeObject(pc));
+ int source_pos = code->SourcePosition(pc);
+ Object* maybe_script = shared->script();
+ if (maybe_script->IsScript()) {
+ Handle<Script> script(Script::cast(maybe_script));
+ int line = GetScriptLineNumberSafe(script, source_pos) + 1;
+ Object* script_name_raw = script->name();
+ if (script_name_raw->IsString()) {
+ String* script_name = String::cast(script->name());
+ SmartArrayPointer<char> c_script_name =
+ script_name->ToCString(DISALLOW_NULLS,
+ ROBUST_STRING_TRAVERSAL);
+ PrintF(file, " at %s:%d", *c_script_name, line);
} else {
- PrintF(file, " at <unknown>:<unknown>");
+ PrintF(file, " at <unknown>:%d", line);
}
+ } else {
+ PrintF(file, " at <unknown>:<unknown>");
}
- } else {
- PrintF("<unknown>");
}
if (print_args) {
@@ -824,6 +855,72 @@ void JavaScriptFrame::PrintTop(FILE* file,
}
+void JavaScriptFrame::SaveOperandStack(FixedArray* store,
+ int* stack_handler_index) const {
+ int operands_count = store->length();
+ ASSERT_LE(operands_count, ComputeOperandsCount());
+
+ // Visit the stack in LIFO order, saving operands and stack handlers into the
+ // array. The saved stack handlers store a link to the next stack handler,
+ // which will allow RestoreOperandStack to rewind the handlers.
+ StackHandlerIterator it(this, top_handler());
+ int i = operands_count - 1;
+ *stack_handler_index = -1;
+ for (; !it.done(); it.Advance()) {
+ StackHandler* handler = it.handler();
+ // Save operands pushed after the handler was pushed.
+ for (; GetOperandSlot(i) < handler->address(); i--) {
+ store->set(i, GetOperand(i));
+ }
+ ASSERT_GE(i + 1, StackHandlerConstants::kSlotCount);
+ ASSERT_EQ(handler->address(), GetOperandSlot(i));
+ int next_stack_handler_index = i + 1 - StackHandlerConstants::kSlotCount;
+ handler->Unwind(isolate(), store, next_stack_handler_index,
+ *stack_handler_index);
+ *stack_handler_index = next_stack_handler_index;
+ i -= StackHandlerConstants::kSlotCount;
+ }
+
+ // Save any remaining operands.
+ for (; i >= 0; i--) {
+ store->set(i, GetOperand(i));
+ }
+}
+
+
+void JavaScriptFrame::RestoreOperandStack(FixedArray* store,
+ int stack_handler_index) {
+ int operands_count = store->length();
+ ASSERT_LE(operands_count, ComputeOperandsCount());
+ int i = 0;
+ while (i <= stack_handler_index) {
+ if (i < stack_handler_index) {
+ // An operand.
+ ASSERT_EQ(GetOperand(i), isolate()->heap()->the_hole_value());
+ Memory::Object_at(GetOperandSlot(i)) = store->get(i);
+ i++;
+ } else {
+ // A stack handler.
+ ASSERT_EQ(i, stack_handler_index);
+ // The FixedArray store grows up. The stack grows down. So the operand
+ // slot for i actually points to the bottom of the top word in the
+ // handler. The base of the StackHandler* is the address of the bottom
+ // word, which will be the last slot that is in the handler.
+ int handler_slot_index = i + StackHandlerConstants::kSlotCount - 1;
+ StackHandler *handler =
+ StackHandler::FromAddress(GetOperandSlot(handler_slot_index));
+ stack_handler_index = handler->Rewind(isolate(), store, i, fp());
+ i += StackHandlerConstants::kSlotCount;
+ }
+ }
+
+ for (; i < operands_count; i++) {
+ ASSERT_EQ(GetOperand(i), isolate()->heap()->the_hole_value());
+ Memory::Object_at(GetOperandSlot(i)) = store->get(i);
+ }
+}
+
+
void FrameSummary::Print() {
PrintF("receiver: ");
receiver_->ShortPrint();
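
SaveOperandStack walks the operand stack from the top (highest index) down, interleaving plain operands with unwound stack-handler records, and links the handler records together so RestoreOperandStack can re-push them in order. A sketch of the resulting FixedArray layout for a frame with a single handler, assuming kSlotCount == 5 (true whenever kFPOnStackSize == kPointerSize):

    // FixedArray 'store' after SaveOperandStack, one handler at offset h:
    //   [0 .. h-1]      operands pushed before the handler
    //   [h .. h+4]      unwound handler: { previous handler offset (-1 here),
    //                                      code, index, context, kind }
    //   [h+5 .. n-1]    operands pushed after the handler
    //
    // *stack_handler_index is left pointing at h; with several handlers, each
    // record's first slot holds the offset of the handler above it on the stack,
    // and RestoreOperandStack follows that chain while re-pushing bottom-up.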
@@ -840,7 +937,7 @@ void FrameSummary::Print() {
JSFunction* OptimizedFrame::LiteralAt(FixedArray* literal_array,
int literal_id) {
if (literal_id == Translation::kSelfLiteralId) {
- return JSFunction::cast(function());
+ return function();
}
return JSFunction::cast(literal_array->get(literal_id));
@@ -945,7 +1042,7 @@ DeoptimizationInputData* OptimizedFrame::GetDeoptimizationData(
int* deopt_index) {
ASSERT(is_optimized());
- JSFunction* opt_function = JSFunction::cast(function());
+ JSFunction* opt_function = function();
Code* code = opt_function->code();
// The code object may have been replaced by lazy deoptimization. Fall
@@ -1057,9 +1154,9 @@ void StackFrame::PrintIndex(StringStream* accumulator,
void JavaScriptFrame::Print(StringStream* accumulator,
PrintMode mode,
int index) const {
- HandleScope scope;
+ HandleScope scope(isolate());
Object* receiver = this->receiver();
- Object* function = this->function();
+ JSFunction* function = this->function();
accumulator->PrintSecurityTokenIfChanged(function);
PrintIndex(accumulator, mode, index);
@@ -1071,31 +1168,29 @@ void JavaScriptFrame::Print(StringStream* accumulator,
// doesn't contain scope info, scope_info will return 0 for the number of
// parameters, stack local variables, context local variables, stack slots,
// or context slots.
- Handle<ScopeInfo> scope_info(ScopeInfo::Empty());
-
- if (function->IsJSFunction()) {
- Handle<SharedFunctionInfo> shared(JSFunction::cast(function)->shared());
- scope_info = Handle<ScopeInfo>(shared->scope_info());
- Object* script_obj = shared->script();
- if (script_obj->IsScript()) {
- Handle<Script> script(Script::cast(script_obj));
- accumulator->Add(" [");
- accumulator->PrintName(script->name());
-
- Address pc = this->pc();
- if (code != NULL && code->kind() == Code::FUNCTION &&
- pc >= code->instruction_start() && pc < code->instruction_end()) {
- int source_pos = code->SourcePosition(pc);
- int line = GetScriptLineNumberSafe(script, source_pos) + 1;
- accumulator->Add(":%d", line);
- } else {
- int function_start_pos = shared->start_position();
- int line = GetScriptLineNumberSafe(script, function_start_pos) + 1;
- accumulator->Add(":~%d", line);
- }
-
- accumulator->Add("] ");
+ Handle<ScopeInfo> scope_info(ScopeInfo::Empty(isolate()));
+
+ Handle<SharedFunctionInfo> shared(function->shared());
+ scope_info = Handle<ScopeInfo>(shared->scope_info());
+ Object* script_obj = shared->script();
+ if (script_obj->IsScript()) {
+ Handle<Script> script(Script::cast(script_obj));
+ accumulator->Add(" [");
+ accumulator->PrintName(script->name());
+
+ Address pc = this->pc();
+ if (code != NULL && code->kind() == Code::FUNCTION &&
+ pc >= code->instruction_start() && pc < code->instruction_end()) {
+ int source_pos = code->SourcePosition(pc);
+ int line = GetScriptLineNumberSafe(script, source_pos) + 1;
+ accumulator->Add(":%d", line);
+ } else {
+ int function_start_pos = shared->start_position();
+ int line = GetScriptLineNumberSafe(script, function_start_pos) + 1;
+ accumulator->Add(":~%d", line);
}
+
+ accumulator->Add("] ");
}
accumulator->Add("(this=%o", receiver);
@@ -1185,7 +1280,7 @@ void JavaScriptFrame::Print(StringStream* accumulator,
// Print details about the function.
if (FLAG_max_stack_trace_source_length != 0 && code != NULL) {
- SharedFunctionInfo* shared = JSFunction::cast(function)->shared();
+ SharedFunctionInfo* shared = function->shared();
accumulator->Add("--------- s o u r c e c o d e ---------\n");
shared->SourceCodePrint(accumulator, FLAG_max_stack_trace_source_length);
accumulator->Add("\n-----------------------------------------\n");
@@ -1200,10 +1295,8 @@ void ArgumentsAdaptorFrame::Print(StringStream* accumulator,
int index) const {
int actual = ComputeParametersCount();
int expected = -1;
- Object* function = this->function();
- if (function->IsJSFunction()) {
- expected = JSFunction::cast(function)->shared()->formal_parameter_count();
- }
+ JSFunction* function = this->function();
+ expected = function->shared()->formal_parameter_count();
PrintIndex(accumulator, mode, index);
accumulator->Add("arguments adaptor frame: %d->%d", actual, expected);
@@ -1276,6 +1369,43 @@ void InternalFrame::Iterate(ObjectVisitor* v) const {
}
+void StubFailureTrampolineFrame::Iterate(ObjectVisitor* v) const {
+ Object** base = &Memory::Object_at(sp());
+ Object** limit = &Memory::Object_at(fp() +
+ kFirstRegisterParameterFrameOffset);
+ v->VisitPointers(base, limit);
+ base = &Memory::Object_at(fp() + StandardFrameConstants::kMarkerOffset);
+ const int offset = StandardFrameConstants::kContextOffset;
+ limit = &Memory::Object_at(fp() + offset) + 1;
+ v->VisitPointers(base, limit);
+ IteratePc(v, pc_address(), LookupCode());
+}
+
+
+Address StubFailureTrampolineFrame::GetCallerStackPointer() const {
+ return fp() + StandardFrameConstants::kCallerSPOffset;
+}
+
+
+Code* StubFailureTrampolineFrame::unchecked_code() const {
+ Code* trampoline;
+ StubFailureTrampolineStub(NOT_JS_FUNCTION_STUB_MODE).
+ FindCodeInCache(&trampoline, isolate());
+ if (trampoline->contains(pc())) {
+ return trampoline;
+ }
+
+ StubFailureTrampolineStub(JS_FUNCTION_STUB_MODE).
+ FindCodeInCache(&trampoline, isolate());
+ if (trampoline->contains(pc())) {
+ return trampoline;
+ }
+
+ UNREACHABLE();
+ return NULL;
+}
+
+
// -------------------------------------------------------------------------
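
StubFailureTrampolineFrame::Iterate visits two disjoint pointer ranges: the register parameters the trampoline spilled below the fixed frame, and the marker/context pair of the fixed frame itself. Using the constants from the frames.h hunk below and assuming a 64-bit target (kPointerSize == 8):

    //   kFirstRegisterParameterFrameOffset = kMarkerOffset - 3*8 = -40
    //   first range:  [sp, fp - 40)      spilled register parameters
    //   second range: fp - 16 and fp - 8 (the kMarkerOffset and kContextOffset slots)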
@@ -1385,6 +1515,60 @@ InnerPointerToCodeCache::InnerPointerToCodeCacheEntry*
// -------------------------------------------------------------------------
+
+void StackHandler::Unwind(Isolate* isolate,
+ FixedArray* array,
+ int offset,
+ int previous_handler_offset) const {
+ STATIC_ASSERT(StackHandlerConstants::kSlotCount >= 5);
+ ASSERT_LE(0, offset);
+ ASSERT_GE(array->length(), offset + StackHandlerConstants::kSlotCount);
+ // Unwinding a stack handler into an array chains it in the opposite
+ // direction, re-using the "next" slot as a "previous" link, so that stack
+ // handlers can be later re-wound in the correct order. Decode the "state"
+ // slot into "index" and "kind" and store them separately, using the fp slot.
+ array->set(offset, Smi::FromInt(previous_handler_offset)); // next
+ array->set(offset + 1, *code_address()); // code
+ array->set(offset + 2, Smi::FromInt(static_cast<int>(index()))); // state
+ array->set(offset + 3, *context_address()); // context
+ array->set(offset + 4, Smi::FromInt(static_cast<int>(kind()))); // fp
+
+ *isolate->handler_address() = next()->address();
+}
+
+
+int StackHandler::Rewind(Isolate* isolate,
+ FixedArray* array,
+ int offset,
+ Address fp) {
+ STATIC_ASSERT(StackHandlerConstants::kSlotCount >= 5);
+ ASSERT_LE(0, offset);
+ ASSERT_GE(array->length(), offset + StackHandlerConstants::kSlotCount);
+ Smi* prev_handler_offset = Smi::cast(array->get(offset));
+ Code* code = Code::cast(array->get(offset + 1));
+ Smi* smi_index = Smi::cast(array->get(offset + 2));
+ Object* context = array->get(offset + 3);
+ Smi* smi_kind = Smi::cast(array->get(offset + 4));
+
+ unsigned state = KindField::encode(static_cast<Kind>(smi_kind->value())) |
+ IndexField::encode(static_cast<unsigned>(smi_index->value()));
+
+ Memory::Address_at(address() + StackHandlerConstants::kNextOffset) =
+ *isolate->handler_address();
+ Memory::Object_at(address() + StackHandlerConstants::kCodeOffset) = code;
+ Memory::uintptr_at(address() + StackHandlerConstants::kStateOffset) = state;
+ Memory::Object_at(address() + StackHandlerConstants::kContextOffset) =
+ context;
+ SetFp(address() + StackHandlerConstants::kFPOffset, fp);
+
+ *isolate->handler_address() = address();
+
+ return prev_handler_offset->value();
+}
+
+
+// -------------------------------------------------------------------------
+
int NumRegs(RegList reglist) {
return CompilerIntrinsics::CountSetBits(reglist);
}
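
Unwind and Rewind move a stack handler between its five on-stack words and five consecutive FixedArray slots; the STATIC_ASSERT(kSlotCount >= 5) ties this to the StackHandlerConstants added in the frames.h hunk below. A quick cross-check on a typical 64-bit target (kFPOnStackSize == kPointerSize == 8, an assumption):

    //   kSize      = kFPOffset + kFPOnStackSize = 4*8 + 8 = 40 bytes
    //   kSlotCount = kSize >> kPointerSizeLog2  = 40 / 8  = 5 slots
    //   (next, code, state, context, fp on the stack;
    //    prev-link, code, index, context, kind in the saved array)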
@@ -1405,6 +1589,7 @@ void SetUpJSCallerSavedCodeData() {
ASSERT(i == kNumJSCallerSaved);
}
+
int JSCallerSavedCode(int n) {
ASSERT(0 <= n && n < kNumJSCallerSaved);
return caller_saved_code_data.reg_code[n];
@@ -1437,9 +1622,10 @@ static StackFrame* AllocateFrameCopy(StackFrame* frame, Zone* zone) {
return NULL;
}
-Vector<StackFrame*> CreateStackMap(Zone* zone) {
+
+Vector<StackFrame*> CreateStackMap(Isolate* isolate, Zone* zone) {
ZoneList<StackFrame*> list(10, zone);
- for (StackFrameIterator it; !it.done(); it.Advance()) {
+ for (StackFrameIterator it(isolate); !it.done(); it.Advance()) {
StackFrame* frame = AllocateFrameCopy(it.frame(), zone);
list.Add(frame, zone);
}
diff --git a/deps/v8/src/frames.h b/deps/v8/src/frames.h
index 6f803c060..d2dbfe281 100644
--- a/deps/v8/src/frames.h
+++ b/deps/v8/src/frames.h
@@ -47,7 +47,8 @@ int JSCallerSavedCode(int n);
// Forward declarations.
-class StackFrameIterator;
+class ExternalCallbackScope;
+class StackFrameIteratorBase;
class ThreadLocalTop;
class Isolate;
@@ -84,6 +85,19 @@ class InnerPointerToCodeCache {
};
+class StackHandlerConstants : public AllStatic {
+ public:
+ static const int kNextOffset = 0 * kPointerSize;
+ static const int kCodeOffset = 1 * kPointerSize;
+ static const int kStateOffset = 2 * kPointerSize;
+ static const int kContextOffset = 3 * kPointerSize;
+ static const int kFPOffset = 4 * kPointerSize;
+
+ static const int kSize = kFPOffset + kFPOnStackSize;
+ static const int kSlotCount = kSize >> kPointerSizeLog2;
+};
+
+
class StackHandler BASE_EMBEDDED {
public:
enum Kind {
@@ -119,26 +133,52 @@ class StackHandler BASE_EMBEDDED {
inline bool is_catch() const;
inline bool is_finally() const;
+ // Generator support to preserve stack handlers.
+ void Unwind(Isolate* isolate, FixedArray* array, int offset,
+ int previous_handler_offset) const;
+ int Rewind(Isolate* isolate, FixedArray* array, int offset, Address fp);
+
private:
// Accessors.
inline Kind kind() const;
+ inline unsigned index() const;
inline Object** context_address() const;
inline Object** code_address() const;
+ inline void SetFp(Address slot, Address fp);
DISALLOW_IMPLICIT_CONSTRUCTORS(StackHandler);
};
-#define STACK_FRAME_TYPE_LIST(V) \
- V(ENTRY, EntryFrame) \
- V(ENTRY_CONSTRUCT, EntryConstructFrame) \
- V(EXIT, ExitFrame) \
- V(JAVA_SCRIPT, JavaScriptFrame) \
- V(OPTIMIZED, OptimizedFrame) \
- V(INTERNAL, InternalFrame) \
- V(CONSTRUCT, ConstructFrame) \
- V(ARGUMENTS_ADAPTOR, ArgumentsAdaptorFrame)
+#define STACK_FRAME_TYPE_LIST(V) \
+ V(ENTRY, EntryFrame) \
+ V(ENTRY_CONSTRUCT, EntryConstructFrame) \
+ V(EXIT, ExitFrame) \
+ V(JAVA_SCRIPT, JavaScriptFrame) \
+ V(OPTIMIZED, OptimizedFrame) \
+ V(STUB, StubFrame) \
+ V(STUB_FAILURE_TRAMPOLINE, StubFailureTrampolineFrame) \
+ V(INTERNAL, InternalFrame) \
+ V(CONSTRUCT, ConstructFrame) \
+ V(ARGUMENTS_ADAPTOR, ArgumentsAdaptorFrame)
+
+
+class StandardFrameConstants : public AllStatic {
+ public:
+ // Fixed part of the frame consists of return address, caller fp,
+ // context and function.
+ // StandardFrame::IterateExpressions assumes that kContextOffset is the last
+ // object pointer.
+ static const int kFixedFrameSize = kPCOnStackSize + kFPOnStackSize +
+ 2 * kPointerSize;
+ static const int kExpressionsOffset = -3 * kPointerSize;
+ static const int kMarkerOffset = -2 * kPointerSize;
+ static const int kContextOffset = -1 * kPointerSize;
+ static const int kCallerFPOffset = 0 * kPointerSize;
+ static const int kCallerPCOffset = +1 * kFPOnStackSize;
+ static const int kCallerSPOffset = kCallerPCOffset + 1 * kPCOnStackSize;
+};
// Abstract base class for all stack frames.
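
StandardFrameConstants is now expressed with kPCOnStackSize/kFPOnStackSize instead of raw kPointerSize multiples, presumably so the layout stays correct on targets where a pushed pc or fp is not pointer-sized. Worked out for a typical 64-bit target where all three sizes are 8 bytes (an assumption, not a quote from globals.h):

    //   kFixedFrameSize    = 8 (pc) + 8 (fp) + 2*8 = 32 bytes
    //   kExpressionsOffset = -3*8 = -24   first expression/operand slot
    //   kMarkerOffset      = -2*8 = -16   frame marker (holds the function in JS frames)
    //   kContextOffset     = -1*8 =  -8
    //   kCallerFPOffset    =   0
    //   kCallerPCOffset    =  +8
    //   kCallerSPOffset    = +16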
@@ -193,6 +233,9 @@ class StackFrame BASE_EMBEDDED {
bool is_optimized() const { return type() == OPTIMIZED; }
bool is_arguments_adaptor() const { return type() == ARGUMENTS_ADAPTOR; }
bool is_internal() const { return type() == INTERNAL; }
+ bool is_stub_failure_trampoline() const {
+ return type() == STUB_FAILURE_TRAMPOLINE;
+ }
bool is_construct() const { return type() == CONSTRUCT; }
virtual bool is_standard() const { return false; }
@@ -257,18 +300,22 @@ class StackFrame BASE_EMBEDDED {
static void SetReturnAddressLocationResolver(
ReturnAddressLocationResolver resolver);
+ // Resolves pc_address through the resolution address function if one is set.
+ static inline Address* ResolveReturnAddressLocation(Address* pc_address);
+
+
// Printing support.
enum PrintMode { OVERVIEW, DETAILS };
virtual void Print(StringStream* accumulator,
PrintMode mode,
int index) const { }
+ Isolate* isolate() const { return isolate_; }
+
protected:
- inline explicit StackFrame(StackFrameIterator* iterator);
+ inline explicit StackFrame(StackFrameIteratorBase* iterator);
virtual ~StackFrame() { }
- Isolate* isolate() const { return isolate_; }
-
// Compute the stack pointer for the calling frame.
virtual Address GetCallerStackPointer() const = 0;
@@ -281,13 +328,19 @@ class StackFrame BASE_EMBEDDED {
inline StackHandler* top_handler() const;
// Compute the stack frame type for the given state.
- static Type ComputeType(Isolate* isolate, State* state);
+ static Type ComputeType(const StackFrameIteratorBase* iterator, State* state);
+
+#ifdef DEBUG
+ bool can_access_heap_objects() const;
+#endif
private:
- const StackFrameIterator* iterator_;
+ const StackFrameIteratorBase* iterator_;
Isolate* isolate_;
State state_;
+ static ReturnAddressLocationResolver return_address_location_resolver_;
+
// Fill in the state of the calling frame.
virtual void ComputeCallerState(State* state) const = 0;
@@ -297,6 +350,7 @@ class StackFrame BASE_EMBEDDED {
static const intptr_t kIsolateTag = 1;
friend class StackFrameIterator;
+ friend class StackFrameIteratorBase;
friend class StackHandlerIterator;
friend class SafeStackFrameIterator;
@@ -322,7 +376,7 @@ class EntryFrame: public StackFrame {
virtual void SetCallerFp(Address caller_fp);
protected:
- inline explicit EntryFrame(StackFrameIterator* iterator);
+ inline explicit EntryFrame(StackFrameIteratorBase* iterator);
// The caller stack pointer for entry frames is always zero. The
// real information about the caller frame is available through the
@@ -333,7 +387,7 @@ class EntryFrame: public StackFrame {
virtual void ComputeCallerState(State* state) const;
virtual Type GetCallerState(State* state) const;
- friend class StackFrameIterator;
+ friend class StackFrameIteratorBase;
};
@@ -349,10 +403,10 @@ class EntryConstructFrame: public EntryFrame {
}
protected:
- inline explicit EntryConstructFrame(StackFrameIterator* iterator);
+ inline explicit EntryConstructFrame(StackFrameIteratorBase* iterator);
private:
- friend class StackFrameIterator;
+ friend class StackFrameIteratorBase;
};
@@ -383,14 +437,14 @@ class ExitFrame: public StackFrame {
static void FillState(Address fp, Address sp, State* state);
protected:
- inline explicit ExitFrame(StackFrameIterator* iterator);
+ inline explicit ExitFrame(StackFrameIteratorBase* iterator);
virtual Address GetCallerStackPointer() const;
private:
virtual void ComputeCallerState(State* state) const;
- friend class StackFrameIterator;
+ friend class StackFrameIteratorBase;
};
@@ -416,7 +470,7 @@ class StandardFrame: public StackFrame {
}
protected:
- inline explicit StandardFrame(StackFrameIterator* iterator);
+ inline explicit StandardFrame(StackFrameIteratorBase* iterator);
virtual void ComputeCallerState(State* state) const;
@@ -448,9 +502,12 @@ class StandardFrame: public StackFrame {
// construct frame.
static inline bool IsConstructFrame(Address fp);
+ // Used by OptimizedFrames and StubFrames.
+ void IterateCompiledFrame(ObjectVisitor* v) const;
+
private:
friend class StackFrame;
- friend class StackFrameIterator;
+ friend class SafeStackFrameIterator;
};
@@ -461,7 +518,7 @@ class FrameSummary BASE_EMBEDDED {
Code* code,
int offset,
bool is_constructor)
- : receiver_(receiver),
+ : receiver_(receiver, function->GetIsolate()),
function_(function),
code_(code),
offset_(offset),
@@ -489,7 +546,7 @@ class JavaScriptFrame: public StandardFrame {
virtual Type type() const { return JAVA_SCRIPT; }
// Accessors.
- inline Object* function() const;
+ inline JSFunction* function() const;
inline Object* receiver() const;
inline void set_receiver(Object* value);
@@ -500,6 +557,15 @@ class JavaScriptFrame: public StandardFrame {
return GetNumberOfIncomingArguments();
}
+ // Access the operand stack.
+ inline Address GetOperandSlot(int index) const;
+ inline Object* GetOperand(int index) const;
+ inline int ComputeOperandsCount() const;
+
+ // Generator support to preserve operand stack and stack handlers.
+ void SaveOperandStack(FixedArray* store, int* stack_handler_index) const;
+ void RestoreOperandStack(FixedArray* store, int stack_handler_index);
+
// Debugger access.
void SetParameterValue(int index, Object* value) const;
@@ -532,15 +598,22 @@ class JavaScriptFrame: public StandardFrame {
// Build a list with summaries for this frame including all inlined frames.
virtual void Summarize(List<FrameSummary>* frames);
+ // Architecture-specific register description.
+ static Register fp_register();
+ static Register context_register();
+
static JavaScriptFrame* cast(StackFrame* frame) {
ASSERT(frame->is_java_script());
return static_cast<JavaScriptFrame*>(frame);
}
- static void PrintTop(FILE* file, bool print_args, bool print_line_number);
+ static void PrintTop(Isolate* isolate,
+ FILE* file,
+ bool print_args,
+ bool print_line_number);
protected:
- inline explicit JavaScriptFrame(StackFrameIterator* iterator);
+ inline explicit JavaScriptFrame(StackFrameIteratorBase* iterator);
virtual Address GetCallerStackPointer() const;
@@ -553,8 +626,28 @@ class JavaScriptFrame: public StandardFrame {
private:
inline Object* function_slot_object() const;
- friend class StackFrameIterator;
- friend class StackTracer;
+ friend class StackFrameIteratorBase;
+};
+
+
+class StubFrame : public StandardFrame {
+ public:
+ virtual Type type() const { return STUB; }
+
+ // GC support.
+ virtual void Iterate(ObjectVisitor* v) const;
+
+ // Determine the code for the frame.
+ virtual Code* unchecked_code() const;
+
+ protected:
+ inline explicit StubFrame(StackFrameIteratorBase* iterator);
+
+ virtual Address GetCallerStackPointer() const;
+
+ virtual int GetNumberOfIncomingArguments() const;
+
+ friend class StackFrameIteratorBase;
};
@@ -577,12 +670,12 @@ class OptimizedFrame : public JavaScriptFrame {
DeoptimizationInputData* GetDeoptimizationData(int* deopt_index);
protected:
- inline explicit OptimizedFrame(StackFrameIterator* iterator);
+ inline explicit OptimizedFrame(StackFrameIteratorBase* iterator);
private:
JSFunction* LiteralAt(FixedArray* literal_array, int literal_id);
- friend class StackFrameIterator;
+ friend class StackFrameIteratorBase;
};
@@ -607,14 +700,14 @@ class ArgumentsAdaptorFrame: public JavaScriptFrame {
int index) const;
protected:
- inline explicit ArgumentsAdaptorFrame(StackFrameIterator* iterator);
+ inline explicit ArgumentsAdaptorFrame(StackFrameIteratorBase* iterator);
virtual int GetNumberOfIncomingArguments() const;
virtual Address GetCallerStackPointer() const;
private:
- friend class StackFrameIterator;
+ friend class StackFrameIteratorBase;
};
@@ -634,12 +727,45 @@ class InternalFrame: public StandardFrame {
}
protected:
- inline explicit InternalFrame(StackFrameIterator* iterator);
+ inline explicit InternalFrame(StackFrameIteratorBase* iterator);
virtual Address GetCallerStackPointer() const;
private:
- friend class StackFrameIterator;
+ friend class StackFrameIteratorBase;
+};
+
+
+class StubFailureTrampolineFrame: public StandardFrame {
+ public:
+ // sizeof(Arguments) - sizeof(Arguments*) is 3 * kPointerSize), but the
+ // presubmit script complains about using sizeof() on a type.
+ static const int kFirstRegisterParameterFrameOffset =
+ StandardFrameConstants::kMarkerOffset - 3 * kPointerSize;
+
+ static const int kCallerStackParameterCountFrameOffset =
+ StandardFrameConstants::kMarkerOffset - 2 * kPointerSize;
+
+ virtual Type type() const { return STUB_FAILURE_TRAMPOLINE; }
+
+ // Get the code associated with this frame.
+ // This method could be called during marking phase of GC.
+ virtual Code* unchecked_code() const;
+
+ virtual void Iterate(ObjectVisitor* v) const;
+
+ // Architecture-specific register description.
+ static Register fp_register();
+ static Register context_register();
+
+ protected:
+ inline explicit StubFailureTrampolineFrame(
+ StackFrameIteratorBase* iterator);
+
+ virtual Address GetCallerStackPointer() const;
+
+ private:
+ friend class StackFrameIteratorBase;
};
@@ -655,54 +781,30 @@ class ConstructFrame: public InternalFrame {
}
protected:
- inline explicit ConstructFrame(StackFrameIterator* iterator);
+ inline explicit ConstructFrame(StackFrameIteratorBase* iterator);
private:
- friend class StackFrameIterator;
+ friend class StackFrameIteratorBase;
};
-class StackFrameIterator BASE_EMBEDDED {
+class StackFrameIteratorBase BASE_EMBEDDED {
public:
- // An iterator that iterates over the current thread's stack,
- // and uses current isolate.
- StackFrameIterator();
-
- // An iterator that iterates over the isolate's current thread's stack,
- explicit StackFrameIterator(Isolate* isolate);
-
- // An iterator that iterates over a given thread's stack.
- StackFrameIterator(Isolate* isolate, ThreadLocalTop* t);
-
- // An iterator that can start from a given FP address.
- // If use_top, then work as usual, if fp isn't NULL, use it,
- // otherwise, do nothing.
- StackFrameIterator(Isolate* isolate, bool use_top, Address fp, Address sp);
-
- StackFrame* frame() const {
- ASSERT(!done());
- return frame_;
- }
-
Isolate* isolate() const { return isolate_; }
bool done() const { return frame_ == NULL; }
- void Advance() { (this->*advance_)(); }
- // Go back to the first frame.
- void Reset();
+ protected:
+ // An iterator that iterates over a given thread's stack.
+ StackFrameIteratorBase(Isolate* isolate, bool can_access_heap_objects);
- private:
Isolate* isolate_;
#define DECLARE_SINGLETON(ignore, type) type type##_;
STACK_FRAME_TYPE_LIST(DECLARE_SINGLETON)
#undef DECLARE_SINGLETON
StackFrame* frame_;
StackHandler* handler_;
- ThreadLocalTop* thread_;
- Address fp_;
- Address sp_;
- void (StackFrameIterator::*advance_)();
+ const bool can_access_heap_objects_;
StackHandler* handler() const {
ASSERT(!done());
@@ -714,46 +816,40 @@ class StackFrameIterator BASE_EMBEDDED {
// A helper function, can return a NULL pointer.
StackFrame* SingletonFor(StackFrame::Type type);
- void AdvanceWithHandler();
- void AdvanceWithoutHandler();
-
+ private:
friend class StackFrame;
- friend class SafeStackFrameIterator;
- DISALLOW_COPY_AND_ASSIGN(StackFrameIterator);
+ DISALLOW_COPY_AND_ASSIGN(StackFrameIteratorBase);
};
-// Iterator that supports iterating through all JavaScript frames.
-template<typename Iterator>
-class JavaScriptFrameIteratorTemp BASE_EMBEDDED {
+class StackFrameIterator: public StackFrameIteratorBase {
public:
- JavaScriptFrameIteratorTemp() { if (!done()) Advance(); }
-
- inline explicit JavaScriptFrameIteratorTemp(Isolate* isolate);
+ // An iterator that iterates over the isolate's current thread's stack,
+ explicit StackFrameIterator(Isolate* isolate);
+ // An iterator that iterates over a given thread's stack.
+ StackFrameIterator(Isolate* isolate, ThreadLocalTop* t);
- inline JavaScriptFrameIteratorTemp(Isolate* isolate, ThreadLocalTop* top);
+ StackFrame* frame() const {
+ ASSERT(!done());
+ return frame_;
+ }
+ void Advance();
- // Skip frames until the frame with the given id is reached.
- explicit JavaScriptFrameIteratorTemp(StackFrame::Id id) { AdvanceToId(id); }
+ private:
+ // Go back to the first frame.
+ void Reset(ThreadLocalTop* top);
- inline JavaScriptFrameIteratorTemp(Isolate* isolate, StackFrame::Id id);
+ DISALLOW_COPY_AND_ASSIGN(StackFrameIterator);
+};
- JavaScriptFrameIteratorTemp(Address fp,
- Address sp,
- Address low_bound,
- Address high_bound) :
- iterator_(fp, sp, low_bound, high_bound) {
- if (!done()) Advance();
- }
- JavaScriptFrameIteratorTemp(Isolate* isolate,
- Address fp,
- Address sp,
- Address low_bound,
- Address high_bound) :
- iterator_(isolate, fp, sp, low_bound, high_bound) {
- if (!done()) Advance();
- }
+// Iterator that supports iterating through all JavaScript frames.
+class JavaScriptFrameIterator BASE_EMBEDDED {
+ public:
+ inline explicit JavaScriptFrameIterator(Isolate* isolate);
+ inline JavaScriptFrameIterator(Isolate* isolate, ThreadLocalTop* top);
+ // Skip frames until the frame with the given id is reached.
+ JavaScriptFrameIterator(Isolate* isolate, StackFrame::Id id);
inline JavaScriptFrame* frame() const;
@@ -765,26 +861,17 @@ class JavaScriptFrameIteratorTemp BASE_EMBEDDED {
// arguments.
void AdvanceToArgumentsFrame();
- // Go back to the first frame.
- void Reset();
-
private:
- inline void AdvanceToId(StackFrame::Id id);
-
- Iterator iterator_;
+ StackFrameIterator iterator_;
};
-typedef JavaScriptFrameIteratorTemp<StackFrameIterator> JavaScriptFrameIterator;
-
-
// NOTE: The stack trace frame iterator is an iterator that only
// traverses proper JavaScript frames; that is, JavaScript frames that
// have proper JavaScript functions. This excludes the problematic
// functions in runtime.js.
class StackTraceFrameIterator: public JavaScriptFrameIterator {
public:
- StackTraceFrameIterator();
explicit StackTraceFrameIterator(Isolate* isolate);
void Advance();
@@ -793,100 +880,39 @@ class StackTraceFrameIterator: public JavaScriptFrameIterator {
};
-class SafeStackFrameIterator BASE_EMBEDDED {
+class SafeStackFrameIterator: public StackFrameIteratorBase {
public:
SafeStackFrameIterator(Isolate* isolate,
Address fp, Address sp,
- Address low_bound, Address high_bound);
-
- StackFrame* frame() const {
- ASSERT(is_working_iterator_);
- return iterator_.frame();
- }
-
- bool done() const { return iteration_done_ ? true : iterator_.done(); }
+ Address js_entry_sp);
+ inline StackFrame* frame() const;
void Advance();
- void Reset();
-
- static bool is_active(Isolate* isolate);
- static bool IsWithinBounds(
- Address low_bound, Address high_bound, Address addr) {
- return low_bound <= addr && addr <= high_bound;
- }
+ StackFrame::Type top_frame_type() const { return top_frame_type_; }
private:
- class StackAddressValidator {
- public:
- StackAddressValidator(Address low_bound, Address high_bound)
- : low_bound_(low_bound), high_bound_(high_bound) { }
- bool IsValid(Address addr) const {
- return IsWithinBounds(low_bound_, high_bound_, addr);
- }
- private:
- Address low_bound_;
- Address high_bound_;
- };
-
- class ExitFrameValidator {
- public:
- explicit ExitFrameValidator(const StackAddressValidator& validator)
- : validator_(validator) { }
- ExitFrameValidator(Address low_bound, Address high_bound)
- : validator_(low_bound, high_bound) { }
- bool IsValidFP(Address fp);
- private:
- StackAddressValidator validator_;
- };
+ void AdvanceOneFrame();
bool IsValidStackAddress(Address addr) const {
- return stack_validator_.IsValid(addr);
+ return low_bound_ <= addr && addr <= high_bound_;
}
- bool CanIterateHandles(StackFrame* frame, StackHandler* handler);
bool IsValidFrame(StackFrame* frame) const;
bool IsValidCaller(StackFrame* frame);
- static bool IsValidTop(Isolate* isolate,
- Address low_bound, Address high_bound);
-
- // This is a nasty hack to make sure the active count is incremented
- // before the constructor for the embedded iterator is invoked. This
- // is needed because the constructor will start looking at frames
- // right away and we need to make sure it doesn't start inspecting
- // heap objects.
- class ActiveCountMaintainer BASE_EMBEDDED {
- public:
- explicit ActiveCountMaintainer(Isolate* isolate);
- ~ActiveCountMaintainer();
- private:
- Isolate* isolate_;
- };
+ bool IsValidExitFrame(Address fp) const;
+ bool IsValidTop(ThreadLocalTop* top) const;
- ActiveCountMaintainer maintainer_;
- StackAddressValidator stack_validator_;
- const bool is_valid_top_;
- const bool is_valid_fp_;
- const bool is_working_iterator_;
- bool iteration_done_;
- StackFrameIterator iterator_;
-};
-
-
-typedef JavaScriptFrameIteratorTemp<SafeStackFrameIterator>
- SafeJavaScriptFrameIterator;
-
-
-class SafeStackTraceFrameIterator: public SafeJavaScriptFrameIterator {
- public:
- explicit SafeStackTraceFrameIterator(Isolate* isolate,
- Address fp, Address sp,
- Address low_bound, Address high_bound);
- void Advance();
+ const Address low_bound_;
+ const Address high_bound_;
+ StackFrame::Type top_frame_type_;
+ ExternalCallbackScope* external_callback_scope_;
};
class StackFrameLocator BASE_EMBEDDED {
public:
+ explicit StackFrameLocator(Isolate* isolate) : iterator_(isolate) {}
+
// Find the nth JavaScript frame on the stack. The caller must
// guarantee that such a frame exists.
JavaScriptFrame* FindJavaScriptFrame(int n);
@@ -896,9 +922,16 @@ class StackFrameLocator BASE_EMBEDDED {
};
+// Used to specify the type of prologue to generate.
+enum PrologueFrameMode {
+ BUILD_FUNCTION_FRAME,
+ BUILD_STUB_FRAME
+};
+
+
// Reads all frames on the current stack and copies them into the current
// zone memory.
-Vector<StackFrame*> CreateStackMap(Zone* zone);
+Vector<StackFrame*> CreateStackMap(Isolate* isolate, Zone* zone);
} } // namespace v8::internal
diff --git a/deps/v8/src/full-codegen.cc b/deps/v8/src/full-codegen.cc
index 9592e0afa..fec9ee565 100644
--- a/deps/v8/src/full-codegen.cc
+++ b/deps/v8/src/full-codegen.cc
@@ -76,16 +76,23 @@ void BreakableStatementChecker::VisitExportDeclaration(
void BreakableStatementChecker::VisitModuleLiteral(ModuleLiteral* module) {
}
+
void BreakableStatementChecker::VisitModuleVariable(ModuleVariable* module) {
}
+
void BreakableStatementChecker::VisitModulePath(ModulePath* module) {
}
+
void BreakableStatementChecker::VisitModuleUrl(ModuleUrl* module) {
}
+void BreakableStatementChecker::VisitModuleStatement(ModuleStatement* stmt) {
+}
+
+
void BreakableStatementChecker::VisitBlock(Block* stmt) {
}
@@ -159,6 +166,12 @@ void BreakableStatementChecker::VisitForInStatement(ForInStatement* stmt) {
}
+void BreakableStatementChecker::VisitForOfStatement(ForOfStatement* stmt) {
+ // For-of is breakable because of the next() call.
+ is_breakable_ = true;
+}
+
+
void BreakableStatementChecker::VisitTryCatchStatement(
TryCatchStatement* stmt) {
// Mark try catch as breakable to avoid adding a break slot in front of it.
@@ -180,12 +193,16 @@ void BreakableStatementChecker::VisitDebuggerStatement(
}
+void BreakableStatementChecker::VisitCaseClause(CaseClause* clause) {
+}
+
+
void BreakableStatementChecker::VisitFunctionLiteral(FunctionLiteral* expr) {
}
-void BreakableStatementChecker::VisitSharedFunctionInfoLiteral(
- SharedFunctionInfoLiteral* expr) {
+void BreakableStatementChecker::VisitNativeFunctionLiteral(
+ NativeFunctionLiteral* expr) {
}
@@ -228,6 +245,12 @@ void BreakableStatementChecker::VisitAssignment(Assignment* expr) {
}
+void BreakableStatementChecker::VisitYield(Yield* expr) {
+ // Yield is breakable if the expression is.
+ Visit(expr->expression());
+}
+
+
void BreakableStatementChecker::VisitThrow(Throw* expr) {
// Throw is breakable if the expression is.
Visit(expr->exception());
@@ -294,15 +317,14 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
int len = String::cast(script->source())->length();
isolate->counters()->total_full_codegen_source_size()->Increment(len);
}
- if (FLAG_trace_codegen) {
- PrintF("Full Compiler - ");
- }
- CodeGenerator::MakeCodePrologue(info);
+ CodeGenerator::MakeCodePrologue(info, "full");
const int kInitialBufferSize = 4 * KB;
MacroAssembler masm(info->isolate(), NULL, kInitialBufferSize);
#ifdef ENABLE_GDB_JIT_INTERFACE
masm.positions_recorder()->StartGDBJITLineInfoRecording();
#endif
+ LOG_CODE_EVENT(isolate,
+ CodeStartLinePosInfoRecordEvent(masm.positions_recorder()));
FullCodeGenerator cgen(&masm, info);
cgen.Generate();
@@ -310,12 +332,12 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
ASSERT(!isolate->has_pending_exception());
return false;
}
- unsigned table_offset = cgen.EmitStackCheckTable();
+ unsigned table_offset = cgen.EmitBackEdgeTable();
Code::Flags flags = Code::ComputeFlags(Code::FUNCTION);
Handle<Code> code = CodeGenerator::MakeCodeEpilogue(&masm, flags, info);
code->set_optimizable(info->IsOptimizable() &&
- !info->function()->flags()->Contains(kDontOptimize) &&
+ !info->function()->dont_optimize() &&
info->function()->scope()->AllowsLazyCompilation());
cgen.PopulateDeoptimizationData(code);
cgen.PopulateTypeFeedbackInfo(code);
@@ -323,38 +345,39 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
code->set_has_deoptimization_support(info->HasDeoptimizationSupport());
code->set_handler_table(*cgen.handler_table());
#ifdef ENABLE_DEBUGGER_SUPPORT
- code->set_has_debug_break_slots(
- info->isolate()->debugger()->IsDebuggerActive());
code->set_compiled_optimizable(info->IsOptimizable());
#endif // ENABLE_DEBUGGER_SUPPORT
code->set_allow_osr_at_loop_nesting_level(0);
code->set_profiler_ticks(0);
- code->set_stack_check_table_offset(table_offset);
+ code->set_back_edge_table_offset(table_offset);
+ code->set_back_edges_patched_for_osr(false);
CodeGenerator::PrintCode(code, info);
- info->SetCode(code); // May be an empty handle.
+ info->SetCode(code);
#ifdef ENABLE_GDB_JIT_INTERFACE
- if (FLAG_gdbjit && !code.is_null()) {
+ if (FLAG_gdbjit) {
GDBJITLineInfo* lineinfo =
masm.positions_recorder()->DetachGDBJITLineInfo();
-
GDBJIT(RegisterDetailedLineInfo(*code, lineinfo));
}
#endif
- return !code.is_null();
+ void* line_info = masm.positions_recorder()->DetachJITHandlerData();
+ LOG_CODE_EVENT(isolate, CodeEndLinePosInfoRecordEvent(*code, line_info));
+ return true;
}
-unsigned FullCodeGenerator::EmitStackCheckTable() {
- // The stack check table consists of a length (in number of entries)
+unsigned FullCodeGenerator::EmitBackEdgeTable() {
+ // The back edge table consists of a length (in number of entries)
  // field, and then a sequence of entries. Each entry is a triple of AST
  // id, code-relative pc offset, and loop depth.
masm()->Align(kIntSize);
unsigned offset = masm()->pc_offset();
- unsigned length = stack_checks_.length();
+ unsigned length = back_edges_.length();
__ dd(length);
for (unsigned i = 0; i < length; ++i) {
- __ dd(stack_checks_[i].id.ToInt());
- __ dd(stack_checks_[i].pc_and_state);
+ __ dd(back_edges_[i].id.ToInt());
+ __ dd(back_edges_[i].pc);
+ __ dd(back_edges_[i].loop_depth);
}
return offset;
}
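A minimal standalone sketch of the layout this function emits (a uint32 entry count followed by triples of AST id, pc offset, and loop depth), using made-up names; it is an illustration, not V8 code:

#include <cstdint>
#include <cstdio>
#include <cstring>
#include <vector>

// One logical back edge entry; three 32-bit fields, 12 bytes total.
struct BackEdge { uint32_t ast_id, pc_offset, loop_depth; };

// Serialize in the emitted order: length word, then one triple per entry.
std::vector<uint8_t> EmitTable(const std::vector<BackEdge>& edges) {
  std::vector<uint8_t> out(sizeof(uint32_t) * (1 + 3 * edges.size()));
  uint32_t length = static_cast<uint32_t>(edges.size());
  std::memcpy(out.data(), &length, sizeof(length));
  for (size_t i = 0; i < edges.size(); ++i) {
    std::memcpy(out.data() + sizeof(uint32_t) + 12 * i, &edges[i], 12);
  }
  return out;
}

// Read entry |index| back, mirroring the offset arithmetic a reader would use.
BackEdge ReadEntry(const std::vector<uint8_t>& table, uint32_t index) {
  BackEdge e;
  std::memcpy(&e, table.data() + sizeof(uint32_t) + 12 * index, 12);
  return e;
}

int main() {
  std::vector<uint8_t> table = EmitTable({{7, 0x40, 1}, {13, 0x98, 2}});
  BackEdge e = ReadEntry(table, 1);
  std::printf("ast id %u, pc offset 0x%x, loop depth %u\n",
              e.ast_id, e.pc_offset, e.loop_depth);
  return 0;
}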
@@ -394,6 +417,7 @@ void FullCodeGenerator::Initialize() {
!Snapshot::HaveASnapshotToStartFrom();
masm_->set_emit_debug_code(generate_debug_code_);
masm_->set_predictable_code_size(true);
+ InitializeAstVisitor(info_->isolate());
}
@@ -443,35 +467,27 @@ void FullCodeGenerator::PrepareForBailoutForId(BailoutId id, State state) {
StateField::encode(state) | PcField::encode(masm_->pc_offset());
ASSERT(Smi::IsValid(pc_and_state));
BailoutEntry entry = { id, pc_and_state };
-#ifdef DEBUG
- if (FLAG_enable_slow_asserts) {
- // Assert that we don't have multiple bailout entries for the same node.
- for (int i = 0; i < bailout_entries_.length(); i++) {
- if (bailout_entries_.at(i).id == entry.id) {
- AstPrinter printer;
- PrintF("%s", printer.PrintProgram(info_->function()));
- UNREACHABLE();
- }
- }
- }
-#endif // DEBUG
+ ASSERT(!prepared_bailout_ids_.Contains(id.ToInt()));
+ prepared_bailout_ids_.Add(id.ToInt(), zone());
bailout_entries_.Add(entry, zone());
}
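A rough standalone sketch of what the new assert relies on: a growable bit set answering Contains/Add in constant time, replacing the old linear scan over bailout_entries_. The class below is illustrative only, not V8's GrowableBitVector:

#include <cassert>
#include <vector>

class PreparedIds {
 public:
  bool Contains(int id) const {
    return id < static_cast<int>(seen_.size()) && seen_[id];
  }
  void Add(int id) {
    if (id >= static_cast<int>(seen_.size())) seen_.resize(id + 1, false);
    seen_[id] = true;
  }
 private:
  std::vector<bool> seen_;  // one bit per possible bailout id
};

int main() {
  PreparedIds ids;
  assert(!ids.Contains(42));  // same shape as the ASSERT/Add pair above
  ids.Add(42);
  assert(ids.Contains(42));
  return 0;
}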
void FullCodeGenerator::RecordTypeFeedbackCell(
- TypeFeedbackId id, Handle<JSGlobalPropertyCell> cell) {
+ TypeFeedbackId id, Handle<Cell> cell) {
TypeFeedbackCellEntry entry = { id, cell };
type_feedback_cells_.Add(entry, zone());
}
-void FullCodeGenerator::RecordStackCheck(BailoutId ast_id) {
- // The pc offset does not need to be encoded and packed together with a
- // state.
+void FullCodeGenerator::RecordBackEdge(BailoutId ast_id) {
+ // The pc offset does not need to be encoded and packed together with a state.
ASSERT(masm_->pc_offset() > 0);
- BailoutEntry entry = { ast_id, static_cast<unsigned>(masm_->pc_offset()) };
- stack_checks_.Add(entry, zone());
+ ASSERT(loop_depth() > 0);
+ uint8_t depth = Min(loop_depth(), Code::kMaxLoopNestingMarker);
+ BackEdgeEntry entry =
+ { ast_id, static_cast<unsigned>(masm_->pc_offset()), depth };
+ back_edges_.Add(entry, zone());
}
@@ -494,7 +510,7 @@ void FullCodeGenerator::AccumulatorValueContext::Plug(Register reg) const {
void FullCodeGenerator::StackValueContext::Plug(Register reg) const {
- __ push(reg);
+ __ Push(reg);
}
@@ -512,7 +528,7 @@ void FullCodeGenerator::EffectContext::PlugTOS() const {
void FullCodeGenerator::AccumulatorValueContext::PlugTOS() const {
- __ pop(result_register());
+ __ Pop(result_register());
}
@@ -522,7 +538,7 @@ void FullCodeGenerator::StackValueContext::PlugTOS() const {
void FullCodeGenerator::TestContext::PlugTOS() const {
// For simplicity we always test the accumulator register.
- __ pop(result_register());
+ __ Pop(result_register());
codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
codegen()->DoTest(this);
}
@@ -582,16 +598,137 @@ void FullCodeGenerator::DoTest(const TestContext* context) {
}
+void FullCodeGenerator::AllocateModules(ZoneList<Declaration*>* declarations) {
+ ASSERT(scope_->is_global_scope());
+
+ for (int i = 0; i < declarations->length(); i++) {
+ ModuleDeclaration* declaration = declarations->at(i)->AsModuleDeclaration();
+ if (declaration != NULL) {
+ ModuleLiteral* module = declaration->module()->AsModuleLiteral();
+ if (module != NULL) {
+ Comment cmnt(masm_, "[ Link nested modules");
+ Scope* scope = module->body()->scope();
+ Interface* interface = scope->interface();
+ ASSERT(interface->IsModule() && interface->IsFrozen());
+
+ interface->Allocate(scope->module_var()->index());
+
+ // Set up module context.
+ ASSERT(scope->interface()->Index() >= 0);
+ __ Push(Smi::FromInt(scope->interface()->Index()));
+ __ Push(scope->GetScopeInfo());
+ __ CallRuntime(Runtime::kPushModuleContext, 2);
+ StoreToFrameField(StandardFrameConstants::kContextOffset,
+ context_register());
+
+ AllocateModules(scope->declarations());
+
+ // Pop module context.
+ LoadContextField(context_register(), Context::PREVIOUS_INDEX);
+ // Update local stack frame context field.
+ StoreToFrameField(StandardFrameConstants::kContextOffset,
+ context_register());
+ }
+ }
+ }
+}
+
+
+// Modules have their own local scope, represented by their own context.
+// Module instance objects have an accessor for every export that forwards
+// access to the respective slot from the module's context. (Exports that are
+// modules themselves, however, are simple data properties.)
+//
+// All modules have a _hosting_ scope/context, which (currently) is the
+// (innermost) enclosing global scope. To deal with recursion, nested modules
+// are hosted by the same scope as global ones.
+//
+// For every (global or nested) module literal, the hosting context has an
+// internal slot that points directly to the respective module context. This
+// enables quick access to (statically resolved) module members by 2-dimensional
+// access through the hosting context. For example,
+//
+// module A {
+// let x;
+// module B { let y; }
+// }
+// module C { let z; }
+//
+// allocates contexts as follows:
+//
+// [header| .A | .B | .C | A | C ] (global)
+// | | |
+// | | +-- [header| z ] (module)
+// | |
+// | +------- [header| y ] (module)
+// |
+// +------------ [header| x | B ] (module)
+//
+// Here, .A, .B, .C are the internal slots pointing to the hosted module
+// contexts, whereas A, B, C hold the actual instance objects (note that every
+// module context also points to the respective instance object through its
+// extension slot in the header).
+//
+// To deal with arbitrary recursion and aliases between modules,
+// they are created and initialized in several stages. Each stage applies to
+// all modules in the hosting global scope, including nested ones.
+//
+// 1. Allocate: for each module _literal_, allocate the module contexts and
+// respective instance object and wire them up. This happens in the
+// PushModuleContext runtime function, as generated by AllocateModules
+// (invoked by VisitDeclarations in the hosting scope).
+//
+// 2. Bind: for each module _declaration_ (i.e. literals as well as aliases),
+// assign the respective instance object to respective local variables. This
+// happens in VisitModuleDeclaration, and uses the instance objects created
+// in the previous stage.
+// For each module _literal_, this phase also constructs a module descriptor
+// for the next stage. This happens in VisitModuleLiteral.
+//
+// 3. Populate: invoke the DeclareModules runtime function to populate each
+// _instance_ object with accessors for its exports. This is generated by
+// DeclareModules (invoked by VisitDeclarations in the hosting scope again),
+// and uses the descriptors generated in the previous stage.
+//
+// 4. Initialize: execute the module bodies (and other code) in sequence. This
+// happens through the separate statements generated for module bodies. To
+// reenter the module scopes properly, the parser inserted ModuleStatements.
+
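A small standalone model of the two-dimensional access described above; the struct and function names are invented for illustration and are not part of V8:

#include <cstdio>
#include <vector>

// The module's own context: just its statically allocated slots (x, y, z).
struct ModuleContext {
  std::vector<int> slots;
};

// The hosting (global) context: internal slots .A, .B, .C pointing directly
// at the hosted module contexts.
struct HostingContext {
  std::vector<ModuleContext*> internal_slots;
};

// A statically resolved member access needs two hops by known indices,
// with no lookup by name.
int LoadMember(const HostingContext& host, int module_slot, int member_slot) {
  return host.internal_slots[module_slot]->slots[member_slot];
}

int main() {
  ModuleContext a{{42}};   // module A { let x = 42; }
  ModuleContext b{{7}};    // module B { let y = 7; }
  HostingContext host{{&a, &b}};
  std::printf("A.x = %d, B.y = %d\n",
              LoadMember(host, 0, 0), LoadMember(host, 1, 0));
  return 0;
}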
void FullCodeGenerator::VisitDeclarations(
ZoneList<Declaration*>* declarations) {
+ Handle<FixedArray> saved_modules = modules_;
+ int saved_module_index = module_index_;
ZoneList<Handle<Object> >* saved_globals = globals_;
ZoneList<Handle<Object> > inner_globals(10, zone());
globals_ = &inner_globals;
+ if (scope_->num_modules() != 0) {
+ // This is a scope hosting modules. Allocate a descriptor array to pass
+ // to the runtime for initialization.
+ Comment cmnt(masm_, "[ Allocate modules");
+ ASSERT(scope_->is_global_scope());
+ modules_ =
+ isolate()->factory()->NewFixedArray(scope_->num_modules(), TENURED);
+ module_index_ = 0;
+
+ // Generate code for allocating all modules, including nested ones.
+ // The allocated contexts are stored in internal variables in this scope.
+ AllocateModules(declarations);
+ }
+
AstVisitor::VisitDeclarations(declarations);
+
+ if (scope_->num_modules() != 0) {
+ // Initialize modules from descriptor array.
+ ASSERT(module_index_ == modules_->length());
+ DeclareModules(modules_);
+ modules_ = saved_modules;
+ module_index_ = saved_module_index;
+ }
+
if (!globals_->is_empty()) {
// Invoke the platform-dependent code generator to do the actual
- // declaration the global functions and variables.
+ // declaration of the global functions and variables.
Handle<FixedArray> array =
isolate()->factory()->NewFixedArray(globals_->length(), TENURED);
for (int i = 0; i < globals_->length(); ++i)
@@ -604,19 +741,23 @@ void FullCodeGenerator::VisitDeclarations(
void FullCodeGenerator::VisitModuleLiteral(ModuleLiteral* module) {
- // Allocate a module context statically.
Block* block = module->body();
Scope* saved_scope = scope();
scope_ = block->scope();
- Interface* interface = module->interface();
- Handle<JSModule> instance = interface->Instance();
+ Interface* interface = scope_->interface();
Comment cmnt(masm_, "[ ModuleLiteral");
SetStatementPosition(block);
+ ASSERT(!modules_.is_null());
+ ASSERT(module_index_ < modules_->length());
+ int index = module_index_++;
+
// Set up module context.
- __ Push(instance);
- __ CallRuntime(Runtime::kPushModuleContext, 1);
+ ASSERT(interface->Index() >= 0);
+ __ Push(Smi::FromInt(interface->Index()));
+ __ Push(Smi::FromInt(0));
+ __ CallRuntime(Runtime::kPushModuleContext, 2);
StoreToFrameField(StandardFrameConstants::kContextOffset, context_register());
{
@@ -624,6 +765,11 @@ void FullCodeGenerator::VisitModuleLiteral(ModuleLiteral* module) {
VisitDeclarations(scope_->declarations());
}
+ // Populate the module description.
+ Handle<ModuleInfo> description =
+ ModuleInfo::Create(isolate(), interface, scope_);
+ modules_->set(index, *description);
+
scope_ = saved_scope;
// Pop module context.
LoadContextField(context_register(), Context::PREVIOUS_INDEX);
@@ -644,8 +790,20 @@ void FullCodeGenerator::VisitModulePath(ModulePath* module) {
}
-void FullCodeGenerator::VisitModuleUrl(ModuleUrl* decl) {
- // TODO(rossberg)
+void FullCodeGenerator::VisitModuleUrl(ModuleUrl* module) {
+ // TODO(rossberg): dummy allocation for now.
+ Scope* scope = module->body()->scope();
+ Interface* interface = scope_->interface();
+
+ ASSERT(interface->IsModule() && interface->IsFrozen());
+ ASSERT(!modules_.is_null());
+ ASSERT(module_index_ < modules_->length());
+ interface->Allocate(scope->module_var()->index());
+ int index = module_index_++;
+
+ Handle<ModuleInfo> description =
+ ModuleInfo::Create(isolate(), interface, scope_);
+ modules_->set(index, *description);
}
@@ -670,17 +828,17 @@ void FullCodeGenerator::SetReturnPosition(FunctionLiteral* fun) {
void FullCodeGenerator::SetStatementPosition(Statement* stmt) {
#ifdef ENABLE_DEBUGGER_SUPPORT
if (!isolate()->debugger()->IsDebuggerActive()) {
- CodeGenerator::RecordPositions(masm_, stmt->statement_pos());
+ CodeGenerator::RecordPositions(masm_, stmt->position());
} else {
// Check if the statement will be breakable without adding a debug break
// slot.
- BreakableStatementChecker checker;
+ BreakableStatementChecker checker(isolate());
checker.Check(stmt);
// Record the statement position right here if the statement is not
// breakable. For breakable statements the actual recording of the
// position will be postponed to the breakable code (typically an IC).
bool position_recorded = CodeGenerator::RecordPositions(
- masm_, stmt->statement_pos(), !checker.is_breakable());
+ masm_, stmt->position(), !checker.is_breakable());
// If the position recording did record a new position generate a debug
// break slot to make the statement breakable.
if (position_recorded) {
@@ -688,19 +846,19 @@ void FullCodeGenerator::SetStatementPosition(Statement* stmt) {
}
}
#else
- CodeGenerator::RecordPositions(masm_, stmt->statement_pos());
+ CodeGenerator::RecordPositions(masm_, stmt->position());
#endif
}
-void FullCodeGenerator::SetExpressionPosition(Expression* expr, int pos) {
+void FullCodeGenerator::SetExpressionPosition(Expression* expr) {
#ifdef ENABLE_DEBUGGER_SUPPORT
if (!isolate()->debugger()->IsDebuggerActive()) {
- CodeGenerator::RecordPositions(masm_, pos);
+ CodeGenerator::RecordPositions(masm_, expr->position());
} else {
// Check if the expression will be breakable without adding a debug break
// slot.
- BreakableStatementChecker checker;
+ BreakableStatementChecker checker(isolate());
checker.Check(expr);
// Record a statement position right here if the expression is not
// breakable. For breakable expressions the actual recording of the
@@ -710,7 +868,7 @@ void FullCodeGenerator::SetExpressionPosition(Expression* expr, int pos) {
// statement positions this is used for e.g. the condition expression of
// a do while loop.
bool position_recorded = CodeGenerator::RecordPositions(
- masm_, pos, !checker.is_breakable());
+ masm_, expr->position(), !checker.is_breakable());
// If the position recording did record a new position generate a debug
// break slot to make the statement breakable.
if (position_recorded) {
@@ -769,6 +927,25 @@ void FullCodeGenerator::EmitInlineRuntimeCall(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitGeneratorNext(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 2);
+ EmitGeneratorResume(args->at(0), args->at(1), JSGeneratorObject::NEXT);
+}
+
+
+void FullCodeGenerator::EmitGeneratorThrow(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 2);
+ EmitGeneratorResume(args->at(0), args->at(1), JSGeneratorObject::THROW);
+}
+
+
+void FullCodeGenerator::EmitDebugBreakInOptimizedCode(CallRuntime* expr) {
+ context()->Plug(handle(Smi::FromInt(0), isolate()));
+}
+
+
void FullCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
switch (expr->op()) {
case Token::COMMA:
@@ -827,7 +1004,7 @@ void FullCodeGenerator::VisitLogicalExpression(BinaryOperation* expr) {
VisitForAccumulatorValue(left);
// We want the value in the accumulator for the test, and on the stack in
// case we need it.
- __ push(result_register());
+ __ Push(result_register());
Label discard, restore;
if (is_logical_and) {
DoTest(left, &discard, &restore, &restore);
@@ -835,7 +1012,7 @@ void FullCodeGenerator::VisitLogicalExpression(BinaryOperation* expr) {
DoTest(left, &restore, &discard, &restore);
}
__ bind(&restore);
- __ pop(result_register());
+ __ Pop(result_register());
__ jmp(&done);
__ bind(&discard);
__ Drop(1);
@@ -845,7 +1022,7 @@ void FullCodeGenerator::VisitLogicalExpression(BinaryOperation* expr) {
VisitForAccumulatorValue(left);
// We want the value in the accumulator for the test, and on the stack in
// case we need it.
- __ push(result_register());
+ __ Push(result_register());
Label discard;
if (is_logical_and) {
DoTest(left, &discard, &done, &discard);
@@ -904,37 +1081,28 @@ void FullCodeGenerator::VisitBlock(Block* stmt) {
// Push a block context when entering a block with block scoped variables.
if (stmt->scope() != NULL) {
scope_ = stmt->scope();
- if (scope_->is_module_scope()) {
- // If this block is a module body, then we have already allocated and
- // initialized the declarations earlier. Just push the context.
- ASSERT(!scope_->interface()->Instance().is_null());
- __ Push(scope_->interface()->Instance());
- __ CallRuntime(Runtime::kPushModuleContext, 1);
- StoreToFrameField(
- StandardFrameConstants::kContextOffset, context_register());
- } else {
- { Comment cmnt(masm_, "[ Extend block context");
- Handle<ScopeInfo> scope_info = scope_->GetScopeInfo();
- int heap_slots =
- scope_info->ContextLength() - Context::MIN_CONTEXT_SLOTS;
- __ Push(scope_info);
- PushFunctionArgumentForContextAllocation();
- if (heap_slots <= FastNewBlockContextStub::kMaximumSlots) {
- FastNewBlockContextStub stub(heap_slots);
- __ CallStub(&stub);
- } else {
- __ CallRuntime(Runtime::kPushBlockContext, 2);
- }
-
- // Replace the context stored in the frame.
- StoreToFrameField(StandardFrameConstants::kContextOffset,
- context_register());
- }
- { Comment cmnt(masm_, "[ Declarations");
- VisitDeclarations(scope_->declarations());
+ ASSERT(!scope_->is_module_scope());
+ { Comment cmnt(masm_, "[ Extend block context");
+ Handle<ScopeInfo> scope_info = scope_->GetScopeInfo();
+ int heap_slots = scope_info->ContextLength() - Context::MIN_CONTEXT_SLOTS;
+ __ Push(scope_info);
+ PushFunctionArgumentForContextAllocation();
+ if (heap_slots <= FastNewBlockContextStub::kMaximumSlots) {
+ FastNewBlockContextStub stub(heap_slots);
+ __ CallStub(&stub);
+ } else {
+ __ CallRuntime(Runtime::kPushBlockContext, 2);
}
+
+ // Replace the context stored in the frame.
+ StoreToFrameField(StandardFrameConstants::kContextOffset,
+ context_register());
+ }
+ { Comment cmnt(masm_, "[ Declarations");
+ VisitDeclarations(scope_->declarations());
}
}
+
PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
VisitStatements(stmt->statements());
scope_ = saved_scope;
@@ -951,6 +1119,26 @@ void FullCodeGenerator::VisitBlock(Block* stmt) {
}
+void FullCodeGenerator::VisitModuleStatement(ModuleStatement* stmt) {
+ Comment cmnt(masm_, "[ Module context");
+
+ __ Push(Smi::FromInt(stmt->proxy()->interface()->Index()));
+ __ Push(Smi::FromInt(0));
+ __ CallRuntime(Runtime::kPushModuleContext, 2);
+ StoreToFrameField(
+ StandardFrameConstants::kContextOffset, context_register());
+
+ Scope* saved_scope = scope_;
+ scope_ = stmt->body()->scope();
+ VisitStatements(stmt->body()->statements());
+ scope_ = saved_scope;
+ LoadContextField(context_register(), Context::PREVIOUS_INDEX);
+ // Update local stack frame context field.
+ StoreToFrameField(StandardFrameConstants::kContextOffset,
+ context_register());
+}
+
+
void FullCodeGenerator::VisitExpressionStatement(ExpressionStatement* stmt) {
Comment cmnt(masm_, "[ ExpressionStatement");
SetStatementPosition(stmt);
@@ -1048,13 +1236,7 @@ void FullCodeGenerator::VisitBreakStatement(BreakStatement* stmt) {
}
-void FullCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
- Comment cmnt(masm_, "[ ReturnStatement");
- SetStatementPosition(stmt);
- Expression* expr = stmt->expression();
- VisitForAccumulatorValue(expr);
-
- // Exit all nested statements.
+void FullCodeGenerator::EmitUnwindBeforeReturn() {
NestedStatement* current = nesting_stack_;
int stack_depth = 0;
int context_length = 0;
@@ -1062,7 +1244,15 @@ void FullCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
current = current->Exit(&stack_depth, &context_length);
}
__ Drop(stack_depth);
+}
+
+void FullCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
+ Comment cmnt(masm_, "[ ReturnStatement");
+ SetStatementPosition(stmt);
+ Expression* expr = stmt->expression();
+ VisitForAccumulatorValue(expr);
+ EmitUnwindBeforeReturn();
EmitReturnSequence();
}
@@ -1076,9 +1266,12 @@ void FullCodeGenerator::VisitWithStatement(WithStatement* stmt) {
__ CallRuntime(Runtime::kPushWithContext, 2);
StoreToFrameField(StandardFrameConstants::kContextOffset, context_register());
+ Scope* saved_scope = scope();
+ scope_ = stmt->scope();
{ WithOrCatch body(this);
Visit(stmt->statement());
}
+ scope_ = saved_scope;
// Pop context.
LoadContextField(context_register(), Context::PREVIOUS_INDEX);
@@ -1090,7 +1283,7 @@ void FullCodeGenerator::VisitWithStatement(WithStatement* stmt) {
void FullCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
Comment cmnt(masm_, "[ DoWhileStatement");
SetStatementPosition(stmt);
- Label body, stack_check;
+ Label body, book_keeping;
Iteration loop_statement(this, stmt);
increment_loop_depth();
@@ -1102,16 +1295,16 @@ void FullCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
// possible to break on the condition.
__ bind(loop_statement.continue_label());
PrepareForBailoutForId(stmt->ContinueId(), NO_REGISTERS);
- SetExpressionPosition(stmt->cond(), stmt->condition_position());
+ SetExpressionPosition(stmt->cond());
VisitForControl(stmt->cond(),
- &stack_check,
+ &book_keeping,
loop_statement.break_label(),
- &stack_check);
+ &book_keeping);
// Check stack before looping.
PrepareForBailoutForId(stmt->BackEdgeId(), NO_REGISTERS);
- __ bind(&stack_check);
- EmitStackCheck(stmt, &body);
+ __ bind(&book_keeping);
+ EmitBackEdgeBookkeeping(stmt, &body);
__ jmp(&body);
PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
@@ -1140,7 +1333,7 @@ void FullCodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
SetStatementPosition(stmt);
// Check stack before looping.
- EmitStackCheck(stmt, &body);
+ EmitBackEdgeBookkeeping(stmt, &body);
__ bind(&test);
VisitForControl(stmt->cond(),
@@ -1186,7 +1379,7 @@ void FullCodeGenerator::VisitForStatement(ForStatement* stmt) {
SetStatementPosition(stmt);
// Check stack before looping.
- EmitStackCheck(stmt, &body);
+ EmitBackEdgeBookkeeping(stmt, &body);
__ bind(&test);
if (stmt->cond() != NULL) {
@@ -1221,7 +1414,7 @@ void FullCodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
// Extend the context before executing the catch block.
{ Comment cmnt(masm_, "[ Extend catch context");
__ Push(stmt->variable()->name());
- __ push(result_register());
+ __ Push(result_register());
PushFunctionArgumentForContextAllocation();
__ CallRuntime(Runtime::kPushCatchContext, 3);
StoreToFrameField(StandardFrameConstants::kContextOffset,
@@ -1286,7 +1479,7 @@ void FullCodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
// preserved by the finally block. Call the finally block and then
// rethrow the exception if it returns.
__ Call(&finally_entry);
- __ push(result_register());
+ __ Push(result_register());
__ CallRuntime(Runtime::kReThrow, 1);
// Finally block implementation.
@@ -1324,6 +1517,11 @@ void FullCodeGenerator::VisitDebuggerStatement(DebuggerStatement* stmt) {
}
+void FullCodeGenerator::VisitCaseClause(CaseClause* clause) {
+ UNREACHABLE();
+}
+
+
void FullCodeGenerator::VisitConditional(Conditional* expr) {
Comment cmnt(masm_, "[ Conditional");
Label true_case, false_case, done;
@@ -1331,8 +1529,7 @@ void FullCodeGenerator::VisitConditional(Conditional* expr) {
PrepareForBailoutForId(expr->ThenId(), NO_REGISTERS);
__ bind(&true_case);
- SetExpressionPosition(expr->then_expression(),
- expr->then_expression_position());
+ SetExpressionPosition(expr->then_expression());
if (context()->IsTest()) {
const TestContext* for_test = TestContext::cast(context());
VisitForControl(expr->then_expression(),
@@ -1346,8 +1543,7 @@ void FullCodeGenerator::VisitConditional(Conditional* expr) {
PrepareForBailoutForId(expr->ElseId(), NO_REGISTERS);
__ bind(&false_case);
- SetExpressionPosition(expr->else_expression(),
- expr->else_expression_position());
+ SetExpressionPosition(expr->else_expression());
VisitInDuplicateContext(expr->else_expression());
// If control flow falls through Visit, merge it with true case here.
if (!context()->IsTest()) {
@@ -1358,7 +1554,7 @@ void FullCodeGenerator::VisitConditional(Conditional* expr) {
void FullCodeGenerator::VisitLiteral(Literal* expr) {
Comment cmnt(masm_, "[ Literal");
- context()->Plug(expr->handle());
+ context()->Plug(expr->value());
}
@@ -1376,10 +1572,33 @@ void FullCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
}
-void FullCodeGenerator::VisitSharedFunctionInfoLiteral(
- SharedFunctionInfoLiteral* expr) {
- Comment cmnt(masm_, "[ SharedFunctionInfoLiteral");
- EmitNewClosure(expr->shared_function_info(), false);
+void FullCodeGenerator::VisitNativeFunctionLiteral(
+ NativeFunctionLiteral* expr) {
+ Comment cmnt(masm_, "[ NativeFunctionLiteral");
+
+ // Compute the function template for the native function.
+ Handle<String> name = expr->name();
+ v8::Handle<v8::FunctionTemplate> fun_template =
+ expr->extension()->GetNativeFunction(v8::Utils::ToLocal(name));
+ ASSERT(!fun_template.IsEmpty());
+
+ // Instantiate the function and create a shared function info from it.
+ Handle<JSFunction> fun = Utils::OpenHandle(*fun_template->GetFunction());
+ const int literals = fun->NumberOfLiterals();
+ Handle<Code> code = Handle<Code>(fun->shared()->code());
+ Handle<Code> construct_stub = Handle<Code>(fun->shared()->construct_stub());
+ bool is_generator = false;
+ Handle<SharedFunctionInfo> shared =
+ isolate()->factory()->NewSharedFunctionInfo(name, literals, is_generator,
+ code, Handle<ScopeInfo>(fun->shared()->scope_info()));
+ shared->set_construct_stub(*construct_stub);
+
+ // Copy the function data to the shared function info.
+ shared->set_function_data(fun->shared()->function_data());
+ int parameters = fun->shared()->formal_parameter_count();
+ shared->set_formal_parameter_count(parameters);
+
+ EmitNewClosure(shared, false);
}
@@ -1410,7 +1629,7 @@ bool FullCodeGenerator::TryLiteralCompare(CompareOperation* expr) {
return true;
}
- if (expr->IsLiteralCompareUndefined(&sub_expr)) {
+ if (expr->IsLiteralCompareUndefined(&sub_expr, isolate())) {
EmitLiteralCompareNil(expr, sub_expr, kUndefinedValue);
return true;
}
@@ -1424,6 +1643,100 @@ bool FullCodeGenerator::TryLiteralCompare(CompareOperation* expr) {
}
+void BackEdgeTable::Patch(Isolate* isolate,
+ Code* unoptimized) {
+ DisallowHeapAllocation no_gc;
+ Code* patch = isolate->builtins()->builtin(Builtins::kOnStackReplacement);
+
+ // Iterate over the back edge table and patch every interrupt
+ // call to an unconditional call to the replacement code.
+ int loop_nesting_level = unoptimized->allow_osr_at_loop_nesting_level();
+
+ BackEdgeTable back_edges(unoptimized, &no_gc);
+ for (uint32_t i = 0; i < back_edges.length(); i++) {
+ if (static_cast<int>(back_edges.loop_depth(i)) == loop_nesting_level) {
+ ASSERT_EQ(INTERRUPT, GetBackEdgeState(isolate,
+ unoptimized,
+ back_edges.pc(i)));
+ PatchAt(unoptimized, back_edges.pc(i), ON_STACK_REPLACEMENT, patch);
+ }
+ }
+
+ unoptimized->set_back_edges_patched_for_osr(true);
+ ASSERT(Verify(isolate, unoptimized, loop_nesting_level));
+}
+
+
+void BackEdgeTable::Revert(Isolate* isolate,
+ Code* unoptimized) {
+ DisallowHeapAllocation no_gc;
+ Code* patch = isolate->builtins()->builtin(Builtins::kInterruptCheck);
+
+ // Iterate over the back edge table and revert the patched interrupt calls.
+ ASSERT(unoptimized->back_edges_patched_for_osr());
+ int loop_nesting_level = unoptimized->allow_osr_at_loop_nesting_level();
+
+ BackEdgeTable back_edges(unoptimized, &no_gc);
+ for (uint32_t i = 0; i < back_edges.length(); i++) {
+ if (static_cast<int>(back_edges.loop_depth(i)) <= loop_nesting_level) {
+ ASSERT_NE(INTERRUPT, GetBackEdgeState(isolate,
+ unoptimized,
+ back_edges.pc(i)));
+ PatchAt(unoptimized, back_edges.pc(i), INTERRUPT, patch);
+ }
+ }
+
+ unoptimized->set_back_edges_patched_for_osr(false);
+ unoptimized->set_allow_osr_at_loop_nesting_level(0);
+ // Assert that none of the back edges are patched anymore.
+ ASSERT(Verify(isolate, unoptimized, -1));
+}
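A hedged standalone model of the two passes above: Patch arms back edges at exactly the allowed loop depth, and Revert returns every armed edge (depth at or below that level) to a plain interrupt. The enum and helpers are stand-ins, not the real PatchAt/GetBackEdgeState machinery:

#include <cassert>
#include <vector>

enum BackEdgeState { INTERRUPT, ON_STACK_REPLACEMENT };

struct Entry { int loop_depth; BackEdgeState state; };

void Patch(std::vector<Entry>* table, int allowed_level) {
  for (Entry& e : *table) {
    if (e.loop_depth == allowed_level) {
      assert(e.state == INTERRUPT);      // nothing is patched twice
      e.state = ON_STACK_REPLACEMENT;
    }
  }
}

void Revert(std::vector<Entry>* table, int allowed_level) {
  for (Entry& e : *table) {
    if (e.loop_depth <= allowed_level) {
      assert(e.state != INTERRUPT);      // everything this shallow was armed
      e.state = INTERRUPT;
    }
  }
}

int main() {
  std::vector<Entry> table = {{1, INTERRUPT}, {2, INTERRUPT}, {3, INTERRUPT}};
  // The allowed nesting level only grows, so arming happens level by level;
  // after these calls every edge of depth <= 2 awaits on-stack replacement.
  Patch(&table, 1);
  Patch(&table, 2);
  Revert(&table, 2);  // the depth-3 edge was never armed and stays untouched
  return 0;
}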
+
+
+void BackEdgeTable::AddStackCheck(CompilationInfo* info) {
+ DisallowHeapAllocation no_gc;
+ Isolate* isolate = info->isolate();
+ Code* code = info->shared_info()->code();
+ Address pc = code->instruction_start() + info->osr_pc_offset();
+ ASSERT_EQ(ON_STACK_REPLACEMENT, GetBackEdgeState(isolate, code, pc));
+ Code* patch = isolate->builtins()->builtin(Builtins::kOsrAfterStackCheck);
+ PatchAt(code, pc, OSR_AFTER_STACK_CHECK, patch);
+}
+
+
+void BackEdgeTable::RemoveStackCheck(CompilationInfo* info) {
+ DisallowHeapAllocation no_gc;
+ Isolate* isolate = info->isolate();
+ Code* code = info->shared_info()->code();
+ Address pc = code->instruction_start() + info->osr_pc_offset();
+ if (GetBackEdgeState(isolate, code, pc) == OSR_AFTER_STACK_CHECK) {
+ Code* patch = isolate->builtins()->builtin(Builtins::kOnStackReplacement);
+ PatchAt(code, pc, ON_STACK_REPLACEMENT, patch);
+ }
+}
+
+
+#ifdef DEBUG
+bool BackEdgeTable::Verify(Isolate* isolate,
+ Code* unoptimized,
+ int loop_nesting_level) {
+ DisallowHeapAllocation no_gc;
+ BackEdgeTable back_edges(unoptimized, &no_gc);
+ for (uint32_t i = 0; i < back_edges.length(); i++) {
+ uint32_t loop_depth = back_edges.loop_depth(i);
+ CHECK_LE(static_cast<int>(loop_depth), Code::kMaxLoopNestingMarker);
+ // Assert that all back edges for shallower loops (and only those)
+ // have already been patched.
+ CHECK_EQ((static_cast<int>(loop_depth) <= loop_nesting_level),
+ GetBackEdgeState(isolate,
+ unoptimized,
+ back_edges.pc(i)) != INTERRUPT);
+ }
+ return true;
+}
+#endif // DEBUG
+
+
#undef __
diff --git a/deps/v8/src/full-codegen.h b/deps/v8/src/full-codegen.h
index 89b51f958..e27662e0e 100644
--- a/deps/v8/src/full-codegen.h
+++ b/deps/v8/src/full-codegen.h
@@ -31,10 +31,14 @@
#include "v8.h"
#include "allocation.h"
+#include "assert-scope.h"
#include "ast.h"
#include "code-stubs.h"
#include "codegen.h"
#include "compiler.h"
+#include "data-flow.h"
+#include "globals.h"
+#include "objects.h"
namespace v8 {
namespace internal {
@@ -48,7 +52,9 @@ class JumpPatchSite;
// debugger to piggyback on.
class BreakableStatementChecker: public AstVisitor {
public:
- BreakableStatementChecker() : is_breakable_(false) {}
+ explicit BreakableStatementChecker(Isolate* isolate) : is_breakable_(false) {
+ InitializeAstVisitor(isolate);
+ }
void Check(Statement* stmt);
void Check(Expression* stmt);
@@ -63,6 +69,7 @@ class BreakableStatementChecker: public AstVisitor {
bool is_breakable_;
+ DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
DISALLOW_COPY_AND_ASSIGN(BreakableStatementChecker);
};
@@ -88,7 +95,7 @@ class FullCodeGenerator: public AstVisitor {
bailout_entries_(info->HasDeoptimizationSupport()
? info->function()->ast_node_count() : 0,
info->zone()),
- stack_checks_(2, info->zone()), // There's always at least one.
+ back_edges_(2, info->zone()),
type_feedback_cells_(info->HasDeoptimizationSupport()
? info->function()->ast_node_count() : 0,
info->zone()),
@@ -119,19 +126,19 @@ class FullCodeGenerator: public AstVisitor {
static const int kMaxBackEdgeWeight = 127;
+ // Platform-specific code size multiplier.
#if V8_TARGET_ARCH_IA32
- static const int kBackEdgeDistanceUnit = 100;
+ static const int kCodeSizeMultiplier = 100;
#elif V8_TARGET_ARCH_X64
- static const int kBackEdgeDistanceUnit = 162;
+ static const int kCodeSizeMultiplier = 162;
#elif V8_TARGET_ARCH_ARM
- static const int kBackEdgeDistanceUnit = 142;
+ static const int kCodeSizeMultiplier = 142;
#elif V8_TARGET_ARCH_MIPS
- static const int kBackEdgeDistanceUnit = 142;
+ static const int kCodeSizeMultiplier = 142;
#else
#error Unsupported target architecture.
#endif
-
private:
class Breakable;
class Iteration;
@@ -326,7 +333,7 @@ class FullCodeGenerator: public AstVisitor {
// Helper function to split control flow and avoid a branch to the
// fall-through label if it is set up.
-#ifdef V8_TARGET_ARCH_MIPS
+#if V8_TARGET_ARCH_MIPS
void Split(Condition cc,
Register lhs,
const Operand& rhs,
@@ -396,9 +403,20 @@ class FullCodeGenerator: public AstVisitor {
void VisitInDuplicateContext(Expression* expr);
void VisitDeclarations(ZoneList<Declaration*>* declarations);
+ void DeclareModules(Handle<FixedArray> descriptions);
void DeclareGlobals(Handle<FixedArray> pairs);
int DeclareGlobalsFlags();
+ // Generate code to allocate all (including nested) modules and contexts.
+ // Because of recursive linking and the presence of module alias declarations,
+ // this has to be a separate pass _before_ populating or executing any module.
+ void AllocateModules(ZoneList<Declaration*>* declarations);
+
+ // Generate code to create an iterator result object. The "value" property is
+ // set to a value popped from the stack, and "done" is set according to the
+ // argument. The result object is left in the result register.
+ void EmitCreateIteratorResult(bool done);
+
// Try to perform a comparison as a fast inlined literal compare if
// the operands allow it. Returns true if the compare operations
// has been matched and all code generated; false otherwise.
@@ -421,8 +439,7 @@ class FullCodeGenerator: public AstVisitor {
// Cache cell support. This associates AST ids with global property cells
// that will be cleared during GC and collected by the type-feedback oracle.
- void RecordTypeFeedbackCell(TypeFeedbackId id,
- Handle<JSGlobalPropertyCell> cell);
+ void RecordTypeFeedbackCell(TypeFeedbackId id, Handle<Cell> cell);
// Record a call's return site offset, used to rebuild the frame if the
// called function was inlined at the site.
@@ -442,21 +459,25 @@ class FullCodeGenerator: public AstVisitor {
// neither a with nor a catch context.
void EmitDebugCheckDeclarationContext(Variable* variable);
- // Platform-specific code for checking the stack limit at the back edge of
- // a loop.
// This is meant to be called at loop back edges; |back_edge_target| is
// the jump target of the back edge and is used to approximate the amount
// of code inside the loop.
- void EmitStackCheck(IterationStatement* stmt, Label* back_edge_target);
- // Record the OSR AST id corresponding to a stack check in the code.
- void RecordStackCheck(BailoutId osr_ast_id);
- // Emit a table of stack check ids and pcs into the code stream. Return
- // the offset of the start of the table.
- unsigned EmitStackCheckTable();
+ void EmitBackEdgeBookkeeping(IterationStatement* stmt,
+ Label* back_edge_target);
+ // Record the OSR AST id corresponding to a back edge in the code.
+ void RecordBackEdge(BailoutId osr_ast_id);
+ // Emit a table of back edge ids, pcs and loop depths into the code stream.
+ // Return the offset of the start of the table.
+ unsigned EmitBackEdgeTable();
void EmitProfilingCounterDecrement(int delta);
void EmitProfilingCounterReset();
+ // Emit code to pop values from the stack associated with nested statements
+ // like try/catch and try/finally, running the finally blocks and unwinding
+ // the handlers as needed.
+ void EmitUnwindBeforeReturn();
+
// Platform-specific return sequence
void EmitReturnSequence();
@@ -476,6 +497,16 @@ class FullCodeGenerator: public AstVisitor {
INLINE_RUNTIME_FUNCTION_LIST(EMIT_INLINE_RUNTIME_CALL)
#undef EMIT_INLINE_RUNTIME_CALL
+ void EmitSeqStringSetCharCheck(Register string,
+ Register index,
+ Register value,
+ uint32_t encoding_mask);
+
+ // Platform-specific code for resuming generators.
+ void EmitGeneratorResume(Expression *generator,
+ Expression *value,
+ JSGeneratorObject::ResumeMode resume_mode);
+
// Platform-specific code for loading variables.
void EmitLoadGlobalCheckExtensions(Variable* var,
TypeofState typeof_state,
@@ -545,7 +576,7 @@ class FullCodeGenerator: public AstVisitor {
void SetFunctionPosition(FunctionLiteral* fun);
void SetReturnPosition(FunctionLiteral* fun);
void SetStatementPosition(Statement* stmt);
- void SetExpressionPosition(Expression* expr, int pos);
+ void SetExpressionPosition(Expression* expr);
void SetStatementPosition(int pos);
void SetSourcePosition(int pos);
@@ -595,8 +626,6 @@ class FullCodeGenerator: public AstVisitor {
AST_NODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT
- void EmitUnaryOperation(UnaryOperation* expr, const char* comment);
-
void VisitComma(BinaryOperation* expr);
void VisitLogicalExpression(BinaryOperation* expr);
void VisitArithmeticExpression(BinaryOperation* expr);
@@ -615,9 +644,15 @@ class FullCodeGenerator: public AstVisitor {
unsigned pc_and_state;
};
+ struct BackEdgeEntry {
+ BailoutId id;
+ unsigned pc;
+ uint32_t loop_depth;
+ };
+
struct TypeFeedbackCellEntry {
TypeFeedbackId ast_id;
- Handle<JSGlobalPropertyCell> cell;
+ Handle<Cell> cell;
};
@@ -804,18 +839,22 @@ class FullCodeGenerator: public AstVisitor {
NestedStatement* nesting_stack_;
int loop_depth_;
ZoneList<Handle<Object> >* globals_;
+ Handle<FixedArray> modules_;
+ int module_index_;
const ExpressionContext* context_;
ZoneList<BailoutEntry> bailout_entries_;
- ZoneList<BailoutEntry> stack_checks_;
+ GrowableBitVector prepared_bailout_ids_;
+ ZoneList<BackEdgeEntry> back_edges_;
ZoneList<TypeFeedbackCellEntry> type_feedback_cells_;
int ic_total_count_;
Handle<FixedArray> handler_table_;
- Handle<JSGlobalPropertyCell> profiling_counter_;
+ Handle<Cell> profiling_counter_;
bool generate_debug_code_;
Zone* zone_;
friend class NestedStatement;
+ DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
DISALLOW_COPY_AND_ASSIGN(FullCodeGenerator);
};
@@ -842,6 +881,93 @@ class AccessorTable: public TemplateHashMap<Literal,
};
+class BackEdgeTable {
+ public:
+ BackEdgeTable(Code* code, DisallowHeapAllocation* required) {
+ ASSERT(code->kind() == Code::FUNCTION);
+ instruction_start_ = code->instruction_start();
+ Address table_address = instruction_start_ + code->back_edge_table_offset();
+ length_ = Memory::uint32_at(table_address);
+ start_ = table_address + kTableLengthSize;
+ }
+
+ uint32_t length() { return length_; }
+
+ BailoutId ast_id(uint32_t index) {
+ return BailoutId(static_cast<int>(
+ Memory::uint32_at(entry_at(index) + kAstIdOffset)));
+ }
+
+ uint32_t loop_depth(uint32_t index) {
+ return Memory::uint32_at(entry_at(index) + kLoopDepthOffset);
+ }
+
+ uint32_t pc_offset(uint32_t index) {
+ return Memory::uint32_at(entry_at(index) + kPcOffsetOffset);
+ }
+
+ Address pc(uint32_t index) {
+ return instruction_start_ + pc_offset(index);
+ }
+
+ enum BackEdgeState {
+ INTERRUPT,
+ ON_STACK_REPLACEMENT,
+ OSR_AFTER_STACK_CHECK
+ };
+
+ // Patch all back-edge interrupts at the allowed loop depth in the
+ // unoptimized code to unconditionally call the on-stack replacement builtin.
+ static void Patch(Isolate* isolate,
+ Code* unoptimized_code);
+
+ // Patch the back edge at |pc| to |target_state|, using |replacement_code|.
+ static void PatchAt(Code* unoptimized_code,
+ Address pc,
+ BackEdgeState target_state,
+ Code* replacement_code);
+
+ // Change all patched back edges back to normal interrupts.
+ static void Revert(Isolate* isolate,
+ Code* unoptimized_code);
+
+ // Change a back edge patched for on-stack replacement to perform a
+ // stack check first.
+ static void AddStackCheck(CompilationInfo* info);
+
+ // Remove the stack check, if available, and replace by on-stack replacement.
+ static void RemoveStackCheck(CompilationInfo* info);
+
+ // Return the current patch state of the back edge.
+ static BackEdgeState GetBackEdgeState(Isolate* isolate,
+ Code* unoptimized_code,
+ Address pc_after);
+
+#ifdef DEBUG
+ // Verify that all back edges of a certain loop depth are patched.
+ static bool Verify(Isolate* isolate,
+ Code* unoptimized_code,
+ int loop_nesting_level);
+#endif // DEBUG
+
+ private:
+ Address entry_at(uint32_t index) {
+ ASSERT(index < length_);
+ return start_ + index * kEntrySize;
+ }
+
+ static const int kTableLengthSize = kIntSize;
+ static const int kAstIdOffset = 0 * kIntSize;
+ static const int kPcOffsetOffset = 1 * kIntSize;
+ static const int kLoopDepthOffset = 2 * kIntSize;
+ static const int kEntrySize = 3 * kIntSize;
+
+ Address start_;
+ Address instruction_start_;
+ uint32_t length_;
+};
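As a worked example of the layout these constants describe (assuming the usual 4-byte kIntSize): each entry is 12 bytes, so entry_at(i) resolves to start_ + 12 * i, with the AST id at byte offset 0, the pc offset at byte offset 4, and the loop depth at byte offset 8 of that entry; start_ itself sits one 4-byte length word past back_edge_table_offset().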
+
+
} } // namespace v8::internal
#endif // V8_FULL_CODEGEN_H_
diff --git a/deps/v8/src/func-name-inferrer.cc b/deps/v8/src/func-name-inferrer.cc
index 2dd0bbc15..84d3bf06b 100644
--- a/deps/v8/src/func-name-inferrer.cc
+++ b/deps/v8/src/func-name-inferrer.cc
@@ -55,14 +55,14 @@ void FuncNameInferrer::PushEnclosingName(Handle<String> name) {
void FuncNameInferrer::PushLiteralName(Handle<String> name) {
- if (IsOpen() && !isolate()->heap()->prototype_symbol()->Equals(*name)) {
+ if (IsOpen() && !isolate()->heap()->prototype_string()->Equals(*name)) {
names_stack_.Add(Name(name, kLiteralName), zone());
}
}
void FuncNameInferrer::PushVariableName(Handle<String> name) {
- if (IsOpen() && !isolate()->heap()->result_symbol()->Equals(*name)) {
+ if (IsOpen() && !isolate()->heap()->result_string()->Equals(*name)) {
names_stack_.Add(Name(name, kVariableName), zone());
}
}
@@ -85,7 +85,7 @@ Handle<String> FuncNameInferrer::MakeNameFromStackHelper(int pos,
if (prev->length() > 0) {
Factory* factory = isolate()->factory();
Handle<String> curr = factory->NewConsString(
- factory->dot_symbol(), names_stack_.at(pos).name);
+ factory->dot_string(), names_stack_.at(pos).name);
return MakeNameFromStackHelper(pos + 1,
factory->NewConsString(prev, curr));
} else {
diff --git a/deps/v8/src/gdb-jit.cc b/deps/v8/src/gdb-jit.cc
index dde6bbdaa..21cfd2233 100644
--- a/deps/v8/src/gdb-jit.cc
+++ b/deps/v8/src/gdb-jit.cc
@@ -187,7 +187,7 @@ class Writer BASE_EMBEDDED {
byte* buffer_;
};
-class StringTable;
+class ELFStringTable;
template<typename THeader>
class DebugSectionBase : public ZoneObject {
@@ -217,7 +217,7 @@ class DebugSectionBase : public ZoneObject {
struct MachOSectionHeader {
char sectname[16];
char segname[16];
-#if defined(V8_TARGET_ARCH_IA32)
+#if V8_TARGET_ARCH_IA32
uint32_t addr;
uint32_t size;
#else
@@ -338,7 +338,7 @@ class ELFSection : public DebugSectionBase<ELFSectionHeader> {
virtual ~ELFSection() { }
- void PopulateHeader(Writer::Slot<Header> header, StringTable* strtab);
+ void PopulateHeader(Writer::Slot<Header> header, ELFStringTable* strtab);
virtual void WriteBody(Writer::Slot<Header> header, Writer* w) {
uintptr_t start = w->position();
@@ -438,9 +438,9 @@ class FullHeaderELFSection : public ELFSection {
};
-class StringTable : public ELFSection {
+class ELFStringTable : public ELFSection {
public:
- explicit StringTable(const char* name)
+ explicit ELFStringTable(const char* name)
: ELFSection(name, TYPE_STRTAB, 1), writer_(NULL), offset_(0), size_(0) {
}
@@ -488,7 +488,7 @@ class StringTable : public ELFSection {
void ELFSection::PopulateHeader(Writer::Slot<ELFSection::Header> header,
- StringTable* strtab) {
+ ELFStringTable* strtab) {
header->name = strtab->Add(name_);
header->type = type_;
header->alignment = align_;
@@ -500,10 +500,10 @@ void ELFSection::PopulateHeader(Writer::Slot<ELFSection::Header> header,
#if defined(__MACH_O)
class MachO BASE_EMBEDDED {
public:
- MachO() : sections_(6) { }
+ explicit MachO(Zone* zone) : zone_(zone), sections_(6, zone) { }
uint32_t AddSection(MachOSection* section) {
- sections_.Add(section);
+ sections_.Add(section, zone_);
return sections_.length() - 1;
}
@@ -525,7 +525,7 @@ class MachO BASE_EMBEDDED {
uint32_t ncmds;
uint32_t sizeofcmds;
uint32_t flags;
-#if defined(V8_TARGET_ARCH_X64)
+#if V8_TARGET_ARCH_X64
uint32_t reserved;
#endif
};
@@ -534,7 +534,7 @@ class MachO BASE_EMBEDDED {
uint32_t cmd;
uint32_t cmdsize;
char segname[16];
-#if defined(V8_TARGET_ARCH_IA32)
+#if V8_TARGET_ARCH_IA32
uint32_t vmaddr;
uint32_t vmsize;
uint32_t fileoff;
@@ -560,11 +560,11 @@ class MachO BASE_EMBEDDED {
Writer::Slot<MachOHeader> WriteHeader(Writer* w) {
ASSERT(w->position() == 0);
Writer::Slot<MachOHeader> header = w->CreateSlotHere<MachOHeader>();
-#if defined(V8_TARGET_ARCH_IA32)
+#if V8_TARGET_ARCH_IA32
header->magic = 0xFEEDFACEu;
header->cputype = 7; // i386
header->cpusubtype = 3; // CPU_SUBTYPE_I386_ALL
-#elif defined(V8_TARGET_ARCH_X64)
+#elif V8_TARGET_ARCH_X64
header->magic = 0xFEEDFACFu;
header->cputype = 7 | 0x01000000; // i386 | 64-bit ABI
header->cpusubtype = 3; // CPU_SUBTYPE_I386_ALL
@@ -585,7 +585,7 @@ class MachO BASE_EMBEDDED {
uintptr_t code_size) {
Writer::Slot<MachOSegmentCommand> cmd =
w->CreateSlotHere<MachOSegmentCommand>();
-#if defined(V8_TARGET_ARCH_IA32)
+#if V8_TARGET_ARCH_IA32
cmd->cmd = LC_SEGMENT_32;
#else
cmd->cmd = LC_SEGMENT_64;
@@ -620,7 +620,7 @@ class MachO BASE_EMBEDDED {
cmd->filesize = w->position() - (uintptr_t)cmd->fileoff;
}
-
+ Zone* zone_;
ZoneList<MachOSection*> sections_;
};
#endif // defined(__MACH_O)
@@ -629,9 +629,9 @@ class MachO BASE_EMBEDDED {
#if defined(__ELF)
class ELF BASE_EMBEDDED {
public:
- ELF(Zone* zone) : sections_(6, zone) {
+ explicit ELF(Zone* zone) : zone_(zone), sections_(6, zone) {
sections_.Add(new(zone) ELFSection("", ELFSection::TYPE_NULL, 0), zone);
- sections_.Add(new(zone) StringTable(".shstrtab"), zone);
+ sections_.Add(new(zone) ELFStringTable(".shstrtab"), zone);
}
void Write(Writer* w) {
@@ -644,8 +644,8 @@ class ELF BASE_EMBEDDED {
return sections_[index];
}
- uint32_t AddSection(ELFSection* section, Zone* zone) {
- sections_.Add(section, zone);
+ uint32_t AddSection(ELFSection* section) {
+ sections_.Add(section, zone_);
section->set_index(sections_.length() - 1);
return sections_.length() - 1;
}
@@ -672,25 +672,25 @@ class ELF BASE_EMBEDDED {
void WriteHeader(Writer* w) {
ASSERT(w->position() == 0);
Writer::Slot<ELFHeader> header = w->CreateSlotHere<ELFHeader>();
-#if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_ARM)
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM
const uint8_t ident[16] =
{ 0x7f, 'E', 'L', 'F', 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0};
-#elif defined(V8_TARGET_ARCH_X64)
+#elif V8_TARGET_ARCH_X64
const uint8_t ident[16] =
{ 0x7f, 'E', 'L', 'F', 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0};
#else
#error Unsupported target architecture.
#endif
- memcpy(header->ident, ident, 16);
+ OS::MemCopy(header->ident, ident, 16);
header->type = 1;
-#if defined(V8_TARGET_ARCH_IA32)
+#if V8_TARGET_ARCH_IA32
header->machine = 3;
-#elif defined(V8_TARGET_ARCH_X64)
+#elif V8_TARGET_ARCH_X64
// Processor identification value for x64 is 62 as defined in
// System V ABI, AMD64 Supplement
// http://www.x86-64.org/documentation/abi.pdf
header->machine = 62;
-#elif defined(V8_TARGET_ARCH_ARM)
+#elif V8_TARGET_ARCH_ARM
// Set to EM_ARM, defined as 40, in "ARM ELF File Format" at
// infocenter.arm.com/help/topic/com.arm.doc.dui0101a/DUI0101A_Elf.pdf
header->machine = 40;
@@ -718,7 +718,7 @@ class ELF BASE_EMBEDDED {
w->CreateSlotsHere<ELFSection::Header>(sections_.length());
// String table for section table is the first section.
- StringTable* strtab = static_cast<StringTable*>(SectionAt(1));
+ ELFStringTable* strtab = static_cast<ELFStringTable*>(SectionAt(1));
strtab->AttachWriter(w);
for (int i = 0, length = sections_.length();
i < length;
@@ -743,6 +743,7 @@ class ELF BASE_EMBEDDED {
}
}
+ Zone* zone_;
ZoneList<ELFSection*> sections_;
};
@@ -784,7 +785,7 @@ class ELFSymbol BASE_EMBEDDED {
Binding binding() const {
return static_cast<Binding>(info >> 4);
}
-#if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_ARM)
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM
struct SerializedLayout {
SerializedLayout(uint32_t name,
uintptr_t value,
@@ -807,7 +808,7 @@ class ELFSymbol BASE_EMBEDDED {
uint8_t other;
uint16_t section;
};
-#elif defined(V8_TARGET_ARCH_X64)
+#elif V8_TARGET_ARCH_X64
struct SerializedLayout {
SerializedLayout(uint32_t name,
uintptr_t value,
@@ -832,7 +833,7 @@ class ELFSymbol BASE_EMBEDDED {
};
#endif
- void Write(Writer::Slot<SerializedLayout> s, StringTable* t) {
+ void Write(Writer::Slot<SerializedLayout> s, ELFStringTable* t) {
// Convert symbol names from strings to indexes in the string table.
s->name = t->Add(name);
s->value = value;
@@ -871,8 +872,8 @@ class ELFSymbolTable : public ELFSection {
header->size = w->position() - header->offset;
// String table for this symbol table should follow it in the section table.
- StringTable* strtab =
- static_cast<StringTable*>(w->debug_object()->SectionAt(index() + 1));
+ ELFStringTable* strtab =
+ static_cast<ELFStringTable*>(w->debug_object()->SectionAt(index() + 1));
strtab->AttachWriter(w);
symbols.at(0).set(ELFSymbol::SerializedLayout(0,
0,
@@ -905,7 +906,7 @@ class ELFSymbolTable : public ELFSection {
private:
void WriteSymbolsList(const ZoneList<ELFSymbol>* src,
Writer::Slot<ELFSymbol::SerializedLayout> dst,
- StringTable* strtab) {
+ ELFStringTable* strtab) {
for (int i = 0, len = src->length();
i < len;
i++) {
@@ -921,7 +922,7 @@ class ELFSymbolTable : public ELFSection {
class CodeDescription BASE_EMBEDDED {
public:
-#ifdef V8_TARGET_ARCH_X64
+#if V8_TARGET_ARCH_X64
enum StackState {
POST_RBP_PUSH,
POST_RBP_SET,
@@ -984,7 +985,7 @@ class CodeDescription BASE_EMBEDDED {
lineinfo_ != NULL;
}
-#ifdef V8_TARGET_ARCH_X64
+#if V8_TARGET_ARCH_X64
uintptr_t GetStackStateStartAddress(StackState state) const {
ASSERT(state < STACK_STATE_MAX);
return stack_state_start_addresses_[state];
@@ -1012,22 +1013,22 @@ class CodeDescription BASE_EMBEDDED {
GDBJITLineInfo* lineinfo_;
GDBJITInterface::CodeTag tag_;
CompilationInfo* info_;
-#ifdef V8_TARGET_ARCH_X64
+#if V8_TARGET_ARCH_X64
uintptr_t stack_state_start_addresses_[STACK_STATE_MAX];
#endif
};
#if defined(__ELF)
static void CreateSymbolsTable(CodeDescription* desc,
+ Zone* zone,
ELF* elf,
int text_section_index) {
- Zone* zone = desc->info()->zone();
ELFSymbolTable* symtab = new(zone) ELFSymbolTable(".symtab", zone);
- StringTable* strtab = new(zone) StringTable(".strtab");
+ ELFStringTable* strtab = new(zone) ELFStringTable(".strtab");
// Symbol table should be followed by the linked string table.
- elf->AddSection(symtab, zone);
- elf->AddSection(strtab, zone);
+ elf->AddSection(symtab);
+ elf->AddSection(strtab);
symtab->Add(ELFSymbol("V8 Code",
0,
@@ -1106,13 +1107,13 @@ class DebugInfoSection : public DebugSection {
w->Write<intptr_t>(desc_->CodeStart() + desc_->CodeSize());
Writer::Slot<uint32_t> fb_block_size = w->CreateSlotHere<uint32_t>();
uintptr_t fb_block_start = w->position();
-#if defined(V8_TARGET_ARCH_IA32)
+#if V8_TARGET_ARCH_IA32
w->Write<uint8_t>(DW_OP_reg5); // The frame pointer's here on ia32
-#elif defined(V8_TARGET_ARCH_X64)
+#elif V8_TARGET_ARCH_X64
w->Write<uint8_t>(DW_OP_reg6); // and here on x64.
-#elif defined(V8_TARGET_ARCH_ARM)
+#elif V8_TARGET_ARCH_ARM
UNIMPLEMENTED();
-#elif defined(V8_TARGET_ARCH_MIPS)
+#elif V8_TARGET_ARCH_MIPS
UNIMPLEMENTED();
#else
#error Unsupported target architecture.
@@ -1213,8 +1214,11 @@ class DebugInfoSection : public DebugSection {
w->WriteSLEB128(StandardFrameConstants::kContextOffset);
block_size.set(static_cast<uint32_t>(w->position() - block_start));
}
+
+ w->WriteULEB128(0); // Terminate the sub program.
}
+ w->WriteULEB128(0); // Terminate the compile unit.
size.set(static_cast<uint32_t>(w->position() - start));
return true;
}
@@ -1324,15 +1328,14 @@ class DebugAbbrevSection : public DebugSection {
// The real slot ID is internal_slots + context_slot_id.
int internal_slots = Context::MIN_CONTEXT_SLOTS;
int locals = scope->StackLocalCount();
- int total_children =
- params + slots + context_slots + internal_slots + locals + 2;
+  // The total number of children is params + slots + context_slots +
+  // internal_slots + locals + 2 (__function and __context).
// The extra duplication below seems to be necessary to keep
// gdb from getting upset on OSX.
w->WriteULEB128(current_abbreviation++); // Abbreviation code.
w->WriteULEB128(DW_TAG_SUBPROGRAM);
- w->Write<uint8_t>(
- total_children != 0 ? DW_CHILDREN_YES : DW_CHILDREN_NO);
+ w->Write<uint8_t>(DW_CHILDREN_YES);
w->WriteULEB128(DW_AT_NAME);
w->WriteULEB128(DW_FORM_STRING);
w->WriteULEB128(DW_AT_LOW_PC);
@@ -1384,9 +1387,7 @@ class DebugAbbrevSection : public DebugSection {
// The context.
WriteVariableAbbreviation(w, current_abbreviation++, true, false);
- if (total_children != 0) {
- w->WriteULEB128(0); // Terminate the sibling list.
- }
+ w->WriteULEB128(0); // Terminate the sibling list.
}
w->WriteULEB128(0); // Terminate the table.
@@ -1563,7 +1564,7 @@ class DebugLineSection : public DebugSection {
};
-#ifdef V8_TARGET_ARCH_X64
+#if V8_TARGET_ARCH_X64
class UnwindInfoSection : public DebugSection {
public:
@@ -1789,15 +1790,16 @@ bool UnwindInfoSection::WriteBodyInternal(Writer* w) {
#endif // V8_TARGET_ARCH_X64
-static void CreateDWARFSections(CodeDescription* desc, DebugObject* obj) {
- Zone* zone = desc->info()->zone();
+static void CreateDWARFSections(CodeDescription* desc,
+ Zone* zone,
+ DebugObject* obj) {
if (desc->IsLineInfoAvailable()) {
- obj->AddSection(new(zone) DebugInfoSection(desc), zone);
- obj->AddSection(new(zone) DebugAbbrevSection(desc), zone);
- obj->AddSection(new(zone) DebugLineSection(desc), zone);
+ obj->AddSection(new(zone) DebugInfoSection(desc));
+ obj->AddSection(new(zone) DebugAbbrevSection(desc));
+ obj->AddSection(new(zone) DebugLineSection(desc));
}
-#ifdef V8_TARGET_ARCH_X64
- obj->AddSection(new(zone) UnwindInfoSection(desc), zone);
+#if V8_TARGET_ARCH_X64
+ obj->AddSection(new(zone) UnwindInfoSection(desc));
#endif
}
@@ -1841,7 +1843,7 @@ extern "C" {
#ifdef OBJECT_PRINT
void __gdb_print_v8_object(MaybeObject* object) {
object->Print();
- fprintf(stdout, "\n");
+ PrintF(stdout, "\n");
}
#endif
}
@@ -1854,7 +1856,7 @@ static JITCodeEntry* CreateCodeEntry(Address symfile_addr,
entry->symfile_addr_ = reinterpret_cast<Address>(entry + 1);
entry->symfile_size_ = symfile_size;
- memcpy(entry->symfile_addr_, symfile_addr, symfile_size);
+ OS::MemCopy(entry->symfile_addr_, symfile_addr, symfile_size);
entry->prev_ = entry->next_ = NULL;
@@ -1870,7 +1872,7 @@ static void DestroyCodeEntry(JITCodeEntry* entry) {
static void RegisterCodeEntry(JITCodeEntry* entry,
bool dump_if_enabled,
const char* name_hint) {
-#if defined(DEBUG) && !defined(WIN32)
+#if defined(DEBUG) && !V8_OS_WIN
static int file_num = 0;
if (FLAG_gdbjit_dump && dump_if_enabled) {
static const int kMaxFileNameSize = 64;
@@ -1915,38 +1917,37 @@ static void UnregisterCodeEntry(JITCodeEntry* entry) {
}
-static JITCodeEntry* CreateELFObject(CodeDescription* desc) {
- Zone* zone = desc->info()->zone();
- ZoneScope zone_scope(zone, DELETE_ON_EXIT);
+static JITCodeEntry* CreateELFObject(CodeDescription* desc, Isolate* isolate) {
#ifdef __MACH_O
- MachO mach_o;
+ Zone zone(isolate);
+ MachO mach_o(&zone);
Writer w(&mach_o);
- mach_o.AddSection(new MachOTextSection(kCodeAlignment,
- desc->CodeStart(),
- desc->CodeSize()));
+ mach_o.AddSection(new(&zone) MachOTextSection(kCodeAlignment,
+ desc->CodeStart(),
+ desc->CodeSize()));
- CreateDWARFSections(desc, &mach_o);
+ CreateDWARFSections(desc, &zone, &mach_o);
mach_o.Write(&w, desc->CodeStart(), desc->CodeSize());
#else
- ELF elf(zone);
+ Zone zone(isolate);
+ ELF elf(&zone);
Writer w(&elf);
int text_section_index = elf.AddSection(
- new(zone) FullHeaderELFSection(
+ new(&zone) FullHeaderELFSection(
".text",
ELFSection::TYPE_NOBITS,
kCodeAlignment,
desc->CodeStart(),
0,
desc->CodeSize(),
- ELFSection::FLAG_ALLOC | ELFSection::FLAG_EXEC),
- zone);
+ ELFSection::FLAG_ALLOC | ELFSection::FLAG_EXEC));
- CreateSymbolsTable(desc, &elf, text_section_index);
+ CreateSymbolsTable(desc, &zone, &elf, text_section_index);
- CreateDWARFSections(desc, &elf);
+ CreateDWARFSections(desc, &zone, &elf);
elf.Write(&w);
#endif
@@ -1996,7 +1997,7 @@ static GDBJITLineInfo* UntagLineInfo(void* ptr) {
}
-void GDBJITInterface::AddCode(Handle<String> name,
+void GDBJITInterface::AddCode(Handle<Name> name,
Handle<Script> script,
Handle<Code> code,
CompilationInfo* info) {
@@ -2005,16 +2006,18 @@ void GDBJITInterface::AddCode(Handle<String> name,
// Force initialization of line_ends array.
GetScriptLineNumber(script, 0);
- if (!name.is_null()) {
- SmartArrayPointer<char> name_cstring = name->ToCString(DISALLOW_NULLS);
+ if (!name.is_null() && name->IsString()) {
+ SmartArrayPointer<char> name_cstring =
+ Handle<String>::cast(name)->ToCString(DISALLOW_NULLS);
AddCode(*name_cstring, *code, GDBJITInterface::FUNCTION, *script, info);
} else {
AddCode("", *code, GDBJITInterface::FUNCTION, *script, info);
}
}
+
static void AddUnwindInfo(CodeDescription* desc) {
-#ifdef V8_TARGET_ARCH_X64
+#if V8_TARGET_ARCH_X64
if (desc->tag() == GDBJITInterface::FUNCTION) {
// To avoid propagating unwinding information through
// compilation pipeline we use an approximation.
@@ -2060,8 +2063,8 @@ void GDBJITInterface::AddCode(const char* name,
CompilationInfo* info) {
if (!FLAG_gdbjit) return;
- ScopedLock lock(mutex.Pointer());
- AssertNoAllocation no_gc;
+ LockGuard<Mutex> lock_guard(mutex.Pointer());
+ DisallowHeapAllocation no_gc;
HashMap::Entry* e = GetEntries()->Lookup(code, HashForCodeObject(code), true);
if (e->value != NULL && !IsLineInfoTagged(e->value)) return;
@@ -2082,7 +2085,8 @@ void GDBJITInterface::AddCode(const char* name,
}
AddUnwindInfo(&code_desc);
- JITCodeEntry* entry = CreateELFObject(&code_desc);
+ Isolate* isolate = code->GetIsolate();
+ JITCodeEntry* entry = CreateELFObject(&code_desc, isolate);
ASSERT(!IsLineInfoTagged(entry));
delete lineinfo;
@@ -2124,10 +2128,14 @@ void GDBJITInterface::AddCode(GDBJITInterface::CodeTag tag,
void GDBJITInterface::AddCode(GDBJITInterface::CodeTag tag,
- String* name,
+ Name* name,
Code* code) {
if (!FLAG_gdbjit) return;
- AddCode(tag, name != NULL ? *name->ToCString(DISALLOW_NULLS) : NULL, code);
+ if (name != NULL && name->IsString()) {
+ AddCode(tag, *String::cast(name)->ToCString(DISALLOW_NULLS), code);
+ } else {
+ AddCode(tag, "", code);
+ }
}
@@ -2141,7 +2149,7 @@ void GDBJITInterface::AddCode(GDBJITInterface::CodeTag tag, Code* code) {
void GDBJITInterface::RemoveCode(Code* code) {
if (!FLAG_gdbjit) return;
- ScopedLock lock(mutex.Pointer());
+ LockGuard<Mutex> lock_guard(mutex.Pointer());
HashMap::Entry* e = GetEntries()->Lookup(code,
HashForCodeObject(code),
false);
@@ -2159,9 +2167,27 @@ void GDBJITInterface::RemoveCode(Code* code) {
}
+void GDBJITInterface::RemoveCodeRange(Address start, Address end) {
+ HashMap* entries = GetEntries();
+ Zone zone(Isolate::Current());
+ ZoneList<Code*> dead_codes(1, &zone);
+
+ for (HashMap::Entry* e = entries->Start(); e != NULL; e = entries->Next(e)) {
+ Code* code = reinterpret_cast<Code*>(e->key);
+ if (code->address() >= start && code->address() < end) {
+ dead_codes.Add(code, &zone);
+ }
+ }
+
+ for (int i = 0; i < dead_codes.length(); i++) {
+ RemoveCode(dead_codes.at(i));
+ }
+}
+
+
void GDBJITInterface::RegisterDetailedLineInfo(Code* code,
GDBJITLineInfo* line_info) {
- ScopedLock lock(mutex.Pointer());
+ LockGuard<Mutex> lock_guard(mutex.Pointer());
ASSERT(!IsLineInfoTagged(line_info));
HashMap::Entry* e = GetEntries()->Lookup(code, HashForCodeObject(code), true);
ASSERT(e->value == NULL);
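
The new GDBJITInterface::RemoveCodeRange above deliberately makes two passes over the entry hash map: it first collects the Code objects whose address falls in [start, end) into a temporary list, and only then calls RemoveCode on each one, because RemoveCode mutates the very map being iterated. A minimal standalone sketch of that collect-then-erase pattern (plain std::unordered_map and a FakeCode stand-in; none of these names are V8 API):

#include <cstdint>
#include <unordered_map>
#include <vector>

struct FakeCode { uintptr_t address; };  // stand-in for v8::internal::Code

// Remove every entry whose code address lies in [start, end) without
// invalidating the iterator of the map that is being walked.
void RemoveCodeRange(std::unordered_map<FakeCode*, int>* entries,
                     uintptr_t start, uintptr_t end) {
  std::vector<FakeCode*> dead_codes;  // first pass: collect matching keys
  for (std::unordered_map<FakeCode*, int>::iterator it = entries->begin();
       it != entries->end(); ++it) {
    FakeCode* code = it->first;
    if (code->address >= start && code->address < end) {
      dead_codes.push_back(code);
    }
  }
  for (size_t i = 0; i < dead_codes.size(); ++i) {  // second pass: mutate
    entries->erase(dead_codes[i]);
  }
}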
diff --git a/deps/v8/src/gdb-jit.h b/deps/v8/src/gdb-jit.h
index 0eca9384d..a34d3d301 100644
--- a/deps/v8/src/gdb-jit.h
+++ b/deps/v8/src/gdb-jit.h
@@ -118,12 +118,12 @@ class GDBJITInterface: public AllStatic {
Script* script,
CompilationInfo* info);
- static void AddCode(Handle<String> name,
+ static void AddCode(Handle<Name> name,
Handle<Script> script,
Handle<Code> code,
CompilationInfo* info);
- static void AddCode(CodeTag tag, String* name, Code* code);
+ static void AddCode(CodeTag tag, Name* name, Code* code);
static void AddCode(CodeTag tag, const char* name, Code* code);
@@ -131,6 +131,8 @@ class GDBJITInterface: public AllStatic {
static void RemoveCode(Code* code);
+ static void RemoveCodeRange(Address start, Address end);
+
static void RegisterDetailedLineInfo(Code* code, GDBJITLineInfo* line_info);
};
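
With the switch from Handle<String> to Handle<Name> in gdb-jit.h above, a code name may now be a Symbol, so call sites check IsString() before converting and otherwise fall back to registering the code anonymously. A small self-contained sketch of that fallback, using hypothetical stand-in classes rather than V8's Name/String:

#include <string>

// Hypothetical stand-ins: a Name is either a String or a Symbol.
struct Name {
  virtual ~Name() {}
  virtual bool IsString() const { return false; }
};
struct String : Name {
  explicit String(std::string v) : value(v) {}
  bool IsString() const { return true; }
  std::string value;
};
struct Symbol : Name {};

// Mirrors the fallback in AddCode: only String names are printable; Symbols
// (and null names) register the code with an empty name instead.
std::string PrintableName(const Name* name) {
  if (name != NULL && name->IsString()) {
    return static_cast<const String*>(name)->value;
  }
  return "";
}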
diff --git a/deps/v8/src/generator.js b/deps/v8/src/generator.js
new file mode 100644
index 000000000..3c8ea6f31
--- /dev/null
+++ b/deps/v8/src/generator.js
@@ -0,0 +1,92 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"use strict";
+
+// This file relies on the fact that the following declarations have been made
+// in runtime.js:
+// var $Function = global.Function;
+
+// ----------------------------------------------------------------------------
+
+
+// Generator functions and objects are specified by ES6, sections 15.19.3 and
+// 15.19.4.
+
+function GeneratorObjectNext(value) {
+ if (!IS_GENERATOR(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['[Generator].prototype.next', this]);
+ }
+
+ return %_GeneratorNext(this, value);
+}
+
+function GeneratorObjectThrow(exn) {
+ if (!IS_GENERATOR(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['[Generator].prototype.throw', this]);
+ }
+
+ return %_GeneratorThrow(this, exn);
+}
+
+function GeneratorFunctionPrototypeConstructor(x) {
+ if (%_IsConstructCall()) {
+ throw MakeTypeError('not_constructor', ['GeneratorFunctionPrototype']);
+ }
+}
+
+function GeneratorFunctionConstructor(arg1) { // length == 1
+ var source = NewFunctionString(arguments, 'function*');
+ var global_receiver = %GlobalReceiver(global);
+ // Compile the string in the constructor and not a helper so that errors
+ // appear to come from here.
+ var f = %_CallFunction(global_receiver, %CompileString(source, true));
+ %FunctionMarkNameShouldPrintAsAnonymous(f);
+ return f;
+}
+
+
+function SetUpGenerators() {
+ %CheckIsBootstrapping();
+ var GeneratorObjectPrototype = GeneratorFunctionPrototype.prototype;
+ InstallFunctions(GeneratorObjectPrototype,
+ DONT_ENUM | DONT_DELETE | READ_ONLY,
+ ["next", GeneratorObjectNext,
+ "throw", GeneratorObjectThrow]);
+ %SetProperty(GeneratorObjectPrototype, "constructor",
+ GeneratorFunctionPrototype, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetPrototype(GeneratorFunctionPrototype, $Function.prototype);
+ %SetCode(GeneratorFunctionPrototype, GeneratorFunctionPrototypeConstructor);
+ %SetProperty(GeneratorFunctionPrototype, "constructor",
+ GeneratorFunction, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetPrototype(GeneratorFunction, $Function);
+ %SetCode(GeneratorFunction, GeneratorFunctionConstructor);
+}
+
+SetUpGenerators();
diff --git a/deps/v8/src/global-handles.cc b/deps/v8/src/global-handles.cc
index c09ba4b47..2ebe1c008 100644
--- a/deps/v8/src/global-handles.cc
+++ b/deps/v8/src/global-handles.cc
@@ -37,7 +37,13 @@ namespace internal {
ObjectGroup::~ObjectGroup() {
- if (info_ != NULL) info_->Dispose();
+ if (info != NULL) info->Dispose();
+ delete[] objects;
+}
+
+
+ImplicitRefGroup::~ImplicitRefGroup() {
+ delete[] children;
}
@@ -46,7 +52,7 @@ class GlobalHandles::Node {
// State transition diagram:
// FREE -> NORMAL <-> WEAK -> PENDING -> NEAR_DEATH -> { NORMAL, WEAK, FREE }
enum State {
- FREE,
+ FREE = 0,
NORMAL, // Normal global handle.
WEAK, // Flagged as weak but not yet finalized.
PENDING, // Has been recognized as only reachable by weak handles.
@@ -59,54 +65,66 @@ class GlobalHandles::Node {
return reinterpret_cast<Node*>(location);
}
- Node() {}
+ Node() {
+ ASSERT(OFFSET_OF(Node, class_id_) == Internals::kNodeClassIdOffset);
+ ASSERT(OFFSET_OF(Node, flags_) == Internals::kNodeFlagsOffset);
+ STATIC_ASSERT(static_cast<int>(NodeState::kMask) ==
+ Internals::kNodeStateMask);
+ STATIC_ASSERT(WEAK == Internals::kNodeStateIsWeakValue);
+ STATIC_ASSERT(PENDING == Internals::kNodeStateIsPendingValue);
+ STATIC_ASSERT(NEAR_DEATH == Internals::kNodeStateIsNearDeathValue);
+ STATIC_ASSERT(static_cast<int>(IsIndependent::kShift) ==
+ Internals::kNodeIsIndependentShift);
+ STATIC_ASSERT(static_cast<int>(IsPartiallyDependent::kShift) ==
+ Internals::kNodeIsPartiallyDependentShift);
+ }
-#ifdef DEBUG
+#ifdef ENABLE_HANDLE_ZAPPING
~Node() {
// TODO(1428): if it's a weak handle we should have invoked its callback.
// Zap the values for eager trapping.
- object_ = NULL;
+ object_ = reinterpret_cast<Object*>(kGlobalHandleZapValue);
class_id_ = v8::HeapProfiler::kPersistentHandleNoClassId;
index_ = 0;
- independent_ = false;
- in_new_space_list_ = false;
+ set_independent(false);
+ set_partially_dependent(false);
+ set_in_new_space_list(false);
parameter_or_next_free_.next_free = NULL;
- callback_ = NULL;
+ weak_callback_ = NULL;
}
#endif
void Initialize(int index, Node** first_free) {
index_ = static_cast<uint8_t>(index);
ASSERT(static_cast<int>(index_) == index);
- state_ = FREE;
- in_new_space_list_ = false;
+ set_state(FREE);
+ set_in_new_space_list(false);
parameter_or_next_free_.next_free = *first_free;
*first_free = this;
}
- void Acquire(Object* object, GlobalHandles* global_handles) {
- ASSERT(state_ == FREE);
+ void Acquire(Object* object) {
+ ASSERT(state() == FREE);
object_ = object;
class_id_ = v8::HeapProfiler::kPersistentHandleNoClassId;
- independent_ = false;
- state_ = NORMAL;
+ set_independent(false);
+ set_partially_dependent(false);
+ set_state(NORMAL);
parameter_or_next_free_.parameter = NULL;
- callback_ = NULL;
- IncreaseBlockUses(global_handles);
+ weak_callback_ = NULL;
+ IncreaseBlockUses();
}
- void Release(GlobalHandles* global_handles) {
- ASSERT(state_ != FREE);
- if (IsWeakRetainer()) {
- global_handles->number_of_weak_handles_--;
- if (object_->IsJSGlobalObject()) {
- global_handles->number_of_global_object_weak_handles_--;
- }
- }
- state_ = FREE;
- parameter_or_next_free_.next_free = global_handles->first_free_;
- global_handles->first_free_ = this;
- DecreaseBlockUses(global_handles);
+ void Release() {
+ ASSERT(state() != FREE);
+ set_state(FREE);
+ // Zap the values for eager trapping.
+ object_ = reinterpret_cast<Object*>(kGlobalHandleZapValue);
+ class_id_ = v8::HeapProfiler::kPersistentHandleNoClassId;
+ set_independent(false);
+ set_partially_dependent(false);
+ weak_callback_ = NULL;
+ DecreaseBlockUses();
}
// Object slot accessors.
@@ -118,109 +136,138 @@ class GlobalHandles::Node {
bool has_wrapper_class_id() const {
return class_id_ != v8::HeapProfiler::kPersistentHandleNoClassId;
}
+
uint16_t wrapper_class_id() const { return class_id_; }
- void set_wrapper_class_id(uint16_t class_id) {
- class_id_ = class_id;
+
+ // State and flag accessors.
+
+ State state() const {
+ return NodeState::decode(flags_);
+ }
+ void set_state(State state) {
+ flags_ = NodeState::update(flags_, state);
+ }
+
+ bool is_independent() {
+ return IsIndependent::decode(flags_);
+ }
+ void set_independent(bool v) {
+ flags_ = IsIndependent::update(flags_, v);
}
- // State accessors.
+ bool is_partially_dependent() {
+ return IsPartiallyDependent::decode(flags_);
+ }
+ void set_partially_dependent(bool v) {
+ flags_ = IsPartiallyDependent::update(flags_, v);
+ }
- State state() const { return state_; }
+ bool is_in_new_space_list() {
+ return IsInNewSpaceList::decode(flags_);
+ }
+ void set_in_new_space_list(bool v) {
+ flags_ = IsInNewSpaceList::update(flags_, v);
+ }
+
+ bool is_revivable_callback() {
+ return IsRevivableCallback::decode(flags_);
+ }
+ void set_revivable_callback(bool v) {
+ flags_ = IsRevivableCallback::update(flags_, v);
+ }
bool IsNearDeath() const {
// Check for PENDING to ensure correct answer when processing callbacks.
- return state_ == PENDING || state_ == NEAR_DEATH;
+ return state() == PENDING || state() == NEAR_DEATH;
}
- bool IsWeak() const { return state_ == WEAK; }
+ bool IsWeak() const { return state() == WEAK; }
- bool IsRetainer() const { return state_ != FREE; }
+ bool IsRetainer() const { return state() != FREE; }
- bool IsStrongRetainer() const { return state_ == NORMAL; }
+ bool IsStrongRetainer() const { return state() == NORMAL; }
bool IsWeakRetainer() const {
- return state_ == WEAK || state_ == PENDING || state_ == NEAR_DEATH;
+ return state() == WEAK || state() == PENDING || state() == NEAR_DEATH;
}
void MarkPending() {
- ASSERT(state_ == WEAK);
- state_ = PENDING;
+ ASSERT(state() == WEAK);
+ set_state(PENDING);
}
// Independent flag accessors.
void MarkIndependent() {
- ASSERT(state_ != FREE);
- independent_ = true;
+ ASSERT(state() != FREE);
+ set_independent(true);
}
- bool is_independent() const { return independent_; }
- // In-new-space-list flag accessors.
- void set_in_new_space_list(bool v) { in_new_space_list_ = v; }
- bool is_in_new_space_list() const { return in_new_space_list_; }
+ void MarkPartiallyDependent() {
+ ASSERT(state() != FREE);
+ if (GetGlobalHandles()->isolate()->heap()->InNewSpace(object_)) {
+ set_partially_dependent(true);
+ }
+ }
+ void clear_partially_dependent() { set_partially_dependent(false); }
// Callback accessor.
- WeakReferenceCallback callback() { return callback_; }
+ // TODO(svenpanne) Re-enable or nuke later.
+ // WeakReferenceCallback callback() { return callback_; }
// Callback parameter accessors.
void set_parameter(void* parameter) {
- ASSERT(state_ != FREE);
+ ASSERT(state() != FREE);
parameter_or_next_free_.parameter = parameter;
}
void* parameter() const {
- ASSERT(state_ != FREE);
+ ASSERT(state() != FREE);
return parameter_or_next_free_.parameter;
}
// Accessors for next free node in the free list.
Node* next_free() {
- ASSERT(state_ == FREE);
+ ASSERT(state() == FREE);
return parameter_or_next_free_.next_free;
}
void set_next_free(Node* value) {
- ASSERT(state_ == FREE);
+ ASSERT(state() == FREE);
parameter_or_next_free_.next_free = value;
}
- void MakeWeak(GlobalHandles* global_handles,
- void* parameter,
- WeakReferenceCallback callback) {
- ASSERT(state_ != FREE);
- if (!IsWeakRetainer()) {
- global_handles->number_of_weak_handles_++;
- if (object_->IsJSGlobalObject()) {
- global_handles->number_of_global_object_weak_handles_++;
- }
- }
- state_ = WEAK;
+ void MakeWeak(void* parameter,
+ WeakCallback weak_callback,
+ RevivableCallback revivable_callback) {
+ ASSERT((weak_callback == NULL) != (revivable_callback == NULL));
+ ASSERT(state() != FREE);
+ set_state(WEAK);
set_parameter(parameter);
- callback_ = callback;
+ if (weak_callback != NULL) {
+ weak_callback_ = weak_callback;
+ set_revivable_callback(false);
+ } else {
+ weak_callback_ =
+ reinterpret_cast<WeakCallback>(revivable_callback);
+ set_revivable_callback(true);
+ }
}
- void ClearWeakness(GlobalHandles* global_handles) {
- ASSERT(state_ != FREE);
- if (IsWeakRetainer()) {
- global_handles->number_of_weak_handles_--;
- if (object_->IsJSGlobalObject()) {
- global_handles->number_of_global_object_weak_handles_--;
- }
- }
- state_ = NORMAL;
+ void ClearWeakness() {
+ ASSERT(state() != FREE);
+ set_state(NORMAL);
set_parameter(NULL);
}
- bool PostGarbageCollectionProcessing(Isolate* isolate,
- GlobalHandles* global_handles) {
- if (state_ != Node::PENDING) return false;
- WeakReferenceCallback func = callback();
- if (func == NULL) {
- Release(global_handles);
+ bool PostGarbageCollectionProcessing(Isolate* isolate) {
+ if (state() != Node::PENDING) return false;
+ if (weak_callback_ == NULL) {
+ Release();
return false;
}
void* par = parameter();
- state_ = NEAR_DEATH;
+ set_state(NEAR_DEATH);
set_parameter(NULL);
- v8::Persistent<v8::Object> object = ToApi<v8::Object>(handle());
+ Object** object = location();
{
// Check that we are not passing a finalized external string to
// the callback.
@@ -229,19 +276,35 @@ class GlobalHandles::Node {
ASSERT(!object_->IsExternalTwoByteString() ||
ExternalTwoByteString::cast(object_)->resource() != NULL);
// Leaving V8.
- VMState state(isolate, EXTERNAL);
- func(object, par);
+ VMState<EXTERNAL> state(isolate);
+ HandleScope handle_scope(isolate);
+ if (is_revivable_callback()) {
+ RevivableCallback revivable =
+ reinterpret_cast<RevivableCallback>(weak_callback_);
+ revivable(reinterpret_cast<v8::Isolate*>(isolate),
+ reinterpret_cast<Persistent<Value>*>(&object),
+ par);
+ } else {
+ Handle<Object> handle(*object, isolate);
+ v8::WeakCallbackData<v8::Value, void> data(
+ reinterpret_cast<v8::Isolate*>(isolate),
+ v8::Utils::ToLocal(handle),
+ par);
+ weak_callback_(data);
+ }
}
      // Absence of explicit cleanup or revival of a weak handle
      // would in most cases lead to a memory leak.
- ASSERT(state_ != NEAR_DEATH);
+ ASSERT(state() != NEAR_DEATH);
return true;
}
+ inline GlobalHandles* GetGlobalHandles();
+
private:
inline NodeBlock* FindBlock();
- inline void IncreaseBlockUses(GlobalHandles* global_handles);
- inline void DecreaseBlockUses(GlobalHandles* global_handles);
+ inline void IncreaseBlockUses();
+ inline void DecreaseBlockUses();
// Storage for object pointer.
// Placed first to avoid offset computation.
@@ -256,14 +319,18 @@ class GlobalHandles::Node {
// Index in the containing handle block.
uint8_t index_;
- // Need one more bit for MSVC as it treats enums as signed.
- State state_ : 4;
+  // This stores four flags (independent, partially_dependent,
+  // in_new_space_list and revivable_callback) and a State.
+ class NodeState: public BitField<State, 0, 4> {};
+ class IsIndependent: public BitField<bool, 4, 1> {};
+ class IsPartiallyDependent: public BitField<bool, 5, 1> {};
+ class IsInNewSpaceList: public BitField<bool, 6, 1> {};
+ class IsRevivableCallback: public BitField<bool, 7, 1> {};
- bool independent_ : 1;
- bool in_new_space_list_ : 1;
+ uint8_t flags_;
- // Handle specific callback.
- WeakReferenceCallback callback_;
+ // Handle specific callback - might be a weak reference in disguise.
+ WeakCallback weak_callback_;
// Provided data for callback. In FREE state, this is used for
// the free list link.
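
The hunk above replaces the old 4-bit enum field plus two bools with a single uint8_t decoded through BitField helpers, so the node layout keeps matching the offsets asserted against the public API header (kNodeFlagsOffset, kNodeStateMask). A self-contained sketch of the same encode/decode scheme, with a hand-rolled BitField template standing in for v8::internal::BitField:

#include <cassert>
#include <cstdint>

// Minimal BitField in the spirit of v8::internal::BitField.
template <class T, int shift, int size>
struct BitField {
  static const uint8_t kMask = ((1u << size) - 1) << shift;
  static T decode(uint8_t value) {
    return static_cast<T>((value & kMask) >> shift);
  }
  static uint8_t update(uint8_t previous, T value) {
    return (previous & ~kMask) | (static_cast<uint8_t>(value) << shift);
  }
};

enum State { FREE = 0, NORMAL, WEAK, PENDING, NEAR_DEATH };
typedef BitField<State, 0, 4> NodeState;            // bits 0..3
typedef BitField<bool, 4, 1> IsIndependent;         // bit 4
typedef BitField<bool, 5, 1> IsPartiallyDependent;  // bit 5

int main() {
  uint8_t flags = 0;
  flags = NodeState::update(flags, WEAK);
  flags = IsIndependent::update(flags, true);
  assert(NodeState::decode(flags) == WEAK);
  assert(IsIndependent::decode(flags));
  assert(!IsPartiallyDependent::decode(flags));
  return 0;
}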
@@ -280,8 +347,12 @@ class GlobalHandles::NodeBlock {
public:
static const int kSize = 256;
- explicit NodeBlock(NodeBlock* next)
- : next_(next), used_nodes_(0), next_used_(NULL), prev_used_(NULL) {}
+ explicit NodeBlock(GlobalHandles* global_handles, NodeBlock* next)
+ : next_(next),
+ used_nodes_(0),
+ next_used_(NULL),
+ prev_used_(NULL),
+ global_handles_(global_handles) {}
void PutNodesOnFreeList(Node** first_free) {
for (int i = kSize - 1; i >= 0; --i) {
@@ -294,11 +365,11 @@ class GlobalHandles::NodeBlock {
return &nodes_[index];
}
- void IncreaseUses(GlobalHandles* global_handles) {
+ void IncreaseUses() {
ASSERT(used_nodes_ < kSize);
if (used_nodes_++ == 0) {
- NodeBlock* old_first = global_handles->first_used_block_;
- global_handles->first_used_block_ = this;
+ NodeBlock* old_first = global_handles_->first_used_block_;
+ global_handles_->first_used_block_ = this;
next_used_ = old_first;
prev_used_ = NULL;
if (old_first == NULL) return;
@@ -306,17 +377,19 @@ class GlobalHandles::NodeBlock {
}
}
- void DecreaseUses(GlobalHandles* global_handles) {
+ void DecreaseUses() {
ASSERT(used_nodes_ > 0);
if (--used_nodes_ == 0) {
if (next_used_ != NULL) next_used_->prev_used_ = prev_used_;
if (prev_used_ != NULL) prev_used_->next_used_ = next_used_;
- if (this == global_handles->first_used_block_) {
- global_handles->first_used_block_ = next_used_;
+ if (this == global_handles_->first_used_block_) {
+ global_handles_->first_used_block_ = next_used_;
}
}
}
+ GlobalHandles* global_handles() { return global_handles_; }
+
// Next block in the list of all blocks.
NodeBlock* next() const { return next_; }
@@ -330,9 +403,15 @@ class GlobalHandles::NodeBlock {
int used_nodes_;
NodeBlock* next_used_;
NodeBlock* prev_used_;
+ GlobalHandles* global_handles_;
};
+GlobalHandles* GlobalHandles::Node::GetGlobalHandles() {
+ return FindBlock()->global_handles();
+}
+
+
GlobalHandles::NodeBlock* GlobalHandles::Node::FindBlock() {
intptr_t ptr = reinterpret_cast<intptr_t>(this);
ptr = ptr - index_ * sizeof(Node);
@@ -342,13 +421,23 @@ GlobalHandles::NodeBlock* GlobalHandles::Node::FindBlock() {
}
-void GlobalHandles::Node::IncreaseBlockUses(GlobalHandles* global_handles) {
- FindBlock()->IncreaseUses(global_handles);
+void GlobalHandles::Node::IncreaseBlockUses() {
+ NodeBlock* node_block = FindBlock();
+ node_block->IncreaseUses();
+ GlobalHandles* global_handles = node_block->global_handles();
+ global_handles->isolate()->counters()->global_handles()->Increment();
+ global_handles->number_of_global_handles_++;
}
-void GlobalHandles::Node::DecreaseBlockUses(GlobalHandles* global_handles) {
- FindBlock()->DecreaseUses(global_handles);
+void GlobalHandles::Node::DecreaseBlockUses() {
+ NodeBlock* node_block = FindBlock();
+ GlobalHandles* global_handles = node_block->global_handles();
+ parameter_or_next_free_.next_free = global_handles->first_free_;
+ global_handles->first_free_ = this;
+ node_block->DecreaseUses();
+ global_handles->isolate()->counters()->global_handles()->Decrement();
+ global_handles->number_of_global_handles_--;
}
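
After this change a Node no longer receives its GlobalHandles as a parameter; it recovers the owning NodeBlock from its own address (stepping back over the index_ preceding nodes, then over the offset of the node array inside the block), and the block carries the back-pointer. A reduced sketch of that address arithmetic, assuming the node array sits at the front of the block as in NodeBlock (types and sizes here are illustrative):

#include <cassert>
#include <cstddef>
#include <cstdint>

struct NodeBlock;

struct Node {
  uint8_t index_;               // position of this node inside its block
  NodeBlock* FindBlock();
};

struct NodeBlock {
  static const int kSize = 4;   // V8 uses 256; 4 keeps the example small.
  Node nodes_[kSize];           // node array placed first, as in the real block
  int owner_tag_;               // stands in for the global_handles_ back-pointer
};

// Recover the containing block from a node's own address: step back over the
// preceding nodes, then over the offset of the array inside the block.
NodeBlock* Node::FindBlock() {
  intptr_t ptr = reinterpret_cast<intptr_t>(this);
  ptr -= index_ * static_cast<intptr_t>(sizeof(Node));
  ptr -= static_cast<intptr_t>(offsetof(NodeBlock, nodes_));
  return reinterpret_cast<NodeBlock*>(ptr);
}

int main() {
  NodeBlock block;
  block.owner_tag_ = 42;
  for (int i = 0; i < NodeBlock::kSize; ++i) {
    block.nodes_[i].index_ = static_cast<uint8_t>(i);
  }
  assert(block.nodes_[3].FindBlock() == &block);
  assert(block.nodes_[3].FindBlock()->owner_tag_ == 42);
  return 0;
}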
@@ -382,13 +471,12 @@ class GlobalHandles::NodeIterator {
GlobalHandles::GlobalHandles(Isolate* isolate)
: isolate_(isolate),
- number_of_weak_handles_(0),
- number_of_global_object_weak_handles_(0),
number_of_global_handles_(0),
first_block_(NULL),
first_used_block_(NULL),
first_free_(NULL),
- post_gc_processing_count_(0) {}
+ post_gc_processing_count_(0),
+ object_group_connections_(kObjectGroupConnectionsCapacity) {}
GlobalHandles::~GlobalHandles() {
@@ -403,17 +491,15 @@ GlobalHandles::~GlobalHandles() {
Handle<Object> GlobalHandles::Create(Object* value) {
- isolate_->counters()->global_handles()->Increment();
- number_of_global_handles_++;
if (first_free_ == NULL) {
- first_block_ = new NodeBlock(first_block_);
+ first_block_ = new NodeBlock(this, first_block_);
first_block_->PutNodesOnFreeList(&first_free_);
}
ASSERT(first_free_ != NULL);
// Take the first node in the free list.
Node* result = first_free_;
first_free_ = result->next_free();
- result->Acquire(value, this);
+ result->Acquire(value);
if (isolate_->heap()->InNewSpace(value) &&
!result->is_in_new_space_list()) {
new_space_nodes_.Add(result);
@@ -423,23 +509,28 @@ Handle<Object> GlobalHandles::Create(Object* value) {
}
+Handle<Object> GlobalHandles::CopyGlobal(Object** location) {
+ ASSERT(location != NULL);
+ return Node::FromLocation(location)->GetGlobalHandles()->Create(*location);
+}
+
+
void GlobalHandles::Destroy(Object** location) {
- isolate_->counters()->global_handles()->Decrement();
- number_of_global_handles_--;
- if (location == NULL) return;
- Node::FromLocation(location)->Release(this);
+ if (location != NULL) Node::FromLocation(location)->Release();
}
-void GlobalHandles::MakeWeak(Object** location, void* parameter,
- WeakReferenceCallback callback) {
- ASSERT(callback != NULL);
- Node::FromLocation(location)->MakeWeak(this, parameter, callback);
+void GlobalHandles::MakeWeak(Object** location,
+ void* parameter,
+ WeakCallback weak_callback,
+ RevivableCallback revivable_callback) {
+ Node::FromLocation(location)->MakeWeak(
+ parameter, weak_callback, revivable_callback);
}
void GlobalHandles::ClearWeakness(Object** location) {
- Node::FromLocation(location)->ClearWeakness(this);
+ Node::FromLocation(location)->ClearWeakness();
}
@@ -448,6 +539,11 @@ void GlobalHandles::MarkIndependent(Object** location) {
}
+void GlobalHandles::MarkPartiallyDependent(Object** location) {
+ Node::FromLocation(location)->MarkPartiallyDependent();
+}
+
+
bool GlobalHandles::IsIndependent(Object** location) {
return Node::FromLocation(location)->is_independent();
}
@@ -463,14 +559,6 @@ bool GlobalHandles::IsWeak(Object** location) {
}
-void GlobalHandles::SetWrapperClassId(Object** location, uint16_t class_id) {
- Node::FromLocation(location)->set_wrapper_class_id(class_id);
-}
-
-uint16_t GlobalHandles::GetWrapperClassId(Object** location) {
- return Node::FromLocation(location)->wrapper_class_id();
-}
-
void GlobalHandles::IterateWeakRoots(ObjectVisitor* v) {
for (NodeIterator it(this); !it.done(); it.Advance()) {
if (it.node()->IsWeakRetainer()) v->VisitPointer(it.node()->location());
@@ -478,16 +566,6 @@ void GlobalHandles::IterateWeakRoots(ObjectVisitor* v) {
}
-void GlobalHandles::IterateWeakRoots(WeakReferenceGuest f,
- WeakReferenceCallback callback) {
- for (NodeIterator it(this); !it.done(); it.Advance()) {
- if (it.node()->IsWeak() && it.node()->callback() == callback) {
- f(it.node()->object(), it.node()->parameter());
- }
- }
-}
-
-
void GlobalHandles::IdentifyWeakHandles(WeakSlotCallback f) {
for (NodeIterator it(this); !it.done(); it.Advance()) {
if (it.node()->IsWeak() && f(it.node()->location())) {
@@ -501,8 +579,9 @@ void GlobalHandles::IterateNewSpaceStrongAndDependentRoots(ObjectVisitor* v) {
for (int i = 0; i < new_space_nodes_.length(); ++i) {
Node* node = new_space_nodes_[i];
if (node->IsStrongRetainer() ||
- (node->IsWeakRetainer() && !node->is_independent())) {
- v->VisitPointer(node->location());
+ (node->IsWeakRetainer() && !node->is_independent() &&
+ !node->is_partially_dependent())) {
+ v->VisitPointer(node->location());
}
}
}
@@ -513,8 +592,8 @@ void GlobalHandles::IdentifyNewSpaceWeakIndependentHandles(
for (int i = 0; i < new_space_nodes_.length(); ++i) {
Node* node = new_space_nodes_[i];
ASSERT(node->is_in_new_space_list());
- if (node->is_independent() && node->IsWeak() &&
- f(isolate_->heap(), node->location())) {
+ if ((node->is_independent() || node->is_partially_dependent()) &&
+ node->IsWeak() && f(isolate_->heap(), node->location())) {
node->MarkPending();
}
}
@@ -525,15 +604,62 @@ void GlobalHandles::IterateNewSpaceWeakIndependentRoots(ObjectVisitor* v) {
for (int i = 0; i < new_space_nodes_.length(); ++i) {
Node* node = new_space_nodes_[i];
ASSERT(node->is_in_new_space_list());
- if (node->is_independent() && node->IsWeakRetainer()) {
+ if ((node->is_independent() || node->is_partially_dependent()) &&
+ node->IsWeakRetainer()) {
v->VisitPointer(node->location());
}
}
}
+bool GlobalHandles::IterateObjectGroups(ObjectVisitor* v,
+ WeakSlotCallbackWithHeap can_skip) {
+ ComputeObjectGroupsAndImplicitReferences();
+ int last = 0;
+ bool any_group_was_visited = false;
+ for (int i = 0; i < object_groups_.length(); i++) {
+ ObjectGroup* entry = object_groups_.at(i);
+ ASSERT(entry != NULL);
+
+ Object*** objects = entry->objects;
+ bool group_should_be_visited = false;
+ for (size_t j = 0; j < entry->length; j++) {
+ Object* object = *objects[j];
+ if (object->IsHeapObject()) {
+ if (!can_skip(isolate_->heap(), &object)) {
+ group_should_be_visited = true;
+ break;
+ }
+ }
+ }
+
+ if (!group_should_be_visited) {
+ object_groups_[last++] = entry;
+ continue;
+ }
+
+ // An object in the group requires visiting, so iterate over all
+ // objects in the group.
+ for (size_t j = 0; j < entry->length; ++j) {
+ Object* object = *objects[j];
+ if (object->IsHeapObject()) {
+ v->VisitPointer(&object);
+ any_group_was_visited = true;
+ }
+ }
+
+ // Once the entire group has been iterated over, set the object
+ // group to NULL so it won't be processed again.
+ delete entry;
+ object_groups_.at(i) = NULL;
+ }
+ object_groups_.Rewind(last);
+ return any_group_was_visited;
+}
+
+
bool GlobalHandles::PostGarbageCollectionProcessing(
- GarbageCollector collector) {
+ GarbageCollector collector, GCTracer* tracer) {
// Process weak global handle callbacks. This must be done after the
// GC is completely done, because the callbacks may invoke arbitrary
// API functions.
@@ -544,11 +670,19 @@ bool GlobalHandles::PostGarbageCollectionProcessing(
for (int i = 0; i < new_space_nodes_.length(); ++i) {
Node* node = new_space_nodes_[i];
ASSERT(node->is_in_new_space_list());
+ if (!node->IsRetainer()) {
+ // Free nodes do not have weak callbacks. Do not use them to compute
+ // the next_gc_likely_to_collect_more.
+ continue;
+ }
// Skip dependent handles. Their weak callbacks might expect to be
// called between two global garbage collection callbacks which
// are not called for minor collections.
- if (!node->is_independent()) continue;
- if (node->PostGarbageCollectionProcessing(isolate_, this)) {
+ if (!node->is_independent() && !node->is_partially_dependent()) {
+ continue;
+ }
+ node->clear_partially_dependent();
+ if (node->PostGarbageCollectionProcessing(isolate_)) {
if (initial_post_gc_processing_count != post_gc_processing_count_) {
// Weak callback triggered another GC and another round of
// PostGarbageCollection processing. The current node might
@@ -563,7 +697,13 @@ bool GlobalHandles::PostGarbageCollectionProcessing(
}
} else {
for (NodeIterator it(this); !it.done(); it.Advance()) {
- if (it.node()->PostGarbageCollectionProcessing(isolate_, this)) {
+ if (!it.node()->IsRetainer()) {
+ // Free nodes do not have weak callbacks. Do not use them to compute
+ // the next_gc_likely_to_collect_more.
+ continue;
+ }
+ it.node()->clear_partially_dependent();
+ if (it.node()->PostGarbageCollectionProcessing(isolate_)) {
if (initial_post_gc_processing_count != post_gc_processing_count_) {
// See the comment above.
return next_gc_likely_to_collect_more;
@@ -579,10 +719,17 @@ bool GlobalHandles::PostGarbageCollectionProcessing(
for (int i = 0; i < new_space_nodes_.length(); ++i) {
Node* node = new_space_nodes_[i];
ASSERT(node->is_in_new_space_list());
- if (node->IsRetainer() && isolate_->heap()->InNewSpace(node->object())) {
- new_space_nodes_[last++] = node;
+ if (node->IsRetainer()) {
+ if (isolate_->heap()->InNewSpace(node->object())) {
+ new_space_nodes_[last++] = node;
+ tracer->increment_nodes_copied_in_new_space();
+ } else {
+ node->set_in_new_space_list(false);
+ tracer->increment_nodes_promoted();
+ }
} else {
node->set_in_new_space_list(false);
+ tracer->increment_nodes_died_in_new_space();
}
}
new_space_nodes_.Rewind(last);
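
Both IterateObjectGroups and the new-space bookkeeping above prune a list in place: surviving entries are copied forward to index last and the tail is dropped with Rewind(last), so no scratch list is allocated during GC. For reference, the same compaction idiom on an ordinary std::vector (names are illustrative):

#include <cassert>
#include <vector>

// Keep only the elements that satisfy `keep`, preserving order, without
// allocating a scratch list -- the analogue of the last/Rewind pattern.
template <class T, class Pred>
void CompactInPlace(std::vector<T>& items, Pred keep) {
  size_t last = 0;
  for (size_t i = 0; i < items.size(); ++i) {
    if (keep(items[i])) items[last++] = items[i];
  }
  items.resize(last);  // Rewind(last)
}

int main() {
  std::vector<int> nodes = {1, 2, 3, 4, 5, 6};
  CompactInPlace(nodes, [](int n) { return n % 2 == 0; });  // keep "survivors"
  assert((nodes == std::vector<int>{2, 4, 6}));
  return 0;
}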
@@ -610,7 +757,7 @@ void GlobalHandles::IterateAllRoots(ObjectVisitor* v) {
void GlobalHandles::IterateAllRootsWithClassIds(ObjectVisitor* v) {
for (NodeIterator it(this); !it.done(); it.Advance()) {
- if (it.node()->has_wrapper_class_id() && it.node()->IsRetainer()) {
+ if (it.node()->IsRetainer() && it.node()->has_wrapper_class_id()) {
v->VisitEmbedderReference(it.node()->location(),
it.node()->wrapper_class_id());
}
@@ -618,6 +765,40 @@ void GlobalHandles::IterateAllRootsWithClassIds(ObjectVisitor* v) {
}
+void GlobalHandles::IterateAllRootsInNewSpaceWithClassIds(ObjectVisitor* v) {
+ for (int i = 0; i < new_space_nodes_.length(); ++i) {
+ Node* node = new_space_nodes_[i];
+ if (node->IsRetainer() && node->has_wrapper_class_id()) {
+ v->VisitEmbedderReference(node->location(),
+ node->wrapper_class_id());
+ }
+ }
+}
+
+
+int GlobalHandles::NumberOfWeakHandles() {
+ int count = 0;
+ for (NodeIterator it(this); !it.done(); it.Advance()) {
+ if (it.node()->IsWeakRetainer()) {
+ count++;
+ }
+ }
+ return count;
+}
+
+
+int GlobalHandles::NumberOfGlobalObjectWeakHandles() {
+ int count = 0;
+ for (NodeIterator it(this); !it.done(); it.Advance()) {
+ if (it.node()->IsWeakRetainer() &&
+ it.node()->object()->IsJSGlobalObject()) {
+ count++;
+ }
+ }
+ return count;
+}
+
+
void GlobalHandles::RecordStats(HeapStats* stats) {
*stats->global_handle_count = 0;
*stats->weak_global_handle_count = 0;
@@ -664,6 +845,7 @@ void GlobalHandles::PrintStats() {
PrintF(" # total = %d\n", total);
}
+
void GlobalHandles::Print() {
PrintF("Global handles:\n");
for (NodeIterator it(this); !it.done(); it.Advance()) {
@@ -690,7 +872,23 @@ void GlobalHandles::AddObjectGroup(Object*** handles,
if (info != NULL) info->Dispose();
return;
}
- object_groups_.Add(ObjectGroup::New(handles, length, info));
+ ObjectGroup* group = new ObjectGroup(length);
+ for (size_t i = 0; i < length; ++i)
+ group->objects[i] = handles[i];
+ group->info = info;
+ object_groups_.Add(group);
+}
+
+
+void GlobalHandles::SetObjectGroupId(Object** handle,
+ UniqueId id) {
+ object_group_connections_.Add(ObjectGroupConnection(id, handle));
+}
+
+
+void GlobalHandles::SetRetainedObjectInfo(UniqueId id,
+ RetainedObjectInfo* info) {
+ retainer_infos_.Add(ObjectGroupRetainerInfo(id, info));
}
@@ -704,23 +902,45 @@ void GlobalHandles::AddImplicitReferences(HeapObject** parent,
}
#endif
if (length == 0) return;
- implicit_ref_groups_.Add(ImplicitRefGroup::New(parent, children, length));
+ ImplicitRefGroup* group = new ImplicitRefGroup(parent, length);
+ for (size_t i = 0; i < length; ++i)
+ group->children[i] = children[i];
+ implicit_ref_groups_.Add(group);
+}
+
+
+void GlobalHandles::SetReferenceFromGroup(UniqueId id, Object** child) {
+ ASSERT(!Node::FromLocation(child)->is_independent());
+ implicit_ref_connections_.Add(ObjectGroupConnection(id, child));
+}
+
+
+void GlobalHandles::SetReference(HeapObject** parent, Object** child) {
+ ASSERT(!Node::FromLocation(child)->is_independent());
+ ImplicitRefGroup* group = new ImplicitRefGroup(parent, 1);
+ group->children[0] = child;
+ implicit_ref_groups_.Add(group);
}
void GlobalHandles::RemoveObjectGroups() {
- for (int i = 0; i < object_groups_.length(); i++) {
- object_groups_.at(i)->Dispose();
- }
+ for (int i = 0; i < object_groups_.length(); i++)
+ delete object_groups_.at(i);
object_groups_.Clear();
+ for (int i = 0; i < retainer_infos_.length(); ++i)
+ retainer_infos_[i].info->Dispose();
+ retainer_infos_.Clear();
+ object_group_connections_.Clear();
+ object_group_connections_.Initialize(kObjectGroupConnectionsCapacity);
}
void GlobalHandles::RemoveImplicitRefGroups() {
for (int i = 0; i < implicit_ref_groups_.length(); i++) {
- implicit_ref_groups_.at(i)->Dispose();
+ delete implicit_ref_groups_.at(i);
}
implicit_ref_groups_.Clear();
+ implicit_ref_connections_.Clear();
}
@@ -729,4 +949,172 @@ void GlobalHandles::TearDown() {
}
+void GlobalHandles::ComputeObjectGroupsAndImplicitReferences() {
+ if (object_group_connections_.length() == 0) {
+ for (int i = 0; i < retainer_infos_.length(); ++i)
+ retainer_infos_[i].info->Dispose();
+ retainer_infos_.Clear();
+ implicit_ref_connections_.Clear();
+ return;
+ }
+
+ object_group_connections_.Sort();
+ retainer_infos_.Sort();
+ implicit_ref_connections_.Sort();
+
+ int info_index = 0; // For iterating retainer_infos_.
+ UniqueId current_group_id(0);
+ int current_group_start = 0;
+
+ int current_implicit_refs_start = 0;
+ int current_implicit_refs_end = 0;
+ for (int i = 0; i <= object_group_connections_.length(); ++i) {
+ if (i == 0)
+ current_group_id = object_group_connections_[i].id;
+ if (i == object_group_connections_.length() ||
+ current_group_id != object_group_connections_[i].id) {
+      // Group detected: objects in indices [current_group_start, i).
+
+ // Find out which implicit references are related to this group. (We want
+ // to ignore object groups which only have 1 object, but that object is
+      // needed as a representative object for the implicit reference group.)
+ while (current_implicit_refs_start < implicit_ref_connections_.length() &&
+ implicit_ref_connections_[current_implicit_refs_start].id <
+ current_group_id)
+ ++current_implicit_refs_start;
+ current_implicit_refs_end = current_implicit_refs_start;
+ while (current_implicit_refs_end < implicit_ref_connections_.length() &&
+ implicit_ref_connections_[current_implicit_refs_end].id ==
+ current_group_id)
+ ++current_implicit_refs_end;
+
+ if (current_implicit_refs_end > current_implicit_refs_start) {
+ // Find a representative object for the implicit references.
+ HeapObject** representative = NULL;
+ for (int j = current_group_start; j < i; ++j) {
+ Object** object = object_group_connections_[j].object;
+ if ((*object)->IsHeapObject()) {
+ representative = reinterpret_cast<HeapObject**>(object);
+ break;
+ }
+ }
+ if (representative) {
+ ImplicitRefGroup* group = new ImplicitRefGroup(
+ representative,
+ current_implicit_refs_end - current_implicit_refs_start);
+ for (int j = current_implicit_refs_start;
+ j < current_implicit_refs_end;
+ ++j) {
+ group->children[j - current_implicit_refs_start] =
+ implicit_ref_connections_[j].object;
+ }
+ implicit_ref_groups_.Add(group);
+ }
+ current_implicit_refs_start = current_implicit_refs_end;
+ }
+
+ // Find a RetainedObjectInfo for the group.
+ RetainedObjectInfo* info = NULL;
+ while (info_index < retainer_infos_.length() &&
+ retainer_infos_[info_index].id < current_group_id) {
+ retainer_infos_[info_index].info->Dispose();
+ ++info_index;
+ }
+ if (info_index < retainer_infos_.length() &&
+ retainer_infos_[info_index].id == current_group_id) {
+ // This object group has an associated ObjectGroupRetainerInfo.
+ info = retainer_infos_[info_index].info;
+ ++info_index;
+ }
+
+ // Ignore groups which only contain one object.
+ if (i > current_group_start + 1) {
+ ObjectGroup* group = new ObjectGroup(i - current_group_start);
+ for (int j = current_group_start; j < i; ++j) {
+ group->objects[j - current_group_start] =
+ object_group_connections_[j].object;
+ }
+ group->info = info;
+ object_groups_.Add(group);
+ } else if (info) {
+ info->Dispose();
+ }
+
+ if (i < object_group_connections_.length()) {
+ current_group_id = object_group_connections_[i].id;
+ current_group_start = i;
+ }
+ }
+ }
+ object_group_connections_.Clear();
+ object_group_connections_.Initialize(kObjectGroupConnectionsCapacity);
+ retainer_infos_.Clear();
+ implicit_ref_connections_.Clear();
+}
+
+
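
ComputeObjectGroupsAndImplicitReferences above builds groups by sorting the flat (UniqueId, handle) connection list and then sweeping it once, treating each maximal run of equal ids as one group and dropping runs of length one. The core run-detection pass, reduced to ints standing in for UniqueId and handle pointers (a sketch, not the V8 code):

#include <algorithm>
#include <cassert>
#include <utility>
#include <vector>

// connections: (group id, object) pairs in arbitrary order.
// Returns one vector of objects per group that has at least two members.
std::vector<std::vector<int>> BuildGroups(
    std::vector<std::pair<int, int>> connections) {
  std::sort(connections.begin(), connections.end());  // equal ids become runs
  std::vector<std::vector<int>> groups;
  size_t start = 0;
  for (size_t i = 0; i <= connections.size(); ++i) {
    bool run_ends = i == connections.size() ||
                    connections[i].first != connections[start].first;
    if (!run_ends) continue;
    if (i - start > 1) {  // ignore groups that contain only one object
      std::vector<int> group;
      for (size_t j = start; j < i; ++j) group.push_back(connections[j].second);
      groups.push_back(std::move(group));
    }
    start = i;
  }
  return groups;
}

int main() {
  std::vector<std::vector<int>> groups =
      BuildGroups({{2, 30}, {1, 10}, {2, 31}, {3, 99}, {1, 11}});
  assert(groups.size() == 2);  // ids 1 and 2 form groups, id 3 is dropped
  assert(groups[0].size() == 2 && groups[1].size() == 2);
  return 0;
}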
+EternalHandles::EternalHandles() : size_(0) {
+ for (unsigned i = 0; i < ARRAY_SIZE(singleton_handles_); i++) {
+ singleton_handles_[i] = kInvalidIndex;
+ }
+}
+
+
+EternalHandles::~EternalHandles() {
+ for (int i = 0; i < blocks_.length(); i++) delete[] blocks_[i];
+}
+
+
+void EternalHandles::IterateAllRoots(ObjectVisitor* visitor) {
+ int limit = size_;
+ for (int i = 0; i < blocks_.length(); i++) {
+ ASSERT(limit > 0);
+ Object** block = blocks_[i];
+ visitor->VisitPointers(block, block + Min(limit, kSize));
+ limit -= kSize;
+ }
+}
+
+
+void EternalHandles::IterateNewSpaceRoots(ObjectVisitor* visitor) {
+ for (int i = 0; i < new_space_indices_.length(); i++) {
+ visitor->VisitPointer(GetLocation(new_space_indices_[i]));
+ }
+}
+
+
+void EternalHandles::PostGarbageCollectionProcessing(Heap* heap) {
+ int last = 0;
+ for (int i = 0; i < new_space_indices_.length(); i++) {
+ int index = new_space_indices_[i];
+ if (heap->InNewSpace(*GetLocation(index))) {
+ new_space_indices_[last++] = index;
+ }
+ }
+ new_space_indices_.Rewind(last);
+}
+
+
+void EternalHandles::Create(Isolate* isolate, Object* object, int* index) {
+ ASSERT_EQ(kInvalidIndex, *index);
+ if (object == NULL) return;
+ ASSERT_NE(isolate->heap()->the_hole_value(), object);
+ int block = size_ >> kShift;
+ int offset = size_ & kMask;
+  // A new block is needed when the offset wraps to zero.
+ if (offset == 0) {
+ Object** next_block = new Object*[kSize];
+ Object* the_hole = isolate->heap()->the_hole_value();
+ MemsetPointer(next_block, the_hole, kSize);
+ blocks_.Add(next_block);
+ }
+ ASSERT_EQ(isolate->heap()->the_hole_value(), blocks_[block][offset]);
+ blocks_[block][offset] = object;
+ if (isolate->heap()->InNewSpace(object)) {
+ new_space_indices_.Add(size_);
+ }
+ *index = size_++;
+}
+
+
} } // namespace v8::internal
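
The EternalHandles code above addresses its storage with a single integer index: the high bits pick the block (index >> kShift), the low bits the slot within it (index & kMask), and a fresh block is allocated lazily whenever the offset wraps to zero. A compact sketch of that blocked-index scheme with ordinary vectors (the kShift value here is illustrative; the real constant is not shown in this hunk):

#include <cassert>
#include <vector>

class BlockedStore {
 public:
  static const int kShift = 8;
  static const int kSize = 1 << kShift;  // 256 slots per block
  static const int kMask = kSize - 1;

  // Append a value and return its stable index.
  int Create(int value) {
    int block = size_ >> kShift;
    int offset = size_ & kMask;
    if (offset == 0) {                              // grow lazily
      blocks_.push_back(std::vector<int>(kSize, 0));
    }
    blocks_[block][offset] = value;
    return size_++;
  }

  int* GetLocation(int index) {
    return &blocks_[index >> kShift][index & kMask];
  }

 private:
  int size_ = 0;
  std::vector<std::vector<int>> blocks_;
};

int main() {
  BlockedStore store;
  int first = store.Create(7);
  for (int i = 0; i < 300; ++i) store.Create(i);  // forces a second block
  assert(*store.GetLocation(first) == 7);
  return 0;
}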
diff --git a/deps/v8/src/global-handles.h b/deps/v8/src/global-handles.h
index 866317ee1..4b46aac05 100644
--- a/deps/v8/src/global-handles.h
+++ b/deps/v8/src/global-handles.h
@@ -28,87 +28,98 @@
#ifndef V8_GLOBAL_HANDLES_H_
#define V8_GLOBAL_HANDLES_H_
+#include "../include/v8.h"
#include "../include/v8-profiler.h"
+#include "handles.h"
#include "list.h"
+#include "v8utils.h"
namespace v8 {
namespace internal {
+class GCTracer;
+class HeapStats;
+class ObjectVisitor;
+
// Structure for tracking global handles.
// A single list keeps all the allocated global handles.
// Destroyed handles stay in the list but are added to the free list.
// At GC the destroyed global handles are removed from the free list
// and deallocated.
+// Data structures for tracking object groups and implicit references.
+
// An object group is treated like a single JS object: if one of the objects
// in the group is alive, all objects in the same group are considered alive.
// An object group is used to simulate object relationship in a DOM tree.
-class ObjectGroup {
- public:
- static ObjectGroup* New(Object*** handles,
- size_t length,
- v8::RetainedObjectInfo* info) {
- ASSERT(length > 0);
- ObjectGroup* group = reinterpret_cast<ObjectGroup*>(
- malloc(OFFSET_OF(ObjectGroup, objects_[length])));
- group->length_ = length;
- group->info_ = info;
- CopyWords(group->objects_, handles, static_cast<int>(length));
- return group;
- }
- void Dispose() {
- if (info_ != NULL) info_->Dispose();
- free(this);
- }
-
- size_t length_;
- v8::RetainedObjectInfo* info_;
- Object** objects_[1]; // Variable sized array.
+// An implicit references group consists of two parts: a parent object and a
+// list of children objects. If the parent is alive, all the children are alive
+// too.
- private:
- void* operator new(size_t size);
- void operator delete(void* p);
+struct ObjectGroup {
+ explicit ObjectGroup(size_t length)
+ : info(NULL), length(length) {
+ ASSERT(length > 0);
+ objects = new Object**[length];
+ }
~ObjectGroup();
- DISALLOW_IMPLICIT_CONSTRUCTORS(ObjectGroup);
+
+ v8::RetainedObjectInfo* info;
+ Object*** objects;
+ size_t length;
};
-// An implicit references group consists of two parts: a parent object and
-// a list of children objects. If the parent is alive, all the children
-// are alive too.
-class ImplicitRefGroup {
- public:
- static ImplicitRefGroup* New(HeapObject** parent,
- Object*** children,
- size_t length) {
+struct ImplicitRefGroup {
+ ImplicitRefGroup(HeapObject** parent, size_t length)
+ : parent(parent), length(length) {
ASSERT(length > 0);
- ImplicitRefGroup* group = reinterpret_cast<ImplicitRefGroup*>(
- malloc(OFFSET_OF(ImplicitRefGroup, children_[length])));
- group->parent_ = parent;
- group->length_ = length;
- CopyWords(group->children_, children, static_cast<int>(length));
- return group;
+ children = new Object**[length];
}
+ ~ImplicitRefGroup();
+
+ HeapObject** parent;
+ Object*** children;
+ size_t length;
+};
- void Dispose() {
- free(this);
+
+// For internal bookkeeping.
+struct ObjectGroupConnection {
+ ObjectGroupConnection(UniqueId id, Object** object)
+ : id(id), object(object) {}
+
+ bool operator==(const ObjectGroupConnection& other) const {
+ return id == other.id;
}
- HeapObject** parent_;
- size_t length_;
- Object** children_[1]; // Variable sized array.
+ bool operator<(const ObjectGroupConnection& other) const {
+ return id < other.id;
+ }
- private:
- void* operator new(size_t size);
- void operator delete(void* p);
- ~ImplicitRefGroup();
- DISALLOW_IMPLICIT_CONSTRUCTORS(ImplicitRefGroup);
+ UniqueId id;
+ Object** object;
};
-typedef void (*WeakReferenceGuest)(Object* object, void* parameter);
+struct ObjectGroupRetainerInfo {
+ ObjectGroupRetainerInfo(UniqueId id, RetainedObjectInfo* info)
+ : id(id), info(info) {}
+
+ bool operator==(const ObjectGroupRetainerInfo& other) const {
+ return id == other.id;
+ }
+
+ bool operator<(const ObjectGroupRetainerInfo& other) const {
+ return id < other.id;
+ }
+
+ UniqueId id;
+ RetainedObjectInfo* info;
+};
+
class GlobalHandles {
public:
@@ -117,8 +128,14 @@ class GlobalHandles {
// Creates a new global handle that is alive until Destroy is called.
Handle<Object> Create(Object* value);
+ // Copy a global handle
+ static Handle<Object> CopyGlobal(Object** location);
+
// Destroy a global handle.
- void Destroy(Object** location);
+ static void Destroy(Object** location);
+
+ typedef WeakCallbackData<v8::Value, void>::Callback WeakCallback;
+ typedef WeakReferenceCallbacks<v8::Value, void>::Revivable RevivableCallback;
// Make the global handle weak and set the callback parameter for the
// handle. When the garbage collector recognizes that only weak global
@@ -126,34 +143,39 @@ class GlobalHandles {
// function is invoked (for each handle) with the handle and corresponding
// parameter as arguments. Note: cleared means set to Smi::FromInt(0). The
// reason is that Smi::FromInt(0) does not change during garbage collection.
- void MakeWeak(Object** location,
- void* parameter,
- WeakReferenceCallback callback);
+ static void MakeWeak(Object** location,
+ void* parameter,
+ WeakCallback weak_callback,
+ RevivableCallback revivable_callback);
+
+ static inline void MakeWeak(Object** location,
+ void* parameter,
+ RevivableCallback revivable_callback) {
+ MakeWeak(location, parameter, NULL, revivable_callback);
+ }
- static void SetWrapperClassId(Object** location, uint16_t class_id);
- static uint16_t GetWrapperClassId(Object** location);
+ void RecordStats(HeapStats* stats);
// Returns the current number of weak handles.
- int NumberOfWeakHandles() { return number_of_weak_handles_; }
-
- void RecordStats(HeapStats* stats);
+ int NumberOfWeakHandles();
// Returns the current number of weak handles to global objects.
// These handles are also included in NumberOfWeakHandles().
- int NumberOfGlobalObjectWeakHandles() {
- return number_of_global_object_weak_handles_;
- }
+ int NumberOfGlobalObjectWeakHandles();
// Returns the current number of handles to global objects.
- int NumberOfGlobalHandles() {
+ int global_handles_count() const {
return number_of_global_handles_;
}
// Clear the weakness of a global handle.
- void ClearWeakness(Object** location);
+ static void ClearWeakness(Object** location);
  // Mark a global handle as independent.
- void MarkIndependent(Object** location);
+ static void MarkIndependent(Object** location);
+
+  // Mark the reference to this object as externally unreachable.
+ static void MarkPartiallyDependent(Object** location);
static bool IsIndependent(Object** location);
@@ -165,7 +187,8 @@ class GlobalHandles {
// Process pending weak handles.
// Returns true if next major GC is likely to collect more garbage.
- bool PostGarbageCollectionProcessing(GarbageCollector collector);
+ bool PostGarbageCollectionProcessing(GarbageCollector collector,
+ GCTracer* tracer);
// Iterates over all strong handles.
void IterateStrongRoots(ObjectVisitor* v);
@@ -176,13 +199,13 @@ class GlobalHandles {
// Iterates over all handles that have embedder-assigned class ID.
void IterateAllRootsWithClassIds(ObjectVisitor* v);
+ // Iterates over all handles in the new space that have embedder-assigned
+ // class ID.
+ void IterateAllRootsInNewSpaceWithClassIds(ObjectVisitor* v);
+
// Iterates over all weak roots in heap.
void IterateWeakRoots(ObjectVisitor* v);
- // Iterates over weak roots that are bound to a given callback.
- void IterateWeakRoots(WeakReferenceGuest f,
- WeakReferenceCallback callback);
-
// Find all weak handles satisfying the callback predicate, mark
// them as pending.
void IdentifyWeakHandles(WeakSlotCallback f);
@@ -195,20 +218,36 @@ class GlobalHandles {
// Iterates over strong and dependent handles. See the note above.
void IterateNewSpaceStrongAndDependentRoots(ObjectVisitor* v);
- // Finds weak independent handles satisfying the callback predicate
- // and marks them as pending. See the note above.
+ // Finds weak independent or partially independent handles satisfying
+ // the callback predicate and marks them as pending. See the note above.
void IdentifyNewSpaceWeakIndependentHandles(WeakSlotCallbackWithHeap f);
- // Iterates over weak independent handles. See the note above.
+ // Iterates over weak independent or partially independent handles.
+ // See the note above.
void IterateNewSpaceWeakIndependentRoots(ObjectVisitor* v);
+ // Iterate over objects in object groups that have at least one object
+ // which requires visiting. The callback has to return true if objects
+ // can be skipped and false otherwise.
+ bool IterateObjectGroups(ObjectVisitor* v, WeakSlotCallbackWithHeap can_skip);
+
// Add an object group.
// Should be only used in GC callback function before a collection.
- // All groups are destroyed after a mark-compact collection.
+ // All groups are destroyed after a garbage collection.
void AddObjectGroup(Object*** handles,
size_t length,
v8::RetainedObjectInfo* info);
+ // Associates handle with the object group represented by id.
+ // Should be only used in GC callback function before a collection.
+ // All groups are destroyed after a garbage collection.
+ void SetObjectGroupId(Object** handle, UniqueId id);
+
+ // Set RetainedObjectInfo for an object group. Should not be called more than
+ // once for a group. Should not be called for a group which contains no
+ // handles.
+ void SetRetainedObjectInfo(UniqueId id, RetainedObjectInfo* info);
+
// Add an implicit references' group.
// Should be only used in GC callback function before a collection.
// All groups are destroyed after a mark-compact collection.
@@ -216,11 +255,23 @@ class GlobalHandles {
Object*** children,
size_t length);
- // Returns the object groups.
- List<ObjectGroup*>* object_groups() { return &object_groups_; }
+ // Adds an implicit reference from a group to an object. Should be only used
+ // in GC callback function before a collection. All implicit references are
+ // destroyed after a mark-compact collection.
+ void SetReferenceFromGroup(UniqueId id, Object** child);
+
+ // Adds an implicit reference from a parent object to a child object. Should
+ // be only used in GC callback function before a collection. All implicit
+ // references are destroyed after a mark-compact collection.
+ void SetReference(HeapObject** parent, Object** child);
+
+ List<ObjectGroup*>* object_groups() {
+ ComputeObjectGroupsAndImplicitReferences();
+ return &object_groups_;
+ }
- // Returns the implicit references' groups.
List<ImplicitRefGroup*>* implicit_ref_groups() {
+ ComputeObjectGroupsAndImplicitReferences();
return &implicit_ref_groups_;
}
@@ -241,6 +292,15 @@ class GlobalHandles {
private:
explicit GlobalHandles(Isolate* isolate);
+ // Migrates data from the internal representation (object_group_connections_,
+ // retainer_infos_ and implicit_ref_connections_) to the public and more
+ // efficient representation (object_groups_ and implicit_ref_groups_).
+ void ComputeObjectGroupsAndImplicitReferences();
+
+ // v8::internal::List is inefficient even for a small number of elements if we
+ // don't assign any initial capacity.
+ static const int kObjectGroupConnectionsCapacity = 20;
+
// Internal node structures.
class Node;
class NodeBlock;
@@ -248,14 +308,6 @@ class GlobalHandles {
Isolate* isolate_;
- // Field always containing the number of weak and near-death handles.
- int number_of_weak_handles_;
-
- // Field always containing the number of weak and near-death handles
- // to global objects. These objects are also included in
- // number_of_weak_handles_.
- int number_of_global_object_weak_handles_;
-
// Field always containing the number of handles to global objects.
int number_of_global_handles_;
@@ -274,15 +326,92 @@ class GlobalHandles {
int post_gc_processing_count_;
+ // Object groups and implicit references, public and more efficient
+ // representation.
List<ObjectGroup*> object_groups_;
List<ImplicitRefGroup*> implicit_ref_groups_;
+ // Object groups and implicit references, temporary representation while
+ // constructing the groups.
+ List<ObjectGroupConnection> object_group_connections_;
+ List<ObjectGroupRetainerInfo> retainer_infos_;
+ List<ObjectGroupConnection> implicit_ref_connections_;
+
friend class Isolate;
DISALLOW_COPY_AND_ASSIGN(GlobalHandles);
};
+class EternalHandles {
+ public:
+ enum SingletonHandle {
+ I18N_TEMPLATE_ONE,
+ I18N_TEMPLATE_TWO,
+
+ NUMBER_OF_SINGLETON_HANDLES
+ };
+
+ EternalHandles();
+ ~EternalHandles();
+
+ int NumberOfHandles() { return size_; }
+
+ // Create an EternalHandle, overwriting the index.
+ void Create(Isolate* isolate, Object* object, int* index);
+
+ // Grab the handle for an existing EternalHandle.
+ inline Handle<Object> Get(int index) {
+ return Handle<Object>(GetLocation(index));
+ }
+
+ // Grab the handle for an existing SingletonHandle.
+ inline Handle<Object> GetSingleton(SingletonHandle singleton) {
+ ASSERT(Exists(singleton));
+ return Get(singleton_handles_[singleton]);
+ }
+
+ // Checks whether a SingletonHandle has been assigned.
+ inline bool Exists(SingletonHandle singleton) {
+ return singleton_handles_[singleton] != kInvalidIndex;
+ }
+
+ // Assigns a SingletonHandle to an empty slot and returns the handle.
+ Handle<Object> CreateSingleton(Isolate* isolate,
+ Object* object,
+ SingletonHandle singleton) {
+ Create(isolate, object, &singleton_handles_[singleton]);
+ return Get(singleton_handles_[singleton]);
+ }
+
+ // Iterates over all handles.
+ void IterateAllRoots(ObjectVisitor* visitor);
+ // Iterates over all handles which might be in new space.
+ void IterateNewSpaceRoots(ObjectVisitor* visitor);
+ // Rebuilds new space list.
+ void PostGarbageCollectionProcessing(Heap* heap);
+
+ private:
+ static const int kInvalidIndex = -1;
+ static const int kShift = 8;
+ static const int kSize = 1 << kShift;
+ static const int kMask = 0xff;
+
+ // Gets the slot for an index.
+ inline Object** GetLocation(int index) {
+ ASSERT(index >= 0 && index < size_);
+ return &blocks_[index >> kShift][index & kMask];
+ }
+
+ int size_;
+ List<Object**> blocks_;
+ List<int> new_space_indices_;
+ int singleton_handles_[NUMBER_OF_SINGLETON_HANDLES];
+
+ DISALLOW_COPY_AND_ASSIGN(EternalHandles);
+};
+
+
} } // namespace v8::internal
#endif // V8_GLOBAL_HANDLES_H_
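
For context on the EternalHandles class added above: it stores handles in fixed-size blocks of 256 slots and splits an index into a block number (index >> kShift) and a slot offset (index & kMask). The following standalone sketch models only that indexing scheme; the container and element types are simplified stand-ins (std::vector and void*), not V8's internal List and Object**.

#include <cassert>
#include <vector>

// Toy model of the block/slot indexing used by EternalHandles (C++11).
class EternalSlots {
 public:
  static const int kShift = 8;
  static const int kSize = 1 << kShift;  // 256 slots per block
  static const int kMask = 0xff;

  // Appends a value and returns its index (analogous to Create()).
  int Create(void* value) {
    if (size_ % kSize == 0) blocks_.push_back(std::vector<void*>(kSize));
    int index = size_++;
    *GetLocationUnchecked(index) = value;
    return index;
  }

  // Returns the slot for an index (analogous to GetLocation()).
  void** GetLocation(int index) {
    assert(index >= 0 && index < size_);
    return GetLocationUnchecked(index);
  }

 private:
  void** GetLocationUnchecked(int index) {
    // High bits select the block, low bits select the slot inside it.
    return &blocks_[index >> kShift][index & kMask];
  }

  int size_ = 0;
  std::vector<std::vector<void*> > blocks_;
};
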
diff --git a/deps/v8/src/globals.h b/deps/v8/src/globals.h
index babffbf65..3456030b7 100644
--- a/deps/v8/src/globals.h
+++ b/deps/v8/src/globals.h
@@ -28,37 +28,22 @@
#ifndef V8_GLOBALS_H_
#define V8_GLOBALS_H_
-// Define V8_INFINITY
-#define V8_INFINITY INFINITY
-
-// GCC specific stuff
-#ifdef __GNUC__
-
-#define __GNUC_VERSION_FOR_INFTY__ (__GNUC__ * 10000 + __GNUC_MINOR__ * 100)
+#include "../include/v8stdint.h"
// Unfortunately, the INFINITY macro cannot be used with the '-pedantic'
// warning flag and certain versions of GCC due to a bug:
// http://gcc.gnu.org/bugzilla/show_bug.cgi?id=11931
// For now, we use the more involved template-based version from <limits>, but
// only when compiling with GCC versions affected by the bug (2.96.x - 4.0.x)
-// __GNUC_PREREQ is not defined in GCC for Mac OS X, so we define our own macro
-#if __GNUC_VERSION_FOR_INFTY__ >= 29600 && __GNUC_VERSION_FOR_INFTY__ < 40100
-#include <limits>
-#undef V8_INFINITY
-#define V8_INFINITY std::numeric_limits<double>::infinity()
-#endif
-#undef __GNUC_VERSION_FOR_INFTY__
-
-#endif // __GNUC__
-
-#ifdef _MSC_VER
-#undef V8_INFINITY
-#define V8_INFINITY HUGE_VAL
+#if V8_CC_GNU && V8_GNUC_PREREQ(2, 96, 0) && !V8_GNUC_PREREQ(4, 1, 0)
+# include <limits> // NOLINT
+# define V8_INFINITY std::numeric_limits<double>::infinity()
+#elif V8_CC_MSVC
+# define V8_INFINITY HUGE_VAL
+#else
+# define V8_INFINITY INFINITY
#endif
-
-#include "../include/v8stdint.h"
-
namespace v8 {
namespace internal {
@@ -67,9 +52,21 @@ namespace internal {
// http://www.agner.org/optimize/calling_conventions.pdf
// or with gcc, run: "echo | gcc -E -dM -"
#if defined(_M_X64) || defined(__x86_64__)
+#if defined(__native_client__)
+// For Native Client builds of V8, use V8_TARGET_ARCH_ARM, so that V8
+// generates ARM machine code, together with a portable ARM simulator
+// compiled for the host architecture in question.
+//
+// Since Native Client is ILP-32 on all architectures, we use
+// V8_HOST_ARCH_IA32 on both 32- and 64-bit x86.
+#define V8_HOST_ARCH_IA32 1
+#define V8_HOST_ARCH_32_BIT 1
+#define V8_HOST_CAN_READ_UNALIGNED 1
+#else
#define V8_HOST_ARCH_X64 1
#define V8_HOST_ARCH_64_BIT 1
#define V8_HOST_CAN_READ_UNALIGNED 1
+#endif // __native_client__
#elif defined(_M_IX86) || defined(__i386__)
#define V8_HOST_ARCH_IA32 1
#define V8_HOST_ARCH_32_BIT 1
@@ -77,12 +74,6 @@ namespace internal {
#elif defined(__ARMEL__)
#define V8_HOST_ARCH_ARM 1
#define V8_HOST_ARCH_32_BIT 1
-// Some CPU-OS combinations allow unaligned access on ARM. We assume
-// that unaligned accesses are not allowed unless the build system
-// defines the CAN_USE_UNALIGNED_ACCESSES macro to be non-zero.
-#if CAN_USE_UNALIGNED_ACCESSES
-#define V8_HOST_CAN_READ_UNALIGNED 1
-#endif
#elif defined(__MIPSEL__)
#define V8_HOST_ARCH_MIPS 1
#define V8_HOST_ARCH_32_BIT 1
@@ -90,11 +81,21 @@ namespace internal {
#error Host architecture was not detected as supported by v8
#endif
+#if defined(__ARM_ARCH_7A__) || \
+ defined(__ARM_ARCH_7R__) || \
+ defined(__ARM_ARCH_7__)
+# define CAN_USE_ARMV7_INSTRUCTIONS 1
+# ifndef CAN_USE_VFP3_INSTRUCTIONS
+# define CAN_USE_VFP3_INSTRUCTIONS
+# endif
+#endif
+
+
// Target architecture detection. This may be set externally. If not, detect
// in the same way as the host architecture, that is, target the native
// environment as presented by the compiler.
-#if !defined(V8_TARGET_ARCH_X64) && !defined(V8_TARGET_ARCH_IA32) && \
- !defined(V8_TARGET_ARCH_ARM) && !defined(V8_TARGET_ARCH_MIPS)
+#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && \
+ !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS
#if defined(_M_X64) || defined(__x86_64__)
#define V8_TARGET_ARCH_X64 1
#elif defined(_M_IX86) || defined(__i386__)
@@ -109,18 +110,16 @@ namespace internal {
#endif
// Check for supported combinations of host and target architectures.
-#if defined(V8_TARGET_ARCH_IA32) && !defined(V8_HOST_ARCH_IA32)
+#if V8_TARGET_ARCH_IA32 && !V8_HOST_ARCH_IA32
#error Target architecture ia32 is only supported on ia32 host
#endif
-#if defined(V8_TARGET_ARCH_X64) && !defined(V8_HOST_ARCH_X64)
+#if V8_TARGET_ARCH_X64 && !V8_HOST_ARCH_X64
#error Target architecture x64 is only supported on x64 host
#endif
-#if (defined(V8_TARGET_ARCH_ARM) && \
- !(defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_ARM)))
+#if (V8_TARGET_ARCH_ARM && !(V8_HOST_ARCH_IA32 || V8_HOST_ARCH_ARM))
#error Target architecture arm is only supported on arm and ia32 host
#endif
-#if (defined(V8_TARGET_ARCH_MIPS) && \
- !(defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_MIPS)))
+#if (V8_TARGET_ARCH_MIPS && !(V8_HOST_ARCH_IA32 || V8_HOST_ARCH_MIPS))
#error Target architecture mips is only supported on mips and ia32 host
#endif
@@ -128,14 +127,27 @@ namespace internal {
// Setting USE_SIMULATOR explicitly from the build script will force
// the use of a simulated environment.
#if !defined(USE_SIMULATOR)
-#if (defined(V8_TARGET_ARCH_ARM) && !defined(V8_HOST_ARCH_ARM))
+#if (V8_TARGET_ARCH_ARM && !V8_HOST_ARCH_ARM)
#define USE_SIMULATOR 1
#endif
-#if (defined(V8_TARGET_ARCH_MIPS) && !defined(V8_HOST_ARCH_MIPS))
+#if (V8_TARGET_ARCH_MIPS && !V8_HOST_ARCH_MIPS)
#define USE_SIMULATOR 1
#endif
#endif
+// Determine architecture endianness (we only support little-endian).
+#if V8_TARGET_ARCH_IA32
+#define V8_TARGET_LITTLE_ENDIAN 1
+#elif V8_TARGET_ARCH_X64
+#define V8_TARGET_LITTLE_ENDIAN 1
+#elif V8_TARGET_ARCH_ARM
+#define V8_TARGET_LITTLE_ENDIAN 1
+#elif V8_TARGET_ARCH_MIPS
+#define V8_TARGET_LITTLE_ENDIAN 1
+#else
+#error Unknown target architecture endianness
+#endif
+
// Support for alternative bool type. This is only enabled if the code is
// compiled with USE_MYBOOL defined. This catches some nasty type bugs.
// For instance, 'bool b = "false";' results in b == true! This is a hidden
@@ -159,27 +171,32 @@ typedef byte* Address;
// Define our own macros for writing 64-bit constants. This is less fragile
// than defining __STDC_CONSTANT_MACROS before including <stdint.h>, and it
// works on compilers that don't have it (like MSVC).
-#if V8_HOST_ARCH_64_BIT
-#if defined(_MSC_VER)
-#define V8_UINT64_C(x) (x ## UI64)
-#define V8_INT64_C(x) (x ## I64)
-#define V8_INTPTR_C(x) (x ## I64)
-#define V8_PTR_PREFIX "ll"
-#elif defined(__MINGW64__)
-#define V8_UINT64_C(x) (x ## ULL)
-#define V8_INT64_C(x) (x ## LL)
-#define V8_INTPTR_C(x) (x ## LL)
-#define V8_PTR_PREFIX "I64"
+#if V8_CC_MSVC
+# define V8_UINT64_C(x) (x ## UI64)
+# define V8_INT64_C(x) (x ## I64)
+# if V8_HOST_ARCH_64_BIT
+# define V8_INTPTR_C(x) (x ## I64)
+# define V8_PTR_PREFIX "ll"
+# else
+# define V8_INTPTR_C(x) (x)
+# define V8_PTR_PREFIX ""
+# endif // V8_HOST_ARCH_64_BIT
+#elif V8_CC_MINGW64
+# define V8_UINT64_C(x) (x ## ULL)
+# define V8_INT64_C(x) (x ## LL)
+# define V8_INTPTR_C(x) (x ## LL)
+# define V8_PTR_PREFIX "I64"
+#elif V8_HOST_ARCH_64_BIT
+# define V8_UINT64_C(x) (x ## UL)
+# define V8_INT64_C(x) (x ## L)
+# define V8_INTPTR_C(x) (x ## L)
+# define V8_PTR_PREFIX "l"
#else
-#define V8_UINT64_C(x) (x ## UL)
-#define V8_INT64_C(x) (x ## L)
-#define V8_INTPTR_C(x) (x ## L)
-#define V8_PTR_PREFIX "l"
+# define V8_UINT64_C(x) (x ## ULL)
+# define V8_INT64_C(x) (x ## LL)
+# define V8_INTPTR_C(x) (x)
+# define V8_PTR_PREFIX ""
#endif
-#else // V8_HOST_ARCH_64_BIT
-#define V8_INTPTR_C(x) (x)
-#define V8_PTR_PREFIX ""
-#endif // V8_HOST_ARCH_64_BIT
// The following macro works on both 32 and 64-bit platforms.
// Usage: instead of writing 0x1234567890123456
@@ -212,12 +229,17 @@ const int kMinInt = -kMaxInt - 1;
const uint32_t kMaxUInt32 = 0xFFFFFFFFu;
-const int kCharSize = sizeof(char); // NOLINT
-const int kShortSize = sizeof(short); // NOLINT
-const int kIntSize = sizeof(int); // NOLINT
-const int kDoubleSize = sizeof(double); // NOLINT
-const int kIntptrSize = sizeof(intptr_t); // NOLINT
-const int kPointerSize = sizeof(void*); // NOLINT
+const int kCharSize = sizeof(char); // NOLINT
+const int kShortSize = sizeof(short); // NOLINT
+const int kIntSize = sizeof(int); // NOLINT
+const int kInt32Size = sizeof(int32_t); // NOLINT
+const int kInt64Size = sizeof(int64_t); // NOLINT
+const int kDoubleSize = sizeof(double); // NOLINT
+const int kIntptrSize = sizeof(intptr_t); // NOLINT
+const int kPointerSize = sizeof(void*); // NOLINT
+const int kRegisterSize = kPointerSize;
+const int kPCOnStackSize = kRegisterSize;
+const int kFPOnStackSize = kRegisterSize;
const int kDoubleSizeLog2 = 3;
@@ -228,10 +250,12 @@ const int kRandomStateSize = 2 * kIntSize;
const int kPointerSizeLog2 = 3;
const intptr_t kIntptrSignBit = V8_INT64_C(0x8000000000000000);
const uintptr_t kUintptrAllBitsSet = V8_UINT64_C(0xFFFFFFFFFFFFFFFF);
+const bool kIs64BitArch = true;
#else
const int kPointerSizeLog2 = 2;
const intptr_t kIntptrSignBit = 0x80000000;
const uintptr_t kUintptrAllBitsSet = 0xFFFFFFFFu;
+const bool kIs64BitArch = false;
#endif
const int kBitsPerByte = 8;
@@ -253,15 +277,17 @@ const int kBinary32ExponentShift = 23;
// other bits set.
const uint64_t kQuietNaNMask = static_cast<uint64_t>(0xfff) << 51;
-// ASCII/UTF-16 constants
+// Latin1/UTF-16 constants
// Code-point values in Unicode 4.0 are 21 bits wide.
// Code units in UTF-16 are 16 bits wide.
typedef uint16_t uc16;
typedef int32_t uc32;
-const int kASCIISize = kCharSize;
+const int kOneByteSize = kCharSize;
const int kUC16Size = sizeof(uc16); // NOLINT
-const uc32 kMaxAsciiCharCode = 0x7f;
-const uint32_t kMaxAsciiCharCodeU = 0x7fu;
+
+
+// Round up n to be a multiple of sz, where sz is a power of 2.
+#define ROUND_UP(n, sz) (((n) + ((sz) - 1)) & ~((sz) - 1))
// The expression OFFSET_OF(type, field) computes the byte-offset
@@ -304,9 +330,9 @@ F FUNCTION_CAST(Address addr) {
// A macro to disallow the evil copy constructor and operator= functions
// This should be used in the private: declarations for a class
-#define DISALLOW_COPY_AND_ASSIGN(TypeName) \
- TypeName(const TypeName&); \
- void operator=(const TypeName&)
+#define DISALLOW_COPY_AND_ASSIGN(TypeName) \
+ TypeName(const TypeName&) V8_DELETE; \
+ void operator=(const TypeName&) V8_DELETE
// A macro to disallow all the implicit constructors, namely the
@@ -315,42 +341,24 @@ F FUNCTION_CAST(Address addr) {
// This should be used in the private: declarations for a class
// that wants to prevent anyone from instantiating it. This is
// especially useful for classes containing only static methods.
-#define DISALLOW_IMPLICIT_CONSTRUCTORS(TypeName) \
- TypeName(); \
+#define DISALLOW_IMPLICIT_CONSTRUCTORS(TypeName) \
+ TypeName() V8_DELETE; \
DISALLOW_COPY_AND_ASSIGN(TypeName)
-// Define used for helping GCC to make better inlining. Don't bother for debug
-// builds. On GCC 3.4.5 using __attribute__((always_inline)) causes compilation
-// errors in debug build.
-#if defined(__GNUC__) && !defined(DEBUG)
-#if (__GNUC__ >= 4)
-#define INLINE(header) inline header __attribute__((always_inline))
-#define NO_INLINE(header) header __attribute__((noinline))
-#else
-#define INLINE(header) inline __attribute__((always_inline)) header
-#define NO_INLINE(header) __attribute__((noinline)) header
-#endif
-#elif defined(_MSC_VER) && !defined(DEBUG)
-#define INLINE(header) __forceinline header
-#define NO_INLINE(header) header
-#else
-#define INLINE(header) inline header
-#define NO_INLINE(header) header
-#endif
+// Newly written code should use V8_INLINE and V8_NOINLINE directly.
+#define INLINE(declarator) V8_INLINE declarator
+#define NO_INLINE(declarator) V8_NOINLINE declarator
-#if defined(__GNUC__) && __GNUC__ >= 4
-#define MUST_USE_RESULT __attribute__ ((warn_unused_result))
-#else
-#define MUST_USE_RESULT
-#endif
+// Newly written code should use V8_WARN_UNUSED_RESULT.
+#define MUST_USE_RESULT V8_WARN_UNUSED_RESULT
// Define DISABLE_ASAN macros.
#if defined(__has_feature)
#if __has_feature(address_sanitizer)
-#define DISABLE_ASAN __attribute__((no_address_safety_analysis))
+#define DISABLE_ASAN __attribute__((no_sanitize_address))
#endif
#endif
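
A small but easy-to-misread addition in globals.h is the ROUND_UP macro: it rounds n up to the nearest multiple of sz by adding sz - 1 and clearing the low bits, which is only valid when sz is a power of two. A self-contained check of the arithmetic (plain C++11, independent of the V8 headers):

#include <cstdio>

#define ROUND_UP(n, sz) (((n) + ((sz) - 1)) & ~((sz) - 1))

int main() {
  static_assert(ROUND_UP(37, 8) == 40, "37 + 7 = 44, masked down to 40");
  static_assert(ROUND_UP(40, 8) == 40, "exact multiples are unchanged");
  static_assert(ROUND_UP(1, 16) == 16, "small values round up to sz");
  std::printf("%d\n", ROUND_UP(37, 8));  // prints 40
  return 0;
}
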
diff --git a/deps/v8/src/handles-inl.h b/deps/v8/src/handles-inl.h
index 130798647..ec69c3fdb 100644
--- a/deps/v8/src/handles-inl.h
+++ b/deps/v8/src/handles-inl.h
@@ -32,54 +32,84 @@
#include "api.h"
#include "apiutils.h"
#include "handles.h"
+#include "heap.h"
#include "isolate.h"
namespace v8 {
namespace internal {
-inline Isolate* GetIsolateForHandle(Object* obj) {
- return Isolate::Current();
-}
-
-inline Isolate* GetIsolateForHandle(HeapObject* obj) {
- return obj->GetIsolate();
-}
-
template<typename T>
Handle<T>::Handle(T* obj) {
ASSERT(!obj->IsFailure());
- location_ = HandleScope::CreateHandle(obj, GetIsolateForHandle(obj));
+ location_ = HandleScope::CreateHandle(obj->GetIsolate(), obj);
}
template<typename T>
Handle<T>::Handle(T* obj, Isolate* isolate) {
ASSERT(!obj->IsFailure());
- location_ = HandleScope::CreateHandle(obj, isolate);
+ location_ = HandleScope::CreateHandle(isolate, obj);
+}
+
+
+template <typename T>
+inline bool Handle<T>::is_identical_to(const Handle<T> other) const {
+ ASSERT(location_ == NULL || !(*location_)->IsFailure());
+ if (location_ == other.location_) return true;
+ if (location_ == NULL || other.location_ == NULL) return false;
+ // Dereferencing deferred handles to check object equality is safe.
+ SLOW_ASSERT(IsDereferenceAllowed(NO_DEFERRED_CHECK) &&
+ other.IsDereferenceAllowed(NO_DEFERRED_CHECK));
+ return *location_ == *other.location_;
}
template <typename T>
inline T* Handle<T>::operator*() const {
- ASSERT(location_ != NULL);
- ASSERT(reinterpret_cast<Address>(*location_) != kHandleZapValue);
+ ASSERT(location_ != NULL && !(*location_)->IsFailure());
+ SLOW_ASSERT(IsDereferenceAllowed(INCLUDE_DEFERRED_CHECK));
return *BitCast<T**>(location_);
}
+template <typename T>
+inline T** Handle<T>::location() const {
+ ASSERT(location_ == NULL || !(*location_)->IsFailure());
+ SLOW_ASSERT(location_ == NULL ||
+ IsDereferenceAllowed(INCLUDE_DEFERRED_CHECK));
+ return location_;
+}
-HandleScope::HandleScope() {
- Isolate* isolate = Isolate::Current();
- v8::ImplementationUtilities::HandleScopeData* current =
- isolate->handle_scope_data();
- isolate_ = isolate;
- prev_next_ = current->next;
- prev_limit_ = current->limit;
- current->level++;
+#ifdef DEBUG
+template <typename T>
+bool Handle<T>::IsDereferenceAllowed(DereferenceCheckMode mode) const {
+ ASSERT(location_ != NULL);
+ Object* object = *BitCast<T**>(location_);
+ if (object->IsSmi()) return true;
+ HeapObject* heap_object = HeapObject::cast(object);
+ Heap* heap = heap_object->GetHeap();
+ Object** handle = reinterpret_cast<Object**>(location_);
+ Object** roots_array_start = heap->roots_array_start();
+ if (roots_array_start <= handle &&
+ handle < roots_array_start + Heap::kStrongRootListLength &&
+ heap->RootCanBeTreatedAsConstant(
+ static_cast<Heap::RootListIndex>(handle - roots_array_start))) {
+ return true;
+ }
+ if (!AllowHandleDereference::IsAllowed()) return false;
+ if (mode == INCLUDE_DEFERRED_CHECK &&
+ !AllowDeferredHandleDereference::IsAllowed()) {
+ // Accessing maps and internalized strings is safe.
+ if (heap_object->IsMap()) return true;
+ if (heap_object->IsInternalizedString()) return true;
+ return !heap->isolate()->IsDeferredHandle(handle);
+ }
+ return true;
}
+#endif
+
HandleScope::HandleScope(Isolate* isolate) {
- ASSERT(isolate == Isolate::Current());
v8::ImplementationUtilities::HandleScopeData* current =
isolate->handle_scope_data();
isolate_ = isolate;
@@ -90,35 +120,41 @@ HandleScope::HandleScope(Isolate* isolate) {
HandleScope::~HandleScope() {
- CloseScope();
+ CloseScope(isolate_, prev_next_, prev_limit_);
}
-void HandleScope::CloseScope() {
- ASSERT(isolate_ == Isolate::Current());
+
+void HandleScope::CloseScope(Isolate* isolate,
+ Object** prev_next,
+ Object** prev_limit) {
v8::ImplementationUtilities::HandleScopeData* current =
- isolate_->handle_scope_data();
- current->next = prev_next_;
+ isolate->handle_scope_data();
+
+ std::swap(current->next, prev_next);
current->level--;
- if (current->limit != prev_limit_) {
- current->limit = prev_limit_;
- DeleteExtensions(isolate_);
- }
-#ifdef DEBUG
- ZapRange(prev_next_, prev_limit_);
+ if (current->limit != prev_limit) {
+ current->limit = prev_limit;
+ DeleteExtensions(isolate);
+#ifdef ENABLE_HANDLE_ZAPPING
+ ZapRange(current->next, prev_limit);
+ } else {
+ ZapRange(current->next, prev_next);
#endif
+ }
}
template <typename T>
Handle<T> HandleScope::CloseAndEscape(Handle<T> handle_value) {
- T* value = *handle_value;
- // Throw away all handles in the current scope.
- CloseScope();
v8::ImplementationUtilities::HandleScopeData* current =
isolate_->handle_scope_data();
+
+ T* value = *handle_value;
+ // Throw away all handles in the current scope.
+ CloseScope(isolate_, prev_next_, prev_limit_);
// Allocate one handle in the parent scope.
ASSERT(current->level > 0);
- Handle<T> result(CreateHandle<T>(value, isolate_));
+ Handle<T> result(CreateHandle<T>(isolate_, value));
// Reinitialize the current scope (so that it's ready
// to be used or closed again).
prev_next_ = current->next;
@@ -129,13 +165,13 @@ Handle<T> HandleScope::CloseAndEscape(Handle<T> handle_value) {
template <typename T>
-T** HandleScope::CreateHandle(T* value, Isolate* isolate) {
- ASSERT(isolate == Isolate::Current());
+T** HandleScope::CreateHandle(Isolate* isolate, T* value) {
+ ASSERT(AllowHandleAllocation::IsAllowed());
v8::ImplementationUtilities::HandleScopeData* current =
isolate->handle_scope_data();
internal::Object** cur = current->next;
- if (cur == current->limit) cur = Extend();
+ if (cur == current->limit) cur = Extend(isolate);
// Update the current next field, set the value in the created
// handle, and return the result.
ASSERT(cur < current->limit);
@@ -148,35 +184,32 @@ T** HandleScope::CreateHandle(T* value, Isolate* isolate) {
#ifdef DEBUG
-inline NoHandleAllocation::NoHandleAllocation() {
- Isolate* isolate = Isolate::Current();
+inline SealHandleScope::SealHandleScope(Isolate* isolate) : isolate_(isolate) {
+ // Make sure the current thread is allowed to create handles to begin with.
+ CHECK(AllowHandleAllocation::IsAllowed());
v8::ImplementationUtilities::HandleScopeData* current =
- isolate->handle_scope_data();
-
- active_ = !isolate->optimizing_compiler_thread()->IsOptimizerThread();
- if (active_) {
- // Shrink the current handle scope to make it impossible to do
- // handle allocations without an explicit handle scope.
- current->limit = current->next;
-
- level_ = current->level;
- current->level = 0;
- }
+ isolate_->handle_scope_data();
+ // Shrink the current handle scope to make it impossible to do
+ // handle allocations without an explicit handle scope.
+ limit_ = current->limit;
+ current->limit = current->next;
+ level_ = current->level;
+ current->level = 0;
}
-inline NoHandleAllocation::~NoHandleAllocation() {
- if (active_) {
- // Restore state in current handle scope to re-enable handle
- // allocations.
- v8::ImplementationUtilities::HandleScopeData* data =
- Isolate::Current()->handle_scope_data();
- ASSERT_EQ(0, data->level);
- data->level = level_;
- }
+inline SealHandleScope::~SealHandleScope() {
+ // Restore state in current handle scope to re-enable handle
+ // allocations.
+ v8::ImplementationUtilities::HandleScopeData* current =
+ isolate_->handle_scope_data();
+ ASSERT_EQ(0, current->level);
+ current->level = level_;
+ ASSERT_EQ(current->next, current->limit);
+ current->limit = limit_;
}
-#endif
+#endif
} } // namespace v8::internal
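
The handles-inl.h changes above move HandleScope to an explicit CloseScope(isolate, prev_next, prev_limit) that restores the allocator's next/limit pointers saved on entry. The sketch below is a deliberately simplified model of that bookkeeping (no extension blocks, no zapping, plain void* payloads, invented names) to show why closing a scope discards every handle created inside it in constant time:

#include <cassert>

// Shared allocation state, analogous to HandleScopeData.
struct ScopeData {
  void** next;
  void** limit;
  int level;
};

class ToyHandleScope {
 public:
  explicit ToyHandleScope(ScopeData* data)
      : data_(data), prev_next_(data->next), prev_limit_(data->limit) {
    data_->level++;
  }

  ~ToyHandleScope() {
    // Restoring next/limit invalidates every handle created in this scope.
    data_->level--;
    data_->next = prev_next_;
    data_->limit = prev_limit_;  // the real code also frees extension blocks here
  }

  static void** CreateHandle(ScopeData* data, void* value) {
    assert(data->next < data->limit);  // the real code grows the block instead
    void** slot = data->next++;
    *slot = value;
    return slot;
  }

 private:
  ScopeData* data_;
  void** prev_next_;
  void** prev_limit_;
};

A caller would seed ScopeData with a fixed buffer, e.g. ScopeData data = {buf, buf + 64, 0}, and nest ToyHandleScope objects on the stack much like the real class.
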
diff --git a/deps/v8/src/handles.cc b/deps/v8/src/handles.cc
index 46399d65e..4cb1827d8 100644
--- a/deps/v8/src/handles.cc
+++ b/deps/v8/src/handles.cc
@@ -45,8 +45,7 @@ namespace v8 {
namespace internal {
-int HandleScope::NumberOfHandles() {
- Isolate* isolate = Isolate::Current();
+int HandleScope::NumberOfHandles(Isolate* isolate) {
HandleScopeImplementer* impl = isolate->handle_scope_implementer();
int n = impl->blocks()->length();
if (n == 0) return 0;
@@ -55,8 +54,7 @@ int HandleScope::NumberOfHandles() {
}
-Object** HandleScope::Extend() {
- Isolate* isolate = Isolate::Current();
+Object** HandleScope::Extend(Isolate* isolate) {
v8::ImplementationUtilities::HandleScopeData* current =
isolate->handle_scope_data();
@@ -97,36 +95,34 @@ Object** HandleScope::Extend() {
void HandleScope::DeleteExtensions(Isolate* isolate) {
- ASSERT(isolate == Isolate::Current());
v8::ImplementationUtilities::HandleScopeData* current =
isolate->handle_scope_data();
isolate->handle_scope_implementer()->DeleteExtensions(current->limit);
}
+#ifdef ENABLE_HANDLE_ZAPPING
void HandleScope::ZapRange(Object** start, Object** end) {
ASSERT(end - start <= kHandleBlockSize);
for (Object** p = start; p != end; p++) {
*reinterpret_cast<Address*>(p) = v8::internal::kHandleZapValue;
}
}
+#endif
-Address HandleScope::current_level_address() {
- return reinterpret_cast<Address>(
- &Isolate::Current()->handle_scope_data()->level);
+Address HandleScope::current_level_address(Isolate* isolate) {
+ return reinterpret_cast<Address>(&isolate->handle_scope_data()->level);
}
-Address HandleScope::current_next_address() {
- return reinterpret_cast<Address>(
- &Isolate::Current()->handle_scope_data()->next);
+Address HandleScope::current_next_address(Isolate* isolate) {
+ return reinterpret_cast<Address>(&isolate->handle_scope_data()->next);
}
-Address HandleScope::current_limit_address() {
- return reinterpret_cast<Address>(
- &Isolate::Current()->handle_scope_data()->limit);
+Address HandleScope::current_limit_address(Isolate* isolate) {
+ return reinterpret_cast<Address>(&isolate->handle_scope_data()->limit);
}
@@ -154,60 +150,6 @@ Handle<JSGlobalProxy> ReinitializeJSGlobalProxy(
}
-void SetExpectedNofProperties(Handle<JSFunction> func, int nof) {
- // If objects constructed from this function exist then changing
- // 'estimated_nof_properties' is dangerous since the previous value might
- // have been compiled into the fast construct stub. More over, the inobject
- // slack tracking logic might have adjusted the previous value, so even
- // passing the same value is risky.
- if (func->shared()->live_objects_may_exist()) return;
-
- func->shared()->set_expected_nof_properties(nof);
- if (func->has_initial_map()) {
- Handle<Map> new_initial_map =
- func->GetIsolate()->factory()->CopyMap(
- Handle<Map>(func->initial_map()));
- new_initial_map->set_unused_property_fields(nof);
- func->set_initial_map(*new_initial_map);
- }
-}
-
-
-void SetPrototypeProperty(Handle<JSFunction> func, Handle<JSObject> value) {
- CALL_HEAP_FUNCTION_VOID(func->GetIsolate(),
- func->SetPrototype(*value));
-}
-
-
-static int ExpectedNofPropertiesFromEstimate(int estimate) {
- // If no properties are added in the constructor, they are more likely
- // to be added later.
- if (estimate == 0) estimate = 2;
-
- // We do not shrink objects that go into a snapshot (yet), so we adjust
- // the estimate conservatively.
- if (Serializer::enabled()) return estimate + 2;
-
- // Inobject slack tracking will reclaim redundant inobject space later,
- // so we can afford to adjust the estimate generously.
- if (FLAG_clever_optimizations) {
- return estimate + 8;
- } else {
- return estimate + 3;
- }
-}
-
-
-void SetExpectedNofPropertiesFromEstimate(Handle<SharedFunctionInfo> shared,
- int estimate) {
- // See the comment in SetExpectedNofProperties.
- if (shared->live_objects_may_exist()) return;
-
- shared->set_expected_nof_properties(
- ExpectedNofPropertiesFromEstimate(estimate));
-}
-
-
void FlattenString(Handle<String> string) {
CALL_HEAP_FUNCTION_VOID(string->GetIsolate(), string->TryFlatten());
}
@@ -218,23 +160,12 @@ Handle<String> FlattenGetString(Handle<String> string) {
}
-Handle<Object> SetPrototype(Handle<JSFunction> function,
- Handle<Object> prototype) {
- ASSERT(function->should_have_prototype());
- CALL_HEAP_FUNCTION(function->GetIsolate(),
- Accessors::FunctionSetPrototype(*function,
- *prototype,
- NULL),
- Object);
-}
-
-
-Handle<Object> SetProperty(Handle<Object> object,
+Handle<Object> SetProperty(Isolate* isolate,
+ Handle<Object> object,
Handle<Object> key,
Handle<Object> value,
PropertyAttributes attributes,
StrictModeFlag strict_mode) {
- Isolate* isolate = Isolate::Current();
CALL_HEAP_FUNCTION(
isolate,
Runtime::SetObjectProperty(
@@ -256,108 +187,72 @@ Handle<Object> ForceSetProperty(Handle<JSObject> object,
}
+Handle<Object> DeleteProperty(Handle<JSObject> object, Handle<Object> key) {
+ Isolate* isolate = object->GetIsolate();
+ CALL_HEAP_FUNCTION(isolate,
+ Runtime::DeleteObjectProperty(
+ isolate, object, key, JSReceiver::NORMAL_DELETION),
+ Object);
+}
+
+
Handle<Object> ForceDeleteProperty(Handle<JSObject> object,
Handle<Object> key) {
Isolate* isolate = object->GetIsolate();
CALL_HEAP_FUNCTION(isolate,
- Runtime::ForceDeleteObjectProperty(isolate, object, key),
+ Runtime::DeleteObjectProperty(
+ isolate, object, key, JSReceiver::FORCE_DELETION),
Object);
}
-Handle<Object> SetPropertyWithInterceptor(Handle<JSObject> object,
- Handle<String> key,
- Handle<Object> value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode) {
- CALL_HEAP_FUNCTION(object->GetIsolate(),
- object->SetPropertyWithInterceptor(*key,
- *value,
- attributes,
- strict_mode),
- Object);
+Handle<Object> HasProperty(Handle<JSReceiver> obj, Handle<Object> key) {
+ Isolate* isolate = obj->GetIsolate();
+ CALL_HEAP_FUNCTION(isolate,
+ Runtime::HasObjectProperty(isolate, obj, key), Object);
}
Handle<Object> GetProperty(Handle<JSReceiver> obj,
const char* name) {
Isolate* isolate = obj->GetIsolate();
- Handle<String> str = isolate->factory()->LookupAsciiSymbol(name);
+ Handle<String> str = isolate->factory()->InternalizeUtf8String(name);
CALL_HEAP_FUNCTION(isolate, obj->GetProperty(*str), Object);
}
-Handle<Object> GetProperty(Handle<Object> obj,
+Handle<Object> GetProperty(Isolate* isolate,
+ Handle<Object> obj,
Handle<Object> key) {
- Isolate* isolate = Isolate::Current();
CALL_HEAP_FUNCTION(isolate,
Runtime::GetObjectProperty(isolate, obj, key), Object);
}
-Handle<Object> GetPropertyWithInterceptor(Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<String> name,
- PropertyAttributes* attributes) {
- Isolate* isolate = receiver->GetIsolate();
- CALL_HEAP_FUNCTION(isolate,
- holder->GetPropertyWithInterceptor(*receiver,
- *name,
- attributes),
- Object);
-}
-
-
-Handle<Object> SetPrototype(Handle<JSObject> obj, Handle<Object> value) {
- const bool skip_hidden_prototypes = false;
- CALL_HEAP_FUNCTION(obj->GetIsolate(),
- obj->SetPrototype(*value, skip_hidden_prototypes), Object);
-}
-
-
-Handle<Object> LookupSingleCharacterStringFromCode(uint32_t index) {
- Isolate* isolate = Isolate::Current();
+Handle<Object> LookupSingleCharacterStringFromCode(Isolate* isolate,
+ uint32_t index) {
CALL_HEAP_FUNCTION(
isolate,
isolate->heap()->LookupSingleCharacterStringFromCode(index), Object);
}
-Handle<String> SubString(Handle<String> str,
- int start,
- int end,
- PretenureFlag pretenure) {
- CALL_HEAP_FUNCTION(str->GetIsolate(),
- str->SubString(start, end, pretenure), String);
-}
-
-
-Handle<JSObject> Copy(Handle<JSObject> obj) {
- Isolate* isolate = obj->GetIsolate();
- CALL_HEAP_FUNCTION(isolate,
- isolate->heap()->CopyJSObject(*obj), JSObject);
-}
-
-
-Handle<Object> SetAccessor(Handle<JSObject> obj, Handle<AccessorInfo> info) {
- CALL_HEAP_FUNCTION(obj->GetIsolate(), obj->DefineAccessor(*info), Object);
-}
-
-
// Wrappers for scripts are kept alive and cached in weak global
// handles referred from foreign objects held by the scripts as long as
// they are used. When they are not used anymore, the garbage
// collector will call the weak callback on the global handle
// associated with the wrapper and get rid of both the wrapper and the
// handle.
-static void ClearWrapperCache(Persistent<v8::Value> handle, void*) {
- Handle<Object> cache = Utils::OpenHandle(*handle);
+static void ClearWrapperCache(v8::Isolate* v8_isolate,
+ Persistent<v8::Value>* handle,
+ void*) {
+ Handle<Object> cache = Utils::OpenPersistent(handle);
JSValue* wrapper = JSValue::cast(*cache);
Foreign* foreign = Script::cast(wrapper->value())->wrapper();
ASSERT(foreign->foreign_address() ==
reinterpret_cast<Address>(cache.location()));
foreign->set_foreign_address(0);
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate);
isolate->global_handles()->Destroy(cache.location());
isolate->counters()->script_wrappers()->Decrement();
}
@@ -369,19 +264,29 @@ Handle<JSValue> GetScriptWrapper(Handle<Script> script) {
return Handle<JSValue>(
reinterpret_cast<JSValue**>(script->wrapper()->foreign_address()));
}
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = script->GetIsolate();
// Construct a new script wrapper.
isolate->counters()->script_wrappers()->Increment();
Handle<JSFunction> constructor = isolate->script_function();
Handle<JSValue> result =
Handle<JSValue>::cast(isolate->factory()->NewJSObject(constructor));
+
+ // The allocation might have triggered a GC, which could have called this
+ // function recursively, and a wrapper has already been created and cached.
+ // In that case, simply return the cached wrapper.
+ if (script->wrapper()->foreign_address() != NULL) {
+ return Handle<JSValue>(
+ reinterpret_cast<JSValue**>(script->wrapper()->foreign_address()));
+ }
+
result->set_value(*script);
// Create a new weak global handle and use it to cache the wrapper
// for future use. The cache will automatically be cleared by the
// garbage collector when it is not used anymore.
Handle<Object> handle = isolate->global_handles()->Create(*result);
- isolate->global_handles()->MakeWeak(handle.location(), NULL,
+ isolate->global_handles()->MakeWeak(handle.location(),
+ NULL,
&ClearWrapperCache);
script->wrapper()->set_foreign_address(
reinterpret_cast<Address>(handle.location()));
@@ -423,7 +328,7 @@ static void CalculateLineEnds(Isolate* isolate,
Vector<const SourceChar> src,
bool with_last_line) {
const int src_len = src.length();
- StringSearch<char, SourceChar> search(isolate, CStrVector("\n"));
+ StringSearch<uint8_t, SourceChar> search(isolate, STATIC_ASCII_VECTOR("\n"));
// Find and record line ends.
int position = 0;
@@ -450,14 +355,14 @@ Handle<FixedArray> CalculateLineEnds(Handle<String> src,
List<int> line_ends(line_count_estimate);
Isolate* isolate = src->GetIsolate();
{
- AssertNoAllocation no_heap_allocation; // ensure vectors stay valid.
+ DisallowHeapAllocation no_allocation; // ensure vectors stay valid.
// Dispatch on type of strings.
String::FlatContent content = src->GetFlatContent();
ASSERT(content.IsFlat());
if (content.IsAscii()) {
CalculateLineEnds(isolate,
&line_ends,
- content.ToAsciiVector(),
+ content.ToOneByteVector(),
with_last_line);
} else {
CalculateLineEnds(isolate,
@@ -478,7 +383,7 @@ Handle<FixedArray> CalculateLineEnds(Handle<String> src,
// Convert code position into line number.
int GetScriptLineNumber(Handle<Script> script, int code_pos) {
InitScriptLineEnds(script);
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_allocation;
FixedArray* line_ends_array = FixedArray::cast(script->line_ends());
const int line_ends_len = line_ends_array->length();
@@ -500,12 +405,13 @@ int GetScriptLineNumber(Handle<Script> script, int code_pos) {
return right + script->line_offset()->value();
}
+
// Convert code position into column number.
int GetScriptColumnNumber(Handle<Script> script, int code_pos) {
int line_number = GetScriptLineNumber(script, code_pos);
if (line_number == -1) return -1;
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_allocation;
FixedArray* line_ends_array = FixedArray::cast(script->line_ends());
line_number = line_number - script->line_offset()->value();
if (line_number == 0) return code_pos + script->column_offset()->value();
@@ -514,8 +420,9 @@ int GetScriptColumnNumber(Handle<Script> script, int code_pos) {
return code_pos - (prev_line_end_pos + 1);
}
+
int GetScriptLineNumberSafe(Handle<Script> script, int code_pos) {
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_allocation;
if (!script->line_ends()->IsUndefined()) {
return GetScriptLineNumber(script, code_pos);
}
@@ -538,33 +445,27 @@ int GetScriptLineNumberSafe(Handle<Script> script, int code_pos) {
}
-void CustomArguments::IterateInstance(ObjectVisitor* v) {
- v->VisitPointers(values_, values_ + ARRAY_SIZE(values_));
-}
-
-
// Compute the property keys from the interceptor.
+// TODO(rossberg): support symbols in API, and filter here if needed.
v8::Handle<v8::Array> GetKeysForNamedInterceptor(Handle<JSReceiver> receiver,
Handle<JSObject> object) {
Isolate* isolate = receiver->GetIsolate();
Handle<InterceptorInfo> interceptor(object->GetNamedInterceptor());
- CustomArguments args(isolate, interceptor->data(), *receiver, *object);
- v8::AccessorInfo info(args.end());
+ PropertyCallbackArguments
+ args(isolate, interceptor->data(), *receiver, *object);
v8::Handle<v8::Array> result;
if (!interceptor->enumerator()->IsUndefined()) {
- v8::NamedPropertyEnumerator enum_fun =
- v8::ToCData<v8::NamedPropertyEnumerator>(interceptor->enumerator());
+ v8::NamedPropertyEnumeratorCallback enum_fun =
+ v8::ToCData<v8::NamedPropertyEnumeratorCallback>(
+ interceptor->enumerator());
LOG(isolate, ApiObjectAccess("interceptor-named-enum", *object));
- {
- // Leaving JavaScript.
- VMState state(isolate, EXTERNAL);
- result = enum_fun(info);
- }
+ result = args.Call(enum_fun);
}
#if ENABLE_EXTRA_CHECKS
CHECK(result.IsEmpty() || v8::Utils::OpenHandle(*result)->IsJSObject());
#endif
- return result;
+ return v8::Local<v8::Array>::New(reinterpret_cast<v8::Isolate*>(isolate),
+ result);
}
@@ -573,21 +474,40 @@ v8::Handle<v8::Array> GetKeysForIndexedInterceptor(Handle<JSReceiver> receiver,
Handle<JSObject> object) {
Isolate* isolate = receiver->GetIsolate();
Handle<InterceptorInfo> interceptor(object->GetIndexedInterceptor());
- CustomArguments args(isolate, interceptor->data(), *receiver, *object);
- v8::AccessorInfo info(args.end());
+ PropertyCallbackArguments
+ args(isolate, interceptor->data(), *receiver, *object);
v8::Handle<v8::Array> result;
if (!interceptor->enumerator()->IsUndefined()) {
- v8::IndexedPropertyEnumerator enum_fun =
- v8::ToCData<v8::IndexedPropertyEnumerator>(interceptor->enumerator());
+ v8::IndexedPropertyEnumeratorCallback enum_fun =
+ v8::ToCData<v8::IndexedPropertyEnumeratorCallback>(
+ interceptor->enumerator());
LOG(isolate, ApiObjectAccess("interceptor-indexed-enum", *object));
- {
- // Leaving JavaScript.
- VMState state(isolate, EXTERNAL);
- result = enum_fun(info);
+ result = args.Call(enum_fun);
#if ENABLE_EXTRA_CHECKS
- CHECK(result.IsEmpty() || v8::Utils::OpenHandle(*result)->IsJSObject());
+ CHECK(result.IsEmpty() || v8::Utils::OpenHandle(*result)->IsJSObject());
#endif
- }
+ }
+ return v8::Local<v8::Array>::New(reinterpret_cast<v8::Isolate*>(isolate),
+ result);
+}
+
+
+Handle<Object> GetScriptNameOrSourceURL(Handle<Script> script) {
+ Isolate* isolate = script->GetIsolate();
+ Handle<String> name_or_source_url_key =
+ isolate->factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("nameOrSourceURL"));
+ Handle<JSValue> script_wrapper = GetScriptWrapper(script);
+ Handle<Object> property = GetProperty(isolate,
+ script_wrapper,
+ name_or_source_url_key);
+ ASSERT(property->IsJSFunction());
+ Handle<JSFunction> method = Handle<JSFunction>::cast(property);
+ bool caught_exception;
+ Handle<Object> result = Execution::TryCall(method, script_wrapper, 0,
+ NULL, &caught_exception);
+ if (caught_exception) {
+ result = isolate->factory()->undefined_value();
}
return result;
}
@@ -619,12 +539,16 @@ Handle<FixedArray> GetKeysInFixedArrayFor(Handle<JSReceiver> object,
// Only collect keys if access is permitted.
for (Handle<Object> p = object;
*p != isolate->heap()->null_value();
- p = Handle<Object>(p->GetPrototype(), isolate)) {
+ p = Handle<Object>(p->GetPrototype(isolate), isolate)) {
if (p->IsJSProxy()) {
Handle<JSProxy> proxy(JSProxy::cast(*p), isolate);
Handle<Object> args[] = { proxy };
- Handle<Object> names = Execution::Call(
- isolate->proxy_enumerate(), object, ARRAY_SIZE(args), args, threw);
+ Handle<Object> names = Execution::Call(isolate,
+ isolate->proxy_enumerate(),
+ object,
+ ARRAY_SIZE(args),
+ args,
+ threw);
if (*threw) return content;
content = AddKeysFromJSArray(content, Handle<JSArray>::cast(names));
break;
@@ -638,6 +562,10 @@ Handle<FixedArray> GetKeysInFixedArrayFor(Handle<JSReceiver> object,
isolate->heap()->undefined_value(),
v8::ACCESS_KEYS)) {
isolate->ReportFailedAccessCheck(*current, v8::ACCESS_KEYS);
+ if (isolate->has_scheduled_exception()) {
+ isolate->PromoteScheduledException();
+ *threw = true;
+ }
break;
}
@@ -726,10 +654,10 @@ Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object,
// to kInvalidEnumCache, this means that the map itself has never used the
// present enum cache. The first step to using the cache is to set the
// enum length of the map by counting the number of own descriptors that
- // are not DONT_ENUM.
+ // are not DONT_ENUM or SYMBOLIC.
if (own_property_count == Map::kInvalidEnumCache) {
own_property_count = object->map()->NumberOfDescribedProperties(
- OWN_DESCRIPTORS, DONT_ENUM);
+ OWN_DESCRIPTORS, DONT_SHOW);
if (cache_result) object->map()->SetEnumLength(own_property_count);
}
@@ -756,7 +684,7 @@ Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object,
}
isolate->counters()->enum_cache_misses()->Increment();
- int num_enum = map->NumberOfDescribedProperties(ALL_DESCRIPTORS, DONT_ENUM);
+ int num_enum = map->NumberOfDescribedProperties(ALL_DESCRIPTORS, DONT_SHOW);
Handle<FixedArray> storage = isolate->factory()->NewFixedArray(num_enum);
Handle<FixedArray> indices = isolate->factory()->NewFixedArray(num_enum);
@@ -770,14 +698,15 @@ Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object,
for (int i = 0; i < descs->number_of_descriptors(); i++) {
PropertyDetails details = descs->GetDetails(i);
- if (!details.IsDontEnum()) {
+ Object* key = descs->GetKey(i);
+ if (!(details.IsDontEnum() || key->IsSymbol())) {
if (i < real_size) ++enum_size;
- storage->set(index, descs->GetKey(i));
+ storage->set(index, key);
if (!indices.is_null()) {
if (details.type() != FIELD) {
indices = Handle<FixedArray>();
} else {
- int field_index = Descriptor::IndexFromValue(descs->GetValue(i));
+ int field_index = descs->GetFieldIndex(i);
if (field_index >= map->inobject_properties()) {
field_index = -(field_index - map->inobject_properties() + 1);
}
@@ -803,7 +732,7 @@ Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object,
return ReduceFixedArrayTo(storage, enum_size);
} else {
- Handle<StringDictionary> dictionary(object->property_dictionary());
+ Handle<NameDictionary> dictionary(object->property_dictionary());
int length = dictionary->NumberOfElements();
if (length == 0) {
@@ -824,7 +753,7 @@ Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object,
// many properties were added but subsequently deleted.
int next_enumeration = dictionary->NextEnumerationIndex();
if (!object->IsGlobalObject() && next_enumeration > (length * 3) / 2) {
- StringDictionary::DoGenerateNewEnumerationIndices(dictionary);
+ NameDictionary::DoGenerateNewEnumerationIndices(dictionary);
next_enumeration = dictionary->NextEnumerationIndex();
}
@@ -832,7 +761,7 @@ Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object,
isolate->factory()->NewFixedArray(next_enumeration);
storage = Handle<FixedArray>(dictionary->CopyEnumKeysTo(*storage));
- ASSERT(storage->length() == object->NumberOfLocalProperties(DONT_ENUM));
+ ASSERT(storage->length() == object->NumberOfLocalProperties(DONT_SHOW));
return storage;
}
}
@@ -863,168 +792,8 @@ Handle<ObjectHashTable> PutIntoObjectHashTable(Handle<ObjectHashTable> table,
}
-// This method determines the type of string involved and then gets the UTF8
-// length of the string. It doesn't flatten the string and has log(n) recursion
-// for a string of length n. If the failure flag gets set, then we have to
-// flatten the string and retry. Failures are caused by surrogate pairs in deep
-// cons strings.
-
-// Single surrogate characters that are encountered in the UTF-16 character
-// sequence of the input string get counted as 3 UTF-8 bytes, because that
-// is the way that WriteUtf8 will encode them. Surrogate pairs are counted and
-// encoded as one 4-byte UTF-8 sequence.
-
-// This function conceptually uses recursion on the two halves of cons strings.
-// However, in order to avoid the recursion going too deep it recurses on the
-// second string of the cons, but iterates on the first substring (by manually
-// eliminating it as a tail recursion). This means it counts the UTF-8 length
-// from the end to the start, which makes no difference to the total.
-
-// Surrogate pairs are recognized even if they are split across two sides of a
-// cons, which complicates the implementation somewhat. Therefore, too deep
-// recursion cannot always be avoided. This case is detected, and the failure
-// flag is set, a signal to the caller that the string should be flattened and
-// the operation retried.
-int Utf8LengthHelper(String* input,
- int from,
- int to,
- bool followed_by_surrogate,
- int max_recursion,
- bool* failure,
- bool* starts_with_surrogate) {
- if (from == to) return 0;
- int total = 0;
- bool dummy;
- while (true) {
- if (input->IsAsciiRepresentation()) {
- *starts_with_surrogate = false;
- return total + to - from;
- }
- switch (StringShape(input).representation_tag()) {
- case kConsStringTag: {
- ConsString* str = ConsString::cast(input);
- String* first = str->first();
- String* second = str->second();
- int first_length = first->length();
- if (first_length - from > to - first_length) {
- if (first_length < to) {
- // Right hand side is shorter. No need to check the recursion depth
- // since this can only happen log(n) times.
- bool right_starts_with_surrogate = false;
- total += Utf8LengthHelper(second,
- 0,
- to - first_length,
- followed_by_surrogate,
- max_recursion - 1,
- failure,
- &right_starts_with_surrogate);
- if (*failure) return 0;
- followed_by_surrogate = right_starts_with_surrogate;
- input = first;
- to = first_length;
- } else {
- // We only need the left hand side.
- input = first;
- }
- } else {
- if (first_length > from) {
- // Left hand side is shorter.
- if (first->IsAsciiRepresentation()) {
- total += first_length - from;
- *starts_with_surrogate = false;
- starts_with_surrogate = &dummy;
- input = second;
- from = 0;
- to -= first_length;
- } else if (second->IsAsciiRepresentation()) {
- followed_by_surrogate = false;
- total += to - first_length;
- input = first;
- to = first_length;
- } else if (max_recursion > 0) {
- bool right_starts_with_surrogate = false;
- // Recursing on the long one. This may fail.
- total += Utf8LengthHelper(second,
- 0,
- to - first_length,
- followed_by_surrogate,
- max_recursion - 1,
- failure,
- &right_starts_with_surrogate);
- if (*failure) return 0;
- input = first;
- to = first_length;
- followed_by_surrogate = right_starts_with_surrogate;
- } else {
- *failure = true;
- return 0;
- }
- } else {
- // We only need the right hand side.
- input = second;
- from = 0;
- to -= first_length;
- }
- }
- continue;
- }
- case kExternalStringTag:
- case kSeqStringTag: {
- Vector<const uc16> vector = input->GetFlatContent().ToUC16Vector();
- const uc16* p = vector.start();
- int previous = unibrow::Utf16::kNoPreviousCharacter;
- for (int i = from; i < to; i++) {
- uc16 c = p[i];
- total += unibrow::Utf8::Length(c, previous);
- previous = c;
- }
- if (to - from > 0) {
- if (unibrow::Utf16::IsLeadSurrogate(previous) &&
- followed_by_surrogate) {
- total -= unibrow::Utf8::kBytesSavedByCombiningSurrogates;
- }
- if (unibrow::Utf16::IsTrailSurrogate(p[from])) {
- *starts_with_surrogate = true;
- }
- }
- return total;
- }
- case kSlicedStringTag: {
- SlicedString* str = SlicedString::cast(input);
- int offset = str->offset();
- input = str->parent();
- from += offset;
- to += offset;
- continue;
- }
- default:
- break;
- }
- UNREACHABLE();
- return 0;
- }
- return 0;
-}
-
-
-int Utf8Length(Handle<String> str) {
- bool dummy;
- bool failure;
- int len;
- const int kRecursionBudget = 100;
- do {
- failure = false;
- len = Utf8LengthHelper(
- *str, 0, str->length(), false, kRecursionBudget, &failure, &dummy);
- if (failure) FlattenString(str);
- } while (failure);
- return len;
-}
-
-
DeferredHandleScope::DeferredHandleScope(Isolate* isolate)
: impl_(isolate->handle_scope_implementer()) {
- ASSERT(impl_->isolate() == Isolate::Current());
impl_->BeginDeferredScope();
v8::ImplementationUtilities::HandleScopeData* data =
impl_->isolate()->handle_scope_data();
@@ -1064,4 +833,15 @@ DeferredHandles* DeferredHandleScope::Detach() {
}
+void AddWeakObjectToCodeDependency(Heap* heap,
+ Handle<Object> object,
+ Handle<Code> code) {
+ heap->EnsureWeakObjectToCodeTable();
+ Handle<DependentCode> dep(heap->LookupWeakObjectToCodeDependency(*object));
+ dep = DependentCode::Insert(dep, DependentCode::kWeaklyEmbeddedGroup, code);
+ CALL_HEAP_FUNCTION_VOID(heap->isolate(),
+ heap->AddWeakObjectToCodeDependency(*object, *dep));
+}
+
+
} } // namespace v8::internal
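
Several of the script helpers touched in handles.cc (CalculateLineEnds, GetScriptLineNumber, GetScriptColumnNumber) map a character position to a line by binary-searching a precomputed, sorted array of line-end offsets. V8 does this over a FixedArray; the sketch below shows the same idea with standard containers, so the names and types here are illustrative rather than V8 API:

#include <algorithm>
#include <string>
#include <vector>

// Record the offset of every '\n', plus an end-of-source sentinel.
std::vector<int> CalculateLineEnds(const std::string& src) {
  std::vector<int> ends;
  for (int i = 0; i < static_cast<int>(src.size()); ++i) {
    if (src[i] == '\n') ends.push_back(i);
  }
  ends.push_back(static_cast<int>(src.size()));
  return ends;
}

// 0-based line of code_pos: index of the first line end >= code_pos.
int GetLineNumber(const std::vector<int>& line_ends, int code_pos) {
  return static_cast<int>(
      std::lower_bound(line_ends.begin(), line_ends.end(), code_pos) -
      line_ends.begin());
}

// Column is the distance from the end of the previous line.
int GetColumnNumber(const std::vector<int>& line_ends, int code_pos) {
  int line = GetLineNumber(line_ends, code_pos);
  return line == 0 ? code_pos : code_pos - (line_ends[line - 1] + 1);
}
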
diff --git a/deps/v8/src/handles.h b/deps/v8/src/handles.h
index a1d88c2f8..cfdecac19 100644
--- a/deps/v8/src/handles.h
+++ b/deps/v8/src/handles.h
@@ -30,6 +30,7 @@
#include "allocation.h"
#include "apiutils.h"
+#include "objects.h"
namespace v8 {
namespace internal {
@@ -58,29 +59,23 @@ class Handle {
a = b; // Fake assignment to enforce type checks.
USE(a);
#endif
- location_ = reinterpret_cast<T**>(handle.location());
+ location_ = reinterpret_cast<T**>(handle.location_);
}
- INLINE(T* operator ->() const) { return operator*(); }
+ INLINE(T* operator->() const) { return operator*(); }
// Check if this handle refers to the exact same object as the other handle.
- bool is_identical_to(const Handle<T> other) const {
- return operator*() == *other;
- }
+ INLINE(bool is_identical_to(const Handle<T> other) const);
// Provides the C++ dereference operator.
INLINE(T* operator*() const);
// Returns the address to where the raw pointer is stored.
- T** location() const {
- ASSERT(location_ == NULL ||
- reinterpret_cast<Address>(*location_) != kZapValue);
- return location_;
- }
+ INLINE(T** location() const);
template <class S> static Handle<T> cast(Handle<S> that) {
- T::cast(*that);
- return Handle<T>(reinterpret_cast<T**>(that.location()));
+ T::cast(*reinterpret_cast<T**>(that.location_));
+ return Handle<T>(reinterpret_cast<T**>(that.location_));
}
static Handle<T> null() { return Handle<T>(); }
@@ -90,11 +85,34 @@ class Handle {
// implementation in api.h.
inline Handle<T> EscapeFrom(v8::HandleScope* scope);
+#ifdef DEBUG
+ enum DereferenceCheckMode { INCLUDE_DEFERRED_CHECK, NO_DEFERRED_CHECK };
+
+ bool IsDereferenceAllowed(DereferenceCheckMode mode) const;
+#endif // DEBUG
+
private:
T** location_;
+
+ // Handles of different classes are allowed to access each other's location_.
+ template<class S> friend class Handle;
};
+// Convenience wrapper.
+template<class T>
+inline Handle<T> handle(T* t, Isolate* isolate) {
+ return Handle<T>(t, isolate);
+}
+
+
+// Convenience wrapper.
+template<class T>
+inline Handle<T> handle(T* t) {
+ return Handle<T>(t, t->GetIsolate());
+}
+
+
class DeferredHandles;
class HandleScopeImplementer;
@@ -113,24 +131,23 @@ class HandleScopeImplementer;
// for which the handle scope has been deleted is undefined.
class HandleScope {
public:
- inline HandleScope();
explicit inline HandleScope(Isolate* isolate);
inline ~HandleScope();
// Counts the number of allocated handles.
- static int NumberOfHandles();
+ static int NumberOfHandles(Isolate* isolate);
// Creates a new handle with the given value.
template <typename T>
- static inline T** CreateHandle(T* value, Isolate* isolate);
+ static inline T** CreateHandle(Isolate* isolate, T* value);
// Deallocates any extensions used by the current scope.
static void DeleteExtensions(Isolate* isolate);
- static Address current_next_address();
- static Address current_limit_address();
- static Address current_level_address();
+ static Address current_next_address(Isolate* isolate);
+ static Address current_limit_address(Isolate* isolate);
+ static Address current_level_address(Isolate* isolate);
// Closes the HandleScope (invalidating all handles
// created in the scope of the HandleScope) and returns
@@ -148,22 +165,26 @@ class HandleScope {
void* operator new(size_t size);
void operator delete(void* size_t);
- inline void CloseScope();
-
Isolate* isolate_;
Object** prev_next_;
Object** prev_limit_;
+ // Close the handle scope resetting limits to a previous state.
+ static inline void CloseScope(Isolate* isolate,
+ Object** prev_next,
+ Object** prev_limit);
+
// Extend the handle scope making room for more handles.
- static internal::Object** Extend();
+ static internal::Object** Extend(Isolate* isolate);
+#ifdef ENABLE_HANDLE_ZAPPING
// Zaps the handles in the half-open interval [start, end).
- static void ZapRange(internal::Object** start, internal::Object** end);
+ static void ZapRange(Object** start, Object** end);
+#endif
- friend class v8::internal::DeferredHandles;
friend class v8::HandleScope;
+ friend class v8::internal::DeferredHandles;
friend class v8::internal::HandleScopeImplementer;
- friend class v8::ImplementationUtilities;
friend class v8::internal::Isolate;
};
@@ -207,9 +228,8 @@ void FlattenString(Handle<String> str);
// string.
Handle<String> FlattenGetString(Handle<String> str);
-int Utf8Length(Handle<String> str);
-
-Handle<Object> SetProperty(Handle<Object> object,
+Handle<Object> SetProperty(Isolate* isolate,
+ Handle<Object> object,
Handle<Object> key,
Handle<Object> value,
PropertyAttributes attributes,
@@ -220,27 +240,20 @@ Handle<Object> ForceSetProperty(Handle<JSObject> object,
Handle<Object> value,
PropertyAttributes attributes);
-Handle<Object> ForceDeleteProperty(Handle<JSObject> object,
- Handle<Object> key);
+Handle<Object> DeleteProperty(Handle<JSObject> object, Handle<Object> key);
-Handle<Object> GetProperty(Handle<JSReceiver> obj,
- const char* name);
-
-Handle<Object> GetProperty(Handle<Object> obj,
- Handle<Object> key);
+Handle<Object> ForceDeleteProperty(Handle<JSObject> object, Handle<Object> key);
-Handle<Object> GetPropertyWithInterceptor(Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<String> name,
- PropertyAttributes* attributes);
+Handle<Object> HasProperty(Handle<JSReceiver> obj, Handle<Object> key);
-Handle<Object> SetPrototype(Handle<JSObject> obj, Handle<Object> value);
+Handle<Object> GetProperty(Handle<JSReceiver> obj, const char* name);
-Handle<Object> LookupSingleCharacterStringFromCode(uint32_t index);
-
-Handle<JSObject> Copy(Handle<JSObject> obj);
+Handle<Object> GetProperty(Isolate* isolate,
+ Handle<Object> obj,
+ Handle<Object> key);
-Handle<Object> SetAccessor(Handle<JSObject> obj, Handle<AccessorInfo> info);
+Handle<Object> LookupSingleCharacterStringFromCode(Isolate* isolate,
+ uint32_t index);
Handle<FixedArray> AddKeysFromJSArray(Handle<FixedArray>,
Handle<JSArray> array);
@@ -260,6 +273,7 @@ int GetScriptLineNumber(Handle<Script> script, int code_position);
// The safe version does not make heap allocations but may work much slower.
int GetScriptLineNumberSafe(Handle<Script> script, int code_position);
int GetScriptColumnNumber(Handle<Script> script, int code_position);
+Handle<Object> GetScriptNameOrSourceURL(Handle<Script> script);
// Computes the enumerable keys from interceptors. Used for debug mirrors and
// by GetKeysInFixedArrayFor below.
@@ -285,29 +299,10 @@ Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object,
Handle<FixedArray> UnionOfKeys(Handle<FixedArray> first,
Handle<FixedArray> second);
-Handle<String> SubString(Handle<String> str,
- int start,
- int end,
- PretenureFlag pretenure = NOT_TENURED);
-
-// Sets the expected number of properties for the function's instances.
-void SetExpectedNofProperties(Handle<JSFunction> func, int nof);
-
-// Sets the prototype property for a function instance.
-void SetPrototypeProperty(Handle<JSFunction> func, Handle<JSObject> value);
-
-// Sets the expected number of properties based on estimate from compiler.
-void SetExpectedNofPropertiesFromEstimate(Handle<SharedFunctionInfo> shared,
- int estimate);
-
-
Handle<JSGlobalProxy> ReinitializeJSGlobalProxy(
Handle<JSFunction> constructor,
Handle<JSGlobalProxy> global);
-Handle<Object> SetPrototype(Handle<JSFunction> function,
- Handle<Object> prototype);
-
Handle<ObjectHashSet> ObjectHashSetAdd(Handle<ObjectHashSet> table,
Handle<Object> key);
@@ -318,17 +313,24 @@ Handle<ObjectHashTable> PutIntoObjectHashTable(Handle<ObjectHashTable> table,
Handle<Object> key,
Handle<Object> value);
-class NoHandleAllocation BASE_EMBEDDED {
+void AddWeakObjectToCodeDependency(Heap* heap,
+ Handle<Object> object,
+ Handle<Code> code);
+
+// Seal off the current HandleScope so that new handles can only be created
+// if a new HandleScope is entered.
+class SealHandleScope BASE_EMBEDDED {
public:
#ifndef DEBUG
- NoHandleAllocation() {}
- ~NoHandleAllocation() {}
+ explicit SealHandleScope(Isolate* isolate) {}
+ ~SealHandleScope() {}
#else
- inline NoHandleAllocation();
- inline ~NoHandleAllocation();
+ explicit inline SealHandleScope(Isolate* isolate);
+ inline ~SealHandleScope();
private:
+ Isolate* isolate_;
+ Object** limit_;
int level_;
- bool active_;
#endif
};
diff --git a/deps/v8/src/harmony-array.js b/deps/v8/src/harmony-array.js
new file mode 100644
index 000000000..e440299ff
--- /dev/null
+++ b/deps/v8/src/harmony-array.js
@@ -0,0 +1,124 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+'use strict';
+
+// This file relies on the fact that the following declaration has been made
+// in runtime.js:
+// var $Array = global.Array;
+
+// -------------------------------------------------------------------
+
+// ES6 draft 07-15-13, section 15.4.3.23
+function ArrayFind(predicate /* thisArg */) { // length == 1
+ if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+ throw MakeTypeError("called_on_null_or_undefined",
+ ["Array.prototype.find"]);
+ }
+
+ var array = ToObject(this);
+ var length = ToInteger(array.length);
+
+ if (!IS_SPEC_FUNCTION(predicate)) {
+ throw MakeTypeError('called_non_callable', [predicate]);
+ }
+
+ var thisArg;
+ if (%_ArgumentsLength() > 1) {
+ thisArg = %_Arguments(1);
+ }
+
+ if (IS_NULL_OR_UNDEFINED(thisArg)) {
+ thisArg = %GetDefaultReceiver(predicate) || thisArg;
+ } else if (!IS_SPEC_OBJECT(thisArg) && %IsClassicModeFunction(predicate)) {
+ thisArg = ToObject(thisArg);
+ }
+
+ for (var i = 0; i < length; i++) {
+ if (i in array) {
+ var element = array[i];
+ if (%_CallFunction(thisArg, element, i, array, predicate)) {
+ return element;
+ }
+ }
+ }
+
+ return;
+}
+
+
+// ES6 draft 07-15-13, section 15.4.3.24
+function ArrayFindIndex(predicate /* thisArg */) { // length == 1
+ if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+ throw MakeTypeError("called_on_null_or_undefined",
+ ["Array.prototype.findIndex"]);
+ }
+
+ var array = ToObject(this);
+ var length = ToInteger(array.length);
+
+ if (!IS_SPEC_FUNCTION(predicate)) {
+ throw MakeTypeError('called_non_callable', [predicate]);
+ }
+
+ var thisArg;
+ if (%_ArgumentsLength() > 1) {
+ thisArg = %_Arguments(1);
+ }
+
+ if (IS_NULL_OR_UNDEFINED(thisArg)) {
+ thisArg = %GetDefaultReceiver(predicate) || thisArg;
+ } else if (!IS_SPEC_OBJECT(thisArg) && %IsClassicModeFunction(predicate)) {
+ thisArg = ToObject(thisArg);
+ }
+
+ for (var i = 0; i < length; i++) {
+ if (i in array) {
+ var element = array[i];
+ if (%_CallFunction(thisArg, element, i, array, predicate)) {
+ return i;
+ }
+ }
+ }
+
+ return -1;
+}
+
+
+// -------------------------------------------------------------------
+
+function HarmonyArrayExtendArrayPrototype() {
+ %CheckIsBootstrapping();
+
+ // Set up the non-enumerable functions on the Array prototype object.
+ InstallFunctions($Array.prototype, DONT_ENUM, $Array(
+ "find", ArrayFind,
+ "findIndex", ArrayFindIndex
+ ));
+}
+
+HarmonyArrayExtendArrayPrototype();
\ No newline at end of file
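A minimal usage sketch of the Array.prototype.find and findIndex built-ins defined above, assuming a build in which this harmony file has been installed; the array and predicates below are illustrative only and do not appear in the patch.

// Illustrative only; assumes the harmony Array built-ins above are installed.
var numbers = [4, 5, 6, 7];
// find returns the first element for which the predicate is truthy, else undefined.
numbers.find(function(n) { return n > 5; });        // 6
numbers.find(function(n) { return n > 10; });       // undefined
// findIndex returns that element's index, else -1.
numbers.findIndex(function(n) { return n > 5; });   // 2
numbers.findIndex(function(n) { return n > 10; });  // -1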
diff --git a/deps/v8/src/harmony-math.js b/deps/v8/src/harmony-math.js
new file mode 100644
index 000000000..a4d3f2e8a
--- /dev/null
+++ b/deps/v8/src/harmony-math.js
@@ -0,0 +1,60 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+'use strict';
+
+// ES6 draft 09-27-13, section 20.2.2.28.
+function MathSign(x) {
+ x = TO_NUMBER_INLINE(x);
+ if (x > 0) return 1;
+ if (x < 0) return -1;
+ if (x === 0) return x;
+ return NAN;
+}
+
+
+// ES6 draft 09-27-13, section 20.2.2.34.
+function MathTrunc(x) {
+ x = TO_NUMBER_INLINE(x);
+ if (x > 0) return MathFloor(x);
+ if (x < 0) return MathCeil(x);
+ if (x === 0) return x;
+ return NAN;
+}
+
+
+function ExtendMath() {
+ %CheckIsBootstrapping();
+
+ // Set up the non-enumerable functions on the Math object.
+ InstallFunctions($Math, DONT_ENUM, $Array(
+ "sign", MathSign,
+ "trunc", MathTrunc
+ ));
+}
+
+ExtendMath();
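A minimal usage sketch of the Math.sign and Math.trunc extensions defined above, assuming a build in which this harmony file has been installed; the inputs are illustrative only.

// Illustrative only; assumes the harmony Math extensions above are installed.
Math.sign(-3.5);    // -1
Math.sign(42);      // 1
Math.sign(-0);      // -0  (zeros are returned unchanged)
Math.sign("foo");   // NaN
Math.trunc(4.7);    // 4   (MathFloor for positive input)
Math.trunc(-4.7);   // -4  (MathCeil for negative input)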
diff --git a/deps/v8/src/harmony-string.js b/deps/v8/src/harmony-string.js
new file mode 100644
index 000000000..a5c6f4e2e
--- /dev/null
+++ b/deps/v8/src/harmony-string.js
@@ -0,0 +1,154 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+'use strict';
+
+// This file relies on the fact that the following declaration has been made
+// in runtime.js:
+// var $String = global.String;
+// var $Array = global.Array;
+
+// -------------------------------------------------------------------
+
+// ES6 draft 07-15-13, section 15.5.3.21
+function StringRepeat(count) {
+ if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+ throw MakeTypeError("called_on_null_or_undefined",
+ ["String.prototype.repeat"]);
+ }
+
+ var s = TO_STRING_INLINE(this);
+ var n = ToInteger(count);
+ if (n < 0 || !NUMBER_IS_FINITE(n)) {
+ throw MakeRangeError("invalid_count_value", []);
+ }
+
+ var elements = new InternalArray(n);
+ for (var i = 0; i < n; i++) {
+ elements[i] = s;
+ }
+
+ return %StringBuilderConcat(elements, n, "");
+}
+
+
+// ES6 draft 07-15-13, section 15.5.3.22
+function StringStartsWith(searchString /* position */) { // length == 1
+ if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+ throw MakeTypeError("called_on_null_or_undefined",
+ ["String.prototype.startsWith"]);
+ }
+
+ var s = TO_STRING_INLINE(this);
+ var ss = TO_STRING_INLINE(searchString);
+ var pos = 0;
+ if (%_ArgumentsLength() > 1) {
+ pos = %_Arguments(1); // position
+ pos = ToInteger(pos);
+ }
+
+ var s_len = s.length;
+ var start = MathMin(MathMax(pos, 0), s_len);
+ var ss_len = ss.length;
+ if (ss_len + start > s_len) {
+ return false;
+ }
+
+ return %StringIndexOf(s, ss, start) === start;
+}
+
+
+// ES6 draft 07-15-13, section 15.5.3.23
+function StringEndsWith(searchString /* position */) { // length == 1
+ if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+ throw MakeTypeError("called_on_null_or_undefined",
+ ["String.prototype.endsWith"]);
+ }
+
+ var s = TO_STRING_INLINE(this);
+ var ss = TO_STRING_INLINE(searchString);
+ var s_len = s.length;
+ var pos = s_len;
+ if (%_ArgumentsLength() > 1) {
+ var arg = %_Arguments(1); // position
+ if (!IS_UNDEFINED(arg)) {
+ pos = ToInteger(arg);
+ }
+ }
+
+ var end = MathMin(MathMax(pos, 0), s_len);
+ var ss_len = ss.length;
+ var start = end - ss_len;
+ if (start < 0) {
+ return false;
+ }
+
+ return %StringLastIndexOf(s, ss, start) === start;
+}
+
+
+// ES6 draft 07-15-13, section 15.5.3.24
+function StringContains(searchString /* position */) { // length == 1
+ if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+ throw MakeTypeError("called_on_null_or_undefined",
+ ["String.prototype.contains"]);
+ }
+
+ var s = TO_STRING_INLINE(this);
+ var ss = TO_STRING_INLINE(searchString);
+ var pos = 0;
+ if (%_ArgumentsLength() > 1) {
+ pos = %_Arguments(1); // position
+ pos = ToInteger(pos);
+ }
+
+ var s_len = s.length;
+ var start = MathMin(MathMax(pos, 0), s_len);
+ var ss_len = ss.length;
+ if (ss_len + start > s_len) {
+ return false;
+ }
+
+ return %StringIndexOf(s, ss, start) !== -1;
+}
+
+
+// -------------------------------------------------------------------
+
+function ExtendStringPrototype() {
+ %CheckIsBootstrapping();
+
+ // Set up the non-enumerable functions on the String prototype object.
+ InstallFunctions($String.prototype, DONT_ENUM, $Array(
+ "repeat", StringRepeat,
+ "startsWith", StringStartsWith,
+ "endsWith", StringEndsWith,
+ "contains", StringContains
+ ));
+}
+
+ExtendStringPrototype();
\ No newline at end of file
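A minimal usage sketch of the String.prototype additions defined above, assuming a build in which this harmony file has been installed; the strings and positions are illustrative only.

// Illustrative only; assumes the harmony String built-ins above are installed.
"ab".repeat(3);               // "ababab"
"ab".repeat(-1);              // throws RangeError ("invalid_count_value")
"hello".startsWith("he");     // true
"hello".startsWith("lo", 3);  // true   (search begins at index 3)
"hello".endsWith("lo");       // true
"hello".endsWith("ell", 4);   // true   (only the first 4 characters are considered)
"hello".contains("ell");      // true
"hello".contains("ell", 2);   // false  (search begins at index 2)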
diff --git a/deps/v8/src/heap-inl.h b/deps/v8/src/heap-inl.h
index bace902d4..ad6f44f93 100644
--- a/deps/v8/src/heap-inl.h
+++ b/deps/v8/src/heap-inl.h
@@ -69,7 +69,7 @@ void PromotionQueue::insert(HeapObject* target, int size) {
*(--rear_) = size;
// Assert no overflow into live objects.
#ifdef DEBUG
- SemiSpace::AssertValidRange(HEAP->new_space()->top(),
+ SemiSpace::AssertValidRange(target->GetIsolate()->heap()->new_space()->top(),
reinterpret_cast<Address>(rear_));
#endif
}
@@ -91,36 +91,60 @@ MaybeObject* Heap::AllocateStringFromUtf8(Vector<const char> str,
if (non_ascii_start >= length) {
// If the string is ASCII, we do not need to convert the characters
// since UTF8 is backwards compatible with ASCII.
- return AllocateStringFromAscii(str, pretenure);
+ return AllocateStringFromOneByte(str, pretenure);
}
// Non-ASCII and we need to decode.
return AllocateStringFromUtf8Slow(str, non_ascii_start, pretenure);
}
-MaybeObject* Heap::AllocateSymbol(Vector<const char> str,
- int chars,
- uint32_t hash_field) {
- unibrow::Utf8InputBuffer<> buffer(str.start(),
- static_cast<unsigned>(str.length()));
- return AllocateInternalSymbol(&buffer, chars, hash_field);
+template<>
+bool inline Heap::IsOneByte(Vector<const char> str, int chars) {
+ // TODO(dcarney): incorporate Latin-1 check when Latin-1 is supported?
+ // ASCII only check.
+ return chars == str.length();
}
-MaybeObject* Heap::AllocateAsciiSymbol(Vector<const char> str,
- uint32_t hash_field) {
- if (str.length() > SeqAsciiString::kMaxLength) {
- return Failure::OutOfMemoryException();
+template<>
+bool inline Heap::IsOneByte(String* str, int chars) {
+ return str->IsOneByteRepresentation();
+}
+
+
+MaybeObject* Heap::AllocateInternalizedStringFromUtf8(
+ Vector<const char> str, int chars, uint32_t hash_field) {
+ if (IsOneByte(str, chars)) {
+ return AllocateOneByteInternalizedString(
+ Vector<const uint8_t>::cast(str), hash_field);
+ }
+ return AllocateInternalizedStringImpl<false>(str, chars, hash_field);
+}
+
+
+template<typename T>
+MaybeObject* Heap::AllocateInternalizedStringImpl(
+ T t, int chars, uint32_t hash_field) {
+ if (IsOneByte(t, chars)) {
+ return AllocateInternalizedStringImpl<true>(t, chars, hash_field);
+ }
+ return AllocateInternalizedStringImpl<false>(t, chars, hash_field);
+}
+
+
+MaybeObject* Heap::AllocateOneByteInternalizedString(Vector<const uint8_t> str,
+ uint32_t hash_field) {
+ if (str.length() > SeqOneByteString::kMaxLength) {
+ return Failure::OutOfMemoryException(0x2);
}
// Compute map and object size.
- Map* map = ascii_symbol_map();
- int size = SeqAsciiString::SizeFor(str.length());
+ Map* map = ascii_internalized_string_map();
+ int size = SeqOneByteString::SizeFor(str.length());
+ AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, TENURED);
// Allocate string.
Object* result;
- { MaybeObject* maybe_result = (size > Page::kMaxNonCodeHeapObjectSize)
- ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
- : old_data_space_->AllocateRaw(size);
+ { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
@@ -134,27 +158,26 @@ MaybeObject* Heap::AllocateAsciiSymbol(Vector<const char> str,
ASSERT_EQ(size, answer->Size());
// Fill in the characters.
- memcpy(answer->address() + SeqAsciiString::kHeaderSize,
- str.start(), str.length());
+ OS::MemCopy(answer->address() + SeqOneByteString::kHeaderSize,
+ str.start(), str.length());
return answer;
}
-MaybeObject* Heap::AllocateTwoByteSymbol(Vector<const uc16> str,
- uint32_t hash_field) {
+MaybeObject* Heap::AllocateTwoByteInternalizedString(Vector<const uc16> str,
+ uint32_t hash_field) {
if (str.length() > SeqTwoByteString::kMaxLength) {
- return Failure::OutOfMemoryException();
+ return Failure::OutOfMemoryException(0x3);
}
// Compute map and object size.
- Map* map = symbol_map();
+ Map* map = internalized_string_map();
int size = SeqTwoByteString::SizeFor(str.length());
+ AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, TENURED);
// Allocate string.
Object* result;
- { MaybeObject* maybe_result = (size > Page::kMaxNonCodeHeapObjectSize)
- ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
- : old_data_space_->AllocateRaw(size);
+ { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
@@ -167,8 +190,8 @@ MaybeObject* Heap::AllocateTwoByteSymbol(Vector<const uc16> str,
ASSERT_EQ(size, answer->Size());
// Fill in the characters.
- memcpy(answer->address() + SeqTwoByteString::kHeaderSize,
- str.start(), str.length() * kUC16Size);
+ OS::MemCopy(answer->address() + SeqTwoByteString::kHeaderSize,
+ str.start(), str.length() * kUC16Size);
return answer;
}
@@ -183,10 +206,17 @@ MaybeObject* Heap::CopyFixedDoubleArray(FixedDoubleArray* src) {
}
+MaybeObject* Heap::CopyConstantPoolArray(ConstantPoolArray* src) {
+ return CopyConstantPoolArrayWithMap(src, src->map());
+}
+
+
MaybeObject* Heap::AllocateRaw(int size_in_bytes,
AllocationSpace space,
AllocationSpace retry_space) {
- ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
+ ASSERT(AllowHandleAllocation::IsAllowed());
+ ASSERT(AllowHeapAllocation::IsAllowed());
+ ASSERT(gc_state_ == NOT_IN_GC);
ASSERT(space != NEW_SPACE ||
retry_space == OLD_POINTER_SPACE ||
retry_space == OLD_DATA_SPACE ||
@@ -220,6 +250,8 @@ MaybeObject* Heap::AllocateRaw(int size_in_bytes,
result = lo_space_->AllocateRaw(size_in_bytes, NOT_EXECUTABLE);
} else if (CELL_SPACE == space) {
result = cell_space_->AllocateRaw(size_in_bytes);
+ } else if (PROPERTY_CELL_SPACE == space) {
+ result = property_cell_space_->AllocateRaw(size_in_bytes);
} else {
ASSERT(MAP_SPACE == space);
result = map_space_->AllocateRaw(size_in_bytes);
@@ -239,8 +271,9 @@ MaybeObject* Heap::NumberFromInt32(
MaybeObject* Heap::NumberFromUint32(
uint32_t value, PretenureFlag pretenure) {
- if ((int32_t)value >= 0 && Smi::IsValid((int32_t)value)) {
- return Smi::FromInt((int32_t)value);
+ if (static_cast<int32_t>(value) >= 0 &&
+ Smi::IsValid(static_cast<int32_t>(value))) {
+ return Smi::FromInt(static_cast<int32_t>(value));
}
// Bypass NumberFromDouble to avoid various redundant checks.
return AllocateHeapNumber(FastUI2D(value), pretenure);
@@ -263,28 +296,6 @@ void Heap::FinalizeExternalString(String* string) {
}
-MaybeObject* Heap::AllocateRawMap() {
-#ifdef DEBUG
- isolate_->counters()->objs_since_last_full()->Increment();
- isolate_->counters()->objs_since_last_young()->Increment();
-#endif
- MaybeObject* result = map_space_->AllocateRaw(Map::kSize);
- if (result->IsFailure()) old_gen_exhausted_ = true;
- return result;
-}
-
-
-MaybeObject* Heap::AllocateRawCell() {
-#ifdef DEBUG
- isolate_->counters()->objs_since_last_full()->Increment();
- isolate_->counters()->objs_since_last_young()->Increment();
-#endif
- MaybeObject* result = cell_space_->AllocateRaw(JSGlobalPropertyCell::kSize);
- if (result->IsFailure()) old_gen_exhausted_ = true;
- return result;
-}
-
-
bool Heap::InNewSpace(Object* object) {
bool result = new_space_.Contains(object);
ASSERT(!result || // Either not in new space
@@ -294,8 +305,8 @@ bool Heap::InNewSpace(Object* object) {
}
-bool Heap::InNewSpace(Address addr) {
- return new_space_.Contains(addr);
+bool Heap::InNewSpace(Address address) {
+ return new_space_.Contains(address);
}
@@ -309,6 +320,26 @@ bool Heap::InToSpace(Object* object) {
}
+bool Heap::InOldPointerSpace(Address address) {
+ return old_pointer_space_->Contains(address);
+}
+
+
+bool Heap::InOldPointerSpace(Object* object) {
+ return InOldPointerSpace(reinterpret_cast<Address>(object));
+}
+
+
+bool Heap::InOldDataSpace(Address address) {
+ return old_data_space_->Contains(address);
+}
+
+
+bool Heap::InOldDataSpace(Object* object) {
+ return InOldDataSpace(reinterpret_cast<Address>(object));
+}
+
+
bool Heap::OldGenerationAllocationLimitReached() {
if (!incremental_marking()->IsStopped()) return false;
return OldGenerationSpaceAvailable() < 0;
@@ -361,9 +392,12 @@ AllocationSpace Heap::TargetSpaceId(InstanceType type) {
ASSERT(type != MAP_TYPE);
ASSERT(type != CODE_TYPE);
ASSERT(type != ODDBALL_TYPE);
- ASSERT(type != JS_GLOBAL_PROPERTY_CELL_TYPE);
+ ASSERT(type != CELL_TYPE);
+ ASSERT(type != PROPERTY_CELL_TYPE);
- if (type < FIRST_NONSTRING_TYPE) {
+ if (type <= LAST_NAME_TYPE) {
+ if (type == SYMBOL_TYPE) return OLD_POINTER_SPACE;
+ ASSERT(type < FIRST_NONSTRING_TYPE);
// There are four string representations: sequential strings, external
// strings, cons strings, and sliced strings.
// Only the latter two contain non-map-word pointers to heap objects.
@@ -376,10 +410,47 @@ AllocationSpace Heap::TargetSpaceId(InstanceType type) {
}
+bool Heap::AllowedToBeMigrated(HeapObject* object, AllocationSpace dst) {
+ // Object migration is governed by the following rules:
+ //
+ // 1) Objects in new-space can be migrated to one of the old spaces
+ // that matches their target space or they stay in new-space.
+ // 2) Objects in old-space stay in the same space when migrating.
+ // 3) Fillers (two or more words) can migrate due to left-trimming of
+ // fixed arrays in new-space, old-data-space and old-pointer-space.
+ // 4) Fillers (one word) can never migrate, they are skipped by
+ // incremental marking explicitly to prevent invalid pattern.
+ //
+ // Since this function is used for debugging only, we do not place
+ // asserts here, but check everything explicitly.
+ if (object->map() == one_pointer_filler_map()) return false;
+ InstanceType type = object->map()->instance_type();
+ MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
+ AllocationSpace src = chunk->owner()->identity();
+ switch (src) {
+ case NEW_SPACE:
+ return dst == src || dst == TargetSpaceId(type);
+ case OLD_POINTER_SPACE:
+ return dst == src && (dst == TargetSpaceId(type) || object->IsFiller());
+ case OLD_DATA_SPACE:
+ return dst == src && dst == TargetSpaceId(type);
+ case CODE_SPACE:
+ return dst == src && type == CODE_TYPE;
+ case MAP_SPACE:
+ case CELL_SPACE:
+ case PROPERTY_CELL_SPACE:
+ case LO_SPACE:
+ return false;
+ }
+ UNREACHABLE();
+ return false;
+}
+
+
void Heap::CopyBlock(Address dst, Address src, int byte_size) {
CopyWords(reinterpret_cast<Object**>(dst),
reinterpret_cast<Object**>(src),
- byte_size / kPointerSize);
+ static_cast<size_t>(byte_size / kPointerSize));
}
@@ -397,7 +468,7 @@ void Heap::MoveBlock(Address dst, Address src, int byte_size) {
*dst_slot++ = *src_slot++;
}
} else {
- memmove(dst, src, byte_size);
+ OS::MemMove(dst, src, static_cast<size_t>(byte_size));
}
}
@@ -408,7 +479,7 @@ void Heap::ScavengePointer(HeapObject** p) {
void Heap::ScavengeObject(HeapObject** p, HeapObject* object) {
- ASSERT(HEAP->InFromSpace(object));
+ ASSERT(object->GetIsolate()->heap()->InFromSpace(object));
// We use the first word (where the map pointer usually is) of a heap
// object to record the forwarding pointer. A forwarding pointer can
@@ -420,11 +491,20 @@ void Heap::ScavengeObject(HeapObject** p, HeapObject* object) {
// copied.
if (first_word.IsForwardingAddress()) {
HeapObject* dest = first_word.ToForwardingAddress();
- ASSERT(HEAP->InFromSpace(*p));
+ ASSERT(object->GetIsolate()->heap()->InFromSpace(*p));
*p = dest;
return;
}
+ if (FLAG_trace_track_allocation_sites && object->IsJSObject()) {
+ if (AllocationMemento::FindForJSObject(JSObject::cast(object), true) !=
+ NULL) {
+ object->GetIsolate()->heap()->allocation_mementos_found_++;
+ }
+ }
+
+ // AllocationMementos are unrooted and shouldn't survive a scavenge
+ ASSERT(object->map() != object->GetHeap()->allocation_memento_map());
// Call the slow part of scavenge object.
return ScavengeObjectSlow(p, object);
}
@@ -460,7 +540,7 @@ intptr_t Heap::AdjustAmountOfExternalAllocatedMemory(
intptr_t change_in_bytes) {
ASSERT(HasBeenSetUp());
intptr_t amount = amount_of_external_allocated_memory_ + change_in_bytes;
- if (change_in_bytes >= 0) {
+ if (change_in_bytes > 0) {
// Avoid overflow.
if (amount > amount_of_external_allocated_memory_) {
amount_of_external_allocated_memory_ = amount;
@@ -478,7 +558,7 @@ intptr_t Heap::AdjustAmountOfExternalAllocatedMemory(
if (amount >= 0) {
amount_of_external_allocated_memory_ = amount;
} else {
- // Give up and reset the counters in case of an overflow.
+ // Give up and reset the counters in case of an underflow.
amount_of_external_allocated_memory_ = 0;
amount_of_external_allocated_memory_at_last_global_gc_ = 0;
}
@@ -486,8 +566,11 @@ intptr_t Heap::AdjustAmountOfExternalAllocatedMemory(
if (FLAG_trace_external_memory) {
PrintPID("%8.0f ms: ", isolate()->time_millis_since_init());
PrintF("Adjust amount of external memory: delta=%6" V8_PTR_PREFIX "d KB, "
- " amount=%6" V8_PTR_PREFIX "d KB, isolate=0x%08" V8PRIxPTR ".\n",
- change_in_bytes / 1024, amount_of_external_allocated_memory_ / 1024,
+ "amount=%6" V8_PTR_PREFIX "d KB, since_gc=%6" V8_PTR_PREFIX "d KB, "
+ "isolate=0x%08" V8PRIxPTR ".\n",
+ change_in_bytes / KB,
+ amount_of_external_allocated_memory_ / KB,
+ PromotedExternalMemorySize() / KB,
reinterpret_cast<intptr_t>(isolate()));
}
ASSERT(amount_of_external_allocated_memory_ >= 0);
@@ -495,11 +578,6 @@ intptr_t Heap::AdjustAmountOfExternalAllocatedMemory(
}
-void Heap::SetLastScriptId(Object* last_script_id) {
- roots_[kLastScriptIdRootIndex] = last_script_id;
-}
-
-
Isolate* Heap::isolate() {
return reinterpret_cast<Isolate*>(reinterpret_cast<intptr_t>(this) -
reinterpret_cast<size_t>(reinterpret_cast<Isolate*>(4)->heap()) + 4);
@@ -507,10 +585,10 @@ Isolate* Heap::isolate() {
#ifdef DEBUG
-#define GC_GREEDY_CHECK() \
- if (FLAG_gc_greedy) HEAP->GarbageCollectionGreedyCheck()
+#define GC_GREEDY_CHECK(ISOLATE) \
+ if (FLAG_gc_greedy) (ISOLATE)->heap()->GarbageCollectionGreedyCheck()
#else
-#define GC_GREEDY_CHECK() { }
+#define GC_GREEDY_CHECK(ISOLATE) { }
#endif
// Calls the FUNCTION_CALL function and retries it up to three times
@@ -520,61 +598,68 @@ Isolate* Heap::isolate() {
// Warning: Do not use the identifiers __object__, __maybe_object__ or
// __scope__ in a call to this macro.
-#define CALL_AND_RETRY(ISOLATE, FUNCTION_CALL, RETURN_VALUE, RETURN_EMPTY)\
- do { \
- GC_GREEDY_CHECK(); \
- MaybeObject* __maybe_object__ = FUNCTION_CALL; \
- Object* __object__ = NULL; \
- if (__maybe_object__->ToObject(&__object__)) RETURN_VALUE; \
- if (__maybe_object__->IsOutOfMemory()) { \
- v8::internal::V8::FatalProcessOutOfMemory("CALL_AND_RETRY_0", true);\
- } \
- if (!__maybe_object__->IsRetryAfterGC()) RETURN_EMPTY; \
- ISOLATE->heap()->CollectGarbage(Failure::cast(__maybe_object__)-> \
- allocation_space(), \
- "allocation failure"); \
- __maybe_object__ = FUNCTION_CALL; \
- if (__maybe_object__->ToObject(&__object__)) RETURN_VALUE; \
- if (__maybe_object__->IsOutOfMemory()) { \
- v8::internal::V8::FatalProcessOutOfMemory("CALL_AND_RETRY_1", true);\
- } \
- if (!__maybe_object__->IsRetryAfterGC()) RETURN_EMPTY; \
- ISOLATE->counters()->gc_last_resort_from_handles()->Increment(); \
- ISOLATE->heap()->CollectAllAvailableGarbage("last resort gc"); \
- { \
- AlwaysAllocateScope __scope__; \
- __maybe_object__ = FUNCTION_CALL; \
- } \
- if (__maybe_object__->ToObject(&__object__)) RETURN_VALUE; \
- if (__maybe_object__->IsOutOfMemory() || \
- __maybe_object__->IsRetryAfterGC()) { \
- /* TODO(1181417): Fix this. */ \
- v8::internal::V8::FatalProcessOutOfMemory("CALL_AND_RETRY_2", true);\
- } \
- RETURN_EMPTY; \
+#define CALL_AND_RETRY(ISOLATE, FUNCTION_CALL, RETURN_VALUE, RETURN_EMPTY, OOM)\
+ do { \
+ GC_GREEDY_CHECK(ISOLATE); \
+ MaybeObject* __maybe_object__ = FUNCTION_CALL; \
+ Object* __object__ = NULL; \
+ if (__maybe_object__->ToObject(&__object__)) RETURN_VALUE; \
+ if (__maybe_object__->IsOutOfMemory()) { \
+ OOM; \
+ } \
+ if (!__maybe_object__->IsRetryAfterGC()) RETURN_EMPTY; \
+ (ISOLATE)->heap()->CollectGarbage(Failure::cast(__maybe_object__)-> \
+ allocation_space(), \
+ "allocation failure"); \
+ __maybe_object__ = FUNCTION_CALL; \
+ if (__maybe_object__->ToObject(&__object__)) RETURN_VALUE; \
+ if (__maybe_object__->IsOutOfMemory()) { \
+ OOM; \
+ } \
+ if (!__maybe_object__->IsRetryAfterGC()) RETURN_EMPTY; \
+ (ISOLATE)->counters()->gc_last_resort_from_handles()->Increment(); \
+ (ISOLATE)->heap()->CollectAllAvailableGarbage("last resort gc"); \
+ { \
+ AlwaysAllocateScope __scope__; \
+ __maybe_object__ = FUNCTION_CALL; \
+ } \
+ if (__maybe_object__->ToObject(&__object__)) RETURN_VALUE; \
+ if (__maybe_object__->IsOutOfMemory()) { \
+ OOM; \
+ } \
+ if (__maybe_object__->IsRetryAfterGC()) { \
+ /* TODO(1181417): Fix this. */ \
+ v8::internal::V8::FatalProcessOutOfMemory("CALL_AND_RETRY_LAST", true); \
+ } \
+ RETURN_EMPTY; \
} while (false)
+#define CALL_AND_RETRY_OR_DIE( \
+ ISOLATE, FUNCTION_CALL, RETURN_VALUE, RETURN_EMPTY) \
+ CALL_AND_RETRY( \
+ ISOLATE, \
+ FUNCTION_CALL, \
+ RETURN_VALUE, \
+ RETURN_EMPTY, \
+ v8::internal::V8::FatalProcessOutOfMemory("CALL_AND_RETRY", true))
-#define CALL_HEAP_FUNCTION(ISOLATE, FUNCTION_CALL, TYPE) \
- CALL_AND_RETRY(ISOLATE, \
- FUNCTION_CALL, \
- return Handle<TYPE>(TYPE::cast(__object__), ISOLATE), \
- return Handle<TYPE>())
+#define CALL_HEAP_FUNCTION(ISOLATE, FUNCTION_CALL, TYPE) \
+ CALL_AND_RETRY_OR_DIE(ISOLATE, \
+ FUNCTION_CALL, \
+ return Handle<TYPE>(TYPE::cast(__object__), ISOLATE), \
+ return Handle<TYPE>()) \
-#define CALL_HEAP_FUNCTION_VOID(ISOLATE, FUNCTION_CALL) \
- CALL_AND_RETRY(ISOLATE, FUNCTION_CALL, return, return)
+#define CALL_HEAP_FUNCTION_VOID(ISOLATE, FUNCTION_CALL) \
+ CALL_AND_RETRY_OR_DIE(ISOLATE, FUNCTION_CALL, return, return)
-#ifdef DEBUG
-
-inline bool Heap::allow_allocation(bool new_state) {
- bool old = allocation_allowed_;
- allocation_allowed_ = new_state;
- return old;
-}
-
-#endif
+#define CALL_HEAP_FUNCTION_PASS_EXCEPTION(ISOLATE, FUNCTION_CALL) \
+ CALL_AND_RETRY(ISOLATE, \
+ FUNCTION_CALL, \
+ return __object__, \
+ return __maybe_object__, \
+ return __maybe_object__)
void ExternalStringTable::AddString(String* string) {
@@ -605,23 +690,13 @@ void ExternalStringTable::Verify() {
#ifdef DEBUG
for (int i = 0; i < new_space_strings_.length(); ++i) {
Object* obj = Object::cast(new_space_strings_[i]);
- // TODO(yangguo): check that the object is indeed an external string.
ASSERT(heap_->InNewSpace(obj));
- ASSERT(obj != HEAP->raw_unchecked_the_hole_value());
- if (obj->IsExternalAsciiString()) {
- ExternalAsciiString* string = ExternalAsciiString::cast(obj);
- ASSERT(String::IsAscii(string->GetChars(), string->length()));
- }
+ ASSERT(obj != heap_->the_hole_value());
}
for (int i = 0; i < old_space_strings_.length(); ++i) {
Object* obj = Object::cast(old_space_strings_[i]);
- // TODO(yangguo): check that the object is indeed an external string.
ASSERT(!heap_->InNewSpace(obj));
- ASSERT(obj != HEAP->raw_unchecked_the_hole_value());
- if (obj->IsExternalAsciiString()) {
- ExternalAsciiString* string = ExternalAsciiString::cast(obj);
- ASSERT(String::IsAscii(string->GetChars(), string->length()));
- }
+ ASSERT(obj != heap_->the_hole_value());
}
#endif
}
@@ -663,7 +738,7 @@ void Heap::CompletelyClearInstanceofCache() {
MaybeObject* TranscendentalCache::Get(Type type, double input) {
SubCache* cache = caches_[type];
if (cache == NULL) {
- caches_[type] = cache = new SubCache(type);
+ caches_[type] = cache = new SubCache(isolate_, type);
}
return cache->Get(input);
}
@@ -728,22 +803,38 @@ AlwaysAllocateScope::AlwaysAllocateScope() {
// non-handle code to call handle code. The code still works but
// performance will degrade, so we want to catch this situation
// in debug mode.
- ASSERT(HEAP->always_allocate_scope_depth_ == 0);
- HEAP->always_allocate_scope_depth_++;
+ Isolate* isolate = Isolate::Current();
+ ASSERT(isolate->heap()->always_allocate_scope_depth_ == 0);
+ isolate->heap()->always_allocate_scope_depth_++;
}
AlwaysAllocateScope::~AlwaysAllocateScope() {
- HEAP->always_allocate_scope_depth_--;
- ASSERT(HEAP->always_allocate_scope_depth_ == 0);
+ Isolate* isolate = Isolate::Current();
+ isolate->heap()->always_allocate_scope_depth_--;
+ ASSERT(isolate->heap()->always_allocate_scope_depth_ == 0);
}
+#ifdef VERIFY_HEAP
+NoWeakObjectVerificationScope::NoWeakObjectVerificationScope() {
+ Isolate* isolate = Isolate::Current();
+ isolate->heap()->no_weak_object_verification_scope_depth_++;
+}
+
+
+NoWeakObjectVerificationScope::~NoWeakObjectVerificationScope() {
+ Isolate* isolate = Isolate::Current();
+ isolate->heap()->no_weak_object_verification_scope_depth_--;
+}
+#endif
+
+
void VerifyPointersVisitor::VisitPointers(Object** start, Object** end) {
for (Object** current = start; current < end; current++) {
if ((*current)->IsHeapObject()) {
HeapObject* object = HeapObject::cast(*current);
- CHECK(HEAP->Contains(object));
+ CHECK(object->GetIsolate()->heap()->Contains(object));
CHECK(object->map()->IsMap());
}
}
@@ -751,63 +842,27 @@ void VerifyPointersVisitor::VisitPointers(Object** start, Object** end) {
double GCTracer::SizeOfHeapObjects() {
- return (static_cast<double>(HEAP->SizeOfObjects())) / MB;
+ return (static_cast<double>(heap_->SizeOfObjects())) / MB;
}
DisallowAllocationFailure::DisallowAllocationFailure() {
#ifdef DEBUG
- old_state_ = HEAP->disallow_allocation_failure_;
- HEAP->disallow_allocation_failure_ = true;
+ Isolate* isolate = Isolate::Current();
+ old_state_ = isolate->heap()->disallow_allocation_failure_;
+ isolate->heap()->disallow_allocation_failure_ = true;
#endif
}
DisallowAllocationFailure::~DisallowAllocationFailure() {
#ifdef DEBUG
- HEAP->disallow_allocation_failure_ = old_state_;
+ Isolate* isolate = Isolate::Current();
+ isolate->heap()->disallow_allocation_failure_ = old_state_;
#endif
}
-#ifdef DEBUG
-AssertNoAllocation::AssertNoAllocation() {
- Isolate* isolate = ISOLATE;
- active_ = !isolate->optimizing_compiler_thread()->IsOptimizerThread();
- if (active_) {
- old_state_ = isolate->heap()->allow_allocation(false);
- }
-}
-
-
-AssertNoAllocation::~AssertNoAllocation() {
- if (active_) HEAP->allow_allocation(old_state_);
-}
-
-
-DisableAssertNoAllocation::DisableAssertNoAllocation() {
- Isolate* isolate = ISOLATE;
- active_ = !isolate->optimizing_compiler_thread()->IsOptimizerThread();
- if (active_) {
- old_state_ = isolate->heap()->allow_allocation(true);
- }
-}
-
-
-DisableAssertNoAllocation::~DisableAssertNoAllocation() {
- if (active_) HEAP->allow_allocation(old_state_);
-}
-
-#else
-
-AssertNoAllocation::AssertNoAllocation() { }
-AssertNoAllocation::~AssertNoAllocation() { }
-DisableAssertNoAllocation::DisableAssertNoAllocation() { }
-DisableAssertNoAllocation::~DisableAssertNoAllocation() { }
-
-#endif
-
-
} } // namespace v8::internal
#endif // V8_HEAP_INL_H_
diff --git a/deps/v8/src/heap-profiler.cc b/deps/v8/src/heap-profiler.cc
index 301b09993..6b159a98a 100644
--- a/deps/v8/src/heap-profiler.cc
+++ b/deps/v8/src/heap-profiler.cc
@@ -27,15 +27,17 @@
#include "v8.h"
+#include "deoptimizer.h"
#include "heap-profiler.h"
-#include "profile-generator.h"
+#include "heap-snapshot-generator-inl.h"
namespace v8 {
namespace internal {
-HeapProfiler::HeapProfiler()
- : snapshots_(new HeapSnapshotsCollection()),
- next_snapshot_uid_(1) {
+HeapProfiler::HeapProfiler(Heap* heap)
+ : snapshots_(new HeapSnapshotsCollection(heap)),
+ next_snapshot_uid_(1),
+ is_tracking_allocations_(false) {
}
@@ -44,62 +46,10 @@ HeapProfiler::~HeapProfiler() {
}
-void HeapProfiler::ResetSnapshots() {
+void HeapProfiler::DeleteAllSnapshots() {
+ Heap* the_heap = heap();
delete snapshots_;
- snapshots_ = new HeapSnapshotsCollection();
-}
-
-
-void HeapProfiler::SetUp() {
- Isolate* isolate = Isolate::Current();
- if (isolate->heap_profiler() == NULL) {
- isolate->set_heap_profiler(new HeapProfiler());
- }
-}
-
-
-void HeapProfiler::TearDown() {
- Isolate* isolate = Isolate::Current();
- delete isolate->heap_profiler();
- isolate->set_heap_profiler(NULL);
-}
-
-
-HeapSnapshot* HeapProfiler::TakeSnapshot(const char* name,
- int type,
- v8::ActivityControl* control) {
- ASSERT(Isolate::Current()->heap_profiler() != NULL);
- return Isolate::Current()->heap_profiler()->TakeSnapshotImpl(name,
- type,
- control);
-}
-
-
-HeapSnapshot* HeapProfiler::TakeSnapshot(String* name,
- int type,
- v8::ActivityControl* control) {
- ASSERT(Isolate::Current()->heap_profiler() != NULL);
- return Isolate::Current()->heap_profiler()->TakeSnapshotImpl(name,
- type,
- control);
-}
-
-
-void HeapProfiler::StartHeapObjectsTracking() {
- ASSERT(Isolate::Current()->heap_profiler() != NULL);
- Isolate::Current()->heap_profiler()->StartHeapObjectsTrackingImpl();
-}
-
-
-void HeapProfiler::StopHeapObjectsTracking() {
- ASSERT(Isolate::Current()->heap_profiler() != NULL);
- Isolate::Current()->heap_profiler()->StopHeapObjectsTrackingImpl();
-}
-
-
-SnapshotObjectId HeapProfiler::PushHeapObjectsStats(v8::OutputStream* stream) {
- ASSERT(Isolate::Current()->heap_profiler() != NULL);
- return Isolate::Current()->heap_profiler()->PushHeapObjectsStatsImpl(stream);
+ snapshots_ = new HeapSnapshotsCollection(the_heap);
}
@@ -122,99 +72,147 @@ v8::RetainedObjectInfo* HeapProfiler::ExecuteWrapperClassCallback(
}
-HeapSnapshot* HeapProfiler::TakeSnapshotImpl(const char* name,
- int type,
- v8::ActivityControl* control) {
- HeapSnapshot::Type s_type = static_cast<HeapSnapshot::Type>(type);
- HeapSnapshot* result =
- snapshots_->NewSnapshot(s_type, name, next_snapshot_uid_++);
- bool generation_completed = true;
- switch (s_type) {
- case HeapSnapshot::kFull: {
- HeapSnapshotGenerator generator(result, control);
- generation_completed = generator.GenerateSnapshot();
- break;
+HeapSnapshot* HeapProfiler::TakeSnapshot(
+ const char* name,
+ v8::ActivityControl* control,
+ v8::HeapProfiler::ObjectNameResolver* resolver) {
+ HeapSnapshot* result = snapshots_->NewSnapshot(name, next_snapshot_uid_++);
+ {
+ HeapSnapshotGenerator generator(result, control, resolver, heap());
+ if (!generator.GenerateSnapshot()) {
+ delete result;
+ result = NULL;
}
- default:
- UNREACHABLE();
- }
- if (!generation_completed) {
- delete result;
- result = NULL;
}
snapshots_->SnapshotGenerationFinished(result);
return result;
}
-HeapSnapshot* HeapProfiler::TakeSnapshotImpl(String* name,
- int type,
- v8::ActivityControl* control) {
- return TakeSnapshotImpl(snapshots_->names()->GetName(name), type, control);
+HeapSnapshot* HeapProfiler::TakeSnapshot(
+ String* name,
+ v8::ActivityControl* control,
+ v8::HeapProfiler::ObjectNameResolver* resolver) {
+ return TakeSnapshot(snapshots_->names()->GetName(name), control, resolver);
}
-void HeapProfiler::StartHeapObjectsTrackingImpl() {
+
+void HeapProfiler::StartHeapObjectsTracking() {
snapshots_->StartHeapObjectsTracking();
}
-SnapshotObjectId HeapProfiler::PushHeapObjectsStatsImpl(OutputStream* stream) {
+SnapshotObjectId HeapProfiler::PushHeapObjectsStats(OutputStream* stream) {
return snapshots_->PushHeapObjectsStats(stream);
}
-void HeapProfiler::StopHeapObjectsTrackingImpl() {
+void HeapProfiler::StopHeapObjectsTracking() {
snapshots_->StopHeapObjectsTracking();
}
size_t HeapProfiler::GetMemorySizeUsedByProfiler() {
- HeapProfiler* profiler = Isolate::Current()->heap_profiler();
- ASSERT(profiler != NULL);
- size_t size = profiler->snapshots_->GetUsedMemorySize();
- return size;
+ return snapshots_->GetUsedMemorySize();
}
int HeapProfiler::GetSnapshotsCount() {
- HeapProfiler* profiler = Isolate::Current()->heap_profiler();
- ASSERT(profiler != NULL);
- return profiler->snapshots_->snapshots()->length();
+ return snapshots_->snapshots()->length();
}
HeapSnapshot* HeapProfiler::GetSnapshot(int index) {
- HeapProfiler* profiler = Isolate::Current()->heap_profiler();
- ASSERT(profiler != NULL);
- return profiler->snapshots_->snapshots()->at(index);
-}
-
-
-HeapSnapshot* HeapProfiler::FindSnapshot(unsigned uid) {
- HeapProfiler* profiler = Isolate::Current()->heap_profiler();
- ASSERT(profiler != NULL);
- return profiler->snapshots_->GetSnapshot(uid);
+ return snapshots_->snapshots()->at(index);
}
SnapshotObjectId HeapProfiler::GetSnapshotObjectId(Handle<Object> obj) {
if (!obj->IsHeapObject())
return v8::HeapProfiler::kUnknownObjectId;
- HeapProfiler* profiler = Isolate::Current()->heap_profiler();
- ASSERT(profiler != NULL);
- return profiler->snapshots_->FindObjectId(HeapObject::cast(*obj)->address());
+ return snapshots_->FindObjectId(HeapObject::cast(*obj)->address());
}
-void HeapProfiler::DeleteAllSnapshots() {
- HeapProfiler* profiler = Isolate::Current()->heap_profiler();
- ASSERT(profiler != NULL);
- profiler->ResetSnapshots();
+void HeapProfiler::ObjectMoveEvent(Address from, Address to, int size) {
+ snapshots_->ObjectMoveEvent(from, to, size);
+}
+
+
+void HeapProfiler::NewObjectEvent(Address addr, int size) {
+ snapshots_->NewObjectEvent(addr, size);
+}
+
+
+void HeapProfiler::UpdateObjectSizeEvent(Address addr, int size) {
+ snapshots_->UpdateObjectSizeEvent(addr, size);
}
-void HeapProfiler::ObjectMoveEvent(Address from, Address to) {
- snapshots_->ObjectMoveEvent(from, to);
+void HeapProfiler::SetRetainedObjectInfo(UniqueId id,
+ RetainedObjectInfo* info) {
+ // TODO(yurus, marja): Don't route this information through GlobalHandles.
+ heap()->isolate()->global_handles()->SetRetainedObjectInfo(id, info);
+}
+
+
+void HeapProfiler::StartHeapAllocationsRecording() {
+ StartHeapObjectsTracking();
+ is_tracking_allocations_ = true;
+ DropCompiledCode();
+ snapshots_->UpdateHeapObjectsMap();
+}
+
+
+void HeapProfiler::StopHeapAllocationsRecording() {
+ StopHeapObjectsTracking();
+ is_tracking_allocations_ = false;
+ DropCompiledCode();
+}
+
+
+void HeapProfiler::RecordObjectAllocationFromMasm(Isolate* isolate,
+ Address obj,
+ int size) {
+ isolate->heap_profiler()->NewObjectEvent(obj, size);
+}
+
+
+void HeapProfiler::DropCompiledCode() {
+ Isolate* isolate = heap()->isolate();
+ HandleScope scope(isolate);
+
+ if (FLAG_concurrent_recompilation) {
+ isolate->optimizing_compiler_thread()->Flush();
+ }
+
+ Deoptimizer::DeoptimizeAll(isolate);
+
+ Handle<Code> lazy_compile =
+ Handle<Code>(isolate->builtins()->builtin(Builtins::kLazyCompile));
+
+ heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask,
+ "switch allocations tracking");
+
+ DisallowHeapAllocation no_allocation;
+
+ HeapIterator iterator(heap());
+ HeapObject* obj = NULL;
+ while (((obj = iterator.next()) != NULL)) {
+ if (obj->IsJSFunction()) {
+ JSFunction* function = JSFunction::cast(obj);
+ SharedFunctionInfo* shared = function->shared();
+
+ if (!shared->allows_lazy_compilation()) continue;
+ if (!shared->script()->IsScript()) continue;
+
+ Code::Kind kind = function->code()->kind();
+ if (kind == Code::FUNCTION || kind == Code::BUILTIN) {
+ function->set_code(*lazy_compile);
+ shared->set_code(*lazy_compile);
+ }
+ }
+ }
}
diff --git a/deps/v8/src/heap-profiler.h b/deps/v8/src/heap-profiler.h
index 346177b8b..74002278d 100644
--- a/deps/v8/src/heap-profiler.h
+++ b/deps/v8/src/heap-profiler.h
@@ -28,6 +28,7 @@
#ifndef V8_HEAP_PROFILER_H_
#define V8_HEAP_PROFILER_H_
+#include "heap-snapshot-generator-inl.h"
#include "isolate.h"
namespace v8 {
@@ -36,38 +37,40 @@ namespace internal {
class HeapSnapshot;
class HeapSnapshotsCollection;
-#define HEAP_PROFILE(heap, call) \
- do { \
- v8::internal::HeapProfiler* profiler = heap->isolate()->heap_profiler(); \
- if (profiler != NULL && profiler->is_profiling()) { \
- profiler->call; \
- } \
- } while (false)
-
class HeapProfiler {
public:
- static void SetUp();
- static void TearDown();
+ explicit HeapProfiler(Heap* heap);
+ ~HeapProfiler();
+
+ size_t GetMemorySizeUsedByProfiler();
+
+ HeapSnapshot* TakeSnapshot(
+ const char* name,
+ v8::ActivityControl* control,
+ v8::HeapProfiler::ObjectNameResolver* resolver);
+ HeapSnapshot* TakeSnapshot(
+ String* name,
+ v8::ActivityControl* control,
+ v8::HeapProfiler::ObjectNameResolver* resolver);
- static size_t GetMemorySizeUsedByProfiler();
+ void StartHeapObjectsTracking();
+ void StopHeapObjectsTracking();
- static HeapSnapshot* TakeSnapshot(const char* name,
- int type,
- v8::ActivityControl* control);
- static HeapSnapshot* TakeSnapshot(String* name,
- int type,
- v8::ActivityControl* control);
+ static void RecordObjectAllocationFromMasm(Isolate* isolate,
+ Address obj,
+ int size);
- static void StartHeapObjectsTracking();
- static void StopHeapObjectsTracking();
- static SnapshotObjectId PushHeapObjectsStats(OutputStream* stream);
- static int GetSnapshotsCount();
- static HeapSnapshot* GetSnapshot(int index);
- static HeapSnapshot* FindSnapshot(unsigned uid);
- static SnapshotObjectId GetSnapshotObjectId(Handle<Object> obj);
- static void DeleteAllSnapshots();
+ SnapshotObjectId PushHeapObjectsStats(OutputStream* stream);
+ int GetSnapshotsCount();
+ HeapSnapshot* GetSnapshot(int index);
+ SnapshotObjectId GetSnapshotObjectId(Handle<Object> obj);
+ void DeleteAllSnapshots();
- void ObjectMoveEvent(Address from, Address to);
+ void ObjectMoveEvent(Address from, Address to, int size);
+
+ void NewObjectEvent(Address addr, int size);
+
+ void UpdateObjectSizeEvent(Address addr, int size);
void DefineWrapperClass(
uint16_t class_id, v8::HeapProfiler::WrapperInfoCallback callback);
@@ -78,24 +81,28 @@ class HeapProfiler {
return snapshots_->is_tracking_objects();
}
+ void SetRetainedObjectInfo(UniqueId id, RetainedObjectInfo* info);
+
+ bool is_tracking_allocations() {
+ return is_tracking_allocations_;
+ }
+
+ void StartHeapAllocationsRecording();
+ void StopHeapAllocationsRecording();
+
+ int FindUntrackedObjects() {
+ return snapshots_->FindUntrackedObjects();
+ }
+
+ void DropCompiledCode();
+
private:
- HeapProfiler();
- ~HeapProfiler();
- HeapSnapshot* TakeSnapshotImpl(const char* name,
- int type,
- v8::ActivityControl* control);
- HeapSnapshot* TakeSnapshotImpl(String* name,
- int type,
- v8::ActivityControl* control);
- void ResetSnapshots();
-
- void StartHeapObjectsTrackingImpl();
- void StopHeapObjectsTrackingImpl();
- SnapshotObjectId PushHeapObjectsStatsImpl(OutputStream* stream);
+ Heap* heap() const { return snapshots_->heap(); }
HeapSnapshotsCollection* snapshots_;
unsigned next_snapshot_uid_;
List<v8::HeapProfiler::WrapperInfoCallback> wrapper_callbacks_;
+ bool is_tracking_allocations_;
};
} } // namespace v8::internal
diff --git a/deps/v8/src/heap-snapshot-generator-inl.h b/deps/v8/src/heap-snapshot-generator-inl.h
new file mode 100644
index 000000000..1a878c6df
--- /dev/null
+++ b/deps/v8/src/heap-snapshot-generator-inl.h
@@ -0,0 +1,88 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_HEAP_SNAPSHOT_GENERATOR_INL_H_
+#define V8_HEAP_SNAPSHOT_GENERATOR_INL_H_
+
+#include "heap-snapshot-generator.h"
+
+namespace v8 {
+namespace internal {
+
+
+HeapEntry* HeapGraphEdge::from() const {
+ return &snapshot()->entries()[from_index_];
+}
+
+
+HeapSnapshot* HeapGraphEdge::snapshot() const {
+ return to_entry_->snapshot();
+}
+
+
+int HeapEntry::index() const {
+ return static_cast<int>(this - &snapshot_->entries().first());
+}
+
+
+int HeapEntry::set_children_index(int index) {
+ children_index_ = index;
+ int next_index = index + children_count_;
+ children_count_ = 0;
+ return next_index;
+}
+
+
+HeapGraphEdge** HeapEntry::children_arr() {
+ ASSERT(children_index_ >= 0);
+ return &snapshot_->children()[children_index_];
+}
+
+
+SnapshotObjectId HeapObjectsMap::GetNthGcSubrootId(int delta) {
+ return kGcRootsFirstSubrootId + delta * kObjectIdStep;
+}
+
+
+HeapObject* V8HeapExplorer::GetNthGcSubrootObject(int delta) {
+ return reinterpret_cast<HeapObject*>(
+ reinterpret_cast<char*>(kFirstGcSubrootObject) +
+ delta * HeapObjectsMap::kObjectIdStep);
+}
+
+
+int V8HeapExplorer::GetGcSubrootOrder(HeapObject* subroot) {
+ return static_cast<int>(
+ (reinterpret_cast<char*>(subroot) -
+ reinterpret_cast<char*>(kFirstGcSubrootObject)) /
+ HeapObjectsMap::kObjectIdStep);
+}
+
+} } // namespace v8::internal
+
+#endif // V8_HEAP_SNAPSHOT_GENERATOR_INL_H_
+
diff --git a/deps/v8/src/heap-snapshot-generator.cc b/deps/v8/src/heap-snapshot-generator.cc
new file mode 100644
index 000000000..10d113c3d
--- /dev/null
+++ b/deps/v8/src/heap-snapshot-generator.cc
@@ -0,0 +1,3035 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "heap-snapshot-generator-inl.h"
+
+#include "allocation-tracker.h"
+#include "heap-profiler.h"
+#include "debug.h"
+#include "types.h"
+
+namespace v8 {
+namespace internal {
+
+
+HeapGraphEdge::HeapGraphEdge(Type type, const char* name, int from, int to)
+ : type_(type),
+ from_index_(from),
+ to_index_(to),
+ name_(name) {
+ ASSERT(type == kContextVariable
+ || type == kProperty
+ || type == kInternal
+ || type == kShortcut);
+}
+
+
+HeapGraphEdge::HeapGraphEdge(Type type, int index, int from, int to)
+ : type_(type),
+ from_index_(from),
+ to_index_(to),
+ index_(index) {
+ ASSERT(type == kElement || type == kHidden || type == kWeak);
+}
+
+
+void HeapGraphEdge::ReplaceToIndexWithEntry(HeapSnapshot* snapshot) {
+ to_entry_ = &snapshot->entries()[to_index_];
+}
+
+
+const int HeapEntry::kNoEntry = -1;
+
+HeapEntry::HeapEntry(HeapSnapshot* snapshot,
+ Type type,
+ const char* name,
+ SnapshotObjectId id,
+ int self_size)
+ : type_(type),
+ children_count_(0),
+ children_index_(-1),
+ self_size_(self_size),
+ id_(id),
+ snapshot_(snapshot),
+ name_(name) { }
+
+
+void HeapEntry::SetNamedReference(HeapGraphEdge::Type type,
+ const char* name,
+ HeapEntry* entry) {
+ HeapGraphEdge edge(type, name, this->index(), entry->index());
+ snapshot_->edges().Add(edge);
+ ++children_count_;
+}
+
+
+void HeapEntry::SetIndexedReference(HeapGraphEdge::Type type,
+ int index,
+ HeapEntry* entry) {
+ HeapGraphEdge edge(type, index, this->index(), entry->index());
+ snapshot_->edges().Add(edge);
+ ++children_count_;
+}
+
+
+Handle<HeapObject> HeapEntry::GetHeapObject() {
+ return snapshot_->collection()->FindHeapObjectById(id());
+}
+
+
+void HeapEntry::Print(
+ const char* prefix, const char* edge_name, int max_depth, int indent) {
+ STATIC_CHECK(sizeof(unsigned) == sizeof(id()));
+ OS::Print("%6d @%6u %*c %s%s: ",
+ self_size(), id(), indent, ' ', prefix, edge_name);
+ if (type() != kString) {
+ OS::Print("%s %.40s\n", TypeAsString(), name_);
+ } else {
+ OS::Print("\"");
+ const char* c = name_;
+ while (*c && (c - name_) <= 40) {
+ if (*c != '\n')
+ OS::Print("%c", *c);
+ else
+ OS::Print("\\n");
+ ++c;
+ }
+ OS::Print("\"\n");
+ }
+ if (--max_depth == 0) return;
+ Vector<HeapGraphEdge*> ch = children();
+ for (int i = 0; i < ch.length(); ++i) {
+ HeapGraphEdge& edge = *ch[i];
+ const char* edge_prefix = "";
+ EmbeddedVector<char, 64> index;
+ const char* edge_name = index.start();
+ switch (edge.type()) {
+ case HeapGraphEdge::kContextVariable:
+ edge_prefix = "#";
+ edge_name = edge.name();
+ break;
+ case HeapGraphEdge::kElement:
+ OS::SNPrintF(index, "%d", edge.index());
+ break;
+ case HeapGraphEdge::kInternal:
+ edge_prefix = "$";
+ edge_name = edge.name();
+ break;
+ case HeapGraphEdge::kProperty:
+ edge_name = edge.name();
+ break;
+ case HeapGraphEdge::kHidden:
+ edge_prefix = "$";
+ OS::SNPrintF(index, "%d", edge.index());
+ break;
+ case HeapGraphEdge::kShortcut:
+ edge_prefix = "^";
+ edge_name = edge.name();
+ break;
+ case HeapGraphEdge::kWeak:
+ edge_prefix = "w";
+ OS::SNPrintF(index, "%d", edge.index());
+ break;
+ default:
+ OS::SNPrintF(index, "!!! unknown edge type: %d ", edge.type());
+ }
+ edge.to()->Print(edge_prefix, edge_name, max_depth, indent + 2);
+ }
+}
+
+
+const char* HeapEntry::TypeAsString() {
+ switch (type()) {
+ case kHidden: return "/hidden/";
+ case kObject: return "/object/";
+ case kClosure: return "/closure/";
+ case kString: return "/string/";
+ case kCode: return "/code/";
+ case kArray: return "/array/";
+ case kRegExp: return "/regexp/";
+ case kHeapNumber: return "/number/";
+ case kNative: return "/native/";
+ case kSynthetic: return "/synthetic/";
+ case kConsString: return "/concatenated string/";
+ case kSlicedString: return "/sliced string/";
+ default: return "???";
+ }
+}
+
+
+// It is very important to keep objects that form a heap snapshot
+// as small as possible.
+namespace { // Avoid littering the global namespace.
+
+template <size_t ptr_size> struct SnapshotSizeConstants;
+
+template <> struct SnapshotSizeConstants<4> {
+ static const int kExpectedHeapGraphEdgeSize = 12;
+ static const int kExpectedHeapEntrySize = 24;
+};
+
+template <> struct SnapshotSizeConstants<8> {
+ static const int kExpectedHeapGraphEdgeSize = 24;
+ static const int kExpectedHeapEntrySize = 32;
+};
+
+} // namespace
+
+HeapSnapshot::HeapSnapshot(HeapSnapshotsCollection* collection,
+ const char* title,
+ unsigned uid)
+ : collection_(collection),
+ title_(title),
+ uid_(uid),
+ root_index_(HeapEntry::kNoEntry),
+ gc_roots_index_(HeapEntry::kNoEntry),
+ natives_root_index_(HeapEntry::kNoEntry),
+ max_snapshot_js_object_id_(0) {
+ STATIC_CHECK(
+ sizeof(HeapGraphEdge) ==
+ SnapshotSizeConstants<kPointerSize>::kExpectedHeapGraphEdgeSize);
+ STATIC_CHECK(
+ sizeof(HeapEntry) ==
+ SnapshotSizeConstants<kPointerSize>::kExpectedHeapEntrySize);
+ for (int i = 0; i < VisitorSynchronization::kNumberOfSyncTags; ++i) {
+ gc_subroot_indexes_[i] = HeapEntry::kNoEntry;
+ }
+}
+
+
+void HeapSnapshot::Delete() {
+ collection_->RemoveSnapshot(this);
+ delete this;
+}
+
+
+void HeapSnapshot::RememberLastJSObjectId() {
+ max_snapshot_js_object_id_ = collection_->last_assigned_id();
+}
+
+
+HeapEntry* HeapSnapshot::AddRootEntry() {
+ ASSERT(root_index_ == HeapEntry::kNoEntry);
+ ASSERT(entries_.is_empty()); // Root entry must be the first one.
+ HeapEntry* entry = AddEntry(HeapEntry::kSynthetic,
+ "",
+ HeapObjectsMap::kInternalRootObjectId,
+ 0);
+ root_index_ = entry->index();
+ ASSERT(root_index_ == 0);
+ return entry;
+}
+
+
+HeapEntry* HeapSnapshot::AddGcRootsEntry() {
+ ASSERT(gc_roots_index_ == HeapEntry::kNoEntry);
+ HeapEntry* entry = AddEntry(HeapEntry::kSynthetic,
+ "(GC roots)",
+ HeapObjectsMap::kGcRootsObjectId,
+ 0);
+ gc_roots_index_ = entry->index();
+ return entry;
+}
+
+
+HeapEntry* HeapSnapshot::AddGcSubrootEntry(int tag) {
+ ASSERT(gc_subroot_indexes_[tag] == HeapEntry::kNoEntry);
+ ASSERT(0 <= tag && tag < VisitorSynchronization::kNumberOfSyncTags);
+ HeapEntry* entry = AddEntry(
+ HeapEntry::kSynthetic,
+ VisitorSynchronization::kTagNames[tag],
+ HeapObjectsMap::GetNthGcSubrootId(tag),
+ 0);
+ gc_subroot_indexes_[tag] = entry->index();
+ return entry;
+}
+
+
+HeapEntry* HeapSnapshot::AddEntry(HeapEntry::Type type,
+ const char* name,
+ SnapshotObjectId id,
+ int size) {
+ HeapEntry entry(this, type, name, id, size);
+ entries_.Add(entry);
+ return &entries_.last();
+}
+
+
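+// Converts the "to" index stored in each edge into an entry pointer and
+// distributes the edges into per-entry children lists, using the child
+// counts accumulated while references were being set.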
+void HeapSnapshot::FillChildren() {
+ ASSERT(children().is_empty());
+ children().Allocate(edges().length());
+ int children_index = 0;
+ for (int i = 0; i < entries().length(); ++i) {
+ HeapEntry* entry = &entries()[i];
+ children_index = entry->set_children_index(children_index);
+ }
+ ASSERT(edges().length() == children_index);
+ for (int i = 0; i < edges().length(); ++i) {
+ HeapGraphEdge* edge = &edges()[i];
+ edge->ReplaceToIndexWithEntry(this);
+ edge->from()->add_child(edge);
+ }
+}
+
+
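+// Comparison functor used for the binary search over the id-sorted
+// entries list in GetEntryById.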
+class FindEntryById {
+ public:
+ explicit FindEntryById(SnapshotObjectId id) : id_(id) { }
+ int operator()(HeapEntry* const* entry) {
+ if ((*entry)->id() == id_) return 0;
+ return (*entry)->id() < id_ ? -1 : 1;
+ }
+ private:
+ SnapshotObjectId id_;
+};
+
+
+HeapEntry* HeapSnapshot::GetEntryById(SnapshotObjectId id) {
+ List<HeapEntry*>* entries_by_id = GetSortedEntriesList();
+ // Perform a binary search by id.
+ int index = SortedListBSearch(*entries_by_id, FindEntryById(id));
+ if (index == -1)
+ return NULL;
+ return entries_by_id->at(index);
+}
+
+
+template<class T>
+static int SortByIds(const T* entry1_ptr,
+ const T* entry2_ptr) {
+ if ((*entry1_ptr)->id() == (*entry2_ptr)->id()) return 0;
+ return (*entry1_ptr)->id() < (*entry2_ptr)->id() ? -1 : 1;
+}
+
+
+List<HeapEntry*>* HeapSnapshot::GetSortedEntriesList() {
+ if (sorted_entries_.is_empty()) {
+ sorted_entries_.Allocate(entries_.length());
+ for (int i = 0; i < entries_.length(); ++i) {
+ sorted_entries_[i] = &entries_[i];
+ }
+ sorted_entries_.Sort(SortByIds);
+ }
+ return &sorted_entries_;
+}
+
+
+void HeapSnapshot::Print(int max_depth) {
+ root()->Print("", "", max_depth, 0);
+}
+
+
+template<typename T, class P>
+static size_t GetMemoryUsedByList(const List<T, P>& list) {
+ return list.length() * sizeof(T) + sizeof(list);
+}
+
+
+size_t HeapSnapshot::RawSnapshotSize() const {
+ return
+ sizeof(*this) +
+ GetMemoryUsedByList(entries_) +
+ GetMemoryUsedByList(edges_) +
+ GetMemoryUsedByList(children_) +
+ GetMemoryUsedByList(sorted_entries_);
+}
+
+
+// We split IDs: even ids are used for embedder (native) objects (see
+// HeapObjectsMap::GenerateId) and odd ids for V8 heap objects.
+const SnapshotObjectId HeapObjectsMap::kInternalRootObjectId = 1;
+const SnapshotObjectId HeapObjectsMap::kGcRootsObjectId =
+ HeapObjectsMap::kInternalRootObjectId + HeapObjectsMap::kObjectIdStep;
+const SnapshotObjectId HeapObjectsMap::kGcRootsFirstSubrootId =
+ HeapObjectsMap::kGcRootsObjectId + HeapObjectsMap::kObjectIdStep;
+const SnapshotObjectId HeapObjectsMap::kFirstAvailableObjectId =
+ HeapObjectsMap::kGcRootsFirstSubrootId +
+ VisitorSynchronization::kNumberOfSyncTags * HeapObjectsMap::kObjectIdStep;
+
+
+static bool AddressesMatch(void* key1, void* key2) {
+ return key1 == key2;
+}
+
+
+HeapObjectsMap::HeapObjectsMap(Heap* heap)
+ : next_id_(kFirstAvailableObjectId),
+ entries_map_(AddressesMatch),
+ heap_(heap) {
+  // This dummy element solves a problem with entries_map_.
+  // When we do a lookup in the HashMap we cannot distinguish two cases:
+  // the map already had an entry with NULL as the value, or it has just
+  // created a new entry on the fly with NULL as the default value.
+  // With this dummy element we are guaranteed that every entries_map_ entry
+  // has a value field greater than 0.
+  // This fact is relied upon in the MoveObject method.
+ entries_.Add(EntryInfo(0, NULL, 0));
+}
+
+
+void HeapObjectsMap::SnapshotGenerationFinished() {
+ RemoveDeadEntries();
+}
+
+
+void HeapObjectsMap::MoveObject(Address from, Address to, int object_size) {
+ ASSERT(to != NULL);
+ ASSERT(from != NULL);
+ if (from == to) return;
+ void* from_value = entries_map_.Remove(from, ComputePointerHash(from));
+ if (from_value == NULL) {
+    // An untracked object may move to an address that is already occupied by
+    // a tracked object. In this case we should remove the stale entry, since
+    // we know that the object previously at that address has died.
+ void* to_value = entries_map_.Remove(to, ComputePointerHash(to));
+ if (to_value != NULL) {
+ int to_entry_info_index =
+ static_cast<int>(reinterpret_cast<intptr_t>(to_value));
+ entries_.at(to_entry_info_index).addr = NULL;
+ }
+ } else {
+ HashMap::Entry* to_entry = entries_map_.Lookup(to, ComputePointerHash(to),
+ true);
+ if (to_entry->value != NULL) {
+      // We found an existing entry at the to address that belongs to an old
+      // object. Without clearing it we would end up with two EntryInfos that
+      // share the same addr value, and later RemoveDeadEntries would remove
+      // one of them together with the corresponding entries_map_ entry.
+ int to_entry_info_index =
+ static_cast<int>(reinterpret_cast<intptr_t>(to_entry->value));
+ entries_.at(to_entry_info_index).addr = NULL;
+ }
+ int from_entry_info_index =
+ static_cast<int>(reinterpret_cast<intptr_t>(from_value));
+ entries_.at(from_entry_info_index).addr = to;
+    // The size of an object can change during its lifetime, so to keep the
+    // information about the object in entries_ consistent, we have to adjust
+    // its size when the object is migrated.
+ if (FLAG_heap_profiler_trace_objects) {
+ PrintF("Move object from %p to %p old size %6d new size %6d\n",
+ from,
+ to,
+ entries_.at(from_entry_info_index).size,
+ object_size);
+ }
+ entries_.at(from_entry_info_index).size = object_size;
+ to_entry->value = from_value;
+ }
+}
+
+
+void HeapObjectsMap::NewObject(Address addr, int size) {
+ if (FLAG_heap_profiler_trace_objects) {
+ PrintF("New object : %p %6d. Next address is %p\n",
+ addr,
+ size,
+ addr + size);
+ }
+ ASSERT(addr != NULL);
+ FindOrAddEntry(addr, size, false);
+}
+
+
+void HeapObjectsMap::UpdateObjectSize(Address addr, int size) {
+ FindOrAddEntry(addr, size, false);
+}
+
+
+SnapshotObjectId HeapObjectsMap::FindEntry(Address addr) {
+ HashMap::Entry* entry = entries_map_.Lookup(addr, ComputePointerHash(addr),
+ false);
+ if (entry == NULL) return 0;
+ int entry_index = static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
+ EntryInfo& entry_info = entries_.at(entry_index);
+ ASSERT(static_cast<uint32_t>(entries_.length()) > entries_map_.occupancy());
+ return entry_info.id;
+}
+
+
+SnapshotObjectId HeapObjectsMap::FindOrAddEntry(Address addr,
+ unsigned int size,
+ bool accessed) {
+ ASSERT(static_cast<uint32_t>(entries_.length()) > entries_map_.occupancy());
+ HashMap::Entry* entry = entries_map_.Lookup(addr, ComputePointerHash(addr),
+ true);
+ if (entry->value != NULL) {
+ int entry_index =
+ static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
+ EntryInfo& entry_info = entries_.at(entry_index);
+ entry_info.accessed = accessed;
+ if (FLAG_heap_profiler_trace_objects) {
+ PrintF("Update object size : %p with old size %d and new size %d\n",
+ addr,
+ entry_info.size,
+ size);
+ }
+ entry_info.size = size;
+ return entry_info.id;
+ }
+ entry->value = reinterpret_cast<void*>(entries_.length());
+ SnapshotObjectId id = next_id_;
+ next_id_ += kObjectIdStep;
+ entries_.Add(EntryInfo(id, addr, size, accessed));
+ ASSERT(static_cast<uint32_t>(entries_.length()) > entries_map_.occupancy());
+ return id;
+}
+
+
+void HeapObjectsMap::StopHeapObjectsTracking() {
+ time_intervals_.Clear();
+}
+
+
+void HeapObjectsMap::UpdateHeapObjectsMap() {
+ if (FLAG_heap_profiler_trace_objects) {
+ PrintF("Begin HeapObjectsMap::UpdateHeapObjectsMap. map has %d entries.\n",
+ entries_map_.occupancy());
+ }
+ heap_->CollectAllGarbage(Heap::kMakeHeapIterableMask,
+ "HeapSnapshotsCollection::UpdateHeapObjectsMap");
+ HeapIterator iterator(heap_);
+ for (HeapObject* obj = iterator.next();
+ obj != NULL;
+ obj = iterator.next()) {
+ FindOrAddEntry(obj->address(), obj->Size());
+ if (FLAG_heap_profiler_trace_objects) {
+ PrintF("Update object : %p %6d. Next address is %p\n",
+ obj->address(),
+ obj->Size(),
+ obj->address() + obj->Size());
+ }
+ }
+ RemoveDeadEntries();
+ if (FLAG_heap_profiler_trace_objects) {
+ PrintF("End HeapObjectsMap::UpdateHeapObjectsMap. map has %d entries.\n",
+ entries_map_.occupancy());
+ }
+}
+
+
+namespace {
+
+
+struct HeapObjectInfo {
+ HeapObjectInfo(HeapObject* obj, int expected_size)
+ : obj(obj),
+ expected_size(expected_size) {
+ }
+
+ HeapObject* obj;
+ int expected_size;
+
+ bool IsValid() const { return expected_size == obj->Size(); }
+
+ void Print() const {
+ if (expected_size == 0) {
+ PrintF("Untracked object : %p %6d. Next address is %p\n",
+ obj->address(),
+ obj->Size(),
+ obj->address() + obj->Size());
+ } else if (obj->Size() != expected_size) {
+ PrintF("Wrong size %6d: %p %6d. Next address is %p\n",
+ expected_size,
+ obj->address(),
+ obj->Size(),
+ obj->address() + obj->Size());
+ } else {
+ PrintF("Good object : %p %6d. Next address is %p\n",
+ obj->address(),
+ expected_size,
+ obj->address() + obj->Size());
+ }
+ }
+};
+
+
+static int comparator(const HeapObjectInfo* a, const HeapObjectInfo* b) {
+ if (a->obj < b->obj) return -1;
+ if (a->obj > b->obj) return 1;
+ return 0;
+}
+
+
+} // namespace
+
+
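+// Walks the whole heap and counts objects that either have no entry in the
+// map or whose recorded size differs from their actual size; returns the
+// number of such mismatches.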
+int HeapObjectsMap::FindUntrackedObjects() {
+ List<HeapObjectInfo> heap_objects(1000);
+
+ HeapIterator iterator(heap_);
+ int untracked = 0;
+ for (HeapObject* obj = iterator.next();
+ obj != NULL;
+ obj = iterator.next()) {
+ HashMap::Entry* entry = entries_map_.Lookup(
+ obj->address(), ComputePointerHash(obj->address()), false);
+ if (entry == NULL) {
+ ++untracked;
+ if (FLAG_heap_profiler_trace_objects) {
+ heap_objects.Add(HeapObjectInfo(obj, 0));
+ }
+ } else {
+ int entry_index = static_cast<int>(
+ reinterpret_cast<intptr_t>(entry->value));
+ EntryInfo& entry_info = entries_.at(entry_index);
+ if (FLAG_heap_profiler_trace_objects) {
+ heap_objects.Add(HeapObjectInfo(obj,
+ static_cast<int>(entry_info.size)));
+ if (obj->Size() != static_cast<int>(entry_info.size))
+ ++untracked;
+ } else {
+ CHECK_EQ(obj->Size(), static_cast<int>(entry_info.size));
+ }
+ }
+ }
+ if (FLAG_heap_profiler_trace_objects) {
+ PrintF("\nBegin HeapObjectsMap::FindUntrackedObjects. %d entries in map.\n",
+ entries_map_.occupancy());
+ heap_objects.Sort(comparator);
+ int last_printed_object = -1;
+ bool print_next_object = false;
+ for (int i = 0; i < heap_objects.length(); ++i) {
+ const HeapObjectInfo& object_info = heap_objects[i];
+ if (!object_info.IsValid()) {
+ ++untracked;
+ if (last_printed_object != i - 1) {
+ if (i > 0) {
+ PrintF("%d objects were skipped\n", i - 1 - last_printed_object);
+ heap_objects[i - 1].Print();
+ }
+ }
+ object_info.Print();
+ last_printed_object = i;
+ print_next_object = true;
+ } else if (print_next_object) {
+ object_info.Print();
+ print_next_object = false;
+ last_printed_object = i;
+ }
+ }
+ if (last_printed_object < heap_objects.length() - 1) {
+ PrintF("Last %d objects were skipped\n",
+ heap_objects.length() - 1 - last_printed_object);
+ }
+ PrintF("End HeapObjectsMap::FindUntrackedObjects. %d entries in map.\n\n",
+ entries_map_.occupancy());
+ }
+ return untracked;
+}
+
+
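+// Refreshes the object map, then streams one HeapStatsUpdate for every time
+// interval whose aggregate object count or size has changed, flushing the
+// buffer in chunks of the stream's preferred size.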
+SnapshotObjectId HeapObjectsMap::PushHeapObjectsStats(OutputStream* stream) {
+ UpdateHeapObjectsMap();
+ time_intervals_.Add(TimeInterval(next_id_));
+ int prefered_chunk_size = stream->GetChunkSize();
+ List<v8::HeapStatsUpdate> stats_buffer;
+ ASSERT(!entries_.is_empty());
+ EntryInfo* entry_info = &entries_.first();
+ EntryInfo* end_entry_info = &entries_.last() + 1;
+ for (int time_interval_index = 0;
+ time_interval_index < time_intervals_.length();
+ ++time_interval_index) {
+ TimeInterval& time_interval = time_intervals_[time_interval_index];
+ SnapshotObjectId time_interval_id = time_interval.id;
+ uint32_t entries_size = 0;
+ EntryInfo* start_entry_info = entry_info;
+ while (entry_info < end_entry_info && entry_info->id < time_interval_id) {
+ entries_size += entry_info->size;
+ ++entry_info;
+ }
+ uint32_t entries_count =
+ static_cast<uint32_t>(entry_info - start_entry_info);
+ if (time_interval.count != entries_count ||
+ time_interval.size != entries_size) {
+ stats_buffer.Add(v8::HeapStatsUpdate(
+ time_interval_index,
+ time_interval.count = entries_count,
+ time_interval.size = entries_size));
+ if (stats_buffer.length() >= prefered_chunk_size) {
+ OutputStream::WriteResult result = stream->WriteHeapStatsChunk(
+ &stats_buffer.first(), stats_buffer.length());
+ if (result == OutputStream::kAbort) return last_assigned_id();
+ stats_buffer.Clear();
+ }
+ }
+ }
+ ASSERT(entry_info == end_entry_info);
+ if (!stats_buffer.is_empty()) {
+ OutputStream::WriteResult result = stream->WriteHeapStatsChunk(
+ &stats_buffer.first(), stats_buffer.length());
+ if (result == OutputStream::kAbort) return last_assigned_id();
+ }
+ stream->EndOfStream();
+ return last_assigned_id();
+}
+
+
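+// Compacts entries_: entries that were not marked as accessed are dropped
+// together with their entries_map_ slots; surviving entries keep their ids,
+// get their accessed flag reset, and have their map slots re-pointed.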
+void HeapObjectsMap::RemoveDeadEntries() {
+ ASSERT(entries_.length() > 0 &&
+ entries_.at(0).id == 0 &&
+ entries_.at(0).addr == NULL);
+ int first_free_entry = 1;
+ for (int i = 1; i < entries_.length(); ++i) {
+ EntryInfo& entry_info = entries_.at(i);
+ if (entry_info.accessed) {
+ if (first_free_entry != i) {
+ entries_.at(first_free_entry) = entry_info;
+ }
+ entries_.at(first_free_entry).accessed = false;
+ HashMap::Entry* entry = entries_map_.Lookup(
+ entry_info.addr, ComputePointerHash(entry_info.addr), false);
+ ASSERT(entry);
+ entry->value = reinterpret_cast<void*>(first_free_entry);
+ ++first_free_entry;
+ } else {
+ if (entry_info.addr) {
+ entries_map_.Remove(entry_info.addr,
+ ComputePointerHash(entry_info.addr));
+ }
+ }
+ }
+ entries_.Rewind(first_free_entry);
+ ASSERT(static_cast<uint32_t>(entries_.length()) - 1 ==
+ entries_map_.occupancy());
+}
+
+
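+// Derives an id for an embedder-provided object from its hash, label and
+// element count. The result is shifted left by one bit so that these ids
+// stay even and never collide with the odd ids of V8 heap objects.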
+SnapshotObjectId HeapObjectsMap::GenerateId(Heap* heap,
+ v8::RetainedObjectInfo* info) {
+ SnapshotObjectId id = static_cast<SnapshotObjectId>(info->GetHash());
+ const char* label = info->GetLabel();
+ id ^= StringHasher::HashSequentialString(label,
+ static_cast<int>(strlen(label)),
+ heap->HashSeed());
+ intptr_t element_count = info->GetElementCount();
+ if (element_count != -1)
+ id ^= ComputeIntegerHash(static_cast<uint32_t>(element_count),
+ v8::internal::kZeroHashSeed);
+ return id << 1;
+}
+
+
+size_t HeapObjectsMap::GetUsedMemorySize() const {
+ return
+ sizeof(*this) +
+ sizeof(HashMap::Entry) * entries_map_.capacity() +
+ GetMemoryUsedByList(entries_) +
+ GetMemoryUsedByList(time_intervals_);
+}
+
+
+HeapSnapshotsCollection::HeapSnapshotsCollection(Heap* heap)
+ : is_tracking_objects_(false),
+ names_(heap),
+ ids_(heap),
+ allocation_tracker_(NULL) {
+}
+
+
+static void DeleteHeapSnapshot(HeapSnapshot** snapshot_ptr) {
+ delete *snapshot_ptr;
+}
+
+
+HeapSnapshotsCollection::~HeapSnapshotsCollection() {
+ delete allocation_tracker_;
+ snapshots_.Iterate(DeleteHeapSnapshot);
+}
+
+
+void HeapSnapshotsCollection::StartHeapObjectsTracking() {
+ ids_.UpdateHeapObjectsMap();
+ if (allocation_tracker_ == NULL) {
+ allocation_tracker_ = new AllocationTracker(&ids_, names());
+ }
+ is_tracking_objects_ = true;
+}
+
+
+void HeapSnapshotsCollection::StopHeapObjectsTracking() {
+ ids_.StopHeapObjectsTracking();
+ if (allocation_tracker_ != NULL) {
+ delete allocation_tracker_;
+ allocation_tracker_ = NULL;
+ }
+}
+
+
+HeapSnapshot* HeapSnapshotsCollection::NewSnapshot(const char* name,
+ unsigned uid) {
+ is_tracking_objects_ = true; // Start watching for heap objects moves.
+ return new HeapSnapshot(this, name, uid);
+}
+
+
+void HeapSnapshotsCollection::SnapshotGenerationFinished(
+ HeapSnapshot* snapshot) {
+ ids_.SnapshotGenerationFinished();
+ if (snapshot != NULL) {
+ snapshots_.Add(snapshot);
+ }
+}
+
+
+void HeapSnapshotsCollection::RemoveSnapshot(HeapSnapshot* snapshot) {
+ snapshots_.RemoveElement(snapshot);
+}
+
+
+Handle<HeapObject> HeapSnapshotsCollection::FindHeapObjectById(
+ SnapshotObjectId id) {
+ // First perform a full GC in order to avoid dead objects.
+ heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask,
+ "HeapSnapshotsCollection::FindHeapObjectById");
+ DisallowHeapAllocation no_allocation;
+ HeapObject* object = NULL;
+ HeapIterator iterator(heap(), HeapIterator::kFilterUnreachable);
+  // Make sure that the object with the given id is still reachable.
+ for (HeapObject* obj = iterator.next();
+ obj != NULL;
+ obj = iterator.next()) {
+ if (ids_.FindEntry(obj->address()) == id) {
+ ASSERT(object == NULL);
+ object = obj;
+ // Can't break -- kFilterUnreachable requires full heap traversal.
+ }
+ }
+ return object != NULL ? Handle<HeapObject>(object) : Handle<HeapObject>();
+}
+
+
+void HeapSnapshotsCollection::NewObjectEvent(Address addr, int size) {
+ DisallowHeapAllocation no_allocation;
+ ids_.NewObject(addr, size);
+ if (allocation_tracker_ != NULL) {
+ allocation_tracker_->NewObjectEvent(addr, size);
+ }
+}
+
+
+size_t HeapSnapshotsCollection::GetUsedMemorySize() const {
+ size_t size = sizeof(*this);
+ size += names_.GetUsedMemorySize();
+ size += ids_.GetUsedMemorySize();
+ size += GetMemoryUsedByList(snapshots_);
+ for (int i = 0; i < snapshots_.length(); ++i) {
+ size += snapshots_[i]->RawSnapshotSize();
+ }
+ return size;
+}
+
+
+HeapEntriesMap::HeapEntriesMap()
+ : entries_(HeapThingsMatch) {
+}
+
+
+int HeapEntriesMap::Map(HeapThing thing) {
+ HashMap::Entry* cache_entry = entries_.Lookup(thing, Hash(thing), false);
+ if (cache_entry == NULL) return HeapEntry::kNoEntry;
+ return static_cast<int>(reinterpret_cast<intptr_t>(cache_entry->value));
+}
+
+
+void HeapEntriesMap::Pair(HeapThing thing, int entry) {
+ HashMap::Entry* cache_entry = entries_.Lookup(thing, Hash(thing), true);
+ ASSERT(cache_entry->value == NULL);
+ cache_entry->value = reinterpret_cast<void*>(static_cast<intptr_t>(entry));
+}
+
+
+HeapObjectsSet::HeapObjectsSet()
+ : entries_(HeapEntriesMap::HeapThingsMatch) {
+}
+
+
+void HeapObjectsSet::Clear() {
+ entries_.Clear();
+}
+
+
+bool HeapObjectsSet::Contains(Object* obj) {
+ if (!obj->IsHeapObject()) return false;
+ HeapObject* object = HeapObject::cast(obj);
+ return entries_.Lookup(object, HeapEntriesMap::Hash(object), false) != NULL;
+}
+
+
+void HeapObjectsSet::Insert(Object* obj) {
+ if (!obj->IsHeapObject()) return;
+ HeapObject* object = HeapObject::cast(obj);
+ entries_.Lookup(object, HeapEntriesMap::Hash(object), true);
+}
+
+
+const char* HeapObjectsSet::GetTag(Object* obj) {
+ HeapObject* object = HeapObject::cast(obj);
+ HashMap::Entry* cache_entry =
+ entries_.Lookup(object, HeapEntriesMap::Hash(object), false);
+ return cache_entry != NULL
+ ? reinterpret_cast<const char*>(cache_entry->value)
+ : NULL;
+}
+
+
+void HeapObjectsSet::SetTag(Object* obj, const char* tag) {
+ if (!obj->IsHeapObject()) return;
+ HeapObject* object = HeapObject::cast(obj);
+ HashMap::Entry* cache_entry =
+ entries_.Lookup(object, HeapEntriesMap::Hash(object), true);
+ cache_entry->value = const_cast<char*>(tag);
+}
+
+
+HeapObject* const V8HeapExplorer::kInternalRootObject =
+ reinterpret_cast<HeapObject*>(
+ static_cast<intptr_t>(HeapObjectsMap::kInternalRootObjectId));
+HeapObject* const V8HeapExplorer::kGcRootsObject =
+ reinterpret_cast<HeapObject*>(
+ static_cast<intptr_t>(HeapObjectsMap::kGcRootsObjectId));
+HeapObject* const V8HeapExplorer::kFirstGcSubrootObject =
+ reinterpret_cast<HeapObject*>(
+ static_cast<intptr_t>(HeapObjectsMap::kGcRootsFirstSubrootId));
+HeapObject* const V8HeapExplorer::kLastGcSubrootObject =
+ reinterpret_cast<HeapObject*>(
+ static_cast<intptr_t>(HeapObjectsMap::kFirstAvailableObjectId));
+
+
+V8HeapExplorer::V8HeapExplorer(
+ HeapSnapshot* snapshot,
+ SnapshottingProgressReportingInterface* progress,
+ v8::HeapProfiler::ObjectNameResolver* resolver)
+ : heap_(snapshot->collection()->heap()),
+ snapshot_(snapshot),
+ collection_(snapshot_->collection()),
+ progress_(progress),
+ filler_(NULL),
+ global_object_name_resolver_(resolver) {
+}
+
+
+V8HeapExplorer::~V8HeapExplorer() {
+}
+
+
+HeapEntry* V8HeapExplorer::AllocateEntry(HeapThing ptr) {
+ return AddEntry(reinterpret_cast<HeapObject*>(ptr));
+}
+
+
+HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object) {
+ if (object == kInternalRootObject) {
+ snapshot_->AddRootEntry();
+ return snapshot_->root();
+ } else if (object == kGcRootsObject) {
+ HeapEntry* entry = snapshot_->AddGcRootsEntry();
+ return entry;
+ } else if (object >= kFirstGcSubrootObject && object < kLastGcSubrootObject) {
+ HeapEntry* entry = snapshot_->AddGcSubrootEntry(GetGcSubrootOrder(object));
+ return entry;
+ } else if (object->IsJSFunction()) {
+ JSFunction* func = JSFunction::cast(object);
+ SharedFunctionInfo* shared = func->shared();
+ const char* name = shared->bound() ? "native_bind" :
+ collection_->names()->GetName(String::cast(shared->name()));
+ return AddEntry(object, HeapEntry::kClosure, name);
+ } else if (object->IsJSRegExp()) {
+ JSRegExp* re = JSRegExp::cast(object);
+ return AddEntry(object,
+ HeapEntry::kRegExp,
+ collection_->names()->GetName(re->Pattern()));
+ } else if (object->IsJSObject()) {
+ const char* name = collection_->names()->GetName(
+ GetConstructorName(JSObject::cast(object)));
+ if (object->IsJSGlobalObject()) {
+ const char* tag = objects_tags_.GetTag(object);
+ if (tag != NULL) {
+ name = collection_->names()->GetFormatted("%s / %s", name, tag);
+ }
+ }
+ return AddEntry(object, HeapEntry::kObject, name);
+ } else if (object->IsString()) {
+ String* string = String::cast(object);
+ if (string->IsConsString())
+ return AddEntry(object,
+ HeapEntry::kConsString,
+ "(concatenated string)");
+ if (string->IsSlicedString())
+ return AddEntry(object,
+ HeapEntry::kSlicedString,
+ "(sliced string)");
+ return AddEntry(object,
+ HeapEntry::kString,
+ collection_->names()->GetName(String::cast(object)));
+ } else if (object->IsCode()) {
+ return AddEntry(object, HeapEntry::kCode, "");
+ } else if (object->IsSharedFunctionInfo()) {
+ String* name = String::cast(SharedFunctionInfo::cast(object)->name());
+ return AddEntry(object,
+ HeapEntry::kCode,
+ collection_->names()->GetName(name));
+ } else if (object->IsScript()) {
+ Object* name = Script::cast(object)->name();
+ return AddEntry(object,
+ HeapEntry::kCode,
+ name->IsString()
+ ? collection_->names()->GetName(String::cast(name))
+ : "");
+ } else if (object->IsNativeContext()) {
+ return AddEntry(object, HeapEntry::kHidden, "system / NativeContext");
+ } else if (object->IsContext()) {
+ return AddEntry(object, HeapEntry::kObject, "system / Context");
+ } else if (object->IsFixedArray() ||
+ object->IsFixedDoubleArray() ||
+ object->IsByteArray() ||
+ object->IsExternalArray()) {
+ return AddEntry(object, HeapEntry::kArray, "");
+ } else if (object->IsHeapNumber()) {
+ return AddEntry(object, HeapEntry::kHeapNumber, "number");
+ }
+ return AddEntry(object, HeapEntry::kHidden, GetSystemEntryName(object));
+}
+
+
+HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object,
+ HeapEntry::Type type,
+ const char* name) {
+ int object_size = object->Size();
+ SnapshotObjectId object_id =
+ collection_->GetObjectId(object->address(), object_size);
+ return snapshot_->AddEntry(type, name, object_id, object_size);
+}
+
+
+class GcSubrootsEnumerator : public ObjectVisitor {
+ public:
+ GcSubrootsEnumerator(
+ SnapshotFillerInterface* filler, V8HeapExplorer* explorer)
+ : filler_(filler),
+ explorer_(explorer),
+ previous_object_count_(0),
+ object_count_(0) {
+ }
+ void VisitPointers(Object** start, Object** end) {
+ object_count_ += end - start;
+ }
+ void Synchronize(VisitorSynchronization::SyncTag tag) {
+ // Skip empty subroots.
+ if (previous_object_count_ != object_count_) {
+ previous_object_count_ = object_count_;
+ filler_->AddEntry(V8HeapExplorer::GetNthGcSubrootObject(tag), explorer_);
+ }
+ }
+ private:
+ SnapshotFillerInterface* filler_;
+ V8HeapExplorer* explorer_;
+ intptr_t previous_object_count_;
+ intptr_t object_count_;
+};
+
+
+void V8HeapExplorer::AddRootEntries(SnapshotFillerInterface* filler) {
+ filler->AddEntry(kInternalRootObject, this);
+ filler->AddEntry(kGcRootsObject, this);
+ GcSubrootsEnumerator enumerator(filler, this);
+ heap_->IterateRoots(&enumerator, VISIT_ALL);
+}
+
+
+const char* V8HeapExplorer::GetSystemEntryName(HeapObject* object) {
+ switch (object->map()->instance_type()) {
+ case MAP_TYPE:
+ switch (Map::cast(object)->instance_type()) {
+#define MAKE_STRING_MAP_CASE(instance_type, size, name, Name) \
+ case instance_type: return "system / Map (" #Name ")";
+ STRING_TYPE_LIST(MAKE_STRING_MAP_CASE)
+#undef MAKE_STRING_MAP_CASE
+ default: return "system / Map";
+ }
+ case CELL_TYPE: return "system / Cell";
+ case PROPERTY_CELL_TYPE: return "system / PropertyCell";
+ case FOREIGN_TYPE: return "system / Foreign";
+ case ODDBALL_TYPE: return "system / Oddball";
+#define MAKE_STRUCT_CASE(NAME, Name, name) \
+ case NAME##_TYPE: return "system / "#Name;
+ STRUCT_LIST(MAKE_STRUCT_CASE)
+#undef MAKE_STRUCT_CASE
+ default: return "system";
+ }
+}
+
+
+int V8HeapExplorer::EstimateObjectsCount(HeapIterator* iterator) {
+ int objects_count = 0;
+ for (HeapObject* obj = iterator->next();
+ obj != NULL;
+ obj = iterator->next()) {
+ objects_count++;
+ }
+ return objects_count;
+}
+
+
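+// Reports pointer fields that were not covered by an explicit named or
+// indexed reference as hidden references. Fields already reported are
+// temporarily tagged via MarkVisitedField and are unmarked (and skipped)
+// while visiting.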
+class IndexedReferencesExtractor : public ObjectVisitor {
+ public:
+ IndexedReferencesExtractor(V8HeapExplorer* generator,
+ HeapObject* parent_obj,
+ int parent)
+ : generator_(generator),
+ parent_obj_(parent_obj),
+ parent_(parent),
+ next_index_(0) {
+ }
+ void VisitCodeEntry(Address entry_address) {
+ Code* code = Code::cast(Code::GetObjectFromEntryAddress(entry_address));
+ generator_->SetInternalReference(parent_obj_, parent_, "code", code);
+ generator_->TagObject(code, "(code)");
+ }
+ void VisitPointers(Object** start, Object** end) {
+ for (Object** p = start; p < end; p++) {
+ if (CheckVisitedAndUnmark(p)) continue;
+ generator_->SetHiddenReference(parent_obj_, parent_, next_index_++, *p);
+ }
+ }
+ static void MarkVisitedField(HeapObject* obj, int offset) {
+ if (offset < 0) return;
+ Address field = obj->address() + offset;
+ ASSERT(!Memory::Object_at(field)->IsFailure());
+ ASSERT(Memory::Object_at(field)->IsHeapObject());
+ *field |= kFailureTag;
+ }
+
+ private:
+ bool CheckVisitedAndUnmark(Object** field) {
+ if ((*field)->IsFailure()) {
+ intptr_t untagged = reinterpret_cast<intptr_t>(*field) & ~kFailureTagMask;
+ *field = reinterpret_cast<Object*>(untagged | kHeapObjectTag);
+ ASSERT((*field)->IsHeapObject());
+ return true;
+ }
+ return false;
+ }
+ V8HeapExplorer* generator_;
+ HeapObject* parent_obj_;
+ int parent_;
+ int next_index_;
+};
+
+
+void V8HeapExplorer::ExtractReferences(HeapObject* obj) {
+ HeapEntry* heap_entry = GetEntry(obj);
+ if (heap_entry == NULL) return; // No interest in this object.
+ int entry = heap_entry->index();
+
+ if (obj->IsJSGlobalProxy()) {
+ ExtractJSGlobalProxyReferences(entry, JSGlobalProxy::cast(obj));
+ } else if (obj->IsJSObject()) {
+ ExtractJSObjectReferences(entry, JSObject::cast(obj));
+ } else if (obj->IsString()) {
+ ExtractStringReferences(entry, String::cast(obj));
+ } else if (obj->IsContext()) {
+ ExtractContextReferences(entry, Context::cast(obj));
+ } else if (obj->IsMap()) {
+ ExtractMapReferences(entry, Map::cast(obj));
+ } else if (obj->IsSharedFunctionInfo()) {
+ ExtractSharedFunctionInfoReferences(entry, SharedFunctionInfo::cast(obj));
+ } else if (obj->IsScript()) {
+ ExtractScriptReferences(entry, Script::cast(obj));
+ } else if (obj->IsAccessorPair()) {
+ ExtractAccessorPairReferences(entry, AccessorPair::cast(obj));
+ } else if (obj->IsCodeCache()) {
+ ExtractCodeCacheReferences(entry, CodeCache::cast(obj));
+ } else if (obj->IsCode()) {
+ ExtractCodeReferences(entry, Code::cast(obj));
+ } else if (obj->IsCell()) {
+ ExtractCellReferences(entry, Cell::cast(obj));
+ } else if (obj->IsPropertyCell()) {
+ ExtractPropertyCellReferences(entry, PropertyCell::cast(obj));
+ } else if (obj->IsAllocationSite()) {
+ ExtractAllocationSiteReferences(entry, AllocationSite::cast(obj));
+ }
+ SetInternalReference(obj, entry, "map", obj->map(), HeapObject::kMapOffset);
+
+ // Extract unvisited fields as hidden references and restore tags
+ // of visited fields.
+ IndexedReferencesExtractor refs_extractor(this, obj, entry);
+ obj->Iterate(&refs_extractor);
+}
+
+
+void V8HeapExplorer::ExtractJSGlobalProxyReferences(
+ int entry, JSGlobalProxy* proxy) {
+ SetInternalReference(proxy, entry,
+ "native_context", proxy->native_context(),
+ JSGlobalProxy::kNativeContextOffset);
+}
+
+
+void V8HeapExplorer::ExtractJSObjectReferences(
+ int entry, JSObject* js_obj) {
+ HeapObject* obj = js_obj;
+ ExtractClosureReferences(js_obj, entry);
+ ExtractPropertyReferences(js_obj, entry);
+ ExtractElementReferences(js_obj, entry);
+ ExtractInternalReferences(js_obj, entry);
+ SetPropertyReference(
+ obj, entry, heap_->proto_string(), js_obj->GetPrototype());
+ if (obj->IsJSFunction()) {
+ JSFunction* js_fun = JSFunction::cast(js_obj);
+ Object* proto_or_map = js_fun->prototype_or_initial_map();
+ if (!proto_or_map->IsTheHole()) {
+ if (!proto_or_map->IsMap()) {
+ SetPropertyReference(
+ obj, entry,
+ heap_->prototype_string(), proto_or_map,
+ NULL,
+ JSFunction::kPrototypeOrInitialMapOffset);
+ } else {
+ SetPropertyReference(
+ obj, entry,
+ heap_->prototype_string(), js_fun->prototype());
+ SetInternalReference(
+ obj, entry, "initial_map", proto_or_map,
+ JSFunction::kPrototypeOrInitialMapOffset);
+ }
+ }
+ SharedFunctionInfo* shared_info = js_fun->shared();
+ // JSFunction has either bindings or literals and never both.
+ bool bound = shared_info->bound();
+ TagObject(js_fun->literals_or_bindings(),
+ bound ? "(function bindings)" : "(function literals)");
+ SetInternalReference(js_fun, entry,
+ bound ? "bindings" : "literals",
+ js_fun->literals_or_bindings(),
+ JSFunction::kLiteralsOffset);
+ TagObject(shared_info, "(shared function info)");
+ SetInternalReference(js_fun, entry,
+ "shared", shared_info,
+ JSFunction::kSharedFunctionInfoOffset);
+ TagObject(js_fun->context(), "(context)");
+ SetInternalReference(js_fun, entry,
+ "context", js_fun->context(),
+ JSFunction::kContextOffset);
+ for (int i = JSFunction::kNonWeakFieldsEndOffset;
+ i < JSFunction::kSize;
+ i += kPointerSize) {
+ SetWeakReference(js_fun, entry, i, *HeapObject::RawField(js_fun, i), i);
+ }
+ } else if (obj->IsGlobalObject()) {
+ GlobalObject* global_obj = GlobalObject::cast(obj);
+ SetInternalReference(global_obj, entry,
+ "builtins", global_obj->builtins(),
+ GlobalObject::kBuiltinsOffset);
+ SetInternalReference(global_obj, entry,
+ "native_context", global_obj->native_context(),
+ GlobalObject::kNativeContextOffset);
+ SetInternalReference(global_obj, entry,
+ "global_receiver", global_obj->global_receiver(),
+ GlobalObject::kGlobalReceiverOffset);
+ }
+ TagObject(js_obj->properties(), "(object properties)");
+ SetInternalReference(obj, entry,
+ "properties", js_obj->properties(),
+ JSObject::kPropertiesOffset);
+ TagObject(js_obj->elements(), "(object elements)");
+ SetInternalReference(obj, entry,
+ "elements", js_obj->elements(),
+ JSObject::kElementsOffset);
+}
+
+
+void V8HeapExplorer::ExtractStringReferences(int entry, String* string) {
+ if (string->IsConsString()) {
+ ConsString* cs = ConsString::cast(string);
+ SetInternalReference(cs, entry, "first", cs->first(),
+ ConsString::kFirstOffset);
+ SetInternalReference(cs, entry, "second", cs->second(),
+ ConsString::kSecondOffset);
+ } else if (string->IsSlicedString()) {
+ SlicedString* ss = SlicedString::cast(string);
+ SetInternalReference(ss, entry, "parent", ss->parent(),
+ SlicedString::kParentOffset);
+ }
+}
+
+
+void V8HeapExplorer::ExtractContextReferences(int entry, Context* context) {
+ if (context == context->declaration_context()) {
+ ScopeInfo* scope_info = context->closure()->shared()->scope_info();
+    // Add context-allocated locals.
+ int context_locals = scope_info->ContextLocalCount();
+ for (int i = 0; i < context_locals; ++i) {
+ String* local_name = scope_info->ContextLocalName(i);
+ int idx = Context::MIN_CONTEXT_SLOTS + i;
+ SetContextReference(context, entry, local_name, context->get(idx),
+ Context::OffsetOfElementAt(idx));
+ }
+ if (scope_info->HasFunctionName()) {
+ String* name = scope_info->FunctionName();
+ VariableMode mode;
+ int idx = scope_info->FunctionContextSlotIndex(name, &mode);
+ if (idx >= 0) {
+ SetContextReference(context, entry, name, context->get(idx),
+ Context::OffsetOfElementAt(idx));
+ }
+ }
+ }
+
+#define EXTRACT_CONTEXT_FIELD(index, type, name) \
+ SetInternalReference(context, entry, #name, context->get(Context::index), \
+ FixedArray::OffsetOfElementAt(Context::index));
+ EXTRACT_CONTEXT_FIELD(CLOSURE_INDEX, JSFunction, closure);
+ EXTRACT_CONTEXT_FIELD(PREVIOUS_INDEX, Context, previous);
+ EXTRACT_CONTEXT_FIELD(EXTENSION_INDEX, Object, extension);
+ EXTRACT_CONTEXT_FIELD(GLOBAL_OBJECT_INDEX, GlobalObject, global);
+ if (context->IsNativeContext()) {
+ TagObject(context->jsfunction_result_caches(),
+ "(context func. result caches)");
+ TagObject(context->normalized_map_cache(), "(context norm. map cache)");
+ TagObject(context->runtime_context(), "(runtime context)");
+ TagObject(context->embedder_data(), "(context data)");
+ NATIVE_CONTEXT_FIELDS(EXTRACT_CONTEXT_FIELD);
+#undef EXTRACT_CONTEXT_FIELD
+ for (int i = Context::FIRST_WEAK_SLOT;
+ i < Context::NATIVE_CONTEXT_SLOTS;
+ ++i) {
+ SetWeakReference(context, entry, i, context->get(i),
+ FixedArray::OffsetOfElementAt(i));
+ }
+ }
+}
+
+
+void V8HeapExplorer::ExtractMapReferences(int entry, Map* map) {
+ if (map->HasTransitionArray()) {
+ TransitionArray* transitions = map->transitions();
+ int transitions_entry = GetEntry(transitions)->index();
+ Object* back_pointer = transitions->back_pointer_storage();
+ TagObject(back_pointer, "(back pointer)");
+ SetInternalReference(transitions, transitions_entry,
+ "back_pointer", back_pointer);
+ TagObject(transitions, "(transition array)");
+ SetInternalReference(map, entry,
+ "transitions", transitions,
+ Map::kTransitionsOrBackPointerOffset);
+ } else {
+ Object* back_pointer = map->GetBackPointer();
+ TagObject(back_pointer, "(back pointer)");
+ SetInternalReference(map, entry,
+ "back_pointer", back_pointer,
+ Map::kTransitionsOrBackPointerOffset);
+ }
+ DescriptorArray* descriptors = map->instance_descriptors();
+ TagObject(descriptors, "(map descriptors)");
+ SetInternalReference(map, entry,
+ "descriptors", descriptors,
+ Map::kDescriptorsOffset);
+
+ SetInternalReference(map, entry,
+ "code_cache", map->code_cache(),
+ Map::kCodeCacheOffset);
+ SetInternalReference(map, entry,
+ "prototype", map->prototype(), Map::kPrototypeOffset);
+ SetInternalReference(map, entry,
+ "constructor", map->constructor(),
+ Map::kConstructorOffset);
+ TagObject(map->dependent_code(), "(dependent code)");
+ SetInternalReference(map, entry,
+ "dependent_code", map->dependent_code(),
+ Map::kDependentCodeOffset);
+}
+
+
+void V8HeapExplorer::ExtractSharedFunctionInfoReferences(
+ int entry, SharedFunctionInfo* shared) {
+ HeapObject* obj = shared;
+ SetInternalReference(obj, entry,
+ "name", shared->name(),
+ SharedFunctionInfo::kNameOffset);
+ TagObject(shared->code(), "(code)");
+ SetInternalReference(obj, entry,
+ "code", shared->code(),
+ SharedFunctionInfo::kCodeOffset);
+ TagObject(shared->scope_info(), "(function scope info)");
+ SetInternalReference(obj, entry,
+ "scope_info", shared->scope_info(),
+ SharedFunctionInfo::kScopeInfoOffset);
+ SetInternalReference(obj, entry,
+ "instance_class_name", shared->instance_class_name(),
+ SharedFunctionInfo::kInstanceClassNameOffset);
+ SetInternalReference(obj, entry,
+ "script", shared->script(),
+ SharedFunctionInfo::kScriptOffset);
+ TagObject(shared->construct_stub(), "(code)");
+ SetInternalReference(obj, entry,
+ "construct_stub", shared->construct_stub(),
+ SharedFunctionInfo::kConstructStubOffset);
+ SetInternalReference(obj, entry,
+ "function_data", shared->function_data(),
+ SharedFunctionInfo::kFunctionDataOffset);
+ SetInternalReference(obj, entry,
+ "debug_info", shared->debug_info(),
+ SharedFunctionInfo::kDebugInfoOffset);
+ SetInternalReference(obj, entry,
+ "inferred_name", shared->inferred_name(),
+ SharedFunctionInfo::kInferredNameOffset);
+ SetWeakReference(obj, entry,
+ 1, shared->initial_map(),
+ SharedFunctionInfo::kInitialMapOffset);
+}
+
+
+void V8HeapExplorer::ExtractScriptReferences(int entry, Script* script) {
+ HeapObject* obj = script;
+ SetInternalReference(obj, entry,
+ "source", script->source(),
+ Script::kSourceOffset);
+ SetInternalReference(obj, entry,
+ "name", script->name(),
+ Script::kNameOffset);
+ SetInternalReference(obj, entry,
+ "data", script->data(),
+ Script::kDataOffset);
+ SetInternalReference(obj, entry,
+ "context_data", script->context_data(),
+ Script::kContextOffset);
+ TagObject(script->line_ends(), "(script line ends)");
+ SetInternalReference(obj, entry,
+ "line_ends", script->line_ends(),
+ Script::kLineEndsOffset);
+}
+
+
+void V8HeapExplorer::ExtractAccessorPairReferences(
+ int entry, AccessorPair* accessors) {
+ SetInternalReference(accessors, entry, "getter", accessors->getter(),
+ AccessorPair::kGetterOffset);
+ SetInternalReference(accessors, entry, "setter", accessors->setter(),
+ AccessorPair::kSetterOffset);
+}
+
+
+void V8HeapExplorer::ExtractCodeCacheReferences(
+ int entry, CodeCache* code_cache) {
+ TagObject(code_cache->default_cache(), "(default code cache)");
+ SetInternalReference(code_cache, entry,
+ "default_cache", code_cache->default_cache(),
+ CodeCache::kDefaultCacheOffset);
+ TagObject(code_cache->normal_type_cache(), "(code type cache)");
+ SetInternalReference(code_cache, entry,
+ "type_cache", code_cache->normal_type_cache(),
+ CodeCache::kNormalTypeCacheOffset);
+}
+
+
+void V8HeapExplorer::ExtractCodeReferences(int entry, Code* code) {
+ TagObject(code->relocation_info(), "(code relocation info)");
+ SetInternalReference(code, entry,
+ "relocation_info", code->relocation_info(),
+ Code::kRelocationInfoOffset);
+ SetInternalReference(code, entry,
+ "handler_table", code->handler_table(),
+ Code::kHandlerTableOffset);
+ TagObject(code->deoptimization_data(), "(code deopt data)");
+ SetInternalReference(code, entry,
+ "deoptimization_data", code->deoptimization_data(),
+ Code::kDeoptimizationDataOffset);
+ if (code->kind() == Code::FUNCTION) {
+ SetInternalReference(code, entry,
+ "type_feedback_info", code->type_feedback_info(),
+ Code::kTypeFeedbackInfoOffset);
+ }
+ SetInternalReference(code, entry,
+ "gc_metadata", code->gc_metadata(),
+ Code::kGCMetadataOffset);
+}
+
+
+void V8HeapExplorer::ExtractCellReferences(int entry, Cell* cell) {
+ SetInternalReference(cell, entry, "value", cell->value(), Cell::kValueOffset);
+}
+
+
+void V8HeapExplorer::ExtractPropertyCellReferences(int entry,
+ PropertyCell* cell) {
+ ExtractCellReferences(entry, cell);
+ SetInternalReference(cell, entry, "type", cell->type(),
+ PropertyCell::kTypeOffset);
+ SetInternalReference(cell, entry, "dependent_code", cell->dependent_code(),
+ PropertyCell::kDependentCodeOffset);
+}
+
+
+void V8HeapExplorer::ExtractAllocationSiteReferences(int entry,
+ AllocationSite* site) {
+ SetInternalReference(site, entry, "transition_info", site->transition_info(),
+ AllocationSite::kTransitionInfoOffset);
+ SetInternalReference(site, entry, "nested_site", site->nested_site(),
+ AllocationSite::kNestedSiteOffset);
+ SetInternalReference(site, entry, "dependent_code", site->dependent_code(),
+ AllocationSite::kDependentCodeOffset);
+}
+
+
+void V8HeapExplorer::ExtractClosureReferences(JSObject* js_obj, int entry) {
+ if (!js_obj->IsJSFunction()) return;
+
+ JSFunction* func = JSFunction::cast(js_obj);
+ if (func->shared()->bound()) {
+ FixedArray* bindings = func->function_bindings();
+ SetNativeBindReference(js_obj, entry, "bound_this",
+ bindings->get(JSFunction::kBoundThisIndex));
+ SetNativeBindReference(js_obj, entry, "bound_function",
+ bindings->get(JSFunction::kBoundFunctionIndex));
+ for (int i = JSFunction::kBoundArgumentsStartIndex;
+ i < bindings->length(); i++) {
+ const char* reference_name = collection_->names()->GetFormatted(
+ "bound_argument_%d",
+ i - JSFunction::kBoundArgumentsStartIndex);
+ SetNativeBindReference(js_obj, entry, reference_name,
+ bindings->get(i));
+ }
+ }
+}
+
+
+void V8HeapExplorer::ExtractPropertyReferences(JSObject* js_obj, int entry) {
+ if (js_obj->HasFastProperties()) {
+ DescriptorArray* descs = js_obj->map()->instance_descriptors();
+ int real_size = js_obj->map()->NumberOfOwnDescriptors();
+ for (int i = 0; i < real_size; i++) {
+ switch (descs->GetType(i)) {
+ case FIELD: {
+ int index = descs->GetFieldIndex(i);
+
+ Name* k = descs->GetKey(i);
+ if (index < js_obj->map()->inobject_properties()) {
+ Object* value = js_obj->InObjectPropertyAt(index);
+ if (k != heap_->hidden_string()) {
+ SetPropertyReference(
+ js_obj, entry,
+ k, value,
+ NULL,
+ js_obj->GetInObjectPropertyOffset(index));
+ } else {
+ TagObject(value, "(hidden properties)");
+ SetInternalReference(
+ js_obj, entry,
+ "hidden_properties", value,
+ js_obj->GetInObjectPropertyOffset(index));
+ }
+ } else {
+ Object* value = js_obj->RawFastPropertyAt(index);
+ if (k != heap_->hidden_string()) {
+ SetPropertyReference(js_obj, entry, k, value);
+ } else {
+ TagObject(value, "(hidden properties)");
+ SetInternalReference(js_obj, entry, "hidden_properties", value);
+ }
+ }
+ break;
+ }
+ case CONSTANT:
+ SetPropertyReference(
+ js_obj, entry,
+ descs->GetKey(i), descs->GetConstant(i));
+ break;
+ case CALLBACKS:
+ ExtractAccessorPairProperty(
+ js_obj, entry,
+ descs->GetKey(i), descs->GetValue(i));
+ break;
+ case NORMAL: // only in slow mode
+ case HANDLER: // only in lookup results, not in descriptors
+ case INTERCEPTOR: // only in lookup results, not in descriptors
+ break;
+ case TRANSITION:
+ case NONEXISTENT:
+ UNREACHABLE();
+ break;
+ }
+ }
+ } else {
+ NameDictionary* dictionary = js_obj->property_dictionary();
+ int length = dictionary->Capacity();
+ for (int i = 0; i < length; ++i) {
+ Object* k = dictionary->KeyAt(i);
+ if (dictionary->IsKey(k)) {
+ Object* target = dictionary->ValueAt(i);
+ // We assume that global objects can only have slow properties.
+ Object* value = target->IsPropertyCell()
+ ? PropertyCell::cast(target)->value()
+ : target;
+ if (k == heap_->hidden_string()) {
+ TagObject(value, "(hidden properties)");
+ SetInternalReference(js_obj, entry, "hidden_properties", value);
+ continue;
+ }
+ if (ExtractAccessorPairProperty(js_obj, entry, k, value)) continue;
+ SetPropertyReference(js_obj, entry, String::cast(k), value);
+ }
+ }
+ }
+}
+
+
+bool V8HeapExplorer::ExtractAccessorPairProperty(
+ JSObject* js_obj, int entry, Object* key, Object* callback_obj) {
+ if (!callback_obj->IsAccessorPair()) return false;
+ AccessorPair* accessors = AccessorPair::cast(callback_obj);
+ Object* getter = accessors->getter();
+ if (!getter->IsOddball()) {
+ SetPropertyReference(js_obj, entry, String::cast(key), getter, "get %s");
+ }
+ Object* setter = accessors->setter();
+ if (!setter->IsOddball()) {
+ SetPropertyReference(js_obj, entry, String::cast(key), setter, "set %s");
+ }
+ return true;
+}
+
+
+void V8HeapExplorer::ExtractElementReferences(JSObject* js_obj, int entry) {
+ if (js_obj->HasFastObjectElements()) {
+ FixedArray* elements = FixedArray::cast(js_obj->elements());
+ int length = js_obj->IsJSArray() ?
+ Smi::cast(JSArray::cast(js_obj)->length())->value() :
+ elements->length();
+ for (int i = 0; i < length; ++i) {
+ if (!elements->get(i)->IsTheHole()) {
+ SetElementReference(js_obj, entry, i, elements->get(i));
+ }
+ }
+ } else if (js_obj->HasDictionaryElements()) {
+ SeededNumberDictionary* dictionary = js_obj->element_dictionary();
+ int length = dictionary->Capacity();
+ for (int i = 0; i < length; ++i) {
+ Object* k = dictionary->KeyAt(i);
+ if (dictionary->IsKey(k)) {
+ ASSERT(k->IsNumber());
+ uint32_t index = static_cast<uint32_t>(k->Number());
+ SetElementReference(js_obj, entry, index, dictionary->ValueAt(i));
+ }
+ }
+ }
+}
+
+
+void V8HeapExplorer::ExtractInternalReferences(JSObject* js_obj, int entry) {
+ int length = js_obj->GetInternalFieldCount();
+ for (int i = 0; i < length; ++i) {
+ Object* o = js_obj->GetInternalField(i);
+ SetInternalReference(
+ js_obj, entry, i, o, js_obj->GetInternalFieldOffset(i));
+ }
+}
+
+
+String* V8HeapExplorer::GetConstructorName(JSObject* object) {
+ Heap* heap = object->GetHeap();
+ if (object->IsJSFunction()) return heap->closure_string();
+ String* constructor_name = object->constructor_name();
+ if (constructor_name == heap->Object_string()) {
+    // Look up an immediate "constructor" property; if it is a function,
+    // return its name. This covers instances of binding objects, whose
+    // prototype constructor type is "Object".
+ Object* constructor_prop = NULL;
+ LookupResult result(heap->isolate());
+ object->LocalLookupRealNamedProperty(heap->constructor_string(), &result);
+ if (!result.IsFound()) return object->constructor_name();
+
+ constructor_prop = result.GetLazyValue();
+ if (constructor_prop->IsJSFunction()) {
+ Object* maybe_name =
+ JSFunction::cast(constructor_prop)->shared()->name();
+ if (maybe_name->IsString()) {
+ String* name = String::cast(maybe_name);
+ if (name->length() > 0) return name;
+ }
+ }
+ }
+ return object->constructor_name();
+}
+
+
+HeapEntry* V8HeapExplorer::GetEntry(Object* obj) {
+ if (!obj->IsHeapObject()) return NULL;
+ return filler_->FindOrAddEntry(obj, this);
+}
+
+
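+// Collects root references in two passes, strong-only first and then all
+// roots, so that FillReferences can tell strong subroot edges from weak
+// ones by comparing the two lists.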
+class RootsReferencesExtractor : public ObjectVisitor {
+ private:
+ struct IndexTag {
+ IndexTag(int index, VisitorSynchronization::SyncTag tag)
+ : index(index), tag(tag) { }
+ int index;
+ VisitorSynchronization::SyncTag tag;
+ };
+
+ public:
+ RootsReferencesExtractor()
+ : collecting_all_references_(false),
+ previous_reference_count_(0) {
+ }
+
+ void VisitPointers(Object** start, Object** end) {
+ if (collecting_all_references_) {
+ for (Object** p = start; p < end; p++) all_references_.Add(*p);
+ } else {
+ for (Object** p = start; p < end; p++) strong_references_.Add(*p);
+ }
+ }
+
+ void SetCollectingAllReferences() { collecting_all_references_ = true; }
+
+ void FillReferences(V8HeapExplorer* explorer) {
+ ASSERT(strong_references_.length() <= all_references_.length());
+ for (int i = 0; i < reference_tags_.length(); ++i) {
+ explorer->SetGcRootsReference(reference_tags_[i].tag);
+ }
+ int strong_index = 0, all_index = 0, tags_index = 0;
+ while (all_index < all_references_.length()) {
+ if (strong_index < strong_references_.length() &&
+ strong_references_[strong_index] == all_references_[all_index]) {
+ explorer->SetGcSubrootReference(reference_tags_[tags_index].tag,
+ false,
+ all_references_[all_index++]);
+ ++strong_index;
+ } else {
+ explorer->SetGcSubrootReference(reference_tags_[tags_index].tag,
+ true,
+ all_references_[all_index++]);
+ }
+ if (reference_tags_[tags_index].index == all_index) ++tags_index;
+ }
+ }
+
+ void Synchronize(VisitorSynchronization::SyncTag tag) {
+ if (collecting_all_references_ &&
+ previous_reference_count_ != all_references_.length()) {
+ previous_reference_count_ = all_references_.length();
+ reference_tags_.Add(IndexTag(previous_reference_count_, tag));
+ }
+ }
+
+ private:
+ bool collecting_all_references_;
+ List<Object*> strong_references_;
+ List<Object*> all_references_;
+ int previous_reference_count_;
+ List<IndexTag> reference_tags_;
+};
+
+
+bool V8HeapExplorer::IterateAndExtractReferences(
+ SnapshotFillerInterface* filler) {
+ HeapIterator iterator(heap_, HeapIterator::kFilterUnreachable);
+
+ filler_ = filler;
+ bool interrupted = false;
+
+  // Heap iteration with filtering must run to completion even if progress
+  // reporting interrupts the extraction.
+ for (HeapObject* obj = iterator.next();
+ obj != NULL;
+ obj = iterator.next(), progress_->ProgressStep()) {
+ if (!interrupted) {
+ ExtractReferences(obj);
+ if (!progress_->ProgressReport(false)) interrupted = true;
+ }
+ }
+ if (interrupted) {
+ filler_ = NULL;
+ return false;
+ }
+
+ SetRootGcRootsReference();
+ RootsReferencesExtractor extractor;
+ heap_->IterateRoots(&extractor, VISIT_ONLY_STRONG);
+ extractor.SetCollectingAllReferences();
+ heap_->IterateRoots(&extractor, VISIT_ALL);
+ extractor.FillReferences(this);
+ filler_ = NULL;
+ return progress_->ProgressReport(true);
+}
+
+
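+// Filters out oddballs and a handful of ubiquitous immutable singletons so
+// that references to them do not clutter the snapshot.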
+bool V8HeapExplorer::IsEssentialObject(Object* object) {
+ return object->IsHeapObject()
+ && !object->IsOddball()
+ && object != heap_->empty_byte_array()
+ && object != heap_->empty_fixed_array()
+ && object != heap_->empty_descriptor_array()
+ && object != heap_->fixed_array_map()
+ && object != heap_->cell_map()
+ && object != heap_->global_property_cell_map()
+ && object != heap_->shared_function_info_map()
+ && object != heap_->free_space_map()
+ && object != heap_->one_pointer_filler_map()
+ && object != heap_->two_pointer_filler_map();
+}
+
+
+void V8HeapExplorer::SetContextReference(HeapObject* parent_obj,
+ int parent_entry,
+ String* reference_name,
+ Object* child_obj,
+ int field_offset) {
+ ASSERT(parent_entry == GetEntry(parent_obj)->index());
+ HeapEntry* child_entry = GetEntry(child_obj);
+ if (child_entry != NULL) {
+ filler_->SetNamedReference(HeapGraphEdge::kContextVariable,
+ parent_entry,
+ collection_->names()->GetName(reference_name),
+ child_entry);
+ IndexedReferencesExtractor::MarkVisitedField(parent_obj, field_offset);
+ }
+}
+
+
+void V8HeapExplorer::SetNativeBindReference(HeapObject* parent_obj,
+ int parent_entry,
+ const char* reference_name,
+ Object* child_obj) {
+ ASSERT(parent_entry == GetEntry(parent_obj)->index());
+ HeapEntry* child_entry = GetEntry(child_obj);
+ if (child_entry != NULL) {
+ filler_->SetNamedReference(HeapGraphEdge::kShortcut,
+ parent_entry,
+ reference_name,
+ child_entry);
+ }
+}
+
+
+void V8HeapExplorer::SetElementReference(HeapObject* parent_obj,
+ int parent_entry,
+ int index,
+ Object* child_obj) {
+ ASSERT(parent_entry == GetEntry(parent_obj)->index());
+ HeapEntry* child_entry = GetEntry(child_obj);
+ if (child_entry != NULL) {
+ filler_->SetIndexedReference(HeapGraphEdge::kElement,
+ parent_entry,
+ index,
+ child_entry);
+ }
+}
+
+
+void V8HeapExplorer::SetInternalReference(HeapObject* parent_obj,
+ int parent_entry,
+ const char* reference_name,
+ Object* child_obj,
+ int field_offset) {
+ ASSERT(parent_entry == GetEntry(parent_obj)->index());
+ HeapEntry* child_entry = GetEntry(child_obj);
+ if (child_entry == NULL) return;
+ if (IsEssentialObject(child_obj)) {
+ filler_->SetNamedReference(HeapGraphEdge::kInternal,
+ parent_entry,
+ reference_name,
+ child_entry);
+ }
+ IndexedReferencesExtractor::MarkVisitedField(parent_obj, field_offset);
+}
+
+
+void V8HeapExplorer::SetInternalReference(HeapObject* parent_obj,
+ int parent_entry,
+ int index,
+ Object* child_obj,
+ int field_offset) {
+ ASSERT(parent_entry == GetEntry(parent_obj)->index());
+ HeapEntry* child_entry = GetEntry(child_obj);
+ if (child_entry == NULL) return;
+ if (IsEssentialObject(child_obj)) {
+ filler_->SetNamedReference(HeapGraphEdge::kInternal,
+ parent_entry,
+ collection_->names()->GetName(index),
+ child_entry);
+ }
+ IndexedReferencesExtractor::MarkVisitedField(parent_obj, field_offset);
+}
+
+
+void V8HeapExplorer::SetHiddenReference(HeapObject* parent_obj,
+ int parent_entry,
+ int index,
+ Object* child_obj) {
+ ASSERT(parent_entry == GetEntry(parent_obj)->index());
+ HeapEntry* child_entry = GetEntry(child_obj);
+ if (child_entry != NULL && IsEssentialObject(child_obj)) {
+ filler_->SetIndexedReference(HeapGraphEdge::kHidden,
+ parent_entry,
+ index,
+ child_entry);
+ }
+}
+
+
+void V8HeapExplorer::SetWeakReference(HeapObject* parent_obj,
+ int parent_entry,
+ int index,
+ Object* child_obj,
+ int field_offset) {
+ ASSERT(parent_entry == GetEntry(parent_obj)->index());
+ HeapEntry* child_entry = GetEntry(child_obj);
+ if (child_entry == NULL) return;
+ if (IsEssentialObject(child_obj)) {
+ filler_->SetIndexedReference(HeapGraphEdge::kWeak,
+ parent_entry,
+ index,
+ child_entry);
+ }
+ IndexedReferencesExtractor::MarkVisitedField(parent_obj, field_offset);
+}
+
+
+void V8HeapExplorer::SetPropertyReference(HeapObject* parent_obj,
+ int parent_entry,
+ Name* reference_name,
+ Object* child_obj,
+ const char* name_format_string,
+ int field_offset) {
+ ASSERT(parent_entry == GetEntry(parent_obj)->index());
+ HeapEntry* child_entry = GetEntry(child_obj);
+ if (child_entry != NULL) {
+ HeapGraphEdge::Type type =
+ reference_name->IsSymbol() || String::cast(reference_name)->length() > 0
+ ? HeapGraphEdge::kProperty : HeapGraphEdge::kInternal;
+ const char* name = name_format_string != NULL && reference_name->IsString()
+ ? collection_->names()->GetFormatted(
+ name_format_string,
+ *String::cast(reference_name)->ToCString(
+ DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL)) :
+ collection_->names()->GetName(reference_name);
+
+ filler_->SetNamedReference(type,
+ parent_entry,
+ name,
+ child_entry);
+ IndexedReferencesExtractor::MarkVisitedField(parent_obj, field_offset);
+ }
+}
+
+
+void V8HeapExplorer::SetRootGcRootsReference() {
+ filler_->SetIndexedAutoIndexReference(
+ HeapGraphEdge::kElement,
+ snapshot_->root()->index(),
+ snapshot_->gc_roots());
+}
+
+
+void V8HeapExplorer::SetUserGlobalReference(Object* child_obj) {
+ HeapEntry* child_entry = GetEntry(child_obj);
+ ASSERT(child_entry != NULL);
+ filler_->SetNamedAutoIndexReference(
+ HeapGraphEdge::kShortcut,
+ snapshot_->root()->index(),
+ child_entry);
+}
+
+
+void V8HeapExplorer::SetGcRootsReference(VisitorSynchronization::SyncTag tag) {
+ filler_->SetIndexedAutoIndexReference(
+ HeapGraphEdge::kElement,
+ snapshot_->gc_roots()->index(),
+ snapshot_->gc_subroot(tag));
+}
+
+
+void V8HeapExplorer::SetGcSubrootReference(
+ VisitorSynchronization::SyncTag tag, bool is_weak, Object* child_obj) {
+ HeapEntry* child_entry = GetEntry(child_obj);
+ if (child_entry != NULL) {
+ const char* name = GetStrongGcSubrootName(child_obj);
+ if (name != NULL) {
+ filler_->SetNamedReference(
+ HeapGraphEdge::kInternal,
+ snapshot_->gc_subroot(tag)->index(),
+ name,
+ child_entry);
+ } else {
+ filler_->SetIndexedAutoIndexReference(
+ is_weak ? HeapGraphEdge::kWeak : HeapGraphEdge::kElement,
+ snapshot_->gc_subroot(tag)->index(),
+ child_entry);
+ }
+
+    // Add a shortcut from the snapshot root to the JS global object.
+ if (child_obj->IsNativeContext()) {
+ Context* context = Context::cast(child_obj);
+ GlobalObject* global = context->global_object();
+ if (global->IsJSGlobalObject()) {
+ bool is_debug_object = false;
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ is_debug_object = heap_->isolate()->debug()->IsDebugGlobal(global);
+#endif
+ if (!is_debug_object && !user_roots_.Contains(global)) {
+ user_roots_.Insert(global);
+ SetUserGlobalReference(global);
+ }
+ }
+ }
+ }
+}
+
+
+const char* V8HeapExplorer::GetStrongGcSubrootName(Object* object) {
+ if (strong_gc_subroot_names_.is_empty()) {
+#define NAME_ENTRY(name) strong_gc_subroot_names_.SetTag(heap_->name(), #name);
+#define ROOT_NAME(type, name, camel_name) NAME_ENTRY(name)
+ STRONG_ROOT_LIST(ROOT_NAME)
+#undef ROOT_NAME
+#define STRUCT_MAP_NAME(NAME, Name, name) NAME_ENTRY(name##_map)
+ STRUCT_LIST(STRUCT_MAP_NAME)
+#undef STRUCT_MAP_NAME
+#define STRING_NAME(name, str) NAME_ENTRY(name)
+ INTERNALIZED_STRING_LIST(STRING_NAME)
+#undef STRING_NAME
+#undef NAME_ENTRY
+ CHECK(!strong_gc_subroot_names_.is_empty());
+ }
+ return strong_gc_subroot_names_.GetTag(object);
+}
+
+
+void V8HeapExplorer::TagObject(Object* obj, const char* tag) {
+ if (IsEssentialObject(obj)) {
+ HeapEntry* entry = GetEntry(obj);
+ if (entry->name()[0] == '\0') {
+ entry->set_name(tag);
+ }
+ }
+}
+
+
+class GlobalObjectsEnumerator : public ObjectVisitor {
+ public:
+ virtual void VisitPointers(Object** start, Object** end) {
+ for (Object** p = start; p < end; p++) {
+ if ((*p)->IsNativeContext()) {
+ Context* context = Context::cast(*p);
+ JSObject* proxy = context->global_proxy();
+ if (proxy->IsJSGlobalProxy()) {
+ Object* global = proxy->map()->prototype();
+ if (global->IsJSGlobalObject()) {
+ objects_.Add(Handle<JSGlobalObject>(JSGlobalObject::cast(global)));
+ }
+ }
+ }
+ }
+ }
+ int count() { return objects_.length(); }
+ Handle<JSGlobalObject>& at(int i) { return objects_[i]; }
+
+ private:
+ List<Handle<JSGlobalObject> > objects_;
+};
+
+
+// Modifies heap. Must not be run during heap traversal.
+void V8HeapExplorer::TagGlobalObjects() {
+ Isolate* isolate = heap_->isolate();
+ HandleScope scope(isolate);
+ GlobalObjectsEnumerator enumerator;
+ isolate->global_handles()->IterateAllRoots(&enumerator);
+ const char** urls = NewArray<const char*>(enumerator.count());
+ for (int i = 0, l = enumerator.count(); i < l; ++i) {
+ if (global_object_name_resolver_) {
+ HandleScope scope(isolate);
+ Handle<JSGlobalObject> global_obj = enumerator.at(i);
+ urls[i] = global_object_name_resolver_->GetName(
+ Utils::ToLocal(Handle<JSObject>::cast(global_obj)));
+ } else {
+ urls[i] = NULL;
+ }
+ }
+
+ DisallowHeapAllocation no_allocation;
+ for (int i = 0, l = enumerator.count(); i < l; ++i) {
+ objects_tags_.SetTag(*enumerator.at(i), urls[i]);
+ }
+
+ DeleteArray(urls);
+}
+
+
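TagGlobalObjects() above defers naming of JS global objects to the embedder-supplied v8::HeapProfiler::ObjectNameResolver, calling only its GetName(Handle<Object>) hook. A minimal, illustrative resolver (not part of this patch; the class name and returned label are invented):

  class ModuleNameResolver : public v8::HeapProfiler::ObjectNameResolver {
   public:
    // Invoked once per JS global object found during TagGlobalObjects().
    virtual const char* GetName(v8::Handle<v8::Object> object) {
      // A real embedder would map the global to its document/module URL here;
      // returning NULL leaves the object untagged.
      return "app://main";
    }
  };

The explorer stores the returned pointer while the snapshot is built, so the string should outlive snapshot generation.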
+class GlobalHandlesExtractor : public ObjectVisitor {
+ public:
+ explicit GlobalHandlesExtractor(NativeObjectsExplorer* explorer)
+ : explorer_(explorer) {}
+ virtual ~GlobalHandlesExtractor() {}
+ virtual void VisitPointers(Object** start, Object** end) {
+ UNREACHABLE();
+ }
+ virtual void VisitEmbedderReference(Object** p, uint16_t class_id) {
+ explorer_->VisitSubtreeWrapper(p, class_id);
+ }
+ private:
+ NativeObjectsExplorer* explorer_;
+};
+
+
+class BasicHeapEntriesAllocator : public HeapEntriesAllocator {
+ public:
+ BasicHeapEntriesAllocator(
+ HeapSnapshot* snapshot,
+ HeapEntry::Type entries_type)
+ : snapshot_(snapshot),
+ collection_(snapshot_->collection()),
+ entries_type_(entries_type) {
+ }
+ virtual HeapEntry* AllocateEntry(HeapThing ptr);
+ private:
+ HeapSnapshot* snapshot_;
+ HeapSnapshotsCollection* collection_;
+ HeapEntry::Type entries_type_;
+};
+
+
+HeapEntry* BasicHeapEntriesAllocator::AllocateEntry(HeapThing ptr) {
+ v8::RetainedObjectInfo* info = reinterpret_cast<v8::RetainedObjectInfo*>(ptr);
+ intptr_t elements = info->GetElementCount();
+ intptr_t size = info->GetSizeInBytes();
+ const char* name = elements != -1
+ ? collection_->names()->GetFormatted(
+ "%s / %" V8_PTR_PREFIX "d entries", info->GetLabel(), elements)
+ : collection_->names()->GetCopy(info->GetLabel());
+ return snapshot_->AddEntry(
+ entries_type_,
+ name,
+ HeapObjectsMap::GenerateId(collection_->heap(), info),
+ size != -1 ? static_cast<int>(size) : 0);
+}
+
+
+NativeObjectsExplorer::NativeObjectsExplorer(
+ HeapSnapshot* snapshot,
+ SnapshottingProgressReportingInterface* progress)
+ : isolate_(snapshot->collection()->heap()->isolate()),
+ snapshot_(snapshot),
+ collection_(snapshot_->collection()),
+ progress_(progress),
+ embedder_queried_(false),
+ objects_by_info_(RetainedInfosMatch),
+ native_groups_(StringsMatch),
+ filler_(NULL) {
+ synthetic_entries_allocator_ =
+ new BasicHeapEntriesAllocator(snapshot, HeapEntry::kSynthetic);
+ native_entries_allocator_ =
+ new BasicHeapEntriesAllocator(snapshot, HeapEntry::kNative);
+}
+
+
+NativeObjectsExplorer::~NativeObjectsExplorer() {
+ for (HashMap::Entry* p = objects_by_info_.Start();
+ p != NULL;
+ p = objects_by_info_.Next(p)) {
+ v8::RetainedObjectInfo* info =
+ reinterpret_cast<v8::RetainedObjectInfo*>(p->key);
+ info->Dispose();
+ List<HeapObject*>* objects =
+ reinterpret_cast<List<HeapObject*>* >(p->value);
+ delete objects;
+ }
+ for (HashMap::Entry* p = native_groups_.Start();
+ p != NULL;
+ p = native_groups_.Next(p)) {
+ v8::RetainedObjectInfo* info =
+ reinterpret_cast<v8::RetainedObjectInfo*>(p->value);
+ info->Dispose();
+ }
+ delete synthetic_entries_allocator_;
+ delete native_entries_allocator_;
+}
+
+
+int NativeObjectsExplorer::EstimateObjectsCount() {
+ FillRetainedObjects();
+ return objects_by_info_.occupancy();
+}
+
+
+void NativeObjectsExplorer::FillRetainedObjects() {
+ if (embedder_queried_) return;
+ Isolate* isolate = isolate_;
+ const GCType major_gc_type = kGCTypeMarkSweepCompact;
+ // Record objects that are joined into ObjectGroups.
+ isolate->heap()->CallGCPrologueCallbacks(
+ major_gc_type, kGCCallbackFlagConstructRetainedObjectInfos);
+ List<ObjectGroup*>* groups = isolate->global_handles()->object_groups();
+ for (int i = 0; i < groups->length(); ++i) {
+ ObjectGroup* group = groups->at(i);
+ if (group->info == NULL) continue;
+ List<HeapObject*>* list = GetListMaybeDisposeInfo(group->info);
+ for (size_t j = 0; j < group->length; ++j) {
+ HeapObject* obj = HeapObject::cast(*group->objects[j]);
+ list->Add(obj);
+ in_groups_.Insert(obj);
+ }
+ group->info = NULL; // Acquire info object ownership.
+ }
+ isolate->global_handles()->RemoveObjectGroups();
+ isolate->heap()->CallGCEpilogueCallbacks(major_gc_type);
+ // Record objects that are not in ObjectGroups, but have class ID.
+ GlobalHandlesExtractor extractor(this);
+ isolate->global_handles()->IterateAllRootsWithClassIds(&extractor);
+ embedder_queried_ = true;
+}
+
+
+void NativeObjectsExplorer::FillImplicitReferences() {
+ Isolate* isolate = isolate_;
+ List<ImplicitRefGroup*>* groups =
+ isolate->global_handles()->implicit_ref_groups();
+ for (int i = 0; i < groups->length(); ++i) {
+ ImplicitRefGroup* group = groups->at(i);
+ HeapObject* parent = *group->parent;
+ int parent_entry =
+ filler_->FindOrAddEntry(parent, native_entries_allocator_)->index();
+ ASSERT(parent_entry != HeapEntry::kNoEntry);
+ Object*** children = group->children;
+ for (size_t j = 0; j < group->length; ++j) {
+ Object* child = *children[j];
+ HeapEntry* child_entry =
+ filler_->FindOrAddEntry(child, native_entries_allocator_);
+ filler_->SetNamedReference(
+ HeapGraphEdge::kInternal,
+ parent_entry,
+ "native",
+ child_entry);
+ }
+ }
+ isolate->global_handles()->RemoveImplicitRefGroups();
+}
+
+List<HeapObject*>* NativeObjectsExplorer::GetListMaybeDisposeInfo(
+ v8::RetainedObjectInfo* info) {
+ HashMap::Entry* entry =
+ objects_by_info_.Lookup(info, InfoHash(info), true);
+ if (entry->value != NULL) {
+ info->Dispose();
+ } else {
+ entry->value = new List<HeapObject*>(4);
+ }
+ return reinterpret_cast<List<HeapObject*>* >(entry->value);
+}
+
+
+bool NativeObjectsExplorer::IterateAndExtractReferences(
+ SnapshotFillerInterface* filler) {
+ filler_ = filler;
+ FillRetainedObjects();
+ FillImplicitReferences();
+ if (EstimateObjectsCount() > 0) {
+ for (HashMap::Entry* p = objects_by_info_.Start();
+ p != NULL;
+ p = objects_by_info_.Next(p)) {
+ v8::RetainedObjectInfo* info =
+ reinterpret_cast<v8::RetainedObjectInfo*>(p->key);
+ SetNativeRootReference(info);
+ List<HeapObject*>* objects =
+ reinterpret_cast<List<HeapObject*>* >(p->value);
+ for (int i = 0; i < objects->length(); ++i) {
+ SetWrapperNativeReferences(objects->at(i), info);
+ }
+ }
+ SetRootNativeRootsReference();
+ }
+ filler_ = NULL;
+ return true;
+}
+
+
+class NativeGroupRetainedObjectInfo : public v8::RetainedObjectInfo {
+ public:
+ explicit NativeGroupRetainedObjectInfo(const char* label)
+ : disposed_(false),
+ hash_(reinterpret_cast<intptr_t>(label)),
+ label_(label) {
+ }
+
+ virtual ~NativeGroupRetainedObjectInfo() {}
+ virtual void Dispose() {
+ CHECK(!disposed_);
+ disposed_ = true;
+ delete this;
+ }
+ virtual bool IsEquivalent(RetainedObjectInfo* other) {
+ return hash_ == other->GetHash() && !strcmp(label_, other->GetLabel());
+ }
+ virtual intptr_t GetHash() { return hash_; }
+ virtual const char* GetLabel() { return label_; }
+
+ private:
+ bool disposed_;
+ intptr_t hash_;
+ const char* label_;
+};
+
+
+NativeGroupRetainedObjectInfo* NativeObjectsExplorer::FindOrAddGroupInfo(
+ const char* label) {
+ const char* label_copy = collection_->names()->GetCopy(label);
+ uint32_t hash = StringHasher::HashSequentialString(
+ label_copy,
+ static_cast<int>(strlen(label_copy)),
+ isolate_->heap()->HashSeed());
+ HashMap::Entry* entry = native_groups_.Lookup(const_cast<char*>(label_copy),
+ hash, true);
+ if (entry->value == NULL) {
+ entry->value = new NativeGroupRetainedObjectInfo(label);
+ }
+ return static_cast<NativeGroupRetainedObjectInfo*>(entry->value);
+}
+
+
+void NativeObjectsExplorer::SetNativeRootReference(
+ v8::RetainedObjectInfo* info) {
+ HeapEntry* child_entry =
+ filler_->FindOrAddEntry(info, native_entries_allocator_);
+ ASSERT(child_entry != NULL);
+ NativeGroupRetainedObjectInfo* group_info =
+ FindOrAddGroupInfo(info->GetGroupLabel());
+ HeapEntry* group_entry =
+ filler_->FindOrAddEntry(group_info, synthetic_entries_allocator_);
+ filler_->SetNamedAutoIndexReference(
+ HeapGraphEdge::kInternal,
+ group_entry->index(),
+ child_entry);
+}
+
+
+void NativeObjectsExplorer::SetWrapperNativeReferences(
+ HeapObject* wrapper, v8::RetainedObjectInfo* info) {
+ HeapEntry* wrapper_entry = filler_->FindEntry(wrapper);
+ ASSERT(wrapper_entry != NULL);
+ HeapEntry* info_entry =
+ filler_->FindOrAddEntry(info, native_entries_allocator_);
+ ASSERT(info_entry != NULL);
+ filler_->SetNamedReference(HeapGraphEdge::kInternal,
+ wrapper_entry->index(),
+ "native",
+ info_entry);
+ filler_->SetIndexedAutoIndexReference(HeapGraphEdge::kElement,
+ info_entry->index(),
+ wrapper_entry);
+}
+
+
+void NativeObjectsExplorer::SetRootNativeRootsReference() {
+ for (HashMap::Entry* entry = native_groups_.Start();
+ entry;
+ entry = native_groups_.Next(entry)) {
+ NativeGroupRetainedObjectInfo* group_info =
+ static_cast<NativeGroupRetainedObjectInfo*>(entry->value);
+ HeapEntry* group_entry =
+ filler_->FindOrAddEntry(group_info, native_entries_allocator_);
+ ASSERT(group_entry != NULL);
+ filler_->SetIndexedAutoIndexReference(
+ HeapGraphEdge::kElement,
+ snapshot_->root()->index(),
+ group_entry);
+ }
+}
+
+
+void NativeObjectsExplorer::VisitSubtreeWrapper(Object** p, uint16_t class_id) {
+ if (in_groups_.Contains(*p)) return;
+ Isolate* isolate = isolate_;
+ v8::RetainedObjectInfo* info =
+ isolate->heap_profiler()->ExecuteWrapperClassCallback(class_id, p);
+ if (info == NULL) return;
+ GetListMaybeDisposeInfo(info)->Add(HeapObject::cast(*p));
+}
+
+
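VisitSubtreeWrapper() above hands wrappers carrying a class id back to the embedder through ExecuteWrapperClassCallback(), which returns a v8::RetainedObjectInfo describing the native side. An illustrative embedder-side implementation, covering exactly the virtuals this file consumes (Dispose, IsEquivalent, GetHash, GetLabel, GetGroupLabel, GetElementCount, GetSizeInBytes); the names and sizes are invented:

  #include <string.h>  // strcmp

  class MyWrapperInfo : public v8::RetainedObjectInfo {
   public:
    explicit MyWrapperInfo(size_t native_size) : size_(native_size) {}
    virtual void Dispose() { delete this; }            // called from ~NativeObjectsExplorer()
    virtual bool IsEquivalent(RetainedObjectInfo* other) {
      return strcmp(GetLabel(), other->GetLabel()) == 0;
    }
    virtual intptr_t GetHash() { return 1; }           // equal infos share one object list
    virtual const char* GetLabel() { return "MyWrapper"; }
    virtual const char* GetGroupLabel() { return "MyLibrary"; }  // becomes a synthetic group node
    virtual intptr_t GetElementCount() { return -1; }  // -1 suppresses the "/ N entries" suffix
    virtual intptr_t GetSizeInBytes() { return static_cast<intptr_t>(size_); }
   private:
    size_t size_;
  };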
+class SnapshotFiller : public SnapshotFillerInterface {
+ public:
+ explicit SnapshotFiller(HeapSnapshot* snapshot, HeapEntriesMap* entries)
+ : snapshot_(snapshot),
+ collection_(snapshot->collection()),
+ entries_(entries) { }
+ HeapEntry* AddEntry(HeapThing ptr, HeapEntriesAllocator* allocator) {
+ HeapEntry* entry = allocator->AllocateEntry(ptr);
+ entries_->Pair(ptr, entry->index());
+ return entry;
+ }
+ HeapEntry* FindEntry(HeapThing ptr) {
+ int index = entries_->Map(ptr);
+ return index != HeapEntry::kNoEntry ? &snapshot_->entries()[index] : NULL;
+ }
+ HeapEntry* FindOrAddEntry(HeapThing ptr, HeapEntriesAllocator* allocator) {
+ HeapEntry* entry = FindEntry(ptr);
+ return entry != NULL ? entry : AddEntry(ptr, allocator);
+ }
+ void SetIndexedReference(HeapGraphEdge::Type type,
+ int parent,
+ int index,
+ HeapEntry* child_entry) {
+ HeapEntry* parent_entry = &snapshot_->entries()[parent];
+ parent_entry->SetIndexedReference(type, index, child_entry);
+ }
+ void SetIndexedAutoIndexReference(HeapGraphEdge::Type type,
+ int parent,
+ HeapEntry* child_entry) {
+ HeapEntry* parent_entry = &snapshot_->entries()[parent];
+ int index = parent_entry->children_count() + 1;
+ parent_entry->SetIndexedReference(type, index, child_entry);
+ }
+ void SetNamedReference(HeapGraphEdge::Type type,
+ int parent,
+ const char* reference_name,
+ HeapEntry* child_entry) {
+ HeapEntry* parent_entry = &snapshot_->entries()[parent];
+ parent_entry->SetNamedReference(type, reference_name, child_entry);
+ }
+ void SetNamedAutoIndexReference(HeapGraphEdge::Type type,
+ int parent,
+ HeapEntry* child_entry) {
+ HeapEntry* parent_entry = &snapshot_->entries()[parent];
+ int index = parent_entry->children_count() + 1;
+ parent_entry->SetNamedReference(
+ type,
+ collection_->names()->GetName(index),
+ child_entry);
+ }
+
+ private:
+ HeapSnapshot* snapshot_;
+ HeapSnapshotsCollection* collection_;
+ HeapEntriesMap* entries_;
+};
+
+
+HeapSnapshotGenerator::HeapSnapshotGenerator(
+ HeapSnapshot* snapshot,
+ v8::ActivityControl* control,
+ v8::HeapProfiler::ObjectNameResolver* resolver,
+ Heap* heap)
+ : snapshot_(snapshot),
+ control_(control),
+ v8_heap_explorer_(snapshot_, this, resolver),
+ dom_explorer_(snapshot_, this),
+ heap_(heap) {
+}
+
+
+bool HeapSnapshotGenerator::GenerateSnapshot() {
+ v8_heap_explorer_.TagGlobalObjects();
+
+ // TODO(1562) Profiler assumes that any object that is in the heap after
+ // full GC is reachable from the root when computing dominators.
+ // This is not true for weakly reachable objects.
+ // As a temporary solution we call GC twice.
+ heap_->CollectAllGarbage(
+ Heap::kMakeHeapIterableMask,
+ "HeapSnapshotGenerator::GenerateSnapshot");
+ heap_->CollectAllGarbage(
+ Heap::kMakeHeapIterableMask,
+ "HeapSnapshotGenerator::GenerateSnapshot");
+
+#ifdef VERIFY_HEAP
+ Heap* debug_heap = heap_;
+ CHECK(!debug_heap->old_data_space()->was_swept_conservatively());
+ CHECK(!debug_heap->old_pointer_space()->was_swept_conservatively());
+ CHECK(!debug_heap->code_space()->was_swept_conservatively());
+ CHECK(!debug_heap->cell_space()->was_swept_conservatively());
+ CHECK(!debug_heap->property_cell_space()->
+ was_swept_conservatively());
+ CHECK(!debug_heap->map_space()->was_swept_conservatively());
+#endif
+
+ // The following code uses heap iterators, so we want the heap to be
+ // stable. It should follow TagGlobalObjects as that can allocate.
+ DisallowHeapAllocation no_alloc;
+
+#ifdef VERIFY_HEAP
+ debug_heap->Verify();
+#endif
+
+ SetProgressTotal(1); // 1 pass.
+
+#ifdef VERIFY_HEAP
+ debug_heap->Verify();
+#endif
+
+ if (!FillReferences()) return false;
+
+ snapshot_->FillChildren();
+ snapshot_->RememberLastJSObjectId();
+
+ progress_counter_ = progress_total_;
+ if (!ProgressReport(true)) return false;
+ return true;
+}
+
+
+void HeapSnapshotGenerator::ProgressStep() {
+ ++progress_counter_;
+}
+
+
+bool HeapSnapshotGenerator::ProgressReport(bool force) {
+ const int kProgressReportGranularity = 10000;
+ if (control_ != NULL
+ && (force || progress_counter_ % kProgressReportGranularity == 0)) {
+ return
+ control_->ReportProgressValue(progress_counter_, progress_total_) ==
+ v8::ActivityControl::kContinue;
+ }
+ return true;
+}
+
+
+void HeapSnapshotGenerator::SetProgressTotal(int iterations_count) {
+ if (control_ == NULL) return;
+ HeapIterator iterator(heap_, HeapIterator::kFilterUnreachable);
+ progress_total_ = iterations_count * (
+ v8_heap_explorer_.EstimateObjectsCount(&iterator) +
+ dom_explorer_.EstimateObjectsCount());
+ progress_counter_ = 0;
+}
+
+
+bool HeapSnapshotGenerator::FillReferences() {
+ SnapshotFiller filler(snapshot_, &entries_);
+ v8_heap_explorer_.AddRootEntries(&filler);
+ return v8_heap_explorer_.IterateAndExtractReferences(&filler)
+ && dom_explorer_.IterateAndExtractReferences(&filler);
+}
+
+
+template<int bytes> struct MaxDecimalDigitsIn;
+template<> struct MaxDecimalDigitsIn<4> {
+ static const int kSigned = 11;
+ static const int kUnsigned = 10;
+};
+template<> struct MaxDecimalDigitsIn<8> {
+ static const int kSigned = 20;
+ static const int kUnsigned = 20;
+};
+
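A quick worked check of the digit counts above (illustrative, not from the patch): the longest 4-byte values print as "-2147483648" (11 characters, signed) and "4294967295" (10, unsigned); the longest 8-byte values, "-9223372036854775808" and "18446744073709551615", are both 20 characters, which is why kSigned and kUnsigned coincide for 8-byte integers.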
+
+class OutputStreamWriter {
+ public:
+ explicit OutputStreamWriter(v8::OutputStream* stream)
+ : stream_(stream),
+ chunk_size_(stream->GetChunkSize()),
+ chunk_(chunk_size_),
+ chunk_pos_(0),
+ aborted_(false) {
+ ASSERT(chunk_size_ > 0);
+ }
+ bool aborted() { return aborted_; }
+ void AddCharacter(char c) {
+ ASSERT(c != '\0');
+ ASSERT(chunk_pos_ < chunk_size_);
+ chunk_[chunk_pos_++] = c;
+ MaybeWriteChunk();
+ }
+ void AddString(const char* s) {
+ AddSubstring(s, StrLength(s));
+ }
+ void AddSubstring(const char* s, int n) {
+ if (n <= 0) return;
+ ASSERT(static_cast<size_t>(n) <= strlen(s));
+ const char* s_end = s + n;
+ while (s < s_end) {
+ int s_chunk_size = Min(
+ chunk_size_ - chunk_pos_, static_cast<int>(s_end - s));
+ ASSERT(s_chunk_size > 0);
+ OS::MemCopy(chunk_.start() + chunk_pos_, s, s_chunk_size);
+ s += s_chunk_size;
+ chunk_pos_ += s_chunk_size;
+ MaybeWriteChunk();
+ }
+ }
+ void AddNumber(unsigned n) { AddNumberImpl<unsigned>(n, "%u"); }
+ void Finalize() {
+ if (aborted_) return;
+ ASSERT(chunk_pos_ < chunk_size_);
+ if (chunk_pos_ != 0) {
+ WriteChunk();
+ }
+ stream_->EndOfStream();
+ }
+
+ private:
+ template<typename T>
+ void AddNumberImpl(T n, const char* format) {
+ // Buffer for the longest value plus trailing \0
+ static const int kMaxNumberSize =
+ MaxDecimalDigitsIn<sizeof(T)>::kUnsigned + 1;
+ if (chunk_size_ - chunk_pos_ >= kMaxNumberSize) {
+ int result = OS::SNPrintF(
+ chunk_.SubVector(chunk_pos_, chunk_size_), format, n);
+ ASSERT(result != -1);
+ chunk_pos_ += result;
+ MaybeWriteChunk();
+ } else {
+ EmbeddedVector<char, kMaxNumberSize> buffer;
+ int result = OS::SNPrintF(buffer, format, n);
+ USE(result);
+ ASSERT(result != -1);
+ AddString(buffer.start());
+ }
+ }
+ void MaybeWriteChunk() {
+ ASSERT(chunk_pos_ <= chunk_size_);
+ if (chunk_pos_ == chunk_size_) {
+ WriteChunk();
+ }
+ }
+ void WriteChunk() {
+ if (aborted_) return;
+ if (stream_->WriteAsciiChunk(chunk_.start(), chunk_pos_) ==
+ v8::OutputStream::kAbort) aborted_ = true;
+ chunk_pos_ = 0;
+ }
+
+ v8::OutputStream* stream_;
+ int chunk_size_;
+ ScopedVector<char> chunk_;
+ int chunk_pos_;
+ bool aborted_;
+};
+
+
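OutputStreamWriter above buffers serializer output in chunks of the size the embedder's v8::OutputStream reports, flushing whenever a chunk fills or Finalize() is called. A minimal sketch of the embedder side, using only the three members this class actually calls (GetChunkSize, WriteAsciiChunk, EndOfStream); the file-backed stream is illustrative:

  #include <cstdio>

  class FileOutputStream : public v8::OutputStream {
   public:
    explicit FileOutputStream(FILE* file) : file_(file) {}
    virtual int GetChunkSize() { return 1024; }  // size of the writer's internal buffer
    virtual WriteResult WriteAsciiChunk(char* data, int size) {
      size_t written = fwrite(data, 1, size, file_);
      return written == static_cast<size_t>(size) ? kContinue : kAbort;  // kAbort stops serialization
    }
    virtual void EndOfStream() { fflush(file_); }
   private:
    FILE* file_;
  };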
+// type, name|index, to_node.
+const int HeapSnapshotJSONSerializer::kEdgeFieldsCount = 3;
+// type, name, id, self_size, edge_count.
+const int HeapSnapshotJSONSerializer::kNodeFieldsCount = 5;
+
+void HeapSnapshotJSONSerializer::Serialize(v8::OutputStream* stream) {
+ if (AllocationTracker* allocation_tracker =
+ snapshot_->collection()->allocation_tracker()) {
+ allocation_tracker->PrepareForSerialization();
+ }
+ ASSERT(writer_ == NULL);
+ writer_ = new OutputStreamWriter(stream);
+ SerializeImpl();
+ delete writer_;
+ writer_ = NULL;
+}
+
+
+void HeapSnapshotJSONSerializer::SerializeImpl() {
+ ASSERT(0 == snapshot_->root()->index());
+ writer_->AddCharacter('{');
+ writer_->AddString("\"snapshot\":{");
+ SerializeSnapshot();
+ if (writer_->aborted()) return;
+ writer_->AddString("},\n");
+ writer_->AddString("\"nodes\":[");
+ SerializeNodes();
+ if (writer_->aborted()) return;
+ writer_->AddString("],\n");
+ writer_->AddString("\"edges\":[");
+ SerializeEdges();
+ if (writer_->aborted()) return;
+ writer_->AddString("],\n");
+
+ writer_->AddString("\"trace_function_infos\":[");
+ SerializeTraceNodeInfos();
+ if (writer_->aborted()) return;
+ writer_->AddString("],\n");
+ writer_->AddString("\"trace_tree\":[");
+ SerializeTraceTree();
+ if (writer_->aborted()) return;
+ writer_->AddString("],\n");
+
+ writer_->AddString("\"strings\":[");
+ SerializeStrings();
+ if (writer_->aborted()) return;
+ writer_->AddCharacter(']');
+ writer_->AddCharacter('}');
+ writer_->Finalize();
+}
+
+
+int HeapSnapshotJSONSerializer::GetStringId(const char* s) {
+ HashMap::Entry* cache_entry = strings_.Lookup(
+ const_cast<char*>(s), StringHash(s), true);
+ if (cache_entry->value == NULL) {
+ cache_entry->value = reinterpret_cast<void*>(next_string_id_++);
+ }
+ return static_cast<int>(reinterpret_cast<intptr_t>(cache_entry->value));
+}
+
+
+static int utoa(unsigned value, const Vector<char>& buffer, int buffer_pos) {
+ int number_of_digits = 0;
+ unsigned t = value;
+ do {
+ ++number_of_digits;
+ } while (t /= 10);
+
+ buffer_pos += number_of_digits;
+ int result = buffer_pos;
+ do {
+ int last_digit = value % 10;
+ buffer[--buffer_pos] = '0' + last_digit;
+ value /= 10;
+ } while (value);
+ return result;
+}
+
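The utoa() helper above writes the decimal digits of value into buffer starting at buffer_pos and returns the position just past the last digit; it never appends a terminating '\0', so callers add separators and the terminator themselves. A tiny illustrative use:

  EmbeddedVector<char, 16> buf;
  int pos = utoa(307, buf, 0);  // buf now starts with "307", pos == 3
  buf[pos++] = ',';             // caller supplies punctuation...
  buf[pos++] = '\0';            // ...and the terminator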
+
+void HeapSnapshotJSONSerializer::SerializeEdge(HeapGraphEdge* edge,
+ bool first_edge) {
+ // The buffer needs space for 3 unsigned ints, 3 commas, \n and \0
+ static const int kBufferSize =
+ MaxDecimalDigitsIn<sizeof(unsigned)>::kUnsigned * 3 + 3 + 2; // NOLINT
+ EmbeddedVector<char, kBufferSize> buffer;
+ int edge_name_or_index = edge->type() == HeapGraphEdge::kElement
+ || edge->type() == HeapGraphEdge::kHidden
+ || edge->type() == HeapGraphEdge::kWeak
+ ? edge->index() : GetStringId(edge->name());
+ int buffer_pos = 0;
+ if (!first_edge) {
+ buffer[buffer_pos++] = ',';
+ }
+ buffer_pos = utoa(edge->type(), buffer, buffer_pos);
+ buffer[buffer_pos++] = ',';
+ buffer_pos = utoa(edge_name_or_index, buffer, buffer_pos);
+ buffer[buffer_pos++] = ',';
+ buffer_pos = utoa(entry_index(edge->to()), buffer, buffer_pos);
+ buffer[buffer_pos++] = '\n';
+ buffer[buffer_pos++] = '\0';
+ writer_->AddString(buffer.start());
+}
+
+
+void HeapSnapshotJSONSerializer::SerializeEdges() {
+ List<HeapGraphEdge*>& edges = snapshot_->children();
+ for (int i = 0; i < edges.length(); ++i) {
+ ASSERT(i == 0 ||
+ edges[i - 1]->from()->index() <= edges[i]->from()->index());
+ SerializeEdge(edges[i], i == 0);
+ if (writer_->aborted()) return;
+ }
+}
+
+
+void HeapSnapshotJSONSerializer::SerializeNode(HeapEntry* entry) {
+ // The buffer needs space for 5 unsigned ints, 5 commas, \n and \0
+ static const int kBufferSize =
+ 5 * MaxDecimalDigitsIn<sizeof(unsigned)>::kUnsigned // NOLINT
+ + 5 + 1 + 1;
+ EmbeddedVector<char, kBufferSize> buffer;
+ int buffer_pos = 0;
+ if (entry_index(entry) != 0) {
+ buffer[buffer_pos++] = ',';
+ }
+ buffer_pos = utoa(entry->type(), buffer, buffer_pos);
+ buffer[buffer_pos++] = ',';
+ buffer_pos = utoa(GetStringId(entry->name()), buffer, buffer_pos);
+ buffer[buffer_pos++] = ',';
+ buffer_pos = utoa(entry->id(), buffer, buffer_pos);
+ buffer[buffer_pos++] = ',';
+ buffer_pos = utoa(entry->self_size(), buffer, buffer_pos);
+ buffer[buffer_pos++] = ',';
+ buffer_pos = utoa(entry->children_count(), buffer, buffer_pos);
+ buffer[buffer_pos++] = '\n';
+ buffer[buffer_pos++] = '\0';
+ writer_->AddString(buffer.start());
+}
+
+
+void HeapSnapshotJSONSerializer::SerializeNodes() {
+ List<HeapEntry>& entries = snapshot_->entries();
+ for (int i = 0; i < entries.length(); ++i) {
+ SerializeNode(&entries[i]);
+ if (writer_->aborted()) return;
+ }
+}
+
+
+void HeapSnapshotJSONSerializer::SerializeSnapshot() {
+ writer_->AddString("\"title\":\"");
+ writer_->AddString(snapshot_->title());
+ writer_->AddString("\"");
+ writer_->AddString(",\"uid\":");
+ writer_->AddNumber(snapshot_->uid());
+ writer_->AddString(",\"meta\":");
+ // The object describing node serialization layout.
+ // We use a set of macros to improve readability.
+#define JSON_A(s) "[" s "]"
+#define JSON_O(s) "{" s "}"
+#define JSON_S(s) "\"" s "\""
+ writer_->AddString(JSON_O(
+ JSON_S("node_fields") ":" JSON_A(
+ JSON_S("type") ","
+ JSON_S("name") ","
+ JSON_S("id") ","
+ JSON_S("self_size") ","
+ JSON_S("edge_count")) ","
+ JSON_S("node_types") ":" JSON_A(
+ JSON_A(
+ JSON_S("hidden") ","
+ JSON_S("array") ","
+ JSON_S("string") ","
+ JSON_S("object") ","
+ JSON_S("code") ","
+ JSON_S("closure") ","
+ JSON_S("regexp") ","
+ JSON_S("number") ","
+ JSON_S("native") ","
+ JSON_S("synthetic") ","
+ JSON_S("concatenated string") ","
+ JSON_S("sliced string")) ","
+ JSON_S("string") ","
+ JSON_S("number") ","
+ JSON_S("number") ","
+ JSON_S("number") ","
+ JSON_S("number") ","
+ JSON_S("number")) ","
+ JSON_S("edge_fields") ":" JSON_A(
+ JSON_S("type") ","
+ JSON_S("name_or_index") ","
+ JSON_S("to_node")) ","
+ JSON_S("edge_types") ":" JSON_A(
+ JSON_A(
+ JSON_S("context") ","
+ JSON_S("element") ","
+ JSON_S("property") ","
+ JSON_S("internal") ","
+ JSON_S("hidden") ","
+ JSON_S("shortcut") ","
+ JSON_S("weak")) ","
+ JSON_S("string_or_number") ","
+ JSON_S("node")) ","
+ JSON_S("trace_function_info_fields") ":" JSON_A(
+ JSON_S("function_id") ","
+ JSON_S("name") ","
+ JSON_S("script_name") ","
+ JSON_S("script_id") ","
+ JSON_S("line") ","
+ JSON_S("column")) ","
+ JSON_S("trace_node_fields") ":" JSON_A(
+ JSON_S("id") ","
+ JSON_S("function_id") ","
+ JSON_S("count") ","
+ JSON_S("size") ","
+ JSON_S("children"))));
+#undef JSON_S
+#undef JSON_O
+#undef JSON_A
+ writer_->AddString(",\"node_count\":");
+ writer_->AddNumber(snapshot_->entries().length());
+ writer_->AddString(",\"edge_count\":");
+ writer_->AddNumber(snapshot_->edges().length());
+ writer_->AddString(",\"trace_function_count\":");
+ uint32_t count = 0;
+ AllocationTracker* tracker = snapshot_->collection()->allocation_tracker();
+ if (tracker) {
+ count = tracker->id_to_function_info()->occupancy();
+ }
+ writer_->AddNumber(count);
+}
+
+
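Concretely, the "meta" description written above maps onto the flat integer arrays that SerializeNodes() and SerializeEdges() produce; node records are 5 ints wide, so "to_node" values are offsets that are multiples of 5. A hypothetical fragment (all values invented for illustration):

  "nodes":[9,1,1,0,2        // type=9 (synthetic), name=strings[1], id=1, self_size=0, edge_count=2
          ,3,2,3,56,0]      // type=3 (object),    name=strings[2], id=3, self_size=56, edge_count=0
  "edges":[1,0,5            // type=1 (element), index 0, points at the node starting at nodes[5]
          ,2,3,5]           // type=2 (property), name=strings[3], also points at nodes[5]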
+static void WriteUChar(OutputStreamWriter* w, unibrow::uchar u) {
+ static const char hex_chars[] = "0123456789ABCDEF";
+ w->AddString("\\u");
+ w->AddCharacter(hex_chars[(u >> 12) & 0xf]);
+ w->AddCharacter(hex_chars[(u >> 8) & 0xf]);
+ w->AddCharacter(hex_chars[(u >> 4) & 0xf]);
+ w->AddCharacter(hex_chars[u & 0xf]);
+}
+
+
+void HeapSnapshotJSONSerializer::SerializeTraceTree() {
+ AllocationTracker* tracker = snapshot_->collection()->allocation_tracker();
+ if (!tracker) return;
+ AllocationTraceTree* traces = tracker->trace_tree();
+ SerializeTraceNode(traces->root());
+}
+
+
+void HeapSnapshotJSONSerializer::SerializeTraceNode(AllocationTraceNode* node) {
+ // The buffer needs space for 4 unsigned ints, 4 commas, [ and \0
+ const int kBufferSize =
+ 4 * MaxDecimalDigitsIn<sizeof(unsigned)>::kUnsigned // NOLINT
+ + 4 + 1 + 1;
+ EmbeddedVector<char, kBufferSize> buffer;
+ int buffer_pos = 0;
+ buffer_pos = utoa(node->id(), buffer, buffer_pos);
+ buffer[buffer_pos++] = ',';
+ buffer_pos = utoa(node->function_id(), buffer, buffer_pos);
+ buffer[buffer_pos++] = ',';
+ buffer_pos = utoa(node->allocation_count(), buffer, buffer_pos);
+ buffer[buffer_pos++] = ',';
+ buffer_pos = utoa(node->allocation_size(), buffer, buffer_pos);
+ buffer[buffer_pos++] = ',';
+ buffer[buffer_pos++] = '[';
+ buffer[buffer_pos++] = '\0';
+ writer_->AddString(buffer.start());
+
+ Vector<AllocationTraceNode*> children = node->children();
+ for (int i = 0; i < children.length(); i++) {
+ if (i > 0) {
+ writer_->AddCharacter(',');
+ }
+ SerializeTraceNode(children[i]);
+ }
+ writer_->AddCharacter(']');
+}
+
+
+// 0-based position is converted to 1-based during the serialization.
+static int SerializePosition(int position, const Vector<char>& buffer,
+ int buffer_pos) {
+ if (position == -1) {
+ buffer[buffer_pos++] = '0';
+ } else {
+ ASSERT(position >= 0);
+ buffer_pos = utoa(static_cast<unsigned>(position + 1), buffer, buffer_pos);
+ }
+ return buffer_pos;
+}
+
+
+void HeapSnapshotJSONSerializer::SerializeTraceNodeInfos() {
+ AllocationTracker* tracker = snapshot_->collection()->allocation_tracker();
+ if (!tracker) return;
+ // The buffer needs space for 6 unsigned ints, 6 commas, \n and \0
+ const int kBufferSize =
+ 6 * MaxDecimalDigitsIn<sizeof(unsigned)>::kUnsigned // NOLINT
+ + 6 + 1 + 1;
+ EmbeddedVector<char, kBufferSize> buffer;
+ HashMap* id_to_function_info = tracker->id_to_function_info();
+ bool first_entry = true;
+ for (HashMap::Entry* p = id_to_function_info->Start();
+ p != NULL;
+ p = id_to_function_info->Next(p)) {
+ SnapshotObjectId id =
+ static_cast<SnapshotObjectId>(reinterpret_cast<intptr_t>(p->key));
+ AllocationTracker::FunctionInfo* info =
+ reinterpret_cast<AllocationTracker::FunctionInfo* >(p->value);
+ int buffer_pos = 0;
+ if (first_entry) {
+ first_entry = false;
+ } else {
+ buffer[buffer_pos++] = ',';
+ }
+ buffer_pos = utoa(id, buffer, buffer_pos);
+ buffer[buffer_pos++] = ',';
+ buffer_pos = utoa(GetStringId(info->name), buffer, buffer_pos);
+ buffer[buffer_pos++] = ',';
+ buffer_pos = utoa(GetStringId(info->script_name), buffer, buffer_pos);
+ buffer[buffer_pos++] = ',';
+ // The cast is safe because script id is a non-negative Smi.
+ buffer_pos = utoa(static_cast<unsigned>(info->script_id), buffer,
+ buffer_pos);
+ buffer[buffer_pos++] = ',';
+ buffer_pos = SerializePosition(info->line, buffer, buffer_pos);
+ buffer[buffer_pos++] = ',';
+ buffer_pos = SerializePosition(info->column, buffer, buffer_pos);
+ buffer[buffer_pos++] = '\n';
+ buffer[buffer_pos++] = '\0';
+ writer_->AddString(buffer.start());
+ }
+}
+
+
+void HeapSnapshotJSONSerializer::SerializeString(const unsigned char* s) {
+ writer_->AddCharacter('\n');
+ writer_->AddCharacter('\"');
+ for ( ; *s != '\0'; ++s) {
+ switch (*s) {
+ case '\b':
+ writer_->AddString("\\b");
+ continue;
+ case '\f':
+ writer_->AddString("\\f");
+ continue;
+ case '\n':
+ writer_->AddString("\\n");
+ continue;
+ case '\r':
+ writer_->AddString("\\r");
+ continue;
+ case '\t':
+ writer_->AddString("\\t");
+ continue;
+ case '\"':
+ case '\\':
+ writer_->AddCharacter('\\');
+ writer_->AddCharacter(*s);
+ continue;
+ default:
+ if (*s > 31 && *s < 128) {
+ writer_->AddCharacter(*s);
+ } else if (*s <= 31) {
+ // Special character with no dedicated literal.
+ WriteUChar(writer_, *s);
+ } else {
+ // Convert UTF-8 into \u UTF-16 literal.
+ unsigned length = 1, cursor = 0;
+ for ( ; length <= 4 && *(s + length) != '\0'; ++length) { }
+ unibrow::uchar c = unibrow::Utf8::CalculateValue(s, length, &cursor);
+ if (c != unibrow::Utf8::kBadChar) {
+ WriteUChar(writer_, c);
+ ASSERT(cursor != 0);
+ s += cursor - 1;
+ } else {
+ writer_->AddCharacter('?');
+ }
+ }
+ }
+ }
+ writer_->AddCharacter('\"');
+}
+
+
+void HeapSnapshotJSONSerializer::SerializeStrings() {
+ ScopedVector<const unsigned char*> sorted_strings(
+ strings_.occupancy() + 1);
+ for (HashMap::Entry* entry = strings_.Start();
+ entry != NULL;
+ entry = strings_.Next(entry)) {
+ int index = static_cast<int>(reinterpret_cast<uintptr_t>(entry->value));
+ sorted_strings[index] = reinterpret_cast<const unsigned char*>(entry->key);
+ }
+ writer_->AddString("\"<dummy>\"");
+ for (int i = 1; i < sorted_strings.length(); ++i) {
+ writer_->AddCharacter(',');
+ SerializeString(sorted_strings[i]);
+ if (writer_->aborted()) return;
+ }
+}
+
+
+} } // namespace v8::internal
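One detail worth noting about SerializeStrings() above: GetStringId() hands out ids starting at 1 and index 0 is filled with the "<dummy>" placeholder, so every name / name_or_index value in the node and edge arrays can be used directly as an index into "strings". An invented fragment:

  "strings":["<dummy>","(GC roots)","Object","x"]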
diff --git a/deps/v8/src/heap-snapshot-generator.h b/deps/v8/src/heap-snapshot-generator.h
new file mode 100644
index 000000000..e4038b10f
--- /dev/null
+++ b/deps/v8/src/heap-snapshot-generator.h
@@ -0,0 +1,705 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_HEAP_SNAPSHOT_GENERATOR_H_
+#define V8_HEAP_SNAPSHOT_GENERATOR_H_
+
+#include "profile-generator-inl.h"
+
+namespace v8 {
+namespace internal {
+
+class AllocationTracker;
+class AllocationTraceNode;
+class HeapEntry;
+class HeapSnapshot;
+
+class HeapGraphEdge BASE_EMBEDDED {
+ public:
+ enum Type {
+ kContextVariable = v8::HeapGraphEdge::kContextVariable,
+ kElement = v8::HeapGraphEdge::kElement,
+ kProperty = v8::HeapGraphEdge::kProperty,
+ kInternal = v8::HeapGraphEdge::kInternal,
+ kHidden = v8::HeapGraphEdge::kHidden,
+ kShortcut = v8::HeapGraphEdge::kShortcut,
+ kWeak = v8::HeapGraphEdge::kWeak
+ };
+
+ HeapGraphEdge() { }
+ HeapGraphEdge(Type type, const char* name, int from, int to);
+ HeapGraphEdge(Type type, int index, int from, int to);
+ void ReplaceToIndexWithEntry(HeapSnapshot* snapshot);
+
+ Type type() const { return static_cast<Type>(type_); }
+ int index() const {
+ ASSERT(type_ == kElement || type_ == kHidden || type_ == kWeak);
+ return index_;
+ }
+ const char* name() const {
+ ASSERT(type_ == kContextVariable
+ || type_ == kProperty
+ || type_ == kInternal
+ || type_ == kShortcut);
+ return name_;
+ }
+ INLINE(HeapEntry* from() const);
+ HeapEntry* to() const { return to_entry_; }
+
+ private:
+ INLINE(HeapSnapshot* snapshot() const);
+
+ unsigned type_ : 3;
+ int from_index_ : 29;
+ union {
+ // During entries population |to_index_| is used for storing the index,
+ // afterwards it is replaced with a pointer to the entry.
+ int to_index_;
+ HeapEntry* to_entry_;
+ };
+ union {
+ int index_;
+ const char* name_;
+ };
+};
+
+
+// HeapEntry instances represent an entity from the heap (or a special
+// virtual node, e.g. root).
+class HeapEntry BASE_EMBEDDED {
+ public:
+ enum Type {
+ kHidden = v8::HeapGraphNode::kHidden,
+ kArray = v8::HeapGraphNode::kArray,
+ kString = v8::HeapGraphNode::kString,
+ kObject = v8::HeapGraphNode::kObject,
+ kCode = v8::HeapGraphNode::kCode,
+ kClosure = v8::HeapGraphNode::kClosure,
+ kRegExp = v8::HeapGraphNode::kRegExp,
+ kHeapNumber = v8::HeapGraphNode::kHeapNumber,
+ kNative = v8::HeapGraphNode::kNative,
+ kSynthetic = v8::HeapGraphNode::kSynthetic,
+ kConsString = v8::HeapGraphNode::kConsString,
+ kSlicedString = v8::HeapGraphNode::kSlicedString
+ };
+ static const int kNoEntry;
+
+ HeapEntry() { }
+ HeapEntry(HeapSnapshot* snapshot,
+ Type type,
+ const char* name,
+ SnapshotObjectId id,
+ int self_size);
+
+ HeapSnapshot* snapshot() { return snapshot_; }
+ Type type() { return static_cast<Type>(type_); }
+ const char* name() { return name_; }
+ void set_name(const char* name) { name_ = name; }
+ inline SnapshotObjectId id() { return id_; }
+ int self_size() { return self_size_; }
+ INLINE(int index() const);
+ int children_count() const { return children_count_; }
+ INLINE(int set_children_index(int index));
+ void add_child(HeapGraphEdge* edge) {
+ children_arr()[children_count_++] = edge;
+ }
+ Vector<HeapGraphEdge*> children() {
+ return Vector<HeapGraphEdge*>(children_arr(), children_count_); }
+
+ void SetIndexedReference(
+ HeapGraphEdge::Type type, int index, HeapEntry* entry);
+ void SetNamedReference(
+ HeapGraphEdge::Type type, const char* name, HeapEntry* entry);
+
+ void Print(
+ const char* prefix, const char* edge_name, int max_depth, int indent);
+
+ Handle<HeapObject> GetHeapObject();
+
+ private:
+ INLINE(HeapGraphEdge** children_arr());
+ const char* TypeAsString();
+
+ unsigned type_: 4;
+ int children_count_: 28;
+ int children_index_;
+ int self_size_;
+ SnapshotObjectId id_;
+ HeapSnapshot* snapshot_;
+ const char* name_;
+};
+
+
+class HeapSnapshotsCollection;
+
+// HeapSnapshot represents a single heap snapshot. It is stored in
+// HeapSnapshotsCollection, which is also a factory for
+// HeapSnapshots. All HeapSnapshots share strings copied from the JS heap
+// so that they can still be returned after the originals are collected.
+// HeapSnapshotGenerator fills in a HeapSnapshot.
+class HeapSnapshot {
+ public:
+ HeapSnapshot(HeapSnapshotsCollection* collection,
+ const char* title,
+ unsigned uid);
+ void Delete();
+
+ HeapSnapshotsCollection* collection() { return collection_; }
+ const char* title() { return title_; }
+ unsigned uid() { return uid_; }
+ size_t RawSnapshotSize() const;
+ HeapEntry* root() { return &entries_[root_index_]; }
+ HeapEntry* gc_roots() { return &entries_[gc_roots_index_]; }
+ HeapEntry* natives_root() { return &entries_[natives_root_index_]; }
+ HeapEntry* gc_subroot(int index) {
+ return &entries_[gc_subroot_indexes_[index]];
+ }
+ List<HeapEntry>& entries() { return entries_; }
+ List<HeapGraphEdge>& edges() { return edges_; }
+ List<HeapGraphEdge*>& children() { return children_; }
+ void RememberLastJSObjectId();
+ SnapshotObjectId max_snapshot_js_object_id() const {
+ return max_snapshot_js_object_id_;
+ }
+
+ HeapEntry* AddEntry(HeapEntry::Type type,
+ const char* name,
+ SnapshotObjectId id,
+ int size);
+ HeapEntry* AddRootEntry();
+ HeapEntry* AddGcRootsEntry();
+ HeapEntry* AddGcSubrootEntry(int tag);
+ HeapEntry* AddNativesRootEntry();
+ HeapEntry* GetEntryById(SnapshotObjectId id);
+ List<HeapEntry*>* GetSortedEntriesList();
+ void FillChildren();
+
+ void Print(int max_depth);
+ void PrintEntriesSize();
+
+ private:
+ HeapSnapshotsCollection* collection_;
+ const char* title_;
+ unsigned uid_;
+ int root_index_;
+ int gc_roots_index_;
+ int natives_root_index_;
+ int gc_subroot_indexes_[VisitorSynchronization::kNumberOfSyncTags];
+ List<HeapEntry> entries_;
+ List<HeapGraphEdge> edges_;
+ List<HeapGraphEdge*> children_;
+ List<HeapEntry*> sorted_entries_;
+ SnapshotObjectId max_snapshot_js_object_id_;
+
+ friend class HeapSnapshotTester;
+
+ DISALLOW_COPY_AND_ASSIGN(HeapSnapshot);
+};
+
+
+class HeapObjectsMap {
+ public:
+ explicit HeapObjectsMap(Heap* heap);
+
+ Heap* heap() const { return heap_; }
+
+ void SnapshotGenerationFinished();
+ SnapshotObjectId FindEntry(Address addr);
+ SnapshotObjectId FindOrAddEntry(Address addr,
+ unsigned int size,
+ bool accessed = true);
+ void MoveObject(Address from, Address to, int size);
+ void NewObject(Address addr, int size);
+ void UpdateObjectSize(Address addr, int size);
+ SnapshotObjectId last_assigned_id() const {
+ return next_id_ - kObjectIdStep;
+ }
+
+ void StopHeapObjectsTracking();
+ SnapshotObjectId PushHeapObjectsStats(OutputStream* stream);
+ size_t GetUsedMemorySize() const;
+
+ static SnapshotObjectId GenerateId(Heap* heap, v8::RetainedObjectInfo* info);
+ static inline SnapshotObjectId GetNthGcSubrootId(int delta);
+
+ static const int kObjectIdStep = 2;
+ static const SnapshotObjectId kInternalRootObjectId;
+ static const SnapshotObjectId kGcRootsObjectId;
+ static const SnapshotObjectId kNativesRootObjectId;
+ static const SnapshotObjectId kGcRootsFirstSubrootId;
+ static const SnapshotObjectId kFirstAvailableObjectId;
+
+ int FindUntrackedObjects();
+
+ void UpdateHeapObjectsMap();
+
+ private:
+ struct EntryInfo {
+ EntryInfo(SnapshotObjectId id, Address addr, unsigned int size)
+ : id(id), addr(addr), size(size), accessed(true) { }
+ EntryInfo(SnapshotObjectId id, Address addr, unsigned int size, bool accessed)
+ : id(id), addr(addr), size(size), accessed(accessed) { }
+ SnapshotObjectId id;
+ Address addr;
+ unsigned int size;
+ bool accessed;
+ };
+ struct TimeInterval {
+ explicit TimeInterval(SnapshotObjectId id) : id(id), size(0), count(0) { }
+ SnapshotObjectId id;
+ uint32_t size;
+ uint32_t count;
+ };
+
+ void RemoveDeadEntries();
+
+ SnapshotObjectId next_id_;
+ HashMap entries_map_;
+ List<EntryInfo> entries_;
+ List<TimeInterval> time_intervals_;
+ Heap* heap_;
+
+ DISALLOW_COPY_AND_ASSIGN(HeapObjectsMap);
+};
+
+
+class HeapSnapshotsCollection {
+ public:
+ explicit HeapSnapshotsCollection(Heap* heap);
+ ~HeapSnapshotsCollection();
+
+ Heap* heap() const { return ids_.heap(); }
+
+ bool is_tracking_objects() { return is_tracking_objects_; }
+ SnapshotObjectId PushHeapObjectsStats(OutputStream* stream) {
+ return ids_.PushHeapObjectsStats(stream);
+ }
+ void StartHeapObjectsTracking();
+ void StopHeapObjectsTracking();
+
+ HeapSnapshot* NewSnapshot(const char* name, unsigned uid);
+ void SnapshotGenerationFinished(HeapSnapshot* snapshot);
+ List<HeapSnapshot*>* snapshots() { return &snapshots_; }
+ void RemoveSnapshot(HeapSnapshot* snapshot);
+
+ StringsStorage* names() { return &names_; }
+ AllocationTracker* allocation_tracker() { return allocation_tracker_; }
+
+ SnapshotObjectId FindObjectId(Address object_addr) {
+ return ids_.FindEntry(object_addr);
+ }
+ SnapshotObjectId GetObjectId(Address object_addr, int object_size) {
+ return ids_.FindOrAddEntry(object_addr, object_size);
+ }
+ Handle<HeapObject> FindHeapObjectById(SnapshotObjectId id);
+ void ObjectMoveEvent(Address from, Address to, int size) {
+ ids_.MoveObject(from, to, size);
+ }
+ void NewObjectEvent(Address addr, int size);
+ void UpdateObjectSizeEvent(Address addr, int size) {
+ ids_.UpdateObjectSize(addr, size);
+ }
+ SnapshotObjectId last_assigned_id() const {
+ return ids_.last_assigned_id();
+ }
+ size_t GetUsedMemorySize() const;
+
+ int FindUntrackedObjects() { return ids_.FindUntrackedObjects(); }
+
+ void UpdateHeapObjectsMap() { ids_.UpdateHeapObjectsMap(); }
+
+ private:
+ bool is_tracking_objects_; // Whether tracking object moves is needed.
+ List<HeapSnapshot*> snapshots_;
+ StringsStorage names_;
+ // Mapping from HeapObject addresses to objects' uids.
+ HeapObjectsMap ids_;
+ AllocationTracker* allocation_tracker_;
+
+ DISALLOW_COPY_AND_ASSIGN(HeapSnapshotsCollection);
+};
+
+
+// A typedef for referencing anything that can be snapshotted living
+// in any kind of heap memory.
+typedef void* HeapThing;
+
+
+// An interface for creating HeapEntries from HeapThings.
+class HeapEntriesAllocator {
+ public:
+ virtual ~HeapEntriesAllocator() { }
+ virtual HeapEntry* AllocateEntry(HeapThing ptr) = 0;
+};
+
+
+// The HeapEntriesMap instance is used to track a mapping between
+// real heap objects and their representations in heap snapshots.
+class HeapEntriesMap {
+ public:
+ HeapEntriesMap();
+
+ int Map(HeapThing thing);
+ void Pair(HeapThing thing, int entry);
+
+ private:
+ static uint32_t Hash(HeapThing thing) {
+ return ComputeIntegerHash(
+ static_cast<uint32_t>(reinterpret_cast<uintptr_t>(thing)),
+ v8::internal::kZeroHashSeed);
+ }
+ static bool HeapThingsMatch(HeapThing key1, HeapThing key2) {
+ return key1 == key2;
+ }
+
+ HashMap entries_;
+
+ friend class HeapObjectsSet;
+
+ DISALLOW_COPY_AND_ASSIGN(HeapEntriesMap);
+};
+
+
+class HeapObjectsSet {
+ public:
+ HeapObjectsSet();
+ void Clear();
+ bool Contains(Object* object);
+ void Insert(Object* obj);
+ const char* GetTag(Object* obj);
+ void SetTag(Object* obj, const char* tag);
+ bool is_empty() const { return entries_.occupancy() == 0; }
+
+ private:
+ HashMap entries_;
+
+ DISALLOW_COPY_AND_ASSIGN(HeapObjectsSet);
+};
+
+
+// An interface used to populate a snapshot with nodes and edges.
+class SnapshotFillerInterface {
+ public:
+ virtual ~SnapshotFillerInterface() { }
+ virtual HeapEntry* AddEntry(HeapThing ptr,
+ HeapEntriesAllocator* allocator) = 0;
+ virtual HeapEntry* FindEntry(HeapThing ptr) = 0;
+ virtual HeapEntry* FindOrAddEntry(HeapThing ptr,
+ HeapEntriesAllocator* allocator) = 0;
+ virtual void SetIndexedReference(HeapGraphEdge::Type type,
+ int parent_entry,
+ int index,
+ HeapEntry* child_entry) = 0;
+ virtual void SetIndexedAutoIndexReference(HeapGraphEdge::Type type,
+ int parent_entry,
+ HeapEntry* child_entry) = 0;
+ virtual void SetNamedReference(HeapGraphEdge::Type type,
+ int parent_entry,
+ const char* reference_name,
+ HeapEntry* child_entry) = 0;
+ virtual void SetNamedAutoIndexReference(HeapGraphEdge::Type type,
+ int parent_entry,
+ HeapEntry* child_entry) = 0;
+};
+
+
+class SnapshottingProgressReportingInterface {
+ public:
+ virtual ~SnapshottingProgressReportingInterface() { }
+ virtual void ProgressStep() = 0;
+ virtual bool ProgressReport(bool force) = 0;
+};
+
+
+// An implementation of the V8 heap graph extractor.
+class V8HeapExplorer : public HeapEntriesAllocator {
+ public:
+ V8HeapExplorer(HeapSnapshot* snapshot,
+ SnapshottingProgressReportingInterface* progress,
+ v8::HeapProfiler::ObjectNameResolver* resolver);
+ virtual ~V8HeapExplorer();
+ virtual HeapEntry* AllocateEntry(HeapThing ptr);
+ void AddRootEntries(SnapshotFillerInterface* filler);
+ int EstimateObjectsCount(HeapIterator* iterator);
+ bool IterateAndExtractReferences(SnapshotFillerInterface* filler);
+ void TagGlobalObjects();
+
+ static String* GetConstructorName(JSObject* object);
+
+ static HeapObject* const kInternalRootObject;
+
+ private:
+ HeapEntry* AddEntry(HeapObject* object);
+ HeapEntry* AddEntry(HeapObject* object,
+ HeapEntry::Type type,
+ const char* name);
+ const char* GetSystemEntryName(HeapObject* object);
+
+ void ExtractReferences(HeapObject* obj);
+ void ExtractJSGlobalProxyReferences(int entry, JSGlobalProxy* proxy);
+ void ExtractJSObjectReferences(int entry, JSObject* js_obj);
+ void ExtractStringReferences(int entry, String* obj);
+ void ExtractContextReferences(int entry, Context* context);
+ void ExtractMapReferences(int entry, Map* map);
+ void ExtractSharedFunctionInfoReferences(int entry,
+ SharedFunctionInfo* shared);
+ void ExtractScriptReferences(int entry, Script* script);
+ void ExtractAccessorPairReferences(int entry, AccessorPair* accessors);
+ void ExtractCodeCacheReferences(int entry, CodeCache* code_cache);
+ void ExtractCodeReferences(int entry, Code* code);
+ void ExtractCellReferences(int entry, Cell* cell);
+ void ExtractPropertyCellReferences(int entry, PropertyCell* cell);
+ void ExtractAllocationSiteReferences(int entry, AllocationSite* site);
+ void ExtractClosureReferences(JSObject* js_obj, int entry);
+ void ExtractPropertyReferences(JSObject* js_obj, int entry);
+ bool ExtractAccessorPairProperty(JSObject* js_obj, int entry,
+ Object* key, Object* callback_obj);
+ void ExtractElementReferences(JSObject* js_obj, int entry);
+ void ExtractInternalReferences(JSObject* js_obj, int entry);
+ bool IsEssentialObject(Object* object);
+ void SetContextReference(HeapObject* parent_obj,
+ int parent,
+ String* reference_name,
+ Object* child,
+ int field_offset);
+ void SetNativeBindReference(HeapObject* parent_obj,
+ int parent,
+ const char* reference_name,
+ Object* child);
+ void SetElementReference(HeapObject* parent_obj,
+ int parent,
+ int index,
+ Object* child);
+ void SetInternalReference(HeapObject* parent_obj,
+ int parent,
+ const char* reference_name,
+ Object* child,
+ int field_offset = -1);
+ void SetInternalReference(HeapObject* parent_obj,
+ int parent,
+ int index,
+ Object* child,
+ int field_offset = -1);
+ void SetHiddenReference(HeapObject* parent_obj,
+ int parent,
+ int index,
+ Object* child);
+ void SetWeakReference(HeapObject* parent_obj,
+ int parent,
+ int index,
+ Object* child_obj,
+ int field_offset);
+ void SetPropertyReference(HeapObject* parent_obj,
+ int parent,
+ Name* reference_name,
+ Object* child,
+ const char* name_format_string = NULL,
+ int field_offset = -1);
+ void SetUserGlobalReference(Object* user_global);
+ void SetRootGcRootsReference();
+ void SetGcRootsReference(VisitorSynchronization::SyncTag tag);
+ void SetGcSubrootReference(
+ VisitorSynchronization::SyncTag tag, bool is_weak, Object* child);
+ const char* GetStrongGcSubrootName(Object* object);
+ void TagObject(Object* obj, const char* tag);
+
+ HeapEntry* GetEntry(Object* obj);
+
+ static inline HeapObject* GetNthGcSubrootObject(int delta);
+ static inline int GetGcSubrootOrder(HeapObject* subroot);
+
+ Heap* heap_;
+ HeapSnapshot* snapshot_;
+ HeapSnapshotsCollection* collection_;
+ SnapshottingProgressReportingInterface* progress_;
+ SnapshotFillerInterface* filler_;
+ HeapObjectsSet objects_tags_;
+ HeapObjectsSet strong_gc_subroot_names_;
+ HeapObjectsSet user_roots_;
+ v8::HeapProfiler::ObjectNameResolver* global_object_name_resolver_;
+
+ static HeapObject* const kGcRootsObject;
+ static HeapObject* const kFirstGcSubrootObject;
+ static HeapObject* const kLastGcSubrootObject;
+
+ friend class IndexedReferencesExtractor;
+ friend class GcSubrootsEnumerator;
+ friend class RootsReferencesExtractor;
+
+ DISALLOW_COPY_AND_ASSIGN(V8HeapExplorer);
+};
+
+
+class NativeGroupRetainedObjectInfo;
+
+
+// An implementation of the retained native objects extractor.
+class NativeObjectsExplorer {
+ public:
+ NativeObjectsExplorer(HeapSnapshot* snapshot,
+ SnapshottingProgressReportingInterface* progress);
+ virtual ~NativeObjectsExplorer();
+ void AddRootEntries(SnapshotFillerInterface* filler);
+ int EstimateObjectsCount();
+ bool IterateAndExtractReferences(SnapshotFillerInterface* filler);
+
+ private:
+ void FillRetainedObjects();
+ void FillImplicitReferences();
+ List<HeapObject*>* GetListMaybeDisposeInfo(v8::RetainedObjectInfo* info);
+ void SetNativeRootReference(v8::RetainedObjectInfo* info);
+ void SetRootNativeRootsReference();
+ void SetWrapperNativeReferences(HeapObject* wrapper,
+ v8::RetainedObjectInfo* info);
+ void VisitSubtreeWrapper(Object** p, uint16_t class_id);
+
+ static uint32_t InfoHash(v8::RetainedObjectInfo* info) {
+ return ComputeIntegerHash(static_cast<uint32_t>(info->GetHash()),
+ v8::internal::kZeroHashSeed);
+ }
+ static bool RetainedInfosMatch(void* key1, void* key2) {
+ return key1 == key2 ||
+ (reinterpret_cast<v8::RetainedObjectInfo*>(key1))->IsEquivalent(
+ reinterpret_cast<v8::RetainedObjectInfo*>(key2));
+ }
+ INLINE(static bool StringsMatch(void* key1, void* key2)) {
+ return strcmp(reinterpret_cast<char*>(key1),
+ reinterpret_cast<char*>(key2)) == 0;
+ }
+
+ NativeGroupRetainedObjectInfo* FindOrAddGroupInfo(const char* label);
+
+ Isolate* isolate_;
+ HeapSnapshot* snapshot_;
+ HeapSnapshotsCollection* collection_;
+ SnapshottingProgressReportingInterface* progress_;
+ bool embedder_queried_;
+ HeapObjectsSet in_groups_;
+ // RetainedObjectInfo* -> List<HeapObject*>*
+ HashMap objects_by_info_;
+ HashMap native_groups_;
+ HeapEntriesAllocator* synthetic_entries_allocator_;
+ HeapEntriesAllocator* native_entries_allocator_;
+ // Used during references extraction.
+ SnapshotFillerInterface* filler_;
+
+ static HeapThing const kNativesRootObject;
+
+ friend class GlobalHandlesExtractor;
+
+ DISALLOW_COPY_AND_ASSIGN(NativeObjectsExplorer);
+};
+
+
+class HeapSnapshotGenerator : public SnapshottingProgressReportingInterface {
+ public:
+ HeapSnapshotGenerator(HeapSnapshot* snapshot,
+ v8::ActivityControl* control,
+ v8::HeapProfiler::ObjectNameResolver* resolver,
+ Heap* heap);
+ bool GenerateSnapshot();
+
+ private:
+ bool FillReferences();
+ void ProgressStep();
+ bool ProgressReport(bool force = false);
+ void SetProgressTotal(int iterations_count);
+
+ HeapSnapshot* snapshot_;
+ v8::ActivityControl* control_;
+ V8HeapExplorer v8_heap_explorer_;
+ NativeObjectsExplorer dom_explorer_;
+  // Mapping from HeapThing pointers to HeapEntry indices.
+ HeapEntriesMap entries_;
+ // Used during snapshot generation.
+ int progress_counter_;
+ int progress_total_;
+ Heap* heap_;
+
+ DISALLOW_COPY_AND_ASSIGN(HeapSnapshotGenerator);
+};
+
+class OutputStreamWriter;
+
+class HeapSnapshotJSONSerializer {
+ public:
+ explicit HeapSnapshotJSONSerializer(HeapSnapshot* snapshot)
+ : snapshot_(snapshot),
+ strings_(StringsMatch),
+ next_node_id_(1),
+ next_string_id_(1),
+ writer_(NULL) {
+ }
+ void Serialize(v8::OutputStream* stream);
+
+ private:
+ INLINE(static bool StringsMatch(void* key1, void* key2)) {
+ return strcmp(reinterpret_cast<char*>(key1),
+ reinterpret_cast<char*>(key2)) == 0;
+ }
+
+ INLINE(static uint32_t StringHash(const void* string)) {
+ const char* s = reinterpret_cast<const char*>(string);
+ int len = static_cast<int>(strlen(s));
+ return StringHasher::HashSequentialString(
+ s, len, v8::internal::kZeroHashSeed);
+ }
+
+ int GetStringId(const char* s);
+ int entry_index(HeapEntry* e) { return e->index() * kNodeFieldsCount; }
+ void SerializeEdge(HeapGraphEdge* edge, bool first_edge);
+ void SerializeEdges();
+ void SerializeImpl();
+ void SerializeNode(HeapEntry* entry);
+ void SerializeNodes();
+ void SerializeSnapshot();
+ void SerializeTraceTree();
+ void SerializeTraceNode(AllocationTraceNode* node);
+ void SerializeTraceNodeInfos();
+ void SerializeString(const unsigned char* s);
+ void SerializeStrings();
+
+ static const int kEdgeFieldsCount;
+ static const int kNodeFieldsCount;
+
+ HeapSnapshot* snapshot_;
+ HashMap strings_;
+ int next_node_id_;
+ int next_string_id_;
+ OutputStreamWriter* writer_;
+
+ friend class HeapSnapshotJSONSerializerEnumerator;
+ friend class HeapSnapshotJSONSerializerIterator;
+
+ DISALLOW_COPY_AND_ASSIGN(HeapSnapshotJSONSerializer);
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_HEAP_SNAPSHOT_GENERATOR_H_
+
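For context: the generator and serializer declared above are driven through the public v8-profiler.h API. A rough embedder-side sketch — the exact public signatures (GetHeapProfiler, TakeHeapSnapshot, Serialize) are assumed from this V8 vintage and should be checked against include/v8-profiler.h:

  #include <v8.h>
  #include <v8-profiler.h>

  void DumpHeap(v8::Isolate* isolate, v8::OutputStream* out) {
    v8::HandleScope scope(isolate);
    v8::HeapProfiler* profiler = isolate->GetHeapProfiler();
    // Internally runs HeapSnapshotGenerator::GenerateSnapshot().
    const v8::HeapSnapshot* snapshot =
        profiler->TakeHeapSnapshot(v8::String::New("dump"));
    // Streams JSON through HeapSnapshotJSONSerializer into the OutputStream.
    snapshot->Serialize(out, v8::HeapSnapshot::kJSON);
  }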
diff --git a/deps/v8/src/heap.cc b/deps/v8/src/heap.cc
index e3fcb93a7..fa358c539 100644
--- a/deps/v8/src/heap.cc
+++ b/deps/v8/src/heap.cc
@@ -32,12 +32,13 @@
#include "bootstrapper.h"
#include "codegen.h"
#include "compilation-cache.h"
+#include "cpu-profiler.h"
#include "debug.h"
#include "deoptimizer.h"
#include "global-handles.h"
#include "heap-profiler.h"
#include "incremental-marking.h"
-#include "liveobjectlist-inl.h"
+#include "isolate-inl.h"
#include "mark-compact.h"
#include "natives.h"
#include "objects-visiting.h"
@@ -47,6 +48,7 @@
#include "scopeinfo.h"
#include "snapshot.h"
#include "store-buffer.h"
+#include "utils/random-number-generator.h"
#include "v8threads.h"
#include "v8utils.h"
#include "vm-state-inl.h"
@@ -65,29 +67,14 @@ namespace internal {
Heap::Heap()
: isolate_(NULL),
+ code_range_size_(kIs64BitArch ? 512 * MB : 0),
// semispace_size_ should be a power of 2 and old_generation_size_ should be
// a multiple of Page::kPageSize.
-#if defined(V8_TARGET_ARCH_X64)
-#define LUMP_OF_MEMORY (2 * MB)
- code_range_size_(512*MB),
-#else
-#define LUMP_OF_MEMORY MB
- code_range_size_(0),
-#endif
-#if defined(ANDROID)
- reserved_semispace_size_(4 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
- max_semispace_size_(4 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
- initial_semispace_size_(Page::kPageSize),
- max_old_generation_size_(192*MB),
- max_executable_size_(max_old_generation_size_),
-#else
- reserved_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
- max_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
+ reserved_semispace_size_(8 * (kPointerSize / 4) * MB),
+ max_semispace_size_(8 * (kPointerSize / 4) * MB),
initial_semispace_size_(Page::kPageSize),
- max_old_generation_size_(700ul * LUMP_OF_MEMORY),
- max_executable_size_(256l * LUMP_OF_MEMORY),
-#endif
-
+ max_old_generation_size_(700ul * (kPointerSize / 4) * MB),
+ max_executable_size_(256ul * (kPointerSize / 4) * MB),
// Variables set based on semispace_size_ and old_generation_size_ in
// ConfigureHeap (survived_since_last_expansion_, external_allocation_limit_)
// Will be 4 * reserved_semispace_size_ to ensure that young
@@ -99,6 +86,7 @@ Heap::Heap()
contexts_disposed_(0),
global_ic_age_(0),
flush_monomorphic_ics_(false),
+ allocation_mementos_found_(0),
scan_on_scavenge_pages_(0),
new_space_(this),
old_pointer_space_(NULL),
@@ -106,6 +94,7 @@ Heap::Heap()
code_space_(NULL),
map_space_(NULL),
cell_space_(NULL),
+ property_cell_space_(NULL),
lo_space_(NULL),
gc_state_(NOT_IN_GC),
gc_post_processing_depth_(0),
@@ -114,38 +103,35 @@ Heap::Heap()
remembered_unmapped_pages_index_(0),
unflattened_strings_length_(0),
#ifdef DEBUG
- allocation_allowed_(true),
allocation_timeout_(0),
disallow_allocation_failure_(false),
- debug_utils_(NULL),
#endif // DEBUG
new_space_high_promotion_mode_active_(false),
- old_gen_promotion_limit_(kMinimumPromotionLimit),
- old_gen_allocation_limit_(kMinimumAllocationLimit),
- old_gen_limit_factor_(1),
+ old_generation_allocation_limit_(kMinimumOldGenerationAllocationLimit),
size_of_old_gen_at_last_old_space_gc_(0),
external_allocation_limit_(0),
amount_of_external_allocated_memory_(0),
amount_of_external_allocated_memory_at_last_global_gc_(0),
old_gen_exhausted_(false),
store_buffer_rebuilder_(store_buffer()),
- hidden_symbol_(NULL),
- global_gc_prologue_callback_(NULL),
- global_gc_epilogue_callback_(NULL),
+ hidden_string_(NULL),
gc_safe_size_of_old_object_(NULL),
total_regexp_code_generated_(0),
tracer_(NULL),
young_survivors_after_last_gc_(0),
high_survival_rate_period_length_(0),
+ low_survival_rate_period_length_(0),
survival_rate_(0),
previous_survival_rate_trend_(Heap::STABLE),
survival_rate_trend_(Heap::STABLE),
- max_gc_pause_(0),
- total_gc_time_ms_(0),
+ max_gc_pause_(0.0),
+ total_gc_time_ms_(0.0),
max_alive_after_gc_(0),
min_in_mutator_(kMaxInt),
alive_after_last_gc_(0),
last_gc_end_timestamp_(0.0),
+ marking_time_(0.0),
+ sweeping_time_(0.0),
store_buffer_(this),
marking_(this),
incremental_marking_(this),
@@ -153,9 +139,14 @@ Heap::Heap()
last_idle_notification_gc_count_(0),
last_idle_notification_gc_count_init_(false),
mark_sweeps_since_idle_round_started_(0),
- ms_count_at_last_idle_notification_(0),
gc_count_at_last_idle_gc_(0),
scavenges_since_last_idle_round_(kIdleScavengeThreshold),
+ full_codegen_bytes_generated_(0),
+ crankshaft_codegen_bytes_generated_(0),
+ gcs_since_last_deopt_(0),
+#ifdef VERIFY_HEAP
+ no_weak_object_verification_scope_depth_(0),
+#endif
promotion_queue_(this),
configured_(false),
chunks_queued_for_free_(NULL),
@@ -167,6 +158,9 @@ Heap::Heap()
max_semispace_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
#endif
+ // Ensure old_generation_size_ is a multiple of kPageSize.
+ ASSERT(MB >= Page::kPageSize);
+
intptr_t max_virtual = OS::MaxVirtualMemory();
if (max_virtual > 0) {
@@ -178,6 +172,8 @@ Heap::Heap()
memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
native_contexts_list_ = NULL;
+ array_buffers_list_ = Smi::FromInt(0);
+ allocation_sites_list_ = Smi::FromInt(0);
mark_compact_collector_.heap_ = this;
external_string_table_.heap_ = this;
// Put a dummy entry in the remembered pages so we can find the list the
@@ -196,7 +192,8 @@ intptr_t Heap::Capacity() {
old_data_space_->Capacity() +
code_space_->Capacity() +
map_space_->Capacity() +
- cell_space_->Capacity();
+ cell_space_->Capacity() +
+ property_cell_space_->Capacity();
}
@@ -209,9 +206,25 @@ intptr_t Heap::CommittedMemory() {
code_space_->CommittedMemory() +
map_space_->CommittedMemory() +
cell_space_->CommittedMemory() +
+ property_cell_space_->CommittedMemory() +
lo_space_->Size();
}
+
+size_t Heap::CommittedPhysicalMemory() {
+ if (!HasBeenSetUp()) return 0;
+
+ return new_space_.CommittedPhysicalMemory() +
+ old_pointer_space_->CommittedPhysicalMemory() +
+ old_data_space_->CommittedPhysicalMemory() +
+ code_space_->CommittedPhysicalMemory() +
+ map_space_->CommittedPhysicalMemory() +
+ cell_space_->CommittedPhysicalMemory() +
+ property_cell_space_->CommittedPhysicalMemory() +
+ lo_space_->CommittedPhysicalMemory();
+}
+
+
intptr_t Heap::CommittedMemoryExecutable() {
if (!HasBeenSetUp()) return 0;
@@ -227,7 +240,8 @@ intptr_t Heap::Available() {
old_data_space_->Available() +
code_space_->Available() +
map_space_->Available() +
- cell_space_->Available();
+ cell_space_->Available() +
+ property_cell_space_->Available();
}
@@ -237,6 +251,7 @@ bool Heap::HasBeenSetUp() {
code_space_ != NULL &&
map_space_ != NULL &&
cell_space_ != NULL &&
+ property_cell_space_ != NULL &&
lo_space_ != NULL;
}
@@ -264,7 +279,7 @@ GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
}
// Is enough data promoted to justify a global GC?
- if (OldGenerationPromotionLimitReached()) {
+ if (OldGenerationAllocationLimitReached()) {
isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
*reason = "promotion limit reached";
return MARK_COMPACTOR;
@@ -366,6 +381,12 @@ void Heap::PrintShortHeapStatistics() {
cell_space_->SizeOfObjects() / KB,
cell_space_->Available() / KB,
cell_space_->CommittedMemory() / KB);
+ PrintPID("PropertyCell space, used: %6" V8_PTR_PREFIX "d KB"
+ ", available: %6" V8_PTR_PREFIX "d KB"
+ ", committed: %6" V8_PTR_PREFIX "d KB\n",
+ property_cell_space_->SizeOfObjects() / KB,
+ property_cell_space_->Available() / KB,
+ property_cell_space_->CommittedMemory() / KB);
PrintPID("Large object space, used: %6" V8_PTR_PREFIX "d KB"
", available: %6" V8_PTR_PREFIX "d KB"
", committed: %6" V8_PTR_PREFIX "d KB\n",
@@ -378,7 +399,9 @@ void Heap::PrintShortHeapStatistics() {
this->SizeOfObjects() / KB,
this->Available() / KB,
this->CommittedMemory() / KB);
- PrintPID("Total time spent in GC : %d ms\n", total_gc_time_ms_);
+ PrintPID("External memory reported: %6" V8_PTR_PREFIX "d KB\n",
+ amount_of_external_allocated_memory_ / KB);
+ PrintPID("Total time spent in GC : %.1f ms\n", total_gc_time_ms_);
}
@@ -401,34 +424,42 @@ void Heap::ReportStatisticsAfterGC() {
void Heap::GarbageCollectionPrologue() {
- isolate_->transcendental_cache()->Clear();
- ClearJSFunctionResultCaches();
- gc_count_++;
- unflattened_strings_length_ = 0;
+ { AllowHeapAllocation for_the_first_part_of_prologue;
+ isolate_->transcendental_cache()->Clear();
+ ClearJSFunctionResultCaches();
+ gc_count_++;
+ unflattened_strings_length_ = 0;
+
+ if (FLAG_flush_code && FLAG_flush_code_incrementally) {
+ mark_compact_collector()->EnableCodeFlushing(true);
+ }
#ifdef VERIFY_HEAP
- if (FLAG_verify_heap) {
- Verify();
- }
+ if (FLAG_verify_heap) {
+ Verify();
+ }
#endif
+ }
#ifdef DEBUG
- ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
- allow_allocation(false);
+ ASSERT(!AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);
if (FLAG_gc_verbose) Print();
ReportStatisticsBeforeGC();
#endif // DEBUG
- LiveObjectList::GCPrologue();
store_buffer()->GCPrologue();
+
+ if (FLAG_concurrent_osr) {
+ isolate()->optimizing_compiler_thread()->AgeBufferedOsrJobs();
+ }
}
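// The allocation guards in the prologue rely on the RAII scopes from
// assert-scope.h. A minimal sketch of how they nest across CollectGarbage and
// this prologue (the block below is illustrative only, not code from this
// file):
//
//   {
//     DisallowHeapAllocation no_allocation_during_gc;  // held for the GC
//     {
//       AllowHeapAllocation for_the_prologue;  // cache clearing may allocate
//       // ... clear caches, bump counters ...
//     }
//     ASSERT(!AllowHeapAllocation::IsAllowed());  // disallowed again
//   }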
intptr_t Heap::SizeOfObjects() {
intptr_t total = 0;
- AllSpaces spaces;
+ AllSpaces spaces(this);
for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
total += space->SizeOfObjects();
}
@@ -437,7 +468,7 @@ intptr_t Heap::SizeOfObjects() {
void Heap::RepairFreeListsAfterBoot() {
- PagedSpaces spaces;
+ PagedSpaces spaces(this);
for (PagedSpace* space = spaces.next();
space != NULL;
space = spaces.next()) {
@@ -448,7 +479,6 @@ void Heap::RepairFreeListsAfterBoot() {
void Heap::GarbageCollectionEpilogue() {
store_buffer()->GCEpilogue();
- LiveObjectList::GCEpilogue();
// In release mode, we only zap the from space under heap verification.
if (Heap::ShouldZapGarbage()) {
@@ -461,32 +491,67 @@ void Heap::GarbageCollectionEpilogue() {
}
#endif
+ AllowHeapAllocation for_the_rest_of_the_epilogue;
+
#ifdef DEBUG
- allow_allocation(true);
if (FLAG_print_global_handles) isolate_->global_handles()->Print();
if (FLAG_print_handles) PrintHandles();
if (FLAG_gc_verbose) Print();
if (FLAG_code_stats) ReportCodeStatistics("After GC");
#endif
+ if (FLAG_deopt_every_n_garbage_collections > 0) {
+ if (++gcs_since_last_deopt_ == FLAG_deopt_every_n_garbage_collections) {
+ Deoptimizer::DeoptimizeAll(isolate());
+ gcs_since_last_deopt_ = 0;
+ }
+ }
isolate_->counters()->alive_after_last_gc()->Set(
static_cast<int>(SizeOfObjects()));
- isolate_->counters()->symbol_table_capacity()->Set(
- symbol_table()->Capacity());
+ isolate_->counters()->string_table_capacity()->Set(
+ string_table()->Capacity());
isolate_->counters()->number_of_symbols()->Set(
- symbol_table()->NumberOfElements());
+ string_table()->NumberOfElements());
+
+ if (full_codegen_bytes_generated_ + crankshaft_codegen_bytes_generated_ > 0) {
+ isolate_->counters()->codegen_fraction_crankshaft()->AddSample(
+ static_cast<int>((crankshaft_codegen_bytes_generated_ * 100.0) /
+ (crankshaft_codegen_bytes_generated_
+ + full_codegen_bytes_generated_)));
+ }
if (CommittedMemory() > 0) {
isolate_->counters()->external_fragmentation_total()->AddSample(
static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory()));
+ isolate_->counters()->heap_fraction_new_space()->
+ AddSample(static_cast<int>(
+ (new_space()->CommittedMemory() * 100.0) / CommittedMemory()));
+ isolate_->counters()->heap_fraction_old_pointer_space()->AddSample(
+ static_cast<int>(
+ (old_pointer_space()->CommittedMemory() * 100.0) /
+ CommittedMemory()));
+ isolate_->counters()->heap_fraction_old_data_space()->AddSample(
+ static_cast<int>(
+ (old_data_space()->CommittedMemory() * 100.0) /
+ CommittedMemory()));
+ isolate_->counters()->heap_fraction_code_space()->
+ AddSample(static_cast<int>(
+ (code_space()->CommittedMemory() * 100.0) / CommittedMemory()));
isolate_->counters()->heap_fraction_map_space()->AddSample(
static_cast<int>(
(map_space()->CommittedMemory() * 100.0) / CommittedMemory()));
isolate_->counters()->heap_fraction_cell_space()->AddSample(
static_cast<int>(
(cell_space()->CommittedMemory() * 100.0) / CommittedMemory()));
+ isolate_->counters()->heap_fraction_property_cell_space()->
+ AddSample(static_cast<int>(
+ (property_cell_space()->CommittedMemory() * 100.0) /
+ CommittedMemory()));
+ isolate_->counters()->heap_fraction_lo_space()->
+ AddSample(static_cast<int>(
+ (lo_space()->CommittedMemory() * 100.0) / CommittedMemory()));
isolate_->counters()->heap_sample_total_committed()->AddSample(
static_cast<int>(CommittedMemory() / KB));
@@ -496,6 +561,12 @@ void Heap::GarbageCollectionEpilogue() {
static_cast<int>(map_space()->CommittedMemory() / KB));
isolate_->counters()->heap_sample_cell_space_committed()->AddSample(
static_cast<int>(cell_space()->CommittedMemory() / KB));
+ isolate_->counters()->
+ heap_sample_property_cell_space_committed()->
+ AddSample(static_cast<int>(
+ property_cell_space()->CommittedMemory() / KB));
+ isolate_->counters()->heap_sample_code_space_committed()->AddSample(
+ static_cast<int>(code_space()->CommittedMemory() / KB));
}
#define UPDATE_COUNTERS_FOR_SPACE(space) \
@@ -521,6 +592,7 @@ void Heap::GarbageCollectionEpilogue() {
UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space)
UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space)
UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(cell_space)
+ UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(property_cell_space)
UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space)
#undef UPDATE_COUNTERS_FOR_SPACE
#undef UPDATE_FRAGMENTATION_FOR_SPACE
@@ -557,19 +629,25 @@ void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
// Note: as weak callbacks can execute arbitrary code, we cannot
// hope that eventually there will be no weak callbacks invocations.
// Therefore stop recollecting after several attempts.
+ if (FLAG_concurrent_recompilation) {
+ // The optimizing compiler may be unnecessarily holding on to memory.
+ DisallowHeapAllocation no_recursive_gc;
+ isolate()->optimizing_compiler_thread()->Flush();
+ }
mark_compact_collector()->SetFlags(kMakeHeapIterableMask |
kReduceMemoryFootprintMask);
isolate_->compilation_cache()->Clear();
const int kMaxNumberOfAttempts = 7;
+ const int kMinNumberOfAttempts = 2;
for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
- if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR, gc_reason, NULL)) {
+ if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR, gc_reason, NULL) &&
+ attempt + 1 >= kMinNumberOfAttempts) {
break;
}
}
mark_compact_collector()->SetFlags(kNoGCFlags);
new_space_.Shrink();
UncommitFromSpace();
- Shrink();
incremental_marking()->UncommitMarkingDeque();
}
@@ -579,7 +657,7 @@ bool Heap::CollectGarbage(AllocationSpace space,
const char* gc_reason,
const char* collector_reason) {
// The VM is in the GC state until exiting this function.
- VMState state(isolate_, GC);
+ VMState<GC> state(isolate_);
#ifdef DEBUG
// Reset the allocation timeout to the GC interval, but make sure to
@@ -597,7 +675,7 @@ bool Heap::CollectGarbage(AllocationSpace space,
}
if (collector == MARK_COMPACTOR &&
- !mark_compact_collector()->abort_incremental_marking_ &&
+ !mark_compact_collector()->abort_incremental_marking() &&
!incremental_marking()->IsStopped() &&
!incremental_marking()->should_hurry() &&
FLAG_incremental_marking_steps) {
@@ -617,6 +695,8 @@ bool Heap::CollectGarbage(AllocationSpace space,
bool next_gc_likely_to_collect_more = false;
{ GCTracer tracer(this, gc_reason, collector_reason);
+ ASSERT(AllowHeapAllocation::IsAllowed());
+ DisallowHeapAllocation no_allocation_during_gc;
GarbageCollectionPrologue();
// The GC count was incremented in the prologue. Tell the tracer about
// it.
@@ -625,30 +705,40 @@ bool Heap::CollectGarbage(AllocationSpace space,
// Tell the tracer which collector we've selected.
tracer.set_collector(collector);
- HistogramTimer* rate = (collector == SCAVENGER)
- ? isolate_->counters()->gc_scavenger()
- : isolate_->counters()->gc_compactor();
- rate->Start();
- next_gc_likely_to_collect_more =
- PerformGarbageCollection(collector, &tracer);
- rate->Stop();
-
- ASSERT(collector == SCAVENGER || incremental_marking()->IsStopped());
+ {
+ HistogramTimerScope histogram_timer_scope(
+ (collector == SCAVENGER) ? isolate_->counters()->gc_scavenger()
+ : isolate_->counters()->gc_compactor());
+ next_gc_likely_to_collect_more =
+ PerformGarbageCollection(collector, &tracer);
+ }
- // This can do debug callbacks and restart incremental marking.
GarbageCollectionEpilogue();
}
- if (incremental_marking()->IsStopped()) {
- if (incremental_marking()->WorthActivating() && NextGCIsLikelyToBeFull()) {
- incremental_marking()->Start();
- }
+ // Start incremental marking for the next cycle. The heap snapshot
+ // generator needs incremental marking to stay off after it aborted.
+ if (!mark_compact_collector()->abort_incremental_marking() &&
+ incremental_marking()->IsStopped() &&
+ incremental_marking()->WorthActivating() &&
+ NextGCIsLikelyToBeFull()) {
+ incremental_marking()->Start();
}
return next_gc_likely_to_collect_more;
}
+int Heap::NotifyContextDisposed() {
+ if (FLAG_concurrent_recompilation) {
+ // Flush the queued recompilation tasks.
+ isolate()->optimizing_compiler_thread()->Flush();
+ }
+ flush_monomorphic_ics_ = true;
+ return ++contexts_disposed_;
+}
+
+
void Heap::PerformScavenge() {
GCTracer tracer(this, NULL, NULL);
if (incremental_marking()->IsStopped()) {
@@ -659,25 +749,49 @@ void Heap::PerformScavenge() {
}
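// The MoveElements helper below has to replay the write barrier by hand:
// OS::MemMove copies the slots without any barrier, so when the array lives
// outside new space each destination slot that now holds a new-space pointer
// is reported via RecordWrite (the store buffer only tracks old-to-new
// pointers; arrays in new space are scanned in full by the scavenger), and
// RecordWrites re-notifies incremental marking about the whole array.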
+void Heap::MoveElements(FixedArray* array,
+ int dst_index,
+ int src_index,
+ int len) {
+ if (len == 0) return;
+
+ ASSERT(array->map() != fixed_cow_array_map());
+ Object** dst_objects = array->data_start() + dst_index;
+ OS::MemMove(dst_objects,
+ array->data_start() + src_index,
+ len * kPointerSize);
+ if (!InNewSpace(array)) {
+ for (int i = 0; i < len; i++) {
+ // TODO(hpayer): check store buffer for entries
+ if (InNewSpace(dst_objects[i])) {
+ RecordWrite(array->address(), array->OffsetOfElementAt(dst_index + i));
+ }
+ }
+ }
+ incremental_marking()->RecordWrites(array);
+}
+
+
#ifdef VERIFY_HEAP
-// Helper class for verifying the symbol table.
-class SymbolTableVerifier : public ObjectVisitor {
+// Helper class for verifying the string table.
+class StringTableVerifier : public ObjectVisitor {
public:
void VisitPointers(Object** start, Object** end) {
// Visit all HeapObject pointers in [start, end).
for (Object** p = start; p < end; p++) {
if ((*p)->IsHeapObject()) {
- // Check that the symbol is actually a symbol.
- CHECK((*p)->IsTheHole() || (*p)->IsUndefined() || (*p)->IsSymbol());
+ // Check that the string is actually internalized.
+ CHECK((*p)->IsTheHole() || (*p)->IsUndefined() ||
+ (*p)->IsInternalizedString());
}
}
}
};
-static void VerifySymbolTable() {
- SymbolTableVerifier verifier;
- HEAP->symbol_table()->IterateElements(&verifier);
+static void VerifyStringTable(Heap* heap) {
+ StringTableVerifier verifier;
+ heap->string_table()->IterateElements(&verifier);
}
#endif // VERIFY_HEAP
@@ -744,11 +858,6 @@ void Heap::EnsureFromSpaceIsCommitted() {
if (new_space_.CommitFromSpaceIfNeeded()) return;
// Committing memory to from space failed.
- // Try shrinking and try again.
- Shrink();
- if (new_space_.CommitFromSpaceIfNeeded()) return;
-
- // Committing memory to from space failed again.
// Memory is exhausted and we will die.
V8::FatalProcessOutOfMemory("Committing semi space failed.");
}
@@ -777,7 +886,6 @@ void Heap::ClearJSFunctionResultCaches() {
}
-
void Heap::ClearNormalizedMapCaches() {
if (isolate_->bootstrapper()->IsActive() &&
!incremental_marking()->IsMarking()) {
@@ -838,23 +946,18 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
- VerifySymbolTable();
+ VerifyStringTable(this);
}
#endif
- if (collector == MARK_COMPACTOR && global_gc_prologue_callback_) {
- ASSERT(!allocation_allowed_);
- GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
- global_gc_prologue_callback_();
- }
-
GCType gc_type =
collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;
- for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
- if (gc_type & gc_prologue_callbacks_[i].gc_type) {
- gc_prologue_callbacks_[i].callback(gc_type, kNoGCCallbackFlags);
- }
+ {
+ GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
+ VMState<EXTERNAL> state(isolate_);
+ HandleScope handle_scope(isolate_);
+ CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
}
EnsureFromSpaceIsCommitted();
@@ -872,30 +975,13 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
// Perform mark-sweep with optional compaction.
MarkCompact(tracer);
sweep_generation_++;
- bool high_survival_rate_during_scavenges = IsHighSurvivalRate() &&
- IsStableOrIncreasingSurvivalTrend();
UpdateSurvivalRateTrend(start_new_space_size);
size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSizeOfObjects();
- if (high_survival_rate_during_scavenges &&
- IsStableOrIncreasingSurvivalTrend()) {
- // Stable high survival rates of young objects both during partial and
- // full collection indicate that mutator is either building or modifying
- // a structure with a long lifetime.
- // In this case we aggressively raise old generation memory limits to
- // postpone subsequent mark-sweep collection and thus trade memory
- // space for the mutation speed.
- old_gen_limit_factor_ = 2;
- } else {
- old_gen_limit_factor_ = 1;
- }
-
- old_gen_promotion_limit_ =
- OldGenPromotionLimit(size_of_old_gen_at_last_old_space_gc_);
- old_gen_allocation_limit_ =
- OldGenAllocationLimit(size_of_old_gen_at_last_old_space_gc_);
+ old_generation_allocation_limit_ =
+ OldGenerationAllocationLimit(size_of_old_gen_at_last_old_space_gc_);
old_gen_exhausted_ = false;
} else {
@@ -914,22 +1000,34 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
// maximum capacity indicates that most objects will be promoted.
// To decrease scavenger pauses and final mark-sweep pauses, we
// have to limit maximal capacity of the young generation.
- new_space_high_promotion_mode_active_ = true;
+ SetNewSpaceHighPromotionModeActive(true);
if (FLAG_trace_gc) {
PrintPID("Limited new space size due to high promotion rate: %d MB\n",
new_space_.InitialCapacity() / MB);
}
+ // Support for global pre-tenuring uses the high promotion mode as a
+ // heuristic indicator of whether to pretenure or not, so we trigger

+ // deoptimization here to take advantage of pre-tenuring as soon as
+ // possible.
+ if (FLAG_pretenuring) {
+ isolate_->stack_guard()->FullDeopt();
+ }
} else if (new_space_high_promotion_mode_active_ &&
IsStableOrDecreasingSurvivalTrend() &&
IsLowSurvivalRate()) {
// Decreasing low survival rates might indicate that the above high
// promotion mode is over and we should allow the young generation
// to grow again.
- new_space_high_promotion_mode_active_ = false;
+ SetNewSpaceHighPromotionModeActive(false);
if (FLAG_trace_gc) {
PrintPID("Unlimited new space size due to low promotion rate: %d MB\n",
new_space_.MaximumCapacity() / MB);
}
+ // Trigger deoptimization here to turn off pre-tenuring as soon as
+ // possible.
+ if (FLAG_pretenuring) {
+ isolate_->stack_guard()->FullDeopt();
+ }
}
if (new_space_high_promotion_mode_active_ &&
@@ -939,16 +1037,23 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
isolate_->counters()->objs_since_last_young()->Set(0);
+ // Callbacks that fire after this point might trigger nested GCs and
+ // restart incremental marking, so the assertion can't be moved down.
+ ASSERT(collector == SCAVENGER || incremental_marking()->IsStopped());
+
gc_post_processing_depth_++;
- { DisableAssertNoAllocation allow_allocation;
+ { AllowHeapAllocation allow_allocation;
GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
next_gc_likely_to_collect_more =
- isolate_->global_handles()->PostGarbageCollectionProcessing(collector);
+ isolate_->global_handles()->PostGarbageCollectionProcessing(
+ collector, tracer);
}
gc_post_processing_depth_--;
+ isolate_->eternal_handles()->PostGarbageCollectionProcessing(this);
+
// Update relocatables.
- Relocatable::PostGarbageCollectionProcessing();
+ Relocatable::PostGarbageCollectionProcessing(isolate_);
if (collector == MARK_COMPACTOR) {
// Register the amount of external allocated memory.
@@ -956,22 +1061,16 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
amount_of_external_allocated_memory_;
}
- GCCallbackFlags callback_flags = kNoGCCallbackFlags;
- for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
- if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
- gc_epilogue_callbacks_[i].callback(gc_type, callback_flags);
- }
- }
-
- if (collector == MARK_COMPACTOR && global_gc_epilogue_callback_) {
- ASSERT(!allocation_allowed_);
+ {
GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
- global_gc_epilogue_callback_();
+ VMState<EXTERNAL> state(isolate_);
+ HandleScope handle_scope(isolate_);
+ CallGCEpilogueCallbacks(gc_type);
}
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
- VerifySymbolTable();
+ VerifyStringTable(this);
}
#endif
@@ -979,6 +1078,41 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
}
+void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
+ for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
+ if (gc_type & gc_prologue_callbacks_[i].gc_type) {
+ if (!gc_prologue_callbacks_[i].pass_isolate_) {
+ v8::GCPrologueCallback callback =
+ reinterpret_cast<v8::GCPrologueCallback>(
+ gc_prologue_callbacks_[i].callback);
+ callback(gc_type, flags);
+ } else {
+ v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
+ gc_prologue_callbacks_[i].callback(isolate, gc_type, flags);
+ }
+ }
+ }
+}
+
+
+void Heap::CallGCEpilogueCallbacks(GCType gc_type) {
+ for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
+ if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
+ if (!gc_epilogue_callbacks_[i].pass_isolate_) {
+ v8::GCPrologueCallback callback =
+ reinterpret_cast<v8::GCPrologueCallback>(
+ gc_epilogue_callbacks_[i].callback);
+ callback(gc_type, kNoGCCallbackFlags);
+ } else {
+ v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
+ gc_epilogue_callbacks_[i].callback(
+ isolate, gc_type, kNoGCCallbackFlags);
+ }
+ }
+ }
+}
+
+
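// Both dispatchers above support two callback shapes, selected by the
// pass_isolate_ flag captured when the callback was registered. Inferred from
// the calls above (a sketch, not a definition quoted from v8.h):
//
//   void LegacyCallback(GCType type, GCCallbackFlags flags);
//   void IsolateCallback(v8::Isolate* isolate,
//                        GCType type, GCCallbackFlags flags);
//
// The legacy shape is stored behind a reinterpret_cast to
// v8::GCPrologueCallback and invoked without an isolate; the newer shape
// receives the current isolate as its first argument.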
void Heap::MarkCompact(GCTracer* tracer) {
gc_state_ = MARK_COMPACT;
LOG(isolate_, ResourceEvent("markcompact", "begin"));
@@ -1026,12 +1160,6 @@ void Heap::MarkCompactPrologue() {
}
-Object* Heap::FindCodeObject(Address a) {
- return isolate()->inner_pointer_to_code_cache()->
- GcSafeFindCodeForInnerPointer(a);
-}
-
-
// Helper class for copying HeapObjects
class ScavengeVisitor: public ObjectVisitor {
public:
@@ -1061,29 +1189,33 @@ class ScavengeVisitor: public ObjectVisitor {
// new space.
class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor {
public:
+ explicit VerifyNonPointerSpacePointersVisitor(Heap* heap) : heap_(heap) {}
void VisitPointers(Object** start, Object**end) {
for (Object** current = start; current < end; current++) {
if ((*current)->IsHeapObject()) {
- CHECK(!HEAP->InNewSpace(HeapObject::cast(*current)));
+ CHECK(!heap_->InNewSpace(HeapObject::cast(*current)));
}
}
}
+
+ private:
+ Heap* heap_;
};
-static void VerifyNonPointerSpacePointers() {
+static void VerifyNonPointerSpacePointers(Heap* heap) {
// Verify that there are no pointers to new space in spaces where we
// do not expect them.
- VerifyNonPointerSpacePointersVisitor v;
- HeapObjectIterator code_it(HEAP->code_space());
+ VerifyNonPointerSpacePointersVisitor v(heap);
+ HeapObjectIterator code_it(heap->code_space());
for (HeapObject* object = code_it.Next();
object != NULL; object = code_it.Next())
object->Iterate(&v);
// The old data space was normally swept conservatively so that the iterator
// doesn't work, so we normally skip the next bit.
- if (!HEAP->old_data_space()->was_swept_conservatively()) {
- HeapObjectIterator data_it(HEAP->old_data_space());
+ if (!heap->old_data_space()->was_swept_conservatively()) {
+ HeapObjectIterator data_it(heap->old_data_space());
for (HeapObject* object = data_it.Next();
object != NULL; object = data_it.Next())
object->Iterate(&v);
@@ -1153,7 +1285,7 @@ void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) {
// Store Buffer overflowed while scanning promoted objects. These are not
// in any particular page, though they are likely to be clustered by the
// allocation routines.
- store_buffer_->EnsureSpace(StoreBuffer::kStoreBufferSize);
+ store_buffer_->EnsureSpace(StoreBuffer::kStoreBufferSize / 2);
} else {
// Store Buffer overflowed while scanning a particular old space page for
// pointers to new space.
@@ -1229,8 +1361,10 @@ class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
void Heap::Scavenge() {
RelocationLock relocation_lock(this);
+ allocation_mementos_found_ = 0;
+
#ifdef VERIFY_HEAP
- if (FLAG_verify_heap) VerifyNonPointerSpacePointers();
+ if (FLAG_verify_heap) VerifyNonPointerSpacePointers(this);
#endif
gc_state_ = SCAVENGE;
@@ -1250,7 +1384,8 @@ void Heap::Scavenge() {
incremental_marking()->PrepareForScavenge();
- AdvanceSweepers(static_cast<int>(new_space_.Size()));
+ paged_space(OLD_DATA_SPACE)->EnsureSweeperProgress(new_space_.Size());
+ paged_space(OLD_POINTER_SPACE)->EnsureSweeperProgress(new_space_.Size());
// Flip the semispaces. After flipping, to space is empty, from space has
// live objects.
@@ -1293,22 +1428,52 @@ void Heap::Scavenge() {
store_buffer()->IteratePointersToNewSpace(&ScavengeObject);
}
- // Copy objects reachable from cells by scavenging cell values directly.
+ // Copy objects reachable from simple cells by scavenging cell values
+ // directly.
HeapObjectIterator cell_iterator(cell_space_);
for (HeapObject* heap_object = cell_iterator.Next();
heap_object != NULL;
heap_object = cell_iterator.Next()) {
- if (heap_object->IsJSGlobalPropertyCell()) {
- JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(heap_object);
+ if (heap_object->IsCell()) {
+ Cell* cell = Cell::cast(heap_object);
Address value_address = cell->ValueAddress();
scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
}
}
+ // Copy objects reachable from global property cells by scavenging global
+ // property cell values directly.
+ HeapObjectIterator js_global_property_cell_iterator(property_cell_space_);
+ for (HeapObject* heap_object = js_global_property_cell_iterator.Next();
+ heap_object != NULL;
+ heap_object = js_global_property_cell_iterator.Next()) {
+ if (heap_object->IsPropertyCell()) {
+ PropertyCell* cell = PropertyCell::cast(heap_object);
+ Address value_address = cell->ValueAddress();
+ scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
+ Address type_address = cell->TypeAddress();
+ scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(type_address));
+ }
+ }
+
+ // Copy objects reachable from the code flushing candidates list.
+ MarkCompactCollector* collector = mark_compact_collector();
+ if (collector->is_code_flushing_enabled()) {
+ collector->code_flusher()->IteratePointersToFromSpace(&scavenge_visitor);
+ }
+
// Scavenge object reachable from the native contexts list directly.
scavenge_visitor.VisitPointer(BitCast<Object**>(&native_contexts_list_));
new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
+
+ while (isolate()->global_handles()->IterateObjectGroups(
+ &scavenge_visitor, &IsUnscavengedHeapObject)) {
+ new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
+ }
+ isolate()->global_handles()->RemoveObjectGroups();
+ isolate()->global_handles()->RemoveImplicitRefGroups();
+
isolate_->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
&IsUnscavengedHeapObject);
isolate_->global_handles()->IterateNewSpaceWeakIndependentRoots(
@@ -1320,7 +1485,6 @@ void Heap::Scavenge() {
promotion_queue_.Destroy();
- LiveObjectList::UpdateReferencesForScavengeGC();
if (!FLAG_watch_ic_patching) {
isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();
}
@@ -1346,6 +1510,11 @@ void Heap::Scavenge() {
gc_state_ = NOT_IN_GC;
scavenges_since_last_idle_round_++;
+
+ if (FLAG_trace_track_allocation_sites && allocation_mementos_found_ > 0) {
+ PrintF("AllocationMementos found during scavenge = %d\n",
+ allocation_mementos_found_);
+ }
}
@@ -1415,61 +1584,165 @@ void Heap::UpdateReferencesInExternalStringTable(
}
-static Object* ProcessFunctionWeakReferences(Heap* heap,
- Object* function,
- WeakObjectRetainer* retainer,
- bool record_slots) {
+template <class T>
+struct WeakListVisitor;
+
+
+template <class T>
+static Object* VisitWeakList(Heap* heap,
+ Object* list,
+ WeakObjectRetainer* retainer,
+ bool record_slots) {
Object* undefined = heap->undefined_value();
Object* head = undefined;
- JSFunction* tail = NULL;
- Object* candidate = function;
- while (candidate != undefined) {
+ T* tail = NULL;
+ MarkCompactCollector* collector = heap->mark_compact_collector();
+ while (list != undefined) {
// Check whether to keep the candidate in the list.
- JSFunction* candidate_function = reinterpret_cast<JSFunction*>(candidate);
- Object* retain = retainer->RetainAs(candidate);
- if (retain != NULL) {
+ T* candidate = reinterpret_cast<T*>(list);
+ Object* retained = retainer->RetainAs(list);
+ if (retained != NULL) {
if (head == undefined) {
// First element in the list.
- head = retain;
+ head = retained;
} else {
// Subsequent elements in the list.
ASSERT(tail != NULL);
- tail->set_next_function_link(retain);
+ WeakListVisitor<T>::SetWeakNext(tail, retained);
if (record_slots) {
- Object** next_function =
- HeapObject::RawField(tail, JSFunction::kNextFunctionLinkOffset);
- heap->mark_compact_collector()->RecordSlot(
- next_function, next_function, retain);
+ Object** next_slot =
+ HeapObject::RawField(tail, WeakListVisitor<T>::WeakNextOffset());
+ collector->RecordSlot(next_slot, next_slot, retained);
}
}
- // Retained function is new tail.
- candidate_function = reinterpret_cast<JSFunction*>(retain);
- tail = candidate_function;
+ // Retained object is new tail.
+ ASSERT(!retained->IsUndefined());
+ candidate = reinterpret_cast<T*>(retained);
+ tail = candidate;
- ASSERT(retain->IsUndefined() || retain->IsJSFunction());
- if (retain == undefined) break;
+ // tail is a live object, visit it.
+ WeakListVisitor<T>::VisitLiveObject(
+ heap, tail, retainer, record_slots);
+ } else {
+ WeakListVisitor<T>::VisitPhantomObject(heap, candidate);
}
// Move to next element in the list.
- candidate = candidate_function->next_function_link();
+ list = WeakListVisitor<T>::WeakNext(candidate);
}
// Terminate the list if there is one or more elements.
if (tail != NULL) {
- tail->set_next_function_link(undefined);
+ WeakListVisitor<T>::SetWeakNext(tail, undefined);
}
-
return head;
}
-void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
- Object* undefined = undefined_value();
- Object* head = undefined;
- Context* tail = NULL;
- Object* candidate = native_contexts_list_;
+template<>
+struct WeakListVisitor<JSFunction> {
+ static void SetWeakNext(JSFunction* function, Object* next) {
+ function->set_next_function_link(next);
+ }
+
+ static Object* WeakNext(JSFunction* function) {
+ return function->next_function_link();
+ }
+
+ static int WeakNextOffset() {
+ return JSFunction::kNextFunctionLinkOffset;
+ }
+
+ static void VisitLiveObject(Heap*, JSFunction*,
+ WeakObjectRetainer*, bool) {
+ }
+
+ static void VisitPhantomObject(Heap*, JSFunction*) {
+ }
+};
+
+
+template<>
+struct WeakListVisitor<Code> {
+ static void SetWeakNext(Code* code, Object* next) {
+ code->set_next_code_link(next);
+ }
+
+ static Object* WeakNext(Code* code) {
+ return code->next_code_link();
+ }
+
+ static int WeakNextOffset() {
+ return Code::kNextCodeLinkOffset;
+ }
+
+ static void VisitLiveObject(Heap*, Code*,
+ WeakObjectRetainer*, bool) {
+ }
+
+ static void VisitPhantomObject(Heap*, Code*) {
+ }
+};
+
+
+template<>
+struct WeakListVisitor<Context> {
+ static void SetWeakNext(Context* context, Object* next) {
+ context->set(Context::NEXT_CONTEXT_LINK,
+ next,
+ UPDATE_WRITE_BARRIER);
+ }
+
+ static Object* WeakNext(Context* context) {
+ return context->get(Context::NEXT_CONTEXT_LINK);
+ }
+
+ static void VisitLiveObject(Heap* heap,
+ Context* context,
+ WeakObjectRetainer* retainer,
+ bool record_slots) {
+ // Process the three weak lists linked off the context.
+ DoWeakList<JSFunction>(heap, context, retainer, record_slots,
+ Context::OPTIMIZED_FUNCTIONS_LIST);
+ DoWeakList<Code>(heap, context, retainer, record_slots,
+ Context::OPTIMIZED_CODE_LIST);
+ DoWeakList<Code>(heap, context, retainer, record_slots,
+ Context::DEOPTIMIZED_CODE_LIST);
+ }
+
+ template<class T>
+ static void DoWeakList(Heap* heap,
+ Context* context,
+ WeakObjectRetainer* retainer,
+ bool record_slots,
+ int index) {
+ // Visit the weak list, removing dead intermediate elements.
+ Object* list_head = VisitWeakList<T>(heap, context->get(index), retainer,
+ record_slots);
+
+ // Update the list head.
+ context->set(index, list_head, UPDATE_WRITE_BARRIER);
+
+ if (record_slots) {
+ // Record the updated slot if necessary.
+ Object** head_slot = HeapObject::RawField(
+ context, FixedArray::SizeFor(index));
+ heap->mark_compact_collector()->RecordSlot(
+ head_slot, head_slot, list_head);
+ }
+ }
+
+ static void VisitPhantomObject(Heap*, Context*) {
+ }
+
+ static int WeakNextOffset() {
+ return FixedArray::SizeFor(Context::NEXT_CONTEXT_LINK);
+ }
+};
+
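// VisitWeakList<T> only needs a WeakListVisitor<T> specialization describing
// where the weak "next" field lives and how to treat live and dead elements.
// A minimal sketch for a hypothetical weakly linked type (MyWeakThing is
// illustrative only, not a real heap type):
//
//   template<>
//   struct WeakListVisitor<MyWeakThing> {
//     static void SetWeakNext(MyWeakThing* obj, Object* next) {
//       obj->set_weak_next(next);
//     }
//     static Object* WeakNext(MyWeakThing* obj) { return obj->weak_next(); }
//     static int WeakNextOffset() { return MyWeakThing::kWeakNextOffset; }
//     static void VisitLiveObject(Heap*, MyWeakThing*,
//                                 WeakObjectRetainer*, bool) {}
//     static void VisitPhantomObject(Heap*, MyWeakThing*) {}
//   };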
+void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
// We don't record weak slots during marking or scavenges.
// Instead we do it once when we complete mark-compact cycle.
// Note that write barrier has no effect if we are already in the middle of
@@ -1477,84 +1750,173 @@ void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
bool record_slots =
gc_state() == MARK_COMPACT &&
mark_compact_collector()->is_compacting();
+ ProcessArrayBuffers(retainer, record_slots);
+ ProcessNativeContexts(retainer, record_slots);
+ ProcessAllocationSites(retainer, record_slots);
+}
- while (candidate != undefined) {
- // Check whether to keep the candidate in the list.
- Context* candidate_context = reinterpret_cast<Context*>(candidate);
- Object* retain = retainer->RetainAs(candidate);
- if (retain != NULL) {
- if (head == undefined) {
- // First element in the list.
- head = retain;
- } else {
- // Subsequent elements in the list.
- ASSERT(tail != NULL);
- tail->set_unchecked(this,
- Context::NEXT_CONTEXT_LINK,
- retain,
- UPDATE_WRITE_BARRIER);
+void Heap::ProcessNativeContexts(WeakObjectRetainer* retainer,
+ bool record_slots) {
+ Object* head =
+ VisitWeakList<Context>(
+ this, native_contexts_list(), retainer, record_slots);
+ // Update the head of the list of contexts.
+ native_contexts_list_ = head;
+}
- if (record_slots) {
- Object** next_context =
- HeapObject::RawField(
- tail, FixedArray::SizeFor(Context::NEXT_CONTEXT_LINK));
- mark_compact_collector()->RecordSlot(
- next_context, next_context, retain);
- }
- }
- // Retained context is new tail.
- candidate_context = reinterpret_cast<Context*>(retain);
- tail = candidate_context;
-
- if (retain == undefined) break;
-
- // Process the weak list of optimized functions for the context.
- Object* function_list_head =
- ProcessFunctionWeakReferences(
- this,
- candidate_context->get(Context::OPTIMIZED_FUNCTIONS_LIST),
- retainer,
- record_slots);
- candidate_context->set_unchecked(this,
- Context::OPTIMIZED_FUNCTIONS_LIST,
- function_list_head,
- UPDATE_WRITE_BARRIER);
- if (record_slots) {
- Object** optimized_functions =
- HeapObject::RawField(
- tail, FixedArray::SizeFor(Context::OPTIMIZED_FUNCTIONS_LIST));
- mark_compact_collector()->RecordSlot(
- optimized_functions, optimized_functions, function_list_head);
- }
+
+template<>
+struct WeakListVisitor<JSArrayBufferView> {
+ static void SetWeakNext(JSArrayBufferView* obj, Object* next) {
+ obj->set_weak_next(next);
+ }
+
+ static Object* WeakNext(JSArrayBufferView* obj) {
+ return obj->weak_next();
+ }
+
+ static void VisitLiveObject(Heap*,
+ JSArrayBufferView* obj,
+ WeakObjectRetainer* retainer,
+ bool record_slots) {}
+
+ static void VisitPhantomObject(Heap*, JSArrayBufferView*) {}
+
+ static int WeakNextOffset() {
+ return JSArrayBufferView::kWeakNextOffset;
+ }
+};
+
+
+template<>
+struct WeakListVisitor<JSArrayBuffer> {
+ static void SetWeakNext(JSArrayBuffer* obj, Object* next) {
+ obj->set_weak_next(next);
+ }
+
+ static Object* WeakNext(JSArrayBuffer* obj) {
+ return obj->weak_next();
+ }
+
+ static void VisitLiveObject(Heap* heap,
+ JSArrayBuffer* array_buffer,
+ WeakObjectRetainer* retainer,
+ bool record_slots) {
+ Object* typed_array_obj =
+ VisitWeakList<JSArrayBufferView>(
+ heap,
+ array_buffer->weak_first_view(),
+ retainer, record_slots);
+ array_buffer->set_weak_first_view(typed_array_obj);
+ if (typed_array_obj != heap->undefined_value() && record_slots) {
+ Object** slot = HeapObject::RawField(
+ array_buffer, JSArrayBuffer::kWeakFirstViewOffset);
+ heap->mark_compact_collector()->RecordSlot(slot, slot, typed_array_obj);
}
+ }
- // Move to next element in the list.
- candidate = candidate_context->get(Context::NEXT_CONTEXT_LINK);
+ static void VisitPhantomObject(Heap* heap, JSArrayBuffer* phantom) {
+ Runtime::FreeArrayBuffer(heap->isolate(), phantom);
}
- // Terminate the list if there is one or more elements.
- if (tail != NULL) {
- tail->set_unchecked(this,
- Context::NEXT_CONTEXT_LINK,
- Heap::undefined_value(),
- UPDATE_WRITE_BARRIER);
+ static int WeakNextOffset() {
+ return JSArrayBuffer::kWeakNextOffset;
}
+};
- // Update the head of the list of contexts.
- native_contexts_list_ = head;
+
+void Heap::ProcessArrayBuffers(WeakObjectRetainer* retainer,
+ bool record_slots) {
+ Object* array_buffer_obj =
+ VisitWeakList<JSArrayBuffer>(this,
+ array_buffers_list(),
+ retainer, record_slots);
+ set_array_buffers_list(array_buffer_obj);
+}
+
+
+void Heap::TearDownArrayBuffers() {
+ Object* undefined = undefined_value();
+ for (Object* o = array_buffers_list(); o != undefined;) {
+ JSArrayBuffer* buffer = JSArrayBuffer::cast(o);
+ Runtime::FreeArrayBuffer(isolate(), buffer);
+ o = buffer->weak_next();
+ }
+ array_buffers_list_ = undefined;
+}
+
+
+template<>
+struct WeakListVisitor<AllocationSite> {
+ static void SetWeakNext(AllocationSite* obj, Object* next) {
+ obj->set_weak_next(next);
+ }
+
+ static Object* WeakNext(AllocationSite* obj) {
+ return obj->weak_next();
+ }
+
+ static void VisitLiveObject(Heap* heap,
+ AllocationSite* site,
+ WeakObjectRetainer* retainer,
+ bool record_slots) {}
+
+ static void VisitPhantomObject(Heap* heap, AllocationSite* phantom) {}
+
+ static int WeakNextOffset() {
+ return AllocationSite::kWeakNextOffset;
+ }
+};
+
+
+void Heap::ProcessAllocationSites(WeakObjectRetainer* retainer,
+ bool record_slots) {
+ Object* allocation_site_obj =
+ VisitWeakList<AllocationSite>(this,
+ allocation_sites_list(),
+ retainer, record_slots);
+ set_allocation_sites_list(allocation_site_obj);
}
void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_allocation;
+
+ // Both the external string table and the string table may contain
+ // external strings, but neither lists them exhaustively, nor is the
+ // intersection set empty. Therefore we iterate over the external string
+ // table first, ignoring internalized strings, and then over the
+ // internalized string table.
+
+ class ExternalStringTableVisitorAdapter : public ObjectVisitor {
+ public:
+ explicit ExternalStringTableVisitorAdapter(
+ v8::ExternalResourceVisitor* visitor) : visitor_(visitor) {}
+ virtual void VisitPointers(Object** start, Object** end) {
+ for (Object** p = start; p < end; p++) {
+ // Visit non-internalized external strings,
+ // since internalized strings are listed in the string table.
+ if (!(*p)->IsInternalizedString()) {
+ ASSERT((*p)->IsExternalString());
+ visitor_->VisitExternalString(Utils::ToLocal(
+ Handle<String>(String::cast(*p))));
+ }
+ }
+ }
+ private:
+ v8::ExternalResourceVisitor* visitor_;
+ } external_string_table_visitor(visitor);
+
+ external_string_table_.Iterate(&external_string_table_visitor);
- class VisitorAdapter : public ObjectVisitor {
+ class StringTableVisitorAdapter : public ObjectVisitor {
public:
- explicit VisitorAdapter(v8::ExternalResourceVisitor* visitor)
- : visitor_(visitor) {}
+ explicit StringTableVisitorAdapter(
+ v8::ExternalResourceVisitor* visitor) : visitor_(visitor) {}
virtual void VisitPointers(Object** start, Object** end) {
for (Object** p = start; p < end; p++) {
if ((*p)->IsExternalString()) {
+ ASSERT((*p)->IsInternalizedString());
visitor_->VisitExternalString(Utils::ToLocal(
Handle<String>(String::cast(*p))));
}
@@ -1562,8 +1924,9 @@ void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
}
private:
v8::ExternalResourceVisitor* visitor_;
- } visitor_adapter(visitor);
- external_string_table_.Iterate(&visitor_adapter);
+ } string_table_visitor(visitor);
+
+ string_table()->IterateElements(&string_table_visitor);
}
@@ -1626,6 +1989,7 @@ Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
STATIC_ASSERT((FixedDoubleArray::kHeaderSize & kDoubleAlignmentMask) == 0);
+STATIC_ASSERT((ConstantPoolArray::kHeaderSize & kDoubleAlignmentMask) == 0);
INLINE(static HeapObject* EnsureDoubleAligned(Heap* heap,
@@ -1660,7 +2024,7 @@ template<MarksHandling marks_handling,
class ScavengingVisitor : public StaticVisitorBase {
public:
static void Initialize() {
- table_.Register(kVisitSeqAsciiString, &EvacuateSeqAsciiString);
+ table_.Register(kVisitSeqOneByteString, &EvacuateSeqOneByteString);
table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
table_.Register(kVisitByteArray, &EvacuateByteArray);
@@ -1679,6 +2043,10 @@ class ScavengingVisitor : public StaticVisitorBase {
&ObjectEvacuationStrategy<POINTER_OBJECT>::
template VisitSpecialized<SlicedString::kSize>);
+ table_.Register(kVisitSymbol,
+ &ObjectEvacuationStrategy<POINTER_OBJECT>::
+ template VisitSpecialized<Symbol::kSize>);
+
table_.Register(kVisitSharedFunctionInfo,
&ObjectEvacuationStrategy<POINTER_OBJECT>::
template VisitSpecialized<SharedFunctionInfo::kSize>);
@@ -1687,6 +2055,22 @@ class ScavengingVisitor : public StaticVisitorBase {
&ObjectEvacuationStrategy<POINTER_OBJECT>::
Visit);
+ table_.Register(kVisitJSWeakSet,
+ &ObjectEvacuationStrategy<POINTER_OBJECT>::
+ Visit);
+
+ table_.Register(kVisitJSArrayBuffer,
+ &ObjectEvacuationStrategy<POINTER_OBJECT>::
+ Visit);
+
+ table_.Register(kVisitJSTypedArray,
+ &ObjectEvacuationStrategy<POINTER_OBJECT>::
+ Visit);
+
+ table_.Register(kVisitJSDataView,
+ &ObjectEvacuationStrategy<POINTER_OBJECT>::
+ Visit);
+
table_.Register(kVisitJSRegExp,
&ObjectEvacuationStrategy<POINTER_OBJECT>::
Visit);
@@ -1718,7 +2102,6 @@ class ScavengingVisitor : public StaticVisitorBase {
private:
enum ObjectContents { DATA_OBJECT, POINTER_OBJECT };
- enum SizeRestriction { SMALL, UNKNOWN_SIZE };
static void RecordCopiedObject(Heap* heap, HeapObject* obj) {
bool should_record = false;
@@ -1751,10 +2134,14 @@ class ScavengingVisitor : public StaticVisitorBase {
if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
// Update NewSpace stats if necessary.
RecordCopiedObject(heap, target);
- HEAP_PROFILE(heap, ObjectMoveEvent(source->address(), target->address()));
Isolate* isolate = heap->isolate();
+ HeapProfiler* heap_profiler = isolate->heap_profiler();
+ if (heap_profiler->is_profiling()) {
+ heap_profiler->ObjectMoveEvent(source->address(), target->address(),
+ size);
+ }
if (isolate->logger()->is_logging_code_events() ||
- CpuProfiler::is_profiling(isolate)) {
+ isolate->cpu_profiler()->is_profiling()) {
if (target->IsSharedFunctionInfo()) {
PROFILE(isolate, SharedFunctionInfoMoveEvent(
source->address(), target->address()));
@@ -1770,15 +2157,12 @@ class ScavengingVisitor : public StaticVisitorBase {
}
- template<ObjectContents object_contents,
- SizeRestriction size_restriction,
- int alignment>
+ template<ObjectContents object_contents, int alignment>
static inline void EvacuateObject(Map* map,
HeapObject** slot,
HeapObject* object,
int object_size) {
- SLOW_ASSERT((size_restriction != SMALL) ||
- (object_size <= Page::kMaxNonCodeHeapObjectSize));
+ SLOW_ASSERT(object_size <= Page::kMaxNonCodeHeapObjectSize);
SLOW_ASSERT(object->Size() == object_size);
int allocation_size = object_size;
@@ -1791,17 +2175,12 @@ class ScavengingVisitor : public StaticVisitorBase {
if (heap->ShouldBePromoted(object->address(), object_size)) {
MaybeObject* maybe_result;
- if ((size_restriction != SMALL) &&
- (allocation_size > Page::kMaxNonCodeHeapObjectSize)) {
- maybe_result = heap->lo_space()->AllocateRaw(allocation_size,
- NOT_EXECUTABLE);
+ if (object_contents == DATA_OBJECT) {
+ ASSERT(heap->AllowedToBeMigrated(object, OLD_DATA_SPACE));
+ maybe_result = heap->old_data_space()->AllocateRaw(allocation_size);
} else {
- if (object_contents == DATA_OBJECT) {
- maybe_result = heap->old_data_space()->AllocateRaw(allocation_size);
- } else {
- maybe_result =
- heap->old_pointer_space()->AllocateRaw(allocation_size);
- }
+ ASSERT(heap->AllowedToBeMigrated(object, OLD_POINTER_SPACE));
+ maybe_result = heap->old_pointer_space()->AllocateRaw(allocation_size);
}
Object* result = NULL; // Initialization to please compiler.
@@ -1831,6 +2210,7 @@ class ScavengingVisitor : public StaticVisitorBase {
return;
}
}
+ ASSERT(heap->AllowedToBeMigrated(object, NEW_SPACE));
MaybeObject* allocation = heap->new_space()->AllocateRaw(allocation_size);
heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
Object* result = allocation->ToObjectUnchecked();
@@ -1875,10 +2255,8 @@ class ScavengingVisitor : public StaticVisitorBase {
HeapObject** slot,
HeapObject* object) {
int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
- EvacuateObject<POINTER_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(map,
- slot,
- object,
- object_size);
+ EvacuateObject<POINTER_OBJECT, kObjectAlignment>(
+ map, slot, object, object_size);
}
@@ -1887,11 +2265,8 @@ class ScavengingVisitor : public StaticVisitorBase {
HeapObject* object) {
int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
int object_size = FixedDoubleArray::SizeFor(length);
- EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kDoubleAlignment>(
- map,
- slot,
- object,
- object_size);
+ EvacuateObject<DATA_OBJECT, kDoubleAlignment>(
+ map, slot, object, object_size);
}
@@ -1899,17 +2274,17 @@ class ScavengingVisitor : public StaticVisitorBase {
HeapObject** slot,
HeapObject* object) {
int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
- EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
+ EvacuateObject<DATA_OBJECT, kObjectAlignment>(
map, slot, object, object_size);
}
- static inline void EvacuateSeqAsciiString(Map* map,
+ static inline void EvacuateSeqOneByteString(Map* map,
HeapObject** slot,
HeapObject* object) {
- int object_size = SeqAsciiString::cast(object)->
- SeqAsciiStringSize(map->instance_type());
- EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
+ int object_size = SeqOneByteString::cast(object)->
+ SeqOneByteStringSize(map->instance_type());
+ EvacuateObject<DATA_OBJECT, kObjectAlignment>(
map, slot, object, object_size);
}
@@ -1919,7 +2294,7 @@ class ScavengingVisitor : public StaticVisitorBase {
HeapObject* object) {
int object_size = SeqTwoByteString::cast(object)->
SeqTwoByteStringSize(map->instance_type());
- EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
+ EvacuateObject<DATA_OBJECT, kObjectAlignment>(
map, slot, object, object_size);
}
@@ -1963,7 +2338,7 @@ class ScavengingVisitor : public StaticVisitorBase {
}
int object_size = ConsString::kSize;
- EvacuateObject<POINTER_OBJECT, SMALL, kObjectAlignment>(
+ EvacuateObject<POINTER_OBJECT, kObjectAlignment>(
map, slot, object, object_size);
}
@@ -1974,7 +2349,7 @@ class ScavengingVisitor : public StaticVisitorBase {
static inline void VisitSpecialized(Map* map,
HeapObject** slot,
HeapObject* object) {
- EvacuateObject<object_contents, SMALL, kObjectAlignment>(
+ EvacuateObject<object_contents, kObjectAlignment>(
map, slot, object, object_size);
}
@@ -1982,7 +2357,7 @@ class ScavengingVisitor : public StaticVisitorBase {
HeapObject** slot,
HeapObject* object) {
int object_size = map->instance_size();
- EvacuateObject<object_contents, SMALL, kObjectAlignment>(
+ EvacuateObject<object_contents, kObjectAlignment>(
map, slot, object, object_size);
}
};
@@ -2010,7 +2385,7 @@ static void InitializeScavengingVisitorsTables() {
void Heap::SelectScavengingVisitorsTable() {
bool logging_and_profiling =
isolate()->logger()->is_logging() ||
- CpuProfiler::is_profiling(isolate()) ||
+ isolate()->cpu_profiler()->is_profiling() ||
(isolate()->heap_profiler() != NULL &&
isolate()->heap_profiler()->is_profiling());
@@ -2050,7 +2425,7 @@ void Heap::SelectScavengingVisitorsTable() {
void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
- SLOW_ASSERT(HEAP->InFromSpace(object));
+ SLOW_ASSERT(object->GetIsolate()->heap()->InFromSpace(object));
MapWord first_word = object->map_word();
SLOW_ASSERT(!first_word.IsForwardingAddress());
Map* map = first_word.ToMap();
@@ -2061,7 +2436,7 @@ void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
MaybeObject* Heap::AllocatePartialMap(InstanceType instance_type,
int instance_size) {
Object* result;
- MaybeObject* maybe_result = AllocateRawMap();
+ MaybeObject* maybe_result = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE);
if (!maybe_result->ToObject(&result)) return maybe_result;
// Map::cast cannot be used due to uninitialized map field.
@@ -2086,7 +2461,7 @@ MaybeObject* Heap::AllocateMap(InstanceType instance_type,
int instance_size,
ElementsKind elements_kind) {
Object* result;
- MaybeObject* maybe_result = AllocateRawMap();
+ MaybeObject* maybe_result = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE);
if (!maybe_result->To(&result)) return maybe_result;
Map* map = reinterpret_cast<Map*>(result);
@@ -2100,6 +2475,8 @@ MaybeObject* Heap::AllocateMap(InstanceType instance_type,
map->set_inobject_properties(0);
map->set_pre_allocated_property_fields(0);
map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
+ map->set_dependent_code(DependentCode::cast(empty_fixed_array()),
+ SKIP_WRITE_BARRIER);
map->init_back_pointer(undefined_value());
map->set_unused_property_fields(0);
map->set_instance_descriptors(empty_descriptor_array());
@@ -2137,6 +2514,7 @@ MaybeObject* Heap::AllocateAccessorPair() {
}
accessors->set_getter(the_hole_value(), SKIP_WRITE_BARRIER);
accessors->set_setter(the_hole_value(), SKIP_WRITE_BARRIER);
+ accessors->set_access_flags(Smi::FromInt(0), SKIP_WRITE_BARRIER);
return accessors;
}
@@ -2171,11 +2549,11 @@ const Heap::StringTypeTable Heap::string_type_table[] = {
};
-const Heap::ConstantSymbolTable Heap::constant_symbol_table[] = {
-#define CONSTANT_SYMBOL_ELEMENT(name, contents) \
+const Heap::ConstantStringTable Heap::constant_string_table[] = {
+#define CONSTANT_STRING_ELEMENT(name, contents) \
{contents, k##name##RootIndex},
- SYMBOL_LIST(CONSTANT_SYMBOL_ELEMENT)
-#undef CONSTANT_SYMBOL_ELEMENT
+ INTERNALIZED_STRING_LIST(CONSTANT_STRING_ELEMENT)
+#undef CONSTANT_STRING_ELEMENT
};
@@ -2235,14 +2613,18 @@ bool Heap::CreateInitialMaps() {
// Fix the instance_descriptors for the existing maps.
meta_map()->set_code_cache(empty_fixed_array());
+ meta_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
meta_map()->init_back_pointer(undefined_value());
meta_map()->set_instance_descriptors(empty_descriptor_array());
fixed_array_map()->set_code_cache(empty_fixed_array());
+ fixed_array_map()->set_dependent_code(
+ DependentCode::cast(empty_fixed_array()));
fixed_array_map()->init_back_pointer(undefined_value());
fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
oddball_map()->set_code_cache(empty_fixed_array());
+ oddball_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
oddball_map()->init_back_pointer(undefined_value());
oddball_map()->set_instance_descriptors(empty_descriptor_array());
@@ -2274,6 +2656,11 @@ bool Heap::CreateInitialMaps() {
}
set_heap_number_map(Map::cast(obj));
+ { MaybeObject* maybe_obj = AllocateMap(SYMBOL_TYPE, Symbol::kSize);
+ if (!maybe_obj->ToObject(&obj)) return false;
+ }
+ set_symbol_map(Map::cast(obj));
+
{ MaybeObject* maybe_obj = AllocateMap(FOREIGN_TYPE, Foreign::kSize);
if (!maybe_obj->ToObject(&obj)) return false;
}
@@ -2307,6 +2694,12 @@ bool Heap::CreateInitialMaps() {
set_fixed_double_array_map(Map::cast(obj));
{ MaybeObject* maybe_obj =
+ AllocateMap(CONSTANT_POOL_ARRAY_TYPE, kVariableSizeSentinel);
+ if (!maybe_obj->ToObject(&obj)) return false;
+ }
+ set_constant_pool_array_map(Map::cast(obj));
+
+ { MaybeObject* maybe_obj =
AllocateMap(BYTE_ARRAY_TYPE, kVariableSizeSentinel);
if (!maybe_obj->ToObject(&obj)) return false;
}
@@ -2383,13 +2776,66 @@ bool Heap::CreateInitialMaps() {
}
set_external_double_array_map(Map::cast(obj));
+ { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalByteArray);
+ if (!maybe_obj->ToObject(&obj)) return false;
+ }
+ set_empty_external_byte_array(ExternalArray::cast(obj));
+
+ { MaybeObject* maybe_obj =
+ AllocateEmptyExternalArray(kExternalUnsignedByteArray);
+ if (!maybe_obj->ToObject(&obj)) return false;
+ }
+ set_empty_external_unsigned_byte_array(ExternalArray::cast(obj));
+
+ { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalShortArray);
+ if (!maybe_obj->ToObject(&obj)) return false;
+ }
+ set_empty_external_short_array(ExternalArray::cast(obj));
+
+ { MaybeObject* maybe_obj = AllocateEmptyExternalArray(
+ kExternalUnsignedShortArray);
+ if (!maybe_obj->ToObject(&obj)) return false;
+ }
+ set_empty_external_unsigned_short_array(ExternalArray::cast(obj));
+
+ { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalIntArray);
+ if (!maybe_obj->ToObject(&obj)) return false;
+ }
+ set_empty_external_int_array(ExternalArray::cast(obj));
+
+ { MaybeObject* maybe_obj =
+ AllocateEmptyExternalArray(kExternalUnsignedIntArray);
+ if (!maybe_obj->ToObject(&obj)) return false;
+ }
+ set_empty_external_unsigned_int_array(ExternalArray::cast(obj));
+
+ { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalFloatArray);
+ if (!maybe_obj->ToObject(&obj)) return false;
+ }
+ set_empty_external_float_array(ExternalArray::cast(obj));
+
+ { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalDoubleArray);
+ if (!maybe_obj->ToObject(&obj)) return false;
+ }
+ set_empty_external_double_array(ExternalArray::cast(obj));
+
+ { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalPixelArray);
+ if (!maybe_obj->ToObject(&obj)) return false;
+ }
+ set_empty_external_pixel_array(ExternalArray::cast(obj));
+
{ MaybeObject* maybe_obj = AllocateMap(CODE_TYPE, kVariableSizeSentinel);
if (!maybe_obj->ToObject(&obj)) return false;
}
set_code_map(Map::cast(obj));
- { MaybeObject* maybe_obj = AllocateMap(JS_GLOBAL_PROPERTY_CELL_TYPE,
- JSGlobalPropertyCell::kSize);
+ { MaybeObject* maybe_obj = AllocateMap(CELL_TYPE, Cell::kSize);
+ if (!maybe_obj->ToObject(&obj)) return false;
+ }
+ set_cell_map(Map::cast(obj));
+
+ { MaybeObject* maybe_obj = AllocateMap(PROPERTY_CELL_TYPE,
+ PropertyCell::kSize);
if (!maybe_obj->ToObject(&obj)) return false;
}
set_global_property_cell_map(Map::cast(obj));
@@ -2475,6 +2921,14 @@ bool Heap::CreateInitialMaps() {
}
set_message_object_map(Map::cast(obj));
+ Map* external_map;
+ { MaybeObject* maybe_obj =
+ AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize + kPointerSize);
+ if (!maybe_obj->To(&external_map)) return false;
+ }
+ external_map->set_is_extensible(false);
+ set_external_map(external_map);
+
ASSERT(!InNewSpace(empty_fixed_array()));
return true;
}
@@ -2483,12 +2937,12 @@ bool Heap::CreateInitialMaps() {
MaybeObject* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
// Statically ensure that it is safe to allocate heap numbers in paged
// spaces.
+ int size = HeapNumber::kSize;
STATIC_ASSERT(HeapNumber::kSize <= Page::kNonCodeObjectAreaSize);
- AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
+ AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
Object* result;
- { MaybeObject* maybe_result =
- AllocateRaw(HeapNumber::kSize, space, OLD_DATA_SPACE);
+ { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
@@ -2498,36 +2952,63 @@ MaybeObject* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
}
-MaybeObject* Heap::AllocateHeapNumber(double value) {
- // Use general version, if we're forced to always allocate.
- if (always_allocate()) return AllocateHeapNumber(value, TENURED);
+MaybeObject* Heap::AllocateCell(Object* value) {
+ int size = Cell::kSize;
+ STATIC_ASSERT(Cell::kSize <= Page::kNonCodeObjectAreaSize);
- // This version of AllocateHeapNumber is optimized for
- // allocation in new space.
- STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxNonCodeHeapObjectSize);
- ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
Object* result;
- { MaybeObject* maybe_result = new_space_.AllocateRaw(HeapNumber::kSize);
+ { MaybeObject* maybe_result = AllocateRaw(size, CELL_SPACE, CELL_SPACE);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
- HeapObject::cast(result)->set_map_no_write_barrier(heap_number_map());
- HeapNumber::cast(result)->set_value(value);
+ HeapObject::cast(result)->set_map_no_write_barrier(cell_map());
+ Cell::cast(result)->set_value(value);
return result;
}
-MaybeObject* Heap::AllocateJSGlobalPropertyCell(Object* value) {
+MaybeObject* Heap::AllocatePropertyCell() {
+ int size = PropertyCell::kSize;
+ STATIC_ASSERT(PropertyCell::kSize <= Page::kNonCodeObjectAreaSize);
+
Object* result;
- { MaybeObject* maybe_result = AllocateRawCell();
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
+ MaybeObject* maybe_result =
+ AllocateRaw(size, PROPERTY_CELL_SPACE, PROPERTY_CELL_SPACE);
+ if (!maybe_result->ToObject(&result)) return maybe_result;
+
HeapObject::cast(result)->set_map_no_write_barrier(
global_property_cell_map());
- JSGlobalPropertyCell::cast(result)->set_value(value);
+ PropertyCell* cell = PropertyCell::cast(result);
+ cell->set_dependent_code(DependentCode::cast(empty_fixed_array()),
+ SKIP_WRITE_BARRIER);
+ cell->set_value(the_hole_value());
+ cell->set_type(Type::None());
+ return result;
+}
+
+
+MaybeObject* Heap::AllocateBox(Object* value, PretenureFlag pretenure) {
+ Box* result;
+ MaybeObject* maybe_result = AllocateStruct(BOX_TYPE);
+ if (!maybe_result->To(&result)) return maybe_result;
+ result->set_value(value);
return result;
}
+MaybeObject* Heap::AllocateAllocationSite() {
+ AllocationSite* site;
+ MaybeObject* maybe_result = Allocate(allocation_site_map(),
+ OLD_POINTER_SPACE);
+ if (!maybe_result->To(&site)) return maybe_result;
+ site->Initialize();
+
+ // Link the site into the heap's list of allocation sites.
+ site->set_weak_next(allocation_sites_list());
+ set_allocation_sites_list(site);
+ return site;
+}
+
+
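// The two set_* calls above push the new site onto a heap-owned, weakly
// traversed singly linked list. A stand-alone sketch of that push-front
// pattern with illustrative types (not V8's actual classes):
struct SketchSite {
  SketchSite* weak_next;  // next site in the list, or NULL for the tail
};

struct SketchSiteList {
  SketchSite* head;  // corresponds to the allocation_sites_list() root

  void Link(SketchSite* site) {
    site->weak_next = head;  // new site points at the previous head
    head = site;             // and becomes the new head of the list
  }
};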
MaybeObject* Heap::CreateOddball(const char* to_string,
Object* to_number,
byte kind) {
@@ -2535,7 +3016,7 @@ MaybeObject* Heap::CreateOddball(const char* to_string,
{ MaybeObject* maybe_result = Allocate(oddball_map(), OLD_POINTER_SPACE);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
- return Oddball::cast(result)->Initialize(to_string, to_number, kind);
+ return Oddball::cast(result)->Initialize(this, to_string, to_number, kind);
}
@@ -2570,13 +3051,13 @@ bool Heap::CreateApiObjects() {
void Heap::CreateJSEntryStub() {
JSEntryStub stub;
- set_js_entry_code(*stub.GetCode());
+ set_js_entry_code(*stub.GetCode(isolate()));
}
void Heap::CreateJSConstructEntryStub() {
JSConstructEntryStub stub;
- set_js_construct_entry_code(*stub.GetCode());
+ set_js_construct_entry_code(*stub.GetCode(isolate()));
}
@@ -2585,7 +3066,7 @@ void Heap::CreateFixedStubs() {
// for cooking and uncooking (check out frames.cc).
// This eliminates the need for doing dictionary lookup in the
// stub cache for these stubs.
- HandleScope scope;
+ HandleScope scope(isolate());
// gcc-4.4 has a problem generating correct code for the following snippet:
// { JSEntryStub stub;
// js_entry_code_ = *stub.GetCode();
@@ -2601,7 +3082,7 @@ void Heap::CreateFixedStubs() {
// create them if we need them during the creation of another stub.
// Stub creation mixes raw pointers and handles in an unsafe manner so
// we cannot create stubs while we are creating stubs.
- CodeStub::GenerateStubsAheadOfTime();
+ CodeStub::GenerateStubsAheadOfTime(isolate());
}
@@ -2613,7 +3094,7 @@ bool Heap::CreateInitialObjects() {
if (!maybe_obj->ToObject(&obj)) return false;
}
set_minus_zero_value(HeapNumber::cast(obj));
- ASSERT(signbit(minus_zero_value()->Number()) != 0);
+ ASSERT(std::signbit(minus_zero_value()->Number()) != 0);
{ MaybeObject* maybe_obj = AllocateHeapNumber(OS::nan_value(), TENURED);
if (!maybe_obj->ToObject(&obj)) return false;
@@ -2626,27 +3107,29 @@ bool Heap::CreateInitialObjects() {
set_infinity_value(HeapNumber::cast(obj));
// The hole has not been created yet, but we want to put something
- // predictable in the gaps in the symbol table, so lets make that Smi zero.
+ // predictable in the gaps in the string table, so let's make that Smi zero.
set_the_hole_value(reinterpret_cast<Oddball*>(Smi::FromInt(0)));
- // Allocate initial symbol table.
- { MaybeObject* maybe_obj = SymbolTable::Allocate(kInitialSymbolTableSize);
+ // Allocate initial string table.
+ { MaybeObject* maybe_obj =
+ StringTable::Allocate(this, kInitialStringTableSize);
if (!maybe_obj->ToObject(&obj)) return false;
}
- // Don't use set_symbol_table() due to asserts.
- roots_[kSymbolTableRootIndex] = obj;
+ // Don't use set_string_table() due to asserts.
+ roots_[kStringTableRootIndex] = obj;
- // Finish initializing oddballs after creating symboltable.
+ // Finish initializing oddballs after creating the string table.
{ MaybeObject* maybe_obj =
- undefined_value()->Initialize("undefined",
+ undefined_value()->Initialize(this,
+ "undefined",
nan_value(),
Oddball::kUndefined);
if (!maybe_obj->ToObject(&obj)) return false;
}
// Initialize the null_value.
- { MaybeObject* maybe_obj =
- null_value()->Initialize("null", Smi::FromInt(0), Oddball::kNull);
+ { MaybeObject* maybe_obj = null_value()->Initialize(
+ this, "null", Smi::FromInt(0), Oddball::kNull);
if (!maybe_obj->ToObject(&obj)) return false;
}
@@ -2671,6 +3154,13 @@ bool Heap::CreateInitialObjects() {
}
set_the_hole_value(Oddball::cast(obj));
+ { MaybeObject* maybe_obj = CreateOddball("uninitialized",
+ Smi::FromInt(-1),
+ Oddball::kUninitialized);
+ if (!maybe_obj->ToObject(&obj)) return false;
+ }
+ set_uninitialized_value(Oddball::cast(obj));
+
{ MaybeObject* maybe_obj = CreateOddball("arguments_marker",
Smi::FromInt(-4),
Oddball::kArgumentMarker);
@@ -2692,42 +3182,29 @@ bool Heap::CreateInitialObjects() {
}
set_termination_exception(obj);
- // Allocate the empty string.
- { MaybeObject* maybe_obj = AllocateRawAsciiString(0, TENURED);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_empty_string(String::cast(obj));
-
- for (unsigned i = 0; i < ARRAY_SIZE(constant_symbol_table); i++) {
+ for (unsigned i = 0; i < ARRAY_SIZE(constant_string_table); i++) {
{ MaybeObject* maybe_obj =
- LookupAsciiSymbol(constant_symbol_table[i].contents);
+ InternalizeUtf8String(constant_string_table[i].contents);
if (!maybe_obj->ToObject(&obj)) return false;
}
- roots_[constant_symbol_table[i].index] = String::cast(obj);
+ roots_[constant_string_table[i].index] = String::cast(obj);
}
- // Allocate the hidden symbol which is used to identify the hidden properties
+ // Allocate the hidden string which is used to identify the hidden properties
// in JSObjects. The hash code has a special value so that it will not match
// the empty string when searching for the property. It cannot be part of the
// loop above because it needs to be allocated manually with the special
- // hash code in place. The hash code for the hidden_symbol is zero to ensure
+ // hash code in place. The hash code for the hidden_string is zero to ensure
// that it will always be at the first entry in property descriptors.
- { MaybeObject* maybe_obj =
- AllocateSymbol(CStrVector(""), 0, String::kEmptyStringHash);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- hidden_symbol_ = String::cast(obj);
-
- // Allocate the foreign for __proto__.
- { MaybeObject* maybe_obj =
- AllocateForeign((Address) &Accessors::ObjectPrototype);
+ { MaybeObject* maybe_obj = AllocateOneByteInternalizedString(
+ OneByteVector("", 0), String::kEmptyStringHash);
if (!maybe_obj->ToObject(&obj)) return false;
}
- set_prototype_accessors(Foreign::cast(obj));
+ hidden_string_ = String::cast(obj);
// Allocate the code_stubs dictionary. The initial size is set to avoid
// expanding the dictionary during bootstrapping.
- { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(128);
+ { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(this, 128);
if (!maybe_obj->ToObject(&obj)) return false;
}
set_code_stubs(UnseededNumberDictionary::cast(obj));
@@ -2735,7 +3212,7 @@ bool Heap::CreateInitialObjects() {
// Allocate the non_monomorphic_cache used in stub-cache.cc. The initial size
// is set to avoid expanding the dictionary during bootstrapping.
- { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(64);
+ { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(this, 64);
if (!maybe_obj->ToObject(&obj)) return false;
}
set_non_monomorphic_cache(UnseededNumberDictionary::cast(obj));
@@ -2752,23 +3229,24 @@ bool Heap::CreateInitialObjects() {
CreateFixedStubs();
// Allocate the dictionary of intrinsic function names.
- { MaybeObject* maybe_obj = StringDictionary::Allocate(Runtime::kNumFunctions);
+ { MaybeObject* maybe_obj =
+ NameDictionary::Allocate(this, Runtime::kNumFunctions);
if (!maybe_obj->ToObject(&obj)) return false;
}
{ MaybeObject* maybe_obj = Runtime::InitializeIntrinsicFunctionNames(this,
obj);
if (!maybe_obj->ToObject(&obj)) return false;
}
- set_intrinsic_function_names(StringDictionary::cast(obj));
+ set_intrinsic_function_names(NameDictionary::cast(obj));
{ MaybeObject* maybe_obj = AllocateInitialNumberStringCache();
if (!maybe_obj->ToObject(&obj)) return false;
}
set_number_string_cache(FixedArray::cast(obj));
- // Allocate cache for single character ASCII strings.
+ // Allocate cache for single-character one-byte strings.
{ MaybeObject* maybe_obj =
- AllocateFixedArray(String::kMaxAsciiCharCode + 1, TENURED);
+ AllocateFixedArray(String::kMaxOneByteCharCode + 1, TENURED);
if (!maybe_obj->ToObject(&obj)) return false;
}
set_single_character_string_cache(FixedArray::cast(obj));
@@ -2792,8 +3270,38 @@ bool Heap::CreateInitialObjects() {
}
set_natives_source_cache(FixedArray::cast(obj));
- // Handling of script id generation is in FACTORY->NewScript.
- set_last_script_id(undefined_value());
+ // Allocate object to hold object observation state.
+ { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
+ if (!maybe_obj->ToObject(&obj)) return false;
+ }
+ { MaybeObject* maybe_obj = AllocateJSObjectFromMap(Map::cast(obj));
+ if (!maybe_obj->ToObject(&obj)) return false;
+ }
+ set_observation_state(JSObject::cast(obj));
+
+ { MaybeObject* maybe_obj = AllocateSymbol();
+ if (!maybe_obj->ToObject(&obj)) return false;
+ }
+ set_frozen_symbol(Symbol::cast(obj));
+
+ { MaybeObject* maybe_obj = AllocateSymbol();
+ if (!maybe_obj->ToObject(&obj)) return false;
+ }
+ set_elements_transition_symbol(Symbol::cast(obj));
+
+ { MaybeObject* maybe_obj = SeededNumberDictionary::Allocate(this, 0, TENURED);
+ if (!maybe_obj->ToObject(&obj)) return false;
+ }
+ SeededNumberDictionary::cast(obj)->set_requires_slow_elements();
+ set_empty_slow_element_dictionary(SeededNumberDictionary::cast(obj));
+
+ { MaybeObject* maybe_obj = AllocateSymbol();
+ if (!maybe_obj->ToObject(&obj)) return false;
+ }
+ set_observed_symbol(Symbol::cast(obj));
+
+ // Handling of script id generation is in Factory::NewScript.
+ set_last_script_id(Smi::FromInt(v8::Script::kNoScriptId));
// Initialize keyed lookup cache.
isolate_->keyed_lookup_cache()->Clear();
@@ -2811,15 +3319,50 @@ bool Heap::CreateInitialObjects() {
}
+bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
+ RootListIndex writable_roots[] = {
+ kStoreBufferTopRootIndex,
+ kStackLimitRootIndex,
+ kNumberStringCacheRootIndex,
+ kInstanceofCacheFunctionRootIndex,
+ kInstanceofCacheMapRootIndex,
+ kInstanceofCacheAnswerRootIndex,
+ kCodeStubsRootIndex,
+ kNonMonomorphicCacheRootIndex,
+ kPolymorphicCodeCacheRootIndex,
+ kLastScriptIdRootIndex,
+ kEmptyScriptRootIndex,
+ kRealStackLimitRootIndex,
+ kArgumentsAdaptorDeoptPCOffsetRootIndex,
+ kConstructStubDeoptPCOffsetRootIndex,
+ kGetterStubDeoptPCOffsetRootIndex,
+ kSetterStubDeoptPCOffsetRootIndex,
+ kStringTableRootIndex,
+ };
+
+ for (unsigned int i = 0; i < ARRAY_SIZE(writable_roots); i++) {
+ if (root_index == writable_roots[i])
+ return true;
+ }
+ return false;
+}
+
+
+bool Heap::RootCanBeTreatedAsConstant(RootListIndex root_index) {
+ return !RootCanBeWrittenAfterInitialization(root_index) &&
+ !InNewSpace(roots_array_start()[root_index]);
+}
+
+
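// RootCanBeTreatedAsConstant combines two facts: the root is never rewritten
// after bootstrapping, and its value does not live in new space, so it can
// neither change nor move. Only then is it safe to bake the value into
// generated code rather than reloading it from the roots array. A small,
// self-contained sketch of that predicate (illustrative names only):
enum SketchRootIndex {
  SKETCH_STACK_LIMIT, SKETCH_STRING_TABLE, SKETCH_UNDEFINED_VALUE
};

static const SketchRootIndex kSketchWritableRoots[] = {
  SKETCH_STACK_LIMIT, SKETCH_STRING_TABLE
};

static bool SketchRootIsWritable(SketchRootIndex index) {
  for (unsigned i = 0;
       i < sizeof(kSketchWritableRoots) / sizeof(kSketchWritableRoots[0]);
       i++) {
    if (kSketchWritableRoots[i] == index) return true;
  }
  return false;
}

// A root may be embedded as a constant only if it is both immutable after
// startup and immovable (modelled here by value_in_new_space).
static bool SketchRootIsConstant(SketchRootIndex index,
                                 bool value_in_new_space) {
  return !SketchRootIsWritable(index) && !value_in_new_space;
}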
Object* RegExpResultsCache::Lookup(Heap* heap,
String* key_string,
Object* key_pattern,
ResultsCacheType type) {
FixedArray* cache;
- if (!key_string->IsSymbol()) return Smi::FromInt(0);
+ if (!key_string->IsInternalizedString()) return Smi::FromInt(0);
if (type == STRING_SPLIT_SUBSTRINGS) {
ASSERT(key_pattern->IsString());
- if (!key_pattern->IsSymbol()) return Smi::FromInt(0);
+ if (!key_pattern->IsInternalizedString()) return Smi::FromInt(0);
cache = heap->string_split_cache();
} else {
ASSERT(type == REGEXP_MULTIPLE_INDICES);
@@ -2850,10 +3393,10 @@ void RegExpResultsCache::Enter(Heap* heap,
FixedArray* value_array,
ResultsCacheType type) {
FixedArray* cache;
- if (!key_string->IsSymbol()) return;
+ if (!key_string->IsInternalizedString()) return;
if (type == STRING_SPLIT_SUBSTRINGS) {
ASSERT(key_pattern->IsString());
- if (!key_pattern->IsSymbol()) return;
+ if (!key_pattern->IsInternalizedString()) return;
cache = heap->string_split_cache();
} else {
ASSERT(type == REGEXP_MULTIPLE_INDICES);
@@ -2885,14 +3428,14 @@ void RegExpResultsCache::Enter(Heap* heap,
}
}
// If the array is a reasonably short list of substrings, convert it into a
- // list of symbols.
+ // list of internalized strings.
if (type == STRING_SPLIT_SUBSTRINGS && value_array->length() < 100) {
for (int i = 0; i < value_array->length(); i++) {
String* str = String::cast(value_array->get(i));
- Object* symbol;
- MaybeObject* maybe_symbol = heap->LookupSymbol(str);
- if (maybe_symbol->ToObject(&symbol)) {
- value_array->set(i, symbol);
+ Object* internalized_str;
+ MaybeObject* maybe_string = heap->InternalizeString(str);
+ if (maybe_string->ToObject(&internalized_str)) {
+ value_array->set(i, internalized_str);
}
}
}
@@ -2950,7 +3493,7 @@ void Heap::FlushNumberStringCache() {
// Flush the number to string cache.
int len = number_string_cache()->length();
for (int i = 0; i < len; i++) {
- number_string_cache()->set_undefined(this, i);
+ number_string_cache()->set_undefined(i);
}
}
@@ -3007,7 +3550,8 @@ void Heap::SetNumberStringCache(Object* number, String* string) {
MaybeObject* Heap::NumberToString(Object* number,
- bool check_number_string_cache) {
+ bool check_number_string_cache,
+ PretenureFlag pretenure) {
isolate_->counters()->number_to_string_runtime()->Increment();
if (check_number_string_cache) {
Object* cached = GetNumberStringCache(number);
@@ -3028,7 +3572,8 @@ MaybeObject* Heap::NumberToString(Object* number,
}
Object* js_string;
- MaybeObject* maybe_js_string = AllocateStringFromAscii(CStrVector(str));
+ MaybeObject* maybe_js_string =
+ AllocateStringFromOneByte(CStrVector(str), pretenure);
if (maybe_js_string->ToObject(&js_string)) {
SetNumberStringCache(number, String::cast(js_string));
}
@@ -3077,6 +3622,41 @@ Heap::RootListIndex Heap::RootIndexForExternalArrayType(
}
}
+Heap::RootListIndex Heap::RootIndexForEmptyExternalArray(
+ ElementsKind elementsKind) {
+ switch (elementsKind) {
+ case EXTERNAL_BYTE_ELEMENTS:
+ return kEmptyExternalByteArrayRootIndex;
+ case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ return kEmptyExternalUnsignedByteArrayRootIndex;
+ case EXTERNAL_SHORT_ELEMENTS:
+ return kEmptyExternalShortArrayRootIndex;
+ case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ return kEmptyExternalUnsignedShortArrayRootIndex;
+ case EXTERNAL_INT_ELEMENTS:
+ return kEmptyExternalIntArrayRootIndex;
+ case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ return kEmptyExternalUnsignedIntArrayRootIndex;
+ case EXTERNAL_FLOAT_ELEMENTS:
+ return kEmptyExternalFloatArrayRootIndex;
+ case EXTERNAL_DOUBLE_ELEMENTS:
+ return kEmptyExternalDoubleArrayRootIndex;
+ case EXTERNAL_PIXEL_ELEMENTS:
+ return kEmptyExternalPixelArrayRootIndex;
+ default:
+ UNREACHABLE();
+ return kUndefinedValueRootIndex;
+ }
+}
+
+
+ExternalArray* Heap::EmptyExternalArrayForMap(Map* map) {
+ return ExternalArray::cast(
+ roots_[RootIndexForEmptyExternalArray(map->elements_kind())]);
+}
+
+
+
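// The switch above maps every external elements kind to one canonical,
// pre-allocated empty array, so zero-length external arrays of the same kind
// all share a single object. A miniature sketch of that lookup (illustrative
// types, not the real root table):
enum SketchElementsKind { SKETCH_BYTE, SKETCH_SHORT, SKETCH_INT, SKETCH_KIND_COUNT };

struct SketchEmptyArrayTable {
  void* empty_arrays[SKETCH_KIND_COUNT];  // one shared empty array per kind

  void* EmptyArrayFor(SketchElementsKind kind) {
    return empty_arrays[kind];  // every caller gets the same singleton
  }
};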
MaybeObject* Heap::NumberFromDouble(double value, PretenureFlag pretenure) {
// We need to distinguish the minus zero value and this cannot be
@@ -3120,20 +3700,18 @@ MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) {
share->set_name(name);
Code* illegal = isolate_->builtins()->builtin(Builtins::kIllegal);
share->set_code(illegal);
- share->ClearOptimizedCodeMap();
- share->set_scope_info(ScopeInfo::Empty());
+ share->set_optimized_code_map(Smi::FromInt(0));
+ share->set_scope_info(ScopeInfo::Empty(isolate_));
Code* construct_stub =
isolate_->builtins()->builtin(Builtins::kJSConstructStubGeneric);
share->set_construct_stub(construct_stub);
- share->set_instance_class_name(Object_symbol());
+ share->set_instance_class_name(Object_string());
share->set_function_data(undefined_value(), SKIP_WRITE_BARRIER);
share->set_script(undefined_value(), SKIP_WRITE_BARRIER);
share->set_debug_info(undefined_value(), SKIP_WRITE_BARRIER);
share->set_inferred_name(empty_string(), SKIP_WRITE_BARRIER);
share->set_initial_map(undefined_value(), SKIP_WRITE_BARRIER);
- share->set_this_property_assignments(undefined_value(), SKIP_WRITE_BARRIER);
share->set_ast_node_count(0);
- share->set_stress_deopt_counter(FLAG_deopt_every_n_times);
share->set_counters(0);
// Set integer fields (smi or int, depending on the architecture).
@@ -3146,8 +3724,7 @@ MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) {
share->set_function_token_position(0);
// All compiler hints default to false or 0.
share->set_compiler_hints(0);
- share->set_this_property_assignments_count(0);
- share->set_opt_count(0);
+ share->set_opt_count_and_bailout_reason(0);
return share;
}
@@ -3189,25 +3766,26 @@ static inline bool Between(uint32_t character, uint32_t from, uint32_t to) {
MUST_USE_RESULT static inline MaybeObject* MakeOrFindTwoCharacterString(
Heap* heap,
- uint32_t c1,
- uint32_t c2) {
- String* symbol;
+ uint16_t c1,
+ uint16_t c2) {
+ String* result;
// Numeric strings have a different hash algorithm not known by
- // LookupTwoCharsSymbolIfExists, so we skip this step for such strings.
+ // LookupTwoCharsStringIfExists, so we skip this step for such strings.
if ((!Between(c1, '0', '9') || !Between(c2, '0', '9')) &&
- heap->symbol_table()->LookupTwoCharsSymbolIfExists(c1, c2, &symbol)) {
- return symbol;
+ heap->string_table()->LookupTwoCharsStringIfExists(c1, c2, &result)) {
+ return result;
// Now that we know the length is 2, we might as well make use of that fact
// when building the new string.
- } else if ((c1 | c2) <= String::kMaxAsciiCharCodeU) { // We can do this
- ASSERT(IsPowerOf2(String::kMaxAsciiCharCodeU + 1)); // because of this.
+ } else if (static_cast<unsigned>(c1 | c2) <= String::kMaxOneByteCharCodeU) {
+ // We can do this.
+ ASSERT(IsPowerOf2(String::kMaxOneByteCharCodeU + 1)); // because of this.
Object* result;
- { MaybeObject* maybe_result = heap->AllocateRawAsciiString(2);
+ { MaybeObject* maybe_result = heap->AllocateRawOneByteString(2);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
- char* dest = SeqAsciiString::cast(result)->GetChars();
- dest[0] = c1;
- dest[1] = c2;
+ uint8_t* dest = SeqOneByteString::cast(result)->GetChars();
+ dest[0] = static_cast<uint8_t>(c1);
+ dest[1] = static_cast<uint8_t>(c2);
return result;
} else {
Object* result;
@@ -3236,33 +3814,32 @@ MaybeObject* Heap::AllocateConsString(String* first, String* second) {
int length = first_length + second_length;
// Optimization for 2-byte strings often used as keys in a decompression
- // dictionary. Check whether we already have the string in the symbol
+ // dictionary. Check whether we already have the string in the string
// table to prevent creation of many unnecessary strings.
if (length == 2) {
- unsigned c1 = first->Get(0);
- unsigned c2 = second->Get(0);
+ uint16_t c1 = first->Get(0);
+ uint16_t c2 = second->Get(0);
return MakeOrFindTwoCharacterString(this, c1, c2);
}
- bool first_is_ascii = first->IsAsciiRepresentation();
- bool second_is_ascii = second->IsAsciiRepresentation();
- bool is_ascii = first_is_ascii && second_is_ascii;
-
+ bool first_is_one_byte = first->IsOneByteRepresentation();
+ bool second_is_one_byte = second->IsOneByteRepresentation();
+ bool is_one_byte = first_is_one_byte && second_is_one_byte;
// Make sure that an out of memory exception is thrown if the length
// of the new cons string is too large.
if (length > String::kMaxLength || length < 0) {
isolate()->context()->mark_out_of_memory();
- return Failure::OutOfMemoryException();
+ return Failure::OutOfMemoryException(0x4);
}
- bool is_ascii_data_in_two_byte_string = false;
- if (!is_ascii) {
+ bool is_one_byte_data_in_two_byte_string = false;
+ if (!is_one_byte) {
// At least one of the strings uses two-byte representation so we
// can't use the fast case code for short ASCII strings below, but
// we can try to save memory if all chars actually fit in ASCII.
- is_ascii_data_in_two_byte_string =
- first->HasOnlyAsciiChars() && second->HasOnlyAsciiChars();
- if (is_ascii_data_in_two_byte_string) {
+ is_one_byte_data_in_two_byte_string =
+ first->HasOnlyOneByteChars() && second->HasOnlyOneByteChars();
+ if (is_one_byte_data_in_two_byte_string) {
isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
}
}
@@ -3273,37 +3850,37 @@ MaybeObject* Heap::AllocateConsString(String* first, String* second) {
STATIC_ASSERT(ConsString::kMinLength <= SlicedString::kMinLength);
ASSERT(first->IsFlat());
ASSERT(second->IsFlat());
- if (is_ascii) {
+ if (is_one_byte) {
Object* result;
- { MaybeObject* maybe_result = AllocateRawAsciiString(length);
+ { MaybeObject* maybe_result = AllocateRawOneByteString(length);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
// Copy the characters into the new object.
- char* dest = SeqAsciiString::cast(result)->GetChars();
+ uint8_t* dest = SeqOneByteString::cast(result)->GetChars();
// Copy first part.
- const char* src;
+ const uint8_t* src;
if (first->IsExternalString()) {
src = ExternalAsciiString::cast(first)->GetChars();
} else {
- src = SeqAsciiString::cast(first)->GetChars();
+ src = SeqOneByteString::cast(first)->GetChars();
}
for (int i = 0; i < first_length; i++) *dest++ = src[i];
// Copy second part.
if (second->IsExternalString()) {
src = ExternalAsciiString::cast(second)->GetChars();
} else {
- src = SeqAsciiString::cast(second)->GetChars();
+ src = SeqOneByteString::cast(second)->GetChars();
}
for (int i = 0; i < second_length; i++) *dest++ = src[i];
return result;
} else {
- if (is_ascii_data_in_two_byte_string) {
+ if (is_one_byte_data_in_two_byte_string) {
Object* result;
- { MaybeObject* maybe_result = AllocateRawAsciiString(length);
+ { MaybeObject* maybe_result = AllocateRawOneByteString(length);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
// Copy the characters into the new object.
- char* dest = SeqAsciiString::cast(result)->GetChars();
+ uint8_t* dest = SeqOneByteString::cast(result)->GetChars();
String::WriteToFlat(first, dest, 0, first_length);
String::WriteToFlat(second, dest + first_length, 0, second_length);
isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
@@ -3322,7 +3899,7 @@ MaybeObject* Heap::AllocateConsString(String* first, String* second) {
}
}
- Map* map = (is_ascii || is_ascii_data_in_two_byte_string) ?
+ Map* map = (is_one_byte || is_one_byte_data_in_two_byte_string) ?
cons_ascii_string_map() : cons_string_map();
Object* result;
@@ -3330,7 +3907,7 @@ MaybeObject* Heap::AllocateConsString(String* first, String* second) {
if (!maybe_result->ToObject(&result)) return maybe_result;
}
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
ConsString* cons_string = ConsString::cast(result);
WriteBarrierMode mode = cons_string->GetWriteBarrierMode(no_gc);
cons_string->set_length(length);
@@ -3352,10 +3929,10 @@ MaybeObject* Heap::AllocateSubString(String* buffer,
return LookupSingleCharacterStringFromCode(buffer->Get(start));
} else if (length == 2) {
// Optimization for 2-byte strings often used as keys in a decompression
- // dictionary. Check whether we already have the string in the symbol
- // table to prevent creation of many unneccesary strings.
- unsigned c1 = buffer->Get(start);
- unsigned c2 = buffer->Get(start + 1);
+ // dictionary. Check whether we already have the string in the string
+ // table to prevent creation of many unnecessary strings.
+ uint16_t c1 = buffer->Get(start);
+ uint16_t c2 = buffer->Get(start + 1);
return MakeOrFindTwoCharacterString(this, c1, c2);
}
@@ -3370,17 +3947,17 @@ MaybeObject* Heap::AllocateSubString(String* buffer,
// WriteToFlat takes care of the case when an indirect string has a
// different encoding from its underlying string. These encodings may
// differ because of externalization.
- bool is_ascii = buffer->IsAsciiRepresentation();
- { MaybeObject* maybe_result = is_ascii
- ? AllocateRawAsciiString(length, pretenure)
+ bool is_one_byte = buffer->IsOneByteRepresentation();
+ { MaybeObject* maybe_result = is_one_byte
+ ? AllocateRawOneByteString(length, pretenure)
: AllocateRawTwoByteString(length, pretenure);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
String* string_result = String::cast(result);
// Copy the characters into the new object.
- if (is_ascii) {
- ASSERT(string_result->IsAsciiRepresentation());
- char* dest = SeqAsciiString::cast(string_result)->GetChars();
+ if (is_one_byte) {
+ ASSERT(string_result->IsOneByteRepresentation());
+ uint8_t* dest = SeqOneByteString::cast(string_result)->GetChars();
String::WriteToFlat(buffer, dest, start, end);
} else {
ASSERT(string_result->IsTwoByteRepresentation());
@@ -3404,14 +3981,14 @@ MaybeObject* Heap::AllocateSubString(String* buffer,
// indirect ASCII string is pointing to a two-byte string, the two-byte char
// codes of the underlying string must still fit into ASCII (because
// externalization must not change char codes).
- { Map* map = buffer->IsAsciiRepresentation()
+ { Map* map = buffer->IsOneByteRepresentation()
? sliced_ascii_string_map()
: sliced_string_map();
MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
SlicedString* sliced_string = SlicedString::cast(result);
sliced_string->set_length(length);
sliced_string->set_hash_field(String::kEmptyHashField);
@@ -3440,11 +4017,9 @@ MaybeObject* Heap::AllocateExternalStringFromAscii(
size_t length = resource->length();
if (length > static_cast<size_t>(String::kMaxLength)) {
isolate()->context()->mark_out_of_memory();
- return Failure::OutOfMemoryException();
+ return Failure::OutOfMemoryException(0x5);
}
- ASSERT(String::IsAscii(resource->data(), static_cast<int>(length)));
-
Map* map = external_ascii_string_map();
Object* result;
{ MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
@@ -3465,16 +4040,16 @@ MaybeObject* Heap::AllocateExternalStringFromTwoByte(
size_t length = resource->length();
if (length > static_cast<size_t>(String::kMaxLength)) {
isolate()->context()->mark_out_of_memory();
- return Failure::OutOfMemoryException();
+ return Failure::OutOfMemoryException(0x6);
}
// For small strings we check whether the resource contains only
- // ASCII characters. If yes, we use a different string map.
- static const size_t kAsciiCheckLengthLimit = 32;
- bool is_ascii = length <= kAsciiCheckLengthLimit &&
- String::IsAscii(resource->data(), static_cast<int>(length));
- Map* map = is_ascii ?
- external_string_with_ascii_data_map() : external_string_map();
+ // one byte characters. If yes, we use a different string map.
+ static const size_t kOneByteCheckLengthLimit = 32;
+ bool is_one_byte = length <= kOneByteCheckLengthLimit &&
+ String::IsOneByte(resource->data(), static_cast<int>(length));
+ Map* map = is_one_byte ?
+ external_string_with_one_byte_data_map() : external_string_map();
Object* result;
{ MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
if (!maybe_result->ToObject(&result)) return maybe_result;
@@ -3490,14 +4065,15 @@ MaybeObject* Heap::AllocateExternalStringFromTwoByte(
MaybeObject* Heap::LookupSingleCharacterStringFromCode(uint16_t code) {
- if (code <= String::kMaxAsciiCharCode) {
+ if (code <= String::kMaxOneByteCharCode) {
Object* value = single_character_string_cache()->get(code);
if (value != undefined_value()) return value;
- char buffer[1];
- buffer[0] = static_cast<char>(code);
+ uint8_t buffer[1];
+ buffer[0] = static_cast<uint8_t>(code);
Object* result;
- MaybeObject* maybe_result = LookupSymbol(Vector<const char>(buffer, 1));
+ MaybeObject* maybe_result =
+ InternalizeOneByteString(Vector<const uint8_t>(buffer, 1));
if (!maybe_result->ToObject(&result)) return maybe_result;
single_character_string_cache()->set(code, result);
@@ -3516,33 +4092,10 @@ MaybeObject* Heap::LookupSingleCharacterStringFromCode(uint16_t code) {
MaybeObject* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
if (length < 0 || length > ByteArray::kMaxLength) {
- return Failure::OutOfMemoryException();
- }
- if (pretenure == NOT_TENURED) {
- return AllocateByteArray(length);
+ return Failure::OutOfMemoryException(0x7);
}
int size = ByteArray::SizeFor(length);
- Object* result;
- { MaybeObject* maybe_result = (size <= Page::kMaxNonCodeHeapObjectSize)
- ? old_data_space_->AllocateRaw(size)
- : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
-
- reinterpret_cast<ByteArray*>(result)->set_map_no_write_barrier(
- byte_array_map());
- reinterpret_cast<ByteArray*>(result)->set_length(length);
- return result;
-}
-
-
-MaybeObject* Heap::AllocateByteArray(int length) {
- if (length < 0 || length > ByteArray::kMaxLength) {
- return Failure::OutOfMemoryException();
- }
- int size = ByteArray::SizeFor(length);
- AllocationSpace space =
- (size > Page::kMaxNonCodeHeapObjectSize) ? LO_SPACE : NEW_SPACE;
+ AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
Object* result;
{ MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
if (!maybe_result->ToObject(&result)) return maybe_result;
@@ -3573,11 +4126,10 @@ MaybeObject* Heap::AllocateExternalArray(int length,
ExternalArrayType array_type,
void* external_pointer,
PretenureFlag pretenure) {
- AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
+ int size = ExternalArray::kAlignedSize;
+ AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
Object* result;
- { MaybeObject* maybe_result = AllocateRaw(ExternalArray::kAlignedSize,
- space,
- OLD_DATA_SPACE);
+ { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
@@ -3594,7 +4146,9 @@ MaybeObject* Heap::AllocateExternalArray(int length,
MaybeObject* Heap::CreateCode(const CodeDesc& desc,
Code::Flags flags,
Handle<Object> self_reference,
- bool immovable) {
+ bool immovable,
+ bool crankshafted,
+ int prologue_offset) {
// Allocate ByteArray before the Code object, so that we do not risk
// leaving an uninitialized Code object (and breaking the heap).
ByteArray* reloc_info;
@@ -3638,11 +4192,24 @@ MaybeObject* Heap::CreateCode(const CodeDesc& desc,
if (code->is_call_stub() || code->is_keyed_call_stub()) {
code->set_check_type(RECEIVER_MAP_CHECK);
}
+ code->set_is_crankshafted(crankshafted);
code->set_deoptimization_data(empty_fixed_array(), SKIP_WRITE_BARRIER);
- code->set_type_feedback_info(undefined_value(), SKIP_WRITE_BARRIER);
+ code->InitializeTypeFeedbackInfoNoWriteBarrier(undefined_value());
code->set_handler_table(empty_fixed_array(), SKIP_WRITE_BARRIER);
code->set_gc_metadata(Smi::FromInt(0));
code->set_ic_age(global_ic_age_);
+ code->set_prologue_offset(prologue_offset);
+ if (code->kind() == Code::OPTIMIZED_FUNCTION) {
+ code->set_marked_for_deoptimization(false);
+ }
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ if (code->kind() == Code::FUNCTION) {
+ code->set_has_debug_break_slots(
+ isolate_->debugger()->IsDebuggerActive());
+ }
+#endif
+
// Allow self references to created code object by patching the handle to
// point to the newly allocated Code object.
if (!self_reference.is_null()) {
@@ -3724,13 +4291,15 @@ MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
// Copy header and instructions.
- memcpy(new_addr, old_addr, relocation_offset);
+ CopyBytes(new_addr, old_addr, relocation_offset);
Code* new_code = Code::cast(result);
new_code->set_relocation_info(ByteArray::cast(reloc_info_array));
// Copy patched rinfo.
- memcpy(new_code->relocation_start(), reloc_info.start(), reloc_info.length());
+ CopyBytes(new_code->relocation_start(),
+ reloc_info.start(),
+ static_cast<size_t>(reloc_info.length()));
// Relocate the copy.
ASSERT(!isolate_->code_range()->exists() ||
@@ -3746,6 +4315,29 @@ MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
}
+MaybeObject* Heap::AllocateWithAllocationSite(Map* map, AllocationSpace space,
+ Handle<AllocationSite> allocation_site) {
+ ASSERT(gc_state_ == NOT_IN_GC);
+ ASSERT(map->instance_type() != MAP_TYPE);
+ // If allocation failures are disallowed, we may allocate in a different
+ // space when new space is full and the object is not a large object.
+ AllocationSpace retry_space =
+ (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
+ int size = map->instance_size() + AllocationMemento::kSize;
+ Object* result;
+ MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
+ if (!maybe_result->ToObject(&result)) return maybe_result;
+ // No need for write barrier since object is white and map is in old space.
+ HeapObject::cast(result)->set_map_no_write_barrier(map);
+ AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
+ reinterpret_cast<Address>(result) + map->instance_size());
+ alloc_memento->set_map_no_write_barrier(allocation_memento_map());
+ ASSERT(allocation_site->map() == allocation_site_map());
+ alloc_memento->set_allocation_site(*allocation_site, SKIP_WRITE_BARRIER);
+ return result;
+}
+
+
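// AllocateWithAllocationSite reserves instance_size + AllocationMemento::kSize
// bytes and writes the memento directly behind the object, so it can later be
// recovered at object_address + instance_size. A simplified stand-alone
// sketch of that layout (illustrative types, no GC or alignment handling):
#include <stddef.h>

struct SketchMemento {
  void* allocation_site;  // back-pointer used for allocation tracking
};

// Treats the leading object_size bytes of the buffer as the object and places
// the memento immediately after it.
static void* SketchAllocateWithMemento(char* buffer,
                                       size_t object_size,
                                       void* site) {
  SketchMemento* memento =
      reinterpret_cast<SketchMemento*>(buffer + object_size);
  memento->allocation_site = site;
  return buffer;
}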
MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) {
ASSERT(gc_state_ == NOT_IN_GC);
ASSERT(map->instance_type() != MAP_TYPE);
@@ -3753,11 +4345,10 @@ MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) {
// space when new space is full and the object is not a large object.
AllocationSpace retry_space =
(space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
+ int size = map->instance_size();
Object* result;
- { MaybeObject* maybe_result =
- AllocateRaw(map->instance_size(), space, retry_space);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
+ MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
+ if (!maybe_result->ToObject(&result)) return maybe_result;
// No need for write barrier since object is white and map is in old space.
HeapObject::cast(result)->set_map_no_write_barrier(map);
return result;
@@ -3780,30 +4371,33 @@ void Heap::InitializeFunction(JSFunction* function,
MaybeObject* Heap::AllocateFunctionPrototype(JSFunction* function) {
- // Allocate the prototype. Make sure to use the object function
- // from the function's context, since the function can be from a
- // different context.
- JSFunction* object_function =
- function->context()->native_context()->object_function();
-
- // Each function prototype gets a copy of the object function map.
- // This avoids unwanted sharing of maps between prototypes of different
- // constructors.
+ // Make sure to use globals from the function's context, since the function
+ // can be from a different context.
+ Context* native_context = function->context()->native_context();
Map* new_map;
- ASSERT(object_function->has_initial_map());
- MaybeObject* maybe_map = object_function->initial_map()->Copy();
- if (!maybe_map->To(&new_map)) return maybe_map;
+ if (function->shared()->is_generator()) {
+ // Generator prototypes can share maps since they don't have "constructor"
+ // properties.
+ new_map = native_context->generator_object_prototype_map();
+ } else {
+ // Each function prototype gets a fresh map to avoid unwanted sharing of
+ // maps between prototypes of different constructors.
+ JSFunction* object_function = native_context->object_function();
+ ASSERT(object_function->has_initial_map());
+ MaybeObject* maybe_map = object_function->initial_map()->Copy();
+ if (!maybe_map->To(&new_map)) return maybe_map;
+ }
Object* prototype;
MaybeObject* maybe_prototype = AllocateJSObjectFromMap(new_map);
if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
- // When creating the prototype for the function we must set its
- // constructor to the function.
- MaybeObject* maybe_failure =
- JSObject::cast(prototype)->SetLocalPropertyIgnoreAttributes(
- constructor_symbol(), function, DONT_ENUM);
- if (maybe_failure->IsFailure()) return maybe_failure;
+ if (!function->shared()->is_generator()) {
+ MaybeObject* maybe_failure =
+ JSObject::cast(prototype)->SetLocalPropertyIgnoreAttributesTrampoline(
+ constructor_string(), function, DONT_ENUM);
+ if (maybe_failure->IsFailure()) return maybe_failure;
+ }
return prototype;
}
@@ -3843,10 +4437,6 @@ MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) {
arguments_object_size = kArgumentsObjectSize;
}
- // This calls Copy directly rather than using Heap::AllocateRaw so we
- // duplicate the check here.
- ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
-
// Check that the size of the boilerplate matches our
// expectations. The ArgumentsAccessStub::GenerateNewObject relies
// on the size being a known constant.
@@ -3884,29 +4474,25 @@ MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) {
}
-static bool HasDuplicates(DescriptorArray* descriptors) {
- int count = descriptors->number_of_descriptors();
- if (count > 1) {
- String* prev_key = descriptors->GetKey(0);
- for (int i = 1; i != count; i++) {
- String* current_key = descriptors->GetKey(i);
- if (prev_key == current_key) return true;
- prev_key = current_key;
- }
- }
- return false;
-}
-
-
MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
ASSERT(!fun->has_initial_map());
// First create a new map with the size and number of in-object properties
// suggested by the function.
- int instance_size = fun->shared()->CalculateInstanceSize();
- int in_object_properties = fun->shared()->CalculateInObjectProperties();
+ InstanceType instance_type;
+ int instance_size;
+ int in_object_properties;
+ if (fun->shared()->is_generator()) {
+ instance_type = JS_GENERATOR_OBJECT_TYPE;
+ instance_size = JSGeneratorObject::kSize;
+ in_object_properties = 0;
+ } else {
+ instance_type = JS_OBJECT_TYPE;
+ instance_size = fun->shared()->CalculateInstanceSize();
+ in_object_properties = fun->shared()->CalculateInObjectProperties();
+ }
Map* map;
- MaybeObject* maybe_map = AllocateMap(JS_OBJECT_TYPE, instance_size);
+ MaybeObject* maybe_map = AllocateMap(instance_type, instance_size);
if (!maybe_map->To(&map)) return maybe_map;
// Fetch or allocate prototype.
@@ -3922,47 +4508,10 @@ MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
map->set_prototype(prototype);
ASSERT(map->has_fast_object_elements());
- // If the function has only simple this property assignments add
- // field descriptors for these to the initial map as the object
- // cannot be constructed without having these properties. Guard by
- // the inline_new flag so we only change the map if we generate a
- // specialized construct stub.
- ASSERT(in_object_properties <= Map::kMaxPreAllocatedPropertyFields);
- if (fun->shared()->CanGenerateInlineConstructor(prototype)) {
- int count = fun->shared()->this_property_assignments_count();
- if (count > in_object_properties) {
- // Inline constructor can only handle inobject properties.
- fun->shared()->ForbidInlineConstructor();
- } else {
- DescriptorArray* descriptors;
- MaybeObject* maybe_descriptors = DescriptorArray::Allocate(count);
- if (!maybe_descriptors->To(&descriptors)) return maybe_descriptors;
-
- DescriptorArray::WhitenessWitness witness(descriptors);
- for (int i = 0; i < count; i++) {
- String* name = fun->shared()->GetThisPropertyAssignmentName(i);
- ASSERT(name->IsSymbol());
- FieldDescriptor field(name, i, NONE, i + 1);
- descriptors->Set(i, &field, witness);
- }
- descriptors->Sort();
-
- // The descriptors may contain duplicates because the compiler does not
- // guarantee the uniqueness of property names (it would have required
- // quadratic time). Once the descriptors are sorted we can check for
- // duplicates in linear time.
- if (HasDuplicates(descriptors)) {
- fun->shared()->ForbidInlineConstructor();
- } else {
- map->InitializeDescriptors(descriptors);
- map->set_pre_allocated_property_fields(count);
- map->set_unused_property_fields(in_object_properties - count);
- }
- }
+ if (!fun->shared()->is_generator()) {
+ fun->shared()->StartInobjectSlackTracking(map);
}
- fun->shared()->StartInobjectSlackTracking(map);
-
return map;
}
@@ -3999,7 +4548,8 @@ void Heap::InitializeJSObjectFromMap(JSObject* obj,
}
-MaybeObject* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
+MaybeObject* Heap::AllocateJSObjectFromMap(
+ Map* map, PretenureFlag pretenure, bool allocate_properties) {
// JSFunctions should be allocated using AllocateFunction to be
// properly initialized.
ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
@@ -4010,30 +4560,62 @@ MaybeObject* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
// Allocate the backing storage for the properties.
- int prop_size =
- map->pre_allocated_property_fields() +
- map->unused_property_fields() -
- map->inobject_properties();
- ASSERT(prop_size >= 0);
- Object* properties;
- { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, pretenure);
- if (!maybe_properties->ToObject(&properties)) return maybe_properties;
+ FixedArray* properties;
+ if (allocate_properties) {
+ int prop_size = map->InitialPropertiesLength();
+ ASSERT(prop_size >= 0);
+ { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, pretenure);
+ if (!maybe_properties->To(&properties)) return maybe_properties;
+ }
+ } else {
+ properties = empty_fixed_array();
}
// Allocate the JSObject.
- AllocationSpace space =
- (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
- if (map->instance_size() > Page::kMaxNonCodeHeapObjectSize) space = LO_SPACE;
+ int size = map->instance_size();
+ AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, pretenure);
Object* obj;
- { MaybeObject* maybe_obj = Allocate(map, space);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+ MaybeObject* maybe_obj = Allocate(map, space);
+ if (!maybe_obj->To(&obj)) return maybe_obj;
+
+ // Initialize the JSObject.
+ InitializeJSObjectFromMap(JSObject::cast(obj), properties, map);
+ ASSERT(JSObject::cast(obj)->HasFastElements() ||
+ JSObject::cast(obj)->HasExternalArrayElements());
+ return obj;
+}
+
+
+MaybeObject* Heap::AllocateJSObjectFromMapWithAllocationSite(
+ Map* map, Handle<AllocationSite> allocation_site) {
+ // JSFunctions should be allocated using AllocateFunction to be
+ // properly initialized.
+ ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
+
+ // Both types of global objects should be allocated using
+ // AllocateGlobalObject to be properly initialized.
+ ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
+ ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
+
+ // Allocate the backing storage for the properties.
+ int prop_size = map->InitialPropertiesLength();
+ ASSERT(prop_size >= 0);
+ FixedArray* properties;
+ { MaybeObject* maybe_properties = AllocateFixedArray(prop_size);
+ if (!maybe_properties->To(&properties)) return maybe_properties;
}
+ // Allocate the JSObject.
+ int size = map->instance_size();
+ AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, NOT_TENURED);
+ Object* obj;
+ MaybeObject* maybe_obj =
+ AllocateWithAllocationSite(map, space, allocation_site);
+ if (!maybe_obj->To(&obj)) return maybe_obj;
+
// Initialize the JSObject.
- InitializeJSObjectFromMap(JSObject::cast(obj),
- FixedArray::cast(properties),
- map);
- ASSERT(JSObject::cast(obj)->HasFastSmiOrObjectElements());
+ InitializeJSObjectFromMap(JSObject::cast(obj), properties, map);
+ ASSERT(JSObject::cast(obj)->HasFastElements());
return obj;
}
@@ -4061,6 +4643,65 @@ MaybeObject* Heap::AllocateJSObject(JSFunction* constructor,
}
+MaybeObject* Heap::AllocateJSObjectWithAllocationSite(JSFunction* constructor,
+ Handle<AllocationSite> allocation_site) {
+ // Allocate the initial map if absent.
+ if (!constructor->has_initial_map()) {
+ Object* initial_map;
+ { MaybeObject* maybe_initial_map = AllocateInitialMap(constructor);
+ if (!maybe_initial_map->ToObject(&initial_map)) return maybe_initial_map;
+ }
+ constructor->set_initial_map(Map::cast(initial_map));
+ Map::cast(initial_map)->set_constructor(constructor);
+ }
+ // Allocate the object based on the constructor's initial map, or on the
+ // elements-kind advice recorded in the allocation site.
+ Map* initial_map = constructor->initial_map();
+
+ Smi* smi = Smi::cast(allocation_site->transition_info());
+ ElementsKind to_kind = static_cast<ElementsKind>(smi->value());
+ AllocationSiteMode mode = TRACK_ALLOCATION_SITE;
+ if (to_kind != initial_map->elements_kind()) {
+ MaybeObject* maybe_new_map = initial_map->AsElementsKind(to_kind);
+ if (!maybe_new_map->To(&initial_map)) return maybe_new_map;
+ // Possibly alter the mode, since we found an updated elements kind
+ // in the type info cell.
+ mode = AllocationSite::GetMode(to_kind);
+ }
+
+ MaybeObject* result;
+ if (mode == TRACK_ALLOCATION_SITE) {
+ result = AllocateJSObjectFromMapWithAllocationSite(initial_map,
+ allocation_site);
+ } else {
+ result = AllocateJSObjectFromMap(initial_map, NOT_TENURED);
+ }
+#ifdef DEBUG
+ // Make sure result is NOT a global object if valid.
+ Object* non_failure;
+ ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
+#endif
+ return result;
+}
+
+
+MaybeObject* Heap::AllocateJSGeneratorObject(JSFunction *function) {
+ ASSERT(function->shared()->is_generator());
+ Map *map;
+ if (function->has_initial_map()) {
+ map = function->initial_map();
+ } else {
+ // Allocate the initial map if absent.
+ MaybeObject* maybe_map = AllocateInitialMap(function);
+ if (!maybe_map->To(&map)) return maybe_map;
+ function->set_initial_map(map);
+ map->set_constructor(function);
+ }
+ ASSERT(map->instance_type() == JS_GENERATOR_OBJECT_TYPE);
+ return AllocateJSObjectFromMap(map);
+}
+
+
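// AllocateJSGeneratorObject, like the plain object path above, creates a
// function's initial map lazily on first allocation and caches it on the
// function so later allocations reuse the same map. The caching pattern in
// isolation (illustrative types, error handling omitted):
struct SketchMap {
  int instance_size;
};

struct SketchFunction {
  SketchMap* initial_map;  // NULL until the first object is allocated

  SketchMap* EnsureInitialMap() {
    if (initial_map == NULL) {
      initial_map = new SketchMap();    // created exactly once...
      initial_map->instance_size = 32;  // ...with an illustrative size
    }
    return initial_map;                 // ...then shared by all later callers
  }
};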
MaybeObject* Heap::AllocateJSModule(Context* context, ScopeInfo* scope_info) {
// Allocate a fresh map. Modules do not have a prototype.
Map* map;
@@ -4082,14 +4723,53 @@ MaybeObject* Heap::AllocateJSArrayAndStorage(
int capacity,
ArrayStorageAllocationMode mode,
PretenureFlag pretenure) {
- ASSERT(capacity >= length);
- if (length != 0 && mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE) {
- elements_kind = GetHoleyElementsKind(elements_kind);
- }
MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
JSArray* array;
if (!maybe_array->To(&array)) return maybe_array;
+ // TODO(mvstanton): this body of code duplicates AllocateJSArrayStorage
+ // for performance reasons.
+ ASSERT(capacity >= length);
+
+ if (capacity == 0) {
+ array->set_length(Smi::FromInt(0));
+ array->set_elements(empty_fixed_array());
+ return array;
+ }
+
+ FixedArrayBase* elms;
+ MaybeObject* maybe_elms = NULL;
+ if (IsFastDoubleElementsKind(elements_kind)) {
+ if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
+ maybe_elms = AllocateUninitializedFixedDoubleArray(capacity);
+ } else {
+ ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
+ maybe_elms = AllocateFixedDoubleArrayWithHoles(capacity);
+ }
+ } else {
+ ASSERT(IsFastSmiOrObjectElementsKind(elements_kind));
+ if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
+ maybe_elms = AllocateUninitializedFixedArray(capacity);
+ } else {
+ ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
+ maybe_elms = AllocateFixedArrayWithHoles(capacity);
+ }
+ }
+ if (!maybe_elms->To(&elms)) return maybe_elms;
+
+ array->set_elements(elms);
+ array->set_length(Smi::FromInt(length));
+ return array;
+}
+
+
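// The branches above pick one of four backing stores: a FixedDoubleArray or a
// FixedArray, either left uninitialized or pre-filled with holes, depending
// on the elements kind and the requested storage mode. The same decision
// table in a compact sketch (illustrative names):
enum SketchStorageKind { SKETCH_DOUBLE_ELEMENTS, SKETCH_TAGGED_ELEMENTS };
enum SketchStorageMode { SKETCH_DONT_INITIALIZE, SKETCH_FILL_WITH_HOLES };
enum SketchBackingStore {
  SKETCH_UNINIT_DOUBLE_ARRAY,
  SKETCH_HOLEY_DOUBLE_ARRAY,
  SKETCH_UNINIT_FIXED_ARRAY,
  SKETCH_HOLEY_FIXED_ARRAY
};

static SketchBackingStore SketchChooseBackingStore(SketchStorageKind kind,
                                                   SketchStorageMode mode) {
  if (kind == SKETCH_DOUBLE_ELEMENTS) {
    return mode == SKETCH_DONT_INITIALIZE ? SKETCH_UNINIT_DOUBLE_ARRAY
                                          : SKETCH_HOLEY_DOUBLE_ARRAY;
  }
  return mode == SKETCH_DONT_INITIALIZE ? SKETCH_UNINIT_FIXED_ARRAY
                                        : SKETCH_HOLEY_FIXED_ARRAY;
}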
+MaybeObject* Heap::AllocateJSArrayStorage(
+ JSArray* array,
+ int length,
+ int capacity,
+ ArrayStorageAllocationMode mode) {
+ ASSERT(capacity >= length);
+
if (capacity == 0) {
array->set_length(Smi::FromInt(0));
array->set_elements(empty_fixed_array());
@@ -4098,7 +4778,8 @@ MaybeObject* Heap::AllocateJSArrayAndStorage(
FixedArrayBase* elms;
MaybeObject* maybe_elms = NULL;
- if (elements_kind == FAST_DOUBLE_ELEMENTS) {
+ ElementsKind elements_kind = array->GetElementsKind();
+ if (IsFastDoubleElementsKind(elements_kind)) {
if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
maybe_elms = AllocateUninitializedFixedDoubleArray(capacity);
} else {
@@ -4125,13 +4806,14 @@ MaybeObject* Heap::AllocateJSArrayAndStorage(
MaybeObject* Heap::AllocateJSArrayWithElements(
FixedArrayBase* elements,
ElementsKind elements_kind,
+ int length,
PretenureFlag pretenure) {
MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
JSArray* array;
if (!maybe_array->To(&array)) return maybe_array;
array->set_elements(elements);
- array->set_length(Smi::FromInt(elements->length()));
+ array->set_length(Smi::FromInt(length));
array->ValidateElements();
return array;
}
@@ -4183,75 +4865,7 @@ MaybeObject* Heap::AllocateJSFunctionProxy(Object* handler,
}
-MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) {
- ASSERT(constructor->has_initial_map());
- Map* map = constructor->initial_map();
- ASSERT(map->is_dictionary_map());
-
- // Make sure no field properties are described in the initial map.
- // This guarantees us that normalizing the properties does not
- // require us to change property values to JSGlobalPropertyCells.
- ASSERT(map->NextFreePropertyIndex() == 0);
-
- // Make sure we don't have a ton of pre-allocated slots in the
- // global objects. They will be unused once we normalize the object.
- ASSERT(map->unused_property_fields() == 0);
- ASSERT(map->inobject_properties() == 0);
-
- // Initial size of the backing store to avoid resize of the storage during
- // bootstrapping. The size differs between the JS global object ad the
- // bootstrapping. The size differs between the JS global object and the
- int initial_size = map->instance_type() == JS_GLOBAL_OBJECT_TYPE ? 64 : 512;
-
- // Allocate a dictionary object for backing storage.
- StringDictionary* dictionary;
- MaybeObject* maybe_dictionary =
- StringDictionary::Allocate(
- map->NumberOfOwnDescriptors() * 2 + initial_size);
- if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
-
- // The global object might be created from an object template with accessors.
- // Fill these accessors into the dictionary.
- DescriptorArray* descs = map->instance_descriptors();
- for (int i = 0; i < descs->number_of_descriptors(); i++) {
- PropertyDetails details = descs->GetDetails(i);
- ASSERT(details.type() == CALLBACKS); // Only accessors are expected.
- PropertyDetails d = PropertyDetails(details.attributes(),
- CALLBACKS,
- details.descriptor_index());
- Object* value = descs->GetCallbacksObject(i);
- MaybeObject* maybe_value = AllocateJSGlobalPropertyCell(value);
- if (!maybe_value->ToObject(&value)) return maybe_value;
-
- MaybeObject* maybe_added = dictionary->Add(descs->GetKey(i), value, d);
- if (!maybe_added->To(&dictionary)) return maybe_added;
- }
-
- // Allocate the global object and initialize it with the backing store.
- JSObject* global;
- MaybeObject* maybe_global = Allocate(map, OLD_POINTER_SPACE);
- if (!maybe_global->To(&global)) return maybe_global;
-
- InitializeJSObjectFromMap(global, dictionary, map);
-
- // Create a new map for the global object.
- Map* new_map;
- MaybeObject* maybe_map = map->CopyDropDescriptors();
- if (!maybe_map->To(&new_map)) return maybe_map;
- new_map->set_dictionary_map(true);
-
- // Set up the global object as a normalized object.
- global->set_map(new_map);
- global->set_properties(dictionary);
-
- // Make sure result is a global object with properties in dictionary.
- ASSERT(global->IsGlobalObject());
- ASSERT(!global->HasFastProperties());
- return global;
-}
-
-
-MaybeObject* Heap::CopyJSObject(JSObject* source) {
+MaybeObject* Heap::CopyJSObject(JSObject* source, AllocationSite* site) {
// Never used to copy functions. If functions need to be copied we
// have to be careful to clear the literals array.
SLOW_ASSERT(!source->IsJSFunction());
@@ -4261,6 +4875,9 @@ MaybeObject* Heap::CopyJSObject(JSObject* source) {
int object_size = map->instance_size();
Object* clone;
+ ASSERT(site == NULL || (AllocationSite::CanTrack(map->instance_type()) &&
+ map->instance_type() == JS_ARRAY_TYPE));
+
WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
// If we're forced to always allocate, we use the general allocation
@@ -4280,7 +4897,11 @@ MaybeObject* Heap::CopyJSObject(JSObject* source) {
(object_size - JSObject::kHeaderSize) / kPointerSize);
} else {
wb_mode = SKIP_WRITE_BARRIER;
- { MaybeObject* maybe_clone = new_space_.AllocateRaw(object_size);
+
+ { int adjusted_object_size = site != NULL
+ ? object_size + AllocationMemento::kSize
+ : object_size;
+ MaybeObject* maybe_clone = new_space_.AllocateRaw(adjusted_object_size);
if (!maybe_clone->ToObject(&clone)) return maybe_clone;
}
SLOW_ASSERT(InNewSpace(clone));
@@ -4289,6 +4910,21 @@ MaybeObject* Heap::CopyJSObject(JSObject* source) {
CopyBlock(HeapObject::cast(clone)->address(),
source->address(),
object_size);
+
+ if (site != NULL) {
+ AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
+ reinterpret_cast<Address>(clone) + object_size);
+ alloc_memento->set_map_no_write_barrier(allocation_memento_map());
+ ASSERT(site->map() == allocation_site_map());
+ alloc_memento->set_allocation_site(site, SKIP_WRITE_BARRIER);
+ HeapProfiler* profiler = isolate()->heap_profiler();
+ if (profiler->is_tracking_allocations()) {
+ profiler->UpdateObjectSizeEvent(HeapObject::cast(clone)->address(),
+ object_size);
+ profiler->NewObjectEvent(alloc_memento->address(),
+ AllocationMemento::kSize);
+ }
+ }
}
SLOW_ASSERT(
@@ -4349,7 +4985,8 @@ MaybeObject* Heap::ReinitializeJSReceiver(
SharedFunctionInfo* shared = NULL;
if (type == JS_FUNCTION_TYPE) {
String* name;
- maybe = LookupAsciiSymbol("<freezing call trap>");
+ maybe =
+ InternalizeOneByteString(STATIC_ASCII_VECTOR("<freezing call trap>"));
if (!maybe->To<String>(&name)) return maybe;
maybe = AllocateSharedFunctionInfo(name);
if (!maybe->To<SharedFunctionInfo>(&shared)) return maybe;
@@ -4409,7 +5046,7 @@ MaybeObject* Heap::ReinitializeJSGlobalProxy(JSFunction* constructor,
}
-MaybeObject* Heap::AllocateStringFromAscii(Vector<const char> string,
+MaybeObject* Heap::AllocateStringFromOneByte(Vector<const uint8_t> string,
PretenureFlag pretenure) {
int length = string.length();
if (length == 1) {
@@ -4417,12 +5054,14 @@ MaybeObject* Heap::AllocateStringFromAscii(Vector<const char> string,
}
Object* result;
{ MaybeObject* maybe_result =
- AllocateRawAsciiString(string.length(), pretenure);
+ AllocateRawOneByteString(string.length(), pretenure);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
// Copy the characters into the new object.
- CopyChars(SeqAsciiString::cast(result)->GetChars(), string.start(), length);
+ CopyChars(SeqOneByteString::cast(result)->GetChars(),
+ string.start(),
+ length);
return result;
}
@@ -4432,37 +5071,31 @@ MaybeObject* Heap::AllocateStringFromUtf8Slow(Vector<const char> string,
PretenureFlag pretenure) {
// Continue counting the number of characters in the UTF-8 string, starting
// from the first non-ascii character or word.
- int chars = non_ascii_start;
Access<UnicodeCache::Utf8Decoder>
decoder(isolate_->unicode_cache()->utf8_decoder());
- decoder->Reset(string.start() + non_ascii_start, string.length() - chars);
- while (decoder->has_more()) {
- uint32_t r = decoder->GetNext();
- if (r <= unibrow::Utf16::kMaxNonSurrogateCharCode) {
- chars++;
- } else {
- chars += 2;
- }
- }
-
+ decoder->Reset(string.start() + non_ascii_start,
+ string.length() - non_ascii_start);
+ int utf16_length = decoder->Utf16Length();
+ ASSERT(utf16_length > 0);
+ // Allocate string.
Object* result;
- { MaybeObject* maybe_result = AllocateRawTwoByteString(chars, pretenure);
+ {
+ int chars = non_ascii_start + utf16_length;
+ MaybeObject* maybe_result = AllocateRawTwoByteString(chars, pretenure);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
-
// Convert and copy the characters into the new object.
SeqTwoByteString* twobyte = SeqTwoByteString::cast(result);
- decoder->Reset(string.start(), string.length());
- int i = 0;
- while (i < chars) {
- uint32_t r = decoder->GetNext();
- if (r > unibrow::Utf16::kMaxNonSurrogateCharCode) {
- twobyte->SeqTwoByteStringSet(i++, unibrow::Utf16::LeadSurrogate(r));
- twobyte->SeqTwoByteStringSet(i++, unibrow::Utf16::TrailSurrogate(r));
- } else {
- twobyte->SeqTwoByteStringSet(i++, r);
+ // Copy ascii portion.
+ uint16_t* data = twobyte->GetChars();
+ if (non_ascii_start != 0) {
+ const char* ascii_data = string.start();
+ for (int i = 0; i < non_ascii_start; i++) {
+ *data++ = *ascii_data++;
}
}
+ // Now write the remainder.
+ decoder->WriteUtf16(data, utf16_length);
return result;
}
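The rewritten path above makes two passes with no intermediate buffering: it asks the decoder once for the UTF-16 length of the non-ASCII tail, allocates non_ascii_start + utf16_length characters, widens the ASCII prefix in place, and lets the decoder write the remainder. The length step matters because a code point above the BMP takes two UTF-16 units; a standalone sketch of that counting rule (unrelated to V8's decoder):

#include <cstdio>
#include <vector>

// UTF-16 length of a sequence of Unicode code points: code points above
// 0xFFFF need a surrogate pair, i.e. two 16-bit units.
int Utf16Length(const std::vector<unsigned>& code_points) {
  int length = 0;
  for (unsigned cp : code_points) length += (cp > 0xFFFF) ? 2 : 1;
  return length;
}

int main() {
  // U+0041 'A', U+20AC euro sign, U+1D11E musical G clef (needs a surrogate pair).
  std::printf("%d\n", Utf16Length({0x41, 0x20AC, 0x1D11E}));  // prints 4
  return 0;
}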
@@ -4474,11 +5107,11 @@ MaybeObject* Heap::AllocateStringFromTwoByte(Vector<const uc16> string,
int length = string.length();
const uc16* start = string.start();
- if (String::IsAscii(start, length)) {
- MaybeObject* maybe_result = AllocateRawAsciiString(length, pretenure);
+ if (String::IsOneByte(start, length)) {
+ MaybeObject* maybe_result = AllocateRawOneByteString(length, pretenure);
if (!maybe_result->ToObject(&result)) return maybe_result;
- CopyChars(SeqAsciiString::cast(result)->GetChars(), start, length);
- } else { // It's not an ASCII string.
+ CopyChars(SeqOneByteString::cast(result)->GetChars(), start, length);
+ } else { // It's not a one byte string.
MaybeObject* maybe_result = AllocateRawTwoByteString(length, pretenure);
if (!maybe_result->ToObject(&result)) return maybe_result;
CopyChars(SeqTwoByteString::cast(result)->GetChars(), start, length);
@@ -4487,69 +5120,106 @@ MaybeObject* Heap::AllocateStringFromTwoByte(Vector<const uc16> string,
}
-Map* Heap::SymbolMapForString(String* string) {
- // If the string is in new space it cannot be used as a symbol.
+Map* Heap::InternalizedStringMapForString(String* string) {
+ // If the string is in new space it cannot be used as internalized.
if (InNewSpace(string)) return NULL;
- // Find the corresponding symbol map for strings.
+ // Find the corresponding internalized string map for strings.
switch (string->map()->instance_type()) {
- case STRING_TYPE: return symbol_map();
- case ASCII_STRING_TYPE: return ascii_symbol_map();
- case CONS_STRING_TYPE: return cons_symbol_map();
- case CONS_ASCII_STRING_TYPE: return cons_ascii_symbol_map();
- case EXTERNAL_STRING_TYPE: return external_symbol_map();
- case EXTERNAL_ASCII_STRING_TYPE: return external_ascii_symbol_map();
- case EXTERNAL_STRING_WITH_ASCII_DATA_TYPE:
- return external_symbol_with_ascii_data_map();
- case SHORT_EXTERNAL_STRING_TYPE: return short_external_symbol_map();
+ case STRING_TYPE: return internalized_string_map();
+ case ASCII_STRING_TYPE: return ascii_internalized_string_map();
+ case CONS_STRING_TYPE: return cons_internalized_string_map();
+ case CONS_ASCII_STRING_TYPE: return cons_ascii_internalized_string_map();
+ case EXTERNAL_STRING_TYPE: return external_internalized_string_map();
+ case EXTERNAL_ASCII_STRING_TYPE:
+ return external_ascii_internalized_string_map();
+ case EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
+ return external_internalized_string_with_one_byte_data_map();
+ case SHORT_EXTERNAL_STRING_TYPE:
+ return short_external_internalized_string_map();
case SHORT_EXTERNAL_ASCII_STRING_TYPE:
- return short_external_ascii_symbol_map();
- case SHORT_EXTERNAL_STRING_WITH_ASCII_DATA_TYPE:
- return short_external_symbol_with_ascii_data_map();
+ return short_external_ascii_internalized_string_map();
+ case SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
+ return short_external_internalized_string_with_one_byte_data_map();
default: return NULL; // No match found.
}
}
-MaybeObject* Heap::AllocateInternalSymbol(unibrow::CharacterStream* buffer,
- int chars,
- uint32_t hash_field) {
- ASSERT(chars >= 0);
- // Ensure the chars matches the number of characters in the buffer.
- ASSERT(static_cast<unsigned>(chars) == buffer->Utf16Length());
- // Determine whether the string is ASCII.
- bool is_ascii = true;
- while (buffer->has_more()) {
- if (buffer->GetNext() > unibrow::Utf8::kMaxOneByteChar) {
- is_ascii = false;
- break;
+static inline void WriteOneByteData(Vector<const char> vector,
+ uint8_t* chars,
+ int len) {
+ // Only works for ascii.
+ ASSERT(vector.length() == len);
+ OS::MemCopy(chars, vector.start(), len);
+}
+
+static inline void WriteTwoByteData(Vector<const char> vector,
+ uint16_t* chars,
+ int len) {
+ const uint8_t* stream = reinterpret_cast<const uint8_t*>(vector.start());
+ unsigned stream_length = vector.length();
+ while (stream_length != 0) {
+ unsigned consumed = 0;
+ uint32_t c = unibrow::Utf8::ValueOf(stream, stream_length, &consumed);
+ ASSERT(c != unibrow::Utf8::kBadChar);
+ ASSERT(consumed <= stream_length);
+ stream_length -= consumed;
+ stream += consumed;
+ if (c > unibrow::Utf16::kMaxNonSurrogateCharCode) {
+ len -= 2;
+ if (len < 0) break;
+ *chars++ = unibrow::Utf16::LeadSurrogate(c);
+ *chars++ = unibrow::Utf16::TrailSurrogate(c);
+ } else {
+ len -= 1;
+ if (len < 0) break;
+ *chars++ = c;
}
}
- buffer->Rewind();
+ ASSERT(stream_length == 0);
+ ASSERT(len == 0);
+}
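WriteTwoByteData above splits supplementary code points into surrogate pairs via unibrow::Utf16; for reference, the standard encoding can be computed directly (this is the general UTF-16 rule, not V8's helper):

#include <cstdint>
#include <cstdio>

// Standard UTF-16 surrogate pair for a code point c in [0x10000, 0x10FFFF].
void EncodeSurrogatePair(uint32_t c, uint16_t* lead, uint16_t* trail) {
  c -= 0x10000;
  *lead = static_cast<uint16_t>(0xD800 + (c >> 10));
  *trail = static_cast<uint16_t>(0xDC00 + (c & 0x3FF));
}

int main() {
  uint16_t lead, trail;
  EncodeSurrogatePair(0x1D11E, &lead, &trail);
  std::printf("%04X %04X\n", lead, trail);  // prints D834 DD1E
  return 0;
}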
+
+
+static inline void WriteOneByteData(String* s, uint8_t* chars, int len) {
+ ASSERT(s->length() == len);
+ String::WriteToFlat(s, chars, 0, len);
+}
+
+static inline void WriteTwoByteData(String* s, uint16_t* chars, int len) {
+ ASSERT(s->length() == len);
+ String::WriteToFlat(s, chars, 0, len);
+}
+
+
+template<bool is_one_byte, typename T>
+MaybeObject* Heap::AllocateInternalizedStringImpl(
+ T t, int chars, uint32_t hash_field) {
+ ASSERT(chars >= 0);
// Compute map and object size.
int size;
Map* map;
- if (is_ascii) {
- if (chars > SeqAsciiString::kMaxLength) {
- return Failure::OutOfMemoryException();
+ if (is_one_byte) {
+ if (chars > SeqOneByteString::kMaxLength) {
+ return Failure::OutOfMemoryException(0x9);
}
- map = ascii_symbol_map();
- size = SeqAsciiString::SizeFor(chars);
+ map = ascii_internalized_string_map();
+ size = SeqOneByteString::SizeFor(chars);
} else {
if (chars > SeqTwoByteString::kMaxLength) {
- return Failure::OutOfMemoryException();
+ return Failure::OutOfMemoryException(0xa);
}
- map = symbol_map();
+ map = internalized_string_map();
size = SeqTwoByteString::SizeFor(chars);
}
+ AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, TENURED);
// Allocate string.
Object* result;
- { MaybeObject* maybe_result = (size > Page::kMaxNonCodeHeapObjectSize)
- ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
- : old_data_space_->AllocateRaw(size);
+ { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
@@ -4561,46 +5231,37 @@ MaybeObject* Heap::AllocateInternalSymbol(unibrow::CharacterStream* buffer,
ASSERT_EQ(size, answer->Size());
- // Fill in the characters.
- int i = 0;
- while (i < chars) {
- uint32_t character = buffer->GetNext();
- if (character > unibrow::Utf16::kMaxNonSurrogateCharCode) {
- answer->Set(i++, unibrow::Utf16::LeadSurrogate(character));
- answer->Set(i++, unibrow::Utf16::TrailSurrogate(character));
- } else {
- answer->Set(i++, character);
- }
+ if (is_one_byte) {
+ WriteOneByteData(t, SeqOneByteString::cast(answer)->GetChars(), chars);
+ } else {
+ WriteTwoByteData(t, SeqTwoByteString::cast(answer)->GetChars(), chars);
}
return answer;
}
-MaybeObject* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) {
- if (length < 0 || length > SeqAsciiString::kMaxLength) {
- return Failure::OutOfMemoryException();
- }
+// Need explicit instantiations.
+template
+MaybeObject* Heap::AllocateInternalizedStringImpl<true>(String*, int, uint32_t);
+template
+MaybeObject* Heap::AllocateInternalizedStringImpl<false>(
+ String*, int, uint32_t);
+template
+MaybeObject* Heap::AllocateInternalizedStringImpl<false>(
+ Vector<const char>, int, uint32_t);
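The explicit instantiations above are required because the templated body lives in this .cc file rather than a header; a self-contained sketch of the same pattern with hypothetical names:

// Declaration as it would appear in a header.
template <bool kOneByte, typename T>
int AllocateImpl(T source, int chars);

// Definition kept in a single translation unit...
template <bool kOneByte, typename T>
int AllocateImpl(T source, int chars) {
  return kOneByte ? chars : 2 * chars;  // pretend size computation
}

// ...so every combination used elsewhere must be instantiated explicitly.
template int AllocateImpl<true, const char*>(const char*, int);
template int AllocateImpl<false, const char*>(const char*, int);

int main() {
  return AllocateImpl<true, const char*>("abc", 3) == 3 ? 0 : 1;
}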
- int size = SeqAsciiString::SizeFor(length);
- ASSERT(size <= SeqAsciiString::kMaxSize);
- AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
- AllocationSpace retry_space = OLD_DATA_SPACE;
-
- if (space == NEW_SPACE) {
- if (size > kMaxObjectSizeInNewSpace) {
- // Allocate in large object space, retry space will be ignored.
- space = LO_SPACE;
- } else if (size > Page::kMaxNonCodeHeapObjectSize) {
- // Allocate in new space, retry in large object space.
- retry_space = LO_SPACE;
- }
- } else if (space == OLD_DATA_SPACE &&
- size > Page::kMaxNonCodeHeapObjectSize) {
- space = LO_SPACE;
+MaybeObject* Heap::AllocateRawOneByteString(int length,
+ PretenureFlag pretenure) {
+ if (length < 0 || length > SeqOneByteString::kMaxLength) {
+ return Failure::OutOfMemoryException(0xb);
}
+ int size = SeqOneByteString::SizeFor(length);
+ ASSERT(size <= SeqOneByteString::kMaxSize);
+ AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
+
Object* result;
- { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
+ { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
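Throughout this file the old space/retry-space branching is collapsed into SelectSpace plus a fixed retry space; a hedged sketch of what such a helper plausibly decides (the real V8 helper may differ, for example in how it treats large objects in new space):

enum Space { NEW_SPACE, OLD_DATA_SPACE, OLD_POINTER_SPACE, LO_SPACE };
enum Pretenure { NOT_TENURED, TENURED };

const int kMaxRegularObjectSize = 64 * 1024;  // hypothetical regular-object limit

// Sketch: oversized objects go to large-object space, tenured allocations to
// the preferred old space, everything else to new space.
Space SelectSpaceSketch(int size, Space preferred_old_space, Pretenure pretenure) {
  if (size > kMaxRegularObjectSize) return LO_SPACE;
  return (pretenure == TENURED) ? preferred_old_space : NEW_SPACE;
}

int main() {
  return SelectSpaceSketch(128, OLD_DATA_SPACE, TENURED) == OLD_DATA_SPACE ? 0 : 1;
}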
@@ -4610,15 +5271,6 @@ MaybeObject* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) {
String::cast(result)->set_hash_field(String::kEmptyHashField);
ASSERT_EQ(size, HeapObject::cast(result)->Size());
-#ifdef VERIFY_HEAP
- if (FLAG_verify_heap) {
- // Initialize string's content to ensure ASCII-ness (character range 0-127)
- // as required when verifying the heap.
- char* dest = SeqAsciiString::cast(result)->GetChars();
- memset(dest, 0x0F, length * kCharSize);
- }
-#endif
-
return result;
}
@@ -4626,27 +5278,14 @@ MaybeObject* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) {
MaybeObject* Heap::AllocateRawTwoByteString(int length,
PretenureFlag pretenure) {
if (length < 0 || length > SeqTwoByteString::kMaxLength) {
- return Failure::OutOfMemoryException();
+ return Failure::OutOfMemoryException(0xc);
}
int size = SeqTwoByteString::SizeFor(length);
ASSERT(size <= SeqTwoByteString::kMaxSize);
- AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
- AllocationSpace retry_space = OLD_DATA_SPACE;
-
- if (space == NEW_SPACE) {
- if (size > kMaxObjectSizeInNewSpace) {
- // Allocate in large object space, retry space will be ignored.
- space = LO_SPACE;
- } else if (size > Page::kMaxNonCodeHeapObjectSize) {
- // Allocate in new space, retry in large object space.
- retry_space = LO_SPACE;
- }
- } else if (space == OLD_DATA_SPACE &&
- size > Page::kMaxNonCodeHeapObjectSize) {
- space = LO_SPACE;
- }
+ AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
+
Object* result;
- { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
+ { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
@@ -4665,15 +5304,8 @@ MaybeObject* Heap::AllocateJSArray(
Context* native_context = isolate()->context()->native_context();
JSFunction* array_function = native_context->array_function();
Map* map = array_function->initial_map();
- Object* maybe_map_array = native_context->js_array_maps();
- if (!maybe_map_array->IsUndefined()) {
- Object* maybe_transitioned_map =
- FixedArray::cast(maybe_map_array)->get(elements_kind);
- if (!maybe_transitioned_map->IsUndefined()) {
- map = Map::cast(maybe_transitioned_map);
- }
- }
-
+ Map* transition_map = isolate()->get_initial_js_array_map(elements_kind);
+ if (transition_map != NULL) map = transition_map;
return AllocateJSObjectFromMap(map, pretenure);
}
@@ -4693,25 +5325,15 @@ MaybeObject* Heap::AllocateEmptyFixedArray() {
}
-MaybeObject* Heap::AllocateRawFixedArray(int length) {
- if (length < 0 || length > FixedArray::kMaxLength) {
- return Failure::OutOfMemoryException();
- }
- ASSERT(length > 0);
- // Use the general function if we're forced to always allocate.
- if (always_allocate()) return AllocateFixedArray(length, TENURED);
- // Allocate the raw data for a fixed array.
- int size = FixedArray::SizeFor(length);
- return size <= kMaxObjectSizeInNewSpace
- ? new_space_.AllocateRaw(size)
- : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
+MaybeObject* Heap::AllocateEmptyExternalArray(ExternalArrayType array_type) {
+ return AllocateExternalArray(0, array_type, NULL, TENURED);
}
MaybeObject* Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
int len = src->length();
Object* obj;
- { MaybeObject* maybe_obj = AllocateRawFixedArray(len);
+ { MaybeObject* maybe_obj = AllocateRawFixedArray(len, NOT_TENURED);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
if (InNewSpace(obj)) {
@@ -4727,7 +5349,7 @@ MaybeObject* Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
result->set_length(len);
// Copy the content
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
return result;
@@ -4751,64 +5373,52 @@ MaybeObject* Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src,
}
-MaybeObject* Heap::AllocateFixedArray(int length) {
- ASSERT(length >= 0);
- if (length == 0) return empty_fixed_array();
- Object* result;
- { MaybeObject* maybe_result = AllocateRawFixedArray(length);
- if (!maybe_result->ToObject(&result)) return maybe_result;
+MaybeObject* Heap::CopyConstantPoolArrayWithMap(ConstantPoolArray* src,
+ Map* map) {
+ int int64_entries = src->count_of_int64_entries();
+ int ptr_entries = src->count_of_ptr_entries();
+ int int32_entries = src->count_of_int32_entries();
+ Object* obj;
+ { MaybeObject* maybe_obj =
+ AllocateConstantPoolArray(int64_entries, ptr_entries, int32_entries);
+ if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
- // Initialize header.
- FixedArray* array = reinterpret_cast<FixedArray*>(result);
- array->set_map_no_write_barrier(fixed_array_map());
- array->set_length(length);
- // Initialize body.
- ASSERT(!InNewSpace(undefined_value()));
- MemsetPointer(array->data_start(), undefined_value(), length);
- return result;
+ HeapObject* dst = HeapObject::cast(obj);
+ dst->set_map_no_write_barrier(map);
+ CopyBlock(
+ dst->address() + ConstantPoolArray::kLengthOffset,
+ src->address() + ConstantPoolArray::kLengthOffset,
+ ConstantPoolArray::SizeFor(int64_entries, ptr_entries, int32_entries)
+ - ConstantPoolArray::kLengthOffset);
+ return obj;
}
MaybeObject* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) {
if (length < 0 || length > FixedArray::kMaxLength) {
- return Failure::OutOfMemoryException();
+ return Failure::OutOfMemoryException(0xe);
}
-
- AllocationSpace space =
- (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
int size = FixedArray::SizeFor(length);
- if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
- // Too big for new space.
- space = LO_SPACE;
- } else if (space == OLD_POINTER_SPACE &&
- size > Page::kMaxNonCodeHeapObjectSize) {
- // Too big for old pointer space.
- space = LO_SPACE;
- }
-
- AllocationSpace retry_space =
- (size <= Page::kMaxNonCodeHeapObjectSize) ? OLD_POINTER_SPACE : LO_SPACE;
+ AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, pretenure);
- return AllocateRaw(size, space, retry_space);
+ return AllocateRaw(size, space, OLD_POINTER_SPACE);
}
-MUST_USE_RESULT static MaybeObject* AllocateFixedArrayWithFiller(
- Heap* heap,
- int length,
- PretenureFlag pretenure,
- Object* filler) {
+MaybeObject* Heap::AllocateFixedArrayWithFiller(int length,
+ PretenureFlag pretenure,
+ Object* filler) {
ASSERT(length >= 0);
- ASSERT(heap->empty_fixed_array()->IsFixedArray());
- if (length == 0) return heap->empty_fixed_array();
+ ASSERT(empty_fixed_array()->IsFixedArray());
+ if (length == 0) return empty_fixed_array();
- ASSERT(!heap->InNewSpace(filler));
+ ASSERT(!InNewSpace(filler));
Object* result;
- { MaybeObject* maybe_result = heap->AllocateRawFixedArray(length, pretenure);
+ { MaybeObject* maybe_result = AllocateRawFixedArray(length, pretenure);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
- HeapObject::cast(result)->set_map_no_write_barrier(heap->fixed_array_map());
+ HeapObject::cast(result)->set_map_no_write_barrier(fixed_array_map());
FixedArray* array = FixedArray::cast(result);
array->set_length(length);
MemsetPointer(array->data_start(), filler, length);
@@ -4817,19 +5427,13 @@ MUST_USE_RESULT static MaybeObject* AllocateFixedArrayWithFiller(
MaybeObject* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
- return AllocateFixedArrayWithFiller(this,
- length,
- pretenure,
- undefined_value());
+ return AllocateFixedArrayWithFiller(length, pretenure, undefined_value());
}
MaybeObject* Heap::AllocateFixedArrayWithHoles(int length,
PretenureFlag pretenure) {
- return AllocateFixedArrayWithFiller(this,
- length,
- pretenure,
- the_hole_value());
+ return AllocateFixedArrayWithFiller(length, pretenure, the_hole_value());
}
@@ -4837,7 +5441,7 @@ MaybeObject* Heap::AllocateUninitializedFixedArray(int length) {
if (length == 0) return empty_fixed_array();
Object* obj;
- { MaybeObject* maybe_obj = AllocateRawFixedArray(length);
+ { MaybeObject* maybe_obj = AllocateRawFixedArray(length, NOT_TENURED);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
@@ -4904,35 +5508,55 @@ MaybeObject* Heap::AllocateFixedDoubleArrayWithHoles(
MaybeObject* Heap::AllocateRawFixedDoubleArray(int length,
PretenureFlag pretenure) {
if (length < 0 || length > FixedDoubleArray::kMaxLength) {
- return Failure::OutOfMemoryException();
+ return Failure::OutOfMemoryException(0xf);
}
-
- AllocationSpace space =
- (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
int size = FixedDoubleArray::SizeFor(length);
-
#ifndef V8_HOST_ARCH_64_BIT
size += kPointerSize;
#endif
+ AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
- if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
- // Too big for new space.
- space = LO_SPACE;
- } else if (space == OLD_DATA_SPACE &&
- size > Page::kMaxNonCodeHeapObjectSize) {
- // Too big for old data space.
- space = LO_SPACE;
+ HeapObject* object;
+ { MaybeObject* maybe_object = AllocateRaw(size, space, OLD_DATA_SPACE);
+ if (!maybe_object->To<HeapObject>(&object)) return maybe_object;
}
- AllocationSpace retry_space =
- (size <= Page::kMaxNonCodeHeapObjectSize) ? OLD_DATA_SPACE : LO_SPACE;
+ return EnsureDoubleAligned(this, object, size);
+}
+
+
+MaybeObject* Heap::AllocateConstantPoolArray(int number_of_int64_entries,
+ int number_of_ptr_entries,
+ int number_of_int32_entries) {
+ ASSERT(number_of_int64_entries > 0 || number_of_ptr_entries > 0 ||
+ number_of_int32_entries > 0);
+ int size = ConstantPoolArray::SizeFor(number_of_int64_entries,
+ number_of_ptr_entries,
+ number_of_int32_entries);
+#ifndef V8_HOST_ARCH_64_BIT
+ size += kPointerSize;
+#endif
+ AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED);
HeapObject* object;
- { MaybeObject* maybe_object = AllocateRaw(size, space, retry_space);
+ { MaybeObject* maybe_object = AllocateRaw(size, space, OLD_POINTER_SPACE);
if (!maybe_object->To<HeapObject>(&object)) return maybe_object;
}
+ object = EnsureDoubleAligned(this, object, size);
+ HeapObject::cast(object)->set_map_no_write_barrier(constant_pool_array_map());
- return EnsureDoubleAligned(this, object, size);
+ ConstantPoolArray* constant_pool =
+ reinterpret_cast<ConstantPoolArray*>(object);
+ constant_pool->SetEntryCounts(number_of_int64_entries,
+ number_of_ptr_entries,
+ number_of_int32_entries);
+ MemsetPointer(
+ HeapObject::RawField(
+ constant_pool,
+ constant_pool->OffsetOfElementAt(constant_pool->first_ptr_index())),
+ undefined_value(),
+ number_of_ptr_entries);
+ return constant_pool;
}
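AllocateConstantPoolArray above lays the array out in three sections, 64-bit entries first, then tagged pointers, then 32-bit entries, and only the pointer section is filled with undefined_value(). A standalone sketch of that sectioned size computation (entry and header sizes are illustrative, not ConstantPoolArray::SizeFor):

#include <cstdio>

// Illustrative sectioned layout: header, 8-byte entries, pointer entries,
// then 4-byte entries.
int ConstantPoolSizeSketch(int int64_entries, int ptr_entries,
                           int int32_entries, int pointer_size) {
  const int header = 2 * pointer_size;  // hypothetical map word + length word
  return header + 8 * int64_entries + pointer_size * ptr_entries +
         4 * int32_entries;
}

int main() {
  std::printf("%d bytes\n", ConstantPoolSizeSketch(2, 3, 4, 8));  // 72 bytes
  return 0;
}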
@@ -4948,6 +5572,35 @@ MaybeObject* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
}
+MaybeObject* Heap::AllocateSymbol() {
+ // Statically ensure that it is safe to allocate symbols in paged spaces.
+ STATIC_ASSERT(Symbol::kSize <= Page::kNonCodeObjectAreaSize);
+
+ Object* result;
+ MaybeObject* maybe =
+ AllocateRaw(Symbol::kSize, OLD_POINTER_SPACE, OLD_POINTER_SPACE);
+ if (!maybe->ToObject(&result)) return maybe;
+
+ HeapObject::cast(result)->set_map_no_write_barrier(symbol_map());
+
+ // Generate a random hash value.
+ int hash;
+ int attempts = 0;
+ do {
+ hash = isolate()->random_number_generator()->NextInt() & Name::kHashBitMask;
+ attempts++;
+ } while (hash == 0 && attempts < 30);
+ if (hash == 0) hash = 1; // never return 0
+
+ Symbol::cast(result)->set_hash_field(
+ Name::kIsNotArrayIndexMask | (hash << Name::kHashShift));
+ Symbol::cast(result)->set_name(undefined_value());
+
+ ASSERT(result->IsSymbol());
+ return result;
+}
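The hash loop above draws random bits, masks them to the hash-field width, and retries a bounded number of times so the reserved value zero is never stored. The same retry shape in standalone form (the mask value below is hypothetical, not Name::kHashBitMask):

#include <cstdlib>

// Produce a non-zero hash within a bit mask, retrying a few times.
unsigned RandomNonZeroHash(unsigned mask) {
  unsigned hash = 0;
  for (int attempts = 0; hash == 0 && attempts < 30; attempts++) {
    hash = static_cast<unsigned>(std::rand()) & mask;
  }
  return hash == 0 ? 1u : hash;  // never return 0
}

int main() {
  return RandomNonZeroHash(0x3fffffffu) != 0 ? 0 : 1;
}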
+
+
MaybeObject* Heap::AllocateNativeContext() {
Object* result;
{ MaybeObject* maybe_result =
@@ -4990,7 +5643,7 @@ MaybeObject* Heap::AllocateModuleContext(ScopeInfo* scope_info) {
}
Context* context = reinterpret_cast<Context*>(result);
context->set_map_no_write_barrier(module_context_map());
- // Context links will be set later.
+ // Instance link will be set later.
context->set_extension(Smi::FromInt(0));
return context;
}
@@ -5035,7 +5688,7 @@ MaybeObject* Heap::AllocateCatchContext(JSFunction* function,
MaybeObject* Heap::AllocateWithContext(JSFunction* function,
Context* previous,
- JSObject* extension) {
+ JSReceiver* extension) {
Object* result;
{ MaybeObject* maybe_result = AllocateFixedArray(Context::MIN_CONTEXT_SLOTS);
if (!maybe_result->ToObject(&result)) return maybe_result;
@@ -5077,6 +5730,20 @@ MaybeObject* Heap::AllocateScopeInfo(int length) {
}
+MaybeObject* Heap::AllocateExternal(void* value) {
+ Foreign* foreign;
+ { MaybeObject* maybe_result = AllocateForeign(static_cast<Address>(value));
+ if (!maybe_result->To(&foreign)) return maybe_result;
+ }
+ JSObject* external;
+ { MaybeObject* maybe_result = AllocateJSObjectFromMap(external_map());
+ if (!maybe_result->To(&external)) return maybe_result;
+ }
+ external->SetInternalField(0, foreign);
+ return external;
+}
+
+
MaybeObject* Heap::AllocateStruct(InstanceType type) {
Map* map;
switch (type) {
@@ -5089,8 +5756,7 @@ STRUCT_LIST(MAKE_CASE)
return Failure::InternalError();
}
int size = map->instance_size();
- AllocationSpace space =
- (size > Page::kMaxNonCodeHeapObjectSize) ? LO_SPACE : OLD_POINTER_SPACE;
+ AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED);
Object* result;
{ MaybeObject* maybe_result = Allocate(map, space);
if (!maybe_result->ToObject(&result)) return maybe_result;
@@ -5107,7 +5773,7 @@ bool Heap::IsHeapIterable() {
void Heap::EnsureHeapIsIterable() {
- ASSERT(IsAllocationAllowed());
+ ASSERT(AllowHeapAllocation::IsAllowed());
if (!IsHeapIterable()) {
CollectAllGarbage(kMakeHeapIterableMask, "Heap::EnsureHeapIsIterable");
}
@@ -5127,6 +5793,7 @@ void Heap::AdvanceIdleIncrementalMarking(intptr_t step_size) {
uncommit = true;
}
CollectAllGarbage(kNoGCFlags, "idle notification: finalize incremental");
+ mark_sweeps_since_idle_round_started_++;
gc_count_at_last_idle_gc_ = gc_count_;
if (uncommit) {
new_space_.Shrink();
@@ -5140,6 +5807,7 @@ bool Heap::IdleNotification(int hint) {
// Hints greater than this value indicate that
// the embedder is requesting a lot of GC work.
const int kMaxHint = 1000;
+ const int kMinHintForIncrementalMarking = 10;
// Minimal hint that allows to do full GC.
const int kMinHintForFullGC = 100;
intptr_t size_factor = Min(Max(hint, 20), kMaxHint) / 4;
@@ -5166,10 +5834,6 @@ bool Heap::IdleNotification(int hint) {
AdvanceIdleIncrementalMarking(step_size);
contexts_disposed_ = 0;
}
- // Make sure that we have no pending context disposals.
- // Take into account that we might have decided to delay full collection
- // because incremental marking is in progress.
- ASSERT((contexts_disposed_ == 0) || !incremental_marking()->IsStopped());
// After context disposal there is likely a lot of garbage remaining, reset
// the idle notification counters in order to trigger more incremental GCs
// on subsequent idle notifications.
@@ -5190,9 +5854,9 @@ bool Heap::IdleNotification(int hint) {
// 3. many lazy sweep steps.
// Use mark-sweep-compact events to count incremental GCs in a round.
-
if (incremental_marking()->IsStopped()) {
- if (!IsSweepingComplete() &&
+ if (!mark_compact_collector()->AreSweeperThreadsActivated() &&
+ !IsSweepingComplete() &&
!AdvanceSweepers(static_cast<int>(step_size))) {
return false;
}
@@ -5206,18 +5870,9 @@ bool Heap::IdleNotification(int hint) {
}
}
- int new_mark_sweeps = ms_count_ - ms_count_at_last_idle_notification_;
- mark_sweeps_since_idle_round_started_ += new_mark_sweeps;
- ms_count_at_last_idle_notification_ = ms_count_;
-
int remaining_mark_sweeps = kMaxMarkSweepsInIdleRound -
mark_sweeps_since_idle_round_started_;
- if (remaining_mark_sweeps <= 0) {
- FinishIdleRound();
- return true;
- }
-
if (incremental_marking()->IsStopped()) {
// If there are no more than two GCs left in this idle round and we are
// allowed to do a full GC, then make those GCs full in order to compact
@@ -5227,13 +5882,21 @@ bool Heap::IdleNotification(int hint) {
if (remaining_mark_sweeps <= 2 && hint >= kMinHintForFullGC) {
CollectAllGarbage(kReduceMemoryFootprintMask,
"idle notification: finalize idle round");
- } else {
+ mark_sweeps_since_idle_round_started_++;
+ } else if (hint > kMinHintForIncrementalMarking) {
incremental_marking()->Start();
}
}
- if (!incremental_marking()->IsStopped()) {
+ if (!incremental_marking()->IsStopped() &&
+ hint > kMinHintForIncrementalMarking) {
AdvanceIdleIncrementalMarking(step_size);
}
+
+ if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
+ FinishIdleRound();
+ return true;
+ }
+
return false;
}
@@ -5302,21 +5965,22 @@ bool Heap::IdleGlobalGC() {
void Heap::Print() {
if (!HasBeenSetUp()) return;
- isolate()->PrintStack();
- AllSpaces spaces;
- for (Space* space = spaces.next(); space != NULL; space = spaces.next())
+ isolate()->PrintStack(stdout);
+ AllSpaces spaces(this);
+ for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
space->Print();
+ }
}
void Heap::ReportCodeStatistics(const char* title) {
PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
- PagedSpace::ResetCodeStatistics();
+ PagedSpace::ResetCodeStatistics(isolate());
// We do not look for code in new space, map space, or old space. If code
// somehow ends up in those spaces, we would miss it here.
code_space_->CollectCodeStatistics();
lo_space_->CollectCodeStatistics();
- PagedSpace::ReportCodeStatistics();
+ PagedSpace::ReportCodeStatistics(isolate());
}
@@ -5327,14 +5991,11 @@ void Heap::ReportHeapStatistics(const char* title) {
USE(title);
PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
title, gc_count_);
- PrintF("old_gen_promotion_limit_ %" V8_PTR_PREFIX "d\n",
- old_gen_promotion_limit_);
- PrintF("old_gen_allocation_limit_ %" V8_PTR_PREFIX "d\n",
- old_gen_allocation_limit_);
- PrintF("old_gen_limit_factor_ %d\n", old_gen_limit_factor_);
+ PrintF("old_generation_allocation_limit_ %" V8_PTR_PREFIX "d\n",
+ old_generation_allocation_limit_);
PrintF("\n");
- PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles());
+ PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles(isolate_));
isolate_->global_handles()->PrintStats();
PrintF("\n");
@@ -5352,6 +6013,8 @@ void Heap::ReportHeapStatistics(const char* title) {
map_space_->ReportStatistics();
PrintF("Cell space : ");
cell_space_->ReportStatistics();
+ PrintF("PropertyCell space : ");
+ property_cell_space_->ReportStatistics();
PrintF("Large object space : ");
lo_space_->ReportStatistics();
PrintF(">>>>>> ========================================= >>>>>>\n");
@@ -5365,7 +6028,7 @@ bool Heap::Contains(HeapObject* value) {
bool Heap::Contains(Address addr) {
- if (OS::IsOutsideAllocatedSpace(addr)) return false;
+ if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) return false;
return HasBeenSetUp() &&
(new_space_.ToSpaceContains(addr) ||
old_pointer_space_->Contains(addr) ||
@@ -5373,6 +6036,7 @@ bool Heap::Contains(Address addr) {
code_space_->Contains(addr) ||
map_space_->Contains(addr) ||
cell_space_->Contains(addr) ||
+ property_cell_space_->Contains(addr) ||
lo_space_->SlowContains(addr));
}
@@ -5383,7 +6047,7 @@ bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
bool Heap::InSpace(Address addr, AllocationSpace space) {
- if (OS::IsOutsideAllocatedSpace(addr)) return false;
+ if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) return false;
if (!HasBeenSetUp()) return false;
switch (space) {
@@ -5399,6 +6063,8 @@ bool Heap::InSpace(Address addr, AllocationSpace space) {
return map_space_->Contains(addr);
case CELL_SPACE:
return cell_space_->Contains(addr);
+ case PROPERTY_CELL_SPACE:
+ return property_cell_space_->Contains(addr);
case LO_SPACE:
return lo_space_->SlowContains(addr);
}
@@ -5425,101 +6091,103 @@ void Heap::Verify() {
old_data_space_->Verify(&no_dirty_regions_visitor);
code_space_->Verify(&no_dirty_regions_visitor);
cell_space_->Verify(&no_dirty_regions_visitor);
+ property_cell_space_->Verify(&no_dirty_regions_visitor);
lo_space_->Verify();
}
#endif
-MaybeObject* Heap::LookupSymbol(Vector<const char> string) {
- Object* symbol = NULL;
+MaybeObject* Heap::InternalizeUtf8String(Vector<const char> string) {
+ Object* result = NULL;
Object* new_table;
{ MaybeObject* maybe_new_table =
- symbol_table()->LookupSymbol(string, &symbol);
+ string_table()->LookupUtf8String(string, &result);
if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
}
- // Can't use set_symbol_table because SymbolTable::cast knows that
- // SymbolTable is a singleton and checks for identity.
- roots_[kSymbolTableRootIndex] = new_table;
- ASSERT(symbol != NULL);
- return symbol;
+ // Can't use set_string_table because StringTable::cast knows that
+ // StringTable is a singleton and checks for identity.
+ roots_[kStringTableRootIndex] = new_table;
+ ASSERT(result != NULL);
+ return result;
}
-MaybeObject* Heap::LookupAsciiSymbol(Vector<const char> string) {
- Object* symbol = NULL;
+MaybeObject* Heap::InternalizeOneByteString(Vector<const uint8_t> string) {
+ Object* result = NULL;
Object* new_table;
{ MaybeObject* maybe_new_table =
- symbol_table()->LookupAsciiSymbol(string, &symbol);
+ string_table()->LookupOneByteString(string, &result);
if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
}
- // Can't use set_symbol_table because SymbolTable::cast knows that
- // SymbolTable is a singleton and checks for identity.
- roots_[kSymbolTableRootIndex] = new_table;
- ASSERT(symbol != NULL);
- return symbol;
+ // Can't use set_string_table because StringTable::cast knows that
+ // StringTable is a singleton and checks for identity.
+ roots_[kStringTableRootIndex] = new_table;
+ ASSERT(result != NULL);
+ return result;
}
-MaybeObject* Heap::LookupAsciiSymbol(Handle<SeqAsciiString> string,
+MaybeObject* Heap::InternalizeOneByteString(Handle<SeqOneByteString> string,
int from,
int length) {
- Object* symbol = NULL;
+ Object* result = NULL;
Object* new_table;
{ MaybeObject* maybe_new_table =
- symbol_table()->LookupSubStringAsciiSymbol(string,
+ string_table()->LookupSubStringOneByteString(string,
from,
length,
- &symbol);
+ &result);
if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
}
- // Can't use set_symbol_table because SymbolTable::cast knows that
- // SymbolTable is a singleton and checks for identity.
- roots_[kSymbolTableRootIndex] = new_table;
- ASSERT(symbol != NULL);
- return symbol;
+ // Can't use set_string_table because StringTable::cast knows that
+ // StringTable is a singleton and checks for identity.
+ roots_[kStringTableRootIndex] = new_table;
+ ASSERT(result != NULL);
+ return result;
}
-MaybeObject* Heap::LookupTwoByteSymbol(Vector<const uc16> string) {
- Object* symbol = NULL;
+MaybeObject* Heap::InternalizeTwoByteString(Vector<const uc16> string) {
+ Object* result = NULL;
Object* new_table;
{ MaybeObject* maybe_new_table =
- symbol_table()->LookupTwoByteSymbol(string, &symbol);
+ string_table()->LookupTwoByteString(string, &result);
if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
}
- // Can't use set_symbol_table because SymbolTable::cast knows that
- // SymbolTable is a singleton and checks for identity.
- roots_[kSymbolTableRootIndex] = new_table;
- ASSERT(symbol != NULL);
- return symbol;
+ // Can't use set_string_table because StringTable::cast knows that
+ // StringTable is a singleton and checks for identity.
+ roots_[kStringTableRootIndex] = new_table;
+ ASSERT(result != NULL);
+ return result;
}
-MaybeObject* Heap::LookupSymbol(String* string) {
- if (string->IsSymbol()) return string;
- Object* symbol = NULL;
+MaybeObject* Heap::InternalizeString(String* string) {
+ if (string->IsInternalizedString()) return string;
+ Object* result = NULL;
Object* new_table;
{ MaybeObject* maybe_new_table =
- symbol_table()->LookupString(string, &symbol);
+ string_table()->LookupString(string, &result);
if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
}
- // Can't use set_symbol_table because SymbolTable::cast knows that
- // SymbolTable is a singleton and checks for identity.
- roots_[kSymbolTableRootIndex] = new_table;
- ASSERT(symbol != NULL);
- return symbol;
+ // Can't use set_string_table because StringTable::cast knows that
+ // StringTable is a singleton and checks for identity.
+ roots_[kStringTableRootIndex] = new_table;
+ ASSERT(result != NULL);
+ return result;
}
-bool Heap::LookupSymbolIfExists(String* string, String** symbol) {
- if (string->IsSymbol()) {
- *symbol = string;
+bool Heap::InternalizeStringIfExists(String* string, String** result) {
+ if (string->IsInternalizedString()) {
+ *result = string;
return true;
}
- return symbol_table()->LookupSymbolIfExists(string, symbol);
+ return string_table()->LookupStringIfExists(string, result);
}
+
void Heap::ZapFromSpace() {
NewSpacePageIterator it(new_space_.FromSpaceStart(),
new_space_.FromSpaceEnd());
@@ -5744,8 +6412,8 @@ void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
- v->VisitPointer(reinterpret_cast<Object**>(&roots_[kSymbolTableRootIndex]));
- v->Synchronize(VisitorSynchronization::kSymbolTable);
+ v->VisitPointer(reinterpret_cast<Object**>(&roots_[kStringTableRootIndex]));
+ v->Synchronize(VisitorSynchronization::kStringTable);
if (mode != VISIT_ALL_IN_SCAVENGE &&
mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
// Scavenge collections have special processing for this.
@@ -5759,14 +6427,14 @@ void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
v->Synchronize(VisitorSynchronization::kStrongRootList);
- v->VisitPointer(BitCast<Object**>(&hidden_symbol_));
- v->Synchronize(VisitorSynchronization::kSymbol);
+ v->VisitPointer(BitCast<Object**>(&hidden_string_));
+ v->Synchronize(VisitorSynchronization::kInternalizedString);
isolate_->bootstrapper()->Iterate(v);
v->Synchronize(VisitorSynchronization::kBootstrapper);
isolate_->Iterate(v);
v->Synchronize(VisitorSynchronization::kTop);
- Relocatable::Iterate(v);
+ Relocatable::Iterate(isolate_, v);
v->Synchronize(VisitorSynchronization::kRelocatable);
#ifdef ENABLE_DEBUGGER_SUPPORT
@@ -5807,6 +6475,14 @@ void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
}
v->Synchronize(VisitorSynchronization::kGlobalHandles);
+ // Iterate over eternal handles.
+ if (mode == VISIT_ALL_IN_SCAVENGE) {
+ isolate_->eternal_handles()->IterateNewSpaceRoots(v);
+ } else {
+ isolate_->eternal_handles()->IterateAllRoots(v);
+ }
+ v->Synchronize(VisitorSynchronization::kEternalHandles);
+
// Iterate over pointers being held by inactive threads.
isolate_->thread_manager()->Iterate(v);
v->Synchronize(VisitorSynchronization::kThreadManager);
@@ -5819,7 +6495,7 @@ void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
// serialization this does nothing, since the partial snapshot cache is
// empty. However the next thing we do is create the partial snapshot,
// filling up the partial snapshot cache with objects it needs as we go.
- SerializerDeserializer::Iterate(v);
+ SerializerDeserializer::Iterate(isolate_, v);
// We don't do a v->Synchronize call here, because in debug mode that will
// output a flag to the snapshot. However at this point the serializer and
// deserializer are deliberately a little unsynchronized (see above) so the
@@ -5886,7 +6562,12 @@ bool Heap::ConfigureHeap(int max_semispace_size,
max_semispace_size_ = RoundUpToPowerOf2(max_semispace_size_);
reserved_semispace_size_ = RoundUpToPowerOf2(reserved_semispace_size_);
initial_semispace_size_ = Min(initial_semispace_size_, max_semispace_size_);
- external_allocation_limit_ = 16 * max_semispace_size_;
+
+ // The external allocation limit should be below 256 MB on all architectures
+ // to avoid unnecessary low memory notifications, as that is the threshold
+ // for some embedders.
+ external_allocation_limit_ = 12 * max_semispace_size_;
+ ASSERT(external_allocation_limit_ <= 256 * MB);
// The old generation is paged and needs at least one page for each space.
int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
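A quick check of the factor chosen above, using an assumed 16 MB maximum semispace purely for illustration: 12 * 16 MB = 192 MB, which satisfies the 256 MB ceiling the new ASSERT enforces, whereas the previous factor of 16 would have reached 256 MB.

#include <cassert>

int main() {
  const int MB = 1024 * 1024;
  const int max_semispace = 16 * MB;     // illustrative value, not V8's default
  const int limit = 12 * max_semispace;  // 192 MB
  assert(limit <= 256 * MB);             // mirrors the ASSERT in the hunk above
  return 0;
}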
@@ -5895,6 +6576,12 @@ bool Heap::ConfigureHeap(int max_semispace_size,
RoundUp(max_old_generation_size_,
Page::kPageSize));
+ // We rely on being able to allocate new arrays in paged spaces.
+ ASSERT(MaxRegularSpaceAllocationSize() >=
+ (JSArray::kSize +
+ FixedArray::SizeFor(JSObject::kInitialMaxFastElementArray) +
+ AllocationMemento::kSize));
+
configured_ = true;
return true;
}
@@ -5922,6 +6609,8 @@ void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
*stats->map_space_capacity = map_space_->Capacity();
*stats->cell_space_size = cell_space_->SizeOfObjects();
*stats->cell_space_capacity = cell_space_->Capacity();
+ *stats->property_cell_space_size = property_cell_space_->SizeOfObjects();
+ *stats->property_cell_space_capacity = property_cell_space_->Capacity();
*stats->lo_space_size = lo_space_->Size();
isolate_->global_handles()->RecordStats(stats);
*stats->memory_allocator_size = isolate()->memory_allocator()->Size();
@@ -5931,7 +6620,7 @@ void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
*stats->os_error = OS::GetLastError();
isolate()->memory_allocator()->Available();
if (take_snapshot) {
- HeapIterator iterator;
+ HeapIterator iterator(this);
for (HeapObject* obj = iterator.next();
obj != NULL;
obj = iterator.next()) {
@@ -5950,6 +6639,7 @@ intptr_t Heap::PromotedSpaceSizeOfObjects() {
+ code_space_->SizeOfObjects()
+ map_space_->SizeOfObjects()
+ cell_space_->SizeOfObjects()
+ + property_cell_space_->SizeOfObjects()
+ lo_space_->SizeOfObjects();
}
@@ -5961,172 +6651,6 @@ intptr_t Heap::PromotedExternalMemorySize() {
- amount_of_external_allocated_memory_at_last_global_gc_;
}
-#ifdef DEBUG
-
-// Tags 0, 1, and 3 are used. Use 2 for marking visited HeapObject.
-static const int kMarkTag = 2;
-
-
-class HeapDebugUtils {
- public:
- explicit HeapDebugUtils(Heap* heap)
- : search_for_any_global_(false),
- search_target_(NULL),
- found_target_(false),
- object_stack_(20),
- heap_(heap) {
- }
-
- class MarkObjectVisitor : public ObjectVisitor {
- public:
- explicit MarkObjectVisitor(HeapDebugUtils* utils) : utils_(utils) { }
-
- void VisitPointers(Object** start, Object** end) {
- // Copy all HeapObject pointers in [start, end)
- for (Object** p = start; p < end; p++) {
- if ((*p)->IsHeapObject())
- utils_->MarkObjectRecursively(p);
- }
- }
-
- HeapDebugUtils* utils_;
- };
-
- void MarkObjectRecursively(Object** p) {
- if (!(*p)->IsHeapObject()) return;
-
- HeapObject* obj = HeapObject::cast(*p);
-
- Object* map = obj->map();
-
- if (!map->IsHeapObject()) return; // visited before
-
- if (found_target_) return; // stop if target found
- object_stack_.Add(obj);
- if ((search_for_any_global_ && obj->IsJSGlobalObject()) ||
- (!search_for_any_global_ && (obj == search_target_))) {
- found_target_ = true;
- return;
- }
-
- // not visited yet
- Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));
-
- Address map_addr = map_p->address();
-
- obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_addr + kMarkTag));
-
- MarkObjectRecursively(&map);
-
- MarkObjectVisitor mark_visitor(this);
-
- obj->IterateBody(map_p->instance_type(), obj->SizeFromMap(map_p),
- &mark_visitor);
-
- if (!found_target_) // don't pop if found the target
- object_stack_.RemoveLast();
- }
-
-
- class UnmarkObjectVisitor : public ObjectVisitor {
- public:
- explicit UnmarkObjectVisitor(HeapDebugUtils* utils) : utils_(utils) { }
-
- void VisitPointers(Object** start, Object** end) {
- // Copy all HeapObject pointers in [start, end)
- for (Object** p = start; p < end; p++) {
- if ((*p)->IsHeapObject())
- utils_->UnmarkObjectRecursively(p);
- }
- }
-
- HeapDebugUtils* utils_;
- };
-
-
- void UnmarkObjectRecursively(Object** p) {
- if (!(*p)->IsHeapObject()) return;
-
- HeapObject* obj = HeapObject::cast(*p);
-
- Object* map = obj->map();
-
- if (map->IsHeapObject()) return; // unmarked already
-
- Address map_addr = reinterpret_cast<Address>(map);
-
- map_addr -= kMarkTag;
-
- ASSERT_TAG_ALIGNED(map_addr);
-
- HeapObject* map_p = HeapObject::FromAddress(map_addr);
-
- obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_p));
-
- UnmarkObjectRecursively(reinterpret_cast<Object**>(&map_p));
-
- UnmarkObjectVisitor unmark_visitor(this);
-
- obj->IterateBody(Map::cast(map_p)->instance_type(),
- obj->SizeFromMap(Map::cast(map_p)),
- &unmark_visitor);
- }
-
-
- void MarkRootObjectRecursively(Object** root) {
- if (search_for_any_global_) {
- ASSERT(search_target_ == NULL);
- } else {
- ASSERT(search_target_->IsHeapObject());
- }
- found_target_ = false;
- object_stack_.Clear();
-
- MarkObjectRecursively(root);
- UnmarkObjectRecursively(root);
-
- if (found_target_) {
- PrintF("=====================================\n");
- PrintF("==== Path to object ====\n");
- PrintF("=====================================\n\n");
-
- ASSERT(!object_stack_.is_empty());
- for (int i = 0; i < object_stack_.length(); i++) {
- if (i > 0) PrintF("\n |\n |\n V\n\n");
- Object* obj = object_stack_[i];
- obj->Print();
- }
- PrintF("=====================================\n");
- }
- }
-
- // Helper class for visiting HeapObjects recursively.
- class MarkRootVisitor: public ObjectVisitor {
- public:
- explicit MarkRootVisitor(HeapDebugUtils* utils) : utils_(utils) { }
-
- void VisitPointers(Object** start, Object** end) {
- // Visit all HeapObject pointers in [start, end)
- for (Object** p = start; p < end; p++) {
- if ((*p)->IsHeapObject())
- utils_->MarkRootObjectRecursively(p);
- }
- }
-
- HeapDebugUtils* utils_;
- };
-
- bool search_for_any_global_;
- Object* search_target_;
- bool found_target_;
- List<Object*> object_stack_;
- Heap* heap_;
-
- friend class Heap;
-};
-
-#endif
-
V8_DECLARE_ONCE(initialize_gc_once);
@@ -6136,10 +6660,10 @@ static void InitializeGCOnce() {
MarkCompactCollector::Initialize();
}
-bool Heap::SetUp(bool create_heap_objects) {
+
+bool Heap::SetUp() {
#ifdef DEBUG
allocation_timeout_ = FLAG_gc_interval;
- debug_utils_ = new HeapDebugUtils(this);
#endif
// Initialize heap spaces and initial maps and objects. Whenever something
@@ -6205,11 +6729,17 @@ bool Heap::SetUp(bool create_heap_objects) {
if (map_space_ == NULL) return false;
if (!map_space_->SetUp()) return false;
- // Initialize global property cell space.
+ // Initialize simple cell space.
cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE);
if (cell_space_ == NULL) return false;
if (!cell_space_->SetUp()) return false;
+ // Initialize global property cell space.
+ property_cell_space_ = new PropertyCellSpace(this, max_old_generation_size_,
+ PROPERTY_CELL_SPACE);
+ if (property_cell_space_ == NULL) return false;
+ if (!property_cell_space_->SetUp()) return false;
+
// The large object code space may contain code or data. We set the memory
// to be non-executable here for safety, but this means we need to enable it
// explicitly when allocating large code objects.
@@ -6221,31 +6751,39 @@ bool Heap::SetUp(bool create_heap_objects) {
ASSERT(hash_seed() == 0);
if (FLAG_randomize_hashes) {
if (FLAG_hash_seed == 0) {
- set_hash_seed(
- Smi::FromInt(V8::RandomPrivate(isolate()) & 0x3fffffff));
+ int rnd = isolate()->random_number_generator()->NextInt();
+ set_hash_seed(Smi::FromInt(rnd & Name::kHashBitMask));
} else {
set_hash_seed(Smi::FromInt(FLAG_hash_seed));
}
}
- if (create_heap_objects) {
- // Create initial maps.
- if (!CreateInitialMaps()) return false;
- if (!CreateApiObjects()) return false;
-
- // Create initial objects
- if (!CreateInitialObjects()) return false;
-
- native_contexts_list_ = undefined_value();
- }
-
LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
LOG(isolate_, IntPtrTEvent("heap-available", Available()));
store_buffer()->SetUp();
- if (FLAG_parallel_recompilation) relocation_mutex_ = OS::CreateMutex();
+ if (FLAG_concurrent_recompilation) relocation_mutex_ = new Mutex;
+#ifdef DEBUG
+ relocation_mutex_locked_by_optimizer_thread_ = false;
+#endif // DEBUG
+
+ return true;
+}
+
+bool Heap::CreateHeapObjects() {
+ // Create initial maps.
+ if (!CreateInitialMaps()) return false;
+ if (!CreateApiObjects()) return false;
+
+ // Create initial objects
+ if (!CreateInitialObjects()) return false;
+
+ native_contexts_list_ = undefined_value();
+ array_buffers_list_ = undefined_value();
+ allocation_sites_list_ = undefined_value();
+ weak_object_to_code_table_ = undefined_value();
return true;
}
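With SetUp() no longer taking create_heap_objects, the caller decides whether to build the initial maps and objects via CreateHeapObjects(). A stand-in sketch of the implied call order (the Heap struct below is a dummy, not V8's class):

struct Heap {  // stand-in, not V8's class
  bool SetUp() { return true; }              // spaces, store buffer, mutexes
  bool CreateHeapObjects() { return true; }  // initial maps and objects
};

bool SetUpHeap(Heap* heap, bool create_heap_objects) {
  if (!heap->SetUp()) return false;
  return !create_heap_objects || heap->CreateHeapObjects();
}

int main() {
  Heap heap;
  return SetUpHeap(&heap, true) ? 0 : 1;
}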
@@ -6275,21 +6813,27 @@ void Heap::TearDown() {
#endif
if (FLAG_print_cumulative_gc_stat) {
- PrintF("\n\n");
+ PrintF("\n");
PrintF("gc_count=%d ", gc_count_);
PrintF("mark_sweep_count=%d ", ms_count_);
- PrintF("max_gc_pause=%d ", get_max_gc_pause());
- PrintF("total_gc_time=%d ", total_gc_time_ms_);
- PrintF("min_in_mutator=%d ", get_min_in_mutator());
+ PrintF("max_gc_pause=%.1f ", get_max_gc_pause());
+ PrintF("total_gc_time=%.1f ", total_gc_time_ms_);
+ PrintF("min_in_mutator=%.1f ", get_min_in_mutator());
PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ",
get_max_alive_after_gc());
+ PrintF("total_marking_time=%.1f ", marking_time());
+ PrintF("total_sweeping_time=%.1f ", sweeping_time());
PrintF("\n\n");
}
+ TearDownArrayBuffers();
+
isolate_->global_handles()->TearDown();
external_string_table_.TearDown();
+ mark_compact_collector()->TearDown();
+
new_space_.TearDown();
if (old_pointer_space_ != NULL) {
@@ -6322,6 +6866,12 @@ void Heap::TearDown() {
cell_space_ = NULL;
}
+ if (property_cell_space_ != NULL) {
+ property_cell_space_->TearDown();
+ delete property_cell_space_;
+ property_cell_space_ = NULL;
+ }
+
if (lo_space_ != NULL) {
lo_space_->TearDown();
delete lo_space_;
@@ -6334,34 +6884,20 @@ void Heap::TearDown() {
isolate_->memory_allocator()->TearDown();
delete relocation_mutex_;
-
-#ifdef DEBUG
- delete debug_utils_;
- debug_utils_ = NULL;
-#endif
}
-void Heap::Shrink() {
- // Try to shrink all paged spaces.
- PagedSpaces spaces;
- for (PagedSpace* space = spaces.next();
- space != NULL;
- space = spaces.next()) {
- space->ReleaseAllUnusedPages();
- }
-}
-
-
-void Heap::AddGCPrologueCallback(GCPrologueCallback callback, GCType gc_type) {
+void Heap::AddGCPrologueCallback(v8::Isolate::GCPrologueCallback callback,
+ GCType gc_type,
+ bool pass_isolate) {
ASSERT(callback != NULL);
- GCPrologueCallbackPair pair(callback, gc_type);
+ GCPrologueCallbackPair pair(callback, gc_type, pass_isolate);
ASSERT(!gc_prologue_callbacks_.Contains(pair));
return gc_prologue_callbacks_.Add(pair);
}
-void Heap::RemoveGCPrologueCallback(GCPrologueCallback callback) {
+void Heap::RemoveGCPrologueCallback(v8::Isolate::GCPrologueCallback callback) {
ASSERT(callback != NULL);
for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
if (gc_prologue_callbacks_[i].callback == callback) {
@@ -6373,15 +6909,17 @@ void Heap::RemoveGCPrologueCallback(GCPrologueCallback callback) {
}
-void Heap::AddGCEpilogueCallback(GCEpilogueCallback callback, GCType gc_type) {
+void Heap::AddGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback,
+ GCType gc_type,
+ bool pass_isolate) {
ASSERT(callback != NULL);
- GCEpilogueCallbackPair pair(callback, gc_type);
+ GCEpilogueCallbackPair pair(callback, gc_type, pass_isolate);
ASSERT(!gc_epilogue_callbacks_.Contains(pair));
return gc_epilogue_callbacks_.Add(pair);
}
-void Heap::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
+void Heap::RemoveGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback) {
ASSERT(callback != NULL);
for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
if (gc_epilogue_callbacks_[i].callback == callback) {
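For embedders, the new pass_isolate flag means callbacks registered through the v8::Isolate API receive the isolate as their first argument; a hedged usage sketch (signatures recalled for this V8 line, so verify against the matching v8.h before relying on it):

#include <v8.h>

void OnGCPrologue(v8::Isolate* isolate, v8::GCType type,
                  v8::GCCallbackFlags flags) {
  // React to the upcoming collection, e.g. record statistics.
}

// After the isolate exists:
//   isolate->AddGCPrologueCallback(OnGCPrologue, v8::kGCTypeAll);
// and later:
//   isolate->RemoveGCPrologueCallback(OnGCPrologue);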
@@ -6393,6 +6931,37 @@ void Heap::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
}
+MaybeObject* Heap::AddWeakObjectToCodeDependency(Object* obj,
+ DependentCode* dep) {
+ ASSERT(!InNewSpace(obj));
+ ASSERT(!InNewSpace(dep));
+ MaybeObject* maybe_obj =
+ WeakHashTable::cast(weak_object_to_code_table_)->Put(obj, dep);
+ WeakHashTable* table;
+ if (!maybe_obj->To(&table)) return maybe_obj;
+ if (ShouldZapGarbage() && weak_object_to_code_table_ != table) {
+ WeakHashTable::cast(weak_object_to_code_table_)->Zap(the_hole_value());
+ }
+ set_weak_object_to_code_table(table);
+ ASSERT_EQ(dep, WeakHashTable::cast(weak_object_to_code_table_)->Lookup(obj));
+ return weak_object_to_code_table_;
+}
+
+
+DependentCode* Heap::LookupWeakObjectToCodeDependency(Object* obj) {
+ Object* dep = WeakHashTable::cast(weak_object_to_code_table_)->Lookup(obj);
+ if (dep->IsDependentCode()) return DependentCode::cast(dep);
+ return DependentCode::cast(empty_fixed_array());
+}
+
+
+void Heap::EnsureWeakObjectToCodeTable() {
+ if (!weak_object_to_code_table()->IsHashTable()) {
+ set_weak_object_to_code_table(*isolate()->factory()->NewWeakHashTable(16));
+ }
+}
+
+
#ifdef DEBUG
class PrintHandleVisitor: public ObjectVisitor {
@@ -6405,6 +6974,7 @@ class PrintHandleVisitor: public ObjectVisitor {
}
};
+
void Heap::PrintHandles() {
PrintF("Handles:\n");
PrintHandleVisitor v;
@@ -6417,19 +6987,21 @@ void Heap::PrintHandles() {
Space* AllSpaces::next() {
switch (counter_++) {
case NEW_SPACE:
- return HEAP->new_space();
+ return heap_->new_space();
case OLD_POINTER_SPACE:
- return HEAP->old_pointer_space();
+ return heap_->old_pointer_space();
case OLD_DATA_SPACE:
- return HEAP->old_data_space();
+ return heap_->old_data_space();
case CODE_SPACE:
- return HEAP->code_space();
+ return heap_->code_space();
case MAP_SPACE:
- return HEAP->map_space();
+ return heap_->map_space();
case CELL_SPACE:
- return HEAP->cell_space();
+ return heap_->cell_space();
+ case PROPERTY_CELL_SPACE:
+ return heap_->property_cell_space();
case LO_SPACE:
- return HEAP->lo_space();
+ return heap_->lo_space();
default:
return NULL;
}
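These space iterators now take the owning heap explicitly instead of going through the HEAP macro; the calling pattern, as Heap::Print uses earlier in this patch (with "this" in place of a Heap* named heap):

AllSpaces spaces(heap);
for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
  space->Print();
}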
@@ -6439,15 +7011,17 @@ Space* AllSpaces::next() {
PagedSpace* PagedSpaces::next() {
switch (counter_++) {
case OLD_POINTER_SPACE:
- return HEAP->old_pointer_space();
+ return heap_->old_pointer_space();
case OLD_DATA_SPACE:
- return HEAP->old_data_space();
+ return heap_->old_data_space();
case CODE_SPACE:
- return HEAP->code_space();
+ return heap_->code_space();
case MAP_SPACE:
- return HEAP->map_space();
+ return heap_->map_space();
case CELL_SPACE:
- return HEAP->cell_space();
+ return heap_->cell_space();
+ case PROPERTY_CELL_SPACE:
+ return heap_->property_cell_space();
default:
return NULL;
}
@@ -6458,26 +7032,28 @@ PagedSpace* PagedSpaces::next() {
OldSpace* OldSpaces::next() {
switch (counter_++) {
case OLD_POINTER_SPACE:
- return HEAP->old_pointer_space();
+ return heap_->old_pointer_space();
case OLD_DATA_SPACE:
- return HEAP->old_data_space();
+ return heap_->old_data_space();
case CODE_SPACE:
- return HEAP->code_space();
+ return heap_->code_space();
default:
return NULL;
}
}
-SpaceIterator::SpaceIterator()
- : current_space_(FIRST_SPACE),
+SpaceIterator::SpaceIterator(Heap* heap)
+ : heap_(heap),
+ current_space_(FIRST_SPACE),
iterator_(NULL),
size_func_(NULL) {
}
-SpaceIterator::SpaceIterator(HeapObjectCallback size_func)
- : current_space_(FIRST_SPACE),
+SpaceIterator::SpaceIterator(Heap* heap, HeapObjectCallback size_func)
+ : heap_(heap),
+ current_space_(FIRST_SPACE),
iterator_(NULL),
size_func_(size_func) {
}
@@ -6517,25 +7093,30 @@ ObjectIterator* SpaceIterator::CreateIterator() {
switch (current_space_) {
case NEW_SPACE:
- iterator_ = new SemiSpaceIterator(HEAP->new_space(), size_func_);
+ iterator_ = new SemiSpaceIterator(heap_->new_space(), size_func_);
break;
case OLD_POINTER_SPACE:
- iterator_ = new HeapObjectIterator(HEAP->old_pointer_space(), size_func_);
+ iterator_ =
+ new HeapObjectIterator(heap_->old_pointer_space(), size_func_);
break;
case OLD_DATA_SPACE:
- iterator_ = new HeapObjectIterator(HEAP->old_data_space(), size_func_);
+ iterator_ = new HeapObjectIterator(heap_->old_data_space(), size_func_);
break;
case CODE_SPACE:
- iterator_ = new HeapObjectIterator(HEAP->code_space(), size_func_);
+ iterator_ = new HeapObjectIterator(heap_->code_space(), size_func_);
break;
case MAP_SPACE:
- iterator_ = new HeapObjectIterator(HEAP->map_space(), size_func_);
+ iterator_ = new HeapObjectIterator(heap_->map_space(), size_func_);
break;
case CELL_SPACE:
- iterator_ = new HeapObjectIterator(HEAP->cell_space(), size_func_);
+ iterator_ = new HeapObjectIterator(heap_->cell_space(), size_func_);
+ break;
+ case PROPERTY_CELL_SPACE:
+ iterator_ = new HeapObjectIterator(heap_->property_cell_space(),
+ size_func_);
break;
case LO_SPACE:
- iterator_ = new LargeObjectIterator(HEAP->lo_space(), size_func_);
+ iterator_ = new LargeObjectIterator(heap_->lo_space(), size_func_);
break;
}
@@ -6554,12 +7135,12 @@ class HeapObjectsFilter {
class UnreachableObjectsFilter : public HeapObjectsFilter {
public:
- UnreachableObjectsFilter() {
+ explicit UnreachableObjectsFilter(Heap* heap) : heap_(heap) {
MarkReachableObjects();
}
~UnreachableObjectsFilter() {
- Isolate::Current()->heap()->mark_compact_collector()->ClearMarkbits();
+ heap_->mark_compact_collector()->ClearMarkbits();
}
bool SkipObject(HeapObject* object) {
@@ -6596,25 +7177,28 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
};
void MarkReachableObjects() {
- Heap* heap = Isolate::Current()->heap();
MarkingVisitor visitor;
- heap->IterateRoots(&visitor, VISIT_ALL);
+ heap_->IterateRoots(&visitor, VISIT_ALL);
visitor.TransitiveClosure();
}
- AssertNoAllocation no_alloc;
+ Heap* heap_;
+ DisallowHeapAllocation no_allocation_;
};
-HeapIterator::HeapIterator()
- : filtering_(HeapIterator::kNoFiltering),
+HeapIterator::HeapIterator(Heap* heap)
+ : heap_(heap),
+ filtering_(HeapIterator::kNoFiltering),
filter_(NULL) {
Init();
}
-HeapIterator::HeapIterator(HeapIterator::HeapObjectsFiltering filtering)
- : filtering_(filtering),
+HeapIterator::HeapIterator(Heap* heap,
+ HeapIterator::HeapObjectsFiltering filtering)
+ : heap_(heap),
+ filtering_(filtering),
filter_(NULL) {
Init();
}
@@ -6627,10 +7211,10 @@ HeapIterator::~HeapIterator() {
void HeapIterator::Init() {
// Start the iteration.
- space_iterator_ = new SpaceIterator;
+ space_iterator_ = new SpaceIterator(heap_);
switch (filtering_) {
case kFilterUnreachable:
- filter_ = new UnreachableObjectsFilter;
+ filter_ = new UnreachableObjectsFilter(heap_);
break;
default:
break;
@@ -6694,9 +7278,9 @@ void HeapIterator::reset() {
}
-#if defined(DEBUG) || defined(LIVE_OBJECT_LIST)
+#ifdef DEBUG
-Object* const PathTracer::kAnyGlobalObject = reinterpret_cast<Object*>(NULL);
+Object* const PathTracer::kAnyGlobalObject = NULL;
class PathTracer::MarkVisitor: public ObjectVisitor {
public:
@@ -6861,10 +7445,8 @@ void PathTracer::ProcessResults() {
PrintF("=====================================\n");
}
}
-#endif // DEBUG || LIVE_OBJECT_LIST
-#ifdef DEBUG
// Triggers a depth-first traversal of reachable objects from one
// given root object and finds a path to a specific heap object and
// prints it.
@@ -6894,9 +7476,9 @@ void Heap::TracePathToGlobal() {
#endif
-static intptr_t CountTotalHolesSize() {
+static intptr_t CountTotalHolesSize(Heap* heap) {
intptr_t holes_size = 0;
- OldSpaces spaces;
+ OldSpaces spaces(heap);
for (OldSpace* space = spaces.next();
space != NULL;
space = spaces.next()) {
@@ -6917,6 +7499,9 @@ GCTracer::GCTracer(Heap* heap,
allocated_since_last_gc_(0),
spent_in_mutator_(0),
promoted_objects_size_(0),
+ nodes_died_in_new_space_(0),
+ nodes_copied_in_new_space_(0),
+ nodes_promoted_(0),
heap_(heap),
gc_reason_(gc_reason),
collector_reason_(collector_reason) {
@@ -6929,7 +7514,7 @@ GCTracer::GCTracer(Heap* heap,
scopes_[i] = 0;
}
- in_free_list_or_wasted_before_gc_ = CountTotalHolesSize();
+ in_free_list_or_wasted_before_gc_ = CountTotalHolesSize(heap);
allocated_since_last_gc_ =
heap_->SizeOfObjects() - heap_->alive_after_last_gc_;
@@ -6957,7 +7542,7 @@ GCTracer::~GCTracer() {
heap_->alive_after_last_gc_ = heap_->SizeOfObjects();
heap_->last_gc_end_timestamp_ = OS::TimeCurrentMillis();
- int time = static_cast<int>(heap_->last_gc_end_timestamp_ - start_time_);
+ double time = heap_->last_gc_end_timestamp_ - start_time_;
// Update cumulative GC statistics if required.
if (FLAG_print_cumulative_gc_stat) {
@@ -6967,7 +7552,7 @@ GCTracer::~GCTracer() {
heap_->alive_after_last_gc_);
if (!first_gc) {
heap_->min_in_mutator_ = Min(heap_->min_in_mutator_,
- static_cast<int>(spent_in_mutator_));
+ spent_in_mutator_);
}
} else if (FLAG_trace_gc_verbose) {
heap_->total_gc_time_ms_ += time;
@@ -6975,6 +7560,9 @@ GCTracer::~GCTracer() {
if (collector_ == SCAVENGER && FLAG_trace_gc_ignore_scavenger) return;
+ heap_->AddMarkingTime(scopes_[Scope::MC_MARK]);
+
+ if (FLAG_print_cumulative_gc_stat && !FLAG_trace_gc) return;
PrintPID("%8.0f ms: ", heap_->isolate()->time_millis_since_init());
if (!FLAG_trace_gc_nvp) {
@@ -6991,16 +7579,16 @@ GCTracer::~GCTracer() {
end_memory_size_mb);
if (external_time > 0) PrintF("%d / ", external_time);
- PrintF("%d ms", time);
+ PrintF("%.1f ms", time);
if (steps_count_ > 0) {
if (collector_ == SCAVENGER) {
- PrintF(" (+ %d ms in %d steps since last GC)",
- static_cast<int>(steps_took_since_last_gc_),
+ PrintF(" (+ %.1f ms in %d steps since last GC)",
+ steps_took_since_last_gc_,
steps_count_since_last_gc_);
} else {
- PrintF(" (+ %d ms in %d steps since start of marking, "
- "biggest step %f ms)",
- static_cast<int>(steps_took_),
+ PrintF(" (+ %.1f ms in %d steps since start of marking, "
+ "biggest step %.1f ms)",
+ steps_took_,
steps_count_,
longest_step_);
}
@@ -7016,8 +7604,8 @@ GCTracer::~GCTracer() {
PrintF(".\n");
} else {
- PrintF("pause=%d ", time);
- PrintF("mutator=%d ", static_cast<int>(spent_in_mutator_));
+ PrintF("pause=%.1f ", time);
+ PrintF("mutator=%.1f ", spent_in_mutator_);
PrintF("gc=");
switch (collector_) {
case SCAVENGER:
@@ -7031,39 +7619,43 @@ GCTracer::~GCTracer() {
}
PrintF(" ");
- PrintF("external=%d ", static_cast<int>(scopes_[Scope::EXTERNAL]));
- PrintF("mark=%d ", static_cast<int>(scopes_[Scope::MC_MARK]));
- PrintF("sweep=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP]));
- PrintF("sweepns=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP_NEWSPACE]));
- PrintF("evacuate=%d ", static_cast<int>(scopes_[Scope::MC_EVACUATE_PAGES]));
- PrintF("new_new=%d ",
- static_cast<int>(scopes_[Scope::MC_UPDATE_NEW_TO_NEW_POINTERS]));
- PrintF("root_new=%d ",
- static_cast<int>(scopes_[Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS]));
- PrintF("old_new=%d ",
- static_cast<int>(scopes_[Scope::MC_UPDATE_OLD_TO_NEW_POINTERS]));
- PrintF("compaction_ptrs=%d ",
- static_cast<int>(scopes_[Scope::MC_UPDATE_POINTERS_TO_EVACUATED]));
- PrintF("intracompaction_ptrs=%d ", static_cast<int>(scopes_[
- Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED]));
- PrintF("misc_compaction=%d ",
- static_cast<int>(scopes_[Scope::MC_UPDATE_MISC_POINTERS]));
+ PrintF("external=%.1f ", scopes_[Scope::EXTERNAL]);
+ PrintF("mark=%.1f ", scopes_[Scope::MC_MARK]);
+ PrintF("sweep=%.1f ", scopes_[Scope::MC_SWEEP]);
+ PrintF("sweepns=%.1f ", scopes_[Scope::MC_SWEEP_NEWSPACE]);
+ PrintF("evacuate=%.1f ", scopes_[Scope::MC_EVACUATE_PAGES]);
+ PrintF("new_new=%.1f ", scopes_[Scope::MC_UPDATE_NEW_TO_NEW_POINTERS]);
+ PrintF("root_new=%.1f ", scopes_[Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS]);
+ PrintF("old_new=%.1f ", scopes_[Scope::MC_UPDATE_OLD_TO_NEW_POINTERS]);
+ PrintF("compaction_ptrs=%.1f ",
+ scopes_[Scope::MC_UPDATE_POINTERS_TO_EVACUATED]);
+ PrintF("intracompaction_ptrs=%.1f ",
+ scopes_[Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED]);
+ PrintF("misc_compaction=%.1f ", scopes_[Scope::MC_UPDATE_MISC_POINTERS]);
+ PrintF("weakcollection_process=%.1f ",
+ scopes_[Scope::MC_WEAKCOLLECTION_PROCESS]);
+ PrintF("weakcollection_clear=%.1f ",
+ scopes_[Scope::MC_WEAKCOLLECTION_CLEAR]);
PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_object_size_);
PrintF("total_size_after=%" V8_PTR_PREFIX "d ", heap_->SizeOfObjects());
PrintF("holes_size_before=%" V8_PTR_PREFIX "d ",
in_free_list_or_wasted_before_gc_);
- PrintF("holes_size_after=%" V8_PTR_PREFIX "d ", CountTotalHolesSize());
+ PrintF("holes_size_after=%" V8_PTR_PREFIX "d ", CountTotalHolesSize(heap_));
PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc_);
PrintF("promoted=%" V8_PTR_PREFIX "d ", promoted_objects_size_);
+ PrintF("nodes_died_in_new=%d ", nodes_died_in_new_space_);
+ PrintF("nodes_copied_in_new=%d ", nodes_copied_in_new_space_);
+ PrintF("nodes_promoted=%d ", nodes_promoted_);
if (collector_ == SCAVENGER) {
PrintF("stepscount=%d ", steps_count_since_last_gc_);
- PrintF("stepstook=%d ", static_cast<int>(steps_took_since_last_gc_));
+ PrintF("stepstook=%.1f ", steps_took_since_last_gc_);
} else {
PrintF("stepscount=%d ", steps_count_);
- PrintF("stepstook=%d ", static_cast<int>(steps_took_));
+ PrintF("stepstook=%.1f ", steps_took_);
+ PrintF("longeststep=%.1f ", longest_step_);
}
PrintF("\n");
@@ -7084,7 +7676,7 @@ const char* GCTracer::CollectorString() {
}
-int KeyedLookupCache::Hash(Map* map, String* name) {
+int KeyedLookupCache::Hash(Map* map, Name* name) {
// Uses only lower 32 bits if pointers are larger.
uintptr_t addr_hash =
static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> kMapHashShift;
@@ -7092,7 +7684,7 @@ int KeyedLookupCache::Hash(Map* map, String* name) {
}
-int KeyedLookupCache::Lookup(Map* map, String* name) {
+int KeyedLookupCache::Lookup(Map* map, Name* name) {
int index = (Hash(map, name) & kHashMask);
for (int i = 0; i < kEntriesPerBucket; i++) {
Key& key = keys_[index + i];
@@ -7104,37 +7696,46 @@ int KeyedLookupCache::Lookup(Map* map, String* name) {
}
-void KeyedLookupCache::Update(Map* map, String* name, int field_offset) {
- String* symbol;
- if (HEAP->LookupSymbolIfExists(name, &symbol)) {
- int index = (Hash(map, symbol) & kHashMask);
- // After a GC there will be free slots, so we use them in order (this may
- // help to get the most frequently used one in position 0).
- for (int i = 0; i< kEntriesPerBucket; i++) {
- Key& key = keys_[index];
- Object* free_entry_indicator = NULL;
- if (key.map == free_entry_indicator) {
- key.map = map;
- key.name = symbol;
- field_offsets_[index + i] = field_offset;
- return;
- }
- }
- // No free entry found in this bucket, so we move them all down one and
- // put the new entry at position zero.
- for (int i = kEntriesPerBucket - 1; i > 0; i--) {
- Key& key = keys_[index + i];
- Key& key2 = keys_[index + i - 1];
- key = key2;
- field_offsets_[index + i] = field_offsets_[index + i - 1];
+void KeyedLookupCache::Update(Map* map, Name* name, int field_offset) {
+ if (!name->IsUniqueName()) {
+ String* internalized_string;
+ if (!map->GetIsolate()->heap()->InternalizeStringIfExists(
+ String::cast(name), &internalized_string)) {
+ return;
}
+ name = internalized_string;
+ }
+ // This cache is cleared only between mark compact passes, so we expect the
+ // cache to only contain old space names.
+ ASSERT(!map->GetIsolate()->heap()->InNewSpace(name));
- // Write the new first entry.
+ int index = (Hash(map, name) & kHashMask);
+ // After a GC there will be free slots, so we use them in order (this may
+ // help to get the most frequently used one in position 0).
+ for (int i = 0; i < kEntriesPerBucket; i++) {
Key& key = keys_[index];
- key.map = map;
- key.name = symbol;
- field_offsets_[index] = field_offset;
+ Object* free_entry_indicator = NULL;
+ if (key.map == free_entry_indicator) {
+ key.map = map;
+ key.name = name;
+ field_offsets_[index + i] = field_offset;
+ return;
+ }
}
+ // No free entry found in this bucket, so we move them all down one and
+ // put the new entry at position zero.
+ for (int i = kEntriesPerBucket - 1; i > 0; i--) {
+ Key& key = keys_[index + i];
+ Key& key2 = keys_[index + i - 1];
+ key = key2;
+ field_offsets_[index + i] = field_offsets_[index + i - 1];
+ }
+
+ // Write the new first entry.
+ Key& key = keys_[index];
+ key.map = map;
+ key.name = name;
+ field_offsets_[index] = field_offset;
}
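The rewritten Update keeps the two-phase bucket policy the comments describe: reuse a free slot in the bucket if one exists, otherwise shift every entry down by one and write the new pair at position zero. A self-contained sketch of that policy, with hypothetical types and bucket size, checking each slot in turn as the comments intend:

#include <cstddef>

struct Entry { const void* map; const void* name; };
const int kEntriesPerBucket = 4;

// Insert (map, name) into one bucket: the first free slot wins; if the bucket
// is full, evict the oldest entry by shifting and put the new pair in front.
void UpdateBucket(Entry* bucket, const void* map, const void* name) {
  for (int i = 0; i < kEntriesPerBucket; i++) {
    if (bucket[i].map == NULL) {   // a NULL map marks a free slot
      bucket[i].map = map;
      bucket[i].name = name;
      return;
    }
  }
  for (int i = kEntriesPerBucket - 1; i > 0; i--) {
    bucket[i] = bucket[i - 1];     // move everything down one position
  }
  bucket[0].map = map;             // newest entry goes to position zero
  bucket[0].name = name;
}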
@@ -7158,9 +7759,9 @@ void Heap::GarbageCollectionGreedyCheck() {
#endif
-TranscendentalCache::SubCache::SubCache(Type t)
+TranscendentalCache::SubCache::SubCache(Isolate* isolate, Type t)
: type_(t),
- isolate_(Isolate::Current()) {
+ isolate_(isolate) {
uint32_t in0 = 0xffffffffu; // Bit-pattern for a NaN that isn't
uint32_t in1 = 0xffffffffu; // generated by the FPU.
for (int i = 0; i < kCacheSize; i++) {
@@ -7184,9 +7785,10 @@ void TranscendentalCache::Clear() {
void ExternalStringTable::CleanUp() {
int last = 0;
for (int i = 0; i < new_space_strings_.length(); ++i) {
- if (new_space_strings_[i] == heap_->raw_unchecked_the_hole_value()) {
+ if (new_space_strings_[i] == heap_->the_hole_value()) {
continue;
}
+ ASSERT(new_space_strings_[i]->IsExternalString());
if (heap_->InNewSpace(new_space_strings_[i])) {
new_space_strings_[last++] = new_space_strings_[i];
} else {
@@ -7194,15 +7796,19 @@ void ExternalStringTable::CleanUp() {
}
}
new_space_strings_.Rewind(last);
+ new_space_strings_.Trim();
+
last = 0;
for (int i = 0; i < old_space_strings_.length(); ++i) {
- if (old_space_strings_[i] == heap_->raw_unchecked_the_hole_value()) {
+ if (old_space_strings_[i] == heap_->the_hole_value()) {
continue;
}
+ ASSERT(old_space_strings_[i]->IsExternalString());
ASSERT(!heap_->InNewSpace(old_space_strings_[i]));
old_space_strings_[last++] = old_space_strings_[i];
}
old_space_strings_.Rewind(last);
+ old_space_strings_.Trim();
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
Verify();
@@ -7300,7 +7906,7 @@ static LazyMutex checkpoint_object_stats_mutex = LAZY_MUTEX_INITIALIZER;
void Heap::CheckpointObjectStats() {
- ScopedLock lock(checkpoint_object_stats_mutex.Pointer());
+ LockGuard<Mutex> lock_guard(checkpoint_object_stats_mutex.Pointer());
Counters* counters = isolate()->counters();
#define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
counters->count_of_##name()->Increment( \
@@ -7338,10 +7944,33 @@ void Heap::CheckpointObjectStats() {
static_cast<int>(object_sizes_last_time_[index]));
FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
#undef ADJUST_LAST_TIME_OBJECT_COUNT
+#define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
+ index = FIRST_CODE_AGE_SUB_TYPE + Code::k##name##CodeAge; \
+ counters->count_of_CODE_AGE_##name()->Increment( \
+ static_cast<int>(object_counts_[index])); \
+ counters->count_of_CODE_AGE_##name()->Decrement( \
+ static_cast<int>(object_counts_last_time_[index])); \
+ counters->size_of_CODE_AGE_##name()->Increment( \
+ static_cast<int>(object_sizes_[index])); \
+ counters->size_of_CODE_AGE_##name()->Decrement( \
+ static_cast<int>(object_sizes_last_time_[index]));
+ CODE_AGE_LIST_WITH_NO_AGE(ADJUST_LAST_TIME_OBJECT_COUNT)
+#undef ADJUST_LAST_TIME_OBJECT_COUNT
- memcpy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
- memcpy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
+ OS::MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
+ OS::MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
ClearObjectStats();
}
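Each ADJUST_LAST_TIME_OBJECT_COUNT expansion applies the same bookkeeping: the external counter is incremented by the current per-type tally and decremented by the tally saved at the previous checkpoint, after which the snapshot arrays are refreshed with OS::MemCopy. A stripped-down sketch of that delta-checkpoint pattern (the counter storage here is hypothetical):

#include <cstring>

const int kTypeCount = 8;
long counts[kTypeCount];             // live tallies, updated during GC
long counts_last_time[kTypeCount];   // snapshot taken at the last checkpoint
long external_counters[kTypeCount];  // what the stats backend sees

// Push only the delta accumulated since the previous checkpoint, then make
// the current tallies the new baseline for the next call.
void CheckpointSketch() {
  for (int i = 0; i < kTypeCount; i++) {
    external_counters[i] += counts[i] - counts_last_time[i];
  }
  memcpy(counts_last_time, counts, sizeof(counts));
}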
+
+Heap::RelocationLock::RelocationLock(Heap* heap) : heap_(heap) {
+ if (FLAG_concurrent_recompilation) {
+ heap_->relocation_mutex_->Lock();
+#ifdef DEBUG
+ heap_->relocation_mutex_locked_by_optimizer_thread_ =
+ heap_->isolate()->optimizing_compiler_thread()->IsOptimizerThread();
+#endif // DEBUG
+ }
+}
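RelocationLock above takes the relocation mutex only when concurrent recompilation is enabled; presumably the matching conditional unlock lives in the destructor, which this hunk does not show. A generic sketch of a flag-guarded RAII lock, with hypothetical types:

struct Mutex { void Lock() {} void Unlock() {} };
bool FLAG_concurrent_recompilation = true;

class ScopedRelocationLock {
 public:
  explicit ScopedRelocationLock(Mutex* mutex) : mutex_(mutex) {
    if (FLAG_concurrent_recompilation) mutex_->Lock();    // lock only if needed
  }
  ~ScopedRelocationLock() {
    if (FLAG_concurrent_recompilation) mutex_->Unlock();  // symmetric release
  }
 private:
  Mutex* mutex_;
};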
+
} } // namespace v8::internal
diff --git a/deps/v8/src/heap.h b/deps/v8/src/heap.h
index 0ab7ae0bd..96cda586b 100644
--- a/deps/v8/src/heap.h
+++ b/deps/v8/src/heap.h
@@ -28,9 +28,10 @@
#ifndef V8_HEAP_H_
#define V8_HEAP_H_
-#include <math.h>
+#include <cmath>
#include "allocation.h"
+#include "assert-scope.h"
#include "globals.h"
#include "incremental-marking.h"
#include "list.h"
@@ -58,11 +59,11 @@ namespace internal {
V(Oddball, null_value, NullValue) \
V(Oddball, true_value, TrueValue) \
V(Oddball, false_value, FalseValue) \
+ V(Oddball, uninitialized_value, UninitializedValue) \
+ V(Map, cell_map, CellMap) \
V(Map, global_property_cell_map, GlobalPropertyCellMap) \
V(Map, shared_function_info_map, SharedFunctionInfoMap) \
V(Map, meta_map, MetaMap) \
- V(Map, ascii_symbol_map, AsciiSymbolMap) \
- V(Map, ascii_string_map, AsciiStringMap) \
V(Map, heap_number_map, HeapNumberMap) \
V(Map, native_context_map, NativeContextMap) \
V(Map, fixed_array_map, FixedArrayMap) \
@@ -70,17 +71,17 @@ namespace internal {
V(Map, scope_info_map, ScopeInfoMap) \
V(Map, fixed_cow_array_map, FixedCOWArrayMap) \
V(Map, fixed_double_array_map, FixedDoubleArrayMap) \
+ V(Map, constant_pool_array_map, ConstantPoolArrayMap) \
V(Object, no_interceptor_result_sentinel, NoInterceptorResultSentinel) \
V(Map, hash_table_map, HashTableMap) \
V(FixedArray, empty_fixed_array, EmptyFixedArray) \
V(ByteArray, empty_byte_array, EmptyByteArray) \
- V(String, empty_string, EmptyString) \
V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray) \
V(Smi, stack_limit, StackLimit) \
V(Oddball, arguments_marker, ArgumentsMarker) \
- /* The first 32 roots above this line should be boring from a GC point of */ \
- /* view. This means they are never in new space and never on a page that */ \
- /* is being compacted. */ \
+ /* The roots above this line should be boring from a GC point of view. */ \
+ /* This means they are never in new space and never on a page that is */ \
+ /* being compacted. */ \
V(FixedArray, number_string_cache, NumberStringCache) \
V(Object, instanceof_cache_function, InstanceofCacheFunction) \
V(Object, instanceof_cache_map, InstanceofCacheMap) \
@@ -90,33 +91,47 @@ namespace internal {
V(FixedArray, regexp_multiple_cache, RegExpMultipleCache) \
V(Object, termination_exception, TerminationException) \
V(Smi, hash_seed, HashSeed) \
- V(Map, string_map, StringMap) \
V(Map, symbol_map, SymbolMap) \
+ V(Map, string_map, StringMap) \
+ V(Map, ascii_string_map, AsciiStringMap) \
V(Map, cons_string_map, ConsStringMap) \
V(Map, cons_ascii_string_map, ConsAsciiStringMap) \
V(Map, sliced_string_map, SlicedStringMap) \
V(Map, sliced_ascii_string_map, SlicedAsciiStringMap) \
- V(Map, cons_symbol_map, ConsSymbolMap) \
- V(Map, cons_ascii_symbol_map, ConsAsciiSymbolMap) \
- V(Map, external_symbol_map, ExternalSymbolMap) \
- V(Map, external_symbol_with_ascii_data_map, ExternalSymbolWithAsciiDataMap) \
- V(Map, external_ascii_symbol_map, ExternalAsciiSymbolMap) \
V(Map, external_string_map, ExternalStringMap) \
- V(Map, external_string_with_ascii_data_map, ExternalStringWithAsciiDataMap) \
- V(Map, external_ascii_string_map, ExternalAsciiStringMap) \
- V(Map, short_external_symbol_map, ShortExternalSymbolMap) \
V(Map, \
- short_external_symbol_with_ascii_data_map, \
- ShortExternalSymbolWithAsciiDataMap) \
- V(Map, short_external_ascii_symbol_map, ShortExternalAsciiSymbolMap) \
+ external_string_with_one_byte_data_map, \
+ ExternalStringWithOneByteDataMap) \
+ V(Map, external_ascii_string_map, ExternalAsciiStringMap) \
V(Map, short_external_string_map, ShortExternalStringMap) \
V(Map, \
- short_external_string_with_ascii_data_map, \
- ShortExternalStringWithAsciiDataMap) \
+ short_external_string_with_one_byte_data_map, \
+ ShortExternalStringWithOneByteDataMap) \
+ V(Map, internalized_string_map, InternalizedStringMap) \
+ V(Map, ascii_internalized_string_map, AsciiInternalizedStringMap) \
+ V(Map, cons_internalized_string_map, ConsInternalizedStringMap) \
+ V(Map, cons_ascii_internalized_string_map, ConsAsciiInternalizedStringMap) \
+ V(Map, \
+ external_internalized_string_map, \
+ ExternalInternalizedStringMap) \
+ V(Map, \
+ external_internalized_string_with_one_byte_data_map, \
+ ExternalInternalizedStringWithOneByteDataMap) \
+ V(Map, \
+ external_ascii_internalized_string_map, \
+ ExternalAsciiInternalizedStringMap) \
+ V(Map, \
+ short_external_internalized_string_map, \
+ ShortExternalInternalizedStringMap) \
+ V(Map, \
+ short_external_internalized_string_with_one_byte_data_map, \
+ ShortExternalInternalizedStringWithOneByteDataMap) \
+ V(Map, \
+ short_external_ascii_internalized_string_map, \
+ ShortExternalAsciiInternalizedStringMap) \
V(Map, short_external_ascii_string_map, ShortExternalAsciiStringMap) \
V(Map, undetectable_string_map, UndetectableStringMap) \
V(Map, undetectable_ascii_string_map, UndetectableAsciiStringMap) \
- V(Map, external_pixel_array_map, ExternalPixelArrayMap) \
V(Map, external_byte_array_map, ExternalByteArrayMap) \
V(Map, external_unsigned_byte_array_map, ExternalUnsignedByteArrayMap) \
V(Map, external_short_array_map, ExternalShortArrayMap) \
@@ -125,6 +140,21 @@ namespace internal {
V(Map, external_unsigned_int_array_map, ExternalUnsignedIntArrayMap) \
V(Map, external_float_array_map, ExternalFloatArrayMap) \
V(Map, external_double_array_map, ExternalDoubleArrayMap) \
+ V(Map, external_pixel_array_map, ExternalPixelArrayMap) \
+ V(ExternalArray, empty_external_byte_array, \
+ EmptyExternalByteArray) \
+ V(ExternalArray, empty_external_unsigned_byte_array, \
+ EmptyExternalUnsignedByteArray) \
+ V(ExternalArray, empty_external_short_array, EmptyExternalShortArray) \
+ V(ExternalArray, empty_external_unsigned_short_array, \
+ EmptyExternalUnsignedShortArray) \
+ V(ExternalArray, empty_external_int_array, EmptyExternalIntArray) \
+ V(ExternalArray, empty_external_unsigned_int_array, \
+ EmptyExternalUnsignedIntArray) \
+ V(ExternalArray, empty_external_float_array, EmptyExternalFloatArray) \
+ V(ExternalArray, empty_external_double_array, EmptyExternalDoubleArray) \
+ V(ExternalArray, empty_external_pixel_array, \
+ EmptyExternalPixelArray) \
V(Map, non_strict_arguments_elements_map, NonStrictArgumentsElementsMap) \
V(Map, function_context_map, FunctionContextMap) \
V(Map, catch_context_map, CatchContextMap) \
@@ -140,117 +170,133 @@ namespace internal {
V(HeapNumber, minus_zero_value, MinusZeroValue) \
V(Map, neander_map, NeanderMap) \
V(JSObject, message_listeners, MessageListeners) \
- V(Foreign, prototype_accessors, PrototypeAccessors) \
V(UnseededNumberDictionary, code_stubs, CodeStubs) \
V(UnseededNumberDictionary, non_monomorphic_cache, NonMonomorphicCache) \
V(PolymorphicCodeCache, polymorphic_code_cache, PolymorphicCodeCache) \
V(Code, js_entry_code, JsEntryCode) \
V(Code, js_construct_entry_code, JsConstructEntryCode) \
V(FixedArray, natives_source_cache, NativesSourceCache) \
- V(Object, last_script_id, LastScriptId) \
+ V(Smi, last_script_id, LastScriptId) \
V(Script, empty_script, EmptyScript) \
V(Smi, real_stack_limit, RealStackLimit) \
- V(StringDictionary, intrinsic_function_names, IntrinsicFunctionNames) \
+ V(NameDictionary, intrinsic_function_names, IntrinsicFunctionNames) \
V(Smi, arguments_adaptor_deopt_pc_offset, ArgumentsAdaptorDeoptPCOffset) \
V(Smi, construct_stub_deopt_pc_offset, ConstructStubDeoptPCOffset) \
V(Smi, getter_stub_deopt_pc_offset, GetterStubDeoptPCOffset) \
- V(Smi, setter_stub_deopt_pc_offset, SetterStubDeoptPCOffset)
+ V(Smi, setter_stub_deopt_pc_offset, SetterStubDeoptPCOffset) \
+ V(JSObject, observation_state, ObservationState) \
+ V(Map, external_map, ExternalMap) \
+ V(Symbol, frozen_symbol, FrozenSymbol) \
+ V(Symbol, elements_transition_symbol, ElementsTransitionSymbol) \
+ V(SeededNumberDictionary, empty_slow_element_dictionary, \
+ EmptySlowElementDictionary) \
+ V(Symbol, observed_symbol, ObservedSymbol)
#define ROOT_LIST(V) \
STRONG_ROOT_LIST(V) \
- V(SymbolTable, symbol_table, SymbolTable)
-
-#define SYMBOL_LIST(V) \
- V(Array_symbol, "Array") \
- V(Object_symbol, "Object") \
- V(Proto_symbol, "__proto__") \
- V(StringImpl_symbol, "StringImpl") \
- V(arguments_symbol, "arguments") \
- V(Arguments_symbol, "Arguments") \
- V(call_symbol, "call") \
- V(apply_symbol, "apply") \
- V(caller_symbol, "caller") \
- V(boolean_symbol, "boolean") \
- V(Boolean_symbol, "Boolean") \
- V(callee_symbol, "callee") \
- V(constructor_symbol, "constructor") \
- V(code_symbol, ".code") \
- V(result_symbol, ".result") \
- V(dot_for_symbol, ".for.") \
- V(catch_var_symbol, ".catch-var") \
- V(empty_symbol, "") \
- V(eval_symbol, "eval") \
- V(function_symbol, "function") \
- V(length_symbol, "length") \
- V(module_symbol, "module") \
- V(name_symbol, "name") \
- V(native_symbol, "native") \
- V(null_symbol, "null") \
- V(number_symbol, "number") \
- V(Number_symbol, "Number") \
- V(nan_symbol, "NaN") \
- V(RegExp_symbol, "RegExp") \
- V(source_symbol, "source") \
- V(global_symbol, "global") \
- V(ignore_case_symbol, "ignoreCase") \
- V(multiline_symbol, "multiline") \
- V(input_symbol, "input") \
- V(index_symbol, "index") \
- V(last_index_symbol, "lastIndex") \
- V(object_symbol, "object") \
- V(prototype_symbol, "prototype") \
- V(string_symbol, "string") \
- V(String_symbol, "String") \
- V(Date_symbol, "Date") \
- V(this_symbol, "this") \
- V(to_string_symbol, "toString") \
- V(char_at_symbol, "CharAt") \
- V(undefined_symbol, "undefined") \
- V(value_of_symbol, "valueOf") \
- V(InitializeVarGlobal_symbol, "InitializeVarGlobal") \
- V(InitializeConstGlobal_symbol, "InitializeConstGlobal") \
- V(KeyedLoadElementMonomorphic_symbol, \
+ V(StringTable, string_table, StringTable)
+
+#define INTERNALIZED_STRING_LIST(V) \
+ V(Array_string, "Array") \
+ V(Object_string, "Object") \
+ V(proto_string, "__proto__") \
+ V(arguments_string, "arguments") \
+ V(Arguments_string, "Arguments") \
+ V(call_string, "call") \
+ V(apply_string, "apply") \
+ V(caller_string, "caller") \
+ V(boolean_string, "boolean") \
+ V(Boolean_string, "Boolean") \
+ V(callee_string, "callee") \
+ V(constructor_string, "constructor") \
+ V(result_string, ".result") \
+ V(dot_for_string, ".for.") \
+ V(eval_string, "eval") \
+ V(empty_string, "") \
+ V(function_string, "function") \
+ V(length_string, "length") \
+ V(module_string, "module") \
+ V(name_string, "name") \
+ V(native_string, "native") \
+ V(null_string, "null") \
+ V(number_string, "number") \
+ V(Number_string, "Number") \
+ V(nan_string, "NaN") \
+ V(RegExp_string, "RegExp") \
+ V(source_string, "source") \
+ V(global_string, "global") \
+ V(ignore_case_string, "ignoreCase") \
+ V(multiline_string, "multiline") \
+ V(input_string, "input") \
+ V(index_string, "index") \
+ V(last_index_string, "lastIndex") \
+ V(object_string, "object") \
+ V(literals_string, "literals") \
+ V(prototype_string, "prototype") \
+ V(string_string, "string") \
+ V(String_string, "String") \
+ V(symbol_string, "symbol") \
+ V(Symbol_string, "Symbol") \
+ V(Date_string, "Date") \
+ V(this_string, "this") \
+ V(to_string_string, "toString") \
+ V(char_at_string, "CharAt") \
+ V(undefined_string, "undefined") \
+ V(value_of_string, "valueOf") \
+ V(stack_string, "stack") \
+ V(toJSON_string, "toJSON") \
+ V(InitializeVarGlobal_string, "InitializeVarGlobal") \
+ V(InitializeConstGlobal_string, "InitializeConstGlobal") \
+ V(KeyedLoadElementMonomorphic_string, \
"KeyedLoadElementMonomorphic") \
- V(KeyedStoreElementMonomorphic_symbol, \
+ V(KeyedStoreElementMonomorphic_string, \
"KeyedStoreElementMonomorphic") \
- V(KeyedStoreAndGrowElementMonomorphic_symbol, \
- "KeyedStoreAndGrowElementMonomorphic") \
- V(stack_overflow_symbol, "kStackOverflowBoilerplate") \
- V(illegal_access_symbol, "illegal access") \
- V(out_of_memory_symbol, "out-of-memory") \
- V(illegal_execution_state_symbol, "illegal execution state") \
- V(get_symbol, "get") \
- V(set_symbol, "set") \
- V(function_class_symbol, "Function") \
- V(illegal_argument_symbol, "illegal argument") \
- V(MakeReferenceError_symbol, "MakeReferenceError") \
- V(MakeSyntaxError_symbol, "MakeSyntaxError") \
- V(MakeTypeError_symbol, "MakeTypeError") \
- V(invalid_lhs_in_assignment_symbol, "invalid_lhs_in_assignment") \
- V(invalid_lhs_in_for_in_symbol, "invalid_lhs_in_for_in") \
- V(invalid_lhs_in_postfix_op_symbol, "invalid_lhs_in_postfix_op") \
- V(invalid_lhs_in_prefix_op_symbol, "invalid_lhs_in_prefix_op") \
- V(illegal_return_symbol, "illegal_return") \
- V(illegal_break_symbol, "illegal_break") \
- V(illegal_continue_symbol, "illegal_continue") \
- V(unknown_label_symbol, "unknown_label") \
- V(redeclaration_symbol, "redeclaration") \
- V(failure_symbol, "<failure>") \
- V(space_symbol, " ") \
- V(exec_symbol, "exec") \
- V(zero_symbol, "0") \
- V(global_eval_symbol, "GlobalEval") \
- V(identity_hash_symbol, "v8::IdentityHash") \
- V(closure_symbol, "(closure)") \
- V(use_strict, "use strict") \
- V(dot_symbol, ".") \
- V(anonymous_function_symbol, "(anonymous function)") \
- V(compare_ic_symbol, "==") \
- V(strict_compare_ic_symbol, "===") \
- V(infinity_symbol, "Infinity") \
- V(minus_infinity_symbol, "-Infinity") \
- V(hidden_stack_trace_symbol, "v8::hidden_stack_trace") \
- V(query_colon_symbol, "(?:)")
+ V(stack_overflow_string, "kStackOverflowBoilerplate") \
+ V(illegal_access_string, "illegal access") \
+ V(illegal_execution_state_string, "illegal execution state") \
+ V(get_string, "get") \
+ V(set_string, "set") \
+ V(map_field_string, "%map") \
+ V(elements_field_string, "%elements") \
+ V(length_field_string, "%length") \
+ V(cell_value_string, "%cell_value") \
+ V(function_class_string, "Function") \
+ V(illegal_argument_string, "illegal argument") \
+ V(MakeReferenceError_string, "MakeReferenceError") \
+ V(MakeSyntaxError_string, "MakeSyntaxError") \
+ V(MakeTypeError_string, "MakeTypeError") \
+ V(invalid_lhs_in_assignment_string, "invalid_lhs_in_assignment") \
+ V(invalid_lhs_in_for_in_string, "invalid_lhs_in_for_in") \
+ V(invalid_lhs_in_postfix_op_string, "invalid_lhs_in_postfix_op") \
+ V(invalid_lhs_in_prefix_op_string, "invalid_lhs_in_prefix_op") \
+ V(illegal_return_string, "illegal_return") \
+ V(illegal_break_string, "illegal_break") \
+ V(illegal_continue_string, "illegal_continue") \
+ V(unknown_label_string, "unknown_label") \
+ V(redeclaration_string, "redeclaration") \
+ V(space_string, " ") \
+ V(exec_string, "exec") \
+ V(zero_string, "0") \
+ V(global_eval_string, "GlobalEval") \
+ V(identity_hash_string, "v8::IdentityHash") \
+ V(closure_string, "(closure)") \
+ V(use_strict_string, "use strict") \
+ V(dot_string, ".") \
+ V(anonymous_function_string, "(anonymous function)") \
+ V(compare_ic_string, "==") \
+ V(strict_compare_ic_string, "===") \
+ V(infinity_string, "Infinity") \
+ V(minus_infinity_string, "-Infinity") \
+ V(hidden_stack_trace_string, "v8::hidden_stack_trace") \
+ V(query_colon_string, "(?:)") \
+ V(Generator_string, "Generator") \
+ V(throw_string, "throw") \
+ V(done_string, "done") \
+ V(value_string, "value") \
+ V(next_string, "next") \
+ V(byte_length_string, "byteLength") \
+ V(byte_offset_string, "byteOffset") \
+ V(buffer_string, "buffer")
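INTERNALIZED_STRING_LIST is consumed with the X-macro idiom used throughout this header: a call site defines a V(name, value) macro, expands the list to stamp out one declaration per row, then undefines the macro (STRING_ACCESSOR and STRING_INDEX_DECLARATION further down are two such consumers). A generic, self-contained illustration of the idiom with made-up names:

// Hypothetical list; each V(field, literal) row produces one getter below.
#define COLOR_LIST(V) \
  V(red_string, "red") \
  V(green_string, "green")

struct Roots {
  const char* red_string;
  const char* green_string;
};

#define COLOR_GETTER(name, str) \
  const char* name(const Roots& roots) { return roots.name; }
COLOR_LIST(COLOR_GETTER)   // expands to red_string(...) and green_string(...)
#undef COLOR_GETTER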
// Forward declarations.
class GCTracer;
@@ -284,14 +330,6 @@ class StoreBufferRebuilder {
-// The all static Heap captures the interface to the global object heap.
-// All JavaScript contexts by this process share the same object heap.
-
-#ifdef DEBUG
-class HeapDebugUtils;
-#endif
-
-
// A queue of objects promoted during scavenge. Each object is accompanied
// by its size to avoid dereferencing a map pointer for scanning.
class PromotionQueue {
@@ -438,6 +476,7 @@ enum ArrayStorageAllocationMode {
INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE
};
+
class Heap {
public:
// Configure heap size before setup. Return false if the heap has been
@@ -447,10 +486,13 @@ class Heap {
intptr_t max_executable_size);
bool ConfigureHeapDefault();
- // Initializes the global object heap. If create_heap_objects is true,
- // also creates the basic non-mutable objects.
+ // Prepares the heap, setting up memory areas that are needed in the isolate
+ // without actually creating any objects.
+ bool SetUp();
+
+ // Bootstraps the object heap with the core set of objects required to run.
// Returns whether it succeeded.
- bool SetUp(bool create_heap_objects);
+ bool CreateHeapObjects();
// Destroys all memory allocated by the heap.
void TearDown();
@@ -476,6 +518,7 @@ class Heap {
int InitialSemiSpaceSize() { return initial_semispace_size_; }
intptr_t MaxOldGenerationSize() { return max_old_generation_size_; }
intptr_t MaxExecutableSize() { return max_executable_size_; }
+ int MaxRegularSpaceAllocationSize() { return InitialSemiSpaceSize() * 4/5; }
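MaxRegularSpaceAllocationSize relies on left-to-right evaluation of InitialSemiSpaceSize() * 4/5: the multiplication happens before the integer division, so the result is 80% of the semispace size rather than zero. A short illustration of why the order matters:

// Illustration only: integer order of operations for the 4/5 factor.
int FourFifths(int semispace_size) {
  int ok  = semispace_size * 4 / 5;    // e.g. 1048576 * 4 / 5 == 838860
  int bad = semispace_size * (4 / 5);  // 4 / 5 truncates to 0, so this is 0
  return ok - bad;                     // == ok, since bad is always 0
}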
// Returns the capacity of the heap in bytes w/o growing. Heap grows when
// more spaces are needed until it reaches the limit.
@@ -487,6 +530,9 @@ class Heap {
// Returns the amount of executable memory currently committed for the heap.
intptr_t CommittedMemoryExecutable();
+ // Returns the amount of physical memory currently committed for the heap.
+ size_t CommittedPhysicalMemory();
+
// Returns the available bytes in space w/o growing.
// Heap doesn't guarantee that it can allocate an object that requires
// all available bytes. Check MaxHeapObjectSize() instead.
@@ -508,6 +554,9 @@ class Heap {
OldSpace* code_space() { return code_space_; }
MapSpace* map_space() { return map_space_; }
CellSpace* cell_space() { return cell_space_; }
+ PropertyCellSpace* property_cell_space() {
+ return property_cell_space_;
+ }
LargeObjectSpace* lo_space() { return lo_space_; }
PagedSpace* paged_space(int idx) {
switch (idx) {
@@ -519,6 +568,8 @@ class Heap {
return map_space();
case CELL_SPACE:
return cell_space();
+ case PROPERTY_CELL_SPACE:
+ return property_cell_space();
case CODE_SPACE:
return code_space();
case NEW_SPACE:
@@ -543,6 +594,20 @@ class Heap {
return new_space_.allocation_limit_address();
}
+ Address* OldPointerSpaceAllocationTopAddress() {
+ return old_pointer_space_->allocation_top_address();
+ }
+ Address* OldPointerSpaceAllocationLimitAddress() {
+ return old_pointer_space_->allocation_limit_address();
+ }
+
+ Address* OldDataSpaceAllocationTopAddress() {
+ return old_data_space_->allocation_top_address();
+ }
+ Address* OldDataSpaceAllocationLimitAddress() {
+ return old_data_space_->allocation_limit_address();
+ }
+
// Uncommit unused semi space.
bool UncommitFromSpace() { return new_space_.UncommitFromSpace(); }
@@ -552,7 +617,15 @@ class Heap {
// failed.
// Please note this does not perform a garbage collection.
MUST_USE_RESULT MaybeObject* AllocateJSObject(
- JSFunction* constructor, PretenureFlag pretenure = NOT_TENURED);
+ JSFunction* constructor,
+ PretenureFlag pretenure = NOT_TENURED);
+
+ MUST_USE_RESULT MaybeObject* AllocateJSObjectWithAllocationSite(
+ JSFunction* constructor,
+ Handle<AllocationSite> allocation_site);
+
+ MUST_USE_RESULT MaybeObject* AllocateJSGeneratorObject(
+ JSFunction* function);
MUST_USE_RESULT MaybeObject* AllocateJSModule(Context* context,
ScopeInfo* scope_info);
@@ -575,22 +648,25 @@ class Heap {
ArrayStorageAllocationMode mode = DONT_INITIALIZE_ARRAY_ELEMENTS,
PretenureFlag pretenure = NOT_TENURED);
+ MUST_USE_RESULT MaybeObject* AllocateJSArrayStorage(
+ JSArray* array,
+ int length,
+ int capacity,
+ ArrayStorageAllocationMode mode = DONT_INITIALIZE_ARRAY_ELEMENTS);
+
// Allocate a JSArray with no elements
MUST_USE_RESULT MaybeObject* AllocateJSArrayWithElements(
FixedArrayBase* array_base,
ElementsKind elements_kind,
+ int length,
PretenureFlag pretenure = NOT_TENURED);
- // Allocates and initializes a new global object based on a constructor.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateGlobalObject(JSFunction* constructor);
-
// Returns a deep copy of the JavaScript object.
// Properties and elements are copied too.
// Returns failure if allocation failed.
- MUST_USE_RESULT MaybeObject* CopyJSObject(JSObject* source);
+ // Optionally takes an AllocationSite to be appended in an AllocationMemento.
+ MUST_USE_RESULT MaybeObject* CopyJSObject(JSObject* source,
+ AllocationSite* site = NULL);
// Allocates the function prototype.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
@@ -598,6 +674,12 @@ class Heap {
// Please note this does not perform a garbage collection.
MUST_USE_RESULT MaybeObject* AllocateFunctionPrototype(JSFunction* function);
+ // Allocates a JS ArrayBuffer object.
+ // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
+ // failed.
+ // Please note this does not perform a garbage collection.
+ MUST_USE_RESULT MaybeObject* AllocateJSArrayBuffer();
+
// Allocates a Harmony proxy or function proxy.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
@@ -631,7 +713,10 @@ class Heap {
// failed.
// Please note this does not perform a garbage collection.
MUST_USE_RESULT MaybeObject* AllocateJSObjectFromMap(
- Map* map, PretenureFlag pretenure = NOT_TENURED);
+ Map* map, PretenureFlag pretenure = NOT_TENURED, bool alloc_props = true);
+
+ MUST_USE_RESULT MaybeObject* AllocateJSObjectFromMapWithAllocationSite(
+ Map* map, Handle<AllocationSite> allocation_site);
// Allocates a heap object based on the map.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
@@ -639,6 +724,9 @@ class Heap {
// Please note this function does not perform a garbage collection.
MUST_USE_RESULT MaybeObject* Allocate(Map* map, AllocationSpace space);
+ MUST_USE_RESULT MaybeObject* AllocateWithAllocationSite(Map* map,
+ AllocationSpace space, Handle<AllocationSite> allocation_site);
+
// Allocates a JS Map in the heap.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
@@ -661,6 +749,9 @@ class Heap {
// Allocates a serialized scope info.
MUST_USE_RESULT MaybeObject* AllocateScopeInfo(int length);
+ // Allocates an External object for v8's external API.
+ MUST_USE_RESULT MaybeObject* AllocateExternal(void* value);
+
// Allocates an empty PolymorphicCodeCache.
MUST_USE_RESULT MaybeObject* AllocatePolymorphicCodeCache();
@@ -697,9 +788,16 @@ class Heap {
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateStringFromAscii(
- Vector<const char> str,
+ MUST_USE_RESULT MaybeObject* AllocateStringFromOneByte(
+ Vector<const uint8_t> str,
PretenureFlag pretenure = NOT_TENURED);
+ // TODO(dcarney): remove this function.
+ MUST_USE_RESULT inline MaybeObject* AllocateStringFromOneByte(
+ Vector<const char> str,
+ PretenureFlag pretenure = NOT_TENURED) {
+ return AllocateStringFromOneByte(Vector<const uint8_t>::cast(str),
+ pretenure);
+ }
MUST_USE_RESULT inline MaybeObject* AllocateStringFromUtf8(
Vector<const char> str,
PretenureFlag pretenure = NOT_TENURED);
@@ -711,28 +809,33 @@ class Heap {
Vector<const uc16> str,
PretenureFlag pretenure = NOT_TENURED);
- // Allocates a symbol in old space based on the character stream.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
+ // Allocates an internalized string in old space based on the character
+ // stream. Returns Failure::RetryAfterGC(requested_bytes, space) if the
+ // allocation failed.
// Please note this function does not perform a garbage collection.
- MUST_USE_RESULT inline MaybeObject* AllocateSymbol(Vector<const char> str,
- int chars,
- uint32_t hash_field);
+ MUST_USE_RESULT inline MaybeObject* AllocateInternalizedStringFromUtf8(
+ Vector<const char> str,
+ int chars,
+ uint32_t hash_field);
- MUST_USE_RESULT inline MaybeObject* AllocateAsciiSymbol(
- Vector<const char> str,
+ MUST_USE_RESULT inline MaybeObject* AllocateOneByteInternalizedString(
+ Vector<const uint8_t> str,
uint32_t hash_field);
- MUST_USE_RESULT inline MaybeObject* AllocateTwoByteSymbol(
+ MUST_USE_RESULT inline MaybeObject* AllocateTwoByteInternalizedString(
Vector<const uc16> str,
uint32_t hash_field);
- MUST_USE_RESULT MaybeObject* AllocateInternalSymbol(
- unibrow::CharacterStream* buffer, int chars, uint32_t hash_field);
+ template<typename T>
+ static inline bool IsOneByte(T t, int chars);
- MUST_USE_RESULT MaybeObject* AllocateExternalSymbol(
- Vector<const char> str,
- int chars);
+ template<typename T>
+ MUST_USE_RESULT inline MaybeObject* AllocateInternalizedStringImpl(
+ T t, int chars, uint32_t hash_field);
+
+ template<bool is_one_byte, typename T>
+ MUST_USE_RESULT MaybeObject* AllocateInternalizedStringImpl(
+ T t, int chars, uint32_t hash_field);
// Allocates and partially initializes a String. There are two String
// encodings: ASCII and two byte. These functions allocate a string of the
@@ -741,7 +844,7 @@ class Heap {
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateRawAsciiString(
+ MUST_USE_RESULT MaybeObject* AllocateRawOneByteString(
int length,
PretenureFlag pretenure = NOT_TENURED);
MUST_USE_RESULT MaybeObject* AllocateRawTwoByteString(
@@ -759,14 +862,9 @@ class Heap {
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateByteArray(int length,
- PretenureFlag pretenure);
-
- // Allocate a non-tenured byte array of the specified length
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateByteArray(int length);
+ MUST_USE_RESULT MaybeObject* AllocateByteArray(
+ int length,
+ PretenureFlag pretenure = NOT_TENURED);
// Allocates an external array of the specified length and type.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
@@ -778,20 +876,22 @@ class Heap {
void* external_pointer,
PretenureFlag pretenure);
- // Allocate a tenured JS global property cell.
+ // Allocate a symbol in old space.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateJSGlobalPropertyCell(Object* value);
+ MUST_USE_RESULT MaybeObject* AllocateSymbol();
+
+ // Allocate a tenured AllocationSite. Its payload is null.
+ MUST_USE_RESULT MaybeObject* AllocateAllocationSite();
// Allocates a fixed array initialized with undefined values
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateFixedArray(int length,
- PretenureFlag pretenure);
- // Allocates a fixed array initialized with undefined values
- MUST_USE_RESULT MaybeObject* AllocateFixedArray(int length);
+ MUST_USE_RESULT MaybeObject* AllocateFixedArray(
+ int length,
+ PretenureFlag pretenure = NOT_TENURED);
// Allocates an uninitialized fixed array. It must be filled by the caller.
//
@@ -800,6 +900,10 @@ class Heap {
// Please note this does not perform a garbage collection.
MUST_USE_RESULT MaybeObject* AllocateUninitializedFixedArray(int length);
+ // Move len elements within a given array from src_index to dst_index.
+ void MoveElements(FixedArray* array, int dst_index, int src_index, int len);
+
// Make a copy of src and return it. Returns
// Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
MUST_USE_RESULT inline MaybeObject* CopyFixedArray(FixedArray* src);
@@ -818,6 +922,16 @@ class Heap {
MUST_USE_RESULT MaybeObject* CopyFixedDoubleArrayWithMap(
FixedDoubleArray* src, Map* map);
+ // Make a copy of src and return it. Returns
+ // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
+ MUST_USE_RESULT inline MaybeObject* CopyConstantPoolArray(
+ ConstantPoolArray* src);
+
+ // Make a copy of src, set the map, and return the copy. Returns
+ // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
+ MUST_USE_RESULT MaybeObject* CopyConstantPoolArrayWithMap(
+ ConstantPoolArray* src, Map* map);
+
// Allocates a fixed array initialized with the hole values.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
@@ -826,9 +940,10 @@ class Heap {
int length,
PretenureFlag pretenure = NOT_TENURED);
- MUST_USE_RESULT MaybeObject* AllocateRawFixedDoubleArray(
- int length,
- PretenureFlag pretenure);
+ MUST_USE_RESULT MaybeObject* AllocateConstantPoolArray(
+ int first_int64_index,
+ int first_ptr_index,
+ int first_int32_index);
// Allocates a fixed double array with uninitialized values. Returns
// Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
@@ -871,7 +986,7 @@ class Heap {
// Allocate a 'with' context.
MUST_USE_RESULT MaybeObject* AllocateWithContext(JSFunction* function,
Context* previous,
- JSObject* extension);
+ JSReceiver* extension);
// Allocate a block context.
MUST_USE_RESULT MaybeObject* AllocateBlockContext(JSFunction* function,
@@ -916,10 +1031,7 @@ class Heap {
// Allocated a HeapNumber from value.
MUST_USE_RESULT MaybeObject* AllocateHeapNumber(
- double value,
- PretenureFlag pretenure);
- // pretenure = NOT_TENURED
- MUST_USE_RESULT MaybeObject* AllocateHeapNumber(double value);
+ double value, PretenureFlag pretenure = NOT_TENURED);
// Converts an int into either a Smi or a HeapNumber object.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
@@ -1013,10 +1125,13 @@ class Heap {
// self_reference. This allows generated code to reference its own Code
// object by containing this pointer.
// Please note this function does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* CreateCode(const CodeDesc& desc,
- Code::Flags flags,
- Handle<Object> self_reference,
- bool immovable = false);
+ MUST_USE_RESULT MaybeObject* CreateCode(
+ const CodeDesc& desc,
+ Code::Flags flags,
+ Handle<Object> self_reference,
+ bool immovable = false,
+ bool crankshafted = false,
+ int prologue_offset = Code::kPrologueOffsetNotSet);
MUST_USE_RESULT MaybeObject* CopyCode(Code* code);
@@ -1024,28 +1139,28 @@ class Heap {
// the provided data as the relocation information.
MUST_USE_RESULT MaybeObject* CopyCode(Code* code, Vector<byte> reloc_info);
- // Finds the symbol for string in the symbol table.
- // If not found, a new symbol is added to the table and returned.
+ // Finds the internalized copy for string in the string table.
+ // If not found, a new string is added to the table and returned.
// Returns Failure::RetryAfterGC(requested_bytes, space) if allocation
// failed.
// Please note this function does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* LookupSymbol(Vector<const char> str);
- MUST_USE_RESULT MaybeObject* LookupAsciiSymbol(Vector<const char> str);
- MUST_USE_RESULT MaybeObject* LookupTwoByteSymbol(Vector<const uc16> str);
- MUST_USE_RESULT MaybeObject* LookupAsciiSymbol(const char* str) {
- return LookupSymbol(CStrVector(str));
+ MUST_USE_RESULT MaybeObject* InternalizeUtf8String(Vector<const char> str);
+ MUST_USE_RESULT MaybeObject* InternalizeUtf8String(const char* str) {
+ return InternalizeUtf8String(CStrVector(str));
}
- MUST_USE_RESULT MaybeObject* LookupSymbol(String* str);
- MUST_USE_RESULT MaybeObject* LookupAsciiSymbol(Handle<SeqAsciiString> string,
- int from,
- int length);
+ MUST_USE_RESULT MaybeObject* InternalizeOneByteString(
+ Vector<const uint8_t> str);
+ MUST_USE_RESULT MaybeObject* InternalizeTwoByteString(Vector<const uc16> str);
+ MUST_USE_RESULT MaybeObject* InternalizeString(String* str);
+ MUST_USE_RESULT MaybeObject* InternalizeOneByteString(
+ Handle<SeqOneByteString> string, int from, int length);
- bool LookupSymbolIfExists(String* str, String** symbol);
- bool LookupTwoCharsSymbolIfExists(String* str, String** symbol);
+ bool InternalizeStringIfExists(String* str, String** result);
+ bool InternalizeTwoCharsStringIfExists(String* str, String** result);
- // Compute the matching symbol map for a string if possible.
+ // Compute the matching internalized string map for a string if possible.
// NULL is returned if string is in new space or not flattened.
- Map* SymbolMapForString(String* str);
+ Map* InternalizedStringMapForString(String* str);
// Tries to flatten a string before compare operation.
//
@@ -1104,10 +1219,7 @@ class Heap {
void EnsureHeapIsIterable();
// Notify the heap that a context has been disposed.
- int NotifyContextDisposed() {
- flush_monomorphic_ics_ = true;
- return ++contexts_disposed_;
- }
+ int NotifyContextDisposed();
// Utility to invoke the scavenger. This is needed in test code to
// ensure correct callback for weak global handles.
@@ -1134,22 +1246,15 @@ class Heap {
void GarbageCollectionGreedyCheck();
#endif
- void AddGCPrologueCallback(
- GCPrologueCallback callback, GCType gc_type_filter);
- void RemoveGCPrologueCallback(GCPrologueCallback callback);
-
- void AddGCEpilogueCallback(
- GCEpilogueCallback callback, GCType gc_type_filter);
- void RemoveGCEpilogueCallback(GCEpilogueCallback callback);
+ void AddGCPrologueCallback(v8::Isolate::GCPrologueCallback callback,
+ GCType gc_type_filter,
+ bool pass_isolate = true);
+ void RemoveGCPrologueCallback(v8::Isolate::GCPrologueCallback callback);
- void SetGlobalGCPrologueCallback(GCCallback callback) {
- ASSERT((callback == NULL) ^ (global_gc_prologue_callback_ == NULL));
- global_gc_prologue_callback_ = callback;
- }
- void SetGlobalGCEpilogueCallback(GCCallback callback) {
- ASSERT((callback == NULL) ^ (global_gc_epilogue_callback_ == NULL));
- global_gc_epilogue_callback_ = callback;
- }
+ void AddGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback,
+ GCType gc_type_filter,
+ bool pass_isolate = true);
+ void RemoveGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback);
// Heap root getters. We have versions with and without type::cast() here.
// You can't use type::cast during GC because the assert fails.
@@ -1173,21 +1278,34 @@ class Heap {
STRUCT_LIST(STRUCT_MAP_ACCESSOR)
#undef STRUCT_MAP_ACCESSOR
-#define SYMBOL_ACCESSOR(name, str) String* name() { \
+#define STRING_ACCESSOR(name, str) String* name() { \
return String::cast(roots_[k##name##RootIndex]); \
}
- SYMBOL_LIST(SYMBOL_ACCESSOR)
-#undef SYMBOL_ACCESSOR
+ INTERNALIZED_STRING_LIST(STRING_ACCESSOR)
+#undef STRING_ACCESSOR
- // The hidden_symbol is special because it is the empty string, but does
+ // The hidden_string is special because it is the empty string, but does
// not match the empty string.
- String* hidden_symbol() { return hidden_symbol_; }
+ String* hidden_string() { return hidden_string_; }
void set_native_contexts_list(Object* object) {
native_contexts_list_ = object;
}
Object* native_contexts_list() { return native_contexts_list_; }
+ void set_array_buffers_list(Object* object) {
+ array_buffers_list_ = object;
+ }
+ Object* array_buffers_list() { return array_buffers_list_; }
+
+ void set_allocation_sites_list(Object* object) {
+ allocation_sites_list_ = object;
+ }
+ Object* allocation_sites_list() { return allocation_sites_list_; }
+ Object** allocation_sites_list_address() { return &allocation_sites_list_; }
+
+ Object* weak_object_to_code_table() { return weak_object_to_code_table_; }
+
// Number of mark-sweeps.
unsigned int ms_count() { return ms_count_; }
@@ -1206,11 +1324,19 @@ class Heap {
// Returns whether the object resides in new space.
inline bool InNewSpace(Object* object);
- inline bool InNewSpace(Address addr);
- inline bool InNewSpacePage(Address addr);
+ inline bool InNewSpace(Address address);
+ inline bool InNewSpacePage(Address address);
inline bool InFromSpace(Object* object);
inline bool InToSpace(Object* object);
+ // Returns whether the object resides in old pointer space.
+ inline bool InOldPointerSpace(Address address);
+ inline bool InOldPointerSpace(Object* object);
+
+ // Returns whether the object resides in old data space.
+ inline bool InOldDataSpace(Address address);
+ inline bool InOldDataSpace(Object* object);
+
// Checks whether an address/object in the heap (including auxiliary
// area and unused area).
bool Contains(Address addr);
@@ -1223,7 +1349,11 @@ class Heap {
// Finds out which space an object should get promoted to based on its type.
inline OldSpace* TargetSpace(HeapObject* object);
- inline AllocationSpace TargetSpaceId(InstanceType type);
+ static inline AllocationSpace TargetSpaceId(InstanceType type);
+
+ // Checks whether the given object is allowed to be migrated from its
+ // current space into the given destination space. Used for debugging.
+ inline bool AllowedToBeMigrated(HeapObject* object, AllocationSpace dest);
// Sets the stub_cache_ (only used when expanding the dictionary).
void public_set_code_stubs(UnseededNumberDictionary* value) {
@@ -1250,9 +1380,6 @@ class Heap {
roots_[kStoreBufferTopRootIndex] = reinterpret_cast<Smi*>(top);
}
- // Update the next script id.
- inline void SetLastScriptId(Object* last_script_id);
-
// Generated code can embed this address to get access to the roots.
Object** roots_array_start() { return roots_; }
@@ -1268,6 +1395,11 @@ class Heap {
#ifdef VERIFY_HEAP
// Verify the heap is in its normal state before or after a GC.
void Verify();
+
+
+ bool weak_embedded_objects_verification_enabled() {
+ return no_weak_object_verification_scope_depth_ == 0;
+ }
#endif
#ifdef DEBUG
@@ -1302,25 +1434,19 @@ class Heap {
// Print short heap statistics.
void PrintShortHeapStatistics();
- // Makes a new symbol object
+ // Makes a new internalized string object
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this function does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* CreateSymbol(
+ MUST_USE_RESULT MaybeObject* CreateInternalizedString(
const char* str, int length, int hash);
- MUST_USE_RESULT MaybeObject* CreateSymbol(String* str);
+ MUST_USE_RESULT MaybeObject* CreateInternalizedString(String* str);
// Write barrier support for address[offset] = o.
- inline void RecordWrite(Address address, int offset);
+ INLINE(void RecordWrite(Address address, int offset));
// Write barrier support for address[start : start + len[ = o.
- inline void RecordWrites(Address address, int start, int len);
-
- // Given an address occupied by a live code object, return that object.
- Object* FindCodeObject(Address a);
-
- // Invoke Shrink on shrinkable spaces.
- void Shrink();
+ INLINE(void RecordWrites(Address address, int start, int len));
enum HeapState { NOT_IN_GC, SCAVENGE, MARK_COMPACT };
inline HeapState gc_state() { return gc_state_; }
@@ -1328,8 +1454,9 @@ class Heap {
inline bool IsInGCPostProcessing() { return gc_post_processing_depth_ > 0; }
#ifdef DEBUG
- bool IsAllocationAllowed() { return allocation_allowed_; }
- inline bool allow_allocation(bool enable);
+ void set_allocation_timeout(int timeout) {
+ allocation_timeout_ = timeout;
+ }
bool disallow_allocation_failure() {
return disallow_allocation_failure_;
@@ -1372,49 +1499,49 @@ class Heap {
inline intptr_t AdjustAmountOfExternalAllocatedMemory(
intptr_t change_in_bytes);
- // Allocate uninitialized fixed array.
- MUST_USE_RESULT MaybeObject* AllocateRawFixedArray(int length);
- MUST_USE_RESULT MaybeObject* AllocateRawFixedArray(int length,
- PretenureFlag pretenure);
+ // This is only needed for testing high promotion mode.
+ void SetNewSpaceHighPromotionModeActive(bool mode) {
+ new_space_high_promotion_mode_active_ = mode;
+ }
- inline intptr_t PromotedTotalSize() {
- return PromotedSpaceSizeOfObjects() + PromotedExternalMemorySize();
+ // Returns the allocation mode (pre-tenuring) based on observed promotion
+ // rates of previous collections.
+ inline PretenureFlag GetPretenureMode() {
+ return FLAG_pretenuring && new_space_high_promotion_mode_active_
+ ? TENURED : NOT_TENURED;
}
- // True if we have reached the allocation limit in the old generation that
- // should force the next GC (caused normally) to be a full one.
- inline bool OldGenerationPromotionLimitReached() {
- return PromotedTotalSize() > old_gen_promotion_limit_;
+ inline Address* NewSpaceHighPromotionModeActiveAddress() {
+ return reinterpret_cast<Address*>(&new_space_high_promotion_mode_active_);
+ }
+
+ inline intptr_t PromotedTotalSize() {
+ return PromotedSpaceSizeOfObjects() + PromotedExternalMemorySize();
}
inline intptr_t OldGenerationSpaceAvailable() {
- return old_gen_allocation_limit_ - PromotedTotalSize();
+ return old_generation_allocation_limit_ - PromotedTotalSize();
}
inline intptr_t OldGenerationCapacityAvailable() {
return max_old_generation_size_ - PromotedTotalSize();
}
- static const intptr_t kMinimumPromotionLimit = 5 * Page::kPageSize;
- static const intptr_t kMinimumAllocationLimit =
+ static const intptr_t kMinimumOldGenerationAllocationLimit =
8 * (Page::kPageSize > MB ? Page::kPageSize : MB);
- intptr_t OldGenPromotionLimit(intptr_t old_gen_size) {
- const int divisor = FLAG_stress_compaction ? 10 : 3;
+ intptr_t OldGenerationAllocationLimit(intptr_t old_gen_size) {
+ const int divisor = FLAG_stress_compaction ? 10 :
+ new_space_high_promotion_mode_active_ ? 1 : 3;
intptr_t limit =
- Max(old_gen_size + old_gen_size / divisor, kMinimumPromotionLimit);
+ Max(old_gen_size + old_gen_size / divisor,
+ kMinimumOldGenerationAllocationLimit);
limit += new_space_.Capacity();
- limit *= old_gen_limit_factor_;
- intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2;
- return Min(limit, halfway_to_the_max);
- }
-
- intptr_t OldGenAllocationLimit(intptr_t old_gen_size) {
- const int divisor = FLAG_stress_compaction ? 8 : 2;
- intptr_t limit =
- Max(old_gen_size + old_gen_size / divisor, kMinimumAllocationLimit);
- limit += new_space_.Capacity();
- limit *= old_gen_limit_factor_;
+ // TODO(hpayer): Can be removed when pretenuring is supported for all
+ // allocation sites.
+ if (IsHighSurvivalRate() && IsStableOrIncreasingSurvivalTrend()) {
+ limit *= 2;
+ }
intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2;
return Min(limit, halfway_to_the_max);
}
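The limit computed above grows by a divisor-dependent fraction of the current old-generation size (one third normally, one tenth under --stress-compaction, the full size when high promotion mode is active), never drops below the minimum, adds the new-space capacity, doubles when the survival rate is high and stable or increasing, and is finally capped halfway between the current size and the configured maximum. A worked sketch with hypothetical figures (96 MB old generation, 8 MB new space, 700 MB maximum, normal divisor 3, no survival-rate doubling):

#include <stdint.h>
#include <algorithm>

int64_t AllocationLimitSketch(int64_t old_gen, int64_t new_space_capacity,
                              int64_t max_old_gen, int divisor) {
  const int64_t kMinimum = 8 * 1024 * 1024;                // stand-in minimum
  int64_t limit = std::max(old_gen + old_gen / divisor, kMinimum)
                  + new_space_capacity;                    // 96 + 32 + 8 = 136 MB
  int64_t halfway = (old_gen + max_old_gen) / 2;           // (96 + 700) / 2 = 398 MB
  return std::min(limit, halfway);                         // 136 MB is returned
}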
@@ -1428,17 +1555,17 @@ class Heap {
STRONG_ROOT_LIST(ROOT_INDEX_DECLARATION)
#undef ROOT_INDEX_DECLARATION
-#define SYMBOL_INDEX_DECLARATION(name, str) k##name##RootIndex,
- SYMBOL_LIST(SYMBOL_INDEX_DECLARATION)
-#undef SYMBOL_DECLARATION
+#define STRING_INDEX_DECLARATION(name, str) k##name##RootIndex,
+ INTERNALIZED_STRING_LIST(STRING_INDEX_DECLARATION)
+#undef STRING_DECLARATION
// Utility type maps
#define DECLARE_STRUCT_MAP(NAME, Name, name) k##Name##MapRootIndex,
STRUCT_LIST(DECLARE_STRUCT_MAP)
#undef DECLARE_STRUCT_MAP
- kSymbolTableRootIndex,
- kStrongRootListLength = kSymbolTableRootIndex,
+ kStringTableRootIndex,
+ kStrongRootListLength = kStringTableRootIndex,
kRootListLength
};
@@ -1446,10 +1573,17 @@ class Heap {
STATIC_CHECK(kNullValueRootIndex == Internals::kNullValueRootIndex);
STATIC_CHECK(kTrueValueRootIndex == Internals::kTrueValueRootIndex);
STATIC_CHECK(kFalseValueRootIndex == Internals::kFalseValueRootIndex);
- STATIC_CHECK(kempty_symbolRootIndex == Internals::kEmptySymbolRootIndex);
+ STATIC_CHECK(kempty_stringRootIndex == Internals::kEmptyStringRootIndex);
+
+ // Generated code can embed direct references to non-writable roots if
+ // they are in new space.
+ static bool RootCanBeWrittenAfterInitialization(RootListIndex root_index);
+ // Generated code can treat direct references to this root as constant.
+ bool RootCanBeTreatedAsConstant(RootListIndex root_index);
MUST_USE_RESULT MaybeObject* NumberToString(
- Object* number, bool check_number_string_cache = true);
+ Object* number, bool check_number_string_cache = true,
+ PretenureFlag pretenure = NOT_TENURED);
MUST_USE_RESULT MaybeObject* Uint32ToString(
uint32_t value, bool check_number_string_cache = true);
@@ -1457,6 +1591,9 @@ class Heap {
RootListIndex RootIndexForExternalArrayType(
ExternalArrayType array_type);
+ RootListIndex RootIndexForEmptyExternalArray(ElementsKind kind);
+ ExternalArray* EmptyExternalArrayForMap(Map* map);
+
void RecordStats(HeapStats* stats, bool take_snapshot = false);
// Copy block of memory from src to dst. Size of block should be aligned
@@ -1481,22 +1618,14 @@ class Heap {
if (FLAG_stress_compaction && (gc_count_ & 1) != 0) return true;
- intptr_t total_promoted = PromotedTotalSize();
-
- intptr_t adjusted_promotion_limit =
- old_gen_promotion_limit_ - new_space_.Capacity();
-
- if (total_promoted >= adjusted_promotion_limit) return true;
-
intptr_t adjusted_allocation_limit =
- old_gen_allocation_limit_ - new_space_.Capacity() / 5;
+ old_generation_allocation_limit_ - new_space_.Capacity();
- if (PromotedSpaceSizeOfObjects() >= adjusted_allocation_limit) return true;
+ if (PromotedTotalSize() >= adjusted_allocation_limit) return true;
return false;
}
-
void UpdateNewSpaceReferencesInExternalStringTable(
ExternalStringTableUpdaterCallback updater_func);
@@ -1513,8 +1642,6 @@ class Heap {
// we try to promote this object.
inline bool ShouldBePromoted(Address old_address, int object_size);
- int MaxObjectSizeInNewSpace() { return kMaxObjectSizeInNewSpace; }
-
void ClearJSFunctionResultCaches();
void ClearNormalizedMapCaches();
@@ -1529,14 +1656,40 @@ class Heap {
total_regexp_code_generated_ += size;
}
+ void IncrementCodeGeneratedBytes(bool is_crankshafted, int size) {
+ if (is_crankshafted) {
+ crankshaft_codegen_bytes_generated_ += size;
+ } else {
+ full_codegen_bytes_generated_ += size;
+ }
+ }
+
// Returns maximum GC pause.
- int get_max_gc_pause() { return max_gc_pause_; }
+ double get_max_gc_pause() { return max_gc_pause_; }
// Returns maximum size of objects alive after GC.
intptr_t get_max_alive_after_gc() { return max_alive_after_gc_; }
// Returns minimal interval between two subsequent collections.
- int get_min_in_mutator() { return min_in_mutator_; }
+ double get_min_in_mutator() { return min_in_mutator_; }
+
+ // TODO(hpayer): remove, should be handled by GCTracer
+ void AddMarkingTime(double marking_time) {
+ marking_time_ += marking_time;
+ }
+
+ double marking_time() const {
+ return marking_time_;
+ }
+
+ // TODO(hpayer): remove, should be handled by GCTracer
+ void AddSweepingTime(double sweeping_time) {
+ sweeping_time_ += sweeping_time;
+ }
+
+ double sweeping_time() const {
+ return sweeping_time_;
+ }
MarkCompactCollector* mark_compact_collector() {
return &mark_compact_collector_;
@@ -1555,16 +1708,24 @@ class Heap {
}
bool IsSweepingComplete() {
- return old_data_space()->IsSweepingComplete() &&
- old_pointer_space()->IsSweepingComplete();
+ return !mark_compact_collector()->IsConcurrentSweepingInProgress() &&
+ old_data_space()->IsLazySweepingComplete() &&
+ old_pointer_space()->IsLazySweepingComplete();
}
bool AdvanceSweepers(int step_size) {
+ ASSERT(!FLAG_parallel_sweeping && !FLAG_concurrent_sweeping);
bool sweeping_complete = old_data_space()->AdvanceSweeper(step_size);
sweeping_complete &= old_pointer_space()->AdvanceSweeper(step_size);
return sweeping_complete;
}
+ bool EnsureSweepersProgressed(int step_size) {
+ bool sweeping_complete = old_data_space()->EnsureSweeperProgress(step_size);
+ sweeping_complete &= old_pointer_space()->EnsureSweeperProgress(step_size);
+ return sweeping_complete;
+ }
+
ExternalStringTable* external_string_table() {
return &external_string_table_;
}
@@ -1576,13 +1737,8 @@ class Heap {
inline Isolate* isolate();
- inline void CallGlobalGCPrologueCallback() {
- if (global_gc_prologue_callback_ != NULL) global_gc_prologue_callback_();
- }
-
- inline void CallGlobalGCEpilogueCallback() {
- if (global_gc_epilogue_callback_ != NULL) global_gc_epilogue_callback_();
- }
+ void CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags);
+ void CallGCEpilogueCallbacks(GCType gc_type);
inline bool OldGenerationAllocationLimitReached();
@@ -1593,6 +1749,8 @@ class Heap {
void QueueMemoryChunkForFree(MemoryChunk* chunk);
void FreeQueuedChunks();
+ int gc_count() const { return gc_count_; }
+
// Completely clear the Instanceof cache (to stop it keeping objects alive
// around a GC).
inline void CompletelyClearInstanceofCache();
@@ -1651,50 +1809,70 @@ class Heap {
enum {
FIRST_CODE_KIND_SUB_TYPE = LAST_TYPE + 1,
FIRST_FIXED_ARRAY_SUB_TYPE =
- FIRST_CODE_KIND_SUB_TYPE + Code::LAST_CODE_KIND + 1,
- OBJECT_STATS_COUNT =
- FIRST_FIXED_ARRAY_SUB_TYPE + LAST_FIXED_ARRAY_SUB_TYPE + 1
+ FIRST_CODE_KIND_SUB_TYPE + Code::NUMBER_OF_KINDS,
+ FIRST_CODE_AGE_SUB_TYPE =
+ FIRST_FIXED_ARRAY_SUB_TYPE + LAST_FIXED_ARRAY_SUB_TYPE + 1,
+ OBJECT_STATS_COUNT = FIRST_CODE_AGE_SUB_TYPE + Code::kLastCodeAge + 1
};
- void RecordObjectStats(InstanceType type, int sub_type, size_t size) {
+ void RecordObjectStats(InstanceType type, size_t size) {
ASSERT(type <= LAST_TYPE);
- if (sub_type < 0) {
- object_counts_[type]++;
- object_sizes_[type] += size;
- } else {
- if (type == CODE_TYPE) {
- ASSERT(sub_type <= Code::LAST_CODE_KIND);
- object_counts_[FIRST_CODE_KIND_SUB_TYPE + sub_type]++;
- object_sizes_[FIRST_CODE_KIND_SUB_TYPE + sub_type] += size;
- } else if (type == FIXED_ARRAY_TYPE) {
- ASSERT(sub_type <= LAST_FIXED_ARRAY_SUB_TYPE);
- object_counts_[FIRST_FIXED_ARRAY_SUB_TYPE + sub_type]++;
- object_sizes_[FIRST_FIXED_ARRAY_SUB_TYPE + sub_type] += size;
- }
- }
+ object_counts_[type]++;
+ object_sizes_[type] += size;
+ }
+
+ void RecordCodeSubTypeStats(int code_sub_type, int code_age, size_t size) {
+ ASSERT(code_sub_type < Code::NUMBER_OF_KINDS);
+ ASSERT(code_age < Code::kLastCodeAge);
+ object_counts_[FIRST_CODE_KIND_SUB_TYPE + code_sub_type]++;
+ object_sizes_[FIRST_CODE_KIND_SUB_TYPE + code_sub_type] += size;
+ object_counts_[FIRST_CODE_AGE_SUB_TYPE + code_age]++;
+ object_sizes_[FIRST_CODE_AGE_SUB_TYPE + code_age] += size;
+ }
+
+ void RecordFixedArraySubTypeStats(int array_sub_type, size_t size) {
+ ASSERT(array_sub_type <= LAST_FIXED_ARRAY_SUB_TYPE);
+ object_counts_[FIRST_FIXED_ARRAY_SUB_TYPE + array_sub_type]++;
+ object_sizes_[FIRST_FIXED_ARRAY_SUB_TYPE + array_sub_type] += size;
}
void CheckpointObjectStats();
- // We don't use a ScopedLock here since we want to lock the heap
- // only when FLAG_parallel_recompilation is true.
+ // We don't use a LockGuard here since we want to lock the heap
+ // only when FLAG_concurrent_recompilation is true.
class RelocationLock {
public:
- explicit RelocationLock(Heap* heap) : heap_(heap) {
- if (FLAG_parallel_recompilation) {
- heap_->relocation_mutex_->Lock();
- }
- }
+ explicit RelocationLock(Heap* heap);
+
~RelocationLock() {
- if (FLAG_parallel_recompilation) {
+ if (FLAG_concurrent_recompilation) {
+#ifdef DEBUG
+ heap_->relocation_mutex_locked_by_optimizer_thread_ = false;
+#endif // DEBUG
heap_->relocation_mutex_->Unlock();
}
}
+#ifdef DEBUG
+ static bool IsLockedByOptimizerThread(Heap* heap) {
+ return heap->relocation_mutex_locked_by_optimizer_thread_;
+ }
+#endif // DEBUG
+
private:
Heap* heap_;
};
+ MaybeObject* AddWeakObjectToCodeDependency(Object* obj, DependentCode* dep);
+
+ DependentCode* LookupWeakObjectToCodeDependency(Object* obj);
+
+ void InitializeWeakObjectToCodeTable() {
+ set_weak_object_to_code_table(undefined_value());
+ }
+
+ void EnsureWeakObjectToCodeTable();
+
private:
Heap();
@@ -1728,13 +1906,10 @@ class Heap {
bool flush_monomorphic_ics_;
- int scan_on_scavenge_pages_;
+ // AllocationMementos found in new space.
+ int allocation_mementos_found_;
-#if defined(V8_TARGET_ARCH_X64)
- static const int kMaxObjectSizeInNewSpace = 1024*KB;
-#else
- static const int kMaxObjectSizeInNewSpace = 512*KB;
-#endif
+ int scan_on_scavenge_pages_;
NewSpace new_space_;
OldSpace* old_pointer_space_;
@@ -1742,6 +1917,7 @@ class Heap {
OldSpace* code_space_;
MapSpace* map_space_;
CellSpace* cell_space_;
+ PropertyCellSpace* property_cell_space_;
LargeObjectSpace* lo_space_;
HeapState gc_state_;
int gc_post_processing_depth_;
@@ -1771,8 +1947,6 @@ class Heap {
#undef ROOT_ACCESSOR
#ifdef DEBUG
- bool allocation_allowed_;
-
// If the --gc-interval flag is set to a positive value, this
// variable holds the value indicating the number of allocations
// remain until the next failure and garbage collection.
@@ -1781,27 +1955,18 @@ class Heap {
// Do we expect to be able to handle allocation failure at this
// time?
bool disallow_allocation_failure_;
-
- HeapDebugUtils* debug_utils_;
#endif // DEBUG
// Indicates that the new space should be kept small due to high promotion
// rates caused by the mutator allocating a lot of long-lived objects.
- bool new_space_high_promotion_mode_active_;
+ // TODO(hpayer): change to bool if no longer accessed from generated code
+ intptr_t new_space_high_promotion_mode_active_;
// Limit that triggers a global GC on the next (normally caused) GC. This
// is checked when we have already decided to do a GC to help determine
- // which collector to invoke.
- intptr_t old_gen_promotion_limit_;
-
- // Limit that triggers a global GC as soon as is reasonable. This is
- // checked before expanding a paged space in the old generation and on
- // every allocation in large object space.
- intptr_t old_gen_allocation_limit_;
-
- // Sometimes the heuristics dictate that those limits are increased. This
- // variable records that fact.
- int old_gen_limit_factor_;
+ // which collector to invoke, before expanding a paged space in the old
+ // generation and on every allocation in large object space.
+ intptr_t old_generation_allocation_limit_;
// Used to adjust the limits that control the timing of the next GC.
intptr_t size_of_old_gen_at_last_old_space_gc_;
@@ -1819,9 +1984,18 @@ class Heap {
// Indicates that an allocation has failed in the old generation since the
// last GC.
- int old_gen_exhausted_;
+ bool old_gen_exhausted_;
+ // Weak list heads, threaded through the objects.
+  // List heads are initialized lazily and contain the undefined_value at start.
Object* native_contexts_list_;
+ Object* array_buffers_list_;
+ Object* allocation_sites_list_;
+
+ // WeakHashTable that maps objects embedded in optimized code to dependent
+ // code list. It is initilized lazily and contains the undefined_value at
+  // code list. It is initialized lazily and contains the undefined_value at
+ Object* weak_object_to_code_table_;
StoreBufferRebuilder store_buffer_rebuilder_;
@@ -1831,7 +2005,7 @@ class Heap {
RootListIndex index;
};
- struct ConstantSymbolTable {
+ struct ConstantStringTable {
const char* contents;
RootListIndex index;
};
@@ -1843,42 +2017,47 @@ class Heap {
};
static const StringTypeTable string_type_table[];
- static const ConstantSymbolTable constant_symbol_table[];
+ static const ConstantStringTable constant_string_table[];
static const StructTable struct_table[];
- // The special hidden symbol which is an empty string, but does not match
+ // The special hidden string which is an empty string, but does not match
// any string when looked up in properties.
- String* hidden_symbol_;
+ String* hidden_string_;
// GC callback function, called before and after mark-compact GC.
// Allocations in the callback function are disallowed.
struct GCPrologueCallbackPair {
- GCPrologueCallbackPair(GCPrologueCallback callback, GCType gc_type)
- : callback(callback), gc_type(gc_type) {
+ GCPrologueCallbackPair(v8::Isolate::GCPrologueCallback callback,
+ GCType gc_type,
+ bool pass_isolate)
+ : callback(callback), gc_type(gc_type), pass_isolate_(pass_isolate) {
}
bool operator==(const GCPrologueCallbackPair& pair) const {
return pair.callback == callback;
}
- GCPrologueCallback callback;
+ v8::Isolate::GCPrologueCallback callback;
GCType gc_type;
+ // TODO(dcarney): remove variable
+ bool pass_isolate_;
};
List<GCPrologueCallbackPair> gc_prologue_callbacks_;
struct GCEpilogueCallbackPair {
- GCEpilogueCallbackPair(GCEpilogueCallback callback, GCType gc_type)
- : callback(callback), gc_type(gc_type) {
+ GCEpilogueCallbackPair(v8::Isolate::GCPrologueCallback callback,
+ GCType gc_type,
+ bool pass_isolate)
+ : callback(callback), gc_type(gc_type), pass_isolate_(pass_isolate) {
}
bool operator==(const GCEpilogueCallbackPair& pair) const {
return pair.callback == callback;
}
- GCEpilogueCallback callback;
+ v8::Isolate::GCPrologueCallback callback;
GCType gc_type;
+ // TODO(dcarney): remove variable
+ bool pass_isolate_;
};
List<GCEpilogueCallbackPair> gc_epilogue_callbacks_;
- GCCallback global_gc_prologue_callback_;
- GCCallback global_gc_epilogue_callback_;
-
// Support for computing object sizes during GC.
HeapObjectCallback gc_safe_size_of_old_object_;
static int GcSafeSizeOfOldObject(HeapObject* object);
@@ -1899,17 +2078,30 @@ class Heap {
bool PerformGarbageCollection(GarbageCollector collector,
GCTracer* tracer);
-
inline void UpdateOldSpaceLimits();
- // Allocate an uninitialized object in map space. The behavior is identical
- // to Heap::AllocateRaw(size_in_bytes, MAP_SPACE), except that (a) it doesn't
- // have to test the allocation space argument and (b) can reduce code size
- // (since both AllocateRaw and AllocateRawMap are inlined).
- MUST_USE_RESULT inline MaybeObject* AllocateRawMap();
+ // Selects the proper allocation space depending on the given object
+ // size, pretenuring decision, and preferred old-space.
+ static AllocationSpace SelectSpace(int object_size,
+ AllocationSpace preferred_old_space,
+ PretenureFlag pretenure) {
+ ASSERT(preferred_old_space == OLD_POINTER_SPACE ||
+ preferred_old_space == OLD_DATA_SPACE);
+ if (object_size > Page::kMaxNonCodeHeapObjectSize) return LO_SPACE;
+ return (pretenure == TENURED) ? preferred_old_space : NEW_SPACE;
+ }
- // Allocate an uninitialized object in the global property cell space.
- MUST_USE_RESULT inline MaybeObject* AllocateRawCell();
+ // Allocate an uninitialized fixed array.
+ MUST_USE_RESULT MaybeObject* AllocateRawFixedArray(
+ int length, PretenureFlag pretenure);
+
+ // Allocate an uninitialized fixed double array.
+ MUST_USE_RESULT MaybeObject* AllocateRawFixedDoubleArray(
+ int length, PretenureFlag pretenure);
+
+ // Allocate an initialized fixed array with the given filler value.
+ MUST_USE_RESULT MaybeObject* AllocateFixedArrayWithFiller(
+ int length, PretenureFlag pretenure, Object* filler);
// Initializes a JSObject based on its map.
void InitializeJSObjectFromMap(JSObject* obj,
@@ -1926,9 +2118,9 @@ class Heap {
void CreateFixedStubs();
- MaybeObject* CreateOddball(const char* to_string,
- Object* to_number,
- byte kind);
+ MUST_USE_RESULT MaybeObject* CreateOddball(const char* to_string,
+ Object* to_number,
+ byte kind);
// Allocate a JSArray with no elements
MUST_USE_RESULT MaybeObject* AllocateJSArray(
@@ -1938,9 +2130,23 @@ class Heap {
// Allocate empty fixed array.
MUST_USE_RESULT MaybeObject* AllocateEmptyFixedArray();
+ // Allocate empty external array of given type.
+ MUST_USE_RESULT MaybeObject* AllocateEmptyExternalArray(
+ ExternalArrayType array_type);
+
// Allocate empty fixed double array.
MUST_USE_RESULT MaybeObject* AllocateEmptyFixedDoubleArray();
+ // Allocate a tenured simple cell.
+ MUST_USE_RESULT MaybeObject* AllocateCell(Object* value);
+
+ // Allocate a tenured JS global property cell initialized with the hole.
+ MUST_USE_RESULT MaybeObject* AllocatePropertyCell();
+
+ // Allocate Box.
+ MUST_USE_RESULT MaybeObject* AllocateBox(Object* value,
+ PretenureFlag pretenure);
+
// Performs a minor collection in new generation.
void Scavenge();
@@ -1959,6 +2165,13 @@ class Heap {
// Code to be run before and after mark-compact.
void MarkCompactPrologue();
+ void ProcessNativeContexts(WeakObjectRetainer* retainer, bool record_slots);
+ void ProcessArrayBuffers(WeakObjectRetainer* retainer, bool record_slots);
+ void ProcessAllocationSites(WeakObjectRetainer* retainer, bool record_slots);
+
+ // Called on heap tear-down.
+ void TearDownArrayBuffers();
+
// Record statistics before and after garbage collection.
void ReportStatisticsBeforeGC();
void ReportStatisticsAfterGC();
@@ -1981,7 +2194,6 @@ class Heap {
GCTracer* tracer_;
-
// Allocates a small number to string cache.
MUST_USE_RESULT MaybeObject* AllocateInitialNumberStringCache();
// Creates and installs the full-sized number string cache.
@@ -2061,7 +2273,6 @@ class Heap {
void StartIdleRound() {
mark_sweeps_since_idle_round_started_ = 0;
- ms_count_at_last_idle_notification_ = ms_count_;
}
void FinishIdleRound() {
@@ -2092,7 +2303,16 @@ class Heap {
void ClearObjectStats(bool clear_last_time_stats = false);
- static const int kInitialSymbolTableSize = 2048;
+ void set_weak_object_to_code_table(Object* value) {
+ ASSERT(!InNewSpace(value));
+ weak_object_to_code_table_ = value;
+ }
+
+ Object** weak_object_to_code_table_address() {
+ return &weak_object_to_code_table_;
+ }
+
+ static const int kInitialStringTableSize = 2048;
static const int kInitialEvalCacheSize = 64;
static const int kInitialNumberStringCacheSize = 256;
@@ -2103,22 +2323,28 @@ class Heap {
size_t object_sizes_last_time_[OBJECT_STATS_COUNT];
// Maximum GC pause.
- int max_gc_pause_;
+ double max_gc_pause_;
// Total time spent in GC.
- int total_gc_time_ms_;
+ double total_gc_time_ms_;
// Maximum size of objects alive after GC.
intptr_t max_alive_after_gc_;
// Minimal interval between two subsequent collections.
- int min_in_mutator_;
+ double min_in_mutator_;
// Size of objects alive after last GC.
intptr_t alive_after_last_gc_;
double last_gc_end_timestamp_;
+ // Cumulative GC time spent in marking
+ double marking_time_;
+
+ // Cumulative GC time spent in sweeping
+ double sweeping_time_;
+
MarkCompactCollector mark_compact_collector_;
StoreBuffer store_buffer_;
@@ -2132,10 +2358,22 @@ class Heap {
bool last_idle_notification_gc_count_init_;
int mark_sweeps_since_idle_round_started_;
- int ms_count_at_last_idle_notification_;
unsigned int gc_count_at_last_idle_gc_;
int scavenges_since_last_idle_round_;
+  // These two counters are monotonically increasing and never reset.
+ size_t full_codegen_bytes_generated_;
+ size_t crankshaft_codegen_bytes_generated_;
+
+ // If the --deopt_every_n_garbage_collections flag is set to a positive value,
+ // this variable holds the number of garbage collections since the last
+ // deoptimization triggered by garbage collection.
+ int gcs_since_last_deopt_;
+
+#ifdef VERIFY_HEAP
+ int no_weak_object_verification_scope_depth_;
+#endif
+
static const int kMaxMarkSweepsInIdleRound = 7;
static const int kIdleScavengeThreshold = 5;
@@ -2153,6 +2391,9 @@ class Heap {
MemoryChunk* chunks_queued_for_free_;
Mutex* relocation_mutex_;
+#ifdef DEBUG
+ bool relocation_mutex_locked_by_optimizer_thread_;
+#endif  // DEBUG
friend class Factory;
friend class GCTracer;
@@ -2163,6 +2404,9 @@ class Heap {
friend class MarkCompactCollector;
friend class MarkCompactMarkingVisitor;
friend class MapCompact;
+#ifdef VERIFY_HEAP
+ friend class NoWeakObjectVerificationScope;
+#endif
DISALLOW_COPY_AND_ASSIGN(Heap);
};
@@ -2198,6 +2442,8 @@ class HeapStats {
int* size_per_type; // 22
int* os_error; // 23
int* end_marker; // 24
+ intptr_t* property_cell_space_size; // 25
+ intptr_t* property_cell_space_capacity; // 26
};
@@ -2223,6 +2469,14 @@ class AlwaysAllocateScope {
DisallowAllocationFailure disallow_allocation_failure_;
};
+#ifdef VERIFY_HEAP
+class NoWeakObjectVerificationScope {
+ public:
+ inline NoWeakObjectVerificationScope();
+ inline ~NoWeakObjectVerificationScope();
+};
+#endif
+
// Visitor class to verify interior pointers in spaces that do not contain
// or care about intergenerational references. All heap object pointers have to
@@ -2235,37 +2489,40 @@ class VerifyPointersVisitor: public ObjectVisitor {
};
-// Space iterator for iterating over all spaces of the heap.
-// Returns each space in turn, and null when it is done.
+// Space iterator for iterating over all spaces of the heap. Returns each space
+// in turn, and null when it is done.
class AllSpaces BASE_EMBEDDED {
public:
+ explicit AllSpaces(Heap* heap) : heap_(heap), counter_(FIRST_SPACE) {}
Space* next();
- AllSpaces() { counter_ = FIRST_SPACE; }
private:
+ Heap* heap_;
int counter_;
};
// Space iterator for iterating over all old spaces of the heap: Old pointer
-// space, old data space and code space.
-// Returns each space in turn, and null when it is done.
+// space, old data space and code space. Returns each space in turn, and null
+// when it is done.
class OldSpaces BASE_EMBEDDED {
public:
+ explicit OldSpaces(Heap* heap) : heap_(heap), counter_(OLD_POINTER_SPACE) {}
OldSpace* next();
- OldSpaces() { counter_ = OLD_POINTER_SPACE; }
private:
+ Heap* heap_;
int counter_;
};
-// Space iterator for iterating over all the paged spaces of the heap:
-// Map space, old pointer space, old data space, code space and cell space.
-// Returns each space in turn, and null when it is done.
+// Space iterator for iterating over all the paged spaces of the heap: Map
+// space, old pointer space, old data space, code space and cell space. Returns
+// each space in turn, and null when it is done.
class PagedSpaces BASE_EMBEDDED {
public:
+ explicit PagedSpaces(Heap* heap) : heap_(heap), counter_(OLD_POINTER_SPACE) {}
PagedSpace* next();
- PagedSpaces() { counter_ = OLD_POINTER_SPACE; }
private:
+ Heap* heap_;
int counter_;
};
@@ -2275,8 +2532,8 @@ class PagedSpaces BASE_EMBEDDED {
// returned object iterators is handled by the space iterator.
class SpaceIterator : public Malloced {
public:
- SpaceIterator();
- explicit SpaceIterator(HeapObjectCallback size_func);
+ explicit SpaceIterator(Heap* heap);
+ SpaceIterator(Heap* heap, HeapObjectCallback size_func);
virtual ~SpaceIterator();
bool has_next();
@@ -2285,6 +2542,7 @@ class SpaceIterator : public Malloced {
private:
ObjectIterator* CreateIterator();
+ Heap* heap_;
int current_space_; // from enum AllocationSpace.
ObjectIterator* iterator_; // object iterator for the current space.
HeapObjectCallback size_func_;
@@ -2309,8 +2567,8 @@ class HeapIterator BASE_EMBEDDED {
kFilterUnreachable
};
- HeapIterator();
- explicit HeapIterator(HeapObjectsFiltering filtering);
+ explicit HeapIterator(Heap* heap);
+ HeapIterator(Heap* heap, HeapObjectsFiltering filtering);
~HeapIterator();
HeapObject* next();
@@ -2323,6 +2581,7 @@ class HeapIterator BASE_EMBEDDED {
void Shutdown();
HeapObject* NextObject();
+ Heap* heap_;
HeapObjectsFiltering filtering_;
HeapObjectsFilter* filter_;
// Space iterator for iterating all the spaces.
@@ -2337,10 +2596,10 @@ class HeapIterator BASE_EMBEDDED {
class KeyedLookupCache {
public:
// Lookup field offset for (map, name). If absent, -1 is returned.
- int Lookup(Map* map, String* name);
+ int Lookup(Map* map, Name* name);
// Update an element in the cache.
- void Update(Map* map, String* name, int field_offset);
+ void Update(Map* map, Name* name, int field_offset);
// Clear the cache.
void Clear();
@@ -2365,7 +2624,7 @@ class KeyedLookupCache {
}
}
- static inline int Hash(Map* map, String* name);
+ static inline int Hash(Map* map, Name* name);
// Get the address of the keys and field_offsets arrays. Used in
// generated code to perform cache lookups.
@@ -2379,7 +2638,7 @@ class KeyedLookupCache {
struct Key {
Map* map;
- String* name;
+ Name* name;
};
Key keys_[kLength];
@@ -2399,8 +2658,8 @@ class DescriptorLookupCache {
public:
// Lookup descriptor index for (map, name).
// If absent, kAbsent is returned.
- int Lookup(Map* source, String* name) {
- if (!StringShape(name).IsSymbol()) return kAbsent;
+ int Lookup(Map* source, Name* name) {
+ if (!name->IsUniqueName()) return kAbsent;
int index = Hash(source, name);
Key& key = keys_[index];
if ((key.source == source) && (key.name == name)) return results_[index];
@@ -2408,9 +2667,9 @@ class DescriptorLookupCache {
}
// Update an element in the cache.
- void Update(Map* source, String* name, int result) {
+ void Update(Map* source, Name* name, int result) {
ASSERT(result != kAbsent);
- if (StringShape(name).IsSymbol()) {
+ if (name->IsUniqueName()) {
int index = Hash(source, name);
Key& key = keys_[index];
key.source = source;
@@ -2433,7 +2692,7 @@ class DescriptorLookupCache {
}
}
- static int Hash(Object* source, String* name) {
+ static int Hash(Object* source, Name* name) {
// Uses only lower 32 bits if pointers are larger.
uint32_t source_hash =
static_cast<uint32_t>(reinterpret_cast<uintptr_t>(source))
@@ -2447,7 +2706,7 @@ class DescriptorLookupCache {
static const int kLength = 64;
struct Key {
Map* source;
- String* name;
+ Name* name;
};
Key keys_[kLength];
@@ -2458,38 +2717,6 @@ class DescriptorLookupCache {
};
-// A helper class to document/test C++ scopes where we do not
-// expect a GC. Usage:
-//
-// /* Allocation not allowed: we cannot handle a GC in this scope. */
-// { AssertNoAllocation nogc;
-// ...
-// }
-class AssertNoAllocation {
- public:
- inline AssertNoAllocation();
- inline ~AssertNoAllocation();
-
-#ifdef DEBUG
- private:
- bool old_state_;
- bool active_;
-#endif
-};
-
-
-class DisableAssertNoAllocation {
- public:
- inline DisableAssertNoAllocation();
- inline ~DisableAssertNoAllocation();
-
-#ifdef DEBUG
- private:
- bool old_state_;
- bool active_;
-#endif
-};
-
// GCTracer collects and prints ONE line after each garbage collector
// invocation IFF --trace_gc is used.
@@ -2509,6 +2736,8 @@ class GCTracer BASE_EMBEDDED {
MC_UPDATE_POINTERS_TO_EVACUATED,
MC_UPDATE_POINTERS_BETWEEN_EVACUATED,
MC_UPDATE_MISC_POINTERS,
+ MC_WEAKCOLLECTION_PROCESS,
+ MC_WEAKCOLLECTION_CLEAR,
MC_FLUSH_CODE,
kNumberOfScopes
};
@@ -2548,6 +2777,18 @@ class GCTracer BASE_EMBEDDED {
promoted_objects_size_ += object_size;
}
+ void increment_nodes_died_in_new_space() {
+ nodes_died_in_new_space_++;
+ }
+
+ void increment_nodes_copied_in_new_space() {
+ nodes_copied_in_new_space_++;
+ }
+
+ void increment_nodes_promoted() {
+ nodes_promoted_++;
+ }
+
private:
// Returns a string matching the collector.
const char* CollectorString();
@@ -2592,6 +2833,15 @@ class GCTracer BASE_EMBEDDED {
// Size of objects promoted during the current collection.
intptr_t promoted_objects_size_;
+  // Number of nodes that died in the new space.
+ int nodes_died_in_new_space_;
+
+  // Number of nodes copied to the new space.
+ int nodes_copied_in_new_space_;
+
+  // Number of nodes promoted to the old space.
+ int nodes_promoted_;
+
// Incremental marking steps counters.
int steps_count_;
double steps_took_;
@@ -2652,7 +2902,7 @@ class TranscendentalCache {
class SubCache {
static const int kCacheSize = 512;
- explicit SubCache(Type t);
+ explicit SubCache(Isolate* isolate, Type t);
MUST_USE_RESULT inline MaybeObject* Get(double input);
@@ -2689,10 +2939,14 @@ class TranscendentalCache {
DISALLOW_COPY_AND_ASSIGN(SubCache);
};
- TranscendentalCache() {
+ explicit TranscendentalCache(Isolate* isolate) : isolate_(isolate) {
for (int i = 0; i < kNumberOfCaches; ++i) caches_[i] = NULL;
}
+ ~TranscendentalCache() {
+ for (int i = 0; i < kNumberOfCaches; ++i) delete caches_[i];
+ }
+
// Used to create an external reference.
inline Address cache_array_address();
@@ -2703,6 +2957,7 @@ class TranscendentalCache {
// Allow access to the caches_ array as an ExternalReference.
friend class ExternalReference;
+ Isolate* isolate_;
SubCache* caches_[kNumberOfCaches];
DISALLOW_COPY_AND_ASSIGN(TranscendentalCache);
};
@@ -2760,7 +3015,7 @@ class IntrusiveMarking {
};
-#if defined(DEBUG) || defined(LIVE_OBJECT_LIST)
+#ifdef DEBUG
// Helper class for tracing paths to a search target Object from all roots.
// The TracePathFrom() method can be used to trace paths from a specific
// object to the search target object.
@@ -2783,7 +3038,7 @@ class PathTracer : public ObjectVisitor {
what_to_find_(what_to_find),
visit_mode_(visit_mode),
object_stack_(20),
- no_alloc() {}
+ no_allocation() {}
virtual void VisitPointers(Object** start, Object** end);
@@ -2812,12 +3067,12 @@ class PathTracer : public ObjectVisitor {
VisitMode visit_mode_;
List<Object*> object_stack_;
- AssertNoAllocation no_alloc; // i.e. no gc allowed.
+ DisallowHeapAllocation no_allocation; // i.e. no gc allowed.
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(PathTracer);
};
-#endif // DEBUG || LIVE_OBJECT_LIST
+#endif // DEBUG
} } // namespace v8::internal
diff --git a/deps/v8/src/hydrogen-alias-analysis.h b/deps/v8/src/hydrogen-alias-analysis.h
new file mode 100644
index 000000000..21a54625f
--- /dev/null
+++ b/deps/v8/src/hydrogen-alias-analysis.h
@@ -0,0 +1,96 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_HYDROGEN_ALIAS_ANALYSIS_H_
+#define V8_HYDROGEN_ALIAS_ANALYSIS_H_
+
+#include "hydrogen.h"
+
+namespace v8 {
+namespace internal {
+
+enum HAliasing {
+ kMustAlias,
+ kMayAlias,
+ kNoAlias
+};
+
+
+// Defines the interface to alias analysis for the rest of the compiler.
+// A simple implementation can use only local reasoning, but a more powerful
+// analysis might employ points-to analysis.
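+//
+// For example, two distinct HAllocate instructions can never refer to the
+// same object (kNoAlias), an SSA value trivially aliases itself (kMustAlias),
+// and two different parameters cannot be distinguished by local reasoning, so
+// Query() conservatively answers kMayAlias for them.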
+class HAliasAnalyzer : public ZoneObject {
+ public:
+ // Simple alias analysis distinguishes allocations, parameters,
+ // and constants using only local reasoning.
+ HAliasing Query(HValue* a, HValue* b) {
+ // The same SSA value always references the same object.
+ if (a == b) return kMustAlias;
+
+ if (a->IsAllocate() || a->IsInnerAllocatedObject()) {
+ // Two non-identical allocations can never be aliases.
+ if (b->IsAllocate()) return kNoAlias;
+ if (b->IsInnerAllocatedObject()) return kNoAlias;
+ // An allocation can never alias a parameter or a constant.
+ if (b->IsParameter()) return kNoAlias;
+ if (b->IsConstant()) return kNoAlias;
+ }
+ if (b->IsAllocate() || b->IsInnerAllocatedObject()) {
+ // An allocation can never alias a parameter or a constant.
+ if (a->IsParameter()) return kNoAlias;
+ if (a->IsConstant()) return kNoAlias;
+ }
+
+ // Constant objects can be distinguished statically.
+ if (a->IsConstant()) {
+ // TODO(titzer): DataEquals() is more efficient, but that's protected.
+ return a->Equals(b) ? kMustAlias : kNoAlias;
+ }
+ return kMayAlias;
+ }
+
+ // Checks whether the objects referred to by the given instructions may
+ // ever be aliases. Note that this is more conservative than checking
+ // {Query(a, b) == kMayAlias}, since this method considers kMustAlias
+ // objects to also be may-aliasing.
+ inline bool MayAlias(HValue* a, HValue* b) {
+ return Query(a, b) != kNoAlias;
+ }
+
+ inline bool MustAlias(HValue* a, HValue* b) {
+ return Query(a, b) == kMustAlias;
+ }
+
+ inline bool NoAlias(HValue* a, HValue* b) {
+ return Query(a, b) == kNoAlias;
+ }
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_HYDROGEN_ALIAS_ANALYSIS_H_
diff --git a/deps/v8/src/hydrogen-bce.cc b/deps/v8/src/hydrogen-bce.cc
new file mode 100644
index 000000000..869db54a2
--- /dev/null
+++ b/deps/v8/src/hydrogen-bce.cc
@@ -0,0 +1,437 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "hydrogen-bce.h"
+
+namespace v8 {
+namespace internal {
+
+// We try to "factor up" HBoundsCheck instructions towards the root of the
+// dominator tree.
+// For now we handle checks where the index is like "exp + int32value".
+// If in the dominator tree we check "exp + v1" and later (dominated)
+// "exp + v2", if v2 <= v1 we can safely remove the second check, and if
+// v2 > v1 we can use v2 in the 1st check and again remove the second.
+// To do so we keep a dictionary of all checks where the key is the pair
+// "exp, length".
+// The class BoundsCheckKey represents this key.
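+// For example, if a dominating check covers "exp + 3" and a dominated one
+// tests "exp + 1", the dominated check is redundant (1 <= 3) and is removed;
+// if the dominated check tests "exp + 7" instead, the dominating check is
+// widened to offset 7 and the dominated one is removed as well.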
+class BoundsCheckKey : public ZoneObject {
+ public:
+ HValue* IndexBase() const { return index_base_; }
+ HValue* Length() const { return length_; }
+
+ uint32_t Hash() {
+ return static_cast<uint32_t>(index_base_->Hashcode() ^ length_->Hashcode());
+ }
+
+ static BoundsCheckKey* Create(Zone* zone,
+ HBoundsCheck* check,
+ int32_t* offset) {
+ if (!check->index()->representation().IsSmiOrInteger32()) return NULL;
+
+ HValue* index_base = NULL;
+ HConstant* constant = NULL;
+ bool is_sub = false;
+
+ if (check->index()->IsAdd()) {
+ HAdd* index = HAdd::cast(check->index());
+ if (index->left()->IsConstant()) {
+ constant = HConstant::cast(index->left());
+ index_base = index->right();
+ } else if (index->right()->IsConstant()) {
+ constant = HConstant::cast(index->right());
+ index_base = index->left();
+ }
+ } else if (check->index()->IsSub()) {
+ HSub* index = HSub::cast(check->index());
+ is_sub = true;
+ if (index->left()->IsConstant()) {
+ constant = HConstant::cast(index->left());
+ index_base = index->right();
+ } else if (index->right()->IsConstant()) {
+ constant = HConstant::cast(index->right());
+ index_base = index->left();
+ }
+ }
+
+ if (constant != NULL && constant->HasInteger32Value()) {
+ *offset = is_sub ? - constant->Integer32Value()
+ : constant->Integer32Value();
+ } else {
+ *offset = 0;
+ index_base = check->index();
+ }
+
+ return new(zone) BoundsCheckKey(index_base, check->length());
+ }
+
+ private:
+ BoundsCheckKey(HValue* index_base, HValue* length)
+ : index_base_(index_base),
+ length_(length) { }
+
+ HValue* index_base_;
+ HValue* length_;
+
+ DISALLOW_COPY_AND_ASSIGN(BoundsCheckKey);
+};
+
+
+// Data about each HBoundsCheck that can be eliminated or moved.
+// It is the "value" in the dictionary indexed by "base-index, length"
+// (the key is BoundsCheckKey).
+// We scan the code with a dominator tree traversal.
+// Traversing the dominator tree we keep a stack (implemented as a singly
+// linked list) of "data" for each basic block that contains a relevant check
+// with the same key (the dictionary holds the head of the list).
+// We also keep all the "data" created for a given basic block in a list, and
+// use it to "clean up" the dictionary when backtracking in the dominator tree
+// traversal.
+// This way each dictionary entry always points directly to the check that
+// dominates the code currently being examined.
+// We also track the current "offset" of the index expression and use it to
+// decide if any check is already "covered" (so it can be removed) or not.
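+// Each entry therefore tracks the lowest and highest offsets already checked
+// for its key, so a later check whose offset falls inside that range is
+// immediately known to be redundant.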
+class BoundsCheckBbData: public ZoneObject {
+ public:
+ BoundsCheckKey* Key() const { return key_; }
+ int32_t LowerOffset() const { return lower_offset_; }
+ int32_t UpperOffset() const { return upper_offset_; }
+ HBasicBlock* BasicBlock() const { return basic_block_; }
+ HBoundsCheck* LowerCheck() const { return lower_check_; }
+ HBoundsCheck* UpperCheck() const { return upper_check_; }
+ BoundsCheckBbData* NextInBasicBlock() const { return next_in_bb_; }
+ BoundsCheckBbData* FatherInDominatorTree() const { return father_in_dt_; }
+
+ bool OffsetIsCovered(int32_t offset) const {
+ return offset >= LowerOffset() && offset <= UpperOffset();
+ }
+
+ bool HasSingleCheck() { return lower_check_ == upper_check_; }
+
+ // The goal of this method is to modify either upper_offset_ or
+  // lower_offset_ so that new_offset is also covered (the covered
+ // range grows).
+ //
+ // The precondition is that new_check follows UpperCheck() and
+ // LowerCheck() in the same basic block, and that new_offset is not
+ // covered (otherwise we could simply remove new_check).
+ //
+ // If HasSingleCheck() is true then new_check is added as "second check"
+ // (either upper or lower; note that HasSingleCheck() becomes false).
+ // Otherwise one of the current checks is modified so that it also covers
+ // new_offset, and new_check is removed.
+ //
+ // If the check cannot be modified because the context is unknown it
+ // returns false, otherwise it returns true.
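+  //
+  // For example, if the covered range is [0, 3] and new_offset is 7, the
+  // range grows to [0, 7]: with a single existing check the new check simply
+  // becomes the upper check, otherwise the upper check's index is rewritten
+  // to "IndexBase() + 7" and new_check is deleted.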
+ bool CoverCheck(HBoundsCheck* new_check,
+ int32_t new_offset) {
+ ASSERT(new_check->index()->representation().IsSmiOrInteger32());
+ bool keep_new_check = false;
+
+ if (new_offset > upper_offset_) {
+ upper_offset_ = new_offset;
+ if (HasSingleCheck()) {
+ keep_new_check = true;
+ upper_check_ = new_check;
+ } else {
+ bool result = BuildOffsetAdd(upper_check_,
+ &added_upper_index_,
+ &added_upper_offset_,
+ Key()->IndexBase(),
+ new_check->index()->representation(),
+ new_offset);
+ if (!result) return false;
+ upper_check_->ReplaceAllUsesWith(upper_check_->index());
+ upper_check_->SetOperandAt(0, added_upper_index_);
+ }
+ } else if (new_offset < lower_offset_) {
+ lower_offset_ = new_offset;
+ if (HasSingleCheck()) {
+ keep_new_check = true;
+ lower_check_ = new_check;
+ } else {
+ bool result = BuildOffsetAdd(lower_check_,
+ &added_lower_index_,
+ &added_lower_offset_,
+ Key()->IndexBase(),
+ new_check->index()->representation(),
+ new_offset);
+ if (!result) return false;
+ lower_check_->ReplaceAllUsesWith(lower_check_->index());
+ lower_check_->SetOperandAt(0, added_lower_index_);
+ }
+ } else {
+ ASSERT(false);
+ }
+
+ if (!keep_new_check) {
+ new_check->block()->graph()->isolate()->counters()->
+ bounds_checks_eliminated()->Increment();
+ new_check->DeleteAndReplaceWith(new_check->ActualValue());
+ }
+
+ return true;
+ }
+
+ void RemoveZeroOperations() {
+ RemoveZeroAdd(&added_lower_index_, &added_lower_offset_);
+ RemoveZeroAdd(&added_upper_index_, &added_upper_offset_);
+ }
+
+ BoundsCheckBbData(BoundsCheckKey* key,
+ int32_t lower_offset,
+ int32_t upper_offset,
+ HBasicBlock* bb,
+ HBoundsCheck* lower_check,
+ HBoundsCheck* upper_check,
+ BoundsCheckBbData* next_in_bb,
+ BoundsCheckBbData* father_in_dt)
+ : key_(key),
+ lower_offset_(lower_offset),
+ upper_offset_(upper_offset),
+ basic_block_(bb),
+ lower_check_(lower_check),
+ upper_check_(upper_check),
+ added_lower_index_(NULL),
+ added_lower_offset_(NULL),
+ added_upper_index_(NULL),
+ added_upper_offset_(NULL),
+ next_in_bb_(next_in_bb),
+ father_in_dt_(father_in_dt) { }
+
+ private:
+ BoundsCheckKey* key_;
+ int32_t lower_offset_;
+ int32_t upper_offset_;
+ HBasicBlock* basic_block_;
+ HBoundsCheck* lower_check_;
+ HBoundsCheck* upper_check_;
+ HInstruction* added_lower_index_;
+ HConstant* added_lower_offset_;
+ HInstruction* added_upper_index_;
+ HConstant* added_upper_offset_;
+ BoundsCheckBbData* next_in_bb_;
+ BoundsCheckBbData* father_in_dt_;
+
+ // Given an existing add instruction and a bounds check it tries to
+ // find the current context (either of the add or of the check index).
+ HValue* IndexContext(HInstruction* add, HBoundsCheck* check) {
+ if (add != NULL && add->IsAdd()) {
+ return HAdd::cast(add)->context();
+ }
+ if (check->index()->IsBinaryOperation()) {
+ return HBinaryOperation::cast(check->index())->context();
+ }
+ return NULL;
+ }
+
+ // This function returns false if it cannot build the add because the
+ // current context cannot be determined.
+ bool BuildOffsetAdd(HBoundsCheck* check,
+ HInstruction** add,
+ HConstant** constant,
+ HValue* original_value,
+ Representation representation,
+ int32_t new_offset) {
+ HValue* index_context = IndexContext(*add, check);
+ if (index_context == NULL) return false;
+
+ Zone* zone = BasicBlock()->zone();
+ HConstant* new_constant = HConstant::New(zone, index_context,
+ new_offset, representation);
+ if (*add == NULL) {
+ new_constant->InsertBefore(check);
+ (*add) = HAdd::New(zone, index_context, original_value, new_constant);
+ (*add)->AssumeRepresentation(representation);
+ (*add)->InsertBefore(check);
+ } else {
+ new_constant->InsertBefore(*add);
+ (*constant)->DeleteAndReplaceWith(new_constant);
+ }
+ *constant = new_constant;
+ return true;
+ }
+
+ void RemoveZeroAdd(HInstruction** add, HConstant** constant) {
+ if (*add != NULL && (*add)->IsAdd() && (*constant)->Integer32Value() == 0) {
+ (*add)->DeleteAndReplaceWith(HAdd::cast(*add)->left());
+ (*constant)->DeleteAndReplaceWith(NULL);
+ }
+ }
+
+ DISALLOW_COPY_AND_ASSIGN(BoundsCheckBbData);
+};
+
+
+static bool BoundsCheckKeyMatch(void* key1, void* key2) {
+ BoundsCheckKey* k1 = static_cast<BoundsCheckKey*>(key1);
+ BoundsCheckKey* k2 = static_cast<BoundsCheckKey*>(key2);
+ return k1->IndexBase() == k2->IndexBase() && k1->Length() == k2->Length();
+}
+
+
+BoundsCheckTable::BoundsCheckTable(Zone* zone)
+ : ZoneHashMap(BoundsCheckKeyMatch, ZoneHashMap::kDefaultHashMapCapacity,
+ ZoneAllocationPolicy(zone)) { }
+
+
+BoundsCheckBbData** BoundsCheckTable::LookupOrInsert(BoundsCheckKey* key,
+ Zone* zone) {
+ return reinterpret_cast<BoundsCheckBbData**>(
+ &(Lookup(key, key->Hash(), true, ZoneAllocationPolicy(zone))->value));
+}
+
+
+void BoundsCheckTable::Insert(BoundsCheckKey* key,
+ BoundsCheckBbData* data,
+ Zone* zone) {
+ Lookup(key, key->Hash(), true, ZoneAllocationPolicy(zone))->value = data;
+}
+
+
+void BoundsCheckTable::Delete(BoundsCheckKey* key) {
+ Remove(key, key->Hash());
+}
+
+
+class HBoundsCheckEliminationState {
+ public:
+ HBasicBlock* block_;
+ BoundsCheckBbData* bb_data_list_;
+ int index_;
+};
+
+
+// Eliminates checks in bb and recursively in the dominated blocks.
+// Also replaces the results of check instructions with the original value, if
+// the result is used. This is safe now, since we don't do code motion after
+// this point. It enables better register allocation since the value produced
+// by check instructions is really a copy of the original value.
+void HBoundsCheckEliminationPhase::EliminateRedundantBoundsChecks(
+ HBasicBlock* entry) {
+ // Allocate the stack.
+ HBoundsCheckEliminationState* stack =
+ zone()->NewArray<HBoundsCheckEliminationState>(graph()->blocks()->length());
+
+ // Explicitly push the entry block.
+ stack[0].block_ = entry;
+ stack[0].bb_data_list_ = PreProcessBlock(entry);
+ stack[0].index_ = 0;
+ int stack_depth = 1;
+
+ // Implement depth-first traversal with a stack.
+ while (stack_depth > 0) {
+ int current = stack_depth - 1;
+ HBoundsCheckEliminationState* state = &stack[current];
+ const ZoneList<HBasicBlock*>* children = state->block_->dominated_blocks();
+
+ if (state->index_ < children->length()) {
+ // Recursively visit children blocks.
+ HBasicBlock* child = children->at(state->index_++);
+ int next = stack_depth++;
+ stack[next].block_ = child;
+ stack[next].bb_data_list_ = PreProcessBlock(child);
+ stack[next].index_ = 0;
+ } else {
+ // Finished with all children; post process the block.
+ PostProcessBlock(state->block_, state->bb_data_list_);
+ stack_depth--;
+ }
+ }
+}
+
+
+BoundsCheckBbData* HBoundsCheckEliminationPhase::PreProcessBlock(
+ HBasicBlock* bb) {
+ BoundsCheckBbData* bb_data_list = NULL;
+
+ for (HInstructionIterator it(bb); !it.Done(); it.Advance()) {
+ HInstruction* i = it.Current();
+ if (!i->IsBoundsCheck()) continue;
+
+ HBoundsCheck* check = HBoundsCheck::cast(i);
+ int32_t offset;
+ BoundsCheckKey* key =
+ BoundsCheckKey::Create(zone(), check, &offset);
+ if (key == NULL) continue;
+ BoundsCheckBbData** data_p = table_.LookupOrInsert(key, zone());
+ BoundsCheckBbData* data = *data_p;
+ if (data == NULL) {
+ bb_data_list = new(zone()) BoundsCheckBbData(key,
+ offset,
+ offset,
+ bb,
+ check,
+ check,
+ bb_data_list,
+ NULL);
+ *data_p = bb_data_list;
+ } else if (data->OffsetIsCovered(offset)) {
+ bb->graph()->isolate()->counters()->
+ bounds_checks_eliminated()->Increment();
+ check->DeleteAndReplaceWith(check->ActualValue());
+ } else if (data->BasicBlock() != bb ||
+ !data->CoverCheck(check, offset)) {
+ // If the check is in the current BB we try to modify it by calling
+      // "CoverCheck", but if that also fails we record the current offsets
+ // in a new data instance because from now on they are covered.
+ int32_t new_lower_offset = offset < data->LowerOffset()
+ ? offset
+ : data->LowerOffset();
+ int32_t new_upper_offset = offset > data->UpperOffset()
+ ? offset
+ : data->UpperOffset();
+ bb_data_list = new(zone()) BoundsCheckBbData(key,
+ new_lower_offset,
+ new_upper_offset,
+ bb,
+ data->LowerCheck(),
+ data->UpperCheck(),
+ bb_data_list,
+ data);
+ table_.Insert(key, bb_data_list, zone());
+ }
+ }
+
+ return bb_data_list;
+}
+
+
+void HBoundsCheckEliminationPhase::PostProcessBlock(
+ HBasicBlock* block, BoundsCheckBbData* data) {
+ while (data != NULL) {
+ data->RemoveZeroOperations();
+ if (data->FatherInDominatorTree()) {
+ table_.Insert(data->Key(), data->FatherInDominatorTree(), zone());
+ } else {
+ table_.Delete(data->Key());
+ }
+ data = data->NextInBasicBlock();
+ }
+}
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/hydrogen-bce.h b/deps/v8/src/hydrogen-bce.h
new file mode 100644
index 000000000..c55dea7b7
--- /dev/null
+++ b/deps/v8/src/hydrogen-bce.h
@@ -0,0 +1,74 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_HYDROGEN_BCE_H_
+#define V8_HYDROGEN_BCE_H_
+
+#include "hydrogen.h"
+
+namespace v8 {
+namespace internal {
+
+
+class BoundsCheckBbData;
+class BoundsCheckKey;
+class BoundsCheckTable : private ZoneHashMap {
+ public:
+ explicit BoundsCheckTable(Zone* zone);
+
+ INLINE(BoundsCheckBbData** LookupOrInsert(BoundsCheckKey* key, Zone* zone));
+ INLINE(void Insert(BoundsCheckKey* key, BoundsCheckBbData* data, Zone* zone));
+ INLINE(void Delete(BoundsCheckKey* key));
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(BoundsCheckTable);
+};
+
+
+class HBoundsCheckEliminationPhase : public HPhase {
+ public:
+ explicit HBoundsCheckEliminationPhase(HGraph* graph)
+ : HPhase("H_Bounds checks elimination", graph), table_(zone()) { }
+
+ void Run() {
+ EliminateRedundantBoundsChecks(graph()->entry_block());
+ }
+
+ private:
+ void EliminateRedundantBoundsChecks(HBasicBlock* bb);
+ BoundsCheckBbData* PreProcessBlock(HBasicBlock* bb);
+ void PostProcessBlock(HBasicBlock* bb, BoundsCheckBbData* data);
+
+ BoundsCheckTable table_;
+
+ DISALLOW_COPY_AND_ASSIGN(HBoundsCheckEliminationPhase);
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_HYDROGEN_BCE_H_
diff --git a/deps/v8/src/hydrogen-bch.cc b/deps/v8/src/hydrogen-bch.cc
new file mode 100644
index 000000000..a0a0fee71
--- /dev/null
+++ b/deps/v8/src/hydrogen-bch.cc
@@ -0,0 +1,402 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "hydrogen-bch.h"
+
+namespace v8 {
+namespace internal {
+
+/*
+ * This class is a table with one element for each basic block.
+ *
+ * It is used to check if, inside one loop, all execution paths contain
+ * a bounds check for a particular [index, length] combination.
+ * The reason is that if there is a path that stays in the loop without
+ * executing a check then the check cannot be hoisted out of the loop (it
+ * would likely fail and cause a deopt for no good reason).
+ * We also check if there are paths that exit the loop early, and if so we
+ * perform the hoisting only if graph()->use_optimistic_licm() is true.
+ * The reason is that such paths are relatively common and harmless (like in
+ * a "search" method that scans an array until an element is found), but in
+ * some cases they could cause a deopt if we hoist the check so this is a
+ * situation we need to detect.
+ */
+class InductionVariableBlocksTable BASE_EMBEDDED {
+ public:
+ class Element {
+ public:
+ static const int kNoBlock = -1;
+
+ HBasicBlock* block() { return block_; }
+ void set_block(HBasicBlock* block) { block_ = block; }
+ bool is_start() { return is_start_; }
+ bool is_proper_exit() { return is_proper_exit_; }
+ bool is_in_loop() { return is_in_loop_; }
+ bool has_check() { return has_check_; }
+ void set_has_check() { has_check_ = true; }
+ InductionVariableLimitUpdate* additional_limit() {
+ return &additional_limit_;
+ }
+
+ /*
+ * Initializes the table element for a given loop (identified by its
+ * induction variable).
+ */
+ void InitializeLoop(InductionVariableData* data) {
+ ASSERT(data->limit() != NULL);
+ HLoopInformation* loop = data->phi()->block()->current_loop();
+ is_start_ = (block() == loop->loop_header());
+ is_proper_exit_ = (block() == data->induction_exit_target());
+ is_in_loop_ = loop->IsNestedInThisLoop(block()->current_loop());
+ has_check_ = false;
+ }
+
+ // Utility methods to iterate over dominated blocks.
+ void ResetCurrentDominatedBlock() { current_dominated_block_ = kNoBlock; }
+ HBasicBlock* CurrentDominatedBlock() {
+ ASSERT(current_dominated_block_ != kNoBlock);
+ return current_dominated_block_ < block()->dominated_blocks()->length() ?
+ block()->dominated_blocks()->at(current_dominated_block_) : NULL;
+ }
+ HBasicBlock* NextDominatedBlock() {
+ current_dominated_block_++;
+ return CurrentDominatedBlock();
+ }
+
+ Element()
+ : block_(NULL), is_start_(false), is_proper_exit_(false),
+ has_check_(false), additional_limit_(),
+ current_dominated_block_(kNoBlock) {}
+
+ private:
+ HBasicBlock* block_;
+ bool is_start_;
+ bool is_proper_exit_;
+ bool is_in_loop_;
+ bool has_check_;
+ InductionVariableLimitUpdate additional_limit_;
+ int current_dominated_block_;
+ };
+
+ HGraph* graph() const { return graph_; }
+ Counters* counters() const { return graph()->isolate()->counters(); }
+ HBasicBlock* loop_header() const { return loop_header_; }
+ Element* at(int index) const { return &(elements_.at(index)); }
+ Element* at(HBasicBlock* block) const { return at(block->block_id()); }
+
+ void AddCheckAt(HBasicBlock* block) {
+ at(block->block_id())->set_has_check();
+ }
+
+ /*
+ * Initializes the table for a given loop (identified by its induction
+ * variable).
+ */
+ void InitializeLoop(InductionVariableData* data) {
+ for (int i = 0; i < graph()->blocks()->length(); i++) {
+ at(i)->InitializeLoop(data);
+ }
+ loop_header_ = data->phi()->block()->current_loop()->loop_header();
+ }
+
+
+ enum Hoistability {
+ HOISTABLE,
+ OPTIMISTICALLY_HOISTABLE,
+ NOT_HOISTABLE
+ };
+
+ /*
+ * This method checks if it is appropriate to hoist the bounds checks on an
+ * induction variable out of the loop.
+ * The problem is that in the loop code graph there could be execution paths
+ * where the check is not performed, but hoisting the check has the same
+ * semantics as performing it at every loop iteration, which could cause
+ * unnecessary check failures (which would mean unnecessary deoptimizations).
+   * The method returns HOISTABLE if no path performs an iteration (loops
+   * back to the header) without meeting a check, and NOT_HOISTABLE otherwise;
+   * when paths that exit the loop early are found, OPTIMISTICALLY_HOISTABLE
+   * is returned instead of HOISTABLE.
+ */
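+  // For example, in a loop body of the form
+  //   "for (i = 0; i < n; i++) { if (i % 2 != 0) continue; a[i] = 0; }"
+  // the "continue" path loops back to the header without meeting the bounds
+  // check on a[i], so that check is NOT_HOISTABLE.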
+ Hoistability CheckHoistability() {
+ for (int i = 0; i < elements_.length(); i++) {
+ at(i)->ResetCurrentDominatedBlock();
+ }
+ bool unsafe = false;
+
+ HBasicBlock* current = loop_header();
+ while (current != NULL) {
+ HBasicBlock* next;
+
+ if (at(current)->has_check() || !at(current)->is_in_loop()) {
+ // We found a check or we reached a dominated block out of the loop,
+ // therefore this block is safe and we can backtrack.
+ next = NULL;
+ } else {
+ for (int i = 0; i < current->end()->SuccessorCount(); i ++) {
+ Element* successor = at(current->end()->SuccessorAt(i));
+
+ if (!successor->is_in_loop()) {
+ if (!successor->is_proper_exit()) {
+ // We found a path that exits the loop early, and is not the exit
+ // related to the induction limit, therefore hoisting checks is
+ // an optimistic assumption.
+ unsafe = true;
+ }
+ }
+
+ if (successor->is_start()) {
+ // We found a path that does one loop iteration without meeting any
+ // check, therefore hoisting checks would be likely to cause
+ // unnecessary deopts.
+ return NOT_HOISTABLE;
+ }
+ }
+
+ next = at(current)->NextDominatedBlock();
+ }
+
+ // If we have no next block we need to backtrack the tree traversal.
+ while (next == NULL) {
+ current = current->dominator();
+ if (current != NULL) {
+ next = at(current)->NextDominatedBlock();
+ } else {
+ // We reached the root: next stays NULL.
+ next = NULL;
+ break;
+ }
+ }
+
+ current = next;
+ }
+
+ return unsafe ? OPTIMISTICALLY_HOISTABLE : HOISTABLE;
+ }
+
+ explicit InductionVariableBlocksTable(HGraph* graph)
+ : graph_(graph), loop_header_(NULL),
+ elements_(graph->blocks()->length(), graph->zone()) {
+ for (int i = 0; i < graph->blocks()->length(); i++) {
+ Element element;
+ element.set_block(graph->blocks()->at(i));
+ elements_.Add(element, graph->zone());
+ ASSERT(at(i)->block()->block_id() == i);
+ }
+ }
+
+ // Tries to hoist a check out of its induction loop.
+ void ProcessRelatedChecks(
+ InductionVariableData::InductionVariableCheck* check,
+ InductionVariableData* data) {
+ HValue* length = check->check()->length();
+ check->set_processed();
+ HBasicBlock* header =
+ data->phi()->block()->current_loop()->loop_header();
+ HBasicBlock* pre_header = header->predecessors()->at(0);
+ // Check that the limit is defined in the loop preheader.
+ if (!data->limit()->IsInteger32Constant()) {
+ HBasicBlock* limit_block = data->limit()->block();
+ if (limit_block != pre_header &&
+ !limit_block->Dominates(pre_header)) {
+ return;
+ }
+ }
+ // Check that the length and limit have compatible representations.
+ if (!(data->limit()->representation().Equals(
+ length->representation()) ||
+ data->limit()->IsInteger32Constant())) {
+ return;
+ }
+ // Check that the length is defined in the loop preheader.
+ if (check->check()->length()->block() != pre_header &&
+ !check->check()->length()->block()->Dominates(pre_header)) {
+ return;
+ }
+
+ // Add checks to the table.
+ for (InductionVariableData::InductionVariableCheck* current_check = check;
+ current_check != NULL;
+ current_check = current_check->next()) {
+ if (current_check->check()->length() != length) continue;
+
+ AddCheckAt(current_check->check()->block());
+ current_check->set_processed();
+ }
+
+ // Check that we will not cause unwanted deoptimizations.
+ Hoistability hoistability = CheckHoistability();
+ if (hoistability == NOT_HOISTABLE ||
+ (hoistability == OPTIMISTICALLY_HOISTABLE &&
+ !graph()->use_optimistic_licm())) {
+ return;
+ }
+
+ // We will do the hoisting, but first we must decide which limit to use:
+ // if all checks are done against the same constant upper limit we will
+ // use that constant instead of the induction limit.
+ bool has_upper_constant_limit = true;
+ int32_t upper_constant_limit =
+ check != NULL && check->HasUpperLimit() ? check->upper_limit() : 0;
+ for (InductionVariableData::InductionVariableCheck* current_check = check;
+ current_check != NULL;
+ current_check = current_check->next()) {
+ has_upper_constant_limit =
+ has_upper_constant_limit &&
+ current_check->HasUpperLimit() &&
+ current_check->upper_limit() == upper_constant_limit;
+ counters()->bounds_checks_eliminated()->Increment();
+ current_check->check()->set_skip_check();
+ }
+
+ // Choose the appropriate limit.
+ Zone* zone = graph()->zone();
+ HValue* context = graph()->GetInvalidContext();
+ HValue* limit = data->limit();
+ if (has_upper_constant_limit) {
+ HConstant* new_limit = HConstant::New(zone, context,
+ upper_constant_limit);
+ new_limit->InsertBefore(pre_header->end());
+ limit = new_limit;
+ }
+
+ // If necessary, redefine the limit in the preheader.
+ if (limit->IsInteger32Constant() &&
+ limit->block() != pre_header &&
+ !limit->block()->Dominates(pre_header)) {
+ HConstant* new_limit = HConstant::New(zone, context,
+ limit->GetInteger32Constant());
+ new_limit->InsertBefore(pre_header->end());
+ limit = new_limit;
+ }
+
+ // Do the hoisting.
+ HBoundsCheck* hoisted_check = HBoundsCheck::New(
+ zone, context, limit, check->check()->length());
+ hoisted_check->InsertBefore(pre_header->end());
+ hoisted_check->set_allow_equality(true);
+ counters()->bounds_checks_hoisted()->Increment();
+ }
+
+ void CollectInductionVariableData(HBasicBlock* bb) {
+ bool additional_limit = false;
+
+ for (int i = 0; i < bb->phis()->length(); i++) {
+ HPhi* phi = bb->phis()->at(i);
+ phi->DetectInductionVariable();
+ }
+
+ additional_limit = InductionVariableData::ComputeInductionVariableLimit(
+ bb, at(bb)->additional_limit());
+
+ if (additional_limit) {
+ at(bb)->additional_limit()->updated_variable->
+ UpdateAdditionalLimit(at(bb)->additional_limit());
+ }
+
+ for (HInstruction* i = bb->first(); i != NULL; i = i->next()) {
+ if (!i->IsBoundsCheck()) continue;
+ HBoundsCheck* check = HBoundsCheck::cast(i);
+ InductionVariableData::BitwiseDecompositionResult decomposition;
+ InductionVariableData::DecomposeBitwise(check->index(), &decomposition);
+ if (!decomposition.base->IsPhi()) continue;
+ HPhi* phi = HPhi::cast(decomposition.base);
+
+ if (!phi->IsInductionVariable()) continue;
+ InductionVariableData* data = phi->induction_variable_data();
+
+ // For now ignore loops decrementing the index.
+ if (data->increment() <= 0) continue;
+ if (!data->LowerLimitIsNonNegativeConstant()) continue;
+
+ // TODO(mmassi): skip OSR values for check->length().
+ if (check->length() == data->limit() ||
+ check->length() == data->additional_upper_limit()) {
+ counters()->bounds_checks_eliminated()->Increment();
+ check->set_skip_check();
+ continue;
+ }
+
+ if (!phi->IsLimitedInductionVariable()) continue;
+
+ int32_t limit = data->ComputeUpperLimit(decomposition.and_mask,
+ decomposition.or_mask);
+ phi->induction_variable_data()->AddCheck(check, limit);
+ }
+
+ for (int i = 0; i < bb->dominated_blocks()->length(); i++) {
+ CollectInductionVariableData(bb->dominated_blocks()->at(i));
+ }
+
+ if (additional_limit) {
+ at(bb->block_id())->additional_limit()->updated_variable->
+ UpdateAdditionalLimit(at(bb->block_id())->additional_limit());
+ }
+ }
+
+ void EliminateRedundantBoundsChecks(HBasicBlock* bb) {
+ for (int i = 0; i < bb->phis()->length(); i++) {
+ HPhi* phi = bb->phis()->at(i);
+ if (!phi->IsLimitedInductionVariable()) continue;
+
+ InductionVariableData* induction_data = phi->induction_variable_data();
+ InductionVariableData::ChecksRelatedToLength* current_length_group =
+ induction_data->checks();
+ while (current_length_group != NULL) {
+ current_length_group->CloseCurrentBlock();
+ InductionVariableData::InductionVariableCheck* current_base_check =
+ current_length_group->checks();
+ InitializeLoop(induction_data);
+
+ while (current_base_check != NULL) {
+ ProcessRelatedChecks(current_base_check, induction_data);
+ while (current_base_check != NULL &&
+ current_base_check->processed()) {
+ current_base_check = current_base_check->next();
+ }
+ }
+
+ current_length_group = current_length_group->next();
+ }
+ }
+ }
+
+ private:
+ HGraph* graph_;
+ HBasicBlock* loop_header_;
+ ZoneList<Element> elements_;
+};
+
+
+void HBoundsCheckHoistingPhase::HoistRedundantBoundsChecks() {
+ InductionVariableBlocksTable table(graph());
+ table.CollectInductionVariableData(graph()->entry_block());
+ for (int i = 0; i < graph()->blocks()->length(); i++) {
+ table.EliminateRedundantBoundsChecks(graph()->blocks()->at(i));
+ }
+}
+
+} } // namespace v8::internal
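
// Editorial sketch (not part of this patch): the effect of the hoisting
// implemented above, shown on a plain C++ loop. The original form performs
// one bounds check per iteration; after hoisting, a single check of the loop
// limit against the length in the preheader (with equality allowed, matching
// set_allow_equality(true)) makes every in-loop check removable. All names
// below are illustrative only.
#include <cstddef>
#include <stdexcept>

int SumWithPerIterationChecks(const int* data, std::size_t length,
                              std::size_t limit) {
  int sum = 0;
  for (std::size_t i = 0; i < limit; i++) {
    if (i >= length) throw std::out_of_range("index");  // checked every time
    sum += data[i];
  }
  return sum;
}

int SumWithHoistedCheck(const int* data, std::size_t length,
                        std::size_t limit) {
  // One check in the preheader: if limit <= length, every i < limit is safe.
  if (limit > length) throw std::out_of_range("limit");
  int sum = 0;
  for (std::size_t i = 0; i < limit; i++) {
    sum += data[i];  // per-iteration check eliminated
  }
  return sum;
}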
diff --git a/deps/v8/src/inspector.h b/deps/v8/src/hydrogen-bch.h
index 6962e21f4..a22dacdd4 100644
--- a/deps/v8/src/inspector.h
+++ b/deps/v8/src/hydrogen-bch.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -25,36 +25,31 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#ifndef V8_HYDROGEN_BCH_H_
+#define V8_HYDROGEN_BCH_H_
-#ifndef V8_INSPECTOR_H_
-#define V8_INSPECTOR_H_
-
-// Only build this code if we're configured with the INSPECTOR.
-#ifdef INSPECTOR
-
-#include "v8.h"
-
-#include "objects.h"
+#include "hydrogen.h"
namespace v8 {
namespace internal {
-class Inspector {
+
+class HBoundsCheckHoistingPhase : public HPhase {
public:
- static void DumpObjectType(FILE* out, Object* obj, bool print_more);
- static void DumpObjectType(FILE* out, Object* obj) {
- DumpObjectType(out, obj, false);
- }
- static void DumpObjectType(Object* obj, bool print_more) {
- DumpObjectType(stdout, obj, print_more);
- }
- static void DumpObjectType(Object* obj) {
- DumpObjectType(stdout, obj, false);
+ explicit HBoundsCheckHoistingPhase(HGraph* graph)
+ : HPhase("H_Bounds checks hoisting", graph) { }
+
+ void Run() {
+ HoistRedundantBoundsChecks();
}
+
+ private:
+ void HoistRedundantBoundsChecks();
+
+ DISALLOW_COPY_AND_ASSIGN(HBoundsCheckHoistingPhase);
};
-} } // namespace v8::internal
-#endif // INSPECTOR
+} } // namespace v8::internal
-#endif // V8_INSPECTOR_H_
+#endif // V8_HYDROGEN_BCH_H_
diff --git a/deps/v8/src/hydrogen-canonicalize.cc b/deps/v8/src/hydrogen-canonicalize.cc
new file mode 100644
index 000000000..d3f72e933
--- /dev/null
+++ b/deps/v8/src/hydrogen-canonicalize.cc
@@ -0,0 +1,78 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "hydrogen-canonicalize.h"
+#include "hydrogen-redundant-phi.h"
+
+namespace v8 {
+namespace internal {
+
+void HCanonicalizePhase::Run() {
+ const ZoneList<HBasicBlock*>* blocks(graph()->blocks());
+ // Before removing no-op instructions, save their semantic value.
+ // We must be careful not to set the flag unnecessarily, because GVN
+ // cannot merge two otherwise-identical instructions whose flags differ.
+ for (int i = 0; i < blocks->length(); ++i) {
+ for (HInstructionIterator it(blocks->at(i)); !it.Done(); it.Advance()) {
+ HInstruction* instr = it.Current();
+ if (instr->IsArithmeticBinaryOperation()) {
+ if (instr->representation().IsInteger32()) {
+ if (instr->HasAtLeastOneUseWithFlagAndNoneWithout(
+ HInstruction::kTruncatingToInt32)) {
+ instr->SetFlag(HInstruction::kAllUsesTruncatingToInt32);
+ }
+ } else if (instr->representation().IsSmi()) {
+ if (instr->HasAtLeastOneUseWithFlagAndNoneWithout(
+ HInstruction::kTruncatingToSmi)) {
+ instr->SetFlag(HInstruction::kAllUsesTruncatingToSmi);
+ } else if (instr->HasAtLeastOneUseWithFlagAndNoneWithout(
+ HInstruction::kTruncatingToInt32)) {
+ // Avoid redundant minus zero check
+ instr->SetFlag(HInstruction::kAllUsesTruncatingToInt32);
+ }
+ }
+ }
+ }
+ }
+
+ // Perform actual Canonicalization pass.
+ HRedundantPhiEliminationPhase redundant_phi_eliminator(graph());
+ for (int i = 0; i < blocks->length(); ++i) {
+ // Eliminate redundant phis in the block first; changes to their inputs
+ // might have made them redundant, and eliminating them creates more
+ // opportunities for constant folding and strength reduction.
+ redundant_phi_eliminator.ProcessBlock(blocks->at(i));
+ // Now canonicalize each instruction.
+ for (HInstructionIterator it(blocks->at(i)); !it.Done(); it.Advance()) {
+ HInstruction* instr = it.Current();
+ HValue* value = instr->Canonicalize();
+ if (value != instr) instr->DeleteAndReplaceWith(value);
+ }
+ }
+}
+
+} } // namespace v8::internal
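
// Editorial sketch (not part of this patch): the shape of a per-value
// canonicalization step. A value may return a simpler equivalent of itself;
// the pass then rewires uses to that value, as DeleteAndReplaceWith() does
// above. The toy value type below is illustrative only.
#include <cstddef>
#include <vector>

struct Value {
  enum Kind { kConstant, kAdd };
  Kind kind;
  int constant_value;  // valid when kind == kConstant
  Value* left;         // valid when kind == kAdd
  Value* right;        // valid when kind == kAdd

  // Returns an equivalent, simpler value, or |this| if already canonical.
  Value* Canonicalize() {
    if (kind == kAdd && right->kind == kConstant &&
        right->constant_value == 0) {
      return left;  // x + 0 => x, mirroring folding of arithmetic no-ops
    }
    return this;
  }
};

void CanonicalizeAll(std::vector<Value*>* values) {
  for (std::size_t i = 0; i < values->size(); ++i) {
    Value* simpler = (*values)[i]->Canonicalize();
    if (simpler != (*values)[i]) (*values)[i] = simpler;  // use the simpler form
  }
}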
diff --git a/deps/v8/src/hydrogen-canonicalize.h b/deps/v8/src/hydrogen-canonicalize.h
new file mode 100644
index 000000000..d2b289bc2
--- /dev/null
+++ b/deps/v8/src/hydrogen-canonicalize.h
@@ -0,0 +1,51 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_HYDROGEN_CANONICALIZE_H_
+#define V8_HYDROGEN_CANONICALIZE_H_
+
+#include "hydrogen.h"
+
+namespace v8 {
+namespace internal {
+
+
+class HCanonicalizePhase : public HPhase {
+ public:
+ explicit HCanonicalizePhase(HGraph* graph)
+ : HPhase("H_Canonicalize", graph) { }
+
+ void Run();
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HCanonicalizePhase);
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_HYDROGEN_CANONICALIZE_H_
diff --git a/deps/v8/src/hydrogen-check-elimination.cc b/deps/v8/src/hydrogen-check-elimination.cc
new file mode 100644
index 000000000..f712a39db
--- /dev/null
+++ b/deps/v8/src/hydrogen-check-elimination.cc
@@ -0,0 +1,357 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "hydrogen-check-elimination.h"
+#include "hydrogen-alias-analysis.h"
+
+namespace v8 {
+namespace internal {
+
+static const int kMaxTrackedObjects = 10;
+typedef UniqueSet<Map>* MapSet;
+
+// The main data structure used during check elimination, which stores a
+// set of known maps for each object.
+class HCheckTable {
+ public:
+ explicit HCheckTable(Zone* zone) : zone_(zone) {
+ Kill();
+ redundant_ = 0;
+ narrowed_ = 0;
+ empty_ = 0;
+ removed_ = 0;
+ compares_true_ = 0;
+ compares_false_ = 0;
+ transitions_ = 0;
+ loads_ = 0;
+ }
+
+ void ReduceCheckMaps(HCheckMaps* instr) {
+ HValue* object = instr->value()->ActualValue();
+ int index = Find(object);
+ if (index >= 0) {
+ // entry found;
+ MapSet a = known_maps_[index];
+ MapSet i = instr->map_set().Copy(zone_);
+ if (a->IsSubset(i)) {
+ // The first check is more strict; the second is redundant.
+ if (checks_[index] != NULL) {
+ instr->DeleteAndReplaceWith(checks_[index]);
+ redundant_++;
+ } else {
+ instr->DeleteAndReplaceWith(instr->value());
+ removed_++;
+ }
+ return;
+ }
+ i = i->Intersect(a, zone_);
+ if (i->size() == 0) {
+ // Intersection is empty; probably megamorphic, which is likely to
+ // deopt anyway, so just leave things as they are.
+ empty_++;
+ } else {
+ // TODO(titzer): replace the first check with a more strict check.
+ narrowed_++;
+ }
+ } else {
+ // No entry; insert a new one.
+ Insert(object, instr, instr->map_set().Copy(zone_));
+ }
+ }
+
+ void ReduceCheckValue(HCheckValue* instr) {
+ // Canonicalize HCheckValues; they might have their values load-eliminated.
+ HValue* value = instr->Canonicalize();
+ if (value == NULL) {
+ instr->DeleteAndReplaceWith(instr->value());
+ removed_++;
+ } else if (value != instr) {
+ instr->DeleteAndReplaceWith(value);
+ redundant_++;
+ }
+ }
+
+ void ReduceLoadNamedField(HLoadNamedField* instr) {
+ // Reduce a load of the map field when it is known to be a constant.
+ if (!IsMapAccess(instr->access())) return;
+
+ HValue* object = instr->object()->ActualValue();
+ MapSet maps = FindMaps(object);
+ if (maps == NULL || maps->size() != 1) return; // Not a constant.
+
+ Unique<Map> map = maps->at(0);
+ HConstant* constant = HConstant::CreateAndInsertBefore(
+ instr->block()->graph()->zone(), map, true, instr);
+ instr->DeleteAndReplaceWith(constant);
+ loads_++;
+ }
+
+ void ReduceCheckMapValue(HCheckMapValue* instr) {
+ if (!instr->map()->IsConstant()) return; // Nothing to learn.
+
+ HValue* object = instr->value()->ActualValue();
+ // Match an HCheckMapValue(object, HConstant(map)).
+ Unique<Map> map = MapConstant(instr->map());
+ MapSet maps = FindMaps(object);
+ if (maps != NULL) {
+ if (maps->Contains(map)) {
+ if (maps->size() == 1) {
+ // Object is known to have exactly this map.
+ instr->DeleteAndReplaceWith(NULL);
+ removed_++;
+ } else {
+ // Only one map survives the check.
+ maps->Clear();
+ maps->Add(map, zone_);
+ }
+ }
+ } else {
+ // No prior information.
+ Insert(object, map);
+ }
+ }
+
+ void ReduceStoreNamedField(HStoreNamedField* instr) {
+ HValue* object = instr->object()->ActualValue();
+ if (instr->has_transition()) {
+ // This store transitions the object to a new map.
+ Kill(object);
+ Insert(object, MapConstant(instr->transition()));
+ } else if (IsMapAccess(instr->access())) {
+ // This is a store directly to the map field of the object.
+ Kill(object);
+ if (!instr->value()->IsConstant()) return;
+ Insert(object, MapConstant(instr->value()));
+ } else if (instr->CheckGVNFlag(kChangesMaps)) {
+ // This store indirectly changes the map of the object.
+ Kill(instr->object());
+ UNREACHABLE();
+ }
+ }
+
+ void ReduceCompareMap(HCompareMap* instr) {
+ MapSet maps = FindMaps(instr->value()->ActualValue());
+ if (maps == NULL) return;
+ if (maps->Contains(instr->map())) {
+ // TODO(titzer): replace with goto true branch
+ if (maps->size() == 1) compares_true_++;
+ } else {
+ // TODO(titzer): replace with goto false branch
+ compares_false_++;
+ }
+ }
+
+ void ReduceTransitionElementsKind(HTransitionElementsKind* instr) {
+ MapSet maps = FindMaps(instr->object()->ActualValue());
+ // Can only learn more about an object that already has a known set of maps.
+ if (maps == NULL) return;
+ if (maps->Contains(instr->original_map())) {
+ // If the object has the original map, it will be transitioned.
+ maps->Remove(instr->original_map());
+ maps->Add(instr->transitioned_map(), zone_);
+ } else {
+ // Object does not have the given map, thus the transition is redundant.
+ instr->DeleteAndReplaceWith(instr->object());
+ transitions_++;
+ }
+ }
+
+ // Kill everything in the table.
+ void Kill() {
+ memset(objects_, 0, sizeof(objects_));
+ }
+
+ // Kill everything in the table that may alias {object}.
+ void Kill(HValue* object) {
+ for (int i = 0; i < kMaxTrackedObjects; i++) {
+ if (objects_[i] == NULL) continue;
+ if (aliasing_.MayAlias(objects_[i], object)) objects_[i] = NULL;
+ }
+ ASSERT(Find(object) < 0);
+ }
+
+ void Print() {
+ for (int i = 0; i < kMaxTrackedObjects; i++) {
+ if (objects_[i] == NULL) continue;
+ PrintF(" checkmaps-table @%d: object #%d ", i, objects_[i]->id());
+ if (checks_[i] != NULL) {
+ PrintF("check #%d ", checks_[i]->id());
+ }
+ MapSet list = known_maps_[i];
+ PrintF("%d maps { ", list->size());
+ for (int j = 0; j < list->size(); j++) {
+ if (j > 0) PrintF(", ");
+ PrintF("%" V8PRIxPTR, list->at(j).Hashcode());
+ }
+ PrintF(" }\n");
+ }
+ }
+
+ void PrintStats() {
+ if (redundant_ > 0) PrintF(" redundant = %2d\n", redundant_);
+ if (removed_ > 0) PrintF(" removed = %2d\n", removed_);
+ if (narrowed_ > 0) PrintF(" narrowed = %2d\n", narrowed_);
+ if (loads_ > 0) PrintF(" loads = %2d\n", loads_);
+ if (empty_ > 0) PrintF(" empty = %2d\n", empty_);
+ if (compares_true_ > 0) PrintF(" cmp_true = %2d\n", compares_true_);
+ if (compares_false_ > 0) PrintF(" cmp_false = %2d\n", compares_false_);
+ if (transitions_ > 0) PrintF(" transitions = %2d\n", transitions_);
+ }
+
+ private:
+ int Find(HValue* object) {
+ for (int i = 0; i < kMaxTrackedObjects; i++) {
+ if (objects_[i] == NULL) continue;
+ if (aliasing_.MustAlias(objects_[i], object)) return i;
+ }
+ return -1;
+ }
+
+ MapSet FindMaps(HValue* object) {
+ int index = Find(object);
+ return index < 0 ? NULL : known_maps_[index];
+ }
+
+ void Insert(HValue* object, Unique<Map> map) {
+ MapSet list = new(zone_) UniqueSet<Map>();
+ list->Add(map, zone_);
+ Insert(object, NULL, list);
+ }
+
+ void Insert(HValue* object, HCheckMaps* check, MapSet maps) {
+ for (int i = 0; i < kMaxTrackedObjects; i++) {
+ // TODO(titzer): drop old entries instead of disallowing new ones.
+ if (objects_[i] == NULL) {
+ objects_[i] = object;
+ checks_[i] = check;
+ known_maps_[i] = maps;
+ return;
+ }
+ }
+ }
+
+ bool IsMapAccess(HObjectAccess access) {
+ return access.IsInobject() && access.offset() == JSObject::kMapOffset;
+ }
+
+ Unique<Map> MapConstant(HValue* value) {
+ return Unique<Map>::cast(HConstant::cast(value)->GetUnique());
+ }
+
+ Zone* zone_;
+ HValue* objects_[kMaxTrackedObjects];
+ HValue* checks_[kMaxTrackedObjects];
+ MapSet known_maps_[kMaxTrackedObjects];
+ HAliasAnalyzer aliasing_;
+ int redundant_;
+ int removed_;
+ int narrowed_;
+ int loads_;
+ int empty_;
+ int compares_true_;
+ int compares_false_;
+ int transitions_;
+};
+
+
+void HCheckEliminationPhase::Run() {
+ for (int i = 0; i < graph()->blocks()->length(); i++) {
+ EliminateLocalChecks(graph()->blocks()->at(i));
+ }
+}
+
+
+// For code de-uglification.
+#define TRACE(x) if (FLAG_trace_check_elimination) PrintF x
+
+
+// Eliminate checks local to a block.
+void HCheckEliminationPhase::EliminateLocalChecks(HBasicBlock* block) {
+ HCheckTable table(zone());
+ TRACE(("-- check-elim B%d ------------------------------------------------\n",
+ block->block_id()));
+
+ for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
+ bool changed = false;
+ HInstruction* instr = it.Current();
+
+ switch (instr->opcode()) {
+ case HValue::kCheckMaps: {
+ table.ReduceCheckMaps(HCheckMaps::cast(instr));
+ changed = true;
+ break;
+ }
+ case HValue::kCheckValue: {
+ table.ReduceCheckValue(HCheckValue::cast(instr));
+ changed = true;
+ break;
+ }
+ case HValue::kLoadNamedField: {
+ table.ReduceLoadNamedField(HLoadNamedField::cast(instr));
+ changed = true;
+ break;
+ }
+ case HValue::kStoreNamedField: {
+ table.ReduceStoreNamedField(HStoreNamedField::cast(instr));
+ changed = true;
+ break;
+ }
+ case HValue::kCompareMap: {
+ table.ReduceCompareMap(HCompareMap::cast(instr));
+ changed = true;
+ break;
+ }
+ case HValue::kTransitionElementsKind: {
+ table.ReduceTransitionElementsKind(
+ HTransitionElementsKind::cast(instr));
+ changed = true;
+ break;
+ }
+ case HValue::kCheckMapValue: {
+ table.ReduceCheckMapValue(HCheckMapValue::cast(instr));
+ changed = true;
+ break;
+ }
+ default: {
+ // If the instruction changes maps uncontrollably, kill the whole town.
+ if (instr->CheckGVNFlag(kChangesMaps)) {
+ table.Kill();
+ changed = true;
+ }
+ }
+ // Improvements possible:
+ // - eliminate HCheckSmi and HCheckHeapObject
+ }
+
+ if (changed && FLAG_trace_check_elimination) table.Print();
+ }
+
+ if (FLAG_trace_check_elimination) table.PrintStats();
+}
+
+
+} } // namespace v8::internal
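
// Editorial sketch (not part of this patch): the core idea behind the
// HCheckTable above, using plain integers for objects and maps. A check
// whose map set is already implied by prior facts is redundant; otherwise
// the surviving facts are narrowed to the intersection, and a map-changing
// store kills what is known. Types and names below are illustrative only.
#include <cstdio>
#include <map>
#include <set>

class KnownMaps {
 public:
  // Returns true if the check is redundant given what is already known.
  bool Check(int object, const std::set<int>& maps) {
    std::map<int, std::set<int> >::iterator it = known_.find(object);
    if (it == known_.end()) {
      known_[object] = maps;  // first fact learned about this object
      return false;
    }
    std::set<int>& known = it->second;
    bool subset = true;
    for (std::set<int>::const_iterator m = known.begin();
         m != known.end(); ++m) {
      if (maps.count(*m) == 0) { subset = false; break; }
    }
    if (subset) return true;  // prior facts already imply this check
    // After the check passes, the object's map must be in both sets.
    std::set<int> narrowed;
    for (std::set<int>::const_iterator m = known.begin();
         m != known.end(); ++m) {
      if (maps.count(*m) != 0) narrowed.insert(*m);
    }
    known = narrowed;
    return false;
  }

  void KillObject(int object) { known_.erase(object); }  // map may have changed
  void KillAll() { known_.clear(); }                     // unknown side effects

 private:
  std::map<int, std::set<int> > known_;
};

int main() {
  KnownMaps table;
  std::set<int> maps; maps.insert(1); maps.insert(2);
  std::printf("first:  %d\n", table.Check(7, maps));  // 0: not redundant
  std::printf("second: %d\n", table.Check(7, maps));  // 1: redundant
  table.KillObject(7);                                // e.g. map-changing store
  std::printf("third:  %d\n", table.Check(7, maps));  // 0: facts were killed
}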
diff --git a/deps/v8/src/hydrogen-check-elimination.h b/deps/v8/src/hydrogen-check-elimination.h
new file mode 100644
index 000000000..fa01964f6
--- /dev/null
+++ b/deps/v8/src/hydrogen-check-elimination.h
@@ -0,0 +1,52 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_HYDROGEN_CHECK_ELIMINATION_H_
+#define V8_HYDROGEN_CHECK_ELIMINATION_H_
+
+#include "hydrogen.h"
+
+namespace v8 {
+namespace internal {
+
+
+// Remove CheckMaps instructions through flow- and branch-sensitive analysis.
+class HCheckEliminationPhase : public HPhase {
+ public:
+ explicit HCheckEliminationPhase(HGraph* graph)
+ : HPhase("H_Check Elimination", graph) { }
+
+ void Run();
+
+ private:
+ void EliminateLocalChecks(HBasicBlock* block);
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_HYDROGEN_CHECK_ELIMINATION_H_
diff --git a/deps/v8/src/hydrogen-dce.cc b/deps/v8/src/hydrogen-dce.cc
new file mode 100644
index 000000000..e101ee5bc
--- /dev/null
+++ b/deps/v8/src/hydrogen-dce.cc
@@ -0,0 +1,129 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "hydrogen-dce.h"
+#include "v8.h"
+
+namespace v8 {
+namespace internal {
+
+void HDeadCodeEliminationPhase::MarkLive(
+ HValue* instr, ZoneList<HValue*>* worklist) {
+ if (instr->CheckFlag(HValue::kIsLive)) return; // Already live.
+
+ if (FLAG_trace_dead_code_elimination) PrintLive(NULL, instr);
+
+ // Transitively mark all inputs of live instructions live.
+ worklist->Add(instr, zone());
+ while (!worklist->is_empty()) {
+ HValue* instr = worklist->RemoveLast();
+ instr->SetFlag(HValue::kIsLive);
+ for (int i = 0; i < instr->OperandCount(); ++i) {
+ HValue* input = instr->OperandAt(i);
+ if (!input->CheckFlag(HValue::kIsLive)) {
+ input->SetFlag(HValue::kIsLive);
+ worklist->Add(input, zone());
+ if (FLAG_trace_dead_code_elimination) PrintLive(instr, input);
+ }
+ }
+ }
+}
+
+
+void HDeadCodeEliminationPhase::PrintLive(HValue* ref, HValue* instr) {
+ HeapStringAllocator allocator;
+ StringStream stream(&allocator);
+ if (ref != NULL) {
+ ref->PrintTo(&stream);
+ } else {
+ stream.Add("root ");
+ }
+ stream.Add(" -> ");
+ instr->PrintTo(&stream);
+ PrintF("[MarkLive %s]\n", *stream.ToCString());
+}
+
+
+void HDeadCodeEliminationPhase::MarkLiveInstructions() {
+ ZoneList<HValue*> worklist(10, zone());
+
+ // Transitively mark all live instructions, starting from roots.
+ for (int i = 0; i < graph()->blocks()->length(); ++i) {
+ HBasicBlock* block = graph()->blocks()->at(i);
+ for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
+ HInstruction* instr = it.Current();
+ if (instr->CannotBeEliminated()) MarkLive(instr, &worklist);
+ }
+ for (int j = 0; j < block->phis()->length(); j++) {
+ HPhi* phi = block->phis()->at(j);
+ if (phi->CannotBeEliminated()) MarkLive(phi, &worklist);
+ }
+ }
+
+ ASSERT(worklist.is_empty()); // Should have processed everything.
+}
+
+
+void HDeadCodeEliminationPhase::RemoveDeadInstructions() {
+ ZoneList<HPhi*> worklist(graph()->blocks()->length(), zone());
+
+ // Remove any instruction not marked kIsLive.
+ for (int i = 0; i < graph()->blocks()->length(); ++i) {
+ HBasicBlock* block = graph()->blocks()->at(i);
+ for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
+ HInstruction* instr = it.Current();
+ if (!instr->CheckFlag(HValue::kIsLive)) {
+ // Instruction has not been marked live, so remove it.
+ instr->DeleteAndReplaceWith(NULL);
+ } else {
+ // Clear the liveness flag to leave the graph clean for the next DCE.
+ instr->ClearFlag(HValue::kIsLive);
+ }
+ }
+ // Collect phis that are dead and remove them in the next pass.
+ for (int j = 0; j < block->phis()->length(); j++) {
+ HPhi* phi = block->phis()->at(j);
+ if (!phi->CheckFlag(HValue::kIsLive)) {
+ worklist.Add(phi, zone());
+ } else {
+ phi->ClearFlag(HValue::kIsLive);
+ }
+ }
+ }
+
+ // Process phis separately to avoid simultaneously mutating the phi list.
+ while (!worklist.is_empty()) {
+ HPhi* phi = worklist.RemoveLast();
+ HBasicBlock* block = phi->block();
+ phi->DeleteAndReplaceWith(NULL);
+ if (phi->HasMergedIndex()) {
+ block->RecordDeletedPhi(phi->merged_index());
+ }
+ }
+}
+
+} } // namespace v8::internal
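
// Editorial sketch (not part of this patch): worklist-based dead code
// elimination in the same two steps used above: transitively mark everything
// reachable from instructions with side effects, then delete the rest.
// The toy instruction type is illustrative only.
#include <cstddef>
#include <vector>

struct Instr {
  bool has_side_effects;
  bool live;
  std::vector<Instr*> operands;
};

void MarkLive(std::vector<Instr*>* all) {
  std::vector<Instr*> worklist;
  for (std::size_t i = 0; i < all->size(); ++i) {
    if ((*all)[i]->has_side_effects) worklist.push_back((*all)[i]);  // roots
  }
  while (!worklist.empty()) {
    Instr* instr = worklist.back();
    worklist.pop_back();
    if (instr->live) continue;  // already processed
    instr->live = true;
    for (std::size_t i = 0; i < instr->operands.size(); ++i) {
      if (!instr->operands[i]->live) worklist.push_back(instr->operands[i]);
    }
  }
}

void RemoveDead(std::vector<Instr*>* all) {
  std::vector<Instr*> kept;
  for (std::size_t i = 0; i < all->size(); ++i) {
    if ((*all)[i]->live) kept.push_back((*all)[i]);  // everything else is dead
  }
  all->swap(kept);
}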
diff --git a/deps/v8/src/hydrogen-dce.h b/deps/v8/src/hydrogen-dce.h
new file mode 100644
index 000000000..2d73b380e
--- /dev/null
+++ b/deps/v8/src/hydrogen-dce.h
@@ -0,0 +1,57 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_HYDROGEN_DCE_H_
+#define V8_HYDROGEN_DCE_H_
+
+#include "hydrogen.h"
+
+namespace v8 {
+namespace internal {
+
+
+class HDeadCodeEliminationPhase : public HPhase {
+ public:
+ explicit HDeadCodeEliminationPhase(HGraph* graph)
+ : HPhase("H_Dead code elimination", graph) { }
+
+ void Run() {
+ MarkLiveInstructions();
+ RemoveDeadInstructions();
+ }
+
+ private:
+ void MarkLive(HValue* instr, ZoneList<HValue*>* worklist);
+ void PrintLive(HValue* ref, HValue* instr);
+ void MarkLiveInstructions();
+ void RemoveDeadInstructions();
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_HYDROGEN_DCE_H_
diff --git a/deps/v8/src/hydrogen-dehoist.cc b/deps/v8/src/hydrogen-dehoist.cc
new file mode 100644
index 000000000..bdf2cfb25
--- /dev/null
+++ b/deps/v8/src/hydrogen-dehoist.cc
@@ -0,0 +1,80 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "hydrogen-dehoist.h"
+
+namespace v8 {
+namespace internal {
+
+static void DehoistArrayIndex(ArrayInstructionInterface* array_operation) {
+ HValue* index = array_operation->GetKey()->ActualValue();
+ if (!index->representation().IsSmiOrInteger32()) return;
+ if (!index->IsAdd() && !index->IsSub()) return;
+
+ HConstant* constant;
+ HValue* subexpression;
+ HBinaryOperation* binary_operation = HBinaryOperation::cast(index);
+ if (binary_operation->left()->IsConstant() && index->IsAdd()) {
+ subexpression = binary_operation->right();
+ constant = HConstant::cast(binary_operation->left());
+ } else if (binary_operation->right()->IsConstant()) {
+ subexpression = binary_operation->left();
+ constant = HConstant::cast(binary_operation->right());
+ } else {
+ return;
+ }
+
+ if (!constant->HasInteger32Value()) return;
+ int32_t sign = binary_operation->IsSub() ? -1 : 1;
+ int32_t value = constant->Integer32Value() * sign;
+ // We limit offset values to 30 bits because we want to avoid the risk of
+ // overflows when the offset is added to the object header size.
+ if (value >= 1 << array_operation->MaxIndexOffsetBits() || value < 0) return;
+ array_operation->SetKey(subexpression);
+ if (binary_operation->HasNoUses()) {
+ binary_operation->DeleteAndReplaceWith(NULL);
+ }
+ array_operation->SetIndexOffset(static_cast<uint32_t>(value));
+ array_operation->SetDehoisted(true);
+}
+
+
+void HDehoistIndexComputationsPhase::Run() {
+ const ZoneList<HBasicBlock*>* blocks(graph()->blocks());
+ for (int i = 0; i < blocks->length(); ++i) {
+ for (HInstructionIterator it(blocks->at(i)); !it.Done(); it.Advance()) {
+ HInstruction* instr = it.Current();
+ if (instr->IsLoadKeyed()) {
+ DehoistArrayIndex(HLoadKeyed::cast(instr));
+ } else if (instr->IsStoreKeyed()) {
+ DehoistArrayIndex(HStoreKeyed::cast(instr));
+ }
+ }
+ }
+}
+
+} } // namespace v8::internal
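
// Editorial sketch (not part of this patch): what "dehoisting" an index
// computation means for a keyed access. A constant addend in the index is
// folded into the access as a fixed offset, so the add itself can go away
// once it has no other uses. Types and names below are illustrative only.
#include <cstdint>

struct KeyedAccess {
  int32_t index_variable;  // stands in for the non-constant subexpression
  uint32_t index_offset;   // constant part folded into the access
};

// Turns "key = variable + constant" into "key = variable, offset = constant",
// mirroring SetKey()/SetIndexOffset() above. Returns false if not applicable.
bool Dehoist(KeyedAccess* access, int32_t variable_part, int32_t constant_part,
             int32_t max_offset_bits) {
  if (constant_part < 0) return false;  // keep offsets non-negative
  if (constant_part >= (1 << max_offset_bits)) return false;  // avoid overflow
  access->index_variable = variable_part;
  access->index_offset = static_cast<uint32_t>(constant_part);
  return true;
}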
diff --git a/deps/v8/src/hydrogen-dehoist.h b/deps/v8/src/hydrogen-dehoist.h
new file mode 100644
index 000000000..140dc6e0e
--- /dev/null
+++ b/deps/v8/src/hydrogen-dehoist.h
@@ -0,0 +1,51 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_HYDROGEN_DEHOIST_H_
+#define V8_HYDROGEN_DEHOIST_H_
+
+#include "hydrogen.h"
+
+namespace v8 {
+namespace internal {
+
+
+class HDehoistIndexComputationsPhase : public HPhase {
+ public:
+ explicit HDehoistIndexComputationsPhase(HGraph* graph)
+ : HPhase("H_Dehoist index computations", graph) { }
+
+ void Run();
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HDehoistIndexComputationsPhase);
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_HYDROGEN_DEHOIST_H_
diff --git a/deps/v8/src/hydrogen-environment-liveness.cc b/deps/v8/src/hydrogen-environment-liveness.cc
new file mode 100644
index 000000000..fad9755e5
--- /dev/null
+++ b/deps/v8/src/hydrogen-environment-liveness.cc
@@ -0,0 +1,244 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#include "hydrogen-environment-liveness.h"
+
+
+namespace v8 {
+namespace internal {
+
+
+HEnvironmentLivenessAnalysisPhase::HEnvironmentLivenessAnalysisPhase(
+ HGraph* graph)
+ : HPhase("H_Environment liveness analysis", graph),
+ block_count_(graph->blocks()->length()),
+ maximum_environment_size_(graph->maximum_environment_size()),
+ live_at_block_start_(block_count_, zone()),
+ first_simulate_(block_count_, zone()),
+ first_simulate_invalid_for_index_(block_count_, zone()),
+ markers_(maximum_environment_size_, zone()),
+ collect_markers_(true),
+ last_simulate_(NULL),
+ went_live_since_last_simulate_(maximum_environment_size_, zone()) {
+ ASSERT(maximum_environment_size_ > 0);
+ for (int i = 0; i < block_count_; ++i) {
+ live_at_block_start_.Add(
+ new(zone()) BitVector(maximum_environment_size_, zone()), zone());
+ first_simulate_.Add(NULL, zone());
+ first_simulate_invalid_for_index_.Add(
+ new(zone()) BitVector(maximum_environment_size_, zone()), zone());
+ }
+}
+
+
+void HEnvironmentLivenessAnalysisPhase::ZapEnvironmentSlot(
+ int index, HSimulate* simulate) {
+ int operand_index = simulate->ToOperandIndex(index);
+ if (operand_index == -1) {
+ simulate->AddAssignedValue(index, graph()->GetConstantUndefined());
+ } else {
+ simulate->SetOperandAt(operand_index, graph()->GetConstantUndefined());
+ }
+}
+
+
+void HEnvironmentLivenessAnalysisPhase::ZapEnvironmentSlotsInSuccessors(
+ HBasicBlock* block, BitVector* live) {
+ // When a value is live in successor A but dead in B, we must
+ // explicitly zap it in B.
+ for (HSuccessorIterator it(block->end()); !it.Done(); it.Advance()) {
+ HBasicBlock* successor = it.Current();
+ int successor_id = successor->block_id();
+ BitVector* live_in_successor = live_at_block_start_[successor_id];
+ if (live_in_successor->Equals(*live)) continue;
+ for (int i = 0; i < live->length(); ++i) {
+ if (!live->Contains(i)) continue;
+ if (live_in_successor->Contains(i)) continue;
+ if (first_simulate_invalid_for_index_.at(successor_id)->Contains(i)) {
+ continue;
+ }
+ HSimulate* simulate = first_simulate_.at(successor_id);
+ if (simulate == NULL) continue;
+ ASSERT(simulate->closure().is_identical_to(
+ block->last_environment()->closure()));
+ ZapEnvironmentSlot(i, simulate);
+ }
+ }
+}
+
+
+void HEnvironmentLivenessAnalysisPhase::ZapEnvironmentSlotsForInstruction(
+ HEnvironmentMarker* marker) {
+ if (!marker->CheckFlag(HValue::kEndsLiveRange)) return;
+ HSimulate* simulate = marker->next_simulate();
+ if (simulate != NULL) {
+ ASSERT(simulate->closure().is_identical_to(marker->closure()));
+ ZapEnvironmentSlot(marker->index(), simulate);
+ }
+}
+
+
+void HEnvironmentLivenessAnalysisPhase::UpdateLivenessAtBlockEnd(
+ HBasicBlock* block,
+ BitVector* live) {
+ // Liveness at the end of each block: union of liveness in successors.
+ live->Clear();
+ for (HSuccessorIterator it(block->end()); !it.Done(); it.Advance()) {
+ live->Union(*live_at_block_start_[it.Current()->block_id()]);
+ }
+}
+
+
+void HEnvironmentLivenessAnalysisPhase::UpdateLivenessAtInstruction(
+ HInstruction* instr,
+ BitVector* live) {
+ switch (instr->opcode()) {
+ case HValue::kEnvironmentMarker: {
+ HEnvironmentMarker* marker = HEnvironmentMarker::cast(instr);
+ int index = marker->index();
+ if (!live->Contains(index)) {
+ marker->SetFlag(HValue::kEndsLiveRange);
+ } else {
+ marker->ClearFlag(HValue::kEndsLiveRange);
+ }
+ if (!went_live_since_last_simulate_.Contains(index)) {
+ marker->set_next_simulate(last_simulate_);
+ }
+ if (marker->kind() == HEnvironmentMarker::LOOKUP) {
+ live->Add(index);
+ } else {
+ ASSERT(marker->kind() == HEnvironmentMarker::BIND);
+ live->Remove(index);
+ went_live_since_last_simulate_.Add(index);
+ }
+ if (collect_markers_) {
+ // Populate |markers_| list during the first pass.
+ markers_.Add(marker, zone());
+ }
+ break;
+ }
+ case HValue::kLeaveInlined:
+ // No environment values are live at the end of an inlined section.
+ live->Clear();
+ last_simulate_ = NULL;
+
+ // The following ASSERTs guard the assumption used in case
+ // kEnterInlined below:
+ ASSERT(instr->next()->IsSimulate());
+ ASSERT(instr->next()->next()->IsGoto());
+
+ break;
+ case HValue::kEnterInlined: {
+ // Those environment values are live that are live at any return
+ // target block. Here we make use of the fact that the end of an
+ // inline sequence always looks like this: HLeaveInlined, HSimulate,
+ // HGoto (to return_target block), with no environment lookups in
+ // between (see ASSERTs above).
+ HEnterInlined* enter = HEnterInlined::cast(instr);
+ live->Clear();
+ for (int i = 0; i < enter->return_targets()->length(); ++i) {
+ int return_id = enter->return_targets()->at(i)->block_id();
+ live->Union(*live_at_block_start_[return_id]);
+ }
+ last_simulate_ = NULL;
+ break;
+ }
+ case HValue::kSimulate:
+ last_simulate_ = HSimulate::cast(instr);
+ went_live_since_last_simulate_.Clear();
+ break;
+ default:
+ break;
+ }
+}
+
+
+void HEnvironmentLivenessAnalysisPhase::Run() {
+ ASSERT(maximum_environment_size_ > 0);
+
+ // Main iteration. Compute liveness of environment slots, and store it
+ // for each block until it doesn't change any more. For efficiency, visit
+ // blocks in reverse order and walk backwards through each block. We
+ // need several iterations to propagate liveness through nested loops.
+ BitVector live(maximum_environment_size_, zone());
+ BitVector worklist(block_count_, zone());
+ for (int i = 0; i < block_count_; ++i) {
+ worklist.Add(i);
+ }
+ while (!worklist.IsEmpty()) {
+ for (int block_id = block_count_ - 1; block_id >= 0; --block_id) {
+ if (!worklist.Contains(block_id)) {
+ continue;
+ }
+ worklist.Remove(block_id);
+ last_simulate_ = NULL;
+
+ HBasicBlock* block = graph()->blocks()->at(block_id);
+ UpdateLivenessAtBlockEnd(block, &live);
+
+ for (HInstruction* instr = block->last(); instr != NULL;
+ instr = instr->previous()) {
+ UpdateLivenessAtInstruction(instr, &live);
+ }
+
+ // Reached the start of the block, do necessary bookkeeping:
+ // store computed information for this block and add predecessors
+ // to the work list as necessary.
+ first_simulate_.Set(block_id, last_simulate_);
+ first_simulate_invalid_for_index_[block_id]->CopyFrom(
+ went_live_since_last_simulate_);
+ if (live_at_block_start_[block_id]->UnionIsChanged(live)) {
+ for (int i = 0; i < block->predecessors()->length(); ++i) {
+ worklist.Add(block->predecessors()->at(i)->block_id());
+ }
+ if (block->IsInlineReturnTarget()) {
+ worklist.Add(block->inlined_entry_block()->block_id());
+ }
+ }
+ }
+ // Only collect bind/lookup instructions during the first pass.
+ collect_markers_ = false;
+ }
+
+ // Analysis finished. Zap dead environment slots.
+ for (int i = 0; i < markers_.length(); ++i) {
+ ZapEnvironmentSlotsForInstruction(markers_[i]);
+ }
+ for (int block_id = block_count_ - 1; block_id >= 0; --block_id) {
+ HBasicBlock* block = graph()->blocks()->at(block_id);
+ UpdateLivenessAtBlockEnd(block, &live);
+ ZapEnvironmentSlotsInSuccessors(block, &live);
+ }
+
+ // Finally, remove the HEnvironment{Bind,Lookup} markers.
+ for (int i = 0; i < markers_.length(); ++i) {
+ markers_[i]->DeleteAndReplaceWith(NULL);
+ }
+}
+
+} } // namespace v8::internal
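
// Editorial sketch (not part of this patch): the classic backwards liveness
// recurrence that the pass above instantiates for environment slots, using
// std::bitset in place of BitVector. A bind kills a slot, a lookup revives
// it, and live-out of a block is the union of the live-in sets of its
// successors. The toy types below are illustrative only.
#include <bitset>
#include <vector>

const int kSlots = 8;

struct SlotOp {
  bool is_bind;  // true: writes the slot (kill); false: reads it (gen)
  int slot;
};

// Computes live-in for one block given its operations and its live-out set.
std::bitset<kSlots> LiveIn(const std::vector<SlotOp>& ops,
                           std::bitset<kSlots> live_out) {
  std::bitset<kSlots> live = live_out;
  // Walk backwards, as UpdateLivenessAtInstruction() does above.
  for (int i = static_cast<int>(ops.size()) - 1; i >= 0; --i) {
    if (ops[i].is_bind) {
      live.reset(ops[i].slot);  // value is overwritten before any later read
    } else {
      live.set(ops[i].slot);    // value is read, so it is live before this op
    }
  }
  return live;
}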
diff --git a/deps/v8/src/hydrogen-environment-liveness.h b/deps/v8/src/hydrogen-environment-liveness.h
new file mode 100644
index 000000000..248ec5ce5
--- /dev/null
+++ b/deps/v8/src/hydrogen-environment-liveness.h
@@ -0,0 +1,88 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_HYDROGEN_ENVIRONMENT_LIVENESS_H_
+#define V8_HYDROGEN_ENVIRONMENT_LIVENESS_H_
+
+
+#include "hydrogen.h"
+
+namespace v8 {
+namespace internal {
+
+
+// Trims live ranges of environment slots by doing explicit liveness analysis.
+// Values in the environment are kept alive by every subsequent LInstruction
+// that is assigned an LEnvironment, which creates register pressure and
+// unnecessary spill slot moves. Therefore it is beneficial to trim the
+// live ranges of environment slots by zapping them with a constant after
+// the last lookup that refers to them.
+// Slots are identified by their index and only affected if whitelisted in
+// HOptimizedGraphBuilder::IsEligibleForEnvironmentLivenessAnalysis().
+class HEnvironmentLivenessAnalysisPhase : public HPhase {
+ public:
+ explicit HEnvironmentLivenessAnalysisPhase(HGraph* graph);
+
+ void Run();
+
+ private:
+ void ZapEnvironmentSlot(int index, HSimulate* simulate);
+ void ZapEnvironmentSlotsInSuccessors(HBasicBlock* block, BitVector* live);
+ void ZapEnvironmentSlotsForInstruction(HEnvironmentMarker* marker);
+ void UpdateLivenessAtBlockEnd(HBasicBlock* block, BitVector* live);
+ void UpdateLivenessAtInstruction(HInstruction* instr, BitVector* live);
+
+ int block_count_;
+
+ // Largest number of local variables in any environment in the graph
+ // (including inlined environments).
+ int maximum_environment_size_;
+
+ // Per-block data. All these lists are indexed by block_id.
+ ZoneList<BitVector*> live_at_block_start_;
+ ZoneList<HSimulate*> first_simulate_;
+ ZoneList<BitVector*> first_simulate_invalid_for_index_;
+
+ // List of all HEnvironmentMarker instructions for quick iteration/deletion.
+ // It is populated during the first pass over the graph, controlled by
+ // |collect_markers_|.
+ ZoneList<HEnvironmentMarker*> markers_;
+ bool collect_markers_;
+
+ // Keeps track of the last simulate seen, as well as the environment slots
+ // for which a new live range has started since (so they must not be zapped
+ // in that simulate when the end of another live range of theirs is found).
+ HSimulate* last_simulate_;
+ BitVector went_live_since_last_simulate_;
+
+ DISALLOW_COPY_AND_ASSIGN(HEnvironmentLivenessAnalysisPhase);
+};
+
+
+} } // namespace v8::internal
+
+#endif /* V8_HYDROGEN_ENVIRONMENT_LIVENESS_H_ */
diff --git a/deps/v8/src/hydrogen-escape-analysis.cc b/deps/v8/src/hydrogen-escape-analysis.cc
new file mode 100644
index 000000000..102301992
--- /dev/null
+++ b/deps/v8/src/hydrogen-escape-analysis.cc
@@ -0,0 +1,333 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "hydrogen-escape-analysis.h"
+
+namespace v8 {
+namespace internal {
+
+
+bool HEscapeAnalysisPhase::HasNoEscapingUses(HValue* value, int size) {
+ for (HUseIterator it(value->uses()); !it.Done(); it.Advance()) {
+ HValue* use = it.value();
+ if (use->HasEscapingOperandAt(it.index())) {
+ if (FLAG_trace_escape_analysis) {
+ PrintF("#%d (%s) escapes through #%d (%s) @%d\n", value->id(),
+ value->Mnemonic(), use->id(), use->Mnemonic(), it.index());
+ }
+ return false;
+ }
+ if (use->HasOutOfBoundsAccess(size)) {
+ if (FLAG_trace_escape_analysis) {
+ PrintF("#%d (%s) out of bounds at #%d (%s) @%d\n", value->id(),
+ value->Mnemonic(), use->id(), use->Mnemonic(), it.index());
+ }
+ return false;
+ }
+ int redefined_index = use->RedefinedOperandIndex();
+ if (redefined_index == it.index() && !HasNoEscapingUses(use, size)) {
+ if (FLAG_trace_escape_analysis) {
+ PrintF("#%d (%s) escapes redefinition #%d (%s) @%d\n", value->id(),
+ value->Mnemonic(), use->id(), use->Mnemonic(), it.index());
+ }
+ return false;
+ }
+ }
+ return true;
+}
+
+
+void HEscapeAnalysisPhase::CollectCapturedValues() {
+ int block_count = graph()->blocks()->length();
+ for (int i = 0; i < block_count; ++i) {
+ HBasicBlock* block = graph()->blocks()->at(i);
+ for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
+ HInstruction* instr = it.Current();
+ if (!instr->IsAllocate()) continue;
+ HAllocate* allocate = HAllocate::cast(instr);
+ if (!allocate->size()->IsInteger32Constant()) continue;
+ int size_in_bytes = allocate->size()->GetInteger32Constant();
+ if (HasNoEscapingUses(instr, size_in_bytes)) {
+ if (FLAG_trace_escape_analysis) {
+ PrintF("#%d (%s) is being captured\n", instr->id(),
+ instr->Mnemonic());
+ }
+ captured_.Add(instr, zone());
+ }
+ }
+ }
+}
+
+
+HCapturedObject* HEscapeAnalysisPhase::NewState(HInstruction* previous) {
+ Zone* zone = graph()->zone();
+ HCapturedObject* state =
+ new(zone) HCapturedObject(number_of_values_, number_of_objects_, zone);
+ state->InsertAfter(previous);
+ return state;
+}
+
+
+// Create a new state for replacing HAllocate instructions.
+HCapturedObject* HEscapeAnalysisPhase::NewStateForAllocation(
+ HInstruction* previous) {
+ HConstant* undefined = graph()->GetConstantUndefined();
+ HCapturedObject* state = NewState(previous);
+ for (int index = 0; index < number_of_values_; index++) {
+ state->SetOperandAt(index, undefined);
+ }
+ return state;
+}
+
+
+// Create a new state full of phis for loop header entries.
+HCapturedObject* HEscapeAnalysisPhase::NewStateForLoopHeader(
+ HInstruction* previous,
+ HCapturedObject* old_state) {
+ HBasicBlock* block = previous->block();
+ HCapturedObject* state = NewState(previous);
+ for (int index = 0; index < number_of_values_; index++) {
+ HValue* operand = old_state->OperandAt(index);
+ HPhi* phi = NewPhiAndInsert(block, operand, index);
+ state->SetOperandAt(index, phi);
+ }
+ return state;
+}
+
+
+// Create a new state by copying an existing one.
+HCapturedObject* HEscapeAnalysisPhase::NewStateCopy(
+ HInstruction* previous,
+ HCapturedObject* old_state) {
+ HCapturedObject* state = NewState(previous);
+ for (int index = 0; index < number_of_values_; index++) {
+ HValue* operand = old_state->OperandAt(index);
+ state->SetOperandAt(index, operand);
+ }
+ return state;
+}
+
+
+// Insert a newly created phi into the given block and fill all incoming
+// edges with the given value.
+HPhi* HEscapeAnalysisPhase::NewPhiAndInsert(HBasicBlock* block,
+ HValue* incoming_value,
+ int index) {
+ Zone* zone = graph()->zone();
+ HPhi* phi = new(zone) HPhi(HPhi::kInvalidMergedIndex, zone);
+ for (int i = 0; i < block->predecessors()->length(); i++) {
+ phi->AddInput(incoming_value);
+ }
+ block->AddPhi(phi);
+ return phi;
+}
+
+
+// Insert a newly created value check as a replacement for map checks.
+HValue* HEscapeAnalysisPhase::NewMapCheckAndInsert(HCapturedObject* state,
+ HCheckMaps* mapcheck) {
+ Zone* zone = graph()->zone();
+ HValue* value = state->map_value();
+ // TODO(mstarzinger): This will narrow a map check against a set of maps
+ // down to the first element in the set. Revisit and fix this.
+ HCheckValue* check = HCheckValue::New(
+ zone, NULL, value, mapcheck->first_map(), false);
+ check->InsertBefore(mapcheck);
+ return check;
+}
+
+
+// Performs a forward data-flow analysis of all loads and stores on the
+// given captured allocation. This uses a reverse post-order iteration
+// over affected basic blocks. All non-escaping instructions are handled
+// and replaced during the analysis.
+void HEscapeAnalysisPhase::AnalyzeDataFlow(HInstruction* allocate) {
+ HBasicBlock* allocate_block = allocate->block();
+ block_states_.AddBlock(NULL, graph()->blocks()->length(), zone());
+
+ // Iterate all blocks starting with the allocation block, since the
+ // allocation cannot dominate blocks that come before.
+ int start = allocate_block->block_id();
+ for (int i = start; i < graph()->blocks()->length(); i++) {
+ HBasicBlock* block = graph()->blocks()->at(i);
+ HCapturedObject* state = StateAt(block);
+
+ // Skip blocks that are not dominated by the captured allocation.
+ if (!allocate_block->Dominates(block) && allocate_block != block) continue;
+ if (FLAG_trace_escape_analysis) {
+ PrintF("Analyzing data-flow in B%d\n", block->block_id());
+ }
+
+ // Go through all instructions of the current block.
+ for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
+ HInstruction* instr = it.Current();
+ switch (instr->opcode()) {
+ case HValue::kAllocate: {
+ if (instr != allocate) continue;
+ state = NewStateForAllocation(allocate);
+ break;
+ }
+ case HValue::kLoadNamedField: {
+ HLoadNamedField* load = HLoadNamedField::cast(instr);
+ int index = load->access().offset() / kPointerSize;
+ if (load->object() != allocate) continue;
+ ASSERT(load->access().IsInobject());
+ HValue* replacement = state->OperandAt(index);
+ load->DeleteAndReplaceWith(replacement);
+ if (FLAG_trace_escape_analysis) {
+ PrintF("Replacing load #%d with #%d (%s)\n", instr->id(),
+ replacement->id(), replacement->Mnemonic());
+ }
+ break;
+ }
+ case HValue::kStoreNamedField: {
+ HStoreNamedField* store = HStoreNamedField::cast(instr);
+ int index = store->access().offset() / kPointerSize;
+ if (store->object() != allocate) continue;
+ ASSERT(store->access().IsInobject());
+ state = NewStateCopy(store->previous(), state);
+ state->SetOperandAt(index, store->value());
+ if (store->has_transition()) {
+ state->SetOperandAt(0, store->transition());
+ }
+ if (store->HasObservableSideEffects()) {
+ state->ReuseSideEffectsFromStore(store);
+ }
+ store->DeleteAndReplaceWith(store->ActualValue());
+ if (FLAG_trace_escape_analysis) {
+ PrintF("Replacing store #%d%s\n", instr->id(),
+ store->has_transition() ? " (with transition)" : "");
+ }
+ break;
+ }
+ case HValue::kArgumentsObject:
+ case HValue::kCapturedObject:
+ case HValue::kSimulate: {
+ for (int i = 0; i < instr->OperandCount(); i++) {
+ if (instr->OperandAt(i) != allocate) continue;
+ instr->SetOperandAt(i, state);
+ }
+ break;
+ }
+ case HValue::kCheckHeapObject: {
+ HCheckHeapObject* check = HCheckHeapObject::cast(instr);
+ if (check->value() != allocate) continue;
+ check->DeleteAndReplaceWith(check->ActualValue());
+ break;
+ }
+ case HValue::kCheckMaps: {
+ HCheckMaps* mapcheck = HCheckMaps::cast(instr);
+ if (mapcheck->value() != allocate) continue;
+ NewMapCheckAndInsert(state, mapcheck);
+ mapcheck->DeleteAndReplaceWith(mapcheck->ActualValue());
+ break;
+ }
+ default:
+ // Nothing to see here, move along ...
+ break;
+ }
+ }
+
+ // Propagate the block state forward to all successor blocks.
+ for (int i = 0; i < block->end()->SuccessorCount(); i++) {
+ HBasicBlock* succ = block->end()->SuccessorAt(i);
+ if (!allocate_block->Dominates(succ)) continue;
+ if (succ->predecessors()->length() == 1) {
+ // Case 1: This is the only predecessor, just reuse state.
+ SetStateAt(succ, state);
+ } else if (StateAt(succ) == NULL && succ->IsLoopHeader()) {
+        // Case 2: This state enters a loop header; be pessimistic about
+        // loop headers and add phis for all values.
+ SetStateAt(succ, NewStateForLoopHeader(succ->first(), state));
+ } else if (StateAt(succ) == NULL) {
+ // Case 3: This is the first state propagated forward to the
+ // successor, leave a copy of the current state.
+ SetStateAt(succ, NewStateCopy(succ->first(), state));
+ } else {
+ // Case 4: This is a state that needs merging with previously
+ // propagated states, potentially introducing new phis lazily or
+ // adding values to existing phis.
+ HCapturedObject* succ_state = StateAt(succ);
+ for (int index = 0; index < number_of_values_; index++) {
+ HValue* operand = state->OperandAt(index);
+ HValue* succ_operand = succ_state->OperandAt(index);
+ if (succ_operand->IsPhi() && succ_operand->block() == succ) {
+ // Phi already exists, add operand.
+ HPhi* phi = HPhi::cast(succ_operand);
+ phi->SetOperandAt(succ->PredecessorIndexOf(block), operand);
+ } else if (succ_operand != operand) {
+ // Phi does not exist, introduce one.
+ HPhi* phi = NewPhiAndInsert(succ, succ_operand, index);
+ phi->SetOperandAt(succ->PredecessorIndexOf(block), operand);
+ succ_state->SetOperandAt(index, phi);
+ }
+ }
+ }
+ }
+ }
+
+ // All uses have been handled.
+ ASSERT(allocate->HasNoUses());
+ allocate->DeleteAndReplaceWith(NULL);
+}
+
+
+void HEscapeAnalysisPhase::PerformScalarReplacement() {
+ for (int i = 0; i < captured_.length(); i++) {
+ HAllocate* allocate = HAllocate::cast(captured_.at(i));
+
+ // Compute number of scalar values and start with clean slate.
+ int size_in_bytes = allocate->size()->GetInteger32Constant();
+ number_of_values_ = size_in_bytes / kPointerSize;
+ number_of_objects_++;
+ block_states_.Clear();
+
+ // Perform actual analysis step.
+ AnalyzeDataFlow(allocate);
+
+ cumulative_values_ += number_of_values_;
+ ASSERT(allocate->HasNoUses());
+ ASSERT(!allocate->IsLinked());
+ }
+}
+
+
+void HEscapeAnalysisPhase::Run() {
+ // TODO(mstarzinger): We disable escape analysis with OSR for now, because
+ // spill slots might be uninitialized. Needs investigation.
+ if (graph()->has_osr()) return;
+ int max_fixpoint_iteration_count = FLAG_escape_analysis_iterations;
+ for (int i = 0; i < max_fixpoint_iteration_count; i++) {
+ CollectCapturedValues();
+ if (captured_.is_empty()) break;
+ PerformScalarReplacement();
+ captured_.Clear();
+ }
+}
+
+
+} } // namespace v8::internal
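
The scalar replacement performed by AnalyzeDataFlow above can be pictured with a small
standalone sketch (toy C++ with hypothetical names, not part of this patch): each captured
allocation becomes an array of value slots indexed by field offset / kPointerSize, stores
write a slot, and loads are replaced by whatever the slot currently holds, so the object
itself never has to be materialized.

    // Toy model of the per-field state used during scalar replacement.
    #include <cassert>
    #include <vector>

    struct ToyValue { int id; };

    // One captured object: a value slot per in-object field.
    struct ToyState {
      std::vector<const ToyValue*> slots;
      explicit ToyState(size_t field_count) : slots(field_count, nullptr) {}
    };

    int main() {
      const int kPointerSize = 4;            // illustrative word size
      ToyState state(16 / kPointerSize);     // a 16-byte allocation -> 4 slots

      ToyValue stored{42};
      state.slots[8 / kPointerSize] = &stored;                 // store @ offset 8
      const ToyValue* loaded = state.slots[8 / kPointerSize];  // load  @ offset 8
      assert(loaded == &stored);  // the load folds to the previously stored value
      return 0;
    }
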
diff --git a/deps/v8/src/hydrogen-escape-analysis.h b/deps/v8/src/hydrogen-escape-analysis.h
new file mode 100644
index 000000000..3e27cc1b4
--- /dev/null
+++ b/deps/v8/src/hydrogen-escape-analysis.h
@@ -0,0 +1,91 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_HYDROGEN_ESCAPE_ANALYSIS_H_
+#define V8_HYDROGEN_ESCAPE_ANALYSIS_H_
+
+#include "allocation.h"
+#include "hydrogen.h"
+
+namespace v8 {
+namespace internal {
+
+
+class HEscapeAnalysisPhase : public HPhase {
+ public:
+ explicit HEscapeAnalysisPhase(HGraph* graph)
+ : HPhase("H_Escape analysis", graph),
+ captured_(0, zone()),
+ number_of_objects_(0),
+ number_of_values_(0),
+ cumulative_values_(0),
+ block_states_(graph->blocks()->length(), zone()) { }
+
+ void Run();
+
+ private:
+ void CollectCapturedValues();
+ bool HasNoEscapingUses(HValue* value, int size);
+ void PerformScalarReplacement();
+ void AnalyzeDataFlow(HInstruction* instr);
+
+ HCapturedObject* NewState(HInstruction* prev);
+ HCapturedObject* NewStateForAllocation(HInstruction* prev);
+ HCapturedObject* NewStateForLoopHeader(HInstruction* prev, HCapturedObject*);
+ HCapturedObject* NewStateCopy(HInstruction* prev, HCapturedObject* state);
+
+ HPhi* NewPhiAndInsert(HBasicBlock* block, HValue* incoming_value, int index);
+
+ HValue* NewMapCheckAndInsert(HCapturedObject* state, HCheckMaps* mapcheck);
+
+ HCapturedObject* StateAt(HBasicBlock* block) {
+ return block_states_.at(block->block_id());
+ }
+
+ void SetStateAt(HBasicBlock* block, HCapturedObject* state) {
+ block_states_.Set(block->block_id(), state);
+ }
+
+ // List of allocations captured during collection phase.
+ ZoneList<HInstruction*> captured_;
+
+ // Number of captured objects on which scalar replacement was done.
+ int number_of_objects_;
+
+ // Number of scalar values tracked during scalar replacement phase.
+ int number_of_values_;
+ int cumulative_values_;
+
+ // Map of block IDs to the data-flow state at block entry during the
+ // scalar replacement phase.
+ ZoneList<HCapturedObject*> block_states_;
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_HYDROGEN_ESCAPE_ANALYSIS_H_
diff --git a/deps/v8/src/hydrogen-flow-engine.h b/deps/v8/src/hydrogen-flow-engine.h
new file mode 100644
index 000000000..dfe43ec6c
--- /dev/null
+++ b/deps/v8/src/hydrogen-flow-engine.h
@@ -0,0 +1,235 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_HYDROGEN_FLOW_ENGINE_H_
+#define V8_HYDROGEN_FLOW_ENGINE_H_
+
+#include "hydrogen.h"
+#include "hydrogen-instructions.h"
+#include "zone.h"
+
+namespace v8 {
+namespace internal {
+
+// An example implementation of effects that doesn't collect anything.
+class NoEffects : public ZoneObject {
+ public:
+ explicit NoEffects(Zone* zone) { }
+
+ inline bool Disabled() {
+ return true; // Nothing to do.
+ }
+ template <class State>
+ inline void Apply(State* state) {
+ // do nothing.
+ }
+ inline void Process(HInstruction* value, Zone* zone) {
+ // do nothing.
+ }
+ inline void Union(NoEffects* other, Zone* zone) {
+ // do nothing.
+ }
+};
+
+
+// An example implementation of state that doesn't track anything.
+class NoState {
+ public:
+ inline NoState* Copy(HBasicBlock* succ, Zone* zone) {
+ return this;
+ }
+ inline NoState* Process(HInstruction* value, Zone* zone) {
+ return this;
+ }
+ inline NoState* Merge(HBasicBlock* succ, NoState* other, Zone* zone) {
+ return this;
+ }
+};
+
+
+// This class implements an engine that can drive flow-sensitive analyses
+// over a graph of basic blocks, either one block at a time (local analysis)
+// or over the entire graph (global analysis). The flow engine is parameterized
+// by the type of the state and the effects collected while walking over the
+// graph.
+//
+// The "State" collects which facts are known while passing over instructions
+// in control flow order, and the "Effects" collect summary information about
+// which facts could be invalidated on other control flow paths. The effects
+// are necessary to correctly handle loops in the control flow graph without
+// doing a fixed-point iteration. Thus the flow engine is guaranteed to visit
+// each block at most twice: once for state and optionally once for effects.
+//
+// The flow engine requires the State and Effects classes to implement methods
+// like the example NoState and NoEffects above. It's not necessary to provide
+// an effects implementation for local analysis.
+template <class State, class Effects>
+class HFlowEngine {
+ public:
+ HFlowEngine(HGraph* graph, Zone* zone)
+ : graph_(graph),
+ zone_(zone),
+#if DEBUG
+ pred_counts_(graph->blocks()->length(), zone),
+#endif
+ block_states_(graph->blocks()->length(), zone),
+ loop_effects_(graph->blocks()->length(), zone) {
+ loop_effects_.AddBlock(NULL, graph_->blocks()->length(), zone);
+ }
+
+ // Local analysis. Iterates over the instructions in the given block.
+ State* AnalyzeOneBlock(HBasicBlock* block, State* state) {
+ // Go through all instructions of the current block, updating the state.
+ for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
+ state = state->Process(it.Current(), zone_);
+ }
+ return state;
+ }
+
+ // Global analysis. Iterates over all blocks that are dominated by the given
+ // block, starting with the initial state. Computes effects for nested loops.
+ void AnalyzeDominatedBlocks(HBasicBlock* root, State* initial) {
+ InitializeStates();
+ SetStateAt(root, initial);
+
+ // Iterate all dominated blocks starting from the given start block.
+ for (int i = root->block_id(); i < graph_->blocks()->length(); i++) {
+ HBasicBlock* block = graph_->blocks()->at(i);
+
+ // Skip blocks not dominated by the root node.
+ if (SkipNonDominatedBlock(root, block)) continue;
+ State* state = StateAt(block);
+
+ if (block->IsLoopHeader()) {
+ // Apply loop effects before analyzing loop body.
+ ComputeLoopEffects(block)->Apply(state);
+ } else {
+ // Must have visited all predecessors before this block.
+ CheckPredecessorCount(block);
+ }
+
+ // Go through all instructions of the current block, updating the state.
+ for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
+ state = state->Process(it.Current(), zone_);
+ }
+
+ // Propagate the block state forward to all successor blocks.
+ for (int i = 0; i < block->end()->SuccessorCount(); i++) {
+ HBasicBlock* succ = block->end()->SuccessorAt(i);
+ IncrementPredecessorCount(succ);
+ if (StateAt(succ) == NULL) {
+ // This is the first state to reach the successor.
+ SetStateAt(succ, state->Copy(succ, zone_));
+ } else {
+ // Merge the current state with the state already at the successor.
+ SetStateAt(succ, state->Merge(succ, StateAt(succ), zone_));
+ }
+ }
+ }
+ }
+
+ private:
+ // Computes and caches the loop effects for the loop which has the given
+ // block as its loop header.
+ Effects* ComputeLoopEffects(HBasicBlock* block) {
+ ASSERT(block->IsLoopHeader());
+ Effects* effects = loop_effects_[block->block_id()];
+ if (effects != NULL) return effects; // Already analyzed this loop.
+
+ effects = new(zone_) Effects(zone_);
+ loop_effects_[block->block_id()] = effects;
+ if (effects->Disabled()) return effects; // No effects for this analysis.
+
+ HLoopInformation* loop = block->loop_information();
+ int end = loop->GetLastBackEdge()->block_id();
+ // Process the blocks between the header and the end.
+ for (int i = block->block_id(); i <= end; i++) {
+ HBasicBlock* member = graph_->blocks()->at(i);
+ if (i != block->block_id() && member->IsLoopHeader()) {
+ // Recursively compute and cache the effects of the nested loop.
+ ASSERT(member->loop_information()->parent_loop() == loop);
+ Effects* nested = ComputeLoopEffects(member);
+ effects->Union(nested, zone_);
+ // Skip the nested loop's blocks.
+ i = member->loop_information()->GetLastBackEdge()->block_id();
+ } else {
+ // Process all the effects of the block.
+ ASSERT(member->current_loop() == loop);
+ for (HInstructionIterator it(member); !it.Done(); it.Advance()) {
+ effects->Process(it.Current(), zone_);
+ }
+ }
+ }
+ return effects;
+ }
+
+ inline bool SkipNonDominatedBlock(HBasicBlock* root, HBasicBlock* other) {
+ if (root->block_id() == 0) return false; // Visit the whole graph.
+ if (root == other) return false; // Always visit the root.
+ return !root->Dominates(other); // Only visit dominated blocks.
+ }
+
+ inline State* StateAt(HBasicBlock* block) {
+ return block_states_.at(block->block_id());
+ }
+
+ inline void SetStateAt(HBasicBlock* block, State* state) {
+ block_states_.Set(block->block_id(), state);
+ }
+
+ inline void InitializeStates() {
+#if DEBUG
+ pred_counts_.Rewind(0);
+ pred_counts_.AddBlock(0, graph_->blocks()->length(), zone_);
+#endif
+ block_states_.Rewind(0);
+ block_states_.AddBlock(NULL, graph_->blocks()->length(), zone_);
+ }
+
+ inline void CheckPredecessorCount(HBasicBlock* block) {
+ ASSERT(block->predecessors()->length() == pred_counts_[block->block_id()]);
+ }
+
+ inline void IncrementPredecessorCount(HBasicBlock* block) {
+#if DEBUG
+ pred_counts_[block->block_id()]++;
+#endif
+ }
+
+ HGraph* graph_; // The hydrogen graph.
+ Zone* zone_; // Temporary zone.
+#if DEBUG
+ ZoneList<int> pred_counts_; // Finished predecessors (by block id).
+#endif
+ ZoneList<State*> block_states_; // Block states (by block id).
+ ZoneList<Effects*> loop_effects_; // Loop effects (by block id).
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_HYDROGEN_FLOW_ENGINE_H_
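
NoState and NoEffects above spell out the contract the engine relies on: Process folds one
instruction into the state, Copy and Merge move the state across control-flow edges, and the
Effects summarize what a loop body can invalidate. The standalone analogue below tracks a
single boolean fact with those operations (toy types and hypothetical names; it is a sketch
and is not pluggable into HFlowEngine as-is).

    // Toy State/Effects pair mirroring the method names the engine calls.
    #include <cstdio>

    struct ToyInstr { bool clobbers; };

    struct ToyState;

    struct ToyEffects {
      bool clobbered = false;
      bool Disabled() const { return false; }
      void Process(const ToyInstr& instr) { clobbered |= instr.clobbers; }
      void Union(const ToyEffects& other) { clobbered |= other.clobbered; }
      void Apply(ToyState* state);
    };

    struct ToyState {
      bool fact_holds = true;                  // "no clobber seen on this path"
      ToyState* Process(const ToyInstr& instr) {
        if (instr.clobbers) fact_holds = false;
        return this;
      }
      ToyState* Copy() const { return new ToyState(*this); }
      ToyState* Merge(const ToyState& other) {
        fact_holds = fact_holds && other.fact_holds;  // must hold on all paths
        return this;
      }
    };

    void ToyEffects::Apply(ToyState* state) {
      if (clobbered) state->fact_holds = false;  // pessimize at loop headers
    }

    int main() {
      ToyState state;
      state.Process(ToyInstr{false});            // harmless instruction
      ToyEffects loop_effects;
      loop_effects.Process(ToyInstr{true});      // loop body clobbers the fact
      loop_effects.Apply(&state);                // applied when entering the loop
      std::printf("fact holds at loop header: %d\n", state.fact_holds);  // 0
      return 0;
    }
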
diff --git a/deps/v8/src/hydrogen-gvn.cc b/deps/v8/src/hydrogen-gvn.cc
new file mode 100644
index 000000000..e3bf316f3
--- /dev/null
+++ b/deps/v8/src/hydrogen-gvn.cc
@@ -0,0 +1,853 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "hydrogen.h"
+#include "hydrogen-gvn.h"
+#include "v8.h"
+
+namespace v8 {
+namespace internal {
+
+class HValueMap: public ZoneObject {
+ public:
+ explicit HValueMap(Zone* zone)
+ : array_size_(0),
+ lists_size_(0),
+ count_(0),
+ present_flags_(0),
+ array_(NULL),
+ lists_(NULL),
+ free_list_head_(kNil) {
+ ResizeLists(kInitialSize, zone);
+ Resize(kInitialSize, zone);
+ }
+
+ void Kill(GVNFlagSet flags);
+
+ void Add(HValue* value, Zone* zone) {
+ present_flags_.Add(value->gvn_flags());
+ Insert(value, zone);
+ }
+
+ HValue* Lookup(HValue* value) const;
+
+ HValueMap* Copy(Zone* zone) const {
+ return new(zone) HValueMap(zone, this);
+ }
+
+ bool IsEmpty() const { return count_ == 0; }
+
+ private:
+ // A linked list of HValue* values. Stored in arrays.
+ struct HValueMapListElement {
+ HValue* value;
+ int next; // Index in the array of the next list element.
+ };
+ static const int kNil = -1; // The end of a linked list
+
+ // Must be a power of 2.
+ static const int kInitialSize = 16;
+
+ HValueMap(Zone* zone, const HValueMap* other);
+
+ void Resize(int new_size, Zone* zone);
+ void ResizeLists(int new_size, Zone* zone);
+ void Insert(HValue* value, Zone* zone);
+ uint32_t Bound(uint32_t value) const { return value & (array_size_ - 1); }
+
+ int array_size_;
+ int lists_size_;
+ int count_; // The number of values stored in the HValueMap.
+ GVNFlagSet present_flags_; // All flags that are in any value in the
+ // HValueMap.
+ HValueMapListElement* array_; // Primary store - contains the first value
+ // with a given hash. Colliding elements are stored in linked lists.
+ HValueMapListElement* lists_; // The linked lists containing hash collisions.
+ int free_list_head_; // Unused elements in lists_ are on the free list.
+};
+
+
+class HSideEffectMap BASE_EMBEDDED {
+ public:
+ HSideEffectMap();
+ explicit HSideEffectMap(HSideEffectMap* other);
+ HSideEffectMap& operator= (const HSideEffectMap& other);
+
+ void Kill(GVNFlagSet flags);
+
+ void Store(GVNFlagSet flags, HInstruction* instr);
+
+ bool IsEmpty() const { return count_ == 0; }
+
+ inline HInstruction* operator[](int i) const {
+ ASSERT(0 <= i);
+ ASSERT(i < kNumberOfTrackedSideEffects);
+ return data_[i];
+ }
+ inline HInstruction* at(int i) const { return operator[](i); }
+
+ private:
+ int count_;
+ HInstruction* data_[kNumberOfTrackedSideEffects];
+};
+
+
+void TraceGVN(const char* msg, ...) {
+ va_list arguments;
+ va_start(arguments, msg);
+ OS::VPrint(msg, arguments);
+ va_end(arguments);
+}
+
+
+// Wrap TraceGVN in macros to avoid the expense of evaluating its arguments
+// when --trace-gvn is off.
+#define TRACE_GVN_1(msg, a1) \
+ if (FLAG_trace_gvn) { \
+ TraceGVN(msg, a1); \
+ }
+
+#define TRACE_GVN_2(msg, a1, a2) \
+ if (FLAG_trace_gvn) { \
+ TraceGVN(msg, a1, a2); \
+ }
+
+#define TRACE_GVN_3(msg, a1, a2, a3) \
+ if (FLAG_trace_gvn) { \
+ TraceGVN(msg, a1, a2, a3); \
+ }
+
+#define TRACE_GVN_4(msg, a1, a2, a3, a4) \
+ if (FLAG_trace_gvn) { \
+ TraceGVN(msg, a1, a2, a3, a4); \
+ }
+
+#define TRACE_GVN_5(msg, a1, a2, a3, a4, a5) \
+ if (FLAG_trace_gvn) { \
+ TraceGVN(msg, a1, a2, a3, a4, a5); \
+ }
+
+
+HValueMap::HValueMap(Zone* zone, const HValueMap* other)
+ : array_size_(other->array_size_),
+ lists_size_(other->lists_size_),
+ count_(other->count_),
+ present_flags_(other->present_flags_),
+ array_(zone->NewArray<HValueMapListElement>(other->array_size_)),
+ lists_(zone->NewArray<HValueMapListElement>(other->lists_size_)),
+ free_list_head_(other->free_list_head_) {
+ OS::MemCopy(
+ array_, other->array_, array_size_ * sizeof(HValueMapListElement));
+ OS::MemCopy(
+ lists_, other->lists_, lists_size_ * sizeof(HValueMapListElement));
+}
+
+
+void HValueMap::Kill(GVNFlagSet flags) {
+ GVNFlagSet depends_flags = HValue::ConvertChangesToDependsFlags(flags);
+ if (!present_flags_.ContainsAnyOf(depends_flags)) return;
+ present_flags_.RemoveAll();
+ for (int i = 0; i < array_size_; ++i) {
+ HValue* value = array_[i].value;
+ if (value != NULL) {
+ // Clear list of collisions first, so we know if it becomes empty.
+ int kept = kNil; // List of kept elements.
+ int next;
+ for (int current = array_[i].next; current != kNil; current = next) {
+ next = lists_[current].next;
+ HValue* value = lists_[current].value;
+ if (value->gvn_flags().ContainsAnyOf(depends_flags)) {
+ // Drop it.
+ count_--;
+ lists_[current].next = free_list_head_;
+ free_list_head_ = current;
+ } else {
+ // Keep it.
+ lists_[current].next = kept;
+ kept = current;
+ present_flags_.Add(value->gvn_flags());
+ }
+ }
+ array_[i].next = kept;
+
+ // Now possibly drop directly indexed element.
+ value = array_[i].value;
+ if (value->gvn_flags().ContainsAnyOf(depends_flags)) { // Drop it.
+ count_--;
+ int head = array_[i].next;
+ if (head == kNil) {
+ array_[i].value = NULL;
+ } else {
+ array_[i].value = lists_[head].value;
+ array_[i].next = lists_[head].next;
+ lists_[head].next = free_list_head_;
+ free_list_head_ = head;
+ }
+ } else {
+ present_flags_.Add(value->gvn_flags()); // Keep it.
+ }
+ }
+ }
+}
+
+
+HValue* HValueMap::Lookup(HValue* value) const {
+ uint32_t hash = static_cast<uint32_t>(value->Hashcode());
+ uint32_t pos = Bound(hash);
+ if (array_[pos].value != NULL) {
+ if (array_[pos].value->Equals(value)) return array_[pos].value;
+ int next = array_[pos].next;
+ while (next != kNil) {
+ if (lists_[next].value->Equals(value)) return lists_[next].value;
+ next = lists_[next].next;
+ }
+ }
+ return NULL;
+}
+
+
+void HValueMap::Resize(int new_size, Zone* zone) {
+ ASSERT(new_size > count_);
+ // Hashing the values into the new array has no more collisions than in the
+ // old hash map, so we can use the existing lists_ array, if we are careful.
+
+ // Make sure we have at least one free element.
+ if (free_list_head_ == kNil) {
+ ResizeLists(lists_size_ << 1, zone);
+ }
+
+ HValueMapListElement* new_array =
+ zone->NewArray<HValueMapListElement>(new_size);
+ memset(new_array, 0, sizeof(HValueMapListElement) * new_size);
+
+ HValueMapListElement* old_array = array_;
+ int old_size = array_size_;
+
+ int old_count = count_;
+ count_ = 0;
+ // Do not modify present_flags_. It is currently correct.
+ array_size_ = new_size;
+ array_ = new_array;
+
+ if (old_array != NULL) {
+ // Iterate over all the elements in lists, rehashing them.
+ for (int i = 0; i < old_size; ++i) {
+ if (old_array[i].value != NULL) {
+ int current = old_array[i].next;
+ while (current != kNil) {
+ Insert(lists_[current].value, zone);
+ int next = lists_[current].next;
+ lists_[current].next = free_list_head_;
+ free_list_head_ = current;
+ current = next;
+ }
+ // Rehash the directly stored value.
+ Insert(old_array[i].value, zone);
+ }
+ }
+ }
+ USE(old_count);
+ ASSERT(count_ == old_count);
+}
+
+
+void HValueMap::ResizeLists(int new_size, Zone* zone) {
+ ASSERT(new_size > lists_size_);
+
+ HValueMapListElement* new_lists =
+ zone->NewArray<HValueMapListElement>(new_size);
+ memset(new_lists, 0, sizeof(HValueMapListElement) * new_size);
+
+ HValueMapListElement* old_lists = lists_;
+ int old_size = lists_size_;
+
+ lists_size_ = new_size;
+ lists_ = new_lists;
+
+ if (old_lists != NULL) {
+ OS::MemCopy(lists_, old_lists, old_size * sizeof(HValueMapListElement));
+ }
+ for (int i = old_size; i < lists_size_; ++i) {
+ lists_[i].next = free_list_head_;
+ free_list_head_ = i;
+ }
+}
+
+
+void HValueMap::Insert(HValue* value, Zone* zone) {
+ ASSERT(value != NULL);
+ // Resizing when half of the hashtable is filled up.
+ if (count_ >= array_size_ >> 1) Resize(array_size_ << 1, zone);
+ ASSERT(count_ < array_size_);
+ count_++;
+ uint32_t pos = Bound(static_cast<uint32_t>(value->Hashcode()));
+ if (array_[pos].value == NULL) {
+ array_[pos].value = value;
+ array_[pos].next = kNil;
+ } else {
+ if (free_list_head_ == kNil) {
+ ResizeLists(lists_size_ << 1, zone);
+ }
+ int new_element_pos = free_list_head_;
+ ASSERT(new_element_pos != kNil);
+ free_list_head_ = lists_[free_list_head_].next;
+ lists_[new_element_pos].value = value;
+ lists_[new_element_pos].next = array_[pos].next;
+ ASSERT(array_[pos].next == kNil || lists_[array_[pos].next].value != NULL);
+ array_[pos].next = new_element_pos;
+ }
+}
+
+
+HSideEffectMap::HSideEffectMap() : count_(0) {
+ memset(data_, 0, kNumberOfTrackedSideEffects * kPointerSize);
+}
+
+
+HSideEffectMap::HSideEffectMap(HSideEffectMap* other) : count_(other->count_) {
+ *this = *other; // Calls operator=.
+}
+
+
+HSideEffectMap& HSideEffectMap::operator= (const HSideEffectMap& other) {
+ if (this != &other) {
+ OS::MemCopy(data_, other.data_, kNumberOfTrackedSideEffects * kPointerSize);
+ }
+ return *this;
+}
+
+
+void HSideEffectMap::Kill(GVNFlagSet flags) {
+ for (int i = 0; i < kNumberOfTrackedSideEffects; i++) {
+ GVNFlag changes_flag = HValue::ChangesFlagFromInt(i);
+ if (flags.Contains(changes_flag)) {
+ if (data_[i] != NULL) count_--;
+ data_[i] = NULL;
+ }
+ }
+}
+
+
+void HSideEffectMap::Store(GVNFlagSet flags, HInstruction* instr) {
+ for (int i = 0; i < kNumberOfTrackedSideEffects; i++) {
+ GVNFlag changes_flag = HValue::ChangesFlagFromInt(i);
+ if (flags.Contains(changes_flag)) {
+ if (data_[i] == NULL) count_++;
+ data_[i] = instr;
+ }
+ }
+}
+
+
+HGlobalValueNumberingPhase::HGlobalValueNumberingPhase(HGraph* graph)
+ : HPhase("H_Global value numbering", graph),
+ removed_side_effects_(false),
+ block_side_effects_(graph->blocks()->length(), zone()),
+ loop_side_effects_(graph->blocks()->length(), zone()),
+ visited_on_paths_(graph->blocks()->length(), zone()) {
+ ASSERT(!AllowHandleAllocation::IsAllowed());
+ block_side_effects_.AddBlock(GVNFlagSet(), graph->blocks()->length(),
+ zone());
+ loop_side_effects_.AddBlock(GVNFlagSet(), graph->blocks()->length(),
+ zone());
+ }
+
+void HGlobalValueNumberingPhase::Analyze() {
+ removed_side_effects_ = false;
+ ComputeBlockSideEffects();
+ if (FLAG_loop_invariant_code_motion) {
+ LoopInvariantCodeMotion();
+ }
+ AnalyzeGraph();
+}
+
+
+void HGlobalValueNumberingPhase::ComputeBlockSideEffects() {
+ // The Analyze phase of GVN can be called multiple times. Clear loop side
+ // effects before computing them to erase the contents from previous Analyze
+ // passes.
+ for (int i = 0; i < loop_side_effects_.length(); ++i) {
+ loop_side_effects_[i].RemoveAll();
+ }
+ for (int i = graph()->blocks()->length() - 1; i >= 0; --i) {
+ // Compute side effects for the block.
+ HBasicBlock* block = graph()->blocks()->at(i);
+ GVNFlagSet side_effects;
+ if (block->IsReachable() && !block->IsDeoptimizing()) {
+ int id = block->block_id();
+ for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
+ HInstruction* instr = it.Current();
+ side_effects.Add(instr->ChangesFlags());
+ }
+ block_side_effects_[id].Add(side_effects);
+
+ // Loop headers are part of their loop.
+ if (block->IsLoopHeader()) {
+ loop_side_effects_[id].Add(side_effects);
+ }
+
+ // Propagate loop side effects upwards.
+ if (block->HasParentLoopHeader()) {
+ int header_id = block->parent_loop_header()->block_id();
+ loop_side_effects_[header_id].Add(block->IsLoopHeader()
+ ? loop_side_effects_[id]
+ : side_effects);
+ }
+ }
+ }
+}
+
+
+SmartArrayPointer<char> GetGVNFlagsString(GVNFlagSet flags) {
+ char underlying_buffer[kLastFlag * 128];
+ Vector<char> buffer(underlying_buffer, sizeof(underlying_buffer));
+#if DEBUG
+ int offset = 0;
+ const char* separator = "";
+ const char* comma = ", ";
+ buffer[0] = 0;
+ uint32_t set_depends_on = 0;
+ uint32_t set_changes = 0;
+ for (int bit = 0; bit < kLastFlag; ++bit) {
+ if ((flags.ToIntegral() & (1 << bit)) != 0) {
+ if (bit % 2 == 0) {
+ set_changes++;
+ } else {
+ set_depends_on++;
+ }
+ }
+ }
+ bool positive_changes = set_changes < (kLastFlag / 2);
+ bool positive_depends_on = set_depends_on < (kLastFlag / 2);
+ if (set_changes > 0) {
+ if (positive_changes) {
+ offset += OS::SNPrintF(buffer + offset, "changes [");
+ } else {
+ offset += OS::SNPrintF(buffer + offset, "changes all except [");
+ }
+ for (int bit = 0; bit < kLastFlag; ++bit) {
+ if (((flags.ToIntegral() & (1 << bit)) != 0) == positive_changes) {
+ switch (static_cast<GVNFlag>(bit)) {
+#define DECLARE_FLAG(type) \
+ case kChanges##type: \
+ offset += OS::SNPrintF(buffer + offset, separator); \
+ offset += OS::SNPrintF(buffer + offset, #type); \
+ separator = comma; \
+ break;
+GVN_TRACKED_FLAG_LIST(DECLARE_FLAG)
+GVN_UNTRACKED_FLAG_LIST(DECLARE_FLAG)
+#undef DECLARE_FLAG
+ default:
+ break;
+ }
+ }
+ }
+ offset += OS::SNPrintF(buffer + offset, "]");
+ }
+ if (set_depends_on > 0) {
+ separator = "";
+ if (set_changes > 0) {
+ offset += OS::SNPrintF(buffer + offset, ", ");
+ }
+ if (positive_depends_on) {
+ offset += OS::SNPrintF(buffer + offset, "depends on [");
+ } else {
+ offset += OS::SNPrintF(buffer + offset, "depends on all except [");
+ }
+ for (int bit = 0; bit < kLastFlag; ++bit) {
+ if (((flags.ToIntegral() & (1 << bit)) != 0) == positive_depends_on) {
+ switch (static_cast<GVNFlag>(bit)) {
+#define DECLARE_FLAG(type) \
+ case kDependsOn##type: \
+ offset += OS::SNPrintF(buffer + offset, separator); \
+ offset += OS::SNPrintF(buffer + offset, #type); \
+ separator = comma; \
+ break;
+GVN_TRACKED_FLAG_LIST(DECLARE_FLAG)
+GVN_UNTRACKED_FLAG_LIST(DECLARE_FLAG)
+#undef DECLARE_FLAG
+ default:
+ break;
+ }
+ }
+ }
+ offset += OS::SNPrintF(buffer + offset, "]");
+ }
+#else
+ OS::SNPrintF(buffer, "0x%08X", flags.ToIntegral());
+#endif
+ size_t string_len = strlen(underlying_buffer) + 1;
+ ASSERT(string_len <= sizeof(underlying_buffer));
+ char* result = new char[strlen(underlying_buffer) + 1];
+ OS::MemCopy(result, underlying_buffer, string_len);
+ return SmartArrayPointer<char>(result);
+}
+
+
+void HGlobalValueNumberingPhase::LoopInvariantCodeMotion() {
+ TRACE_GVN_1("Using optimistic loop invariant code motion: %s\n",
+ graph()->use_optimistic_licm() ? "yes" : "no");
+ for (int i = graph()->blocks()->length() - 1; i >= 0; --i) {
+ HBasicBlock* block = graph()->blocks()->at(i);
+ if (block->IsLoopHeader()) {
+ GVNFlagSet side_effects = loop_side_effects_[block->block_id()];
+ TRACE_GVN_2("Try loop invariant motion for block B%d %s\n",
+ block->block_id(),
+ *GetGVNFlagsString(side_effects));
+
+ GVNFlagSet accumulated_first_time_depends;
+ GVNFlagSet accumulated_first_time_changes;
+ HBasicBlock* last = block->loop_information()->GetLastBackEdge();
+ for (int j = block->block_id(); j <= last->block_id(); ++j) {
+ ProcessLoopBlock(graph()->blocks()->at(j), block, side_effects,
+ &accumulated_first_time_depends,
+ &accumulated_first_time_changes);
+ }
+ }
+ }
+}
+
+
+void HGlobalValueNumberingPhase::ProcessLoopBlock(
+ HBasicBlock* block,
+ HBasicBlock* loop_header,
+ GVNFlagSet loop_kills,
+ GVNFlagSet* first_time_depends,
+ GVNFlagSet* first_time_changes) {
+ HBasicBlock* pre_header = loop_header->predecessors()->at(0);
+ GVNFlagSet depends_flags = HValue::ConvertChangesToDependsFlags(loop_kills);
+ TRACE_GVN_2("Loop invariant motion for B%d %s\n",
+ block->block_id(),
+ *GetGVNFlagsString(depends_flags));
+ HInstruction* instr = block->first();
+ while (instr != NULL) {
+ HInstruction* next = instr->next();
+ bool hoisted = false;
+ if (instr->CheckFlag(HValue::kUseGVN)) {
+ TRACE_GVN_4("Checking instruction %d (%s) %s. Loop %s\n",
+ instr->id(),
+ instr->Mnemonic(),
+ *GetGVNFlagsString(instr->gvn_flags()),
+ *GetGVNFlagsString(loop_kills));
+ bool can_hoist = !instr->gvn_flags().ContainsAnyOf(depends_flags);
+ if (can_hoist && !graph()->use_optimistic_licm()) {
+ can_hoist = block->IsLoopSuccessorDominator();
+ }
+
+ if (can_hoist) {
+ bool inputs_loop_invariant = true;
+ for (int i = 0; i < instr->OperandCount(); ++i) {
+ if (instr->OperandAt(i)->IsDefinedAfter(pre_header)) {
+ inputs_loop_invariant = false;
+ }
+ }
+
+ if (inputs_loop_invariant && ShouldMove(instr, loop_header)) {
+ TRACE_GVN_1("Hoisting loop invariant instruction %d\n", instr->id());
+ // Move the instruction out of the loop.
+ instr->Unlink();
+ instr->InsertBefore(pre_header->end());
+ if (instr->HasSideEffects()) removed_side_effects_ = true;
+ hoisted = true;
+ }
+ }
+ }
+ if (!hoisted) {
+ // If an instruction is not hoisted, we have to account for its side
+ // effects when hoisting later HTransitionElementsKind instructions.
+ GVNFlagSet previous_depends = *first_time_depends;
+ GVNFlagSet previous_changes = *first_time_changes;
+ first_time_depends->Add(instr->DependsOnFlags());
+ first_time_changes->Add(instr->ChangesFlags());
+ if (!(previous_depends == *first_time_depends)) {
+ TRACE_GVN_1("Updated first-time accumulated %s\n",
+ *GetGVNFlagsString(*first_time_depends));
+ }
+ if (!(previous_changes == *first_time_changes)) {
+ TRACE_GVN_1("Updated first-time accumulated %s\n",
+ *GetGVNFlagsString(*first_time_changes));
+ }
+ }
+ instr = next;
+ }
+}
+
+
+bool HGlobalValueNumberingPhase::AllowCodeMotion() {
+ return info()->IsStub() || info()->opt_count() + 1 < FLAG_max_opt_count;
+}
+
+
+bool HGlobalValueNumberingPhase::ShouldMove(HInstruction* instr,
+ HBasicBlock* loop_header) {
+ // If we've disabled code motion or we're in a block that unconditionally
+ // deoptimizes, don't move any instructions.
+ return AllowCodeMotion() && !instr->block()->IsDeoptimizing() &&
+ instr->block()->IsReachable();
+}
+
+
+GVNFlagSet
+HGlobalValueNumberingPhase::CollectSideEffectsOnPathsToDominatedBlock(
+ HBasicBlock* dominator, HBasicBlock* dominated) {
+ GVNFlagSet side_effects;
+ for (int i = 0; i < dominated->predecessors()->length(); ++i) {
+ HBasicBlock* block = dominated->predecessors()->at(i);
+ if (dominator->block_id() < block->block_id() &&
+ block->block_id() < dominated->block_id() &&
+ !visited_on_paths_.Contains(block->block_id())) {
+ visited_on_paths_.Add(block->block_id());
+ side_effects.Add(block_side_effects_[block->block_id()]);
+ if (block->IsLoopHeader()) {
+ side_effects.Add(loop_side_effects_[block->block_id()]);
+ }
+ side_effects.Add(CollectSideEffectsOnPathsToDominatedBlock(
+ dominator, block));
+ }
+ }
+ return side_effects;
+}
+
+
+// Each instance of this class is like a "stack frame" for the recursive
+// traversal of the dominator tree done during GVN (the stack is handled
+// as a doubly linked list).
+// We reuse frames when possible, so the list length is limited by the depth
+// of the dominator tree, but this forces us to initialize each frame by
+// calling an explicit "Initialize" method instead of using a constructor.
+class GvnBasicBlockState: public ZoneObject {
+ public:
+ static GvnBasicBlockState* CreateEntry(Zone* zone,
+ HBasicBlock* entry_block,
+ HValueMap* entry_map) {
+ return new(zone)
+ GvnBasicBlockState(NULL, entry_block, entry_map, NULL, zone);
+ }
+
+ HBasicBlock* block() { return block_; }
+ HValueMap* map() { return map_; }
+ HSideEffectMap* dominators() { return &dominators_; }
+
+ GvnBasicBlockState* next_in_dominator_tree_traversal(
+ Zone* zone,
+ HBasicBlock** dominator) {
+ // This assignment needs to happen before calling next_dominated() because
+ // that call can reuse "this" if we are at the last dominated block.
+ *dominator = block();
+ GvnBasicBlockState* result = next_dominated(zone);
+ if (result == NULL) {
+ GvnBasicBlockState* dominator_state = pop();
+ if (dominator_state != NULL) {
+ // This branch is guaranteed not to return NULL because pop() never
+ // returns a state where "is_done() == true".
+ *dominator = dominator_state->block();
+ result = dominator_state->next_dominated(zone);
+ } else {
+ // Unnecessary (we are returning NULL) but done for cleanness.
+ *dominator = NULL;
+ }
+ }
+ return result;
+ }
+
+ private:
+ void Initialize(HBasicBlock* block,
+ HValueMap* map,
+ HSideEffectMap* dominators,
+ bool copy_map,
+ Zone* zone) {
+ block_ = block;
+ map_ = copy_map ? map->Copy(zone) : map;
+ dominated_index_ = -1;
+ length_ = block->dominated_blocks()->length();
+ if (dominators != NULL) {
+ dominators_ = *dominators;
+ }
+ }
+ bool is_done() { return dominated_index_ >= length_; }
+
+ GvnBasicBlockState(GvnBasicBlockState* previous,
+ HBasicBlock* block,
+ HValueMap* map,
+ HSideEffectMap* dominators,
+ Zone* zone)
+ : previous_(previous), next_(NULL) {
+ Initialize(block, map, dominators, true, zone);
+ }
+
+ GvnBasicBlockState* next_dominated(Zone* zone) {
+ dominated_index_++;
+ if (dominated_index_ == length_ - 1) {
+ // No need to copy the map for the last child in the dominator tree.
+ Initialize(block_->dominated_blocks()->at(dominated_index_),
+ map(),
+ dominators(),
+ false,
+ zone);
+ return this;
+ } else if (dominated_index_ < length_) {
+ return push(zone, block_->dominated_blocks()->at(dominated_index_));
+ } else {
+ return NULL;
+ }
+ }
+
+ GvnBasicBlockState* push(Zone* zone, HBasicBlock* block) {
+ if (next_ == NULL) {
+ next_ =
+ new(zone) GvnBasicBlockState(this, block, map(), dominators(), zone);
+ } else {
+ next_->Initialize(block, map(), dominators(), true, zone);
+ }
+ return next_;
+ }
+ GvnBasicBlockState* pop() {
+ GvnBasicBlockState* result = previous_;
+ while (result != NULL && result->is_done()) {
+ TRACE_GVN_2("Backtracking from block B%d to block b%d\n",
+ block()->block_id(),
+ previous_->block()->block_id())
+ result = result->previous_;
+ }
+ return result;
+ }
+
+ GvnBasicBlockState* previous_;
+ GvnBasicBlockState* next_;
+ HBasicBlock* block_;
+ HValueMap* map_;
+ HSideEffectMap dominators_;
+ int dominated_index_;
+ int length_;
+};
+
+
+// This is a recursive traversal of the dominator tree but it has been turned
+// into a loop to avoid stack overflows.
+// The logical "stack frames" of the recursion are kept in a list of
+// GvnBasicBlockState instances.
+void HGlobalValueNumberingPhase::AnalyzeGraph() {
+ HBasicBlock* entry_block = graph()->entry_block();
+ HValueMap* entry_map = new(zone()) HValueMap(zone());
+ GvnBasicBlockState* current =
+ GvnBasicBlockState::CreateEntry(zone(), entry_block, entry_map);
+
+ while (current != NULL) {
+ HBasicBlock* block = current->block();
+ HValueMap* map = current->map();
+ HSideEffectMap* dominators = current->dominators();
+
+ TRACE_GVN_2("Analyzing block B%d%s\n",
+ block->block_id(),
+ block->IsLoopHeader() ? " (loop header)" : "");
+
+ // If this is a loop header kill everything killed by the loop.
+ if (block->IsLoopHeader()) {
+ map->Kill(loop_side_effects_[block->block_id()]);
+ dominators->Kill(loop_side_effects_[block->block_id()]);
+ }
+
+ // Go through all instructions of the current block.
+ for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
+ HInstruction* instr = it.Current();
+ if (instr->CheckFlag(HValue::kTrackSideEffectDominators)) {
+ for (int i = 0; i < kNumberOfTrackedSideEffects; i++) {
+ HValue* other = dominators->at(i);
+ GVNFlag changes_flag = HValue::ChangesFlagFromInt(i);
+ GVNFlag depends_on_flag = HValue::DependsOnFlagFromInt(i);
+ if (instr->DependsOnFlags().Contains(depends_on_flag) &&
+ (other != NULL)) {
+ TRACE_GVN_5("Side-effect #%d in %d (%s) is dominated by %d (%s)\n",
+ i,
+ instr->id(),
+ instr->Mnemonic(),
+ other->id(),
+ other->Mnemonic());
+ instr->HandleSideEffectDominator(changes_flag, other);
+ }
+ }
+ }
+ // Instruction was unlinked during graph traversal.
+ if (!instr->IsLinked()) continue;
+
+ GVNFlagSet flags = instr->ChangesFlags();
+ if (!flags.IsEmpty()) {
+ // Clear all instructions in the map that are affected by side effects.
+ // Store instruction as the dominating one for tracked side effects.
+ map->Kill(flags);
+ dominators->Store(flags, instr);
+ TRACE_GVN_2("Instruction %d %s\n", instr->id(),
+ *GetGVNFlagsString(flags));
+ }
+ if (instr->CheckFlag(HValue::kUseGVN)) {
+ ASSERT(!instr->HasObservableSideEffects());
+ HValue* other = map->Lookup(instr);
+ if (other != NULL) {
+ ASSERT(instr->Equals(other) && other->Equals(instr));
+ TRACE_GVN_4("Replacing value %d (%s) with value %d (%s)\n",
+ instr->id(),
+ instr->Mnemonic(),
+ other->id(),
+ other->Mnemonic());
+ if (instr->HasSideEffects()) removed_side_effects_ = true;
+ instr->DeleteAndReplaceWith(other);
+ } else {
+ map->Add(instr, zone());
+ }
+ }
+ }
+
+ HBasicBlock* dominator_block;
+ GvnBasicBlockState* next =
+ current->next_in_dominator_tree_traversal(zone(),
+ &dominator_block);
+
+ if (next != NULL) {
+ HBasicBlock* dominated = next->block();
+ HValueMap* successor_map = next->map();
+ HSideEffectMap* successor_dominators = next->dominators();
+
+ // Kill everything killed on any path between this block and the
+ // dominated block. We don't have to traverse these paths if the
+ // value map and the dominators list is already empty. If the range
+ // of block ids (block_id, dominated_id) is empty there are no such
+ // paths.
+ if ((!successor_map->IsEmpty() || !successor_dominators->IsEmpty()) &&
+ dominator_block->block_id() + 1 < dominated->block_id()) {
+ visited_on_paths_.Clear();
+ GVNFlagSet side_effects_on_all_paths =
+ CollectSideEffectsOnPathsToDominatedBlock(dominator_block,
+ dominated);
+ successor_map->Kill(side_effects_on_all_paths);
+ successor_dominators->Kill(side_effects_on_all_paths);
+ }
+ }
+ current = next;
+ }
+}
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/hydrogen-gvn.h b/deps/v8/src/hydrogen-gvn.h
new file mode 100644
index 000000000..fdbad99c6
--- /dev/null
+++ b/deps/v8/src/hydrogen-gvn.h
@@ -0,0 +1,89 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_HYDROGEN_GVN_H_
+#define V8_HYDROGEN_GVN_H_
+
+#include "hydrogen.h"
+#include "hydrogen-instructions.h"
+#include "compiler.h"
+#include "zone.h"
+
+namespace v8 {
+namespace internal {
+
+// Perform common subexpression elimination and loop-invariant code motion.
+class HGlobalValueNumberingPhase : public HPhase {
+ public:
+ explicit HGlobalValueNumberingPhase(HGraph* graph);
+
+ void Run() {
+ Analyze();
+ // Trigger a second analysis pass to further eliminate duplicate values
+ // that could only be discovered by removing side-effect-generating
+ // instructions during the first pass.
+ if (FLAG_smi_only_arrays && removed_side_effects_) {
+ Analyze();
+ // TODO(danno): Turn this into a fixpoint iteration.
+ }
+ }
+
+ private:
+ void Analyze();
+ GVNFlagSet CollectSideEffectsOnPathsToDominatedBlock(
+ HBasicBlock* dominator,
+ HBasicBlock* dominated);
+ void AnalyzeGraph();
+ void ComputeBlockSideEffects();
+ void LoopInvariantCodeMotion();
+ void ProcessLoopBlock(HBasicBlock* block,
+ HBasicBlock* before_loop,
+ GVNFlagSet loop_kills,
+ GVNFlagSet* accumulated_first_time_depends,
+ GVNFlagSet* accumulated_first_time_changes);
+ bool AllowCodeMotion();
+ bool ShouldMove(HInstruction* instr, HBasicBlock* loop_header);
+
+ bool removed_side_effects_;
+
+ // A map of block IDs to their side effects.
+ ZoneList<GVNFlagSet> block_side_effects_;
+
+ // A map of loop header block IDs to their loop's side effects.
+ ZoneList<GVNFlagSet> loop_side_effects_;
+
+ // Used when collecting side effects on paths from dominator to
+ // dominated.
+ BitVector visited_on_paths_;
+
+ DISALLOW_COPY_AND_ASSIGN(HGlobalValueNumberingPhase);
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_HYDROGEN_GVN_H_
diff --git a/deps/v8/src/hydrogen-infer-representation.cc b/deps/v8/src/hydrogen-infer-representation.cc
new file mode 100644
index 000000000..f61649a68
--- /dev/null
+++ b/deps/v8/src/hydrogen-infer-representation.cc
@@ -0,0 +1,184 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "hydrogen-infer-representation.h"
+
+namespace v8 {
+namespace internal {
+
+void HInferRepresentationPhase::AddToWorklist(HValue* current) {
+ if (current->representation().IsTagged()) return;
+ if (!current->CheckFlag(HValue::kFlexibleRepresentation)) return;
+ if (in_worklist_.Contains(current->id())) return;
+ worklist_.Add(current, zone());
+ in_worklist_.Add(current->id());
+}
+
+
+void HInferRepresentationPhase::Run() {
+ // (1) Initialize bit vectors and count real uses. Each phi gets a
+ // bit-vector of length <number of phis>.
+ const ZoneList<HPhi*>* phi_list = graph()->phi_list();
+ int phi_count = phi_list->length();
+ ZoneList<BitVector*> connected_phis(phi_count, zone());
+ for (int i = 0; i < phi_count; ++i) {
+ phi_list->at(i)->InitRealUses(i);
+ BitVector* connected_set = new(zone()) BitVector(phi_count, zone());
+ connected_set->Add(i);
+ connected_phis.Add(connected_set, zone());
+ }
+
+ // (2) Do a fixed point iteration to find the set of connected phis. A
+ // phi is connected to another phi if its value is used either directly or
+ // indirectly through a transitive closure of the def-use relation.
+ bool change = true;
+ while (change) {
+ change = false;
+ // We normally have far more "forward edges" than "backward edges",
+ // so we terminate faster when we walk backwards.
+ for (int i = phi_count - 1; i >= 0; --i) {
+ HPhi* phi = phi_list->at(i);
+ for (HUseIterator it(phi->uses()); !it.Done(); it.Advance()) {
+ HValue* use = it.value();
+ if (use->IsPhi()) {
+ int id = HPhi::cast(use)->phi_id();
+ if (connected_phis[i]->UnionIsChanged(*connected_phis[id]))
+ change = true;
+ }
+ }
+ }
+ }
+
+ // Set truncation flags for groups of connected phis. This is a conservative
+ // approximation; the flag will be properly re-computed after representations
+ // have been determined.
+ if (phi_count > 0) {
+ BitVector done(phi_count, zone());
+ for (int i = 0; i < phi_count; ++i) {
+ if (done.Contains(i)) continue;
+
+ // Check if all uses of all connected phis in this group are truncating.
+ bool all_uses_everywhere_truncating_int32 = true;
+ bool all_uses_everywhere_truncating_smi = true;
+ for (BitVector::Iterator it(connected_phis[i]);
+ !it.Done();
+ it.Advance()) {
+ int index = it.Current();
+ all_uses_everywhere_truncating_int32 &=
+ phi_list->at(index)->CheckFlag(HInstruction::kTruncatingToInt32);
+ all_uses_everywhere_truncating_smi &=
+ phi_list->at(index)->CheckFlag(HInstruction::kTruncatingToSmi);
+ done.Add(index);
+ }
+
+ if (!all_uses_everywhere_truncating_int32) {
+ // Clear truncation flag of this group of connected phis.
+ for (BitVector::Iterator it(connected_phis[i]);
+ !it.Done();
+ it.Advance()) {
+ int index = it.Current();
+ phi_list->at(index)->ClearFlag(HInstruction::kTruncatingToInt32);
+ }
+ }
+ if (!all_uses_everywhere_truncating_smi) {
+ // Clear truncation flag of this group of connected phis.
+ for (BitVector::Iterator it(connected_phis[i]);
+ !it.Done();
+ it.Advance()) {
+ int index = it.Current();
+ phi_list->at(index)->ClearFlag(HInstruction::kTruncatingToSmi);
+ }
+ }
+ }
+ }
+
+ // Simplify constant phi inputs where possible.
+ // This step uses kTruncatingToInt32 flags of phis.
+ for (int i = 0; i < phi_count; ++i) {
+ phi_list->at(i)->SimplifyConstantInputs();
+ }
+
+ // Use the phi reachability information from step 2 to
+ // sum up the non-phi use counts of all connected phis.
+ for (int i = 0; i < phi_count; ++i) {
+ HPhi* phi = phi_list->at(i);
+ for (BitVector::Iterator it(connected_phis[i]);
+ !it.Done();
+ it.Advance()) {
+ int index = it.Current();
+ HPhi* it_use = phi_list->at(index);
+ if (index != i) phi->AddNonPhiUsesFrom(it_use); // Don't count twice.
+ }
+ }
+
+ // Initialize work list
+ for (int i = 0; i < graph()->blocks()->length(); ++i) {
+ HBasicBlock* block = graph()->blocks()->at(i);
+ const ZoneList<HPhi*>* phis = block->phis();
+ for (int j = 0; j < phis->length(); ++j) {
+ AddToWorklist(phis->at(j));
+ }
+
+ for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
+ HInstruction* current = it.Current();
+ AddToWorklist(current);
+ }
+ }
+
+ // Do a fixed point iteration, trying to improve representations
+ while (!worklist_.is_empty()) {
+ HValue* current = worklist_.RemoveLast();
+ current->InferRepresentation(this);
+ in_worklist_.Remove(current->id());
+ }
+
+ // Lastly: any instruction that we don't have representation information
+ // for defaults to Tagged.
+ for (int i = 0; i < graph()->blocks()->length(); ++i) {
+ HBasicBlock* block = graph()->blocks()->at(i);
+ const ZoneList<HPhi*>* phis = block->phis();
+ for (int j = 0; j < phis->length(); ++j) {
+ HPhi* phi = phis->at(j);
+ if (phi->representation().IsNone()) {
+ phi->ChangeRepresentation(Representation::Tagged());
+ }
+ }
+ for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
+ HInstruction* current = it.Current();
+ if (current->representation().IsNone() &&
+ current->CheckFlag(HInstruction::kFlexibleRepresentation)) {
+ if (current->CheckFlag(HInstruction::kCannotBeTagged)) {
+ current->ChangeRepresentation(Representation::Double());
+ } else {
+ current->ChangeRepresentation(Representation::Tagged());
+ }
+ }
+ }
+ }
+}
+
+} } // namespace v8::internal
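
As context for step (2) of HInferRepresentationPhase::Run() above, here is a minimal standalone sketch, separate from the patch itself, of the same fixed-point union over per-phi bit sets. The Phi struct and the 64-phi uint64_t bit set are simplified stand-ins for V8's HPhi and BitVector, not the real classes.

#include <cstdint>
#include <vector>

// Sketch (not V8 code): each phi i starts with the singleton set {i}; whenever
// phi i is used by another phi j, j's set is folded into i's, repeating until
// nothing changes -- the same fixed point as step (2) of Run().
struct Phi {
  std::vector<int> phi_uses;  // ids of the phis that use this phi's value
};

std::vector<uint64_t> ConnectedPhis(const std::vector<Phi>& phis) {
  const int n = static_cast<int>(phis.size());  // assumes n <= 64 for brevity
  std::vector<uint64_t> connected(n);
  for (int i = 0; i < n; ++i) connected[i] = uint64_t{1} << i;
  bool change = true;
  while (change) {
    change = false;
    for (int i = n - 1; i >= 0; --i) {  // walk backwards, as the patch does
      for (int use : phis[i].phi_uses) {
        uint64_t merged = connected[i] | connected[use];
        if (merged != connected[i]) {
          connected[i] = merged;
          change = true;
        }
      }
    }
  }
  return connected;
}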
diff --git a/deps/v8/src/hydrogen-infer-representation.h b/deps/v8/src/hydrogen-infer-representation.h
new file mode 100644
index 000000000..7c605696c
--- /dev/null
+++ b/deps/v8/src/hydrogen-infer-representation.h
@@ -0,0 +1,57 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_HYDROGEN_INFER_REPRESENTATION_H_
+#define V8_HYDROGEN_INFER_REPRESENTATION_H_
+
+#include "hydrogen.h"
+
+namespace v8 {
+namespace internal {
+
+
+class HInferRepresentationPhase : public HPhase {
+ public:
+ explicit HInferRepresentationPhase(HGraph* graph)
+ : HPhase("H_Infer representations", graph),
+ worklist_(8, zone()),
+ in_worklist_(graph->GetMaximumValueID(), zone()) { }
+
+ void Run();
+ void AddToWorklist(HValue* current);
+
+ private:
+ ZoneList<HValue*> worklist_;
+ BitVector in_worklist_;
+
+ DISALLOW_COPY_AND_ASSIGN(HInferRepresentationPhase);
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_HYDROGEN_INFER_REPRESENTATION_H_
diff --git a/deps/v8/src/hydrogen-infer-types.cc b/deps/v8/src/hydrogen-infer-types.cc
new file mode 100644
index 000000000..01c608473
--- /dev/null
+++ b/deps/v8/src/hydrogen-infer-types.cc
@@ -0,0 +1,77 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "hydrogen-infer-types.h"
+
+namespace v8 {
+namespace internal {
+
+void HInferTypesPhase::InferTypes(int from_inclusive, int to_inclusive) {
+ for (int i = from_inclusive; i <= to_inclusive; ++i) {
+ HBasicBlock* block = graph()->blocks()->at(i);
+
+ const ZoneList<HPhi*>* phis = block->phis();
+ for (int j = 0; j < phis->length(); j++) {
+ phis->at(j)->UpdateInferredType();
+ }
+
+ for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
+ it.Current()->UpdateInferredType();
+ }
+
+ if (block->IsLoopHeader()) {
+ HBasicBlock* last_back_edge =
+ block->loop_information()->GetLastBackEdge();
+ InferTypes(i + 1, last_back_edge->block_id());
+ // Skip all blocks already processed by the recursive call.
+ i = last_back_edge->block_id();
+ // Update phis of the loop header now after the whole loop body is
+ // guaranteed to be processed.
+ for (int j = 0; j < block->phis()->length(); ++j) {
+ HPhi* phi = block->phis()->at(j);
+ worklist_.Add(phi, zone());
+ in_worklist_.Add(phi->id());
+ }
+ while (!worklist_.is_empty()) {
+ HValue* current = worklist_.RemoveLast();
+ in_worklist_.Remove(current->id());
+ if (current->UpdateInferredType()) {
+ for (HUseIterator it(current->uses()); !it.Done(); it.Advance()) {
+ HValue* use = it.value();
+ if (!in_worklist_.Contains(use->id())) {
+ in_worklist_.Add(use->id());
+ worklist_.Add(use, zone());
+ }
+ }
+ }
+ }
+ ASSERT(in_worklist_.IsEmpty());
+ }
+ }
+}
+
+} } // namespace v8::internal
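
The loop-header handling above re-runs a small worklist until the header phis' types stabilise. A minimal standalone sketch of that propagation pattern follows, separate from the patch; the toy Node type stands in for HValue and a plain int stands in for HType.

#include <algorithm>
#include <unordered_set>
#include <vector>

// Sketch (not V8 code): nodes whose inferred "type" changes push their uses
// back onto the worklist until a fixed point is reached.
struct Node {
  std::vector<Node*> inputs;
  std::vector<Node*> uses;
  int type = 0;
  // Recompute this node's type from its inputs; report whether it changed.
  bool UpdateInferredType() {
    int merged = type;
    for (Node* input : inputs) merged = std::max(merged, input->type);
    if (merged == type) return false;
    type = merged;
    return true;
  }
};

void Repropagate(const std::vector<Node*>& seeds) {
  std::vector<Node*> worklist(seeds);
  std::unordered_set<Node*> in_worklist(seeds.begin(), seeds.end());
  while (!worklist.empty()) {
    Node* current = worklist.back();
    worklist.pop_back();
    in_worklist.erase(current);
    if (!current->UpdateInferredType()) continue;
    for (Node* use : current->uses) {
      if (in_worklist.insert(use).second) worklist.push_back(use);
    }
  }
}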
diff --git a/deps/v8/src/platform-tls-mac.h b/deps/v8/src/hydrogen-infer-types.h
index 728524e80..cfcbf3549 100644
--- a/deps/v8/src/platform-tls-mac.h
+++ b/deps/v8/src/hydrogen-infer-types.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -25,38 +25,35 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#ifndef V8_PLATFORM_TLS_MAC_H_
-#define V8_PLATFORM_TLS_MAC_H_
+#ifndef V8_HYDROGEN_INFER_TYPES_H_
+#define V8_HYDROGEN_INFER_TYPES_H_
-#include "globals.h"
+#include "hydrogen.h"
namespace v8 {
namespace internal {
-#if defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_X64)
-#define V8_FAST_TLS_SUPPORTED 1
+class HInferTypesPhase : public HPhase {
+ public:
+ explicit HInferTypesPhase(HGraph* graph)
+ : HPhase("H_Inferring types", graph), worklist_(8, zone()),
+ in_worklist_(graph->GetMaximumValueID(), zone()) { }
-extern intptr_t kMacTlsBaseOffset;
+ void Run() {
+ InferTypes(0, graph()->blocks()->length() - 1);
+ }
-INLINE(intptr_t InternalGetExistingThreadLocal(intptr_t index));
+ private:
+ void InferTypes(int from_inclusive, int to_inclusive);
-inline intptr_t InternalGetExistingThreadLocal(intptr_t index) {
- intptr_t result;
-#if defined(V8_HOST_ARCH_IA32)
- asm("movl %%gs:(%1,%2,4), %0;"
- :"=r"(result) // Output must be a writable register.
- :"r"(kMacTlsBaseOffset), "r"(index));
-#else
- asm("movq %%gs:(%1,%2,8), %0;"
- :"=r"(result)
- :"r"(kMacTlsBaseOffset), "r"(index));
-#endif
- return result;
-}
+ ZoneList<HValue*> worklist_;
+ BitVector in_worklist_;
+
+ DISALLOW_COPY_AND_ASSIGN(HInferTypesPhase);
+};
-#endif
} } // namespace v8::internal
-#endif // V8_PLATFORM_TLS_MAC_H_
+#endif // V8_HYDROGEN_INFER_TYPES_H_
diff --git a/deps/v8/src/hydrogen-instructions.cc b/deps/v8/src/hydrogen-instructions.cc
index 79550f3ea..206ab7e2a 100644
--- a/deps/v8/src/hydrogen-instructions.cc
+++ b/deps/v8/src/hydrogen-instructions.cc
@@ -27,8 +27,9 @@
#include "v8.h"
+#include "double.h"
#include "factory.h"
-#include "hydrogen.h"
+#include "hydrogen-infer-representation.h"
#if V8_TARGET_ARCH_IA32
#include "ia32/lithium-ia32.h"
@@ -53,20 +54,6 @@ HYDROGEN_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
#undef DEFINE_COMPILE
-const char* Representation::Mnemonic() const {
- switch (kind_) {
- case kNone: return "v";
- case kTagged: return "t";
- case kDouble: return "d";
- case kInteger32: return "i";
- case kExternal: return "x";
- default:
- UNREACHABLE();
- return NULL;
- }
-}
-
-
int HValue::LoopWeight() const {
const int w = FLAG_loop_weight;
static const int weights[] = { 1, w, w*w, w*w*w, w*w*w*w };
@@ -75,6 +62,12 @@ int HValue::LoopWeight() const {
}
+Isolate* HValue::isolate() const {
+ ASSERT(block() != NULL);
+ return block()->isolate();
+}
+
+
void HValue::AssumeRepresentation(Representation r) {
if (CheckFlag(kFlexibleRepresentation)) {
ChangeRepresentation(r);
@@ -85,34 +78,127 @@ void HValue::AssumeRepresentation(Representation r) {
}
-static int32_t ConvertAndSetOverflow(int64_t result, bool* overflow) {
- if (result > kMaxInt) {
- *overflow = true;
- return kMaxInt;
+void HValue::InferRepresentation(HInferRepresentationPhase* h_infer) {
+ ASSERT(CheckFlag(kFlexibleRepresentation));
+ Representation new_rep = RepresentationFromInputs();
+ UpdateRepresentation(new_rep, h_infer, "inputs");
+ new_rep = RepresentationFromUses();
+ UpdateRepresentation(new_rep, h_infer, "uses");
+ if (representation().IsSmi() && HasNonSmiUse()) {
+ UpdateRepresentation(
+ Representation::Integer32(), h_infer, "use requirements");
+ }
+}
+
+
+Representation HValue::RepresentationFromUses() {
+ if (HasNoUses()) return Representation::None();
+
+ // Array of use counts for each representation.
+ int use_count[Representation::kNumRepresentations] = { 0 };
+
+ for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
+ HValue* use = it.value();
+ Representation rep = use->observed_input_representation(it.index());
+ if (rep.IsNone()) continue;
+ if (FLAG_trace_representation) {
+ PrintF("#%d %s is used by #%d %s as %s%s\n",
+ id(), Mnemonic(), use->id(), use->Mnemonic(), rep.Mnemonic(),
+ (use->CheckFlag(kTruncatingToInt32) ? "-trunc" : ""));
+ }
+ use_count[rep.kind()] += use->LoopWeight();
+ }
+ if (IsPhi()) HPhi::cast(this)->AddIndirectUsesTo(&use_count[0]);
+ int tagged_count = use_count[Representation::kTagged];
+ int double_count = use_count[Representation::kDouble];
+ int int32_count = use_count[Representation::kInteger32];
+ int smi_count = use_count[Representation::kSmi];
+
+ if (tagged_count > 0) return Representation::Tagged();
+ if (double_count > 0) return Representation::Double();
+ if (int32_count > 0) return Representation::Integer32();
+ if (smi_count > 0) return Representation::Smi();
+
+ return Representation::None();
+}
+
+
+void HValue::UpdateRepresentation(Representation new_rep,
+ HInferRepresentationPhase* h_infer,
+ const char* reason) {
+ Representation r = representation();
+ if (new_rep.is_more_general_than(r)) {
+ if (CheckFlag(kCannotBeTagged) && new_rep.IsTagged()) return;
+ if (FLAG_trace_representation) {
+ PrintF("Changing #%d %s representation %s -> %s based on %s\n",
+ id(), Mnemonic(), r.Mnemonic(), new_rep.Mnemonic(), reason);
+ }
+ ChangeRepresentation(new_rep);
+ AddDependantsToWorklist(h_infer);
+ }
+}
+
+
+void HValue::AddDependantsToWorklist(HInferRepresentationPhase* h_infer) {
+ for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
+ h_infer->AddToWorklist(it.value());
}
- if (result < kMinInt) {
- *overflow = true;
- return kMinInt;
+ for (int i = 0; i < OperandCount(); ++i) {
+ h_infer->AddToWorklist(OperandAt(i));
+ }
+}
+
+
+static int32_t ConvertAndSetOverflow(Representation r,
+ int64_t result,
+ bool* overflow) {
+ if (r.IsSmi()) {
+ if (result > Smi::kMaxValue) {
+ *overflow = true;
+ return Smi::kMaxValue;
+ }
+ if (result < Smi::kMinValue) {
+ *overflow = true;
+ return Smi::kMinValue;
+ }
+ } else {
+ if (result > kMaxInt) {
+ *overflow = true;
+ return kMaxInt;
+ }
+ if (result < kMinInt) {
+ *overflow = true;
+ return kMinInt;
+ }
}
return static_cast<int32_t>(result);
}
-static int32_t AddWithoutOverflow(int32_t a, int32_t b, bool* overflow) {
+static int32_t AddWithoutOverflow(Representation r,
+ int32_t a,
+ int32_t b,
+ bool* overflow) {
int64_t result = static_cast<int64_t>(a) + static_cast<int64_t>(b);
- return ConvertAndSetOverflow(result, overflow);
+ return ConvertAndSetOverflow(r, result, overflow);
}
-static int32_t SubWithoutOverflow(int32_t a, int32_t b, bool* overflow) {
+static int32_t SubWithoutOverflow(Representation r,
+ int32_t a,
+ int32_t b,
+ bool* overflow) {
int64_t result = static_cast<int64_t>(a) - static_cast<int64_t>(b);
- return ConvertAndSetOverflow(result, overflow);
+ return ConvertAndSetOverflow(r, result, overflow);
}
-static int32_t MulWithoutOverflow(int32_t a, int32_t b, bool* overflow) {
+static int32_t MulWithoutOverflow(const Representation& r,
+ int32_t a,
+ int32_t b,
+ bool* overflow) {
int64_t result = static_cast<int64_t>(a) * static_cast<int64_t>(b);
- return ConvertAndSetOverflow(result, overflow);
+ return ConvertAndSetOverflow(r, result, overflow);
}
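
The helpers above clamp an overflowed 64-bit result to the bounds of the requested representation. A minimal standalone sketch of that behaviour, separate from the patch, is shown below; the 31-bit Smi bounds are an assumption matching the 32-bit-pointer configuration, and the names are illustrative only.

#include <cstdint>
#include <limits>

// Sketch (not V8 code): clamp to the representation's bounds and record
// whether the exact 64-bit result fell outside them.
constexpr int32_t kSmiMax = (1 << 30) - 1;   // assumption: 31-bit Smi payload
constexpr int32_t kSmiMin = -(1 << 30);
constexpr int32_t kInt32Max = std::numeric_limits<int32_t>::max();
constexpr int32_t kInt32Min = std::numeric_limits<int32_t>::min();

int32_t ClampToRepresentation(int64_t result, bool is_smi, bool* overflow) {
  const int64_t max = is_smi ? kSmiMax : kInt32Max;
  const int64_t min = is_smi ? kSmiMin : kInt32Min;
  if (result > max) { *overflow = true; return static_cast<int32_t>(max); }
  if (result < min) { *overflow = true; return static_cast<int32_t>(min); }
  return static_cast<int32_t>(result);
}

// Example: a sum just past the Smi limit overflows the Smi representation but
// not Integer32.
//   bool of = false;
//   ClampToRepresentation(int64_t{kSmiMax} + 1, /*is_smi=*/true, &of);   // of == true
//   of = false;
//   ClampToRepresentation(int64_t{kSmiMax} + 1, /*is_smi=*/false, &of);  // of == false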
@@ -132,8 +218,9 @@ int32_t Range::Mask() const {
void Range::AddConstant(int32_t value) {
if (value == 0) return;
bool may_overflow = false; // Overflow is ignored here.
- lower_ = AddWithoutOverflow(lower_, value, &may_overflow);
- upper_ = AddWithoutOverflow(upper_, value, &may_overflow);
+ Representation r = Representation::Integer32();
+ lower_ = AddWithoutOverflow(r, lower_, value, &may_overflow);
+ upper_ = AddWithoutOverflow(r, upper_, value, &may_overflow);
#ifdef DEBUG
Verify();
#endif
@@ -192,10 +279,10 @@ void Range::Shl(int32_t value) {
}
-bool Range::AddAndCheckOverflow(Range* other) {
+bool Range::AddAndCheckOverflow(const Representation& r, Range* other) {
bool may_overflow = false;
- lower_ = AddWithoutOverflow(lower_, other->lower(), &may_overflow);
- upper_ = AddWithoutOverflow(upper_, other->upper(), &may_overflow);
+ lower_ = AddWithoutOverflow(r, lower_, other->lower(), &may_overflow);
+ upper_ = AddWithoutOverflow(r, upper_, other->upper(), &may_overflow);
KeepOrder();
#ifdef DEBUG
Verify();
@@ -204,10 +291,10 @@ bool Range::AddAndCheckOverflow(Range* other) {
}
-bool Range::SubAndCheckOverflow(Range* other) {
+bool Range::SubAndCheckOverflow(const Representation& r, Range* other) {
bool may_overflow = false;
- lower_ = SubWithoutOverflow(lower_, other->upper(), &may_overflow);
- upper_ = SubWithoutOverflow(upper_, other->lower(), &may_overflow);
+ lower_ = SubWithoutOverflow(r, lower_, other->upper(), &may_overflow);
+ upper_ = SubWithoutOverflow(r, upper_, other->lower(), &may_overflow);
KeepOrder();
#ifdef DEBUG
Verify();
@@ -232,12 +319,12 @@ void Range::Verify() const {
#endif
-bool Range::MulAndCheckOverflow(Range* other) {
+bool Range::MulAndCheckOverflow(const Representation& r, Range* other) {
bool may_overflow = false;
- int v1 = MulWithoutOverflow(lower_, other->lower(), &may_overflow);
- int v2 = MulWithoutOverflow(lower_, other->upper(), &may_overflow);
- int v3 = MulWithoutOverflow(upper_, other->lower(), &may_overflow);
- int v4 = MulWithoutOverflow(upper_, other->upper(), &may_overflow);
+ int v1 = MulWithoutOverflow(r, lower_, other->lower(), &may_overflow);
+ int v2 = MulWithoutOverflow(r, lower_, other->upper(), &may_overflow);
+ int v3 = MulWithoutOverflow(r, upper_, other->lower(), &may_overflow);
+ int v4 = MulWithoutOverflow(r, upper_, other->upper(), &may_overflow);
lower_ = Min(Min(v1, v2), Min(v3, v4));
upper_ = Max(Max(v1, v2), Max(v3, v4));
#ifdef DEBUG
@@ -248,7 +335,10 @@ bool Range::MulAndCheckOverflow(Range* other) {
const char* HType::ToString() {
+ // Note: The c1visualizer syntax for locals allows only a sequence of the
+ // following characters: A-Za-z0-9_-|:
switch (type_) {
+ case kNone: return "none";
case kTagged: return "tagged";
case kTaggedPrimitive: return "primitive";
case kTaggedNumber: return "number";
@@ -259,10 +349,9 @@ const char* HType::ToString() {
case kNonPrimitive: return "non-primitive";
case kJSArray: return "array";
case kJSObject: return "object";
- case kUninitialized: return "uninitialized";
}
UNREACHABLE();
- return "Unreachable code";
+ return "unreachable";
}
@@ -299,14 +388,38 @@ HUseListNode* HUseListNode::tail() {
}
-bool HValue::CheckUsesForFlag(Flag f) {
+bool HValue::CheckUsesForFlag(Flag f) const {
for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
+ if (it.value()->IsSimulate()) continue;
if (!it.value()->CheckFlag(f)) return false;
}
return true;
}
+bool HValue::CheckUsesForFlag(Flag f, HValue** value) const {
+ for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
+ if (it.value()->IsSimulate()) continue;
+ if (!it.value()->CheckFlag(f)) {
+ *value = it.value();
+ return false;
+ }
+ }
+ return true;
+}
+
+
+bool HValue::HasAtLeastOneUseWithFlagAndNoneWithout(Flag f) const {
+ bool return_value = false;
+ for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
+ if (it.value()->IsSimulate()) continue;
+ if (!it.value()->CheckFlag(f)) return false;
+ return_value = true;
+ }
+ return return_value;
+}
+
+
HUseIterator::HUseIterator(HUseListNode* head) : next_(head) {
Advance();
}
@@ -396,6 +509,32 @@ const char* HValue::Mnemonic() const {
}
+bool HValue::CanReplaceWithDummyUses() {
+ return FLAG_unreachable_code_elimination &&
+ !(block()->IsReachable() ||
+ IsBlockEntry() ||
+ IsControlInstruction() ||
+ IsSimulate() ||
+ IsEnterInlined() ||
+ IsLeaveInlined());
+}
+
+
+bool HValue::IsInteger32Constant() {
+ return IsConstant() && HConstant::cast(this)->HasInteger32Value();
+}
+
+
+int32_t HValue::GetInteger32Constant() {
+ return HConstant::cast(this)->Integer32Value();
+}
+
+
+bool HValue::EqualsInteger32Constant(int32_t value) {
+ return IsInteger32Constant() && GetInteger32Constant() == value;
+}
+
+
void HValue::SetOperandAt(int index, HValue* value) {
RegisterUse(index, value);
InternalSetOperandAt(index, value);
@@ -405,7 +544,6 @@ void HValue::SetOperandAt(int index, HValue* value) {
void HValue::DeleteAndReplaceWith(HValue* other) {
// We replace all uses first, so Delete can assert that there are none.
if (other != NULL) ReplaceAllUsesWith(other);
- ASSERT(HasNoUses());
Kill();
DeleteFromGraph();
}
@@ -433,7 +571,7 @@ void HValue::Kill() {
HValue* operand = OperandAt(i);
if (operand == NULL) continue;
HUseListNode* first = operand->use_list_;
- if (first != NULL && first->value() == this && first->index() == i) {
+ if (first != NULL && first->value()->CheckFlag(kIsDead)) {
operand->use_list_ = first->tail();
}
}
@@ -451,16 +589,18 @@ void HValue::SetBlock(HBasicBlock* block) {
void HValue::PrintTypeTo(StringStream* stream) {
if (!representation().IsTagged() || type().Equals(HType::Tagged())) return;
- stream->Add(" type[%s]", type().ToString());
+ stream->Add(" type:%s", type().ToString());
}
void HValue::PrintRangeTo(StringStream* stream) {
if (range() == NULL || range()->IsMostGeneric()) return;
- stream->Add(" range[%d,%d,m0=%d]",
+ // Note: The c1visualizer syntax for locals allows only a sequence of the
+ // following characters: A-Za-z0-9_-|:
+ stream->Add(" range:%d_%d%s",
range()->lower(),
range()->upper(),
- static_cast<int>(range()->CanBeMinusZero()));
+ range()->CanBeMinusZero() ? "_m0" : "");
}
@@ -491,6 +631,11 @@ void HValue::PrintNameTo(StringStream* stream) {
}
+bool HValue::HasMonomorphicJSObjectType() {
+ return !GetMonomorphicJSObjectMap().is_null();
+}
+
+
bool HValue::UpdateInferredType() {
HType type = CalculateInferredType();
bool result = (!type.Equals(type_));
@@ -549,6 +694,17 @@ void HInstruction::PrintTo(StringStream* stream) {
PrintRangeTo(stream);
PrintChangesTo(stream);
PrintTypeTo(stream);
+ if (CheckFlag(HValue::kHasNoObservableSideEffects)) {
+ stream->Add(" [noOSE]");
+ }
+}
+
+
+void HInstruction::PrintDataTo(StringStream *stream) {
+ for (int i = 0; i < OperandCount(); ++i) {
+ if (i > 0) stream->Add(" ");
+ OperandAt(i)->PrintNameTo(stream);
+ }
}
@@ -585,6 +741,10 @@ void HInstruction::InsertBefore(HInstruction* next) {
next_ = next;
previous_ = prev;
SetBlock(next->block());
+ if (position() == RelocInfo::kNoPosition &&
+ next->position() != RelocInfo::kNoPosition) {
+ set_position(next->position());
+ }
}
@@ -619,6 +779,10 @@ void HInstruction::InsertAfter(HInstruction* previous) {
if (block->last() == previous) {
block->set_last(this);
}
+ if (position() == RelocInfo::kNoPosition &&
+ previous->position() != RelocInfo::kNoPosition) {
+ set_position(previous->position());
+ }
}
@@ -657,10 +821,27 @@ void HInstruction::Verify() {
// HValue::DataEquals. The default implementation is UNREACHABLE. We
// don't actually care whether DataEquals returns true or false here.
if (CheckFlag(kUseGVN)) DataEquals(this);
+
+ // Verify that all uses are in the graph.
+ for (HUseIterator use = uses(); !use.Done(); use.Advance()) {
+ if (use.value()->IsInstruction()) {
+ ASSERT(HInstruction::cast(use.value())->IsLinked());
+ }
+ }
}
#endif
+void HDummyUse::PrintDataTo(StringStream* stream) {
+ value()->PrintNameTo(stream);
+}
+
+
+void HEnvironmentMarker::PrintDataTo(StringStream* stream) {
+ stream->Add("%s var[%d]", kind() == BIND ? "bind" : "lookup", index());
+}
+
+
void HUnaryCall::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
stream->Add(" ");
@@ -677,10 +858,100 @@ void HBinaryCall::PrintDataTo(StringStream* stream) {
}
+void HBoundsCheck::ApplyIndexChange() {
+ if (skip_check()) return;
+
+ DecompositionResult decomposition;
+ bool index_is_decomposable = index()->TryDecompose(&decomposition);
+ if (index_is_decomposable) {
+ ASSERT(decomposition.base() == base());
+ if (decomposition.offset() == offset() &&
+ decomposition.scale() == scale()) return;
+ } else {
+ return;
+ }
+
+ ReplaceAllUsesWith(index());
+
+ HValue* current_index = decomposition.base();
+ int actual_offset = decomposition.offset() + offset();
+ int actual_scale = decomposition.scale() + scale();
+
+ Zone* zone = block()->graph()->zone();
+ HValue* context = block()->graph()->GetInvalidContext();
+ if (actual_offset != 0) {
+ HConstant* add_offset = HConstant::New(zone, context, actual_offset);
+ add_offset->InsertBefore(this);
+ HInstruction* add = HAdd::New(zone, context,
+ current_index, add_offset);
+ add->InsertBefore(this);
+ add->AssumeRepresentation(index()->representation());
+ add->ClearFlag(kCanOverflow);
+ current_index = add;
+ }
+
+ if (actual_scale != 0) {
+ HConstant* sar_scale = HConstant::New(zone, context, actual_scale);
+ sar_scale->InsertBefore(this);
+ HInstruction* sar = HSar::New(zone, context,
+ current_index, sar_scale);
+ sar->InsertBefore(this);
+ sar->AssumeRepresentation(index()->representation());
+ current_index = sar;
+ }
+
+ SetOperandAt(0, current_index);
+
+ base_ = NULL;
+ offset_ = 0;
+ scale_ = 0;
+}
+
+
void HBoundsCheck::PrintDataTo(StringStream* stream) {
index()->PrintNameTo(stream);
stream->Add(" ");
length()->PrintNameTo(stream);
+ if (base() != NULL && (offset() != 0 || scale() != 0)) {
+ stream->Add(" base: ((");
+ if (base() != index()) {
+ index()->PrintNameTo(stream);
+ } else {
+ stream->Add("index");
+ }
+ stream->Add(" + %d) >> %d)", offset(), scale());
+ }
+ if (skip_check()) {
+ stream->Add(" [DISABLED]");
+ }
+}
+
+
+void HBoundsCheck::InferRepresentation(HInferRepresentationPhase* h_infer) {
+ ASSERT(CheckFlag(kFlexibleRepresentation));
+ HValue* actual_index = index()->ActualValue();
+ HValue* actual_length = length()->ActualValue();
+ Representation index_rep = actual_index->representation();
+ Representation length_rep = actual_length->representation();
+ if (index_rep.IsTagged() && actual_index->type().IsSmi()) {
+ index_rep = Representation::Smi();
+ }
+ if (length_rep.IsTagged() && actual_length->type().IsSmi()) {
+ length_rep = Representation::Smi();
+ }
+ Representation r = index_rep.generalize(length_rep);
+ if (r.is_more_general_than(Representation::Integer32())) {
+ r = Representation::Integer32();
+ }
+ UpdateRepresentation(r, h_infer, "boundscheck");
+}
+
+
+void HBoundsCheckBaseIndexInformation::PrintDataTo(StringStream* stream) {
+ stream->Add("base: ");
+ base_index()->PrintNameTo(stream);
+ stream->Add(", check: ");
+ base_index()->PrintNameTo(stream);
}
@@ -712,8 +983,18 @@ void HCallKnownGlobal::PrintDataTo(StringStream* stream) {
}
+void HCallNewArray::PrintDataTo(StringStream* stream) {
+ stream->Add(ElementsKindToString(elements_kind()));
+ stream->Add(" ");
+ HBinaryCall::PrintDataTo(stream);
+}
+
+
void HCallRuntime::PrintDataTo(StringStream* stream) {
stream->Add("%o ", *name());
+ if (save_doubles() == kSaveFPRegs) {
+ stream->Add("[save doubles] ");
+ }
stream->Add("#%d", argument_count());
}
@@ -758,22 +1039,57 @@ void HUnaryControlInstruction::PrintDataTo(StringStream* stream) {
}
-void HIsNilAndBranch::PrintDataTo(StringStream* stream) {
+void HReturn::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
- stream->Add(kind() == kStrictEquality ? " === " : " == ");
- stream->Add(nil() == kNullValue ? "null" : "undefined");
- HControlInstruction::PrintDataTo(stream);
+ stream->Add(" (pop ");
+ parameter_count()->PrintNameTo(stream);
+ stream->Add(" values)");
}
-void HReturn::PrintDataTo(StringStream* stream) {
- value()->PrintNameTo(stream);
+Representation HBranch::observed_input_representation(int index) {
+ static const ToBooleanStub::Types tagged_types(
+ ToBooleanStub::NULL_TYPE |
+ ToBooleanStub::SPEC_OBJECT |
+ ToBooleanStub::STRING |
+ ToBooleanStub::SYMBOL);
+ if (expected_input_types_.ContainsAnyOf(tagged_types)) {
+ return Representation::Tagged();
+ }
+ if (expected_input_types_.Contains(ToBooleanStub::UNDEFINED)) {
+ if (expected_input_types_.Contains(ToBooleanStub::HEAP_NUMBER)) {
+ return Representation::Double();
+ }
+ return Representation::Tagged();
+ }
+ if (expected_input_types_.Contains(ToBooleanStub::HEAP_NUMBER)) {
+ return Representation::Double();
+ }
+ if (expected_input_types_.Contains(ToBooleanStub::SMI)) {
+ return Representation::Smi();
+ }
+ return Representation::None();
+}
+
+
+bool HBranch::KnownSuccessorBlock(HBasicBlock** block) {
+ HValue* value = this->value();
+ if (value->EmitAtUses()) {
+ ASSERT(value->IsConstant());
+ ASSERT(!value->representation().IsDouble());
+ *block = HConstant::cast(value)->BooleanValue()
+ ? FirstSuccessor()
+ : SecondSuccessor();
+ return true;
+ }
+ *block = NULL;
+ return false;
}
void HCompareMap::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
- stream->Add(" (%p)", *map());
+ stream->Add(" (%p)", *map().handle());
HControlInstruction::PrintDataTo(stream);
}
@@ -782,20 +1098,42 @@ const char* HUnaryMathOperation::OpName() const {
switch (op()) {
case kMathFloor: return "floor";
case kMathRound: return "round";
- case kMathCeil: return "ceil";
case kMathAbs: return "abs";
case kMathLog: return "log";
case kMathSin: return "sin";
case kMathCos: return "cos";
case kMathTan: return "tan";
- case kMathASin: return "asin";
- case kMathACos: return "acos";
- case kMathATan: return "atan";
case kMathExp: return "exp";
case kMathSqrt: return "sqrt";
- default: break;
+ case kMathPowHalf: return "pow-half";
+ default:
+ UNREACHABLE();
+ return NULL;
}
- return "(unknown operation)";
+}
+
+
+Range* HUnaryMathOperation::InferRange(Zone* zone) {
+ Representation r = representation();
+ if (r.IsSmiOrInteger32() && value()->HasRange()) {
+ if (op() == kMathAbs) {
+ int upper = value()->range()->upper();
+ int lower = value()->range()->lower();
+ bool spans_zero = value()->range()->CanBeZero();
+ // Math.abs(kMinInt) overflows its representation, on which the
+ // instruction deopts. Hence clamp it to kMaxInt.
+ int abs_upper = upper == kMinInt ? kMaxInt : abs(upper);
+ int abs_lower = lower == kMinInt ? kMaxInt : abs(lower);
+ Range* result =
+ new(zone) Range(spans_zero ? 0 : Min(abs_lower, abs_upper),
+ Max(abs_lower, abs_upper));
+ // In case of Smi representation, clamp Math.abs(Smi::kMinValue) to
+ // Smi::kMaxValue.
+ if (r.IsSmi()) result->ClampToSmi();
+ return result;
+ }
+ }
+ return HValue::InferRange(zone);
}
@@ -866,59 +1204,100 @@ void HLoadFieldByIndex::PrintDataTo(StringStream* stream) {
}
-HValue* HConstant::Canonicalize() {
- return HasNoUses() ? NULL : this;
+static bool MatchLeftIsOnes(HValue* l, HValue* r, HValue** negated) {
+ if (!l->EqualsInteger32Constant(~0)) return false;
+ *negated = r;
+ return true;
+}
+
+
+static bool MatchNegationViaXor(HValue* instr, HValue** negated) {
+ if (!instr->IsBitwise()) return false;
+ HBitwise* b = HBitwise::cast(instr);
+ return (b->op() == Token::BIT_XOR) &&
+ (MatchLeftIsOnes(b->left(), b->right(), negated) ||
+ MatchLeftIsOnes(b->right(), b->left(), negated));
}
-HValue* HTypeof::Canonicalize() {
- return HasNoUses() ? NULL : this;
+static bool MatchDoubleNegation(HValue* instr, HValue** arg) {
+ HValue* negated;
+ return MatchNegationViaXor(instr, &negated) &&
+ MatchNegationViaXor(negated, arg);
}
HValue* HBitwise::Canonicalize() {
- if (!representation().IsInteger32()) return this;
+ if (!representation().IsSmiOrInteger32()) return this;
// If x is an int32, then x & -1 == x, x | 0 == x and x ^ 0 == x.
int32_t nop_constant = (op() == Token::BIT_AND) ? -1 : 0;
- if (left()->IsConstant() &&
- HConstant::cast(left())->HasInteger32Value() &&
- HConstant::cast(left())->Integer32Value() == nop_constant &&
+ if (left()->EqualsInteger32Constant(nop_constant) &&
!right()->CheckFlag(kUint32)) {
return right();
}
- if (right()->IsConstant() &&
- HConstant::cast(right())->HasInteger32Value() &&
- HConstant::cast(right())->Integer32Value() == nop_constant &&
+ if (right()->EqualsInteger32Constant(nop_constant) &&
!left()->CheckFlag(kUint32)) {
return left();
}
+ // Optimize double negation, a common pattern used for ToInt32(x).
+ HValue* arg;
+ if (MatchDoubleNegation(this, &arg) && !arg->CheckFlag(kUint32)) {
+ return arg;
+ }
return this;
}
-HValue* HBitNot::Canonicalize() {
- // Optimize ~~x, a common pattern used for ToInt32(x).
- if (value()->IsBitNot()) {
- HValue* result = HBitNot::cast(value())->value();
- ASSERT(result->representation().IsInteger32());
- if (!result->CheckFlag(kUint32)) {
- return result;
- }
- }
- return this;
+static bool IsIdentityOperation(HValue* arg1, HValue* arg2, int32_t identity) {
+ return arg1->representation().IsSpecialization() &&
+ arg2->EqualsInteger32Constant(identity);
}
HValue* HAdd::Canonicalize() {
- if (!representation().IsInteger32()) return this;
- if (CheckUsesForFlag(kTruncatingToInt32)) ClearFlag(kCanOverflow);
+ // Adding 0 is an identity operation except in case of -0: -0 + 0 = +0
+ if (IsIdentityOperation(left(), right(), 0) &&
+ !left()->representation().IsDouble()) { // Left could be -0.
+ return left();
+ }
+ if (IsIdentityOperation(right(), left(), 0) &&
+ !left()->representation().IsDouble()) { // Right could be -0.
+ return right();
+ }
return this;
}
HValue* HSub::Canonicalize() {
- if (!representation().IsInteger32()) return this;
- if (CheckUsesForFlag(kTruncatingToInt32)) ClearFlag(kCanOverflow);
+ if (IsIdentityOperation(left(), right(), 0)) return left();
+ return this;
+}
+
+
+HValue* HMul::Canonicalize() {
+ if (IsIdentityOperation(left(), right(), 1)) return left();
+ if (IsIdentityOperation(right(), left(), 1)) return right();
+ return this;
+}
+
+
+bool HMul::MulMinusOne() {
+ if (left()->EqualsInteger32Constant(-1) ||
+ right()->EqualsInteger32Constant(-1)) {
+ return true;
+ }
+
+ return false;
+}
+
+
+HValue* HMod::Canonicalize() {
+ return this;
+}
+
+
+HValue* HDiv::Canonicalize() {
+ if (IsIdentityOperation(left(), right(), 1)) return left();
return this;
}
@@ -942,42 +1321,80 @@ void HTypeof::PrintDataTo(StringStream* stream) {
}
+void HForceRepresentation::PrintDataTo(StringStream* stream) {
+ stream->Add("%s ", representation().Mnemonic());
+ value()->PrintNameTo(stream);
+}
+
+
void HChange::PrintDataTo(StringStream* stream) {
HUnaryOperation::PrintDataTo(stream);
stream->Add(" %s to %s", from().Mnemonic(), to().Mnemonic());
if (CanTruncateToInt32()) stream->Add(" truncating-int32");
if (CheckFlag(kBailoutOnMinusZero)) stream->Add(" -0?");
- if (CheckFlag(kDeoptimizeOnUndefined)) stream->Add(" deopt-on-undefined");
+ if (CheckFlag(kAllowUndefinedAsNaN)) stream->Add(" allow-undefined-as-nan");
}
-void HJSArrayLength::PrintDataTo(StringStream* stream) {
- value()->PrintNameTo(stream);
- stream->Add(" ");
- typecheck()->PrintNameTo(stream);
+static HValue* SimplifiedDividendForMathFloorOfDiv(HValue* dividend) {
+ // A value with an integer representation does not need to be transformed.
+ if (dividend->representation().IsInteger32()) {
+ return dividend;
+ }
+ // A change from an integer32 can be replaced by the integer32 value.
+ if (dividend->IsChange() &&
+ HChange::cast(dividend)->from().IsInteger32()) {
+ return HChange::cast(dividend)->value();
+ }
+ return NULL;
}
HValue* HUnaryMathOperation::Canonicalize() {
+ if (op() == kMathRound || op() == kMathFloor) {
+ HValue* val = value();
+ if (val->IsChange()) val = HChange::cast(val)->value();
+
+ // If the input is smi or integer32 then we replace the instruction with its
+ // input.
+ if (val->representation().IsSmiOrInteger32()) {
+ if (!val->representation().Equals(representation())) {
+ HChange* result = new(block()->zone()) HChange(
+ val, representation(), false, false);
+ result->InsertBefore(this);
+ return result;
+ }
+ return val;
+ }
+ }
+
if (op() == kMathFloor) {
- // If the input is integer32 then we replace the floor instruction
- // with its input. This happens before the representation changes are
- // introduced.
- if (value()->representation().IsInteger32()) return value();
-
-#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_IA32) || \
- defined(V8_TARGET_ARCH_X64)
- if (value()->IsDiv() && (value()->UseCount() == 1)) {
- // TODO(2038): Implement this optimization for non ARM architectures.
- HDiv* hdiv = HDiv::cast(value());
+ HValue* val = value();
+ if (val->IsChange()) val = HChange::cast(val)->value();
+ if (val->IsDiv() && (val->UseCount() == 1)) {
+ HDiv* hdiv = HDiv::cast(val);
HValue* left = hdiv->left();
HValue* right = hdiv->right();
// Try to simplify left and right values of the division.
- HValue* new_left =
- LChunkBuilder::SimplifiedDividendForMathFloorOfDiv(left);
+ HValue* new_left = SimplifiedDividendForMathFloorOfDiv(left);
+ if (new_left == NULL &&
+ hdiv->observed_input_representation(1).IsSmiOrInteger32()) {
+ new_left = new(block()->zone()) HChange(
+ left, Representation::Integer32(), false, false);
+ HChange::cast(new_left)->InsertBefore(this);
+ }
HValue* new_right =
- LChunkBuilder::SimplifiedDivisorForMathFloorOfDiv(right);
+ LChunkBuilder::SimplifiedDivisorForMathFloorOfDiv(right);
+ if (new_right == NULL &&
+#if V8_TARGET_ARCH_ARM
+ CpuFeatures::IsSupported(SUDIV) &&
+#endif
+ hdiv->observed_input_representation(2).IsSmiOrInteger32()) {
+ new_right = new(block()->zone()) HChange(
+ right, Representation::Integer32(), false, false);
+ HChange::cast(new_right)->InsertBefore(this);
+ }
// Return if left or right are not optimizable.
if ((new_left == NULL) || (new_right == NULL)) return this;
@@ -991,41 +1408,34 @@ HValue* HUnaryMathOperation::Canonicalize() {
!HInstruction::cast(new_right)->IsLinked()) {
HInstruction::cast(new_right)->InsertBefore(this);
}
- HMathFloorOfDiv* instr = new(block()->zone()) HMathFloorOfDiv(context(),
- new_left,
- new_right);
+ HMathFloorOfDiv* instr =
+ HMathFloorOfDiv::New(block()->zone(), context(), new_left, new_right);
// Replace this HMathFloor instruction by the new HMathFloorOfDiv.
instr->InsertBefore(this);
ReplaceAllUsesWith(instr);
Kill();
// We know the division had no other uses than this HMathFloor. Delete it.
- // Also delete the arguments of the division if they are not used any
- // more.
+ // Dead code elimination will deal with |left| and |right| if
+ // appropriate.
hdiv->DeleteAndReplaceWith(NULL);
- ASSERT(left->IsChange() || left->IsConstant());
- ASSERT(right->IsChange() || right->IsConstant());
- if (left->HasNoUses()) left->DeleteAndReplaceWith(NULL);
- if (right->HasNoUses()) right->DeleteAndReplaceWith(NULL);
// Return NULL to remove this instruction from the graph.
return NULL;
}
-#endif // V8_TARGET_ARCH_ARM
}
return this;
}
HValue* HCheckInstanceType::Canonicalize() {
- if (check_ == IS_STRING &&
- !value()->type().IsUninitialized() &&
- value()->type().IsString()) {
- return NULL;
+ if (check_ == IS_STRING && value()->type().IsString()) {
+ return value();
}
- if (check_ == IS_SYMBOL &&
- value()->IsConstant() &&
- HConstant::cast(value())->handle()->IsSymbol()) {
- return NULL;
+
+ if (check_ == IS_INTERNALIZED_STRING && value()->IsConstant()) {
+ if (HConstant::cast(value())->HasInternalizedStringValue()) {
+ return value();
+ }
}
return this;
}
@@ -1055,9 +1465,9 @@ void HCheckInstanceType::GetCheckMaskAndTag(uint8_t* mask, uint8_t* tag) {
*mask = kIsNotStringMask;
*tag = kStringTag;
return;
- case IS_SYMBOL:
- *mask = kIsSymbolMask;
- *tag = kSymbolTag;
+ case IS_INTERNALIZED_STRING:
+ *mask = kIsNotInternalizedMask;
+ *tag = kInternalizedTag;
return;
default:
UNREACHABLE();
@@ -1065,26 +1475,46 @@ void HCheckInstanceType::GetCheckMaskAndTag(uint8_t* mask, uint8_t* tag) {
}
-void HLoadElements::PrintDataTo(StringStream* stream) {
- value()->PrintNameTo(stream);
- stream->Add(" ");
- typecheck()->PrintNameTo(stream);
+void HCheckMaps::HandleSideEffectDominator(GVNFlag side_effect,
+ HValue* dominator) {
+ ASSERT(side_effect == kChangesMaps);
+ // TODO(mstarzinger): For now we specialize on HStoreNamedField, but once
+ // type information is rich enough we should generalize this to any HType
+ // for which the map is known.
+ if (HasNoUses() && dominator->IsStoreNamedField()) {
+ HStoreNamedField* store = HStoreNamedField::cast(dominator);
+ if (!store->has_transition() || store->object() != value()) return;
+ HConstant* transition = HConstant::cast(store->transition());
+ if (map_set_.Contains(transition->GetUnique())) {
+ DeleteAndReplaceWith(NULL);
+ return;
+ }
+ }
}
void HCheckMaps::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
- stream->Add(" [%p", *map_set()->first());
- for (int i = 1; i < map_set()->length(); ++i) {
- stream->Add(",%p", *map_set()->at(i));
+ stream->Add(" [%p", *map_set_.at(0).handle());
+ for (int i = 1; i < map_set_.size(); ++i) {
+ stream->Add(",%p", *map_set_.at(i).handle());
}
- stream->Add("]");
+ stream->Add("]%s", CanOmitMapChecks() ? "(omitted)" : "");
}
-void HCheckFunction::PrintDataTo(StringStream* stream) {
+void HCheckValue::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
- stream->Add(" %p", *target());
+ stream->Add(" ");
+ object().handle()->ShortPrint(stream);
+}
+
+
+HValue* HCheckValue::Canonicalize() {
+ return (value()->IsConstant() &&
+ HConstant::cast(value())->GetUnique() == object_)
+ ? NULL
+ : this;
}
@@ -1093,23 +1523,19 @@ const char* HCheckInstanceType::GetCheckName() {
case IS_SPEC_OBJECT: return "object";
case IS_JS_ARRAY: return "array";
case IS_STRING: return "string";
- case IS_SYMBOL: return "symbol";
+ case IS_INTERNALIZED_STRING: return "internalized_string";
}
UNREACHABLE();
return "";
}
+
void HCheckInstanceType::PrintDataTo(StringStream* stream) {
stream->Add("%s ", GetCheckName());
HUnaryOperation::PrintDataTo(stream);
}
-void HCheckPrototypeMaps::PrintDataTo(StringStream* stream) {
- stream->Add("[receiver_prototype=%p,holder=%p]", *prototype(), *holder());
-}
-
-
void HCallStub::PrintDataTo(StringStream* stream) {
stream->Add("%s ",
CodeStub::MajorName(major_key_, false));
@@ -1117,6 +1543,15 @@ void HCallStub::PrintDataTo(StringStream* stream) {
}
+void HUnknownOSRValue::PrintDataTo(StringStream *stream) {
+ const char* type = "expression";
+ if (environment_->is_local_index(index_)) type = "local";
+ if (environment_->is_special_index(index_)) type = "special";
+ if (environment_->is_parameter_index(index_)) type = "parameter";
+ stream->Add("%s @ %d", type, index_);
+}
+
+
void HInstanceOf::PrintDataTo(StringStream* stream) {
left()->PrintNameTo(stream);
stream->Add(" ");
@@ -1127,25 +1562,37 @@ void HInstanceOf::PrintDataTo(StringStream* stream) {
Range* HValue::InferRange(Zone* zone) {
- // Untagged integer32 cannot be -0, all other representations can.
- Range* result = new(zone) Range();
- result->set_can_be_minus_zero(!representation().IsInteger32());
+ Range* result;
+ if (representation().IsSmi() || type().IsSmi()) {
+ result = new(zone) Range(Smi::kMinValue, Smi::kMaxValue);
+ result->set_can_be_minus_zero(false);
+ } else {
+ result = new(zone) Range();
+ result->set_can_be_minus_zero(!CheckFlag(kAllUsesTruncatingToInt32));
+ // TODO(jkummerow): The range cannot be minus zero when the upper type
+ // bound is Integer32.
+ }
return result;
}
Range* HChange::InferRange(Zone* zone) {
Range* input_range = value()->range();
- if (from().IsInteger32() &&
- to().IsTagged() &&
- !value()->CheckFlag(HInstruction::kUint32) &&
- input_range != NULL && input_range->IsInSmiRange()) {
+ if (from().IsInteger32() && !value()->CheckFlag(HInstruction::kUint32) &&
+ (to().IsSmi() ||
+ (to().IsTagged() &&
+ input_range != NULL &&
+ input_range->IsInSmiRange()))) {
set_type(HType::Smi());
+ ClearGVNFlag(kChangesNewSpacePromotion);
}
Range* result = (input_range != NULL)
? input_range->Copy(zone)
: HValue::InferRange(zone);
- if (to().IsInteger32()) result->set_can_be_minus_zero(false);
+ result->set_can_be_minus_zero(!to().IsSmiOrInteger32() ||
+ !(CheckFlag(kAllUsesTruncatingToInt32) ||
+ CheckFlag(kAllUsesTruncatingToSmi)));
+ if (to().IsSmi()) result->ClampToSmi();
return result;
}
@@ -1160,10 +1607,18 @@ Range* HConstant::InferRange(Zone* zone) {
}
+int HPhi::position() const {
+ return block()->first()->position();
+}
+
+
Range* HPhi::InferRange(Zone* zone) {
- if (representation().IsInteger32()) {
+ Representation r = representation();
+ if (r.IsSmiOrInteger32()) {
if (block()->IsLoopHeader()) {
- Range* range = new(zone) Range(kMinInt, kMaxInt);
+ Range* range = r.IsSmi()
+ ? new(zone) Range(Smi::kMinValue, Smi::kMaxValue)
+ : new(zone) Range(kMinInt, kMaxInt);
return range;
} else {
Range* range = OperandAt(0)->range()->Copy(zone);
@@ -1179,15 +1634,19 @@ Range* HPhi::InferRange(Zone* zone) {
Range* HAdd::InferRange(Zone* zone) {
- if (representation().IsInteger32()) {
+ Representation r = representation();
+ if (r.IsSmiOrInteger32()) {
Range* a = left()->range();
Range* b = right()->range();
Range* res = a->Copy(zone);
- if (!res->AddAndCheckOverflow(b)) {
+ if (!res->AddAndCheckOverflow(r, b) ||
+ (r.IsInteger32() && CheckFlag(kAllUsesTruncatingToInt32)) ||
+ (r.IsSmi() && CheckFlag(kAllUsesTruncatingToSmi))) {
ClearFlag(kCanOverflow);
}
- bool m0 = a->CanBeMinusZero() && b->CanBeMinusZero();
- res->set_can_be_minus_zero(m0);
+ res->set_can_be_minus_zero(!CheckFlag(kAllUsesTruncatingToSmi) &&
+ !CheckFlag(kAllUsesTruncatingToInt32) &&
+ a->CanBeMinusZero() && b->CanBeMinusZero());
return res;
} else {
return HValue::InferRange(zone);
@@ -1196,14 +1655,19 @@ Range* HAdd::InferRange(Zone* zone) {
Range* HSub::InferRange(Zone* zone) {
- if (representation().IsInteger32()) {
+ Representation r = representation();
+ if (r.IsSmiOrInteger32()) {
Range* a = left()->range();
Range* b = right()->range();
Range* res = a->Copy(zone);
- if (!res->SubAndCheckOverflow(b)) {
+ if (!res->SubAndCheckOverflow(r, b) ||
+ (r.IsInteger32() && CheckFlag(kAllUsesTruncatingToInt32)) ||
+ (r.IsSmi() && CheckFlag(kAllUsesTruncatingToSmi))) {
ClearFlag(kCanOverflow);
}
- res->set_can_be_minus_zero(a->CanBeMinusZero() && b->CanBeZero());
+ res->set_can_be_minus_zero(!CheckFlag(kAllUsesTruncatingToSmi) &&
+ !CheckFlag(kAllUsesTruncatingToInt32) &&
+ a->CanBeMinusZero() && b->CanBeZero());
return res;
} else {
return HValue::InferRange(zone);
@@ -1212,16 +1676,24 @@ Range* HSub::InferRange(Zone* zone) {
Range* HMul::InferRange(Zone* zone) {
- if (representation().IsInteger32()) {
+ Representation r = representation();
+ if (r.IsSmiOrInteger32()) {
Range* a = left()->range();
Range* b = right()->range();
Range* res = a->Copy(zone);
- if (!res->MulAndCheckOverflow(b)) {
+ if (!res->MulAndCheckOverflow(r, b) ||
+ (((r.IsInteger32() && CheckFlag(kAllUsesTruncatingToInt32)) ||
+ (r.IsSmi() && CheckFlag(kAllUsesTruncatingToSmi))) &&
+ MulMinusOne())) {
+ // Truncated int multiplication is too precise and therefore not the
+ // same as converting to Double and back.
+ // Handle truncated integer multiplication by -1 specially.
ClearFlag(kCanOverflow);
}
- bool m0 = (a->CanBeZero() && b->CanBeNegative()) ||
- (a->CanBeNegative() && b->CanBeZero());
- res->set_can_be_minus_zero(m0);
+ res->set_can_be_minus_zero(!CheckFlag(kAllUsesTruncatingToSmi) &&
+ !CheckFlag(kAllUsesTruncatingToInt32) &&
+ ((a->CanBeZero() && b->CanBeNegative()) ||
+ (a->CanBeNegative() && b->CanBeZero())));
return res;
} else {
return HValue::InferRange(zone);
@@ -1231,20 +1703,20 @@ Range* HMul::InferRange(Zone* zone) {
Range* HDiv::InferRange(Zone* zone) {
if (representation().IsInteger32()) {
+ Range* a = left()->range();
+ Range* b = right()->range();
Range* result = new(zone) Range();
- if (left()->range()->CanBeMinusZero()) {
- result->set_can_be_minus_zero(true);
+ result->set_can_be_minus_zero(!CheckFlag(kAllUsesTruncatingToInt32) &&
+ (a->CanBeMinusZero() ||
+ (a->CanBeZero() && b->CanBeNegative())));
+ if (!a->Includes(kMinInt) ||
+ !b->Includes(-1) ||
+ CheckFlag(kAllUsesTruncatingToInt32)) {
+ // It is safe to clear kCanOverflow when kAllUsesTruncatingToInt32.
+ ClearFlag(HValue::kCanOverflow);
}
- if (left()->range()->CanBeZero() && right()->range()->CanBeNegative()) {
- result->set_can_be_minus_zero(true);
- }
-
- if (right()->range()->Includes(-1) && left()->range()->Includes(kMinInt)) {
- SetFlag(HValue::kCanOverflow);
- }
-
- if (!right()->range()->CanBeZero()) {
+ if (!b->CanBeZero()) {
ClearFlag(HValue::kCanBeDivByZero);
}
return result;
@@ -1257,11 +1729,26 @@ Range* HDiv::InferRange(Zone* zone) {
Range* HMod::InferRange(Zone* zone) {
if (representation().IsInteger32()) {
Range* a = left()->range();
- Range* result = new(zone) Range();
- if (a->CanBeMinusZero() || a->CanBeNegative()) {
- result->set_can_be_minus_zero(true);
+ Range* b = right()->range();
+
+ // The magnitude of the modulus is bounded by the right operand. Note that
+ // apart from the cases involving kMinInt, the calculation below is the same
+ // as Max(Abs(b->lower()), Abs(b->upper())) - 1.
+ int32_t positive_bound = -(Min(NegAbs(b->lower()), NegAbs(b->upper())) + 1);
+
+ // The result of the modulo operation has the sign of its left operand.
+ bool left_can_be_negative = a->CanBeMinusZero() || a->CanBeNegative();
+ Range* result = new(zone) Range(left_can_be_negative ? -positive_bound : 0,
+ a->CanBePositive() ? positive_bound : 0);
+
+ result->set_can_be_minus_zero(!CheckFlag(kAllUsesTruncatingToInt32) &&
+ left_can_be_negative);
+
+ if (!a->Includes(kMinInt) || !b->Includes(-1)) {
+ ClearFlag(HValue::kCanOverflow);
}
- if (!right()->range()->CanBeZero()) {
+
+ if (!b->CanBeZero()) {
ClearFlag(HValue::kCanBeDivByZero);
}
return result;
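
As a quick check of the positive_bound formula used in HMod::InferRange above, the sketch below, separate from the patch, spells out the NegAbs trick and two example evaluations.

#include <algorithm>
#include <cstdint>

// Sketch (not V8 code): NegAbs never overflows, so negating the minimum of the
// two NegAbs values plus one equals Max(|lower|, |upper|) - 1 in the common
// case, while mapping kMinInt to kMaxInt instead of overflowing.
int32_t NegAbs(int32_t v) { return v < 0 ? v : -v; }

int32_t PositiveModBound(int32_t lower, int32_t upper) {
  return -(std::min(NegAbs(lower), NegAbs(upper)) + 1);
}

// PositiveModBound(-5, 3) == 4                  (|a % b| <= |b| - 1 for b in [-5, 3])
// PositiveModBound(INT32_MIN, -1) == INT32_MAX  (no overflow on kMinInt)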
@@ -1271,8 +1758,451 @@ Range* HMod::InferRange(Zone* zone) {
}
+InductionVariableData* InductionVariableData::ExaminePhi(HPhi* phi) {
+ if (phi->block()->loop_information() == NULL) return NULL;
+ if (phi->OperandCount() != 2) return NULL;
+ int32_t candidate_increment;
+
+ candidate_increment = ComputeIncrement(phi, phi->OperandAt(0));
+ if (candidate_increment != 0) {
+ return new(phi->block()->graph()->zone())
+ InductionVariableData(phi, phi->OperandAt(1), candidate_increment);
+ }
+
+ candidate_increment = ComputeIncrement(phi, phi->OperandAt(1));
+ if (candidate_increment != 0) {
+ return new(phi->block()->graph()->zone())
+ InductionVariableData(phi, phi->OperandAt(0), candidate_increment);
+ }
+
+ return NULL;
+}
+
+
+/*
+ * This function tries to match the following patterns (and all the relevant
+ * variants related to |, & and + being commutative):
+ * base | constant_or_mask
+ * base & constant_and_mask
+ * (base + constant_offset) & constant_and_mask
+ * (base - constant_offset) & constant_and_mask
+ */
+void InductionVariableData::DecomposeBitwise(
+ HValue* value,
+ BitwiseDecompositionResult* result) {
+ HValue* base = IgnoreOsrValue(value);
+ result->base = value;
+
+ if (!base->representation().IsInteger32()) return;
+
+ if (base->IsBitwise()) {
+ bool allow_offset = false;
+ int32_t mask = 0;
+
+ HBitwise* bitwise = HBitwise::cast(base);
+ if (bitwise->right()->IsInteger32Constant()) {
+ mask = bitwise->right()->GetInteger32Constant();
+ base = bitwise->left();
+ } else if (bitwise->left()->IsInteger32Constant()) {
+ mask = bitwise->left()->GetInteger32Constant();
+ base = bitwise->right();
+ } else {
+ return;
+ }
+ if (bitwise->op() == Token::BIT_AND) {
+ result->and_mask = mask;
+ allow_offset = true;
+ } else if (bitwise->op() == Token::BIT_OR) {
+ result->or_mask = mask;
+ } else {
+ return;
+ }
+
+ result->context = bitwise->context();
+
+ if (allow_offset) {
+ if (base->IsAdd()) {
+ HAdd* add = HAdd::cast(base);
+ if (add->right()->IsInteger32Constant()) {
+ base = add->left();
+ } else if (add->left()->IsInteger32Constant()) {
+ base = add->right();
+ }
+ } else if (base->IsSub()) {
+ HSub* sub = HSub::cast(base);
+ if (sub->right()->IsInteger32Constant()) {
+ base = sub->left();
+ }
+ }
+ }
+
+ result->base = base;
+ }
+}
+
+
+void InductionVariableData::AddCheck(HBoundsCheck* check,
+ int32_t upper_limit) {
+ ASSERT(limit_validity() != NULL);
+ if (limit_validity() != check->block() &&
+ !limit_validity()->Dominates(check->block())) return;
+ if (!phi()->block()->current_loop()->IsNestedInThisLoop(
+ check->block()->current_loop())) return;
+
+ ChecksRelatedToLength* length_checks = checks();
+ while (length_checks != NULL) {
+ if (length_checks->length() == check->length()) break;
+ length_checks = length_checks->next();
+ }
+ if (length_checks == NULL) {
+ length_checks = new(check->block()->zone())
+ ChecksRelatedToLength(check->length(), checks());
+ checks_ = length_checks;
+ }
+
+ length_checks->AddCheck(check, upper_limit);
+}
+
+
+void InductionVariableData::ChecksRelatedToLength::CloseCurrentBlock() {
+ if (checks() != NULL) {
+ InductionVariableCheck* c = checks();
+ HBasicBlock* current_block = c->check()->block();
+ while (c != NULL && c->check()->block() == current_block) {
+ c->set_upper_limit(current_upper_limit_);
+ c = c->next();
+ }
+ }
+}
+
+
+void InductionVariableData::ChecksRelatedToLength::UseNewIndexInCurrentBlock(
+ Token::Value token,
+ int32_t mask,
+ HValue* index_base,
+ HValue* context) {
+ ASSERT(first_check_in_block() != NULL);
+ HValue* previous_index = first_check_in_block()->index();
+ ASSERT(context != NULL);
+
+ Zone* zone = index_base->block()->graph()->zone();
+ set_added_constant(HConstant::New(zone, context, mask));
+ if (added_index() != NULL) {
+ added_constant()->InsertBefore(added_index());
+ } else {
+ added_constant()->InsertBefore(first_check_in_block());
+ }
+
+ if (added_index() == NULL) {
+ first_check_in_block()->ReplaceAllUsesWith(first_check_in_block()->index());
+ HInstruction* new_index = HBitwise::New(zone, context, token, index_base,
+ added_constant());
+ ASSERT(new_index->IsBitwise());
+ new_index->ClearAllSideEffects();
+ new_index->AssumeRepresentation(Representation::Integer32());
+ set_added_index(HBitwise::cast(new_index));
+ added_index()->InsertBefore(first_check_in_block());
+ }
+ ASSERT(added_index()->op() == token);
+
+ added_index()->SetOperandAt(1, index_base);
+ added_index()->SetOperandAt(2, added_constant());
+ first_check_in_block()->SetOperandAt(0, added_index());
+ if (previous_index->UseCount() == 0) {
+ previous_index->DeleteAndReplaceWith(NULL);
+ }
+}
+
+void InductionVariableData::ChecksRelatedToLength::AddCheck(
+ HBoundsCheck* check,
+ int32_t upper_limit) {
+ BitwiseDecompositionResult decomposition;
+ InductionVariableData::DecomposeBitwise(check->index(), &decomposition);
+
+ if (first_check_in_block() == NULL ||
+ first_check_in_block()->block() != check->block()) {
+ CloseCurrentBlock();
+
+ first_check_in_block_ = check;
+ set_added_index(NULL);
+ set_added_constant(NULL);
+ current_and_mask_in_block_ = decomposition.and_mask;
+ current_or_mask_in_block_ = decomposition.or_mask;
+ current_upper_limit_ = upper_limit;
+
+ InductionVariableCheck* new_check = new(check->block()->graph()->zone())
+ InductionVariableCheck(check, checks_, upper_limit);
+ checks_ = new_check;
+ return;
+ }
+
+ if (upper_limit > current_upper_limit()) {
+ current_upper_limit_ = upper_limit;
+ }
+
+ if (decomposition.and_mask != 0 &&
+ current_or_mask_in_block() == 0) {
+ if (current_and_mask_in_block() == 0 ||
+ decomposition.and_mask > current_and_mask_in_block()) {
+ UseNewIndexInCurrentBlock(Token::BIT_AND,
+ decomposition.and_mask,
+ decomposition.base,
+ decomposition.context);
+ current_and_mask_in_block_ = decomposition.and_mask;
+ }
+ check->set_skip_check();
+ }
+ if (current_and_mask_in_block() == 0) {
+ if (decomposition.or_mask > current_or_mask_in_block()) {
+ UseNewIndexInCurrentBlock(Token::BIT_OR,
+ decomposition.or_mask,
+ decomposition.base,
+ decomposition.context);
+ current_or_mask_in_block_ = decomposition.or_mask;
+ }
+ check->set_skip_check();
+ }
+
+ if (!check->skip_check()) {
+ InductionVariableCheck* new_check = new(check->block()->graph()->zone())
+ InductionVariableCheck(check, checks_, upper_limit);
+ checks_ = new_check;
+ }
+}
+
+
+/*
+ * This method detects if phi is an induction variable, with phi_operand as
+ * its "incremented" value (the other operand would be the "base" value).
+ *
+ * It checks if phi_operand has the form "phi + constant".
+ * If yes, the constant is the increment that the induction variable gets at
+ * every loop iteration.
+ * Otherwise it returns 0.
+ */
+int32_t InductionVariableData::ComputeIncrement(HPhi* phi,
+ HValue* phi_operand) {
+ if (!phi_operand->representation().IsInteger32()) return 0;
+
+ if (phi_operand->IsAdd()) {
+ HAdd* operation = HAdd::cast(phi_operand);
+ if (operation->left() == phi &&
+ operation->right()->IsInteger32Constant()) {
+ return operation->right()->GetInteger32Constant();
+ } else if (operation->right() == phi &&
+ operation->left()->IsInteger32Constant()) {
+ return operation->left()->GetInteger32Constant();
+ }
+ } else if (phi_operand->IsSub()) {
+ HSub* operation = HSub::cast(phi_operand);
+ if (operation->left() == phi &&
+ operation->right()->IsInteger32Constant()) {
+ return -operation->right()->GetInteger32Constant();
+ }
+ }
+
+ return 0;
+}
+
+
+/*
+ * Swaps the information in "update" with the one contained in "this".
+ * The swapping is important because this method is used while doing a
+ * dominator tree traversal, and "update" will retain the old data that
+ * will be restored while backtracking.
+ */
+void InductionVariableData::UpdateAdditionalLimit(
+ InductionVariableLimitUpdate* update) {
+ ASSERT(update->updated_variable == this);
+ if (update->limit_is_upper) {
+ swap(&additional_upper_limit_, &update->limit);
+ swap(&additional_upper_limit_is_included_, &update->limit_is_included);
+ } else {
+ swap(&additional_lower_limit_, &update->limit);
+ swap(&additional_lower_limit_is_included_, &update->limit_is_included);
+ }
+}
+
+
+int32_t InductionVariableData::ComputeUpperLimit(int32_t and_mask,
+ int32_t or_mask) {
+ // Should be Smi::kMaxValue but it must fit 32 bits; lower is safe anyway.
+ const int32_t MAX_LIMIT = 1 << 30;
+
+ int32_t result = MAX_LIMIT;
+
+ if (limit() != NULL &&
+ limit()->IsInteger32Constant()) {
+ int32_t limit_value = limit()->GetInteger32Constant();
+ if (!limit_included()) {
+ limit_value--;
+ }
+ if (limit_value < result) result = limit_value;
+ }
+
+ if (additional_upper_limit() != NULL &&
+ additional_upper_limit()->IsInteger32Constant()) {
+ int32_t limit_value = additional_upper_limit()->GetInteger32Constant();
+ if (!additional_upper_limit_is_included()) {
+ limit_value--;
+ }
+ if (limit_value < result) result = limit_value;
+ }
+
+ if (and_mask > 0 && and_mask < MAX_LIMIT) {
+ if (and_mask < result) result = and_mask;
+ return result;
+ }
+
+ // Add the effect of the or_mask.
+ result |= or_mask;
+
+ return result >= MAX_LIMIT ? kNoLimit : result;
+}
+
+
+HValue* InductionVariableData::IgnoreOsrValue(HValue* v) {
+ if (!v->IsPhi()) return v;
+ HPhi* phi = HPhi::cast(v);
+ if (phi->OperandCount() != 2) return v;
+ if (phi->OperandAt(0)->block()->is_osr_entry()) {
+ return phi->OperandAt(1);
+ } else if (phi->OperandAt(1)->block()->is_osr_entry()) {
+ return phi->OperandAt(0);
+ } else {
+ return v;
+ }
+}
+
+
+InductionVariableData* InductionVariableData::GetInductionVariableData(
+ HValue* v) {
+ v = IgnoreOsrValue(v);
+ if (v->IsPhi()) {
+ return HPhi::cast(v)->induction_variable_data();
+ }
+ return NULL;
+}
+
+
+/*
+ * Check if a conditional branch to "current_branch" with token "token" is
+ * the branch that keeps the induction loop running (and, conversely, will
+ * terminate it if the "other_branch" is taken).
+ *
+ * Three conditions must be met:
+ * - "current_branch" must be in the induction loop.
+ * - "other_branch" must be out of the induction loop.
+ * - "token" and the induction increment must be "compatible": the token should
+ * be a condition that keeps the execution inside the loop until the limit is
+ * reached.
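+ *
+ * For example, in "for (i = 0; i < n; i++)" the increment is +1 and the LT
+ * branch that stays inside the loop body is the loop guard, while a GT or GTE
+ * comparison combined with a positive increment would not qualify.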
+ */
+bool InductionVariableData::CheckIfBranchIsLoopGuard(
+ Token::Value token,
+ HBasicBlock* current_branch,
+ HBasicBlock* other_branch) {
+ if (!phi()->block()->current_loop()->IsNestedInThisLoop(
+ current_branch->current_loop())) {
+ return false;
+ }
+
+ if (phi()->block()->current_loop()->IsNestedInThisLoop(
+ other_branch->current_loop())) {
+ return false;
+ }
+
+ if (increment() > 0 && (token == Token::LT || token == Token::LTE)) {
+ return true;
+ }
+ if (increment() < 0 && (token == Token::GT || token == Token::GTE)) {
+ return true;
+ }
+ if (Token::IsInequalityOp(token) && (increment() == 1 || increment() == -1)) {
+ return true;
+ }
+
+ return false;
+}
+
+
+void InductionVariableData::ComputeLimitFromPredecessorBlock(
+ HBasicBlock* block,
+ LimitFromPredecessorBlock* result) {
+ if (block->predecessors()->length() != 1) return;
+ HBasicBlock* predecessor = block->predecessors()->at(0);
+ HInstruction* end = predecessor->last();
+
+ if (!end->IsCompareNumericAndBranch()) return;
+ HCompareNumericAndBranch* branch = HCompareNumericAndBranch::cast(end);
+
+ Token::Value token = branch->token();
+ if (!Token::IsArithmeticCompareOp(token)) return;
+
+ HBasicBlock* other_target;
+ if (block == branch->SuccessorAt(0)) {
+ other_target = branch->SuccessorAt(1);
+ } else {
+ other_target = branch->SuccessorAt(0);
+ token = Token::NegateCompareOp(token);
+ ASSERT(block == branch->SuccessorAt(1));
+ }
+
+ InductionVariableData* data;
+
+ data = GetInductionVariableData(branch->left());
+ HValue* limit = branch->right();
+ if (data == NULL) {
+ data = GetInductionVariableData(branch->right());
+ token = Token::ReverseCompareOp(token);
+ limit = branch->left();
+ }
+
+ if (data != NULL) {
+ result->variable = data;
+ result->token = token;
+ result->limit = limit;
+ result->other_target = other_target;
+ }
+}
+
+
+/*
+ * Compute the limit that is imposed on an induction variable when entering
+ * "block" (if any).
+ * If the limit is the "proper" induction limit (the one that makes the loop
+ * terminate when the induction variable reaches it), it is stored directly in
+ * the induction variable data.
+ * Otherwise the limit is written in "additional_limit" and the method
+ * returns true.
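+ *
+ * For example, the loop's own guard "i < n" becomes the proper limit and is
+ * stored directly on the induction variable, whereas an unrelated dominating
+ * check such as "if (i < 100)" only produces an additional limit.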
+ */
+bool InductionVariableData::ComputeInductionVariableLimit(
+ HBasicBlock* block,
+ InductionVariableLimitUpdate* additional_limit) {
+ LimitFromPredecessorBlock limit;
+ ComputeLimitFromPredecessorBlock(block, &limit);
+ if (!limit.LimitIsValid()) return false;
+
+ if (limit.variable->CheckIfBranchIsLoopGuard(limit.token,
+ block,
+ limit.other_target)) {
+ limit.variable->limit_ = limit.limit;
+ limit.variable->limit_included_ = limit.LimitIsIncluded();
+ limit.variable->limit_validity_ = block;
+ limit.variable->induction_exit_block_ = block->predecessors()->at(0);
+ limit.variable->induction_exit_target_ = limit.other_target;
+ return false;
+ } else {
+ additional_limit->updated_variable = limit.variable;
+ additional_limit->limit = limit.limit;
+ additional_limit->limit_is_upper = limit.LimitIsUpper();
+ additional_limit->limit_is_included = limit.LimitIsIncluded();
+ return true;
+ }
+}
+
+
Range* HMathMinMax::InferRange(Zone* zone) {
- if (representation().IsInteger32()) {
+ if (representation().IsSmiOrInteger32()) {
Range* a = left()->range();
Range* b = right()->range();
Range* res = a->Copy(zone);
@@ -1297,14 +2227,15 @@ void HPhi::PrintTo(StringStream* stream) {
value->PrintNameTo(stream);
stream->Add(" ");
}
- stream->Add(" uses%d_%di_%dd_%dt",
+ stream->Add(" uses:%d_%ds_%di_%dd_%dt",
UseCount(),
+ smi_non_phi_uses() + smi_indirect_uses(),
int32_non_phi_uses() + int32_indirect_uses(),
double_non_phi_uses() + double_indirect_uses(),
tagged_non_phi_uses() + tagged_indirect_uses());
- stream->Add("%s%s]",
- is_live() ? "_live" : "",
- IsConvertibleToInteger() ? "" : "_ncti");
+ PrintRangeTo(stream);
+ PrintTypeTo(stream);
+ stream->Add("]");
}
@@ -1353,18 +2284,27 @@ void HPhi::DeleteFromGraph() {
void HPhi::InitRealUses(int phi_id) {
// Initialize real uses.
phi_id_ = phi_id;
+ // Compute a conservative approximation of truncating uses before inferring
+ // representations. The proper, exact computation will be done later, when
+ // inserting representation changes.
+ SetFlag(kTruncatingToSmi);
+ SetFlag(kTruncatingToInt32);
for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
HValue* value = it.value();
if (!value->IsPhi()) {
- Representation rep = value->ObservedInputRepresentation(it.index());
+ Representation rep = value->observed_input_representation(it.index());
non_phi_uses_[rep.kind()] += value->LoopWeight();
if (FLAG_trace_representation) {
- PrintF("%d %s is used by %d %s as %s\n",
- this->id(),
- this->Mnemonic(),
- value->id(),
- value->Mnemonic(),
- rep.Mnemonic());
+ PrintF("#%d Phi is used by real #%d %s as %s\n",
+ id(), value->id(), value->Mnemonic(), rep.Mnemonic());
+ }
+ if (!value->IsSimulate()) {
+ if (!value->CheckFlag(kTruncatingToSmi)) {
+ ClearFlag(kTruncatingToSmi);
+ }
+ if (!value->CheckFlag(kTruncatingToInt32)) {
+ ClearFlag(kTruncatingToInt32);
+ }
}
}
}
@@ -1373,11 +2313,9 @@ void HPhi::InitRealUses(int phi_id) {
void HPhi::AddNonPhiUsesFrom(HPhi* other) {
if (FLAG_trace_representation) {
- PrintF("adding to %d %s uses of %d %s: i%d d%d t%d\n",
- this->id(),
- this->Mnemonic(),
- other->id(),
- other->Mnemonic(),
+ PrintF("adding to #%d Phi uses of #%d Phi: s%d i%d d%d t%d\n",
+ id(), other->id(),
+ other->non_phi_uses_[Representation::kSmi],
other->non_phi_uses_[Representation::kInteger32],
other->non_phi_uses_[Representation::kDouble],
other->non_phi_uses_[Representation::kTagged]);
@@ -1396,9 +2334,26 @@ void HPhi::AddIndirectUsesTo(int* dest) {
}
-void HPhi::ResetInteger32Uses() {
- non_phi_uses_[Representation::kInteger32] = 0;
- indirect_uses_[Representation::kInteger32] = 0;
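+// Merges the simulates in "list" into this one: values with an assigned index
+// fill slots this simulate does not bind yet, pushed values first cancel
+// pending pops and are otherwise appended, and every merged simulate is
+// deleted afterwards.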
+void HSimulate::MergeWith(ZoneList<HSimulate*>* list) {
+ while (!list->is_empty()) {
+ HSimulate* from = list->RemoveLast();
+ ZoneList<HValue*>* from_values = &from->values_;
+ for (int i = 0; i < from_values->length(); ++i) {
+ if (from->HasAssignedIndexAt(i)) {
+ int index = from->GetAssignedIndexAt(i);
+ if (HasValueForIndex(index)) continue;
+ AddAssignedValue(index, from_values->at(i));
+ } else {
+ if (pop_count_ > 0) {
+ pop_count_--;
+ } else {
+ AddPushedValue(from_values->at(i));
+ }
+ }
+ }
+ pop_count_ += from->pop_count_;
+ from->DeleteAndReplaceWith(NULL);
+ }
}
@@ -1407,29 +2362,73 @@ void HSimulate::PrintDataTo(StringStream* stream) {
if (pop_count_ > 0) stream->Add(" pop %d", pop_count_);
if (values_.length() > 0) {
if (pop_count_ > 0) stream->Add(" /");
- for (int i = 0; i < values_.length(); ++i) {
- if (i > 0) stream->Add(",");
+ for (int i = values_.length() - 1; i >= 0; --i) {
if (HasAssignedIndexAt(i)) {
stream->Add(" var[%d] = ", GetAssignedIndexAt(i));
} else {
stream->Add(" push ");
}
values_[i]->PrintNameTo(stream);
+ if (i > 0) stream->Add(",");
}
}
}
-void HDeoptimize::PrintDataTo(StringStream* stream) {
- if (OperandCount() == 0) return;
- OperandAt(0)->PrintNameTo(stream);
- for (int i = 1; i < OperandCount(); ++i) {
- stream->Add(" ");
- OperandAt(i)->PrintNameTo(stream);
+void HSimulate::ReplayEnvironment(HEnvironment* env) {
+ ASSERT(env != NULL);
+ env->set_ast_id(ast_id());
+ env->Drop(pop_count());
+ for (int i = values()->length() - 1; i >= 0; --i) {
+ HValue* value = values()->at(i);
+ if (HasAssignedIndexAt(i)) {
+ env->Bind(GetAssignedIndexAt(i), value);
+ } else {
+ env->Push(value);
+ }
}
}
+static void ReplayEnvironmentNested(const ZoneList<HValue*>* values,
+ HCapturedObject* other) {
+ for (int i = 0; i < values->length(); ++i) {
+ HValue* value = values->at(i);
+ if (value->IsCapturedObject()) {
+ if (HCapturedObject::cast(value)->capture_id() == other->capture_id()) {
+ values->at(i) = other;
+ } else {
+ ReplayEnvironmentNested(HCapturedObject::cast(value)->values(), other);
+ }
+ }
+ }
+}
+
+
+// Replay captured objects by replacing all captured objects with the
+// same capture id in the current and all outer environments.
+void HCapturedObject::ReplayEnvironment(HEnvironment* env) {
+ ASSERT(env != NULL);
+ while (env != NULL) {
+ ReplayEnvironmentNested(env->values(), this);
+ env = env->outer();
+ }
+}
+
+
+void HCapturedObject::PrintDataTo(StringStream* stream) {
+ stream->Add("#%d ", capture_id());
+ HDematerializedObject::PrintDataTo(stream);
+}
+
+
+void HEnterInlined::RegisterReturnTarget(HBasicBlock* return_target,
+ Zone* zone) {
+ ASSERT(return_target->IsInlineReturnTarget());
+ return_targets_.Add(return_target, zone);
+}
+
+
void HEnterInlined::PrintDataTo(StringStream* stream) {
SmartArrayPointer<char> name = function()->debug_name()->ToCString();
stream->Add("%s, id=%d", *name, function()->id().ToInt());
@@ -1443,107 +2442,228 @@ static bool IsInteger32(double value) {
HConstant::HConstant(Handle<Object> handle, Representation r)
- : handle_(handle),
- has_int32_value_(false),
- has_double_value_(false) {
- set_representation(r);
- SetFlag(kUseGVN);
- if (handle_->IsNumber()) {
- double n = handle_->Number();
+ : HTemplateInstruction<0>(HType::TypeFromValue(handle)),
+ object_(Unique<Object>::CreateUninitialized(handle)),
+ has_smi_value_(false),
+ has_int32_value_(false),
+ has_double_value_(false),
+ has_external_reference_value_(false),
+ is_internalized_string_(false),
+ is_not_in_new_space_(true),
+ is_cell_(false),
+ boolean_value_(handle->BooleanValue()) {
+ if (handle->IsHeapObject()) {
+ Heap* heap = Handle<HeapObject>::cast(handle)->GetHeap();
+ is_not_in_new_space_ = !heap->InNewSpace(*handle);
+ }
+ if (handle->IsNumber()) {
+ double n = handle->Number();
has_int32_value_ = IsInteger32(n);
int32_value_ = DoubleToInt32(n);
+ has_smi_value_ = has_int32_value_ && Smi::IsValid(int32_value_);
double_value_ = n;
has_double_value_ = true;
+ } else {
+ is_internalized_string_ = handle->IsInternalizedString();
}
-}
-
-HConstant::HConstant(int32_t integer_value, Representation r)
- : has_int32_value_(true),
- has_double_value_(true),
- int32_value_(integer_value),
- double_value_(FastI2D(integer_value)) {
+ is_cell_ = !handle.is_null() &&
+ (handle->IsCell() || handle->IsPropertyCell());
+ Initialize(r);
+}
+
+
+HConstant::HConstant(Unique<Object> unique,
+ Representation r,
+ HType type,
+ bool is_internalize_string,
+ bool is_not_in_new_space,
+ bool is_cell,
+ bool boolean_value)
+ : HTemplateInstruction<0>(type),
+ object_(unique),
+ has_smi_value_(false),
+ has_int32_value_(false),
+ has_double_value_(false),
+ has_external_reference_value_(false),
+ is_internalized_string_(is_internalize_string),
+ is_not_in_new_space_(is_not_in_new_space),
+ is_cell_(is_cell),
+ boolean_value_(boolean_value) {
+ ASSERT(!unique.handle().is_null());
+ ASSERT(!type.IsTaggedNumber());
+ Initialize(r);
+}
+
+
+HConstant::HConstant(int32_t integer_value,
+ Representation r,
+ bool is_not_in_new_space,
+ Unique<Object> object)
+ : object_(object),
+ has_smi_value_(Smi::IsValid(integer_value)),
+ has_int32_value_(true),
+ has_double_value_(true),
+ has_external_reference_value_(false),
+ is_internalized_string_(false),
+ is_not_in_new_space_(is_not_in_new_space),
+ is_cell_(false),
+ boolean_value_(integer_value != 0),
+ int32_value_(integer_value),
+ double_value_(FastI2D(integer_value)) {
+ set_type(has_smi_value_ ? HType::Smi() : HType::TaggedNumber());
+ Initialize(r);
+}
+
+
+HConstant::HConstant(double double_value,
+ Representation r,
+ bool is_not_in_new_space,
+ Unique<Object> object)
+ : object_(object),
+ has_int32_value_(IsInteger32(double_value)),
+ has_double_value_(true),
+ has_external_reference_value_(false),
+ is_internalized_string_(false),
+ is_not_in_new_space_(is_not_in_new_space),
+ is_cell_(false),
+ boolean_value_(double_value != 0 && !std::isnan(double_value)),
+ int32_value_(DoubleToInt32(double_value)),
+ double_value_(double_value) {
+ has_smi_value_ = has_int32_value_ && Smi::IsValid(int32_value_);
+ set_type(has_smi_value_ ? HType::Smi() : HType::TaggedNumber());
+ Initialize(r);
+}
+
+
+HConstant::HConstant(ExternalReference reference)
+ : HTemplateInstruction<0>(HType::None()),
+ object_(Unique<Object>(Handle<Object>::null())),
+ has_smi_value_(false),
+ has_int32_value_(false),
+ has_double_value_(false),
+ has_external_reference_value_(true),
+ is_internalized_string_(false),
+ is_not_in_new_space_(true),
+ is_cell_(false),
+ boolean_value_(true),
+ external_reference_value_(reference) {
+ Initialize(Representation::External());
+}
+
+
+void HConstant::Initialize(Representation r) {
+ if (r.IsNone()) {
+ if (has_smi_value_ && SmiValuesAre31Bits()) {
+ r = Representation::Smi();
+ } else if (has_int32_value_) {
+ r = Representation::Integer32();
+ } else if (has_double_value_) {
+ r = Representation::Double();
+ } else if (has_external_reference_value_) {
+ r = Representation::External();
+ } else {
+ Handle<Object> object = object_.handle();
+ if (object->IsJSObject()) {
+ // Try to eagerly migrate JSObjects that have deprecated maps.
+ Handle<JSObject> js_object = Handle<JSObject>::cast(object);
+ if (js_object->map()->is_deprecated()) {
+ JSObject::TryMigrateInstance(js_object);
+ }
+ }
+ r = Representation::Tagged();
+ }
+ }
set_representation(r);
SetFlag(kUseGVN);
}
-HConstant::HConstant(double double_value, Representation r)
- : has_int32_value_(IsInteger32(double_value)),
- has_double_value_(true),
- int32_value_(DoubleToInt32(double_value)),
- double_value_(double_value) {
- set_representation(r);
- SetFlag(kUseGVN);
+bool HConstant::EmitAtUses() {
+ ASSERT(IsLinked());
+ if (block()->graph()->has_osr() &&
+ block()->graph()->IsStandardConstant(this)) {
+ // TODO(titzer): this seems like a hack that should be fixed by custom OSR.
+ return true;
+ }
+ if (UseCount() == 0) return true;
+ if (IsCell()) return false;
+ if (representation().IsDouble()) return false;
+ return true;
}
HConstant* HConstant::CopyToRepresentation(Representation r, Zone* zone) const {
+ if (r.IsSmi() && !has_smi_value_) return NULL;
if (r.IsInteger32() && !has_int32_value_) return NULL;
if (r.IsDouble() && !has_double_value_) return NULL;
- if (handle_.is_null()) {
- ASSERT(has_int32_value_ || has_double_value_);
- if (has_int32_value_) return new(zone) HConstant(int32_value_, r);
- return new(zone) HConstant(double_value_, r);
+ if (r.IsExternal() && !has_external_reference_value_) return NULL;
+ if (has_int32_value_) {
+ return new(zone) HConstant(int32_value_, r, is_not_in_new_space_, object_);
+ }
+ if (has_double_value_) {
+ return new(zone) HConstant(double_value_, r, is_not_in_new_space_, object_);
}
- return new(zone) HConstant(handle_, r);
+ if (has_external_reference_value_) {
+ return new(zone) HConstant(external_reference_value_);
+ }
+ ASSERT(!object_.handle().is_null());
+ return new(zone) HConstant(object_,
+ r,
+ type_,
+ is_internalized_string_,
+ is_not_in_new_space_,
+ is_cell_,
+ boolean_value_);
}
-HConstant* HConstant::CopyToTruncatedInt32(Zone* zone) const {
+Maybe<HConstant*> HConstant::CopyToTruncatedInt32(Zone* zone) {
+ HConstant* res = NULL;
if (has_int32_value_) {
- if (handle_.is_null()) {
- return new(zone) HConstant(int32_value_, Representation::Integer32());
- } else {
- // Re-use the existing Handle if possible.
- return new(zone) HConstant(handle_, Representation::Integer32());
- }
+ res = new(zone) HConstant(int32_value_,
+ Representation::Integer32(),
+ is_not_in_new_space_,
+ object_);
} else if (has_double_value_) {
- return new(zone) HConstant(DoubleToInt32(double_value_),
- Representation::Integer32());
- } else {
- return NULL;
+ res = new(zone) HConstant(DoubleToInt32(double_value_),
+ Representation::Integer32(),
+ is_not_in_new_space_,
+ object_);
}
+ return Maybe<HConstant*>(res != NULL, res);
}
-bool HConstant::ToBoolean() {
- // Converts the constant's boolean value according to
- // ECMAScript section 9.2 ToBoolean conversion.
- if (HasInteger32Value()) return Integer32Value() != 0;
- if (HasDoubleValue()) {
- double v = DoubleValue();
- return v != 0 && !isnan(v);
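+// Constant-folds the number conversion for simple constants: booleans become
+// 1 or 0, undefined becomes NaN and null becomes 0; for any other value no
+// replacement constant is produced.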
+Maybe<HConstant*> HConstant::CopyToTruncatedNumber(Zone* zone) {
+ HConstant* res = NULL;
+ Handle<Object> handle = this->handle(zone->isolate());
+ if (handle->IsBoolean()) {
+ res = handle->BooleanValue() ?
+ new(zone) HConstant(1) : new(zone) HConstant(0);
+ } else if (handle->IsUndefined()) {
+ res = new(zone) HConstant(OS::nan_value());
+ } else if (handle->IsNull()) {
+ res = new(zone) HConstant(0);
}
- Handle<Object> literal = handle();
- if (literal->IsTrue()) return true;
- if (literal->IsFalse()) return false;
- if (literal->IsUndefined()) return false;
- if (literal->IsNull()) return false;
- if (literal->IsString() && String::cast(*literal)->length() == 0) {
- return false;
- }
- return true;
+ return Maybe<HConstant*>(res != NULL, res);
}
+
void HConstant::PrintDataTo(StringStream* stream) {
if (has_int32_value_) {
stream->Add("%d ", int32_value_);
} else if (has_double_value_) {
stream->Add("%f ", FmtElm(double_value_));
+ } else if (has_external_reference_value_) {
+ stream->Add("%p ", reinterpret_cast<void*>(
+ external_reference_value_.address()));
} else {
- handle()->ShortPrint(stream);
+ handle(Isolate::Current())->ShortPrint(stream);
}
}
-bool HArrayLiteral::IsCopyOnWrite() const {
- if (!boilerplate_object_->IsJSObject()) return false;
- return Handle<JSObject>::cast(boilerplate_object_)->elements()->map() ==
- HEAP->fixed_cow_array_map();
-}
-
-
void HBinaryOperation::PrintDataTo(StringStream* stream) {
left()->PrintNameTo(stream);
stream->Add(" ");
@@ -1553,8 +2673,115 @@ void HBinaryOperation::PrintDataTo(StringStream* stream) {
}
+void HBinaryOperation::InferRepresentation(HInferRepresentationPhase* h_infer) {
+ ASSERT(CheckFlag(kFlexibleRepresentation));
+ Representation new_rep = RepresentationFromInputs();
+ UpdateRepresentation(new_rep, h_infer, "inputs");
+
+ if (representation().IsSmi() && HasNonSmiUse()) {
+ UpdateRepresentation(
+ Representation::Integer32(), h_infer, "use requirements");
+ }
+
+ if (observed_output_representation_.IsNone()) {
+ new_rep = RepresentationFromUses();
+ UpdateRepresentation(new_rep, h_infer, "uses");
+ } else {
+ new_rep = RepresentationFromOutput();
+ UpdateRepresentation(new_rep, h_infer, "output");
+ }
+}
+
+
+Representation HBinaryOperation::RepresentationFromInputs() {
+ // Determine the worst case of observed input representations and
+ // the currently assumed output representation.
+ Representation rep = representation();
+ for (int i = 1; i <= 2; ++i) {
+ rep = rep.generalize(observed_input_representation(i));
+ }
+ // If any of the actual input representations is more general than what we
+ // have so far but not Tagged, use that representation instead.
+ Representation left_rep = left()->representation();
+ Representation right_rep = right()->representation();
+ if (!left_rep.IsTagged()) rep = rep.generalize(left_rep);
+ if (!right_rep.IsTagged()) rep = rep.generalize(right_rep);
+
+ return rep;
+}
+
+
+bool HBinaryOperation::IgnoreObservedOutputRepresentation(
+ Representation current_rep) {
+ return ((current_rep.IsInteger32() && CheckUsesForFlag(kTruncatingToInt32)) ||
+ (current_rep.IsSmi() && CheckUsesForFlag(kTruncatingToSmi))) &&
+ // Mul in Integer32 mode would be too precise.
+ (!this->IsMul() || HMul::cast(this)->MulMinusOne());
+}
+
+
+Representation HBinaryOperation::RepresentationFromOutput() {
+ Representation rep = representation();
+ // Consider observed output representation, but ignore it if it's Double,
+ // this instruction is not a division, and all its uses are truncating
+ // to Integer32.
+ if (observed_output_representation_.is_more_general_than(rep) &&
+ !IgnoreObservedOutputRepresentation(rep)) {
+ return observed_output_representation_;
+ }
+ return Representation::None();
+}
+
+
+void HBinaryOperation::AssumeRepresentation(Representation r) {
+ set_observed_input_representation(1, r);
+ set_observed_input_representation(2, r);
+ HValue::AssumeRepresentation(r);
+}
+
+
+void HMathMinMax::InferRepresentation(HInferRepresentationPhase* h_infer) {
+ ASSERT(CheckFlag(kFlexibleRepresentation));
+ Representation new_rep = RepresentationFromInputs();
+ UpdateRepresentation(new_rep, h_infer, "inputs");
+ // Do not care about uses.
+}
+
+
Range* HBitwise::InferRange(Zone* zone) {
- if (op() == Token::BIT_XOR) return HValue::InferRange(zone);
+ if (op() == Token::BIT_XOR) {
+ if (left()->HasRange() && right()->HasRange()) {
+ // The maximum value has the high bit, and all bits below, set:
+ // (1 << high) - 1.
+ // If the range can be negative, the minimum int is a negative number with
+ // the high bit, and all bits below, unset:
+ // -(1 << high).
+ // If it cannot be negative, conservatively choose 0 as minimum int.
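+ //
+ // For example, if every set bit of both operands is at or below bit 2, the
+ // XOR result can be at most 7 = (1 << 3) - 1, and it can only be negative
+ // when at least one of the operands can be negative.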
+ int64_t left_upper = left()->range()->upper();
+ int64_t left_lower = left()->range()->lower();
+ int64_t right_upper = right()->range()->upper();
+ int64_t right_lower = right()->range()->lower();
+
+ if (left_upper < 0) left_upper = ~left_upper;
+ if (left_lower < 0) left_lower = ~left_lower;
+ if (right_upper < 0) right_upper = ~right_upper;
+ if (right_lower < 0) right_lower = ~right_lower;
+
+ int high = MostSignificantBit(
+ static_cast<uint32_t>(
+ left_upper | left_lower | right_upper | right_lower));
+
+ int64_t limit = 1;
+ limit <<= high;
+ int32_t min = (left()->range()->CanBeNegative() ||
+ right()->range()->CanBeNegative())
+ ? static_cast<int32_t>(-limit) : 0;
+ return new(zone) Range(min, static_cast<int32_t>(limit - 1));
+ }
+ Range* result = HValue::InferRange(zone);
+ result->set_can_be_minus_zero(false);
+ return result;
+ }
const int32_t kDefaultMask = static_cast<int32_t>(0xffffffff);
int32_t left_mask = (left()->range() != NULL)
? left()->range()->Mask()
@@ -1565,9 +2792,11 @@ Range* HBitwise::InferRange(Zone* zone) {
int32_t result_mask = (op() == Token::BIT_AND)
? left_mask & right_mask
: left_mask | right_mask;
- return (result_mask >= 0)
- ? new(zone) Range(0, result_mask)
- : HValue::InferRange(zone);
+ if (result_mask >= 0) return new(zone) Range(0, result_mask);
+
+ Range* result = HValue::InferRange(zone);
+ result->set_can_be_minus_zero(false);
+ return result;
}
@@ -1579,7 +2808,6 @@ Range* HSar::InferRange(Zone* zone) {
? left()->range()->Copy(zone)
: new(zone) Range();
result->Sar(c->Integer32Value());
- result->set_can_be_minus_zero(false);
return result;
}
}
@@ -1604,7 +2832,6 @@ Range* HShr::InferRange(Zone* zone) {
? left()->range()->Copy(zone)
: new(zone) Range();
result->Sar(c->Integer32Value());
- result->set_can_be_minus_zero(false);
return result;
}
}
@@ -1621,7 +2848,6 @@ Range* HShl::InferRange(Zone* zone) {
? left()->range()->Copy(zone)
: new(zone) Range();
result->Shl(c->Integer32Value());
- result->set_can_be_minus_zero(false);
return result;
}
}
@@ -1629,7 +2855,18 @@ Range* HShl::InferRange(Zone* zone) {
}
-Range* HLoadKeyedSpecializedArrayElement::InferRange(Zone* zone) {
+Range* HLoadNamedField::InferRange(Zone* zone) {
+ if (access().representation().IsByte()) {
+ return new(zone) Range(0, 255);
+ }
+ if (access().IsStringLength()) {
+ return new(zone) Range(0, String::kMaxLength);
+ }
+ return HValue::InferRange(zone);
+}
+
+
+Range* HLoadKeyed::InferRange(Zone* zone) {
switch (elements_kind()) {
case EXTERNAL_PIXEL_ELEMENTS:
return new(zone) Range(0, 255);
@@ -1661,7 +2898,7 @@ void HStringCompareAndBranch::PrintDataTo(StringStream* stream) {
}
-void HCompareIDAndBranch::PrintDataTo(StringStream* stream) {
+void HCompareNumericAndBranch::PrintDataTo(StringStream* stream) {
stream->Add(Token::Name(token()));
stream->Add(" ");
left()->PrintNameTo(stream);
@@ -1679,33 +2916,67 @@ void HCompareObjectEqAndBranch::PrintDataTo(StringStream* stream) {
}
+bool HCompareObjectEqAndBranch::KnownSuccessorBlock(HBasicBlock** block) {
+ if (left()->IsConstant() && right()->IsConstant()) {
+ bool comparison_result =
+ HConstant::cast(left())->Equals(HConstant::cast(right()));
+ *block = comparison_result
+ ? FirstSuccessor()
+ : SecondSuccessor();
+ return true;
+ }
+ *block = NULL;
+ return false;
+}
+
+
+void HCompareHoleAndBranch::InferRepresentation(
+ HInferRepresentationPhase* h_infer) {
+ ChangeRepresentation(value()->representation());
+}
+
+
void HGoto::PrintDataTo(StringStream* stream) {
stream->Add("B%d", SuccessorAt(0)->block_id());
}
-void HCompareIDAndBranch::SetInputRepresentation(Representation r) {
- input_representation_ = r;
- if (r.IsDouble()) {
+void HCompareNumericAndBranch::InferRepresentation(
+ HInferRepresentationPhase* h_infer) {
+ Representation left_rep = left()->representation();
+ Representation right_rep = right()->representation();
+ Representation observed_left = observed_input_representation(0);
+ Representation observed_right = observed_input_representation(1);
+
+ Representation rep = Representation::None();
+ rep = rep.generalize(observed_left);
+ rep = rep.generalize(observed_right);
+ if (rep.IsNone() || rep.IsSmiOrInteger32()) {
+ if (!left_rep.IsTagged()) rep = rep.generalize(left_rep);
+ if (!right_rep.IsTagged()) rep = rep.generalize(right_rep);
+ } else {
+ rep = Representation::Double();
+ }
+
+ if (rep.IsDouble()) {
// According to the ES5 spec (11.9.3, 11.8.5), Equality comparisons (==, ===
// and !=) have special handling of undefined, e.g. undefined == undefined
// is 'true'. Relational comparisons have a different semantic, first
// calling ToPrimitive() on their arguments. The standard Crankshaft
- // tagged-to-double conversion to ensure the HCompareIDAndBranch's inputs
- // are doubles caused 'undefined' to be converted to NaN. That's compatible
- // out-of-the box with ordered relational comparisons (<, >, <=,
+ // tagged-to-double conversion to ensure the HCompareNumericAndBranch's
+ // inputs are doubles caused 'undefined' to be converted to NaN. That's
+ // compatible out-of-the box with ordered relational comparisons (<, >, <=,
// >=). However, for equality comparisons (and for 'in' and 'instanceof'),
// it is not consistent with the spec. For example, it would cause undefined
// == undefined (should be true) to be evaluated as NaN == NaN
// (false). Therefore, any comparisons other than ordered relational
// comparisons must cause a deopt when one of their arguments is undefined.
// See also v8:1434
- if (!Token::IsOrderedRelationalCompareOp(token_)) {
- SetFlag(kDeoptimizeOnUndefined);
+ if (Token::IsOrderedRelationalCompareOp(token_)) {
+ SetFlag(kAllowUndefinedAsNaN);
}
- } else {
- ASSERT(r.IsInteger32());
}
+ ChangeRepresentation(rep);
}
@@ -1716,136 +2987,29 @@ void HParameter::PrintDataTo(StringStream* stream) {
void HLoadNamedField::PrintDataTo(StringStream* stream) {
object()->PrintNameTo(stream);
- stream->Add(" @%d%s", offset(), is_in_object() ? "[in-object]" : "");
-}
-
-
-// Returns true if an instance of this map can never find a property with this
-// name in its prototype chain. This means all prototypes up to the top are
-// fast and don't have the name in them. It would be good if we could optimize
-// polymorphic loads where the property is sometimes found in the prototype
-// chain.
-static bool PrototypeChainCanNeverResolve(
- Handle<Map> map, Handle<String> name) {
- Isolate* isolate = map->GetIsolate();
- Object* current = map->prototype();
- while (current != isolate->heap()->null_value()) {
- if (current->IsJSGlobalProxy() ||
- current->IsGlobalObject() ||
- !current->IsJSObject() ||
- JSObject::cast(current)->map()->has_named_interceptor() ||
- JSObject::cast(current)->IsAccessCheckNeeded() ||
- !JSObject::cast(current)->HasFastProperties()) {
- return false;
- }
-
- LookupResult lookup(isolate);
- Map* map = JSObject::cast(current)->map();
- map->LookupDescriptor(NULL, *name, &lookup);
- if (lookup.IsFound()) return false;
- if (!lookup.IsCacheable()) return false;
- current = JSObject::cast(current)->GetPrototype();
- }
- return true;
+ access_.PrintTo(stream);
}
-HLoadNamedFieldPolymorphic::HLoadNamedFieldPolymorphic(HValue* context,
- HValue* object,
- SmallMapList* types,
- Handle<String> name,
- Zone* zone)
- : types_(Min(types->length(), kMaxLoadPolymorphism), zone),
- name_(name),
- need_generic_(false) {
- SetOperandAt(0, context);
- SetOperandAt(1, object);
- set_representation(Representation::Tagged());
- SetGVNFlag(kDependsOnMaps);
- SmallMapList negative_lookups;
- for (int i = 0;
- i < types->length() && types_.length() < kMaxLoadPolymorphism;
- ++i) {
- Handle<Map> map = types->at(i);
- LookupResult lookup(map->GetIsolate());
- map->LookupDescriptor(NULL, *name, &lookup);
- if (lookup.IsFound()) {
- switch (lookup.type()) {
- case FIELD: {
- int index = lookup.GetLocalFieldIndexFromMap(*map);
- if (index < 0) {
- SetGVNFlag(kDependsOnInobjectFields);
- } else {
- SetGVNFlag(kDependsOnBackingStoreFields);
- }
- types_.Add(types->at(i), zone);
- break;
- }
- case CONSTANT_FUNCTION:
- types_.Add(types->at(i), zone);
- break;
- case CALLBACKS:
- break;
- case TRANSITION:
- case INTERCEPTOR:
- case NONEXISTENT:
- case NORMAL:
- case HANDLER:
- UNREACHABLE();
- break;
- }
- } else if (lookup.IsCacheable() &&
- // For dicts the lookup on the map will fail, but the object may
- // contain the property so we cannot generate a negative lookup
- // (which would just be a map check and return undefined).
- !map->is_dictionary_map() &&
- !map->has_named_interceptor() &&
- PrototypeChainCanNeverResolve(map, name)) {
- negative_lookups.Add(types->at(i), zone);
- }
- }
-
- bool need_generic =
- (types->length() != negative_lookups.length() + types_.length());
- if (!need_generic && FLAG_deoptimize_uncommon_cases) {
- SetFlag(kUseGVN);
- for (int i = 0; i < negative_lookups.length(); i++) {
- types_.Add(negative_lookups.at(i), zone);
- }
- } else {
- // We don't have an easy way to handle both a call (to the generic stub) and
- // a deopt in the same hydrogen instruction, so in this case we don't add
- // the negative lookups which can deopt - just let the generic stub handle
- // them.
- SetAllSideEffects();
- need_generic_ = true;
- }
-}
-
-
-bool HLoadNamedFieldPolymorphic::DataEquals(HValue* value) {
- HLoadNamedFieldPolymorphic* other = HLoadNamedFieldPolymorphic::cast(value);
- if (types_.length() != other->types()->length()) return false;
- if (!name_.is_identical_to(other->name())) return false;
- if (need_generic_ != other->need_generic_) return false;
- for (int i = 0; i < types_.length(); i++) {
- bool found = false;
- for (int j = 0; j < types_.length(); j++) {
- if (types_.at(j).is_identical_to(other->types()->at(i))) {
- found = true;
- break;
- }
+HCheckMaps* HCheckMaps::New(Zone* zone,
+ HValue* context,
+ HValue* value,
+ Handle<Map> map,
+ CompilationInfo* info,
+ HValue* typecheck) {
+ HCheckMaps* check_map = new(zone) HCheckMaps(value, zone, typecheck);
+ check_map->Add(map, zone);
+ if (map->CanOmitMapChecks() &&
+ value->IsConstant() &&
+ HConstant::cast(value)->HasMap(map)) {
+ // TODO(titzer): collect dependent map checks into a list.
+ check_map->omit_ = true;
+ if (map->CanTransition()) {
+ map->AddDependentCompilationInfo(
+ DependentCode::kPrototypeCheckGroup, info);
}
- if (!found) return false;
}
- return true;
-}
-
-
-void HLoadNamedFieldPolymorphic::PrintDataTo(StringStream* stream) {
- object()->PrintNameTo(stream);
- stream->Add(".");
- stream->Add(*String::cast(*name())->ToCString());
+ return check_map;
}
@@ -1856,38 +3020,86 @@ void HLoadNamedGeneric::PrintDataTo(StringStream* stream) {
}
-void HLoadKeyedFastElement::PrintDataTo(StringStream* stream) {
- object()->PrintNameTo(stream);
+void HLoadKeyed::PrintDataTo(StringStream* stream) {
+ if (!is_external()) {
+ elements()->PrintNameTo(stream);
+ } else {
+ ASSERT(elements_kind() >= FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND &&
+ elements_kind() <= LAST_EXTERNAL_ARRAY_ELEMENTS_KIND);
+ elements()->PrintNameTo(stream);
+ stream->Add(".");
+ stream->Add(ElementsKindToString(elements_kind()));
+ }
+
stream->Add("[");
key()->PrintNameTo(stream);
- stream->Add("] ");
- dependency()->PrintNameTo(stream);
+ if (IsDehoisted()) {
+ stream->Add(" + %d]", index_offset());
+ } else {
+ stream->Add("]");
+ }
+
+ if (HasDependency()) {
+ stream->Add(" ");
+ dependency()->PrintNameTo(stream);
+ }
+
if (RequiresHoleCheck()) {
stream->Add(" check_hole");
}
}
-bool HLoadKeyedFastElement::RequiresHoleCheck() const {
+bool HLoadKeyed::UsesMustHandleHole() const {
if (IsFastPackedElementsKind(elements_kind())) {
return false;
}
+ if (IsExternalArrayElementsKind(elements_kind())) {
+ return false;
+ }
+
+ if (hole_mode() == ALLOW_RETURN_HOLE) {
+ if (IsFastDoubleElementsKind(elements_kind())) {
+ return AllUsesCanTreatHoleAsNaN();
+ }
+ return true;
+ }
+
+ if (IsFastDoubleElementsKind(elements_kind())) {
+ return false;
+ }
+
+ // Holes are only returned as tagged values.
+ if (!representation().IsTagged()) {
+ return false;
+ }
+
for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
HValue* use = it.value();
- if (!use->IsChange()) return true;
+ if (!use->IsChange()) return false;
}
- return false;
+ return true;
}
-void HLoadKeyedFastDoubleElement::PrintDataTo(StringStream* stream) {
- elements()->PrintNameTo(stream);
- stream->Add("[");
- key()->PrintNameTo(stream);
- stream->Add("] ");
- dependency()->PrintNameTo(stream);
+bool HLoadKeyed::AllUsesCanTreatHoleAsNaN() const {
+ return IsFastDoubleElementsKind(elements_kind()) &&
+ CheckUsesForFlag(HValue::kAllowUndefinedAsNaN);
+}
+
+
+bool HLoadKeyed::RequiresHoleCheck() const {
+ if (IsFastPackedElementsKind(elements_kind())) {
+ return false;
+ }
+
+ if (IsExternalArrayElementsKind(elements_kind())) {
+ return false;
+ }
+
+ return !UsesMustHandleHole();
}
@@ -1903,21 +3115,27 @@ HValue* HLoadKeyedGeneric::Canonicalize() {
// Recognize generic keyed loads that use property name generated
// by for-in statement as a key and rewrite them into fast property load
// by index.
- if (key()->IsLoadKeyedFastElement()) {
- HLoadKeyedFastElement* key_load = HLoadKeyedFastElement::cast(key());
- if (key_load->object()->IsForInCacheArray()) {
+ if (key()->IsLoadKeyed()) {
+ HLoadKeyed* key_load = HLoadKeyed::cast(key());
+ if (key_load->elements()->IsForInCacheArray()) {
HForInCacheArray* names_cache =
- HForInCacheArray::cast(key_load->object());
+ HForInCacheArray::cast(key_load->elements());
if (names_cache->enumerable() == object()) {
HForInCacheArray* index_cache =
names_cache->index_cache();
HCheckMapValue* map_check =
- new(block()->zone()) HCheckMapValue(object(), names_cache->map());
- HInstruction* index = new(block()->zone()) HLoadKeyedFastElement(
+ HCheckMapValue::New(block()->graph()->zone(),
+ block()->graph()->GetInvalidContext(),
+ object(),
+ names_cache->map());
+ HInstruction* index = HLoadKeyed::New(
+ block()->graph()->zone(),
+ block()->graph()->GetInvalidContext(),
index_cache,
key_load->key(),
- key_load->key());
+ key_load->key(),
+ key_load->elements_kind());
map_check->InsertBefore(this);
index->InsertBefore(this);
HLoadFieldByIndex* load = new(block()->zone()) HLoadFieldByIndex(
@@ -1932,56 +3150,6 @@ HValue* HLoadKeyedGeneric::Canonicalize() {
}
-void HLoadKeyedSpecializedArrayElement::PrintDataTo(
- StringStream* stream) {
- external_pointer()->PrintNameTo(stream);
- stream->Add(".");
- switch (elements_kind()) {
- case EXTERNAL_BYTE_ELEMENTS:
- stream->Add("byte");
- break;
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- stream->Add("u_byte");
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- stream->Add("short");
- break;
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- stream->Add("u_short");
- break;
- case EXTERNAL_INT_ELEMENTS:
- stream->Add("int");
- break;
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- stream->Add("u_int");
- break;
- case EXTERNAL_FLOAT_ELEMENTS:
- stream->Add("float");
- break;
- case EXTERNAL_DOUBLE_ELEMENTS:
- stream->Add("double");
- break;
- case EXTERNAL_PIXEL_ELEMENTS:
- stream->Add("pixel");
- break;
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
- stream->Add("[");
- key()->PrintNameTo(stream);
- stream->Add("] ");
- dependency()->PrintNameTo(stream);
-}
-
-
void HStoreNamedGeneric::PrintDataTo(StringStream* stream) {
object()->PrintNameTo(stream);
stream->Add(".");
@@ -1994,34 +3162,37 @@ void HStoreNamedGeneric::PrintDataTo(StringStream* stream) {
void HStoreNamedField::PrintDataTo(StringStream* stream) {
object()->PrintNameTo(stream);
- stream->Add(".");
- stream->Add(*String::cast(*name())->ToCString());
+ access_.PrintTo(stream);
stream->Add(" = ");
value()->PrintNameTo(stream);
- stream->Add(" @%d%s", offset(), is_in_object() ? "[in-object]" : "");
if (NeedsWriteBarrier()) {
stream->Add(" (write-barrier)");
}
- if (!transition().is_null()) {
- stream->Add(" (transition map %p)", *transition());
+ if (has_transition()) {
+ stream->Add(" (transition map %p)", *transition_map());
}
}
-void HStoreKeyedFastElement::PrintDataTo(StringStream* stream) {
- object()->PrintNameTo(stream);
- stream->Add("[");
- key()->PrintNameTo(stream);
- stream->Add("] = ");
- value()->PrintNameTo(stream);
-}
-
+void HStoreKeyed::PrintDataTo(StringStream* stream) {
+ if (!is_external()) {
+ elements()->PrintNameTo(stream);
+ } else {
+ elements()->PrintNameTo(stream);
+ stream->Add(".");
+ stream->Add(ElementsKindToString(elements_kind()));
+ ASSERT(elements_kind() >= FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND &&
+ elements_kind() <= LAST_EXTERNAL_ARRAY_ELEMENTS_KIND);
+ }
-void HStoreKeyedFastDoubleElement::PrintDataTo(StringStream* stream) {
- elements()->PrintNameTo(stream);
stream->Add("[");
key()->PrintNameTo(stream);
- stream->Add("] = ");
+ if (IsDehoisted()) {
+ stream->Add(" + %d] = ", index_offset());
+ } else {
+ stream->Add("] = ");
+ }
+
value()->PrintNameTo(stream);
}
@@ -2035,70 +3206,21 @@ void HStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
}
-void HStoreKeyedSpecializedArrayElement::PrintDataTo(
- StringStream* stream) {
- external_pointer()->PrintNameTo(stream);
- stream->Add(".");
- switch (elements_kind()) {
- case EXTERNAL_BYTE_ELEMENTS:
- stream->Add("byte");
- break;
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- stream->Add("u_byte");
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- stream->Add("short");
- break;
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- stream->Add("u_short");
- break;
- case EXTERNAL_INT_ELEMENTS:
- stream->Add("int");
- break;
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- stream->Add("u_int");
- break;
- case EXTERNAL_FLOAT_ELEMENTS:
- stream->Add("float");
- break;
- case EXTERNAL_DOUBLE_ELEMENTS:
- stream->Add("double");
- break;
- case EXTERNAL_PIXEL_ELEMENTS:
- stream->Add("pixel");
- break;
- case FAST_SMI_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
- stream->Add("[");
- key()->PrintNameTo(stream);
- stream->Add("] = ");
- value()->PrintNameTo(stream);
-}
-
-
void HTransitionElementsKind::PrintDataTo(StringStream* stream) {
object()->PrintNameTo(stream);
- ElementsKind from_kind = original_map()->elements_kind();
- ElementsKind to_kind = transitioned_map()->elements_kind();
+ ElementsKind from_kind = original_map().handle()->elements_kind();
+ ElementsKind to_kind = transitioned_map().handle()->elements_kind();
stream->Add(" %p [%s] -> %p [%s]",
- *original_map(),
+ *original_map().handle(),
ElementsAccessor::ForKind(from_kind)->name(),
- *transitioned_map(),
+ *transitioned_map().handle(),
ElementsAccessor::ForKind(to_kind)->name());
+ if (IsSimpleMapChangeTransition(from_kind, to_kind)) stream->Add(" (simple)");
}
void HLoadGlobalCell::PrintDataTo(StringStream* stream) {
- stream->Add("[%p]", *cell());
+ stream->Add("[%p]", *cell().handle());
if (!details_.IsDontDelete()) stream->Add(" (deleteable)");
if (details_.IsReadOnly()) stream->Add(" (read-only)");
}
@@ -2119,8 +3241,14 @@ void HLoadGlobalGeneric::PrintDataTo(StringStream* stream) {
}
+void HInnerAllocatedObject::PrintDataTo(StringStream* stream) {
+ base_object()->PrintNameTo(stream);
+ stream->Add(" offset %d", offset());
+}
+
+
void HStoreGlobalCell::PrintDataTo(StringStream* stream) {
- stream->Add("[%p] = ", *cell());
+ stream->Add("[%p] = ", *cell().handle());
value()->PrintNameTo(stream);
if (!details_.IsDontDelete()) stream->Add(" (deleteable)");
if (details_.IsReadOnly()) stream->Add(" (read-only)");
@@ -2154,30 +3282,10 @@ HType HValue::CalculateInferredType() {
}
-HType HCheckMaps::CalculateInferredType() {
- return value()->type();
-}
-
-
-HType HCheckFunction::CalculateInferredType() {
- return value()->type();
-}
-
-
-HType HCheckNonSmi::CalculateInferredType() {
- // TODO(kasperl): Is there any way to signal that this isn't a smi?
- return HType::Tagged();
-}
-
-
-HType HCheckSmi::CalculateInferredType() {
- return HType::Smi();
-}
-
-
HType HPhi::CalculateInferredType() {
- HType result = HType::Uninitialized();
- for (int i = 0; i < OperandCount(); ++i) {
+ if (OperandCount() == 0) return HType::Tagged();
+ HType result = OperandAt(0)->type();
+ for (int i = 1; i < OperandCount(); ++i) {
HType current = OperandAt(i)->type();
result = result.Combine(current);
}
@@ -2185,128 +3293,302 @@ HType HPhi::CalculateInferredType() {
}
-HType HConstant::CalculateInferredType() {
- if (has_int32_value_) {
- return Smi::IsValid(int32_value_) ? HType::Smi() : HType::HeapNumber();
- }
- if (has_double_value_) return HType::HeapNumber();
- return HType::TypeFromValue(handle_);
-}
-
-
-HType HCompareGeneric::CalculateInferredType() {
- return HType::Boolean();
-}
-
-
-HType HInstanceOf::CalculateInferredType() {
- return HType::Boolean();
-}
-
-
-HType HDeleteProperty::CalculateInferredType() {
- return HType::Boolean();
-}
-
-
-HType HInstanceOfKnownGlobal::CalculateInferredType() {
- return HType::Boolean();
-}
-
-
HType HChange::CalculateInferredType() {
if (from().IsDouble() && to().IsTagged()) return HType::HeapNumber();
return type();
}
-HType HBitwiseBinaryOperation::CalculateInferredType() {
- return HType::TaggedNumber();
+Representation HUnaryMathOperation::RepresentationFromInputs() {
+ Representation rep = representation();
+ // If any of the actual input representations is more general than what we
+ // have so far but not Tagged, use that representation instead.
+ Representation input_rep = value()->representation();
+ if (!input_rep.IsTagged()) {
+ rep = rep.generalize(input_rep);
+ }
+ return rep;
}
-HType HArithmeticBinaryOperation::CalculateInferredType() {
- return HType::TaggedNumber();
-}
-
-
-HType HAdd::CalculateInferredType() {
- return HType::Tagged();
-}
+void HAllocate::HandleSideEffectDominator(GVNFlag side_effect,
+ HValue* dominator) {
+ ASSERT(side_effect == kChangesNewSpacePromotion);
+ Zone* zone = block()->zone();
+ if (!FLAG_use_allocation_folding) return;
+ // Try to fold allocations together with their dominating allocations.
+ if (!dominator->IsAllocate()) {
+ if (FLAG_trace_allocation_folding) {
+ PrintF("#%d (%s) cannot fold into #%d (%s)\n",
+ id(), Mnemonic(), dominator->id(), dominator->Mnemonic());
+ }
+ return;
+ }
-HType HBitNot::CalculateInferredType() {
- return HType::TaggedNumber();
-}
+ HAllocate* dominator_allocate = HAllocate::cast(dominator);
+ HValue* dominator_size = dominator_allocate->size();
+ HValue* current_size = size();
+ // TODO(hpayer): Add support for non-constant allocation in dominator.
+ if (!current_size->IsInteger32Constant() ||
+ !dominator_size->IsInteger32Constant()) {
+ if (FLAG_trace_allocation_folding) {
+ PrintF("#%d (%s) cannot fold into #%d (%s), dynamic allocation size\n",
+ id(), Mnemonic(), dominator->id(), dominator->Mnemonic());
+ }
+ return;
+ }
-HType HUnaryMathOperation::CalculateInferredType() {
- return HType::TaggedNumber();
-}
+ dominator_allocate = GetFoldableDominator(dominator_allocate);
+ if (dominator_allocate == NULL) {
+ return;
+ }
+ ASSERT((IsNewSpaceAllocation() &&
+ dominator_allocate->IsNewSpaceAllocation()) ||
+ (IsOldDataSpaceAllocation() &&
+ dominator_allocate->IsOldDataSpaceAllocation()) ||
+ (IsOldPointerSpaceAllocation() &&
+ dominator_allocate->IsOldPointerSpaceAllocation()));
+
+ // First update the size of the dominator allocate instruction.
+ dominator_size = dominator_allocate->size();
+ int32_t original_object_size =
+ HConstant::cast(dominator_size)->GetInteger32Constant();
+ int32_t dominator_size_constant = original_object_size;
+ int32_t current_size_constant =
+ HConstant::cast(current_size)->GetInteger32Constant();
+ int32_t new_dominator_size = dominator_size_constant + current_size_constant;
+
+ if (MustAllocateDoubleAligned()) {
+ if (!dominator_allocate->MustAllocateDoubleAligned()) {
+ dominator_allocate->MakeDoubleAligned();
+ }
+ if ((dominator_size_constant & kDoubleAlignmentMask) != 0) {
+ dominator_size_constant += kDoubleSize / 2;
+ new_dominator_size += kDoubleSize / 2;
+ }
+ }
-HType HStringCharFromCode::CalculateInferredType() {
- return HType::String();
-}
+ if (new_dominator_size > Page::kMaxNonCodeHeapObjectSize) {
+ if (FLAG_trace_allocation_folding) {
+ PrintF("#%d (%s) cannot fold into #%d (%s) due to size: %d\n",
+ id(), Mnemonic(), dominator_allocate->id(),
+ dominator_allocate->Mnemonic(), new_dominator_size);
+ }
+ return;
+ }
+ HInstruction* new_dominator_size_constant = HConstant::CreateAndInsertBefore(
+ zone,
+ context(),
+ new_dominator_size,
+ Representation::None(),
+ dominator_allocate);
+ dominator_allocate->UpdateSize(new_dominator_size_constant);
+
+#ifdef VERIFY_HEAP
+ if (FLAG_verify_heap && dominator_allocate->IsNewSpaceAllocation()) {
+ dominator_allocate->MakePrefillWithFiller();
+ } else {
+ // TODO(hpayer): This is a short-term hack to make allocation mementos
+ // work again in new space.
+ dominator_allocate->ClearNextMapWord(original_object_size);
+ }
+#else
+ // TODO(hpayer): This is a short-term hack to make allocation mementos
+ // work again in new space.
+ dominator_allocate->ClearNextMapWord(original_object_size);
+#endif
-HType HAllocateObject::CalculateInferredType() {
- return HType::JSObject();
+ dominator_allocate->clear_next_map_word_ = clear_next_map_word_;
+
+ // After that replace the dominated allocate instruction.
+ HInstruction* dominated_allocate_instr =
+ HInnerAllocatedObject::New(zone,
+ context(),
+ dominator_allocate,
+ dominator_size_constant,
+ type());
+ dominated_allocate_instr->InsertBefore(this);
+ DeleteAndReplaceWith(dominated_allocate_instr);
+ if (FLAG_trace_allocation_folding) {
+ PrintF("#%d (%s) folded into #%d (%s)\n",
+ id(), Mnemonic(), dominator_allocate->id(),
+ dominator_allocate->Mnemonic());
+ }
}
-HType HFastLiteral::CalculateInferredType() {
- // TODO(mstarzinger): Be smarter, could also be JSArray here.
- return HType::JSObject();
-}
-
+HAllocate* HAllocate::GetFoldableDominator(HAllocate* dominator) {
+ if (!IsFoldable(dominator)) {
+ // We cannot hoist old space allocations over new space allocations.
+ if (IsNewSpaceAllocation() || dominator->IsNewSpaceAllocation()) {
+ if (FLAG_trace_allocation_folding) {
+ PrintF("#%d (%s) cannot fold into #%d (%s), new space hoisting\n",
+ id(), Mnemonic(), dominator->id(), dominator->Mnemonic());
+ }
+ return NULL;
+ }
-HType HArrayLiteral::CalculateInferredType() {
- return HType::JSArray();
-}
+ HAllocate* dominator_dominator = dominator->dominating_allocate_;
+ // We can hoist old data space allocations over an old pointer space
+ // allocation and vice versa. For that we have to check the dominator
+ // of the dominator allocate instruction.
+ if (dominator_dominator == NULL) {
+ dominating_allocate_ = dominator;
+ if (FLAG_trace_allocation_folding) {
+ PrintF("#%d (%s) cannot fold into #%d (%s), different spaces\n",
+ id(), Mnemonic(), dominator->id(), dominator->Mnemonic());
+ }
+ return NULL;
+ }
-HType HObjectLiteral::CalculateInferredType() {
- return HType::JSObject();
-}
+ // We can just fold old space allocations that are in the same basic block,
+ // since it is not guaranteed that we fill up the whole allocated old
+ // space memory.
+ // TODO(hpayer): Remove this limitation and add filler maps for each
+ // allocation as soon as we have store elimination.
+ if (block()->block_id() != dominator_dominator->block()->block_id()) {
+ if (FLAG_trace_allocation_folding) {
+ PrintF("#%d (%s) cannot fold into #%d (%s), different basic blocks\n",
+ id(), Mnemonic(), dominator_dominator->id(),
+ dominator_dominator->Mnemonic());
+ }
+ return NULL;
+ }
+ ASSERT((IsOldDataSpaceAllocation() &&
+ dominator_dominator->IsOldDataSpaceAllocation()) ||
+ (IsOldPointerSpaceAllocation() &&
+ dominator_dominator->IsOldPointerSpaceAllocation()));
+
+ int32_t current_size = HConstant::cast(size())->GetInteger32Constant();
+ HStoreNamedField* dominator_free_space_size =
+ dominator->filler_free_space_size_;
+ if (dominator_free_space_size != NULL) {
+ // We already hoisted one old space allocation, i.e., we already installed
+ // a filler map. Hence, we just have to update the free space size.
+ dominator->UpdateFreeSpaceFiller(current_size);
+ } else {
+ // This is the first old space allocation that gets hoisted. We have to
+ // install a filler map since the following allocation may cause a GC.
+ dominator->CreateFreeSpaceFiller(current_size);
+ }
-HType HRegExpLiteral::CalculateInferredType() {
- return HType::JSObject();
+ // We can hoist the old space allocation over the actual dominator.
+ return dominator_dominator;
+ }
+ return dominator;
+}
+
+
+void HAllocate::UpdateFreeSpaceFiller(int32_t free_space_size) {
+ ASSERT(filler_free_space_size_ != NULL);
+ Zone* zone = block()->zone();
+ // We must explicitly force Smi representation here because on x64 we
+ // would otherwise automatically choose int32, but the actual store
+ // requires a Smi-tagged value.
+ HConstant* new_free_space_size = HConstant::CreateAndInsertBefore(
+ zone,
+ context(),
+ filler_free_space_size_->value()->GetInteger32Constant() +
+ free_space_size,
+ Representation::Smi(),
+ filler_free_space_size_);
+ filler_free_space_size_->UpdateValue(new_free_space_size);
+}
+
+
+void HAllocate::CreateFreeSpaceFiller(int32_t free_space_size) {
+ ASSERT(filler_free_space_size_ == NULL);
+ Zone* zone = block()->zone();
+ int32_t dominator_size =
+ HConstant::cast(dominating_allocate_->size())->GetInteger32Constant();
+ HInstruction* free_space_instr =
+ HInnerAllocatedObject::New(zone, context(), dominating_allocate_,
+ dominator_size, type());
+ free_space_instr->InsertBefore(this);
+ HConstant* filler_map = HConstant::New(
+ zone,
+ context(),
+ isolate()->factory()->free_space_map());
+ filler_map->FinalizeUniqueness(); // TODO(titzer): should be init'd a'ready
+ filler_map->InsertAfter(free_space_instr);
+ HInstruction* store_map = HStoreNamedField::New(zone, context(),
+ free_space_instr, HObjectAccess::ForMap(), filler_map);
+ store_map->SetFlag(HValue::kHasNoObservableSideEffects);
+ store_map->InsertAfter(filler_map);
+
+ // We must explicitly force Smi representation here because on x64 we
+ // would otherwise automatically choose int32, but the actual store
+ // requires a Smi-tagged value.
+ HConstant* filler_size = HConstant::CreateAndInsertAfter(
+ zone, context(), free_space_size, Representation::Smi(), store_map);
+ // Must force Smi representation for x64 (see comment above).
+ HObjectAccess access =
+ HObjectAccess::ForJSObjectOffset(FreeSpace::kSizeOffset,
+ Representation::Smi());
+ HStoreNamedField* store_size = HStoreNamedField::New(zone, context(),
+ free_space_instr, access, filler_size);
+ store_size->SetFlag(HValue::kHasNoObservableSideEffects);
+ store_size->InsertAfter(filler_size);
+ filler_free_space_size_ = store_size;
+}
+
+
+void HAllocate::ClearNextMapWord(int offset) {
+ if (clear_next_map_word_) {
+ Zone* zone = block()->zone();
+ HObjectAccess access = HObjectAccess::ForJSObjectOffset(offset);
+ HStoreNamedField* clear_next_map =
+ HStoreNamedField::New(zone, context(), this, access,
+ block()->graph()->GetConstantNull());
+ clear_next_map->ClearAllSideEffects();
+ clear_next_map->InsertAfter(this);
+ }
}
-HType HFunctionLiteral::CalculateInferredType() {
- return HType::JSObject();
+void HAllocate::PrintDataTo(StringStream* stream) {
+ size()->PrintNameTo(stream);
+ stream->Add(" (");
+ if (IsNewSpaceAllocation()) stream->Add("N");
+ if (IsOldPointerSpaceAllocation()) stream->Add("P");
+ if (IsOldDataSpaceAllocation()) stream->Add("D");
+ if (MustAllocateDoubleAligned()) stream->Add("A");
+ if (MustPrefillWithFiller()) stream->Add("F");
+ stream->Add(")");
}
HValue* HUnaryMathOperation::EnsureAndPropagateNotMinusZero(
BitVector* visited) {
visited->Add(id());
- if (representation().IsInteger32() &&
- !value()->representation().IsInteger32()) {
+ if (representation().IsSmiOrInteger32() &&
+ !value()->representation().Equals(representation())) {
if (value()->range() == NULL || value()->range()->CanBeMinusZero()) {
SetFlag(kBailoutOnMinusZero);
}
}
- if (RequiredInputRepresentation(0).IsInteger32() &&
- representation().IsInteger32()) {
+ if (RequiredInputRepresentation(0).IsSmiOrInteger32() &&
+ representation().Equals(RequiredInputRepresentation(0))) {
return value();
}
return NULL;
}
-
HValue* HChange::EnsureAndPropagateNotMinusZero(BitVector* visited) {
visited->Add(id());
- if (from().IsInteger32()) return NULL;
+ if (from().IsSmiOrInteger32()) return NULL;
if (CanTruncateToInt32()) return NULL;
if (value()->range() == NULL || value()->range()->CanBeMinusZero()) {
SetFlag(kBailoutOnMinusZero);
}
- ASSERT(!from().IsInteger32() || !to().IsInteger32());
+ ASSERT(!from().IsSmiOrInteger32() || !to().IsSmiOrInteger32());
return NULL;
}
@@ -2375,36 +3657,50 @@ HValue* HAdd::EnsureAndPropagateNotMinusZero(BitVector* visited) {
}
-bool HStoreKeyedFastDoubleElement::NeedsCanonicalization() {
- // If value was loaded from unboxed double backing store or
- // converted from an integer then we don't have to canonicalize it.
- if (value()->IsLoadKeyedFastDoubleElement() ||
- (value()->IsChange() && HChange::cast(value())->from().IsInteger32())) {
+bool HStoreKeyed::NeedsCanonicalization() {
+ // If value is an integer, a smi, or the result of a keyed load or a constant,
+ // then it is either a non-hole value or, in the case of a constant, the hole
+ // is only being stored explicitly: no need for canonicalization.
+ //
+ // The exception to that is keyed loads from external float or double arrays:
+ // these can load arbitrary representation of NaN.
+
+ if (value()->IsConstant()) {
return false;
}
+
+ if (value()->IsLoadKeyed()) {
+ return IsExternalFloatOrDoubleElementsKind(
+ HLoadKeyed::cast(value())->elements_kind());
+ }
+
+ if (value()->IsChange()) {
+ if (HChange::cast(value())->from().IsSmiOrInteger32()) {
+ return false;
+ }
+ if (HChange::cast(value())->value()->type().IsSmi()) {
+ return false;
+ }
+ }
return true;
}
-#define H_CONSTANT_INT32(val) \
-new(zone) HConstant(FACTORY->NewNumberFromInt(val, TENURED), \
- Representation::Integer32())
+#define H_CONSTANT_INT(val) \
+HConstant::New(zone, context, static_cast<int32_t>(val))
#define H_CONSTANT_DOUBLE(val) \
-new(zone) HConstant(FACTORY->NewNumber(val, TENURED), \
- Representation::Double())
+HConstant::New(zone, context, static_cast<double>(val))
#define DEFINE_NEW_H_SIMPLE_ARITHMETIC_INSTR(HInstr, op) \
-HInstruction* HInstr::New##HInstr(Zone* zone, \
- HValue* context, \
- HValue* left, \
- HValue* right) { \
- if (left->IsConstant() && right->IsConstant()) { \
+HInstruction* HInstr::New( \
+ Zone* zone, HValue* context, HValue* left, HValue* right) { \
+ if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) { \
HConstant* c_left = HConstant::cast(left); \
HConstant* c_right = HConstant::cast(right); \
if ((c_left->HasNumberValue() && c_right->HasNumberValue())) { \
double double_res = c_left->DoubleValue() op c_right->DoubleValue(); \
if (TypeInfo::IsInt32Double(double_res)) { \
- return H_CONSTANT_INT32(static_cast<int32_t>(double_res)); \
+ return H_CONSTANT_INT(double_res); \
} \
return H_CONSTANT_DOUBLE(double_res); \
} \
@@ -2420,44 +3716,204 @@ DEFINE_NEW_H_SIMPLE_ARITHMETIC_INSTR(HSub, -)
#undef DEFINE_NEW_H_SIMPLE_ARITHMETIC_INSTR
-HInstruction* HMod::NewHMod(Zone* zone,
- HValue* context,
- HValue* left,
- HValue* right) {
- if (left->IsConstant() && right->IsConstant()) {
+HInstruction* HStringAdd::New(Zone* zone,
+ HValue* context,
+ HValue* left,
+ HValue* right,
+ StringAddFlags flags) {
+ if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) {
+ HConstant* c_right = HConstant::cast(right);
+ HConstant* c_left = HConstant::cast(left);
+ if (c_left->HasStringValue() && c_right->HasStringValue()) {
+ Handle<String> concat = zone->isolate()->factory()->NewFlatConcatString(
+ c_left->StringValue(), c_right->StringValue());
+ return HConstant::New(zone, context, concat);
+ }
+ }
+ return new(zone) HStringAdd(context, left, right, flags);
+}
+
+
+HInstruction* HStringCharFromCode::New(
+ Zone* zone, HValue* context, HValue* char_code) {
+ if (FLAG_fold_constants && char_code->IsConstant()) {
+ HConstant* c_code = HConstant::cast(char_code);
+ Isolate* isolate = zone->isolate();
+ if (c_code->HasNumberValue()) {
+ if (std::isfinite(c_code->DoubleValue())) {
+ uint32_t code = c_code->NumberValueAsInteger32() & 0xffff;
+ return HConstant::New(zone, context,
+ LookupSingleCharacterStringFromCode(isolate, code));
+ }
+ return HConstant::New(zone, context, isolate->factory()->empty_string());
+ }
+ }
+ return new(zone) HStringCharFromCode(context, char_code);
+}
+
+
+HInstruction* HUnaryMathOperation::New(
+ Zone* zone, HValue* context, HValue* value, BuiltinFunctionId op) {
+ do {
+ if (!FLAG_fold_constants) break;
+ if (!value->IsConstant()) break;
+ HConstant* constant = HConstant::cast(value);
+ if (!constant->HasNumberValue()) break;
+ double d = constant->DoubleValue();
+ if (std::isnan(d)) { // NaN poisons everything.
+ return H_CONSTANT_DOUBLE(OS::nan_value());
+ }
+ if (std::isinf(d)) { // +Infinity and -Infinity.
+ switch (op) {
+ case kMathSin:
+ case kMathCos:
+ case kMathTan:
+ return H_CONSTANT_DOUBLE(OS::nan_value());
+ case kMathExp:
+ return H_CONSTANT_DOUBLE((d > 0.0) ? d : 0.0);
+ case kMathLog:
+ case kMathSqrt:
+ return H_CONSTANT_DOUBLE((d > 0.0) ? d : OS::nan_value());
+ case kMathPowHalf:
+ case kMathAbs:
+ return H_CONSTANT_DOUBLE((d > 0.0) ? d : -d);
+ case kMathRound:
+ case kMathFloor:
+ return H_CONSTANT_DOUBLE(d);
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
+ switch (op) {
+ case kMathSin:
+ return H_CONSTANT_DOUBLE(fast_sin(d));
+ case kMathCos:
+ return H_CONSTANT_DOUBLE(fast_cos(d));
+ case kMathTan:
+ return H_CONSTANT_DOUBLE(fast_tan(d));
+ case kMathExp:
+ return H_CONSTANT_DOUBLE(fast_exp(d));
+ case kMathLog:
+ return H_CONSTANT_DOUBLE(fast_log(d));
+ case kMathSqrt:
+ return H_CONSTANT_DOUBLE(fast_sqrt(d));
+ case kMathPowHalf:
+ return H_CONSTANT_DOUBLE(power_double_double(d, 0.5));
+ case kMathAbs:
+ return H_CONSTANT_DOUBLE((d >= 0.0) ? d + 0.0 : -d);
+ case kMathRound:
+ // -0.5 .. -0.0 round to -0.0.
+ if ((d >= -0.5 && Double(d).Sign() < 0)) return H_CONSTANT_DOUBLE(-0.0);
+      // Doubles are represented as Significand * 2 ^ Exponent. If the
+      // Exponent is not negative, the double value is already an integer.
+ if (Double(d).Exponent() >= 0) return H_CONSTANT_DOUBLE(d);
+ return H_CONSTANT_DOUBLE(floor(d + 0.5));
+ case kMathFloor:
+ return H_CONSTANT_DOUBLE(floor(d));
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } while (false);
+ return new(zone) HUnaryMathOperation(context, value, op);
+}
+
+
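
// Illustrative sketch (not part of the patch): the kMathRound folding above,
// restated in plain C++. JS Math.round() rounds half up, except that inputs
// in [-0.5, -0.0] must yield -0.0, and already-integral values are returned
// unchanged so that adding 0.5 cannot disturb very large magnitudes.
#include <cmath>

static double FoldMathRound(double d) {
  if (d >= -0.5 && std::signbit(d)) return -0.0;  // -0.5 .. -0.0 -> -0.0.
  if (std::floor(d) == d) return d;               // Already an integer.
  return std::floor(d + 0.5);                     // Ordinary half-up rounding.
}
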
+HInstruction* HPower::New(Zone* zone,
+ HValue* context,
+ HValue* left,
+ HValue* right) {
+ if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) {
+ HConstant* c_left = HConstant::cast(left);
+ HConstant* c_right = HConstant::cast(right);
+ if (c_left->HasNumberValue() && c_right->HasNumberValue()) {
+ double result = power_helper(c_left->DoubleValue(),
+ c_right->DoubleValue());
+ return H_CONSTANT_DOUBLE(std::isnan(result) ? OS::nan_value() : result);
+ }
+ }
+ return new(zone) HPower(left, right);
+}
+
+
+HInstruction* HMathMinMax::New(
+ Zone* zone, HValue* context, HValue* left, HValue* right, Operation op) {
+ if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) {
+ HConstant* c_left = HConstant::cast(left);
+ HConstant* c_right = HConstant::cast(right);
+ if (c_left->HasNumberValue() && c_right->HasNumberValue()) {
+ double d_left = c_left->DoubleValue();
+ double d_right = c_right->DoubleValue();
+ if (op == kMathMin) {
+ if (d_left > d_right) return H_CONSTANT_DOUBLE(d_right);
+ if (d_left < d_right) return H_CONSTANT_DOUBLE(d_left);
+ if (d_left == d_right) {
+ // Handle +0 and -0.
+ return H_CONSTANT_DOUBLE((Double(d_left).Sign() == -1) ? d_left
+ : d_right);
+ }
+ } else {
+ if (d_left < d_right) return H_CONSTANT_DOUBLE(d_right);
+ if (d_left > d_right) return H_CONSTANT_DOUBLE(d_left);
+ if (d_left == d_right) {
+ // Handle +0 and -0.
+ return H_CONSTANT_DOUBLE((Double(d_left).Sign() == -1) ? d_right
+ : d_left);
+ }
+ }
+ // All comparisons failed, must be NaN.
+ return H_CONSTANT_DOUBLE(OS::nan_value());
+ }
+ }
+ return new(zone) HMathMinMax(context, left, right, op);
+}
+
+
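
// Illustrative sketch (not part of the patch): the +0/-0 cases above exist
// because Math.min must prefer -0 and Math.max must prefer +0 when both
// operands compare equal, which < and > alone cannot distinguish.
#include <cmath>

static double FoldMathMin(double a, double b) {
  if (a < b) return a;
  if (a > b) return b;
  if (a == b) return std::signbit(a) ? a : b;  // Pick -0 if either operand is.
  return std::nan("");                         // Comparisons with NaN all fail.
}
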
+HInstruction* HMod::New(Zone* zone,
+ HValue* context,
+ HValue* left,
+ HValue* right,
+ Maybe<int> fixed_right_arg) {
+ if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) {
HConstant* c_left = HConstant::cast(left);
HConstant* c_right = HConstant::cast(right);
if (c_left->HasInteger32Value() && c_right->HasInteger32Value()) {
int32_t dividend = c_left->Integer32Value();
int32_t divisor = c_right->Integer32Value();
+ if (dividend == kMinInt && divisor == -1) {
+ return H_CONSTANT_DOUBLE(-0.0);
+ }
if (divisor != 0) {
int32_t res = dividend % divisor;
if ((res == 0) && (dividend < 0)) {
return H_CONSTANT_DOUBLE(-0.0);
}
- return H_CONSTANT_INT32(res);
+ return H_CONSTANT_INT(res);
}
}
}
- return new(zone) HMod(context, left, right);
+ return new(zone) HMod(context, left, right, fixed_right_arg);
}
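
// Illustrative sketch (not part of the patch): the two special cases folded
// above. kMinInt % -1 must not be evaluated with the native % operator (the
// intermediate quotient overflows, which is undefined behaviour in C++), and
// a zero remainder with a negative dividend has to become -0.0 to match the
// semantics of the JS % operator.
#include <cstdint>

static bool ModFoldsToMinusZero(int32_t dividend, int32_t divisor) {
  if (dividend == INT32_MIN && divisor == -1) return true;  // Avoid UB in %.
  if (divisor == 0) return false;                           // NaN, not -0.
  return dividend < 0 && (dividend % divisor) == 0;         // e.g. -4 % 2.
}
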
-HInstruction* HDiv::NewHDiv(Zone* zone,
- HValue* context,
- HValue* left,
- HValue* right) {
+HInstruction* HDiv::New(
+ Zone* zone, HValue* context, HValue* left, HValue* right) {
// If left and right are constant values, try to return a constant value.
- if (left->IsConstant() && right->IsConstant()) {
+ if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) {
HConstant* c_left = HConstant::cast(left);
HConstant* c_right = HConstant::cast(right);
if ((c_left->HasNumberValue() && c_right->HasNumberValue())) {
if (c_right->DoubleValue() != 0) {
double double_res = c_left->DoubleValue() / c_right->DoubleValue();
if (TypeInfo::IsInt32Double(double_res)) {
- return H_CONSTANT_INT32(static_cast<int32_t>(double_res));
+ return H_CONSTANT_INT(double_res);
}
return H_CONSTANT_DOUBLE(double_res);
+ } else {
+ int sign = Double(c_left->DoubleValue()).Sign() *
+ Double(c_right->DoubleValue()).Sign(); // Right could be -0.
+ return H_CONSTANT_DOUBLE(sign * V8_INFINITY);
}
}
}
@@ -2465,12 +3921,9 @@ HInstruction* HDiv::NewHDiv(Zone* zone,
}
-HInstruction* HBitwise::NewHBitwise(Zone* zone,
- Token::Value op,
- HValue* context,
- HValue* left,
- HValue* right) {
- if (left->IsConstant() && right->IsConstant()) {
+HInstruction* HBitwise::New(
+ Zone* zone, HValue* context, Token::Value op, HValue* left, HValue* right) {
+ if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) {
HConstant* c_left = HConstant::cast(left);
HConstant* c_right = HConstant::cast(right);
if ((c_left->HasNumberValue() && c_right->HasNumberValue())) {
@@ -2491,23 +3944,21 @@ HInstruction* HBitwise::NewHBitwise(Zone* zone,
result = 0; // Please the compiler.
UNREACHABLE();
}
- return H_CONSTANT_INT32(result);
+ return H_CONSTANT_INT(result);
}
}
- return new(zone) HBitwise(op, context, left, right);
+ return new(zone) HBitwise(context, op, left, right);
}
#define DEFINE_NEW_H_BITWISE_INSTR(HInstr, result) \
-HInstruction* HInstr::New##HInstr(Zone* zone, \
- HValue* context, \
- HValue* left, \
- HValue* right) { \
- if (left->IsConstant() && right->IsConstant()) { \
+HInstruction* HInstr::New( \
+ Zone* zone, HValue* context, HValue* left, HValue* right) { \
+ if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) { \
HConstant* c_left = HConstant::cast(left); \
HConstant* c_right = HConstant::cast(right); \
if ((c_left->HasNumberValue() && c_right->HasNumberValue())) { \
- return H_CONSTANT_INT32(result); \
+ return H_CONSTANT_INT(result); \
} \
} \
return new(zone) HInstr(context, left, right); \
@@ -2522,38 +3973,28 @@ c_left->NumberValueAsInteger32() << (c_right->NumberValueAsInteger32() & 0x1f))
#undef DEFINE_NEW_H_BITWISE_INSTR
-HInstruction* HShr::NewHShr(Zone* zone,
- HValue* context,
- HValue* left,
- HValue* right) {
- if (left->IsConstant() && right->IsConstant()) {
+HInstruction* HShr::New(
+ Zone* zone, HValue* context, HValue* left, HValue* right) {
+ if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) {
HConstant* c_left = HConstant::cast(left);
HConstant* c_right = HConstant::cast(right);
if ((c_left->HasNumberValue() && c_right->HasNumberValue())) {
int32_t left_val = c_left->NumberValueAsInteger32();
int32_t right_val = c_right->NumberValueAsInteger32() & 0x1f;
if ((right_val == 0) && (left_val < 0)) {
- return H_CONSTANT_DOUBLE(
- static_cast<double>(static_cast<uint32_t>(left_val)));
+ return H_CONSTANT_DOUBLE(static_cast<uint32_t>(left_val));
}
- return H_CONSTANT_INT32(static_cast<uint32_t>(left_val) >> right_val);
+ return H_CONSTANT_INT(static_cast<uint32_t>(left_val) >> right_val);
}
}
return new(zone) HShr(context, left, right);
}
-#undef H_CONSTANT_INT32
+#undef H_CONSTANT_INT
#undef H_CONSTANT_DOUBLE
-void HIn::PrintDataTo(StringStream* stream) {
- key()->PrintNameTo(stream);
- stream->Add(" ");
- object()->PrintNameTo(stream);
-}
-
-
void HBitwise::PrintDataTo(StringStream* stream) {
stream->Add(Token::Name(op_));
stream->Add(" ");
@@ -2561,43 +4002,100 @@ void HBitwise::PrintDataTo(StringStream* stream) {
}
-Representation HPhi::InferredRepresentation() {
- bool double_occurred = false;
- bool int32_occurred = false;
+void HPhi::SimplifyConstantInputs() {
+ // Convert constant inputs to integers when all uses are truncating.
+ // This must happen before representation inference takes place.
+ if (!CheckUsesForFlag(kTruncatingToInt32)) return;
for (int i = 0; i < OperandCount(); ++i) {
- HValue* value = OperandAt(i);
- if (value->IsUnknownOSRValue()) {
- HPhi* hint_value = HUnknownOSRValue::cast(value)->incoming_value();
- if (hint_value != NULL) {
- Representation hint = hint_value->representation();
- if (hint.IsDouble()) double_occurred = true;
- if (hint.IsInteger32()) int32_occurred = true;
- }
+ if (!OperandAt(i)->IsConstant()) return;
+ }
+ HGraph* graph = block()->graph();
+ for (int i = 0; i < OperandCount(); ++i) {
+ HConstant* operand = HConstant::cast(OperandAt(i));
+ if (operand->HasInteger32Value()) {
continue;
+ } else if (operand->HasDoubleValue()) {
+ HConstant* integer_input =
+ HConstant::New(graph->zone(), graph->GetInvalidContext(),
+ DoubleToInt32(operand->DoubleValue()));
+ integer_input->InsertAfter(operand);
+ SetOperandAt(i, integer_input);
+ } else if (operand->HasBooleanValue()) {
+ SetOperandAt(i, operand->BooleanValue() ? graph->GetConstant1()
+ : graph->GetConstant0());
+ } else if (operand->ImmortalImmovable()) {
+ SetOperandAt(i, graph->GetConstant0());
}
- if (value->representation().IsDouble()) double_occurred = true;
- if (value->representation().IsInteger32()) int32_occurred = true;
- if (value->representation().IsTagged()) {
- if (value->IsConstant()) {
- HConstant* constant = HConstant::cast(value);
- if (constant->IsConvertibleToInteger()) {
- int32_occurred = true;
- } else if (constant->HasNumberValue()) {
- double_occurred = true;
- } else {
- return Representation::Tagged();
- }
- } else {
- return Representation::Tagged();
- }
+ }
+ // Overwrite observed input representations because they are likely Tagged.
+ for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
+ HValue* use = it.value();
+ if (use->IsBinaryOperation()) {
+ HBinaryOperation::cast(use)->set_observed_input_representation(
+ it.index(), Representation::Smi());
}
}
+}
+
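
// Illustrative sketch (not part of the patch): the constant rewriting above
// is safe because every use of the phi truncates to int32 anyway, and the
// replacement applies the same JS ToInt32 conversion (truncate, then wrap
// modulo 2^32) that those uses would perform, so no use can observe the
// difference (e.g. the double constant 3.7 becomes 3, true becomes 1).
#include <cmath>
#include <cstdint>

static int32_t JsToInt32(double d) {
  if (!std::isfinite(d)) return 0;                           // NaN, +/-Inf -> 0.
  double wrapped = std::fmod(std::trunc(d), 4294967296.0);   // Wrap mod 2^32.
  if (wrapped < 0) wrapped += 4294967296.0;
  // Reinterpret the unsigned 32-bit value as two's-complement signed.
  return static_cast<int32_t>(static_cast<uint32_t>(wrapped));
}
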
- if (double_occurred) return Representation::Double();
+void HPhi::InferRepresentation(HInferRepresentationPhase* h_infer) {
+ ASSERT(CheckFlag(kFlexibleRepresentation));
+ Representation new_rep = RepresentationFromInputs();
+ UpdateRepresentation(new_rep, h_infer, "inputs");
+ new_rep = RepresentationFromUses();
+ UpdateRepresentation(new_rep, h_infer, "uses");
+ new_rep = RepresentationFromUseRequirements();
+ UpdateRepresentation(new_rep, h_infer, "use requirements");
+}
- if (int32_occurred) return Representation::Integer32();
- return Representation::None();
+Representation HPhi::RepresentationFromInputs() {
+ Representation r = Representation::None();
+ for (int i = 0; i < OperandCount(); ++i) {
+ r = r.generalize(OperandAt(i)->KnownOptimalRepresentation());
+ }
+ return r;
+}
+
+
+// Returns a representation if all uses agree on the same representation.
+// Integer32 is also returned when some uses are Smi but others are Integer32.
+Representation HValue::RepresentationFromUseRequirements() {
+ Representation rep = Representation::None();
+ for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
+    // Ignore the use requirement from code that never runs.
+ if (it.value()->block()->IsUnreachable()) continue;
+
+ // We check for observed_input_representation elsewhere.
+ Representation use_rep =
+ it.value()->RequiredInputRepresentation(it.index());
+ if (rep.IsNone()) {
+ rep = use_rep;
+ continue;
+ }
+ if (use_rep.IsNone() || rep.Equals(use_rep)) continue;
+ if (rep.generalize(use_rep).IsInteger32()) {
+ rep = Representation::Integer32();
+ continue;
+ }
+ return Representation::None();
+ }
+ return rep;
+}
+
+
+bool HValue::HasNonSmiUse() {
+ for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
+ // We check for observed_input_representation elsewhere.
+ Representation use_rep =
+ it.value()->RequiredInputRepresentation(it.index());
+ if (!use_rep.IsNone() &&
+ !use_rep.IsSmi() &&
+ !use_rep.IsTagged()) {
+ return true;
+ }
+ }
+ return false;
}
@@ -2622,29 +4120,188 @@ void HSimulate::Verify() {
}
-void HCheckSmi::Verify() {
+void HCheckHeapObject::Verify() {
HInstruction::Verify();
ASSERT(HasNoUses());
}
-void HCheckNonSmi::Verify() {
+void HCheckValue::Verify() {
HInstruction::Verify();
ASSERT(HasNoUses());
}
+#endif
+
-void HCheckFunction::Verify() {
- HInstruction::Verify();
- ASSERT(HasNoUses());
+HObjectAccess HObjectAccess::ForFixedArrayHeader(int offset) {
+ ASSERT(offset >= 0);
+ ASSERT(offset < FixedArray::kHeaderSize);
+ if (offset == FixedArray::kLengthOffset) return ForFixedArrayLength();
+ return HObjectAccess(kInobject, offset);
}
-void HCheckPrototypeMaps::Verify() {
- HInstruction::Verify();
- ASSERT(HasNoUses());
+HObjectAccess HObjectAccess::ForJSObjectOffset(int offset,
+ Representation representation) {
+ ASSERT(offset >= 0);
+ Portion portion = kInobject;
+
+ if (offset == JSObject::kElementsOffset) {
+ portion = kElementsPointer;
+ } else if (offset == JSObject::kMapOffset) {
+ portion = kMaps;
+ }
+ return HObjectAccess(portion, offset, representation);
}
-#endif
+
+HObjectAccess HObjectAccess::ForContextSlot(int index) {
+ ASSERT(index >= 0);
+ Portion portion = kInobject;
+ int offset = Context::kHeaderSize + index * kPointerSize;
+ ASSERT_EQ(offset, Context::SlotOffset(index) + kHeapObjectTag);
+ return HObjectAccess(portion, offset, Representation::Tagged());
+}
+
+
+HObjectAccess HObjectAccess::ForJSArrayOffset(int offset) {
+ ASSERT(offset >= 0);
+ Portion portion = kInobject;
+
+ if (offset == JSObject::kElementsOffset) {
+ portion = kElementsPointer;
+ } else if (offset == JSArray::kLengthOffset) {
+ portion = kArrayLengths;
+ } else if (offset == JSObject::kMapOffset) {
+ portion = kMaps;
+ }
+ return HObjectAccess(portion, offset);
+}
+
+
+HObjectAccess HObjectAccess::ForBackingStoreOffset(int offset,
+ Representation representation) {
+ ASSERT(offset >= 0);
+ return HObjectAccess(kBackingStore, offset, representation);
+}
+
+
+HObjectAccess HObjectAccess::ForField(Handle<Map> map,
+ LookupResult *lookup, Handle<String> name) {
+ ASSERT(lookup->IsField() || lookup->IsTransitionToField(*map));
+ int index;
+ Representation representation;
+ if (lookup->IsField()) {
+ index = lookup->GetLocalFieldIndexFromMap(*map);
+ representation = lookup->representation();
+ } else {
+ Map* transition = lookup->GetTransitionMapFromMap(*map);
+ int descriptor = transition->LastAdded();
+ index = transition->instance_descriptors()->GetFieldIndex(descriptor) -
+ map->inobject_properties();
+ PropertyDetails details =
+ transition->instance_descriptors()->GetDetails(descriptor);
+ representation = details.representation();
+ }
+ if (index < 0) {
+ // Negative property indices are in-object properties, indexed
+ // from the end of the fixed part of the object.
+ int offset = (index * kPointerSize) + map->instance_size();
+ return HObjectAccess(kInobject, offset, representation);
+ } else {
+ // Non-negative property indices are in the properties array.
+ int offset = (index * kPointerSize) + FixedArray::kHeaderSize;
+ return HObjectAccess(kBackingStore, offset, representation, name);
+ }
+}
+
+
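
// Illustrative sketch (not part of the patch): how the signed field index
// above turns into a byte offset. A negative index selects an in-object
// field, counted back from the end of the object's fixed area; a
// non-negative index selects a slot in the out-of-line properties array,
// after that array's fixed header.
static int FieldByteOffset(int index, int instance_size, int header_size,
                           int pointer_size) {
  return index < 0
      ? index * pointer_size + instance_size   // In-object field.
      : index * pointer_size + header_size;    // Properties (backing) store.
}
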
+HObjectAccess HObjectAccess::ForCellPayload(Isolate* isolate) {
+ return HObjectAccess(
+ kInobject, Cell::kValueOffset, Representation::Tagged(),
+ Handle<String>(isolate->heap()->cell_value_string()));
+}
+
+
+void HObjectAccess::SetGVNFlags(HValue *instr, bool is_store) {
+  // Set the appropriate GVN flags for a given load or store instruction.
+ if (is_store) {
+    // Track dominating allocations in order to eliminate write barriers.
+ instr->SetGVNFlag(kDependsOnNewSpacePromotion);
+ instr->SetFlag(HValue::kTrackSideEffectDominators);
+ } else {
+    // Try to GVN loads, but don't hoist above map changes.
+ instr->SetFlag(HValue::kUseGVN);
+ instr->SetGVNFlag(kDependsOnMaps);
+ }
+
+ switch (portion()) {
+ case kArrayLengths:
+ instr->SetGVNFlag(is_store
+ ? kChangesArrayLengths : kDependsOnArrayLengths);
+ break;
+ case kStringLengths:
+ instr->SetGVNFlag(is_store
+ ? kChangesStringLengths : kDependsOnStringLengths);
+ break;
+ case kInobject:
+ instr->SetGVNFlag(is_store
+ ? kChangesInobjectFields : kDependsOnInobjectFields);
+ break;
+ case kDouble:
+ instr->SetGVNFlag(is_store
+ ? kChangesDoubleFields : kDependsOnDoubleFields);
+ break;
+ case kBackingStore:
+ instr->SetGVNFlag(is_store
+ ? kChangesBackingStoreFields : kDependsOnBackingStoreFields);
+ break;
+ case kElementsPointer:
+ instr->SetGVNFlag(is_store
+ ? kChangesElementsPointer : kDependsOnElementsPointer);
+ break;
+ case kMaps:
+ instr->SetGVNFlag(is_store
+ ? kChangesMaps : kDependsOnMaps);
+ break;
+ case kExternalMemory:
+ instr->SetGVNFlag(is_store
+ ? kChangesExternalMemory : kDependsOnExternalMemory);
+ break;
+ }
+}
+
+
+void HObjectAccess::PrintTo(StringStream* stream) {
+ stream->Add(".");
+
+ switch (portion()) {
+ case kArrayLengths:
+ case kStringLengths:
+ stream->Add("%length");
+ break;
+ case kElementsPointer:
+ stream->Add("%elements");
+ break;
+ case kMaps:
+ stream->Add("%map");
+ break;
+ case kDouble: // fall through
+ case kInobject:
+ if (!name_.is_null()) stream->Add(*String::cast(*name_)->ToCString());
+ stream->Add("[in-object]");
+ break;
+ case kBackingStore:
+ if (!name_.is_null()) stream->Add(*String::cast(*name_)->ToCString());
+ stream->Add("[backing-store]");
+ break;
+ case kExternalMemory:
+ stream->Add("[external-memory]");
+ break;
+ }
+
+ stream->Add("@%d", offset());
+}
} } // namespace v8::internal
diff --git a/deps/v8/src/hydrogen-instructions.h b/deps/v8/src/hydrogen-instructions.h
index a1e5b9707..10c4b945f 100644
--- a/deps/v8/src/hydrogen-instructions.h
+++ b/deps/v8/src/hydrogen-instructions.h
@@ -33,8 +33,10 @@
#include "allocation.h"
#include "code-stubs.h"
#include "data-flow.h"
+#include "deoptimizer.h"
#include "small-pointer-list.h"
#include "string-stream.h"
+#include "unique.h"
#include "v8conversions.h"
#include "v8utils.h"
#include "zone.h"
@@ -45,14 +47,16 @@ namespace internal {
// Forward declarations.
class HBasicBlock;
class HEnvironment;
+class HInferRepresentationPhase;
class HInstruction;
class HLoopInformation;
+class HStoreNamedField;
class HValue;
class LInstruction;
class LChunkBuilder;
-
#define HYDROGEN_ABSTRACT_INSTRUCTION_LIST(V) \
+ V(ArithmeticBinaryOperation) \
V(BinaryOperation) \
V(BitwiseBinaryOperation) \
V(ControlInstruction) \
@@ -63,16 +67,15 @@ class LChunkBuilder;
V(AbnormalExit) \
V(AccessArgumentsAt) \
V(Add) \
- V(AllocateObject) \
+ V(Allocate) \
V(ApplyArguments) \
V(ArgumentsElements) \
V(ArgumentsLength) \
V(ArgumentsObject) \
- V(ArrayLiteral) \
V(Bitwise) \
- V(BitNot) \
V(BlockEntry) \
V(BoundsCheck) \
+ V(BoundsCheckBaseIndexInformation) \
V(Branch) \
V(CallConstantFunction) \
V(CallFunction) \
@@ -81,33 +84,38 @@ class LChunkBuilder;
V(CallKnownGlobal) \
V(CallNamed) \
V(CallNew) \
+ V(CallNewArray) \
V(CallRuntime) \
V(CallStub) \
+ V(CapturedObject) \
V(Change) \
- V(CheckFunction) \
+ V(CheckHeapObject) \
V(CheckInstanceType) \
V(CheckMaps) \
- V(CheckNonSmi) \
- V(CheckPrototypeMaps) \
+ V(CheckMapValue) \
V(CheckSmi) \
+ V(CheckValue) \
V(ClampToUint8) \
V(ClassOfTestAndBranch) \
- V(CompareIDAndBranch) \
+ V(CompareNumericAndBranch) \
+ V(CompareHoleAndBranch) \
V(CompareGeneric) \
V(CompareObjectEqAndBranch) \
V(CompareMap) \
- V(CompareConstantEqAndBranch) \
V(Constant) \
V(Context) \
+ V(DateField) \
+ V(DebugBreak) \
V(DeclareGlobals) \
- V(DeleteProperty) \
V(Deoptimize) \
V(Div) \
+ V(DummyUse) \
V(ElementsKind) \
V(EnterInlined) \
- V(FastLiteral) \
- V(FixedArrayBaseLength) \
+ V(EnvironmentMarker) \
V(ForceRepresentation) \
+ V(ForInCacheArray) \
+ V(ForInPrepareMap) \
V(FunctionLiteral) \
V(GetCachedArrayIndex) \
V(GlobalObject) \
@@ -115,37 +123,32 @@ class LChunkBuilder;
V(Goto) \
V(HasCachedArrayIndexAndBranch) \
V(HasInstanceTypeAndBranch) \
- V(In) \
+ V(InnerAllocatedObject) \
V(InstanceOf) \
V(InstanceOfKnownGlobal) \
V(InvokeFunction) \
V(IsConstructCallAndBranch) \
- V(IsNilAndBranch) \
V(IsObjectAndBranch) \
V(IsStringAndBranch) \
V(IsSmiAndBranch) \
V(IsUndetectableAndBranch) \
- V(JSArrayLength) \
V(LeaveInlined) \
V(LoadContextSlot) \
- V(LoadElements) \
V(LoadExternalArrayPointer) \
+ V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
V(LoadGlobalCell) \
V(LoadGlobalGeneric) \
- V(LoadKeyedFastDoubleElement) \
- V(LoadKeyedFastElement) \
+ V(LoadKeyed) \
V(LoadKeyedGeneric) \
- V(LoadKeyedSpecializedArrayElement) \
V(LoadNamedField) \
- V(LoadNamedFieldPolymorphic) \
V(LoadNamedGeneric) \
+ V(LoadRoot) \
V(MapEnumLength) \
V(MathFloorOfDiv) \
V(MathMinMax) \
V(Mod) \
V(Mul) \
- V(ObjectLiteral) \
V(OsrEntry) \
V(OuterContext) \
V(Parameter) \
@@ -154,90 +157,81 @@ class LChunkBuilder;
V(Random) \
V(RegExpLiteral) \
V(Return) \
+ V(Ror) \
V(Sar) \
+ V(SeqStringSetChar) \
V(Shl) \
V(Shr) \
V(Simulate) \
- V(SoftDeoptimize) \
V(StackCheck) \
+ V(StoreCodeEntry) \
V(StoreContextSlot) \
V(StoreGlobalCell) \
V(StoreGlobalGeneric) \
- V(StoreKeyedFastDoubleElement) \
- V(StoreKeyedFastElement) \
+ V(StoreKeyed) \
V(StoreKeyedGeneric) \
- V(StoreKeyedSpecializedArrayElement) \
V(StoreNamedField) \
V(StoreNamedGeneric) \
V(StringAdd) \
V(StringCharCodeAt) \
V(StringCharFromCode) \
V(StringCompareAndBranch) \
- V(StringLength) \
V(Sub) \
V(ThisFunction) \
V(Throw) \
V(ToFastProperties) \
V(TransitionElementsKind) \
+ V(TrapAllocationMemento) \
V(Typeof) \
V(TypeofIsAndBranch) \
V(UnaryMathOperation) \
V(UnknownOSRValue) \
V(UseConst) \
V(ValueOf) \
- V(ForInPrepareMap) \
- V(ForInCacheArray) \
- V(CheckMapValue) \
- V(LoadFieldByIndex) \
- V(DateField) \
V(WrapReceiver)
#define GVN_TRACKED_FLAG_LIST(V) \
+ V(Maps) \
V(NewSpacePromotion)
#define GVN_UNTRACKED_FLAG_LIST(V) \
- V(Calls) \
- V(InobjectFields) \
+ V(ArrayElements) \
+ V(ArrayLengths) \
+ V(StringLengths) \
V(BackingStoreFields) \
+ V(Calls) \
+ V(ContextSlots) \
+ V(DoubleArrayElements) \
+ V(DoubleFields) \
V(ElementsKind) \
V(ElementsPointer) \
- V(ArrayElements) \
- V(DoubleArrayElements) \
- V(SpecializedArrayElements) \
V(GlobalVars) \
- V(Maps) \
- V(ArrayLengths) \
- V(ContextSlots) \
- V(OsrEntries)
+ V(InobjectFields) \
+ V(OsrEntries) \
+ V(ExternalMemory)
-#define DECLARE_ABSTRACT_INSTRUCTION(type) \
- virtual bool Is##type() const { return true; } \
- static H##type* cast(HValue* value) { \
- ASSERT(value->Is##type()); \
- return reinterpret_cast<H##type*>(value); \
- }
+#define DECLARE_ABSTRACT_INSTRUCTION(type) \
+ virtual bool Is##type() const V8_FINAL V8_OVERRIDE { return true; } \
+ static H##type* cast(HValue* value) { \
+ ASSERT(value->Is##type()); \
+ return reinterpret_cast<H##type*>(value); \
+ }
-#define DECLARE_CONCRETE_INSTRUCTION(type) \
- virtual LInstruction* CompileToLithium(LChunkBuilder* builder); \
- static H##type* cast(HValue* value) { \
- ASSERT(value->Is##type()); \
- return reinterpret_cast<H##type*>(value); \
- } \
- virtual Opcode opcode() const { return HValue::k##type; }
+#define DECLARE_CONCRETE_INSTRUCTION(type) \
+ virtual LInstruction* CompileToLithium( \
+ LChunkBuilder* builder) V8_FINAL V8_OVERRIDE; \
+ static H##type* cast(HValue* value) { \
+ ASSERT(value->Is##type()); \
+ return reinterpret_cast<H##type*>(value); \
+ } \
+ virtual Opcode opcode() const V8_FINAL V8_OVERRIDE { \
+ return HValue::k##type; \
+ }
-#ifdef DEBUG
-#define ASSERT_ALLOCATION_DISABLED do { \
- OptimizingCompilerThread* thread = \
- ISOLATE->optimizing_compiler_thread(); \
- ASSERT(thread->IsOptimizerThread() || !HEAP->IsAllocationAllowed()); \
- } while (0)
-#else
-#define ASSERT_ALLOCATION_DISABLED do {} while (0)
-#endif
-class Range: public ZoneObject {
+class Range V8_FINAL : public ZoneObject {
public:
Range()
: lower_(kMinInt),
@@ -270,6 +264,7 @@ class Range: public ZoneObject {
bool CanBeMinusZero() const { return CanBeZero() && can_be_minus_zero_; }
bool CanBeZero() const { return upper_ >= 0 && lower_ <= 0; }
bool CanBeNegative() const { return lower_ < 0; }
+ bool CanBePositive() const { return upper_ > 0; }
bool Includes(int value) const { return lower_ <= value && upper_ >= value; }
bool IsMostGeneric() const {
return lower_ == kMinInt && upper_ == kMaxInt && CanBeMinusZero();
@@ -277,6 +272,10 @@ class Range: public ZoneObject {
bool IsInSmiRange() const {
return lower_ >= Smi::kMinValue && upper_ <= Smi::kMaxValue;
}
+ void ClampToSmi() {
+ lower_ = Max(lower_, Smi::kMinValue);
+ upper_ = Min(upper_, Smi::kMaxValue);
+ }
void KeepOrder();
#ifdef DEBUG
void Verify() const;
@@ -295,9 +294,9 @@ class Range: public ZoneObject {
void AddConstant(int32_t value);
void Sar(int32_t value);
void Shl(int32_t value);
- bool AddAndCheckOverflow(Range* other);
- bool SubAndCheckOverflow(Range* other);
- bool MulAndCheckOverflow(Range* other);
+ bool AddAndCheckOverflow(const Representation& r, Range* other);
+ bool SubAndCheckOverflow(const Representation& r, Range* other);
+ bool MulAndCheckOverflow(const Representation& r, Range* other);
private:
int32_t lower_;
@@ -307,54 +306,9 @@ class Range: public ZoneObject {
};
-class Representation {
- public:
- enum Kind {
- kNone,
- kTagged,
- kDouble,
- kInteger32,
- kExternal,
- kNumRepresentations
- };
-
- Representation() : kind_(kNone) { }
-
- static Representation None() { return Representation(kNone); }
- static Representation Tagged() { return Representation(kTagged); }
- static Representation Integer32() { return Representation(kInteger32); }
- static Representation Double() { return Representation(kDouble); }
- static Representation External() { return Representation(kExternal); }
-
- bool Equals(const Representation& other) {
- return kind_ == other.kind_;
- }
-
- Kind kind() const { return static_cast<Kind>(kind_); }
- bool IsNone() const { return kind_ == kNone; }
- bool IsTagged() const { return kind_ == kTagged; }
- bool IsInteger32() const { return kind_ == kInteger32; }
- bool IsDouble() const { return kind_ == kDouble; }
- bool IsExternal() const { return kind_ == kExternal; }
- bool IsSpecialization() const {
- return kind_ == kInteger32 || kind_ == kDouble;
- }
- const char* Mnemonic() const;
-
- private:
- explicit Representation(Kind k) : kind_(k) { }
-
- // Make sure kind fits in int8.
- STATIC_ASSERT(kNumRepresentations <= (1 << kBitsPerByte));
-
- int8_t kind_;
-};
-
-
-class HType {
+class HType V8_FINAL {
public:
- HType() : type_(kUninitialized) { }
-
+ static HType None() { return HType(kNone); }
static HType Tagged() { return HType(kTagged); }
static HType TaggedPrimitive() { return HType(kTaggedPrimitive); }
static HType TaggedNumber() { return HType(kTaggedNumber); }
@@ -365,14 +319,13 @@ class HType {
static HType NonPrimitive() { return HType(kNonPrimitive); }
static HType JSArray() { return HType(kJSArray); }
static HType JSObject() { return HType(kJSObject); }
- static HType Uninitialized() { return HType(kUninitialized); }
// Return the weakest (least precise) common type.
HType Combine(HType other) {
return HType(static_cast<Type>(type_ & other.type_));
}
- bool Equals(const HType& other) {
+ bool Equals(const HType& other) const {
return type_ == other.type_;
}
@@ -380,63 +333,67 @@ class HType {
return Combine(other).Equals(other);
}
- bool IsTagged() {
- ASSERT(type_ != kUninitialized);
- return ((type_ & kTagged) == kTagged);
- }
-
- bool IsTaggedPrimitive() {
- ASSERT(type_ != kUninitialized);
+ bool IsTaggedPrimitive() const {
return ((type_ & kTaggedPrimitive) == kTaggedPrimitive);
}
- bool IsTaggedNumber() {
- ASSERT(type_ != kUninitialized);
+ bool IsTaggedNumber() const {
return ((type_ & kTaggedNumber) == kTaggedNumber);
}
- bool IsSmi() {
- ASSERT(type_ != kUninitialized);
+ bool IsSmi() const {
return ((type_ & kSmi) == kSmi);
}
- bool IsHeapNumber() {
- ASSERT(type_ != kUninitialized);
+ bool IsHeapNumber() const {
return ((type_ & kHeapNumber) == kHeapNumber);
}
- bool IsString() {
- ASSERT(type_ != kUninitialized);
+ bool IsString() const {
return ((type_ & kString) == kString);
}
- bool IsBoolean() {
- ASSERT(type_ != kUninitialized);
+ bool IsNonString() const {
+ return IsTaggedPrimitive() || IsSmi() || IsHeapNumber() ||
+ IsBoolean() || IsJSArray();
+ }
+
+ bool IsBoolean() const {
return ((type_ & kBoolean) == kBoolean);
}
- bool IsNonPrimitive() {
- ASSERT(type_ != kUninitialized);
+ bool IsNonPrimitive() const {
return ((type_ & kNonPrimitive) == kNonPrimitive);
}
- bool IsJSArray() {
- ASSERT(type_ != kUninitialized);
+ bool IsJSArray() const {
return ((type_ & kJSArray) == kJSArray);
}
- bool IsJSObject() {
- ASSERT(type_ != kUninitialized);
+ bool IsJSObject() const {
return ((type_ & kJSObject) == kJSObject);
}
- bool IsUninitialized() {
- return type_ == kUninitialized;
+ bool IsHeapObject() const {
+ return IsHeapNumber() || IsString() || IsBoolean() || IsNonPrimitive();
}
- bool IsHeapObject() {
- ASSERT(type_ != kUninitialized);
- return IsHeapNumber() || IsString() || IsNonPrimitive();
+ bool ToStringOrToNumberCanBeObserved(Representation representation) {
+ switch (type_) {
+ case kTaggedPrimitive: // fallthru
+ case kTaggedNumber: // fallthru
+ case kSmi: // fallthru
+ case kHeapNumber: // fallthru
+ case kString: // fallthru
+ case kBoolean:
+ return false;
+ case kJSArray: // fallthru
+ case kJSObject:
+ return true;
+ case kTagged:
+ break;
+ }
+ return !representation.IsSmiOrInteger32() && !representation.IsDouble();
}
static HType TypeFromValue(Handle<Object> value);
@@ -445,6 +402,7 @@ class HType {
private:
enum Type {
+ kNone = 0x0, // 0000 0000 0000 0000
kTagged = 0x1, // 0000 0000 0000 0001
kTaggedPrimitive = 0x5, // 0000 0000 0000 0101
kTaggedNumber = 0xd, // 0000 0000 0000 1101
@@ -454,12 +412,11 @@ class HType {
kBoolean = 0x85, // 0000 0000 1000 0101
kNonPrimitive = 0x101, // 0000 0001 0000 0001
kJSObject = 0x301, // 0000 0011 0000 0001
- kJSArray = 0x701, // 0000 0111 0000 0001
- kUninitialized = 0x1fff // 0001 1111 1111 1111
+ kJSArray = 0x701 // 0000 0111 0000 0001
};
// Make sure type fits in int16.
- STATIC_ASSERT(kUninitialized < (1 << (2 * kBitsPerByte)));
+ STATIC_ASSERT(kJSArray < (1 << (2 * kBitsPerByte)));
explicit HType(Type t) : type_(t) { }
@@ -496,7 +453,7 @@ class HUseListNode: public ZoneObject {
// We reuse use list nodes behind the scenes as uses are added and deleted.
// This class is the safe way to iterate uses while deleting them.
-class HUseIterator BASE_EMBEDDED {
+class HUseIterator V8_FINAL BASE_EMBEDDED {
public:
bool Done() { return current_ == NULL; }
void Advance();
@@ -539,30 +496,81 @@ enum GVNFlag {
#undef COUNT_FLAG
};
+
+class DecompositionResult V8_FINAL BASE_EMBEDDED {
+ public:
+ DecompositionResult() : base_(NULL), offset_(0), scale_(0) {}
+
+ HValue* base() { return base_; }
+ int offset() { return offset_; }
+ int scale() { return scale_; }
+
+ bool Apply(HValue* other_base, int other_offset, int other_scale = 0) {
+ if (base_ == NULL) {
+ base_ = other_base;
+ offset_ = other_offset;
+ scale_ = other_scale;
+ return true;
+ } else {
+ if (scale_ == 0) {
+ base_ = other_base;
+ offset_ += other_offset;
+ scale_ = other_scale;
+ return true;
+ } else {
+ return false;
+ }
+ }
+ }
+
+ void SwapValues(HValue** other_base, int* other_offset, int* other_scale) {
+ swap(&base_, other_base);
+ swap(&offset_, other_offset);
+ swap(&scale_, other_scale);
+ }
+
+ private:
+ template <class T> void swap(T* a, T* b) {
+ T c(*a);
+ *a = *b;
+ *b = c;
+ }
+
+ HValue* base_;
+ int offset_;
+ int scale_;
+};
+
+
typedef EnumSet<GVNFlag> GVNFlagSet;
-class HValue: public ZoneObject {
+class HValue : public ZoneObject {
public:
static const int kNoNumber = -1;
enum Flag {
kFlexibleRepresentation,
+ kCannotBeTagged,
// Participate in Global Value Numbering, i.e. elimination of
// unnecessary recomputations. If an instruction sets this flag, it must
// implement DataEquals(), which will be used to determine if other
// occurrences of the instruction are indeed the same.
kUseGVN,
// Track instructions that are dominating side effects. If an instruction
- // sets this flag, it must implement SetSideEffectDominator() and should
+ // sets this flag, it must implement HandleSideEffectDominator() and should
// indicate which side effects to track by setting GVN flags.
kTrackSideEffectDominators,
kCanOverflow,
kBailoutOnMinusZero,
kCanBeDivByZero,
- kDeoptimizeOnUndefined,
+ kAllowUndefinedAsNaN,
kIsArguments,
kTruncatingToInt32,
+ kAllUsesTruncatingToInt32,
+ kTruncatingToSmi,
+ kAllUsesTruncatingToSmi,
+ // Set after an instruction is killed.
kIsDead,
// Instructions that are allowed to produce full range unsigned integer
// values are marked with kUint32 flag. If arithmetic shift or a load from
@@ -571,7 +579,16 @@ class HValue: public ZoneObject {
// HGraph::ComputeSafeUint32Operations is responsible for setting this
// flag.
kUint32,
- kLastFlag = kUint32
+ kHasNoObservableSideEffects,
+ // Indicates the instruction is live during dead code elimination.
+ kIsLive,
+
+ // HEnvironmentMarkers are deleted before dead code
+ // elimination takes place, so they can repurpose the kIsLive flag:
+ kEndsLiveRange = kIsLive,
+
+ // TODO(everyone): Don't forget to update this!
+ kLastFlag = kIsLive
};
STATIC_ASSERT(kLastFlag < kBitsPerInt);
@@ -612,35 +629,55 @@ class HValue: public ZoneObject {
HYDROGEN_ABSTRACT_INSTRUCTION_LIST(DECLARE_PREDICATE)
#undef DECLARE_PREDICATE
- HValue() : block_(NULL),
- id_(kNoNumber),
- type_(HType::Tagged()),
- use_list_(NULL),
- range_(NULL),
- flags_(0) {}
+ HValue(HType type = HType::Tagged())
+ : block_(NULL),
+ id_(kNoNumber),
+ type_(type),
+ use_list_(NULL),
+ range_(NULL),
+ flags_(0) {}
virtual ~HValue() {}
+ virtual int position() const { return RelocInfo::kNoPosition; }
+
HBasicBlock* block() const { return block_; }
void SetBlock(HBasicBlock* block);
int LoopWeight() const;
+ // Note: Never call this method for an unlinked value.
+ Isolate* isolate() const;
+
int id() const { return id_; }
void set_id(int id) { id_ = id; }
HUseIterator uses() const { return HUseIterator(use_list_); }
virtual bool EmitAtUses() { return false; }
+
Representation representation() const { return representation_; }
void ChangeRepresentation(Representation r) {
- // Representation was already set and is allowed to be changed.
- ASSERT(!r.IsNone());
ASSERT(CheckFlag(kFlexibleRepresentation));
+ ASSERT(!CheckFlag(kCannotBeTagged) || !r.IsTagged());
RepresentationChanged(r);
representation_ = r;
+ if (r.IsTagged()) {
+ // Tagged is the bottom of the lattice, don't go any further.
+ ClearFlag(kFlexibleRepresentation);
+ }
}
- void AssumeRepresentation(Representation r);
+ virtual void AssumeRepresentation(Representation r);
- virtual bool IsConvertibleToInteger() const { return true; }
+ virtual Representation KnownOptimalRepresentation() {
+ Representation r = representation();
+ if (r.IsTagged()) {
+ HType t = type();
+ if (t.IsSmi()) return Representation::Smi();
+ if (t.IsHeapNumber()) return Representation::Double();
+ if (t.IsHeapObject()) return r;
+ return Representation::None();
+ }
+ return r;
+ }
HType type() const { return type_; }
void set_type(HType new_type) {
@@ -648,6 +685,10 @@ class HValue: public ZoneObject {
type_ = new_type;
}
+ bool IsHeapObject() {
+ return representation_.IsHeapObject() || type_.IsHeapObject();
+ }
+
// An operation needs to override this function iff:
// 1) it can produce an int32 output.
// 2) the true value of its output can potentially be minus zero.
@@ -663,6 +704,48 @@ class HValue: public ZoneObject {
return NULL;
}
+ // There are HInstructions that do not really change a value, they
+ // only add pieces of information to it (like bounds checks, map checks,
+ // smi checks...).
+ // We call these instructions "informative definitions", or "iDef".
+ // One of the iDef operands is special because it is the value that is
+ // "transferred" to the output, we call it the "redefined operand".
+ // If an HValue is an iDef it must override RedefinedOperandIndex() so that
+  // it does not return kNoRedefinedOperand.
+ static const int kNoRedefinedOperand = -1;
+ virtual int RedefinedOperandIndex() { return kNoRedefinedOperand; }
+ bool IsInformativeDefinition() {
+ return RedefinedOperandIndex() != kNoRedefinedOperand;
+ }
+ HValue* RedefinedOperand() {
+ int index = RedefinedOperandIndex();
+ return index == kNoRedefinedOperand ? NULL : OperandAt(index);
+ }
+
+ bool CanReplaceWithDummyUses();
+
+ virtual int argument_delta() const { return 0; }
+
+  // A purely informative definition is an iDef that will not emit code and
+ // should therefore be removed from the graph in the RestoreActualValues
+ // phase (so that live ranges will be shorter).
+ virtual bool IsPurelyInformativeDefinition() { return false; }
+
+ // This method must always return the original HValue SSA definition,
+ // regardless of any chain of iDefs of this value.
+ HValue* ActualValue() {
+ HValue* value = this;
+ int index;
+ while ((index = value->RedefinedOperandIndex()) != kNoRedefinedOperand) {
+ value = value->OperandAt(index);
+ }
+ return value;
+ }
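
  // Illustrative sketch (not part of the patch): a worked example of the
  // "informative definition" idea described above. A bounds check does not
  // change the index it checks, it only adds the fact that the index is in
  // range, so it redefines its index operand:
  //
  //   i1 = Parameter(0)            // original SSA definition
  //   i2 = BoundsCheck(i1, len)    // iDef: RedefinedOperandIndex() selects i1
  //   v  = LoadKeyed(elements, i2)
  //
  // Following the redefined-operand chain from i2, ActualValue() returns i1,
  // the value that actually exists at run time.
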
+
+ bool IsInteger32Constant();
+ int32_t GetInteger32Constant();
+ bool EqualsInteger32Constant(int32_t value);
+
bool IsDefinedAfter(HBasicBlock* other) const;
// Operands.
@@ -685,9 +768,17 @@ class HValue: public ZoneObject {
void SetFlag(Flag f) { flags_ |= (1 << f); }
void ClearFlag(Flag f) { flags_ &= ~(1 << f); }
bool CheckFlag(Flag f) const { return (flags_ & (1 << f)) != 0; }
+ void CopyFlag(Flag f, HValue* other) {
+ if (other->CheckFlag(f)) SetFlag(f);
+ }
// Returns true if the flag specified is set for all uses, false otherwise.
- bool CheckUsesForFlag(Flag f);
+ bool CheckUsesForFlag(Flag f) const;
+ // Same as before and the first one without the flag is returned in value.
+ bool CheckUsesForFlag(Flag f, HValue** value) const;
+ // Returns true if the flag specified is set for all uses, and this set
+ // of uses is non-empty.
+ bool HasAtLeastOneUseWithFlagAndNoneWithout(Flag f) const;
GVNFlagSet gvn_flags() const { return gvn_flags_; }
void SetGVNFlag(GVNFlag f) { gvn_flags_.Add(f); }
@@ -701,7 +792,8 @@ class HValue: public ZoneObject {
return gvn_flags_.ContainsAnyOf(AllSideEffectsFlagSet());
}
bool HasObservableSideEffects() const {
- return gvn_flags_.ContainsAnyOf(AllObservableSideEffectsFlagSet());
+ return !CheckFlag(kHasNoObservableSideEffects) &&
+ gvn_flags_.ContainsAnyOf(AllObservableSideEffectsFlagSet());
}
GVNFlagSet DependsOnFlags() const {
@@ -730,22 +822,27 @@ class HValue: public ZoneObject {
}
Range* range() const { return range_; }
+ // TODO(svenpanne) We should really use the null object pattern here.
bool HasRange() const { return range_ != NULL; }
+ bool CanBeNegative() const { return !HasRange() || range()->CanBeNegative(); }
+ bool CanBeZero() const { return !HasRange() || range()->CanBeZero(); }
+ bool RangeCanInclude(int value) const {
+ return !HasRange() || range()->Includes(value);
+ }
void AddNewRange(Range* r, Zone* zone);
void RemoveLastAddedRange();
void ComputeInitialRange(Zone* zone);
- // Representation helpers.
- virtual Representation RequiredInputRepresentation(int index) = 0;
-
- virtual Representation InferredRepresentation() {
- return representation();
- }
+ // Escape analysis helpers.
+ virtual bool HasEscapingOperandAt(int index) { return true; }
+ virtual bool HasOutOfBoundsAccess(int size) { return false; }
- // Type feedback access.
- virtual Representation ObservedInputRepresentation(int index) {
- return RequiredInputRepresentation(index);
+ // Representation helpers.
+ virtual Representation observed_input_representation(int index) {
+ return Representation::None();
}
+ virtual Representation RequiredInputRepresentation(int index) = 0;
+ virtual void InferRepresentation(HInferRepresentationPhase* h_infer);
// This gives the instruction an opportunity to replace itself with an
// instruction that does the same in some better way. To replace an
@@ -756,6 +853,9 @@ class HValue: public ZoneObject {
bool Equals(HValue* other);
virtual intptr_t Hashcode();
+  // Compute unique ids upfront; doing so is safe wrt GC and concurrent
+  // compilation.
+ virtual void FinalizeUniqueness() { }
+
// Printing support.
virtual void PrintTo(StringStream* stream) = 0;
void PrintNameTo(StringStream* stream);
@@ -765,6 +865,14 @@ class HValue: public ZoneObject {
const char* Mnemonic() const;
+ // Type information helpers.
+ bool HasMonomorphicJSObjectType();
+
+  // TODO(mstarzinger): For now instructions can override this function to
+  // specify statically known types; once HType can convey more information
+  // it should be based on the HType.
+ virtual Handle<Map> GetMonomorphicJSObjectMap() { return Handle<Map>(); }
+
// Updated the inferred type of this instruction and returns true if
// it has changed.
bool UpdateInferredType();
@@ -774,18 +882,45 @@ class HValue: public ZoneObject {
// This function must be overridden for instructions which have the
// kTrackSideEffectDominators flag set, to track instructions that are
// dominating side effects.
- virtual void SetSideEffectDominator(GVNFlag side_effect, HValue* dominator) {
+ virtual void HandleSideEffectDominator(GVNFlag side_effect,
+ HValue* dominator) {
UNREACHABLE();
}
- bool IsDead() const {
- return HasNoUses() && !HasObservableSideEffects() && IsDeletable();
+ // Check if this instruction has some reason that prevents elimination.
+ bool CannotBeEliminated() const {
+ return HasObservableSideEffects() || !IsDeletable();
}
#ifdef DEBUG
virtual void Verify() = 0;
#endif
+ virtual bool TryDecompose(DecompositionResult* decomposition) {
+ if (RedefinedOperand() != NULL) {
+ return RedefinedOperand()->TryDecompose(decomposition);
+ } else {
+ return false;
+ }
+ }
+
+ // Returns true conservatively if the program might be able to observe a
+ // ToString() operation on this value.
+ bool ToStringCanBeObserved() const {
+ return type().ToStringOrToNumberCanBeObserved(representation());
+ }
+
+ // Returns true conservatively if the program might be able to observe a
+ // ToNumber() operation on this value.
+ bool ToNumberCanBeObserved() const {
+ return type().ToStringOrToNumberCanBeObserved(representation());
+ }
+
+ MinusZeroMode GetMinusZeroMode() {
+ return CheckFlag(kBailoutOnMinusZero)
+ ? FAIL_ON_MINUS_ZERO : TREAT_MINUS_ZERO_AS_ZERO;
+ }
+
protected:
// This function must be overridden for instructions with flag kUseGVN, to
// compare the non-Operand parts of the instruction.
@@ -793,7 +928,20 @@ class HValue: public ZoneObject {
UNREACHABLE();
return false;
}
+
+ virtual Representation RepresentationFromInputs() {
+ return representation();
+ }
+ Representation RepresentationFromUses();
+ Representation RepresentationFromUseRequirements();
+ bool HasNonSmiUse();
+ virtual void UpdateRepresentation(Representation new_rep,
+ HInferRepresentationPhase* h_infer,
+ const char* reason);
+ void AddDependantsToWorklist(HInferRepresentationPhase* h_infer);
+
virtual void RepresentationChanged(Representation to) { }
+
virtual Range* InferRange(Zone* zone);
virtual void DeleteFromGraph() = 0;
virtual void InternalSetOperandAt(int index, HValue* value) = 0;
@@ -803,7 +951,6 @@ class HValue: public ZoneObject {
}
void set_representation(Representation r) {
- // Representation is set-once.
ASSERT(representation_.IsNone() && !r.IsNone());
representation_ = r;
}
@@ -872,13 +1019,96 @@ class HValue: public ZoneObject {
};
-class HInstruction: public HValue {
+#define DECLARE_INSTRUCTION_FACTORY_P0(I) \
+ static I* New(Zone* zone, HValue* context) { \
+ return new(zone) I(); \
+}
+
+#define DECLARE_INSTRUCTION_FACTORY_P1(I, P1) \
+ static I* New(Zone* zone, HValue* context, P1 p1) { \
+ return new(zone) I(p1); \
+ }
+
+#define DECLARE_INSTRUCTION_FACTORY_P2(I, P1, P2) \
+ static I* New(Zone* zone, HValue* context, P1 p1, P2 p2) { \
+ return new(zone) I(p1, p2); \
+ }
+
+#define DECLARE_INSTRUCTION_FACTORY_P3(I, P1, P2, P3) \
+ static I* New(Zone* zone, HValue* context, P1 p1, P2 p2, P3 p3) { \
+ return new(zone) I(p1, p2, p3); \
+ }
+
+#define DECLARE_INSTRUCTION_FACTORY_P4(I, P1, P2, P3, P4) \
+ static I* New(Zone* zone, \
+ HValue* context, \
+ P1 p1, \
+ P2 p2, \
+ P3 p3, \
+ P4 p4) { \
+ return new(zone) I(p1, p2, p3, p4); \
+ }
+
+#define DECLARE_INSTRUCTION_FACTORY_P5(I, P1, P2, P3, P4, P5) \
+ static I* New(Zone* zone, \
+ HValue* context, \
+ P1 p1, \
+ P2 p2, \
+ P3 p3, \
+ P4 p4, \
+ P5 p5) { \
+ return new(zone) I(p1, p2, p3, p4, p5); \
+ }
+
+#define DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P0(I) \
+ static I* New(Zone* zone, HValue* context) { \
+ return new(zone) I(context); \
+ }
+
+#define DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P1(I, P1) \
+ static I* New(Zone* zone, HValue* context, P1 p1) { \
+ return new(zone) I(context, p1); \
+ }
+
+#define DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(I, P1, P2) \
+ static I* New(Zone* zone, HValue* context, P1 p1, P2 p2) { \
+ return new(zone) I(context, p1, p2); \
+ }
+
+#define DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(I, P1, P2, P3) \
+ static I* New(Zone* zone, HValue* context, P1 p1, P2 p2, P3 p3) { \
+ return new(zone) I(context, p1, p2, p3); \
+ }
+
+#define DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P4(I, P1, P2, P3, P4) \
+ static I* New(Zone* zone, \
+ HValue* context, \
+ P1 p1, \
+ P2 p2, \
+ P3 p3, \
+ P4 p4) { \
+ return new(zone) I(context, p1, p2, p3, p4); \
+ }
+
+#define DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P5(I, P1, P2, P3, P4, P5) \
+ static I* New(Zone* zone, \
+ HValue* context, \
+ P1 p1, \
+ P2 p2, \
+ P3 p3, \
+ P4 p4, \
+ P5 p5) { \
+ return new(zone) I(context, p1, p2, p3, p4, p5); \
+ }
+
+
+class HInstruction : public HValue {
public:
HInstruction* next() const { return next_; }
HInstruction* previous() const { return previous_; }
- virtual void PrintTo(StringStream* stream);
- virtual void PrintDataTo(StringStream* stream) { }
+ virtual void PrintTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream);
bool IsLinked() const { return block() != NULL; }
void Unlink();
@@ -886,7 +1116,7 @@ class HInstruction: public HValue {
void InsertAfter(HInstruction* previous);
// The position is a write-once variable.
- int position() const { return position_; }
+ virtual int position() const V8_OVERRIDE { return position_; }
bool has_position() const { return position_ != RelocInfo::kNoPosition; }
void set_position(int position) {
ASSERT(!has_position());
@@ -899,7 +1129,7 @@ class HInstruction: public HValue {
virtual LInstruction* CompileToLithium(LChunkBuilder* builder) = 0;
#ifdef DEBUG
- virtual void Verify();
+ virtual void Verify() V8_OVERRIDE;
#endif
virtual bool IsCall() { return false; }
@@ -907,14 +1137,15 @@ class HInstruction: public HValue {
DECLARE_ABSTRACT_INSTRUCTION(Instruction)
protected:
- HInstruction()
- : next_(NULL),
+ HInstruction(HType type = HType::Tagged())
+ : HValue(type),
+ next_(NULL),
previous_(NULL),
position_(RelocInfo::kNoPosition) {
SetGVNFlag(kDependsOnOsrEntries);
}
- virtual void DeleteFromGraph() { Unlink(); }
+ virtual void DeleteFromGraph() V8_OVERRIDE { Unlink(); }
private:
void InitializeAsFirst(HBasicBlock* block) {
@@ -935,24 +1166,35 @@ class HInstruction: public HValue {
template<int V>
class HTemplateInstruction : public HInstruction {
public:
- int OperandCount() { return V; }
- HValue* OperandAt(int i) const { return inputs_[i]; }
+ virtual int OperandCount() V8_FINAL V8_OVERRIDE { return V; }
+ virtual HValue* OperandAt(int i) const V8_FINAL V8_OVERRIDE {
+ return inputs_[i];
+ }
protected:
- void InternalSetOperandAt(int i, HValue* value) { inputs_[i] = value; }
+ HTemplateInstruction(HType type = HType::Tagged()) : HInstruction(type) {}
+
+ virtual void InternalSetOperandAt(int i, HValue* value) V8_FINAL V8_OVERRIDE {
+ inputs_[i] = value;
+ }
private:
EmbeddedContainer<HValue*, V> inputs_;
};
-class HControlInstruction: public HInstruction {
+class HControlInstruction : public HInstruction {
public:
virtual HBasicBlock* SuccessorAt(int i) = 0;
virtual int SuccessorCount() = 0;
virtual void SetSuccessorAt(int i, HBasicBlock* block) = 0;
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ virtual bool KnownSuccessorBlock(HBasicBlock** block) {
+ *block = NULL;
+ return false;
+ }
HBasicBlock* FirstSuccessor() {
return SuccessorCount() > 0 ? SuccessorAt(0) : NULL;
@@ -961,11 +1203,17 @@ class HControlInstruction: public HInstruction {
return SuccessorCount() > 1 ? SuccessorAt(1) : NULL;
}
+ void Not() {
+ HBasicBlock* swap = SuccessorAt(0);
+ SetSuccessorAt(0, SuccessorAt(1));
+ SetSuccessorAt(1, swap);
+ }
+
DECLARE_ABSTRACT_INSTRUCTION(ControlInstruction)
};
-class HSuccessorIterator BASE_EMBEDDED {
+class HSuccessorIterator V8_FINAL BASE_EMBEDDED {
public:
explicit HSuccessorIterator(HControlInstruction* instr)
: instr_(instr), current_(0) { }
@@ -981,18 +1229,22 @@ class HSuccessorIterator BASE_EMBEDDED {
template<int S, int V>
-class HTemplateControlInstruction: public HControlInstruction {
+class HTemplateControlInstruction : public HControlInstruction {
public:
- int SuccessorCount() { return S; }
- HBasicBlock* SuccessorAt(int i) { return successors_[i]; }
- void SetSuccessorAt(int i, HBasicBlock* block) { successors_[i] = block; }
+ int SuccessorCount() V8_OVERRIDE { return S; }
+ HBasicBlock* SuccessorAt(int i) V8_OVERRIDE { return successors_[i]; }
+ void SetSuccessorAt(int i, HBasicBlock* block) V8_OVERRIDE {
+ successors_[i] = block;
+ }
- int OperandCount() { return V; }
- HValue* OperandAt(int i) const { return inputs_[i]; }
+ int OperandCount() V8_OVERRIDE { return V; }
+ HValue* OperandAt(int i) const V8_OVERRIDE { return inputs_[i]; }
protected:
- void InternalSetOperandAt(int i, HValue* value) { inputs_[i] = value; }
+ void InternalSetOperandAt(int i, HValue* value) V8_OVERRIDE {
+ inputs_[i] = value;
+ }
private:
EmbeddedContainer<HBasicBlock*, S> successors_;
@@ -1000,9 +1252,9 @@ class HTemplateControlInstruction: public HControlInstruction {
};
-class HBlockEntry: public HTemplateInstruction<0> {
+class HBlockEntry V8_FINAL : public HTemplateInstruction<0> {
public:
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
}
@@ -1010,80 +1262,101 @@ class HBlockEntry: public HTemplateInstruction<0> {
};
-// We insert soft-deoptimize when we hit code with unknown typefeedback,
-// so that we get a chance of re-optimizing with useful typefeedback.
-// HSoftDeoptimize does not end a basic block as opposed to HDeoptimize.
-class HSoftDeoptimize: public HTemplateInstruction<0> {
+class HDummyUse V8_FINAL : public HTemplateInstruction<1> {
public:
- virtual Representation RequiredInputRepresentation(int index) {
+ explicit HDummyUse(HValue* value)
+ : HTemplateInstruction<1>(HType::Smi()) {
+ SetOperandAt(0, value);
+ // Pretend to be a Smi so that the HChange instructions inserted
+ // before any use generate as little code as possible.
+ set_representation(Representation::Tagged());
+ }
+
+ HValue* value() { return OperandAt(0); }
+
+ virtual bool HasEscapingOperandAt(int index) V8_OVERRIDE { return false; }
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
}
- DECLARE_CONCRETE_INSTRUCTION(SoftDeoptimize)
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ DECLARE_CONCRETE_INSTRUCTION(DummyUse);
};
-class HDeoptimize: public HControlInstruction {
+// Inserts an int3/stop break instruction for debugging purposes.
+class HDebugBreak V8_FINAL : public HTemplateInstruction<0> {
public:
- HDeoptimize(int environment_length, Zone* zone)
- : values_(environment_length, zone) { }
+ DECLARE_INSTRUCTION_FACTORY_P0(HDebugBreak);
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
}
- virtual int OperandCount() { return values_.length(); }
- virtual HValue* OperandAt(int index) const { return values_[index]; }
- virtual void PrintDataTo(StringStream* stream);
+ DECLARE_CONCRETE_INSTRUCTION(DebugBreak)
+};
- virtual int SuccessorCount() { return 0; }
- virtual HBasicBlock* SuccessorAt(int i) {
- UNREACHABLE();
- return NULL;
- }
- virtual void SetSuccessorAt(int i, HBasicBlock* block) {
- UNREACHABLE();
- }
- void AddEnvironmentValue(HValue* value, Zone* zone) {
- values_.Add(NULL, zone);
- SetOperandAt(values_.length() - 1, value);
+class HGoto V8_FINAL : public HTemplateControlInstruction<1, 0> {
+ public:
+ explicit HGoto(HBasicBlock* target) {
+ SetSuccessorAt(0, target);
}
- DECLARE_CONCRETE_INSTRUCTION(Deoptimize)
-
- enum UseEnvironment {
- kNoUses,
- kUseAll
- };
+ virtual bool KnownSuccessorBlock(HBasicBlock** block) V8_OVERRIDE {
+ *block = FirstSuccessor();
+ return true;
+ }
- protected:
- virtual void InternalSetOperandAt(int index, HValue* value) {
- values_[index] = value;
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ return Representation::None();
}
- private:
- ZoneList<HValue*> values_;
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ DECLARE_CONCRETE_INSTRUCTION(Goto)
};
-class HGoto: public HTemplateControlInstruction<1, 0> {
+class HDeoptimize V8_FINAL : public HTemplateControlInstruction<1, 0> {
public:
- explicit HGoto(HBasicBlock* target) {
- SetSuccessorAt(0, target);
+ static HInstruction* New(Zone* zone,
+ HValue* context,
+ const char* reason,
+ Deoptimizer::BailoutType type,
+ HBasicBlock* unreachable_continuation) {
+ return new(zone) HDeoptimize(reason, type, unreachable_continuation);
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual bool KnownSuccessorBlock(HBasicBlock** block) V8_OVERRIDE {
+ *block = NULL;
+ return true;
+ }
+
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
}
- virtual void PrintDataTo(StringStream* stream);
+ const char* reason() const { return reason_; }
+ Deoptimizer::BailoutType type() { return type_; }
- DECLARE_CONCRETE_INSTRUCTION(Goto)
+ DECLARE_CONCRETE_INSTRUCTION(Deoptimize)
+
+ private:
+ explicit HDeoptimize(const char* reason,
+ Deoptimizer::BailoutType type,
+ HBasicBlock* unreachable_continuation)
+ : reason_(reason), type_(type) {
+ SetSuccessorAt(0, unreachable_continuation);
+ }
+
+ const char* reason_;
+ Deoptimizer::BailoutType type_;
};
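
// Usage sketch for the factory above (placeholder arguments; the reason
// string and bailout type are illustrative assumptions):
//
//   HInstruction* deopt =
//       HDeoptimize::New(zone, context, "insufficient type feedback",
//                        Deoptimizer::EAGER, unreachable_block);
//
// The resulting control instruction's only successor is the unreachable
// continuation block, and KnownSuccessorBlock() reports NULL.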
-class HUnaryControlInstruction: public HTemplateControlInstruction<2, 1> {
+class HUnaryControlInstruction : public HTemplateControlInstruction<2, 1> {
public:
HUnaryControlInstruction(HValue* value,
HBasicBlock* true_target,
@@ -1093,29 +1366,27 @@ class HUnaryControlInstruction: public HTemplateControlInstruction<2, 1> {
SetSuccessorAt(1, false_target);
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
HValue* value() { return OperandAt(0); }
};
-class HBranch: public HUnaryControlInstruction {
+class HBranch V8_FINAL : public HUnaryControlInstruction {
public:
- HBranch(HValue* value,
- HBasicBlock* true_target,
- HBasicBlock* false_target,
- ToBooleanStub::Types expected_input_types = ToBooleanStub::no_types())
- : HUnaryControlInstruction(value, true_target, false_target),
- expected_input_types_(expected_input_types) {
- ASSERT(true_target != NULL && false_target != NULL);
- }
- explicit HBranch(HValue* value)
- : HUnaryControlInstruction(value, NULL, NULL) { }
-
+ DECLARE_INSTRUCTION_FACTORY_P1(HBranch, HValue*);
+ DECLARE_INSTRUCTION_FACTORY_P2(HBranch, HValue*,
+ ToBooleanStub::Types);
+ DECLARE_INSTRUCTION_FACTORY_P4(HBranch, HValue*,
+ ToBooleanStub::Types,
+ HBasicBlock*, HBasicBlock*);
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
}
+ virtual Representation observed_input_representation(int index) V8_OVERRIDE;
+
+ virtual bool KnownSuccessorBlock(HBasicBlock** block) V8_OVERRIDE;
ToBooleanStub::Types expected_input_types() const {
return expected_input_types_;
@@ -1124,69 +1395,121 @@ class HBranch: public HUnaryControlInstruction {
DECLARE_CONCRETE_INSTRUCTION(Branch)
private:
+ HBranch(HValue* value,
+ ToBooleanStub::Types expected_input_types = ToBooleanStub::Types(),
+ HBasicBlock* true_target = NULL,
+ HBasicBlock* false_target = NULL)
+ : HUnaryControlInstruction(value, true_target, false_target),
+ expected_input_types_(expected_input_types) {
+ SetFlag(kAllowUndefinedAsNaN);
+ }
+
ToBooleanStub::Types expected_input_types_;
};
-class HCompareMap: public HUnaryControlInstruction {
+class HCompareMap V8_FINAL : public HUnaryControlInstruction {
public:
+ DECLARE_INSTRUCTION_FACTORY_P2(HCompareMap, HValue*, Handle<Map>);
+ DECLARE_INSTRUCTION_FACTORY_P4(HCompareMap, HValue*, Handle<Map>,
+ HBasicBlock*, HBasicBlock*);
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ Unique<Map> map() const { return map_; }
+
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ return Representation::Tagged();
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(CompareMap)
+
+ protected:
+ virtual int RedefinedOperandIndex() { return 0; }
+
+ private:
HCompareMap(HValue* value,
Handle<Map> map,
- HBasicBlock* true_target,
- HBasicBlock* false_target)
+ HBasicBlock* true_target = NULL,
+ HBasicBlock* false_target = NULL)
: HUnaryControlInstruction(value, true_target, false_target),
- map_(map) {
- ASSERT(true_target != NULL);
- ASSERT(false_target != NULL);
+ map_(Unique<Map>(map)) {
ASSERT(!map.is_null());
}
- virtual void PrintDataTo(StringStream* stream);
+ Unique<Map> map_;
+};
- Handle<Map> map() const { return map_; }
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
+class HContext V8_FINAL : public HTemplateInstruction<0> {
+ public:
+ static HContext* New(Zone* zone) {
+ return new(zone) HContext();
}
- DECLARE_CONCRETE_INSTRUCTION(CompareMap)
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ return Representation::None();
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(Context)
+
+ protected:
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
private:
- Handle<Map> map_;
+ HContext() {
+ set_representation(Representation::Tagged());
+ SetFlag(kUseGVN);
+ }
+
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
};
-class HReturn: public HTemplateControlInstruction<0, 1> {
+class HReturn V8_FINAL : public HTemplateControlInstruction<0, 3> {
public:
- explicit HReturn(HValue* value) {
- SetOperandAt(0, value);
- }
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HReturn, HValue*, HValue*);
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P1(HReturn, HValue*);
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
HValue* value() { return OperandAt(0); }
+ HValue* context() { return OperandAt(1); }
+ HValue* parameter_count() { return OperandAt(2); }
DECLARE_CONCRETE_INSTRUCTION(Return)
+
+ private:
+ HReturn(HValue* context, HValue* value, HValue* parameter_count = 0) {
+ SetOperandAt(0, value);
+ SetOperandAt(1, context);
+ SetOperandAt(2, parameter_count);
+ }
};
-class HAbnormalExit: public HTemplateControlInstruction<0, 0> {
+class HAbnormalExit V8_FINAL : public HTemplateControlInstruction<0, 0> {
public:
- virtual Representation RequiredInputRepresentation(int index) {
+ DECLARE_INSTRUCTION_FACTORY_P0(HAbnormalExit);
+
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
}
DECLARE_CONCRETE_INSTRUCTION(AbnormalExit)
+ private:
+ HAbnormalExit() {}
};
-class HUnaryOperation: public HTemplateInstruction<1> {
+class HUnaryOperation : public HTemplateInstruction<1> {
public:
- explicit HUnaryOperation(HValue* value) {
+ HUnaryOperation(HValue* value, HType type = HType::Tagged())
+ : HTemplateInstruction<1>(type) {
SetOperandAt(0, value);
}
@@ -1195,19 +1518,15 @@ class HUnaryOperation: public HTemplateInstruction<1> {
}
HValue* value() const { return OperandAt(0); }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class HThrow: public HTemplateInstruction<2> {
+class HThrow V8_FINAL : public HTemplateInstruction<2> {
public:
- HThrow(HValue* context, HValue* value) {
- SetOperandAt(0, context);
- SetOperandAt(1, value);
- SetAllSideEffects();
- }
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P1(HThrow, HValue*);
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
@@ -1215,122 +1534,161 @@ class HThrow: public HTemplateInstruction<2> {
HValue* value() { return OperandAt(1); }
DECLARE_CONCRETE_INSTRUCTION(Throw)
+
+ private:
+ HThrow(HValue* context, HValue* value) {
+ SetOperandAt(0, context);
+ SetOperandAt(1, value);
+ SetAllSideEffects();
+ }
};
-class HUseConst: public HUnaryOperation {
+class HUseConst V8_FINAL : public HUnaryOperation {
public:
- explicit HUseConst(HValue* old_value) : HUnaryOperation(old_value) { }
+ DECLARE_INSTRUCTION_FACTORY_P1(HUseConst, HValue*);
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
}
DECLARE_CONCRETE_INSTRUCTION(UseConst)
+
+ private:
+ explicit HUseConst(HValue* old_value) : HUnaryOperation(old_value) { }
};
-class HForceRepresentation: public HTemplateInstruction<1> {
+class HForceRepresentation V8_FINAL : public HTemplateInstruction<1> {
public:
- HForceRepresentation(HValue* value, Representation required_representation) {
- SetOperandAt(0, value);
- set_representation(required_representation);
- }
+ DECLARE_INSTRUCTION_FACTORY_P2(HForceRepresentation, HValue*, Representation);
HValue* value() { return OperandAt(0); }
- virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
+ virtual HValue* EnsureAndPropagateNotMinusZero(
+ BitVector* visited) V8_OVERRIDE;
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return representation(); // Same as the output representation.
}
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
DECLARE_CONCRETE_INSTRUCTION(ForceRepresentation)
+
+ private:
+ HForceRepresentation(HValue* value, Representation required_representation) {
+ SetOperandAt(0, value);
+ set_representation(required_representation);
+ }
};
-class HChange: public HUnaryOperation {
+class HChange V8_FINAL : public HUnaryOperation {
public:
HChange(HValue* value,
Representation to,
- bool is_truncating,
- bool deoptimize_on_undefined)
+ bool is_truncating_to_smi,
+ bool is_truncating_to_int32)
: HUnaryOperation(value) {
- ASSERT(!value->representation().IsNone() && !to.IsNone());
+ ASSERT(!value->representation().IsNone());
+ ASSERT(!to.IsNone());
ASSERT(!value->representation().Equals(to));
set_representation(to);
- set_type(HType::TaggedNumber());
SetFlag(kUseGVN);
- if (deoptimize_on_undefined) SetFlag(kDeoptimizeOnUndefined);
- if (is_truncating) SetFlag(kTruncatingToInt32);
- if (to.IsTagged()) SetGVNFlag(kChangesNewSpacePromotion);
+ if (is_truncating_to_smi) {
+ SetFlag(kTruncatingToSmi);
+ SetFlag(kTruncatingToInt32);
+ }
+ if (is_truncating_to_int32) SetFlag(kTruncatingToInt32);
+ if (value->representation().IsSmi() || value->type().IsSmi()) {
+ set_type(HType::Smi());
+ } else {
+ set_type(HType::TaggedNumber());
+ if (to.IsTagged()) SetGVNFlag(kChangesNewSpacePromotion);
+ }
}
- virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
- virtual HType CalculateInferredType();
- virtual HValue* Canonicalize();
+ bool can_convert_undefined_to_nan() {
+ return CheckUsesForFlag(kAllowUndefinedAsNaN);
+ }
+
+ virtual HValue* EnsureAndPropagateNotMinusZero(
+ BitVector* visited) V8_OVERRIDE;
+ virtual HType CalculateInferredType() V8_OVERRIDE;
+ virtual HValue* Canonicalize() V8_OVERRIDE;
Representation from() const { return value()->representation(); }
Representation to() const { return representation(); }
- bool deoptimize_on_undefined() const {
- return CheckFlag(kDeoptimizeOnUndefined);
- }
bool deoptimize_on_minus_zero() const {
return CheckFlag(kBailoutOnMinusZero);
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return from();
}
- virtual Range* InferRange(Zone* zone);
+ virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(Change)
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
private:
- virtual bool IsDeletable() const {
+ virtual bool IsDeletable() const V8_OVERRIDE {
return !from().IsTagged() || value()->type().IsSmi();
}
};
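
// Minimal sketch of a lossy tagged-to-int32 conversion using the public
// constructor above (|value| and |zone| are placeholders):
//
//   HChange* change = new(zone) HChange(value, Representation::Integer32(),
//                                       false,  // is_truncating_to_smi
//                                       true);  // is_truncating_to_int32
//
// Note that truncating to Smi implies truncating to int32 as well, per the
// flags set in the constructor.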
-class HClampToUint8: public HUnaryOperation {
+class HClampToUint8 V8_FINAL : public HUnaryOperation {
public:
- explicit HClampToUint8(HValue* value)
- : HUnaryOperation(value) {
- set_representation(Representation::Integer32());
- SetFlag(kUseGVN);
- }
+ DECLARE_INSTRUCTION_FACTORY_P1(HClampToUint8, HValue*);
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
}
DECLARE_CONCRETE_INSTRUCTION(ClampToUint8)
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
private:
- virtual bool IsDeletable() const { return true; }
+ explicit HClampToUint8(HValue* value)
+ : HUnaryOperation(value) {
+ set_representation(Representation::Integer32());
+ SetFlag(kAllowUndefinedAsNaN);
+ SetFlag(kUseGVN);
+ }
+
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
};
-class HSimulate: public HInstruction {
+enum RemovableSimulate {
+ REMOVABLE_SIMULATE,
+ FIXED_SIMULATE
+};
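
// REMOVABLE_SIMULATE marks a simulate that later passes may fold into a
// neighboring one (see HSimulate::MergeWith() and is_candidate_for_removal()
// below); a FIXED_SIMULATE is never a candidate for such removal.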
+
+
+class HSimulate V8_FINAL : public HInstruction {
public:
- HSimulate(BailoutId ast_id, int pop_count, Zone* zone)
+ HSimulate(BailoutId ast_id,
+ int pop_count,
+ Zone* zone,
+ RemovableSimulate removable)
: ast_id_(ast_id),
pop_count_(pop_count),
values_(2, zone),
assigned_indexes_(2, zone),
- zone_(zone) {}
- virtual ~HSimulate() {}
+ zone_(zone),
+ removable_(removable) {}
+ ~HSimulate() {}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
bool HasAstId() const { return !ast_id_.IsNone(); }
BailoutId ast_id() const { return ast_id_; }
@@ -1354,21 +1712,38 @@ class HSimulate: public HInstruction {
void AddPushedValue(HValue* value) {
AddValue(kNoIndex, value);
}
- virtual int OperandCount() { return values_.length(); }
- virtual HValue* OperandAt(int index) const { return values_[index]; }
+ int ToOperandIndex(int environment_index) {
+ for (int i = 0; i < assigned_indexes_.length(); ++i) {
+ if (assigned_indexes_[i] == environment_index) return i;
+ }
+ return -1;
+ }
+ virtual int OperandCount() V8_OVERRIDE { return values_.length(); }
+ virtual HValue* OperandAt(int index) const V8_OVERRIDE {
+ return values_[index];
+ }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual bool HasEscapingOperandAt(int index) V8_OVERRIDE { return false; }
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
}
+ void MergeWith(ZoneList<HSimulate*>* list);
+ bool is_candidate_for_removal() { return removable_ == REMOVABLE_SIMULATE; }
+
+ // Replay effects of this instruction on the given environment.
+ void ReplayEnvironment(HEnvironment* env);
+
DECLARE_CONCRETE_INSTRUCTION(Simulate)
#ifdef DEBUG
- virtual void Verify();
+ virtual void Verify() V8_OVERRIDE;
+ void set_closure(Handle<JSFunction> closure) { closure_ = closure; }
+ Handle<JSFunction> closure() const { return closure_; }
#endif
protected:
- virtual void InternalSetOperandAt(int index, HValue* value) {
+ virtual void InternalSetOperandAt(int index, HValue* value) V8_OVERRIDE {
values_[index] = value;
}
@@ -1382,29 +1757,81 @@ class HSimulate: public HInstruction {
// use lists are correctly updated.
SetOperandAt(values_.length() - 1, value);
}
+ bool HasValueForIndex(int index) {
+ for (int i = 0; i < assigned_indexes_.length(); ++i) {
+ if (assigned_indexes_[i] == index) return true;
+ }
+ return false;
+ }
BailoutId ast_id_;
int pop_count_;
ZoneList<HValue*> values_;
ZoneList<int> assigned_indexes_;
Zone* zone_;
+ RemovableSimulate removable_;
+
+#ifdef DEBUG
+ Handle<JSFunction> closure_;
+#endif
};
-class HStackCheck: public HTemplateInstruction<1> {
+class HEnvironmentMarker V8_FINAL : public HTemplateInstruction<1> {
+ public:
+ enum Kind { BIND, LOOKUP };
+
+ DECLARE_INSTRUCTION_FACTORY_P2(HEnvironmentMarker, Kind, int);
+
+ Kind kind() { return kind_; }
+ int index() { return index_; }
+ HSimulate* next_simulate() { return next_simulate_; }
+ void set_next_simulate(HSimulate* simulate) {
+ next_simulate_ = simulate;
+ }
+
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ return Representation::None();
+ }
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+#ifdef DEBUG
+ void set_closure(Handle<JSFunction> closure) {
+ ASSERT(closure_.is_null());
+ ASSERT(!closure.is_null());
+ closure_ = closure;
+ }
+ Handle<JSFunction> closure() const { return closure_; }
+#endif
+
+ DECLARE_CONCRETE_INSTRUCTION(EnvironmentMarker);
+
+ private:
+ HEnvironmentMarker(Kind kind, int index)
+ : kind_(kind), index_(index), next_simulate_(NULL) { }
+
+ Kind kind_;
+ int index_;
+ HSimulate* next_simulate_;
+
+#ifdef DEBUG
+ Handle<JSFunction> closure_;
+#endif
+};
+
+
+class HStackCheck V8_FINAL : public HTemplateInstruction<1> {
public:
enum Type {
kFunctionEntry,
kBackwardsBranch
};
- HStackCheck(HValue* context, Type type) : type_(type) {
- SetOperandAt(0, context);
- SetGVNFlag(kChangesNewSpacePromotion);
- }
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P1(HStackCheck, Type);
HValue* context() { return OperandAt(0); }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
@@ -1412,7 +1839,7 @@ class HStackCheck: public HTemplateInstruction<1> {
// The stack check eliminator might try to eliminate the same stack
// check instruction multiple times.
if (IsLinked()) {
- DeleteFromGraph();
+ DeleteAndReplaceWith(NULL);
}
}
@@ -1422,6 +1849,11 @@ class HStackCheck: public HTemplateInstruction<1> {
DECLARE_CONCRETE_INSTRUCTION(StackCheck)
private:
+ HStackCheck(HValue* context, Type type) : type_(type) {
+ SetOperandAt(0, context);
+ SetGVNFlag(kChangesNewSpacePromotion);
+ }
+
Type type_;
};
@@ -1435,158 +1867,173 @@ enum InliningKind {
};
-class HEnterInlined: public HTemplateInstruction<0> {
+class HArgumentsObject;
+
+
+class HEnterInlined V8_FINAL : public HTemplateInstruction<0> {
public:
- HEnterInlined(Handle<JSFunction> closure,
- int arguments_count,
- FunctionLiteral* function,
- CallKind call_kind,
- InliningKind inlining_kind,
- Variable* arguments_var,
- ZoneList<HValue*>* arguments_values)
- : closure_(closure),
- arguments_count_(arguments_count),
- arguments_pushed_(false),
- function_(function),
- call_kind_(call_kind),
- inlining_kind_(inlining_kind),
- arguments_var_(arguments_var),
- arguments_values_(arguments_values) {
+ static HEnterInlined* New(Zone* zone,
+ HValue* context,
+ Handle<JSFunction> closure,
+ int arguments_count,
+ FunctionLiteral* function,
+ InliningKind inlining_kind,
+ Variable* arguments_var,
+ HArgumentsObject* arguments_object,
+ bool undefined_receiver) {
+ return new(zone) HEnterInlined(closure, arguments_count, function,
+ inlining_kind, arguments_var,
+ arguments_object, undefined_receiver, zone);
}
- virtual void PrintDataTo(StringStream* stream);
+ void RegisterReturnTarget(HBasicBlock* return_target, Zone* zone);
+ ZoneList<HBasicBlock*>* return_targets() { return &return_targets_; }
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<JSFunction> closure() const { return closure_; }
int arguments_count() const { return arguments_count_; }
bool arguments_pushed() const { return arguments_pushed_; }
void set_arguments_pushed() { arguments_pushed_ = true; }
FunctionLiteral* function() const { return function_; }
- CallKind call_kind() const { return call_kind_; }
InliningKind inlining_kind() const { return inlining_kind_; }
+ bool undefined_receiver() const { return undefined_receiver_; }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
}
Variable* arguments_var() { return arguments_var_; }
- ZoneList<HValue*>* arguments_values() { return arguments_values_; }
+ HArgumentsObject* arguments_object() { return arguments_object_; }
DECLARE_CONCRETE_INSTRUCTION(EnterInlined)
private:
+ HEnterInlined(Handle<JSFunction> closure,
+ int arguments_count,
+ FunctionLiteral* function,
+ InliningKind inlining_kind,
+ Variable* arguments_var,
+ HArgumentsObject* arguments_object,
+ bool undefined_receiver,
+ Zone* zone)
+ : closure_(closure),
+ arguments_count_(arguments_count),
+ arguments_pushed_(false),
+ function_(function),
+ inlining_kind_(inlining_kind),
+ arguments_var_(arguments_var),
+ arguments_object_(arguments_object),
+ undefined_receiver_(undefined_receiver),
+ return_targets_(2, zone) {
+ }
+
Handle<JSFunction> closure_;
int arguments_count_;
bool arguments_pushed_;
FunctionLiteral* function_;
- CallKind call_kind_;
InliningKind inlining_kind_;
Variable* arguments_var_;
- ZoneList<HValue*>* arguments_values_;
+ HArgumentsObject* arguments_object_;
+ bool undefined_receiver_;
+ ZoneList<HBasicBlock*> return_targets_;
};
-class HLeaveInlined: public HTemplateInstruction<0> {
+class HLeaveInlined V8_FINAL : public HTemplateInstruction<0> {
public:
- HLeaveInlined() { }
+ HLeaveInlined(HEnterInlined* entry,
+ int drop_count)
+ : entry_(entry),
+ drop_count_(drop_count) { }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
}
+ virtual int argument_delta() const V8_OVERRIDE {
+ return entry_->arguments_pushed() ? -drop_count_ : 0;
+ }
+
DECLARE_CONCRETE_INSTRUCTION(LeaveInlined)
+
+ private:
+ HEnterInlined* entry_;
+ int drop_count_;
};
-class HPushArgument: public HUnaryOperation {
+class HPushArgument V8_FINAL : public HUnaryOperation {
public:
- explicit HPushArgument(HValue* value) : HUnaryOperation(value) {
- set_representation(Representation::Tagged());
- }
+ DECLARE_INSTRUCTION_FACTORY_P1(HPushArgument, HValue*);
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
+ virtual int argument_delta() const V8_OVERRIDE { return 1; }
HValue* argument() { return OperandAt(0); }
DECLARE_CONCRETE_INSTRUCTION(PushArgument)
+
+ private:
+ explicit HPushArgument(HValue* value) : HUnaryOperation(value) {
+ set_representation(Representation::Tagged());
+ }
};
-class HThisFunction: public HTemplateInstruction<0> {
+class HThisFunction V8_FINAL : public HTemplateInstruction<0> {
public:
- HThisFunction() {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- }
+ DECLARE_INSTRUCTION_FACTORY_P0(HThisFunction);
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
}
DECLARE_CONCRETE_INSTRUCTION(ThisFunction)
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
private:
- virtual bool IsDeletable() const { return true; }
-};
-
-
-class HContext: public HTemplateInstruction<0> {
- public:
- HContext() {
+ HThisFunction() {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
}
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::None();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(Context)
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-
- private:
- virtual bool IsDeletable() const { return true; }
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
};
-class HOuterContext: public HUnaryOperation {
+class HOuterContext V8_FINAL : public HUnaryOperation {
public:
- explicit HOuterContext(HValue* inner) : HUnaryOperation(inner) {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- }
+ DECLARE_INSTRUCTION_FACTORY_P1(HOuterContext, HValue*);
DECLARE_CONCRETE_INSTRUCTION(OuterContext);
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
private:
- virtual bool IsDeletable() const { return true; }
+ explicit HOuterContext(HValue* inner) : HUnaryOperation(inner) {
+ set_representation(Representation::Tagged());
+ SetFlag(kUseGVN);
+ }
+
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
};
-class HDeclareGlobals: public HUnaryOperation {
+class HDeclareGlobals V8_FINAL : public HUnaryOperation {
public:
- HDeclareGlobals(HValue* context,
- Handle<FixedArray> pairs,
- int flags)
- : HUnaryOperation(context),
- pairs_(pairs),
- flags_(flags) {
- set_representation(Representation::Tagged());
- SetAllSideEffects();
- }
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HDeclareGlobals,
+ Handle<FixedArray>,
+ int);
HValue* context() { return OperandAt(0); }
Handle<FixedArray> pairs() const { return pairs_; }
@@ -1594,60 +2041,75 @@ class HDeclareGlobals: public HUnaryOperation {
DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals)
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
+
private:
+ HDeclareGlobals(HValue* context,
+ Handle<FixedArray> pairs,
+ int flags)
+ : HUnaryOperation(context),
+ pairs_(pairs),
+ flags_(flags) {
+ set_representation(Representation::Tagged());
+ SetAllSideEffects();
+ }
+
Handle<FixedArray> pairs_;
int flags_;
};
-class HGlobalObject: public HUnaryOperation {
+class HGlobalObject V8_FINAL : public HUnaryOperation {
public:
- explicit HGlobalObject(HValue* context) : HUnaryOperation(context) {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- }
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P0(HGlobalObject);
DECLARE_CONCRETE_INSTRUCTION(GlobalObject)
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
private:
- virtual bool IsDeletable() const { return true; }
+ explicit HGlobalObject(HValue* context) : HUnaryOperation(context) {
+ set_representation(Representation::Tagged());
+ SetFlag(kUseGVN);
+ }
+
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
};
-class HGlobalReceiver: public HUnaryOperation {
+class HGlobalReceiver V8_FINAL : public HUnaryOperation {
public:
- explicit HGlobalReceiver(HValue* global_object)
- : HUnaryOperation(global_object) {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- }
+ DECLARE_INSTRUCTION_FACTORY_P1(HGlobalReceiver, HValue*);
DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver)
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
private:
- virtual bool IsDeletable() const { return true; }
+ explicit HGlobalReceiver(HValue* global_object)
+ : HUnaryOperation(global_object) {
+ set_representation(Representation::Tagged());
+ SetFlag(kUseGVN);
+ }
+
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
};
template <int V>
-class HCall: public HTemplateInstruction<V> {
+class HCall : public HTemplateInstruction<V> {
public:
// The argument count includes the receiver.
explicit HCall<V>(int argument_count) : argument_count_(argument_count) {
@@ -1655,35 +2117,44 @@ class HCall: public HTemplateInstruction<V> {
this->SetAllSideEffects();
}
- virtual HType CalculateInferredType() { return HType::Tagged(); }
+ virtual HType CalculateInferredType() V8_FINAL V8_OVERRIDE {
+ return HType::Tagged();
+ }
+
+ virtual int argument_count() const {
+ return argument_count_;
+ }
- virtual int argument_count() const { return argument_count_; }
+ virtual int argument_delta() const V8_OVERRIDE {
+ return -argument_count();
+ }
- virtual bool IsCall() { return true; }
+ virtual bool IsCall() V8_FINAL V8_OVERRIDE { return true; }
private:
int argument_count_;
};
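
// argument_delta() models the effect of a call on the simulated argument
// stack: each HPushArgument above contributes +1, and the call then consumes
// argument_count() slots, receiver included. A call with a receiver and two
// arguments therefore reports an argument_delta() of -3, balancing the three
// preceding pushes.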
-class HUnaryCall: public HCall<1> {
+class HUnaryCall : public HCall<1> {
public:
HUnaryCall(HValue* value, int argument_count)
: HCall<1>(argument_count) {
SetOperandAt(0, value);
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(
+ int index) V8_FINAL V8_OVERRIDE {
return Representation::Tagged();
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
HValue* value() { return OperandAt(0); }
};
-class HBinaryCall: public HCall<2> {
+class HBinaryCall : public HCall<2> {
public:
HBinaryCall(HValue* first, HValue* second, int argument_count)
: HCall<2>(argument_count) {
@@ -1691,9 +2162,10 @@ class HBinaryCall: public HCall<2> {
SetOperandAt(1, second);
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(
+ int index) V8_FINAL V8_OVERRIDE {
return Representation::Tagged();
}
@@ -1702,11 +2174,9 @@ class HBinaryCall: public HCall<2> {
};
-class HInvokeFunction: public HBinaryCall {
+class HInvokeFunction V8_FINAL : public HBinaryCall {
public:
- HInvokeFunction(HValue* context, HValue* function, int argument_count)
- : HBinaryCall(context, function, argument_count) {
- }
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HInvokeFunction, HValue*, int);
HInvokeFunction(HValue* context,
HValue* function,
@@ -1714,273 +2184,280 @@ class HInvokeFunction: public HBinaryCall {
int argument_count)
: HBinaryCall(context, function, argument_count),
known_function_(known_function) {
+ formal_parameter_count_ = known_function.is_null()
+ ? 0 : known_function->shared()->formal_parameter_count();
}
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
+ static HInvokeFunction* New(Zone* zone,
+ HValue* context,
+ HValue* function,
+ Handle<JSFunction> known_function,
+ int argument_count) {
+ return new(zone) HInvokeFunction(context, function,
+ known_function, argument_count);
}
HValue* context() { return first(); }
HValue* function() { return second(); }
Handle<JSFunction> known_function() { return known_function_; }
+ int formal_parameter_count() const { return formal_parameter_count_; }
DECLARE_CONCRETE_INSTRUCTION(InvokeFunction)
private:
+ HInvokeFunction(HValue* context, HValue* function, int argument_count)
+ : HBinaryCall(context, function, argument_count) {
+ }
+
Handle<JSFunction> known_function_;
+ int formal_parameter_count_;
};
-class HCallConstantFunction: public HCall<0> {
+class HCallConstantFunction V8_FINAL : public HCall<0> {
public:
- HCallConstantFunction(Handle<JSFunction> function, int argument_count)
- : HCall<0>(argument_count), function_(function) { }
+ DECLARE_INSTRUCTION_FACTORY_P2(HCallConstantFunction,
+ Handle<JSFunction>,
+ int);
Handle<JSFunction> function() const { return function_; }
+ int formal_parameter_count() const { return formal_parameter_count_; }
bool IsApplyFunction() const {
return function_->code() ==
- Isolate::Current()->builtins()->builtin(Builtins::kFunctionApply);
+ function_->GetIsolate()->builtins()->builtin(Builtins::kFunctionApply);
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
}
DECLARE_CONCRETE_INSTRUCTION(CallConstantFunction)
private:
+ HCallConstantFunction(Handle<JSFunction> function, int argument_count)
+ : HCall<0>(argument_count),
+ function_(function),
+ formal_parameter_count_(function->shared()->formal_parameter_count()) {}
+
Handle<JSFunction> function_;
+ int formal_parameter_count_;
};
-class HCallKeyed: public HBinaryCall {
+class HCallKeyed V8_FINAL : public HBinaryCall {
public:
- HCallKeyed(HValue* context, HValue* key, int argument_count)
- : HBinaryCall(context, key, argument_count) {
- }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HCallKeyed, HValue*, int);
HValue* context() { return first(); }
HValue* key() { return second(); }
DECLARE_CONCRETE_INSTRUCTION(CallKeyed)
+
+ private:
+ HCallKeyed(HValue* context, HValue* key, int argument_count)
+ : HBinaryCall(context, key, argument_count) {
+ }
};
-class HCallNamed: public HUnaryCall {
+class HCallNamed V8_FINAL : public HUnaryCall {
public:
- HCallNamed(HValue* context, Handle<String> name, int argument_count)
- : HUnaryCall(context, argument_count), name_(name) {
- }
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HCallNamed, Handle<String>, int);
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
HValue* context() { return value(); }
Handle<String> name() const { return name_; }
DECLARE_CONCRETE_INSTRUCTION(CallNamed)
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
+ private:
+ HCallNamed(HValue* context, Handle<String> name, int argument_count)
+ : HUnaryCall(context, argument_count), name_(name) {
}
- private:
Handle<String> name_;
};
-class HCallFunction: public HBinaryCall {
+class HCallFunction V8_FINAL : public HBinaryCall {
public:
- HCallFunction(HValue* context, HValue* function, int argument_count)
- : HBinaryCall(context, function, argument_count) {
- }
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HCallFunction, HValue*, int);
HValue* context() { return first(); }
HValue* function() { return second(); }
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
DECLARE_CONCRETE_INSTRUCTION(CallFunction)
+
+ private:
+ HCallFunction(HValue* context, HValue* function, int argument_count)
+ : HBinaryCall(context, function, argument_count) {
+ }
};
-class HCallGlobal: public HUnaryCall {
+class HCallGlobal V8_FINAL : public HUnaryCall {
public:
- HCallGlobal(HValue* context, Handle<String> name, int argument_count)
- : HUnaryCall(context, argument_count), name_(name) {
- }
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HCallGlobal, Handle<String>, int);
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
HValue* context() { return value(); }
Handle<String> name() const { return name_; }
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
DECLARE_CONCRETE_INSTRUCTION(CallGlobal)
private:
+ HCallGlobal(HValue* context, Handle<String> name, int argument_count)
+ : HUnaryCall(context, argument_count), name_(name) {
+ }
+
Handle<String> name_;
};
-class HCallKnownGlobal: public HCall<0> {
+class HCallKnownGlobal V8_FINAL : public HCall<0> {
public:
- HCallKnownGlobal(Handle<JSFunction> target, int argument_count)
- : HCall<0>(argument_count), target_(target) { }
+ DECLARE_INSTRUCTION_FACTORY_P2(HCallKnownGlobal, Handle<JSFunction>, int);
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<JSFunction> target() const { return target_; }
+ int formal_parameter_count() const { return formal_parameter_count_; }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
}
DECLARE_CONCRETE_INSTRUCTION(CallKnownGlobal)
private:
+ HCallKnownGlobal(Handle<JSFunction> target, int argument_count)
+ : HCall<0>(argument_count),
+ target_(target),
+ formal_parameter_count_(target->shared()->formal_parameter_count()) { }
+
Handle<JSFunction> target_;
+ int formal_parameter_count_;
};
-class HCallNew: public HBinaryCall {
+class HCallNew V8_FINAL : public HBinaryCall {
public:
- HCallNew(HValue* context, HValue* constructor, int argument_count)
- : HBinaryCall(context, constructor, argument_count) {
- }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HCallNew, HValue*, int);
HValue* context() { return first(); }
HValue* constructor() { return second(); }
DECLARE_CONCRETE_INSTRUCTION(CallNew)
+
+ private:
+ HCallNew(HValue* context, HValue* constructor, int argument_count)
+ : HBinaryCall(context, constructor, argument_count) {}
};
-class HCallRuntime: public HCall<1> {
+class HCallNewArray V8_FINAL : public HBinaryCall {
public:
- HCallRuntime(HValue* context,
- Handle<String> name,
- const Runtime::Function* c_function,
- int argument_count)
- : HCall<1>(argument_count), c_function_(c_function), name_(name) {
- SetOperandAt(0, context);
- }
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P4(HCallNewArray,
+ HValue*,
+ int,
+ Handle<Cell>,
+ ElementsKind);
- virtual void PrintDataTo(StringStream* stream);
+ HValue* context() { return first(); }
+ HValue* constructor() { return second(); }
- HValue* context() { return OperandAt(0); }
- const Runtime::Function* function() const { return c_function_; }
- Handle<String> name() const { return name_; }
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
+ Handle<Cell> property_cell() const {
+ return type_cell_;
}
- DECLARE_CONCRETE_INSTRUCTION(CallRuntime)
+ ElementsKind elements_kind() const { return elements_kind_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CallNewArray)
private:
- const Runtime::Function* c_function_;
- Handle<String> name_;
+ HCallNewArray(HValue* context, HValue* constructor, int argument_count,
+ Handle<Cell> type_cell, ElementsKind elements_kind)
+ : HBinaryCall(context, constructor, argument_count),
+ elements_kind_(elements_kind),
+ type_cell_(type_cell) {}
+
+ ElementsKind elements_kind_;
+ Handle<Cell> type_cell_;
};
-class HJSArrayLength: public HTemplateInstruction<2> {
+class HCallRuntime V8_FINAL : public HCall<1> {
public:
- HJSArrayLength(HValue* value, HValue* typecheck,
- HType type = HType::Tagged()) {
- set_type(type);
- // The length of an array is stored as a tagged value in the array
- // object. It is guaranteed to be 32 bit integer, but it can be
- // represented as either a smi or heap number.
- SetOperandAt(0, value);
- SetOperandAt(1, typecheck);
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- SetGVNFlag(kDependsOnArrayLengths);
- SetGVNFlag(kDependsOnMaps);
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(HCallRuntime,
+ Handle<String>,
+ const Runtime::Function*,
+ int);
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ HValue* context() { return OperandAt(0); }
+ const Runtime::Function* function() const { return c_function_; }
+ Handle<String> name() const { return name_; }
+ SaveFPRegsMode save_doubles() const { return save_doubles_; }
+ void set_save_doubles(SaveFPRegsMode save_doubles) {
+ save_doubles_ = save_doubles;
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
- virtual void PrintDataTo(StringStream* stream);
-
- HValue* value() { return OperandAt(0); }
- HValue* typecheck() { return OperandAt(1); }
-
- DECLARE_CONCRETE_INSTRUCTION(JSArrayLength)
-
- protected:
- virtual bool DataEquals(HValue* other_raw) { return true; }
+ DECLARE_CONCRETE_INSTRUCTION(CallRuntime)
private:
- virtual bool IsDeletable() const { return true; }
+ HCallRuntime(HValue* context,
+ Handle<String> name,
+ const Runtime::Function* c_function,
+ int argument_count)
+ : HCall<1>(argument_count), c_function_(c_function), name_(name),
+ save_doubles_(kDontSaveFPRegs) {
+ SetOperandAt(0, context);
+ }
+
+ const Runtime::Function* c_function_;
+ Handle<String> name_;
+ SaveFPRegsMode save_doubles_;
};
-class HFixedArrayBaseLength: public HUnaryOperation {
+class HMapEnumLength V8_FINAL : public HUnaryOperation {
public:
- explicit HFixedArrayBaseLength(HValue* value) : HUnaryOperation(value) {
- set_type(HType::Smi());
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- SetGVNFlag(kDependsOnArrayLengths);
- }
+ DECLARE_INSTRUCTION_FACTORY_P1(HMapEnumLength, HValue*);
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
- DECLARE_CONCRETE_INSTRUCTION(FixedArrayBaseLength)
+ DECLARE_CONCRETE_INSTRUCTION(MapEnumLength)
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
private:
- virtual bool IsDeletable() const { return true; }
-};
-
-
-class HMapEnumLength: public HUnaryOperation {
- public:
- explicit HMapEnumLength(HValue* value) : HUnaryOperation(value) {
- set_type(HType::Smi());
- set_representation(Representation::Tagged());
+ explicit HMapEnumLength(HValue* value)
+ : HUnaryOperation(value, HType::Smi()) {
+ set_representation(Representation::Smi());
SetFlag(kUseGVN);
SetGVNFlag(kDependsOnMaps);
}
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(MapEnumLength)
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-
- private:
- virtual bool IsDeletable() const { return true; }
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
};
-class HElementsKind: public HUnaryOperation {
+class HElementsKind V8_FINAL : public HUnaryOperation {
public:
explicit HElementsKind(HValue* value) : HUnaryOperation(value) {
set_representation(Representation::Integer32());
@@ -1988,97 +2465,46 @@ class HElementsKind: public HUnaryOperation {
SetGVNFlag(kDependsOnElementsKind);
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
DECLARE_CONCRETE_INSTRUCTION(ElementsKind)
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
private:
- virtual bool IsDeletable() const { return true; }
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
};
-class HBitNot: public HUnaryOperation {
+class HUnaryMathOperation V8_FINAL : public HTemplateInstruction<2> {
public:
- explicit HBitNot(HValue* value) : HUnaryOperation(value) {
- set_representation(Representation::Integer32());
- SetFlag(kUseGVN);
- SetFlag(kTruncatingToInt32);
- }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Integer32();
- }
- virtual HType CalculateInferredType();
-
- virtual HValue* Canonicalize();
-
- DECLARE_CONCRETE_INSTRUCTION(BitNot)
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-
- private:
- virtual bool IsDeletable() const { return true; }
-};
-
-
-class HUnaryMathOperation: public HTemplateInstruction<2> {
- public:
- HUnaryMathOperation(HValue* context, HValue* value, BuiltinFunctionId op)
- : op_(op) {
- SetOperandAt(0, context);
- SetOperandAt(1, value);
- switch (op) {
- case kMathFloor:
- case kMathRound:
- case kMathCeil:
- set_representation(Representation::Integer32());
- break;
- case kMathAbs:
- set_representation(Representation::Tagged());
- SetFlag(kFlexibleRepresentation);
- SetGVNFlag(kChangesNewSpacePromotion);
- break;
- case kMathSqrt:
- case kMathPowHalf:
- case kMathLog:
- case kMathSin:
- case kMathCos:
- case kMathTan:
- set_representation(Representation::Double());
- SetGVNFlag(kChangesNewSpacePromotion);
- break;
- default:
- UNREACHABLE();
- }
- SetFlag(kUseGVN);
- }
+ static HInstruction* New(Zone* zone,
+ HValue* context,
+ HValue* value,
+ BuiltinFunctionId op);
HValue* context() { return OperandAt(0); }
HValue* value() { return OperandAt(1); }
- virtual void PrintDataTo(StringStream* stream);
-
- virtual HType CalculateInferredType();
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
+ virtual HValue* EnsureAndPropagateNotMinusZero(
+ BitVector* visited) V8_OVERRIDE;
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
if (index == 0) {
return Representation::Tagged();
} else {
switch (op_) {
case kMathFloor:
case kMathRound:
- case kMathCeil:
case kMathSqrt:
case kMathPowHalf:
case kMathLog:
+ case kMathExp:
case kMathSin:
case kMathCos:
case kMathTan:
@@ -2092,7 +2518,10 @@ class HUnaryMathOperation: public HTemplateInstruction<2> {
}
}
- virtual HValue* Canonicalize();
+ virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
+
+ virtual HValue* Canonicalize() V8_OVERRIDE;
+ virtual Representation RepresentationFromInputs() V8_OVERRIDE;
BuiltinFunctionId op() const { return op_; }
const char* OpName() const;
@@ -2100,49 +2529,106 @@ class HUnaryMathOperation: public HTemplateInstruction<2> {
DECLARE_CONCRETE_INSTRUCTION(UnaryMathOperation)
protected:
- virtual bool DataEquals(HValue* other) {
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE {
HUnaryMathOperation* b = HUnaryMathOperation::cast(other);
return op_ == b->op();
}
private:
- virtual bool IsDeletable() const { return true; }
+ HUnaryMathOperation(HValue* context, HValue* value, BuiltinFunctionId op)
+ : HTemplateInstruction<2>(HType::TaggedNumber()), op_(op) {
+ SetOperandAt(0, context);
+ SetOperandAt(1, value);
+ switch (op) {
+ case kMathFloor:
+ case kMathRound:
+ set_representation(Representation::Integer32());
+ break;
+ case kMathAbs:
+ // Not setting representation here: it is None intentionally.
+ SetFlag(kFlexibleRepresentation);
+ // TODO(svenpanne) This flag is actually only needed if representation()
+ // is tagged, and not when it is an unboxed double or unboxed integer.
+ SetGVNFlag(kChangesNewSpacePromotion);
+ break;
+ case kMathLog:
+ case kMathSin:
+ case kMathCos:
+ case kMathTan:
+ set_representation(Representation::Double());
+ // These operations use the TranscendentalCache, so they may allocate.
+ SetGVNFlag(kChangesNewSpacePromotion);
+ break;
+ case kMathExp:
+ case kMathSqrt:
+ case kMathPowHalf:
+ set_representation(Representation::Double());
+ break;
+ default:
+ UNREACHABLE();
+ }
+ SetFlag(kUseGVN);
+ SetFlag(kAllowUndefinedAsNaN);
+ }
+
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
BuiltinFunctionId op_;
};
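
// Usage sketch for the static factory declared above (|input| is a
// placeholder name for the operand):
//
//   HInstruction* sqrt =
//       HUnaryMathOperation::New(zone, context, input, kMathSqrt);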
-class HLoadElements: public HTemplateInstruction<2> {
+class HLoadRoot V8_FINAL : public HTemplateInstruction<0> {
public:
- HLoadElements(HValue* value, HValue* typecheck) {
- SetOperandAt(0, value);
- SetOperandAt(1, typecheck);
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- SetGVNFlag(kDependsOnElementsPointer);
+ DECLARE_INSTRUCTION_FACTORY_P1(HLoadRoot, Heap::RootListIndex);
+ DECLARE_INSTRUCTION_FACTORY_P2(HLoadRoot, Heap::RootListIndex, HType);
+
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ return Representation::None();
}
- HValue* value() { return OperandAt(0); }
- HValue* typecheck() { return OperandAt(1); }
+ Heap::RootListIndex index() const { return index_; }
- virtual void PrintDataTo(StringStream* stream);
+ DECLARE_CONCRETE_INSTRUCTION(LoadRoot)
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
+ protected:
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE {
+ HLoadRoot* b = HLoadRoot::cast(other);
+ return index_ == b->index_;
}
- DECLARE_CONCRETE_INSTRUCTION(LoadElements)
+ private:
+ HLoadRoot(Heap::RootListIndex index, HType type = HType::Tagged())
+ : HTemplateInstruction<0>(type), index_(index) {
+ SetFlag(kUseGVN);
+ // TODO(bmeurer): We'll need kDependsOnRoots once we add the
+ // corresponding HStoreRoot instruction.
+ SetGVNFlag(kDependsOnCalls);
+ }
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
- private:
- virtual bool IsDeletable() const { return true; }
+ const Heap::RootListIndex index_;
};
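
// Usage sketch, assuming DECLARE_INSTRUCTION_FACTORY_P1 expands, as elsewhere
// in this header, to a static New(Zone*, HValue* context, ...) overload that
// forwards to the private constructor:
//
//   HLoadRoot* undef =
//       HLoadRoot::New(zone, context, Heap::kUndefinedValueRootIndex);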
-class HLoadExternalArrayPointer: public HUnaryOperation {
+class HLoadExternalArrayPointer V8_FINAL : public HUnaryOperation {
public:
+ DECLARE_INSTRUCTION_FACTORY_P1(HLoadExternalArrayPointer, HValue*);
+
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ return Representation::Tagged();
+ }
+
+ virtual HType CalculateInferredType() V8_OVERRIDE {
+ return HType::None();
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadExternalArrayPointer)
+
+ protected:
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+
+ private:
explicit HLoadExternalArrayPointer(HValue* value)
: HUnaryOperation(value) {
set_representation(Representation::External());
@@ -2153,152 +2639,161 @@ class HLoadExternalArrayPointer: public HUnaryOperation {
SetFlag(kUseGVN);
}
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadExternalArrayPointer)
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-
- private:
- virtual bool IsDeletable() const { return true; }
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
};
-class HCheckMaps: public HTemplateInstruction<2> {
+class HCheckMaps V8_FINAL : public HTemplateInstruction<2> {
public:
- HCheckMaps(HValue* value, Handle<Map> map, Zone* zone,
- HValue* typecheck = NULL) {
- SetOperandAt(0, value);
- // If callers don't depend on a typecheck, they can pass in NULL. In that
- // case we use a copy of the |value| argument as a dummy value.
- SetOperandAt(1, typecheck != NULL ? typecheck : value);
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- SetGVNFlag(kDependsOnMaps);
- SetGVNFlag(kDependsOnElementsKind);
- map_set()->Add(map, zone);
- }
- HCheckMaps(HValue* value, SmallMapList* maps, Zone* zone) {
- SetOperandAt(0, value);
- SetOperandAt(1, value);
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- SetGVNFlag(kDependsOnMaps);
- SetGVNFlag(kDependsOnElementsKind);
+ static HCheckMaps* New(Zone* zone, HValue* context, HValue* value,
+ Handle<Map> map, CompilationInfo* info,
+ HValue *typecheck = NULL);
+ static HCheckMaps* New(Zone* zone, HValue* context,
+ HValue* value, SmallMapList* maps,
+ HValue *typecheck = NULL) {
+ HCheckMaps* check_map = new(zone) HCheckMaps(value, zone, typecheck);
for (int i = 0; i < maps->length(); i++) {
- map_set()->Add(maps->at(i), zone);
+ check_map->Add(maps->at(i), zone);
}
- map_set()->Sort();
- }
-
- static HCheckMaps* NewWithTransitions(HValue* object, Handle<Map> map,
- Zone* zone) {
- HCheckMaps* check_map = new(zone) HCheckMaps(object, map, zone);
- SmallMapList* map_set = check_map->map_set();
-
- // Since transitioned elements maps of the initial map don't fail the map
- // check, the CheckMaps instruction doesn't need to depend on ElementsKinds.
- check_map->ClearGVNFlag(kDependsOnElementsKind);
-
- ElementsKind kind = map->elements_kind();
- bool packed = IsFastPackedElementsKind(kind);
- while (CanTransitionToMoreGeneralFastElementsKind(kind, packed)) {
- kind = GetNextMoreGeneralFastElementsKind(kind, packed);
- Map* transitioned_map =
- map->LookupElementsTransitionMap(kind);
- if (transitioned_map) {
- map_set->Add(Handle<Map>(transitioned_map), zone);
- }
- };
- map_set->Sort();
return check_map;
}
- virtual Representation RequiredInputRepresentation(int index) {
+ bool CanOmitMapChecks() { return omit_; }
+
+ virtual bool HasEscapingOperandAt(int index) V8_OVERRIDE { return false; }
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
- virtual void PrintDataTo(StringStream* stream);
- virtual HType CalculateInferredType();
+ virtual void HandleSideEffectDominator(GVNFlag side_effect,
+ HValue* dominator) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
HValue* value() { return OperandAt(0); }
- SmallMapList* map_set() { return &map_set_; }
+
+ Unique<Map> first_map() const { return map_set_.at(0); }
+ UniqueSet<Map> map_set() const { return map_set_; }
+
+ bool has_migration_target() const {
+ return has_migration_target_;
+ }
DECLARE_CONCRETE_INSTRUCTION(CheckMaps)
protected:
- virtual bool DataEquals(HValue* other) {
- HCheckMaps* b = HCheckMaps::cast(other);
- // Relies on the fact that map_set has been sorted before.
- if (map_set()->length() != b->map_set()->length()) return false;
- for (int i = 0; i < map_set()->length(); i++) {
- if (!map_set()->at(i).is_identical_to(b->map_set()->at(i))) return false;
- }
- return true;
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE {
+ return this->map_set_.Equals(&HCheckMaps::cast(other)->map_set_);
}
+ virtual int RedefinedOperandIndex() { return 0; }
+
private:
- SmallMapList map_set_;
+ void Add(Handle<Map> map, Zone* zone) {
+ map_set_.Add(Unique<Map>(map), zone);
+ if (!has_migration_target_ && map->is_migration_target()) {
+ has_migration_target_ = true;
+ SetGVNFlag(kChangesNewSpacePromotion);
+ }
+ }
+
+ // Clients should use one of the static New* methods above.
+ HCheckMaps(HValue* value, Zone *zone, HValue* typecheck)
+ : HTemplateInstruction<2>(value->type()),
+ omit_(false), has_migration_target_(false) {
+ SetOperandAt(0, value);
+ // Use the object value for the dependency if NULL is passed.
+ SetOperandAt(1, typecheck != NULL ? typecheck : value);
+ set_representation(Representation::Tagged());
+ SetFlag(kUseGVN);
+ SetFlag(kTrackSideEffectDominators);
+ SetGVNFlag(kDependsOnMaps);
+ SetGVNFlag(kDependsOnElementsKind);
+ }
+
+ bool omit_;
+ bool has_migration_target_;
+ UniqueSet<Map> map_set_;
};
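
// Usage sketch for the SmallMapList overload of New() above (|receiver| and
// |maps| are placeholder names for the checked value and its expected maps):
//
//   HCheckMaps* check = HCheckMaps::New(zone, context, receiver, maps);
//
// Add() also tags the instruction with kChangesNewSpacePromotion when one of
// the maps is a migration target, presumably because performing the check may
// then migrate the object to its up-to-date map.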
-class HCheckFunction: public HUnaryOperation {
+class HCheckValue V8_FINAL : public HUnaryOperation {
public:
- HCheckFunction(HValue* value, Handle<JSFunction> function)
- : HUnaryOperation(value), target_(function) {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
+ static HCheckValue* New(Zone* zone, HValue* context,
+ HValue* value, Handle<JSFunction> func) {
+ bool in_new_space = zone->isolate()->heap()->InNewSpace(*func);
+ // NOTE: We create an uninitialized Unique and initialize it later.
+ // This is because a JSFunction can move due to GC during graph creation.
+ // TODO(titzer): This is a migration crutch. Replace with some kind of
+ // Uniqueness scope later.
+ Unique<JSFunction> target = Unique<JSFunction>::CreateUninitialized(func);
+ HCheckValue* check = new(zone) HCheckValue(value, target, in_new_space);
+ return check;
+ }
+ static HCheckValue* New(Zone* zone, HValue* context,
+ HValue* value, Unique<HeapObject> target,
+ bool object_in_new_space) {
+ return new(zone) HCheckValue(value, target, object_in_new_space);
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual void FinalizeUniqueness() V8_OVERRIDE {
+ object_ = Unique<HeapObject>(object_.handle());
+ }
+
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
- virtual void PrintDataTo(StringStream* stream);
- virtual HType CalculateInferredType();
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ virtual HValue* Canonicalize() V8_OVERRIDE;
#ifdef DEBUG
- virtual void Verify();
+ virtual void Verify() V8_OVERRIDE;
#endif
- Handle<JSFunction> target() const { return target_; }
+ Unique<HeapObject> object() const { return object_; }
+ bool object_in_new_space() const { return object_in_new_space_; }
- DECLARE_CONCRETE_INSTRUCTION(CheckFunction)
+ DECLARE_CONCRETE_INSTRUCTION(CheckValue)
protected:
- virtual bool DataEquals(HValue* other) {
- HCheckFunction* b = HCheckFunction::cast(other);
- return target_.is_identical_to(b->target());
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE {
+ HCheckValue* b = HCheckValue::cast(other);
+ return object_ == b->object_;
}
private:
- Handle<JSFunction> target_;
+ HCheckValue(HValue* value, Unique<HeapObject> object,
+ bool object_in_new_space)
+ : HUnaryOperation(value, value->type()),
+ object_(object),
+ object_in_new_space_(object_in_new_space) {
+ set_representation(Representation::Tagged());
+ SetFlag(kUseGVN);
+ }
+
+ Unique<HeapObject> object_;
+ bool object_in_new_space_;
};
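
// Sketch of the Handle<JSFunction> factory above (|closure_value| and |func|
// are placeholders): the Unique is created uninitialized so that no raw
// object address is captured while the graph is still being built and the
// function can move during GC; FinalizeUniqueness() re-reads the handle once
// construction is done.
//
//   HCheckValue* check = HCheckValue::New(zone, context, closure_value, func);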
-class HCheckInstanceType: public HUnaryOperation {
+class HCheckInstanceType V8_FINAL : public HUnaryOperation {
public:
- static HCheckInstanceType* NewIsSpecObject(HValue* value, Zone* zone) {
- return new(zone) HCheckInstanceType(value, IS_SPEC_OBJECT);
- }
- static HCheckInstanceType* NewIsJSArray(HValue* value, Zone* zone) {
- return new(zone) HCheckInstanceType(value, IS_JS_ARRAY);
- }
- static HCheckInstanceType* NewIsString(HValue* value, Zone* zone) {
- return new(zone) HCheckInstanceType(value, IS_STRING);
- }
- static HCheckInstanceType* NewIsSymbol(HValue* value, Zone* zone) {
- return new(zone) HCheckInstanceType(value, IS_SYMBOL);
- }
+ enum Check {
+ IS_SPEC_OBJECT,
+ IS_JS_ARRAY,
+ IS_STRING,
+ IS_INTERNALIZED_STRING,
+ LAST_INTERVAL_CHECK = IS_JS_ARRAY
+ };
- virtual void PrintDataTo(StringStream* stream);
+ DECLARE_INSTRUCTION_FACTORY_P2(HCheckInstanceType, HValue*, Check);
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
- virtual HValue* Canonicalize();
+ virtual HValue* Canonicalize() V8_OVERRIDE;
bool is_interval_check() const { return check_ <= LAST_INTERVAL_CHECK; }
void GetCheckInterval(InstanceType* first, InstanceType* last);
@@ -2310,20 +2805,14 @@ class HCheckInstanceType: public HUnaryOperation {
// TODO(ager): It could be nice to allow the omission of instance
// type checks if we have already performed an instance type check
// with a larger range.
- virtual bool DataEquals(HValue* other) {
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE {
HCheckInstanceType* b = HCheckInstanceType::cast(other);
return check_ == b->check_;
}
- private:
- enum Check {
- IS_SPEC_OBJECT,
- IS_JS_ARRAY,
- IS_STRING,
- IS_SYMBOL,
- LAST_INTERVAL_CHECK = IS_JS_ARRAY
- };
+ virtual int RedefinedOperandIndex() { return 0; }
+ private:
const char* GetCheckName();
HCheckInstanceType(HValue* value, Check check)
@@ -2336,146 +2825,349 @@ class HCheckInstanceType: public HUnaryOperation {
};
-class HCheckNonSmi: public HUnaryOperation {
+class HCheckSmi V8_FINAL : public HUnaryOperation {
public:
- explicit HCheckNonSmi(HValue* value) : HUnaryOperation(value) {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- }
+ DECLARE_INSTRUCTION_FACTORY_P1(HCheckSmi, HValue*);
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
- virtual HType CalculateInferredType();
-
-#ifdef DEBUG
- virtual void Verify();
-#endif
-
- virtual HValue* Canonicalize() {
+ virtual HValue* Canonicalize() V8_OVERRIDE {
HType value_type = value()->type();
- if (!value_type.IsUninitialized() &&
- (value_type.IsHeapNumber() ||
- value_type.IsString() ||
- value_type.IsBoolean() ||
- value_type.IsNonPrimitive())) {
+ if (value_type.IsSmi()) {
return NULL;
}
return this;
}
- DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi)
+ DECLARE_CONCRETE_INSTRUCTION(CheckSmi)
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+
+ private:
+ explicit HCheckSmi(HValue* value) : HUnaryOperation(value, HType::Smi()) {
+ set_representation(Representation::Smi());
+ SetFlag(kUseGVN);
+ }
};
-class HCheckPrototypeMaps: public HTemplateInstruction<0> {
+class HCheckHeapObject V8_FINAL : public HUnaryOperation {
public:
- HCheckPrototypeMaps(Handle<JSObject> prototype, Handle<JSObject> holder)
- : prototype_(prototype), holder_(holder) {
- SetFlag(kUseGVN);
- SetGVNFlag(kDependsOnMaps);
+ DECLARE_INSTRUCTION_FACTORY_P1(HCheckHeapObject, HValue*);
+
+ virtual bool HasEscapingOperandAt(int index) V8_OVERRIDE { return false; }
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ return Representation::Tagged();
}
#ifdef DEBUG
- virtual void Verify();
+ virtual void Verify() V8_OVERRIDE;
#endif
- Handle<JSObject> prototype() const { return prototype_; }
- Handle<JSObject> holder() const { return holder_; }
+ virtual HValue* Canonicalize() V8_OVERRIDE {
+ return value()->type().IsHeapObject() ? NULL : this;
+ }
- DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps)
+ DECLARE_CONCRETE_INSTRUCTION(CheckHeapObject)
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::None();
+ protected:
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+
+ private:
+ explicit HCheckHeapObject(HValue* value)
+ : HUnaryOperation(value, HType::NonPrimitive()) {
+ set_representation(Representation::Tagged());
+ SetFlag(kUseGVN);
}
+};
- virtual void PrintDataTo(StringStream* stream);
- virtual intptr_t Hashcode() {
- ASSERT_ALLOCATION_DISABLED;
- intptr_t hash = reinterpret_cast<intptr_t>(*prototype());
- hash = 17 * hash + reinterpret_cast<intptr_t>(*holder());
- return hash;
- }
+class InductionVariableData;
- protected:
- virtual bool DataEquals(HValue* other) {
- HCheckPrototypeMaps* b = HCheckPrototypeMaps::cast(other);
- return prototype_.is_identical_to(b->prototype()) &&
- holder_.is_identical_to(b->holder());
- }
- private:
- Handle<JSObject> prototype_;
- Handle<JSObject> holder_;
+struct InductionVariableLimitUpdate {
+ InductionVariableData* updated_variable;
+ HValue* limit;
+ bool limit_is_upper;
+ bool limit_is_included;
+
+ InductionVariableLimitUpdate()
+ : updated_variable(NULL), limit(NULL),
+ limit_is_upper(false), limit_is_included(false) {}
};
-class HCheckSmi: public HUnaryOperation {
+class HBoundsCheck;
+class HPhi;
+class HConstant;
+class HBitwise;
+
+
+class InductionVariableData V8_FINAL : public ZoneObject {
public:
- explicit HCheckSmi(HValue* value) : HUnaryOperation(value) {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
+ class InductionVariableCheck : public ZoneObject {
+ public:
+ HBoundsCheck* check() { return check_; }
+ InductionVariableCheck* next() { return next_; }
+ bool HasUpperLimit() { return upper_limit_ >= 0; }
+ int32_t upper_limit() {
+ ASSERT(HasUpperLimit());
+ return upper_limit_;
+ }
+ void set_upper_limit(int32_t upper_limit) {
+ upper_limit_ = upper_limit;
+ }
+
+ bool processed() { return processed_; }
+ void set_processed() { processed_ = true; }
+
+ InductionVariableCheck(HBoundsCheck* check,
+ InductionVariableCheck* next,
+ int32_t upper_limit = kNoLimit)
+ : check_(check), next_(next), upper_limit_(upper_limit),
+ processed_(false) {}
+
+ private:
+ HBoundsCheck* check_;
+ InductionVariableCheck* next_;
+ int32_t upper_limit_;
+ bool processed_;
+ };
+
+ class ChecksRelatedToLength : public ZoneObject {
+ public:
+ HValue* length() { return length_; }
+ ChecksRelatedToLength* next() { return next_; }
+ InductionVariableCheck* checks() { return checks_; }
+
+ void AddCheck(HBoundsCheck* check, int32_t upper_limit = kNoLimit);
+ void CloseCurrentBlock();
+
+ ChecksRelatedToLength(HValue* length, ChecksRelatedToLength* next)
+ : length_(length), next_(next), checks_(NULL),
+ first_check_in_block_(NULL),
+ added_index_(NULL),
+ added_constant_(NULL),
+ current_and_mask_in_block_(0),
+ current_or_mask_in_block_(0) {}
+
+ private:
+ void UseNewIndexInCurrentBlock(Token::Value token,
+ int32_t mask,
+ HValue* index_base,
+ HValue* context);
+
+ HBoundsCheck* first_check_in_block() { return first_check_in_block_; }
+ HBitwise* added_index() { return added_index_; }
+ void set_added_index(HBitwise* index) { added_index_ = index; }
+ HConstant* added_constant() { return added_constant_; }
+ void set_added_constant(HConstant* constant) { added_constant_ = constant; }
+ int32_t current_and_mask_in_block() { return current_and_mask_in_block_; }
+ int32_t current_or_mask_in_block() { return current_or_mask_in_block_; }
+ int32_t current_upper_limit() { return current_upper_limit_; }
+
+ HValue* length_;
+ ChecksRelatedToLength* next_;
+ InductionVariableCheck* checks_;
+
+ HBoundsCheck* first_check_in_block_;
+ HBitwise* added_index_;
+ HConstant* added_constant_;
+ int32_t current_and_mask_in_block_;
+ int32_t current_or_mask_in_block_;
+ int32_t current_upper_limit_;
+ };
+
+ struct LimitFromPredecessorBlock {
+ InductionVariableData* variable;
+ Token::Value token;
+ HValue* limit;
+ HBasicBlock* other_target;
+
+ bool LimitIsValid() { return token != Token::ILLEGAL; }
+
+ bool LimitIsIncluded() {
+ return Token::IsEqualityOp(token) ||
+ token == Token::GTE || token == Token::LTE;
+ }
+ bool LimitIsUpper() {
+ return token == Token::LTE || token == Token::LT || token == Token::NE;
+ }
+
+ LimitFromPredecessorBlock()
+ : variable(NULL),
+ token(Token::ILLEGAL),
+ limit(NULL),
+ other_target(NULL) {}
+ };
+
+ static const int32_t kNoLimit = -1;
+
+ static InductionVariableData* ExaminePhi(HPhi* phi);
+ static void ComputeLimitFromPredecessorBlock(
+ HBasicBlock* block,
+ LimitFromPredecessorBlock* result);
+ static bool ComputeInductionVariableLimit(
+ HBasicBlock* block,
+ InductionVariableLimitUpdate* additional_limit);
+
+ struct BitwiseDecompositionResult {
+ HValue* base;
+ int32_t and_mask;
+ int32_t or_mask;
+ HValue* context;
+
+ BitwiseDecompositionResult()
+ : base(NULL), and_mask(0), or_mask(0), context(NULL) {}
+ };
+ static void DecomposeBitwise(HValue* value,
+ BitwiseDecompositionResult* result);
+
+ void AddCheck(HBoundsCheck* check, int32_t upper_limit = kNoLimit);
+
+ bool CheckIfBranchIsLoopGuard(Token::Value token,
+ HBasicBlock* current_branch,
+ HBasicBlock* other_branch);
+
+ void UpdateAdditionalLimit(InductionVariableLimitUpdate* update);
+
+ HPhi* phi() { return phi_; }
+ HValue* base() { return base_; }
+ int32_t increment() { return increment_; }
+ HValue* limit() { return limit_; }
+ bool limit_included() { return limit_included_; }
+ HBasicBlock* limit_validity() { return limit_validity_; }
+ HBasicBlock* induction_exit_block() { return induction_exit_block_; }
+ HBasicBlock* induction_exit_target() { return induction_exit_target_; }
+ ChecksRelatedToLength* checks() { return checks_; }
+ HValue* additional_upper_limit() { return additional_upper_limit_; }
+ bool additional_upper_limit_is_included() {
+ return additional_upper_limit_is_included_;
+ }
+ HValue* additional_lower_limit() { return additional_lower_limit_; }
+ bool additional_lower_limit_is_included() {
+ return additional_lower_limit_is_included_;
+ }
+
+ bool LowerLimitIsNonNegativeConstant() {
+ if (base()->IsInteger32Constant() && base()->GetInteger32Constant() >= 0) {
+ return true;
+ }
+ if (additional_lower_limit() != NULL &&
+ additional_lower_limit()->IsInteger32Constant() &&
+ additional_lower_limit()->GetInteger32Constant() >= 0) {
+ // Ignoring the corner case of !additional_lower_limit_is_included()
+      // is safe; handling it adds unneeded complexity.
+ return true;
+ }
+ return false;
}
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
+ int32_t ComputeUpperLimit(int32_t and_mask, int32_t or_mask);
+
+ private:
+ template <class T> void swap(T* a, T* b) {
+ T c(*a);
+ *a = *b;
+ *b = c;
}
- virtual HType CalculateInferredType();
-#ifdef DEBUG
- virtual void Verify();
-#endif
+ InductionVariableData(HPhi* phi, HValue* base, int32_t increment)
+ : phi_(phi), base_(IgnoreOsrValue(base)), increment_(increment),
+ limit_(NULL), limit_included_(false), limit_validity_(NULL),
+ induction_exit_block_(NULL), induction_exit_target_(NULL),
+ checks_(NULL),
+ additional_upper_limit_(NULL),
+ additional_upper_limit_is_included_(false),
+ additional_lower_limit_(NULL),
+ additional_lower_limit_is_included_(false) {}
- DECLARE_CONCRETE_INSTRUCTION(CheckSmi)
+ static int32_t ComputeIncrement(HPhi* phi, HValue* phi_operand);
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ static HValue* IgnoreOsrValue(HValue* v);
+ static InductionVariableData* GetInductionVariableData(HValue* v);
+
+ HPhi* phi_;
+ HValue* base_;
+ int32_t increment_;
+ HValue* limit_;
+ bool limit_included_;
+ HBasicBlock* limit_validity_;
+ HBasicBlock* induction_exit_block_;
+ HBasicBlock* induction_exit_target_;
+ ChecksRelatedToLength* checks_;
+ HValue* additional_upper_limit_;
+ bool additional_upper_limit_is_included_;
+ HValue* additional_lower_limit_;
+ bool additional_lower_limit_is_included_;
};
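
// Minimal standalone sketch (toy token enum, not the V8 Token class) of the
// limit classification performed by LimitFromPredecessorBlock above: the
// comparison token guarding the loop tells us whether the limit bounds the
// induction variable from above and whether the limit value itself is still
// reachable inside the loop body.
#include <cassert>

enum class Tok { LT, LTE, GT, GTE, EQ, NE, ILLEGAL };

bool LimitIsUpper(Tok t) {
  return t == Tok::LTE || t == Tok::LT || t == Tok::NE;
}

bool LimitIsIncluded(Tok t) {
  return t == Tok::EQ || t == Tok::GTE || t == Tok::LTE;
}

int main() {
  // for (i = 0; i < n; i++): the guard is LT, so n is an upper limit for i
  // inside the body, and n itself is excluded.
  assert(LimitIsUpper(Tok::LT) && !LimitIsIncluded(Tok::LT));
  // for (i = 0; i <= n; i++): LTE keeps n itself inside the range.
  assert(LimitIsUpper(Tok::LTE) && LimitIsIncluded(Tok::LTE));
  // for (i = n; i > 0; i--): GT bounds i from below, not above.
  assert(!LimitIsUpper(Tok::GT));
  return 0;
}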
-class HPhi: public HValue {
+class HPhi V8_FINAL : public HValue {
public:
HPhi(int merged_index, Zone* zone)
: inputs_(2, zone),
merged_index_(merged_index),
phi_id_(-1),
- is_live_(false),
- is_convertible_to_integer_(true) {
+ induction_variable_data_(NULL) {
for (int i = 0; i < Representation::kNumRepresentations; i++) {
non_phi_uses_[i] = 0;
indirect_uses_[i] = 0;
}
- ASSERT(merged_index >= 0);
- set_representation(Representation::Tagged());
+ ASSERT(merged_index >= 0 || merged_index == kInvalidMergedIndex);
SetFlag(kFlexibleRepresentation);
+ SetFlag(kAllowUndefinedAsNaN);
}
- virtual Representation InferredRepresentation();
+ virtual Representation RepresentationFromInputs() V8_OVERRIDE;
- virtual Range* InferRange(Zone* zone);
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
+ virtual void InferRepresentation(
+ HInferRepresentationPhase* h_infer) V8_OVERRIDE;
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return representation();
}
- virtual HType CalculateInferredType();
- virtual int OperandCount() { return inputs_.length(); }
- virtual HValue* OperandAt(int index) const { return inputs_[index]; }
+ virtual Representation KnownOptimalRepresentation() V8_OVERRIDE {
+ return representation();
+ }
+ virtual HType CalculateInferredType() V8_OVERRIDE;
+ virtual int OperandCount() V8_OVERRIDE { return inputs_.length(); }
+ virtual HValue* OperandAt(int index) const V8_OVERRIDE {
+ return inputs_[index];
+ }
HValue* GetRedundantReplacement();
void AddInput(HValue* value);
bool HasRealUses();
- bool IsReceiver() { return merged_index_ == 0; }
+ bool IsReceiver() const { return merged_index_ == 0; }
+ bool HasMergedIndex() const { return merged_index_ != kInvalidMergedIndex; }
+
+ virtual int position() const V8_OVERRIDE;
int merged_index() const { return merged_index_; }
- virtual void PrintTo(StringStream* stream);
+ InductionVariableData* induction_variable_data() {
+ return induction_variable_data_;
+ }
+ bool IsInductionVariable() {
+ return induction_variable_data_ != NULL;
+ }
+ bool IsLimitedInductionVariable() {
+ return IsInductionVariable() &&
+ induction_variable_data_->limit() != NULL;
+ }
+ void DetectInductionVariable() {
+ ASSERT(induction_variable_data_ == NULL);
+ induction_variable_data_ = InductionVariableData::ExaminePhi(this);
+ }
+
+ virtual void PrintTo(StringStream* stream) V8_OVERRIDE;
#ifdef DEBUG
- virtual void Verify();
+ virtual void Verify() V8_OVERRIDE;
#endif
void InitRealUses(int id);
@@ -2485,6 +3177,9 @@ class HPhi: public HValue {
int tagged_non_phi_uses() const {
return non_phi_uses_[Representation::kTagged];
}
+ int smi_non_phi_uses() const {
+ return non_phi_uses_[Representation::kSmi];
+ }
int int32_non_phi_uses() const {
return non_phi_uses_[Representation::kInteger32];
}
@@ -2494,6 +3189,9 @@ class HPhi: public HValue {
int tagged_indirect_uses() const {
return indirect_uses_[Representation::kTagged];
}
+ int smi_indirect_uses() const {
+ return indirect_uses_[Representation::kSmi];
+ }
int int32_indirect_uses() const {
return indirect_uses_[Representation::kInteger32];
}
@@ -2501,37 +3199,21 @@ class HPhi: public HValue {
return indirect_uses_[Representation::kDouble];
}
int phi_id() { return phi_id_; }
- bool is_live() { return is_live_; }
- void set_is_live(bool b) { is_live_ = b; }
static HPhi* cast(HValue* value) {
ASSERT(value->IsPhi());
return reinterpret_cast<HPhi*>(value);
}
- virtual Opcode opcode() const { return HValue::kPhi; }
-
- virtual bool IsConvertibleToInteger() const {
- return is_convertible_to_integer_;
- }
+ virtual Opcode opcode() const V8_OVERRIDE { return HValue::kPhi; }
- void set_is_convertible_to_integer(bool b) {
- is_convertible_to_integer_ = b;
- }
+ void SimplifyConstantInputs();
- bool AllOperandsConvertibleToInteger() {
- for (int i = 0; i < OperandCount(); ++i) {
- if (!OperandAt(i)->IsConvertibleToInteger()) {
- return false;
- }
- }
- return true;
- }
-
- void ResetInteger32Uses();
+ // Marker value representing an invalid merge index.
+ static const int kInvalidMergedIndex = -1;
protected:
- virtual void DeleteFromGraph();
- virtual void InternalSetOperandAt(int index, HValue* value) {
+ virtual void DeleteFromGraph() V8_OVERRIDE;
+ virtual void InternalSetOperandAt(int index, HValue* value) V8_OVERRIDE {
inputs_[index] = value;
}
@@ -2542,97 +3224,244 @@ class HPhi: public HValue {
int non_phi_uses_[Representation::kNumRepresentations];
int indirect_uses_[Representation::kNumRepresentations];
int phi_id_;
- bool is_live_;
- bool is_convertible_to_integer_;
+ InductionVariableData* induction_variable_data_;
+
+ // TODO(titzer): we can't eliminate the receiver for generating backtraces
+ virtual bool IsDeletable() const V8_OVERRIDE { return !IsReceiver(); }
};
-class HArgumentsObject: public HTemplateInstruction<0> {
+// Common base class for HArgumentsObject and HCapturedObject.
+class HDematerializedObject : public HInstruction {
public:
- HArgumentsObject() {
- set_representation(Representation::Tagged());
- SetFlag(kIsArguments);
+ HDematerializedObject(int count, Zone* zone) : values_(count, zone) {}
+
+ virtual int OperandCount() V8_FINAL V8_OVERRIDE { return values_.length(); }
+ virtual HValue* OperandAt(int index) const V8_FINAL V8_OVERRIDE {
+ return values_[index];
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual bool HasEscapingOperandAt(int index) V8_FINAL V8_OVERRIDE {
+ return false;
+ }
+ virtual Representation RequiredInputRepresentation(
+ int index) V8_FINAL V8_OVERRIDE {
return Representation::None();
}
+ protected:
+ virtual void InternalSetOperandAt(int index,
+ HValue* value) V8_FINAL V8_OVERRIDE {
+ values_[index] = value;
+ }
+
+ // List of values tracked by this marker.
+ ZoneList<HValue*> values_;
+
+ private:
+ virtual bool IsDeletable() const V8_FINAL V8_OVERRIDE { return true; }
+};
+
+
+class HArgumentsObject V8_FINAL : public HDematerializedObject {
+ public:
+ static HArgumentsObject* New(Zone* zone, HValue* context, int count) {
+ return new(zone) HArgumentsObject(count, zone);
+ }
+
+ // The values contain a list of all elements in the arguments object
+ // including the receiver object, which is skipped when materializing.
+ const ZoneList<HValue*>* arguments_values() const { return &values_; }
+ int arguments_count() const { return values_.length(); }
+
+ void AddArgument(HValue* argument, Zone* zone) {
+ values_.Add(NULL, zone); // Resize list.
+ SetOperandAt(values_.length() - 1, argument);
+ }
+
DECLARE_CONCRETE_INSTRUCTION(ArgumentsObject)
private:
- virtual bool IsDeletable() const { return true; }
+ HArgumentsObject(int count, Zone* zone)
+ : HDematerializedObject(count, zone) {
+ set_representation(Representation::Tagged());
+ SetFlag(kIsArguments);
+ }
};
-class HConstant: public HTemplateInstruction<0> {
+class HCapturedObject V8_FINAL : public HDematerializedObject {
public:
- HConstant(Handle<Object> handle, Representation r);
- HConstant(int32_t value, Representation r);
- HConstant(double value, Representation r);
+ HCapturedObject(int length, int id, Zone* zone)
+ : HDematerializedObject(length, zone), capture_id_(id) {
+ set_representation(Representation::Tagged());
+ values_.AddBlock(NULL, length, zone); // Resize list.
+ }
+
+  // The values contain a list of all in-object properties inside the
+  // captured object, indexed by field index. Properties in the
+ // properties or elements backing store are not tracked here.
+ const ZoneList<HValue*>* values() const { return &values_; }
+ int length() const { return values_.length(); }
+ int capture_id() const { return capture_id_; }
+
+ // Shortcut for the map value of this captured object.
+ HValue* map_value() const { return values()->first(); }
+
+ void ReuseSideEffectsFromStore(HInstruction* store) {
+ ASSERT(store->HasObservableSideEffects());
+ ASSERT(store->IsStoreNamedField());
+ gvn_flags_.Add(store->gvn_flags());
+ }
+
+ // Replay effects of this instruction on the given environment.
+ void ReplayEnvironment(HEnvironment* env);
- Handle<Object> handle() {
- if (handle_.is_null()) {
- handle_ = FACTORY->NewNumber(double_value_, TENURED);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ DECLARE_CONCRETE_INSTRUCTION(CapturedObject)
+
+ private:
+ int capture_id_;
+};
+
+
+class HConstant V8_FINAL : public HTemplateInstruction<0> {
+ public:
+ DECLARE_INSTRUCTION_FACTORY_P1(HConstant, int32_t);
+ DECLARE_INSTRUCTION_FACTORY_P2(HConstant, int32_t, Representation);
+ DECLARE_INSTRUCTION_FACTORY_P1(HConstant, double);
+ DECLARE_INSTRUCTION_FACTORY_P1(HConstant, Handle<Object>);
+ DECLARE_INSTRUCTION_FACTORY_P1(HConstant, ExternalReference);
+
+ static HConstant* CreateAndInsertAfter(Zone* zone,
+ HValue* context,
+ int32_t value,
+ Representation representation,
+ HInstruction* instruction) {
+ HConstant* new_constant =
+ HConstant::New(zone, context, value, representation);
+ new_constant->InsertAfter(instruction);
+ return new_constant;
+ }
+
+ static HConstant* CreateAndInsertBefore(Zone* zone,
+ HValue* context,
+ int32_t value,
+ Representation representation,
+ HInstruction* instruction) {
+ HConstant* new_constant =
+ HConstant::New(zone, context, value, representation);
+ new_constant->InsertBefore(instruction);
+ return new_constant;
+ }
+
+ static HConstant* CreateAndInsertBefore(Zone* zone,
+ Unique<Object> unique,
+ bool is_not_in_new_space,
+ HInstruction* instruction) {
+ HConstant* new_constant = new(zone) HConstant(unique,
+ Representation::Tagged(), HType::Tagged(), false, is_not_in_new_space,
+ false, false);
+ new_constant->InsertBefore(instruction);
+ return new_constant;
+ }
+
+ Handle<Object> handle(Isolate* isolate) {
+ if (object_.handle().is_null()) {
+ // Default arguments to is_not_in_new_space depend on this heap number
+      // being tenured so that it's guaranteed not to be located in new space.
+ object_ = Unique<Object>::CreateUninitialized(
+ isolate->factory()->NewNumber(double_value_, TENURED));
}
- ASSERT(has_int32_value_ || !handle_->IsSmi());
- return handle_;
+ AllowDeferredHandleDereference smi_check;
+ ASSERT(has_int32_value_ || !object_.handle()->IsSmi());
+ return object_.handle();
}
- bool InOldSpace() const { return !HEAP->InNewSpace(*handle_); }
+ bool HasMap(Handle<Map> map) {
+ Handle<Object> constant_object = handle(map->GetIsolate());
+ return constant_object->IsHeapObject() &&
+ Handle<HeapObject>::cast(constant_object)->map() == *map;
+ }
+
+ bool IsSpecialDouble() const {
+ return has_double_value_ &&
+ (BitCast<int64_t>(double_value_) == BitCast<int64_t>(-0.0) ||
+ FixedDoubleArray::is_the_hole_nan(double_value_) ||
+ std::isnan(double_value_));
+ }
+
+ bool NotInNewSpace() const {
+ return is_not_in_new_space_;
+ }
bool ImmortalImmovable() const {
if (has_int32_value_) {
return false;
}
if (has_double_value_) {
- if (BitCast<int64_t>(double_value_) == BitCast<int64_t>(-0.0) ||
- isnan(double_value_)) {
+ if (IsSpecialDouble()) {
return true;
}
return false;
}
+ if (has_external_reference_value_) {
+ return false;
+ }
- ASSERT(!handle_.is_null());
- Heap* heap = HEAP;
- // We should have handled minus_zero_value and nan_value in the
- // has_double_value_ clause above.
- ASSERT(*handle_ != heap->minus_zero_value());
- ASSERT(*handle_ != heap->nan_value());
- if (*handle_ == heap->undefined_value()) return true;
- if (*handle_ == heap->null_value()) return true;
- if (*handle_ == heap->true_value()) return true;
- if (*handle_ == heap->false_value()) return true;
- if (*handle_ == heap->the_hole_value()) return true;
- if (*handle_ == heap->empty_string()) return true;
- return false;
+ ASSERT(!object_.handle().is_null());
+ Heap* heap = isolate()->heap();
+ ASSERT(!object_.IsKnownGlobal(heap->minus_zero_value()));
+ ASSERT(!object_.IsKnownGlobal(heap->nan_value()));
+ return
+ object_.IsKnownGlobal(heap->undefined_value()) ||
+ object_.IsKnownGlobal(heap->null_value()) ||
+ object_.IsKnownGlobal(heap->true_value()) ||
+ object_.IsKnownGlobal(heap->false_value()) ||
+ object_.IsKnownGlobal(heap->the_hole_value()) ||
+ object_.IsKnownGlobal(heap->empty_string()) ||
+ object_.IsKnownGlobal(heap->empty_fixed_array());
}
- virtual Representation RequiredInputRepresentation(int index) {
+ bool IsCell() const {
+ return is_cell_;
+ }
+
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
}
- virtual bool IsConvertibleToInteger() const {
- return has_int32_value_;
+ virtual Representation KnownOptimalRepresentation() V8_OVERRIDE {
+ if (HasSmiValue() && SmiValuesAre31Bits()) return Representation::Smi();
+ if (HasInteger32Value()) return Representation::Integer32();
+ if (HasNumberValue()) return Representation::Double();
+ if (HasExternalReferenceValue()) return Representation::External();
+ return Representation::Tagged();
}
- virtual bool EmitAtUses() { return !representation().IsDouble(); }
- virtual HValue* Canonicalize();
- virtual void PrintDataTo(StringStream* stream);
- virtual HType CalculateInferredType();
- bool IsInteger() { return handle()->IsSmi(); }
+ virtual bool EmitAtUses() V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
HConstant* CopyToRepresentation(Representation r, Zone* zone) const;
- HConstant* CopyToTruncatedInt32(Zone* zone) const;
+ Maybe<HConstant*> CopyToTruncatedInt32(Zone* zone);
+ Maybe<HConstant*> CopyToTruncatedNumber(Zone* zone);
bool HasInteger32Value() const { return has_int32_value_; }
int32_t Integer32Value() const {
ASSERT(HasInteger32Value());
return int32_value_;
}
+ bool HasSmiValue() const { return has_smi_value_; }
bool HasDoubleValue() const { return has_double_value_; }
double DoubleValue() const {
ASSERT(HasDoubleValue());
return double_value_;
}
+ bool IsTheHole() const {
+ if (HasDoubleValue() && FixedDoubleArray::is_the_hole_nan(double_value_)) {
+ return true;
+ }
+ return object_.IsKnownGlobal(isolate()->heap()->the_hole_value());
+ }
bool HasNumberValue() const { return has_double_value_; }
int32_t NumberValueAsInteger32() const {
ASSERT(HasNumberValue());
@@ -2641,39 +3470,63 @@ class HConstant: public HTemplateInstruction<0> {
// representation of the number in int32_value_.
return int32_value_;
}
+ bool HasStringValue() const {
+ if (has_double_value_ || has_int32_value_) return false;
+ ASSERT(!object_.handle().is_null());
+ return type_.IsString();
+ }
+ Handle<String> StringValue() const {
+ ASSERT(HasStringValue());
+ return Handle<String>::cast(object_.handle());
+ }
+ bool HasInternalizedStringValue() const {
+ return HasStringValue() && is_internalized_string_;
+ }
- bool ToBoolean();
-
- bool IsUint32() {
- return HasInteger32Value() && (Integer32Value() >= 0);
+ bool HasExternalReferenceValue() const {
+ return has_external_reference_value_;
+ }
+ ExternalReference ExternalReferenceValue() const {
+ return external_reference_value_;
}
- virtual intptr_t Hashcode() {
- ASSERT_ALLOCATION_DISABLED;
- intptr_t hash;
+ bool HasBooleanValue() const { return type_.IsBoolean(); }
+ bool BooleanValue() const { return boolean_value_; }
+ virtual intptr_t Hashcode() V8_OVERRIDE {
if (has_int32_value_) {
- hash = static_cast<intptr_t>(int32_value_);
+ return static_cast<intptr_t>(int32_value_);
} else if (has_double_value_) {
- hash = static_cast<intptr_t>(BitCast<int64_t>(double_value_));
+ return static_cast<intptr_t>(BitCast<int64_t>(double_value_));
+ } else if (has_external_reference_value_) {
+ return reinterpret_cast<intptr_t>(external_reference_value_.address());
} else {
- ASSERT(!handle_.is_null());
- hash = reinterpret_cast<intptr_t>(*handle_);
+ ASSERT(!object_.handle().is_null());
+ return object_.Hashcode();
+ }
+ }
+
+ virtual void FinalizeUniqueness() V8_OVERRIDE {
+ if (!has_double_value_ && !has_external_reference_value_) {
+ ASSERT(!object_.handle().is_null());
+ object_ = Unique<Object>(object_.handle());
}
+ }
- return hash;
+ Unique<Object> GetUnique() const {
+ return object_;
}
#ifdef DEBUG
- virtual void Verify() { }
+ virtual void Verify() V8_OVERRIDE { }
#endif
DECLARE_CONCRETE_INSTRUCTION(Constant)
protected:
- virtual Range* InferRange(Zone* zone);
+ virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
- virtual bool DataEquals(HValue* other) {
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE {
HConstant* other_constant = HConstant::cast(other);
if (has_int32_value_) {
return other_constant->has_int32_value_ &&
@@ -2682,105 +3535,191 @@ class HConstant: public HTemplateInstruction<0> {
return other_constant->has_double_value_ &&
BitCast<int64_t>(double_value_) ==
BitCast<int64_t>(other_constant->double_value_);
+ } else if (has_external_reference_value_) {
+ return other_constant->has_external_reference_value_ &&
+ external_reference_value_ ==
+ other_constant->external_reference_value_;
} else {
- ASSERT(!handle_.is_null());
- return !other_constant->handle_.is_null() &&
- *handle_ == *other_constant->handle_;
+ if (other_constant->has_int32_value_ ||
+ other_constant->has_double_value_ ||
+ other_constant->has_external_reference_value_) {
+ return false;
+ }
+ ASSERT(!object_.handle().is_null());
+ return other_constant->object_ == object_;
}
}
private:
- virtual bool IsDeletable() const { return true; }
-
- // If this is a numerical constant, handle_ either points to to the
+ friend class HGraph;
+ HConstant(Handle<Object> handle, Representation r = Representation::None());
+ HConstant(int32_t value,
+ Representation r = Representation::None(),
+ bool is_not_in_new_space = true,
+ Unique<Object> optional = Unique<Object>(Handle<Object>::null()));
+ HConstant(double value,
+ Representation r = Representation::None(),
+ bool is_not_in_new_space = true,
+ Unique<Object> optional = Unique<Object>(Handle<Object>::null()));
+ HConstant(Unique<Object> unique,
+ Representation r,
+ HType type,
+ bool is_internalized_string,
+ bool is_not_in_new_space,
+ bool is_cell,
+ bool boolean_value);
+
+ explicit HConstant(ExternalReference reference);
+
+ void Initialize(Representation r);
+
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
+
+ // If this is a numerical constant, object_ either points to the
// HeapObject the constant originated from or is null. If the
- // constant is non-numeric, handle_ always points to a valid
+ // constant is non-numeric, object_ always points to a valid
// constant HeapObject.
- Handle<Object> handle_;
+ Unique<Object> object_;
// We store the HConstant in the most specific form safely possible.
// The two flags, has_int32_value_ and has_double_value_ tell us if
// int32_value_ and double_value_ hold valid, safe representations
// of the constant. has_int32_value_ implies has_double_value_ but
// not the converse.
+ bool has_smi_value_ : 1;
bool has_int32_value_ : 1;
bool has_double_value_ : 1;
+ bool has_external_reference_value_ : 1;
+ bool is_internalized_string_ : 1; // TODO(yangguo): make this part of HType.
+ bool is_not_in_new_space_ : 1;
+ bool is_cell_ : 1;
+ bool boolean_value_ : 1;
int32_t int32_value_;
double double_value_;
+ ExternalReference external_reference_value_;
};
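
// Illustrative, standalone version (toy struct, not V8) of the hashing scheme
// HConstant::Hashcode uses above: hash the most specific value the constant
// holds, and fall back to the referenced object's identity only for
// non-numeric constants.
#include <cassert>
#include <cstdint>
#include <cstring>

struct ToyConstant {
  bool has_int32;
  bool has_double;
  std::int32_t int32_value;
  double double_value;
  const void* object;  // stands in for the Unique<Object> identity

  std::intptr_t Hashcode() const {
    if (has_int32) return static_cast<std::intptr_t>(int32_value);
    if (has_double) {
      std::int64_t bits;
      std::memcpy(&bits, &double_value, sizeof(bits));  // BitCast equivalent
      return static_cast<std::intptr_t>(bits);
    }
    return reinterpret_cast<std::intptr_t>(object);
  }
};

int main() {
  ToyConstant a = {true, true, 7, 7.0, nullptr};
  ToyConstant b = {true, true, 7, 7.0, nullptr};
  assert(a.Hashcode() == b.Hashcode());  // equal int32 constants hash alike
  int dummy = 0;
  ToyConstant obj = {false, false, 0, 0.0, &dummy};
  assert(obj.Hashcode() == reinterpret_cast<std::intptr_t>(&dummy));
  return 0;
}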
-class HBinaryOperation: public HTemplateInstruction<3> {
+class HBinaryOperation : public HTemplateInstruction<3> {
public:
- HBinaryOperation(HValue* context, HValue* left, HValue* right) {
+ HBinaryOperation(HValue* context, HValue* left, HValue* right,
+ HType type = HType::Tagged())
+ : HTemplateInstruction<3>(type),
+ observed_output_representation_(Representation::None()) {
ASSERT(left != NULL && right != NULL);
SetOperandAt(0, context);
SetOperandAt(1, left);
SetOperandAt(2, right);
+ observed_input_representation_[0] = Representation::None();
+ observed_input_representation_[1] = Representation::None();
}
- HValue* context() { return OperandAt(0); }
- HValue* left() { return OperandAt(1); }
- HValue* right() { return OperandAt(2); }
+ HValue* context() const { return OperandAt(0); }
+ HValue* left() const { return OperandAt(1); }
+ HValue* right() const { return OperandAt(2); }
+
+ // True if switching left and right operands likely generates better code.
+ bool AreOperandsBetterSwitched() {
+ if (!IsCommutative()) return false;
+
+    // Constant operands are better off on the right; they can be inlined in
+ // many situations on most platforms.
+ if (left()->IsConstant()) return true;
+ if (right()->IsConstant()) return false;
+
+ // Otherwise, if there is only one use of the right operand, it would be
+ // better off on the left for platforms that only have 2-arg arithmetic
+    // ops (e.g. ia32, x64) that clobber the left operand.
+ return right()->UseCount() == 1;
+ }
- // TODO(kasperl): Move these helpers to the IA-32 Lithium
- // instruction sequence builder.
- HValue* LeastConstantOperand() {
- if (IsCommutative() && left()->IsConstant()) return right();
- return left();
+ HValue* BetterLeftOperand() {
+ return AreOperandsBetterSwitched() ? right() : left();
}
- HValue* MostConstantOperand() {
- if (IsCommutative() && left()->IsConstant()) return left();
- return right();
+ HValue* BetterRightOperand() {
+ return AreOperandsBetterSwitched() ? left() : right();
}
+ void set_observed_input_representation(int index, Representation rep) {
+ ASSERT(index >= 1 && index <= 2);
+ observed_input_representation_[index - 1] = rep;
+ }
+
+ virtual void initialize_output_representation(Representation observed) {
+ observed_output_representation_ = observed;
+ }
+
+ virtual Representation observed_input_representation(int index) V8_OVERRIDE {
+ if (index == 0) return Representation::Tagged();
+ return observed_input_representation_[index - 1];
+ }
+
+ virtual void UpdateRepresentation(Representation new_rep,
+ HInferRepresentationPhase* h_infer,
+ const char* reason) V8_OVERRIDE {
+ Representation rep = !FLAG_smi_binop && new_rep.IsSmi()
+ ? Representation::Integer32() : new_rep;
+ HValue::UpdateRepresentation(rep, h_infer, reason);
+ }
+
+ virtual void InferRepresentation(
+ HInferRepresentationPhase* h_infer) V8_OVERRIDE;
+ virtual Representation RepresentationFromInputs() V8_OVERRIDE;
+ Representation RepresentationFromOutput();
+ virtual void AssumeRepresentation(Representation r) V8_OVERRIDE;
+
virtual bool IsCommutative() const { return false; }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ if (index == 0) return Representation::Tagged();
+ return representation();
+ }
DECLARE_ABSTRACT_INSTRUCTION(BinaryOperation)
+
+ private:
+ bool IgnoreObservedOutputRepresentation(Representation current_rep);
+
+ Representation observed_input_representation_[2];
+ Representation observed_output_representation_;
};
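
// Standalone sketch (toy value type, not V8) of the ordering heuristic in
// HBinaryOperation::AreOperandsBetterSwitched above: for commutative
// operations a constant is preferred on the right, and otherwise a right
// operand with a single use is moved to the left so that two-address targets
// (ia32/x64-style) can clobber it without an extra copy.
#include <cassert>

struct ToyValue {
  bool is_constant;
  int use_count;
};

bool AreOperandsBetterSwitched(bool commutative,
                               const ToyValue& left,
                               const ToyValue& right) {
  if (!commutative) return false;
  if (left.is_constant) return true;   // constant folds better on the right
  if (right.is_constant) return false;
  return right.use_count == 1;         // single-use value can be clobbered
}

int main() {
  ToyValue constant = {true, 3};
  ToyValue shared = {false, 5};
  ToyValue single_use = {false, 1};
  assert(AreOperandsBetterSwitched(true, constant, shared));    // swap
  assert(!AreOperandsBetterSwitched(true, shared, constant));   // keep
  assert(AreOperandsBetterSwitched(true, shared, single_use));  // swap
  assert(!AreOperandsBetterSwitched(false, constant, shared));  // non-commutative
  return 0;
}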
-class HWrapReceiver: public HTemplateInstruction<2> {
+class HWrapReceiver V8_FINAL : public HTemplateInstruction<2> {
public:
- HWrapReceiver(HValue* receiver, HValue* function) {
- set_representation(Representation::Tagged());
- SetOperandAt(0, receiver);
- SetOperandAt(1, function);
- }
+ DECLARE_INSTRUCTION_FACTORY_P2(HWrapReceiver, HValue*, HValue*);
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
HValue* receiver() { return OperandAt(0); }
HValue* function() { return OperandAt(1); }
- virtual HValue* Canonicalize();
+ virtual HValue* Canonicalize() V8_OVERRIDE;
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(WrapReceiver)
+
+ private:
+ HWrapReceiver(HValue* receiver, HValue* function) {
+ set_representation(Representation::Tagged());
+ SetOperandAt(0, receiver);
+ SetOperandAt(1, function);
+ }
};
-class HApplyArguments: public HTemplateInstruction<4> {
+class HApplyArguments V8_FINAL : public HTemplateInstruction<4> {
public:
- HApplyArguments(HValue* function,
- HValue* receiver,
- HValue* length,
- HValue* elements) {
- set_representation(Representation::Tagged());
- SetOperandAt(0, function);
- SetOperandAt(1, receiver);
- SetOperandAt(2, length);
- SetOperandAt(3, elements);
- SetAllSideEffects();
- }
+ DECLARE_INSTRUCTION_FACTORY_P4(HApplyArguments, HValue*, HValue*, HValue*,
+ HValue*);
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
// The length is untagged, all other inputs are tagged.
return (index == 2)
? Representation::Integer32()
@@ -2793,70 +3732,81 @@ class HApplyArguments: public HTemplateInstruction<4> {
HValue* elements() { return OperandAt(3); }
DECLARE_CONCRETE_INSTRUCTION(ApplyArguments)
+
+ private:
+ HApplyArguments(HValue* function,
+ HValue* receiver,
+ HValue* length,
+ HValue* elements) {
+ set_representation(Representation::Tagged());
+ SetOperandAt(0, function);
+ SetOperandAt(1, receiver);
+ SetOperandAt(2, length);
+ SetOperandAt(3, elements);
+ SetAllSideEffects();
+ }
};
-class HArgumentsElements: public HTemplateInstruction<0> {
+class HArgumentsElements V8_FINAL : public HTemplateInstruction<0> {
public:
- explicit HArgumentsElements(bool from_inlined) : from_inlined_(from_inlined) {
- // The value produced by this instruction is a pointer into the stack
- // that looks as if it was a smi because of alignment.
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- }
+ DECLARE_INSTRUCTION_FACTORY_P1(HArgumentsElements, bool);
DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements)
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
}
bool from_inlined() const { return from_inlined_; }
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
private:
- virtual bool IsDeletable() const { return true; }
+ explicit HArgumentsElements(bool from_inlined) : from_inlined_(from_inlined) {
+ // The value produced by this instruction is a pointer into the stack
+ // that looks as if it was a smi because of alignment.
+ set_representation(Representation::Tagged());
+ SetFlag(kUseGVN);
+ }
+
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
bool from_inlined_;
};
-class HArgumentsLength: public HUnaryOperation {
+class HArgumentsLength V8_FINAL : public HUnaryOperation {
public:
- explicit HArgumentsLength(HValue* value) : HUnaryOperation(value) {
- set_representation(Representation::Integer32());
- SetFlag(kUseGVN);
- }
+ DECLARE_INSTRUCTION_FACTORY_P1(HArgumentsLength, HValue*);
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength)
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
private:
- virtual bool IsDeletable() const { return true; }
+ explicit HArgumentsLength(HValue* value) : HUnaryOperation(value) {
+ set_representation(Representation::Integer32());
+ SetFlag(kUseGVN);
+ }
+
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
};
-class HAccessArgumentsAt: public HTemplateInstruction<3> {
+class HAccessArgumentsAt V8_FINAL : public HTemplateInstruction<3> {
public:
- HAccessArgumentsAt(HValue* arguments, HValue* length, HValue* index) {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- SetOperandAt(0, arguments);
- SetOperandAt(1, length);
- SetOperandAt(2, index);
- }
+ DECLARE_INSTRUCTION_FACTORY_P3(HAccessArgumentsAt, HValue*, HValue*, HValue*);
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
// The arguments elements is considered tagged.
return index == 0
? Representation::Tagged()
@@ -2869,373 +3819,487 @@ class HAccessArgumentsAt: public HTemplateInstruction<3> {
DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt)
- virtual bool DataEquals(HValue* other) { return true; }
+ private:
+ HAccessArgumentsAt(HValue* arguments, HValue* length, HValue* index) {
+ set_representation(Representation::Tagged());
+ SetFlag(kUseGVN);
+ SetOperandAt(0, arguments);
+ SetOperandAt(1, length);
+ SetOperandAt(2, index);
+ }
+
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
};
-enum BoundsCheckKeyMode {
- DONT_ALLOW_SMI_KEY,
- ALLOW_SMI_KEY
-};
+class HBoundsCheckBaseIndexInformation;
-class HBoundsCheck: public HTemplateInstruction<2> {
+class HBoundsCheck V8_FINAL : public HTemplateInstruction<2> {
public:
- HBoundsCheck(HValue* index, HValue* length,
- BoundsCheckKeyMode key_mode = DONT_ALLOW_SMI_KEY)
- : key_mode_(key_mode) {
- SetOperandAt(0, index);
- SetOperandAt(1, length);
- set_representation(Representation::Integer32());
- SetFlag(kUseGVN);
- }
+ DECLARE_INSTRUCTION_FACTORY_P2(HBoundsCheck, HValue*, HValue*);
- virtual Representation RequiredInputRepresentation(int arg_index) {
- if (key_mode_ == DONT_ALLOW_SMI_KEY ||
- !length()->representation().IsTagged()) {
- return Representation::Integer32();
- }
- // If the index is tagged and isn't constant, then allow the length
- // to be tagged, since it is usually already tagged from loading it out of
- // the length field of a JSArray. This allows for direct comparison without
- // untagging.
- if (index()->representation().IsTagged() && !index()->IsConstant()) {
- return Representation::Tagged();
- }
- // Also allow the length to be tagged if the index is constant, because
- // it can be tagged to allow direct comparison.
- if (index()->IsConstant() &&
- index()->representation().IsInteger32() &&
- arg_index == 1) {
- return Representation::Tagged();
+ bool skip_check() const { return skip_check_; }
+ void set_skip_check() { skip_check_ = true; }
+
+ HValue* base() { return base_; }
+ int offset() { return offset_; }
+ int scale() { return scale_; }
+
+ void ApplyIndexChange();
+ bool DetectCompoundIndex() {
+ ASSERT(base() == NULL);
+
+ DecompositionResult decomposition;
+ if (index()->TryDecompose(&decomposition)) {
+ base_ = decomposition.base();
+ offset_ = decomposition.offset();
+ scale_ = decomposition.scale();
+ return true;
+ } else {
+ base_ = index();
+ offset_ = 0;
+ scale_ = 0;
+ return false;
}
- return Representation::Integer32();
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ return representation();
+ }
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void InferRepresentation(
+ HInferRepresentationPhase* h_infer) V8_OVERRIDE;
HValue* index() { return OperandAt(0); }
HValue* length() { return OperandAt(1); }
+ bool allow_equality() { return allow_equality_; }
+ void set_allow_equality(bool v) { allow_equality_ = v; }
+
+ virtual int RedefinedOperandIndex() V8_OVERRIDE { return 0; }
+ virtual bool IsPurelyInformativeDefinition() V8_OVERRIDE {
+ return skip_check();
+ }
DECLARE_CONCRETE_INSTRUCTION(BoundsCheck)
protected:
- virtual bool DataEquals(HValue* other) { return true; }
- BoundsCheckKeyMode key_mode_;
+ friend class HBoundsCheckBaseIndexInformation;
+
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+ bool skip_check_;
+ HValue* base_;
+ int offset_;
+ int scale_;
+ bool allow_equality_;
+
+ private:
+ // Normally HBoundsCheck should be created using the
+ // HGraphBuilder::AddBoundsCheck() helper.
+  // However, when building stubs, where we know that the arguments are Int32,
+ // it makes sense to invoke this constructor directly.
+ HBoundsCheck(HValue* index, HValue* length)
+ : skip_check_(false),
+ base_(NULL), offset_(0), scale_(0),
+ allow_equality_(false) {
+ SetOperandAt(0, index);
+ SetOperandAt(1, length);
+ SetFlag(kFlexibleRepresentation);
+ SetFlag(kUseGVN);
+ }
+
+ virtual bool IsDeletable() const V8_OVERRIDE {
+ return skip_check() && !FLAG_debug_code;
+ }
};
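
// Rough illustration (toy data, scale left at 0) of why HBoundsCheck records
// the base/offset decomposition above: checks on base + constant offsets
// against the same length can all be covered by the single check with the
// largest offset, assuming the offsets are non-negative and the covering
// check is placed where it dominates the others.
#include <algorithm>
#include <cassert>
#include <vector>

struct ToyCheck {
  int offset;      // index == base + offset
  bool skip_check;
};

void CoverWithLargestOffset(std::vector<ToyCheck>* checks) {
  int max_offset = 0;
  for (const ToyCheck& c : *checks) max_offset = std::max(max_offset, c.offset);
  for (ToyCheck& c : *checks) {
    // base + max_offset < length implies base + smaller_offset < length.
    c.skip_check = (c.offset < max_offset);
  }
}

int main() {
  std::vector<ToyCheck> checks = {{0, false}, {1, false}, {2, false}};
  CoverWithLargestOffset(&checks);
  assert(checks[0].skip_check && checks[1].skip_check && !checks[2].skip_check);
  return 0;
}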
-class HBitwiseBinaryOperation: public HBinaryOperation {
+class HBoundsCheckBaseIndexInformation V8_FINAL
+ : public HTemplateInstruction<2> {
public:
- HBitwiseBinaryOperation(HValue* context, HValue* left, HValue* right)
- : HBinaryOperation(context, left, right) {
- set_representation(Representation::Tagged());
- SetFlag(kFlexibleRepresentation);
- SetAllSideEffects();
- observed_input_representation_[0] = Representation::Tagged();
- observed_input_representation_[1] = Representation::None();
- observed_input_representation_[2] = Representation::None();
+ explicit HBoundsCheckBaseIndexInformation(HBoundsCheck* check) {
+ DecompositionResult decomposition;
+ if (check->index()->TryDecompose(&decomposition)) {
+ SetOperandAt(0, decomposition.base());
+ SetOperandAt(1, check);
+ } else {
+ UNREACHABLE();
+ }
}
- virtual Representation RequiredInputRepresentation(int index) {
- return index == 0
- ? Representation::Tagged()
- : representation();
+ HValue* base_index() { return OperandAt(0); }
+ HBoundsCheck* bounds_check() { return HBoundsCheck::cast(OperandAt(1)); }
+
+ DECLARE_CONCRETE_INSTRUCTION(BoundsCheckBaseIndexInformation)
+
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ return representation();
}
- virtual void RepresentationChanged(Representation to) {
- if (!to.IsTagged()) {
- ASSERT(to.IsInteger32());
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ virtual int RedefinedOperandIndex() V8_OVERRIDE { return 0; }
+ virtual bool IsPurelyInformativeDefinition() V8_OVERRIDE { return true; }
+};
+
+
+class HBitwiseBinaryOperation : public HBinaryOperation {
+ public:
+ HBitwiseBinaryOperation(HValue* context, HValue* left, HValue* right,
+ HType type = HType::Tagged())
+ : HBinaryOperation(context, left, right, type) {
+ SetFlag(kFlexibleRepresentation);
+ SetFlag(kTruncatingToInt32);
+ SetFlag(kAllowUndefinedAsNaN);
+ SetAllSideEffects();
+ }
+
+ virtual void RepresentationChanged(Representation to) V8_OVERRIDE {
+ if (to.IsTagged()) SetGVNFlag(kChangesNewSpacePromotion);
+ if (to.IsTagged() &&
+ (left()->ToNumberCanBeObserved() || right()->ToNumberCanBeObserved())) {
+ SetAllSideEffects();
+ ClearFlag(kUseGVN);
+ } else {
ClearAllSideEffects();
- SetFlag(kTruncatingToInt32);
SetFlag(kUseGVN);
}
}
- virtual HType CalculateInferredType();
+ virtual void UpdateRepresentation(Representation new_rep,
+ HInferRepresentationPhase* h_infer,
+ const char* reason) V8_OVERRIDE {
+ // We only generate either int32 or generic tagged bitwise operations.
+ if (new_rep.IsDouble()) new_rep = Representation::Integer32();
+ HBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason);
+ }
- virtual Representation ObservedInputRepresentation(int index) {
- return observed_input_representation_[index];
+ virtual Representation observed_input_representation(int index) V8_OVERRIDE {
+ Representation r = HBinaryOperation::observed_input_representation(index);
+ if (r.IsDouble()) return Representation::Integer32();
+ return r;
}
- void InitializeObservedInputRepresentation(Representation r) {
- observed_input_representation_[1] = r;
- observed_input_representation_[2] = r;
+ virtual void initialize_output_representation(Representation observed) {
+ if (observed.IsDouble()) observed = Representation::Integer32();
+ HBinaryOperation::initialize_output_representation(observed);
}
DECLARE_ABSTRACT_INSTRUCTION(BitwiseBinaryOperation)
private:
- virtual bool IsDeletable() const { return true; }
-
- Representation observed_input_representation_[3];
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
};
-class HMathFloorOfDiv: public HBinaryOperation {
+class HMathFloorOfDiv V8_FINAL : public HBinaryOperation {
public:
- HMathFloorOfDiv(HValue* context, HValue* left, HValue* right)
- : HBinaryOperation(context, left, right) {
- set_representation(Representation::Integer32());
- SetFlag(kUseGVN);
- SetFlag(kCanOverflow);
- }
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HMathFloorOfDiv,
+ HValue*,
+ HValue*);
- virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Integer32();
- }
+ virtual HValue* EnsureAndPropagateNotMinusZero(
+ BitVector* visited) V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(MathFloorOfDiv)
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
private:
- virtual bool IsDeletable() const { return true; }
+ HMathFloorOfDiv(HValue* context, HValue* left, HValue* right)
+ : HBinaryOperation(context, left, right) {
+ set_representation(Representation::Integer32());
+ SetFlag(kUseGVN);
+ SetFlag(kCanOverflow);
+ if (!right->IsConstant()) {
+ SetFlag(kCanBeDivByZero);
+ }
+ SetFlag(kAllowUndefinedAsNaN);
+ }
+
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
};
-class HArithmeticBinaryOperation: public HBinaryOperation {
+class HArithmeticBinaryOperation : public HBinaryOperation {
public:
HArithmeticBinaryOperation(HValue* context, HValue* left, HValue* right)
- : HBinaryOperation(context, left, right) {
- set_representation(Representation::Tagged());
- SetFlag(kFlexibleRepresentation);
+ : HBinaryOperation(context, left, right, HType::TaggedNumber()) {
SetAllSideEffects();
+ SetFlag(kFlexibleRepresentation);
+ SetFlag(kAllowUndefinedAsNaN);
}
- virtual void RepresentationChanged(Representation to) {
- if (!to.IsTagged()) {
+ virtual void RepresentationChanged(Representation to) V8_OVERRIDE {
+ if (to.IsTagged()) SetGVNFlag(kChangesNewSpacePromotion);
+ if (to.IsTagged() &&
+ (left()->ToNumberCanBeObserved() || right()->ToNumberCanBeObserved())) {
+ SetAllSideEffects();
+ ClearFlag(kUseGVN);
+ } else {
ClearAllSideEffects();
SetFlag(kUseGVN);
}
}
- virtual HType CalculateInferredType();
- virtual Representation RequiredInputRepresentation(int index) {
+ DECLARE_ABSTRACT_INSTRUCTION(ArithmeticBinaryOperation)
+ private:
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
+};
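
// Minimal sketch (toy flags, not the real HValue flag machinery) of the
// representation-change rule shared by HBitwiseBinaryOperation and
// HArithmeticBinaryOperation above: once the operation no longer needs a
// tagged result, or neither operand's ToNumber conversion can be observed, it
// stops carrying side effects and becomes eligible for GVN.
#include <cassert>

struct ToyBinaryOp {
  bool all_side_effects;
  bool use_gvn;

  void RepresentationChanged(bool to_tagged, bool to_number_observable) {
    if (to_tagged && to_number_observable) {
      all_side_effects = true;   // the conversion may call user code
      use_gvn = false;
    } else {
      all_side_effects = false;  // pure value computation
      use_gvn = true;
    }
  }
};

int main() {
  ToyBinaryOp op = {true, false};
  op.RepresentationChanged(/*to_tagged=*/false, /*to_number_observable=*/true);
  assert(!op.all_side_effects && op.use_gvn);   // int32 op: pure, GVN-able
  op.RepresentationChanged(/*to_tagged=*/true, /*to_number_observable=*/true);
  assert(op.all_side_effects && !op.use_gvn);   // generic tagged op
  return 0;
}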
+
+
+class HCompareGeneric V8_FINAL : public HBinaryOperation {
+ public:
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(HCompareGeneric, HValue*,
+ HValue*, Token::Value);
+
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return index == 0
? Representation::Tagged()
: representation();
}
- virtual Representation InferredRepresentation() {
- if (left()->representation().Equals(right()->representation())) {
- return left()->representation();
- }
- return HValue::InferredRepresentation();
- }
-
- private:
- virtual bool IsDeletable() const { return true; }
-};
+ Token::Value token() const { return token_; }
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ DECLARE_CONCRETE_INSTRUCTION(CompareGeneric)
-class HCompareGeneric: public HBinaryOperation {
- public:
+ private:
HCompareGeneric(HValue* context,
HValue* left,
HValue* right,
Token::Value token)
- : HBinaryOperation(context, left, right), token_(token) {
+ : HBinaryOperation(context, left, right, HType::Boolean()),
+ token_(token) {
ASSERT(Token::IsCompareOp(token));
set_representation(Representation::Tagged());
SetAllSideEffects();
}
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
- Representation GetInputRepresentation() const {
- return Representation::Tagged();
- }
-
- Token::Value token() const { return token_; }
- virtual void PrintDataTo(StringStream* stream);
-
- virtual HType CalculateInferredType();
-
- DECLARE_CONCRETE_INSTRUCTION(CompareGeneric)
-
- private:
Token::Value token_;
};
-class HCompareIDAndBranch: public HTemplateControlInstruction<2, 2> {
+class HCompareNumericAndBranch : public HTemplateControlInstruction<2, 2> {
public:
- HCompareIDAndBranch(HValue* left, HValue* right, Token::Value token)
- : token_(token) {
- ASSERT(Token::IsCompareOp(token));
- SetOperandAt(0, left);
- SetOperandAt(1, right);
- }
+ DECLARE_INSTRUCTION_FACTORY_P3(HCompareNumericAndBranch,
+ HValue*, HValue*, Token::Value);
+ DECLARE_INSTRUCTION_FACTORY_P5(HCompareNumericAndBranch,
+ HValue*, HValue*, Token::Value,
+ HBasicBlock*, HBasicBlock*);
HValue* left() { return OperandAt(0); }
HValue* right() { return OperandAt(1); }
Token::Value token() const { return token_; }
- void SetInputRepresentation(Representation r);
- Representation GetInputRepresentation() const {
- return input_representation_;
+ void set_observed_input_representation(Representation left,
+ Representation right) {
+ observed_input_representation_[0] = left;
+ observed_input_representation_[1] = right;
}
- virtual Representation RequiredInputRepresentation(int index) {
- return input_representation_;
+ virtual void InferRepresentation(
+ HInferRepresentationPhase* h_infer) V8_OVERRIDE;
+
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ return representation();
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual Representation observed_input_representation(int index) V8_OVERRIDE {
+ return observed_input_representation_[index];
+ }
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- DECLARE_CONCRETE_INSTRUCTION(CompareIDAndBranch)
+ DECLARE_CONCRETE_INSTRUCTION(CompareNumericAndBranch)
private:
- Representation input_representation_;
- Token::Value token_;
-};
-
-
-class HCompareObjectEqAndBranch: public HTemplateControlInstruction<2, 2> {
- public:
- HCompareObjectEqAndBranch(HValue* left, HValue* right) {
+ HCompareNumericAndBranch(HValue* left,
+ HValue* right,
+ Token::Value token,
+ HBasicBlock* true_target = NULL,
+ HBasicBlock* false_target = NULL)
+ : token_(token) {
+ SetFlag(kFlexibleRepresentation);
+ ASSERT(Token::IsCompareOp(token));
SetOperandAt(0, left);
SetOperandAt(1, right);
+ SetSuccessorAt(0, true_target);
+ SetSuccessorAt(1, false_target);
}
- HValue* left() { return OperandAt(0); }
- HValue* right() { return OperandAt(1); }
-
- virtual void PrintDataTo(StringStream* stream);
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CompareObjectEqAndBranch)
+ Representation observed_input_representation_[2];
+ Token::Value token_;
};
-class HCompareConstantEqAndBranch: public HUnaryControlInstruction {
+class HCompareHoleAndBranch V8_FINAL : public HUnaryControlInstruction {
public:
- HCompareConstantEqAndBranch(HValue* left, int right, Token::Value op)
- : HUnaryControlInstruction(left, NULL, NULL), op_(op), right_(right) {
- ASSERT(op == Token::EQ_STRICT);
- }
+ DECLARE_INSTRUCTION_FACTORY_P1(HCompareHoleAndBranch, HValue*);
+ DECLARE_INSTRUCTION_FACTORY_P3(HCompareHoleAndBranch, HValue*,
+ HBasicBlock*, HBasicBlock*);
- Token::Value op() const { return op_; }
- HValue* left() { return value(); }
- int right() const { return right_; }
+ virtual void InferRepresentation(
+ HInferRepresentationPhase* h_infer) V8_OVERRIDE;
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Integer32();
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ return representation();
}
- DECLARE_CONCRETE_INSTRUCTION(CompareConstantEqAndBranch);
+ DECLARE_CONCRETE_INSTRUCTION(CompareHoleAndBranch)
private:
- const Token::Value op_;
- const int right_;
+ HCompareHoleAndBranch(HValue* value,
+ HBasicBlock* true_target = NULL,
+ HBasicBlock* false_target = NULL)
+ : HUnaryControlInstruction(value, true_target, false_target) {
+ SetFlag(kFlexibleRepresentation);
+ SetFlag(kAllowUndefinedAsNaN);
+ }
};
-class HIsNilAndBranch: public HUnaryControlInstruction {
+class HCompareObjectEqAndBranch : public HTemplateControlInstruction<2, 2> {
public:
- HIsNilAndBranch(HValue* value, EqualityKind kind, NilValue nil)
- : HUnaryControlInstruction(value, NULL, NULL), kind_(kind), nil_(nil) { }
+ HCompareObjectEqAndBranch(HValue* left,
+ HValue* right,
+ HBasicBlock* true_target = NULL,
+ HBasicBlock* false_target = NULL) {
+ // TODO(danno): make this private when the IfBuilder properly constructs
+ // control flow instructions.
+ ASSERT(!left->IsConstant() ||
+ (!HConstant::cast(left)->HasInteger32Value() ||
+ HConstant::cast(left)->HasSmiValue()));
+ ASSERT(!right->IsConstant() ||
+ (!HConstant::cast(right)->HasInteger32Value() ||
+ HConstant::cast(right)->HasSmiValue()));
+ SetOperandAt(0, left);
+ SetOperandAt(1, right);
+ SetSuccessorAt(0, true_target);
+ SetSuccessorAt(1, false_target);
+ }
- EqualityKind kind() const { return kind_; }
- NilValue nil() const { return nil_; }
+ DECLARE_INSTRUCTION_FACTORY_P2(HCompareObjectEqAndBranch, HValue*, HValue*);
+ DECLARE_INSTRUCTION_FACTORY_P4(HCompareObjectEqAndBranch, HValue*, HValue*,
+ HBasicBlock*, HBasicBlock*);
- virtual void PrintDataTo(StringStream* stream);
+ virtual bool KnownSuccessorBlock(HBasicBlock** block) V8_OVERRIDE;
- virtual Representation RequiredInputRepresentation(int index) {
+ HValue* left() { return OperandAt(0); }
+ HValue* right() { return OperandAt(1); }
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
- DECLARE_CONCRETE_INSTRUCTION(IsNilAndBranch)
+ virtual Representation observed_input_representation(int index) V8_OVERRIDE {
+ return Representation::Tagged();
+ }
- private:
- EqualityKind kind_;
- NilValue nil_;
+ DECLARE_CONCRETE_INSTRUCTION(CompareObjectEqAndBranch)
};
-class HIsObjectAndBranch: public HUnaryControlInstruction {
+class HIsObjectAndBranch V8_FINAL : public HUnaryControlInstruction {
public:
- explicit HIsObjectAndBranch(HValue* value)
- : HUnaryControlInstruction(value, NULL, NULL) { }
+ DECLARE_INSTRUCTION_FACTORY_P1(HIsObjectAndBranch, HValue*);
+ DECLARE_INSTRUCTION_FACTORY_P3(HIsObjectAndBranch, HValue*,
+ HBasicBlock*, HBasicBlock*);
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch)
+
+ private:
+ HIsObjectAndBranch(HValue* value,
+ HBasicBlock* true_target = NULL,
+ HBasicBlock* false_target = NULL)
+ : HUnaryControlInstruction(value, true_target, false_target) {}
};
-class HIsStringAndBranch: public HUnaryControlInstruction {
+
+class HIsStringAndBranch V8_FINAL : public HUnaryControlInstruction {
public:
- explicit HIsStringAndBranch(HValue* value)
- : HUnaryControlInstruction(value, NULL, NULL) { }
+ DECLARE_INSTRUCTION_FACTORY_P1(HIsStringAndBranch, HValue*);
+ DECLARE_INSTRUCTION_FACTORY_P3(HIsStringAndBranch, HValue*,
+ HBasicBlock*, HBasicBlock*);
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch)
+
+ private:
+ HIsStringAndBranch(HValue* value,
+ HBasicBlock* true_target = NULL,
+ HBasicBlock* false_target = NULL)
+ : HUnaryControlInstruction(value, true_target, false_target) {}
};
-class HIsSmiAndBranch: public HUnaryControlInstruction {
+class HIsSmiAndBranch V8_FINAL : public HUnaryControlInstruction {
public:
- explicit HIsSmiAndBranch(HValue* value)
- : HUnaryControlInstruction(value, NULL, NULL) { }
+ DECLARE_INSTRUCTION_FACTORY_P1(HIsSmiAndBranch, HValue*);
+ DECLARE_INSTRUCTION_FACTORY_P3(HIsSmiAndBranch, HValue*,
+ HBasicBlock*, HBasicBlock*);
DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch)
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+
+ private:
+ HIsSmiAndBranch(HValue* value,
+ HBasicBlock* true_target = NULL,
+ HBasicBlock* false_target = NULL)
+ : HUnaryControlInstruction(value, true_target, false_target) {}
};
-class HIsUndetectableAndBranch: public HUnaryControlInstruction {
+class HIsUndetectableAndBranch V8_FINAL : public HUnaryControlInstruction {
public:
- explicit HIsUndetectableAndBranch(HValue* value)
- : HUnaryControlInstruction(value, NULL, NULL) { }
+ DECLARE_INSTRUCTION_FACTORY_P1(HIsUndetectableAndBranch, HValue*);
+ DECLARE_INSTRUCTION_FACTORY_P3(HIsUndetectableAndBranch, HValue*,
+ HBasicBlock*, HBasicBlock*);
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch)
+
+ private:
+ HIsUndetectableAndBranch(HValue* value,
+ HBasicBlock* true_target = NULL,
+ HBasicBlock* false_target = NULL)
+ : HUnaryControlInstruction(value, true_target, false_target) {}
};
-class HStringCompareAndBranch: public HTemplateControlInstruction<2, 3> {
+class HStringCompareAndBranch : public HTemplateControlInstruction<2, 3> {
public:
- HStringCompareAndBranch(HValue* context,
- HValue* left,
- HValue* right,
- Token::Value token)
- : token_(token) {
- ASSERT(Token::IsCompareOp(token));
- SetOperandAt(0, context);
- SetOperandAt(1, left);
- SetOperandAt(2, right);
- set_representation(Representation::Tagged());
- }
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(HStringCompareAndBranch,
+ HValue*,
+ HValue*,
+ Token::Value);
HValue* context() { return OperandAt(0); }
HValue* left() { return OperandAt(1); }
HValue* right() { return OperandAt(2); }
Token::Value token() const { return token_; }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
@@ -3246,313 +4310,424 @@ class HStringCompareAndBranch: public HTemplateControlInstruction<2, 3> {
DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch)
private:
+ HStringCompareAndBranch(HValue* context,
+ HValue* left,
+ HValue* right,
+ Token::Value token)
+ : token_(token) {
+ ASSERT(Token::IsCompareOp(token));
+ SetOperandAt(0, context);
+ SetOperandAt(1, left);
+ SetOperandAt(2, right);
+ set_representation(Representation::Tagged());
+ SetGVNFlag(kChangesNewSpacePromotion);
+ }
+
Token::Value token_;
};
-class HIsConstructCallAndBranch: public HTemplateControlInstruction<2, 0> {
+class HIsConstructCallAndBranch : public HTemplateControlInstruction<2, 0> {
public:
- virtual Representation RequiredInputRepresentation(int index) {
+ DECLARE_INSTRUCTION_FACTORY_P0(HIsConstructCallAndBranch);
+
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
}
DECLARE_CONCRETE_INSTRUCTION(IsConstructCallAndBranch)
+ private:
+ HIsConstructCallAndBranch() {}
};
-class HHasInstanceTypeAndBranch: public HUnaryControlInstruction {
+class HHasInstanceTypeAndBranch V8_FINAL : public HUnaryControlInstruction {
public:
- HHasInstanceTypeAndBranch(HValue* value, InstanceType type)
- : HUnaryControlInstruction(value, NULL, NULL), from_(type), to_(type) { }
- HHasInstanceTypeAndBranch(HValue* value, InstanceType from, InstanceType to)
- : HUnaryControlInstruction(value, NULL, NULL), from_(from), to_(to) {
- ASSERT(to == LAST_TYPE); // Others not implemented yet in backend.
- }
+ DECLARE_INSTRUCTION_FACTORY_P2(
+ HHasInstanceTypeAndBranch, HValue*, InstanceType);
+ DECLARE_INSTRUCTION_FACTORY_P3(
+ HHasInstanceTypeAndBranch, HValue*, InstanceType, InstanceType);
InstanceType from() { return from_; }
InstanceType to() { return to_; }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch)
private:
+ HHasInstanceTypeAndBranch(HValue* value, InstanceType type)
+ : HUnaryControlInstruction(value, NULL, NULL), from_(type), to_(type) { }
+ HHasInstanceTypeAndBranch(HValue* value, InstanceType from, InstanceType to)
+ : HUnaryControlInstruction(value, NULL, NULL), from_(from), to_(to) {
+ ASSERT(to == LAST_TYPE); // Others not implemented yet in backend.
+ }
+
InstanceType from_;
InstanceType to_; // Inclusive range, not all combinations work.
};
-class HHasCachedArrayIndexAndBranch: public HUnaryControlInstruction {
+class HHasCachedArrayIndexAndBranch V8_FINAL : public HUnaryControlInstruction {
public:
- explicit HHasCachedArrayIndexAndBranch(HValue* value)
- : HUnaryControlInstruction(value, NULL, NULL) { }
+ DECLARE_INSTRUCTION_FACTORY_P1(HHasCachedArrayIndexAndBranch, HValue*);
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch)
+ private:
+ explicit HHasCachedArrayIndexAndBranch(HValue* value)
+ : HUnaryControlInstruction(value, NULL, NULL) { }
};
-class HGetCachedArrayIndex: public HUnaryOperation {
+class HGetCachedArrayIndex V8_FINAL : public HUnaryOperation {
public:
- explicit HGetCachedArrayIndex(HValue* value) : HUnaryOperation(value) {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- }
+ DECLARE_INSTRUCTION_FACTORY_P1(HGetCachedArrayIndex, HValue*);
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex)
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
private:
- virtual bool IsDeletable() const { return true; }
+ explicit HGetCachedArrayIndex(HValue* value) : HUnaryOperation(value) {
+ set_representation(Representation::Tagged());
+ SetFlag(kUseGVN);
+ }
+
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
};
-class HClassOfTestAndBranch: public HUnaryControlInstruction {
+class HClassOfTestAndBranch V8_FINAL : public HUnaryControlInstruction {
public:
- HClassOfTestAndBranch(HValue* value, Handle<String> class_name)
- : HUnaryControlInstruction(value, NULL, NULL),
- class_name_(class_name) { }
+ DECLARE_INSTRUCTION_FACTORY_P2(HClassOfTestAndBranch, HValue*,
+ Handle<String>);
DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch)
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<String> class_name() const { return class_name_; }
private:
+ HClassOfTestAndBranch(HValue* value, Handle<String> class_name)
+ : HUnaryControlInstruction(value, NULL, NULL),
+ class_name_(class_name) { }
+
Handle<String> class_name_;
};
-class HTypeofIsAndBranch: public HUnaryControlInstruction {
+class HTypeofIsAndBranch V8_FINAL : public HUnaryControlInstruction {
public:
- HTypeofIsAndBranch(HValue* value, Handle<String> type_literal)
- : HUnaryControlInstruction(value, NULL, NULL),
- type_literal_(type_literal) { }
+ DECLARE_INSTRUCTION_FACTORY_P2(HTypeofIsAndBranch, HValue*, Handle<String>);
Handle<String> type_literal() { return type_literal_; }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch)
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
private:
+ HTypeofIsAndBranch(HValue* value, Handle<String> type_literal)
+ : HUnaryControlInstruction(value, NULL, NULL),
+ type_literal_(type_literal) { }
+
Handle<String> type_literal_;
};
-class HInstanceOf: public HBinaryOperation {
+class HInstanceOf V8_FINAL : public HBinaryOperation {
public:
- HInstanceOf(HValue* context, HValue* left, HValue* right)
- : HBinaryOperation(context, left, right) {
- set_representation(Representation::Tagged());
- SetAllSideEffects();
- }
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HInstanceOf, HValue*, HValue*);
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
- virtual HType CalculateInferredType();
-
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(InstanceOf)
-};
-
-class HInstanceOfKnownGlobal: public HTemplateInstruction<2> {
- public:
- HInstanceOfKnownGlobal(HValue* context,
- HValue* left,
- Handle<JSFunction> right)
- : function_(right) {
- SetOperandAt(0, context);
- SetOperandAt(1, left);
+ private:
+ HInstanceOf(HValue* context, HValue* left, HValue* right)
+ : HBinaryOperation(context, left, right, HType::Boolean()) {
set_representation(Representation::Tagged());
SetAllSideEffects();
}
+};
+
+
+class HInstanceOfKnownGlobal V8_FINAL : public HTemplateInstruction<2> {
+ public:
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HInstanceOfKnownGlobal,
+ HValue*,
+ Handle<JSFunction>);
HValue* context() { return OperandAt(0); }
HValue* left() { return OperandAt(1); }
Handle<JSFunction> function() { return function_; }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
- virtual HType CalculateInferredType();
-
DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal)
private:
+ HInstanceOfKnownGlobal(HValue* context,
+ HValue* left,
+ Handle<JSFunction> right)
+ : HTemplateInstruction<2>(HType::Boolean()), function_(right) {
+ SetOperandAt(0, context);
+ SetOperandAt(1, left);
+ set_representation(Representation::Tagged());
+ SetAllSideEffects();
+ }
+
Handle<JSFunction> function_;
};
-class HPower: public HTemplateInstruction<2> {
+class HPower V8_FINAL : public HTemplateInstruction<2> {
public:
- HPower(HValue* left, HValue* right) {
- SetOperandAt(0, left);
- SetOperandAt(1, right);
- set_representation(Representation::Double());
- SetFlag(kUseGVN);
- SetGVNFlag(kChangesNewSpacePromotion);
- }
+ static HInstruction* New(Zone* zone,
+ HValue* context,
+ HValue* left,
+ HValue* right);
HValue* left() { return OperandAt(0); }
HValue* right() const { return OperandAt(1); }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return index == 0
? Representation::Double()
: Representation::None();
}
+ virtual Representation observed_input_representation(int index) V8_OVERRIDE {
+ return RequiredInputRepresentation(index);
+ }
DECLARE_CONCRETE_INSTRUCTION(Power)
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
private:
- virtual bool IsDeletable() const {
+ HPower(HValue* left, HValue* right) {
+ SetOperandAt(0, left);
+ SetOperandAt(1, right);
+ set_representation(Representation::Double());
+ SetFlag(kUseGVN);
+ SetGVNFlag(kChangesNewSpacePromotion);
+ }
+
+ virtual bool IsDeletable() const V8_OVERRIDE {
return !right()->representation().IsTagged();
}
};
-class HRandom: public HTemplateInstruction<1> {
+class HRandom V8_FINAL : public HTemplateInstruction<1> {
public:
- explicit HRandom(HValue* global_object) {
- SetOperandAt(0, global_object);
- set_representation(Representation::Double());
- }
+ DECLARE_INSTRUCTION_FACTORY_P1(HRandom, HValue*);
HValue* global_object() { return OperandAt(0); }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
DECLARE_CONCRETE_INSTRUCTION(Random)
private:
- virtual bool IsDeletable() const { return true; }
+ explicit HRandom(HValue* global_object) {
+ SetOperandAt(0, global_object);
+ set_representation(Representation::Double());
+ }
+
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
};
-class HAdd: public HArithmeticBinaryOperation {
+class HAdd V8_FINAL : public HArithmeticBinaryOperation {
public:
- HAdd(HValue* context, HValue* left, HValue* right)
- : HArithmeticBinaryOperation(context, left, right) {
- SetFlag(kCanOverflow);
- }
+ static HInstruction* New(Zone* zone,
+ HValue* context,
+ HValue* left,
+ HValue* right);
// Add is only commutative if two integer values are added and not if two
// tagged values are added (because it might be a String concatenation).
- virtual bool IsCommutative() const {
+ virtual bool IsCommutative() const V8_OVERRIDE {
return !representation().IsTagged();
}
- virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
+ virtual HValue* EnsureAndPropagateNotMinusZero(
+ BitVector* visited) V8_OVERRIDE;
- static HInstruction* NewHAdd(Zone* zone,
- HValue* context,
- HValue* left,
- HValue* right);
+ virtual HValue* Canonicalize() V8_OVERRIDE;
- virtual HType CalculateInferredType();
+ virtual bool TryDecompose(DecompositionResult* decomposition) V8_OVERRIDE {
+ if (left()->IsInteger32Constant()) {
+ decomposition->Apply(right(), left()->GetInteger32Constant());
+ return true;
+ } else if (right()->IsInteger32Constant()) {
+ decomposition->Apply(left(), right()->GetInteger32Constant());
+ return true;
+ } else {
+ return false;
+ }
+ }
- virtual HValue* Canonicalize();
+ virtual void RepresentationChanged(Representation to) V8_OVERRIDE {
+ if (to.IsTagged()) {
+ SetGVNFlag(kChangesNewSpacePromotion);
+ ClearFlag(kAllowUndefinedAsNaN);
+ }
+ if (to.IsTagged() &&
+ (left()->ToNumberCanBeObserved() || right()->ToNumberCanBeObserved() ||
+ left()->ToStringCanBeObserved() || right()->ToStringCanBeObserved())) {
+ SetAllSideEffects();
+ ClearFlag(kUseGVN);
+ } else {
+ ClearAllSideEffects();
+ SetFlag(kUseGVN);
+ }
+ }
DECLARE_CONCRETE_INSTRUCTION(Add)
protected:
- virtual bool DataEquals(HValue* other) { return true; }
-
- virtual Range* InferRange(Zone* zone);
-};
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+ virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
-class HSub: public HArithmeticBinaryOperation {
- public:
- HSub(HValue* context, HValue* left, HValue* right)
+ private:
+ HAdd(HValue* context, HValue* left, HValue* right)
: HArithmeticBinaryOperation(context, left, right) {
SetFlag(kCanOverflow);
}
+};
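
The comment above HAdd::IsCommutative captures the key subtlety: a tagged add may turn out to be string concatenation, which does not commute. A minimal standalone C++ illustration (plain standard-library code, not V8 internals):

#include <cassert>
#include <string>

int main() {
  // Integer addition commutes, so an integer-represented add can be
  // reordered freely.
  assert(2 + 40 == 40 + 2);
  // A tagged add may really be string concatenation, which does not
  // commute -- hence IsCommutative() returns false for tagged inputs.
  std::string a = "foo", b = "bar";
  assert(a + b != b + a);
  return 0;
}
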
- virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
- virtual HValue* Canonicalize();
+class HSub V8_FINAL : public HArithmeticBinaryOperation {
+ public:
+ static HInstruction* New(Zone* zone,
+ HValue* context,
+ HValue* left,
+ HValue* right);
- static HInstruction* NewHSub(Zone* zone,
- HValue* context,
- HValue* left,
- HValue* right);
+ virtual HValue* EnsureAndPropagateNotMinusZero(
+ BitVector* visited) V8_OVERRIDE;
+
+ virtual HValue* Canonicalize() V8_OVERRIDE;
+
+ virtual bool TryDecompose(DecompositionResult* decomposition) V8_OVERRIDE {
+ if (right()->IsInteger32Constant()) {
+ decomposition->Apply(left(), -right()->GetInteger32Constant());
+ return true;
+ } else {
+ return false;
+ }
+ }
DECLARE_CONCRETE_INSTRUCTION(Sub)
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
- virtual Range* InferRange(Zone* zone);
+ virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
+
+ private:
+ HSub(HValue* context, HValue* left, HValue* right)
+ : HArithmeticBinaryOperation(context, left, right) {
+ SetFlag(kCanOverflow);
+ }
};
-class HMul: public HArithmeticBinaryOperation {
+class HMul V8_FINAL : public HArithmeticBinaryOperation {
public:
- HMul(HValue* context, HValue* left, HValue* right)
- : HArithmeticBinaryOperation(context, left, right) {
- SetFlag(kCanOverflow);
+ static HInstruction* New(Zone* zone,
+ HValue* context,
+ HValue* left,
+ HValue* right);
+
+ static HInstruction* NewImul(Zone* zone,
+ HValue* context,
+ HValue* left,
+ HValue* right) {
+ HInstruction* instr = HMul::New(zone, context, left, right);
+ if (!instr->IsMul()) return instr;
+ HMul* mul = HMul::cast(instr);
+ // TODO(mstarzinger): Prevent bailout on minus zero for imul.
+ mul->AssumeRepresentation(Representation::Integer32());
+ mul->ClearFlag(HValue::kCanOverflow);
+ return mul;
}
- virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
+ virtual HValue* EnsureAndPropagateNotMinusZero(
+ BitVector* visited) V8_OVERRIDE;
+
+ virtual HValue* Canonicalize() V8_OVERRIDE;
   // Only commutative if it is certain that no two objects are being multiplied.
- virtual bool IsCommutative() const {
+ virtual bool IsCommutative() const V8_OVERRIDE {
return !representation().IsTagged();
}
- static HInstruction* NewHMul(Zone* zone,
- HValue* context,
- HValue* left,
- HValue* right);
+ virtual void UpdateRepresentation(Representation new_rep,
+ HInferRepresentationPhase* h_infer,
+ const char* reason) V8_OVERRIDE {
+ HArithmeticBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason);
+ }
+
+ bool MulMinusOne();
DECLARE_CONCRETE_INSTRUCTION(Mul)
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
- virtual Range* InferRange(Zone* zone);
+ virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
+
+ private:
+ HMul(HValue* context, HValue* left, HValue* right)
+ : HArithmeticBinaryOperation(context, left, right) {
+ SetFlag(kCanOverflow);
+ }
};
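
NewImul above re-checks instr->IsMul() because the static New factories may simplify or constant-fold and hand back a different instruction kind. A standalone sketch of that factory pattern, using hypothetical Node/Constant/Mul types rather than the V8 classes:

struct Node {
  virtual ~Node() {}
  virtual bool IsMul() const { return false; }
};

struct Constant : Node {
  explicit Constant(int v) : value(v) {}
  int value;
};

struct Mul : Node {
  Mul(Node* l, Node* r) : left(l), right(r) {}
  bool IsMul() const { return true; }
  Node* left;
  Node* right;
};

// The factory may fold two constants, so a caller that needs a Mul must
// check the result before casting -- exactly what NewImul does.
Node* NewMul(Node* left, Node* right) {
  Constant* cl = dynamic_cast<Constant*>(left);
  Constant* cr = dynamic_cast<Constant*>(right);
  if (cl && cr) return new Constant(cl->value * cr->value);  // folded
  return new Mul(left, right);
}
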
-class HMod: public HArithmeticBinaryOperation {
+class HMod V8_FINAL : public HArithmeticBinaryOperation {
public:
- HMod(HValue* context, HValue* left, HValue* right)
- : HArithmeticBinaryOperation(context, left, right) {
- SetFlag(kCanBeDivByZero);
- }
+ static HInstruction* New(Zone* zone,
+ HValue* context,
+ HValue* left,
+ HValue* right,
+ Maybe<int> fixed_right_arg);
+
+ Maybe<int> fixed_right_arg() const { return fixed_right_arg_; }
bool HasPowerOf2Divisor() {
if (right()->IsConstant() &&
@@ -3564,229 +4739,403 @@ class HMod: public HArithmeticBinaryOperation {
return false;
}
- virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
+ virtual HValue* EnsureAndPropagateNotMinusZero(
+ BitVector* visited) V8_OVERRIDE;
- static HInstruction* NewHMod(Zone* zone,
- HValue* context,
- HValue* left,
- HValue* right);
+ virtual HValue* Canonicalize() V8_OVERRIDE;
+
+ virtual void UpdateRepresentation(Representation new_rep,
+ HInferRepresentationPhase* h_infer,
+ const char* reason) V8_OVERRIDE {
+ if (new_rep.IsSmi()) new_rep = Representation::Integer32();
+ HArithmeticBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason);
+ }
DECLARE_CONCRETE_INSTRUCTION(Mod)
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
- virtual Range* InferRange(Zone* zone);
+ virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
+
+ private:
+ HMod(HValue* context,
+ HValue* left,
+ HValue* right,
+ Maybe<int> fixed_right_arg)
+ : HArithmeticBinaryOperation(context, left, right),
+ fixed_right_arg_(fixed_right_arg) {
+ SetFlag(kCanBeDivByZero);
+ SetFlag(kCanOverflow);
+ }
+
+ const Maybe<int> fixed_right_arg_;
};
-class HDiv: public HArithmeticBinaryOperation {
+class HDiv V8_FINAL : public HArithmeticBinaryOperation {
public:
- HDiv(HValue* context, HValue* left, HValue* right)
- : HArithmeticBinaryOperation(context, left, right) {
- SetFlag(kCanBeDivByZero);
- SetFlag(kCanOverflow);
+ static HInstruction* New(Zone* zone,
+ HValue* context,
+ HValue* left,
+ HValue* right);
+
+ bool HasPowerOf2Divisor() {
+ if (right()->IsInteger32Constant()) {
+ int32_t value = right()->GetInteger32Constant();
+ return value != 0 && (IsPowerOf2(value) || IsPowerOf2(-value));
+ }
+
+ return false;
}
- virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
+ virtual HValue* EnsureAndPropagateNotMinusZero(
+ BitVector* visited) V8_OVERRIDE;
- static HInstruction* NewHDiv(Zone* zone,
- HValue* context,
- HValue* left,
- HValue* right);
+ virtual HValue* Canonicalize() V8_OVERRIDE;
+
+ virtual void UpdateRepresentation(Representation new_rep,
+ HInferRepresentationPhase* h_infer,
+ const char* reason) V8_OVERRIDE {
+ if (new_rep.IsSmi()) new_rep = Representation::Integer32();
+ HArithmeticBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason);
+ }
DECLARE_CONCRETE_INSTRUCTION(Div)
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
- virtual Range* InferRange(Zone* zone);
+ virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
+
+ private:
+ HDiv(HValue* context, HValue* left, HValue* right)
+ : HArithmeticBinaryOperation(context, left, right) {
+ SetFlag(kCanBeDivByZero);
+ SetFlag(kCanOverflow);
+ }
};
-class HMathMinMax: public HArithmeticBinaryOperation {
+class HMathMinMax V8_FINAL : public HArithmeticBinaryOperation {
public:
enum Operation { kMathMin, kMathMax };
- HMathMinMax(HValue* context, HValue* left, HValue* right, Operation op)
- : HArithmeticBinaryOperation(context, left, right),
- operation_(op) { }
+ static HInstruction* New(Zone* zone,
+ HValue* context,
+ HValue* left,
+ HValue* right,
+ Operation op);
- virtual Representation RequiredInputRepresentation(int index) {
- return index == 0
- ? Representation::Tagged()
- : representation();
- }
+ virtual Representation observed_input_representation(int index) V8_OVERRIDE {
+ return RequiredInputRepresentation(index);
+ }
- virtual Representation InferredRepresentation() {
- if (left()->representation().IsInteger32() &&
- right()->representation().IsInteger32()) {
- return Representation::Integer32();
- }
- return Representation::Double();
+ virtual void InferRepresentation(
+ HInferRepresentationPhase* h_infer) V8_OVERRIDE;
+
+ virtual Representation RepresentationFromInputs() V8_OVERRIDE {
+ Representation left_rep = left()->representation();
+ Representation right_rep = right()->representation();
+ Representation result = Representation::Smi();
+ result = result.generalize(left_rep);
+ result = result.generalize(right_rep);
+ if (result.IsTagged()) return Representation::Double();
+ return result;
}
- virtual bool IsCommutative() const { return true; }
+ virtual bool IsCommutative() const V8_OVERRIDE { return true; }
Operation operation() { return operation_; }
DECLARE_CONCRETE_INSTRUCTION(MathMinMax)
protected:
- virtual bool DataEquals(HValue* other) {
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE {
return other->IsMathMinMax() &&
HMathMinMax::cast(other)->operation_ == operation_;
}
- virtual Range* InferRange(Zone* zone);
+ virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
private:
+ HMathMinMax(HValue* context, HValue* left, HValue* right, Operation op)
+ : HArithmeticBinaryOperation(context, left, right),
+ operation_(op) { }
+
Operation operation_;
};
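
RepresentationFromInputs above starts from the most specific representation (Smi), widens it to cover both inputs, and substitutes Double for a would-be Tagged result. A simplified standalone sketch that treats the representations as a totally ordered chain (an assumption; V8's Representation lattice is richer):

enum Rep { kSmi, kInteger32, kDouble, kTagged };

static Rep Generalize(Rep a, Rep b) { return a > b ? a : b; }

static Rep RepresentationFromInputs(Rep left, Rep right) {
  Rep result = Generalize(kSmi, Generalize(left, right));
  // Min/max never needs a tagged result; fall back to Double instead.
  return result == kTagged ? kDouble : result;
}
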
-class HBitwise: public HBitwiseBinaryOperation {
+class HBitwise V8_FINAL : public HBitwiseBinaryOperation {
public:
- HBitwise(Token::Value op, HValue* context, HValue* left, HValue* right)
- : HBitwiseBinaryOperation(context, left, right), op_(op) {
- ASSERT(op == Token::BIT_AND ||
- op == Token::BIT_OR ||
- op == Token::BIT_XOR);
- }
+ static HInstruction* New(Zone* zone,
+ HValue* context,
+ Token::Value op,
+ HValue* left,
+ HValue* right);
Token::Value op() const { return op_; }
- virtual bool IsCommutative() const { return true; }
-
- virtual HValue* Canonicalize();
+ virtual bool IsCommutative() const V8_OVERRIDE { return true; }
- static HInstruction* NewHBitwise(Zone* zone,
- Token::Value op,
- HValue* context,
- HValue* left,
- HValue* right);
+ virtual HValue* Canonicalize() V8_OVERRIDE;
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(Bitwise)
protected:
- virtual bool DataEquals(HValue* other) {
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE {
return op() == HBitwise::cast(other)->op();
}
- virtual Range* InferRange(Zone* zone);
+ virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
private:
+ HBitwise(HValue* context,
+ Token::Value op,
+ HValue* left,
+ HValue* right)
+ : HBitwiseBinaryOperation(context, left, right, HType::TaggedNumber()),
+ op_(op) {
+ ASSERT(op == Token::BIT_AND || op == Token::BIT_OR || op == Token::BIT_XOR);
+ // BIT_AND with a smi-range positive value will always unset the
+ // entire sign-extension of the smi-sign.
+ if (op == Token::BIT_AND &&
+ ((left->IsConstant() &&
+ left->representation().IsSmi() &&
+ HConstant::cast(left)->Integer32Value() >= 0) ||
+ (right->IsConstant() &&
+ right->representation().IsSmi() &&
+ HConstant::cast(right)->Integer32Value() >= 0))) {
+ SetFlag(kTruncatingToSmi);
+ SetFlag(kTruncatingToInt32);
+ // BIT_OR with a smi-range negative value will always set the entire
+ // sign-extension of the smi-sign.
+ } else if (op == Token::BIT_OR &&
+ ((left->IsConstant() &&
+ left->representation().IsSmi() &&
+ HConstant::cast(left)->Integer32Value() < 0) ||
+ (right->IsConstant() &&
+ right->representation().IsSmi() &&
+ HConstant::cast(right)->Integer32Value() < 0))) {
+ SetFlag(kTruncatingToSmi);
+ SetFlag(kTruncatingToInt32);
+ }
+ }
+
Token::Value op_;
};
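
The private constructor above reasons about sign extension: BIT_AND with a non-negative smi-range constant clears every sign-extension bit, and BIT_OR with a negative one sets them all, so the result can be truncated to smi/int32 for free. A small arithmetic illustration in plain C++:

#include <cassert>
#include <stdint.h>

int main() {
  int32_t negative = -5;        // 0xFFFFFFFB: sign-extension bits all set.
  int32_t positive_mask = 7;    // Non-negative smi-range constant.
  // BIT_AND with the non-negative constant clears the entire sign
  // extension, so the result fits the smaller representation.
  assert((negative & positive_mask) == 3);

  int32_t negative_mask = -8;   // 0xFFFFFFF8: sign bits all set.
  // BIT_OR with the negative constant sets the entire sign extension.
  assert((7 | negative_mask) == -1);
  return 0;
}
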
-class HShl: public HBitwiseBinaryOperation {
+class HShl V8_FINAL : public HBitwiseBinaryOperation {
public:
- HShl(HValue* context, HValue* left, HValue* right)
- : HBitwiseBinaryOperation(context, left, right) { }
+ static HInstruction* New(Zone* zone,
+ HValue* context,
+ HValue* left,
+ HValue* right);
- virtual Range* InferRange(Zone* zone);
+ virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
- static HInstruction* NewHShl(Zone* zone,
- HValue* context,
- HValue* left,
- HValue* right);
+ virtual void UpdateRepresentation(Representation new_rep,
+ HInferRepresentationPhase* h_infer,
+ const char* reason) V8_OVERRIDE {
+ if (new_rep.IsSmi() &&
+ !(right()->IsInteger32Constant() &&
+ right()->GetInteger32Constant() >= 0)) {
+ new_rep = Representation::Integer32();
+ }
+ HBitwiseBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason);
+ }
DECLARE_CONCRETE_INSTRUCTION(Shl)
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+
+ private:
+ HShl(HValue* context, HValue* left, HValue* right)
+ : HBitwiseBinaryOperation(context, left, right) { }
};
-class HShr: public HBitwiseBinaryOperation {
+class HShr V8_FINAL : public HBitwiseBinaryOperation {
public:
- HShr(HValue* context, HValue* left, HValue* right)
- : HBitwiseBinaryOperation(context, left, right) { }
+ static HInstruction* New(Zone* zone,
+ HValue* context,
+ HValue* left,
+ HValue* right);
+
+ virtual bool TryDecompose(DecompositionResult* decomposition) V8_OVERRIDE {
+ if (right()->IsInteger32Constant()) {
+ if (decomposition->Apply(left(), 0, right()->GetInteger32Constant())) {
+ // This is intended to look for HAdd and HSub, to handle compounds
+        // like ((base + offset) >> scale) with a single decomposition.
+ left()->TryDecompose(decomposition);
+ return true;
+ }
+ }
+ return false;
+ }
- virtual Range* InferRange(Zone* zone);
+ virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
- static HInstruction* NewHShr(Zone* zone,
- HValue* context,
- HValue* left,
- HValue* right);
+ virtual void UpdateRepresentation(Representation new_rep,
+ HInferRepresentationPhase* h_infer,
+ const char* reason) V8_OVERRIDE {
+ if (new_rep.IsSmi()) new_rep = Representation::Integer32();
+ HBitwiseBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason);
+ }
DECLARE_CONCRETE_INSTRUCTION(Shr)
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+
+ private:
+ HShr(HValue* context, HValue* left, HValue* right)
+ : HBitwiseBinaryOperation(context, left, right) { }
};
-class HSar: public HBitwiseBinaryOperation {
+class HSar V8_FINAL : public HBitwiseBinaryOperation {
public:
- HSar(HValue* context, HValue* left, HValue* right)
- : HBitwiseBinaryOperation(context, left, right) { }
+ static HInstruction* New(Zone* zone,
+ HValue* context,
+ HValue* left,
+ HValue* right);
+
+ virtual bool TryDecompose(DecompositionResult* decomposition) V8_OVERRIDE {
+ if (right()->IsInteger32Constant()) {
+ if (decomposition->Apply(left(), 0, right()->GetInteger32Constant())) {
+ // This is intended to look for HAdd and HSub, to handle compounds
+        // like ((base + offset) >> scale) with a single decomposition.
+ left()->TryDecompose(decomposition);
+ return true;
+ }
+ }
+ return false;
+ }
- virtual Range* InferRange(Zone* zone);
+ virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
- static HInstruction* NewHSar(Zone* zone,
- HValue* context,
- HValue* left,
- HValue* right);
+ virtual void UpdateRepresentation(Representation new_rep,
+ HInferRepresentationPhase* h_infer,
+ const char* reason) V8_OVERRIDE {
+ if (new_rep.IsSmi()) new_rep = Representation::Integer32();
+ HBitwiseBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason);
+ }
DECLARE_CONCRETE_INSTRUCTION(Sar)
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+
+ private:
+ HSar(HValue* context, HValue* left, HValue* right)
+ : HBitwiseBinaryOperation(context, left, right) { }
};
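
The TryDecompose overrides in HShr and HSar above record the shift amount and then let the shifted operand (an HAdd or HSub with an integer constant) contribute its offset, so a compound like ((base + offset) >> scale) is taken apart in one walk. A standalone sketch of that shape, using hypothetical Expr/DecompositionResult types rather than the V8 classes:

struct Expr {
  enum Kind { kBase, kAddConstant, kShrConstant };
  Kind kind;
  Expr* operand;   // inner expression for kAddConstant / kShrConstant
  int constant;    // added constant, or shift amount
};

struct DecompositionResult {
  Expr* base;
  int offset;
  int scale;
  DecompositionResult() : base(0), offset(0), scale(0) {}
};

// ((base + 12) >> 2) decomposes to { base, offset = 12, scale = 2 }.
static bool TryDecompose(Expr* e, DecompositionResult* result) {
  switch (e->kind) {
    case Expr::kShrConstant:
      result->scale = e->constant;
      return TryDecompose(e->operand, result);
    case Expr::kAddConstant:
      result->offset += e->constant;
      return TryDecompose(e->operand, result);
    case Expr::kBase:
      result->base = e;
      return true;
  }
  return false;
}
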
-class HOsrEntry: public HTemplateInstruction<0> {
+class HRor V8_FINAL : public HBitwiseBinaryOperation {
public:
- explicit HOsrEntry(BailoutId ast_id) : ast_id_(ast_id) {
- SetGVNFlag(kChangesOsrEntries);
+ static HInstruction* New(Zone* zone,
+ HValue* context,
+ HValue* left,
+ HValue* right) {
+ return new(zone) HRor(context, left, right);
}
+ virtual void UpdateRepresentation(Representation new_rep,
+ HInferRepresentationPhase* h_infer,
+ const char* reason) V8_OVERRIDE {
+ if (new_rep.IsSmi()) new_rep = Representation::Integer32();
+ HBitwiseBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason);
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(Ror)
+
+ protected:
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+
+ private:
+ HRor(HValue* context, HValue* left, HValue* right)
+ : HBitwiseBinaryOperation(context, left, right) {
+ ChangeRepresentation(Representation::Integer32());
+ }
+};
+
+
+class HOsrEntry V8_FINAL : public HTemplateInstruction<0> {
+ public:
+ DECLARE_INSTRUCTION_FACTORY_P1(HOsrEntry, BailoutId);
+
BailoutId ast_id() const { return ast_id_; }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
}
DECLARE_CONCRETE_INSTRUCTION(OsrEntry)
private:
+ explicit HOsrEntry(BailoutId ast_id) : ast_id_(ast_id) {
+ SetGVNFlag(kChangesOsrEntries);
+ SetGVNFlag(kChangesNewSpacePromotion);
+ }
+
BailoutId ast_id_;
};
-class HParameter: public HTemplateInstruction<0> {
+class HParameter V8_FINAL : public HTemplateInstruction<0> {
public:
- explicit HParameter(unsigned index) : index_(index) {
- set_representation(Representation::Tagged());
- }
+ enum ParameterKind {
+ STACK_PARAMETER,
+ REGISTER_PARAMETER
+ };
+
+ DECLARE_INSTRUCTION_FACTORY_P1(HParameter, unsigned);
+ DECLARE_INSTRUCTION_FACTORY_P2(HParameter, unsigned, ParameterKind);
+ DECLARE_INSTRUCTION_FACTORY_P3(HParameter, unsigned, ParameterKind,
+ Representation);
unsigned index() const { return index_; }
+ ParameterKind kind() const { return kind_; }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
}
DECLARE_CONCRETE_INSTRUCTION(Parameter)
private:
+ explicit HParameter(unsigned index,
+ ParameterKind kind = STACK_PARAMETER)
+ : index_(index),
+ kind_(kind) {
+ set_representation(Representation::Tagged());
+ }
+
+ explicit HParameter(unsigned index,
+ ParameterKind kind,
+ Representation r)
+ : index_(index),
+ kind_(kind) {
+ set_representation(r);
+ }
+
unsigned index_;
+ ParameterKind kind_;
};
-class HCallStub: public HUnaryCall {
+class HCallStub V8_FINAL : public HUnaryCall {
public:
- HCallStub(HValue* context, CodeStub::Major major_key, int argument_count)
- : HUnaryCall(context, argument_count),
- major_key_(major_key),
- transcendental_type_(TranscendentalCache::kNumberOfCaches) {
- }
-
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HCallStub, CodeStub::Major, int);
CodeStub::Major major_key() { return major_key_; }
HValue* context() { return value(); }
@@ -3798,87 +5147,121 @@ class HCallStub: public HUnaryCall {
return transcendental_type_;
}
- virtual void PrintDataTo(StringStream* stream);
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(CallStub)
private:
+ HCallStub(HValue* context, CodeStub::Major major_key, int argument_count)
+ : HUnaryCall(context, argument_count),
+ major_key_(major_key),
+ transcendental_type_(TranscendentalCache::kNumberOfCaches) {
+ }
+
CodeStub::Major major_key_;
TranscendentalCache::Type transcendental_type_;
};
-class HUnknownOSRValue: public HTemplateInstruction<0> {
+class HUnknownOSRValue V8_FINAL : public HTemplateInstruction<0> {
public:
- HUnknownOSRValue()
- : incoming_value_(NULL) {
- set_representation(Representation::Tagged());
- }
+ DECLARE_INSTRUCTION_FACTORY_P2(HUnknownOSRValue, HEnvironment*, int);
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual void PrintDataTo(StringStream* stream);
+
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
}
- void set_incoming_value(HPhi* value) {
- incoming_value_ = value;
- }
+ void set_incoming_value(HPhi* value) { incoming_value_ = value; }
+ HPhi* incoming_value() { return incoming_value_; }
+ HEnvironment *environment() { return environment_; }
+ int index() { return index_; }
- HPhi* incoming_value() {
- return incoming_value_;
+ virtual Representation KnownOptimalRepresentation() V8_OVERRIDE {
+ if (incoming_value_ == NULL) return Representation::None();
+ return incoming_value_->KnownOptimalRepresentation();
}
DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue)
private:
+ HUnknownOSRValue(HEnvironment* environment, int index)
+ : environment_(environment),
+ index_(index),
+ incoming_value_(NULL) {
+ set_representation(Representation::Tagged());
+ }
+
+ HEnvironment* environment_;
+ int index_;
HPhi* incoming_value_;
};
-class HLoadGlobalCell: public HTemplateInstruction<0> {
+class HLoadGlobalCell V8_FINAL : public HTemplateInstruction<0> {
public:
- HLoadGlobalCell(Handle<JSGlobalPropertyCell> cell, PropertyDetails details)
- : cell_(cell), details_(details) {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- SetGVNFlag(kDependsOnGlobalVars);
- }
+ DECLARE_INSTRUCTION_FACTORY_P2(HLoadGlobalCell, Handle<Cell>,
+ PropertyDetails);
- Handle<JSGlobalPropertyCell> cell() const { return cell_; }
+ Unique<Cell> cell() const { return cell_; }
bool RequiresHoleCheck() const;
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- virtual intptr_t Hashcode() {
- ASSERT_ALLOCATION_DISABLED;
- return reinterpret_cast<intptr_t>(*cell_);
+ virtual intptr_t Hashcode() V8_OVERRIDE {
+ return cell_.Hashcode();
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual void FinalizeUniqueness() V8_OVERRIDE {
+ cell_ = Unique<Cell>(cell_.handle());
+ }
+
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
}
DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell)
protected:
- virtual bool DataEquals(HValue* other) {
- HLoadGlobalCell* b = HLoadGlobalCell::cast(other);
- return cell_.is_identical_to(b->cell());
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE {
+ return cell_ == HLoadGlobalCell::cast(other)->cell_;
}
private:
- virtual bool IsDeletable() const { return !RequiresHoleCheck(); }
+ HLoadGlobalCell(Handle<Cell> cell, PropertyDetails details)
+ : cell_(Unique<Cell>::CreateUninitialized(cell)), details_(details) {
+ set_representation(Representation::Tagged());
+ SetFlag(kUseGVN);
+ SetGVNFlag(kDependsOnGlobalVars);
+ }
+
+ virtual bool IsDeletable() const V8_OVERRIDE { return !RequiresHoleCheck(); }
- Handle<JSGlobalPropertyCell> cell_;
+ Unique<Cell> cell_;
PropertyDetails details_;
};
-class HLoadGlobalGeneric: public HTemplateInstruction<2> {
+class HLoadGlobalGeneric V8_FINAL : public HTemplateInstruction<2> {
public:
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(HLoadGlobalGeneric, HValue*,
+ Handle<Object>, bool);
+
+ HValue* context() { return OperandAt(0); }
+ HValue* global_object() { return OperandAt(1); }
+ Handle<Object> name() const { return name_; }
+ bool for_typeof() const { return for_typeof_; }
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ return Representation::Tagged();
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric)
+
+ private:
HLoadGlobalGeneric(HValue* context,
HValue* global_object,
Handle<Object> name,
@@ -3891,22 +5274,207 @@ class HLoadGlobalGeneric: public HTemplateInstruction<2> {
SetAllSideEffects();
}
+ Handle<Object> name_;
+ bool for_typeof_;
+};
+
+
+class HAllocate V8_FINAL : public HTemplateInstruction<2> {
+ public:
+ static HAllocate* New(Zone* zone,
+ HValue* context,
+ HValue* size,
+ HType type,
+ PretenureFlag pretenure_flag,
+ InstanceType instance_type) {
+ return new(zone) HAllocate(context, size, type, pretenure_flag,
+ instance_type);
+ }
+
+ // Maximum instance size for which allocations will be inlined.
+ static const int kMaxInlineSize = 64 * kPointerSize;
+
HValue* context() { return OperandAt(0); }
- HValue* global_object() { return OperandAt(1); }
- Handle<Object> name() const { return name_; }
- bool for_typeof() const { return for_typeof_; }
+ HValue* size() { return OperandAt(1); }
- virtual void PrintDataTo(StringStream* stream);
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ if (index == 0) {
+ return Representation::Tagged();
+ } else {
+ return Representation::Integer32();
+ }
+ }
+
+ virtual Handle<Map> GetMonomorphicJSObjectMap() {
+ return known_initial_map_;
+ }
+
+ void set_known_initial_map(Handle<Map> known_initial_map) {
+ known_initial_map_ = known_initial_map;
+ }
+
+ bool IsNewSpaceAllocation() const {
+ return (flags_ & ALLOCATE_IN_NEW_SPACE) != 0;
+ }
+
+ bool IsOldDataSpaceAllocation() const {
+ return (flags_ & ALLOCATE_IN_OLD_DATA_SPACE) != 0;
+ }
+
+ bool IsOldPointerSpaceAllocation() const {
+ return (flags_ & ALLOCATE_IN_OLD_POINTER_SPACE) != 0;
+ }
+
+ bool MustAllocateDoubleAligned() const {
+ return (flags_ & ALLOCATE_DOUBLE_ALIGNED) != 0;
+ }
+
+ bool MustPrefillWithFiller() const {
+ return (flags_ & PREFILL_WITH_FILLER) != 0;
+ }
+
+ void MakePrefillWithFiller() {
+ flags_ = static_cast<HAllocate::Flags>(flags_ | PREFILL_WITH_FILLER);
+ }
+
+ void MakeDoubleAligned() {
+ flags_ = static_cast<HAllocate::Flags>(flags_ | ALLOCATE_DOUBLE_ALIGNED);
+ }
+
+ virtual void HandleSideEffectDominator(GVNFlag side_effect,
+ HValue* dominator) V8_OVERRIDE;
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ DECLARE_CONCRETE_INSTRUCTION(Allocate)
+
+ private:
+ enum Flags {
+ ALLOCATE_IN_NEW_SPACE = 1 << 0,
+ ALLOCATE_IN_OLD_DATA_SPACE = 1 << 1,
+ ALLOCATE_IN_OLD_POINTER_SPACE = 1 << 2,
+ ALLOCATE_DOUBLE_ALIGNED = 1 << 3,
+ PREFILL_WITH_FILLER = 1 << 4
+ };
+
+ HAllocate(HValue* context,
+ HValue* size,
+ HType type,
+ PretenureFlag pretenure_flag,
+ InstanceType instance_type)
+ : HTemplateInstruction<2>(type),
+ dominating_allocate_(NULL),
+ filler_free_space_size_(NULL),
+ clear_next_map_word_(false) {
+ SetOperandAt(0, context);
+ SetOperandAt(1, size);
+ set_representation(Representation::Tagged());
+ SetFlag(kTrackSideEffectDominators);
+ SetGVNFlag(kChangesNewSpacePromotion);
+ SetGVNFlag(kDependsOnNewSpacePromotion);
+ flags_ = pretenure_flag == TENURED
+ ? (Heap::TargetSpaceId(instance_type) == OLD_POINTER_SPACE
+ ? ALLOCATE_IN_OLD_POINTER_SPACE : ALLOCATE_IN_OLD_DATA_SPACE)
+ : ALLOCATE_IN_NEW_SPACE;
+ if (instance_type == FIXED_DOUBLE_ARRAY_TYPE) {
+ flags_ = static_cast<HAllocate::Flags>(flags_ | ALLOCATE_DOUBLE_ALIGNED);
+ }
+    // We have to fill the allocated object with one-word fillers if we do
+    // not use allocation folding, since some allocations may depend on each
+    // other, i.e., have a pointer to each other. A GC in between these
+    // allocations may leave such objects behind in an incompletely
+    // initialized state.
+ if (!FLAG_use_gvn || !FLAG_use_allocation_folding) {
+ flags_ = static_cast<HAllocate::Flags>(flags_ | PREFILL_WITH_FILLER);
+ }
+ clear_next_map_word_ = pretenure_flag == NOT_TENURED &&
+ AllocationSite::CanTrack(instance_type);
+ }
+
+ void UpdateSize(HValue* size) {
+ SetOperandAt(1, size);
+ }
+
+ HAllocate* GetFoldableDominator(HAllocate* dominator);
+
+ void UpdateFreeSpaceFiller(int32_t filler_size);
+
+ void CreateFreeSpaceFiller(int32_t filler_size);
+
+ bool IsFoldable(HAllocate* allocate) {
+ return (IsNewSpaceAllocation() && allocate->IsNewSpaceAllocation()) ||
+ (IsOldDataSpaceAllocation() && allocate->IsOldDataSpaceAllocation()) ||
+ (IsOldPointerSpaceAllocation() &&
+ allocate->IsOldPointerSpaceAllocation());
+ }
+
+ void ClearNextMapWord(int offset);
+
+ Flags flags_;
+ Handle<Map> known_initial_map_;
+ HAllocate* dominating_allocate_;
+ HStoreNamedField* filler_free_space_size_;
+ bool clear_next_map_word_;
+};
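
The private constructor above composes its Flags with static_cast<HAllocate::Flags>(flags_ | BIT) because bitwise OR on an unscoped enum yields an int. A minimal sketch of that idiom:

enum Flags {
  ALLOCATE_IN_NEW_SPACE   = 1 << 0,
  ALLOCATE_DOUBLE_ALIGNED = 1 << 3,
  PREFILL_WITH_FILLER     = 1 << 4
};

int main() {
  Flags flags = ALLOCATE_IN_NEW_SPACE;
  // operator| produces int, so cast back to keep the flag field typed.
  flags = static_cast<Flags>(flags | ALLOCATE_DOUBLE_ALIGNED);
  return (flags & ALLOCATE_DOUBLE_ALIGNED) != 0 ? 0 : 1;
}
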
+
+
+class HStoreCodeEntry V8_FINAL: public HTemplateInstruction<2> {
+ public:
+ static HStoreCodeEntry* New(Zone* zone,
+ HValue* context,
+ HValue* function,
+ HValue* code) {
+ return new(zone) HStoreCodeEntry(function, code);
+ }
virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
- DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric)
+ HValue* function() { return OperandAt(0); }
+ HValue* code_object() { return OperandAt(1); }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreCodeEntry)
private:
- Handle<Object> name_;
- bool for_typeof_;
+ HStoreCodeEntry(HValue* function, HValue* code) {
+ SetOperandAt(0, function);
+ SetOperandAt(1, code);
+ }
+};
+
+
+class HInnerAllocatedObject V8_FINAL: public HTemplateInstruction<1> {
+ public:
+ static HInnerAllocatedObject* New(Zone* zone,
+ HValue* context,
+ HValue* value,
+ int offset,
+ HType type = HType::Tagged()) {
+ return new(zone) HInnerAllocatedObject(value, offset, type);
+ }
+
+ HValue* base_object() { return OperandAt(0); }
+ int offset() { return offset_; }
+
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ return Representation::Tagged();
+ }
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject)
+
+ private:
+ HInnerAllocatedObject(HValue* value, int offset, HType type = HType::Tagged())
+ : HTemplateInstruction<1>(type), offset_(offset) {
+ ASSERT(value->IsAllocate());
+ SetOperandAt(0, value);
+ set_type(type);
+ set_representation(Representation::Tagged());
+ }
+
+ int offset_;
};
@@ -3919,22 +5487,33 @@ inline bool StoringValueNeedsWriteBarrier(HValue* value) {
inline bool ReceiverObjectNeedsWriteBarrier(HValue* object,
HValue* new_space_dominator) {
- return !object->IsAllocateObject() || (object != new_space_dominator);
+ if (object->IsInnerAllocatedObject()) {
+ return ReceiverObjectNeedsWriteBarrier(
+ HInnerAllocatedObject::cast(object)->base_object(),
+ new_space_dominator);
+ }
+ if (object->IsConstant() && HConstant::cast(object)->IsCell()) {
+ return false;
+ }
+ if (object->IsConstant() &&
+ HConstant::cast(object)->HasExternalReferenceValue()) {
+ // Stores to external references require no write barriers
+ return false;
+ }
+ if (object != new_space_dominator) return true;
+ if (object->IsAllocate()) {
+ return !HAllocate::cast(object)->IsNewSpaceAllocation();
+ }
+ return true;
}
-class HStoreGlobalCell: public HUnaryOperation {
+class HStoreGlobalCell V8_FINAL : public HUnaryOperation {
public:
- HStoreGlobalCell(HValue* value,
- Handle<JSGlobalPropertyCell> cell,
- PropertyDetails details)
- : HUnaryOperation(value),
- cell_(cell),
- details_(details) {
- SetGVNFlag(kChangesGlobalVars);
- }
+ DECLARE_INSTRUCTION_FACTORY_P3(HStoreGlobalCell, HValue*,
+ Handle<PropertyCell>, PropertyDetails);
- Handle<JSGlobalPropertyCell> cell() const { return cell_; }
+ Unique<PropertyCell> cell() const { return cell_; }
bool RequiresHoleCheck() {
return !details_.IsDontDelete() || details_.IsReadOnly();
}
@@ -3942,33 +5521,42 @@ class HStoreGlobalCell: public HUnaryOperation {
return StoringValueNeedsWriteBarrier(value());
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual void FinalizeUniqueness() V8_OVERRIDE {
+ cell_ = Unique<PropertyCell>(cell_.handle());
+ }
+
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell)
private:
- Handle<JSGlobalPropertyCell> cell_;
+ HStoreGlobalCell(HValue* value,
+ Handle<PropertyCell> cell,
+ PropertyDetails details)
+ : HUnaryOperation(value),
+ cell_(Unique<PropertyCell>::CreateUninitialized(cell)),
+ details_(details) {
+ SetGVNFlag(kChangesGlobalVars);
+ }
+
+ Unique<PropertyCell> cell_;
PropertyDetails details_;
};
-class HStoreGlobalGeneric: public HTemplateInstruction<3> {
+class HStoreGlobalGeneric : public HTemplateInstruction<3> {
public:
- HStoreGlobalGeneric(HValue* context,
- HValue* global_object,
- Handle<Object> name,
- HValue* value,
- StrictModeFlag strict_mode_flag)
- : name_(name),
- strict_mode_flag_(strict_mode_flag) {
- SetOperandAt(0, context);
- SetOperandAt(1, global_object);
- SetOperandAt(2, value);
- set_representation(Representation::Tagged());
- SetAllSideEffects();
+ inline static HStoreGlobalGeneric* New(Zone* zone,
+ HValue* context,
+ HValue* global_object,
+ Handle<Object> name,
+ HValue* value,
+ StrictModeFlag strict_mode_flag) {
+ return new(zone) HStoreGlobalGeneric(context, global_object,
+ name, value, strict_mode_flag);
}
HValue* context() { return OperandAt(0); }
@@ -3977,21 +5565,35 @@ class HStoreGlobalGeneric: public HTemplateInstruction<3> {
HValue* value() { return OperandAt(2); }
StrictModeFlag strict_mode_flag() { return strict_mode_flag_; }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
DECLARE_CONCRETE_INSTRUCTION(StoreGlobalGeneric)
private:
+ HStoreGlobalGeneric(HValue* context,
+ HValue* global_object,
+ Handle<Object> name,
+ HValue* value,
+ StrictModeFlag strict_mode_flag)
+ : name_(name),
+ strict_mode_flag_(strict_mode_flag) {
+ SetOperandAt(0, context);
+ SetOperandAt(1, global_object);
+ SetOperandAt(2, value);
+ set_representation(Representation::Tagged());
+ SetAllSideEffects();
+ }
+
Handle<Object> name_;
StrictModeFlag strict_mode_flag_;
};
-class HLoadContextSlot: public HUnaryOperation {
+class HLoadContextSlot V8_FINAL : public HUnaryOperation {
public:
enum Mode {
// Perform a normal load of the context slot without checking its value.
@@ -4036,29 +5638,29 @@ class HLoadContextSlot: public HUnaryOperation {
return mode_ != kNoCheck;
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot)
protected:
- virtual bool DataEquals(HValue* other) {
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE {
HLoadContextSlot* b = HLoadContextSlot::cast(other);
return (slot_index() == b->slot_index());
}
private:
- virtual bool IsDeletable() const { return !RequiresHoleCheck(); }
+ virtual bool IsDeletable() const V8_OVERRIDE { return !RequiresHoleCheck(); }
int slot_index_;
Mode mode_;
};
-class HStoreContextSlot: public HTemplateInstruction<2> {
+class HStoreContextSlot V8_FINAL : public HTemplateInstruction<2> {
public:
enum Mode {
// Perform a normal store to the context slot without checking its previous
@@ -4073,12 +5675,8 @@ class HStoreContextSlot: public HTemplateInstruction<2> {
kCheckIgnoreAssignment
};
- HStoreContextSlot(HValue* context, int slot_index, Mode mode, HValue* value)
- : slot_index_(slot_index), mode_(mode) {
- SetOperandAt(0, context);
- SetOperandAt(1, value);
- SetGVNFlag(kChangesContextSlots);
- }
+ DECLARE_INSTRUCTION_FACTORY_P4(HStoreContextSlot, HValue*, int,
+ Mode, HValue*);
HValue* context() { return OperandAt(0); }
HValue* value() { return OperandAt(1); }
@@ -4097,142 +5695,374 @@ class HStoreContextSlot: public HTemplateInstruction<2> {
return mode_ != kNoCheck;
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot)
private:
+ HStoreContextSlot(HValue* context, int slot_index, Mode mode, HValue* value)
+ : slot_index_(slot_index), mode_(mode) {
+ SetOperandAt(0, context);
+ SetOperandAt(1, value);
+ SetGVNFlag(kChangesContextSlots);
+ }
+
int slot_index_;
Mode mode_;
};
-class HLoadNamedField: public HUnaryOperation {
+// Represents an access to a portion of an object, such as the map pointer,
+// array elements pointer, etc., but not accesses to array elements themselves.
+class HObjectAccess V8_FINAL {
public:
- HLoadNamedField(HValue* object, bool is_in_object, int offset)
- : HUnaryOperation(object),
- is_in_object_(is_in_object),
- offset_(offset) {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- SetGVNFlag(kDependsOnMaps);
- if (is_in_object) {
- SetGVNFlag(kDependsOnInobjectFields);
- } else {
- SetGVNFlag(kDependsOnBackingStoreFields);
- }
+ inline bool IsInobject() const {
+ return portion() != kBackingStore && portion() != kExternalMemory;
}
- HValue* object() { return OperandAt(0); }
- bool is_in_object() const { return is_in_object_; }
- int offset() const { return offset_; }
+ inline bool IsExternalMemory() const {
+ return portion() == kExternalMemory;
+ }
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
+ inline bool IsStringLength() const {
+ return portion() == kStringLengths;
}
- virtual void PrintDataTo(StringStream* stream);
- DECLARE_CONCRETE_INSTRUCTION(LoadNamedField)
+ inline int offset() const {
+ return OffsetField::decode(value_);
+ }
- protected:
- virtual bool DataEquals(HValue* other) {
- HLoadNamedField* b = HLoadNamedField::cast(other);
- return is_in_object_ == b->is_in_object_ && offset_ == b->offset_;
+ inline Representation representation() const {
+ return Representation::FromKind(RepresentationField::decode(value_));
}
- private:
- virtual bool IsDeletable() const { return true; }
+ inline Handle<String> name() const {
+ return name_;
+ }
- bool is_in_object_;
- int offset_;
-};
+ inline HObjectAccess WithRepresentation(Representation representation) {
+ return HObjectAccess(portion(), offset(), representation, name());
+ }
+ static HObjectAccess ForHeapNumberValue() {
+ return HObjectAccess(
+ kDouble, HeapNumber::kValueOffset, Representation::Double());
+ }
-class HLoadNamedFieldPolymorphic: public HTemplateInstruction<2> {
- public:
- HLoadNamedFieldPolymorphic(HValue* context,
- HValue* object,
- SmallMapList* types,
- Handle<String> name,
- Zone* zone);
+ static HObjectAccess ForHeapNumberValueLowestBits() {
+ return HObjectAccess(kDouble,
+ HeapNumber::kValueOffset,
+ Representation::Integer32());
+ }
- HValue* context() { return OperandAt(0); }
- HValue* object() { return OperandAt(1); }
- SmallMapList* types() { return &types_; }
- Handle<String> name() { return name_; }
- bool need_generic() { return need_generic_; }
+ static HObjectAccess ForHeapNumberValueHighestBits() {
+ return HObjectAccess(kDouble,
+ HeapNumber::kValueOffset + kIntSize,
+ Representation::Integer32());
+ }
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
+ static HObjectAccess ForElementsPointer() {
+ return HObjectAccess(kElementsPointer, JSObject::kElementsOffset);
}
- virtual void PrintDataTo(StringStream* stream);
+ static HObjectAccess ForLiteralsPointer() {
+ return HObjectAccess(kInobject, JSFunction::kLiteralsOffset);
+ }
+
+ static HObjectAccess ForNextFunctionLinkPointer() {
+ return HObjectAccess(kInobject, JSFunction::kNextFunctionLinkOffset);
+ }
+
+ static HObjectAccess ForArrayLength(ElementsKind elements_kind) {
+ return HObjectAccess(
+ kArrayLengths,
+ JSArray::kLengthOffset,
+ IsFastElementsKind(elements_kind) &&
+ FLAG_track_fields
+ ? Representation::Smi() : Representation::Tagged());
+ }
+
+ static HObjectAccess ForAllocationSiteOffset(int offset) {
+ ASSERT(offset >= HeapObject::kHeaderSize && offset < AllocationSite::kSize);
+ return HObjectAccess(kInobject, offset);
+ }
+
+ static HObjectAccess ForAllocationSiteList() {
+ return HObjectAccess(kExternalMemory, 0, Representation::Tagged());
+ }
+
+ static HObjectAccess ForFixedArrayLength() {
+ return HObjectAccess(
+ kArrayLengths,
+ FixedArray::kLengthOffset,
+ FLAG_track_fields ? Representation::Smi() : Representation::Tagged());
+ }
+
+ static HObjectAccess ForStringLength() {
+ STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
+ return HObjectAccess(
+ kStringLengths,
+ String::kLengthOffset,
+ FLAG_track_fields ? Representation::Smi() : Representation::Tagged());
+ }
+
+ static HObjectAccess ForPropertiesPointer() {
+ return HObjectAccess(kInobject, JSObject::kPropertiesOffset);
+ }
+
+ static HObjectAccess ForPrototypeOrInitialMap() {
+ return HObjectAccess(kInobject, JSFunction::kPrototypeOrInitialMapOffset);
+ }
+
+ static HObjectAccess ForSharedFunctionInfoPointer() {
+ return HObjectAccess(kInobject, JSFunction::kSharedFunctionInfoOffset);
+ }
+
+ static HObjectAccess ForCodeEntryPointer() {
+ return HObjectAccess(kInobject, JSFunction::kCodeEntryOffset);
+ }
+
+ static HObjectAccess ForCodeOffset() {
+ return HObjectAccess(kInobject, SharedFunctionInfo::kCodeOffset);
+ }
+
+ static HObjectAccess ForFirstCodeSlot() {
+ return HObjectAccess(kInobject, SharedFunctionInfo::kFirstCodeSlot);
+ }
+
+ static HObjectAccess ForFirstContextSlot() {
+ return HObjectAccess(kInobject, SharedFunctionInfo::kFirstContextSlot);
+ }
+
+ static HObjectAccess ForOptimizedCodeMap() {
+ return HObjectAccess(kInobject,
+ SharedFunctionInfo::kOptimizedCodeMapOffset);
+ }
+
+ static HObjectAccess ForFunctionContextPointer() {
+ return HObjectAccess(kInobject, JSFunction::kContextOffset);
+ }
+
+ static HObjectAccess ForMap() {
+ return HObjectAccess(kMaps, JSObject::kMapOffset);
+ }
+
+ static HObjectAccess ForMapInstanceSize() {
+ return HObjectAccess(kInobject,
+ Map::kInstanceSizeOffset,
+ Representation::Byte());
+ }
+
+ static HObjectAccess ForPropertyCellValue() {
+ return HObjectAccess(kInobject, PropertyCell::kValueOffset);
+ }
+
+ static HObjectAccess ForCellValue() {
+ return HObjectAccess(kInobject, Cell::kValueOffset);
+ }
+
+ static HObjectAccess ForAllocationMementoSite() {
+ return HObjectAccess(kInobject, AllocationMemento::kAllocationSiteOffset);
+ }
+
+ static HObjectAccess ForCounter() {
+ return HObjectAccess(kExternalMemory, 0, Representation::Integer32());
+ }
+
+ // Create an access to an offset in a fixed array header.
+ static HObjectAccess ForFixedArrayHeader(int offset);
- DECLARE_CONCRETE_INSTRUCTION(LoadNamedFieldPolymorphic)
+ // Create an access to an in-object property in a JSObject.
+ static HObjectAccess ForJSObjectOffset(int offset,
+ Representation representation = Representation::Tagged());
- static const int kMaxLoadPolymorphism = 4;
+ // Create an access to an in-object property in a JSArray.
+ static HObjectAccess ForJSArrayOffset(int offset);
+
+ static HObjectAccess ForContextSlot(int index);
+
+ // Create an access to the backing store of an object.
+ static HObjectAccess ForBackingStoreOffset(int offset,
+ Representation representation = Representation::Tagged());
+
+ // Create an access to a resolved field (in-object or backing store).
+ static HObjectAccess ForField(Handle<Map> map,
+ LookupResult *lookup, Handle<String> name = Handle<String>::null());
+
+ // Create an access for the payload of a Cell or JSGlobalPropertyCell.
+ static HObjectAccess ForCellPayload(Isolate* isolate);
+
+ void PrintTo(StringStream* stream);
+
+ inline bool Equals(HObjectAccess that) const {
+ return value_ == that.value_; // portion and offset must match
+ }
protected:
- virtual bool DataEquals(HValue* value);
+ void SetGVNFlags(HValue *instr, bool is_store);
private:
- SmallMapList types_;
+ // internal use only; different parts of an object or array
+ enum Portion {
+ kMaps, // map of an object
+ kArrayLengths, // the length of an array
+ kStringLengths, // the length of a string
+ kElementsPointer, // elements pointer
+ kBackingStore, // some field in the backing store
+ kDouble, // some double field
+ kInobject, // some other in-object field
+ kExternalMemory // some field in external memory
+ };
+
+ HObjectAccess(Portion portion, int offset,
+ Representation representation = Representation::Tagged(),
+ Handle<String> name = Handle<String>::null())
+ : value_(PortionField::encode(portion) |
+ RepresentationField::encode(representation.kind()) |
+ OffsetField::encode(offset)),
+ name_(name) {
+ // assert that the fields decode correctly
+ ASSERT(this->offset() == offset);
+ ASSERT(this->portion() == portion);
+ ASSERT(RepresentationField::decode(value_) == representation.kind());
+ }
+
+ class PortionField : public BitField<Portion, 0, 3> {};
+ class RepresentationField : public BitField<Representation::Kind, 3, 3> {};
+ class OffsetField : public BitField<int, 6, 26> {};
+
+ uint32_t value_; // encodes portion, representation, and offset
Handle<String> name_;
- bool need_generic_;
-};
+ friend class HLoadNamedField;
+ friend class HStoreNamedField;
+
+ inline Portion portion() const {
+ return PortionField::decode(value_);
+ }
+};
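
HObjectAccess packs everything except the optional name into a single uint32_t: 3 bits for the portion of the object being touched, 3 bits for the representation kind, and 26 bits for the byte offset, as laid out by the PortionField, RepresentationField, and OffsetField declarations above. Below is a minimal, self-contained sketch of that layout written with plain shifts and masks instead of V8's BitField template; the enum values and helper names are illustrative stand-ins, not the real V8 definitions.

    #include <cassert>
    #include <cstdint>

    // Stand-ins for the real Portion and Representation::Kind enums.
    enum Portion { kMaps, kArrayLengths, kStringLengths, kElementsPointer,
                   kBackingStore, kDouble, kInobject, kExternalMemory };
    enum RepKind { kTagged, kSmi, kDouble64, kInteger32 };

    // Same layout as PortionField (bits 0-2), RepresentationField (bits 3-5),
    // OffsetField (bits 6-31).
    static uint32_t Encode(Portion portion, RepKind rep, int offset) {
      return static_cast<uint32_t>(portion)              // bits 0..2
           | (static_cast<uint32_t>(rep) << 3)           // bits 3..5
           | (static_cast<uint32_t>(offset) << 6);       // bits 6..31
    }

    static Portion DecodePortion(uint32_t v) { return static_cast<Portion>(v & 0x7); }
    static RepKind DecodeRep(uint32_t v) { return static_cast<RepKind>((v >> 3) & 0x7); }
    static int DecodeOffset(uint32_t v) { return static_cast<int>(v >> 6); }

    int main() {
      uint32_t v = Encode(kInobject, kTagged, 24);
      assert(DecodePortion(v) == kInobject);
      assert(DecodeRep(v) == kTagged);
      assert(DecodeOffset(v) == 24);
      return 0;
    }

Two HObjectAccess values compare equal exactly when these packed bits match, which is what Equals() above relies on.
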
-class HLoadNamedGeneric: public HTemplateInstruction<2> {
+class HLoadNamedField V8_FINAL : public HTemplateInstruction<1> {
public:
- HLoadNamedGeneric(HValue* context, HValue* object, Handle<Object> name)
- : name_(name) {
- SetOperandAt(0, context);
- SetOperandAt(1, object);
- set_representation(Representation::Tagged());
- SetAllSideEffects();
+ DECLARE_INSTRUCTION_FACTORY_P2(HLoadNamedField, HValue*, HObjectAccess);
+
+ HValue* object() { return OperandAt(0); }
+ bool HasTypeCheck() { return object()->IsCheckMaps(); }
+ HObjectAccess access() const { return access_; }
+ Representation field_representation() const {
+ return access_.representation();
}
+ virtual bool HasEscapingOperandAt(int index) V8_OVERRIDE { return false; }
+ virtual bool HasOutOfBoundsAccess(int size) V8_OVERRIDE {
+ return !access().IsInobject() || access().offset() >= size;
+ }
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ if (index == 0 && access().IsExternalMemory()) {
+ // object must be external in case of external memory access
+ return Representation::External();
+ }
+ return Representation::Tagged();
+ }
+ virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadNamedField)
+
+ protected:
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE {
+ HLoadNamedField* b = HLoadNamedField::cast(other);
+ return access_.Equals(b->access_);
+ }
+
+ private:
+ HLoadNamedField(HValue* object, HObjectAccess access) : access_(access) {
+ ASSERT(object != NULL);
+ SetOperandAt(0, object);
+
+ Representation representation = access.representation();
+ if (representation.IsByte()) {
+ set_representation(Representation::Integer32());
+ } else if (representation.IsSmi()) {
+ set_type(HType::Smi());
+ set_representation(representation);
+ } else if (representation.IsDouble() ||
+ representation.IsExternal() ||
+ representation.IsInteger32()) {
+ set_representation(representation);
+ } else if (FLAG_track_heap_object_fields &&
+ representation.IsHeapObject()) {
+ set_type(HType::NonPrimitive());
+ set_representation(Representation::Tagged());
+ } else {
+ set_representation(Representation::Tagged());
+ }
+ access.SetGVNFlags(this, false);
+ }
+
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
+
+ HObjectAccess access_;
+};
+
+
+class HLoadNamedGeneric V8_FINAL : public HTemplateInstruction<2> {
+ public:
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HLoadNamedGeneric, HValue*,
+ Handle<Object>);
+
HValue* context() { return OperandAt(0); }
HValue* object() { return OperandAt(1); }
Handle<Object> name() const { return name_; }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric)
private:
+ HLoadNamedGeneric(HValue* context, HValue* object, Handle<Object> name)
+ : name_(name) {
+ SetOperandAt(0, context);
+ SetOperandAt(1, object);
+ set_representation(Representation::Tagged());
+ SetAllSideEffects();
+ }
+
Handle<Object> name_;
};
-class HLoadFunctionPrototype: public HUnaryOperation {
+class HLoadFunctionPrototype V8_FINAL : public HUnaryOperation {
public:
- explicit HLoadFunctionPrototype(HValue* function)
- : HUnaryOperation(function) {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- SetGVNFlag(kDependsOnCalls);
- }
+ DECLARE_INSTRUCTION_FACTORY_P1(HLoadFunctionPrototype, HValue*);
HValue* function() { return OperandAt(0); }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype)
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+
+ private:
+ explicit HLoadFunctionPrototype(HValue* function)
+ : HUnaryOperation(function) {
+ set_representation(Representation::Tagged());
+ SetFlag(kUseGVN);
+ SetGVNFlag(kDependsOnCalls);
+ }
};
class ArrayInstructionInterface {
@@ -4244,38 +6074,45 @@ class ArrayInstructionInterface {
virtual bool IsDehoisted() = 0;
virtual void SetDehoisted(bool is_dehoisted) = 0;
virtual ~ArrayInstructionInterface() { };
+
+ static Representation KeyedAccessIndexRequirement(Representation r) {
+ return r.IsInteger32() || SmiValuesAre32Bits()
+ ? Representation::Integer32() : Representation::Smi();
+ }
};
-class HLoadKeyedFastElement
+
+enum LoadKeyedHoleMode {
+ NEVER_RETURN_HOLE,
+ ALLOW_RETURN_HOLE
+};
+
+
+class HLoadKeyed V8_FINAL
: public HTemplateInstruction<3>, public ArrayInstructionInterface {
public:
- HLoadKeyedFastElement(HValue* obj,
- HValue* key,
- HValue* dependency,
- ElementsKind elements_kind = FAST_ELEMENTS)
- : bit_field_(0) {
- ASSERT(IsFastSmiOrObjectElementsKind(elements_kind));
- bit_field_ = ElementsKindField::encode(elements_kind);
- if (IsFastSmiElementsKind(elements_kind) &&
- IsFastPackedElementsKind(elements_kind)) {
- set_type(HType::Smi());
- }
- SetOperandAt(0, obj);
- SetOperandAt(1, key);
- SetOperandAt(2, dependency);
- set_representation(Representation::Tagged());
- SetGVNFlag(kDependsOnArrayElements);
- SetFlag(kUseGVN);
- }
+ DECLARE_INSTRUCTION_FACTORY_P4(HLoadKeyed, HValue*, HValue*, HValue*,
+ ElementsKind);
+ DECLARE_INSTRUCTION_FACTORY_P5(HLoadKeyed, HValue*, HValue*, HValue*,
+ ElementsKind, LoadKeyedHoleMode);
- HValue* object() { return OperandAt(0); }
+ bool is_external() const {
+ return IsExternalArrayElementsKind(elements_kind());
+ }
+ HValue* elements() { return OperandAt(0); }
HValue* key() { return OperandAt(1); }
- HValue* dependency() { return OperandAt(2); }
+ HValue* dependency() {
+ ASSERT(HasDependency());
+ return OperandAt(2);
+ }
+ bool HasDependency() const { return OperandAt(0) != OperandAt(2); }
uint32_t index_offset() { return IndexOffsetField::decode(bit_field_); }
void SetIndexOffset(uint32_t index_offset) {
bit_field_ = IndexOffsetField::update(bit_field_, index_offset);
}
- int MaxIndexOffsetBits() { return 25; }
+ virtual int MaxIndexOffsetBits() {
+ return kBitsForIndexOffset;
+ }
HValue* GetKey() { return key(); }
void SetKey(HValue* key) { SetOperandAt(1, key); }
bool IsDehoisted() { return IsDehoistedField::decode(bit_field_); }
@@ -4285,173 +6122,156 @@ class HLoadKeyedFastElement
ElementsKind elements_kind() const {
return ElementsKindField::decode(bit_field_);
}
+ LoadKeyedHoleMode hole_mode() const {
+ return HoleModeField::decode(bit_field_);
+ }
- virtual Representation RequiredInputRepresentation(int index) {
- // The key is supposed to be Integer32.
- if (index == 0) return Representation::Tagged();
- if (index == 1) return Representation::Integer32();
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ // kind_fast: tagged[int32] (none)
+ // kind_double: tagged[int32] (none)
+ // kind_external: external[int32] (none)
+ if (index == 0) {
+ return is_external() ? Representation::External()
+ : Representation::Tagged();
+ }
+ if (index == 1) {
+ return ArrayInstructionInterface::KeyedAccessIndexRequirement(
+ OperandAt(1)->representation());
+ }
return Representation::None();
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual Representation observed_input_representation(int index) V8_OVERRIDE {
+ return RequiredInputRepresentation(index);
+ }
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ bool UsesMustHandleHole() const;
+ bool AllUsesCanTreatHoleAsNaN() const;
bool RequiresHoleCheck() const;
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastElement)
+ virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadKeyed)
protected:
- virtual bool DataEquals(HValue* other) {
- if (!other->IsLoadKeyedFastElement()) return false;
- HLoadKeyedFastElement* other_load = HLoadKeyedFastElement::cast(other);
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE {
+ if (!other->IsLoadKeyed()) return false;
+ HLoadKeyed* other_load = HLoadKeyed::cast(other);
+
if (IsDehoisted() && index_offset() != other_load->index_offset())
return false;
return elements_kind() == other_load->elements_kind();
}
private:
- virtual bool IsDeletable() const { return !RequiresHoleCheck(); }
-
- class ElementsKindField: public BitField<ElementsKind, 0, 4> {};
- class IndexOffsetField: public BitField<uint32_t, 4, 27> {};
- class IsDehoistedField: public BitField<bool, 31, 1> {};
- uint32_t bit_field_;
-};
-
-
-enum HoleCheckMode { PERFORM_HOLE_CHECK, OMIT_HOLE_CHECK };
-
+ HLoadKeyed(HValue* obj,
+ HValue* key,
+ HValue* dependency,
+ ElementsKind elements_kind,
+ LoadKeyedHoleMode mode = NEVER_RETURN_HOLE)
+ : bit_field_(0) {
+ bit_field_ = ElementsKindField::encode(elements_kind) |
+ HoleModeField::encode(mode);
-class HLoadKeyedFastDoubleElement
- : public HTemplateInstruction<3>, public ArrayInstructionInterface {
- public:
- HLoadKeyedFastDoubleElement(
- HValue* elements,
- HValue* key,
- HValue* dependency,
- HoleCheckMode hole_check_mode = PERFORM_HOLE_CHECK)
- : index_offset_(0),
- is_dehoisted_(false),
- hole_check_mode_(hole_check_mode) {
- SetOperandAt(0, elements);
+ SetOperandAt(0, obj);
SetOperandAt(1, key);
- SetOperandAt(2, dependency);
- set_representation(Representation::Double());
- SetGVNFlag(kDependsOnDoubleArrayElements);
- SetFlag(kUseGVN);
- }
+ SetOperandAt(2, dependency != NULL ? dependency : obj);
+
+ if (!is_external()) {
+ // The double (holey and fast) and smi/object element cases can be
+ // distinguished by looking at elements_kind_ alone.
+ ASSERT(IsFastSmiOrObjectElementsKind(elements_kind) ||
+ IsFastDoubleElementsKind(elements_kind));
+
+ if (IsFastSmiOrObjectElementsKind(elements_kind)) {
+ if (IsFastSmiElementsKind(elements_kind) &&
+ (!IsHoleyElementsKind(elements_kind) ||
+ mode == NEVER_RETURN_HOLE)) {
+ set_type(HType::Smi());
+ set_representation(Representation::Smi());
+ } else {
+ set_representation(Representation::Tagged());
+ }
+
+ SetGVNFlag(kDependsOnArrayElements);
+ } else {
+ set_representation(Representation::Double());
+ SetGVNFlag(kDependsOnDoubleArrayElements);
+ }
+ } else {
+ if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
+ elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
+ set_representation(Representation::Double());
+ } else {
+ set_representation(Representation::Integer32());
+ }
- HValue* elements() { return OperandAt(0); }
- HValue* key() { return OperandAt(1); }
- HValue* dependency() { return OperandAt(2); }
- uint32_t index_offset() { return index_offset_; }
- void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; }
- int MaxIndexOffsetBits() { return 25; }
- HValue* GetKey() { return key(); }
- void SetKey(HValue* key) { SetOperandAt(1, key); }
- bool IsDehoisted() { return is_dehoisted_; }
- void SetDehoisted(bool is_dehoisted) { is_dehoisted_ = is_dehoisted; }
+ SetGVNFlag(kDependsOnExternalMemory);
+ // Native code could change the specialized array.
+ SetGVNFlag(kDependsOnCalls);
+ }
- virtual Representation RequiredInputRepresentation(int index) {
- // The key is supposed to be Integer32.
- if (index == 0) return Representation::Tagged();
- if (index == 1) return Representation::Integer32();
- return Representation::None();
+ SetFlag(kUseGVN);
}
- bool RequiresHoleCheck() const {
- return hole_check_mode_ == PERFORM_HOLE_CHECK;
+ virtual bool IsDeletable() const V8_OVERRIDE {
+ return !RequiresHoleCheck();
}
- virtual void PrintDataTo(StringStream* stream);
-
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastDoubleElement)
+ // Establish some checks around our packed fields
+ enum LoadKeyedBits {
+ kBitsForElementsKind = 5,
+ kBitsForHoleMode = 1,
+ kBitsForIndexOffset = 25,
+ kBitsForIsDehoisted = 1,
- protected:
- virtual bool DataEquals(HValue* other) {
- if (!other->IsLoadKeyedFastDoubleElement()) return false;
- HLoadKeyedFastDoubleElement* other_load =
- HLoadKeyedFastDoubleElement::cast(other);
- return hole_check_mode_ == other_load->hole_check_mode_;
- }
-
- private:
- virtual bool IsDeletable() const { return !RequiresHoleCheck(); }
+ kStartElementsKind = 0,
+ kStartHoleMode = kStartElementsKind + kBitsForElementsKind,
+ kStartIndexOffset = kStartHoleMode + kBitsForHoleMode,
+ kStartIsDehoisted = kStartIndexOffset + kBitsForIndexOffset
+ };
- uint32_t index_offset_;
- bool is_dehoisted_;
- HoleCheckMode hole_check_mode_;
+ STATIC_ASSERT((kBitsForElementsKind + kBitsForIndexOffset +
+ kBitsForIsDehoisted) <= sizeof(uint32_t)*8);
+ STATIC_ASSERT(kElementsKindCount <= (1 << kBitsForElementsKind));
+ class ElementsKindField:
+ public BitField<ElementsKind, kStartElementsKind, kBitsForElementsKind>
+ {}; // NOLINT
+ class HoleModeField:
+ public BitField<LoadKeyedHoleMode, kStartHoleMode, kBitsForHoleMode>
+ {}; // NOLINT
+ class IndexOffsetField:
+ public BitField<uint32_t, kStartIndexOffset, kBitsForIndexOffset>
+ {}; // NOLINT
+ class IsDehoistedField:
+ public BitField<bool, kStartIsDehoisted, kBitsForIsDehoisted>
+ {}; // NOLINT
+ uint32_t bit_field_;
};
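
One small trick in the consolidated HLoadKeyed above is how the optional dependency is stored: the instruction always has three operand slots, and when no dependency is supplied the constructor writes the elements operand into slot 2 again (SetOperandAt(2, dependency != NULL ? dependency : obj)), so HasDependency() can simply compare slots 0 and 2 instead of spending another bit in bit_field_. A tiny standalone sketch of that idea, with FakeInstr as a made-up stand-in for the instruction:

    #include <cassert>
    #include <cstddef>

    // Three operand slots, mirroring HLoadKeyed: slot 0 holds the elements,
    // slot 1 the key, and slot 2 either the dependency or the elements again.
    struct FakeInstr {
      const void* operands[3];

      FakeInstr(const void* elements, const void* key, const void* dependency) {
        operands[0] = elements;
        operands[1] = key;
        operands[2] = dependency != NULL ? dependency : elements;
      }

      // "Has a dependency" is derived from the slots; no extra flag is needed.
      bool HasDependency() const { return operands[0] != operands[2]; }
    };

    int main() {
      int elements = 0, key = 0, check = 0;
      FakeInstr with_dep(&elements, &key, &check);
      FakeInstr without_dep(&elements, &key, NULL);
      assert(with_dep.HasDependency());
      assert(!without_dep.HasDependency());
      return 0;
    }
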
-class HLoadKeyedSpecializedArrayElement
- : public HTemplateInstruction<3>, public ArrayInstructionInterface {
+class HLoadKeyedGeneric V8_FINAL : public HTemplateInstruction<3> {
public:
- HLoadKeyedSpecializedArrayElement(HValue* external_elements,
- HValue* key,
- HValue* dependency,
- ElementsKind elements_kind)
- : elements_kind_(elements_kind),
- index_offset_(0),
- is_dehoisted_(false) {
- SetOperandAt(0, external_elements);
- SetOperandAt(1, key);
- SetOperandAt(2, dependency);
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
- elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- set_representation(Representation::Double());
- } else {
- set_representation(Representation::Integer32());
- }
- SetGVNFlag(kDependsOnSpecializedArrayElements);
- // Native code could change the specialized array.
- SetGVNFlag(kDependsOnCalls);
- SetFlag(kUseGVN);
- }
-
- virtual void PrintDataTo(StringStream* stream);
-
- virtual Representation RequiredInputRepresentation(int index) {
- // The key is supposed to be Integer32.
- if (index == 0) return Representation::External();
- if (index == 1) return Representation::Integer32();
- return Representation::None();
- }
-
- HValue* external_pointer() { return OperandAt(0); }
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HLoadKeyedGeneric, HValue*,
+ HValue*);
+ HValue* object() { return OperandAt(0); }
HValue* key() { return OperandAt(1); }
- HValue* dependency() { return OperandAt(2); }
- ElementsKind elements_kind() const { return elements_kind_; }
- uint32_t index_offset() { return index_offset_; }
- void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; }
- int MaxIndexOffsetBits() { return 25; }
- HValue* GetKey() { return key(); }
- void SetKey(HValue* key) { SetOperandAt(1, key); }
- bool IsDehoisted() { return is_dehoisted_; }
- void SetDehoisted(bool is_dehoisted) { is_dehoisted_ = is_dehoisted; }
-
- virtual Range* InferRange(Zone* zone);
+ HValue* context() { return OperandAt(2); }
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedSpecializedArrayElement)
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- protected:
- virtual bool DataEquals(HValue* other) {
- if (!other->IsLoadKeyedSpecializedArrayElement()) return false;
- HLoadKeyedSpecializedArrayElement* cast_other =
- HLoadKeyedSpecializedArrayElement::cast(other);
- return elements_kind_ == cast_other->elements_kind();
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ // tagged[tagged]
+ return Representation::Tagged();
}
- private:
- virtual bool IsDeletable() const { return true; }
-
- ElementsKind elements_kind_;
- uint32_t index_offset_;
- bool is_dehoisted_;
-};
+ virtual HValue* Canonicalize() V8_OVERRIDE;
+ DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric)
-class HLoadKeyedGeneric: public HTemplateInstruction<3> {
- public:
+ private:
HLoadKeyedGeneric(HValue* context, HValue* obj, HValue* key) {
set_representation(Representation::Tagged());
SetOperandAt(0, obj);
@@ -4459,278 +6279,325 @@ class HLoadKeyedGeneric: public HTemplateInstruction<3> {
SetOperandAt(2, context);
SetAllSideEffects();
}
-
- HValue* object() { return OperandAt(0); }
- HValue* key() { return OperandAt(1); }
- HValue* context() { return OperandAt(2); }
-
- virtual void PrintDataTo(StringStream* stream);
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
- virtual HValue* Canonicalize();
-
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric)
};
-class HStoreNamedField: public HTemplateInstruction<2> {
+class HStoreNamedField V8_FINAL : public HTemplateInstruction<3> {
public:
- HStoreNamedField(HValue* obj,
- Handle<String> name,
- HValue* val,
- bool in_object,
- int offset)
- : name_(name),
- is_in_object_(in_object),
- offset_(offset),
- new_space_dominator_(NULL) {
- SetOperandAt(0, obj);
- SetOperandAt(1, val);
- SetFlag(kTrackSideEffectDominators);
- SetGVNFlag(kDependsOnNewSpacePromotion);
- if (is_in_object_) {
- SetGVNFlag(kChangesInobjectFields);
- } else {
- SetGVNFlag(kChangesBackingStoreFields);
- }
- }
+ DECLARE_INSTRUCTION_FACTORY_P3(HStoreNamedField, HValue*,
+ HObjectAccess, HValue*);
DECLARE_CONCRETE_INSTRUCTION(StoreNamedField)
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual bool HasEscapingOperandAt(int index) V8_OVERRIDE {
+ return index == 1;
+ }
+ virtual bool HasOutOfBoundsAccess(int size) V8_OVERRIDE {
+ return !access().IsInobject() || access().offset() >= size;
+ }
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ if (index == 0 && access().IsExternalMemory()) {
+ // object must be external in case of external memory access
+ return Representation::External();
+ } else if (index == 1) {
+ if (field_representation().IsByte() ||
+ field_representation().IsInteger32()) {
+ return Representation::Integer32();
+ } else if (field_representation().IsDouble() ||
+ field_representation().IsSmi()) {
+ return field_representation();
+ }
+ }
return Representation::Tagged();
}
- virtual void SetSideEffectDominator(GVNFlag side_effect, HValue* dominator) {
+ virtual void HandleSideEffectDominator(GVNFlag side_effect,
+ HValue* dominator) V8_OVERRIDE {
ASSERT(side_effect == kChangesNewSpacePromotion);
new_space_dominator_ = dominator;
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- HValue* object() { return OperandAt(0); }
- HValue* value() { return OperandAt(1); }
+ void SkipWriteBarrier() { write_barrier_mode_ = SKIP_WRITE_BARRIER; }
+ bool IsSkipWriteBarrier() const {
+ return write_barrier_mode_ == SKIP_WRITE_BARRIER;
+ }
- Handle<String> name() const { return name_; }
- bool is_in_object() const { return is_in_object_; }
- int offset() const { return offset_; }
- Handle<Map> transition() const { return transition_; }
- void set_transition(Handle<Map> map) { transition_ = map; }
+ HValue* object() const { return OperandAt(0); }
+ HValue* value() const { return OperandAt(1); }
+ HValue* transition() const { return OperandAt(2); }
+
+ HObjectAccess access() const { return access_; }
HValue* new_space_dominator() const { return new_space_dominator_; }
+ bool has_transition() const { return has_transition_; }
+
+ Handle<Map> transition_map() const {
+ if (has_transition()) {
+ return Handle<Map>::cast(
+ HConstant::cast(transition())->handle(Isolate::Current()));
+ } else {
+ return Handle<Map>();
+ }
+ }
+
+ void SetTransition(HConstant* map_constant, CompilationInfo* info) {
+ ASSERT(!has_transition()); // Only set once.
+ Handle<Map> map = Handle<Map>::cast(map_constant->handle(info->isolate()));
+ if (map->CanBeDeprecated()) {
+ map->AddDependentCompilationInfo(DependentCode::kTransitionGroup, info);
+ }
+ SetOperandAt(2, map_constant);
+ has_transition_ = true;
+ }
bool NeedsWriteBarrier() {
+ ASSERT(!(FLAG_track_double_fields && field_representation().IsDouble()) ||
+ !has_transition());
+ if (IsSkipWriteBarrier()) return false;
+ if (field_representation().IsDouble()) return false;
+ if (field_representation().IsSmi()) return false;
+ if (field_representation().IsInteger32()) return false;
+ if (field_representation().IsExternal()) return false;
return StoringValueNeedsWriteBarrier(value()) &&
ReceiverObjectNeedsWriteBarrier(object(), new_space_dominator());
}
bool NeedsWriteBarrierForMap() {
+ if (IsSkipWriteBarrier()) return false;
return ReceiverObjectNeedsWriteBarrier(object(), new_space_dominator());
}
+ Representation field_representation() const {
+ return access_.representation();
+ }
+
+ void UpdateValue(HValue* value) {
+ SetOperandAt(1, value);
+ }
+
private:
- Handle<String> name_;
- bool is_in_object_;
- int offset_;
- Handle<Map> transition_;
+ HStoreNamedField(HValue* obj,
+ HObjectAccess access,
+ HValue* val)
+ : access_(access),
+ new_space_dominator_(NULL),
+ write_barrier_mode_(UPDATE_WRITE_BARRIER),
+ has_transition_(false) {
+ SetOperandAt(0, obj);
+ SetOperandAt(1, val);
+ SetOperandAt(2, obj);
+ access.SetGVNFlags(this, true);
+ }
+
+ HObjectAccess access_;
HValue* new_space_dominator_;
+ WriteBarrierMode write_barrier_mode_ : 1;
+ bool has_transition_ : 1;
};
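
The rewritten NeedsWriteBarrier() above short-circuits in a fixed order: an explicit SkipWriteBarrier() request wins, then any field representation that cannot hold a heap object pointer (double, smi, int32, external) disables the barrier, and only then are the stored value and the receiver consulted. A compact restatement of that decision follows; the two boolean parameters stand in for the real StoringValueNeedsWriteBarrier and ReceiverObjectNeedsWriteBarrier helpers, which are out of scope for this sketch.

    #include <cassert>

    // Illustrative representation kinds; only the cases used below.
    enum Rep { TAGGED, SMI, DOUBLE, INTEGER32, EXTERNAL };

    static bool NeedsWriteBarrier(bool skip_write_barrier,
                                  Rep field_rep,
                                  bool value_needs_barrier,      // StoringValueNeedsWriteBarrier
                                  bool receiver_needs_barrier) { // ReceiverObjectNeedsWriteBarrier
      if (skip_write_barrier) return false;
      // These representations never store a heap object pointer, so no barrier.
      if (field_rep == DOUBLE || field_rep == SMI ||
          field_rep == INTEGER32 || field_rep == EXTERNAL) {
        return false;
      }
      return value_needs_barrier && receiver_needs_barrier;
    }

    int main() {
      assert(!NeedsWriteBarrier(true, TAGGED, true, true));    // explicitly skipped
      assert(!NeedsWriteBarrier(false, DOUBLE, true, true));   // unboxed double field
      assert(!NeedsWriteBarrier(false, TAGGED, true, false));  // receiver freshly allocated
      assert(NeedsWriteBarrier(false, TAGGED, true, true));    // tagged pointer store
      return 0;
    }
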
-class HStoreNamedGeneric: public HTemplateInstruction<3> {
+class HStoreNamedGeneric V8_FINAL : public HTemplateInstruction<3> {
public:
- HStoreNamedGeneric(HValue* context,
- HValue* object,
- Handle<String> name,
- HValue* value,
- StrictModeFlag strict_mode_flag)
- : name_(name),
- strict_mode_flag_(strict_mode_flag) {
- SetOperandAt(0, object);
- SetOperandAt(1, value);
- SetOperandAt(2, context);
- SetAllSideEffects();
- }
-
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P4(HStoreNamedGeneric, HValue*,
+ Handle<String>, HValue*,
+ StrictModeFlag);
HValue* object() { return OperandAt(0); }
HValue* value() { return OperandAt(1); }
HValue* context() { return OperandAt(2); }
Handle<String> name() { return name_; }
StrictModeFlag strict_mode_flag() { return strict_mode_flag_; }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric)
private:
+ HStoreNamedGeneric(HValue* context,
+ HValue* object,
+ Handle<String> name,
+ HValue* value,
+ StrictModeFlag strict_mode_flag)
+ : name_(name),
+ strict_mode_flag_(strict_mode_flag) {
+ SetOperandAt(0, object);
+ SetOperandAt(1, value);
+ SetOperandAt(2, context);
+ SetAllSideEffects();
+ }
+
Handle<String> name_;
StrictModeFlag strict_mode_flag_;
};
-class HStoreKeyedFastElement
+class HStoreKeyed V8_FINAL
: public HTemplateInstruction<3>, public ArrayInstructionInterface {
public:
- HStoreKeyedFastElement(HValue* obj, HValue* key, HValue* val,
- ElementsKind elements_kind = FAST_ELEMENTS)
- : elements_kind_(elements_kind), index_offset_(0), is_dehoisted_(false) {
- SetOperandAt(0, obj);
- SetOperandAt(1, key);
- SetOperandAt(2, val);
- SetGVNFlag(kChangesArrayElements);
+ DECLARE_INSTRUCTION_FACTORY_P4(HStoreKeyed, HValue*, HValue*, HValue*,
+ ElementsKind);
+
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ // kind_fast: tagged[int32] = tagged
+ // kind_double: tagged[int32] = double
+ // kind_smi : tagged[int32] = smi
+ // kind_external: external[int32] = (double | int32)
+ if (index == 0) {
+ return is_external() ? Representation::External()
+ : Representation::Tagged();
+ } else if (index == 1) {
+ return ArrayInstructionInterface::KeyedAccessIndexRequirement(
+ OperandAt(1)->representation());
+ }
+
+ ASSERT_EQ(index, 2);
+ if (IsDoubleOrFloatElementsKind(elements_kind())) {
+ return Representation::Double();
+ }
+
+ if (IsFastSmiElementsKind(elements_kind())) {
+ return Representation::Smi();
+ }
+
+ return is_external() ? Representation::Integer32()
+ : Representation::Tagged();
}
- virtual Representation RequiredInputRepresentation(int index) {
- // The key is supposed to be Integer32.
- return index == 1
- ? Representation::Integer32()
- : Representation::Tagged();
+ bool is_external() const {
+ return IsExternalArrayElementsKind(elements_kind());
}
- HValue* object() { return OperandAt(0); }
+ virtual Representation observed_input_representation(int index) V8_OVERRIDE {
+ if (index < 2) return RequiredInputRepresentation(index);
+ if (IsUninitialized()) {
+ return Representation::None();
+ }
+ if (IsFastSmiElementsKind(elements_kind())) {
+ return Representation::Smi();
+ }
+ if (IsDoubleOrFloatElementsKind(elements_kind())) {
+ return Representation::Double();
+ }
+ if (is_external()) {
+ return Representation::Integer32();
+ }
+ // For fast object elements kinds, don't assume anything.
+ return Representation::None();
+ }
+
+ HValue* elements() { return OperandAt(0); }
HValue* key() { return OperandAt(1); }
HValue* value() { return OperandAt(2); }
- bool value_is_smi() {
+ bool value_is_smi() const {
return IsFastSmiElementsKind(elements_kind_);
}
+ ElementsKind elements_kind() const { return elements_kind_; }
uint32_t index_offset() { return index_offset_; }
void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; }
- int MaxIndexOffsetBits() { return 25; }
+ virtual int MaxIndexOffsetBits() {
+ return 31 - ElementsKindToShiftSize(elements_kind_);
+ }
HValue* GetKey() { return key(); }
void SetKey(HValue* key) { SetOperandAt(1, key); }
bool IsDehoisted() { return is_dehoisted_; }
void SetDehoisted(bool is_dehoisted) { is_dehoisted_ = is_dehoisted; }
+ bool IsUninitialized() { return is_uninitialized_; }
+ void SetUninitialized(bool is_uninitialized) {
+ is_uninitialized_ = is_uninitialized;
+ }
+
+ bool IsConstantHoleStore() {
+ return value()->IsConstant() && HConstant::cast(value())->IsTheHole();
+ }
+
+ virtual void HandleSideEffectDominator(GVNFlag side_effect,
+ HValue* dominator) V8_OVERRIDE {
+ ASSERT(side_effect == kChangesNewSpacePromotion);
+ new_space_dominator_ = dominator;
+ }
+
+ HValue* new_space_dominator() const { return new_space_dominator_; }
bool NeedsWriteBarrier() {
if (value_is_smi()) {
return false;
} else {
- return StoringValueNeedsWriteBarrier(value());
+ return StoringValueNeedsWriteBarrier(value()) &&
+ ReceiverObjectNeedsWriteBarrier(elements(), new_space_dominator());
}
}
- virtual void PrintDataTo(StringStream* stream);
-
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement)
+ bool NeedsCanonicalization();
- private:
- ElementsKind elements_kind_;
- uint32_t index_offset_;
- bool is_dehoisted_;
-};
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyed)
-class HStoreKeyedFastDoubleElement
- : public HTemplateInstruction<3>, public ArrayInstructionInterface {
- public:
- HStoreKeyedFastDoubleElement(HValue* elements,
- HValue* key,
- HValue* val)
- : index_offset_(0), is_dehoisted_(false) {
- SetOperandAt(0, elements);
+ private:
+ HStoreKeyed(HValue* obj, HValue* key, HValue* val,
+ ElementsKind elements_kind)
+ : elements_kind_(elements_kind),
+ index_offset_(0),
+ is_dehoisted_(false),
+ is_uninitialized_(false),
+ new_space_dominator_(NULL) {
+ SetOperandAt(0, obj);
SetOperandAt(1, key);
SetOperandAt(2, val);
- SetFlag(kDeoptimizeOnUndefined);
- SetGVNFlag(kChangesDoubleArrayElements);
- }
- virtual Representation RequiredInputRepresentation(int index) {
- if (index == 1) {
- return Representation::Integer32();
- } else if (index == 2) {
- return Representation::Double();
+ if (IsFastObjectElementsKind(elements_kind)) {
+ SetFlag(kTrackSideEffectDominators);
+ SetGVNFlag(kDependsOnNewSpacePromotion);
+ }
+ if (is_external()) {
+ SetGVNFlag(kChangesExternalMemory);
+ SetFlag(kAllowUndefinedAsNaN);
+ } else if (IsFastDoubleElementsKind(elements_kind)) {
+ SetGVNFlag(kChangesDoubleArrayElements);
+ } else if (IsFastSmiElementsKind(elements_kind)) {
+ SetGVNFlag(kChangesArrayElements);
} else {
- return Representation::Tagged();
+ SetGVNFlag(kChangesArrayElements);
}
- }
-
- HValue* elements() { return OperandAt(0); }
- HValue* key() { return OperandAt(1); }
- HValue* value() { return OperandAt(2); }
- uint32_t index_offset() { return index_offset_; }
- void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; }
- int MaxIndexOffsetBits() { return 25; }
- HValue* GetKey() { return key(); }
- void SetKey(HValue* key) { SetOperandAt(1, key); }
- bool IsDehoisted() { return is_dehoisted_; }
- void SetDehoisted(bool is_dehoisted) { is_dehoisted_ = is_dehoisted; }
- bool NeedsWriteBarrier() {
- return StoringValueNeedsWriteBarrier(value());
+ // EXTERNAL_{UNSIGNED_,}{BYTE,SHORT,INT}_ELEMENTS are truncating.
+ if (elements_kind >= EXTERNAL_BYTE_ELEMENTS &&
+ elements_kind <= EXTERNAL_UNSIGNED_INT_ELEMENTS) {
+ SetFlag(kTruncatingToInt32);
+ }
}
- bool NeedsCanonicalization();
-
- virtual void PrintDataTo(StringStream* stream);
-
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastDoubleElement)
-
- private:
+ ElementsKind elements_kind_;
uint32_t index_offset_;
- bool is_dehoisted_;
+ bool is_dehoisted_ : 1;
+ bool is_uninitialized_ : 1;
+ HValue* new_space_dominator_;
};
-class HStoreKeyedSpecializedArrayElement
- : public HTemplateInstruction<3>, public ArrayInstructionInterface {
+class HStoreKeyedGeneric V8_FINAL : public HTemplateInstruction<4> {
public:
- HStoreKeyedSpecializedArrayElement(HValue* external_elements,
- HValue* key,
- HValue* val,
- ElementsKind elements_kind)
- : elements_kind_(elements_kind), index_offset_(0), is_dehoisted_(false) {
- SetGVNFlag(kChangesSpecializedArrayElements);
- SetOperandAt(0, external_elements);
- SetOperandAt(1, key);
- SetOperandAt(2, val);
- }
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P4(HStoreKeyedGeneric, HValue*,
+ HValue*, HValue*, StrictModeFlag);
- virtual void PrintDataTo(StringStream* stream);
-
- virtual Representation RequiredInputRepresentation(int index) {
- if (index == 0) {
- return Representation::External();
- } else {
- bool float_or_double_elements =
- elements_kind() == EXTERNAL_FLOAT_ELEMENTS ||
- elements_kind() == EXTERNAL_DOUBLE_ELEMENTS;
- if (index == 2 && float_or_double_elements) {
- return Representation::Double();
- } else {
- return Representation::Integer32();
- }
- }
- }
-
- HValue* external_pointer() { return OperandAt(0); }
+ HValue* object() { return OperandAt(0); }
HValue* key() { return OperandAt(1); }
HValue* value() { return OperandAt(2); }
- ElementsKind elements_kind() const { return elements_kind_; }
- uint32_t index_offset() { return index_offset_; }
- void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; }
- int MaxIndexOffsetBits() {
- return 31 - ElementsKindToShiftSize(elements_kind_);
- }
- HValue* GetKey() { return key(); }
- void SetKey(HValue* key) { SetOperandAt(1, key); }
- bool IsDehoisted() { return is_dehoisted_; }
- void SetDehoisted(bool is_dehoisted) { is_dehoisted_ = is_dehoisted; }
+ HValue* context() { return OperandAt(3); }
+ StrictModeFlag strict_mode_flag() { return strict_mode_flag_; }
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedSpecializedArrayElement)
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ // tagged[tagged] = tagged
+ return Representation::Tagged();
+ }
- private:
- ElementsKind elements_kind_;
- uint32_t index_offset_;
- bool is_dehoisted_;
-};
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric)
-class HStoreKeyedGeneric: public HTemplateInstruction<4> {
- public:
+ private:
HStoreKeyedGeneric(HValue* context,
HValue* object,
HValue* key,
@@ -4744,111 +6611,123 @@ class HStoreKeyedGeneric: public HTemplateInstruction<4> {
SetAllSideEffects();
}
- HValue* object() { return OperandAt(0); }
- HValue* key() { return OperandAt(1); }
- HValue* value() { return OperandAt(2); }
- HValue* context() { return OperandAt(3); }
- StrictModeFlag strict_mode_flag() { return strict_mode_flag_; }
+ StrictModeFlag strict_mode_flag_;
+};
- virtual Representation RequiredInputRepresentation(int index) {
+
+class HTransitionElementsKind V8_FINAL : public HTemplateInstruction<2> {
+ public:
+ inline static HTransitionElementsKind* New(Zone* zone,
+ HValue* context,
+ HValue* object,
+ Handle<Map> original_map,
+ Handle<Map> transitioned_map) {
+ return new(zone) HTransitionElementsKind(context, object,
+ original_map, transitioned_map);
+ }
+
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
- virtual void PrintDataTo(StringStream* stream);
+ HValue* object() { return OperandAt(0); }
+ HValue* context() { return OperandAt(1); }
+ Unique<Map> original_map() { return original_map_; }
+ Unique<Map> transitioned_map() { return transitioned_map_; }
+ ElementsKind from_kind() { return from_kind_; }
+ ElementsKind to_kind() { return to_kind_; }
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric)
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- private:
- StrictModeFlag strict_mode_flag_;
-};
+ DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind)
+ protected:
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE {
+ HTransitionElementsKind* instr = HTransitionElementsKind::cast(other);
+ return original_map_ == instr->original_map_ &&
+ transitioned_map_ == instr->transitioned_map_;
+ }
-class HTransitionElementsKind: public HTemplateInstruction<1> {
- public:
- HTransitionElementsKind(HValue* object,
+ private:
+ HTransitionElementsKind(HValue* context,
+ HValue* object,
Handle<Map> original_map,
Handle<Map> transitioned_map)
- : original_map_(original_map),
- transitioned_map_(transitioned_map) {
+ : original_map_(Unique<Map>(original_map)),
+ transitioned_map_(Unique<Map>(transitioned_map)),
+ from_kind_(original_map->elements_kind()),
+ to_kind_(transitioned_map->elements_kind()) {
SetOperandAt(0, object);
+ SetOperandAt(1, context);
SetFlag(kUseGVN);
SetGVNFlag(kChangesElementsKind);
- if (original_map->has_fast_double_elements()) {
- SetGVNFlag(kChangesElementsPointer);
- SetGVNFlag(kChangesNewSpacePromotion);
- }
- if (transitioned_map->has_fast_double_elements()) {
+ if (!IsSimpleMapChangeTransition(from_kind_, to_kind_)) {
SetGVNFlag(kChangesElementsPointer);
SetGVNFlag(kChangesNewSpacePromotion);
}
set_representation(Representation::Tagged());
}
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
+ Unique<Map> original_map_;
+ Unique<Map> transitioned_map_;
+ ElementsKind from_kind_;
+ ElementsKind to_kind_;
+};
- HValue* object() { return OperandAt(0); }
- Handle<Map> original_map() { return original_map_; }
- Handle<Map> transitioned_map() { return transitioned_map_; }
- virtual void PrintDataTo(StringStream* stream);
+class HStringAdd V8_FINAL : public HBinaryOperation {
+ public:
+ static HInstruction* New(Zone* zone,
+ HValue* context,
+ HValue* left,
+ HValue* right,
+ StringAddFlags flags = STRING_ADD_CHECK_NONE);
- DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind)
+ StringAddFlags flags() const { return flags_; }
- protected:
- virtual bool DataEquals(HValue* other) {
- HTransitionElementsKind* instr = HTransitionElementsKind::cast(other);
- return original_map_.is_identical_to(instr->original_map()) &&
- transitioned_map_.is_identical_to(instr->transitioned_map());
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ return Representation::Tagged();
}
- private:
- Handle<Map> original_map_;
- Handle<Map> transitioned_map_;
-};
+ DECLARE_CONCRETE_INSTRUCTION(StringAdd)
+ protected:
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
-class HStringAdd: public HBinaryOperation {
- public:
- HStringAdd(HValue* context, HValue* left, HValue* right)
- : HBinaryOperation(context, left, right) {
+ private:
+ HStringAdd(HValue* context, HValue* left, HValue* right, StringAddFlags flags)
+ : HBinaryOperation(context, left, right, HType::String()), flags_(flags) {
set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- SetGVNFlag(kDependsOnMaps);
- SetGVNFlag(kChangesNewSpacePromotion);
+ if (MightHaveSideEffects()) {
+ SetAllSideEffects();
+ } else {
+ SetFlag(kUseGVN);
+ SetGVNFlag(kDependsOnMaps);
+ SetGVNFlag(kChangesNewSpacePromotion);
+ }
}
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
+ bool MightHaveSideEffects() const {
+ return flags_ != STRING_ADD_CHECK_NONE &&
+ (left()->ToStringCanBeObserved() || right()->ToStringCanBeObserved());
}
- virtual HType CalculateInferredType() {
- return HType::String();
+ // No side-effects except possible allocation:
+ // NOTE: this instruction does not call ToString() on its inputs, when flags_
+ // is set to STRING_ADD_CHECK_NONE.
+ virtual bool IsDeletable() const V8_OVERRIDE {
+ return !MightHaveSideEffects();
}
- DECLARE_CONCRETE_INSTRUCTION(StringAdd)
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-
- // TODO(svenpanne) Might be safe, but leave it out until we know for sure.
- // private:
- // virtual bool IsDeletable() const { return true; }
+ const StringAddFlags flags_;
};
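
HStringAdd now chooses between SetAllSideEffects() and the GVN-friendly flags based on MightHaveSideEffects(): the instruction is only treated as observable (and hence non-deletable) when the flags request input checks and at least one operand's ToString conversion could run user code. A small sketch of that predicate; the StringAddFlags values here are local stand-ins rather than the real enum, and the two booleans stand in for ToStringCanBeObserved() on the operands.

    #include <cassert>

    enum StringAddFlags { STRING_ADD_CHECK_NONE, STRING_ADD_CHECK_BOTH };

    // Mirrors the shape of HStringAdd::MightHaveSideEffects().
    static bool MightHaveSideEffects(StringAddFlags flags,
                                     bool left_observable,
                                     bool right_observable) {
      return flags != STRING_ADD_CHECK_NONE &&
             (left_observable || right_observable);
    }

    int main() {
      assert(!MightHaveSideEffects(STRING_ADD_CHECK_NONE, true, true));
      assert(!MightHaveSideEffects(STRING_ADD_CHECK_BOTH, false, false));
      assert(MightHaveSideEffects(STRING_ADD_CHECK_BOTH, true, false));
      return 0;
    }
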
-class HStringCharCodeAt: public HTemplateInstruction<3> {
+class HStringCharCodeAt V8_FINAL : public HTemplateInstruction<3> {
public:
- HStringCharCodeAt(HValue* context, HValue* string, HValue* index) {
- SetOperandAt(0, context);
- SetOperandAt(1, string);
- SetOperandAt(2, index);
- set_representation(Representation::Integer32());
- SetFlag(kUseGVN);
- SetGVNFlag(kDependsOnMaps);
- SetGVNFlag(kChangesNewSpacePromotion);
- }
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HStringCharCodeAt,
+ HValue*,
+ HValue*);
virtual Representation RequiredInputRepresentation(int index) {
// The index is supposed to be Integer32.
@@ -4857,249 +6736,119 @@ class HStringCharCodeAt: public HTemplateInstruction<3> {
: Representation::Tagged();
}
- HValue* context() { return OperandAt(0); }
- HValue* string() { return OperandAt(1); }
- HValue* index() { return OperandAt(2); }
+ HValue* context() const { return OperandAt(0); }
+ HValue* string() const { return OperandAt(1); }
+ HValue* index() const { return OperandAt(2); }
DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt)
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
- virtual Range* InferRange(Zone* zone) {
+ virtual Range* InferRange(Zone* zone) V8_OVERRIDE {
return new(zone) Range(0, String::kMaxUtf16CodeUnit);
}
- // TODO(svenpanne) Might be safe, but leave it out until we know for sure.
- // private:
- // virtual bool IsDeletable() const { return true; }
-};
-
-
-class HStringCharFromCode: public HTemplateInstruction<2> {
- public:
- HStringCharFromCode(HValue* context, HValue* char_code) {
+ private:
+ HStringCharCodeAt(HValue* context, HValue* string, HValue* index) {
SetOperandAt(0, context);
- SetOperandAt(1, char_code);
- set_representation(Representation::Tagged());
+ SetOperandAt(1, string);
+ SetOperandAt(2, index);
+ set_representation(Representation::Integer32());
SetFlag(kUseGVN);
+ SetGVNFlag(kDependsOnMaps);
SetGVNFlag(kChangesNewSpacePromotion);
}
- virtual Representation RequiredInputRepresentation(int index) {
- return index == 0
- ? Representation::Tagged()
- : Representation::Integer32();
- }
- virtual HType CalculateInferredType();
-
- HValue* context() { return OperandAt(0); }
- HValue* value() { return OperandAt(1); }
-
- virtual bool DataEquals(HValue* other) { return true; }
-
- DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode)
-
- // TODO(svenpanne) Might be safe, but leave it out until we know for sure.
- // private:
- // virtual bool IsDeletable() const { return true; }
+ // No side effects: runtime function assumes string + number inputs.
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
};
-class HStringLength: public HUnaryOperation {
+class HStringCharFromCode V8_FINAL : public HTemplateInstruction<2> {
public:
- explicit HStringLength(HValue* string) : HUnaryOperation(string) {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- SetGVNFlag(kDependsOnMaps);
- }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
+ static HInstruction* New(Zone* zone,
+ HValue* context,
+ HValue* char_code);
- virtual HType CalculateInferredType() {
- STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
- return HType::Smi();
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ return index == 0
+ ? Representation::Tagged()
+ : Representation::Integer32();
}
- DECLARE_CONCRETE_INSTRUCTION(StringLength)
+ HValue* context() const { return OperandAt(0); }
+ HValue* value() const { return OperandAt(1); }
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
- virtual Range* InferRange(Zone* zone) {
- return new(zone) Range(0, String::kMaxLength);
- }
+ DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode)
private:
- virtual bool IsDeletable() const { return true; }
-};
-
-
-class HAllocateObject: public HTemplateInstruction<1> {
- public:
- HAllocateObject(HValue* context, Handle<JSFunction> constructor)
- : constructor_(constructor) {
+ HStringCharFromCode(HValue* context, HValue* char_code)
+ : HTemplateInstruction<2>(HType::String()) {
SetOperandAt(0, context);
+ SetOperandAt(1, char_code);
set_representation(Representation::Tagged());
+ SetFlag(kUseGVN);
SetGVNFlag(kChangesNewSpacePromotion);
}
- // Maximum instance size for which allocations will be inlined.
- static const int kMaxSize = 64 * kPointerSize;
-
- HValue* context() { return OperandAt(0); }
- Handle<JSFunction> constructor() { return constructor_; }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
+ virtual bool IsDeletable() const V8_OVERRIDE {
+ return !value()->ToNumberCanBeObserved();
}
- virtual HType CalculateInferredType();
-
- DECLARE_CONCRETE_INSTRUCTION(AllocateObject)
-
- private:
- // TODO(svenpanne) Might be safe, but leave it out until we know for sure.
- // virtual bool IsDeletable() const { return true; }
-
- Handle<JSFunction> constructor_;
};
template <int V>
-class HMaterializedLiteral: public HTemplateInstruction<V> {
+class HMaterializedLiteral : public HTemplateInstruction<V> {
public:
+ HMaterializedLiteral<V>(int index, int depth, AllocationSiteMode mode)
+ : literal_index_(index), depth_(depth), allocation_site_mode_(mode) {
+ this->set_representation(Representation::Tagged());
+ }
+
HMaterializedLiteral<V>(int index, int depth)
- : literal_index_(index), depth_(depth) {
+ : literal_index_(index), depth_(depth),
+ allocation_site_mode_(DONT_TRACK_ALLOCATION_SITE) {
this->set_representation(Representation::Tagged());
}
int literal_index() const { return literal_index_; }
int depth() const { return depth_; }
+ AllocationSiteMode allocation_site_mode() const {
+ return allocation_site_mode_;
+ }
private:
- virtual bool IsDeletable() const { return true; }
+ virtual bool IsDeletable() const V8_FINAL V8_OVERRIDE { return true; }
int literal_index_;
int depth_;
+ AllocationSiteMode allocation_site_mode_;
};
-class HFastLiteral: public HMaterializedLiteral<1> {
+class HRegExpLiteral V8_FINAL : public HMaterializedLiteral<1> {
public:
- HFastLiteral(HValue* context,
- Handle<JSObject> boilerplate,
- int total_size,
- int literal_index,
- int depth)
- : HMaterializedLiteral<1>(literal_index, depth),
- boilerplate_(boilerplate),
- total_size_(total_size) {
- SetOperandAt(0, context);
- SetGVNFlag(kChangesNewSpacePromotion);
- }
-
- // Maximum depth and total number of elements and properties for literal
- // graphs to be considered for fast deep-copying.
- static const int kMaxLiteralDepth = 3;
- static const int kMaxLiteralProperties = 8;
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P4(HRegExpLiteral,
+ Handle<FixedArray>,
+ Handle<String>,
+ Handle<String>,
+ int);
HValue* context() { return OperandAt(0); }
- Handle<JSObject> boilerplate() const { return boilerplate_; }
- int total_size() const { return total_size_; }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
- virtual HType CalculateInferredType();
-
- DECLARE_CONCRETE_INSTRUCTION(FastLiteral)
-
- private:
- Handle<JSObject> boilerplate_;
- int total_size_;
-};
-
-
-class HArrayLiteral: public HMaterializedLiteral<1> {
- public:
- HArrayLiteral(HValue* context,
- Handle<HeapObject> boilerplate_object,
- int length,
- int literal_index,
- int depth)
- : HMaterializedLiteral<1>(literal_index, depth),
- length_(length),
- boilerplate_object_(boilerplate_object) {
- SetOperandAt(0, context);
- SetGVNFlag(kChangesNewSpacePromotion);
- }
-
- HValue* context() { return OperandAt(0); }
- ElementsKind boilerplate_elements_kind() const {
- if (!boilerplate_object_->IsJSObject()) {
- return TERMINAL_FAST_ELEMENTS_KIND;
- }
- return Handle<JSObject>::cast(boilerplate_object_)->GetElementsKind();
- }
- Handle<HeapObject> boilerplate_object() const { return boilerplate_object_; }
- int length() const { return length_; }
-
- bool IsCopyOnWrite() const;
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
- virtual HType CalculateInferredType();
-
- DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral)
-
- private:
- int length_;
- Handle<HeapObject> boilerplate_object_;
-};
-
-
-class HObjectLiteral: public HMaterializedLiteral<1> {
- public:
- HObjectLiteral(HValue* context,
- Handle<FixedArray> constant_properties,
- bool fast_elements,
- int literal_index,
- int depth,
- bool has_function)
- : HMaterializedLiteral<1>(literal_index, depth),
- constant_properties_(constant_properties),
- fast_elements_(fast_elements),
- has_function_(has_function) {
- SetOperandAt(0, context);
- SetGVNFlag(kChangesNewSpacePromotion);
- }
-
- HValue* context() { return OperandAt(0); }
- Handle<FixedArray> constant_properties() const {
- return constant_properties_;
- }
- bool fast_elements() const { return fast_elements_; }
- bool has_function() const { return has_function_; }
+ Handle<FixedArray> literals() { return literals_; }
+ Handle<String> pattern() { return pattern_; }
+ Handle<String> flags() { return flags_; }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
- virtual HType CalculateInferredType();
- DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral)
+ DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral)
private:
- Handle<FixedArray> constant_properties_;
- bool fast_elements_;
- bool has_function_;
-};
-
-
-class HRegExpLiteral: public HMaterializedLiteral<1> {
- public:
HRegExpLiteral(HValue* context,
Handle<FixedArray> literals,
Handle<String> pattern,
@@ -5111,209 +6860,217 @@ class HRegExpLiteral: public HMaterializedLiteral<1> {
flags_(flags) {
SetOperandAt(0, context);
SetAllSideEffects();
+ set_type(HType::JSObject());
}
- HValue* context() { return OperandAt(0); }
- Handle<FixedArray> literals() { return literals_; }
- Handle<String> pattern() { return pattern_; }
- Handle<String> flags() { return flags_; }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
- virtual HType CalculateInferredType();
-
- DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral)
-
- private:
Handle<FixedArray> literals_;
Handle<String> pattern_;
Handle<String> flags_;
};
-class HFunctionLiteral: public HTemplateInstruction<1> {
+class HFunctionLiteral V8_FINAL : public HTemplateInstruction<1> {
public:
- HFunctionLiteral(HValue* context,
- Handle<SharedFunctionInfo> shared,
- bool pretenure)
- : shared_info_(shared), pretenure_(pretenure) {
- SetOperandAt(0, context);
- set_representation(Representation::Tagged());
- SetGVNFlag(kChangesNewSpacePromotion);
- }
-
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HFunctionLiteral,
+ Handle<SharedFunctionInfo>,
+ bool);
HValue* context() { return OperandAt(0); }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
- virtual HType CalculateInferredType();
DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral)
Handle<SharedFunctionInfo> shared_info() const { return shared_info_; }
bool pretenure() const { return pretenure_; }
+ bool has_no_literals() const { return has_no_literals_; }
+ bool is_generator() const { return is_generator_; }
+ LanguageMode language_mode() const { return language_mode_; }
private:
- virtual bool IsDeletable() const { return true; }
+ HFunctionLiteral(HValue* context,
+ Handle<SharedFunctionInfo> shared,
+ bool pretenure)
+ : HTemplateInstruction<1>(HType::JSObject()),
+ shared_info_(shared),
+ pretenure_(pretenure),
+ has_no_literals_(shared->num_literals() == 0),
+ is_generator_(shared->is_generator()),
+ language_mode_(shared->language_mode()) {
+ SetOperandAt(0, context);
+ set_representation(Representation::Tagged());
+ SetGVNFlag(kChangesNewSpacePromotion);
+ }
+
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
Handle<SharedFunctionInfo> shared_info_;
- bool pretenure_;
+ bool pretenure_ : 1;
+ bool has_no_literals_ : 1;
+ bool is_generator_ : 1;
+ LanguageMode language_mode_;
};
-class HTypeof: public HTemplateInstruction<2> {
+class HTypeof V8_FINAL : public HTemplateInstruction<2> {
public:
- explicit HTypeof(HValue* context, HValue* value) {
- SetOperandAt(0, context);
- SetOperandAt(1, value);
- set_representation(Representation::Tagged());
- }
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P1(HTypeof, HValue*);
HValue* context() { return OperandAt(0); }
HValue* value() { return OperandAt(1); }
- virtual HValue* Canonicalize();
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
DECLARE_CONCRETE_INSTRUCTION(Typeof)
private:
- virtual bool IsDeletable() const { return true; }
+ explicit HTypeof(HValue* context, HValue* value) {
+ SetOperandAt(0, context);
+ SetOperandAt(1, value);
+ set_representation(Representation::Tagged());
+ }
+
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
};
-class HToFastProperties: public HUnaryOperation {
+class HTrapAllocationMemento V8_FINAL : public HTemplateInstruction<1> {
public:
- explicit HToFastProperties(HValue* value) : HUnaryOperation(value) {
- // This instruction is not marked as having side effects, but
- // changes the map of the input operand. Use it only when creating
- // object literals.
- ASSERT(value->IsObjectLiteral() || value->IsFastLiteral());
- set_representation(Representation::Tagged());
- }
+ DECLARE_INSTRUCTION_FACTORY_P1(HTrapAllocationMemento, HValue*);
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
- DECLARE_CONCRETE_INSTRUCTION(ToFastProperties)
+ HValue* object() { return OperandAt(0); }
+
+ DECLARE_CONCRETE_INSTRUCTION(TrapAllocationMemento)
private:
- virtual bool IsDeletable() const { return true; }
+ explicit HTrapAllocationMemento(HValue* obj) {
+ SetOperandAt(0, obj);
+ }
};
-class HValueOf: public HUnaryOperation {
+class HToFastProperties V8_FINAL : public HUnaryOperation {
public:
- explicit HValueOf(HValue* value) : HUnaryOperation(value) {
- set_representation(Representation::Tagged());
- }
+ DECLARE_INSTRUCTION_FACTORY_P1(HToFastProperties, HValue*);
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
- DECLARE_CONCRETE_INSTRUCTION(ValueOf)
+ DECLARE_CONCRETE_INSTRUCTION(ToFastProperties)
private:
- virtual bool IsDeletable() const { return true; }
+ explicit HToFastProperties(HValue* value) : HUnaryOperation(value) {
+ set_representation(Representation::Tagged());
+ SetGVNFlag(kChangesNewSpacePromotion);
+
+ // This instruction is not marked as kChangesMaps, but does
+ // change the map of the input operand. Use it only when creating
+ // object literals via a runtime call.
+ ASSERT(value->IsCallRuntime());
+#ifdef DEBUG
+ const Runtime::Function* function = HCallRuntime::cast(value)->function();
+ ASSERT(function->function_id == Runtime::kCreateObjectLiteral);
+#endif
+ }
+
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
};
-class HDateField: public HUnaryOperation {
+class HValueOf V8_FINAL : public HUnaryOperation {
public:
- HDateField(HValue* date, Smi* index)
- : HUnaryOperation(date), index_(index) {
- set_representation(Representation::Tagged());
- }
-
- Smi* index() const { return index_; }
+ DECLARE_INSTRUCTION_FACTORY_P1(HValueOf, HValue*);
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
- DECLARE_CONCRETE_INSTRUCTION(DateField)
+ DECLARE_CONCRETE_INSTRUCTION(ValueOf)
private:
- Smi* index_;
+ explicit HValueOf(HValue* value) : HUnaryOperation(value) {
+ set_representation(Representation::Tagged());
+ }
+
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
};
-class HDeleteProperty: public HBinaryOperation {
+class HDateField V8_FINAL : public HUnaryOperation {
public:
- HDeleteProperty(HValue* context, HValue* obj, HValue* key)
- : HBinaryOperation(context, obj, key) {
- set_representation(Representation::Tagged());
- SetAllSideEffects();
- }
+ DECLARE_INSTRUCTION_FACTORY_P2(HDateField, HValue*, Smi*);
- virtual Representation RequiredInputRepresentation(int index) {
+ Smi* index() const { return index_; }
+
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
- virtual HType CalculateInferredType();
+ DECLARE_CONCRETE_INSTRUCTION(DateField)
- DECLARE_CONCRETE_INSTRUCTION(DeleteProperty)
+ private:
+ HDateField(HValue* date, Smi* index)
+ : HUnaryOperation(date), index_(index) {
+ set_representation(Representation::Tagged());
+ }
- HValue* object() { return left(); }
- HValue* key() { return right(); }
+ Smi* index_;
};
-class HIn: public HTemplateInstruction<3> {
+class HSeqStringSetChar V8_FINAL : public HTemplateInstruction<3> {
public:
- HIn(HValue* context, HValue* key, HValue* object) {
- SetOperandAt(0, context);
- SetOperandAt(1, key);
- SetOperandAt(2, object);
- set_representation(Representation::Tagged());
- SetAllSideEffects();
- }
+ DECLARE_INSTRUCTION_FACTORY_P4(HSeqStringSetChar, String::Encoding,
+ HValue*, HValue*, HValue*);
- HValue* context() { return OperandAt(0); }
- HValue* key() { return OperandAt(1); }
- HValue* object() { return OperandAt(2); }
+ String::Encoding encoding() { return encoding_; }
+ HValue* string() { return OperandAt(0); }
+ HValue* index() { return OperandAt(1); }
+ HValue* value() { return OperandAt(2); }
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ return (index == 0) ? Representation::Tagged()
+ : Representation::Integer32();
}
- virtual HType CalculateInferredType() {
- return HType::Boolean();
- }
+ DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar)
- virtual void PrintDataTo(StringStream* stream);
+ private:
+ HSeqStringSetChar(String::Encoding encoding,
+ HValue* string,
+ HValue* index,
+ HValue* value) : encoding_(encoding) {
+ SetOperandAt(0, string);
+ SetOperandAt(1, index);
+ SetOperandAt(2, value);
+ set_representation(Representation::Tagged());
+ }
- DECLARE_CONCRETE_INSTRUCTION(In)
+ String::Encoding encoding_;
};
-class HCheckMapValue: public HTemplateInstruction<2> {
+class HCheckMapValue V8_FINAL : public HTemplateInstruction<2> {
public:
- HCheckMapValue(HValue* value,
- HValue* map) {
- SetOperandAt(0, value);
- SetOperandAt(1, map);
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- SetGVNFlag(kDependsOnMaps);
- SetGVNFlag(kDependsOnElementsKind);
- }
+ DECLARE_INSTRUCTION_FACTORY_P2(HCheckMapValue, HValue*, HValue*);
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- virtual HType CalculateInferredType() {
+ virtual HType CalculateInferredType() V8_OVERRIDE {
return HType::Tagged();
}
@@ -5323,50 +7080,58 @@ class HCheckMapValue: public HTemplateInstruction<2> {
DECLARE_CONCRETE_INSTRUCTION(CheckMapValue)
protected:
- virtual bool DataEquals(HValue* other) {
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE {
return true;
}
+
+ private:
+ HCheckMapValue(HValue* value,
+ HValue* map) {
+ SetOperandAt(0, value);
+ SetOperandAt(1, map);
+ set_representation(Representation::Tagged());
+ SetFlag(kUseGVN);
+ SetGVNFlag(kDependsOnMaps);
+ SetGVNFlag(kDependsOnElementsKind);
+ }
};
-class HForInPrepareMap : public HTemplateInstruction<2> {
+class HForInPrepareMap V8_FINAL : public HTemplateInstruction<2> {
public:
- HForInPrepareMap(HValue* context,
- HValue* object) {
- SetOperandAt(0, context);
- SetOperandAt(1, object);
- set_representation(Representation::Tagged());
- SetAllSideEffects();
- }
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P1(HForInPrepareMap, HValue*);
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
HValue* context() { return OperandAt(0); }
HValue* enumerable() { return OperandAt(1); }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- virtual HType CalculateInferredType() {
+ virtual HType CalculateInferredType() V8_OVERRIDE {
return HType::Tagged();
}
DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap);
+
+ private:
+ HForInPrepareMap(HValue* context,
+ HValue* object) {
+ SetOperandAt(0, context);
+ SetOperandAt(1, object);
+ set_representation(Representation::Tagged());
+ SetAllSideEffects();
+ }
};
-class HForInCacheArray : public HTemplateInstruction<2> {
+class HForInCacheArray V8_FINAL : public HTemplateInstruction<2> {
public:
- HForInCacheArray(HValue* enumerable,
- HValue* keys,
- int idx) : idx_(idx) {
- SetOperandAt(0, enumerable);
- SetOperandAt(1, keys);
- set_representation(Representation::Tagged());
- }
+ DECLARE_INSTRUCTION_FACTORY_P3(HForInCacheArray, HValue*, HValue*, int);
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
@@ -5382,21 +7147,29 @@ class HForInCacheArray : public HTemplateInstruction<2> {
index_cache_ = index_cache;
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- virtual HType CalculateInferredType() {
+ virtual HType CalculateInferredType() V8_OVERRIDE {
return HType::Tagged();
}
DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray);
private:
+ HForInCacheArray(HValue* enumerable,
+ HValue* keys,
+ int idx) : idx_(idx) {
+ SetOperandAt(0, enumerable);
+ SetOperandAt(1, keys);
+ set_representation(Representation::Tagged());
+ }
+
int idx_;
HForInCacheArray* index_cache_;
};
-class HLoadFieldByIndex : public HTemplateInstruction<2> {
+class HLoadFieldByIndex V8_FINAL : public HTemplateInstruction<2> {
public:
HLoadFieldByIndex(HValue* object,
HValue* index) {
@@ -5405,23 +7178,23 @@ class HLoadFieldByIndex : public HTemplateInstruction<2> {
set_representation(Representation::Tagged());
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
HValue* object() { return OperandAt(0); }
HValue* index() { return OperandAt(1); }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- virtual HType CalculateInferredType() {
+ virtual HType CalculateInferredType() V8_OVERRIDE {
return HType::Tagged();
}
DECLARE_CONCRETE_INSTRUCTION(LoadFieldByIndex);
private:
- virtual bool IsDeletable() const { return true; }
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
};
diff --git a/deps/v8/src/hydrogen-load-elimination.cc b/deps/v8/src/hydrogen-load-elimination.cc
new file mode 100644
index 000000000..3337188f9
--- /dev/null
+++ b/deps/v8/src/hydrogen-load-elimination.cc
@@ -0,0 +1,510 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "hydrogen-alias-analysis.h"
+#include "hydrogen-load-elimination.h"
+#include "hydrogen-instructions.h"
+#include "hydrogen-flow-engine.h"
+
+namespace v8 {
+namespace internal {
+
+#define GLOBAL true
+#define TRACE(x) if (FLAG_trace_load_elimination) PrintF x
+
+static const int kMaxTrackedFields = 16;
+static const int kMaxTrackedObjects = 5;
+
+// An element in the field approximation list.
+class HFieldApproximation : public ZoneObject {
+ public: // Just a data blob.
+ HValue* object_;
+ HLoadNamedField* last_load_;
+ HValue* last_value_;
+ HFieldApproximation* next_;
+
+ // Recursively copy the entire linked list of field approximations.
+ HFieldApproximation* Copy(Zone* zone) {
+ if (this == NULL) return NULL;
+ HFieldApproximation* copy = new(zone) HFieldApproximation();
+ copy->object_ = this->object_;
+ copy->last_load_ = this->last_load_;
+ copy->last_value_ = this->last_value_;
+ copy->next_ = this->next_->Copy(zone);
+ return copy;
+ }
+};
+
+
+// The main data structure used during load/store elimination. Each in-object
+// field is tracked separately. For each field, store a list of known field
+// values for known objects.
+class HLoadEliminationTable : public ZoneObject {
+ public:
+ HLoadEliminationTable(Zone* zone, HAliasAnalyzer* aliasing)
+ : zone_(zone), fields_(kMaxTrackedFields, zone), aliasing_(aliasing) { }
+
+ // The main processing of instructions.
+ HLoadEliminationTable* Process(HInstruction* instr, Zone* zone) {
+ switch (instr->opcode()) {
+ case HValue::kLoadNamedField: {
+ HLoadNamedField* l = HLoadNamedField::cast(instr);
+ TRACE((" process L%d field %d (o%d)\n",
+ instr->id(),
+ FieldOf(l->access()),
+ l->object()->ActualValue()->id()));
+ HValue* result = load(l);
+ if (result != instr) {
+ // The load can be replaced with a previous load or a value.
+ TRACE((" replace L%d -> v%d\n", instr->id(), result->id()));
+ instr->DeleteAndReplaceWith(result);
+ }
+ break;
+ }
+ case HValue::kStoreNamedField: {
+ HStoreNamedField* s = HStoreNamedField::cast(instr);
+ TRACE((" process S%d field %d (o%d) = v%d\n",
+ instr->id(),
+ FieldOf(s->access()),
+ s->object()->ActualValue()->id(),
+ s->value()->id()));
+ HValue* result = store(s);
+ if (result == NULL) {
+ // The store is redundant. Remove it.
+ TRACE((" remove S%d\n", instr->id()));
+ instr->DeleteAndReplaceWith(NULL);
+ }
+ break;
+ }
+ default: {
+ if (instr->CheckGVNFlag(kChangesInobjectFields)) {
+ TRACE((" kill-all i%d\n", instr->id()));
+ Kill();
+ break;
+ }
+ if (instr->CheckGVNFlag(kChangesMaps)) {
+ TRACE((" kill-maps i%d\n", instr->id()));
+ KillOffset(JSObject::kMapOffset);
+ }
+ if (instr->CheckGVNFlag(kChangesElementsKind)) {
+ TRACE((" kill-elements-kind i%d\n", instr->id()));
+ KillOffset(JSObject::kMapOffset);
+ KillOffset(JSObject::kElementsOffset);
+ }
+ if (instr->CheckGVNFlag(kChangesElementsPointer)) {
+ TRACE((" kill-elements i%d\n", instr->id()));
+ KillOffset(JSObject::kElementsOffset);
+ }
+ if (instr->CheckGVNFlag(kChangesOsrEntries)) {
+ TRACE((" kill-osr i%d\n", instr->id()));
+ Kill();
+ }
+ }
+ // Improvements possible:
+ // - learn from HCheckMaps for field 0
+ // - remove unobservable stores (write-after-write)
+ // - track cells
+ // - track globals
+ // - track roots
+ }
+ return this;
+ }
+
+  // Support for global analysis with HFlowEngine: Copy state to successor block.
+ HLoadEliminationTable* Copy(HBasicBlock* succ, Zone* zone) {
+ HLoadEliminationTable* copy =
+ new(zone) HLoadEliminationTable(zone, aliasing_);
+ copy->EnsureFields(fields_.length());
+ for (int i = 0; i < fields_.length(); i++) {
+ copy->fields_[i] = fields_[i]->Copy(zone);
+ }
+ if (FLAG_trace_load_elimination) {
+ TRACE((" copy-to B%d\n", succ->block_id()));
+ copy->Print();
+ }
+ return copy;
+ }
+
+ // Support for global analysis with HFlowEngine: Merge this state with
+ // the other incoming state.
+ HLoadEliminationTable* Merge(HBasicBlock* succ,
+ HLoadEliminationTable* that, Zone* zone) {
+ if (that->fields_.length() < fields_.length()) {
+ // Drop fields not in the other table.
+ fields_.Rewind(that->fields_.length());
+ }
+ for (int i = 0; i < fields_.length(); i++) {
+ // Merge the field approximations for like fields.
+ HFieldApproximation* approx = fields_[i];
+ HFieldApproximation* prev = NULL;
+ while (approx != NULL) {
+ // TODO(titzer): Merging is O(N * M); sort?
+ HFieldApproximation* other = that->Find(approx->object_, i);
+ if (other == NULL || !Equal(approx->last_value_, other->last_value_)) {
+ // Kill an entry that doesn't agree with the other value.
+ if (prev != NULL) {
+ prev->next_ = approx->next_;
+ } else {
+ fields_[i] = approx->next_;
+ }
+ approx = approx->next_;
+ continue;
+ }
+ prev = approx;
+ approx = approx->next_;
+ }
+ }
+ return this;
+ }
+
+ friend class HLoadEliminationEffects; // Calls Kill() and others.
+ friend class HLoadEliminationPhase;
+
+ private:
+ // Process a load instruction, updating internal table state. If a previous
+ // load or store for this object and field exists, return the new value with
+ // which the load should be replaced. Otherwise, return {instr}.
+ HValue* load(HLoadNamedField* instr) {
+ int field = FieldOf(instr->access());
+ if (field < 0) return instr;
+
+ HValue* object = instr->object()->ActualValue();
+ HFieldApproximation* approx = FindOrCreate(object, field);
+
+ if (approx->last_value_ == NULL) {
+ // Load is not redundant. Fill out a new entry.
+ approx->last_load_ = instr;
+ approx->last_value_ = instr;
+ return instr;
+ } else {
+ // Eliminate the load. Reuse previously stored value or load instruction.
+ return approx->last_value_;
+ }
+ }
+
+ // Process a store instruction, updating internal table state. If a previous
+ // store to the same object and field makes this store redundant (e.g. because
+ // the stored values are the same), return NULL indicating that this store
+ // instruction is redundant. Otherwise, return {instr}.
+ HValue* store(HStoreNamedField* instr) {
+ int field = FieldOf(instr->access());
+ if (field < 0) return KillIfMisaligned(instr);
+
+ HValue* object = instr->object()->ActualValue();
+ HValue* value = instr->value();
+
+ // Kill non-equivalent may-alias entries.
+ KillFieldInternal(object, field, value);
+ if (instr->has_transition()) {
+ // A transition store alters the map of the object.
+ // TODO(titzer): remember the new map (a constant) for the object.
+ KillFieldInternal(object, FieldOf(JSObject::kMapOffset), NULL);
+ }
+ HFieldApproximation* approx = FindOrCreate(object, field);
+
+ if (Equal(approx->last_value_, value)) {
+ // The store is redundant because the field already has this value.
+ return NULL;
+ } else {
+ // The store is not redundant. Update the entry.
+ approx->last_load_ = NULL;
+ approx->last_value_ = value;
+ return instr;
+ }
+ }
+
+ // Kill everything in this table.
+ void Kill() {
+ fields_.Rewind(0);
+ }
+
+ // Kill all entries matching the given offset.
+ void KillOffset(int offset) {
+ int field = FieldOf(offset);
+ if (field >= 0 && field < fields_.length()) {
+ fields_[field] = NULL;
+ }
+ }
+
+ // Kill all entries aliasing the given store.
+ void KillStore(HStoreNamedField* s) {
+ int field = FieldOf(s->access());
+ if (field >= 0) {
+ KillFieldInternal(s->object()->ActualValue(), field, s->value());
+ } else {
+ KillIfMisaligned(s);
+ }
+ }
+
+ // Kill multiple entries in the case of a misaligned store.
+ HValue* KillIfMisaligned(HStoreNamedField* instr) {
+ HObjectAccess access = instr->access();
+ if (access.IsInobject()) {
+ int offset = access.offset();
+ if ((offset % kPointerSize) != 0) {
+ // Kill the field containing the first word of the access.
+ HValue* object = instr->object()->ActualValue();
+ int field = offset / kPointerSize;
+ KillFieldInternal(object, field, NULL);
+
+ // Kill the next field in case of overlap.
+ int size = kPointerSize;
+ if (access.representation().IsByte()) size = 1;
+ else if (access.representation().IsInteger32()) size = 4;
+ int next_field = (offset + size - 1) / kPointerSize;
+ if (next_field != field) KillFieldInternal(object, next_field, NULL);
+ }
+ }
+ return instr;
+ }
+
+ // Find an entry for the given object and field pair.
+ HFieldApproximation* Find(HValue* object, int field) {
+ // Search for a field approximation for this object.
+ HFieldApproximation* approx = fields_[field];
+ while (approx != NULL) {
+ if (aliasing_->MustAlias(object, approx->object_)) return approx;
+ approx = approx->next_;
+ }
+ return NULL;
+ }
+
+ // Find or create an entry for the given object and field pair.
+ HFieldApproximation* FindOrCreate(HValue* object, int field) {
+ EnsureFields(field + 1);
+
+ // Search for a field approximation for this object.
+ HFieldApproximation* approx = fields_[field];
+ int count = 0;
+ while (approx != NULL) {
+ if (aliasing_->MustAlias(object, approx->object_)) return approx;
+ count++;
+ approx = approx->next_;
+ }
+
+ if (count >= kMaxTrackedObjects) {
+ // Pull the last entry off the end and repurpose it for this object.
+ approx = ReuseLastApproximation(field);
+ } else {
+ // Allocate a new entry.
+ approx = new(zone_) HFieldApproximation();
+ }
+
+ // Insert the entry at the head of the list.
+ approx->object_ = object;
+ approx->last_load_ = NULL;
+ approx->last_value_ = NULL;
+ approx->next_ = fields_[field];
+ fields_[field] = approx;
+
+ return approx;
+ }
+
+ // Kill all entries for a given field that _may_ alias the given object
+ // and do _not_ have the given value.
+ void KillFieldInternal(HValue* object, int field, HValue* value) {
+ if (field >= fields_.length()) return; // Nothing to do.
+
+ HFieldApproximation* approx = fields_[field];
+ HFieldApproximation* prev = NULL;
+ while (approx != NULL) {
+ if (aliasing_->MayAlias(object, approx->object_)) {
+ if (!Equal(approx->last_value_, value)) {
+ // Kill an aliasing entry that doesn't agree on the value.
+ if (prev != NULL) {
+ prev->next_ = approx->next_;
+ } else {
+ fields_[field] = approx->next_;
+ }
+ approx = approx->next_;
+ continue;
+ }
+ }
+ prev = approx;
+ approx = approx->next_;
+ }
+ }
+
+ bool Equal(HValue* a, HValue* b) {
+ if (a == b) return true;
+ if (a != NULL && b != NULL) return a->Equals(b);
+ return false;
+ }
+
+ // Remove the last approximation for a field so that it can be reused.
+ // We reuse the last entry because it was the first inserted and is thus
+ // farthest away from the current instruction.
+ HFieldApproximation* ReuseLastApproximation(int field) {
+ HFieldApproximation* approx = fields_[field];
+ ASSERT(approx != NULL);
+
+ HFieldApproximation* prev = NULL;
+ while (approx->next_ != NULL) {
+ prev = approx;
+ approx = approx->next_;
+ }
+ if (prev != NULL) prev->next_ = NULL;
+ return approx;
+ }
+
+ // Compute the field index for the given object access; -1 if not tracked.
+ int FieldOf(HObjectAccess access) {
+ return access.IsInobject() ? FieldOf(access.offset()) : -1;
+ }
+
+ // Compute the field index for the given in-object offset; -1 if not tracked.
+ int FieldOf(int offset) {
+ if (offset >= kMaxTrackedFields * kPointerSize) return -1;
+ // TODO(titzer): track misaligned loads in a separate list?
+ if ((offset % kPointerSize) != 0) return -1; // Ignore misaligned accesses.
+ return offset / kPointerSize;
+ }
+
+ // Ensure internal storage for the given number of fields.
+ void EnsureFields(int num_fields) {
+ if (fields_.length() < num_fields) {
+ fields_.AddBlock(NULL, num_fields - fields_.length(), zone_);
+ }
+ }
+
+ // Print this table to stdout.
+ void Print() {
+ for (int i = 0; i < fields_.length(); i++) {
+ PrintF(" field %d: ", i);
+ for (HFieldApproximation* a = fields_[i]; a != NULL; a = a->next_) {
+ PrintF("[o%d =", a->object_->id());
+ if (a->last_load_ != NULL) PrintF(" L%d", a->last_load_->id());
+ if (a->last_value_ != NULL) PrintF(" v%d", a->last_value_->id());
+ PrintF("] ");
+ }
+ PrintF("\n");
+ }
+ }
+
+ Zone* zone_;
+ ZoneList<HFieldApproximation*> fields_;
+ HAliasAnalyzer* aliasing_;
+};
+
+
+// Support for HFlowEngine: collect store effects within loops.
+class HLoadEliminationEffects : public ZoneObject {
+ public:
+ explicit HLoadEliminationEffects(Zone* zone)
+ : zone_(zone),
+ maps_stored_(false),
+ fields_stored_(false),
+ elements_stored_(false),
+ stores_(5, zone) { }
+
+ inline bool Disabled() {
+ return false; // Effects are _not_ disabled.
+ }
+
+ // Process a possibly side-effecting instruction.
+ void Process(HInstruction* instr, Zone* zone) {
+ switch (instr->opcode()) {
+ case HValue::kStoreNamedField: {
+ stores_.Add(HStoreNamedField::cast(instr), zone_);
+ break;
+ }
+ case HValue::kOsrEntry: {
+ // Kill everything. Loads must not be hoisted past the OSR entry.
+ maps_stored_ = true;
+ fields_stored_ = true;
+ elements_stored_ = true;
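+        // Fall through.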
+ }
+ default: {
+ fields_stored_ |= instr->CheckGVNFlag(kChangesInobjectFields);
+ maps_stored_ |= instr->CheckGVNFlag(kChangesMaps);
+ maps_stored_ |= instr->CheckGVNFlag(kChangesElementsKind);
+ elements_stored_ |= instr->CheckGVNFlag(kChangesElementsKind);
+ elements_stored_ |= instr->CheckGVNFlag(kChangesElementsPointer);
+ }
+ }
+ }
+
+ // Apply these effects to the given load elimination table.
+ void Apply(HLoadEliminationTable* table) {
+ if (fields_stored_) {
+ table->Kill();
+ return;
+ }
+ if (maps_stored_) {
+ table->KillOffset(JSObject::kMapOffset);
+ }
+ if (elements_stored_) {
+ table->KillOffset(JSObject::kElementsOffset);
+ }
+
+ // Kill non-agreeing fields for each store contained in these effects.
+ for (int i = 0; i < stores_.length(); i++) {
+ table->KillStore(stores_[i]);
+ }
+ }
+
+ // Union these effects with the other effects.
+ void Union(HLoadEliminationEffects* that, Zone* zone) {
+ maps_stored_ |= that->maps_stored_;
+ fields_stored_ |= that->fields_stored_;
+ elements_stored_ |= that->elements_stored_;
+ for (int i = 0; i < that->stores_.length(); i++) {
+ stores_.Add(that->stores_[i], zone);
+ }
+ }
+
+ private:
+ Zone* zone_;
+ bool maps_stored_ : 1;
+ bool fields_stored_ : 1;
+ bool elements_stored_ : 1;
+ ZoneList<HStoreNamedField*> stores_;
+};
+
+
+// The main routine of the analysis phase. Use the HFlowEngine for either a
+// local or a global analysis.
+void HLoadEliminationPhase::Run() {
+ HFlowEngine<HLoadEliminationTable, HLoadEliminationEffects>
+ engine(graph(), zone());
+ HAliasAnalyzer aliasing;
+ HLoadEliminationTable* table =
+ new(zone()) HLoadEliminationTable(zone(), &aliasing);
+
+ if (GLOBAL) {
+ // Perform a global analysis.
+ engine.AnalyzeDominatedBlocks(graph()->blocks()->at(0), table);
+ } else {
+ // Perform only local analysis.
+ for (int i = 0; i < graph()->blocks()->length(); i++) {
+ table->Kill();
+ engine.AnalyzeOneBlock(graph()->blocks()->at(i), table);
+ }
+ }
+}
+
+} } // namespace v8::internal
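Illustrative aside (editorial, not part of this patch): the table above only tracks pointer-aligned in-object offsets within the first kMaxTrackedFields words; everything else is handled conservatively. A minimal standalone sketch of that offset-to-index mapping, assuming an 8-byte pointer size purely for illustration:

    #include <cstdio>

    static const int kIllustrativePointerSize = 8;
    static const int kIllustrativeMaxTrackedFields = 16;

    // Mirrors the FieldOf(int offset) logic above: offsets beyond the
    // tracked window or not pointer-aligned are reported as untracked (-1).
    static int FieldIndexOf(int offset) {
      if (offset >= kIllustrativeMaxTrackedFields * kIllustrativePointerSize) {
        return -1;
      }
      if ((offset % kIllustrativePointerSize) != 0) return -1;  // Misaligned.
      return offset / kIllustrativePointerSize;
    }

    int main() {
      // Prints "0 3 -1": offset 24 is the fourth pointer-sized field,
      // while offset 4 is misaligned and therefore left untracked.
      std::printf("%d %d %d\n", FieldIndexOf(0), FieldIndexOf(24), FieldIndexOf(4));
      return 0;
    }

Under these assumptions a misaligned store would instead fall back to the KillIfMisaligned() handling above.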
diff --git a/deps/v8/src/hydrogen-load-elimination.h b/deps/v8/src/hydrogen-load-elimination.h
new file mode 100644
index 000000000..ef6f71fa1
--- /dev/null
+++ b/deps/v8/src/hydrogen-load-elimination.h
@@ -0,0 +1,50 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_HYDROGEN_LOAD_ELIMINATION_H_
+#define V8_HYDROGEN_LOAD_ELIMINATION_H_
+
+#include "hydrogen.h"
+
+namespace v8 {
+namespace internal {
+
+class HLoadEliminationPhase : public HPhase {
+ public:
+ explicit HLoadEliminationPhase(HGraph* graph)
+ : HPhase("H_Load elimination", graph) { }
+
+ void Run();
+
+ private:
+ void EliminateLoads(HBasicBlock* block);
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_HYDROGEN_LOAD_ELIMINATION_H_
diff --git a/deps/v8/src/hydrogen-mark-deoptimize.cc b/deps/v8/src/hydrogen-mark-deoptimize.cc
new file mode 100644
index 000000000..c0236e91c
--- /dev/null
+++ b/deps/v8/src/hydrogen-mark-deoptimize.cc
@@ -0,0 +1,84 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "hydrogen-mark-deoptimize.h"
+
+namespace v8 {
+namespace internal {
+
+void HMarkDeoptimizeOnUndefinedPhase::Run() {
+ const ZoneList<HPhi*>* phi_list = graph()->phi_list();
+ for (int i = 0; i < phi_list->length(); i++) {
+ HPhi* phi = phi_list->at(i);
+ if (phi->CheckFlag(HValue::kAllowUndefinedAsNaN) &&
+ !phi->CheckUsesForFlag(HValue::kAllowUndefinedAsNaN)) {
+ ProcessPhi(phi);
+ }
+ }
+}
+
+
+void HMarkDeoptimizeOnUndefinedPhase::ProcessPhi(HPhi* phi) {
+ ASSERT(phi->CheckFlag(HValue::kAllowUndefinedAsNaN));
+ ASSERT(worklist_.is_empty());
+
+ // Push the phi onto the worklist
+ phi->ClearFlag(HValue::kAllowUndefinedAsNaN);
+ worklist_.Add(phi, zone());
+
+ // Process all phis that can reach this phi
+ while (!worklist_.is_empty()) {
+ phi = worklist_.RemoveLast();
+ for (int i = phi->OperandCount() - 1; i >= 0; --i) {
+ HValue* input = phi->OperandAt(i);
+ if (input->IsPhi() && input->CheckFlag(HValue::kAllowUndefinedAsNaN)) {
+ input->ClearFlag(HValue::kAllowUndefinedAsNaN);
+ worklist_.Add(HPhi::cast(input), zone());
+ }
+ }
+ }
+}
+
+
+void HComputeChangeUndefinedToNaN::Run() {
+ const ZoneList<HBasicBlock*>* blocks(graph()->blocks());
+ for (int i = 0; i < blocks->length(); ++i) {
+ const HBasicBlock* block(blocks->at(i));
+ for (HInstruction* current = block->first(); current != NULL; ) {
+ HInstruction* next = current->next();
+ if (current->IsChange()) {
+ if (HChange::cast(current)->can_convert_undefined_to_nan()) {
+ current->SetFlag(HValue::kAllowUndefinedAsNaN);
+ }
+ }
+ current = next;
+ }
+ }
+}
+
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/hydrogen-mark-deoptimize.h b/deps/v8/src/hydrogen-mark-deoptimize.h
new file mode 100644
index 000000000..30f35b3de
--- /dev/null
+++ b/deps/v8/src/hydrogen-mark-deoptimize.h
@@ -0,0 +1,75 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_HYDROGEN_MARK_DEOPTIMIZE_H_
+#define V8_HYDROGEN_MARK_DEOPTIMIZE_H_
+
+#include "hydrogen.h"
+
+namespace v8 {
+namespace internal {
+
+
+// Compute DeoptimizeOnUndefined flag for phis. Any phi that can reach a use
+// with DeoptimizeOnUndefined set must have DeoptimizeOnUndefined set.
+// Currently only HCompareNumericAndBranch, with double input representation,
+// has this flag set. The flag is used by HChange tagged->double, which must
+// deoptimize if one of its uses has this flag set.
+class HMarkDeoptimizeOnUndefinedPhase : public HPhase {
+ public:
+ explicit HMarkDeoptimizeOnUndefinedPhase(HGraph* graph)
+ : HPhase("H_Mark deoptimize on undefined", graph),
+ worklist_(16, zone()) {}
+
+ void Run();
+
+ private:
+ void ProcessPhi(HPhi* phi);
+
+ // Preallocated worklist used as an optimization so we don't have
+ // to allocate a new ZoneList for every ProcessPhi() invocation.
+ ZoneList<HPhi*> worklist_;
+
+ DISALLOW_COPY_AND_ASSIGN(HMarkDeoptimizeOnUndefinedPhase);
+};
+
+
+class HComputeChangeUndefinedToNaN : public HPhase {
+ public:
+ explicit HComputeChangeUndefinedToNaN(HGraph* graph)
+ : HPhase("H_Compute change undefined to nan", graph) {}
+
+ void Run();
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HComputeChangeUndefinedToNaN);
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_HYDROGEN_MARK_DEOPTIMIZE_H_
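Illustrative aside (editorial, not part of this patch): HMarkDeoptimizeOnUndefinedPhase clears kAllowUndefinedAsNaN transitively through phi inputs using the worklist in ProcessPhi() above. A self-contained sketch of the same propagation pattern over a toy node graph:

    #include <vector>

    struct Node {
      bool is_phi = false;
      bool allow_undefined_as_nan = true;
      std::vector<Node*> operands;
    };

    // Clear the flag on |start| and on every phi transitively feeding it,
    // mirroring the worklist loop in ProcessPhi() above.
    void ClearFlagTransitively(Node* start) {
      std::vector<Node*> worklist;
      start->allow_undefined_as_nan = false;
      worklist.push_back(start);
      while (!worklist.empty()) {
        Node* node = worklist.back();
        worklist.pop_back();
        for (Node* input : node->operands) {
          if (input->is_phi && input->allow_undefined_as_nan) {
            input->allow_undefined_as_nan = false;
            worklist.push_back(input);
          }
        }
      }
    }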
diff --git a/deps/v8/src/hydrogen-mark-unreachable.cc b/deps/v8/src/hydrogen-mark-unreachable.cc
new file mode 100644
index 000000000..d7c5ed2b1
--- /dev/null
+++ b/deps/v8/src/hydrogen-mark-unreachable.cc
@@ -0,0 +1,77 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "hydrogen-mark-unreachable.h"
+
+namespace v8 {
+namespace internal {
+
+
+void HMarkUnreachableBlocksPhase::MarkUnreachableBlocks() {
+ // If there is unreachable code in the graph, propagate the unreachable marks
+ // using a fixed-point iteration.
+ bool changed = true;
+ const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
+ while (changed) {
+ changed = false;
+ for (int i = 0; i < blocks->length(); i++) {
+ HBasicBlock* block = blocks->at(i);
+ if (!block->IsReachable()) continue;
+ bool is_reachable = blocks->at(0) == block;
+ for (HPredecessorIterator it(block); !it.Done(); it.Advance()) {
+ HBasicBlock* predecessor = it.Current();
+ // A block is reachable if one of its predecessors is reachable,
+ // doesn't deoptimize and either is known to transfer control to the
+ // block or has a control flow instruction for which the next block
+ // cannot be determined.
+ if (predecessor->IsReachable() && !predecessor->IsDeoptimizing()) {
+ HBasicBlock* pred_succ;
+ bool known_pred_succ =
+ predecessor->end()->KnownSuccessorBlock(&pred_succ);
+ if (!known_pred_succ || pred_succ == block) {
+ is_reachable = true;
+ break;
+ }
+ }
+ if (block->is_osr_entry()) {
+ is_reachable = true;
+ }
+ }
+ if (!is_reachable) {
+ block->MarkUnreachable();
+ changed = true;
+ }
+ }
+ }
+}
+
+
+void HMarkUnreachableBlocksPhase::Run() {
+ MarkUnreachableBlocks();
+}
+
+} } // namespace v8::internal
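Illustrative aside (editorial, not part of this patch): the phase above propagates reachability to a fixed point; a block stays reachable only if some reachable, non-deoptimizing predecessor can transfer control to it. A stripped-down sketch of the same fixed-point sweep, ignoring the deoptimization and known-successor refinements:

    #include <vector>

    struct Block {
      bool reachable = false;
      std::vector<Block*> predecessors;
    };

    // Mark every block reachable from the entry block, iterating until no
    // block changes state, as MarkUnreachableBlocks() above does.
    void MarkReachable(std::vector<Block*>* blocks) {
      if (blocks->empty()) return;
      (*blocks)[0]->reachable = true;  // The entry block is always reachable.
      bool changed = true;
      while (changed) {
        changed = false;
        for (Block* block : *blocks) {
          if (block->reachable) continue;
          for (Block* pred : block->predecessors) {
            if (pred->reachable) {
              block->reachable = true;
              changed = true;
              break;
            }
          }
        }
      }
    }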
diff --git a/deps/v8/src/hydrogen-mark-unreachable.h b/deps/v8/src/hydrogen-mark-unreachable.h
new file mode 100644
index 000000000..e9459d520
--- /dev/null
+++ b/deps/v8/src/hydrogen-mark-unreachable.h
@@ -0,0 +1,53 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_HYDROGEN_MARK_UNREACHABLE_H_
+#define V8_HYDROGEN_MARK_UNREACHABLE_H_
+
+#include "hydrogen.h"
+
+namespace v8 {
+namespace internal {
+
+
+class HMarkUnreachableBlocksPhase : public HPhase {
+ public:
+ explicit HMarkUnreachableBlocksPhase(HGraph* graph)
+      : HPhase("H_Mark unreachable blocks", graph) { }
+
+ void Run();
+
+ private:
+ void MarkUnreachableBlocks();
+
+ DISALLOW_COPY_AND_ASSIGN(HMarkUnreachableBlocksPhase);
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_HYDROGEN_MARK_UNREACHABLE_H_
diff --git a/deps/v8/src/hydrogen-minus-zero.cc b/deps/v8/src/hydrogen-minus-zero.cc
new file mode 100644
index 000000000..28ae6eba4
--- /dev/null
+++ b/deps/v8/src/hydrogen-minus-zero.cc
@@ -0,0 +1,83 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "hydrogen-minus-zero.h"
+
+namespace v8 {
+namespace internal {
+
+void HComputeMinusZeroChecksPhase::Run() {
+ const ZoneList<HBasicBlock*>* blocks(graph()->blocks());
+ for (int i = 0; i < blocks->length(); ++i) {
+ for (HInstructionIterator it(blocks->at(i)); !it.Done(); it.Advance()) {
+ HInstruction* current = it.Current();
+ if (current->IsChange()) {
+ HChange* change = HChange::cast(current);
+ // Propagate flags for negative zero checks upwards from conversions
+ // int32-to-tagged and int32-to-double.
+ Representation from = change->value()->representation();
+ ASSERT(from.Equals(change->from()));
+ if (from.IsSmiOrInteger32()) {
+ ASSERT(change->to().IsTagged() ||
+ change->to().IsDouble() ||
+ change->to().IsSmiOrInteger32());
+ ASSERT(visited_.IsEmpty());
+ PropagateMinusZeroChecks(change->value());
+ visited_.Clear();
+ }
+ }
+ }
+ }
+}
+
+
+void HComputeMinusZeroChecksPhase::PropagateMinusZeroChecks(HValue* value) {
+ for (HValue* current = value;
+ current != NULL && !visited_.Contains(current->id());
+ current = current->EnsureAndPropagateNotMinusZero(&visited_)) {
+ // For phis, we must propagate the check to all of its inputs.
+ if (current->IsPhi()) {
+ visited_.Add(current->id());
+ HPhi* phi = HPhi::cast(current);
+ for (int i = 0; i < phi->OperandCount(); ++i) {
+ PropagateMinusZeroChecks(phi->OperandAt(i));
+ }
+ break;
+ }
+
+ // For multiplication, division, and Math.min/max(), we must propagate
+ // to the left and the right side.
+ if (current->IsMul() || current->IsDiv() || current->IsMathMinMax()) {
+ HBinaryOperation* operation = HBinaryOperation::cast(current);
+ operation->EnsureAndPropagateNotMinusZero(&visited_);
+ PropagateMinusZeroChecks(operation->left());
+ PropagateMinusZeroChecks(operation->right());
+ }
+ }
+}
+
+} } // namespace v8::internal
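Illustrative aside (editorial, not part of this patch): the minus-zero checks exist because IEEE doubles distinguish 0.0 from -0.0 while Smi/int32 values cannot, so a truncating conversion can silently lose the sign of zero. A small standalone example of the observable difference:

    #include <cmath>
    #include <cstdio>

    int main() {
      double a = 0.0 * -1.0;                  // Produces -0.0.
      std::printf("%d\n", std::signbit(a));   // 1: the sign of zero is observable.
      std::printf("%f\n", 1.0 / a);           // -inf, whereas 1.0 / 0.0 is +inf.
      int b = static_cast<int>(a);            // 0: the sign information is lost.
      std::printf("%f\n", 1.0 / b);           // +inf after the truncation.
      return 0;
    }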
diff --git a/deps/v8/src/hydrogen-minus-zero.h b/deps/v8/src/hydrogen-minus-zero.h
new file mode 100644
index 000000000..d23ec1196
--- /dev/null
+++ b/deps/v8/src/hydrogen-minus-zero.h
@@ -0,0 +1,56 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_HYDROGEN_MINUS_ZERO_H_
+#define V8_HYDROGEN_MINUS_ZERO_H_
+
+#include "hydrogen.h"
+
+namespace v8 {
+namespace internal {
+
+
+class HComputeMinusZeroChecksPhase : public HPhase {
+ public:
+ explicit HComputeMinusZeroChecksPhase(HGraph* graph)
+ : HPhase("H_Compute minus zero checks", graph),
+ visited_(graph->GetMaximumValueID(), zone()) { }
+
+ void Run();
+
+ private:
+ void PropagateMinusZeroChecks(HValue* value);
+
+ BitVector visited_;
+
+ DISALLOW_COPY_AND_ASSIGN(HComputeMinusZeroChecksPhase);
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_HYDROGEN_MINUS_ZERO_H_
diff --git a/deps/v8/src/hydrogen-osr.cc b/deps/v8/src/hydrogen-osr.cc
new file mode 100644
index 000000000..6e39df6aa
--- /dev/null
+++ b/deps/v8/src/hydrogen-osr.cc
@@ -0,0 +1,125 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "hydrogen.h"
+#include "hydrogen-osr.h"
+
+namespace v8 {
+namespace internal {
+
+// True iff we are compiling for OSR and the statement is the OSR entry.
+bool HOsrBuilder::HasOsrEntryAt(IterationStatement* statement) {
+ return statement->OsrEntryId() == builder_->current_info()->osr_ast_id();
+}
+
+
+HBasicBlock* HOsrBuilder::BuildOsrLoopEntry(IterationStatement* statement) {
+ ASSERT(HasOsrEntryAt(statement));
+
+ Zone* zone = builder_->zone();
+ HGraph* graph = builder_->graph();
+
+ // only one OSR point per compile is allowed.
+ ASSERT(graph->osr() == NULL);
+
+ // remember this builder as the one OSR builder in the graph.
+ graph->set_osr(this);
+
+ HBasicBlock* non_osr_entry = graph->CreateBasicBlock();
+ osr_entry_ = graph->CreateBasicBlock();
+ HValue* true_value = graph->GetConstantTrue();
+ HBranch* test = builder_->New<HBranch>(true_value, ToBooleanStub::Types(),
+ non_osr_entry, osr_entry_);
+ builder_->FinishCurrentBlock(test);
+
+ HBasicBlock* loop_predecessor = graph->CreateBasicBlock();
+ builder_->Goto(non_osr_entry, loop_predecessor);
+
+ builder_->set_current_block(osr_entry_);
+ osr_entry_->set_osr_entry();
+ BailoutId osr_entry_id = statement->OsrEntryId();
+
+ HEnvironment *environment = builder_->environment();
+ int first_expression_index = environment->first_expression_index();
+ int length = environment->length();
+ osr_values_ = new(zone) ZoneList<HUnknownOSRValue*>(length, zone);
+
+ for (int i = 0; i < first_expression_index; ++i) {
+ HUnknownOSRValue* osr_value
+ = builder_->Add<HUnknownOSRValue>(environment, i);
+ environment->Bind(i, osr_value);
+ osr_values_->Add(osr_value, zone);
+ }
+
+ if (first_expression_index != length) {
+ environment->Drop(length - first_expression_index);
+ for (int i = first_expression_index; i < length; ++i) {
+ HUnknownOSRValue* osr_value
+ = builder_->Add<HUnknownOSRValue>(environment, i);
+ environment->Push(osr_value);
+ osr_values_->Add(osr_value, zone);
+ }
+ }
+
+ unoptimized_frame_slots_ =
+ environment->local_count() + environment->push_count();
+
+ // Keep a copy of the old environment, since the OSR values need it
+ // to figure out where exactly they are located in the unoptimized frame.
+ environment = environment->Copy();
+ builder_->current_block()->UpdateEnvironment(environment);
+
+ builder_->Add<HSimulate>(osr_entry_id);
+ builder_->Add<HOsrEntry>(osr_entry_id);
+ HContext* context = builder_->Add<HContext>();
+ environment->BindContext(context);
+ builder_->Goto(loop_predecessor);
+ loop_predecessor->SetJoinId(statement->EntryId());
+ builder_->set_current_block(loop_predecessor);
+
+ // Create the final loop entry
+ osr_loop_entry_ = builder_->BuildLoopEntry();
+ return osr_loop_entry_;
+}
+
+
+void HOsrBuilder::FinishGraph() {
+ // do nothing for now.
+}
+
+
+void HOsrBuilder::FinishOsrValues() {
+ const ZoneList<HPhi*>* phis = osr_loop_entry_->phis();
+ for (int j = 0; j < phis->length(); j++) {
+ HPhi* phi = phis->at(j);
+ if (phi->HasMergedIndex()) {
+ osr_values_->at(phi->merged_index())->set_incoming_value(phi);
+ }
+ }
+}
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/hydrogen-osr.h b/deps/v8/src/hydrogen-osr.h
new file mode 100644
index 000000000..ae72ce650
--- /dev/null
+++ b/deps/v8/src/hydrogen-osr.h
@@ -0,0 +1,77 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_HYDROGEN_OSR_H_
+#define V8_HYDROGEN_OSR_H_
+
+#include "hydrogen.h"
+#include "ast.h"
+#include "zone.h"
+
+namespace v8 {
+namespace internal {
+
+// Responsible for building graph parts related to OSR and otherwise
+// setting up the graph to do an OSR compile.
+class HOsrBuilder : public ZoneObject {
+ public:
+ explicit HOsrBuilder(HOptimizedGraphBuilder* builder)
+ : unoptimized_frame_slots_(0),
+ builder_(builder),
+ osr_entry_(NULL),
+ osr_loop_entry_(NULL),
+ osr_values_(NULL) { }
+
+ // Creates the loop entry block for the given statement, setting up OSR
+ // entries as necessary, and sets the current block to the new block.
+ HBasicBlock* BuildOsrLoopEntry(IterationStatement* statement);
+
+ // Process the hydrogen graph after it has been completed, performing
+ // any OSR-specific cleanups or changes.
+ void FinishGraph();
+
+ // Process the OSR values and phis after initial graph optimization.
+ void FinishOsrValues();
+
+ // Return the number of slots in the unoptimized frame at the entry to OSR.
+ int UnoptimizedFrameSlots() const {
+ return unoptimized_frame_slots_;
+ }
+
+ bool HasOsrEntryAt(IterationStatement* statement);
+
+ private:
+ int unoptimized_frame_slots_;
+ HOptimizedGraphBuilder* builder_;
+ HBasicBlock* osr_entry_;
+ HBasicBlock* osr_loop_entry_;
+ ZoneList<HUnknownOSRValue*>* osr_values_;
+};
+
+} } // namespace v8::internal
+
+#endif // V8_HYDROGEN_OSR_H_
diff --git a/deps/v8/src/hydrogen-range-analysis.cc b/deps/v8/src/hydrogen-range-analysis.cc
new file mode 100644
index 000000000..76fd5f35f
--- /dev/null
+++ b/deps/v8/src/hydrogen-range-analysis.cc
@@ -0,0 +1,200 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "hydrogen-range-analysis.h"
+
+namespace v8 {
+namespace internal {
+
+
+class Pending {
+ public:
+ Pending(HBasicBlock* block, int last_changed_range)
+ : block_(block), last_changed_range_(last_changed_range) {}
+
+ HBasicBlock* block() const { return block_; }
+ int last_changed_range() const { return last_changed_range_; }
+
+ private:
+ HBasicBlock* block_;
+ int last_changed_range_;
+};
+
+
+void HRangeAnalysisPhase::TraceRange(const char* msg, ...) {
+ if (FLAG_trace_range) {
+ va_list arguments;
+ va_start(arguments, msg);
+ OS::VPrint(msg, arguments);
+ va_end(arguments);
+ }
+}
+
+
+void HRangeAnalysisPhase::Run() {
+ HBasicBlock* block(graph()->entry_block());
+ ZoneList<Pending> stack(graph()->blocks()->length(), zone());
+ while (block != NULL) {
+ TraceRange("Analyzing block B%d\n", block->block_id());
+
+ // Infer range based on control flow.
+ if (block->predecessors()->length() == 1) {
+ HBasicBlock* pred = block->predecessors()->first();
+ if (pred->end()->IsCompareNumericAndBranch()) {
+ InferControlFlowRange(HCompareNumericAndBranch::cast(pred->end()),
+ block);
+ }
+ }
+
+ // Process phi instructions.
+ for (int i = 0; i < block->phis()->length(); ++i) {
+ HPhi* phi = block->phis()->at(i);
+ InferRange(phi);
+ }
+
+ // Go through all instructions of the current block.
+ for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
+ InferRange(it.Current());
+ }
+
+ // Continue analysis in all dominated blocks.
+ const ZoneList<HBasicBlock*>* dominated_blocks(block->dominated_blocks());
+ if (!dominated_blocks->is_empty()) {
+ // Continue with first dominated block, and push the
+ // remaining blocks on the stack (in reverse order).
+ int last_changed_range = changed_ranges_.length();
+ for (int i = dominated_blocks->length() - 1; i > 0; --i) {
+ stack.Add(Pending(dominated_blocks->at(i), last_changed_range), zone());
+ }
+ block = dominated_blocks->at(0);
+ } else if (!stack.is_empty()) {
+ // Pop next pending block from stack.
+ Pending pending = stack.RemoveLast();
+ RollBackTo(pending.last_changed_range());
+ block = pending.block();
+ } else {
+ // All blocks done.
+ block = NULL;
+ }
+ }
+}
+
+
+void HRangeAnalysisPhase::InferControlFlowRange(HCompareNumericAndBranch* test,
+ HBasicBlock* dest) {
+ ASSERT((test->FirstSuccessor() == dest) == (test->SecondSuccessor() != dest));
+ if (test->representation().IsSmiOrInteger32()) {
+ Token::Value op = test->token();
+ if (test->SecondSuccessor() == dest) {
+ op = Token::NegateCompareOp(op);
+ }
+ Token::Value inverted_op = Token::ReverseCompareOp(op);
+ UpdateControlFlowRange(op, test->left(), test->right());
+ UpdateControlFlowRange(inverted_op, test->right(), test->left());
+ }
+}
+
+
+// We know that value [op] other. Use this information to update the range on
+// value.
+void HRangeAnalysisPhase::UpdateControlFlowRange(Token::Value op,
+ HValue* value,
+ HValue* other) {
+ Range temp_range;
+ Range* range = other->range() != NULL ? other->range() : &temp_range;
+ Range* new_range = NULL;
+
+ TraceRange("Control flow range infer %d %s %d\n",
+ value->id(),
+ Token::Name(op),
+ other->id());
+
+ if (op == Token::EQ || op == Token::EQ_STRICT) {
+ // The same range has to apply for value.
+ new_range = range->Copy(graph()->zone());
+ } else if (op == Token::LT || op == Token::LTE) {
+ new_range = range->CopyClearLower(graph()->zone());
+ if (op == Token::LT) {
+ new_range->AddConstant(-1);
+ }
+ } else if (op == Token::GT || op == Token::GTE) {
+ new_range = range->CopyClearUpper(graph()->zone());
+ if (op == Token::GT) {
+ new_range->AddConstant(1);
+ }
+ }
+
+ if (new_range != NULL && !new_range->IsMostGeneric()) {
+ AddRange(value, new_range);
+ }
+}
+
+
+void HRangeAnalysisPhase::InferRange(HValue* value) {
+ ASSERT(!value->HasRange());
+ if (!value->representation().IsNone()) {
+ value->ComputeInitialRange(graph()->zone());
+ Range* range = value->range();
+ TraceRange("Initial inferred range of %d (%s) set to [%d,%d]\n",
+ value->id(),
+ value->Mnemonic(),
+ range->lower(),
+ range->upper());
+ }
+}
+
+
+void HRangeAnalysisPhase::RollBackTo(int index) {
+ ASSERT(index <= changed_ranges_.length());
+ for (int i = index; i < changed_ranges_.length(); ++i) {
+ changed_ranges_[i]->RemoveLastAddedRange();
+ }
+ changed_ranges_.Rewind(index);
+}
+
+
+void HRangeAnalysisPhase::AddRange(HValue* value, Range* range) {
+ Range* original_range = value->range();
+ value->AddNewRange(range, graph()->zone());
+ changed_ranges_.Add(value, zone());
+ Range* new_range = value->range();
+ TraceRange("Updated range of %d set to [%d,%d]\n",
+ value->id(),
+ new_range->lower(),
+ new_range->upper());
+ if (original_range != NULL) {
+ TraceRange("Original range was [%d,%d]\n",
+ original_range->lower(),
+ original_range->upper());
+ }
+ TraceRange("New information was [%d,%d]\n",
+ range->lower(),
+ range->upper());
+}
+
+
+} } // namespace v8::internal
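
The narrowing done by UpdateControlFlowRange above can be read as: on the edge where "value op other" is known to hold, derive a tighter bound for value from other's range (shifted by one for the strict comparisons). A minimal standalone sketch of that idea, using plain ints and bound intersection instead of V8's Range/Zone machinery (an illustrative sketch, not code from the patch):

#include <algorithm>
#include <climits>
#include <cstdio>

struct Range { int lower; int upper; };

// We know "value op other" holds on this control-flow edge; tighten value.
Range Narrow(char op, Range value, Range other) {
  switch (op) {
    case '=':  // value == other: value lies inside other's range.
      value.lower = std::max(value.lower, other.lower);
      value.upper = std::min(value.upper, other.upper);
      break;
    case '<':  // value < other: value <= other.upper - 1.
      value.upper = std::min(value.upper, other.upper - 1);
      break;
    case '>':  // value > other: value >= other.lower + 1.
      value.lower = std::max(value.lower, other.lower + 1);
      break;
  }
  return value;
}

int main() {
  Range x = {INT_MIN, INT_MAX};
  Range ten = {10, 10};
  Range then_branch = Narrow('<', x, ten);  // the edge taken when x < 10
  std::printf("x in [%d, %d]\n", then_branch.lower, then_branch.upper);
  return 0;
}
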
diff --git a/deps/v8/src/inspector.cc b/deps/v8/src/hydrogen-range-analysis.h
index 833d33843..a1e9737c5 100644
--- a/deps/v8/src/inspector.cc
+++ b/deps/v8/src/hydrogen-range-analysis.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -25,39 +25,35 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#ifndef V8_HYDROGEN_RANGE_ANALYSIS_H_
+#define V8_HYDROGEN_RANGE_ANALYSIS_H_
-#include "v8.h"
-#include "inspector.h"
-
+#include "hydrogen.h"
namespace v8 {
namespace internal {
-#ifdef INSPECTOR
-//============================================================================
-// The Inspector.
+class HRangeAnalysisPhase : public HPhase {
+ public:
+ explicit HRangeAnalysisPhase(HGraph* graph)
+ : HPhase("H_Range analysis", graph), changed_ranges_(16, zone()) { }
-void Inspector::DumpObjectType(FILE* out, Object* obj, bool print_more) {
- // Dump the object pointer.
- OS::FPrint(out, "%p:", reinterpret_cast<void*>(obj));
- if (obj->IsHeapObject()) {
- HeapObject* hobj = HeapObject::cast(obj);
- OS::FPrint(out, " size %d :", hobj->Size());
- }
+ void Run();
- // Dump each object classification that matches this object.
-#define FOR_EACH_TYPE(type) \
- if (obj->Is##type()) { \
- OS::FPrint(out, " %s", #type); \
- }
- OBJECT_TYPE_LIST(FOR_EACH_TYPE)
- HEAP_OBJECT_TYPE_LIST(FOR_EACH_TYPE)
-#undef FOR_EACH_TYPE
-}
+ private:
+ void TraceRange(const char* msg, ...);
+ void InferControlFlowRange(HCompareNumericAndBranch* test,
+ HBasicBlock* dest);
+ void UpdateControlFlowRange(Token::Value op, HValue* value, HValue* other);
+ void InferRange(HValue* value);
+ void RollBackTo(int index);
+ void AddRange(HValue* value, Range* range);
+ ZoneList<HValue*> changed_ranges_;
+};
-#endif // INSPECTOR
} } // namespace v8::internal
+#endif // V8_HYDROGEN_RANGE_ANALYSIS_H_
diff --git a/deps/v8/src/hydrogen-redundant-phi.cc b/deps/v8/src/hydrogen-redundant-phi.cc
new file mode 100644
index 000000000..1263833da
--- /dev/null
+++ b/deps/v8/src/hydrogen-redundant-phi.cc
@@ -0,0 +1,88 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "hydrogen-redundant-phi.h"
+
+namespace v8 {
+namespace internal {
+
+void HRedundantPhiEliminationPhase::Run() {
+ // Gather all phis from all blocks first.
+ const ZoneList<HBasicBlock*>* blocks(graph()->blocks());
+ ZoneList<HPhi*> all_phis(blocks->length(), zone());
+ for (int i = 0; i < blocks->length(); ++i) {
+ HBasicBlock* block = blocks->at(i);
+ for (int j = 0; j < block->phis()->length(); j++) {
+ all_phis.Add(block->phis()->at(j), zone());
+ }
+ }
+
+ // Iteratively reduce all phis in the list.
+ ProcessPhis(&all_phis);
+
+#if DEBUG
+ // Make sure that we *really* removed all redundant phis.
+ for (int i = 0; i < blocks->length(); ++i) {
+ for (int j = 0; j < blocks->at(i)->phis()->length(); j++) {
+ ASSERT(blocks->at(i)->phis()->at(j)->GetRedundantReplacement() == NULL);
+ }
+ }
+#endif
+}
+
+
+void HRedundantPhiEliminationPhase::ProcessBlock(HBasicBlock* block) {
+ ProcessPhis(block->phis());
+}
+
+
+void HRedundantPhiEliminationPhase::ProcessPhis(const ZoneList<HPhi*>* phis) {
+ bool updated;
+ do {
+ // Iteratively replace all redundant phis in the given list.
+ updated = false;
+ for (int i = 0; i < phis->length(); i++) {
+ HPhi* phi = phis->at(i);
+ if (phi->CheckFlag(HValue::kIsDead)) continue; // Already replaced.
+
+ HValue* replacement = phi->GetRedundantReplacement();
+ if (replacement != NULL) {
+ phi->SetFlag(HValue::kIsDead);
+ for (HUseIterator it(phi->uses()); !it.Done(); it.Advance()) {
+ HValue* value = it.value();
+ value->SetOperandAt(it.index(), replacement);
+ // Iterate again if used in another non-dead phi.
+ updated |= value->IsPhi() && !value->CheckFlag(HValue::kIsDead);
+ }
+ phi->block()->RemovePhi(phi);
+ }
+ }
+ } while (updated);
+}
+
+
+} } // namespace v8::internal
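
ProcessPhis above repeats its scan because replacing one phi can expose redundancy in another: roughly, a phi is redundant when every operand is either the phi itself or one single other value. A self-contained sketch of that fixed point on a toy phi structure, using integer ids in place of HValue pointers (an illustrative sketch, not code from the patch):

#include <cstdio>
#include <set>
#include <vector>

struct Phi {
  int id;
  std::vector<int> operands;  // ids of inputs; may include this phi's own id
  int replacement = -1;       // -1 means "still a live phi"
};

int Resolve(const std::vector<Phi>& phis, int id) {
  // Follow replacements if id refers to a phi that was already eliminated.
  while (id < (int)phis.size() && phis[id].replacement != -1) {
    id = phis[id].replacement;
  }
  return id;
}

void EliminateRedundant(std::vector<Phi>& phis) {
  bool updated;
  do {
    updated = false;
    for (Phi& phi : phis) {
      if (phi.replacement != -1) continue;  // already replaced
      std::set<int> inputs;
      for (int op : phi.operands) {
        int r = Resolve(phis, op);
        if (r != phi.id) inputs.insert(r);  // ignore self-references
      }
      if (inputs.size() == 1) {             // redundant: one real input
        phi.replacement = *inputs.begin();
        updated = true;                     // users may now be redundant too
      }
    }
  } while (updated);
}

int main() {
  // phi0 = phi(5, phi1); phi1 = phi(phi0, phi0) -- both collapse to value 5.
  std::vector<Phi> phis = {{0, {5, 1}}, {1, {0, 0}}};
  EliminateRedundant(phis);
  std::printf("phi0 -> %d, phi1 -> %d\n", Resolve(phis, 0), Resolve(phis, 1));
  return 0;
}
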
diff --git a/deps/v8/src/hydrogen-redundant-phi.h b/deps/v8/src/hydrogen-redundant-phi.h
new file mode 100644
index 000000000..960ae69c9
--- /dev/null
+++ b/deps/v8/src/hydrogen-redundant-phi.h
@@ -0,0 +1,56 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_HYDROGEN_REDUNDANT_PHI_H_
+#define V8_HYDROGEN_REDUNDANT_PHI_H_
+
+#include "hydrogen.h"
+
+namespace v8 {
+namespace internal {
+
+
+// Replace all phis consisting of a single non-loop operand plus any number of
+// loop operands by that single non-loop operand.
+class HRedundantPhiEliminationPhase : public HPhase {
+ public:
+ explicit HRedundantPhiEliminationPhase(HGraph* graph)
+ : HPhase("H_Redundant phi elimination", graph) { }
+
+ void Run();
+ void ProcessBlock(HBasicBlock* block);
+
+ private:
+ void ProcessPhis(const ZoneList<HPhi*>* phis);
+
+ DISALLOW_COPY_AND_ASSIGN(HRedundantPhiEliminationPhase);
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_HYDROGEN_REDUNDANT_PHI_H_
diff --git a/deps/v8/src/hydrogen-removable-simulates.cc b/deps/v8/src/hydrogen-removable-simulates.cc
new file mode 100644
index 000000000..f95283243
--- /dev/null
+++ b/deps/v8/src/hydrogen-removable-simulates.cc
@@ -0,0 +1,94 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "hydrogen-removable-simulates.h"
+
+namespace v8 {
+namespace internal {
+
+void HMergeRemovableSimulatesPhase::Run() {
+ ZoneList<HSimulate*> mergelist(2, zone());
+ for (int i = 0; i < graph()->blocks()->length(); ++i) {
+ HBasicBlock* block = graph()->blocks()->at(i);
+ // Make sure the merge list is empty at the start of a block.
+ ASSERT(mergelist.is_empty());
+ // Nasty heuristic: Never remove the first simulate in a block. This
+ // just so happens to have a beneficial effect on register allocation.
+ bool first = true;
+ for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
+ HInstruction* current = it.Current();
+ if (current->IsLeaveInlined()) {
+ // Never fold simulates from inlined environments into simulates
+ // in the outer environment.
+ // (Before each HEnterInlined, there is a non-foldable HSimulate
+ // anyway, so we get the barrier in the other direction for free.)
+ // Simply remove all accumulated simulates without merging. This
+ // is safe because simulates after instructions with side effects
+ // are never added to the merge list.
+ while (!mergelist.is_empty()) {
+ mergelist.RemoveLast()->DeleteAndReplaceWith(NULL);
+ }
+ continue;
+ }
+ if (current->IsReturn()) {
+ // Drop mergeable simulates in the list. This is safe because
+ // simulates after instructions with side effects are never added
+ // to the merge list.
+ while (!mergelist.is_empty()) {
+ mergelist.RemoveLast()->DeleteAndReplaceWith(NULL);
+ }
+ continue;
+ }
+ // Skip the non-simulates and the first simulate.
+ if (!current->IsSimulate()) continue;
+ if (first) {
+ first = false;
+ continue;
+ }
+ HSimulate* current_simulate = HSimulate::cast(current);
+ if ((current_simulate->previous()->HasObservableSideEffects() &&
+ !current_simulate->next()->IsSimulate()) ||
+ !current_simulate->is_candidate_for_removal()) {
+ // This simulate is not suitable for folding.
+ // Fold the ones accumulated so far.
+ current_simulate->MergeWith(&mergelist);
+ continue;
+ } else {
+ // Accumulate this simulate for folding later on.
+ mergelist.Add(current_simulate, zone());
+ }
+ }
+
+ if (!mergelist.is_empty()) {
+ // Merge the accumulated simulates at the end of the block.
+ HSimulate* last = mergelist.RemoveLast();
+ last->MergeWith(&mergelist);
+ }
+ }
+}
+
+} } // namespace v8::internal
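
The pass above is a forward scan per block with an accumulate/flush discipline: removable simulates are buffered, a simulate that has to stay absorbs the buffer via MergeWith, and returns or inline exits simply drop the buffer. A toy standalone sketch of that discipline on a stream of tags, using strings in place of HInstruction and ignoring the "keep the first simulate" heuristic (an illustrative sketch, not code from the patch):

#include <cstdio>
#include <string>
#include <vector>

// Tags standing in for instructions: "sim" is a removable simulate, "SIM" a
// simulate that must stay (a fold target), "ret" a barrier that drops the
// buffer, anything else is an ordinary instruction.
int main() {
  std::vector<std::string> block = {"add", "sim", "mul", "sim", "SIM",
                                    "sim", "ret", "sim", "sim"};
  std::vector<int> buffered;  // indices of buffered removable simulates
  for (int i = 0; i < (int)block.size(); ++i) {
    const std::string& tag = block[i];
    if (tag == "ret") {            // barrier: buffered simulates are dropped
      buffered.clear();
    } else if (tag == "SIM") {     // fold target: absorbs what was buffered
      std::printf("simulate at %d absorbs %d buffered simulate(s)\n",
                  i, (int)buffered.size());
      buffered.clear();
    } else if (tag == "sim") {     // removable: buffer for later folding
      buffered.push_back(i);
    }
  }
  if (!buffered.empty()) {         // end of block: fold into the last one
    std::printf("last simulate at %d absorbs %d buffered simulate(s)\n",
                buffered.back(), (int)buffered.size() - 1);
  }
  return 0;
}
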
diff --git a/deps/v8/src/hydrogen-removable-simulates.h b/deps/v8/src/hydrogen-removable-simulates.h
new file mode 100644
index 000000000..f5bcd6ddf
--- /dev/null
+++ b/deps/v8/src/hydrogen-removable-simulates.h
@@ -0,0 +1,51 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_HYDROGEN_REMOVABLE_SIMULATES_H_
+#define V8_HYDROGEN_REMOVABLE_SIMULATES_H_
+
+#include "hydrogen.h"
+
+namespace v8 {
+namespace internal {
+
+
+class HMergeRemovableSimulatesPhase : public HPhase {
+ public:
+ explicit HMergeRemovableSimulatesPhase(HGraph* graph)
+ : HPhase("H_Merge removable simulates", graph) { }
+
+ void Run();
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HMergeRemovableSimulatesPhase);
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_HYDROGEN_REMOVABLE_SIMULATES_H_
diff --git a/deps/v8/src/hydrogen-representation-changes.cc b/deps/v8/src/hydrogen-representation-changes.cc
new file mode 100644
index 000000000..d0c9b5825
--- /dev/null
+++ b/deps/v8/src/hydrogen-representation-changes.cc
@@ -0,0 +1,197 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "hydrogen-representation-changes.h"
+
+namespace v8 {
+namespace internal {
+
+void HRepresentationChangesPhase::InsertRepresentationChangeForUse(
+ HValue* value, HValue* use_value, int use_index, Representation to) {
+ // Insert the representation change right before its use. For phi-uses we
+ // insert at the end of the corresponding predecessor.
+ HInstruction* next = NULL;
+ if (use_value->IsPhi()) {
+ next = use_value->block()->predecessors()->at(use_index)->end();
+ } else {
+ next = HInstruction::cast(use_value);
+ }
+ // For constants we try to make the representation change at compile
+ // time. When a representation change is not possible without loss of
+ // information we treat constants like normal instructions and insert the
+ // change instructions for them.
+ HInstruction* new_value = NULL;
+ bool is_truncating_to_smi = use_value->CheckFlag(HValue::kTruncatingToSmi);
+ bool is_truncating_to_int = use_value->CheckFlag(HValue::kTruncatingToInt32);
+ if (value->IsConstant()) {
+ HConstant* constant = HConstant::cast(value);
+ // Try to create a new copy of the constant with the new representation.
+ if (is_truncating_to_int && to.IsInteger32()) {
+ Maybe<HConstant*> res = constant->CopyToTruncatedInt32(graph()->zone());
+ if (res.has_value) new_value = res.value;
+ } else {
+ new_value = constant->CopyToRepresentation(to, graph()->zone());
+ }
+ }
+
+ if (new_value == NULL) {
+ new_value = new(graph()->zone()) HChange(
+ value, to, is_truncating_to_smi, is_truncating_to_int);
+ if (use_value->position() != RelocInfo::kNoPosition) {
+ new_value->set_position(use_value->position());
+ } else {
+ ASSERT(!FLAG_emit_opt_code_positions || !graph()->info()->IsOptimizing());
+ }
+ }
+
+ new_value->InsertBefore(next);
+ use_value->SetOperandAt(use_index, new_value);
+}
+
+
+void HRepresentationChangesPhase::InsertRepresentationChangesForValue(
+ HValue* value) {
+ Representation r = value->representation();
+ if (r.IsNone()) return;
+ if (value->HasNoUses()) return;
+
+ for (HUseIterator it(value->uses()); !it.Done(); it.Advance()) {
+ HValue* use_value = it.value();
+ int use_index = it.index();
+ Representation req = use_value->RequiredInputRepresentation(use_index);
+ if (req.IsNone() || req.Equals(r)) continue;
+ InsertRepresentationChangeForUse(value, use_value, use_index, req);
+ }
+ if (value->HasNoUses()) {
+ ASSERT(value->IsConstant());
+ value->DeleteAndReplaceWith(NULL);
+ }
+
+ // The only purpose of a HForceRepresentation is to represent the value
+ // after the (possible) HChange instruction. We make it disappear.
+ if (value->IsForceRepresentation()) {
+ value->DeleteAndReplaceWith(HForceRepresentation::cast(value)->value());
+ }
+}
+
+
+void HRepresentationChangesPhase::Run() {
+ // Compute truncation flag for phis: Initially assume that all
+ // int32-phis allow truncation and iteratively remove the ones that
+ // are used in an operation that does not allow a truncating
+ // conversion.
+ ZoneList<HPhi*> int_worklist(8, zone());
+ ZoneList<HPhi*> smi_worklist(8, zone());
+
+ const ZoneList<HPhi*>* phi_list(graph()->phi_list());
+ for (int i = 0; i < phi_list->length(); i++) {
+ HPhi* phi = phi_list->at(i);
+ if (phi->representation().IsInteger32()) {
+ phi->SetFlag(HValue::kTruncatingToInt32);
+ } else if (phi->representation().IsSmi()) {
+ phi->SetFlag(HValue::kTruncatingToSmi);
+ phi->SetFlag(HValue::kTruncatingToInt32);
+ }
+ }
+
+ for (int i = 0; i < phi_list->length(); i++) {
+ HPhi* phi = phi_list->at(i);
+ HValue* value = NULL;
+ if (phi->representation().IsSmiOrInteger32() &&
+ !phi->CheckUsesForFlag(HValue::kTruncatingToInt32, &value)) {
+ int_worklist.Add(phi, zone());
+ phi->ClearFlag(HValue::kTruncatingToInt32);
+ if (FLAG_trace_representation) {
+ PrintF("#%d Phi is not truncating Int32 because of #%d %s\n",
+ phi->id(), value->id(), value->Mnemonic());
+ }
+ }
+
+ if (phi->representation().IsSmi() &&
+ !phi->CheckUsesForFlag(HValue::kTruncatingToSmi, &value)) {
+ smi_worklist.Add(phi, zone());
+ phi->ClearFlag(HValue::kTruncatingToSmi);
+ if (FLAG_trace_representation) {
+ PrintF("#%d Phi is not truncating Smi because of #%d %s\n",
+ phi->id(), value->id(), value->Mnemonic());
+ }
+ }
+ }
+
+ while (!int_worklist.is_empty()) {
+ HPhi* current = int_worklist.RemoveLast();
+ for (int i = 0; i < current->OperandCount(); ++i) {
+ HValue* input = current->OperandAt(i);
+ if (input->IsPhi() &&
+ input->representation().IsSmiOrInteger32() &&
+ input->CheckFlag(HValue::kTruncatingToInt32)) {
+ if (FLAG_trace_representation) {
+ PrintF("#%d Phi is not truncating Int32 because of #%d %s\n",
+ input->id(), current->id(), current->Mnemonic());
+ }
+ input->ClearFlag(HValue::kTruncatingToInt32);
+ int_worklist.Add(HPhi::cast(input), zone());
+ }
+ }
+ }
+
+ while (!smi_worklist.is_empty()) {
+ HPhi* current = smi_worklist.RemoveLast();
+ for (int i = 0; i < current->OperandCount(); ++i) {
+ HValue* input = current->OperandAt(i);
+ if (input->IsPhi() &&
+ input->representation().IsSmi() &&
+ input->CheckFlag(HValue::kTruncatingToSmi)) {
+ if (FLAG_trace_representation) {
+ PrintF("#%d Phi is not truncating Smi because of #%d %s\n",
+ input->id(), current->id(), current->Mnemonic());
+ }
+ input->ClearFlag(HValue::kTruncatingToSmi);
+ smi_worklist.Add(HPhi::cast(input), zone());
+ }
+ }
+ }
+
+ const ZoneList<HBasicBlock*>* blocks(graph()->blocks());
+ for (int i = 0; i < blocks->length(); ++i) {
+ // Process phi instructions first.
+ const HBasicBlock* block(blocks->at(i));
+ const ZoneList<HPhi*>* phis = block->phis();
+ for (int j = 0; j < phis->length(); j++) {
+ InsertRepresentationChangesForValue(phis->at(j));
+ }
+
+ // Process normal instructions.
+ for (HInstruction* current = block->first(); current != NULL; ) {
+ HInstruction* next = current->next();
+ InsertRepresentationChangesForValue(current);
+ current = next;
+ }
+ }
+}
+
+} } // namespace v8::internal
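
The first half of Run() above is an optimistic fixed point: every Smi/Integer32 phi starts out flagged as truncating, and the flag is withdrawn, with withdrawals propagated through phi operands via a worklist, as soon as any use demands the untruncated value. A small self-contained sketch of that withdraw-and-propagate pattern, using a plain adjacency list instead of HPhi (an illustrative sketch, not code from the patch):

#include <cstdio>
#include <vector>

int main() {
  // phi i feeds into the phis listed in users[i]; initially all truncating.
  std::vector<std::vector<int>> users = {{1}, {}, {}, {2}};
  std::vector<bool> truncating(users.size(), true);

  // Suppose phi 2 has a non-truncating use (say its value escapes to a
  // double operation): clear its flag and propagate backwards to operands.
  std::vector<int> worklist = {2};
  truncating[2] = false;
  while (!worklist.empty()) {
    int current = worklist.back();
    worklist.pop_back();
    // Any phi whose value flows into a non-truncating phi cannot truncate
    // either, because the full value is observable through that path.
    for (int producer = 0; producer < (int)users.size(); ++producer) {
      if (!truncating[producer]) continue;
      for (int user : users[producer]) {
        if (user == current) {
          truncating[producer] = false;
          worklist.push_back(producer);
          break;
        }
      }
    }
  }
  for (int i = 0; i < (int)truncating.size(); ++i) {
    std::printf("phi %d truncating: %s\n", i, truncating[i] ? "yes" : "no");
  }
  return 0;
}
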
diff --git a/deps/v8/src/hydrogen-representation-changes.h b/deps/v8/src/hydrogen-representation-changes.h
new file mode 100644
index 000000000..77e899b60
--- /dev/null
+++ b/deps/v8/src/hydrogen-representation-changes.h
@@ -0,0 +1,55 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_HYDROGEN_REPRESENTATION_CHANGES_H_
+#define V8_HYDROGEN_REPRESENTATION_CHANGES_H_
+
+#include "hydrogen.h"
+
+namespace v8 {
+namespace internal {
+
+
+class HRepresentationChangesPhase : public HPhase {
+ public:
+ explicit HRepresentationChangesPhase(HGraph* graph)
+ : HPhase("H_Representation changes", graph) { }
+
+ void Run();
+
+ private:
+ void InsertRepresentationChangeForUse(HValue* value,
+ HValue* use_value,
+ int use_index,
+ Representation to);
+ void InsertRepresentationChangesForValue(HValue* value);
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_HYDROGEN_REPRESENTATION_CHANGES_H_
diff --git a/deps/v8/src/platform-tls-win32.h b/deps/v8/src/hydrogen-sce.cc
index 4056e8cc6..a6995f647 100644
--- a/deps/v8/src/platform-tls-win32.h
+++ b/deps/v8/src/hydrogen-sce.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -25,38 +25,38 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#ifndef V8_PLATFORM_TLS_WIN32_H_
-#define V8_PLATFORM_TLS_WIN32_H_
-
-#include "checks.h"
-#include "globals.h"
-#include "win32-headers.h"
+#include "hydrogen-sce.h"
+#include "v8.h"
namespace v8 {
namespace internal {
-#if defined(_WIN32) && !defined(_WIN64)
-
-#define V8_FAST_TLS_SUPPORTED 1
-
-inline intptr_t InternalGetExistingThreadLocal(intptr_t index) {
- const intptr_t kTibInlineTlsOffset = 0xE10;
- const intptr_t kTibExtraTlsOffset = 0xF94;
- const intptr_t kMaxInlineSlots = 64;
- const intptr_t kMaxSlots = kMaxInlineSlots + 1024;
- ASSERT(0 <= index && index < kMaxSlots);
- if (index < kMaxInlineSlots) {
- return static_cast<intptr_t>(__readfsdword(kTibInlineTlsOffset +
- kPointerSize * index));
+void HStackCheckEliminationPhase::Run() {
+ // For each loop block walk the dominator tree from the backwards branch to
+ // the loop header. If a call instruction is encountered the backwards branch
+ // is dominated by a call and the stack check in the backwards branch can be
+ // removed.
+ for (int i = 0; i < graph()->blocks()->length(); i++) {
+ HBasicBlock* block = graph()->blocks()->at(i);
+ if (block->IsLoopHeader()) {
+ HBasicBlock* back_edge = block->loop_information()->GetLastBackEdge();
+ HBasicBlock* dominator = back_edge;
+ while (true) {
+ for (HInstructionIterator it(dominator); !it.Done(); it.Advance()) {
+ if (it.Current()->IsCall()) {
+ block->loop_information()->stack_check()->Eliminate();
+ break;
+ }
+ }
+
+ // Done when the loop header is processed.
+ if (dominator == block) break;
+
+ // Move up the dominator tree.
+ dominator = dominator->dominator();
+ }
+ }
}
- intptr_t extra = static_cast<intptr_t>(__readfsdword(kTibExtraTlsOffset));
- ASSERT(extra != 0);
- return *reinterpret_cast<intptr_t*>(extra +
- kPointerSize * (index - kMaxInlineSlots));
}
-#endif
-
} } // namespace v8::internal
-
-#endif // V8_PLATFORM_TLS_WIN32_H_
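
The reasoning in HStackCheckEliminationPhase::Run() is that a call already polls the stack, so if the loop's back edge is dominated by a block containing a call, the loop's own stack check is redundant and can be eliminated. A minimal sketch of that dominator walk, assuming blocks are numbered and each stores its immediate dominator and whether it contains a call (an illustrative sketch, not code from the patch):

#include <cstdio>
#include <vector>

struct Block {
  int dominator;   // index of the immediate dominator; header dominates itself
  bool has_call;   // block contains at least one call instruction
};

// Walk from the back edge towards the loop header along immediate dominators;
// the loop's stack check is removable if a call is found on the way.
bool StackCheckRemovable(const std::vector<Block>& blocks,
                         int header, int back_edge) {
  int current = back_edge;
  while (true) {
    if (blocks[current].has_call) return true;
    if (current == header) return false;  // reached the header without a call
    current = blocks[current].dominator;
  }
}

int main() {
  // 0: loop header, 1: body block with a call, 2: back edge block.
  std::vector<Block> blocks = {{0, false}, {0, true}, {1, false}};
  std::printf("stack check removable: %s\n",
              StackCheckRemovable(blocks, 0, 2) ? "yes" : "no");
  return 0;
}
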
diff --git a/deps/v8/src/v8preparserdll-main.cc b/deps/v8/src/hydrogen-sce.h
index c0344d344..55e153e0e 100644
--- a/deps/v8/src/v8preparserdll-main.cc
+++ b/deps/v8/src/hydrogen-sce.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -25,15 +25,24 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include <windows.h>
+#ifndef V8_HYDROGEN_SCE_H_
+#define V8_HYDROGEN_SCE_H_
-#include "../include/v8-preparser.h"
+#include "hydrogen.h"
-extern "C" {
-BOOL WINAPI DllMain(HANDLE hinstDLL,
- DWORD dwReason,
- LPVOID lpvReserved) {
- // Do nothing.
- return TRUE;
-}
-}
+namespace v8 {
+namespace internal {
+
+
+class HStackCheckEliminationPhase : public HPhase {
+ public:
+ explicit HStackCheckEliminationPhase(HGraph* graph)
+ : HPhase("H_Stack check elimination", graph) { }
+
+ void Run();
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_HYDROGEN_SCE_H_
diff --git a/deps/v8/src/hydrogen-uint32-analysis.cc b/deps/v8/src/hydrogen-uint32-analysis.cc
new file mode 100644
index 000000000..8de887d6f
--- /dev/null
+++ b/deps/v8/src/hydrogen-uint32-analysis.cc
@@ -0,0 +1,236 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "hydrogen-uint32-analysis.h"
+
+namespace v8 {
+namespace internal {
+
+
+bool HUint32AnalysisPhase::IsSafeUint32Use(HValue* val, HValue* use) {
+ // Operations that operate on bits are safe.
+ if (use->IsBitwise() || use->IsShl() || use->IsSar() || use->IsShr()) {
+ return true;
+ } else if (use->IsSimulate()) {
+ // Deoptimization has special support for uint32.
+ return true;
+ } else if (use->IsChange()) {
+ // Conversions have special support for uint32.
+ // This ASSERT guards that the conversion in question is actually
+ // implemented. Do not extend the whitelist without adding
+ // support to LChunkBuilder::DoChange().
+ ASSERT(HChange::cast(use)->to().IsDouble() ||
+ HChange::cast(use)->to().IsSmi() ||
+ HChange::cast(use)->to().IsTagged());
+ return true;
+ } else if (use->IsStoreKeyed()) {
+ HStoreKeyed* store = HStoreKeyed::cast(use);
+ if (store->is_external()) {
+ // Storing a value into an external integer array is a bit level
+ // operation.
+ if (store->value() == val) {
+ // Clamping or a conversion to double should have been inserted.
+ ASSERT(store->elements_kind() != EXTERNAL_PIXEL_ELEMENTS);
+ ASSERT(store->elements_kind() != EXTERNAL_FLOAT_ELEMENTS);
+ ASSERT(store->elements_kind() != EXTERNAL_DOUBLE_ELEMENTS);
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+
+// Iterate over all uses and verify that they are uint32 safe: either don't
+// distinguish between int32 and uint32 due to their bitwise nature or
+// have special support for uint32 values.
+// Encountered phis are optimistically treated as safe uint32 uses,
+// marked with kUint32 flag and collected in the phis_ list. A separate
+// pass will be performed later by UnmarkUnsafePhis to clear kUint32 from
+// phis that are not actually uint32-safe (it requires fixed-point iteration).
+bool HUint32AnalysisPhase::Uint32UsesAreSafe(HValue* uint32val) {
+ bool collect_phi_uses = false;
+ for (HUseIterator it(uint32val->uses()); !it.Done(); it.Advance()) {
+ HValue* use = it.value();
+
+ if (use->IsPhi()) {
+ if (!use->CheckFlag(HInstruction::kUint32)) {
+ // There is a phi use of this value from a phi that is not yet
+ // collected in phis_ array. Separate pass is required.
+ collect_phi_uses = true;
+ }
+
+ // Optimistically treat phis as uint32 safe.
+ continue;
+ }
+
+ if (!IsSafeUint32Use(uint32val, use)) {
+ return false;
+ }
+ }
+
+ if (collect_phi_uses) {
+ for (HUseIterator it(uint32val->uses()); !it.Done(); it.Advance()) {
+ HValue* use = it.value();
+
+ // There is a phi use of this value from a phi that is not yet
+ // collected in phis_ array. Separate pass is required.
+ if (use->IsPhi() && !use->CheckFlag(HInstruction::kUint32)) {
+ use->SetFlag(HInstruction::kUint32);
+ phis_.Add(HPhi::cast(use), zone());
+ }
+ }
+ }
+
+ return true;
+}
+
+
+// Check if all operands to the given phi are marked with kUint32 flag.
+bool HUint32AnalysisPhase::CheckPhiOperands(HPhi* phi) {
+ if (!phi->CheckFlag(HInstruction::kUint32)) {
+ // This phi is not uint32 safe. No need to check operands.
+ return false;
+ }
+
+ for (int j = 0; j < phi->OperandCount(); j++) {
+ HValue* operand = phi->OperandAt(j);
+ if (!operand->CheckFlag(HInstruction::kUint32)) {
+ // Lazily mark constants that fit into uint32 range with kUint32 flag.
+ if (operand->IsInteger32Constant() &&
+ operand->GetInteger32Constant() >= 0) {
+ operand->SetFlag(HInstruction::kUint32);
+ continue;
+ }
+
+ // This phi is not safe, some operands are not uint32 values.
+ return false;
+ }
+ }
+
+ return true;
+}
+
+
+// Remove kUint32 flag from the phi itself and its operands. If any operand
+// was a phi marked with kUint32 place it into a worklist for
+// transitive clearing of kUint32 flag.
+void HUint32AnalysisPhase::UnmarkPhi(HPhi* phi, ZoneList<HPhi*>* worklist) {
+ phi->ClearFlag(HInstruction::kUint32);
+ for (int j = 0; j < phi->OperandCount(); j++) {
+ HValue* operand = phi->OperandAt(j);
+ if (operand->CheckFlag(HInstruction::kUint32)) {
+ operand->ClearFlag(HInstruction::kUint32);
+ if (operand->IsPhi()) {
+ worklist->Add(HPhi::cast(operand), zone());
+ }
+ }
+ }
+}
+
+
+void HUint32AnalysisPhase::UnmarkUnsafePhis() {
+ // No phis were collected. Nothing to do.
+ if (phis_.length() == 0) return;
+
+ // Worklist used to transitively clear kUint32 from phis that
+ // are used as arguments to other phis.
+ ZoneList<HPhi*> worklist(phis_.length(), zone());
+
+ // Phi can be used as a uint32 value if and only if
+ // all its operands are uint32 values and all its
+ // uses are uint32 safe.
+
+ // Iterate over collected phis and unmark those that
+ // are unsafe. When unmarking phi unmark its operands
+ // and add it to the worklist if it is a phi as well.
+ // Phis that are still marked as safe are shifted down
+ // so that all safe phis form a prefix of the phis_ array.
+ int phi_count = 0;
+ for (int i = 0; i < phis_.length(); i++) {
+ HPhi* phi = phis_[i];
+
+ if (CheckPhiOperands(phi) && Uint32UsesAreSafe(phi)) {
+ phis_[phi_count++] = phi;
+ } else {
+ UnmarkPhi(phi, &worklist);
+ }
+ }
+
+ // Now phis array contains only those phis that have safe
+ // non-phi uses. Start transitively clearing kUint32 flag
+ // from phi operands of discovered non-safe phis until
+ // only safe phis are left.
+ while (!worklist.is_empty()) {
+ while (!worklist.is_empty()) {
+ HPhi* phi = worklist.RemoveLast();
+ UnmarkPhi(phi, &worklist);
+ }
+
+ // Check if any operands to safe phis were unmarked
+ // turning a safe phi into unsafe. The same value
+ // can flow into several phis.
+ int new_phi_count = 0;
+ for (int i = 0; i < phi_count; i++) {
+ HPhi* phi = phis_[i];
+
+ if (CheckPhiOperands(phi)) {
+ phis_[new_phi_count++] = phi;
+ } else {
+ UnmarkPhi(phi, &worklist);
+ }
+ }
+ phi_count = new_phi_count;
+ }
+}
+
+
+void HUint32AnalysisPhase::Run() {
+ if (!graph()->has_uint32_instructions()) return;
+
+ ZoneList<HInstruction*>* uint32_instructions = graph()->uint32_instructions();
+ for (int i = 0; i < uint32_instructions->length(); ++i) {
+ // Analyze instruction and mark it with kUint32 if all
+ // its uses are uint32 safe.
+ HInstruction* current = uint32_instructions->at(i);
+ if (current->IsLinked() &&
+ current->representation().IsInteger32() &&
+ Uint32UsesAreSafe(current)) {
+ current->SetFlag(HInstruction::kUint32);
+ }
+ }
+
+ // Some phis might have been optimistically marked with kUint32 flag.
+ // Remove this flag from those phis that are unsafe and propagate
+ // this information transitively potentially clearing kUint32 flag
+ // from some non-phi operations that are used as operands to unsafe phis.
+ UnmarkUnsafePhis();
+}
+
+
+} } // namespace v8::internal
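
The whitelist in IsSafeUint32Use() is the heart of the analysis: a value may keep the kUint32 flag only while every use either works bit-by-bit or has dedicated uint32 support; phis are marked optimistically and cleaned up afterwards by UnmarkUnsafePhis(). A compact restatement of that whitelist as a standalone predicate over use kinds (an illustrative sketch, not code from the patch):

#include <cstdio>

enum UseKind { kBitwise, kShift, kSimulate, kChange, kStoreToExternalIntArray,
               kArithmetic, kCompare };

// Mirrors the whitelist in HUint32AnalysisPhase::IsSafeUint32Use above: only
// uses that either operate on bits or have dedicated uint32 support keep the
// producing instruction eligible for the kUint32 flag.
bool IsSafeUint32Use(UseKind use) {
  switch (use) {
    case kBitwise:
    case kShift:
    case kSimulate:
    case kChange:
    case kStoreToExternalIntArray:
      return true;
    default:
      return false;  // e.g. arithmetic or comparisons need the signed view
  }
}

int main() {
  std::printf("shift safe: %d, arithmetic safe: %d\n",
              IsSafeUint32Use(kShift), IsSafeUint32Use(kArithmetic));
  return 0;
}
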
diff --git a/deps/v8/src/hydrogen-uint32-analysis.h b/deps/v8/src/hydrogen-uint32-analysis.h
new file mode 100644
index 000000000..59739d1cc
--- /dev/null
+++ b/deps/v8/src/hydrogen-uint32-analysis.h
@@ -0,0 +1,59 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_HYDROGEN_UINT32_ANALYSIS_H_
+#define V8_HYDROGEN_UINT32_ANALYSIS_H_
+
+#include "hydrogen.h"
+
+namespace v8 {
+namespace internal {
+
+
+// Discover instructions that can be marked with kUint32 flag allowing
+// them to produce full range uint32 values.
+class HUint32AnalysisPhase : public HPhase {
+ public:
+ explicit HUint32AnalysisPhase(HGraph* graph)
+ : HPhase("H_Compute safe UInt32 operations", graph), phis_(4, zone()) { }
+
+ void Run();
+
+ private:
+ INLINE(bool IsSafeUint32Use(HValue* val, HValue* use));
+ INLINE(bool Uint32UsesAreSafe(HValue* uint32val));
+ INLINE(bool CheckPhiOperands(HPhi* phi));
+ INLINE(void UnmarkPhi(HPhi* phi, ZoneList<HPhi*>* worklist));
+ INLINE(void UnmarkUnsafePhis());
+
+ ZoneList<HPhi*> phis_;
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_HYDROGEN_UINT32_ANALYSIS_H_
diff --git a/deps/v8/src/hydrogen.cc b/deps/v8/src/hydrogen.cc
index e3f79eed0..9587dd344 100644
--- a/deps/v8/src/hydrogen.cc
+++ b/deps/v8/src/hydrogen.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -25,17 +25,43 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include "v8.h"
#include "hydrogen.h"
+#include <algorithm>
+
+#include "v8.h"
+#include "allocation-site-scopes.h"
#include "codegen.h"
#include "full-codegen.h"
#include "hashmap.h"
+#include "hydrogen-bce.h"
+#include "hydrogen-bch.h"
+#include "hydrogen-canonicalize.h"
+#include "hydrogen-check-elimination.h"
+#include "hydrogen-dce.h"
+#include "hydrogen-dehoist.h"
+#include "hydrogen-environment-liveness.h"
+#include "hydrogen-escape-analysis.h"
+#include "hydrogen-infer-representation.h"
+#include "hydrogen-infer-types.h"
+#include "hydrogen-load-elimination.h"
+#include "hydrogen-gvn.h"
+#include "hydrogen-mark-deoptimize.h"
+#include "hydrogen-mark-unreachable.h"
+#include "hydrogen-minus-zero.h"
+#include "hydrogen-osr.h"
+#include "hydrogen-range-analysis.h"
+#include "hydrogen-redundant-phi.h"
+#include "hydrogen-removable-simulates.h"
+#include "hydrogen-representation-changes.h"
+#include "hydrogen-sce.h"
+#include "hydrogen-uint32-analysis.h"
#include "lithium-allocator.h"
#include "parser.h"
#include "scopeinfo.h"
#include "scopes.h"
#include "stub-cache.h"
+#include "typing.h"
#if V8_TARGET_ARCH_IA32
#include "ia32/lithium-codegen-ia32.h"
@@ -69,9 +95,21 @@ HBasicBlock::HBasicBlock(HGraph* graph)
last_instruction_index_(-1),
deleted_phis_(4, graph->zone()),
parent_loop_header_(NULL),
+ inlined_entry_block_(NULL),
is_inline_return_target_(false),
- is_deoptimizing_(false),
- dominates_loop_successors_(false) { }
+ is_reachable_(true),
+ dominates_loop_successors_(false),
+ is_osr_entry_(false) { }
+
+
+Isolate* HBasicBlock::isolate() const {
+ return graph_->isolate();
+}
+
+
+void HBasicBlock::MarkUnreachable() {
+ is_reachable_ = false;
+}
void HBasicBlock::AttachLoopInformation() {
@@ -96,58 +134,74 @@ void HBasicBlock::AddPhi(HPhi* phi) {
void HBasicBlock::RemovePhi(HPhi* phi) {
ASSERT(phi->block() == this);
ASSERT(phis_.Contains(phi));
- ASSERT(phi->HasNoUses() || !phi->is_live());
phi->Kill();
phis_.RemoveElement(phi);
phi->SetBlock(NULL);
}
-void HBasicBlock::AddInstruction(HInstruction* instr) {
+void HBasicBlock::AddInstruction(HInstruction* instr, int position) {
ASSERT(!IsStartBlock() || !IsFinished());
ASSERT(!instr->IsLinked());
ASSERT(!IsFinished());
+
+ if (position != RelocInfo::kNoPosition) {
+ instr->set_position(position);
+ }
if (first_ == NULL) {
+ ASSERT(last_environment() != NULL);
+ ASSERT(!last_environment()->ast_id().IsNone());
HBlockEntry* entry = new(zone()) HBlockEntry();
entry->InitializeAsFirst(this);
+ if (position != RelocInfo::kNoPosition) {
+ entry->set_position(position);
+ } else {
+ ASSERT(!FLAG_emit_opt_code_positions ||
+ !graph()->info()->IsOptimizing());
+ }
first_ = last_ = entry;
}
instr->InsertAfter(last_);
}
-HDeoptimize* HBasicBlock::CreateDeoptimize(
- HDeoptimize::UseEnvironment has_uses) {
- ASSERT(HasEnvironment());
- if (has_uses == HDeoptimize::kNoUses)
- return new(zone()) HDeoptimize(0, zone());
-
- HEnvironment* environment = last_environment();
- HDeoptimize* instr = new(zone()) HDeoptimize(environment->length(), zone());
- for (int i = 0; i < environment->length(); i++) {
- HValue* val = environment->values()->at(i);
- instr->AddEnvironmentValue(val, zone());
+HPhi* HBasicBlock::AddNewPhi(int merged_index) {
+ if (graph()->IsInsideNoSideEffectsScope()) {
+ merged_index = HPhi::kInvalidMergedIndex;
}
-
- return instr;
+ HPhi* phi = new(zone()) HPhi(merged_index, zone());
+ AddPhi(phi);
+ return phi;
}
-HSimulate* HBasicBlock::CreateSimulate(BailoutId ast_id) {
+HSimulate* HBasicBlock::CreateSimulate(BailoutId ast_id,
+ RemovableSimulate removable) {
ASSERT(HasEnvironment());
HEnvironment* environment = last_environment();
ASSERT(ast_id.IsNone() ||
+ ast_id == BailoutId::StubEntry() ||
environment->closure()->shared()->VerifyBailoutId(ast_id));
int push_count = environment->push_count();
int pop_count = environment->pop_count();
- HSimulate* instr = new(zone()) HSimulate(ast_id, pop_count, zone());
- for (int i = push_count - 1; i >= 0; --i) {
+ HSimulate* instr =
+ new(zone()) HSimulate(ast_id, pop_count, zone(), removable);
+#ifdef DEBUG
+ instr->set_closure(environment->closure());
+#endif
+ // Order of pushed values: newest (top of stack) first. This allows
+ // HSimulate::MergeWith() to easily append additional pushed values
+ // that are older (from further down the stack).
+ for (int i = 0; i < push_count; ++i) {
instr->AddPushedValue(environment->ExpressionStackAt(i));
}
- for (int i = 0; i < environment->assigned_variables()->length(); ++i) {
- int index = environment->assigned_variables()->at(i);
+ for (GrowableBitVector::Iterator it(environment->assigned_variables(),
+ zone());
+ !it.Done();
+ it.Advance()) {
+ int index = it.Current();
instr->AddAssignedValue(index, environment->Lookup(index));
}
environment->ClearHistory();
@@ -155,9 +209,9 @@ HSimulate* HBasicBlock::CreateSimulate(BailoutId ast_id) {
}
-void HBasicBlock::Finish(HControlInstruction* end) {
+void HBasicBlock::Finish(HControlInstruction* end, int position) {
ASSERT(!IsFinished());
- AddInstruction(end);
+ AddInstruction(end, position);
end_ = end;
for (HSuccessorIterator it(end); !it.Done(); it.Advance()) {
it.Current()->RegisterPredecessor(this);
@@ -165,34 +219,45 @@ void HBasicBlock::Finish(HControlInstruction* end) {
}
-void HBasicBlock::Goto(HBasicBlock* block, FunctionState* state) {
+void HBasicBlock::Goto(HBasicBlock* block,
+ int position,
+ FunctionState* state,
+ bool add_simulate) {
bool drop_extra = state != NULL &&
state->inlining_kind() == DROP_EXTRA_ON_RETURN;
if (block->IsInlineReturnTarget()) {
- AddInstruction(new(zone()) HLeaveInlined());
- last_environment_ = last_environment()->DiscardInlined(drop_extra);
+ HEnvironment* env = last_environment();
+ int argument_count = env->arguments_environment()->parameter_count();
+ AddInstruction(new(zone())
+ HLeaveInlined(state->entry(), argument_count),
+ position);
+ UpdateEnvironment(last_environment()->DiscardInlined(drop_extra));
}
- AddSimulate(BailoutId::None());
+ if (add_simulate) AddNewSimulate(BailoutId::None(), position);
HGoto* instr = new(zone()) HGoto(block);
- Finish(instr);
+ Finish(instr, position);
}
void HBasicBlock::AddLeaveInlined(HValue* return_value,
- FunctionState* state) {
+ FunctionState* state,
+ int position) {
HBasicBlock* target = state->function_return();
bool drop_extra = state->inlining_kind() == DROP_EXTRA_ON_RETURN;
ASSERT(target->IsInlineReturnTarget());
ASSERT(return_value != NULL);
- AddInstruction(new(zone()) HLeaveInlined());
- last_environment_ = last_environment()->DiscardInlined(drop_extra);
+ HEnvironment* env = last_environment();
+ int argument_count = env->arguments_environment()->parameter_count();
+ AddInstruction(new(zone()) HLeaveInlined(state->entry(), argument_count),
+ position);
+ UpdateEnvironment(last_environment()->DiscardInlined(drop_extra));
last_environment()->Push(return_value);
- AddSimulate(BailoutId::None());
+ AddNewSimulate(BailoutId::None(), position);
HGoto* instr = new(zone()) HGoto(target);
- Finish(instr);
+ Finish(instr, position);
}
@@ -203,6 +268,12 @@ void HBasicBlock::SetInitialEnvironment(HEnvironment* env) {
}
+void HBasicBlock::UpdateEnvironment(HEnvironment* env) {
+ last_environment_ = env;
+ graph()->update_maximum_environment_size(env->first_expression_index());
+}
+
+
void HBasicBlock::SetJoinId(BailoutId ast_id) {
int length = predecessors_.length();
ASSERT(length > 0);
@@ -210,11 +281,12 @@ void HBasicBlock::SetJoinId(BailoutId ast_id) {
HBasicBlock* predecessor = predecessors_[i];
ASSERT(predecessor->end()->IsGoto());
HSimulate* simulate = HSimulate::cast(predecessor->end()->previous());
- // We only need to verify the ID once.
ASSERT(i != 0 ||
- predecessor->last_environment()->closure()->shared()
- ->VerifyBailoutId(ast_id));
+ (predecessor->last_environment()->closure().is_null() ||
+ predecessor->last_environment()->closure()->shared()
+ ->VerifyBailoutId(ast_id)));
simulate->set_ast_id(ast_id);
+ predecessor->last_environment()->set_ast_id(ast_id);
}
}
@@ -488,6 +560,9 @@ class ReachabilityAnalyzer BASE_EMBEDDED {
void HGraph::Verify(bool do_full_verify) const {
+ Heap::RelocationLock relocation_lock(isolate()->heap());
+ AllowHandleDereference allow_deref;
+ AllowDeferredHandleDereference allow_deferred_deref;
for (int i = 0; i < blocks_.length(); i++) {
HBasicBlock* block = blocks_.at(i);
@@ -568,94 +643,1596 @@ void HGraph::Verify(bool do_full_verify) const {
HConstant* HGraph::GetConstant(SetOncePointer<HConstant>* pointer,
- Handle<Object> value) {
+ int32_t value) {
if (!pointer->is_set()) {
- HConstant* constant = new(zone()) HConstant(value,
- Representation::Tagged());
- constant->InsertAfter(GetConstantUndefined());
+ // Can't pass GetInvalidContext() to HConstant::New, because that will
+ // recursively call GetConstant
+ HConstant* constant = HConstant::New(zone(), NULL, value);
+ constant->InsertAfter(entry_block()->first());
pointer->set(constant);
+ return constant;
}
- return pointer->get();
+ return ReinsertConstantIfNecessary(pointer->get());
}
-HConstant* HGraph::GetConstantInt32(SetOncePointer<HConstant>* pointer,
- int32_t value) {
- if (!pointer->is_set()) {
- HConstant* constant =
- new(zone()) HConstant(value, Representation::Integer32());
- constant->InsertAfter(GetConstantUndefined());
- pointer->set(constant);
+HConstant* HGraph::ReinsertConstantIfNecessary(HConstant* constant) {
+ if (!constant->IsLinked()) {
+ // The constant was removed from the graph. Reinsert.
+ constant->ClearFlag(HValue::kIsDead);
+ constant->InsertAfter(entry_block()->first());
}
- return pointer->get();
+ return constant;
+}
+
+
+HConstant* HGraph::GetConstant0() {
+ return GetConstant(&constant_0_, 0);
}
HConstant* HGraph::GetConstant1() {
- return GetConstantInt32(&constant_1_, 1);
+ return GetConstant(&constant_1_, 1);
}
HConstant* HGraph::GetConstantMinus1() {
- return GetConstantInt32(&constant_minus1_, -1);
+ return GetConstant(&constant_minus1_, -1);
+}
+
+
+#define DEFINE_GET_CONSTANT(Name, name, htype, boolean_value) \
+HConstant* HGraph::GetConstant##Name() { \
+ if (!constant_##name##_.is_set()) { \
+ HConstant* constant = new(zone()) HConstant( \
+ Unique<Object>::CreateImmovable(isolate()->factory()->name##_value()), \
+ Representation::Tagged(), \
+ htype, \
+ false, \
+ true, \
+ false, \
+ boolean_value); \
+ constant->InsertAfter(entry_block()->first()); \
+ constant_##name##_.set(constant); \
+ } \
+ return ReinsertConstantIfNecessary(constant_##name##_.get()); \
+}
+
+
+DEFINE_GET_CONSTANT(Undefined, undefined, HType::Tagged(), false)
+DEFINE_GET_CONSTANT(True, true, HType::Boolean(), true)
+DEFINE_GET_CONSTANT(False, false, HType::Boolean(), false)
+DEFINE_GET_CONSTANT(Hole, the_hole, HType::Tagged(), false)
+DEFINE_GET_CONSTANT(Null, null, HType::Tagged(), false)
+
+
+#undef DEFINE_GET_CONSTANT
+
+#define DEFINE_IS_CONSTANT(Name, name) \
+bool HGraph::IsConstant##Name(HConstant* constant) { \
+ return constant_##name##_.is_set() && constant == constant_##name##_.get(); \
+}
+DEFINE_IS_CONSTANT(Undefined, undefined)
+DEFINE_IS_CONSTANT(0, 0)
+DEFINE_IS_CONSTANT(1, 1)
+DEFINE_IS_CONSTANT(Minus1, minus1)
+DEFINE_IS_CONSTANT(True, true)
+DEFINE_IS_CONSTANT(False, false)
+DEFINE_IS_CONSTANT(Hole, the_hole)
+DEFINE_IS_CONSTANT(Null, null)
+
+#undef DEFINE_IS_CONSTANT
+
+
+HConstant* HGraph::GetInvalidContext() {
+ return GetConstant(&constant_invalid_context_, 0xFFFFC0C7);
+}
+
+
+bool HGraph::IsStandardConstant(HConstant* constant) {
+ if (IsConstantUndefined(constant)) return true;
+ if (IsConstant0(constant)) return true;
+ if (IsConstant1(constant)) return true;
+ if (IsConstantMinus1(constant)) return true;
+ if (IsConstantTrue(constant)) return true;
+ if (IsConstantFalse(constant)) return true;
+ if (IsConstantHole(constant)) return true;
+ if (IsConstantNull(constant)) return true;
+ return false;
+}
+
+
+HGraphBuilder::IfBuilder::IfBuilder(HGraphBuilder* builder)
+ : builder_(builder),
+ finished_(false),
+ deopt_then_(false),
+ deopt_else_(false),
+ did_then_(false),
+ did_else_(false),
+ did_and_(false),
+ did_or_(false),
+ captured_(false),
+ needs_compare_(true),
+ split_edge_merge_block_(NULL),
+ merge_block_(NULL) {
+ HEnvironment* env = builder->environment();
+ first_true_block_ = builder->CreateBasicBlock(env->Copy());
+ last_true_block_ = NULL;
+ first_false_block_ = builder->CreateBasicBlock(env->Copy());
+}
+
+
+HGraphBuilder::IfBuilder::IfBuilder(
+ HGraphBuilder* builder,
+ HIfContinuation* continuation)
+ : builder_(builder),
+ finished_(false),
+ deopt_then_(false),
+ deopt_else_(false),
+ did_then_(false),
+ did_else_(false),
+ did_and_(false),
+ did_or_(false),
+ captured_(false),
+ needs_compare_(false),
+ first_true_block_(NULL),
+ last_true_block_(NULL),
+ first_false_block_(NULL),
+ split_edge_merge_block_(NULL),
+ merge_block_(NULL) {
+ continuation->Continue(&first_true_block_,
+ &first_false_block_);
+}
+
+
+HControlInstruction* HGraphBuilder::IfBuilder::AddCompare(
+ HControlInstruction* compare) {
+ if (split_edge_merge_block_ != NULL) {
+ HEnvironment* env = first_false_block_->last_environment();
+ HBasicBlock* split_edge =
+ builder_->CreateBasicBlock(env->Copy());
+ if (did_or_) {
+ compare->SetSuccessorAt(0, split_edge);
+ compare->SetSuccessorAt(1, first_false_block_);
+ } else {
+ compare->SetSuccessorAt(0, first_true_block_);
+ compare->SetSuccessorAt(1, split_edge);
+ }
+ builder_->GotoNoSimulate(split_edge, split_edge_merge_block_);
+ } else {
+ compare->SetSuccessorAt(0, first_true_block_);
+ compare->SetSuccessorAt(1, first_false_block_);
+ }
+ builder_->FinishCurrentBlock(compare);
+ needs_compare_ = false;
+ return compare;
+}
+
+
+void HGraphBuilder::IfBuilder::Or() {
+ ASSERT(!needs_compare_);
+ ASSERT(!did_and_);
+ did_or_ = true;
+ HEnvironment* env = first_false_block_->last_environment();
+ if (split_edge_merge_block_ == NULL) {
+ split_edge_merge_block_ =
+ builder_->CreateBasicBlock(env->Copy());
+ builder_->GotoNoSimulate(first_true_block_, split_edge_merge_block_);
+ first_true_block_ = split_edge_merge_block_;
+ }
+ builder_->set_current_block(first_false_block_);
+ first_false_block_ = builder_->CreateBasicBlock(env->Copy());
+}
+
+
+void HGraphBuilder::IfBuilder::And() {
+ ASSERT(!needs_compare_);
+ ASSERT(!did_or_);
+ did_and_ = true;
+ HEnvironment* env = first_false_block_->last_environment();
+ if (split_edge_merge_block_ == NULL) {
+ split_edge_merge_block_ = builder_->CreateBasicBlock(env->Copy());
+ builder_->GotoNoSimulate(first_false_block_, split_edge_merge_block_);
+ first_false_block_ = split_edge_merge_block_;
+ }
+ builder_->set_current_block(first_true_block_);
+ first_true_block_ = builder_->CreateBasicBlock(env->Copy());
+}
+
+
+void HGraphBuilder::IfBuilder::CaptureContinuation(
+ HIfContinuation* continuation) {
+ ASSERT(!finished_);
+ ASSERT(!captured_);
+ HBasicBlock* true_block = last_true_block_ == NULL
+ ? first_true_block_
+ : last_true_block_;
+ HBasicBlock* false_block = did_else_ && (first_false_block_ != NULL)
+ ? builder_->current_block()
+ : first_false_block_;
+ continuation->Capture(true_block, false_block);
+ captured_ = true;
+ End();
+}
+
+
+void HGraphBuilder::IfBuilder::JoinContinuation(HIfContinuation* continuation) {
+ ASSERT(!finished_);
+ ASSERT(!captured_);
+ HBasicBlock* true_block = last_true_block_ == NULL
+ ? first_true_block_
+ : last_true_block_;
+ HBasicBlock* false_block = did_else_ && (first_false_block_ != NULL)
+ ? builder_->current_block()
+ : first_false_block_;
+ if (true_block != NULL && !true_block->IsFinished()) {
+ ASSERT(continuation->IsTrueReachable());
+ builder_->GotoNoSimulate(true_block, continuation->true_branch());
+ }
+ if (false_block != NULL && !false_block->IsFinished()) {
+ ASSERT(continuation->IsFalseReachable());
+ builder_->GotoNoSimulate(false_block, continuation->false_branch());
+ }
+ captured_ = true;
+ End();
+}
+
+
+void HGraphBuilder::IfBuilder::Then() {
+ ASSERT(!captured_);
+ ASSERT(!finished_);
+ did_then_ = true;
+ if (needs_compare_) {
+    // Handle ifs without any expressions; they jump directly to the "else"
+    // branch. However, we must pretend that the "then" branch is reachable,
+    // so that the graph builder visits it and sees any live-range-extending
+    // constructs within it.
+ HConstant* constant_false = builder_->graph()->GetConstantFalse();
+ ToBooleanStub::Types boolean_type = ToBooleanStub::Types();
+ boolean_type.Add(ToBooleanStub::BOOLEAN);
+ HBranch* branch = builder()->New<HBranch>(
+ constant_false, boolean_type, first_true_block_, first_false_block_);
+ builder_->FinishCurrentBlock(branch);
+ }
+ builder_->set_current_block(first_true_block_);
+}
+
+
+void HGraphBuilder::IfBuilder::Else() {
+ ASSERT(did_then_);
+ ASSERT(!captured_);
+ ASSERT(!finished_);
+ last_true_block_ = builder_->current_block();
+ builder_->set_current_block(first_false_block_);
+ did_else_ = true;
+}
+
+
+void HGraphBuilder::IfBuilder::Deopt(const char* reason) {
+ ASSERT(did_then_);
+ if (did_else_) {
+ deopt_else_ = true;
+ } else {
+ deopt_then_ = true;
+ }
+ builder_->Add<HDeoptimize>(reason, Deoptimizer::EAGER);
+}
+
+
+void HGraphBuilder::IfBuilder::Return(HValue* value) {
+ HValue* parameter_count = builder_->graph()->GetConstantMinus1();
+ builder_->FinishExitCurrentBlock(
+ builder_->New<HReturn>(value, parameter_count));
+ if (did_else_) {
+ first_false_block_ = NULL;
+ } else {
+ first_true_block_ = NULL;
+ }
+}
+
+
+void HGraphBuilder::IfBuilder::End() {
+ if (!captured_) {
+ ASSERT(did_then_);
+ if (!did_else_) {
+ last_true_block_ = builder_->current_block();
+ }
+ if (last_true_block_ == NULL || last_true_block_->IsFinished()) {
+ ASSERT(did_else_);
+      // Return on true. Nothing to do; just continue the false block.
+ } else if (first_false_block_ == NULL ||
+ (did_else_ && builder_->current_block()->IsFinished())) {
+ // Deopt on false. Nothing to do except switching to the true block.
+ builder_->set_current_block(last_true_block_);
+ } else {
+ merge_block_ = builder_->graph()->CreateBasicBlock();
+ ASSERT(!finished_);
+ if (!did_else_) Else();
+ ASSERT(!last_true_block_->IsFinished());
+ HBasicBlock* last_false_block = builder_->current_block();
+ ASSERT(!last_false_block->IsFinished());
+ if (deopt_then_) {
+ builder_->GotoNoSimulate(last_false_block, merge_block_);
+ builder_->PadEnvironmentForContinuation(last_true_block_,
+ merge_block_);
+ builder_->GotoNoSimulate(last_true_block_, merge_block_);
+ } else {
+ builder_->GotoNoSimulate(last_true_block_, merge_block_);
+ if (deopt_else_) {
+ builder_->PadEnvironmentForContinuation(last_false_block,
+ merge_block_);
+ }
+ builder_->GotoNoSimulate(last_false_block, merge_block_);
+ }
+ builder_->set_current_block(merge_block_);
+ }
+ }
+ finished_ = true;
+}
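The IfBuilder above is driven in a fixed If/Then/Else/End order; a minimal usage sketch, with 'key' and 'length' standing in for HValues already present in the graph and a purely illustrative method name, looks like this:

// Illustrative only: emit "if (key >= length) deopt; else bounds-check key".
void HGraphBuilder::ExampleBoundsBranch(HValue* key, HValue* length) {
  IfBuilder checker(this);
  checker.If<HCompareNumericAndBranch>(key, length, Token::GTE);
  checker.Then();
  checker.Deopt("Example: key out of range");
  checker.Else();
  Add<HBoundsCheck>(key, length);
  checker.End();  // Joins the surviving arms into a fresh merge block.
}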
+
+
+HGraphBuilder::LoopBuilder::LoopBuilder(HGraphBuilder* builder,
+ HValue* context,
+ LoopBuilder::Direction direction)
+ : builder_(builder),
+ context_(context),
+ direction_(direction),
+ finished_(false) {
+ header_block_ = builder->CreateLoopHeaderBlock();
+ body_block_ = NULL;
+ exit_block_ = NULL;
+ exit_trampoline_block_ = NULL;
+ increment_amount_ = builder_->graph()->GetConstant1();
+}
+
+
+HGraphBuilder::LoopBuilder::LoopBuilder(HGraphBuilder* builder,
+ HValue* context,
+ LoopBuilder::Direction direction,
+ HValue* increment_amount)
+ : builder_(builder),
+ context_(context),
+ direction_(direction),
+ finished_(false) {
+ header_block_ = builder->CreateLoopHeaderBlock();
+ body_block_ = NULL;
+ exit_block_ = NULL;
+ exit_trampoline_block_ = NULL;
+ increment_amount_ = increment_amount;
+}
+
+
+HValue* HGraphBuilder::LoopBuilder::BeginBody(
+ HValue* initial,
+ HValue* terminating,
+ Token::Value token) {
+ HEnvironment* env = builder_->environment();
+ phi_ = header_block_->AddNewPhi(env->values()->length());
+ phi_->AddInput(initial);
+ env->Push(initial);
+ builder_->GotoNoSimulate(header_block_);
+
+ HEnvironment* body_env = env->Copy();
+ HEnvironment* exit_env = env->Copy();
+ // Remove the phi from the expression stack
+ body_env->Pop();
+ exit_env->Pop();
+ body_block_ = builder_->CreateBasicBlock(body_env);
+ exit_block_ = builder_->CreateBasicBlock(exit_env);
+
+ builder_->set_current_block(header_block_);
+ env->Pop();
+ builder_->FinishCurrentBlock(builder_->New<HCompareNumericAndBranch>(
+ phi_, terminating, token, body_block_, exit_block_));
+
+ builder_->set_current_block(body_block_);
+ if (direction_ == kPreIncrement || direction_ == kPreDecrement) {
+ HValue* one = builder_->graph()->GetConstant1();
+ if (direction_ == kPreIncrement) {
+ increment_ = HAdd::New(zone(), context_, phi_, one);
+ } else {
+ increment_ = HSub::New(zone(), context_, phi_, one);
+ }
+ increment_->ClearFlag(HValue::kCanOverflow);
+ builder_->AddInstruction(increment_);
+ return increment_;
+ } else {
+ return phi_;
+ }
+}
+
+
+void HGraphBuilder::LoopBuilder::Break() {
+ if (exit_trampoline_block_ == NULL) {
+    // It's the first time we've seen a break.
+ HEnvironment* env = exit_block_->last_environment()->Copy();
+ exit_trampoline_block_ = builder_->CreateBasicBlock(env);
+ builder_->GotoNoSimulate(exit_block_, exit_trampoline_block_);
+ }
+
+ builder_->GotoNoSimulate(exit_trampoline_block_);
+}
+
+
+void HGraphBuilder::LoopBuilder::EndBody() {
+ ASSERT(!finished_);
+
+ if (direction_ == kPostIncrement || direction_ == kPostDecrement) {
+ if (direction_ == kPostIncrement) {
+ increment_ = HAdd::New(zone(), context_, phi_, increment_amount_);
+ } else {
+ increment_ = HSub::New(zone(), context_, phi_, increment_amount_);
+ }
+ increment_->ClearFlag(HValue::kCanOverflow);
+ builder_->AddInstruction(increment_);
+ }
+
+ // Push the new increment value on the expression stack to merge into the phi.
+ builder_->environment()->Push(increment_);
+ HBasicBlock* last_block = builder_->current_block();
+ builder_->GotoNoSimulate(last_block, header_block_);
+ header_block_->loop_information()->RegisterBackEdge(last_block);
+
+ if (exit_trampoline_block_ != NULL) {
+ builder_->set_current_block(exit_trampoline_block_);
+ } else {
+ builder_->set_current_block(exit_block_);
+ }
+ finished_ = true;
+}
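LoopBuilder follows the same pattern with a BeginBody/EndBody pair; a sketch of a counted loop, assuming 'elements', 'count', and 'hole' are HValues built earlier (this mirrors the hole-filling loop later in the patch):

// Illustrative counted loop: for (key = 0; key < count; key++) store the hole.
LoopBuilder loop(this, context(), LoopBuilder::kPostIncrement);
HValue* key = loop.BeginBody(graph()->GetConstant0(), count, Token::LT);
Add<HStoreKeyed>(elements, key, hole, FAST_HOLEY_ELEMENTS);
loop.EndBody();  // Emits the increment, closes the back edge, and resumes at the exit block.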
+
+
+HGraph* HGraphBuilder::CreateGraph() {
+ graph_ = new(zone()) HGraph(info_);
+ if (FLAG_hydrogen_stats) isolate()->GetHStatistics()->Initialize(info_);
+ CompilationPhase phase("H_Block building", info_);
+ set_current_block(graph()->entry_block());
+ if (!BuildGraph()) return NULL;
+ graph()->FinalizeUniqueness();
+ return graph_;
+}
+
+
+HInstruction* HGraphBuilder::AddInstruction(HInstruction* instr) {
+ ASSERT(current_block() != NULL);
+ ASSERT(!FLAG_emit_opt_code_positions ||
+ position_ != RelocInfo::kNoPosition || !info_->IsOptimizing());
+ current_block()->AddInstruction(instr, position_);
+ if (graph()->IsInsideNoSideEffectsScope()) {
+ instr->SetFlag(HValue::kHasNoObservableSideEffects);
+ }
+ return instr;
+}
+
+
+void HGraphBuilder::FinishCurrentBlock(HControlInstruction* last) {
+ ASSERT(!FLAG_emit_opt_code_positions || !info_->IsOptimizing() ||
+ position_ != RelocInfo::kNoPosition);
+ current_block()->Finish(last, position_);
+ if (last->IsReturn() || last->IsAbnormalExit()) {
+ set_current_block(NULL);
+ }
+}
+
+
+void HGraphBuilder::FinishExitCurrentBlock(HControlInstruction* instruction) {
+ ASSERT(!FLAG_emit_opt_code_positions || !info_->IsOptimizing() ||
+ position_ != RelocInfo::kNoPosition);
+ current_block()->FinishExit(instruction, position_);
+ if (instruction->IsReturn() || instruction->IsAbnormalExit()) {
+ set_current_block(NULL);
+ }
+}
+
+
+void HGraphBuilder::AddIncrementCounter(StatsCounter* counter) {
+ if (FLAG_native_code_counters && counter->Enabled()) {
+ HValue* reference = Add<HConstant>(ExternalReference(counter));
+ HValue* old_value = Add<HLoadNamedField>(reference,
+ HObjectAccess::ForCounter());
+ HValue* new_value = Add<HAdd>(old_value, graph()->GetConstant1());
+ new_value->ClearFlag(HValue::kCanOverflow); // Ignore counter overflow
+ Add<HStoreNamedField>(reference, HObjectAccess::ForCounter(),
+ new_value);
+ }
+}
+
+
+void HGraphBuilder::AddSimulate(BailoutId id,
+ RemovableSimulate removable) {
+ ASSERT(current_block() != NULL);
+ ASSERT(!graph()->IsInsideNoSideEffectsScope());
+ current_block()->AddNewSimulate(id, removable);
+}
+
+
+HBasicBlock* HGraphBuilder::CreateBasicBlock(HEnvironment* env) {
+ HBasicBlock* b = graph()->CreateBasicBlock();
+ b->SetInitialEnvironment(env);
+ return b;
+}
+
+
+HBasicBlock* HGraphBuilder::CreateLoopHeaderBlock() {
+ HBasicBlock* header = graph()->CreateBasicBlock();
+ HEnvironment* entry_env = environment()->CopyAsLoopHeader(header);
+ header->SetInitialEnvironment(entry_env);
+ header->AttachLoopInformation();
+ return header;
+}
+
+
+HValue* HGraphBuilder::BuildCheckHeapObject(HValue* obj) {
+ if (obj->type().IsHeapObject()) return obj;
+ return Add<HCheckHeapObject>(obj);
+}
+
+
+void HGraphBuilder::FinishExitWithHardDeoptimization(
+ const char* reason, HBasicBlock* continuation) {
+ PadEnvironmentForContinuation(current_block(), continuation);
+ Add<HDeoptimize>(reason, Deoptimizer::EAGER);
+ if (graph()->IsInsideNoSideEffectsScope()) {
+ GotoNoSimulate(continuation);
+ } else {
+ Goto(continuation);
+ }
+}
+
+
+void HGraphBuilder::PadEnvironmentForContinuation(
+ HBasicBlock* from,
+ HBasicBlock* continuation) {
+ if (continuation->last_environment() != NULL) {
+ // When merging from a deopt block to a continuation, resolve differences in
+ // environment by pushing constant 0 and popping extra values so that the
+ // environments match during the join. Push 0 since it has the most specific
+ // representation, and will not influence representation inference of the
+ // phi.
+ int continuation_env_length = continuation->last_environment()->length();
+ while (continuation_env_length != from->last_environment()->length()) {
+ if (continuation_env_length > from->last_environment()->length()) {
+ from->last_environment()->Push(graph()->GetConstant0());
+ } else {
+ from->last_environment()->Pop();
+ }
+ }
+ } else {
+ ASSERT(continuation->predecessors()->length() == 0);
+ }
+}
+
+
+HValue* HGraphBuilder::BuildCheckMap(HValue* obj, Handle<Map> map) {
+ return Add<HCheckMaps>(obj, map, top_info());
+}
+
+
+HValue* HGraphBuilder::BuildWrapReceiver(HValue* object, HValue* function) {
+ if (object->type().IsJSObject()) return object;
+ return Add<HWrapReceiver>(object, function);
+}
+
+
+HValue* HGraphBuilder::BuildCheckForCapacityGrow(HValue* object,
+ HValue* elements,
+ ElementsKind kind,
+ HValue* length,
+ HValue* key,
+ bool is_js_array) {
+ IfBuilder length_checker(this);
+
+ Token::Value token = IsHoleyElementsKind(kind) ? Token::GTE : Token::EQ;
+ length_checker.If<HCompareNumericAndBranch>(key, length, token);
+
+ length_checker.Then();
+
+ HValue* current_capacity = AddLoadFixedArrayLength(elements);
+
+ IfBuilder capacity_checker(this);
+
+ capacity_checker.If<HCompareNumericAndBranch>(key, current_capacity,
+ Token::GTE);
+ capacity_checker.Then();
+
+ HValue* max_gap = Add<HConstant>(static_cast<int32_t>(JSObject::kMaxGap));
+ HValue* max_capacity = Add<HAdd>(current_capacity, max_gap);
+ IfBuilder key_checker(this);
+ key_checker.If<HCompareNumericAndBranch>(key, max_capacity, Token::LT);
+ key_checker.Then();
+ key_checker.ElseDeopt("Key out of capacity range");
+ key_checker.End();
+
+ HValue* new_capacity = BuildNewElementsCapacity(key);
+ HValue* new_elements = BuildGrowElementsCapacity(object, elements,
+ kind, kind, length,
+ new_capacity);
+
+ environment()->Push(new_elements);
+ capacity_checker.Else();
+
+ environment()->Push(elements);
+ capacity_checker.End();
+
+ if (is_js_array) {
+ HValue* new_length = AddUncasted<HAdd>(key, graph_->GetConstant1());
+ new_length->ClearFlag(HValue::kCanOverflow);
+
+ Add<HStoreNamedField>(object, HObjectAccess::ForArrayLength(kind),
+ new_length);
+ }
+
+ length_checker.Else();
+ Add<HBoundsCheck>(key, length);
+
+ environment()->Push(elements);
+ length_checker.End();
+
+ return environment()->Pop();
+}
+
+
+HValue* HGraphBuilder::BuildCopyElementsOnWrite(HValue* object,
+ HValue* elements,
+ ElementsKind kind,
+ HValue* length) {
+ Factory* factory = isolate()->factory();
+
+ IfBuilder cow_checker(this);
+
+ cow_checker.If<HCompareMap>(elements, factory->fixed_cow_array_map());
+ cow_checker.Then();
+
+ HValue* capacity = AddLoadFixedArrayLength(elements);
+
+ HValue* new_elements = BuildGrowElementsCapacity(object, elements, kind,
+ kind, length, capacity);
+
+ environment()->Push(new_elements);
+
+ cow_checker.Else();
+
+ environment()->Push(elements);
+
+ cow_checker.End();
+
+ return environment()->Pop();
+}
+
+
+void HGraphBuilder::BuildTransitionElementsKind(HValue* object,
+ HValue* map,
+ ElementsKind from_kind,
+ ElementsKind to_kind,
+ bool is_jsarray) {
+ ASSERT(!IsFastHoleyElementsKind(from_kind) ||
+ IsFastHoleyElementsKind(to_kind));
+
+ if (AllocationSite::GetMode(from_kind, to_kind) == TRACK_ALLOCATION_SITE) {
+ Add<HTrapAllocationMemento>(object);
+ }
+
+ if (!IsSimpleMapChangeTransition(from_kind, to_kind)) {
+ HInstruction* elements = AddLoadElements(object);
+
+ HInstruction* empty_fixed_array = Add<HConstant>(
+ isolate()->factory()->empty_fixed_array());
+
+ IfBuilder if_builder(this);
+
+ if_builder.IfNot<HCompareObjectEqAndBranch>(elements, empty_fixed_array);
+
+ if_builder.Then();
+
+ HInstruction* elements_length = AddLoadFixedArrayLength(elements);
+
+ HInstruction* array_length = is_jsarray
+ ? Add<HLoadNamedField>(object, HObjectAccess::ForArrayLength(from_kind))
+ : elements_length;
+
+ BuildGrowElementsCapacity(object, elements, from_kind, to_kind,
+ array_length, elements_length);
+
+ if_builder.End();
+ }
+
+ Add<HStoreNamedField>(object, HObjectAccess::ForMap(), map);
+}
+
+
+HValue* HGraphBuilder::BuildNumberToString(HValue* object,
+ Handle<Type> type) {
+ NoObservableSideEffectsScope scope(this);
+
+ // Create a joinable continuation.
+ HIfContinuation found(graph()->CreateBasicBlock(),
+ graph()->CreateBasicBlock());
+
+ // Load the number string cache.
+ HValue* number_string_cache =
+ Add<HLoadRoot>(Heap::kNumberStringCacheRootIndex);
+
+ // Make the hash mask from the length of the number string cache. It
+ // contains two elements (number and string) for each cache entry.
+ HValue* mask = AddLoadFixedArrayLength(number_string_cache);
+ mask->set_type(HType::Smi());
+ mask = Add<HSar>(mask, graph()->GetConstant1());
+ mask = Add<HSub>(mask, graph()->GetConstant1());
+
+ // Check whether object is a smi.
+ IfBuilder if_objectissmi(this);
+ if_objectissmi.If<HIsSmiAndBranch>(object);
+ if_objectissmi.Then();
+ {
+ // Compute hash for smi similar to smi_get_hash().
+ HValue* hash = Add<HBitwise>(Token::BIT_AND, object, mask);
+
+ // Load the key.
+ HValue* key_index = Add<HShl>(hash, graph()->GetConstant1());
+ HValue* key = Add<HLoadKeyed>(number_string_cache, key_index,
+ static_cast<HValue*>(NULL),
+ FAST_ELEMENTS, ALLOW_RETURN_HOLE);
+
+ // Check if object == key.
+ IfBuilder if_objectiskey(this);
+ if_objectiskey.If<HCompareObjectEqAndBranch>(object, key);
+ if_objectiskey.Then();
+ {
+ // Make the key_index available.
+ Push(key_index);
+ }
+ if_objectiskey.JoinContinuation(&found);
+ }
+ if_objectissmi.Else();
+ {
+ if (type->Is(Type::Smi())) {
+ if_objectissmi.Deopt("Excepted smi");
+ } else {
+ // Check if the object is a heap number.
+ IfBuilder if_objectisnumber(this);
+ if_objectisnumber.If<HCompareMap>(
+ object, isolate()->factory()->heap_number_map());
+ if_objectisnumber.Then();
+ {
+ // Compute hash for heap number similar to double_get_hash().
+ HValue* low = Add<HLoadNamedField>(
+ object, HObjectAccess::ForHeapNumberValueLowestBits());
+ HValue* high = Add<HLoadNamedField>(
+ object, HObjectAccess::ForHeapNumberValueHighestBits());
+ HValue* hash = Add<HBitwise>(Token::BIT_XOR, low, high);
+ hash = Add<HBitwise>(Token::BIT_AND, hash, mask);
+
+ // Load the key.
+ HValue* key_index = Add<HShl>(hash, graph()->GetConstant1());
+ HValue* key = Add<HLoadKeyed>(number_string_cache, key_index,
+ static_cast<HValue*>(NULL),
+ FAST_ELEMENTS, ALLOW_RETURN_HOLE);
+
+        // Check if key is a heap number (the number string cache contains only
+        // SMIs and heap numbers, so it is sufficient to do a SMI check here).
+ IfBuilder if_keyisnotsmi(this);
+ if_keyisnotsmi.IfNot<HIsSmiAndBranch>(key);
+ if_keyisnotsmi.Then();
+ {
+ // Check if values of key and object match.
+ IfBuilder if_keyeqobject(this);
+ if_keyeqobject.If<HCompareNumericAndBranch>(
+ Add<HLoadNamedField>(key, HObjectAccess::ForHeapNumberValue()),
+ Add<HLoadNamedField>(object, HObjectAccess::ForHeapNumberValue()),
+ Token::EQ);
+ if_keyeqobject.Then();
+ {
+ // Make the key_index available.
+ Push(key_index);
+ }
+ if_keyeqobject.JoinContinuation(&found);
+ }
+ if_keyisnotsmi.JoinContinuation(&found);
+ }
+ if_objectisnumber.Else();
+ {
+ if (type->Is(Type::Number())) {
+ if_objectisnumber.Deopt("Expected heap number");
+ }
+ }
+ if_objectisnumber.JoinContinuation(&found);
+ }
+ }
+ if_objectissmi.JoinContinuation(&found);
+
+ // Check for cache hit.
+ IfBuilder if_found(this, &found);
+ if_found.Then();
+ {
+ // Count number to string operation in native code.
+ AddIncrementCounter(isolate()->counters()->number_to_string_native());
+
+ // Load the value in case of cache hit.
+ HValue* key_index = Pop();
+ HValue* value_index = Add<HAdd>(key_index, graph()->GetConstant1());
+ Push(Add<HLoadKeyed>(number_string_cache, value_index,
+ static_cast<HValue*>(NULL),
+ FAST_ELEMENTS, ALLOW_RETURN_HOLE));
+ }
+ if_found.Else();
+ {
+ // Cache miss, fallback to runtime.
+ Add<HPushArgument>(object);
+ Push(Add<HCallRuntime>(
+ isolate()->factory()->empty_string(),
+ Runtime::FunctionForId(Runtime::kNumberToStringSkipCache),
+ 1));
+ }
+ if_found.End();
+
+ return Pop();
+}
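The cache probe above boils down to a small index computation; an equivalent scalar sketch (the hash input is the smi value itself, or the XOR of the heap number's two 32-bit halves, as in the graph code above) is:

// Illustrative scalar view of the number-string-cache indexing built above.
static int CacheKeyIndex(uint32_t hash_input, int cache_length) {
  int mask = (cache_length >> 1) - 1;  // Two slots (number, string) per entry.
  int hash = static_cast<int>(hash_input) & mask;
  return hash << 1;                    // The cached string lives at key_index + 1.
}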
+
+
+HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
+ HValue* checked_object,
+ HValue* key,
+ HValue* val,
+ bool is_js_array,
+ ElementsKind elements_kind,
+ bool is_store,
+ LoadKeyedHoleMode load_mode,
+ KeyedAccessStoreMode store_mode) {
+ ASSERT(!IsExternalArrayElementsKind(elements_kind) || !is_js_array);
+ // No GVNFlag is necessary for ElementsKind if there is an explicit dependency
+ // on a HElementsTransition instruction. The flag can also be removed if the
+ // map to check has FAST_HOLEY_ELEMENTS, since there can be no further
+ // ElementsKind transitions. Finally, the dependency can be removed for stores
+ // for FAST_ELEMENTS, since a transition to HOLEY elements won't change the
+ // generated store code.
+ if ((elements_kind == FAST_HOLEY_ELEMENTS) ||
+ (elements_kind == FAST_ELEMENTS && is_store)) {
+ checked_object->ClearGVNFlag(kDependsOnElementsKind);
+ }
+
+ bool fast_smi_only_elements = IsFastSmiElementsKind(elements_kind);
+ bool fast_elements = IsFastObjectElementsKind(elements_kind);
+ HValue* elements = AddLoadElements(checked_object);
+ if (is_store && (fast_elements || fast_smi_only_elements) &&
+ store_mode != STORE_NO_TRANSITION_HANDLE_COW) {
+ HCheckMaps* check_cow_map = Add<HCheckMaps>(
+ elements, isolate()->factory()->fixed_array_map(), top_info());
+ check_cow_map->ClearGVNFlag(kDependsOnElementsKind);
+ }
+ HInstruction* length = NULL;
+ if (is_js_array) {
+ length = Add<HLoadNamedField>(
+ checked_object, HObjectAccess::ForArrayLength(elements_kind));
+ } else {
+ length = AddLoadFixedArrayLength(elements);
+ }
+ length->set_type(HType::Smi());
+ HValue* checked_key = NULL;
+ if (IsExternalArrayElementsKind(elements_kind)) {
+ if (store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS) {
+ NoObservableSideEffectsScope no_effects(this);
+ HLoadExternalArrayPointer* external_elements =
+ Add<HLoadExternalArrayPointer>(elements);
+ IfBuilder length_checker(this);
+ length_checker.If<HCompareNumericAndBranch>(key, length, Token::LT);
+ length_checker.Then();
+ IfBuilder negative_checker(this);
+ HValue* bounds_check = negative_checker.If<HCompareNumericAndBranch>(
+ key, graph()->GetConstant0(), Token::GTE);
+ negative_checker.Then();
+ HInstruction* result = AddElementAccess(
+ external_elements, key, val, bounds_check, elements_kind, is_store);
+ negative_checker.ElseDeopt("Negative key encountered");
+ length_checker.End();
+ return result;
+ } else {
+ ASSERT(store_mode == STANDARD_STORE);
+ checked_key = Add<HBoundsCheck>(key, length);
+ HLoadExternalArrayPointer* external_elements =
+ Add<HLoadExternalArrayPointer>(elements);
+ return AddElementAccess(
+ external_elements, checked_key, val,
+ checked_object, elements_kind, is_store);
+ }
+ }
+ ASSERT(fast_smi_only_elements ||
+ fast_elements ||
+ IsFastDoubleElementsKind(elements_kind));
+
+  // In case val is stored into a fast smi array, ensure that the value is a
+  // smi before manipulating the backing store. Otherwise the actual store may
+  // deopt, leaving the backing store in an invalid state.
+ if (is_store && IsFastSmiElementsKind(elements_kind) &&
+ !val->type().IsSmi()) {
+ val = Add<HForceRepresentation>(val, Representation::Smi());
+ }
+
+ if (IsGrowStoreMode(store_mode)) {
+ NoObservableSideEffectsScope no_effects(this);
+ elements = BuildCheckForCapacityGrow(checked_object, elements,
+ elements_kind, length, key,
+ is_js_array);
+ checked_key = key;
+ } else {
+ checked_key = Add<HBoundsCheck>(key, length);
+
+ if (is_store && (fast_elements || fast_smi_only_elements)) {
+ if (store_mode == STORE_NO_TRANSITION_HANDLE_COW) {
+ NoObservableSideEffectsScope no_effects(this);
+ elements = BuildCopyElementsOnWrite(checked_object, elements,
+ elements_kind, length);
+ } else {
+ HCheckMaps* check_cow_map = Add<HCheckMaps>(
+ elements, isolate()->factory()->fixed_array_map(), top_info());
+ check_cow_map->ClearGVNFlag(kDependsOnElementsKind);
+ }
+ }
+ }
+ return AddElementAccess(elements, checked_key, val, checked_object,
+ elements_kind, is_store, load_mode);
+}
+
+
+HValue* HGraphBuilder::BuildAllocateElements(ElementsKind kind,
+ HValue* capacity) {
+ int elements_size;
+ InstanceType instance_type;
+
+ if (IsFastDoubleElementsKind(kind)) {
+ elements_size = kDoubleSize;
+ instance_type = FIXED_DOUBLE_ARRAY_TYPE;
+ } else {
+ elements_size = kPointerSize;
+ instance_type = FIXED_ARRAY_TYPE;
+ }
+
+ HConstant* elements_size_value = Add<HConstant>(elements_size);
+ HValue* mul = Add<HMul>(capacity, elements_size_value);
+ mul->ClearFlag(HValue::kCanOverflow);
+
+ HConstant* header_size = Add<HConstant>(FixedArray::kHeaderSize);
+ HValue* total_size = Add<HAdd>(mul, header_size);
+ total_size->ClearFlag(HValue::kCanOverflow);
+
+ return Add<HAllocate>(total_size, HType::JSArray(),
+ isolate()->heap()->GetPretenureMode(), instance_type);
+}
+
+
+void HGraphBuilder::BuildInitializeElementsHeader(HValue* elements,
+ ElementsKind kind,
+ HValue* capacity) {
+ Factory* factory = isolate()->factory();
+ Handle<Map> map = IsFastDoubleElementsKind(kind)
+ ? factory->fixed_double_array_map()
+ : factory->fixed_array_map();
+
+ AddStoreMapConstant(elements, map);
+ Add<HStoreNamedField>(elements, HObjectAccess::ForFixedArrayLength(),
+ capacity);
+}
+
+
+HValue* HGraphBuilder::BuildAllocateElementsAndInitializeElementsHeader(
+ ElementsKind kind,
+ HValue* capacity) {
+ // The HForceRepresentation is to prevent possible deopt on int-smi
+ // conversion after allocation but before the new object fields are set.
+ capacity = Add<HForceRepresentation>(capacity, Representation::Smi());
+ HValue* new_elements = BuildAllocateElements(kind, capacity);
+ BuildInitializeElementsHeader(new_elements, kind, capacity);
+ return new_elements;
+}
+
+
+HInnerAllocatedObject* HGraphBuilder::BuildJSArrayHeader(HValue* array,
+ HValue* array_map,
+ AllocationSiteMode mode,
+ ElementsKind elements_kind,
+ HValue* allocation_site_payload,
+ HValue* length_field) {
+
+ Add<HStoreNamedField>(array, HObjectAccess::ForMap(), array_map);
+
+ HConstant* empty_fixed_array =
+ Add<HConstant>(isolate()->factory()->empty_fixed_array());
+
+ HObjectAccess access = HObjectAccess::ForPropertiesPointer();
+ Add<HStoreNamedField>(array, access, empty_fixed_array);
+ Add<HStoreNamedField>(array, HObjectAccess::ForArrayLength(elements_kind),
+ length_field);
+
+ if (mode == TRACK_ALLOCATION_SITE) {
+ BuildCreateAllocationMemento(array,
+ JSArray::kSize,
+ allocation_site_payload);
+ }
+
+ int elements_location = JSArray::kSize;
+ if (mode == TRACK_ALLOCATION_SITE) {
+ elements_location += AllocationMemento::kSize;
+ }
+
+ HValue* elements = Add<HInnerAllocatedObject>(array, elements_location);
+ Add<HStoreNamedField>(array, HObjectAccess::ForElementsPointer(), elements);
+ return static_cast<HInnerAllocatedObject*>(elements);
+}
+
+
+HInstruction* HGraphBuilder::AddElementAccess(
+ HValue* elements,
+ HValue* checked_key,
+ HValue* val,
+ HValue* dependency,
+ ElementsKind elements_kind,
+ bool is_store,
+ LoadKeyedHoleMode load_mode) {
+ if (is_store) {
+ ASSERT(val != NULL);
+ if (elements_kind == EXTERNAL_PIXEL_ELEMENTS) {
+ val = Add<HClampToUint8>(val);
+ }
+ return Add<HStoreKeyed>(elements, checked_key, val, elements_kind);
+ }
+
+ ASSERT(!is_store);
+ ASSERT(val == NULL);
+ HLoadKeyed* load = Add<HLoadKeyed>(
+ elements, checked_key, dependency, elements_kind, load_mode);
+ if (FLAG_opt_safe_uint32_operations &&
+ elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) {
+ graph()->RecordUint32Instruction(load);
+ }
+ return load;
+}
+
+
+HLoadNamedField* HGraphBuilder::AddLoadElements(HValue* object) {
+ return Add<HLoadNamedField>(object, HObjectAccess::ForElementsPointer());
+}
+
+
+HLoadNamedField* HGraphBuilder::AddLoadFixedArrayLength(HValue* object) {
+ return Add<HLoadNamedField>(object,
+ HObjectAccess::ForFixedArrayLength());
+}
+
+
+HValue* HGraphBuilder::BuildNewElementsCapacity(HValue* old_capacity) {
+ HValue* half_old_capacity = AddUncasted<HShr>(old_capacity,
+ graph_->GetConstant1());
+
+ HValue* new_capacity = AddUncasted<HAdd>(half_old_capacity, old_capacity);
+ new_capacity->ClearFlag(HValue::kCanOverflow);
+
+ HValue* min_growth = Add<HConstant>(16);
+
+ new_capacity = AddUncasted<HAdd>(new_capacity, min_growth);
+ new_capacity->ClearFlag(HValue::kCanOverflow);
+
+ return new_capacity;
+}
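The growth policy above amounts to 1.5x the old capacity plus a fixed minimum of 16; an equivalent scalar sketch with a couple of worked values:

// Illustrative scalar equivalent of BuildNewElementsCapacity:
//   e.g. 8 -> 8 + 4 + 16 = 28,  100 -> 100 + 50 + 16 = 166.
static int NewElementsCapacity(int old_capacity) {
  return old_capacity + (old_capacity >> 1) + 16;
}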
+
+
+void HGraphBuilder::BuildNewSpaceArrayCheck(HValue* length, ElementsKind kind) {
+ Heap* heap = isolate()->heap();
+ int element_size = IsFastDoubleElementsKind(kind) ? kDoubleSize
+ : kPointerSize;
+ int max_size = heap->MaxRegularSpaceAllocationSize() / element_size;
+ max_size -= JSArray::kSize / element_size;
+ HConstant* max_size_constant = Add<HConstant>(max_size);
+ Add<HBoundsCheck>(length, max_size_constant);
+}
+
+
+HValue* HGraphBuilder::BuildGrowElementsCapacity(HValue* object,
+ HValue* elements,
+ ElementsKind kind,
+ ElementsKind new_kind,
+ HValue* length,
+ HValue* new_capacity) {
+ BuildNewSpaceArrayCheck(new_capacity, new_kind);
+
+ HValue* new_elements = BuildAllocateElementsAndInitializeElementsHeader(
+ new_kind, new_capacity);
+
+ BuildCopyElements(elements, kind,
+ new_elements, new_kind,
+ length, new_capacity);
+
+ Add<HStoreNamedField>(object, HObjectAccess::ForElementsPointer(),
+ new_elements);
+
+ return new_elements;
+}
+
+
+void HGraphBuilder::BuildFillElementsWithHole(HValue* elements,
+ ElementsKind elements_kind,
+ HValue* from,
+ HValue* to) {
+  // Fast elements kinds need to be initialized in case the statements below
+  // trigger a garbage collection.
+ Factory* factory = isolate()->factory();
+
+ double nan_double = FixedDoubleArray::hole_nan_as_double();
+ HValue* hole = IsFastSmiOrObjectElementsKind(elements_kind)
+ ? Add<HConstant>(factory->the_hole_value())
+ : Add<HConstant>(nan_double);
+
+ // Special loop unfolding case
+ static const int kLoopUnfoldLimit = 4;
+ bool unfold_loop = false;
+ int initial_capacity = JSArray::kPreallocatedArrayElements;
+ if (from->IsConstant() && to->IsConstant() &&
+ initial_capacity <= kLoopUnfoldLimit) {
+ HConstant* constant_from = HConstant::cast(from);
+ HConstant* constant_to = HConstant::cast(to);
+
+ if (constant_from->HasInteger32Value() &&
+ constant_from->Integer32Value() == 0 &&
+ constant_to->HasInteger32Value() &&
+ constant_to->Integer32Value() == initial_capacity) {
+ unfold_loop = true;
+ }
+ }
+
+ // Since we're about to store a hole value, the store instruction below must
+ // assume an elements kind that supports heap object values.
+ if (IsFastSmiOrObjectElementsKind(elements_kind)) {
+ elements_kind = FAST_HOLEY_ELEMENTS;
+ }
+
+ if (unfold_loop) {
+ for (int i = 0; i < initial_capacity; i++) {
+ HInstruction* key = Add<HConstant>(i);
+ Add<HStoreKeyed>(elements, key, hole, elements_kind);
+ }
+ } else {
+ LoopBuilder builder(this, context(), LoopBuilder::kPostIncrement);
+
+ HValue* key = builder.BeginBody(from, to, Token::LT);
+
+ Add<HStoreKeyed>(elements, key, hole, elements_kind);
+
+ builder.EndBody();
+ }
+}
+
+
+void HGraphBuilder::BuildCopyElements(HValue* from_elements,
+ ElementsKind from_elements_kind,
+ HValue* to_elements,
+ ElementsKind to_elements_kind,
+ HValue* length,
+ HValue* capacity) {
+ bool pre_fill_with_holes =
+ IsFastDoubleElementsKind(from_elements_kind) &&
+ IsFastObjectElementsKind(to_elements_kind);
+
+ if (pre_fill_with_holes) {
+    // If the copy might trigger a GC, make sure that the FixedArray is
+    // pre-initialized with holes so that it is always in a consistent state.
+ BuildFillElementsWithHole(to_elements, to_elements_kind,
+ graph()->GetConstant0(), capacity);
+ }
+
+ LoopBuilder builder(this, context(), LoopBuilder::kPostIncrement);
+
+ HValue* key = builder.BeginBody(graph()->GetConstant0(), length, Token::LT);
+
+ HValue* element = Add<HLoadKeyed>(from_elements, key,
+ static_cast<HValue*>(NULL),
+ from_elements_kind,
+ ALLOW_RETURN_HOLE);
+
+ ElementsKind kind = (IsHoleyElementsKind(from_elements_kind) &&
+ IsFastSmiElementsKind(to_elements_kind))
+ ? FAST_HOLEY_ELEMENTS : to_elements_kind;
+
+ if (IsHoleyElementsKind(from_elements_kind) &&
+ from_elements_kind != to_elements_kind) {
+ IfBuilder if_hole(this);
+ if_hole.If<HCompareHoleAndBranch>(element);
+ if_hole.Then();
+ HConstant* hole_constant = IsFastDoubleElementsKind(to_elements_kind)
+ ? Add<HConstant>(FixedDoubleArray::hole_nan_as_double())
+ : graph()->GetConstantHole();
+ Add<HStoreKeyed>(to_elements, key, hole_constant, kind);
+ if_hole.Else();
+ HStoreKeyed* store = Add<HStoreKeyed>(to_elements, key, element, kind);
+ store->SetFlag(HValue::kAllowUndefinedAsNaN);
+ if_hole.End();
+ } else {
+ HStoreKeyed* store = Add<HStoreKeyed>(to_elements, key, element, kind);
+ store->SetFlag(HValue::kAllowUndefinedAsNaN);
+ }
+
+ builder.EndBody();
+
+ if (!pre_fill_with_holes && length != capacity) {
+ // Fill unused capacity with the hole.
+ BuildFillElementsWithHole(to_elements, to_elements_kind,
+ key, capacity);
+ }
+}
+
+
+HValue* HGraphBuilder::BuildCloneShallowArray(HValue* boilerplate,
+ HValue* allocation_site,
+ AllocationSiteMode mode,
+ ElementsKind kind,
+ int length) {
+ NoObservableSideEffectsScope no_effects(this);
+
+ // All sizes here are multiples of kPointerSize.
+ int size = JSArray::kSize;
+ if (mode == TRACK_ALLOCATION_SITE) {
+ size += AllocationMemento::kSize;
+ }
+
+ HValue* size_in_bytes = Add<HConstant>(size);
+ HInstruction* object = Add<HAllocate>(size_in_bytes,
+ HType::JSObject(),
+ NOT_TENURED,
+ JS_OBJECT_TYPE);
+
+ // Copy the JS array part.
+ for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
+ if ((i != JSArray::kElementsOffset) || (length == 0)) {
+ HObjectAccess access = HObjectAccess::ForJSArrayOffset(i);
+ Add<HStoreNamedField>(object, access,
+ Add<HLoadNamedField>(boilerplate, access));
+ }
+ }
+
+ // Create an allocation site info if requested.
+ if (mode == TRACK_ALLOCATION_SITE) {
+ BuildCreateAllocationMemento(object, JSArray::kSize, allocation_site);
+ }
+
+ if (length > 0) {
+ HValue* boilerplate_elements = AddLoadElements(boilerplate);
+ HValue* object_elements;
+ if (IsFastDoubleElementsKind(kind)) {
+ HValue* elems_size = Add<HConstant>(FixedDoubleArray::SizeFor(length));
+ object_elements = Add<HAllocate>(elems_size, HType::JSArray(),
+ NOT_TENURED, FIXED_DOUBLE_ARRAY_TYPE);
+ } else {
+ HValue* elems_size = Add<HConstant>(FixedArray::SizeFor(length));
+ object_elements = Add<HAllocate>(elems_size, HType::JSArray(),
+ NOT_TENURED, FIXED_ARRAY_TYPE);
+ }
+ Add<HStoreNamedField>(object, HObjectAccess::ForElementsPointer(),
+ object_elements);
+
+ // Copy the elements array header.
+ for (int i = 0; i < FixedArrayBase::kHeaderSize; i += kPointerSize) {
+ HObjectAccess access = HObjectAccess::ForFixedArrayHeader(i);
+ Add<HStoreNamedField>(object_elements, access,
+ Add<HLoadNamedField>(boilerplate_elements, access));
+ }
+
+ // Copy the elements array contents.
+ // TODO(mstarzinger): Teach HGraphBuilder::BuildCopyElements to unfold
+ // copying loops with constant length up to a given boundary and use this
+ // helper here instead.
+ for (int i = 0; i < length; i++) {
+ HValue* key_constant = Add<HConstant>(i);
+ HInstruction* value = Add<HLoadKeyed>(boilerplate_elements, key_constant,
+ static_cast<HValue*>(NULL), kind);
+ Add<HStoreKeyed>(object_elements, key_constant, value, kind);
+ }
+ }
+
+ return object;
+}
+
+
+void HGraphBuilder::BuildCompareNil(
+ HValue* value,
+ Handle<Type> type,
+ HIfContinuation* continuation) {
+ IfBuilder if_nil(this);
+ bool some_case_handled = false;
+ bool some_case_missing = false;
+
+ if (type->Maybe(Type::Null())) {
+ if (some_case_handled) if_nil.Or();
+ if_nil.If<HCompareObjectEqAndBranch>(value, graph()->GetConstantNull());
+ some_case_handled = true;
+ } else {
+ some_case_missing = true;
+ }
+
+ if (type->Maybe(Type::Undefined())) {
+ if (some_case_handled) if_nil.Or();
+ if_nil.If<HCompareObjectEqAndBranch>(value,
+ graph()->GetConstantUndefined());
+ some_case_handled = true;
+ } else {
+ some_case_missing = true;
+ }
+
+ if (type->Maybe(Type::Undetectable())) {
+ if (some_case_handled) if_nil.Or();
+ if_nil.If<HIsUndetectableAndBranch>(value);
+ some_case_handled = true;
+ } else {
+ some_case_missing = true;
+ }
+
+ if (some_case_missing) {
+ if_nil.Then();
+ if_nil.Else();
+ if (type->NumClasses() == 1) {
+ BuildCheckHeapObject(value);
+ // For ICs, the map checked below is a sentinel map that gets replaced by
+ // the monomorphic map when the code is used as a template to generate a
+      // new IC. For optimized functions, there is no sentinel map; the map
+ // emitted below is the actual monomorphic map.
+ BuildCheckMap(value, type->Classes().Current());
+ } else {
+ if_nil.Deopt("Too many undetectable types");
+ }
+ }
+
+ if_nil.CaptureContinuation(continuation);
+}
+
+
+HValue* HGraphBuilder::BuildCreateAllocationMemento(HValue* previous_object,
+ int previous_object_size,
+ HValue* alloc_site) {
+ ASSERT(alloc_site != NULL);
+ HInnerAllocatedObject* alloc_memento = Add<HInnerAllocatedObject>(
+ previous_object, previous_object_size);
+ Handle<Map> alloc_memento_map =
+ isolate()->factory()->allocation_memento_map();
+ AddStoreMapConstant(alloc_memento, alloc_memento_map);
+ HObjectAccess access = HObjectAccess::ForAllocationMementoSite();
+ Add<HStoreNamedField>(alloc_memento, access, alloc_site);
+ return alloc_memento;
+}
+
+
+HInstruction* HGraphBuilder::BuildGetNativeContext() {
+  // Get the global object, then the native context from it.
+ HInstruction* global_object = Add<HGlobalObject>();
+ HObjectAccess access = HObjectAccess::ForJSObjectOffset(
+ GlobalObject::kNativeContextOffset);
+ return Add<HLoadNamedField>(global_object, access);
+}
+
+
+HInstruction* HGraphBuilder::BuildGetArrayFunction() {
+ HInstruction* native_context = BuildGetNativeContext();
+ HInstruction* index =
+ Add<HConstant>(static_cast<int32_t>(Context::ARRAY_FUNCTION_INDEX));
+ return Add<HLoadKeyed>(
+ native_context, index, static_cast<HValue*>(NULL), FAST_ELEMENTS);
+}
+
+
+HGraphBuilder::JSArrayBuilder::JSArrayBuilder(HGraphBuilder* builder,
+ ElementsKind kind,
+ HValue* allocation_site_payload,
+ HValue* constructor_function,
+ AllocationSiteOverrideMode override_mode) :
+ builder_(builder),
+ kind_(kind),
+ allocation_site_payload_(allocation_site_payload),
+ constructor_function_(constructor_function) {
+ mode_ = override_mode == DISABLE_ALLOCATION_SITES
+ ? DONT_TRACK_ALLOCATION_SITE
+ : AllocationSite::GetMode(kind);
+}
+
+
+HGraphBuilder::JSArrayBuilder::JSArrayBuilder(HGraphBuilder* builder,
+ ElementsKind kind,
+ HValue* constructor_function) :
+ builder_(builder),
+ kind_(kind),
+ mode_(DONT_TRACK_ALLOCATION_SITE),
+ allocation_site_payload_(NULL),
+ constructor_function_(constructor_function) {
+}
+
+
+HValue* HGraphBuilder::JSArrayBuilder::EmitMapCode() {
+ if (kind_ == GetInitialFastElementsKind()) {
+ // No need for a context lookup if the kind_ matches the initial
+ // map, because we can just load the map in that case.
+ HObjectAccess access = HObjectAccess::ForPrototypeOrInitialMap();
+ return builder()->AddLoadNamedField(constructor_function_, access);
+ }
+
+ HInstruction* native_context = builder()->BuildGetNativeContext();
+ HInstruction* index = builder()->Add<HConstant>(
+ static_cast<int32_t>(Context::JS_ARRAY_MAPS_INDEX));
+
+ HInstruction* map_array = builder()->Add<HLoadKeyed>(
+ native_context, index, static_cast<HValue*>(NULL), FAST_ELEMENTS);
+
+ HInstruction* kind_index = builder()->Add<HConstant>(kind_);
+
+ return builder()->Add<HLoadKeyed>(
+ map_array, kind_index, static_cast<HValue*>(NULL), FAST_ELEMENTS);
+}
+
+
+HValue* HGraphBuilder::JSArrayBuilder::EmitInternalMapCode() {
+ // Find the map near the constructor function
+ HObjectAccess access = HObjectAccess::ForPrototypeOrInitialMap();
+ return builder()->AddLoadNamedField(constructor_function_, access);
+}
+
+
+HValue* HGraphBuilder::JSArrayBuilder::EstablishAllocationSize(
+ HValue* length_node) {
+ ASSERT(length_node != NULL);
+
+ int base_size = JSArray::kSize;
+ if (mode_ == TRACK_ALLOCATION_SITE) {
+ base_size += AllocationMemento::kSize;
+ }
+
+ STATIC_ASSERT(FixedDoubleArray::kHeaderSize == FixedArray::kHeaderSize);
+ base_size += FixedArray::kHeaderSize;
+
+ HInstruction* elements_size_value =
+ builder()->Add<HConstant>(elements_size());
+ HInstruction* mul = builder()->Add<HMul>(length_node, elements_size_value);
+ mul->ClearFlag(HValue::kCanOverflow);
+
+ HInstruction* base = builder()->Add<HConstant>(base_size);
+ HInstruction* total_size = builder()->Add<HAdd>(base, mul);
+ total_size->ClearFlag(HValue::kCanOverflow);
+ return total_size;
}
-HConstant* HGraph::GetConstantTrue() {
- return GetConstant(&constant_true_, isolate()->factory()->true_value());
+HValue* HGraphBuilder::JSArrayBuilder::EstablishEmptyArrayAllocationSize() {
+ int base_size = JSArray::kSize;
+ if (mode_ == TRACK_ALLOCATION_SITE) {
+ base_size += AllocationMemento::kSize;
+ }
+
+ base_size += IsFastDoubleElementsKind(kind_)
+ ? FixedDoubleArray::SizeFor(initial_capacity())
+ : FixedArray::SizeFor(initial_capacity());
+
+ return builder()->Add<HConstant>(base_size);
+}
+
+
+HValue* HGraphBuilder::JSArrayBuilder::AllocateEmptyArray() {
+ HValue* size_in_bytes = EstablishEmptyArrayAllocationSize();
+ HConstant* capacity = builder()->Add<HConstant>(initial_capacity());
+ return AllocateArray(size_in_bytes,
+ capacity,
+ builder()->graph()->GetConstant0(),
+ true);
+}
+
+
+HValue* HGraphBuilder::JSArrayBuilder::AllocateArray(HValue* capacity,
+ HValue* length_field,
+ bool fill_with_hole) {
+ HValue* size_in_bytes = EstablishAllocationSize(capacity);
+ return AllocateArray(size_in_bytes, capacity, length_field, fill_with_hole);
+}
+
+
+HValue* HGraphBuilder::JSArrayBuilder::AllocateArray(HValue* size_in_bytes,
+ HValue* capacity,
+ HValue* length_field,
+ bool fill_with_hole) {
+ // These HForceRepresentations are because we store these as fields in the
+ // objects we construct, and an int32-to-smi HChange could deopt. Accept
+ // the deopt possibility now, before allocation occurs.
+ capacity = builder()->Add<HForceRepresentation>(capacity,
+ Representation::Smi());
+ length_field = builder()->Add<HForceRepresentation>(length_field,
+ Representation::Smi());
+ // Allocate (dealing with failure appropriately)
+ HAllocate* new_object = builder()->Add<HAllocate>(size_in_bytes,
+ HType::JSArray(), NOT_TENURED, JS_ARRAY_TYPE);
+
+ // Folded array allocation should be aligned if it has fast double elements.
+ if (IsFastDoubleElementsKind(kind_)) {
+ new_object->MakeDoubleAligned();
+ }
+
+ // Fill in the fields: map, properties, length
+ HValue* map;
+ if (allocation_site_payload_ == NULL) {
+ map = EmitInternalMapCode();
+ } else {
+ map = EmitMapCode();
+ }
+ elements_location_ = builder()->BuildJSArrayHeader(new_object,
+ map,
+ mode_,
+ kind_,
+ allocation_site_payload_,
+ length_field);
+
+ // Initialize the elements
+ builder()->BuildInitializeElementsHeader(elements_location_, kind_, capacity);
+
+ if (fill_with_hole) {
+ builder()->BuildFillElementsWithHole(elements_location_, kind_,
+ graph()->GetConstant0(), capacity);
+ }
+
+ return new_object;
}
-HConstant* HGraph::GetConstantFalse() {
- return GetConstant(&constant_false_, isolate()->factory()->false_value());
+HStoreNamedField* HGraphBuilder::AddStoreMapConstant(HValue *object,
+ Handle<Map> map) {
+ return Add<HStoreNamedField>(object, HObjectAccess::ForMap(),
+ Add<HConstant>(map));
}
-HConstant* HGraph::GetConstantHole() {
- return GetConstant(&constant_hole_, isolate()->factory()->the_hole_value());
+HValue* HGraphBuilder::AddLoadJSBuiltin(Builtins::JavaScript builtin) {
+ HGlobalObject* global_object = Add<HGlobalObject>();
+ HObjectAccess access = HObjectAccess::ForJSObjectOffset(
+ GlobalObject::kBuiltinsOffset);
+ HValue* builtins = Add<HLoadNamedField>(global_object, access);
+ HObjectAccess function_access = HObjectAccess::ForJSObjectOffset(
+ JSBuiltinsObject::OffsetOfFunctionWithId(builtin));
+ return Add<HLoadNamedField>(builtins, function_access);
}
-HGraphBuilder::HGraphBuilder(CompilationInfo* info,
- TypeFeedbackOracle* oracle)
- : function_state_(NULL),
- initial_function_state_(this, info, oracle, NORMAL_RETURN),
+HOptimizedGraphBuilder::HOptimizedGraphBuilder(CompilationInfo* info)
+ : HGraphBuilder(info),
+ function_state_(NULL),
+ initial_function_state_(this, info, NORMAL_RETURN),
ast_context_(NULL),
break_scope_(NULL),
- graph_(NULL),
- current_block_(NULL),
inlined_count_(0),
globals_(10, info->zone()),
- zone_(info->zone()),
- inline_bailout_(false) {
+ inline_bailout_(false),
+ osr_(new(info->zone()) HOsrBuilder(this)) {
// This is not initialized in the initializer list because the
// constructor for the initial state relies on function_state_ == NULL
// to know it's the initial state.
function_state_= &initial_function_state_;
+ InitializeAstVisitor(info->isolate());
+ if (FLAG_emit_opt_code_positions) {
+ SetSourcePosition(info->shared_info()->start_position());
+ }
}
-HBasicBlock* HGraphBuilder::CreateJoin(HBasicBlock* first,
- HBasicBlock* second,
- BailoutId join_id) {
+
+HBasicBlock* HOptimizedGraphBuilder::CreateJoin(HBasicBlock* first,
+ HBasicBlock* second,
+ BailoutId join_id) {
if (first == NULL) {
return second;
} else if (second == NULL) {
return first;
} else {
- HBasicBlock* join_block = graph_->CreateBasicBlock();
- first->Goto(join_block);
- second->Goto(join_block);
+ HBasicBlock* join_block = graph()->CreateBasicBlock();
+ Goto(first, join_block);
+ Goto(second, join_block);
join_block->SetJoinId(join_id);
return join_block;
}
}
-HBasicBlock* HGraphBuilder::JoinContinue(IterationStatement* statement,
- HBasicBlock* exit_block,
- HBasicBlock* continue_block) {
+HBasicBlock* HOptimizedGraphBuilder::JoinContinue(IterationStatement* statement,
+ HBasicBlock* exit_block,
+ HBasicBlock* continue_block) {
if (continue_block != NULL) {
- if (exit_block != NULL) exit_block->Goto(continue_block);
+ if (exit_block != NULL) Goto(exit_block, continue_block);
continue_block->SetJoinId(statement->ContinueId());
return continue_block;
}
@@ -663,15 +2240,15 @@ HBasicBlock* HGraphBuilder::JoinContinue(IterationStatement* statement,
}
-HBasicBlock* HGraphBuilder::CreateLoop(IterationStatement* statement,
- HBasicBlock* loop_entry,
- HBasicBlock* body_exit,
- HBasicBlock* loop_successor,
- HBasicBlock* break_block) {
- if (body_exit != NULL) body_exit->Goto(loop_entry);
+HBasicBlock* HOptimizedGraphBuilder::CreateLoop(IterationStatement* statement,
+ HBasicBlock* loop_entry,
+ HBasicBlock* body_exit,
+ HBasicBlock* loop_successor,
+ HBasicBlock* break_block) {
+ if (body_exit != NULL) Goto(body_exit, loop_entry);
loop_entry->PostProcessLoopHeader(statement);
if (break_block != NULL) {
- if (loop_successor != NULL) loop_successor->Goto(break_block);
+ if (loop_successor != NULL) Goto(loop_successor, break_block);
break_block->SetJoinId(statement->ExitId());
return break_block;
}
@@ -679,8 +2256,26 @@ HBasicBlock* HGraphBuilder::CreateLoop(IterationStatement* statement,
}
-void HBasicBlock::FinishExit(HControlInstruction* instruction) {
- Finish(instruction);
+// Build a new loop header block and set it as the current block.
+HBasicBlock* HOptimizedGraphBuilder::BuildLoopEntry() {
+ HBasicBlock* loop_entry = CreateLoopHeaderBlock();
+ Goto(loop_entry);
+ set_current_block(loop_entry);
+ return loop_entry;
+}
+
+
+HBasicBlock* HOptimizedGraphBuilder::BuildLoopEntry(
+ IterationStatement* statement) {
+ HBasicBlock* loop_entry = osr()->HasOsrEntryAt(statement)
+ ? osr()->BuildOsrLoopEntry(statement)
+ : BuildLoopEntry();
+ return loop_entry;
+}
+
+
+void HBasicBlock::FinishExit(HControlInstruction* instruction, int position) {
+ Finish(instruction, position);
ClearEnvironment();
}
@@ -693,13 +2288,26 @@ HGraph::HGraph(CompilationInfo* info)
values_(16, info->zone()),
phi_list_(NULL),
uint32_instructions_(NULL),
+ osr_(NULL),
info_(info),
zone_(info->zone()),
is_recursive_(false),
use_optimistic_licm_(false),
- type_change_checksum_(0) {
- start_environment_ =
- new(zone_) HEnvironment(NULL, info->scope(), info->closure(), zone_);
+ depends_on_empty_array_proto_elements_(false),
+ type_change_checksum_(0),
+ maximum_environment_size_(0),
+ no_side_effects_scope_count_(0),
+ disallow_adding_new_values_(false) {
+ if (info->IsStub()) {
+ HydrogenCodeStub* stub = info->code_stub();
+ CodeStubInterfaceDescriptor* descriptor =
+ stub->GetInterfaceDescriptor(isolate_);
+ start_environment_ =
+ new(zone_) HEnvironment(zone_, descriptor->environment_length());
+ } else {
+ start_environment_ =
+ new(zone_) HEnvironment(NULL, info->scope(), info->closure(), zone_);
+ }
start_environment_->set_ast_id(BailoutId::FunctionEntry());
entry_block_ = CreateBasicBlock();
entry_block_->SetInitialEnvironment(start_environment_);
@@ -713,19 +2321,17 @@ HBasicBlock* HGraph::CreateBasicBlock() {
}
-void HGraph::Canonicalize() {
- if (!FLAG_use_canonicalizing) return;
- HPhase phase("H_Canonicalize", this);
+void HGraph::FinalizeUniqueness() {
+ DisallowHeapAllocation no_gc;
+ ASSERT(!isolate()->optimizing_compiler_thread()->IsOptimizerThread());
for (int i = 0; i < blocks()->length(); ++i) {
- HInstruction* instr = blocks()->at(i)->first();
- while (instr != NULL) {
- HValue* value = instr->Canonicalize();
- if (value != instr) instr->DeleteAndReplaceWith(value);
- instr = instr->next();
+ for (HInstructionIterator it(blocks()->at(i)); !it.Done(); it.Advance()) {
+ it.Current()->FinalizeUniqueness();
}
}
}
+
// Block ordering was implemented with two mutually recursive methods,
// HGraph::Postorder and HGraph::PostorderLoopBlocks.
// The recursion could lead to stack overflow so the algorithm has been
@@ -1022,7 +2628,7 @@ class PostorderProcessor : public ZoneObject {
void HGraph::OrderBlocks() {
- HPhase phase("H_Block ordering");
+ CompilationPhase phase("H_Block ordering", info());
BitVector visited(blocks_.length(), zone());
ZoneList<HBasicBlock*> reverse_result(8, zone());
@@ -1059,98 +2665,6 @@ void HGraph::AssignDominators() {
}
}
-// Mark all blocks that are dominated by an unconditional soft deoptimize to
-// prevent code motion across those blocks.
-void HGraph::PropagateDeoptimizingMark() {
- HPhase phase("H_Propagate deoptimizing mark", this);
- MarkAsDeoptimizingRecursively(entry_block());
-}
-
-void HGraph::MarkAsDeoptimizingRecursively(HBasicBlock* block) {
- for (int i = 0; i < block->dominated_blocks()->length(); ++i) {
- HBasicBlock* dominated = block->dominated_blocks()->at(i);
- if (block->IsDeoptimizing()) dominated->MarkAsDeoptimizing();
- MarkAsDeoptimizingRecursively(dominated);
- }
-}
-
-void HGraph::EliminateRedundantPhis() {
- HPhase phase("H_Redundant phi elimination", this);
-
- // Worklist of phis that can potentially be eliminated. Initialized with
- // all phi nodes. When elimination of a phi node modifies another phi node
- // the modified phi node is added to the worklist.
- ZoneList<HPhi*> worklist(blocks_.length(), zone());
- for (int i = 0; i < blocks_.length(); ++i) {
- worklist.AddAll(*blocks_[i]->phis(), zone());
- }
-
- while (!worklist.is_empty()) {
- HPhi* phi = worklist.RemoveLast();
- HBasicBlock* block = phi->block();
-
- // Skip phi node if it was already replaced.
- if (block == NULL) continue;
-
- // Get replacement value if phi is redundant.
- HValue* replacement = phi->GetRedundantReplacement();
-
- if (replacement != NULL) {
- // Iterate through the uses and replace them all.
- for (HUseIterator it(phi->uses()); !it.Done(); it.Advance()) {
- HValue* value = it.value();
- value->SetOperandAt(it.index(), replacement);
- if (value->IsPhi()) worklist.Add(HPhi::cast(value), zone());
- }
- block->RemovePhi(phi);
- }
- }
-}
-
-
-void HGraph::EliminateUnreachablePhis() {
- HPhase phase("H_Unreachable phi elimination", this);
-
- // Initialize worklist.
- ZoneList<HPhi*> phi_list(blocks_.length(), zone());
- ZoneList<HPhi*> worklist(blocks_.length(), zone());
- for (int i = 0; i < blocks_.length(); ++i) {
- for (int j = 0; j < blocks_[i]->phis()->length(); j++) {
- HPhi* phi = blocks_[i]->phis()->at(j);
- phi_list.Add(phi, zone());
- // We can't eliminate phis in the receiver position in the environment
- // because in case of throwing an error we need this value to
- // construct a stack trace.
- if (phi->HasRealUses() || phi->IsReceiver()) {
- phi->set_is_live(true);
- worklist.Add(phi, zone());
- }
- }
- }
-
- // Iteratively mark live phis.
- while (!worklist.is_empty()) {
- HPhi* phi = worklist.RemoveLast();
- for (int i = 0; i < phi->OperandCount(); i++) {
- HValue* operand = phi->OperandAt(i);
- if (operand->IsPhi() && !HPhi::cast(operand)->is_live()) {
- HPhi::cast(operand)->set_is_live(true);
- worklist.Add(HPhi::cast(operand), zone());
- }
- }
- }
-
- // Remove unreachable phis.
- for (int i = 0; i < phi_list.length(); i++) {
- HPhi* phi = phi_list[i];
- if (!phi->is_live()) {
- HBasicBlock* block = phi->block();
- block->RemovePhi(phi);
- block->RecordDeletedPhi(phi->merged_index());
- }
- }
-}
-
bool HGraph::CheckArgumentsPhiUses() {
int block_count = blocks_.length();
@@ -1192,1757 +2706,19 @@ void HGraph::CollectPhis() {
}
-void HGraph::InferTypes(ZoneList<HValue*>* worklist) {
- BitVector in_worklist(GetMaximumValueID(), zone());
- for (int i = 0; i < worklist->length(); ++i) {
- ASSERT(!in_worklist.Contains(worklist->at(i)->id()));
- in_worklist.Add(worklist->at(i)->id());
- }
-
- while (!worklist->is_empty()) {
- HValue* current = worklist->RemoveLast();
- in_worklist.Remove(current->id());
- if (current->UpdateInferredType()) {
- for (HUseIterator it(current->uses()); !it.Done(); it.Advance()) {
- HValue* use = it.value();
- if (!in_worklist.Contains(use->id())) {
- in_worklist.Add(use->id());
- worklist->Add(use, zone());
- }
- }
- }
- }
-}
-
-
-class HRangeAnalysis BASE_EMBEDDED {
- public:
- explicit HRangeAnalysis(HGraph* graph) :
- graph_(graph), zone_(graph->zone()), changed_ranges_(16, zone_) { }
-
- void Analyze();
-
- private:
- void TraceRange(const char* msg, ...);
- void Analyze(HBasicBlock* block);
- void InferControlFlowRange(HCompareIDAndBranch* test, HBasicBlock* dest);
- void UpdateControlFlowRange(Token::Value op, HValue* value, HValue* other);
- void InferRange(HValue* value);
- void RollBackTo(int index);
- void AddRange(HValue* value, Range* range);
-
- HGraph* graph_;
- Zone* zone_;
- ZoneList<HValue*> changed_ranges_;
-};
-
-
-void HRangeAnalysis::TraceRange(const char* msg, ...) {
- if (FLAG_trace_range) {
- va_list arguments;
- va_start(arguments, msg);
- OS::VPrint(msg, arguments);
- va_end(arguments);
- }
-}
-
-
-void HRangeAnalysis::Analyze() {
- HPhase phase("H_Range analysis", graph_);
- Analyze(graph_->entry_block());
-}
-
-
-void HRangeAnalysis::Analyze(HBasicBlock* block) {
- TraceRange("Analyzing block B%d\n", block->block_id());
-
- int last_changed_range = changed_ranges_.length() - 1;
-
- // Infer range based on control flow.
- if (block->predecessors()->length() == 1) {
- HBasicBlock* pred = block->predecessors()->first();
- if (pred->end()->IsCompareIDAndBranch()) {
- InferControlFlowRange(HCompareIDAndBranch::cast(pred->end()), block);
- }
- }
-
- // Process phi instructions.
- for (int i = 0; i < block->phis()->length(); ++i) {
- HPhi* phi = block->phis()->at(i);
- InferRange(phi);
- }
-
- // Go through all instructions of the current block.
- HInstruction* instr = block->first();
- while (instr != block->end()) {
- InferRange(instr);
- instr = instr->next();
- }
-
- // Continue analysis in all dominated blocks.
- for (int i = 0; i < block->dominated_blocks()->length(); ++i) {
- Analyze(block->dominated_blocks()->at(i));
- }
-
- RollBackTo(last_changed_range);
-}
-
-
-void HRangeAnalysis::InferControlFlowRange(HCompareIDAndBranch* test,
- HBasicBlock* dest) {
- ASSERT((test->FirstSuccessor() == dest) == (test->SecondSuccessor() != dest));
- if (test->GetInputRepresentation().IsInteger32()) {
- Token::Value op = test->token();
- if (test->SecondSuccessor() == dest) {
- op = Token::NegateCompareOp(op);
- }
- Token::Value inverted_op = Token::InvertCompareOp(op);
- UpdateControlFlowRange(op, test->left(), test->right());
- UpdateControlFlowRange(inverted_op, test->right(), test->left());
- }
-}
-
-
-// We know that value [op] other. Use this information to update the range on
-// value.
-void HRangeAnalysis::UpdateControlFlowRange(Token::Value op,
- HValue* value,
- HValue* other) {
- Range temp_range;
- Range* range = other->range() != NULL ? other->range() : &temp_range;
- Range* new_range = NULL;
-
- TraceRange("Control flow range infer %d %s %d\n",
- value->id(),
- Token::Name(op),
- other->id());
-
- if (op == Token::EQ || op == Token::EQ_STRICT) {
- // The same range has to apply for value.
- new_range = range->Copy(zone_);
- } else if (op == Token::LT || op == Token::LTE) {
- new_range = range->CopyClearLower(zone_);
- if (op == Token::LT) {
- new_range->AddConstant(-1);
- }
- } else if (op == Token::GT || op == Token::GTE) {
- new_range = range->CopyClearUpper(zone_);
- if (op == Token::GT) {
- new_range->AddConstant(1);
- }
- }
-
- if (new_range != NULL && !new_range->IsMostGeneric()) {
- AddRange(value, new_range);
- }
-}
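As a worked illustration of the rule implemented above (editorial sketch; SimpleRange and
NarrowForLessThan are invented names, not part of the V8 sources): if the compare is "x < y" and
y's known range is [5, 10], the true edge learns nothing about x's lower bound but caps x at 9,
which is exactly CopyClearLower followed by AddConstant(-1).

  #include <climits>

  // Hypothetical, self-contained mirror of the Token::LT case above.
  struct SimpleRange { int lower; int upper; };

  SimpleRange NarrowForLessThan(SimpleRange other) {
    // Copy the other side's range, drop its lower bound, and subtract one from
    // the upper bound (ignoring the overflow handling the real Range provides).
    return SimpleRange{INT_MIN, other.upper - 1};
  }

  // NarrowForLessThan({5, 10}) yields {INT_MIN, 9}: the range attached to x on
  // the edge where "x < y" is known to hold.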
-
-
-void HRangeAnalysis::InferRange(HValue* value) {
- ASSERT(!value->HasRange());
- if (!value->representation().IsNone()) {
- value->ComputeInitialRange(zone_);
- Range* range = value->range();
- TraceRange("Initial inferred range of %d (%s) set to [%d,%d]\n",
- value->id(),
- value->Mnemonic(),
- range->lower(),
- range->upper());
- }
-}
-
-
-void HRangeAnalysis::RollBackTo(int index) {
- for (int i = index + 1; i < changed_ranges_.length(); ++i) {
- changed_ranges_[i]->RemoveLastAddedRange();
- }
- changed_ranges_.Rewind(index + 1);
-}
-
-
-void HRangeAnalysis::AddRange(HValue* value, Range* range) {
- Range* original_range = value->range();
- value->AddNewRange(range, zone_);
- changed_ranges_.Add(value, zone_);
- Range* new_range = value->range();
- TraceRange("Updated range of %d set to [%d,%d]\n",
- value->id(),
- new_range->lower(),
- new_range->upper());
- if (original_range != NULL) {
- TraceRange("Original range was [%d,%d]\n",
- original_range->lower(),
- original_range->upper());
- }
- TraceRange("New information was [%d,%d]\n",
- range->lower(),
- range->upper());
-}
-
-
-void TraceGVN(const char* msg, ...) {
- va_list arguments;
- va_start(arguments, msg);
- OS::VPrint(msg, arguments);
- va_end(arguments);
-}
-
-// Wrap TraceGVN in macros to avoid the expense of evaluating its arguments when
-// --trace-gvn is off.
-#define TRACE_GVN_1(msg, a1) \
- if (FLAG_trace_gvn) { \
- TraceGVN(msg, a1); \
- }
-
-#define TRACE_GVN_2(msg, a1, a2) \
- if (FLAG_trace_gvn) { \
- TraceGVN(msg, a1, a2); \
- }
-
-#define TRACE_GVN_3(msg, a1, a2, a3) \
- if (FLAG_trace_gvn) { \
- TraceGVN(msg, a1, a2, a3); \
- }
-
-#define TRACE_GVN_4(msg, a1, a2, a3, a4) \
- if (FLAG_trace_gvn) { \
- TraceGVN(msg, a1, a2, a3, a4); \
- }
-
-#define TRACE_GVN_5(msg, a1, a2, a3, a4, a5) \
- if (FLAG_trace_gvn) { \
- TraceGVN(msg, a1, a2, a3, a4, a5); \
- }
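The flag check lives in the macro rather than inside TraceGVN so that the arguments are not
evaluated at all when tracing is disabled. With --trace-gvn off, a call site like the one below
(taken from ProcessLoopBlock later in this file) never pays for building the flags string:

  TRACE_GVN_2("Loop invariant motion for B%d %s\n",
              block->block_id(),
              *GetGVNFlagsString(depends_flags));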
-
-
-HValueMap::HValueMap(Zone* zone, const HValueMap* other)
- : array_size_(other->array_size_),
- lists_size_(other->lists_size_),
- count_(other->count_),
- present_flags_(other->present_flags_),
- array_(zone->NewArray<HValueMapListElement>(other->array_size_)),
- lists_(zone->NewArray<HValueMapListElement>(other->lists_size_)),
- free_list_head_(other->free_list_head_) {
- memcpy(array_, other->array_, array_size_ * sizeof(HValueMapListElement));
- memcpy(lists_, other->lists_, lists_size_ * sizeof(HValueMapListElement));
-}
-
-
-void HValueMap::Kill(GVNFlagSet flags) {
- GVNFlagSet depends_flags = HValue::ConvertChangesToDependsFlags(flags);
- if (!present_flags_.ContainsAnyOf(depends_flags)) return;
- present_flags_.RemoveAll();
- for (int i = 0; i < array_size_; ++i) {
- HValue* value = array_[i].value;
- if (value != NULL) {
- // Clear list of collisions first, so we know if it becomes empty.
- int kept = kNil; // List of kept elements.
- int next;
- for (int current = array_[i].next; current != kNil; current = next) {
- next = lists_[current].next;
- HValue* value = lists_[current].value;
- if (value->gvn_flags().ContainsAnyOf(depends_flags)) {
- // Drop it.
- count_--;
- lists_[current].next = free_list_head_;
- free_list_head_ = current;
- } else {
- // Keep it.
- lists_[current].next = kept;
- kept = current;
- present_flags_.Add(value->gvn_flags());
- }
- }
- array_[i].next = kept;
-
- // Now possibly drop directly indexed element.
- value = array_[i].value;
- if (value->gvn_flags().ContainsAnyOf(depends_flags)) { // Drop it.
- count_--;
- int head = array_[i].next;
- if (head == kNil) {
- array_[i].value = NULL;
- } else {
- array_[i].value = lists_[head].value;
- array_[i].next = lists_[head].next;
- lists_[head].next = free_list_head_;
- free_list_head_ = head;
- }
- } else {
- present_flags_.Add(value->gvn_flags()); // Keep it.
- }
- }
- }
-}
-
-
-HValue* HValueMap::Lookup(HValue* value) const {
- uint32_t hash = static_cast<uint32_t>(value->Hashcode());
- uint32_t pos = Bound(hash);
- if (array_[pos].value != NULL) {
- if (array_[pos].value->Equals(value)) return array_[pos].value;
- int next = array_[pos].next;
- while (next != kNil) {
- if (lists_[next].value->Equals(value)) return lists_[next].value;
- next = lists_[next].next;
- }
- }
- return NULL;
-}
-
-
-void HValueMap::Resize(int new_size, Zone* zone) {
- ASSERT(new_size > count_);
-  // Hashing the values into the new array produces no more collisions than in
-  // the old hash map, so we can reuse the existing lists_ array if we are careful.
-
- // Make sure we have at least one free element.
- if (free_list_head_ == kNil) {
- ResizeLists(lists_size_ << 1, zone);
- }
-
- HValueMapListElement* new_array =
- zone->NewArray<HValueMapListElement>(new_size);
- memset(new_array, 0, sizeof(HValueMapListElement) * new_size);
-
- HValueMapListElement* old_array = array_;
- int old_size = array_size_;
-
- int old_count = count_;
- count_ = 0;
- // Do not modify present_flags_. It is currently correct.
- array_size_ = new_size;
- array_ = new_array;
-
- if (old_array != NULL) {
- // Iterate over all the elements in lists, rehashing them.
- for (int i = 0; i < old_size; ++i) {
- if (old_array[i].value != NULL) {
- int current = old_array[i].next;
- while (current != kNil) {
- Insert(lists_[current].value, zone);
- int next = lists_[current].next;
- lists_[current].next = free_list_head_;
- free_list_head_ = current;
- current = next;
- }
- // Rehash the directly stored value.
- Insert(old_array[i].value, zone);
- }
- }
- }
- USE(old_count);
- ASSERT(count_ == old_count);
-}
-
-
-void HValueMap::ResizeLists(int new_size, Zone* zone) {
- ASSERT(new_size > lists_size_);
-
- HValueMapListElement* new_lists =
- zone->NewArray<HValueMapListElement>(new_size);
- memset(new_lists, 0, sizeof(HValueMapListElement) * new_size);
-
- HValueMapListElement* old_lists = lists_;
- int old_size = lists_size_;
-
- lists_size_ = new_size;
- lists_ = new_lists;
-
- if (old_lists != NULL) {
- memcpy(lists_, old_lists, old_size * sizeof(HValueMapListElement));
- }
- for (int i = old_size; i < lists_size_; ++i) {
- lists_[i].next = free_list_head_;
- free_list_head_ = i;
- }
-}
-
-
-void HValueMap::Insert(HValue* value, Zone* zone) {
- ASSERT(value != NULL);
-  // Resize when half of the hash table is filled up.
- if (count_ >= array_size_ >> 1) Resize(array_size_ << 1, zone);
- ASSERT(count_ < array_size_);
- count_++;
- uint32_t pos = Bound(static_cast<uint32_t>(value->Hashcode()));
- if (array_[pos].value == NULL) {
- array_[pos].value = value;
- array_[pos].next = kNil;
- } else {
- if (free_list_head_ == kNil) {
- ResizeLists(lists_size_ << 1, zone);
- }
- int new_element_pos = free_list_head_;
- ASSERT(new_element_pos != kNil);
- free_list_head_ = lists_[free_list_head_].next;
- lists_[new_element_pos].value = value;
- lists_[new_element_pos].next = array_[pos].next;
- ASSERT(array_[pos].next == kNil || lists_[array_[pos].next].value != NULL);
- array_[pos].next = new_element_pos;
- }
-}
-
-
-HSideEffectMap::HSideEffectMap() : count_(0) {
- memset(data_, 0, kNumberOfTrackedSideEffects * kPointerSize);
-}
-
-
-HSideEffectMap::HSideEffectMap(HSideEffectMap* other) : count_(other->count_) {
- *this = *other; // Calls operator=.
-}
-
-
-HSideEffectMap& HSideEffectMap::operator= (const HSideEffectMap& other) {
- if (this != &other) {
- memcpy(data_, other.data_, kNumberOfTrackedSideEffects * kPointerSize);
- }
- return *this;
-}
-
-void HSideEffectMap::Kill(GVNFlagSet flags) {
- for (int i = 0; i < kNumberOfTrackedSideEffects; i++) {
- GVNFlag changes_flag = HValue::ChangesFlagFromInt(i);
- if (flags.Contains(changes_flag)) {
- if (data_[i] != NULL) count_--;
- data_[i] = NULL;
- }
- }
-}
-
-
-void HSideEffectMap::Store(GVNFlagSet flags, HInstruction* instr) {
- for (int i = 0; i < kNumberOfTrackedSideEffects; i++) {
- GVNFlag changes_flag = HValue::ChangesFlagFromInt(i);
- if (flags.Contains(changes_flag)) {
- if (data_[i] == NULL) count_++;
- data_[i] = instr;
- }
- }
-}
-
-
-class HStackCheckEliminator BASE_EMBEDDED {
- public:
- explicit HStackCheckEliminator(HGraph* graph) : graph_(graph) { }
-
- void Process();
-
- private:
- HGraph* graph_;
-};
-
-
-void HStackCheckEliminator::Process() {
-  // For each loop block, walk the dominator tree from the backwards branch to
-  // the loop header. If a call instruction is encountered, the backwards branch
- // is dominated by a call and the stack check in the backwards branch can be
- // removed.
- for (int i = 0; i < graph_->blocks()->length(); i++) {
- HBasicBlock* block = graph_->blocks()->at(i);
- if (block->IsLoopHeader()) {
- HBasicBlock* back_edge = block->loop_information()->GetLastBackEdge();
- HBasicBlock* dominator = back_edge;
- while (true) {
- HInstruction* instr = dominator->first();
- while (instr != NULL) {
- if (instr->IsCall()) {
- block->loop_information()->stack_check()->Eliminate();
- break;
- }
- instr = instr->next();
- }
-
- // Done when the loop header is processed.
- if (dominator == block) break;
-
- // Move up the dominator tree.
- dominator = dominator->dominator();
- }
- }
- }
-}
-
-
-// Simple sparse set with O(1) add, contains, and clear.
-class SparseSet {
- public:
- SparseSet(Zone* zone, int capacity)
- : capacity_(capacity),
- length_(0),
- dense_(zone->NewArray<int>(capacity)),
- sparse_(zone->NewArray<int>(capacity)) {
-#ifndef NVALGRIND
- // Initialize the sparse array to make valgrind happy.
- memset(sparse_, 0, sizeof(sparse_[0]) * capacity);
-#endif
- }
-
- bool Contains(int n) const {
- ASSERT(0 <= n && n < capacity_);
- int d = sparse_[n];
- return 0 <= d && d < length_ && dense_[d] == n;
- }
-
- bool Add(int n) {
- if (Contains(n)) return false;
- dense_[length_] = n;
- sparse_[n] = length_;
- ++length_;
- return true;
- }
-
- void Clear() { length_ = 0; }
-
- private:
- int capacity_;
- int length_;
- int* dense_;
- int* sparse_;
-
- DISALLOW_COPY_AND_ASSIGN(SparseSet);
-};
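A minimal usage sketch of the class above (editorial example; it assumes a live Zone and the
ASSERT macro from this file's surroundings). The dense_/sparse_ cross-check in Contains is what
lets Clear simply reset length_ and lets sparse_ stay uninitialized; the memset above is only
there to keep valgrind quiet.

  void SparseSetExample(Zone* zone) {
    SparseSet set(zone, 16);
    set.Add(7);                 // dense_[0] = 7, sparse_[7] = 0
    ASSERT(set.Contains(7));    // dense_[sparse_[7]] == 7
    ASSERT(!set.Contains(3));   // cross-check fails for anything not added
    set.Clear();                // just resets length_
    ASSERT(!set.Contains(7));   // 0 <= sparse_[7] < length_ no longer holds
  }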
-
-
-class HGlobalValueNumberer BASE_EMBEDDED {
- public:
- explicit HGlobalValueNumberer(HGraph* graph, CompilationInfo* info)
- : graph_(graph),
- info_(info),
- removed_side_effects_(false),
- block_side_effects_(graph->blocks()->length(), graph->zone()),
- loop_side_effects_(graph->blocks()->length(), graph->zone()),
- visited_on_paths_(graph->zone(), graph->blocks()->length()) {
-#ifdef DEBUG
- ASSERT(info->isolate()->optimizing_compiler_thread()->IsOptimizerThread() ||
- !info->isolate()->heap()->IsAllocationAllowed());
-#endif
- block_side_effects_.AddBlock(GVNFlagSet(), graph_->blocks()->length(),
- graph_->zone());
- loop_side_effects_.AddBlock(GVNFlagSet(), graph_->blocks()->length(),
- graph_->zone());
- }
-
- // Returns true if values with side effects are removed.
- bool Analyze();
-
- private:
- GVNFlagSet CollectSideEffectsOnPathsToDominatedBlock(
- HBasicBlock* dominator,
- HBasicBlock* dominated);
- void AnalyzeGraph();
- void ComputeBlockSideEffects();
- void LoopInvariantCodeMotion();
- void ProcessLoopBlock(HBasicBlock* block,
- HBasicBlock* before_loop,
- GVNFlagSet loop_kills,
- GVNFlagSet* accumulated_first_time_depends,
- GVNFlagSet* accumulated_first_time_changes);
- bool AllowCodeMotion();
- bool ShouldMove(HInstruction* instr, HBasicBlock* loop_header);
-
- HGraph* graph() { return graph_; }
- CompilationInfo* info() { return info_; }
- Zone* zone() const { return graph_->zone(); }
-
- HGraph* graph_;
- CompilationInfo* info_;
- bool removed_side_effects_;
-
- // A map of block IDs to their side effects.
- ZoneList<GVNFlagSet> block_side_effects_;
-
- // A map of loop header block IDs to their loop's side effects.
- ZoneList<GVNFlagSet> loop_side_effects_;
-
- // Used when collecting side effects on paths from dominator to
- // dominated.
- SparseSet visited_on_paths_;
-};
-
-
-bool HGlobalValueNumberer::Analyze() {
- removed_side_effects_ = false;
- ComputeBlockSideEffects();
- if (FLAG_loop_invariant_code_motion) {
- LoopInvariantCodeMotion();
- }
- AnalyzeGraph();
- return removed_side_effects_;
-}
-
-
-void HGlobalValueNumberer::ComputeBlockSideEffects() {
- // The Analyze phase of GVN can be called multiple times. Clear loop side
- // effects before computing them to erase the contents from previous Analyze
- // passes.
- for (int i = 0; i < loop_side_effects_.length(); ++i) {
- loop_side_effects_[i].RemoveAll();
- }
- for (int i = graph_->blocks()->length() - 1; i >= 0; --i) {
- // Compute side effects for the block.
- HBasicBlock* block = graph_->blocks()->at(i);
- HInstruction* instr = block->first();
- int id = block->block_id();
- GVNFlagSet side_effects;
- while (instr != NULL) {
- side_effects.Add(instr->ChangesFlags());
- if (instr->IsSoftDeoptimize()) {
- block_side_effects_[id].RemoveAll();
- side_effects.RemoveAll();
- break;
- }
- instr = instr->next();
- }
- block_side_effects_[id].Add(side_effects);
-
- // Loop headers are part of their loop.
- if (block->IsLoopHeader()) {
- loop_side_effects_[id].Add(side_effects);
- }
-
- // Propagate loop side effects upwards.
- if (block->HasParentLoopHeader()) {
- int header_id = block->parent_loop_header()->block_id();
- loop_side_effects_[header_id].Add(block->IsLoopHeader()
- ? loop_side_effects_[id]
- : side_effects);
- }
- }
-}
-
-
-SmartArrayPointer<char> GetGVNFlagsString(GVNFlagSet flags) {
- char underlying_buffer[kLastFlag * 128];
- Vector<char> buffer(underlying_buffer, sizeof(underlying_buffer));
-#if DEBUG
- int offset = 0;
- const char* separator = "";
- const char* comma = ", ";
- buffer[0] = 0;
- uint32_t set_depends_on = 0;
- uint32_t set_changes = 0;
- for (int bit = 0; bit < kLastFlag; ++bit) {
- if ((flags.ToIntegral() & (1 << bit)) != 0) {
- if (bit % 2 == 0) {
- set_changes++;
- } else {
- set_depends_on++;
- }
- }
- }
- bool positive_changes = set_changes < (kLastFlag / 2);
- bool positive_depends_on = set_depends_on < (kLastFlag / 2);
- if (set_changes > 0) {
- if (positive_changes) {
- offset += OS::SNPrintF(buffer + offset, "changes [");
- } else {
- offset += OS::SNPrintF(buffer + offset, "changes all except [");
- }
- for (int bit = 0; bit < kLastFlag; ++bit) {
- if (((flags.ToIntegral() & (1 << bit)) != 0) == positive_changes) {
- switch (static_cast<GVNFlag>(bit)) {
-#define DECLARE_FLAG(type) \
- case kChanges##type: \
- offset += OS::SNPrintF(buffer + offset, separator); \
- offset += OS::SNPrintF(buffer + offset, #type); \
- separator = comma; \
- break;
-GVN_TRACKED_FLAG_LIST(DECLARE_FLAG)
-GVN_UNTRACKED_FLAG_LIST(DECLARE_FLAG)
-#undef DECLARE_FLAG
- default:
- break;
- }
- }
- }
- offset += OS::SNPrintF(buffer + offset, "]");
- }
- if (set_depends_on > 0) {
- separator = "";
- if (set_changes > 0) {
- offset += OS::SNPrintF(buffer + offset, ", ");
- }
- if (positive_depends_on) {
- offset += OS::SNPrintF(buffer + offset, "depends on [");
- } else {
- offset += OS::SNPrintF(buffer + offset, "depends on all except [");
- }
- for (int bit = 0; bit < kLastFlag; ++bit) {
- if (((flags.ToIntegral() & (1 << bit)) != 0) == positive_depends_on) {
- switch (static_cast<GVNFlag>(bit)) {
-#define DECLARE_FLAG(type) \
- case kDependsOn##type: \
- offset += OS::SNPrintF(buffer + offset, separator); \
- offset += OS::SNPrintF(buffer + offset, #type); \
- separator = comma; \
- break;
-GVN_TRACKED_FLAG_LIST(DECLARE_FLAG)
-GVN_UNTRACKED_FLAG_LIST(DECLARE_FLAG)
-#undef DECLARE_FLAG
- default:
- break;
- }
- }
- }
- offset += OS::SNPrintF(buffer + offset, "]");
- }
-#else
- OS::SNPrintF(buffer, "0x%08X", flags.ToIntegral());
-#endif
- size_t string_len = strlen(underlying_buffer) + 1;
- ASSERT(string_len <= sizeof(underlying_buffer));
- char* result = new char[strlen(underlying_buffer) + 1];
- memcpy(result, underlying_buffer, string_len);
- return SmartArrayPointer<char>(result);
-}
-
-
-void HGlobalValueNumberer::LoopInvariantCodeMotion() {
- TRACE_GVN_1("Using optimistic loop invariant code motion: %s\n",
- graph_->use_optimistic_licm() ? "yes" : "no");
- for (int i = graph_->blocks()->length() - 1; i >= 0; --i) {
- HBasicBlock* block = graph_->blocks()->at(i);
- if (block->IsLoopHeader()) {
- GVNFlagSet side_effects = loop_side_effects_[block->block_id()];
- TRACE_GVN_2("Try loop invariant motion for block B%d %s\n",
- block->block_id(),
- *GetGVNFlagsString(side_effects));
-
- GVNFlagSet accumulated_first_time_depends;
- GVNFlagSet accumulated_first_time_changes;
- HBasicBlock* last = block->loop_information()->GetLastBackEdge();
- for (int j = block->block_id(); j <= last->block_id(); ++j) {
- ProcessLoopBlock(graph_->blocks()->at(j), block, side_effects,
- &accumulated_first_time_depends,
- &accumulated_first_time_changes);
- }
- }
- }
-}
-
-
-void HGlobalValueNumberer::ProcessLoopBlock(
- HBasicBlock* block,
- HBasicBlock* loop_header,
- GVNFlagSet loop_kills,
- GVNFlagSet* first_time_depends,
- GVNFlagSet* first_time_changes) {
- HBasicBlock* pre_header = loop_header->predecessors()->at(0);
- GVNFlagSet depends_flags = HValue::ConvertChangesToDependsFlags(loop_kills);
- TRACE_GVN_2("Loop invariant motion for B%d %s\n",
- block->block_id(),
- *GetGVNFlagsString(depends_flags));
- HInstruction* instr = block->first();
- while (instr != NULL) {
- HInstruction* next = instr->next();
- bool hoisted = false;
- if (instr->CheckFlag(HValue::kUseGVN)) {
- TRACE_GVN_4("Checking instruction %d (%s) %s. Loop %s\n",
- instr->id(),
- instr->Mnemonic(),
- *GetGVNFlagsString(instr->gvn_flags()),
- *GetGVNFlagsString(loop_kills));
- bool can_hoist = !instr->gvn_flags().ContainsAnyOf(depends_flags);
- if (can_hoist && !graph()->use_optimistic_licm()) {
- can_hoist = block->IsLoopSuccessorDominator();
- }
-
- if (can_hoist) {
- bool inputs_loop_invariant = true;
- for (int i = 0; i < instr->OperandCount(); ++i) {
- if (instr->OperandAt(i)->IsDefinedAfter(pre_header)) {
- inputs_loop_invariant = false;
- }
- }
-
- if (inputs_loop_invariant && ShouldMove(instr, loop_header)) {
- TRACE_GVN_1("Hoisting loop invariant instruction %d\n", instr->id());
- // Move the instruction out of the loop.
- instr->Unlink();
- instr->InsertBefore(pre_header->end());
- if (instr->HasSideEffects()) removed_side_effects_ = true;
- hoisted = true;
- }
- }
- }
- if (!hoisted) {
- // If an instruction is not hoisted, we have to account for its side
- // effects when hoisting later HTransitionElementsKind instructions.
- GVNFlagSet previous_depends = *first_time_depends;
- GVNFlagSet previous_changes = *first_time_changes;
- first_time_depends->Add(instr->DependsOnFlags());
- first_time_changes->Add(instr->ChangesFlags());
- if (!(previous_depends == *first_time_depends)) {
- TRACE_GVN_1("Updated first-time accumulated %s\n",
- *GetGVNFlagsString(*first_time_depends));
- }
- if (!(previous_changes == *first_time_changes)) {
- TRACE_GVN_1("Updated first-time accumulated %s\n",
- *GetGVNFlagsString(*first_time_changes));
- }
- }
- instr = next;
- }
-}
-
-
-bool HGlobalValueNumberer::AllowCodeMotion() {
- return info()->shared_info()->opt_count() + 1 < FLAG_max_opt_count;
-}
-
-
-bool HGlobalValueNumberer::ShouldMove(HInstruction* instr,
- HBasicBlock* loop_header) {
- // If we've disabled code motion or we're in a block that unconditionally
- // deoptimizes, don't move any instructions.
- return AllowCodeMotion() && !instr->block()->IsDeoptimizing();
-}
-
-
-GVNFlagSet HGlobalValueNumberer::CollectSideEffectsOnPathsToDominatedBlock(
- HBasicBlock* dominator, HBasicBlock* dominated) {
- GVNFlagSet side_effects;
- for (int i = 0; i < dominated->predecessors()->length(); ++i) {
- HBasicBlock* block = dominated->predecessors()->at(i);
- if (dominator->block_id() < block->block_id() &&
- block->block_id() < dominated->block_id() &&
- visited_on_paths_.Add(block->block_id())) {
- side_effects.Add(block_side_effects_[block->block_id()]);
- if (block->IsLoopHeader()) {
- side_effects.Add(loop_side_effects_[block->block_id()]);
- }
- side_effects.Add(CollectSideEffectsOnPathsToDominatedBlock(
- dominator, block));
- }
- }
- return side_effects;
-}
-
-
-// Each instance of this class is like a "stack frame" for the recursive
-// traversal of the dominator tree done during GVN (the stack is handled
-// as a doubly linked list).
-// We reuse frames when possible so the list length is limited by the depth
-// of the dominator tree, but this forces us to initialize each frame by calling
-// an explicit "Initialize" method instead of using a constructor.
-class GvnBasicBlockState: public ZoneObject {
- public:
- static GvnBasicBlockState* CreateEntry(Zone* zone,
- HBasicBlock* entry_block,
- HValueMap* entry_map) {
- return new(zone)
- GvnBasicBlockState(NULL, entry_block, entry_map, NULL, zone);
- }
-
- HBasicBlock* block() { return block_; }
- HValueMap* map() { return map_; }
- HSideEffectMap* dominators() { return &dominators_; }
-
- GvnBasicBlockState* next_in_dominator_tree_traversal(
- Zone* zone,
- HBasicBlock** dominator) {
- // This assignment needs to happen before calling next_dominated() because
- // that call can reuse "this" if we are at the last dominated block.
- *dominator = block();
- GvnBasicBlockState* result = next_dominated(zone);
- if (result == NULL) {
- GvnBasicBlockState* dominator_state = pop();
- if (dominator_state != NULL) {
- // This branch is guaranteed not to return NULL because pop() never
- // returns a state where "is_done() == true".
- *dominator = dominator_state->block();
- result = dominator_state->next_dominated(zone);
- } else {
-      // Unnecessary (we are returning NULL), but done for cleanliness.
- *dominator = NULL;
- }
- }
- return result;
- }
-
- private:
- void Initialize(HBasicBlock* block,
- HValueMap* map,
- HSideEffectMap* dominators,
- bool copy_map,
- Zone* zone) {
- block_ = block;
- map_ = copy_map ? map->Copy(zone) : map;
- dominated_index_ = -1;
- length_ = block->dominated_blocks()->length();
- if (dominators != NULL) {
- dominators_ = *dominators;
- }
- }
- bool is_done() { return dominated_index_ >= length_; }
-
- GvnBasicBlockState(GvnBasicBlockState* previous,
- HBasicBlock* block,
- HValueMap* map,
- HSideEffectMap* dominators,
- Zone* zone)
- : previous_(previous), next_(NULL) {
- Initialize(block, map, dominators, true, zone);
- }
-
- GvnBasicBlockState* next_dominated(Zone* zone) {
- dominated_index_++;
- if (dominated_index_ == length_ - 1) {
- // No need to copy the map for the last child in the dominator tree.
- Initialize(block_->dominated_blocks()->at(dominated_index_),
- map(),
- dominators(),
- false,
- zone);
- return this;
- } else if (dominated_index_ < length_) {
- return push(zone,
- block_->dominated_blocks()->at(dominated_index_),
- dominators());
- } else {
- return NULL;
- }
- }
-
- GvnBasicBlockState* push(Zone* zone,
- HBasicBlock* block,
- HSideEffectMap* dominators) {
- if (next_ == NULL) {
- next_ =
- new(zone) GvnBasicBlockState(this, block, map(), dominators, zone);
- } else {
- next_->Initialize(block, map(), dominators, true, zone);
- }
- return next_;
- }
- GvnBasicBlockState* pop() {
- GvnBasicBlockState* result = previous_;
- while (result != NULL && result->is_done()) {
- TRACE_GVN_2("Backtracking from block B%d to block b%d\n",
- block()->block_id(),
- previous_->block()->block_id())
- result = result->previous_;
- }
- return result;
- }
-
- GvnBasicBlockState* previous_;
- GvnBasicBlockState* next_;
- HBasicBlock* block_;
- HValueMap* map_;
- HSideEffectMap dominators_;
- int dominated_index_;
- int length_;
-};
-
-// This is a recursive traversal of the dominator tree but it has been turned
-// into a loop to avoid stack overflows.
-// The logical "stack frames" of the recursion are kept in a list of
-// GvnBasicBlockState instances.
-void HGlobalValueNumberer::AnalyzeGraph() {
- HBasicBlock* entry_block = graph_->entry_block();
- HValueMap* entry_map = new(zone()) HValueMap(zone());
- GvnBasicBlockState* current =
- GvnBasicBlockState::CreateEntry(zone(), entry_block, entry_map);
-
- while (current != NULL) {
- HBasicBlock* block = current->block();
- HValueMap* map = current->map();
- HSideEffectMap* dominators = current->dominators();
-
- TRACE_GVN_2("Analyzing block B%d%s\n",
- block->block_id(),
- block->IsLoopHeader() ? " (loop header)" : "");
-
- // If this is a loop header kill everything killed by the loop.
- if (block->IsLoopHeader()) {
- map->Kill(loop_side_effects_[block->block_id()]);
- }
-
- // Go through all instructions of the current block.
- HInstruction* instr = block->first();
- while (instr != NULL) {
- HInstruction* next = instr->next();
- GVNFlagSet flags = instr->ChangesFlags();
- if (!flags.IsEmpty()) {
- // Clear all instructions in the map that are affected by side effects.
-        // Store the instruction as the dominating one for tracked side effects.
- map->Kill(flags);
- dominators->Store(flags, instr);
- TRACE_GVN_2("Instruction %d %s\n", instr->id(),
- *GetGVNFlagsString(flags));
- }
- if (instr->CheckFlag(HValue::kUseGVN)) {
- ASSERT(!instr->HasObservableSideEffects());
- HValue* other = map->Lookup(instr);
- if (other != NULL) {
- ASSERT(instr->Equals(other) && other->Equals(instr));
- TRACE_GVN_4("Replacing value %d (%s) with value %d (%s)\n",
- instr->id(),
- instr->Mnemonic(),
- other->id(),
- other->Mnemonic());
- if (instr->HasSideEffects()) removed_side_effects_ = true;
- instr->DeleteAndReplaceWith(other);
- } else {
- map->Add(instr, zone());
- }
- }
- if (instr->CheckFlag(HValue::kTrackSideEffectDominators)) {
- for (int i = 0; i < kNumberOfTrackedSideEffects; i++) {
- HValue* other = dominators->at(i);
- GVNFlag changes_flag = HValue::ChangesFlagFromInt(i);
- GVNFlag depends_on_flag = HValue::DependsOnFlagFromInt(i);
- if (instr->DependsOnFlags().Contains(depends_on_flag) &&
- (other != NULL)) {
- TRACE_GVN_5("Side-effect #%d in %d (%s) is dominated by %d (%s)\n",
- i,
- instr->id(),
- instr->Mnemonic(),
- other->id(),
- other->Mnemonic());
- instr->SetSideEffectDominator(changes_flag, other);
- }
- }
- }
- instr = next;
- }
-
- HBasicBlock* dominator_block;
- GvnBasicBlockState* next =
- current->next_in_dominator_tree_traversal(zone(), &dominator_block);
-
- if (next != NULL) {
- HBasicBlock* dominated = next->block();
- HValueMap* successor_map = next->map();
- HSideEffectMap* successor_dominators = next->dominators();
-
- // Kill everything killed on any path between this block and the
- // dominated block. We don't have to traverse these paths if the
-      // value map and the dominators list are already empty. If the range
- // of block ids (block_id, dominated_id) is empty there are no such
- // paths.
- if ((!successor_map->IsEmpty() || !successor_dominators->IsEmpty()) &&
- dominator_block->block_id() + 1 < dominated->block_id()) {
- visited_on_paths_.Clear();
- GVNFlagSet side_effects_on_all_paths =
- CollectSideEffectsOnPathsToDominatedBlock(dominator_block,
- dominated);
- successor_map->Kill(side_effects_on_all_paths);
- successor_dominators->Kill(side_effects_on_all_paths);
- }
- }
- current = next;
- }
-}
-
-
-class HInferRepresentation BASE_EMBEDDED {
- public:
- explicit HInferRepresentation(HGraph* graph)
- : graph_(graph),
- worklist_(8, graph->zone()),
- in_worklist_(graph->GetMaximumValueID(), graph->zone()) { }
-
- void Analyze();
-
- private:
- Representation TryChange(HValue* current);
- void AddToWorklist(HValue* current);
- void InferBasedOnInputs(HValue* current);
- void AddDependantsToWorklist(HValue* current);
- void InferBasedOnUses(HValue* current);
-
- Zone* zone() const { return graph_->zone(); }
-
- HGraph* graph_;
- ZoneList<HValue*> worklist_;
- BitVector in_worklist_;
-};
-
-
-void HInferRepresentation::AddToWorklist(HValue* current) {
- if (current->representation().IsSpecialization()) return;
- if (!current->CheckFlag(HValue::kFlexibleRepresentation)) return;
- if (in_worklist_.Contains(current->id())) return;
- worklist_.Add(current, zone());
- in_worklist_.Add(current->id());
-}
-
-
-// This method tries to specialize the representation type of the value
-// given as a parameter. The value is asked to infer its representation type
-// based on its inputs. If the inferred type is more specialized, then this
-// becomes the new representation type of the node.
-void HInferRepresentation::InferBasedOnInputs(HValue* current) {
- Representation r = current->representation();
- if (r.IsSpecialization()) return;
- ASSERT(current->CheckFlag(HValue::kFlexibleRepresentation));
- Representation inferred = current->InferredRepresentation();
- if (inferred.IsSpecialization()) {
- if (FLAG_trace_representation) {
- PrintF("Changing #%d representation %s -> %s based on inputs\n",
- current->id(),
- r.Mnemonic(),
- inferred.Mnemonic());
- }
- current->ChangeRepresentation(inferred);
- AddDependantsToWorklist(current);
- }
-}
-
-
-void HInferRepresentation::AddDependantsToWorklist(HValue* value) {
- for (HUseIterator it(value->uses()); !it.Done(); it.Advance()) {
- AddToWorklist(it.value());
- }
- for (int i = 0; i < value->OperandCount(); ++i) {
- AddToWorklist(value->OperandAt(i));
- }
-}
-
-
-// This method calculates whether specializing the representation of the value
-// given as the parameter has a benefit in terms of fewer necessary type
-// conversions. If there is a benefit, then the representation of the value is
-// specialized.
-void HInferRepresentation::InferBasedOnUses(HValue* value) {
- Representation r = value->representation();
- if (r.IsSpecialization() || value->HasNoUses()) return;
- ASSERT(value->CheckFlag(HValue::kFlexibleRepresentation));
- Representation new_rep = TryChange(value);
- if (!new_rep.IsNone()) {
- if (!value->representation().Equals(new_rep)) {
- if (FLAG_trace_representation) {
- PrintF("Changing #%d representation %s -> %s based on uses\n",
- value->id(),
- r.Mnemonic(),
- new_rep.Mnemonic());
- }
- value->ChangeRepresentation(new_rep);
- AddDependantsToWorklist(value);
- }
- }
-}
-
-
-Representation HInferRepresentation::TryChange(HValue* value) {
- // Array of use counts for each representation.
- int use_count[Representation::kNumRepresentations] = { 0 };
-
- for (HUseIterator it(value->uses()); !it.Done(); it.Advance()) {
- HValue* use = it.value();
- Representation rep = use->ObservedInputRepresentation(it.index());
- if (rep.IsNone()) continue;
- if (FLAG_trace_representation) {
- PrintF("%d %s is used by %d %s as %s\n",
- value->id(),
- value->Mnemonic(),
- use->id(),
- use->Mnemonic(),
- rep.Mnemonic());
- }
- if (use->IsPhi()) HPhi::cast(use)->AddIndirectUsesTo(&use_count[0]);
- use_count[rep.kind()] += use->LoopWeight();
- }
- int tagged_count = use_count[Representation::kTagged];
- int double_count = use_count[Representation::kDouble];
- int int32_count = use_count[Representation::kInteger32];
- int non_tagged_count = double_count + int32_count;
-
- // If a non-loop phi has tagged uses, don't convert it to untagged.
- if (value->IsPhi() && !value->block()->IsLoopHeader() && tagged_count > 0) {
- return Representation::None();
- }
-
-  // Prefer unboxing over boxing; the latter is more expensive.
- if (tagged_count > non_tagged_count) return Representation::None();
-
- // Prefer Integer32 over Double, if possible.
- if (int32_count > 0 && value->IsConvertibleToInteger()) {
- return Representation::Integer32();
- }
-
- if (double_count > 0) return Representation::Double();
-
- return Representation::None();
-}
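An illustrative walk-through of the counting heuristic above (the numbers are invented for the
example; in the real code each use is weighted by LoopWeight rather than counted once):

  // Suppose a convertible-to-integer, non-phi value has observed input
  // representations at its uses of:
  //   tagged: 1   double: 1   int32: 3
  // Then non_tagged_count = 4 > tagged_count = 1, so boxing is not preferred,
  // and since int32_count > 0 the method returns Representation::Integer32().
  // With tagged: 5 and non-tagged: 4 it would instead return
  // Representation::None() and leave the value tagged.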
-
-
-void HInferRepresentation::Analyze() {
- HPhase phase("H_Infer representations", graph_);
-
- // (1) Initialize bit vectors and count real uses. Each phi gets a
- // bit-vector of length <number of phis>.
- const ZoneList<HPhi*>* phi_list = graph_->phi_list();
- int phi_count = phi_list->length();
- ZoneList<BitVector*> connected_phis(phi_count, graph_->zone());
- for (int i = 0; i < phi_count; ++i) {
- phi_list->at(i)->InitRealUses(i);
- BitVector* connected_set = new(zone()) BitVector(phi_count, graph_->zone());
- connected_set->Add(i);
- connected_phis.Add(connected_set, zone());
- }
-
- // (2) Do a fixed point iteration to find the set of connected phis. A
- // phi is connected to another phi if its value is used either directly or
- // indirectly through a transitive closure of the def-use relation.
- bool change = true;
- while (change) {
- change = false;
- // We normally have far more "forward edges" than "backward edges",
- // so we terminate faster when we walk backwards.
- for (int i = phi_count - 1; i >= 0; --i) {
- HPhi* phi = phi_list->at(i);
- for (HUseIterator it(phi->uses()); !it.Done(); it.Advance()) {
- HValue* use = it.value();
- if (use->IsPhi()) {
- int id = HPhi::cast(use)->phi_id();
- if (connected_phis[i]->UnionIsChanged(*connected_phis[id]))
- change = true;
- }
- }
- }
- }
-
- // (3a) Use the phi reachability information from step 2 to
- // push information about values which can't be converted to integer
- // without deoptimization through the phi use-def chains, avoiding
- // unnecessary deoptimizations later.
- for (int i = 0; i < phi_count; ++i) {
- HPhi* phi = phi_list->at(i);
- bool cti = phi->AllOperandsConvertibleToInteger();
- if (cti) continue;
-
- for (BitVector::Iterator it(connected_phis.at(i));
- !it.Done();
- it.Advance()) {
- HPhi* phi = phi_list->at(it.Current());
- phi->set_is_convertible_to_integer(false);
- phi->ResetInteger32Uses();
- }
- }
-
- // (3b) Use the phi reachability information from step 2 to
- // sum up the non-phi use counts of all connected phis.
- for (int i = 0; i < phi_count; ++i) {
- HPhi* phi = phi_list->at(i);
- for (BitVector::Iterator it(connected_phis.at(i));
- !it.Done();
- it.Advance()) {
- int index = it.Current();
- HPhi* it_use = phi_list->at(index);
- if (index != i) phi->AddNonPhiUsesFrom(it_use); // Don't count twice.
- }
- }
-
- // Initialize work list
- for (int i = 0; i < graph_->blocks()->length(); ++i) {
- HBasicBlock* block = graph_->blocks()->at(i);
- const ZoneList<HPhi*>* phis = block->phis();
- for (int j = 0; j < phis->length(); ++j) {
- AddToWorklist(phis->at(j));
- }
-
- HInstruction* current = block->first();
- while (current != NULL) {
- AddToWorklist(current);
- current = current->next();
- }
- }
-
- // Do a fixed point iteration, trying to improve representations
- while (!worklist_.is_empty()) {
- HValue* current = worklist_.RemoveLast();
- in_worklist_.Remove(current->id());
- InferBasedOnInputs(current);
- InferBasedOnUses(current);
- }
-}
-
-
-void HGraph::InitializeInferredTypes() {
- HPhase phase("H_Inferring types", this);
- InitializeInferredTypes(0, this->blocks_.length() - 1);
-}
-
-
-void HGraph::InitializeInferredTypes(int from_inclusive, int to_inclusive) {
- for (int i = from_inclusive; i <= to_inclusive; ++i) {
- HBasicBlock* block = blocks_[i];
-
- const ZoneList<HPhi*>* phis = block->phis();
- for (int j = 0; j < phis->length(); j++) {
- phis->at(j)->UpdateInferredType();
- }
-
- HInstruction* current = block->first();
- while (current != NULL) {
- current->UpdateInferredType();
- current = current->next();
- }
-
- if (block->IsLoopHeader()) {
- HBasicBlock* last_back_edge =
- block->loop_information()->GetLastBackEdge();
- InitializeInferredTypes(i + 1, last_back_edge->block_id());
- // Skip all blocks already processed by the recursive call.
- i = last_back_edge->block_id();
-      // Update the phis of the loop header now that the whole loop body is
-      // guaranteed to be processed.
- ZoneList<HValue*> worklist(block->phis()->length(), zone());
- for (int j = 0; j < block->phis()->length(); ++j) {
- worklist.Add(block->phis()->at(j), zone());
- }
- InferTypes(&worklist);
- }
- }
-}
-
-
-void HGraph::PropagateMinusZeroChecks(HValue* value, BitVector* visited) {
- HValue* current = value;
- while (current != NULL) {
- if (visited->Contains(current->id())) return;
-
-    // For a phi, we must propagate the check to all of its inputs.
- if (current->IsPhi()) {
- visited->Add(current->id());
- HPhi* phi = HPhi::cast(current);
- for (int i = 0; i < phi->OperandCount(); ++i) {
- PropagateMinusZeroChecks(phi->OperandAt(i), visited);
- }
- break;
- }
-
- // For multiplication, division, and Math.min/max(), we must propagate
- // to the left and the right side.
- if (current->IsMul()) {
- HMul* mul = HMul::cast(current);
- mul->EnsureAndPropagateNotMinusZero(visited);
- PropagateMinusZeroChecks(mul->left(), visited);
- PropagateMinusZeroChecks(mul->right(), visited);
- } else if (current->IsDiv()) {
- HDiv* div = HDiv::cast(current);
- div->EnsureAndPropagateNotMinusZero(visited);
- PropagateMinusZeroChecks(div->left(), visited);
- PropagateMinusZeroChecks(div->right(), visited);
- } else if (current->IsMathMinMax()) {
- HMathMinMax* minmax = HMathMinMax::cast(current);
- visited->Add(minmax->id());
- PropagateMinusZeroChecks(minmax->left(), visited);
- PropagateMinusZeroChecks(minmax->right(), visited);
- }
-
- current = current->EnsureAndPropagateNotMinusZero(visited);
- }
-}
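A brief note on why multiplication, division, and Math.min/max are the operations singled out
above: IEEE double arithmetic keeps a signed zero (and Math.min/max can return -0), while int32
cannot represent -0, so these are precisely the places where truncating a result to int32 can
silently lose the sign of zero. A minimal standalone check (editorial example, not V8 code):

  #include <cassert>
  #include <cmath>

  int main() {
    double d = -1.0 * 0.0;            // IEEE: the product is -0.0
    assert(std::signbit(d));          // the sign of zero is observable...
    assert(d == 0.0);                 // ...even though -0.0 compares equal to 0.0
    int i = static_cast<int>(d);      // truncating to an integer drops the sign
    assert(i == 0);
    return 0;
  }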
-
-
-void HGraph::InsertRepresentationChangeForUse(HValue* value,
- HValue* use_value,
- int use_index,
- Representation to) {
- // Insert the representation change right before its use. For phi-uses we
- // insert at the end of the corresponding predecessor.
- HInstruction* next = NULL;
- if (use_value->IsPhi()) {
- next = use_value->block()->predecessors()->at(use_index)->end();
- } else {
- next = HInstruction::cast(use_value);
- }
-
- // For constants we try to make the representation change at compile
- // time. When a representation change is not possible without loss of
-  // information, we treat constants like normal instructions and insert the
- // change instructions for them.
- HInstruction* new_value = NULL;
- bool is_truncating = use_value->CheckFlag(HValue::kTruncatingToInt32);
- bool deoptimize_on_undefined =
- use_value->CheckFlag(HValue::kDeoptimizeOnUndefined);
- if (value->IsConstant()) {
- HConstant* constant = HConstant::cast(value);
- // Try to create a new copy of the constant with the new representation.
- new_value = is_truncating
- ? constant->CopyToTruncatedInt32(zone())
- : constant->CopyToRepresentation(to, zone());
- }
-
- if (new_value == NULL) {
- new_value = new(zone()) HChange(value, to,
- is_truncating, deoptimize_on_undefined);
- }
-
- new_value->InsertBefore(next);
- use_value->SetOperandAt(use_index, new_value);
-}
-
-
-void HGraph::InsertRepresentationChangesForValue(HValue* value) {
- Representation r = value->representation();
- if (r.IsNone()) return;
- if (value->HasNoUses()) return;
-
- for (HUseIterator it(value->uses()); !it.Done(); it.Advance()) {
- HValue* use_value = it.value();
- int use_index = it.index();
- Representation req = use_value->RequiredInputRepresentation(use_index);
- if (req.IsNone() || req.Equals(r)) continue;
- InsertRepresentationChangeForUse(value, use_value, use_index, req);
- }
- if (value->HasNoUses()) {
- ASSERT(value->IsConstant());
- value->DeleteAndReplaceWith(NULL);
- }
-
- // The only purpose of a HForceRepresentation is to represent the value
- // after the (possible) HChange instruction. We make it disappear.
- if (value->IsForceRepresentation()) {
- value->DeleteAndReplaceWith(HForceRepresentation::cast(value)->value());
- }
-}
-
-
-void HGraph::InsertRepresentationChanges() {
- HPhase phase("H_Representation changes", this);
-
- // Compute truncation flag for phis: Initially assume that all
- // int32-phis allow truncation and iteratively remove the ones that
- // are used in an operation that does not allow a truncating
- // conversion.
- // TODO(fschneider): Replace this with a worklist-based iteration.
- for (int i = 0; i < phi_list()->length(); i++) {
- HPhi* phi = phi_list()->at(i);
- if (phi->representation().IsInteger32()) {
- phi->SetFlag(HValue::kTruncatingToInt32);
- }
- }
- bool change = true;
- while (change) {
- change = false;
- for (int i = 0; i < phi_list()->length(); i++) {
- HPhi* phi = phi_list()->at(i);
- if (!phi->CheckFlag(HValue::kTruncatingToInt32)) continue;
- if (!phi->CheckUsesForFlag(HValue::kTruncatingToInt32)) {
- phi->ClearFlag(HValue::kTruncatingToInt32);
- change = true;
- }
- }
- }
-
- for (int i = 0; i < blocks_.length(); ++i) {
- // Process phi instructions first.
- const ZoneList<HPhi*>* phis = blocks_[i]->phis();
- for (int j = 0; j < phis->length(); j++) {
- InsertRepresentationChangesForValue(phis->at(j));
- }
-
- // Process normal instructions.
- HInstruction* current = blocks_[i]->first();
- while (current != NULL) {
- InsertRepresentationChangesForValue(current);
- current = current->next();
- }
- }
-}
-
-
-void HGraph::RecursivelyMarkPhiDeoptimizeOnUndefined(HPhi* phi) {
- if (phi->CheckFlag(HValue::kDeoptimizeOnUndefined)) return;
- phi->SetFlag(HValue::kDeoptimizeOnUndefined);
- for (int i = 0; i < phi->OperandCount(); ++i) {
- HValue* input = phi->OperandAt(i);
- if (input->IsPhi()) {
- RecursivelyMarkPhiDeoptimizeOnUndefined(HPhi::cast(input));
- }
- }
-}
-
-
-void HGraph::MarkDeoptimizeOnUndefined() {
- HPhase phase("H_MarkDeoptimizeOnUndefined", this);
- // Compute DeoptimizeOnUndefined flag for phis.
- // Any phi that can reach a use with DeoptimizeOnUndefined set must
- // have DeoptimizeOnUndefined set. Currently only HCompareIDAndBranch, with
- // double input representation, has this flag set.
- // The flag is used by HChange tagged->double, which must deoptimize
- // if one of its uses has this flag set.
- for (int i = 0; i < phi_list()->length(); i++) {
- HPhi* phi = phi_list()->at(i);
- if (phi->representation().IsDouble()) {
- for (HUseIterator it(phi->uses()); !it.Done(); it.Advance()) {
- if (it.value()->CheckFlag(HValue::kDeoptimizeOnUndefined)) {
- RecursivelyMarkPhiDeoptimizeOnUndefined(phi);
- break;
- }
- }
- }
- }
-}
-
-
-// Discover instructions that can be marked with the kUint32 flag, allowing
-// them to produce full-range uint32 values.
-class Uint32Analysis BASE_EMBEDDED {
- public:
- explicit Uint32Analysis(Zone* zone) : zone_(zone), phis_(4, zone) { }
-
- void Analyze(HInstruction* current);
-
- void UnmarkUnsafePhis();
-
- private:
- bool IsSafeUint32Use(HValue* val, HValue* use);
- bool Uint32UsesAreSafe(HValue* uint32val);
- bool CheckPhiOperands(HPhi* phi);
- void UnmarkPhi(HPhi* phi, ZoneList<HPhi*>* worklist);
-
- Zone* zone_;
- ZoneList<HPhi*> phis_;
-};
-
-
-bool Uint32Analysis::IsSafeUint32Use(HValue* val, HValue* use) {
-  // Operations that operate on bits are safe.
- if (use->IsBitwise() ||
- use->IsShl() ||
- use->IsSar() ||
- use->IsShr() ||
- use->IsBitNot()) {
- return true;
- } else if (use->IsChange() || use->IsSimulate()) {
-    // Conversions and deoptimization have special support for uint32.
- return true;
- } else if (use->IsStoreKeyedSpecializedArrayElement()) {
-    // Storing a value into an external integer array is a bit-level operation.
- HStoreKeyedSpecializedArrayElement* store =
- HStoreKeyedSpecializedArrayElement::cast(use);
-
- if (store->value() == val) {
-      // Clamping or a conversion to double should have been inserted.
- ASSERT(store->elements_kind() != EXTERNAL_PIXEL_ELEMENTS);
- ASSERT(store->elements_kind() != EXTERNAL_FLOAT_ELEMENTS);
- ASSERT(store->elements_kind() != EXTERNAL_DOUBLE_ELEMENTS);
- return true;
- }
- }
-
- return false;
-}
-
-
-// Iterate over all uses and verify that they are uint32 safe: either don't
-// distinguish between int32 and uint32 due to their bitwise nature or
-// have special support for uint32 values.
-// Encountered phis are optimistically treated as safe uint32 uses,
-// marked with the kUint32 flag and collected in the phis_ list. A separate
-// pass is performed later by UnmarkUnsafePhis to clear kUint32 from
-// phis that are not actually uint32-safe (it requires fixed point iteration).
-bool Uint32Analysis::Uint32UsesAreSafe(HValue* uint32val) {
- bool collect_phi_uses = false;
- for (HUseIterator it(uint32val->uses()); !it.Done(); it.Advance()) {
- HValue* use = it.value();
-
- if (use->IsPhi()) {
- if (!use->CheckFlag(HInstruction::kUint32)) {
-        // There is a phi use of this value from a phi that is not yet
-        // collected in the phis_ array. A separate pass is required.
- collect_phi_uses = true;
- }
-
- // Optimistically treat phis as uint32 safe.
- continue;
- }
-
- if (!IsSafeUint32Use(uint32val, use)) {
- return false;
- }
- }
-
- if (collect_phi_uses) {
- for (HUseIterator it(uint32val->uses()); !it.Done(); it.Advance()) {
- HValue* use = it.value();
-
-      // There is a phi use of this value from a phi that is not yet
-      // collected in the phis_ array. A separate pass is required.
- if (use->IsPhi() && !use->CheckFlag(HInstruction::kUint32)) {
- use->SetFlag(HInstruction::kUint32);
- phis_.Add(HPhi::cast(use), zone_);
- }
- }
- }
-
- return true;
-}
-
-
-// Analyze instruction and mark it with kUint32 if all its uses are uint32
-// safe.
-void Uint32Analysis::Analyze(HInstruction* current) {
- if (Uint32UsesAreSafe(current)) current->SetFlag(HInstruction::kUint32);
-}
-
-
-// Check if all operands to the given phi are marked with kUint32 flag.
-bool Uint32Analysis::CheckPhiOperands(HPhi* phi) {
- if (!phi->CheckFlag(HInstruction::kUint32)) {
- // This phi is not uint32 safe. No need to check operands.
- return false;
- }
-
- for (int j = 0; j < phi->OperandCount(); j++) {
- HValue* operand = phi->OperandAt(j);
- if (!operand->CheckFlag(HInstruction::kUint32)) {
-      // Lazily mark constants that fit into the uint32 range with the kUint32 flag.
- if (operand->IsConstant() &&
- HConstant::cast(operand)->IsUint32()) {
- operand->SetFlag(HInstruction::kUint32);
- continue;
- }
-
-      // This phi is not safe; some operands are not uint32 values.
- return false;
- }
- }
-
- return true;
-}
-
-
-// Remove the kUint32 flag from the phi itself and its operands. If any operand
-// was a phi marked with kUint32, place it into a worklist for
-// transitive clearing of the kUint32 flag.
-void Uint32Analysis::UnmarkPhi(HPhi* phi, ZoneList<HPhi*>* worklist) {
- phi->ClearFlag(HInstruction::kUint32);
- for (int j = 0; j < phi->OperandCount(); j++) {
- HValue* operand = phi->OperandAt(j);
- if (operand->CheckFlag(HInstruction::kUint32)) {
- operand->ClearFlag(HInstruction::kUint32);
- if (operand->IsPhi()) {
- worklist->Add(HPhi::cast(operand), zone_);
- }
- }
- }
-}
-
-
-void Uint32Analysis::UnmarkUnsafePhis() {
- // No phis were collected. Nothing to do.
- if (phis_.length() == 0) return;
-
- // Worklist used to transitively clear kUint32 from phis that
- // are used as arguments to other phis.
- ZoneList<HPhi*> worklist(phis_.length(), zone_);
-
-  // A phi can be used as a uint32 value if and only if
- // all its operands are uint32 values and all its
- // uses are uint32 safe.
-
- // Iterate over collected phis and unmark those that
-  // are unsafe. When unmarking a phi, unmark its operands
-  // and add each operand to the worklist if it is a phi as well.
- // Phis that are still marked as safe are shifted down
- // so that all safe phis form a prefix of the phis_ array.
- int phi_count = 0;
- for (int i = 0; i < phis_.length(); i++) {
- HPhi* phi = phis_[i];
-
- if (CheckPhiOperands(phi) && Uint32UsesAreSafe(phi)) {
- phis_[phi_count++] = phi;
- } else {
- UnmarkPhi(phi, &worklist);
- }
- }
-
-  // Now the phis array contains only those phis that have safe
-  // non-phi uses. Start transitively clearing the kUint32 flag
-  // from phi operands of discovered non-safe phis until
-  // only safe phis are left.
- while (!worklist.is_empty()) {
- while (!worklist.is_empty()) {
- HPhi* phi = worklist.RemoveLast();
- UnmarkPhi(phi, &worklist);
- }
-
-    // Check if any operands to safe phis were unmarked,
-    // turning a safe phi into an unsafe one. The same value
- // can flow into several phis.
- int new_phi_count = 0;
- for (int i = 0; i < phi_count; i++) {
- HPhi* phi = phis_[i];
-
- if (CheckPhiOperands(phi)) {
- phis_[new_phi_count++] = phi;
- } else {
- UnmarkPhi(phi, &worklist);
- }
- }
- phi_count = new_phi_count;
- }
-}
-
-
-void HGraph::ComputeSafeUint32Operations() {
- if (!FLAG_opt_safe_uint32_operations || uint32_instructions_ == NULL) {
- return;
- }
-
- Uint32Analysis analysis(zone());
- for (int i = 0; i < uint32_instructions_->length(); ++i) {
- HInstruction* current = uint32_instructions_->at(i);
- if (current->IsLinked() && current->representation().IsInteger32()) {
- analysis.Analyze(current);
- }
- }
-
-  // Some phis might have been optimistically marked with the kUint32 flag.
-  // Remove this flag from those phis that are unsafe and propagate
-  // this information transitively, potentially clearing the kUint32 flag
- // from some non-phi operations that are used as operands to unsafe phis.
- analysis.UnmarkUnsafePhis();
-}
-
-
-void HGraph::ComputeMinusZeroChecks() {
- BitVector visited(GetMaximumValueID(), zone());
- for (int i = 0; i < blocks_.length(); ++i) {
- for (HInstruction* current = blocks_[i]->first();
- current != NULL;
- current = current->next()) {
- if (current->IsChange()) {
- HChange* change = HChange::cast(current);
- // Propagate flags for negative zero checks upwards from conversions
- // int32-to-tagged and int32-to-double.
- Representation from = change->value()->representation();
- ASSERT(from.Equals(change->from()));
- if (from.IsInteger32()) {
- ASSERT(change->to().IsTagged() || change->to().IsDouble());
- ASSERT(visited.IsEmpty());
- PropagateMinusZeroChecks(change->value(), &visited);
- visited.Clear();
- }
- }
- }
- }
-}
-
-
// Implementation of utility class to encapsulate the translation state for
// a (possibly inlined) function.
-FunctionState::FunctionState(HGraphBuilder* owner,
+FunctionState::FunctionState(HOptimizedGraphBuilder* owner,
CompilationInfo* info,
- TypeFeedbackOracle* oracle,
InliningKind inlining_kind)
: owner_(owner),
compilation_info_(info),
- oracle_(oracle),
call_context_(NULL),
inlining_kind_(inlining_kind),
function_return_(NULL),
test_context_(NULL),
entry_(NULL),
+ arguments_object_(NULL),
arguments_elements_(NULL),
outer_(owner->function_state()) {
if (outer_ != NULL) {
@@ -2950,18 +2726,16 @@ FunctionState::FunctionState(HGraphBuilder* owner,
if (owner->ast_context()->IsTest()) {
HBasicBlock* if_true = owner->graph()->CreateBasicBlock();
HBasicBlock* if_false = owner->graph()->CreateBasicBlock();
- if_true->MarkAsInlineReturnTarget();
- if_false->MarkAsInlineReturnTarget();
+ if_true->MarkAsInlineReturnTarget(owner->current_block());
+ if_false->MarkAsInlineReturnTarget(owner->current_block());
TestContext* outer_test_context = TestContext::cast(owner->ast_context());
Expression* cond = outer_test_context->condition();
- TypeFeedbackOracle* outer_oracle = outer_test_context->oracle();
// The AstContext constructor pushed on the context stack. This newed
// instance is the reason that AstContext can't be BASE_EMBEDDED.
- test_context_ =
- new TestContext(owner, cond, outer_oracle, if_true, if_false);
+ test_context_ = new TestContext(owner, cond, if_true, if_false);
} else {
function_return_ = owner->graph()->CreateBasicBlock();
- function_return()->MarkAsInlineReturnTarget();
+ function_return()->MarkAsInlineReturnTarget(owner->current_block());
}
// Set this after possibly allocating a new TestContext above.
call_context_ = owner->ast_context();
@@ -2980,7 +2754,7 @@ FunctionState::~FunctionState() {
// Implementation of utility classes to represent an expression's context in
// the AST.
-AstContext::AstContext(HGraphBuilder* owner, Expression::Context kind)
+AstContext::AstContext(HOptimizedGraphBuilder* owner, Expression::Context kind)
: owner_(owner),
kind_(kind),
outer_(owner->ast_context()),
@@ -3023,7 +2797,7 @@ void ValueContext::ReturnValue(HValue* value) {
// The value is tracked in the bailout environment, and communicated
// through the environment as the result of the expression.
if (!arguments_allowed() && value->CheckFlag(HValue::kIsArguments)) {
- owner()->Bailout("bad value context for arguments value");
+ owner()->Bailout(kBadValueContextForArgumentsValue);
}
owner()->Push(value);
}
@@ -3037,7 +2811,9 @@ void TestContext::ReturnValue(HValue* value) {
void EffectContext::ReturnInstruction(HInstruction* instr, BailoutId ast_id) {
ASSERT(!instr->IsControlInstruction());
owner()->AddInstruction(instr);
- if (instr->HasObservableSideEffects()) owner()->AddSimulate(ast_id);
+ if (instr->HasObservableSideEffects()) {
+ owner()->Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
+ }
}
@@ -3048,33 +2824,51 @@ void EffectContext::ReturnControl(HControlInstruction* instr,
HBasicBlock* empty_false = owner()->graph()->CreateBasicBlock();
instr->SetSuccessorAt(0, empty_true);
instr->SetSuccessorAt(1, empty_false);
- owner()->current_block()->Finish(instr);
+ owner()->FinishCurrentBlock(instr);
HBasicBlock* join = owner()->CreateJoin(empty_true, empty_false, ast_id);
owner()->set_current_block(join);
}
+void EffectContext::ReturnContinuation(HIfContinuation* continuation,
+ BailoutId ast_id) {
+ HBasicBlock* true_branch = NULL;
+ HBasicBlock* false_branch = NULL;
+ continuation->Continue(&true_branch, &false_branch);
+ if (!continuation->IsTrueReachable()) {
+ owner()->set_current_block(false_branch);
+ } else if (!continuation->IsFalseReachable()) {
+ owner()->set_current_block(true_branch);
+ } else {
+ HBasicBlock* join = owner()->CreateJoin(true_branch, false_branch, ast_id);
+ owner()->set_current_block(join);
+ }
+}
+
+
void ValueContext::ReturnInstruction(HInstruction* instr, BailoutId ast_id) {
ASSERT(!instr->IsControlInstruction());
if (!arguments_allowed() && instr->CheckFlag(HValue::kIsArguments)) {
- return owner()->Bailout("bad value context for arguments object value");
+ return owner()->Bailout(kBadValueContextForArgumentsObjectValue);
}
owner()->AddInstruction(instr);
owner()->Push(instr);
- if (instr->HasObservableSideEffects()) owner()->AddSimulate(ast_id);
+ if (instr->HasObservableSideEffects()) {
+ owner()->Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
+ }
}
void ValueContext::ReturnControl(HControlInstruction* instr, BailoutId ast_id) {
ASSERT(!instr->HasObservableSideEffects());
if (!arguments_allowed() && instr->CheckFlag(HValue::kIsArguments)) {
- return owner()->Bailout("bad value context for arguments object value");
+ return owner()->Bailout(kBadValueContextForArgumentsObjectValue);
}
HBasicBlock* materialize_false = owner()->graph()->CreateBasicBlock();
HBasicBlock* materialize_true = owner()->graph()->CreateBasicBlock();
instr->SetSuccessorAt(0, materialize_true);
instr->SetSuccessorAt(1, materialize_false);
- owner()->current_block()->Finish(instr);
+ owner()->FinishCurrentBlock(instr);
owner()->set_current_block(materialize_true);
owner()->Push(owner()->graph()->GetConstantTrue());
owner()->set_current_block(materialize_false);
@@ -3085,15 +2879,38 @@ void ValueContext::ReturnControl(HControlInstruction* instr, BailoutId ast_id) {
}
+void ValueContext::ReturnContinuation(HIfContinuation* continuation,
+ BailoutId ast_id) {
+ HBasicBlock* materialize_true = NULL;
+ HBasicBlock* materialize_false = NULL;
+ continuation->Continue(&materialize_true, &materialize_false);
+ if (continuation->IsTrueReachable()) {
+ owner()->set_current_block(materialize_true);
+ owner()->Push(owner()->graph()->GetConstantTrue());
+ owner()->set_current_block(materialize_true);
+ }
+ if (continuation->IsFalseReachable()) {
+ owner()->set_current_block(materialize_false);
+ owner()->Push(owner()->graph()->GetConstantFalse());
+ owner()->set_current_block(materialize_false);
+ }
+ if (continuation->TrueAndFalseReachable()) {
+ HBasicBlock* join =
+ owner()->CreateJoin(materialize_true, materialize_false, ast_id);
+ owner()->set_current_block(join);
+ }
+}
+
+
void TestContext::ReturnInstruction(HInstruction* instr, BailoutId ast_id) {
ASSERT(!instr->IsControlInstruction());
- HGraphBuilder* builder = owner();
+ HOptimizedGraphBuilder* builder = owner();
builder->AddInstruction(instr);
// We expect a simulate after every expression with side effects, though
// this one isn't actually needed (and wouldn't work if it were targeted).
if (instr->HasObservableSideEffects()) {
builder->Push(instr);
- builder->AddSimulate(ast_id);
+ builder->Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
builder->Pop();
}
BuildBranch(instr);
@@ -3106,9 +2923,24 @@ void TestContext::ReturnControl(HControlInstruction* instr, BailoutId ast_id) {
HBasicBlock* empty_false = owner()->graph()->CreateBasicBlock();
instr->SetSuccessorAt(0, empty_true);
instr->SetSuccessorAt(1, empty_false);
- owner()->current_block()->Finish(instr);
- empty_true->Goto(if_true(), owner()->function_state());
- empty_false->Goto(if_false(), owner()->function_state());
+ owner()->FinishCurrentBlock(instr);
+ owner()->Goto(empty_true, if_true(), owner()->function_state());
+ owner()->Goto(empty_false, if_false(), owner()->function_state());
+ owner()->set_current_block(NULL);
+}
+
+
+void TestContext::ReturnContinuation(HIfContinuation* continuation,
+ BailoutId ast_id) {
+ HBasicBlock* true_branch = NULL;
+ HBasicBlock* false_branch = NULL;
+ continuation->Continue(&true_branch, &false_branch);
+ if (continuation->IsTrueReachable()) {
+ owner()->Goto(true_branch, if_true(), owner()->function_state());
+ }
+ if (continuation->IsFalseReachable()) {
+ owner()->Goto(false_branch, if_false(), owner()->function_state());
+ }
owner()->set_current_block(NULL);
}
@@ -3118,24 +2950,23 @@ void TestContext::BuildBranch(HValue* value) {
// connects a branch node to a join node. We conservatively ensure that
// property by always adding an empty block on the outgoing edges of this
// branch.
- HGraphBuilder* builder = owner();
+ HOptimizedGraphBuilder* builder = owner();
if (value != NULL && value->CheckFlag(HValue::kIsArguments)) {
- builder->Bailout("arguments object value in a test context");
+ builder->Bailout(kArgumentsObjectValueInATestContext);
}
HBasicBlock* empty_true = builder->graph()->CreateBasicBlock();
HBasicBlock* empty_false = builder->graph()->CreateBasicBlock();
- TypeFeedbackId test_id = condition()->test_id();
- ToBooleanStub::Types expected(oracle()->ToBooleanTypes(test_id));
- HBranch* test = new(zone()) HBranch(value, empty_true, empty_false, expected);
- builder->current_block()->Finish(test);
+ ToBooleanStub::Types expected(condition()->to_boolean_types());
+ builder->FinishCurrentBlock(builder->New<HBranch>(
+ value, expected, empty_true, empty_false));
- empty_true->Goto(if_true(), owner()->function_state());
- empty_false->Goto(if_false(), owner()->function_state());
+ owner()->Goto(empty_true, if_true(), builder->function_state());
+ owner()->Goto(empty_false, if_false(), builder->function_state());
builder->set_current_block(NULL);
}
-// HGraphBuilder infrastructure for bailing out and checking bailouts.
+// HOptimizedGraphBuilder infrastructure for bailing out and checking bailouts.
#define CHECK_BAILOUT(call) \
do { \
call; \
@@ -3150,25 +2981,33 @@ void TestContext::BuildBranch(HValue* value) {
} while (false)
-void HGraphBuilder::Bailout(const char* reason) {
- info()->set_bailout_reason(reason);
+#define CHECK_ALIVE_OR_RETURN(call, value) \
+ do { \
+ call; \
+ if (HasStackOverflow() || current_block() == NULL) return value; \
+ } while (false)
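// Illustrative example (not part of the patch): like CHECK_BAILOUT above, the
// new CHECK_ALIVE_OR_RETURN macro wraps its body in do { ... } while (false)
// so a multi-statement macro still behaves as one statement, e.g. under an
// unbraced if/else:
#include <cstdio>

#define CHECK_POSITIVE(x)                  \
  do {                                     \
    if ((x) <= 0) return;                  \
    std::printf("%d is positive\n", (x));  \
  } while (false)

void MacroIdiomDemo(int value) {
  if (value != 0)
    CHECK_POSITIVE(value);  // expands safely, no dangling-else surprise
  else
    std::printf("value is zero\n");
}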
+
+
+void HOptimizedGraphBuilder::Bailout(BailoutReason reason) {
+ current_info()->set_bailout_reason(reason);
SetStackOverflow();
}
-void HGraphBuilder::VisitForEffect(Expression* expr) {
+void HOptimizedGraphBuilder::VisitForEffect(Expression* expr) {
EffectContext for_effect(this);
Visit(expr);
}
-void HGraphBuilder::VisitForValue(Expression* expr, ArgumentsAllowedFlag flag) {
+void HOptimizedGraphBuilder::VisitForValue(Expression* expr,
+ ArgumentsAllowedFlag flag) {
ValueContext for_value(this, flag);
Visit(expr);
}
-void HGraphBuilder::VisitForTypeOf(Expression* expr) {
+void HOptimizedGraphBuilder::VisitForTypeOf(Expression* expr) {
ValueContext for_value(this, ARGUMENTS_NOT_ALLOWED);
for_value.set_for_typeof(true);
Visit(expr);
@@ -3176,675 +3015,229 @@ void HGraphBuilder::VisitForTypeOf(Expression* expr) {
-void HGraphBuilder::VisitForControl(Expression* expr,
- HBasicBlock* true_block,
- HBasicBlock* false_block) {
- TestContext for_test(this, expr, oracle(), true_block, false_block);
+void HOptimizedGraphBuilder::VisitForControl(Expression* expr,
+ HBasicBlock* true_block,
+ HBasicBlock* false_block) {
+ TestContext for_test(this, expr, true_block, false_block);
Visit(expr);
}
-void HGraphBuilder::VisitArgument(Expression* expr) {
+void HOptimizedGraphBuilder::VisitArgument(Expression* expr) {
CHECK_ALIVE(VisitForValue(expr));
- Push(AddInstruction(new(zone()) HPushArgument(Pop())));
+ Push(Add<HPushArgument>(Pop()));
}
-void HGraphBuilder::VisitArgumentList(ZoneList<Expression*>* arguments) {
+void HOptimizedGraphBuilder::VisitArgumentList(
+ ZoneList<Expression*>* arguments) {
for (int i = 0; i < arguments->length(); i++) {
CHECK_ALIVE(VisitArgument(arguments->at(i)));
}
}
-void HGraphBuilder::VisitExpressions(ZoneList<Expression*>* exprs) {
+void HOptimizedGraphBuilder::VisitExpressions(
+ ZoneList<Expression*>* exprs) {
for (int i = 0; i < exprs->length(); ++i) {
CHECK_ALIVE(VisitForValue(exprs->at(i)));
}
}
-HGraph* HGraphBuilder::CreateGraph() {
- graph_ = new(zone()) HGraph(info());
- if (FLAG_hydrogen_stats) HStatistics::Instance()->Initialize(info());
-
- {
- HPhase phase("H_Block building");
- current_block_ = graph()->entry_block();
-
- Scope* scope = info()->scope();
- if (scope->HasIllegalRedeclaration()) {
- Bailout("function with illegal redeclaration");
- return NULL;
- }
- if (scope->calls_eval()) {
- Bailout("function calls eval");
- return NULL;
- }
- SetUpScope(scope);
-
- // Add an edge to the body entry. This is warty: the graph's start
- // environment will be used by the Lithium translation as the initial
- // environment on graph entry, but it has now been mutated by the
- // Hydrogen translation of the instructions in the start block. This
- // environment uses values which have not been defined yet. These
- // Hydrogen instructions will then be replayed by the Lithium
- // translation, so they cannot have an environment effect. The edge to
- // the body's entry block (along with some special logic for the start
- // block in HInstruction::InsertAfter) seals the start block from
- // getting unwanted instructions inserted.
- //
- // TODO(kmillikin): Fix this. Stop mutating the initial environment.
- // Make the Hydrogen instructions in the initial block into Hydrogen
- // values (but not instructions), present in the initial environment and
- // not replayed by the Lithium translation.
- HEnvironment* initial_env = environment()->CopyWithoutHistory();
- HBasicBlock* body_entry = CreateBasicBlock(initial_env);
- current_block()->Goto(body_entry);
- body_entry->SetJoinId(BailoutId::FunctionEntry());
- set_current_block(body_entry);
-
- // Handle implicit declaration of the function name in named function
- // expressions before other declarations.
- if (scope->is_function_scope() && scope->function() != NULL) {
- VisitVariableDeclaration(scope->function());
- }
- VisitDeclarations(scope->declarations());
- AddSimulate(BailoutId::Declarations());
-
- HValue* context = environment()->LookupContext();
- AddInstruction(
- new(zone()) HStackCheck(context, HStackCheck::kFunctionEntry));
-
- VisitStatements(info()->function()->body());
- if (HasStackOverflow()) return NULL;
-
- if (current_block() != NULL) {
- HReturn* instr = new(zone()) HReturn(graph()->GetConstantUndefined());
- current_block()->FinishExit(instr);
- set_current_block(NULL);
- }
-
- // If the checksum of the number of type info changes is the same as the
- // last time this function was compiled, then this recompile is likely not
- // due to missing/inadequate type feedback, but rather too aggressive
- // optimization. Disable optimistic LICM in that case.
- Handle<Code> unoptimized_code(info()->shared_info()->code());
- ASSERT(unoptimized_code->kind() == Code::FUNCTION);
- Handle<Object> maybe_type_info(unoptimized_code->type_feedback_info());
- Handle<TypeFeedbackInfo> type_info(
- Handle<TypeFeedbackInfo>::cast(maybe_type_info));
- int checksum = type_info->own_type_change_checksum();
- int composite_checksum = graph()->update_type_change_checksum(checksum);
- graph()->set_use_optimistic_licm(
- !type_info->matches_inlined_type_change_checksum(composite_checksum));
- type_info->set_inlined_type_change_checksum(composite_checksum);
- }
-
- return graph();
-}
-
-bool HGraph::Optimize(SmartArrayPointer<char>* bailout_reason) {
- *bailout_reason = SmartArrayPointer<char>();
- OrderBlocks();
- AssignDominators();
-
-#ifdef DEBUG
- // Do a full verify after building the graph and computing dominators.
- Verify(true);
-#endif
-
- PropagateDeoptimizingMark();
- if (!CheckConstPhiUses()) {
- *bailout_reason = SmartArrayPointer<char>(StrDup(
- "Unsupported phi use of const variable"));
+bool HOptimizedGraphBuilder::BuildGraph() {
+ if (current_info()->function()->is_generator()) {
+ Bailout(kFunctionIsAGenerator);
return false;
}
- EliminateRedundantPhis();
- if (!CheckArgumentsPhiUses()) {
- *bailout_reason = SmartArrayPointer<char>(StrDup(
- "Unsupported phi use of arguments"));
+ Scope* scope = current_info()->scope();
+ if (scope->HasIllegalRedeclaration()) {
+ Bailout(kFunctionWithIllegalRedeclaration);
return false;
}
- if (FLAG_eliminate_dead_phis) EliminateUnreachablePhis();
- CollectPhis();
-
- if (has_osr_loop_entry()) {
- const ZoneList<HPhi*>* phis = osr_loop_entry()->phis();
- for (int j = 0; j < phis->length(); j++) {
- HPhi* phi = phis->at(j);
- osr_values()->at(phi->merged_index())->set_incoming_value(phi);
- }
+ if (scope->calls_eval()) {
+ Bailout(kFunctionCallsEval);
+ return false;
}
+ SetUpScope(scope);
+
+ // Add an edge to the body entry. This is warty: the graph's start
+ // environment will be used by the Lithium translation as the initial
+ // environment on graph entry, but it has now been mutated by the
+ // Hydrogen translation of the instructions in the start block. This
+ // environment uses values which have not been defined yet. These
+ // Hydrogen instructions will then be replayed by the Lithium
+ // translation, so they cannot have an environment effect. The edge to
+ // the body's entry block (along with some special logic for the start
+ // block in HInstruction::InsertAfter) seals the start block from
+ // getting unwanted instructions inserted.
+ //
+ // TODO(kmillikin): Fix this. Stop mutating the initial environment.
+ // Make the Hydrogen instructions in the initial block into Hydrogen
+ // values (but not instructions), present in the initial environment and
+ // not replayed by the Lithium translation.
+ HEnvironment* initial_env = environment()->CopyWithoutHistory();
+ HBasicBlock* body_entry = CreateBasicBlock(initial_env);
+ Goto(body_entry);
+ body_entry->SetJoinId(BailoutId::FunctionEntry());
+ set_current_block(body_entry);
- HInferRepresentation rep(this);
- rep.Analyze();
-
- MarkDeoptimizeOnUndefined();
- InsertRepresentationChanges();
-
- InitializeInferredTypes();
-
- // Must be performed before canonicalization to ensure that Canonicalize
- // will not remove semantically meaningful ToInt32 operations e.g. BIT_OR with
- // zero.
- ComputeSafeUint32Operations();
+ // Handle implicit declaration of the function name in named function
+ // expressions before other declarations.
+ if (scope->is_function_scope() && scope->function() != NULL) {
+ VisitVariableDeclaration(scope->function());
+ }
+ VisitDeclarations(scope->declarations());
+ Add<HSimulate>(BailoutId::Declarations());
- Canonicalize();
+ Add<HStackCheck>(HStackCheck::kFunctionEntry);
- // Perform common subexpression elimination and loop-invariant code motion.
- if (FLAG_use_gvn) {
- HPhase phase("H_Global value numbering", this);
- HGlobalValueNumberer gvn(this, info());
- bool removed_side_effects = gvn.Analyze();
- // Trigger a second analysis pass to further eliminate duplicate values that
- // could only be discovered by removing side-effect-generating instructions
- // during the first pass.
- if (FLAG_smi_only_arrays && removed_side_effects) {
- removed_side_effects = gvn.Analyze();
- ASSERT(!removed_side_effects);
- }
- }
+ VisitStatements(current_info()->function()->body());
+ if (HasStackOverflow()) return false;
- if (FLAG_use_range) {
- HRangeAnalysis rangeAnalysis(this);
- rangeAnalysis.Analyze();
+ if (current_block() != NULL) {
+ Add<HReturn>(graph()->GetConstantUndefined());
+ set_current_block(NULL);
}
- ComputeMinusZeroChecks();
- // Eliminate redundant stack checks on backwards branches.
- HStackCheckEliminator sce(this);
- sce.Process();
+ // If the checksum of the number of type info changes is the same as the
+ // last time this function was compiled, then this recompile is likely not
+ // due to missing/inadequate type feedback, but rather too aggressive
+ // optimization. Disable optimistic LICM in that case.
+ Handle<Code> unoptimized_code(current_info()->shared_info()->code());
+ ASSERT(unoptimized_code->kind() == Code::FUNCTION);
+ Handle<TypeFeedbackInfo> type_info(
+ TypeFeedbackInfo::cast(unoptimized_code->type_feedback_info()));
+ int checksum = type_info->own_type_change_checksum();
+ int composite_checksum = graph()->update_type_change_checksum(checksum);
+ graph()->set_use_optimistic_licm(
+ !type_info->matches_inlined_type_change_checksum(composite_checksum));
+ type_info->set_inlined_type_change_checksum(composite_checksum);
- EliminateRedundantBoundsChecks();
- DehoistSimpleArrayIndexComputations();
- if (FLAG_dead_code_elimination) DeadCodeElimination();
+ // Perform any necessary OSR-specific cleanups or changes to the graph.
+ osr()->FinishGraph();
return true;
}
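// Illustrative sketch (an assumed simplification, not the TypeFeedbackInfo
// API): the checksum comparison above is a heuristic. If the type feedback is
// unchanged since the last optimized compile, this recompile was probably
// triggered by over-aggressive optimization rather than by missing type
// information, so optimistic loop-invariant code motion is switched off.
struct LicmHeuristicSketch {
  int inlined_checksum = -1;  // checksum recorded at the previous compile

  bool UseOptimisticLicm(int current_checksum) {
    bool feedback_unchanged = (current_checksum == inlined_checksum);
    inlined_checksum = current_checksum;
    return !feedback_unchanged;
  }
};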
-// We try to "factor up" HBoundsCheck instructions towards the root of the
-// dominator tree.
-// For now we handle checks where the index is like "exp + int32value".
-// If in the dominator tree we check "exp + v1" and later (dominated)
-// "exp + v2", if v2 <= v1 we can safely remove the second check, and if
-// v2 > v1 we can use v2 in the 1st check and again remove the second.
- // To do so we keep a dictionary of all checks where the key is the pair
-// "exp, length".
-// The class BoundsCheckKey represents this key.
-class BoundsCheckKey : public ZoneObject {
- public:
- HValue* IndexBase() const { return index_base_; }
- HValue* Length() const { return length_; }
-
- uint32_t Hash() {
- return static_cast<uint32_t>(index_base_->Hashcode() ^ length_->Hashcode());
- }
-
- static BoundsCheckKey* Create(Zone* zone,
- HBoundsCheck* check,
- int32_t* offset) {
- if (!check->index()->representation().IsInteger32()) return NULL;
-
- HValue* index_base = NULL;
- HConstant* constant = NULL;
- bool is_sub = false;
-
- if (check->index()->IsAdd()) {
- HAdd* index = HAdd::cast(check->index());
- if (index->left()->IsConstant()) {
- constant = HConstant::cast(index->left());
- index_base = index->right();
- } else if (index->right()->IsConstant()) {
- constant = HConstant::cast(index->right());
- index_base = index->left();
- }
- } else if (check->index()->IsSub()) {
- HSub* index = HSub::cast(check->index());
- is_sub = true;
- if (index->left()->IsConstant()) {
- constant = HConstant::cast(index->left());
- index_base = index->right();
- } else if (index->right()->IsConstant()) {
- constant = HConstant::cast(index->right());
- index_base = index->left();
- }
- }
-
- if (constant != NULL && constant->HasInteger32Value()) {
- *offset = is_sub ? - constant->Integer32Value()
- : constant->Integer32Value();
- } else {
- *offset = 0;
- index_base = check->index();
- }
-
- return new(zone) BoundsCheckKey(index_base, check->length());
- }
-
- private:
- BoundsCheckKey(HValue* index_base, HValue* length)
- : index_base_(index_base),
- length_(length) { }
-
- HValue* index_base_;
- HValue* length_;
-};
+bool HGraph::Optimize(BailoutReason* bailout_reason) {
+ OrderBlocks();
+ AssignDominators();
+ // We need to create a HConstant "zero" now so that GVN will fold every
+ // zero-valued constant in the graph together.
+ // The constant is needed to make idef-based bounds check work: the pass
+ // evaluates relations with "zero" and that zero cannot be created after GVN.
+ GetConstant0();
-// Data about each HBoundsCheck that can be eliminated or moved.
-// It is the "value" in the dictionary indexed by "base-index, length"
-// (the key is BoundsCheckKey).
-// We scan the code with a dominator tree traversal.
-// Traversing the dominator tree we keep a stack (implemented as a singly
-// linked list) of "data" for each basic block that contains a relevant check
-// with the same key (the dictionary holds the head of the list).
-// We also keep all the "data" created for a given basic block in a list, and
-// use it to "clean up" the dictionary when backtracking in the dominator tree
-// traversal.
-// Doing this each dictionary entry always directly points to the check that
-// is dominating the code being examined now.
-// We also track the current "offset" of the index expression and use it to
-// decide if any check is already "covered" (so it can be removed) or not.
-class BoundsCheckBbData: public ZoneObject {
- public:
- BoundsCheckKey* Key() const { return key_; }
- int32_t LowerOffset() const { return lower_offset_; }
- int32_t UpperOffset() const { return upper_offset_; }
- HBasicBlock* BasicBlock() const { return basic_block_; }
- HBoundsCheck* LowerCheck() const { return lower_check_; }
- HBoundsCheck* UpperCheck() const { return upper_check_; }
- BoundsCheckBbData* NextInBasicBlock() const { return next_in_bb_; }
- BoundsCheckBbData* FatherInDominatorTree() const { return father_in_dt_; }
+#ifdef DEBUG
+ // Do a full verify after building the graph and computing dominators.
+ Verify(true);
+#endif
- bool OffsetIsCovered(int32_t offset) const {
- return offset >= LowerOffset() && offset <= UpperOffset();
+ if (FLAG_analyze_environment_liveness && maximum_environment_size() != 0) {
+ Run<HEnvironmentLivenessAnalysisPhase>();
}
- bool HasSingleCheck() { return lower_check_ == upper_check_; }
-
- // The goal of this method is to modify either upper_offset_ or
- // lower_offset_ so that also new_offset is covered (the covered
- // range grows).
- //
- // The precondition is that new_check follows UpperCheck() and
- // LowerCheck() in the same basic block, and that new_offset is not
- // covered (otherwise we could simply remove new_check).
- //
- // If HasSingleCheck() is true then new_check is added as "second check"
- // (either upper or lower; note that HasSingleCheck() becomes false).
- // Otherwise one of the current checks is modified so that it also covers
- // new_offset, and new_check is removed.
- void CoverCheck(HBoundsCheck* new_check,
- int32_t new_offset) {
- ASSERT(new_check->index()->representation().IsInteger32());
- bool keep_new_check = false;
-
- if (new_offset > upper_offset_) {
- upper_offset_ = new_offset;
- if (HasSingleCheck()) {
- keep_new_check = true;
- upper_check_ = new_check;
- } else {
- BuildOffsetAdd(upper_check_,
- &added_upper_index_,
- &added_upper_offset_,
- Key()->IndexBase(),
- new_check->index()->representation(),
- new_offset);
- upper_check_->SetOperandAt(0, added_upper_index_);
- }
- } else if (new_offset < lower_offset_) {
- lower_offset_ = new_offset;
- if (HasSingleCheck()) {
- keep_new_check = true;
- lower_check_ = new_check;
- } else {
- BuildOffsetAdd(lower_check_,
- &added_lower_index_,
- &added_lower_offset_,
- Key()->IndexBase(),
- new_check->index()->representation(),
- new_offset);
- lower_check_->SetOperandAt(0, added_lower_index_);
- }
- } else {
- ASSERT(false);
- }
-
- if (!keep_new_check) {
- new_check->DeleteAndReplaceWith(NULL);
- }
+ if (!CheckConstPhiUses()) {
+ *bailout_reason = kUnsupportedPhiUseOfConstVariable;
+ return false;
}
-
- void RemoveZeroOperations() {
- RemoveZeroAdd(&added_lower_index_, &added_lower_offset_);
- RemoveZeroAdd(&added_upper_index_, &added_upper_offset_);
+ Run<HRedundantPhiEliminationPhase>();
+ if (!CheckArgumentsPhiUses()) {
+ *bailout_reason = kUnsupportedPhiUseOfArguments;
+ return false;
}
- BoundsCheckBbData(BoundsCheckKey* key,
- int32_t lower_offset,
- int32_t upper_offset,
- HBasicBlock* bb,
- HBoundsCheck* lower_check,
- HBoundsCheck* upper_check,
- BoundsCheckBbData* next_in_bb,
- BoundsCheckBbData* father_in_dt)
- : key_(key),
- lower_offset_(lower_offset),
- upper_offset_(upper_offset),
- basic_block_(bb),
- lower_check_(lower_check),
- upper_check_(upper_check),
- added_lower_index_(NULL),
- added_lower_offset_(NULL),
- added_upper_index_(NULL),
- added_upper_offset_(NULL),
- next_in_bb_(next_in_bb),
- father_in_dt_(father_in_dt) { }
+ // Find and mark unreachable code to simplify optimizations, especially gvn,
+ // where unreachable code could unnecessarily defeat LICM.
+ Run<HMarkUnreachableBlocksPhase>();
- private:
- BoundsCheckKey* key_;
- int32_t lower_offset_;
- int32_t upper_offset_;
- HBasicBlock* basic_block_;
- HBoundsCheck* lower_check_;
- HBoundsCheck* upper_check_;
- HAdd* added_lower_index_;
- HConstant* added_lower_offset_;
- HAdd* added_upper_index_;
- HConstant* added_upper_offset_;
- BoundsCheckBbData* next_in_bb_;
- BoundsCheckBbData* father_in_dt_;
-
- void BuildOffsetAdd(HBoundsCheck* check,
- HAdd** add,
- HConstant** constant,
- HValue* original_value,
- Representation representation,
- int32_t new_offset) {
- HConstant* new_constant = new(BasicBlock()->zone())
- HConstant(new_offset, Representation::Integer32());
- if (*add == NULL) {
- new_constant->InsertBefore(check);
- // Because of the bounds checks elimination algorithm, the index is always
- // an HAdd or an HSub here, so we can safely cast to an HBinaryOperation.
- HValue* context = HBinaryOperation::cast(check->index())->context();
- *add = new(BasicBlock()->zone()) HAdd(context,
- original_value,
- new_constant);
- (*add)->AssumeRepresentation(representation);
- (*add)->InsertBefore(check);
- } else {
- new_constant->InsertBefore(*add);
- (*constant)->DeleteAndReplaceWith(new_constant);
- }
- *constant = new_constant;
- }
+ if (FLAG_check_elimination) Run<HCheckEliminationPhase>();
+ if (FLAG_dead_code_elimination) Run<HDeadCodeEliminationPhase>();
+ if (FLAG_use_escape_analysis) Run<HEscapeAnalysisPhase>();
- void RemoveZeroAdd(HAdd** add, HConstant** constant) {
- if (*add != NULL && (*constant)->Integer32Value() == 0) {
- (*add)->DeleteAndReplaceWith((*add)->left());
- (*constant)->DeleteAndReplaceWith(NULL);
- }
- }
-};
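#include <algorithm>
#include <cstdint>

// Illustrative sketch (plain C++, not the removed Hydrogen classes): the
// bookkeeping above tracks, per (index expression, length) key, the range of
// constant offsets already protected by a dominating check. A later check
// whose offset falls inside the range is redundant and can be deleted; one
// outside the range widens the dominating check instead.
struct CoveredOffsetRange {
  int32_t lower;
  int32_t upper;

  bool Covers(int32_t offset) const {
    return offset >= lower && offset <= upper;
  }
  void Widen(int32_t offset) {
    lower = std::min(lower, offset);
    upper = std::max(upper, offset);
  }
};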
+ if (FLAG_load_elimination) Run<HLoadEliminationPhase>();
+ CollectPhis();
-static bool BoundsCheckKeyMatch(void* key1, void* key2) {
- BoundsCheckKey* k1 = static_cast<BoundsCheckKey*>(key1);
- BoundsCheckKey* k2 = static_cast<BoundsCheckKey*>(key2);
- return k1->IndexBase() == k2->IndexBase() && k1->Length() == k2->Length();
-}
+ if (has_osr()) osr()->FinishOsrValues();
+ Run<HInferRepresentationPhase>();
-class BoundsCheckTable : private ZoneHashMap {
- public:
- BoundsCheckBbData** LookupOrInsert(BoundsCheckKey* key, Zone* zone) {
- return reinterpret_cast<BoundsCheckBbData**>(
- &(Lookup(key, key->Hash(), true, ZoneAllocationPolicy(zone))->value));
- }
+ // Remove HSimulate instructions that have turned out not to be needed
+ // after all by folding them into the following HSimulate.
+ // This must happen after inferring representations.
+ Run<HMergeRemovableSimulatesPhase>();
- void Insert(BoundsCheckKey* key, BoundsCheckBbData* data, Zone* zone) {
- Lookup(key, key->Hash(), true, ZoneAllocationPolicy(zone))->value = data;
- }
+ Run<HMarkDeoptimizeOnUndefinedPhase>();
+ Run<HRepresentationChangesPhase>();
- void Delete(BoundsCheckKey* key) {
- Remove(key, key->Hash());
- }
+ Run<HInferTypesPhase>();
- explicit BoundsCheckTable(Zone* zone)
- : ZoneHashMap(BoundsCheckKeyMatch, ZoneHashMap::kDefaultHashMapCapacity,
- ZoneAllocationPolicy(zone)) { }
-};
+ // Must be performed before canonicalization to ensure that Canonicalize
+ // will not remove semantically meaningful ToInt32 operations e.g. BIT_OR with
+ // zero.
+ if (FLAG_opt_safe_uint32_operations) Run<HUint32AnalysisPhase>();
+ if (FLAG_use_canonicalizing) Run<HCanonicalizePhase>();
-// Eliminates checks in bb and recursively in the dominated blocks.
-// Also replace the results of check instructions with the original value, if
-// the result is used. This is safe now, since we don't do code motion after
-// this point. It enables better register allocation since the value produced
-// by check instructions is really a copy of the original value.
-void HGraph::EliminateRedundantBoundsChecks(HBasicBlock* bb,
- BoundsCheckTable* table) {
- BoundsCheckBbData* bb_data_list = NULL;
-
- for (HInstruction* i = bb->first(); i != NULL; i = i->next()) {
- if (!i->IsBoundsCheck()) continue;
-
- HBoundsCheck* check = HBoundsCheck::cast(i);
- check->ReplaceAllUsesWith(check->index());
-
- if (!FLAG_array_bounds_checks_elimination) continue;
-
- int32_t offset;
- BoundsCheckKey* key =
- BoundsCheckKey::Create(zone(), check, &offset);
- if (key == NULL) continue;
- BoundsCheckBbData** data_p = table->LookupOrInsert(key, zone());
- BoundsCheckBbData* data = *data_p;
- if (data == NULL) {
- bb_data_list = new(zone()) BoundsCheckBbData(key,
- offset,
- offset,
- bb,
- check,
- check,
- bb_data_list,
- NULL);
- *data_p = bb_data_list;
- } else if (data->OffsetIsCovered(offset)) {
- check->DeleteAndReplaceWith(NULL);
- } else if (data->BasicBlock() == bb) {
- data->CoverCheck(check, offset);
- } else {
- int32_t new_lower_offset = offset < data->LowerOffset()
- ? offset
- : data->LowerOffset();
- int32_t new_upper_offset = offset > data->UpperOffset()
- ? offset
- : data->UpperOffset();
- bb_data_list = new(zone()) BoundsCheckBbData(key,
- new_lower_offset,
- new_upper_offset,
- bb,
- data->LowerCheck(),
- data->UpperCheck(),
- bb_data_list,
- data);
- table->Insert(key, bb_data_list, zone());
- }
- }
-
- for (int i = 0; i < bb->dominated_blocks()->length(); ++i) {
- EliminateRedundantBoundsChecks(bb->dominated_blocks()->at(i), table);
- }
-
- for (BoundsCheckBbData* data = bb_data_list;
- data != NULL;
- data = data->NextInBasicBlock()) {
- data->RemoveZeroOperations();
- if (data->FatherInDominatorTree()) {
- table->Insert(data->Key(), data->FatherInDominatorTree(), zone());
- } else {
- table->Delete(data->Key());
- }
- }
-}
+ if (FLAG_use_gvn) Run<HGlobalValueNumberingPhase>();
+ if (FLAG_use_range) Run<HRangeAnalysisPhase>();
-void HGraph::EliminateRedundantBoundsChecks() {
- HPhase phase("H_Eliminate bounds checks", this);
- BoundsCheckTable checks_table(zone());
- EliminateRedundantBoundsChecks(entry_block(), &checks_table);
-}
+ Run<HComputeChangeUndefinedToNaN>();
+ Run<HComputeMinusZeroChecksPhase>();
+ // Eliminate redundant stack checks on backwards branches.
+ Run<HStackCheckEliminationPhase>();
-static void DehoistArrayIndex(ArrayInstructionInterface* array_operation) {
- HValue* index = array_operation->GetKey();
- if (!index->representation().IsInteger32()) return;
+ if (FLAG_array_bounds_checks_elimination) Run<HBoundsCheckEliminationPhase>();
+ if (FLAG_array_bounds_checks_hoisting) Run<HBoundsCheckHoistingPhase>();
+ if (FLAG_array_index_dehoisting) Run<HDehoistIndexComputationsPhase>();
+ if (FLAG_dead_code_elimination) Run<HDeadCodeEliminationPhase>();
- HConstant* constant;
- HValue* subexpression;
- int32_t sign;
- if (index->IsAdd()) {
- sign = 1;
- HAdd* add = HAdd::cast(index);
- if (add->left()->IsConstant()) {
- subexpression = add->right();
- constant = HConstant::cast(add->left());
- } else if (add->right()->IsConstant()) {
- subexpression = add->left();
- constant = HConstant::cast(add->right());
- } else {
- return;
- }
- } else if (index->IsSub()) {
- sign = -1;
- HSub* sub = HSub::cast(index);
- if (sub->left()->IsConstant()) {
- subexpression = sub->right();
- constant = HConstant::cast(sub->left());
- } else if (sub->right()->IsConstant()) {
- subexpression = sub->left();
- constant = HConstant::cast(sub->right());
- } else {
- return;
- }
- } else {
- return;
- }
+ RestoreActualValues();
- if (!constant->HasInteger32Value()) return;
- int32_t value = constant->Integer32Value() * sign;
- // We limit offset values to 30 bits because we want to avoid the risk of
- // overflows when the offset is added to the object header size.
- if (value >= 1 << array_operation->MaxIndexOffsetBits() || value < 0) return;
- array_operation->SetKey(subexpression);
- if (index->HasNoUses()) {
- index->DeleteAndReplaceWith(NULL);
- }
- ASSERT(value >= 0);
- array_operation->SetIndexOffset(static_cast<uint32_t>(value));
- array_operation->SetDehoisted(true);
-}
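#include <cstdint>

// Illustrative sketch (not the removed helper itself): "dehoisting" splits an
// index of the form subexpression + constant so the constant becomes a fixed
// element offset on the keyed load or store. As in the check above, the
// offset is capped well below 2^31 so that adding the object header size to
// it cannot overflow.
bool TryDehoistOffset(int32_t constant, int max_offset_bits,
                      uint32_t* offset_out) {
  if (constant < 0 || constant >= (1 << max_offset_bits)) return false;
  *offset_out = static_cast<uint32_t>(constant);
  return true;
}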
+ // Find unreachable code a second time, GVN and other optimizations may have
+ // made blocks unreachable that were previously reachable.
+ Run<HMarkUnreachableBlocksPhase>();
+ return true;
+}
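// Minimal sketch of the Run<SomePhase>() pattern used above (an assumption
// about its shape, not the real HPhase machinery): each pass is an object
// constructed over the graph and executed exactly once, which keeps the
// ordering of the optimization pipeline readable at a glance.
template <typename Phase, typename GraphT>
void RunPhaseSketch(GraphT* graph) {
  Phase phase(graph);  // a phase takes the graph in its constructor
  phase.Run();         // and does all of its work in Run()
}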
-void HGraph::DehoistSimpleArrayIndexComputations() {
- if (!FLAG_array_index_dehoisting) return;
- HPhase phase("H_Dehoist index computations", this);
- for (int i = 0; i < blocks()->length(); ++i) {
- for (HInstruction* instr = blocks()->at(i)->first();
- instr != NULL;
- instr = instr->next()) {
- ArrayInstructionInterface* array_instruction = NULL;
- if (instr->IsLoadKeyedFastElement()) {
- HLoadKeyedFastElement* op = HLoadKeyedFastElement::cast(instr);
- array_instruction = static_cast<ArrayInstructionInterface*>(op);
- } else if (instr->IsLoadKeyedFastDoubleElement()) {
- HLoadKeyedFastDoubleElement* op =
- HLoadKeyedFastDoubleElement::cast(instr);
- array_instruction = static_cast<ArrayInstructionInterface*>(op);
- } else if (instr->IsLoadKeyedSpecializedArrayElement()) {
- HLoadKeyedSpecializedArrayElement* op =
- HLoadKeyedSpecializedArrayElement::cast(instr);
- array_instruction = static_cast<ArrayInstructionInterface*>(op);
- } else if (instr->IsStoreKeyedFastElement()) {
- HStoreKeyedFastElement* op = HStoreKeyedFastElement::cast(instr);
- array_instruction = static_cast<ArrayInstructionInterface*>(op);
- } else if (instr->IsStoreKeyedFastDoubleElement()) {
- HStoreKeyedFastDoubleElement* op =
- HStoreKeyedFastDoubleElement::cast(instr);
- array_instruction = static_cast<ArrayInstructionInterface*>(op);
- } else if (instr->IsStoreKeyedSpecializedArrayElement()) {
- HStoreKeyedSpecializedArrayElement* op =
- HStoreKeyedSpecializedArrayElement::cast(instr);
- array_instruction = static_cast<ArrayInstructionInterface*>(op);
- } else {
- continue;
- }
- DehoistArrayIndex(array_instruction);
- }
- }
-}
+void HGraph::RestoreActualValues() {
+ HPhase phase("H_Restore actual values", this);
+ for (int block_index = 0; block_index < blocks()->length(); block_index++) {
+ HBasicBlock* block = blocks()->at(block_index);
-void HGraph::DeadCodeElimination() {
- HPhase phase("H_Dead code elimination", this);
- ZoneList<HInstruction*> worklist(blocks_.length(), zone());
- for (int i = 0; i < blocks()->length(); ++i) {
- for (HInstruction* instr = blocks()->at(i)->first();
- instr != NULL;
- instr = instr->next()) {
- if (instr->IsDead()) worklist.Add(instr, zone());
+#ifdef DEBUG
+ for (int i = 0; i < block->phis()->length(); i++) {
+ HPhi* phi = block->phis()->at(i);
+ ASSERT(phi->ActualValue() == phi);
}
- }
+#endif
- while (!worklist.is_empty()) {
- HInstruction* instr = worklist.RemoveLast();
- if (FLAG_trace_dead_code_elimination) {
- HeapStringAllocator allocator;
- StringStream stream(&allocator);
- instr->PrintNameTo(&stream);
- stream.Add(" = ");
- instr->PrintTo(&stream);
- PrintF("[removing dead instruction %s]\n", *stream.ToCString());
- }
- instr->DeleteAndReplaceWith(NULL);
- for (int i = 0; i < instr->OperandCount(); ++i) {
- HValue* operand = instr->OperandAt(i);
- if (operand->IsDead()) worklist.Add(HInstruction::cast(operand), zone());
+ for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
+ HInstruction* instruction = it.Current();
+ if (instruction->ActualValue() != instruction) {
+ ASSERT(instruction->IsInformativeDefinition());
+ if (instruction->IsPurelyInformativeDefinition()) {
+ instruction->DeleteAndReplaceWith(instruction->RedefinedOperand());
+ } else {
+ instruction->ReplaceAllUsesWith(instruction->ActualValue());
+ }
+ }
}
}
}
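// Illustrative sketch (simplified): some instructions are purely informative
// definitions that re-label an existing value for analysis, for example to
// record that an index is known to be in bounds. RestoreActualValues() above
// unwinds those labels so later passes and code generation see the underlying
// value again; conceptually ActualValue() just follows the redefinition chain.
struct ValueNodeSketch {
  ValueNodeSketch* redefined_operand = nullptr;  // set only on informative defs

  ValueNodeSketch* ActualValue() {
    ValueNodeSketch* node = this;
    while (node->redefined_operand != nullptr) node = node->redefined_operand;
    return node;
  }
};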
-HInstruction* HGraphBuilder::AddInstruction(HInstruction* instr) {
- ASSERT(current_block() != NULL);
- current_block()->AddInstruction(instr);
- return instr;
-}
-
-
-void HGraphBuilder::AddSimulate(BailoutId ast_id) {
- ASSERT(current_block() != NULL);
- current_block()->AddSimulate(ast_id);
-}
-
-
-void HGraphBuilder::AddPhi(HPhi* instr) {
- ASSERT(current_block() != NULL);
- current_block()->AddPhi(instr);
-}
-
-
-void HGraphBuilder::PushAndAdd(HInstruction* instr) {
- Push(instr);
- AddInstruction(instr);
-}
-
-
template <class Instruction>
-HInstruction* HGraphBuilder::PreProcessCall(Instruction* call) {
+HInstruction* HOptimizedGraphBuilder::PreProcessCall(Instruction* call) {
int count = call->argument_count();
ZoneList<HValue*> arguments(count, zone());
for (int i = 0; i < count; ++i) {
@@ -3852,35 +3245,31 @@ HInstruction* HGraphBuilder::PreProcessCall(Instruction* call) {
}
while (!arguments.is_empty()) {
- AddInstruction(new(zone()) HPushArgument(arguments.RemoveLast()));
+ Add<HPushArgument>(arguments.RemoveLast());
}
return call;
}
-void HGraphBuilder::SetUpScope(Scope* scope) {
- HConstant* undefined_constant = new(zone()) HConstant(
- isolate()->factory()->undefined_value(), Representation::Tagged());
- AddInstruction(undefined_constant);
- graph_->set_undefined_constant(undefined_constant);
-
- HArgumentsObject* object = new(zone()) HArgumentsObject;
- AddInstruction(object);
- graph()->SetArgumentsObject(object);
+void HOptimizedGraphBuilder::SetUpScope(Scope* scope) {
+ // First special is HContext.
+ HInstruction* context = Add<HContext>();
+ environment()->BindContext(context);
- // Set the initial values of parameters including "this". "This" has
- // parameter index 0.
+ // Create an arguments object containing the initial parameters. Set the
+ // initial values of parameters including "this" having parameter index 0.
ASSERT_EQ(scope->num_parameters() + 1, environment()->parameter_count());
-
+ HArgumentsObject* arguments_object =
+ New<HArgumentsObject>(environment()->parameter_count());
for (int i = 0; i < environment()->parameter_count(); ++i) {
- HInstruction* parameter = AddInstruction(new(zone()) HParameter(i));
+ HInstruction* parameter = Add<HParameter>(i);
+ arguments_object->AddArgument(parameter, zone());
environment()->Bind(i, parameter);
}
+ AddInstruction(arguments_object);
+ graph()->SetArgumentsObject(arguments_object);
- // First special is HContext.
- HInstruction* context = AddInstruction(new(zone()) HContext);
- environment()->BindContext(context);
-
+ HConstant* undefined_constant = graph()->GetConstantUndefined();
// Initialize specials and locals to undefined.
for (int i = environment()->parameter_count() + 1;
i < environment()->length();
@@ -3892,7 +3281,7 @@ void HGraphBuilder::SetUpScope(Scope* scope) {
// not have declarations).
if (scope->arguments() != NULL) {
if (!scope->arguments()->IsStackAllocated()) {
- return Bailout("context-allocated arguments");
+ return Bailout(kContextAllocatedArguments);
}
environment()->Bind(scope->arguments(),
@@ -3901,35 +3290,21 @@ void HGraphBuilder::SetUpScope(Scope* scope) {
}
-void HGraphBuilder::VisitStatements(ZoneList<Statement*>* statements) {
+void HOptimizedGraphBuilder::VisitStatements(ZoneList<Statement*>* statements) {
for (int i = 0; i < statements->length(); i++) {
- CHECK_ALIVE(Visit(statements->at(i)));
+ Statement* stmt = statements->at(i);
+ CHECK_ALIVE(Visit(stmt));
+ if (stmt->IsJump()) break;
}
}
-HBasicBlock* HGraphBuilder::CreateBasicBlock(HEnvironment* env) {
- HBasicBlock* b = graph()->CreateBasicBlock();
- b->SetInitialEnvironment(env);
- return b;
-}
-
-
-HBasicBlock* HGraphBuilder::CreateLoopHeaderBlock() {
- HBasicBlock* header = graph()->CreateBasicBlock();
- HEnvironment* entry_env = environment()->CopyAsLoopHeader(header);
- header->SetInitialEnvironment(entry_env);
- header->AttachLoopInformation();
- return header;
-}
-
-
-void HGraphBuilder::VisitBlock(Block* stmt) {
+void HOptimizedGraphBuilder::VisitBlock(Block* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
if (stmt->scope() != NULL) {
- return Bailout("ScopedBlock");
+ return Bailout(kScopedBlock);
}
BreakAndContinueInfo break_info(stmt);
{ BreakAndContinueScope push(&break_info, this);
@@ -3937,14 +3312,15 @@ void HGraphBuilder::VisitBlock(Block* stmt) {
}
HBasicBlock* break_block = break_info.break_block();
if (break_block != NULL) {
- if (current_block() != NULL) current_block()->Goto(break_block);
+ if (current_block() != NULL) Goto(break_block);
break_block->SetJoinId(stmt->ExitId());
set_current_block(break_block);
}
}
-void HGraphBuilder::VisitExpressionStatement(ExpressionStatement* stmt) {
+void HOptimizedGraphBuilder::VisitExpressionStatement(
+ ExpressionStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
@@ -3952,22 +3328,22 @@ void HGraphBuilder::VisitExpressionStatement(ExpressionStatement* stmt) {
}
-void HGraphBuilder::VisitEmptyStatement(EmptyStatement* stmt) {
+void HOptimizedGraphBuilder::VisitEmptyStatement(EmptyStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
}
-void HGraphBuilder::VisitIfStatement(IfStatement* stmt) {
+void HOptimizedGraphBuilder::VisitIfStatement(IfStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
if (stmt->condition()->ToBooleanIsTrue()) {
- AddSimulate(stmt->ThenId());
+ Add<HSimulate>(stmt->ThenId());
Visit(stmt->then_statement());
} else if (stmt->condition()->ToBooleanIsFalse()) {
- AddSimulate(stmt->ElseId());
+ Add<HSimulate>(stmt->ElseId());
Visit(stmt->else_statement());
} else {
HBasicBlock* cond_true = graph()->CreateBasicBlock();
@@ -3998,7 +3374,7 @@ void HGraphBuilder::VisitIfStatement(IfStatement* stmt) {
}
-HBasicBlock* HGraphBuilder::BreakAndContinueScope::Get(
+HBasicBlock* HOptimizedGraphBuilder::BreakAndContinueScope::Get(
BreakableStatement* stmt,
BreakType type,
int* drop_extra) {
@@ -4037,35 +3413,34 @@ HBasicBlock* HGraphBuilder::BreakAndContinueScope::Get(
}
-void HGraphBuilder::VisitContinueStatement(ContinueStatement* stmt) {
+void HOptimizedGraphBuilder::VisitContinueStatement(
+ ContinueStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
int drop_extra = 0;
- HBasicBlock* continue_block = break_scope()->Get(stmt->target(),
- CONTINUE,
- &drop_extra);
+ HBasicBlock* continue_block = break_scope()->Get(
+ stmt->target(), BreakAndContinueScope::CONTINUE, &drop_extra);
Drop(drop_extra);
- current_block()->Goto(continue_block);
+ Goto(continue_block);
set_current_block(NULL);
}
-void HGraphBuilder::VisitBreakStatement(BreakStatement* stmt) {
+void HOptimizedGraphBuilder::VisitBreakStatement(BreakStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
int drop_extra = 0;
- HBasicBlock* break_block = break_scope()->Get(stmt->target(),
- BREAK,
- &drop_extra);
+ HBasicBlock* break_block = break_scope()->Get(
+ stmt->target(), BreakAndContinueScope::BREAK, &drop_extra);
Drop(drop_extra);
- current_block()->Goto(break_block);
+ Goto(break_block);
set_current_block(NULL);
}
-void HGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
+void HOptimizedGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
@@ -4075,7 +3450,7 @@ void HGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
// Not an inlined return, so an actual one.
CHECK_ALIVE(VisitForValue(stmt->expression()));
HValue* result = environment()->Pop();
- current_block()->FinishExit(new(zone()) HReturn(result));
+ Add<HReturn>(result);
} else if (state->inlining_kind() == CONSTRUCT_CALL_RETURN) {
// Return from an inlined construct call. In a test context the return value
// will always evaluate to true, in a value context the return value needs
@@ -4083,26 +3458,26 @@ void HGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
if (context->IsTest()) {
TestContext* test = TestContext::cast(context);
CHECK_ALIVE(VisitForEffect(stmt->expression()));
- current_block()->Goto(test->if_true(), state);
+ Goto(test->if_true(), state);
} else if (context->IsEffect()) {
CHECK_ALIVE(VisitForEffect(stmt->expression()));
- current_block()->Goto(function_return(), state);
+ Goto(function_return(), state);
} else {
ASSERT(context->IsValue());
CHECK_ALIVE(VisitForValue(stmt->expression()));
HValue* return_value = Pop();
HValue* receiver = environment()->arguments_environment()->Lookup(0);
HHasInstanceTypeAndBranch* typecheck =
- new(zone()) HHasInstanceTypeAndBranch(return_value,
- FIRST_SPEC_OBJECT_TYPE,
- LAST_SPEC_OBJECT_TYPE);
+ New<HHasInstanceTypeAndBranch>(return_value,
+ FIRST_SPEC_OBJECT_TYPE,
+ LAST_SPEC_OBJECT_TYPE);
HBasicBlock* if_spec_object = graph()->CreateBasicBlock();
HBasicBlock* not_spec_object = graph()->CreateBasicBlock();
typecheck->SetSuccessorAt(0, if_spec_object);
typecheck->SetSuccessorAt(1, not_spec_object);
- current_block()->Finish(typecheck);
- if_spec_object->AddLeaveInlined(return_value, state);
- not_spec_object->AddLeaveInlined(receiver, state);
+ FinishCurrentBlock(typecheck);
+ AddLeaveInlined(if_spec_object, return_value, state);
+ AddLeaveInlined(not_spec_object, receiver, state);
}
} else if (state->inlining_kind() == SETTER_CALL_RETURN) {
// Return from an inlined setter call. The returned value is never used, the
@@ -4112,11 +3487,11 @@ void HGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
HValue* rhs = environment()->arguments_environment()->Lookup(1);
context->ReturnValue(rhs);
} else if (context->IsEffect()) {
- current_block()->Goto(function_return(), state);
+ Goto(function_return(), state);
} else {
ASSERT(context->IsValue());
HValue* rhs = environment()->arguments_environment()->Lookup(1);
- current_block()->AddLeaveInlined(rhs, state);
+ AddLeaveInlined(rhs, state);
}
} else {
// Return from a normal inlined function. Visit the subexpression in the
@@ -4126,85 +3501,64 @@ void HGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
VisitForControl(stmt->expression(), test->if_true(), test->if_false());
} else if (context->IsEffect()) {
CHECK_ALIVE(VisitForEffect(stmt->expression()));
- current_block()->Goto(function_return(), state);
+ Goto(function_return(), state);
} else {
ASSERT(context->IsValue());
CHECK_ALIVE(VisitForValue(stmt->expression()));
- current_block()->AddLeaveInlined(Pop(), state);
+ AddLeaveInlined(Pop(), state);
}
}
set_current_block(NULL);
}
-void HGraphBuilder::VisitWithStatement(WithStatement* stmt) {
+void HOptimizedGraphBuilder::VisitWithStatement(WithStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
- return Bailout("WithStatement");
+ return Bailout(kWithStatement);
}
-void HGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
+void HOptimizedGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
+
// We only optimize switch statements with smi-literal smi comparisons,
// with a bounded number of clauses.
const int kCaseClauseLimit = 128;
ZoneList<CaseClause*>* clauses = stmt->cases();
int clause_count = clauses->length();
if (clause_count > kCaseClauseLimit) {
- return Bailout("SwitchStatement: too many clauses");
+ return Bailout(kSwitchStatementTooManyClauses);
}
- HValue* context = environment()->LookupContext();
+ ASSERT(stmt->switch_type() != SwitchStatement::UNKNOWN_SWITCH);
+ if (stmt->switch_type() == SwitchStatement::GENERIC_SWITCH) {
+ return Bailout(kSwitchStatementMixedOrNonLiteralSwitchLabels);
+ }
CHECK_ALIVE(VisitForValue(stmt->tag()));
- AddSimulate(stmt->EntryId());
+ Add<HSimulate>(stmt->EntryId());
HValue* tag_value = Pop();
HBasicBlock* first_test_block = current_block();
- SwitchType switch_type = UNKNOWN_SWITCH;
-
- // 1. Extract clause type
- for (int i = 0; i < clause_count; ++i) {
- CaseClause* clause = clauses->at(i);
- if (clause->is_default()) continue;
-
- if (switch_type == UNKNOWN_SWITCH) {
- if (clause->label()->IsSmiLiteral()) {
- switch_type = SMI_SWITCH;
- } else if (clause->label()->IsStringLiteral()) {
- switch_type = STRING_SWITCH;
- } else {
- return Bailout("SwitchStatement: non-literal switch label");
- }
- } else if ((switch_type == STRING_SWITCH &&
- !clause->label()->IsStringLiteral()) ||
- (switch_type == SMI_SWITCH &&
- !clause->label()->IsSmiLiteral())) {
- return Bailout("SwitchStatemnt: mixed label types are not supported");
- }
- }
-
HUnaryControlInstruction* string_check = NULL;
HBasicBlock* not_string_block = NULL;
// Test switch's tag value if all clauses are string literals
- if (switch_type == STRING_SWITCH) {
- string_check = new(zone()) HIsStringAndBranch(tag_value);
+ if (stmt->switch_type() == SwitchStatement::STRING_SWITCH) {
first_test_block = graph()->CreateBasicBlock();
not_string_block = graph()->CreateBasicBlock();
-
- string_check->SetSuccessorAt(0, first_test_block);
- string_check->SetSuccessorAt(1, not_string_block);
- current_block()->Finish(string_check);
+ string_check = New<HIsStringAndBranch>(
+ tag_value, first_test_block, not_string_block);
+ FinishCurrentBlock(string_check);
set_current_block(first_test_block);
}
- // 2. Build all the tests, with dangling true branches
+ // 1. Build all the tests, with dangling true branches
BailoutId default_id = BailoutId::None();
for (int i = 0; i < clause_count; ++i) {
CaseClause* clause = clauses->at(i);
@@ -4212,9 +3566,6 @@ void HGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
default_id = clause->EntryId();
continue;
}
- if (switch_type == SMI_SWITCH) {
- clause->RecordTypeFeedback(oracle());
- }
// Generate a compare and branch.
CHECK_ALIVE(VisitForValue(clause->label()));
@@ -4225,36 +3576,33 @@ void HGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
HControlInstruction* compare;
- if (switch_type == SMI_SWITCH) {
- if (!clause->IsSmiCompare()) {
- // Finish with deoptimize and add uses of environment values to
- // account for invisible uses.
- current_block()->FinishExitWithDeoptimization(HDeoptimize::kUseAll);
- set_current_block(NULL);
- break;
+ if (stmt->switch_type() == SwitchStatement::SMI_SWITCH) {
+ if (!clause->compare_type()->Is(Type::Smi())) {
+ Add<HDeoptimize>("Non-smi switch type", Deoptimizer::SOFT);
}
- HCompareIDAndBranch* compare_ =
- new(zone()) HCompareIDAndBranch(tag_value,
- label_value,
- Token::EQ_STRICT);
- compare_->SetInputRepresentation(Representation::Integer32());
+ HCompareNumericAndBranch* compare_ =
+ New<HCompareNumericAndBranch>(tag_value,
+ label_value,
+ Token::EQ_STRICT);
+ compare_->set_observed_input_representation(
+ Representation::Smi(), Representation::Smi());
compare = compare_;
} else {
- compare = new(zone()) HStringCompareAndBranch(context, tag_value,
- label_value,
- Token::EQ_STRICT);
+ compare = New<HStringCompareAndBranch>(tag_value,
+ label_value,
+ Token::EQ_STRICT);
}
compare->SetSuccessorAt(0, body_block);
compare->SetSuccessorAt(1, next_test_block);
- current_block()->Finish(compare);
+ FinishCurrentBlock(compare);
set_current_block(next_test_block);
}
// Save the current block to use for the default or to join with the
- // exit. This block is NULL if we deoptimized.
+ // exit.
HBasicBlock* last_block = current_block();
if (not_string_block != NULL) {
@@ -4262,7 +3610,7 @@ void HGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
last_block = CreateJoin(last_block, not_string_block, join_id);
}
- // 3. Loop over the clauses and the linked list of tests in lockstep,
+ // 2. Loop over the clauses and the linked list of tests in lockstep,
// translating the clause bodies.
HBasicBlock* curr_test_block = first_test_block;
HBasicBlock* fall_through_block = NULL;
@@ -4280,7 +3628,14 @@ void HGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
normal_block = last_block;
last_block = NULL; // Cleared to indicate we've handled it.
}
- } else if (!curr_test_block->end()->IsDeoptimize()) {
+ } else {
+ // If the current test block is deoptimizing due to an unhandled clause
+ // of the switch, the test instruction is in the next block since the
+ // deopt must end the current block.
+ if (curr_test_block->IsDeoptimizing()) {
+ ASSERT(curr_test_block->end()->SecondSuccessor() == NULL);
+ curr_test_block = curr_test_block->end()->FirstSuccessor();
+ }
normal_block = curr_test_block->end()->FirstSuccessor();
curr_test_block = curr_test_block->end()->SecondSuccessor();
}
@@ -4322,94 +3677,33 @@ void HGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
last_block,
stmt->ExitId()));
} else {
- if (fall_through_block != NULL) fall_through_block->Goto(break_block);
- if (last_block != NULL) last_block->Goto(break_block);
+ if (fall_through_block != NULL) Goto(fall_through_block, break_block);
+ if (last_block != NULL) Goto(last_block, break_block);
break_block->SetJoinId(stmt->ExitId());
set_current_block(break_block);
}
}
-bool HGraphBuilder::HasOsrEntryAt(IterationStatement* statement) {
- return statement->OsrEntryId() == info()->osr_ast_id();
-}
-
-
-bool HGraphBuilder::PreProcessOsrEntry(IterationStatement* statement) {
- if (!HasOsrEntryAt(statement)) return false;
-
- HBasicBlock* non_osr_entry = graph()->CreateBasicBlock();
- HBasicBlock* osr_entry = graph()->CreateBasicBlock();
- HValue* true_value = graph()->GetConstantTrue();
- HBranch* test = new(zone()) HBranch(true_value, non_osr_entry, osr_entry);
- current_block()->Finish(test);
-
- HBasicBlock* loop_predecessor = graph()->CreateBasicBlock();
- non_osr_entry->Goto(loop_predecessor);
-
- set_current_block(osr_entry);
- BailoutId osr_entry_id = statement->OsrEntryId();
- int first_expression_index = environment()->first_expression_index();
- int length = environment()->length();
- ZoneList<HUnknownOSRValue*>* osr_values =
- new(zone()) ZoneList<HUnknownOSRValue*>(length, zone());
-
- for (int i = 0; i < first_expression_index; ++i) {
- HUnknownOSRValue* osr_value = new(zone()) HUnknownOSRValue;
- AddInstruction(osr_value);
- environment()->Bind(i, osr_value);
- osr_values->Add(osr_value, zone());
- }
-
- if (first_expression_index != length) {
- environment()->Drop(length - first_expression_index);
- for (int i = first_expression_index; i < length; ++i) {
- HUnknownOSRValue* osr_value = new(zone()) HUnknownOSRValue;
- AddInstruction(osr_value);
- environment()->Push(osr_value);
- osr_values->Add(osr_value, zone());
- }
- }
-
- graph()->set_osr_values(osr_values);
-
- AddSimulate(osr_entry_id);
- AddInstruction(new(zone()) HOsrEntry(osr_entry_id));
- HContext* context = new(zone()) HContext;
- AddInstruction(context);
- environment()->BindContext(context);
- current_block()->Goto(loop_predecessor);
- loop_predecessor->SetJoinId(statement->EntryId());
- set_current_block(loop_predecessor);
- return true;
-}
-
-
-void HGraphBuilder::VisitLoopBody(IterationStatement* stmt,
- HBasicBlock* loop_entry,
- BreakAndContinueInfo* break_info) {
+void HOptimizedGraphBuilder::VisitLoopBody(IterationStatement* stmt,
+ HBasicBlock* loop_entry,
+ BreakAndContinueInfo* break_info) {
BreakAndContinueScope push(break_info, this);
- AddSimulate(stmt->StackCheckId());
- HValue* context = environment()->LookupContext();
+ Add<HSimulate>(stmt->StackCheckId());
HStackCheck* stack_check =
- new(zone()) HStackCheck(context, HStackCheck::kBackwardsBranch);
- AddInstruction(stack_check);
+ HStackCheck::cast(Add<HStackCheck>(HStackCheck::kBackwardsBranch));
ASSERT(loop_entry->IsLoopHeader());
loop_entry->loop_information()->set_stack_check(stack_check);
CHECK_BAILOUT(Visit(stmt->body()));
}
-void HGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) {
+void HOptimizedGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
ASSERT(current_block() != NULL);
- bool osr_entry = PreProcessOsrEntry(stmt);
- HBasicBlock* loop_entry = CreateLoopHeaderBlock();
- current_block()->Goto(loop_entry);
- set_current_block(loop_entry);
- if (osr_entry) graph()->set_osr_loop_entry(loop_entry);
+ HBasicBlock* loop_entry = BuildLoopEntry(stmt);
BreakAndContinueInfo break_info(stmt);
CHECK_BAILOUT(VisitLoopBody(stmt, loop_entry, &break_info));
@@ -4443,17 +3737,12 @@ void HGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) {
}
-void HGraphBuilder::VisitWhileStatement(WhileStatement* stmt) {
+void HOptimizedGraphBuilder::VisitWhileStatement(WhileStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
ASSERT(current_block() != NULL);
- bool osr_entry = PreProcessOsrEntry(stmt);
- HBasicBlock* loop_entry = CreateLoopHeaderBlock();
- current_block()->Goto(loop_entry);
- set_current_block(loop_entry);
- if (osr_entry) graph()->set_osr_loop_entry(loop_entry);
-
+ HBasicBlock* loop_entry = BuildLoopEntry(stmt);
// If the condition is constant true, do not generate a branch.
HBasicBlock* loop_successor = NULL;
@@ -4487,7 +3776,7 @@ void HGraphBuilder::VisitWhileStatement(WhileStatement* stmt) {
}
-void HGraphBuilder::VisitForStatement(ForStatement* stmt) {
+void HOptimizedGraphBuilder::VisitForStatement(ForStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
@@ -4495,11 +3784,7 @@ void HGraphBuilder::VisitForStatement(ForStatement* stmt) {
CHECK_ALIVE(Visit(stmt->init()));
}
ASSERT(current_block() != NULL);
- bool osr_entry = PreProcessOsrEntry(stmt);
- HBasicBlock* loop_entry = CreateLoopHeaderBlock();
- current_block()->Goto(loop_entry);
- set_current_block(loop_entry);
- if (osr_entry) graph()->set_osr_loop_entry(loop_entry);
+ HBasicBlock* loop_entry = BuildLoopEntry(stmt);
HBasicBlock* loop_successor = NULL;
if (stmt->cond() != NULL) {
@@ -4539,22 +3824,22 @@ void HGraphBuilder::VisitForStatement(ForStatement* stmt) {
}
-void HGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
+void HOptimizedGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
if (!FLAG_optimize_for_in) {
- return Bailout("ForInStatement optimization is disabled");
+ return Bailout(kForInStatementOptimizationIsDisabled);
}
- if (!oracle()->IsForInFastCase(stmt)) {
- return Bailout("ForInStatement is not fast case");
+ if (stmt->for_in_type() != ForInStatement::FAST_FOR_IN) {
+ return Bailout(kForInStatementIsNotFastCase);
}
if (!stmt->each()->IsVariableProxy() ||
!stmt->each()->AsVariableProxy()->var()->IsStackLocal()) {
- return Bailout("ForInStatement with non-local each variable");
+ return Bailout(kForInStatementWithNonLocalEachVariable);
}
Variable* each_var = stmt->each()->AsVariableProxy()->var();
@@ -4562,71 +3847,59 @@ void HGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
CHECK_ALIVE(VisitForValue(stmt->enumerable()));
HValue* enumerable = Top(); // Leave enumerable at the top.
- HInstruction* map = AddInstruction(new(zone()) HForInPrepareMap(
- environment()->LookupContext(), enumerable));
- AddSimulate(stmt->PrepareId());
+ HInstruction* map = Add<HForInPrepareMap>(enumerable);
+ Add<HSimulate>(stmt->PrepareId());
- HInstruction* array = AddInstruction(
- new(zone()) HForInCacheArray(
- enumerable,
- map,
- DescriptorArray::kEnumCacheBridgeCacheIndex));
+ HInstruction* array = Add<HForInCacheArray>(
+ enumerable, map, DescriptorArray::kEnumCacheBridgeCacheIndex);
- HInstruction* enum_length = AddInstruction(new(zone()) HMapEnumLength(map));
+ HInstruction* enum_length = Add<HMapEnumLength>(map);
- HInstruction* start_index = AddInstruction(new(zone()) HConstant(
- Handle<Object>(Smi::FromInt(0)), Representation::Integer32()));
+ HInstruction* start_index = Add<HConstant>(0);
Push(map);
Push(array);
Push(enum_length);
Push(start_index);
- HInstruction* index_cache = AddInstruction(
- new(zone()) HForInCacheArray(
- enumerable,
- map,
- DescriptorArray::kEnumCacheBridgeIndicesCacheIndex));
+ HInstruction* index_cache = Add<HForInCacheArray>(
+ enumerable, map, DescriptorArray::kEnumCacheBridgeIndicesCacheIndex);
HForInCacheArray::cast(array)->set_index_cache(
HForInCacheArray::cast(index_cache));
- bool osr_entry = PreProcessOsrEntry(stmt);
- HBasicBlock* loop_entry = CreateLoopHeaderBlock();
- current_block()->Goto(loop_entry);
- set_current_block(loop_entry);
- if (osr_entry) graph()->set_osr_loop_entry(loop_entry);
+ HBasicBlock* loop_entry = BuildLoopEntry(stmt);
HValue* index = environment()->ExpressionStackAt(0);
HValue* limit = environment()->ExpressionStackAt(1);
// Check that we still have more keys.
- HCompareIDAndBranch* compare_index =
- new(zone()) HCompareIDAndBranch(index, limit, Token::LT);
- compare_index->SetInputRepresentation(Representation::Integer32());
+ HCompareNumericAndBranch* compare_index =
+ New<HCompareNumericAndBranch>(index, limit, Token::LT);
+ compare_index->set_observed_input_representation(
+ Representation::Smi(), Representation::Smi());
HBasicBlock* loop_body = graph()->CreateBasicBlock();
HBasicBlock* loop_successor = graph()->CreateBasicBlock();
compare_index->SetSuccessorAt(0, loop_body);
compare_index->SetSuccessorAt(1, loop_successor);
- current_block()->Finish(compare_index);
+ FinishCurrentBlock(compare_index);
set_current_block(loop_successor);
Drop(5);
set_current_block(loop_body);
- HValue* key = AddInstruction(
- new(zone()) HLoadKeyedFastElement(
- environment()->ExpressionStackAt(2), // Enum cache.
- environment()->ExpressionStackAt(0), // Iteration index.
- environment()->ExpressionStackAt(0)));
+ HValue* key = Add<HLoadKeyed>(
+ environment()->ExpressionStackAt(2), // Enum cache.
+ environment()->ExpressionStackAt(0), // Iteration index.
+ environment()->ExpressionStackAt(0),
+ FAST_ELEMENTS);
// Check if the expected map still matches that of the enumerable.
// If not just deoptimize.
- AddInstruction(new(zone()) HCheckMapValue(
- environment()->ExpressionStackAt(4),
- environment()->ExpressionStackAt(3)));
+ Add<HCheckMapValue>(environment()->ExpressionStackAt(4),
+ environment()->ExpressionStackAt(3));
Bind(each_var, key);
@@ -4640,11 +3913,7 @@ void HGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
set_current_block(body_exit);
HValue* current_index = Pop();
- HInstruction* new_index = new(zone()) HAdd(environment()->LookupContext(),
- current_index,
- graph()->GetConstant1());
- new_index->AssumeRepresentation(Representation::Integer32());
- PushAndAdd(new_index);
+ Push(Add<HAdd>(current_index, graph()->GetConstant1()));
body_exit = current_block();
}
@@ -4658,35 +3927,48 @@ void HGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
}
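
Illustrative sketch (not part of this patch or of V8): throughout this and the following hunks, "AddInstruction(new(zone()) HFoo(context, ...))" collapses into "Add<HFoo>(...)" and bare construction into "New<HFoo>(...)", with the zone and the context argument supplied implicitly by the builder. A minimal standalone model of that variadic factory pattern, with toy types of my own:

    #include <memory>
    #include <utility>
    #include <vector>

    struct Instruction {
      virtual ~Instruction() = default;
    };

    class BuilderSketch {
     public:
      // New<T>(args...) only constructs the instruction; Add<T>(args...) also
      // appends it to the current block, so call sites drop the boilerplate.
      template <typename T, typename... Args>
      T* New(Args&&... args) {
        owned_.push_back(std::unique_ptr<Instruction>(
            new T(std::forward<Args>(args)...)));
        return static_cast<T*>(owned_.back().get());
      }

      template <typename T, typename... Args>
      T* Add(Args&&... args) {
        T* instr = New<T>(std::forward<Args>(args)...);
        current_block_.push_back(instr);
        return instr;
      }

     private:
      std::vector<std::unique_ptr<Instruction> > owned_;
      std::vector<Instruction*> current_block_;
    };

    // Usage with a toy instruction type:
    struct ConstantSketch : Instruction {
      explicit ConstantSketch(int v) : value(v) {}
      int value;
    };
    // BuilderSketch b;  b.Add<ConstantSketch>(0);
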
-void HGraphBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) {
+void HOptimizedGraphBuilder::VisitForOfStatement(ForOfStatement* stmt) {
+ ASSERT(!HasStackOverflow());
+ ASSERT(current_block() != NULL);
+ ASSERT(current_block()->HasPredecessor());
+ return Bailout(kForOfStatement);
+}
+
+
+void HOptimizedGraphBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
- return Bailout("TryCatchStatement");
+ return Bailout(kTryCatchStatement);
}
-void HGraphBuilder::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
+void HOptimizedGraphBuilder::VisitTryFinallyStatement(
+ TryFinallyStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
- return Bailout("TryFinallyStatement");
+ return Bailout(kTryFinallyStatement);
}
-void HGraphBuilder::VisitDebuggerStatement(DebuggerStatement* stmt) {
+void HOptimizedGraphBuilder::VisitDebuggerStatement(DebuggerStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
- return Bailout("DebuggerStatement");
+ return Bailout(kDebuggerStatement);
+}
+
+
+void HOptimizedGraphBuilder::VisitCaseClause(CaseClause* clause) {
+ UNREACHABLE();
}
static Handle<SharedFunctionInfo> SearchSharedFunctionInfo(
Code* unoptimized_code, FunctionLiteral* expr) {
int start_position = expr->start_position();
- RelocIterator it(unoptimized_code);
- for (;!it.done(); it.next()) {
+ for (RelocIterator it(unoptimized_code); !it.done(); it.next()) {
RelocInfo* rinfo = it.rinfo();
if (rinfo->rmode() != RelocInfo::EMBEDDED_OBJECT) continue;
Object* obj = rinfo->target_object();
@@ -4702,35 +3984,33 @@ static Handle<SharedFunctionInfo> SearchSharedFunctionInfo(
}
-void HGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
+void HOptimizedGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
Handle<SharedFunctionInfo> shared_info =
- SearchSharedFunctionInfo(info()->shared_info()->code(),
- expr);
+ SearchSharedFunctionInfo(current_info()->shared_info()->code(), expr);
if (shared_info.is_null()) {
- shared_info = Compiler::BuildFunctionInfo(expr, info()->script());
+ shared_info = Compiler::BuildFunctionInfo(expr, current_info()->script());
}
// We also have a stack overflow if the recursive compilation did.
if (HasStackOverflow()) return;
- HValue* context = environment()->LookupContext();
HFunctionLiteral* instr =
- new(zone()) HFunctionLiteral(context, shared_info, expr->pretenure());
+ New<HFunctionLiteral>(shared_info, expr->pretenure());
return ast_context()->ReturnInstruction(instr, expr->id());
}
-void HGraphBuilder::VisitSharedFunctionInfoLiteral(
- SharedFunctionInfoLiteral* expr) {
+void HOptimizedGraphBuilder::VisitNativeFunctionLiteral(
+ NativeFunctionLiteral* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
- return Bailout("SharedFunctionInfoLiteral");
+ return Bailout(kNativeFunctionLiteral);
}
-void HGraphBuilder::VisitConditional(Conditional* expr) {
+void HOptimizedGraphBuilder::VisitConditional(Conditional* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
@@ -4768,12 +4048,13 @@ void HGraphBuilder::VisitConditional(Conditional* expr) {
}
-HGraphBuilder::GlobalPropertyAccess HGraphBuilder::LookupGlobalProperty(
- Variable* var, LookupResult* lookup, bool is_store) {
- if (var->is_this() || !info()->has_global_object()) {
+HOptimizedGraphBuilder::GlobalPropertyAccess
+ HOptimizedGraphBuilder::LookupGlobalProperty(
+ Variable* var, LookupResult* lookup, bool is_store) {
+ if (var->is_this() || !current_info()->has_global_object()) {
return kUseGeneric;
}
- Handle<GlobalObject> global(info()->global_object());
+ Handle<GlobalObject> global(current_info()->global_object());
global->Lookup(*var->name(), lookup);
if (!lookup->IsNormal() ||
(is_store && lookup->IsReadOnly()) ||
@@ -4785,20 +4066,18 @@ HGraphBuilder::GlobalPropertyAccess HGraphBuilder::LookupGlobalProperty(
}
-HValue* HGraphBuilder::BuildContextChainWalk(Variable* var) {
+HValue* HOptimizedGraphBuilder::BuildContextChainWalk(Variable* var) {
ASSERT(var->IsContextSlot());
- HValue* context = environment()->LookupContext();
- int length = info()->scope()->ContextChainLength(var->scope());
+ HValue* context = environment()->context();
+ int length = current_info()->scope()->ContextChainLength(var->scope());
while (length-- > 0) {
- HInstruction* context_instruction = new(zone()) HOuterContext(context);
- AddInstruction(context_instruction);
- context = context_instruction;
+ context = Add<HOuterContext>(context);
}
return context;
}
-void HGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
+void HOptimizedGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
@@ -4807,15 +4086,14 @@ void HGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
case Variable::UNALLOCATED: {
if (IsLexicalVariableMode(variable->mode())) {
// TODO(rossberg): should this be an ASSERT?
- return Bailout("reference to global lexical variable");
+ return Bailout(kReferenceToGlobalLexicalVariable);
}
// Handle known global constants like 'undefined' specially to avoid a
// load from a global cell for them.
Handle<Object> constant_value =
isolate()->factory()->GlobalConstantFor(variable->name());
if (!constant_value.is_null()) {
- HConstant* instr =
- new(zone()) HConstant(constant_value, Representation::Tagged());
+ HConstant* instr = New<HConstant>(constant_value);
return ast_context()->ReturnInstruction(instr, expr->id());
}
@@ -4824,37 +4102,44 @@ void HGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
LookupGlobalProperty(variable, &lookup, false);
if (type == kUseCell &&
- info()->global_object()->IsAccessCheckNeeded()) {
+ current_info()->global_object()->IsAccessCheckNeeded()) {
type = kUseGeneric;
}
if (type == kUseCell) {
- Handle<GlobalObject> global(info()->global_object());
- Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(&lookup));
- HLoadGlobalCell* instr =
- new(zone()) HLoadGlobalCell(cell, lookup.GetPropertyDetails());
- return ast_context()->ReturnInstruction(instr, expr->id());
+ Handle<GlobalObject> global(current_info()->global_object());
+ Handle<PropertyCell> cell(global->GetPropertyCell(&lookup));
+ if (cell->type()->IsConstant()) {
+ cell->AddDependentCompilationInfo(top_info());
+ Handle<Object> constant_object = cell->type()->AsConstant();
+ if (constant_object->IsConsString()) {
+ constant_object =
+ FlattenGetString(Handle<String>::cast(constant_object));
+ }
+ HConstant* constant = New<HConstant>(constant_object);
+ return ast_context()->ReturnInstruction(constant, expr->id());
+ } else {
+ HLoadGlobalCell* instr =
+ New<HLoadGlobalCell>(cell, lookup.GetPropertyDetails());
+ return ast_context()->ReturnInstruction(instr, expr->id());
+ }
} else {
- HValue* context = environment()->LookupContext();
- HGlobalObject* global_object = new(zone()) HGlobalObject(context);
- AddInstruction(global_object);
+ HGlobalObject* global_object = Add<HGlobalObject>();
HLoadGlobalGeneric* instr =
- new(zone()) HLoadGlobalGeneric(context,
- global_object,
- variable->name(),
- ast_context()->is_for_typeof());
- instr->set_position(expr->position());
+ New<HLoadGlobalGeneric>(global_object,
+ variable->name(),
+ ast_context()->is_for_typeof());
return ast_context()->ReturnInstruction(instr, expr->id());
}
}
case Variable::PARAMETER:
case Variable::LOCAL: {
- HValue* value = environment()->Lookup(variable);
+ HValue* value = LookupAndMakeLive(variable);
if (value == graph()->GetConstantHole()) {
ASSERT(IsDeclaredVariableMode(variable->mode()) &&
variable->mode() != VAR);
- return Bailout("reference to uninitialized variable");
+ return Bailout(kReferenceToUninitializedVariable);
}
return ast_context()->ReturnValue(value);
}
@@ -4866,45 +4151,48 @@ void HGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
}
case Variable::LOOKUP:
- return Bailout("reference to a variable which requires dynamic lookup");
+ return Bailout(kReferenceToAVariableWhichRequiresDynamicLookup);
}
}
-void HGraphBuilder::VisitLiteral(Literal* expr) {
+void HOptimizedGraphBuilder::VisitLiteral(Literal* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
- HConstant* instr =
- new(zone()) HConstant(expr->handle(), Representation::Tagged());
+ HConstant* instr = New<HConstant>(expr->value());
return ast_context()->ReturnInstruction(instr, expr->id());
}
-void HGraphBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
+void HOptimizedGraphBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
Handle<JSFunction> closure = function_state()->compilation_info()->closure();
Handle<FixedArray> literals(closure->literals());
- HValue* context = environment()->LookupContext();
-
- HRegExpLiteral* instr = new(zone()) HRegExpLiteral(context,
- literals,
- expr->pattern(),
- expr->flags(),
- expr->literal_index());
+ HRegExpLiteral* instr = New<HRegExpLiteral>(literals,
+ expr->pattern(),
+ expr->flags(),
+ expr->literal_index());
return ast_context()->ReturnInstruction(instr, expr->id());
}
+static bool CanInlinePropertyAccess(Map* type) {
+ return type->IsJSObjectMap() &&
+ !type->is_dictionary_map() &&
+ !type->has_named_interceptor();
+}
+
+
static void LookupInPrototypes(Handle<Map> map,
Handle<String> name,
LookupResult* lookup) {
while (map->prototype()->IsJSObject()) {
Handle<JSObject> holder(JSObject::cast(map->prototype()));
- if (!holder->HasFastProperties()) break;
map = Handle<Map>(holder->map());
+ if (!CanInlinePropertyAccess(*map)) break;
map->LookupDescriptor(*holder, *name, lookup);
if (lookup->IsFound()) return;
}
@@ -4920,12 +4208,13 @@ static bool LookupAccessorPair(Handle<Map> map,
Handle<String> name,
Handle<AccessorPair>* accessors,
Handle<JSObject>* holder) {
- LookupResult lookup(map->GetIsolate());
+ Isolate* isolate = map->GetIsolate();
+ LookupResult lookup(isolate);
// Check for a JavaScript accessor directly in the map.
map->LookupDescriptor(NULL, *name, &lookup);
if (lookup.IsPropertyCallbacks()) {
- Handle<Object> callback(lookup.GetValueFromMap(*map));
+ Handle<Object> callback(lookup.GetValueFromMap(*map), isolate);
if (!callback->IsAccessorPair()) return false;
*accessors = Handle<AccessorPair>::cast(callback);
*holder = Handle<JSObject>();
@@ -4938,7 +4227,7 @@ static bool LookupAccessorPair(Handle<Map> map,
// Check for a JavaScript accessor somewhere in the proto chain.
LookupInPrototypes(map, name, &lookup);
if (lookup.IsPropertyCallbacks()) {
- Handle<Object> callback(lookup.GetValue());
+ Handle<Object> callback(lookup.GetValue(), isolate);
if (!callback->IsAccessorPair()) return false;
*accessors = Handle<AccessorPair>::cast(callback);
*holder = Handle<JSObject>(lookup.holder());
@@ -4950,20 +4239,6 @@ static bool LookupAccessorPair(Handle<Map> map,
}
-static bool LookupGetter(Handle<Map> map,
- Handle<String> name,
- Handle<JSFunction>* getter,
- Handle<JSObject>* holder) {
- Handle<AccessorPair> accessors;
- if (LookupAccessorPair(map, name, &accessors, holder) &&
- accessors->getter()->IsJSFunction()) {
- *getter = Handle<JSFunction>(JSFunction::cast(accessors->getter()));
- return true;
- }
- return false;
-}
-
-
static bool LookupSetter(Handle<Map> map,
Handle<String> name,
Handle<JSFunction>* setter,
@@ -4971,7 +4246,11 @@ static bool LookupSetter(Handle<Map> map,
Handle<AccessorPair> accessors;
if (LookupAccessorPair(map, name, &accessors, holder) &&
accessors->setter()->IsJSFunction()) {
- *setter = Handle<JSFunction>(JSFunction::cast(accessors->setter()));
+ Handle<JSFunction> func(JSFunction::cast(accessors->setter()));
+ CallOptimization call_optimization(func);
+ // TODO(dcarney): temporary hack unless crankshaft can handle api calls.
+ if (call_optimization.is_simple_api_call()) return false;
+ *setter = func;
return true;
}
return false;
@@ -4983,34 +4262,35 @@ static bool LookupSetter(Handle<Map> map,
// size of all objects that are part of the graph.
static bool IsFastLiteral(Handle<JSObject> boilerplate,
int max_depth,
- int* max_properties,
- int* total_size) {
+ int* max_properties) {
+ if (boilerplate->map()->is_deprecated()) {
+ Handle<Object> result = JSObject::TryMigrateInstance(boilerplate);
+ if (result.is_null()) return false;
+ }
+
ASSERT(max_depth >= 0 && *max_properties >= 0);
if (max_depth == 0) return false;
+ Isolate* isolate = boilerplate->GetIsolate();
Handle<FixedArrayBase> elements(boilerplate->elements());
if (elements->length() > 0 &&
- elements->map() != boilerplate->GetHeap()->fixed_cow_array_map()) {
- if (boilerplate->HasFastDoubleElements()) {
- *total_size += FixedDoubleArray::SizeFor(elements->length());
- } else if (boilerplate->HasFastObjectElements()) {
+ elements->map() != isolate->heap()->fixed_cow_array_map()) {
+ if (boilerplate->HasFastObjectElements()) {
Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
int length = elements->length();
for (int i = 0; i < length; i++) {
if ((*max_properties)-- == 0) return false;
- Handle<Object> value(fast_elements->get(i));
+ Handle<Object> value(fast_elements->get(i), isolate);
if (value->IsJSObject()) {
Handle<JSObject> value_object = Handle<JSObject>::cast(value);
if (!IsFastLiteral(value_object,
max_depth - 1,
- max_properties,
- total_size)) {
+ max_properties)) {
return false;
}
}
}
- *total_size += FixedArray::SizeFor(length);
- } else {
+ } else if (!boilerplate->HasFastDoubleElements()) {
return false;
}
}
@@ -5019,62 +4299,82 @@ static bool IsFastLiteral(Handle<JSObject> boilerplate,
if (properties->length() > 0) {
return false;
} else {
- int nof = boilerplate->map()->inobject_properties();
- for (int i = 0; i < nof; i++) {
+ Handle<DescriptorArray> descriptors(
+ boilerplate->map()->instance_descriptors());
+ int limit = boilerplate->map()->NumberOfOwnDescriptors();
+ for (int i = 0; i < limit; i++) {
+ PropertyDetails details = descriptors->GetDetails(i);
+ if (details.type() != FIELD) continue;
+ int index = descriptors->GetFieldIndex(i);
if ((*max_properties)-- == 0) return false;
- Handle<Object> value(boilerplate->InObjectPropertyAt(i));
+ Handle<Object> value(boilerplate->InObjectPropertyAt(index), isolate);
if (value->IsJSObject()) {
Handle<JSObject> value_object = Handle<JSObject>::cast(value);
if (!IsFastLiteral(value_object,
max_depth - 1,
- max_properties,
- total_size)) {
+ max_properties)) {
return false;
}
}
}
}
-
- *total_size += boilerplate->map()->instance_size();
return true;
}
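
Illustrative sketch (not part of this patch or of V8): the reworked IsFastLiteral walks the boilerplate with two budgets, a nesting-depth limit and one property counter shared across the whole traversal, and the literal only qualifies for the inlined fast deep copy if neither budget runs out. The same recursion shape on a toy tree:

    #include <vector>

    struct NodeSketch {
      std::vector<NodeSketch> children;
    };

    // 'max_properties' is a pointer on purpose: the count is shared by every
    // level of the recursion, mirroring the single budget in the builder.
    static bool FitsFastCopyBudget(const NodeSketch& node, int max_depth,
                                   int* max_properties) {
      if (max_depth == 0) return false;
      for (const NodeSketch& child : node.children) {
        if ((*max_properties)-- == 0) return false;
        // Recurse only into nested objects, as the builder does for JSObjects.
        if (!child.children.empty() &&
            !FitsFastCopyBudget(child, max_depth - 1, max_properties)) {
          return false;
        }
      }
      return true;
    }
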
-void HGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
+void HOptimizedGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
Handle<JSFunction> closure = function_state()->compilation_info()->closure();
- HValue* context = environment()->LookupContext();
HInstruction* literal;
// Check whether to use fast or slow deep-copying for boilerplate.
- int total_size = 0;
- int max_properties = HFastLiteral::kMaxLiteralProperties;
- Handle<Object> boilerplate(closure->literals()->get(expr->literal_index()));
- if (boilerplate->IsJSObject() &&
- IsFastLiteral(Handle<JSObject>::cast(boilerplate),
- HFastLiteral::kMaxLiteralDepth,
- &max_properties,
- &total_size)) {
- Handle<JSObject> boilerplate_object = Handle<JSObject>::cast(boilerplate);
- literal = new(zone()) HFastLiteral(context,
- boilerplate_object,
- total_size,
- expr->literal_index(),
- expr->depth());
+ int max_properties = kMaxFastLiteralProperties;
+ Handle<Object> literals_cell(closure->literals()->get(expr->literal_index()),
+ isolate());
+ Handle<AllocationSite> site;
+ Handle<JSObject> boilerplate;
+ if (!literals_cell->IsUndefined()) {
+ // Retrieve the boilerplate
+ site = Handle<AllocationSite>::cast(literals_cell);
+ boilerplate = Handle<JSObject>(JSObject::cast(site->transition_info()),
+ isolate());
+ }
+
+ if (!boilerplate.is_null() &&
+ IsFastLiteral(boilerplate, kMaxFastLiteralDepth, &max_properties)) {
+ AllocationSiteUsageContext usage_context(isolate(), site, false);
+ usage_context.EnterNewScope();
+ literal = BuildFastLiteral(boilerplate, &usage_context);
+ usage_context.ExitScope(site, boilerplate);
} else {
- literal = new(zone()) HObjectLiteral(context,
- expr->constant_properties(),
- expr->fast_elements(),
- expr->literal_index(),
- expr->depth(),
- expr->has_function());
+ NoObservableSideEffectsScope no_effects(this);
+ Handle<FixedArray> closure_literals(closure->literals(), isolate());
+ Handle<FixedArray> constant_properties = expr->constant_properties();
+ int literal_index = expr->literal_index();
+ int flags = expr->fast_elements()
+ ? ObjectLiteral::kFastElements : ObjectLiteral::kNoFlags;
+ flags |= expr->has_function()
+ ? ObjectLiteral::kHasFunction : ObjectLiteral::kNoFlags;
+
+ Add<HPushArgument>(Add<HConstant>(closure_literals));
+ Add<HPushArgument>(Add<HConstant>(literal_index));
+ Add<HPushArgument>(Add<HConstant>(constant_properties));
+ Add<HPushArgument>(Add<HConstant>(flags));
+
+ // TODO(mvstanton): Add a flag to turn off creation of any
+ // AllocationMementos for this call: we are in crankshaft and should have
+ // learned enough about transition behavior to stop emitting mementos.
+ Runtime::FunctionId function_id = Runtime::kCreateObjectLiteral;
+ literal = Add<HCallRuntime>(isolate()->factory()->empty_string(),
+ Runtime::FunctionForId(function_id),
+ 4);
}
// The object is expected in the bailout environment during computation
// of the property values and is the value of the entire expression.
- PushAndAdd(literal);
+ Push(literal);
expr->CalculateEmitStore(zone());
@@ -5090,9 +4390,8 @@ void HGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
ASSERT(!CompileTimeValue::IsCompileTimeValue(value));
// Fall through.
case ObjectLiteral::Property::COMPUTED:
- if (key->handle()->IsSymbol()) {
+ if (key->value()->IsInternalizedString()) {
if (property->emit_store()) {
- property->RecordTypeFeedback(oracle());
CHECK_ALIVE(VisitForValue(value));
HValue* value = Pop();
Handle<Map> map = property->GetReceiverType();
@@ -5113,7 +4412,9 @@ void HGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
map));
}
AddInstruction(store);
- if (store->HasObservableSideEffects()) AddSimulate(key->id());
+ if (store->HasObservableSideEffects()) {
+ Add<HSimulate>(key->id(), REMOVABLE_SIMULATE);
+ }
} else {
CHECK_ALIVE(VisitForEffect(value));
}
@@ -5123,7 +4424,7 @@ void HGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
case ObjectLiteral::Property::PROTOTYPE:
case ObjectLiteral::Property::SETTER:
case ObjectLiteral::Property::GETTER:
- return Bailout("Object literal with complex property");
+ return Bailout(kObjectLiteralWithComplexProperty);
default: UNREACHABLE();
}
}
@@ -5134,8 +4435,7 @@ void HGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
// of the object. This makes sure that the original object won't
// be used by other optimized code before it is transformed
// (e.g. because of code motion).
- HToFastProperties* result = new(zone()) HToFastProperties(Pop());
- AddInstruction(result);
+ HToFastProperties* result = Add<HToFastProperties>(Pop());
return ast_context()->ReturnValue(result);
} else {
return ast_context()->ReturnValue(Pop());
@@ -5143,60 +4443,95 @@ void HGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
}
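
Illustrative sketch (not part of this patch or of V8): VisitObjectLiteral now chooses between two paths: if a boilerplate exists in the literals slot and IsFastLiteral accepts it, BuildFastLiteral inlines a deep copy; otherwise the builder pushes the literal's arguments and calls the Runtime::kCreateObjectLiteral fallback. The decision, reduced to plain C++ with toy types and callbacks of my own:

    #include <functional>

    struct BoilerplateSketch { int depth; int property_count; };

    static int BuildLiteralSketch(
        const BoilerplateSketch* boilerplate,  // null when the slot is undefined
        int max_depth, int max_properties,
        const std::function<int(const BoilerplateSketch&)>& fast_copy,
        const std::function<int()>& runtime_fallback) {
      if (boilerplate != nullptr &&
          boilerplate->depth <= max_depth &&
          boilerplate->property_count <= max_properties) {
        return fast_copy(*boilerplate);   // BuildFastLiteral path
      }
      return runtime_fallback();          // HCallRuntime path
    }
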
-void HGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
+void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
ZoneList<Expression*>* subexprs = expr->values();
int length = subexprs->length();
- HValue* context = environment()->LookupContext();
HInstruction* literal;
- Handle<FixedArray> literals(environment()->closure()->literals());
- Handle<Object> raw_boilerplate(literals->get(expr->literal_index()));
-
- if (raw_boilerplate->IsUndefined()) {
- raw_boilerplate = Runtime::CreateArrayLiteralBoilerplate(
+ Handle<AllocationSite> site;
+ Handle<FixedArray> literals(environment()->closure()->literals(), isolate());
+ bool uninitialized = false;
+ Handle<Object> literals_cell(literals->get(expr->literal_index()),
+ isolate());
+ Handle<JSObject> boilerplate_object;
+ if (literals_cell->IsUndefined()) {
+ uninitialized = true;
+ Handle<Object> raw_boilerplate = Runtime::CreateArrayLiteralBoilerplate(
isolate(), literals, expr->constant_elements());
if (raw_boilerplate.is_null()) {
- return Bailout("array boilerplate creation failed");
+ return Bailout(kArrayBoilerplateCreationFailed);
+ }
+
+ boilerplate_object = Handle<JSObject>::cast(raw_boilerplate);
+ AllocationSiteCreationContext creation_context(isolate());
+ site = creation_context.EnterNewScope();
+ if (JSObject::DeepWalk(boilerplate_object, &creation_context).is_null()) {
+ return Bailout(kArrayBoilerplateCreationFailed);
}
- literals->set(expr->literal_index(), *raw_boilerplate);
- if (JSObject::cast(*raw_boilerplate)->elements()->map() ==
+ creation_context.ExitScope(site, boilerplate_object);
+ literals->set(expr->literal_index(), *site);
+
+ if (boilerplate_object->elements()->map() ==
isolate()->heap()->fixed_cow_array_map()) {
isolate()->counters()->cow_arrays_created_runtime()->Increment();
}
+ } else {
+ ASSERT(literals_cell->IsAllocationSite());
+ site = Handle<AllocationSite>::cast(literals_cell);
+ boilerplate_object = Handle<JSObject>(
+ JSObject::cast(site->transition_info()), isolate());
}
- Handle<JSObject> boilerplate = Handle<JSObject>::cast(raw_boilerplate);
+ ASSERT(!boilerplate_object.is_null());
+ ASSERT(site->SitePointsToLiteral());
+
ElementsKind boilerplate_elements_kind =
- Handle<JSObject>::cast(boilerplate)->GetElementsKind();
+ boilerplate_object->GetElementsKind();
// Check whether to use fast or slow deep-copying for boilerplate.
- int total_size = 0;
- int max_properties = HFastLiteral::kMaxLiteralProperties;
- if (IsFastLiteral(boilerplate,
- HFastLiteral::kMaxLiteralDepth,
- &max_properties,
- &total_size)) {
- literal = new(zone()) HFastLiteral(context,
- boilerplate,
- total_size,
- expr->literal_index(),
- expr->depth());
+ int max_properties = kMaxFastLiteralProperties;
+ if (IsFastLiteral(boilerplate_object,
+ kMaxFastLiteralDepth,
+ &max_properties)) {
+ AllocationSiteUsageContext usage_context(isolate(), site, false);
+ usage_context.EnterNewScope();
+ literal = BuildFastLiteral(boilerplate_object, &usage_context);
+ usage_context.ExitScope(site, boilerplate_object);
} else {
- literal = new(zone()) HArrayLiteral(context,
- boilerplate,
- length,
- expr->literal_index(),
- expr->depth());
+ NoObservableSideEffectsScope no_effects(this);
+ // Boilerplate already exists and constant elements are never accessed,
+ // pass an empty fixed array to the runtime function instead.
+ Handle<FixedArray> constants = isolate()->factory()->empty_fixed_array();
+ int literal_index = expr->literal_index();
+
+ Add<HPushArgument>(Add<HConstant>(literals));
+ Add<HPushArgument>(Add<HConstant>(literal_index));
+ Add<HPushArgument>(Add<HConstant>(constants));
+
+ // TODO(mvstanton): Consider a flag to turn off creation of any
+ // AllocationMementos for this call: we are in crankshaft and should have
+ // learned enough about transition behavior to stop emitting mementos.
+ Runtime::FunctionId function_id = (expr->depth() > 1)
+ ? Runtime::kCreateArrayLiteral : Runtime::kCreateArrayLiteralShallow;
+ literal = Add<HCallRuntime>(isolate()->factory()->empty_string(),
+ Runtime::FunctionForId(function_id),
+ 3);
+
+ // De-opt if elements kind changed from boilerplate_elements_kind.
+ Handle<Map> map = Handle<Map>(boilerplate_object->map(), isolate());
+ literal = Add<HCheckMaps>(literal, map, top_info());
}
// The array is expected in the bailout environment during computation
// of the property values and is the value of the entire expression.
- PushAndAdd(literal);
+ Push(literal);
+ // The literal index is on the stack, too.
+ Push(Add<HConstant>(expr->literal_index()));
- HLoadElements* elements = NULL;
+ HInstruction* elements = NULL;
for (int i = 0; i < length; i++) {
Expression* subexpr = subexprs->at(i);
@@ -5206,93 +4541,51 @@ void HGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
CHECK_ALIVE(VisitForValue(subexpr));
HValue* value = Pop();
- if (!Smi::IsValid(i)) return Bailout("Non-smi key in array literal");
+ if (!Smi::IsValid(i)) return Bailout(kNonSmiKeyInArrayLiteral);
- // Pass in literal as dummy depedency, since the receiver always has
- // elements.
- elements = new(zone()) HLoadElements(literal, literal);
- AddInstruction(elements);
+ elements = AddLoadElements(literal);
- HValue* key = AddInstruction(
- new(zone()) HConstant(Handle<Object>(Smi::FromInt(i)),
- Representation::Integer32()));
+ HValue* key = Add<HConstant>(i);
switch (boilerplate_elements_kind) {
case FAST_SMI_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
- // Smi-only arrays need a smi check.
- AddInstruction(new(zone()) HCheckSmi(value));
- // Fall through.
case FAST_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
- AddInstruction(new(zone()) HStoreKeyedFastElement(
- elements,
- key,
- value,
- boilerplate_elements_kind));
- break;
case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- AddInstruction(new(zone()) HStoreKeyedFastDoubleElement(elements,
- key,
- value));
+ case FAST_HOLEY_DOUBLE_ELEMENTS: {
+ HStoreKeyed* instr = Add<HStoreKeyed>(elements, key, value,
+ boilerplate_elements_kind);
+ instr->SetUninitialized(uninitialized);
break;
+ }
default:
UNREACHABLE();
break;
}
- AddSimulate(expr->GetIdForElement(i));
+ Add<HSimulate>(expr->GetIdForElement(i));
}
- return ast_context()->ReturnValue(Pop());
-}
-
-
-// Sets the lookup result and returns true if the load/store can be inlined.
-static bool ComputeLoadStoreField(Handle<Map> type,
- Handle<String> name,
- LookupResult* lookup,
- bool is_store) {
- // If we directly find a field, the access can be inlined.
- type->LookupDescriptor(NULL, *name, lookup);
- if (lookup->IsField()) return true;
- // For a load, we are out of luck if there is no such field.
- if (!is_store) return false;
-
- // 2nd chance: A store into a non-existent field can still be inlined if we
- // have a matching transition and some room left in the object.
- type->LookupTransition(NULL, *name, lookup);
- return lookup->IsTransitionToField(*type) &&
- (type->unused_property_fields() > 0);
+ Drop(1); // array literal index
+ return ast_context()->ReturnValue(Pop());
}
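
Illustrative sketch (not part of this patch or of V8): in the rewritten VisitArrayLiteral the closure's literals slot no longer caches the boilerplate object directly. On the first compile the slot is undefined, so the builder creates the boilerplate, deep-walks it to build an AllocationSite, and stores the site back into the slot; on later compiles the slot already holds an AllocationSite whose transition_info points at the boilerplate. A toy cache with the same two-state shape, using simplified types of my own:

    #include <memory>
    #include <utility>

    struct BoilerplateSketch { int elements_kind = 0; };

    struct SiteSketch {
      // Stands in for AllocationSite::transition_info().
      std::shared_ptr<BoilerplateSketch> boilerplate;
    };

    // The slot is "undefined" on the first compile and a site afterwards;
    // both paths return the (site, boilerplate) pair the builder needs.
    static std::pair<std::shared_ptr<SiteSketch>,
                     std::shared_ptr<BoilerplateSketch> >
    GetOrCreateSite(std::shared_ptr<SiteSketch>* slot) {
      if (*slot == nullptr) {                  // literals_cell->IsUndefined()
        auto boilerplate = std::make_shared<BoilerplateSketch>();
        auto site = std::make_shared<SiteSketch>();
        site->boilerplate = boilerplate;       // DeepWalk + ExitScope
        *slot = site;                          // literals->set(index, *site)
        return std::make_pair(site, boilerplate);
      }
      return std::make_pair(*slot, (*slot)->boilerplate);
    }
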
-static int ComputeLoadStoreFieldIndex(Handle<Map> type,
- Handle<String> name,
- LookupResult* lookup) {
- ASSERT(lookup->IsField() || lookup->IsTransitionToField(*type));
- if (lookup->IsField()) {
- return lookup->GetLocalFieldIndexFromMap(*type);
- } else {
- Map* transition = lookup->GetTransitionMapFromMap(*type);
- return transition->PropertyIndexFor(*name) - type->inobject_properties();
- }
+HCheckMaps* HOptimizedGraphBuilder::AddCheckMap(HValue* object,
+ Handle<Map> map) {
+ BuildCheckHeapObject(object);
+ return Add<HCheckMaps>(object, map, top_info());
}
-HInstruction* HGraphBuilder::BuildStoreNamedField(HValue* object,
- Handle<String> name,
- HValue* value,
- Handle<Map> map,
- LookupResult* lookup,
- bool smi_and_map_check) {
+HInstruction* HOptimizedGraphBuilder::BuildStoreNamedField(
+ HValue* checked_object,
+ Handle<String> name,
+ HValue* value,
+ Handle<Map> map,
+ LookupResult* lookup) {
ASSERT(lookup->IsFound());
- if (smi_and_map_check) {
- AddInstruction(new(zone()) HCheckNonSmi(object));
- AddInstruction(HCheckMaps::NewWithTransitions(object, map, zone()));
- }
-
// If the property does not exist yet, we have to check that it wasn't made
// readonly or turned into a setter by some meanwhile modifications on the
// prototype chain.
@@ -5304,37 +4597,64 @@ HInstruction* HGraphBuilder::BuildStoreNamedField(HValue* object,
if (proto_result.IsProperty()) {
// If the inherited property could induce readonly-ness, bail out.
if (proto_result.IsReadOnly() || !proto_result.IsCacheable()) {
- Bailout("improper object on prototype chain for store");
+ Bailout(kImproperObjectOnPrototypeChainForStore);
return NULL;
}
// We only need to check up to the preexisting property.
proto = proto_result.holder();
} else {
// Otherwise, find the top prototype.
- while (proto->GetPrototype()->IsJSObject()) proto = proto->GetPrototype();
- ASSERT(proto->GetPrototype()->IsNull());
+ while (proto->GetPrototype(isolate())->IsJSObject()) {
+ proto = proto->GetPrototype(isolate());
+ }
+ ASSERT(proto->GetPrototype(isolate())->IsNull());
}
ASSERT(proto->IsJSObject());
- AddInstruction(new(zone()) HCheckPrototypeMaps(
+ BuildCheckPrototypeMaps(
Handle<JSObject>(JSObject::cast(map->prototype())),
- Handle<JSObject>(JSObject::cast(proto))));
- }
-
- int index = ComputeLoadStoreFieldIndex(map, name, lookup);
- bool is_in_object = index < 0;
- int offset = index * kPointerSize;
- if (index < 0) {
- // Negative property indices are in-object properties, indexed
- // from the end of the fixed part of the object.
- offset += map->instance_size();
+ Handle<JSObject>(JSObject::cast(proto)));
+ }
+
+ HObjectAccess field_access = HObjectAccess::ForField(map, lookup, name);
+ bool transition_to_field = lookup->IsTransitionToField(*map);
+
+ HStoreNamedField *instr;
+ if (FLAG_track_double_fields && field_access.representation().IsDouble()) {
+ HObjectAccess heap_number_access =
+ field_access.WithRepresentation(Representation::Tagged());
+ if (transition_to_field) {
+ // The store requires a mutable HeapNumber to be allocated.
+ NoObservableSideEffectsScope no_side_effects(this);
+ HInstruction* heap_number_size = Add<HConstant>(HeapNumber::kSize);
+ HInstruction* heap_number = Add<HAllocate>(heap_number_size,
+ HType::HeapNumber(), isolate()->heap()->GetPretenureMode(),
+ HEAP_NUMBER_TYPE);
+ AddStoreMapConstant(heap_number, isolate()->factory()->heap_number_map());
+ Add<HStoreNamedField>(heap_number, HObjectAccess::ForHeapNumberValue(),
+ value);
+ instr = New<HStoreNamedField>(checked_object->ActualValue(),
+ heap_number_access,
+ heap_number);
+ } else {
+ // Already holds a HeapNumber; load the box and write its value field.
+ HInstruction* heap_number = Add<HLoadNamedField>(checked_object,
+ heap_number_access);
+ heap_number->set_type(HType::HeapNumber());
+ instr = New<HStoreNamedField>(heap_number,
+ HObjectAccess::ForHeapNumberValue(),
+ value);
+ }
} else {
- offset += FixedArray::kHeaderSize;
+ // This is a normal store.
+ instr = New<HStoreNamedField>(checked_object->ActualValue(),
+ field_access,
+ value);
}
- HStoreNamedField* instr =
- new(zone()) HStoreNamedField(object, name, value, is_in_object, offset);
- if (lookup->IsTransitionToField(*map)) {
+
+ if (transition_to_field) {
Handle<Map> transition(lookup->GetTransitionMapFromMap(*map));
- instr->set_transition(transition);
+ HConstant* transition_constant = Add<HConstant>(transition);
+ instr->SetTransition(transition_constant, top_info());
// TODO(fschneider): Record the new map type of the object in the IR to
// enable elimination of redundant checks after the transition store.
instr->SetGVNFlag(kChangesMaps);
@@ -5343,12 +4663,11 @@ HInstruction* HGraphBuilder::BuildStoreNamedField(HValue* object,
}
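
Illustrative sketch (not part of this patch or of V8): with FLAG_track_double_fields, a double-typed field is stored boxed. A store that transitions the map allocates a fresh mutable HeapNumber, writes the value into it, and stores the box into the object; a store to an existing double field loads the box already in the field and overwrites its value slot. The same shape in plain C++, with toy types of my own (the real code emits HAllocate and HStoreNamedField instructions instead of running this directly):

    #include <memory>

    struct HeapNumberSketch {
      explicit HeapNumberSketch(double v) : value(v) {}
      double value;
    };

    struct ObjectSketch {
      // A tagged field that, for double representation, points at a box.
      std::unique_ptr<HeapNumberSketch> double_field;
    };

    static void StoreDoubleField(ObjectSketch* object, double value,
                                 bool transition_to_field) {
      if (transition_to_field) {
        // First store through this transition: allocate the mutable box.
        object->double_field.reset(new HeapNumberSketch(value));
      } else {
        // The field already holds a box: overwrite its value slot.
        object->double_field->value = value;
      }
    }
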
-HInstruction* HGraphBuilder::BuildStoreNamedGeneric(HValue* object,
- Handle<String> name,
- HValue* value) {
- HValue* context = environment()->LookupContext();
- return new(zone()) HStoreNamedGeneric(
- context,
+HInstruction* HOptimizedGraphBuilder::BuildStoreNamedGeneric(
+ HValue* object,
+ Handle<String> name,
+ HValue* value) {
+ return New<HStoreNamedGeneric>(
object,
name,
value,
@@ -5356,27 +4675,38 @@ HInstruction* HGraphBuilder::BuildStoreNamedGeneric(HValue* object,
}
-HInstruction* HGraphBuilder::BuildCallSetter(HValue* object,
- HValue* value,
- Handle<Map> map,
- Handle<JSFunction> setter,
- Handle<JSObject> holder) {
- AddCheckConstantFunction(holder, object, map, true);
- AddInstruction(new(zone()) HPushArgument(object));
- AddInstruction(new(zone()) HPushArgument(value));
- return new(zone()) HCallConstantFunction(setter, 2);
+// Sets the lookup result and returns true if the load/store can be inlined.
+static bool ComputeStoreField(Handle<Map> type,
+ Handle<String> name,
+ LookupResult* lookup,
+ bool lookup_transition = true) {
+ ASSERT(!type->is_observed());
+ if (!CanInlinePropertyAccess(*type)) {
+ lookup->NotFound();
+ return false;
+ }
+ // If we directly find a field, the access can be inlined.
+ type->LookupDescriptor(NULL, *name, lookup);
+ if (lookup->IsField()) return true;
+
+ if (!lookup_transition) return false;
+
+ type->LookupTransition(NULL, *name, lookup);
+ return lookup->IsTransitionToField(*type) &&
+ (type->unused_property_fields() > 0);
}
-HInstruction* HGraphBuilder::BuildStoreNamedMonomorphic(HValue* object,
- Handle<String> name,
- HValue* value,
- Handle<Map> map) {
+HInstruction* HOptimizedGraphBuilder::BuildStoreNamedMonomorphic(
+ HValue* object,
+ Handle<String> name,
+ HValue* value,
+ Handle<Map> map) {
// Handle a store to a known field.
LookupResult lookup(isolate());
- if (ComputeLoadStoreField(map, name, &lookup, true)) {
- // true = needs smi and map check.
- return BuildStoreNamedField(object, name, value, map, &lookup, true);
+ if (ComputeStoreField(map, name, &lookup)) {
+ HCheckMaps* checked_object = AddCheckMap(object, map);
+ return BuildStoreNamedField(checked_object, name, value, map, &lookup);
}
// No luck, do a generic store.
@@ -5384,66 +4714,319 @@ HInstruction* HGraphBuilder::BuildStoreNamedMonomorphic(HValue* object,
}
-void HGraphBuilder::HandlePolymorphicLoadNamedField(Property* expr,
- HValue* object,
- SmallMapList* types,
- Handle<String> name) {
+bool HOptimizedGraphBuilder::PropertyAccessInfo::IsCompatibleForLoad(
+ PropertyAccessInfo* info) {
+ if (!CanInlinePropertyAccess(*map_)) return false;
+
+ if (!LookupDescriptor()) return false;
+
+ if (!lookup_.IsFound()) {
+ return (!info->lookup_.IsFound() || info->has_holder()) &&
+ map_->prototype() == info->map_->prototype();
+ }
+
+ // Mismatch if the other access info found the property in the prototype
+ // chain.
+ if (info->has_holder()) return false;
+
+ if (lookup_.IsPropertyCallbacks()) {
+ return accessor_.is_identical_to(info->accessor_);
+ }
+
+ if (lookup_.IsConstant()) {
+ return constant_.is_identical_to(info->constant_);
+ }
+
+ ASSERT(lookup_.IsField());
+ if (!info->lookup_.IsField()) return false;
+
+ Representation r = access_.representation();
+ if (!info->access_.representation().IsCompatibleForLoad(r)) return false;
+ if (info->access_.offset() != access_.offset()) return false;
+ if (info->access_.IsInobject() != access_.IsInobject()) return false;
+ info->GeneralizeRepresentation(r);
+ return true;
+}
+
+
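
Illustrative sketch (not part of this patch or of V8): IsCompatibleForLoad decides whether a second receiver map can reuse the load computed for the first one. For field loads both maps must place the property at the same offset and on the same side (in-object versus backing store), and the merged access keeps the more general representation; accessor and constant results must be the very same objects. The field part, boiled down with toy types of my own (representation handling simplified to a total order here):

    struct FieldAccessSketch {
      int offset;
      bool in_object;
      int representation;  // larger value = more general, simplified
    };

    // Two maps can share one inlined field load only if the field lives in
    // the same place; the merged access generalizes the representation.
    static bool CompatibleForLoad(FieldAccessSketch* merged,
                                  const FieldAccessSketch& other) {
      if (merged->offset != other.offset) return false;
      if (merged->in_object != other.in_object) return false;
      if (other.representation > merged->representation) {
        merged->representation = other.representation;
      }
      return true;
    }
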
+bool HOptimizedGraphBuilder::PropertyAccessInfo::LookupDescriptor() {
+ map_->LookupDescriptor(NULL, *name_, &lookup_);
+ return LoadResult(map_);
+}
+
+
+bool HOptimizedGraphBuilder::PropertyAccessInfo::LoadResult(Handle<Map> map) {
+ if (lookup_.IsField()) {
+ access_ = HObjectAccess::ForField(map, &lookup_, name_);
+ } else if (lookup_.IsPropertyCallbacks()) {
+ Handle<Object> callback(lookup_.GetValueFromMap(*map), isolate());
+ if (!callback->IsAccessorPair()) return false;
+ Object* getter = Handle<AccessorPair>::cast(callback)->getter();
+ if (!getter->IsJSFunction()) return false;
+ Handle<JSFunction> accessor = handle(JSFunction::cast(getter));
+ CallOptimization call_optimization(accessor);
+ // TODO(dcarney): temporary hack unless crankshaft can handle api calls.
+ if (call_optimization.is_simple_api_call()) return false;
+ accessor_ = accessor;
+ } else if (lookup_.IsConstant()) {
+ constant_ = handle(lookup_.GetConstantFromMap(*map), isolate());
+ }
+
+ return true;
+}
+
+
+bool HOptimizedGraphBuilder::PropertyAccessInfo::LookupInPrototypes() {
+ Handle<Map> map = map_;
+ while (map->prototype()->IsJSObject()) {
+ holder_ = handle(JSObject::cast(map->prototype()));
+ if (holder_->map()->is_deprecated()) {
+ JSObject::TryMigrateInstance(holder_);
+ }
+ map = Handle<Map>(holder_->map());
+ if (!CanInlinePropertyAccess(*map)) {
+ lookup_.NotFound();
+ return false;
+ }
+ map->LookupDescriptor(*holder_, *name_, &lookup_);
+ if (lookup_.IsFound()) return LoadResult(map);
+ }
+ lookup_.NotFound();
+ return true;
+}
+
+
+bool HOptimizedGraphBuilder::PropertyAccessInfo::CanLoadMonomorphic() {
+ if (!CanInlinePropertyAccess(*map_)) return IsStringLength();
+ if (IsJSObjectFieldAccessor()) return true;
+ if (!LookupDescriptor()) return false;
+ if (lookup_.IsFound()) return true;
+ return LookupInPrototypes();
+}
+
+
+bool HOptimizedGraphBuilder::PropertyAccessInfo::CanLoadAsMonomorphic(
+ SmallMapList* types) {
+ ASSERT(map_.is_identical_to(types->first()));
+ if (!CanLoadMonomorphic()) return false;
+ if (types->length() > kMaxLoadPolymorphism) return false;
+
+ if (IsStringLength()) {
+ for (int i = 1; i < types->length(); ++i) {
+ if (types->at(i)->instance_type() >= FIRST_NONSTRING_TYPE) return false;
+ }
+ return true;
+ }
+
+ if (IsArrayLength()) {
+ bool is_fast = IsFastElementsKind(map_->elements_kind());
+ for (int i = 1; i < types->length(); ++i) {
+ Handle<Map> test_map = types->at(i);
+ if (test_map->instance_type() != JS_ARRAY_TYPE) return false;
+ if (IsFastElementsKind(test_map->elements_kind()) != is_fast) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ if (IsJSObjectFieldAccessor()) {
+ InstanceType instance_type = map_->instance_type();
+ for (int i = 1; i < types->length(); ++i) {
+ if (types->at(i)->instance_type() != instance_type) return false;
+ }
+ return true;
+ }
+
+ for (int i = 1; i < types->length(); ++i) {
+ PropertyAccessInfo test_info(isolate(), types->at(i), name_);
+ if (!test_info.IsCompatibleForLoad(this)) return false;
+ }
+
+ return true;
+}
+
+
+HInstruction* HOptimizedGraphBuilder::BuildLoadMonomorphic(
+ PropertyAccessInfo* info,
+ HValue* object,
+ HInstruction* checked_object,
+ BailoutId ast_id,
+ BailoutId return_id,
+ bool can_inline_accessor) {
+
+ HObjectAccess access = HObjectAccess::ForMap(); // bogus default
+ if (info->GetJSObjectFieldAccess(&access)) {
+ return New<HLoadNamedField>(checked_object, access);
+ }
+
+ HValue* checked_holder = checked_object;
+ if (info->has_holder()) {
+ Handle<JSObject> prototype(JSObject::cast(info->map()->prototype()));
+ checked_holder = BuildCheckPrototypeMaps(prototype, info->holder());
+ }
+
+ if (!info->lookup()->IsFound()) return graph()->GetConstantUndefined();
+
+ if (info->lookup()->IsField()) {
+ return BuildLoadNamedField(checked_holder, info->access());
+ }
+
+ if (info->lookup()->IsPropertyCallbacks()) {
+ Push(checked_object);
+ if (FLAG_inline_accessors &&
+ can_inline_accessor &&
+ TryInlineGetter(info->accessor(), ast_id, return_id)) {
+ return NULL;
+ }
+ Add<HPushArgument>(Pop());
+ return New<HCallConstantFunction>(info->accessor(), 1);
+ }
+
+ ASSERT(info->lookup()->IsConstant());
+ return New<HConstant>(info->constant());
+}
+
+
+void HOptimizedGraphBuilder::HandlePolymorphicLoadNamedField(
+ BailoutId ast_id,
+ BailoutId return_id,
+ HValue* object,
+ SmallMapList* types,
+ Handle<String> name) {
+ // Something did not match; must use a polymorphic load.
int count = 0;
- int previous_field_offset = 0;
- bool previous_field_is_in_object = false;
- bool is_monomorphic_field = true;
- Handle<Map> map;
- LookupResult lookup(isolate());
+ HBasicBlock* join = NULL;
for (int i = 0; i < types->length() && count < kMaxLoadPolymorphism; ++i) {
- map = types->at(i);
- if (ComputeLoadStoreField(map, name, &lookup, false)) {
- int index = ComputeLoadStoreFieldIndex(map, name, &lookup);
- bool is_in_object = index < 0;
- int offset = index * kPointerSize;
- if (index < 0) {
- // Negative property indices are in-object properties, indexed
- // from the end of the fixed part of the object.
- offset += map->instance_size();
- } else {
- offset += FixedArray::kHeaderSize;
- }
+ PropertyAccessInfo info(isolate(), types->at(i), name);
+ if (info.CanLoadMonomorphic()) {
if (count == 0) {
- previous_field_offset = offset;
- previous_field_is_in_object = is_in_object;
- } else if (is_monomorphic_field) {
- is_monomorphic_field = (offset == previous_field_offset) &&
- (is_in_object == previous_field_is_in_object);
+ BuildCheckHeapObject(object);
+ join = graph()->CreateBasicBlock();
}
++count;
+ HBasicBlock* if_true = graph()->CreateBasicBlock();
+ HBasicBlock* if_false = graph()->CreateBasicBlock();
+ HCompareMap* compare = New<HCompareMap>(
+ object, info.map(), if_true, if_false);
+ FinishCurrentBlock(compare);
+
+ set_current_block(if_true);
+
+ HInstruction* load = BuildLoadMonomorphic(
+ &info, object, compare, ast_id, return_id, FLAG_polymorphic_inlining);
+ if (load == NULL) {
+ if (HasStackOverflow()) return;
+ } else {
+ if (!load->IsLinked()) {
+ AddInstruction(load);
+ }
+ if (!ast_context()->IsEffect()) Push(load);
+ }
+
+ if (current_block() != NULL) Goto(join);
+ set_current_block(if_false);
}
}
- // Use monomorphic load if property lookup results in the same field index
- // for all maps. Requires special map check on the set of all handled maps.
- AddInstruction(new(zone()) HCheckNonSmi(object));
- HInstruction* instr;
- if (count == types->length() && is_monomorphic_field) {
- AddInstruction(new(zone()) HCheckMaps(object, types, zone()));
- instr = BuildLoadNamedField(object, map, &lookup, false);
+ // Finish up. Unconditionally deoptimize if we've handled all the maps we
+ // know about and do not want to handle ones we've never seen. Otherwise
+ // use a generic IC.
+ if (count == types->length() && FLAG_deoptimize_uncommon_cases) {
+ // Because the deopt may be the only path in the polymorphic load, make sure
+ // that the environment stack matches the depth on deopt that it otherwise
+ // would have had after a successful load.
+ if (!ast_context()->IsEffect()) Push(graph()->GetConstant0());
+ FinishExitWithHardDeoptimization("Unknown map in polymorphic load", join);
} else {
- HValue* context = environment()->LookupContext();
- instr = new(zone()) HLoadNamedFieldPolymorphic(context,
- object,
- types,
- name,
- zone());
+ HInstruction* load = Add<HLoadNamedGeneric>(object, name);
+ if (!ast_context()->IsEffect()) Push(load);
+
+ if (join != NULL) {
+ Goto(join);
+ } else {
+ Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
+ if (!ast_context()->IsEffect()) ast_context()->ReturnValue(Pop());
+ return;
+ }
}
- instr->set_position(expr->position());
- return ast_context()->ReturnInstruction(instr, expr->id());
+ ASSERT(join != NULL);
+ join->SetJoinId(ast_id);
+ set_current_block(join);
+ if (!ast_context()->IsEffect()) ast_context()->ReturnValue(Pop());
+}
+
+
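
Illustrative sketch (not part of this patch or of V8): the polymorphic load now emits one HCompareMap branch per candidate map; each true branch performs that map's monomorphic load and jumps to a shared join block, and the final fallthrough either deoptimizes (when every feedback map was handled) or issues a generic load. The equivalent control flow, written as ordinary C++ over toy types and callbacks of my own:

    #include <functional>
    #include <vector>

    struct MapSketch { int id; };

    struct CaseSketch {
      int map_id;
      std::function<int()> monomorphic_load;  // the inlined per-map load
    };

    static int PolymorphicLoad(const MapSketch& receiver_map,
                               const std::vector<CaseSketch>& cases,
                               bool all_feedback_maps_handled,
                               const std::function<int()>& deoptimize,
                               const std::function<int()>& generic_load) {
      for (const CaseSketch& c : cases) {
        // HCompareMap: take this branch when the receiver has the map.
        if (receiver_map.id == c.map_id) return c.monomorphic_load();
      }
      // Fallthrough after all known maps failed to match.
      return all_feedback_maps_handled ? deoptimize() : generic_load();
    }
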
+bool HOptimizedGraphBuilder::TryStorePolymorphicAsMonomorphic(
+ BailoutId assignment_id,
+ HValue* object,
+ HValue* value,
+ SmallMapList* types,
+ Handle<String> name) {
+ // Use monomorphic store if property lookup results in the same field index
+ // for all maps. Requires special map check on the set of all handled maps.
+ if (types->length() > kMaxStorePolymorphism) return false;
+
+ LookupResult lookup(isolate());
+ int count;
+ Representation representation = Representation::None();
+ HObjectAccess access = HObjectAccess::ForMap(); // initial value unused.
+ for (count = 0; count < types->length(); ++count) {
+ Handle<Map> map = types->at(count);
+ // Pass false to ignore transitions.
+ if (!ComputeStoreField(map, name, &lookup, false)) break;
+ ASSERT(!map->is_observed());
+
+ HObjectAccess new_access = HObjectAccess::ForField(map, &lookup, name);
+ Representation new_representation = new_access.representation();
+
+ if (count == 0) {
+ // First time through the loop; set access and representation.
+ access = new_access;
+ representation = new_representation;
+ } else if (!representation.IsCompatibleForStore(new_representation)) {
+ // Representations did not match.
+ break;
+ } else if (access.offset() != new_access.offset()) {
+ // Offsets did not match.
+ break;
+ } else if (access.IsInobject() != new_access.IsInobject()) {
+ // In-objectness did not match.
+ break;
+ }
+ }
+
+ if (count != types->length()) return false;
+
+ // Everything matched; can use monomorphic store.
+ BuildCheckHeapObject(object);
+ HCheckMaps* checked_object = Add<HCheckMaps>(object, types);
+ HInstruction* store;
+ CHECK_ALIVE_OR_RETURN(
+ store = BuildStoreNamedField(
+ checked_object, name, value, types->at(count - 1), &lookup),
+ true);
+ if (!ast_context()->IsEffect()) Push(value);
+ AddInstruction(store);
+ Add<HSimulate>(assignment_id);
+ if (!ast_context()->IsEffect()) Drop(1);
+ ast_context()->ReturnValue(value);
+ return true;
}
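
Illustrative sketch (not part of this patch or of V8): TryStorePolymorphicAsMonomorphic avoids the per-map branching entirely when every feedback map resolves the property to the same field, meaning the same offset, the same in-objectness, and store-compatible representations; in that case one HCheckMaps over the whole map set plus a single field store is enough. The agreement test, boiled down with toy types of my own (representation compatibility simplified to equality):

    #include <vector>

    struct StoreAccessSketch {
      int offset;
      bool in_object;
      int representation;
    };

    // True when a single monomorphic store can serve every map in the set.
    static bool AllStoresAgree(const std::vector<StoreAccessSketch>& accesses,
                               StoreAccessSketch* merged) {
      if (accesses.empty()) return false;
      *merged = accesses[0];
      for (size_t i = 1; i < accesses.size(); ++i) {
        if (accesses[i].offset != merged->offset) return false;
        if (accesses[i].in_object != merged->in_object) return false;
        if (accesses[i].representation != merged->representation) return false;
      }
      return true;
    }
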
-void HGraphBuilder::HandlePolymorphicStoreNamedField(Assignment* expr,
- HValue* object,
- HValue* value,
- SmallMapList* types,
- Handle<String> name) {
+void HOptimizedGraphBuilder::HandlePolymorphicStoreNamedField(
+ BailoutId assignment_id,
+ HValue* object,
+ HValue* value,
+ SmallMapList* types,
+ Handle<String> name) {
+ if (TryStorePolymorphicAsMonomorphic(
+ assignment_id, object, value, types, name)) {
+ return;
+ }
+
// TODO(ager): We should recognize when the prototype chains for different
// maps are identical. In that case we can avoid repeatedly generating the
// same prototype map checks.
@@ -5452,27 +5035,25 @@ void HGraphBuilder::HandlePolymorphicStoreNamedField(Assignment* expr,
for (int i = 0; i < types->length() && count < kMaxStorePolymorphism; ++i) {
Handle<Map> map = types->at(i);
LookupResult lookup(isolate());
- if (ComputeLoadStoreField(map, name, &lookup, true)) {
+ if (ComputeStoreField(map, name, &lookup)) {
if (count == 0) {
- AddInstruction(new(zone()) HCheckNonSmi(object)); // Only needed once.
+ BuildCheckHeapObject(object);
join = graph()->CreateBasicBlock();
}
++count;
HBasicBlock* if_true = graph()->CreateBasicBlock();
HBasicBlock* if_false = graph()->CreateBasicBlock();
- HCompareMap* compare =
- new(zone()) HCompareMap(object, map, if_true, if_false);
- current_block()->Finish(compare);
+ HCompareMap* compare = New<HCompareMap>(object, map, if_true, if_false);
+ FinishCurrentBlock(compare);
set_current_block(if_true);
HInstruction* instr;
- CHECK_ALIVE(instr =
- BuildStoreNamedField(object, name, value, map, &lookup, false));
- instr->set_position(expr->position());
+ CHECK_ALIVE(instr = BuildStoreNamedField(
+ compare, name, value, map, &lookup));
// Goto will add the HSimulate for the store.
AddInstruction(instr);
if (!ast_context()->IsEffect()) Push(value);
- current_block()->Goto(join);
+ Goto(join);
set_current_block(if_false);
}
@@ -5482,25 +5063,26 @@ void HGraphBuilder::HandlePolymorphicStoreNamedField(Assignment* expr,
// know about and do not want to handle ones we've never seen. Otherwise
// use a generic IC.
if (count == types->length() && FLAG_deoptimize_uncommon_cases) {
- current_block()->FinishExitWithDeoptimization(HDeoptimize::kNoUses);
+ FinishExitWithHardDeoptimization("Unknown map in polymorphic store", join);
} else {
HInstruction* instr = BuildStoreNamedGeneric(object, name, value);
- instr->set_position(expr->position());
AddInstruction(instr);
if (join != NULL) {
- if (!ast_context()->IsEffect()) Push(value);
- current_block()->Goto(join);
+ if (!ast_context()->IsEffect()) {
+ Push(value);
+ }
+ Goto(join);
} else {
// The HSimulate for the store should not see the stored value in
// effect contexts (it is not materialized at expr->id() in the
// unoptimized code).
if (instr->HasObservableSideEffects()) {
if (ast_context()->IsEffect()) {
- AddSimulate(expr->id());
+ Add<HSimulate>(assignment_id, REMOVABLE_SIMULATE);
} else {
Push(value);
- AddSimulate(expr->id());
+ Add<HSimulate>(assignment_id, REMOVABLE_SIMULATE);
Drop(1);
}
}
@@ -5509,126 +5091,163 @@ void HGraphBuilder::HandlePolymorphicStoreNamedField(Assignment* expr,
}
ASSERT(join != NULL);
- join->SetJoinId(expr->id());
+ join->SetJoinId(assignment_id);
set_current_block(join);
- if (!ast_context()->IsEffect()) return ast_context()->ReturnValue(Pop());
+ if (!ast_context()->IsEffect()) {
+ ast_context()->ReturnValue(Pop());
+ }
}
-void HGraphBuilder::HandlePropertyAssignment(Assignment* expr) {
- Property* prop = expr->target()->AsProperty();
- ASSERT(prop != NULL);
- expr->RecordTypeFeedback(oracle(), zone());
- CHECK_ALIVE(VisitForValue(prop->obj()));
+static bool ComputeReceiverTypes(Expression* expr,
+ HValue* receiver,
+ SmallMapList** t) {
+ SmallMapList* types = expr->GetReceiverTypes();
+ *t = types;
+ bool monomorphic = expr->IsMonomorphic();
+ if (types != NULL && receiver->HasMonomorphicJSObjectType()) {
+ Map* root_map = receiver->GetMonomorphicJSObjectMap()->FindRootMap();
+ types->FilterForPossibleTransitions(root_map);
+ monomorphic = types->length() == 1;
+ }
+ return monomorphic && CanInlinePropertyAccess(*types->first());
+}
- if (prop->key()->IsPropertyName()) {
- // Named store.
- CHECK_ALIVE(VisitForValue(expr->value()));
- HValue* value = environment()->ExpressionStackAt(0);
- HValue* object = environment()->ExpressionStackAt(1);
- Literal* key = prop->key()->AsLiteral();
- Handle<String> name = Handle<String>::cast(key->handle());
- ASSERT(!name.is_null());
+void HOptimizedGraphBuilder::BuildStore(Expression* expr,
+ Property* prop,
+ BailoutId ast_id,
+ BailoutId return_id,
+ bool is_uninitialized) {
+ HValue* value = environment()->ExpressionStackAt(0);
- HInstruction* instr = NULL;
- SmallMapList* types = expr->GetReceiverTypes();
- bool monomorphic = expr->IsMonomorphic();
- Handle<Map> map;
- if (monomorphic) {
- map = types->first();
- if (map->is_dictionary_map()) monomorphic = false;
- }
- if (monomorphic) {
- Handle<JSFunction> setter;
- Handle<JSObject> holder;
- if (LookupSetter(map, name, &setter, &holder)) {
- AddCheckConstantFunction(holder, object, map, true);
- if (FLAG_inline_accessors && TryInlineSetter(setter, expr, value)) {
- return;
- }
- Drop(2);
- AddInstruction(new(zone()) HPushArgument(object));
- AddInstruction(new(zone()) HPushArgument(value));
- instr = new(zone()) HCallConstantFunction(setter, 2);
- } else {
- Drop(2);
- CHECK_ALIVE(instr = BuildStoreNamedMonomorphic(object,
- name,
- value,
- map));
- }
+ if (!prop->key()->IsPropertyName()) {
+ // Keyed store.
+ HValue* key = environment()->ExpressionStackAt(1);
+ HValue* object = environment()->ExpressionStackAt(2);
+ bool has_side_effects = false;
+ HandleKeyedElementAccess(object, key, value, expr,
+ true, // is_store
+ &has_side_effects);
+ Drop(3);
+ Push(value);
+ Add<HSimulate>(return_id, REMOVABLE_SIMULATE);
+ return ast_context()->ReturnValue(Pop());
+ }
- } else if (types != NULL && types->length() > 1) {
+ // Named store.
+ HValue* object = environment()->ExpressionStackAt(1);
+
+ if (is_uninitialized) {
+ Add<HDeoptimize>("Insufficient type feedback for property assignment",
+ Deoptimizer::SOFT);
+ }
+
+ Literal* key = prop->key()->AsLiteral();
+ Handle<String> name = Handle<String>::cast(key->value());
+ ASSERT(!name.is_null());
+
+ HInstruction* instr = NULL;
+
+ SmallMapList* types;
+ bool monomorphic = ComputeReceiverTypes(expr, object, &types);
+
+ if (monomorphic) {
+ Handle<Map> map = types->first();
+ Handle<JSFunction> setter;
+ Handle<JSObject> holder;
+ if (LookupSetter(map, name, &setter, &holder)) {
+ AddCheckConstantFunction(holder, object, map);
+ if (FLAG_inline_accessors &&
+ TryInlineSetter(setter, ast_id, return_id, value)) {
+ return;
+ }
Drop(2);
- return HandlePolymorphicStoreNamedField(expr, object, value, types, name);
+ Add<HPushArgument>(object);
+ Add<HPushArgument>(value);
+ instr = New<HCallConstantFunction>(setter, 2);
} else {
Drop(2);
- instr = BuildStoreNamedGeneric(object, name, value);
- }
+ CHECK_ALIVE(instr = BuildStoreNamedMonomorphic(object,
+ name,
+ value,
+ map));
+ }
+ } else if (types != NULL && types->length() > 1) {
+ Drop(2);
+ return HandlePolymorphicStoreNamedField(ast_id, object, value, types, name);
+ } else {
+ Drop(2);
+ instr = BuildStoreNamedGeneric(object, name, value);
+ }
- Push(value);
- instr->set_position(expr->position());
- AddInstruction(instr);
- if (instr->HasObservableSideEffects()) AddSimulate(expr->AssignmentId());
- return ast_context()->ReturnValue(Pop());
+ if (!ast_context()->IsEffect()) Push(value);
+ AddInstruction(instr);
+ if (instr->HasObservableSideEffects()) {
+ Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
+ }
+ if (!ast_context()->IsEffect()) Drop(1);
+ return ast_context()->ReturnValue(value);
+}
- } else {
- // Keyed store.
+
+void HOptimizedGraphBuilder::HandlePropertyAssignment(Assignment* expr) {
+ Property* prop = expr->target()->AsProperty();
+ ASSERT(prop != NULL);
+ CHECK_ALIVE(VisitForValue(prop->obj()));
+ if (!prop->key()->IsPropertyName()) {
CHECK_ALIVE(VisitForValue(prop->key()));
- CHECK_ALIVE(VisitForValue(expr->value()));
- HValue* value = Pop();
- HValue* key = Pop();
- HValue* object = Pop();
- bool has_side_effects = false;
- HandleKeyedElementAccess(object, key, value, expr, expr->AssignmentId(),
- expr->position(),
- true, // is_store
- &has_side_effects);
- Push(value);
- ASSERT(has_side_effects); // Stores always have side effects.
- AddSimulate(expr->AssignmentId());
- return ast_context()->ReturnValue(Pop());
}
+ CHECK_ALIVE(VisitForValue(expr->value()));
+ BuildStore(expr, prop, expr->id(),
+ expr->AssignmentId(), expr->IsUninitialized());
}
// Because not every expression has a position and there is not common
// superclass of Assignment and CountOperation, we cannot just pass the
// owning expression instead of position and ast_id separately.
-void HGraphBuilder::HandleGlobalVariableAssignment(Variable* var,
- HValue* value,
- int position,
- BailoutId ast_id) {
+void HOptimizedGraphBuilder::HandleGlobalVariableAssignment(
+ Variable* var,
+ HValue* value,
+ BailoutId ast_id) {
LookupResult lookup(isolate());
GlobalPropertyAccess type = LookupGlobalProperty(var, &lookup, true);
if (type == kUseCell) {
- Handle<GlobalObject> global(info()->global_object());
- Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(&lookup));
+ Handle<GlobalObject> global(current_info()->global_object());
+ Handle<PropertyCell> cell(global->GetPropertyCell(&lookup));
+ if (cell->type()->IsConstant()) {
+ IfBuilder builder(this);
+ HValue* constant = Add<HConstant>(cell->type()->AsConstant());
+ if (cell->type()->AsConstant()->IsNumber()) {
+ builder.If<HCompareNumericAndBranch>(value, constant, Token::EQ);
+ } else {
+ builder.If<HCompareObjectEqAndBranch>(value, constant);
+ }
+ builder.Then();
+ builder.Else();
+ Add<HDeoptimize>("Constant global variable assignment",
+ Deoptimizer::EAGER);
+ builder.End();
+ }
HInstruction* instr =
- new(zone()) HStoreGlobalCell(value, cell, lookup.GetPropertyDetails());
- instr->set_position(position);
- AddInstruction(instr);
- if (instr->HasObservableSideEffects()) AddSimulate(ast_id);
+ Add<HStoreGlobalCell>(value, cell, lookup.GetPropertyDetails());
+ if (instr->HasObservableSideEffects()) {
+ Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
+ }
} else {
- HValue* context = environment()->LookupContext();
- HGlobalObject* global_object = new(zone()) HGlobalObject(context);
- AddInstruction(global_object);
+ HGlobalObject* global_object = Add<HGlobalObject>();
HStoreGlobalGeneric* instr =
- new(zone()) HStoreGlobalGeneric(context,
- global_object,
- var->name(),
- value,
- function_strict_mode_flag());
- instr->set_position(position);
- AddInstruction(instr);
+ Add<HStoreGlobalGeneric>(global_object, var->name(),
+ value, function_strict_mode_flag());
+ USE(instr);
ASSERT(instr->HasObservableSideEffects());
- if (instr->HasObservableSideEffects()) AddSimulate(ast_id);
+ Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
}
}
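
The hunk above guards stores to a global whose PropertyCell is typed as constant: the incoming value is compared against the cached constant and any mismatch deoptimizes, so the cell's constant assumption is never silently broken. A rough standalone sketch of that guard follows (plain C++ with made-up PropertyCell/StoreResult types, not V8's internal API):

#include <iostream>
#include <optional>

// Simplified model of a global property cell whose type may be "constant".
struct PropertyCell {
  std::optional<double> constant;  // Set while the cell has only ever held one value.
  double value = 0.0;
};

enum class StoreResult { kFastStore, kDeoptimize };

// Mirrors the guard emitted by HandleGlobalVariableAssignment: when the cell
// is believed constant, only a store of that same value stays on the fast path.
StoreResult StoreGlobalCell(PropertyCell* cell, double new_value) {
  if (cell->constant.has_value() && *cell->constant != new_value) {
    return StoreResult::kDeoptimize;  // "Constant global variable assignment"
  }
  cell->value = new_value;
  return StoreResult::kFastStore;
}

int main() {
  PropertyCell cell;
  cell.constant = 42.0;
  cell.value = 42.0;
  std::cout << (StoreGlobalCell(&cell, 42.0) == StoreResult::kFastStore) << "\n";  // 1
  std::cout << (StoreGlobalCell(&cell, 7.0) == StoreResult::kDeoptimize) << "\n";  // 1
}
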
-void HGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
+void HOptimizedGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
Expression* target = expr->target();
VariableProxy* proxy = target->AsVariableProxy();
Property* prop = target->AsProperty();
@@ -5641,7 +5260,7 @@ void HGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
if (proxy != NULL) {
Variable* var = proxy->var();
if (var->mode() == LET) {
- return Bailout("unsupported let compound assignment");
+ return Bailout(kUnsupportedLetCompoundAssignment);
}
CHECK_ALIVE(VisitForValue(operation));
@@ -5650,31 +5269,29 @@ void HGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
case Variable::UNALLOCATED:
HandleGlobalVariableAssignment(var,
Top(),
- expr->position(),
expr->AssignmentId());
break;
case Variable::PARAMETER:
case Variable::LOCAL:
if (var->mode() == CONST) {
- return Bailout("unsupported const compound assignment");
+ return Bailout(kUnsupportedConstCompoundAssignment);
}
- Bind(var, Top());
+ BindIfLive(var, Top());
break;
case Variable::CONTEXT: {
// Bail out if we try to mutate a parameter value in a function
// using the arguments object. We do not (yet) correctly handle the
// arguments property of the function.
- if (info()->scope()->arguments() != NULL) {
+ if (current_info()->scope()->arguments() != NULL) {
// Parameters will be allocated to context slots. We have no
// direct way to detect that the variable is a parameter so we do
// a linear search of the parameter variables.
- int count = info()->scope()->num_parameters();
+ int count = current_info()->scope()->num_parameters();
for (int i = 0; i < count; ++i) {
- if (var == info()->scope()->parameter(i)) {
- Bailout(
- "assignment to parameter, function uses arguments object");
+ if (var == current_info()->scope()->parameter(i)) {
+ Bailout(kAssignmentToParameterFunctionUsesArgumentsObject);
}
}
}
@@ -5696,128 +5313,50 @@ void HGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
}
HValue* context = BuildContextChainWalk(var);
- HStoreContextSlot* instr =
- new(zone()) HStoreContextSlot(context, var->index(), mode, Top());
- AddInstruction(instr);
+ HStoreContextSlot* instr = Add<HStoreContextSlot>(
+ context, var->index(), mode, Top());
if (instr->HasObservableSideEffects()) {
- AddSimulate(expr->AssignmentId());
+ Add<HSimulate>(expr->AssignmentId(), REMOVABLE_SIMULATE);
}
break;
}
case Variable::LOOKUP:
- return Bailout("compound assignment to lookup slot");
+ return Bailout(kCompoundAssignmentToLookupSlot);
}
return ast_context()->ReturnValue(Pop());
} else if (prop != NULL) {
- prop->RecordTypeFeedback(oracle(), zone());
-
- if (prop->key()->IsPropertyName()) {
- // Named property.
- CHECK_ALIVE(VisitForValue(prop->obj()));
- HValue* object = Top();
-
- Handle<String> name = prop->key()->AsLiteral()->AsPropertyName();
- Handle<Map> map;
- HInstruction* load;
- bool monomorphic = prop->IsMonomorphic();
- if (monomorphic) {
- map = prop->GetReceiverTypes()->first();
- // We can't generate code for a monomorphic dict mode load so
- // just pretend it is not monomorphic.
- if (map->is_dictionary_map()) monomorphic = false;
- }
- if (monomorphic) {
- Handle<JSFunction> getter;
- Handle<JSObject> holder;
- if (LookupGetter(map, name, &getter, &holder)) {
- load = BuildCallGetter(object, map, getter, holder);
- } else {
- load = BuildLoadNamedMonomorphic(object, name, prop, map);
- }
- } else {
- load = BuildLoadNamedGeneric(object, name, prop);
- }
- PushAndAdd(load);
- if (load->HasObservableSideEffects()) AddSimulate(prop->LoadId());
-
- CHECK_ALIVE(VisitForValue(expr->value()));
- HValue* right = Pop();
- HValue* left = Pop();
+ CHECK_ALIVE(VisitForValue(prop->obj()));
+ HValue* object = Top();
+ HValue* key = NULL;
+ if ((!prop->IsFunctionPrototype() && !prop->key()->IsPropertyName()) ||
+ prop->IsStringAccess()) {
+ CHECK_ALIVE(VisitForValue(prop->key()));
+ key = Top();
+ }
- HInstruction* instr = BuildBinaryOperation(operation, left, right);
- PushAndAdd(instr);
- if (instr->HasObservableSideEffects()) AddSimulate(operation->id());
+ CHECK_ALIVE(PushLoad(prop, object, key));
- HInstruction* store;
- if (!monomorphic) {
- // If we don't know the monomorphic type, do a generic store.
- CHECK_ALIVE(store = BuildStoreNamedGeneric(object, name, instr));
- } else {
- Handle<JSFunction> setter;
- Handle<JSObject> holder;
- if (LookupSetter(map, name, &setter, &holder)) {
- store = BuildCallSetter(object, instr, map, setter, holder);
- } else {
- CHECK_ALIVE(store = BuildStoreNamedMonomorphic(object,
- name,
- instr,
- map));
- }
- }
- AddInstruction(store);
- // Drop the simulated receiver and value. Return the value.
- Drop(2);
- Push(instr);
- if (store->HasObservableSideEffects()) AddSimulate(expr->AssignmentId());
- return ast_context()->ReturnValue(Pop());
+ CHECK_ALIVE(VisitForValue(expr->value()));
+ HValue* right = Pop();
+ HValue* left = Pop();
- } else {
- // Keyed property.
- CHECK_ALIVE(VisitForValue(prop->obj()));
- CHECK_ALIVE(VisitForValue(prop->key()));
- HValue* obj = environment()->ExpressionStackAt(1);
- HValue* key = environment()->ExpressionStackAt(0);
-
- bool has_side_effects = false;
- HValue* load = HandleKeyedElementAccess(
- obj, key, NULL, prop, prop->LoadId(), RelocInfo::kNoPosition,
- false, // is_store
- &has_side_effects);
- Push(load);
- if (has_side_effects) AddSimulate(prop->LoadId());
-
-
- CHECK_ALIVE(VisitForValue(expr->value()));
- HValue* right = Pop();
- HValue* left = Pop();
-
- HInstruction* instr = BuildBinaryOperation(operation, left, right);
- PushAndAdd(instr);
- if (instr->HasObservableSideEffects()) AddSimulate(operation->id());
-
- expr->RecordTypeFeedback(oracle(), zone());
- HandleKeyedElementAccess(obj, key, instr, expr, expr->AssignmentId(),
- RelocInfo::kNoPosition,
- true, // is_store
- &has_side_effects);
-
- // Drop the simulated receiver, key, and value. Return the value.
- Drop(3);
- Push(instr);
- ASSERT(has_side_effects); // Stores always have side effects.
- AddSimulate(expr->AssignmentId());
- return ast_context()->ReturnValue(Pop());
+ HInstruction* instr = BuildBinaryOperation(operation, left, right);
+ AddInstruction(instr);
+ Push(instr);
+ if (instr->HasObservableSideEffects()) {
+ Add<HSimulate>(operation->id(), REMOVABLE_SIMULATE);
}
-
+ BuildStore(expr, prop, expr->id(),
+ expr->AssignmentId(), expr->IsUninitialized());
} else {
- return Bailout("invalid lhs in compound assignment");
+ return Bailout(kInvalidLhsInCompoundAssignment);
}
}
-void HGraphBuilder::VisitAssignment(Assignment* expr) {
+void HOptimizedGraphBuilder::VisitAssignment(Assignment* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
@@ -5845,15 +5384,15 @@ void HGraphBuilder::VisitAssignment(Assignment* expr) {
// We insert a use of the old value to detect unsupported uses of const
// variables (e.g. initialization inside a loop).
HValue* old_value = environment()->Lookup(var);
- AddInstruction(new(zone()) HUseConst(old_value));
+ Add<HUseConst>(old_value);
}
} else if (var->mode() == CONST_HARMONY) {
if (expr->op() != Token::INIT_CONST_HARMONY) {
- return Bailout("non-initializer assignment to const");
+ return Bailout(kNonInitializerAssignmentToConst);
}
}
- if (proxy->IsArguments()) return Bailout("assignment to arguments");
+ if (proxy->IsArguments()) return Bailout(kAssignmentToArguments);
// Handle the assignment.
switch (var->location()) {
@@ -5861,7 +5400,6 @@ void HGraphBuilder::VisitAssignment(Assignment* expr) {
CHECK_ALIVE(VisitForValue(expr->value()));
HandleGlobalVariableAssignment(var,
Top(),
- expr->position(),
expr->AssignmentId());
return ast_context()->ReturnValue(Pop());
@@ -5872,7 +5410,7 @@ void HGraphBuilder::VisitAssignment(Assignment* expr) {
if (var->mode() == LET && expr->op() == Token::ASSIGN) {
HValue* env_value = environment()->Lookup(var);
if (env_value == graph()->GetConstantHole()) {
- return Bailout("assignment to let variable before initialization");
+ return Bailout(kAssignmentToLetVariableBeforeInitialization);
}
}
// We do not allow the arguments object to occur in a context where it
@@ -5880,7 +5418,7 @@ void HGraphBuilder::VisitAssignment(Assignment* expr) {
// permitted.
CHECK_ALIVE(VisitForValue(expr->value(), ARGUMENTS_ALLOWED));
HValue* value = Pop();
- Bind(var, value);
+ BindIfLive(var, value);
return ast_context()->ReturnValue(value);
}
@@ -5888,13 +5426,13 @@ void HGraphBuilder::VisitAssignment(Assignment* expr) {
// Bail out if we try to mutate a parameter value in a function using
// the arguments object. We do not (yet) correctly handle the
// arguments property of the function.
- if (info()->scope()->arguments() != NULL) {
+ if (current_info()->scope()->arguments() != NULL) {
// Parameters will be rewritten to context slots. We have no direct way
// to detect that the variable is a parameter.
- int count = info()->scope()->num_parameters();
+ int count = current_info()->scope()->num_parameters();
for (int i = 0; i < count; ++i) {
- if (var == info()->scope()->parameter(i)) {
- return Bailout("assignment to parameter in arguments object");
+ if (var == current_info()->scope()->parameter(i)) {
+ return Bailout(kAssignmentToParameterInArgumentsObject);
}
}
}
@@ -5926,25 +5464,30 @@ void HGraphBuilder::VisitAssignment(Assignment* expr) {
}
HValue* context = BuildContextChainWalk(var);
- HStoreContextSlot* instr = new(zone()) HStoreContextSlot(
+ HStoreContextSlot* instr = Add<HStoreContextSlot>(
context, var->index(), mode, Top());
- AddInstruction(instr);
if (instr->HasObservableSideEffects()) {
- AddSimulate(expr->AssignmentId());
+ Add<HSimulate>(expr->AssignmentId(), REMOVABLE_SIMULATE);
}
return ast_context()->ReturnValue(Pop());
}
case Variable::LOOKUP:
- return Bailout("assignment to LOOKUP variable");
+ return Bailout(kAssignmentToLOOKUPVariable);
}
} else {
- return Bailout("invalid left-hand side in assignment");
+ return Bailout(kInvalidLeftHandSideInAssignment);
}
}
-void HGraphBuilder::VisitThrow(Throw* expr) {
+void HOptimizedGraphBuilder::VisitYield(Yield* expr) {
+ // Generators are not optimized, so we should never get here.
+ UNREACHABLE();
+}
+
+
+void HOptimizedGraphBuilder::VisitThrow(Throw* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
@@ -5954,268 +5497,112 @@ void HGraphBuilder::VisitThrow(Throw* expr) {
ASSERT(ast_context()->IsEffect());
CHECK_ALIVE(VisitForValue(expr->exception()));
- HValue* context = environment()->LookupContext();
HValue* value = environment()->Pop();
- HThrow* instr = new(zone()) HThrow(context, value);
- instr->set_position(expr->position());
- AddInstruction(instr);
- AddSimulate(expr->id());
- current_block()->FinishExit(new(zone()) HAbnormalExit);
- set_current_block(NULL);
-}
+ if (!FLAG_emit_opt_code_positions) SetSourcePosition(expr->position());
+ Add<HThrow>(value);
+ Add<HSimulate>(expr->id());
-
-HLoadNamedField* HGraphBuilder::BuildLoadNamedField(HValue* object,
- Handle<Map> map,
- LookupResult* lookup,
- bool smi_and_map_check) {
- if (smi_and_map_check) {
- AddInstruction(new(zone()) HCheckNonSmi(object));
- AddInstruction(HCheckMaps::NewWithTransitions(object, map, zone()));
- }
-
- int index = lookup->GetLocalFieldIndexFromMap(*map);
- if (index < 0) {
- // Negative property indices are in-object properties, indexed
- // from the end of the fixed part of the object.
- int offset = (index * kPointerSize) + map->instance_size();
- return new(zone()) HLoadNamedField(object, true, offset);
- } else {
- // Non-negative property indices are in the properties array.
- int offset = (index * kPointerSize) + FixedArray::kHeaderSize;
- return new(zone()) HLoadNamedField(object, false, offset);
+ // If the throw definitely exits the function, we can finish with a dummy
+ // control flow at this point. This is not the case if the throw is inside
+ // an inlined function which may be replaced.
+ if (call_context() == NULL) {
+ FinishExitCurrentBlock(New<HAbnormalExit>());
}
}
-HInstruction* HGraphBuilder::BuildLoadNamedGeneric(HValue* object,
- Handle<String> name,
- Property* expr) {
- if (expr->IsUninitialized() && !FLAG_always_opt) {
- AddInstruction(new(zone()) HSoftDeoptimize);
- current_block()->MarkAsDeoptimizing();
+HLoadNamedField* HGraphBuilder::BuildLoadNamedField(HValue* object,
+ HObjectAccess access) {
+ if (FLAG_track_double_fields && access.representation().IsDouble()) {
+ // load the heap number
+ HLoadNamedField* heap_number = Add<HLoadNamedField>(
+ object, access.WithRepresentation(Representation::Tagged()));
+ heap_number->set_type(HType::HeapNumber());
+ // load the double value from it
+ return New<HLoadNamedField>(
+ heap_number, HObjectAccess::ForHeapNumberValue());
}
- HValue* context = environment()->LookupContext();
- return new(zone()) HLoadNamedGeneric(context, object, name);
+ return New<HLoadNamedField>(object, access);
}
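
When double fields are tracked, the field slot holds a tagged HeapNumber box rather than a raw double, which is why the new BuildLoadNamedField splits the access into a tagged load of the box followed by a load of its value. A minimal sketch of that two-step load, using illustrative HeapNumber/TaggedField structs rather than V8's real object layout:

#include <cassert>
#include <iostream>

// Simplified model: a "double field" is stored as a pointer to a boxed number.
struct HeapNumber { double value; };

struct TaggedField {
  HeapNumber* box;  // What the object slot actually contains.
};

// Two-step load mirroring BuildLoadNamedField for double representation:
// first read the tagged box out of the object, then read the double from it.
double LoadDoubleField(const TaggedField& field) {
  HeapNumber* heap_number = field.box;   // load the heap number
  assert(heap_number != nullptr);
  return heap_number->value;             // load the double value from it
}

int main() {
  HeapNumber box{3.25};
  TaggedField field{&box};
  std::cout << LoadDoubleField(field) << "\n";  // 3.25
}
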
-HInstruction* HGraphBuilder::BuildCallGetter(HValue* object,
- Handle<Map> map,
- Handle<JSFunction> getter,
- Handle<JSObject> holder) {
- AddCheckConstantFunction(holder, object, map, true);
- AddInstruction(new(zone()) HPushArgument(object));
- return new(zone()) HCallConstantFunction(getter, 1);
+HInstruction* HGraphBuilder::AddLoadNamedField(HValue* object,
+ HObjectAccess access) {
+ return AddInstruction(BuildLoadNamedField(object, access));
}
-HInstruction* HGraphBuilder::BuildLoadNamedMonomorphic(HValue* object,
- Handle<String> name,
- Property* expr,
- Handle<Map> map) {
- // Handle a load from a known field.
- ASSERT(!map->is_dictionary_map());
- LookupResult lookup(isolate());
- map->LookupDescriptor(NULL, *name, &lookup);
- if (lookup.IsField()) {
- return BuildLoadNamedField(object, map, &lookup, true);
- }
-
- // Handle a load of a constant known function.
- if (lookup.IsConstantFunction()) {
- AddInstruction(new(zone()) HCheckNonSmi(object));
- AddInstruction(HCheckMaps::NewWithTransitions(object, map, zone()));
- Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*map));
- return new(zone()) HConstant(function, Representation::Tagged());
+HInstruction* HGraphBuilder::BuildLoadStringLength(HValue* object,
+ HValue* checked_string) {
+ if (FLAG_fold_constants && object->IsConstant()) {
+ HConstant* constant = HConstant::cast(object);
+ if (constant->HasStringValue()) {
+ return New<HConstant>(constant->StringValue()->length());
+ }
}
-
- // No luck, do a generic load.
- return BuildLoadNamedGeneric(object, name, expr);
+ return BuildLoadNamedField(checked_string, HObjectAccess::ForStringLength());
}
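
BuildLoadStringLength now folds the length of a constant-string receiver straight into an HConstant and only falls back to a field load otherwise. A tiny sketch of that fold, with std::optional standing in for "the receiver is a known constant":

#include <iostream>
#include <optional>
#include <string>

// Mirrors BuildLoadStringLength: if the receiver is a known constant string,
// the length becomes a constant; otherwise it has to be loaded at runtime.
std::optional<size_t> TryFoldStringLength(const std::optional<std::string>& constant) {
  if (constant.has_value()) return constant->length();
  return std::nullopt;  // Emit the field load instead.
}

int main() {
  std::cout << TryFoldStringLength(std::string("abc")).value() << "\n";  // 3
}
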
-HInstruction* HGraphBuilder::BuildLoadKeyedGeneric(HValue* object,
- HValue* key) {
- HValue* context = environment()->LookupContext();
- return new(zone()) HLoadKeyedGeneric(context, object, key);
-}
-
-
-HInstruction* HGraphBuilder::BuildExternalArrayElementAccess(
- HValue* external_elements,
- HValue* checked_key,
- HValue* val,
- HValue* dependency,
- ElementsKind elements_kind,
- bool is_store) {
- if (is_store) {
- ASSERT(val != NULL);
- switch (elements_kind) {
- case EXTERNAL_PIXEL_ELEMENTS: {
- val = AddInstruction(new(zone()) HClampToUint8(val));
- break;
- }
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS: {
- if (!val->representation().IsInteger32()) {
- val = AddInstruction(new(zone()) HChange(
- val,
- Representation::Integer32(),
- true, // Truncate to int32.
- false)); // Don't deoptimize undefined (irrelevant here).
- }
- break;
- }
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- break;
- case FAST_SMI_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
- return new(zone()) HStoreKeyedSpecializedArrayElement(
- external_elements, checked_key, val, elements_kind);
- } else {
- ASSERT(val == NULL);
- HLoadKeyedSpecializedArrayElement* load =
- new(zone()) HLoadKeyedSpecializedArrayElement(
- external_elements, checked_key, dependency, elements_kind);
- if (FLAG_opt_safe_uint32_operations &&
- elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) {
- graph()->RecordUint32Instruction(load);
- }
- return load;
+HInstruction* HOptimizedGraphBuilder::BuildLoadNamedGeneric(
+ HValue* object,
+ Handle<String> name,
+ Property* expr) {
+ if (expr->IsUninitialized()) {
+ Add<HDeoptimize>("Insufficient type feedback for generic named load",
+ Deoptimizer::SOFT);
}
+ return New<HLoadNamedGeneric>(object, name);
}
-HInstruction* HGraphBuilder::BuildFastElementAccess(HValue* elements,
- HValue* checked_key,
- HValue* val,
- HValue* load_dependency,
- ElementsKind elements_kind,
- bool is_store) {
- if (is_store) {
- ASSERT(val != NULL);
- switch (elements_kind) {
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- return new(zone()) HStoreKeyedFastDoubleElement(
- elements, checked_key, val);
- case FAST_SMI_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- // Smi-only arrays need a smi check.
- AddInstruction(new(zone()) HCheckSmi(val));
- // Fall through.
- case FAST_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- return new(zone()) HStoreKeyedFastElement(
- elements, checked_key, val, elements_kind);
- default:
- UNREACHABLE();
- return NULL;
- }
- }
- // It's an element load (!is_store).
- HoleCheckMode mode = IsFastPackedElementsKind(elements_kind) ?
- OMIT_HOLE_CHECK :
- PERFORM_HOLE_CHECK;
- if (IsFastDoubleElementsKind(elements_kind)) {
- return new(zone()) HLoadKeyedFastDoubleElement(elements, checked_key,
- load_dependency, mode);
- } else { // Smi or Object elements.
- return new(zone()) HLoadKeyedFastElement(elements, checked_key,
- load_dependency, elements_kind);
- }
+
+HInstruction* HOptimizedGraphBuilder::BuildLoadKeyedGeneric(HValue* object,
+ HValue* key) {
+ return New<HLoadKeyedGeneric>(object, key);
}
-HInstruction* HGraphBuilder::BuildMonomorphicElementAccess(HValue* object,
- HValue* key,
- HValue* val,
- HValue* dependency,
- Handle<Map> map,
- bool is_store) {
- HCheckMaps* mapcheck = new(zone()) HCheckMaps(object, map,
- zone(), dependency);
- AddInstruction(mapcheck);
- if (dependency) {
- mapcheck->ClearGVNFlag(kDependsOnElementsKind);
+LoadKeyedHoleMode HOptimizedGraphBuilder::BuildKeyedHoleMode(Handle<Map> map) {
+ // Loads from "stock" fast holey double arrays can elide the hole check.
+ LoadKeyedHoleMode load_mode = NEVER_RETURN_HOLE;
+ if (*map == isolate()->get_initial_js_array_map(FAST_HOLEY_DOUBLE_ELEMENTS) &&
+ isolate()->IsFastArrayConstructorPrototypeChainIntact()) {
+ Handle<JSObject> prototype(JSObject::cast(map->prototype()), isolate());
+ Handle<JSObject> object_prototype = isolate()->initial_object_prototype();
+ BuildCheckPrototypeMaps(prototype, object_prototype);
+ load_mode = ALLOW_RETURN_HOLE;
+ graph()->MarkDependsOnEmptyArrayProtoElements();
}
- return BuildUncheckedMonomorphicElementAccess(object, key, val,
- mapcheck, map, is_store);
+
+ return load_mode;
}
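
BuildKeyedHoleMode only allows the hole to flow out of a load when the receiver uses the initial FAST_HOLEY_DOUBLE_ELEMENTS JSArray map and the array prototype chain is still untouched; otherwise a hole would have to be looked up on the prototypes. A compact sketch of that decision (the two bool inputs are illustrative stand-ins for the map identity and prototype-chain checks):

#include <iostream>

enum LoadKeyedHoleMode { NEVER_RETURN_HOLE, ALLOW_RETURN_HOLE };

// Mirrors the condition in BuildKeyedHoleMode: the receiver must use the
// "stock" initial holey-double array map and the array prototype chain must
// still be empty, otherwise reading a hole could hit a prototype element.
LoadKeyedHoleMode KeyedHoleMode(bool is_initial_holey_double_array_map,
                                bool fast_array_prototype_chain_intact) {
  if (is_initial_holey_double_array_map && fast_array_prototype_chain_intact) {
    return ALLOW_RETURN_HOLE;  // The real code also registers a code dependency.
  }
  return NEVER_RETURN_HOLE;
}

int main() {
  std::cout << (KeyedHoleMode(true, true) == ALLOW_RETURN_HOLE) << "\n";   // 1
  std::cout << (KeyedHoleMode(true, false) == NEVER_RETURN_HOLE) << "\n";  // 1
}
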
-HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
+HInstruction* HOptimizedGraphBuilder::BuildMonomorphicElementAccess(
HValue* object,
HValue* key,
HValue* val,
- HCheckMaps* mapcheck,
+ HValue* dependency,
Handle<Map> map,
- bool is_store) {
- // No GVNFlag is necessary for ElementsKind if there is an explicit dependency
- // on a HElementsTransition instruction. The flag can also be removed if the
- // map to check has FAST_HOLEY_ELEMENTS, since there can be no further
- // ElementsKind transitions. Finally, the dependency can be removed for stores
- // for FAST_ELEMENTS, since a transition to HOLEY elements won't change the
- // generated store code.
- if ((map->elements_kind() == FAST_HOLEY_ELEMENTS) ||
- (map->elements_kind() == FAST_ELEMENTS && is_store)) {
- mapcheck->ClearGVNFlag(kDependsOnElementsKind);
- }
- bool fast_smi_only_elements = map->has_fast_smi_elements();
- bool fast_elements = map->has_fast_object_elements();
- HInstruction* elements =
- AddInstruction(new(zone()) HLoadElements(object, mapcheck));
- if (is_store && (fast_elements || fast_smi_only_elements)) {
- HCheckMaps* check_cow_map = new(zone()) HCheckMaps(
- elements, isolate()->factory()->fixed_array_map(), zone());
- check_cow_map->ClearGVNFlag(kDependsOnElementsKind);
- AddInstruction(check_cow_map);
- }
- HInstruction* length = NULL;
- HInstruction* checked_key = NULL;
- if (map->has_external_array_elements()) {
- length = AddInstruction(new(zone()) HFixedArrayBaseLength(elements));
- checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length,
- ALLOW_SMI_KEY));
- HLoadExternalArrayPointer* external_elements =
- new(zone()) HLoadExternalArrayPointer(elements);
- AddInstruction(external_elements);
- return BuildExternalArrayElementAccess(
- external_elements, checked_key, val, mapcheck,
- map->elements_kind(), is_store);
- }
- ASSERT(fast_smi_only_elements ||
- fast_elements ||
- map->has_fast_double_elements());
- if (map->instance_type() == JS_ARRAY_TYPE) {
- length = AddInstruction(new(zone()) HJSArrayLength(object, mapcheck,
- HType::Smi()));
- } else {
- length = AddInstruction(new(zone()) HFixedArrayBaseLength(elements));
+ bool is_store,
+ KeyedAccessStoreMode store_mode) {
+ HCheckMaps* checked_object = Add<HCheckMaps>(object, map, top_info(),
+ dependency);
+ if (dependency) {
+ checked_object->ClearGVNFlag(kDependsOnElementsKind);
}
- checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length,
- ALLOW_SMI_KEY));
- return BuildFastElementAccess(elements, checked_key, val, mapcheck,
- map->elements_kind(), is_store);
+
+ LoadKeyedHoleMode load_mode = BuildKeyedHoleMode(map);
+ return BuildUncheckedMonomorphicElementAccess(
+ checked_object, key, val,
+ map->instance_type() == JS_ARRAY_TYPE,
+ map->elements_kind(), is_store,
+ load_mode, store_mode);
}
-HInstruction* HGraphBuilder::TryBuildConsolidatedElementLoad(
+HInstruction* HOptimizedGraphBuilder::TryBuildConsolidatedElementLoad(
HValue* object,
HValue* key,
HValue* val,
@@ -6228,9 +5615,11 @@ HInstruction* HGraphBuilder::TryBuildConsolidatedElementLoad(
bool has_smi_or_object_maps = false;
bool has_js_array_access = false;
bool has_non_js_array_access = false;
+ bool has_seen_holey_elements = false;
Handle<Map> most_general_consolidated_map;
for (int i = 0; i < maps->length(); ++i) {
Handle<Map> map = maps->at(i);
+ if (!map->IsJSObjectMap()) return NULL;
// Don't allow mixing of JSArrays with JSObjects.
if (map->instance_type() == JS_ARRAY_TYPE) {
if (has_non_js_array_access) return NULL;
@@ -6250,6 +5639,10 @@ HInstruction* HGraphBuilder::TryBuildConsolidatedElementLoad(
} else {
return NULL;
}
+ // Remember if we've ever seen holey elements.
+ if (IsHoleyElementsKind(map->elements_kind())) {
+ has_seen_holey_elements = true;
+ }
// Remember the most general elements kind, the code for its load will
// properly handle all of the more specific cases.
if ((i == 0) || IsMoreGeneralElementsKindTransition(
@@ -6260,46 +5653,41 @@ HInstruction* HGraphBuilder::TryBuildConsolidatedElementLoad(
}
if (!has_double_maps && !has_smi_or_object_maps) return NULL;
- HCheckMaps* check_maps = new(zone()) HCheckMaps(object, maps, zone());
- AddInstruction(check_maps);
+ HCheckMaps* checked_object = Add<HCheckMaps>(object, maps);
+ // FAST_ELEMENTS is considered more general than FAST_HOLEY_SMI_ELEMENTS.
+ // If we've seen both, the consolidated load must use FAST_HOLEY_ELEMENTS.
+ ElementsKind consolidated_elements_kind = has_seen_holey_elements
+ ? GetHoleyElementsKind(most_general_consolidated_map->elements_kind())
+ : most_general_consolidated_map->elements_kind();
HInstruction* instr = BuildUncheckedMonomorphicElementAccess(
- object, key, val, check_maps, most_general_consolidated_map, false);
+ checked_object, key, val,
+ most_general_consolidated_map->instance_type() == JS_ARRAY_TYPE,
+ consolidated_elements_kind,
+ false, NEVER_RETURN_HOLE, STANDARD_STORE);
return instr;
}
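
The consolidated load keeps the most general elements kind seen across all maps, and the new has_seen_holey_elements flag promotes it to the holey variant whenever any map was holey, since FAST_ELEMENTS alone would not cover FAST_HOLEY_SMI_ELEMENTS receivers. A small sketch of that promotion step (GetHoleyKind is an illustrative stand-in for GetHoleyElementsKind):

#include <iostream>

enum ElementsKind {
  FAST_SMI_ELEMENTS, FAST_HOLEY_SMI_ELEMENTS,
  FAST_ELEMENTS, FAST_HOLEY_ELEMENTS,
  FAST_DOUBLE_ELEMENTS, FAST_HOLEY_DOUBLE_ELEMENTS
};

// Illustrative stand-in for GetHoleyElementsKind().
ElementsKind GetHoleyKind(ElementsKind kind) {
  switch (kind) {
    case FAST_SMI_ELEMENTS:    return FAST_HOLEY_SMI_ELEMENTS;
    case FAST_ELEMENTS:        return FAST_HOLEY_ELEMENTS;
    case FAST_DOUBLE_ELEMENTS: return FAST_HOLEY_DOUBLE_ELEMENTS;
    default:                   return kind;  // Already holey.
  }
}

// FAST_ELEMENTS is "more general" than FAST_HOLEY_SMI_ELEMENTS, so the most
// general packed kind plus any holey map must consolidate to the holey variant.
ElementsKind ConsolidatedKind(ElementsKind most_general, bool has_seen_holey) {
  return has_seen_holey ? GetHoleyKind(most_general) : most_general;
}

int main() {
  std::cout << (ConsolidatedKind(FAST_ELEMENTS, true) == FAST_HOLEY_ELEMENTS) << "\n";  // 1
}
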
-HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
- HValue* key,
- HValue* val,
- Expression* prop,
- BailoutId ast_id,
- int position,
- bool is_store,
- bool* has_side_effects) {
+HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
+ HValue* object,
+ HValue* key,
+ HValue* val,
+ SmallMapList* maps,
+ bool is_store,
+ KeyedAccessStoreMode store_mode,
+ bool* has_side_effects) {
*has_side_effects = false;
- AddInstruction(new(zone()) HCheckNonSmi(object));
- SmallMapList* maps = prop->GetReceiverTypes();
- bool todo_external_array = false;
+ BuildCheckHeapObject(object);
if (!is_store) {
HInstruction* consolidated_load =
TryBuildConsolidatedElementLoad(object, key, val, maps);
if (consolidated_load != NULL) {
- AddInstruction(consolidated_load);
*has_side_effects |= consolidated_load->HasObservableSideEffects();
- if (position != RelocInfo::kNoPosition) {
- consolidated_load->set_position(position);
- }
return consolidated_load;
}
}
- static const int kNumElementTypes = kElementsKindCount;
- bool type_todo[kNumElementTypes];
- for (int i = 0; i < kNumElementTypes; ++i) {
- type_todo[i] = false;
- }
-
// Elements_kind transition support.
MapHandleList transition_target(maps->length());
// Collect possible transition targets.
@@ -6320,8 +5708,7 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
transition_target.Add(transitioned_map);
}
- int num_untransitionable_maps = 0;
- Handle<Map> untransitionable_map;
+ MapHandleList untransitionable_maps(maps->length());
HTransitionElementsKind* transition = NULL;
for (int i = 0; i < maps->length(); ++i) {
Handle<Map> map = maps->at(i);
@@ -6330,200 +5717,137 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
ASSERT(Map::IsValidElementsTransition(
map->elements_kind(),
transition_target.at(i)->elements_kind()));
- transition = new(zone()) HTransitionElementsKind(
- object, map, transition_target.at(i));
- AddInstruction(transition);
+ transition = Add<HTransitionElementsKind>(object, map,
+ transition_target.at(i));
} else {
- type_todo[map->elements_kind()] = true;
- if (IsExternalArrayElementsKind(map->elements_kind())) {
- todo_external_array = true;
- }
- num_untransitionable_maps++;
- untransitionable_map = map;
+ untransitionable_maps.Add(map);
}
}
// If only one map is left after transitioning, handle this case
// monomorphically.
- if (num_untransitionable_maps == 1) {
+ ASSERT(untransitionable_maps.length() >= 1);
+ if (untransitionable_maps.length() == 1) {
+ Handle<Map> untransitionable_map = untransitionable_maps[0];
HInstruction* instr = NULL;
- if (untransitionable_map->has_slow_elements_kind()) {
+ if (untransitionable_map->has_slow_elements_kind() ||
+ !untransitionable_map->IsJSObjectMap()) {
instr = AddInstruction(is_store ? BuildStoreKeyedGeneric(object, key, val)
: BuildLoadKeyedGeneric(object, key));
} else {
- instr = AddInstruction(BuildMonomorphicElementAccess(
- object, key, val, transition, untransitionable_map, is_store));
+ instr = BuildMonomorphicElementAccess(
+ object, key, val, transition, untransitionable_map, is_store,
+ store_mode);
}
*has_side_effects |= instr->HasObservableSideEffects();
- if (position != RelocInfo::kNoPosition) instr->set_position(position);
return is_store ? NULL : instr;
}
- HInstruction* checkspec =
- AddInstruction(HCheckInstanceType::NewIsSpecObject(object, zone()));
HBasicBlock* join = graph()->CreateBasicBlock();
- HInstruction* elements_kind_instr =
- AddInstruction(new(zone()) HElementsKind(object));
- HCompareConstantEqAndBranch* elements_kind_branch = NULL;
- HInstruction* elements =
- AddInstruction(new(zone()) HLoadElements(object, checkspec));
- HLoadExternalArrayPointer* external_elements = NULL;
- HInstruction* checked_key = NULL;
-
- // Generated code assumes that FAST_* and DICTIONARY_ELEMENTS ElementsKinds
- // are handled before external arrays.
- STATIC_ASSERT(FAST_SMI_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
- STATIC_ASSERT(FAST_DOUBLE_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
- STATIC_ASSERT(DICTIONARY_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
-
- for (ElementsKind elements_kind = FIRST_ELEMENTS_KIND;
- elements_kind <= LAST_ELEMENTS_KIND;
- elements_kind = ElementsKind(elements_kind + 1)) {
- // After having handled FAST_* and DICTIONARY_ELEMENTS, we need to add some
- // code that's executed for all external array cases.
- STATIC_ASSERT(LAST_EXTERNAL_ARRAY_ELEMENTS_KIND ==
- LAST_ELEMENTS_KIND);
- if (elements_kind == FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND
- && todo_external_array) {
- HInstruction* length =
- AddInstruction(new(zone()) HFixedArrayBaseLength(elements));
- checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length));
- external_elements = new(zone()) HLoadExternalArrayPointer(elements);
- AddInstruction(external_elements);
- }
- if (type_todo[elements_kind]) {
- HBasicBlock* if_true = graph()->CreateBasicBlock();
- HBasicBlock* if_false = graph()->CreateBasicBlock();
- elements_kind_branch = new(zone()) HCompareConstantEqAndBranch(
- elements_kind_instr, elements_kind, Token::EQ_STRICT);
- elements_kind_branch->SetSuccessorAt(0, if_true);
- elements_kind_branch->SetSuccessorAt(1, if_false);
- current_block()->Finish(elements_kind_branch);
-
- set_current_block(if_true);
- HInstruction* access;
- if (IsFastElementsKind(elements_kind)) {
- if (is_store && !IsFastDoubleElementsKind(elements_kind)) {
- AddInstruction(new(zone()) HCheckMaps(
- elements, isolate()->factory()->fixed_array_map(),
- zone(), elements_kind_branch));
- }
- // TODO(jkummerow): The need for these two blocks could be avoided
- // in one of two ways:
- // (1) Introduce ElementsKinds for JSArrays that are distinct from
- // those for fast objects.
- // (2) Put the common instructions into a third "join" block. This
- // requires additional AST IDs that we can deopt to from inside
- // that join block. They must be added to the Property class (when
- // it's a keyed property) and registered in the full codegen.
- HBasicBlock* if_jsarray = graph()->CreateBasicBlock();
- HBasicBlock* if_fastobject = graph()->CreateBasicBlock();
- HHasInstanceTypeAndBranch* typecheck =
- new(zone()) HHasInstanceTypeAndBranch(object, JS_ARRAY_TYPE);
- typecheck->SetSuccessorAt(0, if_jsarray);
- typecheck->SetSuccessorAt(1, if_fastobject);
- current_block()->Finish(typecheck);
-
- set_current_block(if_jsarray);
- HInstruction* length;
- length = AddInstruction(new(zone()) HJSArrayLength(object, typecheck,
- HType::Smi()));
- checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length,
- ALLOW_SMI_KEY));
- access = AddInstruction(BuildFastElementAccess(
- elements, checked_key, val, elements_kind_branch,
- elements_kind, is_store));
- if (!is_store) {
- Push(access);
- }
-
- *has_side_effects |= access->HasObservableSideEffects();
- if (position != -1) {
- access->set_position(position);
- }
- if_jsarray->Goto(join);
-
- set_current_block(if_fastobject);
- length = AddInstruction(new(zone()) HFixedArrayBaseLength(elements));
- checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length,
- ALLOW_SMI_KEY));
- access = AddInstruction(BuildFastElementAccess(
- elements, checked_key, val, elements_kind_branch,
- elements_kind, is_store));
- } else if (elements_kind == DICTIONARY_ELEMENTS) {
- if (is_store) {
- access = AddInstruction(BuildStoreKeyedGeneric(object, key, val));
- } else {
- access = AddInstruction(BuildLoadKeyedGeneric(object, key));
- }
- } else { // External array elements.
- access = AddInstruction(BuildExternalArrayElementAccess(
- external_elements, checked_key, val, elements_kind_branch,
- elements_kind, is_store));
- }
- *has_side_effects |= access->HasObservableSideEffects();
- if (position != RelocInfo::kNoPosition) access->set_position(position);
- if (!is_store) {
- Push(access);
- }
- current_block()->Goto(join);
- set_current_block(if_false);
- }
+ for (int i = 0; i < untransitionable_maps.length(); ++i) {
+ Handle<Map> map = untransitionable_maps[i];
+ if (!map->IsJSObjectMap()) continue;
+ ElementsKind elements_kind = map->elements_kind();
+ HBasicBlock* this_map = graph()->CreateBasicBlock();
+ HBasicBlock* other_map = graph()->CreateBasicBlock();
+ HCompareMap* mapcompare =
+ New<HCompareMap>(object, map, this_map, other_map);
+ FinishCurrentBlock(mapcompare);
+
+ set_current_block(this_map);
+ HInstruction* access = NULL;
+ if (IsDictionaryElementsKind(elements_kind)) {
+ access = is_store
+ ? AddInstruction(BuildStoreKeyedGeneric(object, key, val))
+ : AddInstruction(BuildLoadKeyedGeneric(object, key));
+ } else {
+ ASSERT(IsFastElementsKind(elements_kind) ||
+ IsExternalArrayElementsKind(elements_kind));
+ LoadKeyedHoleMode load_mode = BuildKeyedHoleMode(map);
+ // Happily, mapcompare is a checked object.
+ access = BuildUncheckedMonomorphicElementAccess(
+ mapcompare, key, val,
+ map->instance_type() == JS_ARRAY_TYPE,
+ elements_kind, is_store,
+ load_mode,
+ store_mode);
+ }
+ *has_side_effects |= access->HasObservableSideEffects();
+ // The caller will use has_side_effects and add a correct Simulate.
+ access->SetFlag(HValue::kHasNoObservableSideEffects);
+ if (!is_store) {
+ Push(access);
+ }
+ NoObservableSideEffectsScope scope(this);
+ GotoNoSimulate(join);
+ set_current_block(other_map);
}
// Deopt if none of the cases matched.
- current_block()->FinishExitWithDeoptimization(HDeoptimize::kNoUses);
- join->SetJoinId(ast_id);
+ NoObservableSideEffectsScope scope(this);
+ FinishExitWithHardDeoptimization("Unknown map in polymorphic element access",
+ join);
set_current_block(join);
return is_store ? NULL : Pop();
}
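
The rewritten polymorphic element access is a chain of map compares: each untransitionable map gets its own block with a specialized access, every block jumps to a common join, and the fall-through ends in a hard deoptimization. A standalone sketch of that dispatch shape, with plain structs and a runtime_error standing in for the deopt (none of these types are Hydrogen's):

#include <functional>
#include <iostream>
#include <stdexcept>
#include <vector>

struct Receiver { int map_id; };

struct Handler {
  int map_id;                                      // Map this case is specialized for.
  std::function<double(const Receiver&)> access;   // Specialized element access.
};

// Mirrors the compare-map / specialized-access / join / deopt structure:
// try each known map in turn, and "deoptimize" if none of them matches.
double PolymorphicAccess(const Receiver& obj, const std::vector<Handler>& cases) {
  for (const Handler& h : cases) {
    if (obj.map_id == h.map_id) return h.access(obj);
  }
  throw std::runtime_error("Unknown map in polymorphic element access");  // deopt
}

int main() {
  std::vector<Handler> cases = {
      {1, [](const Receiver&) { return 1.5; }},   // e.g. fast double elements
      {2, [](const Receiver&) { return 2.0; }},   // e.g. fast object elements
  };
  std::cout << PolymorphicAccess({2}, cases) << "\n";  // 2
}
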
-HValue* HGraphBuilder::HandleKeyedElementAccess(HValue* obj,
- HValue* key,
- HValue* val,
- Expression* expr,
- BailoutId ast_id,
- int position,
- bool is_store,
- bool* has_side_effects) {
+HValue* HOptimizedGraphBuilder::HandleKeyedElementAccess(
+ HValue* obj,
+ HValue* key,
+ HValue* val,
+ Expression* expr,
+ bool is_store,
+ bool* has_side_effects) {
ASSERT(!expr->IsPropertyName());
HInstruction* instr = NULL;
- if (expr->IsMonomorphic()) {
- Handle<Map> map = expr->GetMonomorphicReceiverType();
+
+ SmallMapList* types;
+ bool monomorphic = ComputeReceiverTypes(expr, obj, &types);
+
+ if (monomorphic) {
+ Handle<Map> map = types->first();
if (map->has_slow_elements_kind()) {
instr = is_store ? BuildStoreKeyedGeneric(obj, key, val)
: BuildLoadKeyedGeneric(obj, key);
+ AddInstruction(instr);
} else {
- AddInstruction(new(zone()) HCheckNonSmi(obj));
- instr = BuildMonomorphicElementAccess(obj, key, val, NULL, map, is_store);
+ BuildCheckHeapObject(obj);
+ instr = BuildMonomorphicElementAccess(
+ obj, key, val, NULL, map, is_store, expr->GetStoreMode());
}
- } else if (expr->GetReceiverTypes() != NULL &&
- !expr->GetReceiverTypes()->is_empty()) {
+ } else if (types != NULL && !types->is_empty()) {
return HandlePolymorphicElementAccess(
- obj, key, val, expr, ast_id, position, is_store, has_side_effects);
+ obj, key, val, types, is_store,
+ expr->GetStoreMode(), has_side_effects);
} else {
if (is_store) {
+ if (expr->IsAssignment() &&
+ expr->AsAssignment()->HasNoTypeInformation()) {
+ Add<HDeoptimize>("Insufficient type feedback for keyed store",
+ Deoptimizer::SOFT);
+ }
instr = BuildStoreKeyedGeneric(obj, key, val);
} else {
+ if (expr->AsProperty()->HasNoTypeInformation()) {
+ Add<HDeoptimize>("Insufficient type feedback for keyed load",
+ Deoptimizer::SOFT);
+ }
instr = BuildLoadKeyedGeneric(obj, key);
}
+ AddInstruction(instr);
}
- if (position != RelocInfo::kNoPosition) instr->set_position(position);
- AddInstruction(instr);
*has_side_effects = instr->HasObservableSideEffects();
return instr;
}
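
Roughly, HandleKeyedElementAccess triages on the recorded receiver maps: a single map compiles to a monomorphic fast access, several maps go through the polymorphic dispatch above, and no feedback at all falls back to a generic access preceded by a soft deoptimization. A rough sketch of that triage (the enum and the map-count rule are a simplification of what ComputeReceiverTypes decides):

#include <iostream>
#include <vector>

enum class AccessKind { kMonomorphic, kPolymorphic, kGenericWithSoftDeopt };

// Simplified triage: the number of receiver maps recorded as type feedback
// decides how the keyed access is compiled.
AccessKind ClassifyKeyedAccess(const std::vector<int>& receiver_maps) {
  if (receiver_maps.size() == 1) return AccessKind::kMonomorphic;
  if (!receiver_maps.empty()) return AccessKind::kPolymorphic;
  return AccessKind::kGenericWithSoftDeopt;  // "Insufficient type feedback"
}

int main() {
  std::cout << (ClassifyKeyedAccess({}) == AccessKind::kGenericWithSoftDeopt) << "\n";  // 1
}
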
-HInstruction* HGraphBuilder::BuildStoreKeyedGeneric(HValue* object,
- HValue* key,
- HValue* value) {
- HValue* context = environment()->LookupContext();
- return new(zone()) HStoreKeyedGeneric(
- context,
+HInstruction* HOptimizedGraphBuilder::BuildStoreKeyedGeneric(
+ HValue* object,
+ HValue* key,
+ HValue* value) {
+ return New<HStoreKeyedGeneric>(
object,
key,
value,
@@ -6531,7 +5855,7 @@ HInstruction* HGraphBuilder::BuildStoreKeyedGeneric(HValue* object,
}
-void HGraphBuilder::EnsureArgumentsArePushedForAccess() {
+void HOptimizedGraphBuilder::EnsureArgumentsArePushedForAccess() {
// Outermost function already has arguments on the stack.
if (function_state()->outer() == NULL) return;
@@ -6541,25 +5865,25 @@ void HGraphBuilder::EnsureArgumentsArePushedForAccess() {
HEnterInlined* entry = function_state()->entry();
entry->set_arguments_pushed();
- ZoneList<HValue*>* arguments_values = entry->arguments_values();
+ HArgumentsObject* arguments = entry->arguments_object();
+ const ZoneList<HValue*>* arguments_values = arguments->arguments_values();
HInstruction* insert_after = entry;
for (int i = 0; i < arguments_values->length(); i++) {
HValue* argument = arguments_values->at(i);
- HInstruction* push_argument = new(zone()) HPushArgument(argument);
+ HInstruction* push_argument = New<HPushArgument>(argument);
push_argument->InsertAfter(insert_after);
insert_after = push_argument;
}
- HArgumentsElements* arguments_elements =
- new(zone()) HArgumentsElements(true);
+ HArgumentsElements* arguments_elements = New<HArgumentsElements>(true);
arguments_elements->ClearFlag(HValue::kUseGVN);
arguments_elements->InsertAfter(insert_after);
function_state()->set_arguments_elements(arguments_elements);
}
-bool HGraphBuilder::TryArgumentsAccess(Property* expr) {
+bool HOptimizedGraphBuilder::TryArgumentsAccess(Property* expr) {
VariableProxy* proxy = expr->obj()->AsVariableProxy();
if (proxy == NULL) return false;
if (!proxy->var()->IsStackAllocated()) return false;
@@ -6570,34 +5894,27 @@ bool HGraphBuilder::TryArgumentsAccess(Property* expr) {
HInstruction* result = NULL;
if (expr->key()->IsPropertyName()) {
Handle<String> name = expr->key()->AsLiteral()->AsPropertyName();
- if (!name->IsEqualTo(CStrVector("length"))) return false;
+ if (!name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("length"))) return false;
if (function_state()->outer() == NULL) {
- HInstruction* elements = AddInstruction(
- new(zone()) HArgumentsElements(false));
- result = new(zone()) HArgumentsLength(elements);
+ HInstruction* elements = Add<HArgumentsElements>(false);
+ result = New<HArgumentsLength>(elements);
} else {
// Number of arguments without receiver.
int argument_count = environment()->
arguments_environment()->parameter_count() - 1;
- result = new(zone()) HConstant(
- Handle<Object>(Smi::FromInt(argument_count)),
- Representation::Integer32());
+ result = New<HConstant>(argument_count);
}
} else {
Push(graph()->GetArgumentsObject());
- VisitForValue(expr->key());
- if (HasStackOverflow() || current_block() == NULL) return true;
+ CHECK_ALIVE_OR_RETURN(VisitForValue(expr->key()), true);
HValue* key = Pop();
Drop(1); // Arguments object.
if (function_state()->outer() == NULL) {
- HInstruction* elements = AddInstruction(
- new(zone()) HArgumentsElements(false));
- HInstruction* length = AddInstruction(
- new(zone()) HArgumentsLength(elements));
- HInstruction* checked_key =
- AddInstruction(new(zone()) HBoundsCheck(key, length));
- result = new(zone()) HAccessArgumentsAt(elements, length, checked_key);
+ HInstruction* elements = Add<HArgumentsElements>(false);
+ HInstruction* length = Add<HArgumentsLength>(elements);
+ HInstruction* checked_key = Add<HBoundsCheck>(key, length);
+ result = New<HAccessArgumentsAt>(elements, length, checked_key);
} else {
EnsureArgumentsArePushedForAccess();
@@ -6605,12 +5922,9 @@ bool HGraphBuilder::TryArgumentsAccess(Property* expr) {
HInstruction* elements = function_state()->arguments_elements();
int argument_count = environment()->
arguments_environment()->parameter_count() - 1;
- HInstruction* length = AddInstruction(new(zone()) HConstant(
- Handle<Object>(Smi::FromInt(argument_count)),
- Representation::Integer32()));
- HInstruction* checked_key =
- AddInstruction(new(zone()) HBoundsCheck(key, length));
- result = new(zone()) HAccessArgumentsAt(elements, length, checked_key);
+ HInstruction* length = Add<HConstant>(argument_count);
+ HInstruction* checked_key = Add<HBoundsCheck>(key, length);
+ result = New<HAccessArgumentsAt>(elements, length, checked_key);
}
}
ast_context()->ReturnInstruction(result, expr->id());
@@ -6618,116 +5932,163 @@ bool HGraphBuilder::TryArgumentsAccess(Property* expr) {
}
-void HGraphBuilder::VisitProperty(Property* expr) {
- ASSERT(!HasStackOverflow());
- ASSERT(current_block() != NULL);
- ASSERT(current_block()->HasPredecessor());
- expr->RecordTypeFeedback(oracle(), zone());
+void HOptimizedGraphBuilder::PushLoad(Property* expr,
+ HValue* object,
+ HValue* key) {
+ ValueContext for_value(this, ARGUMENTS_NOT_ALLOWED);
+ Push(object);
+ if (key != NULL) Push(key);
+ BuildLoad(expr, expr->LoadId());
+}
- if (TryArgumentsAccess(expr)) return;
- CHECK_ALIVE(VisitForValue(expr->obj()));
+static bool AreStringTypes(SmallMapList* types) {
+ for (int i = 0; i < types->length(); i++) {
+ if (types->at(i)->instance_type() >= FIRST_NONSTRING_TYPE) return false;
+ }
+ return true;
+}
+
+void HOptimizedGraphBuilder::BuildLoad(Property* expr,
+ BailoutId ast_id) {
HInstruction* instr = NULL;
- if (expr->AsProperty()->IsArrayLength()) {
- HValue* array = Pop();
- AddInstruction(new(zone()) HCheckNonSmi(array));
- HInstruction* mapcheck =
- AddInstruction(HCheckInstanceType::NewIsJSArray(array, zone()));
- instr = new(zone()) HJSArrayLength(array, mapcheck);
- } else if (expr->IsStringLength()) {
- HValue* string = Pop();
- AddInstruction(new(zone()) HCheckNonSmi(string));
- AddInstruction(HCheckInstanceType::NewIsString(string, zone()));
- instr = new(zone()) HStringLength(string);
- } else if (expr->IsStringAccess()) {
- CHECK_ALIVE(VisitForValue(expr->key()));
+ if (expr->IsStringAccess()) {
HValue* index = Pop();
HValue* string = Pop();
- HValue* context = environment()->LookupContext();
- HStringCharCodeAt* char_code =
- BuildStringCharCodeAt(context, string, index);
+ HInstruction* char_code = BuildStringCharCodeAt(string, index);
AddInstruction(char_code);
- instr = new(zone()) HStringCharFromCode(context, char_code);
+ instr = NewUncasted<HStringCharFromCode>(char_code);
} else if (expr->IsFunctionPrototype()) {
HValue* function = Pop();
- AddInstruction(new(zone()) HCheckNonSmi(function));
- instr = new(zone()) HLoadFunctionPrototype(function);
+ BuildCheckHeapObject(function);
+ instr = New<HLoadFunctionPrototype>(function);
} else if (expr->key()->IsPropertyName()) {
Handle<String> name = expr->key()->AsLiteral()->AsPropertyName();
- SmallMapList* types = expr->GetReceiverTypes();
+ HValue* object = Pop();
- bool monomorphic = expr->IsMonomorphic();
- Handle<Map> map;
- if (expr->IsMonomorphic()) {
- map = types->first();
- if (map->is_dictionary_map()) monomorphic = false;
- }
- if (monomorphic) {
- Handle<JSFunction> getter;
- Handle<JSObject> holder;
- if (LookupGetter(map, name, &getter, &holder)) {
- AddCheckConstantFunction(holder, Top(), map, true);
- if (FLAG_inline_accessors && TryInlineGetter(getter, expr)) return;
- AddInstruction(new(zone()) HPushArgument(Pop()));
- instr = new(zone()) HCallConstantFunction(getter, 1);
+ SmallMapList* types;
+ ComputeReceiverTypes(expr, object, &types);
+ ASSERT(types != NULL);
+
+ if (types->length() > 0) {
+ PropertyAccessInfo info(isolate(), types->first(), name);
+ if (!info.CanLoadAsMonomorphic(types)) {
+ return HandlePolymorphicLoadNamedField(
+ ast_id, expr->LoadId(), object, types, name);
+ }
+
+ BuildCheckHeapObject(object);
+ HInstruction* checked_object;
+ if (AreStringTypes(types)) {
+ checked_object =
+ Add<HCheckInstanceType>(object, HCheckInstanceType::IS_STRING);
} else {
- instr = BuildLoadNamedMonomorphic(Pop(), name, expr, map);
+ checked_object = Add<HCheckMaps>(object, types);
}
- } else if (types != NULL && types->length() > 1) {
- return HandlePolymorphicLoadNamedField(expr, Pop(), types, name);
+ instr = BuildLoadMonomorphic(
+ &info, object, checked_object, ast_id, expr->LoadId());
+ if (instr == NULL) return;
+ if (instr->IsLinked()) return ast_context()->ReturnValue(instr);
} else {
- instr = BuildLoadNamedGeneric(Pop(), name, expr);
+ instr = BuildLoadNamedGeneric(object, name, expr);
}
} else {
- CHECK_ALIVE(VisitForValue(expr->key()));
-
HValue* key = Pop();
HValue* obj = Pop();
bool has_side_effects = false;
HValue* load = HandleKeyedElementAccess(
- obj, key, NULL, expr, expr->id(), expr->position(),
+ obj, key, NULL, expr,
false, // is_store
&has_side_effects);
if (has_side_effects) {
if (ast_context()->IsEffect()) {
- AddSimulate(expr->id());
+ Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
} else {
Push(load);
- AddSimulate(expr->id());
+ Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
Drop(1);
}
}
return ast_context()->ReturnValue(load);
}
- instr->set_position(expr->position());
- return ast_context()->ReturnInstruction(instr, expr->id());
+ return ast_context()->ReturnInstruction(instr, ast_id);
}
-void HGraphBuilder::AddCheckConstantFunction(Handle<JSObject> holder,
- HValue* receiver,
- Handle<Map> receiver_map,
- bool smi_and_map_check) {
- // Constant functions have the nice property that the map will change if they
- // are overwritten. Therefore it is enough to check the map of the holder and
- // its prototypes.
- if (smi_and_map_check) {
- AddInstruction(new(zone()) HCheckNonSmi(receiver));
- AddInstruction(HCheckMaps::NewWithTransitions(receiver, receiver_map,
- zone()));
+void HOptimizedGraphBuilder::VisitProperty(Property* expr) {
+ ASSERT(!HasStackOverflow());
+ ASSERT(current_block() != NULL);
+ ASSERT(current_block()->HasPredecessor());
+
+ if (TryArgumentsAccess(expr)) return;
+
+ CHECK_ALIVE(VisitForValue(expr->obj()));
+ if ((!expr->IsFunctionPrototype() && !expr->key()->IsPropertyName()) ||
+ expr->IsStringAccess()) {
+ CHECK_ALIVE(VisitForValue(expr->key()));
}
+
+ BuildLoad(expr, expr->id());
+}
+
+
+HInstruction* HGraphBuilder::BuildConstantMapCheck(Handle<JSObject> constant,
+ CompilationInfo* info) {
+ HConstant* constant_value = New<HConstant>(constant);
+
+ if (constant->map()->CanOmitMapChecks()) {
+ constant->map()->AddDependentCompilationInfo(
+ DependentCode::kPrototypeCheckGroup, info);
+ return constant_value;
+ }
+
+ AddInstruction(constant_value);
+ HCheckMaps* check =
+ Add<HCheckMaps>(constant_value, handle(constant->map()), info);
+ check->ClearGVNFlag(kDependsOnElementsKind);
+ return check;
+}
+
+
+HInstruction* HGraphBuilder::BuildCheckPrototypeMaps(Handle<JSObject> prototype,
+ Handle<JSObject> holder) {
+ while (!prototype.is_identical_to(holder)) {
+ BuildConstantMapCheck(prototype, top_info());
+ prototype = handle(JSObject::cast(prototype->GetPrototype()));
+ }
+
+ HInstruction* checked_object = BuildConstantMapCheck(prototype, top_info());
+ if (!checked_object->IsLinked()) AddInstruction(checked_object);
+ return checked_object;
+}
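
BuildCheckPrototypeMaps walks from the receiver's prototype up to the holder; objects whose map can omit map checks only register the compilation as dependent code, while the rest get an explicit constant-map check. A sketch of that walk over an illustrative Proto struct (not V8's prototype representation):

#include <iostream>
#include <vector>

struct Proto {
  const Proto* prototype;     // Next object in the prototype chain.
  bool can_omit_map_checks;   // Stands in for Map::CanOmitMapChecks().
  const char* name;
};

// Walks prototype -> ... -> holder; returns the names that still need an
// explicit map check (the others would register a code dependency instead).
std::vector<const char*> CollectPrototypeChecks(const Proto* prototype,
                                                const Proto* holder) {
  std::vector<const char*> checks;
  const Proto* current = prototype;
  while (true) {
    if (!current->can_omit_map_checks) checks.push_back(current->name);
    if (current == holder) break;
    current = current->prototype;
  }
  return checks;
}

int main() {
  Proto object_proto{nullptr, true, "Object.prototype"};
  Proto array_proto{&object_proto, false, "Array.prototype"};
  for (const char* name : CollectPrototypeChecks(&array_proto, &object_proto)) {
    std::cout << name << "\n";  // Array.prototype
  }
}
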
+
+
+void HOptimizedGraphBuilder::AddCheckPrototypeMaps(Handle<JSObject> holder,
+ Handle<Map> receiver_map) {
if (!holder.is_null()) {
- AddInstruction(new(zone()) HCheckPrototypeMaps(
- Handle<JSObject>(JSObject::cast(receiver_map->prototype())), holder));
+ Handle<JSObject> prototype(JSObject::cast(receiver_map->prototype()));
+ BuildCheckPrototypeMaps(prototype, holder);
}
}
+void HOptimizedGraphBuilder::AddCheckConstantFunction(
+ Handle<JSObject> holder,
+ HValue* receiver,
+ Handle<Map> receiver_map) {
+ // Constant functions have the nice property that the map will change if they
+ // are overwritten. Therefore it is enough to check the map of the holder and
+ // its prototypes.
+ AddCheckMap(receiver, receiver_map);
+ AddCheckPrototypeMaps(holder, receiver_map);
+}
+
+
class FunctionSorter {
public:
FunctionSorter() : index_(0), ticks_(0), ast_length_(0), src_length_(0) { }
@@ -6750,33 +6111,82 @@ class FunctionSorter {
};
-static int CompareHotness(void const* a, void const* b) {
- FunctionSorter const* function1 = reinterpret_cast<FunctionSorter const*>(a);
- FunctionSorter const* function2 = reinterpret_cast<FunctionSorter const*>(b);
- int diff = function1->ticks() - function2->ticks();
- if (diff != 0) return -diff;
- diff = function1->ast_length() - function2->ast_length();
- if (diff != 0) return diff;
- return function1->src_length() - function2->src_length();
+inline bool operator<(const FunctionSorter& lhs, const FunctionSorter& rhs) {
+ int diff = lhs.ticks() - rhs.ticks();
+ if (diff != 0) return diff > 0;
+ diff = lhs.ast_length() - rhs.ast_length();
+ if (diff != 0) return diff < 0;
+ return lhs.src_length() < rhs.src_length();
}
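
The qsort comparator is replaced by an operator< suitable for std::sort: candidates with more profiler ticks come first, ties are broken by smaller AST length and then smaller source length. The same strict weak ordering as a self-contained sketch (Candidate is an illustrative stand-in for FunctionSorter):

#include <algorithm>
#include <iostream>
#include <vector>

struct Candidate {
  int index;
  int ticks;       // Profiler ticks: hotter is better.
  int ast_length;  // Smaller ASTs are cheaper to inline.
  int src_length;  // Final tie-breaker.
};

// Same ordering as the new operator<: ticks descending, then ast_length
// ascending, then src_length ascending.
bool Hotter(const Candidate& lhs, const Candidate& rhs) {
  if (lhs.ticks != rhs.ticks) return lhs.ticks > rhs.ticks;
  if (lhs.ast_length != rhs.ast_length) return lhs.ast_length < rhs.ast_length;
  return lhs.src_length < rhs.src_length;
}

int main() {
  std::vector<Candidate> order = {{0, 3, 40, 200}, {1, 7, 90, 500}, {2, 7, 50, 300}};
  std::sort(order.begin(), order.end(), Hotter);
  for (const Candidate& c : order) std::cout << c.index << " ";  // 2 1 0
}
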
-void HGraphBuilder::HandlePolymorphicCallNamed(Call* expr,
- HValue* receiver,
- SmallMapList* types,
- Handle<String> name) {
- // TODO(ager): We should recognize when the prototype chains for different
- // maps are identical. In that case we can avoid repeatedly generating the
- // same prototype map checks.
+bool HOptimizedGraphBuilder::TryCallPolymorphicAsMonomorphic(
+ Call* expr,
+ HValue* receiver,
+ SmallMapList* types,
+ Handle<String> name) {
+ if (types->length() > kMaxCallPolymorphism) return false;
+
+ PropertyAccessInfo info(isolate(), types->at(0), name);
+ if (!info.CanLoadAsMonomorphic(types)) return false;
+ if (!expr->ComputeTarget(info.map(), name)) return false;
+
+ BuildCheckHeapObject(receiver);
+ Add<HCheckMaps>(receiver, types);
+ AddCheckPrototypeMaps(expr->holder(), info.map());
+ if (FLAG_trace_inlining) {
+ Handle<JSFunction> caller = current_info()->closure();
+ SmartArrayPointer<char> caller_name =
+ caller->shared()->DebugName()->ToCString();
+ PrintF("Trying to inline the polymorphic call to %s from %s\n",
+ *name->ToCString(), *caller_name);
+ }
+
+ if (!TryInlineCall(expr)) {
+ int argument_count = expr->arguments()->length() + 1; // Includes receiver.
+ HCallConstantFunction* call =
+ New<HCallConstantFunction>(expr->target(), argument_count);
+ PreProcessCall(call);
+ AddInstruction(call);
+ if (!ast_context()->IsEffect()) Push(call);
+ Add<HSimulate>(expr->id(), REMOVABLE_SIMULATE);
+ if (!ast_context()->IsEffect()) ast_context()->ReturnValue(Pop());
+ }
+
+ return true;
+}
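
TryCallPolymorphicAsMonomorphic collapses a polymorphic call when every receiver map resolves the property to one and the same target, so a single check against the whole map set plus one (possibly inlined) call suffices. A simplified sketch of that "all maps agree" test, using plain map IDs and strings instead of Maps and JSFunctions:

#include <iostream>
#include <map>
#include <string>
#include <vector>

using MapId = int;

// If every receiver map resolves the named property to the same target
// function, the call can be compiled as if it were monomorphic.
bool CanCallAsMonomorphic(const std::vector<MapId>& receiver_maps,
                          const std::map<MapId, std::string>& lookup,
                          std::string* target_out) {
  std::string target;
  for (MapId map : receiver_maps) {
    auto it = lookup.find(map);
    if (it == lookup.end()) return false;         // Unknown map: give up.
    if (target.empty()) target = it->second;
    else if (target != it->second) return false;  // Targets disagree.
  }
  if (target.empty()) return false;
  *target_out = target;
  return true;
}

int main() {
  std::map<MapId, std::string> lookup = {{1, "f"}, {2, "f"}, {3, "g"}};
  std::string target;
  bool ok = CanCallAsMonomorphic({1, 2}, lookup, &target);
  std::cout << ok << " " << target << "\n";  // 1 f
}
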
+
+
+void HOptimizedGraphBuilder::HandlePolymorphicCallNamed(
+ Call* expr,
+ HValue* receiver,
+ SmallMapList* types,
+ Handle<String> name) {
+ if (TryCallPolymorphicAsMonomorphic(expr, receiver, types, name)) return;
+
int argument_count = expr->arguments()->length() + 1; // Includes receiver.
HBasicBlock* join = NULL;
FunctionSorter order[kMaxCallPolymorphism];
int ordered_functions = 0;
+
+ Handle<Map> initial_string_map(
+ isolate()->native_context()->string_function()->initial_map());
+ Handle<Map> string_marker_map(
+ JSObject::cast(initial_string_map->prototype())->map());
+ Handle<Map> initial_number_map(
+ isolate()->native_context()->number_function()->initial_map());
+ Handle<Map> number_marker_map(
+ JSObject::cast(initial_number_map->prototype())->map());
+ Handle<Map> heap_number_map = isolate()->factory()->heap_number_map();
+
+ bool handle_smi = false;
+
for (int i = 0;
i < types->length() && ordered_functions < kMaxCallPolymorphism;
++i) {
Handle<Map> map = types->at(i);
if (expr->ComputeTarget(map, name)) {
+ if (map.is_identical_to(number_marker_map)) handle_smi = true;
order[ordered_functions++] =
FunctionSorter(i,
expr->target()->shared()->profiler_ticks(),
@@ -6785,30 +6195,60 @@ void HGraphBuilder::HandlePolymorphicCallNamed(Call* expr,
}
}
- qsort(reinterpret_cast<void*>(&order[0]),
- ordered_functions,
- sizeof(order[0]),
- &CompareHotness);
+ std::sort(order, order + ordered_functions);
+
+ HBasicBlock* number_block = NULL;
for (int fn = 0; fn < ordered_functions; ++fn) {
int i = order[fn].index();
Handle<Map> map = types->at(i);
if (fn == 0) {
// Only needed once.
- AddInstruction(new(zone()) HCheckNonSmi(receiver));
join = graph()->CreateBasicBlock();
+ if (handle_smi) {
+ HBasicBlock* empty_smi_block = graph()->CreateBasicBlock();
+ HBasicBlock* not_smi_block = graph()->CreateBasicBlock();
+ number_block = graph()->CreateBasicBlock();
+ FinishCurrentBlock(New<HIsSmiAndBranch>(
+ receiver, empty_smi_block, not_smi_block));
+ Goto(empty_smi_block, number_block);
+ set_current_block(not_smi_block);
+ } else {
+ BuildCheckHeapObject(receiver);
+ }
}
HBasicBlock* if_true = graph()->CreateBasicBlock();
HBasicBlock* if_false = graph()->CreateBasicBlock();
- HCompareMap* compare =
- new(zone()) HCompareMap(receiver, map, if_true, if_false);
- current_block()->Finish(compare);
+ HUnaryControlInstruction* compare;
+
+ if (handle_smi && map.is_identical_to(number_marker_map)) {
+ compare = New<HCompareMap>(receiver, heap_number_map, if_true, if_false);
+ map = initial_number_map;
+ expr->set_number_check(
+ Handle<JSObject>(JSObject::cast(map->prototype())));
+ } else if (map.is_identical_to(string_marker_map)) {
+ compare = New<HIsStringAndBranch>(receiver, if_true, if_false);
+ map = initial_string_map;
+ expr->set_string_check(
+ Handle<JSObject>(JSObject::cast(map->prototype())));
+ } else {
+ compare = New<HCompareMap>(receiver, map, if_true, if_false);
+ expr->set_map_check();
+ }
+ FinishCurrentBlock(compare);
+
+ if (expr->check_type() == NUMBER_CHECK) {
+ Goto(if_true, number_block);
+ if_true = number_block;
+ number_block->SetJoinId(expr->id());
+ }
set_current_block(if_true);
+
expr->ComputeTarget(map, name);
- AddCheckConstantFunction(expr->holder(), receiver, map, false);
+ AddCheckPrototypeMaps(expr->holder(), map);
if (FLAG_trace_inlining && FLAG_polymorphic_inlining) {
- Handle<JSFunction> caller = info()->closure();
+ Handle<JSFunction> caller = current_info()->closure();
SmartArrayPointer<char> caller_name =
caller->shared()->DebugName()->ToCString();
PrintF("Trying to inline the polymorphic call to %s from %s\n",
@@ -6821,14 +6261,13 @@ void HGraphBuilder::HandlePolymorphicCallNamed(Call* expr,
if (HasStackOverflow()) return;
} else {
HCallConstantFunction* call =
- new(zone()) HCallConstantFunction(expr->target(), argument_count);
- call->set_position(expr->position());
+ New<HCallConstantFunction>(expr->target(), argument_count);
PreProcessCall(call);
AddInstruction(call);
if (!ast_context()->IsEffect()) Push(call);
}
- if (current_block() != NULL) current_block()->Goto(join);
+ if (current_block() != NULL) Goto(join);
set_current_block(if_false);
}
@@ -6836,17 +6275,20 @@ void HGraphBuilder::HandlePolymorphicCallNamed(Call* expr,
// know about and do not want to handle ones we've never seen. Otherwise
// use a generic IC.
if (ordered_functions == types->length() && FLAG_deoptimize_uncommon_cases) {
- current_block()->FinishExitWithDeoptimization(HDeoptimize::kNoUses);
+ // Because the deopt may be the only path in the polymorphic call, make sure
+ // that the environment stack matches the depth on deopt that it otherwise
+ // would have had after a successful call.
+ Drop(argument_count);
+ if (!ast_context()->IsEffect()) Push(graph()->GetConstant0());
+ FinishExitWithHardDeoptimization("Unknown map in polymorphic call", join);
} else {
- HValue* context = environment()->LookupContext();
- HCallNamed* call = new(zone()) HCallNamed(context, name, argument_count);
- call->set_position(expr->position());
+ HCallNamed* call = New<HCallNamed>(name, argument_count);
PreProcessCall(call);
if (join != NULL) {
AddInstruction(call);
if (!ast_context()->IsEffect()) Push(call);
- current_block()->Goto(join);
+ Goto(join);
} else {
return ast_context()->ReturnInstruction(call, expr->id());
}
@@ -6866,9 +6308,9 @@ void HGraphBuilder::HandlePolymorphicCallNamed(Call* expr,
}
-void HGraphBuilder::TraceInline(Handle<JSFunction> target,
- Handle<JSFunction> caller,
- const char* reason) {
+void HOptimizedGraphBuilder::TraceInline(Handle<JSFunction> target,
+ Handle<JSFunction> caller,
+ const char* reason) {
if (FLAG_trace_inlining) {
SmartArrayPointer<char> target_name =
target->shared()->DebugName()->ToCString();
@@ -6887,12 +6329,12 @@ void HGraphBuilder::TraceInline(Handle<JSFunction> target,
static const int kNotInlinable = 1000000000;
-int HGraphBuilder::InliningAstSize(Handle<JSFunction> target) {
+int HOptimizedGraphBuilder::InliningAstSize(Handle<JSFunction> target) {
if (!FLAG_use_inlining) return kNotInlinable;
// Precondition: call is monomorphic and we have found a target with the
// appropriate arity.
- Handle<JSFunction> caller = info()->closure();
+ Handle<JSFunction> caller = current_info()->closure();
Handle<SharedFunctionInfo> target_shared(target->shared());
// Do a quick check on source code length to avoid parsing large
@@ -6918,28 +6360,26 @@ int HGraphBuilder::InliningAstSize(Handle<JSFunction> target) {
}
-bool HGraphBuilder::TryInline(CallKind call_kind,
- Handle<JSFunction> target,
- int arguments_count,
- HValue* implicit_return_value,
- BailoutId ast_id,
- BailoutId return_id,
- InliningKind inlining_kind) {
+bool HOptimizedGraphBuilder::TryInline(CallKind call_kind,
+ Handle<JSFunction> target,
+ int arguments_count,
+ HValue* implicit_return_value,
+ BailoutId ast_id,
+ BailoutId return_id,
+ InliningKind inlining_kind) {
int nodes_added = InliningAstSize(target);
if (nodes_added == kNotInlinable) return false;
- Handle<JSFunction> caller = info()->closure();
+ Handle<JSFunction> caller = current_info()->closure();
if (nodes_added > Min(FLAG_max_inlined_nodes, kUnlimitedMaxInlinedNodes)) {
TraceInline(target, caller, "target AST is too large [early]");
return false;
}
- Handle<SharedFunctionInfo> target_shared(target->shared());
-
-#if !defined(V8_TARGET_ARCH_IA32)
+#if !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS
// Target must be able to use caller's context.
- CompilationInfo* outer_info = info();
+ CompilationInfo* outer_info = current_info();
if (target->context() != outer_info->closure()->context() ||
outer_info->scope()->contains_with() ||
outer_info->scope()->num_heap_slots() > 0) {
@@ -6949,11 +6389,11 @@ bool HGraphBuilder::TryInline(CallKind call_kind,
#endif
- // Don't inline deeper than kMaxInliningLevels calls.
+ // Don't inline deeper than the maximum number of inlining levels.
HEnvironment* env = environment();
int current_level = 1;
while (env->outer() != NULL) {
- if (current_level == Compiler::kMaxInliningLevels) {
+ if (current_level == FLAG_max_inlining_levels) {
TraceInline(target, caller, "inline depth limit reached");
return false;
}
@@ -6967,7 +6407,7 @@ bool HGraphBuilder::TryInline(CallKind call_kind,
for (FunctionState* state = function_state();
state != NULL;
state = state->outer()) {
- if (state->compilation_info()->closure()->shared() == *target_shared) {
+ if (*state->compilation_info()->closure() == *target) {
TraceInline(target, caller, "target is recursive");
return false;
}
@@ -6982,12 +6422,12 @@ bool HGraphBuilder::TryInline(CallKind call_kind,
// Parse and allocate variables.
CompilationInfo target_info(target, zone());
- if (!ParserApi::Parse(&target_info, kNoParsingFlags) ||
- !Scope::Analyze(&target_info)) {
+ Handle<SharedFunctionInfo> target_shared(target->shared());
+ if (!Parser::Parse(&target_info) || !Scope::Analyze(&target_info)) {
if (target_info.isolate()->has_pending_exception()) {
// Parse or scope error, never optimize this function.
SetStackOverflow();
- target_shared->DisableOptimization("parse/scope error");
+ target_shared->DisableOptimization(kParseScopeError);
}
TraceInline(target, caller, "parse failure");
return false;
@@ -7007,7 +6447,7 @@ bool HGraphBuilder::TryInline(CallKind call_kind,
return false;
}
AstProperties::Flags* flags(function->flags());
- if (flags->Contains(kDontInline) || flags->Contains(kDontOptimize)) {
+ if (flags->Contains(kDontInline) || function->dont_optimize()) {
TraceInline(target, caller, "target contains unsupported syntax [late]");
return false;
}
@@ -7049,7 +6489,7 @@ bool HGraphBuilder::TryInline(CallKind call_kind,
TraceInline(target, caller, "could not generate deoptimization info");
return false;
}
- if (target_shared->scope_info() == ScopeInfo::Empty()) {
+ if (target_shared->scope_info() == ScopeInfo::Empty(isolate())) {
// The scope info might not have been set if a lazily compiled
// function is inlined before being called for the first time.
Handle<ScopeInfo> target_scope_info =
@@ -7066,75 +6506,59 @@ bool HGraphBuilder::TryInline(CallKind call_kind,
// After this point, we've made a decision to inline this function (so
// TryInline should always return true).
- // Save the pending call context and type feedback oracle. Set up new ones
- // for the inlined function.
+ // Type-check the inlined function.
ASSERT(target_shared->has_deoptimization_support());
- Handle<Code> unoptimized_code(target_shared->code());
- TypeFeedbackOracle target_oracle(
- unoptimized_code,
- Handle<Context>(target->context()->native_context()),
- isolate(),
- zone());
+ AstTyper::Run(&target_info);
+
+ // Save the pending call context. Set up a new one for the inlined function.
// The function state is new-allocated because we need to delete it
// in two different places.
FunctionState* target_state = new FunctionState(
- this, &target_info, &target_oracle, inlining_kind);
+ this, &target_info, inlining_kind);
HConstant* undefined = graph()->GetConstantUndefined();
+ bool undefined_receiver = HEnvironment::UseUndefinedReceiver(
+ target, function, call_kind, inlining_kind);
HEnvironment* inner_env =
environment()->CopyForInlining(target,
arguments_count,
function,
undefined,
- call_kind,
- function_state()->inlining_kind());
-#ifdef V8_TARGET_ARCH_IA32
- // IA32 only, overwrite the caller's context in the deoptimization
- // environment with the correct one.
+ function_state()->inlining_kind(),
+ undefined_receiver);
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS
+ // IA32, ARM and MIPS only, overwrite the caller's context in the
+ // deoptimization environment with the correct one.
//
// TODO(kmillikin): implement the same inlining on other platforms so we
// can remove the unsightly ifdefs in this function.
- HConstant* context =
- new(zone()) HConstant(Handle<Context>(target->context()),
- Representation::Tagged());
- AddInstruction(context);
+ HConstant* context = Add<HConstant>(Handle<Context>(target->context()));
inner_env->BindContext(context);
#endif
- AddSimulate(return_id);
+ Add<HSimulate>(return_id);
current_block()->UpdateEnvironment(inner_env);
+ HArgumentsObject* arguments_object = NULL;
- ZoneList<HValue*>* arguments_values = NULL;
-
- // If the function uses arguments copy current arguments values
- // to use them for materialization.
+ // If the function uses the arguments object, create and bind one; also copy
+ // the current arguments values to use them for materialization.
if (function->scope()->arguments() != NULL) {
+ ASSERT(function->scope()->arguments()->IsStackAllocated());
HEnvironment* arguments_env = inner_env->arguments_environment();
int arguments_count = arguments_env->parameter_count();
- arguments_values = new(zone()) ZoneList<HValue*>(arguments_count, zone());
+ arguments_object = Add<HArgumentsObject>(arguments_count);
+ inner_env->Bind(function->scope()->arguments(), arguments_object);
for (int i = 0; i < arguments_count; i++) {
- arguments_values->Add(arguments_env->Lookup(i), zone());
+ arguments_object->AddArgument(arguments_env->Lookup(i), zone());
}
}
HEnterInlined* enter_inlined =
- new(zone()) HEnterInlined(target,
- arguments_count,
- function,
- call_kind,
- function_state()->inlining_kind(),
- function->scope()->arguments(),
- arguments_values);
+ Add<HEnterInlined>(target, arguments_count, function,
+ function_state()->inlining_kind(),
+ function->scope()->arguments(),
+ arguments_object, undefined_receiver);
function_state()->set_entry(enter_inlined);
- AddInstruction(enter_inlined);
-
- // If the function uses arguments object create and bind one.
- if (function->scope()->arguments() != NULL) {
- ASSERT(function->scope()->arguments()->IsStackAllocated());
- inner_env->Bind(function->scope()->arguments(),
- graph()->GetArgumentsObject());
- }
-
VisitDeclarations(target_info.scope()->declarations());
VisitStatements(function->body());
@@ -7142,7 +6566,7 @@ bool HGraphBuilder::TryInline(CallKind call_kind,
// Bail out if the inline function did, as we cannot residualize a call
// instead.
TraceInline(target, caller, "inline graph construction failed");
- target_shared->DisableOptimization("inlining bailed out");
+ target_shared->DisableOptimization(kInliningBailedOut);
inline_bailout_ = true;
delete target_state;
return true;
@@ -7151,10 +6575,10 @@ bool HGraphBuilder::TryInline(CallKind call_kind,
// Update inlined nodes count.
inlined_count_ += nodes_added;
+ Handle<Code> unoptimized_code(target_shared->code());
ASSERT(unoptimized_code->kind() == Code::FUNCTION);
- Handle<Object> maybe_type_info(unoptimized_code->type_feedback_info());
Handle<TypeFeedbackInfo> type_info(
- Handle<TypeFeedbackInfo>::cast(maybe_type_info));
+ TypeFeedbackInfo::cast(unoptimized_code->type_feedback_info()));
graph()->update_type_change_checksum(type_info->own_type_change_checksum());
TraceInline(target, caller, NULL);
@@ -7166,12 +6590,12 @@ bool HGraphBuilder::TryInline(CallKind call_kind,
// return value will always evaluate to true, in a value context the
// return value is the newly allocated receiver.
if (call_context()->IsTest()) {
- current_block()->Goto(inlined_test_context()->if_true(), state);
+ Goto(inlined_test_context()->if_true(), state);
} else if (call_context()->IsEffect()) {
- current_block()->Goto(function_return(), state);
+ Goto(function_return(), state);
} else {
ASSERT(call_context()->IsValue());
- current_block()->AddLeaveInlined(implicit_return_value, state);
+ AddLeaveInlined(implicit_return_value, state);
}
} else if (state->inlining_kind() == SETTER_CALL_RETURN) {
// Falling off the end of an inlined setter call. The returned value is
@@ -7180,21 +6604,21 @@ bool HGraphBuilder::TryInline(CallKind call_kind,
if (call_context()->IsTest()) {
inlined_test_context()->ReturnValue(implicit_return_value);
} else if (call_context()->IsEffect()) {
- current_block()->Goto(function_return(), state);
+ Goto(function_return(), state);
} else {
ASSERT(call_context()->IsValue());
- current_block()->AddLeaveInlined(implicit_return_value, state);
+ AddLeaveInlined(implicit_return_value, state);
}
} else {
// Falling off the end of a normal inlined function. This basically means
// returning undefined.
if (call_context()->IsTest()) {
- current_block()->Goto(inlined_test_context()->if_false(), state);
+ Goto(inlined_test_context()->if_false(), state);
} else if (call_context()->IsEffect()) {
- current_block()->Goto(function_return(), state);
+ Goto(function_return(), state);
} else {
ASSERT(call_context()->IsValue());
- current_block()->AddLeaveInlined(undefined, state);
+ AddLeaveInlined(undefined, state);
}
}
}
@@ -7204,6 +6628,8 @@ bool HGraphBuilder::TryInline(CallKind call_kind,
HBasicBlock* if_true = inlined_test_context()->if_true();
HBasicBlock* if_false = inlined_test_context()->if_false();
+ HEnterInlined* entry = function_state()->entry();
+
// Pop the return test context from the expression context stack.
ASSERT(ast_context() == inlined_test_context());
ClearInlinedTestContext();
@@ -7211,19 +6637,22 @@ bool HGraphBuilder::TryInline(CallKind call_kind,
// Forward to the real test context.
if (if_true->HasPredecessor()) {
+ entry->RegisterReturnTarget(if_true, zone());
if_true->SetJoinId(ast_id);
HBasicBlock* true_target = TestContext::cast(ast_context())->if_true();
- if_true->Goto(true_target, function_state());
+ Goto(if_true, true_target, function_state());
}
if (if_false->HasPredecessor()) {
+ entry->RegisterReturnTarget(if_false, zone());
if_false->SetJoinId(ast_id);
HBasicBlock* false_target = TestContext::cast(ast_context())->if_false();
- if_false->Goto(false_target, function_state());
+ Goto(if_false, false_target, function_state());
}
set_current_block(NULL);
return true;
} else if (function_return()->HasPredecessor()) {
+ function_state()->entry()->RegisterReturnTarget(function_return(), zone());
function_return()->SetJoinId(ast_id);
set_current_block(function_return());
} else {
@@ -7234,7 +6663,7 @@ bool HGraphBuilder::TryInline(CallKind call_kind,
}
-bool HGraphBuilder::TryInlineCall(Call* expr, bool drop_extra) {
+bool HOptimizedGraphBuilder::TryInlineCall(Call* expr, bool drop_extra) {
// The function call we are inlining is a method call if the call
// is a property call.
CallKind call_kind = (expr->expression()->AsProperty() == NULL)
@@ -7251,8 +6680,8 @@ bool HGraphBuilder::TryInlineCall(Call* expr, bool drop_extra) {
}
-bool HGraphBuilder::TryInlineConstruct(CallNew* expr,
- HValue* implicit_return_value) {
+bool HOptimizedGraphBuilder::TryInlineConstruct(CallNew* expr,
+ HValue* implicit_return_value) {
return TryInline(CALL_AS_FUNCTION,
expr->target(),
expr->arguments()->length(),
@@ -7263,36 +6692,55 @@ bool HGraphBuilder::TryInlineConstruct(CallNew* expr,
}
-bool HGraphBuilder::TryInlineGetter(Handle<JSFunction> getter,
- Property* prop) {
+bool HOptimizedGraphBuilder::TryInlineGetter(Handle<JSFunction> getter,
+ BailoutId ast_id,
+ BailoutId return_id) {
return TryInline(CALL_AS_METHOD,
getter,
0,
NULL,
- prop->id(),
- prop->LoadId(),
+ ast_id,
+ return_id,
GETTER_CALL_RETURN);
}
-bool HGraphBuilder::TryInlineSetter(Handle<JSFunction> setter,
- Assignment* assignment,
- HValue* implicit_return_value) {
+bool HOptimizedGraphBuilder::TryInlineSetter(Handle<JSFunction> setter,
+ BailoutId id,
+ BailoutId assignment_id,
+ HValue* implicit_return_value) {
return TryInline(CALL_AS_METHOD,
setter,
1,
implicit_return_value,
- assignment->id(),
- assignment->AssignmentId(),
+ id, assignment_id,
SETTER_CALL_RETURN);
}
-bool HGraphBuilder::TryInlineBuiltinFunctionCall(Call* expr, bool drop_extra) {
+bool HOptimizedGraphBuilder::TryInlineApply(Handle<JSFunction> function,
+ Call* expr,
+ int arguments_count) {
+ return TryInline(CALL_AS_METHOD,
+ function,
+ arguments_count,
+ NULL,
+ expr->id(),
+ expr->ReturnId(),
+ NORMAL_RETURN);
+}
+
+
+bool HOptimizedGraphBuilder::TryInlineBuiltinFunctionCall(Call* expr,
+ bool drop_extra) {
if (!expr->target()->shared()->HasBuiltinFunctionId()) return false;
BuiltinFunctionId id = expr->target()->shared()->builtin_function_id();
switch (id) {
+ case kMathExp:
+ if (!FLAG_fast_math) break;
+ // Fall through if FLAG_fast_math.
case kMathRound:
+ case kMathFloor:
case kMathAbs:
case kMathSqrt:
case kMathLog:
@@ -7301,11 +6749,19 @@ bool HGraphBuilder::TryInlineBuiltinFunctionCall(Call* expr, bool drop_extra) {
case kMathTan:
if (expr->arguments()->length() == 1) {
HValue* argument = Pop();
- HValue* context = environment()->LookupContext();
Drop(1); // Receiver.
- HUnaryMathOperation* op =
- new(zone()) HUnaryMathOperation(context, argument, id);
- op->set_position(expr->position());
+ HInstruction* op = NewUncasted<HUnaryMathOperation>(argument, id);
+ if (drop_extra) Drop(1); // Optionally drop the function.
+ ast_context()->ReturnInstruction(op, expr->id());
+ return true;
+ }
+ break;
+ case kMathImul:
+ if (expr->arguments()->length() == 2) {
+ HValue* right = Pop();
+ HValue* left = Pop();
+ Drop(1); // Receiver.
+ HInstruction* op = HMul::NewImul(zone(), context(), left, right);
if (drop_extra) Drop(1); // Optionally drop the function.
ast_context()->ReturnInstruction(op, expr->id());
return true;
@@ -7319,10 +6775,11 @@ bool HGraphBuilder::TryInlineBuiltinFunctionCall(Call* expr, bool drop_extra) {
}
-bool HGraphBuilder::TryInlineBuiltinMethodCall(Call* expr,
- HValue* receiver,
- Handle<Map> receiver_map,
- CheckType check_type) {
+bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
+ Call* expr,
+ HValue* receiver,
+ Handle<Map> receiver_map,
+ CheckType check_type) {
ASSERT(check_type != RECEIVER_MAP_CHECK || !receiver_map.is_null());
// Try to inline calls like Math.* as operations in the calling function.
if (!expr->target()->shared()->HasBuiltinFunctionId()) return false;
@@ -7334,24 +6791,35 @@ bool HGraphBuilder::TryInlineBuiltinMethodCall(Call* expr,
if (argument_count == 2 && check_type == STRING_CHECK) {
HValue* index = Pop();
HValue* string = Pop();
- HValue* context = environment()->LookupContext();
ASSERT(!expr->holder().is_null());
- AddInstruction(new(zone()) HCheckPrototypeMaps(
- oracle()->GetPrototypeForPrimitiveCheck(STRING_CHECK),
- expr->holder()));
- HStringCharCodeAt* char_code =
- BuildStringCharCodeAt(context, string, index);
+ BuildCheckPrototypeMaps(Call::GetPrototypeForPrimitiveCheck(
+ STRING_CHECK, expr->holder()->GetIsolate()),
+ expr->holder());
+ HInstruction* char_code =
+ BuildStringCharCodeAt(string, index);
if (id == kStringCharCodeAt) {
ast_context()->ReturnInstruction(char_code, expr->id());
return true;
}
AddInstruction(char_code);
- HStringCharFromCode* result =
- new(zone()) HStringCharFromCode(context, char_code);
+ HInstruction* result = NewUncasted<HStringCharFromCode>(char_code);
+ ast_context()->ReturnInstruction(result, expr->id());
+ return true;
+ }
+ break;
+ case kStringFromCharCode:
+ if (argument_count == 2 && check_type == RECEIVER_MAP_CHECK) {
+ AddCheckConstantFunction(expr->holder(), receiver, receiver_map);
+ HValue* argument = Pop();
+ Drop(1); // Receiver.
+ HInstruction* result = NewUncasted<HStringCharFromCode>(argument);
ast_context()->ReturnInstruction(result, expr->id());
return true;
}
break;
+ case kMathExp:
+ if (!FLAG_fast_math) break;
+ // Fall through if FLAG_fast_math.
case kMathRound:
case kMathFloor:
case kMathAbs:
@@ -7361,54 +6829,41 @@ bool HGraphBuilder::TryInlineBuiltinMethodCall(Call* expr,
case kMathCos:
case kMathTan:
if (argument_count == 2 && check_type == RECEIVER_MAP_CHECK) {
- AddCheckConstantFunction(expr->holder(), receiver, receiver_map, true);
+ AddCheckConstantFunction(expr->holder(), receiver, receiver_map);
HValue* argument = Pop();
- HValue* context = environment()->LookupContext();
Drop(1); // Receiver.
- HUnaryMathOperation* op =
- new(zone()) HUnaryMathOperation(context, argument, id);
- op->set_position(expr->position());
+ HInstruction* op = NewUncasted<HUnaryMathOperation>(argument, id);
ast_context()->ReturnInstruction(op, expr->id());
return true;
}
break;
case kMathPow:
if (argument_count == 3 && check_type == RECEIVER_MAP_CHECK) {
- AddCheckConstantFunction(expr->holder(), receiver, receiver_map, true);
+ AddCheckConstantFunction(expr->holder(), receiver, receiver_map);
HValue* right = Pop();
HValue* left = Pop();
Pop(); // Pop receiver.
- HValue* context = environment()->LookupContext();
HInstruction* result = NULL;
// Use sqrt() if exponent is 0.5 or -0.5.
if (right->IsConstant() && HConstant::cast(right)->HasDoubleValue()) {
double exponent = HConstant::cast(right)->DoubleValue();
if (exponent == 0.5) {
- result =
- new(zone()) HUnaryMathOperation(context, left, kMathPowHalf);
+ result = NewUncasted<HUnaryMathOperation>(left, kMathPowHalf);
} else if (exponent == -0.5) {
- HConstant* double_one =
- new(zone()) HConstant(Handle<Object>(Smi::FromInt(1)),
- Representation::Double());
- AddInstruction(double_one);
- HUnaryMathOperation* square_root =
- new(zone()) HUnaryMathOperation(context, left, kMathPowHalf);
- AddInstruction(square_root);
+ HValue* one = graph()->GetConstant1();
+ HInstruction* sqrt = AddUncasted<HUnaryMathOperation>(
+ left, kMathPowHalf);
// MathPowHalf doesn't have side effects so there's no need for
// an environment simulation here.
- ASSERT(!square_root->HasObservableSideEffects());
- result = new(zone()) HDiv(context, double_one, square_root);
+ ASSERT(!sqrt->HasObservableSideEffects());
+ result = NewUncasted<HDiv>(one, sqrt);
} else if (exponent == 2.0) {
- result = new(zone()) HMul(context, left, left);
+ result = NewUncasted<HMul>(left, left);
}
- } else if (right->IsConstant() &&
- HConstant::cast(right)->HasInteger32Value() &&
- HConstant::cast(right)->Integer32Value() == 2) {
- result = new(zone()) HMul(context, left, left);
}
if (result == NULL) {
- result = new(zone()) HPower(left, right);
+ result = NewUncasted<HPower>(left, right);
}
ast_context()->ReturnInstruction(result, expr->id());
return true;
@@ -7416,12 +6871,10 @@ bool HGraphBuilder::TryInlineBuiltinMethodCall(Call* expr,
break;
case kMathRandom:
if (argument_count == 1 && check_type == RECEIVER_MAP_CHECK) {
- AddCheckConstantFunction(expr->holder(), receiver, receiver_map, true);
+ AddCheckConstantFunction(expr->holder(), receiver, receiver_map);
Drop(1); // Receiver.
- HValue* context = environment()->LookupContext();
- HGlobalObject* global_object = new(zone()) HGlobalObject(context);
- AddInstruction(global_object);
- HRandom* result = new(zone()) HRandom(global_object);
+ HGlobalObject* global_object = Add<HGlobalObject>();
+ HRandom* result = New<HRandom>(global_object);
ast_context()->ReturnInstruction(result, expr->id());
return true;
}
@@ -7429,14 +6882,24 @@ bool HGraphBuilder::TryInlineBuiltinMethodCall(Call* expr,
case kMathMax:
case kMathMin:
if (argument_count == 3 && check_type == RECEIVER_MAP_CHECK) {
- AddCheckConstantFunction(expr->holder(), receiver, receiver_map, true);
+ AddCheckConstantFunction(expr->holder(), receiver, receiver_map);
HValue* right = Pop();
HValue* left = Pop();
Drop(1); // Receiver.
- HValue* context = environment()->LookupContext();
HMathMinMax::Operation op = (id == kMathMin) ? HMathMinMax::kMathMin
: HMathMinMax::kMathMax;
- HMathMinMax* result = new(zone()) HMathMinMax(context, left, right, op);
+ HInstruction* result = NewUncasted<HMathMinMax>(left, right, op);
+ ast_context()->ReturnInstruction(result, expr->id());
+ return true;
+ }
+ break;
+ case kMathImul:
+ if (argument_count == 3 && check_type == RECEIVER_MAP_CHECK) {
+ AddCheckConstantFunction(expr->holder(), receiver, receiver_map);
+ HValue* right = Pop();
+ HValue* left = Pop();
+ Drop(1); // Receiver.
+ HInstruction* result = HMul::NewImul(zone(), context(), left, right);
ast_context()->ReturnInstruction(result, expr->id());
return true;
}
@@ -7449,7 +6912,7 @@ bool HGraphBuilder::TryInlineBuiltinMethodCall(Call* expr,
}
-bool HGraphBuilder::TryCallApply(Call* expr) {
+bool HOptimizedGraphBuilder::TryCallApply(Call* expr) {
Expression* callee = expr->expression();
Property* prop = callee->AsProperty();
ASSERT(prop != NULL);
@@ -7464,74 +6927,74 @@ bool HGraphBuilder::TryCallApply(Call* expr) {
return false;
}
- if (info()->scope()->arguments() == NULL) return false;
+ if (current_info()->scope()->arguments() == NULL) return false;
ZoneList<Expression*>* args = expr->arguments();
if (args->length() != 2) return false;
VariableProxy* arg_two = args->at(1)->AsVariableProxy();
if (arg_two == NULL || !arg_two->var()->IsStackAllocated()) return false;
- HValue* arg_two_value = environment()->Lookup(arg_two->var());
+ HValue* arg_two_value = LookupAndMakeLive(arg_two->var());
if (!arg_two_value->CheckFlag(HValue::kIsArguments)) return false;
// Found pattern f.apply(receiver, arguments).
- VisitForValue(prop->obj());
- if (HasStackOverflow() || current_block() == NULL) return true;
+ CHECK_ALIVE_OR_RETURN(VisitForValue(prop->obj()), true);
HValue* function = Top();
- AddCheckConstantFunction(expr->holder(), function, function_map, true);
+ AddCheckConstantFunction(expr->holder(), function, function_map);
Drop(1);
- VisitForValue(args->at(0));
- if (HasStackOverflow() || current_block() == NULL) return true;
+ CHECK_ALIVE_OR_RETURN(VisitForValue(args->at(0)), true);
HValue* receiver = Pop();
if (function_state()->outer() == NULL) {
- HInstruction* elements = AddInstruction(
- new(zone()) HArgumentsElements(false));
- HInstruction* length =
- AddInstruction(new(zone()) HArgumentsLength(elements));
- HValue* wrapped_receiver =
- AddInstruction(new(zone()) HWrapReceiver(receiver, function));
- HInstruction* result =
- new(zone()) HApplyArguments(function,
- wrapped_receiver,
- length,
- elements);
- result->set_position(expr->position());
+ HInstruction* elements = Add<HArgumentsElements>(false);
+ HInstruction* length = Add<HArgumentsLength>(elements);
+ HValue* wrapped_receiver = BuildWrapReceiver(receiver, function);
+ HInstruction* result = New<HApplyArguments>(function,
+ wrapped_receiver,
+ length,
+ elements);
ast_context()->ReturnInstruction(result, expr->id());
return true;
} else {
// We are inside an inlined function and we know exactly what is inside the
// arguments object. But we need to be able to materialize at deopt.
- // TODO(mstarzinger): For now we just ensure arguments are pushed
- // right after HEnterInlined, but we could be smarter about this.
- EnsureArgumentsArePushedForAccess();
- HValue* context = environment()->LookupContext();
-
- HValue* wrapped_receiver =
- AddInstruction(new(zone()) HWrapReceiver(receiver, function));
- PushAndAdd(new(zone()) HPushArgument(wrapped_receiver));
-
- HEnvironment* arguments_env = environment()->arguments_environment();
-
- int parameter_count = arguments_env->parameter_count();
- for (int i = 1; i < arguments_env->parameter_count(); i++) {
- PushAndAdd(new(zone()) HPushArgument(arguments_env->Lookup(i)));
- }
-
- HInvokeFunction* call = new(zone()) HInvokeFunction(
- context,
- function,
- parameter_count);
- Drop(parameter_count);
- call->set_position(expr->position());
+ ASSERT_EQ(environment()->arguments_environment()->parameter_count(),
+ function_state()->entry()->arguments_object()->arguments_count());
+ HArgumentsObject* args = function_state()->entry()->arguments_object();
+ const ZoneList<HValue*>* arguments_values = args->arguments_values();
+ int arguments_count = arguments_values->length();
+ Push(BuildWrapReceiver(receiver, function));
+ for (int i = 1; i < arguments_count; i++) {
+ Push(arguments_values->at(i));
+ }
+
+ Handle<JSFunction> known_function;
+ if (function->IsConstant()) {
+ HConstant* constant_function = HConstant::cast(function);
+ known_function = Handle<JSFunction>::cast(
+ constant_function->handle(isolate()));
+ int args_count = arguments_count - 1; // Excluding receiver.
+ if (TryInlineApply(known_function, expr, args_count)) return true;
+ }
+
+ Drop(arguments_count - 1);
+ Push(Add<HPushArgument>(Pop()));
+ for (int i = 1; i < arguments_count; i++) {
+ Push(Add<HPushArgument>(arguments_values->at(i)));
+ }
+
+ HInvokeFunction* call = New<HInvokeFunction>(function,
+ known_function,
+ arguments_count);
+ Drop(arguments_count);
ast_context()->ReturnInstruction(call, expr->id());
return true;
}
}
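
For context, the pattern recognized above ("Found pattern f.apply(receiver, arguments)") is the common argument-forwarding idiom: outside any inlined function the builder wraps the receiver and emits HApplyArguments, while inside an inlined function it pushes the known argument values and may inline the target directly. A rough JavaScript illustration (names are only for exposition):

    // The second argument must be the enclosing function's own 'arguments'
    // object, and that variable must be stack-allocated, for TryCallApply
    // to recognize the call.
    function callWithForwardedArgs(f, receiver) {
      return f.apply(receiver, arguments);
    }
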
-void HGraphBuilder::VisitCall(Call* expr) {
+void HOptimizedGraphBuilder::VisitCall(Call* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
@@ -7554,35 +7017,31 @@ void HGraphBuilder::VisitCall(Call* expr) {
CHECK_ALIVE(VisitArgumentList(expr->arguments()));
- HValue* context = environment()->LookupContext();
- call = new(zone()) HCallKeyed(context, key, argument_count);
- call->set_position(expr->position());
+ call = New<HCallKeyed>(key, argument_count);
Drop(argument_count + 1); // 1 is the key.
return ast_context()->ReturnInstruction(call, expr->id());
}
// Named function call.
- expr->RecordTypeFeedback(oracle(), CALL_AS_METHOD);
-
if (TryCallApply(expr)) return;
CHECK_ALIVE(VisitForValue(prop->obj()));
CHECK_ALIVE(VisitExpressions(expr->arguments()));
Handle<String> name = prop->key()->AsLiteral()->AsPropertyName();
-
- SmallMapList* types = expr->GetReceiverTypes();
-
HValue* receiver =
environment()->ExpressionStackAt(expr->arguments()->length());
- if (expr->IsMonomorphic()) {
- Handle<Map> receiver_map = (types == NULL || types->is_empty())
- ? Handle<Map>::null()
- : types->first();
- if (TryInlineBuiltinMethodCall(expr,
- receiver,
- receiver_map,
- expr->check_type())) {
+
+ SmallMapList* types;
+ bool was_monomorphic = expr->IsMonomorphic();
+ bool monomorphic = ComputeReceiverTypes(expr, receiver, &types);
+ if (!was_monomorphic && monomorphic) {
+ monomorphic = expr->ComputeTarget(types->first(), name);
+ }
+
+ if (monomorphic) {
+ Handle<Map> map = types->first();
+ if (TryInlineBuiltinMethodCall(expr, receiver, map, expr->check_type())) {
if (FLAG_trace_inlining) {
PrintF("Inlining builtin ");
expr->target()->ShortPrint();
@@ -7596,16 +7055,13 @@ void HGraphBuilder::VisitCall(Call* expr) {
// When the target has a custom call IC generator, use the IC,
// because it is likely to generate better code. Also use the IC
// when a primitive receiver check is required.
- HValue* context = environment()->LookupContext();
- call = PreProcessCall(
- new(zone()) HCallNamed(context, name, argument_count));
+ call = PreProcessCall(New<HCallNamed>(name, argument_count));
} else {
- AddCheckConstantFunction(expr->holder(), receiver, receiver_map, true);
+ AddCheckConstantFunction(expr->holder(), receiver, map);
if (TryInlineCall(expr)) return;
call = PreProcessCall(
- new(zone()) HCallConstantFunction(expr->target(),
- argument_count));
+ New<HCallConstantFunction>(expr->target(), argument_count));
}
} else if (types != NULL && types->length() > 1) {
ASSERT(expr->check_type() == RECEIVER_MAP_CHECK);
@@ -7613,20 +7069,15 @@ void HGraphBuilder::VisitCall(Call* expr) {
return;
} else {
- HValue* context = environment()->LookupContext();
- call = PreProcessCall(
- new(zone()) HCallNamed(context, name, argument_count));
+ call = PreProcessCall(New<HCallNamed>(name, argument_count));
}
-
} else {
- expr->RecordTypeFeedback(oracle(), CALL_AS_FUNCTION);
VariableProxy* proxy = expr->expression()->AsVariableProxy();
- bool global_call = proxy != NULL && proxy->var()->IsUnallocated();
-
- if (proxy != NULL && proxy->var()->is_possibly_eval()) {
- return Bailout("possible direct call to eval");
+ if (proxy != NULL && proxy->var()->is_possibly_eval(isolate())) {
+ return Bailout(kPossibleDirectCallToEval);
}
+ bool global_call = proxy != NULL && proxy->var()->IsUnallocated();
if (global_call) {
Variable* var = proxy->var();
bool known_global_function = false;
@@ -7636,28 +7087,25 @@ void HGraphBuilder::VisitCall(Call* expr) {
LookupResult lookup(isolate());
GlobalPropertyAccess type = LookupGlobalProperty(var, &lookup, false);
if (type == kUseCell &&
- !info()->global_object()->IsAccessCheckNeeded()) {
- Handle<GlobalObject> global(info()->global_object());
+ !current_info()->global_object()->IsAccessCheckNeeded()) {
+ Handle<GlobalObject> global(current_info()->global_object());
known_global_function = expr->ComputeGlobalTarget(global, &lookup);
}
if (known_global_function) {
// Push the global object instead of the global receiver because
// code generated by the full code generator expects it.
- HValue* context = environment()->LookupContext();
- HGlobalObject* global_object = new(zone()) HGlobalObject(context);
- PushAndAdd(global_object);
+ HGlobalObject* global_object = Add<HGlobalObject>();
+ Push(global_object);
CHECK_ALIVE(VisitExpressions(expr->arguments()));
CHECK_ALIVE(VisitForValue(expr->expression()));
HValue* function = Pop();
- AddInstruction(new(zone()) HCheckFunction(function, expr->target()));
+ Add<HCheckValue>(function, expr->target());
// Replace the global object with the global receiver.
- HGlobalReceiver* global_receiver =
- new(zone()) HGlobalReceiver(global_object);
+ HGlobalReceiver* global_receiver = Add<HGlobalReceiver>(global_object);
// Index of the receiver from the top of the expression stack.
const int receiver_index = argument_count - 1;
- AddInstruction(global_receiver);
ASSERT(environment()->ExpressionStackAt(receiver_index)->
IsGlobalObject());
environment()->SetExpressionStackAt(receiver_index, global_receiver);
@@ -7672,20 +7120,24 @@ void HGraphBuilder::VisitCall(Call* expr) {
}
if (TryInlineCall(expr)) return;
- if (expr->target().is_identical_to(info()->closure())) {
+ if (expr->target().is_identical_to(current_info()->closure())) {
graph()->MarkRecursive();
}
- call = PreProcessCall(new(zone()) HCallKnownGlobal(expr->target(),
- argument_count));
+ if (CallStubCompiler::HasCustomCallGenerator(expr->target())) {
+ // When the target has a custom call IC generator, use the IC,
+ // because it is likely to generate better code.
+ call = PreProcessCall(New<HCallNamed>(var->name(), argument_count));
+ } else {
+ call = PreProcessCall(New<HCallKnownGlobal>(
+ expr->target(), argument_count));
+ }
} else {
- HValue* context = environment()->LookupContext();
- HGlobalObject* receiver = new(zone()) HGlobalObject(context);
- AddInstruction(receiver);
- PushAndAdd(new(zone()) HPushArgument(receiver));
+ HGlobalObject* receiver = Add<HGlobalObject>();
+ Push(Add<HPushArgument>(receiver));
CHECK_ALIVE(VisitArgumentList(expr->arguments()));
- call = new(zone()) HCallGlobal(context, var->name(), argument_count);
+ call = New<HCallGlobal>(var->name(), argument_count);
Drop(argument_count);
}
@@ -7694,13 +7146,11 @@ void HGraphBuilder::VisitCall(Call* expr) {
// evaluation of the arguments.
CHECK_ALIVE(VisitForValue(expr->expression()));
HValue* function = Top();
- HValue* context = environment()->LookupContext();
- HGlobalObject* global = new(zone()) HGlobalObject(context);
- AddInstruction(global);
- HGlobalReceiver* receiver = new(zone()) HGlobalReceiver(global);
- PushAndAdd(receiver);
+ HGlobalObject* global = Add<HGlobalObject>();
+ HGlobalReceiver* receiver = Add<HGlobalReceiver>(global);
+ Push(receiver);
CHECK_ALIVE(VisitExpressions(expr->arguments()));
- AddInstruction(new(zone()) HCheckFunction(function, expr->target()));
+ Add<HCheckValue>(function, expr->target());
if (TryInlineBuiltinFunctionCall(expr, true)) { // Drop the function.
if (FLAG_trace_inlining) {
@@ -7714,31 +7164,24 @@ void HGraphBuilder::VisitCall(Call* expr) {
if (TryInlineCall(expr, true)) { // Drop function from environment.
return;
} else {
- call = PreProcessCall(
- new(zone()) HInvokeFunction(context,
- function,
- expr->target(),
- argument_count));
+ call = PreProcessCall(New<HInvokeFunction>(function, expr->target(),
+ argument_count));
Drop(1); // The function.
}
} else {
CHECK_ALIVE(VisitForValue(expr->expression()));
HValue* function = Top();
- HValue* context = environment()->LookupContext();
- HGlobalObject* global_object = new(zone()) HGlobalObject(context);
- AddInstruction(global_object);
- HGlobalReceiver* receiver = new(zone()) HGlobalReceiver(global_object);
- AddInstruction(receiver);
- PushAndAdd(new(zone()) HPushArgument(receiver));
+ HGlobalObject* global_object = Add<HGlobalObject>();
+ HGlobalReceiver* receiver = Add<HGlobalReceiver>(global_object);
+ Push(Add<HPushArgument>(receiver));
CHECK_ALIVE(VisitArgumentList(expr->arguments()));
- call = new(zone()) HCallFunction(context, function, argument_count);
+ call = New<HCallFunction>(function, argument_count);
Drop(argument_count + 1);
}
}
- call->set_position(expr->position());
return ast_context()->ReturnInstruction(call, expr->id());
}
@@ -7747,17 +7190,18 @@ void HGraphBuilder::VisitCall(Call* expr) {
static bool IsAllocationInlineable(Handle<JSFunction> constructor) {
return constructor->has_initial_map() &&
constructor->initial_map()->instance_type() == JS_OBJECT_TYPE &&
- constructor->initial_map()->instance_size() < HAllocateObject::kMaxSize;
+ constructor->initial_map()->instance_size() < HAllocate::kMaxInlineSize &&
+ constructor->initial_map()->InitialPropertiesLength() == 0;
}
-void HGraphBuilder::VisitCallNew(CallNew* expr) {
+void HOptimizedGraphBuilder::VisitCallNew(CallNew* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
- expr->RecordTypeFeedback(oracle());
+ if (!FLAG_emit_opt_code_positions) SetSourcePosition(expr->position());
int argument_count = expr->arguments()->length() + 1; // Plus constructor.
- HValue* context = environment()->LookupContext();
+ Factory* factory = isolate()->factory();
if (FLAG_inline_construct &&
expr->IsMonomorphic() &&
@@ -7768,8 +7212,7 @@ void HGraphBuilder::VisitCallNew(CallNew* expr) {
HValue* function = Top();
CHECK_ALIVE(VisitExpressions(expr->arguments()));
Handle<JSFunction> constructor = expr->target();
- HValue* check = AddInstruction(
- new(zone()) HCheckFunction(function, constructor));
+ HValue* check = Add<HCheckValue>(function, constructor);
// Force completion of inobject slack tracking before generating
// allocation code to finalize instance size.
@@ -7777,37 +7220,96 @@ void HGraphBuilder::VisitCallNew(CallNew* expr) {
constructor->shared()->CompleteInobjectSlackTracking();
}
- // Replace the constructor function with a newly allocated receiver.
- HInstruction* receiver = new(zone()) HAllocateObject(context, constructor);
- // Index of the receiver from the top of the expression stack.
+ // Calculate instance size from initial map of constructor.
+ ASSERT(constructor->has_initial_map());
+ Handle<Map> initial_map(constructor->initial_map());
+ int instance_size = initial_map->instance_size();
+ ASSERT(initial_map->InitialPropertiesLength() == 0);
+
+ // Allocate an instance of the implicit receiver object.
+ HValue* size_in_bytes = Add<HConstant>(instance_size);
+ PretenureFlag pretenure_flag =
+ (FLAG_pretenuring_call_new &&
+ isolate()->heap()->GetPretenureMode() == TENURED)
+ ? TENURED : NOT_TENURED;
+ HAllocate* receiver =
+ Add<HAllocate>(size_in_bytes, HType::JSObject(), pretenure_flag,
+ JS_OBJECT_TYPE);
+ receiver->set_known_initial_map(initial_map);
+
+ // Load the initial map from the constructor.
+ HValue* constructor_value = Add<HConstant>(constructor);
+ HValue* initial_map_value =
+ Add<HLoadNamedField>(constructor_value, HObjectAccess::ForJSObjectOffset(
+ JSFunction::kPrototypeOrInitialMapOffset));
+
+ // Initialize map and fields of the newly allocated object.
+ { NoObservableSideEffectsScope no_effects(this);
+ ASSERT(initial_map->instance_type() == JS_OBJECT_TYPE);
+ Add<HStoreNamedField>(receiver,
+ HObjectAccess::ForJSObjectOffset(JSObject::kMapOffset),
+ initial_map_value);
+ HValue* empty_fixed_array = Add<HConstant>(factory->empty_fixed_array());
+ Add<HStoreNamedField>(receiver,
+ HObjectAccess::ForJSObjectOffset(JSObject::kPropertiesOffset),
+ empty_fixed_array);
+ Add<HStoreNamedField>(receiver,
+ HObjectAccess::ForJSObjectOffset(JSObject::kElementsOffset),
+ empty_fixed_array);
+ if (initial_map->inobject_properties() != 0) {
+ HConstant* undefined = graph()->GetConstantUndefined();
+ for (int i = 0; i < initial_map->inobject_properties(); i++) {
+ int property_offset = JSObject::kHeaderSize + i * kPointerSize;
+ Add<HStoreNamedField>(receiver,
+ HObjectAccess::ForJSObjectOffset(property_offset),
+ undefined);
+ }
+ }
+ }
+
+ // Replace the constructor function with a newly allocated receiver using
+ // the index of the receiver from the top of the expression stack.
const int receiver_index = argument_count - 1;
- AddInstruction(receiver);
ASSERT(environment()->ExpressionStackAt(receiver_index) == function);
environment()->SetExpressionStackAt(receiver_index, receiver);
if (TryInlineConstruct(expr, receiver)) return;
- // TODO(mstarzinger): For now we remove the previous HAllocateObject and
- // add HPushArgument for the arguments in case inlining failed. What we
- // actually should do is emit HInvokeFunction on the constructor instead
- // of using HCallNew as a fallback.
+ // TODO(mstarzinger): For now we remove the previous HAllocate and all
+ // corresponding instructions and instead add HPushArgument for the
+ // arguments in case inlining failed. What we actually should do is for
+ // inlining to try to build a subgraph without mutating the parent graph.
+ HInstruction* instr = current_block()->last();
+ while (instr != initial_map_value) {
+ HInstruction* prev_instr = instr->previous();
+ instr->DeleteAndReplaceWith(NULL);
+ instr = prev_instr;
+ }
+ initial_map_value->DeleteAndReplaceWith(NULL);
receiver->DeleteAndReplaceWith(NULL);
check->DeleteAndReplaceWith(NULL);
environment()->SetExpressionStackAt(receiver_index, function);
- HInstruction* call = PreProcessCall(
- new(zone()) HCallNew(context, function, argument_count));
- call->set_position(expr->position());
+ HInstruction* call =
+ PreProcessCall(New<HCallNew>(function, argument_count));
return ast_context()->ReturnInstruction(call, expr->id());
} else {
// The constructor function is both an operand to the instruction and an
// argument to the construct call.
+ Handle<JSFunction> array_function(
+ isolate()->global_context()->array_function(), isolate());
CHECK_ALIVE(VisitArgument(expr->expression()));
HValue* constructor = HPushArgument::cast(Top())->argument();
CHECK_ALIVE(VisitArgumentList(expr->arguments()));
- HInstruction* call =
- new(zone()) HCallNew(context, constructor, argument_count);
+ HBinaryCall* call;
+ if (expr->target().is_identical_to(array_function)) {
+ Handle<Cell> cell = expr->allocation_info_cell();
+ Add<HCheckValue>(constructor, array_function);
+ call = New<HCallNewArray>(constructor, argument_count,
+ cell, expr->elements_kind());
+ } else {
+ call = New<HCallNew>(constructor, argument_count);
+ }
Drop(argument_count);
- call->set_position(expr->position());
return ast_context()->ReturnInstruction(call, expr->id());
}
}
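
To make the inlined allocation above concrete: for a monomorphic 'new' call whose constructor already has an initial map of plain JS_OBJECT_TYPE, the builder emits a raw HAllocate of the instance size and initializes the map, the empty properties and elements arrays, and every in-object property slot with undefined before trying to inline the constructor body. A hypothetical constructor that could take this path:

    // Two in-object properties: the receiver is allocated inline, both slots
    // are pre-filled with undefined, and the inlined body then stores x and y.
    function Point(x, y) {
      this.x = x;
      this.y = y;
    }
    var p = new Point(1, 2);
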
@@ -7815,25 +7317,26 @@ void HGraphBuilder::VisitCallNew(CallNew* expr) {
// Support for generating inlined runtime functions.
-// Lookup table for generators for runtime calls that are generated inline.
-// Elements of the table are member pointers to functions of HGraphBuilder.
+// Lookup table for generators for runtime calls that are generated inline.
+// Elements of the table are member pointers to functions of
+// HOptimizedGraphBuilder.
#define INLINE_FUNCTION_GENERATOR_ADDRESS(Name, argc, ressize) \
- &HGraphBuilder::Generate##Name,
+ &HOptimizedGraphBuilder::Generate##Name,
-const HGraphBuilder::InlineFunctionGenerator
- HGraphBuilder::kInlineFunctionGenerators[] = {
+const HOptimizedGraphBuilder::InlineFunctionGenerator
+ HOptimizedGraphBuilder::kInlineFunctionGenerators[] = {
INLINE_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_ADDRESS)
INLINE_RUNTIME_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_ADDRESS)
};
#undef INLINE_FUNCTION_GENERATOR_ADDRESS
-void HGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
+void HOptimizedGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
if (expr->is_jsruntime()) {
- return Bailout("call to a JavaScript runtime function");
+ return Bailout(kCallToAJavaScriptRuntimeFunction);
}
const Runtime::Function* function = expr->function();
@@ -7855,18 +7358,17 @@ void HGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
ASSERT(function->intrinsic_type == Runtime::RUNTIME);
CHECK_ALIVE(VisitArgumentList(expr->arguments()));
- HValue* context = environment()->LookupContext();
Handle<String> name = expr->name();
int argument_count = expr->arguments()->length();
- HCallRuntime* call =
- new(zone()) HCallRuntime(context, name, function, argument_count);
+ HCallRuntime* call = New<HCallRuntime>(name, function,
+ argument_count);
Drop(argument_count);
return ast_context()->ReturnInstruction(call, expr->id());
}
}
-void HGraphBuilder::VisitUnaryOperation(UnaryOperation* expr) {
+void HOptimizedGraphBuilder::VisitUnaryOperation(UnaryOperation* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
@@ -7874,15 +7376,13 @@ void HGraphBuilder::VisitUnaryOperation(UnaryOperation* expr) {
case Token::DELETE: return VisitDelete(expr);
case Token::VOID: return VisitVoid(expr);
case Token::TYPEOF: return VisitTypeof(expr);
- case Token::ADD: return VisitAdd(expr);
- case Token::SUB: return VisitSub(expr);
- case Token::BIT_NOT: return VisitBitNot(expr);
case Token::NOT: return VisitNot(expr);
default: UNREACHABLE();
}
}
-void HGraphBuilder::VisitDelete(UnaryOperation* expr) {
+
+void HOptimizedGraphBuilder::VisitDelete(UnaryOperation* expr) {
Property* prop = expr->expression()->AsProperty();
VariableProxy* proxy = expr->expression()->AsVariableProxy();
if (prop != NULL) {
@@ -7890,13 +7390,18 @@ void HGraphBuilder::VisitDelete(UnaryOperation* expr) {
CHECK_ALIVE(VisitForValue(prop->key()));
HValue* key = Pop();
HValue* obj = Pop();
- HValue* context = environment()->LookupContext();
- HDeleteProperty* instr = new(zone()) HDeleteProperty(context, obj, key);
+ HValue* function = AddLoadJSBuiltin(Builtins::DELETE);
+ Add<HPushArgument>(obj);
+ Add<HPushArgument>(key);
+ Add<HPushArgument>(Add<HConstant>(function_strict_mode_flag()));
+ // TODO(olivf) InvokeFunction produces a check for the parameter count,
+ // even though we are certain to pass the correct number of arguments here.
+ HInstruction* instr = New<HInvokeFunction>(function, 3);
return ast_context()->ReturnInstruction(instr, expr->id());
} else if (proxy != NULL) {
Variable* var = proxy->var();
if (var->IsUnallocated()) {
- Bailout("delete with global variable");
+ Bailout(kDeleteWithGlobalVariable);
} else if (var->IsStackAllocated() || var->IsContextSlot()) {
// Result of deleting non-global variables is false. 'this' is not
// really a variable, though we implement it as one. The
@@ -7906,7 +7411,7 @@ void HGraphBuilder::VisitDelete(UnaryOperation* expr) {
: graph()->GetConstantFalse();
return ast_context()->ReturnValue(value);
} else {
- Bailout("delete with non-global variable");
+ Bailout(kDeleteWithNonGlobalVariable);
}
} else {
// Result of deleting non-property, non-variable reference is true.
@@ -7917,64 +7422,21 @@ void HGraphBuilder::VisitDelete(UnaryOperation* expr) {
}
-void HGraphBuilder::VisitVoid(UnaryOperation* expr) {
+void HOptimizedGraphBuilder::VisitVoid(UnaryOperation* expr) {
CHECK_ALIVE(VisitForEffect(expr->expression()));
return ast_context()->ReturnValue(graph()->GetConstantUndefined());
}
-void HGraphBuilder::VisitTypeof(UnaryOperation* expr) {
+void HOptimizedGraphBuilder::VisitTypeof(UnaryOperation* expr) {
CHECK_ALIVE(VisitForTypeOf(expr->expression()));
HValue* value = Pop();
- HValue* context = environment()->LookupContext();
- HInstruction* instr = new(zone()) HTypeof(context, value);
- return ast_context()->ReturnInstruction(instr, expr->id());
-}
-
-
-void HGraphBuilder::VisitAdd(UnaryOperation* expr) {
- CHECK_ALIVE(VisitForValue(expr->expression()));
- HValue* value = Pop();
- HValue* context = environment()->LookupContext();
- HInstruction* instr =
- new(zone()) HMul(context, value, graph_->GetConstant1());
- return ast_context()->ReturnInstruction(instr, expr->id());
-}
-
-
-void HGraphBuilder::VisitSub(UnaryOperation* expr) {
- CHECK_ALIVE(VisitForValue(expr->expression()));
- HValue* value = Pop();
- HValue* context = environment()->LookupContext();
- HInstruction* instr =
- new(zone()) HMul(context, value, graph_->GetConstantMinus1());
- TypeInfo info = oracle()->UnaryType(expr);
- if (info.IsUninitialized()) {
- AddInstruction(new(zone()) HSoftDeoptimize);
- current_block()->MarkAsDeoptimizing();
- info = TypeInfo::Unknown();
- }
- Representation rep = ToRepresentation(info);
- TraceRepresentation(expr->op(), info, instr, rep);
- instr->AssumeRepresentation(rep);
+ HInstruction* instr = New<HTypeof>(value);
return ast_context()->ReturnInstruction(instr, expr->id());
}
-void HGraphBuilder::VisitBitNot(UnaryOperation* expr) {
- CHECK_ALIVE(VisitForValue(expr->expression()));
- HValue* value = Pop();
- TypeInfo info = oracle()->UnaryType(expr);
- if (info.IsUninitialized()) {
- AddInstruction(new(zone()) HSoftDeoptimize);
- current_block()->MarkAsDeoptimizing();
- }
- HInstruction* instr = new(zone()) HBitNot(value);
- return ast_context()->ReturnInstruction(instr, expr->id());
-}
-
-
-void HGraphBuilder::VisitNot(UnaryOperation* expr) {
+void HOptimizedGraphBuilder::VisitNot(UnaryOperation* expr) {
if (ast_context()->IsTest()) {
TestContext* context = TestContext::cast(ast_context());
VisitForControl(expr->expression(),
@@ -8018,13 +7480,14 @@ void HGraphBuilder::VisitNot(UnaryOperation* expr) {
}
-HInstruction* HGraphBuilder::BuildIncrement(bool returns_original_input,
- CountOperation* expr) {
+HInstruction* HOptimizedGraphBuilder::BuildIncrement(
+ bool returns_original_input,
+ CountOperation* expr) {
// The input to the count operation is on top of the expression stack.
- TypeInfo info = oracle()->IncrementType(expr);
- Representation rep = ToRepresentation(info);
- if (rep.IsTagged()) {
- rep = Representation::Integer32();
+ Handle<Type> info = expr->type();
+ Representation rep = Representation::FromType(info);
+ if (rep.IsNone() || rep.IsTagged()) {
+ rep = Representation::Smi();
}
if (returns_original_input) {
@@ -8032,8 +7495,11 @@ HInstruction* HGraphBuilder::BuildIncrement(bool returns_original_input,
// actual HChange instruction we need is (sometimes) added in a later
// phase, so it is not available now to be used as an input to HAdd and
// as the return value.
- HInstruction* number_input = new(zone()) HForceRepresentation(Pop(), rep);
- AddInstruction(number_input);
+ HInstruction* number_input = Add<HForceRepresentation>(Pop(), rep);
+ if (!rep.IsDouble()) {
+ number_input->SetFlag(HInstruction::kFlexibleRepresentation);
+ number_input->SetFlag(HInstruction::kCannotBeTagged);
+ }
Push(number_input);
}
@@ -8041,26 +7507,45 @@ HInstruction* HGraphBuilder::BuildIncrement(bool returns_original_input,
// to simulate the expression stack after this instruction.
// Any later failures deopt to the load of the input or earlier.
HConstant* delta = (expr->op() == Token::INC)
- ? graph_->GetConstant1()
- : graph_->GetConstantMinus1();
- HValue* context = environment()->LookupContext();
- HInstruction* instr = new(zone()) HAdd(context, Top(), delta);
- TraceRepresentation(expr->op(), info, instr, rep);
- instr->AssumeRepresentation(rep);
- AddInstruction(instr);
+ ? graph()->GetConstant1()
+ : graph()->GetConstantMinus1();
+ HInstruction* instr = AddUncasted<HAdd>(Top(), delta);
+ if (instr->IsAdd()) {
+ HAdd* add = HAdd::cast(instr);
+ add->set_observed_input_representation(1, rep);
+ add->set_observed_input_representation(2, Representation::Smi());
+ }
+ instr->SetFlag(HInstruction::kCannotBeTagged);
+ instr->ClearAllSideEffects();
return instr;
}
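
At the JavaScript level this lowering means a count operation is rebuilt as an add of the constant +1 (for ++) or -1 (for --) on the current value, with Smi observed for the constant input, so a plain counter loop can stay in small-integer arithmetic. A minimal sketch:

    // i++ becomes (roughly) an HAdd of i and the constant 1; i-- uses -1.
    for (var i = 0; i < 1000; i++) {
      // loop body
    }
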
-void HGraphBuilder::VisitCountOperation(CountOperation* expr) {
+void HOptimizedGraphBuilder::BuildStoreForEffect(Expression* expr,
+ Property* prop,
+ BailoutId ast_id,
+ BailoutId return_id,
+ HValue* object,
+ HValue* key,
+ HValue* value) {
+ EffectContext for_effect(this);
+ Push(object);
+ if (key != NULL) Push(key);
+ Push(value);
+ BuildStore(expr, prop, ast_id, return_id);
+}
+
+
+void HOptimizedGraphBuilder::VisitCountOperation(CountOperation* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
+ if (!FLAG_emit_opt_code_positions) SetSourcePosition(expr->position());
Expression* target = expr->expression();
VariableProxy* proxy = target->AsVariableProxy();
Property* prop = target->AsProperty();
if (proxy == NULL && prop == NULL) {
- return Bailout("invalid lhs in count operation");
+ return Bailout(kInvalidLhsInCountOperation);
}
// Match the full code generator stack by simulating an extra stack
@@ -8074,7 +7559,7 @@ void HGraphBuilder::VisitCountOperation(CountOperation* expr) {
if (proxy != NULL) {
Variable* var = proxy->var();
if (var->mode() == CONST) {
- return Bailout("unsupported count operation with const");
+ return Bailout(kUnsupportedCountOperationWithConst);
}
// Argument of the count operation is a variable, not a property.
ASSERT(prop == NULL);
@@ -8088,27 +7573,26 @@ void HGraphBuilder::VisitCountOperation(CountOperation* expr) {
case Variable::UNALLOCATED:
HandleGlobalVariableAssignment(var,
after,
- expr->position(),
expr->AssignmentId());
break;
case Variable::PARAMETER:
case Variable::LOCAL:
- Bind(var, after);
+ BindIfLive(var, after);
break;
case Variable::CONTEXT: {
// Bail out if we try to mutate a parameter value in a function
// using the arguments object. We do not (yet) correctly handle the
// arguments property of the function.
- if (info()->scope()->arguments() != NULL) {
+ if (current_info()->scope()->arguments() != NULL) {
// Parameters will rewrite to context slots. We have no direct
// way to detect that the variable is a parameter so we use a
// linear search of the parameter list.
- int count = info()->scope()->num_parameters();
+ int count = current_info()->scope()->num_parameters();
for (int i = 0; i < count; ++i) {
- if (var == info()->scope()->parameter(i)) {
- return Bailout("assignment to parameter in arguments object");
+ if (var == current_info()->scope()->parameter(i)) {
+ return Bailout(kAssignmentToParameterInArgumentsObject);
}
}
}
@@ -8116,218 +7600,383 @@ void HGraphBuilder::VisitCountOperation(CountOperation* expr) {
HValue* context = BuildContextChainWalk(var);
HStoreContextSlot::Mode mode = IsLexicalVariableMode(var->mode())
? HStoreContextSlot::kCheckDeoptimize : HStoreContextSlot::kNoCheck;
- HStoreContextSlot* instr =
- new(zone()) HStoreContextSlot(context, var->index(), mode, after);
- AddInstruction(instr);
+ HStoreContextSlot* instr = Add<HStoreContextSlot>(context, var->index(),
+ mode, after);
if (instr->HasObservableSideEffects()) {
- AddSimulate(expr->AssignmentId());
+ Add<HSimulate>(expr->AssignmentId(), REMOVABLE_SIMULATE);
}
break;
}
case Variable::LOOKUP:
- return Bailout("lookup variable in count operation");
+ return Bailout(kLookupVariableInCountOperation);
}
- } else {
- // Argument of the count operation is a property.
- ASSERT(prop != NULL);
- prop->RecordTypeFeedback(oracle(), zone());
-
- if (prop->key()->IsPropertyName()) {
- // Named property.
- if (returns_original_input) Push(graph_->GetConstantUndefined());
-
- CHECK_ALIVE(VisitForValue(prop->obj()));
- HValue* object = Top();
-
- Handle<String> name = prop->key()->AsLiteral()->AsPropertyName();
- Handle<Map> map;
- HInstruction* load;
- bool monomorphic = prop->IsMonomorphic();
- if (monomorphic) {
- map = prop->GetReceiverTypes()->first();
- if (map->is_dictionary_map()) monomorphic = false;
- }
- if (monomorphic) {
- Handle<JSFunction> getter;
- Handle<JSObject> holder;
- if (LookupGetter(map, name, &getter, &holder)) {
- load = BuildCallGetter(object, map, getter, holder);
- } else {
- load = BuildLoadNamedMonomorphic(object, name, prop, map);
- }
- } else {
- load = BuildLoadNamedGeneric(object, name, prop);
- }
- PushAndAdd(load);
- if (load->HasObservableSideEffects()) AddSimulate(prop->LoadId());
+ Drop(returns_original_input ? 2 : 1);
+ return ast_context()->ReturnValue(expr->is_postfix() ? input : after);
+ }
- after = BuildIncrement(returns_original_input, expr);
- input = Pop();
+ // Argument of the count operation is a property.
+ ASSERT(prop != NULL);
+ if (returns_original_input) Push(graph()->GetConstantUndefined());
- HInstruction* store;
- if (!monomorphic) {
- // If we don't know the monomorphic type, do a generic store.
- CHECK_ALIVE(store = BuildStoreNamedGeneric(object, name, after));
- } else {
- Handle<JSFunction> setter;
- Handle<JSObject> holder;
- if (LookupSetter(map, name, &setter, &holder)) {
- store = BuildCallSetter(object, after, map, setter, holder);
- } else {
- CHECK_ALIVE(store = BuildStoreNamedMonomorphic(object,
- name,
- after,
- map));
- }
+ CHECK_ALIVE(VisitForValue(prop->obj()));
+ HValue* object = Top();
+
+ HValue* key = NULL;
+ if ((!prop->IsFunctionPrototype() && !prop->key()->IsPropertyName()) ||
+ prop->IsStringAccess()) {
+ CHECK_ALIVE(VisitForValue(prop->key()));
+ key = Top();
+ }
+
+ CHECK_ALIVE(PushLoad(prop, object, key));
+
+ after = BuildIncrement(returns_original_input, expr);
+
+ if (returns_original_input) {
+ input = Pop();
+ // Drop object and key to push it again in the effect context below.
+ Drop(key == NULL ? 1 : 2);
+ environment()->SetExpressionStackAt(0, input);
+ CHECK_ALIVE(BuildStoreForEffect(
+ expr, prop, expr->id(), expr->AssignmentId(), object, key, after));
+ return ast_context()->ReturnValue(Pop());
+ }
+
+ environment()->SetExpressionStackAt(0, after);
+ return BuildStore(expr, prop, expr->id(), expr->AssignmentId());
+}
+
+
+HInstruction* HOptimizedGraphBuilder::BuildStringCharCodeAt(
+ HValue* string,
+ HValue* index) {
+ if (string->IsConstant() && index->IsConstant()) {
+ HConstant* c_string = HConstant::cast(string);
+ HConstant* c_index = HConstant::cast(index);
+ if (c_string->HasStringValue() && c_index->HasNumberValue()) {
+ int32_t i = c_index->NumberValueAsInteger32();
+ Handle<String> s = c_string->StringValue();
+ if (i < 0 || i >= s->length()) {
+ return New<HConstant>(OS::nan_value());
}
- AddInstruction(store);
+ return New<HConstant>(s->Get(i));
+ }
+ }
+ BuildCheckHeapObject(string);
+ HValue* checkstring =
+ Add<HCheckInstanceType>(string, HCheckInstanceType::IS_STRING);
+ HInstruction* length = BuildLoadStringLength(string, checkstring);
+ AddInstruction(length);
+ HInstruction* checked_index = Add<HBoundsCheck>(index, length);
+ return New<HStringCharCodeAt>(string, checked_index);
+}
+
+
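For context, a minimal sketch (not part of the patch) of the folding rule the constant case in BuildStringCharCodeAt applies: an in-range constant index yields the string's code unit as a number, while an out-of-range index yields NaN. The helper name is hypothetical and a one-byte string is assumed for brevity.

    #include <cmath>
    #include <string>

    // Hypothetical stand-alone version of the constant-folding rule; V8 itself
    // folds directly on the HConstant values as shown in the hunk above.
    static double FoldCharCodeAtConstant(const std::string& s, int i) {
      if (i < 0 || i >= static_cast<int>(s.length())) {
        return std::nan("");  // Out-of-range charCodeAt is NaN.
      }
      return static_cast<unsigned char>(s[i]);  // In range: the code unit value.
    }
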
+// Checks if the given shift amounts have the following forms:
+// (N1) and (N2) with N1 + N2 = 32; (sa) and (32 - sa).
+static bool ShiftAmountsAllowReplaceByRotate(HValue* sa,
+ HValue* const32_minus_sa) {
+ if (sa->IsConstant() && const32_minus_sa->IsConstant()) {
+ const HConstant* c1 = HConstant::cast(sa);
+ const HConstant* c2 = HConstant::cast(const32_minus_sa);
+ return c1->HasInteger32Value() && c2->HasInteger32Value() &&
+ (c1->Integer32Value() + c2->Integer32Value() == 32);
+ }
+ if (!const32_minus_sa->IsSub()) return false;
+ HSub* sub = HSub::cast(const32_minus_sa);
+ if (sa != sub->right()) return false;
+ HValue* const32 = sub->left();
+ if (!const32->IsConstant() ||
+ HConstant::cast(const32)->Integer32Value() != 32) {
+ return false;
+ }
+  return true;  // sub->right() == sa was already verified above.
+}
+
+
+// Checks whether the left and the right operands are shift instructions in
+// opposite directions that can together be replaced by a single rotate right
+// instruction. If so, returns the operand and the shift amount for the
+// rotate instruction through the out parameters.
+bool HGraphBuilder::MatchRotateRight(HValue* left,
+ HValue* right,
+ HValue** operand,
+ HValue** shift_amount) {
+ HShl* shl;
+ HShr* shr;
+ if (left->IsShl() && right->IsShr()) {
+ shl = HShl::cast(left);
+ shr = HShr::cast(right);
+ } else if (left->IsShr() && right->IsShl()) {
+ shl = HShl::cast(right);
+ shr = HShr::cast(left);
+ } else {
+ return false;
+ }
+ if (shl->left() != shr->left()) return false;
- // Overwrite the receiver in the bailout environment with the result
- // of the operation, and the placeholder with the original value if
- // necessary.
- environment()->SetExpressionStackAt(0, after);
- if (returns_original_input) environment()->SetExpressionStackAt(1, input);
- if (store->HasObservableSideEffects()) AddSimulate(expr->AssignmentId());
+ if (!ShiftAmountsAllowReplaceByRotate(shl->right(), shr->right()) &&
+ !ShiftAmountsAllowReplaceByRotate(shr->right(), shl->right())) {
+ return false;
+ }
+  *operand = shr->left();
+ *shift_amount = shr->right();
+ return true;
+}
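The two helpers above recognize the common rotate idiom: two opposite 32-bit shifts whose amounts sum to 32 combine into a single rotate-right. A minimal sketch of the source-level pattern being matched (illustration only, not part of the patch; the function name is hypothetical):

    #include <stdint.h>

    // Rotate right by sa bits, written the way source code typically spells it:
    // two opposite shifts whose amounts add up to 32. MatchRotateRight turns the
    // equivalent HShl/HShr pair into one HRor. Assumes 0 < sa < 32.
    static uint32_t RotateRight32(uint32_t x, uint32_t sa) {
      return (x >> sa) | (x << (32 - sa));
    }
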
- } else {
- // Keyed property.
- if (returns_original_input) Push(graph_->GetConstantUndefined());
- CHECK_ALIVE(VisitForValue(prop->obj()));
- CHECK_ALIVE(VisitForValue(prop->key()));
- HValue* obj = environment()->ExpressionStackAt(1);
- HValue* key = environment()->ExpressionStackAt(0);
+static bool CanBeZero(HValue* right) {
+ if (right->IsConstant()) {
+ HConstant* right_const = HConstant::cast(right);
+ if (right_const->HasInteger32Value() &&
+ (right_const->Integer32Value() & 0x1f) != 0) {
+ return false;
+ }
+ }
+ return true;
+}
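CanBeZero reflects the JavaScript rule that shift counts are taken modulo 32: an unsigned right shift with a provably non-zero count always fits in the signed 32-bit range, so only a shift whose count may be zero has to be recorded as a potential uint32 result in the Token::SHR case further down. A small illustration of the masking rule (not part of the patch; names are hypothetical):

    #include <stdint.h>

    // JavaScript's >>> masks the shift count to its low five bits, so a count of
    // 32 behaves like 0 and can leave the full unsigned 32-bit value in place.
    static uint32_t UnsignedShiftRight(uint32_t x, uint32_t count) {
      uint32_t effective = count & 0x1f;  // Same mask CanBeZero tests against.
      return x >> effective;              // effective == 0 returns x unchanged.
    }
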
- bool has_side_effects = false;
- HValue* load = HandleKeyedElementAccess(
- obj, key, NULL, prop, prop->LoadId(), RelocInfo::kNoPosition,
- false, // is_store
- &has_side_effects);
- Push(load);
- if (has_side_effects) AddSimulate(prop->LoadId());
- after = BuildIncrement(returns_original_input, expr);
- input = Pop();
+HValue* HGraphBuilder::EnforceNumberType(HValue* number,
+ Handle<Type> expected) {
+ if (expected->Is(Type::Smi())) {
+ return Add<HForceRepresentation>(number, Representation::Smi());
+ }
+ if (expected->Is(Type::Signed32())) {
+ return Add<HForceRepresentation>(number, Representation::Integer32());
+ }
+ return number;
+}
- expr->RecordTypeFeedback(oracle(), zone());
- HandleKeyedElementAccess(obj, key, after, expr, expr->AssignmentId(),
- RelocInfo::kNoPosition,
- true, // is_store
- &has_side_effects);
- // Drop the key from the bailout environment. Overwrite the receiver
- // with the result of the operation, and the placeholder with the
- // original value if necessary.
- Drop(1);
- environment()->SetExpressionStackAt(0, after);
- if (returns_original_input) environment()->SetExpressionStackAt(1, input);
- ASSERT(has_side_effects); // Stores always have side effects.
- AddSimulate(expr->AssignmentId());
+HValue* HGraphBuilder::TruncateToNumber(HValue* value, Handle<Type>* expected) {
+ if (value->IsConstant()) {
+ HConstant* constant = HConstant::cast(value);
+ Maybe<HConstant*> number = constant->CopyToTruncatedNumber(zone());
+ if (number.has_value) {
+ *expected = handle(Type::Number(), isolate());
+ return AddInstruction(number.value);
}
}
- Drop(returns_original_input ? 2 : 1);
- return ast_context()->ReturnValue(expr->is_postfix() ? input : after);
+ // We put temporary values on the stack, which don't correspond to anything
+ // in baseline code. Since nothing is observable we avoid recording those
+ // pushes with a NoObservableSideEffectsScope.
+ NoObservableSideEffectsScope no_effects(this);
+
+ Handle<Type> expected_type = *expected;
+
+ // Separate the number type from the rest.
+ Handle<Type> expected_obj = handle(Type::Intersect(
+ expected_type, handle(Type::NonNumber(), isolate())), isolate());
+ Handle<Type> expected_number = handle(Type::Intersect(
+ expected_type, handle(Type::Number(), isolate())), isolate());
+
+ // We expect to get a number.
+  // (We need to check first, since Type::None->Is(Type::Any()) == true.)
+ if (expected_obj->Is(Type::None())) {
+ ASSERT(!expected_number->Is(Type::None()));
+ return value;
+ }
+
+ if (expected_obj->Is(Type::Undefined())) {
+ // This is already done by HChange.
+ *expected = handle(Type::Union(
+ expected_number, handle(Type::Double(), isolate())), isolate());
+ return value;
+ }
+
+ return value;
}
-HStringCharCodeAt* HGraphBuilder::BuildStringCharCodeAt(HValue* context,
- HValue* string,
- HValue* index) {
- AddInstruction(new(zone()) HCheckNonSmi(string));
- AddInstruction(HCheckInstanceType::NewIsString(string, zone()));
- HStringLength* length = new(zone()) HStringLength(string);
- AddInstruction(length);
- HInstruction* checked_index =
- AddInstruction(new(zone()) HBoundsCheck(index, length));
- return new(zone()) HStringCharCodeAt(context, string, checked_index);
+HInstruction* HOptimizedGraphBuilder::BuildBinaryOperation(
+ BinaryOperation* expr,
+ HValue* left,
+ HValue* right) {
+ Handle<Type> left_type = expr->left()->bounds().lower;
+ Handle<Type> right_type = expr->right()->bounds().lower;
+ Handle<Type> result_type = expr->bounds().lower;
+ Maybe<int> fixed_right_arg = expr->fixed_right_arg();
+
+ return HGraphBuilder::BuildBinaryOperation(expr->op(), left, right,
+ left_type, right_type, result_type, fixed_right_arg);
}
-HInstruction* HGraphBuilder::BuildBinaryOperation(BinaryOperation* expr,
- HValue* left,
- HValue* right) {
- HValue* context = environment()->LookupContext();
- TypeInfo info = oracle()->BinaryType(expr);
- if (info.IsUninitialized()) {
- AddInstruction(new(zone()) HSoftDeoptimize);
- current_block()->MarkAsDeoptimizing();
- info = TypeInfo::Unknown();
+HInstruction* HGraphBuilder::BuildBinaryOperation(
+ Token::Value op,
+ HValue* left,
+ HValue* right,
+ Handle<Type> left_type,
+ Handle<Type> right_type,
+ Handle<Type> result_type,
+ Maybe<int> fixed_right_arg,
+ bool binop_stub) {
+
+ Representation left_rep = Representation::FromType(left_type);
+ Representation right_rep = Representation::FromType(right_type);
+
+ bool maybe_string_add = op == Token::ADD &&
+ (left_type->Maybe(Type::String()) ||
+ right_type->Maybe(Type::String()));
+
+ if (left_type->Is(Type::None())) {
+ Add<HDeoptimize>("Insufficient type feedback for LHS of binary operation",
+ Deoptimizer::SOFT);
+ // TODO(rossberg): we should be able to get rid of non-continuous
+ // defaults.
+ left_type = handle(Type::Any(), isolate());
+ } else {
+ if (!maybe_string_add) left = TruncateToNumber(left, &left_type);
+ left_rep = Representation::FromType(left_type);
+ }
+
+ if (right_type->Is(Type::None())) {
+ Add<HDeoptimize>("Insufficient type feedback for RHS of binary operation",
+ Deoptimizer::SOFT);
+ right_type = handle(Type::Any(), isolate());
+ } else {
+ if (!maybe_string_add) right = TruncateToNumber(right, &right_type);
+ right_rep = Representation::FromType(right_type);
+ }
+
+ // Special case for string addition here.
+ if (op == Token::ADD &&
+ (left_type->Is(Type::String()) || right_type->Is(Type::String()))) {
+ // Validate type feedback for left argument.
+ if (left_type->Is(Type::String())) {
+ IfBuilder if_isstring(this);
+ if_isstring.If<HIsStringAndBranch>(left);
+ if_isstring.Then();
+ if_isstring.ElseDeopt("Expected string for LHS of binary operation");
+ }
+
+ // Validate type feedback for right argument.
+ if (right_type->Is(Type::String())) {
+ IfBuilder if_isstring(this);
+ if_isstring.If<HIsStringAndBranch>(right);
+ if_isstring.Then();
+ if_isstring.ElseDeopt("Expected string for RHS of binary operation");
+ }
+
+ // Convert left argument as necessary.
+ if (left_type->Is(Type::Number())) {
+ ASSERT(right_type->Is(Type::String()));
+ left = BuildNumberToString(left, left_type);
+ } else if (!left_type->Is(Type::String())) {
+ ASSERT(right_type->Is(Type::String()));
+ HValue* function = AddLoadJSBuiltin(Builtins::STRING_ADD_RIGHT);
+ Add<HPushArgument>(left);
+ Add<HPushArgument>(right);
+ return NewUncasted<HInvokeFunction>(function, 2);
+ }
+
+ // Convert right argument as necessary.
+ if (right_type->Is(Type::Number())) {
+ ASSERT(left_type->Is(Type::String()));
+ right = BuildNumberToString(right, right_type);
+ } else if (!right_type->Is(Type::String())) {
+ ASSERT(left_type->Is(Type::String()));
+ HValue* function = AddLoadJSBuiltin(Builtins::STRING_ADD_LEFT);
+ Add<HPushArgument>(left);
+ Add<HPushArgument>(right);
+ return NewUncasted<HInvokeFunction>(function, 2);
+ }
+
+ return NewUncasted<HStringAdd>(left, right, STRING_ADD_CHECK_NONE);
}
+
+ if (binop_stub) {
+ left = EnforceNumberType(left, left_type);
+ right = EnforceNumberType(right, right_type);
+ }
+
+ Representation result_rep = Representation::FromType(result_type);
+
+ bool is_non_primitive = (left_rep.IsTagged() && !left_rep.IsSmi()) ||
+ (right_rep.IsTagged() && !right_rep.IsSmi());
+
HInstruction* instr = NULL;
- switch (expr->op()) {
- case Token::ADD:
- if (info.IsString()) {
- AddInstruction(new(zone()) HCheckNonSmi(left));
- AddInstruction(HCheckInstanceType::NewIsString(left, zone()));
- AddInstruction(new(zone()) HCheckNonSmi(right));
- AddInstruction(HCheckInstanceType::NewIsString(right, zone()));
- instr = new(zone()) HStringAdd(context, left, right);
- } else {
- instr = HAdd::NewHAdd(zone(), context, left, right);
+  // Only the stub is allowed to call into the runtime, since otherwise we would
+  // inline several instructions (including the two pushes) for every tagged
+  // operation in optimized code, which is more expensive than a stub call.
+ if (binop_stub && is_non_primitive) {
+ HValue* function = AddLoadJSBuiltin(BinaryOpIC::TokenToJSBuiltin(op));
+ Add<HPushArgument>(left);
+ Add<HPushArgument>(right);
+ instr = NewUncasted<HInvokeFunction>(function, 2);
+ } else {
+ switch (op) {
+ case Token::ADD:
+ instr = NewUncasted<HAdd>(left, right);
+ break;
+ case Token::SUB:
+ instr = NewUncasted<HSub>(left, right);
+ break;
+ case Token::MUL:
+ instr = NewUncasted<HMul>(left, right);
+ break;
+ case Token::MOD:
+ instr = NewUncasted<HMod>(left, right, fixed_right_arg);
+ break;
+ case Token::DIV:
+ instr = NewUncasted<HDiv>(left, right);
+ break;
+ case Token::BIT_XOR:
+ case Token::BIT_AND:
+ instr = NewUncasted<HBitwise>(op, left, right);
+ break;
+ case Token::BIT_OR: {
+ HValue* operand, *shift_amount;
+ if (left_type->Is(Type::Signed32()) &&
+ right_type->Is(Type::Signed32()) &&
+ MatchRotateRight(left, right, &operand, &shift_amount)) {
+ instr = NewUncasted<HRor>(operand, shift_amount);
+ } else {
+ instr = NewUncasted<HBitwise>(op, left, right);
+ }
+ break;
}
- break;
- case Token::SUB:
- instr = HSub::NewHSub(zone(), context, left, right);
- break;
- case Token::MUL:
- instr = HMul::NewHMul(zone(), context, left, right);
- break;
- case Token::MOD:
- instr = HMod::NewHMod(zone(), context, left, right);
- break;
- case Token::DIV:
- instr = HDiv::NewHDiv(zone(), context, left, right);
- break;
- case Token::BIT_XOR:
- case Token::BIT_AND:
- case Token::BIT_OR:
- instr = HBitwise::NewHBitwise(zone(), expr->op(), context, left, right);
- break;
- case Token::SAR:
- instr = HSar::NewHSar(zone(), context, left, right);
- break;
- case Token::SHR:
- instr = HShr::NewHShr(zone(), context, left, right);
- if (FLAG_opt_safe_uint32_operations && instr->IsShr()) {
- bool can_be_shift_by_zero = true;
- if (right->IsConstant()) {
- HConstant* right_const = HConstant::cast(right);
- if (right_const->HasInteger32Value() &&
- (right_const->Integer32Value() & 0x1f) != 0) {
- can_be_shift_by_zero = false;
- }
+ case Token::SAR:
+ instr = NewUncasted<HSar>(left, right);
+ break;
+ case Token::SHR:
+ instr = NewUncasted<HShr>(left, right);
+ if (FLAG_opt_safe_uint32_operations && instr->IsShr() &&
+ CanBeZero(right)) {
+ graph()->RecordUint32Instruction(instr);
}
+ break;
+ case Token::SHL:
+ instr = NewUncasted<HShl>(left, right);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
- if (can_be_shift_by_zero) graph()->RecordUint32Instruction(instr);
+ if (instr->IsBinaryOperation()) {
+ HBinaryOperation* binop = HBinaryOperation::cast(instr);
+ binop->set_observed_input_representation(1, left_rep);
+ binop->set_observed_input_representation(2, right_rep);
+ binop->initialize_output_representation(result_rep);
+ if (binop_stub) {
+ // Stub should not call into stub.
+ instr->SetFlag(HValue::kCannotBeTagged);
+ // And should truncate on HForceRepresentation already.
+ if (left->IsForceRepresentation()) {
+ left->CopyFlag(HValue::kTruncatingToSmi, instr);
+ left->CopyFlag(HValue::kTruncatingToInt32, instr);
}
- break;
- case Token::SHL:
- instr = HShl::NewHShl(zone(), context, left, right);
- break;
- default:
- UNREACHABLE();
- }
-
- // If we hit an uninitialized binary op stub we will get type info
- // for a smi operation. If one of the operands is a constant string
- // do not generate code assuming it is a smi operation.
- if (info.IsSmi() &&
- ((left->IsConstant() && HConstant::cast(left)->handle()->IsString()) ||
- (right->IsConstant() && HConstant::cast(right)->handle()->IsString()))) {
- return instr;
- }
- Representation rep = ToRepresentation(info);
- // We only generate either int32 or generic tagged bitwise operations.
- if (instr->IsBitwiseBinaryOperation()) {
- HBitwiseBinaryOperation::cast(instr)->
- InitializeObservedInputRepresentation(rep);
- if (rep.IsDouble()) rep = Representation::Integer32();
- }
- TraceRepresentation(expr->op(), info, instr, rep);
- instr->AssumeRepresentation(rep);
+ if (right->IsForceRepresentation()) {
+ right->CopyFlag(HValue::kTruncatingToSmi, instr);
+ right->CopyFlag(HValue::kTruncatingToInt32, instr);
+ }
+ }
+ }
return instr;
}
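The string-addition path in BuildBinaryOperation above distinguishes three cases per operand based on type feedback: a known string is used directly, a known number is converted inline with BuildNumberToString, and anything else is handed to the STRING_ADD_LEFT/RIGHT builtins because its ToPrimitive conversion may run arbitrary code. A compact restatement of that per-operand decision (illustration only, not part of the patch; the enums and function are hypothetical):

    // Hypothetical summary of the per-operand dispatch in the Token::ADD string
    // case above. "Other" covers values whose ToPrimitive conversion could have
    // side effects, which is why they are left to the generic builtin.
    enum class StringAddOperand { kString, kNumber, kOther };
    enum class StringAddPath { kUseDirectly, kInlineNumberToString, kCallBuiltin };

    static StringAddPath ChoosePath(StringAddOperand operand) {
      switch (operand) {
        case StringAddOperand::kString: return StringAddPath::kUseDirectly;
        case StringAddOperand::kNumber: return StringAddPath::kInlineNumberToString;
        case StringAddOperand::kOther:  return StringAddPath::kCallBuiltin;
      }
      return StringAddPath::kCallBuiltin;  // Unreachable; silences warnings.
    }
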
@@ -8339,14 +7988,16 @@ static bool IsClassOfTest(CompareOperation* expr) {
if (call == NULL) return false;
Literal* literal = expr->right()->AsLiteral();
if (literal == NULL) return false;
- if (!literal->handle()->IsString()) return false;
- if (!call->name()->IsEqualTo(CStrVector("_ClassOf"))) return false;
+ if (!literal->value()->IsString()) return false;
+ if (!call->name()->IsOneByteEqualTo(STATIC_ASCII_VECTOR("_ClassOf"))) {
+ return false;
+ }
ASSERT(call->arguments()->length() == 1);
return true;
}
-void HGraphBuilder::VisitBinaryOperation(BinaryOperation* expr) {
+void HOptimizedGraphBuilder::VisitBinaryOperation(BinaryOperation* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
@@ -8362,7 +8013,7 @@ void HGraphBuilder::VisitBinaryOperation(BinaryOperation* expr) {
}
-void HGraphBuilder::VisitComma(BinaryOperation* expr) {
+void HOptimizedGraphBuilder::VisitComma(BinaryOperation* expr) {
CHECK_ALIVE(VisitForEffect(expr->left()));
// Visit the right subexpression in the same AST context as the entire
// expression.
@@ -8370,7 +8021,7 @@ void HGraphBuilder::VisitComma(BinaryOperation* expr) {
}
-void HGraphBuilder::VisitLogicalExpression(BinaryOperation* expr) {
+void HOptimizedGraphBuilder::VisitLogicalExpression(BinaryOperation* expr) {
bool is_logical_and = expr->op() == Token::AND;
if (ast_context()->IsTest()) {
TestContext* context = TestContext::cast(ast_context());
@@ -8397,16 +8048,26 @@ void HGraphBuilder::VisitLogicalExpression(BinaryOperation* expr) {
} else if (ast_context()->IsValue()) {
CHECK_ALIVE(VisitForValue(expr->left()));
ASSERT(current_block() != NULL);
+ HValue* left_value = Top();
+
+ if (left_value->IsConstant()) {
+ HConstant* left_constant = HConstant::cast(left_value);
+ if ((is_logical_and && left_constant->BooleanValue()) ||
+ (!is_logical_and && !left_constant->BooleanValue())) {
+ Drop(1); // left_value.
+ CHECK_ALIVE(VisitForValue(expr->right()));
+ }
+ return ast_context()->ReturnValue(Pop());
+ }
// We need an extra block to maintain edge-split form.
HBasicBlock* empty_block = graph()->CreateBasicBlock();
HBasicBlock* eval_right = graph()->CreateBasicBlock();
- TypeFeedbackId test_id = expr->left()->test_id();
- ToBooleanStub::Types expected(oracle()->ToBooleanTypes(test_id));
+ ToBooleanStub::Types expected(expr->left()->to_boolean_types());
HBranch* test = is_logical_and
- ? new(zone()) HBranch(Top(), eval_right, empty_block, expected)
- : new(zone()) HBranch(Top(), empty_block, eval_right, expected);
- current_block()->Finish(test);
+ ? New<HBranch>(left_value, expected, eval_right, empty_block)
+ : New<HBranch>(left_value, expected, empty_block, eval_right);
+ FinishCurrentBlock(test);
set_current_block(eval_right);
Drop(1); // Value of the left subexpression.
@@ -8460,169 +8121,90 @@ void HGraphBuilder::VisitLogicalExpression(BinaryOperation* expr) {
}
-void HGraphBuilder::VisitArithmeticExpression(BinaryOperation* expr) {
+void HOptimizedGraphBuilder::VisitArithmeticExpression(BinaryOperation* expr) {
CHECK_ALIVE(VisitForValue(expr->left()));
CHECK_ALIVE(VisitForValue(expr->right()));
+ if (!FLAG_emit_opt_code_positions) SetSourcePosition(expr->position());
HValue* right = Pop();
HValue* left = Pop();
HInstruction* instr = BuildBinaryOperation(expr, left, right);
- instr->set_position(expr->position());
return ast_context()->ReturnInstruction(instr, expr->id());
}
-void HGraphBuilder::TraceRepresentation(Token::Value op,
- TypeInfo info,
- HValue* value,
- Representation rep) {
- if (!FLAG_trace_representation) return;
- // TODO(svenpanne) Under which circumstances are we actually not flexible?
- // At first glance, this looks a bit weird...
- bool flexible = value->CheckFlag(HValue::kFlexibleRepresentation);
- PrintF("Operation %s has type info %s, %schange representation assumption "
- "for %s (ID %d) from %s to %s\n",
- Token::Name(op),
- info.ToString(),
- flexible ? "" : " DO NOT ",
- value->Mnemonic(),
- graph_->GetMaximumValueID(),
- value->representation().Mnemonic(),
- rep.Mnemonic());
-}
-
-
-Representation HGraphBuilder::ToRepresentation(TypeInfo info) {
- if (info.IsSmi()) return Representation::Integer32();
- if (info.IsInteger32()) return Representation::Integer32();
- if (info.IsDouble()) return Representation::Double();
- if (info.IsNumber()) return Representation::Double();
- return Representation::Tagged();
-}
-
-
-void HGraphBuilder::HandleLiteralCompareTypeof(CompareOperation* expr,
- HTypeof* typeof_expr,
- Handle<String> check) {
- // Note: The HTypeof itself is removed during canonicalization, if possible.
- HValue* value = typeof_expr->value();
- HTypeofIsAndBranch* instr = new(zone()) HTypeofIsAndBranch(value, check);
- instr->set_position(expr->position());
+void HOptimizedGraphBuilder::HandleLiteralCompareTypeof(CompareOperation* expr,
+ Expression* sub_expr,
+ Handle<String> check) {
+ CHECK_ALIVE(VisitForTypeOf(sub_expr));
+ if (!FLAG_emit_opt_code_positions) SetSourcePosition(expr->position());
+ HValue* value = Pop();
+ HTypeofIsAndBranch* instr = New<HTypeofIsAndBranch>(value, check);
return ast_context()->ReturnControl(instr, expr->id());
}
-static bool MatchLiteralCompareNil(HValue* left,
- Token::Value op,
- HValue* right,
- Handle<Object> nil,
- HValue** expr) {
- if (left->IsConstant() &&
- HConstant::cast(left)->handle().is_identical_to(nil) &&
- Token::IsEqualityOp(op)) {
- *expr = right;
- return true;
- }
- return false;
-}
-
-
-static bool MatchLiteralCompareTypeof(HValue* left,
- Token::Value op,
- HValue* right,
- HTypeof** typeof_expr,
- Handle<String>* check) {
- if (left->IsTypeof() &&
- Token::IsEqualityOp(op) &&
- right->IsConstant() &&
- HConstant::cast(right)->handle()->IsString()) {
- *typeof_expr = HTypeof::cast(left);
- *check = Handle<String>::cast(HConstant::cast(right)->handle());
- return true;
- }
- return false;
-}
-
-
-static bool IsLiteralCompareTypeof(HValue* left,
- Token::Value op,
- HValue* right,
- HTypeof** typeof_expr,
- Handle<String>* check) {
- return MatchLiteralCompareTypeof(left, op, right, typeof_expr, check) ||
- MatchLiteralCompareTypeof(right, op, left, typeof_expr, check);
-}
-
-
-static bool IsLiteralCompareNil(HValue* left,
- Token::Value op,
- HValue* right,
- Handle<Object> nil,
- HValue** expr) {
- return MatchLiteralCompareNil(left, op, right, nil, expr) ||
- MatchLiteralCompareNil(right, op, left, nil, expr);
-}
-
-
-static bool IsLiteralCompareBool(HValue* left,
+static bool IsLiteralCompareBool(Isolate* isolate,
+ HValue* left,
Token::Value op,
HValue* right) {
return op == Token::EQ_STRICT &&
- ((left->IsConstant() && HConstant::cast(left)->handle()->IsBoolean()) ||
- (right->IsConstant() && HConstant::cast(right)->handle()->IsBoolean()));
+ ((left->IsConstant() &&
+ HConstant::cast(left)->handle(isolate)->IsBoolean()) ||
+ (right->IsConstant() &&
+ HConstant::cast(right)->handle(isolate)->IsBoolean()));
}
-void HGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
+void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
+
+ if (!FLAG_emit_opt_code_positions) SetSourcePosition(expr->position());
+
+ // Check for a few fast cases. The AST visiting behavior must be in sync
+ // with the full codegen: We don't push both left and right values onto
+ // the expression stack when one side is a special-case literal.
+ Expression* sub_expr = NULL;
+ Handle<String> check;
+ if (expr->IsLiteralCompareTypeof(&sub_expr, &check)) {
+ return HandleLiteralCompareTypeof(expr, sub_expr, check);
+ }
+ if (expr->IsLiteralCompareUndefined(&sub_expr, isolate())) {
+ return HandleLiteralCompareNil(expr, sub_expr, kUndefinedValue);
+ }
+ if (expr->IsLiteralCompareNull(&sub_expr)) {
+ return HandleLiteralCompareNil(expr, sub_expr, kNullValue);
+ }
+
if (IsClassOfTest(expr)) {
CallRuntime* call = expr->left()->AsCallRuntime();
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
Literal* literal = expr->right()->AsLiteral();
- Handle<String> rhs = Handle<String>::cast(literal->handle());
- HClassOfTestAndBranch* instr =
- new(zone()) HClassOfTestAndBranch(value, rhs);
- instr->set_position(expr->position());
+ Handle<String> rhs = Handle<String>::cast(literal->value());
+ HClassOfTestAndBranch* instr = New<HClassOfTestAndBranch>(value, rhs);
return ast_context()->ReturnControl(instr, expr->id());
}
- TypeInfo type_info = oracle()->CompareType(expr);
- // Check if this expression was ever executed according to type feedback.
- // Note that for the special typeof/null/undefined cases we get unknown here.
- if (type_info.IsUninitialized()) {
- AddInstruction(new(zone()) HSoftDeoptimize);
- current_block()->MarkAsDeoptimizing();
- type_info = TypeInfo::Unknown();
- }
+ Handle<Type> left_type = expr->left()->bounds().lower;
+ Handle<Type> right_type = expr->right()->bounds().lower;
+ Handle<Type> combined_type = expr->combined_type();
+ Representation combined_rep = Representation::FromType(combined_type);
+ Representation left_rep = Representation::FromType(left_type);
+ Representation right_rep = Representation::FromType(right_type);
CHECK_ALIVE(VisitForValue(expr->left()));
CHECK_ALIVE(VisitForValue(expr->right()));
- HValue* context = environment()->LookupContext();
HValue* right = Pop();
HValue* left = Pop();
Token::Value op = expr->op();
- HTypeof* typeof_expr = NULL;
- Handle<String> check;
- if (IsLiteralCompareTypeof(left, op, right, &typeof_expr, &check)) {
- return HandleLiteralCompareTypeof(expr, typeof_expr, check);
- }
- HValue* sub_expr = NULL;
- Factory* f = graph()->isolate()->factory();
- if (IsLiteralCompareNil(left, op, right, f->undefined_value(), &sub_expr)) {
- return HandleLiteralCompareNil(expr, sub_expr, kUndefinedValue);
- }
- if (IsLiteralCompareNil(left, op, right, f->null_value(), &sub_expr)) {
- return HandleLiteralCompareNil(expr, sub_expr, kNullValue);
- }
- if (IsLiteralCompareBool(left, op, right)) {
+ if (IsLiteralCompareBool(isolate(), left, op, right)) {
HCompareObjectEqAndBranch* result =
- new(zone()) HCompareObjectEqAndBranch(left, right);
- result->set_position(expr->position());
+ New<HCompareObjectEqAndBranch>(left, right);
return ast_context()->ReturnControl(result, expr->id());
}
@@ -8634,10 +8216,10 @@ void HGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
VariableProxy* proxy = expr->right()->AsVariableProxy();
bool global_function = (proxy != NULL) && proxy->var()->IsUnallocated();
if (global_function &&
- info()->has_global_object() &&
- !info()->global_object()->IsAccessCheckNeeded()) {
+ current_info()->has_global_object() &&
+ !current_info()->global_object()->IsAccessCheckNeeded()) {
Handle<String> name = proxy->name();
- Handle<GlobalObject> global(info()->global_object());
+ Handle<GlobalObject> global(current_info()->global_object());
LookupResult lookup(isolate());
global->Lookup(*name, &lookup);
if (lookup.IsNormal() && lookup.GetValue()->IsJSFunction()) {
@@ -8653,105 +8235,369 @@ void HGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
// If the target is not null we have found a known global function that is
// assumed to stay the same for this instanceof.
if (target.is_null()) {
- HInstanceOf* result = new(zone()) HInstanceOf(context, left, right);
- result->set_position(expr->position());
+ HInstanceOf* result = New<HInstanceOf>(left, right);
return ast_context()->ReturnInstruction(result, expr->id());
} else {
- AddInstruction(new(zone()) HCheckFunction(right, target));
+ Add<HCheckValue>(right, target);
HInstanceOfKnownGlobal* result =
- new(zone()) HInstanceOfKnownGlobal(context, left, target);
- result->set_position(expr->position());
+ New<HInstanceOfKnownGlobal>(left, target);
return ast_context()->ReturnInstruction(result, expr->id());
}
+
+ // Code below assumes that we don't fall through.
+ UNREACHABLE();
} else if (op == Token::IN) {
- HIn* result = new(zone()) HIn(context, left, right);
- result->set_position(expr->position());
+ HValue* function = AddLoadJSBuiltin(Builtins::IN);
+ Add<HPushArgument>(left);
+ Add<HPushArgument>(right);
+ // TODO(olivf) InvokeFunction produces a check for the parameter count,
+ // even though we are certain to pass the correct number of arguments here.
+ HInstruction* result = New<HInvokeFunction>(function, 2);
return ast_context()->ReturnInstruction(result, expr->id());
- } else if (type_info.IsNonPrimitive()) {
+ }
+
+ // Cases handled below depend on collected type feedback. They should
+ // soft deoptimize when there is no type feedback.
+ if (combined_type->Is(Type::None())) {
+ Add<HDeoptimize>("Insufficient type feedback for combined type "
+ "of binary operation",
+ Deoptimizer::SOFT);
+ combined_type = left_type = right_type = handle(Type::Any(), isolate());
+ }
+
+ if (combined_type->Is(Type::Receiver())) {
switch (op) {
case Token::EQ:
case Token::EQ_STRICT: {
// Can we get away with map check and not instance type check?
- Handle<Map> map = oracle()->GetCompareMap(expr);
- if (!map.is_null()) {
- AddInstruction(new(zone()) HCheckNonSmi(left));
- AddInstruction(HCheckMaps::NewWithTransitions(left, map, zone()));
- AddInstruction(new(zone()) HCheckNonSmi(right));
- AddInstruction(HCheckMaps::NewWithTransitions(right, map, zone()));
+ if (combined_type->IsClass()) {
+ Handle<Map> map = combined_type->AsClass();
+ AddCheckMap(left, map);
+ AddCheckMap(right, map);
HCompareObjectEqAndBranch* result =
- new(zone()) HCompareObjectEqAndBranch(left, right);
- result->set_position(expr->position());
+ New<HCompareObjectEqAndBranch>(left, right);
return ast_context()->ReturnControl(result, expr->id());
} else {
- AddInstruction(new(zone()) HCheckNonSmi(left));
- AddInstruction(HCheckInstanceType::NewIsSpecObject(left, zone()));
- AddInstruction(new(zone()) HCheckNonSmi(right));
- AddInstruction(HCheckInstanceType::NewIsSpecObject(right, zone()));
+ BuildCheckHeapObject(left);
+ Add<HCheckInstanceType>(left, HCheckInstanceType::IS_SPEC_OBJECT);
+ BuildCheckHeapObject(right);
+ Add<HCheckInstanceType>(right, HCheckInstanceType::IS_SPEC_OBJECT);
HCompareObjectEqAndBranch* result =
- new(zone()) HCompareObjectEqAndBranch(left, right);
- result->set_position(expr->position());
+ New<HCompareObjectEqAndBranch>(left, right);
return ast_context()->ReturnControl(result, expr->id());
}
}
default:
- return Bailout("Unsupported non-primitive compare");
- }
- } else if (type_info.IsString() && oracle()->IsSymbolCompare(expr) &&
- (op == Token::EQ || op == Token::EQ_STRICT)) {
- AddInstruction(new(zone()) HCheckNonSmi(left));
- AddInstruction(HCheckInstanceType::NewIsSymbol(left, zone()));
- AddInstruction(new(zone()) HCheckNonSmi(right));
- AddInstruction(HCheckInstanceType::NewIsSymbol(right, zone()));
+ return Bailout(kUnsupportedNonPrimitiveCompare);
+ }
+ } else if (combined_type->Is(Type::InternalizedString()) &&
+ Token::IsEqualityOp(op)) {
+ BuildCheckHeapObject(left);
+ Add<HCheckInstanceType>(left, HCheckInstanceType::IS_INTERNALIZED_STRING);
+ BuildCheckHeapObject(right);
+ Add<HCheckInstanceType>(right, HCheckInstanceType::IS_INTERNALIZED_STRING);
HCompareObjectEqAndBranch* result =
- new(zone()) HCompareObjectEqAndBranch(left, right);
- result->set_position(expr->position());
+ New<HCompareObjectEqAndBranch>(left, right);
+ return ast_context()->ReturnControl(result, expr->id());
+ } else if (combined_type->Is(Type::String())) {
+ BuildCheckHeapObject(left);
+ Add<HCheckInstanceType>(left, HCheckInstanceType::IS_STRING);
+ BuildCheckHeapObject(right);
+ Add<HCheckInstanceType>(right, HCheckInstanceType::IS_STRING);
+ HStringCompareAndBranch* result =
+ New<HStringCompareAndBranch>(left, right, op);
return ast_context()->ReturnControl(result, expr->id());
} else {
- Representation r = ToRepresentation(type_info);
- if (r.IsTagged()) {
- HCompareGeneric* result =
- new(zone()) HCompareGeneric(context, left, right, op);
- result->set_position(expr->position());
+ if (combined_rep.IsTagged() || combined_rep.IsNone()) {
+ HCompareGeneric* result = New<HCompareGeneric>(left, right, op);
+ result->set_observed_input_representation(1, left_rep);
+ result->set_observed_input_representation(2, right_rep);
return ast_context()->ReturnInstruction(result, expr->id());
} else {
- HCompareIDAndBranch* result =
- new(zone()) HCompareIDAndBranch(left, right, op);
- result->set_position(expr->position());
- result->SetInputRepresentation(r);
+ HCompareNumericAndBranch* result =
+ New<HCompareNumericAndBranch>(left, right, op);
+ result->set_observed_input_representation(left_rep, right_rep);
return ast_context()->ReturnControl(result, expr->id());
}
}
}
-void HGraphBuilder::HandleLiteralCompareNil(CompareOperation* expr,
- HValue* value,
- NilValue nil) {
+void HOptimizedGraphBuilder::HandleLiteralCompareNil(CompareOperation* expr,
+ Expression* sub_expr,
+ NilValue nil) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
- EqualityKind kind =
- expr->op() == Token::EQ_STRICT ? kStrictEquality : kNonStrictEquality;
- HIsNilAndBranch* instr = new(zone()) HIsNilAndBranch(value, kind, nil);
- instr->set_position(expr->position());
- return ast_context()->ReturnControl(instr, expr->id());
+ ASSERT(expr->op() == Token::EQ || expr->op() == Token::EQ_STRICT);
+ if (!FLAG_emit_opt_code_positions) SetSourcePosition(expr->position());
+ CHECK_ALIVE(VisitForValue(sub_expr));
+ HValue* value = Pop();
+ if (expr->op() == Token::EQ_STRICT) {
+ HConstant* nil_constant = nil == kNullValue
+ ? graph()->GetConstantNull()
+ : graph()->GetConstantUndefined();
+ HCompareObjectEqAndBranch* instr =
+ New<HCompareObjectEqAndBranch>(value, nil_constant);
+ return ast_context()->ReturnControl(instr, expr->id());
+ } else {
+ ASSERT_EQ(Token::EQ, expr->op());
+ Handle<Type> type = expr->combined_type()->Is(Type::None())
+ ? handle(Type::Any(), isolate_)
+ : expr->combined_type();
+ HIfContinuation continuation;
+ BuildCompareNil(value, type, &continuation);
+ return ast_context()->ReturnContinuation(&continuation, expr->id());
+ }
}
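HandleLiteralCompareNil above splits the two comparison flavours: strict equality against null or undefined reduces to an object-equality branch against the single constant, while loose equality must also accept the other nil value and undetectable objects (such as document.all), which is what BuildCompareNil handles. A hedged restatement of the semantic difference (illustration only, not part of the patch; the helpers are hypothetical):

    // Hypothetical restatement of the two nil-comparison flavours handled above:
    // strict equality matches exactly one constant, while loose equality groups
    // null, undefined and undetectable objects together.
    static bool StrictEqualsNil(bool is_null, bool is_undefined, bool want_null) {
      return want_null ? is_null : is_undefined;
    }

    static bool LooseEqualsNil(bool is_null, bool is_undefined,
                               bool is_undetectable) {
      return is_null || is_undefined || is_undetectable;
    }
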
-HInstruction* HGraphBuilder::BuildThisFunction() {
+HInstruction* HOptimizedGraphBuilder::BuildThisFunction() {
// If we share optimized code between different closures, the
// this-function is not a constant, except inside an inlined body.
if (function_state()->outer() != NULL) {
- return new(zone()) HConstant(
- function_state()->compilation_info()->closure(),
- Representation::Tagged());
+ return New<HConstant>(
+ function_state()->compilation_info()->closure());
} else {
- return new(zone()) HThisFunction;
+ return New<HThisFunction>();
}
}
-void HGraphBuilder::VisitThisFunction(ThisFunction* expr) {
+HInstruction* HOptimizedGraphBuilder::BuildFastLiteral(
+ Handle<JSObject> boilerplate_object,
+ AllocationSiteContext* site_context) {
+ NoObservableSideEffectsScope no_effects(this);
+ InstanceType instance_type = boilerplate_object->map()->instance_type();
+ ASSERT(instance_type == JS_ARRAY_TYPE || instance_type == JS_OBJECT_TYPE);
+
+ HType type = instance_type == JS_ARRAY_TYPE
+ ? HType::JSArray() : HType::JSObject();
+ HValue* object_size_constant = Add<HConstant>(
+ boilerplate_object->map()->instance_size());
+ HInstruction* object = Add<HAllocate>(object_size_constant, type,
+ isolate()->heap()->GetPretenureMode(), instance_type);
+
+ BuildEmitObjectHeader(boilerplate_object, object);
+
+ Handle<FixedArrayBase> elements(boilerplate_object->elements());
+ int elements_size = (elements->length() > 0 &&
+ elements->map() != isolate()->heap()->fixed_cow_array_map()) ?
+ elements->Size() : 0;
+
+ HInstruction* object_elements = NULL;
+ if (elements_size > 0) {
+ HValue* object_elements_size = Add<HConstant>(elements_size);
+ if (boilerplate_object->HasFastDoubleElements()) {
+ object_elements = Add<HAllocate>(object_elements_size, HType::JSObject(),
+ isolate()->heap()->GetPretenureMode(), FIXED_DOUBLE_ARRAY_TYPE);
+ } else {
+ object_elements = Add<HAllocate>(object_elements_size, HType::JSObject(),
+ isolate()->heap()->GetPretenureMode(), FIXED_ARRAY_TYPE);
+ }
+ }
+ BuildInitElementsInObjectHeader(boilerplate_object, object, object_elements);
+
+ // Copy object elements if non-COW.
+ if (object_elements != NULL) {
+ BuildEmitElements(boilerplate_object, elements, object_elements,
+ site_context);
+ }
+
+ // Copy in-object properties.
+ if (boilerplate_object->map()->NumberOfFields() != 0) {
+ BuildEmitInObjectProperties(boilerplate_object, object, site_context);
+ }
+ return object;
+}
+
+
+void HOptimizedGraphBuilder::BuildEmitObjectHeader(
+ Handle<JSObject> boilerplate_object,
+ HInstruction* object) {
+ ASSERT(boilerplate_object->properties()->length() == 0);
+
+ Handle<Map> boilerplate_object_map(boilerplate_object->map());
+ AddStoreMapConstant(object, boilerplate_object_map);
+
+ Handle<Object> properties_field =
+ Handle<Object>(boilerplate_object->properties(), isolate());
+ ASSERT(*properties_field == isolate()->heap()->empty_fixed_array());
+ HInstruction* properties = Add<HConstant>(properties_field);
+ HObjectAccess access = HObjectAccess::ForPropertiesPointer();
+ Add<HStoreNamedField>(object, access, properties);
+
+ if (boilerplate_object->IsJSArray()) {
+ Handle<JSArray> boilerplate_array =
+ Handle<JSArray>::cast(boilerplate_object);
+ Handle<Object> length_field =
+ Handle<Object>(boilerplate_array->length(), isolate());
+ HInstruction* length = Add<HConstant>(length_field);
+
+ ASSERT(boilerplate_array->length()->IsSmi());
+ Add<HStoreNamedField>(object, HObjectAccess::ForArrayLength(
+ boilerplate_array->GetElementsKind()), length);
+ }
+}
+
+
+void HOptimizedGraphBuilder::BuildInitElementsInObjectHeader(
+ Handle<JSObject> boilerplate_object,
+ HInstruction* object,
+ HInstruction* object_elements) {
+ ASSERT(boilerplate_object->properties()->length() == 0);
+ if (object_elements == NULL) {
+ Handle<Object> elements_field =
+ Handle<Object>(boilerplate_object->elements(), isolate());
+ object_elements = Add<HConstant>(elements_field);
+ }
+ Add<HStoreNamedField>(object, HObjectAccess::ForElementsPointer(),
+ object_elements);
+}
+
+
+void HOptimizedGraphBuilder::BuildEmitInObjectProperties(
+ Handle<JSObject> boilerplate_object,
+ HInstruction* object,
+ AllocationSiteContext* site_context) {
+ Handle<DescriptorArray> descriptors(
+ boilerplate_object->map()->instance_descriptors());
+ int limit = boilerplate_object->map()->NumberOfOwnDescriptors();
+
+ int copied_fields = 0;
+ for (int i = 0; i < limit; i++) {
+ PropertyDetails details = descriptors->GetDetails(i);
+ if (details.type() != FIELD) continue;
+ copied_fields++;
+ int index = descriptors->GetFieldIndex(i);
+ int property_offset = boilerplate_object->GetInObjectPropertyOffset(index);
+ Handle<Name> name(descriptors->GetKey(i));
+ Handle<Object> value =
+ Handle<Object>(boilerplate_object->InObjectPropertyAt(index),
+ isolate());
+
+ // The access for the store depends on the type of the boilerplate.
+ HObjectAccess access = boilerplate_object->IsJSArray() ?
+ HObjectAccess::ForJSArrayOffset(property_offset) :
+ HObjectAccess::ForJSObjectOffset(property_offset);
+
+ if (value->IsJSObject()) {
+ Handle<JSObject> value_object = Handle<JSObject>::cast(value);
+ Handle<AllocationSite> current_site = site_context->EnterNewScope();
+ HInstruction* result =
+ BuildFastLiteral(value_object, site_context);
+ site_context->ExitScope(current_site, value_object);
+ Add<HStoreNamedField>(object, access, result);
+ } else {
+ Representation representation = details.representation();
+ HInstruction* value_instruction = Add<HConstant>(value);
+
+ if (representation.IsDouble()) {
+ // Allocate a HeapNumber box and store the value into it.
+ HValue* heap_number_constant = Add<HConstant>(HeapNumber::kSize);
+ // TODO(mvstanton): This heap number alloc does not have a corresponding
+ // AllocationSite. That is okay because
+ // 1) it's a child object of another object with a valid allocation site
+ // 2) we can just use the mode of the parent object for pretenuring
+        // The TODO is to replace GetPretenureMode() with
+        // site_context->top()->GetPretenureMode().
+ HInstruction* double_box =
+ Add<HAllocate>(heap_number_constant, HType::HeapNumber(),
+ isolate()->heap()->GetPretenureMode(), HEAP_NUMBER_TYPE);
+ AddStoreMapConstant(double_box,
+ isolate()->factory()->heap_number_map());
+ Add<HStoreNamedField>(double_box, HObjectAccess::ForHeapNumberValue(),
+ value_instruction);
+ value_instruction = double_box;
+ }
+
+ Add<HStoreNamedField>(object, access, value_instruction);
+ }
+ }
+
+ int inobject_properties = boilerplate_object->map()->inobject_properties();
+ HInstruction* value_instruction =
+ Add<HConstant>(isolate()->factory()->one_pointer_filler_map());
+ for (int i = copied_fields; i < inobject_properties; i++) {
+ ASSERT(boilerplate_object->IsJSObject());
+ int property_offset = boilerplate_object->GetInObjectPropertyOffset(i);
+ HObjectAccess access = HObjectAccess::ForJSObjectOffset(property_offset);
+ Add<HStoreNamedField>(object, access, value_instruction);
+ }
+}
+
+
+void HOptimizedGraphBuilder::BuildEmitElements(
+ Handle<JSObject> boilerplate_object,
+ Handle<FixedArrayBase> elements,
+ HValue* object_elements,
+ AllocationSiteContext* site_context) {
+ ElementsKind kind = boilerplate_object->map()->elements_kind();
+ int elements_length = elements->length();
+ HValue* object_elements_length = Add<HConstant>(elements_length);
+ BuildInitializeElementsHeader(object_elements, kind, object_elements_length);
+
+ // Copy elements backing store content.
+ if (elements->IsFixedDoubleArray()) {
+ BuildEmitFixedDoubleArray(elements, kind, object_elements);
+ } else if (elements->IsFixedArray()) {
+ BuildEmitFixedArray(elements, kind, object_elements,
+ site_context);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+void HOptimizedGraphBuilder::BuildEmitFixedDoubleArray(
+ Handle<FixedArrayBase> elements,
+ ElementsKind kind,
+ HValue* object_elements) {
+ HInstruction* boilerplate_elements = Add<HConstant>(elements);
+ int elements_length = elements->length();
+ for (int i = 0; i < elements_length; i++) {
+ HValue* key_constant = Add<HConstant>(i);
+ HInstruction* value_instruction =
+ Add<HLoadKeyed>(boilerplate_elements, key_constant,
+ static_cast<HValue*>(NULL), kind,
+ ALLOW_RETURN_HOLE);
+ HInstruction* store = Add<HStoreKeyed>(object_elements, key_constant,
+ value_instruction, kind);
+ store->SetFlag(HValue::kAllowUndefinedAsNaN);
+ }
+}
+
+
+void HOptimizedGraphBuilder::BuildEmitFixedArray(
+ Handle<FixedArrayBase> elements,
+ ElementsKind kind,
+ HValue* object_elements,
+ AllocationSiteContext* site_context) {
+ HInstruction* boilerplate_elements = Add<HConstant>(elements);
+ int elements_length = elements->length();
+ Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
+ for (int i = 0; i < elements_length; i++) {
+ Handle<Object> value(fast_elements->get(i), isolate());
+ HValue* key_constant = Add<HConstant>(i);
+ if (value->IsJSObject()) {
+ Handle<JSObject> value_object = Handle<JSObject>::cast(value);
+ Handle<AllocationSite> current_site = site_context->EnterNewScope();
+ HInstruction* result =
+ BuildFastLiteral(value_object, site_context);
+ site_context->ExitScope(current_site, value_object);
+ Add<HStoreKeyed>(object_elements, key_constant, result, kind);
+ } else {
+ HInstruction* value_instruction =
+ Add<HLoadKeyed>(boilerplate_elements, key_constant,
+ static_cast<HValue*>(NULL), kind,
+ ALLOW_RETURN_HOLE);
+ Add<HStoreKeyed>(object_elements, key_constant, value_instruction, kind);
+ }
+ }
+}
+
+
+void HOptimizedGraphBuilder::VisitThisFunction(ThisFunction* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
@@ -8760,25 +8606,25 @@ void HGraphBuilder::VisitThisFunction(ThisFunction* expr) {
}
-void HGraphBuilder::VisitDeclarations(ZoneList<Declaration*>* declarations) {
+void HOptimizedGraphBuilder::VisitDeclarations(
+ ZoneList<Declaration*>* declarations) {
ASSERT(globals_.is_empty());
AstVisitor::VisitDeclarations(declarations);
if (!globals_.is_empty()) {
Handle<FixedArray> array =
isolate()->factory()->NewFixedArray(globals_.length(), TENURED);
for (int i = 0; i < globals_.length(); ++i) array->set(i, *globals_.at(i));
- int flags = DeclareGlobalsEvalFlag::encode(info()->is_eval()) |
- DeclareGlobalsNativeFlag::encode(info()->is_native()) |
- DeclareGlobalsLanguageMode::encode(info()->language_mode());
- HInstruction* result = new(zone()) HDeclareGlobals(
- environment()->LookupContext(), array, flags);
- AddInstruction(result);
+ int flags = DeclareGlobalsEvalFlag::encode(current_info()->is_eval()) |
+ DeclareGlobalsNativeFlag::encode(current_info()->is_native()) |
+ DeclareGlobalsLanguageMode::encode(current_info()->language_mode());
+ Add<HDeclareGlobals>(array, flags);
globals_.Clear();
}
}
-void HGraphBuilder::VisitVariableDeclaration(VariableDeclaration* declaration) {
+void HOptimizedGraphBuilder::VisitVariableDeclaration(
+ VariableDeclaration* declaration) {
VariableProxy* proxy = declaration->proxy();
VariableMode mode = declaration->mode();
Variable* variable = proxy->var();
@@ -8800,27 +8646,29 @@ void HGraphBuilder::VisitVariableDeclaration(VariableDeclaration* declaration) {
case Variable::CONTEXT:
if (hole_init) {
HValue* value = graph()->GetConstantHole();
- HValue* context = environment()->LookupContext();
- HStoreContextSlot* store = new(zone()) HStoreContextSlot(
+ HValue* context = environment()->context();
+ HStoreContextSlot* store = Add<HStoreContextSlot>(
context, variable->index(), HStoreContextSlot::kNoCheck, value);
- AddInstruction(store);
- if (store->HasObservableSideEffects()) AddSimulate(proxy->id());
+ if (store->HasObservableSideEffects()) {
+ Add<HSimulate>(proxy->id(), REMOVABLE_SIMULATE);
+ }
}
break;
case Variable::LOOKUP:
- return Bailout("unsupported lookup slot in declaration");
+ return Bailout(kUnsupportedLookupSlotInDeclaration);
}
}
-void HGraphBuilder::VisitFunctionDeclaration(FunctionDeclaration* declaration) {
+void HOptimizedGraphBuilder::VisitFunctionDeclaration(
+ FunctionDeclaration* declaration) {
VariableProxy* proxy = declaration->proxy();
Variable* variable = proxy->var();
switch (variable->location()) {
case Variable::UNALLOCATED: {
globals_.Add(variable->name(), zone());
- Handle<SharedFunctionInfo> function =
- Compiler::BuildFunctionInfo(declaration->fun(), info()->script());
+ Handle<SharedFunctionInfo> function = Compiler::BuildFunctionInfo(
+ declaration->fun(), current_info()->script());
// Check for stack-overflow exception.
if (function.is_null()) return SetStackOverflow();
globals_.Add(function, zone());
@@ -8830,156 +8678,163 @@ void HGraphBuilder::VisitFunctionDeclaration(FunctionDeclaration* declaration) {
case Variable::LOCAL: {
CHECK_ALIVE(VisitForValue(declaration->fun()));
HValue* value = Pop();
- environment()->Bind(variable, value);
+ BindIfLive(variable, value);
break;
}
case Variable::CONTEXT: {
CHECK_ALIVE(VisitForValue(declaration->fun()));
HValue* value = Pop();
- HValue* context = environment()->LookupContext();
- HStoreContextSlot* store = new(zone()) HStoreContextSlot(
+ HValue* context = environment()->context();
+ HStoreContextSlot* store = Add<HStoreContextSlot>(
context, variable->index(), HStoreContextSlot::kNoCheck, value);
- AddInstruction(store);
- if (store->HasObservableSideEffects()) AddSimulate(proxy->id());
+ if (store->HasObservableSideEffects()) {
+ Add<HSimulate>(proxy->id(), REMOVABLE_SIMULATE);
+ }
break;
}
case Variable::LOOKUP:
- return Bailout("unsupported lookup slot in declaration");
+ return Bailout(kUnsupportedLookupSlotInDeclaration);
}
}
-void HGraphBuilder::VisitModuleDeclaration(ModuleDeclaration* declaration) {
+void HOptimizedGraphBuilder::VisitModuleDeclaration(
+ ModuleDeclaration* declaration) {
UNREACHABLE();
}
-void HGraphBuilder::VisitImportDeclaration(ImportDeclaration* declaration) {
+void HOptimizedGraphBuilder::VisitImportDeclaration(
+ ImportDeclaration* declaration) {
UNREACHABLE();
}
-void HGraphBuilder::VisitExportDeclaration(ExportDeclaration* declaration) {
+void HOptimizedGraphBuilder::VisitExportDeclaration(
+ ExportDeclaration* declaration) {
UNREACHABLE();
}
-void HGraphBuilder::VisitModuleLiteral(ModuleLiteral* module) {
+void HOptimizedGraphBuilder::VisitModuleLiteral(ModuleLiteral* module) {
UNREACHABLE();
}
-void HGraphBuilder::VisitModuleVariable(ModuleVariable* module) {
+void HOptimizedGraphBuilder::VisitModuleVariable(ModuleVariable* module) {
UNREACHABLE();
}
-void HGraphBuilder::VisitModulePath(ModulePath* module) {
+void HOptimizedGraphBuilder::VisitModulePath(ModulePath* module) {
UNREACHABLE();
}
-void HGraphBuilder::VisitModuleUrl(ModuleUrl* module) {
+void HOptimizedGraphBuilder::VisitModuleUrl(ModuleUrl* module) {
+ UNREACHABLE();
+}
+
+
+void HOptimizedGraphBuilder::VisitModuleStatement(ModuleStatement* stmt) {
UNREACHABLE();
}
// Generators for inline runtime functions.
// Support for types.
-void HGraphBuilder::GenerateIsSmi(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateIsSmi(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
- HIsSmiAndBranch* result = new(zone()) HIsSmiAndBranch(value);
+ HIsSmiAndBranch* result = New<HIsSmiAndBranch>(value);
return ast_context()->ReturnControl(result, call->id());
}
-void HGraphBuilder::GenerateIsSpecObject(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateIsSpecObject(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
HHasInstanceTypeAndBranch* result =
- new(zone()) HHasInstanceTypeAndBranch(value,
- FIRST_SPEC_OBJECT_TYPE,
- LAST_SPEC_OBJECT_TYPE);
+ New<HHasInstanceTypeAndBranch>(value,
+ FIRST_SPEC_OBJECT_TYPE,
+ LAST_SPEC_OBJECT_TYPE);
return ast_context()->ReturnControl(result, call->id());
}
-void HGraphBuilder::GenerateIsFunction(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateIsFunction(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
HHasInstanceTypeAndBranch* result =
- new(zone()) HHasInstanceTypeAndBranch(value, JS_FUNCTION_TYPE);
+ New<HHasInstanceTypeAndBranch>(value, JS_FUNCTION_TYPE);
return ast_context()->ReturnControl(result, call->id());
}
-void HGraphBuilder::GenerateHasCachedArrayIndex(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateHasCachedArrayIndex(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
HHasCachedArrayIndexAndBranch* result =
- new(zone()) HHasCachedArrayIndexAndBranch(value);
+ New<HHasCachedArrayIndexAndBranch>(value);
return ast_context()->ReturnControl(result, call->id());
}
-void HGraphBuilder::GenerateIsArray(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateIsArray(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
HHasInstanceTypeAndBranch* result =
- new(zone()) HHasInstanceTypeAndBranch(value, JS_ARRAY_TYPE);
+ New<HHasInstanceTypeAndBranch>(value, JS_ARRAY_TYPE);
return ast_context()->ReturnControl(result, call->id());
}
-void HGraphBuilder::GenerateIsRegExp(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateIsRegExp(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
HHasInstanceTypeAndBranch* result =
- new(zone()) HHasInstanceTypeAndBranch(value, JS_REGEXP_TYPE);
+ New<HHasInstanceTypeAndBranch>(value, JS_REGEXP_TYPE);
return ast_context()->ReturnControl(result, call->id());
}
-void HGraphBuilder::GenerateIsObject(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateIsObject(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
- HIsObjectAndBranch* result = new(zone()) HIsObjectAndBranch(value);
+ HIsObjectAndBranch* result = New<HIsObjectAndBranch>(value);
return ast_context()->ReturnControl(result, call->id());
}
-void HGraphBuilder::GenerateIsNonNegativeSmi(CallRuntime* call) {
- return Bailout("inlined runtime function: IsNonNegativeSmi");
+void HOptimizedGraphBuilder::GenerateIsNonNegativeSmi(CallRuntime* call) {
+ return Bailout(kInlinedRuntimeFunctionIsNonNegativeSmi);
}
-void HGraphBuilder::GenerateIsUndetectableObject(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateIsUndetectableObject(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
- HIsUndetectableAndBranch* result =
- new(zone()) HIsUndetectableAndBranch(value);
+ HIsUndetectableAndBranch* result = New<HIsUndetectableAndBranch>(value);
return ast_context()->ReturnControl(result, call->id());
}
-void HGraphBuilder::GenerateIsStringWrapperSafeForDefaultValueOf(
+void HOptimizedGraphBuilder::GenerateIsStringWrapperSafeForDefaultValueOf(
CallRuntime* call) {
- return Bailout(
- "inlined runtime function: IsStringWrapperSafeForDefaultValueOf");
+ return Bailout(kInlinedRuntimeFunctionIsStringWrapperSafeForDefaultValueOf);
}
// Support for construct call checks.
-void HGraphBuilder::GenerateIsConstructCall(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateIsConstructCall(CallRuntime* call) {
ASSERT(call->arguments()->length() == 0);
if (function_state()->outer() != NULL) {
// We are generating graph for inlined function.
@@ -8988,27 +8843,26 @@ void HGraphBuilder::GenerateIsConstructCall(CallRuntime* call) {
: graph()->GetConstantFalse();
return ast_context()->ReturnValue(value);
} else {
- return ast_context()->ReturnControl(new(zone()) HIsConstructCallAndBranch,
+ return ast_context()->ReturnControl(New<HIsConstructCallAndBranch>(),
call->id());
}
}
// Support for arguments.length and arguments[?].
-void HGraphBuilder::GenerateArgumentsLength(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateArgumentsLength(CallRuntime* call) {
// Our implementation of arguments (based on this stack frame or an
// adapter below it) does not work for inlined functions. This runtime
// function is blacklisted by AstNode::IsInlineable.
ASSERT(function_state()->outer() == NULL);
ASSERT(call->arguments()->length() == 0);
- HInstruction* elements = AddInstruction(
- new(zone()) HArgumentsElements(false));
- HArgumentsLength* result = new(zone()) HArgumentsLength(elements);
+ HInstruction* elements = Add<HArgumentsElements>(false);
+ HArgumentsLength* result = New<HArgumentsLength>(elements);
return ast_context()->ReturnInstruction(result, call->id());
}
-void HGraphBuilder::GenerateArguments(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateArguments(CallRuntime* call) {
// Our implementation of arguments (based on this stack frame or an
// adapter below it) does not work for inlined functions. This runtime
// function is blacklisted by AstNode::IsInlineable.
@@ -9016,81 +8870,102 @@ void HGraphBuilder::GenerateArguments(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* index = Pop();
- HInstruction* elements = AddInstruction(
- new(zone()) HArgumentsElements(false));
- HInstruction* length = AddInstruction(new(zone()) HArgumentsLength(elements));
- HInstruction* checked_index =
- AddInstruction(new(zone()) HBoundsCheck(index, length));
- HAccessArgumentsAt* result =
- new(zone()) HAccessArgumentsAt(elements, length, checked_index);
+ HInstruction* elements = Add<HArgumentsElements>(false);
+ HInstruction* length = Add<HArgumentsLength>(elements);
+ HInstruction* checked_index = Add<HBoundsCheck>(index, length);
+ HAccessArgumentsAt* result = New<HAccessArgumentsAt>(
+ elements, length, checked_index);
return ast_context()->ReturnInstruction(result, call->id());
}
// Support for accessing the class and value fields of an object.
-void HGraphBuilder::GenerateClassOf(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateClassOf(CallRuntime* call) {
// The special form detected by IsClassOfTest is detected before we get here
// and does not cause a bailout.
- return Bailout("inlined runtime function: ClassOf");
+ return Bailout(kInlinedRuntimeFunctionClassOf);
}
-void HGraphBuilder::GenerateValueOf(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateValueOf(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
- HValueOf* result = new(zone()) HValueOf(value);
+ HValueOf* result = New<HValueOf>(value);
return ast_context()->ReturnInstruction(result, call->id());
}
-void HGraphBuilder::GenerateDateField(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateDateField(CallRuntime* call) {
ASSERT(call->arguments()->length() == 2);
ASSERT_NE(NULL, call->arguments()->at(1)->AsLiteral());
- Smi* index = Smi::cast(*(call->arguments()->at(1)->AsLiteral()->handle()));
+ Smi* index = Smi::cast(*(call->arguments()->at(1)->AsLiteral()->value()));
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* date = Pop();
- HDateField* result = new(zone()) HDateField(date, index);
+ HDateField* result = New<HDateField>(date, index);
return ast_context()->ReturnInstruction(result, call->id());
}
-void HGraphBuilder::GenerateSetValueOf(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateOneByteSeqStringSetChar(
+ CallRuntime* call) {
+ ASSERT(call->arguments()->length() == 3);
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(2)));
+ HValue* value = Pop();
+ HValue* index = Pop();
+ HValue* string = Pop();
+ HSeqStringSetChar* result = New<HSeqStringSetChar>(
+ String::ONE_BYTE_ENCODING, string, index, value);
+ return ast_context()->ReturnInstruction(result, call->id());
+}
+
+
+void HOptimizedGraphBuilder::GenerateTwoByteSeqStringSetChar(
+ CallRuntime* call) {
+ ASSERT(call->arguments()->length() == 3);
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(2)));
+ HValue* value = Pop();
+ HValue* index = Pop();
+ HValue* string = Pop();
+ HSeqStringSetChar* result = New<HSeqStringSetChar>(
+ String::TWO_BYTE_ENCODING, string, index, value);
+ return ast_context()->ReturnInstruction(result, call->id());
+}
+
+
+void HOptimizedGraphBuilder::GenerateSetValueOf(CallRuntime* call) {
ASSERT(call->arguments()->length() == 2);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
HValue* value = Pop();
HValue* object = Pop();
// Check if object is not a smi.
- HIsSmiAndBranch* smicheck = new(zone()) HIsSmiAndBranch(object);
HBasicBlock* if_smi = graph()->CreateBasicBlock();
HBasicBlock* if_heap_object = graph()->CreateBasicBlock();
HBasicBlock* join = graph()->CreateBasicBlock();
- smicheck->SetSuccessorAt(0, if_smi);
- smicheck->SetSuccessorAt(1, if_heap_object);
- current_block()->Finish(smicheck);
- if_smi->Goto(join);
+ FinishCurrentBlock(New<HIsSmiAndBranch>(object, if_smi, if_heap_object));
+ Goto(if_smi, join);
// Check if object is a JSValue.
set_current_block(if_heap_object);
HHasInstanceTypeAndBranch* typecheck =
- new(zone()) HHasInstanceTypeAndBranch(object, JS_VALUE_TYPE);
+ New<HHasInstanceTypeAndBranch>(object, JS_VALUE_TYPE);
HBasicBlock* if_js_value = graph()->CreateBasicBlock();
HBasicBlock* not_js_value = graph()->CreateBasicBlock();
typecheck->SetSuccessorAt(0, if_js_value);
typecheck->SetSuccessorAt(1, not_js_value);
- current_block()->Finish(typecheck);
- not_js_value->Goto(join);
+ FinishCurrentBlock(typecheck);
+ Goto(not_js_value, join);
// Create in-object property store to kValueOffset.
set_current_block(if_js_value);
- Handle<String> name = isolate()->factory()->undefined_symbol();
- AddInstruction(new(zone()) HStoreNamedField(object,
- name,
- value,
- true, // in-object store.
- JSValue::kValueOffset));
- if_js_value->Goto(join);
+ Add<HStoreNamedField>(object,
+ HObjectAccess::ForJSObjectOffset(JSValue::kValueOffset), value);
+ Goto(if_js_value, join);
join->SetJoinId(call->id());
set_current_block(join);
return ast_context()->ReturnValue(value);
@@ -9098,152 +8973,139 @@ void HGraphBuilder::GenerateSetValueOf(CallRuntime* call) {
// Fast support for charCodeAt(n).
-void HGraphBuilder::GenerateStringCharCodeAt(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateStringCharCodeAt(CallRuntime* call) {
ASSERT(call->arguments()->length() == 2);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
HValue* index = Pop();
HValue* string = Pop();
- HValue* context = environment()->LookupContext();
- HStringCharCodeAt* result = BuildStringCharCodeAt(context, string, index);
+ HInstruction* result = BuildStringCharCodeAt(string, index);
return ast_context()->ReturnInstruction(result, call->id());
}
// Fast support for string.charAt(n) and string[n].
-void HGraphBuilder::GenerateStringCharFromCode(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateStringCharFromCode(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* char_code = Pop();
- HValue* context = environment()->LookupContext();
- HStringCharFromCode* result =
- new(zone()) HStringCharFromCode(context, char_code);
+ HInstruction* result = NewUncasted<HStringCharFromCode>(char_code);
return ast_context()->ReturnInstruction(result, call->id());
}
// Fast support for string.charAt(n) and string[n].
-void HGraphBuilder::GenerateStringCharAt(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateStringCharAt(CallRuntime* call) {
ASSERT(call->arguments()->length() == 2);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
HValue* index = Pop();
HValue* string = Pop();
- HValue* context = environment()->LookupContext();
- HStringCharCodeAt* char_code = BuildStringCharCodeAt(context, string, index);
+ HInstruction* char_code = BuildStringCharCodeAt(string, index);
AddInstruction(char_code);
- HStringCharFromCode* result =
- new(zone()) HStringCharFromCode(context, char_code);
+ HInstruction* result = NewUncasted<HStringCharFromCode>(char_code);
return ast_context()->ReturnInstruction(result, call->id());
}
// Fast support for object equality testing.
-void HGraphBuilder::GenerateObjectEquals(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateObjectEquals(CallRuntime* call) {
ASSERT(call->arguments()->length() == 2);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
HValue* right = Pop();
HValue* left = Pop();
HCompareObjectEqAndBranch* result =
- new(zone()) HCompareObjectEqAndBranch(left, right);
+ New<HCompareObjectEqAndBranch>(left, right);
return ast_context()->ReturnControl(result, call->id());
}
-void HGraphBuilder::GenerateLog(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateLog(CallRuntime* call) {
// %_Log is ignored in optimized code.
return ast_context()->ReturnValue(graph()->GetConstantUndefined());
}
// Fast support for Math.random().
-void HGraphBuilder::GenerateRandomHeapNumber(CallRuntime* call) {
- HValue* context = environment()->LookupContext();
- HGlobalObject* global_object = new(zone()) HGlobalObject(context);
- AddInstruction(global_object);
- HRandom* result = new(zone()) HRandom(global_object);
+void HOptimizedGraphBuilder::GenerateRandomHeapNumber(CallRuntime* call) {
+ HGlobalObject* global_object = Add<HGlobalObject>();
+ HRandom* result = New<HRandom>(global_object);
return ast_context()->ReturnInstruction(result, call->id());
}
// Fast support for StringAdd.
-void HGraphBuilder::GenerateStringAdd(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateStringAdd(CallRuntime* call) {
ASSERT_EQ(2, call->arguments()->length());
- CHECK_ALIVE(VisitArgumentList(call->arguments()));
- HValue* context = environment()->LookupContext();
- HCallStub* result = new(zone()) HCallStub(context, CodeStub::StringAdd, 2);
- Drop(2);
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
+ HValue* right = Pop();
+ HValue* left = Pop();
+ HInstruction* result = New<HStringAdd>(left, right, STRING_ADD_CHECK_BOTH);
return ast_context()->ReturnInstruction(result, call->id());
}
// Fast support for SubString.
-void HGraphBuilder::GenerateSubString(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateSubString(CallRuntime* call) {
ASSERT_EQ(3, call->arguments()->length());
CHECK_ALIVE(VisitArgumentList(call->arguments()));
- HValue* context = environment()->LookupContext();
- HCallStub* result = new(zone()) HCallStub(context, CodeStub::SubString, 3);
+ HCallStub* result = New<HCallStub>(CodeStub::SubString, 3);
Drop(3);
return ast_context()->ReturnInstruction(result, call->id());
}
// Fast support for StringCompare.
-void HGraphBuilder::GenerateStringCompare(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateStringCompare(CallRuntime* call) {
ASSERT_EQ(2, call->arguments()->length());
CHECK_ALIVE(VisitArgumentList(call->arguments()));
- HValue* context = environment()->LookupContext();
- HCallStub* result =
- new(zone()) HCallStub(context, CodeStub::StringCompare, 2);
+ HCallStub* result = New<HCallStub>(CodeStub::StringCompare, 2);
Drop(2);
return ast_context()->ReturnInstruction(result, call->id());
}
// Support for direct calls from JavaScript to native RegExp code.
-void HGraphBuilder::GenerateRegExpExec(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateRegExpExec(CallRuntime* call) {
ASSERT_EQ(4, call->arguments()->length());
CHECK_ALIVE(VisitArgumentList(call->arguments()));
- HValue* context = environment()->LookupContext();
- HCallStub* result = new(zone()) HCallStub(context, CodeStub::RegExpExec, 4);
+ HCallStub* result = New<HCallStub>(CodeStub::RegExpExec, 4);
Drop(4);
return ast_context()->ReturnInstruction(result, call->id());
}
// Construct a RegExp exec result with two in-object properties.
-void HGraphBuilder::GenerateRegExpConstructResult(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateRegExpConstructResult(CallRuntime* call) {
ASSERT_EQ(3, call->arguments()->length());
CHECK_ALIVE(VisitArgumentList(call->arguments()));
- HValue* context = environment()->LookupContext();
- HCallStub* result =
- new(zone()) HCallStub(context, CodeStub::RegExpConstructResult, 3);
+ HCallStub* result = New<HCallStub>(CodeStub::RegExpConstructResult, 3);
Drop(3);
return ast_context()->ReturnInstruction(result, call->id());
}
// Support for fast native caches.
-void HGraphBuilder::GenerateGetFromCache(CallRuntime* call) {
- return Bailout("inlined runtime function: GetFromCache");
+void HOptimizedGraphBuilder::GenerateGetFromCache(CallRuntime* call) {
+ return Bailout(kInlinedRuntimeFunctionGetFromCache);
}
// Fast support for number to string.
-void HGraphBuilder::GenerateNumberToString(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateNumberToString(CallRuntime* call) {
ASSERT_EQ(1, call->arguments()->length());
- CHECK_ALIVE(VisitArgumentList(call->arguments()));
- HValue* context = environment()->LookupContext();
- HCallStub* result =
- new(zone()) HCallStub(context, CodeStub::NumberToString, 1);
- Drop(1);
- return ast_context()->ReturnInstruction(result, call->id());
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+ HValue* number = Pop();
+ HValue* result = BuildNumberToString(
+ number, handle(Type::Number(), isolate()));
+ return ast_context()->ReturnValue(result);
}
// Fast call for custom callbacks.
-void HGraphBuilder::GenerateCallFunction(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateCallFunction(CallRuntime* call) {
// 1 ~ The function to call is not itself an argument to the call.
int arg_count = call->arguments()->length() - 1;
ASSERT(arg_count >= 1); // There's always at least a receiver.
@@ -9254,31 +9116,28 @@ void HGraphBuilder::GenerateCallFunction(CallRuntime* call) {
CHECK_ALIVE(VisitForValue(call->arguments()->last()));
HValue* function = Pop();
- HValue* context = environment()->LookupContext();
// Branch for function proxies, or other non-functions.
HHasInstanceTypeAndBranch* typecheck =
- new(zone()) HHasInstanceTypeAndBranch(function, JS_FUNCTION_TYPE);
+ New<HHasInstanceTypeAndBranch>(function, JS_FUNCTION_TYPE);
HBasicBlock* if_jsfunction = graph()->CreateBasicBlock();
HBasicBlock* if_nonfunction = graph()->CreateBasicBlock();
HBasicBlock* join = graph()->CreateBasicBlock();
typecheck->SetSuccessorAt(0, if_jsfunction);
typecheck->SetSuccessorAt(1, if_nonfunction);
- current_block()->Finish(typecheck);
+ FinishCurrentBlock(typecheck);
set_current_block(if_jsfunction);
- HInstruction* invoke_result = AddInstruction(
- new(zone()) HInvokeFunction(context, function, arg_count));
+ HInstruction* invoke_result = Add<HInvokeFunction>(function, arg_count);
Drop(arg_count);
Push(invoke_result);
- if_jsfunction->Goto(join);
+ Goto(if_jsfunction, join);
set_current_block(if_nonfunction);
- HInstruction* call_result = AddInstruction(
- new(zone()) HCallFunction(context, function, arg_count));
+ HInstruction* call_result = Add<HCallFunction>(function, arg_count);
Drop(arg_count);
Push(call_result);
- if_nonfunction->Goto(join);
+ Goto(if_nonfunction, join);
set_current_block(join);
join->SetJoinId(call->id());
@@ -9287,87 +9146,101 @@ void HGraphBuilder::GenerateCallFunction(CallRuntime* call) {
// Fast call to math functions.
-void HGraphBuilder::GenerateMathPow(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateMathPow(CallRuntime* call) {
ASSERT_EQ(2, call->arguments()->length());
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
HValue* right = Pop();
HValue* left = Pop();
- HPower* result = new(zone()) HPower(left, right);
+ HInstruction* result = NewUncasted<HPower>(left, right);
return ast_context()->ReturnInstruction(result, call->id());
}
-void HGraphBuilder::GenerateMathSin(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateMathSin(CallRuntime* call) {
ASSERT_EQ(1, call->arguments()->length());
CHECK_ALIVE(VisitArgumentList(call->arguments()));
- HValue* context = environment()->LookupContext();
- HCallStub* result =
- new(zone()) HCallStub(context, CodeStub::TranscendentalCache, 1);
+ HCallStub* result = New<HCallStub>(CodeStub::TranscendentalCache, 1);
result->set_transcendental_type(TranscendentalCache::SIN);
Drop(1);
return ast_context()->ReturnInstruction(result, call->id());
}
-void HGraphBuilder::GenerateMathCos(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateMathCos(CallRuntime* call) {
ASSERT_EQ(1, call->arguments()->length());
CHECK_ALIVE(VisitArgumentList(call->arguments()));
- HValue* context = environment()->LookupContext();
- HCallStub* result =
- new(zone()) HCallStub(context, CodeStub::TranscendentalCache, 1);
+ HCallStub* result = New<HCallStub>(CodeStub::TranscendentalCache, 1);
result->set_transcendental_type(TranscendentalCache::COS);
Drop(1);
return ast_context()->ReturnInstruction(result, call->id());
}
-void HGraphBuilder::GenerateMathTan(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateMathTan(CallRuntime* call) {
ASSERT_EQ(1, call->arguments()->length());
CHECK_ALIVE(VisitArgumentList(call->arguments()));
- HValue* context = environment()->LookupContext();
- HCallStub* result =
- new(zone()) HCallStub(context, CodeStub::TranscendentalCache, 1);
+ HCallStub* result = New<HCallStub>(CodeStub::TranscendentalCache, 1);
result->set_transcendental_type(TranscendentalCache::TAN);
Drop(1);
return ast_context()->ReturnInstruction(result, call->id());
}
-void HGraphBuilder::GenerateMathLog(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateMathLog(CallRuntime* call) {
ASSERT_EQ(1, call->arguments()->length());
CHECK_ALIVE(VisitArgumentList(call->arguments()));
- HValue* context = environment()->LookupContext();
- HCallStub* result =
- new(zone()) HCallStub(context, CodeStub::TranscendentalCache, 1);
+ HCallStub* result = New<HCallStub>(CodeStub::TranscendentalCache, 1);
result->set_transcendental_type(TranscendentalCache::LOG);
Drop(1);
return ast_context()->ReturnInstruction(result, call->id());
}
-void HGraphBuilder::GenerateMathSqrt(CallRuntime* call) {
- return Bailout("inlined runtime function: MathSqrt");
+void HOptimizedGraphBuilder::GenerateMathSqrt(CallRuntime* call) {
+ ASSERT(call->arguments()->length() == 1);
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+ HValue* value = Pop();
+ HInstruction* result = New<HUnaryMathOperation>(value, kMathSqrt);
+ return ast_context()->ReturnInstruction(result, call->id());
}
// Check whether two RegExps are equivalent
-void HGraphBuilder::GenerateIsRegExpEquivalent(CallRuntime* call) {
- return Bailout("inlined runtime function: IsRegExpEquivalent");
+void HOptimizedGraphBuilder::GenerateIsRegExpEquivalent(CallRuntime* call) {
+ return Bailout(kInlinedRuntimeFunctionIsRegExpEquivalent);
}
-void HGraphBuilder::GenerateGetCachedArrayIndex(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateGetCachedArrayIndex(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
- HGetCachedArrayIndex* result = new(zone()) HGetCachedArrayIndex(value);
+ HGetCachedArrayIndex* result = New<HGetCachedArrayIndex>(value);
return ast_context()->ReturnInstruction(result, call->id());
}
-void HGraphBuilder::GenerateFastAsciiArrayJoin(CallRuntime* call) {
- return Bailout("inlined runtime function: FastAsciiArrayJoin");
+void HOptimizedGraphBuilder::GenerateFastAsciiArrayJoin(CallRuntime* call) {
+ return Bailout(kInlinedRuntimeFunctionFastAsciiArrayJoin);
+}
+
+
+// Support for generators.
+void HOptimizedGraphBuilder::GenerateGeneratorNext(CallRuntime* call) {
+ return Bailout(kInlinedRuntimeFunctionGeneratorNext);
+}
+
+
+void HOptimizedGraphBuilder::GenerateGeneratorThrow(CallRuntime* call) {
+ return Bailout(kInlinedRuntimeFunctionGeneratorThrow);
+}
+
+
+void HOptimizedGraphBuilder::GenerateDebugBreakInOptimizedCode(
+ CallRuntime* call) {
+ Add<HDebugBreak>();
+ return ast_context()->ReturnValue(graph()->GetConstant0());
}
@@ -9381,7 +9254,6 @@ HEnvironment::HEnvironment(HEnvironment* outer,
Zone* zone)
: closure_(closure),
values_(0, zone),
- assigned_variables_(4, zone),
frame_type_(JS_FUNCTION),
parameter_count_(0),
specials_count_(1),
@@ -9396,12 +9268,27 @@ HEnvironment::HEnvironment(HEnvironment* outer,
}
+HEnvironment::HEnvironment(Zone* zone, int parameter_count)
+ : values_(0, zone),
+ frame_type_(STUB),
+ parameter_count_(parameter_count),
+ specials_count_(1),
+ local_count_(0),
+ outer_(NULL),
+ entry_(NULL),
+ pop_count_(0),
+ push_count_(0),
+ ast_id_(BailoutId::None()),
+ zone_(zone) {
+ Initialize(parameter_count, 0, 0);
+}
+
+
HEnvironment::HEnvironment(const HEnvironment* other, Zone* zone)
: values_(0, zone),
- assigned_variables_(0, zone),
frame_type_(JS_FUNCTION),
parameter_count_(0),
- specials_count_(1),
+ specials_count_(0),
local_count_(0),
outer_(NULL),
entry_(NULL),
@@ -9420,9 +9307,9 @@ HEnvironment::HEnvironment(HEnvironment* outer,
Zone* zone)
: closure_(closure),
values_(arguments, zone),
- assigned_variables_(0, zone),
frame_type_(frame_type),
parameter_count_(arguments),
+ specials_count_(0),
local_count_(0),
outer_(outer),
entry_(NULL),
@@ -9449,7 +9336,7 @@ void HEnvironment::Initialize(int parameter_count,
void HEnvironment::Initialize(const HEnvironment* other) {
closure_ = other->closure();
values_.AddAll(other->values_, zone());
- assigned_variables_.AddAll(other->assigned_variables_, zone());
+ assigned_variables_.Union(other->assigned_variables_, zone());
frame_type_ = other->frame_type_;
parameter_count_ = other->parameter_count_;
local_count_ = other->local_count_;
@@ -9457,6 +9344,7 @@ void HEnvironment::Initialize(const HEnvironment* other) {
entry_ = other->entry_;
pop_count_ = other->pop_count_;
push_count_ = other->push_count_;
+ specials_count_ = other->specials_count_;
ast_id_ = other->ast_id_;
}
@@ -9472,20 +9360,19 @@ void HEnvironment::AddIncomingEdge(HBasicBlock* block, HEnvironment* other) {
// There is already a phi for the i'th value.
HPhi* phi = HPhi::cast(value);
// Assert index is correct and that we haven't missed an incoming edge.
- ASSERT(phi->merged_index() == i);
+ ASSERT(phi->merged_index() == i || !phi->HasMergedIndex());
ASSERT(phi->OperandCount() == block->predecessors()->length());
phi->AddInput(other->values_[i]);
} else if (values_[i] != other->values_[i]) {
// There is a fresh value on the incoming edge, a phi is needed.
ASSERT(values_[i] != NULL && other->values_[i] != NULL);
- HPhi* phi = new(zone()) HPhi(i, zone());
+ HPhi* phi = block->AddNewPhi(i);
HValue* old_value = values_[i];
for (int j = 0; j < block->predecessors()->length(); j++) {
phi->AddInput(old_value);
}
phi->AddInput(other->values_[i]);
this->values_[i] = phi;
- block->AddPhi(phi);
}
}
}
@@ -9493,9 +9380,7 @@ void HEnvironment::AddIncomingEdge(HBasicBlock* block, HEnvironment* other) {
void HEnvironment::Bind(int index, HValue* value) {
ASSERT(value != NULL);
- if (!assigned_variables_.Contains(index)) {
- assigned_variables_.Add(index, zone());
- }
+ assigned_variables_.Add(index, zone());
values_[index] = value;
}
@@ -9548,10 +9433,9 @@ HEnvironment* HEnvironment::CopyWithoutHistory() const {
HEnvironment* HEnvironment::CopyAsLoopHeader(HBasicBlock* loop_header) const {
HEnvironment* new_env = Copy();
for (int i = 0; i < values_.length(); ++i) {
- HPhi* phi = new(zone()) HPhi(i, zone());
+ HPhi* phi = loop_header->AddNewPhi(i);
phi->AddInput(values_[i]);
new_env->values_[i] = phi;
- loop_header->AddPhi(phi);
}
new_env->ClearHistory();
return new_env;
@@ -9578,8 +9462,8 @@ HEnvironment* HEnvironment::CopyForInlining(
int arguments,
FunctionLiteral* function,
HConstant* undefined,
- CallKind call_kind,
- InliningKind inlining_kind) const {
+ InliningKind inlining_kind,
+ bool undefined_receiver) const {
ASSERT(frame_type() == JS_FUNCTION);
// Outer environment is a copy of this one without the arguments.
@@ -9620,11 +9504,10 @@ HEnvironment* HEnvironment::CopyForInlining(
// If the function we are inlining is a strict mode function or a
// builtin function, pass undefined as the receiver for function
// calls (instead of the global receiver).
- if ((target->shared()->native() || !function->is_classic_mode()) &&
- call_kind == CALL_AS_FUNCTION && inlining_kind != CONSTRUCT_CALL_RETURN) {
+ if (undefined_receiver) {
inner->SetValueAt(0, undefined);
}
- inner->SetValueAt(arity + 1, LookupContext());
+ inner->SetValueAt(arity + 1, context());
for (int i = arity + 2; i < inner->length(); ++i) {
inner->SetValueAt(i, undefined);
}
@@ -9663,21 +9546,33 @@ void HEnvironment::PrintToStd() {
}
-void HTracer::TraceCompilation(FunctionLiteral* function) {
+void HTracer::TraceCompilation(CompilationInfo* info) {
Tag tag(this, "compilation");
- Handle<String> name = function->debug_name();
- PrintStringProperty("name", *name->ToCString());
- PrintStringProperty("method", *name->ToCString());
+ if (info->IsOptimizing()) {
+ Handle<String> name = info->function()->debug_name();
+ PrintStringProperty("name", *name->ToCString());
+ PrintStringProperty("method", *name->ToCString());
+ } else {
+ CodeStub::Major major_key = info->code_stub()->MajorKey();
+ PrintStringProperty("name", CodeStub::MajorName(major_key, false));
+ PrintStringProperty("method", "stub");
+ }
PrintLongProperty("date", static_cast<int64_t>(OS::TimeCurrentMillis()));
}
void HTracer::TraceLithium(const char* name, LChunk* chunk) {
+ ASSERT(!FLAG_concurrent_recompilation);
+ AllowHandleDereference allow_deref;
+ AllowDeferredHandleDereference allow_deferred_deref;
Trace(name, chunk->graph(), chunk);
}
void HTracer::TraceHydrogen(const char* name, HGraph* graph) {
+ ASSERT(!FLAG_concurrent_recompilation);
+ AllowHandleDereference allow_deref;
+ AllowDeferredHandleDereference allow_deferred_deref;
Trace(name, graph, NULL);
}
@@ -9757,8 +9652,8 @@ void HTracer::Trace(const char* name, HGraph* graph, LChunk* chunk) {
{
Tag HIR_tag(this, "HIR");
- HInstruction* instruction = current->first();
- while (instruction != NULL) {
+ for (HInstructionIterator it(current); !it.Done(); it.Advance()) {
+ HInstruction* instruction = it.Current();
int bci = 0;
int uses = instruction->UseCount();
PrintIndent();
@@ -9767,7 +9662,6 @@ void HTracer::Trace(const char* name, HGraph* graph, LChunk* chunk) {
trace_.Add(" ");
instruction->PrintTo(&trace_);
trace_.Add(" <|@\n");
- instruction = instruction->next();
}
}
@@ -9873,112 +9767,92 @@ void HTracer::TraceLiveRange(LiveRange* range, const char* type,
void HTracer::FlushToFile() {
- AppendChars(filename_, *trace_.ToCString(), trace_.length(), false);
+ AppendChars(filename_.start(), *trace_.ToCString(), trace_.length(), false);
trace_.Reset();
}
void HStatistics::Initialize(CompilationInfo* info) {
+ if (info->shared_info().is_null()) return;
source_size_ += info->shared_info()->SourceSize();
}
void HStatistics::Print() {
PrintF("Timing results:\n");
- int64_t sum = 0;
- for (int i = 0; i < timing_.length(); ++i) {
- sum += timing_[i];
+ TimeDelta sum;
+ for (int i = 0; i < times_.length(); ++i) {
+ sum += times_[i];
}
for (int i = 0; i < names_.length(); ++i) {
- PrintF("%30s", names_[i]);
- double ms = static_cast<double>(timing_[i]) / 1000;
- double percent = static_cast<double>(timing_[i]) * 100 / sum;
- PrintF(" - %7.3f ms / %4.1f %% ", ms, percent);
+ PrintF("%32s", names_[i]);
+ double ms = times_[i].InMillisecondsF();
+ double percent = times_[i].PercentOf(sum);
+ PrintF(" %8.3f ms / %4.1f %% ", ms, percent);
unsigned size = sizes_[i];
double size_percent = static_cast<double>(size) * 100 / total_size_;
- PrintF(" %8u bytes / %4.1f %%\n", size, size_percent);
- }
+ PrintF(" %9u bytes / %4.1f %%\n", size, size_percent);
+ }
+
+ PrintF("----------------------------------------"
+ "---------------------------------------\n");
+ TimeDelta total = create_graph_ + optimize_graph_ + generate_code_;
+ PrintF("%32s %8.3f ms / %4.1f %% \n",
+ "Create graph",
+ create_graph_.InMillisecondsF(),
+ create_graph_.PercentOf(total));
+ PrintF("%32s %8.3f ms / %4.1f %% \n",
+ "Optimize graph",
+ optimize_graph_.InMillisecondsF(),
+ optimize_graph_.PercentOf(total));
+ PrintF("%32s %8.3f ms / %4.1f %% \n",
+ "Generate and install code",
+ generate_code_.InMillisecondsF(),
+ generate_code_.PercentOf(total));
+ PrintF("----------------------------------------"
+ "---------------------------------------\n");
+ PrintF("%32s %8.3f ms (%.1f times slower than full code gen)\n",
+ "Total",
+ total.InMillisecondsF(),
+ total.TimesOf(full_code_gen_));
+
double source_size_in_kb = static_cast<double>(source_size_) / 1024;
double normalized_time = source_size_in_kb > 0
- ? (static_cast<double>(sum) / 1000) / source_size_in_kb
+ ? total.InMillisecondsF() / source_size_in_kb
: 0;
- double normalized_bytes = source_size_in_kb > 0
- ? total_size_ / source_size_in_kb
+ double normalized_size_in_kb = source_size_in_kb > 0
+ ? total_size_ / 1024 / source_size_in_kb
: 0;
- PrintF("%30s - %7.3f ms %7.3f bytes\n", "Sum",
- normalized_time, normalized_bytes);
- PrintF("---------------------------------------------------------------\n");
- PrintF("%30s - %7.3f ms (%.1f times slower than full code gen)\n",
- "Total",
- static_cast<double>(total_) / 1000,
- static_cast<double>(total_) / full_code_gen_);
+ PrintF("%32s %8.3f ms %7.3f kB allocated\n",
+ "Average per kB source",
+ normalized_time, normalized_size_in_kb);
}
-void HStatistics::SaveTiming(const char* name, int64_t ticks, unsigned size) {
- if (name == HPhase::kFullCodeGen) {
- full_code_gen_ += ticks;
- } else if (name == HPhase::kTotal) {
- total_ += ticks;
- } else {
- total_size_ += size;
- for (int i = 0; i < names_.length(); ++i) {
- if (names_[i] == name) {
- timing_[i] += ticks;
- sizes_[i] += size;
- return;
- }
+void HStatistics::SaveTiming(const char* name, TimeDelta time, unsigned size) {
+ total_size_ += size;
+ for (int i = 0; i < names_.length(); ++i) {
+ if (strcmp(names_[i], name) == 0) {
+ times_[i] += time;
+ sizes_[i] += size;
+ return;
}
- names_.Add(name);
- timing_.Add(ticks);
- sizes_.Add(size);
- }
-}
-
-
-const char* const HPhase::kFullCodeGen = "Full code generator";
-const char* const HPhase::kTotal = "Total";
-
-
-void HPhase::Begin(const char* name,
- HGraph* graph,
- LChunk* chunk,
- LAllocator* allocator) {
- name_ = name;
- graph_ = graph;
- chunk_ = chunk;
- allocator_ = allocator;
- if (allocator != NULL && chunk_ == NULL) {
- chunk_ = allocator->chunk();
}
- if (FLAG_hydrogen_stats) start_ = OS::Ticks();
- start_allocation_size_ = Zone::allocation_size_;
+ names_.Add(name);
+ times_.Add(time);
+ sizes_.Add(size);
}
-void HPhase::End() const {
- if (FLAG_hydrogen_stats) {
- int64_t end = OS::Ticks();
- unsigned size = Zone::allocation_size_ - start_allocation_size_;
- HStatistics::Instance()->SaveTiming(name_, end - start_, size);
- }
-
- // Produce trace output if flag is set so that the first letter of the
- // phase name matches the command line parameter FLAG_trace_phase.
- if (FLAG_trace_hydrogen &&
- OS::StrChr(const_cast<char*>(FLAG_trace_phase), name_[0]) != NULL) {
- if (graph_ != NULL) HTracer::Instance()->TraceHydrogen(name_, graph_);
- if (chunk_ != NULL) HTracer::Instance()->TraceLithium(name_, chunk_);
- if (allocator_ != NULL) {
- HTracer::Instance()->TraceLiveRanges(name_, allocator_);
- }
+HPhase::~HPhase() {
+ if (ShouldProduceTraceOutput()) {
+ isolate()->GetHTracer()->TraceHydrogen(name(), graph_);
}
#ifdef DEBUG
- if (graph_ != NULL) graph_->Verify(false); // No full verify.
- if (allocator_ != NULL) allocator_->Verify();
+ graph_->Verify(false); // No full verify.
#endif
}
diff --git a/deps/v8/src/hydrogen.h b/deps/v8/src/hydrogen.h
index a0d81497f..8f4878d93 100644
--- a/deps/v8/src/hydrogen.h
+++ b/deps/v8/src/hydrogen.h
@@ -30,12 +30,13 @@
#include "v8.h"
+#include "accessors.h"
#include "allocation.h"
#include "ast.h"
#include "compiler.h"
#include "hydrogen-instructions.h"
-#include "type-info.h"
#include "zone.h"
+#include "scopes.h"
namespace v8 {
namespace internal {
@@ -46,28 +47,34 @@ class FunctionState;
class HEnvironment;
class HGraph;
class HLoopInformation;
+class HOsrBuilder;
class HTracer;
class LAllocator;
class LChunk;
class LiveRange;
-class HBasicBlock: public ZoneObject {
+class HBasicBlock V8_FINAL : public ZoneObject {
public:
explicit HBasicBlock(HGraph* graph);
- virtual ~HBasicBlock() { }
+ ~HBasicBlock() { }
// Simple accessors.
int block_id() const { return block_id_; }
void set_block_id(int id) { block_id_ = id; }
HGraph* graph() const { return graph_; }
+ Isolate* isolate() const;
const ZoneList<HPhi*>* phis() const { return &phis_; }
HInstruction* first() const { return first_; }
HInstruction* last() const { return last_; }
void set_last(HInstruction* instr) { last_ = instr; }
- HInstruction* GetLastInstruction();
HControlInstruction* end() const { return end_; }
HLoopInformation* loop_information() const { return loop_information_; }
+ HLoopInformation* current_loop() const {
+ return IsLoopHeader() ? loop_information()
+ : (parent_loop_header() != NULL
+ ? parent_loop_header()->loop_information() : NULL);
+ }
const ZoneList<HBasicBlock*>* predecessors() const { return &predecessors_; }
bool HasPredecessor() const { return predecessors_.length() > 0; }
const ZoneList<HBasicBlock*>* dominated_blocks() const {
@@ -91,6 +98,8 @@ class HBasicBlock: public ZoneObject {
void set_last_instruction_index(int index) {
last_instruction_index_ = index;
}
+ bool is_osr_entry() { return is_osr_entry_; }
+ void set_osr_entry() { is_osr_entry_ = true; }
void AttachLoopInformation();
void DetachLoopInformation();
@@ -101,14 +110,18 @@ class HBasicBlock: public ZoneObject {
bool IsFinished() const { return end_ != NULL; }
void AddPhi(HPhi* phi);
void RemovePhi(HPhi* phi);
- void AddInstruction(HInstruction* instr);
+ void AddInstruction(HInstruction* instr, int position);
bool Dominates(HBasicBlock* other) const;
int LoopNestingDepth() const;
void SetInitialEnvironment(HEnvironment* env);
- void ClearEnvironment() { last_environment_ = NULL; }
+ void ClearEnvironment() {
+ ASSERT(IsFinished());
+ ASSERT(end()->SuccessorCount() == 0);
+ last_environment_ = NULL;
+ }
bool HasEnvironment() const { return last_environment_ != NULL; }
- void UpdateEnvironment(HEnvironment* env) { last_environment_ = env; }
+ void UpdateEnvironment(HEnvironment* env);
HBasicBlock* parent_loop_header() const { return parent_loop_header_; }
void set_parent_loop_header(HBasicBlock* block) {
@@ -120,23 +133,18 @@ class HBasicBlock: public ZoneObject {
void SetJoinId(BailoutId ast_id);
- void Finish(HControlInstruction* last);
- void FinishExit(HControlInstruction* instruction);
- void Goto(HBasicBlock* block, FunctionState* state = NULL);
-
int PredecessorIndexOf(HBasicBlock* predecessor) const;
- void AddSimulate(BailoutId ast_id) { AddInstruction(CreateSimulate(ast_id)); }
+ HPhi* AddNewPhi(int merged_index);
+ HSimulate* AddNewSimulate(BailoutId ast_id,
+ int position,
+ RemovableSimulate removable = FIXED_SIMULATE) {
+ HSimulate* instr = CreateSimulate(ast_id, removable);
+ AddInstruction(instr, position);
+ return instr;
+ }
void AssignCommonDominator(HBasicBlock* other);
void AssignLoopSuccessorDominators();
- void FinishExitWithDeoptimization(HDeoptimize::UseEnvironment has_uses) {
- FinishExit(CreateDeoptimize(has_uses));
- }
-
- // Add the inlined function exit sequence, adding an HLeaveInlined
- // instruction and updating the bailout environment.
- void AddLeaveInlined(HValue* return_value, FunctionState* state);
-
// If a target block is tagged as an inline function return, all
// predecessors should contain the inlined exit sequence:
//
@@ -144,10 +152,19 @@ class HBasicBlock: public ZoneObject {
// Simulate (caller's environment)
// Goto (target block)
bool IsInlineReturnTarget() const { return is_inline_return_target_; }
- void MarkAsInlineReturnTarget() { is_inline_return_target_ = true; }
+ void MarkAsInlineReturnTarget(HBasicBlock* inlined_entry_block) {
+ is_inline_return_target_ = true;
+ inlined_entry_block_ = inlined_entry_block;
+ }
+ HBasicBlock* inlined_entry_block() { return inlined_entry_block_; }
+
+ bool IsDeoptimizing() const {
+ return end() != NULL && end()->IsDeoptimize();
+ }
- bool IsDeoptimizing() const { return is_deoptimizing_; }
- void MarkAsDeoptimizing() { is_deoptimizing_ = true; }
+ void MarkUnreachable();
+ bool IsUnreachable() const { return !is_reachable_; }
+ bool IsReachable() const { return is_reachable_; }
bool IsLoopSuccessorDominator() const {
return dominates_loop_successors_;
@@ -162,13 +179,30 @@ class HBasicBlock: public ZoneObject {
void Verify();
#endif
+ protected:
+ friend class HGraphBuilder;
+
+ HSimulate* CreateSimulate(BailoutId ast_id, RemovableSimulate removable);
+ void Finish(HControlInstruction* last, int position);
+ void FinishExit(HControlInstruction* instruction, int position);
+ void Goto(HBasicBlock* block,
+ int position,
+ FunctionState* state = NULL,
+ bool add_simulate = true);
+ void GotoNoSimulate(HBasicBlock* block, int position) {
+ Goto(block, position, NULL, false);
+ }
+
+ // Add the inlined function exit sequence, adding an HLeaveInlined
+ // instruction and updating the bailout environment.
+ void AddLeaveInlined(HValue* return_value,
+ FunctionState* state,
+ int position);
+
private:
void RegisterPredecessor(HBasicBlock* pred);
void AddDominatedBlock(HBasicBlock* block);
- HSimulate* CreateSimulate(BailoutId ast_id);
- HDeoptimize* CreateDeoptimize(HDeoptimize::UseEnvironment has_uses);
-
int block_id_;
HGraph* graph_;
ZoneList<HPhi*> phis_;
@@ -187,13 +221,16 @@ class HBasicBlock: public ZoneObject {
int last_instruction_index_;
ZoneList<int> deleted_phis_;
HBasicBlock* parent_loop_header_;
- bool is_inline_return_target_;
- bool is_deoptimizing_;
- bool dominates_loop_successors_;
+ // For blocks marked as inline return target: the block with HEnterInlined.
+ HBasicBlock* inlined_entry_block_;
+ bool is_inline_return_target_ : 1;
+ bool is_reachable_ : 1;
+ bool dominates_loop_successors_ : 1;
+ bool is_osr_entry_ : 1;
};
-class HPredecessorIterator BASE_EMBEDDED {
+class HPredecessorIterator V8_FINAL BASE_EMBEDDED {
public:
explicit HPredecessorIterator(HBasicBlock* block)
: predecessor_list_(block->predecessors()), current_(0) { }
@@ -208,7 +245,27 @@ class HPredecessorIterator BASE_EMBEDDED {
};
-class HLoopInformation: public ZoneObject {
+class HInstructionIterator V8_FINAL BASE_EMBEDDED {
+ public:
+ explicit HInstructionIterator(HBasicBlock* block)
+ : instr_(block->first()) {
+ next_ = Done() ? NULL : instr_->next();
+ }
+
+ inline bool Done() const { return instr_ == NULL; }
+ inline HInstruction* Current() { return instr_; }
+ inline void Advance() {
+ instr_ = next_;
+ next_ = Done() ? NULL : instr_->next();
+ }
+
+ private:
+ HInstruction* instr_;
+ HInstruction* next_;
+};
+
+
+class HLoopInformation V8_FINAL : public ZoneObject {
public:
HLoopInformation(HBasicBlock* loop_header, Zone* zone)
: back_edges_(4, zone),
@@ -217,7 +274,7 @@ class HLoopInformation: public ZoneObject {
stack_check_(NULL) {
blocks_.Add(loop_header, zone);
}
- virtual ~HLoopInformation() {}
+ ~HLoopInformation() {}
const ZoneList<HBasicBlock*>* back_edges() const { return &back_edges_; }
const ZoneList<HBasicBlock*>* blocks() const { return &blocks_; }
@@ -230,6 +287,20 @@ class HLoopInformation: public ZoneObject {
stack_check_ = stack_check;
}
+ bool IsNestedInThisLoop(HLoopInformation* other) {
+ while (other != NULL) {
+ if (other == this) {
+ return true;
+ }
+ other = other->parent_loop();
+ }
+ return false;
+ }
+ HLoopInformation* parent_loop() {
+ HBasicBlock* parent_header = loop_header()->parent_loop_header();
+ return parent_header != NULL ? parent_header->loop_information() : NULL;
+ }
+
private:
void AddBlock(HBasicBlock* block);
@@ -239,12 +310,14 @@ class HLoopInformation: public ZoneObject {
HStackCheck* stack_check_;
};
+
class BoundsCheckTable;
-class HGraph: public ZoneObject {
+class InductionVariableBlocksTable;
+class HGraph V8_FINAL : public ZoneObject {
public:
explicit HGraph(CompilationInfo* info);
- Isolate* isolate() { return isolate_; }
+ Isolate* isolate() const { return isolate_; }
Zone* zone() const { return zone_; }
CompilationInfo* info() const { return info_; }
@@ -253,23 +326,11 @@ class HGraph: public ZoneObject {
HBasicBlock* entry_block() const { return entry_block_; }
HEnvironment* start_environment() const { return start_environment_; }
- void InitializeInferredTypes();
- void InsertTypeConversions();
- void InsertRepresentationChanges();
- void MarkDeoptimizeOnUndefined();
- void ComputeMinusZeroChecks();
- void ComputeSafeUint32Operations();
+ void FinalizeUniqueness();
bool ProcessArgumentsObject();
- void EliminateRedundantPhis();
- void EliminateUnreachablePhis();
- void Canonicalize();
void OrderBlocks();
void AssignDominators();
- void ReplaceCheckedValues();
- void EliminateRedundantBoundsChecks();
- void DehoistSimpleArrayIndexComputations();
- void DeadCodeElimination();
- void PropagateDeoptimizingMark();
+ void RestoreActualValues();
// Returns false if there are phi-uses of the arguments-object
// which are not supported by the optimizing compiler.
@@ -281,15 +342,25 @@ class HGraph: public ZoneObject {
void CollectPhis();
- void set_undefined_constant(HConstant* constant) {
- undefined_constant_.set(constant);
- }
- HConstant* GetConstantUndefined() const { return undefined_constant_.get(); }
+ HConstant* GetConstantUndefined();
+ HConstant* GetConstant0();
HConstant* GetConstant1();
HConstant* GetConstantMinus1();
HConstant* GetConstantTrue();
HConstant* GetConstantFalse();
HConstant* GetConstantHole();
+ HConstant* GetConstantNull();
+ HConstant* GetInvalidContext();
+
+ bool IsConstantUndefined(HConstant* constant);
+ bool IsConstant0(HConstant* constant);
+ bool IsConstant1(HConstant* constant);
+ bool IsConstantMinus1(HConstant* constant);
+ bool IsConstantTrue(HConstant* constant);
+ bool IsConstantFalse(HConstant* constant);
+ bool IsConstantHole(HConstant* constant);
+ bool IsConstantNull(HConstant* constant);
+ bool IsStandardConstant(HConstant* constant);
HBasicBlock* CreateBasicBlock();
HArgumentsObject* GetArgumentsObject() const {
@@ -303,6 +374,7 @@ class HGraph: public ZoneObject {
int GetMaximumValueID() const { return values_.length(); }
int GetNextBlockID() { return next_block_id_++; }
int GetNextValueID(HValue* value) {
+ ASSERT(!disallow_adding_new_values_);
values_.Add(value, zone());
return values_.length() - 1;
}
@@ -310,31 +382,26 @@ class HGraph: public ZoneObject {
if (id >= 0 && id < values_.length()) return values_[id];
return NULL;
}
+ void DisallowAddingNewValues() {
+ disallow_adding_new_values_ = true;
+ }
- bool Optimize(SmartArrayPointer<char>* bailout_reason);
+ bool Optimize(BailoutReason* bailout_reason);
#ifdef DEBUG
void Verify(bool do_full_verify) const;
#endif
- bool has_osr_loop_entry() {
- return osr_loop_entry_.is_set();
- }
-
- HBasicBlock* osr_loop_entry() {
- return osr_loop_entry_.get();
+ bool has_osr() {
+ return osr_ != NULL;
}
- void set_osr_loop_entry(HBasicBlock* entry) {
- osr_loop_entry_.set(entry);
+ void set_osr(HOsrBuilder* osr) {
+ osr_ = osr;
}
- ZoneList<HUnknownOSRValue*>* osr_values() {
- return osr_values_.get();
- }
-
- void set_osr_values(ZoneList<HUnknownOSRValue*>* values) {
- osr_values_.set(values);
+ HOsrBuilder* osr() {
+ return osr_;
}
int update_type_change_checksum(int delta) {
@@ -342,6 +409,13 @@ class HGraph: public ZoneObject {
return type_change_checksum_;
}
+ void update_maximum_environment_size(int environment_size) {
+ if (environment_size > maximum_environment_size_) {
+ maximum_environment_size_ = environment_size;
+ }
+ }
+ int maximum_environment_size() { return maximum_environment_size_; }
+
bool use_optimistic_licm() {
return use_optimistic_licm_;
}
@@ -358,32 +432,54 @@ class HGraph: public ZoneObject {
return is_recursive_;
}
+ void MarkDependsOnEmptyArrayProtoElements() {
+ // Add map dependency if not already added.
+ if (depends_on_empty_array_proto_elements_) return;
+ isolate()->initial_object_prototype()->map()->AddDependentCompilationInfo(
+ DependentCode::kElementsCantBeAddedGroup, info());
+ isolate()->initial_array_prototype()->map()->AddDependentCompilationInfo(
+ DependentCode::kElementsCantBeAddedGroup, info());
+ depends_on_empty_array_proto_elements_ = true;
+ }
+
+ bool depends_on_empty_array_proto_elements() {
+ return depends_on_empty_array_proto_elements_;
+ }
+
+ bool has_uint32_instructions() {
+ ASSERT(uint32_instructions_ == NULL || !uint32_instructions_->is_empty());
+ return uint32_instructions_ != NULL;
+ }
+
+ ZoneList<HInstruction*>* uint32_instructions() {
+ ASSERT(uint32_instructions_ == NULL || !uint32_instructions_->is_empty());
+ return uint32_instructions_;
+ }
+
void RecordUint32Instruction(HInstruction* instr) {
+ ASSERT(uint32_instructions_ == NULL || !uint32_instructions_->is_empty());
if (uint32_instructions_ == NULL) {
uint32_instructions_ = new(zone()) ZoneList<HInstruction*>(4, zone());
}
uint32_instructions_->Add(instr, zone());
}
+ void IncrementInNoSideEffectsScope() { no_side_effects_scope_count_++; }
+ void DecrementInNoSideEffectsScope() { no_side_effects_scope_count_--; }
+ bool IsInsideNoSideEffectsScope() { return no_side_effects_scope_count_ > 0; }
+
private:
+ HConstant* ReinsertConstantIfNecessary(HConstant* constant);
HConstant* GetConstant(SetOncePointer<HConstant>* pointer,
- Handle<Object> value);
- HConstant* GetConstantInt32(SetOncePointer<HConstant>* pointer,
- int32_t integer_value);
-
- void MarkAsDeoptimizingRecursively(HBasicBlock* block);
- void InsertTypeConversions(HInstruction* instr);
- void PropagateMinusZeroChecks(HValue* value, BitVector* visited);
- void RecursivelyMarkPhiDeoptimizeOnUndefined(HPhi* phi);
- void InsertRepresentationChangeForUse(HValue* value,
- HValue* use_value,
- int use_index,
- Representation to);
- void InsertRepresentationChangesForValue(HValue* value);
- void InferTypes(ZoneList<HValue*>* worklist);
- void InitializeInferredTypes(int from_inclusive, int to_inclusive);
- void CheckForBackEdge(HBasicBlock* block, HBasicBlock* successor);
- void EliminateRedundantBoundsChecks(HBasicBlock* bb, BoundsCheckTable* table);
+ int32_t integer_value);
+
+ template<class Phase>
+ void Run() {
+ Phase phase(this);
+ phase.Run();
+ }
+
+ void EliminateRedundantBoundsChecksUsingInductionVariables();
Isolate* isolate_;
int next_block_id_;
@@ -393,23 +489,29 @@ class HGraph: public ZoneObject {
ZoneList<HValue*> values_;
ZoneList<HPhi*>* phi_list_;
ZoneList<HInstruction*>* uint32_instructions_;
- SetOncePointer<HConstant> undefined_constant_;
+ SetOncePointer<HConstant> constant_undefined_;
+ SetOncePointer<HConstant> constant_0_;
SetOncePointer<HConstant> constant_1_;
SetOncePointer<HConstant> constant_minus1_;
SetOncePointer<HConstant> constant_true_;
SetOncePointer<HConstant> constant_false_;
- SetOncePointer<HConstant> constant_hole_;
+ SetOncePointer<HConstant> constant_the_hole_;
+ SetOncePointer<HConstant> constant_null_;
+ SetOncePointer<HConstant> constant_invalid_context_;
SetOncePointer<HArgumentsObject> arguments_object_;
- SetOncePointer<HBasicBlock> osr_loop_entry_;
- SetOncePointer<ZoneList<HUnknownOSRValue*> > osr_values_;
+ HOsrBuilder* osr_;
CompilationInfo* info_;
Zone* zone_;
bool is_recursive_;
bool use_optimistic_licm_;
+ bool depends_on_empty_array_proto_elements_;
int type_change_checksum_;
+ int maximum_environment_size_;
+ int no_side_effects_scope_count_;
+ bool disallow_adding_new_values_;
DISALLOW_COPY_AND_ASSIGN(HGraph);
};
@@ -424,17 +526,20 @@ enum FrameType {
JS_CONSTRUCT,
JS_GETTER,
JS_SETTER,
- ARGUMENTS_ADAPTOR
+ ARGUMENTS_ADAPTOR,
+ STUB
};
-class HEnvironment: public ZoneObject {
+class HEnvironment V8_FINAL : public ZoneObject {
public:
HEnvironment(HEnvironment* outer,
Scope* scope,
Handle<JSFunction> closure,
Zone* zone);
+ HEnvironment(Zone* zone, int parameter_count);
+
HEnvironment* arguments_environment() {
return outer()->frame_type() == ARGUMENTS_ADAPTOR ? outer() : this;
}
@@ -442,7 +547,7 @@ class HEnvironment: public ZoneObject {
// Simple accessors.
Handle<JSFunction> closure() const { return closure_; }
const ZoneList<HValue*>* values() const { return &values_; }
- const ZoneList<int>* assigned_variables() const {
+ const GrowableBitVector* assigned_variables() const {
return &assigned_variables_;
}
FrameType frame_type() const { return frame_type_; }
@@ -460,14 +565,15 @@ class HEnvironment: public ZoneObject {
void set_entry(HEnterInlined* entry) { entry_ = entry; }
int length() const { return values_.length(); }
- bool is_special_index(int i) const {
- return i >= parameter_count() && i < parameter_count() + specials_count();
- }
int first_expression_index() const {
return parameter_count() + specials_count() + local_count();
}
+ int first_local_index() const {
+ return parameter_count() + specials_count();
+ }
+
void Bind(Variable* variable, HValue* value) {
Bind(IndexFor(variable), value);
}
@@ -488,7 +594,7 @@ class HEnvironment: public ZoneObject {
return result;
}
- HValue* LookupContext() const {
+ HValue* context() const {
// Return first special.
return Lookup(parameter_count());
}
@@ -534,8 +640,16 @@ class HEnvironment: public ZoneObject {
int arguments,
FunctionLiteral* function,
HConstant* undefined,
- CallKind call_kind,
- InliningKind inlining_kind) const;
+ InliningKind inlining_kind,
+ bool undefined_receiver) const;
+
+ static bool UseUndefinedReceiver(Handle<JSFunction> closure,
+ FunctionLiteral* function,
+ CallKind call_kind,
+ InliningKind inlining_kind) {
+ return (closure->shared()->native() || !function->is_classic_mode()) &&
+ call_kind == CALL_AS_FUNCTION && inlining_kind != CONSTRUCT_CALL_RETURN;
+ }
HEnvironment* DiscardInlined(bool drop_extra) {
HEnvironment* outer = outer_;
@@ -549,7 +663,7 @@ class HEnvironment: public ZoneObject {
void ClearHistory() {
pop_count_ = 0;
push_count_ = 0;
- assigned_variables_.Rewind(0);
+ assigned_variables_.Clear();
}
void SetValueAt(int index, HValue* value) {
@@ -557,6 +671,29 @@ class HEnvironment: public ZoneObject {
values_[index] = value;
}
+ // Map a variable to an environment index. Parameter indices are shifted
+ // by 1 (receiver is parameter index -1 but environment index 0).
+ // Stack-allocated local indices are shifted by the number of parameters.
+ int IndexFor(Variable* variable) const {
+ ASSERT(variable->IsStackAllocated());
+ int shift = variable->IsParameter()
+ ? 1
+ : parameter_count_ + specials_count_;
+ return variable->index() + shift;
+ }
+
+ bool is_local_index(int i) const {
+ return i >= first_local_index() && i < first_expression_index();
+ }
+
+ bool is_parameter_index(int i) const {
+ return i >= 0 && i < parameter_count();
+ }
+
+ bool is_special_index(int i) const {
+ return i >= parameter_count() && i < parameter_count() + specials_count();
+ }
+
void PrintTo(StringStream* stream);
void PrintToStd();
@@ -584,21 +721,10 @@ class HEnvironment: public ZoneObject {
void Initialize(int parameter_count, int local_count, int stack_height);
void Initialize(const HEnvironment* other);
- // Map a variable to an environment index. Parameter indices are shifted
- // by 1 (receiver is parameter index -1 but environment index 0).
- // Stack-allocated local indices are shifted by the number of parameters.
- int IndexFor(Variable* variable) const {
- ASSERT(variable->IsStackAllocated());
- int shift = variable->IsParameter()
- ? 1
- : parameter_count_ + specials_count_;
- return variable->index() + shift;
- }
-
Handle<JSFunction> closure_;
// Value array [parameters] [specials] [locals] [temporaries].
ZoneList<HValue*> values_;
- ZoneList<int> assigned_variables_;
+ GrowableBitVector assigned_variables_;
FrameType frame_type_;
int parameter_count_;
int specials_count_;
@@ -612,13 +738,16 @@ class HEnvironment: public ZoneObject {
};
-class HGraphBuilder;
+class HOptimizedGraphBuilder;
enum ArgumentsAllowedFlag {
ARGUMENTS_NOT_ALLOWED,
ARGUMENTS_ALLOWED
};
+
+class HIfContinuation;
+
// This class is not BASE_EMBEDDED because our inlining implementation uses
// new and delete.
class AstContext {
@@ -644,14 +773,21 @@ class AstContext {
// expressions.
virtual void ReturnControl(HControlInstruction* instr, BailoutId ast_id) = 0;
+  // Finishes the current basic block and materializes a boolean for
+  // value context, nothing for effect, or generates a branch for test context.
+ // Call this function in tail position in the Visit functions for
+ // expressions that use an IfBuilder.
+ virtual void ReturnContinuation(HIfContinuation* continuation,
+ BailoutId ast_id) = 0;
+
void set_for_typeof(bool for_typeof) { for_typeof_ = for_typeof; }
bool is_for_typeof() { return for_typeof_; }
protected:
- AstContext(HGraphBuilder* owner, Expression::Context kind);
+ AstContext(HOptimizedGraphBuilder* owner, Expression::Context kind);
virtual ~AstContext();
- HGraphBuilder* owner() const { return owner_; }
+ HOptimizedGraphBuilder* owner() const { return owner_; }
inline Zone* zone() const;
@@ -662,36 +798,44 @@ class AstContext {
#endif
private:
- HGraphBuilder* owner_;
+ HOptimizedGraphBuilder* owner_;
Expression::Context kind_;
AstContext* outer_;
bool for_typeof_;
};
-class EffectContext: public AstContext {
+class EffectContext V8_FINAL : public AstContext {
public:
- explicit EffectContext(HGraphBuilder* owner)
+ explicit EffectContext(HOptimizedGraphBuilder* owner)
: AstContext(owner, Expression::kEffect) {
}
virtual ~EffectContext();
- virtual void ReturnValue(HValue* value);
- virtual void ReturnInstruction(HInstruction* instr, BailoutId ast_id);
- virtual void ReturnControl(HControlInstruction* instr, BailoutId ast_id);
+ virtual void ReturnValue(HValue* value) V8_OVERRIDE;
+ virtual void ReturnInstruction(HInstruction* instr,
+ BailoutId ast_id) V8_OVERRIDE;
+ virtual void ReturnControl(HControlInstruction* instr,
+ BailoutId ast_id) V8_OVERRIDE;
+ virtual void ReturnContinuation(HIfContinuation* continuation,
+ BailoutId ast_id) V8_OVERRIDE;
};
-class ValueContext: public AstContext {
+class ValueContext V8_FINAL : public AstContext {
public:
- explicit ValueContext(HGraphBuilder* owner, ArgumentsAllowedFlag flag)
+ ValueContext(HOptimizedGraphBuilder* owner, ArgumentsAllowedFlag flag)
: AstContext(owner, Expression::kValue), flag_(flag) {
}
virtual ~ValueContext();
- virtual void ReturnValue(HValue* value);
- virtual void ReturnInstruction(HInstruction* instr, BailoutId ast_id);
- virtual void ReturnControl(HControlInstruction* instr, BailoutId ast_id);
+ virtual void ReturnValue(HValue* value) V8_OVERRIDE;
+ virtual void ReturnInstruction(HInstruction* instr,
+ BailoutId ast_id) V8_OVERRIDE;
+ virtual void ReturnControl(HControlInstruction* instr,
+ BailoutId ast_id) V8_OVERRIDE;
+ virtual void ReturnContinuation(HIfContinuation* continuation,
+ BailoutId ast_id) V8_OVERRIDE;
bool arguments_allowed() { return flag_ == ARGUMENTS_ALLOWED; }
@@ -700,23 +844,25 @@ class ValueContext: public AstContext {
};
-class TestContext: public AstContext {
+class TestContext V8_FINAL : public AstContext {
public:
- TestContext(HGraphBuilder* owner,
+ TestContext(HOptimizedGraphBuilder* owner,
Expression* condition,
- TypeFeedbackOracle* oracle,
HBasicBlock* if_true,
HBasicBlock* if_false)
: AstContext(owner, Expression::kTest),
condition_(condition),
- oracle_(oracle),
if_true_(if_true),
if_false_(if_false) {
}
- virtual void ReturnValue(HValue* value);
- virtual void ReturnInstruction(HInstruction* instr, BailoutId ast_id);
- virtual void ReturnControl(HControlInstruction* instr, BailoutId ast_id);
+ virtual void ReturnValue(HValue* value) V8_OVERRIDE;
+ virtual void ReturnInstruction(HInstruction* instr,
+ BailoutId ast_id) V8_OVERRIDE;
+ virtual void ReturnControl(HControlInstruction* instr,
+ BailoutId ast_id) V8_OVERRIDE;
+ virtual void ReturnContinuation(HIfContinuation* continuation,
+ BailoutId ast_id) V8_OVERRIDE;
static TestContext* cast(AstContext* context) {
ASSERT(context->IsTest());
@@ -724,7 +870,6 @@ class TestContext: public AstContext {
}
Expression* condition() const { return condition_; }
- TypeFeedbackOracle* oracle() const { return oracle_; }
HBasicBlock* if_true() const { return if_true_; }
HBasicBlock* if_false() const { return if_false_; }
@@ -734,22 +879,19 @@ class TestContext: public AstContext {
void BuildBranch(HValue* value);
Expression* condition_;
- TypeFeedbackOracle* oracle_;
HBasicBlock* if_true_;
HBasicBlock* if_false_;
};
-class FunctionState {
+class FunctionState V8_FINAL {
public:
- FunctionState(HGraphBuilder* owner,
+ FunctionState(HOptimizedGraphBuilder* owner,
CompilationInfo* info,
- TypeFeedbackOracle* oracle,
InliningKind inlining_kind);
~FunctionState();
CompilationInfo* compilation_info() { return compilation_info_; }
- TypeFeedbackOracle* oracle() { return oracle_; }
AstContext* call_context() { return call_context_; }
InliningKind inlining_kind() const { return inlining_kind_; }
HBasicBlock* function_return() { return function_return_; }
@@ -764,6 +906,11 @@ class FunctionState {
HEnterInlined* entry() { return entry_; }
void set_entry(HEnterInlined* entry) { entry_ = entry; }
+ HArgumentsObject* arguments_object() { return arguments_object_; }
+ void set_arguments_object(HArgumentsObject* arguments_object) {
+ arguments_object_ = arguments_object;
+ }
+
HArgumentsElements* arguments_elements() { return arguments_elements_; }
void set_arguments_elements(HArgumentsElements* arguments_elements) {
arguments_elements_ = arguments_elements;
@@ -772,10 +919,9 @@ class FunctionState {
bool arguments_pushed() { return arguments_elements() != NULL; }
private:
- HGraphBuilder* owner_;
+ HOptimizedGraphBuilder* owner_;
CompilationInfo* compilation_info_;
- TypeFeedbackOracle* oracle_;
// During function inlining, expression context of the call being
// inlined. NULL when not inlining.
@@ -798,21 +944,823 @@ class FunctionState {
// entry.
HEnterInlined* entry_;
+ HArgumentsObject* arguments_object_;
HArgumentsElements* arguments_elements_;
FunctionState* outer_;
};
-class HGraphBuilder: public AstVisitor {
+class HIfContinuation V8_FINAL {
public:
- enum BreakType { BREAK, CONTINUE };
- enum SwitchType { UNKNOWN_SWITCH, SMI_SWITCH, STRING_SWITCH };
+ HIfContinuation() : continuation_captured_(false) {}
+ HIfContinuation(HBasicBlock* true_branch,
+ HBasicBlock* false_branch)
+ : continuation_captured_(true), true_branch_(true_branch),
+ false_branch_(false_branch) {}
+ ~HIfContinuation() { ASSERT(!continuation_captured_); }
+
+ void Capture(HBasicBlock* true_branch,
+ HBasicBlock* false_branch) {
+ ASSERT(!continuation_captured_);
+ true_branch_ = true_branch;
+ false_branch_ = false_branch;
+ continuation_captured_ = true;
+ }
+
+ void Continue(HBasicBlock** true_branch,
+ HBasicBlock** false_branch) {
+ ASSERT(continuation_captured_);
+ *true_branch = true_branch_;
+ *false_branch = false_branch_;
+ continuation_captured_ = false;
+ }
+
+ bool IsTrueReachable() { return true_branch_ != NULL; }
+ bool IsFalseReachable() { return false_branch_ != NULL; }
+ bool TrueAndFalseReachable() {
+ return IsTrueReachable() || IsFalseReachable();
+ }
+
+ HBasicBlock* true_branch() const { return true_branch_; }
+ HBasicBlock* false_branch() const { return false_branch_; }
+
+ private:
+ bool continuation_captured_;
+ HBasicBlock* true_branch_;
+ HBasicBlock* false_branch_;
+};
+
+
+class HGraphBuilder {
+ public:
+ explicit HGraphBuilder(CompilationInfo* info)
+ : info_(info),
+ graph_(NULL),
+ current_block_(NULL),
+ position_(RelocInfo::kNoPosition) {}
+ virtual ~HGraphBuilder() {}
+
+ HBasicBlock* current_block() const { return current_block_; }
+ void set_current_block(HBasicBlock* block) { current_block_ = block; }
+ HEnvironment* environment() const {
+ return current_block()->last_environment();
+ }
+ Zone* zone() const { return info_->zone(); }
+ HGraph* graph() const { return graph_; }
+ Isolate* isolate() const { return graph_->isolate(); }
+ CompilationInfo* top_info() { return info_; }
+
+ HGraph* CreateGraph();
+
+ // Bailout environment manipulation.
+ void Push(HValue* value) { environment()->Push(value); }
+ HValue* Pop() { return environment()->Pop(); }
+
+ virtual HValue* context() = 0;
+
+ // Adding instructions.
+ HInstruction* AddInstruction(HInstruction* instr);
+ void FinishCurrentBlock(HControlInstruction* last);
+ void FinishExitCurrentBlock(HControlInstruction* instruction);
+
+ void Goto(HBasicBlock* from,
+ HBasicBlock* target,
+ FunctionState* state = NULL,
+ bool add_simulate = true) {
+ from->Goto(target, position_, state, add_simulate);
+ }
+ void Goto(HBasicBlock* target,
+ FunctionState* state = NULL,
+ bool add_simulate = true) {
+ Goto(current_block(), target, state, add_simulate);
+ }
+ void GotoNoSimulate(HBasicBlock* from, HBasicBlock* target) {
+ Goto(from, target, NULL, false);
+ }
+ void GotoNoSimulate(HBasicBlock* target) {
+ Goto(target, NULL, false);
+ }
+ void AddLeaveInlined(HBasicBlock* block,
+ HValue* return_value,
+ FunctionState* state) {
+ block->AddLeaveInlined(return_value, state, position_);
+ }
+ void AddLeaveInlined(HValue* return_value, FunctionState* state) {
+ return AddLeaveInlined(current_block(), return_value, state);
+ }
+
+ template<class I>
+ HInstruction* NewUncasted() { return I::New(zone(), context()); }
+
+ template<class I>
+ I* New() { return I::cast(NewUncasted<I>()); }
+
+ template<class I>
+ HInstruction* AddUncasted() { return AddInstruction(NewUncasted<I>());}
+
+ template<class I>
+ I* Add() { return I::cast(AddUncasted<I>());}
+
+ template<class I, class P1>
+ HInstruction* NewUncasted(P1 p1) {
+ return I::New(zone(), context(), p1);
+ }
+
+ template<class I, class P1>
+ I* New(P1 p1) { return I::cast(NewUncasted<I>(p1)); }
+
+ template<class I, class P1>
+ HInstruction* AddUncasted(P1 p1) {
+ HInstruction* result = AddInstruction(NewUncasted<I>(p1));
+    // Specializations must have their parameters properly cast
+    // to avoid landing here.
+ ASSERT(!result->IsReturn() && !result->IsSimulate() &&
+ !result->IsDeoptimize());
+ return result;
+ }
+
+ template<class I, class P1>
+ I* Add(P1 p1) {
+ return I::cast(AddUncasted<I>(p1));
+ }
+
+ template<class I, class P1, class P2>
+ HInstruction* NewUncasted(P1 p1, P2 p2) {
+ return I::New(zone(), context(), p1, p2);
+ }
+
+ template<class I, class P1, class P2>
+ I* New(P1 p1, P2 p2) {
+ return I::cast(NewUncasted<I>(p1, p2));
+ }
+
+ template<class I, class P1, class P2>
+ HInstruction* AddUncasted(P1 p1, P2 p2) {
+ HInstruction* result = AddInstruction(NewUncasted<I>(p1, p2));
+    // Specializations must have their parameters properly cast
+    // to avoid landing here.
+ ASSERT(!result->IsSimulate());
+ return result;
+ }
+
+ template<class I, class P1, class P2>
+ I* Add(P1 p1, P2 p2) {
+ return I::cast(AddUncasted<I>(p1, p2));
+ }
+
+ template<class I, class P1, class P2, class P3>
+ HInstruction* NewUncasted(P1 p1, P2 p2, P3 p3) {
+ return I::New(zone(), context(), p1, p2, p3);
+ }
+
+ template<class I, class P1, class P2, class P3>
+ I* New(P1 p1, P2 p2, P3 p3) {
+ return I::cast(NewUncasted<I>(p1, p2, p3));
+ }
+
+ template<class I, class P1, class P2, class P3>
+ HInstruction* AddUncasted(P1 p1, P2 p2, P3 p3) {
+ return AddInstruction(NewUncasted<I>(p1, p2, p3));
+ }
+
+ template<class I, class P1, class P2, class P3>
+ I* Add(P1 p1, P2 p2, P3 p3) {
+ return I::cast(AddUncasted<I>(p1, p2, p3));
+ }
+
+ template<class I, class P1, class P2, class P3, class P4>
+ HInstruction* NewUncasted(P1 p1, P2 p2, P3 p3, P4 p4) {
+ return I::New(zone(), context(), p1, p2, p3, p4);
+ }
+
+ template<class I, class P1, class P2, class P3, class P4>
+ I* New(P1 p1, P2 p2, P3 p3, P4 p4) {
+ return I::cast(NewUncasted<I>(p1, p2, p3, p4));
+ }
+
+ template<class I, class P1, class P2, class P3, class P4>
+ HInstruction* AddUncasted(P1 p1, P2 p2, P3 p3, P4 p4) {
+ return AddInstruction(NewUncasted<I>(p1, p2, p3, p4));
+ }
+
+ template<class I, class P1, class P2, class P3, class P4>
+ I* Add(P1 p1, P2 p2, P3 p3, P4 p4) {
+ return I::cast(AddUncasted<I>(p1, p2, p3, p4));
+ }
+
+ template<class I, class P1, class P2, class P3, class P4, class P5>
+ HInstruction* NewUncasted(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5) {
+ return I::New(zone(), context(), p1, p2, p3, p4, p5);
+ }
+
+ template<class I, class P1, class P2, class P3, class P4, class P5>
+ I* New(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5) {
+ return I::cast(NewUncasted<I>(p1, p2, p3, p4, p5));
+ }
+
+ template<class I, class P1, class P2, class P3, class P4, class P5>
+ HInstruction* AddUncasted(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5) {
+ return AddInstruction(NewUncasted<I>(p1, p2, p3, p4, p5));
+ }
+
+ template<class I, class P1, class P2, class P3, class P4, class P5>
+ I* Add(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5) {
+ return I::cast(AddUncasted<I>(p1, p2, p3, p4, p5));
+ }
+
+ template<class I, class P1, class P2, class P3, class P4, class P5, class P6>
+ HInstruction* NewUncasted(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6) {
+ return I::New(zone(), context(), p1, p2, p3, p4, p5, p6);
+ }
+
+ template<class I, class P1, class P2, class P3, class P4, class P5, class P6>
+ I* New(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6) {
+ return I::cast(NewUncasted<I>(p1, p2, p3, p4, p5, p6));
+ }
+
+ template<class I, class P1, class P2, class P3, class P4, class P5, class P6>
+ HInstruction* AddUncasted(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6) {
+ return AddInstruction(NewUncasted<I>(p1, p2, p3, p4, p5, p6));
+ }
+
+ template<class I, class P1, class P2, class P3, class P4, class P5, class P6>
+ I* Add(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6) {
+ return I::cast(AddInstruction(NewUncasted<I>(p1, p2, p3, p4, p5, p6)));
+ }
+
+ template<class I, class P1, class P2, class P3, class P4,
+ class P5, class P6, class P7>
+ HInstruction* NewUncasted(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6, P7 p7) {
+ return I::New(zone(), context(), p1, p2, p3, p4, p5, p6, p7);
+ }
+
+ template<class I, class P1, class P2, class P3, class P4,
+ class P5, class P6, class P7>
+ I* New(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6, P7 p7) {
+ return I::cast(NewUncasted<I>(p1, p2, p3, p4, p5, p6, p7));
+ }
+
+ template<class I, class P1, class P2, class P3,
+ class P4, class P5, class P6, class P7>
+ HInstruction* AddUncasted(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6, P7 p7) {
+ return AddInstruction(NewUncasted<I>(p1, p2, p3, p4, p5, p6, p7));
+ }
+
+ template<class I, class P1, class P2, class P3,
+ class P4, class P5, class P6, class P7>
+ I* Add(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6, P7 p7) {
+ return I::cast(AddInstruction(NewUncasted<I>(p1, p2, p3, p4,
+ p5, p6, p7)));
+ }
+
+ template<class I, class P1, class P2, class P3, class P4,
+ class P5, class P6, class P7, class P8>
+ HInstruction* NewUncasted(P1 p1, P2 p2, P3 p3, P4 p4,
+ P5 p5, P6 p6, P7 p7, P8 p8) {
+ return I::New(zone(), context(), p1, p2, p3, p4, p5, p6, p7, p8);
+ }
+
+ template<class I, class P1, class P2, class P3, class P4,
+ class P5, class P6, class P7, class P8>
+ I* New(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6, P7 p7, P8 p8) {
+ return I::cast(NewUncasted<I>(p1, p2, p3, p4, p5, p6, p7, p8));
+ }
+
+ template<class I, class P1, class P2, class P3, class P4,
+ class P5, class P6, class P7, class P8>
+ HInstruction* AddUncasted(P1 p1, P2 p2, P3 p3, P4 p4,
+ P5 p5, P6 p6, P7 p7, P8 p8) {
+ return AddInstruction(NewUncasted<I>(p1, p2, p3, p4, p5, p6, p7, p8));
+ }
+
+ template<class I, class P1, class P2, class P3, class P4,
+ class P5, class P6, class P7, class P8>
+ I* Add(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6, P7 p7, P8 p8) {
+ return I::cast(
+ AddInstruction(NewUncasted<I>(p1, p2, p3, p4, p5, p6, p7, p8)));
+ }
+
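+  // Illustrative sketch (not part of this change): the New/Add overloads
+  // above forward their arguments to I::New(zone(), context(), ...), so a
+  // typical call site looks like this (operands are hypothetical):
+  //
+  //   HValue* sum      = AddUncasted<HAdd>(left, right);
+  //   HConstant* limit = Add<HConstant>(kMaxLength);
+  //
+  // The Add* variants also insert the instruction into the current block.
+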
+ void AddSimulate(BailoutId id, RemovableSimulate removable = FIXED_SIMULATE);
+
+ int position() const { return position_; }
+
+ protected:
+ virtual bool BuildGraph() = 0;
+
+ HBasicBlock* CreateBasicBlock(HEnvironment* env);
+ HBasicBlock* CreateLoopHeaderBlock();
+
+ HValue* BuildCheckHeapObject(HValue* object);
+ HValue* BuildCheckMap(HValue* obj, Handle<Map> map);
+ HValue* BuildWrapReceiver(HValue* object, HValue* function);
+
+ // Building common constructs
+ HValue* BuildCheckForCapacityGrow(HValue* object,
+ HValue* elements,
+ ElementsKind kind,
+ HValue* length,
+ HValue* key,
+ bool is_js_array);
+
+ HValue* BuildCopyElementsOnWrite(HValue* object,
+ HValue* elements,
+ ElementsKind kind,
+ HValue* length);
+
+ void BuildTransitionElementsKind(HValue* object,
+ HValue* map,
+ ElementsKind from_kind,
+ ElementsKind to_kind,
+ bool is_jsarray);
+
+ HValue* BuildNumberToString(HValue* object, Handle<Type> type);
+
+ HInstruction* BuildUncheckedMonomorphicElementAccess(
+ HValue* checked_object,
+ HValue* key,
+ HValue* val,
+ bool is_js_array,
+ ElementsKind elements_kind,
+ bool is_store,
+ LoadKeyedHoleMode load_mode,
+ KeyedAccessStoreMode store_mode);
+
+ HInstruction* AddElementAccess(
+ HValue* elements,
+ HValue* checked_key,
+ HValue* val,
+ HValue* dependency,
+ ElementsKind elements_kind,
+ bool is_store,
+ LoadKeyedHoleMode load_mode = NEVER_RETURN_HOLE);
+
+ HLoadNamedField* BuildLoadNamedField(HValue* object, HObjectAccess access);
+ HInstruction* AddLoadNamedField(HValue* object, HObjectAccess access);
+ HInstruction* BuildLoadStringLength(HValue* object, HValue* checked_value);
+ HStoreNamedField* AddStoreMapConstant(HValue* object, Handle<Map>);
+ HLoadNamedField* AddLoadElements(HValue* object);
+
+ bool MatchRotateRight(HValue* left,
+ HValue* right,
+ HValue** operand,
+ HValue** shift_amount);
+
+ HInstruction* BuildBinaryOperation(Token::Value op,
+ HValue* left,
+ HValue* right,
+ Handle<Type> left_type,
+ Handle<Type> right_type,
+ Handle<Type> result_type,
+ Maybe<int> fixed_right_arg,
+ bool binop_stub = false);
+
+ HLoadNamedField* AddLoadFixedArrayLength(HValue *object);
+
+ HValue* AddLoadJSBuiltin(Builtins::JavaScript builtin);
+
+ HValue* EnforceNumberType(HValue* number, Handle<Type> expected);
+ HValue* TruncateToNumber(HValue* value, Handle<Type>* expected);
+
+ void FinishExitWithHardDeoptimization(const char* reason,
+ HBasicBlock* continuation);
+
+ void AddIncrementCounter(StatsCounter* counter);
+
+ class IfBuilder V8_FINAL {
+ public:
+ explicit IfBuilder(HGraphBuilder* builder);
+ IfBuilder(HGraphBuilder* builder,
+ HIfContinuation* continuation);
+
+ ~IfBuilder() {
+ if (!finished_) End();
+ }
+ template<class Condition>
+ Condition* If(HValue *p) {
+ Condition* compare = builder()->New<Condition>(p);
+ AddCompare(compare);
+ return compare;
+ }
+
+ template<class Condition, class P2>
+ Condition* If(HValue* p1, P2 p2) {
+ Condition* compare = builder()->New<Condition>(p1, p2);
+ AddCompare(compare);
+ return compare;
+ }
+
+ template<class Condition, class P2, class P3>
+ Condition* If(HValue* p1, P2 p2, P3 p3) {
+ Condition* compare = builder()->New<Condition>(p1, p2, p3);
+ AddCompare(compare);
+ return compare;
+ }
+
+ template<class Condition>
+ Condition* IfNot(HValue* p) {
+ Condition* compare = If<Condition>(p);
+ compare->Not();
+ return compare;
+ }
+
+ template<class Condition, class P2>
+ Condition* IfNot(HValue* p1, P2 p2) {
+ Condition* compare = If<Condition>(p1, p2);
+ compare->Not();
+ return compare;
+ }
+
+ template<class Condition, class P2, class P3>
+ Condition* IfNot(HValue* p1, P2 p2, P3 p3) {
+ Condition* compare = If<Condition>(p1, p2, p3);
+ compare->Not();
+ return compare;
+ }
+
+ template<class Condition>
+ Condition* OrIf(HValue *p) {
+ Or();
+ return If<Condition>(p);
+ }
+
+ template<class Condition, class P2>
+ Condition* OrIf(HValue* p1, P2 p2) {
+ Or();
+ return If<Condition>(p1, p2);
+ }
+
+ template<class Condition, class P2, class P3>
+ Condition* OrIf(HValue* p1, P2 p2, P3 p3) {
+ Or();
+ return If<Condition>(p1, p2, p3);
+ }
+
+ template<class Condition>
+ Condition* AndIf(HValue *p) {
+ And();
+ return If<Condition>(p);
+ }
+
+ template<class Condition, class P2>
+ Condition* AndIf(HValue* p1, P2 p2) {
+ And();
+ return If<Condition>(p1, p2);
+ }
+
+ template<class Condition, class P2, class P3>
+ Condition* AndIf(HValue* p1, P2 p2, P3 p3) {
+ And();
+ return If<Condition>(p1, p2, p3);
+ }
+
+ void Or();
+ void And();
+
+ // Captures the current state of this IfBuilder in the specified
+ // continuation and ends this IfBuilder.
+ void CaptureContinuation(HIfContinuation* continuation);
+
+ // Joins the specified continuation from this IfBuilder and ends this
+ // IfBuilder. This appends a Goto instruction from the true branch of
+ // this IfBuilder to the true branch of the continuation unless the
+ // true branch of this IfBuilder is already finished. And vice versa
+ // for the false branch.
+ //
+    // The basic idea is as follows: you have several nested IfBuilders
+    // that you want to join based on two possible outcomes (e.g. success
+    // and failure). This method makes that straightforward, for example:
+ //
+ // HIfContinuation cont(graph()->CreateBasicBlock(),
+ // graph()->CreateBasicBlock());
+ // ...
+ // IfBuilder if_whatever(this);
+ // if_whatever.If<Condition>(arg);
+ // if_whatever.Then();
+ // ...
+ // if_whatever.Else();
+ // ...
+ // if_whatever.JoinContinuation(&cont);
+ // ...
+ // IfBuilder if_something(this);
+ // if_something.If<Condition>(arg1, arg2);
+ // if_something.Then();
+ // ...
+ // if_something.Else();
+ // ...
+ // if_something.JoinContinuation(&cont);
+ // ...
+ // IfBuilder if_finally(this, &cont);
+ // if_finally.Then();
+ // // continues after then code of if_whatever or if_something.
+ // ...
+ // if_finally.Else();
+ // // continues after else code of if_whatever or if_something.
+ // ...
+ // if_finally.End();
+ void JoinContinuation(HIfContinuation* continuation);
+
+ void Then();
+ void Else();
+ void End();
+
+ void Deopt(const char* reason);
+ void ElseDeopt(const char* reason) {
+ Else();
+ Deopt(reason);
+ }
+
+ void Return(HValue* value);
+
+ private:
+ HControlInstruction* AddCompare(HControlInstruction* compare);
+
+ HGraphBuilder* builder() const { return builder_; }
+
+ HGraphBuilder* builder_;
+ bool finished_ : 1;
+ bool deopt_then_ : 1;
+ bool deopt_else_ : 1;
+ bool did_then_ : 1;
+ bool did_else_ : 1;
+ bool did_and_ : 1;
+ bool did_or_ : 1;
+ bool captured_ : 1;
+ bool needs_compare_ : 1;
+ HBasicBlock* first_true_block_;
+ HBasicBlock* last_true_block_;
+ HBasicBlock* first_false_block_;
+ HBasicBlock* split_edge_merge_block_;
+ HBasicBlock* merge_block_;
+ };
+
+ class LoopBuilder V8_FINAL {
+ public:
+ enum Direction {
+ kPreIncrement,
+ kPostIncrement,
+ kPreDecrement,
+ kPostDecrement
+ };
+
+ LoopBuilder(HGraphBuilder* builder,
+ HValue* context,
+ Direction direction);
+ LoopBuilder(HGraphBuilder* builder,
+ HValue* context,
+ Direction direction,
+ HValue* increment_amount);
+
+ ~LoopBuilder() {
+ ASSERT(finished_);
+ }
+
+ HValue* BeginBody(
+ HValue* initial,
+ HValue* terminating,
+ Token::Value token);
+
+ void Break();
+
+ void EndBody();
+
+ private:
+ Zone* zone() { return builder_->zone(); }
+
+ HGraphBuilder* builder_;
+ HValue* context_;
+ HValue* increment_amount_;
+ HInstruction* increment_;
+ HPhi* phi_;
+ HBasicBlock* header_block_;
+ HBasicBlock* body_block_;
+ HBasicBlock* exit_block_;
+ HBasicBlock* exit_trampoline_block_;
+ Direction direction_;
+ bool finished_;
+ };
+
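+  // Illustrative sketch (not part of this change): a counted loop built with
+  // LoopBuilder; the element-copy body and variable names are hypothetical.
+  //
+  //   LoopBuilder builder(this, context(), LoopBuilder::kPostIncrement);
+  //   HValue* index = builder.BeginBody(start, length, Token::LT);
+  //   {
+  //     HValue* element = Add<HLoadKeyed>(from, index, ...);
+  //     Add<HStoreKeyed>(to, index, element, kind);
+  //   }
+  //   builder.EndBody();
+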
+ HValue* BuildNewElementsCapacity(HValue* old_capacity);
+
+ void BuildNewSpaceArrayCheck(HValue* length,
+ ElementsKind kind);
+
+ class JSArrayBuilder V8_FINAL {
+ public:
+ JSArrayBuilder(HGraphBuilder* builder,
+ ElementsKind kind,
+ HValue* allocation_site_payload,
+ HValue* constructor_function,
+ AllocationSiteOverrideMode override_mode);
+
+ JSArrayBuilder(HGraphBuilder* builder,
+ ElementsKind kind,
+ HValue* constructor_function);
+
+ HValue* AllocateEmptyArray();
+ HValue* AllocateArray(HValue* capacity, HValue* length_field,
+ bool fill_with_hole);
+ HValue* GetElementsLocation() { return elements_location_; }
+
+ private:
+ Zone* zone() const { return builder_->zone(); }
+ int elements_size() const {
+ return IsFastDoubleElementsKind(kind_) ? kDoubleSize : kPointerSize;
+ }
+ HGraphBuilder* builder() { return builder_; }
+ HGraph* graph() { return builder_->graph(); }
+ int initial_capacity() {
+ STATIC_ASSERT(JSArray::kPreallocatedArrayElements > 0);
+ return JSArray::kPreallocatedArrayElements;
+ }
+
+ HValue* EmitMapCode();
+ HValue* EmitInternalMapCode();
+ HValue* EstablishEmptyArrayAllocationSize();
+ HValue* EstablishAllocationSize(HValue* length_node);
+ HValue* AllocateArray(HValue* size_in_bytes, HValue* capacity,
+ HValue* length_field, bool fill_with_hole);
+
+ HGraphBuilder* builder_;
+ ElementsKind kind_;
+ AllocationSiteMode mode_;
+ HValue* allocation_site_payload_;
+ HValue* constructor_function_;
+ HInnerAllocatedObject* elements_location_;
+ };
+
+ HValue* BuildAllocateElements(ElementsKind kind,
+ HValue* capacity);
+
+ void BuildInitializeElementsHeader(HValue* elements,
+ ElementsKind kind,
+ HValue* capacity);
+
+ HValue* BuildAllocateElementsAndInitializeElementsHeader(ElementsKind kind,
+ HValue* capacity);
+
+  // The array must have been allocated with enough room for
+  // 1) the JSArray, 2) an AllocationMemento if mode requires it, and
+  // 3) a FixedArray or FixedDoubleArray.
+  // A pointer to the Fixed(Double)Array is returned.
+ HInnerAllocatedObject* BuildJSArrayHeader(HValue* array,
+ HValue* array_map,
+ AllocationSiteMode mode,
+ ElementsKind elements_kind,
+ HValue* allocation_site_payload,
+ HValue* length_field);
+
+ HValue* BuildGrowElementsCapacity(HValue* object,
+ HValue* elements,
+ ElementsKind kind,
+ ElementsKind new_kind,
+ HValue* length,
+ HValue* new_capacity);
+
+ void BuildFillElementsWithHole(HValue* elements,
+ ElementsKind elements_kind,
+ HValue* from,
+ HValue* to);
+
+ void BuildCopyElements(HValue* from_elements,
+ ElementsKind from_elements_kind,
+ HValue* to_elements,
+ ElementsKind to_elements_kind,
+ HValue* length,
+ HValue* capacity);
+
+ HValue* BuildCloneShallowArray(HValue* boilerplate,
+ HValue* allocation_site,
+ AllocationSiteMode mode,
+ ElementsKind kind,
+ int length);
+
+ void BuildCompareNil(
+ HValue* value,
+ Handle<Type> type,
+ HIfContinuation* continuation);
+
+ HValue* BuildCreateAllocationMemento(HValue* previous_object,
+ int previous_object_size,
+ HValue* payload);
+
+ HInstruction* BuildConstantMapCheck(Handle<JSObject> constant,
+ CompilationInfo* info);
+ HInstruction* BuildCheckPrototypeMaps(Handle<JSObject> prototype,
+ Handle<JSObject> holder);
+
+ HInstruction* BuildGetNativeContext();
+ HInstruction* BuildGetArrayFunction();
+
+ protected:
+ void SetSourcePosition(int position) {
+ ASSERT(position != RelocInfo::kNoPosition);
+ position_ = position;
+ }
+
+ private:
+ HGraphBuilder();
+
+ void PadEnvironmentForContinuation(HBasicBlock* from,
+ HBasicBlock* continuation);
+
+ CompilationInfo* info_;
+ HGraph* graph_;
+ HBasicBlock* current_block_;
+ int position_;
+};
+
+
+template<>
+inline HInstruction* HGraphBuilder::AddUncasted<HDeoptimize>(
+ const char* reason, Deoptimizer::BailoutType type) {
+ if (type == Deoptimizer::SOFT) {
+ isolate()->counters()->soft_deopts_requested()->Increment();
+ if (FLAG_always_opt) return NULL;
+ }
+ if (current_block()->IsDeoptimizing()) return NULL;
+ HBasicBlock* after_deopt_block = CreateBasicBlock(
+ current_block()->last_environment());
+ HDeoptimize* instr = New<HDeoptimize>(reason, type, after_deopt_block);
+ if (type == Deoptimizer::SOFT) {
+ isolate()->counters()->soft_deopts_inserted()->Increment();
+ }
+ FinishCurrentBlock(instr);
+ set_current_block(after_deopt_block);
+ return instr;
+}
+
+
+template<>
+inline HDeoptimize* HGraphBuilder::Add<HDeoptimize>(
+ const char* reason, Deoptimizer::BailoutType type) {
+ return static_cast<HDeoptimize*>(AddUncasted<HDeoptimize>(reason, type));
+}
+
+
+template<>
+inline HInstruction* HGraphBuilder::AddUncasted<HSimulate>(
+ BailoutId id,
+ RemovableSimulate removable) {
+ HSimulate* instr = current_block()->CreateSimulate(id, removable);
+ AddInstruction(instr);
+ return instr;
+}
+
+
+template<>
+inline HInstruction* HGraphBuilder::AddUncasted<HSimulate>(BailoutId id) {
+ return AddUncasted<HSimulate>(id, FIXED_SIMULATE);
+}
+
+
+template<>
+inline HInstruction* HGraphBuilder::AddUncasted<HReturn>(HValue* value) {
+ int num_parameters = graph()->info()->num_parameters();
+ HValue* params = AddUncasted<HConstant>(num_parameters);
+ HReturn* return_instruction = New<HReturn>(value, params);
+ FinishExitCurrentBlock(return_instruction);
+ return return_instruction;
+}
+
+
+template<>
+inline HInstruction* HGraphBuilder::AddUncasted<HReturn>(HConstant* value) {
+ return AddUncasted<HReturn>(static_cast<HValue*>(value));
+}
+
+
+template<>
+inline HInstruction* HGraphBuilder::AddUncasted<HCallRuntime>(
+ Handle<String> name,
+ const Runtime::Function* c_function,
+ int argument_count) {
+ HCallRuntime* instr = New<HCallRuntime>(name, c_function, argument_count);
+ if (graph()->info()->IsStub()) {
+ // When compiling code stubs, we don't want to save all double registers
+ // upon entry to the stub, but instead have the call runtime instruction
+ // save the double registers only on-demand (in the fallback case).
+ instr->set_save_doubles(kSaveFPRegs);
+ }
+ AddInstruction(instr);
+ return instr;
+}
+
+
+template<>
+inline HInstruction* HGraphBuilder::NewUncasted<HContext>() {
+ return HContext::New(zone());
+}
+
+
+class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
+ public:
// A class encapsulating (lazily-allocated) break and continue blocks for
// a breakable statement. Separated from BreakAndContinueScope so that it
// can have a separate lifetime.
- class BreakAndContinueInfo BASE_EMBEDDED {
+ class BreakAndContinueInfo V8_FINAL BASE_EMBEDDED {
public:
explicit BreakAndContinueInfo(BreakableStatement* target,
int drop_extra = 0)
@@ -838,9 +1786,10 @@ class HGraphBuilder: public AstVisitor {
// A helper class to maintain a stack of current BreakAndContinueInfo
// structures mirroring BreakableStatement nesting.
- class BreakAndContinueScope BASE_EMBEDDED {
+ class BreakAndContinueScope V8_FINAL BASE_EMBEDDED {
public:
- BreakAndContinueScope(BreakAndContinueInfo* info, HGraphBuilder* owner)
+ BreakAndContinueScope(BreakAndContinueInfo* info,
+ HOptimizedGraphBuilder* owner)
: info_(info), owner_(owner), next_(owner->break_scope()) {
owner->set_break_scope(this);
}
@@ -848,51 +1797,39 @@ class HGraphBuilder: public AstVisitor {
~BreakAndContinueScope() { owner_->set_break_scope(next_); }
BreakAndContinueInfo* info() { return info_; }
- HGraphBuilder* owner() { return owner_; }
+ HOptimizedGraphBuilder* owner() { return owner_; }
BreakAndContinueScope* next() { return next_; }
// Search the break stack for a break or continue target.
+ enum BreakType { BREAK, CONTINUE };
HBasicBlock* Get(BreakableStatement* stmt, BreakType type, int* drop_extra);
private:
BreakAndContinueInfo* info_;
- HGraphBuilder* owner_;
+ HOptimizedGraphBuilder* owner_;
BreakAndContinueScope* next_;
};
- HGraphBuilder(CompilationInfo* info, TypeFeedbackOracle* oracle);
+ explicit HOptimizedGraphBuilder(CompilationInfo* info);
- HGraph* CreateGraph();
+ virtual bool BuildGraph() V8_OVERRIDE;
// Simple accessors.
- HGraph* graph() const { return graph_; }
BreakAndContinueScope* break_scope() const { return break_scope_; }
void set_break_scope(BreakAndContinueScope* head) { break_scope_ = head; }
- HBasicBlock* current_block() const { return current_block_; }
- void set_current_block(HBasicBlock* block) { current_block_ = block; }
- HEnvironment* environment() const {
- return current_block()->last_environment();
- }
-
bool inline_bailout() { return inline_bailout_; }
- // Adding instructions.
- HInstruction* AddInstruction(HInstruction* instr);
- void AddSimulate(BailoutId ast_id);
+ HValue* context() { return environment()->context(); }
- // Bailout environment manipulation.
- void Push(HValue* value) { environment()->Push(value); }
- HValue* Pop() { return environment()->Pop(); }
+ HOsrBuilder* osr() const { return osr_; }
- void Bailout(const char* reason);
+ void Bailout(BailoutReason reason);
HBasicBlock* CreateJoin(HBasicBlock* first,
HBasicBlock* second,
BailoutId join_id);
- TypeFeedbackOracle* oracle() const { return function_state()->oracle(); }
-
FunctionState* function_state() const { return function_state_; }
void VisitDeclarations(ZoneList<Declaration*>* declarations);
@@ -903,9 +1840,12 @@ class HGraphBuilder: public AstVisitor {
void operator delete(void* pointer, Zone* zone) { }
void operator delete(void* pointer) { }
- private:
+ DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
+
+ protected:
// Type of a member function that generates inline code for a native function.
- typedef void (HGraphBuilder::*InlineFunctionGenerator)(CallRuntime* call);
+ typedef void (HOptimizedGraphBuilder::*InlineFunctionGenerator)
+ (CallRuntime* call);
// Forward declarations for inner scope classes.
class SubgraphScope;
@@ -922,6 +1862,11 @@ class HGraphBuilder: public AstVisitor {
static const int kUnlimitedMaxInlinedNodes = 10000;
static const int kUnlimitedMaxInlinedNodesCumulative = 10000;
+ // Maximum depth and total number of elements and properties for literal
+ // graphs to be considered for fast deep-copying.
+ static const int kMaxFastLiteralDepth = 3;
+ static const int kMaxFastLiteralProperties = 8;
+
// Simple accessors.
void set_function_state(FunctionState* state) { function_state_ = state; }
@@ -929,7 +1874,7 @@ class HGraphBuilder: public AstVisitor {
void set_ast_context(AstContext* context) { ast_context_ = context; }
// Accessors forwarded to the function state.
- CompilationInfo* info() const {
+ CompilationInfo* current_info() const {
return function_state()->compilation_info();
}
AstContext* call_context() const {
@@ -960,9 +1905,6 @@ class HGraphBuilder: public AstVisitor {
void VisitDelete(UnaryOperation* expr);
void VisitVoid(UnaryOperation* expr);
void VisitTypeof(UnaryOperation* expr);
- void VisitAdd(UnaryOperation* expr);
- void VisitSub(UnaryOperation* expr);
- void VisitBitNot(UnaryOperation* expr);
void VisitNot(UnaryOperation* expr);
void VisitComma(BinaryOperation* expr);
@@ -970,8 +1912,6 @@ class HGraphBuilder: public AstVisitor {
void VisitArithmeticExpression(BinaryOperation* expr);
bool PreProcessOsrEntry(IterationStatement* statement);
- // True iff. we are compiling for OSR and the statement is the entry.
- bool HasOsrEntryAt(IterationStatement* statement);
void VisitLoopBody(IterationStatement* stmt,
HBasicBlock* loop_entry,
BreakAndContinueInfo* break_info);
@@ -989,6 +1929,12 @@ class HGraphBuilder: public AstVisitor {
HBasicBlock* loop_successor,
HBasicBlock* break_block);
+  // Builds a loop entry.
+  HBasicBlock* BuildLoopEntry();
+
+  // Builds a loop entry that respects OSR requirements.
+ HBasicBlock* BuildLoopEntry(IterationStatement* statement);
+
HBasicBlock* JoinContinue(IterationStatement* statement,
HBasicBlock* exit_block,
HBasicBlock* continue_block);
@@ -996,6 +1942,46 @@ class HGraphBuilder: public AstVisitor {
HValue* Top() const { return environment()->Top(); }
void Drop(int n) { environment()->Drop(n); }
void Bind(Variable* var, HValue* value) { environment()->Bind(var, value); }
+ bool IsEligibleForEnvironmentLivenessAnalysis(Variable* var,
+ int index,
+ HValue* value,
+ HEnvironment* env) {
+ if (!FLAG_analyze_environment_liveness) return false;
+ // |this| and |arguments| are always live; zapping parameters isn't
+ // safe because function.arguments can inspect them at any time.
+ return !var->is_this() &&
+ !var->is_arguments() &&
+ !value->IsArgumentsObject() &&
+ env->is_local_index(index);
+ }
+ void BindIfLive(Variable* var, HValue* value) {
+ HEnvironment* env = environment();
+ int index = env->IndexFor(var);
+ env->Bind(index, value);
+ if (IsEligibleForEnvironmentLivenessAnalysis(var, index, value, env)) {
+ HEnvironmentMarker* bind =
+ Add<HEnvironmentMarker>(HEnvironmentMarker::BIND, index);
+ USE(bind);
+#ifdef DEBUG
+ bind->set_closure(env->closure());
+#endif
+ }
+ }
+
+ HValue* LookupAndMakeLive(Variable* var) {
+ HEnvironment* env = environment();
+ int index = env->IndexFor(var);
+ HValue* value = env->Lookup(index);
+ if (IsEligibleForEnvironmentLivenessAnalysis(var, index, value, env)) {
+ HEnvironmentMarker* lookup =
+ Add<HEnvironmentMarker>(HEnvironmentMarker::LOOKUP, index);
+ USE(lookup);
+#ifdef DEBUG
+ lookup->set_closure(env->closure());
+#endif
+ }
+ return value;
+ }
// The value of the arguments object is allowed in some but not most value
// contexts. (It's allowed in all effect contexts and disallowed in all
@@ -1016,30 +2002,18 @@ class HGraphBuilder: public AstVisitor {
// Visit a list of expressions from left to right, each in a value context.
void VisitExpressions(ZoneList<Expression*>* exprs);
- void AddPhi(HPhi* phi);
-
- void PushAndAdd(HInstruction* instr);
-
// Remove the arguments from the bailout environment and emit instructions
// to push them as outgoing parameters.
template <class Instruction> HInstruction* PreProcessCall(Instruction* call);
- void TraceRepresentation(Token::Value op,
- TypeInfo info,
- HValue* value,
- Representation rep);
- static Representation ToRepresentation(TypeInfo info);
-
void SetUpScope(Scope* scope);
- virtual void VisitStatements(ZoneList<Statement*>* statements);
+ virtual void VisitStatements(ZoneList<Statement*>* statements) V8_OVERRIDE;
-#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
+#define DECLARE_VISIT(type) virtual void Visit##type(type* node) V8_OVERRIDE;
AST_NODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT
- HBasicBlock* CreateBasicBlock(HEnvironment* env);
- HBasicBlock* CreateLoopHeaderBlock();
-
+ private:
// Helpers for flow graph construction.
enum GlobalPropertyAccess {
kUseCell,
@@ -1066,10 +2040,16 @@ class HGraphBuilder: public AstVisitor {
bool TryInlineCall(Call* expr, bool drop_extra = false);
bool TryInlineConstruct(CallNew* expr, HValue* implicit_return_value);
- bool TryInlineGetter(Handle<JSFunction> getter, Property* prop);
+ bool TryInlineGetter(Handle<JSFunction> getter,
+ BailoutId ast_id,
+ BailoutId return_id);
bool TryInlineSetter(Handle<JSFunction> setter,
- Assignment* assignment,
+ BailoutId id,
+ BailoutId assignment_id,
HValue* implicit_return_value);
+ bool TryInlineApply(Handle<JSFunction> function,
+ Call* expr,
+ int arguments_count);
bool TryInlineBuiltinMethodCall(Call* expr,
HValue* receiver,
Handle<Map> receiver_map,
@@ -1085,16 +2065,113 @@ class HGraphBuilder: public AstVisitor {
void HandleGlobalVariableAssignment(Variable* var,
HValue* value,
- int position,
BailoutId ast_id);
void HandlePropertyAssignment(Assignment* expr);
void HandleCompoundAssignment(Assignment* expr);
- void HandlePolymorphicLoadNamedField(Property* expr,
+ void HandlePolymorphicLoadNamedField(BailoutId ast_id,
+ BailoutId return_id,
HValue* object,
SmallMapList* types,
Handle<String> name);
- void HandlePolymorphicStoreNamedField(Assignment* expr,
+
+ class PropertyAccessInfo {
+ public:
+ PropertyAccessInfo(Isolate* isolate, Handle<Map> map, Handle<String> name)
+ : lookup_(isolate),
+ map_(map),
+ name_(name),
+ access_(HObjectAccess::ForMap()) { }
+
+    // Checks whether this PropertyAccessInfo can be handled as a monomorphic
+    // named load. It additionally fills in the fields necessary to generate
+    // the lookup code.
+ bool CanLoadMonomorphic();
+
+    // Checks whether all types behave uniformly when loading name. If all
+    // maps behave the same, a single monomorphic load instruction can be
+    // emitted, guarded by a single map-check instruction that verifies
+    // whether the receiver is an instance of any of the types.
+ // This method skips the first type in types, assuming that this
+ // PropertyAccessInfo is built for types->first().
+ bool CanLoadAsMonomorphic(SmallMapList* types);
+
+ bool IsJSObjectFieldAccessor() {
+ int offset; // unused
+ return Accessors::IsJSObjectFieldAccessor(map_, name_, &offset);
+ }
+
+ bool GetJSObjectFieldAccess(HObjectAccess* access) {
+ if (IsStringLength()) {
+ *access = HObjectAccess::ForStringLength();
+ return true;
+ } else if (IsArrayLength()) {
+ *access = HObjectAccess::ForArrayLength(map_->elements_kind());
+ return true;
+ } else {
+ int offset;
+ if (Accessors::IsJSObjectFieldAccessor(map_, name_, &offset)) {
+ *access = HObjectAccess::ForJSObjectOffset(offset);
+ return true;
+ }
+ return false;
+ }
+ }
+
+ bool has_holder() { return !holder_.is_null(); }
+
+ LookupResult* lookup() { return &lookup_; }
+ Handle<Map> map() { return map_; }
+ Handle<JSObject> holder() { return holder_; }
+ Handle<JSFunction> accessor() { return accessor_; }
+ Handle<Object> constant() { return constant_; }
+ HObjectAccess access() { return access_; }
+
+ private:
+ Isolate* isolate() { return lookup_.isolate(); }
+
+ bool IsStringLength() {
+ return map_->instance_type() < FIRST_NONSTRING_TYPE &&
+ name_->Equals(isolate()->heap()->length_string());
+ }
+
+ bool IsArrayLength() {
+ return map_->instance_type() == JS_ARRAY_TYPE &&
+ name_->Equals(isolate()->heap()->length_string());
+ }
+
+ bool LoadResult(Handle<Map> map);
+ bool LookupDescriptor();
+ bool LookupInPrototypes();
+ bool IsCompatibleForLoad(PropertyAccessInfo* other);
+
+ void GeneralizeRepresentation(Representation r) {
+ access_ = access_.WithRepresentation(
+ access_.representation().generalize(r));
+ }
+
+ LookupResult lookup_;
+ Handle<Map> map_;
+ Handle<String> name_;
+ Handle<JSObject> holder_;
+ Handle<JSFunction> accessor_;
+ Handle<Object> constant_;
+ HObjectAccess access_;
+ };
+
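+  // Illustrative sketch (not part of this change): the intended calling
+  // pattern for PropertyAccessInfo when lowering a polymorphic named load
+  // (control flow heavily simplified):
+  //
+  //   PropertyAccessInfo info(isolate(), types->first(), name);
+  //   if (info.CanLoadAsMonomorphic(types)) {
+  //     // one map check covering all maps, then a single load
+  //   } else {
+  //     // per-map branches or a generic load
+  //   }
+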
+ HInstruction* BuildLoadMonomorphic(PropertyAccessInfo* info,
+ HValue* object,
+ HInstruction* checked_object,
+ BailoutId ast_id,
+ BailoutId return_id,
+ bool can_inline_accessor = true);
+
+ void HandlePolymorphicStoreNamedField(BailoutId assignment_id,
+ HValue* object,
+ HValue* value,
+ SmallMapList* types,
+ Handle<String> name);
+ bool TryStorePolymorphicAsMonomorphic(BailoutId assignment_id,
HValue* object,
HValue* value,
SmallMapList* types,
@@ -1103,103 +2180,91 @@ class HGraphBuilder: public AstVisitor {
HValue* receiver,
SmallMapList* types,
Handle<String> name);
+ bool TryCallPolymorphicAsMonomorphic(Call* expr,
+ HValue* receiver,
+ SmallMapList* types,
+ Handle<String> name);
void HandleLiteralCompareTypeof(CompareOperation* expr,
- HTypeof* typeof_expr,
+ Expression* sub_expr,
Handle<String> check);
void HandleLiteralCompareNil(CompareOperation* expr,
- HValue* value,
+ Expression* sub_expr,
NilValue nil);
- HStringCharCodeAt* BuildStringCharCodeAt(HValue* context,
- HValue* string,
- HValue* index);
+ HInstruction* BuildStringCharCodeAt(HValue* string,
+ HValue* index);
HInstruction* BuildBinaryOperation(BinaryOperation* expr,
HValue* left,
HValue* right);
HInstruction* BuildIncrement(bool returns_original_input,
CountOperation* expr);
- HInstruction* BuildFastElementAccess(HValue* elements,
- HValue* checked_key,
- HValue* val,
- HValue* dependency,
- ElementsKind elements_kind,
- bool is_store);
+ HInstruction* BuildLoadKeyedGeneric(HValue* object,
+ HValue* key);
HInstruction* TryBuildConsolidatedElementLoad(HValue* object,
HValue* key,
HValue* val,
SmallMapList* maps);
- HInstruction* BuildUncheckedMonomorphicElementAccess(HValue* object,
- HValue* key,
- HValue* val,
- HCheckMaps* mapcheck,
- Handle<Map> map,
- bool is_store);
+ LoadKeyedHoleMode BuildKeyedHoleMode(Handle<Map> map);
HInstruction* BuildMonomorphicElementAccess(HValue* object,
HValue* key,
HValue* val,
HValue* dependency,
Handle<Map> map,
- bool is_store);
+ bool is_store,
+ KeyedAccessStoreMode store_mode);
HValue* HandlePolymorphicElementAccess(HValue* object,
HValue* key,
HValue* val,
- Expression* prop,
- BailoutId ast_id,
- int position,
+ SmallMapList* maps,
bool is_store,
+ KeyedAccessStoreMode store_mode,
bool* has_side_effects);
HValue* HandleKeyedElementAccess(HValue* obj,
HValue* key,
HValue* val,
Expression* expr,
- BailoutId ast_id,
- int position,
bool is_store,
bool* has_side_effects);
- HLoadNamedField* BuildLoadNamedField(HValue* object,
- Handle<Map> map,
- LookupResult* result,
- bool smi_and_map_check);
HInstruction* BuildLoadNamedGeneric(HValue* object,
Handle<String> name,
Property* expr);
- HInstruction* BuildCallGetter(HValue* object,
- Handle<Map> map,
- Handle<JSFunction> getter,
- Handle<JSObject> holder);
- HInstruction* BuildLoadNamedMonomorphic(HValue* object,
- Handle<String> name,
- Property* expr,
- Handle<Map> map);
- HInstruction* BuildLoadKeyedGeneric(HValue* object, HValue* key);
- HInstruction* BuildExternalArrayElementAccess(
- HValue* external_elements,
- HValue* checked_key,
- HValue* val,
- HValue* dependency,
- ElementsKind elements_kind,
- bool is_store);
+
+ HCheckMaps* AddCheckMap(HValue* object, Handle<Map> map);
+
+ void BuildLoad(Property* property,
+ BailoutId ast_id);
+ void PushLoad(Property* property,
+ HValue* object,
+ HValue* key);
+
+ void BuildStoreForEffect(Expression* expression,
+ Property* prop,
+ BailoutId ast_id,
+ BailoutId return_id,
+ HValue* object,
+ HValue* key,
+ HValue* value);
+
+ void BuildStore(Expression* expression,
+ Property* prop,
+ BailoutId ast_id,
+ BailoutId return_id,
+ bool is_uninitialized = false);
HInstruction* BuildStoreNamedField(HValue* object,
Handle<String> name,
HValue* value,
Handle<Map> map,
- LookupResult* lookup,
- bool smi_and_map_check);
+ LookupResult* lookup);
HInstruction* BuildStoreNamedGeneric(HValue* object,
Handle<String> name,
HValue* value);
- HInstruction* BuildCallSetter(HValue* object,
- HValue* value,
- Handle<Map> map,
- Handle<JSFunction> setter,
- Handle<JSObject> holder);
HInstruction* BuildStoreNamedMonomorphic(HValue* object,
Handle<String> name,
HValue* value,
@@ -1212,12 +2277,40 @@ class HGraphBuilder: public AstVisitor {
HInstruction* BuildThisFunction();
+ HInstruction* BuildFastLiteral(Handle<JSObject> boilerplate_object,
+ AllocationSiteContext* site_context);
+
+ void BuildEmitObjectHeader(Handle<JSObject> boilerplate_object,
+ HInstruction* object);
+
+ void BuildInitElementsInObjectHeader(Handle<JSObject> boilerplate_object,
+ HInstruction* object,
+ HInstruction* object_elements);
+
+ void BuildEmitInObjectProperties(Handle<JSObject> boilerplate_object,
+ HInstruction* object,
+ AllocationSiteContext* site_context);
+
+ void BuildEmitElements(Handle<JSObject> boilerplate_object,
+ Handle<FixedArrayBase> elements,
+ HValue* object_elements,
+ AllocationSiteContext* site_context);
+
+ void BuildEmitFixedDoubleArray(Handle<FixedArrayBase> elements,
+ ElementsKind kind,
+ HValue* object_elements);
+
+ void BuildEmitFixedArray(Handle<FixedArrayBase> elements,
+ ElementsKind kind,
+ HValue* object_elements,
+ AllocationSiteContext* site_context);
+
+ void AddCheckPrototypeMaps(Handle<JSObject> holder,
+ Handle<Map> receiver_map);
+
void AddCheckConstantFunction(Handle<JSObject> holder,
HValue* receiver,
- Handle<Map> receiver_map,
- bool smi_and_map_check);
-
- Zone* zone() const { return zone_; }
+ Handle<Map> receiver_map);
// The translation state of the currently-being-translated function.
FunctionState* function_state_;
@@ -1232,196 +2325,102 @@ class HGraphBuilder: public AstVisitor {
// A stack of breakable statements entered.
BreakAndContinueScope* break_scope_;
- HGraph* graph_;
- HBasicBlock* current_block_;
-
int inlined_count_;
ZoneList<Handle<Object> > globals_;
- Zone* zone_;
-
bool inline_bailout_;
+ HOsrBuilder* osr_;
+
friend class FunctionState; // Pushes and pops the state stack.
friend class AstContext; // Pushes and pops the AST context stack.
+ friend class KeyedLoadFastElementStub;
+ friend class HOsrBuilder;
- DISALLOW_COPY_AND_ASSIGN(HGraphBuilder);
+ DISALLOW_COPY_AND_ASSIGN(HOptimizedGraphBuilder);
};
Zone* AstContext::zone() const { return owner_->zone(); }
-class HValueMap: public ZoneObject {
+class HStatistics V8_FINAL: public Malloced {
public:
- explicit HValueMap(Zone* zone)
- : array_size_(0),
- lists_size_(0),
- count_(0),
- present_flags_(0),
- array_(NULL),
- lists_(NULL),
- free_list_head_(kNil) {
- ResizeLists(kInitialSize, zone);
- Resize(kInitialSize, zone);
- }
-
- void Kill(GVNFlagSet flags);
-
- void Add(HValue* value, Zone* zone) {
- present_flags_.Add(value->gvn_flags());
- Insert(value, zone);
- }
-
- HValue* Lookup(HValue* value) const;
-
- HValueMap* Copy(Zone* zone) const {
- return new(zone) HValueMap(zone, this);
- }
-
- bool IsEmpty() const { return count_ == 0; }
-
- private:
- // A linked list of HValue* values. Stored in arrays.
- struct HValueMapListElement {
- HValue* value;
- int next; // Index in the array of the next list element.
- };
- static const int kNil = -1; // The end of a linked list
-
- // Must be a power of 2.
- static const int kInitialSize = 16;
-
- HValueMap(Zone* zone, const HValueMap* other);
-
- void Resize(int new_size, Zone* zone);
- void ResizeLists(int new_size, Zone* zone);
- void Insert(HValue* value, Zone* zone);
- uint32_t Bound(uint32_t value) const { return value & (array_size_ - 1); }
-
- int array_size_;
- int lists_size_;
- int count_; // The number of values stored in the HValueMap.
- GVNFlagSet present_flags_; // All flags that are in any value in the
- // HValueMap.
- HValueMapListElement* array_; // Primary store - contains the first value
- // with a given hash. Colliding elements are stored in linked lists.
- HValueMapListElement* lists_; // The linked lists containing hash collisions.
- int free_list_head_; // Unused elements in lists_ are on the free list.
-};
-
-
-class HSideEffectMap BASE_EMBEDDED {
- public:
- HSideEffectMap();
- explicit HSideEffectMap(HSideEffectMap* other);
- HSideEffectMap& operator= (const HSideEffectMap& other);
-
- void Kill(GVNFlagSet flags);
-
- void Store(GVNFlagSet flags, HInstruction* instr);
+ HStatistics()
+ : times_(5),
+ names_(5),
+ sizes_(5),
+ total_size_(0),
+ source_size_(0) { }
- bool IsEmpty() const { return count_ == 0; }
+ void Initialize(CompilationInfo* info);
+ void Print();
+ void SaveTiming(const char* name, TimeDelta time, unsigned size);
- inline HInstruction* operator[](int i) const {
- ASSERT(0 <= i);
- ASSERT(i < kNumberOfTrackedSideEffects);
- return data_[i];
+ void IncrementFullCodeGen(TimeDelta full_code_gen) {
+ full_code_gen_ += full_code_gen;
}
- inline HInstruction* at(int i) const { return operator[](i); }
-
- private:
- int count_;
- HInstruction* data_[kNumberOfTrackedSideEffects];
-};
-
-class HStatistics: public Malloced {
- public:
- void Initialize(CompilationInfo* info);
- void Print();
- void SaveTiming(const char* name, int64_t ticks, unsigned size);
- static HStatistics* Instance() {
- static SetOncePointer<HStatistics> instance;
- if (!instance.is_set()) {
- instance.set(new HStatistics());
- }
- return instance.get();
+ void IncrementSubtotals(TimeDelta create_graph,
+ TimeDelta optimize_graph,
+ TimeDelta generate_code) {
+ create_graph_ += create_graph;
+ optimize_graph_ += optimize_graph;
+ generate_code_ += generate_code;
}
private:
- HStatistics()
- : timing_(5),
- names_(5),
- sizes_(5),
- total_(0),
- total_size_(0),
- full_code_gen_(0),
- source_size_(0) { }
-
- List<int64_t> timing_;
+ List<TimeDelta> times_;
List<const char*> names_;
List<unsigned> sizes_;
- int64_t total_;
+ TimeDelta create_graph_;
+ TimeDelta optimize_graph_;
+ TimeDelta generate_code_;
unsigned total_size_;
- int64_t full_code_gen_;
+ TimeDelta full_code_gen_;
double source_size_;
};
-class HPhase BASE_EMBEDDED {
+class HPhase : public CompilationPhase {
public:
- static const char* const kFullCodeGen;
- static const char* const kTotal;
-
- explicit HPhase(const char* name) { Begin(name, NULL, NULL, NULL); }
- HPhase(const char* name, HGraph* graph) {
- Begin(name, graph, NULL, NULL);
- }
- HPhase(const char* name, LChunk* chunk) {
- Begin(name, NULL, chunk, NULL);
- }
- HPhase(const char* name, LAllocator* allocator) {
- Begin(name, NULL, NULL, allocator);
- }
+ HPhase(const char* name, HGraph* graph)
+ : CompilationPhase(name, graph->info()),
+ graph_(graph) { }
+ ~HPhase();
- ~HPhase() {
- End();
- }
+ protected:
+ HGraph* graph() const { return graph_; }
private:
- void Begin(const char* name,
- HGraph* graph,
- LChunk* chunk,
- LAllocator* allocator);
- void End() const;
-
- int64_t start_;
- const char* name_;
HGraph* graph_;
- LChunk* chunk_;
- LAllocator* allocator_;
- unsigned start_allocation_size_;
+
+ DISALLOW_COPY_AND_ASSIGN(HPhase);
};
-class HTracer: public Malloced {
+class HTracer V8_FINAL : public Malloced {
public:
- void TraceCompilation(FunctionLiteral* function);
+ explicit HTracer(int isolate_id)
+ : trace_(&string_allocator_), indent_(0) {
+ if (FLAG_trace_hydrogen_file == NULL) {
+ OS::SNPrintF(filename_,
+ "hydrogen-%d-%d.cfg",
+ OS::GetCurrentProcessId(),
+ isolate_id);
+ } else {
+ OS::StrNCpy(filename_, FLAG_trace_hydrogen_file, filename_.length());
+ }
+ WriteChars(filename_.start(), "", 0, false);
+ }
+
+ void TraceCompilation(CompilationInfo* info);
void TraceHydrogen(const char* name, HGraph* graph);
void TraceLithium(const char* name, LChunk* chunk);
void TraceLiveRanges(const char* name, LAllocator* allocator);
- static HTracer* Instance() {
- static SetOncePointer<HTracer> instance;
- if (!instance.is_set()) {
- instance.set(new HTracer("hydrogen.cfg"));
- }
- return instance.get();
- }
-
private:
- class Tag BASE_EMBEDDED {
+ class Tag V8_FINAL BASE_EMBEDDED {
public:
Tag(HTracer* tracer, const char* name) {
name_ = name;
@@ -1444,11 +2443,6 @@ class HTracer: public Malloced {
const char* name_;
};
- explicit HTracer(const char* filename)
- : filename_(filename), trace_(&string_allocator_), indent_(0) {
- WriteChars(filename, "", 0, false);
- }
-
void TraceLiveRange(LiveRange* range, const char* type, Zone* zone);
void Trace(const char* name, HGraph* graph, LChunk* chunk);
void FlushToFile();
@@ -1484,13 +2478,28 @@ class HTracer: public Malloced {
}
}
- const char* filename_;
+ EmbeddedVector<char, 64> filename_;
HeapStringAllocator string_allocator_;
StringStream trace_;
int indent_;
};
+class NoObservableSideEffectsScope V8_FINAL {
+ public:
+ explicit NoObservableSideEffectsScope(HGraphBuilder* builder) :
+ builder_(builder) {
+ builder_->graph()->IncrementInNoSideEffectsScope();
+ }
+ ~NoObservableSideEffectsScope() {
+ builder_->graph()->DecrementInNoSideEffectsScope();
+ }
+
+ private:
+ HGraphBuilder* builder_;
+};
+
+
} } // namespace v8::internal
#endif // V8_HYDROGEN_H_
diff --git a/deps/v8/src/i18n.cc b/deps/v8/src/i18n.cc
new file mode 100644
index 000000000..dbff6e5f5
--- /dev/null
+++ b/deps/v8/src/i18n.cc
@@ -0,0 +1,1070 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "i18n.h"
+
+#include "unicode/brkiter.h"
+#include "unicode/calendar.h"
+#include "unicode/coll.h"
+#include "unicode/curramt.h"
+#include "unicode/dcfmtsym.h"
+#include "unicode/decimfmt.h"
+#include "unicode/dtfmtsym.h"
+#include "unicode/dtptngen.h"
+#include "unicode/locid.h"
+#include "unicode/numfmt.h"
+#include "unicode/numsys.h"
+#include "unicode/rbbi.h"
+#include "unicode/smpdtfmt.h"
+#include "unicode/timezone.h"
+#include "unicode/uchar.h"
+#include "unicode/ucol.h"
+#include "unicode/ucurr.h"
+#include "unicode/unum.h"
+#include "unicode/uversion.h"
+
+namespace v8 {
+namespace internal {
+
+namespace {
+
+bool ExtractStringSetting(Isolate* isolate,
+ Handle<JSObject> options,
+ const char* key,
+ icu::UnicodeString* setting) {
+ Handle<String> str = isolate->factory()->NewStringFromAscii(CStrVector(key));
+ MaybeObject* maybe_object = options->GetProperty(*str);
+ Object* object;
+ if (maybe_object->ToObject(&object) && object->IsString()) {
+ v8::String::Utf8Value utf8_string(
+ v8::Utils::ToLocal(Handle<String>(String::cast(object))));
+ *setting = icu::UnicodeString::fromUTF8(*utf8_string);
+ return true;
+ }
+ return false;
+}
+
+
+bool ExtractIntegerSetting(Isolate* isolate,
+ Handle<JSObject> options,
+ const char* key,
+ int32_t* value) {
+ Handle<String> str = isolate->factory()->NewStringFromAscii(CStrVector(key));
+ MaybeObject* maybe_object = options->GetProperty(*str);
+ Object* object;
+ if (maybe_object->ToObject(&object) && object->IsNumber()) {
+ object->ToInt32(value);
+ return true;
+ }
+ return false;
+}
+
+
+bool ExtractBooleanSetting(Isolate* isolate,
+ Handle<JSObject> options,
+ const char* key,
+ bool* value) {
+ Handle<String> str = isolate->factory()->NewStringFromAscii(CStrVector(key));
+ MaybeObject* maybe_object = options->GetProperty(*str);
+ Object* object;
+ if (maybe_object->ToObject(&object) && object->IsBoolean()) {
+ *value = object->BooleanValue();
+ return true;
+ }
+ return false;
+}
+
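+// Illustrative sketch (not part of this change): the Extract*Setting helpers
+// above are combined when reading a JS options object, e.g.:
+//
+//   icu::UnicodeString style;
+//   int32_t digits;
+//   if (ExtractStringSetting(isolate, options, "style", &style) &&
+//       ExtractIntegerSetting(isolate, options, "maximumFractionDigits",
+//                             &digits)) {
+//     // configure the formatter from |style| and |digits|
+//   }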
+
+icu::SimpleDateFormat* CreateICUDateFormat(
+ Isolate* isolate,
+ const icu::Locale& icu_locale,
+ Handle<JSObject> options) {
+  // Create the time zone as specified by the user. We have to create a fresh
+  // time zone object each time since the calendar takes ownership of it.
+ icu::TimeZone* tz = NULL;
+ icu::UnicodeString timezone;
+ if (ExtractStringSetting(isolate, options, "timeZone", &timezone)) {
+ tz = icu::TimeZone::createTimeZone(timezone);
+ } else {
+ tz = icu::TimeZone::createDefault();
+ }
+
+ // Create a calendar using locale, and apply time zone to it.
+ UErrorCode status = U_ZERO_ERROR;
+ icu::Calendar* calendar =
+ icu::Calendar::createInstance(tz, icu_locale, status);
+
+  // Make the formatter from the skeleton. Calendar and numbering system are
+  // added to the locale as Unicode extensions (if they were specified at all).
+ icu::SimpleDateFormat* date_format = NULL;
+ icu::UnicodeString skeleton;
+ if (ExtractStringSetting(isolate, options, "skeleton", &skeleton)) {
+ icu::DateTimePatternGenerator* generator =
+ icu::DateTimePatternGenerator::createInstance(icu_locale, status);
+ icu::UnicodeString pattern;
+ if (U_SUCCESS(status)) {
+ pattern = generator->getBestPattern(skeleton, status);
+ delete generator;
+ }
+
+ date_format = new icu::SimpleDateFormat(pattern, icu_locale, status);
+ if (U_SUCCESS(status)) {
+ date_format->adoptCalendar(calendar);
+ }
+ }
+
+ if (U_FAILURE(status)) {
+ delete calendar;
+ delete date_format;
+ date_format = NULL;
+ }
+
+ return date_format;
+}
+
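+// Illustrative sketch (not part of this change): with an options object such
+// as {skeleton: "yMMMd", timeZone: "Europe/Paris"}, CreateICUDateFormat
+// resolves the skeleton to the locale's best pattern and attaches a calendar
+// in that time zone. The caller owns the returned formatter:
+//
+//   icu::SimpleDateFormat* format =
+//       CreateICUDateFormat(isolate, icu::Locale("fr"), options);
+//   ...
+//   delete format;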
+
+void SetResolvedDateSettings(Isolate* isolate,
+ const icu::Locale& icu_locale,
+ icu::SimpleDateFormat* date_format,
+ Handle<JSObject> resolved) {
+ UErrorCode status = U_ZERO_ERROR;
+ icu::UnicodeString pattern;
+ date_format->toPattern(pattern);
+ JSObject::SetProperty(
+ resolved,
+ isolate->factory()->NewStringFromAscii(CStrVector("pattern")),
+ isolate->factory()->NewStringFromTwoByte(
+ Vector<const uint16_t>(
+ reinterpret_cast<const uint16_t*>(pattern.getBuffer()),
+ pattern.length())),
+ NONE,
+ kNonStrictMode);
+
+ // Set time zone and calendar.
+ const icu::Calendar* calendar = date_format->getCalendar();
+ const char* calendar_name = calendar->getType();
+ JSObject::SetProperty(
+ resolved,
+ isolate->factory()->NewStringFromAscii(CStrVector("calendar")),
+ isolate->factory()->NewStringFromAscii(CStrVector(calendar_name)),
+ NONE,
+ kNonStrictMode);
+
+ const icu::TimeZone& tz = calendar->getTimeZone();
+ icu::UnicodeString time_zone;
+ tz.getID(time_zone);
+
+ icu::UnicodeString canonical_time_zone;
+ icu::TimeZone::getCanonicalID(time_zone, canonical_time_zone, status);
+ if (U_SUCCESS(status)) {
+ if (canonical_time_zone == UNICODE_STRING_SIMPLE("Etc/GMT")) {
+ JSObject::SetProperty(
+ resolved,
+ isolate->factory()->NewStringFromAscii(CStrVector("timeZone")),
+ isolate->factory()->NewStringFromAscii(CStrVector("UTC")),
+ NONE,
+ kNonStrictMode);
+ } else {
+ JSObject::SetProperty(
+ resolved,
+ isolate->factory()->NewStringFromAscii(CStrVector("timeZone")),
+ isolate->factory()->NewStringFromTwoByte(
+ Vector<const uint16_t>(
+ reinterpret_cast<const uint16_t*>(
+ canonical_time_zone.getBuffer()),
+ canonical_time_zone.length())),
+ NONE,
+ kNonStrictMode);
+ }
+ }
+
+  // Ugly hack. ICU doesn't expose the numbering system in any way, so we have
+  // to assume that for a given locale the NumberingSystem constructor produces
+  // the same digits as NumberFormat/Calendar would.
+ status = U_ZERO_ERROR;
+ icu::NumberingSystem* numbering_system =
+ icu::NumberingSystem::createInstance(icu_locale, status);
+ if (U_SUCCESS(status)) {
+ const char* ns = numbering_system->getName();
+ JSObject::SetProperty(
+ resolved,
+ isolate->factory()->NewStringFromAscii(CStrVector("numberingSystem")),
+ isolate->factory()->NewStringFromAscii(CStrVector(ns)),
+ NONE,
+ kNonStrictMode);
+ } else {
+ JSObject::SetProperty(
+ resolved,
+ isolate->factory()->NewStringFromAscii(CStrVector("numberingSystem")),
+ isolate->factory()->undefined_value(),
+ NONE,
+ kNonStrictMode);
+ }
+ delete numbering_system;
+
+ // Set the locale
+ char result[ULOC_FULLNAME_CAPACITY];
+ status = U_ZERO_ERROR;
+ uloc_toLanguageTag(
+ icu_locale.getName(), result, ULOC_FULLNAME_CAPACITY, FALSE, &status);
+ if (U_SUCCESS(status)) {
+ JSObject::SetProperty(
+ resolved,
+ isolate->factory()->NewStringFromAscii(CStrVector("locale")),
+ isolate->factory()->NewStringFromAscii(CStrVector(result)),
+ NONE,
+ kNonStrictMode);
+ } else {
+    // This should never happen, since we got the locale from ICU.
+ JSObject::SetProperty(
+ resolved,
+ isolate->factory()->NewStringFromAscii(CStrVector("locale")),
+ isolate->factory()->NewStringFromAscii(CStrVector("und")),
+ NONE,
+ kNonStrictMode);
+ }
+}
+
+
+template<int internal_fields, EternalHandles::SingletonHandle field>
+Handle<ObjectTemplateInfo> GetEternal(Isolate* isolate) {
+ if (isolate->eternal_handles()->Exists(field)) {
+ return Handle<ObjectTemplateInfo>::cast(
+ isolate->eternal_handles()->GetSingleton(field));
+ }
+ v8::Local<v8::ObjectTemplate> raw_template(v8::ObjectTemplate::New());
+ raw_template->SetInternalFieldCount(internal_fields);
+ return Handle<ObjectTemplateInfo>::cast(
+ isolate->eternal_handles()->CreateSingleton(
+ isolate,
+ *v8::Utils::OpenHandle(*raw_template),
+ field));
+}
+
+
+icu::DecimalFormat* CreateICUNumberFormat(
+ Isolate* isolate,
+ const icu::Locale& icu_locale,
+ Handle<JSObject> options) {
+  // Make the formatter from options. The numbering system is added
+  // to the locale as a Unicode extension (if it was specified at all).
+ UErrorCode status = U_ZERO_ERROR;
+ icu::DecimalFormat* number_format = NULL;
+ icu::UnicodeString style;
+ icu::UnicodeString currency;
+ if (ExtractStringSetting(isolate, options, "style", &style)) {
+ if (style == UNICODE_STRING_SIMPLE("currency")) {
+ icu::UnicodeString display;
+ ExtractStringSetting(isolate, options, "currency", &currency);
+ ExtractStringSetting(isolate, options, "currencyDisplay", &display);
+
+#if (U_ICU_VERSION_MAJOR_NUM == 4) && (U_ICU_VERSION_MINOR_NUM <= 6)
+ icu::NumberFormat::EStyles format_style;
+ if (display == UNICODE_STRING_SIMPLE("code")) {
+ format_style = icu::NumberFormat::kIsoCurrencyStyle;
+ } else if (display == UNICODE_STRING_SIMPLE("name")) {
+ format_style = icu::NumberFormat::kPluralCurrencyStyle;
+ } else {
+ format_style = icu::NumberFormat::kCurrencyStyle;
+ }
+#else // ICU version is 4.8 or above (we ignore versions below 4.0).
+ UNumberFormatStyle format_style;
+ if (display == UNICODE_STRING_SIMPLE("code")) {
+ format_style = UNUM_CURRENCY_ISO;
+ } else if (display == UNICODE_STRING_SIMPLE("name")) {
+ format_style = UNUM_CURRENCY_PLURAL;
+ } else {
+ format_style = UNUM_CURRENCY;
+ }
+#endif
+
+ number_format = static_cast<icu::DecimalFormat*>(
+ icu::NumberFormat::createInstance(icu_locale, format_style, status));
+ } else if (style == UNICODE_STRING_SIMPLE("percent")) {
+ number_format = static_cast<icu::DecimalFormat*>(
+ icu::NumberFormat::createPercentInstance(icu_locale, status));
+ if (U_FAILURE(status)) {
+ delete number_format;
+ return NULL;
+ }
+ // Make sure 1.1% doesn't go into 2%.
+ number_format->setMinimumFractionDigits(1);
+ } else {
+ // Make a decimal instance by default.
+ number_format = static_cast<icu::DecimalFormat*>(
+ icu::NumberFormat::createInstance(icu_locale, status));
+ }
+ }
+
+ if (U_FAILURE(status)) {
+ delete number_format;
+ return NULL;
+ }
+
+ // Set all options.
+ if (!currency.isEmpty()) {
+ number_format->setCurrency(currency.getBuffer(), status);
+ }
+
+ int32_t digits;
+ if (ExtractIntegerSetting(
+ isolate, options, "minimumIntegerDigits", &digits)) {
+ number_format->setMinimumIntegerDigits(digits);
+ }
+
+ if (ExtractIntegerSetting(
+ isolate, options, "minimumFractionDigits", &digits)) {
+ number_format->setMinimumFractionDigits(digits);
+ }
+
+ if (ExtractIntegerSetting(
+ isolate, options, "maximumFractionDigits", &digits)) {
+ number_format->setMaximumFractionDigits(digits);
+ }
+
+ bool significant_digits_used = false;
+ if (ExtractIntegerSetting(
+ isolate, options, "minimumSignificantDigits", &digits)) {
+ number_format->setMinimumSignificantDigits(digits);
+ significant_digits_used = true;
+ }
+
+ if (ExtractIntegerSetting(
+ isolate, options, "maximumSignificantDigits", &digits)) {
+ number_format->setMaximumSignificantDigits(digits);
+ significant_digits_used = true;
+ }
+
+ number_format->setSignificantDigitsUsed(significant_digits_used);
+
+ bool grouping;
+ if (ExtractBooleanSetting(isolate, options, "useGrouping", &grouping)) {
+ number_format->setGroupingUsed(grouping);
+ }
+
+ // Set rounding mode.
+ number_format->setRoundingMode(icu::DecimalFormat::kRoundHalfUp);
+
+ return number_format;
+}
+
+
+void SetResolvedNumberSettings(Isolate* isolate,
+ const icu::Locale& icu_locale,
+ icu::DecimalFormat* number_format,
+ Handle<JSObject> resolved) {
+ icu::UnicodeString pattern;
+ number_format->toPattern(pattern);
+ JSObject::SetProperty(
+ resolved,
+ isolate->factory()->NewStringFromAscii(CStrVector("pattern")),
+ isolate->factory()->NewStringFromTwoByte(
+ Vector<const uint16_t>(
+ reinterpret_cast<const uint16_t*>(pattern.getBuffer()),
+ pattern.length())),
+ NONE,
+ kNonStrictMode);
+
+ // Set resolved currency code in options.currency if not empty.
+ icu::UnicodeString currency(number_format->getCurrency());
+ if (!currency.isEmpty()) {
+ JSObject::SetProperty(
+ resolved,
+ isolate->factory()->NewStringFromAscii(CStrVector("currency")),
+ isolate->factory()->NewStringFromTwoByte(
+ Vector<const uint16_t>(
+ reinterpret_cast<const uint16_t*>(currency.getBuffer()),
+ currency.length())),
+ NONE,
+ kNonStrictMode);
+ }
+
+  // Ugly hack. ICU doesn't expose the numbering system in any way, so we
+  // have to assume that for a given locale the NumberingSystem constructor
+  // produces the same digits as NumberFormat/Calendar would.
+ UErrorCode status = U_ZERO_ERROR;
+ icu::NumberingSystem* numbering_system =
+ icu::NumberingSystem::createInstance(icu_locale, status);
+ if (U_SUCCESS(status)) {
+ const char* ns = numbering_system->getName();
+ JSObject::SetProperty(
+ resolved,
+ isolate->factory()->NewStringFromAscii(CStrVector("numberingSystem")),
+ isolate->factory()->NewStringFromAscii(CStrVector(ns)),
+ NONE,
+ kNonStrictMode);
+ } else {
+ JSObject::SetProperty(
+ resolved,
+ isolate->factory()->NewStringFromAscii(CStrVector("numberingSystem")),
+ isolate->factory()->undefined_value(),
+ NONE,
+ kNonStrictMode);
+ }
+ delete numbering_system;
+
+ JSObject::SetProperty(
+ resolved,
+ isolate->factory()->NewStringFromAscii(CStrVector("useGrouping")),
+ isolate->factory()->ToBoolean(number_format->isGroupingUsed()),
+ NONE,
+ kNonStrictMode);
+
+ JSObject::SetProperty(
+ resolved,
+ isolate->factory()->NewStringFromAscii(
+ CStrVector("minimumIntegerDigits")),
+ isolate->factory()->NewNumberFromInt(
+ number_format->getMinimumIntegerDigits()),
+ NONE,
+ kNonStrictMode);
+
+ JSObject::SetProperty(
+ resolved,
+ isolate->factory()->NewStringFromAscii(
+ CStrVector("minimumFractionDigits")),
+ isolate->factory()->NewNumberFromInt(
+ number_format->getMinimumFractionDigits()),
+ NONE,
+ kNonStrictMode);
+
+ JSObject::SetProperty(
+ resolved,
+ isolate->factory()->NewStringFromAscii(
+ CStrVector("maximumFractionDigits")),
+ isolate->factory()->NewNumberFromInt(
+ number_format->getMaximumFractionDigits()),
+ NONE,
+ kNonStrictMode);
+
+ Handle<String> key = isolate->factory()->NewStringFromAscii(
+ CStrVector("minimumSignificantDigits"));
+ if (JSReceiver::HasLocalProperty(resolved, key)) {
+ JSObject::SetProperty(
+ resolved,
+ isolate->factory()->NewStringFromAscii(
+ CStrVector("minimumSignificantDigits")),
+ isolate->factory()->NewNumberFromInt(
+ number_format->getMinimumSignificantDigits()),
+ NONE,
+ kNonStrictMode);
+ }
+
+ key = isolate->factory()->NewStringFromAscii(
+ CStrVector("maximumSignificantDigits"));
+ if (JSReceiver::HasLocalProperty(resolved, key)) {
+ JSObject::SetProperty(
+ resolved,
+ isolate->factory()->NewStringFromAscii(
+ CStrVector("maximumSignificantDigits")),
+ isolate->factory()->NewNumberFromInt(
+ number_format->getMaximumSignificantDigits()),
+ NONE,
+ kNonStrictMode);
+ }
+
+ // Set the locale
+ char result[ULOC_FULLNAME_CAPACITY];
+ status = U_ZERO_ERROR;
+ uloc_toLanguageTag(
+ icu_locale.getName(), result, ULOC_FULLNAME_CAPACITY, FALSE, &status);
+ if (U_SUCCESS(status)) {
+ JSObject::SetProperty(
+ resolved,
+ isolate->factory()->NewStringFromAscii(CStrVector("locale")),
+ isolate->factory()->NewStringFromAscii(CStrVector(result)),
+ NONE,
+ kNonStrictMode);
+ } else {
+ // This would never happen, since we got the locale from ICU.
+ JSObject::SetProperty(
+ resolved,
+ isolate->factory()->NewStringFromAscii(CStrVector("locale")),
+ isolate->factory()->NewStringFromAscii(CStrVector("und")),
+ NONE,
+ kNonStrictMode);
+ }
+}
+
+
+icu::Collator* CreateICUCollator(
+ Isolate* isolate,
+ const icu::Locale& icu_locale,
+ Handle<JSObject> options) {
+ // Make collator from options.
+ icu::Collator* collator = NULL;
+ UErrorCode status = U_ZERO_ERROR;
+ collator = icu::Collator::createInstance(icu_locale, status);
+
+ if (U_FAILURE(status)) {
+ delete collator;
+ return NULL;
+ }
+
+ // Set flags first, and then override them with sensitivity if necessary.
+ bool numeric;
+ if (ExtractBooleanSetting(isolate, options, "numeric", &numeric)) {
+ collator->setAttribute(
+ UCOL_NUMERIC_COLLATION, numeric ? UCOL_ON : UCOL_OFF, status);
+ }
+
+ // Normalization is always on, by the spec. We are free to optimize
+ // if the strings are already normalized (but we don't have a way to tell
+ // that right now).
+ collator->setAttribute(UCOL_NORMALIZATION_MODE, UCOL_ON, status);
+
+ icu::UnicodeString case_first;
+ if (ExtractStringSetting(isolate, options, "caseFirst", &case_first)) {
+ if (case_first == UNICODE_STRING_SIMPLE("upper")) {
+ collator->setAttribute(UCOL_CASE_FIRST, UCOL_UPPER_FIRST, status);
+ } else if (case_first == UNICODE_STRING_SIMPLE("lower")) {
+ collator->setAttribute(UCOL_CASE_FIRST, UCOL_LOWER_FIRST, status);
+ } else {
+ // Default (false/off).
+ collator->setAttribute(UCOL_CASE_FIRST, UCOL_OFF, status);
+ }
+ }
+
+ icu::UnicodeString sensitivity;
+ if (ExtractStringSetting(isolate, options, "sensitivity", &sensitivity)) {
+ if (sensitivity == UNICODE_STRING_SIMPLE("base")) {
+ collator->setStrength(icu::Collator::PRIMARY);
+ } else if (sensitivity == UNICODE_STRING_SIMPLE("accent")) {
+ collator->setStrength(icu::Collator::SECONDARY);
+ } else if (sensitivity == UNICODE_STRING_SIMPLE("case")) {
+ collator->setStrength(icu::Collator::PRIMARY);
+ collator->setAttribute(UCOL_CASE_LEVEL, UCOL_ON, status);
+ } else {
+ // variant (default)
+ collator->setStrength(icu::Collator::TERTIARY);
+ }
+ }
+
+ bool ignore;
+ if (ExtractBooleanSetting(isolate, options, "ignorePunctuation", &ignore)) {
+ if (ignore) {
+ collator->setAttribute(UCOL_ALTERNATE_HANDLING, UCOL_SHIFTED, status);
+ }
+ }
+
+ return collator;
+}
+
+
+void SetResolvedCollatorSettings(Isolate* isolate,
+ const icu::Locale& icu_locale,
+ icu::Collator* collator,
+ Handle<JSObject> resolved) {
+ UErrorCode status = U_ZERO_ERROR;
+
+ JSObject::SetProperty(
+ resolved,
+ isolate->factory()->NewStringFromAscii(CStrVector("numeric")),
+ isolate->factory()->ToBoolean(
+ collator->getAttribute(UCOL_NUMERIC_COLLATION, status) == UCOL_ON),
+ NONE,
+ kNonStrictMode);
+
+ switch (collator->getAttribute(UCOL_CASE_FIRST, status)) {
+ case UCOL_LOWER_FIRST:
+ JSObject::SetProperty(
+ resolved,
+ isolate->factory()->NewStringFromAscii(CStrVector("caseFirst")),
+ isolate->factory()->NewStringFromAscii(CStrVector("lower")),
+ NONE,
+ kNonStrictMode);
+ break;
+ case UCOL_UPPER_FIRST:
+ JSObject::SetProperty(
+ resolved,
+ isolate->factory()->NewStringFromAscii(CStrVector("caseFirst")),
+ isolate->factory()->NewStringFromAscii(CStrVector("upper")),
+ NONE,
+ kNonStrictMode);
+ break;
+ default:
+ JSObject::SetProperty(
+ resolved,
+ isolate->factory()->NewStringFromAscii(CStrVector("caseFirst")),
+ isolate->factory()->NewStringFromAscii(CStrVector("false")),
+ NONE,
+ kNonStrictMode);
+ }
+
+ switch (collator->getAttribute(UCOL_STRENGTH, status)) {
+ case UCOL_PRIMARY: {
+ JSObject::SetProperty(
+ resolved,
+ isolate->factory()->NewStringFromAscii(CStrVector("strength")),
+ isolate->factory()->NewStringFromAscii(CStrVector("primary")),
+ NONE,
+ kNonStrictMode);
+
+ // case level: true + s1 -> case, s1 -> base.
+ if (UCOL_ON == collator->getAttribute(UCOL_CASE_LEVEL, status)) {
+ JSObject::SetProperty(
+ resolved,
+ isolate->factory()->NewStringFromAscii(CStrVector("sensitivity")),
+ isolate->factory()->NewStringFromAscii(CStrVector("case")),
+ NONE,
+ kNonStrictMode);
+ } else {
+ JSObject::SetProperty(
+ resolved,
+ isolate->factory()->NewStringFromAscii(CStrVector("sensitivity")),
+ isolate->factory()->NewStringFromAscii(CStrVector("base")),
+ NONE,
+ kNonStrictMode);
+ }
+ break;
+ }
+ case UCOL_SECONDARY:
+ JSObject::SetProperty(
+ resolved,
+ isolate->factory()->NewStringFromAscii(CStrVector("strength")),
+ isolate->factory()->NewStringFromAscii(CStrVector("secondary")),
+ NONE,
+ kNonStrictMode);
+ JSObject::SetProperty(
+ resolved,
+ isolate->factory()->NewStringFromAscii(CStrVector("sensitivity")),
+ isolate->factory()->NewStringFromAscii(CStrVector("accent")),
+ NONE,
+ kNonStrictMode);
+ break;
+ case UCOL_TERTIARY:
+ JSObject::SetProperty(
+ resolved,
+ isolate->factory()->NewStringFromAscii(CStrVector("strength")),
+ isolate->factory()->NewStringFromAscii(CStrVector("tertiary")),
+ NONE,
+ kNonStrictMode);
+ JSObject::SetProperty(
+ resolved,
+ isolate->factory()->NewStringFromAscii(CStrVector("sensitivity")),
+ isolate->factory()->NewStringFromAscii(CStrVector("variant")),
+ NONE,
+ kNonStrictMode);
+ break;
+ case UCOL_QUATERNARY:
+      // We shouldn't get quaternary and identical from ICU, but if we do,
+      // put them into variant.
+ JSObject::SetProperty(
+ resolved,
+ isolate->factory()->NewStringFromAscii(CStrVector("strength")),
+ isolate->factory()->NewStringFromAscii(CStrVector("quaternary")),
+ NONE,
+ kNonStrictMode);
+ JSObject::SetProperty(
+ resolved,
+ isolate->factory()->NewStringFromAscii(CStrVector("sensitivity")),
+ isolate->factory()->NewStringFromAscii(CStrVector("variant")),
+ NONE,
+ kNonStrictMode);
+ break;
+ default:
+ JSObject::SetProperty(
+ resolved,
+ isolate->factory()->NewStringFromAscii(CStrVector("strength")),
+ isolate->factory()->NewStringFromAscii(CStrVector("identical")),
+ NONE,
+ kNonStrictMode);
+ JSObject::SetProperty(
+ resolved,
+ isolate->factory()->NewStringFromAscii(CStrVector("sensitivity")),
+ isolate->factory()->NewStringFromAscii(CStrVector("variant")),
+ NONE,
+ kNonStrictMode);
+ }
+
+ JSObject::SetProperty(
+ resolved,
+ isolate->factory()->NewStringFromAscii(CStrVector("ignorePunctuation")),
+ isolate->factory()->ToBoolean(collator->getAttribute(
+ UCOL_ALTERNATE_HANDLING, status) == UCOL_SHIFTED),
+ NONE,
+ kNonStrictMode);
+
+ // Set the locale
+ char result[ULOC_FULLNAME_CAPACITY];
+ status = U_ZERO_ERROR;
+ uloc_toLanguageTag(
+ icu_locale.getName(), result, ULOC_FULLNAME_CAPACITY, FALSE, &status);
+ if (U_SUCCESS(status)) {
+ JSObject::SetProperty(
+ resolved,
+ isolate->factory()->NewStringFromAscii(CStrVector("locale")),
+ isolate->factory()->NewStringFromAscii(CStrVector(result)),
+ NONE,
+ kNonStrictMode);
+ } else {
+ // This would never happen, since we got the locale from ICU.
+ JSObject::SetProperty(
+ resolved,
+ isolate->factory()->NewStringFromAscii(CStrVector("locale")),
+ isolate->factory()->NewStringFromAscii(CStrVector("und")),
+ NONE,
+ kNonStrictMode);
+ }
+}
+
+
+icu::BreakIterator* CreateICUBreakIterator(
+ Isolate* isolate,
+ const icu::Locale& icu_locale,
+ Handle<JSObject> options) {
+ UErrorCode status = U_ZERO_ERROR;
+ icu::BreakIterator* break_iterator = NULL;
+ icu::UnicodeString type;
+ if (!ExtractStringSetting(isolate, options, "type", &type)) return NULL;
+
+ if (type == UNICODE_STRING_SIMPLE("character")) {
+ break_iterator =
+ icu::BreakIterator::createCharacterInstance(icu_locale, status);
+ } else if (type == UNICODE_STRING_SIMPLE("sentence")) {
+ break_iterator =
+ icu::BreakIterator::createSentenceInstance(icu_locale, status);
+ } else if (type == UNICODE_STRING_SIMPLE("line")) {
+ break_iterator =
+ icu::BreakIterator::createLineInstance(icu_locale, status);
+ } else {
+    // Default is word iterator.
+ break_iterator =
+ icu::BreakIterator::createWordInstance(icu_locale, status);
+ }
+
+ if (U_FAILURE(status)) {
+ delete break_iterator;
+ return NULL;
+ }
+
+ return break_iterator;
+}
+
+
+void SetResolvedBreakIteratorSettings(Isolate* isolate,
+ const icu::Locale& icu_locale,
+ icu::BreakIterator* break_iterator,
+ Handle<JSObject> resolved) {
+ UErrorCode status = U_ZERO_ERROR;
+
+ // Set the locale
+ char result[ULOC_FULLNAME_CAPACITY];
+ status = U_ZERO_ERROR;
+ uloc_toLanguageTag(
+ icu_locale.getName(), result, ULOC_FULLNAME_CAPACITY, FALSE, &status);
+ if (U_SUCCESS(status)) {
+ JSObject::SetProperty(
+ resolved,
+ isolate->factory()->NewStringFromAscii(CStrVector("locale")),
+ isolate->factory()->NewStringFromAscii(CStrVector(result)),
+ NONE,
+ kNonStrictMode);
+ } else {
+ // This would never happen, since we got the locale from ICU.
+ JSObject::SetProperty(
+ resolved,
+ isolate->factory()->NewStringFromAscii(CStrVector("locale")),
+ isolate->factory()->NewStringFromAscii(CStrVector("und")),
+ NONE,
+ kNonStrictMode);
+ }
+}
+
+} // namespace
+
+
+// static
+Handle<ObjectTemplateInfo> I18N::GetTemplate(Isolate* isolate) {
+ return GetEternal<1, i::EternalHandles::I18N_TEMPLATE_ONE>(isolate);
+}
+
+
+// static
+Handle<ObjectTemplateInfo> I18N::GetTemplate2(Isolate* isolate) {
+ return GetEternal<2, i::EternalHandles::I18N_TEMPLATE_TWO>(isolate);
+}
+
+
+// static
+icu::SimpleDateFormat* DateFormat::InitializeDateTimeFormat(
+ Isolate* isolate,
+ Handle<String> locale,
+ Handle<JSObject> options,
+ Handle<JSObject> resolved) {
+ // Convert BCP47 into ICU locale format.
+ UErrorCode status = U_ZERO_ERROR;
+ icu::Locale icu_locale;
+ char icu_result[ULOC_FULLNAME_CAPACITY];
+ int icu_length = 0;
+ v8::String::Utf8Value bcp47_locale(v8::Utils::ToLocal(locale));
+ if (bcp47_locale.length() != 0) {
+ uloc_forLanguageTag(*bcp47_locale, icu_result, ULOC_FULLNAME_CAPACITY,
+ &icu_length, &status);
+ if (U_FAILURE(status) || icu_length == 0) {
+ return NULL;
+ }
+ icu_locale = icu::Locale(icu_result);
+ }
+
+ icu::SimpleDateFormat* date_format = CreateICUDateFormat(
+ isolate, icu_locale, options);
+ if (!date_format) {
+ // Remove extensions and try again.
+ icu::Locale no_extension_locale(icu_locale.getBaseName());
+ date_format = CreateICUDateFormat(isolate, no_extension_locale, options);
+
+ // Set resolved settings (pattern, numbering system, calendar).
+ SetResolvedDateSettings(
+ isolate, no_extension_locale, date_format, resolved);
+ } else {
+ SetResolvedDateSettings(isolate, icu_locale, date_format, resolved);
+ }
+
+ return date_format;
+}
+
+
+icu::SimpleDateFormat* DateFormat::UnpackDateFormat(
+ Isolate* isolate,
+ Handle<JSObject> obj) {
+ Handle<String> key =
+ isolate->factory()->NewStringFromAscii(CStrVector("dateFormat"));
+ if (JSReceiver::HasLocalProperty(obj, key)) {
+ return reinterpret_cast<icu::SimpleDateFormat*>(
+ obj->GetInternalField(0));
+ }
+
+ return NULL;
+}
+
+
+void DateFormat::DeleteDateFormat(v8::Isolate* isolate,
+ Persistent<v8::Value>* object,
+ void* param) {
+ // First delete the hidden C++ object.
+ delete reinterpret_cast<icu::SimpleDateFormat*>(Handle<JSObject>::cast(
+ v8::Utils::OpenPersistent(object))->GetInternalField(0));
+
+ // Then dispose of the persistent handle to JS object.
+ object->Dispose();
+}
+
+
+icu::DecimalFormat* NumberFormat::InitializeNumberFormat(
+ Isolate* isolate,
+ Handle<String> locale,
+ Handle<JSObject> options,
+ Handle<JSObject> resolved) {
+ // Convert BCP47 into ICU locale format.
+ UErrorCode status = U_ZERO_ERROR;
+ icu::Locale icu_locale;
+ char icu_result[ULOC_FULLNAME_CAPACITY];
+ int icu_length = 0;
+ v8::String::Utf8Value bcp47_locale(v8::Utils::ToLocal(locale));
+ if (bcp47_locale.length() != 0) {
+ uloc_forLanguageTag(*bcp47_locale, icu_result, ULOC_FULLNAME_CAPACITY,
+ &icu_length, &status);
+ if (U_FAILURE(status) || icu_length == 0) {
+ return NULL;
+ }
+ icu_locale = icu::Locale(icu_result);
+ }
+
+ icu::DecimalFormat* number_format =
+ CreateICUNumberFormat(isolate, icu_locale, options);
+ if (!number_format) {
+ // Remove extensions and try again.
+ icu::Locale no_extension_locale(icu_locale.getBaseName());
+ number_format = CreateICUNumberFormat(
+ isolate, no_extension_locale, options);
+
+ // Set resolved settings (pattern, numbering system).
+ SetResolvedNumberSettings(
+ isolate, no_extension_locale, number_format, resolved);
+ } else {
+ SetResolvedNumberSettings(isolate, icu_locale, number_format, resolved);
+ }
+
+ return number_format;
+}
+
+
+icu::DecimalFormat* NumberFormat::UnpackNumberFormat(
+ Isolate* isolate,
+ Handle<JSObject> obj) {
+ Handle<String> key =
+ isolate->factory()->NewStringFromAscii(CStrVector("numberFormat"));
+ if (JSReceiver::HasLocalProperty(obj, key)) {
+ return reinterpret_cast<icu::DecimalFormat*>(obj->GetInternalField(0));
+ }
+
+ return NULL;
+}
+
+
+void NumberFormat::DeleteNumberFormat(v8::Isolate* isolate,
+ Persistent<v8::Value>* object,
+ void* param) {
+ // First delete the hidden C++ object.
+ delete reinterpret_cast<icu::DecimalFormat*>(Handle<JSObject>::cast(
+ v8::Utils::OpenPersistent(object))->GetInternalField(0));
+
+ // Then dispose of the persistent handle to JS object.
+ object->Dispose();
+}
+
+
+icu::Collator* Collator::InitializeCollator(
+ Isolate* isolate,
+ Handle<String> locale,
+ Handle<JSObject> options,
+ Handle<JSObject> resolved) {
+ // Convert BCP47 into ICU locale format.
+ UErrorCode status = U_ZERO_ERROR;
+ icu::Locale icu_locale;
+ char icu_result[ULOC_FULLNAME_CAPACITY];
+ int icu_length = 0;
+ v8::String::Utf8Value bcp47_locale(v8::Utils::ToLocal(locale));
+ if (bcp47_locale.length() != 0) {
+ uloc_forLanguageTag(*bcp47_locale, icu_result, ULOC_FULLNAME_CAPACITY,
+ &icu_length, &status);
+ if (U_FAILURE(status) || icu_length == 0) {
+ return NULL;
+ }
+ icu_locale = icu::Locale(icu_result);
+ }
+
+ icu::Collator* collator = CreateICUCollator(isolate, icu_locale, options);
+ if (!collator) {
+ // Remove extensions and try again.
+ icu::Locale no_extension_locale(icu_locale.getBaseName());
+ collator = CreateICUCollator(isolate, no_extension_locale, options);
+
+ // Set resolved settings (pattern, numbering system).
+ SetResolvedCollatorSettings(
+ isolate, no_extension_locale, collator, resolved);
+ } else {
+ SetResolvedCollatorSettings(isolate, icu_locale, collator, resolved);
+ }
+
+ return collator;
+}
+
+
+icu::Collator* Collator::UnpackCollator(Isolate* isolate,
+ Handle<JSObject> obj) {
+ Handle<String> key =
+ isolate->factory()->NewStringFromAscii(CStrVector("collator"));
+ if (JSReceiver::HasLocalProperty(obj, key)) {
+ return reinterpret_cast<icu::Collator*>(obj->GetInternalField(0));
+ }
+
+ return NULL;
+}
+
+
+void Collator::DeleteCollator(v8::Isolate* isolate,
+ Persistent<v8::Value>* object,
+ void* param) {
+ // First delete the hidden C++ object.
+ delete reinterpret_cast<icu::Collator*>(Handle<JSObject>::cast(
+ v8::Utils::OpenPersistent(object))->GetInternalField(0));
+
+ // Then dispose of the persistent handle to JS object.
+ object->Dispose();
+}
+
+
+icu::BreakIterator* BreakIterator::InitializeBreakIterator(
+ Isolate* isolate,
+ Handle<String> locale,
+ Handle<JSObject> options,
+ Handle<JSObject> resolved) {
+ // Convert BCP47 into ICU locale format.
+ UErrorCode status = U_ZERO_ERROR;
+ icu::Locale icu_locale;
+ char icu_result[ULOC_FULLNAME_CAPACITY];
+ int icu_length = 0;
+ v8::String::Utf8Value bcp47_locale(v8::Utils::ToLocal(locale));
+ if (bcp47_locale.length() != 0) {
+ uloc_forLanguageTag(*bcp47_locale, icu_result, ULOC_FULLNAME_CAPACITY,
+ &icu_length, &status);
+ if (U_FAILURE(status) || icu_length == 0) {
+ return NULL;
+ }
+ icu_locale = icu::Locale(icu_result);
+ }
+
+ icu::BreakIterator* break_iterator = CreateICUBreakIterator(
+ isolate, icu_locale, options);
+ if (!break_iterator) {
+ // Remove extensions and try again.
+ icu::Locale no_extension_locale(icu_locale.getBaseName());
+ break_iterator = CreateICUBreakIterator(
+ isolate, no_extension_locale, options);
+
+ // Set resolved settings (locale).
+ SetResolvedBreakIteratorSettings(
+ isolate, no_extension_locale, break_iterator, resolved);
+ } else {
+ SetResolvedBreakIteratorSettings(
+ isolate, icu_locale, break_iterator, resolved);
+ }
+
+ return break_iterator;
+}
+
+
+icu::BreakIterator* BreakIterator::UnpackBreakIterator(Isolate* isolate,
+ Handle<JSObject> obj) {
+ Handle<String> key =
+ isolate->factory()->NewStringFromAscii(CStrVector("breakIterator"));
+ if (JSReceiver::HasLocalProperty(obj, key)) {
+ return reinterpret_cast<icu::BreakIterator*>(obj->GetInternalField(0));
+ }
+
+ return NULL;
+}
+
+
+void BreakIterator::DeleteBreakIterator(v8::Isolate* isolate,
+ Persistent<v8::Value>* object,
+ void* param) {
+ // First delete the hidden C++ object.
+ delete reinterpret_cast<icu::BreakIterator*>(Handle<JSObject>::cast(
+ v8::Utils::OpenPersistent(object))->GetInternalField(0));
+
+ delete reinterpret_cast<icu::UnicodeString*>(Handle<JSObject>::cast(
+ v8::Utils::OpenPersistent(object))->GetInternalField(1));
+
+ // Then dispose of the persistent handle to JS object.
+ object->Dispose();
+}
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/i18n.h b/deps/v8/src/i18n.h
new file mode 100644
index 000000000..08e7f2b71
--- /dev/null
+++ b/deps/v8/src/i18n.h
@@ -0,0 +1,154 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_I18N_H_
+#define V8_I18N_H_
+
+#include "unicode/uversion.h"
+#include "v8.h"
+
+namespace U_ICU_NAMESPACE {
+class BreakIterator;
+class Collator;
+class DecimalFormat;
+class SimpleDateFormat;
+}
+
+namespace v8 {
+namespace internal {
+
+class I18N {
+ public:
+ // Creates an ObjectTemplate with one internal field.
+ static Handle<ObjectTemplateInfo> GetTemplate(Isolate* isolate);
+
+ // Creates an ObjectTemplate with two internal fields.
+ static Handle<ObjectTemplateInfo> GetTemplate2(Isolate* isolate);
+
+ private:
+ I18N();
+};
+
+
+class DateFormat {
+ public:
+  // Create a formatter for the specified locale and options. Returns the
+ // resolved settings for the locale / options.
+ static icu::SimpleDateFormat* InitializeDateTimeFormat(
+ Isolate* isolate,
+ Handle<String> locale,
+ Handle<JSObject> options,
+ Handle<JSObject> resolved);
+
+ // Unpacks date format object from corresponding JavaScript object.
+ static icu::SimpleDateFormat* UnpackDateFormat(Isolate* isolate,
+ Handle<JSObject> obj);
+
+ // Release memory we allocated for the DateFormat once the JS object that
+ // holds the pointer gets garbage collected.
+ static void DeleteDateFormat(v8::Isolate* isolate,
+ Persistent<v8::Value>* object,
+ void* param);
+ private:
+ DateFormat();
+};
+
+
+class NumberFormat {
+ public:
+  // Create a formatter for the specified locale and options. Returns the
+ // resolved settings for the locale / options.
+ static icu::DecimalFormat* InitializeNumberFormat(
+ Isolate* isolate,
+ Handle<String> locale,
+ Handle<JSObject> options,
+ Handle<JSObject> resolved);
+
+ // Unpacks number format object from corresponding JavaScript object.
+ static icu::DecimalFormat* UnpackNumberFormat(Isolate* isolate,
+ Handle<JSObject> obj);
+
+ // Release memory we allocated for the NumberFormat once the JS object that
+ // holds the pointer gets garbage collected.
+ static void DeleteNumberFormat(v8::Isolate* isolate,
+ Persistent<v8::Value>* object,
+ void* param);
+ private:
+ NumberFormat();
+};
+
+
+class Collator {
+ public:
+  // Create a collator for the specified locale and options. Returns the
+ // resolved settings for the locale / options.
+ static icu::Collator* InitializeCollator(
+ Isolate* isolate,
+ Handle<String> locale,
+ Handle<JSObject> options,
+ Handle<JSObject> resolved);
+
+ // Unpacks collator object from corresponding JavaScript object.
+ static icu::Collator* UnpackCollator(Isolate* isolate, Handle<JSObject> obj);
+
+ // Release memory we allocated for the Collator once the JS object that holds
+ // the pointer gets garbage collected.
+ static void DeleteCollator(v8::Isolate* isolate,
+ Persistent<v8::Value>* object,
+ void* param);
+ private:
+ Collator();
+};
+
+class BreakIterator {
+ public:
+  // Create a BreakIterator for the specified locale and options. Returns the
+ // resolved settings for the locale / options.
+ static icu::BreakIterator* InitializeBreakIterator(
+ Isolate* isolate,
+ Handle<String> locale,
+ Handle<JSObject> options,
+ Handle<JSObject> resolved);
+
+ // Unpacks break iterator object from corresponding JavaScript object.
+ static icu::BreakIterator* UnpackBreakIterator(Isolate* isolate,
+ Handle<JSObject> obj);
+
+ // Release memory we allocated for the BreakIterator once the JS object that
+ // holds the pointer gets garbage collected.
+ static void DeleteBreakIterator(v8::Isolate* isolate,
+ Persistent<v8::Value>* object,
+ void* param);
+
+ private:
+ BreakIterator();
+};
+
+} } // namespace v8::internal
+
+#endif // V8_I18N_H_
diff --git a/deps/v8/src/i18n.js b/deps/v8/src/i18n.js
new file mode 100644
index 000000000..a64c7e678
--- /dev/null
+++ b/deps/v8/src/i18n.js
@@ -0,0 +1,2116 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// ECMAScript 402 API implementation.
+
+/**
+ * Intl object is a single object that has some named properties,
+ * all of which are constructors.
+ */
+$Object.defineProperty(global, "Intl", { enumerable: false, value: (function() {
+
+'use strict';
+
+var Intl = {};
+
+var undefined = global.undefined;
+
+var AVAILABLE_SERVICES = ['collator',
+ 'numberformat',
+ 'dateformat',
+ 'breakiterator'];
+
+/**
+ * Caches available locales for each service.
+ */
+var AVAILABLE_LOCALES = {
+ 'collator': undefined,
+ 'numberformat': undefined,
+ 'dateformat': undefined,
+ 'breakiterator': undefined
+};
+
+/**
+ * Caches default ICU locale.
+ */
+var DEFAULT_ICU_LOCALE = undefined;
+
+/**
+ * Unicode extension regular expression.
+ */
+var UNICODE_EXTENSION_RE = undefined;
+
+function GetUnicodeExtensionRE() {
+ if (UNICODE_EXTENSION_RE === undefined) {
+ UNICODE_EXTENSION_RE = new $RegExp('-u(-[a-z0-9]{2,8})+', 'g');
+ }
+ return UNICODE_EXTENSION_RE;
+}
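+// For example, 'de-DE-u-co-phonebk'.replace(GetUnicodeExtensionRE(), '')
+// yields 'de-DE', which is how the -u- extension is stripped below.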
+
+/**
+ * Matches any Unicode extension.
+ */
+var ANY_EXTENSION_RE = undefined;
+
+function GetAnyExtensionRE() {
+ if (ANY_EXTENSION_RE === undefined) {
+ ANY_EXTENSION_RE = new $RegExp('-[a-z0-9]{1}-.*', 'g');
+ }
+ return ANY_EXTENSION_RE;
+}
+
+/**
+ * Replace quoted text (single quote, anything but the quote and quote again).
+ */
+var QUOTED_STRING_RE = undefined;
+
+function GetQuotedStringRE() {
+ if (QUOTED_STRING_RE === undefined) {
+ QUOTED_STRING_RE = new $RegExp("'[^']+'", 'g');
+ }
+ return QUOTED_STRING_RE;
+}
+
+/**
+ * Matches valid service name.
+ */
+var SERVICE_RE = undefined;
+
+function GetServiceRE() {
+ if (SERVICE_RE === undefined) {
+ SERVICE_RE =
+ new $RegExp('^(collator|numberformat|dateformat|breakiterator)$');
+ }
+ return SERVICE_RE;
+}
+
+/**
+ * Validates a language tag against bcp47 spec.
+ * Actual value is assigned on first run.
+ */
+var LANGUAGE_TAG_RE = undefined;
+
+function GetLanguageTagRE() {
+ if (LANGUAGE_TAG_RE === undefined) {
+ BuildLanguageTagREs();
+ }
+ return LANGUAGE_TAG_RE;
+}
+
+/**
+ * Helps find duplicate variants in the language tag.
+ */
+var LANGUAGE_VARIANT_RE = undefined;
+
+function GetLanguageVariantRE() {
+ if (LANGUAGE_VARIANT_RE === undefined) {
+ BuildLanguageTagREs();
+ }
+ return LANGUAGE_VARIANT_RE;
+}
+
+/**
+ * Helps find duplicate singletons in the language tag.
+ */
+var LANGUAGE_SINGLETON_RE = undefined;
+
+function GetLanguageSingletonRE() {
+ if (LANGUAGE_SINGLETON_RE === undefined) {
+ BuildLanguageTagREs();
+ }
+ return LANGUAGE_SINGLETON_RE;
+}
+
+/**
+ * Matches valid IANA time zone names.
+ */
+var TIMEZONE_NAME_CHECK_RE = undefined;
+
+function GetTimezoneNameCheckRE() {
+ if (TIMEZONE_NAME_CHECK_RE === undefined) {
+ TIMEZONE_NAME_CHECK_RE =
+ new $RegExp('^([A-Za-z]+)/([A-Za-z]+)(?:_([A-Za-z]+))*$');
+ }
+ return TIMEZONE_NAME_CHECK_RE;
+}
+
+/**
+ * Maps ICU calendar names into LDML type.
+ */
+var ICU_CALENDAR_MAP = {
+ 'gregorian': 'gregory',
+ 'japanese': 'japanese',
+ 'buddhist': 'buddhist',
+ 'roc': 'roc',
+ 'persian': 'persian',
+ 'islamic-civil': 'islamicc',
+ 'islamic': 'islamic',
+ 'hebrew': 'hebrew',
+ 'chinese': 'chinese',
+ 'indian': 'indian',
+ 'coptic': 'coptic',
+ 'ethiopic': 'ethiopic',
+ 'ethiopic-amete-alem': 'ethioaa'
+};
+
+/**
+ * Map of Unicode extensions to option properties, and their values and types,
+ * for a collator.
+ */
+var COLLATOR_KEY_MAP = {
+ 'kn': {'property': 'numeric', 'type': 'boolean'},
+ 'kf': {'property': 'caseFirst', 'type': 'string',
+ 'values': ['false', 'lower', 'upper']}
+};
+
+/**
+ * Map of Unicode extensions to option properties, and their values and types,
+ * for a number format.
+ */
+var NUMBER_FORMAT_KEY_MAP = {
+ 'nu': {'property': undefined, 'type': 'string'}
+};
+
+/**
+ * Map of Unicode extensions to option properties, and their values and types,
+ * for a date/time format.
+ */
+var DATETIME_FORMAT_KEY_MAP = {
+ 'ca': {'property': undefined, 'type': 'string'},
+ 'nu': {'property': undefined, 'type': 'string'}
+};
+
+/**
+ * Allowed -u-co- values. List taken from:
+ * http://unicode.org/repos/cldr/trunk/common/bcp47/collation.xml
+ */
+var ALLOWED_CO_VALUES = [
+ 'big5han', 'dict', 'direct', 'ducet', 'gb2312', 'phonebk', 'phonetic',
+ 'pinyin', 'reformed', 'searchjl', 'stroke', 'trad', 'unihan', 'zhuyin'
+];
+
+/**
+ * Error message for when function object is created with new and it's not
+ * a constructor.
+ */
+var ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR =
+ 'Function object that\'s not a constructor was created with new';
+
+
+/**
+ * Adds bound method to the prototype of the given object.
+ */
+function addBoundMethod(obj, methodName, implementation, length) {
+ function getter() {
+ if (!this || typeof this !== 'object' ||
+ this.__initializedIntlObject === undefined) {
+ throw new $TypeError('Method ' + methodName + ' called on a ' +
+ 'non-object or on a wrong type of object.');
+ }
+ var internalName = '__bound' + methodName + '__';
+ if (this[internalName] === undefined) {
+ var that = this;
+ var boundMethod;
+ if (length === undefined || length === 2) {
+ boundMethod = function(x, y) {
+ if (%_IsConstructCall()) {
+ throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
+ }
+ return implementation(that, x, y);
+ }
+ } else if (length === 1) {
+ boundMethod = function(x) {
+ if (%_IsConstructCall()) {
+ throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
+ }
+ return implementation(that, x);
+ }
+ } else {
+ boundMethod = function() {
+ if (%_IsConstructCall()) {
+ throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
+ }
+          // DateTimeFormat.format needs to be 0 arg method, but can still
+ // receive optional dateValue param. If one was provided, pass it
+ // along.
+ if (%_ArgumentsLength() > 0) {
+ return implementation(that, %_Arguments(0));
+ } else {
+ return implementation(that);
+ }
+ }
+ }
+ %FunctionSetName(boundMethod, internalName);
+ %FunctionRemovePrototype(boundMethod);
+ %SetNativeFlag(boundMethod);
+ this[internalName] = boundMethod;
+ }
+ return this[internalName];
+ }
+
+ %FunctionSetName(getter, methodName);
+ %FunctionRemovePrototype(getter);
+ %SetNativeFlag(getter);
+
+ $Object.defineProperty(obj.prototype, methodName, {
+ get: getter,
+ enumerable: false,
+ configurable: true
+ });
+}
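+// For example, addBoundMethod(Intl.Collator, 'compare', compare, 2) (see
+// below) installs a non-enumerable 'compare' getter on
+// Intl.Collator.prototype that lazily creates, caches and returns a
+// two-argument method bound to the receiver collator.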
+
+
+/**
+ * Returns an intersection of locales and service supported locales.
+ * Parameter locales is treated as a priority list.
+ */
+function supportedLocalesOf(service, locales, options) {
+ if (IS_NULL(service.match(GetServiceRE()))) {
+ throw new $Error('Internal error, wrong service type: ' + service);
+ }
+
+ // Provide defaults if matcher was not specified.
+ if (options === undefined) {
+ options = {};
+ } else {
+ options = toObject(options);
+ }
+
+ var matcher = options.localeMatcher;
+ if (matcher !== undefined) {
+ matcher = $String(matcher);
+ if (matcher !== 'lookup' && matcher !== 'best fit') {
+ throw new $RangeError('Illegal value for localeMatcher:' + matcher);
+ }
+ } else {
+ matcher = 'best fit';
+ }
+
+ var requestedLocales = initializeLocaleList(locales);
+
+ // Cache these, they don't ever change per service.
+ if (AVAILABLE_LOCALES[service] === undefined) {
+ AVAILABLE_LOCALES[service] = getAvailableLocalesOf(service);
+ }
+
+ // Use either best fit or lookup algorithm to match locales.
+ if (matcher === 'best fit') {
+ return initializeLocaleList(bestFitSupportedLocalesOf(
+ requestedLocales, AVAILABLE_LOCALES[service]));
+ }
+
+ return initializeLocaleList(lookupSupportedLocalesOf(
+ requestedLocales, AVAILABLE_LOCALES[service]));
+}
+
+
+/**
+ * Returns the subset of the provided BCP 47 language priority list for which
+ * this service has a matching locale when using the BCP 47 Lookup algorithm.
+ * Locales appear in the same order in the returned list as in the input list.
+ */
+function lookupSupportedLocalesOf(requestedLocales, availableLocales) {
+ var matchedLocales = [];
+ for (var i = 0; i < requestedLocales.length; ++i) {
+ // Remove -u- extension.
+ var locale = requestedLocales[i].replace(GetUnicodeExtensionRE(), '');
+ do {
+ if (availableLocales[locale] !== undefined) {
+ // Push requested locale not the resolved one.
+ matchedLocales.push(requestedLocales[i]);
+ break;
+ }
+ // Truncate locale if possible, if not break.
+ var pos = locale.lastIndexOf('-');
+ if (pos === -1) {
+ break;
+ }
+ locale = locale.substring(0, pos);
+ } while (true);
+ }
+
+ return matchedLocales;
+}
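+// For example, if availableLocales contains 'de' but not 'de-DE', then
+// lookupSupportedLocalesOf(['de-DE-u-co-phonebk'], availableLocales)
+// truncates 'de-DE' to 'de', finds a match and returns
+// ['de-DE-u-co-phonebk'] - the requested tag, not the resolved one.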
+
+
+/**
+ * Returns the subset of the provided BCP 47 language priority list for which
+ * this service has a matching locale when using the implementation
+ * dependent algorithm.
+ * Locales appear in the same order in the returned list as in the input list.
+ */
+function bestFitSupportedLocalesOf(requestedLocales, availableLocales) {
+ // TODO(cira): implement better best fit algorithm.
+ return lookupSupportedLocalesOf(requestedLocales, availableLocales);
+}
+
+
+/**
+ * Returns a getOption function that extracts property value for given
+ * options object. If property is missing it returns defaultValue. If value
+ * is out of range for that property it throws RangeError.
+ */
+function getGetOption(options, caller) {
+ if (options === undefined) {
+ throw new $Error('Internal ' + caller + ' error. ' +
+ 'Default options are missing.');
+ }
+
+ var getOption = function getOption(property, type, values, defaultValue) {
+ if (options[property] !== undefined) {
+ var value = options[property];
+ switch (type) {
+ case 'boolean':
+ value = $Boolean(value);
+ break;
+ case 'string':
+ value = $String(value);
+ break;
+ case 'number':
+ value = $Number(value);
+ break;
+ default:
+ throw new $Error('Internal error. Wrong value type.');
+ }
+ if (values !== undefined && values.indexOf(value) === -1) {
+ throw new $RangeError('Value ' + value + ' out of range for ' + caller +
+ ' options property ' + property);
+ }
+
+ return value;
+ }
+
+ return defaultValue;
+ }
+
+ return getOption;
+}
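+// For example, getGetOption({localeMatcher: 'lookup'}, 'collator') returns a
+// getOption function for which getOption('localeMatcher', 'string',
+// ['lookup', 'best fit'], 'best fit') evaluates to 'lookup'; a value outside
+// the allowed list would throw a RangeError instead.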
+
+
+/**
+ * Compares a BCP 47 language priority list requestedLocales against the locales
+ * in availableLocales and determines the best available language to meet the
+ * request. Two algorithms are available to match the locales: the Lookup
+ * algorithm described in RFC 4647 section 3.4, and an implementation dependent
+ * best-fit algorithm. Independent of the locale matching algorithm, options
+ * specified through Unicode locale extension sequences are negotiated
+ * separately, taking the caller's relevant extension keys and locale data as
+ * well as client-provided options into consideration. Returns an object with
+ * a locale property whose value is the language tag of the selected locale,
+ * and properties for each key in relevantExtensionKeys providing the selected
+ * value for that key.
+ */
+function resolveLocale(service, requestedLocales, options) {
+ requestedLocales = initializeLocaleList(requestedLocales);
+
+ var getOption = getGetOption(options, service);
+ var matcher = getOption('localeMatcher', 'string',
+ ['lookup', 'best fit'], 'best fit');
+ var resolved;
+ if (matcher === 'lookup') {
+ resolved = lookupMatcher(service, requestedLocales);
+ } else {
+ resolved = bestFitMatcher(service, requestedLocales);
+ }
+
+ return resolved;
+}
+
+
+/**
+ * Returns best matched supported locale and extension info using basic
+ * lookup algorithm.
+ */
+function lookupMatcher(service, requestedLocales) {
+ if (IS_NULL(service.match(GetServiceRE()))) {
+ throw new $Error('Internal error, wrong service type: ' + service);
+ }
+
+ // Cache these, they don't ever change per service.
+ if (AVAILABLE_LOCALES[service] === undefined) {
+ AVAILABLE_LOCALES[service] = getAvailableLocalesOf(service);
+ }
+
+ for (var i = 0; i < requestedLocales.length; ++i) {
+ // Remove all extensions.
+ var locale = requestedLocales[i].replace(GetAnyExtensionRE(), '');
+ do {
+ if (AVAILABLE_LOCALES[service][locale] !== undefined) {
+ // Return the resolved locale and extension.
+ var extensionMatch = requestedLocales[i].match(GetUnicodeExtensionRE());
+ var extension = IS_NULL(extensionMatch) ? '' : extensionMatch[0];
+ return {'locale': locale, 'extension': extension, 'position': i};
+ }
+ // Truncate locale if possible.
+ var pos = locale.lastIndexOf('-');
+ if (pos === -1) {
+ break;
+ }
+ locale = locale.substring(0, pos);
+ } while (true);
+ }
+
+ // Didn't find a match, return default.
+ if (DEFAULT_ICU_LOCALE === undefined) {
+ DEFAULT_ICU_LOCALE = %GetDefaultICULocale();
+ }
+
+ return {'locale': DEFAULT_ICU_LOCALE, 'extension': '', 'position': -1};
+}
+
+
+/**
+ * Returns best matched supported locale and extension info using
+ * implementation dependent algorithm.
+ */
+function bestFitMatcher(service, requestedLocales) {
+ // TODO(cira): implement better best fit algorithm.
+ return lookupMatcher(service, requestedLocales);
+}
+
+
+/**
+ * Parses Unicode extension into key - value map.
+ * Returns empty object if the extension string is invalid.
+ * We are not concerned with the validity of the values at this point.
+ */
+function parseExtension(extension) {
+ var extensionSplit = extension.split('-');
+
+ // Assume ['', 'u', ...] input, but don't throw.
+ if (extensionSplit.length <= 2 ||
+ (extensionSplit[0] !== '' && extensionSplit[1] !== 'u')) {
+ return {};
+ }
+
+ // Key is {2}alphanum, value is {3,8}alphanum.
+ // Some keys may not have explicit values (booleans).
+ var extensionMap = {};
+ var previousKey = undefined;
+ for (var i = 2; i < extensionSplit.length; ++i) {
+ var length = extensionSplit[i].length;
+ var element = extensionSplit[i];
+ if (length === 2) {
+ extensionMap[element] = undefined;
+ previousKey = element;
+    } else if (length >= 3 && length <= 8 && previousKey !== undefined) {
+ extensionMap[previousKey] = element;
+ previousKey = undefined;
+ } else {
+ // There is a value that's too long, or that doesn't have a key.
+ return {};
+ }
+ }
+
+ return extensionMap;
+}
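+// For example, parseExtension('-u-nu-arab-ca-islamic') returns
+// {'nu': 'arab', 'ca': 'islamic'}, while a malformed extension such as
+// '-u-anoverlylongvalue' returns {}.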
+
+
+/**
+ * Converts parameter to an Object if possible.
+ */
+function toObject(value) {
+ if (IS_NULL_OR_UNDEFINED(value)) {
+ throw new $TypeError('Value cannot be converted to an Object.');
+ }
+
+ return $Object(value);
+}
+
+
+/**
+ * Populates internalOptions object with boolean key-value pairs
+ * from extensionMap and options.
+ * Returns filtered extension (number and date format constructors use
+ * Unicode extensions for passing parameters to ICU).
+ * It's used for extension-option pairs only, e.g. kn-normalization, but not
+ * for 'sensitivity' since it doesn't have an extension equivalent.
+ * Extensions like nu and ca don't have an options equivalent, so we place
+ * undefined in the map.property to denote that.
+ */
+function setOptions(inOptions, extensionMap, keyValues, getOption, outOptions) {
+ var extension = '';
+
+ var updateExtension = function updateExtension(key, value) {
+ return '-' + key + '-' + $String(value);
+ }
+
+ var updateProperty = function updateProperty(property, type, value) {
+ if (type === 'boolean' && (typeof value === 'string')) {
+ value = (value === 'true') ? true : false;
+ }
+
+ if (property !== undefined) {
+ defineWEProperty(outOptions, property, value);
+ }
+ }
+
+ for (var key in keyValues) {
+ if (keyValues.hasOwnProperty(key)) {
+ var value = undefined;
+ var map = keyValues[key];
+ if (map.property !== undefined) {
+ // This may return true if user specifies numeric: 'false', since
+ // Boolean('nonempty') === true.
+ value = getOption(map.property, map.type, map.values);
+ }
+ if (value !== undefined) {
+ updateProperty(map.property, map.type, value);
+ extension += updateExtension(key, value);
+ continue;
+ }
+ // User options didn't have it, check Unicode extension.
+ // Here we want to convert strings 'true', 'false' into proper Boolean
+ // values (not a user error).
+ if (extensionMap.hasOwnProperty(key)) {
+ value = extensionMap[key];
+ if (value !== undefined) {
+ updateProperty(map.property, map.type, value);
+ extension += updateExtension(key, value);
+ } else if (map.type === 'boolean') {
+ // Boolean keys are allowed not to have values in Unicode extension.
+ // Those default to true.
+ updateProperty(map.property, map.type, true);
+ extension += updateExtension(key, true);
+ }
+ }
+ }
+ }
+
+  return extension === '' ? '' : '-u' + extension;
+}
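+// For example, with COLLATOR_KEY_MAP, an extensionMap of {kn: undefined}
+// (i.e. '-u-kn' with no explicit value) and no matching user option,
+// setOptions defines numeric: true on outOptions and returns the filtered
+// extension '-u-kn-true'.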
+
+
+/**
+ * Converts all OwnProperties into
+ * configurable: false, writable: false, enumerable: true.
+ */
+function freezeArray(array) {
+ array.forEach(function(element, index) {
+ $Object.defineProperty(array, index, {value: element,
+ configurable: false,
+ writable: false,
+ enumerable: true});
+ });
+
+ $Object.defineProperty(array, 'length', {value: array.length,
+ writable: false});
+
+ return array;
+}
+
+
+/**
+ * It's sometimes desirable to keep the user-requested locale instead of the
+ * ICU-supported one (zh-TW is equivalent to zh-Hant-TW, so we keep the
+ * shorter form if that is what the user requested).
+ * This function returns the user-specified tag if its maximized form matches
+ * the ICU-resolved locale. If not, we return the ICU result.
+ */
+function getOptimalLanguageTag(original, resolved) {
+ // Returns Array<Object>, where each object has maximized and base properties.
+ // Maximized: zh -> zh-Hans-CN
+ // Base: zh-CN-u-ca-gregory -> zh-CN
+ // Take care of grandfathered or simple cases.
+ if (original === resolved) {
+ return original;
+ }
+
+ var locales = %GetLanguageTagVariants([original, resolved]);
+ if (locales[0].maximized !== locales[1].maximized) {
+ return resolved;
+ }
+
+ // Preserve extensions of resolved locale, but swap base tags with original.
+ var resolvedBase = new $RegExp('^' + locales[1].base);
+ return resolved.replace(resolvedBase, locales[0].base);
+}
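+// For example, getOptimalLanguageTag('zh-TW', 'zh-Hant-TW-u-co-pinyin')
+// yields 'zh-TW-u-co-pinyin' when both tags maximize to the same locale:
+// the shorter user-requested base replaces the resolved base while the
+// resolved extensions are preserved.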
+
+
+/**
+ * Returns an Object that contains all of supported locales for a given
+ * service.
+ * In addition to the supported locales we add xx-ZZ locale for each xx-Yyyy-ZZ
+ * that is supported. This is required by the spec.
+ */
+function getAvailableLocalesOf(service) {
+ var available = %AvailableLocalesOf(service);
+
+ for (var i in available) {
+ if (available.hasOwnProperty(i)) {
+ var parts = i.match(/^([a-z]{2,3})-([A-Z][a-z]{3})-([A-Z]{2})$/);
+ if (parts !== null) {
+        // Build xx-ZZ. We don't care about the actual value,
+        // as long as it's not undefined.
+ available[parts[1] + '-' + parts[3]] = null;
+ }
+ }
+ }
+
+ return available;
+}
+
+
+/**
+ * Defines a property and sets writable and enumerable to true.
+ * Configurable is false by default.
+ */
+function defineWEProperty(object, property, value) {
+ $Object.defineProperty(object, property,
+ {value: value, writable: true, enumerable: true});
+}
+
+
+/**
+ * Adds property to an object if the value is not undefined.
+ * Sets configurable descriptor to false.
+ */
+function addWEPropertyIfDefined(object, property, value) {
+ if (value !== undefined) {
+ defineWEProperty(object, property, value);
+ }
+}
+
+
+/**
+ * Defines a property and sets writable, enumerable and configurable to true.
+ */
+function defineWECProperty(object, property, value) {
+ $Object.defineProperty(object, property,
+ {value: value,
+ writable: true,
+ enumerable: true,
+ configurable: true});
+}
+
+
+/**
+ * Adds property to an object if the value is not undefined.
+ * Sets all descriptors to true.
+ */
+function addWECPropertyIfDefined(object, property, value) {
+ if (value !== undefined) {
+ defineWECProperty(object, property, value);
+ }
+}
+
+
+/**
+ * Returns the titlecased word, e.g. aMeRicA -> America.
+ */
+function toTitleCaseWord(word) {
+ return word.substr(0, 1).toUpperCase() + word.substr(1).toLowerCase();
+}
+
+/**
+ * Canonicalizes the language tag, or throws in case the tag is invalid.
+ */
+function canonicalizeLanguageTag(localeID) {
+  // null is typeof 'object', so we have to do an extra check.
+ if (typeof localeID !== 'string' && typeof localeID !== 'object' ||
+ IS_NULL(localeID)) {
+ throw new $TypeError('Language ID should be string or object.');
+ }
+
+ var localeString = $String(localeID);
+
+ if (isValidLanguageTag(localeString) === false) {
+ throw new $RangeError('Invalid language tag: ' + localeString);
+ }
+
+ // This call will strip -kn but not -kn-true extensions.
+  // ICU bug filed - http://bugs.icu-project.org/trac/ticket/9265.
+ // TODO(cira): check if -u-kn-true-kc-true-kh-true still throws after
+ // upgrade to ICU 4.9.
+ var tag = %CanonicalizeLanguageTag(localeString);
+ if (tag === 'invalid-tag') {
+ throw new $RangeError('Invalid language tag: ' + localeString);
+ }
+
+ return tag;
+}
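+// For example, canonicalizeLanguageTag(null) throws a TypeError, and a
+// malformed tag such as 'en-US-' throws a RangeError before ICU is asked
+// to canonicalize it.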
+
+
+/**
+ * Returns an array where all locales are canonicalized and duplicates removed.
+ * Throws on locales that are not well formed BCP47 tags.
+ */
+function initializeLocaleList(locales) {
+ var seen = [];
+ if (locales === undefined) {
+ // Constructor is called without arguments.
+ seen = [];
+ } else {
+ // We allow single string localeID.
+ if (typeof locales === 'string') {
+ seen.push(canonicalizeLanguageTag(locales));
+ return freezeArray(seen);
+ }
+
+ var o = toObject(locales);
+ // Converts it to UInt32 (>>> is shr on 32bit integers).
+ var len = o.length >>> 0;
+
+ for (var k = 0; k < len; k++) {
+ if (k in o) {
+ var value = o[k];
+
+ var tag = canonicalizeLanguageTag(value);
+
+ if (seen.indexOf(tag) === -1) {
+ seen.push(tag);
+ }
+ }
+ }
+ }
+
+ return freezeArray(seen);
+}
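+// For example, initializeLocaleList(undefined) returns a frozen empty array,
+// while a list such as ['sr-RS', 'sr-RS'] is canonicalized and de-duplicated
+// into a single-element frozen array.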
+
+
+/**
+ * Validates the language tag. Section 2.2.9 of the bcp47 spec
+ * defines a valid tag.
+ *
+ * ICU is too permissive and lets invalid tags, like
+ * hant-cmn-cn, through.
+ *
+ * Returns false if the language tag is invalid.
+ */
+function isValidLanguageTag(locale) {
+  // Check if it's well-formed, including grandfathered tags.
+ if (GetLanguageTagRE().test(locale) === false) {
+ return false;
+ }
+
+ // Just return if it's a x- form. It's all private.
+ if (locale.indexOf('x-') === 0) {
+ return true;
+ }
+
+ // Check if there are any duplicate variants or singletons (extensions).
+
+ // Remove private use section.
+ locale = locale.split(/-x-/)[0];
+
+ // Skip language since it can match variant regex, so we start from 1.
+ // We are matching i-klingon here, but that's ok, since i-klingon-klingon
+ // is not valid and would fail LANGUAGE_TAG_RE test.
+ var variants = [];
+ var extensions = [];
+ var parts = locale.split(/-/);
+ for (var i = 1; i < parts.length; i++) {
+ var value = parts[i];
+ if (GetLanguageVariantRE().test(value) === true && extensions.length === 0) {
+ if (variants.indexOf(value) === -1) {
+ variants.push(value);
+ } else {
+ return false;
+ }
+ }
+
+ if (GetLanguageSingletonRE().test(value) === true) {
+ if (extensions.indexOf(value) === -1) {
+ extensions.push(value);
+ } else {
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+
+
+/**
+ * Builds a regular expression that validates the language tag
+ * against bcp47 spec.
+ * Uses http://tools.ietf.org/html/bcp47, section 2.1, ABNF.
+ * Runs on load and initializes the global REs.
+ */
+function BuildLanguageTagREs() {
+ var alpha = '[a-zA-Z]';
+ var digit = '[0-9]';
+ var alphanum = '(' + alpha + '|' + digit + ')';
+ var regular = '(art-lojban|cel-gaulish|no-bok|no-nyn|zh-guoyu|zh-hakka|' +
+ 'zh-min|zh-min-nan|zh-xiang)';
+ var irregular = '(en-GB-oed|i-ami|i-bnn|i-default|i-enochian|i-hak|' +
+ 'i-klingon|i-lux|i-mingo|i-navajo|i-pwn|i-tao|i-tay|' +
+ 'i-tsu|sgn-BE-FR|sgn-BE-NL|sgn-CH-DE)';
+ var grandfathered = '(' + irregular + '|' + regular + ')';
+ var privateUse = '(x(-' + alphanum + '{1,8})+)';
+
+ var singleton = '(' + digit + '|[A-WY-Za-wy-z])';
+ LANGUAGE_SINGLETON_RE = new $RegExp('^' + singleton + '$', 'i');
+
+ var extension = '(' + singleton + '(-' + alphanum + '{2,8})+)';
+
+ var variant = '(' + alphanum + '{5,8}|(' + digit + alphanum + '{3}))';
+ LANGUAGE_VARIANT_RE = new $RegExp('^' + variant + '$', 'i');
+
+ var region = '(' + alpha + '{2}|' + digit + '{3})';
+ var script = '(' + alpha + '{4})';
+ var extLang = '(' + alpha + '{3}(-' + alpha + '{3}){0,2})';
+ var language = '(' + alpha + '{2,3}(-' + extLang + ')?|' + alpha + '{4}|' +
+ alpha + '{5,8})';
+ var langTag = language + '(-' + script + ')?(-' + region + ')?(-' +
+ variant + ')*(-' + extension + ')*(-' + privateUse + ')?';
+
+ var languageTag =
+ '^(' + langTag + '|' + privateUse + '|' + grandfathered + ')$';
+ LANGUAGE_TAG_RE = new $RegExp(languageTag, 'i');
+}
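+// For example, after BuildLanguageTagREs() runs, LANGUAGE_TAG_RE accepts
+// 'sr-Latn-RS' and 'x-private', LANGUAGE_VARIANT_RE matches '1996', and
+// LANGUAGE_SINGLETON_RE matches 'u' but not 'x' (reserved for private use).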
+
+/**
+ * Initializes the given object so it's a valid Collator instance.
+ * Useful for subclassing.
+ */
+function initializeCollator(collator, locales, options) {
+ if (collator.hasOwnProperty('__initializedIntlObject')) {
+ throw new $TypeError('Trying to re-initialize Collator object.');
+ }
+
+ if (options === undefined) {
+ options = {};
+ }
+
+ var getOption = getGetOption(options, 'collator');
+
+ var internalOptions = {};
+
+ defineWEProperty(internalOptions, 'usage', getOption(
+ 'usage', 'string', ['sort', 'search'], 'sort'));
+
+ var sensitivity = getOption('sensitivity', 'string',
+ ['base', 'accent', 'case', 'variant']);
+ if (sensitivity === undefined && internalOptions.usage === 'sort') {
+ sensitivity = 'variant';
+ }
+ defineWEProperty(internalOptions, 'sensitivity', sensitivity);
+
+ defineWEProperty(internalOptions, 'ignorePunctuation', getOption(
+ 'ignorePunctuation', 'boolean', undefined, false));
+
+ var locale = resolveLocale('collator', locales, options);
+
+ // ICU can't take kb, kc... parameters through localeID, so we need to pass
+ // them as options.
+ // One exception is -co- which has to be part of the extension, but only for
+ // usage: sort, and its value can't be 'standard' or 'search'.
+ var extensionMap = parseExtension(locale.extension);
+ setOptions(
+ options, extensionMap, COLLATOR_KEY_MAP, getOption, internalOptions);
+
+ var collation = 'default';
+ var extension = '';
+ if (extensionMap.hasOwnProperty('co') && internalOptions.usage === 'sort') {
+ if (ALLOWED_CO_VALUES.indexOf(extensionMap.co) !== -1) {
+ extension = '-u-co-' + extensionMap.co;
+ // ICU can't tell us what the collation is, so save user's input.
+ collation = extensionMap.co;
+ }
+ } else if (internalOptions.usage === 'search') {
+ extension = '-u-co-search';
+ }
+ defineWEProperty(internalOptions, 'collation', collation);
+
+ var requestedLocale = locale.locale + extension;
+
+ // We define all properties C++ code may produce, to prevent security
+ // problems. If malicious user decides to redefine Object.prototype.locale
+ // we can't just use plain x.locale = 'us' or in C++ Set("locale", "us").
+ // Object.defineProperties will either succeed defining or throw an error.
+ var resolved = $Object.defineProperties({}, {
+ caseFirst: {writable: true},
+ collation: {value: internalOptions.collation, writable: true},
+ ignorePunctuation: {writable: true},
+ locale: {writable: true},
+ numeric: {writable: true},
+ requestedLocale: {value: requestedLocale, writable: true},
+ sensitivity: {writable: true},
+ strength: {writable: true},
+ usage: {value: internalOptions.usage, writable: true}
+ });
+
+ var internalCollator = %CreateCollator(requestedLocale,
+ internalOptions,
+ resolved);
+
+ // Writable, configurable and enumerable are set to false by default.
+ $Object.defineProperty(collator, 'collator', {value: internalCollator});
+ $Object.defineProperty(collator, '__initializedIntlObject',
+ {value: 'collator'});
+ $Object.defineProperty(collator, 'resolved', {value: resolved});
+
+ return collator;
+}
+
+
+/**
+ * Constructs Intl.Collator object given optional locales and options
+ * parameters.
+ *
+ * @constructor
+ */
+%SetProperty(Intl, 'Collator', function() {
+ var locales = %_Arguments(0);
+ var options = %_Arguments(1);
+
+ if (!this || this === Intl) {
+ // Constructor is called as a function.
+ return new Intl.Collator(locales, options);
+ }
+
+ return initializeCollator(toObject(this), locales, options);
+ },
+ DONT_ENUM
+);
+
+
+/**
+ * Collator resolvedOptions method.
+ */
+%SetProperty(Intl.Collator.prototype, 'resolvedOptions', function() {
+ if (%_IsConstructCall()) {
+ throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
+ }
+
+ if (!this || typeof this !== 'object' ||
+ this.__initializedIntlObject !== 'collator') {
+    throw new $TypeError('resolvedOptions method called on a non-object ' +
+                         'or on an object that is not Intl.Collator.');
+ }
+
+ var coll = this;
+ var locale = getOptimalLanguageTag(coll.resolved.requestedLocale,
+ coll.resolved.locale);
+
+ return {
+ locale: locale,
+ usage: coll.resolved.usage,
+ sensitivity: coll.resolved.sensitivity,
+ ignorePunctuation: coll.resolved.ignorePunctuation,
+ numeric: coll.resolved.numeric,
+ caseFirst: coll.resolved.caseFirst,
+ collation: coll.resolved.collation
+ };
+ },
+ DONT_ENUM
+);
+%FunctionSetName(Intl.Collator.prototype.resolvedOptions, 'resolvedOptions');
+%FunctionRemovePrototype(Intl.Collator.prototype.resolvedOptions);
+%SetNativeFlag(Intl.Collator.prototype.resolvedOptions);
+
+
+/**
+ * Returns the subset of the given locale list for which the service
+ * has a matching (possibly fallback) locale. Locales appear in the same
+ * order in the returned list as in the input list.
+ * The options argument is optional.
+ */
+%SetProperty(Intl.Collator, 'supportedLocalesOf', function(locales) {
+ if (%_IsConstructCall()) {
+ throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
+ }
+
+ return supportedLocalesOf('collator', locales, %_Arguments(1));
+ },
+ DONT_ENUM
+);
+%FunctionSetName(Intl.Collator.supportedLocalesOf, 'supportedLocalesOf');
+%FunctionRemovePrototype(Intl.Collator.supportedLocalesOf);
+%SetNativeFlag(Intl.Collator.supportedLocalesOf);
+
+
+/**
+ * When the compare method is called with two arguments x and y, it returns a
+ * Number other than NaN that represents the result of a locale-sensitive
+ * String comparison of x with y.
+ * The result is intended to order String values in the sort order specified
+ * by the effective locale and collation options computed during construction
+ * of this Collator object, and will be negative, zero, or positive, depending
+ * on whether x comes before y in the sort order, the Strings are equal under
+ * the sort order, or x comes after y in the sort order, respectively.
+ */
+function compare(collator, x, y) {
+ return %InternalCompare(collator.collator, $String(x), $String(y));
+}
+
+
+addBoundMethod(Intl.Collator, 'compare', compare, 2);
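+
+// Usage sketch for the bound compare method above (illustrative only; assumes
+// the Intl object built by this file is installed as the global Intl):
+//
+//   var collator = new Intl.Collator(['de']);
+//   collator.compare('a', 'b');               // negative: 'a' sorts before 'b'
+//   ['z', 'ä', 'a'].sort(collator.compare);   // e.g. ['a', 'ä', 'z'] in German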
+
+/**
+ * Verifies that the input is a well-formed ISO 4217 currency code.
+ * Don't uppercase to test: it could convert an invalid code into a valid one.
+ * For example \u00DFP (Eszett+P) becomes SSP.
+ */
+function isWellFormedCurrencyCode(currency) {
+ return typeof currency == "string" &&
+ currency.length == 3 &&
+ currency.match(/[^A-Za-z]/) == null;
+}
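+
+// Illustrative checks for the predicate above (assumed example inputs):
+//
+//   isWellFormedCurrencyCode('USD');   // true: three ASCII letters
+//   isWellFormedCurrencyCode('usd');   // true: case is not checked here
+//   isWellFormedCurrencyCode('US');    // false: wrong length
+//   isWellFormedCurrencyCode('U$D');   // false: non-letter character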
+
+
+/**
+ * Returns the valid digit count for a property, or throws RangeError if the
+ * value is out of range.
+ */
+function getNumberOption(options, property, min, max, fallback) {
+ var value = options[property];
+ if (value !== undefined) {
+ value = $Number(value);
+ if ($isNaN(value) || value < min || value > max) {
+ throw new $RangeError(property + ' value is out of range.');
+ }
+ return $floor(value);
+ }
+
+ return fallback;
+}
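+
+// Illustrative behaviour of getNumberOption (assumed example inputs):
+//
+//   getNumberOption({maximumFractionDigits: 2.7},
+//                   'maximumFractionDigits', 0, 20, 3);  // 2 (value is floored)
+//   getNumberOption({}, 'maximumFractionDigits', 0, 20, 3);  // 3 (fallback)
+//   getNumberOption({maximumFractionDigits: 25},
+//                   'maximumFractionDigits', 0, 20, 3);  // throws RangeError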
+
+
+/**
+ * Initializes the given object so it's a valid NumberFormat instance.
+ * Useful for subclassing.
+ */
+function initializeNumberFormat(numberFormat, locales, options) {
+ if (numberFormat.hasOwnProperty('__initializedIntlObject')) {
+ throw new $TypeError('Trying to re-initialize NumberFormat object.');
+ }
+
+ if (options === undefined) {
+ options = {};
+ }
+
+ var getOption = getGetOption(options, 'numberformat');
+
+ var locale = resolveLocale('numberformat', locales, options);
+
+ var internalOptions = {};
+ defineWEProperty(internalOptions, 'style', getOption(
+ 'style', 'string', ['decimal', 'percent', 'currency'], 'decimal'));
+
+ var currency = getOption('currency', 'string');
+ if (currency !== undefined && !isWellFormedCurrencyCode(currency)) {
+ throw new $RangeError('Invalid currency code: ' + currency);
+ }
+
+ if (internalOptions.style === 'currency' && currency === undefined) {
+ throw new $TypeError('Currency code is required with currency style.');
+ }
+
+ var currencyDisplay = getOption(
+ 'currencyDisplay', 'string', ['code', 'symbol', 'name'], 'symbol');
+ if (internalOptions.style === 'currency') {
+ defineWEProperty(internalOptions, 'currency', currency.toUpperCase());
+ defineWEProperty(internalOptions, 'currencyDisplay', currencyDisplay);
+ }
+
+ // Digit ranges.
+ var mnid = getNumberOption(options, 'minimumIntegerDigits', 1, 21, 1);
+ defineWEProperty(internalOptions, 'minimumIntegerDigits', mnid);
+
+ var mnfd = getNumberOption(options, 'minimumFractionDigits', 0, 20, 0);
+ defineWEProperty(internalOptions, 'minimumFractionDigits', mnfd);
+
+ var mxfd = getNumberOption(options, 'maximumFractionDigits', mnfd, 20, 3);
+ defineWEProperty(internalOptions, 'maximumFractionDigits', mxfd);
+
+ var mnsd = options['minimumSignificantDigits'];
+ var mxsd = options['maximumSignificantDigits'];
+ if (mnsd !== undefined || mxsd !== undefined) {
+ mnsd = getNumberOption(options, 'minimumSignificantDigits', 1, 21, 0);
+ defineWEProperty(internalOptions, 'minimumSignificantDigits', mnsd);
+
+ mxsd = getNumberOption(options, 'maximumSignificantDigits', mnsd, 21, 21);
+ defineWEProperty(internalOptions, 'maximumSignificantDigits', mxsd);
+ }
+
+ // Grouping.
+ defineWEProperty(internalOptions, 'useGrouping', getOption(
+ 'useGrouping', 'boolean', undefined, true));
+
+ // ICU prefers options to be passed using -u- extension key/values for
+ // number format, so we need to build that.
+ var extensionMap = parseExtension(locale.extension);
+ var extension = setOptions(options, extensionMap, NUMBER_FORMAT_KEY_MAP,
+ getOption, internalOptions);
+
+ var requestedLocale = locale.locale + extension;
+ var resolved = $Object.defineProperties({}, {
+ currency: {writable: true},
+ currencyDisplay: {writable: true},
+ locale: {writable: true},
+ maximumFractionDigits: {writable: true},
+ minimumFractionDigits: {writable: true},
+ minimumIntegerDigits: {writable: true},
+ numberingSystem: {writable: true},
+ requestedLocale: {value: requestedLocale, writable: true},
+ style: {value: internalOptions.style, writable: true},
+ useGrouping: {writable: true}
+ });
+ if (internalOptions.hasOwnProperty('minimumSignificantDigits')) {
+ defineWEProperty(resolved, 'minimumSignificantDigits', undefined);
+ }
+ if (internalOptions.hasOwnProperty('maximumSignificantDigits')) {
+ defineWEProperty(resolved, 'maximumSignificantDigits', undefined);
+ }
+ var formatter = %CreateNumberFormat(requestedLocale,
+ internalOptions,
+ resolved);
+
+ // We can't get information about number or currency style from ICU, so we
+ // assume user request was fulfilled.
+ if (internalOptions.style === 'currency') {
+ $Object.defineProperty(resolved, 'currencyDisplay', {value: currencyDisplay,
+ writable: true});
+ }
+
+ $Object.defineProperty(numberFormat, 'formatter', {value: formatter});
+ $Object.defineProperty(numberFormat, 'resolved', {value: resolved});
+ $Object.defineProperty(numberFormat, '__initializedIntlObject',
+ {value: 'numberformat'});
+
+ return numberFormat;
+}
+
+
+/**
+ * Constructs Intl.NumberFormat object given optional locales and options
+ * parameters.
+ *
+ * @constructor
+ */
+%SetProperty(Intl, 'NumberFormat', function() {
+ var locales = %_Arguments(0);
+ var options = %_Arguments(1);
+
+ if (!this || this === Intl) {
+ // Constructor is called as a function.
+ return new Intl.NumberFormat(locales, options);
+ }
+
+ return initializeNumberFormat(toObject(this), locales, options);
+ },
+ DONT_ENUM
+);
+
+
+/**
+ * NumberFormat resolvedOptions method.
+ */
+%SetProperty(Intl.NumberFormat.prototype, 'resolvedOptions', function() {
+ if (%_IsConstructCall()) {
+ throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
+ }
+
+ if (!this || typeof this !== 'object' ||
+ this.__initializedIntlObject !== 'numberformat') {
+    throw new $TypeError('resolvedOptions method called on a non-object' +
+                         ' or on an object that is not Intl.NumberFormat.');
+ }
+
+ var format = this;
+ var locale = getOptimalLanguageTag(format.resolved.requestedLocale,
+ format.resolved.locale);
+
+ var result = {
+ locale: locale,
+ numberingSystem: format.resolved.numberingSystem,
+ style: format.resolved.style,
+ useGrouping: format.resolved.useGrouping,
+ minimumIntegerDigits: format.resolved.minimumIntegerDigits,
+ minimumFractionDigits: format.resolved.minimumFractionDigits,
+ maximumFractionDigits: format.resolved.maximumFractionDigits,
+ };
+
+ if (result.style === 'currency') {
+ defineWECProperty(result, 'currency', format.resolved.currency);
+ defineWECProperty(result, 'currencyDisplay',
+ format.resolved.currencyDisplay);
+ }
+
+ if (format.resolved.hasOwnProperty('minimumSignificantDigits')) {
+ defineWECProperty(result, 'minimumSignificantDigits',
+ format.resolved.minimumSignificantDigits);
+ }
+
+ if (format.resolved.hasOwnProperty('maximumSignificantDigits')) {
+ defineWECProperty(result, 'maximumSignificantDigits',
+ format.resolved.maximumSignificantDigits);
+ }
+
+ return result;
+ },
+ DONT_ENUM
+);
+%FunctionSetName(Intl.NumberFormat.prototype.resolvedOptions,
+ 'resolvedOptions');
+%FunctionRemovePrototype(Intl.NumberFormat.prototype.resolvedOptions);
+%SetNativeFlag(Intl.NumberFormat.prototype.resolvedOptions);
+
+
+/**
+ * Returns the subset of the given locale list for which the service
+ * has a matching (possibly fallback) locale. Locales appear in the same
+ * order in the returned list as in the input list.
+ * The options argument is optional.
+ */
+%SetProperty(Intl.NumberFormat, 'supportedLocalesOf', function(locales) {
+ if (%_IsConstructCall()) {
+ throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
+ }
+
+ return supportedLocalesOf('numberformat', locales, %_Arguments(1));
+ },
+ DONT_ENUM
+);
+%FunctionSetName(Intl.NumberFormat.supportedLocalesOf, 'supportedLocalesOf');
+%FunctionRemovePrototype(Intl.NumberFormat.supportedLocalesOf);
+%SetNativeFlag(Intl.NumberFormat.supportedLocalesOf);
+
+
+/**
+ * Returns a String value representing the result of calling ToNumber(value)
+ * according to the effective locale and the formatting options of this
+ * NumberFormat.
+ */
+function formatNumber(formatter, value) {
+ // Spec treats -0 and +0 as 0.
+ var number = $Number(value);
+ if (number === -0) {
+ number = 0;
+ }
+
+ return %InternalNumberFormat(formatter.formatter, number);
+}
+
+
+/**
+ * Returns a Number that represents the string value that was passed in.
+ */
+function parseNumber(formatter, value) {
+ return %InternalNumberParse(formatter.formatter, $String(value));
+}
+
+
+addBoundMethod(Intl.NumberFormat, 'format', formatNumber, 1);
+addBoundMethod(Intl.NumberFormat, 'v8Parse', parseNumber, 1);
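+
+// Usage sketch for the bound format method above (illustrative; the exact
+// output strings depend on the available ICU data):
+//
+//   var nf = new Intl.NumberFormat(['en-US'],
+//                                  {style: 'currency', currency: 'EUR'});
+//   nf.format(1234.5);                           // e.g. '€1,234.50'
+//   nf.resolvedOptions().minimumFractionDigits;  // typically 2 for currencies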
+
+/**
+ * Returns a string that matches LDML representation of the options object.
+ */
+function toLDMLString(options) {
+ var getOption = getGetOption(options, 'dateformat');
+
+ var ldmlString = '';
+
+ var option = getOption('weekday', 'string', ['narrow', 'short', 'long']);
+ ldmlString += appendToLDMLString(
+ option, {narrow: 'EEEEE', short: 'EEE', long: 'EEEE'});
+
+ option = getOption('era', 'string', ['narrow', 'short', 'long']);
+ ldmlString += appendToLDMLString(
+ option, {narrow: 'GGGGG', short: 'GGG', long: 'GGGG'});
+
+ option = getOption('year', 'string', ['2-digit', 'numeric']);
+ ldmlString += appendToLDMLString(option, {'2-digit': 'yy', 'numeric': 'y'});
+
+ option = getOption('month', 'string',
+ ['2-digit', 'numeric', 'narrow', 'short', 'long']);
+ ldmlString += appendToLDMLString(option, {'2-digit': 'MM', 'numeric': 'M',
+ 'narrow': 'MMMMM', 'short': 'MMM', 'long': 'MMMM'});
+
+ option = getOption('day', 'string', ['2-digit', 'numeric']);
+ ldmlString += appendToLDMLString(
+ option, {'2-digit': 'dd', 'numeric': 'd'});
+
+ var hr12 = getOption('hour12', 'boolean');
+ option = getOption('hour', 'string', ['2-digit', 'numeric']);
+ if (hr12 === undefined) {
+ ldmlString += appendToLDMLString(option, {'2-digit': 'jj', 'numeric': 'j'});
+ } else if (hr12 === true) {
+ ldmlString += appendToLDMLString(option, {'2-digit': 'hh', 'numeric': 'h'});
+ } else {
+ ldmlString += appendToLDMLString(option, {'2-digit': 'HH', 'numeric': 'H'});
+ }
+
+ option = getOption('minute', 'string', ['2-digit', 'numeric']);
+ ldmlString += appendToLDMLString(option, {'2-digit': 'mm', 'numeric': 'm'});
+
+ option = getOption('second', 'string', ['2-digit', 'numeric']);
+ ldmlString += appendToLDMLString(option, {'2-digit': 'ss', 'numeric': 's'});
+
+ option = getOption('timeZoneName', 'string', ['short', 'long']);
+ ldmlString += appendToLDMLString(option, {short: 'z', long: 'zzzz'});
+
+ return ldmlString;
+}
+
+
+/**
+ * Returns either the LDML equivalent of the current option or an empty string.
+ */
+function appendToLDMLString(option, pairs) {
+ if (option !== undefined) {
+ return pairs[option];
+ } else {
+ return '';
+ }
+}
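+
+// Sketch of the skeleton produced by toLDMLString for typical options bags
+// (assumed example inputs; the mappings follow the tables above):
+//
+//   toLDMLString({year: 'numeric', month: 'short', day: '2-digit'});
+//   // -> 'yMMMdd'
+//   toLDMLString({hour: 'numeric', minute: '2-digit', hour12: false});
+//   // -> 'Hmm'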
+
+
+/**
+ * Returns an options object that matches the LDML representation of the date.
+ */
+function fromLDMLString(ldmlString) {
+  // First remove ''-quoted literal text (e.g. 'Uhr'), so it is ignored.
+ ldmlString = ldmlString.replace(GetQuotedStringRE(), '');
+
+ var options = {};
+ var match = ldmlString.match(/E{3,5}/g);
+ options = appendToDateTimeObject(
+ options, 'weekday', match, {EEEEE: 'narrow', EEE: 'short', EEEE: 'long'});
+
+ match = ldmlString.match(/G{3,5}/g);
+ options = appendToDateTimeObject(
+ options, 'era', match, {GGGGG: 'narrow', GGG: 'short', GGGG: 'long'});
+
+ match = ldmlString.match(/y{1,2}/g);
+ options = appendToDateTimeObject(
+ options, 'year', match, {y: 'numeric', yy: '2-digit'});
+
+ match = ldmlString.match(/M{1,5}/g);
+ options = appendToDateTimeObject(options, 'month', match, {MM: '2-digit',
+ M: 'numeric', MMMMM: 'narrow', MMM: 'short', MMMM: 'long'});
+
+ // Sometimes we get L instead of M for month - standalone name.
+ match = ldmlString.match(/L{1,5}/g);
+ options = appendToDateTimeObject(options, 'month', match, {LL: '2-digit',
+ L: 'numeric', LLLLL: 'narrow', LLL: 'short', LLLL: 'long'});
+
+ match = ldmlString.match(/d{1,2}/g);
+ options = appendToDateTimeObject(
+ options, 'day', match, {d: 'numeric', dd: '2-digit'});
+
+ match = ldmlString.match(/h{1,2}/g);
+ if (match !== null) {
+ options['hour12'] = true;
+ }
+ options = appendToDateTimeObject(
+ options, 'hour', match, {h: 'numeric', hh: '2-digit'});
+
+ match = ldmlString.match(/H{1,2}/g);
+ if (match !== null) {
+ options['hour12'] = false;
+ }
+ options = appendToDateTimeObject(
+ options, 'hour', match, {H: 'numeric', HH: '2-digit'});
+
+ match = ldmlString.match(/m{1,2}/g);
+ options = appendToDateTimeObject(
+ options, 'minute', match, {m: 'numeric', mm: '2-digit'});
+
+ match = ldmlString.match(/s{1,2}/g);
+ options = appendToDateTimeObject(
+ options, 'second', match, {s: 'numeric', ss: '2-digit'});
+
+ match = ldmlString.match(/z|zzzz/g);
+ options = appendToDateTimeObject(
+ options, 'timeZoneName', match, {z: 'short', zzzz: 'long'});
+
+ return options;
+}
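+
+// Sketch of the reverse mapping done by fromLDMLString (assumed ICU pattern):
+//
+//   fromLDMLString('dd.MM.y');
+//   // -> {weekday: undefined, era: undefined, year: 'numeric',
+//   //     month: '2-digit', day: '2-digit', hour: undefined, ...}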
+
+
+function appendToDateTimeObject(options, option, match, pairs) {
+ if (IS_NULL(match)) {
+ if (!options.hasOwnProperty(option)) {
+ defineWEProperty(options, option, undefined);
+ }
+ return options;
+ }
+
+ var property = match[0];
+ defineWEProperty(options, option, pairs[property]);
+
+ return options;
+}
+
+
+/**
+ * Returns options with at least the default values filled in.
+ */
+function toDateTimeOptions(options, required, defaults) {
+ if (options === undefined) {
+ options = null;
+ } else {
+ options = toObject(options);
+ }
+
+ options = $Object.apply(this, [options]);
+
+ var needsDefault = true;
+ if ((required === 'date' || required === 'any') &&
+ (options.weekday !== undefined || options.year !== undefined ||
+ options.month !== undefined || options.day !== undefined)) {
+ needsDefault = false;
+ }
+
+ if ((required === 'time' || required === 'any') &&
+ (options.hour !== undefined || options.minute !== undefined ||
+ options.second !== undefined)) {
+ needsDefault = false;
+ }
+
+ if (needsDefault && (defaults === 'date' || defaults === 'all')) {
+ $Object.defineProperty(options, 'year', {value: 'numeric',
+ writable: true,
+ enumerable: true,
+ configurable: true});
+ $Object.defineProperty(options, 'month', {value: 'numeric',
+ writable: true,
+ enumerable: true,
+ configurable: true});
+ $Object.defineProperty(options, 'day', {value: 'numeric',
+ writable: true,
+ enumerable: true,
+ configurable: true});
+ }
+
+ if (needsDefault && (defaults === 'time' || defaults === 'all')) {
+ $Object.defineProperty(options, 'hour', {value: 'numeric',
+ writable: true,
+ enumerable: true,
+ configurable: true});
+ $Object.defineProperty(options, 'minute', {value: 'numeric',
+ writable: true,
+ enumerable: true,
+ configurable: true});
+ $Object.defineProperty(options, 'second', {value: 'numeric',
+ writable: true,
+ enumerable: true,
+ configurable: true});
+ }
+
+ return options;
+}
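+
+// Sketch of how toDateTimeOptions fills in defaults (assumed example inputs):
+//
+//   toDateTimeOptions(undefined, 'any', 'date');
+//   // -> {year: 'numeric', month: 'numeric', day: 'numeric'}
+//   toDateTimeOptions({hour: 'numeric'}, 'any', 'all');
+//   // -> unchanged: a time component is present, so no defaults are added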
+
+
+/**
+ * Initializes the given object so it's a valid DateTimeFormat instance.
+ * Useful for subclassing.
+ */
+function initializeDateTimeFormat(dateFormat, locales, options) {
+
+ if (dateFormat.hasOwnProperty('__initializedIntlObject')) {
+ throw new $TypeError('Trying to re-initialize DateTimeFormat object.');
+ }
+
+ if (options === undefined) {
+ options = {};
+ }
+
+ var locale = resolveLocale('dateformat', locales, options);
+
+ options = toDateTimeOptions(options, 'any', 'date');
+
+ var getOption = getGetOption(options, 'dateformat');
+
+  // We implement only the 'best fit' algorithm, but we still need to check
+  // that the formatMatcher value is in range.
+ var matcher = getOption('formatMatcher', 'string',
+ ['basic', 'best fit'], 'best fit');
+
+ // Build LDML string for the skeleton that we pass to the formatter.
+ var ldmlString = toLDMLString(options);
+
+  // Filter out supported extension keys so we know what to put in the
+  // resolved section later on.
+  // We need to pass the calendar and number system to the method.
+ var tz = canonicalizeTimeZoneID(options.timeZone);
+
+ // ICU prefers options to be passed using -u- extension key/values, so
+ // we need to build that.
+ var internalOptions = {};
+ var extensionMap = parseExtension(locale.extension);
+ var extension = setOptions(options, extensionMap, DATETIME_FORMAT_KEY_MAP,
+ getOption, internalOptions);
+
+ var requestedLocale = locale.locale + extension;
+ var resolved = $Object.defineProperties({}, {
+ calendar: {writable: true},
+ day: {writable: true},
+ era: {writable: true},
+ hour12: {writable: true},
+ hour: {writable: true},
+ locale: {writable: true},
+ minute: {writable: true},
+ month: {writable: true},
+ numberingSystem: {writable: true},
+ pattern: {writable: true},
+ requestedLocale: {value: requestedLocale, writable: true},
+ second: {writable: true},
+ timeZone: {writable: true},
+ timeZoneName: {writable: true},
+ tz: {value: tz, writable: true},
+ weekday: {writable: true},
+ year: {writable: true}
+ });
+
+ var formatter = %CreateDateTimeFormat(
+ requestedLocale, {skeleton: ldmlString, timeZone: tz}, resolved);
+
+ if (tz !== undefined && tz !== resolved.timeZone) {
+ throw new $RangeError('Unsupported time zone specified ' + tz);
+ }
+
+ $Object.defineProperty(dateFormat, 'formatter', {value: formatter});
+ $Object.defineProperty(dateFormat, 'resolved', {value: resolved});
+ $Object.defineProperty(dateFormat, '__initializedIntlObject',
+ {value: 'dateformat'});
+
+ return dateFormat;
+}
+
+
+/**
+ * Constructs Intl.DateTimeFormat object given optional locales and options
+ * parameters.
+ *
+ * @constructor
+ */
+%SetProperty(Intl, 'DateTimeFormat', function() {
+ var locales = %_Arguments(0);
+ var options = %_Arguments(1);
+
+ if (!this || this === Intl) {
+ // Constructor is called as a function.
+ return new Intl.DateTimeFormat(locales, options);
+ }
+
+ return initializeDateTimeFormat(toObject(this), locales, options);
+ },
+ DONT_ENUM
+);
+
+
+/**
+ * DateTimeFormat resolvedOptions method.
+ */
+%SetProperty(Intl.DateTimeFormat.prototype, 'resolvedOptions', function() {
+ if (%_IsConstructCall()) {
+ throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
+ }
+
+ if (!this || typeof this !== 'object' ||
+ this.__initializedIntlObject !== 'dateformat') {
+    throw new $TypeError('resolvedOptions method called on a non-object or ' +
+                         'on an object that is not Intl.DateTimeFormat.');
+ }
+
+ var format = this;
+ var fromPattern = fromLDMLString(format.resolved.pattern);
+ var userCalendar = ICU_CALENDAR_MAP[format.resolved.calendar];
+ if (userCalendar === undefined) {
+ // Use ICU name if we don't have a match. It shouldn't happen, but
+ // it would be too strict to throw for this.
+ userCalendar = format.resolved.calendar;
+ }
+
+ var locale = getOptimalLanguageTag(format.resolved.requestedLocale,
+ format.resolved.locale);
+
+ var result = {
+ locale: locale,
+ numberingSystem: format.resolved.numberingSystem,
+ calendar: userCalendar,
+ timeZone: format.resolved.timeZone
+ };
+
+ addWECPropertyIfDefined(result, 'timeZoneName', fromPattern.timeZoneName);
+ addWECPropertyIfDefined(result, 'era', fromPattern.era);
+ addWECPropertyIfDefined(result, 'year', fromPattern.year);
+ addWECPropertyIfDefined(result, 'month', fromPattern.month);
+ addWECPropertyIfDefined(result, 'day', fromPattern.day);
+ addWECPropertyIfDefined(result, 'weekday', fromPattern.weekday);
+ addWECPropertyIfDefined(result, 'hour12', fromPattern.hour12);
+ addWECPropertyIfDefined(result, 'hour', fromPattern.hour);
+ addWECPropertyIfDefined(result, 'minute', fromPattern.minute);
+ addWECPropertyIfDefined(result, 'second', fromPattern.second);
+
+ return result;
+ },
+ DONT_ENUM
+);
+%FunctionSetName(Intl.DateTimeFormat.prototype.resolvedOptions,
+ 'resolvedOptions');
+%FunctionRemovePrototype(Intl.DateTimeFormat.prototype.resolvedOptions);
+%SetNativeFlag(Intl.DateTimeFormat.prototype.resolvedOptions);
+
+
+/**
+ * Returns the subset of the given locale list for which the service
+ * has a matching (possibly fallback) locale. Locales appear in the same
+ * order in the returned list as in the input list.
+ * The options argument is optional.
+ */
+%SetProperty(Intl.DateTimeFormat, 'supportedLocalesOf', function(locales) {
+ if (%_IsConstructCall()) {
+ throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
+ }
+
+ return supportedLocalesOf('dateformat', locales, %_Arguments(1));
+ },
+ DONT_ENUM
+);
+%FunctionSetName(Intl.DateTimeFormat.supportedLocalesOf, 'supportedLocalesOf');
+%FunctionRemovePrototype(Intl.DateTimeFormat.supportedLocalesOf);
+%SetNativeFlag(Intl.DateTimeFormat.supportedLocalesOf);
+
+
+/**
+ * Returns a String value representing the result of calling ToNumber(date)
+ * according to the effective locale and the formatting options of this
+ * DateTimeFormat.
+ */
+function formatDate(formatter, dateValue) {
+ var dateMs;
+ if (dateValue === undefined) {
+ dateMs = $Date.now();
+ } else {
+ dateMs = $Number(dateValue);
+ }
+
+ if (!$isFinite(dateMs)) {
+ throw new $RangeError('Provided date is not in valid range.');
+ }
+
+ return %InternalDateFormat(formatter.formatter, new $Date(dateMs));
+}
+
+
+/**
+ * Returns a Date object representing the result of calling ToString(value)
+ * according to the effective locale and the formatting options of this
+ * DateTimeFormat.
+ * Returns undefined if date string cannot be parsed.
+ */
+function parseDate(formatter, value) {
+ return %InternalDateParse(formatter.formatter, $String(value));
+}
+
+
+// 0 because the date argument is optional.
+addBoundMethod(Intl.DateTimeFormat, 'format', formatDate, 0);
+addBoundMethod(Intl.DateTimeFormat, 'v8Parse', parseDate, 1);
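+
+// Usage sketch for the bound format method above (illustrative; the exact
+// output depends on the ICU data and the default time zone):
+//
+//   var df = new Intl.DateTimeFormat(['en-US'],
+//                                    {year: 'numeric', month: 'long'});
+//   df.format(new Date(2013, 6, 4));   // e.g. 'July 2013'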
+
+
+/**
+ * Returns the canonical Area/Location name, or throws an exception if the
+ * zone name is not a valid IANA name.
+ */
+function canonicalizeTimeZoneID(tzID) {
+ // Skip undefined zones.
+ if (tzID === undefined) {
+ return tzID;
+ }
+
+ // Special case handling (UTC, GMT).
+ var upperID = tzID.toUpperCase();
+ if (upperID === 'UTC' || upperID === 'GMT' ||
+ upperID === 'ETC/UTC' || upperID === 'ETC/GMT') {
+ return 'UTC';
+ }
+
+ // We expect only _ and / beside ASCII letters.
+ // All inputs should conform to Area/Location from now on.
+ var match = GetTimezoneNameCheckRE().exec(tzID);
+ if (IS_NULL(match)) {
+ throw new $RangeError('Expected Area/Location for time zone, got ' + tzID);
+ }
+
+ var result = toTitleCaseWord(match[1]) + '/' + toTitleCaseWord(match[2]);
+ var i = 3;
+ while (match[i] !== undefined && i < match.length) {
+ result = result + '_' + toTitleCaseWord(match[i]);
+ i++;
+ }
+
+ return result;
+}
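+
+// Illustrative canonicalization results (assumed example inputs; the
+// Area/Location check regular expression is defined elsewhere in this file):
+//
+//   canonicalizeTimeZoneID(undefined);              // undefined (skipped)
+//   canonicalizeTimeZoneID('gmt');                  // 'UTC'
+//   canonicalizeTimeZoneID('america/los_angeles');  // 'America/Los_Angeles'
+//   canonicalizeTimeZoneID('badzone');              // throws RangeError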
+
+/**
+ * Initializes the given object so it's a valid BreakIterator instance.
+ * Useful for subclassing.
+ */
+function initializeBreakIterator(iterator, locales, options) {
+ if (iterator.hasOwnProperty('__initializedIntlObject')) {
+ throw new $TypeError('Trying to re-initialize v8BreakIterator object.');
+ }
+
+ if (options === undefined) {
+ options = {};
+ }
+
+ var getOption = getGetOption(options, 'breakiterator');
+
+ var internalOptions = {};
+
+ defineWEProperty(internalOptions, 'type', getOption(
+ 'type', 'string', ['character', 'word', 'sentence', 'line'], 'word'));
+
+ var locale = resolveLocale('breakiterator', locales, options);
+ var resolved = $Object.defineProperties({}, {
+ requestedLocale: {value: locale.locale, writable: true},
+ type: {value: internalOptions.type, writable: true},
+ locale: {writable: true}
+ });
+
+ var internalIterator = %CreateBreakIterator(locale.locale,
+ internalOptions,
+ resolved);
+
+ $Object.defineProperty(iterator, 'iterator', {value: internalIterator});
+ $Object.defineProperty(iterator, 'resolved', {value: resolved});
+ $Object.defineProperty(iterator, '__initializedIntlObject',
+ {value: 'breakiterator'});
+
+ return iterator;
+}
+
+
+/**
+ * Constructs Intl.v8BreakIterator object given optional locales and options
+ * parameters.
+ *
+ * @constructor
+ */
+%SetProperty(Intl, 'v8BreakIterator', function() {
+ var locales = %_Arguments(0);
+ var options = %_Arguments(1);
+
+ if (!this || this === Intl) {
+ // Constructor is called as a function.
+ return new Intl.v8BreakIterator(locales, options);
+ }
+
+ return initializeBreakIterator(toObject(this), locales, options);
+ },
+ DONT_ENUM
+);
+
+
+/**
+ * BreakIterator resolvedOptions method.
+ */
+%SetProperty(Intl.v8BreakIterator.prototype, 'resolvedOptions', function() {
+ if (%_IsConstructCall()) {
+ throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
+ }
+
+ if (!this || typeof this !== 'object' ||
+ this.__initializedIntlObject !== 'breakiterator') {
+    throw new $TypeError('resolvedOptions method called on a non-object or ' +
+                         'on an object that is not Intl.v8BreakIterator.');
+ }
+
+ var segmenter = this;
+ var locale = getOptimalLanguageTag(segmenter.resolved.requestedLocale,
+ segmenter.resolved.locale);
+
+ return {
+ locale: locale,
+ type: segmenter.resolved.type
+ };
+ },
+ DONT_ENUM
+);
+%FunctionSetName(Intl.v8BreakIterator.prototype.resolvedOptions,
+ 'resolvedOptions');
+%FunctionRemovePrototype(Intl.v8BreakIterator.prototype.resolvedOptions);
+%SetNativeFlag(Intl.v8BreakIterator.prototype.resolvedOptions);
+
+
+/**
+ * Returns the subset of the given locale list for which the service
+ * has a matching (possibly fallback) locale. Locales appear in the same
+ * order in the returned list as in the input list.
+ * The options argument is optional.
+ */
+%SetProperty(Intl.v8BreakIterator, 'supportedLocalesOf', function(locales) {
+ if (%_IsConstructCall()) {
+ throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
+ }
+
+ return supportedLocalesOf('breakiterator', locales, %_Arguments(1));
+ },
+ DONT_ENUM
+);
+%FunctionSetName(Intl.v8BreakIterator.supportedLocalesOf, 'supportedLocalesOf');
+%FunctionRemovePrototype(Intl.v8BreakIterator.supportedLocalesOf);
+%SetNativeFlag(Intl.v8BreakIterator.supportedLocalesOf);
+
+
+/**
+ * Adopts text to segment using the iterator. Old text, if present,
+ * gets discarded.
+ */
+function adoptText(iterator, text) {
+ %BreakIteratorAdoptText(iterator.iterator, $String(text));
+}
+
+
+/**
+ * Returns the index of the first break in the string and moves the pointer.
+ */
+function first(iterator) {
+ return %BreakIteratorFirst(iterator.iterator);
+}
+
+
+/**
+ * Returns the index of the next break and moves the pointer.
+ */
+function next(iterator) {
+ return %BreakIteratorNext(iterator.iterator);
+}
+
+
+/**
+ * Returns the index of the current break.
+ */
+function current(iterator) {
+ return %BreakIteratorCurrent(iterator.iterator);
+}
+
+
+/**
+ * Returns the type of the current break.
+ */
+function breakType(iterator) {
+ return %BreakIteratorBreakType(iterator.iterator);
+}
+
+
+addBoundMethod(Intl.v8BreakIterator, 'adoptText', adoptText, 1);
+addBoundMethod(Intl.v8BreakIterator, 'first', first, 0);
+addBoundMethod(Intl.v8BreakIterator, 'next', next, 0);
+addBoundMethod(Intl.v8BreakIterator, 'current', current, 0);
+addBoundMethod(Intl.v8BreakIterator, 'breakType', breakType, 0);
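+
+// Usage sketch for the bound break iterator methods above (illustrative;
+// break positions and types come from ICU):
+//
+//   var it = new Intl.v8BreakIterator(['en'], {type: 'word'});
+//   it.adoptText('Jack and Jill');
+//   it.first();       // 0
+//   it.next();        // 4 (boundary after 'Jack')
+//   it.breakType();   // e.g. 'letter'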
+
+// Save references to Intl objects and methods we use, for added security.
+var savedObjects = {
+ 'collator': Intl.Collator,
+ 'numberformat': Intl.NumberFormat,
+ 'dateformatall': Intl.DateTimeFormat,
+ 'dateformatdate': Intl.DateTimeFormat,
+ 'dateformattime': Intl.DateTimeFormat
+};
+
+
+// Default (created with undefined locales and options parameters) collator,
+// number and date format instances. They'll be created as needed.
+var defaultObjects = {
+ 'collator': undefined,
+ 'numberformat': undefined,
+ 'dateformatall': undefined,
+ 'dateformatdate': undefined,
+ 'dateformattime': undefined,
+};
+
+
+/**
+ * Returns a cached or newly created instance of a given service.
+ * We cache only default instances (where no locales or options are provided).
+ */
+function cachedOrNewService(service, locales, options, defaults) {
+ var useOptions = (defaults === undefined) ? options : defaults;
+ if (locales === undefined && options === undefined) {
+ if (defaultObjects[service] === undefined) {
+ defaultObjects[service] = new savedObjects[service](locales, useOptions);
+ }
+ return defaultObjects[service];
+ }
+ return new savedObjects[service](locales, useOptions);
+}
+
+
+/**
+ * Compares this and that, and returns a value that is less than 0, equal to
+ * 0, or greater than 0.
+ * Overrides the built-in method.
+ */
+$Object.defineProperty($String.prototype, 'localeCompare', {
+ value: function(that) {
+ if (%_IsConstructCall()) {
+ throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
+ }
+
+ if (IS_NULL_OR_UNDEFINED(this)) {
+ throw new $TypeError('Method invoked on undefined or null value.');
+ }
+
+ var locales = %_Arguments(1);
+ var options = %_Arguments(2);
+ var collator = cachedOrNewService('collator', locales, options);
+ return compare(collator, this, that);
+ },
+ writable: true,
+ configurable: true,
+ enumerable: false
+});
+%FunctionSetName($String.prototype.localeCompare, 'localeCompare');
+%FunctionRemovePrototype($String.prototype.localeCompare);
+%SetNativeFlag($String.prototype.localeCompare);
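+
+// Usage sketch for the overridden String.prototype.localeCompare
+// (illustrative; results depend on the ICU collation data):
+//
+//   'ä'.localeCompare('z', ['de']);   // negative: 'ä' sorts with 'a' in German
+//   'ä'.localeCompare('z', ['sv']);   // positive: 'ä' sorts after 'z' in Swedish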
+
+
+/**
+ * Formats a Number object (this) using locale and options values.
+ * If locale or options are omitted, defaults are used.
+ */
+$Object.defineProperty($Number.prototype, 'toLocaleString', {
+ value: function() {
+ if (%_IsConstructCall()) {
+ throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
+ }
+
+ if (!(this instanceof $Number) && typeof(this) !== 'number') {
+ throw new $TypeError('Method invoked on an object that is not Number.');
+ }
+
+ var locales = %_Arguments(0);
+ var options = %_Arguments(1);
+ var numberFormat = cachedOrNewService('numberformat', locales, options);
+ return formatNumber(numberFormat, this);
+ },
+ writable: true,
+ configurable: true,
+ enumerable: false
+});
+%FunctionSetName($Number.prototype.toLocaleString, 'toLocaleString');
+%FunctionRemovePrototype($Number.prototype.toLocaleString);
+%SetNativeFlag($Number.prototype.toLocaleString);
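+
+// Usage sketch for the overridden Number.prototype.toLocaleString
+// (illustrative; exact separators depend on the ICU locale data):
+//
+//   (123456.789).toLocaleString('de-DE');   // e.g. '123.456,789'
+//   (123456.789).toLocaleString('en-US',
+//       {style: 'currency', currency: 'USD'});   // e.g. '$123,456.79'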
+
+
+/**
+ * Returns the formatted date string. Throws if the receiver is not a Date;
+ * returns 'Invalid Date' if the date value is NaN.
+ */
+function toLocaleDateTime(date, locales, options, required, defaults, service) {
+ if (!(date instanceof $Date)) {
+ throw new $TypeError('Method invoked on an object that is not Date.');
+ }
+
+ if ($isNaN(date)) {
+ return 'Invalid Date';
+ }
+
+ var internalOptions = toDateTimeOptions(options, required, defaults);
+
+ var dateFormat =
+ cachedOrNewService(service, locales, options, internalOptions);
+
+ return formatDate(dateFormat, date);
+}
+
+
+/**
+ * Formats a Date object (this) using locale and options values.
+ * If locale or options are omitted, defaults are used - both date and time are
+ * present in the output.
+ */
+$Object.defineProperty($Date.prototype, 'toLocaleString', {
+ value: function() {
+ if (%_IsConstructCall()) {
+ throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
+ }
+
+ var locales = %_Arguments(0);
+ var options = %_Arguments(1);
+ return toLocaleDateTime(
+ this, locales, options, 'any', 'all', 'dateformatall');
+ },
+ writable: true,
+ configurable: true,
+ enumerable: false
+});
+%FunctionSetName($Date.prototype.toLocaleString, 'toLocaleString');
+%FunctionRemovePrototype($Date.prototype.toLocaleString);
+%SetNativeFlag($Date.prototype.toLocaleString);
+
+
+/**
+ * Formats a Date object (this) using locale and options values.
+ * If locale or options are omitted, defaults are used - only date is present
+ * in the output.
+ */
+$Object.defineProperty($Date.prototype, 'toLocaleDateString', {
+ value: function() {
+ if (%_IsConstructCall()) {
+ throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
+ }
+
+ var locales = %_Arguments(0);
+ var options = %_Arguments(1);
+ return toLocaleDateTime(
+ this, locales, options, 'date', 'date', 'dateformatdate');
+ },
+ writable: true,
+ configurable: true,
+ enumerable: false
+});
+%FunctionSetName($Date.prototype.toLocaleDateString, 'toLocaleDateString');
+%FunctionRemovePrototype($Date.prototype.toLocaleDateString);
+%SetNativeFlag($Date.prototype.toLocaleDateString);
+
+
+/**
+ * Formats a Date object (this) using locale and options values.
+ * If locale or options are omitted, defaults are used - only time is present
+ * in the output.
+ */
+$Object.defineProperty($Date.prototype, 'toLocaleTimeString', {
+ value: function() {
+ if (%_IsConstructCall()) {
+ throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
+ }
+
+ var locales = %_Arguments(0);
+ var options = %_Arguments(1);
+ return toLocaleDateTime(
+ this, locales, options, 'time', 'time', 'dateformattime');
+ },
+ writable: true,
+ configurable: true,
+ enumerable: false
+});
+%FunctionSetName($Date.prototype.toLocaleTimeString, 'toLocaleTimeString');
+%FunctionRemovePrototype($Date.prototype.toLocaleTimeString);
+%SetNativeFlag($Date.prototype.toLocaleTimeString);
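+
+// Usage sketch for the overridden Date locale methods above (illustrative;
+// output depends on the ICU locale data and the default time zone):
+//
+//   var d = new Date(2013, 11, 31, 23, 15, 30);
+//   d.toLocaleDateString('en-US');   // e.g. '12/31/2013'
+//   d.toLocaleTimeString('en-US');   // e.g. '11:15:30 PM'
+//   d.toLocaleString('de-DE');       // e.g. '31.12.2013, 23:15:30'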
+
+return Intl;
+}())});
diff --git a/deps/v8/src/ia32/assembler-ia32-inl.h b/deps/v8/src/ia32/assembler-ia32-inl.h
index 7fdf50c7a..05cc23a71 100644
--- a/deps/v8/src/ia32/assembler-ia32-inl.h
+++ b/deps/v8/src/ia32/assembler-ia32-inl.h
@@ -46,12 +46,22 @@ namespace v8 {
namespace internal {
+static const byte kCallOpcode = 0xE8;
+static const int kNoCodeAgeSequenceLength = 5;
+
+
// The modes possibly affected by apply must be in kApplyMask.
void RelocInfo::apply(intptr_t delta) {
- if (rmode_ == RUNTIME_ENTRY || IsCodeTarget(rmode_)) {
+ if (IsRuntimeEntry(rmode_) || IsCodeTarget(rmode_)) {
int32_t* p = reinterpret_cast<int32_t*>(pc_);
*p -= delta; // Relocate entry.
CPU::FlushICache(p, sizeof(uint32_t));
+ } else if (rmode_ == CODE_AGE_SEQUENCE) {
+ if (*pc_ == kCallOpcode) {
+ int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1);
+ *p -= delta; // Relocate entry.
+ CPU::FlushICache(p, sizeof(uint32_t));
+ }
} else if (rmode_ == JS_RETURN && IsPatchedReturnSequence()) {
// Special handling of js_return when a break point is set (call
// instruction has been inserted).
@@ -74,13 +84,13 @@ void RelocInfo::apply(intptr_t delta) {
Address RelocInfo::target_address() {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
+ ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
return Assembler::target_address_at(pc_);
}
Address RelocInfo::target_address_address() {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY
+ ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
|| rmode_ == EMBEDDED_OBJECT
|| rmode_ == EXTERNAL_REFERENCE);
return reinterpret_cast<Address>(pc_);
@@ -94,7 +104,7 @@ int RelocInfo::target_address_size() {
void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
Assembler::set_target_address_at(pc_, target);
- ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
+ ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
if (mode == UPDATE_WRITE_BARRIER && host() != NULL && IsCodeTarget(rmode_)) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
@@ -123,6 +133,7 @@ Object** RelocInfo::target_object_address() {
void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ ASSERT(!target->IsConsString());
Memory::Object_at(pc_) = target;
CPU::FlushICache(pc_, sizeof(Address));
if (mode == UPDATE_WRITE_BARRIER &&
@@ -140,24 +151,35 @@ Address* RelocInfo::target_reference_address() {
}
-Handle<JSGlobalPropertyCell> RelocInfo::target_cell_handle() {
- ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
+Address RelocInfo::target_runtime_entry(Assembler* origin) {
+ ASSERT(IsRuntimeEntry(rmode_));
+ return reinterpret_cast<Address>(*reinterpret_cast<int32_t*>(pc_));
+}
+
+
+void RelocInfo::set_target_runtime_entry(Address target,
+ WriteBarrierMode mode) {
+ ASSERT(IsRuntimeEntry(rmode_));
+ if (target_address() != target) set_target_address(target, mode);
+}
+
+
+Handle<Cell> RelocInfo::target_cell_handle() {
+ ASSERT(rmode_ == RelocInfo::CELL);
Address address = Memory::Address_at(pc_);
- return Handle<JSGlobalPropertyCell>(
- reinterpret_cast<JSGlobalPropertyCell**>(address));
+ return Handle<Cell>(reinterpret_cast<Cell**>(address));
}
-JSGlobalPropertyCell* RelocInfo::target_cell() {
- ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
- return JSGlobalPropertyCell::FromValueAddress(Memory::Address_at(pc_));
+Cell* RelocInfo::target_cell() {
+ ASSERT(rmode_ == RelocInfo::CELL);
+ return Cell::FromValueAddress(Memory::Address_at(pc_));
}
-void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell,
- WriteBarrierMode mode) {
- ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
- Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
+void RelocInfo::set_target_cell(Cell* cell, WriteBarrierMode mode) {
+ ASSERT(rmode_ == RelocInfo::CELL);
+ Address address = cell->address() + Cell::kValueOffset;
Memory::Address_at(pc_) = address;
CPU::FlushICache(pc_, sizeof(Address));
if (mode == UPDATE_WRITE_BARRIER && host() != NULL) {
@@ -169,6 +191,28 @@ void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell,
}
+Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
+ ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+ ASSERT(*pc_ == kCallOpcode);
+ return Memory::Object_Handle_at(pc_ + 1);
+}
+
+
+Code* RelocInfo::code_age_stub() {
+ ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+ ASSERT(*pc_ == kCallOpcode);
+ return Code::GetCodeFromTargetAddress(
+ Assembler::target_address_at(pc_ + 1));
+}
+
+
+void RelocInfo::set_code_age_stub(Code* stub) {
+ ASSERT(*pc_ == kCallOpcode);
+ ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+ Assembler::set_target_address_at(pc_ + 1, stub->instruction_start());
+}
+
+
Address RelocInfo::call_address() {
ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
@@ -206,7 +250,7 @@ Object** RelocInfo::call_object_address() {
bool RelocInfo::IsPatchedReturnSequence() {
- return *pc_ == 0xE8;
+ return *pc_ == kCallOpcode;
}
@@ -215,28 +259,29 @@ bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
}
-void RelocInfo::Visit(ObjectVisitor* visitor) {
+void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
visitor->VisitEmbeddedPointer(this);
CPU::FlushICache(pc_, sizeof(Address));
} else if (RelocInfo::IsCodeTarget(mode)) {
visitor->VisitCodeTarget(this);
- } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
- visitor->VisitGlobalPropertyCell(this);
+ } else if (mode == RelocInfo::CELL) {
+ visitor->VisitCell(this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
visitor->VisitExternalReference(this);
CPU::FlushICache(pc_, sizeof(Address));
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // TODO(isolates): Get a cached isolate below.
+ } else if (RelocInfo::IsCodeAgeSequence(mode)) {
+ visitor->VisitCodeAgeSequence(this);
+ #ifdef ENABLE_DEBUGGER_SUPPORT
} else if (((RelocInfo::IsJSReturn(mode) &&
IsPatchedReturnSequence()) ||
(RelocInfo::IsDebugBreakSlot(mode) &&
IsPatchedDebugBreakSlotSequence())) &&
- Isolate::Current()->debug()->has_break_points()) {
+ isolate->debug()->has_break_points()) {
visitor->VisitDebugTarget(this);
#endif
- } else if (mode == RelocInfo::RUNTIME_ENTRY) {
+ } else if (IsRuntimeEntry(mode)) {
visitor->VisitRuntimeEntry(this);
}
}
@@ -250,11 +295,13 @@ void RelocInfo::Visit(Heap* heap) {
CPU::FlushICache(pc_, sizeof(Address));
} else if (RelocInfo::IsCodeTarget(mode)) {
StaticVisitor::VisitCodeTarget(heap, this);
- } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
- StaticVisitor::VisitGlobalPropertyCell(heap, this);
+ } else if (mode == RelocInfo::CELL) {
+ StaticVisitor::VisitCell(heap, this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
StaticVisitor::VisitExternalReference(this);
CPU::FlushICache(pc_, sizeof(Address));
+ } else if (RelocInfo::IsCodeAgeSequence(mode)) {
+ StaticVisitor::VisitCodeAgeSequence(heap, this);
#ifdef ENABLE_DEBUGGER_SUPPORT
} else if (heap->isolate()->debug()->has_break_points() &&
((RelocInfo::IsJSReturn(mode) &&
@@ -263,7 +310,7 @@ void RelocInfo::Visit(Heap* heap) {
IsPatchedDebugBreakSlotSequence()))) {
StaticVisitor::VisitDebugTarget(heap, this);
#endif
- } else if (mode == RelocInfo::RUNTIME_ENTRY) {
+ } else if (IsRuntimeEntry(mode)) {
StaticVisitor::VisitRuntimeEntry(this);
}
}
@@ -272,7 +319,7 @@ void RelocInfo::Visit(Heap* heap) {
Immediate::Immediate(int x) {
x_ = x;
- rmode_ = RelocInfo::NONE;
+ rmode_ = RelocInfo::NONE32;
}
@@ -289,29 +336,30 @@ Immediate::Immediate(Label* internal_offset) {
Immediate::Immediate(Handle<Object> handle) {
+ AllowDeferredHandleDereference using_raw_address;
// Verify all Objects referred by code are NOT in new space.
Object* obj = *handle;
- ASSERT(!HEAP->InNewSpace(obj));
if (obj->IsHeapObject()) {
+ ASSERT(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
x_ = reinterpret_cast<intptr_t>(handle.location());
rmode_ = RelocInfo::EMBEDDED_OBJECT;
} else {
// no relocation needed
x_ = reinterpret_cast<intptr_t>(obj);
- rmode_ = RelocInfo::NONE;
+ rmode_ = RelocInfo::NONE32;
}
}
Immediate::Immediate(Smi* value) {
x_ = reinterpret_cast<intptr_t>(value);
- rmode_ = RelocInfo::NONE;
+ rmode_ = RelocInfo::NONE32;
}
Immediate::Immediate(Address addr) {
x_ = reinterpret_cast<int32_t>(addr);
- rmode_ = RelocInfo::NONE;
+ rmode_ = RelocInfo::NONE32;
}
@@ -322,6 +370,7 @@ void Assembler::emit(uint32_t x) {
void Assembler::emit(Handle<Object> handle) {
+ AllowDeferredHandleDereference heap_object_check;
// Verify all Objects referred by code are NOT in new space.
Object* obj = *handle;
ASSERT(!isolate()->heap()->InNewSpace(obj));
@@ -338,20 +387,29 @@ void Assembler::emit(Handle<Object> handle) {
void Assembler::emit(uint32_t x, RelocInfo::Mode rmode, TypeFeedbackId id) {
if (rmode == RelocInfo::CODE_TARGET && !id.IsNone()) {
RecordRelocInfo(RelocInfo::CODE_TARGET_WITH_ID, id.ToInt());
- } else if (rmode != RelocInfo::NONE) {
+ } else if (!RelocInfo::IsNone(rmode)
+ && rmode != RelocInfo::CODE_AGE_SEQUENCE) {
RecordRelocInfo(rmode);
}
emit(x);
}
+void Assembler::emit(Handle<Code> code,
+ RelocInfo::Mode rmode,
+ TypeFeedbackId id) {
+ AllowDeferredHandleDereference embedding_raw_address;
+ emit(reinterpret_cast<intptr_t>(code.location()), rmode, id);
+}
+
+
void Assembler::emit(const Immediate& x) {
if (x.rmode_ == RelocInfo::INTERNAL_REFERENCE) {
Label* label = reinterpret_cast<Label*>(x.x_);
emit_code_relative_offset(label);
return;
}
- if (x.rmode_ != RelocInfo::NONE) RecordRelocInfo(x.rmode_);
+ if (!RelocInfo::IsNone(x.rmode_)) RecordRelocInfo(x.rmode_);
emit(x.x_);
}
@@ -368,7 +426,7 @@ void Assembler::emit_code_relative_offset(Label* label) {
void Assembler::emit_w(const Immediate& x) {
- ASSERT(x.rmode_ == RelocInfo::NONE);
+ ASSERT(RelocInfo::IsNone(x.rmode_));
uint16_t value = static_cast<uint16_t>(x.x_);
reinterpret_cast<uint16_t*>(pc_)[0] = value;
pc_ += sizeof(uint16_t);
diff --git a/deps/v8/src/ia32/assembler-ia32.cc b/deps/v8/src/ia32/assembler-ia32.cc
index f291b0526..0557ed885 100644
--- a/deps/v8/src/ia32/assembler-ia32.cc
+++ b/deps/v8/src/ia32/assembler-ia32.cc
@@ -36,7 +36,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_IA32)
+#if V8_TARGET_ARCH_IA32
#include "disassembler.h"
#include "macro-assembler.h"
@@ -52,7 +52,41 @@ namespace internal {
bool CpuFeatures::initialized_ = false;
#endif
uint64_t CpuFeatures::supported_ = 0;
-uint64_t CpuFeatures::found_by_runtime_probing_ = 0;
+uint64_t CpuFeatures::found_by_runtime_probing_only_ = 0;
+uint64_t CpuFeatures::cross_compile_ = 0;
+
+
+ExternalReference ExternalReference::cpu_features() {
+ ASSERT(CpuFeatures::initialized_);
+ return ExternalReference(&CpuFeatures::supported_);
+}
+
+
+int IntelDoubleRegister::NumAllocatableRegisters() {
+ if (CpuFeatures::IsSupported(SSE2)) {
+ return XMMRegister::kNumAllocatableRegisters;
+ } else {
+ return X87Register::kNumAllocatableRegisters;
+ }
+}
+
+
+int IntelDoubleRegister::NumRegisters() {
+ if (CpuFeatures::IsSupported(SSE2)) {
+ return XMMRegister::kNumRegisters;
+ } else {
+ return X87Register::kNumRegisters;
+ }
+}
+
+
+const char* IntelDoubleRegister::AllocationIndexToString(int index) {
+ if (CpuFeatures::IsSupported(SSE2)) {
+ return XMMRegister::AllocationIndexToString(index);
+ } else {
+ return X87Register::AllocationIndexToString(index);
+ }
+}
// The Probe method needs executable memory, so it uses Heap::CreateCode.
@@ -68,81 +102,28 @@ void CpuFeatures::Probe() {
return; // No features if we might serialize.
}
- const int kBufferSize = 4 * KB;
- VirtualMemory* memory = new VirtualMemory(kBufferSize);
- if (!memory->IsReserved()) {
- delete memory;
- return;
+ uint64_t probed_features = 0;
+ CPU cpu;
+ if (cpu.has_sse41()) {
+ probed_features |= static_cast<uint64_t>(1) << SSE4_1;
}
- ASSERT(memory->size() >= static_cast<size_t>(kBufferSize));
- if (!memory->Commit(memory->address(), kBufferSize, true/*executable*/)) {
- delete memory;
- return;
+ if (cpu.has_sse3()) {
+ probed_features |= static_cast<uint64_t>(1) << SSE3;
}
-
- Assembler assm(NULL, memory->address(), kBufferSize);
- Label cpuid, done;
-#define __ assm.
- // Save old esp, since we are going to modify the stack.
- __ push(ebp);
- __ pushfd();
- __ push(ecx);
- __ push(ebx);
- __ mov(ebp, esp);
-
- // If we can modify bit 21 of the EFLAGS register, then CPUID is supported.
- __ pushfd();
- __ pop(eax);
- __ mov(edx, eax);
- __ xor_(eax, 0x200000); // Flip bit 21.
- __ push(eax);
- __ popfd();
- __ pushfd();
- __ pop(eax);
- __ xor_(eax, edx); // Different if CPUID is supported.
- __ j(not_zero, &cpuid);
-
- // CPUID not supported. Clear the supported features in edx:eax.
- __ xor_(eax, eax);
- __ xor_(edx, edx);
- __ jmp(&done);
-
- // Invoke CPUID with 1 in eax to get feature information in
- // ecx:edx. Temporarily enable CPUID support because we know it's
- // safe here.
- __ bind(&cpuid);
- __ mov(eax, 1);
- supported_ = (1 << CPUID);
- { Scope fscope(CPUID);
- __ cpuid();
+ if (cpu.has_sse2()) {
+ probed_features |= static_cast<uint64_t>(1) << SSE2;
+ }
+ if (cpu.has_cmov()) {
+ probed_features |= static_cast<uint64_t>(1) << CMOV;
}
- supported_ = 0;
-
- // Move the result from ecx:edx to edx:eax and make sure to mark the
- // CPUID feature as supported.
- __ mov(eax, edx);
- __ or_(eax, 1 << CPUID);
- __ mov(edx, ecx);
-
- // Done.
- __ bind(&done);
- __ mov(esp, ebp);
- __ pop(ebx);
- __ pop(ecx);
- __ popfd();
- __ pop(ebp);
- __ ret(0);
-#undef __
- typedef uint64_t (*F0)();
- F0 probe = FUNCTION_CAST<F0>(reinterpret_cast<Address>(memory->address()));
- supported_ = probe();
- found_by_runtime_probing_ = supported_;
- uint64_t os_guarantees = OS::CpuFeaturesImpliedByPlatform();
- supported_ |= os_guarantees;
- found_by_runtime_probing_ &= ~os_guarantees;
+ // SAHF must be available in compat/legacy mode.
+ ASSERT(cpu.has_sahf());
+ probed_features |= static_cast<uint64_t>(1) << SAHF;
- delete memory;
+ uint64_t platform_features = OS::CpuFeaturesImpliedByPlatform();
+ supported_ = probed_features | platform_features;
+ found_by_runtime_probing_only_ = probed_features & ~platform_features;
}
@@ -169,7 +150,7 @@ void Displacement::init(Label* L, Type type) {
const int RelocInfo::kApplyMask =
RelocInfo::kCodeTargetMask | 1 << RelocInfo::RUNTIME_ENTRY |
1 << RelocInfo::JS_RETURN | 1 << RelocInfo::INTERNAL_REFERENCE |
- 1 << RelocInfo::DEBUG_BREAK_SLOT;
+ 1 << RelocInfo::DEBUG_BREAK_SLOT | 1 << RelocInfo::CODE_AGE_SEQUENCE;
bool RelocInfo::IsCodedSpecially() {
@@ -209,7 +190,7 @@ void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
#endif
// Patch the code.
- patcher.masm()->call(target, RelocInfo::NONE);
+ patcher.masm()->call(target, RelocInfo::NONE32);
// Check that the size of the code generated is as expected.
ASSERT_EQ(kCallCodeSize,
@@ -228,11 +209,11 @@ void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
Operand::Operand(Register base, int32_t disp, RelocInfo::Mode rmode) {
// [base + disp/r]
- if (disp == 0 && rmode == RelocInfo::NONE && !base.is(ebp)) {
+ if (disp == 0 && RelocInfo::IsNone(rmode) && !base.is(ebp)) {
// [base]
set_modrm(0, base);
if (base.is(esp)) set_sib(times_1, esp, base);
- } else if (is_int8(disp) && rmode == RelocInfo::NONE) {
+ } else if (is_int8(disp) && RelocInfo::IsNone(rmode)) {
// [base + disp8]
set_modrm(1, base);
if (base.is(esp)) set_sib(times_1, esp, base);
@@ -253,11 +234,11 @@ Operand::Operand(Register base,
RelocInfo::Mode rmode) {
ASSERT(!index.is(esp)); // illegal addressing mode
// [base + index*scale + disp/r]
- if (disp == 0 && rmode == RelocInfo::NONE && !base.is(ebp)) {
+ if (disp == 0 && RelocInfo::IsNone(rmode) && !base.is(ebp)) {
// [base + index*scale]
set_modrm(0, esp);
set_sib(scale, index, base);
- } else if (is_int8(disp) && rmode == RelocInfo::NONE) {
+ } else if (is_int8(disp) && RelocInfo::IsNone(rmode)) {
// [base + index*scale + disp8]
set_modrm(1, esp);
set_sib(scale, index, base);
@@ -312,48 +293,19 @@ Register Operand::reg() const {
static void InitCoverageLog();
#endif
-Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
- : AssemblerBase(arg_isolate),
- positions_recorder_(this),
- emit_debug_code_(FLAG_debug_code) {
- if (buffer == NULL) {
- // Do our own buffer management.
- if (buffer_size <= kMinimalBufferSize) {
- buffer_size = kMinimalBufferSize;
-
- if (isolate()->assembler_spare_buffer() != NULL) {
- buffer = isolate()->assembler_spare_buffer();
- isolate()->set_assembler_spare_buffer(NULL);
- }
- }
- if (buffer == NULL) {
- buffer_ = NewArray<byte>(buffer_size);
- } else {
- buffer_ = static_cast<byte*>(buffer);
- }
- buffer_size_ = buffer_size;
- own_buffer_ = true;
- } else {
- // Use externally provided buffer instead.
- ASSERT(buffer_size > 0);
- buffer_ = static_cast<byte*>(buffer);
- buffer_size_ = buffer_size;
- own_buffer_ = false;
- }
-
+Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
+ : AssemblerBase(isolate, buffer, buffer_size),
+ positions_recorder_(this) {
// Clear the buffer in debug mode unless it was provided by the
// caller in which case we can't be sure it's okay to overwrite
// existing code in it; see CodePatcher::CodePatcher(...).
#ifdef DEBUG
if (own_buffer_) {
- memset(buffer_, 0xCC, buffer_size); // int3
+ memset(buffer_, 0xCC, buffer_size_); // int3
}
#endif
- // Set up buffer pointers.
- ASSERT(buffer_ != NULL);
- pc_ = buffer_;
- reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
+ reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
#ifdef GENERATED_CODE_COVERAGE
InitCoverageLog();
@@ -361,18 +313,6 @@ Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
}
-Assembler::~Assembler() {
- if (own_buffer_) {
- if (isolate()->assembler_spare_buffer() == NULL &&
- buffer_size_ == kMinimalBufferSize) {
- isolate()->set_assembler_spare_buffer(buffer_);
- } else {
- DeleteArray(buffer_);
- }
- }
-}
-
-
void Assembler::GetCode(CodeDesc* desc) {
// Finalize code (at this point overflow() may be true, but the gap ensures
// that we are still not overlapping instructions and relocation info).
@@ -483,7 +423,6 @@ void Assembler::CodeTargetAlign() {
void Assembler::cpuid() {
- ASSERT(CpuFeatures::IsEnabled(CPUID));
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0xA2);
@@ -705,7 +644,7 @@ void Assembler::movzx_w(Register dst, const Operand& src) {
void Assembler::cmov(Condition cc, Register dst, const Operand& src) {
- ASSERT(CpuFeatures::IsEnabled(CMOV));
+ ASSERT(IsEnabled(CMOV));
EnsureSpace ensure_space(this);
// Opcode: 0f 40 + cc /r.
EMIT(0x0F);
@@ -1065,6 +1004,27 @@ void Assembler::rcr(Register dst, uint8_t imm8) {
}
+void Assembler::ror(Register dst, uint8_t imm8) {
+ EnsureSpace ensure_space(this);
+ ASSERT(is_uint5(imm8)); // illegal shift count
+ if (imm8 == 1) {
+ EMIT(0xD1);
+ EMIT(0xC8 | dst.code());
+ } else {
+ EMIT(0xC1);
+ EMIT(0xC8 | dst.code());
+ EMIT(imm8);
+ }
+}
+
+
+void Assembler::ror_cl(Register dst) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xD3);
+ EMIT(0xC8 | dst.code());
+}
+
+
void Assembler::sar(Register dst, uint8_t imm8) {
EnsureSpace ensure_space(this);
ASSERT(is_uint5(imm8)); // illegal shift count
@@ -1172,30 +1132,21 @@ void Assembler::sub(const Operand& dst, Register src) {
void Assembler::test(Register reg, const Immediate& imm) {
+ if (RelocInfo::IsNone(imm.rmode_) && is_uint8(imm.x_)) {
+ test_b(reg, imm.x_);
+ return;
+ }
+
EnsureSpace ensure_space(this);
- // Only use test against byte for registers that have a byte
- // variant: eax, ebx, ecx, and edx.
- if (imm.rmode_ == RelocInfo::NONE &&
- is_uint8(imm.x_) &&
- reg.is_byte_register()) {
- uint8_t imm8 = imm.x_;
- if (reg.is(eax)) {
- EMIT(0xA8);
- EMIT(imm8);
- } else {
- emit_arith_b(0xF6, 0xC0, reg, imm8);
- }
+ // This is not using emit_arith because test doesn't support
+ // sign-extension of 8-bit operands.
+ if (reg.is(eax)) {
+ EMIT(0xA9);
} else {
- // This is not using emit_arith because test doesn't support
- // sign-extension of 8-bit operands.
- if (reg.is(eax)) {
- EMIT(0xA9);
- } else {
- EMIT(0xF7);
- EMIT(0xC0 | reg.code());
- }
- emit(imm);
+ EMIT(0xF7);
+ EMIT(0xC0 | reg.code());
}
+ emit(imm);
}
@@ -1215,6 +1166,13 @@ void Assembler::test_b(Register reg, const Operand& op) {
void Assembler::test(const Operand& op, const Immediate& imm) {
+ if (op.is_reg_only()) {
+ test(op.reg(), imm);
+ return;
+ }
+ if (RelocInfo::IsNone(imm.rmode_) && is_uint8(imm.x_)) {
+ return test_b(op, imm.x_);
+ }
EnsureSpace ensure_space(this);
EMIT(0xF7);
emit_operand(eax, op);
@@ -1222,9 +1180,26 @@ void Assembler::test(const Operand& op, const Immediate& imm) {
}
+void Assembler::test_b(Register reg, uint8_t imm8) {
+ EnsureSpace ensure_space(this);
+ // Only use test against byte for registers that have a byte
+ // variant: eax, ebx, ecx, and edx.
+ if (reg.is(eax)) {
+ EMIT(0xA8);
+ EMIT(imm8);
+ } else if (reg.is_byte_register()) {
+ emit_arith_b(0xF6, 0xC0, reg, imm8);
+ } else {
+ EMIT(0xF7);
+ EMIT(0xC0 | reg.code());
+ emit(imm8);
+ }
+}
+
+
void Assembler::test_b(const Operand& op, uint8_t imm8) {
- if (op.is_reg_only() && !op.reg().is_byte_register()) {
- test(op, Immediate(imm8));
+ if (op.is_reg_only()) {
+ test_b(op.reg(), imm8);
return;
}
EnsureSpace ensure_space(this);
@@ -1294,14 +1269,6 @@ void Assembler::nop() {
}
-void Assembler::rdtsc() {
- ASSERT(CpuFeatures::IsEnabled(RDTSC));
- EnsureSpace ensure_space(this);
- EMIT(0x0F);
- EMIT(0x31);
-}
-
-
void Assembler::ret(int imm16) {
EnsureSpace ensure_space(this);
ASSERT(is_uint16(imm16));
@@ -1415,7 +1382,11 @@ void Assembler::call(byte* entry, RelocInfo::Mode rmode) {
EnsureSpace ensure_space(this);
ASSERT(!RelocInfo::IsCodeTarget(rmode));
EMIT(0xE8);
- emit(entry - (pc_ + sizeof(int32_t)), rmode);
+ if (RelocInfo::IsRuntimeEntry(rmode)) {
+ emit(reinterpret_cast<uint32_t>(entry), rmode);
+ } else {
+ emit(entry - (pc_ + sizeof(int32_t)), rmode);
+ }
}
@@ -1443,9 +1414,10 @@ void Assembler::call(Handle<Code> code,
TypeFeedbackId ast_id) {
positions_recorder()->WriteRecordedPositions();
EnsureSpace ensure_space(this);
- ASSERT(RelocInfo::IsCodeTarget(rmode));
+ ASSERT(RelocInfo::IsCodeTarget(rmode)
+ || rmode == RelocInfo::CODE_AGE_SEQUENCE);
EMIT(0xE8);
- emit(reinterpret_cast<intptr_t>(code.location()), rmode, ast_id);
+ emit(code, rmode, ast_id);
}
@@ -1480,7 +1452,11 @@ void Assembler::jmp(byte* entry, RelocInfo::Mode rmode) {
EnsureSpace ensure_space(this);
ASSERT(!RelocInfo::IsCodeTarget(rmode));
EMIT(0xE9);
- emit(entry - (pc_ + sizeof(int32_t)), rmode);
+ if (RelocInfo::IsRuntimeEntry(rmode)) {
+ emit(reinterpret_cast<uint32_t>(entry), rmode);
+ } else {
+ emit(entry - (pc_ + sizeof(int32_t)), rmode);
+ }
}
@@ -1495,13 +1471,13 @@ void Assembler::jmp(Handle<Code> code, RelocInfo::Mode rmode) {
EnsureSpace ensure_space(this);
ASSERT(RelocInfo::IsCodeTarget(rmode));
EMIT(0xE9);
- emit(reinterpret_cast<intptr_t>(code.location()), rmode);
+ emit(code, rmode);
}
void Assembler::j(Condition cc, Label* L, Label::Distance distance) {
EnsureSpace ensure_space(this);
- ASSERT(0 <= cc && cc < 16);
+ ASSERT(0 <= cc && static_cast<int>(cc) < 16);
if (L->is_bound()) {
const int short_size = 2;
const int long_size = 6;
@@ -1533,11 +1509,15 @@ void Assembler::j(Condition cc, Label* L, Label::Distance distance) {
void Assembler::j(Condition cc, byte* entry, RelocInfo::Mode rmode) {
EnsureSpace ensure_space(this);
- ASSERT((0 <= cc) && (cc < 16));
+ ASSERT((0 <= cc) && (static_cast<int>(cc) < 16));
// 0000 1111 1000 tttn #32-bit disp.
EMIT(0x0F);
EMIT(0x80 | cc);
- emit(entry - (pc_ + sizeof(int32_t)), rmode);
+ if (RelocInfo::IsRuntimeEntry(rmode)) {
+ emit(reinterpret_cast<uint32_t>(entry), rmode);
+ } else {
+ emit(entry - (pc_ + sizeof(int32_t)), rmode);
+ }
}
@@ -1546,7 +1526,7 @@ void Assembler::j(Condition cc, Handle<Code> code) {
// 0000 1111 1000 tttn #32-bit disp
EMIT(0x0F);
EMIT(0x80 | cc);
- emit(reinterpret_cast<intptr_t>(code.location()), RelocInfo::CODE_TARGET);
+ emit(code, RelocInfo::CODE_TARGET);
}
@@ -1613,6 +1593,13 @@ void Assembler::fstp_s(const Operand& adr) {
}
+void Assembler::fst_s(const Operand& adr) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xD9);
+ emit_operand(edx, adr);
+}
+
+
void Assembler::fstp_d(const Operand& adr) {
EnsureSpace ensure_space(this);
EMIT(0xDD);
@@ -1649,7 +1636,7 @@ void Assembler::fistp_s(const Operand& adr) {
void Assembler::fisttp_s(const Operand& adr) {
- ASSERT(CpuFeatures::IsEnabled(SSE3));
+ ASSERT(IsEnabled(SSE3));
EnsureSpace ensure_space(this);
EMIT(0xDB);
emit_operand(ecx, adr);
@@ -1657,7 +1644,7 @@ void Assembler::fisttp_s(const Operand& adr) {
void Assembler::fisttp_d(const Operand& adr) {
- ASSERT(CpuFeatures::IsEnabled(SSE3));
+ ASSERT(IsEnabled(SSE3));
EnsureSpace ensure_space(this);
EMIT(0xDD);
emit_operand(ecx, adr);
@@ -1747,12 +1734,24 @@ void Assembler::fadd(int i) {
}
+void Assembler::fadd_i(int i) {
+ EnsureSpace ensure_space(this);
+ emit_farith(0xD8, 0xC0, i);
+}
+
+
void Assembler::fsub(int i) {
EnsureSpace ensure_space(this);
emit_farith(0xDC, 0xE8, i);
}
+void Assembler::fsub_i(int i) {
+ EnsureSpace ensure_space(this);
+ emit_farith(0xD8, 0xE0, i);
+}
+
+
void Assembler::fisub_s(const Operand& adr) {
EnsureSpace ensure_space(this);
EMIT(0xDA);
@@ -1760,6 +1759,12 @@ void Assembler::fisub_s(const Operand& adr) {
}
+void Assembler::fmul_i(int i) {
+ EnsureSpace ensure_space(this);
+ emit_farith(0xD8, 0xC8, i);
+}
+
+
void Assembler::fmul(int i) {
EnsureSpace ensure_space(this);
emit_farith(0xDC, 0xC8, i);
@@ -1772,6 +1777,12 @@ void Assembler::fdiv(int i) {
}
+void Assembler::fdiv_i(int i) {
+ EnsureSpace ensure_space(this);
+ emit_farith(0xD8, 0xF0, i);
+}
+
+
void Assembler::faddp(int i) {
EnsureSpace ensure_space(this);
emit_farith(0xDE, 0xC0, i);
@@ -1919,7 +1930,7 @@ void Assembler::setcc(Condition cc, Register reg) {
void Assembler::cvttss2si(Register dst, const Operand& src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
+ ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0xF3);
EMIT(0x0F);
@@ -1929,7 +1940,7 @@ void Assembler::cvttss2si(Register dst, const Operand& src) {
void Assembler::cvttsd2si(Register dst, const Operand& src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
+ ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0xF2);
EMIT(0x0F);
@@ -1939,7 +1950,7 @@ void Assembler::cvttsd2si(Register dst, const Operand& src) {
void Assembler::cvtsd2si(Register dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
+ ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0xF2);
EMIT(0x0F);
@@ -1949,7 +1960,7 @@ void Assembler::cvtsd2si(Register dst, XMMRegister src) {
void Assembler::cvtsi2sd(XMMRegister dst, const Operand& src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
+ ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0xF2);
EMIT(0x0F);
@@ -1959,7 +1970,7 @@ void Assembler::cvtsi2sd(XMMRegister dst, const Operand& src) {
void Assembler::cvtss2sd(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
+ ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0xF3);
EMIT(0x0F);
@@ -1969,7 +1980,7 @@ void Assembler::cvtss2sd(XMMRegister dst, XMMRegister src) {
void Assembler::cvtsd2ss(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
+ ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0xF2);
EMIT(0x0F);
@@ -1979,7 +1990,17 @@ void Assembler::cvtsd2ss(XMMRegister dst, XMMRegister src) {
void Assembler::addsd(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
+ ASSERT(IsEnabled(SSE2));
+ EnsureSpace ensure_space(this);
+ EMIT(0xF2);
+ EMIT(0x0F);
+ EMIT(0x58);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::addsd(XMMRegister dst, const Operand& src) {
+ ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0xF2);
EMIT(0x0F);
@@ -1989,7 +2010,17 @@ void Assembler::addsd(XMMRegister dst, XMMRegister src) {
void Assembler::mulsd(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
+ ASSERT(IsEnabled(SSE2));
+ EnsureSpace ensure_space(this);
+ EMIT(0xF2);
+ EMIT(0x0F);
+ EMIT(0x59);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::mulsd(XMMRegister dst, const Operand& src) {
+ ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0xF2);
EMIT(0x0F);
@@ -1999,7 +2030,7 @@ void Assembler::mulsd(XMMRegister dst, XMMRegister src) {
void Assembler::subsd(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
+ ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0xF2);
EMIT(0x0F);
@@ -2009,7 +2040,7 @@ void Assembler::subsd(XMMRegister dst, XMMRegister src) {
void Assembler::divsd(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
+ ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0xF2);
EMIT(0x0F);
@@ -2019,7 +2050,7 @@ void Assembler::divsd(XMMRegister dst, XMMRegister src) {
void Assembler::xorpd(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
+ ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
@@ -2037,6 +2068,7 @@ void Assembler::xorps(XMMRegister dst, XMMRegister src) {
void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
+ ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0xF2);
EMIT(0x0F);
@@ -2046,6 +2078,7 @@ void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
void Assembler::andpd(XMMRegister dst, XMMRegister src) {
+ ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
@@ -2055,6 +2088,7 @@ void Assembler::andpd(XMMRegister dst, XMMRegister src) {
void Assembler::orpd(XMMRegister dst, XMMRegister src) {
+ ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
@@ -2064,7 +2098,7 @@ void Assembler::orpd(XMMRegister dst, XMMRegister src) {
void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
+ ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
@@ -2074,7 +2108,7 @@ void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
void Assembler::ucomisd(XMMRegister dst, const Operand& src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
+ ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
@@ -2084,7 +2118,7 @@ void Assembler::ucomisd(XMMRegister dst, const Operand& src) {
void Assembler::roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode) {
- ASSERT(CpuFeatures::IsEnabled(SSE4_1));
+ ASSERT(IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
@@ -2095,8 +2129,9 @@ void Assembler::roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode) {
EMIT(static_cast<byte>(mode) | 0x8);
}
+
void Assembler::movmskpd(Register dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
+ ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
@@ -2105,8 +2140,17 @@ void Assembler::movmskpd(Register dst, XMMRegister src) {
}
+void Assembler::movmskps(Register dst, XMMRegister src) {
+ ASSERT(IsEnabled(SSE2));
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0x50);
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::pcmpeqd(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
+ ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
@@ -2116,7 +2160,7 @@ void Assembler::pcmpeqd(XMMRegister dst, XMMRegister src) {
void Assembler::cmpltsd(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
+ ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0xF2);
EMIT(0x0F);
@@ -2127,7 +2171,7 @@ void Assembler::cmpltsd(XMMRegister dst, XMMRegister src) {
void Assembler::movaps(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
+ ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0x28);
@@ -2136,7 +2180,7 @@ void Assembler::movaps(XMMRegister dst, XMMRegister src) {
void Assembler::movdqa(const Operand& dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
+ ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
@@ -2146,7 +2190,7 @@ void Assembler::movdqa(const Operand& dst, XMMRegister src) {
void Assembler::movdqa(XMMRegister dst, const Operand& src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
+ ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
@@ -2156,7 +2200,7 @@ void Assembler::movdqa(XMMRegister dst, const Operand& src) {
void Assembler::movdqu(const Operand& dst, XMMRegister src ) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
+ ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0xF3);
EMIT(0x0F);
@@ -2166,7 +2210,7 @@ void Assembler::movdqu(const Operand& dst, XMMRegister src ) {
void Assembler::movdqu(XMMRegister dst, const Operand& src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
+ ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0xF3);
EMIT(0x0F);
@@ -2176,7 +2220,7 @@ void Assembler::movdqu(XMMRegister dst, const Operand& src) {
void Assembler::movntdqa(XMMRegister dst, const Operand& src) {
- ASSERT(CpuFeatures::IsEnabled(SSE4_1));
+ ASSERT(IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
@@ -2187,7 +2231,7 @@ void Assembler::movntdqa(XMMRegister dst, const Operand& src) {
void Assembler::movntdq(const Operand& dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
+ ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
@@ -2201,25 +2245,14 @@ void Assembler::prefetch(const Operand& src, int level) {
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0x18);
- XMMRegister code = { level }; // Emit hint number in Reg position of RegR/M.
+ // Emit the hint number in the Reg field of the ModR/M byte.
+ XMMRegister code = XMMRegister::from_code(level);
emit_sse_operand(code, src);
}
-void Assembler::movdbl(XMMRegister dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- movsd(dst, src);
-}
-
-
-void Assembler::movdbl(const Operand& dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- movsd(dst, src);
-}
-
-
void Assembler::movsd(const Operand& dst, XMMRegister src ) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
+ ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0xF2); // double
EMIT(0x0F);
@@ -2229,7 +2262,7 @@ void Assembler::movsd(const Operand& dst, XMMRegister src ) {
void Assembler::movsd(XMMRegister dst, const Operand& src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
+ ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0xF2); // double
EMIT(0x0F);
@@ -2239,7 +2272,7 @@ void Assembler::movsd(XMMRegister dst, const Operand& src) {
void Assembler::movsd(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
+ ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0xF2);
EMIT(0x0F);
@@ -2249,7 +2282,7 @@ void Assembler::movsd(XMMRegister dst, XMMRegister src) {
void Assembler::movss(const Operand& dst, XMMRegister src ) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
+ ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0xF3); // float
EMIT(0x0F);
@@ -2259,7 +2292,7 @@ void Assembler::movss(const Operand& dst, XMMRegister src ) {
void Assembler::movss(XMMRegister dst, const Operand& src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
+ ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0xF3); // float
EMIT(0x0F);
@@ -2269,7 +2302,7 @@ void Assembler::movss(XMMRegister dst, const Operand& src) {
void Assembler::movss(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
+ ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0xF3);
EMIT(0x0F);
@@ -2279,7 +2312,7 @@ void Assembler::movss(XMMRegister dst, XMMRegister src) {
void Assembler::movd(XMMRegister dst, const Operand& src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
+ ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
@@ -2289,7 +2322,7 @@ void Assembler::movd(XMMRegister dst, const Operand& src) {
void Assembler::movd(const Operand& dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
+ ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
@@ -2299,20 +2332,28 @@ void Assembler::movd(const Operand& dst, XMMRegister src) {
void Assembler::extractps(Register dst, XMMRegister src, byte imm8) {
- ASSERT(CpuFeatures::IsSupported(SSE4_1));
+ ASSERT(IsEnabled(SSE4_1));
ASSERT(is_uint8(imm8));
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
EMIT(0x3A);
EMIT(0x17);
- emit_sse_operand(dst, src);
+ emit_sse_operand(src, dst);
EMIT(imm8);
}
+void Assembler::andps(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0x54);
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::pand(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
+ ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
@@ -2322,7 +2363,7 @@ void Assembler::pand(XMMRegister dst, XMMRegister src) {
void Assembler::pxor(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
+ ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
@@ -2332,7 +2373,7 @@ void Assembler::pxor(XMMRegister dst, XMMRegister src) {
void Assembler::por(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
+ ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
@@ -2342,7 +2383,7 @@ void Assembler::por(XMMRegister dst, XMMRegister src) {
void Assembler::ptest(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE4_1));
+ ASSERT(IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
@@ -2353,7 +2394,7 @@ void Assembler::ptest(XMMRegister dst, XMMRegister src) {
void Assembler::psllq(XMMRegister reg, int8_t shift) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
+ ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
@@ -2364,7 +2405,7 @@ void Assembler::psllq(XMMRegister reg, int8_t shift) {
void Assembler::psllq(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
+ ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
@@ -2374,7 +2415,7 @@ void Assembler::psllq(XMMRegister dst, XMMRegister src) {
void Assembler::psrlq(XMMRegister reg, int8_t shift) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
+ ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
@@ -2385,7 +2426,7 @@ void Assembler::psrlq(XMMRegister reg, int8_t shift) {
void Assembler::psrlq(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
+ ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
@@ -2394,8 +2435,8 @@ void Assembler::psrlq(XMMRegister dst, XMMRegister src) {
}
-void Assembler::pshufd(XMMRegister dst, XMMRegister src, int8_t shuffle) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
+void Assembler::pshufd(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
+ ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
@@ -2406,7 +2447,7 @@ void Assembler::pshufd(XMMRegister dst, XMMRegister src, int8_t shuffle) {
void Assembler::pextrd(const Operand& dst, XMMRegister src, int8_t offset) {
- ASSERT(CpuFeatures::IsEnabled(SSE4_1));
+ ASSERT(IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
@@ -2418,7 +2459,7 @@ void Assembler::pextrd(const Operand& dst, XMMRegister src, int8_t offset) {
void Assembler::pinsrd(XMMRegister dst, const Operand& src, int8_t offset) {
- ASSERT(CpuFeatures::IsEnabled(SSE4_1));
+ ASSERT(IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
@@ -2445,8 +2486,13 @@ void Assembler::emit_sse_operand(Register dst, XMMRegister src) {
}
+void Assembler::emit_sse_operand(XMMRegister dst, Register src) {
+ EMIT(0xC0 | (dst.code() << 3) | src.code());
+}
+
+
void Assembler::Print() {
- Disassembler::Decode(stdout, buffer_, pc_);
+ Disassembler::Decode(isolate(), stdout, buffer_, pc_);
}
@@ -2504,9 +2550,9 @@ void Assembler::GrowBuffer() {
// Copy the data.
int pc_delta = desc.buffer - buffer_;
int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
- memmove(desc.buffer, buffer_, desc.instr_size);
- memmove(rc_delta + reloc_info_writer.pos(),
- reloc_info_writer.pos(), desc.reloc_size);
+ OS::MemMove(desc.buffer, buffer_, desc.instr_size);
+ OS::MemMove(rc_delta + reloc_info_writer.pos(),
+ reloc_info_writer.pos(), desc.reloc_size);
// Switch buffers.
if (isolate()->assembler_spare_buffer() == NULL &&
@@ -2524,10 +2570,7 @@ void Assembler::GrowBuffer() {
// Relocate runtime entries.
for (RelocIterator it(desc); !it.done(); it.next()) {
RelocInfo::Mode rmode = it.rinfo()->rmode();
- if (rmode == RelocInfo::RUNTIME_ENTRY) {
- int32_t* p = reinterpret_cast<int32_t*>(it.rinfo()->pc());
- *p -= pc_delta; // relocate entry
- } else if (rmode == RelocInfo::INTERNAL_REFERENCE) {
+ if (rmode == RelocInfo::INTERNAL_REFERENCE) {
int32_t* p = reinterpret_cast<int32_t*>(it.rinfo()->pc());
if (*p != 0) { // 0 means uninitialized.
*p += pc_delta;
@@ -2579,7 +2622,7 @@ void Assembler::emit_operand(Register reg, const Operand& adr) {
pc_ += length;
// Emit relocation information if necessary.
- if (length >= sizeof(int32_t) && adr.rmode_ != RelocInfo::NONE) {
+ if (length >= sizeof(int32_t) && !RelocInfo::IsNone(adr.rmode_)) {
pc_ -= sizeof(int32_t); // pc_ must be *at* disp32
RecordRelocInfo(adr.rmode_);
pc_ += sizeof(int32_t);
@@ -2608,7 +2651,7 @@ void Assembler::dd(uint32_t data) {
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
- ASSERT(rmode != RelocInfo::NONE);
+ ASSERT(!RelocInfo::IsNone(rmode));
// Don't record external references unless the heap will be serialized.
if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
#ifdef DEBUG
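For reference, a minimal standalone sketch (not taken from the patch) of the byte sequences the new Assembler::ror(Register, uint8_t) added earlier in this file emits: D1 /1 when the rotate count is 1, otherwise C1 /1 ib, with 0xC8 | code as the ModR/M byte.

#include <cstdint>
#include <vector>

// reg_code is the 3-bit ia32 register number (eax=0 ... edi=7).
std::vector<uint8_t> EncodeRorImm8(int reg_code, uint8_t imm8) {
  std::vector<uint8_t> bytes;
  if (imm8 == 1) {
    bytes.push_back(0xD1);                                   // ror r/m32, 1
    bytes.push_back(static_cast<uint8_t>(0xC8 | reg_code));  // mod=11, /1, rm
  } else {
    bytes.push_back(0xC1);                                   // ror r/m32, imm8
    bytes.push_back(static_cast<uint8_t>(0xC8 | reg_code));
    bytes.push_back(imm8);                                   // rotate count
  }
  return bytes;
}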
diff --git a/deps/v8/src/ia32/assembler-ia32.h b/deps/v8/src/ia32/assembler-ia32.h
index b0f4651d1..f46c6478d 100644
--- a/deps/v8/src/ia32/assembler-ia32.h
+++ b/deps/v8/src/ia32/assembler-ia32.h
@@ -65,7 +65,10 @@ namespace internal {
// and best performance in optimized code.
//
struct Register {
- static const int kNumAllocatableRegisters = 6;
+ static const int kMaxNumAllocatableRegisters = 6;
+ static int NumAllocatableRegisters() {
+ return kMaxNumAllocatableRegisters;
+ }
static const int kNumRegisters = 8;
static inline const char* AllocationIndexToString(int index);
@@ -119,7 +122,7 @@ const Register no_reg = { kRegister_no_reg_Code };
inline const char* Register::AllocationIndexToString(int index) {
- ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+ ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
// This is the mapping of allocation indices to registers.
const char* const kNames[] = { "eax", "ecx", "edx", "ebx", "esi", "edi" };
return kNames[index];
@@ -133,22 +136,71 @@ inline int Register::ToAllocationIndex(Register reg) {
inline Register Register::FromAllocationIndex(int index) {
- ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+ ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
return (index >= 4) ? from_code(index + 2) : from_code(index);
}
-struct XMMRegister {
- static const int kNumAllocatableRegisters = 7;
- static const int kNumRegisters = 8;
+struct IntelDoubleRegister {
+ static const int kMaxNumRegisters = 8;
+ static const int kMaxNumAllocatableRegisters = 7;
+ static int NumAllocatableRegisters();
+ static int NumRegisters();
+ static const char* AllocationIndexToString(int index);
- static int ToAllocationIndex(XMMRegister reg) {
+ static int ToAllocationIndex(IntelDoubleRegister reg) {
ASSERT(reg.code() != 0);
return reg.code() - 1;
}
+ static IntelDoubleRegister FromAllocationIndex(int index) {
+ ASSERT(index >= 0 && index < NumAllocatableRegisters());
+ return from_code(index + 1);
+ }
+
+ static IntelDoubleRegister from_code(int code) {
+ IntelDoubleRegister result = { code };
+ return result;
+ }
+
+ bool is_valid() const {
+ return 0 <= code_ && code_ < NumRegisters();
+ }
+ int code() const {
+ ASSERT(is_valid());
+ return code_;
+ }
+
+ int code_;
+};
+
+
+const IntelDoubleRegister double_register_0 = { 0 };
+const IntelDoubleRegister double_register_1 = { 1 };
+const IntelDoubleRegister double_register_2 = { 2 };
+const IntelDoubleRegister double_register_3 = { 3 };
+const IntelDoubleRegister double_register_4 = { 4 };
+const IntelDoubleRegister double_register_5 = { 5 };
+const IntelDoubleRegister double_register_6 = { 6 };
+const IntelDoubleRegister double_register_7 = { 7 };
+const IntelDoubleRegister no_double_reg = { -1 };
+
+
+struct XMMRegister : IntelDoubleRegister {
+ static const int kNumAllocatableRegisters = 7;
+ static const int kNumRegisters = 8;
+
+ static XMMRegister from_code(int code) {
+ STATIC_ASSERT(sizeof(XMMRegister) == sizeof(IntelDoubleRegister));
+ XMMRegister result;
+ result.code_ = code;
+ return result;
+ }
+
+ bool is(XMMRegister reg) const { return code_ == reg.code_; }
+
static XMMRegister FromAllocationIndex(int index) {
- ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+ ASSERT(index >= 0 && index < NumAllocatableRegisters());
return from_code(index + 1);
}
@@ -165,34 +217,57 @@ struct XMMRegister {
};
return names[index];
}
+};
- static XMMRegister from_code(int code) {
- XMMRegister r = { code };
- return r;
+
+#define xmm0 (static_cast<const XMMRegister&>(double_register_0))
+#define xmm1 (static_cast<const XMMRegister&>(double_register_1))
+#define xmm2 (static_cast<const XMMRegister&>(double_register_2))
+#define xmm3 (static_cast<const XMMRegister&>(double_register_3))
+#define xmm4 (static_cast<const XMMRegister&>(double_register_4))
+#define xmm5 (static_cast<const XMMRegister&>(double_register_5))
+#define xmm6 (static_cast<const XMMRegister&>(double_register_6))
+#define xmm7 (static_cast<const XMMRegister&>(double_register_7))
+#define no_xmm_reg (static_cast<const XMMRegister&>(no_double_reg))
+
+
+struct X87Register : IntelDoubleRegister {
+ static const int kNumAllocatableRegisters = 5;
+ static const int kNumRegisters = 5;
+
+ bool is(X87Register reg) const {
+ return code_ == reg.code_;
}
- bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
- bool is(XMMRegister reg) const { return code_ == reg.code_; }
- int code() const {
- ASSERT(is_valid());
- return code_;
+ static const char* AllocationIndexToString(int index) {
+ ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+ const char* const names[] = {
+ "stX_0", "stX_1", "stX_2", "stX_3", "stX_4"
+ };
+ return names[index];
}
- int code_;
-};
+ static X87Register FromAllocationIndex(int index) {
+ STATIC_ASSERT(sizeof(X87Register) == sizeof(IntelDoubleRegister));
+ ASSERT(index >= 0 && index < NumAllocatableRegisters());
+ X87Register result;
+ result.code_ = index;
+ return result;
+ }
+ static int ToAllocationIndex(X87Register reg) {
+ return reg.code_;
+ }
+};
-const XMMRegister xmm0 = { 0 };
-const XMMRegister xmm1 = { 1 };
-const XMMRegister xmm2 = { 2 };
-const XMMRegister xmm3 = { 3 };
-const XMMRegister xmm4 = { 4 };
-const XMMRegister xmm5 = { 5 };
-const XMMRegister xmm6 = { 6 };
-const XMMRegister xmm7 = { 7 };
+#define stX_0 static_cast<const X87Register&>(double_register_0)
+#define stX_1 static_cast<const X87Register&>(double_register_1)
+#define stX_2 static_cast<const X87Register&>(double_register_2)
+#define stX_3 static_cast<const X87Register&>(double_register_3)
+#define stX_4 static_cast<const X87Register&>(double_register_4)
-typedef XMMRegister DoubleRegister;
+typedef IntelDoubleRegister DoubleRegister;
enum Condition {
@@ -275,12 +350,12 @@ class Immediate BASE_EMBEDDED {
return Immediate(label);
}
- bool is_zero() const { return x_ == 0 && rmode_ == RelocInfo::NONE; }
+ bool is_zero() const { return x_ == 0 && RelocInfo::IsNone(rmode_); }
bool is_int8() const {
- return -128 <= x_ && x_ < 128 && rmode_ == RelocInfo::NONE;
+ return -128 <= x_ && x_ < 128 && RelocInfo::IsNone(rmode_);
}
bool is_int16() const {
- return -32768 <= x_ && x_ < 32768 && rmode_ == RelocInfo::NONE;
+ return -32768 <= x_ && x_ < 32768 && RelocInfo::IsNone(rmode_);
}
private:
@@ -320,20 +395,20 @@ class Operand BASE_EMBEDDED {
// [base + disp/r]
explicit Operand(Register base, int32_t disp,
- RelocInfo::Mode rmode = RelocInfo::NONE);
+ RelocInfo::Mode rmode = RelocInfo::NONE32);
// [base + index*scale + disp/r]
explicit Operand(Register base,
Register index,
ScaleFactor scale,
int32_t disp,
- RelocInfo::Mode rmode = RelocInfo::NONE);
+ RelocInfo::Mode rmode = RelocInfo::NONE32);
// [index*scale + disp/r]
explicit Operand(Register index,
ScaleFactor scale,
int32_t disp,
- RelocInfo::Mode rmode = RelocInfo::NONE);
+ RelocInfo::Mode rmode = RelocInfo::NONE32);
static Operand StaticVariable(const ExternalReference& ext) {
return Operand(reinterpret_cast<int32_t>(ext.address()),
@@ -347,9 +422,10 @@ class Operand BASE_EMBEDDED {
RelocInfo::EXTERNAL_REFERENCE);
}
- static Operand Cell(Handle<JSGlobalPropertyCell> cell) {
+ static Operand ForCell(Handle<Cell> cell) {
+ AllowDeferredHandleDereference embedding_raw_address;
return Operand(reinterpret_cast<int32_t>(cell.location()),
- RelocInfo::GLOBAL_PROPERTY_CELL);
+ RelocInfo::CELL);
}
// Returns true if this Operand is a wrapper for the specified register.
@@ -442,10 +518,10 @@ class Displacement BASE_EMBEDDED {
// CpuFeatures keeps track of which features are supported by the target CPU.
-// Supported features must be enabled by a Scope before use.
+// Supported features must be enabled by a CpuFeatureScope before use.
// Example:
-// if (CpuFeatures::IsSupported(SSE2)) {
-// CpuFeatures::Scope fscope(SSE2);
+// if (assembler->IsSupported(SSE2)) {
+// CpuFeatureScope fscope(assembler, SSE2);
// // Generate SSE2 floating point code.
// } else {
// // Generate standard x87 floating point code.
@@ -459,96 +535,54 @@ class CpuFeatures : public AllStatic {
// Check whether a feature is supported by the target CPU.
static bool IsSupported(CpuFeature f) {
ASSERT(initialized_);
+ if (Check(f, cross_compile_)) return true;
if (f == SSE2 && !FLAG_enable_sse2) return false;
if (f == SSE3 && !FLAG_enable_sse3) return false;
if (f == SSE4_1 && !FLAG_enable_sse4_1) return false;
if (f == CMOV && !FLAG_enable_cmov) return false;
- if (f == RDTSC && !FLAG_enable_rdtsc) return false;
- return (supported_ & (static_cast<uint64_t>(1) << f)) != 0;
+ return Check(f, supported_);
}
-#ifdef DEBUG
- // Check whether a feature is currently enabled.
- static bool IsEnabled(CpuFeature f) {
+ static bool IsFoundByRuntimeProbingOnly(CpuFeature f) {
ASSERT(initialized_);
- Isolate* isolate = Isolate::UncheckedCurrent();
- if (isolate == NULL) {
- // When no isolate is available, work as if we're running in
- // release mode.
- return IsSupported(f);
- }
- uint64_t enabled = isolate->enabled_cpu_features();
- return (enabled & (static_cast<uint64_t>(1) << f)) != 0;
+ return Check(f, found_by_runtime_probing_only_);
}
-#endif
-
- // Enable a specified feature within a scope.
- class Scope BASE_EMBEDDED {
-#ifdef DEBUG
-
- public:
- explicit Scope(CpuFeature f) {
- uint64_t mask = static_cast<uint64_t>(1) << f;
- ASSERT(CpuFeatures::IsSupported(f));
- ASSERT(!Serializer::enabled() ||
- (CpuFeatures::found_by_runtime_probing_ & mask) == 0);
- isolate_ = Isolate::UncheckedCurrent();
- old_enabled_ = 0;
- if (isolate_ != NULL) {
- old_enabled_ = isolate_->enabled_cpu_features();
- isolate_->set_enabled_cpu_features(old_enabled_ | mask);
- }
- }
- ~Scope() {
- ASSERT_EQ(Isolate::UncheckedCurrent(), isolate_);
- if (isolate_ != NULL) {
- isolate_->set_enabled_cpu_features(old_enabled_);
- }
- }
-
- private:
- Isolate* isolate_;
- uint64_t old_enabled_;
-#else
- public:
- explicit Scope(CpuFeature f) {}
-#endif
- };
+ static bool IsSafeForSnapshot(CpuFeature f) {
+ return Check(f, cross_compile_) ||
+ (IsSupported(f) &&
+ (!Serializer::enabled() || !IsFoundByRuntimeProbingOnly(f)));
+ }
- class TryForceFeatureScope BASE_EMBEDDED {
- public:
- explicit TryForceFeatureScope(CpuFeature f)
- : old_supported_(CpuFeatures::supported_) {
- if (CanForce()) {
- CpuFeatures::supported_ |= (static_cast<uint64_t>(1) << f);
- }
- }
+ static bool VerifyCrossCompiling() {
+ return cross_compile_ == 0;
+ }
- ~TryForceFeatureScope() {
- if (CanForce()) {
- CpuFeatures::supported_ = old_supported_;
- }
- }
+ static bool VerifyCrossCompiling(CpuFeature f) {
+ uint64_t mask = flag2set(f);
+ return cross_compile_ == 0 ||
+ (cross_compile_ & mask) == mask;
+ }
- private:
- static bool CanForce() {
- // It's only safe to temporarily force support of CPU features
- // when there's only a single isolate, which is guaranteed when
- // the serializer is enabled.
- return Serializer::enabled();
- }
+ private:
+ static bool Check(CpuFeature f, uint64_t set) {
+ return (set & flag2set(f)) != 0;
+ }
- const uint64_t old_supported_;
- };
+ static uint64_t flag2set(CpuFeature f) {
+ return static_cast<uint64_t>(1) << f;
+ }
- private:
#ifdef DEBUG
static bool initialized_;
#endif
static uint64_t supported_;
- static uint64_t found_by_runtime_probing_;
+ static uint64_t found_by_runtime_probing_only_;
+
+ static uint64_t cross_compile_;
+ friend class ExternalReference;
+ friend class PlatformFeatureScope;
DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
};
@@ -582,15 +616,7 @@ class Assembler : public AssemblerBase {
// upon destruction of the assembler.
// TODO(vitalyr): the assembler does not need an isolate.
Assembler(Isolate* isolate, void* buffer, int buffer_size);
- ~Assembler();
-
- // Overrides the default provided by FLAG_debug_code.
- void set_emit_debug_code(bool value) { emit_debug_code_ = value; }
-
- // Avoids using instructions that vary in size in unpredictable ways between
- // the snapshot and the running VM. This is needed by the full compiler so
- // that it can recompile code with debug support and fix the PC.
- void set_predictable_code_size(bool value) { predictable_code_size_ = value; }
+ virtual ~Assembler() { }
// GetCode emits any pending (non-emitted) code and fills the descriptor
// desc. GetCode() is idempotent; it returns the same result if no other
@@ -817,6 +843,8 @@ class Assembler : public AssemblerBase {
void rcl(Register dst, uint8_t imm8);
void rcr(Register dst, uint8_t imm8);
+ void ror(Register dst, uint8_t imm8);
+ void ror_cl(Register dst);
void sar(Register dst, uint8_t imm8);
void sar_cl(Register dst);
@@ -846,7 +874,7 @@ class Assembler : public AssemblerBase {
void test(Register reg, const Operand& op);
void test_b(Register reg, const Operand& op);
void test(const Operand& op, const Immediate& imm);
- void test_b(Register reg, uint8_t imm8) { test_b(Operand(reg), imm8); }
+ void test_b(Register reg, uint8_t imm8);
void test_b(const Operand& op, uint8_t imm8);
void xor_(Register dst, int32_t imm32);
@@ -865,7 +893,6 @@ class Assembler : public AssemblerBase {
void hlt();
void int3();
void nop();
- void rdtsc();
void ret(int imm16);
// Label operations & relative jumps (PPUM Appendix D)
@@ -924,6 +951,7 @@ class Assembler : public AssemblerBase {
void fld_d(const Operand& adr);
void fstp_s(const Operand& adr);
+ void fst_s(const Operand& adr);
void fstp_d(const Operand& adr);
void fst_d(const Operand& adr);
@@ -950,9 +978,13 @@ class Assembler : public AssemblerBase {
void fninit();
void fadd(int i);
+ void fadd_i(int i);
void fsub(int i);
+ void fsub_i(int i);
void fmul(int i);
+ void fmul_i(int i);
void fdiv(int i);
+ void fdiv_i(int i);
void fisub_s(const Operand& adr);
@@ -985,6 +1017,10 @@ class Assembler : public AssemblerBase {
void cpuid();
+ // SSE instructions
+ void andps(XMMRegister dst, XMMRegister src);
+ void xorps(XMMRegister dst, XMMRegister src);
+
// SSE2 instructions
void cvttss2si(Register dst, const Operand& src);
void cvttsd2si(Register dst, const Operand& src);
@@ -996,11 +1032,12 @@ class Assembler : public AssemblerBase {
void cvtsd2ss(XMMRegister dst, XMMRegister src);
void addsd(XMMRegister dst, XMMRegister src);
+ void addsd(XMMRegister dst, const Operand& src);
void subsd(XMMRegister dst, XMMRegister src);
void mulsd(XMMRegister dst, XMMRegister src);
+ void mulsd(XMMRegister dst, const Operand& src);
void divsd(XMMRegister dst, XMMRegister src);
void xorpd(XMMRegister dst, XMMRegister src);
- void xorps(XMMRegister dst, XMMRegister src);
void sqrtsd(XMMRegister dst, XMMRegister src);
void andpd(XMMRegister dst, XMMRegister src);
@@ -1019,6 +1056,7 @@ class Assembler : public AssemblerBase {
void roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode);
void movmskpd(Register dst, XMMRegister src);
+ void movmskps(Register dst, XMMRegister src);
void cmpltsd(XMMRegister dst, XMMRegister src);
void pcmpeqd(XMMRegister dst, XMMRegister src);
@@ -1029,16 +1067,22 @@ class Assembler : public AssemblerBase {
void movdqa(const Operand& dst, XMMRegister src);
void movdqu(XMMRegister dst, const Operand& src);
void movdqu(const Operand& dst, XMMRegister src);
-
- // Use either movsd or movlpd.
- void movdbl(XMMRegister dst, const Operand& src);
- void movdbl(const Operand& dst, XMMRegister src);
+ void movdq(bool aligned, XMMRegister dst, const Operand& src) {
+ if (aligned) {
+ movdqa(dst, src);
+ } else {
+ movdqu(dst, src);
+ }
+ }
void movd(XMMRegister dst, Register src) { movd(dst, Operand(src)); }
void movd(XMMRegister dst, const Operand& src);
void movd(Register dst, XMMRegister src) { movd(Operand(dst), src); }
void movd(const Operand& dst, XMMRegister src);
void movsd(XMMRegister dst, XMMRegister src);
+ void movsd(XMMRegister dst, const Operand& src);
+ void movsd(const Operand& dst, XMMRegister src);
+
void movss(XMMRegister dst, const Operand& src);
void movss(const Operand& dst, XMMRegister src);
@@ -1054,7 +1098,7 @@ class Assembler : public AssemblerBase {
void psllq(XMMRegister dst, XMMRegister src);
void psrlq(XMMRegister reg, int8_t shift);
void psrlq(XMMRegister dst, XMMRegister src);
- void pshufd(XMMRegister dst, XMMRegister src, int8_t shuffle);
+ void pshufd(XMMRegister dst, XMMRegister src, uint8_t shuffle);
void pextrd(Register dst, XMMRegister src, int8_t offset) {
pextrd(Operand(dst), src, offset);
}
@@ -1097,8 +1141,6 @@ class Assembler : public AssemblerBase {
void db(uint8_t data);
void dd(uint32_t data);
- int pc_offset() const { return pc_ - buffer_; }
-
// Check if there is less than kGap bytes available in the buffer.
// If this is the case, we need to grow the buffer before emitting
// an instruction or relocation information.
@@ -1117,21 +1159,15 @@ class Assembler : public AssemblerBase {
// Avoid overflows for displacements etc.
static const int kMaximalBufferSize = 512*MB;
- static const int kMinimalBufferSize = 4*KB;
- byte byte_at(int pos) { return buffer_[pos]; }
+ byte byte_at(int pos) { return buffer_[pos]; }
void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
protected:
- bool emit_debug_code() const { return emit_debug_code_; }
- bool predictable_code_size() const { return predictable_code_size_ ; }
-
- void movsd(XMMRegister dst, const Operand& src);
- void movsd(const Operand& dst, XMMRegister src);
-
void emit_sse_operand(XMMRegister reg, const Operand& adr);
void emit_sse_operand(XMMRegister dst, XMMRegister src);
void emit_sse_operand(Register dst, XMMRegister src);
+ void emit_sse_operand(XMMRegister dst, Register src);
byte* addr_at(int pos) { return buffer_ + pos; }
@@ -1151,6 +1187,9 @@ class Assembler : public AssemblerBase {
inline void emit(uint32_t x,
RelocInfo::Mode rmode,
TypeFeedbackId id = TypeFeedbackId::None());
+ inline void emit(Handle<Code> code,
+ RelocInfo::Mode rmode,
+ TypeFeedbackId id = TypeFeedbackId::None());
inline void emit(const Immediate& x);
inline void emit_w(const Immediate& x);
@@ -1186,22 +1225,10 @@ class Assembler : public AssemblerBase {
friend class CodePatcher;
friend class EnsureSpace;
- // Code buffer:
- // The buffer into which code and relocation info are generated.
- byte* buffer_;
- int buffer_size_;
- // True if the assembler owns the buffer, false if buffer is external.
- bool own_buffer_;
-
// code generation
- byte* pc_; // the program counter; moves forward
RelocInfoWriter reloc_info_writer;
PositionsRecorder positions_recorder_;
-
- bool emit_debug_code_;
- bool predictable_code_size_;
-
friend class PositionsRecorder;
};
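For reference, a standalone model (not V8 code; the feature numbering below is illustrative only) of the bitset scheme the reworked CpuFeatures above relies on: flag2set() maps a feature to one bit of a 64-bit set and Check() tests membership, whether against supported_, found_by_runtime_probing_only_ or cross_compile_.

#include <cstdint>

enum CpuFeature { SSE2, SSE3, SSE4_1, CMOV };  // illustrative numbering only

static uint64_t flag2set(CpuFeature f) {
  return static_cast<uint64_t>(1) << f;
}

static bool Check(CpuFeature f, uint64_t set) {
  return (set & flag2set(f)) != 0;
}

int main() {
  // A CPU whose probing found SSE2 and CMOV but not SSE4_1.
  uint64_t supported = flag2set(SSE2) | flag2set(CMOV);
  return Check(SSE4_1, supported) ? 1 : 0;  // exits 0: SSE4_1 not supported
}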
diff --git a/deps/v8/src/ia32/builtins-ia32.cc b/deps/v8/src/ia32/builtins-ia32.cc
index 9bc15e909..e5e6ec50d 100644
--- a/deps/v8/src/ia32/builtins-ia32.cc
+++ b/deps/v8/src/ia32/builtins-ia32.cc
@@ -27,7 +27,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_IA32)
+#if V8_TARGET_ARCH_IA32
#include "codegen.h"
#include "deoptimizer.h"
@@ -74,6 +74,24 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
}
+static void CallRuntimePassFunction(MacroAssembler* masm,
+ Runtime::FunctionId function_id) {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // Push a copy of the function.
+ __ push(edi);
+ // Push call kind information.
+ __ push(ecx);
+ // Function is also the parameter to the runtime call.
+ __ push(edi);
+
+ __ CallRuntime(function_id, 1);
+ // Restore call kind information.
+ __ pop(ecx);
+ // Restore receiver.
+ __ pop(edi);
+}
+
+
static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
__ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
__ mov(eax, FieldOperand(eax, SharedFunctionInfo::kCodeOffset));
@@ -83,30 +101,29 @@ static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
void Builtins::Generate_InRecompileQueue(MacroAssembler* masm) {
- GenerateTailCallToSharedCode(masm);
-}
-
-
-void Builtins::Generate_ParallelRecompile(MacroAssembler* masm) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Push a copy of the function onto the stack.
- __ push(edi);
- // Push call kind information.
- __ push(ecx);
+ // Checking whether the queued function is ready for install is optional,
+ // since we come across interrupts and stack checks elsewhere. However,
+ // not checking may delay installing ready functions, and always checking
+ // would be quite expensive. A good compromise is to first check against
+ // the stack limit as a cue for an interrupt signal.
+ Label ok;
+ ExternalReference stack_limit =
+ ExternalReference::address_of_stack_limit(masm->isolate());
+ __ cmp(esp, Operand::StaticVariable(stack_limit));
+ __ j(above_equal, &ok, Label::kNear);
- __ push(edi); // Function is also the parameter to the runtime call.
- __ CallRuntime(Runtime::kParallelRecompile, 1);
+ CallRuntimePassFunction(masm, Runtime::kTryInstallRecompiledCode);
+ // Tail call to returned code.
+ __ lea(eax, FieldOperand(eax, Code::kHeaderSize));
+ __ jmp(eax);
- // Restore call kind information.
- __ pop(ecx);
- // Restore receiver.
- __ pop(edi);
+ __ bind(&ok);
+ GenerateTailCallToSharedCode(masm);
+}
- // Tear down internal frame.
- }
+void Builtins::Generate_ConcurrentRecompile(MacroAssembler* masm) {
+ CallRuntimePassFunction(masm, Runtime::kConcurrentRecompile);
GenerateTailCallToSharedCode(masm);
}
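A plain-C++ sketch (an assumption for illustration, not V8 code) of the compromise described in the comment above: the install-readiness check for queued code is only taken once the stack pointer has crossed the stack limit, i.e. when an interrupt cue is pending anyway.

#include <cstdint>

// Mirrors `cmp esp, stack_limit; j(above_equal, &ok)`: while the stack
// pointer is still above the limit there is no interrupt cue, so the
// comparatively expensive readiness check is skipped.
bool ShouldCheckForInstallableCode(uintptr_t sp, uintptr_t stack_limit) {
  return sp < stack_limit;
}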
@@ -190,8 +207,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// eax: initial map
__ movzx_b(edi, FieldOperand(eax, Map::kInstanceSizeOffset));
__ shl(edi, kPointerSizeLog2);
- __ AllocateInNewSpace(
- edi, ebx, edi, no_reg, &rt_call, NO_ALLOCATION_FLAGS);
+ __ Allocate(edi, ebx, edi, no_reg, &rt_call, NO_ALLOCATION_FLAGS);
// Allocated the JSObject, now initialize the fields.
// eax: initial map
// ebx: JSObject
@@ -216,7 +232,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
if (FLAG_debug_code) {
__ cmp(esi, edi);
__ Assert(less_equal,
- "Unexpected number of pre-allocated property fields.");
+ kUnexpectedNumberOfPreAllocatedPropertyFields);
}
__ InitializeFieldsWithFiller(ecx, esi, edx);
__ mov(edx, factory->one_pointer_filler_map());
@@ -247,21 +263,22 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ sub(edx, ecx);
// Done if no extra properties are to be allocated.
__ j(zero, &allocated);
- __ Assert(positive, "Property allocation count failed.");
+ __ Assert(positive, kPropertyAllocationCountFailed);
// Scale the number of elements by pointer size and add the header for
// FixedArrays to the start of the next object calculation from above.
// ebx: JSObject
// edi: start of next object (will be start of FixedArray)
// edx: number of elements in properties array
- __ AllocateInNewSpace(FixedArray::kHeaderSize,
- times_pointer_size,
- edx,
- edi,
- ecx,
- no_reg,
- &undo_allocation,
- RESULT_CONTAINS_TOP);
+ __ Allocate(FixedArray::kHeaderSize,
+ times_pointer_size,
+ edx,
+ REGISTER_VALUE_IS_INT32,
+ edi,
+ ecx,
+ no_reg,
+ &undo_allocation,
+ RESULT_CONTAINS_TOP);
// Initialize the FixedArray.
// ebx: JSObject
@@ -421,6 +438,8 @@ void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
bool is_construct) {
+ ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
// Clear the context before we push it when entering the internal frame.
__ Set(esi, Immediate(0));
@@ -460,6 +479,10 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Invoke the code.
if (is_construct) {
+ // No type feedback cell is available
+ Handle<Object> undefined_sentinel(
+ masm->isolate()->heap()->undefined_value(), masm->isolate());
+ __ mov(ebx, Immediate(undefined_sentinel));
CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
__ CallStub(&stub);
} else {
@@ -487,54 +510,113 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Push a copy of the function.
- __ push(edi);
- // Push call kind information.
- __ push(ecx);
-
- __ push(edi); // Function is also the parameter to the runtime call.
- __ CallRuntime(Runtime::kLazyCompile, 1);
-
- // Restore call kind information.
- __ pop(ecx);
- // Restore receiver.
- __ pop(edi);
+ CallRuntimePassFunction(masm, Runtime::kLazyCompile);
+ // Do a tail-call of the compiled function.
+ __ lea(eax, FieldOperand(eax, Code::kHeaderSize));
+ __ jmp(eax);
+}
- // Tear down internal frame.
- }
+void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
+ CallRuntimePassFunction(masm, Runtime::kLazyRecompile);
// Do a tail-call of the compiled function.
__ lea(eax, FieldOperand(eax, Code::kHeaderSize));
__ jmp(eax);
}
-void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
+static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
+ // For now, we are relying on the fact that make_code_young doesn't do any
+ // garbage collection which allows us to save/restore the registers without
+ // worrying about which of them contain pointers. We also don't build an
+ // internal frame, which makes the code faster, since we shouldn't have to
+ // do stack crawls in MakeCodeYoung. This seems a bit fragile.
+
+ // Re-execute the code that was patched back to the young age when
+ // the stub returns.
+ __ sub(Operand(esp, 0), Immediate(5));
+ __ pushad();
+ __ mov(eax, Operand(esp, 8 * kPointerSize));
{
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ PrepareCallCFunction(2, ebx);
+ __ mov(Operand(esp, 1 * kPointerSize),
+ Immediate(ExternalReference::isolate_address(masm->isolate())));
+ __ mov(Operand(esp, 0), eax);
+ __ CallCFunction(
+ ExternalReference::get_make_code_young_function(masm->isolate()), 2);
+ }
+ __ popad();
+ __ ret(0);
+}
- // Push a copy of the function onto the stack.
- __ push(edi);
- // Push call kind information.
- __ push(ecx);
+#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
+void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
+ MacroAssembler* masm) { \
+ GenerateMakeCodeYoungAgainCommon(masm); \
+} \
+void Builtins::Generate_Make##C##CodeYoungAgainOddMarking( \
+ MacroAssembler* masm) { \
+ GenerateMakeCodeYoungAgainCommon(masm); \
+}
+CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
+#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
- __ push(edi); // Function is also the parameter to the runtime call.
- __ CallRuntime(Runtime::kLazyRecompile, 1);
- // Restore call kind information.
- __ pop(ecx);
- // Restore receiver.
- __ pop(edi);
+void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
+ // For now, as in GenerateMakeCodeYoungAgainCommon, we are relying on the fact
+ // that make_code_young doesn't do any garbage collection which allows us to
+ // save/restore the registers without worrying about which of them contain
+ // pointers.
+ __ pushad();
+ __ mov(eax, Operand(esp, 8 * kPointerSize));
+ __ sub(eax, Immediate(Assembler::kCallInstructionLength));
+ { // NOLINT
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ PrepareCallCFunction(2, ebx);
+ __ mov(Operand(esp, 1 * kPointerSize),
+ Immediate(ExternalReference::isolate_address(masm->isolate())));
+ __ mov(Operand(esp, 0), eax);
+ __ CallCFunction(
+ ExternalReference::get_mark_code_as_executed_function(masm->isolate()),
+ 2);
+ }
+ __ popad();
+
+ // Perform prologue operations usually performed by the young code stub.
+ __ pop(eax); // Pop return address into scratch register.
+ __ push(ebp); // Caller's frame pointer.
+ __ mov(ebp, esp);
+ __ push(esi); // Callee's context.
+ __ push(edi); // Callee's JS Function.
+ __ push(eax); // Push return address after frame prologue.
+
+ // Jump to point after the code-age stub.
+ __ ret(0);
+}
+
+
+void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
+ GenerateMakeCodeYoungAgainCommon(masm);
+}
+
+void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+ // Enter an internal frame.
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Preserve registers across notification, this is important for compiled
+ // stubs that tail call the runtime on deopts passing their parameters in
+ // registers.
+ __ pushad();
+ __ CallRuntime(Runtime::kNotifyStubFailure, 0);
+ __ popad();
// Tear down internal frame.
}
- // Do a tail-call of the compiled function.
- __ lea(eax, FieldOperand(eax, Code::kHeaderSize));
- __ jmp(eax);
+ __ pop(MemOperand(esp, 0)); // Ignore state offset
+ __ ret(0); // Return to IC Miss stub, continuation still on stack.
}
@@ -567,7 +649,7 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
__ ret(2 * kPointerSize); // Remove state, eax.
__ bind(&not_tos_eax);
- __ Abort("no cases left");
+ __ Abort(kNoCasesLeft);
}
@@ -576,25 +658,13 @@ void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
}
-void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
- Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
+void Builtins::Generate_NotifySoftDeoptimized(MacroAssembler* masm) {
+ Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
}
-void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
- // TODO(kasperl): Do we need to save/restore the XMM registers too?
-
- // For now, we are relying on the fact that Runtime::NotifyOSR
- // doesn't do any garbage collection which allows us to save/restore
- // the registers without worrying about which of them contain
- // pointers. This seems a bit fragile.
- __ pushad();
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kNotifyOSR, 0);
- }
- __ popad();
- __ ret(0);
+void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
+ Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
}
@@ -923,431 +993,6 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
}
-// Allocate an empty JSArray. The allocated array is put into the result
-// register. If the parameter initial_capacity is larger than zero an elements
-// backing store is allocated with this size and filled with the hole values.
-// Otherwise the elements backing store is set to the empty FixedArray.
-static void AllocateEmptyJSArray(MacroAssembler* masm,
- Register array_function,
- Register result,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required) {
- const int initial_capacity = JSArray::kPreallocatedArrayElements;
- STATIC_ASSERT(initial_capacity >= 0);
-
- __ LoadInitialArrayMap(array_function, scratch2, scratch1, false);
-
- // Allocate the JSArray object together with space for a fixed array with the
- // requested elements.
- int size = JSArray::kSize;
- if (initial_capacity > 0) {
- size += FixedArray::SizeFor(initial_capacity);
- }
- __ AllocateInNewSpace(size,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
-
- // Allocated the JSArray. Now initialize the fields except for the elements
- // array.
- // result: JSObject
- // scratch1: initial map
- // scratch2: start of next object
- __ mov(FieldOperand(result, JSObject::kMapOffset), scratch1);
- Factory* factory = masm->isolate()->factory();
- __ mov(FieldOperand(result, JSArray::kPropertiesOffset),
- factory->empty_fixed_array());
- // Field JSArray::kElementsOffset is initialized later.
- __ mov(FieldOperand(result, JSArray::kLengthOffset), Immediate(0));
-
- // If no storage is requested for the elements array just set the empty
- // fixed array.
- if (initial_capacity == 0) {
- __ mov(FieldOperand(result, JSArray::kElementsOffset),
- factory->empty_fixed_array());
- return;
- }
-
- // Calculate the location of the elements array and set elements array member
- // of the JSArray.
- // result: JSObject
- // scratch2: start of next object
- __ lea(scratch1, Operand(result, JSArray::kSize));
- __ mov(FieldOperand(result, JSArray::kElementsOffset), scratch1);
-
- // Initialize the FixedArray and fill it with holes. FixedArray length is
- // stored as a smi.
- // result: JSObject
- // scratch1: elements array
- // scratch2: start of next object
- __ mov(FieldOperand(scratch1, FixedArray::kMapOffset),
- factory->fixed_array_map());
- __ mov(FieldOperand(scratch1, FixedArray::kLengthOffset),
- Immediate(Smi::FromInt(initial_capacity)));
-
- // Fill the FixedArray with the hole value. Inline the code if short.
- // Reconsider loop unfolding if kPreallocatedArrayElements gets changed.
- static const int kLoopUnfoldLimit = 4;
- if (initial_capacity <= kLoopUnfoldLimit) {
- // Use a scratch register here to have only one reloc info when unfolding
- // the loop.
- __ mov(scratch3, factory->the_hole_value());
- for (int i = 0; i < initial_capacity; i++) {
- __ mov(FieldOperand(scratch1,
- FixedArray::kHeaderSize + i * kPointerSize),
- scratch3);
- }
- } else {
- Label loop, entry;
- __ mov(scratch2, Immediate(initial_capacity));
- __ jmp(&entry);
- __ bind(&loop);
- __ mov(FieldOperand(scratch1,
- scratch2,
- times_pointer_size,
- FixedArray::kHeaderSize),
- factory->the_hole_value());
- __ bind(&entry);
- __ dec(scratch2);
- __ j(not_sign, &loop);
- }
-}
-
-
-// Allocate a JSArray with the number of elements stored in a register. The
-// register array_function holds the built-in Array function and the register
-// array_size holds the size of the array as a smi. The allocated array is put
-// into the result register and beginning and end of the FixedArray elements
-// storage is put into registers elements_array and elements_array_end (see
-// below for when that is not the case). If the parameter fill_with_holes is
-// true the allocated elements backing store is filled with the hole values
-// otherwise it is left uninitialized. When the backing store is filled the
-// register elements_array is scratched.
-static void AllocateJSArray(MacroAssembler* masm,
- Register array_function, // Array function.
- Register array_size, // As a smi, cannot be 0.
- Register result,
- Register elements_array,
- Register elements_array_end,
- Register scratch,
- bool fill_with_hole,
- Label* gc_required) {
- ASSERT(scratch.is(edi)); // rep stos destination
- ASSERT(!fill_with_hole || array_size.is(ecx)); // rep stos count
- ASSERT(!fill_with_hole || !result.is(eax)); // result is never eax
-
- __ LoadInitialArrayMap(array_function, scratch,
- elements_array, fill_with_hole);
-
- // Allocate the JSArray object together with space for a FixedArray with the
- // requested elements.
- STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
- __ AllocateInNewSpace(JSArray::kSize + FixedArray::kHeaderSize,
- times_half_pointer_size, // array_size is a smi.
- array_size,
- result,
- elements_array_end,
- scratch,
- gc_required,
- TAG_OBJECT);
-
- // Allocated the JSArray. Now initialize the fields except for the elements
- // array.
- // result: JSObject
- // elements_array: initial map
- // elements_array_end: start of next object
- // array_size: size of array (smi)
- __ mov(FieldOperand(result, JSObject::kMapOffset), elements_array);
- Factory* factory = masm->isolate()->factory();
- __ mov(elements_array, factory->empty_fixed_array());
- __ mov(FieldOperand(result, JSArray::kPropertiesOffset), elements_array);
- // Field JSArray::kElementsOffset is initialized later.
- __ mov(FieldOperand(result, JSArray::kLengthOffset), array_size);
-
- // Calculate the location of the elements array and set elements array member
- // of the JSArray.
- // result: JSObject
- // elements_array_end: start of next object
- // array_size: size of array (smi)
- __ lea(elements_array, Operand(result, JSArray::kSize));
- __ mov(FieldOperand(result, JSArray::kElementsOffset), elements_array);
-
- // Initialize the fixed array. FixedArray length is stored as a smi.
- // result: JSObject
- // elements_array: elements array
- // elements_array_end: start of next object
- // array_size: size of array (smi)
- __ mov(FieldOperand(elements_array, FixedArray::kMapOffset),
- factory->fixed_array_map());
- // For non-empty JSArrays the length of the FixedArray and the JSArray is the
- // same.
- __ mov(FieldOperand(elements_array, FixedArray::kLengthOffset), array_size);
-
- // Fill the allocated FixedArray with the hole value if requested.
- // result: JSObject
- // elements_array: elements array
- if (fill_with_hole) {
- __ SmiUntag(array_size);
- __ lea(edi, Operand(elements_array,
- FixedArray::kHeaderSize - kHeapObjectTag));
- __ mov(eax, factory->the_hole_value());
- __ cld();
- // Do not use rep stos when filling less than kRepStosThreshold
- // words.
- const int kRepStosThreshold = 16;
- Label loop, entry, done;
- __ cmp(ecx, kRepStosThreshold);
- __ j(below, &loop); // Note: ecx > 0.
- __ rep_stos();
- __ jmp(&done);
- __ bind(&loop);
- __ stos();
- __ bind(&entry);
- __ cmp(edi, elements_array_end);
- __ j(below, &loop);
- __ bind(&done);
- }
-}
-
-
-// Create a new array for the built-in Array function. This function allocates
-// the JSArray object and the FixedArray elements array and initializes these.
-// If the Array cannot be constructed in native code the runtime is called. This
-// function assumes the following state:
-// edi: constructor (built-in Array function)
-// eax: argc
-// esp[0]: return address
-// esp[4]: last argument
-// This function is used for both construct and normal calls of Array. Whether
-// it is a construct call or not is indicated by the construct_call parameter.
-// The only difference between handling a construct call and a normal call is
-// that for a construct call the constructor function in edi needs to be
-// preserved for entering the generic code. In both cases argc in eax needs to
-// be preserved.
-static void ArrayNativeCode(MacroAssembler* masm,
- bool construct_call,
- Label* call_generic_code) {
- Label argc_one_or_more, argc_two_or_more, prepare_generic_code_call,
- empty_array, not_empty_array, finish, cant_transition_map, not_double;
-
- // Push the constructor and argc. No need to tag argc as a smi, as there will
- // be no garbage collection with this on the stack.
- int push_count = 0;
- if (construct_call) {
- push_count++;
- __ push(edi);
- }
- push_count++;
- __ push(eax);
-
- // Check for array construction with zero arguments.
- __ test(eax, eax);
- __ j(not_zero, &argc_one_or_more);
-
- __ bind(&empty_array);
- // Handle construction of an empty array.
- AllocateEmptyJSArray(masm,
- edi,
- eax,
- ebx,
- ecx,
- edi,
- &prepare_generic_code_call);
- __ IncrementCounter(masm->isolate()->counters()->array_function_native(), 1);
- __ pop(ebx);
- if (construct_call) {
- __ pop(edi);
- }
- __ ret(kPointerSize);
-
- // Check for one argument. Bail out if the argument is not a smi or if it is
- // negative.
- __ bind(&argc_one_or_more);
- __ cmp(eax, 1);
- __ j(not_equal, &argc_two_or_more);
- STATIC_ASSERT(kSmiTag == 0);
- __ mov(ecx, Operand(esp, (push_count + 1) * kPointerSize));
- __ test(ecx, ecx);
- __ j(not_zero, &not_empty_array);
-
- // The single argument passed is zero, so we jump to the code above used to
- // handle the case of no arguments passed. To adapt the stack for that, we move
- // the return address and the pushed constructor (if pushed) one stack slot up,
- // thereby removing the passed argument. Argc is also on the stack - at the
- // bottom - and it needs to be changed from 1 to 0 to have the call into the
- // runtime system work in case a GC is required.
- for (int i = push_count; i > 0; i--) {
- __ mov(eax, Operand(esp, i * kPointerSize));
- __ mov(Operand(esp, (i + 1) * kPointerSize), eax);
- }
- __ Drop(2); // Drop two stack slots.
- __ push(Immediate(0)); // Treat this as a call with argc of zero.
- __ jmp(&empty_array);
-
- __ bind(&not_empty_array);
- __ test(ecx, Immediate(kIntptrSignBit | kSmiTagMask));
- __ j(not_zero, &prepare_generic_code_call);
-
- // Handle construction of an empty array of a certain size. Get the size from
- // the stack and bail out if size is too large to actually allocate an elements
- // array.
- __ cmp(ecx, JSObject::kInitialMaxFastElementArray << kSmiTagSize);
- __ j(greater_equal, &prepare_generic_code_call);
-
- // edx: array_size (smi)
- // edi: constructor
- // esp[0]: argc (cannot be 0 here)
- // esp[4]: constructor (only if construct_call)
- // esp[8]: return address
- // esp[C]: argument
- AllocateJSArray(masm,
- edi,
- ecx,
- ebx,
- eax,
- edx,
- edi,
- true,
- &prepare_generic_code_call);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->array_function_native(), 1);
- __ mov(eax, ebx);
- __ pop(ebx);
- if (construct_call) {
- __ pop(edi);
- }
- __ ret(2 * kPointerSize);
-
- // Handle construction of an array from a list of arguments.
- __ bind(&argc_two_or_more);
- STATIC_ASSERT(kSmiTag == 0);
- __ SmiTag(eax); // Convert argc to a smi.
- // eax: array_size (smi)
- // edi: constructor
- // esp[0] : argc
- // esp[4]: constructor (only if construct_call)
- // esp[8] : return address
- // esp[C] : last argument
- AllocateJSArray(masm,
- edi,
- eax,
- ebx,
- ecx,
- edx,
- edi,
- false,
- &prepare_generic_code_call);
- __ IncrementCounter(counters->array_function_native(), 1);
- __ push(ebx);
- __ mov(ebx, Operand(esp, kPointerSize));
- // ebx: argc
- // edx: elements_array_end (untagged)
- // esp[0]: JSArray
- // esp[4]: argc
- // esp[8]: constructor (only if construct_call)
- // esp[12]: return address
- // esp[16]: last argument
-
- // Location of the last argument
- int last_arg_offset = (construct_call ? 4 : 3) * kPointerSize;
- __ lea(edi, Operand(esp, last_arg_offset));
-
- // Location of the first array element (Parameter fill_with_hole to
- // AllocateJSArray is false, so the FixedArray is returned in ecx).
- __ lea(edx, Operand(ecx, FixedArray::kHeaderSize - kHeapObjectTag));
-
- Label has_non_smi_element;
-
- // ebx: argc
- // edx: location of the first array element
- // edi: location of the last argument
- // esp[0]: JSArray
- // esp[4]: argc
- // esp[8]: constructor (only if construct_call)
- // esp[12]: return address
- // esp[16]: last argument
- Label loop, entry;
- __ mov(ecx, ebx);
- __ jmp(&entry);
- __ bind(&loop);
- __ mov(eax, Operand(edi, ecx, times_pointer_size, 0));
- if (FLAG_smi_only_arrays) {
- __ JumpIfNotSmi(eax, &has_non_smi_element);
- }
- __ mov(Operand(edx, 0), eax);
- __ add(edx, Immediate(kPointerSize));
- __ bind(&entry);
- __ dec(ecx);
- __ j(greater_equal, &loop);
-
- // Remove caller arguments from the stack and return.
- // ebx: argc
- // esp[0]: JSArray
- // esp[4]: argc
- // esp[8]: constructor (only if construct_call)
- // esp[12]: return address
- // esp[16]: last argument
- __ bind(&finish);
- __ mov(ecx, Operand(esp, last_arg_offset - kPointerSize));
- __ pop(eax);
- __ pop(ebx);
- __ lea(esp, Operand(esp, ebx, times_pointer_size,
- last_arg_offset - kPointerSize));
- __ jmp(ecx);
-
- __ bind(&has_non_smi_element);
- // Double values are handled by the runtime.
- __ CheckMap(eax,
- masm->isolate()->factory()->heap_number_map(),
- &not_double,
- DONT_DO_SMI_CHECK);
- __ bind(&cant_transition_map);
- // Throw away the array that's only been partially constructed.
- __ pop(eax);
- __ UndoAllocationInNewSpace(eax);
- __ jmp(&prepare_generic_code_call);
-
- __ bind(&not_double);
- // Transition FAST_SMI_ELEMENTS to FAST_ELEMENTS.
- __ mov(ebx, Operand(esp, 0));
- __ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset));
- __ LoadTransitionedArrayMapConditional(
- FAST_SMI_ELEMENTS,
- FAST_ELEMENTS,
- edi,
- eax,
- &cant_transition_map);
- __ mov(FieldOperand(ebx, HeapObject::kMapOffset), edi);
- __ RecordWriteField(ebx, HeapObject::kMapOffset, edi, eax,
- kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
- // Prepare to re-enter the loop
- __ lea(edi, Operand(esp, last_arg_offset));
-
- // Finish the array initialization loop.
- Label loop2;
- __ bind(&loop2);
- __ mov(eax, Operand(edi, ecx, times_pointer_size, 0));
- __ mov(Operand(edx, 0), eax);
- __ add(edx, Immediate(kPointerSize));
- __ dec(ecx);
- __ j(greater_equal, &loop2);
- __ jmp(&finish);
-
- // Restore argc and constructor before running the generic code.
- __ bind(&prepare_generic_code_call);
- __ pop(eax);
- if (construct_call) {
- __ pop(edi);
- }
- __ jmp(call_generic_code);
-}
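The argument-copy loop in ArrayNativeCode reads the caller's arguments from the highest stack address down and writes them front to back into the new elements array (the FLAG_smi_only_arrays check is omitted here). A minimal sketch of that loop in plain C++, with args pointing at the last JS argument on the stack:

#include <cstdint>

// Sketch only: args[0] is the last JS argument (lowest address on the stack),
// args[argc - 1] is the first one. Elements end up in source order.
void CopyArgumentsToElements(const intptr_t* args, intptr_t* elements, int argc) {
  intptr_t* out = elements;
  for (int i = argc - 1; i >= 0; --i) {
    *out++ = args[i];
  }
}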
-
-
void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : argc
@@ -1364,21 +1009,16 @@ void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
__ mov(ebx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi.
__ test(ebx, Immediate(kSmiTagMask));
- __ Assert(not_zero, "Unexpected initial map for InternalArray function");
+ __ Assert(not_zero, kUnexpectedInitialMapForInternalArrayFunction);
__ CmpObjectType(ebx, MAP_TYPE, ecx);
- __ Assert(equal, "Unexpected initial map for InternalArray function");
+ __ Assert(equal, kUnexpectedInitialMapForInternalArrayFunction);
}
// Run the native code for the InternalArray function called as a normal
// function.
- ArrayNativeCode(masm, false, &generic_array_code);
-
- // Jump to the generic internal array code in case the specialized code cannot
- // handle the construction.
- __ bind(&generic_array_code);
- Handle<Code> array_code =
- masm->isolate()->builtins()->InternalArrayCodeGeneric();
- __ jmp(array_code, RelocInfo::CODE_TARGET);
+ // tail call a stub
+ InternalArrayConstructorStub stub(masm->isolate());
+ __ TailCallStub(&stub);
}
@@ -1398,54 +1038,19 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
__ mov(ebx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi.
__ test(ebx, Immediate(kSmiTagMask));
- __ Assert(not_zero, "Unexpected initial map for Array function");
+ __ Assert(not_zero, kUnexpectedInitialMapForArrayFunction);
__ CmpObjectType(ebx, MAP_TYPE, ecx);
- __ Assert(equal, "Unexpected initial map for Array function");
+ __ Assert(equal, kUnexpectedInitialMapForArrayFunction);
}
// Run the native code for the Array function called as a normal function.
- ArrayNativeCode(masm, false, &generic_array_code);
-
- // Jump to the generic array code in case the specialized code cannot handle
- // the construction.
- __ bind(&generic_array_code);
- Handle<Code> array_code =
- masm->isolate()->builtins()->ArrayCodeGeneric();
- __ jmp(array_code, RelocInfo::CODE_TARGET);
-}
-
-
-void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : argc
- // -- edi : constructor
- // -- esp[0] : return address
- // -- esp[4] : last argument
- // -----------------------------------
- Label generic_constructor;
-
- if (FLAG_debug_code) {
- // The array construct code is only set for the global and natives
- // builtin Array functions which always have maps.
-
- // Initial map for the builtin Array function should be a map.
- __ mov(ebx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
- // Will both indicate a NULL and a Smi.
- __ test(ebx, Immediate(kSmiTagMask));
- __ Assert(not_zero, "Unexpected initial map for Array function");
- __ CmpObjectType(ebx, MAP_TYPE, ecx);
- __ Assert(equal, "Unexpected initial map for Array function");
- }
-
- // Run the native code for the Array function called as constructor.
- ArrayNativeCode(masm, true, &generic_constructor);
-
- // Jump to the generic construct code in case the specialized code cannot
- // handle the construction.
- __ bind(&generic_constructor);
- Handle<Code> generic_construct_stub =
- masm->isolate()->builtins()->JSConstructStubGeneric();
- __ jmp(generic_construct_stub, RelocInfo::CODE_TARGET);
+ // tail call a stub
+ Handle<Object> undefined_sentinel(
+ masm->isolate()->heap()->undefined_value(),
+ masm->isolate());
+ __ mov(ebx, Immediate(undefined_sentinel));
+ ArrayConstructorStub stub(masm->isolate());
+ __ TailCallStub(&stub);
}
@@ -1463,7 +1068,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
if (FLAG_debug_code) {
__ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, ecx);
__ cmp(edi, ecx);
- __ Assert(equal, "Unexpected String function");
+ __ Assert(equal, kUnexpectedStringFunction);
}
// Load the first argument into eax and get rid of the rest
@@ -1479,14 +1084,11 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
// Lookup the argument in the number to string cache.
Label not_cached, argument_is_string;
- NumberToStringStub::GenerateLookupNumberStringCache(
- masm,
- eax, // Input.
- ebx, // Result.
- ecx, // Scratch 1.
- edx, // Scratch 2.
- false, // Input is known to be smi?
- &not_cached);
+ __ LookupNumberStringCache(eax, // Input.
+ ebx, // Result.
+ ecx, // Scratch 1.
+ edx, // Scratch 2.
+ &not_cached);
__ IncrementCounter(counters->string_ctor_cached_number(), 1);
__ bind(&argument_is_string);
// ----------- S t a t e -------------
@@ -1497,21 +1099,21 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
// Allocate a JSValue and put the tagged pointer into eax.
Label gc_required;
- __ AllocateInNewSpace(JSValue::kSize,
- eax, // Result.
- ecx, // New allocation top (we ignore it).
- no_reg,
- &gc_required,
- TAG_OBJECT);
+ __ Allocate(JSValue::kSize,
+ eax, // Result.
+ ecx, // New allocation top (we ignore it).
+ no_reg,
+ &gc_required,
+ TAG_OBJECT);
// Set the map.
__ LoadGlobalFunctionInitialMap(edi, ecx);
if (FLAG_debug_code) {
__ cmpb(FieldOperand(ecx, Map::kInstanceSizeOffset),
JSValue::kSize >> kPointerSizeLog2);
- __ Assert(equal, "Unexpected string wrapper instance size");
+ __ Assert(equal, kUnexpectedStringWrapperInstanceSize);
__ cmpb(FieldOperand(ecx, Map::kUnusedPropertyFieldsOffset), 0);
- __ Assert(equal, "Unexpected unused properties of string wrapper");
+ __ Assert(equal, kUnexpectedUnusedPropertiesOfStringWrapper);
}
__ mov(FieldOperand(eax, HeapObject::kMapOffset), ecx);
@@ -1699,76 +1301,68 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
- CpuFeatures::TryForceFeatureScope scope(SSE2);
- if (!CpuFeatures::IsSupported(SSE2) && FLAG_debug_code) {
- __ Abort("Unreachable code: Cannot optimize without SSE2 support.");
- return;
- }
-
- // Get the loop depth of the stack guard check. This is recorded in
- // a test(eax, depth) instruction right after the call.
- Label stack_check;
- __ mov(ebx, Operand(esp, 0)); // return address
- if (FLAG_debug_code) {
- __ cmpb(Operand(ebx, 0), Assembler::kTestAlByte);
- __ Assert(equal, "test eax instruction not found after loop stack check");
- }
- __ movzx_b(ebx, Operand(ebx, 1)); // depth
-
- // Get the loop nesting level at which we allow OSR from the
- // unoptimized code and check if we want to do OSR yet. If not we
- // should perform a stack guard check so we can get interrupts while
- // waiting for on-stack replacement.
+ // Lookup the function in the JavaScript frame.
__ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ mov(ecx, FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset));
- __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kCodeOffset));
- __ cmpb(ebx, FieldOperand(ecx, Code::kAllowOSRAtLoopNestingLevelOffset));
- __ j(greater, &stack_check);
-
- // Pass the function to optimize as the argument to the on-stack
- // replacement runtime function.
{
FrameScope scope(masm, StackFrame::INTERNAL);
+ // Lookup and calculate pc offset.
+ __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerPCOffset));
+ __ mov(ebx, FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset));
+ __ sub(edx, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ sub(edx, FieldOperand(ebx, SharedFunctionInfo::kCodeOffset));
+ __ SmiTag(edx);
+
+ // Pass both function and pc offset as arguments.
__ push(eax);
- __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
+ __ push(edx);
+ __ CallRuntime(Runtime::kCompileForOnStackReplacement, 2);
}
- // If the result was -1 it means that we couldn't optimize the
- // function. Just return and continue in the unoptimized version.
Label skip;
- __ cmp(eax, Immediate(Smi::FromInt(-1)));
+ // If the code object is null, just return to the unoptimized code.
+ __ cmp(eax, Immediate(0));
__ j(not_equal, &skip, Label::kNear);
__ ret(0);
- // Insert a stack guard check so that if we decide not to perform
- // on-stack replacement right away, the function calling this stub can
- // still be interrupted.
- __ bind(&stack_check);
+ __ bind(&skip);
+
+ // Load deoptimization data from the code object.
+ __ mov(ebx, Operand(eax, Code::kDeoptimizationDataOffset - kHeapObjectTag));
+
+ // Load the OSR entrypoint offset from the deoptimization data.
+ __ mov(ebx, Operand(ebx, FixedArray::OffsetOfElementAt(
+ DeoptimizationInputData::kOsrPcOffsetIndex) - kHeapObjectTag));
+ __ SmiUntag(ebx);
+
+ // Compute the target address = code_obj + header_size + osr_offset
+ __ lea(eax, Operand(eax, ebx, times_1, Code::kHeaderSize - kHeapObjectTag));
+
+ // Overwrite the return address on the stack.
+ __ mov(Operand(esp, 0), eax);
+
+ // And "return" to the OSR entry point of the function.
+ __ ret(0);
+}
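The two address computations in the new OSR path are straightforward pointer arithmetic: the pc offset is the return address minus the first instruction of the unoptimized code, and the entry point is the first instruction of the optimized code plus the OSR pc offset read from the deoptimization data. A minimal sketch, assuming kHeapObjectTag == 1 and using a stand-in value for Code::kHeaderSize (which is build-dependent):

#include <cstdint>

static const intptr_t kHeapObjectTagSketch = 1;
static const intptr_t kCodeHeaderSizeSketch = 32;  // stand-in for Code::kHeaderSize

intptr_t InstructionStart(intptr_t tagged_code_pointer) {
  return tagged_code_pointer - kHeapObjectTagSketch + kCodeHeaderSizeSketch;
}

// Offset of the caller's return address within the unoptimized code object.
intptr_t PcOffset(intptr_t return_address, intptr_t tagged_unoptimized_code) {
  return return_address - InstructionStart(tagged_unoptimized_code);
}

// Address the stub "returns" to: the OSR entry in the optimized code object.
intptr_t OsrEntry(intptr_t tagged_osr_code, intptr_t osr_pc_offset) {
  return InstructionStart(tagged_osr_code) + osr_pc_offset;
}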
+
+
+void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
+ // We check the stack limit as indicator that recompilation might be done.
Label ok;
ExternalReference stack_limit =
ExternalReference::address_of_stack_limit(masm->isolate());
__ cmp(esp, Operand::StaticVariable(stack_limit));
__ j(above_equal, &ok, Label::kNear);
- StackCheckStub stub;
- __ TailCallStub(&stub);
- if (FLAG_debug_code) {
- __ Abort("Unreachable code: returned from tail call.");
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kStackGuard, 0);
}
+ __ jmp(masm->isolate()->builtins()->OnStackReplacement(),
+ RelocInfo::CODE_TARGET);
+
__ bind(&ok);
__ ret(0);
-
- __ bind(&skip);
- // Untag the AST id and push it on the stack.
- __ SmiUntag(eax);
- __ push(eax);
-
- // Generate the code for doing the frame-to-frame translation using
- // the deoptimizer infrastructure.
- Deoptimizer::EntryGenerator generator(masm, Deoptimizer::OSR);
- generator.Generate();
}
-
#undef __
}
} // namespace v8::internal
diff --git a/deps/v8/src/ia32/code-stubs-ia32.cc b/deps/v8/src/ia32/code-stubs-ia32.cc
index 1d23c7e5d..b6bbe04b3 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.cc
+++ b/deps/v8/src/ia32/code-stubs-ia32.cc
@@ -27,169 +27,309 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_IA32)
+#if V8_TARGET_ARCH_IA32
#include "bootstrapper.h"
#include "code-stubs.h"
#include "isolate.h"
#include "jsregexp.h"
#include "regexp-macro-assembler.h"
+#include "runtime.h"
#include "stub-cache.h"
#include "codegen.h"
+#include "runtime.h"
namespace v8 {
namespace internal {
-#define __ ACCESS_MASM(masm)
-void ToNumberStub::Generate(MacroAssembler* masm) {
- // The ToNumber stub takes one argument in eax.
- Label check_heap_number, call_builtin;
- __ JumpIfNotSmi(eax, &check_heap_number, Label::kNear);
- __ ret(0);
+void FastNewClosureStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { ebx };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kNewClosureFromStubFailure)->entry;
+}
- __ bind(&check_heap_number);
- __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- Factory* factory = masm->isolate()->factory();
- __ cmp(ebx, Immediate(factory->heap_number_map()));
- __ j(not_equal, &call_builtin, Label::kNear);
- __ ret(0);
- __ bind(&call_builtin);
- __ pop(ecx); // Pop return address.
- __ push(eax);
- __ push(ecx); // Push return address.
- __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
+void ToNumberStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { eax };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
}
-void FastNewClosureStub::Generate(MacroAssembler* masm) {
- // Create a new closure from the given function info in new
- // space. Set the context to the current context in esi.
- Counters* counters = masm->isolate()->counters();
+void NumberToStringStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { eax };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kNumberToString)->entry;
+}
- Label gc;
- __ AllocateInNewSpace(JSFunction::kSize, eax, ebx, ecx, &gc, TAG_OBJECT);
- __ IncrementCounter(counters->fast_new_closure_total(), 1);
+void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { eax, ebx, ecx };
+ descriptor->register_param_count_ = 3;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kCreateArrayLiteralShallow)->entry;
+}
- // Get the function info from the stack.
- __ mov(edx, Operand(esp, 1 * kPointerSize));
- int map_index = (language_mode_ == CLASSIC_MODE)
- ? Context::FUNCTION_MAP_INDEX
- : Context::STRICT_MODE_FUNCTION_MAP_INDEX;
+void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { eax, ebx, ecx, edx };
+ descriptor->register_param_count_ = 4;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry;
+}
- // Compute the function map in the current native context and set that
- // as the map of the allocated object.
- __ mov(ecx, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ mov(ecx, FieldOperand(ecx, GlobalObject::kNativeContextOffset));
- __ mov(ebx, Operand(ecx, Context::SlotOffset(map_index)));
- __ mov(FieldOperand(eax, JSObject::kMapOffset), ebx);
- // Initialize the rest of the function. We don't have to update the
- // write barrier because the allocated object is in new space.
- Factory* factory = masm->isolate()->factory();
- __ mov(ebx, Immediate(factory->empty_fixed_array()));
- __ mov(FieldOperand(eax, JSObject::kPropertiesOffset), ebx);
- __ mov(FieldOperand(eax, JSObject::kElementsOffset), ebx);
- __ mov(FieldOperand(eax, JSFunction::kPrototypeOrInitialMapOffset),
- Immediate(factory->the_hole_value()));
- __ mov(FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset), edx);
- __ mov(FieldOperand(eax, JSFunction::kContextOffset), esi);
- __ mov(FieldOperand(eax, JSFunction::kLiteralsOffset), ebx);
-
- // Initialize the code pointer in the function to be the one
- // found in the shared function info object.
- // But first check if there is an optimized version for our context.
- Label check_optimized;
- Label install_unoptimized;
- if (FLAG_cache_optimized_code) {
- __ mov(ebx, FieldOperand(edx, SharedFunctionInfo::kOptimizedCodeMapOffset));
- __ test(ebx, ebx);
- __ j(not_zero, &check_optimized, Label::kNear);
- }
- __ bind(&install_unoptimized);
- __ mov(FieldOperand(eax, JSFunction::kNextFunctionLinkOffset),
- Immediate(factory->undefined_value()));
- __ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
- __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
- __ mov(FieldOperand(eax, JSFunction::kCodeEntryOffset), edx);
+void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { ebx };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
- // Return and remove the on-stack parameter.
- __ ret(1 * kPointerSize);
- __ bind(&check_optimized);
+void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { edx, ecx };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
+}
- __ IncrementCounter(counters->fast_new_closure_try_optimized(), 1);
- // ecx holds native context, ebx points to fixed array of 3-element entries
- // (native context, optimized code, literals).
- // Map must never be empty, so check the first elements.
- Label install_optimized;
- // Speculatively move code object into edx.
- __ mov(edx, FieldOperand(ebx, FixedArray::kHeaderSize + kPointerSize));
- __ cmp(ecx, FieldOperand(ebx, FixedArray::kHeaderSize));
- __ j(equal, &install_optimized);
+void LoadFieldStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { edx };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
- // Iterate through the rest of map backwards. edx holds an index as a Smi.
- Label loop;
- Label restore;
- __ mov(edx, FieldOperand(ebx, FixedArray::kLengthOffset));
- __ bind(&loop);
- // Do not double check first entry.
- __ cmp(edx, Immediate(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
- __ j(equal, &restore);
- __ sub(edx, Immediate(Smi::FromInt(
- SharedFunctionInfo::kEntryLength))); // Skip an entry.
- __ cmp(ecx, CodeGenerator::FixedArrayElementOperand(ebx, edx, 0));
- __ j(not_equal, &loop, Label::kNear);
- // Hit: fetch the optimized code.
- __ mov(edx, CodeGenerator::FixedArrayElementOperand(ebx, edx, 1));
-
- __ bind(&install_optimized);
- __ IncrementCounter(counters->fast_new_closure_install_optimized(), 1);
-
- // TODO(fschneider): Idea: store proper code pointers in the optimized code
- // map and either unmangle them on marking or do nothing as the whole map is
- // discarded on major GC anyway.
- __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
- __ mov(FieldOperand(eax, JSFunction::kCodeEntryOffset), edx);
- // Now link a function into a list of optimized functions.
- __ mov(edx, ContextOperand(ecx, Context::OPTIMIZED_FUNCTIONS_LIST));
+void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { edx };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
- __ mov(FieldOperand(eax, JSFunction::kNextFunctionLinkOffset), edx);
- // No need for write barrier as JSFunction (eax) is in the new space.
- __ mov(ContextOperand(ecx, Context::OPTIMIZED_FUNCTIONS_LIST), eax);
- // Store JSFunction (eax) into edx before issuing write barrier as
- // it clobbers all the registers passed.
- __ mov(edx, eax);
- __ RecordWriteContextSlot(
- ecx,
- Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST),
- edx,
- ebx,
- kDontSaveFPRegs);
+void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { edx, ecx, eax };
+ descriptor->register_param_count_ = 3;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(KeyedStoreIC_MissFromStubFailure);
+}
- // Return and remove the on-stack parameter.
- __ ret(1 * kPointerSize);
- __ bind(&restore);
- // Restore SharedFunctionInfo into edx.
- __ mov(edx, Operand(esp, 1 * kPointerSize));
- __ jmp(&install_unoptimized);
+void TransitionElementsKindStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { eax, ebx };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry;
+}
- // Create a new closure through the slower runtime call.
- __ bind(&gc);
- __ pop(ecx); // Temporarily remove return address.
- __ pop(edx);
- __ push(esi);
- __ push(edx);
- __ push(Immediate(factory->false_value()));
- __ push(ecx); // Restore return address.
- __ TailCallRuntime(Runtime::kNewClosure, 3, 1);
+
+static void InitializeArrayConstructorDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor,
+ int constant_stack_parameter_count) {
+ // register state
+ // eax -- number of arguments
+ // edi -- function
+ // ebx -- type info cell with elements kind
+ static Register registers[] = { edi, ebx };
+ descriptor->register_param_count_ = 2;
+
+ if (constant_stack_parameter_count != 0) {
+ // stack param count needs (constructor pointer, and single argument)
+ descriptor->stack_parameter_count_ = eax;
+ }
+ descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
+ descriptor->register_params_ = registers;
+ descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kArrayConstructor)->entry;
+}
+
+
+static void InitializeInternalArrayConstructorDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor,
+ int constant_stack_parameter_count) {
+ // register state
+ // eax -- number of arguments
+ // edi -- constructor function
+ static Register registers[] = { edi };
+ descriptor->register_param_count_ = 1;
+
+ if (constant_stack_parameter_count != 0) {
+ // stack param count needs (constructor pointer, and single argument)
+ descriptor->stack_parameter_count_ = eax;
+ }
+ descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
+ descriptor->register_params_ = registers;
+ descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kInternalArrayConstructor)->entry;
+}
+
+
+void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate, descriptor, 0);
+}
+
+
+void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate, descriptor, 1);
+}
+
+
+void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate, descriptor, -1);
+}
+
+
+void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 0);
+}
+
+
+void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 1);
+}
+
+
+void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(isolate, descriptor, -1);
+}
+
+
+void CompareNilICStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { eax };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(CompareNilIC_Miss);
+ descriptor->SetMissHandler(
+ ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate));
+}
+
+void ToBooleanStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { eax };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(ToBooleanIC_Miss);
+ descriptor->SetMissHandler(
+ ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate));
+}
+
+
+void StoreGlobalStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { edx, ecx, eax };
+ descriptor->register_param_count_ = 3;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(StoreIC_MissFromStubFailure);
+}
+
+
+void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { eax, ebx, ecx, edx };
+ descriptor->register_param_count_ = 4;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(ElementsTransitionAndStoreIC_Miss);
+}
+
+
+void BinaryOpStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { edx, eax };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
+ descriptor->SetMissHandler(
+ ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate));
+}
+
+
+#define __ ACCESS_MASM(masm)
+
+
+void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
+ // Update the static counter each time a new code stub is generated.
+ Isolate* isolate = masm->isolate();
+ isolate->counters()->code_stubs()->Increment();
+
+ CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor(isolate);
+ int param_count = descriptor->register_param_count_;
+ {
+ // Call the runtime system in a fresh internal frame.
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ ASSERT(descriptor->register_param_count_ == 0 ||
+ eax.is(descriptor->register_params_[param_count - 1]));
+ // Push arguments
+ for (int i = 0; i < param_count; ++i) {
+ __ push(descriptor->register_params_[i]);
+ }
+ ExternalReference miss = descriptor->miss_handler();
+ __ CallExternalReference(miss, descriptor->register_param_count_);
+ }
+
+ __ ret(0);
}
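GenerateLightweightMiss is driven entirely by the stub's interface descriptor: each register parameter is pushed in order (with eax expected to hold the last one, the result register) and the descriptor's miss handler is called with that many arguments. A sketch of the equivalent call, using a hypothetical descriptor struct rather than V8's CodeStubInterfaceDescriptor:

#include <cstdint>

// Hypothetical, simplified view of a descriptor, for illustration only.
struct DescriptorSketch {
  int register_param_count;
  const intptr_t* register_param_values;                  // current register contents
  intptr_t (*miss_handler)(const intptr_t* args, int n);  // external reference
};

intptr_t LightweightMissSketch(const DescriptorSketch& d) {
  // The real code pushes the registers inside an internal frame and calls
  // the miss handler through CallExternalReference.
  return d.miss_handler(d.register_param_values, d.register_param_count);
}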
@@ -197,8 +337,8 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
// Try to allocate the context in new space.
Label gc;
int length = slots_ + Context::MIN_CONTEXT_SLOTS;
- __ AllocateInNewSpace((length * kPointerSize) + FixedArray::kHeaderSize,
- eax, ebx, ecx, &gc, TAG_OBJECT);
+ __ Allocate((length * kPointerSize) + FixedArray::kHeaderSize,
+ eax, ebx, ecx, &gc, TAG_OBJECT);
// Get the function from the stack.
__ mov(ecx, Operand(esp, 1 * kPointerSize));
@@ -245,8 +385,7 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
// Try to allocate the context in new space.
Label gc;
int length = slots_ + Context::MIN_CONTEXT_SLOTS;
- __ AllocateInNewSpace(FixedArray::SizeFor(length),
- eax, ebx, ecx, &gc, TAG_OBJECT);
+ __ Allocate(FixedArray::SizeFor(length), eax, ebx, ecx, &gc, TAG_OBJECT);
// Get the function or sentinel from the stack.
__ mov(ecx, Operand(esp, 1 * kPointerSize));
@@ -268,9 +407,8 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
Label after_sentinel;
__ JumpIfNotSmi(ecx, &after_sentinel, Label::kNear);
if (FLAG_debug_code) {
- const char* message = "Expected 0 as a Smi sentinel";
__ cmp(ecx, 0);
- __ Assert(equal, message);
+ __ Assert(equal, kExpected0AsASmiSentinel);
}
__ mov(ecx, GlobalObjectOperand());
__ mov(ecx, FieldOperand(ecx, GlobalObject::kNativeContextOffset));
@@ -307,301 +445,17 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
}
-static void GenerateFastCloneShallowArrayCommon(
- MacroAssembler* masm,
- int length,
- FastCloneShallowArrayStub::Mode mode,
- Label* fail) {
- // Registers on entry:
- //
- // ecx: boilerplate literal array.
- ASSERT(mode != FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS);
-
- // All sizes here are multiples of kPointerSize.
- int elements_size = 0;
- if (length > 0) {
- elements_size = mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
- ? FixedDoubleArray::SizeFor(length)
- : FixedArray::SizeFor(length);
- }
- int size = JSArray::kSize + elements_size;
-
- // Allocate both the JS array and the elements array in one big
- // allocation. This avoids multiple limit checks.
- __ AllocateInNewSpace(size, eax, ebx, edx, fail, TAG_OBJECT);
-
- // Copy the JS array part.
- for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
- if ((i != JSArray::kElementsOffset) || (length == 0)) {
- __ mov(ebx, FieldOperand(ecx, i));
- __ mov(FieldOperand(eax, i), ebx);
- }
- }
-
- if (length > 0) {
- // Get hold of the elements array of the boilerplate and setup the
- // elements pointer in the resulting object.
- __ mov(ecx, FieldOperand(ecx, JSArray::kElementsOffset));
- __ lea(edx, Operand(eax, JSArray::kSize));
- __ mov(FieldOperand(eax, JSArray::kElementsOffset), edx);
-
- // Copy the elements array.
- if (mode == FastCloneShallowArrayStub::CLONE_ELEMENTS) {
- for (int i = 0; i < elements_size; i += kPointerSize) {
- __ mov(ebx, FieldOperand(ecx, i));
- __ mov(FieldOperand(edx, i), ebx);
- }
- } else {
- ASSERT(mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS);
- int i;
- for (i = 0; i < FixedDoubleArray::kHeaderSize; i += kPointerSize) {
- __ mov(ebx, FieldOperand(ecx, i));
- __ mov(FieldOperand(edx, i), ebx);
- }
- while (i < elements_size) {
- __ fld_d(FieldOperand(ecx, i));
- __ fstp_d(FieldOperand(edx, i));
- i += kDoubleSize;
- }
- ASSERT(i == elements_size);
- }
- }
-}
-
-
-void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
- // Stack layout on entry:
- //
- // [esp + kPointerSize]: constant elements.
- // [esp + (2 * kPointerSize)]: literal index.
- // [esp + (3 * kPointerSize)]: literals array.
-
- // Load boilerplate object into ecx and check if we need to create a
- // boilerplate.
- __ mov(ecx, Operand(esp, 3 * kPointerSize));
- __ mov(eax, Operand(esp, 2 * kPointerSize));
- STATIC_ASSERT(kPointerSize == 4);
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiTag == 0);
- __ mov(ecx, FieldOperand(ecx, eax, times_half_pointer_size,
- FixedArray::kHeaderSize));
- Factory* factory = masm->isolate()->factory();
- __ cmp(ecx, factory->undefined_value());
- Label slow_case;
- __ j(equal, &slow_case);
-
- FastCloneShallowArrayStub::Mode mode = mode_;
- // ecx is boilerplate object.
- if (mode == CLONE_ANY_ELEMENTS) {
- Label double_elements, check_fast_elements;
- __ mov(ebx, FieldOperand(ecx, JSArray::kElementsOffset));
- __ CheckMap(ebx, factory->fixed_cow_array_map(),
- &check_fast_elements, DONT_DO_SMI_CHECK);
- GenerateFastCloneShallowArrayCommon(masm, 0,
- COPY_ON_WRITE_ELEMENTS, &slow_case);
- __ ret(3 * kPointerSize);
-
- __ bind(&check_fast_elements);
- __ CheckMap(ebx, factory->fixed_array_map(),
- &double_elements, DONT_DO_SMI_CHECK);
- GenerateFastCloneShallowArrayCommon(masm, length_,
- CLONE_ELEMENTS, &slow_case);
- __ ret(3 * kPointerSize);
-
- __ bind(&double_elements);
- mode = CLONE_DOUBLE_ELEMENTS;
- // Fall through to generate the code to handle double elements.
- }
-
- if (FLAG_debug_code) {
- const char* message;
- Handle<Map> expected_map;
- if (mode == CLONE_ELEMENTS) {
- message = "Expected (writable) fixed array";
- expected_map = factory->fixed_array_map();
- } else if (mode == CLONE_DOUBLE_ELEMENTS) {
- message = "Expected (writable) fixed double array";
- expected_map = factory->fixed_double_array_map();
- } else {
- ASSERT(mode == COPY_ON_WRITE_ELEMENTS);
- message = "Expected copy-on-write fixed array";
- expected_map = factory->fixed_cow_array_map();
- }
- __ push(ecx);
- __ mov(ecx, FieldOperand(ecx, JSArray::kElementsOffset));
- __ cmp(FieldOperand(ecx, HeapObject::kMapOffset), expected_map);
- __ Assert(equal, message);
- __ pop(ecx);
- }
-
- GenerateFastCloneShallowArrayCommon(masm, length_, mode, &slow_case);
- // Return and remove the on-stack parameters.
- __ ret(3 * kPointerSize);
-
- __ bind(&slow_case);
- __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
-}
-
-
-void FastCloneShallowObjectStub::Generate(MacroAssembler* masm) {
- // Stack layout on entry:
- //
- // [esp + kPointerSize]: object literal flags.
- // [esp + (2 * kPointerSize)]: constant properties.
- // [esp + (3 * kPointerSize)]: literal index.
- // [esp + (4 * kPointerSize)]: literals array.
-
- // Load boilerplate object into ecx and check if we need to create a
- // boilerplate.
- Label slow_case;
- __ mov(ecx, Operand(esp, 4 * kPointerSize));
- __ mov(eax, Operand(esp, 3 * kPointerSize));
- STATIC_ASSERT(kPointerSize == 4);
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiTag == 0);
- __ mov(ecx, FieldOperand(ecx, eax, times_half_pointer_size,
- FixedArray::kHeaderSize));
- Factory* factory = masm->isolate()->factory();
- __ cmp(ecx, factory->undefined_value());
- __ j(equal, &slow_case);
-
- // Check that the boilerplate contains only fast properties and we can
- // statically determine the instance size.
- int size = JSObject::kHeaderSize + length_ * kPointerSize;
- __ mov(eax, FieldOperand(ecx, HeapObject::kMapOffset));
- __ movzx_b(eax, FieldOperand(eax, Map::kInstanceSizeOffset));
- __ cmp(eax, Immediate(size >> kPointerSizeLog2));
- __ j(not_equal, &slow_case);
-
- // Allocate the JS object and copy header together with all in-object
- // properties from the boilerplate.
- __ AllocateInNewSpace(size, eax, ebx, edx, &slow_case, TAG_OBJECT);
- for (int i = 0; i < size; i += kPointerSize) {
- __ mov(ebx, FieldOperand(ecx, i));
- __ mov(FieldOperand(eax, i), ebx);
- }
-
- // Return and remove the on-stack parameters.
- __ ret(4 * kPointerSize);
-
- __ bind(&slow_case);
- __ TailCallRuntime(Runtime::kCreateObjectLiteralShallow, 4, 1);
-}
-
-
-// The stub expects its argument on the stack and returns its result in tos_:
-// zero for false, and a non-zero value for true.
-void ToBooleanStub::Generate(MacroAssembler* masm) {
- // This stub overrides SometimesSetsUpAFrame() to return false. That means
- // we cannot call anything that could cause a GC from this stub.
- Label patch;
- Factory* factory = masm->isolate()->factory();
- const Register argument = eax;
- const Register map = edx;
-
- if (!types_.IsEmpty()) {
- __ mov(argument, Operand(esp, 1 * kPointerSize));
- }
-
- // undefined -> false
- CheckOddball(masm, UNDEFINED, Heap::kUndefinedValueRootIndex, false);
-
- // Boolean -> its value
- CheckOddball(masm, BOOLEAN, Heap::kFalseValueRootIndex, false);
- CheckOddball(masm, BOOLEAN, Heap::kTrueValueRootIndex, true);
-
- // 'null' -> false.
- CheckOddball(masm, NULL_TYPE, Heap::kNullValueRootIndex, false);
-
- if (types_.Contains(SMI)) {
- // Smis: 0 -> false, all other -> true
- Label not_smi;
- __ JumpIfNotSmi(argument, &not_smi, Label::kNear);
- // argument contains the correct return value already.
- if (!tos_.is(argument)) {
- __ mov(tos_, argument);
- }
- __ ret(1 * kPointerSize);
- __ bind(&not_smi);
- } else if (types_.NeedsMap()) {
- // If we need a map later and have a Smi -> patch.
- __ JumpIfSmi(argument, &patch, Label::kNear);
- }
-
- if (types_.NeedsMap()) {
- __ mov(map, FieldOperand(argument, HeapObject::kMapOffset));
-
- if (types_.CanBeUndetectable()) {
- __ test_b(FieldOperand(map, Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
- // Undetectable -> false.
- Label not_undetectable;
- __ j(zero, &not_undetectable, Label::kNear);
- __ Set(tos_, Immediate(0));
- __ ret(1 * kPointerSize);
- __ bind(&not_undetectable);
- }
- }
-
- if (types_.Contains(SPEC_OBJECT)) {
- // spec object -> true.
- Label not_js_object;
- __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
- __ j(below, &not_js_object, Label::kNear);
- // argument contains the correct return value already.
- if (!tos_.is(argument)) {
- __ Set(tos_, Immediate(1));
- }
- __ ret(1 * kPointerSize);
- __ bind(&not_js_object);
- }
-
- if (types_.Contains(STRING)) {
- // String value -> false iff empty.
- Label not_string;
- __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
- __ j(above_equal, &not_string, Label::kNear);
- __ mov(tos_, FieldOperand(argument, String::kLengthOffset));
- __ ret(1 * kPointerSize); // the string length is OK as the return value
- __ bind(&not_string);
- }
-
- if (types_.Contains(HEAP_NUMBER)) {
- // heap number -> false iff +0, -0, or NaN.
- Label not_heap_number, false_result;
- __ cmp(map, factory->heap_number_map());
- __ j(not_equal, &not_heap_number, Label::kNear);
- __ fldz();
- __ fld_d(FieldOperand(argument, HeapNumber::kValueOffset));
- __ FCmp();
- __ j(zero, &false_result, Label::kNear);
- // argument contains the correct return value already.
- if (!tos_.is(argument)) {
- __ Set(tos_, Immediate(1));
- }
- __ ret(1 * kPointerSize);
- __ bind(&false_result);
- __ Set(tos_, Immediate(0));
- __ ret(1 * kPointerSize);
- __ bind(&not_heap_number);
- }
-
- __ bind(&patch);
- GenerateTypeTransition(masm);
-}
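The deleted ToBooleanStub enumerated the ECMAScript ToBoolean cases directly: undefined, null, false, smi 0, the empty string, ±0 and NaN, and undetectable objects are falsy; everything else is truthy. A sketch of the same decision over a simplified value model (not V8's object layout):

#include <cmath>
#include <string>

enum class Kind { kUndefined, kNull, kBoolean, kSmi, kString, kHeapNumber, kObject };

// Simplified stand-in for a JS value, for illustration only.
struct ValueSketch {
  Kind kind;
  bool boolean = false;
  int smi = 0;
  std::string string;
  double number = 0.0;
  bool undetectable = false;  // e.g. document.all-style objects
};

bool ToBooleanSketch(const ValueSketch& v) {
  switch (v.kind) {
    case Kind::kUndefined:
    case Kind::kNull:       return false;
    case Kind::kBoolean:    return v.boolean;
    case Kind::kSmi:        return v.smi != 0;
    case Kind::kString:     return !v.string.empty();
    case Kind::kHeapNumber: return v.number != 0.0 && !std::isnan(v.number);  // -0 is falsy too
    case Kind::kObject:     return !v.undetectable;
  }
  return false;
}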
-
-
void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
// We don't allow a GC during a store buffer overflow so there is no need to
// store the registers in any particular way, but we do have to store and
// restore them.
__ pushad();
if (save_doubles_ == kSaveFPRegs) {
- CpuFeatures::Scope scope(SSE2);
+ CpuFeatureScope scope(masm, SSE2);
__ sub(esp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
- __ movdbl(Operand(esp, i * kDoubleSize), reg);
+ __ movsd(Operand(esp, i * kDoubleSize), reg);
}
}
const int argument_count = 1;
@@ -609,15 +463,15 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
AllowExternalCallThatCantCauseGC scope(masm);
__ PrepareCallCFunction(argument_count, ecx);
__ mov(Operand(esp, 0 * kPointerSize),
- Immediate(ExternalReference::isolate_address()));
+ Immediate(ExternalReference::isolate_address(masm->isolate())));
__ CallCFunction(
ExternalReference::store_buffer_overflow_function(masm->isolate()),
argument_count);
if (save_doubles_ == kSaveFPRegs) {
- CpuFeatures::Scope scope(SSE2);
+ CpuFeatureScope scope(masm, SSE2);
for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
- __ movdbl(reg, Operand(esp, i * kDoubleSize));
+ __ movsd(reg, Operand(esp, i * kDoubleSize));
}
__ add(esp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
}
@@ -626,44 +480,6 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
}
-void ToBooleanStub::CheckOddball(MacroAssembler* masm,
- Type type,
- Heap::RootListIndex value,
- bool result) {
- const Register argument = eax;
- if (types_.Contains(type)) {
- // If we see an expected oddball, return its ToBoolean value tos_.
- Label different_value;
- __ CompareRoot(argument, value);
- __ j(not_equal, &different_value, Label::kNear);
- if (!result) {
- // If we have to return zero, there is no way around clearing tos_.
- __ Set(tos_, Immediate(0));
- } else if (!tos_.is(argument)) {
- // If we have to return non-zero, we can re-use the argument if it is the
- // same register as the result, because we never see Smi-zero here.
- __ Set(tos_, Immediate(1));
- }
- __ ret(1 * kPointerSize);
- __ bind(&different_value);
- }
-}
-
-
-void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) {
- __ pop(ecx); // Get return address, operand is now on top of stack.
- __ push(Immediate(Smi::FromInt(tos_.code())));
- __ push(Immediate(Smi::FromInt(types_.ToByte())));
- __ push(ecx); // Push return address.
- // Patch the caller to an appropriate specialized stub and return the
- // operation result to the caller of the stub.
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kToBoolean_Patch), masm->isolate()),
- 3,
- 1);
-}
-
-
class FloatingPointHelper : public AllStatic {
public:
enum ArgLocation {
@@ -677,18 +493,6 @@ class FloatingPointHelper : public AllStatic {
// on FPU stack.
static void LoadFloatOperand(MacroAssembler* masm, Register number);
- // Code pattern for loading floating point values. Input values must
- // be either smi or heap number objects (fp values). Requirements:
- // operand_1 on TOS+1 or in edx, operand_2 on TOS+2 or in eax.
- // Returns operands as floating point numbers on FPU stack.
- static void LoadFloatOperands(MacroAssembler* masm,
- Register scratch,
- ArgLocation arg_location = ARGS_ON_STACK);
-
- // Similar to LoadFloatOperand but assumes that both operands are smis.
- // Expects operands in edx, eax.
- static void LoadFloatSmis(MacroAssembler* masm, Register scratch);
-
// Test if operands are smi or number objects (fp). Requirements:
// operand_1 in eax, operand_2 in edx; falls through on float
// operands, jumps to the non_float label otherwise.
@@ -696,1762 +500,151 @@ class FloatingPointHelper : public AllStatic {
Label* non_float,
Register scratch);
- // Checks that the two floating point numbers on top of the FPU stack
- // have int32 values.
- static void CheckFloatOperandsAreInt32(MacroAssembler* masm,
- Label* non_int32);
-
- // Takes the operands in edx and eax and loads them as integers in eax
- // and ecx.
- static void LoadUnknownsAsIntegers(MacroAssembler* masm,
- bool use_sse3,
- Label* operand_conversion_failure);
-
- // Must only be called after LoadUnknownsAsIntegers. Assumes that the
- // operands are pushed on the stack, and that their conversions to int32
- // are in eax and ecx. Checks that the original numbers were in the int32
- // range.
- static void CheckLoadedIntegersWereInt32(MacroAssembler* masm,
- bool use_sse3,
- Label* not_int32);
-
- // Assumes that operands are smis or heap numbers and loads them
- // into xmm0 and xmm1. Operands are in edx and eax.
- // Leaves operands unchanged.
- static void LoadSSE2Operands(MacroAssembler* masm);
-
// Test if operands are numbers (smi or HeapNumber objects), and load
// them into xmm0 and xmm1 if they are. Jump to label not_numbers if
// either operand is not a number. Operands are in edx and eax.
// Leaves operands unchanged.
static void LoadSSE2Operands(MacroAssembler* masm, Label* not_numbers);
-
- // Similar to LoadSSE2Operands but assumes that both operands are smis.
- // Expects operands in edx, eax.
- static void LoadSSE2Smis(MacroAssembler* masm, Register scratch);
-
- // Checks that the two floating point numbers loaded into xmm0 and xmm1
- // have int32 values.
- static void CheckSSE2OperandsAreInt32(MacroAssembler* masm,
- Label* non_int32,
- Register scratch);
};
-// Get the integer part of a heap number. Surprisingly, all this bit twiddling
-// is faster than using the built-in instructions on floating point registers.
-// Trashes edi and ebx. Dest is ecx. Source cannot be ecx or one of the
-// trashed registers.
-static void IntegerConvert(MacroAssembler* masm,
- Register source,
- bool use_sse3,
- Label* conversion_failure) {
- ASSERT(!source.is(ecx) && !source.is(edi) && !source.is(ebx));
- Label done, right_exponent, normal_exponent;
- Register scratch = ebx;
- Register scratch2 = edi;
- // Get exponent word.
- __ mov(scratch, FieldOperand(source, HeapNumber::kExponentOffset));
- // Get exponent alone in scratch2.
- __ mov(scratch2, scratch);
- __ and_(scratch2, HeapNumber::kExponentMask);
- if (use_sse3) {
- CpuFeatures::Scope scope(SSE3);
- // Check whether the exponent is too big for a 64 bit signed integer.
- static const uint32_t kTooBigExponent =
- (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
- __ cmp(scratch2, Immediate(kTooBigExponent));
- __ j(greater_equal, conversion_failure);
- // Load x87 register with heap number.
- __ fld_d(FieldOperand(source, HeapNumber::kValueOffset));
- // Reserve space for 64 bit answer.
- __ sub(esp, Immediate(sizeof(uint64_t))); // Nolint.
- // Do conversion, which cannot fail because we checked the exponent.
- __ fisttp_d(Operand(esp, 0));
- __ mov(ecx, Operand(esp, 0)); // Load low word of answer into ecx.
- __ add(esp, Immediate(sizeof(uint64_t))); // Nolint.
- } else {
- // Load ecx with zero. We use this either for the final shift or
- // for the answer.
- __ xor_(ecx, ecx);
- // Check whether the exponent matches a 32 bit signed int that cannot be
- // represented by a Smi. A non-smi 32 bit integer is 1.xxx * 2^30 so the
- // exponent is 30 (biased). This is the exponent that we are fastest at and
- // also the highest exponent we can handle here.
- const uint32_t non_smi_exponent =
- (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
- __ cmp(scratch2, Immediate(non_smi_exponent));
- // If we have a match of the int32-but-not-Smi exponent then skip some
- // logic.
- __ j(equal, &right_exponent, Label::kNear);
- // If the exponent is higher than that then go to slow case. This catches
- // numbers that don't fit in a signed int32, infinities and NaNs.
- __ j(less, &normal_exponent, Label::kNear);
-
- {
- // Handle a big exponent. The only reason we have this code is that the
- // >>> operator has a tendency to generate numbers with an exponent of 31.
- const uint32_t big_non_smi_exponent =
- (HeapNumber::kExponentBias + 31) << HeapNumber::kExponentShift;
- __ cmp(scratch2, Immediate(big_non_smi_exponent));
- __ j(not_equal, conversion_failure);
- // We have the big exponent, typically from >>>. This means the number is
- // in the range 2^31 to 2^32 - 1. Get the top bits of the mantissa.
- __ mov(scratch2, scratch);
- __ and_(scratch2, HeapNumber::kMantissaMask);
- // Put back the implicit 1.
- __ or_(scratch2, 1 << HeapNumber::kExponentShift);
- // Shift up the mantissa bits to take up the space the exponent used to
- // take. We just or'ed in the implicit bit, which took care of one, and
- // we want to use the full unsigned range, so we subtract 1 bit from the
- // shift distance.
- const int big_shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 1;
- __ shl(scratch2, big_shift_distance);
- // Get the second half of the double.
- __ mov(ecx, FieldOperand(source, HeapNumber::kMantissaOffset));
- // Shift down 21 bits to get the most significant 11 bits or the low
- // mantissa word.
- __ shr(ecx, 32 - big_shift_distance);
- __ or_(ecx, scratch2);
- // We have the answer in ecx, but we may need to negate it.
- __ test(scratch, scratch);
- __ j(positive, &done, Label::kNear);
- __ neg(ecx);
- __ jmp(&done, Label::kNear);
- }
-
- __ bind(&normal_exponent);
- // Exponent word in scratch, exponent part of exponent word in scratch2.
- // Zero in ecx.
- // We know the exponent is smaller than 30 (biased). If it is less than
- // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, i.e.
- // it rounds to zero.
- const uint32_t zero_exponent =
- (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
- __ sub(scratch2, Immediate(zero_exponent));
- // ecx already has a Smi zero.
- __ j(less, &done, Label::kNear);
-
- // We have a shifted exponent between 0 and 30 in scratch2.
- __ shr(scratch2, HeapNumber::kExponentShift);
- __ mov(ecx, Immediate(30));
- __ sub(ecx, scratch2);
-
- __ bind(&right_exponent);
- // Here ecx is the shift, scratch is the exponent word.
- // Get the top bits of the mantissa.
- __ and_(scratch, HeapNumber::kMantissaMask);
- // Put back the implicit 1.
- __ or_(scratch, 1 << HeapNumber::kExponentShift);
- // Shift up the mantissa bits to take up the space the exponent used to
- // take. We have kExponentShift + 1 significant bits in the low end of the
- // word. Shift them to the top bits.
- const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
- __ shl(scratch, shift_distance);
- // Get the second half of the double. For some exponents we don't
- // actually need this because the bits get shifted out again, but
- // it's probably slower to test than just to do it.
- __ mov(scratch2, FieldOperand(source, HeapNumber::kMantissaOffset));
- // Shift down 22 bits to get the most significant 10 bits or the low
- // mantissa word.
- __ shr(scratch2, 32 - shift_distance);
- __ or_(scratch2, scratch);
- // Move down according to the exponent.
- __ shr_cl(scratch2);
- // Now the unsigned answer is in scratch2. We need to move it to ecx and
- // we may need to fix the sign.
- Label negative;
- __ xor_(ecx, ecx);
- __ cmp(ecx, FieldOperand(source, HeapNumber::kExponentOffset));
- __ j(greater, &negative, Label::kNear);
- __ mov(ecx, scratch2);
- __ jmp(&done, Label::kNear);
- __ bind(&negative);
- __ sub(ecx, scratch2);
- __ bind(&done);
- }
-}
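The deleted IntegerConvert extracted the integer part of a heap number by operating on the IEEE-754 bits directly: isolate the exponent, put the implicit leading 1 back into the mantissa, shift it into place, and apply the sign. A sketch of that arithmetic in plain C++, limited to exponents up to 30 (the original additionally special-cased exponent 31 for the >>> operator and had an SSE3 fisttp path):

#include <cstdint>
#include <cstring>

// Returns false where the original would have taken the slow case.
bool DoubleToInt32Sketch(double value, int32_t* result) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  int exponent = static_cast<int>((bits >> 52) & 0x7FF) - 1023;  // unbiased
  if (exponent < 0) {  // |value| < 1.0 truncates to zero
    *result = 0;
    return true;
  }
  if (exponent > 30) return false;  // too big for this fast path
  uint64_t mantissa = (bits & ((uint64_t{1} << 52) - 1)) | (uint64_t{1} << 52);
  int32_t magnitude = static_cast<int32_t>(mantissa >> (52 - exponent));
  *result = (bits >> 63) ? -magnitude : magnitude;
  return true;
}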
+void DoubleToIStub::Generate(MacroAssembler* masm) {
+ Register input_reg = this->source();
+ Register final_result_reg = this->destination();
+ ASSERT(is_truncating());
+ Label check_negative, process_64_bits, done, done_no_stash;
-void UnaryOpStub::PrintName(StringStream* stream) {
- const char* op_name = Token::Name(op_);
- const char* overwrite_name = NULL; // Make g++ happy.
- switch (mode_) {
- case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break;
- case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break;
- }
- stream->Add("UnaryOpStub_%s_%s_%s",
- op_name,
- overwrite_name,
- UnaryOpIC::GetName(operand_type_));
-}
+ int double_offset = offset();
+ // Account for return address and saved regs if input is esp.
+ if (input_reg.is(esp)) double_offset += 3 * kPointerSize;
-// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::Generate(MacroAssembler* masm) {
- switch (operand_type_) {
- case UnaryOpIC::UNINITIALIZED:
- GenerateTypeTransition(masm);
- break;
- case UnaryOpIC::SMI:
- GenerateSmiStub(masm);
- break;
- case UnaryOpIC::HEAP_NUMBER:
- GenerateHeapNumberStub(masm);
- break;
- case UnaryOpIC::GENERIC:
- GenerateGenericStub(masm);
- break;
- }
-}
+ MemOperand mantissa_operand(MemOperand(input_reg, double_offset));
+ MemOperand exponent_operand(MemOperand(input_reg,
+ double_offset + kDoubleSize / 2));
-
-void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
- __ pop(ecx); // Save return address.
-
- __ push(eax); // the operand
- __ push(Immediate(Smi::FromInt(op_)));
- __ push(Immediate(Smi::FromInt(mode_)));
- __ push(Immediate(Smi::FromInt(operand_type_)));
-
- __ push(ecx); // Push return address.
-
- // Patch the caller to an appropriate specialized stub and return the
- // operation result to the caller of the stub.
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kUnaryOp_Patch), masm->isolate()), 4, 1);
-}
-
-
-// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
- switch (op_) {
- case Token::SUB:
- GenerateSmiStubSub(masm);
- break;
- case Token::BIT_NOT:
- GenerateSmiStubBitNot(masm);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void UnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) {
- Label non_smi, undo, slow;
- GenerateSmiCodeSub(masm, &non_smi, &undo, &slow,
- Label::kNear, Label::kNear, Label::kNear);
- __ bind(&undo);
- GenerateSmiCodeUndo(masm);
- __ bind(&non_smi);
- __ bind(&slow);
- GenerateTypeTransition(masm);
-}
-
-
-void UnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) {
- Label non_smi;
- GenerateSmiCodeBitNot(masm, &non_smi);
- __ bind(&non_smi);
- GenerateTypeTransition(masm);
-}
-
-
-void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
- Label* non_smi,
- Label* undo,
- Label* slow,
- Label::Distance non_smi_near,
- Label::Distance undo_near,
- Label::Distance slow_near) {
- // Check whether the value is a smi.
- __ JumpIfNotSmi(eax, non_smi, non_smi_near);
-
- // We can't handle -0 with smis, so use a type transition for that case.
- __ test(eax, eax);
- __ j(zero, slow, slow_near);
-
- // Try optimistic subtraction '0 - value', saving operand in eax for undo.
- __ mov(edx, eax);
- __ Set(eax, Immediate(0));
- __ sub(eax, edx);
- __ j(overflow, undo, undo_near);
- __ ret(0);
-}
-
-
-void UnaryOpStub::GenerateSmiCodeBitNot(
- MacroAssembler* masm,
- Label* non_smi,
- Label::Distance non_smi_near) {
- // Check whether the value is a smi.
- __ JumpIfNotSmi(eax, non_smi, non_smi_near);
-
- // Flip bits and revert inverted smi-tag.
- __ not_(eax);
- __ and_(eax, ~kSmiTagMask);
- __ ret(0);
-}
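For reference, a minimal C++ sketch of the "flip bits and revert inverted smi-tag" trick the removed code relies on, assuming ia32-style 31-bit smis with tag bit 0 (illustrative names, not V8's):

#include <cstdint>

constexpr int32_t kSmiTagMask = 1;  // tag bit 0 means the low bit is the tag

// ~(v << 1) has every value bit inverted and the tag bit set to 1; clearing
// the tag bit again gives (~v) << 1, the smi encoding of ~v, with no
// untag/retag step.
int32_t SmiBitNot(int32_t tagged_smi) {
  return ~tagged_smi & ~kSmiTagMask;
}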
-
-
-void UnaryOpStub::GenerateSmiCodeUndo(MacroAssembler* masm) {
- __ mov(eax, edx);
-}
-
-
-// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
- switch (op_) {
- case Token::SUB:
- GenerateHeapNumberStubSub(masm);
- break;
- case Token::BIT_NOT:
- GenerateHeapNumberStubBitNot(masm);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void UnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) {
- Label non_smi, undo, slow, call_builtin;
- GenerateSmiCodeSub(masm, &non_smi, &undo, &call_builtin, Label::kNear);
- __ bind(&non_smi);
- GenerateHeapNumberCodeSub(masm, &slow);
- __ bind(&undo);
- GenerateSmiCodeUndo(masm);
- __ bind(&slow);
- GenerateTypeTransition(masm);
- __ bind(&call_builtin);
- GenerateGenericCodeFallback(masm);
-}
-
-
-void UnaryOpStub::GenerateHeapNumberStubBitNot(
- MacroAssembler* masm) {
- Label non_smi, slow;
- GenerateSmiCodeBitNot(masm, &non_smi, Label::kNear);
- __ bind(&non_smi);
- GenerateHeapNumberCodeBitNot(masm, &slow);
- __ bind(&slow);
- GenerateTypeTransition(masm);
-}
-
-
-void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
- Label* slow) {
- __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
- __ cmp(edx, masm->isolate()->factory()->heap_number_map());
- __ j(not_equal, slow);
-
- if (mode_ == UNARY_OVERWRITE) {
- __ xor_(FieldOperand(eax, HeapNumber::kExponentOffset),
- Immediate(HeapNumber::kSignMask)); // Flip sign.
- } else {
- __ mov(edx, eax);
- // edx: operand
-
- Label slow_allocate_heapnumber, heapnumber_allocated;
- __ AllocateHeapNumber(eax, ebx, ecx, &slow_allocate_heapnumber);
- __ jmp(&heapnumber_allocated, Label::kNear);
-
- __ bind(&slow_allocate_heapnumber);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(edx);
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- __ pop(edx);
- }
-
- __ bind(&heapnumber_allocated);
- // eax: allocated 'empty' number
- __ mov(ecx, FieldOperand(edx, HeapNumber::kExponentOffset));
- __ xor_(ecx, HeapNumber::kSignMask); // Flip sign.
- __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), ecx);
- __ mov(ecx, FieldOperand(edx, HeapNumber::kMantissaOffset));
- __ mov(FieldOperand(eax, HeapNumber::kMantissaOffset), ecx);
- }
- __ ret(0);
-}
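The UNARY_OVERWRITE branch above negates the number purely by toggling the IEEE-754 sign bit in the upper word; a small, hypothetical C++ model of that bit flip:

#include <cstdint>
#include <cstring>

// The sign of an IEEE-754 double is its top bit (HeapNumber::kSignMask sits in
// the upper word), so negation is a single XOR; it also works for -0.0, NaN
// and the infinities without touching the FPU.
double NegateByBitFlip(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof bits);
  bits ^= uint64_t{1} << 63;
  std::memcpy(&value, &bits, sizeof bits);
  return value;
}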
-
-
-void UnaryOpStub::GenerateHeapNumberCodeBitNot(MacroAssembler* masm,
- Label* slow) {
- __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
- __ cmp(edx, masm->isolate()->factory()->heap_number_map());
- __ j(not_equal, slow);
-
- // Convert the heap number in eax to an untagged integer in ecx.
- IntegerConvert(masm, eax, CpuFeatures::IsSupported(SSE3), slow);
-
- // Do the bitwise operation and check if the result fits in a smi.
- Label try_float;
- __ not_(ecx);
- __ cmp(ecx, 0xc0000000);
- __ j(sign, &try_float, Label::kNear);
-
- // Tag the result as a smi and we're done.
- STATIC_ASSERT(kSmiTagSize == 1);
- __ lea(eax, Operand(ecx, times_2, kSmiTag));
- __ ret(0);
-
- // Try to store the result in a heap number.
- __ bind(&try_float);
- if (mode_ == UNARY_NO_OVERWRITE) {
- Label slow_allocate_heapnumber, heapnumber_allocated;
- __ mov(ebx, eax);
- __ AllocateHeapNumber(eax, edx, edi, &slow_allocate_heapnumber);
- __ jmp(&heapnumber_allocated);
-
- __ bind(&slow_allocate_heapnumber);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- // Push the original HeapNumber on the stack. The integer value can't
- // be stored since it's untagged and not in the smi range (so we can't
- // smi-tag it). We'll recalculate the value after the GC instead.
- __ push(ebx);
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- // New HeapNumber is in eax.
- __ pop(edx);
+ Register scratch1;
+ {
+ Register scratch_candidates[3] = { ebx, edx, edi };
+ for (int i = 0; i < 3; i++) {
+ scratch1 = scratch_candidates[i];
+ if (!final_result_reg.is(scratch1) && !input_reg.is(scratch1)) break;
}
- // IntegerConvert uses ebx and edi as scratch registers.
- // This conversion won't go slow-case.
- IntegerConvert(masm, edx, CpuFeatures::IsSupported(SSE3), slow);
- __ not_(ecx);
-
- __ bind(&heapnumber_allocated);
- }
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- __ cvtsi2sd(xmm0, ecx);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- } else {
- __ push(ecx);
- __ fild_s(Operand(esp, 0));
- __ pop(ecx);
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- }
- __ ret(0);
-}
-
-
-// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::GenerateGenericStub(MacroAssembler* masm) {
- switch (op_) {
- case Token::SUB:
- GenerateGenericStubSub(masm);
- break;
- case Token::BIT_NOT:
- GenerateGenericStubBitNot(masm);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void UnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) {
- Label non_smi, undo, slow;
- GenerateSmiCodeSub(masm, &non_smi, &undo, &slow, Label::kNear);
- __ bind(&non_smi);
- GenerateHeapNumberCodeSub(masm, &slow);
- __ bind(&undo);
- GenerateSmiCodeUndo(masm);
- __ bind(&slow);
- GenerateGenericCodeFallback(masm);
-}
-
-
-void UnaryOpStub::GenerateGenericStubBitNot(MacroAssembler* masm) {
- Label non_smi, slow;
- GenerateSmiCodeBitNot(masm, &non_smi, Label::kNear);
- __ bind(&non_smi);
- GenerateHeapNumberCodeBitNot(masm, &slow);
- __ bind(&slow);
- GenerateGenericCodeFallback(masm);
-}
-
-
-void UnaryOpStub::GenerateGenericCodeFallback(MacroAssembler* masm) {
- // Handle the slow case by jumping to the corresponding JavaScript builtin.
- __ pop(ecx); // pop return address.
- __ push(eax);
- __ push(ecx); // push return address
- switch (op_) {
- case Token::SUB:
- __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
- break;
- case Token::BIT_NOT:
- __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
- break;
- default:
- UNREACHABLE();
}
-}
-
-
-void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
- __ pop(ecx); // Save return address.
- __ push(edx);
- __ push(eax);
- // Left and right arguments are now on top.
- // Push this stub's key. Although the operation and the type info are
- // encoded into the key, the encoding is opaque, so push them too.
- __ push(Immediate(Smi::FromInt(MinorKey())));
- __ push(Immediate(Smi::FromInt(op_)));
- __ push(Immediate(Smi::FromInt(operands_type_)));
-
- __ push(ecx); // Push return address.
-
- // Patch the caller to an appropriate specialized stub and return the
- // operation result to the caller of the stub.
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
- masm->isolate()),
- 5,
- 1);
-}
-
-
-// Prepare for a type transition runtime call when the args are already on
-// the stack, under the return address.
-void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm) {
- __ pop(ecx); // Save return address.
- // Left and right arguments are already on top of the stack.
- // Push this stub's key. Although the operation and the type info are
- // encoded into the key, the encoding is opaque, so push them too.
- __ push(Immediate(Smi::FromInt(MinorKey())));
- __ push(Immediate(Smi::FromInt(op_)));
- __ push(Immediate(Smi::FromInt(operands_type_)));
-
- __ push(ecx); // Push return address.
-
- // Patch the caller to an appropriate specialized stub and return the
- // operation result to the caller of the stub.
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
- masm->isolate()),
- 5,
- 1);
-}
-
-
-void BinaryOpStub::Generate(MacroAssembler* masm) {
- // Explicitly allow generation of nested stubs. It is safe here because
- // generation code does not use any raw pointers.
- AllowStubCallsScope allow_stub_calls(masm, true);
-
- switch (operands_type_) {
- case BinaryOpIC::UNINITIALIZED:
- GenerateTypeTransition(masm);
- break;
- case BinaryOpIC::SMI:
- GenerateSmiStub(masm);
- break;
- case BinaryOpIC::INT32:
- GenerateInt32Stub(masm);
- break;
- case BinaryOpIC::HEAP_NUMBER:
- GenerateHeapNumberStub(masm);
- break;
- case BinaryOpIC::ODDBALL:
- GenerateOddballStub(masm);
- break;
- case BinaryOpIC::BOTH_STRING:
- GenerateBothStringStub(masm);
- break;
- case BinaryOpIC::STRING:
- GenerateStringStub(masm);
- break;
- case BinaryOpIC::GENERIC:
- GenerateGeneric(masm);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void BinaryOpStub::PrintName(StringStream* stream) {
- const char* op_name = Token::Name(op_);
- const char* overwrite_name;
- switch (mode_) {
- case NO_OVERWRITE: overwrite_name = "Alloc"; break;
- case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
- case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
- default: overwrite_name = "UnknownOverwrite"; break;
- }
- stream->Add("BinaryOpStub_%s_%s_%s",
- op_name,
- overwrite_name,
- BinaryOpIC::GetName(operands_type_));
-}
-
-
-void BinaryOpStub::GenerateSmiCode(
- MacroAssembler* masm,
- Label* slow,
- SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
- // 1. Move arguments into edx, eax except for DIV and MOD, which need the
- // dividend in eax and edx free for the division. Use eax, ebx for those.
- Comment load_comment(masm, "-- Load arguments");
- Register left = edx;
- Register right = eax;
- if (op_ == Token::DIV || op_ == Token::MOD) {
- left = eax;
- right = ebx;
- __ mov(ebx, eax);
- __ mov(eax, edx);
- }
-
-
- // 2. Prepare the smi check of both operands by oring them together.
- Comment smi_check_comment(masm, "-- Smi check arguments");
- Label not_smis;
- Register combined = ecx;
- ASSERT(!left.is(combined) && !right.is(combined));
- switch (op_) {
- case Token::BIT_OR:
- // Perform the operation into eax and smi check the result. Preserve
- // eax in case the result is not a smi.
- ASSERT(!left.is(ecx) && !right.is(ecx));
- __ mov(ecx, right);
- __ or_(right, left); // Bitwise or is commutative.
- combined = right;
- break;
-
- case Token::BIT_XOR:
- case Token::BIT_AND:
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD:
- __ mov(combined, right);
- __ or_(combined, left);
- break;
-
- case Token::SHL:
- case Token::SAR:
- case Token::SHR:
- // Move the right operand into ecx for the shift operation, use eax
- // for the smi check register.
- ASSERT(!left.is(ecx) && !right.is(ecx));
- __ mov(ecx, right);
- __ or_(right, left);
- combined = right;
- break;
-
- default:
- break;
+ // Since we must use ecx for shifts below, use some other register (eax)
+ // to calculate the result if ecx is the requested return register.
+ Register result_reg = final_result_reg.is(ecx) ? eax : final_result_reg;
+  // Save ecx if it isn't the return register (it is clobbered below); if it
+  // is the return register, save the temp register we use in its stead for
+  // the result.
+ Register save_reg = final_result_reg.is(ecx) ? eax : ecx;
+ __ push(scratch1);
+ __ push(save_reg);
+
+ bool stash_exponent_copy = !input_reg.is(esp);
+ __ mov(scratch1, mantissa_operand);
+ if (CpuFeatures::IsSupported(SSE3)) {
+ CpuFeatureScope scope(masm, SSE3);
+ // Load x87 register with heap number.
+ __ fld_d(mantissa_operand);
}
+ __ mov(ecx, exponent_operand);
+ if (stash_exponent_copy) __ push(ecx);
- // 3. Perform the smi check of the operands.
- STATIC_ASSERT(kSmiTag == 0); // Adjust zero check if not the case.
- __ JumpIfNotSmi(combined, &not_smis);
-
- // 4. Operands are both smis, perform the operation leaving the result in
- // eax and check the result if necessary.
- Comment perform_smi(masm, "-- Perform smi operation");
- Label use_fp_on_smis;
- switch (op_) {
- case Token::BIT_OR:
- // Nothing to do.
- break;
-
- case Token::BIT_XOR:
- ASSERT(right.is(eax));
- __ xor_(right, left); // Bitwise xor is commutative.
- break;
-
- case Token::BIT_AND:
- ASSERT(right.is(eax));
- __ and_(right, left); // Bitwise and is commutative.
- break;
-
- case Token::SHL:
- // Remove tags from operands (but keep sign).
- __ SmiUntag(left);
- __ SmiUntag(ecx);
- // Perform the operation.
- __ shl_cl(left);
- // Check that the *signed* result fits in a smi.
- __ cmp(left, 0xc0000000);
- __ j(sign, &use_fp_on_smis);
- // Tag the result and store it in register eax.
- __ SmiTag(left);
- __ mov(eax, left);
- break;
-
- case Token::SAR:
- // Remove tags from operands (but keep sign).
- __ SmiUntag(left);
- __ SmiUntag(ecx);
- // Perform the operation.
- __ sar_cl(left);
- // Tag the result and store it in register eax.
- __ SmiTag(left);
- __ mov(eax, left);
- break;
-
- case Token::SHR:
- // Remove tags from operands (but keep sign).
- __ SmiUntag(left);
- __ SmiUntag(ecx);
- // Perform the operation.
- __ shr_cl(left);
- // Check that the *unsigned* result fits in a smi.
- // Neither of the two high-order bits can be set:
- // - 0x80000000: high bit would be lost when smi tagging.
- // - 0x40000000: this number would convert to negative when
-      //    Smi tagging. These two cases can only happen with shifts
-      //    by 0 or 1 when handed a valid smi.
- __ test(left, Immediate(0xc0000000));
- __ j(not_zero, &use_fp_on_smis);
- // Tag the result and store it in register eax.
- __ SmiTag(left);
- __ mov(eax, left);
- break;
-
- case Token::ADD:
- ASSERT(right.is(eax));
- __ add(right, left); // Addition is commutative.
- __ j(overflow, &use_fp_on_smis);
- break;
-
- case Token::SUB:
- __ sub(left, right);
- __ j(overflow, &use_fp_on_smis);
- __ mov(eax, left);
- break;
-
- case Token::MUL:
- // If the smi tag is 0 we can just leave the tag on one operand.
- STATIC_ASSERT(kSmiTag == 0); // Adjust code below if not the case.
- // We can't revert the multiplication if the result is not a smi
- // so save the right operand.
- __ mov(ebx, right);
- // Remove tag from one of the operands (but keep sign).
- __ SmiUntag(right);
- // Do multiplication.
- __ imul(right, left); // Multiplication is commutative.
- __ j(overflow, &use_fp_on_smis);
- // Check for negative zero result. Use combined = left | right.
- __ NegativeZeroTest(right, combined, &use_fp_on_smis);
- break;
-
- case Token::DIV:
- // We can't revert the division if the result is not a smi so
- // save the left operand.
- __ mov(edi, left);
- // Check for 0 divisor.
- __ test(right, right);
- __ j(zero, &use_fp_on_smis);
- // Sign extend left into edx:eax.
- ASSERT(left.is(eax));
- __ cdq();
- // Divide edx:eax by right.
- __ idiv(right);
- // Check for the corner case of dividing the most negative smi by
- // -1. We cannot use the overflow flag, since it is not set by idiv
- // instruction.
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- __ cmp(eax, 0x40000000);
- __ j(equal, &use_fp_on_smis);
- // Check for negative zero result. Use combined = left | right.
- __ NegativeZeroTest(eax, combined, &use_fp_on_smis);
- // Check that the remainder is zero.
- __ test(edx, edx);
- __ j(not_zero, &use_fp_on_smis);
- // Tag the result and store it in register eax.
- __ SmiTag(eax);
- break;
-
- case Token::MOD:
- // Check for 0 divisor.
- __ test(right, right);
- __ j(zero, &not_smis);
-
- // Sign extend left into edx:eax.
- ASSERT(left.is(eax));
- __ cdq();
- // Divide edx:eax by right.
- __ idiv(right);
- // Check for negative zero result. Use combined = left | right.
- __ NegativeZeroTest(edx, combined, slow);
- // Move remainder to register eax.
- __ mov(eax, edx);
- break;
+ __ and_(ecx, HeapNumber::kExponentMask);
+ __ shr(ecx, HeapNumber::kExponentShift);
+ __ lea(result_reg, MemOperand(ecx, -HeapNumber::kExponentBias));
+ __ cmp(result_reg, Immediate(HeapNumber::kMantissaBits));
+ __ j(below, &process_64_bits);
- default:
- UNREACHABLE();
- }
-
- // 5. Emit return of result in eax. Some operations have registers pushed.
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- __ ret(0);
- break;
- case Token::MOD:
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR:
- __ ret(2 * kPointerSize);
- break;
- default:
- UNREACHABLE();
+ // Result is entirely in lower 32-bits of mantissa
+ int delta = HeapNumber::kExponentBias + Double::kPhysicalSignificandSize;
+ if (CpuFeatures::IsSupported(SSE3)) {
+ __ fstp(0);
}
-
- // 6. For some operations emit inline code to perform floating point
- // operations on known smis (e.g., if the result of the operation
- // overflowed the smi range).
- if (allow_heapnumber_results == NO_HEAPNUMBER_RESULTS) {
- __ bind(&use_fp_on_smis);
- switch (op_) {
- // Undo the effects of some operations, and some register moves.
- case Token::SHL:
- // The arguments are saved on the stack, and only used from there.
- break;
- case Token::ADD:
- // Revert right = right + left.
- __ sub(right, left);
- break;
- case Token::SUB:
- // Revert left = left - right.
- __ add(left, right);
- break;
- case Token::MUL:
- // Right was clobbered but a copy is in ebx.
- __ mov(right, ebx);
- break;
- case Token::DIV:
- // Left was clobbered but a copy is in edi. Right is in ebx for
- // division. They should be in eax, ebx for jump to not_smi.
- __ mov(eax, edi);
- break;
- default:
- // No other operators jump to use_fp_on_smis.
- break;
- }
- __ jmp(&not_smis);
- } else {
- ASSERT(allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS);
- switch (op_) {
- case Token::SHL:
- case Token::SHR: {
- Comment perform_float(masm, "-- Perform float operation on smis");
- __ bind(&use_fp_on_smis);
- // Result we want is in left == edx, so we can put the allocated heap
- // number in eax.
- __ AllocateHeapNumber(eax, ecx, ebx, slow);
- // Store the result in the HeapNumber and return.
- // It's OK to overwrite the arguments on the stack because we
- // are about to return.
- if (op_ == Token::SHR) {
- __ mov(Operand(esp, 1 * kPointerSize), left);
- __ mov(Operand(esp, 2 * kPointerSize), Immediate(0));
- __ fild_d(Operand(esp, 1 * kPointerSize));
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- } else {
- ASSERT_EQ(Token::SHL, op_);
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- __ cvtsi2sd(xmm0, left);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- } else {
- __ mov(Operand(esp, 1 * kPointerSize), left);
- __ fild_s(Operand(esp, 1 * kPointerSize));
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- }
- }
- __ ret(2 * kPointerSize);
- break;
- }
-
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV: {
- Comment perform_float(masm, "-- Perform float operation on smis");
- __ bind(&use_fp_on_smis);
- // Restore arguments to edx, eax.
- switch (op_) {
- case Token::ADD:
- // Revert right = right + left.
- __ sub(right, left);
- break;
- case Token::SUB:
- // Revert left = left - right.
- __ add(left, right);
- break;
- case Token::MUL:
- // Right was clobbered but a copy is in ebx.
- __ mov(right, ebx);
- break;
- case Token::DIV:
- // Left was clobbered but a copy is in edi. Right is in ebx for
- // division.
- __ mov(edx, edi);
- __ mov(eax, right);
- break;
- default: UNREACHABLE();
- break;
- }
- __ AllocateHeapNumber(ecx, ebx, no_reg, slow);
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- FloatingPointHelper::LoadSSE2Smis(masm, ebx);
- switch (op_) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm0);
- } else { // SSE2 not available, use FPU.
- FloatingPointHelper::LoadFloatSmis(masm, ebx);
- switch (op_) {
- case Token::ADD: __ faddp(1); break;
- case Token::SUB: __ fsubp(1); break;
- case Token::MUL: __ fmulp(1); break;
- case Token::DIV: __ fdivp(1); break;
- default: UNREACHABLE();
- }
- __ fstp_d(FieldOperand(ecx, HeapNumber::kValueOffset));
- }
- __ mov(eax, ecx);
- __ ret(0);
- break;
- }
-
- default:
- break;
+ __ sub(ecx, Immediate(delta));
+ __ xor_(result_reg, result_reg);
+ __ cmp(ecx, Immediate(31));
+ __ j(above, &done);
+ __ shl_cl(scratch1);
+ __ jmp(&check_negative);
+
+ __ bind(&process_64_bits);
+ if (CpuFeatures::IsSupported(SSE3)) {
+ CpuFeatureScope scope(masm, SSE3);
+ if (stash_exponent_copy) {
+ // Already a copy of the exponent on the stack, overwrite it.
+ STATIC_ASSERT(kDoubleSize == 2 * kPointerSize);
+ __ sub(esp, Immediate(kDoubleSize / 2));
+ } else {
+ // Reserve space for 64 bit answer.
+ __ sub(esp, Immediate(kDoubleSize)); // Nolint.
}
- }
-
- // 7. Non-smi operands, fall out to the non-smi code with the operands in
- // edx and eax.
- Comment done_comment(masm, "-- Enter non-smi code");
- __ bind(&not_smis);
- switch (op_) {
- case Token::BIT_OR:
- case Token::SHL:
- case Token::SAR:
- case Token::SHR:
- // Right operand is saved in ecx and eax was destroyed by the smi
- // check.
- __ mov(eax, ecx);
- break;
-
- case Token::DIV:
- case Token::MOD:
- // Operands are in eax, ebx at this point.
- __ mov(edx, eax);
- __ mov(eax, ebx);
- break;
-
- default:
- break;
- }
-}
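Two ideas carried by the removed smi fast path are worth spelling out: tagged addition overflows exactly when the result leaves the smi range, and the 0xc0000000 compare is a one-instruction range check. A rough C++ sketch under the 31-bit smi, tag-bit-0 assumption (helper names are illustrative, not V8's):

#include <cstdint>
#include <optional>

constexpr int kSmiTagSize = 1;  // 31-bit smis, tag bit 0, as on ia32

int32_t SmiTag(int32_t value) { return value << kSmiTagSize; }
int32_t SmiUntag(int32_t smi) { return smi >> kSmiTagSize; }

// ADD path: adding the tagged words adds the untagged values (times two), so a
// signed 32-bit overflow check doubles as the smi range check.
std::optional<int32_t> SmiAdd(int32_t left_smi, int32_t right_smi) {
  int64_t sum = static_cast<int64_t>(left_smi) + right_smi;
  if (sum < INT32_MIN || sum > INT32_MAX) return std::nullopt;  // use doubles
  return static_cast<int32_t>(sum);
}

// SHL path's "cmp eax, 0xc0000000; j(sign, ...)": an untagged value fits in a
// smi iff adding 2^30 leaves the sign bit clear, i.e. it lies in [-2^30, 2^30).
bool FitsInSmi(int32_t value) {
  return static_cast<uint32_t>(value) + 0x40000000u < 0x80000000u;
}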
-
-
-void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
- Label call_runtime;
-
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- break;
- case Token::MOD:
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR:
- GenerateRegisterArgsPush(masm);
- break;
- default:
- UNREACHABLE();
- }
-
- if (result_type_ == BinaryOpIC::UNINITIALIZED ||
- result_type_ == BinaryOpIC::SMI) {
- GenerateSmiCode(masm, &call_runtime, NO_HEAPNUMBER_RESULTS);
+ // Do conversion, which cannot fail because we checked the exponent.
+ __ fisttp_d(Operand(esp, 0));
+ __ mov(result_reg, Operand(esp, 0)); // Load low word of answer as result
+ __ add(esp, Immediate(kDoubleSize));
+ __ jmp(&done_no_stash);
} else {
- GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
- }
- __ bind(&call_runtime);
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- GenerateTypeTransition(masm);
- break;
- case Token::MOD:
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR:
- GenerateTypeTransitionWithSavedArgs(masm);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
- ASSERT(operands_type_ == BinaryOpIC::STRING);
- ASSERT(op_ == Token::ADD);
- // Try to add arguments as strings, otherwise, transition to the generic
- // BinaryOpIC type.
- GenerateAddStrings(masm);
- GenerateTypeTransition(masm);
-}
-
-
-void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
- Label call_runtime;
- ASSERT(operands_type_ == BinaryOpIC::BOTH_STRING);
- ASSERT(op_ == Token::ADD);
- // If both arguments are strings, call the string add stub.
- // Otherwise, do a transition.
-
- // Registers containing left and right operands respectively.
- Register left = edx;
- Register right = eax;
-
- // Test if left operand is a string.
- __ JumpIfSmi(left, &call_runtime, Label::kNear);
- __ CmpObjectType(left, FIRST_NONSTRING_TYPE, ecx);
- __ j(above_equal, &call_runtime, Label::kNear);
-
- // Test if right operand is a string.
- __ JumpIfSmi(right, &call_runtime, Label::kNear);
- __ CmpObjectType(right, FIRST_NONSTRING_TYPE, ecx);
- __ j(above_equal, &call_runtime, Label::kNear);
-
- StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
- GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_stub);
-
- __ bind(&call_runtime);
- GenerateTypeTransition(masm);
-}
-
-
-// Input:
-// edx: left operand (tagged)
-// eax: right operand (tagged)
-// Output:
-// eax: result (tagged)
-void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
- Label call_runtime;
- ASSERT(operands_type_ == BinaryOpIC::INT32);
-
- // Floating point case.
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD: {
- Label not_floats;
- Label not_int32;
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
- FloatingPointHelper::CheckSSE2OperandsAreInt32(masm, &not_int32, ecx);
- if (op_ == Token::MOD) {
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
- } else {
- switch (op_) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- // Check result type if it is currently Int32.
- if (result_type_ <= BinaryOpIC::INT32) {
- __ cvttsd2si(ecx, Operand(xmm0));
- __ cvtsi2sd(xmm2, ecx);
- __ pcmpeqd(xmm2, xmm0);
- __ movmskpd(ecx, xmm2);
- __ test(ecx, Immediate(1));
- __ j(zero, &not_int32);
- }
- GenerateHeapResultAllocation(masm, &call_runtime);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- __ ret(0);
- }
- } else { // SSE2 not available, use FPU.
- FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
- FloatingPointHelper::LoadFloatOperands(
- masm,
- ecx,
- FloatingPointHelper::ARGS_IN_REGISTERS);
- FloatingPointHelper::CheckFloatOperandsAreInt32(masm, &not_int32);
- if (op_ == Token::MOD) {
- // The operands are now on the FPU stack, but we don't need them.
- __ fstp(0);
- __ fstp(0);
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
- } else {
- switch (op_) {
- case Token::ADD: __ faddp(1); break;
- case Token::SUB: __ fsubp(1); break;
- case Token::MUL: __ fmulp(1); break;
- case Token::DIV: __ fdivp(1); break;
- default: UNREACHABLE();
- }
- Label after_alloc_failure;
- GenerateHeapResultAllocation(masm, &after_alloc_failure);
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ ret(0);
- __ bind(&after_alloc_failure);
- __ fstp(0); // Pop FPU stack before calling runtime.
- __ jmp(&call_runtime);
- }
- }
-
- __ bind(&not_floats);
- __ bind(&not_int32);
- GenerateTypeTransition(masm);
- break;
+ // Result must be extracted from shifted 32-bit mantissa
+ __ sub(ecx, Immediate(delta));
+ __ neg(ecx);
+ if (stash_exponent_copy) {
+ __ mov(result_reg, MemOperand(esp, 0));
+ } else {
+ __ mov(result_reg, exponent_operand);
}
-
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR: {
- GenerateRegisterArgsPush(masm);
- Label not_floats;
- Label not_int32;
- Label non_smi_result;
- FloatingPointHelper::LoadUnknownsAsIntegers(masm,
- use_sse3_,
- &not_floats);
- FloatingPointHelper::CheckLoadedIntegersWereInt32(masm, use_sse3_,
- &not_int32);
- switch (op_) {
- case Token::BIT_OR: __ or_(eax, ecx); break;
- case Token::BIT_AND: __ and_(eax, ecx); break;
- case Token::BIT_XOR: __ xor_(eax, ecx); break;
- case Token::SAR: __ sar_cl(eax); break;
- case Token::SHL: __ shl_cl(eax); break;
- case Token::SHR: __ shr_cl(eax); break;
- default: UNREACHABLE();
- }
- if (op_ == Token::SHR) {
- // Check if result is non-negative and fits in a smi.
- __ test(eax, Immediate(0xc0000000));
- __ j(not_zero, &call_runtime);
- } else {
- // Check if result fits in a smi.
- __ cmp(eax, 0xc0000000);
- __ j(negative, &non_smi_result, Label::kNear);
- }
- // Tag smi result and return.
- __ SmiTag(eax);
- __ ret(2 * kPointerSize); // Drop two pushed arguments from the stack.
-
- // All ops except SHR return a signed int32 that we load in
- // a HeapNumber.
- if (op_ != Token::SHR) {
- __ bind(&non_smi_result);
- // Allocate a heap number if needed.
- __ mov(ebx, eax); // ebx: result
- Label skip_allocation;
- switch (mode_) {
- case OVERWRITE_LEFT:
- case OVERWRITE_RIGHT:
- // If the operand was an object, we skip the
- // allocation of a heap number.
- __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
- 1 * kPointerSize : 2 * kPointerSize));
- __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
- // Fall through!
- case NO_OVERWRITE:
- __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
- __ bind(&skip_allocation);
- break;
- default: UNREACHABLE();
- }
- // Store the result in the HeapNumber and return.
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- __ cvtsi2sd(xmm0, ebx);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- } else {
- __ mov(Operand(esp, 1 * kPointerSize), ebx);
- __ fild_s(Operand(esp, 1 * kPointerSize));
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- }
- __ ret(2 * kPointerSize); // Drop two pushed arguments from the stack.
- }
-
- __ bind(&not_floats);
- __ bind(&not_int32);
- GenerateTypeTransitionWithSavedArgs(masm);
- break;
+ __ and_(result_reg,
+ Immediate(static_cast<uint32_t>(Double::kSignificandMask >> 32)));
+ __ add(result_reg,
+ Immediate(static_cast<uint32_t>(Double::kHiddenBit >> 32)));
+ __ shrd(result_reg, scratch1);
+ __ shr_cl(result_reg);
+ __ test(ecx, Immediate(32));
+ if (CpuFeatures::IsSupported(CMOV)) {
+ CpuFeatureScope use_cmov(masm, CMOV);
+ __ cmov(not_equal, scratch1, result_reg);
+ } else {
+ Label skip_mov;
+ __ j(equal, &skip_mov, Label::kNear);
+ __ mov(scratch1, result_reg);
+ __ bind(&skip_mov);
}
- default: UNREACHABLE(); break;
- }
-
- // If an allocation fails, or SHR hits a hard case, use the runtime system to
- // get the correct result.
- __ bind(&call_runtime);
-
- switch (op_) {
- case Token::ADD:
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
- break;
- case Token::SUB:
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
- break;
- case Token::MUL:
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
- break;
- case Token::DIV:
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
- break;
- case Token::MOD:
- break;
- case Token::BIT_OR:
- __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
- break;
- case Token::BIT_AND:
- __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
- break;
- case Token::BIT_XOR:
- __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
- break;
- case Token::SAR:
- __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
- break;
- case Token::SHL:
- __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
- break;
- case Token::SHR:
- __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
- break;
- default:
- UNREACHABLE();
}
-}
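The Int32 fast path above decides whether a double result is still an int32 by truncating it and comparing the round trip bit for bit (cvttsd2si, cvtsi2sd, pcmpeqd). A small C++ sketch of that check; the explicit range guard is an addition here to avoid the undefined cast that the assembly sidesteps because cvttsd2si simply yields the indefinite integer:

#include <cstdint>
#include <cstring>

bool IsInt32Double(double value) {
  // Outside int32 range (or NaN) can never round-trip to the same bits.
  if (!(value >= -2147483648.0 && value < 2147483648.0)) return false;
  int32_t truncated = static_cast<int32_t>(value);         // cvttsd2si
  double round_tripped = static_cast<double>(truncated);   // cvtsi2sd
  uint64_t lhs, rhs;
  std::memcpy(&lhs, &value, sizeof lhs);
  std::memcpy(&rhs, &round_tripped, sizeof rhs);
  // Bitwise compare, so -0.0 is rejected just as pcmpeqd would reject it.
  return lhs == rhs;                                        // pcmpeqd / movmskpd
}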
-
-void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
- if (op_ == Token::ADD) {
- // Handle string addition here, because it is the only operation
- // that does not do a ToNumber conversion on the operands.
- GenerateAddStrings(masm);
- }
-
- Factory* factory = masm->isolate()->factory();
-
- // Convert odd ball arguments to numbers.
- Label check, done;
- __ cmp(edx, factory->undefined_value());
- __ j(not_equal, &check, Label::kNear);
- if (Token::IsBitOp(op_)) {
- __ xor_(edx, edx);
+ // If the double was negative, negate the integer result.
+ __ bind(&check_negative);
+ __ mov(result_reg, scratch1);
+ __ neg(result_reg);
+ if (stash_exponent_copy) {
+ __ cmp(MemOperand(esp, 0), Immediate(0));
} else {
- __ mov(edx, Immediate(factory->nan_value()));
+ __ cmp(exponent_operand, Immediate(0));
}
- __ jmp(&done, Label::kNear);
- __ bind(&check);
- __ cmp(eax, factory->undefined_value());
- __ j(not_equal, &done, Label::kNear);
- if (Token::IsBitOp(op_)) {
- __ xor_(eax, eax);
+ if (CpuFeatures::IsSupported(CMOV)) {
+ CpuFeatureScope use_cmov(masm, CMOV);
+ __ cmov(greater, result_reg, scratch1);
} else {
- __ mov(eax, Immediate(factory->nan_value()));
- }
- __ bind(&done);
-
- GenerateHeapNumberStub(masm);
-}
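The oddball stub above folds undefined into the numeric paths, zero for the bitwise operators and NaN for everything else, before falling through to the heap-number stub. A tiny, hypothetical C++ model of that coercion:

#include <limits>
#include <optional>

// undefined (modelled as std::nullopt) turns into 0 for the bitwise operators
// and into NaN otherwise, which the stub then feeds to the regular
// heap-number code.
double CoerceOddballOperand(std::optional<double> operand, bool is_bit_op) {
  if (operand.has_value()) return *operand;
  return is_bit_op ? 0.0 : std::numeric_limits<double>::quiet_NaN();
}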
-
-
-void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
- Label call_runtime;
-
- // Floating point case.
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV: {
- Label not_floats;
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
-
- switch (op_) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- GenerateHeapResultAllocation(masm, &call_runtime);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- __ ret(0);
- } else { // SSE2 not available, use FPU.
- FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
- FloatingPointHelper::LoadFloatOperands(
- masm,
- ecx,
- FloatingPointHelper::ARGS_IN_REGISTERS);
- switch (op_) {
- case Token::ADD: __ faddp(1); break;
- case Token::SUB: __ fsubp(1); break;
- case Token::MUL: __ fmulp(1); break;
- case Token::DIV: __ fdivp(1); break;
- default: UNREACHABLE();
- }
- Label after_alloc_failure;
- GenerateHeapResultAllocation(masm, &after_alloc_failure);
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ ret(0);
- __ bind(&after_alloc_failure);
- __ fstp(0); // Pop FPU stack before calling runtime.
- __ jmp(&call_runtime);
- }
-
- __ bind(&not_floats);
- GenerateTypeTransition(masm);
- break;
- }
-
- case Token::MOD: {
- // For MOD we go directly to runtime in the non-smi case.
- break;
- }
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR: {
- GenerateRegisterArgsPush(masm);
- Label not_floats;
- Label non_smi_result;
- FloatingPointHelper::LoadUnknownsAsIntegers(masm,
- use_sse3_,
- &not_floats);
- switch (op_) {
- case Token::BIT_OR: __ or_(eax, ecx); break;
- case Token::BIT_AND: __ and_(eax, ecx); break;
- case Token::BIT_XOR: __ xor_(eax, ecx); break;
- case Token::SAR: __ sar_cl(eax); break;
- case Token::SHL: __ shl_cl(eax); break;
- case Token::SHR: __ shr_cl(eax); break;
- default: UNREACHABLE();
- }
- if (op_ == Token::SHR) {
- // Check if result is non-negative and fits in a smi.
- __ test(eax, Immediate(0xc0000000));
- __ j(not_zero, &call_runtime);
- } else {
- // Check if result fits in a smi.
- __ cmp(eax, 0xc0000000);
- __ j(negative, &non_smi_result, Label::kNear);
- }
- // Tag smi result and return.
- __ SmiTag(eax);
- __ ret(2 * kPointerSize); // Drop two pushed arguments from the stack.
-
- // All ops except SHR return a signed int32 that we load in
- // a HeapNumber.
- if (op_ != Token::SHR) {
- __ bind(&non_smi_result);
- // Allocate a heap number if needed.
- __ mov(ebx, eax); // ebx: result
- Label skip_allocation;
- switch (mode_) {
- case OVERWRITE_LEFT:
- case OVERWRITE_RIGHT:
- // If the operand was an object, we skip the
- // allocation of a heap number.
- __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
- 1 * kPointerSize : 2 * kPointerSize));
- __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
- // Fall through!
- case NO_OVERWRITE:
- __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
- __ bind(&skip_allocation);
- break;
- default: UNREACHABLE();
- }
- // Store the result in the HeapNumber and return.
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- __ cvtsi2sd(xmm0, ebx);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- } else {
- __ mov(Operand(esp, 1 * kPointerSize), ebx);
- __ fild_s(Operand(esp, 1 * kPointerSize));
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- }
- __ ret(2 * kPointerSize); // Drop two pushed arguments from the stack.
- }
-
- __ bind(&not_floats);
- GenerateTypeTransitionWithSavedArgs(masm);
- break;
- }
- default: UNREACHABLE(); break;
- }
-
- // If an allocation fails, or SHR or MOD hit a hard case,
- // use the runtime system to get the correct result.
- __ bind(&call_runtime);
-
- switch (op_) {
- case Token::ADD:
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
- break;
- case Token::SUB:
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
- break;
- case Token::MUL:
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
- break;
- case Token::DIV:
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
- break;
- case Token::MOD:
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
- break;
- case Token::BIT_OR:
- __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
- break;
- case Token::BIT_AND:
- __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
- break;
- case Token::BIT_XOR:
- __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
- break;
- case Token::SAR:
- __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
- break;
- case Token::SHL:
- __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
- break;
- case Token::SHR:
- __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
- Label call_runtime;
-
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->generic_binary_stub_calls(), 1);
-
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- break;
- case Token::MOD:
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR:
- GenerateRegisterArgsPush(masm);
- break;
- default:
- UNREACHABLE();
+ Label skip_mov;
+ __ j(less_equal, &skip_mov, Label::kNear);
+ __ mov(result_reg, scratch1);
+ __ bind(&skip_mov);
}
- GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
-
- // Floating point case.
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV: {
- Label not_floats;
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
-
- switch (op_) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- GenerateHeapResultAllocation(masm, &call_runtime);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- __ ret(0);
- } else { // SSE2 not available, use FPU.
- FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
- FloatingPointHelper::LoadFloatOperands(
- masm,
- ecx,
- FloatingPointHelper::ARGS_IN_REGISTERS);
- switch (op_) {
- case Token::ADD: __ faddp(1); break;
- case Token::SUB: __ fsubp(1); break;
- case Token::MUL: __ fmulp(1); break;
- case Token::DIV: __ fdivp(1); break;
- default: UNREACHABLE();
- }
- Label after_alloc_failure;
- GenerateHeapResultAllocation(masm, &after_alloc_failure);
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ ret(0);
- __ bind(&after_alloc_failure);
- __ fstp(0); // Pop FPU stack before calling runtime.
- __ jmp(&call_runtime);
- }
- __ bind(&not_floats);
- break;
- }
- case Token::MOD: {
- // For MOD we go directly to runtime in the non-smi case.
- break;
- }
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR: {
- Label non_smi_result;
- FloatingPointHelper::LoadUnknownsAsIntegers(masm,
- use_sse3_,
- &call_runtime);
- switch (op_) {
- case Token::BIT_OR: __ or_(eax, ecx); break;
- case Token::BIT_AND: __ and_(eax, ecx); break;
- case Token::BIT_XOR: __ xor_(eax, ecx); break;
- case Token::SAR: __ sar_cl(eax); break;
- case Token::SHL: __ shl_cl(eax); break;
- case Token::SHR: __ shr_cl(eax); break;
- default: UNREACHABLE();
- }
- if (op_ == Token::SHR) {
- // Check if result is non-negative and fits in a smi.
- __ test(eax, Immediate(0xc0000000));
- __ j(not_zero, &call_runtime);
- } else {
- // Check if result fits in a smi.
- __ cmp(eax, 0xc0000000);
- __ j(negative, &non_smi_result, Label::kNear);
- }
- // Tag smi result and return.
- __ SmiTag(eax);
- __ ret(2 * kPointerSize); // Drop the arguments from the stack.
-
- // All ops except SHR return a signed int32 that we load in
- // a HeapNumber.
- if (op_ != Token::SHR) {
- __ bind(&non_smi_result);
- // Allocate a heap number if needed.
- __ mov(ebx, eax); // ebx: result
- Label skip_allocation;
- switch (mode_) {
- case OVERWRITE_LEFT:
- case OVERWRITE_RIGHT:
- // If the operand was an object, we skip the
- // allocation of a heap number.
- __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
- 1 * kPointerSize : 2 * kPointerSize));
- __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
- // Fall through!
- case NO_OVERWRITE:
- __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
- __ bind(&skip_allocation);
- break;
- default: UNREACHABLE();
- }
- // Store the result in the HeapNumber and return.
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- __ cvtsi2sd(xmm0, ebx);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- } else {
- __ mov(Operand(esp, 1 * kPointerSize), ebx);
- __ fild_s(Operand(esp, 1 * kPointerSize));
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- }
- __ ret(2 * kPointerSize);
- }
- break;
- }
- default: UNREACHABLE(); break;
+ // Restore registers
+ __ bind(&done);
+ if (stash_exponent_copy) {
+ __ add(esp, Immediate(kDoubleSize / 2));
}
-
- // If all else fails, use the runtime system to get the correct
- // result.
- __ bind(&call_runtime);
- switch (op_) {
- case Token::ADD: {
- GenerateAddStrings(masm);
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
- break;
- }
- case Token::SUB:
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
- break;
- case Token::MUL:
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
- break;
- case Token::DIV:
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
- break;
- case Token::MOD:
- __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
- break;
- case Token::BIT_OR:
- __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
- break;
- case Token::BIT_AND:
- __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
- break;
- case Token::BIT_XOR:
- __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
- break;
- case Token::SAR:
- __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
- break;
- case Token::SHL:
- __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
- break;
- case Token::SHR:
- __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
- break;
- default:
- UNREACHABLE();
+ __ bind(&done_no_stash);
+ if (!final_result_reg.is(result_reg)) {
+ ASSERT(final_result_reg.is(ecx));
+ __ mov(final_result_reg, result_reg);
}
-}
-
-
-void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
- ASSERT(op_ == Token::ADD);
- Label left_not_string, call_runtime;
-
- // Registers containing left and right operands respectively.
- Register left = edx;
- Register right = eax;
-
- // Test if left operand is a string.
- __ JumpIfSmi(left, &left_not_string, Label::kNear);
- __ CmpObjectType(left, FIRST_NONSTRING_TYPE, ecx);
- __ j(above_equal, &left_not_string, Label::kNear);
-
- StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
- GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_left_stub);
-
- // Left operand is not a string, test right.
- __ bind(&left_not_string);
- __ JumpIfSmi(right, &call_runtime, Label::kNear);
- __ CmpObjectType(right, FIRST_NONSTRING_TYPE, ecx);
- __ j(above_equal, &call_runtime, Label::kNear);
-
- StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
- GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_right_stub);
-
- // Neither argument is a string.
- __ bind(&call_runtime);
-}
-
-
-void BinaryOpStub::GenerateHeapResultAllocation(
- MacroAssembler* masm,
- Label* alloc_failure) {
- Label skip_allocation;
- OverwriteMode mode = mode_;
- switch (mode) {
- case OVERWRITE_LEFT: {
- // If the argument in edx is already an object, we skip the
- // allocation of a heap number.
- __ JumpIfNotSmi(edx, &skip_allocation, Label::kNear);
- // Allocate a heap number for the result. Keep eax and edx intact
- // for the possible runtime call.
- __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
- // Now edx can be overwritten losing one of the arguments as we are
- // now done and will not need it any more.
- __ mov(edx, ebx);
- __ bind(&skip_allocation);
- // Use object in edx as a result holder
- __ mov(eax, edx);
- break;
- }
- case OVERWRITE_RIGHT:
- // If the argument in eax is already an object, we skip the
- // allocation of a heap number.
- __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
- // Fall through!
- case NO_OVERWRITE:
- // Allocate a heap number for the result. Keep eax and edx intact
- // for the possible runtime call.
- __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
- // Now eax can be overwritten losing one of the arguments as we are
- // now done and will not need it any more.
- __ mov(eax, ebx);
- __ bind(&skip_allocation);
- break;
- default: UNREACHABLE();
- }
-}
-
-
-void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
- __ pop(ecx);
- __ push(edx);
- __ push(eax);
- __ push(ecx);
+ __ pop(save_reg);
+ __ pop(scratch1);
+ __ ret(0);
}
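Read arithmetically, the stub above truncates a double to int32 using only integer operations on the IEEE-754 fields (with an SSE3 fisttp shortcut when available). A rough, hypothetical C++ model of that arithmetic, ignoring the stub's exact register and stack usage:

#include <cstdint>
#include <cstring>

int32_t TruncateDoubleToInt32(double input) {
  uint64_t bits;
  std::memcpy(&bits, &input, sizeof bits);

  const int kSignificandBits = 52;   // mantissa bits below the hidden bit
  const int kExponentBias = 1023;
  const uint64_t kHiddenBit = uint64_t{1} << kSignificandBits;

  bool negative = (bits >> 63) != 0;
  int exponent =
      static_cast<int>((bits >> kSignificandBits) & 0x7FF) - kExponentBias;
  uint64_t significand = (bits & (kHiddenBit - 1)) | kHiddenBit;

  uint32_t magnitude;
  if (exponent < 0) {
    magnitude = 0;  // |input| < 1 truncates to 0 (covers denormals too)
  } else if (exponent <= kSignificandBits) {
    // Integer part is significand >> (52 - exponent); keep its low 32 bits.
    magnitude =
        static_cast<uint32_t>(significand >> (kSignificandBits - exponent));
  } else if (exponent - kSignificandBits <= 31) {
    // Significand is already an integer; shift what survives mod 2^32.
    magnitude =
        static_cast<uint32_t>(significand) << (exponent - kSignificandBits);
  } else {
    magnitude = 0;  // every bit shifted out of the low 32 bits (also Inf/NaN)
  }
  // Apply the sign modularly, as the stub's neg/cmov sequence does.
  uint32_t result = negative ? 0u - magnitude : magnitude;
  return static_cast<int32_t>(result);
}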
@@ -2504,8 +697,9 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ bind(&loaded);
} else { // UNTAGGED.
+ CpuFeatureScope scope(masm, SSE2);
if (CpuFeatures::IsSupported(SSE4_1)) {
- CpuFeatures::Scope sse4_scope(SSE4_1);
+ CpuFeatureScope sse4_scope(masm, SSE4_1);
__ pextrd(edx, xmm1, 0x1); // copy xmm1[63..32] to edx.
} else {
__ pshufd(xmm0, xmm1, 0x1);
@@ -2576,7 +770,8 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ fstp(0);
__ ret(kPointerSize);
} else { // UNTAGGED.
- __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
+ CpuFeatureScope scope(masm, SSE2);
+ __ movsd(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
__ Ret();
}
@@ -2588,9 +783,10 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
if (tagged) {
__ AllocateHeapNumber(eax, edi, no_reg, &runtime_call_clear_stack);
} else { // UNTAGGED.
+ CpuFeatureScope scope(masm, SSE2);
__ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
__ sub(esp, Immediate(kDoubleSize));
- __ movdbl(Operand(esp, 0), xmm1);
+ __ movsd(Operand(esp, 0), xmm1);
__ fld_d(Operand(esp, 0));
__ add(esp, Immediate(kDoubleSize));
}
@@ -2602,17 +798,18 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
if (tagged) {
__ ret(kPointerSize);
} else { // UNTAGGED.
- __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
+ CpuFeatureScope scope(masm, SSE2);
+ __ movsd(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
__ Ret();
// Skip cache and return answer directly, only in untagged case.
__ bind(&skip_cache);
__ sub(esp, Immediate(kDoubleSize));
- __ movdbl(Operand(esp, 0), xmm1);
+ __ movsd(Operand(esp, 0), xmm1);
__ fld_d(Operand(esp, 0));
GenerateOperation(masm, type_);
__ fstp_d(Operand(esp, 0));
- __ movdbl(xmm1, Operand(esp, 0));
+ __ movsd(xmm1, Operand(esp, 0));
__ add(esp, Immediate(kDoubleSize));
// We return the value in xmm1 without adding it to the cache, but
// we cause a scavenging GC so that future allocations will succeed.
@@ -2634,16 +831,17 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
ExternalReference(RuntimeFunction(), masm->isolate());
__ TailCallExternalReference(runtime, 1, 1);
} else { // UNTAGGED.
+ CpuFeatureScope scope(masm, SSE2);
__ bind(&runtime_call_clear_stack);
__ bind(&runtime_call);
__ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm1);
+ __ movsd(FieldOperand(eax, HeapNumber::kValueOffset), xmm1);
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ push(eax);
__ CallRuntime(RuntimeFunction(), 1);
}
- __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
+ __ movsd(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
__ Ret();
}
}
@@ -2760,75 +958,6 @@ void TranscendentalCacheStub::GenerateOperation(
}
-// Input: edx, eax are the left and right objects of a bit op.
-// Output: eax, ecx are left and right integers for a bit op.
-void FloatingPointHelper::LoadUnknownsAsIntegers(MacroAssembler* masm,
- bool use_sse3,
- Label* conversion_failure) {
- // Check float operands.
- Label arg1_is_object, check_undefined_arg1;
- Label arg2_is_object, check_undefined_arg2;
- Label load_arg2, done;
-
- // Test if arg1 is a Smi.
- __ JumpIfNotSmi(edx, &arg1_is_object, Label::kNear);
-
- __ SmiUntag(edx);
- __ jmp(&load_arg2);
-
- // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
- __ bind(&check_undefined_arg1);
- Factory* factory = masm->isolate()->factory();
- __ cmp(edx, factory->undefined_value());
- __ j(not_equal, conversion_failure);
- __ mov(edx, Immediate(0));
- __ jmp(&load_arg2);
-
- __ bind(&arg1_is_object);
- __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
- __ cmp(ebx, factory->heap_number_map());
- __ j(not_equal, &check_undefined_arg1);
-
- // Get the untagged integer version of the edx heap number in ecx.
- IntegerConvert(masm, edx, use_sse3, conversion_failure);
- __ mov(edx, ecx);
-
- // Here edx has the untagged integer, eax has a Smi or a heap number.
- __ bind(&load_arg2);
-
- // Test if arg2 is a Smi.
- __ JumpIfNotSmi(eax, &arg2_is_object, Label::kNear);
-
- __ SmiUntag(eax);
- __ mov(ecx, eax);
- __ jmp(&done);
-
- // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
- __ bind(&check_undefined_arg2);
- __ cmp(eax, factory->undefined_value());
- __ j(not_equal, conversion_failure);
- __ mov(ecx, Immediate(0));
- __ jmp(&done);
-
- __ bind(&arg2_is_object);
- __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- __ cmp(ebx, factory->heap_number_map());
- __ j(not_equal, &check_undefined_arg2);
-
- // Get the untagged integer version of the eax heap number in ecx.
- IntegerConvert(masm, eax, use_sse3, conversion_failure);
- __ bind(&done);
- __ mov(eax, edx);
-}
-
-
-void FloatingPointHelper::CheckLoadedIntegersWereInt32(MacroAssembler* masm,
- bool use_sse3,
- Label* not_int32) {
- return;
-}
-
-
void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
Register number) {
Label load_smi, done;
@@ -2847,33 +976,6 @@ void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
}
-void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm) {
- Label load_smi_edx, load_eax, load_smi_eax, done;
- // Load operand in edx into xmm0.
- __ JumpIfSmi(edx, &load_smi_edx, Label::kNear);
- __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
-
- __ bind(&load_eax);
- // Load operand in eax into xmm1.
- __ JumpIfSmi(eax, &load_smi_eax, Label::kNear);
- __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
- __ jmp(&done, Label::kNear);
-
- __ bind(&load_smi_edx);
- __ SmiUntag(edx); // Untag smi before converting to float.
- __ cvtsi2sd(xmm0, edx);
- __ SmiTag(edx); // Retag smi for heap number overwriting test.
- __ jmp(&load_eax);
-
- __ bind(&load_smi_eax);
- __ SmiUntag(eax); // Untag smi before converting to float.
- __ cvtsi2sd(xmm1, eax);
- __ SmiTag(eax); // Retag smi for heap number overwriting test.
-
- __ bind(&done);
-}
-
-
void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm,
Label* not_numbers) {
Label load_smi_edx, load_eax, load_smi_eax, load_float_eax, done;
@@ -2882,7 +984,7 @@ void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm,
Factory* factory = masm->isolate()->factory();
__ cmp(FieldOperand(edx, HeapObject::kMapOffset), factory->heap_number_map());
__ j(not_equal, not_numbers); // Argument in edx is not a number.
- __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
+ __ movsd(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
__ bind(&load_eax);
// Load operand in eax into xmm1, or branch to not_numbers.
__ JumpIfSmi(eax, &load_smi_eax, Label::kNear);
@@ -2891,108 +993,20 @@ void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm,
__ jmp(not_numbers); // Argument in eax is not a number.
__ bind(&load_smi_edx);
__ SmiUntag(edx); // Untag smi before converting to float.
- __ cvtsi2sd(xmm0, edx);
+ __ Cvtsi2sd(xmm0, edx);
__ SmiTag(edx); // Retag smi for heap number overwriting test.
__ jmp(&load_eax);
__ bind(&load_smi_eax);
__ SmiUntag(eax); // Untag smi before converting to float.
- __ cvtsi2sd(xmm1, eax);
+ __ Cvtsi2sd(xmm1, eax);
__ SmiTag(eax); // Retag smi for heap number overwriting test.
__ jmp(&done, Label::kNear);
__ bind(&load_float_eax);
- __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
+ __ movsd(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
__ bind(&done);
}
-void FloatingPointHelper::LoadSSE2Smis(MacroAssembler* masm,
- Register scratch) {
- const Register left = edx;
- const Register right = eax;
- __ mov(scratch, left);
- ASSERT(!scratch.is(right)); // We're about to clobber scratch.
- __ SmiUntag(scratch);
- __ cvtsi2sd(xmm0, scratch);
-
- __ mov(scratch, right);
- __ SmiUntag(scratch);
- __ cvtsi2sd(xmm1, scratch);
-}
-
-
-void FloatingPointHelper::CheckSSE2OperandsAreInt32(MacroAssembler* masm,
- Label* non_int32,
- Register scratch) {
- __ cvttsd2si(scratch, Operand(xmm0));
- __ cvtsi2sd(xmm2, scratch);
- __ ucomisd(xmm0, xmm2);
- __ j(not_zero, non_int32);
- __ j(carry, non_int32);
- __ cvttsd2si(scratch, Operand(xmm1));
- __ cvtsi2sd(xmm2, scratch);
- __ ucomisd(xmm1, xmm2);
- __ j(not_zero, non_int32);
- __ j(carry, non_int32);
-}
-
-
-void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
- Register scratch,
- ArgLocation arg_location) {
- Label load_smi_1, load_smi_2, done_load_1, done;
- if (arg_location == ARGS_IN_REGISTERS) {
- __ mov(scratch, edx);
- } else {
- __ mov(scratch, Operand(esp, 2 * kPointerSize));
- }
- __ JumpIfSmi(scratch, &load_smi_1, Label::kNear);
- __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
- __ bind(&done_load_1);
-
- if (arg_location == ARGS_IN_REGISTERS) {
- __ mov(scratch, eax);
- } else {
- __ mov(scratch, Operand(esp, 1 * kPointerSize));
- }
- __ JumpIfSmi(scratch, &load_smi_2, Label::kNear);
- __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
- __ jmp(&done, Label::kNear);
-
- __ bind(&load_smi_1);
- __ SmiUntag(scratch);
- __ push(scratch);
- __ fild_s(Operand(esp, 0));
- __ pop(scratch);
- __ jmp(&done_load_1);
-
- __ bind(&load_smi_2);
- __ SmiUntag(scratch);
- __ push(scratch);
- __ fild_s(Operand(esp, 0));
- __ pop(scratch);
-
- __ bind(&done);
-}
-
-
-void FloatingPointHelper::LoadFloatSmis(MacroAssembler* masm,
- Register scratch) {
- const Register left = edx;
- const Register right = eax;
- __ mov(scratch, left);
- ASSERT(!scratch.is(right)); // We're about to clobber scratch.
- __ SmiUntag(scratch);
- __ push(scratch);
- __ fild_s(Operand(esp, 0));
-
- __ mov(scratch, right);
- __ SmiUntag(scratch);
- __ mov(Operand(esp, 0), scratch);
- __ fild_s(Operand(esp, 0));
- __ pop(scratch);
-}
-
-
void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
Label* non_float,
Register scratch) {
@@ -3016,14 +1030,8 @@ void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
}
-void FloatingPointHelper::CheckFloatOperandsAreInt32(MacroAssembler* masm,
- Label* non_int32) {
- return;
-}
-
-
void MathPowStub::Generate(MacroAssembler* masm) {
- CpuFeatures::Scope use_sse2(SSE2);
+ CpuFeatureScope use_sse2(masm, SSE2);
Factory* factory = masm->isolate()->factory();
const Register exponent = eax;
const Register base = edx;
@@ -3037,7 +1045,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// Save 1 in double_result - we need this several times later on.
__ mov(scratch, Immediate(1));
- __ cvtsi2sd(double_result, scratch);
+ __ Cvtsi2sd(double_result, scratch);
if (exponent_type_ == ON_STACK) {
Label base_is_smi, unpack_exponent;
@@ -3052,12 +1060,12 @@ void MathPowStub::Generate(MacroAssembler* masm) {
factory->heap_number_map());
__ j(not_equal, &call_runtime);
- __ movdbl(double_base, FieldOperand(base, HeapNumber::kValueOffset));
+ __ movsd(double_base, FieldOperand(base, HeapNumber::kValueOffset));
__ jmp(&unpack_exponent, Label::kNear);
__ bind(&base_is_smi);
__ SmiUntag(base);
- __ cvtsi2sd(double_base, base);
+ __ Cvtsi2sd(double_base, base);
__ bind(&unpack_exponent);
__ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
@@ -3068,7 +1076,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ cmp(FieldOperand(exponent, HeapObject::kMapOffset),
factory->heap_number_map());
__ j(not_equal, &call_runtime);
- __ movdbl(double_exponent,
+ __ movsd(double_exponent,
FieldOperand(exponent, HeapNumber::kValueOffset));
} else if (exponent_type_ == TAGGED) {
__ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
@@ -3076,21 +1084,21 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ jmp(&int_exponent);
__ bind(&exponent_not_smi);
- __ movdbl(double_exponent,
+ __ movsd(double_exponent,
FieldOperand(exponent, HeapNumber::kValueOffset));
}
if (exponent_type_ != INTEGER) {
- Label fast_power;
- // Detect integer exponents stored as double.
- __ cvttsd2si(exponent, Operand(double_exponent));
+ Label fast_power, try_arithmetic_simplification;
+ __ DoubleToI(exponent, double_exponent, double_scratch,
+ TREAT_MINUS_ZERO_AS_ZERO, &try_arithmetic_simplification);
+ __ jmp(&int_exponent);
+
+ __ bind(&try_arithmetic_simplification);
// Skip to runtime if possibly NaN (indicated by the indefinite integer).
+ __ cvttsd2si(exponent, Operand(double_exponent));
__ cmp(exponent, Immediate(0x80000000u));
__ j(equal, &call_runtime);
- __ cvtsi2sd(double_scratch, exponent);
- // Already ruled out NaNs for exponent.
- __ ucomisd(double_exponent, double_scratch);
- __ j(equal, &int_exponent);
if (exponent_type_ == ON_STACK) {
// Detect square root case. Crankshaft detects constant +/-0.5 at
@@ -3171,9 +1179,9 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ fnclex(); // Clear flags to catch exceptions later.
// Transfer (B)ase and (E)xponent onto the FPU register stack.
__ sub(esp, Immediate(kDoubleSize));
- __ movdbl(Operand(esp, 0), double_exponent);
+ __ movsd(Operand(esp, 0), double_exponent);
__ fld_d(Operand(esp, 0)); // E
- __ movdbl(Operand(esp, 0), double_base);
+ __ movsd(Operand(esp, 0), double_base);
__ fld_d(Operand(esp, 0)); // B, E
// Exponent is in st(1) and base is in st(0)
@@ -3187,16 +1195,16 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// F2XM1 calculates 2^st(0) - 1 for -1 < st(0) < 1
__ f2xm1(); // 2^(X-rnd(X)) - 1, rnd(X)
__ fld1(); // 1, 2^(X-rnd(X)) - 1, rnd(X)
- __ faddp(1); // 1, 2^(X-rnd(X)), rnd(X)
+ __ faddp(1); // 2^(X-rnd(X)), rnd(X)
// FSCALE calculates st(0) * 2^st(1)
__ fscale(); // 2^X, rnd(X)
- __ fstp(1);
+ __ fstp(1); // 2^X
// Bail out to runtime in case of exceptions in the status word.
__ fnstsw_ax();
__ test_b(eax, 0x5F); // We check for all but precision exception.
__ j(not_zero, &fast_power_failed, Label::kNear);
__ fstp_d(Operand(esp, 0));
- __ movdbl(double_result, Operand(esp, 0));
+ __ movsd(double_result, Operand(esp, 0));
__ add(esp, Immediate(kDoubleSize));
__ jmp(&done);
@@ -3250,7 +1258,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// and may not have contained the exponent value in the first place when the
// exponent is a smi. We reset it with exponent value before bailing out.
__ j(not_equal, &done);
- __ cvtsi2sd(double_exponent, exponent);
+ __ Cvtsi2sd(double_exponent, exponent);
// Returning or bailing out.
Counters* counters = masm->isolate()->counters();
@@ -3263,7 +1271,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// as heap number in exponent.
__ bind(&done);
__ AllocateHeapNumber(eax, scratch, base, &call_runtime);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), double_result);
+ __ movsd(FieldOperand(eax, HeapNumber::kValueOffset), double_result);
__ IncrementCounter(counters->math_pow(), 1);
__ ret(2 * kPointerSize);
} else {
@@ -3271,8 +1279,8 @@ void MathPowStub::Generate(MacroAssembler* masm) {
{
AllowExternalCallThatCantCauseGC scope(masm);
__ PrepareCallCFunction(4, scratch);
- __ movdbl(Operand(esp, 0 * kDoubleSize), double_base);
- __ movdbl(Operand(esp, 1 * kDoubleSize), double_exponent);
+ __ movsd(Operand(esp, 0 * kDoubleSize), double_base);
+ __ movsd(Operand(esp, 1 * kDoubleSize), double_exponent);
__ CallCFunction(
ExternalReference::power_double_double_function(masm->isolate()), 4);
}
@@ -3280,7 +1288,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// Store it into the (fixed) result register.
__ sub(esp, Immediate(kDoubleSize));
__ fstp_d(Operand(esp, 0));
- __ movdbl(double_result, Operand(esp, 0));
+ __ movsd(double_result, Operand(esp, 0));
__ add(esp, Immediate(kDoubleSize));
__ bind(&done);
@@ -3290,6 +1298,111 @@ void MathPowStub::Generate(MacroAssembler* masm) {
}
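
Editor's note on the exponent dispatch earlier in this stub: DoubleToI tries to read the double exponent as an exact int32 (with -0 treated as 0), and the fallback cvttsd2si path uses the 0x80000000 "indefinite integer" result to detect NaN or out-of-range exponents and punt to the runtime. A small portable sketch of that round-trip test; it models the truncation with a range/NaN check rather than the instruction itself:

    #include <cassert>
    #include <cmath>
    #include <cstdint>

    // Returns true and stores the value when d is exactly representable as int32,
    // treating -0.0 as 0 (TREAT_MINUS_ZERO_AS_ZERO in the stub).
    bool DoubleToInt32Exact(double d, int32_t* out) {
      if (std::isnan(d) || d < -2147483648.0 || d > 2147483647.0) return false;
      int32_t truncated = static_cast<int32_t>(d);  // what cvttsd2si would produce
      if (static_cast<double>(truncated) != d) return false;  // fractional part left over
      *out = truncated;
      return true;
    }

    int main() {
      int32_t v;
      assert(DoubleToInt32Exact(8.0, &v) && v == 8);
      assert(DoubleToInt32Exact(-0.0, &v) && v == 0);
      assert(!DoubleToInt32Exact(0.5, &v));            // fast_power / runtime path
      assert(!DoubleToInt32Exact(std::nan(""), &v));   // cvttsd2si would yield 0x80000000
      return 0;
    }
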
+void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- ecx : name
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+ Label miss;
+
+ if (kind() == Code::KEYED_LOAD_IC) {
+ __ cmp(ecx, Immediate(masm->isolate()->factory()->prototype_string()));
+ __ j(not_equal, &miss);
+ }
+
+ StubCompiler::GenerateLoadFunctionPrototype(masm, edx, eax, ebx, &miss);
+ __ bind(&miss);
+ StubCompiler::TailCallBuiltin(
+ masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
+}
+
+
+void StringLengthStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- ecx : name
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+ Label miss;
+
+ if (kind() == Code::KEYED_LOAD_IC) {
+ __ cmp(ecx, Immediate(masm->isolate()->factory()->length_string()));
+ __ j(not_equal, &miss);
+ }
+
+ StubCompiler::GenerateLoadStringLength(masm, edx, eax, ebx, &miss);
+ __ bind(&miss);
+ StubCompiler::TailCallBuiltin(
+ masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
+}
+
+
+void StoreArrayLengthStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : value
+ // -- ecx : name
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+ //
+ // This accepts as a receiver anything JSArray::SetElementsLength accepts
+ // (currently anything except for external arrays which means anything with
+ // elements of FixedArray type). Value must be a number, but only smis are
+ // accepted as the most common case.
+
+ Label miss;
+
+ Register receiver = edx;
+ Register value = eax;
+ Register scratch = ebx;
+
+ if (kind() == Code::KEYED_STORE_IC) {
+ __ cmp(ecx, Immediate(masm->isolate()->factory()->length_string()));
+ __ j(not_equal, &miss);
+ }
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, &miss);
+
+ // Check that the object is a JS array.
+ __ CmpObjectType(receiver, JS_ARRAY_TYPE, scratch);
+ __ j(not_equal, &miss);
+
+ // Check that elements are FixedArray.
+ // We rely on StoreIC_ArrayLength below to deal with all types of
+ // fast elements (including COW).
+ __ mov(scratch, FieldOperand(receiver, JSArray::kElementsOffset));
+ __ CmpObjectType(scratch, FIXED_ARRAY_TYPE, scratch);
+ __ j(not_equal, &miss);
+
+ // Check that the array has fast properties, otherwise the length
+ // property might have been redefined.
+ __ mov(scratch, FieldOperand(receiver, JSArray::kPropertiesOffset));
+ __ CompareRoot(FieldOperand(scratch, FixedArray::kMapOffset),
+ Heap::kHashTableMapRootIndex);
+ __ j(equal, &miss);
+
+ // Check that value is a smi.
+ __ JumpIfNotSmi(value, &miss);
+
+ // Prepare tail call to StoreIC_ArrayLength.
+ __ pop(scratch);
+ __ push(receiver);
+ __ push(value);
+ __ push(scratch); // return address
+
+ ExternalReference ref =
+ ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength), masm->isolate());
+ __ TailCallExternalReference(ref, 2, 1);
+
+ __ bind(&miss);
+
+ StubCompiler::TailCallBuiltin(
+ masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
+}
+
+
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// The key is in edx and the parameter count is in eax.
@@ -3375,6 +1488,8 @@ void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
+ Isolate* isolate = masm->isolate();
+
// esp[0] : return address
// esp[4] : number of parameters (tagged)
// esp[8] : receiver displacement
@@ -3435,7 +1550,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ add(ebx, Immediate(Heap::kArgumentsObjectSize));
// Do the allocation of all three objects in one go.
- __ AllocateInNewSpace(ebx, eax, edx, edi, &runtime, TAG_OBJECT);
+ __ Allocate(ebx, eax, edx, edi, &runtime, TAG_OBJECT);
// eax = address of new object(s) (tagged)
// ecx = argument count (tagged)
@@ -3506,7 +1621,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ j(zero, &skip_parameter_map);
__ mov(FieldOperand(edi, FixedArray::kMapOffset),
- Immediate(FACTORY->non_strict_arguments_elements_map()));
+ Immediate(isolate->factory()->non_strict_arguments_elements_map()));
__ lea(eax, Operand(ebx, reinterpret_cast<intptr_t>(Smi::FromInt(2))));
__ mov(FieldOperand(edi, FixedArray::kLengthOffset), eax);
__ mov(FieldOperand(edi, FixedArray::kHeaderSize + 0 * kPointerSize), esi);
@@ -3527,7 +1642,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ mov(ebx, Immediate(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
__ add(ebx, Operand(esp, 4 * kPointerSize));
__ sub(ebx, eax);
- __ mov(ecx, FACTORY->the_hole_value());
+ __ mov(ecx, isolate->factory()->the_hole_value());
__ mov(edx, edi);
__ lea(edi, Operand(edi, eax, times_2, kParameterMapHeaderSize));
// eax = loop variable (tagged)
@@ -3562,7 +1677,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// esp[16] = address of receiver argument
// Copy arguments header and remaining slots (if there are any).
__ mov(FieldOperand(edi, FixedArray::kMapOffset),
- Immediate(FACTORY->fixed_array_map()));
+ Immediate(isolate->factory()->fixed_array_map()));
__ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx);
Label arguments_loop, arguments_test;
@@ -3598,6 +1713,8 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
+ Isolate* isolate = masm->isolate();
+
// esp[0] : return address
// esp[4] : number of parameters
// esp[8] : receiver displacement
@@ -3633,7 +1750,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ add(ecx, Immediate(Heap::kArgumentsObjectSizeStrict));
// Do the allocation of both objects in one go.
- __ AllocateInNewSpace(ecx, eax, edx, ebx, &runtime, TAG_OBJECT);
+ __ Allocate(ecx, eax, edx, ebx, &runtime, TAG_OBJECT);
// Get the arguments boilerplate from the current native context.
__ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
@@ -3668,7 +1785,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ lea(edi, Operand(eax, Heap::kArgumentsObjectSizeStrict));
__ mov(FieldOperand(eax, JSObject::kElementsOffset), edi);
__ mov(FieldOperand(edi, FixedArray::kMapOffset),
- Immediate(FACTORY->fixed_array_map()));
+ Immediate(isolate->factory()->fixed_array_map()));
__ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx);
// Untag the length for the loop below.
@@ -3714,7 +1831,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
static const int kSubjectOffset = 3 * kPointerSize;
static const int kJSRegExpOffset = 4 * kPointerSize;
- Label runtime, invoke_regexp;
+ Label runtime;
+ Factory* factory = masm->isolate()->factory();
// Ensure that a RegExp stack is allocated.
ExternalReference address_of_regexp_stack_memory_address =
@@ -3732,13 +1850,14 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ JumpIfSmi(eax, &runtime);
__ CmpObjectType(eax, JS_REGEXP_TYPE, ecx);
__ j(not_equal, &runtime);
+
// Check that the RegExp has been compiled (data contains a fixed array).
__ mov(ecx, FieldOperand(eax, JSRegExp::kDataOffset));
if (FLAG_debug_code) {
__ test(ecx, Immediate(kSmiTagMask));
- __ Check(not_zero, "Unexpected type for RegExp data, FixedArray expected");
+ __ Check(not_zero, kUnexpectedTypeForRegExpDataFixedArrayExpected);
__ CmpObjectType(ecx, FIXED_ARRAY_TYPE, ebx);
- __ Check(equal, "Unexpected type for RegExp data, FixedArray expected");
+ __ Check(equal, kUnexpectedTypeForRegExpDataFixedArrayExpected);
}
// ecx: RegExp data (FixedArray)
@@ -3750,156 +1869,124 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// ecx: RegExp data (FixedArray)
// Check that the number of captures fit in the static offsets vector buffer.
__ mov(edx, FieldOperand(ecx, JSRegExp::kIrregexpCaptureCountOffset));
- // Calculate number of capture registers (number_of_captures + 1) * 2. This
- // uses the asumption that smis are 2 * their untagged value.
+ // Check (number_of_captures + 1) * 2 <= offsets vector size
+ // Or number_of_captures * 2 <= offsets vector size - 2
+ // Multiplying by 2 comes for free since edx is smi-tagged.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
- __ add(edx, Immediate(2)); // edx was a smi.
- // Check that the static offsets vector buffer is large enough.
- __ cmp(edx, Isolate::kJSRegexpStaticOffsetsVectorSize);
+ STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
+ __ cmp(edx, Isolate::kJSRegexpStaticOffsetsVectorSize - 2);
__ j(above, &runtime);
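
Editor's note, a worked example of the smi trick in the capture-count check just above: on ia32 the smi-tagged count n is the machine word 2*n, so comparing the tagged register directly against kJSRegexpStaticOffsetsVectorSize - 2 is the same as checking (n + 1) * 2 <= vector size, with no explicit shift. A small sketch (the vector size 128 is an assumed value, used only for illustration):

    #include <cassert>
    #include <cstdint>

    int main() {
      const int32_t kOffsetsVectorSize = 128;   // assumed size, for illustration only
      for (int32_t captures = 0; captures < 200; ++captures) {
        int32_t tagged = captures * 2;          // smi-tagging doubles the value
        bool fits_tagged = tagged <= kOffsetsVectorSize - 2;      // the stub's compare
        bool fits_untagged = (captures + 1) * 2 <= kOffsetsVectorSize;
        assert(fits_tagged == fits_untagged);   // the two formulations agree
      }
      return 0;
    }
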
- // ecx: RegExp data (FixedArray)
- // edx: Number of capture registers
- // Check that the second argument is a string.
- __ mov(eax, Operand(esp, kSubjectOffset));
- __ JumpIfSmi(eax, &runtime);
- Condition is_string = masm->IsObjectStringType(eax, ebx, ebx);
- __ j(NegateCondition(is_string), &runtime);
- // Get the length of the string to ebx.
- __ mov(ebx, FieldOperand(eax, String::kLengthOffset));
-
- // ebx: Length of subject string as a smi
- // ecx: RegExp data (FixedArray)
- // edx: Number of capture registers
- // Check that the third argument is a positive smi less than the subject
- // string length. A negative value will be greater (unsigned comparison).
- __ mov(eax, Operand(esp, kPreviousIndexOffset));
- __ JumpIfNotSmi(eax, &runtime);
- __ cmp(eax, ebx);
- __ j(above_equal, &runtime);
-
- // ecx: RegExp data (FixedArray)
- // edx: Number of capture registers
- // Check that the fourth object is a JSArray object.
- __ mov(eax, Operand(esp, kLastMatchInfoOffset));
- __ JumpIfSmi(eax, &runtime);
- __ CmpObjectType(eax, JS_ARRAY_TYPE, ebx);
- __ j(not_equal, &runtime);
- // Check that the JSArray is in fast case.
- __ mov(ebx, FieldOperand(eax, JSArray::kElementsOffset));
- __ mov(eax, FieldOperand(ebx, HeapObject::kMapOffset));
- Factory* factory = masm->isolate()->factory();
- __ cmp(eax, factory->fixed_array_map());
- __ j(not_equal, &runtime);
- // Check that the last match info has space for the capture registers and the
- // additional information.
- __ mov(eax, FieldOperand(ebx, FixedArray::kLengthOffset));
- __ SmiUntag(eax);
- __ add(edx, Immediate(RegExpImpl::kLastMatchOverhead));
- __ cmp(edx, eax);
- __ j(greater, &runtime);
-
// Reset offset for possibly sliced string.
__ Set(edi, Immediate(0));
- // ecx: RegExp data (FixedArray)
- // Check the representation and encoding of the subject string.
- Label seq_ascii_string, seq_two_byte_string, check_code;
__ mov(eax, Operand(esp, kSubjectOffset));
+ __ JumpIfSmi(eax, &runtime);
+ __ mov(edx, eax); // Make a copy of the original subject string.
__ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
__ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
- // First check for flat two byte string.
+
+ // eax: subject string
+ // edx: subject string
+ // ebx: subject string instance type
+ // ecx: RegExp data (FixedArray)
+ // Handle subject string according to its encoding and representation:
+ // (1) Sequential two byte? If yes, go to (9).
+ // (2) Sequential one byte? If yes, go to (6).
+ // (3) Anything but sequential or cons? If yes, go to (7).
+ // (4) Cons string. If the string is flat, replace subject with first string.
+ // Otherwise bailout.
+ // (5a) Is subject sequential two byte? If yes, go to (9).
+ // (5b) Is subject external? If yes, go to (8).
+ // (6) One byte sequential. Load regexp code for one byte.
+ // (E) Carry on.
+ /// [...]
+
+ // Deferred code at the end of the stub:
+ // (7) Not a long external string? If yes, go to (10).
+ // (8) External string. Make it, offset-wise, look like a sequential string.
+ // (8a) Is the external string one byte? If yes, go to (6).
+  // (9) Two byte sequential. Load regexp code for two byte. Go to (E).
+ // (10) Short external string or not a string? If yes, bail out to runtime.
+ // (11) Sliced string. Replace subject with parent. Go to (5a).
+
+ Label seq_one_byte_string /* 6 */, seq_two_byte_string /* 9 */,
+ external_string /* 8 */, check_underlying /* 5a */,
+ not_seq_nor_cons /* 7 */, check_code /* E */,
+ not_long_external /* 10 */;
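
Editor's note: the dispatch plan above keys entirely off the instance-type bit fields. As a rough illustration (the tag values below are assumptions made for the sketch, not copied from the headers), masking with all four masks at once and getting zero is exactly the "sequential two byte string" test of step (1):

    #include <cassert>
    #include <cstdint>

    // Illustrative instance-type bits (assumed layout, for the sketch only).
    const uint32_t kStringRepresentationMask = 0x03;  // seq=0, cons=1, ext=2, sliced=3
    const uint32_t kStringEncodingMask       = 0x04;  // 0 = two byte, set = one byte
    const uint32_t kShortExternalStringMask  = 0x10;
    const uint32_t kIsNotStringMask          = 0x80;

    bool IsSequentialTwoByte(uint32_t instance_type) {
      // Step (1): every interesting bit must be zero at once.
      return (instance_type & (kIsNotStringMask | kStringRepresentationMask |
                               kStringEncodingMask | kShortExternalStringMask)) == 0;
    }

    int main() {
      assert(IsSequentialTwoByte(0x00));    // sequential two byte -> step (9)
      assert(!IsSequentialTwoByte(0x04));   // sequential one byte -> step (2)/(6)
      assert(!IsSequentialTwoByte(0x01));   // cons string -> step (4)
      assert(!IsSequentialTwoByte(0x80));   // not a string at all
      return 0;
    }
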
+
+ // (1) Sequential two byte? If yes, go to (9).
__ and_(ebx, kIsNotStringMask |
kStringRepresentationMask |
kStringEncodingMask |
kShortExternalStringMask);
STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0);
- __ j(zero, &seq_two_byte_string, Label::kNear);
- // Any other flat string must be a flat ASCII string. None of the following
- // string type tests will succeed if subject is not a string or a short
- // external string.
+ __ j(zero, &seq_two_byte_string); // Go to (9).
+
+ // (2) Sequential one byte? If yes, go to (6).
+ // Any other sequential string must be one byte.
__ and_(ebx, Immediate(kIsNotStringMask |
kStringRepresentationMask |
kShortExternalStringMask));
- __ j(zero, &seq_ascii_string, Label::kNear);
-
- // ebx: whether subject is a string and if yes, its string representation
- // Check for flat cons string or sliced string.
- // A flat cons string is a cons string where the second part is the empty
- // string. In that case the subject string is just the first part of the cons
- // string. Also in this case the first part of the cons string is known to be
- // a sequential string or an external string.
- // In the case of a sliced string its offset has to be taken into account.
- Label cons_string, external_string, check_encoding;
+ __ j(zero, &seq_one_byte_string, Label::kNear); // Go to (6).
+
+ // (3) Anything but sequential or cons? If yes, go to (7).
+ // We check whether the subject string is a cons, since sequential strings
+ // have already been covered.
STATIC_ASSERT(kConsStringTag < kExternalStringTag);
STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
__ cmp(ebx, Immediate(kExternalStringTag));
- __ j(less, &cons_string);
- __ j(equal, &external_string);
-
- // Catch non-string subject or short external string.
- STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag !=0);
- __ test(ebx, Immediate(kIsNotStringMask | kShortExternalStringTag));
- __ j(not_zero, &runtime);
+ __ j(greater_equal, &not_seq_nor_cons); // Go to (7).
- // String is sliced.
- __ mov(edi, FieldOperand(eax, SlicedString::kOffsetOffset));
- __ mov(eax, FieldOperand(eax, SlicedString::kParentOffset));
- // edi: offset of sliced string, smi-tagged.
- // eax: parent string.
- __ jmp(&check_encoding, Label::kNear);
- // String is a cons string, check whether it is flat.
- __ bind(&cons_string);
+ // (4) Cons string. Check that it's flat.
+ // Replace subject with first string and reload instance type.
__ cmp(FieldOperand(eax, ConsString::kSecondOffset), factory->empty_string());
__ j(not_equal, &runtime);
__ mov(eax, FieldOperand(eax, ConsString::kFirstOffset));
- __ bind(&check_encoding);
+ __ bind(&check_underlying);
__ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- // eax: first part of cons string or parent of sliced string.
- // ebx: map of first part of cons string or map of parent of sliced string.
- // Is first part of cons or parent of slice a flat two byte string?
- __ test_b(FieldOperand(ebx, Map::kInstanceTypeOffset),
- kStringRepresentationMask | kStringEncodingMask);
+ __ mov(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
+
+ // (5a) Is subject sequential two byte? If yes, go to (9).
+ __ test_b(ebx, kStringRepresentationMask | kStringEncodingMask);
STATIC_ASSERT((kSeqStringTag | kTwoByteStringTag) == 0);
- __ j(zero, &seq_two_byte_string, Label::kNear);
- // Any other flat string must be sequential ASCII or external.
- __ test_b(FieldOperand(ebx, Map::kInstanceTypeOffset),
- kStringRepresentationMask);
- __ j(not_zero, &external_string);
-
- __ bind(&seq_ascii_string);
- // eax: subject string (flat ASCII)
+ __ j(zero, &seq_two_byte_string); // Go to (9).
+ // (5b) Is subject external? If yes, go to (8).
+ __ test_b(ebx, kStringRepresentationMask);
+ // The underlying external string is never a short external string.
+ STATIC_CHECK(ExternalString::kMaxShortLength < ConsString::kMinLength);
+ STATIC_CHECK(ExternalString::kMaxShortLength < SlicedString::kMinLength);
+ __ j(not_zero, &external_string); // Go to (8).
+
+ // eax: sequential subject string (or look-alike, external string)
+ // edx: original subject string
// ecx: RegExp data (FixedArray)
+ // (6) One byte sequential. Load regexp code for one byte.
+ __ bind(&seq_one_byte_string);
+ // Load previous index and check range before edx is overwritten. We have
+ // to use edx instead of eax here because it might have been only made to
+ // look like a sequential string when it actually is an external string.
+ __ mov(ebx, Operand(esp, kPreviousIndexOffset));
+ __ JumpIfNotSmi(ebx, &runtime);
+ __ cmp(ebx, FieldOperand(edx, String::kLengthOffset));
+ __ j(above_equal, &runtime);
__ mov(edx, FieldOperand(ecx, JSRegExp::kDataAsciiCodeOffset));
- __ Set(ecx, Immediate(1)); // Type is ASCII.
- __ jmp(&check_code, Label::kNear);
-
- __ bind(&seq_two_byte_string);
- // eax: subject string (flat two byte)
- // ecx: RegExp data (FixedArray)
- __ mov(edx, FieldOperand(ecx, JSRegExp::kDataUC16CodeOffset));
- __ Set(ecx, Immediate(0)); // Type is two byte.
+ __ Set(ecx, Immediate(1)); // Type is one byte.
+ // (E) Carry on. String handling is done.
__ bind(&check_code);
+ // edx: irregexp code
// Check that the irregexp code has been generated for the actual string
// encoding. If it has, the field contains a code object otherwise it contains
// a smi (code flushing support).
__ JumpIfSmi(edx, &runtime);
// eax: subject string
+ // ebx: previous index (smi)
// edx: code
// ecx: encoding of subject string (1 if ASCII, 0 if two_byte);
- // Load used arguments before starting to push arguments for call to native
- // RegExp code to avoid handling changing stack height.
- __ mov(ebx, Operand(esp, kPreviousIndexOffset));
- __ SmiUntag(ebx); // Previous index from smi.
-
- // eax: subject string
- // ebx: previous index
- // edx: code
- // ecx: encoding of subject string (1 if ASCII 0 if two_byte);
// All checks done. Now push arguments for native regexp code.
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->regexp_entry_native(), 1);
@@ -3910,7 +1997,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Argument 9: Pass current isolate address.
__ mov(Operand(esp, 8 * kPointerSize),
- Immediate(ExternalReference::isolate_address()));
+ Immediate(ExternalReference::isolate_address(masm->isolate())));
// Argument 8: Indicate that this is a direct call from JavaScript.
__ mov(Operand(esp, 7 * kPointerSize), Immediate(1));
@@ -3930,6 +2017,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
masm->isolate())));
// Argument 2: Previous index.
+ __ SmiUntag(ebx);
__ mov(Operand(esp, 1 * kPointerSize), ebx);
// Argument 1: Original subject string.
@@ -3960,9 +2048,9 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ test(ecx, ecx);
__ j(zero, &setup_two_byte, Label::kNear);
__ SmiUntag(esi);
- __ lea(ecx, FieldOperand(eax, esi, times_1, SeqAsciiString::kHeaderSize));
+ __ lea(ecx, FieldOperand(eax, esi, times_1, SeqOneByteString::kHeaderSize));
__ mov(Operand(esp, 3 * kPointerSize), ecx); // Argument 4.
- __ lea(ecx, FieldOperand(eax, ebx, times_1, SeqAsciiString::kHeaderSize));
+ __ lea(ecx, FieldOperand(eax, ebx, times_1, SeqOneByteString::kHeaderSize));
__ mov(Operand(esp, 2 * kPointerSize), ecx); // Argument 3.
__ jmp(&setup_rest, Label::kNear);
@@ -3981,7 +2069,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ call(edx);
// Drop arguments and come back to JS mode.
- __ LeaveApiExitFrame();
+ __ LeaveApiExitFrame(true);
// Check the result.
Label success;
@@ -4039,8 +2127,23 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// edx: Number of capture registers
// Load last_match_info which is still known to be a fast case JSArray.
+ // Check that the fourth object is a JSArray object.
__ mov(eax, Operand(esp, kLastMatchInfoOffset));
+ __ JumpIfSmi(eax, &runtime);
+ __ CmpObjectType(eax, JS_ARRAY_TYPE, ebx);
+ __ j(not_equal, &runtime);
+ // Check that the JSArray is in fast case.
__ mov(ebx, FieldOperand(eax, JSArray::kElementsOffset));
+ __ mov(eax, FieldOperand(ebx, HeapObject::kMapOffset));
+ __ cmp(eax, factory->fixed_array_map());
+ __ j(not_equal, &runtime);
+ // Check that the last match info has space for the capture registers and the
+ // additional information.
+ __ mov(eax, FieldOperand(ebx, FixedArray::kLengthOffset));
+ __ SmiUntag(eax);
+ __ sub(eax, Immediate(RegExpImpl::kLastMatchOverhead));
+ __ cmp(edx, eax);
+ __ j(greater, &runtime);
// ebx: last_match_info backing store (FixedArray)
// edx: number of capture registers
@@ -4050,13 +2153,14 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ SmiUntag(edx); // Number of capture registers back from smi.
// Store last subject and last input.
__ mov(eax, Operand(esp, kSubjectOffset));
+ __ mov(ecx, eax);
__ mov(FieldOperand(ebx, RegExpImpl::kLastSubjectOffset), eax);
__ RecordWriteField(ebx,
RegExpImpl::kLastSubjectOffset,
eax,
edi,
kDontSaveFPRegs);
- __ mov(eax, Operand(esp, kSubjectOffset));
+ __ mov(eax, ecx);
__ mov(FieldOperand(ebx, RegExpImpl::kLastInputOffset), eax);
__ RecordWriteField(ebx,
RegExpImpl::kLastInputOffset,
@@ -4094,30 +2198,64 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ mov(eax, Operand(esp, kLastMatchInfoOffset));
__ ret(4 * kPointerSize);
- // External string. Short external strings have already been ruled out.
- // eax: subject string (expected to be external)
- // ebx: scratch
+ // Do the runtime call to execute the regexp.
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+
+ // Deferred code for string handling.
+ // (7) Not a long external string? If yes, go to (10).
+ __ bind(&not_seq_nor_cons);
+ // Compare flags are still set from (3).
+ __ j(greater, &not_long_external, Label::kNear); // Go to (10).
+
+ // (8) External string. Short external strings have been ruled out.
__ bind(&external_string);
+ // Reload instance type.
__ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
__ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
if (FLAG_debug_code) {
// Assert that we do not have a cons or slice (indirect strings) here.
// Sequential strings have already been ruled out.
__ test_b(ebx, kIsIndirectStringMask);
- __ Assert(zero, "external string expected, but not found");
+ __ Assert(zero, kExternalStringExpectedButNotFound);
}
__ mov(eax, FieldOperand(eax, ExternalString::kResourceDataOffset));
// Move the pointer so that offset-wise, it looks like a sequential string.
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
__ sub(eax, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
STATIC_ASSERT(kTwoByteStringTag == 0);
+ // (8a) Is the external string one byte? If yes, go to (6).
__ test_b(ebx, kStringEncodingMask);
- __ j(not_zero, &seq_ascii_string);
- __ jmp(&seq_two_byte_string);
+  __ j(not_zero, &seq_one_byte_string);  // Go to (6).
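
Editor's note on the pointer adjustment in (8) above: after subtracting SeqTwoByteString::kHeaderSize - kHeapObjectTag from the resource data pointer, the "string" is no longer a real object, but adding the sequential-string header offset back when computing character addresses lands exactly on the external data. A tiny arithmetic sketch (the offsets are assumed example values, not V8's):

    #include <cassert>
    #include <cstdint>

    int main() {
      // Assumed example values, only to show the cancellation.
      const intptr_t kHeapObjectTag = 1;
      const intptr_t kSeqStringHeaderSize = 12;  // stands in for SeqTwoByteString::kHeaderSize

      intptr_t resource_data = 0x5000;           // where the external chars really live
      // The stub's adjustment: make the pointer look like a sequential string.
      intptr_t fake_string = resource_data - (kSeqStringHeaderSize - kHeapObjectTag);

      intptr_t index = 7;
      // The shared addressing pattern FieldOperand(str, index, times_1, kHeaderSize):
      intptr_t char_addr = fake_string + index - kHeapObjectTag + kSeqStringHeaderSize;
      assert(char_addr == resource_data + index);  // header offset and tag cancel out
      return 0;
    }
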
- // Do the runtime call to execute the regexp.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ // eax: sequential subject string (or look-alike, external string)
+ // edx: original subject string
+ // ecx: RegExp data (FixedArray)
+  // (9) Two byte sequential. Load regexp code for two byte. Go to (E).
+ __ bind(&seq_two_byte_string);
+ // Load previous index and check range before edx is overwritten. We have
+ // to use edx instead of eax here because it might have been only made to
+ // look like a sequential string when it actually is an external string.
+ __ mov(ebx, Operand(esp, kPreviousIndexOffset));
+ __ JumpIfNotSmi(ebx, &runtime);
+ __ cmp(ebx, FieldOperand(edx, String::kLengthOffset));
+ __ j(above_equal, &runtime);
+ __ mov(edx, FieldOperand(ecx, JSRegExp::kDataUC16CodeOffset));
+ __ Set(ecx, Immediate(0)); // Type is two byte.
+ __ jmp(&check_code); // Go to (E).
+
+ // (10) Not a string or a short external string? If yes, bail out to runtime.
+ __ bind(&not_long_external);
+ // Catch non-string subject or short external string.
+ STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag !=0);
+ __ test(ebx, Immediate(kIsNotStringMask | kShortExternalStringTag));
+ __ j(not_zero, &runtime);
+
+ // (11) Sliced string. Replace subject with parent. Go to (5a).
+ // Load offset into edi and replace subject string with parent.
+ __ mov(edi, FieldOperand(eax, SlicedString::kOffsetOffset));
+ __ mov(eax, FieldOperand(eax, SlicedString::kParentOffset));
+ __ jmp(&check_underlying); // Go to (5a).
#endif // V8_INTERPRETED_REGEXP
}
@@ -4136,14 +2274,15 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
// Allocate RegExpResult followed by FixedArray with size in ebx.
// JSArray: [Map][empty properties][Elements][Length-smi][index][input]
// Elements: [Map][Length][..elements..]
- __ AllocateInNewSpace(JSRegExpResult::kSize + FixedArray::kHeaderSize,
- times_half_pointer_size,
- ebx, // In: Number of elements (times 2, being a smi)
- eax, // Out: Start of allocation (tagged).
- ecx, // Out: End of allocation.
- edx, // Scratch register
- &slowcase,
- TAG_OBJECT);
+ __ Allocate(JSRegExpResult::kSize + FixedArray::kHeaderSize,
+ times_pointer_size,
+ ebx, // In: Number of elements as a smi
+ REGISTER_VALUE_IS_SMI,
+ eax, // Out: Start of allocation (tagged).
+ ecx, // Out: End of allocation.
+ edx, // Scratch register
+ &slowcase,
+ TAG_OBJECT);
// eax: Start of allocated area, object-tagged.
// Set JSArray map to global.regexp_result_map().
@@ -4203,228 +2342,109 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
}
-void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
- Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- bool object_is_smi,
- Label* not_found) {
- // Use of registers. Register result is used as a temporary.
- Register number_string_cache = result;
- Register mask = scratch1;
- Register scratch = scratch2;
-
- // Load the number string cache.
- ExternalReference roots_array_start =
- ExternalReference::roots_array_start(masm->isolate());
- __ mov(scratch, Immediate(Heap::kNumberStringCacheRootIndex));
- __ mov(number_string_cache,
- Operand::StaticArray(scratch, times_pointer_size, roots_array_start));
- // Make the hash mask from the length of the number string cache. It
- // contains two elements (number and string) for each cache entry.
- __ mov(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
- __ shr(mask, kSmiTagSize + 1); // Untag length and divide it by two.
- __ sub(mask, Immediate(1)); // Make mask.
-
- // Calculate the entry in the number string cache. The hash value in the
- // number string cache for smis is just the smi value, and the hash for
- // doubles is the xor of the upper and lower words. See
- // Heap::GetNumberStringCache.
- Label smi_hash_calculated;
- Label load_result_from_cache;
- if (object_is_smi) {
- __ mov(scratch, object);
- __ SmiUntag(scratch);
- } else {
- Label not_smi;
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfNotSmi(object, &not_smi, Label::kNear);
- __ mov(scratch, object);
- __ SmiUntag(scratch);
- __ jmp(&smi_hash_calculated, Label::kNear);
- __ bind(&not_smi);
- __ cmp(FieldOperand(object, HeapObject::kMapOffset),
- masm->isolate()->factory()->heap_number_map());
- __ j(not_equal, not_found);
- STATIC_ASSERT(8 == kDoubleSize);
- __ mov(scratch, FieldOperand(object, HeapNumber::kValueOffset));
- __ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
- // Object is heap number and hash is now in scratch. Calculate cache index.
- __ and_(scratch, mask);
- Register index = scratch;
- Register probe = mask;
- __ mov(probe,
- FieldOperand(number_string_cache,
- index,
- times_twice_pointer_size,
- FixedArray::kHeaderSize));
- __ JumpIfSmi(probe, not_found);
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope fscope(SSE2);
- __ movdbl(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
- __ movdbl(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
- __ ucomisd(xmm0, xmm1);
- } else {
- __ fld_d(FieldOperand(object, HeapNumber::kValueOffset));
- __ fld_d(FieldOperand(probe, HeapNumber::kValueOffset));
- __ FCmp();
- }
- __ j(parity_even, not_found); // Bail out if NaN is involved.
- __ j(not_equal, not_found); // The cache did not contain this value.
- __ jmp(&load_result_from_cache, Label::kNear);
- }
-
- __ bind(&smi_hash_calculated);
- // Object is smi and hash is now in scratch. Calculate cache index.
- __ and_(scratch, mask);
- Register index = scratch;
- // Check if the entry is the smi we are looking for.
- __ cmp(object,
- FieldOperand(number_string_cache,
- index,
- times_twice_pointer_size,
- FixedArray::kHeaderSize));
- __ j(not_equal, not_found);
-
- // Get the result from the cache.
- __ bind(&load_result_from_cache);
- __ mov(result,
- FieldOperand(number_string_cache,
- index,
- times_twice_pointer_size,
- FixedArray::kHeaderSize + kPointerSize));
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->number_to_string_native(), 1);
+static int NegativeComparisonResult(Condition cc) {
+ ASSERT(cc != equal);
+ ASSERT((cc == less) || (cc == less_equal)
+ || (cc == greater) || (cc == greater_equal));
+ return (cc == greater || cc == greater_equal) ? LESS : GREATER;
}
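
Editor's note: the helper above picks the answer that makes the comparison come out false, which is what undefined and NaN operands require. For a relation that asks "greater", returning LESS guarantees the subsequent test fails, and vice versa. A short sketch of the same mapping (the enum values stand in for V8's -1/0/+1 comparison results and are illustrative):

    #include <cassert>

    enum ComparisonResult { LESS = -1, EQUAL = 0, GREATER = 1 };  // illustrative
    enum Condition { kLess, kLessEqual, kGreater, kGreaterEqual };

    // Mirror of NegativeComparisonResult: answer so that "x <cc> y" evaluates false.
    ComparisonResult NegativeResult(Condition cc) {
      return (cc == kGreater || cc == kGreaterEqual) ? LESS : GREATER;
    }

    int main() {
      // "NaN > 1" must be false, so the stub reports LESS.
      assert(NegativeResult(kGreater) == LESS);
      // "NaN < 1" must be false, so the stub reports GREATER.
      assert(NegativeResult(kLess) == GREATER);
      return 0;
    }
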
-void NumberToStringStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- __ mov(ebx, Operand(esp, kPointerSize));
-
- // Generate code to lookup number in the number string cache.
- GenerateLookupNumberStringCache(masm, ebx, eax, ecx, edx, false, &runtime);
- __ ret(1 * kPointerSize);
-
- __ bind(&runtime);
- // Handle number to string in the runtime system if not found in the cache.
- __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);
+static void CheckInputType(MacroAssembler* masm,
+ Register input,
+ CompareIC::State expected,
+ Label* fail) {
+ Label ok;
+ if (expected == CompareIC::SMI) {
+ __ JumpIfNotSmi(input, fail);
+ } else if (expected == CompareIC::NUMBER) {
+ __ JumpIfSmi(input, &ok);
+ __ cmp(FieldOperand(input, HeapObject::kMapOffset),
+ Immediate(masm->isolate()->factory()->heap_number_map()));
+ __ j(not_equal, fail);
+ }
+ // We could be strict about internalized/non-internalized here, but as long as
+ // hydrogen doesn't care, the stub doesn't have to care either.
+ __ bind(&ok);
}
-static int NegativeComparisonResult(Condition cc) {
- ASSERT(cc != equal);
- ASSERT((cc == less) || (cc == less_equal)
- || (cc == greater) || (cc == greater_equal));
- return (cc == greater || cc == greater_equal) ? LESS : GREATER;
+static void BranchIfNotInternalizedString(MacroAssembler* masm,
+ Label* label,
+ Register object,
+ Register scratch) {
+ __ JumpIfSmi(object, label);
+ __ mov(scratch, FieldOperand(object, HeapObject::kMapOffset));
+ __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
+ STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
+ __ test(scratch, Immediate(kIsNotStringMask | kIsNotInternalizedMask));
+ __ j(not_zero, label);
}
-void CompareStub::Generate(MacroAssembler* masm) {
- ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
+void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
Label check_unequal_objects;
+ Condition cc = GetCondition();
- // Compare two smis if required.
- if (include_smi_compare_) {
- Label non_smi, smi_done;
- __ mov(ecx, edx);
- __ or_(ecx, eax);
- __ JumpIfNotSmi(ecx, &non_smi, Label::kNear);
- __ sub(edx, eax); // Return on the result of the subtraction.
- __ j(no_overflow, &smi_done, Label::kNear);
- __ not_(edx); // Correct sign in case of overflow. edx is never 0 here.
- __ bind(&smi_done);
- __ mov(eax, edx);
- __ ret(0);
- __ bind(&non_smi);
- } else if (FLAG_debug_code) {
- __ mov(ecx, edx);
- __ or_(ecx, eax);
- __ test(ecx, Immediate(kSmiTagMask));
- __ Assert(not_zero, "Unexpected smi operands.");
- }
+ Label miss;
+ CheckInputType(masm, edx, left_, &miss);
+ CheckInputType(masm, eax, right_, &miss);
+
+ // Compare two smis.
+ Label non_smi, smi_done;
+ __ mov(ecx, edx);
+ __ or_(ecx, eax);
+ __ JumpIfNotSmi(ecx, &non_smi, Label::kNear);
+ __ sub(edx, eax); // Return on the result of the subtraction.
+ __ j(no_overflow, &smi_done, Label::kNear);
+ __ not_(edx); // Correct sign in case of overflow. edx is never 0 here.
+ __ bind(&smi_done);
+ __ mov(eax, edx);
+ __ ret(0);
+ __ bind(&non_smi);
// NOTICE! This code is only reached after a smi-fast-case check, so
// it is certain that at least one operand isn't a smi.
// Identical objects can be compared fast, but there are some tricky cases
// for NaN and undefined.
+ Label generic_heap_number_comparison;
{
Label not_identical;
__ cmp(eax, edx);
__ j(not_equal, &not_identical);
- if (cc_ != equal) {
+ if (cc != equal) {
// Check for undefined. undefined OP undefined is false even though
// undefined == undefined.
Label check_for_nan;
__ cmp(edx, masm->isolate()->factory()->undefined_value());
__ j(not_equal, &check_for_nan, Label::kNear);
- __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc_))));
+ __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc))));
__ ret(0);
__ bind(&check_for_nan);
}
- // Test for NaN. Sadly, we can't just compare to factory->nan_value(),
- // so we do the second best thing - test it ourselves.
- // Note: if cc_ != equal, never_nan_nan_ is not used.
- if (never_nan_nan_ && (cc_ == equal)) {
- __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
- __ ret(0);
- } else {
- Label heap_number;
- __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
- Immediate(masm->isolate()->factory()->heap_number_map()));
- __ j(equal, &heap_number, Label::kNear);
- if (cc_ != equal) {
- // Call runtime on identical JSObjects. Otherwise return equal.
- __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
- __ j(above_equal, &not_identical);
- }
- __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
- __ ret(0);
-
- __ bind(&heap_number);
- // It is a heap number, so return non-equal if it's NaN and equal if
- // it's not NaN.
- // The representation of NaN values has all exponent bits (52..62) set,
- // and not all mantissa bits (0..51) clear.
- // We only accept QNaNs, which have bit 51 set.
- // Read top bits of double representation (second word of value).
-
- // Value is a QNaN if value & kQuietNaNMask == kQuietNaNMask, i.e.,
- // all bits in the mask are set. We only need to check the word
- // that contains the exponent and high bit of the mantissa.
- STATIC_ASSERT(((kQuietNaNHighBitsMask << 1) & 0x80000000u) != 0);
- __ mov(edx, FieldOperand(edx, HeapNumber::kExponentOffset));
- __ Set(eax, Immediate(0));
- // Shift value and mask so kQuietNaNHighBitsMask applies to topmost
- // bits.
- __ add(edx, edx);
- __ cmp(edx, kQuietNaNHighBitsMask << 1);
- if (cc_ == equal) {
- STATIC_ASSERT(EQUAL != 1);
- __ setcc(above_equal, eax);
- __ ret(0);
- } else {
- Label nan;
- __ j(above_equal, &nan, Label::kNear);
- __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
- __ ret(0);
- __ bind(&nan);
- __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc_))));
- __ ret(0);
- }
+ // Test for NaN. Compare heap numbers in a general way,
+  // to handle NaNs correctly.
+ __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
+ Immediate(masm->isolate()->factory()->heap_number_map()));
+ __ j(equal, &generic_heap_number_comparison, Label::kNear);
+ if (cc != equal) {
+ // Call runtime on identical JSObjects. Otherwise return equal.
+ __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
+ __ j(above_equal, &not_identical);
}
+ __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
+ __ ret(0);
+
__ bind(&not_identical);
}
// Strict equality can quickly decide whether objects are equal.
// Non-strict object equality is slower, so it is handled later in the stub.
- if (cc_ == equal && strict_) {
+ if (cc == equal && strict()) {
Label slow; // Fallthrough label.
Label not_smis;
// If we're doing a strict equality comparison, we don't have to do
@@ -4495,75 +2515,74 @@ void CompareStub::Generate(MacroAssembler* masm) {
}
// Generate the number comparison code.
- if (include_number_compare_) {
- Label non_number_comparison;
- Label unordered;
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- CpuFeatures::Scope use_cmov(CMOV);
-
- FloatingPointHelper::LoadSSE2Operands(masm, &non_number_comparison);
- __ ucomisd(xmm0, xmm1);
-
- // Don't base result on EFLAGS when a NaN is involved.
- __ j(parity_even, &unordered, Label::kNear);
- // Return a result of -1, 0, or 1, based on EFLAGS.
- __ mov(eax, 0); // equal
- __ mov(ecx, Immediate(Smi::FromInt(1)));
- __ cmov(above, eax, ecx);
- __ mov(ecx, Immediate(Smi::FromInt(-1)));
- __ cmov(below, eax, ecx);
- __ ret(0);
- } else {
- FloatingPointHelper::CheckFloatOperands(
- masm, &non_number_comparison, ebx);
- FloatingPointHelper::LoadFloatOperand(masm, eax);
- FloatingPointHelper::LoadFloatOperand(masm, edx);
- __ FCmp();
+ Label non_number_comparison;
+ Label unordered;
+ __ bind(&generic_heap_number_comparison);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatureScope use_sse2(masm, SSE2);
+ CpuFeatureScope use_cmov(masm, CMOV);
- // Don't base result on EFLAGS when a NaN is involved.
- __ j(parity_even, &unordered, Label::kNear);
+ FloatingPointHelper::LoadSSE2Operands(masm, &non_number_comparison);
+ __ ucomisd(xmm0, xmm1);
- Label below_label, above_label;
- // Return a result of -1, 0, or 1, based on EFLAGS.
- __ j(below, &below_label, Label::kNear);
- __ j(above, &above_label, Label::kNear);
+ // Don't base result on EFLAGS when a NaN is involved.
+ __ j(parity_even, &unordered, Label::kNear);
+ // Return a result of -1, 0, or 1, based on EFLAGS.
+ __ mov(eax, 0); // equal
+ __ mov(ecx, Immediate(Smi::FromInt(1)));
+ __ cmov(above, eax, ecx);
+ __ mov(ecx, Immediate(Smi::FromInt(-1)));
+ __ cmov(below, eax, ecx);
+ __ ret(0);
+ } else {
+ FloatingPointHelper::CheckFloatOperands(
+ masm, &non_number_comparison, ebx);
+ FloatingPointHelper::LoadFloatOperand(masm, eax);
+ FloatingPointHelper::LoadFloatOperand(masm, edx);
+ __ FCmp();
- __ Set(eax, Immediate(0));
- __ ret(0);
+ // Don't base result on EFLAGS when a NaN is involved.
+ __ j(parity_even, &unordered, Label::kNear);
- __ bind(&below_label);
- __ mov(eax, Immediate(Smi::FromInt(-1)));
- __ ret(0);
+ Label below_label, above_label;
+ // Return a result of -1, 0, or 1, based on EFLAGS.
+ __ j(below, &below_label, Label::kNear);
+ __ j(above, &above_label, Label::kNear);
- __ bind(&above_label);
- __ mov(eax, Immediate(Smi::FromInt(1)));
- __ ret(0);
- }
+ __ Set(eax, Immediate(0));
+ __ ret(0);
- // If one of the numbers was NaN, then the result is always false.
- // The cc is never not-equal.
- __ bind(&unordered);
- ASSERT(cc_ != not_equal);
- if (cc_ == less || cc_ == less_equal) {
- __ mov(eax, Immediate(Smi::FromInt(1)));
- } else {
- __ mov(eax, Immediate(Smi::FromInt(-1)));
- }
+ __ bind(&below_label);
+ __ mov(eax, Immediate(Smi::FromInt(-1)));
__ ret(0);
- // The number comparison code did not provide a valid result.
- __ bind(&non_number_comparison);
+ __ bind(&above_label);
+ __ mov(eax, Immediate(Smi::FromInt(1)));
+ __ ret(0);
}
- // Fast negative check for symbol-to-symbol equality.
+ // If one of the numbers was NaN, then the result is always false.
+ // The cc is never not-equal.
+ __ bind(&unordered);
+ ASSERT(cc != not_equal);
+ if (cc == less || cc == less_equal) {
+ __ mov(eax, Immediate(Smi::FromInt(1)));
+ } else {
+ __ mov(eax, Immediate(Smi::FromInt(-1)));
+ }
+ __ ret(0);
+
+ // The number comparison code did not provide a valid result.
+ __ bind(&non_number_comparison);
+
+ // Fast negative check for internalized-to-internalized equality.
Label check_for_strings;
- if (cc_ == equal) {
- BranchIfNonSymbol(masm, &check_for_strings, eax, ecx);
- BranchIfNonSymbol(masm, &check_for_strings, edx, ecx);
+ if (cc == equal) {
+ BranchIfNotInternalizedString(masm, &check_for_strings, eax, ecx);
+ BranchIfNotInternalizedString(masm, &check_for_strings, edx, ecx);
// We've already checked for object identity, so if both operands
- // are symbols they aren't equal. Register eax already holds a
+ // are internalized they aren't equal. Register eax already holds a
// non-zero value, which indicates not equal, so just return.
__ ret(0);
}
@@ -4574,7 +2593,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
&check_unequal_objects);
// Inline comparison of ASCII strings.
- if (cc_ == equal) {
+ if (cc == equal) {
StringCompareStub::GenerateFlatAsciiStringEquals(masm,
edx,
eax,
@@ -4589,11 +2608,11 @@ void CompareStub::Generate(MacroAssembler* masm) {
edi);
}
#ifdef DEBUG
- __ Abort("Unexpected fall-through from string comparison");
+ __ Abort(kUnexpectedFallThroughFromStringComparison);
#endif
__ bind(&check_unequal_objects);
- if (cc_ == equal && !strict_) {
+ if (cc == equal && !strict()) {
// Non-strict equality. Objects are unequal if
// they are both JSObjects and not undetectable,
// and their pointers are different.
@@ -4637,11 +2656,11 @@ void CompareStub::Generate(MacroAssembler* masm) {
// Figure out which native to call and setup the arguments.
Builtins::JavaScript builtin;
- if (cc_ == equal) {
- builtin = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
+ if (cc == equal) {
+ builtin = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
} else {
builtin = Builtins::COMPARE;
- __ push(Immediate(Smi::FromInt(NegativeComparisonResult(cc_))));
+ __ push(Immediate(Smi::FromInt(NegativeComparisonResult(cc))));
}
// Restore return address on the stack.
@@ -4650,29 +2669,9 @@ void CompareStub::Generate(MacroAssembler* masm) {
// Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
__ InvokeBuiltin(builtin, JUMP_FUNCTION);
-}
-
-void CompareStub::BranchIfNonSymbol(MacroAssembler* masm,
- Label* label,
- Register object,
- Register scratch) {
- __ JumpIfSmi(object, label);
- __ mov(scratch, FieldOperand(object, HeapObject::kMapOffset));
- __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
- __ and_(scratch, kIsSymbolMask | kIsNotStringMask);
- __ cmp(scratch, kSymbolTag | kStringTag);
- __ j(not_equal, label);
-}
-
-
-void StackCheckStub::Generate(MacroAssembler* masm) {
- __ TailCallRuntime(Runtime::kStackGuard, 0, 1);
-}
-
-
-void InterruptStub::Generate(MacroAssembler* masm) {
- __ TailCallRuntime(Runtime::kInterrupt, 0, 1);
+ __ bind(&miss);
+ GenerateMiss(masm);
}
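
Editor's note on the smi fast path at the top of GenerateGeneric: the stub returns edx - eax directly as the comparison result, and on signed overflow it NOTs the difference, which restores the correct sign and can never produce zero (the operands must differ for overflow to happen). A hedged 32-bit sketch of that correction:

    #include <cassert>
    #include <cstdint>

    // Emulates the stub's smi comparison: result = left - right; on signed 32-bit
    // overflow the bitwise NOT of the wrapped difference restores the sign.
    int32_t CompareSmis(int32_t tagged_left, int32_t tagged_right) {
      int64_t wide = static_cast<int64_t>(tagged_left) - tagged_right;
      int32_t wrapped = static_cast<int32_t>(static_cast<uint32_t>(wide));
      bool overflow = wide != wrapped;
      return overflow ? ~wrapped : wrapped;  // never 0 when overflow happened
    }

    int main() {
      auto tag = [](int32_t v) {
        return static_cast<int32_t>(static_cast<uint32_t>(v) << 1);
      };
      assert(CompareSmis(tag(3), tag(3)) == 0);
      assert(CompareSmis(tag(2), tag(5)) < 0);
      // Overflowing case: a large positive minus a large negative smi.
      assert(CompareSmis(tag(0x3fffffff), tag(-0x40000000)) > 0);
      return 0;
    }
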
@@ -4680,34 +2679,84 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// Cache the called function in a global property cell. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
+ // eax : number of arguments to the construct function
// ebx : cache cell for call target
// edi : the function to call
Isolate* isolate = masm->isolate();
- Label initialize, done;
+ Label initialize, done, miss, megamorphic, not_array_function;
// Load the cache state into ecx.
- __ mov(ecx, FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset));
+ __ mov(ecx, FieldOperand(ebx, Cell::kValueOffset));
// A monomorphic cache hit or an already megamorphic state: invoke the
// function without changing the state.
__ cmp(ecx, edi);
- __ j(equal, &done, Label::kNear);
+ __ j(equal, &done);
__ cmp(ecx, Immediate(TypeFeedbackCells::MegamorphicSentinel(isolate)));
- __ j(equal, &done, Label::kNear);
+ __ j(equal, &done);
+
+ // If we came here, we need to see if we are the array function.
+  // If we didn't have a matching function, and we didn't find the megamorphic
+ // sentinel, then we have in the cell either some other function or an
+ // AllocationSite. Do a map check on the object in ecx.
+ Handle<Map> allocation_site_map =
+ masm->isolate()->factory()->allocation_site_map();
+ __ cmp(FieldOperand(ecx, 0), Immediate(allocation_site_map));
+ __ j(not_equal, &miss);
+
+ // Load the global or builtins object from the current context
+ __ LoadGlobalContext(ecx);
+ // Make sure the function is the Array() function
+ __ cmp(edi, Operand(ecx,
+ Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
+ __ j(not_equal, &megamorphic);
+ __ jmp(&done);
+
+ __ bind(&miss);
// A monomorphic miss (i.e, here the cache is not uninitialized) goes
// megamorphic.
__ cmp(ecx, Immediate(TypeFeedbackCells::UninitializedSentinel(isolate)));
- __ j(equal, &initialize, Label::kNear);
+ __ j(equal, &initialize);
// MegamorphicSentinel is an immortal immovable object (undefined) so no
// write-barrier is needed.
- __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
+ __ bind(&megamorphic);
+ __ mov(FieldOperand(ebx, Cell::kValueOffset),
Immediate(TypeFeedbackCells::MegamorphicSentinel(isolate)));
__ jmp(&done, Label::kNear);
- // An uninitialized cache is patched with the function.
+ // An uninitialized cache is patched with the function or sentinel to
+ // indicate the ElementsKind if function is the Array constructor.
__ bind(&initialize);
- __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset), edi);
+ __ LoadGlobalContext(ecx);
+ // Make sure the function is the Array() function
+ __ cmp(edi, Operand(ecx,
+ Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
+ __ j(not_equal, &not_array_function);
+
+  // The target function is the Array constructor.
+  // Create an AllocationSite if we don't already have it; store it in the cell.
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Arguments register must be smi-tagged to call out.
+ __ SmiTag(eax);
+ __ push(eax);
+ __ push(edi);
+ __ push(ebx);
+
+ CreateAllocationSiteStub create_stub;
+ __ CallStub(&create_stub);
+
+ __ pop(ebx);
+ __ pop(edi);
+ __ pop(eax);
+ __ SmiUntag(eax);
+ }
+ __ jmp(&done);
+
+ __ bind(&not_array_function);
+ __ mov(FieldOperand(ebx, Cell::kValueOffset), edi);
// No need for a write barrier here - cells are rescanned.
__ bind(&done);
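
Editor's note, summarizing the cache-state transitions the block above implements: uninitialized goes to a monomorphic function (or to an AllocationSite when the callee is the Array function), and any conflicting target collapses the cell to the megamorphic sentinel. A rough, non-authoritative C++ model; the sentinel names mirror the stub but the types are simplified placeholders:

    #include <cassert>
    #include <string>

    // Simplified placeholder model of the call-target cell state machine.
    struct Cell { std::string value = "uninitialized"; };

    void RecordCallTarget(Cell* cell, const std::string& callee, bool is_array_function) {
      if (cell->value == callee || cell->value == "megamorphic") return;   // no change
      if (cell->value == "allocation_site" && is_array_function) return;   // monomorphic Array hit
      if (cell->value == "uninitialized") {
        cell->value = is_array_function ? "allocation_site" : callee;      // initialize
        return;
      }
      cell->value = "megamorphic";                                         // any conflict
    }

    int main() {
      Cell c;
      RecordCallTarget(&c, "Array", true);
      assert(c.value == "allocation_site");
      RecordCallTarget(&c, "Array", true);   // stays monomorphic for Array
      assert(c.value == "allocation_site");
      RecordCallTarget(&c, "f", false);      // conflicting target goes megamorphic
      assert(c.value == "megamorphic");
      return 0;
    }
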
@@ -4774,7 +2823,7 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
// If there is a call target cache, mark it megamorphic in the
// non-function case. MegamorphicSentinel is an immortal immovable
// object (undefined) so no write barrier is needed.
- __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
+ __ mov(FieldOperand(ebx, Cell::kValueOffset),
Immediate(TypeFeedbackCells::MegamorphicSentinel(isolate)));
}
// Check for function proxy.
@@ -4822,10 +2871,12 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
}
// Jump to the function-specific construct stub.
- __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kConstructStubOffset));
- __ lea(ebx, FieldOperand(ebx, Code::kHeaderSize));
- __ jmp(ebx);
+ Register jmp_reg = ecx;
+ __ mov(jmp_reg, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(jmp_reg, FieldOperand(jmp_reg,
+ SharedFunctionInfo::kConstructStubOffset));
+ __ lea(jmp_reg, FieldOperand(jmp_reg, Code::kHeaderSize));
+ __ jmp(jmp_reg);
// edi: called object
// eax: number of arguments
@@ -4854,35 +2905,64 @@ bool CEntryStub::NeedsImmovableCode() {
}
-bool CEntryStub::IsPregenerated() {
- return (!save_doubles_ || ISOLATE->fp_stubs_generated()) &&
+bool CEntryStub::IsPregenerated(Isolate* isolate) {
+ return (!save_doubles_ || isolate->fp_stubs_generated()) &&
result_size_ == 1;
}
-void CodeStub::GenerateStubsAheadOfTime() {
- CEntryStub::GenerateAheadOfTime();
- StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime();
+void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
+ CEntryStub::GenerateAheadOfTime(isolate);
+ StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
+ StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
// It is important that the store buffer overflow stubs are generated first.
- RecordWriteStub::GenerateFixedRegStubsAheadOfTime();
+ RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate);
+ ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
+ CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
+ if (Serializer::enabled()) {
+ PlatformFeatureScope sse2(SSE2);
+ BinaryOpStub::GenerateAheadOfTime(isolate);
+ } else {
+ BinaryOpStub::GenerateAheadOfTime(isolate);
+ }
}
-void CodeStub::GenerateFPStubs() {
- CEntryStub save_doubles(1, kSaveFPRegs);
- Handle<Code> code = save_doubles.GetCode();
- code->set_is_pregenerated(true);
- code->GetIsolate()->set_fp_stubs_generated(true);
+void CodeStub::GenerateFPStubs(Isolate* isolate) {
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CEntryStub save_doubles(1, kSaveFPRegs);
+ // Stubs might already be in the snapshot, detect that and don't regenerate,
+ // which would lead to code stub initialization state being messed up.
+ Code* save_doubles_code;
+ if (!save_doubles.FindCodeInCache(&save_doubles_code, isolate)) {
+ save_doubles_code = *(save_doubles.GetCode(isolate));
+ }
+ save_doubles_code->set_is_pregenerated(true);
+ isolate->set_fp_stubs_generated(true);
+ }
}
-void CEntryStub::GenerateAheadOfTime() {
+void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
CEntryStub stub(1, kDontSaveFPRegs);
- Handle<Code> code = stub.GetCode();
+ Handle<Code> code = stub.GetCode(isolate);
code->set_is_pregenerated(true);
}
+static void JumpIfOOM(MacroAssembler* masm,
+ Register value,
+ Register scratch,
+ Label* oom_label) {
+ __ mov(scratch, value);
+ STATIC_ASSERT(Failure::OUT_OF_MEMORY_EXCEPTION == 3);
+ STATIC_ASSERT(kFailureTag == 3);
+ __ and_(scratch, 0xf);
+ __ cmp(scratch, 0xf);
+ __ j(equal, oom_label);
+}
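
Editor's note: the two STATIC_ASSERTs above are what make the 0xf trick sound. Failure objects carry kFailureTag (3) in the low two bits and the failure type in the next two, so a value whose low nibble is 0b1111 is precisely a failure of type OUT_OF_MEMORY_EXCEPTION (3). A small bit-level sketch; the encoder helper is illustrative and derived only from those asserts:

    #include <cassert>
    #include <cstdint>

    const uint32_t kFailureTag = 3;            // low two bits of every failure word
    const uint32_t kFailureTagSize = 2;
    const uint32_t kOutOfMemoryException = 3;  // failure type, next two bits

    // Illustrative encoder for a failure word's low nibble, given its type.
    uint32_t MakeFailure(uint32_t type) { return (type << kFailureTagSize) | kFailureTag; }

    bool JumpIfOOMWouldTrigger(uint32_t value) { return (value & 0xf) == 0xf; }

    int main() {
      assert(JumpIfOOMWouldTrigger(MakeFailure(kOutOfMemoryException)));
      assert(!JumpIfOOMWouldTrigger(MakeFailure(1)));  // e.g. a retry-after-GC failure
      assert(!JumpIfOOMWouldTrigger(0x12345679));      // a tagged heap pointer, low bits 01
      return 0;
    }
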
+
+
void CEntryStub::GenerateCore(MacroAssembler* masm,
Label* throw_normal_exception,
Label* throw_termination_exception,
@@ -4909,6 +2989,8 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// stack alignment is known to be correct. This function takes one argument
// which is passed on the stack, and we know that the stack has been
// prepared to pass at least one argument.
+ __ mov(Operand(esp, 1 * kPointerSize),
+ Immediate(ExternalReference::isolate_address(masm->isolate())));
__ mov(Operand(esp, 0 * kPointerSize), eax); // Result.
__ call(FUNCTION_ADDR(Runtime::PerformGC), RelocInfo::RUNTIME_ENTRY);
}
@@ -4923,7 +3005,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
__ mov(Operand(esp, 0 * kPointerSize), edi); // argc.
__ mov(Operand(esp, 1 * kPointerSize), esi); // argv.
__ mov(Operand(esp, 2 * kPointerSize),
- Immediate(ExternalReference::isolate_address()));
+ Immediate(ExternalReference::isolate_address(masm->isolate())));
__ call(ebx);
// Result is in eax or edx:eax - do not destroy these registers!
@@ -4931,8 +3013,8 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
__ dec(Operand::StaticVariable(scope_depth));
}
- // Make sure we're not trying to return 'the hole' from the runtime
- // call as this may lead to crashes in the IC code later.
+ // Runtime functions should not return 'the hole'. Allowing it to escape may
+ // lead to crashes in the IC code later.
if (FLAG_debug_code) {
Label okay;
__ cmp(eax, masm->isolate()->factory()->the_hole_value());
@@ -4980,11 +3062,15 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
__ j(zero, &retry, Label::kNear);
// Special handling of out of memory exceptions.
- __ cmp(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException()));
- __ j(equal, throw_out_of_memory_exception);
+ JumpIfOOM(masm, eax, ecx, throw_out_of_memory_exception);
- // Retrieve the pending exception and clear the variable.
+ // Retrieve the pending exception.
__ mov(eax, Operand::StaticVariable(pending_exception_address));
+
+ // See if we just retrieved an OOM exception.
+ JumpIfOOM(masm, eax, ecx, throw_out_of_memory_exception);
+
+ // Clear the pending exception.
__ mov(edx, Immediate(masm->isolate()->factory()->the_hole_value()));
__ mov(Operand::StaticVariable(pending_exception_address), edx);
@@ -5009,6 +3095,8 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// esi: current context (C callee-saved)
// edi: JS function of the caller (C callee-saved)
+ ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
// NOTE: Invocations of builtins may return failure objects instead
// of a proper result. The builtin entry handles this by performing
// a garbage collection and retrying the builtin (twice).
@@ -5063,7 +3151,10 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// Set pending exception and eax to out of memory exception.
ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
isolate);
- __ mov(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException()));
+ Label already_have_failure;
+ JumpIfOOM(masm, eax, ecx, &already_have_failure);
+ __ mov(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException(0x1)));
+ __ bind(&already_have_failure);
__ mov(Operand::StaticVariable(pending_exception), eax);
// Fall through to the next label.
@@ -5079,6 +3170,8 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
Label invoke, handler_entry, exit;
Label not_outermost_js, not_outermost_js_2;
+ ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
// Set up frame.
__ push(ebp);
__ mov(ebp, esp);
@@ -5212,9 +3305,6 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
static const int8_t kCmpEdiOperandByte2 = BitCast<int8_t, uint8_t>(0x3d);
static const int8_t kMovEaxImmediateByte = BitCast<int8_t, uint8_t>(0xb8);
- ExternalReference roots_array_start =
- ExternalReference::roots_array_start(masm->isolate());
-
ASSERT_EQ(object.code(), InstanceofStub::left().code());
ASSERT_EQ(function.code(), InstanceofStub::right().code());
@@ -5234,18 +3324,11 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
if (!HasCallSiteInlineCheck()) {
// Look up the function and the map in the instanceof cache.
Label miss;
- __ mov(scratch, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
- __ cmp(function, Operand::StaticArray(scratch,
- times_pointer_size,
- roots_array_start));
+ __ CompareRoot(function, scratch, Heap::kInstanceofCacheFunctionRootIndex);
__ j(not_equal, &miss, Label::kNear);
- __ mov(scratch, Immediate(Heap::kInstanceofCacheMapRootIndex));
- __ cmp(map, Operand::StaticArray(
- scratch, times_pointer_size, roots_array_start));
+ __ CompareRoot(map, scratch, Heap::kInstanceofCacheMapRootIndex);
__ j(not_equal, &miss, Label::kNear);
- __ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
- __ mov(eax, Operand::StaticArray(
- scratch, times_pointer_size, roots_array_start));
+ __ LoadRoot(eax, Heap::kInstanceofCacheAnswerRootIndex);
__ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
__ bind(&miss);
}
@@ -5260,12 +3343,8 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
// Update the global instanceof or call site inlined cache with the current
// map and function. The cached answer will be set when it is known below.
if (!HasCallSiteInlineCheck()) {
- __ mov(scratch, Immediate(Heap::kInstanceofCacheMapRootIndex));
- __ mov(Operand::StaticArray(scratch, times_pointer_size, roots_array_start),
- map);
- __ mov(scratch, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
- __ mov(Operand::StaticArray(scratch, times_pointer_size, roots_array_start),
- function);
+ __ StoreRoot(map, scratch, Heap::kInstanceofCacheMapRootIndex);
+ __ StoreRoot(function, scratch, Heap::kInstanceofCacheFunctionRootIndex);
} else {
// The constants for the code patching are based on no push instructions
// at the call site.
@@ -5275,9 +3354,9 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ sub(scratch, Operand(esp, 1 * kPointerSize));
if (FLAG_debug_code) {
__ cmpb(Operand(scratch, 0), kCmpEdiOperandByte1);
- __ Assert(equal, "InstanceofStub unexpected call site cache (cmp 1)");
+ __ Assert(equal, kInstanceofStubUnexpectedCallSiteCacheCmp1);
__ cmpb(Operand(scratch, 1), kCmpEdiOperandByte2);
- __ Assert(equal, "InstanceofStub unexpected call site cache (cmp 2)");
+ __ Assert(equal, kInstanceofStubUnexpectedCallSiteCacheCmp2);
}
__ mov(scratch, Operand(scratch, kDeltaToCmpImmediate));
__ mov(Operand(scratch, 0), map);
@@ -5299,10 +3378,8 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ bind(&is_instance);
if (!HasCallSiteInlineCheck()) {
- __ Set(eax, Immediate(0));
- __ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
- __ mov(Operand::StaticArray(scratch,
- times_pointer_size, roots_array_start), eax);
+ __ mov(eax, Immediate(0));
+ __ StoreRoot(eax, scratch, Heap::kInstanceofCacheAnswerRootIndex);
} else {
// Get return address and delta to inlined map check.
__ mov(eax, factory->true_value());
@@ -5310,7 +3387,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ sub(scratch, Operand(esp, 1 * kPointerSize));
if (FLAG_debug_code) {
__ cmpb(Operand(scratch, kDeltaToMov), kMovEaxImmediateByte);
- __ Assert(equal, "InstanceofStub unexpected call site cache (mov)");
+ __ Assert(equal, kInstanceofStubUnexpectedCallSiteCacheMov);
}
__ mov(Operand(scratch, kDeltaToMovImmediate), eax);
if (!ReturnTrueFalseObject()) {
@@ -5321,10 +3398,8 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ bind(&is_not_instance);
if (!HasCallSiteInlineCheck()) {
- __ Set(eax, Immediate(Smi::FromInt(1)));
- __ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
- __ mov(Operand::StaticArray(
- scratch, times_pointer_size, roots_array_start), eax);
+ __ mov(eax, Immediate(Smi::FromInt(1)));
+ __ StoreRoot(eax, scratch, Heap::kInstanceofCacheAnswerRootIndex);
} else {
// Get return address and delta to inlined map check.
__ mov(eax, factory->false_value());
@@ -5332,7 +3407,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ sub(scratch, Operand(esp, 1 * kPointerSize));
if (FLAG_debug_code) {
__ cmpb(Operand(scratch, kDeltaToMov), kMovEaxImmediateByte);
- __ Assert(equal, "InstanceofStub unexpected call site cache (mov)");
+ __ Assert(equal, kInstanceofStubUnexpectedCallSiteCacheMov);
}
__ mov(Operand(scratch, kDeltaToMovImmediate), eax);
if (!ReturnTrueFalseObject()) {
@@ -5407,44 +3482,6 @@ Register InstanceofStub::left() { return eax; }
Register InstanceofStub::right() { return edx; }
-int CompareStub::MinorKey() {
- // Encode the three parameters in a unique 16 bit value. To avoid duplicate
- // stubs the never NaN NaN condition is only taken into account if the
- // condition is equals.
- ASSERT(static_cast<unsigned>(cc_) < (1 << 12));
- ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
- return ConditionField::encode(static_cast<unsigned>(cc_))
- | RegisterField::encode(false) // lhs_ and rhs_ are not used
- | StrictField::encode(strict_)
- | NeverNanNanField::encode(cc_ == equal ? never_nan_nan_ : false)
- | IncludeNumberCompareField::encode(include_number_compare_)
- | IncludeSmiCompareField::encode(include_smi_compare_);
-}
-
-
-// Unfortunately you have to run without snapshots to see most of these
-// names in the profile since most compare stubs end up in the snapshot.
-void CompareStub::PrintName(StringStream* stream) {
- ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
- const char* cc_name;
- switch (cc_) {
- case less: cc_name = "LT"; break;
- case greater: cc_name = "GT"; break;
- case less_equal: cc_name = "LE"; break;
- case greater_equal: cc_name = "GE"; break;
- case equal: cc_name = "EQ"; break;
- case not_equal: cc_name = "NE"; break;
- default: cc_name = "UnknownCondition"; break;
- }
- bool is_equality = cc_ == equal || cc_ == not_equal;
- stream->Add("CompareStub_%s", cc_name);
- if (strict_ && is_equality) stream->Add("_STRICT");
- if (never_nan_nan_ && is_equality) stream->Add("_NO_NAN");
- if (!include_number_compare_) stream->Add("_NO_NUMBER");
- if (!include_smi_compare_) stream->Add("_NO_SMI");
-}
-
-
// -------------------------------------------------------------------------
// StringCharCodeAtGenerator
@@ -5483,7 +3520,7 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
void StringCharCodeAtGenerator::GenerateSlow(
MacroAssembler* masm,
const RuntimeCallHelper& call_helper) {
- __ Abort("Unexpected fallthrough to CharCodeAt slow case");
+ __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
// Index is not a smi.
__ bind(&index_not_smi_);
@@ -5533,7 +3570,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
call_helper.AfterCall(masm);
__ jmp(&exit_);
- __ Abort("Unexpected fallthrough from CharCodeAt slow case");
+ __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
}
@@ -5544,10 +3581,10 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
// Fast case of Heap::LookupSingleCharacterStringFromCode.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiShiftSize == 0);
- ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
+ ASSERT(IsPowerOf2(String::kMaxOneByteCharCode + 1));
__ test(code_,
Immediate(kSmiTagMask |
- ((~String::kMaxAsciiCharCode) << kSmiTagSize)));
+ ((~String::kMaxOneByteCharCode) << kSmiTagSize)));
__ j(not_zero, &slow_case_);
Factory* factory = masm->isolate()->factory();
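
The fast path above folds the Smi check and the one-byte range check into a single TEST. A standalone sketch of that mask, under the ia32 assumptions that a Smi is the payload shifted left by one with a zero tag bit and that String::kMaxOneByteCharCode is 0xff:

#include <cassert>
#include <cstdint>

const uint32_t kSmiTagSize = 1;
const uint32_t kSmiTagMask = 1;
const uint32_t kMaxOneByteCharCode = 0xff;

uint32_t SmiEncode(uint32_t value) { return value << kSmiTagSize; }

// Zero iff the operand is a Smi whose payload fits in one byte.
bool IsOneByteCharCodeSmi(uint32_t tagged) {
  uint32_t mask = kSmiTagMask | (~kMaxOneByteCharCode << kSmiTagSize);
  return (tagged & mask) == 0;
}

int main() {
  assert(IsOneByteCharCodeSmi(SmiEncode('a')));
  assert(!IsOneByteCharCodeSmi(SmiEncode(0x100)));    // too big for one byte
  assert(!IsOneByteCharCodeSmi(SmiEncode('a') | 1));  // tag bit set: not a Smi
  return 0;
}
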
@@ -5568,7 +3605,7 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
void StringCharFromCodeGenerator::GenerateSlow(
MacroAssembler* masm,
const RuntimeCallHelper& call_helper) {
- __ Abort("Unexpected fallthrough to CharFromCode slow case");
+ __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
__ bind(&slow_case_);
call_helper.BeforeCall(masm);
@@ -5580,24 +3617,7 @@ void StringCharFromCodeGenerator::GenerateSlow(
call_helper.AfterCall(masm);
__ jmp(&exit_);
- __ Abort("Unexpected fallthrough from CharFromCode slow case");
-}
-
-
-// -------------------------------------------------------------------------
-// StringCharAtGenerator
-
-void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
- char_code_at_generator_.GenerateFast(masm);
- char_from_code_generator_.GenerateFast(masm);
-}
-
-
-void StringCharAtGenerator::GenerateSlow(
- MacroAssembler* masm,
- const RuntimeCallHelper& call_helper) {
- char_code_at_generator_.GenerateSlow(masm, call_helper);
- char_from_code_generator_.GenerateSlow(masm, call_helper);
+ __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
}
@@ -5610,7 +3630,11 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ mov(edx, Operand(esp, 1 * kPointerSize)); // Second argument.
// Make sure that both arguments are strings if not known in advance.
- if (flags_ == NO_STRING_ADD_FLAGS) {
+ // Otherwise, at least one of the arguments is definitely a string,
+ // and we convert the one that is not known to be a string.
+ if ((flags_ & STRING_ADD_CHECK_BOTH) == STRING_ADD_CHECK_BOTH) {
+ ASSERT((flags_ & STRING_ADD_CHECK_LEFT) == STRING_ADD_CHECK_LEFT);
+ ASSERT((flags_ & STRING_ADD_CHECK_RIGHT) == STRING_ADD_CHECK_RIGHT);
__ JumpIfSmi(eax, &call_runtime);
__ CmpObjectType(eax, FIRST_NONSTRING_TYPE, ebx);
__ j(above_equal, &call_runtime);
@@ -5619,20 +3643,16 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ JumpIfSmi(edx, &call_runtime);
__ CmpObjectType(edx, FIRST_NONSTRING_TYPE, ebx);
__ j(above_equal, &call_runtime);
- } else {
- // Here at least one of the arguments is definitely a string.
- // We convert the one that is not known to be a string.
- if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) {
- ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0);
- GenerateConvertArgument(masm, 2 * kPointerSize, eax, ebx, ecx, edi,
- &call_builtin);
- builtin_id = Builtins::STRING_ADD_RIGHT;
- } else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) {
- ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0);
- GenerateConvertArgument(masm, 1 * kPointerSize, edx, ebx, ecx, edi,
- &call_builtin);
- builtin_id = Builtins::STRING_ADD_LEFT;
- }
+ } else if ((flags_ & STRING_ADD_CHECK_LEFT) == STRING_ADD_CHECK_LEFT) {
+ ASSERT((flags_ & STRING_ADD_CHECK_RIGHT) == 0);
+ GenerateConvertArgument(masm, 2 * kPointerSize, eax, ebx, ecx, edi,
+ &call_builtin);
+ builtin_id = Builtins::STRING_ADD_RIGHT;
+ } else if ((flags_ & STRING_ADD_CHECK_RIGHT) == STRING_ADD_CHECK_RIGHT) {
+ ASSERT((flags_ & STRING_ADD_CHECK_LEFT) == 0);
+ GenerateConvertArgument(masm, 1 * kPointerSize, edx, ebx, ecx, edi,
+ &call_builtin);
+ builtin_id = Builtins::STRING_ADD_LEFT;
}
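
The branch structure above only works if the flag constants nest the way the ASSERTs suggest. A small sketch of the assumed bit layout, where STRING_ADD_CHECK_BOTH is simply the OR of the left and right bits (the exact numeric values are an assumption here):

enum StringAddFlags {
  STRING_ADD_CHECK_NONE  = 0,
  STRING_ADD_CHECK_LEFT  = 1 << 0,   // assumed bit assignments
  STRING_ADD_CHECK_RIGHT = 1 << 1,
  STRING_ADD_CHECK_BOTH  = STRING_ADD_CHECK_LEFT | STRING_ADD_CHECK_RIGHT
};

// (flags & BOTH) == BOTH only when both bits are set; a single bit still
// compares equal to its own mask, which is what the else-if arms rely on.
inline bool ChecksBoth(int flags) {
  return (flags & STRING_ADD_CHECK_BOTH) == STRING_ADD_CHECK_BOTH;
}
inline bool ChecksLeftOnly(int flags) {
  return (flags & STRING_ADD_CHECK_BOTH) == STRING_ADD_CHECK_LEFT;
}
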
// Both arguments are strings.
@@ -5670,8 +3690,8 @@ void StringAddStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(Smi::kMaxValue == String::kMaxLength);
// Handle exceptionally long strings in the runtime system.
__ j(overflow, &call_runtime);
- // Use the symbol table when adding two one character strings, as it
- // helps later optimizations to return a symbol here.
+ // Use the string table when adding two one-character strings, as it
+ // helps later optimizations to return an internalized string here.
__ cmp(ebx, Immediate(Smi::FromInt(2)));
__ j(not_equal, &longer_than_two);
@@ -5679,13 +3699,13 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ JumpIfNotBothSequentialAsciiStrings(eax, edx, ebx, ecx, &call_runtime);
// Get the two characters forming the new string.
- __ movzx_b(ebx, FieldOperand(eax, SeqAsciiString::kHeaderSize));
- __ movzx_b(ecx, FieldOperand(edx, SeqAsciiString::kHeaderSize));
+ __ movzx_b(ebx, FieldOperand(eax, SeqOneByteString::kHeaderSize));
+ __ movzx_b(ecx, FieldOperand(edx, SeqOneByteString::kHeaderSize));
- // Try to lookup two character string in symbol table. If it is not found
+ // Try to lookup two character string in string table. If it is not found
// just allocate a new one.
Label make_two_character_string, make_two_character_string_no_reload;
- StringHelper::GenerateTwoCharacterSymbolTableProbe(
+ StringHelper::GenerateTwoCharacterStringTableProbe(
masm, ebx, ecx, eax, edx, edi,
&make_two_character_string_no_reload, &make_two_character_string);
__ IncrementCounter(counters->string_add_native(), 1);
@@ -5697,8 +3717,8 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ mov(eax, Operand(esp, 2 * kPointerSize)); // First argument.
__ mov(edx, Operand(esp, 1 * kPointerSize)); // Second argument.
// Get the two characters forming the new string.
- __ movzx_b(ebx, FieldOperand(eax, SeqAsciiString::kHeaderSize));
- __ movzx_b(ecx, FieldOperand(edx, SeqAsciiString::kHeaderSize));
+ __ movzx_b(ebx, FieldOperand(eax, SeqOneByteString::kHeaderSize));
+ __ movzx_b(ecx, FieldOperand(edx, SeqOneByteString::kHeaderSize));
__ bind(&make_two_character_string_no_reload);
__ IncrementCounter(counters->string_add_make_two_char(), 1);
__ AllocateAsciiString(eax, 2, edi, edx, &call_runtime);
@@ -5706,7 +3726,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ shl(ecx, kBitsPerByte);
__ or_(ebx, ecx);
// Set the characters in the new string.
- __ mov_w(FieldOperand(eax, SeqAsciiString::kHeaderSize), ebx);
+ __ mov_w(FieldOperand(eax, SeqOneByteString::kHeaderSize), ebx);
__ IncrementCounter(counters->string_add_native(), 1);
__ ret(2 * kPointerSize);
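
Worth spelling out what the shl/or pair above does: the two one-byte characters are packed into a single 16-bit value so one mov_w can write both. A sketch, assuming kBitsPerByte is 8 and a little-endian store, so the first character lands at the lower address in the sequential string:

#include <cstdint>

uint16_t PackTwoOneByteChars(uint8_t first, uint8_t second) {
  // ebx = first, ecx = second; shl(ecx, 8); or_(ebx, ecx).
  return static_cast<uint16_t>(first | (static_cast<uint16_t>(second) << 8));
}
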
@@ -5723,7 +3743,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
__ movzx_b(edi, FieldOperand(edi, Map::kInstanceTypeOffset));
__ and_(ecx, edi);
- STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
+ STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
__ test(ecx, Immediate(kStringEncodingMask));
__ j(zero, &non_ascii);
@@ -5736,24 +3756,49 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ mov(FieldOperand(ecx, ConsString::kLengthOffset), ebx);
__ mov(FieldOperand(ecx, ConsString::kHashFieldOffset),
Immediate(String::kEmptyHashField));
+
+ Label skip_write_barrier, after_writing;
+ ExternalReference high_promotion_mode = ExternalReference::
+ new_space_high_promotion_mode_active_address(masm->isolate());
+ __ test(Operand::StaticVariable(high_promotion_mode), Immediate(1));
+ __ j(zero, &skip_write_barrier);
+
+ __ mov(FieldOperand(ecx, ConsString::kFirstOffset), eax);
+ __ RecordWriteField(ecx,
+ ConsString::kFirstOffset,
+ eax,
+ ebx,
+ kDontSaveFPRegs);
+ __ mov(FieldOperand(ecx, ConsString::kSecondOffset), edx);
+ __ RecordWriteField(ecx,
+ ConsString::kSecondOffset,
+ edx,
+ ebx,
+ kDontSaveFPRegs);
+ __ jmp(&after_writing);
+
+ __ bind(&skip_write_barrier);
__ mov(FieldOperand(ecx, ConsString::kFirstOffset), eax);
__ mov(FieldOperand(ecx, ConsString::kSecondOffset), edx);
+
+ __ bind(&after_writing);
+
__ mov(eax, ecx);
__ IncrementCounter(counters->string_add_native(), 1);
__ ret(2 * kPointerSize);
__ bind(&non_ascii);
// At least one of the strings is two-byte. Check whether it happens
- // to contain only ASCII characters.
+ // to contain only one-byte characters.
// ecx: first instance type AND second instance type.
// edi: second instance type.
- __ test(ecx, Immediate(kAsciiDataHintMask));
+ __ test(ecx, Immediate(kOneByteDataHintMask));
__ j(not_zero, &ascii_data);
__ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
__ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
__ xor_(edi, ecx);
- STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
- __ and_(edi, kAsciiStringTag | kAsciiDataHintTag);
- __ cmp(edi, kAsciiStringTag | kAsciiDataHintTag);
+ STATIC_ASSERT(kOneByteStringTag != 0 && kOneByteDataHintTag != 0);
+ __ and_(edi, kOneByteStringTag | kOneByteDataHintTag);
+ __ cmp(edi, kOneByteStringTag | kOneByteDataHintTag);
__ j(equal, &ascii_data);
// Allocate a two byte cons string.
__ AllocateTwoByteConsString(ecx, edi, no_reg, &call_runtime);
@@ -5780,10 +3825,10 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ test_b(ecx, kShortExternalStringMask);
__ j(not_zero, &call_runtime);
__ mov(eax, FieldOperand(eax, ExternalString::kResourceDataOffset));
- STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
+ STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
__ jmp(&first_prepared, Label::kNear);
__ bind(&first_is_sequential);
- __ add(eax, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ add(eax, Immediate(SeqOneByteString::kHeaderSize - kHeapObjectTag));
__ bind(&first_prepared);
__ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
@@ -5801,10 +3846,10 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ test_b(edi, kShortExternalStringMask);
__ j(not_zero, &call_runtime);
__ mov(edx, FieldOperand(edx, ExternalString::kResourceDataOffset));
- STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
+ STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
__ jmp(&second_prepared, Label::kNear);
__ bind(&second_is_sequential);
- __ add(edx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ add(edx, Immediate(SeqOneByteString::kHeaderSize - kHeapObjectTag));
__ bind(&second_prepared);
// Push the addresses of both strings' first characters onto the stack.
@@ -5825,7 +3870,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// eax: result string
__ mov(ecx, eax);
// Locate first character of result.
- __ add(ecx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ add(ecx, Immediate(SeqOneByteString::kHeaderSize - kHeapObjectTag));
// Load first argument's length and first character location. Account for
// values currently on the stack when fetching arguments from it.
__ mov(edx, Operand(esp, 4 * kPointerSize));
@@ -5902,6 +3947,21 @@ void StringAddStub::Generate(MacroAssembler* masm) {
}
+void StringAddStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
+ __ push(eax);
+ __ push(edx);
+}
+
+
+void StringAddStub::GenerateRegisterArgsPop(MacroAssembler* masm,
+ Register temp) {
+ __ pop(temp);
+ __ pop(edx);
+ __ pop(eax);
+ __ push(temp);
+}
+
+
void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
int stack_offset,
Register arg,
@@ -5916,31 +3976,11 @@ void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
__ j(below, &done);
// Check the number to string cache.
- Label not_cached;
__ bind(&not_string);
// Puts the cached result into scratch1.
- NumberToStringStub::GenerateLookupNumberStringCache(masm,
- arg,
- scratch1,
- scratch2,
- scratch3,
- false,
- &not_cached);
+ __ LookupNumberStringCache(arg, scratch1, scratch2, scratch3, slow);
__ mov(arg, scratch1);
__ mov(Operand(esp, stack_offset), arg);
- __ jmp(&done);
-
- // Check if the argument is a safe string wrapper.
- __ bind(&not_cached);
- __ JumpIfSmi(arg, slow);
- __ CmpObjectType(arg, JS_VALUE_TYPE, scratch1); // map -> scratch1.
- __ j(not_equal, slow);
- __ test_b(FieldOperand(scratch1, Map::kBitField2Offset),
- 1 << Map::kStringWrapperSafeForDefaultValueOf);
- __ j(zero, slow);
- __ mov(arg, FieldOperand(arg, JSValue::kValueOffset));
- __ mov(Operand(esp, stack_offset), arg);
-
__ bind(&done);
}
@@ -6031,7 +4071,7 @@ void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
}
-void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
+void StringHelper::GenerateTwoCharacterStringTableProbe(MacroAssembler* masm,
Register c1,
Register c2,
Register scratch1,
@@ -6043,7 +4083,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
Register scratch = scratch3;
// Make sure that both characters are not digits, as such strings have a
- // different hash algorithm. Don't try to look for these in the symbol table.
+ // different hash algorithm. Don't try to look for these in the string table.
Label not_array_index;
__ mov(scratch, c1);
__ sub(scratch, Immediate(static_cast<int>('0')));
@@ -6069,47 +4109,43 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
// chars: two character string, char 1 in byte 0 and char 2 in byte 1.
// hash: hash of two character string.
- // Load the symbol table.
- Register symbol_table = c2;
- ExternalReference roots_array_start =
- ExternalReference::roots_array_start(masm->isolate());
- __ mov(scratch, Immediate(Heap::kSymbolTableRootIndex));
- __ mov(symbol_table,
- Operand::StaticArray(scratch, times_pointer_size, roots_array_start));
+ // Load the string table.
+ Register string_table = c2;
+ __ LoadRoot(string_table, Heap::kStringTableRootIndex);
- // Calculate capacity mask from the symbol table capacity.
+ // Calculate capacity mask from the string table capacity.
Register mask = scratch2;
- __ mov(mask, FieldOperand(symbol_table, SymbolTable::kCapacityOffset));
+ __ mov(mask, FieldOperand(string_table, StringTable::kCapacityOffset));
__ SmiUntag(mask);
__ sub(mask, Immediate(1));
// Registers
// chars: two character string, char 1 in byte 0 and char 2 in byte 1.
// hash: hash of two character string
- // symbol_table: symbol table
+ // string_table: string table
// mask: capacity mask
// scratch: -
- // Perform a number of probes in the symbol table.
+ // Perform a number of probes in the string table.
static const int kProbes = 4;
- Label found_in_symbol_table;
+ Label found_in_string_table;
Label next_probe[kProbes], next_probe_pop_mask[kProbes];
Register candidate = scratch; // Scratch register contains candidate.
for (int i = 0; i < kProbes; i++) {
- // Calculate entry in symbol table.
+ // Calculate entry in string table.
__ mov(scratch, hash);
if (i > 0) {
- __ add(scratch, Immediate(SymbolTable::GetProbeOffset(i)));
+ __ add(scratch, Immediate(StringTable::GetProbeOffset(i)));
}
__ and_(scratch, mask);
- // Load the entry from the symbol table.
- STATIC_ASSERT(SymbolTable::kEntrySize == 1);
+ // Load the entry from the string table.
+ STATIC_ASSERT(StringTable::kEntrySize == 1);
__ mov(candidate,
- FieldOperand(symbol_table,
+ FieldOperand(string_table,
scratch,
times_pointer_size,
- SymbolTable::kElementsStartOffset));
+ StringTable::kElementsStartOffset));
// If entry is undefined no string with this hash can be found.
Factory* factory = masm->isolate()->factory();
@@ -6135,10 +4171,10 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
temp, temp, &next_probe_pop_mask[i]);
// Check if the two characters match.
- __ mov(temp, FieldOperand(candidate, SeqAsciiString::kHeaderSize));
+ __ mov(temp, FieldOperand(candidate, SeqOneByteString::kHeaderSize));
__ and_(temp, 0x0000ffff);
__ cmp(chars, temp);
- __ j(equal, &found_in_symbol_table);
+ __ j(equal, &found_in_string_table);
__ bind(&next_probe_pop_mask[i]);
__ pop(mask);
__ bind(&next_probe[i]);
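
The loop above is a bounded open-addressing lookup: at most four probes, each offset added to the hash and wrapped with the capacity mask. A standalone sketch, assuming a power-of-two capacity and a quadratic probe offset of the form (i + i*i)/2; the actual StringTable::GetProbeOffset formula is not shown in this diff:

#include <cstdint>
#include <vector>

const int kProbes = 4;

// `table` stands in for the string table's packed two-character entries.
int ProbeForTwoCharString(const std::vector<uint32_t>& table,
                          uint32_t hash, uint32_t chars) {
  uint32_t mask = static_cast<uint32_t>(table.size()) - 1;  // capacity mask
  for (int i = 0; i < kProbes; ++i) {
    uint32_t entry = (hash + static_cast<uint32_t>(i + i * i) / 2) & mask;
    if (table[entry] == chars) return static_cast<int>(entry);  // found_in_string_table
  }
  return -1;  // not found within kProbes; allocate a new string instead
}
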
@@ -6149,7 +4185,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
// Scratch register contains result when we fall through to here.
Register result = candidate;
- __ bind(&found_in_symbol_table);
+ __ bind(&found_in_string_table);
__ pop(mask); // Pop saved mask from the stack.
if (!result.is(eax)) {
__ mov(eax, result);
@@ -6163,12 +4199,7 @@ void StringHelper::GenerateHashInit(MacroAssembler* masm,
Register scratch) {
// hash = (seed + character) + ((seed + character) << 10);
if (Serializer::enabled()) {
- ExternalReference roots_array_start =
- ExternalReference::roots_array_start(masm->isolate());
- __ mov(scratch, Immediate(Heap::kHashSeedRootIndex));
- __ mov(scratch, Operand::StaticArray(scratch,
- times_pointer_size,
- roots_array_start));
+ __ LoadRoot(scratch, Heap::kHashSeedRootIndex);
__ SmiUntag(scratch);
__ add(scratch, character);
__ mov(hash, scratch);
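
The formula in the comment at the top of GenerateHashInit, written out in plain C++. Only this initial mixing step of the running string hash appears in the diff, and nothing is assumed here beyond that commented expression:

#include <cstdint>

uint32_t StringHashInit(uint32_t seed, uint32_t character) {
  uint32_t hash = seed + character;
  return hash + (hash << 10);  // hash = (seed + character) + ((seed + character) << 10)
}
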
@@ -6267,6 +4298,10 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ ret(3 * kPointerSize);
__ bind(&not_original_string);
+ Label single_char;
+ __ cmp(ecx, Immediate(Smi::FromInt(1)));
+ __ j(equal, &single_char);
+
// eax: string
// ebx: instance type
// ecx: sub string length (smi)
@@ -6324,7 +4359,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// string's encoding is wrong because we always have to recheck encoding of
// the newly created string's parent anyway due to externalized strings.
Label two_byte_slice, set_slice_header;
- STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
+ STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
__ test(ebx, Immediate(kStringEncodingMask));
__ j(zero, &two_byte_slice, Label::kNear);
@@ -6363,7 +4398,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ j(not_zero, &runtime);
__ mov(edi, FieldOperand(edi, ExternalString::kResourceDataOffset));
// Move the pointer so that offset-wise, it looks like a sequential string.
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
__ sub(edi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
__ bind(&sequential_string);
@@ -6371,7 +4406,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ push(edx);
__ push(edi);
__ SmiUntag(ecx);
- STATIC_ASSERT((kAsciiStringTag & kStringEncodingMask) != 0);
+ STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
__ test_b(ebx, kStringEncodingMask);
__ j(zero, &two_byte_sequential);
@@ -6383,12 +4418,12 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ mov(edx, esi); // esi used by following code.
// Locate first character of result.
__ mov(edi, eax);
- __ add(edi, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ add(edi, Immediate(SeqOneByteString::kHeaderSize - kHeapObjectTag));
// Load string argument and locate character of sub string start.
__ pop(esi);
__ pop(ebx);
__ SmiUntag(ebx);
- __ lea(esi, FieldOperand(esi, ebx, times_1, SeqAsciiString::kHeaderSize));
+ __ lea(esi, FieldOperand(esi, ebx, times_1, SeqOneByteString::kHeaderSize));
// eax: result string
// ecx: result length
@@ -6437,6 +4472,17 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Just jump to runtime to create the sub string.
__ bind(&runtime);
__ TailCallRuntime(Runtime::kSubString, 3, 1);
+
+ __ bind(&single_char);
+ // eax: string
+ // ebx: instance type
+ // ecx: sub string length (smi)
+ // edx: from index (smi)
+ StringCharAtGenerator generator(
+ eax, edx, ecx, eax, &runtime, &runtime, &runtime, STRING_INDEX_IS_NUMBER);
+ generator.GenerateFast(masm);
+ __ ret(3 * kPointerSize);
+ generator.SkipSlow(masm, &runtime);
}
@@ -6513,7 +4559,8 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
// Compare lengths - strings up to min-length are equal.
__ bind(&compare_lengths);
__ test(length_delta, length_delta);
- __ j(not_zero, &result_not_equal, Label::kNear);
+ Label length_not_equal;
+ __ j(not_zero, &length_not_equal, Label::kNear);
// Result is EQUAL.
STATIC_ASSERT(EQUAL == 0);
@@ -6522,8 +4569,13 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
__ ret(0);
Label result_greater;
- __ bind(&result_not_equal);
+ Label result_less;
+ __ bind(&length_not_equal);
__ j(greater, &result_greater, Label::kNear);
+ __ jmp(&result_less, Label::kNear);
+ __ bind(&result_not_equal);
+ __ j(above, &result_greater, Label::kNear);
+ __ bind(&result_less);
// Result is LESS.
__ Set(eax, Immediate(Smi::FromInt(LESS)));
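
Restating the overall contract of the flat-string compare above in ordinary C++: compare the shared prefix as unsigned bytes, then fall back to the length difference, producing the LESS/EQUAL/GREATER encoding that is returned as a Smi in eax. A sketch, assuming LESS, EQUAL and GREATER are -1, 0 and 1:

#include <algorithm>
#include <cstring>

int CompareFlatOneByteStrings(const char* left, int left_length,
                              const char* right, int right_length) {
  int min_length = std::min(left_length, right_length);
  int diff = std::memcmp(left, right, static_cast<size_t>(min_length));
  if (diff != 0) return diff < 0 ? -1 : 1;     // result_not_equal (unsigned byte compare)
  if (left_length == right_length) return 0;   // EQUAL
  return left_length < right_length ? -1 : 1;  // length_not_equal
}
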
@@ -6549,9 +4601,9 @@ void StringCompareStub::GenerateAsciiCharsCompareLoop(
// doesn't need an additional compare.
__ SmiUntag(length);
__ lea(left,
- FieldOperand(left, length, times_1, SeqAsciiString::kHeaderSize));
+ FieldOperand(left, length, times_1, SeqOneByteString::kHeaderSize));
__ lea(right,
- FieldOperand(right, length, times_1, SeqAsciiString::kHeaderSize));
+ FieldOperand(right, length, times_1, SeqOneByteString::kHeaderSize));
__ neg(length);
Register index = length; // index = -length;
@@ -6606,7 +4658,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::SMIS);
+ ASSERT(state_ == CompareIC::SMI);
Label miss;
__ mov(ecx, edx);
__ or_(ecx, eax);
@@ -6631,32 +4683,53 @@ void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
}
-void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::HEAP_NUMBERS);
+void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
+ ASSERT(state_ == CompareIC::NUMBER);
Label generic_stub;
Label unordered, maybe_undefined1, maybe_undefined2;
Label miss;
- __ mov(ecx, edx);
- __ and_(ecx, eax);
- __ JumpIfSmi(ecx, &generic_stub, Label::kNear);
- __ CmpObjectType(eax, HEAP_NUMBER_TYPE, ecx);
- __ j(not_equal, &maybe_undefined1, Label::kNear);
- __ CmpObjectType(edx, HEAP_NUMBER_TYPE, ecx);
- __ j(not_equal, &maybe_undefined2, Label::kNear);
+ if (left_ == CompareIC::SMI) {
+ __ JumpIfNotSmi(edx, &miss);
+ }
+ if (right_ == CompareIC::SMI) {
+ __ JumpIfNotSmi(eax, &miss);
+ }
// Inlining the double comparison and falling back to the general compare
- // stub if NaN is involved or SS2 or CMOV is unsupported.
+ // stub if NaN is involved or SSE2 or CMOV is unsupported.
if (CpuFeatures::IsSupported(SSE2) && CpuFeatures::IsSupported(CMOV)) {
- CpuFeatures::Scope scope1(SSE2);
- CpuFeatures::Scope scope2(CMOV);
+ CpuFeatureScope scope1(masm, SSE2);
+ CpuFeatureScope scope2(masm, CMOV);
- // Load left and right operand
- __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
- __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
+ // Load left and right operand.
+ Label done, left, left_smi, right_smi;
+ __ JumpIfSmi(eax, &right_smi, Label::kNear);
+ __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
+ masm->isolate()->factory()->heap_number_map());
+ __ j(not_equal, &maybe_undefined1, Label::kNear);
+ __ movsd(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
+ __ jmp(&left, Label::kNear);
+ __ bind(&right_smi);
+ __ mov(ecx, eax); // Can't clobber eax because we can still jump away.
+ __ SmiUntag(ecx);
+ __ Cvtsi2sd(xmm1, ecx);
+
+ __ bind(&left);
+ __ JumpIfSmi(edx, &left_smi, Label::kNear);
+ __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
+ masm->isolate()->factory()->heap_number_map());
+ __ j(not_equal, &maybe_undefined2, Label::kNear);
+ __ movsd(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
+ __ jmp(&done);
+ __ bind(&left_smi);
+ __ mov(ecx, edx); // Can't clobber edx because we can still jump away.
+ __ SmiUntag(ecx);
+ __ Cvtsi2sd(xmm0, ecx);
- // Compare operands
+ __ bind(&done);
+ // Compare operands.
__ ucomisd(xmm0, xmm1);
// Don't base result on EFLAGS when a NaN is involved.
@@ -6670,17 +4743,30 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
__ mov(ecx, Immediate(Smi::FromInt(-1)));
__ cmov(below, eax, ecx);
__ ret(0);
+ } else {
+ __ mov(ecx, edx);
+ __ and_(ecx, eax);
+ __ JumpIfSmi(ecx, &generic_stub, Label::kNear);
+
+ __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
+ masm->isolate()->factory()->heap_number_map());
+ __ j(not_equal, &maybe_undefined1, Label::kNear);
+ __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
+ masm->isolate()->factory()->heap_number_map());
+ __ j(not_equal, &maybe_undefined2, Label::kNear);
}
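
The SSE2 path above deliberately refuses to produce an answer when ucomisd reports an unordered result, that is, when either operand is NaN, and defers to the generic stub instead. The same decision written as a plain C++ sketch:

#include <cmath>

// Returns false for the "unordered" (NaN) case, mirroring the bailout to the
// generic stub; otherwise writes -1/0/1 like the cmov sequence above.
bool TryCompareDoubles(double left, double right, int* result) {
  if (std::isnan(left) || std::isnan(right)) return false;
  *result = 0;                     // EQUAL
  if (left > right) *result = 1;   // GREATER (cmov above)
  if (left < right) *result = -1;  // LESS    (cmov below)
  return true;
}
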
__ bind(&unordered);
- CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS);
__ bind(&generic_stub);
- __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
+ ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC,
+ CompareIC::GENERIC);
+ __ jmp(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
__ bind(&maybe_undefined1);
if (Token::IsOrderedRelationalCompareOp(op_)) {
__ cmp(eax, Immediate(masm->isolate()->factory()->undefined_value()));
__ j(not_equal, &miss);
+ __ JumpIfSmi(edx, &unordered);
__ CmpObjectType(edx, HEAP_NUMBER_TYPE, ecx);
__ j(not_equal, &maybe_undefined2, Label::kNear);
__ jmp(&unordered);
@@ -6697,8 +4783,53 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
}
-void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::SYMBOLS);
+void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
+ ASSERT(state_ == CompareIC::INTERNALIZED_STRING);
+ ASSERT(GetCondition() == equal);
+
+ // Registers containing left and right operands respectively.
+ Register left = edx;
+ Register right = eax;
+ Register tmp1 = ecx;
+ Register tmp2 = ebx;
+
+ // Check that both operands are heap objects.
+ Label miss;
+ __ mov(tmp1, left);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ and_(tmp1, right);
+ __ JumpIfSmi(tmp1, &miss, Label::kNear);
+
+ // Check that both operands are internalized strings.
+ __ mov(tmp1, FieldOperand(left, HeapObject::kMapOffset));
+ __ mov(tmp2, FieldOperand(right, HeapObject::kMapOffset));
+ __ movzx_b(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
+ __ movzx_b(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
+ STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
+ __ or_(tmp1, tmp2);
+ __ test(tmp1, Immediate(kIsNotStringMask | kIsNotInternalizedMask));
+ __ j(not_zero, &miss, Label::kNear);
+
+ // Internalized strings are compared by identity.
+ Label done;
+ __ cmp(left, right);
+ // Make sure eax is non-zero. At this point input operands are
+ // guaranteed to be non-zero.
+ ASSERT(right.is(eax));
+ __ j(not_equal, &done, Label::kNear);
+ STATIC_ASSERT(EQUAL == 0);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
+ __ bind(&done);
+ __ ret(0);
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
+ ASSERT(state_ == CompareIC::UNIQUE_NAME);
ASSERT(GetCondition() == equal);
// Registers containing left and right operands respectively.
@@ -6714,17 +4845,17 @@ void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
__ and_(tmp1, right);
__ JumpIfSmi(tmp1, &miss, Label::kNear);
- // Check that both operands are symbols.
+ // Check that both operands are unique names. This leaves the instance
+ // types loaded in tmp1 and tmp2.
__ mov(tmp1, FieldOperand(left, HeapObject::kMapOffset));
__ mov(tmp2, FieldOperand(right, HeapObject::kMapOffset));
__ movzx_b(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
__ movzx_b(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kSymbolTag != 0);
- __ and_(tmp1, tmp2);
- __ test(tmp1, Immediate(kIsSymbolMask));
- __ j(zero, &miss, Label::kNear);
- // Symbols are compared by identity.
+ __ JumpIfNotUniqueName(tmp1, &miss, Label::kNear);
+ __ JumpIfNotUniqueName(tmp2, &miss, Label::kNear);
+
+ // Unique names are compared by identity.
Label done;
__ cmp(left, right);
// Make sure eax is non-zero. At this point input operands are
@@ -6743,7 +4874,7 @@ void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::STRINGS);
+ ASSERT(state_ == CompareIC::STRING);
Label miss;
bool equality = Token::IsEqualityOp(op_);
@@ -6785,15 +4916,16 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
// Handle not identical strings.
__ bind(&not_same);
- // Check that both strings are symbols. If they are, we're done
+ // Check that both strings are internalized. If they are, we're done
// because we already know they are not identical. But in the case of
- // non-equality compare, we still need to determine the order.
+ // non-equality compare, we still need to determine the order. We
+ // also know they are both strings.
if (equality) {
Label do_compare;
- STATIC_ASSERT(kSymbolTag != 0);
- __ and_(tmp1, tmp2);
- __ test(tmp1, Immediate(kIsSymbolMask));
- __ j(zero, &do_compare, Label::kNear);
+ STATIC_ASSERT(kInternalizedTag == 0);
+ __ or_(tmp1, tmp2);
+ __ test(tmp1, Immediate(kIsNotInternalizedMask));
+ __ j(not_zero, &do_compare, Label::kNear);
// Make sure eax is non-zero. At this point input operands are
// guaranteed to be non-zero.
ASSERT(right.is(eax));
@@ -6832,7 +4964,7 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::OBJECTS);
+ ASSERT(state_ == CompareIC::OBJECT);
Label miss;
__ mov(ecx, edx);
__ and_(ecx, eax);
@@ -6900,14 +5032,14 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
// the property. This function may return false negatives, so miss_label
// must always call a backup property check that is complete.
// This function is safe to call if the receiver has fast properties.
-// Name must be a symbol and receiver must be a heap object.
-void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register properties,
- Handle<String> name,
- Register r0) {
- ASSERT(name->IsSymbol());
+// Name must be a unique name and receiver must be a heap object.
+void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register properties,
+ Handle<Name> name,
+ Register r0) {
+ ASSERT(name->IsUniqueName());
// If names of slots in range from 1 to kProbes - 1 for the hash value are
// not equal to the name and kProbes-th slot is not used (its name is the
@@ -6922,10 +5054,10 @@ void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
__ dec(index);
__ and_(index,
Immediate(Smi::FromInt(name->Hash() +
- StringDictionary::GetProbeOffset(i))));
+ NameDictionary::GetProbeOffset(i))));
// Scale the index by multiplying by the entry size.
- ASSERT(StringDictionary::kEntrySize == 3);
+ ASSERT(NameDictionary::kEntrySize == 3);
__ lea(index, Operand(index, index, times_2, 0)); // index *= 3.
Register entity_name = r0;
// Having undefined at this place means the name is not contained.
@@ -6936,26 +5068,22 @@ void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
__ j(equal, done);
// Stop if found the property.
- __ cmp(entity_name, Handle<String>(name));
+ __ cmp(entity_name, Handle<Name>(name));
__ j(equal, miss);
- Label the_hole;
+ Label good;
// Check for the hole and skip.
__ cmp(entity_name, masm->isolate()->factory()->the_hole_value());
- __ j(equal, &the_hole, Label::kNear);
+ __ j(equal, &good, Label::kNear);
- // Check if the entry name is not a symbol.
+ // Check if the entry name is not a unique name.
__ mov(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
- __ test_b(FieldOperand(entity_name, Map::kInstanceTypeOffset),
- kIsSymbolMask);
- __ j(zero, miss);
- __ bind(&the_hole);
+ __ JumpIfNotUniqueName(FieldOperand(entity_name, Map::kInstanceTypeOffset),
+ miss);
+ __ bind(&good);
}
- StringDictionaryLookupStub stub(properties,
- r0,
- r0,
- StringDictionaryLookupStub::NEGATIVE_LOOKUP);
+ NameDictionaryLookupStub stub(properties, r0, r0, NEGATIVE_LOOKUP);
__ push(Immediate(Handle<Object>(name)));
__ push(Immediate(name->Hash()));
__ CallStub(&stub);
@@ -6965,23 +5093,23 @@ void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
}
-// Probe the string dictionary in the |elements| register. Jump to the
+// Probe the name dictionary in the |elements| register. Jump to the
// |done| label if a property with the given name is found leaving the
// index into the dictionary in |r0|. Jump to the |miss| label
// otherwise.
-void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register elements,
- Register name,
- Register r0,
- Register r1) {
+void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register elements,
+ Register name,
+ Register r0,
+ Register r1) {
ASSERT(!elements.is(r0));
ASSERT(!elements.is(r1));
ASSERT(!name.is(r0));
ASSERT(!name.is(r1));
- __ AssertString(name);
+ __ AssertName(name);
__ mov(r1, FieldOperand(elements, kCapacityOffset));
__ shr(r1, kSmiTagSize); // convert smi to int
@@ -6992,15 +5120,15 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
// cover ~93% of loads from dictionaries.
for (int i = 0; i < kInlinedProbes; i++) {
// Compute the masked index: (hash + i + i * i) & mask.
- __ mov(r0, FieldOperand(name, String::kHashFieldOffset));
- __ shr(r0, String::kHashShift);
+ __ mov(r0, FieldOperand(name, Name::kHashFieldOffset));
+ __ shr(r0, Name::kHashShift);
if (i > 0) {
- __ add(r0, Immediate(StringDictionary::GetProbeOffset(i)));
+ __ add(r0, Immediate(NameDictionary::GetProbeOffset(i)));
}
__ and_(r0, r1);
// Scale the index by multiplying by the entry size.
- ASSERT(StringDictionary::kEntrySize == 3);
+ ASSERT(NameDictionary::kEntrySize == 3);
__ lea(r0, Operand(r0, r0, times_2, 0)); // r0 = r0 * 3
// Check if the key is identical to the name.
@@ -7011,13 +5139,10 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
__ j(equal, done);
}
- StringDictionaryLookupStub stub(elements,
- r1,
- r0,
- POSITIVE_LOOKUP);
+ NameDictionaryLookupStub stub(elements, r1, r0, POSITIVE_LOOKUP);
__ push(name);
- __ mov(r0, FieldOperand(name, String::kHashFieldOffset));
- __ shr(r0, String::kHashShift);
+ __ mov(r0, FieldOperand(name, Name::kHashFieldOffset));
+ __ shr(r0, Name::kHashShift);
__ push(r0);
__ CallStub(&stub);
@@ -7027,7 +5152,7 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
}
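
Both lookup helpers above use the same probing scheme: the hash plus a probe offset, masked by the capacity, then scaled by NameDictionary::kEntrySize == 3 because each entry spans three slots (assumed here to be key, value and details). A standalone sketch of the index computation; the quadratic probe offset is an assumption, since GetProbeOffset itself is not part of this diff:

#include <cstdint>

const uint32_t kEntrySize = 3;  // assumed slot layout: key, value, details

uint32_t DictionaryEntrySlot(uint32_t hash, uint32_t capacity, uint32_t probe) {
  uint32_t mask = capacity - 1;  // capacity is a power of two
  uint32_t index = (hash + (probe + probe * probe) / 2) & mask;
  return index * kEntrySize;     // lea(index, [index + index*2])
}
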
-void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
+void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
// This stub overrides SometimesSetsUpAFrame() to return false. That means
// we cannot call anything that could cause a GC from this stub.
// Stack frame on entry:
@@ -7035,7 +5160,7 @@ void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
// esp[1 * kPointerSize]: key's hash.
// esp[2 * kPointerSize]: key.
// Registers:
- // dictionary_: StringDictionary to probe.
+ // dictionary_: NameDictionary to probe.
// result_: used as scratch.
// index_: will hold an index of entry if lookup is successful.
// might alias with result_.
@@ -7060,12 +5185,12 @@ void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
// Compute the masked index: (hash + i + i * i) & mask.
__ mov(scratch, Operand(esp, 2 * kPointerSize));
if (i > 0) {
- __ add(scratch, Immediate(StringDictionary::GetProbeOffset(i)));
+ __ add(scratch, Immediate(NameDictionary::GetProbeOffset(i)));
}
__ and_(scratch, Operand(esp, 0));
// Scale the index by multiplying by the entry size.
- ASSERT(StringDictionary::kEntrySize == 3);
+ ASSERT(NameDictionary::kEntrySize == 3);
__ lea(index_, Operand(scratch, scratch, times_2, 0)); // index *= 3.
// Having undefined at this place means the name is not contained.
@@ -7082,15 +5207,14 @@ void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
__ j(equal, &in_dictionary);
if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
- // If we hit a non symbol key during negative lookup
- // we have to bailout as this key might be equal to the
+ // If we hit a key that is not a unique name during negative
+ // lookup we have to bailout as this key might be equal to the
// key we are looking for.
- // Check if the entry name is not a symbol.
+ // Check if the entry name is not a unique name.
__ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
- __ test_b(FieldOperand(scratch, Map::kInstanceTypeOffset),
- kIsSymbolMask);
- __ j(zero, &maybe_in_dictionary);
+ __ JumpIfNotUniqueName(FieldOperand(scratch, Map::kInstanceTypeOffset),
+ &maybe_in_dictionary);
}
}
@@ -7130,8 +5254,6 @@ static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
// Used in CompileArrayPushCall.
{ REG(ebx), REG(ecx), REG(edx), EMIT_REMEMBERED_SET },
{ REG(ebx), REG(edi), REG(edx), OMIT_REMEMBERED_SET },
- // Used in CompileStoreGlobal and CallFunctionStub.
- { REG(ebx), REG(ecx), REG(edx), OMIT_REMEMBERED_SET },
// Used in StoreStubCompiler::CompileStoreField and
// KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
{ REG(edx), REG(ecx), REG(ebx), EMIT_REMEMBERED_SET },
@@ -7155,15 +5277,17 @@ static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
{ REG(edx), REG(eax), REG(edi), EMIT_REMEMBERED_SET},
// StoreArrayLiteralElementStub::Generate
{ REG(ebx), REG(eax), REG(ecx), EMIT_REMEMBERED_SET},
- // FastNewClosureStub
+ // FastNewClosureStub and StringAddStub::Generate
{ REG(ecx), REG(edx), REG(ebx), EMIT_REMEMBERED_SET},
+ // StringAddStub::Generate
+ { REG(ecx), REG(eax), REG(ebx), EMIT_REMEMBERED_SET},
// Null termination.
{ REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
};
#undef REG
-bool RecordWriteStub::IsPregenerated() {
+bool RecordWriteStub::IsPregenerated(Isolate* isolate) {
for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
!entry->object.is(no_reg);
entry++) {
@@ -7179,19 +5303,18 @@ bool RecordWriteStub::IsPregenerated() {
}
-void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime() {
- StoreBufferOverflowStub stub1(kDontSaveFPRegs);
- stub1.GetCode()->set_is_pregenerated(true);
-
- CpuFeatures::TryForceFeatureScope scope(SSE2);
- if (CpuFeatures::IsSupported(SSE2)) {
+void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
+ Isolate* isolate) {
+ StoreBufferOverflowStub stub(kDontSaveFPRegs);
+ stub.GetCode(isolate)->set_is_pregenerated(true);
+ if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
StoreBufferOverflowStub stub2(kSaveFPRegs);
- stub2.GetCode()->set_is_pregenerated(true);
+ stub2.GetCode(isolate)->set_is_pregenerated(true);
}
}
-void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
+void RecordWriteStub::GenerateFixedRegStubsAheadOfTime(Isolate* isolate) {
for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
!entry->object.is(no_reg);
entry++) {
@@ -7200,7 +5323,7 @@ void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
entry->address,
entry->action,
kDontSaveFPRegs);
- stub.GetCode()->set_is_pregenerated(true);
+ stub.GetCode(isolate)->set_is_pregenerated(true);
}
}
@@ -7297,15 +5420,9 @@ void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
int argument_count = 3;
__ PrepareCallCFunction(argument_count, regs_.scratch0());
__ mov(Operand(esp, 0 * kPointerSize), regs_.object());
- if (mode == INCREMENTAL_COMPACTION) {
- __ mov(Operand(esp, 1 * kPointerSize), regs_.address()); // Slot.
- } else {
- ASSERT(mode == INCREMENTAL);
- __ mov(regs_.scratch0(), Operand(regs_.address(), 0));
- __ mov(Operand(esp, 1 * kPointerSize), regs_.scratch0()); // Value.
- }
+ __ mov(Operand(esp, 1 * kPointerSize), regs_.address()); // Slot.
__ mov(Operand(esp, 2 * kPointerSize),
- Immediate(ExternalReference::isolate_address()));
+ Immediate(ExternalReference::isolate_address(masm->isolate())));
AllowExternalCallThatCantCauseGC scope(masm);
if (mode == INCREMENTAL_COMPACTION) {
@@ -7420,11 +5537,11 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : element value to store
- // -- ebx : array literal
- // -- edi : map of array literal
// -- ecx : element index as smi
- // -- edx : array literal index in function
// -- esp[0] : return address
+ // -- esp[4] : array literal index in function
+ // -- esp[8] : array literal
+ // clobbers ebx, edx, edi
// -----------------------------------
Label element_done;
@@ -7434,6 +5551,11 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
Label slow_elements_from_double;
Label fast_elements;
+ // Get array literal index, array literal and its map.
+ __ mov(edx, Operand(esp, 1 * kPointerSize));
+ __ mov(ebx, Operand(esp, 2 * kPointerSize));
+ __ mov(edi, FieldOperand(ebx, JSObject::kMapOffset));
+
__ CheckFastElements(edi, &double_elements);
// Check for FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS elements
@@ -7498,8 +5620,28 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
}
+void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
+ CEntryStub ces(1, fp_registers_ ? kSaveFPRegs : kDontSaveFPRegs);
+ __ call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
+ int parameter_count_offset =
+ StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
+ __ mov(ebx, MemOperand(ebp, parameter_count_offset));
+ masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
+ __ pop(ecx);
+ int additional_offset = function_mode_ == JS_FUNCTION_STUB_MODE
+ ? kPointerSize
+ : 0;
+ __ lea(esp, MemOperand(esp, ebx, times_pointer_size, additional_offset));
+ __ jmp(ecx); // Return to IC Miss stub, continuation still on stack.
+}
+
+
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
- if (entry_hook_ != NULL) {
+ if (masm->isolate()->function_entry_hook() != NULL) {
+ // It's always safe to call the entry hook stub, as the hook itself
+ // is not allowed to call back to V8.
+ AllowStubCallsScope allow_stub_calls(masm, true);
+
ProfileEntryHookStub stub;
masm->CallStub(&stub);
}
@@ -7507,28 +5649,360 @@ void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
- // Ecx is the only volatile register we must save.
+ // Save volatile registers.
+ const int kNumSavedRegisters = 3;
+ __ push(eax);
__ push(ecx);
+ __ push(edx);
// Calculate and push the original stack pointer.
- __ lea(eax, Operand(esp, kPointerSize));
+ __ lea(eax, Operand(esp, (kNumSavedRegisters + 1) * kPointerSize));
__ push(eax);
- // Calculate and push the function address.
- __ mov(eax, Operand(eax, 0));
+ // Retrieve our return address and use it to calculate the calling
+ // function's address.
+ __ mov(eax, Operand(esp, (kNumSavedRegisters + 1) * kPointerSize));
__ sub(eax, Immediate(Assembler::kCallInstructionLength));
__ push(eax);
// Call the entry hook.
- int32_t hook_location = reinterpret_cast<int32_t>(&entry_hook_);
- __ call(Operand(hook_location, RelocInfo::NONE));
+ ASSERT(masm->isolate()->function_entry_hook() != NULL);
+ __ call(FUNCTION_ADDR(masm->isolate()->function_entry_hook()),
+ RelocInfo::RUNTIME_ENTRY);
__ add(esp, Immediate(2 * kPointerSize));
// Restore the saved volatile registers.
+ __ pop(edx);
__ pop(ecx);
+ __ pop(eax);
+
__ ret(0);
}
+
+template<class T>
+static void CreateArrayDispatch(MacroAssembler* masm,
+ AllocationSiteOverrideMode mode) {
+ if (mode == DISABLE_ALLOCATION_SITES) {
+ T stub(GetInitialFastElementsKind(),
+ CONTEXT_CHECK_REQUIRED,
+ mode);
+ __ TailCallStub(&stub);
+ } else if (mode == DONT_OVERRIDE) {
+ int last_index = GetSequenceIndexFromFastElementsKind(
+ TERMINAL_FAST_ELEMENTS_KIND);
+ for (int i = 0; i <= last_index; ++i) {
+ Label next;
+ ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
+ __ cmp(edx, kind);
+ __ j(not_equal, &next);
+ T stub(kind);
+ __ TailCallStub(&stub);
+ __ bind(&next);
+ }
+
+ // If we reached this point there is a problem.
+ __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
+ AllocationSiteOverrideMode mode) {
+ // ebx - type info cell (if mode != DISABLE_ALLOCATION_SITES)
+ // edx - kind (if mode != DISABLE_ALLOCATION_SITES)
+ // eax - number of arguments
+ // edi - constructor?
+ // esp[0] - return address
+ // esp[4] - last argument
+ Label normal_sequence;
+ if (mode == DONT_OVERRIDE) {
+ ASSERT(FAST_SMI_ELEMENTS == 0);
+ ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ ASSERT(FAST_ELEMENTS == 2);
+ ASSERT(FAST_HOLEY_ELEMENTS == 3);
+ ASSERT(FAST_DOUBLE_ELEMENTS == 4);
+ ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
+
+ // Is the low bit set? If so, we are holey and that is good.
+ __ test_b(edx, 1);
+ __ j(not_zero, &normal_sequence);
+ }
+
+ // Look at the first argument.
+ __ mov(ecx, Operand(esp, kPointerSize));
+ __ test(ecx, ecx);
+ __ j(zero, &normal_sequence);
+
+ if (mode == DISABLE_ALLOCATION_SITES) {
+ ElementsKind initial = GetInitialFastElementsKind();
+ ElementsKind holey_initial = GetHoleyElementsKind(initial);
+
+ ArraySingleArgumentConstructorStub stub_holey(holey_initial,
+ CONTEXT_CHECK_REQUIRED,
+ DISABLE_ALLOCATION_SITES);
+ __ TailCallStub(&stub_holey);
+
+ __ bind(&normal_sequence);
+ ArraySingleArgumentConstructorStub stub(initial,
+ CONTEXT_CHECK_REQUIRED,
+ DISABLE_ALLOCATION_SITES);
+ __ TailCallStub(&stub);
+ } else if (mode == DONT_OVERRIDE) {
+ // We are going to create a holey array, but our kind is non-holey.
+ // Fix kind and retry.
+ __ inc(edx);
+ __ mov(ecx, FieldOperand(ebx, Cell::kValueOffset));
+ if (FLAG_debug_code) {
+ Handle<Map> allocation_site_map =
+ masm->isolate()->factory()->allocation_site_map();
+ __ cmp(FieldOperand(ecx, 0), Immediate(allocation_site_map));
+ __ Assert(equal, kExpectedAllocationSiteInCell);
+ }
+
+ // Save the resulting elements kind in type info
+ __ SmiTag(edx);
+ __ mov(FieldOperand(ecx, AllocationSite::kTransitionInfoOffset), edx);
+ __ SmiUntag(edx);
+
+ __ bind(&normal_sequence);
+ int last_index = GetSequenceIndexFromFastElementsKind(
+ TERMINAL_FAST_ELEMENTS_KIND);
+ for (int i = 0; i <= last_index; ++i) {
+ Label next;
+ ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
+ __ cmp(edx, kind);
+ __ j(not_equal, &next);
+ ArraySingleArgumentConstructorStub stub(kind);
+ __ TailCallStub(&stub);
+ __ bind(&next);
+ }
+
+ // If we reached this point there is a problem.
+ __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ } else {
+ UNREACHABLE();
+ }
+}
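
The ASSERTs at the top of this function pin down the numbering that makes the low-bit trick work: packed kinds are even and each holey variant is the next odd value, so testing bit 0 identifies holey kinds and a single inc converts packed to holey. Restated with the values taken from those ASSERTs (the helper names are illustrative):

enum FastElementsKind {
  FAST_SMI_ELEMENTS = 0,
  FAST_HOLEY_SMI_ELEMENTS = 1,
  FAST_ELEMENTS = 2,
  FAST_HOLEY_ELEMENTS = 3,
  FAST_DOUBLE_ELEMENTS = 4,
  FAST_HOLEY_DOUBLE_ELEMENTS = 5
};

inline bool IsHoleyKind(int kind) { return (kind & 1) != 0; }                     // test_b(edx, 1)
inline int ToHoleyKind(int kind) { return IsHoleyKind(kind) ? kind : kind + 1; }  // inc(edx)
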
+
+
+template<class T>
+static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
+ ElementsKind initial_kind = GetInitialFastElementsKind();
+ ElementsKind initial_holey_kind = GetHoleyElementsKind(initial_kind);
+
+ int to_index = GetSequenceIndexFromFastElementsKind(
+ TERMINAL_FAST_ELEMENTS_KIND);
+ for (int i = 0; i <= to_index; ++i) {
+ ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
+ T stub(kind);
+ stub.GetCode(isolate)->set_is_pregenerated(true);
+ if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE ||
+ (!FLAG_track_allocation_sites &&
+ (kind == initial_kind || kind == initial_holey_kind))) {
+ T stub1(kind, CONTEXT_CHECK_REQUIRED, DISABLE_ALLOCATION_SITES);
+ stub1.GetCode(isolate)->set_is_pregenerated(true);
+ }
+ }
+}
+
+
+void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
+ ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
+ isolate);
+ ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
+ isolate);
+ ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
+ isolate);
+}
+
+
+void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
+ Isolate* isolate) {
+ ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
+ for (int i = 0; i < 2; i++) {
+ // For internal arrays we only need a few things
+ InternalArrayNoArgumentConstructorStub stubh1(kinds[i]);
+ stubh1.GetCode(isolate)->set_is_pregenerated(true);
+ InternalArraySingleArgumentConstructorStub stubh2(kinds[i]);
+ stubh2.GetCode(isolate)->set_is_pregenerated(true);
+ InternalArrayNArgumentsConstructorStub stubh3(kinds[i]);
+ stubh3.GetCode(isolate)->set_is_pregenerated(true);
+ }
+}
+
+
+void ArrayConstructorStub::GenerateDispatchToArrayStub(
+ MacroAssembler* masm,
+ AllocationSiteOverrideMode mode) {
+ if (argument_count_ == ANY) {
+ Label not_zero_case, not_one_case;
+ __ test(eax, eax);
+ __ j(not_zero, &not_zero_case);
+ CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
+
+ __ bind(&not_zero_case);
+ __ cmp(eax, 1);
+ __ j(greater, &not_one_case);
+ CreateArrayDispatchOneArgument(masm, mode);
+
+ __ bind(&not_one_case);
+ CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+ } else if (argument_count_ == NONE) {
+ CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
+ } else if (argument_count_ == ONE) {
+ CreateArrayDispatchOneArgument(masm, mode);
+ } else if (argument_count_ == MORE_THAN_ONE) {
+ CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+void ArrayConstructorStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : argc (only if argument_count_ == ANY)
+ // -- ebx : type info cell
+ // -- edi : constructor
+ // -- esp[0] : return address
+ // -- esp[4] : last argument
+ // -----------------------------------
+ Handle<Object> undefined_sentinel(
+ masm->isolate()->heap()->undefined_value(),
+ masm->isolate());
+
+ if (FLAG_debug_code) {
+ // The array construct code is only set for the global and natives
+ // builtin Array functions, which always have maps.
+
+ // Initial map for the builtin Array function should be a map.
+ __ mov(ecx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
+ // This check catches both a NULL pointer and a Smi.
+ __ test(ecx, Immediate(kSmiTagMask));
+ __ Assert(not_zero, kUnexpectedInitialMapForArrayFunction);
+ __ CmpObjectType(ecx, MAP_TYPE, ecx);
+ __ Assert(equal, kUnexpectedInitialMapForArrayFunction);
+
+ // We should either have undefined in ebx or a valid cell.
+ Label okay_here;
+ Handle<Map> cell_map = masm->isolate()->factory()->cell_map();
+ __ cmp(ebx, Immediate(undefined_sentinel));
+ __ j(equal, &okay_here);
+ __ cmp(FieldOperand(ebx, 0), Immediate(cell_map));
+ __ Assert(equal, kExpectedPropertyCellInRegisterEbx);
+ __ bind(&okay_here);
+ }
+
+ Label no_info;
+ // If the type cell is undefined, or contains anything other than an
+ // AllocationSite, call an array constructor that doesn't use AllocationSites.
+ __ cmp(ebx, Immediate(undefined_sentinel));
+ __ j(equal, &no_info);
+ __ mov(edx, FieldOperand(ebx, Cell::kValueOffset));
+ __ cmp(FieldOperand(edx, 0), Immediate(
+ masm->isolate()->factory()->allocation_site_map()));
+ __ j(not_equal, &no_info);
+
+ __ mov(edx, FieldOperand(edx, AllocationSite::kTransitionInfoOffset));
+ __ SmiUntag(edx);
+ GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
+
+ __ bind(&no_info);
+ GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
+}
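In plain C++ terms, the decision Generate makes above is roughly the following. This is a hedged sketch with stand-in types (the real code compares heap-object maps against the isolate's allocation_site_map; these field names are illustrative only):

// Stand-in for "what is in ebx": either undefined, or a cell whose value may
// or may not be an AllocationSite.
struct TypeInfoSketch {
  bool is_undefined;         // ebx == undefined_sentinel
  bool value_is_alloc_site;  // map of the cell's value == allocation_site_map
  int transition_info_kind;  // smi-untagged elements kind, if it is a site
};

enum ModeSketch { DONT_OVERRIDE_SK, DISABLE_ALLOCATION_SITES_SK };

inline ModeSketch ChooseDispatchMode(const TypeInfoSketch& info) {
  if (info.is_undefined) return DISABLE_ALLOCATION_SITES_SK;
  if (!info.value_is_alloc_site) return DISABLE_ALLOCATION_SITES_SK;
  return DONT_OVERRIDE_SK;  // edx now carries transition_info_kind.
}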
+
+
+void InternalArrayConstructorStub::GenerateCase(
+ MacroAssembler* masm, ElementsKind kind) {
+ Label not_zero_case, not_one_case;
+ Label normal_sequence;
+
+ __ test(eax, eax);
+ __ j(not_zero, &not_zero_case);
+ InternalArrayNoArgumentConstructorStub stub0(kind);
+ __ TailCallStub(&stub0);
+
+ __ bind(&not_zero_case);
+ __ cmp(eax, 1);
+ __ j(greater, &not_one_case);
+
+ if (IsFastPackedElementsKind(kind)) {
+ // We might need to create a holey array.
+ // Look at the first argument.
+ __ mov(ecx, Operand(esp, kPointerSize));
+ __ test(ecx, ecx);
+ __ j(zero, &normal_sequence);
+
+ InternalArraySingleArgumentConstructorStub
+ stub1_holey(GetHoleyElementsKind(kind));
+ __ TailCallStub(&stub1_holey);
+ }
+
+ __ bind(&normal_sequence);
+ InternalArraySingleArgumentConstructorStub stub1(kind);
+ __ TailCallStub(&stub1);
+
+ __ bind(&not_one_case);
+ InternalArrayNArgumentsConstructorStub stubN(kind);
+ __ TailCallStub(&stubN);
+}
+
+
+void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : argc
+ // -- ebx : type info cell
+ // -- edi : constructor
+ // -- esp[0] : return address
+ // -- esp[4] : last argument
+ // -----------------------------------
+
+ if (FLAG_debug_code) {
+ // The array construct code is only set for the global and natives
+ // builtin Array functions, which always have maps.
+
+ // Initial map for the builtin Array function should be a map.
+ __ mov(ecx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
+ // This check catches both a NULL pointer and a Smi.
+ __ test(ecx, Immediate(kSmiTagMask));
+ __ Assert(not_zero, kUnexpectedInitialMapForArrayFunction);
+ __ CmpObjectType(ecx, MAP_TYPE, ecx);
+ __ Assert(equal, kUnexpectedInitialMapForArrayFunction);
+ }
+
+ // Figure out the right elements kind.
+ __ mov(ecx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
+
+ // Load the map's "bit field 2" into ecx. We only need the first byte,
+ // but the following masking takes care of that anyway.
+ __ mov(ecx, FieldOperand(ecx, Map::kBitField2Offset));
+ // Retrieve elements_kind from bit field 2.
+ __ and_(ecx, Map::kElementsKindMask);
+ __ shr(ecx, Map::kElementsKindShift);
+
+ if (FLAG_debug_code) {
+ Label done;
+ __ cmp(ecx, Immediate(FAST_ELEMENTS));
+ __ j(equal, &done);
+ __ cmp(ecx, Immediate(FAST_HOLEY_ELEMENTS));
+ __ Assert(equal,
+ kInvalidElementsKindForInternalArrayOrInternalPackedArray);
+ __ bind(&done);
+ }
+
+ Label fast_elements_case;
+ __ cmp(ecx, Immediate(FAST_ELEMENTS));
+ __ j(equal, &fast_elements_case);
+ GenerateCase(masm, FAST_HOLEY_ELEMENTS);
+
+ __ bind(&fast_elements_case);
+ GenerateCase(masm, FAST_ELEMENTS);
+}
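The mask-and-shift above is a plain bit-field extraction. A hedged sketch with hypothetical constant values (the real kElementsKindMask/kElementsKindShift are defined on Map and may differ):

#include <cstdint>

// Hypothetical layout: elements kind stored in the upper bits of bit field 2.
const uint32_t kElementsKindShiftSketch = 3;
const uint32_t kElementsKindMaskSketch = 0x1F << kElementsKindShiftSketch;

// Same order as the generated code: and_ with the mask, then shr by the shift.
inline uint32_t ElementsKindFromBitField2(uint32_t bit_field2) {
  return (bit_field2 & kElementsKindMaskSketch) >> kElementsKindShiftSketch;
}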
+
+
#undef __
} } // namespace v8::internal
diff --git a/deps/v8/src/ia32/code-stubs-ia32.h b/deps/v8/src/ia32/code-stubs-ia32.h
index 803a711de..006651c9c 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.h
+++ b/deps/v8/src/ia32/code-stubs-ia32.h
@@ -36,9 +36,13 @@ namespace v8 {
namespace internal {
+void ArrayNativeCode(MacroAssembler* masm,
+ bool construct_call,
+ Label* call_generic_code);
+
// Compute a transcendental math function natively, or call the
// TranscendentalCache runtime function.
-class TranscendentalCacheStub: public CodeStub {
+class TranscendentalCacheStub: public PlatformCodeStub {
public:
enum ArgumentType {
TAGGED = 0,
@@ -61,15 +65,17 @@ class TranscendentalCacheStub: public CodeStub {
};
-class StoreBufferOverflowStub: public CodeStub {
+class StoreBufferOverflowStub: public PlatformCodeStub {
public:
explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
- : save_doubles_(save_fp) { }
+ : save_doubles_(save_fp) {
+ ASSERT(CpuFeatures::IsSafeForSnapshot(SSE2) || save_fp == kDontSaveFPRegs);
+ }
void Generate(MacroAssembler* masm);
- virtual bool IsPregenerated() { return true; }
- static void GenerateFixedRegStubsAheadOfTime();
+ virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE { return true; }
+ static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
virtual bool SometimesSetsUpAFrame() { return false; }
private:
@@ -80,170 +86,6 @@ class StoreBufferOverflowStub: public CodeStub {
};
-class UnaryOpStub: public CodeStub {
- public:
- UnaryOpStub(Token::Value op,
- UnaryOverwriteMode mode,
- UnaryOpIC::TypeInfo operand_type = UnaryOpIC::UNINITIALIZED)
- : op_(op),
- mode_(mode),
- operand_type_(operand_type) {
- }
-
- private:
- Token::Value op_;
- UnaryOverwriteMode mode_;
-
- // Operand type information determined at runtime.
- UnaryOpIC::TypeInfo operand_type_;
-
- virtual void PrintName(StringStream* stream);
-
- class ModeBits: public BitField<UnaryOverwriteMode, 0, 1> {};
- class OpBits: public BitField<Token::Value, 1, 7> {};
- class OperandTypeInfoBits: public BitField<UnaryOpIC::TypeInfo, 8, 3> {};
-
- Major MajorKey() { return UnaryOp; }
- int MinorKey() {
- return ModeBits::encode(mode_)
- | OpBits::encode(op_)
- | OperandTypeInfoBits::encode(operand_type_);
- }
-
- // Note: A lot of the helper functions below will vanish when we use virtual
- // function instead of switch more often.
- void Generate(MacroAssembler* masm);
-
- void GenerateTypeTransition(MacroAssembler* masm);
-
- void GenerateSmiStub(MacroAssembler* masm);
- void GenerateSmiStubSub(MacroAssembler* masm);
- void GenerateSmiStubBitNot(MacroAssembler* masm);
- void GenerateSmiCodeSub(MacroAssembler* masm,
- Label* non_smi,
- Label* undo,
- Label* slow,
- Label::Distance non_smi_near = Label::kFar,
- Label::Distance undo_near = Label::kFar,
- Label::Distance slow_near = Label::kFar);
- void GenerateSmiCodeBitNot(MacroAssembler* masm,
- Label* non_smi,
- Label::Distance non_smi_near = Label::kFar);
- void GenerateSmiCodeUndo(MacroAssembler* masm);
-
- void GenerateHeapNumberStub(MacroAssembler* masm);
- void GenerateHeapNumberStubSub(MacroAssembler* masm);
- void GenerateHeapNumberStubBitNot(MacroAssembler* masm);
- void GenerateHeapNumberCodeSub(MacroAssembler* masm, Label* slow);
- void GenerateHeapNumberCodeBitNot(MacroAssembler* masm, Label* slow);
-
- void GenerateGenericStub(MacroAssembler* masm);
- void GenerateGenericStubSub(MacroAssembler* masm);
- void GenerateGenericStubBitNot(MacroAssembler* masm);
- void GenerateGenericCodeFallback(MacroAssembler* masm);
-
- virtual int GetCodeKind() { return Code::UNARY_OP_IC; }
-
- virtual InlineCacheState GetICState() {
- return UnaryOpIC::ToState(operand_type_);
- }
-
- virtual void FinishCode(Handle<Code> code) {
- code->set_unary_op_type(operand_type_);
- }
-};
-
-
-class BinaryOpStub: public CodeStub {
- public:
- BinaryOpStub(Token::Value op, OverwriteMode mode)
- : op_(op),
- mode_(mode),
- operands_type_(BinaryOpIC::UNINITIALIZED),
- result_type_(BinaryOpIC::UNINITIALIZED) {
- use_sse3_ = CpuFeatures::IsSupported(SSE3);
- ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
- }
-
- BinaryOpStub(
- int key,
- BinaryOpIC::TypeInfo operands_type,
- BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED)
- : op_(OpBits::decode(key)),
- mode_(ModeBits::decode(key)),
- use_sse3_(SSE3Bits::decode(key)),
- operands_type_(operands_type),
- result_type_(result_type) { }
-
- private:
- enum SmiCodeGenerateHeapNumberResults {
- ALLOW_HEAPNUMBER_RESULTS,
- NO_HEAPNUMBER_RESULTS
- };
-
- Token::Value op_;
- OverwriteMode mode_;
- bool use_sse3_;
-
- // Operand type information determined at runtime.
- BinaryOpIC::TypeInfo operands_type_;
- BinaryOpIC::TypeInfo result_type_;
-
- virtual void PrintName(StringStream* stream);
-
- // Minor key encoding in 16 bits RRRTTTSOOOOOOOMM.
- class ModeBits: public BitField<OverwriteMode, 0, 2> {};
- class OpBits: public BitField<Token::Value, 2, 7> {};
- class SSE3Bits: public BitField<bool, 9, 1> {};
- class OperandTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 10, 3> {};
- class ResultTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 13, 3> {};
-
- Major MajorKey() { return BinaryOp; }
- int MinorKey() {
- return OpBits::encode(op_)
- | ModeBits::encode(mode_)
- | SSE3Bits::encode(use_sse3_)
- | OperandTypeInfoBits::encode(operands_type_)
- | ResultTypeInfoBits::encode(result_type_);
- }
-
- void Generate(MacroAssembler* masm);
- void GenerateGeneric(MacroAssembler* masm);
- void GenerateSmiCode(MacroAssembler* masm,
- Label* slow,
- SmiCodeGenerateHeapNumberResults heapnumber_results);
- void GenerateLoadArguments(MacroAssembler* masm);
- void GenerateReturn(MacroAssembler* masm);
- void GenerateUninitializedStub(MacroAssembler* masm);
- void GenerateSmiStub(MacroAssembler* masm);
- void GenerateInt32Stub(MacroAssembler* masm);
- void GenerateHeapNumberStub(MacroAssembler* masm);
- void GenerateOddballStub(MacroAssembler* masm);
- void GenerateStringStub(MacroAssembler* masm);
- void GenerateBothStringStub(MacroAssembler* masm);
- void GenerateGenericStub(MacroAssembler* masm);
- void GenerateAddStrings(MacroAssembler* masm);
-
- void GenerateHeapResultAllocation(MacroAssembler* masm, Label* alloc_failure);
- void GenerateRegisterArgsPush(MacroAssembler* masm);
- void GenerateTypeTransition(MacroAssembler* masm);
- void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm);
-
- virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
-
- virtual InlineCacheState GetICState() {
- return BinaryOpIC::ToState(operands_type_);
- }
-
- virtual void FinishCode(Handle<Code> code) {
- code->set_binary_op_type(operands_type_);
- code->set_binary_op_result_type(result_type_);
- }
-
- friend class CodeGenerator;
-};
-
-
class StringHelper : public AllStatic {
public:
// Generate code for copying characters using a simple loop. This should only
@@ -267,15 +109,15 @@ class StringHelper : public AllStatic {
Register scratch, // Neither of above.
bool ascii);
- // Probe the symbol table for a two character string. If the string
+ // Probe the string table for a two character string. If the string
// requires non-standard hashing a jump to the label not_probed is
// performed and registers c1 and c2 are preserved. In all other
// cases they are clobbered. If the string is not found by probing a
// jump to the label not_found is performed. This jump does not
- // guarantee that the string is not in the symbol table. If the
+ // guarantee that the string is not in the string table. If the
// string is found the code falls through with the string in
// register eax.
- static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
+ static void GenerateTwoCharacterStringTableProbe(MacroAssembler* masm,
Register c1,
Register c2,
Register scratch1,
@@ -302,20 +144,7 @@ class StringHelper : public AllStatic {
};
-// Flag that indicates how to generate code for the stub StringAddStub.
-enum StringAddFlags {
- NO_STRING_ADD_FLAGS = 0,
- // Omit left string check in stub (left is definitely a string).
- NO_STRING_CHECK_LEFT_IN_STUB = 1 << 0,
- // Omit right string check in stub (right is definitely a string).
- NO_STRING_CHECK_RIGHT_IN_STUB = 1 << 1,
- // Omit both string checks in stub.
- NO_STRING_CHECK_IN_STUB =
- NO_STRING_CHECK_LEFT_IN_STUB | NO_STRING_CHECK_RIGHT_IN_STUB
-};
-
-
-class StringAddStub: public CodeStub {
+class StringAddStub: public PlatformCodeStub {
public:
explicit StringAddStub(StringAddFlags flags) : flags_(flags) {}
@@ -333,11 +162,14 @@ class StringAddStub: public CodeStub {
Register scratch3,
Label* slow);
+ void GenerateRegisterArgsPush(MacroAssembler* masm);
+ void GenerateRegisterArgsPop(MacroAssembler* masm, Register temp);
+
const StringAddFlags flags_;
};
-class SubStringStub: public CodeStub {
+class SubStringStub: public PlatformCodeStub {
public:
SubStringStub() {}
@@ -349,7 +181,7 @@ class SubStringStub: public CodeStub {
};
-class StringCompareStub: public CodeStub {
+class StringCompareStub: public PlatformCodeStub {
public:
StringCompareStub() { }
@@ -385,39 +217,14 @@ class StringCompareStub: public CodeStub {
};
-class NumberToStringStub: public CodeStub {
- public:
- NumberToStringStub() { }
-
- // Generate code to do a lookup in the number string cache. If the number in
- // the register object is found in the cache the generated code falls through
- // with the result in the result register. The object and the result register
- // can be the same. If the number is not found in the cache the code jumps to
- // the label not_found with only the content of register object unchanged.
- static void GenerateLookupNumberStringCache(MacroAssembler* masm,
- Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- bool object_is_smi,
- Label* not_found);
-
- private:
- Major MajorKey() { return NumberToString; }
- int MinorKey() { return 0; }
-
- void Generate(MacroAssembler* masm);
-};
-
-
-class StringDictionaryLookupStub: public CodeStub {
+class NameDictionaryLookupStub: public PlatformCodeStub {
public:
enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
- StringDictionaryLookupStub(Register dictionary,
- Register result,
- Register index,
- LookupMode mode)
+ NameDictionaryLookupStub(Register dictionary,
+ Register result,
+ Register index,
+ LookupMode mode)
: dictionary_(dictionary), result_(result), index_(index), mode_(mode) { }
void Generate(MacroAssembler* masm);
@@ -426,7 +233,7 @@ class StringDictionaryLookupStub: public CodeStub {
Label* miss,
Label* done,
Register properties,
- Handle<String> name,
+ Handle<Name> name,
Register r0);
static void GeneratePositiveLookup(MacroAssembler* masm,
@@ -444,14 +251,14 @@ class StringDictionaryLookupStub: public CodeStub {
static const int kTotalProbes = 20;
static const int kCapacityOffset =
- StringDictionary::kHeaderSize +
- StringDictionary::kCapacityIndex * kPointerSize;
+ NameDictionary::kHeaderSize +
+ NameDictionary::kCapacityIndex * kPointerSize;
static const int kElementsStartOffset =
- StringDictionary::kHeaderSize +
- StringDictionary::kElementsStartIndex * kPointerSize;
+ NameDictionary::kHeaderSize +
+ NameDictionary::kElementsStartIndex * kPointerSize;
- Major MajorKey() { return StringDictionaryLookup; }
+ Major MajorKey() { return NameDictionaryLookup; }
int MinorKey() {
return DictionaryBits::encode(dictionary_.code()) |
@@ -472,7 +279,7 @@ class StringDictionaryLookupStub: public CodeStub {
};
-class RecordWriteStub: public CodeStub {
+class RecordWriteStub: public PlatformCodeStub {
public:
RecordWriteStub(Register object,
Register value,
@@ -487,6 +294,7 @@ class RecordWriteStub: public CodeStub {
regs_(object, // An input reg.
address, // An input reg.
value) { // One scratch reg.
+ ASSERT(CpuFeatures::IsSafeForSnapshot(SSE2) || fp_mode == kDontSaveFPRegs);
}
enum Mode {
@@ -495,8 +303,8 @@ class RecordWriteStub: public CodeStub {
INCREMENTAL_COMPACTION
};
- virtual bool IsPregenerated();
- static void GenerateFixedRegStubsAheadOfTime();
+ virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE;
+ static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
virtual bool SometimesSetsUpAFrame() { return false; }
static const byte kTwoByteNopInstruction = 0x3c; // Cmpb al, #imm8.
@@ -630,13 +438,13 @@ class RecordWriteStub: public CodeStub {
if (!scratch0_.is(eax) && !scratch1_.is(eax)) masm->push(eax);
if (!scratch0_.is(edx) && !scratch1_.is(edx)) masm->push(edx);
if (mode == kSaveFPRegs) {
- CpuFeatures::Scope scope(SSE2);
+ CpuFeatureScope scope(masm, SSE2);
masm->sub(esp,
Immediate(kDoubleSize * (XMMRegister::kNumRegisters - 1)));
// Save all XMM registers except XMM0.
for (int i = XMMRegister::kNumRegisters - 1; i > 0; i--) {
XMMRegister reg = XMMRegister::from_code(i);
- masm->movdbl(Operand(esp, (i - 1) * kDoubleSize), reg);
+ masm->movsd(Operand(esp, (i - 1) * kDoubleSize), reg);
}
}
}
@@ -644,11 +452,11 @@ class RecordWriteStub: public CodeStub {
inline void RestoreCallerSaveRegisters(MacroAssembler*masm,
SaveFPRegsMode mode) {
if (mode == kSaveFPRegs) {
- CpuFeatures::Scope scope(SSE2);
+ CpuFeatureScope scope(masm, SSE2);
// Restore all XMM registers except XMM0.
for (int i = XMMRegister::kNumRegisters - 1; i > 0; i--) {
XMMRegister reg = XMMRegister::from_code(i);
- masm->movdbl(reg, Operand(esp, (i - 1) * kDoubleSize));
+ masm->movsd(reg, Operand(esp, (i - 1) * kDoubleSize));
}
masm->add(esp,
Immediate(kDoubleSize * (XMMRegister::kNumRegisters - 1)));
@@ -675,7 +483,7 @@ class RecordWriteStub: public CodeStub {
Register GetRegThatIsNotEcxOr(Register r1,
Register r2,
Register r3) {
- for (int i = 0; i < Register::kNumAllocatableRegisters; i++) {
+ for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
Register candidate = Register::FromAllocationIndex(i);
if (candidate.is(ecx)) continue;
if (candidate.is(r1)) continue;
diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc
index eb6868729..d09a85f8b 100644
--- a/deps/v8/src/ia32/codegen-ia32.cc
+++ b/deps/v8/src/ia32/codegen-ia32.cc
@@ -27,7 +27,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_IA32)
+#if V8_TARGET_ARCH_IA32
#include "codegen.h"
#include "heap.h"
@@ -94,7 +94,45 @@ UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
CodeDesc desc;
masm.GetCode(&desc);
- ASSERT(desc.reloc_size == 0);
+ ASSERT(!RelocInfo::RequiresRelocation(desc));
+
+ CPU::FlushICache(buffer, actual_size);
+ OS::ProtectCode(buffer, actual_size);
+ return FUNCTION_CAST<UnaryMathFunction>(buffer);
+}
+
+
+UnaryMathFunction CreateExpFunction() {
+ if (!CpuFeatures::IsSupported(SSE2)) return &exp;
+ if (!FLAG_fast_math) return &exp;
+ size_t actual_size;
+ byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
+ if (buffer == NULL) return &exp;
+ ExternalReference::InitializeMathExpData();
+
+ MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+ // esp[1 * kPointerSize]: raw double input
+ // esp[0 * kPointerSize]: return address
+ {
+ CpuFeatureScope use_sse2(&masm, SSE2);
+ XMMRegister input = xmm1;
+ XMMRegister result = xmm2;
+ __ movsd(input, Operand(esp, 1 * kPointerSize));
+ __ push(eax);
+ __ push(ebx);
+
+ MathExpGenerator::EmitMathExp(&masm, input, result, xmm0, eax, ebx);
+
+ __ pop(ebx);
+ __ pop(eax);
+ __ movsd(Operand(esp, 1 * kPointerSize), result);
+ __ fld_d(Operand(esp, 1 * kPointerSize));
+ __ Ret();
+ }
+
+ CodeDesc desc;
+ masm.GetCode(&desc);
+ ASSERT(!RelocInfo::RequiresRelocation(desc));
CPU::FlushICache(buffer, actual_size);
OS::ProtectCode(buffer, actual_size);
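For callers, the value returned by CreateExpFunction above is just a double-to-double function pointer, falling back to the C library exp when SSE2 or --fast-math is unavailable. A hedged usage sketch (the UnaryMathFunction typedef is assumed to be double (*)(double); ExpFallback stands in for the generated code):

#include <cmath>
#include <cstdio>

typedef double (*UnaryMathFunction)(double x);  // assumed typedef

static double ExpFallback(double x) { return std::exp(x); }

// Stand-in for CreateExpFunction(): returns the fast path when available,
// otherwise the library fallback, mirroring the early returns above.
static UnaryMathFunction GetExpFunctionSketch() { return &ExpFallback; }

int main() {
  UnaryMathFunction fast_exp = GetExpFunctionSketch();
  std::printf("%f\n", fast_exp(1.0));  // ~2.718282
  return 0;
}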
@@ -116,10 +154,10 @@ UnaryMathFunction CreateSqrtFunction() {
// esp[0 * kPointerSize]: return address
// Move double input into registers.
{
- CpuFeatures::Scope use_sse2(SSE2);
- __ movdbl(xmm0, Operand(esp, 1 * kPointerSize));
+ CpuFeatureScope use_sse2(&masm, SSE2);
+ __ movsd(xmm0, Operand(esp, 1 * kPointerSize));
__ sqrtsd(xmm0, xmm0);
- __ movdbl(Operand(esp, 1 * kPointerSize), xmm0);
+ __ movsd(Operand(esp, 1 * kPointerSize), xmm0);
// Load result into floating point register as return value.
__ fld_d(Operand(esp, 1 * kPointerSize));
__ Ret();
@@ -127,7 +165,7 @@ UnaryMathFunction CreateSqrtFunction() {
CodeDesc desc;
masm.GetCode(&desc);
- ASSERT(desc.reloc_size == 0);
+ ASSERT(!RelocInfo::RequiresRelocation(desc));
CPU::FlushICache(buffer, actual_size);
OS::ProtectCode(buffer, actual_size);
@@ -135,21 +173,101 @@ UnaryMathFunction CreateSqrtFunction() {
}
-static void MemCopyWrapper(void* dest, const void* src, size_t size) {
- memcpy(dest, src, size);
+// Helper functions for CreateMemMoveFunction.
+#undef __
+#define __ ACCESS_MASM(masm)
+
+enum Direction { FORWARD, BACKWARD };
+enum Alignment { MOVE_ALIGNED, MOVE_UNALIGNED };
+
+// Expects registers:
+// esi - source, aligned if alignment == MOVE_ALIGNED
+// edi - destination, always aligned
+// ecx - count (copy size in bytes)
+// edx - loop count (number of 64 byte chunks)
+void MemMoveEmitMainLoop(MacroAssembler* masm,
+ Label* move_last_15,
+ Direction direction,
+ Alignment alignment) {
+ Register src = esi;
+ Register dst = edi;
+ Register count = ecx;
+ Register loop_count = edx;
+ Label loop, move_last_31, move_last_63;
+ __ cmp(loop_count, 0);
+ __ j(equal, &move_last_63);
+ __ bind(&loop);
+ // Main loop. Copy in 64 byte chunks.
+ if (direction == BACKWARD) __ sub(src, Immediate(0x40));
+ __ movdq(alignment == MOVE_ALIGNED, xmm0, Operand(src, 0x00));
+ __ movdq(alignment == MOVE_ALIGNED, xmm1, Operand(src, 0x10));
+ __ movdq(alignment == MOVE_ALIGNED, xmm2, Operand(src, 0x20));
+ __ movdq(alignment == MOVE_ALIGNED, xmm3, Operand(src, 0x30));
+ if (direction == FORWARD) __ add(src, Immediate(0x40));
+ if (direction == BACKWARD) __ sub(dst, Immediate(0x40));
+ __ movdqa(Operand(dst, 0x00), xmm0);
+ __ movdqa(Operand(dst, 0x10), xmm1);
+ __ movdqa(Operand(dst, 0x20), xmm2);
+ __ movdqa(Operand(dst, 0x30), xmm3);
+ if (direction == FORWARD) __ add(dst, Immediate(0x40));
+ __ dec(loop_count);
+ __ j(not_zero, &loop);
+ // At most 63 bytes left to copy.
+ __ bind(&move_last_63);
+ __ test(count, Immediate(0x20));
+ __ j(zero, &move_last_31);
+ if (direction == BACKWARD) __ sub(src, Immediate(0x20));
+ __ movdq(alignment == MOVE_ALIGNED, xmm0, Operand(src, 0x00));
+ __ movdq(alignment == MOVE_ALIGNED, xmm1, Operand(src, 0x10));
+ if (direction == FORWARD) __ add(src, Immediate(0x20));
+ if (direction == BACKWARD) __ sub(dst, Immediate(0x20));
+ __ movdqa(Operand(dst, 0x00), xmm0);
+ __ movdqa(Operand(dst, 0x10), xmm1);
+ if (direction == FORWARD) __ add(dst, Immediate(0x20));
+ // At most 31 bytes left to copy.
+ __ bind(&move_last_31);
+ __ test(count, Immediate(0x10));
+ __ j(zero, move_last_15);
+ if (direction == BACKWARD) __ sub(src, Immediate(0x10));
+ __ movdq(alignment == MOVE_ALIGNED, xmm0, Operand(src, 0));
+ if (direction == FORWARD) __ add(src, Immediate(0x10));
+ if (direction == BACKWARD) __ sub(dst, Immediate(0x10));
+ __ movdqa(Operand(dst, 0), xmm0);
+ if (direction == FORWARD) __ add(dst, Immediate(0x10));
+}
+
+
+void MemMoveEmitPopAndReturn(MacroAssembler* masm) {
+ __ pop(esi);
+ __ pop(edi);
+ __ ret(0);
}
-OS::MemCopyFunction CreateMemCopyFunction() {
+#undef __
+#define __ masm.
+
+
+class LabelConverter {
+ public:
+ explicit LabelConverter(byte* buffer) : buffer_(buffer) {}
+ int32_t address(Label* l) const {
+ return reinterpret_cast<int32_t>(buffer_) + l->pos();
+ }
+ private:
+ byte* buffer_;
+};
+
+
+OS::MemMoveFunction CreateMemMoveFunction() {
size_t actual_size;
// Allocate buffer in executable space.
- byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
- &actual_size,
- true));
- if (buffer == NULL) return &MemCopyWrapper;
+ byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
+ if (buffer == NULL) return NULL;
MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+ LabelConverter conv(buffer);
- // Generated code is put into a fixed, unmovable, buffer, and not into
+ // Generated code is put into a fixed, unmovable buffer, and not into
// the V8 heap. We can't, and don't, refer to any relocatable addresses
// (e.g. the JavaScript nan-object).
@@ -165,185 +283,369 @@ OS::MemCopyFunction CreateMemCopyFunction() {
const int kSourceOffset = 2 * kPointerSize;
const int kSizeOffset = 3 * kPointerSize;
+ // When copying up to this many bytes, use special "small" handlers.
+ const size_t kSmallCopySize = 8;
+ // When copying up to this many bytes, use special "medium" handlers.
+ const size_t kMediumCopySize = 63;
+ // When the non-overlapping region of src and dst is smaller than this,
+ // use a more careful (slightly slower) implementation.
+ const size_t kMinMoveDistance = 16;
+ // Note that these values are dictated by the implementation below;
+ // do not just change them and hope things will work!
+
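Taken together, the three constants above split every memmove call into one of three strategies before any bytes are copied. A small sketch of that classification (plain C++, illustrative names):

#include <cstddef>

enum CopyStrategySketch { kSmallHandlers, kMediumHandlers, kMainLoop };

// Mirrors the cmp / j(below_equal, ...) dispatch emitted below:
//   size <= 8   -> specialized small handlers
//   size <= 63  -> specialized medium handlers
//   otherwise   -> 64-byte SSE2 main loop with alignment fix-up
inline CopyStrategySketch Classify(size_t size) {
  if (size <= 8) return kSmallHandlers;    // kSmallCopySize
  if (size <= 63) return kMediumHandlers;  // kMediumCopySize
  return kMainLoop;
}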
int stack_offset = 0; // Update if we change the stack height.
- if (FLAG_debug_code) {
- __ cmp(Operand(esp, kSizeOffset + stack_offset),
- Immediate(OS::kMinComplexMemCopy));
- Label ok;
- __ j(greater_equal, &ok);
- __ int3();
- __ bind(&ok);
- }
+ Label backward, backward_much_overlap;
+ Label forward_much_overlap, small_size, medium_size, pop_and_return;
+ __ push(edi);
+ __ push(esi);
+ stack_offset += 2 * kPointerSize;
+ Register dst = edi;
+ Register src = esi;
+ Register count = ecx;
+ Register loop_count = edx;
+ __ mov(dst, Operand(esp, stack_offset + kDestinationOffset));
+ __ mov(src, Operand(esp, stack_offset + kSourceOffset));
+ __ mov(count, Operand(esp, stack_offset + kSizeOffset));
+
+ __ cmp(dst, src);
+ __ j(equal, &pop_and_return);
+
if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope enable(SSE2);
- __ push(edi);
- __ push(esi);
- stack_offset += 2 * kPointerSize;
- Register dst = edi;
- Register src = esi;
- Register count = ecx;
- __ mov(dst, Operand(esp, stack_offset + kDestinationOffset));
- __ mov(src, Operand(esp, stack_offset + kSourceOffset));
- __ mov(count, Operand(esp, stack_offset + kSizeOffset));
-
-
- __ movdqu(xmm0, Operand(src, 0));
- __ movdqu(Operand(dst, 0), xmm0);
- __ mov(edx, dst);
- __ and_(edx, 0xF);
- __ neg(edx);
- __ add(edx, Immediate(16));
- __ add(dst, edx);
- __ add(src, edx);
- __ sub(count, edx);
-
- // edi is now aligned. Check if esi is also aligned.
- Label unaligned_source;
- __ test(src, Immediate(0x0F));
- __ j(not_zero, &unaligned_source);
+ CpuFeatureScope sse2_scope(&masm, SSE2);
+ __ prefetch(Operand(src, 0), 1);
+ __ cmp(count, kSmallCopySize);
+ __ j(below_equal, &small_size);
+ __ cmp(count, kMediumCopySize);
+ __ j(below_equal, &medium_size);
+ __ cmp(dst, src);
+ __ j(above, &backward);
+
{
+ // |dst| is a lower address than |src|. Copy front-to-back.
+ Label unaligned_source, move_last_15, skip_last_move;
+ __ mov(eax, src);
+ __ sub(eax, dst);
+ __ cmp(eax, kMinMoveDistance);
+ __ j(below, &forward_much_overlap);
+ // Copy first 16 bytes.
+ __ movdqu(xmm0, Operand(src, 0));
+ __ movdqu(Operand(dst, 0), xmm0);
+ // Determine distance to alignment: 16 - (dst & 0xF).
+ __ mov(edx, dst);
+ __ and_(edx, 0xF);
+ __ neg(edx);
+ __ add(edx, Immediate(16));
+ __ add(dst, edx);
+ __ add(src, edx);
+ __ sub(count, edx);
+ // dst is now aligned. Main copy loop.
+ __ mov(loop_count, count);
+ __ shr(loop_count, 6);
+ // Check if src is also aligned.
+ __ test(src, Immediate(0xF));
+ __ j(not_zero, &unaligned_source);
// Copy loop for aligned source and destination.
- __ mov(edx, count);
- Register loop_count = ecx;
- Register count = edx;
- __ shr(loop_count, 5);
- {
- // Main copy loop.
- Label loop;
- __ bind(&loop);
- __ prefetch(Operand(src, 0x20), 1);
- __ movdqa(xmm0, Operand(src, 0x00));
- __ movdqa(xmm1, Operand(src, 0x10));
- __ add(src, Immediate(0x20));
-
- __ movdqa(Operand(dst, 0x00), xmm0);
- __ movdqa(Operand(dst, 0x10), xmm1);
- __ add(dst, Immediate(0x20));
-
- __ dec(loop_count);
- __ j(not_zero, &loop);
- }
-
- // At most 31 bytes to copy.
- Label move_less_16;
- __ test(count, Immediate(0x10));
- __ j(zero, &move_less_16);
- __ movdqa(xmm0, Operand(src, 0));
- __ add(src, Immediate(0x10));
- __ movdqa(Operand(dst, 0), xmm0);
- __ add(dst, Immediate(0x10));
- __ bind(&move_less_16);
-
+ MemMoveEmitMainLoop(&masm, &move_last_15, FORWARD, MOVE_ALIGNED);
+ // At most 15 bytes to copy. Copy 16 bytes at the end of the region.
+ __ bind(&move_last_15);
__ and_(count, 0xF);
+ __ j(zero, &skip_last_move, Label::kNear);
__ movdqu(xmm0, Operand(src, count, times_1, -0x10));
__ movdqu(Operand(dst, count, times_1, -0x10), xmm0);
+ __ bind(&skip_last_move);
+ MemMoveEmitPopAndReturn(&masm);
- __ mov(eax, Operand(esp, stack_offset + kDestinationOffset));
- __ pop(esi);
- __ pop(edi);
- __ ret(0);
- }
- __ Align(16);
- {
// Copy loop for unaligned source and aligned destination.
- // If source is not aligned, we can't read it as efficiently.
__ bind(&unaligned_source);
- __ mov(edx, ecx);
- Register loop_count = ecx;
- Register count = edx;
- __ shr(loop_count, 5);
- {
- // Main copy loop
- Label loop;
- __ bind(&loop);
- __ prefetch(Operand(src, 0x20), 1);
- __ movdqu(xmm0, Operand(src, 0x00));
- __ movdqu(xmm1, Operand(src, 0x10));
- __ add(src, Immediate(0x20));
-
- __ movdqa(Operand(dst, 0x00), xmm0);
- __ movdqa(Operand(dst, 0x10), xmm1);
- __ add(dst, Immediate(0x20));
-
- __ dec(loop_count);
- __ j(not_zero, &loop);
- }
+ MemMoveEmitMainLoop(&masm, &move_last_15, FORWARD, MOVE_UNALIGNED);
+ __ jmp(&move_last_15);
+
+ // Less than kMinMoveDistance offset between dst and src.
+ Label loop_until_aligned, last_15_much_overlap;
+ __ bind(&loop_until_aligned);
+ __ mov_b(eax, Operand(src, 0));
+ __ inc(src);
+ __ mov_b(Operand(dst, 0), eax);
+ __ inc(dst);
+ __ dec(count);
+ __ bind(&forward_much_overlap); // Entry point into this block.
+ __ test(dst, Immediate(0xF));
+ __ j(not_zero, &loop_until_aligned);
+ // dst is now aligned, src can't be. Main copy loop.
+ __ mov(loop_count, count);
+ __ shr(loop_count, 6);
+ MemMoveEmitMainLoop(&masm, &last_15_much_overlap,
+ FORWARD, MOVE_UNALIGNED);
+ __ bind(&last_15_much_overlap);
+ __ and_(count, 0xF);
+ __ j(zero, &pop_and_return);
+ __ cmp(count, kSmallCopySize);
+ __ j(below_equal, &small_size);
+ __ jmp(&medium_size);
+ }
- // At most 31 bytes to copy.
- Label move_less_16;
- __ test(count, Immediate(0x10));
- __ j(zero, &move_less_16);
+ {
+ // |dst| is a higher address than |src|. Copy backwards.
+ Label unaligned_source, move_first_15, skip_last_move;
+ __ bind(&backward);
+ // |dst| and |src| always point to the end of what's left to copy.
+ __ add(dst, count);
+ __ add(src, count);
+ __ mov(eax, dst);
+ __ sub(eax, src);
+ __ cmp(eax, kMinMoveDistance);
+ __ j(below, &backward_much_overlap);
+ // Copy last 16 bytes.
+ __ movdqu(xmm0, Operand(src, -0x10));
+ __ movdqu(Operand(dst, -0x10), xmm0);
+ // Find distance to alignment: dst & 0xF
+ __ mov(edx, dst);
+ __ and_(edx, 0xF);
+ __ sub(dst, edx);
+ __ sub(src, edx);
+ __ sub(count, edx);
+ // dst is now aligned. Main copy loop.
+ __ mov(loop_count, count);
+ __ shr(loop_count, 6);
+ // Check if src is also aligned.
+ __ test(src, Immediate(0xF));
+ __ j(not_zero, &unaligned_source);
+ // Copy loop for aligned source and destination.
+ MemMoveEmitMainLoop(&masm, &move_first_15, BACKWARD, MOVE_ALIGNED);
+ // At most 15 bytes to copy. Copy 16 bytes at the beginning of the region.
+ __ bind(&move_first_15);
+ __ and_(count, 0xF);
+ __ j(zero, &skip_last_move, Label::kNear);
+ __ sub(src, count);
+ __ sub(dst, count);
__ movdqu(xmm0, Operand(src, 0));
- __ add(src, Immediate(0x10));
- __ movdqa(Operand(dst, 0), xmm0);
- __ add(dst, Immediate(0x10));
- __ bind(&move_less_16);
-
- // At most 15 bytes to copy. Copy 16 bytes at end of string.
- __ and_(count, 0x0F);
- __ movdqu(xmm0, Operand(src, count, times_1, -0x10));
- __ movdqu(Operand(dst, count, times_1, -0x10), xmm0);
+ __ movdqu(Operand(dst, 0), xmm0);
+ __ bind(&skip_last_move);
+ MemMoveEmitPopAndReturn(&masm);
- __ mov(eax, Operand(esp, stack_offset + kDestinationOffset));
- __ pop(esi);
- __ pop(edi);
- __ ret(0);
+ // Copy loop for unaligned source and aligned destination.
+ __ bind(&unaligned_source);
+ MemMoveEmitMainLoop(&masm, &move_first_15, BACKWARD, MOVE_UNALIGNED);
+ __ jmp(&move_first_15);
+
+ // Less than kMinMoveDistance offset between dst and src.
+ Label loop_until_aligned, first_15_much_overlap;
+ __ bind(&loop_until_aligned);
+ __ dec(src);
+ __ dec(dst);
+ __ mov_b(eax, Operand(src, 0));
+ __ mov_b(Operand(dst, 0), eax);
+ __ dec(count);
+ __ bind(&backward_much_overlap); // Entry point into this block.
+ __ test(dst, Immediate(0xF));
+ __ j(not_zero, &loop_until_aligned);
+ // dst is now aligned, src can't be. Main copy loop.
+ __ mov(loop_count, count);
+ __ shr(loop_count, 6);
+ MemMoveEmitMainLoop(&masm, &first_15_much_overlap,
+ BACKWARD, MOVE_UNALIGNED);
+ __ bind(&first_15_much_overlap);
+ __ and_(count, 0xF);
+ __ j(zero, &pop_and_return);
+ // Small/medium handlers expect dst/src to point to the beginning.
+ __ sub(dst, count);
+ __ sub(src, count);
+ __ cmp(count, kSmallCopySize);
+ __ j(below_equal, &small_size);
+ __ jmp(&medium_size);
+ }
+ {
+ // Special handlers for 9 <= copy_size < 64. No assumptions about
+ // alignment or move distance, so all reads must be unaligned and
+ // must happen before any writes.
+ Label medium_handlers, f9_16, f17_32, f33_48, f49_63;
+
+ __ bind(&f9_16);
+ __ movsd(xmm0, Operand(src, 0));
+ __ movsd(xmm1, Operand(src, count, times_1, -8));
+ __ movsd(Operand(dst, 0), xmm0);
+ __ movsd(Operand(dst, count, times_1, -8), xmm1);
+ MemMoveEmitPopAndReturn(&masm);
+
+ __ bind(&f17_32);
+ __ movdqu(xmm0, Operand(src, 0));
+ __ movdqu(xmm1, Operand(src, count, times_1, -0x10));
+ __ movdqu(Operand(dst, 0x00), xmm0);
+ __ movdqu(Operand(dst, count, times_1, -0x10), xmm1);
+ MemMoveEmitPopAndReturn(&masm);
+
+ __ bind(&f33_48);
+ __ movdqu(xmm0, Operand(src, 0x00));
+ __ movdqu(xmm1, Operand(src, 0x10));
+ __ movdqu(xmm2, Operand(src, count, times_1, -0x10));
+ __ movdqu(Operand(dst, 0x00), xmm0);
+ __ movdqu(Operand(dst, 0x10), xmm1);
+ __ movdqu(Operand(dst, count, times_1, -0x10), xmm2);
+ MemMoveEmitPopAndReturn(&masm);
+
+ __ bind(&f49_63);
+ __ movdqu(xmm0, Operand(src, 0x00));
+ __ movdqu(xmm1, Operand(src, 0x10));
+ __ movdqu(xmm2, Operand(src, 0x20));
+ __ movdqu(xmm3, Operand(src, count, times_1, -0x10));
+ __ movdqu(Operand(dst, 0x00), xmm0);
+ __ movdqu(Operand(dst, 0x10), xmm1);
+ __ movdqu(Operand(dst, 0x20), xmm2);
+ __ movdqu(Operand(dst, count, times_1, -0x10), xmm3);
+ MemMoveEmitPopAndReturn(&masm);
+
+ __ bind(&medium_handlers);
+ __ dd(conv.address(&f9_16));
+ __ dd(conv.address(&f17_32));
+ __ dd(conv.address(&f33_48));
+ __ dd(conv.address(&f49_63));
+
+ __ bind(&medium_size); // Entry point into this block.
+ __ mov(eax, count);
+ __ dec(eax);
+ __ shr(eax, 4);
+ if (FLAG_debug_code) {
+ Label ok;
+ __ cmp(eax, 3);
+ __ j(below_equal, &ok);
+ __ int3();
+ __ bind(&ok);
+ }
+ __ mov(eax, Operand(eax, times_4, conv.address(&medium_handlers)));
+ __ jmp(eax);
+ }
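The index computed above, (count - 1) >> 4, buckets the medium sizes onto the four handlers. A standalone arithmetic check (the values follow directly from the shift):

#include <cassert>
#include <cstddef>

// For 9 <= count <= 63, (count - 1) >> 4 selects one of the four handlers:
// 0 -> f9_16, 1 -> f17_32, 2 -> f33_48, 3 -> f49_63.
inline int MediumHandlerIndex(size_t count) {
  return static_cast<int>((count - 1) >> 4);
}

int main() {
  assert(MediumHandlerIndex(9) == 0 && MediumHandlerIndex(16) == 0);
  assert(MediumHandlerIndex(17) == 1 && MediumHandlerIndex(32) == 1);
  assert(MediumHandlerIndex(33) == 2 && MediumHandlerIndex(48) == 2);
  assert(MediumHandlerIndex(49) == 3 && MediumHandlerIndex(63) == 3);
  return 0;
}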
+ {
+ // Specialized copiers for copy_size <= 8 bytes.
+ Label small_handlers, f0, f1, f2, f3, f4, f5_8;
+ __ bind(&f0);
+ MemMoveEmitPopAndReturn(&masm);
+
+ __ bind(&f1);
+ __ mov_b(eax, Operand(src, 0));
+ __ mov_b(Operand(dst, 0), eax);
+ MemMoveEmitPopAndReturn(&masm);
+
+ __ bind(&f2);
+ __ mov_w(eax, Operand(src, 0));
+ __ mov_w(Operand(dst, 0), eax);
+ MemMoveEmitPopAndReturn(&masm);
+
+ __ bind(&f3);
+ __ mov_w(eax, Operand(src, 0));
+ __ mov_b(edx, Operand(src, 2));
+ __ mov_w(Operand(dst, 0), eax);
+ __ mov_b(Operand(dst, 2), edx);
+ MemMoveEmitPopAndReturn(&masm);
+
+ __ bind(&f4);
+ __ mov(eax, Operand(src, 0));
+ __ mov(Operand(dst, 0), eax);
+ MemMoveEmitPopAndReturn(&masm);
+
+ __ bind(&f5_8);
+ __ mov(eax, Operand(src, 0));
+ __ mov(edx, Operand(src, count, times_1, -4));
+ __ mov(Operand(dst, 0), eax);
+ __ mov(Operand(dst, count, times_1, -4), edx);
+ MemMoveEmitPopAndReturn(&masm);
+
+ __ bind(&small_handlers);
+ __ dd(conv.address(&f0));
+ __ dd(conv.address(&f1));
+ __ dd(conv.address(&f2));
+ __ dd(conv.address(&f3));
+ __ dd(conv.address(&f4));
+ __ dd(conv.address(&f5_8));
+ __ dd(conv.address(&f5_8));
+ __ dd(conv.address(&f5_8));
+ __ dd(conv.address(&f5_8));
+
+ __ bind(&small_size); // Entry point into this block.
+ if (FLAG_debug_code) {
+ Label ok;
+ __ cmp(count, 8);
+ __ j(below_equal, &ok);
+ __ int3();
+ __ bind(&ok);
+ }
+ __ mov(eax, Operand(count, times_4, conv.address(&small_handlers)));
+ __ jmp(eax);
}
-
} else {
- // SSE2 not supported. Unlikely to happen in practice.
- __ push(edi);
- __ push(esi);
- stack_offset += 2 * kPointerSize;
- __ cld();
- Register dst = edi;
- Register src = esi;
- Register count = ecx;
- __ mov(dst, Operand(esp, stack_offset + kDestinationOffset));
- __ mov(src, Operand(esp, stack_offset + kSourceOffset));
- __ mov(count, Operand(esp, stack_offset + kSizeOffset));
-
- // Copy the first word.
- __ mov(eax, Operand(src, 0));
- __ mov(Operand(dst, 0), eax);
-
- // Increment src,dstso that dst is aligned.
- __ mov(edx, dst);
- __ and_(edx, 0x03);
- __ neg(edx);
- __ add(edx, Immediate(4)); // edx = 4 - (dst & 3)
- __ add(dst, edx);
- __ add(src, edx);
- __ sub(count, edx);
- // edi is now aligned, ecx holds number of remaning bytes to copy.
-
- __ mov(edx, count);
- count = edx;
- __ shr(ecx, 2); // Make word count instead of byte count.
- __ rep_movs();
-
- // At most 3 bytes left to copy. Copy 4 bytes at end of string.
- __ and_(count, 3);
- __ mov(eax, Operand(src, count, times_1, -4));
- __ mov(Operand(dst, count, times_1, -4), eax);
-
- __ mov(eax, Operand(esp, stack_offset + kDestinationOffset));
- __ pop(esi);
- __ pop(edi);
- __ ret(0);
+ // No SSE2.
+ Label forward;
+ __ cmp(count, 0);
+ __ j(equal, &pop_and_return);
+ __ cmp(dst, src);
+ __ j(above, &backward);
+ __ jmp(&forward);
+ {
+ // Simple forward copier.
+ Label forward_loop_1byte, forward_loop_4byte;
+ __ bind(&forward_loop_4byte);
+ __ mov(eax, Operand(src, 0));
+ __ sub(count, Immediate(4));
+ __ add(src, Immediate(4));
+ __ mov(Operand(dst, 0), eax);
+ __ add(dst, Immediate(4));
+ __ bind(&forward); // Entry point.
+ __ cmp(count, 3);
+ __ j(above, &forward_loop_4byte);
+ __ bind(&forward_loop_1byte);
+ __ cmp(count, 0);
+ __ j(below_equal, &pop_and_return);
+ __ mov_b(eax, Operand(src, 0));
+ __ dec(count);
+ __ inc(src);
+ __ mov_b(Operand(dst, 0), eax);
+ __ inc(dst);
+ __ jmp(&forward_loop_1byte);
+ }
+ {
+ // Simple backward copier.
+ Label backward_loop_1byte, backward_loop_4byte, entry_shortcut;
+ __ bind(&backward);
+ __ add(src, count);
+ __ add(dst, count);
+ __ cmp(count, 3);
+ __ j(below_equal, &entry_shortcut);
+
+ __ bind(&backward_loop_4byte);
+ __ sub(src, Immediate(4));
+ __ sub(count, Immediate(4));
+ __ mov(eax, Operand(src, 0));
+ __ sub(dst, Immediate(4));
+ __ mov(Operand(dst, 0), eax);
+ __ cmp(count, 3);
+ __ j(above, &backward_loop_4byte);
+ __ bind(&backward_loop_1byte);
+ __ cmp(count, 0);
+ __ j(below_equal, &pop_and_return);
+ __ bind(&entry_shortcut);
+ __ dec(src);
+ __ dec(count);
+ __ mov_b(eax, Operand(src, 0));
+ __ dec(dst);
+ __ mov_b(Operand(dst, 0), eax);
+ __ jmp(&backward_loop_1byte);
+ }
}
+ __ bind(&pop_and_return);
+ MemMoveEmitPopAndReturn(&masm);
+
CodeDesc desc;
masm.GetCode(&desc);
- ASSERT(desc.reloc_size == 0);
-
+ ASSERT(!RelocInfo::RequiresRelocation(desc));
CPU::FlushICache(buffer, actual_size);
OS::ProtectCode(buffer, actual_size);
- return FUNCTION_CAST<OS::MemCopyFunction>(buffer);
+ // TODO(jkummerow): It would be nice to register this code creation event
+ // with the PROFILE / GDBJIT system.
+ return FUNCTION_CAST<OS::MemMoveFunction>(buffer);
}
+
#undef __
// -------------------------------------------------------------------------
@@ -351,8 +653,10 @@ OS::MemCopyFunction CreateMemCopyFunction() {
#define __ ACCESS_MASM(masm)
+
void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
- MacroAssembler* masm) {
+ MacroAssembler* masm, AllocationSiteMode mode,
+ Label* allocation_memento_found) {
// ----------- S t a t e -------------
// -- eax : value
// -- ebx : target map
@@ -360,6 +664,11 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
+ if (mode == TRACK_ALLOCATION_SITE) {
+ ASSERT(allocation_memento_found != NULL);
+ __ JumpIfJSArrayHasAllocationMemento(edx, edi, allocation_memento_found);
+ }
+
// Set transitioned map.
__ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
__ RecordWriteField(edx,
@@ -373,7 +682,7 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
void ElementsTransitionGenerator::GenerateSmiToDouble(
- MacroAssembler* masm, Label* fail) {
+ MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
// ----------- S t a t e -------------
// -- eax : value
// -- ebx : target map
@@ -383,6 +692,10 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// -----------------------------------
Label loop, entry, convert_hole, gc_required, only_change_map;
+ if (mode == TRACK_ALLOCATION_SITE) {
+ __ JumpIfJSArrayHasAllocationMemento(edx, edi, fail);
+ }
+
// Check for empty arrays, which only require a map transition and no changes
// to the backing store.
__ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
@@ -397,24 +710,10 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// Allocate new FixedDoubleArray.
// edx: receiver
// edi: length of source FixedArray (smi-tagged)
- __ lea(esi, Operand(edi,
- times_4,
- FixedDoubleArray::kHeaderSize + kPointerSize));
- __ AllocateInNewSpace(esi, eax, ebx, no_reg, &gc_required, TAG_OBJECT);
-
- Label aligned, aligned_done;
- __ test(eax, Immediate(kDoubleAlignmentMask - kHeapObjectTag));
- __ j(zero, &aligned, Label::kNear);
- __ mov(FieldOperand(eax, 0),
- Immediate(masm->isolate()->factory()->one_pointer_filler_map()));
- __ add(eax, Immediate(kPointerSize));
- __ jmp(&aligned_done);
-
- __ bind(&aligned);
- __ mov(Operand(eax, esi, times_1, -kPointerSize-1),
- Immediate(masm->isolate()->factory()->one_pointer_filler_map()));
-
- __ bind(&aligned_done);
+ AllocationFlags flags =
+ static_cast<AllocationFlags>(TAG_OBJECT | DOUBLE_ALIGNMENT);
+ __ Allocate(FixedDoubleArray::kHeaderSize, times_8, edi,
+ REGISTER_VALUE_IS_SMI, eax, ebx, no_reg, &gc_required, flags);
// eax: destination FixedDoubleArray
// edi: number of elements
@@ -441,8 +740,8 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
ExternalReference::address_of_the_hole_nan();
XMMRegister the_hole_nan = xmm1;
if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- __ movdbl(the_hole_nan,
+ CpuFeatureScope use_sse2(masm, SSE2);
+ __ movsd(the_hole_nan,
Operand::StaticVariable(canonical_the_hole_nan_reference));
}
__ jmp(&entry);
@@ -466,9 +765,9 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// Normal smi, convert it to double and store.
__ SmiUntag(ebx);
if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope fscope(SSE2);
- __ cvtsi2sd(xmm0, ebx);
- __ movdbl(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize),
+ CpuFeatureScope fscope(masm, SSE2);
+ __ Cvtsi2sd(xmm0, ebx);
+ __ movsd(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize),
xmm0);
} else {
__ push(ebx);
@@ -483,12 +782,12 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
if (FLAG_debug_code) {
__ cmp(ebx, masm->isolate()->factory()->the_hole_value());
- __ Assert(equal, "object found in smi-only array");
+ __ Assert(equal, kObjectFoundInSmiOnlyArray);
}
if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- __ movdbl(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize),
+ CpuFeatureScope use_sse2(masm, SSE2);
+ __ movsd(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize),
the_hole_nan);
} else {
__ fld_d(Operand::StaticVariable(canonical_the_hole_nan_reference));
@@ -521,7 +820,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
void ElementsTransitionGenerator::GenerateDoubleToObject(
- MacroAssembler* masm, Label* fail) {
+ MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
// ----------- S t a t e -------------
// -- eax : value
// -- ebx : target map
@@ -531,6 +830,10 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
// -----------------------------------
Label loop, entry, convert_hole, gc_required, only_change_map, success;
+ if (mode == TRACK_ALLOCATION_SITE) {
+ __ JumpIfJSArrayHasAllocationMemento(edx, edi, fail);
+ }
+
// Check for empty arrays, which only require a map transition and no changes
// to the backing store.
__ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
@@ -546,7 +849,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
// Allocate new FixedArray.
// ebx: length of source FixedDoubleArray (smi-tagged)
__ lea(edi, Operand(ebx, times_2, FixedArray::kHeaderSize));
- __ AllocateInNewSpace(edi, eax, esi, no_reg, &gc_required, TAG_OBJECT);
+ __ Allocate(edi, eax, esi, no_reg, &gc_required, TAG_OBJECT);
// eax: destination FixedArray
// ebx: number of elements
@@ -592,10 +895,10 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
__ AllocateHeapNumber(edx, esi, no_reg, &gc_required);
// edx: new heap number
if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope fscope(SSE2);
- __ movdbl(xmm0,
+ CpuFeatureScope fscope(masm, SSE2);
+ __ movsd(xmm0,
FieldOperand(edi, ebx, times_4, FixedDoubleArray::kHeaderSize));
- __ movdbl(FieldOperand(edx, HeapNumber::kValueOffset), xmm0);
+ __ movsd(FieldOperand(edx, HeapNumber::kValueOffset), xmm0);
} else {
__ mov(esi, FieldOperand(edi, ebx, times_4, FixedDoubleArray::kHeaderSize));
__ mov(FieldOperand(edx, HeapNumber::kValueOffset), esi);
@@ -710,7 +1013,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
// Assert that we do not have a cons or slice (indirect strings) here.
// Sequential strings have already been ruled out.
__ test(result, Immediate(kIsIndirectStringMask));
- __ Assert(zero, "external string expected, but not found");
+ __ Assert(zero, kExternalStringExpectedButNotFound);
}
// Rule out short external strings.
STATIC_CHECK(kShortExternalStringTag != 0);
@@ -732,7 +1035,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
// Dispatch on the encoding: ASCII or two-byte.
Label ascii;
__ bind(&seq_string);
- STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
+ STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
__ test(result, Immediate(kStringEncodingMask));
__ j(not_zero, &ascii, Label::kNear);
@@ -751,12 +1054,130 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ movzx_b(result, FieldOperand(string,
index,
times_1,
- SeqAsciiString::kHeaderSize));
+ SeqOneByteString::kHeaderSize));
+ __ bind(&done);
+}
+
+
+static Operand ExpConstant(int index) {
+ return Operand::StaticVariable(ExternalReference::math_exp_constants(index));
+}
+
+
+void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
+ XMMRegister input,
+ XMMRegister result,
+ XMMRegister double_scratch,
+ Register temp1,
+ Register temp2) {
+ ASSERT(!input.is(double_scratch));
+ ASSERT(!input.is(result));
+ ASSERT(!result.is(double_scratch));
+ ASSERT(!temp1.is(temp2));
+ ASSERT(ExternalReference::math_exp_constants(0).address() != NULL);
+
+ Label done;
+
+ __ movsd(double_scratch, ExpConstant(0));
+ __ xorpd(result, result);
+ __ ucomisd(double_scratch, input);
+ __ j(above_equal, &done);
+ __ ucomisd(input, ExpConstant(1));
+ __ movsd(result, ExpConstant(2));
+ __ j(above_equal, &done);
+ __ movsd(double_scratch, ExpConstant(3));
+ __ movsd(result, ExpConstant(4));
+ __ mulsd(double_scratch, input);
+ __ addsd(double_scratch, result);
+ __ movd(temp2, double_scratch);
+ __ subsd(double_scratch, result);
+ __ movsd(result, ExpConstant(6));
+ __ mulsd(double_scratch, ExpConstant(5));
+ __ subsd(double_scratch, input);
+ __ subsd(result, double_scratch);
+ __ movsd(input, double_scratch);
+ __ mulsd(input, double_scratch);
+ __ mulsd(result, input);
+ __ mov(temp1, temp2);
+ __ mulsd(result, ExpConstant(7));
+ __ subsd(result, double_scratch);
+ __ add(temp1, Immediate(0x1ff800));
+ __ addsd(result, ExpConstant(8));
+ __ and_(temp2, Immediate(0x7ff));
+ __ shr(temp1, 11);
+ __ shl(temp1, 20);
+ __ movd(input, temp1);
+ __ pshufd(input, input, static_cast<uint8_t>(0xe1)); // Order: 11 10 00 01
+ __ movsd(double_scratch, Operand::StaticArray(
+ temp2, times_8, ExternalReference::math_exp_log_table()));
+ __ por(input, double_scratch);
+ __ mulsd(result, input);
__ bind(&done);
}
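A hedged reading of the generated sequence above (the exact constants live in ExternalReference::math_exp_constants and are not shown here): it appears to compute exp(x) by range reduction, writing x / ln(2) ≈ m + j/2048 + r with integer m and table index j in [0, 2047], so that exp(x) ≈ 2^m * 2^(j/2048) * p(r). The factor 2^(j/2048) is read from math_exp_log_table (indexed by temp2 & 0x7ff), 2^m is assembled directly into the exponent bits of a double (the shr(temp1, 11) / shl(temp1, 20) / movd sequence), and p(r) is the short polynomial accumulated in result. The two leading ucomisd checks short-circuit out-of-range inputs, returning 0 for very negative arguments and the precomputed overflow value (presumably +infinity) for very large ones.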
#undef __
+
+static byte* GetNoCodeAgeSequence(uint32_t* length) {
+ static bool initialized = false;
+ static byte sequence[kNoCodeAgeSequenceLength];
+ *length = kNoCodeAgeSequenceLength;
+ if (!initialized) {
+ // The sequence of instructions that is patched out for aging code is the
+ // following boilerplate stack-building prologue that is found both in
+ // FUNCTION and OPTIMIZED_FUNCTION code:
+ CodePatcher patcher(sequence, kNoCodeAgeSequenceLength);
+ patcher.masm()->push(ebp);
+ patcher.masm()->mov(ebp, esp);
+ patcher.masm()->push(esi);
+ patcher.masm()->push(edi);
+ initialized = true;
+ }
+ return sequence;
+}
+
+
+bool Code::IsYoungSequence(byte* sequence) {
+ uint32_t young_length;
+ byte* young_sequence = GetNoCodeAgeSequence(&young_length);
+ bool result = (!memcmp(sequence, young_sequence, young_length));
+ ASSERT(result || *sequence == kCallOpcode);
+ return result;
+}
+
+
+void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
+ MarkingParity* parity) {
+ if (IsYoungSequence(sequence)) {
+ *age = kNoAgeCodeAge;
+ *parity = NO_MARKING_PARITY;
+ } else {
+ sequence++; // Skip the kCallOpcode byte
+ Address target_address = sequence + *reinterpret_cast<int*>(sequence) +
+ Assembler::kCallTargetAddressOffset;
+ Code* stub = GetCodeFromTargetAddress(target_address);
+ GetCodeAgeAndParity(stub, age, parity);
+ }
+}
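GetCodeAgeAndParity above decodes an ia32 near call: one 0xE8 opcode byte followed by a 32-bit displacement relative to the end of the call instruction (which is what the sequence++ plus kCallTargetAddressOffset, assumed to be 4 here, add up to). A standalone sketch of the same arithmetic:

#include <cstdint>
#include <cstring>

// Resolve the target of "E8 <rel32>" located at call_address.
inline uint8_t* NearCallTarget(uint8_t* call_address) {
  int32_t rel32;
  std::memcpy(&rel32, call_address + 1, sizeof(rel32));  // skip the opcode byte
  return call_address + 5 + rel32;  // 5 = 1 opcode byte + 4 displacement bytes
}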
+
+
+void Code::PatchPlatformCodeAge(Isolate* isolate,
+ byte* sequence,
+ Code::Age age,
+ MarkingParity parity) {
+ uint32_t young_length;
+ byte* young_sequence = GetNoCodeAgeSequence(&young_length);
+ if (age == kNoAgeCodeAge) {
+ CopyBytes(sequence, young_sequence, young_length);
+ CPU::FlushICache(sequence, young_length);
+ } else {
+ Code* stub = GetCodeAgeStub(isolate, age, parity);
+ CodePatcher patcher(sequence, young_length);
+ patcher.masm()->call(stub->instruction_start(), RelocInfo::NONE32);
+ }
+}
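On ia32 the young prologue being patched is five bytes and a near call is also five bytes, so PatchPlatformCodeAge above can swap one for the other in place, and IsYoungSequence can distinguish them by the first byte alone. A hedged sketch of the two encodings (standard x86 opcodes; the sequence length constant is assumed to be 5 here):

#include <cstdint>

// push ebp; mov ebp, esp; push esi; push edi  -- the "young" prologue.
static const uint8_t kYoungSequenceSketch[5] = {0x55, 0x8B, 0xEC, 0x56, 0x57};

// Aged form: call <rel32> to the code-age stub, same five-byte footprint.
static const uint8_t kCallOpcodeSketch = 0xE8;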
+
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_IA32
diff --git a/deps/v8/src/ia32/codegen-ia32.h b/deps/v8/src/ia32/codegen-ia32.h
index f4ab0b50f..6a207ca9b 100644
--- a/deps/v8/src/ia32/codegen-ia32.h
+++ b/deps/v8/src/ia32/codegen-ia32.h
@@ -43,7 +43,7 @@ class CompilationInfo;
class CodeGenerator {
public:
// Printing of AST, etc. as requested by flags.
- static void MakeCodePrologue(CompilationInfo* info);
+ static void MakeCodePrologue(CompilationInfo* info, const char* kind);
// Allocate and install the code.
static Handle<Code> MakeCodeEpilogue(MacroAssembler* masm,
@@ -53,7 +53,7 @@ class CodeGenerator {
// Print the code after compiling it.
static void PrintCode(Handle<Code> code, CompilationInfo* info);
- static bool ShouldGenerateLog(Expression* type);
+ static bool ShouldGenerateLog(Isolate* isolate, Expression* type);
static bool RecordPositions(MacroAssembler* masm,
int pos,
@@ -88,6 +88,20 @@ class StringCharLoadGenerator : public AllStatic {
DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
};
+
+class MathExpGenerator : public AllStatic {
+ public:
+ static void EmitMathExp(MacroAssembler* masm,
+ XMMRegister input,
+ XMMRegister result,
+ XMMRegister double_scratch,
+ Register temp1,
+ Register temp2);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MathExpGenerator);
+};
+
} } // namespace v8::internal
#endif // V8_IA32_CODEGEN_IA32_H_
diff --git a/deps/v8/src/ia32/cpu-ia32.cc b/deps/v8/src/ia32/cpu-ia32.cc
index 9eabb2a96..5fb04fc72 100644
--- a/deps/v8/src/ia32/cpu-ia32.cc
+++ b/deps/v8/src/ia32/cpu-ia32.cc
@@ -33,7 +33,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_IA32)
+#if V8_TARGET_ARCH_IA32
#include "cpu.h"
#include "macro-assembler.h"
@@ -72,18 +72,6 @@ void CPU::FlushICache(void* start, size_t size) {
#endif
}
-
-void CPU::DebugBreak() {
-#ifdef _MSC_VER
- // To avoid Visual Studio runtime support the following code can be used
- // instead
- // __asm { int 3 }
- __debugbreak();
-#else
- asm("int $3");
-#endif
-}
-
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_IA32
diff --git a/deps/v8/src/ia32/debug-ia32.cc b/deps/v8/src/ia32/debug-ia32.cc
index d153e18ee..76a7003bf 100644
--- a/deps/v8/src/ia32/debug-ia32.cc
+++ b/deps/v8/src/ia32/debug-ia32.cc
@@ -27,7 +27,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_IA32)
+#if V8_TARGET_ARCH_IA32
#include "codegen.h"
#include "debug.h"
@@ -49,8 +49,8 @@ bool BreakLocationIterator::IsDebugBreakAtReturn() {
void BreakLocationIterator::SetDebugBreakAtReturn() {
ASSERT(Assembler::kJSReturnSequenceLength >=
Assembler::kCallInstructionLength);
- Isolate* isolate = Isolate::Current();
- rinfo()->PatchCodeWithCall(isolate->debug()->debug_break_return()->entry(),
+ rinfo()->PatchCodeWithCall(
+ debug_info_->GetIsolate()->debug()->debug_break_return()->entry(),
Assembler::kJSReturnSequenceLength - Assembler::kCallInstructionLength);
}
@@ -79,7 +79,7 @@ bool BreakLocationIterator::IsDebugBreakAtSlot() {
void BreakLocationIterator::SetDebugBreakAtSlot() {
ASSERT(IsDebugBreakSlot());
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = debug_info_->GetIsolate();
rinfo()->PatchCodeWithCall(
isolate->debug()->debug_break_slot()->entry(),
Assembler::kDebugBreakSlotLength - Assembler::kCallInstructionLength);
@@ -91,6 +91,7 @@ void BreakLocationIterator::ClearDebugBreakAtSlot() {
rinfo()->PatchCode(original_rinfo()->pc(), Assembler::kDebugBreakSlotLength);
}
+
// All debug break stubs support padding for LiveEdit.
const bool Debug::FramePaddingLayout::kIsSupported = true;
@@ -127,7 +128,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
if ((non_object_regs & (1 << r)) != 0) {
if (FLAG_debug_code) {
__ test(reg, Immediate(0xc0000000));
- __ Assert(zero, "Unable to encode value as smi");
+ __ Assert(zero, kUnableToEncodeValueAsSmi);
}
__ SmiTag(reg);
__ push(reg);
@@ -240,6 +241,15 @@ void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
}
+void Debug::GenerateCompareNilICDebugBreak(MacroAssembler* masm) {
+ // Register state for CompareNil IC
+ // ----------- S t a t e -------------
+ // -- eax : value
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, eax.bit(), 0, false);
+}
+
+
void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) {
// Register state for keyed IC call (from ic-ia32.cc)
// ----------- S t a t e -------------
diff --git a/deps/v8/src/ia32/deoptimizer-ia32.cc b/deps/v8/src/ia32/deoptimizer-ia32.cc
index 99ad5225b..e339b3ad1 100644
--- a/deps/v8/src/ia32/deoptimizer-ia32.cc
+++ b/deps/v8/src/ia32/deoptimizer-ia32.cc
@@ -27,7 +27,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_IA32)
+#if V8_TARGET_ARCH_IA32
#include "codegen.h"
#include "deoptimizer.h"
@@ -90,9 +90,9 @@ void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
Factory* factory = isolate->factory();
Handle<ByteArray> new_reloc =
factory->NewByteArray(reloc_length + padding, TENURED);
- memcpy(new_reloc->GetDataStartAddress() + padding,
- code->relocation_info()->GetDataStartAddress(),
- reloc_length);
+ OS::MemCopy(new_reloc->GetDataStartAddress() + padding,
+ code->relocation_info()->GetDataStartAddress(),
+ reloc_length);
// Create a relocation writer to write the comments in the padding
// space. Use position 0 for everything to ensure short encoding.
RelocInfoWriter reloc_info_writer(
@@ -114,21 +114,8 @@ void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
}
-void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
- if (!function->IsOptimized()) return;
-
- // The optimized code is going to be patched, so we cannot use it
- // any more. Play safe and reset the whole cache.
- function->shared()->ClearOptimizedCodeMap();
-
- Isolate* isolate = function->GetIsolate();
- HandleScope scope(isolate);
- AssertNoAllocation no_allocation;
-
- // Get the optimized code.
- Code* code = function->code();
+void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
Address code_start_address = code->instruction_start();
-
// We will overwrite the code's relocation info in-place. Relocation info
// is written backward. The relocation info is the payload of a byte
// array. Later on we will slide this to the start of the byte array and
@@ -155,8 +142,8 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
// Patch lazy deoptimization entry.
Address call_address = code_start_address + deopt_data->Pc(i)->value();
CodePatcher patcher(call_address, patch_size());
- Address deopt_entry = GetDeoptimizationEntry(i, LAZY);
- patcher.masm()->call(deopt_entry, RelocInfo::NONE);
+ Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY);
+ patcher.masm()->call(deopt_entry, RelocInfo::NONE32);
// We use RUNTIME_ENTRY for deoptimization bailouts.
RelocInfo rinfo(call_address + 1, // 1 after the call opcode.
RelocInfo::RUNTIME_ENTRY,
@@ -175,7 +162,8 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
// Move the relocation info to the beginning of the byte array.
int new_reloc_size = reloc_end_address - reloc_info_writer.pos();
- memmove(code->relocation_start(), reloc_info_writer.pos(), new_reloc_size);
+ OS::MemMove(
+ code->relocation_start(), reloc_info_writer.pos(), new_reloc_size);
// The relocation info is in place, update the size.
reloc_info->set_length(new_reloc_size);
@@ -186,825 +174,60 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
ASSERT(junk_address <= reloc_end_address);
isolate->heap()->CreateFillerObjectAt(junk_address,
reloc_end_address - junk_address);
-
- // Add the deoptimizing code to the list.
- DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
- DeoptimizerData* data = isolate->deoptimizer_data();
- node->set_next(data->deoptimizing_code_list_);
- data->deoptimizing_code_list_ = node;
-
- // We might be in the middle of incremental marking with compaction.
- // Tell collector to treat this code object in a special way and
- // ignore all slots that might have been recorded on it.
- isolate->heap()->mark_compact_collector()->InvalidateCode(code);
-
- ReplaceCodeForRelatedFunctions(function, code);
-
- if (FLAG_trace_deopt) {
- PrintF("[forced deoptimization: ");
- function->PrintName();
- PrintF(" / %x]\n", reinterpret_cast<uint32_t>(function));
- }
-}
-
-
-static const byte kJnsInstruction = 0x79;
-static const byte kJnsOffset = 0x13;
-static const byte kJaeInstruction = 0x73;
-static const byte kJaeOffset = 0x07;
-static const byte kCallInstruction = 0xe8;
-static const byte kNopByteOne = 0x66;
-static const byte kNopByteTwo = 0x90;
-
-
-void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
- Address pc_after,
- Code* check_code,
- Code* replacement_code) {
- Address call_target_address = pc_after - kIntSize;
- ASSERT_EQ(check_code->entry(),
- Assembler::target_address_at(call_target_address));
- // The stack check code matches the pattern:
- //
- // cmp esp, <limit>
- // jae ok
- // call <stack guard>
- // test eax, <loop nesting depth>
- // ok: ...
- //
- // We will patch away the branch so the code is:
- //
- // cmp esp, <limit> ;; Not changed
- // nop
- // nop
- // call <on-stack replacment>
- // test eax, <loop nesting depth>
- // ok:
-
- if (FLAG_count_based_interrupts) {
- ASSERT_EQ(kJnsInstruction, *(call_target_address - 3));
- ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
- } else {
- ASSERT_EQ(kJaeInstruction, *(call_target_address - 3));
- ASSERT_EQ(kJaeOffset, *(call_target_address - 2));
- }
- ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
- *(call_target_address - 3) = kNopByteOne;
- *(call_target_address - 2) = kNopByteTwo;
- Assembler::set_target_address_at(call_target_address,
- replacement_code->entry());
-
- unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
- unoptimized_code, call_target_address, replacement_code);
-}
-
-
-void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
- Address pc_after,
- Code* check_code,
- Code* replacement_code) {
- Address call_target_address = pc_after - kIntSize;
- ASSERT_EQ(replacement_code->entry(),
- Assembler::target_address_at(call_target_address));
-
- // Replace the nops from patching (Deoptimizer::PatchStackCheckCode) to
- // restore the conditional branch.
- ASSERT_EQ(kNopByteOne, *(call_target_address - 3));
- ASSERT_EQ(kNopByteTwo, *(call_target_address - 2));
- ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
- if (FLAG_count_based_interrupts) {
- *(call_target_address - 3) = kJnsInstruction;
- *(call_target_address - 2) = kJnsOffset;
- } else {
- *(call_target_address - 3) = kJaeInstruction;
- *(call_target_address - 2) = kJaeOffset;
- }
- Assembler::set_target_address_at(call_target_address,
- check_code->entry());
-
- check_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
- unoptimized_code, call_target_address, check_code);
}
-static int LookupBailoutId(DeoptimizationInputData* data, BailoutId ast_id) {
- ByteArray* translations = data->TranslationByteArray();
- int length = data->DeoptCount();
- for (int i = 0; i < length; i++) {
- if (data->AstId(i) == ast_id) {
- TranslationIterator it(translations, data->TranslationIndex(i)->value());
- int value = it.Next();
- ASSERT(Translation::BEGIN == static_cast<Translation::Opcode>(value));
- // Read the number of frames.
- value = it.Next();
- if (value == 1) return i;
- }
- }
- UNREACHABLE();
- return -1;
-}
-
-
-void Deoptimizer::DoComputeOsrOutputFrame() {
- DeoptimizationInputData* data = DeoptimizationInputData::cast(
- optimized_code_->deoptimization_data());
- unsigned ast_id = data->OsrAstId()->value();
- // TODO(kasperl): This should not be the bailout_id_. It should be
- // the ast id. Confusing.
- ASSERT(bailout_id_ == ast_id);
-
- int bailout_id = LookupBailoutId(data, BailoutId(ast_id));
- unsigned translation_index = data->TranslationIndex(bailout_id)->value();
- ByteArray* translations = data->TranslationByteArray();
-
- TranslationIterator iterator(translations, translation_index);
- Translation::Opcode opcode =
- static_cast<Translation::Opcode>(iterator.Next());
- ASSERT(Translation::BEGIN == opcode);
- USE(opcode);
- int count = iterator.Next();
- iterator.Next(); // Drop JS frames count.
- ASSERT(count == 1);
- USE(count);
-
- opcode = static_cast<Translation::Opcode>(iterator.Next());
- USE(opcode);
- ASSERT(Translation::JS_FRAME == opcode);
- unsigned node_id = iterator.Next();
- USE(node_id);
- ASSERT(node_id == ast_id);
- int closure_id = iterator.Next();
- USE(closure_id);
- ASSERT_EQ(Translation::kSelfLiteralId, closure_id);
- unsigned height = iterator.Next();
- unsigned height_in_bytes = height * kPointerSize;
- USE(height_in_bytes);
-
- unsigned fixed_size = ComputeFixedSize(function_);
- unsigned input_frame_size = input_->GetFrameSize();
- ASSERT(fixed_size + height_in_bytes == input_frame_size);
-
- unsigned stack_slot_size = optimized_code_->stack_slots() * kPointerSize;
- unsigned outgoing_height = data->ArgumentsStackHeight(bailout_id)->value();
- unsigned outgoing_size = outgoing_height * kPointerSize;
- unsigned output_frame_size = fixed_size + stack_slot_size + outgoing_size;
- ASSERT(outgoing_size == 0); // OSR does not happen in the middle of a call.
-
- if (FLAG_trace_osr) {
- PrintF("[on-stack replacement: begin 0x%08" V8PRIxPTR " ",
- reinterpret_cast<intptr_t>(function_));
- function_->PrintName();
- PrintF(" => node=%u, frame=%d->%d, ebp:esp=0x%08x:0x%08x]\n",
- ast_id,
- input_frame_size,
- output_frame_size,
- input_->GetRegister(ebp.code()),
- input_->GetRegister(esp.code()));
- }
-
- // There's only one output frame in the OSR case.
- output_count_ = 1;
- output_ = new FrameDescription*[1];
- output_[0] = new(output_frame_size) FrameDescription(
- output_frame_size, function_);
- output_[0]->SetFrameType(StackFrame::JAVA_SCRIPT);
-
- // Clear the incoming parameters in the optimized frame to avoid
- // confusing the garbage collector.
- unsigned output_offset = output_frame_size - kPointerSize;
- int parameter_count = function_->shared()->formal_parameter_count() + 1;
- for (int i = 0; i < parameter_count; ++i) {
- output_[0]->SetFrameSlot(output_offset, 0);
- output_offset -= kPointerSize;
- }
-
- // Translate the incoming parameters. This may overwrite some of the
- // incoming argument slots we've just cleared.
- int input_offset = input_frame_size - kPointerSize;
- bool ok = true;
- int limit = input_offset - (parameter_count * kPointerSize);
- while (ok && input_offset > limit) {
- ok = DoOsrTranslateCommand(&iterator, &input_offset);
- }
-
- // There are no translation commands for the caller's pc and fp, the
- // context, and the function. Set them up explicitly.
- for (int i = StandardFrameConstants::kCallerPCOffset;
- ok && i >= StandardFrameConstants::kMarkerOffset;
- i -= kPointerSize) {
- uint32_t input_value = input_->GetFrameSlot(input_offset);
- if (FLAG_trace_osr) {
- const char* name = "UNKNOWN";
- switch (i) {
- case StandardFrameConstants::kCallerPCOffset:
- name = "caller's pc";
- break;
- case StandardFrameConstants::kCallerFPOffset:
- name = "fp";
- break;
- case StandardFrameConstants::kContextOffset:
- name = "context";
- break;
- case StandardFrameConstants::kMarkerOffset:
- name = "function";
- break;
- }
- PrintF(" [sp + %d] <- 0x%08x ; [sp + %d] (fixed part - %s)\n",
- output_offset,
- input_value,
- input_offset,
- name);
- }
- output_[0]->SetFrameSlot(output_offset, input_->GetFrameSlot(input_offset));
- input_offset -= kPointerSize;
- output_offset -= kPointerSize;
- }
-
- // All OSR stack frames are dynamically aligned to an 8-byte boundary.
- int frame_pointer = input_->GetRegister(ebp.code());
- if ((frame_pointer & kPointerSize) != 0) {
- frame_pointer -= kPointerSize;
- has_alignment_padding_ = 1;
- }
-
- int32_t alignment_state = (has_alignment_padding_ == 1) ?
- kAlignmentPaddingPushed :
- kNoAlignmentPadding;
- if (FLAG_trace_osr) {
- PrintF(" [sp + %d] <- 0x%08x ; (alignment state)\n",
- output_offset,
- alignment_state);
- }
- output_[0]->SetFrameSlot(output_offset, alignment_state);
- output_offset -= kPointerSize;
-
- // Translate the rest of the frame.
- while (ok && input_offset >= 0) {
- ok = DoOsrTranslateCommand(&iterator, &input_offset);
- }
-
- // If translation of any command failed, continue using the input frame.
- if (!ok) {
- delete output_[0];
- output_[0] = input_;
- output_[0]->SetPc(reinterpret_cast<uint32_t>(from_));
- } else {
- // Set up the frame pointer and the context pointer.
- output_[0]->SetRegister(ebp.code(), frame_pointer);
- output_[0]->SetRegister(esi.code(), input_->GetRegister(esi.code()));
-
- unsigned pc_offset = data->OsrPcOffset()->value();
- uint32_t pc = reinterpret_cast<uint32_t>(
- optimized_code_->entry() + pc_offset);
- output_[0]->SetPc(pc);
- }
- Code* continuation =
- function_->GetIsolate()->builtins()->builtin(Builtins::kNotifyOSR);
- output_[0]->SetContinuation(
- reinterpret_cast<uint32_t>(continuation->entry()));
-
- if (FLAG_trace_osr) {
- PrintF("[on-stack replacement translation %s: 0x%08" V8PRIxPTR " ",
- ok ? "finished" : "aborted",
- reinterpret_cast<intptr_t>(function_));
- function_->PrintName();
- PrintF(" => pc=0x%0x]\n", output_[0]->GetPc());
- }
-}
-
-
-void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
- int frame_index) {
- JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
- unsigned height = iterator->Next();
- unsigned height_in_bytes = height * kPointerSize;
- if (FLAG_trace_deopt) {
- PrintF(" translating arguments adaptor => height=%d\n", height_in_bytes);
- }
-
- unsigned fixed_frame_size = ArgumentsAdaptorFrameConstants::kFrameSize;
- unsigned output_frame_size = height_in_bytes + fixed_frame_size;
-
- // Allocate and store the output frame description.
- FrameDescription* output_frame =
- new(output_frame_size) FrameDescription(output_frame_size, function);
- output_frame->SetFrameType(StackFrame::ARGUMENTS_ADAPTOR);
-
- // Arguments adaptor can not be topmost or bottommost.
- ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
- ASSERT(output_[frame_index] == NULL);
- output_[frame_index] = output_frame;
-
- // The top address of the frame is computed from the previous
- // frame's top and this frame's size.
- uint32_t top_address;
- top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
- output_frame->SetTop(top_address);
-
- // Compute the incoming parameter translation.
- int parameter_count = height;
- unsigned output_offset = output_frame_size;
- for (int i = 0; i < parameter_count; ++i) {
- output_offset -= kPointerSize;
- DoTranslateCommand(iterator, frame_index, output_offset);
- }
-
- // Read caller's PC from the previous frame.
- output_offset -= kPointerSize;
- intptr_t callers_pc = output_[frame_index - 1]->GetPc();
- output_frame->SetFrameSlot(output_offset, callers_pc);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
- top_address + output_offset, output_offset, callers_pc);
- }
-
- // Read caller's FP from the previous frame, and set this frame's FP.
- output_offset -= kPointerSize;
- intptr_t value = output_[frame_index - 1]->GetFp();
- output_frame->SetFrameSlot(output_offset, value);
- intptr_t fp_value = top_address + output_offset;
- output_frame->SetFp(fp_value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
- fp_value, output_offset, value);
- }
+void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
+ // Set the register values. The values are not important as there are no
+ // callee saved registers in JavaScript frames, so all registers are
+ // spilled. Registers ebp and esp are set to the correct values though.
- // A marker value is used in place of the context.
- output_offset -= kPointerSize;
- intptr_t context = reinterpret_cast<intptr_t>(
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- output_frame->SetFrameSlot(output_offset, context);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context (adaptor sentinel)\n",
- top_address + output_offset, output_offset, context);
+ for (int i = 0; i < Register::kNumRegisters; i++) {
+ input_->SetRegister(i, i * 4);
}
-
- // The function was mentioned explicitly in the ARGUMENTS_ADAPTOR_FRAME.
- output_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(function);
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function\n",
- top_address + output_offset, output_offset, value);
+ input_->SetRegister(esp.code(), reinterpret_cast<intptr_t>(frame->sp()));
+ input_->SetRegister(ebp.code(), reinterpret_cast<intptr_t>(frame->fp()));
+ for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
+ input_->SetDoubleRegister(i, 0.0);
}
- // Number of incoming arguments.
- output_offset -= kPointerSize;
- value = reinterpret_cast<uint32_t>(Smi::FromInt(height - 1));
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; argc (%d)\n",
- top_address + output_offset, output_offset, value, height - 1);
+ // Fill the frame content from the actual data on the frame.
+ for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
+ input_->SetFrameSlot(i, Memory::uint32_at(tos + i));
}
-
- ASSERT(0 == output_offset);
-
- Builtins* builtins = isolate_->builtins();
- Code* adaptor_trampoline =
- builtins->builtin(Builtins::kArgumentsAdaptorTrampoline);
- uint32_t pc = reinterpret_cast<uint32_t>(
- adaptor_trampoline->instruction_start() +
- isolate_->heap()->arguments_adaptor_deopt_pc_offset()->value());
- output_frame->SetPc(pc);
}
-void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
- int frame_index) {
- Builtins* builtins = isolate_->builtins();
- Code* construct_stub = builtins->builtin(Builtins::kJSConstructStubGeneric);
- JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
- unsigned height = iterator->Next();
- unsigned height_in_bytes = height * kPointerSize;
- if (FLAG_trace_deopt) {
- PrintF(" translating construct stub => height=%d\n", height_in_bytes);
- }
-
- unsigned fixed_frame_size = 7 * kPointerSize;
- unsigned output_frame_size = height_in_bytes + fixed_frame_size;
-
- // Allocate and store the output frame description.
- FrameDescription* output_frame =
- new(output_frame_size) FrameDescription(output_frame_size, function);
- output_frame->SetFrameType(StackFrame::CONSTRUCT);
-
- // Construct stub can not be topmost or bottommost.
- ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
- ASSERT(output_[frame_index] == NULL);
- output_[frame_index] = output_frame;
-
- // The top address of the frame is computed from the previous
- // frame's top and this frame's size.
- uint32_t top_address;
- top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
- output_frame->SetTop(top_address);
-
- // Compute the incoming parameter translation.
- int parameter_count = height;
- unsigned output_offset = output_frame_size;
- for (int i = 0; i < parameter_count; ++i) {
- output_offset -= kPointerSize;
- DoTranslateCommand(iterator, frame_index, output_offset);
- }
-
- // Read caller's PC from the previous frame.
- output_offset -= kPointerSize;
- intptr_t callers_pc = output_[frame_index - 1]->GetPc();
- output_frame->SetFrameSlot(output_offset, callers_pc);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
- top_address + output_offset, output_offset, callers_pc);
- }
-
- // Read caller's FP from the previous frame, and set this frame's FP.
- output_offset -= kPointerSize;
- intptr_t value = output_[frame_index - 1]->GetFp();
- output_frame->SetFrameSlot(output_offset, value);
- intptr_t fp_value = top_address + output_offset;
- output_frame->SetFp(fp_value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
- fp_value, output_offset, value);
- }
-
- // The context can be gotten from the previous frame.
- output_offset -= kPointerSize;
- value = output_[frame_index - 1]->GetContext();
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context\n",
- top_address + output_offset, output_offset, value);
- }
-
- // A marker value is used in place of the function.
- output_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::CONSTRUCT));
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function (construct sentinel)\n",
- top_address + output_offset, output_offset, value);
- }
-
- // The output frame reflects a JSConstructStubGeneric frame.
- output_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(construct_stub);
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; code object\n",
- top_address + output_offset, output_offset, value);
- }
-
- // Number of incoming arguments.
- output_offset -= kPointerSize;
- value = reinterpret_cast<uint32_t>(Smi::FromInt(height - 1));
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; argc (%d)\n",
- top_address + output_offset, output_offset, value, height - 1);
- }
-
- // The newly allocated object was passed as receiver in the artificial
- // constructor stub environment created by HEnvironment::CopyForInlining().
- output_offset -= kPointerSize;
- value = output_frame->GetFrameSlot(output_frame_size - kPointerSize);
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; allocated receiver\n",
- top_address + output_offset, output_offset, value);
- }
-
- ASSERT(0 == output_offset);
-
- uint32_t pc = reinterpret_cast<uint32_t>(
- construct_stub->instruction_start() +
- isolate_->heap()->construct_stub_deopt_pc_offset()->value());
- output_frame->SetPc(pc);
+void Deoptimizer::SetPlatformCompiledStubRegisters(
+ FrameDescription* output_frame, CodeStubInterfaceDescriptor* descriptor) {
+ intptr_t handler =
+ reinterpret_cast<intptr_t>(descriptor->deoptimization_handler_);
+ int params = descriptor->environment_length();
+ output_frame->SetRegister(eax.code(), params);
+ output_frame->SetRegister(ebx.code(), handler);
}
-void Deoptimizer::DoComputeAccessorStubFrame(TranslationIterator* iterator,
- int frame_index,
- bool is_setter_stub_frame) {
- JSFunction* accessor = JSFunction::cast(ComputeLiteral(iterator->Next()));
- // The receiver (and the implicit return value, if any) are expected in
- // registers by the LoadIC/StoreIC, so they don't belong to the output stack
- // frame. This means that we have to use a height of 0.
- unsigned height = 0;
- unsigned height_in_bytes = height * kPointerSize;
- const char* kind = is_setter_stub_frame ? "setter" : "getter";
- if (FLAG_trace_deopt) {
- PrintF(" translating %s stub => height=%u\n", kind, height_in_bytes);
- }
-
- // We need 1 stack entry for the return address + 4 stack entries from
- // StackFrame::INTERNAL (FP, context, frame type, code object, see
- // MacroAssembler::EnterFrame). For a setter stub frame we need one additional
- // entry for the implicit return value, see
- // StoreStubCompiler::CompileStoreViaSetter.
- unsigned fixed_frame_entries = 1 + 4 + (is_setter_stub_frame ? 1 : 0);
- unsigned fixed_frame_size = fixed_frame_entries * kPointerSize;
- unsigned output_frame_size = height_in_bytes + fixed_frame_size;
-
- // Allocate and store the output frame description.
- FrameDescription* output_frame =
- new(output_frame_size) FrameDescription(output_frame_size, accessor);
- output_frame->SetFrameType(StackFrame::INTERNAL);
-
- // A frame for an accessor stub can not be the topmost or bottommost one.
- ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
- ASSERT(output_[frame_index] == NULL);
- output_[frame_index] = output_frame;
-
- // The top address of the frame is computed from the previous frame's top and
- // this frame's size.
- intptr_t top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
- output_frame->SetTop(top_address);
-
- unsigned output_offset = output_frame_size;
-
- // Read caller's PC from the previous frame.
- output_offset -= kPointerSize;
- intptr_t callers_pc = output_[frame_index - 1]->GetPc();
- output_frame->SetFrameSlot(output_offset, callers_pc);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
- " ; caller's pc\n",
- top_address + output_offset, output_offset, callers_pc);
- }
-
- // Read caller's FP from the previous frame, and set this frame's FP.
- output_offset -= kPointerSize;
- intptr_t value = output_[frame_index - 1]->GetFp();
- output_frame->SetFrameSlot(output_offset, value);
- intptr_t fp_value = top_address + output_offset;
- output_frame->SetFp(fp_value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
- " ; caller's fp\n",
- fp_value, output_offset, value);
- }
-
- // The context can be gotten from the previous frame.
- output_offset -= kPointerSize;
- value = output_[frame_index - 1]->GetContext();
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
- " ; context\n",
- top_address + output_offset, output_offset, value);
- }
-
- // A marker value is used in place of the function.
- output_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::INTERNAL));
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
- " ; function (%s sentinel)\n",
- top_address + output_offset, output_offset, value, kind);
- }
-
- // Get Code object from accessor stub.
- output_offset -= kPointerSize;
- Builtins::Name name = is_setter_stub_frame ?
- Builtins::kStoreIC_Setter_ForDeopt :
- Builtins::kLoadIC_Getter_ForDeopt;
- Code* accessor_stub = isolate_->builtins()->builtin(name);
- value = reinterpret_cast<intptr_t>(accessor_stub);
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
- " ; code object\n",
- top_address + output_offset, output_offset, value);
- }
-
- // Skip receiver.
- Translation::Opcode opcode =
- static_cast<Translation::Opcode>(iterator->Next());
- iterator->Skip(Translation::NumberOfOperandsFor(opcode));
-
- if (is_setter_stub_frame) {
- // The implicit return value was part of the artificial setter stub
- // environment.
- output_offset -= kPointerSize;
- DoTranslateCommand(iterator, frame_index, output_offset);
+void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
+ if (!CpuFeatures::IsSupported(SSE2)) return;
+ for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
+ double double_value = input_->GetDoubleRegister(i);
+ output_frame->SetDoubleRegister(i, double_value);
}
-
- ASSERT(0 == output_offset);
-
- Smi* offset = is_setter_stub_frame ?
- isolate_->heap()->setter_stub_deopt_pc_offset() :
- isolate_->heap()->getter_stub_deopt_pc_offset();
- intptr_t pc = reinterpret_cast<intptr_t>(
- accessor_stub->instruction_start() + offset->value());
- output_frame->SetPc(pc);
}
-void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
- int frame_index) {
- BailoutId node_id = BailoutId(iterator->Next());
- JSFunction* function;
- if (frame_index != 0) {
- function = JSFunction::cast(ComputeLiteral(iterator->Next()));
- } else {
- int closure_id = iterator->Next();
- USE(closure_id);
- ASSERT_EQ(Translation::kSelfLiteralId, closure_id);
- function = function_;
- }
- unsigned height = iterator->Next();
- unsigned height_in_bytes = height * kPointerSize;
- if (FLAG_trace_deopt) {
- PrintF(" translating ");
- function->PrintName();
- PrintF(" => node=%d, height=%d\n", node_id.ToInt(), height_in_bytes);
- }
-
- // The 'fixed' part of the frame consists of the incoming parameters and
- // the part described by JavaScriptFrameConstants.
- unsigned fixed_frame_size = ComputeFixedSize(function);
- unsigned input_frame_size = input_->GetFrameSize();
- unsigned output_frame_size = height_in_bytes + fixed_frame_size;
-
- // Allocate and store the output frame description.
- FrameDescription* output_frame =
- new(output_frame_size) FrameDescription(output_frame_size, function);
- output_frame->SetFrameType(StackFrame::JAVA_SCRIPT);
-
- bool is_bottommost = (0 == frame_index);
- bool is_topmost = (output_count_ - 1 == frame_index);
- ASSERT(frame_index >= 0 && frame_index < output_count_);
- ASSERT(output_[frame_index] == NULL);
- output_[frame_index] = output_frame;
-
- // Compute the incoming parameter translation.
+bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
int parameter_count = function->shared()->formal_parameter_count() + 1;
- unsigned output_offset = output_frame_size;
- unsigned input_offset = input_frame_size;
-
+ unsigned input_frame_size = input_->GetFrameSize();
unsigned alignment_state_offset =
- input_offset - parameter_count * kPointerSize -
- StandardFrameConstants::kFixedFrameSize -
- kPointerSize;
+ input_frame_size - parameter_count * kPointerSize -
+ StandardFrameConstants::kFixedFrameSize -
+ kPointerSize;
ASSERT(JavaScriptFrameConstants::kDynamicAlignmentStateOffset ==
- JavaScriptFrameConstants::kLocal0Offset);
-
- // The top address for the bottommost output frame can be computed from
- // the input frame pointer and the output frame's height. For all
- // subsequent output frames, it can be computed from the previous one's
- // top address and the current frame's size.
- uint32_t top_address;
- if (is_bottommost) {
- int32_t alignment_state = input_->GetFrameSlot(alignment_state_offset);
- has_alignment_padding_ =
- (alignment_state == kAlignmentPaddingPushed) ? 1 : 0;
- // 2 = context and function in the frame.
- // If the optimized frame had alignment padding, adjust the frame pointer
- // to point to the new position of the old frame pointer after padding
- // is removed. Subtract 2 * kPointerSize for the context and function slots.
- top_address = input_->GetRegister(ebp.code()) - (2 * kPointerSize) -
- height_in_bytes + has_alignment_padding_ * kPointerSize;
- } else {
- top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
- }
- output_frame->SetTop(top_address);
-
- for (int i = 0; i < parameter_count; ++i) {
- output_offset -= kPointerSize;
- DoTranslateCommand(iterator, frame_index, output_offset);
- }
- input_offset -= (parameter_count * kPointerSize);
-
- // There are no translation commands for the caller's pc and fp, the
- // context, and the function. Synthesize their values and set them up
- // explicitly.
- //
- // The caller's pc for the bottommost output frame is the same as in the
- // input frame. For all subsequent output frames, it can be read from the
- // previous one. This frame's pc can be computed from the non-optimized
- // function code and AST id of the bailout.
- output_offset -= kPointerSize;
- input_offset -= kPointerSize;
- intptr_t value;
- if (is_bottommost) {
- value = input_->GetFrameSlot(input_offset);
- } else {
- value = output_[frame_index - 1]->GetPc();
- }
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
- top_address + output_offset, output_offset, value);
- }
-
- // The caller's frame pointer for the bottommost output frame is the same
- // as in the input frame. For all subsequent output frames, it can be
- // read from the previous one. Also compute and set this frame's frame
- // pointer.
- output_offset -= kPointerSize;
- input_offset -= kPointerSize;
- if (is_bottommost) {
- value = input_->GetFrameSlot(input_offset);
- } else {
- value = output_[frame_index - 1]->GetFp();
- }
- output_frame->SetFrameSlot(output_offset, value);
- intptr_t fp_value = top_address + output_offset;
- ASSERT(!is_bottommost ||
- (input_->GetRegister(ebp.code()) + has_alignment_padding_ * kPointerSize) ==
- fp_value);
- output_frame->SetFp(fp_value);
- if (is_topmost) output_frame->SetRegister(ebp.code(), fp_value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
- fp_value, output_offset, value);
- }
- ASSERT(!is_bottommost || !has_alignment_padding_ ||
- (fp_value & kPointerSize) != 0);
-
- // For the bottommost output frame the context can be gotten from the input
- // frame. For all subsequent output frames it can be gotten from the function
- // so long as we don't inline functions that need local contexts.
- output_offset -= kPointerSize;
- input_offset -= kPointerSize;
- if (is_bottommost) {
- value = input_->GetFrameSlot(input_offset);
- } else {
- value = reinterpret_cast<uint32_t>(function->context());
- }
- output_frame->SetFrameSlot(output_offset, value);
- output_frame->SetContext(value);
- if (is_topmost) output_frame->SetRegister(esi.code(), value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context\n",
- top_address + output_offset, output_offset, value);
- }
-
- // The function was mentioned explicitly in the BEGIN_FRAME.
- output_offset -= kPointerSize;
- input_offset -= kPointerSize;
- value = reinterpret_cast<uint32_t>(function);
- // The function for the bottommost output frame should also agree with the
- // input frame.
- ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function\n",
- top_address + output_offset, output_offset, value);
- }
-
- // Translate the rest of the frame.
- for (unsigned i = 0; i < height; ++i) {
- output_offset -= kPointerSize;
- DoTranslateCommand(iterator, frame_index, output_offset);
- }
- ASSERT(0 == output_offset);
-
- // Compute this frame's PC, state, and continuation.
- Code* non_optimized_code = function->shared()->code();
- FixedArray* raw_data = non_optimized_code->deoptimization_data();
- DeoptimizationOutputData* data = DeoptimizationOutputData::cast(raw_data);
- Address start = non_optimized_code->instruction_start();
- unsigned pc_and_state = GetOutputInfo(data, node_id, function->shared());
- unsigned pc_offset = FullCodeGenerator::PcField::decode(pc_and_state);
- uint32_t pc_value = reinterpret_cast<uint32_t>(start + pc_offset);
- output_frame->SetPc(pc_value);
-
- FullCodeGenerator::State state =
- FullCodeGenerator::StateField::decode(pc_and_state);
- output_frame->SetState(Smi::FromInt(state));
-
- // Set the continuation for the topmost frame.
- if (is_topmost && bailout_type_ != DEBUGGER) {
- Builtins* builtins = isolate_->builtins();
- Code* continuation = (bailout_type_ == EAGER)
- ? builtins->builtin(Builtins::kNotifyDeoptimized)
- : builtins->builtin(Builtins::kNotifyLazyDeoptimized);
- output_frame->SetContinuation(
- reinterpret_cast<uint32_t>(continuation->entry()));
- }
-}
-
-
-void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
- // Set the register values. The values are not important as there are no
- // callee saved registers in JavaScript frames, so all registers are
- // spilled. Registers ebp and esp are set to the correct values though.
-
- for (int i = 0; i < Register::kNumRegisters; i++) {
- input_->SetRegister(i, i * 4);
- }
- input_->SetRegister(esp.code(), reinterpret_cast<intptr_t>(frame->sp()));
- input_->SetRegister(ebp.code(), reinterpret_cast<intptr_t>(frame->fp()));
- for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
- input_->SetDoubleRegister(i, 0.0);
- }
-
- // Fill the frame content from the actual data on the frame.
- for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
- input_->SetFrameSlot(i, Memory::uint32_at(tos + i));
- }
+ JavaScriptFrameConstants::kLocal0Offset);
+ int32_t alignment_state = input_->GetFrameSlot(alignment_state_offset);
+ return (alignment_state == kAlignmentPaddingPushed);
}
@@ -1012,9 +235,6 @@ void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
void Deoptimizer::EntryGenerator::Generate() {
GeneratePrologue();
- CpuFeatures::Scope scope(SSE2);
-
- Isolate* isolate = masm()->isolate();
// Save all general purpose registers before messing with them.
const int kNumberOfRegisters = Register::kNumRegisters;
@@ -1022,10 +242,13 @@ void Deoptimizer::EntryGenerator::Generate() {
const int kDoubleRegsSize = kDoubleSize *
XMMRegister::kNumAllocatableRegisters;
__ sub(esp, Immediate(kDoubleRegsSize));
- for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
- XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
- int offset = i * kDoubleSize;
- __ movdbl(Operand(esp, offset), xmm_reg);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatureScope scope(masm(), SSE2);
+ for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
+ XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
+ int offset = i * kDoubleSize;
+ __ movsd(Operand(esp, offset), xmm_reg);
+ }
}
__ pushad();
@@ -1036,15 +259,11 @@ void Deoptimizer::EntryGenerator::Generate() {
// Get the bailout id from the stack.
__ mov(ebx, Operand(esp, kSavedRegistersAreaSize));
- // Get the address of the location in the code object if possible
+ // Get the address of the location in the code object
// and compute the fp-to-sp delta in register edx.
- if (type() == EAGER) {
- __ Set(ecx, Immediate(0));
- __ lea(edx, Operand(esp, kSavedRegistersAreaSize + 1 * kPointerSize));
- } else {
- __ mov(ecx, Operand(esp, kSavedRegistersAreaSize + 1 * kPointerSize));
- __ lea(edx, Operand(esp, kSavedRegistersAreaSize + 2 * kPointerSize));
- }
+ __ mov(ecx, Operand(esp, kSavedRegistersAreaSize + 1 * kPointerSize));
+ __ lea(edx, Operand(esp, kSavedRegistersAreaSize + 2 * kPointerSize));
+
__ sub(edx, ebp);
__ neg(edx);
@@ -1057,10 +276,10 @@ void Deoptimizer::EntryGenerator::Generate() {
__ mov(Operand(esp, 3 * kPointerSize), ecx); // Code address or 0.
__ mov(Operand(esp, 4 * kPointerSize), edx); // Fp-to-sp delta.
__ mov(Operand(esp, 5 * kPointerSize),
- Immediate(ExternalReference::isolate_address()));
+ Immediate(ExternalReference::isolate_address(isolate())));
{
AllowExternalCallThatCantCauseGC scope(masm());
- __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
+ __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate()), 6);
}
// Preserve deoptimizer object in register eax and get the input
@@ -1073,21 +292,25 @@ void Deoptimizer::EntryGenerator::Generate() {
__ pop(Operand(ebx, offset));
}
- // Fill in the double input registers.
int double_regs_offset = FrameDescription::double_registers_offset();
- for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
- int dst_offset = i * kDoubleSize + double_regs_offset;
- int src_offset = i * kDoubleSize;
- __ movdbl(xmm0, Operand(esp, src_offset));
- __ movdbl(Operand(ebx, dst_offset), xmm0);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatureScope scope(masm(), SSE2);
+ // Fill in the double input registers.
+ for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
+ int dst_offset = i * kDoubleSize + double_regs_offset;
+ int src_offset = i * kDoubleSize;
+ __ movsd(xmm0, Operand(esp, src_offset));
+ __ movsd(Operand(ebx, dst_offset), xmm0);
+ }
}
- // Remove the bailout id and the double registers from the stack.
- if (type() == EAGER) {
- __ add(esp, Immediate(kDoubleRegsSize + kPointerSize));
- } else {
- __ add(esp, Immediate(kDoubleRegsSize + 2 * kPointerSize));
- }
+  // Clear all FPU exceptions.
+ // TODO(ulan): Find out why the TOP register is not zero here in some cases,
+ // and check that the generated code never deoptimizes with unbalanced stack.
+ __ fnclex();
+
+ // Remove the bailout id, return address and the double registers.
+ __ add(esp, Immediate(kDoubleRegsSize + 2 * kPointerSize));
// Compute a pointer to the unwinding limit in register ecx; that is
// the first stack slot not part of the input frame.
@@ -1098,10 +321,13 @@ void Deoptimizer::EntryGenerator::Generate() {
// limit and copy the contents of the activation frame to the input
// frame description.
__ lea(edx, Operand(ebx, FrameDescription::frame_content_offset()));
+ Label pop_loop_header;
+ __ jmp(&pop_loop_header);
Label pop_loop;
__ bind(&pop_loop);
__ pop(Operand(edx, 0));
__ add(edx, Immediate(sizeof(uint32_t)));
+ __ bind(&pop_loop_header);
__ cmp(ecx, esp);
__ j(not_equal, &pop_loop);
@@ -1112,65 +338,59 @@ void Deoptimizer::EntryGenerator::Generate() {
{
AllowExternalCallThatCantCauseGC scope(masm());
__ CallCFunction(
- ExternalReference::compute_output_frames_function(isolate), 1);
+ ExternalReference::compute_output_frames_function(isolate()), 1);
}
__ pop(eax);
- if (type() != OSR) {
- // If frame was dynamically aligned, pop padding.
- Label no_padding;
- __ cmp(Operand(eax, Deoptimizer::has_alignment_padding_offset()),
- Immediate(0));
- __ j(equal, &no_padding);
- __ pop(ecx);
- if (FLAG_debug_code) {
- __ cmp(ecx, Immediate(kAlignmentZapValue));
- __ Assert(equal, "alignment marker expected");
- }
- __ bind(&no_padding);
- } else {
- // If frame needs dynamic alignment push padding.
- Label no_padding;
- __ cmp(Operand(eax, Deoptimizer::has_alignment_padding_offset()),
- Immediate(0));
- __ j(equal, &no_padding);
- __ push(Immediate(kAlignmentZapValue));
- __ bind(&no_padding);
+ // If frame was dynamically aligned, pop padding.
+ Label no_padding;
+ __ cmp(Operand(eax, Deoptimizer::has_alignment_padding_offset()),
+ Immediate(0));
+ __ j(equal, &no_padding);
+ __ pop(ecx);
+ if (FLAG_debug_code) {
+ __ cmp(ecx, Immediate(kAlignmentZapValue));
+ __ Assert(equal, kAlignmentMarkerExpected);
}
+ __ bind(&no_padding);
// Replace the current frame with the output frames.
- Label outer_push_loop, inner_push_loop;
+ Label outer_push_loop, inner_push_loop,
+ outer_loop_header, inner_loop_header;
// Outer loop state: eax = current FrameDescription**, edx = one past the
// last FrameDescription**.
__ mov(edx, Operand(eax, Deoptimizer::output_count_offset()));
__ mov(eax, Operand(eax, Deoptimizer::output_offset()));
__ lea(edx, Operand(eax, edx, times_4, 0));
+ __ jmp(&outer_loop_header);
__ bind(&outer_push_loop);
// Inner loop state: ebx = current FrameDescription*, ecx = loop index.
__ mov(ebx, Operand(eax, 0));
__ mov(ecx, Operand(ebx, FrameDescription::frame_size_offset()));
+ __ jmp(&inner_loop_header);
__ bind(&inner_push_loop);
__ sub(ecx, Immediate(sizeof(uint32_t)));
__ push(Operand(ebx, ecx, times_1, FrameDescription::frame_content_offset()));
+ __ bind(&inner_loop_header);
__ test(ecx, ecx);
__ j(not_zero, &inner_push_loop);
__ add(eax, Immediate(kPointerSize));
+ __ bind(&outer_loop_header);
__ cmp(eax, edx);
__ j(below, &outer_push_loop);
- // In case of OSR, we have to restore the XMM registers.
- if (type() == OSR) {
+ // In case of a failed STUB, we have to restore the XMM registers.
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatureScope scope(masm(), SSE2);
for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
int src_offset = i * kDoubleSize + double_regs_offset;
- __ movdbl(xmm_reg, Operand(ebx, src_offset));
+ __ movsd(xmm_reg, Operand(ebx, src_offset));
}
}
// Push state, pc, and continuation from the last output frame.
- if (type() != OSR) {
- __ push(Operand(ebx, FrameDescription::state_offset()));
- }
+ __ push(Operand(ebx, FrameDescription::state_offset()));
__ push(Operand(ebx, FrameDescription::pc_offset()));
__ push(Operand(ebx, FrameDescription::continuation_offset()));
@@ -1202,6 +422,17 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
__ bind(&done);
}
+
+void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
+ SetFrameSlot(offset, value);
+}
+
+
+void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
+ SetFrameSlot(offset, value);
+}
+
+
#undef __
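
For orientation, the alignment-padding logic that the hunks above fold into the new HasAlignmentPadding helper boils down to the following sketch. Identifiers are the ones visible in the diff; this is an illustration under those assumptions, not code from the patch:

// The optimized frame keeps an alignment-state word just below the fixed
// frame part. If it records kAlignmentPaddingPushed, one extra padding slot
// sits on the stack and the deoptimizer must account for it when rebuilding
// the unoptimized frame.
static bool FrameHasAlignmentPadding(FrameDescription* input,
                                     JSFunction* function) {
  int parameter_count = function->shared()->formal_parameter_count() + 1;
  unsigned offset = input->GetFrameSize() -
                    parameter_count * kPointerSize -
                    StandardFrameConstants::kFixedFrameSize -
                    kPointerSize;  // the alignment-state slot itself
  return input->GetFrameSlot(offset) == kAlignmentPaddingPushed;
}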
diff --git a/deps/v8/src/ia32/disasm-ia32.cc b/deps/v8/src/ia32/disasm-ia32.cc
index 75b46bd47..13cf6bc49 100644
--- a/deps/v8/src/ia32/disasm-ia32.cc
+++ b/deps/v8/src/ia32/disasm-ia32.cc
@@ -31,7 +31,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_IA32)
+#if V8_TARGET_ARCH_IA32
#include "disasm.h"
@@ -575,6 +575,7 @@ int DisassemblerIA32::F7Instruction(byte* data) {
}
}
+
int DisassemblerIA32::D1D3C1Instruction(byte* data) {
byte op = *data;
ASSERT(op == 0xD1 || op == 0xD3 || op == 0xC1);
@@ -605,7 +606,7 @@ int DisassemblerIA32::D1D3C1Instruction(byte* data) {
}
ASSERT_NE(NULL, mnem);
AppendToBuffer("%s %s,", mnem, NameOfCPURegister(rm));
- if (imm8 > 0) {
+ if (imm8 >= 0) {
AppendToBuffer("%d", imm8);
} else {
AppendToBuffer("cl");
@@ -697,6 +698,7 @@ int DisassemblerIA32::MemoryFPUInstruction(int escape_opcode,
switch (escape_opcode) {
case 0xD9: switch (regop) {
case 0: mnem = "fld_s"; break;
+ case 2: mnem = "fst_s"; break;
case 3: mnem = "fstp_s"; break;
case 7: mnem = "fstcw"; break;
default: UnimplementedInstruction();
@@ -742,7 +744,14 @@ int DisassemblerIA32::RegisterFPUInstruction(int escape_opcode,
switch (escape_opcode) {
case 0xD8:
- UnimplementedInstruction();
+ has_register = true;
+ switch (modrm_byte & 0xF8) {
+ case 0xC0: mnem = "fadd_i"; break;
+ case 0xE0: mnem = "fsub_i"; break;
+ case 0xC8: mnem = "fmul_i"; break;
+ case 0xF0: mnem = "fdiv_i"; break;
+ default: UnimplementedInstruction();
+ }
break;
case 0xD9:
@@ -766,6 +775,7 @@ int DisassemblerIA32::RegisterFPUInstruction(int escape_opcode,
case 0xEE: mnem = "fldz"; break;
case 0xF0: mnem = "f2xm1"; break;
case 0xF1: mnem = "fyl2x"; break;
+ case 0xF4: mnem = "fxtract"; break;
case 0xF5: mnem = "fprem1"; break;
case 0xF7: mnem = "fincstp"; break;
case 0xF8: mnem = "fprem"; break;
@@ -814,6 +824,7 @@ int DisassemblerIA32::RegisterFPUInstruction(int escape_opcode,
has_register = true;
switch (modrm_byte & 0xF8) {
case 0xC0: mnem = "ffree"; break;
+ case 0xD0: mnem = "fst"; break;
case 0xD8: mnem = "fstp"; break;
default: UnimplementedInstruction();
}
@@ -861,7 +872,6 @@ static const char* F0Mnem(byte f0byte) {
switch (f0byte) {
case 0x18: return "prefetch";
case 0xA2: return "cpuid";
- case 0x31: return "rdtsc";
case 0xBE: return "movsx_b";
case 0xBF: return "movsx_w";
case 0xB6: return "movzx_b";
@@ -869,6 +879,7 @@ static const char* F0Mnem(byte f0byte) {
case 0xAF: return "imul";
case 0xA5: return "shld";
case 0xAD: return "shrd";
+ case 0xAC: return "shrd"; // 3-operand version.
case 0xAB: return "bts";
default: return NULL;
}
@@ -931,13 +942,13 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
case SHORT_IMMEDIATE_INSTR: {
byte* addr = reinterpret_cast<byte*>(*reinterpret_cast<int32_t*>(data+1));
- AppendToBuffer("%s eax, %s", idesc.mnem, NameOfAddress(addr));
+ AppendToBuffer("%s eax,%s", idesc.mnem, NameOfAddress(addr));
data += 5;
break;
}
case BYTE_IMMEDIATE_INSTR: {
- AppendToBuffer("%s al, 0x%x", idesc.mnem, data[1]);
+ AppendToBuffer("%s al,0x%x", idesc.mnem, data[1]);
data += 2;
break;
}
@@ -1031,6 +1042,14 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
NameOfXMMRegister(regop),
NameOfXMMRegister(rm));
data++;
+ } else if (f0byte == 0x54) {
+ data += 2;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("andps %s,%s",
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm));
+ data++;
} else if (f0byte == 0x57) {
data += 2;
int mod, regop, rm;
@@ -1039,6 +1058,14 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
NameOfXMMRegister(regop),
NameOfXMMRegister(rm));
data++;
+ } else if (f0byte == 0x50) {
+ data += 2;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("movmskps %s,%s",
+ NameOfCPURegister(regop),
+ NameOfXMMRegister(rm));
+ data++;
} else if ((f0byte & 0xF0) == 0x80) {
data += JumpConditional(data, branch_hint);
} else if (f0byte == 0xBE || f0byte == 0xBF || f0byte == 0xB6 ||
@@ -1220,8 +1247,8 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
get_modrm(*data, &mod, &regop, &rm);
int8_t imm8 = static_cast<int8_t>(data[1]);
AppendToBuffer("extractps %s,%s,%d",
- NameOfCPURegister(regop),
- NameOfXMMRegister(rm),
+ NameOfCPURegister(rm),
+ NameOfXMMRegister(regop),
static_cast<int>(imm8));
data += 2;
} else if (*data == 0x22) {
@@ -1439,6 +1466,7 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
data += D1D3C1Instruction(data);
break;
+ case 0xD8: // fall through
case 0xD9: // fall through
case 0xDA: // fall through
case 0xDB: // fall through
diff --git a/deps/v8/src/ia32/frames-ia32.cc b/deps/v8/src/ia32/frames-ia32.cc
index dd44f0ee5..557081176 100644
--- a/deps/v8/src/ia32/frames-ia32.cc
+++ b/deps/v8/src/ia32/frames-ia32.cc
@@ -27,17 +27,23 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_IA32)
+#if V8_TARGET_ARCH_IA32
-#include "frames-inl.h"
+#include "assembler.h"
+#include "assembler-ia32.h"
+#include "assembler-ia32-inl.h"
+#include "frames.h"
namespace v8 {
namespace internal {
-Address ExitFrame::ComputeStackPointer(Address fp) {
- return Memory::Address_at(fp + ExitFrameConstants::kSPOffset);
-}
+Register JavaScriptFrame::fp_register() { return ebp; }
+Register JavaScriptFrame::context_register() { return esi; }
+
+
+Register StubFailureTrampolineFrame::fp_register() { return ebp; }
+Register StubFailureTrampolineFrame::context_register() { return esi; }
} } // namespace v8::internal
diff --git a/deps/v8/src/ia32/frames-ia32.h b/deps/v8/src/ia32/frames-ia32.h
index 18915e2e3..860612510 100644
--- a/deps/v8/src/ia32/frames-ia32.h
+++ b/deps/v8/src/ia32/frames-ia32.h
@@ -60,18 +60,6 @@ const int kAlignmentZapValue = 0x12345678; // Not heap object tagged.
// ----------------------------------------------------
-class StackHandlerConstants : public AllStatic {
- public:
- static const int kNextOffset = 0 * kPointerSize;
- static const int kCodeOffset = 1 * kPointerSize;
- static const int kStateOffset = 2 * kPointerSize;
- static const int kContextOffset = 3 * kPointerSize;
- static const int kFPOffset = 4 * kPointerSize;
-
- static const int kSize = kFPOffset + kPointerSize;
-};
-
-
class EntryFrameConstants : public AllStatic {
public:
static const int kCallerFPOffset = -6 * kPointerSize;
@@ -97,22 +85,6 @@ class ExitFrameConstants : public AllStatic {
};
-class StandardFrameConstants : public AllStatic {
- public:
- // Fixed part of the frame consists of return address, caller fp,
- // context and function.
- // StandardFrame::IterateExpressions assumes that kContextOffset is the last
- // object pointer.
- static const int kFixedFrameSize = 4 * kPointerSize;
- static const int kExpressionsOffset = -3 * kPointerSize;
- static const int kMarkerOffset = -2 * kPointerSize;
- static const int kContextOffset = -1 * kPointerSize;
- static const int kCallerFPOffset = 0 * kPointerSize;
- static const int kCallerPCOffset = +1 * kPointerSize;
- static const int kCallerSPOffset = +2 * kPointerSize;
-};
-
-
class JavaScriptFrameConstants : public AllStatic {
public:
// FP-relative.
@@ -130,14 +102,30 @@ class JavaScriptFrameConstants : public AllStatic {
class ArgumentsAdaptorFrameConstants : public AllStatic {
public:
+ // FP-relative.
static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset;
+
static const int kFrameSize =
StandardFrameConstants::kFixedFrameSize + kPointerSize;
};
+class ConstructFrameConstants : public AllStatic {
+ public:
+ // FP-relative.
+ static const int kImplicitReceiverOffset = -5 * kPointerSize;
+ static const int kConstructorOffset = kMinInt;
+ static const int kLengthOffset = -4 * kPointerSize;
+ static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
+
+ static const int kFrameSize =
+ StandardFrameConstants::kFixedFrameSize + 3 * kPointerSize;
+};
+
+
class InternalFrameConstants : public AllStatic {
public:
+ // FP-relative.
static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
};
@@ -148,6 +136,11 @@ inline Object* JavaScriptFrame::function_slot_object() const {
}
+inline void StackHandler::SetFp(Address slot, Address fp) {
+ Memory::Address_at(slot) = fp;
+}
+
+
} } // namespace v8::internal
#endif // V8_IA32_FRAMES_IA32_H_
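
To make the new FP-relative constants above concrete, here is a small usage sketch. The helpers are hypothetical and not part of the patch; Memory::Object_at and a frame pointer belonging to a CONSTRUCT frame are assumed:

// Reads the slots that ConstructFrameConstants describes, relative to the
// frame pointer of a construct frame. Note kConstructorOffset is kMinInt,
// i.e. the constructor itself is not stored in this layout.
static Object* ImplicitReceiver(Address fp) {
  return Memory::Object_at(fp + ConstructFrameConstants::kImplicitReceiverOffset);
}
static Object* ArgumentCountSmi(Address fp) {
  return Memory::Object_at(fp + ConstructFrameConstants::kLengthOffset);
}
static Object* ConstructStubCode(Address fp) {
  return Memory::Object_at(fp + ConstructFrameConstants::kCodeOffset);
}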
diff --git a/deps/v8/src/ia32/full-codegen-ia32.cc b/deps/v8/src/ia32/full-codegen-ia32.cc
index 406537d2d..704fb4e7d 100644
--- a/deps/v8/src/ia32/full-codegen-ia32.cc
+++ b/deps/v8/src/ia32/full-codegen-ia32.cc
@@ -27,7 +27,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_IA32)
+#if V8_TARGET_ARCH_IA32
#include "code-stubs.h"
#include "codegen.h"
@@ -107,6 +107,7 @@ class JumpPatchSite BASE_EMBEDDED {
// formal parameter count expected by the function.
//
// The live registers are:
+// o ecx: CallKind
// o edi: the JS function object being called (i.e. ourselves)
// o esi: our context
// o ebp: our caller's frame pointer
@@ -118,8 +119,8 @@ void FullCodeGenerator::Generate() {
CompilationInfo* info = info_;
handler_table_ =
isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
- profiling_counter_ = isolate()->factory()->NewJSGlobalPropertyCell(
- Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget)));
+ profiling_counter_ = isolate()->factory()->NewCell(
+ Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
SetFunctionPosition(function());
Comment cmnt(masm_, "[ function compiled by full code generator");
@@ -127,7 +128,7 @@ void FullCodeGenerator::Generate() {
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
- info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
+ info->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
__ int3();
}
#endif
@@ -156,13 +157,14 @@ void FullCodeGenerator::Generate() {
// the frame (that is done below).
FrameScope frame_scope(masm_, StackFrame::MANUAL);
- __ push(ebp); // Caller's frame pointer.
- __ mov(ebp, esp);
- __ push(esi); // Callee's context.
- __ push(edi); // Callee's JS Function.
+ info->set_prologue_offset(masm_->pc_offset());
+ __ Prologue(BUILD_FUNCTION_FRAME);
+ info->AddNoFrameRange(0, masm_->pc_offset());
{ Comment cmnt(masm_, "[ Allocate locals");
int locals_count = info->scope()->num_stack_slots();
+ // Generators allocate locals, if any, in context slots.
+ ASSERT(!info->function()->is_generator() || locals_count == 0);
if (locals_count == 1) {
__ push(Immediate(isolate()->factory()->undefined_value()));
} else if (locals_count > 1) {
@@ -283,8 +285,7 @@ void FullCodeGenerator::Generate() {
ExternalReference::address_of_stack_limit(isolate());
__ cmp(esp, Operand::StaticVariable(stack_limit));
__ j(above_equal, &ok, Label::kNear);
- StackCheckStub stub;
- __ CallStub(&stub);
+ __ call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
__ bind(&ok);
}
@@ -311,7 +312,7 @@ void FullCodeGenerator::ClearAccumulator() {
void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) {
__ mov(ebx, Immediate(profiling_counter_));
- __ sub(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
+ __ sub(FieldOperand(ebx, Cell::kValueOffset),
Immediate(Smi::FromInt(delta)));
}
@@ -323,55 +324,33 @@ void FullCodeGenerator::EmitProfilingCounterReset() {
reset_value = Smi::kMaxValue;
}
__ mov(ebx, Immediate(profiling_counter_));
- __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
+ __ mov(FieldOperand(ebx, Cell::kValueOffset),
Immediate(Smi::FromInt(reset_value)));
}
-void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
- Label* back_edge_target) {
- Comment cmnt(masm_, "[ Stack check");
+void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
+ Label* back_edge_target) {
+ Comment cmnt(masm_, "[ Back edge bookkeeping");
Label ok;
- if (FLAG_count_based_interrupts) {
- int weight = 1;
- if (FLAG_weighted_back_edges) {
- ASSERT(back_edge_target->is_bound());
- int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
- weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kBackEdgeDistanceUnit));
- }
- EmitProfilingCounterDecrement(weight);
- __ j(positive, &ok, Label::kNear);
- InterruptStub stub;
- __ CallStub(&stub);
- } else {
- // Count based interrupts happen often enough when they are enabled
- // that the additional stack checks are not necessary (they would
- // only check for interrupts).
- ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit(isolate());
- __ cmp(esp, Operand::StaticVariable(stack_limit));
- __ j(above_equal, &ok, Label::kNear);
- StackCheckStub stub;
- __ CallStub(&stub);
+ int weight = 1;
+ if (FLAG_weighted_back_edges) {
+ ASSERT(back_edge_target->is_bound());
+ int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
+ weight = Min(kMaxBackEdgeWeight,
+ Max(1, distance / kCodeSizeMultiplier));
}
+ EmitProfilingCounterDecrement(weight);
+ __ j(positive, &ok, Label::kNear);
+ __ call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET);
// Record a mapping of this PC offset to the OSR id. This is used to find
// the AST id from the unoptimized code in order to use it as a key into
// the deoptimization input data found in the optimized code.
- RecordStackCheck(stmt->OsrEntryId());
+ RecordBackEdge(stmt->OsrEntryId());
- // Loop stack checks can be patched to perform on-stack replacement. In
- // order to decide whether or not to perform OSR we embed the loop depth
- // in a test instruction after the call so we can extract it from the OSR
- // builtin.
- ASSERT(loop_depth() > 0);
- __ test(eax, Immediate(Min(loop_depth(), Code::kMaxLoopNestingMarker)));
-
- if (FLAG_count_based_interrupts) {
- EmitProfilingCounterReset();
- }
+ EmitProfilingCounterReset();
__ bind(&ok);
PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
@@ -401,7 +380,7 @@ void FullCodeGenerator::EmitReturnSequence() {
} else if (FLAG_weighted_back_edges) {
int distance = masm_->pc_offset();
weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kBackEdgeDistanceUnit));
+ Max(1, distance / kCodeSizeMultiplier));
}
EmitProfilingCounterDecrement(weight);
Label ok;
@@ -411,8 +390,8 @@ void FullCodeGenerator::EmitReturnSequence() {
__ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ CallRuntime(Runtime::kOptimizeFunctionOnNextCall, 1);
} else {
- InterruptStub stub;
- __ CallStub(&stub);
+ __ call(isolate()->builtins()->InterruptCheck(),
+ RelocInfo::CODE_TARGET);
}
__ pop(eax);
EmitProfilingCounterReset();
@@ -428,6 +407,7 @@ void FullCodeGenerator::EmitReturnSequence() {
// Do not use the leave instruction here because it is too short to
// patch with the code required by the debugger.
__ mov(esp, ebp);
+ int no_frame_start = masm_->pc_offset();
__ pop(ebp);
int arguments_bytes = (info_->scope()->num_parameters() + 1) * kPointerSize;
@@ -438,6 +418,7 @@ void FullCodeGenerator::EmitReturnSequence() {
ASSERT(Assembler::kJSReturnSequenceLength <=
masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
#endif
+ info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
}
}
@@ -656,9 +637,8 @@ void FullCodeGenerator::DoTest(Expression* condition,
Label* if_true,
Label* if_false,
Label* fall_through) {
- ToBooleanStub stub(result_register());
- __ push(result_register());
- __ CallStub(&stub, condition->test_id());
+ Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
+ CallIC(ic, RelocInfo::CODE_TARGET, condition->test_id());
__ test(result_register(), result_register());
// The stub returns nonzero for true.
Split(not_zero, if_true, if_false, fall_through);
@@ -754,16 +734,15 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
- // The variable in the declaration always resides in the current function
- // context.
+ // The variable in the declaration always resides in the current context.
ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
if (generate_debug_code_) {
// Check that we're not inside a with or catch context.
__ mov(ebx, FieldOperand(esi, HeapObject::kMapOffset));
__ cmp(ebx, isolate()->factory()->with_context_map());
- __ Check(not_equal, "Declaration in with context.");
+ __ Check(not_equal, kDeclarationInWithContext);
__ cmp(ebx, isolate()->factory()->catch_context_map());
- __ Check(not_equal, "Declaration in catch context.");
+ __ Check(not_equal, kDeclarationInCatchContext);
}
}
@@ -884,33 +863,32 @@ void FullCodeGenerator::VisitFunctionDeclaration(
void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
- VariableProxy* proxy = declaration->proxy();
- Variable* variable = proxy->var();
- Handle<JSModule> instance = declaration->module()->interface()->Instance();
- ASSERT(!instance.is_null());
+ Variable* variable = declaration->proxy()->var();
+ ASSERT(variable->location() == Variable::CONTEXT);
+ ASSERT(variable->interface()->IsFrozen());
- switch (variable->location()) {
- case Variable::UNALLOCATED: {
- Comment cmnt(masm_, "[ ModuleDeclaration");
- globals_->Add(variable->name(), zone());
- globals_->Add(instance, zone());
- Visit(declaration->module());
- break;
- }
+ Comment cmnt(masm_, "[ ModuleDeclaration");
+ EmitDebugCheckDeclarationContext(variable);
- case Variable::CONTEXT: {
- Comment cmnt(masm_, "[ ModuleDeclaration");
- EmitDebugCheckDeclarationContext(variable);
- __ mov(ContextOperand(esi, variable->index()), Immediate(instance));
- Visit(declaration->module());
- break;
- }
+ // Load instance object.
+ __ LoadContext(eax, scope_->ContextChainLength(scope_->GlobalScope()));
+ __ mov(eax, ContextOperand(eax, variable->interface()->Index()));
+ __ mov(eax, ContextOperand(eax, Context::EXTENSION_INDEX));
- case Variable::PARAMETER:
- case Variable::LOCAL:
- case Variable::LOOKUP:
- UNREACHABLE();
- }
+ // Assign it.
+ __ mov(ContextOperand(esi, variable->index()), eax);
+ // We know that we have written a module, which is not a smi.
+ __ RecordWriteContextSlot(esi,
+ Context::SlotOffset(variable->index()),
+ eax,
+ ecx,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ PrepareForBailoutForId(declaration->proxy()->id(), NO_REGISTERS);
+
+ // Traverse into body.
+ Visit(declaration->module());
}
@@ -945,13 +923,21 @@ void FullCodeGenerator::VisitExportDeclaration(ExportDeclaration* declaration) {
void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
// Call the runtime to declare the globals.
__ push(esi); // The context is the first argument.
- __ push(Immediate(pairs));
- __ push(Immediate(Smi::FromInt(DeclareGlobalsFlags())));
+ __ Push(pairs);
+ __ Push(Smi::FromInt(DeclareGlobalsFlags()));
__ CallRuntime(Runtime::kDeclareGlobals, 3);
// Return value is ignored.
}
+void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
+ // Call the runtime to declare the modules.
+ __ Push(descriptions);
+ __ CallRuntime(Runtime::kDeclareModules, 1);
+ // Return value is ignored.
+}
+
+
void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
Comment cmnt(masm_, "[ SwitchStatement");
Breakable nested_statement(this, stmt);
@@ -1002,7 +988,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Record position before stub call for type feedback.
SetSourcePosition(clause->position());
- Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
+ Handle<Code> ic = CompareIC::GetUninitialized(isolate(), Token::EQ_STRICT);
CallIC(ic, RelocInfo::CODE_TARGET, clause->CompareId());
patch_site.EmitPatchInfo();
__ test(eax, eax);
@@ -1043,9 +1029,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
ForIn loop_statement(this, stmt);
increment_loop_depth();
- // Get the object to enumerate over. Both SpiderMonkey and JSC
- // ignore null and undefined in contrast to the specification; see
- // ECMA-262 section 12.6.4.
+ // Get the object to enumerate over. If the object is null or undefined, skip
+ // over the loop. See ECMA-262 version 5, section 12.6.4.
VisitForAccumulatorValue(stmt->enumerable());
__ cmp(eax, isolate()->factory()->undefined_value());
__ j(equal, &exit);
@@ -1116,13 +1101,12 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Label non_proxy;
__ bind(&fixed_array);
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(
- Handle<Object>(
- Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker)));
+ Handle<Cell> cell = isolate()->factory()->NewCell(
+ Handle<Object>(Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker),
+ isolate()));
RecordTypeFeedbackCell(stmt->ForInFeedbackId(), cell);
__ LoadHeapObject(ebx, cell);
- __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
+ __ mov(FieldOperand(ebx, Cell::kValueOffset),
Immediate(Smi::FromInt(TypeFeedbackCells::kForInSlowCaseMarker)));
__ mov(ebx, Immediate(Smi::FromInt(1))); // Smi indicates slow check
@@ -1193,7 +1177,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ bind(loop_statement.continue_label());
__ add(Operand(esp, 0 * kPointerSize), Immediate(Smi::FromInt(1)));
- EmitStackCheck(stmt, &loop);
+ EmitBackEdgeBookkeeping(stmt, &loop);
__ jmp(&loop);
// Remove the pointers stored on the stack.
@@ -1207,6 +1191,64 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
}
+void FullCodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
+ Comment cmnt(masm_, "[ ForOfStatement");
+ SetStatementPosition(stmt);
+
+ Iteration loop_statement(this, stmt);
+ increment_loop_depth();
+
+ // var iterator = iterable[@@iterator]()
+ VisitForAccumulatorValue(stmt->assign_iterator());
+
+ // As with for-in, skip the loop if the iterator is null or undefined.
+ __ CompareRoot(eax, Heap::kUndefinedValueRootIndex);
+ __ j(equal, loop_statement.break_label());
+ __ CompareRoot(eax, Heap::kNullValueRootIndex);
+ __ j(equal, loop_statement.break_label());
+
+ // Convert the iterator to a JS object.
+ Label convert, done_convert;
+ __ JumpIfSmi(eax, &convert);
+ __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
+ __ j(above_equal, &done_convert);
+ __ bind(&convert);
+ __ push(eax);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ bind(&done_convert);
+
+ // Loop entry.
+ __ bind(loop_statement.continue_label());
+
+ // result = iterator.next()
+ VisitForEffect(stmt->next_result());
+
+ // if (result.done) break;
+ Label result_not_done;
+ VisitForControl(stmt->result_done(),
+ loop_statement.break_label(),
+ &result_not_done,
+ &result_not_done);
+ __ bind(&result_not_done);
+
+ // each = result.value
+ VisitForEffect(stmt->assign_each());
+
+ // Generate code for the body of the loop.
+ Visit(stmt->body());
+
+ // Check stack before looping.
+ PrepareForBailoutForId(stmt->BackEdgeId(), NO_REGISTERS);
+ EmitBackEdgeBookkeeping(stmt, loop_statement.continue_label());
+ __ jmp(loop_statement.continue_label());
+
+ // Exit and decrement the loop depth.
+ PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+ __ bind(loop_statement.break_label());
+ decrement_loop_depth();
+}
+
+
void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
bool pretenure) {
// Use the fast case closure allocation code that allocates in new
@@ -1220,8 +1262,8 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
!pretenure &&
scope()->is_function_scope() &&
info->num_literals() == 0) {
- FastNewClosureStub stub(info->language_mode());
- __ push(Immediate(info));
+ FastNewClosureStub stub(info->language_mode(), info->is_generator());
+ __ mov(ebx, Immediate(info));
__ CallStub(&stub);
} else {
__ push(esi);
@@ -1346,9 +1388,9 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
} else if (var->mode() == DYNAMIC_LOCAL) {
Variable* local = var->local_if_not_shadowed();
__ mov(eax, ContextSlotOperandCheckExtensions(local, slow));
- if (local->mode() == CONST ||
- local->mode() == CONST_HARMONY ||
- local->mode() == LET) {
+ if (local->mode() == LET ||
+ local->mode() == CONST ||
+ local->mode() == CONST_HARMONY) {
__ cmp(eax, isolate()->factory()->the_hole_value());
__ j(not_equal, done);
if (local->mode() == CONST) {
@@ -1495,7 +1537,7 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
__ bind(&materialized);
int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
Label allocated, runtime_allocate;
- __ AllocateInNewSpace(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT);
+ __ Allocate(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT);
__ jmp(&allocated);
__ bind(&runtime_allocate);
@@ -1533,24 +1575,29 @@ void FullCodeGenerator::EmitAccessor(Expression* expression) {
void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
Handle<FixedArray> constant_properties = expr->constant_properties();
- __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(edi, JSFunction::kLiteralsOffset));
- __ push(Immediate(Smi::FromInt(expr->literal_index())));
- __ push(Immediate(constant_properties));
int flags = expr->fast_elements()
? ObjectLiteral::kFastElements
: ObjectLiteral::kNoFlags;
flags |= expr->has_function()
? ObjectLiteral::kHasFunction
: ObjectLiteral::kNoFlags;
- __ push(Immediate(Smi::FromInt(flags)));
int properties_count = constant_properties->length() / 2;
- if (expr->depth() > 1) {
- __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
- } else if (flags != ObjectLiteral::kFastElements ||
+ if ((FLAG_track_double_fields && expr->may_store_doubles()) ||
+ expr->depth() > 1 || Serializer::enabled() ||
+ flags != ObjectLiteral::kFastElements ||
properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
- __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
+ __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ __ push(FieldOperand(edi, JSFunction::kLiteralsOffset));
+ __ push(Immediate(Smi::FromInt(expr->literal_index())));
+ __ push(Immediate(constant_properties));
+ __ push(Immediate(Smi::FromInt(flags)));
+ __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
} else {
+ __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ __ mov(eax, FieldOperand(edi, JSFunction::kLiteralsOffset));
+ __ mov(ebx, Immediate(Smi::FromInt(expr->literal_index())));
+ __ mov(ecx, Immediate(constant_properties));
+ __ mov(edx, Immediate(Smi::FromInt(flags)));
FastCloneShallowObjectStub stub(properties_count);
__ CallStub(&stub);
}
@@ -1582,10 +1629,10 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
ASSERT(!CompileTimeValue::IsCompileTimeValue(value));
// Fall through.
case ObjectLiteral::Property::COMPUTED:
- if (key->handle()->IsSymbol()) {
+ if (key->value()->IsInternalizedString()) {
if (property->emit_store()) {
VisitForAccumulatorValue(value);
- __ mov(ecx, Immediate(key->handle()));
+ __ mov(ecx, Immediate(key->value()));
__ mov(edx, Operand(esp, 0));
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
@@ -1597,8 +1644,6 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
}
break;
}
- // Fall through.
- case ObjectLiteral::Property::PROTOTYPE:
__ push(Operand(esp, 0)); // Duplicate receiver.
VisitForStackValue(key);
VisitForStackValue(value);
@@ -1609,6 +1654,15 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ Drop(3);
}
break;
+ case ObjectLiteral::Property::PROTOTYPE:
+ __ push(Operand(esp, 0)); // Duplicate receiver.
+ VisitForStackValue(value);
+ if (property->emit_store()) {
+ __ CallRuntime(Runtime::kSetPrototype, 2);
+ } else {
+ __ Drop(2);
+ }
+ break;
case ObjectLiteral::Property::GETTER:
accessor_table.lookup(key)->second->getter = value;
break;
@@ -1659,33 +1713,54 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Handle<FixedArrayBase> constant_elements_values(
FixedArrayBase::cast(constant_elements->get(1)));
- __ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(ebx, JSFunction::kLiteralsOffset));
- __ push(Immediate(Smi::FromInt(expr->literal_index())));
- __ push(Immediate(constant_elements));
Heap* heap = isolate()->heap();
if (has_constant_fast_elements &&
constant_elements_values->map() == heap->fixed_cow_array_map()) {
// If the elements are already FAST_*_ELEMENTS, the boilerplate cannot
// change, so it's possible to specialize the stub in advance.
__ IncrementCounter(isolate()->counters()->cow_arrays_created_stub(), 1);
+ __ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ __ mov(eax, FieldOperand(ebx, JSFunction::kLiteralsOffset));
+ __ mov(ebx, Immediate(Smi::FromInt(expr->literal_index())));
+ __ mov(ecx, Immediate(constant_elements));
FastCloneShallowArrayStub stub(
FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS,
+ DONT_TRACK_ALLOCATION_SITE,
length);
__ CallStub(&stub);
} else if (expr->depth() > 1) {
+ __ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ __ push(FieldOperand(ebx, JSFunction::kLiteralsOffset));
+ __ push(Immediate(Smi::FromInt(expr->literal_index())));
+ __ push(Immediate(constant_elements));
__ CallRuntime(Runtime::kCreateArrayLiteral, 3);
- } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
+ } else if (Serializer::enabled() ||
+ length > FastCloneShallowArrayStub::kMaximumClonedLength) {
+ __ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ __ push(FieldOperand(ebx, JSFunction::kLiteralsOffset));
+ __ push(Immediate(Smi::FromInt(expr->literal_index())));
+ __ push(Immediate(constant_elements));
__ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
} else {
ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) ||
FLAG_smi_only_arrays);
+ FastCloneShallowArrayStub::Mode mode =
+ FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
+ AllocationSiteMode allocation_site_mode = FLAG_track_allocation_sites
+ ? TRACK_ALLOCATION_SITE : DONT_TRACK_ALLOCATION_SITE;
+
// If the elements are already FAST_*_ELEMENTS, the boilerplate cannot
// change, so it's possible to specialize the stub in advance.
- FastCloneShallowArrayStub::Mode mode = has_constant_fast_elements
- ? FastCloneShallowArrayStub::CLONE_ELEMENTS
- : FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
- FastCloneShallowArrayStub stub(mode, length);
+ if (has_constant_fast_elements) {
+ mode = FastCloneShallowArrayStub::CLONE_ELEMENTS;
+ allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
+ }
+
+ __ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ __ mov(eax, FieldOperand(ebx, JSFunction::kLiteralsOffset));
+ __ mov(ebx, Immediate(Smi::FromInt(expr->literal_index())));
+ __ mov(ecx, Immediate(constant_elements));
+ FastCloneShallowArrayStub stub(mode, allocation_site_mode, length);
__ CallStub(&stub);
}
@@ -1697,13 +1772,11 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Expression* subexpr = subexprs->at(i);
// If the subexpression is a literal or a simple materialized literal it
// is already set in the cloned array.
- if (subexpr->AsLiteral() != NULL ||
- CompileTimeValue::IsCompileTimeValue(subexpr)) {
- continue;
- }
+ if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
if (!result_saved) {
- __ push(eax);
+ __ push(eax); // array literal.
+ __ push(Immediate(Smi::FromInt(expr->literal_index())));
result_saved = true;
}
VisitForAccumulatorValue(subexpr);
@@ -1712,7 +1785,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
// Fast-case array literal with ElementsKind of FAST_*_ELEMENTS, they
// cannot transition and don't need to call the runtime stub.
int offset = FixedArray::kHeaderSize + (i * kPointerSize);
- __ mov(ebx, Operand(esp, 0)); // Copy of array literal.
+ __ mov(ebx, Operand(esp, kPointerSize)); // Copy of array literal.
__ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
// Store the subexpression value in the array's elements.
__ mov(FieldOperand(ebx, offset), result_register());
@@ -1723,10 +1796,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
INLINE_SMI_CHECK);
} else {
// Store the subexpression value in the array's elements.
- __ mov(ebx, Operand(esp, 0)); // Copy of array literal.
- __ mov(edi, FieldOperand(ebx, JSObject::kMapOffset));
__ mov(ecx, Immediate(Smi::FromInt(i)));
- __ mov(edx, Immediate(Smi::FromInt(expr->literal_index())));
StoreArrayLiteralElementStub stub;
__ CallStub(&stub);
}
@@ -1735,6 +1805,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
}
if (result_saved) {
+ __ add(esp, Immediate(kPointerSize)); // literal index
context()->PlugTOS();
} else {
context()->Plug(eax);
@@ -1856,11 +1927,289 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
}
+void FullCodeGenerator::VisitYield(Yield* expr) {
+ Comment cmnt(masm_, "[ Yield");
+ // Evaluate yielded value first; the initial iterator definition depends on
+ // this. It stays on the stack while we update the iterator.
+ VisitForStackValue(expr->expression());
+
+ switch (expr->yield_kind()) {
+ case Yield::SUSPEND:
+ // Pop value from top-of-stack slot; box result into result register.
+ EmitCreateIteratorResult(false);
+ __ push(result_register());
+ // Fall through.
+ case Yield::INITIAL: {
+ Label suspend, continuation, post_runtime, resume;
+
+ __ jmp(&suspend);
+
+ __ bind(&continuation);
+ __ jmp(&resume);
+
+ __ bind(&suspend);
+ VisitForAccumulatorValue(expr->generator_object());
+ ASSERT(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
+ __ mov(FieldOperand(eax, JSGeneratorObject::kContinuationOffset),
+ Immediate(Smi::FromInt(continuation.pos())));
+ __ mov(FieldOperand(eax, JSGeneratorObject::kContextOffset), esi);
+ __ mov(ecx, esi);
+ __ RecordWriteField(eax, JSGeneratorObject::kContextOffset, ecx, edx,
+ kDontSaveFPRegs);
+ __ lea(ebx, Operand(ebp, StandardFrameConstants::kExpressionsOffset));
+ __ cmp(esp, ebx);
+ __ j(equal, &post_runtime);
+ __ push(eax); // generator object
+ __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+ __ mov(context_register(),
+ Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ bind(&post_runtime);
+ __ pop(result_register());
+ EmitReturnSequence();
+
+ __ bind(&resume);
+ context()->Plug(result_register());
+ break;
+ }
+
+ case Yield::FINAL: {
+ VisitForAccumulatorValue(expr->generator_object());
+ __ mov(FieldOperand(result_register(),
+ JSGeneratorObject::kContinuationOffset),
+ Immediate(Smi::FromInt(JSGeneratorObject::kGeneratorClosed)));
+ // Pop value from top-of-stack slot, box result into result register.
+ EmitCreateIteratorResult(true);
+ EmitUnwindBeforeReturn();
+ EmitReturnSequence();
+ break;
+ }
+
+ case Yield::DELEGATING: {
+ VisitForStackValue(expr->generator_object());
+
+ // Initial stack layout is as follows:
+ // [sp + 1 * kPointerSize] iter
+ // [sp + 0 * kPointerSize] g
+
+ Label l_catch, l_try, l_suspend, l_continuation, l_resume;
+ Label l_next, l_call, l_loop;
+ // Initial send value is undefined.
+ __ mov(eax, isolate()->factory()->undefined_value());
+ __ jmp(&l_next);
+
+ // catch (e) { receiver = iter; f = 'throw'; arg = e; goto l_call; }
+ __ bind(&l_catch);
+ handler_table()->set(expr->index(), Smi::FromInt(l_catch.pos()));
+ __ mov(ecx, isolate()->factory()->throw_string()); // "throw"
+ __ push(ecx); // "throw"
+ __ push(Operand(esp, 2 * kPointerSize)); // iter
+ __ push(eax); // exception
+ __ jmp(&l_call);
+
+ // try { received = %yield result }
+ // Shuffle the received result above a try handler and yield it without
+ // re-boxing.
+ __ bind(&l_try);
+ __ pop(eax); // result
+ __ PushTryHandler(StackHandler::CATCH, expr->index());
+ const int handler_size = StackHandlerConstants::kSize;
+ __ push(eax); // result
+ __ jmp(&l_suspend);
+ __ bind(&l_continuation);
+ __ jmp(&l_resume);
+ __ bind(&l_suspend);
+ const int generator_object_depth = kPointerSize + handler_size;
+ __ mov(eax, Operand(esp, generator_object_depth));
+ __ push(eax); // g
+ ASSERT(l_continuation.pos() > 0 && Smi::IsValid(l_continuation.pos()));
+ __ mov(FieldOperand(eax, JSGeneratorObject::kContinuationOffset),
+ Immediate(Smi::FromInt(l_continuation.pos())));
+ __ mov(FieldOperand(eax, JSGeneratorObject::kContextOffset), esi);
+ __ mov(ecx, esi);
+ __ RecordWriteField(eax, JSGeneratorObject::kContextOffset, ecx, edx,
+ kDontSaveFPRegs);
+ __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+ __ mov(context_register(),
+ Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ pop(eax); // result
+ EmitReturnSequence();
+ __ bind(&l_resume); // received in eax
+ __ PopTryHandler();
+
+ // receiver = iter; f = iter.next; arg = received;
+ __ bind(&l_next);
+ __ mov(ecx, isolate()->factory()->next_string()); // "next"
+ __ push(ecx);
+ __ push(Operand(esp, 2 * kPointerSize)); // iter
+ __ push(eax); // received
+
+ // result = receiver[f](arg);
+ __ bind(&l_call);
+ Handle<Code> ic = isolate()->stub_cache()->ComputeKeyedCallInitialize(1);
+ CallIC(ic);
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ Drop(1); // The key is still on the stack; drop it.
+
+ // if (!result.done) goto l_try;
+ __ bind(&l_loop);
+ __ push(eax); // save result
+ __ mov(edx, eax); // result
+ __ mov(ecx, isolate()->factory()->done_string()); // "done"
+ Handle<Code> done_ic = isolate()->builtins()->LoadIC_Initialize();
+ CallIC(done_ic); // result.done in eax
+ Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
+ CallIC(bool_ic);
+ __ test(eax, eax);
+ __ j(zero, &l_try);
+
+ // result.value
+ __ pop(edx); // result
+ __ mov(ecx, isolate()->factory()->value_string()); // "value"
+ Handle<Code> value_ic = isolate()->builtins()->LoadIC_Initialize();
+ CallIC(value_ic); // result.value in eax
+ context()->DropAndPlug(2, eax); // drop iter and g
+ break;
+ }
+ }
+}
+
+
+void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
+ Expression *value,
+ JSGeneratorObject::ResumeMode resume_mode) {
+ // The value stays in eax, and is ultimately read by the resumed generator, as
+ // if the CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. ebx
+ // will hold the generator object until the activation has been resumed.
+ VisitForStackValue(generator);
+ VisitForAccumulatorValue(value);
+ __ pop(ebx);
+
+ // Check generator state.
+ Label wrong_state, done;
+ STATIC_ASSERT(JSGeneratorObject::kGeneratorExecuting <= 0);
+ STATIC_ASSERT(JSGeneratorObject::kGeneratorClosed <= 0);
+ __ cmp(FieldOperand(ebx, JSGeneratorObject::kContinuationOffset),
+ Immediate(Smi::FromInt(0)));
+ __ j(less_equal, &wrong_state);
+
+ // Load suspended function and context.
+ __ mov(esi, FieldOperand(ebx, JSGeneratorObject::kContextOffset));
+ __ mov(edi, FieldOperand(ebx, JSGeneratorObject::kFunctionOffset));
+
+ // Push receiver.
+ __ push(FieldOperand(ebx, JSGeneratorObject::kReceiverOffset));
+
+ // Push holes for arguments to generator function.
+ __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(edx,
+ FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ mov(ecx, isolate()->factory()->the_hole_value());
+ Label push_argument_holes, push_frame;
+ __ bind(&push_argument_holes);
+ __ sub(edx, Immediate(Smi::FromInt(1)));
+ __ j(carry, &push_frame);
+ __ push(ecx);
+ __ jmp(&push_argument_holes);
+
+ // Enter a new JavaScript frame, and initialize its slots as they were when
+ // the generator was suspended.
+ Label resume_frame;
+ __ bind(&push_frame);
+ __ call(&resume_frame);
+ __ jmp(&done);
+ __ bind(&resume_frame);
+ __ push(ebp); // Caller's frame pointer.
+ __ mov(ebp, esp);
+ __ push(esi); // Callee's context.
+ __ push(edi); // Callee's JS Function.
+
+ // Load the operand stack size.
+ __ mov(edx, FieldOperand(ebx, JSGeneratorObject::kOperandStackOffset));
+ __ mov(edx, FieldOperand(edx, FixedArray::kLengthOffset));
+ __ SmiUntag(edx);
+
+ // If we are sending a value and there is no operand stack, we can jump back
+ // in directly.
+ if (resume_mode == JSGeneratorObject::NEXT) {
+ Label slow_resume;
+ __ cmp(edx, Immediate(0));
+ __ j(not_zero, &slow_resume);
+ __ mov(edx, FieldOperand(edi, JSFunction::kCodeEntryOffset));
+ __ mov(ecx, FieldOperand(ebx, JSGeneratorObject::kContinuationOffset));
+ __ SmiUntag(ecx);
+ __ add(edx, ecx);
+ __ mov(FieldOperand(ebx, JSGeneratorObject::kContinuationOffset),
+ Immediate(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting)));
+ __ jmp(edx);
+ __ bind(&slow_resume);
+ }
+
+ // Otherwise, we push holes for the operand stack and call the runtime to fix
+ // up the stack and the handlers.
+ Label push_operand_holes, call_resume;
+ __ bind(&push_operand_holes);
+ __ sub(edx, Immediate(1));
+ __ j(carry, &call_resume);
+ __ push(ecx);
+ __ jmp(&push_operand_holes);
+ __ bind(&call_resume);
+ __ push(ebx);
+ __ push(result_register());
+ __ Push(Smi::FromInt(resume_mode));
+ __ CallRuntime(Runtime::kResumeJSGeneratorObject, 3);
+ // Not reached: the runtime call returns elsewhere.
+ __ Abort(kGeneratorFailedToResume);
+
+ // Throw error if we attempt to operate on a running generator.
+ __ bind(&wrong_state);
+ __ push(ebx);
+ __ CallRuntime(Runtime::kThrowGeneratorStateError, 1);
+
+ __ bind(&done);
+ context()->Plug(result_register());
+}
+
+
+void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
+ Label gc_required;
+ Label allocated;
+
+ Handle<Map> map(isolate()->native_context()->generator_result_map());
+
+ __ Allocate(map->instance_size(), eax, ecx, edx, &gc_required, TAG_OBJECT);
+ __ jmp(&allocated);
+
+ __ bind(&gc_required);
+ __ Push(Smi::FromInt(map->instance_size()));
+ __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+ __ mov(context_register(),
+ Operand(ebp, StandardFrameConstants::kContextOffset));
+
+ __ bind(&allocated);
+ __ mov(ebx, map);
+ __ pop(ecx);
+ __ mov(edx, isolate()->factory()->ToBoolean(done));
+ ASSERT_EQ(map->instance_size(), 5 * kPointerSize);
+ __ mov(FieldOperand(eax, HeapObject::kMapOffset), ebx);
+ __ mov(FieldOperand(eax, JSObject::kPropertiesOffset),
+ isolate()->factory()->empty_fixed_array());
+ __ mov(FieldOperand(eax, JSObject::kElementsOffset),
+ isolate()->factory()->empty_fixed_array());
+ __ mov(FieldOperand(eax, JSGeneratorObject::kResultValuePropertyOffset), ecx);
+ __ mov(FieldOperand(eax, JSGeneratorObject::kResultDonePropertyOffset), edx);
+
+ // Only the value field needs a write barrier, as the other values are in the
+ // root set.
+ __ RecordWriteField(eax, JSGeneratorObject::kResultValuePropertyOffset,
+ ecx, edx, kDontSaveFPRegs);
+}
+
+
void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Literal* key = prop->key()->AsLiteral();
- ASSERT(!key->handle()->IsSmi());
- __ mov(ecx, Immediate(key->handle()));
+ ASSERT(!key->value()->IsSmi());
+ __ mov(ecx, Immediate(key->value()));
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
CallIC(ic, RelocInfo::CODE_TARGET, prop->PropertyFeedbackId());
}
@@ -1890,7 +2239,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ bind(&stub_call);
__ mov(eax, ecx);
BinaryOpStub stub(op, mode);
- CallIC(stub.GetCode(), RelocInfo::CODE_TARGET,
+ CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET,
expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
__ jmp(&done, Label::kNear);
@@ -1976,7 +2325,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
__ pop(edx);
BinaryOpStub stub(op, mode);
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
- CallIC(stub.GetCode(), RelocInfo::CODE_TARGET,
+ CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET,
expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
context()->Plug(eax);
@@ -1984,7 +2333,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
void FullCodeGenerator::EmitAssignment(Expression* expr) {
- // Invalid left-hand sides are rewritten to have a 'throw
+ // Invalid left-hand sides are rewritten by the parser to have a 'throw
// ReferenceError' on the left-hand side.
if (!expr->IsValidLeftHandSide()) {
VisitForEffect(expr);
@@ -2014,7 +2363,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
VisitForAccumulatorValue(prop->obj());
__ mov(edx, eax);
__ pop(eax); // Restore value.
- __ mov(ecx, prop->key()->AsLiteral()->handle());
+ __ mov(ecx, prop->key()->AsLiteral()->value());
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
: isolate()->builtins()->StoreIC_Initialize_Strict();
@@ -2108,7 +2457,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
// Check for an uninitialized let binding.
__ mov(edx, location);
__ cmp(edx, isolate()->factory()->the_hole_value());
- __ Check(equal, "Let binding re-initialization.");
+ __ Check(equal, kLetBindingReInitialization);
}
// Perform the assignment.
__ mov(location, eax);
@@ -2141,7 +2490,7 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
// Record source code position before IC call.
SetSourcePosition(expr->position());
- __ mov(ecx, prop->key()->AsLiteral()->handle());
+ __ mov(ecx, prop->key()->AsLiteral()->value());
__ pop(edx);
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
@@ -2276,8 +2625,7 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
flags = static_cast<CallFunctionFlags>(flags | RECORD_CALL_TARGET);
Handle<Object> uninitialized =
TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
+ Handle<Cell> cell = isolate()->factory()->NewCell(uninitialized);
RecordTypeFeedbackCell(expr->CallFeedbackId(), cell);
__ mov(ebx, cell);
@@ -2325,7 +2673,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
VariableProxy* proxy = callee->AsVariableProxy();
Property* property = callee->AsProperty();
- if (proxy != NULL && proxy->var()->is_possibly_eval()) {
+ if (proxy != NULL && proxy->var()->is_possibly_eval(isolate())) {
// In a call to eval, we first call %ResolvePossiblyDirectEval to
// resolve the function we need to call and the receiver of the call.
// Then we call the resolved function using the given arguments.
@@ -2407,7 +2755,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
}
if (property->key()->IsPropertyName()) {
EmitCallWithIC(expr,
- property->key()->AsLiteral()->handle(),
+ property->key()->AsLiteral()->value(),
RelocInfo::CODE_TARGET);
} else {
EmitKeyedCallWithIC(expr, property->key());
@@ -2461,13 +2809,12 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Record call targets in unoptimized code.
Handle<Object> uninitialized =
TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
+ Handle<Cell> cell = isolate()->factory()->NewCell(uninitialized);
RecordTypeFeedbackCell(expr->CallNewFeedbackId(), cell);
__ mov(ebx, cell);
CallConstructStub stub(RECORD_CALL_TARGET);
- __ call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+ __ call(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL);
PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
context()->Plug(eax);
}
@@ -2600,7 +2947,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
VisitForAccumulatorValue(args->at(0));
- Label materialize_true, materialize_false;
+ Label materialize_true, materialize_false, skip_lookup;
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
@@ -2614,15 +2961,15 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
__ test_b(FieldOperand(ebx, Map::kBitField2Offset),
1 << Map::kStringWrapperSafeForDefaultValueOf);
- __ j(not_zero, if_true);
+ __ j(not_zero, &skip_lookup);
// Check for fast case object. Return false for slow case objects.
__ mov(ecx, FieldOperand(eax, JSObject::kPropertiesOffset));
__ mov(ecx, FieldOperand(ecx, HeapObject::kMapOffset));
- __ cmp(ecx, FACTORY->hash_table_map());
+ __ cmp(ecx, isolate()->factory()->hash_table_map());
__ j(equal, if_false);
- // Look for valueOf symbol in the descriptor array, and indicate false if
+ // Look for valueOf string in the descriptor array, and indicate false if
// found. Since we omit an enumeration index check, if it is added via a
// transition that shares its descriptor array, this is a false positive.
Label entry, loop, done;
@@ -2644,11 +2991,11 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
// Calculate location of the first key name.
__ add(ebx, Immediate(DescriptorArray::kFirstOffset));
// Loop through all the keys in the descriptor array. If one of these is the
- // symbol valueOf the result is false.
+ // internalized string "valueOf" the result is false.
__ jmp(&entry);
__ bind(&loop);
__ mov(edx, FieldOperand(ebx, 0));
- __ cmp(edx, FACTORY->value_of_symbol());
+ __ cmp(edx, isolate()->factory()->value_of_string());
__ j(equal, if_false);
__ add(ebx, Immediate(DescriptorArray::kDescriptorSize * kPointerSize));
__ bind(&entry);
@@ -2660,6 +3007,12 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
// Reload map as register ebx was used as temporary above.
__ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+ // Set the bit in the map to indicate that there is no local valueOf field.
+ __ or_(FieldOperand(ebx, Map::kBitField2Offset),
+ Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
+
+ __ bind(&skip_lookup);
+
// If a valueOf property is not found on the object check that its
// prototype is the un-modified String prototype. If not result is false.
__ mov(ecx, FieldOperand(ebx, Map::kPrototypeOffset));
@@ -2671,14 +3024,9 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ cmp(ecx,
ContextOperand(edx,
Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
- __ j(not_equal, if_false);
- // Set the bit in the map to indicate that it has been checked safe for
- // default valueOf and set true result.
- __ or_(FieldOperand(ebx, Map::kBitField2Offset),
- Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
- __ jmp(if_true);
-
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(equal, if_true, if_false, fall_through);
+
context()->Plug(if_true, if_false);
}
@@ -2885,12 +3233,12 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
// Functions have class 'Function'.
__ bind(&function);
- __ mov(eax, isolate()->factory()->function_class_symbol());
+ __ mov(eax, isolate()->factory()->function_class_string());
__ jmp(&done);
// Objects with a non-function constructor have class 'Object'.
__ bind(&non_function_constructor);
- __ mov(eax, isolate()->factory()->Object_symbol());
+ __ mov(eax, isolate()->factory()->Object_string());
__ jmp(&done);
// Non-JS objects have class null.
@@ -2914,7 +3262,7 @@ void FullCodeGenerator::EmitLog(CallRuntime* expr) {
// 2 (array): Arguments to the format string.
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(args->length(), 3);
- if (CodeGenerator::ShouldGenerateLog(args->at(0))) {
+ if (CodeGenerator::ShouldGenerateLog(isolate(), args->at(0))) {
VisitForStackValue(args->at(1));
VisitForStackValue(args->at(2));
__ CallRuntime(Runtime::kLog, 2);
@@ -2952,14 +3300,14 @@ void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) {
// ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
// This is implemented on both SSE2 and FPU.
if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope fscope(SSE2);
+ CpuFeatureScope fscope(masm(), SSE2);
__ mov(ebx, Immediate(0x49800000)); // 1.0 x 2^20 as single.
__ movd(xmm1, ebx);
__ movd(xmm0, eax);
__ cvtss2sd(xmm1, xmm1);
__ xorps(xmm0, xmm1);
__ subsd(xmm0, xmm1);
- __ movdbl(FieldOperand(edi, HeapNumber::kValueOffset), xmm0);
+ __ movsd(FieldOperand(edi, HeapNumber::kValueOffset), xmm0);
} else {
// 0x4130000000000000 is 1.0 x 2^20 as a double.
__ mov(FieldOperand(edi, HeapNumber::kExponentOffset),
@@ -3026,7 +3374,7 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
ASSERT_NE(NULL, args->at(1)->AsLiteral());
- Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->handle()));
+ Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->value()));
VisitForAccumulatorValue(args->at(0)); // Load the object.
@@ -3067,6 +3415,87 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitSeqStringSetCharCheck(Register string,
+ Register index,
+ Register value,
+ uint32_t encoding_mask) {
+ __ test(index, Immediate(kSmiTagMask));
+ __ Check(zero, kNonSmiIndex);
+ __ test(value, Immediate(kSmiTagMask));
+ __ Check(zero, kNonSmiValue);
+
+ __ cmp(index, FieldOperand(string, String::kLengthOffset));
+ __ Check(less, kIndexIsTooLarge);
+
+ __ cmp(index, Immediate(Smi::FromInt(0)));
+ __ Check(greater_equal, kIndexIsNegative);
+
+ __ push(value);
+ __ mov(value, FieldOperand(string, HeapObject::kMapOffset));
+ __ movzx_b(value, FieldOperand(value, Map::kInstanceTypeOffset));
+
+ __ and_(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
+ __ cmp(value, Immediate(encoding_mask));
+ __ Check(equal, kUnexpectedStringType);
+ __ pop(value);
+}
+
+
+void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT_EQ(3, args->length());
+
+ Register string = eax;
+ Register index = ebx;
+ Register value = ecx;
+
+ VisitForStackValue(args->at(1)); // index
+ VisitForStackValue(args->at(2)); // value
+ __ pop(value);
+ __ pop(index);
+ VisitForAccumulatorValue(args->at(0)); // string
+
+
+ if (FLAG_debug_code) {
+ static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+ EmitSeqStringSetCharCheck(string, index, value, one_byte_seq_type);
+ }
+
+ __ SmiUntag(value);
+ __ SmiUntag(index);
+ __ mov_b(FieldOperand(string, index, times_1, SeqOneByteString::kHeaderSize),
+ value);
+ context()->Plug(string);
+}
+
+
+void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT_EQ(3, args->length());
+
+ Register string = eax;
+ Register index = ebx;
+ Register value = ecx;
+
+ VisitForStackValue(args->at(1)); // index
+ VisitForStackValue(args->at(2)); // value
+ __ pop(value);
+ __ pop(index);
+ VisitForAccumulatorValue(args->at(0)); // string
+
+ if (FLAG_debug_code) {
+ static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+ EmitSeqStringSetCharCheck(string, index, value, two_byte_seq_type);
+ }
+
+ __ SmiUntag(value);
+ // No need to untag a smi for two-byte addressing.
+ __ mov_w(FieldOperand(string, index, times_1, SeqTwoByteString::kHeaderSize),
+ value);
+ context()->Plug(string);
+}
+
+
void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
// Load the arguments on the stack and call the runtime function.
ZoneList<Expression*>* args = expr->arguments();
@@ -3117,8 +3546,8 @@ void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(args->length(), 1);
- // Load the argument on the stack and call the stub.
- VisitForStackValue(args->at(0));
+ // Load the argument into eax and call the stub.
+ VisitForAccumulatorValue(args->at(0));
NumberToStringStub stub;
__ CallStub(&stub);
@@ -3246,7 +3675,7 @@ void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
- StringAddStub stub(NO_STRING_ADD_FLAGS);
+ StringAddStub stub(STRING_ADD_CHECK_BOTH);
__ CallStub(&stub);
context()->Plug(eax);
}
@@ -3374,12 +3803,12 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
ASSERT_EQ(2, args->length());
ASSERT_NE(NULL, args->at(0)->AsLiteral());
- int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
+ int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->value()))->value();
Handle<FixedArray> jsfunction_result_caches(
isolate()->native_context()->jsfunction_result_caches());
if (jsfunction_result_caches->length() <= cache_id) {
- __ Abort("Attempt to use undefined cache.");
+ __ Abort(kAttemptToUseUndefinedCache);
__ mov(eax, isolate()->factory()->undefined_value());
context()->Plug(eax);
return;
@@ -3561,7 +3990,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// scratch, string_length, elements.
if (generate_debug_code_) {
__ cmp(index, array_length);
- __ Assert(less, "No empty arrays here in EmitFastAsciiArrayJoin");
+ __ Assert(less, kNoEmptyArraysHereInEmitFastAsciiArrayJoin);
}
__ bind(&loop);
__ mov(string, FieldOperand(elements,
@@ -3573,10 +4002,10 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
__ and_(scratch, Immediate(
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
- __ cmp(scratch, kStringTag | kAsciiStringTag | kSeqStringTag);
+ __ cmp(scratch, kStringTag | kOneByteStringTag | kSeqStringTag);
__ j(not_equal, &bailout);
__ add(string_length,
- FieldOperand(string, SeqAsciiString::kLengthOffset));
+ FieldOperand(string, SeqOneByteString::kLengthOffset));
__ j(overflow, &bailout);
__ add(index, Immediate(1));
__ cmp(index, array_length);
@@ -3606,13 +4035,13 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
__ and_(scratch, Immediate(
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
- __ cmp(scratch, ASCII_STRING_TYPE);
+ __ cmp(scratch, kStringTag | kOneByteStringTag | kSeqStringTag);
__ j(not_equal, &bailout);
// Add (separator length times array_length) - separator length
// to string_length.
__ mov(scratch, separator_operand);
- __ mov(scratch, FieldOperand(scratch, SeqAsciiString::kLengthOffset));
+ __ mov(scratch, FieldOperand(scratch, SeqOneByteString::kLengthOffset));
__ sub(string_length, scratch); // May be negative, temporarily.
__ imul(scratch, array_length_operand);
__ j(overflow, &bailout);
@@ -3626,11 +4055,11 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ AllocateAsciiString(result_pos, string_length, scratch,
index, string, &bailout);
__ mov(result_operand, result_pos);
- __ lea(result_pos, FieldOperand(result_pos, SeqAsciiString::kHeaderSize));
+ __ lea(result_pos, FieldOperand(result_pos, SeqOneByteString::kHeaderSize));
__ mov(string, separator_operand);
- __ cmp(FieldOperand(string, SeqAsciiString::kLengthOffset),
+ __ cmp(FieldOperand(string, SeqOneByteString::kLengthOffset),
Immediate(Smi::FromInt(1)));
__ j(equal, &one_char_separator);
__ j(greater, &long_separator);
@@ -3655,7 +4084,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
FieldOperand(string, String::kLengthOffset));
__ shr(string_length, 1);
__ lea(string,
- FieldOperand(string, SeqAsciiString::kHeaderSize));
+ FieldOperand(string, SeqOneByteString::kHeaderSize));
__ CopyBytes(string, result_pos, string_length, scratch);
__ add(index, Immediate(1));
__ bind(&loop_1_condition);
@@ -3668,7 +4097,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// One-character separator case
__ bind(&one_char_separator);
// Replace separator with its ASCII character value.
- __ mov_b(scratch, FieldOperand(string, SeqAsciiString::kHeaderSize));
+ __ mov_b(scratch, FieldOperand(string, SeqOneByteString::kHeaderSize));
__ mov_b(separator_operand, scratch);
__ Set(index, Immediate(0));
@@ -3696,7 +4125,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
FieldOperand(string, String::kLengthOffset));
__ shr(string_length, 1);
__ lea(string,
- FieldOperand(string, SeqAsciiString::kHeaderSize));
+ FieldOperand(string, SeqOneByteString::kHeaderSize));
__ CopyBytes(string, result_pos, string_length, scratch);
__ add(index, Immediate(1));
@@ -3725,7 +4154,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
FieldOperand(string, String::kLengthOffset));
__ shr(string_length, 1);
__ lea(string,
- FieldOperand(string, SeqAsciiString::kHeaderSize));
+ FieldOperand(string, SeqOneByteString::kHeaderSize));
__ CopyBytes(string, result_pos, string_length, scratch);
__ bind(&loop_3_entry);
@@ -3737,7 +4166,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
FieldOperand(string, String::kLengthOffset));
__ shr(string_length, 1);
__ lea(string,
- FieldOperand(string, SeqAsciiString::kHeaderSize));
+ FieldOperand(string, SeqOneByteString::kHeaderSize));
__ CopyBytes(string, result_pos, string_length, scratch);
__ add(index, Immediate(1));
@@ -3908,49 +4337,12 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
break;
}
- case Token::ADD: {
- Comment cmt(masm_, "[ UnaryOperation (ADD)");
- VisitForAccumulatorValue(expr->expression());
- Label no_conversion;
- __ JumpIfSmi(result_register(), &no_conversion);
- ToNumberStub convert_stub;
- __ CallStub(&convert_stub);
- __ bind(&no_conversion);
- context()->Plug(result_register());
- break;
- }
-
- case Token::SUB:
- EmitUnaryOperation(expr, "[ UnaryOperation (SUB)");
- break;
-
- case Token::BIT_NOT:
- EmitUnaryOperation(expr, "[ UnaryOperation (BIT_NOT)");
- break;
-
default:
UNREACHABLE();
}
}
-void FullCodeGenerator::EmitUnaryOperation(UnaryOperation* expr,
- const char* comment) {
- Comment cmt(masm_, comment);
- bool can_overwrite = expr->expression()->ResultOverwriteAllowed();
- UnaryOverwriteMode overwrite =
- can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
- UnaryOpStub stub(expr->op(), overwrite);
- // UnaryOpStub expects the argument to be in the
- // accumulator register eax.
- VisitForAccumulatorValue(expr->expression());
- SetSourcePosition(expr->position());
- CallIC(stub.GetCode(), RelocInfo::CODE_TARGET,
- expr->UnaryOperationFeedbackId());
- context()->Plug(eax);
-}
-
-
void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Comment cmnt(masm_, "[ CountOperation");
SetSourcePosition(expr->position());
@@ -4067,7 +4459,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ mov(edx, eax);
__ mov(eax, Immediate(Smi::FromInt(1)));
BinaryOpStub stub(expr->binary_op(), NO_OVERWRITE);
- CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountBinOpFeedbackId());
+ CallIC(stub.GetCode(isolate()),
+ RelocInfo::CODE_TARGET,
+ expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
__ bind(&done);
@@ -4096,7 +4490,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
break;
case NAMED_PROPERTY: {
- __ mov(ecx, prop->key()->AsLiteral()->handle());
+ __ mov(ecx, prop->key()->AsLiteral()->value());
__ pop(edx);
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
@@ -4186,12 +4580,12 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
}
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- if (check->Equals(isolate()->heap()->number_symbol())) {
+ if (check->Equals(isolate()->heap()->number_string())) {
__ JumpIfSmi(eax, if_true);
__ cmp(FieldOperand(eax, HeapObject::kMapOffset),
isolate()->factory()->heap_number_map());
Split(equal, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->string_symbol())) {
+ } else if (check->Equals(isolate()->heap()->string_string())) {
__ JumpIfSmi(eax, if_false);
__ CmpObjectType(eax, FIRST_NONSTRING_TYPE, edx);
__ j(above_equal, if_false);
@@ -4199,16 +4593,20 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
__ test_b(FieldOperand(edx, Map::kBitFieldOffset),
1 << Map::kIsUndetectable);
Split(zero, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->boolean_symbol())) {
+ } else if (check->Equals(isolate()->heap()->symbol_string())) {
+ __ JumpIfSmi(eax, if_false);
+ __ CmpObjectType(eax, SYMBOL_TYPE, edx);
+ Split(equal, if_true, if_false, fall_through);
+ } else if (check->Equals(isolate()->heap()->boolean_string())) {
__ cmp(eax, isolate()->factory()->true_value());
__ j(equal, if_true);
__ cmp(eax, isolate()->factory()->false_value());
Split(equal, if_true, if_false, fall_through);
} else if (FLAG_harmony_typeof &&
- check->Equals(isolate()->heap()->null_symbol())) {
+ check->Equals(isolate()->heap()->null_string())) {
__ cmp(eax, isolate()->factory()->null_value());
Split(equal, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->undefined_symbol())) {
+ } else if (check->Equals(isolate()->heap()->undefined_string())) {
__ cmp(eax, isolate()->factory()->undefined_value());
__ j(equal, if_true);
__ JumpIfSmi(eax, if_false);
@@ -4217,14 +4615,14 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
__ movzx_b(ecx, FieldOperand(edx, Map::kBitFieldOffset));
__ test(ecx, Immediate(1 << Map::kIsUndetectable));
Split(not_zero, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->function_symbol())) {
+ } else if (check->Equals(isolate()->heap()->function_string())) {
__ JumpIfSmi(eax, if_false);
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
__ CmpObjectType(eax, JS_FUNCTION_TYPE, edx);
__ j(equal, if_true);
__ CmpInstanceType(edx, JS_FUNCTION_PROXY_TYPE);
Split(equal, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->object_symbol())) {
+ } else if (check->Equals(isolate()->heap()->object_string())) {
__ JumpIfSmi(eax, if_false);
if (!FLAG_harmony_typeof) {
__ cmp(eax, isolate()->factory()->null_value());
@@ -4286,29 +4684,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
default: {
VisitForAccumulatorValue(expr->right());
- Condition cc = no_condition;
- switch (op) {
- case Token::EQ_STRICT:
- case Token::EQ:
- cc = equal;
- break;
- case Token::LT:
- cc = less;
- break;
- case Token::GT:
- cc = greater;
- break;
- case Token::LTE:
- cc = less_equal;
- break;
- case Token::GTE:
- cc = greater_equal;
- break;
- case Token::IN:
- case Token::INSTANCEOF:
- default:
- UNREACHABLE();
- }
+ Condition cc = CompareIC::ComputeCondition(op);
__ pop(edx);
bool inline_smi_code = ShouldInlineSmiCase(op);
@@ -4325,7 +4701,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
// Record position and call the compare IC.
SetSourcePosition(expr->position());
- Handle<Code> ic = CompareIC::GetUninitialized(op);
+ Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
CallIC(ic, RelocInfo::CODE_TARGET, expr->CompareOperationFeedbackId());
patch_site.EmitPatchInfo();
@@ -4353,24 +4729,17 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
VisitForAccumulatorValue(sub_expr);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Handle<Object> nil_value = nil == kNullValue ?
- isolate()->factory()->null_value() :
- isolate()->factory()->undefined_value();
- __ cmp(eax, nil_value);
+
+ Handle<Object> nil_value = nil == kNullValue
+ ? isolate()->factory()->null_value()
+ : isolate()->factory()->undefined_value();
if (expr->op() == Token::EQ_STRICT) {
+ __ cmp(eax, nil_value);
Split(equal, if_true, if_false, fall_through);
} else {
- Handle<Object> other_nil_value = nil == kNullValue ?
- isolate()->factory()->undefined_value() :
- isolate()->factory()->null_value();
- __ j(equal, if_true);
- __ cmp(eax, other_nil_value);
- __ j(equal, if_true);
- __ JumpIfSmi(eax, if_false);
- // It can be an undetectable object.
- __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(edx, FieldOperand(edx, Map::kBitFieldOffset));
- __ test(edx, Immediate(1 << Map::kIsUndetectable));
+ Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
+ CallIC(ic, RelocInfo::CODE_TARGET, expr->CompareOperationFeedbackId());
+ __ test(eax, eax);
Split(not_zero, if_true, if_false, fall_through);
}
context()->Plug(if_true, if_false);
@@ -4519,6 +4888,79 @@ FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
#undef __
+
+static const byte kJnsInstruction = 0x79;
+static const byte kJnsOffset = 0x11;
+static const byte kCallInstruction = 0xe8;
+static const byte kNopByteOne = 0x66;
+static const byte kNopByteTwo = 0x90;
+
+
+void BackEdgeTable::PatchAt(Code* unoptimized_code,
+ Address pc,
+ BackEdgeState target_state,
+ Code* replacement_code) {
+ Address call_target_address = pc - kIntSize;
+ Address jns_instr_address = call_target_address - 3;
+ Address jns_offset_address = call_target_address - 2;
+
+ switch (target_state) {
+ case INTERRUPT:
+ // sub <profiling_counter>, <delta> ;; Not changed
+ // jns ok
+ // call <interrupt stub>
+ // ok:
+ *jns_instr_address = kJnsInstruction;
+ *jns_offset_address = kJnsOffset;
+ break;
+ case ON_STACK_REPLACEMENT:
+ case OSR_AFTER_STACK_CHECK:
+ // sub <profiling_counter>, <delta> ;; Not changed
+ // nop
+ // nop
+ // call <on-stack replacement>
+ // ok:
+ *jns_instr_address = kNopByteOne;
+ *jns_offset_address = kNopByteTwo;
+ break;
+ }
+
+ Assembler::set_target_address_at(call_target_address,
+ replacement_code->entry());
+ unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
+ unoptimized_code, call_target_address, replacement_code);
+}
+
+
+BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
+ Isolate* isolate,
+ Code* unoptimized_code,
+ Address pc) {
+ Address call_target_address = pc - kIntSize;
+ Address jns_instr_address = call_target_address - 3;
+ ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
+
+ if (*jns_instr_address == kJnsInstruction) {
+ ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
+ ASSERT_EQ(isolate->builtins()->InterruptCheck()->entry(),
+ Assembler::target_address_at(call_target_address));
+ return INTERRUPT;
+ }
+
+ ASSERT_EQ(kNopByteOne, *jns_instr_address);
+ ASSERT_EQ(kNopByteTwo, *(call_target_address - 2));
+
+ if (Assembler::target_address_at(call_target_address) ==
+ isolate->builtins()->OnStackReplacement()->entry()) {
+ return ON_STACK_REPLACEMENT;
+ }
+
+ ASSERT_EQ(isolate->builtins()->OsrAfterStackCheck()->entry(),
+ Assembler::target_address_at(call_target_address));
+ return OSR_AFTER_STACK_CHECK;
+}
+
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_IA32
diff --git a/deps/v8/src/ia32/ic-ia32.cc b/deps/v8/src/ia32/ic-ia32.cc
index dae3bbd63..f8e4ea53d 100644
--- a/deps/v8/src/ia32/ic-ia32.cc
+++ b/deps/v8/src/ia32/ic-ia32.cc
@@ -27,7 +27,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_IA32)
+#if V8_TARGET_ARCH_IA32
#include "codegen.h"
#include "ic-inl.h"
@@ -60,11 +60,11 @@ static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
// Generated code falls through if the receiver is a regular non-global
// JS object with slow properties and no interceptors.
-static void GenerateStringDictionaryReceiverCheck(MacroAssembler* masm,
- Register receiver,
- Register r0,
- Register r1,
- Label* miss) {
+static void GenerateNameDictionaryReceiverCheck(MacroAssembler* masm,
+ Register receiver,
+ Register r0,
+ Register r1,
+ Label* miss) {
// Register usage:
// receiver: holds the receiver on entry and is unchanged.
// r0: used to hold receiver instance type.
@@ -92,7 +92,8 @@ static void GenerateStringDictionaryReceiverCheck(MacroAssembler* masm,
__ j(not_zero, miss);
__ mov(r0, FieldOperand(receiver, JSObject::kPropertiesOffset));
- __ CheckMap(r0, FACTORY->hash_table_map(), miss, DONT_DO_SMI_CHECK);
+ __ CheckMap(r0, masm->isolate()->factory()->hash_table_map(), miss,
+ DONT_DO_SMI_CHECK);
}
@@ -100,7 +101,7 @@ static void GenerateStringDictionaryReceiverCheck(MacroAssembler* masm,
// storage. This function may fail to load a property even though it is
// in the dictionary, so code at miss_label must always call a backup
// property load that is complete. This function is safe to call if
-// name is not a symbol, and will jump to the miss_label in that
+// name is not internalized, and will jump to the miss_label in that
// case. The generated code assumes that the receiver has slow
// properties, is not a global object and does not have interceptors.
static void GenerateDictionaryLoad(MacroAssembler* masm,
@@ -127,21 +128,21 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
Label done;
// Probe the dictionary.
- StringDictionaryLookupStub::GeneratePositiveLookup(masm,
- miss_label,
- &done,
- elements,
- name,
- r0,
- r1);
+ NameDictionaryLookupStub::GeneratePositiveLookup(masm,
+ miss_label,
+ &done,
+ elements,
+ name,
+ r0,
+ r1);
// If probing finds an entry in the dictionary, r0 contains the
// index into the dictionary. Check that the value is a normal
// property.
__ bind(&done);
const int kElementsStartOffset =
- StringDictionary::kHeaderSize +
- StringDictionary::kElementsStartIndex * kPointerSize;
+ NameDictionary::kHeaderSize +
+ NameDictionary::kElementsStartIndex * kPointerSize;
const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
__ test(Operand(elements, r0, times_4, kDetailsOffset - kHeapObjectTag),
Immediate(PropertyDetails::TypeField::kMask << kSmiTagSize));
@@ -157,7 +158,7 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
// storage. This function may fail to store a property even though it
// is in the dictionary, so code at miss_label must always call a
// backup property store that is complete. This function is safe to
-// call if name is not a symbol, and will jump to the miss_label in
+// call if name is not internalized, and will jump to the miss_label in
// that case. The generated code assumes that the receiver has slow
// properties, is not a global object and does not have interceptors.
static void GenerateDictionaryStore(MacroAssembler* masm,
@@ -182,21 +183,21 @@ static void GenerateDictionaryStore(MacroAssembler* masm,
// Probe the dictionary.
- StringDictionaryLookupStub::GeneratePositiveLookup(masm,
- miss_label,
- &done,
- elements,
- name,
- r0,
- r1);
+ NameDictionaryLookupStub::GeneratePositiveLookup(masm,
+ miss_label,
+ &done,
+ elements,
+ name,
+ r0,
+ r1);
// If probing finds an entry in the dictionary, r0 contains the
// index into the dictionary. Check that the value is a normal
// property that is not read only.
__ bind(&done);
const int kElementsStartOffset =
- StringDictionary::kHeaderSize +
- StringDictionary::kElementsStartIndex * kPointerSize;
+ NameDictionary::kHeaderSize +
+ NameDictionary::kElementsStartIndex * kPointerSize;
const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
const int kTypeAndReadOnlyMask =
(PropertyDetails::TypeField::kMask |
@@ -216,50 +217,6 @@ static void GenerateDictionaryStore(MacroAssembler* masm,
}
-void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
-
- StubCompiler::GenerateLoadArrayLength(masm, edx, eax, &miss);
- __ bind(&miss);
- StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
-}
-
-
-void LoadIC::GenerateStringLength(MacroAssembler* masm,
- bool support_wrappers) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
-
- StubCompiler::GenerateLoadStringLength(masm, edx, eax, ebx, &miss,
- support_wrappers);
- __ bind(&miss);
- StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
-}
-
-
-void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
-
- StubCompiler::GenerateLoadFunctionPrototype(masm, edx, eax, ebx, &miss);
- __ bind(&miss);
- StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
-}
-
-
// Checks the receiver for special cases (value type, slow case bits).
// Falls through for regular JS object.
static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
@@ -314,7 +271,7 @@ static void GenerateFastArrayLoad(MacroAssembler* masm,
if (not_fast_array != NULL) {
// Check that the object is in fast mode and writable.
__ CheckMap(scratch,
- FACTORY->fixed_array_map(),
+ masm->isolate()->factory()->fixed_array_map(),
not_fast_array,
DONT_DO_SMI_CHECK);
} else {
@@ -326,7 +283,7 @@ static void GenerateFastArrayLoad(MacroAssembler* masm,
// Fast case: Do the load.
STATIC_ASSERT((kPointerSize == 4) && (kSmiTagSize == 1) && (kSmiTag == 0));
__ mov(scratch, FieldOperand(scratch, key, times_2, FixedArray::kHeaderSize));
- __ cmp(scratch, Immediate(FACTORY->the_hole_value()));
+ __ cmp(scratch, Immediate(masm->isolate()->factory()->the_hole_value()));
// In case the loaded value is the_hole we have to consult GetProperty
// to ensure the prototype chain is searched.
__ j(equal, out_of_range);
@@ -336,31 +293,38 @@ static void GenerateFastArrayLoad(MacroAssembler* masm,
}
-// Checks whether a key is an array index string or a symbol string.
-// Falls through if the key is a symbol.
-static void GenerateKeyStringCheck(MacroAssembler* masm,
- Register key,
- Register map,
- Register hash,
- Label* index_string,
- Label* not_symbol) {
+// Checks whether a key is an array index string or a unique name.
+// Falls through if the key is a unique name.
+static void GenerateKeyNameCheck(MacroAssembler* masm,
+ Register key,
+ Register map,
+ Register hash,
+ Label* index_string,
+ Label* not_unique) {
// Register use:
// key - holds the key and is unchanged. Assumed to be non-smi.
// Scratch registers:
// map - used to hold the map of the key.
// hash - used to hold the hash of the key.
- __ CmpObjectType(key, FIRST_NONSTRING_TYPE, map);
- __ j(above_equal, not_symbol);
+ Label unique;
+ __ CmpObjectType(key, LAST_UNIQUE_NAME_TYPE, map);
+ __ j(above, not_unique);
+ STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
+ __ j(equal, &unique);
// Is the string an array index, with cached numeric value?
- __ mov(hash, FieldOperand(key, String::kHashFieldOffset));
- __ test(hash, Immediate(String::kContainsCachedArrayIndexMask));
+ __ mov(hash, FieldOperand(key, Name::kHashFieldOffset));
+ __ test(hash, Immediate(Name::kContainsCachedArrayIndexMask));
__ j(zero, index_string);
- // Is the string a symbol?
- STATIC_ASSERT(kSymbolTag != 0);
- __ test_b(FieldOperand(map, Map::kInstanceTypeOffset), kIsSymbolMask);
- __ j(zero, not_symbol);
+ // Is the string internalized? We already know it's a string so a single
+ // bit test is enough.
+ STATIC_ASSERT(kNotInternalizedTag != 0);
+ __ test_b(FieldOperand(map, Map::kInstanceTypeOffset),
+ kIsNotInternalizedMask);
+ __ j(not_zero, not_unique);
+
+ __ bind(&unique);
}
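As a reading aid, the key classification done by GenerateKeyNameCheck above can be expressed as plain C++. This is a sketch only; the type constant and bit masks are passed in as parameters because their concrete values are not given in this patch.

#include <cstdint>

enum class KeyKind { kArrayIndex, kUniqueName, kNotUnique };

// Symbols and internalized strings are unique names, strings with a cached
// array index are treated as element accesses, everything else falls back
// to the generic (not-unique) path.
KeyKind ClassifyKey(uint16_t instance_type, uint32_t hash_field,
                    uint16_t last_unique_name_type,
                    uint32_t contains_cached_index_mask,
                    uint32_t is_not_internalized_mask) {
  if (instance_type > last_unique_name_type) return KeyKind::kNotUnique;
  if (instance_type == last_unique_name_type) return KeyKind::kUniqueName;  // symbol
  if ((hash_field & contains_cached_index_mask) == 0) return KeyKind::kArrayIndex;
  if (instance_type & is_not_internalized_mask) return KeyKind::kNotUnique;
  return KeyKind::kUniqueName;  // internalized string
}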
@@ -447,11 +411,11 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
- Label slow, check_string, index_smi, index_string, property_array_property;
+ Label slow, check_name, index_smi, index_name, property_array_property;
Label probe_dictionary, check_number_dictionary;
// Check that the key is a smi.
- __ JumpIfNotSmi(ecx, &check_string);
+ __ JumpIfNotSmi(ecx, &check_name);
__ bind(&index_smi);
// Now the key is known to be a smi. This place is also jumped to from
// where a numeric string is converted to a smi.
@@ -502,8 +466,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ IncrementCounter(counters->keyed_load_generic_slow(), 1);
GenerateRuntimeGetProperty(masm);
- __ bind(&check_string);
- GenerateKeyStringCheck(masm, ecx, eax, ebx, &index_string, &slow);
+ __ bind(&check_name);
+ GenerateKeyNameCheck(masm, ecx, eax, ebx, &index_name, &slow);
GenerateKeyedLoadReceiverCheck(
masm, edx, eax, Map::kHasNamedInterceptor, &slow);
@@ -519,7 +483,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// based on 32 bits of the map pointer and the string hash.
if (FLAG_debug_code) {
__ cmp(eax, FieldOperand(edx, HeapObject::kMapOffset));
- __ Check(equal, "Map is no longer in eax.");
+ __ Check(equal, kMapIsNoLongerInEax);
}
__ mov(ebx, eax); // Keep the map around for later.
__ shr(eax, KeyedLookupCache::kMapHashShift);
@@ -528,7 +492,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ xor_(eax, edi);
__ and_(eax, KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask);
- // Load the key (consisting of map and symbol) from the cache and
+ // Load the key (consisting of map and internalized string) from the cache and
// check for match.
Label load_in_object_property;
static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
@@ -612,7 +576,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ IncrementCounter(counters->keyed_load_generic_symbol(), 1);
__ ret(0);
- __ bind(&index_string);
+ __ bind(&index_name);
__ IndexFromHash(ebx, ecx);
// Now jump to the place where smi keys are handled.
__ jmp(&index_smi);
@@ -647,7 +611,7 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
char_at_generator.GenerateSlow(masm, call_helper);
__ bind(&miss);
- GenerateMiss(masm, false);
+ GenerateMiss(masm, MISS);
}
@@ -689,7 +653,7 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
__ TailCallExternalReference(ref, 2, 1);
__ bind(&slow);
- GenerateMiss(masm, false);
+ GenerateMiss(masm, MISS);
}
@@ -714,7 +678,7 @@ void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
__ mov(eax, unmapped_location);
__ Ret();
__ bind(&slow);
- GenerateMiss(masm, false);
+ GenerateMiss(masm, MISS);
}
@@ -743,7 +707,7 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
__ RecordWrite(ebx, edi, edx, kDontSaveFPRegs);
__ Ret();
__ bind(&slow);
- GenerateMiss(masm, false);
+ GenerateMiss(masm, MISS);
}
@@ -835,7 +799,9 @@ static void KeyedStoreGenerateGenericHelper(
ebx,
edi,
slow);
- ElementsTransitionGenerator::GenerateSmiToDouble(masm, slow);
+ AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS,
+ FAST_DOUBLE_ELEMENTS);
+ ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, slow);
__ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
__ jmp(&fast_double_without_map_check);
@@ -846,7 +812,9 @@ static void KeyedStoreGenerateGenericHelper(
ebx,
edi,
slow);
- ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm);
+ mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
+ ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm, mode,
+ slow);
__ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
__ jmp(&finish_object_store);
@@ -860,7 +828,8 @@ static void KeyedStoreGenerateGenericHelper(
ebx,
edi,
slow);
- ElementsTransitionGenerator::GenerateDoubleToObject(masm, slow);
+ mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
+ ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, slow);
__ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
__ jmp(&finish_object_store);
}
@@ -1055,7 +1024,7 @@ void CallICBase::GenerateNormal(MacroAssembler* masm, int argc) {
// Get the receiver of the function from the stack; 1 ~ return address.
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
- GenerateStringDictionaryReceiverCheck(masm, edx, eax, ebx, &miss);
+ GenerateNameDictionaryReceiverCheck(masm, edx, eax, ebx, &miss);
// eax: elements
// Search the dictionary placing the result in edi.
@@ -1171,11 +1140,11 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
Label do_call, slow_call, slow_load, slow_reload_receiver;
- Label check_number_dictionary, check_string, lookup_monomorphic_cache;
- Label index_smi, index_string;
+ Label check_number_dictionary, check_name, lookup_monomorphic_cache;
+ Label index_smi, index_name;
// Check that the key is a smi.
- __ JumpIfNotSmi(ecx, &check_string);
+ __ JumpIfNotSmi(ecx, &check_name);
__ bind(&index_smi);
// Now the key is known to be a smi. This place is also jumped to from
@@ -1234,10 +1203,10 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
__ mov(edi, eax);
__ jmp(&do_call);
- __ bind(&check_string);
- GenerateKeyStringCheck(masm, ecx, eax, ebx, &index_string, &slow_call);
+ __ bind(&check_name);
+ GenerateKeyNameCheck(masm, ecx, eax, ebx, &index_name, &slow_call);
- // The key is known to be a symbol.
+ // The key is known to be a unique name.
// If the receiver is a regular JS object with slow properties then do
// a quick inline probe of the receiver's dictionary.
// Otherwise do the monomorphic cache probe.
@@ -1263,14 +1232,14 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
__ bind(&slow_call);
// This branch is taken if:
// - the receiver requires boxing or access check,
- // - the key is neither smi nor symbol,
+ // - the key is neither smi nor a unique name,
// - the value loaded is not a function,
// - there is hope that the runtime will create a monomorphic call stub
// that will get fetched next time.
__ IncrementCounter(counters->keyed_call_generic_slow(), 1);
GenerateMiss(masm, argc);
- __ bind(&index_string);
+ __ bind(&index_name);
__ IndexFromHash(ebx, ecx);
// Now jump to the place where smi keys are handled.
__ jmp(&index_smi);
@@ -1315,10 +1284,10 @@ void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
// -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
- // Check if the name is a string.
+ // Check if the name is really a name.
Label miss;
__ JumpIfSmi(ecx, &miss);
- Condition cond = masm->IsObjectStringType(ecx, eax, eax);
+ Condition cond = masm->IsObjectNameType(ecx, eax, eax);
__ j(NegateCondition(cond), &miss);
CallICBase::GenerateNormal(masm, argc);
__ bind(&miss);
@@ -1334,9 +1303,11 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// -----------------------------------
// Probe the stub cache.
- Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC, MONOMORPHIC);
- Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, edx, ecx, ebx,
- eax);
+ Code::Flags flags = Code::ComputeFlags(
+ Code::HANDLER, MONOMORPHIC, Code::kNoExtraICState,
+ Code::NORMAL, Code::LOAD_IC);
+ masm->isolate()->stub_cache()->GenerateProbe(
+ masm, flags, edx, ecx, ebx, eax);
// Cache miss: Jump to runtime.
GenerateMiss(masm);
@@ -1351,7 +1322,7 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
// -----------------------------------
Label miss;
- GenerateStringDictionaryReceiverCheck(masm, edx, eax, ebx, &miss);
+ GenerateNameDictionaryReceiverCheck(masm, edx, eax, ebx, &miss);
// eax: elements
// Search the dictionary placing the result in eax.
@@ -1385,7 +1356,24 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
}
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
+void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- ecx : key
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+
+ __ pop(ebx);
+ __ push(edx); // receiver
+ __ push(ecx); // name
+ __ push(ebx); // return address
+
+ // Perform tail call to the entry.
+ __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
+}
+
+
+void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
// ----------- S t a t e -------------
// -- ecx : key
// -- edx : receiver
@@ -1400,7 +1388,7 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
__ push(ebx); // return address
// Perform tail call to the entry.
- ExternalReference ref = force_generic
+ ExternalReference ref = miss_mode == MISS_FORCE_GENERIC
? ExternalReference(IC_Utility(kKeyedLoadIC_MissForceGeneric),
masm->isolate())
: ExternalReference(IC_Utility(kKeyedLoadIC_Miss), masm->isolate());
@@ -1434,10 +1422,11 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
// -- esp[0] : return address
// -----------------------------------
- Code::Flags flags =
- Code::ComputeFlags(Code::STORE_IC, MONOMORPHIC, strict_mode);
- Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, edx, ecx, ebx,
- no_reg);
+ Code::Flags flags = Code::ComputeFlags(
+ Code::HANDLER, MONOMORPHIC, strict_mode,
+ Code::NORMAL, Code::STORE_IC);
+ masm->isolate()->stub_cache()->GenerateProbe(
+ masm, flags, edx, ecx, ebx, no_reg);
// Cache miss: Jump to runtime.
GenerateMiss(masm);
@@ -1465,65 +1454,6 @@ void StoreIC::GenerateMiss(MacroAssembler* masm) {
}
-void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- //
- // This accepts as a receiver anything JSArray::SetElementsLength accepts
- // (currently anything except for external arrays which means anything with
- // elements of FixedArray type). Value must be a number, but only smis are
- // accepted as the most common case.
-
- Label miss;
-
- Register receiver = edx;
- Register value = eax;
- Register scratch = ebx;
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Check that the object is a JS array.
- __ CmpObjectType(receiver, JS_ARRAY_TYPE, scratch);
- __ j(not_equal, &miss);
-
- // Check that elements are FixedArray.
- // We rely on StoreIC_ArrayLength below to deal with all types of
- // fast elements (including COW).
- __ mov(scratch, FieldOperand(receiver, JSArray::kElementsOffset));
- __ CmpObjectType(scratch, FIXED_ARRAY_TYPE, scratch);
- __ j(not_equal, &miss);
-
- // Check that the array has fast properties, otherwise the length
- // property might have been redefined.
- __ mov(scratch, FieldOperand(receiver, JSArray::kPropertiesOffset));
- __ CompareRoot(FieldOperand(scratch, FixedArray::kMapOffset),
- Heap::kHashTableMapRootIndex);
- __ j(equal, &miss);
-
- // Check that value is a smi.
- __ JumpIfNotSmi(value, &miss);
-
- // Prepare tail call to StoreIC_ArrayLength.
- __ pop(scratch);
- __ push(receiver);
- __ push(value);
- __ push(scratch); // return address
-
- ExternalReference ref =
- ExternalReference(IC_Utility(kStoreIC_ArrayLength), masm->isolate());
- __ TailCallExternalReference(ref, 2, 1);
-
- __ bind(&miss);
-
- GenerateMiss(masm);
-}
-
-
void StoreIC::GenerateNormal(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : value
@@ -1534,7 +1464,7 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
Label miss, restore_miss;
- GenerateStringDictionaryReceiverCheck(masm, edx, ebx, edi, &miss);
+ GenerateNameDictionaryReceiverCheck(masm, edx, ebx, edi, &miss);
// A lot of registers are needed for storing to slow case
// objects. Push and restore receiver but rely on
@@ -1555,8 +1485,8 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
}
-void StoreIC::GenerateGlobalProxy(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
+void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
+ StrictModeFlag strict_mode) {
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : name
@@ -1598,7 +1528,7 @@ void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
}
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
+void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : key
@@ -1613,7 +1543,7 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
__ push(ebx);
// Do tail-call to runtime routine.
- ExternalReference ref = force_generic
+ ExternalReference ref = miss_mode == MISS_FORCE_GENERIC
? ExternalReference(IC_Utility(kKeyedStoreIC_MissForceGeneric),
masm->isolate())
: ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
@@ -1621,7 +1551,7 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
}
-void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
+void StoreIC::GenerateSlow(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : key
@@ -1636,59 +1566,28 @@ void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
__ push(ebx); // return address
// Do tail-call to runtime routine.
- ExternalReference ref(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
+ ExternalReference ref(IC_Utility(kStoreIC_Slow), masm->isolate());
__ TailCallExternalReference(ref, 3, 1);
}
-void KeyedStoreIC::GenerateTransitionElementsSmiToDouble(MacroAssembler* masm) {
+void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- ebx : target map
+ // -- eax : value
+ // -- ecx : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
- // Must return the modified receiver in eax.
- if (!FLAG_trace_elements_transitions) {
- Label fail;
- ElementsTransitionGenerator::GenerateSmiToDouble(masm, &fail);
- __ mov(eax, edx);
- __ Ret();
- __ bind(&fail);
- }
__ pop(ebx);
__ push(edx);
- __ push(ebx); // return address
- // Leaving the code managed by the register allocator and return to the
- // convention of using esi as context register.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ TailCallRuntime(Runtime::kTransitionElementsSmiToDouble, 1, 1);
-}
-
-
-void KeyedStoreIC::GenerateTransitionElementsDoubleToObject(
- MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- ebx : target map
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- // Must return the modified receiver in eax.
- if (!FLAG_trace_elements_transitions) {
- Label fail;
- ElementsTransitionGenerator::GenerateDoubleToObject(masm, &fail);
- __ mov(eax, edx);
- __ Ret();
- __ bind(&fail);
- }
+ __ push(ecx);
+ __ push(eax);
+ __ push(ebx); // return address
- __ pop(ebx);
- __ push(edx);
- __ push(ebx); // return address
- // Leaving the code managed by the register allocator and return to the
- // convention of using esi as context register.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ TailCallRuntime(Runtime::kTransitionElementsDoubleToObject, 1, 1);
+ // Do tail-call to runtime routine.
+ ExternalReference ref(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
+ __ TailCallExternalReference(ref, 3, 1);
}
@@ -1715,7 +1614,7 @@ Condition CompareIC::ComputeCondition(Token::Value op) {
}
-static bool HasInlinedSmiCode(Address address) {
+bool CompareIC::HasInlinedSmiCode(Address address) {
// The address of the instruction following the call.
Address test_instruction_address =
address + Assembler::kCallTargetAddressOffset;
@@ -1726,40 +1625,6 @@ static bool HasInlinedSmiCode(Address address) {
}
-void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
- HandleScope scope;
- Handle<Code> rewritten;
- State previous_state = GetState();
-
- State state = TargetState(previous_state, HasInlinedSmiCode(address()), x, y);
- if (state == GENERIC) {
- CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS);
- rewritten = stub.GetCode();
- } else {
- ICCompareStub stub(op_, state);
- if (state == KNOWN_OBJECTS) {
- stub.set_known_map(Handle<Map>(Handle<JSObject>::cast(x)->map()));
- }
- rewritten = stub.GetCode();
- }
- set_target(*rewritten);
-
-#ifdef DEBUG
- if (FLAG_trace_ic) {
- PrintF("[CompareIC (%s->%s)#%s]\n",
- GetStateName(previous_state),
- GetStateName(state),
- Token::Name(op_));
- }
-#endif
-
- // Activate inlined smi code.
- if (previous_state == UNINITIALIZED) {
- PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK);
- }
-}
-
-
void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
// The address of the instruction following the call.
Address test_instruction_address =
diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.cc b/deps/v8/src/ia32/lithium-codegen-ia32.cc
index 32c66a05f..0c7e8d6ef 100644
--- a/deps/v8/src/ia32/lithium-codegen-ia32.cc
+++ b/deps/v8/src/ia32/lithium-codegen-ia32.cc
@@ -27,21 +27,29 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_IA32)
+#if V8_TARGET_ARCH_IA32
#include "ia32/lithium-codegen-ia32.h"
+#include "ic.h"
#include "code-stubs.h"
#include "deoptimizer.h"
#include "stub-cache.h"
#include "codegen.h"
+#include "hydrogen-osr.h"
namespace v8 {
namespace internal {
+static SaveFPRegsMode GetSaveFPRegsMode() {
+ // We don't need to save floating point regs when generating the snapshot.
+ return CpuFeatures::IsSafeForSnapshot(SSE2) ? kSaveFPRegs : kDontSaveFPRegs;
+}
+
+
// When invoking builtins, we need to record the safepoint in the middle of
// the invoke instruction sequence generated by the macro assembler.
-class SafepointGenerator : public CallWrapper {
+class SafepointGenerator V8_FINAL : public CallWrapper {
public:
SafepointGenerator(LCodeGen* codegen,
LPointerMap* pointers,
@@ -49,11 +57,11 @@ class SafepointGenerator : public CallWrapper {
: codegen_(codegen),
pointers_(pointers),
deopt_mode_(mode) {}
- virtual ~SafepointGenerator() { }
+ virtual ~SafepointGenerator() {}
- virtual void BeforeCall(int call_size) const {}
+ virtual void BeforeCall(int call_size) const V8_OVERRIDE {}
- virtual void AfterCall() const {
+ virtual void AfterCall() const V8_OVERRIDE {
codegen_->RecordSafepoint(pointers_, deopt_mode_);
}
@@ -67,25 +75,26 @@ class SafepointGenerator : public CallWrapper {
#define __ masm()->
bool LCodeGen::GenerateCode() {
- HPhase phase("Z_Code generation", chunk());
+ LPhase phase("Z_Code generation", chunk());
ASSERT(is_unused());
status_ = GENERATING;
- CpuFeatures::Scope scope(SSE2);
-
- CodeStub::GenerateFPStubs();
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done in GeneratePrologue).
FrameScope frame_scope(masm_, StackFrame::MANUAL);
- dynamic_frame_alignment_ = (chunk()->num_double_slots() > 2 &&
- !chunk()->graph()->is_recursive()) ||
- !info()->osr_ast_id().IsNone();
+ support_aligned_spilled_doubles_ = info()->IsOptimizing();
+
+ dynamic_frame_alignment_ = info()->IsOptimizing() &&
+ ((chunk()->num_double_slots() > 2 &&
+ !chunk()->graph()->is_recursive()) ||
+ !info()->osr_ast_id().IsNone());
return GeneratePrologue() &&
GenerateBody() &&
GenerateDeferredCode() &&
+ GenerateJumpTable() &&
GenerateSafepointTable();
}
@@ -94,144 +103,163 @@ void LCodeGen::FinishCode(Handle<Code> code) {
ASSERT(is_done());
code->set_stack_slots(GetStackSlotCount());
code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
+ if (FLAG_weak_embedded_maps_in_optimized_code) {
+ RegisterDependentCodeForEmbeddedMaps(code);
+ }
PopulateDeoptimizationData(code);
- Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
+ if (!info()->IsStub()) {
+ Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
+ }
+ info()->CommitDependencies(code);
}
-void LCodeGen::Abort(const char* reason) {
+void LCodeGen::Abort(BailoutReason reason) {
info()->set_bailout_reason(reason);
status_ = ABORTED;
}
-void LCodeGen::Comment(const char* format, ...) {
- if (!FLAG_code_comments) return;
- char buffer[4 * KB];
- StringBuilder builder(buffer, ARRAY_SIZE(buffer));
- va_list arguments;
- va_start(arguments, format);
- builder.AddFormattedList(format, arguments);
- va_end(arguments);
-
- // Copy the string before recording it in the assembler to avoid
- // issues when the stack allocated buffer goes out of scope.
- size_t length = builder.position();
- Vector<char> copy = Vector<char>::New(length + 1);
- memcpy(copy.start(), builder.Finalize(), copy.length());
- masm()->RecordComment(copy.start());
+#ifdef _MSC_VER
+void LCodeGen::MakeSureStackPagesMapped(int offset) {
+ const int kPageSize = 4 * KB;
+ for (offset -= kPageSize; offset > 0; offset -= kPageSize) {
+ __ mov(Operand(esp, offset), eax);
+ }
}
+#endif
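The helper above exists because of a Windows constraint the old inline version of this loop (removed from GeneratePrologue further down in this file's hunk) spells out: the stack may not be accessed more than one page below the most recently mapped page. A standalone sketch of the same page-touch idea, illustrative only:

// After reserving `reserved_size` bytes of stack, touch one word per 4 KB
// page so every page of the reserved area is mapped before arbitrary
// offsets into it are accessed.
void TouchReservedStackPages(volatile char* new_stack_top, int reserved_size) {
  const int kPageSize = 4 * 1024;
  for (int offset = reserved_size - kPageSize; offset > 0; offset -= kPageSize) {
    new_stack_top[offset] = 0;
  }
}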
bool LCodeGen::GeneratePrologue() {
ASSERT(is_generating());
- ProfileEntryHookStub::MaybeCallEntryHook(masm_);
+ if (info()->IsOptimizing()) {
+ ProfileEntryHookStub::MaybeCallEntryHook(masm_);
#ifdef DEBUG
- if (strlen(FLAG_stop_at) > 0 &&
- info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
- __ int3();
- }
+ if (strlen(FLAG_stop_at) > 0 &&
+ info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
+ __ int3();
+ }
#endif
- // Strict mode functions and builtins need to replace the receiver
- // with undefined when called as functions (without an explicit
- // receiver object). ecx is zero for method calls and non-zero for
- // function calls.
- if (!info_->is_classic_mode() || info_->is_native()) {
- Label ok;
- __ test(ecx, Operand(ecx));
- __ j(zero, &ok, Label::kNear);
- // +1 for return address.
- int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
- __ mov(Operand(esp, receiver_offset),
- Immediate(isolate()->factory()->undefined_value()));
- __ bind(&ok);
- }
-
-
- if (dynamic_frame_alignment_) {
- // Move state of dynamic frame alignment into edx.
- __ mov(edx, Immediate(kNoAlignmentPadding));
-
- Label do_not_pad, align_loop;
- STATIC_ASSERT(kDoubleSize == 2 * kPointerSize);
- // Align esp + 4 to a multiple of 2 * kPointerSize.
- __ test(esp, Immediate(kPointerSize));
- __ j(not_zero, &do_not_pad, Label::kNear);
- __ push(Immediate(0));
- __ mov(ebx, esp);
- __ mov(edx, Immediate(kAlignmentPaddingPushed));
- // Copy arguments, receiver, and return address.
- __ mov(ecx, Immediate(scope()->num_parameters() + 2));
+ // Strict mode functions and builtins need to replace the receiver
+ // with undefined when called as functions (without an explicit
+ // receiver object). ecx is zero for method calls and non-zero for
+ // function calls.
+ if (!info_->is_classic_mode() || info_->is_native()) {
+ Label ok;
+ __ test(ecx, Operand(ecx));
+ __ j(zero, &ok, Label::kNear);
+ // +1 for return address.
+ int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
+ __ mov(Operand(esp, receiver_offset),
+ Immediate(isolate()->factory()->undefined_value()));
+ __ bind(&ok);
+ }
- __ bind(&align_loop);
- __ mov(eax, Operand(ebx, 1 * kPointerSize));
- __ mov(Operand(ebx, 0), eax);
- __ add(Operand(ebx), Immediate(kPointerSize));
- __ dec(ecx);
- __ j(not_zero, &align_loop, Label::kNear);
- __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
- __ bind(&do_not_pad);
+ if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
+ // Move state of dynamic frame alignment into edx.
+ __ mov(edx, Immediate(kNoAlignmentPadding));
+
+ Label do_not_pad, align_loop;
+ STATIC_ASSERT(kDoubleSize == 2 * kPointerSize);
+ // Align esp + 4 to a multiple of 2 * kPointerSize.
+ __ test(esp, Immediate(kPointerSize));
+ __ j(not_zero, &do_not_pad, Label::kNear);
+ __ push(Immediate(0));
+ __ mov(ebx, esp);
+ __ mov(edx, Immediate(kAlignmentPaddingPushed));
+ // Copy arguments, receiver, and return address.
+ __ mov(ecx, Immediate(scope()->num_parameters() + 2));
+
+ __ bind(&align_loop);
+ __ mov(eax, Operand(ebx, 1 * kPointerSize));
+ __ mov(Operand(ebx, 0), eax);
+ __ add(Operand(ebx), Immediate(kPointerSize));
+ __ dec(ecx);
+ __ j(not_zero, &align_loop, Label::kNear);
+ __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
+ __ bind(&do_not_pad);
+ }
}
- __ push(ebp); // Caller's frame pointer.
- __ mov(ebp, esp);
- __ push(esi); // Callee's context.
- __ push(edi); // Callee's JS function.
+ info()->set_prologue_offset(masm_->pc_offset());
+ if (NeedsEagerFrame()) {
+ ASSERT(!frame_is_built_);
+ frame_is_built_ = true;
+ __ Prologue(info()->IsStub() ? BUILD_STUB_FRAME : BUILD_FUNCTION_FRAME);
+ info()->AddNoFrameRange(0, masm_->pc_offset());
+ }
- if (dynamic_frame_alignment_ && FLAG_debug_code) {
+ if (info()->IsOptimizing() &&
+ dynamic_frame_alignment_ &&
+ FLAG_debug_code) {
__ test(esp, Immediate(kPointerSize));
- __ Assert(zero, "frame is expected to be aligned");
+ __ Assert(zero, kFrameIsExpectedToBeAligned);
}
// Reserve space for the stack slots needed by the code.
int slots = GetStackSlotCount();
- ASSERT_GE(slots, 1);
- if (slots == 1) {
- if (dynamic_frame_alignment_) {
- __ push(edx);
- } else {
- __ push(Immediate(kNoAlignmentPadding));
- }
- } else {
- if (FLAG_debug_code) {
- __ mov(Operand(eax), Immediate(slots));
- Label loop;
- __ bind(&loop);
- __ push(Immediate(kSlotsZapValue));
- __ dec(eax);
- __ j(not_zero, &loop);
+ ASSERT(slots != 0 || !info()->IsOptimizing());
+ if (slots > 0) {
+ if (slots == 1) {
+ if (dynamic_frame_alignment_) {
+ __ push(edx);
+ } else {
+ __ push(Immediate(kNoAlignmentPadding));
+ }
} else {
- __ sub(Operand(esp), Immediate(slots * kPointerSize));
- #ifdef _MSC_VER
- // On windows, you may not access the stack more than one page below
- // the most recently mapped page. To make the allocated area randomly
- // accessible, we write to each page in turn (the value is irrelevant).
- const int kPageSize = 4 * KB;
- for (int offset = slots * kPointerSize - kPageSize;
- offset > 0;
- offset -= kPageSize) {
- __ mov(Operand(esp, offset), eax);
+ if (FLAG_debug_code) {
+ __ sub(Operand(esp), Immediate(slots * kPointerSize));
+#ifdef _MSC_VER
+ MakeSureStackPagesMapped(slots * kPointerSize);
+#endif
+ __ push(eax);
+ __ mov(Operand(eax), Immediate(slots));
+ Label loop;
+ __ bind(&loop);
+ __ mov(MemOperand(esp, eax, times_4, 0),
+ Immediate(kSlotsZapValue));
+ __ dec(eax);
+ __ j(not_zero, &loop);
+ __ pop(eax);
+ } else {
+ __ sub(Operand(esp), Immediate(slots * kPointerSize));
+#ifdef _MSC_VER
+ MakeSureStackPagesMapped(slots * kPointerSize);
+#endif
+ }
+
+ if (support_aligned_spilled_doubles_) {
+ Comment(";;; Store dynamic frame alignment tag for spilled doubles");
+ // Store dynamic frame alignment state in the first local.
+ int offset = JavaScriptFrameConstants::kDynamicAlignmentStateOffset;
+ if (dynamic_frame_alignment_) {
+ __ mov(Operand(ebp, offset), edx);
+ } else {
+ __ mov(Operand(ebp, offset), Immediate(kNoAlignmentPadding));
+ }
}
- #endif
}
- // Store dynamic frame alignment state in the first local.
- if (dynamic_frame_alignment_) {
- __ mov(Operand(ebp,
- JavaScriptFrameConstants::kDynamicAlignmentStateOffset),
- edx);
- } else {
- __ mov(Operand(ebp,
- JavaScriptFrameConstants::kDynamicAlignmentStateOffset),
- Immediate(kNoAlignmentPadding));
+ if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) {
+ Comment(";;; Save clobbered callee double registers");
+ CpuFeatureScope scope(masm(), SSE2);
+ int count = 0;
+ BitVector* doubles = chunk()->allocated_double_registers();
+ BitVector::Iterator save_iterator(doubles);
+ while (!save_iterator.Done()) {
+ __ movsd(MemOperand(esp, count * kDoubleSize),
+ XMMRegister::FromAllocationIndex(save_iterator.Current()));
+ save_iterator.Advance();
+ count++;
+ }
}
}
// Possibly allocate a local context.
- int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (heap_slots > 0) {
Comment(";;; Allocate local context");
// Argument to NewContext is the function, which is still in edi.
@@ -271,7 +299,7 @@ bool LCodeGen::GeneratePrologue() {
}
// Trace the call.
- if (FLAG_trace) {
+ if (FLAG_trace && info()->IsOptimizing()) {
// We have not executed any compiled code yet, so esi still holds the
// incoming context.
__ CallRuntime(Runtime::kTraceEnter, 0);
@@ -280,24 +308,126 @@ bool LCodeGen::GeneratePrologue() {
}
-bool LCodeGen::GenerateBody() {
- ASSERT(is_generating());
- bool emit_instructions = true;
- for (current_instruction_ = 0;
- !is_aborted() && current_instruction_ < instructions_->length();
- current_instruction_++) {
- LInstruction* instr = instructions_->at(current_instruction_);
- if (instr->IsLabel()) {
- LLabel* label = LLabel::cast(instr);
- emit_instructions = !label->HasReplacement();
+void LCodeGen::GenerateOsrPrologue() {
+ // Generate the OSR entry prologue at the first unknown OSR value, or if there
+ // are none, at the OSR entrypoint instruction.
+ if (osr_pc_offset_ >= 0) return;
+
+ osr_pc_offset_ = masm()->pc_offset();
+
+ // Move state of dynamic frame alignment into edx.
+ __ mov(edx, Immediate(kNoAlignmentPadding));
+
+ if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
+ Label do_not_pad, align_loop;
+ // Align ebp + 4 to a multiple of 2 * kPointerSize.
+ __ test(ebp, Immediate(kPointerSize));
+ __ j(zero, &do_not_pad, Label::kNear);
+ __ push(Immediate(0));
+ __ mov(ebx, esp);
+ __ mov(edx, Immediate(kAlignmentPaddingPushed));
+
+ // Move all parts of the frame over one word. The frame consists of:
+ // unoptimized frame slots, alignment state, context, frame pointer, return
+ // address, receiver, and the arguments.
+ __ mov(ecx, Immediate(scope()->num_parameters() +
+ 5 + graph()->osr()->UnoptimizedFrameSlots()));
+
+ __ bind(&align_loop);
+ __ mov(eax, Operand(ebx, 1 * kPointerSize));
+ __ mov(Operand(ebx, 0), eax);
+ __ add(Operand(ebx), Immediate(kPointerSize));
+ __ dec(ecx);
+ __ j(not_zero, &align_loop, Label::kNear);
+ __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
+ __ sub(Operand(ebp), Immediate(kPointerSize));
+ __ bind(&do_not_pad);
+ }
+
+ // Save the first local, which is overwritten by the alignment state.
+ Operand alignment_loc = MemOperand(ebp, -3 * kPointerSize);
+ __ push(alignment_loc);
+
+ // Set the dynamic frame alignment state.
+ __ mov(alignment_loc, edx);
+
+ // Adjust the frame size, subsuming the unoptimized frame into the
+ // optimized frame.
+ int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
+ ASSERT(slots >= 1);
+ __ sub(esp, Immediate((slots - 1) * kPointerSize));
+}
+
+
+void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
+ if (!CpuFeatures::IsSupported(SSE2)) FlushX87StackIfNecessary(instr);
+}
+
+
+void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) {
+ if (!CpuFeatures::IsSupported(SSE2)) {
+ if (instr->IsGoto()) {
+ x87_stack_.LeavingBlock(current_block_, LGoto::cast(instr));
+ } else if (FLAG_debug_code && FLAG_enable_slow_asserts &&
+ !instr->IsGap() && !instr->IsReturn()) {
+ if (instr->ClobbersDoubleRegisters()) {
+ if (instr->HasDoubleRegisterResult()) {
+ ASSERT_EQ(1, x87_stack_.depth());
+ } else {
+ ASSERT_EQ(0, x87_stack_.depth());
+ }
+ }
+ __ VerifyX87StackDepth(x87_stack_.depth());
}
+ }
+}
+
- if (emit_instructions) {
- Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
- instr->CompileToNative(this);
+bool LCodeGen::GenerateJumpTable() {
+ Label needs_frame;
+ if (jump_table_.length() > 0) {
+ Comment(";;; -------------------- Jump table --------------------");
+ }
+ for (int i = 0; i < jump_table_.length(); i++) {
+ __ bind(&jump_table_[i].label);
+ Address entry = jump_table_[i].address;
+ Deoptimizer::BailoutType type = jump_table_[i].bailout_type;
+ int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
+ if (id == Deoptimizer::kNotDeoptimizationEntry) {
+ Comment(";;; jump table entry %d.", i);
+ } else {
+ Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
+ }
+ if (jump_table_[i].needs_frame) {
+ __ push(Immediate(ExternalReference::ForDeoptEntry(entry)));
+ if (needs_frame.is_bound()) {
+ __ jmp(&needs_frame);
+ } else {
+ __ bind(&needs_frame);
+ __ push(MemOperand(ebp, StandardFrameConstants::kContextOffset));
+ // This variant of deopt can only be used with stubs. Since we don't
+ // have a function pointer to install in the stack frame that we're
+ // building, install a special marker there instead.
+ ASSERT(info()->IsStub());
+ __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
+ // Push a PC inside the function so that the deopt code can find where
+ // the deopt comes from. It doesn't have to be the precise return
+ // address of a "calling" LAZY deopt, it only has to be somewhere
+ // inside the code body.
+ Label push_approx_pc;
+ __ call(&push_approx_pc);
+ __ bind(&push_approx_pc);
+ // Push the continuation which was stashed where the ebp should
+ // be. Replace it with the saved ebp.
+ __ push(MemOperand(esp, 3 * kPointerSize));
+ __ mov(MemOperand(esp, 4 * kPointerSize), ebp);
+ __ lea(ebp, MemOperand(esp, 4 * kPointerSize));
+ __ ret(0); // Call the continuation without clobbering registers.
+ }
+ } else {
+ __ call(entry, RelocInfo::RUNTIME_ENTRY);
}
}
- EnsureSpaceForLazyDeopt();
return !is_aborted();
}
@@ -307,11 +437,40 @@ bool LCodeGen::GenerateDeferredCode() {
if (deferred_.length() > 0) {
for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
LDeferredCode* code = deferred_[i];
- __ bind(code->entry());
- Comment(";;; Deferred code @%d: %s.",
+ X87Stack copy(code->x87_stack());
+ x87_stack_ = copy;
+
+ HValue* value =
+ instructions_->at(code->instruction_index())->hydrogen_value();
+ RecordAndWritePosition(value->position());
+
+ Comment(";;; <@%d,#%d> "
+ "-------------------- Deferred %s --------------------",
code->instruction_index(),
+ code->instr()->hydrogen_value()->id(),
code->instr()->Mnemonic());
+ __ bind(code->entry());
+ if (NeedsDeferredFrame()) {
+ Comment(";;; Build frame");
+ ASSERT(!frame_is_built_);
+ ASSERT(info()->IsStub());
+ frame_is_built_ = true;
+ // Build the frame in such a way that esi isn't trashed.
+ __ push(ebp); // Caller's frame pointer.
+ __ push(Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
+ __ lea(ebp, Operand(esp, 2 * kPointerSize));
+ Comment(";;; Deferred code");
+ }
code->Generate();
+ if (NeedsDeferredFrame()) {
+ __ bind(code->done());
+ Comment(";;; Destroy frame");
+ ASSERT(frame_is_built_);
+ frame_is_built_ = false;
+ __ mov(esp, ebp);
+ __ pop(ebp);
+ }
__ jmp(code->exit());
}
}
@@ -325,6 +484,15 @@ bool LCodeGen::GenerateDeferredCode() {
bool LCodeGen::GenerateSafepointTable() {
ASSERT(is_done());
+ if (!info()->IsStub()) {
+ // For lazy deoptimization we need space to patch a call after every call.
+ // Ensure there is always space for such patching, even if the code ends
+ // in a call.
+ int target_offset = masm()->pc_offset() + Deoptimizer::patch_size();
+ while (masm()->pc_offset() < target_offset) {
+ masm()->nop();
+ }
+ }
safepoints_.Emit(masm(), GetStackSlotCount());
return !is_aborted();
}
@@ -335,35 +503,257 @@ Register LCodeGen::ToRegister(int index) const {
}
+X87Register LCodeGen::ToX87Register(int index) const {
+ return X87Register::FromAllocationIndex(index);
+}
+
+
XMMRegister LCodeGen::ToDoubleRegister(int index) const {
return XMMRegister::FromAllocationIndex(index);
}
+void LCodeGen::X87LoadForUsage(X87Register reg) {
+ ASSERT(x87_stack_.Contains(reg));
+ x87_stack_.Fxch(reg);
+ x87_stack_.pop();
+}
+
+
+void LCodeGen::X87LoadForUsage(X87Register reg1, X87Register reg2) {
+ ASSERT(x87_stack_.Contains(reg1));
+ ASSERT(x87_stack_.Contains(reg2));
+ x87_stack_.Fxch(reg1, 1);
+ x87_stack_.Fxch(reg2);
+ x87_stack_.pop();
+ x87_stack_.pop();
+}
+
+
+void LCodeGen::X87Stack::Fxch(X87Register reg, int other_slot) {
+ ASSERT(is_mutable_);
+ ASSERT(Contains(reg) && stack_depth_ > other_slot);
+ int i = ArrayIndex(reg);
+ int st = st2idx(i);
+ if (st != other_slot) {
+ int other_i = st2idx(other_slot);
+ X87Register other = stack_[other_i];
+ stack_[other_i] = reg;
+ stack_[i] = other;
+ if (st == 0) {
+ __ fxch(other_slot);
+ } else if (other_slot == 0) {
+ __ fxch(st);
+ } else {
+ __ fxch(st);
+ __ fxch(other_slot);
+ __ fxch(st);
+ }
+ }
+}
+
+
+int LCodeGen::X87Stack::st2idx(int pos) {
+ return stack_depth_ - pos - 1;
+}
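A tiny model may help when reading the X87Stack bookkeeping introduced here: the virtual stack grows upward in stack_, the top of the real x87 stack (st(0)) is the last occupied element, and st2idx() converts an x87 position into an array index. The struct below is a sketch for orientation, not V8 code.

#include <cassert>

struct X87StackModel {
  int regs[8];        // allocation indices of the registers on the stack
  int depth = 0;
  int St2Idx(int pos) const { return depth - pos - 1; }            // st(pos) -> array slot
  void Push(int reg) { assert(depth < 8); regs[depth++] = reg; }
  int Top() const { assert(depth > 0); return regs[St2Idx(0)]; }   // st(0)
};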
+
+
+int LCodeGen::X87Stack::ArrayIndex(X87Register reg) {
+ for (int i = 0; i < stack_depth_; i++) {
+ if (stack_[i].is(reg)) return i;
+ }
+ UNREACHABLE();
+ return -1;
+}
+
+
+bool LCodeGen::X87Stack::Contains(X87Register reg) {
+ for (int i = 0; i < stack_depth_; i++) {
+ if (stack_[i].is(reg)) return true;
+ }
+ return false;
+}
+
+
+void LCodeGen::X87Stack::Free(X87Register reg) {
+ ASSERT(is_mutable_);
+ ASSERT(Contains(reg));
+ int i = ArrayIndex(reg);
+ int st = st2idx(i);
+ if (st > 0) {
+ // Keep track of how fstp(i) changes the order of elements.
+ int tos_i = st2idx(0);
+ stack_[i] = stack_[tos_i];
+ }
+ pop();
+ __ fstp(st);
+}
+
+
+void LCodeGen::X87Mov(X87Register dst, Operand src, X87OperandType opts) {
+ if (x87_stack_.Contains(dst)) {
+ x87_stack_.Fxch(dst);
+ __ fstp(0);
+ } else {
+ x87_stack_.push(dst);
+ }
+ X87Fld(src, opts);
+}
+
+
+void LCodeGen::X87Fld(Operand src, X87OperandType opts) {
+ ASSERT(!src.is_reg_only());
+ switch (opts) {
+ case kX87DoubleOperand:
+ __ fld_d(src);
+ break;
+ case kX87FloatOperand:
+ __ fld_s(src);
+ break;
+ case kX87IntOperand:
+ __ fild_s(src);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void LCodeGen::X87Mov(Operand dst, X87Register src, X87OperandType opts) {
+ ASSERT(!dst.is_reg_only());
+ x87_stack_.Fxch(src);
+ switch (opts) {
+ case kX87DoubleOperand:
+ __ fst_d(dst);
+ break;
+ case kX87IntOperand:
+ __ fist_s(dst);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void LCodeGen::X87Stack::PrepareToWrite(X87Register reg) {
+ ASSERT(is_mutable_);
+ if (Contains(reg)) {
+ Free(reg);
+ }
+ // Mark this register as the next register to write to
+ stack_[stack_depth_] = reg;
+}
+
+
+void LCodeGen::X87Stack::CommitWrite(X87Register reg) {
+ ASSERT(is_mutable_);
+ // Assert the reg is prepared to write, but not on the virtual stack yet
+ ASSERT(!Contains(reg) && stack_[stack_depth_].is(reg) &&
+ stack_depth_ < X87Register::kNumAllocatableRegisters);
+ stack_depth_++;
+}
+
+
+void LCodeGen::X87PrepareBinaryOp(
+ X87Register left, X87Register right, X87Register result) {
+ // You need to use DefineSameAsFirst for x87 instructions
+ ASSERT(result.is(left));
+ x87_stack_.Fxch(right, 1);
+ x87_stack_.Fxch(left);
+}
+
+
+void LCodeGen::X87Stack::FlushIfNecessary(LInstruction* instr, LCodeGen* cgen) {
+ if (stack_depth_ > 0 && instr->ClobbersDoubleRegisters()) {
+ bool double_inputs = instr->HasDoubleRegisterInput();
+
+ // Flush stack from tos down, since Free() will mess with tos.
+ for (int i = stack_depth_-1; i >= 0; i--) {
+ X87Register reg = stack_[i];
+ // Skip registers which contain the inputs for the next instruction
+ // when flushing the stack
+ if (double_inputs && instr->IsDoubleInput(reg, cgen)) {
+ continue;
+ }
+ Free(reg);
+ if (i < stack_depth_-1) i++;
+ }
+ }
+ if (instr->IsReturn()) {
+ while (stack_depth_ > 0) {
+ __ fstp(0);
+ stack_depth_--;
+ }
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) __ VerifyX87StackDepth(0);
+ }
+}
+
+
+void LCodeGen::X87Stack::LeavingBlock(int current_block_id, LGoto* goto_instr) {
+ ASSERT(stack_depth_ <= 1);
+ // If this is ever used for new stubs that produce two pairs of doubles
+ // joined into two phis, this assert will hit. That situation is not
+ // handled, since the two stacks might have st0 and st1 swapped.
+ if (current_block_id + 1 != goto_instr->block_id()) {
+ // If we have a value on the x87 stack on leaving a block, it must be a
+ // phi input. If the next block we compile is not the join block, we have
+ // to discard the stack state.
+ stack_depth_ = 0;
+ }
+}
+
+
+void LCodeGen::EmitFlushX87ForDeopt() {
+ // The deoptimizer does not support X87 registers. But as long as we
+ // deopt from a stub it's not a problem, since we will re-materialize the
+ // original stub inputs, which can't be double registers.
+ ASSERT(info()->IsStub());
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ __ pushfd();
+ __ VerifyX87StackDepth(x87_stack_.depth());
+ __ popfd();
+ }
+ for (int i = 0; i < x87_stack_.depth(); i++) __ fstp(0);
+}
+
+
Register LCodeGen::ToRegister(LOperand* op) const {
ASSERT(op->IsRegister());
return ToRegister(op->index());
}
+X87Register LCodeGen::ToX87Register(LOperand* op) const {
+ ASSERT(op->IsDoubleRegister());
+ return ToX87Register(op->index());
+}
+
+
XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
ASSERT(op->IsDoubleRegister());
return ToDoubleRegister(op->index());
}
-int LCodeGen::ToInteger32(LConstantOperand* op) const {
+int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
+ return ToRepresentation(op, Representation::Integer32());
+}
+
+
+int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
+ const Representation& r) const {
HConstant* constant = chunk_->LookupConstant(op);
- ASSERT(chunk_->LookupLiteralRepresentation(op).IsInteger32());
- ASSERT(constant->HasInteger32Value());
- return constant->Integer32Value();
+ int32_t value = constant->Integer32Value();
+ if (r.IsInteger32()) return value;
+ ASSERT(r.IsSmiOrTagged());
+ return reinterpret_cast<int32_t>(Smi::FromInt(value));
}
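The reinterpret_cast above relies on the ia32 Smi encoding (kSmiTag == 0, kSmiTagSize == 1, as asserted elsewhere in this patch): a Smi is just the integer shifted left by one. A hedged sketch of that encoding, for reference only and ignoring overflow handling:

#include <cstdint>

static inline int32_t SketchSmiTag(int32_t value) { return value << 1; }  // tag bit stays clear
static inline int32_t SketchSmiUntag(int32_t smi) { return smi >> 1; }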
Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
HConstant* constant = chunk_->LookupConstant(op);
- ASSERT(chunk_->LookupLiteralRepresentation(op).IsTagged());
- return constant->handle();
+ ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
+ return constant->handle(isolate());
}
@@ -374,8 +764,20 @@ double LCodeGen::ToDouble(LConstantOperand* op) const {
}
+ExternalReference LCodeGen::ToExternalReference(LConstantOperand* op) const {
+ HConstant* constant = chunk_->LookupConstant(op);
+ ASSERT(constant->HasExternalReferenceValue());
+ return constant->ExternalReferenceValue();
+}
+
+
bool LCodeGen::IsInteger32(LConstantOperand* op) const {
- return chunk_->LookupLiteralRepresentation(op).IsInteger32();
+ return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
+}
+
+
+bool LCodeGen::IsSmi(LConstantOperand* op) const {
+ return chunk_->LookupLiteralRepresentation(op).IsSmi();
}
@@ -383,49 +785,29 @@ Operand LCodeGen::ToOperand(LOperand* op) const {
if (op->IsRegister()) return Operand(ToRegister(op));
if (op->IsDoubleRegister()) return Operand(ToDoubleRegister(op));
ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
- int index = op->index();
- if (index >= 0) {
- // Local or spill slot. Skip the frame pointer, function, and
- // context in the fixed part of the frame.
- return Operand(ebp, -(index + 3) * kPointerSize);
- } else {
- // Incoming parameter. Skip the return address.
- return Operand(ebp, -(index - 1) * kPointerSize);
- }
+ return Operand(ebp, StackSlotOffset(op->index()));
}
Operand LCodeGen::HighOperand(LOperand* op) {
ASSERT(op->IsDoubleStackSlot());
- int index = op->index();
- int offset = (index >= 0) ? index + 3 : index - 1;
- return Operand(ebp, -offset * kPointerSize);
+ return Operand(ebp, StackSlotOffset(op->index()) + kPointerSize);
}
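Based on the inline arithmetic removed in the two hunks above, the shared StackSlotOffset() helper the new code calls presumably computes something like the following. This is a sketch inferred from this diff, not copied from the patch:

// Non-negative indices are locals/spill slots below the saved ebp (skip the
// frame pointer, function, and context); negative indices are incoming
// parameters above it (skip the return address).
static int SketchStackSlotOffset(int index) {
  const int kPointerSize = 4;  // ia32
  if (index >= 0) return -(index + 3) * kPointerSize;
  return -(index - 1) * kPointerSize;
}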
void LCodeGen::WriteTranslation(LEnvironment* environment,
- Translation* translation,
- int* arguments_index,
- int* arguments_count) {
+ Translation* translation) {
if (environment == NULL) return;
// The translation includes one command per value in the environment.
- int translation_size = environment->values()->length();
+ int translation_size = environment->translation_size();
// The output frame height does not include the parameters.
int height = translation_size - environment->parameter_count();
- // Function parameters are arguments to the outermost environment. The
- // arguments index points to the first element of a sequence of tagged
- // values on the stack that represent the arguments. This needs to be
- // kept in sync with the LArgumentsElements implementation.
- *arguments_index = -environment->parameter_count();
- *arguments_count = environment->parameter_count();
-
- WriteTranslation(environment->outer(),
- translation,
- arguments_index,
- arguments_count);
- int closure_id = *info()->closure() != *environment->closure()
+ WriteTranslation(environment->outer(), translation);
+ bool has_closure_id = !info()->closure().is_null() &&
+ !info()->closure().is_identical_to(environment->closure());
+ int closure_id = has_closure_id
? DefineDeoptimizationLiteral(environment->closure())
: Translation::kSelfLiteralId;
switch (environment->frame_type()) {
@@ -448,68 +830,65 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
case ARGUMENTS_ADAPTOR:
translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
break;
+ case STUB:
+ translation->BeginCompiledStubFrame();
+ break;
+ default:
+ UNREACHABLE();
}
- // Inlined frames which push their arguments cause the index to be
- // bumped and another stack area to be used for materialization.
- if (environment->entry() != NULL &&
- environment->entry()->arguments_pushed()) {
- *arguments_index = *arguments_index < 0
- ? GetStackSlotCount()
- : *arguments_index + *arguments_count;
- *arguments_count = environment->entry()->arguments_count() + 1;
- }
-
+ int object_index = 0;
+ int dematerialized_index = 0;
for (int i = 0; i < translation_size; ++i) {
LOperand* value = environment->values()->at(i);
- // spilled_registers_ and spilled_double_registers_ are either
- // both NULL or both set.
- if (environment->spilled_registers() != NULL && value != NULL) {
- if (value->IsRegister() &&
- environment->spilled_registers()[value->index()] != NULL) {
- translation->MarkDuplicate();
- AddToTranslation(translation,
- environment->spilled_registers()[value->index()],
- environment->HasTaggedValueAt(i),
- environment->HasUint32ValueAt(i),
- *arguments_index,
- *arguments_count);
- } else if (
- value->IsDoubleRegister() &&
- environment->spilled_double_registers()[value->index()] != NULL) {
- translation->MarkDuplicate();
- AddToTranslation(
- translation,
- environment->spilled_double_registers()[value->index()],
- false,
- false,
- *arguments_index,
- *arguments_count);
- }
- }
-
- AddToTranslation(translation,
+ AddToTranslation(environment,
+ translation,
value,
environment->HasTaggedValueAt(i),
environment->HasUint32ValueAt(i),
- *arguments_index,
- *arguments_count);
+ &object_index,
+ &dematerialized_index);
}
}
-void LCodeGen::AddToTranslation(Translation* translation,
+void LCodeGen::AddToTranslation(LEnvironment* environment,
+ Translation* translation,
LOperand* op,
bool is_tagged,
bool is_uint32,
- int arguments_index,
- int arguments_count) {
- if (op == NULL) {
- // TODO(twuerthinger): Introduce marker operands to indicate that this value
- // is not present and must be reconstructed from the deoptimizer. Currently
- // this is only used for the arguments object.
- translation->StoreArgumentsObject(arguments_index, arguments_count);
- } else if (op->IsStackSlot()) {
+ int* object_index_pointer,
+ int* dematerialized_index_pointer) {
+ if (op == LEnvironment::materialization_marker()) {
+ int object_index = (*object_index_pointer)++;
+ if (environment->ObjectIsDuplicateAt(object_index)) {
+ int dupe_of = environment->ObjectDuplicateOfAt(object_index);
+ translation->DuplicateObject(dupe_of);
+ return;
+ }
+ int object_length = environment->ObjectLengthAt(object_index);
+ if (environment->ObjectIsArgumentsAt(object_index)) {
+ translation->BeginArgumentsObject(object_length);
+ } else {
+ translation->BeginCapturedObject(object_length);
+ }
+ int dematerialized_index = *dematerialized_index_pointer;
+ int env_offset = environment->translation_size() + dematerialized_index;
+ *dematerialized_index_pointer += object_length;
+ for (int i = 0; i < object_length; ++i) {
+ LOperand* value = environment->values()->at(env_offset + i);
+ AddToTranslation(environment,
+ translation,
+ value,
+ environment->HasTaggedValueAt(env_offset + i),
+ environment->HasUint32ValueAt(env_offset + i),
+ object_index_pointer,
+ dematerialized_index_pointer);
+ }
+ return;
+ }
+
+ if (op->IsStackSlot()) {
if (is_tagged) {
translation->StoreStackSlot(op->index());
} else if (is_uint32) {
@@ -537,7 +916,7 @@ void LCodeGen::AddToTranslation(Translation* translation,
translation->StoreDoubleRegister(reg);
} else if (op->IsConstantOperand()) {
HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
- int src_index = DefineDeoptimizationLiteral(constant->handle());
+ int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
translation->StoreLiteral(src_index);
} else {
UNREACHABLE();
@@ -550,8 +929,6 @@ void LCodeGen::CallCodeGeneric(Handle<Code> code,
LInstruction* instr,
SafepointMode safepoint_mode) {
ASSERT(instr != NULL);
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
__ call(code, mode);
RecordSafepointWithLazyDeopt(instr, safepoint_mode);
@@ -573,22 +950,20 @@ void LCodeGen::CallCode(Handle<Code> code,
void LCodeGen::CallRuntime(const Runtime::Function* fun,
int argc,
- LInstruction* instr) {
+ LInstruction* instr,
+ SaveFPRegsMode save_doubles) {
ASSERT(instr != NULL);
ASSERT(instr->HasPointerMap());
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
- __ CallRuntime(fun, argc);
+ __ CallRuntime(fun, argc, save_doubles);
RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+
+ ASSERT(info()->is_calling());
}
-void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
- int argc,
- LInstruction* instr,
- LOperand* context) {
+void LCodeGen::LoadContextFromDeferred(LOperand* context) {
if (context->IsRegister()) {
if (!ToRegister(context).is(esi)) {
__ mov(esi, ToRegister(context));
@@ -598,14 +973,23 @@ void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
} else if (context->IsConstantOperand()) {
HConstant* constant =
chunk_->LookupConstant(LConstantOperand::cast(context));
- __ LoadHeapObject(esi, Handle<Context>::cast(constant->handle()));
+ __ LoadObject(esi, Handle<Object>::cast(constant->handle(isolate())));
} else {
UNREACHABLE();
}
+}
+
+void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
+ int argc,
+ LInstruction* instr,
+ LOperand* context) {
+ LoadContextFromDeferred(context);
__ CallRuntimeSaveDoubles(id);
RecordSafepointWithRegisters(
instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
+
+ ASSERT(info()->is_calling());
}
@@ -627,8 +1011,6 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(
int frame_count = 0;
int jsframe_count = 0;
- int args_index = 0;
- int args_count = 0;
for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
++frame_count;
if (e->frame_type() == JS_FUNCTION) {
@@ -636,7 +1018,7 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(
}
}
Translation translation(&translations_, frame_count, jsframe_count, zone());
- WriteTranslation(environment, &translation, &args_index, &args_count);
+ WriteTranslation(environment, &translation);
int deoptimization_index = deoptimizations_.length();
int pc_offset = masm()->pc_offset();
environment->Register(deoptimization_index,
@@ -647,77 +1029,141 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(
}
-void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
+void LCodeGen::DeoptimizeIf(Condition cc,
+ LEnvironment* environment,
+ Deoptimizer::BailoutType bailout_type) {
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
ASSERT(environment->HasBeenRegistered());
int id = environment->deoptimization_index();
- Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
+ ASSERT(info()->IsOptimizing() || info()->IsStub());
+ Address entry =
+ Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
if (entry == NULL) {
- Abort("bailout was not prepared");
+ Abort(kBailoutWasNotPrepared);
return;
}
- if (FLAG_deopt_every_n_times != 0) {
- Handle<SharedFunctionInfo> shared(info_->shared_info());
+ if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
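+    // Stress mode: decrement a global counter and force a deoptimization
+    // every time it reaches zero.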
+ ExternalReference count = ExternalReference::stress_deopt_count(isolate());
Label no_deopt;
__ pushfd();
__ push(eax);
- __ push(ebx);
- __ mov(ebx, shared);
- __ mov(eax,
- FieldOperand(ebx, SharedFunctionInfo::kStressDeoptCounterOffset));
- __ sub(Operand(eax), Immediate(Smi::FromInt(1)));
+ __ mov(eax, Operand::StaticVariable(count));
+ __ sub(eax, Immediate(1));
__ j(not_zero, &no_deopt, Label::kNear);
if (FLAG_trap_on_deopt) __ int3();
- __ mov(eax, Immediate(Smi::FromInt(FLAG_deopt_every_n_times)));
- __ mov(FieldOperand(ebx, SharedFunctionInfo::kStressDeoptCounterOffset),
- eax);
- __ pop(ebx);
+ __ mov(eax, Immediate(FLAG_deopt_every_n_times));
+ __ mov(Operand::StaticVariable(count), eax);
__ pop(eax);
__ popfd();
- __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
-
+ ASSERT(frame_is_built_);
+ __ call(entry, RelocInfo::RUNTIME_ENTRY);
__ bind(&no_deopt);
- __ mov(FieldOperand(ebx, SharedFunctionInfo::kStressDeoptCounterOffset),
- eax);
- __ pop(ebx);
+ __ mov(Operand::StaticVariable(count), eax);
__ pop(eax);
__ popfd();
}
- if (cc == no_condition) {
- if (FLAG_trap_on_deopt) __ int3();
- __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
+  // Before instructions that can deopt, we normally flush the x87 stack. But
+  // the current instruction may still have inputs or outputs on that stack,
+  // so flush them here to leave the physical stack in a consistent state.
+ if (x87_stack_.depth() > 0) {
+ Label done;
+ if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear);
+ EmitFlushX87ForDeopt();
+ __ bind(&done);
+ }
+
+ if (info()->ShouldTrapOnDeopt()) {
+ Label done;
+ if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear);
+ __ int3();
+ __ bind(&done);
+ }
+
+ ASSERT(info()->IsStub() || frame_is_built_);
+ if (cc == no_condition && frame_is_built_) {
+ __ call(entry, RelocInfo::RUNTIME_ENTRY);
} else {
- if (FLAG_trap_on_deopt) {
- Label done;
- __ j(NegateCondition(cc), &done, Label::kNear);
- __ int3();
- __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
- __ bind(&done);
+    // We often have several deopts to the same entry; reuse the last
+    // jump table entry if that is the case.
+ if (jump_table_.is_empty() ||
+ jump_table_.last().address != entry ||
+ jump_table_.last().needs_frame != !frame_is_built_ ||
+ jump_table_.last().bailout_type != bailout_type) {
+ Deoptimizer::JumpTableEntry table_entry(entry,
+ bailout_type,
+ !frame_is_built_);
+ jump_table_.Add(table_entry, zone());
+ }
+ if (cc == no_condition) {
+ __ jmp(&jump_table_.last().label);
} else {
- __ j(cc, entry, RelocInfo::RUNTIME_ENTRY);
+ __ j(cc, &jump_table_.last().label);
}
}
}
+void LCodeGen::DeoptimizeIf(Condition cc,
+ LEnvironment* environment) {
+ Deoptimizer::BailoutType bailout_type = info()->IsStub()
+ ? Deoptimizer::LAZY
+ : Deoptimizer::EAGER;
+ DeoptimizeIf(cc, environment, bailout_type);
+}
+
+
+void LCodeGen::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) {
+ ZoneList<Handle<Map> > maps(1, zone());
+ ZoneList<Handle<JSObject> > objects(1, zone());
+ int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
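+  // Walk the relocation info and collect all weakly embedded maps and
+  // JSObjects so the code can be registered as dependent on them below.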
+ for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
+ if (Code::IsWeakEmbeddedObject(code->kind(), it.rinfo()->target_object())) {
+ if (it.rinfo()->target_object()->IsMap()) {
+ Handle<Map> map(Map::cast(it.rinfo()->target_object()));
+ maps.Add(map, zone());
+ } else if (it.rinfo()->target_object()->IsJSObject()) {
+ Handle<JSObject> object(JSObject::cast(it.rinfo()->target_object()));
+ objects.Add(object, zone());
+ }
+ }
+ }
+#ifdef VERIFY_HEAP
+ // This disables verification of weak embedded objects after full GC.
+ // AddDependentCode can cause a GC, which would observe the state where
+ // this code is not yet in the depended code lists of the embedded maps.
+ NoWeakObjectVerificationScope disable_verification_of_embedded_objects;
+#endif
+ for (int i = 0; i < maps.length(); i++) {
+ maps.at(i)->AddDependentCode(DependentCode::kWeaklyEmbeddedGroup, code);
+ }
+ for (int i = 0; i < objects.length(); i++) {
+ AddWeakObjectToCodeDependency(isolate()->heap(), objects.at(i), code);
+ }
+}
+
+
void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
int length = deoptimizations_.length();
if (length == 0) return;
Handle<DeoptimizationInputData> data =
factory()->NewDeoptimizationInputData(length, TENURED);
- Handle<ByteArray> translations = translations_.CreateByteArray();
+ Handle<ByteArray> translations =
+ translations_.CreateByteArray(isolate()->factory());
data->SetTranslationByteArray(*translations);
data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
Handle<FixedArray> literals =
factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
- for (int i = 0; i < deoptimization_literals_.length(); i++) {
- literals->set(i, *deoptimization_literals_[i]);
+ { AllowDeferredHandleDereference copy_handles;
+ for (int i = 0; i < deoptimization_literals_.length(); i++) {
+ literals->set(i, *deoptimization_literals_[i]);
+ }
+ data->SetLiteralArray(*literals);
}
- data->SetLiteralArray(*literals);
data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
@@ -800,7 +1246,7 @@ void LCodeGen::RecordSafepoint(LPointerMap* pointers,
void LCodeGen::RecordSafepoint(Safepoint::DeoptMode mode) {
- LPointerMap empty_pointers(RelocInfo::kNoPosition, zone());
+ LPointerMap empty_pointers(zone());
RecordSafepoint(&empty_pointers, mode);
}
@@ -812,18 +1258,26 @@ void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
}
-void LCodeGen::RecordPosition(int position) {
+void LCodeGen::RecordAndWritePosition(int position) {
if (position == RelocInfo::kNoPosition) return;
masm()->positions_recorder()->RecordPosition(position);
+ masm()->positions_recorder()->WriteRecordedPositions();
+}
+
+
+static const char* LabelType(LLabel* label) {
+ if (label->is_loop_header()) return " (loop header)";
+ if (label->is_osr_entry()) return " (OSR entry)";
+ return "";
}
void LCodeGen::DoLabel(LLabel* label) {
- if (label->is_loop_header()) {
- Comment(";;; B%d - LOOP entry", label->block_id());
- } else {
- Comment(";;; B%d", label->block_id());
- }
+ Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
+ current_instruction_,
+ label->hydrogen_value()->id(),
+ label->block_id(),
+ LabelType(label));
__ bind(label->label());
current_block_ = label->block_id();
DoGap(label);
@@ -862,38 +1316,28 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
switch (instr->hydrogen()->major_key()) {
case CodeStub::RegExpConstructResult: {
RegExpConstructResultStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::RegExpExec: {
RegExpExecStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::SubString: {
SubStringStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::NumberToString: {
- NumberToStringStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::StringAdd: {
- StringAddStub stub(NO_STRING_ADD_FLAGS);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::StringCompare: {
StringCompareStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::TranscendentalCache: {
TranscendentalCacheStub stub(instr->transcendental_type(),
TranscendentalCacheStub::TAGGED);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
default:
@@ -903,123 +1347,179 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
- // Nothing to do.
+ GenerateOsrPrologue();
}
void LCodeGen::DoModI(LModI* instr) {
- if (instr->hydrogen()->HasPowerOf2Divisor()) {
- Register dividend = ToRegister(instr->left());
-
- int32_t divisor =
- HConstant::cast(instr->hydrogen()->right())->Integer32Value();
+ HMod* hmod = instr->hydrogen();
+ HValue* left = hmod->left();
+ HValue* right = hmod->right();
+ if (hmod->HasPowerOf2Divisor()) {
+ // TODO(svenpanne) We should really do the strength reduction on the
+ // Hydrogen level.
+ Register left_reg = ToRegister(instr->left());
+ ASSERT(left_reg.is(ToRegister(instr->result())));
- if (divisor < 0) divisor = -divisor;
+ // Note: The code below even works when right contains kMinInt.
+ int32_t divisor = Abs(right->GetInteger32Constant());
- Label positive_dividend, done;
- __ test(dividend, Operand(dividend));
- __ j(not_sign, &positive_dividend, Label::kNear);
- __ neg(dividend);
- __ and_(dividend, divisor - 1);
- __ neg(dividend);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ j(not_zero, &done, Label::kNear);
- DeoptimizeIf(no_condition, instr->environment());
- } else {
+ Label left_is_not_negative, done;
+ if (left->CanBeNegative()) {
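+      // JS % takes the sign of the dividend, so for a negative dividend
+      // compute -((-left) & (divisor - 1)).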
+ __ test(left_reg, Operand(left_reg));
+ __ j(not_sign, &left_is_not_negative, Label::kNear);
+ __ neg(left_reg);
+ __ and_(left_reg, divisor - 1);
+ __ neg(left_reg);
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(zero, instr->environment());
+ }
__ jmp(&done, Label::kNear);
}
- __ bind(&positive_dividend);
- __ and_(dividend, divisor - 1);
+
+ __ bind(&left_is_not_negative);
+ __ and_(left_reg, divisor - 1);
__ bind(&done);
- } else {
- Label done, remainder_eq_dividend, slow, do_subtraction, both_positive;
+
+ } else if (hmod->fixed_right_arg().has_value) {
Register left_reg = ToRegister(instr->left());
+ ASSERT(left_reg.is(ToRegister(instr->result())));
Register right_reg = ToRegister(instr->right());
- Register result_reg = ToRegister(instr->result());
+ int32_t divisor = hmod->fixed_right_arg().value;
+ ASSERT(IsPowerOf2(divisor));
+
+ // Check if our assumption of a fixed right operand still holds.
+ __ cmp(right_reg, Immediate(divisor));
+ DeoptimizeIf(not_equal, instr->environment());
+
+ Label left_is_not_negative, done;
+ if (left->CanBeNegative()) {
+ __ test(left_reg, Operand(left_reg));
+ __ j(not_sign, &left_is_not_negative, Label::kNear);
+ __ neg(left_reg);
+ __ and_(left_reg, divisor - 1);
+ __ neg(left_reg);
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(zero, instr->environment());
+ }
+ __ jmp(&done, Label::kNear);
+ }
+
+ __ bind(&left_is_not_negative);
+ __ and_(left_reg, divisor - 1);
+ __ bind(&done);
+
+ } else {
+ Register left_reg = ToRegister(instr->left());
ASSERT(left_reg.is(eax));
- ASSERT(result_reg.is(edx));
+ Register right_reg = ToRegister(instr->right());
ASSERT(!right_reg.is(eax));
ASSERT(!right_reg.is(edx));
+ Register result_reg = ToRegister(instr->result());
+ ASSERT(result_reg.is(edx));
- // Check for x % 0.
- if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
+ Label done;
+    // Check for x % 0; idiv would signal a divide error. We have to
+    // deopt in this case because we can't return a NaN.
+ if (right->CanBeZero()) {
__ test(right_reg, Operand(right_reg));
DeoptimizeIf(zero, instr->environment());
}
- __ test(left_reg, Operand(left_reg));
- __ j(zero, &remainder_eq_dividend, Label::kNear);
- __ j(sign, &slow, Label::kNear);
-
- __ test(right_reg, Operand(right_reg));
- __ j(not_sign, &both_positive, Label::kNear);
- // The sign of the divisor doesn't matter.
- __ neg(right_reg);
-
- __ bind(&both_positive);
- // If the dividend is smaller than the nonnegative
- // divisor, the dividend is the result.
- __ cmp(left_reg, Operand(right_reg));
- __ j(less, &remainder_eq_dividend, Label::kNear);
+    // Check for kMinInt % -1; idiv would signal a divide error. We
+    // have to deopt if we care about -0, because we can't return that.
+ if (left->RangeCanInclude(kMinInt) && right->RangeCanInclude(-1)) {
+ Label no_overflow_possible;
+ __ cmp(left_reg, kMinInt);
+ __ j(not_equal, &no_overflow_possible, Label::kNear);
+ __ cmp(right_reg, -1);
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(equal, instr->environment());
+ } else {
+ __ j(not_equal, &no_overflow_possible, Label::kNear);
+ __ Set(result_reg, Immediate(0));
+ __ jmp(&done, Label::kNear);
+ }
+ __ bind(&no_overflow_possible);
+ }
- // Check if the divisor is a PowerOfTwo integer.
- Register scratch = ToRegister(instr->temp());
- __ mov(scratch, right_reg);
- __ sub(Operand(scratch), Immediate(1));
- __ test(scratch, Operand(right_reg));
- __ j(not_zero, &do_subtraction, Label::kNear);
- __ and_(left_reg, Operand(scratch));
- __ jmp(&remainder_eq_dividend, Label::kNear);
-
- __ bind(&do_subtraction);
- const int kUnfolds = 3;
- // Try a few subtractions of the dividend.
- __ mov(scratch, left_reg);
- for (int i = 0; i < kUnfolds; i++) {
- // Reduce the dividend by the divisor.
- __ sub(left_reg, Operand(right_reg));
- // Check if the dividend is less than the divisor.
- __ cmp(left_reg, Operand(right_reg));
- __ j(less, &remainder_eq_dividend, Label::kNear);
- }
- __ mov(left_reg, scratch);
-
- // Slow case, using idiv instruction.
- __ bind(&slow);
- // Sign extend to edx.
+ // Sign extend dividend in eax into edx:eax.
__ cdq();
- // Check for (0 % -x) that will produce negative zero.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ // If we care about -0, test if the dividend is <0 and the result is 0.
+ if (left->CanBeNegative() &&
+ hmod->CanBeZero() &&
+ hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label positive_left;
- Label done;
__ test(left_reg, Operand(left_reg));
__ j(not_sign, &positive_left, Label::kNear);
__ idiv(right_reg);
-
- // Test the remainder for 0, because then the result would be -0.
__ test(result_reg, Operand(result_reg));
- __ j(not_zero, &done, Label::kNear);
-
- DeoptimizeIf(no_condition, instr->environment());
+ DeoptimizeIf(zero, instr->environment());
+ __ jmp(&done, Label::kNear);
__ bind(&positive_left);
- __ idiv(right_reg);
- __ bind(&done);
- } else {
- __ idiv(right_reg);
}
- __ jmp(&done, Label::kNear);
-
- __ bind(&remainder_eq_dividend);
- __ mov(result_reg, left_reg);
-
+ __ idiv(right_reg);
__ bind(&done);
}
}
void LCodeGen::DoDivI(LDivI* instr) {
+ if (!instr->is_flooring() && instr->hydrogen()->HasPowerOf2Divisor()) {
+ Register dividend = ToRegister(instr->left());
+ int32_t divisor = instr->hydrogen()->right()->GetInteger32Constant();
+ int32_t test_value = 0;
+ int32_t power = 0;
+
+ if (divisor > 0) {
+ test_value = divisor - 1;
+ power = WhichPowerOf2(divisor);
+ } else {
+ // Check for (0 / -x) that will produce negative zero.
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ __ test(dividend, Operand(dividend));
+ DeoptimizeIf(zero, instr->environment());
+ }
+ // Check for (kMinInt / -1).
+ if (divisor == -1 && instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+ __ cmp(dividend, kMinInt);
+ DeoptimizeIf(zero, instr->environment());
+ }
+      test_value = -divisor - 1;
+ power = WhichPowerOf2(-divisor);
+ }
+
+ if (test_value != 0) {
+ if (instr->hydrogen()->CheckFlag(
+ HInstruction::kAllUsesTruncatingToInt32)) {
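+        // All uses truncate, so round the quotient toward zero: shift the
+        // absolute value of the dividend and then restore the sign.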
+ Label done, negative;
+ __ cmp(dividend, 0);
+ __ j(less, &negative, Label::kNear);
+ __ sar(dividend, power);
+ if (divisor < 0) __ neg(dividend);
+ __ jmp(&done, Label::kNear);
+
+ __ bind(&negative);
+ __ neg(dividend);
+ __ sar(dividend, power);
+ if (divisor > 0) __ neg(dividend);
+ __ bind(&done);
+ return; // Don't fall through to "__ neg" below.
+ } else {
+ // Deoptimize if remainder is not 0.
+ __ test(dividend, Immediate(test_value));
+ DeoptimizeIf(not_zero, instr->environment());
+ __ sar(dividend, power);
+ }
+ }
+
+ if (divisor < 0) __ neg(dividend);
+
+ return;
+ }
+
LOperand* right = instr->right();
ASSERT(ToRegister(instr->result()).is(eax));
ASSERT(ToRegister(instr->left()).is(eax));
@@ -1030,13 +1530,13 @@ void LCodeGen::DoDivI(LDivI* instr) {
// Check for x / 0.
Register right_reg = ToRegister(right);
- if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
+ if (instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) {
__ test(right_reg, ToOperand(right));
DeoptimizeIf(zero, instr->environment());
}
// Check for (0 / -x) that will produce negative zero.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ if (instr->hydrogen_value()->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label left_not_zero;
__ test(left_reg, Operand(left_reg));
__ j(not_zero, &left_not_zero, Label::kNear);
@@ -1045,8 +1545,8 @@ void LCodeGen::DoDivI(LDivI* instr) {
__ bind(&left_not_zero);
}
- // Check for (-kMinInt / -1).
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+ // Check for (kMinInt / -1).
+ if (instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow)) {
Label left_not_min_int;
__ cmp(left_reg, kMinInt);
__ j(not_zero, &left_not_min_int, Label::kNear);
@@ -1059,9 +1559,20 @@ void LCodeGen::DoDivI(LDivI* instr) {
__ cdq();
__ idiv(right_reg);
- // Deoptimize if remainder is not 0.
- __ test(edx, Operand(edx));
- DeoptimizeIf(not_zero, instr->environment());
+ if (instr->is_flooring()) {
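+    // idiv truncates toward zero; for flooring division, subtract one from
+    // the quotient when the remainder is non-zero and the operands have
+    // opposite signs (edx ^ right has its sign bit set exactly then).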
+ Label done;
+ __ test(edx, edx);
+ __ j(zero, &done, Label::kNear);
+ __ xor_(edx, right_reg);
+ __ sar(edx, 31);
+ __ add(eax, edx);
+ __ bind(&done);
+ } else if (!instr->hydrogen()->CheckFlag(
+ HInstruction::kAllUsesTruncatingToInt32)) {
+ // Deoptimize if remainder is not 0.
+ __ test(edx, Operand(edx));
+ DeoptimizeIf(not_zero, instr->environment());
+ }
}
@@ -1210,9 +1721,9 @@ void LCodeGen::DoMulI(LMulI* instr) {
case 9:
__ lea(left, Operand(left, left, times_8, 0));
break;
- case 16:
- __ shl(left, 4);
- break;
+ case 16:
+ __ shl(left, 4);
+ break;
default:
__ imul(left, left, constant);
break;
@@ -1221,6 +1732,9 @@ void LCodeGen::DoMulI(LMulI* instr) {
__ imul(left, left, constant);
}
} else {
+ if (instr->hydrogen()->representation().IsSmi()) {
+ __ SmiUntag(left);
+ }
__ imul(left, ToOperand(right));
}
@@ -1257,7 +1771,9 @@ void LCodeGen::DoBitI(LBitI* instr) {
ASSERT(left->IsRegister());
if (right->IsConstantOperand()) {
- int right_operand = ToInteger32(LConstantOperand::cast(right));
+ int32_t right_operand =
+ ToRepresentation(LConstantOperand::cast(right),
+ instr->hydrogen()->representation());
switch (instr->op()) {
case Token::BIT_AND:
__ and_(ToRegister(left), right_operand);
@@ -1266,7 +1782,11 @@ void LCodeGen::DoBitI(LBitI* instr) {
__ or_(ToRegister(left), right_operand);
break;
case Token::BIT_XOR:
- __ xor_(ToRegister(left), right_operand);
+ if (right_operand == int32_t(~0)) {
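+          // x ^ ~0 is ~x, so emit the shorter one-operand not instruction.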
+ __ not_(ToRegister(left));
+ } else {
+ __ xor_(ToRegister(left), right_operand);
+ }
break;
default:
UNREACHABLE();
@@ -1300,14 +1820,21 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
ASSERT(ToRegister(right).is(ecx));
switch (instr->op()) {
+ case Token::ROR:
+ __ ror_cl(ToRegister(left));
+ if (instr->can_deopt()) {
+ __ test(ToRegister(left), ToRegister(left));
+ DeoptimizeIf(sign, instr->environment());
+ }
+ break;
case Token::SAR:
__ sar_cl(ToRegister(left));
break;
case Token::SHR:
__ shr_cl(ToRegister(left));
if (instr->can_deopt()) {
- __ test(ToRegister(left), Immediate(0x80000000));
- DeoptimizeIf(not_zero, instr->environment());
+ __ test(ToRegister(left), ToRegister(left));
+ DeoptimizeIf(sign, instr->environment());
}
break;
case Token::SHL:
@@ -1321,6 +1848,14 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
int value = ToInteger32(LConstantOperand::cast(right));
uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
switch (instr->op()) {
+ case Token::ROR:
+ if (shift_count == 0 && instr->can_deopt()) {
+ __ test(ToRegister(left), ToRegister(left));
+ DeoptimizeIf(sign, instr->environment());
+ } else {
+ __ ror(ToRegister(left), shift_count);
+ }
+ break;
case Token::SAR:
if (shift_count != 0) {
__ sar(ToRegister(left), shift_count);
@@ -1328,15 +1863,24 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
break;
case Token::SHR:
if (shift_count == 0 && instr->can_deopt()) {
- __ test(ToRegister(left), Immediate(0x80000000));
- DeoptimizeIf(not_zero, instr->environment());
+ __ test(ToRegister(left), ToRegister(left));
+ DeoptimizeIf(sign, instr->environment());
} else {
__ shr(ToRegister(left), shift_count);
}
break;
case Token::SHL:
if (shift_count != 0) {
- __ shl(ToRegister(left), shift_count);
+ if (instr->hydrogen_value()->representation().IsSmi() &&
+ instr->can_deopt()) {
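+            // Shift by one less and let SmiTag perform the final shift; it
+            // sets the overflow flag, so we can deoptimize when the result
+            // does not fit in a smi.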
+ if (shift_count != 1) {
+ __ shl(ToRegister(left), shift_count - 1);
+ }
+ __ SmiTag(ToRegister(left));
+ DeoptimizeIf(overflow, instr->environment());
+ } else {
+ __ shl(ToRegister(left), shift_count);
+ }
}
break;
default:
@@ -1353,7 +1897,8 @@ void LCodeGen::DoSubI(LSubI* instr) {
ASSERT(left->Equals(instr->result()));
if (right->IsConstantOperand()) {
- __ sub(ToOperand(left), ToInteger32Immediate(right));
+ __ sub(ToOperand(left),
+ ToImmediate(right, instr->hydrogen()->representation()));
} else {
__ sub(ToRegister(left), ToOperand(right));
}
@@ -1364,73 +1909,73 @@ void LCodeGen::DoSubI(LSubI* instr) {
void LCodeGen::DoConstantI(LConstantI* instr) {
- ASSERT(instr->result()->IsRegister());
+ __ Set(ToRegister(instr->result()), Immediate(instr->value()));
+}
+
+
+void LCodeGen::DoConstantS(LConstantS* instr) {
__ Set(ToRegister(instr->result()), Immediate(instr->value()));
}
void LCodeGen::DoConstantD(LConstantD* instr) {
- ASSERT(instr->result()->IsDoubleRegister());
- XMMRegister res = ToDoubleRegister(instr->result());
double v = instr->value();
- // Use xor to produce +0.0 in a fast and compact way, but avoid to
- // do so if the constant is -0.0.
- if (BitCast<uint64_t, double>(v) == 0) {
- __ xorps(res, res);
+ uint64_t int_val = BitCast<uint64_t, double>(v);
+ int32_t lower = static_cast<int32_t>(int_val);
+ int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
+ ASSERT(instr->result()->IsDoubleRegister());
+
+ if (!CpuFeatures::IsSafeForSnapshot(SSE2)) {
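+    // Without SSE2, materialize the double on the stack (upper half first so
+    // the value is laid out little-endian) and load it onto the x87 stack.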
+ __ push(Immediate(upper));
+ __ push(Immediate(lower));
+ X87Register reg = ToX87Register(instr->result());
+ X87Mov(reg, Operand(esp, 0));
+ __ add(Operand(esp), Immediate(kDoubleSize));
} else {
- Register temp = ToRegister(instr->temp());
- uint64_t int_val = BitCast<uint64_t, double>(v);
- int32_t lower = static_cast<int32_t>(int_val);
- int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
- if (CpuFeatures::IsSupported(SSE4_1)) {
- CpuFeatures::Scope scope(SSE4_1);
- if (lower != 0) {
- __ Set(temp, Immediate(lower));
- __ movd(res, Operand(temp));
- __ Set(temp, Immediate(upper));
- __ pinsrd(res, Operand(temp), 1);
+ CpuFeatureScope scope1(masm(), SSE2);
+ XMMRegister res = ToDoubleRegister(instr->result());
+ if (int_val == 0) {
+ __ xorps(res, res);
+ } else {
+ Register temp = ToRegister(instr->temp());
+ if (CpuFeatures::IsSupported(SSE4_1)) {
+ CpuFeatureScope scope2(masm(), SSE4_1);
+ if (lower != 0) {
+ __ Set(temp, Immediate(lower));
+ __ movd(res, Operand(temp));
+ __ Set(temp, Immediate(upper));
+ __ pinsrd(res, Operand(temp), 1);
+ } else {
+ __ xorps(res, res);
+ __ Set(temp, Immediate(upper));
+ __ pinsrd(res, Operand(temp), 1);
+ }
} else {
- __ xorps(res, res);
__ Set(temp, Immediate(upper));
- __ pinsrd(res, Operand(temp), 1);
- }
- } else {
- __ Set(temp, Immediate(upper));
- __ movd(res, Operand(temp));
- __ psllq(res, 32);
- if (lower != 0) {
- __ Set(temp, Immediate(lower));
- __ movd(xmm0, Operand(temp));
- __ por(res, xmm0);
+ __ movd(res, Operand(temp));
+ __ psllq(res, 32);
+ if (lower != 0) {
+ XMMRegister xmm_scratch = double_scratch0();
+ __ Set(temp, Immediate(lower));
+ __ movd(xmm_scratch, Operand(temp));
+ __ por(res, xmm_scratch);
+ }
}
}
}
}
-void LCodeGen::DoConstantT(LConstantT* instr) {
- Register reg = ToRegister(instr->result());
- Handle<Object> handle = instr->value();
- if (handle->IsHeapObject()) {
- __ LoadHeapObject(reg, Handle<HeapObject>::cast(handle));
- } else {
- __ Set(reg, Immediate(handle));
- }
-}
-
-
-void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
- Register result = ToRegister(instr->result());
- Register array = ToRegister(instr->value());
- __ mov(result, FieldOperand(array, JSArray::kLengthOffset));
+void LCodeGen::DoConstantE(LConstantE* instr) {
+ __ lea(ToRegister(instr->result()), Operand::StaticVariable(instr->value()));
}
-void LCodeGen::DoFixedArrayBaseLength(
- LFixedArrayBaseLength* instr) {
- Register result = ToRegister(instr->result());
- Register array = ToRegister(instr->value());
- __ mov(result, FieldOperand(array, FixedArrayBase::kLengthOffset));
+void LCodeGen::DoConstantT(LConstantT* instr) {
+ Register reg = ToRegister(instr->result());
+ Handle<Object> handle = instr->value(isolate());
+ AllowDeferredHandleDereference smi_check;
+ __ LoadObject(reg, handle);
}
@@ -1463,8 +2008,11 @@ void LCodeGen::DoValueOf(LValueOf* instr) {
ASSERT(input.is(result));
Label done;
- // If the object is a smi return the object.
- __ JumpIfSmi(input, &done, Label::kNear);
+
+ if (!instr->hydrogen()->value()->IsHeapObject()) {
+    // If the object is a smi, return the object.
+ __ JumpIfSmi(input, &done, Label::kNear);
+ }
// If the object is not a value type, return the object.
__ CmpObjectType(input, JS_VALUE_TYPE, map);
@@ -1511,10 +2059,33 @@ void LCodeGen::DoDateField(LDateField* instr) {
}
-void LCodeGen::DoBitNotI(LBitNotI* instr) {
- LOperand* input = instr->value();
- ASSERT(input->Equals(instr->result()));
- __ not_(ToRegister(input));
+void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
+ Register string = ToRegister(instr->string());
+ Register index = ToRegister(instr->index());
+ Register value = ToRegister(instr->value());
+ String::Encoding encoding = instr->encoding();
+
+ if (FLAG_debug_code) {
+ __ push(value);
+ __ mov(value, FieldOperand(string, HeapObject::kMapOffset));
+ __ movzx_b(value, FieldOperand(value, Map::kInstanceTypeOffset));
+
+ __ and_(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
+ static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+ static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+ __ cmp(value, Immediate(encoding == String::ONE_BYTE_ENCODING
+ ? one_byte_seq_type : two_byte_seq_type));
+ __ Check(equal, kUnexpectedStringType);
+ __ pop(value);
+ }
+
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ __ mov_b(FieldOperand(string, index, times_1, SeqString::kHeaderSize),
+ value);
+ } else {
+ __ mov_w(FieldOperand(string, index, times_2, SeqString::kHeaderSize),
+ value);
+ }
}
@@ -1533,36 +2104,48 @@ void LCodeGen::DoThrow(LThrow* instr) {
void LCodeGen::DoAddI(LAddI* instr) {
LOperand* left = instr->left();
LOperand* right = instr->right();
- ASSERT(left->Equals(instr->result()));
- if (right->IsConstantOperand()) {
- __ add(ToOperand(left), ToInteger32Immediate(right));
+ if (LAddI::UseLea(instr->hydrogen()) && !left->Equals(instr->result())) {
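+    // When the result lives in a different register, use lea to compute the
+    // sum without clobbering the inputs. Note that this path performs no
+    // overflow check; see the plain add in the else branch.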
+ if (right->IsConstantOperand()) {
+ int32_t offset = ToRepresentation(LConstantOperand::cast(right),
+ instr->hydrogen()->representation());
+ __ lea(ToRegister(instr->result()), MemOperand(ToRegister(left), offset));
+ } else {
+ Operand address(ToRegister(left), ToRegister(right), times_1, 0);
+ __ lea(ToRegister(instr->result()), address);
+ }
} else {
- __ add(ToRegister(left), ToOperand(right));
- }
-
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- DeoptimizeIf(overflow, instr->environment());
+ if (right->IsConstantOperand()) {
+ __ add(ToOperand(left),
+ ToImmediate(right, instr->hydrogen()->representation()));
+ } else {
+ __ add(ToRegister(left), ToOperand(right));
+ }
+ if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+ DeoptimizeIf(overflow, instr->environment());
+ }
}
}
void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
+ CpuFeatureScope scope(masm(), SSE2);
LOperand* left = instr->left();
LOperand* right = instr->right();
ASSERT(left->Equals(instr->result()));
HMathMinMax::Operation operation = instr->hydrogen()->operation();
- if (instr->hydrogen()->representation().IsInteger32()) {
+ if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
Label return_left;
Condition condition = (operation == HMathMinMax::kMathMin)
? less_equal
: greater_equal;
if (right->IsConstantOperand()) {
Operand left_op = ToOperand(left);
- Immediate right_imm = ToInteger32Immediate(right);
- __ cmp(left_op, right_imm);
+ Immediate immediate = ToImmediate(LConstantOperand::cast(instr->right()),
+ instr->hydrogen()->representation());
+ __ cmp(left_op, immediate);
__ j(condition, &return_left, Label::kNear);
- __ mov(left_op, right_imm);
+ __ mov(left_op, immediate);
} else {
Register left_reg = ToRegister(left);
Operand right_op = ToOperand(right);
@@ -1584,7 +2167,7 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
__ jmp(&return_right, Label::kNear);
__ bind(&check_zero);
- XMMRegister xmm_scratch = xmm0;
+ XMMRegister xmm_scratch = double_scratch0();
__ xorps(xmm_scratch, xmm_scratch);
__ ucomisd(left_reg, xmm_scratch);
__ j(not_equal, &return_left, Label::kNear); // left == right != 0.
@@ -1609,44 +2192,88 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
- XMMRegister left = ToDoubleRegister(instr->left());
- XMMRegister right = ToDoubleRegister(instr->right());
- XMMRegister result = ToDoubleRegister(instr->result());
- // Modulo uses a fixed result register.
- ASSERT(instr->op() == Token::MOD || left.is(result));
- switch (instr->op()) {
- case Token::ADD:
- __ addsd(left, right);
- break;
- case Token::SUB:
- __ subsd(left, right);
- break;
- case Token::MUL:
- __ mulsd(left, right);
- break;
- case Token::DIV:
- __ divsd(left, right);
- break;
- case Token::MOD: {
- // Pass two doubles as arguments on the stack.
- __ PrepareCallCFunction(4, eax);
- __ movdbl(Operand(esp, 0 * kDoubleSize), left);
- __ movdbl(Operand(esp, 1 * kDoubleSize), right);
- __ CallCFunction(
- ExternalReference::double_fp_operation(Token::MOD, isolate()),
- 4);
-
- // Return value is in st(0) on ia32.
- // Store it into the (fixed) result register.
- __ sub(Operand(esp), Immediate(kDoubleSize));
- __ fstp_d(Operand(esp, 0));
- __ movdbl(result, Operand(esp, 0));
- __ add(Operand(esp), Immediate(kDoubleSize));
- break;
+ if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
+ CpuFeatureScope scope(masm(), SSE2);
+ XMMRegister left = ToDoubleRegister(instr->left());
+ XMMRegister right = ToDoubleRegister(instr->right());
+ XMMRegister result = ToDoubleRegister(instr->result());
+ switch (instr->op()) {
+ case Token::ADD:
+ __ addsd(left, right);
+ break;
+ case Token::SUB:
+ __ subsd(left, right);
+ break;
+ case Token::MUL:
+ __ mulsd(left, right);
+ break;
+ case Token::DIV:
+ __ divsd(left, right);
+        // Don't delete this mov. It may improve performance on some CPUs
+        // when there is a mulsd depending on the result.
+ __ movaps(left, left);
+ break;
+ case Token::MOD: {
+ // Pass two doubles as arguments on the stack.
+ __ PrepareCallCFunction(4, eax);
+ __ movsd(Operand(esp, 0 * kDoubleSize), left);
+ __ movsd(Operand(esp, 1 * kDoubleSize), right);
+ __ CallCFunction(
+ ExternalReference::double_fp_operation(Token::MOD, isolate()),
+ 4);
+
+ // Return value is in st(0) on ia32.
+ // Store it into the result register.
+ __ sub(Operand(esp), Immediate(kDoubleSize));
+ __ fstp_d(Operand(esp, 0));
+ __ movsd(result, Operand(esp, 0));
+ __ add(Operand(esp), Immediate(kDoubleSize));
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else {
+ X87Register left = ToX87Register(instr->left());
+ X87Register right = ToX87Register(instr->right());
+ X87Register result = ToX87Register(instr->result());
+ if (instr->op() != Token::MOD) {
+ X87PrepareBinaryOp(left, right, result);
+ }
+ switch (instr->op()) {
+ case Token::ADD:
+ __ fadd_i(1);
+ break;
+ case Token::SUB:
+ __ fsub_i(1);
+ break;
+ case Token::MUL:
+ __ fmul_i(1);
+ break;
+ case Token::DIV:
+ __ fdiv_i(1);
+ break;
+ case Token::MOD: {
+ // Pass two doubles as arguments on the stack.
+ __ PrepareCallCFunction(4, eax);
+ X87Mov(Operand(esp, 1 * kDoubleSize), right);
+ X87Mov(Operand(esp, 0), left);
+ X87Free(right);
+ ASSERT(left.is(result));
+ X87PrepareToWrite(result);
+ __ CallCFunction(
+ ExternalReference::double_fp_operation(Token::MOD, isolate()),
+ 4);
+
+ // Return value is in st(0) on ia32.
+ X87CommitWrite(result);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
}
- default:
- UNREACHABLE();
- break;
}
}
@@ -1658,26 +2285,19 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
ASSERT(ToRegister(instr->result()).is(eax));
BinaryOpStub stub(instr->op(), NO_OVERWRITE);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
__ nop(); // Signals no inlined code.
}
-int LCodeGen::GetNextEmittedBlock(int block) {
- for (int i = block + 1; i < graph()->blocks()->length(); ++i) {
- LLabel* label = chunk_->GetLabel(i);
- if (!label->HasReplacement()) return i;
- }
- return -1;
-}
-
+template<class InstrType>
+void LCodeGen::EmitBranch(InstrType instr, Condition cc) {
+ int left_block = instr->TrueDestination(chunk_);
+ int right_block = instr->FalseDestination(chunk_);
-void LCodeGen::EmitBranch(int left_block, int right_block, Condition cc) {
- int next_block = GetNextEmittedBlock(current_block_);
- right_block = chunk_->LookupDestination(right_block);
- left_block = chunk_->LookupDestination(left_block);
+ int next_block = GetNextEmittedBlock();
- if (right_block == left_block) {
+ if (right_block == left_block || cc == no_condition) {
EmitGoto(left_block);
} else if (left_block == next_block) {
__ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
@@ -1690,62 +2310,85 @@ void LCodeGen::EmitBranch(int left_block, int right_block, Condition cc) {
}
-void LCodeGen::DoBranch(LBranch* instr) {
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
+template<class InstrType>
+void LCodeGen::EmitFalseBranch(InstrType instr, Condition cc) {
+ int false_block = instr->FalseDestination(chunk_);
+ if (cc == no_condition) {
+ __ jmp(chunk_->GetAssemblyLabel(false_block));
+ } else {
+ __ j(cc, chunk_->GetAssemblyLabel(false_block));
+ }
+}
+
+void LCodeGen::DoBranch(LBranch* instr) {
Representation r = instr->hydrogen()->value()->representation();
- if (r.IsInteger32()) {
+ if (r.IsSmiOrInteger32()) {
Register reg = ToRegister(instr->value());
__ test(reg, Operand(reg));
- EmitBranch(true_block, false_block, not_zero);
+ EmitBranch(instr, not_zero);
} else if (r.IsDouble()) {
+ ASSERT(!info()->IsStub());
+ CpuFeatureScope scope(masm(), SSE2);
XMMRegister reg = ToDoubleRegister(instr->value());
- __ xorps(xmm0, xmm0);
- __ ucomisd(reg, xmm0);
- EmitBranch(true_block, false_block, not_equal);
+ XMMRegister xmm_scratch = double_scratch0();
+ __ xorps(xmm_scratch, xmm_scratch);
+ __ ucomisd(reg, xmm_scratch);
+ EmitBranch(instr, not_equal);
} else {
ASSERT(r.IsTagged());
Register reg = ToRegister(instr->value());
HType type = instr->hydrogen()->value()->type();
if (type.IsBoolean()) {
+ ASSERT(!info()->IsStub());
__ cmp(reg, factory()->true_value());
- EmitBranch(true_block, false_block, equal);
+ EmitBranch(instr, equal);
} else if (type.IsSmi()) {
+ ASSERT(!info()->IsStub());
__ test(reg, Operand(reg));
- EmitBranch(true_block, false_block, not_equal);
+ EmitBranch(instr, not_equal);
+ } else if (type.IsJSArray()) {
+ ASSERT(!info()->IsStub());
+ EmitBranch(instr, no_condition);
+ } else if (type.IsHeapNumber()) {
+ ASSERT(!info()->IsStub());
+ CpuFeatureScope scope(masm(), SSE2);
+ XMMRegister xmm_scratch = double_scratch0();
+ __ xorps(xmm_scratch, xmm_scratch);
+ __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
+ EmitBranch(instr, not_equal);
+ } else if (type.IsString()) {
+ ASSERT(!info()->IsStub());
+ __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
+ EmitBranch(instr, not_equal);
} else {
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
- // Avoid deopts in the case where we've never executed this path before.
- if (expected.IsEmpty()) expected = ToBooleanStub::all_types();
+ if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
if (expected.Contains(ToBooleanStub::UNDEFINED)) {
// undefined -> false.
__ cmp(reg, factory()->undefined_value());
- __ j(equal, false_label);
+ __ j(equal, instr->FalseLabel(chunk_));
}
if (expected.Contains(ToBooleanStub::BOOLEAN)) {
// true -> true.
__ cmp(reg, factory()->true_value());
- __ j(equal, true_label);
+ __ j(equal, instr->TrueLabel(chunk_));
// false -> false.
__ cmp(reg, factory()->false_value());
- __ j(equal, false_label);
+ __ j(equal, instr->FalseLabel(chunk_));
}
if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
// 'null' -> false.
__ cmp(reg, factory()->null_value());
- __ j(equal, false_label);
+ __ j(equal, instr->FalseLabel(chunk_));
}
if (expected.Contains(ToBooleanStub::SMI)) {
// Smis: 0 -> false, all other -> true.
__ test(reg, Operand(reg));
- __ j(equal, false_label);
- __ JumpIfSmi(reg, true_label);
+ __ j(equal, instr->FalseLabel(chunk_));
+ __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
} else if (expected.NeedsMap()) {
// If we need a map later and have a Smi -> deopt.
__ test(reg, Immediate(kSmiTagMask));
@@ -1762,14 +2405,14 @@ void LCodeGen::DoBranch(LBranch* instr) {
// Undetectable -> false.
__ test_b(FieldOperand(map, Map::kBitFieldOffset),
1 << Map::kIsUndetectable);
- __ j(not_zero, false_label);
+ __ j(not_zero, instr->FalseLabel(chunk_));
}
}
if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
// spec object -> true.
__ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
- __ j(above_equal, true_label);
+ __ j(above_equal, instr->TrueLabel(chunk_));
}
if (expected.Contains(ToBooleanStub::STRING)) {
@@ -1778,41 +2421,59 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
__ j(above_equal, &not_string, Label::kNear);
__ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
- __ j(not_zero, true_label);
- __ jmp(false_label);
+ __ j(not_zero, instr->TrueLabel(chunk_));
+ __ jmp(instr->FalseLabel(chunk_));
__ bind(&not_string);
}
+ if (expected.Contains(ToBooleanStub::SYMBOL)) {
+ // Symbol value -> true.
+ __ CmpInstanceType(map, SYMBOL_TYPE);
+ __ j(equal, instr->TrueLabel(chunk_));
+ }
+
if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
// heap number -> false iff +0, -0, or NaN.
Label not_heap_number;
__ cmp(FieldOperand(reg, HeapObject::kMapOffset),
factory()->heap_number_map());
__ j(not_equal, &not_heap_number, Label::kNear);
- __ fldz();
- __ fld_d(FieldOperand(reg, HeapNumber::kValueOffset));
- __ FCmp();
- __ j(zero, false_label);
- __ jmp(true_label);
+ if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
+ CpuFeatureScope scope(masm(), SSE2);
+ XMMRegister xmm_scratch = double_scratch0();
+ __ xorps(xmm_scratch, xmm_scratch);
+ __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
+ } else {
+ __ fldz();
+ __ fld_d(FieldOperand(reg, HeapNumber::kValueOffset));
+ __ FCmp();
+ }
+ __ j(zero, instr->FalseLabel(chunk_));
+ __ jmp(instr->TrueLabel(chunk_));
__ bind(&not_heap_number);
}
- // We've seen something for the first time -> deopt.
- DeoptimizeIf(no_condition, instr->environment());
+ if (!expected.IsGeneric()) {
+ // We've seen something for the first time -> deopt.
+ // This can only happen if we are not generic already.
+ DeoptimizeIf(no_condition, instr->environment());
+ }
}
}
}
void LCodeGen::EmitGoto(int block) {
- block = chunk_->LookupDestination(block);
- int next_block = GetNextEmittedBlock(current_block_);
- if (block != next_block) {
- __ jmp(chunk_->GetAssemblyLabel(block));
+ if (!IsNextEmittedBlock(block)) {
+ __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
}
}
+void LCodeGen::DoClobberDoubles(LClobberDoubles* instr) {
+}
+
+
void LCodeGen::DoGoto(LGoto* instr) {
EmitGoto(instr->block_id());
}
@@ -1825,6 +2486,10 @@ Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
case Token::EQ_STRICT:
cond = equal;
break;
+ case Token::NE:
+ case Token::NE_STRICT:
+ cond = not_equal;
+ break;
case Token::LT:
cond = is_unsigned ? below : less;
break;
@@ -1846,101 +2511,104 @@ Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
}
-void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
+void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
LOperand* left = instr->left();
LOperand* right = instr->right();
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
Condition cc = TokenToCondition(instr->op(), instr->is_double());
if (left->IsConstantOperand() && right->IsConstantOperand()) {
// We can statically evaluate the comparison.
double left_val = ToDouble(LConstantOperand::cast(left));
double right_val = ToDouble(LConstantOperand::cast(right));
- int next_block =
- EvalComparison(instr->op(), left_val, right_val) ? true_block
- : false_block;
+ int next_block = EvalComparison(instr->op(), left_val, right_val) ?
+ instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
EmitGoto(next_block);
} else {
if (instr->is_double()) {
+ if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
+ CpuFeatureScope scope(masm(), SSE2);
+ __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
+ } else {
+ X87LoadForUsage(ToX87Register(right), ToX87Register(left));
+ __ FCmp();
+ }
// Don't base result on EFLAGS when a NaN is involved. Instead
// jump to the false block.
- __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
- __ j(parity_even, chunk_->GetAssemblyLabel(false_block));
+ __ j(parity_even, instr->FalseLabel(chunk_));
} else {
if (right->IsConstantOperand()) {
- __ cmp(ToRegister(left), ToInteger32Immediate(right));
+ __ cmp(ToOperand(left),
+ ToImmediate(right, instr->hydrogen()->representation()));
} else if (left->IsConstantOperand()) {
- __ cmp(ToOperand(right), ToInteger32Immediate(left));
+ __ cmp(ToOperand(right),
+ ToImmediate(left, instr->hydrogen()->representation()));
// We transposed the operands. Reverse the condition.
cc = ReverseCondition(cc);
} else {
__ cmp(ToRegister(left), ToOperand(right));
}
}
- EmitBranch(true_block, false_block, cc);
+ EmitBranch(instr, cc);
}
}
void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
Register left = ToRegister(instr->left());
- Operand right = ToOperand(instr->right());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
-
- __ cmp(left, Operand(right));
- EmitBranch(true_block, false_block, equal);
-}
-
-void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
- Register left = ToRegister(instr->left());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- __ cmp(left, instr->hydrogen()->right());
- EmitBranch(true_block, false_block, equal);
+ if (instr->right()->IsConstantOperand()) {
+ Handle<Object> right = ToHandle(LConstantOperand::cast(instr->right()));
+ __ CmpObject(left, right);
+ } else {
+ Operand right = ToOperand(instr->right());
+ __ cmp(left, right);
+ }
+ EmitBranch(instr, equal);
}
-void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) {
- Register reg = ToRegister(instr->value());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- // If the expression is known to be untagged or a smi, then it's definitely
- // not null, and it can't be a an undetectable object.
- if (instr->hydrogen()->representation().IsSpecialization() ||
- instr->hydrogen()->type().IsSmi()) {
- EmitGoto(false_block);
+void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
+ if (instr->hydrogen()->representation().IsTagged()) {
+ Register input_reg = ToRegister(instr->object());
+ __ cmp(input_reg, factory()->the_hole_value());
+ EmitBranch(instr, equal);
return;
}
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- Handle<Object> nil_value = instr->nil() == kNullValue ?
- factory()->null_value() :
- factory()->undefined_value();
- __ cmp(reg, nil_value);
- if (instr->kind() == kStrictEquality) {
- EmitBranch(true_block, false_block, equal);
+ bool use_sse2 = CpuFeatures::IsSupported(SSE2);
+ if (use_sse2) {
+ CpuFeatureScope scope(masm(), SSE2);
+ XMMRegister input_reg = ToDoubleRegister(instr->object());
+ __ ucomisd(input_reg, input_reg);
+ EmitFalseBranch(instr, parity_odd);
} else {
- Handle<Object> other_nil_value = instr->nil() == kNullValue ?
- factory()->undefined_value() :
- factory()->null_value();
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
- __ j(equal, true_label);
- __ cmp(reg, other_nil_value);
- __ j(equal, true_label);
- __ JumpIfSmi(reg, false_label);
- // Check for undetectable objects by looking in the bit field in
- // the map. The object has already been smi checked.
- Register scratch = ToRegister(instr->temp());
- __ mov(scratch, FieldOperand(reg, HeapObject::kMapOffset));
- __ movzx_b(scratch, FieldOperand(scratch, Map::kBitFieldOffset));
- __ test(scratch, Immediate(1 << Map::kIsUndetectable));
- EmitBranch(true_block, false_block, not_zero);
+    // Put the value on top of the x87 stack.
+ X87Register src = ToX87Register(instr->object());
+ X87LoadForUsage(src);
+ __ fld(0);
+ __ fld(0);
+ __ FCmp();
+ Label ok;
+ __ j(parity_even, &ok);
+ __ fstp(0);
+ EmitFalseBranch(instr, no_condition);
+ __ bind(&ok);
}
+
+
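+  // Spill the double to the stack so its upper word can be compared against
+  // the hole NaN bit pattern.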
+ __ sub(esp, Immediate(kDoubleSize));
+ if (use_sse2) {
+ CpuFeatureScope scope(masm(), SSE2);
+ XMMRegister input_reg = ToDoubleRegister(instr->object());
+ __ movsd(MemOperand(esp, 0), input_reg);
+ } else {
+ __ fstp_d(MemOperand(esp, 0));
+ }
+
+ __ add(esp, Immediate(kDoubleSize));
+ int offset = sizeof(kHoleNanUpper32);
+ __ cmp(MemOperand(esp, -offset), Immediate(kHoleNanUpper32));
+ EmitBranch(instr, equal);
}
@@ -1971,21 +2639,20 @@ void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
Register reg = ToRegister(instr->value());
Register temp = ToRegister(instr->temp());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
- Condition true_cond = EmitIsObject(reg, temp, false_label, true_label);
+ Condition true_cond = EmitIsObject(
+ reg, temp, instr->FalseLabel(chunk_), instr->TrueLabel(chunk_));
- EmitBranch(true_block, false_block, true_cond);
+ EmitBranch(instr, true_cond);
}
Condition LCodeGen::EmitIsString(Register input,
Register temp1,
- Label* is_not_string) {
- __ JumpIfSmi(input, is_not_string);
+ Label* is_not_string,
+ SmiCheck check_needed = INLINE_SMI_CHECK) {
+ if (check_needed == INLINE_SMI_CHECK) {
+ __ JumpIfSmi(input, is_not_string);
+ }
Condition cond = masm_->IsObjectStringType(input, temp1, temp1);
@@ -1997,24 +2664,22 @@ void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
Register reg = ToRegister(instr->value());
Register temp = ToRegister(instr->temp());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
+ SmiCheck check_needed =
+ instr->hydrogen()->value()->IsHeapObject()
+ ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
- Condition true_cond = EmitIsString(reg, temp, false_label);
+ Condition true_cond = EmitIsString(
+ reg, temp, instr->FalseLabel(chunk_), check_needed);
- EmitBranch(true_block, false_block, true_cond);
+ EmitBranch(instr, true_cond);
}
void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
Operand input = ToOperand(instr->value());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
__ test(input, Immediate(kSmiTagMask));
- EmitBranch(true_block, false_block, zero);
+ EmitBranch(instr, zero);
}
@@ -2022,15 +2687,14 @@ void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
Register input = ToRegister(instr->value());
Register temp = ToRegister(instr->temp());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfSmi(input, chunk_->GetAssemblyLabel(false_block));
+ if (!instr->hydrogen()->value()->IsHeapObject()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ __ JumpIfSmi(input, instr->FalseLabel(chunk_));
+ }
__ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
__ test_b(FieldOperand(temp, Map::kBitFieldOffset),
1 << Map::kIsUndetectable);
- EmitBranch(true_block, false_block, not_zero);
+ EmitBranch(instr, not_zero);
}
@@ -2056,16 +2720,14 @@ static Condition ComputeCompareCondition(Token::Value op) {
void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
Token::Value op = instr->op();
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- Handle<Code> ic = CompareIC::GetUninitialized(op);
+ Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
CallCode(ic, RelocInfo::CODE_TARGET, instr);
Condition condition = ComputeCompareCondition(op);
__ test(eax, Operand(eax));
- EmitBranch(true_block, false_block, condition);
+ EmitBranch(instr, condition);
}
@@ -2093,15 +2755,12 @@ void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
Register input = ToRegister(instr->value());
Register temp = ToRegister(instr->temp());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
- __ JumpIfSmi(input, false_label);
+ if (!instr->hydrogen()->value()->IsHeapObject()) {
+ __ JumpIfSmi(input, instr->FalseLabel(chunk_));
+ }
__ CmpObjectType(input, TestType(instr->hydrogen()), temp);
- EmitBranch(true_block, false_block, BranchCondition(instr->hydrogen()));
+ EmitBranch(instr, BranchCondition(instr->hydrogen()));
}
@@ -2120,12 +2779,9 @@ void LCodeGen::DoHasCachedArrayIndexAndBranch(
LHasCachedArrayIndexAndBranch* instr) {
Register input = ToRegister(instr->value());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
__ test(FieldOperand(input, String::kHashFieldOffset),
Immediate(String::kContainsCachedArrayIndexMask));
- EmitBranch(true_block, false_block, equal);
+ EmitBranch(instr, equal);
}
@@ -2142,7 +2798,7 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
ASSERT(!temp.is(temp2));
__ JumpIfSmi(input, is_false);
- if (class_name->IsEqualTo(CStrVector("Function"))) {
+ if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Function"))) {
// Assuming the following assertions, we can use the same compares to test
// for both being a function type and being in the object type range.
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
@@ -2172,7 +2828,7 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
__ mov(temp, FieldOperand(temp, Map::kConstructorOffset));
// Objects with a non-function constructor have class 'Object'.
__ CmpObjectType(temp, JS_FUNCTION_TYPE, temp2);
- if (class_name->IsEqualTo(CStrVector("Object"))) {
+ if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Object"))) {
__ j(not_equal, is_true);
} else {
__ j(not_equal, is_false);
@@ -2183,12 +2839,12 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
__ mov(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
__ mov(temp, FieldOperand(temp,
SharedFunctionInfo::kInstanceClassNameOffset));
- // The class name we are testing against is a symbol because it's a literal.
- // The name in the constructor is a symbol because of the way the context is
- // booted. This routine isn't expected to work for random API-created
+ // The class name we are testing against is internalized since it's a literal.
+ // The name in the constructor is internalized because of the way the context
+ // is booted. This routine isn't expected to work for random API-created
// classes and it doesn't have to because you can't access it with natives
- // syntax. Since both sides are symbols it is sufficient to use an identity
- // comparison.
+ // syntax. Since both sides are internalized it is sufficient to use an
+ // identity comparison.
__ cmp(temp, class_name);
// End with the answer in the z flag.
}
@@ -2201,25 +2857,17 @@ void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
Handle<String> class_name = instr->hydrogen()->class_name();
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
- EmitClassOfTest(true_label, false_label, class_name, input, temp, temp2);
+ EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
+ class_name, input, temp, temp2);
- EmitBranch(true_block, false_block, equal);
+ EmitBranch(instr, equal);
}
void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
Register reg = ToRegister(instr->value());
- int true_block = instr->true_block_id();
- int false_block = instr->false_block_id();
-
__ cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
- EmitBranch(true_block, false_block, equal);
+ EmitBranch(instr, equal);
}
@@ -2227,7 +2875,7 @@ void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
// Object and function are in fixed registers defined by the stub.
ASSERT(ToRegister(instr->context()).is(esi));
InstanceofStub stub(InstanceofStub::kArgsInRegisters);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
Label true_value, done;
__ test(eax, Operand(eax));
@@ -2241,15 +2889,16 @@ void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
- class DeferredInstanceOfKnownGlobal: public LDeferredCode {
+ class DeferredInstanceOfKnownGlobal V8_FINAL : public LDeferredCode {
public:
DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
- LInstanceOfKnownGlobal* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
+ LInstanceOfKnownGlobal* instr,
+ const X87Stack& x87_stack)
+ : LDeferredCode(codegen, x87_stack), instr_(instr) { }
+ virtual void Generate() V8_OVERRIDE {
codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
}
- virtual LInstruction* instr() { return instr_; }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
Label* map_check() { return &map_check_; }
private:
LInstanceOfKnownGlobal* instr_;
@@ -2257,7 +2906,7 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
};
DeferredInstanceOfKnownGlobal* deferred;
- deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
+ deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr, x87_stack_);
Label done, false_result;
Register object = ToRegister(instr->value());
@@ -2273,9 +2922,8 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
Register map = ToRegister(instr->temp());
__ mov(map, FieldOperand(object, HeapObject::kMapOffset));
__ bind(deferred->map_check()); // Label for calculating code patching.
- Handle<JSGlobalPropertyCell> cache_cell =
- factory()->NewJSGlobalPropertyCell(factory()->the_hole_value());
- __ cmp(map, Operand::Cell(cache_cell)); // Patched to cached map.
+ Handle<Cell> cache_cell = factory()->NewCell(factory()->the_hole_value());
+ __ cmp(map, Operand::ForCell(cache_cell)); // Patched to cached map.
__ j(not_equal, &cache_miss, Label::kNear);
__ mov(eax, factory()->the_hole_value()); // Patched to either true or false.
__ jmp(&done);
@@ -2328,7 +2976,7 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
int delta = masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
__ mov(temp, Immediate(delta));
__ StoreToSafepointRegisterSlot(temp, temp);
- CallCodeGeneric(stub.GetCode(),
+ CallCodeGeneric(stub.GetCode(isolate()),
RelocInfo::CODE_TARGET,
instr,
RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
@@ -2345,7 +2993,7 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
void LCodeGen::DoCmpT(LCmpT* instr) {
Token::Value op = instr->op();
- Handle<Code> ic = CompareIC::GetUninitialized(op);
+ Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
CallCode(ic, RelocInfo::CODE_TARGET, instr);
Condition condition = ComputeCompareCondition(op);
@@ -2360,8 +3008,45 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
}
+void LCodeGen::EmitReturn(LReturn* instr, bool dynamic_frame_alignment) {
+ int extra_value_count = dynamic_frame_alignment ? 2 : 1;
+
+ if (instr->has_constant_parameter_count()) {
+ int parameter_count = ToInteger32(instr->constant_parameter_count());
+ if (dynamic_frame_alignment && FLAG_debug_code) {
+ __ cmp(Operand(esp,
+ (parameter_count + extra_value_count) * kPointerSize),
+ Immediate(kAlignmentZapValue));
+ __ Assert(equal, kExpectedAlignmentMarker);
+ }
+ __ Ret((parameter_count + extra_value_count) * kPointerSize, ecx);
+ } else {
+ Register reg = ToRegister(instr->parameter_count());
+ // The argument count parameter is a smi.
+ __ SmiUntag(reg);
+ Register return_addr_reg = reg.is(ecx) ? ebx : ecx;
+ if (dynamic_frame_alignment && FLAG_debug_code) {
+ ASSERT(extra_value_count == 2);
+ __ cmp(Operand(esp, reg, times_pointer_size,
+ extra_value_count * kPointerSize),
+ Immediate(kAlignmentZapValue));
+ __ Assert(equal, kExpectedAlignmentMarker);
+ }
+
+ // Emit code to restore the stack based on instr->parameter_count().
+ __ pop(return_addr_reg); // Save the return address.
+ if (dynamic_frame_alignment) {
+ __ inc(reg); // 1 more for alignment
+ }
+ __ shl(reg, kPointerSizeLog2);
+ __ add(esp, reg);
+ __ jmp(return_addr_reg);
+ }
+}
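// A minimal standalone sketch (not part of the patch) of the stack arithmetic
// behind EmitReturn above: the untagged argument count plus the return address
// and, when the frame was dynamically aligned, one padding word, all scaled by
// the ia32 pointer size. The helper name is illustrative only.
#include <cstdint>

uint32_t ReturnStackAdjustment(uint32_t parameter_count,
                               bool dynamic_frame_alignment) {
  const uint32_t kPointerSize = 4;  // ia32
  // Mirrors the local extra_value_count: the return address, plus the
  // alignment zap word when dynamic frame alignment is in effect.
  uint32_t extra_value_count = dynamic_frame_alignment ? 2 : 1;
  return (parameter_count + extra_value_count) * kPointerSize;
}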
+
+
void LCodeGen::DoReturn(LReturn* instr) {
- if (FLAG_trace) {
+ if (FLAG_trace && info()->IsOptimizing()) {
// Preserve the return value on the stack and rely on the runtime call
// to return the value in the same register. We're leaving the code
// managed by the register allocator and tearing down the frame, it's
@@ -2370,32 +3055,49 @@ void LCodeGen::DoReturn(LReturn* instr) {
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
__ CallRuntime(Runtime::kTraceExit, 1);
}
+ if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) {
+ ASSERT(NeedsEagerFrame());
+ CpuFeatureScope scope(masm(), SSE2);
+ BitVector* doubles = chunk()->allocated_double_registers();
+ BitVector::Iterator save_iterator(doubles);
+ int count = 0;
+ while (!save_iterator.Done()) {
+ __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()),
+ MemOperand(esp, count * kDoubleSize));
+ save_iterator.Advance();
+ count++;
+ }
+ }
if (dynamic_frame_alignment_) {
// Fetch the state of the dynamic frame alignment.
__ mov(edx, Operand(ebp,
JavaScriptFrameConstants::kDynamicAlignmentStateOffset));
}
- __ mov(esp, ebp);
- __ pop(ebp);
+ int no_frame_start = -1;
+ if (NeedsEagerFrame()) {
+ __ mov(esp, ebp);
+ __ pop(ebp);
+ no_frame_start = masm_->pc_offset();
+ }
if (dynamic_frame_alignment_) {
Label no_padding;
__ cmp(edx, Immediate(kNoAlignmentPadding));
__ j(equal, &no_padding);
- if (FLAG_debug_code) {
- __ cmp(Operand(esp, (GetParameterCount() + 2) * kPointerSize),
- Immediate(kAlignmentZapValue));
- __ Assert(equal, "expected alignment marker");
- }
- __ Ret((GetParameterCount() + 2) * kPointerSize, ecx);
+
+ EmitReturn(instr, true);
__ bind(&no_padding);
}
- __ Ret((GetParameterCount() + 1) * kPointerSize, ecx);
+
+ EmitReturn(instr, false);
+ if (no_frame_start != -1) {
+ info()->AddNoFrameRange(no_frame_start, masm_->pc_offset());
+ }
}
void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
Register result = ToRegister(instr->result());
- __ mov(result, Operand::Cell(instr->hydrogen()->cell()));
+ __ mov(result, Operand::ForCell(instr->hydrogen()->cell().handle()));
if (instr->hydrogen()->RequiresHoleCheck()) {
__ cmp(result, factory()->the_hole_value());
DeoptimizeIf(equal, instr->environment());
@@ -2418,19 +3120,19 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
Register value = ToRegister(instr->value());
- Handle<JSGlobalPropertyCell> cell_handle = instr->hydrogen()->cell();
+ Handle<PropertyCell> cell_handle = instr->hydrogen()->cell().handle();
// If the cell we are storing to contains the hole it could have
// been deleted from the property dictionary. In that case, we need
// to update the property details in the property dictionary to mark
// it as no longer deleted. We deoptimize in that case.
if (instr->hydrogen()->RequiresHoleCheck()) {
- __ cmp(Operand::Cell(cell_handle), factory()->the_hole_value());
+ __ cmp(Operand::ForCell(cell_handle), factory()->the_hole_value());
DeoptimizeIf(equal, instr->environment());
}
// Store the value.
- __ mov(Operand::Cell(cell_handle), value);
+ __ mov(Operand::ForCell(cell_handle), value);
// Cells are always rescanned, so no write barrier here.
}
@@ -2485,16 +3187,16 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
__ mov(target, value);
if (instr->hydrogen()->NeedsWriteBarrier()) {
- HType type = instr->hydrogen()->value()->type();
SmiCheck check_needed =
- type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+ instr->hydrogen()->value()->IsHeapObject()
+ ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
Register temp = ToRegister(instr->temp());
int offset = Context::SlotOffset(instr->slot_index());
__ RecordWriteContextSlot(context,
offset,
value,
temp,
- kSaveFPRegs,
+ GetSaveFPRegsMode(),
EMIT_REMEMBERED_SET,
check_needed);
}
@@ -2504,54 +3206,47 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
+ HObjectAccess access = instr->hydrogen()->access();
+ int offset = access.offset();
+
+ if (access.IsExternalMemory()) {
+ Register result = ToRegister(instr->result());
+ MemOperand operand = instr->object()->IsConstantOperand()
+ ? MemOperand::StaticVariable(ToExternalReference(
+ LConstantOperand::cast(instr->object())))
+ : MemOperand(ToRegister(instr->object()), offset);
+ if (access.representation().IsByte()) {
+ ASSERT(instr->hydrogen()->representation().IsInteger32());
+ __ movzx_b(result, operand);
+ } else {
+ __ mov(result, operand);
+ }
+ return;
+ }
+
Register object = ToRegister(instr->object());
- Register result = ToRegister(instr->result());
- if (instr->hydrogen()->is_in_object()) {
- __ mov(result, FieldOperand(object, instr->hydrogen()->offset()));
- } else {
- __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
- __ mov(result, FieldOperand(result, instr->hydrogen()->offset()));
- }
-}
-
-
-void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
- Register object,
- Handle<Map> type,
- Handle<String> name,
- LEnvironment* env) {
- LookupResult lookup(isolate());
- type->LookupDescriptor(NULL, *name, &lookup);
- ASSERT(lookup.IsFound() || lookup.IsCacheable());
- if (lookup.IsField()) {
- int index = lookup.GetLocalFieldIndexFromMap(*type);
- int offset = index * kPointerSize;
- if (index < 0) {
- // Negative property indices are in-object properties, indexed
- // from the end of the fixed part of the object.
- __ mov(result, FieldOperand(object, offset + type->instance_size()));
+ if (FLAG_track_double_fields &&
+ instr->hydrogen()->representation().IsDouble()) {
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatureScope scope(masm(), SSE2);
+ XMMRegister result = ToDoubleRegister(instr->result());
+ __ movsd(result, FieldOperand(object, offset));
} else {
- // Non-negative property indices are in the properties array.
- __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
- __ mov(result, FieldOperand(result, offset + FixedArray::kHeaderSize));
+ X87Mov(ToX87Register(instr->result()), FieldOperand(object, offset));
}
- } else if (lookup.IsConstantFunction()) {
- Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
- __ LoadHeapObject(result, function);
+ return;
+ }
+
+ Register result = ToRegister(instr->result());
+ if (!access.IsInobject()) {
+ __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
+ object = result;
+ }
+ if (access.representation().IsByte()) {
+ ASSERT(instr->hydrogen()->representation().IsInteger32());
+ __ movzx_b(result, FieldOperand(object, offset));
} else {
- // Negative lookup.
- // Check prototypes.
- Handle<HeapObject> current(HeapObject::cast((*type)->prototype()));
- Heap* heap = type->GetHeap();
- while (*current != heap->null_value()) {
- __ LoadHeapObject(result, current);
- __ cmp(FieldOperand(result, HeapObject::kMapOffset),
- Handle<Map>(current->map()));
- DeoptimizeIf(not_equal, env);
- current =
- Handle<HeapObject>(HeapObject::cast(current->map()->prototype()));
- }
- __ mov(result, factory()->undefined_value());
+ __ mov(result, FieldOperand(object, offset));
}
}
@@ -2560,6 +3255,7 @@ void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
ASSERT(!operand->IsDoubleRegister());
if (operand->IsConstantOperand()) {
Handle<Object> object = ToHandle(LConstantOperand::cast(operand));
+ AllowDeferredHandleDereference smi_check;
if (object->IsSmi()) {
__ Push(Handle<Smi>::cast(object));
} else {
@@ -2573,73 +3269,6 @@ void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
}
-// Check for cases where EmitLoadFieldOrConstantFunction needs to walk the
-// prototype chain, which causes unbounded code generation.
-static bool CompactEmit(SmallMapList* list,
- Handle<String> name,
- int i,
- Isolate* isolate) {
- Handle<Map> map = list->at(i);
- // If the map has ElementsKind transitions, we will generate map checks
- // for each kind in __ CompareMap(..., ALLOW_ELEMENTS_TRANSITION_MAPS).
- if (map->HasElementsTransition()) return false;
- LookupResult lookup(isolate);
- map->LookupDescriptor(NULL, *name, &lookup);
- return lookup.IsField() || lookup.IsConstantFunction();
-}
-
-
-void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
- Register object = ToRegister(instr->object());
- Register result = ToRegister(instr->result());
-
- int map_count = instr->hydrogen()->types()->length();
- bool need_generic = instr->hydrogen()->need_generic();
-
- if (map_count == 0 && !need_generic) {
- DeoptimizeIf(no_condition, instr->environment());
- return;
- }
- Handle<String> name = instr->hydrogen()->name();
- Label done;
- bool all_are_compact = true;
- for (int i = 0; i < map_count; ++i) {
- if (!CompactEmit(instr->hydrogen()->types(), name, i, isolate())) {
- all_are_compact = false;
- break;
- }
- }
- for (int i = 0; i < map_count; ++i) {
- bool last = (i == map_count - 1);
- Handle<Map> map = instr->hydrogen()->types()->at(i);
- Label check_passed;
- __ CompareMap(object, map, &check_passed, ALLOW_ELEMENT_TRANSITION_MAPS);
- if (last && !need_generic) {
- DeoptimizeIf(not_equal, instr->environment());
- __ bind(&check_passed);
- EmitLoadFieldOrConstantFunction(
- result, object, map, name, instr->environment());
- } else {
- Label next;
- bool compact = all_are_compact ? true :
- CompactEmit(instr->hydrogen()->types(), name, i, isolate());
- __ j(not_equal, &next, compact ? Label::kNear : Label::kFar);
- __ bind(&check_passed);
- EmitLoadFieldOrConstantFunction(
- result, object, map, name, instr->environment());
- __ jmp(&done, all_are_compact ? Label::kNear : Label::kFar);
- __ bind(&next);
- }
- }
- if (need_generic) {
- __ mov(ecx, name);
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
- }
- __ bind(&done);
-}
-
-
void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
ASSERT(ToRegister(instr->object()).is(edx));
@@ -2693,38 +3322,9 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
}
-void LCodeGen::DoLoadElements(LLoadElements* instr) {
+void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
Register result = ToRegister(instr->result());
- Register input = ToRegister(instr->object());
- __ mov(result, FieldOperand(input, JSObject::kElementsOffset));
- if (FLAG_debug_code) {
- Label done, ok, fail;
- __ cmp(FieldOperand(result, HeapObject::kMapOffset),
- Immediate(factory()->fixed_array_map()));
- __ j(equal, &done, Label::kNear);
- __ cmp(FieldOperand(result, HeapObject::kMapOffset),
- Immediate(factory()->fixed_cow_array_map()));
- __ j(equal, &done, Label::kNear);
- Register temp((result.is(eax)) ? ebx : eax);
- __ push(temp);
- __ mov(temp, FieldOperand(result, HeapObject::kMapOffset));
- __ movzx_b(temp, FieldOperand(temp, Map::kBitField2Offset));
- __ and_(temp, Map::kElementsKindMask);
- __ shr(temp, Map::kElementsKindShift);
- __ cmp(temp, GetInitialFastElementsKind());
- __ j(less, &fail, Label::kNear);
- __ cmp(temp, TERMINAL_FAST_ELEMENTS_KIND);
- __ j(less_equal, &ok, Label::kNear);
- __ cmp(temp, FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
- __ j(less, &fail, Label::kNear);
- __ cmp(temp, LAST_EXTERNAL_ARRAY_ELEMENTS_KIND);
- __ j(less_equal, &ok, Label::kNear);
- __ bind(&fail);
- __ Abort("Check for fast or external elements failed.");
- __ bind(&ok);
- __ pop(temp);
- __ bind(&done);
- }
+ __ LoadRoot(result, instr->index());
}
@@ -2739,105 +3339,25 @@ void LCodeGen::DoLoadExternalArrayPointer(
void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
Register arguments = ToRegister(instr->arguments());
- Register length = ToRegister(instr->length());
- Operand index = ToOperand(instr->index());
- Register result = ToRegister(instr->result());
- // There are two words between the frame pointer and the last argument.
- // Subtracting from length accounts for one of them add one more.
- __ sub(length, index);
- __ mov(result, Operand(arguments, length, times_4, kPointerSize));
-}
-
-
-void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
Register result = ToRegister(instr->result());
-
- // Load the result.
- __ mov(result,
- BuildFastArrayOperand(instr->elements(),
- instr->key(),
- instr->hydrogen()->key()->representation(),
- FAST_ELEMENTS,
- FixedArray::kHeaderSize - kHeapObjectTag,
- instr->additional_index()));
-
- // Check for the hole value.
- if (instr->hydrogen()->RequiresHoleCheck()) {
- if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
- __ test(result, Immediate(kSmiTagMask));
- DeoptimizeIf(not_equal, instr->environment());
- } else {
- __ cmp(result, factory()->the_hole_value());
- DeoptimizeIf(equal, instr->environment());
- }
- }
-}
-
-
-void LCodeGen::DoLoadKeyedFastDoubleElement(
- LLoadKeyedFastDoubleElement* instr) {
- XMMRegister result = ToDoubleRegister(instr->result());
-
- if (instr->hydrogen()->RequiresHoleCheck()) {
- int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
- sizeof(kHoleNanLower32);
- Operand hole_check_operand = BuildFastArrayOperand(
- instr->elements(), instr->key(),
- instr->hydrogen()->key()->representation(),
- FAST_DOUBLE_ELEMENTS,
- offset,
- instr->additional_index());
- __ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
- DeoptimizeIf(equal, instr->environment());
- }
-
- Operand double_load_operand = BuildFastArrayOperand(
- instr->elements(),
- instr->key(),
- instr->hydrogen()->key()->representation(),
- FAST_DOUBLE_ELEMENTS,
- FixedDoubleArray::kHeaderSize - kHeapObjectTag,
- instr->additional_index());
- __ movdbl(result, double_load_operand);
-}
-
-
-Operand LCodeGen::BuildFastArrayOperand(
- LOperand* elements_pointer,
- LOperand* key,
- Representation key_representation,
- ElementsKind elements_kind,
- uint32_t offset,
- uint32_t additional_index) {
- Register elements_pointer_reg = ToRegister(elements_pointer);
- int shift_size = ElementsKindToShiftSize(elements_kind);
- // Even though the HLoad/StoreKeyedFastElement instructions force the input
- // representation for the key to be an integer, the input gets replaced during
- // bound check elimination with the index argument to the bounds check, which
- // can be tagged, so that case must be handled here, too.
- if (key_representation.IsTagged() && (shift_size >= 1)) {
- shift_size -= kSmiTagSize;
- }
- if (key->IsConstantOperand()) {
- int constant_value = ToInteger32(LConstantOperand::cast(key));
- if (constant_value & 0xF0000000) {
- Abort("array index constant value too big");
- }
- return Operand(elements_pointer_reg,
- ((constant_value + additional_index) << shift_size)
- + offset);
+ if (instr->length()->IsConstantOperand() &&
+ instr->index()->IsConstantOperand()) {
+ int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+ int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
+ int index = (const_length - const_index) + 1;
+ __ mov(result, Operand(arguments, index * kPointerSize));
} else {
- ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
- return Operand(elements_pointer_reg,
- ToRegister(key),
- scale_factor,
- offset + (additional_index << shift_size));
+ Register length = ToRegister(instr->length());
+ Operand index = ToOperand(instr->index());
+ // There are two words between the frame pointer and the last argument.
+ // Subtracting from length accounts for one of them; add one more.
+ __ sub(length, index);
+ __ mov(result, Operand(arguments, length, times_4, kPointerSize));
}
}
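// Standalone sketch (not part of the patch) of the displacement computed by
// DoAccessArgumentsAt above: two words sit between the frame pointer and the
// last argument, one of which is absorbed by subtracting index from length
// and the other by the extra kPointerSize. The helper name is illustrative.
#include <cstdint>

uint32_t ArgumentByteOffset(uint32_t length, uint32_t index) {
  const uint32_t kPointerSize = 4;  // ia32
  // Matches both the constant path ((length - index) + 1 words) and the
  // register path (arguments + (length - index) * 4 + kPointerSize).
  return (length - index + 1) * kPointerSize;
}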
-void LCodeGen::DoLoadKeyedSpecializedArrayElement(
- LLoadKeyedSpecializedArrayElement* instr) {
+void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
ElementsKind elements_kind = instr->elements_kind();
LOperand* key = instr->key();
if (!key->IsConstantOperand() &&
@@ -2846,18 +3366,28 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
__ SmiUntag(ToRegister(key));
}
Operand operand(BuildFastArrayOperand(
- instr->external_pointer(),
+ instr->elements(),
key,
instr->hydrogen()->key()->representation(),
elements_kind,
0,
instr->additional_index()));
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- XMMRegister result(ToDoubleRegister(instr->result()));
- __ movss(result, operand);
- __ cvtss2sd(result, result);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatureScope scope(masm(), SSE2);
+ XMMRegister result(ToDoubleRegister(instr->result()));
+ __ movss(result, operand);
+ __ cvtss2sd(result, result);
+ } else {
+ X87Mov(ToX87Register(instr->result()), operand, kX87FloatOperand);
+ }
} else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- __ movdbl(ToDoubleRegister(instr->result()), operand);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatureScope scope(masm(), SSE2);
+ __ movsd(ToDoubleRegister(instr->result()), operand);
+ } else {
+ X87Mov(ToX87Register(instr->result()), operand);
+ }
} else {
Register result(ToRegister(instr->result()));
switch (elements_kind) {
@@ -2901,6 +3431,105 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
}
+void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
+ sizeof(kHoleNanLower32);
+ Operand hole_check_operand = BuildFastArrayOperand(
+ instr->elements(), instr->key(),
+ instr->hydrogen()->key()->representation(),
+ FAST_DOUBLE_ELEMENTS,
+ offset,
+ instr->additional_index());
+ __ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
+ DeoptimizeIf(equal, instr->environment());
+ }
+
+ Operand double_load_operand = BuildFastArrayOperand(
+ instr->elements(),
+ instr->key(),
+ instr->hydrogen()->key()->representation(),
+ FAST_DOUBLE_ELEMENTS,
+ FixedDoubleArray::kHeaderSize - kHeapObjectTag,
+ instr->additional_index());
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatureScope scope(masm(), SSE2);
+ XMMRegister result = ToDoubleRegister(instr->result());
+ __ movsd(result, double_load_operand);
+ } else {
+ X87Mov(ToX87Register(instr->result()), double_load_operand);
+ }
+}
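// Illustrative sketch (not part of the patch) of how the hole check above
// reads a double: on little-endian ia32 the word at byte offset
// sizeof(kHoleNanLower32) is the upper half of the value, which is what gets
// compared against kHoleNanUpper32 (value not reproduced here).
#include <cstdint>
#include <cstring>

uint32_t UpperWordOfDouble(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));  // little-endian assumed
  return static_cast<uint32_t>(bits >> 32);
}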
+
+
+void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
+ Register result = ToRegister(instr->result());
+
+ // Load the result.
+ __ mov(result,
+ BuildFastArrayOperand(instr->elements(),
+ instr->key(),
+ instr->hydrogen()->key()->representation(),
+ FAST_ELEMENTS,
+ FixedArray::kHeaderSize - kHeapObjectTag,
+ instr->additional_index()));
+
+ // Check for the hole value.
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
+ __ test(result, Immediate(kSmiTagMask));
+ DeoptimizeIf(not_equal, instr->environment());
+ } else {
+ __ cmp(result, factory()->the_hole_value());
+ DeoptimizeIf(equal, instr->environment());
+ }
+ }
+}
+
+
+void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
+ if (instr->is_external()) {
+ DoLoadKeyedExternalArray(instr);
+ } else if (instr->hydrogen()->representation().IsDouble()) {
+ DoLoadKeyedFixedDoubleArray(instr);
+ } else {
+ DoLoadKeyedFixedArray(instr);
+ }
+}
+
+
+Operand LCodeGen::BuildFastArrayOperand(
+ LOperand* elements_pointer,
+ LOperand* key,
+ Representation key_representation,
+ ElementsKind elements_kind,
+ uint32_t offset,
+ uint32_t additional_index) {
+ Register elements_pointer_reg = ToRegister(elements_pointer);
+ int element_shift_size = ElementsKindToShiftSize(elements_kind);
+ int shift_size = element_shift_size;
+ if (key->IsConstantOperand()) {
+ int constant_value = ToInteger32(LConstantOperand::cast(key));
+ if (constant_value & 0xF0000000) {
+ Abort(kArrayIndexConstantValueTooBig);
+ }
+ return Operand(elements_pointer_reg,
+ ((constant_value + additional_index) << shift_size)
+ + offset);
+ } else {
+ // Take the tag bit into account while computing the shift size.
+ if (key_representation.IsSmi() && (shift_size >= 1)) {
+ shift_size -= kSmiTagSize;
+ }
+ ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
+ return Operand(elements_pointer_reg,
+ ToRegister(key),
+ scale_factor,
+ offset + (additional_index << element_shift_size));
+ }
+}
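// Standalone sketch (not part of the patch) of the constant-key displacement
// BuildFastArrayOperand produces: key and additional_index are combined before
// scaling by the element size, then the header offset is added. The register
// path instead folds additional_index into the displacement, lets the
// addressing mode scale the key, and reduces the scale by kSmiTagSize for
// smi-tagged keys.
#include <cstdint>

uint32_t ConstantKeyDisplacement(uint32_t key,
                                 uint32_t additional_index,
                                 int element_shift_size,
                                 uint32_t header_offset) {
  return ((key + additional_index) << element_shift_size) + header_offset;
}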
+
+
void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
ASSERT(ToRegister(instr->object()).is(edx));
@@ -3043,7 +3672,6 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
__ bind(&invoke);
ASSERT(instr->HasPointerMap());
LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
SafepointGenerator safepoint_generator(
this, pointers, Safepoint::kLazyDeopt);
ParameterCount actual(eax);
@@ -3052,6 +3680,11 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
}
+void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
+ __ int3();
+}
+
+
void LCodeGen::DoPushArgument(LPushArgument* instr) {
LOperand* argument = instr->value();
EmitPushTaggedOperand(argument);
@@ -3071,7 +3704,12 @@ void LCodeGen::DoThisFunction(LThisFunction* instr) {
void LCodeGen::DoContext(LContext* instr) {
Register result = ToRegister(instr->result());
- __ mov(result, Operand(ebp, StandardFrameConstants::kContextOffset));
+ if (info()->IsOptimizing()) {
+ __ mov(result, Operand(ebp, StandardFrameConstants::kContextOffset));
+ } else {
+ // If there is no frame, the context must be in esi.
+ ASSERT(result.is(esi));
+ }
}
@@ -3108,15 +3746,15 @@ void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
+ int formal_parameter_count,
int arity,
LInstruction* instr,
CallKind call_kind,
EDIState edi_state) {
- bool can_invoke_directly = !function->NeedsArgumentsAdaption() ||
- function->shared()->formal_parameter_count() == arity;
-
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
+ bool dont_adapt_arguments =
+ formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
+ bool can_invoke_directly =
+ dont_adapt_arguments || formal_parameter_count == arity;
if (can_invoke_directly) {
if (edi_state == EDI_UNINITIALIZED) {
@@ -3128,13 +3766,13 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
// Set eax to arguments count if adaption is not needed. Assumes that eax
// is available to write to at this point.
- if (!function->NeedsArgumentsAdaption()) {
+ if (dont_adapt_arguments) {
__ mov(eax, arity);
}
// Invoke function directly.
__ SetCallKind(ecx, call_kind);
- if (*function == *info()->closure()) {
+ if (function.is_identical_to(info()->closure())) {
__ CallSelf();
} else {
__ call(FieldOperand(edi, JSFunction::kCodeEntryOffset));
@@ -3142,17 +3780,21 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
} else {
// We need to adapt arguments.
+ LPointerMap* pointers = instr->pointer_map();
SafepointGenerator generator(
this, pointers, Safepoint::kLazyDeopt);
ParameterCount count(arity);
- __ InvokeFunction(function, count, CALL_FUNCTION, generator, call_kind);
+ ParameterCount expected(formal_parameter_count);
+ __ InvokeFunction(
+ function, expected, count, CALL_FUNCTION, generator, call_kind);
}
}
void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
ASSERT(ToRegister(instr->result()).is(eax));
- CallKnownFunction(instr->function(),
+ CallKnownFunction(instr->hydrogen()->function(),
+ instr->hydrogen()->formal_parameter_count(),
instr->arity(),
instr,
CALL_AS_METHOD,
@@ -3160,44 +3802,36 @@ void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
}
-void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
+void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
Register input_reg = ToRegister(instr->value());
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
factory()->heap_number_map());
DeoptimizeIf(not_equal, instr->environment());
- Label done;
+ Label slow, allocated, done;
Register tmp = input_reg.is(eax) ? ecx : eax;
Register tmp2 = tmp.is(ecx) ? edx : input_reg.is(ecx) ? edx : ecx;
// Preserve the value of all registers.
PushSafepointRegistersScope scope(this);
- Label negative;
__ mov(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
// Check the sign of the argument. If the argument is positive, just
// return it. We do not need to patch the stack since |input| and
// |result| are the same register and |input| will be restored
// unchanged by popping safepoint registers.
__ test(tmp, Immediate(HeapNumber::kSignMask));
- __ j(not_zero, &negative);
- __ jmp(&done);
-
- __ bind(&negative);
+ __ j(zero, &done);
- Label allocated, slow;
__ AllocateHeapNumber(tmp, tmp2, no_reg, &slow);
- __ jmp(&allocated);
+ __ jmp(&allocated, Label::kNear);
// Slow case: Call the runtime system to do the number allocation.
__ bind(&slow);
-
CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0,
instr, instr->context());
-
// Set the pointer to the new heap number in tmp.
if (!tmp.is(eax)) __ mov(tmp, eax);
-
// Restore input_reg after call to runtime.
__ LoadFromSafepointRegisterSlot(input_reg, input_reg);
@@ -3213,47 +3847,48 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
}
-void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
+void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
Register input_reg = ToRegister(instr->value());
__ test(input_reg, Operand(input_reg));
Label is_positive;
- __ j(not_sign, &is_positive);
- __ neg(input_reg);
- __ test(input_reg, Operand(input_reg));
+ __ j(not_sign, &is_positive, Label::kNear);
+ __ neg(input_reg); // Sets flags.
DeoptimizeIf(negative, instr->environment());
__ bind(&is_positive);
}
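// Illustrative sketch (not part of the patch) of the integer path above:
// negate when negative and deoptimize when the result is still negative,
// which only happens for INT_MIN. This C++ version tests INT32_MIN up front
// to avoid signed-overflow undefined behaviour; the generated code instead
// reads the sign flag set by neg.
#include <cstdint>
#include <stdexcept>

int32_t IntegerMathAbsOrDeopt(int32_t value) {
  if (value == INT32_MIN) {
    throw std::runtime_error("deopt: Math.abs overflows int32");
  }
  return value < 0 ? -value : value;
}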
-void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
+void LCodeGen::DoMathAbs(LMathAbs* instr) {
// Class for deferred case.
- class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
+ class DeferredMathAbsTaggedHeapNumber V8_FINAL : public LDeferredCode {
public:
DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
- LUnaryMathOperation* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
+ LMathAbs* instr,
+ const X87Stack& x87_stack)
+ : LDeferredCode(codegen, x87_stack), instr_(instr) { }
+ virtual void Generate() V8_OVERRIDE {
codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
}
- virtual LInstruction* instr() { return instr_; }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
- LUnaryMathOperation* instr_;
+ LMathAbs* instr_;
};
ASSERT(instr->value()->Equals(instr->result()));
Representation r = instr->hydrogen()->value()->representation();
+ CpuFeatureScope scope(masm(), SSE2);
if (r.IsDouble()) {
- XMMRegister scratch = xmm0;
+ XMMRegister scratch = double_scratch0();
XMMRegister input_reg = ToDoubleRegister(instr->value());
__ xorps(scratch, scratch);
__ subsd(scratch, input_reg);
- __ pand(input_reg, scratch);
- } else if (r.IsInteger32()) {
+ __ andps(input_reg, scratch);
+ } else if (r.IsSmiOrInteger32()) {
EmitIntegerMathAbs(instr);
} else { // Tagged case.
DeferredMathAbsTaggedHeapNumber* deferred =
- new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
+ new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr, x87_stack_);
Register input_reg = ToRegister(instr->value());
// Smi check.
__ JumpIfNotSmi(input_reg, deferred->entry());
@@ -3263,13 +3898,14 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
}
-void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
- XMMRegister xmm_scratch = xmm0;
+void LCodeGen::DoMathFloor(LMathFloor* instr) {
+ CpuFeatureScope scope(masm(), SSE2);
+ XMMRegister xmm_scratch = double_scratch0();
Register output_reg = ToRegister(instr->result());
XMMRegister input_reg = ToDoubleRegister(instr->value());
if (CpuFeatures::IsSupported(SSE4_1)) {
- CpuFeatures::Scope scope(SSE4_1);
+ CpuFeatureScope scope(masm(), SSE4_1);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
// Deoptimize on negative zero.
Label non_zero;
@@ -3317,7 +3953,7 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
__ bind(&negative_sign);
// Truncate, then compare and compensate.
__ cvttsd2si(output_reg, Operand(input_reg));
- __ cvtsi2sd(xmm_scratch, output_reg);
+ __ Cvtsi2sd(xmm_scratch, output_reg);
__ ucomisd(input_reg, xmm_scratch);
__ j(equal, &done, Label::kNear);
__ sub(output_reg, Immediate(1));
@@ -3327,52 +3963,70 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
}
}
-void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
- XMMRegister xmm_scratch = xmm0;
+
+void LCodeGen::DoMathRound(LMathRound* instr) {
+ CpuFeatureScope scope(masm(), SSE2);
Register output_reg = ToRegister(instr->result());
XMMRegister input_reg = ToDoubleRegister(instr->value());
-
- Label below_half, done;
- // xmm_scratch = 0.5
+ XMMRegister xmm_scratch = double_scratch0();
+ XMMRegister input_temp = ToDoubleRegister(instr->temp());
ExternalReference one_half = ExternalReference::address_of_one_half();
- __ movdbl(xmm_scratch, Operand::StaticVariable(one_half));
+ ExternalReference minus_one_half =
+ ExternalReference::address_of_minus_one_half();
+
+ Label done, round_to_zero, below_one_half, do_not_compensate;
+ __ movsd(xmm_scratch, Operand::StaticVariable(one_half));
__ ucomisd(xmm_scratch, input_reg);
- __ j(above, &below_half);
- // xmm_scratch = input + 0.5
- __ addsd(xmm_scratch, input_reg);
+ __ j(above, &below_one_half);
- // Compute Math.floor(value + 0.5).
- // Use truncating instruction (OK because input is positive).
+ // CVTTSD2SI rounds towards zero; since 0.5 <= x, we use floor(0.5 + x).
+ __ addsd(xmm_scratch, input_reg);
__ cvttsd2si(output_reg, Operand(xmm_scratch));
-
// Overflow is signalled with minint.
__ cmp(output_reg, 0x80000000u);
+ __ RecordComment("D2I conversion overflow");
DeoptimizeIf(equal, instr->environment());
__ jmp(&done);
- __ bind(&below_half);
+ __ bind(&below_one_half);
+ __ movsd(xmm_scratch, Operand::StaticVariable(minus_one_half));
+ __ ucomisd(xmm_scratch, input_reg);
+ __ j(below_equal, &round_to_zero);
+
+ // CVTTSD2SI rounds towards zero, so we use ceil(x - (-0.5)) and then
+ // compare and compensate.
+ __ movsd(input_temp, input_reg); // Do not alter input_reg.
+ __ subsd(input_temp, xmm_scratch);
+ __ cvttsd2si(output_reg, Operand(input_temp));
+ // Catch minint due to overflow, and to prevent overflow when compensating.
+ __ cmp(output_reg, 0x80000000u);
+ __ RecordComment("D2I conversion overflow");
+ DeoptimizeIf(equal, instr->environment());
+
+ __ Cvtsi2sd(xmm_scratch, output_reg);
+ __ ucomisd(xmm_scratch, input_temp);
+ __ j(equal, &done);
+ __ sub(output_reg, Immediate(1));
+ // No overflow because we already ruled out minint.
+ __ jmp(&done);
+ __ bind(&round_to_zero);
// We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if
// we can ignore the difference between a result of -0 and +0.
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
// If the sign is positive, we return +0.
__ movmskpd(output_reg, input_reg);
__ test(output_reg, Immediate(1));
+ __ RecordComment("Minus zero");
DeoptimizeIf(not_zero, instr->environment());
- } else {
- // If the input is >= -0.5, we return +0.
- __ mov(output_reg, Immediate(0xBF000000));
- __ movd(xmm_scratch, Operand(output_reg));
- __ cvtss2sd(xmm_scratch, xmm_scratch);
- __ ucomisd(input_reg, xmm_scratch);
- DeoptimizeIf(below, instr->environment());
}
__ Set(output_reg, Immediate(0));
__ bind(&done);
}
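// Illustrative sketch (not part of the patch) of the value DoMathRound
// computes on the non-deopting paths: floor(x + 0.5) truncated to int32.
// The overflow and negative-zero cases that trigger deoptimization are
// omitted here.
#include <cmath>
#include <cstdint>

int32_t MathRoundSketch(double x) {
  // E.g. MathRoundSketch(2.5) == 3 and MathRoundSketch(-2.5) == -2,
  // matching JavaScript Math.round semantics for ties.
  return static_cast<int32_t>(std::floor(x + 0.5));
}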
-void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
+void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
+ CpuFeatureScope scope(masm(), SSE2);
XMMRegister input_reg = ToDoubleRegister(instr->value());
ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
__ sqrtsd(input_reg, input_reg);
@@ -3380,7 +4034,8 @@ void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
- XMMRegister xmm_scratch = xmm0;
+ CpuFeatureScope scope(masm(), SSE2);
+ XMMRegister xmm_scratch = double_scratch0();
XMMRegister input_reg = ToDoubleRegister(instr->value());
Register scratch = ToRegister(instr->temp());
ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
@@ -3424,7 +4079,10 @@ void LCodeGen::DoPower(LPower* instr) {
ASSERT(ToDoubleRegister(instr->left()).is(xmm2));
ASSERT(ToDoubleRegister(instr->result()).is(xmm3));
- if (exponent_type.IsTagged()) {
+ if (exponent_type.IsSmi()) {
+ MathPowStub stub(MathPowStub::TAGGED);
+ __ CallStub(&stub);
+ } else if (exponent_type.IsTagged()) {
Label no_deopt;
__ JumpIfSmi(eax, &no_deopt);
__ CmpObjectType(eax, HEAP_NUMBER_TYPE, ecx);
@@ -3444,169 +4102,142 @@ void LCodeGen::DoPower(LPower* instr) {
void LCodeGen::DoRandom(LRandom* instr) {
- class DeferredDoRandom: public LDeferredCode {
- public:
- DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredRandom(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LRandom* instr_;
- };
+ CpuFeatureScope scope(masm(), SSE2);
- DeferredDoRandom* deferred = new(zone()) DeferredDoRandom(this, instr);
-
- // Having marked this instruction as a call we can use any
- // registers.
- ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
- ASSERT(ToRegister(instr->global_object()).is(eax));
// Assert that the register size is indeed the size of each seed.
static const int kSeedSize = sizeof(uint32_t);
STATIC_ASSERT(kPointerSize == kSeedSize);
- __ mov(eax, FieldOperand(eax, GlobalObject::kNativeContextOffset));
+ // Load the native context.
+ Register global_object = ToRegister(instr->global_object());
+ Register native_context = global_object;
+ __ mov(native_context, FieldOperand(
+ global_object, GlobalObject::kNativeContextOffset));
+
+ // Load state (FixedArray of the native context's random seeds)
static const int kRandomSeedOffset =
FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
- __ mov(ebx, FieldOperand(eax, kRandomSeedOffset));
- // ebx: FixedArray of the native context's random seeds
+ Register state = native_context;
+ __ mov(state, FieldOperand(native_context, kRandomSeedOffset));
// Load state[0].
- __ mov(ecx, FieldOperand(ebx, ByteArray::kHeaderSize));
- // If state[0] == 0, call runtime to initialize seeds.
- __ test(ecx, ecx);
- __ j(zero, deferred->entry());
+ Register state0 = ToRegister(instr->scratch());
+ __ mov(state0, FieldOperand(state, ByteArray::kHeaderSize));
// Load state[1].
- __ mov(eax, FieldOperand(ebx, ByteArray::kHeaderSize + kSeedSize));
- // ecx: state[0]
- // eax: state[1]
+ Register state1 = ToRegister(instr->scratch2());
+ __ mov(state1, FieldOperand(state, ByteArray::kHeaderSize + kSeedSize));
// state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
- __ movzx_w(edx, ecx);
- __ imul(edx, edx, 18273);
- __ shr(ecx, 16);
- __ add(ecx, edx);
+ Register scratch3 = ToRegister(instr->scratch3());
+ __ movzx_w(scratch3, state0);
+ __ imul(scratch3, scratch3, 18273);
+ __ shr(state0, 16);
+ __ add(state0, scratch3);
// Save state[0].
- __ mov(FieldOperand(ebx, ByteArray::kHeaderSize), ecx);
+ __ mov(FieldOperand(state, ByteArray::kHeaderSize), state0);
// state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
- __ movzx_w(edx, eax);
- __ imul(edx, edx, 36969);
- __ shr(eax, 16);
- __ add(eax, edx);
+ __ movzx_w(scratch3, state1);
+ __ imul(scratch3, scratch3, 36969);
+ __ shr(state1, 16);
+ __ add(state1, scratch3);
// Save state[1].
- __ mov(FieldOperand(ebx, ByteArray::kHeaderSize + kSeedSize), eax);
+ __ mov(FieldOperand(state, ByteArray::kHeaderSize + kSeedSize), state1);
// Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
- __ shl(ecx, 14);
- __ and_(eax, Immediate(0x3FFFF));
- __ add(eax, ecx);
+ Register random = state0;
+ __ shl(random, 14);
+ __ and_(state1, Immediate(0x3FFFF));
+ __ add(random, state1);
- __ bind(deferred->exit());
- // Convert 32 random bits in eax to 0.(32 random bits) in a double
+ // Convert 32 random bits in random to 0.(32 random bits) in a double
// by computing:
// ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
- __ mov(ebx, Immediate(0x49800000)); // 1.0 x 2^20 as single.
- __ movd(xmm2, ebx);
- __ movd(xmm1, eax);
- __ cvtss2sd(xmm2, xmm2);
- __ xorps(xmm1, xmm2);
- __ subsd(xmm1, xmm2);
-}
-
-
-void LCodeGen::DoDeferredRandom(LRandom* instr) {
- __ PrepareCallCFunction(1, ebx);
- __ mov(Operand(esp, 0), eax);
- __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
- // Return value is in eax.
+ XMMRegister result = ToDoubleRegister(instr->result());
+ XMMRegister scratch4 = double_scratch0();
+ __ mov(scratch3, Immediate(0x49800000)); // 1.0 x 2^20 as single.
+ __ movd(scratch4, scratch3);
+ __ movd(result, random);
+ __ cvtss2sd(scratch4, scratch4);
+ __ xorps(result, scratch4);
+ __ subsd(result, scratch4);
}
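// Standalone sketch (not part of the patch) of the generator inlined above:
// two 16-bit multiply-with-carry streams combined into 32 random bits, then
// mapped onto [0, 1). Seeding the state words is assumed to happen elsewhere.
#include <cstdint>

double RandomSketch(uint32_t* state0, uint32_t* state1) {
  *state0 = 18273 * (*state0 & 0xFFFF) + (*state0 >> 16);
  *state1 = 36969 * (*state1 & 0xFFFF) + (*state1 >> 16);
  uint32_t random_bits = (*state0 << 14) + (*state1 & 0x3FFFF);
  // The emitted code builds the result with an exponent trick
  // (1.<random bits> x 2^20 minus 1.0 x 2^20); dividing by 2^32 is the
  // arithmetic equivalent.
  return random_bits * (1.0 / 4294967296.0);
}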
-void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
+void LCodeGen::DoMathLog(LMathLog* instr) {
+ CpuFeatureScope scope(masm(), SSE2);
ASSERT(instr->value()->Equals(instr->result()));
XMMRegister input_reg = ToDoubleRegister(instr->value());
+ XMMRegister xmm_scratch = double_scratch0();
Label positive, done, zero;
- __ xorps(xmm0, xmm0);
- __ ucomisd(input_reg, xmm0);
+ __ xorps(xmm_scratch, xmm_scratch);
+ __ ucomisd(input_reg, xmm_scratch);
__ j(above, &positive, Label::kNear);
__ j(equal, &zero, Label::kNear);
ExternalReference nan =
ExternalReference::address_of_canonical_non_hole_nan();
- __ movdbl(input_reg, Operand::StaticVariable(nan));
+ __ movsd(input_reg, Operand::StaticVariable(nan));
__ jmp(&done, Label::kNear);
__ bind(&zero);
- __ push(Immediate(0xFFF00000));
- __ push(Immediate(0));
- __ movdbl(input_reg, Operand(esp, 0));
- __ add(Operand(esp), Immediate(kDoubleSize));
+ ExternalReference ninf =
+ ExternalReference::address_of_negative_infinity();
+ __ movsd(input_reg, Operand::StaticVariable(ninf));
__ jmp(&done, Label::kNear);
__ bind(&positive);
__ fldln2();
__ sub(Operand(esp), Immediate(kDoubleSize));
- __ movdbl(Operand(esp, 0), input_reg);
+ __ movsd(Operand(esp, 0), input_reg);
__ fld_d(Operand(esp, 0));
__ fyl2x();
__ fstp_d(Operand(esp, 0));
- __ movdbl(input_reg, Operand(esp, 0));
+ __ movsd(input_reg, Operand(esp, 0));
__ add(Operand(esp), Immediate(kDoubleSize));
__ bind(&done);
}
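// Illustrative sketch (not part of the patch) of the three cases handled by
// DoMathLog above: negative inputs yield NaN, zero yields -Infinity, and
// positive inputs go through fldln2/fyl2x, i.e. ln(2) * log2(x) == ln(x).
#include <cmath>
#include <limits>

double MathLogSketch(double x) {
  if (x < 0) return std::numeric_limits<double>::quiet_NaN();
  if (x == 0) return -std::numeric_limits<double>::infinity();
  return std::log(x);
}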
-void LCodeGen::DoMathTan(LUnaryMathOperation* instr) {
+void LCodeGen::DoMathExp(LMathExp* instr) {
+ CpuFeatureScope scope(masm(), SSE2);
+ XMMRegister input = ToDoubleRegister(instr->value());
+ XMMRegister result = ToDoubleRegister(instr->result());
+ XMMRegister temp0 = double_scratch0();
+ Register temp1 = ToRegister(instr->temp1());
+ Register temp2 = ToRegister(instr->temp2());
+
+ MathExpGenerator::EmitMathExp(masm(), input, result, temp0, temp1, temp2);
+}
+
+
+void LCodeGen::DoMathTan(LMathTan* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
+ // Set the context register to a GC-safe fake value. Clobbering it is
+ // OK because this instruction is marked as a call.
+ __ Set(esi, Immediate(0));
TranscendentalCacheStub stub(TranscendentalCache::TAN,
TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
-void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
+void LCodeGen::DoMathCos(LMathCos* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
+ // Set the context register to a GC-safe fake value. Clobbering it is
+ // OK because this instruction is marked as a call.
+ __ Set(esi, Immediate(0));
TranscendentalCacheStub stub(TranscendentalCache::COS,
TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
-void LCodeGen::DoMathSin(LUnaryMathOperation* instr) {
+void LCodeGen::DoMathSin(LMathSin* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
+ // Set the context register to a GC-safe fake value. Clobbering it is
+ // OK because this instruction is marked as a call.
+ __ Set(esi, Immediate(0));
TranscendentalCacheStub stub(TranscendentalCache::SIN,
TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
- switch (instr->op()) {
- case kMathAbs:
- DoMathAbs(instr);
- break;
- case kMathFloor:
- DoMathFloor(instr);
- break;
- case kMathRound:
- DoMathRound(instr);
- break;
- case kMathSqrt:
- DoMathSqrt(instr);
- break;
- case kMathCos:
- DoMathCos(instr);
- break;
- case kMathSin:
- DoMathSin(instr);
- break;
- case kMathTan:
- DoMathTan(instr);
- break;
- case kMathLog:
- DoMathLog(instr);
- break;
-
- default:
- UNREACHABLE();
- }
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
@@ -3615,15 +4246,16 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
ASSERT(ToRegister(instr->function()).is(edi));
ASSERT(instr->HasPointerMap());
- if (instr->known_function().is_null()) {
+ Handle<JSFunction> known_function = instr->hydrogen()->known_function();
+ if (known_function.is_null()) {
LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
SafepointGenerator generator(
this, pointers, Safepoint::kLazyDeopt);
ParameterCount count(instr->arity());
__ InvokeFunction(edi, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
} else {
- CallKnownFunction(instr->known_function(),
+ CallKnownFunction(known_function,
+ instr->hydrogen()->formal_parameter_count(),
instr->arity(),
instr,
CALL_AS_METHOD,
@@ -3664,7 +4296,7 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
int arity = instr->arity();
CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
@@ -3683,7 +4315,8 @@ void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
ASSERT(ToRegister(instr->result()).is(eax));
- CallKnownFunction(instr->target(),
+ CallKnownFunction(instr->hydrogen()->target(),
+ instr->hydrogen()->formal_parameter_count(),
instr->arity(),
instr,
CALL_AS_FUNCTION,
@@ -3696,74 +4329,211 @@ void LCodeGen::DoCallNew(LCallNew* instr) {
ASSERT(ToRegister(instr->constructor()).is(edi));
ASSERT(ToRegister(instr->result()).is(eax));
+ // No cell in ebx for construct type feedback in optimized code
+ Handle<Object> undefined_value(isolate()->factory()->undefined_value());
+ __ mov(ebx, Immediate(undefined_value));
CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
__ Set(eax, Immediate(instr->arity()));
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+}
+
+
+void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
+ ASSERT(ToRegister(instr->context()).is(esi));
+ ASSERT(ToRegister(instr->constructor()).is(edi));
+ ASSERT(ToRegister(instr->result()).is(eax));
+
+ __ Set(eax, Immediate(instr->arity()));
+ __ mov(ebx, instr->hydrogen()->property_cell());
+ ElementsKind kind = instr->hydrogen()->elements_kind();
+ AllocationSiteOverrideMode override_mode =
+ (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
+ ? DISABLE_ALLOCATION_SITES
+ : DONT_OVERRIDE;
+ ContextCheckMode context_mode = CONTEXT_CHECK_NOT_REQUIRED;
+
+ if (instr->arity() == 0) {
+ ArrayNoArgumentConstructorStub stub(kind, context_mode, override_mode);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+ } else if (instr->arity() == 1) {
+ Label done;
+ if (IsFastPackedElementsKind(kind)) {
+ Label packed_case;
+ // A packed elements kind can only be kept when the single argument is
+ // zero, so look at the first argument.
+ __ mov(ecx, Operand(esp, 0));
+ __ test(ecx, ecx);
+ __ j(zero, &packed_case);
+
+ ElementsKind holey_kind = GetHoleyElementsKind(kind);
+ ArraySingleArgumentConstructorStub stub(holey_kind, context_mode,
+ override_mode);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+ __ jmp(&done);
+ __ bind(&packed_case);
+ }
+
+ ArraySingleArgumentConstructorStub stub(kind, context_mode, override_mode);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+ __ bind(&done);
+ } else {
+ ArrayNArgumentsConstructorStub stub(kind, context_mode, override_mode);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+ }
}
void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
- CallRuntime(instr->function(), instr->arity(), instr);
+ CallRuntime(instr->function(), instr->arity(), instr, instr->save_doubles());
+}
+
+
+void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
+ Register function = ToRegister(instr->function());
+ Register code_object = ToRegister(instr->code_object());
+ __ lea(code_object, FieldOperand(code_object, Code::kHeaderSize));
+ __ mov(FieldOperand(function, JSFunction::kCodeEntryOffset), code_object);
+}
+
+
+void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
+ Register result = ToRegister(instr->result());
+ Register base = ToRegister(instr->base_object());
+ __ lea(result, Operand(base, instr->offset()));
}
void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
+ Representation representation = instr->representation();
+
+ HObjectAccess access = instr->hydrogen()->access();
+ int offset = access.offset();
+
+ if (access.IsExternalMemory()) {
+ ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+ MemOperand operand = instr->object()->IsConstantOperand()
+ ? MemOperand::StaticVariable(
+ ToExternalReference(LConstantOperand::cast(instr->object())))
+ : MemOperand(ToRegister(instr->object()), offset);
+ if (instr->value()->IsConstantOperand()) {
+ ASSERT(!representation.IsByte());
+ LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
+ __ mov(operand, Immediate(ToInteger32(operand_value)));
+ } else {
+ Register value = ToRegister(instr->value());
+ if (representation.IsByte()) {
+ __ mov_b(operand, value);
+ } else {
+ __ mov(operand, value);
+ }
+ }
+ return;
+ }
+
Register object = ToRegister(instr->object());
- Register value = ToRegister(instr->value());
- int offset = instr->offset();
+ Handle<Map> transition = instr->transition();
+
+ if (FLAG_track_fields && representation.IsSmi()) {
+ if (instr->value()->IsConstantOperand()) {
+ LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
+ if (!IsSmi(operand_value)) {
+ DeoptimizeIf(no_condition, instr->environment());
+ }
+ }
+ } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
+ if (instr->value()->IsConstantOperand()) {
+ LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
+ if (IsInteger32(operand_value)) {
+ DeoptimizeIf(no_condition, instr->environment());
+ }
+ } else {
+ if (!instr->hydrogen()->value()->type().IsHeapObject()) {
+ Register value = ToRegister(instr->value());
+ __ test(value, Immediate(kSmiTagMask));
+ DeoptimizeIf(zero, instr->environment());
+ }
+ }
+ } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ ASSERT(transition.is_null());
+ ASSERT(access.IsInobject());
+ ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatureScope scope(masm(), SSE2);
+ XMMRegister value = ToDoubleRegister(instr->value());
+ __ movsd(FieldOperand(object, offset), value);
+ } else {
+ X87Register value = ToX87Register(instr->value());
+ X87Mov(FieldOperand(object, offset), value);
+ }
+ return;
+ }
- if (!instr->transition().is_null()) {
+ if (!transition.is_null()) {
if (!instr->hydrogen()->NeedsWriteBarrierForMap()) {
- __ mov(FieldOperand(object, HeapObject::kMapOffset), instr->transition());
+ __ mov(FieldOperand(object, HeapObject::kMapOffset), transition);
} else {
Register temp = ToRegister(instr->temp());
Register temp_map = ToRegister(instr->temp_map());
- __ mov(temp_map, instr->transition());
+ __ mov(temp_map, transition);
__ mov(FieldOperand(object, HeapObject::kMapOffset), temp_map);
// Update the write barrier for the map field.
__ RecordWriteField(object,
HeapObject::kMapOffset,
temp_map,
temp,
- kSaveFPRegs,
+ GetSaveFPRegsMode(),
OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
}
}
// Do the store.
- HType type = instr->hydrogen()->value()->type();
SmiCheck check_needed =
- type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
- if (instr->is_in_object()) {
- __ mov(FieldOperand(object, offset), value);
- if (instr->hydrogen()->NeedsWriteBarrier()) {
- Register temp = ToRegister(instr->temp());
- // Update the write barrier for the object for in-object properties.
- __ RecordWriteField(object,
- offset,
- value,
- temp,
- kSaveFPRegs,
- EMIT_REMEMBERED_SET,
- check_needed);
+ instr->hydrogen()->value()->IsHeapObject()
+ ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+
+ Register write_register = object;
+ if (!access.IsInobject()) {
+ write_register = ToRegister(instr->temp());
+ __ mov(write_register, FieldOperand(object, JSObject::kPropertiesOffset));
+ }
+
+ MemOperand operand = FieldOperand(write_register, offset);
+ if (instr->value()->IsConstantOperand()) {
+ LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
+ if (operand_value->IsRegister()) {
+ Register value = ToRegister(operand_value);
+ if (representation.IsByte()) {
+ __ mov_b(operand, value);
+ } else {
+ __ mov(operand, value);
+ }
+ } else {
+ Handle<Object> handle_value = ToHandle(operand_value);
+ ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+ __ mov(operand, handle_value);
}
} else {
- Register temp = ToRegister(instr->temp());
- __ mov(temp, FieldOperand(object, JSObject::kPropertiesOffset));
- __ mov(FieldOperand(temp, offset), value);
- if (instr->hydrogen()->NeedsWriteBarrier()) {
- // Update the write barrier for the properties array.
- // object is used as a scratch register.
- __ RecordWriteField(temp,
- offset,
- value,
- object,
- kSaveFPRegs,
- EMIT_REMEMBERED_SET,
- check_needed);
+ Register value = ToRegister(instr->value());
+ if (representation.IsByte()) {
+ __ mov_b(operand, value);
+ } else {
+ __ mov(operand, value);
}
}
+
+ if (instr->hydrogen()->NeedsWriteBarrier()) {
+ Register value = ToRegister(instr->value());
+ Register temp = access.IsInobject() ? ToRegister(instr->temp()) : object;
+ // Update the write barrier for the object for in-object properties.
+ __ RecordWriteField(write_register,
+ offset,
+ value,
+ temp,
+ GetSaveFPRegsMode(),
+ EMIT_REMEMBERED_SET,
+ check_needed);
+ }
}
@@ -3780,46 +4550,39 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
}
-void LCodeGen::DeoptIfTaggedButNotSmi(LEnvironment* environment,
- HValue* value,
- LOperand* operand) {
- if (value->representation().IsTagged() && !value->type().IsSmi()) {
- if (operand->IsRegister()) {
- __ test(ToRegister(operand), Immediate(kSmiTagMask));
- } else {
- __ test(ToOperand(operand), Immediate(kSmiTagMask));
- }
- DeoptimizeIf(not_zero, environment);
+void LCodeGen::ApplyCheckIf(Condition cc, LBoundsCheck* check) {
+ if (FLAG_debug_code && check->hydrogen()->skip_check()) {
+ Label done;
+ __ j(NegateCondition(cc), &done, Label::kNear);
+ __ int3();
+ __ bind(&done);
+ } else {
+ DeoptimizeIf(cc, check->environment());
}
}
void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
- DeoptIfTaggedButNotSmi(instr->environment(),
- instr->hydrogen()->length(),
- instr->length());
- DeoptIfTaggedButNotSmi(instr->environment(),
- instr->hydrogen()->index(),
- instr->index());
+ if (instr->hydrogen()->skip_check() && !FLAG_debug_code) return;
+
if (instr->index()->IsConstantOperand()) {
- int constant_index =
- ToInteger32(LConstantOperand::cast(instr->index()));
- if (instr->hydrogen()->length()->representation().IsTagged()) {
- __ cmp(ToOperand(instr->length()),
- Immediate(Smi::FromInt(constant_index)));
- } else {
- __ cmp(ToOperand(instr->length()), Immediate(constant_index));
- }
- DeoptimizeIf(below_equal, instr->environment());
+ Immediate immediate =
+ ToImmediate(LConstantOperand::cast(instr->index()),
+ instr->hydrogen()->length()->representation());
+ __ cmp(ToOperand(instr->length()), immediate);
+ Condition condition =
+ instr->hydrogen()->allow_equality() ? below : below_equal;
+ ApplyCheckIf(condition, instr);
} else {
__ cmp(ToRegister(instr->index()), ToOperand(instr->length()));
- DeoptimizeIf(above_equal, instr->environment());
+ Condition condition =
+ instr->hydrogen()->allow_equality() ? above : above_equal;
+ ApplyCheckIf(condition, instr);
}
}
-void LCodeGen::DoStoreKeyedSpecializedArrayElement(
- LStoreKeyedSpecializedArrayElement* instr) {
+void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
ElementsKind elements_kind = instr->elements_kind();
LOperand* key = instr->key();
if (!key->IsConstantOperand() &&
@@ -3828,17 +4591,29 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
__ SmiUntag(ToRegister(key));
}
Operand operand(BuildFastArrayOperand(
- instr->external_pointer(),
+ instr->elements(),
key,
instr->hydrogen()->key()->representation(),
elements_kind,
0,
instr->additional_index()));
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- __ cvtsd2ss(xmm0, ToDoubleRegister(instr->value()));
- __ movss(operand, xmm0);
+ if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
+ CpuFeatureScope scope(masm(), SSE2);
+ XMMRegister xmm_scratch = double_scratch0();
+ __ cvtsd2ss(xmm_scratch, ToDoubleRegister(instr->value()));
+ __ movss(operand, xmm_scratch);
+ } else {
+ __ fld(0);
+ __ fstp_s(operand);
+ }
} else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- __ movdbl(operand, ToDoubleRegister(instr->value()));
+ if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
+ CpuFeatureScope scope(masm(), SSE2);
+ __ movsd(operand, ToDoubleRegister(instr->value()));
+ } else {
+ X87Mov(operand, ToX87Register(instr->value()));
+ }
} else {
Register value = ToRegister(instr->value());
switch (elements_kind) {
@@ -3872,61 +4647,135 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
}
-void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
- Register value = ToRegister(instr->value());
- Register elements = ToRegister(instr->object());
+void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
+ ExternalReference canonical_nan_reference =
+ ExternalReference::address_of_canonical_non_hole_nan();
+ Operand double_store_operand = BuildFastArrayOperand(
+ instr->elements(),
+ instr->key(),
+ instr->hydrogen()->key()->representation(),
+ FAST_DOUBLE_ELEMENTS,
+ FixedDoubleArray::kHeaderSize - kHeapObjectTag,
+ instr->additional_index());
+
+ if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
+ CpuFeatureScope scope(masm(), SSE2);
+ XMMRegister value = ToDoubleRegister(instr->value());
+
+ if (instr->NeedsCanonicalization()) {
+ Label have_value;
+
+ __ ucomisd(value, value);
+ __ j(parity_odd, &have_value); // NaN.
+
+ __ movsd(value, Operand::StaticVariable(canonical_nan_reference));
+ __ bind(&have_value);
+ }
+
+ __ movsd(double_store_operand, value);
+ } else {
+ // Can't use SSE2 in the serializer.
+ if (instr->hydrogen()->IsConstantHoleStore()) {
+ // This means we should store the (double) hole. No floating point
+ // registers required.
+ double nan_double = FixedDoubleArray::hole_nan_as_double();
+ uint64_t int_val = BitCast<uint64_t, double>(nan_double);
+ int32_t lower = static_cast<int32_t>(int_val);
+ int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
+
+ __ mov(double_store_operand, Immediate(lower));
+ Operand double_store_operand2 = BuildFastArrayOperand(
+ instr->elements(),
+ instr->key(),
+ instr->hydrogen()->key()->representation(),
+ FAST_DOUBLE_ELEMENTS,
+ FixedDoubleArray::kHeaderSize - kHeapObjectTag + kPointerSize,
+ instr->additional_index());
+ __ mov(double_store_operand2, Immediate(upper));
+ } else {
+ Label no_special_nan_handling;
+ X87Register value = ToX87Register(instr->value());
+ X87Fxch(value);
+
+ if (instr->NeedsCanonicalization()) {
+ __ fld(0);
+ __ fld(0);
+ __ FCmp();
+
+ __ j(parity_odd, &no_special_nan_handling);
+ __ sub(esp, Immediate(kDoubleSize));
+ __ fst_d(MemOperand(esp, 0));
+ __ cmp(MemOperand(esp, sizeof(kHoleNanLower32)),
+ Immediate(kHoleNanUpper32));
+ __ add(esp, Immediate(kDoubleSize));
+ Label canonicalize;
+ __ j(not_equal, &canonicalize);
+ __ jmp(&no_special_nan_handling);
+ __ bind(&canonicalize);
+ __ fstp(0);
+ __ fld_d(Operand::StaticVariable(canonical_nan_reference));
+ }
+
+ __ bind(&no_special_nan_handling);
+ __ fst_d(double_store_operand);
+ }
+ }
+}
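A small sketch, in plain C++, of what the constant-hole path above does with its two 32-bit moves: the 64-bit bit pattern of the double is split into a lower and an upper word (the helper below is illustrative only).

#include <cstdint>
#include <cstring>

// Split a double's bit pattern into the two 32-bit words that are stored
// separately: the lower word first, then the upper word in the next slot.
static void SplitDoubleIntoWords(double value, uint32_t* lower, uint32_t* upper) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));  // same idea as BitCast<uint64_t, double>
  *lower = static_cast<uint32_t>(bits);
  *upper = static_cast<uint32_t>(bits >> 32);
}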
+
+
+void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
+ Register elements = ToRegister(instr->elements());
Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
Operand operand = BuildFastArrayOperand(
- instr->object(),
+ instr->elements(),
instr->key(),
instr->hydrogen()->key()->representation(),
FAST_ELEMENTS,
FixedArray::kHeaderSize - kHeapObjectTag,
instr->additional_index());
- __ mov(operand, value);
+ if (instr->value()->IsRegister()) {
+ __ mov(operand, ToRegister(instr->value()));
+ } else {
+ LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
+ if (IsSmi(operand_value)) {
+ Immediate immediate = ToImmediate(operand_value, Representation::Smi());
+ __ mov(operand, immediate);
+ } else {
+ ASSERT(!IsInteger32(operand_value));
+ Handle<Object> handle_value = ToHandle(operand_value);
+ __ mov(operand, handle_value);
+ }
+ }
if (instr->hydrogen()->NeedsWriteBarrier()) {
+ ASSERT(instr->value()->IsRegister());
+ Register value = ToRegister(instr->value());
ASSERT(!instr->key()->IsConstantOperand());
- HType type = instr->hydrogen()->value()->type();
SmiCheck check_needed =
- type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+ instr->hydrogen()->value()->IsHeapObject()
+ ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
// Compute address of modified element and store it into key register.
__ lea(key, operand);
__ RecordWrite(elements,
key,
value,
- kSaveFPRegs,
+ GetSaveFPRegsMode(),
EMIT_REMEMBERED_SET,
check_needed);
}
}
-void LCodeGen::DoStoreKeyedFastDoubleElement(
- LStoreKeyedFastDoubleElement* instr) {
- XMMRegister value = ToDoubleRegister(instr->value());
-
- if (instr->NeedsCanonicalization()) {
- Label have_value;
-
- __ ucomisd(value, value);
- __ j(parity_odd, &have_value); // NaN.
-
- ExternalReference canonical_nan_reference =
- ExternalReference::address_of_canonical_non_hole_nan();
- __ movdbl(value, Operand::StaticVariable(canonical_nan_reference));
- __ bind(&have_value);
+void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
+ // Dispatch by elements kind: external, fast-double, or fast.
+ if (instr->is_external()) {
+ DoStoreKeyedExternalArray(instr);
+ } else if (instr->hydrogen()->value()->representation().IsDouble()) {
+ DoStoreKeyedFixedDoubleArray(instr);
+ } else {
+ DoStoreKeyedFixedArray(instr);
}
-
- Operand double_store_operand = BuildFastArrayOperand(
- instr->elements(),
- instr->key(),
- instr->hydrogen()->key()->representation(),
- FAST_DOUBLE_ELEMENTS,
- FixedDoubleArray::kHeaderSize - kHeapObjectTag,
- instr->additional_index());
- __ movdbl(double_store_operand, value);
}
@@ -3943,14 +4792,23 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
}
+void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
+ Register object = ToRegister(instr->object());
+ Register temp = ToRegister(instr->temp());
+ Label no_memento_found;
+ __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
+ DeoptimizeIf(equal, instr->environment());
+ __ bind(&no_memento_found);
+}
+
+
void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
Register object_reg = ToRegister(instr->object());
- Register new_map_reg = ToRegister(instr->new_map_temp());
Handle<Map> from_map = instr->original_map();
Handle<Map> to_map = instr->transitioned_map();
- ElementsKind from_kind = from_map->elements_kind();
- ElementsKind to_kind = to_map->elements_kind();
+ ElementsKind from_kind = instr->from_kind();
+ ElementsKind to_kind = instr->to_kind();
Label not_applicable;
bool is_simple_map_transition =
@@ -3960,53 +4818,50 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
__ cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
__ j(not_equal, &not_applicable, branch_distance);
if (is_simple_map_transition) {
- Register object_reg = ToRegister(instr->object());
- Handle<Map> map = instr->hydrogen()->transitioned_map();
+ Register new_map_reg = ToRegister(instr->new_map_temp());
__ mov(FieldOperand(object_reg, HeapObject::kMapOffset),
- Immediate(map));
+ Immediate(to_map));
// Write barrier.
ASSERT_NE(instr->temp(), NULL);
__ RecordWriteForMap(object_reg, to_map, new_map_reg,
ToRegister(instr->temp()),
kDontSaveFPRegs);
- } else if (IsFastSmiElementsKind(from_kind) &&
- IsFastDoubleElementsKind(to_kind)) {
- __ mov(new_map_reg, to_map);
- Register fixed_object_reg = ToRegister(instr->temp());
- ASSERT(fixed_object_reg.is(edx));
- ASSERT(new_map_reg.is(ebx));
- __ mov(fixed_object_reg, object_reg);
- CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
- RelocInfo::CODE_TARGET, instr);
- } else if (IsFastDoubleElementsKind(from_kind) &&
- IsFastObjectElementsKind(to_kind)) {
- __ mov(new_map_reg, to_map);
- Register fixed_object_reg = ToRegister(instr->temp());
- ASSERT(fixed_object_reg.is(edx));
- ASSERT(new_map_reg.is(ebx));
- __ mov(fixed_object_reg, object_reg);
- CallCode(isolate()->builtins()->TransitionElementsDoubleToObject(),
- RelocInfo::CODE_TARGET, instr);
} else {
- UNREACHABLE();
+ PushSafepointRegistersScope scope(this);
+ if (!object_reg.is(eax)) {
+ __ push(object_reg);
+ }
+ LoadContextFromDeferred(instr->context());
+ if (!object_reg.is(eax)) {
+ __ pop(eax);
+ }
+ __ mov(ebx, to_map);
+ TransitionElementsKindStub stub(from_kind, to_kind);
+ __ CallStub(&stub);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
}
__ bind(&not_applicable);
}
void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
- class DeferredStringCharCodeAt: public LDeferredCode {
+ class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode {
public:
- DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
- virtual LInstruction* instr() { return instr_; }
+ DeferredStringCharCodeAt(LCodeGen* codegen,
+ LStringCharCodeAt* instr,
+ const X87Stack& x87_stack)
+ : LDeferredCode(codegen, x87_stack), instr_(instr) { }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredStringCharCodeAt(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LStringCharCodeAt* instr_;
};
DeferredStringCharCodeAt* deferred =
- new(zone()) DeferredStringCharCodeAt(this, instr);
+ new(zone()) DeferredStringCharCodeAt(this, instr, x87_stack_);
StringCharLoadGenerator::Generate(masm(),
factory(),
@@ -4033,8 +4888,9 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
// DoStringCharCodeAt above.
STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
if (instr->index()->IsConstantOperand()) {
- int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
- __ push(Immediate(Smi::FromInt(const_index)));
+ Immediate immediate = ToImmediate(LConstantOperand::cast(instr->index()),
+ Representation::Smi());
+ __ push(immediate);
} else {
Register index = ToRegister(instr->index());
__ SmiTag(index);
@@ -4049,25 +4905,29 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
- class DeferredStringCharFromCode: public LDeferredCode {
+ class DeferredStringCharFromCode V8_FINAL : public LDeferredCode {
public:
- DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
- virtual LInstruction* instr() { return instr_; }
+ DeferredStringCharFromCode(LCodeGen* codegen,
+ LStringCharFromCode* instr,
+ const X87Stack& x87_stack)
+ : LDeferredCode(codegen, x87_stack), instr_(instr) { }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredStringCharFromCode(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LStringCharFromCode* instr_;
};
DeferredStringCharFromCode* deferred =
- new(zone()) DeferredStringCharFromCode(this, instr);
+ new(zone()) DeferredStringCharFromCode(this, instr, x87_stack_);
ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
Register char_code = ToRegister(instr->char_code());
Register result = ToRegister(instr->result());
ASSERT(!char_code.is(result));
- __ cmp(char_code, String::kMaxAsciiCharCode);
+ __ cmp(char_code, String::kMaxOneByteCharCode);
__ j(above, deferred->entry());
__ Set(result, Immediate(factory()->single_character_string_cache()));
__ mov(result, FieldOperand(result,
@@ -4096,50 +4956,84 @@ void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
}
-void LCodeGen::DoStringLength(LStringLength* instr) {
- Register string = ToRegister(instr->string());
- Register result = ToRegister(instr->result());
- __ mov(result, FieldOperand(string, String::kLengthOffset));
-}
-
-
void LCodeGen::DoStringAdd(LStringAdd* instr) {
EmitPushTaggedOperand(instr->left());
EmitPushTaggedOperand(instr->right());
- StringAddStub stub(NO_STRING_CHECK_IN_STUB);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ StringAddStub stub(instr->hydrogen()->flags());
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
LOperand* input = instr->value();
- ASSERT(input->IsRegister() || input->IsStackSlot());
LOperand* output = instr->result();
+ ASSERT(input->IsRegister() || input->IsStackSlot());
ASSERT(output->IsDoubleRegister());
- __ cvtsi2sd(ToDoubleRegister(output), ToOperand(input));
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatureScope scope(masm(), SSE2);
+ __ Cvtsi2sd(ToDoubleRegister(output), ToOperand(input));
+ } else if (input->IsRegister()) {
+ Register input_reg = ToRegister(input);
+ __ push(input_reg);
+ X87Mov(ToX87Register(output), Operand(esp, 0), kX87IntOperand);
+ __ pop(input_reg);
+ } else {
+ X87Mov(ToX87Register(output), ToOperand(input), kX87IntOperand);
+ }
+}
+
+
+void LCodeGen::DoInteger32ToSmi(LInteger32ToSmi* instr) {
+ Register input = ToRegister(instr->value());
+ __ SmiTag(input);
+ if (!instr->hydrogen()->value()->HasRange() ||
+ !instr->hydrogen()->value()->range()->IsInSmiRange()) {
+ DeoptimizeIf(overflow, instr->environment());
+ }
}
void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
LOperand* input = instr->value();
LOperand* output = instr->result();
- LOperand* temp = instr->temp();
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatureScope scope(masm(), SSE2);
+ LOperand* temp = instr->temp();
+
+ __ LoadUint32(ToDoubleRegister(output),
+ ToRegister(input),
+ ToDoubleRegister(temp));
+ } else {
+ X87Register res = ToX87Register(output);
+ X87PrepareToWrite(res);
+ __ LoadUint32NoSSE2(ToRegister(input));
+ X87CommitWrite(res);
+ }
+}
- __ LoadUint32(ToDoubleRegister(output),
- ToRegister(input),
- ToDoubleRegister(temp));
+
+void LCodeGen::DoUint32ToSmi(LUint32ToSmi* instr) {
+ Register input = ToRegister(instr->value());
+ if (!instr->hydrogen()->value()->HasRange() ||
+ !instr->hydrogen()->value()->range()->IsInSmiRange()) {
+ __ test(input, Immediate(0xc0000000));
+ DeoptimizeIf(not_zero, instr->environment());
+ }
+ __ SmiTag(input);
}
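A minimal sketch of the range check behind the test against 0xc0000000 above: an ia32 Smi carries a 31-bit signed payload, so an unsigned 32-bit value fits only when its top two bits are clear (the helper name is hypothetical).

#include <cstdint>

static bool Uint32FitsInSmi(uint32_t value) {
  return (value & 0xC0000000u) == 0;  // i.e. value <= 2^30 - 1
}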
void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
- class DeferredNumberTagI: public LDeferredCode {
+ class DeferredNumberTagI V8_FINAL : public LDeferredCode {
public:
- DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
+ DeferredNumberTagI(LCodeGen* codegen,
+ LNumberTagI* instr,
+ const X87Stack& x87_stack)
+ : LDeferredCode(codegen, x87_stack), instr_(instr) { }
+ virtual void Generate() V8_OVERRIDE {
codegen()->DoDeferredNumberTagI(instr_, instr_->value(), SIGNED_INT32);
}
- virtual LInstruction* instr() { return instr_; }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LNumberTagI* instr_;
};
@@ -4148,7 +5042,8 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
ASSERT(input->IsRegister() && input->Equals(instr->result()));
Register reg = ToRegister(input);
- DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
+ DeferredNumberTagI* deferred =
+ new(zone()) DeferredNumberTagI(this, instr, x87_stack_);
__ SmiTag(reg);
__ j(overflow, deferred->entry());
__ bind(deferred->exit());
@@ -4156,14 +5051,16 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
- class DeferredNumberTagU: public LDeferredCode {
+ class DeferredNumberTagU V8_FINAL : public LDeferredCode {
public:
- DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
+ DeferredNumberTagU(LCodeGen* codegen,
+ LNumberTagU* instr,
+ const X87Stack& x87_stack)
+ : LDeferredCode(codegen, x87_stack), instr_(instr) { }
+ virtual void Generate() V8_OVERRIDE {
codegen()->DoDeferredNumberTagI(instr_, instr_->value(), UNSIGNED_INT32);
}
- virtual LInstruction* instr() { return instr_; }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LNumberTagU* instr_;
};
@@ -4172,7 +5069,8 @@ void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
ASSERT(input->IsRegister() && input->Equals(instr->result()));
Register reg = ToRegister(input);
- DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
+ DeferredNumberTagU* deferred =
+ new(zone()) DeferredNumberTagU(this, instr, x87_stack_);
__ cmp(reg, Immediate(Smi::kMaxValue));
__ j(above, deferred->entry());
__ SmiTag(reg);
@@ -4186,6 +5084,7 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
Label slow;
Register reg = ToRegister(value);
Register tmp = reg.is(eax) ? ecx : eax;
+ XMMRegister xmm_scratch = double_scratch0();
// Preserve the value of all registers.
PushSafepointRegistersScope scope(this);
@@ -4198,9 +5097,28 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
// the value in there. If that fails, call the runtime system.
__ SmiUntag(reg);
__ xor_(reg, 0x80000000);
- __ cvtsi2sd(xmm0, Operand(reg));
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatureScope feature_scope(masm(), SSE2);
+ __ Cvtsi2sd(xmm_scratch, Operand(reg));
+ } else {
+ __ push(reg);
+ __ fild_s(Operand(esp, 0));
+ __ pop(reg);
+ }
} else {
- __ LoadUint32(xmm0, reg, xmm1);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatureScope feature_scope(masm(), SSE2);
+ __ LoadUint32(xmm_scratch, reg,
+ ToDoubleRegister(LNumberTagU::cast(instr)->temp()));
+ } else {
+ // There's no fild variant for unsigned values, so zero-extend to a 64-bit
+ // int manually.
+ __ push(Immediate(0));
+ __ push(reg);
+ __ fild_d(Operand(esp, 0));
+ __ pop(reg);
+ __ pop(reg);
+ }
}
if (FLAG_inline_new) {
@@ -4226,37 +5144,59 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
if (!reg.is(eax)) __ mov(reg, eax);
- // Done. Put the value in xmm0 into the value of the allocated heap
+ // Done. Put the value in xmm_scratch into the value of the allocated heap
// number.
__ bind(&done);
- __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), xmm0);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatureScope feature_scope(masm(), SSE2);
+ __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), xmm_scratch);
+ } else {
+ __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
+ }
__ StoreToSafepointRegisterSlot(reg, reg);
}
void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
- class DeferredNumberTagD: public LDeferredCode {
+ class DeferredNumberTagD V8_FINAL : public LDeferredCode {
public:
- DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
- virtual LInstruction* instr() { return instr_; }
+ DeferredNumberTagD(LCodeGen* codegen,
+ LNumberTagD* instr,
+ const X87Stack& x87_stack)
+ : LDeferredCode(codegen, x87_stack), instr_(instr) { }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredNumberTagD(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LNumberTagD* instr_;
};
- XMMRegister input_reg = ToDoubleRegister(instr->value());
Register reg = ToRegister(instr->result());
- Register tmp = ToRegister(instr->temp());
- DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
+ bool use_sse2 = CpuFeatures::IsSupported(SSE2);
+ if (!use_sse2) {
+ // Put the value on top of the x87 stack.
+ X87Register src = ToX87Register(instr->value());
+ X87LoadForUsage(src);
+ }
+
+ DeferredNumberTagD* deferred =
+ new(zone()) DeferredNumberTagD(this, instr, x87_stack_);
if (FLAG_inline_new) {
+ Register tmp = ToRegister(instr->temp());
__ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry());
} else {
__ jmp(deferred->entry());
}
__ bind(deferred->exit());
- __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
+ if (use_sse2) {
+ CpuFeatureScope scope(masm(), SSE2);
+ XMMRegister input_reg = ToDoubleRegister(instr->value());
+ __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
+ } else {
+ __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
+ }
}
@@ -4291,175 +5231,227 @@ void LCodeGen::DoSmiTag(LSmiTag* instr) {
void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
LOperand* input = instr->value();
+ Register result = ToRegister(input);
ASSERT(input->IsRegister() && input->Equals(instr->result()));
if (instr->needs_check()) {
- __ test(ToRegister(input), Immediate(kSmiTagMask));
+ __ test(result, Immediate(kSmiTagMask));
DeoptimizeIf(not_zero, instr->environment());
} else {
- __ AssertSmi(ToRegister(input));
+ __ AssertSmi(result);
+ }
+ __ SmiUntag(result);
+}
+
+
+void LCodeGen::EmitNumberUntagDNoSSE2(Register input_reg,
+ Register temp_reg,
+ X87Register res_reg,
+ bool can_convert_undefined_to_nan,
+ bool deoptimize_on_minus_zero,
+ LEnvironment* env,
+ NumberUntagDMode mode) {
+ Label load_smi, done;
+
+ X87PrepareToWrite(res_reg);
+ if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
+ // Smi check.
+ __ JumpIfSmi(input_reg, &load_smi, Label::kNear);
+
+ // Heap number map check.
+ __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
+ factory()->heap_number_map());
+ if (!can_convert_undefined_to_nan) {
+ DeoptimizeIf(not_equal, env);
+ } else {
+ Label heap_number, convert;
+ __ j(equal, &heap_number, Label::kNear);
+
+ // Convert undefined (or hole) to NaN.
+ __ cmp(input_reg, factory()->undefined_value());
+ DeoptimizeIf(not_equal, env);
+
+ __ bind(&convert);
+ ExternalReference nan =
+ ExternalReference::address_of_canonical_non_hole_nan();
+ __ fld_d(Operand::StaticVariable(nan));
+ __ jmp(&done, Label::kNear);
+
+ __ bind(&heap_number);
+ }
+ // Heap number to x87 conversion.
+ __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
+ if (deoptimize_on_minus_zero) {
+ __ fldz();
+ __ FCmp();
+ __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
+ __ j(not_zero, &done, Label::kNear);
+
+ // Use general purpose registers to check if we have -0.0
+ __ mov(temp_reg, FieldOperand(input_reg, HeapNumber::kExponentOffset));
+ __ test(temp_reg, Immediate(HeapNumber::kSignMask));
+ __ j(zero, &done, Label::kNear);
+
+ // Pop FPU stack before deoptimizing.
+ __ fstp(0);
+ DeoptimizeIf(not_zero, env);
+ }
+ __ jmp(&done, Label::kNear);
+ } else {
+ ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
}
- __ SmiUntag(ToRegister(input));
+
+ __ bind(&load_smi);
+ // Clobbering a temp is faster than re-tagging the
+ // input register since we avoid dependencies.
+ __ mov(temp_reg, input_reg);
+ __ SmiUntag(temp_reg); // Untag smi before converting to float.
+ __ push(temp_reg);
+ __ fild_s(Operand(esp, 0));
+ __ add(esp, Immediate(kPointerSize));
+ __ bind(&done);
+ X87CommitWrite(res_reg);
}
void LCodeGen::EmitNumberUntagD(Register input_reg,
Register temp_reg,
XMMRegister result_reg,
- bool deoptimize_on_undefined,
+ bool can_convert_undefined_to_nan,
bool deoptimize_on_minus_zero,
- LEnvironment* env) {
- Label load_smi, done;
+ LEnvironment* env,
+ NumberUntagDMode mode) {
+ Label convert, load_smi, done;
- // Smi check.
- __ JumpIfSmi(input_reg, &load_smi, Label::kNear);
+ if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
+ // Smi check.
+ __ JumpIfSmi(input_reg, &load_smi, Label::kNear);
- // Heap number map check.
- __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
- factory()->heap_number_map());
- if (deoptimize_on_undefined) {
- DeoptimizeIf(not_equal, env);
- } else {
- Label heap_number;
- __ j(equal, &heap_number, Label::kNear);
+ // Heap number map check.
+ __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
+ factory()->heap_number_map());
+ if (can_convert_undefined_to_nan) {
+ __ j(not_equal, &convert, Label::kNear);
+ } else {
+ DeoptimizeIf(not_equal, env);
+ }
- __ cmp(input_reg, factory()->undefined_value());
- DeoptimizeIf(not_equal, env);
+ // Heap number to XMM conversion.
+ __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
- // Convert undefined to NaN.
- ExternalReference nan =
- ExternalReference::address_of_canonical_non_hole_nan();
- __ movdbl(result_reg, Operand::StaticVariable(nan));
+ if (deoptimize_on_minus_zero) {
+ XMMRegister xmm_scratch = double_scratch0();
+ __ xorps(xmm_scratch, xmm_scratch);
+ __ ucomisd(result_reg, xmm_scratch);
+ __ j(not_zero, &done, Label::kNear);
+ __ movmskpd(temp_reg, result_reg);
+ __ test_b(temp_reg, 1);
+ DeoptimizeIf(not_zero, env);
+ }
__ jmp(&done, Label::kNear);
- __ bind(&heap_number);
- }
- // Heap number to XMM conversion.
- __ movdbl(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
- if (deoptimize_on_minus_zero) {
- XMMRegister xmm_scratch = xmm0;
- __ xorps(xmm_scratch, xmm_scratch);
- __ ucomisd(result_reg, xmm_scratch);
- __ j(not_zero, &done, Label::kNear);
- __ movmskpd(temp_reg, result_reg);
- __ test_b(temp_reg, 1);
- DeoptimizeIf(not_zero, env);
+ if (can_convert_undefined_to_nan) {
+ __ bind(&convert);
+
+ // Convert undefined (and hole) to NaN.
+ __ cmp(input_reg, factory()->undefined_value());
+ DeoptimizeIf(not_equal, env);
+
+ ExternalReference nan =
+ ExternalReference::address_of_canonical_non_hole_nan();
+ __ movsd(result_reg, Operand::StaticVariable(nan));
+ __ jmp(&done, Label::kNear);
+ }
+ } else {
+ ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
}
- __ jmp(&done, Label::kNear);
- // Smi to XMM conversion
__ bind(&load_smi);
- __ SmiUntag(input_reg); // Untag smi before converting to float.
- __ cvtsi2sd(result_reg, Operand(input_reg));
- __ SmiTag(input_reg); // Retag smi.
+ // Smi to XMM conversion. Clobbering a temp is faster than re-tagging the
+ // input register since we avoid dependencies.
+ __ mov(temp_reg, input_reg);
+ __ SmiUntag(temp_reg); // Untag smi before converting to float.
+ __ Cvtsi2sd(result_reg, Operand(temp_reg));
__ bind(&done);
}
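A short sketch of the minus-zero test used by the untagging paths above: a double is -0.0 exactly when it compares equal to zero yet has its sign bit set, which is what the ucomisd/movmskpd (and the x87 exponent-word) checks establish. The C++ helper below is illustrative only.

#include <cstdint>
#include <cstring>

static bool IsMinusZero(double value) {
  if (value != 0.0) return false;          // the compare-against-zero step
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  return (bits >> 63) != 0;                // sign bit, as movmskpd extracts it
}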
-void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
- Label done, heap_number;
+void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
Register input_reg = ToRegister(instr->value());
- // Heap number map check.
- __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
- factory()->heap_number_map());
-
if (instr->truncating()) {
- __ j(equal, &heap_number, Label::kNear);
- // Check for undefined. Undefined is converted to zero for truncating
- // conversions.
- __ cmp(input_reg, factory()->undefined_value());
- DeoptimizeIf(not_equal, instr->environment());
- __ mov(input_reg, 0);
- __ jmp(&done, Label::kNear);
-
- __ bind(&heap_number);
- if (CpuFeatures::IsSupported(SSE3)) {
- CpuFeatures::Scope scope(SSE3);
- Label convert;
- // Use more powerful conversion when sse3 is available.
- // Load x87 register with heap number.
- __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
- // Get exponent alone and check for too-big exponent.
- __ mov(input_reg, FieldOperand(input_reg, HeapNumber::kExponentOffset));
- __ and_(input_reg, HeapNumber::kExponentMask);
- const uint32_t kTooBigExponent =
- (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
- __ cmp(Operand(input_reg), Immediate(kTooBigExponent));
- __ j(less, &convert, Label::kNear);
- // Pop FPU stack before deoptimizing.
- __ fstp(0);
- DeoptimizeIf(no_condition, instr->environment());
+ Label no_heap_number, check_bools, check_false;
- // Reserve space for 64 bit answer.
- __ bind(&convert);
- __ sub(Operand(esp), Immediate(kDoubleSize));
- // Do conversion, which cannot fail because we checked the exponent.
- __ fisttp_d(Operand(esp, 0));
- __ mov(input_reg, Operand(esp, 0)); // Low word of answer is the result.
- __ add(Operand(esp), Immediate(kDoubleSize));
- } else {
- XMMRegister xmm_temp = ToDoubleRegister(instr->temp());
- __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
- __ cvttsd2si(input_reg, Operand(xmm0));
- __ cmp(input_reg, 0x80000000u);
- __ j(not_equal, &done);
- // Check if the input was 0x8000000 (kMinInt).
- // If no, then we got an overflow and we deoptimize.
- ExternalReference min_int = ExternalReference::address_of_min_int();
- __ movdbl(xmm_temp, Operand::StaticVariable(min_int));
- __ ucomisd(xmm_temp, xmm0);
- DeoptimizeIf(not_equal, instr->environment());
- DeoptimizeIf(parity_even, instr->environment()); // NaN.
- }
- } else {
- // Deoptimize if we don't have a heap number.
- DeoptimizeIf(not_equal, instr->environment());
+ // Heap number map check.
+ __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
+ factory()->heap_number_map());
+ __ j(not_equal, &no_heap_number, Label::kNear);
+ __ TruncateHeapNumberToI(input_reg, input_reg);
+ __ jmp(done);
- XMMRegister xmm_temp = ToDoubleRegister(instr->temp());
- __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
- __ cvttsd2si(input_reg, Operand(xmm0));
- __ cvtsi2sd(xmm_temp, Operand(input_reg));
- __ ucomisd(xmm0, xmm_temp);
+ __ bind(&no_heap_number);
+ // Check for Oddballs. Undefined/False is converted to zero and True to one
+ // for truncating conversions.
+ __ cmp(input_reg, factory()->undefined_value());
+ __ j(not_equal, &check_bools, Label::kNear);
+ __ Set(input_reg, Immediate(0));
+ __ jmp(done);
+
+ __ bind(&check_bools);
+ __ cmp(input_reg, factory()->true_value());
+ __ j(not_equal, &check_false, Label::kNear);
+ __ Set(input_reg, Immediate(1));
+ __ jmp(done);
+
+ __ bind(&check_false);
+ __ cmp(input_reg, factory()->false_value());
+ __ RecordComment("Deferred TaggedToI: cannot truncate");
DeoptimizeIf(not_equal, instr->environment());
- DeoptimizeIf(parity_even, instr->environment()); // NaN.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ test(input_reg, Operand(input_reg));
- __ j(not_zero, &done);
- __ movmskpd(input_reg, xmm0);
- __ and_(input_reg, 1);
- DeoptimizeIf(not_zero, instr->environment());
- }
+ __ Set(input_reg, Immediate(0));
+ __ jmp(done);
+ } else {
+ Label bailout;
+ XMMRegister scratch = (instr->temp() != NULL)
+ ? ToDoubleRegister(instr->temp())
+ : no_xmm_reg;
+ __ TaggedToI(input_reg, input_reg, scratch,
+ instr->hydrogen()->GetMinusZeroMode(), &bailout);
+ __ jmp(done);
+ __ bind(&bailout);
+ DeoptimizeIf(no_condition, instr->environment());
}
- __ bind(&done);
}
void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
- class DeferredTaggedToI: public LDeferredCode {
+ class DeferredTaggedToI V8_FINAL : public LDeferredCode {
public:
- DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
- virtual LInstruction* instr() { return instr_; }
+ DeferredTaggedToI(LCodeGen* codegen,
+ LTaggedToI* instr,
+ const X87Stack& x87_stack)
+ : LDeferredCode(codegen, x87_stack), instr_(instr) { }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredTaggedToI(instr_, done());
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LTaggedToI* instr_;
};
LOperand* input = instr->value();
ASSERT(input->IsRegister());
- ASSERT(input->Equals(instr->result()));
-
Register input_reg = ToRegister(input);
+ ASSERT(input_reg.is(ToRegister(instr->result())));
- DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
-
- // Smi check.
- __ JumpIfNotSmi(input_reg, deferred->entry());
-
- // Smi to int32 conversion
- __ SmiUntag(input_reg); // Untag smi.
+ if (instr->hydrogen()->value()->representation().IsSmi()) {
+ __ SmiUntag(input_reg);
+ } else {
+ DeferredTaggedToI* deferred =
+ new(zone()) DeferredTaggedToI(this, instr, x87_stack_);
- __ bind(deferred->exit());
+ __ JumpIfNotSmi(input_reg, deferred->entry());
+ __ SmiUntag(input_reg);
+ __ bind(deferred->exit());
+ }
}
@@ -4467,23 +5459,38 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
LOperand* input = instr->value();
ASSERT(input->IsRegister());
LOperand* temp = instr->temp();
- ASSERT(temp == NULL || temp->IsRegister());
+ ASSERT(temp->IsRegister());
LOperand* result = instr->result();
ASSERT(result->IsDoubleRegister());
Register input_reg = ToRegister(input);
- XMMRegister result_reg = ToDoubleRegister(result);
-
bool deoptimize_on_minus_zero =
instr->hydrogen()->deoptimize_on_minus_zero();
- Register temp_reg = deoptimize_on_minus_zero ? ToRegister(temp) : no_reg;
-
- EmitNumberUntagD(input_reg,
- temp_reg,
- result_reg,
- instr->hydrogen()->deoptimize_on_undefined(),
- deoptimize_on_minus_zero,
- instr->environment());
+ Register temp_reg = ToRegister(temp);
+
+ HValue* value = instr->hydrogen()->value();
+ NumberUntagDMode mode = value->representation().IsSmi()
+ ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
+
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatureScope scope(masm(), SSE2);
+ XMMRegister result_reg = ToDoubleRegister(result);
+ EmitNumberUntagD(input_reg,
+ temp_reg,
+ result_reg,
+ instr->hydrogen()->can_convert_undefined_to_nan(),
+ deoptimize_on_minus_zero,
+ instr->environment(),
+ mode);
+ } else {
+ EmitNumberUntagDNoSSE2(input_reg,
+ temp_reg,
+ ToX87Register(instr->result()),
+ instr->hydrogen()->can_convert_undefined_to_nan(),
+ deoptimize_on_minus_zero,
+ instr->environment(),
+ mode);
+ }
}
@@ -4492,118 +5499,70 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
ASSERT(input->IsDoubleRegister());
LOperand* result = instr->result();
ASSERT(result->IsRegister());
-
- XMMRegister input_reg = ToDoubleRegister(input);
Register result_reg = ToRegister(result);
if (instr->truncating()) {
- // Performs a truncating conversion of a floating point number as used by
- // the JS bitwise operations.
- __ cvttsd2si(result_reg, Operand(input_reg));
- __ cmp(result_reg, 0x80000000u);
- if (CpuFeatures::IsSupported(SSE3)) {
- // This will deoptimize if the exponent of the input in out of range.
- CpuFeatures::Scope scope(SSE3);
- Label convert, done;
- __ j(not_equal, &done, Label::kNear);
- __ sub(Operand(esp), Immediate(kDoubleSize));
- __ movdbl(Operand(esp, 0), input_reg);
- // Get exponent alone and check for too-big exponent.
- __ mov(result_reg, Operand(esp, sizeof(int32_t)));
- __ and_(result_reg, HeapNumber::kExponentMask);
- const uint32_t kTooBigExponent =
- (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
- __ cmp(Operand(result_reg), Immediate(kTooBigExponent));
- __ j(less, &convert, Label::kNear);
- __ add(Operand(esp), Immediate(kDoubleSize));
- DeoptimizeIf(no_condition, instr->environment());
- __ bind(&convert);
- // Do conversion, which cannot fail because we checked the exponent.
- __ fld_d(Operand(esp, 0));
- __ fisttp_d(Operand(esp, 0));
- __ mov(result_reg, Operand(esp, 0)); // Low word of answer is the result.
- __ add(Operand(esp), Immediate(kDoubleSize));
- __ bind(&done);
+ if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
+ CpuFeatureScope scope(masm(), SSE2);
+ XMMRegister input_reg = ToDoubleRegister(input);
+ __ TruncateDoubleToI(result_reg, input_reg);
} else {
- Label done;
- Register temp_reg = ToRegister(instr->temp());
- XMMRegister xmm_scratch = xmm0;
-
- // If cvttsd2si succeeded, we're done. Otherwise, we attempt
- // manual conversion.
- __ j(not_equal, &done, Label::kNear);
-
- // Get high 32 bits of the input in result_reg and temp_reg.
- __ pshufd(xmm_scratch, input_reg, 1);
- __ movd(Operand(temp_reg), xmm_scratch);
- __ mov(result_reg, temp_reg);
-
- // Prepare negation mask in temp_reg.
- __ sar(temp_reg, kBitsPerInt - 1);
-
- // Extract the exponent from result_reg and subtract adjusted
- // bias from it. The adjustment is selected in a way such that
- // when the difference is zero, the answer is in the low 32 bits
- // of the input, otherwise a shift has to be performed.
- __ shr(result_reg, HeapNumber::kExponentShift);
- __ and_(result_reg,
- HeapNumber::kExponentMask >> HeapNumber::kExponentShift);
- __ sub(Operand(result_reg),
- Immediate(HeapNumber::kExponentBias +
- HeapNumber::kExponentBits +
- HeapNumber::kMantissaBits));
- // Don't handle big (> kMantissaBits + kExponentBits == 63) or
- // special exponents.
- DeoptimizeIf(greater, instr->environment());
-
- // Zero out the sign and the exponent in the input (by shifting
- // it to the left) and restore the implicit mantissa bit,
- // i.e. convert the input to unsigned int64 shifted left by
- // kExponentBits.
- ExternalReference minus_zero = ExternalReference::address_of_minus_zero();
- // Minus zero has the most significant bit set and the other
- // bits cleared.
- __ movdbl(xmm_scratch, Operand::StaticVariable(minus_zero));
- __ psllq(input_reg, HeapNumber::kExponentBits);
- __ por(input_reg, xmm_scratch);
-
- // Get the amount to shift the input right in xmm_scratch.
- __ neg(result_reg);
- __ movd(xmm_scratch, Operand(result_reg));
-
- // Shift the input right and extract low 32 bits.
- __ psrlq(input_reg, xmm_scratch);
- __ movd(Operand(result_reg), input_reg);
-
- // Use the prepared mask in temp_reg to negate the result if necessary.
- __ xor_(result_reg, Operand(temp_reg));
- __ sub(result_reg, Operand(temp_reg));
- __ bind(&done);
+ X87Register input_reg = ToX87Register(input);
+ X87Fxch(input_reg);
+ __ TruncateX87TOSToI(result_reg);
}
} else {
- Label done;
- __ cvttsd2si(result_reg, Operand(input_reg));
- __ cvtsi2sd(xmm0, Operand(result_reg));
- __ ucomisd(xmm0, input_reg);
- DeoptimizeIf(not_equal, instr->environment());
- DeoptimizeIf(parity_even, instr->environment()); // NaN.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- // The integer converted back is equal to the original. We
- // only have to test if we got -0 as an input.
- __ test(result_reg, Operand(result_reg));
- __ j(not_zero, &done, Label::kNear);
- __ movmskpd(result_reg, input_reg);
- // Bit 0 contains the sign of the double in input_reg.
- // If input was positive, we are ok and return 0, otherwise
- // deoptimize.
- __ and_(result_reg, 1);
- DeoptimizeIf(not_zero, instr->environment());
+ Label bailout, done;
+ if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
+ CpuFeatureScope scope(masm(), SSE2);
+ XMMRegister input_reg = ToDoubleRegister(input);
+ XMMRegister xmm_scratch = double_scratch0();
+ __ DoubleToI(result_reg, input_reg, xmm_scratch,
+ instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
+ } else {
+ X87Register input_reg = ToX87Register(input);
+ X87Fxch(input_reg);
+ __ X87TOSToI(result_reg, instr->hydrogen()->GetMinusZeroMode(),
+ &bailout, Label::kNear);
}
+ __ jmp(&done, Label::kNear);
+ __ bind(&bailout);
+ DeoptimizeIf(no_condition, instr->environment());
__ bind(&done);
}
}
+void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
+ LOperand* input = instr->value();
+ ASSERT(input->IsDoubleRegister());
+ LOperand* result = instr->result();
+ ASSERT(result->IsRegister());
+ Register result_reg = ToRegister(result);
+
+ Label bailout, done;
+ if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
+ CpuFeatureScope scope(masm(), SSE2);
+ XMMRegister input_reg = ToDoubleRegister(input);
+ XMMRegister xmm_scratch = double_scratch0();
+ __ DoubleToI(result_reg, input_reg, xmm_scratch,
+ instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
+ } else {
+ X87Register input_reg = ToX87Register(input);
+ X87Fxch(input_reg);
+ __ X87TOSToI(result_reg, instr->hydrogen()->GetMinusZeroMode(),
+ &bailout, Label::kNear);
+ }
+ __ jmp(&done, Label::kNear);
+ __ bind(&bailout);
+ DeoptimizeIf(no_condition, instr->environment());
+ __ bind(&done);
+
+ __ SmiTag(result_reg);
+ DeoptimizeIf(overflow, instr->environment());
+}
+
+
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
LOperand* input = instr->value();
__ test(ToOperand(input), Immediate(kSmiTagMask));
@@ -4612,9 +5571,11 @@ void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
- LOperand* input = instr->value();
- __ test(ToOperand(input), Immediate(kSmiTagMask));
- DeoptimizeIf(zero, instr->environment());
+ if (!instr->hydrogen()->value()->IsHeapObject()) {
+ LOperand* input = instr->value();
+ __ test(ToOperand(input), Immediate(kSmiTagMask));
+ DeoptimizeIf(zero, instr->environment());
+ }
}
@@ -4663,54 +5624,94 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
}
-void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
- Handle<JSFunction> target = instr->hydrogen()->target();
- if (isolate()->heap()->InNewSpace(*target)) {
+void LCodeGen::DoCheckValue(LCheckValue* instr) {
+ Handle<HeapObject> object = instr->hydrogen()->object().handle();
+ if (instr->hydrogen()->object_in_new_space()) {
Register reg = ToRegister(instr->value());
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(target);
- __ cmp(reg, Operand::Cell(cell));
+ Handle<Cell> cell = isolate()->factory()->NewCell(object);
+ __ cmp(reg, Operand::ForCell(cell));
} else {
Operand operand = ToOperand(instr->value());
- __ cmp(operand, target);
+ __ cmp(operand, object);
}
DeoptimizeIf(not_equal, instr->environment());
}
-void LCodeGen::DoCheckMapCommon(Register reg,
- Handle<Map> map,
- CompareMapMode mode,
- LEnvironment* env) {
- Label success;
- __ CompareMap(reg, map, &success, mode);
- DeoptimizeIf(not_equal, env);
- __ bind(&success);
+void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
+ {
+ PushSafepointRegistersScope scope(this);
+ __ push(object);
+ __ xor_(esi, esi);
+ __ CallRuntimeSaveDoubles(Runtime::kMigrateInstance);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
+
+ __ test(eax, Immediate(kSmiTagMask));
+ }
+ DeoptimizeIf(zero, instr->environment());
}
void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
+ class DeferredCheckMaps V8_FINAL : public LDeferredCode {
+ public:
+ DeferredCheckMaps(LCodeGen* codegen,
+ LCheckMaps* instr,
+ Register object,
+ const X87Stack& x87_stack)
+ : LDeferredCode(codegen, x87_stack), instr_(instr), object_(object) {
+ SetExit(check_maps());
+ }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredInstanceMigration(instr_, object_);
+ }
+ Label* check_maps() { return &check_maps_; }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ private:
+ LCheckMaps* instr_;
+ Label check_maps_;
+ Register object_;
+ };
+
+ if (instr->hydrogen()->CanOmitMapChecks()) return;
+
LOperand* input = instr->value();
ASSERT(input->IsRegister());
Register reg = ToRegister(input);
+ DeferredCheckMaps* deferred = NULL;
+ if (instr->hydrogen()->has_migration_target()) {
+ deferred = new(zone()) DeferredCheckMaps(this, instr, reg, x87_stack_);
+ __ bind(deferred->check_maps());
+ }
+
+ UniqueSet<Map> map_set = instr->hydrogen()->map_set();
Label success;
- SmallMapList* map_set = instr->hydrogen()->map_set();
- for (int i = 0; i < map_set->length() - 1; i++) {
- Handle<Map> map = map_set->at(i);
- __ CompareMap(reg, map, &success, REQUIRE_EXACT_MAP);
+ for (int i = 0; i < map_set.size() - 1; i++) {
+ Handle<Map> map = map_set.at(i).handle();
+ __ CompareMap(reg, map, &success);
__ j(equal, &success);
}
- Handle<Map> map = map_set->last();
- DoCheckMapCommon(reg, map, REQUIRE_EXACT_MAP, instr->environment());
+
+ Handle<Map> map = map_set.at(map_set.size() - 1).handle();
+ __ CompareMap(reg, map, &success);
+ if (instr->hydrogen()->has_migration_target()) {
+ __ j(not_equal, deferred->entry());
+ } else {
+ DeoptimizeIf(not_equal, instr->environment());
+ }
+
__ bind(&success);
}
void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
+ CpuFeatureScope scope(masm(), SSE2);
XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
+ XMMRegister xmm_scratch = double_scratch0();
Register result_reg = ToRegister(instr->result());
- __ ClampDoubleToUint8(value_reg, xmm0, result_reg);
+ __ ClampDoubleToUint8(value_reg, xmm_scratch, result_reg);
}
@@ -4722,8 +5723,12 @@ void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
+ CpuFeatureScope scope(masm(), SSE2);
+
ASSERT(instr->unclamped()->Equals(instr->result()));
Register input_reg = ToRegister(instr->unclamped());
+ XMMRegister temp_xmm_reg = ToDoubleRegister(instr->temp_xmm());
+ XMMRegister xmm_scratch = double_scratch0();
Label is_smi, done, heap_number;
__ JumpIfSmi(input_reg, &is_smi);
@@ -4742,375 +5747,237 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
// Heap number
__ bind(&heap_number);
- __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
- __ ClampDoubleToUint8(xmm0, xmm1, input_reg);
+ __ movsd(xmm_scratch, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ __ ClampDoubleToUint8(xmm_scratch, temp_xmm_reg, input_reg);
__ jmp(&done, Label::kNear);
// smi
__ bind(&is_smi);
__ SmiUntag(input_reg);
__ ClampUint8(input_reg);
-
__ bind(&done);
}
-void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
- Register reg = ToRegister(instr->temp());
+void LCodeGen::DoClampTToUint8NoSSE2(LClampTToUint8NoSSE2* instr) {
+ Register input_reg = ToRegister(instr->unclamped());
+ Register result_reg = ToRegister(instr->result());
+ Register scratch = ToRegister(instr->scratch());
+ Register scratch2 = ToRegister(instr->scratch2());
+ Register scratch3 = ToRegister(instr->scratch3());
+ Label is_smi, done, heap_number, valid_exponent,
+ largest_value, zero_result, maybe_nan_or_infinity;
- Handle<JSObject> holder = instr->holder();
- Handle<JSObject> current_prototype = instr->prototype();
+ __ JumpIfSmi(input_reg, &is_smi);
- // Load prototype object.
- __ LoadHeapObject(reg, current_prototype);
+ // Check for heap number
+ __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
+ factory()->heap_number_map());
+ __ j(equal, &heap_number, Label::kFar);
- // Check prototype maps up to the holder.
- while (!current_prototype.is_identical_to(holder)) {
- DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()),
- ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
+ // Check for undefined. Undefined is converted to zero for clamping
+ // conversions.
+ __ cmp(input_reg, factory()->undefined_value());
+ DeoptimizeIf(not_equal, instr->environment());
+ __ jmp(&zero_result);
- current_prototype =
- Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
- // Load next prototype object.
- __ LoadHeapObject(reg, current_prototype);
- }
+ // Heap number
+ __ bind(&heap_number);
- // Check the holder map.
- DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()),
- ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
-}
+ // Surprisingly, all of the hand-crafted bit manipulation below is much
+ // faster than the x86 FPU's built-in instruction, especially since
+ // "banker's rounding" would additionally be very expensive.
+
+ // Get exponent word.
+ __ mov(scratch, FieldOperand(input_reg, HeapNumber::kExponentOffset));
+ __ mov(scratch3, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
+
+ // Test for negative values --> clamp to zero
+ __ test(scratch, scratch);
+ __ j(negative, &zero_result);
+
+ // Get exponent alone in scratch2.
+ __ mov(scratch2, scratch);
+ __ and_(scratch2, HeapNumber::kExponentMask);
+ __ shr(scratch2, HeapNumber::kExponentShift);
+ __ j(zero, &zero_result);
+ __ sub(scratch2, Immediate(HeapNumber::kExponentBias - 1));
+ __ j(negative, &zero_result);
+
+ const uint32_t non_int8_exponent = 7;
+ __ cmp(scratch2, Immediate(non_int8_exponent + 1));
+ // If the exponent is too big, check for special values.
+ __ j(greater, &maybe_nan_or_infinity, Label::kNear);
+
+ __ bind(&valid_exponent);
+ // The exponent word is in scratch, the exponent alone in scratch2. We know
+ // that 0 <= exponent < 7. The shift bias is the number of bits to shift the
+ // mantissa so that, with an exponent of 7, the top-most one ends up in
+ // bit 30, allowing detection of the rounding overflow from 255.5 to 256
+ // (bit 31 goes from 0 to 1).
+ int shift_bias = (30 - HeapNumber::kExponentShift) - 7 - 1;
+ __ lea(result_reg, MemOperand(scratch2, shift_bias));
+ // Here result_reg (ecx) is the shift, scratch is the exponent word. Get the
+ // top bits of the mantissa.
+ __ and_(scratch, HeapNumber::kMantissaMask);
+ // Put back the implicit 1 of the mantissa
+ __ or_(scratch, 1 << HeapNumber::kExponentShift);
+ // Shift up to round
+ __ shl_cl(scratch);
+ // Use "banker's rounding" to spec: If fractional part of number is 0.5, then
+ // use the bit in the "ones" place and add it to the "halves" place, which has
+ // the effect of rounding to even.
+ __ mov(scratch2, scratch);
+ const uint32_t one_half_bit_shift = 30 - sizeof(uint8_t) * 8;
+ const uint32_t one_bit_shift = one_half_bit_shift + 1;
+ __ and_(scratch2, Immediate((1 << one_bit_shift) - 1));
+ __ cmp(scratch2, Immediate(1 << one_half_bit_shift));
+ Label no_round;
+ __ j(less, &no_round);
+ Label round_up;
+ __ mov(scratch2, Immediate(1 << one_half_bit_shift));
+ __ j(greater, &round_up);
+ __ test(scratch3, scratch3);
+ __ j(not_zero, &round_up);
+ __ mov(scratch2, scratch);
+ __ and_(scratch2, Immediate(1 << one_bit_shift));
+ __ shr(scratch2, 1);
+ __ bind(&round_up);
+ __ add(scratch, scratch2);
+ __ j(overflow, &largest_value);
+ __ bind(&no_round);
+ __ shr(scratch, 23);
+ __ mov(result_reg, scratch);
+ __ jmp(&done, Label::kNear);
+ __ bind(&maybe_nan_or_infinity);
+ // Check for NaN/Infinity; all other values map to 255.
+ __ cmp(scratch2, Immediate(HeapNumber::kInfinityOrNanExponent + 1));
+ __ j(not_equal, &largest_value, Label::kNear);
-void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
- class DeferredAllocateObject: public LDeferredCode {
- public:
- DeferredAllocateObject(LCodeGen* codegen, LAllocateObject* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredAllocateObject(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LAllocateObject* instr_;
- };
+ // Check for NaN, which differs from Infinity in that at least one mantissa
+ // bit is set.
+ __ and_(scratch, HeapNumber::kMantissaMask);
+ __ or_(scratch, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
+ __ j(not_zero, &zero_result); // M!=0 --> NaN
+ // Infinity -> Fall through to map to 255.
- DeferredAllocateObject* deferred =
- new(zone()) DeferredAllocateObject(this, instr);
+ __ bind(&largest_value);
+ __ mov(result_reg, Immediate(255));
+ __ jmp(&done, Label::kNear);
- Register result = ToRegister(instr->result());
- Register scratch = ToRegister(instr->temp());
- Handle<JSFunction> constructor = instr->hydrogen()->constructor();
- Handle<Map> initial_map(constructor->initial_map());
- int instance_size = initial_map->instance_size();
- ASSERT(initial_map->pre_allocated_property_fields() +
- initial_map->unused_property_fields() -
- initial_map->inobject_properties() == 0);
-
- // Allocate memory for the object. The initial map might change when
- // the constructor's prototype changes, but instance size and property
- // counts remain unchanged (if slack tracking finished).
- ASSERT(!constructor->shared()->IsInobjectSlackTrackingInProgress());
- __ AllocateInNewSpace(instance_size,
- result,
- no_reg,
- scratch,
- deferred->entry(),
- TAG_OBJECT);
+ __ bind(&zero_result);
+ __ xor_(result_reg, result_reg);
+ __ jmp(&done);
- __ bind(deferred->exit());
- if (FLAG_debug_code) {
- Label is_in_new_space;
- __ JumpIfInNewSpace(result, scratch, &is_in_new_space);
- __ Abort("Allocated object is not in new-space");
- __ bind(&is_in_new_space);
+ // smi
+ __ bind(&is_smi);
+ if (!input_reg.is(result_reg)) {
+ __ mov(result_reg, input_reg);
}
+ __ SmiUntag(result_reg);
+ __ ClampUint8(result_reg);
+ __ bind(&done);
+}
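A compact sketch of the clamping behaviour the hand-written sequence above implements, assuming the default round-to-nearest-even FP mode: NaN and negative inputs map to 0, Infinity and values of 255 or more map to 255, and everything in between rounds with ties to even (the helper name is hypothetical).

#include <cmath>
#include <cstdint>

static uint8_t ClampDoubleToUint8(double value) {
  if (std::isnan(value) || value <= 0.0) return 0;     // NaN and negatives clamp to 0
  if (value >= 255.0) return 255;                      // Infinity and large values clamp to 255
  return static_cast<uint8_t>(std::nearbyint(value));  // ties round to even in the default mode
}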
- // Load the initial map.
- Register map = scratch;
- __ LoadHeapObject(scratch, constructor);
- __ mov(map, FieldOperand(scratch, JSFunction::kPrototypeOrInitialMapOffset));
-
- if (FLAG_debug_code) {
- __ AssertNotSmi(map);
- __ cmpb(FieldOperand(map, Map::kInstanceSizeOffset),
- instance_size >> kPointerSizeLog2);
- __ Assert(equal, "Unexpected instance size");
- __ cmpb(FieldOperand(map, Map::kPreAllocatedPropertyFieldsOffset),
- initial_map->pre_allocated_property_fields());
- __ Assert(equal, "Unexpected pre-allocated property fields count");
- __ cmpb(FieldOperand(map, Map::kUnusedPropertyFieldsOffset),
- initial_map->unused_property_fields());
- __ Assert(equal, "Unexpected unused property fields count");
- __ cmpb(FieldOperand(map, Map::kInObjectPropertiesOffset),
- initial_map->inobject_properties());
- __ Assert(equal, "Unexpected in-object property fields count");
- }
-
- // Initialize map and fields of the newly allocated object.
- ASSERT(initial_map->instance_type() == JS_OBJECT_TYPE);
- __ mov(FieldOperand(result, JSObject::kMapOffset), map);
- __ mov(scratch, factory()->empty_fixed_array());
- __ mov(FieldOperand(result, JSObject::kElementsOffset), scratch);
- __ mov(FieldOperand(result, JSObject::kPropertiesOffset), scratch);
- if (initial_map->inobject_properties() != 0) {
- __ mov(scratch, factory()->undefined_value());
- for (int i = 0; i < initial_map->inobject_properties(); i++) {
- int property_offset = JSObject::kHeaderSize + i * kPointerSize;
- __ mov(FieldOperand(result, property_offset), scratch);
- }
- }
-}
-
-
-void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
- Register result = ToRegister(instr->result());
- Handle<JSFunction> constructor = instr->hydrogen()->constructor();
- Handle<Map> initial_map(constructor->initial_map());
- int instance_size = initial_map->instance_size();
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- __ Set(result, Immediate(0));
+void LCodeGen::DoAllocate(LAllocate* instr) {
+ class DeferredAllocate V8_FINAL : public LDeferredCode {
+ public:
+ DeferredAllocate(LCodeGen* codegen,
+ LAllocate* instr,
+ const X87Stack& x87_stack)
+ : LDeferredCode(codegen, x87_stack), instr_(instr) { }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredAllocate(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ private:
+ LAllocate* instr_;
+ };
- PushSafepointRegistersScope scope(this);
- __ push(Immediate(Smi::FromInt(instance_size)));
- CallRuntimeFromDeferred(
- Runtime::kAllocateInNewSpace, 1, instr, instr->context());
- __ StoreToSafepointRegisterSlot(result, eax);
-}
+ DeferredAllocate* deferred =
+ new(zone()) DeferredAllocate(this, instr, x87_stack_);
+ Register result = ToRegister(instr->result());
+ Register temp = ToRegister(instr->temp());
-void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
- ASSERT(ToRegister(instr->context()).is(esi));
- Handle<FixedArray> literals(instr->environment()->closure()->literals());
- ElementsKind boilerplate_elements_kind =
- instr->hydrogen()->boilerplate_elements_kind();
-
- // Deopt if the array literal boilerplate ElementsKind is of a type different
- // than the expected one. The check isn't necessary if the boilerplate has
- // already been converted to TERMINAL_FAST_ELEMENTS_KIND.
- if (CanTransitionToMoreGeneralFastElementsKind(
- boilerplate_elements_kind, true)) {
- __ LoadHeapObject(eax, instr->hydrogen()->boilerplate_object());
- __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- // Load the map's "bit field 2". We only need the first byte,
- // but the following masking takes care of that anyway.
- __ mov(ebx, FieldOperand(ebx, Map::kBitField2Offset));
- // Retrieve elements_kind from bit field 2.
- __ and_(ebx, Map::kElementsKindMask);
- __ cmp(ebx, boilerplate_elements_kind << Map::kElementsKindShift);
- DeoptimizeIf(not_equal, instr->environment());
+ // Allocate memory for the object.
+ AllocationFlags flags = TAG_OBJECT;
+ if (instr->hydrogen()->MustAllocateDoubleAligned()) {
+ flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
+ }
+ if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
+ ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
+ ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
+ flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
+ } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
+ ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
+ flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
}
- // Set up the parameters to the stub/runtime call.
- __ PushHeapObject(literals);
- __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
- // Boilerplate already exists, constant elements are never accessed.
- // Pass an empty fixed array.
- __ push(Immediate(isolate()->factory()->empty_fixed_array()));
-
- // Pick the right runtime function or stub to call.
- int length = instr->hydrogen()->length();
- if (instr->hydrogen()->IsCopyOnWrite()) {
- ASSERT(instr->hydrogen()->depth() == 1);
- FastCloneShallowArrayStub::Mode mode =
- FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
- FastCloneShallowArrayStub stub(mode, length);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- } else if (instr->hydrogen()->depth() > 1) {
- CallRuntime(Runtime::kCreateArrayLiteral, 3, instr);
- } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
- CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
+ if (instr->size()->IsConstantOperand()) {
+ int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+ __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
} else {
- FastCloneShallowArrayStub::Mode mode =
- boilerplate_elements_kind == FAST_DOUBLE_ELEMENTS
- ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
- : FastCloneShallowArrayStub::CLONE_ELEMENTS;
- FastCloneShallowArrayStub stub(mode, length);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ Register size = ToRegister(instr->size());
+ __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
}
-}
-
-void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
- Register result,
- Register source,
- int* offset) {
- ASSERT(!source.is(ecx));
- ASSERT(!result.is(ecx));
+ __ bind(deferred->exit());
- if (FLAG_debug_code) {
- __ LoadHeapObject(ecx, object);
- __ cmp(source, ecx);
- __ Assert(equal, "Unexpected object literal boilerplate");
- __ mov(ecx, FieldOperand(source, HeapObject::kMapOffset));
- __ cmp(ecx, Handle<Map>(object->map()));
- __ Assert(equal, "Unexpected boilerplate map");
- __ mov(ecx, FieldOperand(ecx, Map::kBitField2Offset));
- __ and_(ecx, Map::kElementsKindMask);
- __ cmp(ecx, object->GetElementsKind() << Map::kElementsKindShift);
- __ Assert(equal, "Unexpected boilerplate elements kind");
- }
-
- // Only elements backing stores for non-COW arrays need to be copied.
- Handle<FixedArrayBase> elements(object->elements());
- bool has_elements = elements->length() > 0 &&
- elements->map() != isolate()->heap()->fixed_cow_array_map();
-
- // Increase the offset so that subsequent objects end up right after
- // this object and its backing store.
- int object_offset = *offset;
- int object_size = object->map()->instance_size();
- int elements_offset = *offset + object_size;
- int elements_size = has_elements ? elements->Size() : 0;
- *offset += object_size + elements_size;
-
- // Copy object header.
- ASSERT(object->properties()->length() == 0);
- int inobject_properties = object->map()->inobject_properties();
- int header_size = object_size - inobject_properties * kPointerSize;
- for (int i = 0; i < header_size; i += kPointerSize) {
- if (has_elements && i == JSObject::kElementsOffset) {
- __ lea(ecx, Operand(result, elements_offset));
- } else {
- __ mov(ecx, FieldOperand(source, i));
- }
- __ mov(FieldOperand(result, object_offset + i), ecx);
- }
-
- // Copy in-object properties.
- for (int i = 0; i < inobject_properties; i++) {
- int total_offset = object_offset + object->GetInObjectPropertyOffset(i);
- Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i));
- if (value->IsJSObject()) {
- Handle<JSObject> value_object = Handle<JSObject>::cast(value);
- __ lea(ecx, Operand(result, *offset));
- __ mov(FieldOperand(result, total_offset), ecx);
- __ LoadHeapObject(source, value_object);
- EmitDeepCopy(value_object, result, source, offset);
- } else if (value->IsHeapObject()) {
- __ LoadHeapObject(ecx, Handle<HeapObject>::cast(value));
- __ mov(FieldOperand(result, total_offset), ecx);
- } else {
- __ mov(FieldOperand(result, total_offset), Immediate(value));
- }
- }
-
- if (has_elements) {
- // Copy elements backing store header.
- __ LoadHeapObject(source, elements);
- for (int i = 0; i < FixedArray::kHeaderSize; i += kPointerSize) {
- __ mov(ecx, FieldOperand(source, i));
- __ mov(FieldOperand(result, elements_offset + i), ecx);
- }
-
- // Copy elements backing store content.
- int elements_length = elements->length();
- if (elements->IsFixedDoubleArray()) {
- Handle<FixedDoubleArray> double_array =
- Handle<FixedDoubleArray>::cast(elements);
- for (int i = 0; i < elements_length; i++) {
- int64_t value = double_array->get_representation(i);
- int32_t value_low = static_cast<int32_t>(value & 0xFFFFFFFF);
- int32_t value_high = static_cast<int32_t>(value >> 32);
- int total_offset =
- elements_offset + FixedDoubleArray::OffsetOfElementAt(i);
- __ mov(FieldOperand(result, total_offset), Immediate(value_low));
- __ mov(FieldOperand(result, total_offset + 4), Immediate(value_high));
- }
- } else if (elements->IsFixedArray()) {
- Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
- for (int i = 0; i < elements_length; i++) {
- int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
- Handle<Object> value(fast_elements->get(i));
- if (value->IsJSObject()) {
- Handle<JSObject> value_object = Handle<JSObject>::cast(value);
- __ lea(ecx, Operand(result, *offset));
- __ mov(FieldOperand(result, total_offset), ecx);
- __ LoadHeapObject(source, value_object);
- EmitDeepCopy(value_object, result, source, offset);
- } else if (value->IsHeapObject()) {
- __ LoadHeapObject(ecx, Handle<HeapObject>::cast(value));
- __ mov(FieldOperand(result, total_offset), ecx);
- } else {
- __ mov(FieldOperand(result, total_offset), Immediate(value));
- }
- }
+ if (instr->hydrogen()->MustPrefillWithFiller()) {
+ if (instr->size()->IsConstantOperand()) {
+ int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+ __ mov(temp, (size / kPointerSize) - 1);
} else {
- UNREACHABLE();
+ temp = ToRegister(instr->size());
+ __ shr(temp, kPointerSizeLog2);
+ __ dec(temp);
}
+ Label loop;
+ __ bind(&loop);
+ __ mov(FieldOperand(result, temp, times_pointer_size, 0),
+ isolate()->factory()->one_pointer_filler_map());
+ __ dec(temp);
+ __ j(not_zero, &loop);
}
}
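
// Illustrative sketch (plain C++, not the emitted assembly) of the filler loop
// above: every pointer-sized slot of the fresh allocation except slot 0 (the
// map word) is written with the one-pointer filler map, so a heap walk that
// happens before the real fields are stored still sees valid fillers. The
// helper name and the explicit kPointerSize constant are assumptions added
// here for clarity.
#include <cstdint>

void PrefillWithFiller(intptr_t* object, int size_in_bytes,
                       intptr_t one_pointer_filler_map) {
  const int kPointerSize = 4;  // ia32 word size.
  for (int slot = size_in_bytes / kPointerSize - 1; slot > 0; slot--) {
    object[slot] = one_pointer_filler_map;  // Mirrors the dec/jnz loop above.
  }
}
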
-void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
- ASSERT(ToRegister(instr->context()).is(esi));
- int size = instr->hydrogen()->total_size();
- ElementsKind boilerplate_elements_kind =
- instr->hydrogen()->boilerplate()->GetElementsKind();
-
- // Deopt if the literal boilerplate ElementsKind is of a type different than
- // the expected one. The check isn't necessary if the boilerplate has already
- // already been converted to TERMINAL_FAST_ELEMENTS_KIND.
- if (CanTransitionToMoreGeneralFastElementsKind(
- boilerplate_elements_kind, true)) {
- __ LoadHeapObject(ebx, instr->hydrogen()->boilerplate());
- __ mov(ecx, FieldOperand(ebx, HeapObject::kMapOffset));
- // Load the map's "bit field 2". We only need the first byte,
- // but the following masking takes care of that anyway.
- __ mov(ecx, FieldOperand(ecx, Map::kBitField2Offset));
- // Retrieve elements_kind from bit field 2.
- __ and_(ecx, Map::kElementsKindMask);
- __ cmp(ecx, boilerplate_elements_kind << Map::kElementsKindShift);
- DeoptimizeIf(not_equal, instr->environment());
- }
-
- // Allocate all objects that are part of the literal in one big
- // allocation. This avoids multiple limit checks.
- Label allocated, runtime_allocate;
- __ AllocateInNewSpace(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT);
- __ jmp(&allocated);
-
- __ bind(&runtime_allocate);
- __ push(Immediate(Smi::FromInt(size)));
- CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
-
- __ bind(&allocated);
- int offset = 0;
- __ LoadHeapObject(ebx, instr->hydrogen()->boilerplate());
- EmitDeepCopy(instr->hydrogen()->boilerplate(), eax, ebx, &offset);
- ASSERT_EQ(size, offset);
-}
-
+void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
+ Register result = ToRegister(instr->result());
-void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
- ASSERT(ToRegister(instr->context()).is(esi));
- Handle<FixedArray> literals(instr->environment()->closure()->literals());
- Handle<FixedArray> constant_properties =
- instr->hydrogen()->constant_properties();
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ __ mov(result, Immediate(Smi::FromInt(0)));
- // Set up the parameters to the stub/runtime call.
- __ PushHeapObject(literals);
- __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
- __ push(Immediate(constant_properties));
- int flags = instr->hydrogen()->fast_elements()
- ? ObjectLiteral::kFastElements
- : ObjectLiteral::kNoFlags;
- flags |= instr->hydrogen()->has_function()
- ? ObjectLiteral::kHasFunction
- : ObjectLiteral::kNoFlags;
- __ push(Immediate(Smi::FromInt(flags)));
-
- // Pick the right runtime function or stub to call.
- int properties_count = constant_properties->length() / 2;
- if (instr->hydrogen()->depth() > 1) {
- CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
- } else if (flags != ObjectLiteral::kFastElements ||
- properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
- CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
+ PushSafepointRegistersScope scope(this);
+ if (instr->size()->IsRegister()) {
+ Register size = ToRegister(instr->size());
+ ASSERT(!size.is(result));
+ __ SmiTag(ToRegister(instr->size()));
+ __ push(size);
} else {
- FastCloneShallowObjectStub stub(properties_count);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+ __ push(Immediate(Smi::FromInt(size)));
+ }
+
+ if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
+ ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
+ ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
+ CallRuntimeFromDeferred(
+ Runtime::kAllocateInOldPointerSpace, 1, instr, instr->context());
+ } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
+ ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
+ CallRuntimeFromDeferred(
+ Runtime::kAllocateInOldDataSpace, 1, instr, instr->context());
+ } else {
+ CallRuntimeFromDeferred(
+ Runtime::kAllocateInNewSpace, 1, instr, instr->context());
}
+ __ StoreToSafepointRegisterSlot(result, eax);
}
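
// Illustrative note (hypothetical helper, not from the patch): on 32-bit V8 a
// smi is the integer value shifted left by one with a zero tag bit, so both
// the pushed Smi::FromInt(size) and the SmiTag of the size register above
// amount to doubling the byte count before it is handed to the runtime.
static inline int SmiFromIntIa32(int value) {
  return value << 1;  // kSmiTagSize == 1, kSmiTag == 0 on 32-bit targets.
}
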
@@ -5148,7 +6015,7 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
__ bind(&materialized);
int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
Label allocated, runtime_allocate;
- __ AllocateInNewSpace(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT);
+ __ Allocate(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT);
__ jmp(&allocated);
__ bind(&runtime_allocate);
@@ -5177,18 +6044,17 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
// Use the fast case closure allocation code that allocates in new
// space for nested functions that don't need literals cloning.
- Handle<SharedFunctionInfo> shared_info = instr->shared_info();
bool pretenure = instr->hydrogen()->pretenure();
- if (!pretenure && shared_info->num_literals() == 0) {
- FastNewClosureStub stub(shared_info->language_mode());
- __ push(Immediate(shared_info));
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ if (!pretenure && instr->hydrogen()->has_no_literals()) {
+ FastNewClosureStub stub(instr->hydrogen()->language_mode(),
+ instr->hydrogen()->is_generator());
+ __ mov(ebx, Immediate(instr->hydrogen()->shared_info()));
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
} else {
__ push(esi);
- __ push(Immediate(shared_info));
- __ push(Immediate(pretenure
- ? factory()->true_value()
- : factory()->false_value()));
+ __ push(Immediate(instr->hydrogen()->shared_info()));
+ __ push(Immediate(pretenure ? factory()->true_value()
+ : factory()->false_value()));
CallRuntime(Runtime::kNewClosure, 3, instr);
}
}
@@ -5203,15 +6069,12 @@ void LCodeGen::DoTypeof(LTypeof* instr) {
void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
Register input = ToRegister(instr->value());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
Condition final_branch_condition =
- EmitTypeofIs(true_label, false_label, input, instr->type_literal());
+ EmitTypeofIs(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
+ input, instr->type_literal());
if (final_branch_condition != no_condition) {
- EmitBranch(true_block, false_block, final_branch_condition);
+ EmitBranch(instr, final_branch_condition);
}
}
@@ -5221,13 +6084,13 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
Register input,
Handle<String> type_name) {
Condition final_branch_condition = no_condition;
- if (type_name->Equals(heap()->number_symbol())) {
+ if (type_name->Equals(heap()->number_string())) {
__ JumpIfSmi(input, true_label);
__ cmp(FieldOperand(input, HeapObject::kMapOffset),
factory()->heap_number_map());
final_branch_condition = equal;
- } else if (type_name->Equals(heap()->string_symbol())) {
+ } else if (type_name->Equals(heap()->string_string())) {
__ JumpIfSmi(input, false_label);
__ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
__ j(above_equal, false_label);
@@ -5235,17 +6098,22 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
1 << Map::kIsUndetectable);
final_branch_condition = zero;
- } else if (type_name->Equals(heap()->boolean_symbol())) {
+ } else if (type_name->Equals(heap()->symbol_string())) {
+ __ JumpIfSmi(input, false_label);
+ __ CmpObjectType(input, SYMBOL_TYPE, input);
+ final_branch_condition = equal;
+
+ } else if (type_name->Equals(heap()->boolean_string())) {
__ cmp(input, factory()->true_value());
__ j(equal, true_label);
__ cmp(input, factory()->false_value());
final_branch_condition = equal;
- } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_symbol())) {
+ } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_string())) {
__ cmp(input, factory()->null_value());
final_branch_condition = equal;
- } else if (type_name->Equals(heap()->undefined_symbol())) {
+ } else if (type_name->Equals(heap()->undefined_string())) {
__ cmp(input, factory()->undefined_value());
__ j(equal, true_label);
__ JumpIfSmi(input, false_label);
@@ -5255,7 +6123,7 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
1 << Map::kIsUndetectable);
final_branch_condition = not_zero;
- } else if (type_name->Equals(heap()->function_symbol())) {
+ } else if (type_name->Equals(heap()->function_string())) {
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
__ JumpIfSmi(input, false_label);
__ CmpObjectType(input, JS_FUNCTION_TYPE, input);
@@ -5263,7 +6131,7 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
__ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE);
final_branch_condition = equal;
- } else if (type_name->Equals(heap()->object_symbol())) {
+ } else if (type_name->Equals(heap()->object_string())) {
__ JumpIfSmi(input, false_label);
if (!FLAG_harmony_typeof) {
__ cmp(input, factory()->null_value());
@@ -5287,11 +6155,9 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
Register temp = ToRegister(instr->temp());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
EmitIsConstructCall(temp);
- EmitBranch(true_block, false_block, equal);
+ EmitBranch(instr, equal);
}
@@ -5313,21 +6179,22 @@ void LCodeGen::EmitIsConstructCall(Register temp) {
}
-void LCodeGen::EnsureSpaceForLazyDeopt() {
- // Ensure that we have enough space after the previous lazy-bailout
- // instruction for patching the code here.
- int current_pc = masm()->pc_offset();
- int patch_size = Deoptimizer::patch_size();
- if (current_pc < last_lazy_deopt_pc_ + patch_size) {
- int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
- __ Nop(padding_size);
+void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
+ if (!info()->IsStub()) {
+ // Ensure that we have enough space after the previous lazy-bailout
+ // instruction for patching the code here.
+ int current_pc = masm()->pc_offset();
+ if (current_pc < last_lazy_deopt_pc_ + space_needed) {
+ int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+ __ Nop(padding_size);
+ }
}
last_lazy_deopt_pc_ = masm()->pc_offset();
}
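
// Illustrative sketch of the padding rule in EnsureSpaceForLazyDeopt above,
// with hypothetical numbers: if the previous lazy-deopt point was recorded at
// pc offset 100 and patching needs 5 bytes, a site reached at offset 102 gets
// 100 + 5 - 102 = 3 bytes of Nop padding, while a site at offset 110 gets none.
int LazyDeoptPaddingBytes(int last_lazy_deopt_pc, int space_needed,
                          int current_pc) {
  int end_of_patch_area = last_lazy_deopt_pc + space_needed;
  return current_pc < end_of_patch_area ? end_of_patch_area - current_pc : 0;
}
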
void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
- EnsureSpaceForLazyDeopt();
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
ASSERT(instr->HasEnvironment());
LEnvironment* env = instr->environment();
RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
@@ -5336,25 +6203,26 @@ void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
- DeoptimizeIf(no_condition, instr->environment());
+ Deoptimizer::BailoutType type = instr->hydrogen()->type();
+ // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
+ // needed return address), even though the implementation of LAZY and EAGER is
+ // now identical. When LAZY is eventually completely folded into EAGER, remove
+ // the special case below.
+ if (info()->IsStub() && type == Deoptimizer::EAGER) {
+ type = Deoptimizer::LAZY;
+ }
+ Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
+ DeoptimizeIf(no_condition, instr->environment(), type);
}
-void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
- LOperand* obj = instr->object();
- LOperand* key = instr->key();
- __ push(ToOperand(obj));
- EmitPushTaggedOperand(key);
- ASSERT(instr->HasPointerMap());
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
- // Create safepoint generator that will also ensure enough space in the
- // reloc info for patching in deoptimization (since this is invoking a
- // builtin)
- SafepointGenerator safepoint_generator(
- this, pointers, Safepoint::kLazyDeopt);
- __ push(Immediate(Smi::FromInt(strict_mode_flag())));
- __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, safepoint_generator);
+void LCodeGen::DoDummy(LDummy* instr) {
+ // Nothing to see here, move on!
+}
+
+
+void LCodeGen::DoDummyUse(LDummyUse* instr) {
+ // Nothing to see here, move on!
}
@@ -5371,12 +6239,16 @@ void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
void LCodeGen::DoStackCheck(LStackCheck* instr) {
- class DeferredStackCheck: public LDeferredCode {
+ class DeferredStackCheck V8_FINAL : public LDeferredCode {
public:
- DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
- virtual LInstruction* instr() { return instr_; }
+ DeferredStackCheck(LCodeGen* codegen,
+ LStackCheck* instr,
+ const X87Stack& x87_stack)
+ : LDeferredCode(codegen, x87_stack), instr_(instr) { }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredStackCheck(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LStackCheck* instr_;
};
@@ -5395,9 +6267,10 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
ASSERT(instr->context()->IsRegister());
ASSERT(ToRegister(instr->context()).is(esi));
- StackCheckStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- EnsureSpaceForLazyDeopt();
+ CallCode(isolate()->builtins()->StackCheck(),
+ RelocInfo::CODE_TARGET,
+ instr);
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
__ bind(&done);
RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
@@ -5405,12 +6278,12 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
ASSERT(instr->hydrogen()->is_backwards_branch());
// Perform stack overflow check if this goto needs it before jumping.
DeferredStackCheck* deferred_stack_check =
- new(zone()) DeferredStackCheck(this, instr);
+ new(zone()) DeferredStackCheck(this, instr, x87_stack_);
ExternalReference stack_limit =
ExternalReference::address_of_stack_limit(isolate());
__ cmp(esp, Operand::StaticVariable(stack_limit));
__ j(below, deferred_stack_check->entry());
- EnsureSpaceForLazyDeopt();
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
__ bind(instr->done_label());
deferred_stack_check->SetExit(instr->done_label());
RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
@@ -5426,29 +6299,13 @@ void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
// properly registered for deoptimization and records the assembler's PC
// offset.
LEnvironment* environment = instr->environment();
- environment->SetSpilledRegisters(instr->SpilledRegisterArray(),
- instr->SpilledDoubleRegisterArray());
// If the environment were already registered, we would have no way of
// backpatching it with the spill slot operands.
ASSERT(!environment->HasBeenRegistered());
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
- ASSERT(osr_pc_offset_ == -1);
- osr_pc_offset_ = masm()->pc_offset();
-}
-
-void LCodeGen::DoIn(LIn* instr) {
- LOperand* obj = instr->object();
- LOperand* key = instr->key();
- EmitPushTaggedOperand(key);
- EmitPushTaggedOperand(obj);
- ASSERT(instr->HasPointerMap());
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
- SafepointGenerator safepoint_generator(
- this, pointers, Safepoint::kLazyDeopt);
- __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION, safepoint_generator);
+ GenerateOsrPrologue();
}
diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.h b/deps/v8/src/ia32/lithium-codegen-ia32.h
index 6670024db..78bc69de9 100644
--- a/deps/v8/src/ia32/lithium-codegen-ia32.h
+++ b/deps/v8/src/ia32/lithium-codegen-ia32.h
@@ -32,9 +32,11 @@
#include "checks.h"
#include "deoptimizer.h"
+#include "ia32/lithium-gap-resolver-ia32.h"
+#include "lithium-codegen.h"
#include "safepoint-table.h"
#include "scopes.h"
-#include "ia32/lithium-gap-resolver-ia32.h"
+#include "v8utils.h"
namespace v8 {
namespace internal {
@@ -44,48 +46,87 @@ class LDeferredCode;
class LGapNode;
class SafepointGenerator;
-class LCodeGen BASE_EMBEDDED {
+class LCodeGen: public LCodeGenBase {
public:
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
- : zone_(info->zone()),
- chunk_(static_cast<LPlatformChunk*>(chunk)),
- masm_(assembler),
- info_(info),
- current_block_(-1),
- current_instruction_(-1),
- instructions_(chunk->instructions()),
+ : LCodeGenBase(chunk, assembler, info),
deoptimizations_(4, info->zone()),
+ jump_table_(4, info->zone()),
deoptimization_literals_(8, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
- status_(UNUSED),
translations_(info->zone()),
deferred_(8, info->zone()),
dynamic_frame_alignment_(false),
+ support_aligned_spilled_doubles_(false),
osr_pc_offset_(-1),
- last_lazy_deopt_pc_(0),
+ frame_is_built_(false),
+ x87_stack_(assembler),
safepoints_(info->zone()),
resolver_(this),
expected_safepoint_kind_(Safepoint::kSimple) {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
}
- // Simple accessors.
- MacroAssembler* masm() const { return masm_; }
- CompilationInfo* info() const { return info_; }
- Isolate* isolate() const { return info_->isolate(); }
- Factory* factory() const { return isolate()->factory(); }
- Heap* heap() const { return isolate()->heap(); }
- Zone* zone() const { return zone_; }
+ int LookupDestination(int block_id) const {
+ return chunk()->LookupDestination(block_id);
+ }
+
+ bool IsNextEmittedBlock(int block_id) const {
+ return LookupDestination(block_id) == GetNextEmittedBlock();
+ }
+
+ bool NeedsEagerFrame() const {
+ return GetStackSlotCount() > 0 ||
+ info()->is_non_deferred_calling() ||
+ !info()->IsStub() ||
+ info()->requires_frame();
+ }
+ bool NeedsDeferredFrame() const {
+ return !NeedsEagerFrame() && info()->is_deferred_calling();
+ }
// Support for converting LOperands to assembler types.
Operand ToOperand(LOperand* op) const;
Register ToRegister(LOperand* op) const;
XMMRegister ToDoubleRegister(LOperand* op) const;
+ X87Register ToX87Register(LOperand* op) const;
bool IsInteger32(LConstantOperand* op) const;
- Immediate ToInteger32Immediate(LOperand* op) const {
- return Immediate(ToInteger32(LConstantOperand::cast(op)));
+ bool IsSmi(LConstantOperand* op) const;
+ Immediate ToImmediate(LOperand* op, const Representation& r) const {
+ return Immediate(ToRepresentation(LConstantOperand::cast(op), r));
+ }
+ double ToDouble(LConstantOperand* op) const;
+
+ // Support for non-sse2 (x87) floating point stack handling.
+ // These functions maintain the mapping of physical stack registers to our
+ // virtual registers between instructions.
+ enum X87OperandType { kX87DoubleOperand, kX87FloatOperand, kX87IntOperand };
+
+ void X87Mov(X87Register reg, Operand src,
+ X87OperandType operand = kX87DoubleOperand);
+ void X87Mov(Operand src, X87Register reg,
+ X87OperandType operand = kX87DoubleOperand);
+
+ void X87PrepareBinaryOp(
+ X87Register left, X87Register right, X87Register result);
+
+ void X87LoadForUsage(X87Register reg);
+ void X87LoadForUsage(X87Register reg1, X87Register reg2);
+ void X87PrepareToWrite(X87Register reg) { x87_stack_.PrepareToWrite(reg); }
+ void X87CommitWrite(X87Register reg) { x87_stack_.CommitWrite(reg); }
+
+ void X87Fxch(X87Register reg, int other_slot = 0) {
+ x87_stack_.Fxch(reg, other_slot);
+ }
+ void X87Free(X87Register reg) {
+ x87_stack_.Free(reg);
+ }
+
+
+ bool X87StackEmpty() {
+ return x87_stack_.depth() == 0;
}
Handle<Object> ToHandle(LConstantOperand* op) const;
@@ -111,28 +152,22 @@ class LCodeGen BASE_EMBEDDED {
LOperand* value,
IntegerSignedness signedness);
- void DoDeferredTaggedToI(LTaggedToI* instr);
- void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
+ void DoDeferredTaggedToI(LTaggedToI* instr, Label* done);
+ void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
void DoDeferredStackCheck(LStackCheck* instr);
- void DoDeferredRandom(LRandom* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
- void DoDeferredAllocateObject(LAllocateObject* instr);
+ void DoDeferredAllocate(LAllocate* instr);
void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label* map_check);
-
- void DoCheckMapCommon(Register reg, Handle<Map> map,
- CompareMapMode mode, LEnvironment* env);
+ void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
// Parallel move support.
void DoParallelMove(LParallelMove* move);
void DoGap(LGap* instr);
// Emit frame translation commands for an environment.
- void WriteTranslation(LEnvironment* environment,
- Translation* translation,
- int* arguments_index,
- int* arguments_count);
+ void WriteTranslation(LEnvironment* environment, Translation* translation);
void EnsureRelocSpaceForDeoptimization();
@@ -142,27 +177,13 @@ class LCodeGen BASE_EMBEDDED {
#undef DECLARE_DO
private:
- enum Status {
- UNUSED,
- GENERATING,
- DONE,
- ABORTED
- };
-
- bool is_unused() const { return status_ == UNUSED; }
- bool is_generating() const { return status_ == GENERATING; }
- bool is_done() const { return status_ == DONE; }
- bool is_aborted() const { return status_ == ABORTED; }
-
StrictModeFlag strict_mode_flag() const {
return info()->is_classic_mode() ? kNonStrictMode : kStrictMode;
}
- LPlatformChunk* chunk() const { return chunk_; }
Scope* scope() const { return scope_; }
- HGraph* graph() const { return chunk_->graph(); }
- int GetNextEmittedBlock(int block);
+ XMMRegister double_scratch0() const { return xmm0; }
void EmitClassOfTest(Label* if_true,
Label* if_false,
@@ -172,23 +193,23 @@ class LCodeGen BASE_EMBEDDED {
Register temporary2);
int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
- int GetParameterCount() const { return scope()->num_parameters(); }
- void Abort(const char* reason);
- void Comment(const char* format, ...);
+ void Abort(BailoutReason reason);
void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
// Code generation passes. Returns true if code generation should
// continue.
+ void GenerateBodyInstructionPre(LInstruction* instr) V8_OVERRIDE;
+ void GenerateBodyInstructionPost(LInstruction* instr) V8_OVERRIDE;
bool GeneratePrologue();
- bool GenerateBody();
bool GenerateDeferredCode();
- // Pad the reloc info to ensure that we have enough space to patch during
- // deoptimization.
- bool GenerateRelocPadding();
+ bool GenerateJumpTable();
bool GenerateSafepointTable();
+ // Generates the custom OSR entrypoint and sets the osr_pc_offset.
+ void GenerateOsrPrologue();
+
enum SafepointMode {
RECORD_SIMPLE_SAFEPOINT,
RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
@@ -205,7 +226,8 @@ class LCodeGen BASE_EMBEDDED {
void CallRuntime(const Runtime::Function* fun,
int argc,
- LInstruction* instr);
+ LInstruction* instr,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs);
void CallRuntime(Runtime::FunctionId id,
int argc,
@@ -219,6 +241,8 @@ class LCodeGen BASE_EMBEDDED {
LInstruction* instr,
LOperand* context);
+ void LoadContextFromDeferred(LOperand* context);
+
enum EDIState {
EDI_UNINITIALIZED,
EDI_CONTAINS_TARGET
@@ -227,6 +251,7 @@ class LCodeGen BASE_EMBEDDED {
// Generate a direct call to a known function. Expects the function
// to be in edi.
void CallKnownFunction(Handle<JSFunction> function,
+ int formal_parameter_count,
int arity,
LInstruction* instr,
CallKind call_kind,
@@ -237,14 +262,20 @@ class LCodeGen BASE_EMBEDDED {
void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode);
+ void DeoptimizeIf(Condition cc,
+ LEnvironment* environment,
+ Deoptimizer::BailoutType bailout_type);
void DeoptimizeIf(Condition cc, LEnvironment* environment);
+ void ApplyCheckIf(Condition cc, LBoundsCheck* check);
- void AddToTranslation(Translation* translation,
+ void AddToTranslation(LEnvironment* environment,
+ Translation* translation,
LOperand* op,
bool is_tagged,
bool is_uint32,
- int arguments_index,
- int arguments_count);
+ int* object_index_pointer,
+ int* dematerialized_index_pointer);
+ void RegisterDependentCodeForEmbeddedMaps(Handle<Code> code);
void PopulateDeoptimizationData(Handle<Code> code);
int DefineDeoptimizationLiteral(Handle<Object> literal);
@@ -252,9 +283,11 @@ class LCodeGen BASE_EMBEDDED {
Register ToRegister(int index) const;
XMMRegister ToDoubleRegister(int index) const;
- int ToInteger32(LConstantOperand* op) const;
+ X87Register ToX87Register(int index) const;
+ int32_t ToRepresentation(LConstantOperand* op, const Representation& r) const;
+ int32_t ToInteger32(LConstantOperand* op) const;
+ ExternalReference ToExternalReference(LConstantOperand* op) const;
- double ToDouble(LConstantOperand* op) const;
Operand BuildFastArrayOperand(LOperand* elements_pointer,
LOperand* key,
Representation key_representation,
@@ -262,16 +295,7 @@ class LCodeGen BASE_EMBEDDED {
uint32_t offset,
uint32_t additional_index = 0);
- // Specific math operations - used from DoUnaryMathOperation.
- void EmitIntegerMathAbs(LUnaryMathOperation* instr);
- void DoMathAbs(LUnaryMathOperation* instr);
- void DoMathFloor(LUnaryMathOperation* instr);
- void DoMathRound(LUnaryMathOperation* instr);
- void DoMathSqrt(LUnaryMathOperation* instr);
- void DoMathLog(LUnaryMathOperation* instr);
- void DoMathTan(LUnaryMathOperation* instr);
- void DoMathCos(LUnaryMathOperation* instr);
- void DoMathSin(LUnaryMathOperation* instr);
+ void EmitIntegerMathAbs(LMathAbs* instr);
// Support for recording safepoint and position information.
void RecordSafepoint(LPointerMap* pointers,
@@ -283,21 +307,32 @@ class LCodeGen BASE_EMBEDDED {
void RecordSafepointWithRegisters(LPointerMap* pointers,
int arguments,
Safepoint::DeoptMode mode);
- void RecordPosition(int position);
+
+ void RecordAndWritePosition(int position) V8_OVERRIDE;
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block);
- void EmitBranch(int left_block, int right_block, Condition cc);
- void EmitNumberUntagD(Register input,
- Register temp,
- XMMRegister result,
- bool deoptimize_on_undefined,
- bool deoptimize_on_minus_zero,
- LEnvironment* env);
-
- void DeoptIfTaggedButNotSmi(LEnvironment* environment,
- HValue* value,
- LOperand* operand);
+ template<class InstrType>
+ void EmitBranch(InstrType instr, Condition cc);
+ template<class InstrType>
+ void EmitFalseBranch(InstrType instr, Condition cc);
+ void EmitNumberUntagD(
+ Register input,
+ Register temp,
+ XMMRegister result,
+ bool allow_undefined_as_nan,
+ bool deoptimize_on_minus_zero,
+ LEnvironment* env,
+ NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED);
+
+ void EmitNumberUntagDNoSSE2(
+ Register input,
+ Register temp,
+ X87Register res_reg,
+ bool allow_undefined_as_nan,
+ bool deoptimize_on_minus_zero,
+ LEnvironment* env,
+ NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED);
// Emits optimized code for typeof x == "y". Modifies input register.
// Returns the condition on which a final split to
@@ -320,49 +355,111 @@ class LCodeGen BASE_EMBEDDED {
// true and false label should be made, to optimize fallthrough.
Condition EmitIsString(Register input,
Register temp1,
- Label* is_not_string);
+ Label* is_not_string,
+ SmiCheck check_needed);
// Emits optimized code for %_IsConstructCall().
// Caller should branch on equal condition.
void EmitIsConstructCall(Register temp);
- void EmitLoadFieldOrConstantFunction(Register result,
- Register object,
- Handle<Map> type,
- Handle<String> name,
- LEnvironment* env);
-
// Emits optimized code to deep-copy the contents of statically known
// object graphs (e.g. object literal boilerplate).
void EmitDeepCopy(Handle<JSObject> object,
Register result,
Register source,
- int* offset);
+ int* offset,
+ AllocationSiteMode mode);
+
+ void EnsureSpaceForLazyDeopt(int space_needed) V8_OVERRIDE;
+ void DoLoadKeyedExternalArray(LLoadKeyed* instr);
+ void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
+ void DoLoadKeyedFixedArray(LLoadKeyed* instr);
+ void DoStoreKeyedExternalArray(LStoreKeyed* instr);
+ void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
+ void DoStoreKeyedFixedArray(LStoreKeyed* instr);
- void EnsureSpaceForLazyDeopt();
+ void EmitReturn(LReturn* instr, bool dynamic_frame_alignment);
// Emits code for pushing either a tagged constant, a (non-double)
// register, or a stack slot operand.
void EmitPushTaggedOperand(LOperand* operand);
- Zone* zone_;
- LPlatformChunk* const chunk_;
- MacroAssembler* const masm_;
- CompilationInfo* const info_;
+ void X87Fld(Operand src, X87OperandType opts);
+
+ void EmitFlushX87ForDeopt();
+ void FlushX87StackIfNecessary(LInstruction* instr) {
+ x87_stack_.FlushIfNecessary(instr, this);
+ }
+ friend class LGapResolver;
+
+#ifdef _MSC_VER
+ // On Windows, you may not access the stack more than one page below
+ // the most recently mapped page. To make the allocated area randomly
+ // accessible, we write an arbitrary value to each page in the range
+ // esp + offset - page_size .. esp in turn.
+ void MakeSureStackPagesMapped(int offset);
+#endif
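+ // Illustrative example (hypothetical numbers): with 4 KB pages and a frame
+ // that moves esp down by 12 KB, the probe writes touch esp + 8 KB, esp + 4 KB
+ // and finally esp, committing one guard page at a time before the frame is
+ // used.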
- int current_block_;
- int current_instruction_;
- const ZoneList<LInstruction*>* instructions_;
ZoneList<LEnvironment*> deoptimizations_;
+ ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
ZoneList<Handle<Object> > deoptimization_literals_;
int inlined_function_count_;
Scope* const scope_;
- Status status_;
TranslationBuffer translations_;
ZoneList<LDeferredCode*> deferred_;
bool dynamic_frame_alignment_;
+ bool support_aligned_spilled_doubles_;
int osr_pc_offset_;
- int last_lazy_deopt_pc_;
+ bool frame_is_built_;
+
+ class X87Stack {
+ public:
+ explicit X87Stack(MacroAssembler* masm)
+ : stack_depth_(0), is_mutable_(true), masm_(masm) { }
+ explicit X87Stack(const X87Stack& other)
+ : stack_depth_(other.stack_depth_), is_mutable_(false), masm_(masm()) {
+ for (int i = 0; i < stack_depth_; i++) {
+ stack_[i] = other.stack_[i];
+ }
+ }
+ bool operator==(const X87Stack& other) const {
+ if (stack_depth_ != other.stack_depth_) return false;
+ for (int i = 0; i < stack_depth_; i++) {
+ if (!stack_[i].is(other.stack_[i])) return false;
+ }
+ return true;
+ }
+ bool Contains(X87Register reg);
+ void Fxch(X87Register reg, int other_slot = 0);
+ void Free(X87Register reg);
+ void PrepareToWrite(X87Register reg);
+ void CommitWrite(X87Register reg);
+ void FlushIfNecessary(LInstruction* instr, LCodeGen* cgen);
+ void LeavingBlock(int current_block_id, LGoto* goto_instr);
+ int depth() const { return stack_depth_; }
+ void pop() {
+ ASSERT(is_mutable_);
+ stack_depth_--;
+ }
+ void push(X87Register reg) {
+ ASSERT(is_mutable_);
+ ASSERT(stack_depth_ < X87Register::kNumAllocatableRegisters);
+ stack_[stack_depth_] = reg;
+ stack_depth_++;
+ }
+
+ MacroAssembler* masm() const { return masm_; }
+
+ private:
+ int ArrayIndex(X87Register reg);
+ int st2idx(int pos);
+
+ X87Register stack_[X87Register::kNumAllocatableRegisters];
+ int stack_depth_;
+ bool is_mutable_;
+ MacroAssembler* masm_;
+ };
+ X87Stack x87_stack_;
// Builder that keeps track of safepoints in the code. The table
// itself is emitted at the end of the generated code.
@@ -373,13 +470,14 @@ class LCodeGen BASE_EMBEDDED {
Safepoint::Kind expected_safepoint_kind_;
- class PushSafepointRegistersScope BASE_EMBEDDED {
+ class PushSafepointRegistersScope V8_FINAL BASE_EMBEDDED {
public:
explicit PushSafepointRegistersScope(LCodeGen* codegen)
: codegen_(codegen) {
ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
codegen_->masm_->PushSafepointRegisters();
codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
+ ASSERT(codegen_->info()->is_calling());
}
~PushSafepointRegistersScope() {
@@ -399,23 +497,26 @@ class LCodeGen BASE_EMBEDDED {
};
-class LDeferredCode: public ZoneObject {
+class LDeferredCode : public ZoneObject {
public:
- explicit LDeferredCode(LCodeGen* codegen)
+ explicit LDeferredCode(LCodeGen* codegen, const LCodeGen::X87Stack& x87_stack)
: codegen_(codegen),
external_exit_(NULL),
- instruction_index_(codegen->current_instruction_) {
+ instruction_index_(codegen->current_instruction_),
+ x87_stack_(x87_stack) {
codegen->AddDeferredCode(this);
}
- virtual ~LDeferredCode() { }
+ virtual ~LDeferredCode() {}
virtual void Generate() = 0;
virtual LInstruction* instr() = 0;
void SetExit(Label* exit) { external_exit_ = exit; }
Label* entry() { return &entry_; }
Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
+ Label* done() { return codegen_->NeedsDeferredFrame() ? &done_ : exit(); }
int instruction_index() const { return instruction_index_; }
+ const LCodeGen::X87Stack& x87_stack() const { return x87_stack_; }
protected:
LCodeGen* codegen() const { return codegen_; }
@@ -426,7 +527,9 @@ class LDeferredCode: public ZoneObject {
Label entry_;
Label exit_;
Label* external_exit_;
+ Label done_;
int instruction_index_;
+ LCodeGen::X87Stack x87_stack_;
};
} } // namespace v8::internal
diff --git a/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc b/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc
index 6428916fe..2b2126af9 100644
--- a/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc
+++ b/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc
@@ -27,7 +27,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_IA32)
+#if V8_TARGET_ARCH_IA32
#include "ia32/lithium-gap-resolver-ia32.h"
#include "ia32/lithium-codegen-ia32.h"
@@ -191,7 +191,7 @@ int LGapResolver::CountSourceUses(LOperand* operand) {
Register LGapResolver::GetFreeRegisterNot(Register reg) {
int skip_index = reg.is(no_reg) ? -1 : Register::ToAllocationIndex(reg);
- for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
+ for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
if (source_uses_[i] == 0 && destination_uses_[i] > 0 && i != skip_index) {
return Register::FromAllocationIndex(i);
}
@@ -204,7 +204,7 @@ bool LGapResolver::HasBeenReset() {
if (!moves_.is_empty()) return false;
if (spilled_register_ >= 0) return false;
- for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
+ for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
if (source_uses_[i] != 0) return false;
if (destination_uses_[i] != 0) return false;
}
@@ -256,7 +256,7 @@ Register LGapResolver::EnsureTempRegister() {
// 3. Prefer to spill a register that is not used in any remaining move
// because it will not need to be restored until the end.
- for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
+ for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
if (source_uses_[i] == 0 && destination_uses_[i] == 0) {
Register scratch = Register::FromAllocationIndex(i);
__ push(scratch);
@@ -306,16 +306,43 @@ void LGapResolver::EmitMove(int index) {
LConstantOperand* constant_source = LConstantOperand::cast(source);
if (destination->IsRegister()) {
Register dst = cgen_->ToRegister(destination);
+ Representation r = cgen_->IsSmi(constant_source)
+ ? Representation::Smi() : Representation::Integer32();
if (cgen_->IsInteger32(constant_source)) {
- __ Set(dst, cgen_->ToInteger32Immediate(constant_source));
+ __ Set(dst, cgen_->ToImmediate(constant_source, r));
} else {
__ LoadObject(dst, cgen_->ToHandle(constant_source));
}
+ } else if (destination->IsDoubleRegister()) {
+ double v = cgen_->ToDouble(constant_source);
+ uint64_t int_val = BitCast<uint64_t, double>(v);
+ int32_t lower = static_cast<int32_t>(int_val);
+ int32_t upper = static_cast<int32_t>(int_val >> kBitsPerInt);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatureScope scope(cgen_->masm(), SSE2);
+ XMMRegister dst = cgen_->ToDoubleRegister(destination);
+ if (int_val == 0) {
+ __ xorps(dst, dst);
+ } else {
+ __ push(Immediate(upper));
+ __ push(Immediate(lower));
+ __ movsd(dst, Operand(esp, 0));
+ __ add(esp, Immediate(kDoubleSize));
+ }
+ } else {
+ __ push(Immediate(upper));
+ __ push(Immediate(lower));
+ X87Register dst = cgen_->ToX87Register(destination);
+ cgen_->X87Mov(dst, MemOperand(esp, 0));
+ __ add(esp, Immediate(kDoubleSize));
+ }
} else {
ASSERT(destination->IsStackSlot());
Operand dst = cgen_->ToOperand(destination);
+ Representation r = cgen_->IsSmi(constant_source)
+ ? Representation::Smi() : Representation::Integer32();
if (cgen_->IsInteger32(constant_source)) {
- __ Set(dst, cgen_->ToInteger32Immediate(constant_source));
+ __ Set(dst, cgen_->ToImmediate(constant_source, r));
} else {
Register tmp = EnsureTempRegister();
__ LoadObject(tmp, cgen_->ToHandle(constant_source));
@@ -324,29 +351,60 @@ void LGapResolver::EmitMove(int index) {
}
} else if (source->IsDoubleRegister()) {
- XMMRegister src = cgen_->ToDoubleRegister(source);
- if (destination->IsDoubleRegister()) {
- XMMRegister dst = cgen_->ToDoubleRegister(destination);
- __ movaps(dst, src);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatureScope scope(cgen_->masm(), SSE2);
+ XMMRegister src = cgen_->ToDoubleRegister(source);
+ if (destination->IsDoubleRegister()) {
+ XMMRegister dst = cgen_->ToDoubleRegister(destination);
+ __ movaps(dst, src);
+ } else {
+ ASSERT(destination->IsDoubleStackSlot());
+ Operand dst = cgen_->ToOperand(destination);
+ __ movsd(dst, src);
+ }
} else {
+ // Load from the register onto the stack, then store in the destination,
+ // which must be a double stack slot in the non-SSE2 case.
ASSERT(destination->IsDoubleStackSlot());
Operand dst = cgen_->ToOperand(destination);
- __ movdbl(dst, src);
+ X87Register src = cgen_->ToX87Register(source);
+ cgen_->X87Mov(dst, src);
}
} else if (source->IsDoubleStackSlot()) {
- ASSERT(destination->IsDoubleRegister() ||
- destination->IsDoubleStackSlot());
- Operand src = cgen_->ToOperand(source);
- if (destination->IsDoubleRegister()) {
- XMMRegister dst = cgen_->ToDoubleRegister(destination);
- __ movdbl(dst, src);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatureScope scope(cgen_->masm(), SSE2);
+ ASSERT(destination->IsDoubleRegister() ||
+ destination->IsDoubleStackSlot());
+ Operand src = cgen_->ToOperand(source);
+ if (destination->IsDoubleRegister()) {
+ XMMRegister dst = cgen_->ToDoubleRegister(destination);
+ __ movsd(dst, src);
+ } else {
+ // We rely on having xmm0 available as a fixed scratch register.
+ Operand dst = cgen_->ToOperand(destination);
+ __ movsd(xmm0, src);
+ __ movsd(dst, xmm0);
+ }
} else {
- // We rely on having xmm0 available as a fixed scratch register.
- Operand dst = cgen_->ToOperand(destination);
- __ movdbl(xmm0, src);
- __ movdbl(dst, xmm0);
+ // Load from the stack slot onto the top of the floating-point stack, then
+ // store to the destination. If the destination is a double register, it
+ // already represents the top of the stack and nothing needs to be done.
+ if (destination->IsDoubleStackSlot()) {
+ Register tmp = EnsureTempRegister();
+ Operand src0 = cgen_->ToOperand(source);
+ Operand src1 = cgen_->HighOperand(source);
+ Operand dst0 = cgen_->ToOperand(destination);
+ Operand dst1 = cgen_->HighOperand(destination);
+ __ mov(tmp, src0); // Then use tmp to copy source to destination.
+ __ mov(dst0, tmp);
+ __ mov(tmp, src1);
+ __ mov(dst1, tmp);
+ } else {
+ Operand src = cgen_->ToOperand(source);
+ X87Register dst = cgen_->ToX87Register(destination);
+ cgen_->X87Mov(dst, src);
+ }
}
-
} else {
UNREACHABLE();
}
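
// Illustrative sketch (plain C++, not the emitted code) of the double-constant
// move earlier in EmitMove: the 64-bit bit pattern is split into two 32-bit
// immediates, the upper half pushed first so the lower half ends up at esp.
// The helper name is an assumption introduced here for the example.
#include <cstdint>
#include <cstring>

void SplitDoubleForPush(double v, int32_t* lower, int32_t* upper) {
  uint64_t bits;
  std::memcpy(&bits, &v, sizeof bits);        // Same idea as BitCast<uint64_t, double>.
  *lower = static_cast<int32_t>(bits);        // Low word of the IEEE-754 pattern.
  *upper = static_cast<int32_t>(bits >> 32);  // High word, pushed before the low word.
}
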
@@ -410,6 +468,7 @@ void LGapResolver::EmitSwap(int index) {
__ mov(src, tmp0);
}
} else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
+ CpuFeatureScope scope(cgen_->masm(), SSE2);
// XMM register-register swap. We rely on having xmm0
// available as a fixed scratch register.
XMMRegister src = cgen_->ToDoubleRegister(source);
@@ -417,21 +476,21 @@ void LGapResolver::EmitSwap(int index) {
__ movaps(xmm0, src);
__ movaps(src, dst);
__ movaps(dst, xmm0);
-
} else if (source->IsDoubleRegister() || destination->IsDoubleRegister()) {
+ CpuFeatureScope scope(cgen_->masm(), SSE2);
// XMM register-memory swap. We rely on having xmm0
// available as a fixed scratch register.
ASSERT(source->IsDoubleStackSlot() || destination->IsDoubleStackSlot());
XMMRegister reg = cgen_->ToDoubleRegister(source->IsDoubleRegister()
- ? source
- : destination);
+ ? source
+ : destination);
Operand other =
cgen_->ToOperand(source->IsDoubleRegister() ? destination : source);
- __ movdbl(xmm0, other);
- __ movdbl(other, reg);
- __ movdbl(reg, Operand(xmm0));
-
+ __ movsd(xmm0, other);
+ __ movsd(other, reg);
+ __ movsd(reg, Operand(xmm0));
} else if (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot()) {
+ CpuFeatureScope scope(cgen_->masm(), SSE2);
// Double-width memory-to-memory. Spill on demand to use a general
// purpose temporary register and also rely on having xmm0 available as
// a fixed scratch register.
@@ -440,12 +499,12 @@ void LGapResolver::EmitSwap(int index) {
Operand src1 = cgen_->HighOperand(source);
Operand dst0 = cgen_->ToOperand(destination);
Operand dst1 = cgen_->HighOperand(destination);
- __ movdbl(xmm0, dst0); // Save destination in xmm0.
+ __ movsd(xmm0, dst0); // Save destination in xmm0.
__ mov(tmp, src0); // Then use tmp to copy source to destination.
__ mov(dst0, tmp);
__ mov(tmp, src1);
__ mov(dst1, tmp);
- __ movdbl(src0, xmm0);
+ __ movsd(src0, xmm0);
} else {
// No other combinations are possible.
diff --git a/deps/v8/src/ia32/lithium-gap-resolver-ia32.h b/deps/v8/src/ia32/lithium-gap-resolver-ia32.h
index 0c81d72ee..4aff241f4 100644
--- a/deps/v8/src/ia32/lithium-gap-resolver-ia32.h
+++ b/deps/v8/src/ia32/lithium-gap-resolver-ia32.h
@@ -38,7 +38,7 @@ namespace internal {
class LCodeGen;
class LGapResolver;
-class LGapResolver BASE_EMBEDDED {
+class LGapResolver V8_FINAL BASE_EMBEDDED {
public:
explicit LGapResolver(LCodeGen* owner);
@@ -97,8 +97,8 @@ class LGapResolver BASE_EMBEDDED {
ZoneList<LMoveOperands> moves_;
// Source and destination use counts for the general purpose registers.
- int source_uses_[Register::kNumAllocatableRegisters];
- int destination_uses_[Register::kNumAllocatableRegisters];
+ int source_uses_[Register::kMaxNumAllocatableRegisters];
+ int destination_uses_[Register::kMaxNumAllocatableRegisters];
// If we had to spill on demand, the currently spilled register's
// allocation index.
diff --git a/deps/v8/src/ia32/lithium-ia32.cc b/deps/v8/src/ia32/lithium-ia32.cc
index 1d12d23d2..73aabd6b1 100644
--- a/deps/v8/src/ia32/lithium-ia32.cc
+++ b/deps/v8/src/ia32/lithium-ia32.cc
@@ -27,11 +27,12 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_IA32)
+#if V8_TARGET_ARCH_IA32
#include "lithium-allocator-inl.h"
#include "ia32/lithium-ia32.h"
#include "ia32/lithium-codegen-ia32.h"
+#include "hydrogen-osr.h"
namespace v8 {
namespace internal {
@@ -43,31 +44,6 @@ namespace internal {
LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
#undef DEFINE_COMPILE
-LOsrEntry::LOsrEntry() {
- for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
- register_spills_[i] = NULL;
- }
- for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; ++i) {
- double_register_spills_[i] = NULL;
- }
-}
-
-
-void LOsrEntry::MarkSpilledRegister(int allocation_index,
- LOperand* spill_operand) {
- ASSERT(spill_operand->IsStackSlot());
- ASSERT(register_spills_[allocation_index] == NULL);
- register_spills_[allocation_index] = spill_operand;
-}
-
-
-void LOsrEntry::MarkSpilledDoubleRegister(int allocation_index,
- LOperand* spill_operand) {
- ASSERT(spill_operand->IsDoubleStackSlot());
- ASSERT(double_register_spills_[allocation_index] == NULL);
- double_register_spills_[allocation_index] = spill_operand;
-}
-
#ifdef DEBUG
void LInstruction::VerifyCall() {
@@ -91,6 +67,33 @@ void LInstruction::VerifyCall() {
#endif
+bool LInstruction::HasDoubleRegisterResult() {
+ return HasResult() && result()->IsDoubleRegister();
+}
+
+
+bool LInstruction::HasDoubleRegisterInput() {
+ for (int i = 0; i < InputCount(); i++) {
+ LOperand* op = InputAt(i);
+ if (op != NULL && op->IsDoubleRegister()) {
+ return true;
+ }
+ }
+ return false;
+}
+
+
+bool LInstruction::IsDoubleInput(X87Register reg, LCodeGen* cgen) {
+ for (int i = 0; i < InputCount(); i++) {
+ LOperand* op = InputAt(i);
+ if (op != NULL && op->IsDoubleRegister()) {
+ if (cgen->ToX87Register(op).is(reg)) return true;
+ }
+ }
+ return false;
+}
+
+
void LInstruction::PrintTo(StringStream* stream) {
stream->Add("%s ", this->Mnemonic());
@@ -114,7 +117,11 @@ void LInstruction::PrintDataTo(StringStream* stream) {
stream->Add("= ");
for (int i = 0; i < InputCount(); i++) {
if (i > 0) stream->Add(" ");
- InputAt(i)->PrintTo(stream);
+ if (InputAt(i) == NULL) {
+ stream->Add("NULL");
+ } else {
+ InputAt(i)->PrintTo(stream);
+ }
}
}
@@ -179,6 +186,7 @@ const char* LArithmeticT::Mnemonic() const {
case Token::BIT_AND: return "bit-and-t";
case Token::BIT_OR: return "bit-or-t";
case Token::BIT_XOR: return "bit-xor-t";
+ case Token::ROR: return "ror-t";
case Token::SHL: return "sal-t";
case Token::SAR: return "sar-t";
case Token::SHR: return "shr-t";
@@ -189,6 +197,11 @@ const char* LArithmeticT::Mnemonic() const {
}
+bool LGoto::HasInterestingComment(LCodeGen* gen) const {
+ return !gen->IsNextEmittedBlock(block_id());
+}
+
+
void LGoto::PrintDataTo(StringStream* stream) {
stream->Add("B%d", block_id());
}
@@ -200,7 +213,7 @@ void LBranch::PrintDataTo(StringStream* stream) {
}
-void LCmpIDAndBranch::PrintDataTo(StringStream* stream) {
+void LCompareNumericAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if ");
left()->PrintTo(stream);
stream->Add(" %s ", Token::String(op()));
@@ -209,15 +222,6 @@ void LCmpIDAndBranch::PrintDataTo(StringStream* stream) {
}
-void LIsNilAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if ");
- value()->PrintTo(stream);
- stream->Add(kind() == kStrictEquality ? " === " : " == ");
- stream->Add(nil() == kNullValue ? "null" : "undefined");
- stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
void LIsObjectAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if is_object(");
value()->PrintTo(stream);
@@ -287,20 +291,23 @@ void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
}
-void LCallConstantFunction::PrintDataTo(StringStream* stream) {
- stream->Add("#%d / ", arity());
+void LStoreCodeEntry::PrintDataTo(StringStream* stream) {
+ stream->Add(" = ");
+ function()->PrintTo(stream);
+ stream->Add(".code_entry = ");
+ code_object()->PrintTo(stream);
}
-void LUnaryMathOperation::PrintDataTo(StringStream* stream) {
- stream->Add("/%s ", hydrogen()->OpName());
- value()->PrintTo(stream);
+void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
+ stream->Add(" = ");
+ base_object()->PrintTo(stream);
+ stream->Add(" + %d", offset());
}
-void LMathPowHalf::PrintDataTo(StringStream* stream) {
- stream->Add("/pow_half ");
- value()->PrintTo(stream);
+void LCallConstantFunction::PrintDataTo(StringStream* stream) {
+ stream->Add("#%d / ", arity());
}
@@ -357,6 +364,17 @@ void LCallNew::PrintDataTo(StringStream* stream) {
}
+void LCallNewArray::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ context()->PrintTo(stream);
+ stream->Add(" ");
+ constructor()->PrintTo(stream);
+ stream->Add(" #%d / ", arity());
+ ElementsKind kind = hydrogen()->elements_kind();
+ stream->Add(" (%s) ", ElementsKindToString(kind));
+}
+
+
void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
arguments()->PrintTo(stream);
@@ -368,9 +386,9 @@ void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
}
-int LPlatformChunk::GetNextSpillIndex(bool is_double) {
+int LPlatformChunk::GetNextSpillIndex(RegisterKind kind) {
// Skip a slot if necessary for a double-width slot.
- if (is_double) {
+ if (kind == DOUBLE_REGISTERS) {
spill_slot_count_++;
spill_slot_count_ |= 1;
num_double_slots_++;
@@ -379,11 +397,12 @@ int LPlatformChunk::GetNextSpillIndex(bool is_double) {
}
-LOperand* LPlatformChunk::GetNextSpillSlot(bool is_double) {
- int index = GetNextSpillIndex(is_double);
- if (is_double) {
+LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) {
+ int index = GetNextSpillIndex(kind);
+ if (kind == DOUBLE_REGISTERS) {
return LDoubleStackSlot::Create(index, zone());
} else {
+ ASSERT(kind == GENERAL_REGISTERS);
return LStackSlot::Create(index, zone());
}
}
@@ -391,8 +410,7 @@ LOperand* LPlatformChunk::GetNextSpillSlot(bool is_double) {
void LStoreNamedField::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
- stream->Add(".");
- stream->Add(*String::cast(*name())->ToCString());
+ hydrogen()->access().PrintTo(stream);
stream->Add(" <- ");
value()->PrintTo(stream);
}
@@ -407,21 +425,35 @@ void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
}
-void LStoreKeyedFastElement::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
+void LLoadKeyed::PrintDataTo(StringStream* stream) {
+ elements()->PrintTo(stream);
stream->Add("[");
key()->PrintTo(stream);
- stream->Add("] <- ");
- value()->PrintTo(stream);
+ if (hydrogen()->IsDehoisted()) {
+ stream->Add(" + %d]", additional_index());
+ } else {
+ stream->Add("]");
+ }
}
-void LStoreKeyedFastDoubleElement::PrintDataTo(StringStream* stream) {
+void LStoreKeyed::PrintDataTo(StringStream* stream) {
elements()->PrintTo(stream);
stream->Add("[");
key()->PrintTo(stream);
- stream->Add("] <- ");
- value()->PrintTo(stream);
+ if (hydrogen()->IsDehoisted()) {
+ stream->Add(" + %d] <-", additional_index());
+ } else {
+ stream->Add("] <- ");
+ }
+
+ if (value() == NULL) {
+ ASSERT(hydrogen()->IsConstantHoleStore() &&
+ hydrogen()->value()->representation().IsDouble());
+ stream->Add("<the hole(nan)>");
+ } else {
+ value()->PrintTo(stream);
+ }
}
@@ -443,13 +475,23 @@ void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
LPlatformChunk* LChunkBuilder::Build() {
ASSERT(is_unused());
chunk_ = new(zone()) LPlatformChunk(info(), graph());
- HPhase phase("L_Building chunk", chunk_);
+ LPhase phase("L_Building chunk", chunk_);
status_ = BUILDING;
// Reserve the first spill slot for the state of dynamic alignment.
- int alignment_state_index = chunk_->GetNextSpillIndex(false);
- ASSERT_EQ(alignment_state_index, 0);
- USE(alignment_state_index);
+ if (info()->IsOptimizing()) {
+ int alignment_state_index = chunk_->GetNextSpillIndex(GENERAL_REGISTERS);
+ ASSERT_EQ(alignment_state_index, 0);
+ USE(alignment_state_index);
+ }
+
+ // If compiling for OSR, reserve space for the unoptimized frame,
+ // which will be subsumed into this frame.
+ if (graph()->has_osr()) {
+ for (int i = graph()->osr()->UnoptimizedFrameSlots(); i > 0; i--) {
+ chunk_->GetNextSpillIndex(GENERAL_REGISTERS);
+ }
+ }
const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
for (int i = 0; i < blocks->length(); i++) {
@@ -463,7 +505,7 @@ LPlatformChunk* LChunkBuilder::Build() {
}
-void LChunkBuilder::Abort(const char* reason) {
+void LChunkBuilder::Abort(BailoutReason reason) {
info()->set_bailout_reason(reason);
status_ = ABORTED;
}
@@ -519,34 +561,44 @@ LOperand* LChunkBuilder::UseAtStart(HValue* value) {
}
+static inline bool CanBeImmediateConstant(HValue* value) {
+ return value->IsConstant() && HConstant::cast(value)->NotInNewSpace();
+}
+
+
LOperand* LChunkBuilder::UseOrConstant(HValue* value) {
- return value->IsConstant()
+ return CanBeImmediateConstant(value)
? chunk_->DefineConstantOperand(HConstant::cast(value))
: Use(value);
}
LOperand* LChunkBuilder::UseOrConstantAtStart(HValue* value) {
- return value->IsConstant()
+ return CanBeImmediateConstant(value)
? chunk_->DefineConstantOperand(HConstant::cast(value))
: UseAtStart(value);
}
LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) {
- return value->IsConstant()
+ return CanBeImmediateConstant(value)
? chunk_->DefineConstantOperand(HConstant::cast(value))
: UseRegister(value);
}
LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) {
- return value->IsConstant()
+ return CanBeImmediateConstant(value)
? chunk_->DefineConstantOperand(HConstant::cast(value))
: UseRegisterAtStart(value);
}
+LOperand* LChunkBuilder::UseConstant(HValue* value) {
+ return chunk_->DefineConstantOperand(HConstant::cast(value));
+}
+
+
LOperand* LChunkBuilder::UseAny(HValue* value) {
return value->IsConstant()
? chunk_->DefineConstantOperand(HConstant::cast(value))
@@ -616,8 +668,10 @@ LInstruction* LChunkBuilder::DefineFixedDouble(
LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
HEnvironment* hydrogen_env = current_block_->last_environment();
int argument_index_accumulator = 0;
+ ZoneList<HValue*> objects_to_materialize(0, zone());
instr->set_environment(CreateEnvironment(hydrogen_env,
- &argument_index_accumulator));
+ &argument_index_accumulator,
+ &objects_to_materialize));
return instr;
}
@@ -625,6 +679,8 @@ LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
HInstruction* hinstr,
CanDeoptimize can_deoptimize) {
+ info()->MarkAsNonDeferredCalling();
+
#ifdef DEBUG
instr->VerifyCall();
#endif
@@ -657,7 +713,7 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
ASSERT(!instr->HasPointerMap());
- instr->set_pointer_map(new(zone()) LPointerMap(position_, zone()));
+ instr->set_pointer_map(new(zone()) LPointerMap(zone()));
return instr;
}
@@ -665,10 +721,12 @@ LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
LUnallocated* LChunkBuilder::TempRegister() {
LUnallocated* operand =
new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
- operand->set_virtual_register(allocator_->GetVirtualRegister());
+ int vreg = allocator_->GetVirtualRegister();
if (!allocator_->AllocationOk()) {
- Abort("Not enough virtual registers (temps).");
+ Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister);
+ vreg = 0;
}
+ operand->set_virtual_register(vreg);
return operand;
}
@@ -692,8 +750,14 @@ LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
}
-LInstruction* LChunkBuilder::DoSoftDeoptimize(HSoftDeoptimize* instr) {
- return AssignEnvironment(new(zone()) LDeoptimize);
+LInstruction* LChunkBuilder::DoDummyUse(HDummyUse* instr) {
+ return DefineAsRegister(new(zone()) LDummyUse(UseAny(instr->value())));
+}
+
+
+LInstruction* LChunkBuilder::DoEnvironmentMarker(HEnvironmentMarker* instr) {
+ UNREACHABLE();
+ return NULL;
}
@@ -704,52 +768,44 @@ LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
LInstruction* LChunkBuilder::DoShift(Token::Value op,
HBitwiseBinaryOperation* instr) {
- if (instr->representation().IsTagged()) {
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
-
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* left = UseFixed(instr->left(), edx);
- LOperand* right = UseFixed(instr->right(), eax);
- LArithmeticT* result = new(zone()) LArithmeticT(op, context, left, right);
- return MarkAsCall(DefineFixed(result, eax), instr);
- }
-
- ASSERT(instr->representation().IsInteger32());
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->left());
-
- HValue* right_value = instr->right();
- LOperand* right = NULL;
- int constant_value = 0;
- if (right_value->IsConstant()) {
- HConstant* constant = HConstant::cast(right_value);
- right = chunk_->DefineConstantOperand(constant);
- constant_value = constant->Integer32Value() & 0x1f;
- } else {
- right = UseFixed(right_value, ecx);
- }
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* left = UseRegisterAtStart(instr->left());
- // Shift operations can only deoptimize if we do a logical shift by 0 and
- // the result cannot be truncated to int32.
- bool does_deopt = false;
- if (op == Token::SHR && constant_value == 0) {
- if (FLAG_opt_safe_uint32_operations) {
- does_deopt = !instr->CheckFlag(HInstruction::kUint32);
+ HValue* right_value = instr->right();
+ LOperand* right = NULL;
+ int constant_value = 0;
+ bool does_deopt = false;
+ if (right_value->IsConstant()) {
+ HConstant* constant = HConstant::cast(right_value);
+ right = chunk_->DefineConstantOperand(constant);
+ constant_value = constant->Integer32Value() & 0x1f;
+ // Left shifts can deoptimize if we shift by > 0 and the result cannot be
+ // truncated to smi.
+ if (instr->representation().IsSmi() && constant_value > 0) {
+ does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToSmi);
+ }
} else {
- for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
- if (!it.value()->CheckFlag(HValue::kTruncatingToInt32)) {
- does_deopt = true;
- break;
- }
+ right = UseFixed(right_value, ecx);
+ }
+
+ // Shift operations can only deoptimize if we do a logical shift by 0 and
+ // the result cannot be truncated to int32.
+ if (op == Token::SHR && constant_value == 0) {
+ if (FLAG_opt_safe_uint32_operations) {
+ does_deopt = !instr->CheckFlag(HInstruction::kUint32);
+ } else {
+ does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToInt32);
}
}
- }
- LInstruction* result =
- DefineSameAsFirst(new(zone()) LShiftI(op, left, right, does_deopt));
- return does_deopt ? AssignEnvironment(result) : result;
+ LInstruction* result =
+ DefineSameAsFirst(new(zone()) LShiftI(op, left, right, does_deopt));
+ return does_deopt ? AssignEnvironment(result) : result;
+ } else {
+ return DoArithmeticT(op, instr);
+ }
}
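An illustrative note on the two deoptimization conditions handled above (not part of the patch; the JavaScript values are examples only):

  // SHR by a constant 0: (-1) >>> 0 evaluates to 4294967295, which does not
  // fit in an int32, so a deopt environment is required unless every use
  // truncates to int32 (or safe uint32 operations are enabled).
  // SHL on smi values: shifting left by a positive constant can leave the
  // smi range, so a deopt environment is required unless every use
  // truncates back to smi.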
@@ -758,21 +814,22 @@ LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
ASSERT(instr->representation().IsDouble());
ASSERT(instr->left()->representation().IsDouble());
ASSERT(instr->right()->representation().IsDouble());
- ASSERT(op != Token::MOD);
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
- LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
- return DefineSameAsFirst(result);
+ if (op == Token::MOD) {
+ LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ LOperand* right = UseRegisterAtStart(instr->BetterRightOperand());
+ LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
+ return MarkAsCall(DefineSameAsFirst(result), instr);
+ } else {
+ LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ LOperand* right = UseRegisterAtStart(instr->BetterRightOperand());
+ LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
+ return DefineSameAsFirst(result);
+ }
}
LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
- HArithmeticBinaryOperation* instr) {
- ASSERT(op == Token::ADD ||
- op == Token::DIV ||
- op == Token::MOD ||
- op == Token::MUL ||
- op == Token::SUB);
+ HBinaryOperation* instr) {
HValue* left = instr->left();
HValue* right = instr->right();
ASSERT(left->representation().IsTagged());
@@ -819,11 +876,15 @@ void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
HEnvironment* last_environment = pred->last_environment();
for (int i = 0; i < block->phis()->length(); ++i) {
HPhi* phi = block->phis()->at(i);
- last_environment->SetValueAt(phi->merged_index(), phi);
+ if (phi->HasMergedIndex()) {
+ last_environment->SetValueAt(phi->merged_index(), phi);
+ }
}
for (int i = 0; i < block->deleted_phis()->length(); ++i) {
- last_environment->SetValueAt(block->deleted_phis()->at(i),
- graph_->GetConstantUndefined());
+ if (block->deleted_phis()->at(i) < last_environment->length()) {
+ last_environment->SetValueAt(block->deleted_phis()->at(i),
+ graph_->GetConstantUndefined());
+ }
}
block->UpdateEnvironment(last_environment);
// Pick up the outgoing argument count of one of the predecessors.
@@ -852,17 +913,78 @@ void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
void LChunkBuilder::VisitInstruction(HInstruction* current) {
HInstruction* old_current = current_instruction_;
current_instruction_ = current;
- if (current->has_position()) position_ = current->position();
- LInstruction* instr = current->CompileToLithium(this);
+
+ LInstruction* instr = NULL;
+ if (current->CanReplaceWithDummyUses()) {
+ if (current->OperandCount() == 0) {
+ instr = DefineAsRegister(new(zone()) LDummy());
+ } else {
+ instr = DefineAsRegister(new(zone())
+ LDummyUse(UseAny(current->OperandAt(0))));
+ }
+ for (int i = 1; i < current->OperandCount(); ++i) {
+ LInstruction* dummy =
+ new(zone()) LDummyUse(UseAny(current->OperandAt(i)));
+ dummy->set_hydrogen_value(current);
+ chunk_->AddInstruction(dummy, current_block_);
+ }
+ } else {
+ instr = current->CompileToLithium(this);
+ }
+
+ argument_count_ += current->argument_delta();
+ ASSERT(argument_count_ >= 0);
if (instr != NULL) {
+ // Associate the hydrogen instruction first, since we may need it for
+ // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below.
+ instr->set_hydrogen_value(current);
+
+#ifdef DEBUG
+ // Make sure that the lithium instruction has either no fixed register
+ // constraints in temps or the result OR no uses that are only used at
+ // start. If this invariant doesn't hold, the register allocator can decide
+ // to insert a split of a range immediately before the instruction due to an
+ // already allocated register needing to be used for the instruction's fixed
+  // register constraint. In this case, the register allocator won't see an
+  // interference between the split child and the use-at-start (it would if
+  // it were just a plain use), so it is free to move the split child into
+ // the same register that is used for the use-at-start.
+ // See https://code.google.com/p/chromium/issues/detail?id=201590
+ if (!(instr->ClobbersRegisters() && instr->ClobbersDoubleRegisters())) {
+ int fixed = 0;
+ int used_at_start = 0;
+ for (UseIterator it(instr); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
+ if (operand->IsUsedAtStart()) ++used_at_start;
+ }
+ if (instr->Output() != NULL) {
+ if (LUnallocated::cast(instr->Output())->HasFixedPolicy()) ++fixed;
+ }
+ for (TempIterator it(instr); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
+ if (operand->HasFixedPolicy()) ++fixed;
+ }
+ ASSERT(fixed == 0 || used_at_start == 0);
+ }
+#endif
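A hypothetical shape of lithium code that the debug check above is meant to rule out (LFoo is made up for illustration; the Use/Define helpers are the ones defined in this file):

  // Mixing a fixed output constraint with a use-at-start input:
  LOperand* in = UseRegisterAtStart(instr->value());   // use-at-start
  return DefineFixed(new(zone()) LFoo(in), eax);       // fixed result register
  // With both constraints present, the allocator may split a live range just
  // before the instruction and place the split child in the register already
  // taken by the start use (see the linked Chromium issue).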
+
if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
instr = AssignPointerMap(instr);
}
if (FLAG_stress_environments && !instr->HasEnvironment()) {
instr = AssignEnvironment(instr);
}
- instr->set_hydrogen_value(current);
+ if (!CpuFeatures::IsSafeForSnapshot(SSE2) && instr->IsGoto() &&
+ LGoto::cast(instr)->jumps_to_join()) {
+ // TODO(olivf) Since phis of spilled values are joined as registers
+ // (not in the stack slot), we need to allow the goto gaps to keep one
+ // x87 register alive. To ensure all other values are still spilled, we
+      // insert an fpu register barrier right before.
+ LClobberDoubles* clobber = new(zone()) LClobberDoubles();
+ clobber->set_hydrogen_value(current);
+ chunk_->AddInstruction(clobber, current_block_);
+ }
chunk_->AddInstruction(instr, current_block_);
}
current_instruction_ = old_current;
@@ -871,15 +993,17 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
LEnvironment* LChunkBuilder::CreateEnvironment(
HEnvironment* hydrogen_env,
- int* argument_index_accumulator) {
+ int* argument_index_accumulator,
+ ZoneList<HValue*>* objects_to_materialize) {
if (hydrogen_env == NULL) return NULL;
- LEnvironment* outer =
- CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
+ LEnvironment* outer = CreateEnvironment(hydrogen_env->outer(),
+ argument_index_accumulator,
+ objects_to_materialize);
BailoutId ast_id = hydrogen_env->ast_id();
ASSERT(!ast_id.IsNone() ||
hydrogen_env->frame_type() != JS_FUNCTION);
- int value_count = hydrogen_env->length();
+ int value_count = hydrogen_env->length() - hydrogen_env->specials_count();
LEnvironment* result =
new(zone()) LEnvironment(hydrogen_env->closure(),
hydrogen_env->frame_type(),
@@ -891,13 +1015,15 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
hydrogen_env->entry(),
zone());
int argument_index = *argument_index_accumulator;
- for (int i = 0; i < value_count; ++i) {
+ int object_index = objects_to_materialize->length();
+ for (int i = 0; i < hydrogen_env->length(); ++i) {
if (hydrogen_env->is_special_index(i)) continue;
+ LOperand* op;
HValue* value = hydrogen_env->values()->at(i);
- LOperand* op = NULL;
- if (value->IsArgumentsObject()) {
- op = NULL;
+ if (value->IsArgumentsObject() || value->IsCapturedObject()) {
+ objects_to_materialize->Add(value, zone());
+ op = LEnvironment::materialization_marker();
} else if (value->IsPushArgument()) {
op = new(zone()) LArgument(argument_index++);
} else {
@@ -908,6 +1034,39 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
value->CheckFlag(HInstruction::kUint32));
}
+ for (int i = object_index; i < objects_to_materialize->length(); ++i) {
+ HValue* object_to_materialize = objects_to_materialize->at(i);
+ int previously_materialized_object = -1;
+ for (int prev = 0; prev < i; ++prev) {
+ if (objects_to_materialize->at(prev) == objects_to_materialize->at(i)) {
+ previously_materialized_object = prev;
+ break;
+ }
+ }
+ int length = object_to_materialize->OperandCount();
+ bool is_arguments = object_to_materialize->IsArgumentsObject();
+ if (previously_materialized_object >= 0) {
+ result->AddDuplicateObject(previously_materialized_object);
+ continue;
+ } else {
+ result->AddNewObject(is_arguments ? length - 1 : length, is_arguments);
+ }
+ for (int i = is_arguments ? 1 : 0; i < length; ++i) {
+ LOperand* op;
+ HValue* value = object_to_materialize->OperandAt(i);
+ if (value->IsArgumentsObject() || value->IsCapturedObject()) {
+ objects_to_materialize->Add(value, zone());
+ op = LEnvironment::materialization_marker();
+ } else {
+ ASSERT(!value->IsPushArgument());
+ op = UseAny(value);
+ }
+ result->AddValue(op,
+ value->representation(),
+ value->CheckFlag(HInstruction::kUint32));
+ }
+ }
+
if (hydrogen_env->frame_type() == JS_FUNCTION) {
*argument_index_accumulator = argument_index;
}
@@ -917,39 +1076,46 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
- return new(zone()) LGoto(instr->FirstSuccessor()->block_id());
+ return new(zone()) LGoto(instr->FirstSuccessor());
}
LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
- HValue* value = instr->value();
- if (value->EmitAtUses()) {
- ASSERT(value->IsConstant());
- ASSERT(!value->representation().IsDouble());
- HBasicBlock* successor = HConstant::cast(value)->ToBoolean()
- ? instr->FirstSuccessor()
- : instr->SecondSuccessor();
- return new(zone()) LGoto(successor->block_id());
- }
+ LInstruction* goto_instr = CheckElideControlInstruction(instr);
+ if (goto_instr != NULL) return goto_instr;
+
+ ToBooleanStub::Types expected = instr->expected_input_types();
- // Untagged integers or doubles, smis and booleans don't require a
- // deoptimization environment nor a temp register.
+ // Tagged values that are not known smis or booleans require a
+  // deoptimization environment. If the instruction is generic, no
+  // environment is needed since all cases are handled.
+ HValue* value = instr->value();
Representation rep = value->representation();
HType type = value->type();
if (!rep.IsTagged() || type.IsSmi() || type.IsBoolean()) {
return new(zone()) LBranch(UseRegister(value), NULL);
}
- ToBooleanStub::Types expected = instr->expected_input_types();
+ bool needs_temp = expected.NeedsMap() || expected.IsEmpty();
+ LOperand* temp = needs_temp ? TempRegister() : NULL;
+
+ // The Generic stub does not have a deopt, so we need no environment.
+ if (expected.IsGeneric()) {
+ return new(zone()) LBranch(UseRegister(value), temp);
+ }
+
// We need a temporary register when we have to access the map *or* we have
// no type info yet, in which case we handle all cases (including the ones
// involving maps).
- bool needs_temp = expected.NeedsMap() || expected.IsEmpty();
- LOperand* temp = needs_temp ? TempRegister() : NULL;
return AssignEnvironment(new(zone()) LBranch(UseRegister(value), temp));
}
+LInstruction* LChunkBuilder::DoDebugBreak(HDebugBreak* instr) {
+ return new(zone()) LDebugBreak();
+}
+
+
LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
@@ -958,11 +1124,13 @@ LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* length) {
+ info()->MarkAsRequiresFrame();
return DefineAsRegister(new(zone()) LArgumentsLength(Use(length->value())));
}
LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
+ info()->MarkAsRequiresFrame();
return DefineAsRegister(new(zone()) LArgumentsElements);
}
@@ -1011,12 +1179,28 @@ LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
- ++argument_count_;
LOperand* argument = UseAny(instr->argument());
return new(zone()) LPushArgument(argument);
}
+LInstruction* LChunkBuilder::DoStoreCodeEntry(
+ HStoreCodeEntry* store_code_entry) {
+ LOperand* function = UseRegister(store_code_entry->function());
+ LOperand* code_object = UseTempRegister(store_code_entry->code_object());
+ return new(zone()) LStoreCodeEntry(function, code_object);
+}
+
+
+LInstruction* LChunkBuilder::DoInnerAllocatedObject(
+ HInnerAllocatedObject* inner_object) {
+ LOperand* base_object = UseRegisterAtStart(inner_object->base_object());
+ LInnerAllocatedObject* result =
+ new(zone()) LInnerAllocatedObject(base_object);
+ return DefineAsRegister(result);
+}
+
+
LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
return instr->HasNoUses()
? NULL
@@ -1025,7 +1209,13 @@ LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
LInstruction* LChunkBuilder::DoContext(HContext* instr) {
- return instr->HasNoUses() ? NULL : DefineAsRegister(new(zone()) LContext);
+ if (instr->HasNoUses()) return NULL;
+
+ if (info()->IsStub()) {
+ return DefineFixed(new(zone()) LContext, esi);
+ }
+
+ return DefineAsRegister(new(zone()) LContext);
}
@@ -1055,7 +1245,6 @@ LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) {
LInstruction* LChunkBuilder::DoCallConstantFunction(
HCallConstantFunction* instr) {
- argument_count_ -= instr->argument_count();
return MarkAsCall(DefineFixed(new(zone()) LCallConstantFunction, eax), instr);
}
@@ -1063,60 +1252,115 @@ LInstruction* LChunkBuilder::DoCallConstantFunction(
LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* function = UseFixed(instr->function(), edi);
- argument_count_ -= instr->argument_count();
LInvokeFunction* result = new(zone()) LInvokeFunction(context, function);
return MarkAsCall(DefineFixed(result, eax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
}
LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
- BuiltinFunctionId op = instr->op();
- if (op == kMathLog) {
- ASSERT(instr->representation().IsDouble());
- ASSERT(instr->value()->representation().IsDouble());
- LOperand* context = UseAny(instr->context()); // Not actually used.
- LOperand* input = UseRegisterAtStart(instr->value());
- LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(context,
- input);
- return DefineSameAsFirst(result);
- } else if (op == kMathSin || op == kMathCos || op == kMathTan) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* input = UseFixedDouble(instr->value(), xmm1);
- LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(context,
- input);
- return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
- } else {
- LOperand* input = UseRegisterAtStart(instr->value());
- LOperand* context = UseAny(instr->context()); // Deferred use by MathAbs.
- if (op == kMathPowHalf) {
- LOperand* temp = TempRegister();
- LMathPowHalf* result = new(zone()) LMathPowHalf(context, input, temp);
- return DefineSameAsFirst(result);
- }
- LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(context,
- input);
- switch (op) {
- case kMathAbs:
- return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
- case kMathFloor:
- return AssignEnvironment(DefineAsRegister(result));
- case kMathRound:
- return AssignEnvironment(DefineAsRegister(result));
- case kMathSqrt:
- return DefineSameAsFirst(result);
- default:
- UNREACHABLE();
- return NULL;
- }
+ switch (instr->op()) {
+ case kMathFloor: return DoMathFloor(instr);
+ case kMathRound: return DoMathRound(instr);
+ case kMathAbs: return DoMathAbs(instr);
+ case kMathLog: return DoMathLog(instr);
+ case kMathSin: return DoMathSin(instr);
+ case kMathCos: return DoMathCos(instr);
+ case kMathTan: return DoMathTan(instr);
+ case kMathExp: return DoMathExp(instr);
+ case kMathSqrt: return DoMathSqrt(instr);
+ case kMathPowHalf: return DoMathPowHalf(instr);
+ default:
+ UNREACHABLE();
+ return NULL;
}
}
+LInstruction* LChunkBuilder::DoMathFloor(HUnaryMathOperation* instr) {
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LMathFloor* result = new(zone()) LMathFloor(input);
+ return AssignEnvironment(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) {
+ LOperand* context = UseAny(instr->context());
+ LOperand* input = UseRegister(instr->value());
+ LOperand* temp = FixedTemp(xmm4);
+ LMathRound* result = new(zone()) LMathRound(context, input, temp);
+ return AssignEnvironment(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) {
+ LOperand* context = UseAny(instr->context()); // Deferred use.
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LMathAbs* result = new(zone()) LMathAbs(context, input);
+ return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
+}
+
+
+LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->value()->representation().IsDouble());
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LMathLog* result = new(zone()) LMathLog(input);
+ return DefineSameAsFirst(result);
+}
+
+
+LInstruction* LChunkBuilder::DoMathSin(HUnaryMathOperation* instr) {
+ LOperand* input = UseFixedDouble(instr->value(), xmm1);
+ LMathSin* result = new(zone()) LMathSin(input);
+ return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoMathCos(HUnaryMathOperation* instr) {
+ LOperand* input = UseFixedDouble(instr->value(), xmm1);
+ LMathCos* result = new(zone()) LMathCos(input);
+ return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoMathTan(HUnaryMathOperation* instr) {
+ LOperand* input = UseFixedDouble(instr->value(), xmm1);
+ LMathTan* result = new(zone()) LMathTan(input);
+ return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->value()->representation().IsDouble());
+ LOperand* value = UseTempRegister(instr->value());
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LMathExp* result = new(zone()) LMathExp(value, temp1, temp2);
+ return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoMathSqrt(HUnaryMathOperation* instr) {
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LMathSqrt* result = new(zone()) LMathSqrt(input);
+ return DefineSameAsFirst(result);
+}
+
+
+LInstruction* LChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) {
+ LOperand* context = UseAny(instr->context());
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LOperand* temp = TempRegister();
+ LMathPowHalf* result = new(zone()) LMathPowHalf(context, input, temp);
+ return DefineSameAsFirst(result);
+}
+
+
LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) {
ASSERT(instr->key()->representation().IsTagged());
LOperand* context = UseFixed(instr->context(), esi);
LOperand* key = UseFixed(instr->key(), ecx);
- argument_count_ -= instr->argument_count();
LCallKeyed* result = new(zone()) LCallKeyed(context, key);
return MarkAsCall(DefineFixed(result, eax), instr);
}
@@ -1124,7 +1368,6 @@ LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) {
LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
LOperand* context = UseFixed(instr->context(), esi);
- argument_count_ -= instr->argument_count();
LCallNamed* result = new(zone()) LCallNamed(context);
return MarkAsCall(DefineFixed(result, eax), instr);
}
@@ -1132,14 +1375,12 @@ LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) {
LOperand* context = UseFixed(instr->context(), esi);
- argument_count_ -= instr->argument_count();
LCallGlobal* result = new(zone()) LCallGlobal(context);
return MarkAsCall(DefineFixed(result, eax), instr);
}
LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) {
- argument_count_ -= instr->argument_count();
return MarkAsCall(DefineFixed(new(zone()) LCallKnownGlobal, eax), instr);
}
@@ -1147,28 +1388,38 @@ LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) {
LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* constructor = UseFixed(instr->constructor(), edi);
- argument_count_ -= instr->argument_count();
LCallNew* result = new(zone()) LCallNew(context, constructor);
return MarkAsCall(DefineFixed(result, eax), instr);
}
+LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
+ LOperand* context = UseFixed(instr->context(), esi);
+ LOperand* constructor = UseFixed(instr->constructor(), edi);
+ LCallNewArray* result = new(zone()) LCallNewArray(context, constructor);
+ return MarkAsCall(DefineFixed(result, eax), instr);
+}
+
+
LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* function = UseFixed(instr->function(), edi);
- argument_count_ -= instr->argument_count();
LCallFunction* result = new(zone()) LCallFunction(context, function);
return MarkAsCall(DefineFixed(result, eax), instr);
}
LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
- argument_count_ -= instr->argument_count();
LOperand* context = UseFixed(instr->context(), esi);
return MarkAsCall(DefineFixed(new(zone()) LCallRuntime(context), eax), instr);
}
+LInstruction* LChunkBuilder::DoRor(HRor* instr) {
+ return DoShift(Token::ROR, instr);
+}
+
+
LInstruction* LChunkBuilder::DoShr(HShr* instr) {
return DoShift(Token::SHR, instr);
}
@@ -1185,42 +1436,31 @@ LInstruction* LChunkBuilder::DoShl(HShl* instr) {
LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ ASSERT(instr->CheckFlag(HValue::kTruncatingToInt32));
- LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
- LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+ LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
return DefineSameAsFirst(new(zone()) LBitI(left, right));
} else {
- ASSERT(instr->representation().IsTagged());
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
-
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* left = UseFixed(instr->left(), edx);
- LOperand* right = UseFixed(instr->right(), eax);
- LArithmeticT* result =
- new(zone()) LArithmeticT(instr->op(), context, left, right);
- return MarkAsCall(DefineFixed(result, eax), instr);
+ return DoArithmeticT(instr->op(), instr);
}
}
-LInstruction* LChunkBuilder::DoBitNot(HBitNot* instr) {
- ASSERT(instr->value()->representation().IsInteger32());
- ASSERT(instr->representation().IsInteger32());
- if (instr->HasNoUses()) return NULL;
- LOperand* input = UseRegisterAtStart(instr->value());
- LBitNotI* result = new(zone()) LBitNotI(input);
- return DefineSameAsFirst(result);
-}
-
-
LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
- if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::DIV, instr);
- } else if (instr->representation().IsInteger32()) {
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ if (instr->HasPowerOf2Divisor()) {
+ ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
+ LOperand* value = UseRegisterAtStart(instr->left());
+ LDivI* div =
+ new(zone()) LDivI(value, UseOrConstant(instr->right()), NULL);
+ return AssignEnvironment(DefineSameAsFirst(div));
+ }
// The temporary operand is necessary to ensure that right is not allocated
// into edx.
LOperand* temp = FixedTemp(edx);
@@ -1228,26 +1468,14 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
LOperand* divisor = UseRegister(instr->right());
LDivI* result = new(zone()) LDivI(dividend, divisor, temp);
return AssignEnvironment(DefineFixed(result, eax));
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::DIV, instr);
} else {
- ASSERT(instr->representation().IsTagged());
return DoArithmeticT(Token::DIV, instr);
}
}
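A note on the fixed-register choices above (an assumption about the eventual ia32 code generation, which is not part of this patch): integer division with idiv takes its dividend in edx:eax and produces the quotient in eax and the remainder in edx, so the left operand is pinned to eax and edx is reserved as a temp to keep the divisor out of it. Roughly:

  // cdq                  ; sign-extend eax into edx
  // idiv divisor_reg     ; quotient -> eax, remainder -> edx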
-HValue* LChunkBuilder::SimplifiedDividendForMathFloorOfDiv(HValue* dividend) {
- // A value with an integer representation does not need to be transformed.
- if (dividend->representation().IsInteger32()) {
- return dividend;
- // A change from an integer32 can be replaced by the integer32 value.
- } else if (dividend->IsChange() &&
- HChange::cast(dividend)->from().IsInteger32()) {
- return HChange::cast(dividend)->value();
- }
- return NULL;
-}
-
-
HValue* LChunkBuilder::SimplifiedDivisorForMathFloorOfDiv(HValue* divisor) {
if (divisor->IsConstant() &&
HConstant::cast(divisor)->HasInteger32Value()) {
@@ -1255,12 +1483,31 @@ HValue* LChunkBuilder::SimplifiedDivisorForMathFloorOfDiv(HValue* divisor) {
return constant_val->CopyToRepresentation(Representation::Integer32(),
divisor->block()->zone());
}
+ // A value with an integer representation does not need to be transformed.
+ if (divisor->representation().IsInteger32()) {
+ return divisor;
+ // A change from an integer32 can be replaced by the integer32 value.
+ } else if (divisor->IsChange() &&
+ HChange::cast(divisor)->from().IsInteger32()) {
+ return HChange::cast(divisor)->value();
+ }
return NULL;
}
LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
HValue* right = instr->right();
+ if (!right->IsConstant()) {
+ ASSERT(right->representation().IsInteger32());
+ // The temporary operand is necessary to ensure that right is not allocated
+ // into edx.
+ LOperand* temp = FixedTemp(edx);
+ LOperand* dividend = UseFixed(instr->left(), eax);
+ LOperand* divisor = UseRegister(instr->right());
+ LDivI* flooring_div = new(zone()) LDivI(dividend, divisor, temp);
+ return AssignEnvironment(DefineFixed(flooring_div, eax));
+ }
+
ASSERT(right->IsConstant() && HConstant::cast(right)->HasInteger32Value());
LOperand* divisor = chunk_->DefineConstantOperand(HConstant::cast(right));
int32_t divisor_si = HConstant::cast(right)->Integer32Value();
@@ -1287,52 +1534,58 @@ LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
LInstruction* LChunkBuilder::DoMod(HMod* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
+ HValue* left = instr->left();
+ HValue* right = instr->right();
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
- LInstruction* result;
if (instr->HasPowerOf2Divisor()) {
- ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
- LOperand* value = UseRegisterAtStart(instr->left());
- LModI* mod =
- new(zone()) LModI(value, UseOrConstant(instr->right()), NULL);
- result = DefineSameAsFirst(mod);
+ ASSERT(!right->CanBeZero());
+ LModI* mod = new(zone()) LModI(UseRegisterAtStart(left),
+ UseOrConstant(right),
+ NULL);
+ LInstruction* result = DefineSameAsFirst(mod);
+ return (left->CanBeNegative() &&
+ instr->CheckFlag(HValue::kBailoutOnMinusZero))
+ ? AssignEnvironment(result)
+ : result;
+ } else if (instr->fixed_right_arg().has_value) {
+ LModI* mod = new(zone()) LModI(UseRegister(left),
+ UseRegisterAtStart(right),
+ NULL);
+ return AssignEnvironment(DefineSameAsFirst(mod));
} else {
- // The temporary operand is necessary to ensure that right is
- // not allocated into edx.
- LOperand* temp = FixedTemp(edx);
- LOperand* value = UseFixed(instr->left(), eax);
- LOperand* divisor = UseRegister(instr->right());
- LModI* mod = new(zone()) LModI(value, divisor, temp);
- result = DefineFixed(mod, edx);
+ // The temporary operand is necessary to ensure that right is not
+ // allocated into edx.
+ LModI* mod = new(zone()) LModI(UseFixed(left, eax),
+ UseRegister(right),
+ FixedTemp(edx));
+ LInstruction* result = DefineFixed(mod, edx);
+ return (right->CanBeZero() ||
+ (left->RangeCanInclude(kMinInt) &&
+ right->RangeCanInclude(-1) &&
+ instr->CheckFlag(HValue::kBailoutOnMinusZero)) ||
+ (left->CanBeNegative() &&
+ instr->CanBeZero() &&
+ instr->CheckFlag(HValue::kBailoutOnMinusZero)))
+ ? AssignEnvironment(result)
+ : result;
}
-
- return (instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
- instr->CheckFlag(HValue::kCanBeDivByZero))
- ? AssignEnvironment(result)
- : result;
- } else if (instr->representation().IsTagged()) {
- return DoArithmeticT(Token::MOD, instr);
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::MOD, instr);
} else {
- ASSERT(instr->representation().IsDouble());
- // We call a C function for double modulo. It can't trigger a GC.
- // We need to use fixed result register for the call.
- // TODO(fschneider): Allow any register as input registers.
- LOperand* left = UseFixedDouble(instr->left(), xmm2);
- LOperand* right = UseFixedDouble(instr->right(), xmm1);
- LArithmeticD* result = new(zone()) LArithmeticD(Token::MOD, left, right);
- return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
+ return DoArithmeticT(Token::MOD, instr);
}
}
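An illustrative note on the minus-zero conditions above (not part of the patch): the JavaScript remainder takes the sign of the dividend, so a negative left operand with a zero result produces -0, which an untagged int32 cannot represent. For example:

  // JS semantics: -5 % 5 == -0, while 5 % -5 == 0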
LInstruction* LChunkBuilder::DoMul(HMul* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
- LOperand* right = UseOrConstant(instr->MostConstantOperand());
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ LOperand* right = UseOrConstant(instr->BetterRightOperand());
LOperand* temp = NULL;
if (instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
temp = TempRegister();
@@ -1346,16 +1599,15 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) {
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::MUL, instr);
} else {
- ASSERT(instr->representation().IsTagged());
return DoArithmeticT(Token::MUL, instr);
}
}
LInstruction* LChunkBuilder::DoSub(HSub* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseOrConstantAtStart(instr->right());
LSubI* sub = new(zone()) LSubI(left, right);
@@ -1367,28 +1619,37 @@ LInstruction* LChunkBuilder::DoSub(HSub* instr) {
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::SUB, instr);
} else {
- ASSERT(instr->representation().IsTagged());
return DoArithmeticT(Token::SUB, instr);
}
}
LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
- LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ // Check to see if it would be advantageous to use an lea instruction rather
+ // than an add. This is the case when no overflow check is needed and there
+ // are multiple uses of the add's inputs, so using a 3-register add will
+ // preserve all input values for later uses.
+ bool use_lea = LAddI::UseLea(instr);
+ LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ HValue* right_candidate = instr->BetterRightOperand();
+ LOperand* right = use_lea
+ ? UseRegisterOrConstantAtStart(right_candidate)
+ : UseOrConstantAtStart(right_candidate);
LAddI* add = new(zone()) LAddI(left, right);
- LInstruction* result = DefineSameAsFirst(add);
- if (instr->CheckFlag(HValue::kCanOverflow)) {
+ bool can_overflow = instr->CheckFlag(HValue::kCanOverflow);
+ LInstruction* result = use_lea
+ ? DefineAsRegister(add)
+ : DefineSameAsFirst(add);
+ if (can_overflow) {
result = AssignEnvironment(result);
}
return result;
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::ADD, instr);
} else {
- ASSERT(instr->representation().IsTagged());
return DoArithmeticT(Token::ADD, instr);
}
}
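A sketch of the machine-level difference the use_lea flag selects between (assumed ia32 output; the actual code generation is not part of this patch):

  // add ebx, ecx           ; two-operand add overwrites its left input,
  //                        ; hence DefineSameAsFirst
  // lea eax, [ebx + ecx]   ; three-operand form keeps both inputs live,
  //                        ; hence DefineAsRegister; lea sets no flags, so it
  //                        ; is only chosen when no overflow check is needed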
@@ -1397,11 +1658,11 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) {
LOperand* left = NULL;
LOperand* right = NULL;
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
- left = UseRegisterAtStart(instr->LeastConstantOperand());
- right = UseOrConstantAtStart(instr->MostConstantOperand());
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ left = UseRegisterAtStart(instr->BetterLeftOperand());
+ right = UseOrConstantAtStart(instr->BetterRightOperand());
} else {
ASSERT(instr->representation().IsDouble());
ASSERT(instr->left()->representation().IsDouble());
@@ -1433,15 +1694,19 @@ LInstruction* LChunkBuilder::DoPower(HPower* instr) {
LInstruction* LChunkBuilder::DoRandom(HRandom* instr) {
ASSERT(instr->representation().IsDouble());
ASSERT(instr->global_object()->representation().IsTagged());
- LOperand* global_object = UseFixed(instr->global_object(), eax);
- LRandom* result = new(zone()) LRandom(global_object);
- return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
+ LOperand* global_object = UseTempRegister(instr->global_object());
+ LOperand* scratch = TempRegister();
+ LOperand* scratch2 = TempRegister();
+ LOperand* scratch3 = TempRegister();
+ LRandom* result = new(zone()) LRandom(
+ global_object, scratch, scratch2, scratch3);
+ return DefineFixedDouble(result, xmm1);
}
LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
+ ASSERT(instr->left()->representation().IsSmiOrTagged());
+ ASSERT(instr->right()->representation().IsSmiOrTagged());
LOperand* context = UseFixed(instr->context(), esi);
LOperand* left = UseFixed(instr->left(), edx);
LOperand* right = UseFixed(instr->right(), eax);
@@ -1450,57 +1715,55 @@ LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
}
-LInstruction* LChunkBuilder::DoCompareIDAndBranch(
- HCompareIDAndBranch* instr) {
- Representation r = instr->GetInputRepresentation();
- if (r.IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
+LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
+ HCompareNumericAndBranch* instr) {
+ Representation r = instr->representation();
+ if (r.IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(r));
+ ASSERT(instr->right()->representation().Equals(r));
LOperand* left = UseRegisterOrConstantAtStart(instr->left());
LOperand* right = UseOrConstantAtStart(instr->right());
- return new(zone()) LCmpIDAndBranch(left, right);
+ return new(zone()) LCompareNumericAndBranch(left, right);
} else {
ASSERT(r.IsDouble());
ASSERT(instr->left()->representation().IsDouble());
ASSERT(instr->right()->representation().IsDouble());
LOperand* left;
LOperand* right;
- if (instr->left()->IsConstant() && instr->right()->IsConstant()) {
- left = UseRegisterOrConstantAtStart(instr->left());
- right = UseRegisterOrConstantAtStart(instr->right());
+ if (CanBeImmediateConstant(instr->left()) &&
+ CanBeImmediateConstant(instr->right())) {
+ // The code generator requires either both inputs to be constant
+ // operands, or neither.
+ left = UseConstant(instr->left());
+ right = UseConstant(instr->right());
} else {
left = UseRegisterAtStart(instr->left());
right = UseRegisterAtStart(instr->right());
}
- return new(zone()) LCmpIDAndBranch(left, right);
+ return new(zone()) LCompareNumericAndBranch(left, right);
}
}
LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
HCompareObjectEqAndBranch* instr) {
+ LInstruction* goto_instr = CheckElideControlInstruction(instr);
+ if (goto_instr != NULL) return goto_instr;
LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseAtStart(instr->right());
+ LOperand* right = UseOrConstantAtStart(instr->right());
return new(zone()) LCmpObjectEqAndBranch(left, right);
}
-LInstruction* LChunkBuilder::DoCompareConstantEqAndBranch(
- HCompareConstantEqAndBranch* instr) {
- return new(zone()) LCmpConstantEqAndBranch(
- UseRegisterAtStart(instr->value()));
-}
-
-
-LInstruction* LChunkBuilder::DoIsNilAndBranch(HIsNilAndBranch* instr) {
- // We only need a temp register for non-strict compare.
- LOperand* temp = instr->kind() == kStrictEquality ? NULL : TempRegister();
- return new(zone()) LIsNilAndBranch(UseRegisterAtStart(instr->value()), temp);
+LInstruction* LChunkBuilder::DoCompareHoleAndBranch(
+ HCompareHoleAndBranch* instr) {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return new(zone()) LCmpHoleAndBranch(value);
}
LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
- ASSERT(instr->value()->representation().IsTagged());
+ ASSERT(instr->value()->representation().IsSmiOrTagged());
LOperand* temp = TempRegister();
return new(zone()) LIsObjectAndBranch(UseRegister(instr->value()), temp);
}
@@ -1521,7 +1784,7 @@ LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
HIsUndetectableAndBranch* instr) {
- ASSERT(instr ->value()->representation().IsTagged());
+ ASSERT(instr->value()->representation().IsTagged());
return new(zone()) LIsUndetectableAndBranch(
UseRegisterAtStart(instr->value()), TempRegister());
}
@@ -1577,19 +1840,6 @@ LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
}
-LInstruction* LChunkBuilder::DoJSArrayLength(HJSArrayLength* instr) {
- LOperand* array = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LJSArrayLength(array));
-}
-
-
-LInstruction* LChunkBuilder::DoFixedArrayBaseLength(
- HFixedArrayBaseLength* instr) {
- LOperand* array = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LFixedArrayBaseLength(array));
-}
-
-
LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
LOperand* map = UseRegisterAtStart(instr->value());
return DefineAsRegister(new(zone()) LMapEnumLength(map));
@@ -1617,6 +1867,17 @@ LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
}
+LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
+ LOperand* string = UseRegister(instr->string());
+ LOperand* index = UseRegister(instr->index());
+ ASSERT(ecx.is_byte_register());
+ LOperand* value = UseFixed(instr->value(), ecx);
+ LSeqStringSetChar* result =
+ new(zone()) LSeqStringSetChar(instr->encoding(), string, index, value);
+ return DefineSameAsFirst(result);
+}
+
+
LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
return AssignEnvironment(new(zone()) LBoundsCheck(
UseRegisterOrConstantAtStart(instr->index()),
@@ -1624,6 +1885,13 @@ LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
}
+LInstruction* LChunkBuilder::DoBoundsCheckBaseIndexInformation(
+ HBoundsCheckBaseIndexInformation* instr) {
+ UNREACHABLE();
+ return NULL;
+}
+
+
LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
// The control instruction marking the end of a block that completed
// abruptly (e.g., threw an exception). There is nothing specific to do.
@@ -1654,43 +1922,63 @@ LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) {
LInstruction* LChunkBuilder::DoChange(HChange* instr) {
Representation from = instr->from();
Representation to = instr->to();
+ if (from.IsSmi()) {
+ if (to.IsTagged()) {
+ LOperand* value = UseRegister(instr->value());
+ return DefineSameAsFirst(new(zone()) LDummyUse(value));
+ }
+ from = Representation::Tagged();
+ }
+ // Only mark conversions that might need to allocate as calling rather than
+  // all changes. This way simple, non-allocating conversions do not have to
+  // force building a stack frame.
if (from.IsTagged()) {
if (to.IsDouble()) {
LOperand* value = UseRegister(instr->value());
// Temp register only necessary for minus zero check.
- LOperand* temp = instr->deoptimize_on_minus_zero()
- ? TempRegister()
- : NULL;
+ LOperand* temp = TempRegister();
LNumberUntagD* res = new(zone()) LNumberUntagD(value, temp);
return AssignEnvironment(DefineAsRegister(res));
+ } else if (to.IsSmi()) {
+ HValue* val = instr->value();
+ LOperand* value = UseRegister(val);
+ if (val->type().IsSmi()) {
+ return DefineSameAsFirst(new(zone()) LDummyUse(value));
+ }
+ return AssignEnvironment(DefineSameAsFirst(new(zone()) LCheckSmi(value)));
} else {
ASSERT(to.IsInteger32());
- LOperand* value = UseRegister(instr->value());
- if (instr->value()->type().IsSmi()) {
+ HValue* val = instr->value();
+ if (val->type().IsSmi() || val->representation().IsSmi()) {
+ LOperand* value = UseRegister(val);
return DefineSameAsFirst(new(zone()) LSmiUntag(value, false));
} else {
bool truncating = instr->CanTruncateToInt32();
LOperand* xmm_temp =
- (truncating && CpuFeatures::IsSupported(SSE3))
- ? NULL
- : FixedTemp(xmm1);
- LTaggedToI* res = new(zone()) LTaggedToI(value, xmm_temp);
+ (CpuFeatures::IsSafeForSnapshot(SSE2) && !truncating)
+ ? FixedTemp(xmm1) : NULL;
+ LTaggedToI* res = new(zone()) LTaggedToI(UseRegister(val), xmm_temp);
return AssignEnvironment(DefineSameAsFirst(res));
}
}
} else if (from.IsDouble()) {
if (to.IsTagged()) {
- LOperand* value = UseRegister(instr->value());
- LOperand* temp = TempRegister();
+ info()->MarkAsDeferredCalling();
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LOperand* temp = FLAG_inline_new ? TempRegister() : NULL;
// Make sure that temp and result_temp are different registers.
LUnallocated* result_temp = TempRegister();
LNumberTagD* result = new(zone()) LNumberTagD(value, temp);
return AssignPointerMap(Define(result, result_temp));
+ } else if (to.IsSmi()) {
+ LOperand* value = UseRegister(instr->value());
+ return AssignEnvironment(
+ DefineAsRegister(new(zone()) LDoubleToSmi(value)));
} else {
ASSERT(to.IsInteger32());
bool truncating = instr->CanTruncateToInt32();
- bool needs_temp = truncating && !CpuFeatures::IsSupported(SSE3);
+ bool needs_temp = CpuFeatures::IsSafeForSnapshot(SSE2) && !truncating;
LOperand* value = needs_temp ?
UseTempRegister(instr->value()) : UseRegister(instr->value());
LOperand* temp = needs_temp ? TempRegister() : NULL;
@@ -1698,19 +1986,31 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
DefineAsRegister(new(zone()) LDoubleToI(value, temp)));
}
} else if (from.IsInteger32()) {
+ info()->MarkAsDeferredCalling();
if (to.IsTagged()) {
HValue* val = instr->value();
LOperand* value = UseRegister(val);
if (val->HasRange() && val->range()->IsInSmiRange()) {
return DefineSameAsFirst(new(zone()) LSmiTag(value));
} else if (val->CheckFlag(HInstruction::kUint32)) {
- LOperand* temp = FixedTemp(xmm1);
+ LOperand* temp = CpuFeatures::IsSupported(SSE2) ? FixedTemp(xmm1)
+ : NULL;
LNumberTagU* result = new(zone()) LNumberTagU(value, temp);
return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
} else {
LNumberTagI* result = new(zone()) LNumberTagI(value);
return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
}
+ } else if (to.IsSmi()) {
+ HValue* val = instr->value();
+ LOperand* value = UseRegister(val);
+ LInstruction* result = val->CheckFlag(HInstruction::kUint32)
+ ? DefineSameAsFirst(new(zone()) LUint32ToSmi(value))
+ : DefineSameAsFirst(new(zone()) LInteger32ToSmi(value));
+ if (val->HasRange() && val->range()->IsInSmiRange()) {
+ return result;
+ }
+ return AssignEnvironment(result);
} else {
ASSERT(to.IsDouble());
if (instr->value()->CheckFlag(HInstruction::kUint32)) {
@@ -1728,49 +2028,49 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
}
-LInstruction* LChunkBuilder::DoCheckNonSmi(HCheckNonSmi* instr) {
+LInstruction* LChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) {
LOperand* value = UseAtStart(instr->value());
return AssignEnvironment(new(zone()) LCheckNonSmi(value));
}
-LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
+LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
- LOperand* temp = TempRegister();
- LCheckInstanceType* result = new(zone()) LCheckInstanceType(value, temp);
- return AssignEnvironment(result);
+ return AssignEnvironment(new(zone()) LCheckSmi(value));
}
-LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
+LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
+ LOperand* value = UseRegisterAtStart(instr->value());
LOperand* temp = TempRegister();
- LCheckPrototypeMaps* result = new(zone()) LCheckPrototypeMaps(temp);
+ LCheckInstanceType* result = new(zone()) LCheckInstanceType(value, temp);
return AssignEnvironment(result);
}
-LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
- LOperand* value = UseAtStart(instr->value());
- return AssignEnvironment(new(zone()) LCheckSmi(value));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
- // If the target is in new space, we'll emit a global cell compare and so
- // want the value in a register. If the target gets promoted before we
+LInstruction* LChunkBuilder::DoCheckValue(HCheckValue* instr) {
+ // If the object is in new space, we'll emit a global cell compare and so
+ // want the value in a register. If the object gets promoted before we
// emit code, we will still get the register but will do an immediate
// compare instead of the cell compare. This is safe.
- LOperand* value = Isolate::Current()->heap()->InNewSpace(*instr->target())
- ? UseRegisterAtStart(instr->value())
- : UseAtStart(instr->value());
- return AssignEnvironment(new(zone()) LCheckFunction(value));
+ LOperand* value = instr->object_in_new_space()
+ ? UseRegisterAtStart(instr->value()) : UseAtStart(instr->value());
+ return AssignEnvironment(new(zone()) LCheckValue(value));
}
LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
+ LOperand* value = NULL;
+ if (!instr->CanOmitMapChecks()) {
+ value = UseRegisterAtStart(instr->value());
+ if (instr->has_migration_target()) info()->MarkAsDeferredCalling();
+ }
LCheckMaps* result = new(zone()) LCheckMaps(value);
- return AssignEnvironment(result);
+ if (!instr->CanOmitMapChecks()) {
+ AssignEnvironment(result);
+ if (instr->has_migration_target()) return AssignPointerMap(result);
+ }
+ return result;
}
@@ -1784,32 +2084,48 @@ LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
LOperand* reg = UseFixed(value, eax);
return DefineFixed(new(zone()) LClampIToUint8(reg), eax);
} else {
- ASSERT(input_rep.IsTagged());
- LOperand* reg = UseFixed(value, eax);
- // Register allocator doesn't (yet) support allocation of double
- // temps. Reserve xmm1 explicitly.
- LOperand* temp = FixedTemp(xmm1);
- LClampTToUint8* result = new(zone()) LClampTToUint8(reg, temp);
- return AssignEnvironment(DefineFixed(result, eax));
+ ASSERT(input_rep.IsSmiOrTagged());
+ if (CpuFeatures::IsSupported(SSE2)) {
+ LOperand* reg = UseFixed(value, eax);
+ // Register allocator doesn't (yet) support allocation of double
+ // temps. Reserve xmm1 explicitly.
+ LOperand* temp = FixedTemp(xmm1);
+ LClampTToUint8* result = new(zone()) LClampTToUint8(reg, temp);
+ return AssignEnvironment(DefineFixed(result, eax));
+ } else {
+ LOperand* value = UseRegister(instr->value());
+ LClampTToUint8NoSSE2* res =
+ new(zone()) LClampTToUint8NoSSE2(value, TempRegister(),
+ TempRegister(), TempRegister());
+ return AssignEnvironment(DefineFixed(res, ecx));
+ }
}
}
LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
- return new(zone()) LReturn(UseFixed(instr->value(), eax));
+ LOperand* context = info()->IsStub()
+ ? UseFixed(instr->context(), esi)
+ : NULL;
+ LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count());
+ return new(zone()) LReturn(UseFixed(instr->value(), eax), context,
+ parameter_count);
}
LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
Representation r = instr->representation();
- if (r.IsInteger32()) {
+ if (r.IsSmi()) {
+ return DefineAsRegister(new(zone()) LConstantS);
+ } else if (r.IsInteger32()) {
return DefineAsRegister(new(zone()) LConstantI);
} else if (r.IsDouble()) {
double value = instr->DoubleValue();
- LOperand* temp = (BitCast<uint64_t, double>(value) != 0)
- ? TempRegister()
- : NULL;
+ bool value_is_zero = BitCast<uint64_t, double>(value) == 0;
+ LOperand* temp = value_is_zero ? NULL : TempRegister();
return DefineAsRegister(new(zone()) LConstantD(temp));
+ } else if (r.IsExternal()) {
+ return DefineAsRegister(new(zone()) LConstantE);
} else if (r.IsTagged()) {
return DefineAsRegister(new(zone()) LConstantT);
} else {
@@ -1878,31 +2194,14 @@ LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
- ASSERT(instr->representation().IsTagged());
- LOperand* obj = UseRegisterAtStart(instr->object());
+ LOperand* obj = (instr->access().IsExternalMemory() &&
+ instr->access().offset() == 0)
+ ? UseRegisterOrConstantAtStart(instr->object())
+ : UseRegisterAtStart(instr->object());
return DefineAsRegister(new(zone()) LLoadNamedField(obj));
}
-LInstruction* LChunkBuilder::DoLoadNamedFieldPolymorphic(
- HLoadNamedFieldPolymorphic* instr) {
- ASSERT(instr->representation().IsTagged());
- if (instr->need_generic()) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* obj = UseFixed(instr->object(), edx);
- LLoadNamedFieldPolymorphic* result =
- new(zone()) LLoadNamedFieldPolymorphic(context, obj);
- return MarkAsCall(DefineFixed(result, eax), instr);
- } else {
- LOperand* context = UseAny(instr->context()); // Not actually used.
- LOperand* obj = UseRegisterAtStart(instr->object());
- LLoadNamedFieldPolymorphic* result =
- new(zone()) LLoadNamedFieldPolymorphic(context, obj);
- return AssignEnvironment(DefineAsRegister(result));
- }
-}
-
-
LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* object = UseFixed(instr->object(), edx);
@@ -1919,9 +2218,8 @@ LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
}
-LInstruction* LChunkBuilder::DoLoadElements(HLoadElements* instr) {
- LOperand* input = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LLoadElements(input));
+LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) {
+ return DefineAsRegister(new(zone()) LLoadRoot);
}
@@ -1932,59 +2230,37 @@ LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
}
-LInstruction* LChunkBuilder::DoLoadKeyedFastElement(
- HLoadKeyedFastElement* instr) {
- ASSERT(instr->representation().IsTagged());
- ASSERT(instr->key()->representation().IsInteger32() ||
- instr->key()->representation().IsTagged());
- LOperand* obj = UseRegisterAtStart(instr->object());
- LOperand* key = UseRegisterOrConstantAtStart(instr->key());
- LLoadKeyedFastElement* result = new(zone()) LLoadKeyedFastElement(obj, key);
- if (instr->RequiresHoleCheck()) AssignEnvironment(result);
- return DefineAsRegister(result);
-}
-
-
-LInstruction* LChunkBuilder::DoLoadKeyedFastDoubleElement(
- HLoadKeyedFastDoubleElement* instr) {
- ASSERT(instr->representation().IsDouble());
- ASSERT(instr->key()->representation().IsInteger32() ||
- instr->key()->representation().IsTagged());
- LOperand* elements = UseRegisterAtStart(instr->elements());
- LOperand* key = UseRegisterOrConstantAtStart(instr->key());
- LLoadKeyedFastDoubleElement* result =
- new(zone()) LLoadKeyedFastDoubleElement(elements, key);
- return AssignEnvironment(DefineAsRegister(result));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement(
- HLoadKeyedSpecializedArrayElement* instr) {
+LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
+ ASSERT(instr->key()->representation().IsSmiOrInteger32());
ElementsKind elements_kind = instr->elements_kind();
- ASSERT(
- (instr->representation().IsInteger32() &&
- (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
- (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
- (instr->representation().IsDouble() &&
- ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
- (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
- ASSERT(instr->key()->representation().IsInteger32() ||
- instr->key()->representation().IsTagged());
- LOperand* external_pointer = UseRegister(instr->external_pointer());
bool clobbers_key = ExternalArrayOpRequiresTemp(
instr->key()->representation(), elements_kind);
LOperand* key = clobbers_key
? UseTempRegister(instr->key())
- : UseRegisterOrConstant(instr->key());
+ : UseRegisterOrConstantAtStart(instr->key());
+ LLoadKeyed* result = NULL;
- LLoadKeyedSpecializedArrayElement* result =
- new(zone()) LLoadKeyedSpecializedArrayElement(external_pointer, key);
- LInstruction* load_instr = DefineAsRegister(result);
+ if (!instr->is_external()) {
+ LOperand* obj = UseRegisterAtStart(instr->elements());
+ result = new(zone()) LLoadKeyed(obj, key);
+ } else {
+ ASSERT(
+ (instr->representation().IsInteger32() &&
+ (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
+ (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
+ (instr->representation().IsDouble() &&
+ ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
+ (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
+ LOperand* external_pointer = UseRegister(instr->elements());
+ result = new(zone()) LLoadKeyed(external_pointer, key);
+ }
+
+ DefineAsRegister(result);
+ bool can_deoptimize = instr->RequiresHoleCheck() ||
+ (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS);
// An unsigned int array load might overflow and cause a deopt, make sure it
// has an environment.
- return (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS)
- ? AssignEnvironment(load_instr)
- : load_instr;
+ return can_deoptimize ? AssignEnvironment(result) : result;
}
@@ -1999,72 +2275,77 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
}
-LInstruction* LChunkBuilder::DoStoreKeyedFastElement(
- HStoreKeyedFastElement* instr) {
- bool needs_write_barrier = instr->NeedsWriteBarrier();
- ASSERT(instr->value()->representation().IsTagged());
- ASSERT(instr->object()->representation().IsTagged());
- ASSERT(instr->key()->representation().IsInteger32() ||
- instr->key()->representation().IsTagged());
-
- LOperand* obj = UseRegister(instr->object());
- LOperand* val = needs_write_barrier
- ? UseTempRegister(instr->value())
- : UseRegisterAtStart(instr->value());
- LOperand* key = needs_write_barrier
- ? UseTempRegister(instr->key())
- : UseRegisterOrConstantAtStart(instr->key());
- return new(zone()) LStoreKeyedFastElement(obj, key, val);
-}
-
+LOperand* LChunkBuilder::GetStoreKeyedValueOperand(HStoreKeyed* instr) {
+ ElementsKind elements_kind = instr->elements_kind();
-LInstruction* LChunkBuilder::DoStoreKeyedFastDoubleElement(
- HStoreKeyedFastDoubleElement* instr) {
- ASSERT(instr->value()->representation().IsDouble());
- ASSERT(instr->elements()->representation().IsTagged());
- ASSERT(instr->key()->representation().IsInteger32() ||
- instr->key()->representation().IsTagged());
+ // Determine if we need a byte register in this case for the value.
+ bool val_is_fixed_register =
+ elements_kind == EXTERNAL_BYTE_ELEMENTS ||
+ elements_kind == EXTERNAL_UNSIGNED_BYTE_ELEMENTS ||
+ elements_kind == EXTERNAL_PIXEL_ELEMENTS;
+ if (val_is_fixed_register) {
+ return UseFixed(instr->value(), eax);
+ }
- LOperand* elements = UseRegisterAtStart(instr->elements());
- LOperand* val = UseTempRegister(instr->value());
- LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+ if (!CpuFeatures::IsSafeForSnapshot(SSE2) &&
+ IsDoubleOrFloatElementsKind(elements_kind)) {
+ return UseRegisterAtStart(instr->value());
+ }
- return new(zone()) LStoreKeyedFastDoubleElement(elements, key, val);
+ return UseRegister(instr->value());
}
-LInstruction* LChunkBuilder::DoStoreKeyedSpecializedArrayElement(
- HStoreKeyedSpecializedArrayElement* instr) {
+LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
+ if (!instr->is_external()) {
+ ASSERT(instr->elements()->representation().IsTagged());
+ ASSERT(instr->key()->representation().IsInteger32() ||
+ instr->key()->representation().IsSmi());
+
+ if (instr->value()->representation().IsDouble()) {
+ LOperand* object = UseRegisterAtStart(instr->elements());
+ LOperand* val = NULL;
+ val = UseRegisterAtStart(instr->value());
+ LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+ return new(zone()) LStoreKeyed(object, key, val);
+ } else {
+ ASSERT(instr->value()->representation().IsSmiOrTagged());
+ bool needs_write_barrier = instr->NeedsWriteBarrier();
+
+ LOperand* obj = UseRegister(instr->elements());
+ LOperand* val;
+ LOperand* key;
+ if (needs_write_barrier) {
+ val = UseTempRegister(instr->value());
+ key = UseTempRegister(instr->key());
+ } else {
+ val = UseRegisterOrConstantAtStart(instr->value());
+ key = UseRegisterOrConstantAtStart(instr->key());
+ }
+ return new(zone()) LStoreKeyed(obj, key, val);
+ }
+ }
+
ElementsKind elements_kind = instr->elements_kind();
- ASSERT(
+ ASSERT(
(instr->value()->representation().IsInteger32() &&
(elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
(elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
(instr->value()->representation().IsDouble() &&
((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
- (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
- ASSERT(instr->external_pointer()->representation().IsExternal());
- ASSERT(instr->key()->representation().IsInteger32() ||
- instr->key()->representation().IsTagged());
-
- LOperand* external_pointer = UseRegister(instr->external_pointer());
- LOperand* val = NULL;
- if (elements_kind == EXTERNAL_BYTE_ELEMENTS ||
- elements_kind == EXTERNAL_UNSIGNED_BYTE_ELEMENTS ||
- elements_kind == EXTERNAL_PIXEL_ELEMENTS) {
- // We need a byte register in this case for the value.
- val = UseFixed(instr->value(), eax);
- } else {
- val = UseRegister(instr->value());
- }
+ (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
+ ASSERT(instr->elements()->representation().IsExternal());
+
+ LOperand* external_pointer = UseRegister(instr->elements());
+ LOperand* val = GetStoreKeyedValueOperand(instr);
bool clobbers_key = ExternalArrayOpRequiresTemp(
instr->key()->representation(), elements_kind);
LOperand* key = clobbers_key
? UseTempRegister(instr->key())
- : UseRegisterOrConstant(instr->key());
- return new(zone()) LStoreKeyedSpecializedArrayElement(external_pointer,
- key,
- val);
+ : UseRegisterOrConstantAtStart(instr->key());
+ return new(zone()) LStoreKeyed(external_pointer,
+ key,
+ val);
}
@@ -2086,57 +2367,97 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
LInstruction* LChunkBuilder::DoTransitionElementsKind(
HTransitionElementsKind* instr) {
- ElementsKind from_kind = instr->original_map()->elements_kind();
- ElementsKind to_kind = instr->transitioned_map()->elements_kind();
- if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
+ LOperand* object = UseRegister(instr->object());
+ if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
LOperand* object = UseRegister(instr->object());
LOperand* new_map_reg = TempRegister();
LOperand* temp_reg = TempRegister();
LTransitionElementsKind* result =
- new(zone()) LTransitionElementsKind(object, new_map_reg, temp_reg);
- return DefineSameAsFirst(result);
+ new(zone()) LTransitionElementsKind(object, NULL,
+ new_map_reg, temp_reg);
+ return result;
} else {
- LOperand* object = UseFixed(instr->object(), eax);
- LOperand* fixed_object_reg = FixedTemp(edx);
- LOperand* new_map_reg = FixedTemp(ebx);
+ LOperand* context = UseRegister(instr->context());
LTransitionElementsKind* result =
- new(zone()) LTransitionElementsKind(object,
- new_map_reg,
- fixed_object_reg);
- return MarkAsCall(DefineFixed(result, eax), instr);
+ new(zone()) LTransitionElementsKind(object, context, NULL, NULL);
+ return AssignPointerMap(result);
}
}
+LInstruction* LChunkBuilder::DoTrapAllocationMemento(
+ HTrapAllocationMemento* instr) {
+ LOperand* object = UseRegister(instr->object());
+ LOperand* temp = TempRegister();
+ LTrapAllocationMemento* result =
+ new(zone()) LTrapAllocationMemento(object, temp);
+ return AssignEnvironment(result);
+}
+
+
LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
+ bool is_in_object = instr->access().IsInobject();
+ bool is_external_location = instr->access().IsExternalMemory() &&
+ instr->access().offset() == 0;
bool needs_write_barrier = instr->NeedsWriteBarrier();
- bool needs_write_barrier_for_map = !instr->transition().is_null() &&
+ bool needs_write_barrier_for_map = instr->has_transition() &&
instr->NeedsWriteBarrierForMap();
LOperand* obj;
if (needs_write_barrier) {
- obj = instr->is_in_object()
+ obj = is_in_object
? UseRegister(instr->object())
: UseTempRegister(instr->object());
+ } else if (is_external_location) {
+ ASSERT(!is_in_object);
+ ASSERT(!needs_write_barrier);
+ ASSERT(!needs_write_barrier_for_map);
+ obj = UseRegisterOrConstant(instr->object());
} else {
obj = needs_write_barrier_for_map
? UseRegister(instr->object())
: UseRegisterAtStart(instr->object());
}
- LOperand* val = needs_write_barrier
- ? UseTempRegister(instr->value())
- : UseRegister(instr->value());
+ bool can_be_constant = instr->value()->IsConstant() &&
+ HConstant::cast(instr->value())->NotInNewSpace() &&
+ !(FLAG_track_double_fields && instr->field_representation().IsDouble());
+
+ LOperand* val;
+ if (instr->field_representation().IsByte()) {
+ // mov_b requires a byte register (i.e. any of eax, ebx, ecx, edx).
+ // Just force the value to be in eax and we're safe here.
+ val = UseFixed(instr->value(), eax);
+ } else if (needs_write_barrier) {
+ val = UseTempRegister(instr->value());
+ } else if (can_be_constant) {
+ val = UseRegisterOrConstant(instr->value());
+ } else if (FLAG_track_fields && instr->field_representation().IsSmi()) {
+ val = UseTempRegister(instr->value());
+ } else if (FLAG_track_double_fields &&
+ instr->field_representation().IsDouble()) {
+ val = UseRegisterAtStart(instr->value());
+ } else {
+ val = UseRegister(instr->value());
+ }
// We only need a scratch register if we have a write barrier or we
// have a store into the properties array (not in-object-property).
- LOperand* temp = (!instr->is_in_object() || needs_write_barrier ||
- needs_write_barrier_for_map) ? TempRegister() : NULL;
+ LOperand* temp = (!is_in_object || needs_write_barrier ||
+ needs_write_barrier_for_map) ? TempRegister() : NULL;
// We need a temporary register for write barrier of the map field.
LOperand* temp_map = needs_write_barrier_for_map ? TempRegister() : NULL;
- return new(zone()) LStoreNamedField(obj, val, temp, temp_map);
+ LStoreNamedField* result =
+ new(zone()) LStoreNamedField(obj, val, temp, temp_map);
+ if (FLAG_track_heap_object_fields &&
+ instr->field_representation().IsHeapObject()) {
+ if (!instr->value()->type().IsHeapObject()) {
+ return AssignEnvironment(result);
+ }
+ }
+ return result;
}
@@ -2179,41 +2500,18 @@ LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
}
-LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
- LOperand* string = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LStringLength(string));
-}
-
-
-LInstruction* LChunkBuilder::DoAllocateObject(HAllocateObject* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
+LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
+ info()->MarkAsDeferredCalling();
+ LOperand* context = UseAny(instr->context());
+ LOperand* size = instr->size()->IsConstant()
+ ? UseConstant(instr->size())
+ : UseTempRegister(instr->size());
LOperand* temp = TempRegister();
- LAllocateObject* result = new(zone()) LAllocateObject(context, temp);
+ LAllocate* result = new(zone()) LAllocate(context, size, temp);
return AssignPointerMap(DefineAsRegister(result));
}
-LInstruction* LChunkBuilder::DoFastLiteral(HFastLiteral* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- return MarkAsCall(
- DefineFixed(new(zone()) LFastLiteral(context), eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- return MarkAsCall(
- DefineFixed(new(zone()) LArrayLiteral(context), eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoObjectLiteral(HObjectLiteral* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- return MarkAsCall(
- DefineFixed(new(zone()) LObjectLiteral(context), eax), instr);
-}
-
-
LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
LOperand* context = UseFixed(instr->context(), esi);
return MarkAsCall(
@@ -2228,15 +2526,6 @@ LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
}
-LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* object = UseAtStart(instr->object());
- LOperand* key = UseOrConstantAtStart(instr->key());
- LDeleteProperty* result = new(zone()) LDeleteProperty(context, object, key);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
ASSERT(argument_count_ == 0);
allocator_->MarkAsOsrEntry();
@@ -2246,16 +2535,39 @@ LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
- int spill_index = chunk()->GetParameterStackSlot(instr->index());
- return DefineAsSpilled(new(zone()) LParameter, spill_index);
+ LParameter* result = new(zone()) LParameter;
+ if (instr->kind() == HParameter::STACK_PARAMETER) {
+ int spill_index = chunk()->GetParameterStackSlot(instr->index());
+ return DefineAsSpilled(result, spill_index);
+ } else {
+ ASSERT(info()->IsStub());
+ CodeStubInterfaceDescriptor* descriptor =
+ info()->code_stub()->GetInterfaceDescriptor(info()->isolate());
+ int index = static_cast<int>(instr->index());
+ Register reg = DESCRIPTOR_GET_PARAMETER_REGISTER(descriptor, index);
+ return DefineFixed(result, reg);
+ }
}
LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
- int spill_index = chunk()->GetNextSpillIndex(false); // Not double-width.
- if (spill_index > LUnallocated::kMaxFixedIndex) {
- Abort("Too many spill slots needed for OSR");
- spill_index = 0;
+ // Use an index that corresponds to the location in the unoptimized frame,
+ // which the optimized frame will subsume.
+ int env_index = instr->index();
+ int spill_index = 0;
+ if (instr->environment()->is_parameter_index(env_index)) {
+ spill_index = chunk()->GetParameterStackSlot(env_index);
+ } else {
+ spill_index = env_index - instr->environment()->first_local_index();
+ if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
+ Abort(kNotEnoughSpillSlotsForOsr);
+ spill_index = 0;
+ }
+ if (spill_index == 0) {
+ // The dynamic frame alignment state overwrites the first local.
+ // The first local is saved at the end of the unoptimized frame.
+ spill_index = graph()->osr()->UnoptimizedFrameSlots();
+ }
}
return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
}
@@ -2263,7 +2575,6 @@ LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
LOperand* context = UseFixed(instr->context(), esi);
- argument_count_ -= instr->argument_count();
LCallStub* result = new(zone()) LCallStub(context);
return MarkAsCall(DefineFixed(result, eax), instr);
}
@@ -2278,10 +2589,26 @@ LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
}
+LInstruction* LChunkBuilder::DoCapturedObject(HCapturedObject* instr) {
+ instr->ReplayEnvironment(current_block_->last_environment());
+
+ // There are no real uses of a captured object.
+ return NULL;
+}
+
+
LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
+ info()->MarkAsRequiresFrame();
LOperand* args = UseRegister(instr->arguments());
- LOperand* length = UseTempRegister(instr->length());
- LOperand* index = Use(instr->index());
+ LOperand* length;
+ LOperand* index;
+ if (instr->length()->IsConstant() && instr->index()->IsConstant()) {
+ length = UseRegisterOrConstant(instr->length());
+ index = UseOrConstant(instr->index());
+ } else {
+ length = UseTempRegister(instr->length());
+ index = Use(instr->index());
+ }
return DefineAsRegister(new(zone()) LAccessArgumentsAt(args, length, index));
}
@@ -2313,20 +2640,7 @@ LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
- HEnvironment* env = current_block_->last_environment();
- ASSERT(env != NULL);
-
- env->set_ast_id(instr->ast_id());
-
- env->Drop(instr->pop_count());
- for (int i = 0; i < instr->values()->length(); ++i) {
- HValue* value = instr->values()->at(i);
- if (instr->HasAssignedIndexAt(i)) {
- env->Bind(instr->GetAssignedIndexAt(i), value);
- } else {
- env->Push(value);
- }
- }
+ instr->ReplayEnvironment(current_block_->last_environment());
// If there is an instruction pending deoptimization environment create a
// lazy bailout instruction to capture the environment.
@@ -2348,6 +2662,7 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
+ info()->MarkAsDeferredCalling();
if (instr->is_function_entry()) {
LOperand* context = UseFixed(instr->context(), esi);
return MarkAsCall(new(zone()) LStackCheck(context), instr);
@@ -2367,10 +2682,11 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
instr->arguments_count(),
instr->function(),
undefined,
- instr->call_kind(),
- instr->inlining_kind());
- if (instr->arguments_var() != NULL) {
- inner->Bind(instr->arguments_var(), graph()->GetArgumentsObject());
+ instr->inlining_kind(),
+ instr->undefined_receiver());
+ // Only replay binding of arguments object if it wasn't removed from graph.
+ if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) {
+ inner->Bind(instr->arguments_var(), instr->arguments_object());
}
inner->set_entry(instr);
current_block_->UpdateEnvironment(inner);
@@ -2387,7 +2703,7 @@ LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
if (env->entry()->arguments_pushed()) {
int argument_count = env->arguments_environment()->parameter_count();
pop = new(zone()) LDrop(argument_count);
- argument_count_ -= argument_count;
+ ASSERT(instr->argument_delta() == -argument_count);
}
HEnvironment* outer = current_block_->last_environment()->
@@ -2397,15 +2713,6 @@ LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
}
-LInstruction* LChunkBuilder::DoIn(HIn* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* key = UseOrConstantAtStart(instr->key());
- LOperand* object = UseOrConstantAtStart(instr->object());
- LIn* result = new(zone()) LIn(context, key, object);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* object = UseFixed(instr->enumerable(), eax);
diff --git a/deps/v8/src/ia32/lithium-ia32.h b/deps/v8/src/ia32/lithium-ia32.h
index 4643f95f4..9b00f3c35 100644
--- a/deps/v8/src/ia32/lithium-ia32.h
+++ b/deps/v8/src/ia32/lithium-ia32.h
@@ -43,15 +43,13 @@ class LCodeGen;
#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
V(AccessArgumentsAt) \
V(AddI) \
- V(AllocateObject) \
+ V(Allocate) \
V(ApplyArguments) \
V(ArgumentsElements) \
V(ArgumentsLength) \
V(ArithmeticD) \
V(ArithmeticT) \
- V(ArrayLiteral) \
V(BitI) \
- V(BitNotI) \
V(BoundsCheck) \
V(Branch) \
V(CallConstantFunction) \
@@ -61,35 +59,45 @@ class LCodeGen;
V(CallKnownGlobal) \
V(CallNamed) \
V(CallNew) \
+ V(CallNewArray) \
V(CallRuntime) \
V(CallStub) \
- V(CheckFunction) \
V(CheckInstanceType) \
V(CheckMaps) \
+ V(CheckMapValue) \
V(CheckNonSmi) \
- V(CheckPrototypeMaps) \
V(CheckSmi) \
+ V(CheckValue) \
V(ClampDToUint8) \
V(ClampIToUint8) \
V(ClampTToUint8) \
+ V(ClampTToUint8NoSSE2) \
V(ClassOfTestAndBranch) \
- V(CmpIDAndBranch) \
+ V(ClobberDoubles) \
+ V(CompareNumericAndBranch) \
V(CmpObjectEqAndBranch) \
+ V(CmpHoleAndBranch) \
V(CmpMapAndBranch) \
V(CmpT) \
- V(CmpConstantEqAndBranch) \
V(ConstantD) \
+ V(ConstantE) \
V(ConstantI) \
+ V(ConstantS) \
V(ConstantT) \
V(Context) \
+ V(DateField) \
+ V(DebugBreak) \
V(DeclareGlobals) \
- V(DeleteProperty) \
V(Deoptimize) \
V(DivI) \
V(DoubleToI) \
+ V(DoubleToSmi) \
+ V(Drop) \
+ V(Dummy) \
+ V(DummyUse) \
V(ElementsKind) \
- V(FastLiteral) \
- V(FixedArrayBaseLength) \
+ V(ForInCacheArray) \
+ V(ForInPrepareMap) \
V(FunctionLiteral) \
V(GetCachedArrayIndex) \
V(GlobalObject) \
@@ -97,46 +105,50 @@ class LCodeGen;
V(Goto) \
V(HasCachedArrayIndexAndBranch) \
V(HasInstanceTypeAndBranch) \
- V(In) \
+ V(InnerAllocatedObject) \
V(InstanceOf) \
V(InstanceOfKnownGlobal) \
V(InstructionGap) \
V(Integer32ToDouble) \
- V(Uint32ToDouble) \
+ V(Integer32ToSmi) \
V(InvokeFunction) \
V(IsConstructCallAndBranch) \
- V(IsNilAndBranch) \
V(IsObjectAndBranch) \
V(IsStringAndBranch) \
V(IsSmiAndBranch) \
V(IsUndetectableAndBranch) \
- V(JSArrayLength) \
V(Label) \
V(LazyBailout) \
V(LoadContextSlot) \
- V(LoadElements) \
V(LoadExternalArrayPointer) \
+ V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
V(LoadGlobalCell) \
V(LoadGlobalGeneric) \
- V(LoadKeyedFastElement) \
- V(LoadKeyedFastDoubleElement) \
+ V(LoadKeyed) \
V(LoadKeyedGeneric) \
- V(LoadKeyedSpecializedArrayElement) \
V(LoadNamedField) \
- V(LoadNamedFieldPolymorphic) \
V(LoadNamedGeneric) \
+ V(LoadRoot) \
V(MapEnumLength) \
+ V(MathAbs) \
+ V(MathCos) \
+ V(MathExp) \
+ V(MathFloor) \
V(MathFloorOfDiv) \
+ V(MathLog) \
V(MathMinMax) \
V(MathPowHalf) \
+ V(MathRound) \
+ V(MathSin) \
+ V(MathSqrt) \
+ V(MathTan) \
V(ModI) \
V(MulI) \
V(NumberTagD) \
V(NumberTagI) \
V(NumberTagU) \
V(NumberUntagD) \
- V(ObjectLiteral) \
V(OsrEntry) \
V(OuterContext) \
V(Parameter) \
@@ -145,51 +157,50 @@ class LCodeGen;
V(PushArgument) \
V(RegExpLiteral) \
V(Return) \
+ V(SeqStringSetChar) \
V(ShiftI) \
V(SmiTag) \
V(SmiUntag) \
V(StackCheck) \
+ V(StoreCodeEntry) \
V(StoreContextSlot) \
V(StoreGlobalCell) \
V(StoreGlobalGeneric) \
- V(StoreKeyedFastDoubleElement) \
- V(StoreKeyedFastElement) \
+ V(StoreKeyed) \
V(StoreKeyedGeneric) \
- V(StoreKeyedSpecializedArrayElement) \
V(StoreNamedField) \
V(StoreNamedGeneric) \
V(StringAdd) \
V(StringCharCodeAt) \
V(StringCharFromCode) \
V(StringCompareAndBranch) \
- V(StringLength) \
V(SubI) \
V(TaggedToI) \
V(ThisFunction) \
V(Throw) \
V(ToFastProperties) \
V(TransitionElementsKind) \
+ V(TrapAllocationMemento) \
V(Typeof) \
V(TypeofIsAndBranch) \
- V(UnaryMathOperation) \
+ V(Uint32ToDouble) \
+ V(Uint32ToSmi) \
V(UnknownOSRValue) \
V(ValueOf) \
- V(ForInPrepareMap) \
- V(ForInCacheArray) \
- V(CheckMapValue) \
- V(LoadFieldByIndex) \
- V(DateField) \
- V(WrapReceiver) \
- V(Drop)
+ V(WrapReceiver)
-#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
- virtual Opcode opcode() const { return LInstruction::k##type; } \
- virtual void CompileToNative(LCodeGen* generator); \
- virtual const char* Mnemonic() const { return mnemonic; } \
- static L##type* cast(LInstruction* instr) { \
- ASSERT(instr->Is##type()); \
- return reinterpret_cast<L##type*>(instr); \
+#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
+ virtual Opcode opcode() const V8_FINAL V8_OVERRIDE { \
+ return LInstruction::k##type; \
+ } \
+ virtual void CompileToNative(LCodeGen* generator) V8_FINAL V8_OVERRIDE; \
+ virtual const char* Mnemonic() const V8_FINAL V8_OVERRIDE { \
+ return mnemonic; \
+ } \
+ static L##type* cast(LInstruction* instr) { \
+ ASSERT(instr->Is##type()); \
+ return reinterpret_cast<L##type*>(instr); \
}
@@ -199,13 +210,15 @@ class LCodeGen;
}
-class LInstruction: public ZoneObject {
+class LInstruction : public ZoneObject {
public:
LInstruction()
: environment_(NULL),
hydrogen_value_(NULL),
- is_call_(false) { }
- virtual ~LInstruction() { }
+ bit_field_(IsCallBits::encode(false)) {
+ }
+
+ virtual ~LInstruction() {}
virtual void CompileToNative(LCodeGen* generator) = 0;
virtual const char* Mnemonic() const = 0;
@@ -243,23 +256,36 @@ class LInstruction: public ZoneObject {
LPointerMap* pointer_map() const { return pointer_map_.get(); }
bool HasPointerMap() const { return pointer_map_.is_set(); }
-
void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
HValue* hydrogen_value() const { return hydrogen_value_; }
virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) { }
- void MarkAsCall() { is_call_ = true; }
+ void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
+ bool IsCall() const { return IsCallBits::decode(bit_field_); }
// Interface to the register allocator and iterators.
- bool IsMarkedAsCall() const { return is_call_; }
+ bool ClobbersTemps() const { return IsCall(); }
+ bool ClobbersRegisters() const { return IsCall(); }
+ virtual bool ClobbersDoubleRegisters() const {
+ return IsCall() ||
+ // We only have rudimentary X87Stack tracking, thus in general
+ // cannot handle phi-nodes.
+ (!CpuFeatures::IsSafeForSnapshot(SSE2) && IsControl());
+ }
virtual bool HasResult() const = 0;
- virtual LOperand* result() = 0;
+ virtual LOperand* result() const = 0;
+
+ bool HasDoubleRegisterResult();
+ bool HasDoubleRegisterInput();
+ bool IsDoubleInput(X87Register reg, LCodeGen* cgen);
LOperand* FirstInput() { return InputAt(0); }
LOperand* Output() { return HasResult() ? result() : NULL; }
+ virtual bool HasInterestingComment(LCodeGen* gen) const { return true; }
+
#ifdef DEBUG
void VerifyCall();
#endif
@@ -274,10 +300,12 @@ class LInstruction: public ZoneObject {
virtual int TempCount() = 0;
virtual LOperand* TempAt(int i) = 0;
+ class IsCallBits: public BitField<bool, 0, 1> {};
+
LEnvironment* environment_;
SetOncePointer<LPointerMap> pointer_map_;
HValue* hydrogen_value_;
- bool is_call_;
+ int bit_field_;
};
@@ -285,13 +313,15 @@ class LInstruction: public ZoneObject {
// I = number of input operands.
// T = number of temporary operands.
template<int R, int I, int T>
-class LTemplateInstruction: public LInstruction {
+class LTemplateInstruction : public LInstruction {
public:
// Allow 0 or 1 output operands.
STATIC_ASSERT(R == 0 || R == 1);
- virtual bool HasResult() const { return R != 0; }
+ virtual bool HasResult() const V8_FINAL V8_OVERRIDE {
+ return R != 0 && result() != NULL;
+ }
void set_result(LOperand* operand) { results_[0] = operand; }
- LOperand* result() { return results_[0]; }
+ LOperand* result() const { return results_[0]; }
protected:
EmbeddedContainer<LOperand*, R> results_;
@@ -300,15 +330,15 @@ class LTemplateInstruction: public LInstruction {
private:
// Iterator support.
- virtual int InputCount() { return I; }
- virtual LOperand* InputAt(int i) { return inputs_[i]; }
+ virtual int InputCount() V8_FINAL V8_OVERRIDE { return I; }
+ virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; }
- virtual int TempCount() { return T; }
- virtual LOperand* TempAt(int i) { return temps_[i]; }
+ virtual int TempCount() V8_FINAL V8_OVERRIDE { return T; }
+ virtual LOperand* TempAt(int i) V8_FINAL V8_OVERRIDE { return temps_[i]; }
};
-class LGap: public LTemplateInstruction<0, 0, 0> {
+class LGap : public LTemplateInstruction<0, 0, 0> {
public:
explicit LGap(HBasicBlock* block) : block_(block) {
parallel_moves_[BEFORE] = NULL;
@@ -318,8 +348,8 @@ class LGap: public LTemplateInstruction<0, 0, 0> {
}
// Can't use the DECLARE-macro here because of sub-classes.
- virtual bool IsGap() const { return true; }
- virtual void PrintDataTo(StringStream* stream);
+ virtual bool IsGap() const V8_FINAL V8_OVERRIDE { return true; }
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
static LGap* cast(LInstruction* instr) {
ASSERT(instr->IsGap());
return reinterpret_cast<LGap*>(instr);
@@ -355,52 +385,91 @@ class LGap: public LTemplateInstruction<0, 0, 0> {
};
-class LInstructionGap: public LGap {
+class LInstructionGap V8_FINAL : public LGap {
public:
explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ return !IsRedundant();
+ }
+
DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap")
};
-class LGoto: public LTemplateInstruction<0, 0, 0> {
+class LClobberDoubles V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+ public:
+ LClobberDoubles() { ASSERT(!CpuFeatures::IsSafeForSnapshot(SSE2)); }
+
+ virtual bool ClobbersDoubleRegisters() const { return true; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ClobberDoubles, "clobber-d")
+};
+
+
+class LGoto V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
- explicit LGoto(int block_id) : block_id_(block_id) { }
+ explicit LGoto(HBasicBlock* block) : block_(block) { }
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
- virtual void PrintDataTo(StringStream* stream);
- virtual bool IsControl() const { return true; }
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual bool IsControl() const V8_OVERRIDE { return true; }
- int block_id() const { return block_id_; }
+ int block_id() const { return block_->block_id(); }
+ virtual bool ClobbersDoubleRegisters() const { return false; }
+
+ bool jumps_to_join() const { return block_->predecessors()->length() > 1; }
private:
- int block_id_;
+ HBasicBlock* block_;
};
-class LLazyBailout: public LTemplateInstruction<0, 0, 0> {
+class LLazyBailout V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(LazyBailout, "lazy-bailout")
};
-class LDeoptimize: public LTemplateInstruction<0, 0, 0> {
+class LDummy V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ explicit LDummy() { }
+ DECLARE_CONCRETE_INSTRUCTION(Dummy, "dummy")
+};
+
+
+class LDummyUse V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LDummyUse(LOperand* value) {
+ inputs_[0] = value;
+ }
+ DECLARE_CONCRETE_INSTRUCTION(DummyUse, "dummy-use")
+};
+
+
+class LDeoptimize V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
+ DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
};
-class LLabel: public LGap {
+class LLabel V8_FINAL : public LGap {
public:
explicit LLabel(HBasicBlock* block)
: LGap(block), replacement_(NULL) { }
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ return false;
+ }
DECLARE_CONCRETE_INSTRUCTION(Label, "label")
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
int block_id() const { return block()->block_id(); }
bool is_loop_header() const { return block()->IsLoopHeader(); }
+ bool is_osr_entry() const { return block()->is_osr_entry(); }
Label* label() { return &label_; }
LLabel* replacement() const { return replacement_; }
void set_replacement(LLabel* label) { replacement_ = label; }
@@ -412,13 +481,16 @@ class LLabel: public LGap {
};
-class LParameter: public LTemplateInstruction<1, 0, 0> {
+class LParameter V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ return false;
+ }
DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
};
-class LCallStub: public LTemplateInstruction<1, 1, 0> {
+class LCallStub V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallStub(LOperand* context) {
inputs_[0] = context;
@@ -435,8 +507,11 @@ class LCallStub: public LTemplateInstruction<1, 1, 0> {
};
-class LUnknownOSRValue: public LTemplateInstruction<1, 0, 0> {
+class LUnknownOSRValue V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ return false;
+ }
DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
};
@@ -444,21 +519,48 @@ class LUnknownOSRValue: public LTemplateInstruction<1, 0, 0> {
template<int I, int T>
class LControlInstruction: public LTemplateInstruction<0, I, T> {
public:
- virtual bool IsControl() const { return true; }
+ LControlInstruction() : false_label_(NULL), true_label_(NULL) { }
+
+ virtual bool IsControl() const V8_FINAL V8_OVERRIDE { return true; }
int SuccessorCount() { return hydrogen()->SuccessorCount(); }
HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
- int true_block_id() { return hydrogen()->SuccessorAt(0)->block_id(); }
- int false_block_id() { return hydrogen()->SuccessorAt(1)->block_id(); }
+
+ int TrueDestination(LChunk* chunk) {
+ return chunk->LookupDestination(true_block_id());
+ }
+ int FalseDestination(LChunk* chunk) {
+ return chunk->LookupDestination(false_block_id());
+ }
+
+ Label* TrueLabel(LChunk* chunk) {
+ if (true_label_ == NULL) {
+ true_label_ = chunk->GetAssemblyLabel(TrueDestination(chunk));
+ }
+ return true_label_;
+ }
+ Label* FalseLabel(LChunk* chunk) {
+ if (false_label_ == NULL) {
+ false_label_ = chunk->GetAssemblyLabel(FalseDestination(chunk));
+ }
+ return false_label_;
+ }
+
+ protected:
+ int true_block_id() { return SuccessorAt(0)->block_id(); }
+ int false_block_id() { return SuccessorAt(1)->block_id(); }
private:
HControlInstruction* hydrogen() {
return HControlInstruction::cast(this->hydrogen_value());
}
+
+ Label* false_label_;
+ Label* true_label_;
};
-class LWrapReceiver: public LTemplateInstruction<1, 2, 1> {
+class LWrapReceiver V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LWrapReceiver(LOperand* receiver,
LOperand* function,
@@ -476,7 +578,7 @@ class LWrapReceiver: public LTemplateInstruction<1, 2, 1> {
};
-class LApplyArguments: public LTemplateInstruction<1, 4, 0> {
+class LApplyArguments V8_FINAL : public LTemplateInstruction<1, 4, 0> {
public:
LApplyArguments(LOperand* function,
LOperand* receiver,
@@ -497,7 +599,7 @@ class LApplyArguments: public LTemplateInstruction<1, 4, 0> {
};
-class LAccessArgumentsAt: public LTemplateInstruction<1, 3, 0> {
+class LAccessArgumentsAt V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) {
inputs_[0] = arguments;
@@ -511,11 +613,11 @@ class LAccessArgumentsAt: public LTemplateInstruction<1, 3, 0> {
DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LArgumentsLength: public LTemplateInstruction<1, 1, 0> {
+class LArgumentsLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LArgumentsLength(LOperand* elements) {
inputs_[0] = elements;
@@ -527,14 +629,20 @@ class LArgumentsLength: public LTemplateInstruction<1, 1, 0> {
};
-class LArgumentsElements: public LTemplateInstruction<1, 0, 0> {
+class LArgumentsElements V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements)
};
-class LModI: public LTemplateInstruction<1, 2, 1> {
+class LDebugBreak V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(DebugBreak, "break")
+};
+
+
+class LModI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LModI(LOperand* left, LOperand* right, LOperand* temp) {
inputs_[0] = left;
@@ -551,7 +659,7 @@ class LModI: public LTemplateInstruction<1, 2, 1> {
};
-class LDivI: public LTemplateInstruction<1, 2, 1> {
+class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LDivI(LOperand* left, LOperand* right, LOperand* temp) {
inputs_[0] = left;
@@ -562,12 +670,14 @@ class LDivI: public LTemplateInstruction<1, 2, 1> {
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
+ bool is_flooring() { return hydrogen_value()->IsMathFloorOfDiv(); }
+
DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
DECLARE_HYDROGEN_ACCESSOR(Div)
};
-class LMathFloorOfDiv: public LTemplateInstruction<1, 2, 1> {
+class LMathFloorOfDiv V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LMathFloorOfDiv(LOperand* left,
LOperand* right,
@@ -586,7 +696,7 @@ class LMathFloorOfDiv: public LTemplateInstruction<1, 2, 1> {
};
-class LMulI: public LTemplateInstruction<1, 2, 1> {
+class LMulI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LMulI(LOperand* left, LOperand* right, LOperand* temp) {
inputs_[0] = left;
@@ -603,9 +713,9 @@ class LMulI: public LTemplateInstruction<1, 2, 1> {
};
-class LCmpIDAndBranch: public LControlInstruction<2, 0> {
+class LCompareNumericAndBranch V8_FINAL : public LControlInstruction<2, 0> {
public:
- LCmpIDAndBranch(LOperand* left, LOperand* right) {
+ LCompareNumericAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
}
@@ -613,39 +723,35 @@ class LCmpIDAndBranch: public LControlInstruction<2, 0> {
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
- DECLARE_CONCRETE_INSTRUCTION(CmpIDAndBranch, "cmp-id-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareIDAndBranch)
+ DECLARE_CONCRETE_INSTRUCTION(CompareNumericAndBranch,
+ "compare-numeric-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareNumericAndBranch)
Token::Value op() const { return hydrogen()->token(); }
bool is_double() const {
- return hydrogen()->GetInputRepresentation().IsDouble();
+ return hydrogen()->representation().IsDouble();
}
virtual void PrintDataTo(StringStream* stream);
};
-class LUnaryMathOperation: public LTemplateInstruction<1, 2, 0> {
+class LMathFloor V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
- LUnaryMathOperation(LOperand* context, LOperand* value) {
- inputs_[1] = context;
+ explicit LMathFloor(LOperand* value) {
inputs_[0] = value;
}
- LOperand* context() { return inputs_[1]; }
LOperand* value() { return inputs_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(UnaryMathOperation, "unary-math-operation")
+ DECLARE_CONCRETE_INSTRUCTION(MathFloor, "math-floor")
DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
-
- virtual void PrintDataTo(StringStream* stream);
- BuiltinFunctionId op() const { return hydrogen()->op(); }
};
-class LMathPowHalf: public LTemplateInstruction<1, 2, 1> {
+class LMathRound V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
- LMathPowHalf(LOperand* context, LOperand* value, LOperand* temp) {
+ LMathRound(LOperand* context, LOperand* value, LOperand* temp) {
inputs_[1] = context;
inputs_[0] = value;
temps_[0] = temp;
@@ -655,62 +761,149 @@ class LMathPowHalf: public LTemplateInstruction<1, 2, 1> {
LOperand* value() { return inputs_[0]; }
LOperand* temp() { return temps_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(MathPowHalf, "math-pow-half")
+ DECLARE_CONCRETE_INSTRUCTION(MathRound, "math-round")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
- virtual void PrintDataTo(StringStream* stream);
+
+class LMathAbs V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LMathAbs(LOperand* context, LOperand* value) {
+ inputs_[1] = context;
+ inputs_[0] = value;
+ }
+
+ LOperand* context() { return inputs_[1]; }
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathAbs, "math-abs")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
};
-class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
+class LMathLog V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
- LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
+ explicit LMathLog(LOperand* value) {
+ inputs_[0] = value;
}
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
+ LOperand* value() { return inputs_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch,
- "cmp-object-eq-and-branch")
+ DECLARE_CONCRETE_INSTRUCTION(MathLog, "math-log")
};
-class LCmpConstantEqAndBranch: public LControlInstruction<1, 0> {
+class LMathSin V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
- explicit LCmpConstantEqAndBranch(LOperand* left) {
- inputs_[0] = left;
+ explicit LMathSin(LOperand* value) {
+ inputs_[0] = value;
}
- LOperand* left() { return inputs_[0]; }
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathSin, "math-sin")
+};
+
+
+class LMathCos V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathCos(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathCos, "math-cos")
+};
+
+
+class LMathTan V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathTan(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathTan, "math-tan")
+};
+
+
+class LMathExp V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+ public:
+ LMathExp(LOperand* value,
+ LOperand* temp1,
+ LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ ExternalReference::InitializeMathExpData();
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp")
+};
+
- DECLARE_CONCRETE_INSTRUCTION(CmpConstantEqAndBranch,
- "cmp-constant-eq-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareConstantEqAndBranch)
+class LMathSqrt V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathSqrt(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathSqrt, "math-sqrt")
};
-class LIsNilAndBranch: public LControlInstruction<1, 1> {
+class LMathPowHalf V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
- LIsNilAndBranch(LOperand* value, LOperand* temp) {
+ LMathPowHalf(LOperand* context, LOperand* value, LOperand* temp) {
+ inputs_[1] = context;
inputs_[0] = value;
temps_[0] = temp;
}
+ LOperand* context() { return inputs_[1]; }
LOperand* value() { return inputs_[0]; }
LOperand* temp() { return temps_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(IsNilAndBranch, "is-nil-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsNilAndBranch)
+ DECLARE_CONCRETE_INSTRUCTION(MathPowHalf, "math-pow-half")
+};
- EqualityKind kind() const { return hydrogen()->kind(); }
- NilValue nil() const { return hydrogen()->nil(); }
- virtual void PrintDataTo(StringStream* stream);
+class LCmpObjectEqAndBranch V8_FINAL : public LControlInstruction<2, 0> {
+ public:
+ LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch, "cmp-object-eq-and-branch")
+};
+
+
+class LCmpHoleAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+ public:
+ explicit LCmpHoleAndBranch(LOperand* object) {
+ inputs_[0] = object;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpHoleAndBranch, "cmp-hole-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareHoleAndBranch)
};
-class LIsObjectAndBranch: public LControlInstruction<1, 1> {
+class LIsObjectAndBranch V8_FINAL : public LControlInstruction<1, 1> {
public:
LIsObjectAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -722,11 +915,11 @@ class LIsObjectAndBranch: public LControlInstruction<1, 1> {
DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LIsStringAndBranch: public LControlInstruction<1, 1> {
+class LIsStringAndBranch V8_FINAL : public LControlInstruction<1, 1> {
public:
LIsStringAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -737,12 +930,13 @@ class LIsStringAndBranch: public LControlInstruction<1, 1> {
LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LIsSmiAndBranch: public LControlInstruction<1, 0> {
+class LIsSmiAndBranch V8_FINAL : public LControlInstruction<1, 0> {
public:
explicit LIsSmiAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -753,11 +947,11 @@ class LIsSmiAndBranch: public LControlInstruction<1, 0> {
DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LIsUndetectableAndBranch: public LControlInstruction<1, 1> {
+class LIsUndetectableAndBranch V8_FINAL : public LControlInstruction<1, 1> {
public:
LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -769,12 +963,13 @@ class LIsUndetectableAndBranch: public LControlInstruction<1, 1> {
DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch,
"is-undetectable-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LStringCompareAndBranch: public LControlInstruction<3, 0> {
+class LStringCompareAndBranch V8_FINAL : public LControlInstruction<3, 0> {
public:
LStringCompareAndBranch(LOperand* context, LOperand* left, LOperand* right) {
inputs_[0] = context;
@@ -789,13 +984,13 @@ class LStringCompareAndBranch: public LControlInstruction<3, 0> {
"string-compare-and-branch")
DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Token::Value op() const { return hydrogen()->token(); }
};
-class LHasInstanceTypeAndBranch: public LControlInstruction<1, 1> {
+class LHasInstanceTypeAndBranch V8_FINAL : public LControlInstruction<1, 1> {
public:
LHasInstanceTypeAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -809,11 +1004,11 @@ class LHasInstanceTypeAndBranch: public LControlInstruction<1, 1> {
"has-instance-type-and-branch")
DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LGetCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
+class LGetCachedArrayIndex V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LGetCachedArrayIndex(LOperand* value) {
inputs_[0] = value;
@@ -826,7 +1021,8 @@ class LGetCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
};
-class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
+class LHasCachedArrayIndexAndBranch V8_FINAL
+ : public LControlInstruction<1, 0> {
public:
explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -837,11 +1033,11 @@ class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
"has-cached-array-index-and-branch")
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LIsConstructCallAndBranch: public LControlInstruction<0, 1> {
+class LIsConstructCallAndBranch V8_FINAL : public LControlInstruction<0, 1> {
public:
explicit LIsConstructCallAndBranch(LOperand* temp) {
temps_[0] = temp;
@@ -854,7 +1050,7 @@ class LIsConstructCallAndBranch: public LControlInstruction<0, 1> {
};
-class LClassOfTestAndBranch: public LControlInstruction<1, 2> {
+class LClassOfTestAndBranch V8_FINAL : public LControlInstruction<1, 2> {
public:
LClassOfTestAndBranch(LOperand* value, LOperand* temp, LOperand* temp2) {
inputs_[0] = value;
@@ -870,11 +1066,11 @@ class LClassOfTestAndBranch: public LControlInstruction<1, 2> {
"class-of-test-and-branch")
DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LCmpT: public LTemplateInstruction<1, 3, 0> {
+class LCmpT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LCmpT(LOperand* context, LOperand* left, LOperand* right) {
inputs_[0] = context;
@@ -889,7 +1085,7 @@ class LCmpT: public LTemplateInstruction<1, 3, 0> {
};
-class LInstanceOf: public LTemplateInstruction<1, 3, 0> {
+class LInstanceOf V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LInstanceOf(LOperand* context, LOperand* left, LOperand* right) {
inputs_[0] = context;
@@ -903,7 +1099,7 @@ class LInstanceOf: public LTemplateInstruction<1, 3, 0> {
};
-class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 2, 1> {
+class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LInstanceOfKnownGlobal(LOperand* context, LOperand* value, LOperand* temp) {
inputs_[0] = context;
@@ -922,7 +1118,8 @@ class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 2, 1> {
LEnvironment* GetDeferredLazyDeoptimizationEnvironment() {
return lazy_deopt_env_;
}
- virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) {
+ virtual void SetDeferredLazyDeoptimizationEnvironment(
+ LEnvironment* env) V8_OVERRIDE {
lazy_deopt_env_ = env;
}
@@ -931,7 +1128,7 @@ class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 2, 1> {
};
-class LBoundsCheck: public LTemplateInstruction<0, 2, 0> {
+class LBoundsCheck V8_FINAL : public LTemplateInstruction<0, 2, 0> {
public:
LBoundsCheck(LOperand* index, LOperand* length) {
inputs_[0] = index;
@@ -946,7 +1143,7 @@ class LBoundsCheck: public LTemplateInstruction<0, 2, 0> {
};
-class LBitI: public LTemplateInstruction<1, 2, 0> {
+class LBitI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LBitI(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -963,7 +1160,7 @@ class LBitI: public LTemplateInstruction<1, 2, 0> {
};
-class LShiftI: public LTemplateInstruction<1, 2, 0> {
+class LShiftI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
: op_(op), can_deopt_(can_deopt) {
@@ -985,7 +1182,7 @@ class LShiftI: public LTemplateInstruction<1, 2, 0> {
};
-class LSubI: public LTemplateInstruction<1, 2, 0> {
+class LSubI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LSubI(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1000,7 +1197,7 @@ class LSubI: public LTemplateInstruction<1, 2, 0> {
};
-class LConstantI: public LTemplateInstruction<1, 0, 0> {
+class LConstantI V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1009,7 +1206,16 @@ class LConstantI: public LTemplateInstruction<1, 0, 0> {
};
-class LConstantD: public LTemplateInstruction<1, 0, 1> {
+class LConstantS V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ConstantS, "constant-s")
+ DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+ Smi* value() const { return Smi::FromInt(hydrogen()->Integer32Value()); }
+};
+
+
+class LConstantD V8_FINAL : public LTemplateInstruction<1, 0, 1> {
public:
explicit LConstantD(LOperand* temp) {
temps_[0] = temp;
@@ -1024,16 +1230,29 @@ class LConstantD: public LTemplateInstruction<1, 0, 1> {
};
-class LConstantT: public LTemplateInstruction<1, 0, 0> {
+class LConstantE V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ConstantE, "constant-e")
+ DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+ ExternalReference value() const {
+ return hydrogen()->ExternalReferenceValue();
+ }
+};
+
+
+class LConstantT V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
DECLARE_HYDROGEN_ACCESSOR(Constant)
- Handle<Object> value() const { return hydrogen()->handle(); }
+ Handle<Object> value(Isolate* isolate) const {
+ return hydrogen()->handle(isolate);
+ }
};
-class LBranch: public LControlInstruction<1, 1> {
+class LBranch V8_FINAL : public LControlInstruction<1, 1> {
public:
LBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1046,11 +1265,11 @@ class LBranch: public LControlInstruction<1, 1> {
DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
DECLARE_HYDROGEN_ACCESSOR(Branch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LCmpMapAndBranch: public LTemplateInstruction<0, 1, 0> {
+class LCmpMapAndBranch V8_FINAL : public LControlInstruction<1, 0> {
public:
explicit LCmpMapAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -1061,46 +1280,11 @@ class LCmpMapAndBranch: public LTemplateInstruction<0, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
DECLARE_HYDROGEN_ACCESSOR(CompareMap)
- virtual bool IsControl() const { return true; }
-
- Handle<Map> map() const { return hydrogen()->map(); }
- int true_block_id() const {
- return hydrogen()->FirstSuccessor()->block_id();
- }
- int false_block_id() const {
- return hydrogen()->SecondSuccessor()->block_id();
- }
-};
-
-
-class LJSArrayLength: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LJSArrayLength(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(JSArrayLength, "js-array-length")
- DECLARE_HYDROGEN_ACCESSOR(JSArrayLength)
-};
-
-
-class LFixedArrayBaseLength: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LFixedArrayBaseLength(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(FixedArrayBaseLength,
- "fixed-array-base-length")
- DECLARE_HYDROGEN_ACCESSOR(FixedArrayBaseLength)
+ Handle<Map> map() const { return hydrogen()->map().handle(); }
};
-class LMapEnumLength: public LTemplateInstruction<1, 1, 0> {
+class LMapEnumLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMapEnumLength(LOperand* value) {
inputs_[0] = value;
@@ -1112,7 +1296,7 @@ class LMapEnumLength: public LTemplateInstruction<1, 1, 0> {
};
-class LElementsKind: public LTemplateInstruction<1, 1, 0> {
+class LElementsKind V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LElementsKind(LOperand* value) {
inputs_[0] = value;
@@ -1125,7 +1309,7 @@ class LElementsKind: public LTemplateInstruction<1, 1, 0> {
};
-class LValueOf: public LTemplateInstruction<1, 1, 1> {
+class LValueOf V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LValueOf(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1140,7 +1324,7 @@ class LValueOf: public LTemplateInstruction<1, 1, 1> {
};
-class LDateField: public LTemplateInstruction<1, 1, 1> {
+class LDateField V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LDateField(LOperand* date, LOperand* temp, Smi* index)
: index_(index) {
@@ -1161,33 +1345,45 @@ class LDateField: public LTemplateInstruction<1, 1, 1> {
};
-class LThrow: public LTemplateInstruction<0, 2, 0> {
+class LSeqStringSetChar V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
- LThrow(LOperand* context, LOperand* value) {
- inputs_[0] = context;
- inputs_[1] = value;
+ LSeqStringSetChar(String::Encoding encoding,
+ LOperand* string,
+ LOperand* index,
+ LOperand* value) : encoding_(encoding) {
+ inputs_[0] = string;
+ inputs_[1] = index;
+ inputs_[2] = value;
}
- LOperand* context() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
+ String::Encoding encoding() { return encoding_; }
+ LOperand* string() { return inputs_[0]; }
+ LOperand* index() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
- DECLARE_CONCRETE_INSTRUCTION(Throw, "throw")
+ DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char")
+ DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar)
+
+ private:
+ String::Encoding encoding_;
};
-class LBitNotI: public LTemplateInstruction<1, 1, 0> {
+class LThrow V8_FINAL : public LTemplateInstruction<0, 2, 0> {
public:
- explicit LBitNotI(LOperand* value) {
- inputs_[0] = value;
+ LThrow(LOperand* context, LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = value;
}
- LOperand* value() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
- DECLARE_CONCRETE_INSTRUCTION(BitNotI, "bit-not-i")
+ DECLARE_CONCRETE_INSTRUCTION(Throw, "throw")
};
-class LAddI: public LTemplateInstruction<1, 2, 0> {
+class LAddI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LAddI(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1197,12 +1393,17 @@ class LAddI: public LTemplateInstruction<1, 2, 0> {
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
+ static bool UseLea(HAdd* add) {
+ return !add->CheckFlag(HValue::kCanOverflow) &&
+ add->BetterLeftOperand()->UseCount() > 1;
+ }
+
DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i")
DECLARE_HYDROGEN_ACCESSOR(Add)
};
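(The UseLea predicate added above captures when the code generator may emit a three-operand LEA instead of a two-operand ADD: LEA sets no flags, so it is only usable when the add cannot overflow, and it pays off when the left operand has further uses, since ADD would clobber it. The same decision with the V8 types stripped away, as a hypothetical helper:

    // lea dst, [left + right]  keeps 'left' intact but sets no flags;
    // add left, right          clobbers 'left' but allows an overflow check.
    bool PreferLea(bool can_overflow, int left_operand_use_count) {
      return !can_overflow && left_operand_use_count > 1;
    }
)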
-class LMathMinMax: public LTemplateInstruction<1, 2, 0> {
+class LMathMinMax V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LMathMinMax(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1212,12 +1413,12 @@ class LMathMinMax: public LTemplateInstruction<1, 2, 0> {
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
- DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "min-max")
+ DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "math-min-max")
DECLARE_HYDROGEN_ACCESSOR(MathMinMax)
};
-class LPower: public LTemplateInstruction<1, 2, 0> {
+class LPower V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LPower(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1232,20 +1433,29 @@ class LPower: public LTemplateInstruction<1, 2, 0> {
};
-class LRandom: public LTemplateInstruction<1, 1, 0> {
+class LRandom V8_FINAL : public LTemplateInstruction<1, 1, 3> {
public:
- explicit LRandom(LOperand* global_object) {
+ LRandom(LOperand* global_object,
+ LOperand* scratch,
+ LOperand* scratch2,
+ LOperand* scratch3) {
inputs_[0] = global_object;
+ temps_[0] = scratch;
+ temps_[1] = scratch2;
+ temps_[2] = scratch3;
}
- LOperand* global_object() { return inputs_[0]; }
+ LOperand* global_object() const { return inputs_[0]; }
+ LOperand* scratch() const { return temps_[0]; }
+ LOperand* scratch2() const { return temps_[1]; }
+ LOperand* scratch3() const { return temps_[2]; }
DECLARE_CONCRETE_INSTRUCTION(Random, "random")
DECLARE_HYDROGEN_ACCESSOR(Random)
};
-class LArithmeticD: public LTemplateInstruction<1, 2, 0> {
+class LArithmeticD V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
: op_(op) {
@@ -1258,16 +1468,18 @@ class LArithmeticD: public LTemplateInstruction<1, 2, 0> {
Token::Value op() const { return op_; }
- virtual Opcode opcode() const { return LInstruction::kArithmeticD; }
- virtual void CompileToNative(LCodeGen* generator);
- virtual const char* Mnemonic() const;
+ virtual Opcode opcode() const V8_OVERRIDE {
+ return LInstruction::kArithmeticD;
+ }
+ virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
+ virtual const char* Mnemonic() const V8_OVERRIDE;
private:
Token::Value op_;
};
-class LArithmeticT: public LTemplateInstruction<1, 3, 0> {
+class LArithmeticT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LArithmeticT(Token::Value op,
LOperand* context,
@@ -1283,9 +1495,11 @@ class LArithmeticT: public LTemplateInstruction<1, 3, 0> {
LOperand* left() { return inputs_[1]; }
LOperand* right() { return inputs_[2]; }
- virtual Opcode opcode() const { return LInstruction::kArithmeticT; }
- virtual void CompileToNative(LCodeGen* generator);
- virtual const char* Mnemonic() const;
+ virtual Opcode opcode() const V8_OVERRIDE {
+ return LInstruction::kArithmeticT;
+ }
+ virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
+ virtual const char* Mnemonic() const V8_OVERRIDE;
Token::Value op() const { return op_; }
@@ -1294,17 +1508,30 @@ class LArithmeticT: public LTemplateInstruction<1, 3, 0> {
};
-class LReturn: public LTemplateInstruction<0, 1, 0> {
+class LReturn V8_FINAL : public LTemplateInstruction<0, 3, 0> {
public:
- explicit LReturn(LOperand* value) {
+  LReturn(LOperand* value, LOperand* context,
+          LOperand* parameter_count) {
inputs_[0] = value;
+ inputs_[1] = context;
+ inputs_[2] = parameter_count;
+ }
+
+ bool has_constant_parameter_count() {
+ return parameter_count()->IsConstantOperand();
}
+ LConstantOperand* constant_parameter_count() {
+ ASSERT(has_constant_parameter_count());
+ return LConstantOperand::cast(parameter_count());
+ }
+ LOperand* parameter_count() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(Return, "return")
+ DECLARE_HYDROGEN_ACCESSOR(Return)
};
-class LLoadNamedField: public LTemplateInstruction<1, 1, 0> {
+class LLoadNamedField V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadNamedField(LOperand* object) {
inputs_[0] = object;
@@ -1317,22 +1544,7 @@ class LLoadNamedField: public LTemplateInstruction<1, 1, 0> {
};
-class LLoadNamedFieldPolymorphic: public LTemplateInstruction<1, 2, 0> {
- public:
- LLoadNamedFieldPolymorphic(LOperand* context, LOperand* object) {
- inputs_[0] = context;
- inputs_[1] = object;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field-polymorphic")
- DECLARE_HYDROGEN_ACCESSOR(LoadNamedFieldPolymorphic)
-};
-
-
-class LLoadNamedGeneric: public LTemplateInstruction<1, 2, 0> {
+class LLoadNamedGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LLoadNamedGeneric(LOperand* context, LOperand* object) {
inputs_[0] = context;
@@ -1349,7 +1561,7 @@ class LLoadNamedGeneric: public LTemplateInstruction<1, 2, 0> {
};
-class LLoadFunctionPrototype: public LTemplateInstruction<1, 1, 1> {
+class LLoadFunctionPrototype V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LLoadFunctionPrototype(LOperand* function, LOperand* temp) {
inputs_[0] = function;
@@ -1364,19 +1576,17 @@ class LLoadFunctionPrototype: public LTemplateInstruction<1, 1, 1> {
};
-class LLoadElements: public LTemplateInstruction<1, 1, 0> {
+class LLoadRoot V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
- explicit LLoadElements(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
+ DECLARE_CONCRETE_INSTRUCTION(LoadRoot, "load-root")
+ DECLARE_HYDROGEN_ACCESSOR(LoadRoot)
- DECLARE_CONCRETE_INSTRUCTION(LoadElements, "load-elements")
+ Heap::RootListIndex index() const { return hydrogen()->index(); }
};
-class LLoadExternalArrayPointer: public LTemplateInstruction<1, 1, 0> {
+class LLoadExternalArrayPointer V8_FINAL
+ : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadExternalArrayPointer(LOperand* object) {
inputs_[0] = object;
@@ -1389,38 +1599,29 @@ class LLoadExternalArrayPointer: public LTemplateInstruction<1, 1, 0> {
};
-class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> {
+class LLoadKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- LLoadKeyedFastElement(LOperand* elements, LOperand* key) {
+ LLoadKeyed(LOperand* elements, LOperand* key) {
inputs_[0] = elements;
inputs_[1] = key;
}
-
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastElement, "load-keyed-fast-element")
- DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastElement)
-
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
-};
-
-
-class LLoadKeyedFastDoubleElement: public LTemplateInstruction<1, 2, 0> {
- public:
- LLoadKeyedFastDoubleElement(LOperand* elements, LOperand* key) {
- inputs_[0] = elements;
- inputs_[1] = key;
+ ElementsKind elements_kind() const {
+ return hydrogen()->elements_kind();
+ }
+ bool is_external() const {
+ return hydrogen()->is_external();
}
- LOperand* elements() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastDoubleElement,
- "load-keyed-fast-double-element")
- DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastDoubleElement)
+ DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
+ DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
uint32_t additional_index() const { return hydrogen()->index_offset(); }
+ bool key_is_smi() {
+ return hydrogen()->key()->representation().IsTagged();
+ }
};
@@ -1430,35 +1631,14 @@ inline static bool ExternalArrayOpRequiresTemp(
// Operations that require the key to be divided by two to be converted into
// an index cannot fold the scale operation into a load and need an extra
// temp register to do the work.
- return key_representation.IsTagged() &&
+ return key_representation.IsSmi() &&
(elements_kind == EXTERNAL_BYTE_ELEMENTS ||
elements_kind == EXTERNAL_UNSIGNED_BYTE_ELEMENTS ||
elements_kind == EXTERNAL_PIXEL_ELEMENTS);
}
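(The comment above refers to smi-tagged keys: a smi stores its integer shifted left by one bit, so indexing a byte-sized external array means halving the key first, and x86 addressing modes only scale upward by 1, 2, 4 or 8. A hypothetical standalone illustration of why the byte-element case needs a temp register:

    #include <cstdint>

    // A smi key is (index << 1); for 16-bit or wider elements the shift can
    // be folded into the address scale, but for byte elements the key must
    // first be shifted right into a scratch register.
    int32_t SmiKeyToByteElementIndex(int32_t smi_key) {
      return smi_key >> 1;  // a scale of 1/2 has no x86 addressing form
    }
)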
-class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> {
- public:
- LLoadKeyedSpecializedArrayElement(LOperand* external_pointer, LOperand* key) {
- inputs_[0] = external_pointer;
- inputs_[1] = key;
- }
-
- LOperand* external_pointer() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedSpecializedArrayElement,
- "load-keyed-specialized-array-element")
- DECLARE_HYDROGEN_ACCESSOR(LoadKeyedSpecializedArrayElement)
-
- ElementsKind elements_kind() const {
- return hydrogen()->elements_kind();
- }
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
-};
-
-
-class LLoadKeyedGeneric: public LTemplateInstruction<1, 3, 0> {
+class LLoadKeyedGeneric V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LLoadKeyedGeneric(LOperand* context, LOperand* obj, LOperand* key) {
inputs_[0] = context;
@@ -1474,14 +1654,14 @@ class LLoadKeyedGeneric: public LTemplateInstruction<1, 3, 0> {
};
-class LLoadGlobalCell: public LTemplateInstruction<1, 0, 0> {
+class LLoadGlobalCell V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
};
-class LLoadGlobalGeneric: public LTemplateInstruction<1, 2, 0> {
+class LLoadGlobalGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LLoadGlobalGeneric(LOperand* context, LOperand* global_object) {
inputs_[0] = context;
@@ -1499,7 +1679,7 @@ class LLoadGlobalGeneric: public LTemplateInstruction<1, 2, 0> {
};
-class LStoreGlobalCell: public LTemplateInstruction<0, 1, 0> {
+class LStoreGlobalCell V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LStoreGlobalCell(LOperand* value) {
inputs_[0] = value;
@@ -1512,7 +1692,7 @@ class LStoreGlobalCell: public LTemplateInstruction<0, 1, 0> {
};
-class LStoreGlobalGeneric: public LTemplateInstruction<0, 3, 0> {
+class LStoreGlobalGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
public:
LStoreGlobalGeneric(LOperand* context,
LOperand* global_object,
@@ -1534,7 +1714,7 @@ class LStoreGlobalGeneric: public LTemplateInstruction<0, 3, 0> {
};
-class LLoadContextSlot: public LTemplateInstruction<1, 1, 0> {
+class LLoadContextSlot V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadContextSlot(LOperand* context) {
inputs_[0] = context;
@@ -1547,11 +1727,11 @@ class LLoadContextSlot: public LTemplateInstruction<1, 1, 0> {
int slot_index() { return hydrogen()->slot_index(); }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LStoreContextSlot: public LTemplateInstruction<0, 2, 1> {
+class LStoreContextSlot V8_FINAL : public LTemplateInstruction<0, 2, 1> {
public:
LStoreContextSlot(LOperand* context, LOperand* value, LOperand* temp) {
inputs_[0] = context;
@@ -1568,11 +1748,11 @@ class LStoreContextSlot: public LTemplateInstruction<0, 2, 1> {
int slot_index() { return hydrogen()->slot_index(); }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LPushArgument: public LTemplateInstruction<0, 1, 0> {
+class LPushArgument V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LPushArgument(LOperand* value) {
inputs_[0] = value;
@@ -1584,7 +1764,7 @@ class LPushArgument: public LTemplateInstruction<0, 1, 0> {
};
-class LDrop: public LTemplateInstruction<0, 0, 0> {
+class LDrop V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
explicit LDrop(int count) : count_(count) { }
@@ -1597,20 +1777,54 @@ class LDrop: public LTemplateInstruction<0, 0, 0> {
};
-class LThisFunction: public LTemplateInstruction<1, 0, 0> {
+class LStoreCodeEntry V8_FINAL : public LTemplateInstruction<0, 1, 1> {
+ public:
+ LStoreCodeEntry(LOperand* function, LOperand* code_object) {
+ inputs_[0] = function;
+ temps_[0] = code_object;
+ }
+
+ LOperand* function() { return inputs_[0]; }
+ LOperand* code_object() { return temps_[0]; }
+
+  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreCodeEntry, "store-code-entry")
+ DECLARE_HYDROGEN_ACCESSOR(StoreCodeEntry)
+};
+
+
+class LInnerAllocatedObject V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LInnerAllocatedObject(LOperand* base_object) {
+ inputs_[0] = base_object;
+ }
+
+ LOperand* base_object() { return inputs_[0]; }
+ int offset() { return hydrogen()->offset(); }
+
+  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "sub-allocated-object")
+ DECLARE_HYDROGEN_ACCESSOR(InnerAllocatedObject)
+};
+
+
+class LThisFunction V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
};
-class LContext: public LTemplateInstruction<1, 0, 0> {
+class LContext V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(Context, "context")
+ DECLARE_HYDROGEN_ACCESSOR(Context)
};
-class LOuterContext: public LTemplateInstruction<1, 1, 0> {
+class LOuterContext V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LOuterContext(LOperand* context) {
inputs_[0] = context;
@@ -1622,7 +1836,7 @@ class LOuterContext: public LTemplateInstruction<1, 1, 0> {
};
-class LDeclareGlobals: public LTemplateInstruction<0, 1, 0> {
+class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LDeclareGlobals(LOperand* context) {
inputs_[0] = context;
@@ -1635,7 +1849,7 @@ class LDeclareGlobals: public LTemplateInstruction<0, 1, 0> {
};
-class LGlobalObject: public LTemplateInstruction<1, 1, 0> {
+class LGlobalObject V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LGlobalObject(LOperand* context) {
inputs_[0] = context;
@@ -1647,7 +1861,7 @@ class LGlobalObject: public LTemplateInstruction<1, 1, 0> {
};
-class LGlobalReceiver: public LTemplateInstruction<1, 1, 0> {
+class LGlobalReceiver V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LGlobalReceiver(LOperand* global_object) {
inputs_[0] = global_object;
@@ -1659,19 +1873,19 @@ class LGlobalReceiver: public LTemplateInstruction<1, 1, 0> {
};
-class LCallConstantFunction: public LTemplateInstruction<1, 0, 0> {
+class LCallConstantFunction V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallConstantFunction, "call-constant-function")
DECLARE_HYDROGEN_ACCESSOR(CallConstantFunction)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<JSFunction> function() { return hydrogen()->function(); }
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LInvokeFunction: public LTemplateInstruction<1, 2, 0> {
+class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LInvokeFunction(LOperand* context, LOperand* function) {
inputs_[0] = context;
@@ -1684,14 +1898,13 @@ class LInvokeFunction: public LTemplateInstruction<1, 2, 0> {
DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
- Handle<JSFunction> known_function() { return hydrogen()->known_function(); }
};
-class LCallKeyed: public LTemplateInstruction<1, 2, 0> {
+class LCallKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LCallKeyed(LOperand* context, LOperand* key) {
inputs_[0] = context;
@@ -1704,13 +1917,13 @@ class LCallKeyed: public LTemplateInstruction<1, 2, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call-keyed")
DECLARE_HYDROGEN_ACCESSOR(CallKeyed)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallNamed: public LTemplateInstruction<1, 1, 0> {
+class LCallNamed V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallNamed(LOperand* context) {
inputs_[0] = context;
@@ -1721,14 +1934,14 @@ class LCallNamed: public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallNamed, "call-named")
DECLARE_HYDROGEN_ACCESSOR(CallNamed)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<String> name() const { return hydrogen()->name(); }
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallFunction: public LTemplateInstruction<1, 2, 0> {
+class LCallFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
explicit LCallFunction(LOperand* context, LOperand* function) {
inputs_[0] = context;
@@ -1745,7 +1958,7 @@ class LCallFunction: public LTemplateInstruction<1, 2, 0> {
};
-class LCallGlobal: public LTemplateInstruction<1, 1, 0> {
+class LCallGlobal V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallGlobal(LOperand* context) {
inputs_[0] = context;
@@ -1756,26 +1969,25 @@ class LCallGlobal: public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call-global")
DECLARE_HYDROGEN_ACCESSOR(CallGlobal)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<String> name() const {return hydrogen()->name(); }
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallKnownGlobal: public LTemplateInstruction<1, 0, 0> {
+class LCallKnownGlobal V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallKnownGlobal, "call-known-global")
DECLARE_HYDROGEN_ACCESSOR(CallKnownGlobal)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- Handle<JSFunction> target() const { return hydrogen()->target(); }
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallNew: public LTemplateInstruction<1, 2, 0> {
+class LCallNew V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LCallNew(LOperand* context, LOperand* constructor) {
inputs_[0] = context;
@@ -1788,13 +2000,32 @@ class LCallNew: public LTemplateInstruction<1, 2, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
DECLARE_HYDROGEN_ACCESSOR(CallNew)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallNewArray V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LCallNewArray(LOperand* context, LOperand* constructor) {
+ inputs_[0] = context;
+ inputs_[1] = constructor;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* constructor() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
+ DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallRuntime: public LTemplateInstruction<1, 1, 0> {
+class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallRuntime(LOperand* context) {
inputs_[0] = context;
@@ -1805,12 +2036,17 @@ class LCallRuntime: public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
+ virtual bool ClobbersDoubleRegisters() const V8_OVERRIDE {
+ return save_doubles() == kDontSaveFPRegs;
+ }
+
const Runtime::Function* function() const { return hydrogen()->function(); }
int arity() const { return hydrogen()->argument_count(); }
+ SaveFPRegsMode save_doubles() const { return hydrogen()->save_doubles(); }
};
-class LInteger32ToDouble: public LTemplateInstruction<1, 1, 0> {
+class LInteger32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LInteger32ToDouble(LOperand* value) {
inputs_[0] = value;
@@ -1822,7 +2058,20 @@ class LInteger32ToDouble: public LTemplateInstruction<1, 1, 0> {
};
-class LUint32ToDouble: public LTemplateInstruction<1, 1, 1> {
+class LInteger32ToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LInteger32ToSmi(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Integer32ToSmi, "int32-to-smi")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
+class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
explicit LUint32ToDouble(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1836,7 +2085,20 @@ class LUint32ToDouble: public LTemplateInstruction<1, 1, 1> {
};
-class LNumberTagI: public LTemplateInstruction<1, 1, 0> {
+class LUint32ToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LUint32ToSmi(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Uint32ToSmi, "uint32-to-smi")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
+class LNumberTagI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LNumberTagI(LOperand* value) {
inputs_[0] = value;
@@ -1848,20 +2110,21 @@ class LNumberTagI: public LTemplateInstruction<1, 1, 0> {
};
-class LNumberTagU: public LTemplateInstruction<1, 1, 1> {
+class LNumberTagU V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
- explicit LNumberTagU(LOperand* value, LOperand* temp) {
+ LNumberTagU(LOperand* value, LOperand* temp) {
inputs_[0] = value;
temps_[0] = temp;
}
LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(NumberTagU, "number-tag-u")
};
-class LNumberTagD: public LTemplateInstruction<1, 1, 1> {
+class LNumberTagD V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LNumberTagD(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1872,11 +2135,12 @@ class LNumberTagD: public LTemplateInstruction<1, 1, 1> {
LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
};
// Sometimes truncating conversion from a tagged value to an int32.
-class LDoubleToI: public LTemplateInstruction<1, 1, 1> {
+class LDoubleToI V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LDoubleToI(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1893,8 +2157,21 @@ class LDoubleToI: public LTemplateInstruction<1, 1, 1> {
};
+class LDoubleToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LDoubleToSmi(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DoubleToSmi, "double-to-smi")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+};
+
+
// Truncating conversion from a tagged value to an int32.
-class LTaggedToI: public LTemplateInstruction<1, 1, 1> {
+class LTaggedToI V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LTaggedToI(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1905,13 +2182,13 @@ class LTaggedToI: public LTemplateInstruction<1, 1, 1> {
LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
- DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+ DECLARE_HYDROGEN_ACCESSOR(Change)
bool truncating() { return hydrogen()->CanTruncateToInt32(); }
};
-class LSmiTag: public LTemplateInstruction<1, 1, 0> {
+class LSmiTag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LSmiTag(LOperand* value) {
inputs_[0] = value;
@@ -1923,7 +2200,7 @@ class LSmiTag: public LTemplateInstruction<1, 1, 0> {
};
-class LNumberUntagD: public LTemplateInstruction<1, 1, 1> {
+class LNumberUntagD V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
explicit LNumberUntagD(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1938,7 +2215,7 @@ class LNumberUntagD: public LTemplateInstruction<1, 1, 1> {
};
-class LSmiUntag: public LTemplateInstruction<1, 1, 0> {
+class LSmiUntag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
LSmiUntag(LOperand* value, bool needs_check)
: needs_check_(needs_check) {
@@ -1956,7 +2233,7 @@ class LSmiUntag: public LTemplateInstruction<1, 1, 0> {
};
-class LStoreNamedField: public LTemplateInstruction<0, 2, 2> {
+class LStoreNamedField V8_FINAL : public LTemplateInstruction<0, 2, 2> {
public:
LStoreNamedField(LOperand* obj,
LOperand* val,
@@ -1976,16 +2253,16 @@ class LStoreNamedField: public LTemplateInstruction<0, 2, 2> {
DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- Handle<Object> name() const { return hydrogen()->name(); }
- bool is_in_object() { return hydrogen()->is_in_object(); }
- int offset() { return hydrogen()->offset(); }
- Handle<Map> transition() const { return hydrogen()->transition(); }
+ Handle<Map> transition() const { return hydrogen()->transition_map(); }
+ Representation representation() const {
+ return hydrogen()->field_representation();
+ }
};
-class LStoreNamedGeneric: public LTemplateInstruction<0, 3, 0> {
+class LStoreNamedGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
public:
LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value) {
inputs_[0] = context;
@@ -2000,85 +2277,38 @@ class LStoreNamedGeneric: public LTemplateInstruction<0, 3, 0> {
DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<Object> name() const { return hydrogen()->name(); }
StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
};
-class LStoreKeyedFastElement: public LTemplateInstruction<0, 3, 0> {
+class LStoreKeyed V8_FINAL : public LTemplateInstruction<0, 3, 0> {
public:
- LStoreKeyedFastElement(LOperand* obj, LOperand* key, LOperand* val) {
+ LStoreKeyed(LOperand* obj, LOperand* key, LOperand* val) {
inputs_[0] = obj;
inputs_[1] = key;
inputs_[2] = val;
}
- LOperand* object() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement,
- "store-keyed-fast-element")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastElement)
-
- virtual void PrintDataTo(StringStream* stream);
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
-};
-
-
-class LStoreKeyedFastDoubleElement: public LTemplateInstruction<0, 3, 0> {
- public:
- LStoreKeyedFastDoubleElement(LOperand* elements,
- LOperand* key,
- LOperand* val) {
- inputs_[0] = elements;
- inputs_[1] = key;
- inputs_[2] = val;
- }
-
+ bool is_external() const { return hydrogen()->is_external(); }
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastDoubleElement,
- "store-keyed-fast-double-element")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastDoubleElement)
-
- virtual void PrintDataTo(StringStream* stream);
-
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
-
- bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
-};
-
-
-class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 0> {
- public:
- LStoreKeyedSpecializedArrayElement(LOperand* external_pointer,
- LOperand* key,
- LOperand* val) {
- inputs_[0] = external_pointer;
- inputs_[1] = key;
- inputs_[2] = val;
- }
-
- LOperand* external_pointer() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedSpecializedArrayElement,
- "store-keyed-specialized-array-element")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyedSpecializedArrayElement)
-
ElementsKind elements_kind() const {
return hydrogen()->elements_kind();
}
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed")
+ DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
uint32_t additional_index() const { return hydrogen()->index_offset(); }
+ bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
};
-class LStoreKeyedGeneric: public LTemplateInstruction<0, 4, 0> {
+class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 4, 0> {
public:
LStoreKeyedGeneric(LOperand* context,
LOperand* object,
@@ -2098,22 +2328,25 @@ class LStoreKeyedGeneric: public LTemplateInstruction<0, 4, 0> {
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
};
-class LTransitionElementsKind: public LTemplateInstruction<1, 1, 2> {
+class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 2, 2> {
public:
LTransitionElementsKind(LOperand* object,
+ LOperand* context,
LOperand* new_map_temp,
LOperand* temp) {
inputs_[0] = object;
+ inputs_[1] = context;
temps_[0] = new_map_temp;
temps_[1] = temp;
}
+ LOperand* context() { return inputs_[1]; }
LOperand* object() { return inputs_[0]; }
LOperand* new_map_temp() { return temps_[0]; }
LOperand* temp() { return temps_[1]; }
@@ -2122,14 +2355,34 @@ class LTransitionElementsKind: public LTemplateInstruction<1, 1, 2> {
"transition-elements-kind")
DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- Handle<Map> original_map() { return hydrogen()->original_map(); }
- Handle<Map> transitioned_map() { return hydrogen()->transitioned_map(); }
+ Handle<Map> original_map() { return hydrogen()->original_map().handle(); }
+ Handle<Map> transitioned_map() {
+ return hydrogen()->transitioned_map().handle();
+ }
+ ElementsKind from_kind() { return hydrogen()->from_kind(); }
+ ElementsKind to_kind() { return hydrogen()->to_kind(); }
};
-class LStringAdd: public LTemplateInstruction<1, 3, 0> {
+class LTrapAllocationMemento V8_FINAL : public LTemplateInstruction<0, 1, 1> {
+ public:
+ LTrapAllocationMemento(LOperand* object,
+ LOperand* temp) {
+ inputs_[0] = object;
+ temps_[0] = temp;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(TrapAllocationMemento,
+ "trap-allocation-memento")
+};
+
+
+class LStringAdd V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LStringAdd(LOperand* context, LOperand* left, LOperand* right) {
inputs_[0] = context;
@@ -2146,7 +2399,7 @@ class LStringAdd: public LTemplateInstruction<1, 3, 0> {
};
-class LStringCharCodeAt: public LTemplateInstruction<1, 3, 0> {
+class LStringCharCodeAt V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LStringCharCodeAt(LOperand* context, LOperand* string, LOperand* index) {
inputs_[0] = context;
@@ -2163,7 +2416,7 @@ class LStringCharCodeAt: public LTemplateInstruction<1, 3, 0> {
};
-class LStringCharFromCode: public LTemplateInstruction<1, 2, 0> {
+class LStringCharFromCode V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LStringCharFromCode(LOperand* context, LOperand* char_code) {
inputs_[0] = context;
@@ -2178,33 +2431,20 @@ class LStringCharFromCode: public LTemplateInstruction<1, 2, 0> {
};
-class LStringLength: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LStringLength(LOperand* string) {
- inputs_[0] = string;
- }
-
- LOperand* string() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StringLength, "string-length")
- DECLARE_HYDROGEN_ACCESSOR(StringLength)
-};
-
-
-class LCheckFunction: public LTemplateInstruction<0, 1, 0> {
+class LCheckValue V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
- explicit LCheckFunction(LOperand* value) {
+ explicit LCheckValue(LOperand* value) {
inputs_[0] = value;
}
LOperand* value() { return inputs_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check-function")
- DECLARE_HYDROGEN_ACCESSOR(CheckFunction)
+ DECLARE_CONCRETE_INSTRUCTION(CheckValue, "check-value")
+ DECLARE_HYDROGEN_ACCESSOR(CheckValue)
};
-class LCheckInstanceType: public LTemplateInstruction<0, 1, 1> {
+class LCheckInstanceType V8_FINAL : public LTemplateInstruction<0, 1, 1> {
public:
LCheckInstanceType(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -2219,7 +2459,7 @@ class LCheckInstanceType: public LTemplateInstruction<0, 1, 1> {
};
-class LCheckMaps: public LTemplateInstruction<0, 1, 0> {
+class LCheckMaps V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LCheckMaps(LOperand* value) {
inputs_[0] = value;
@@ -2232,23 +2472,7 @@ class LCheckMaps: public LTemplateInstruction<0, 1, 0> {
};
-class LCheckPrototypeMaps: public LTemplateInstruction<0, 0, 1> {
- public:
- explicit LCheckPrototypeMaps(LOperand* temp) {
- temps_[0] = temp;
- }
-
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps, "check-prototype-maps")
- DECLARE_HYDROGEN_ACCESSOR(CheckPrototypeMaps)
-
- Handle<JSObject> prototype() const { return hydrogen()->prototype(); }
- Handle<JSObject> holder() const { return hydrogen()->holder(); }
-};
-
-
-class LCheckSmi: public LTemplateInstruction<0, 1, 0> {
+class LCheckSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCheckSmi(LOperand* value) {
inputs_[0] = value;
@@ -2260,7 +2484,7 @@ class LCheckSmi: public LTemplateInstruction<0, 1, 0> {
};
-class LClampDToUint8: public LTemplateInstruction<1, 1, 0> {
+class LClampDToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LClampDToUint8(LOperand* value) {
inputs_[0] = value;
@@ -2272,7 +2496,7 @@ class LClampDToUint8: public LTemplateInstruction<1, 1, 0> {
};
-class LClampIToUint8: public LTemplateInstruction<1, 1, 0> {
+class LClampIToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LClampIToUint8(LOperand* value) {
inputs_[0] = value;
@@ -2284,86 +2508,75 @@ class LClampIToUint8: public LTemplateInstruction<1, 1, 0> {
};
-class LClampTToUint8: public LTemplateInstruction<1, 1, 1> {
+class LClampTToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
- LClampTToUint8(LOperand* value, LOperand* temp) {
+ LClampTToUint8(LOperand* value, LOperand* temp_xmm) {
inputs_[0] = value;
- temps_[0] = temp;
+ temps_[0] = temp_xmm;
}
LOperand* unclamped() { return inputs_[0]; }
+ LOperand* temp_xmm() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8, "clamp-t-to-uint8")
};
-class LCheckNonSmi: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckNonSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi")
-};
-
-
-class LAllocateObject: public LTemplateInstruction<1, 1, 1> {
- public:
- LAllocateObject(LOperand* context, LOperand* temp) {
- inputs_[0] = context;
- temps_[0] = temp;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(AllocateObject, "allocate-object")
- DECLARE_HYDROGEN_ACCESSOR(AllocateObject)
-};
-
-
-class LFastLiteral: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LFastLiteral(LOperand* context) {
- inputs_[0] = context;
+// Clamps a tagged value to uint8 without using SSE2 instructions.
+class LClampTToUint8NoSSE2 V8_FINAL : public LTemplateInstruction<1, 1, 3> {
+ public:
+ LClampTToUint8NoSSE2(LOperand* unclamped,
+ LOperand* temp1,
+ LOperand* temp2,
+ LOperand* temp3) {
+ inputs_[0] = unclamped;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ temps_[2] = temp3;
}
- LOperand* context() { return inputs_[0]; }
+ LOperand* unclamped() { return inputs_[0]; }
+ LOperand* scratch() { return temps_[0]; }
+ LOperand* scratch2() { return temps_[1]; }
+ LOperand* scratch3() { return temps_[2]; }
- DECLARE_CONCRETE_INSTRUCTION(FastLiteral, "fast-literal")
- DECLARE_HYDROGEN_ACCESSOR(FastLiteral)
+ DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8NoSSE2,
+ "clamp-t-to-uint8-nosse2")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
};
-class LArrayLiteral: public LTemplateInstruction<1, 1, 0> {
+class LCheckNonSmi V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
- explicit LArrayLiteral(LOperand* context) {
- inputs_[0] = context;
+ explicit LCheckNonSmi(LOperand* value) {
+ inputs_[0] = value;
}
- LOperand* context() { return inputs_[0]; }
+ LOperand* value() { return inputs_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral, "array-literal")
- DECLARE_HYDROGEN_ACCESSOR(ArrayLiteral)
+ DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi")
+ DECLARE_HYDROGEN_ACCESSOR(CheckHeapObject)
};
-class LObjectLiteral: public LTemplateInstruction<1, 1, 0> {
+class LAllocate V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
- explicit LObjectLiteral(LOperand* context) {
+ LAllocate(LOperand* context, LOperand* size, LOperand* temp) {
inputs_[0] = context;
+ inputs_[1] = size;
+ temps_[0] = temp;
}
LOperand* context() { return inputs_[0]; }
+ LOperand* size() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral, "object-literal")
- DECLARE_HYDROGEN_ACCESSOR(ObjectLiteral)
+ DECLARE_CONCRETE_INSTRUCTION(Allocate, "allocate")
+ DECLARE_HYDROGEN_ACCESSOR(Allocate)
};
-class LRegExpLiteral: public LTemplateInstruction<1, 1, 0> {
+class LRegExpLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LRegExpLiteral(LOperand* context) {
inputs_[0] = context;
@@ -2376,7 +2589,7 @@ class LRegExpLiteral: public LTemplateInstruction<1, 1, 0> {
};
-class LFunctionLiteral: public LTemplateInstruction<1, 1, 0> {
+class LFunctionLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LFunctionLiteral(LOperand* context) {
inputs_[0] = context;
@@ -2386,12 +2599,10 @@ class LFunctionLiteral: public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal")
DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral)
-
- Handle<SharedFunctionInfo> shared_info() { return hydrogen()->shared_info(); }
};
-class LToFastProperties: public LTemplateInstruction<1, 1, 0> {
+class LToFastProperties V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LToFastProperties(LOperand* value) {
inputs_[0] = value;
@@ -2404,7 +2615,7 @@ class LToFastProperties: public LTemplateInstruction<1, 1, 0> {
};
-class LTypeof: public LTemplateInstruction<1, 2, 0> {
+class LTypeof V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LTypeof(LOperand* context, LOperand* value) {
inputs_[0] = context;
@@ -2418,7 +2629,7 @@ class LTypeof: public LTemplateInstruction<1, 2, 0> {
};
-class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
+class LTypeofIsAndBranch V8_FINAL : public LControlInstruction<1, 0> {
public:
explicit LTypeofIsAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -2431,50 +2642,20 @@ class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
Handle<String> type_literal() { return hydrogen()->type_literal(); }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LDeleteProperty: public LTemplateInstruction<1, 3, 0> {
+class LOsrEntry V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
- LDeleteProperty(LOperand* context, LOperand* obj, LOperand* key) {
- inputs_[0] = context;
- inputs_[1] = obj;
- inputs_[2] = key;
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ return false;
}
-
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
- LOperand* key() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(DeleteProperty, "delete-property")
-};
-
-
-class LOsrEntry: public LTemplateInstruction<0, 0, 0> {
- public:
- LOsrEntry();
-
DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
-
- LOperand** SpilledRegisterArray() { return register_spills_; }
- LOperand** SpilledDoubleRegisterArray() { return double_register_spills_; }
-
- void MarkSpilledRegister(int allocation_index, LOperand* spill_operand);
- void MarkSpilledDoubleRegister(int allocation_index,
- LOperand* spill_operand);
-
- private:
- // Arrays of spill slot operands for registers with an assigned spill
- // slot, i.e., that must also be restored to the spill slot on OSR entry.
- // NULL if the register has no assigned spill slot. Indexed by allocation
- // index.
- LOperand* register_spills_[Register::kNumAllocatableRegisters];
- LOperand* double_register_spills_[DoubleRegister::kNumAllocatableRegisters];
};
-class LStackCheck: public LTemplateInstruction<0, 1, 0> {
+class LStackCheck V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LStackCheck(LOperand* context) {
inputs_[0] = context;
@@ -2492,23 +2673,7 @@ class LStackCheck: public LTemplateInstruction<0, 1, 0> {
};
-class LIn: public LTemplateInstruction<1, 3, 0> {
- public:
- LIn(LOperand* context, LOperand* key, LOperand* object) {
- inputs_[0] = context;
- inputs_[1] = key;
- inputs_[2] = object;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- LOperand* object() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(In, "in")
-};
-
-
-class LForInPrepareMap: public LTemplateInstruction<1, 2, 0> {
+class LForInPrepareMap V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LForInPrepareMap(LOperand* context, LOperand* object) {
inputs_[0] = context;
@@ -2522,7 +2687,7 @@ class LForInPrepareMap: public LTemplateInstruction<1, 2, 0> {
};
-class LForInCacheArray: public LTemplateInstruction<1, 1, 0> {
+class LForInCacheArray V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LForInCacheArray(LOperand* map) {
inputs_[0] = map;
@@ -2538,7 +2703,7 @@ class LForInCacheArray: public LTemplateInstruction<1, 1, 0> {
};
-class LCheckMapValue: public LTemplateInstruction<0, 2, 0> {
+class LCheckMapValue V8_FINAL : public LTemplateInstruction<0, 2, 0> {
public:
LCheckMapValue(LOperand* value, LOperand* map) {
inputs_[0] = value;
@@ -2552,7 +2717,7 @@ class LCheckMapValue: public LTemplateInstruction<0, 2, 0> {
};
-class LLoadFieldByIndex: public LTemplateInstruction<1, 2, 0> {
+class LLoadFieldByIndex V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LLoadFieldByIndex(LOperand* object, LOperand* index) {
inputs_[0] = object;
@@ -2567,14 +2732,14 @@ class LLoadFieldByIndex: public LTemplateInstruction<1, 2, 0> {
class LChunkBuilder;
-class LPlatformChunk: public LChunk {
+class LPlatformChunk V8_FINAL : public LChunk {
public:
LPlatformChunk(CompilationInfo* info, HGraph* graph)
: LChunk(info, graph),
num_double_slots_(0) { }
- int GetNextSpillIndex(bool is_double);
- LOperand* GetNextSpillSlot(bool is_double);
+ int GetNextSpillIndex(RegisterKind kind);
+ LOperand* GetNextSpillSlot(RegisterKind kind);
int num_double_slots() const { return num_double_slots_; }
@@ -2583,7 +2748,7 @@ class LPlatformChunk: public LChunk {
};
-class LChunkBuilder BASE_EMBEDDED {
+class LChunkBuilder V8_FINAL BASE_EMBEDDED {
public:
LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
: chunk_(NULL),
@@ -2596,21 +2761,32 @@ class LChunkBuilder BASE_EMBEDDED {
next_block_(NULL),
argument_count_(0),
allocator_(allocator),
- position_(RelocInfo::kNoPosition),
instruction_pending_deoptimization_environment_(NULL),
pending_deoptimization_ast_id_(BailoutId::None()) { }
// Build the sequence for the graph.
LPlatformChunk* Build();
+ LInstruction* CheckElideControlInstruction(HControlInstruction* instr);
+
// Declare methods that deal with the individual node types.
#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
#undef DECLARE_DO
- static HValue* SimplifiedDividendForMathFloorOfDiv(HValue* val);
static HValue* SimplifiedDivisorForMathFloorOfDiv(HValue* val);
+ LInstruction* DoMathFloor(HUnaryMathOperation* instr);
+ LInstruction* DoMathRound(HUnaryMathOperation* instr);
+ LInstruction* DoMathAbs(HUnaryMathOperation* instr);
+ LInstruction* DoMathLog(HUnaryMathOperation* instr);
+ LInstruction* DoMathSin(HUnaryMathOperation* instr);
+ LInstruction* DoMathCos(HUnaryMathOperation* instr);
+ LInstruction* DoMathTan(HUnaryMathOperation* instr);
+ LInstruction* DoMathExp(HUnaryMathOperation* instr);
+ LInstruction* DoMathSqrt(HUnaryMathOperation* instr);
+ LInstruction* DoMathPowHalf(HUnaryMathOperation* instr);
+
private:
enum Status {
UNUSED,
@@ -2629,11 +2805,12 @@ class LChunkBuilder BASE_EMBEDDED {
bool is_done() const { return status_ == DONE; }
bool is_aborted() const { return status_ == ABORTED; }
- void Abort(const char* reason);
+ void Abort(BailoutReason reason);
// Methods for getting operands for Use / Define / Temp.
LUnallocated* ToUnallocated(Register reg);
LUnallocated* ToUnallocated(XMMRegister reg);
+ LUnallocated* ToUnallocated(X87Register reg);
// Methods for setting up define-use relationships.
MUST_USE_RESULT LOperand* Use(HValue* value, LUnallocated* operand);
@@ -2667,6 +2844,9 @@ class LChunkBuilder BASE_EMBEDDED {
MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value);
MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value);
+ // An input operand in a constant operand.
+ MUST_USE_RESULT LOperand* UseConstant(HValue* value);
+
// An input operand in register, stack slot or a constant operand.
// Will not be moved to a register even if one is freely available.
MUST_USE_RESULT LOperand* UseAny(HValue* value);
@@ -2694,6 +2874,8 @@ class LChunkBuilder BASE_EMBEDDED {
template<int I, int T>
LInstruction* DefineFixedDouble(LTemplateInstruction<1, I, T>* instr,
XMMRegister reg);
+ template<int I, int T>
+ LInstruction* DefineX87TOS(LTemplateInstruction<1, I, T>* instr);
// Assigns an environment to an instruction. An instruction which can
// deoptimize must have an environment.
LInstruction* AssignEnvironment(LInstruction* instr);
@@ -2712,7 +2894,8 @@ class LChunkBuilder BASE_EMBEDDED {
CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
- int* argument_index_accumulator);
+ int* argument_index_accumulator,
+ ZoneList<HValue*>* objects_to_materialize);
void VisitInstruction(HInstruction* current);
@@ -2721,7 +2904,9 @@ class LChunkBuilder BASE_EMBEDDED {
LInstruction* DoArithmeticD(Token::Value op,
HArithmeticBinaryOperation* instr);
LInstruction* DoArithmeticT(Token::Value op,
- HArithmeticBinaryOperation* instr);
+ HBinaryOperation* instr);
+
+ LOperand* GetStoreKeyedValueOperand(HStoreKeyed* instr);
LPlatformChunk* chunk_;
CompilationInfo* info_;
@@ -2733,7 +2918,6 @@ class LChunkBuilder BASE_EMBEDDED {
HBasicBlock* next_block_;
int argument_count_;
LAllocator* allocator_;
- int position_;
LInstruction* instruction_pending_deoptimization_environment_;
BailoutId pending_deoptimization_ast_id_;
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc
index 0d0bf0377..025bd891c 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/macro-assembler-ia32.cc
@@ -27,11 +27,13 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_IA32)
+#if V8_TARGET_ARCH_IA32
#include "bootstrapper.h"
#include "codegen.h"
+#include "cpu-profiler.h"
#include "debug.h"
+#include "isolate-inl.h"
#include "runtime.h"
#include "serialize.h"
@@ -47,12 +49,67 @@ MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
allow_stub_calls_(true),
has_frame_(false) {
if (isolate() != NULL) {
+ // TODO(titzer): should we just use a null handle here instead?
code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
isolate());
}
}
+void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
+ if (isolate()->heap()->RootCanBeTreatedAsConstant(index)) {
+ Handle<Object> value(&isolate()->heap()->roots_array_start()[index]);
+ mov(destination, value);
+ return;
+ }
+ ExternalReference roots_array_start =
+ ExternalReference::roots_array_start(isolate());
+ mov(destination, Immediate(index));
+ mov(destination, Operand::StaticArray(destination,
+ times_pointer_size,
+ roots_array_start));
+}
+
+
+void MacroAssembler::StoreRoot(Register source,
+ Register scratch,
+ Heap::RootListIndex index) {
+ ASSERT(Heap::RootCanBeWrittenAfterInitialization(index));
+ ExternalReference roots_array_start =
+ ExternalReference::roots_array_start(isolate());
+ mov(scratch, Immediate(index));
+ mov(Operand::StaticArray(scratch, times_pointer_size, roots_array_start),
+ source);
+}
+
+
+void MacroAssembler::CompareRoot(Register with,
+ Register scratch,
+ Heap::RootListIndex index) {
+ ExternalReference roots_array_start =
+ ExternalReference::roots_array_start(isolate());
+ mov(scratch, Immediate(index));
+ cmp(with, Operand::StaticArray(scratch,
+ times_pointer_size,
+ roots_array_start));
+}
+
+
+void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
+ ASSERT(isolate()->heap()->RootCanBeTreatedAsConstant(index));
+ Handle<Object> value(&isolate()->heap()->roots_array_start()[index]);
+ cmp(with, value);
+}
+
+
+void MacroAssembler::CompareRoot(const Operand& with,
+ Heap::RootListIndex index) {
+ ASSERT(isolate()->heap()->RootCanBeTreatedAsConstant(index));
+ Handle<Object> value(&isolate()->heap()->roots_array_start()[index]);
+ cmp(with, value);
+}
+
+
void MacroAssembler::InNewSpace(
Register object,
Register scratch,
@@ -160,8 +217,233 @@ void MacroAssembler::ClampUint8(Register reg) {
}
-static double kUint32Bias =
- static_cast<double>(static_cast<uint32_t>(0xFFFFFFFF)) + 1;
+void MacroAssembler::SlowTruncateToI(Register result_reg,
+ Register input_reg,
+ int offset) {
+ DoubleToIStub stub(input_reg, result_reg, offset, true);
+ call(stub.GetCode(isolate()), RelocInfo::CODE_TARGET);
+}
+
+
+void MacroAssembler::TruncateDoubleToI(Register result_reg,
+ XMMRegister input_reg) {
+ Label done;
+ cvttsd2si(result_reg, Operand(input_reg));
+ cmp(result_reg, 0x80000000u);
+ j(not_equal, &done, Label::kNear);
+
+ sub(esp, Immediate(kDoubleSize));
+ movsd(MemOperand(esp, 0), input_reg);
+ SlowTruncateToI(result_reg, esp, 0);
+ add(esp, Immediate(kDoubleSize));
+ bind(&done);
+}
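(TruncateDoubleToI above relies on cvttsd2si producing the "integer indefinite" value 0x80000000 whenever the double is NaN or out of int32 range, so a single comparison chooses between the inline fast path and the DoubleToIStub call. A standalone sketch of that structure, assuming <emmintrin.h>, with SlowTruncate standing in for the stub:

    #include <emmintrin.h>
    #include <cstdint>

    int32_t SlowTruncate(double d);  // placeholder for the DoubleToIStub path

    int32_t TruncateDouble(double d) {
      // cvttsd2si returns INT32_MIN (0x80000000) on overflow or NaN.
      int32_t fast = _mm_cvttsd_si32(_mm_set_sd(d));
      if (fast != INT32_MIN) return fast;  // conversion succeeded inline
      return SlowTruncate(d);              // otherwise take the slow path
    }
)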
+
+
+void MacroAssembler::TruncateX87TOSToI(Register result_reg) {
+ sub(esp, Immediate(kDoubleSize));
+ fst_d(MemOperand(esp, 0));
+ SlowTruncateToI(result_reg, esp, 0);
+ add(esp, Immediate(kDoubleSize));
+}
+
+
+void MacroAssembler::X87TOSToI(Register result_reg,
+ MinusZeroMode minus_zero_mode,
+ Label* conversion_failed,
+ Label::Distance dst) {
+ Label done;
+ sub(esp, Immediate(kPointerSize));
+ fld(0);
+ fist_s(MemOperand(esp, 0));
+ fild_s(MemOperand(esp, 0));
+ pop(result_reg);
+ FCmp();
+ j(not_equal, conversion_failed, dst);
+ j(parity_even, conversion_failed, dst);
+ if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
+ test(result_reg, Operand(result_reg));
+ j(not_zero, &done, Label::kNear);
+ // To check for minus zero, we load the value again as float, and check
+ // if that is still 0.
+ sub(esp, Immediate(kPointerSize));
+ fst_s(MemOperand(esp, 0));
+ pop(result_reg);
+ test(result_reg, Operand(result_reg));
+ j(not_zero, conversion_failed, dst);
+ }
+ bind(&done);
+}
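(The minus-zero handling in X87TOSToI works because -0.0 and +0.0 compare equal yet have different bit patterns, so the value is stored back as a 32-bit float and its raw bits are inspected. A minimal illustration of that final check, as a hypothetical helper rather than V8 code:

    #include <cstdint>
    #include <cstring>

    bool IsMinusZero(float f) {
      uint32_t bits;
      std::memcpy(&bits, &f, sizeof(bits));
      return bits == 0x80000000u;  // -0.0f sets only the sign bit
    }
)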
+
+
+void MacroAssembler::DoubleToI(Register result_reg,
+ XMMRegister input_reg,
+ XMMRegister scratch,
+ MinusZeroMode minus_zero_mode,
+ Label* conversion_failed,
+ Label::Distance dst) {
+ ASSERT(!input_reg.is(scratch));
+ cvttsd2si(result_reg, Operand(input_reg));
+ Cvtsi2sd(scratch, Operand(result_reg));
+ ucomisd(scratch, input_reg);
+ j(not_equal, conversion_failed, dst);
+ j(parity_even, conversion_failed, dst); // NaN.
+ if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
+ Label done;
+ // The integer converted back is equal to the original. We
+ // only have to test if we got -0 as an input.
+ test(result_reg, Operand(result_reg));
+ j(not_zero, &done, Label::kNear);
+ movmskpd(result_reg, input_reg);
+ // Bit 0 contains the sign of the double in input_reg.
+ // If input was positive, we are ok and return 0, otherwise
+ // jump to conversion_failed.
+ and_(result_reg, 1);
+ j(not_zero, conversion_failed, dst);
+ bind(&done);
+ }
+}
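(DoubleToI combines two ideas: a round-trip conversion (cvttsd2si, back with cvtsi2sd, then ucomisd) proves the double was an exact int32, and movmskpd exposes the sign bit so -0.0 can be rejected. The same logic in portable form, a sketch under those assumptions rather than the emitted code itself:

    #include <emmintrin.h>
    #include <cmath>
    #include <cstdint>

    bool DoubleToInt32Exact(double d, int32_t* out) {
      int32_t i = _mm_cvttsd_si32(_mm_set_sd(d));     // truncate
      if (static_cast<double>(i) != d) return false;  // NaN or lost precision
      if (i == 0 && std::signbit(d)) return false;    // reject -0.0
      *out = i;
      return true;
    }
)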
+
+
+void MacroAssembler::TruncateHeapNumberToI(Register result_reg,
+ Register input_reg) {
+ Label done, slow_case;
+
+ if (CpuFeatures::IsSupported(SSE3)) {
+ CpuFeatureScope scope(this, SSE3);
+ Label convert;
+ // Use more powerful conversion when sse3 is available.
+ // Load x87 register with heap number.
+ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
+ // Get exponent alone and check for too-big exponent.
+ mov(result_reg, FieldOperand(input_reg, HeapNumber::kExponentOffset));
+ and_(result_reg, HeapNumber::kExponentMask);
+ const uint32_t kTooBigExponent =
+ (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
+ cmp(Operand(result_reg), Immediate(kTooBigExponent));
+ j(greater_equal, &slow_case, Label::kNear);
+
+ // Reserve space for 64 bit answer.
+ sub(Operand(esp), Immediate(kDoubleSize));
+ // Do conversion, which cannot fail because we checked the exponent.
+ fisttp_d(Operand(esp, 0));
+ mov(result_reg, Operand(esp, 0)); // Low word of answer is the result.
+ add(Operand(esp), Immediate(kDoubleSize));
+ jmp(&done, Label::kNear);
+
+ // Slow case.
+ bind(&slow_case);
+ if (input_reg.is(result_reg)) {
+ // Input is clobbered. Restore number from fpu stack
+ sub(Operand(esp), Immediate(kDoubleSize));
+ fstp_d(Operand(esp, 0));
+ SlowTruncateToI(result_reg, esp, 0);
+ add(esp, Immediate(kDoubleSize));
+ } else {
+ fstp(0);
+ SlowTruncateToI(result_reg, input_reg);
+ }
+ } else if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatureScope scope(this, SSE2);
+ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ cvttsd2si(result_reg, Operand(xmm0));
+ cmp(result_reg, 0x80000000u);
+ j(not_equal, &done, Label::kNear);
+    // Check if the input was 0x80000000 (kMinInt).
+ // If no, then we got an overflow and we deoptimize.
+ ExternalReference min_int = ExternalReference::address_of_min_int();
+ ucomisd(xmm0, Operand::StaticVariable(min_int));
+ j(not_equal, &slow_case, Label::kNear);
+ j(parity_even, &slow_case, Label::kNear); // NaN.
+ jmp(&done, Label::kNear);
+
+ // Slow case.
+ bind(&slow_case);
+ if (input_reg.is(result_reg)) {
+ // Input is clobbered. Restore number from double scratch.
+ sub(esp, Immediate(kDoubleSize));
+ movsd(MemOperand(esp, 0), xmm0);
+ SlowTruncateToI(result_reg, esp, 0);
+ add(esp, Immediate(kDoubleSize));
+ } else {
+ SlowTruncateToI(result_reg, input_reg);
+ }
+ } else {
+ SlowTruncateToI(result_reg, input_reg);
+ }
+ bind(&done);
+}
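
The SSE3 path above can use fisttp_d unconditionally because it first rejects any value whose biased exponent is at least kExponentBias + 63, i.e. anything too large for a 64-bit integer. A small C++ sketch of that exponent test (hypothetical helper, not part of the patch):

#include <cstdint>
#include <cstring>

// Hypothetical illustration of the exponent test guarding fisttp_d above:
// values whose unbiased exponent is 63 or more cannot fit a 64-bit integer,
// so the generated code sends them to the slow case instead.
bool ExponentFitsInt64(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  const int kExponentBias = 1023;                      // HeapNumber::kExponentBias
  int biased_exponent = static_cast<int>((bits >> 52) & 0x7FF);
  return biased_exponent < kExponentBias + 63;         // mirrors kTooBigExponent
}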
+
+
+void MacroAssembler::TaggedToI(Register result_reg,
+ Register input_reg,
+ XMMRegister temp,
+ MinusZeroMode minus_zero_mode,
+ Label* lost_precision) {
+ Label done;
+ ASSERT(!temp.is(xmm0));
+
+ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
+ isolate()->factory()->heap_number_map());
+ j(not_equal, lost_precision, Label::kNear);
+
+ if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
+ ASSERT(!temp.is(no_xmm_reg));
+ CpuFeatureScope scope(this, SSE2);
+
+ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ cvttsd2si(result_reg, Operand(xmm0));
+ Cvtsi2sd(temp, Operand(result_reg));
+ ucomisd(xmm0, temp);
+ RecordComment("Deferred TaggedToI: lost precision");
+ j(not_equal, lost_precision, Label::kNear);
+ RecordComment("Deferred TaggedToI: NaN");
+ j(parity_even, lost_precision, Label::kNear);
+ if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
+ test(result_reg, Operand(result_reg));
+ j(not_zero, &done, Label::kNear);
+ movmskpd(result_reg, xmm0);
+ and_(result_reg, 1);
+ RecordComment("Deferred TaggedToI: minus zero");
+ j(not_zero, lost_precision, Label::kNear);
+ }
+ } else {
+ // TODO(olivf) Converting a number on the FPU is actually quite slow. We
+ // should first try a fast conversion and then bail out to this slow case.
+ Label lost_precision_pop, zero_check;
+ Label* lost_precision_int = (minus_zero_mode == FAIL_ON_MINUS_ZERO)
+ ? &lost_precision_pop : lost_precision;
+ sub(esp, Immediate(kPointerSize));
+ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
+ if (minus_zero_mode == FAIL_ON_MINUS_ZERO) fld(0);
+ fist_s(MemOperand(esp, 0));
+ fild_s(MemOperand(esp, 0));
+ FCmp();
+ pop(result_reg);
+ j(not_equal, lost_precision_int, Label::kNear);
+ j(parity_even, lost_precision_int, Label::kNear); // NaN.
+ if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
+ test(result_reg, Operand(result_reg));
+ j(zero, &zero_check, Label::kNear);
+ fstp(0);
+ jmp(&done, Label::kNear);
+ bind(&zero_check);
+ // To check for minus zero, we load the value again as float, and check
+ // if that is still 0.
+ sub(esp, Immediate(kPointerSize));
+ fstp_s(Operand(esp, 0));
+ pop(result_reg);
+ test(result_reg, Operand(result_reg));
+ j(zero, &done, Label::kNear);
+ jmp(lost_precision, Label::kNear);
+
+ bind(&lost_precision_pop);
+ fstp(0);
+ jmp(lost_precision, Label::kNear);
+ }
+ }
+ bind(&done);
+}
void MacroAssembler::LoadUint32(XMMRegister dst,
@@ -169,15 +451,31 @@ void MacroAssembler::LoadUint32(XMMRegister dst,
XMMRegister scratch) {
Label done;
cmp(src, Immediate(0));
- movdbl(scratch,
- Operand(reinterpret_cast<int32_t>(&kUint32Bias), RelocInfo::NONE));
- cvtsi2sd(dst, src);
+ ExternalReference uint32_bias =
+ ExternalReference::address_of_uint32_bias();
+ movsd(scratch, Operand::StaticVariable(uint32_bias));
+ Cvtsi2sd(dst, src);
j(not_sign, &done, Label::kNear);
addsd(dst, scratch);
bind(&done);
}
+void MacroAssembler::LoadUint32NoSSE2(Register src) {
+ Label done;
+ push(src);
+ fild_s(Operand(esp, 0));
+ cmp(src, Immediate(0));
+ j(not_sign, &done, Label::kNear);
+ ExternalReference uint32_bias =
+ ExternalReference::address_of_uint32_bias();
+ fld_d(Operand::StaticVariable(uint32_bias));
+ faddp(1);
+ bind(&done);
+ add(esp, Immediate(kPointerSize));
+}
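
Both LoadUint32 and LoadUint32NoSSE2 convert the register as a signed 32-bit integer and then add a 2^32 bias when the sign bit was set, which yields the unsigned value as a double. A scalar C++ sketch of the trick (hypothetical helper, not part of the patch):

#include <cstdint>

// Hypothetical scalar version of the uint32 bias trick used above: convert
// the bits as a signed 32-bit integer, then add 2^32 if the sign bit was set.
double Uint32ToDouble(uint32_t value) {
  double result = static_cast<double>(static_cast<int32_t>(value));  // cvtsi2sd / fild_s
  if (static_cast<int32_t>(value) < 0) {
    result += 4294967296.0;  // the uint32 bias (2^32)
  }
  return result;
}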
+
+
void MacroAssembler::RecordWriteArray(Register object,
Register value,
Register index,
@@ -385,11 +683,17 @@ void MacroAssembler::DebugBreak() {
Set(eax, Immediate(0));
mov(ebx, Immediate(ExternalReference(Runtime::kDebugBreak, isolate())));
CEntryStub ces(1);
- call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
+ call(ces.GetCode(isolate()), RelocInfo::DEBUG_BREAK);
}
#endif
+void MacroAssembler::Cvtsi2sd(XMMRegister dst, const Operand& src) {
+ xorps(dst, dst);
+ cvtsi2sd(dst, src);
+}
+
+
void MacroAssembler::Set(Register dst, const Immediate& x) {
if (x.is_zero()) {
xor_(dst, dst); // Shorter than mov.
@@ -406,7 +710,7 @@ void MacroAssembler::Set(const Operand& dst, const Immediate& x) {
bool MacroAssembler::IsUnsafeImmediate(const Immediate& x) {
static const int kMaxImmediateBits = 17;
- if (x.rmode_ != RelocInfo::NONE) return false;
+ if (!RelocInfo::IsNone(x.rmode_)) return false;
return !is_intn(x.x_, kMaxImmediateBits);
}
@@ -431,21 +735,6 @@ void MacroAssembler::SafePush(const Immediate& x) {
}
-void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
- // see ROOT_ACCESSOR macro in factory.h
- Handle<Object> value(&isolate()->heap()->roots_array_start()[index]);
- cmp(with, value);
-}
-
-
-void MacroAssembler::CompareRoot(const Operand& with,
- Heap::RootListIndex index) {
- // see ROOT_ACCESSOR macro in factory.h
- Handle<Object> value(&isolate()->heap()->roots_array_start()[index]);
- cmp(with, value);
-}
-
-
void MacroAssembler::CmpObjectType(Register heap_object,
InstanceType type,
Register map) {
@@ -507,7 +796,8 @@ void MacroAssembler::StoreNumberToDoubleElements(
Register scratch1,
XMMRegister scratch2,
Label* fail,
- bool specialize_for_processor) {
+ bool specialize_for_processor,
+ int elements_offset) {
Label smi_value, done, maybe_nan, not_nan, is_nan, have_double_value;
JumpIfSmi(maybe_number, &smi_value, Label::kNear);
@@ -526,15 +816,17 @@ void MacroAssembler::StoreNumberToDoubleElements(
ExternalReference canonical_nan_reference =
ExternalReference::address_of_canonical_non_hole_nan();
if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) {
- CpuFeatures::Scope use_sse2(SSE2);
- movdbl(scratch2, FieldOperand(maybe_number, HeapNumber::kValueOffset));
+ CpuFeatureScope use_sse2(this, SSE2);
+ movsd(scratch2, FieldOperand(maybe_number, HeapNumber::kValueOffset));
bind(&have_double_value);
- movdbl(FieldOperand(elements, key, times_4, FixedDoubleArray::kHeaderSize),
+ movsd(FieldOperand(elements, key, times_4,
+ FixedDoubleArray::kHeaderSize - elements_offset),
scratch2);
} else {
fld_d(FieldOperand(maybe_number, HeapNumber::kValueOffset));
bind(&have_double_value);
- fstp_d(FieldOperand(elements, key, times_4, FixedDoubleArray::kHeaderSize));
+ fstp_d(FieldOperand(elements, key, times_4,
+ FixedDoubleArray::kHeaderSize - elements_offset));
}
jmp(&done);
@@ -546,8 +838,8 @@ void MacroAssembler::StoreNumberToDoubleElements(
j(zero, &not_nan);
bind(&is_nan);
if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) {
- CpuFeatures::Scope use_sse2(SSE2);
- movdbl(scratch2, Operand::StaticVariable(canonical_nan_reference));
+ CpuFeatureScope use_sse2(this, SSE2);
+ movsd(scratch2, Operand::StaticVariable(canonical_nan_reference));
} else {
fld_d(Operand::StaticVariable(canonical_nan_reference));
}
@@ -559,15 +851,17 @@ void MacroAssembler::StoreNumberToDoubleElements(
mov(scratch1, maybe_number);
SmiUntag(scratch1);
if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) {
- CpuFeatures::Scope fscope(SSE2);
- cvtsi2sd(scratch2, scratch1);
- movdbl(FieldOperand(elements, key, times_4, FixedDoubleArray::kHeaderSize),
+ CpuFeatureScope fscope(this, SSE2);
+ Cvtsi2sd(scratch2, scratch1);
+ movsd(FieldOperand(elements, key, times_4,
+ FixedDoubleArray::kHeaderSize - elements_offset),
scratch2);
} else {
push(scratch1);
fild_s(Operand(esp, 0));
pop(scratch1);
- fstp_d(FieldOperand(elements, key, times_4, FixedDoubleArray::kHeaderSize));
+ fstp_d(FieldOperand(elements, key, times_4,
+ FixedDoubleArray::kHeaderSize - elements_offset));
}
bind(&done);
}
@@ -575,44 +869,28 @@ void MacroAssembler::StoreNumberToDoubleElements(
void MacroAssembler::CompareMap(Register obj,
Handle<Map> map,
- Label* early_success,
- CompareMapMode mode) {
+ Label* early_success) {
cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
- if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) {
- ElementsKind kind = map->elements_kind();
- if (IsFastElementsKind(kind)) {
- bool packed = IsFastPackedElementsKind(kind);
- Map* current_map = *map;
- while (CanTransitionToMoreGeneralFastElementsKind(kind, packed)) {
- kind = GetNextMoreGeneralFastElementsKind(kind, packed);
- current_map = current_map->LookupElementsTransitionMap(kind);
- if (!current_map) break;
- j(equal, early_success, Label::kNear);
- cmp(FieldOperand(obj, HeapObject::kMapOffset),
- Handle<Map>(current_map));
- }
- }
- }
}
void MacroAssembler::CheckMap(Register obj,
Handle<Map> map,
Label* fail,
- SmiCheckType smi_check_type,
- CompareMapMode mode) {
+ SmiCheckType smi_check_type) {
if (smi_check_type == DO_SMI_CHECK) {
JumpIfSmi(obj, fail);
}
Label success;
- CompareMap(obj, map, &success, mode);
+ CompareMap(obj, map, &success);
j(not_equal, fail);
bind(&success);
}
void MacroAssembler::DispatchMap(Register obj,
+ Register unused,
Handle<Map> map,
Handle<Code> success,
SmiCheckType smi_check_type) {
@@ -638,6 +916,16 @@ Condition MacroAssembler::IsObjectStringType(Register heap_object,
}
+Condition MacroAssembler::IsObjectNameType(Register heap_object,
+ Register map,
+ Register instance_type) {
+ mov(map, FieldOperand(heap_object, HeapObject::kMapOffset));
+ movzx_b(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
+ cmpb(instance_type, static_cast<uint8_t>(LAST_NAME_TYPE));
+ return below_equal;
+}
+
+
void MacroAssembler::IsObjectJSObjectType(Register heap_object,
Register map,
Register scratch,
@@ -678,7 +966,7 @@ void MacroAssembler::AssertNumber(Register object) {
JumpIfSmi(object, &ok);
cmp(FieldOperand(object, HeapObject::kMapOffset),
isolate()->factory()->heap_number_map());
- Check(equal, "Operand not a number");
+ Check(equal, kOperandNotANumber);
bind(&ok);
}
}
@@ -687,7 +975,7 @@ void MacroAssembler::AssertNumber(Register object) {
void MacroAssembler::AssertSmi(Register object) {
if (emit_debug_code()) {
test(object, Immediate(kSmiTagMask));
- Check(equal, "Operand is not a smi");
+ Check(equal, kOperandIsNotASmi);
}
}
@@ -695,12 +983,25 @@ void MacroAssembler::AssertSmi(Register object) {
void MacroAssembler::AssertString(Register object) {
if (emit_debug_code()) {
test(object, Immediate(kSmiTagMask));
- Check(not_equal, "Operand is a smi and not a string");
+ Check(not_equal, kOperandIsASmiAndNotAString);
push(object);
mov(object, FieldOperand(object, HeapObject::kMapOffset));
CmpInstanceType(object, FIRST_NONSTRING_TYPE);
pop(object);
- Check(below, "Operand is not a string");
+ Check(below, kOperandIsNotAString);
+ }
+}
+
+
+void MacroAssembler::AssertName(Register object) {
+ if (emit_debug_code()) {
+ test(object, Immediate(kSmiTagMask));
+ Check(not_equal, kOperandIsASmiAndNotAName);
+ push(object);
+ mov(object, FieldOperand(object, HeapObject::kMapOffset));
+ CmpInstanceType(object, LAST_NAME_TYPE);
+ pop(object);
+ Check(below_equal, kOperandIsNotAName);
}
}
@@ -708,7 +1009,31 @@ void MacroAssembler::AssertString(Register object) {
void MacroAssembler::AssertNotSmi(Register object) {
if (emit_debug_code()) {
test(object, Immediate(kSmiTagMask));
- Check(not_equal, "Operand is a smi");
+ Check(not_equal, kOperandIsASmi);
+ }
+}
+
+
+void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
+ if (frame_mode == BUILD_STUB_FRAME) {
+ push(ebp); // Caller's frame pointer.
+ mov(ebp, esp);
+ push(esi); // Callee's context.
+ push(Immediate(Smi::FromInt(StackFrame::STUB)));
+ } else {
+ PredictableCodeSizeScope predictible_code_size_scope(this,
+ kNoCodeAgeSequenceLength);
+ if (isolate()->IsCodePreAgingActive()) {
+ // Pre-age the code.
+ call(isolate()->builtins()->MarkCodeAsExecutedOnce(),
+ RelocInfo::CODE_AGE_SEQUENCE);
+ Nop(kNoCodeAgeSequenceLength - Assembler::kCallInstructionLength);
+ } else {
+ push(ebp); // Caller's frame pointer.
+ mov(ebp, esp);
+ push(esi); // Callee's context.
+ push(edi); // Callee's JS function.
+ }
}
}
@@ -721,7 +1046,7 @@ void MacroAssembler::EnterFrame(StackFrame::Type type) {
push(Immediate(CodeObject()));
if (emit_debug_code()) {
cmp(Operand(esp, 0), Immediate(isolate()->factory()->undefined_value()));
- Check(not_equal, "code object not properly patched");
+ Check(not_equal, kCodeObjectNotProperlyPatched);
}
}
@@ -730,7 +1055,7 @@ void MacroAssembler::LeaveFrame(StackFrame::Type type) {
if (emit_debug_code()) {
cmp(Operand(ebp, StandardFrameConstants::kMarkerOffset),
Immediate(Smi::FromInt(type)));
- Check(equal, "stack frame types must match");
+ Check(equal, kStackFrameTypesMustMatch);
}
leave();
}
@@ -762,13 +1087,13 @@ void MacroAssembler::EnterExitFramePrologue() {
void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) {
// Optionally save all XMM registers.
if (save_doubles) {
- CpuFeatures::Scope scope(SSE2);
+ CpuFeatureScope scope(this, SSE2);
int space = XMMRegister::kNumRegisters * kDoubleSize + argc * kPointerSize;
sub(esp, Immediate(space));
const int offset = -2 * kPointerSize;
for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
- movdbl(Operand(ebp, offset - ((i + 1) * kDoubleSize)), reg);
+ movsd(Operand(ebp, offset - ((i + 1) * kDoubleSize)), reg);
}
} else {
sub(esp, Immediate(argc * kPointerSize));
@@ -808,11 +1133,11 @@ void MacroAssembler::EnterApiExitFrame(int argc) {
void MacroAssembler::LeaveExitFrame(bool save_doubles) {
// Optionally restore all XMM registers.
if (save_doubles) {
- CpuFeatures::Scope scope(SSE2);
+ CpuFeatureScope scope(this, SSE2);
const int offset = -2 * kPointerSize;
for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
- movdbl(reg, Operand(ebp, offset - ((i + 1) * kDoubleSize)));
+ movsd(reg, Operand(ebp, offset - ((i + 1) * kDoubleSize)));
}
}
@@ -826,13 +1151,16 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles) {
// Push the return address to get ready to return.
push(ecx);
- LeaveExitFrameEpilogue();
+ LeaveExitFrameEpilogue(true);
}
-void MacroAssembler::LeaveExitFrameEpilogue() {
+
+void MacroAssembler::LeaveExitFrameEpilogue(bool restore_context) {
// Restore current context from top and clear it in debug mode.
ExternalReference context_address(Isolate::kContextAddress, isolate());
- mov(esi, Operand::StaticVariable(context_address));
+ if (restore_context) {
+ mov(esi, Operand::StaticVariable(context_address));
+ }
#ifdef DEBUG
mov(Operand::StaticVariable(context_address), Immediate(0));
#endif
@@ -844,11 +1172,11 @@ void MacroAssembler::LeaveExitFrameEpilogue() {
}
-void MacroAssembler::LeaveApiExitFrame() {
+void MacroAssembler::LeaveApiExitFrame(bool restore_context) {
mov(esp, ebp);
pop(ebp);
- LeaveExitFrameEpilogue();
+ LeaveExitFrameEpilogue(restore_context);
}
@@ -995,69 +1323,65 @@ void MacroAssembler::ThrowUncatchable(Register value) {
void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
- Register scratch,
+ Register scratch1,
+ Register scratch2,
Label* miss) {
Label same_contexts;
- ASSERT(!holder_reg.is(scratch));
+ ASSERT(!holder_reg.is(scratch1));
+ ASSERT(!holder_reg.is(scratch2));
+ ASSERT(!scratch1.is(scratch2));
// Load current lexical context from the stack frame.
- mov(scratch, Operand(ebp, StandardFrameConstants::kContextOffset));
+ mov(scratch1, Operand(ebp, StandardFrameConstants::kContextOffset));
// When generating debug code, make sure the lexical context is set.
if (emit_debug_code()) {
- cmp(scratch, Immediate(0));
- Check(not_equal, "we should not have an empty lexical context");
+ cmp(scratch1, Immediate(0));
+ Check(not_equal, kWeShouldNotHaveAnEmptyLexicalContext);
}
// Load the native context of the current context.
int offset =
Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
- mov(scratch, FieldOperand(scratch, offset));
- mov(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
+ mov(scratch1, FieldOperand(scratch1, offset));
+ mov(scratch1, FieldOperand(scratch1, GlobalObject::kNativeContextOffset));
// Check the context is a native context.
if (emit_debug_code()) {
- push(scratch);
// Read the first word and compare to native_context_map.
- mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
- cmp(scratch, isolate()->factory()->native_context_map());
- Check(equal, "JSGlobalObject::native_context should be a native context.");
- pop(scratch);
+ cmp(FieldOperand(scratch1, HeapObject::kMapOffset),
+ isolate()->factory()->native_context_map());
+ Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
}
// Check if both contexts are the same.
- cmp(scratch, FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
+ cmp(scratch1, FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
j(equal, &same_contexts);
// Compare security tokens, save holder_reg on the stack so we can use it
// as a temporary register.
//
- // TODO(119): avoid push(holder_reg)/pop(holder_reg)
- push(holder_reg);
// Check that the security token in the calling global object is
// compatible with the security token in the receiving global
// object.
- mov(holder_reg,
+ mov(scratch2,
FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
// Check the context is a native context.
if (emit_debug_code()) {
- cmp(holder_reg, isolate()->factory()->null_value());
- Check(not_equal, "JSGlobalProxy::context() should not be null.");
+ cmp(scratch2, isolate()->factory()->null_value());
+ Check(not_equal, kJSGlobalProxyContextShouldNotBeNull);
- push(holder_reg);
    // Read the first word and compare to native_context_map().
- mov(holder_reg, FieldOperand(holder_reg, HeapObject::kMapOffset));
- cmp(holder_reg, isolate()->factory()->native_context_map());
- Check(equal, "JSGlobalObject::native_context should be a native context.");
- pop(holder_reg);
+ cmp(FieldOperand(scratch2, HeapObject::kMapOffset),
+ isolate()->factory()->native_context_map());
+ Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
}
int token_offset = Context::kHeaderSize +
Context::SECURITY_TOKEN_INDEX * kPointerSize;
- mov(scratch, FieldOperand(scratch, token_offset));
- cmp(scratch, FieldOperand(holder_reg, token_offset));
- pop(holder_reg);
+ mov(scratch1, FieldOperand(scratch1, token_offset));
+ cmp(scratch1, FieldOperand(scratch2, token_offset));
j(not_equal, miss);
bind(&same_contexts);
@@ -1186,8 +1510,8 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss,
void MacroAssembler::LoadAllocationTopHelper(Register result,
Register scratch,
AllocationFlags flags) {
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
+ ExternalReference allocation_top =
+ AllocationUtils::GetAllocationTopReference(isolate(), flags);
// Just return if allocation top is already known.
if ((flags & RESULT_CONTAINS_TOP) != 0) {
@@ -1195,47 +1519,50 @@ void MacroAssembler::LoadAllocationTopHelper(Register result,
ASSERT(scratch.is(no_reg));
#ifdef DEBUG
// Assert that result actually contains top on entry.
- cmp(result, Operand::StaticVariable(new_space_allocation_top));
- Check(equal, "Unexpected allocation top");
+ cmp(result, Operand::StaticVariable(allocation_top));
+ Check(equal, kUnexpectedAllocationTop);
#endif
return;
}
// Move address of new object to result. Use scratch register if available.
if (scratch.is(no_reg)) {
- mov(result, Operand::StaticVariable(new_space_allocation_top));
+ mov(result, Operand::StaticVariable(allocation_top));
} else {
- mov(scratch, Immediate(new_space_allocation_top));
+ mov(scratch, Immediate(allocation_top));
mov(result, Operand(scratch, 0));
}
}
void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
- Register scratch) {
+ Register scratch,
+ AllocationFlags flags) {
if (emit_debug_code()) {
test(result_end, Immediate(kObjectAlignmentMask));
- Check(zero, "Unaligned allocation in new space");
+ Check(zero, kUnalignedAllocationInNewSpace);
}
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
+ ExternalReference allocation_top =
+ AllocationUtils::GetAllocationTopReference(isolate(), flags);
// Update new top. Use scratch if available.
if (scratch.is(no_reg)) {
- mov(Operand::StaticVariable(new_space_allocation_top), result_end);
+ mov(Operand::StaticVariable(allocation_top), result_end);
} else {
mov(Operand(scratch, 0), result_end);
}
}
-void MacroAssembler::AllocateInNewSpace(int object_size,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags) {
+void MacroAssembler::Allocate(int object_size,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ AllocationFlags flags) {
+ ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
+ ASSERT(object_size <= Page::kMaxNonCodeHeapObjectSize);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -1255,44 +1582,65 @@ void MacroAssembler::AllocateInNewSpace(int object_size,
// Load address of new object into result.
LoadAllocationTopHelper(result, scratch, flags);
- Register top_reg = result_end.is_valid() ? result_end : result;
-
- // Calculate new top and bail out if new space is exhausted.
- ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address(isolate());
+ ExternalReference allocation_limit =
+ AllocationUtils::GetAllocationLimitReference(isolate(), flags);
+
+ // Align the next allocation. Storing the filler map without checking top is
+ // safe in new-space because the limit of the heap is aligned there.
+ if ((flags & DOUBLE_ALIGNMENT) != 0) {
+ ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
+ ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
+ Label aligned;
+ test(result, Immediate(kDoubleAlignmentMask));
+ j(zero, &aligned, Label::kNear);
+ if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
+ cmp(result, Operand::StaticVariable(allocation_limit));
+ j(above_equal, gc_required);
+ }
+ mov(Operand(result, 0),
+ Immediate(isolate()->factory()->one_pointer_filler_map()));
+ add(result, Immediate(kDoubleSize / 2));
+ bind(&aligned);
+ }
+ // Calculate new top and bail out if space is exhausted.
+ Register top_reg = result_end.is_valid() ? result_end : result;
if (!top_reg.is(result)) {
mov(top_reg, result);
}
add(top_reg, Immediate(object_size));
j(carry, gc_required);
- cmp(top_reg, Operand::StaticVariable(new_space_allocation_limit));
+ cmp(top_reg, Operand::StaticVariable(allocation_limit));
j(above, gc_required);
// Update allocation top.
- UpdateAllocationTopHelper(top_reg, scratch);
+ UpdateAllocationTopHelper(top_reg, scratch, flags);
// Tag result if requested.
+ bool tag_result = (flags & TAG_OBJECT) != 0;
if (top_reg.is(result)) {
- if ((flags & TAG_OBJECT) != 0) {
+ if (tag_result) {
sub(result, Immediate(object_size - kHeapObjectTag));
} else {
sub(result, Immediate(object_size));
}
- } else if ((flags & TAG_OBJECT) != 0) {
- add(result, Immediate(kHeapObjectTag));
+ } else if (tag_result) {
+ ASSERT(kHeapObjectTag == 1);
+ inc(result);
}
}
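
The DOUBLE_ALIGNMENT handling above keeps allocations 8-byte aligned by writing a one-pointer filler map at an unaligned top and bumping the top by kDoubleSize / 2. A rough C++ sketch of that fix-up, assuming the ia32 word size (hypothetical helper, not part of the patch):

#include <cstdint>

// Rough sketch of the DOUBLE_ALIGNMENT fix-up above, assuming a 4-byte word
// as on ia32: when the allocation top is not 8-byte aligned, a one-word
// filler map is stored there and the top advances by kDoubleSize / 2 so the
// real object starts aligned.
uintptr_t AlignAllocationTop(uintptr_t top, uint32_t filler_map_word) {
  const uintptr_t kDoubleAlignmentMask = 7;  // kDoubleAlignment - 1
  if ((top & kDoubleAlignmentMask) != 0) {
    *reinterpret_cast<uint32_t*>(top) = filler_map_word;  // one_pointer_filler_map
    top += 4;                                             // kDoubleSize / 2
  }
  return top;
}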
-void MacroAssembler::AllocateInNewSpace(int header_size,
- ScaleFactor element_size,
- Register element_count,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags) {
+void MacroAssembler::Allocate(int header_size,
+ ScaleFactor element_size,
+ Register element_count,
+ RegisterValueType element_count_type,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ AllocationFlags flags) {
+ ASSERT((flags & SIZE_IN_WORDS) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -1311,34 +1659,63 @@ void MacroAssembler::AllocateInNewSpace(int header_size,
// Load address of new object into result.
LoadAllocationTopHelper(result, scratch, flags);
- // Calculate new top and bail out if new space is exhausted.
- ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address(isolate());
+ ExternalReference allocation_limit =
+ AllocationUtils::GetAllocationLimitReference(isolate(), flags);
+
+ // Align the next allocation. Storing the filler map without checking top is
+ // safe in new-space because the limit of the heap is aligned there.
+ if ((flags & DOUBLE_ALIGNMENT) != 0) {
+ ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
+ ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
+ Label aligned;
+ test(result, Immediate(kDoubleAlignmentMask));
+ j(zero, &aligned, Label::kNear);
+ if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
+ cmp(result, Operand::StaticVariable(allocation_limit));
+ j(above_equal, gc_required);
+ }
+ mov(Operand(result, 0),
+ Immediate(isolate()->factory()->one_pointer_filler_map()));
+ add(result, Immediate(kDoubleSize / 2));
+ bind(&aligned);
+ }
+ // Calculate new top and bail out if space is exhausted.
// We assume that element_count*element_size + header_size does not
// overflow.
+ if (element_count_type == REGISTER_VALUE_IS_SMI) {
+ STATIC_ASSERT(static_cast<ScaleFactor>(times_2 - 1) == times_1);
+ STATIC_ASSERT(static_cast<ScaleFactor>(times_4 - 1) == times_2);
+ STATIC_ASSERT(static_cast<ScaleFactor>(times_8 - 1) == times_4);
+ ASSERT(element_size >= times_2);
+ ASSERT(kSmiTagSize == 1);
+ element_size = static_cast<ScaleFactor>(element_size - 1);
+ } else {
+ ASSERT(element_count_type == REGISTER_VALUE_IS_INT32);
+ }
lea(result_end, Operand(element_count, element_size, header_size));
add(result_end, result);
j(carry, gc_required);
- cmp(result_end, Operand::StaticVariable(new_space_allocation_limit));
+ cmp(result_end, Operand::StaticVariable(allocation_limit));
j(above, gc_required);
- // Tag result if requested.
if ((flags & TAG_OBJECT) != 0) {
- lea(result, Operand(result, kHeapObjectTag));
+ ASSERT(kHeapObjectTag == 1);
+ inc(result);
}
// Update allocation top.
- UpdateAllocationTopHelper(result_end, scratch);
+ UpdateAllocationTopHelper(result_end, scratch, flags);
}
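
The REGISTER_VALUE_IS_SMI branch above works because an ia32 smi is the integer shifted left by one, so dropping one power of two from the scale factor compensates for the tag. A small C++ sketch of the arithmetic (hypothetical helper, not part of the patch):

#include <cstdint>

// Sketch of the smi scaling used above: an ia32 smi is the value shifted left
// by one (kSmiTagSize == 1), so scaling the smi by element_size / 2 equals
// scaling the untagged value by element_size. Assumes element_size_log2 >= 1,
// matching the ASSERT(element_size >= times_2) in the patch.
uint32_t ScaledSizeFromSmi(uint32_t smi_value, int element_size_log2,
                           uint32_t header_size) {
  return (smi_value << (element_size_log2 - 1)) + header_size;
}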
-void MacroAssembler::AllocateInNewSpace(Register object_size,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags) {
+void MacroAssembler::Allocate(Register object_size,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ AllocationFlags flags) {
+ ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -1357,24 +1734,44 @@ void MacroAssembler::AllocateInNewSpace(Register object_size,
// Load address of new object into result.
LoadAllocationTopHelper(result, scratch, flags);
- // Calculate new top and bail out if new space is exhausted.
- ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address(isolate());
+ ExternalReference allocation_limit =
+ AllocationUtils::GetAllocationLimitReference(isolate(), flags);
+
+ // Align the next allocation. Storing the filler map without checking top is
+ // safe in new-space because the limit of the heap is aligned there.
+ if ((flags & DOUBLE_ALIGNMENT) != 0) {
+ ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
+ ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
+ Label aligned;
+ test(result, Immediate(kDoubleAlignmentMask));
+ j(zero, &aligned, Label::kNear);
+ if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
+ cmp(result, Operand::StaticVariable(allocation_limit));
+ j(above_equal, gc_required);
+ }
+ mov(Operand(result, 0),
+ Immediate(isolate()->factory()->one_pointer_filler_map()));
+ add(result, Immediate(kDoubleSize / 2));
+ bind(&aligned);
+ }
+
+ // Calculate new top and bail out if space is exhausted.
if (!object_size.is(result_end)) {
mov(result_end, object_size);
}
add(result_end, result);
j(carry, gc_required);
- cmp(result_end, Operand::StaticVariable(new_space_allocation_limit));
+ cmp(result_end, Operand::StaticVariable(allocation_limit));
j(above, gc_required);
// Tag result if requested.
if ((flags & TAG_OBJECT) != 0) {
- lea(result, Operand(result, kHeapObjectTag));
+ ASSERT(kHeapObjectTag == 1);
+ inc(result);
}
// Update allocation top.
- UpdateAllocationTopHelper(result_end, scratch);
+ UpdateAllocationTopHelper(result_end, scratch, flags);
}
@@ -1386,7 +1783,7 @@ void MacroAssembler::UndoAllocationInNewSpace(Register object) {
and_(object, Immediate(~kHeapObjectTagMask));
#ifdef DEBUG
cmp(object, Operand::StaticVariable(new_space_allocation_top));
- Check(below, "Undo allocation of non allocated memory");
+ Check(below, kUndoAllocationOfNonAllocatedMemory);
#endif
mov(Operand::StaticVariable(new_space_allocation_top), object);
}
@@ -1397,12 +1794,8 @@ void MacroAssembler::AllocateHeapNumber(Register result,
Register scratch2,
Label* gc_required) {
// Allocate heap number in new space.
- AllocateInNewSpace(HeapNumber::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
+ Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
+ TAG_OBJECT);
// Set the map.
mov(FieldOperand(result, HeapObject::kMapOffset),
@@ -1425,14 +1818,15 @@ void MacroAssembler::AllocateTwoByteString(Register result,
and_(scratch1, Immediate(~kObjectAlignmentMask));
// Allocate two byte string in new space.
- AllocateInNewSpace(SeqTwoByteString::kHeaderSize,
- times_1,
- scratch1,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
+ Allocate(SeqTwoByteString::kHeaderSize,
+ times_1,
+ scratch1,
+ REGISTER_VALUE_IS_INT32,
+ result,
+ scratch2,
+ scratch3,
+ gc_required,
+ TAG_OBJECT);
// Set the map, length and hash field.
mov(FieldOperand(result, HeapObject::kMapOffset),
@@ -1453,21 +1847,22 @@ void MacroAssembler::AllocateAsciiString(Register result,
Label* gc_required) {
// Calculate the number of bytes needed for the characters in the string while
// observing object alignment.
- ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
+ ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
mov(scratch1, length);
ASSERT(kCharSize == 1);
add(scratch1, Immediate(kObjectAlignmentMask));
and_(scratch1, Immediate(~kObjectAlignmentMask));
// Allocate ASCII string in new space.
- AllocateInNewSpace(SeqAsciiString::kHeaderSize,
- times_1,
- scratch1,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
+ Allocate(SeqOneByteString::kHeaderSize,
+ times_1,
+ scratch1,
+ REGISTER_VALUE_IS_INT32,
+ result,
+ scratch2,
+ scratch3,
+ gc_required,
+ TAG_OBJECT);
// Set the map, length and hash field.
mov(FieldOperand(result, HeapObject::kMapOffset),
@@ -1488,12 +1883,8 @@ void MacroAssembler::AllocateAsciiString(Register result,
ASSERT(length > 0);
// Allocate ASCII string in new space.
- AllocateInNewSpace(SeqAsciiString::SizeFor(length),
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
+ Allocate(SeqOneByteString::SizeFor(length), result, scratch1, scratch2,
+ gc_required, TAG_OBJECT);
// Set the map, length and hash field.
mov(FieldOperand(result, HeapObject::kMapOffset),
@@ -1510,12 +1901,8 @@ void MacroAssembler::AllocateTwoByteConsString(Register result,
Register scratch2,
Label* gc_required) {
  // Allocate the cons string object in new space.
- AllocateInNewSpace(ConsString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
+ Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
+ TAG_OBJECT);
// Set the map. The other fields are left uninitialized.
mov(FieldOperand(result, HeapObject::kMapOffset),
@@ -1527,14 +1914,32 @@ void MacroAssembler::AllocateAsciiConsString(Register result,
Register scratch1,
Register scratch2,
Label* gc_required) {
- // Allocate heap number in new space.
- AllocateInNewSpace(ConsString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
-
+ Label allocate_new_space, install_map;
+ AllocationFlags flags = TAG_OBJECT;
+
+ ExternalReference high_promotion_mode = ExternalReference::
+ new_space_high_promotion_mode_active_address(isolate());
+
+ test(Operand::StaticVariable(high_promotion_mode), Immediate(1));
+ j(zero, &allocate_new_space);
+
+ Allocate(ConsString::kSize,
+ result,
+ scratch1,
+ scratch2,
+ gc_required,
+ static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE));
+ jmp(&install_map);
+
+ bind(&allocate_new_space);
+ Allocate(ConsString::kSize,
+ result,
+ scratch1,
+ scratch2,
+ gc_required,
+ flags);
+
+ bind(&install_map);
// Set the map. The other fields are left uninitialized.
mov(FieldOperand(result, HeapObject::kMapOffset),
Immediate(isolate()->factory()->cons_ascii_string_map()));
@@ -1546,12 +1951,8 @@ void MacroAssembler::AllocateTwoByteSlicedString(Register result,
Register scratch2,
Label* gc_required) {
  // Allocate the sliced string object in new space.
- AllocateInNewSpace(SlicedString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
+ Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
+ TAG_OBJECT);
// Set the map. The other fields are left uninitialized.
mov(FieldOperand(result, HeapObject::kMapOffset),
@@ -1564,12 +1965,8 @@ void MacroAssembler::AllocateAsciiSlicedString(Register result,
Register scratch2,
Label* gc_required) {
  // Allocate the sliced string object in new space.
- AllocateInNewSpace(SlicedString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
+ Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
+ TAG_OBJECT);
// Set the map. The other fields are left uninitialized.
mov(FieldOperand(result, HeapObject::kMapOffset),
@@ -1738,13 +2135,14 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
ASSERT(AllowThisStubCall(stub)); // Calls are not allowed in some stubs.
- call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
+ call(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, ast_id);
}
void MacroAssembler::TailCallStub(CodeStub* stub) {
- ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe());
- jmp(stub->GetCode(), RelocInfo::CODE_TARGET);
+ ASSERT(allow_stub_calls_ ||
+ stub->CompilingCallsToThisStubIsGCSafe(isolate()));
+ jmp(stub->GetCode(isolate()), RelocInfo::CODE_TARGET);
}
@@ -1756,7 +2154,7 @@ void MacroAssembler::StubReturn(int argc) {
bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
- return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe();
+ return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe(isolate());
}
@@ -1787,22 +2185,9 @@ void MacroAssembler::IndexFromHash(Register hash, Register index) {
}
-void MacroAssembler::CallRuntime(Runtime::FunctionId id, int num_arguments) {
- CallRuntime(Runtime::FunctionForId(id), num_arguments);
-}
-
-
-void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
- const Runtime::Function* function = Runtime::FunctionForId(id);
- Set(eax, Immediate(function->nargs));
- mov(ebx, Immediate(ExternalReference(function, isolate())));
- CEntryStub ces(1, kSaveFPRegs);
- CallStub(&ces);
-}
-
-
void MacroAssembler::CallRuntime(const Runtime::Function* f,
- int num_arguments) {
+ int num_arguments,
+ SaveFPRegsMode save_doubles) {
// If the expected number of arguments of the runtime function is
// constant, we check that the actual number of arguments match the
// expectation.
@@ -1817,7 +2202,8 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f,
// smarter.
Set(eax, Immediate(num_arguments));
mov(ebx, Immediate(ExternalReference(f, isolate())));
- CEntryStub ces(1);
+ CEntryStub ces(1, CpuFeatures::IsSupported(SSE2) ? save_doubles
+ : kDontSaveFPRegs);
CallStub(&ces);
}
@@ -1853,94 +2239,94 @@ void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
}
-// If true, a Handle<T> returned by value from a function with cdecl calling
-// convention will be returned directly as a value of location_ field in a
-// register eax.
-// If false, it is returned as a pointer to a preallocated by caller memory
-// region. Pointer to this region should be passed to a function as an
-// implicit first argument.
-#if defined(USING_BSD_ABI) || defined(__MINGW32__) || defined(__CYGWIN__)
-static const bool kReturnHandlesDirectly = true;
-#else
-static const bool kReturnHandlesDirectly = false;
-#endif
-
-
Operand ApiParameterOperand(int index) {
- return Operand(
- esp, (index + (kReturnHandlesDirectly ? 0 : 1)) * kPointerSize);
+ return Operand(esp, index * kPointerSize);
}
void MacroAssembler::PrepareCallApiFunction(int argc) {
- if (kReturnHandlesDirectly) {
- EnterApiExitFrame(argc);
- // When handles are returned directly we don't have to allocate extra
- // space for and pass an out parameter.
- if (emit_debug_code()) {
- mov(esi, Immediate(BitCast<int32_t>(kZapValue)));
- }
- } else {
- // We allocate two additional slots: return value and pointer to it.
- EnterApiExitFrame(argc + 2);
-
- // The argument slots are filled as follows:
- //
- // n + 1: output slot
- // n: arg n
- // ...
- // 1: arg1
- // 0: pointer to the output slot
-
- lea(esi, Operand(esp, (argc + 1) * kPointerSize));
- mov(Operand(esp, 0 * kPointerSize), esi);
- if (emit_debug_code()) {
- mov(Operand(esi, 0), Immediate(0));
- }
+ EnterApiExitFrame(argc);
+ if (emit_debug_code()) {
+ mov(esi, Immediate(BitCast<int32_t>(kZapValue)));
}
}
-void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
- int stack_space) {
+void MacroAssembler::CallApiFunctionAndReturn(
+ Address function_address,
+ Address thunk_address,
+ Operand thunk_last_arg,
+ int stack_space,
+ Operand return_value_operand,
+ Operand* context_restore_operand) {
ExternalReference next_address =
- ExternalReference::handle_scope_next_address();
+ ExternalReference::handle_scope_next_address(isolate());
ExternalReference limit_address =
- ExternalReference::handle_scope_limit_address();
+ ExternalReference::handle_scope_limit_address(isolate());
ExternalReference level_address =
- ExternalReference::handle_scope_level_address();
+ ExternalReference::handle_scope_level_address(isolate());
// Allocate HandleScope in callee-save registers.
mov(ebx, Operand::StaticVariable(next_address));
mov(edi, Operand::StaticVariable(limit_address));
add(Operand::StaticVariable(level_address), Immediate(1));
+ if (FLAG_log_timer_events) {
+ FrameScope frame(this, StackFrame::MANUAL);
+ PushSafepointRegisters();
+ PrepareCallCFunction(1, eax);
+ mov(Operand(esp, 0),
+ Immediate(ExternalReference::isolate_address(isolate())));
+ CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1);
+ PopSafepointRegisters();
+ }
+
+
+ Label profiler_disabled;
+ Label end_profiler_check;
+ bool* is_profiling_flag =
+ isolate()->cpu_profiler()->is_profiling_address();
+ STATIC_ASSERT(sizeof(*is_profiling_flag) == 1);
+ mov(eax, Immediate(reinterpret_cast<Address>(is_profiling_flag)));
+ cmpb(Operand(eax, 0), 0);
+ j(zero, &profiler_disabled);
+
+ // Additional parameter is the address of the actual getter function.
+ mov(thunk_last_arg, Immediate(function_address));
+ // Call the api function.
+ call(thunk_address, RelocInfo::RUNTIME_ENTRY);
+ jmp(&end_profiler_check);
+
+ bind(&profiler_disabled);
// Call the api function.
call(function_address, RelocInfo::RUNTIME_ENTRY);
+ bind(&end_profiler_check);
- if (!kReturnHandlesDirectly) {
- // PrepareCallApiFunction saved pointer to the output slot into
- // callee-save register esi.
- mov(eax, Operand(esi, 0));
+ if (FLAG_log_timer_events) {
+ FrameScope frame(this, StackFrame::MANUAL);
+ PushSafepointRegisters();
+ PrepareCallCFunction(1, eax);
+ mov(Operand(esp, 0),
+ Immediate(ExternalReference::isolate_address(isolate())));
+ CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1);
+ PopSafepointRegisters();
}
- Label empty_handle;
Label prologue;
+ // Load the value from ReturnValue
+ mov(eax, return_value_operand);
+
Label promote_scheduled_exception;
+ Label exception_handled;
Label delete_allocated_handles;
Label leave_exit_frame;
- // Check if the result handle holds 0.
- test(eax, eax);
- j(zero, &empty_handle);
- // It was non-zero. Dereference to get the result value.
- mov(eax, Operand(eax, 0));
bind(&prologue);
// No more valid handles (the result handle was the last one). Restore
// previous handle scope.
mov(Operand::StaticVariable(next_address), ebx);
sub(Operand::StaticVariable(level_address), Immediate(1));
- Assert(above_equal, "Invalid HandleScope level");
+ Assert(above_equal, kInvalidHandleScopeLevel);
cmp(edi, Operand::StaticVariable(limit_address));
j(not_equal, &delete_allocated_handles);
bind(&leave_exit_frame);
@@ -1951,6 +2337,7 @@ void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
cmp(Operand::StaticVariable(scheduled_exception_address),
Immediate(isolate()->factory()->the_hole_value()));
j(not_equal, &promote_scheduled_exception);
+ bind(&exception_handled);
#if ENABLE_EXTRA_CHECKS
// Check if the function returned a valid JavaScript value.
@@ -1982,21 +2369,24 @@ void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
cmp(return_value, isolate()->factory()->null_value());
j(equal, &ok, Label::kNear);
- Abort("API call returned invalid object");
+ Abort(kAPICallReturnedInvalidObject);
bind(&ok);
#endif
- LeaveApiExitFrame();
+ bool restore_context = context_restore_operand != NULL;
+ if (restore_context) {
+ mov(esi, *context_restore_operand);
+ }
+ LeaveApiExitFrame(!restore_context);
ret(stack_space * kPointerSize);
- bind(&empty_handle);
- // It was zero; the result is undefined.
- mov(eax, isolate()->factory()->undefined_value());
- jmp(&prologue);
-
bind(&promote_scheduled_exception);
- TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
+ {
+ FrameScope frame(this, StackFrame::INTERNAL);
+ CallRuntime(Runtime::kPromoteScheduledException, 0);
+ }
+ jmp(&exception_handled);
// HandleScope limit has changed. Delete allocated extensions.
ExternalReference delete_extensions =
@@ -2004,7 +2394,8 @@ void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
bind(&delete_allocated_handles);
mov(Operand::StaticVariable(limit_address), edi);
mov(edi, eax);
- mov(Operand(esp, 0), Immediate(ExternalReference::isolate_address()));
+ mov(Operand(esp, 0),
+ Immediate(ExternalReference::isolate_address(isolate())));
mov(eax, Immediate(delete_extensions));
call(eax);
mov(eax, edi);
@@ -2016,7 +2407,7 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& ext) {
// Set the entry point and jump to the C entry runtime stub.
mov(ebx, Immediate(ext));
CEntryStub ces(1);
- jmp(ces.GetCode(), RelocInfo::CODE_TARGET);
+ jmp(ces.GetCode(isolate()), RelocInfo::CODE_TARGET);
}
@@ -2196,6 +2587,7 @@ void MacroAssembler::InvokeFunction(Register fun,
void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
+ const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
const CallWrapper& call_wrapper,
@@ -2207,7 +2599,6 @@ void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
LoadHeapObject(edi, function);
mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
- ParameterCount expected(function->shared()->formal_parameter_count());
// We call indirectly through the code field in the function to
// allow recompilation to take effect without changing any of the
// call sites.
@@ -2272,7 +2663,7 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
if (emit_debug_code()) {
cmp(FieldOperand(dst, HeapObject::kMapOffset),
isolate()->factory()->with_context_map());
- Check(not_equal, "Variable resolved to with context.");
+ Check(not_equal, kVariableResolvedToWithContext);
}
}
@@ -2328,12 +2719,23 @@ void MacroAssembler::LoadInitialArrayMap(
}
+void MacroAssembler::LoadGlobalContext(Register global_context) {
+ // Load the global or builtins object from the current context.
+ mov(global_context,
+ Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ // Load the native context from the global or builtins object.
+ mov(global_context,
+ FieldOperand(global_context, GlobalObject::kNativeContextOffset));
+}
+
+
void MacroAssembler::LoadGlobalFunction(int index, Register function) {
// Load the global or builtins object from the current context.
mov(function,
Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
// Load the native context from the global or builtins object.
- mov(function, FieldOperand(function, GlobalObject::kNativeContextOffset));
+ mov(function,
+ FieldOperand(function, GlobalObject::kNativeContextOffset));
// Load the function from the native context.
mov(function, Operand(function, Context::SlotOffset(index)));
}
@@ -2348,7 +2750,7 @@ void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
CheckMap(map, isolate()->factory()->meta_map(), &fail, DO_SMI_CHECK);
jmp(&ok);
bind(&fail);
- Abort("Global functions must have initial map");
+ Abort(kGlobalFunctionsMustHaveInitialMap);
bind(&ok);
}
}
@@ -2387,21 +2789,32 @@ int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
void MacroAssembler::LoadHeapObject(Register result,
Handle<HeapObject> object) {
+ AllowDeferredHandleDereference embedding_raw_address;
if (isolate()->heap()->InNewSpace(*object)) {
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(object);
- mov(result, Operand::Cell(cell));
+ Handle<Cell> cell = isolate()->factory()->NewCell(object);
+ mov(result, Operand::ForCell(cell));
} else {
mov(result, object);
}
}
+void MacroAssembler::CmpHeapObject(Register reg, Handle<HeapObject> object) {
+ AllowDeferredHandleDereference using_raw_address;
+ if (isolate()->heap()->InNewSpace(*object)) {
+ Handle<Cell> cell = isolate()->factory()->NewCell(object);
+ cmp(reg, Operand::ForCell(cell));
+ } else {
+ cmp(reg, object);
+ }
+}
+
+
void MacroAssembler::PushHeapObject(Handle<HeapObject> object) {
+ AllowDeferredHandleDereference using_raw_address;
if (isolate()->heap()->InNewSpace(*object)) {
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(object);
- push(Operand::Cell(cell));
+ Handle<Cell> cell = isolate()->factory()->NewCell(object);
+ push(Operand::ForCell(cell));
} else {
Push(object);
}
@@ -2425,6 +2838,27 @@ void MacroAssembler::Ret(int bytes_dropped, Register scratch) {
}
+void MacroAssembler::VerifyX87StackDepth(uint32_t depth) {
+ // Make sure the floating point stack is either empty or has depth items.
+ ASSERT(depth <= 7);
+ // This is very expensive.
+ ASSERT(FLAG_debug_code && FLAG_enable_slow_asserts);
+
+ // The top-of-stack (tos) is 7 if there is one item pushed.
+ int tos = (8 - depth) % 8;
+ const int kTopMask = 0x3800;
+ push(eax);
+ fwait();
+ fnstsw_ax();
+ and_(eax, kTopMask);
+ shr(eax, 11);
+ cmp(eax, Immediate(tos));
+ Check(equal, kUnexpectedFPUStackDepthAfterInstruction);
+ fnclex();
+ pop(eax);
+}
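
VerifyX87StackDepth reads the x87 status word, whose bits 11..13 hold the top-of-stack index; pushing depth items onto an empty stack leaves that index at (8 - depth) % 8. A C++ sketch of the comparison (hypothetical helper, not part of the patch):

#include <cstdint>

// Sketch of VerifyX87StackDepth's status-word check: bits 11..13 of the x87
// status word hold the top-of-stack index, and pushing `depth` items onto an
// empty stack leaves it at (8 - depth) % 8.
bool X87DepthMatches(uint16_t status_word, uint32_t depth) {
  const uint16_t kTopMask = 0x3800;
  int top = (status_word & kTopMask) >> 11;
  return top == static_cast<int>((8 - depth) % 8);
}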
+
+
void MacroAssembler::Drop(int stack_elements) {
if (stack_elements > 0) {
add(esp, Immediate(stack_elements * kPointerSize));
@@ -2502,8 +2936,8 @@ void MacroAssembler::DecrementCounter(Condition cc,
}
-void MacroAssembler::Assert(Condition cc, const char* msg) {
- if (emit_debug_code()) Check(cc, msg);
+void MacroAssembler::Assert(Condition cc, BailoutReason reason) {
+ if (emit_debug_code()) Check(cc, reason);
}
@@ -2520,16 +2954,16 @@ void MacroAssembler::AssertFastElements(Register elements) {
cmp(FieldOperand(elements, HeapObject::kMapOffset),
Immediate(factory->fixed_cow_array_map()));
j(equal, &ok);
- Abort("JSObject with fast elements map has slow elements");
+ Abort(kJSObjectWithFastElementsMapHasSlowElements);
bind(&ok);
}
}
-void MacroAssembler::Check(Condition cc, const char* msg) {
+void MacroAssembler::Check(Condition cc, BailoutReason reason) {
Label L;
j(cc, &L);
- Abort(msg);
+ Abort(reason);
// will not return here
bind(&L);
}
@@ -2550,12 +2984,13 @@ void MacroAssembler::CheckStackAlignment() {
}
-void MacroAssembler::Abort(const char* msg) {
+void MacroAssembler::Abort(BailoutReason reason) {
// We want to pass the msg string like a smi to avoid GC
  // problems; however, msg is not guaranteed to be aligned
// properly. Instead, we pass an aligned pointer that is
// a proper v8 smi, but also pass the alignment difference
// from the real pointer as a smi.
+ const char* msg = GetBailoutReason(reason);
intptr_t p1 = reinterpret_cast<intptr_t>(msg);
intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
@@ -2564,6 +2999,11 @@ void MacroAssembler::Abort(const char* msg) {
RecordComment("Abort message: ");
RecordComment(msg);
}
+
+ if (FLAG_trap_on_abort) {
+ int3();
+ return;
+ }
#endif
push(eax);
@@ -2606,6 +3046,88 @@ void MacroAssembler::LoadPowerOf2(XMMRegister dst,
}
+void MacroAssembler::LookupNumberStringCache(Register object,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* not_found) {
+ // Use of registers. Register result is used as a temporary.
+ Register number_string_cache = result;
+ Register mask = scratch1;
+ Register scratch = scratch2;
+
+ // Load the number string cache.
+ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
+ // Make the hash mask from the length of the number string cache. It
+ // contains two elements (number and string) for each cache entry.
+ mov(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
+ shr(mask, kSmiTagSize + 1); // Untag length and divide it by two.
+ sub(mask, Immediate(1)); // Make mask.
+
+ // Calculate the entry in the number string cache. The hash value in the
+ // number string cache for smis is just the smi value, and the hash for
+ // doubles is the xor of the upper and lower words. See
+ // Heap::GetNumberStringCache.
+ Label smi_hash_calculated;
+ Label load_result_from_cache;
+ Label not_smi;
+ STATIC_ASSERT(kSmiTag == 0);
+ JumpIfNotSmi(object, &not_smi, Label::kNear);
+ mov(scratch, object);
+ SmiUntag(scratch);
+ jmp(&smi_hash_calculated, Label::kNear);
+ bind(&not_smi);
+ cmp(FieldOperand(object, HeapObject::kMapOffset),
+ isolate()->factory()->heap_number_map());
+ j(not_equal, not_found);
+ STATIC_ASSERT(8 == kDoubleSize);
+ mov(scratch, FieldOperand(object, HeapNumber::kValueOffset));
+ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
+ // Object is heap number and hash is now in scratch. Calculate cache index.
+ and_(scratch, mask);
+ Register index = scratch;
+ Register probe = mask;
+ mov(probe,
+ FieldOperand(number_string_cache,
+ index,
+ times_twice_pointer_size,
+ FixedArray::kHeaderSize));
+ JumpIfSmi(probe, not_found);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatureScope fscope(this, SSE2);
+ movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
+ ucomisd(xmm0, FieldOperand(probe, HeapNumber::kValueOffset));
+ } else {
+ fld_d(FieldOperand(object, HeapNumber::kValueOffset));
+ fld_d(FieldOperand(probe, HeapNumber::kValueOffset));
+ FCmp();
+ }
+ j(parity_even, not_found); // Bail out if NaN is involved.
+ j(not_equal, not_found); // The cache did not contain this value.
+ jmp(&load_result_from_cache, Label::kNear);
+
+ bind(&smi_hash_calculated);
+ // Object is smi and hash is now in scratch. Calculate cache index.
+ and_(scratch, mask);
+ // Check if the entry is the smi we are looking for.
+ cmp(object,
+ FieldOperand(number_string_cache,
+ index,
+ times_twice_pointer_size,
+ FixedArray::kHeaderSize));
+ j(not_equal, not_found);
+
+ // Get the result from the cache.
+ bind(&load_result_from_cache);
+ mov(result,
+ FieldOperand(number_string_cache,
+ index,
+ times_twice_pointer_size,
+ FixedArray::kHeaderSize + kPointerSize));
+ IncrementCounter(isolate()->counters()->number_to_string_native(), 1);
+}
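
The cache lookup above mirrors Heap::GetNumberStringCache: smis hash to their untagged value, heap numbers to the xor of their two 32-bit halves, and the index is masked by half the cache length minus one because each entry occupies two slots. A C++ sketch of the index computation (hypothetical helper, not part of the patch):

#include <cstdint>
#include <cstring>

// Sketch of the cache-index computation used by LookupNumberStringCache (see
// Heap::GetNumberStringCache): the mask is half the cache length minus one
// because each entry takes two slots (number, string).
uint32_t NumberStringCacheIndex(double number, bool is_smi,
                                uint32_t cache_length) {
  uint32_t mask = cache_length / 2 - 1;
  uint32_t hash;
  if (is_smi) {
    hash = static_cast<uint32_t>(static_cast<int32_t>(number));  // untagged smi value
  } else {
    uint64_t bits;
    std::memcpy(&bits, &number, sizeof(bits));
    hash = static_cast<uint32_t>(bits) ^ static_cast<uint32_t>(bits >> 32);
  }
  return hash & mask;
}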
+
+
void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
Register instance_type,
Register scratch,
@@ -2615,7 +3137,7 @@ void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
}
and_(scratch,
kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask);
- cmp(scratch, kStringTag | kSeqStringTag | kAsciiStringTag);
+ cmp(scratch, kStringTag | kSeqStringTag | kOneByteStringTag);
j(not_equal, failure);
}
@@ -2640,7 +3162,8 @@ void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register object1,
// Check that both are flat ASCII strings.
const int kFlatAsciiStringMask =
kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
- const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
+ const int kFlatAsciiStringTag =
+ kStringTag | kOneByteStringTag | kSeqStringTag;
// Interleave bits from both instance types and compare them in one check.
ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
and_(scratch1, kFlatAsciiStringMask);
@@ -2651,6 +3174,20 @@ void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register object1,
}
+void MacroAssembler::JumpIfNotUniqueName(Operand operand,
+ Label* not_unique_name,
+ Label::Distance distance) {
+ STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
+ Label succeed;
+ test(operand, Immediate(kIsNotStringMask | kIsNotInternalizedMask));
+ j(zero, &succeed);
+ cmpb(operand, static_cast<uint8_t>(SYMBOL_TYPE));
+ j(not_equal, not_unique_name, distance);
+
+ bind(&succeed);
+}
+
+
void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
int frame_alignment = OS::ActivationFrameAlignment();
if (frame_alignment != 0) {
@@ -2770,6 +3307,18 @@ void MacroAssembler::CheckPageFlagForMap(
}
+void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
+ Register scratch,
+ Label* if_deprecated) {
+ if (map->CanBeDeprecated()) {
+ mov(scratch, map);
+ mov(scratch, FieldOperand(scratch, Map::kBitField3Offset));
+ and_(scratch, Immediate(Smi::FromInt(Map::Deprecated::kMask)));
+ j(not_zero, if_deprecated);
+ }
+}
+
+
void MacroAssembler::JumpIfBlack(Register object,
Register scratch0,
Register scratch1,
@@ -2876,7 +3425,7 @@ void MacroAssembler::EnsureNotWhite(
// Check for heap-number
mov(map, FieldOperand(value, HeapObject::kMapOffset));
- cmp(map, FACTORY->heap_number_map());
+ cmp(map, isolate()->factory()->heap_number_map());
j(not_equal, &not_heap_number, Label::kNear);
mov(length, Immediate(HeapNumber::kSize));
jmp(&is_data_object, Label::kNear);
@@ -2906,15 +3455,15 @@ void MacroAssembler::EnsureNotWhite(
bind(&not_external);
// Sequential string, either ASCII or UC16.
- ASSERT(kAsciiStringTag == 0x04);
+ ASSERT(kOneByteStringTag == 0x04);
and_(length, Immediate(kStringEncodingMask));
xor_(length, Immediate(kStringEncodingMask));
add(length, Immediate(0x04));
// Value now either 4 (if ASCII) or 8 (if UC16), i.e., char-size shifted
// by 2. If we multiply the string length as smi by this, it still
// won't overflow a 32-bit value.
- ASSERT_EQ(SeqAsciiString::kMaxSize, SeqTwoByteString::kMaxSize);
- ASSERT(SeqAsciiString::kMaxSize <=
+ ASSERT_EQ(SeqOneByteString::kMaxSize, SeqTwoByteString::kMaxSize);
+ ASSERT(SeqOneByteString::kMaxSize <=
static_cast<int>(0xffffffffu >> (2 + kSmiTagSize)));
imul(length, FieldOperand(value, String::kLengthOffset));
shr(length, 2 + kSmiTagSize + kSmiShiftSize);
@@ -2932,7 +3481,7 @@ void MacroAssembler::EnsureNotWhite(
if (emit_debug_code()) {
mov(length, Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
cmp(length, Operand(bitmap_scratch, MemoryChunk::kSizeOffset));
- Check(less_equal, "Live Bytes Count overflow chunk size");
+ Check(less_equal, kLiveBytesCountOverflowChunkSize);
}
bind(&done);
@@ -2981,6 +3530,27 @@ void MacroAssembler::CheckEnumCache(Label* call_runtime) {
j(not_equal, &next);
}
+
+void MacroAssembler::TestJSArrayForAllocationMemento(
+ Register receiver_reg,
+ Register scratch_reg,
+ Label* no_memento_found) {
+ ExternalReference new_space_start =
+ ExternalReference::new_space_start(isolate());
+ ExternalReference new_space_allocation_top =
+ ExternalReference::new_space_allocation_top_address(isolate());
+
+ lea(scratch_reg, Operand(receiver_reg,
+ JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
+ cmp(scratch_reg, Immediate(new_space_start));
+ j(less, no_memento_found);
+ cmp(scratch_reg, Operand::StaticVariable(new_space_allocation_top));
+ j(greater, no_memento_found);
+ cmp(MemOperand(scratch_reg, -AllocationMemento::kSize),
+ Immediate(isolate()->factory()->allocation_memento_map()));
+}
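
TestJSArrayForAllocationMemento only trusts a memento candidate that sits directly after the JSArray and inside new space, below the current allocation top; the caller then checks the map word just before that address. A C++ sketch of the range check (hypothetical helper, not part of the patch):

#include <cstdint>

// Sketch of the address-range test performed above: a potential
// AllocationMemento directly follows the JSArray, and it is only examined if
// that address lies inside new space, below the current allocation top.
bool MementoCandidateInNewSpace(uintptr_t memento_end, uintptr_t new_space_start,
                                uintptr_t new_space_top) {
  if (memento_end < new_space_start) return false;  // j(less, no_memento_found)
  if (memento_end > new_space_top) return false;    // j(greater, no_memento_found)
  return true;  // caller still compares the map word at memento_end - kSize
}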
+
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_IA32
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h
index e48d0e75c..30f8a8dfb 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/macro-assembler-ia32.h
@@ -35,18 +35,6 @@
namespace v8 {
namespace internal {
-// Flags used for the AllocateInNewSpace functions.
-enum AllocationFlags {
- // No special flags.
- NO_ALLOCATION_FLAGS = 0,
- // Return the pointer to the allocated already tagged as a heap object.
- TAG_OBJECT = 1 << 0,
- // The content of the result register already contains the allocation top in
- // new space.
- RESULT_CONTAINS_TOP = 1 << 1
-};
-
-
// Convenience for platform-independent signatures. We do not normally
// distinguish memory operands from other operands on ia32.
typedef Operand MemOperand;
@@ -55,6 +43,12 @@ enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
+enum RegisterValueType {
+ REGISTER_VALUE_IS_SMI,
+ REGISTER_VALUE_IS_INT32
+};
+
+
bool AreAliased(Register r1, Register r2, Register r3, Register r4);
@@ -67,6 +61,15 @@ class MacroAssembler: public Assembler {
// macro assembler.
MacroAssembler(Isolate* isolate, void* buffer, int size);
+ // Operations on roots in the root-array.
+ void LoadRoot(Register destination, Heap::RootListIndex index);
+ void StoreRoot(Register source, Register scratch, Heap::RootListIndex index);
+ void CompareRoot(Register with, Register scratch, Heap::RootListIndex index);
+ // These methods can only be used with constant roots (i.e. non-writable
+ // and not in new space).
+ void CompareRoot(Register with, Heap::RootListIndex index);
+ void CompareRoot(const Operand& with, Heap::RootListIndex index);
+
// ---------------------------------------------------------------------------
// GC Support
enum RememberedSetFinalAction {
@@ -97,6 +100,10 @@ class MacroAssembler: public Assembler {
Label* condition_met,
Label::Distance condition_met_distance = Label::kFar);
+ void CheckMapDeprecated(Handle<Map> map,
+ Register scratch,
+ Label* if_deprecated);
+
// Check if object is in new space. Jumps if the object is not in new space.
// The register scratch can be object itself, but scratch will be clobbered.
void JumpIfNotInNewSpace(Register object,
@@ -218,6 +225,9 @@ class MacroAssembler: public Assembler {
void DebugBreak();
#endif
+ // Generates function and stub prologue code.
+ void Prologue(PrologueFrameMode frame_mode);
+
// Enter specific kind of exit frame. Expects the number of
// arguments in register eax and sets up the number of arguments in
// register edi and the pointer to the first argument in register
@@ -233,7 +243,7 @@ class MacroAssembler: public Assembler {
// Leave the current exit frame. Expects the return value in
// register eax (untouched).
- void LeaveApiExitFrame();
+ void LeaveApiExitFrame(bool restore_context);
// Find the function context up the context chain.
void LoadContext(Register dst, int context_chain_length);
@@ -255,6 +265,8 @@ class MacroAssembler: public Assembler {
Register map_out,
bool can_have_holes);
+ void LoadGlobalContext(Register global_context);
+
// Load the global function with the given index.
void LoadGlobalFunction(int index, Register function);
@@ -272,9 +284,11 @@ class MacroAssembler: public Assembler {
void LoadFromSafepointRegisterSlot(Register dst, Register src);
void LoadHeapObject(Register result, Handle<HeapObject> object);
+ void CmpHeapObject(Register reg, Handle<HeapObject> object);
void PushHeapObject(Handle<HeapObject> object);
void LoadObject(Register result, Handle<Object> object) {
+ AllowDeferredHandleDereference heap_object_check;
if (object->IsHeapObject()) {
LoadHeapObject(result, Handle<HeapObject>::cast(object));
} else {
@@ -282,6 +296,15 @@ class MacroAssembler: public Assembler {
}
}
+ void CmpObject(Register reg, Handle<Object> object) {
+ AllowDeferredHandleDereference heap_object_check;
+ if (object->IsHeapObject()) {
+ CmpHeapObject(reg, Handle<HeapObject>::cast(object));
+ } else {
+ cmp(reg, Immediate(object));
+ }
+ }
+
// ---------------------------------------------------------------------------
// JavaScript invokes
@@ -324,6 +347,7 @@ class MacroAssembler: public Assembler {
CallKind call_kind);
void InvokeFunction(Handle<JSFunction> function,
+ const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
const CallWrapper& call_wrapper,
@@ -345,15 +369,17 @@ class MacroAssembler: public Assembler {
void Set(Register dst, const Immediate& x);
void Set(const Operand& dst, const Immediate& x);
+ // The cvtsi2sd instruction only writes to the low 64 bits of the dst
+ // register, which hinders register renaming and makes dependence chains
+ // longer. So we use xorps to clear the dst register before cvtsi2sd to
+ // avoid this issue.
+ void Cvtsi2sd(XMMRegister dst, Register src) { Cvtsi2sd(dst, Operand(src)); }
+ void Cvtsi2sd(XMMRegister dst, const Operand& src);
+
// Support for constant splitting.
bool IsUnsafeImmediate(const Immediate& x);
void SafeSet(Register dst, const Immediate& x);
void SafePush(const Immediate& x);
- // Compare against a known root, e.g. undefined, null, true, ...
- void CompareRoot(Register with, Heap::RootListIndex index);
- void CompareRoot(const Operand& with, Heap::RootListIndex index);
-
// Compare object type for heap object.
// Incoming register is heap_object and outgoing register is map.
void CmpObjectType(Register heap_object, InstanceType type, Register map);
@@ -388,7 +414,8 @@ class MacroAssembler: public Assembler {
Register scratch1,
XMMRegister scratch2,
Label* fail,
- bool specialize_for_processor);
+ bool specialize_for_processor,
+ int offset = 0);
// Compare an object's map with the specified map and its transitioned
// elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. FLAGS are set with
@@ -396,8 +423,7 @@ class MacroAssembler: public Assembler {
// sequences branches to early_success.
void CompareMap(Register obj,
Handle<Map> map,
- Label* early_success,
- CompareMapMode mode = REQUIRE_EXACT_MAP);
+ Label* early_success);
// Check if the map of an object is equal to a specified map and branch to
// label if not. Skip the smi check if not required (object is known to be a
@@ -406,13 +432,13 @@ class MacroAssembler: public Assembler {
void CheckMap(Register obj,
Handle<Map> map,
Label* fail,
- SmiCheckType smi_check_type,
- CompareMapMode mode = REQUIRE_EXACT_MAP);
+ SmiCheckType smi_check_type);
// Check if the map of an object is equal to a specified map and branch to a
// specified target if equal. Skip the smi check if not required (object is
// known to be a heap object)
void DispatchMap(Register obj,
+ Register unused,
Handle<Map> map,
Handle<Code> success,
SmiCheckType smi_check_type);
@@ -426,6 +452,15 @@ class MacroAssembler: public Assembler {
Register map,
Register instance_type);
+ // Check if the object in register heap_object is a name. Afterwards the
+ // register map contains the object map and the register instance_type
+ // contains the instance_type. The registers map and instance_type can be the
+ // same in which case it contains the instance type afterwards. Either of the
+ // registers map and instance_type can be the same as heap_object.
+ Condition IsObjectNameType(Register heap_object,
+ Register map,
+ Register instance_type);
+
// Check if a heap object's type is in the JSObject range, not including
// JSFunction. The object's map will be loaded in the map register.
// Any or all of the three registers may be the same.
@@ -448,6 +483,21 @@ class MacroAssembler: public Assembler {
XMMRegister scratch_reg,
Register result_reg);
+ void SlowTruncateToI(Register result_reg, Register input_reg,
+ int offset = HeapNumber::kValueOffset - kHeapObjectTag);
+
+ void TruncateHeapNumberToI(Register result_reg, Register input_reg);
+ void TruncateDoubleToI(Register result_reg, XMMRegister input_reg);
+ void TruncateX87TOSToI(Register result_reg);
+
+ void DoubleToI(Register result_reg, XMMRegister input_reg,
+ XMMRegister scratch, MinusZeroMode minus_zero_mode,
+ Label* conversion_failed, Label::Distance dst = Label::kFar);
+ void X87TOSToI(Register result_reg, MinusZeroMode minus_zero_mode,
+ Label* conversion_failed, Label::Distance dst = Label::kFar);
+
+ void TaggedToI(Register result_reg, Register input_reg, XMMRegister temp,
+ MinusZeroMode minus_zero_mode, Label* lost_precision);
// Smi tagging support.
void SmiTag(Register reg) {
@@ -468,6 +518,7 @@ class MacroAssembler: public Assembler {
}
void LoadUint32(XMMRegister dst, Register src, XMMRegister scratch);
+ void LoadUint32NoSSE2(Register src);
// Jump if the register contains a smi.
inline void JumpIfSmi(Register value,
@@ -516,6 +567,9 @@ class MacroAssembler: public Assembler {
// Abort execution if argument is not a string, enabled via --debug-code.
void AssertString(Register object);
+ // Abort execution if argument is not a name, enabled via --debug-code.
+ void AssertName(Register object);
+
// ---------------------------------------------------------------------------
// Exception handling
@@ -538,7 +592,8 @@ class MacroAssembler: public Assembler {
// on access to global objects across environments. The holder register
// is left untouched, but the scratch register is clobbered.
void CheckAccessGlobalProxy(Register holder_reg,
- Register scratch,
+ Register scratch1,
+ Register scratch2,
Label* miss);
void GetNumberHash(Register r0, Register scratch);
@@ -555,38 +610,39 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// Allocation support
- // Allocate an object in new space. If the new space is exhausted control
- // continues at the gc_required label. The allocated object is returned in
- // result and end of the new object is returned in result_end. The register
- // scratch can be passed as no_reg in which case an additional object
- // reference will be added to the reloc info. The returned pointers in result
- // and result_end have not yet been tagged as heap objects. If
- // result_contains_top_on_entry is true the content of result is known to be
- // the allocation top on entry (could be result_end from a previous call to
- // AllocateInNewSpace). If result_contains_top_on_entry is true scratch
+ // Allocate an object in new space or old pointer space. If the given space
+ // is exhausted control continues at the gc_required label. The allocated
+ // object is returned in result and end of the new object is returned in
+ // result_end. The register scratch can be passed as no_reg in which case
+ // an additional object reference will be added to the reloc info. The
+ // returned pointers in result and result_end have not yet been tagged as
+ // heap objects. If result_contains_top_on_entry is true the content of
+ // result is known to be the allocation top on entry (could be result_end
+ // from a previous call). If result_contains_top_on_entry is true scratch
// should be no_reg as it is never used.
- void AllocateInNewSpace(int object_size,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags);
-
- void AllocateInNewSpace(int header_size,
- ScaleFactor element_size,
- Register element_count,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags);
-
- void AllocateInNewSpace(Register object_size,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags);
+ void Allocate(int object_size,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ AllocationFlags flags);
+
+ void Allocate(int header_size,
+ ScaleFactor element_size,
+ Register element_count,
+ RegisterValueType element_count_type,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ AllocationFlags flags);
+
+ void Allocate(Register object_size,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ AllocationFlags flags);
// Undo allocation in new space. The object passed and objects allocated after
// it will no longer be allocated. Make sure that no pointers are left to the
@@ -708,11 +764,18 @@ class MacroAssembler: public Assembler {
void StubReturn(int argc);
// Call a runtime routine.
- void CallRuntime(const Runtime::Function* f, int num_arguments);
- void CallRuntimeSaveDoubles(Runtime::FunctionId id);
+ void CallRuntime(const Runtime::Function* f,
+ int num_arguments,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs);
+ void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
+ const Runtime::Function* function = Runtime::FunctionForId(id);
+ CallRuntime(function, function->nargs, kSaveFPRegs);
+ }
// Convenience function: Same as above, but takes the fid instead.
- void CallRuntime(Runtime::FunctionId id, int num_arguments);
+ void CallRuntime(Runtime::FunctionId id, int num_arguments) {
+ CallRuntime(Runtime::FunctionForId(id), num_arguments);
+ }
// Convenience function: call an external reference.
void CallExternalReference(ExternalReference ref, int num_arguments);
@@ -757,7 +820,12 @@ class MacroAssembler: public Assembler {
// from handle and propagates exceptions. Clobbers ebx, edi and
// caller-save registers. Restores context. On return removes
// stack_space * kPointerSize (GCed).
- void CallApiFunctionAndReturn(Address function_address, int stack_space);
+ void CallApiFunctionAndReturn(Address function_address,
+ Address thunk_address,
+ Operand thunk_last_arg,
+ int stack_space,
+ Operand return_value_operand,
+ Operand* context_restore_operand);
// Jump to a runtime routine.
void JumpToExternalReference(const ExternalReference& ext);
@@ -776,6 +844,8 @@ class MacroAssembler: public Assembler {
void Drop(int element_count);
void Call(Label* target) { call(target); }
+ void Push(Register src) { push(src); }
+ void Pop(Register dst) { pop(dst); }
// Emit call to the code we are currently generating.
void CallSelf() {
@@ -788,12 +858,15 @@ class MacroAssembler: public Assembler {
// Push a handle value.
void Push(Handle<Object> handle) { push(Immediate(handle)); }
+ void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }
Handle<Object> CodeObject() {
ASSERT(!code_object_.is_null());
return code_object_;
}
+ // Insert code to verify that the x87 stack has the specified depth (0-7)
+ void VerifyX87StackDepth(uint32_t depth);
// ---------------------------------------------------------------------------
// StatsCounter support
@@ -810,15 +883,15 @@ class MacroAssembler: public Assembler {
// Calls Abort(msg) if the condition cc is not satisfied.
// Use --debug_code to enable.
- void Assert(Condition cc, const char* msg);
+ void Assert(Condition cc, BailoutReason reason);
void AssertFastElements(Register elements);
// Like Assert(), but always enabled.
- void Check(Condition cc, const char* msg);
+ void Check(Condition cc, BailoutReason reason);
// Print a message to stdout and abort execution.
- void Abort(const char* msg);
+ void Abort(BailoutReason reason);
// Check that the stack is aligned.
void CheckStackAlignment();
@@ -835,6 +908,17 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// String utilities.
+ // Generate code to do a lookup in the number string cache. If the number in
+ // the register object is found in the cache the generated code falls through
+ // with the result in the result register. The object and the result register
+ // can be the same. If the number is not found in the cache the code jumps to
+ // the label not_found with only the content of register object unchanged.
+ void LookupNumberStringCache(Register object,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* not_found);
+
// Check whether the instance type represents a flat ASCII string. Jump to the
// label if not. If the instance type can be scratched specify same register
// for both instance type and scratch.
@@ -850,6 +934,15 @@ class MacroAssembler: public Assembler {
Register scratch2,
Label* on_not_flat_ascii_strings);
+ // Checks if the given register or operand is a unique name
+ void JumpIfNotUniqueName(Register reg, Label* not_unique_name,
+ Label::Distance distance = Label::kFar) {
+ JumpIfNotUniqueName(Operand(reg), not_unique_name, distance);
+ }
+
+ void JumpIfNotUniqueName(Operand operand, Label* not_unique_name,
+ Label::Distance distance = Label::kFar);
+
static int SafepointRegisterStackIndex(Register reg) {
return SafepointRegisterStackIndex(reg.code());
}
@@ -862,6 +955,26 @@ class MacroAssembler: public Assembler {
// in eax. Assumes that any other register can be used as a scratch.
void CheckEnumCache(Label* call_runtime);
+ // AllocationMemento support. Arrays may have an associated
+ // AllocationMemento object that can be checked for in order to pretransition
+ // to another type.
+ // On entry, receiver_reg should point to the array object.
+ // scratch_reg gets clobbered.
+ // If allocation info is present, conditional code is set to equal.
+ void TestJSArrayForAllocationMemento(Register receiver_reg,
+ Register scratch_reg,
+ Label* no_memento_found);
+
+ void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
+ Register scratch_reg,
+ Label* memento_found) {
+ Label no_memento_found;
+ TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
+ &no_memento_found);
+ j(equal, memento_found);
+ bind(&no_memento_found);
+ }
+
private:
bool generating_stub_;
bool allow_stub_calls_;
@@ -884,13 +997,16 @@ class MacroAssembler: public Assembler {
void EnterExitFramePrologue();
void EnterExitFrameEpilogue(int argc, bool save_doubles);
- void LeaveExitFrameEpilogue();
+ void LeaveExitFrameEpilogue(bool restore_context);
// Allocation support helpers.
void LoadAllocationTopHelper(Register result,
Register scratch,
AllocationFlags flags);
- void UpdateAllocationTopHelper(Register result_end, Register scratch);
+
+ void UpdateAllocationTopHelper(Register result_end,
+ Register scratch,
+ AllocationFlags flags);
// Helper for PopHandleScope. Allowed to perform a GC and returns
// NULL if gc_allowed. Does not perform a GC if !gc_allowed, and
@@ -922,9 +1038,9 @@ class MacroAssembler: public Assembler {
Operand SafepointRegisterSlot(Register reg);
static int SafepointRegisterStackIndex(int reg_code);
- // Needs access to SafepointRegisterStackIndex for optimized frame
+ // Needs access to SafepointRegisterStackIndex for compiled frame
// traversal.
- friend class OptimizedFrame;
+ friend class StandardFrame;
};
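Illustrative note: the Allocate() overload declared above now takes a RegisterValueType describing whether element_count holds a smi or an untagged int32. A minimal sketch of a call site under that signature; the helper name, register choices, header size, and flags are assumptions for illustration, not taken from this patch:

// Sketch only: allocate a FixedArray-shaped object whose length (untagged
// int32) is in ecx; the tagged result lands in eax, edi/edx are scratch.
static void EmitAllocateBackingStore(MacroAssembler* masm,
                                     Label* gc_required) {
  masm->Allocate(FixedArray::kHeaderSize,   // header_size
                 times_pointer_size,        // element_size
                 ecx,                       // element_count
                 REGISTER_VALUE_IS_INT32,   // element_count_type
                 eax,                       // result
                 edi,                       // result_end
                 edx,                       // scratch
                 gc_required,
                 TAG_OBJECT);
}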
diff --git a/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc b/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
index 622dc4254..d371c456c 100644
--- a/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
@@ -27,8 +27,9 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_IA32)
+#if V8_TARGET_ARCH_IA32
+#include "cpu-profiler.h"
#include "unicode.h"
#include "log.h"
#include "regexp-stack.h"
@@ -104,7 +105,7 @@ RegExpMacroAssemblerIA32::RegExpMacroAssemblerIA32(
int registers_to_save,
Zone* zone)
: NativeRegExpMacroAssembler(zone),
- masm_(new MacroAssembler(Isolate::Current(), NULL, kRegExpCodeSize)),
+ masm_(new MacroAssembler(zone->isolate(), NULL, kRegExpCodeSize)),
mode_(mode),
num_registers_(registers_to_save),
num_saved_registers_(registers_to_save),
@@ -209,86 +210,6 @@ void RegExpMacroAssemblerIA32::CheckCharacterLT(uc16 limit, Label* on_less) {
}
-void RegExpMacroAssemblerIA32::CheckCharacters(Vector<const uc16> str,
- int cp_offset,
- Label* on_failure,
- bool check_end_of_string) {
-#ifdef DEBUG
- // If input is ASCII, don't even bother calling here if the string to
- // match contains a non-ASCII character.
- if (mode_ == ASCII) {
- ASSERT(String::IsAscii(str.start(), str.length()));
- }
-#endif
- int byte_length = str.length() * char_size();
- int byte_offset = cp_offset * char_size();
- if (check_end_of_string) {
- // Check that there are at least str.length() characters left in the input.
- __ cmp(edi, Immediate(-(byte_offset + byte_length)));
- BranchOrBacktrack(greater, on_failure);
- }
-
- if (on_failure == NULL) {
- // Instead of inlining a backtrack, (re)use the global backtrack target.
- on_failure = &backtrack_label_;
- }
-
- // Do one character test first to minimize loading for the case that
- // we don't match at all (loading more than one character introduces that
- // chance of reading unaligned and reading across cache boundaries).
- // If the first character matches, expect a larger chance of matching the
- // string, and start loading more characters at a time.
- if (mode_ == ASCII) {
- __ cmpb(Operand(esi, edi, times_1, byte_offset),
- static_cast<int8_t>(str[0]));
- } else {
- // Don't use 16-bit immediate. The size changing prefix throws off
- // pre-decoding.
- __ movzx_w(eax,
- Operand(esi, edi, times_1, byte_offset));
- __ cmp(eax, static_cast<int32_t>(str[0]));
- }
- BranchOrBacktrack(not_equal, on_failure);
-
- __ lea(ebx, Operand(esi, edi, times_1, 0));
- for (int i = 1, n = str.length(); i < n;) {
- if (mode_ == ASCII) {
- if (i <= n - 4) {
- int combined_chars =
- (static_cast<uint32_t>(str[i + 0]) << 0) |
- (static_cast<uint32_t>(str[i + 1]) << 8) |
- (static_cast<uint32_t>(str[i + 2]) << 16) |
- (static_cast<uint32_t>(str[i + 3]) << 24);
- __ cmp(Operand(ebx, byte_offset + i), Immediate(combined_chars));
- i += 4;
- } else {
- __ cmpb(Operand(ebx, byte_offset + i),
- static_cast<int8_t>(str[i]));
- i += 1;
- }
- } else {
- ASSERT(mode_ == UC16);
- if (i <= n - 2) {
- __ cmp(Operand(ebx, byte_offset + i * sizeof(uc16)),
- Immediate(*reinterpret_cast<const int*>(&str[i])));
- i += 2;
- } else {
- // Avoid a 16-bit immediate operation. It uses the length-changing
- // 0x66 prefix which causes pre-decoder misprediction and pipeline
- // stalls. See
- // "Intel(R) 64 and IA-32 Architectures Optimization Reference Manual"
- // (248966.pdf) section 3.4.2.3 "Length-Changing Prefixes (LCP)"
- __ movzx_w(eax,
- Operand(ebx, byte_offset + i * sizeof(uc16)));
- __ cmp(eax, static_cast<int32_t>(str[i]));
- i += 1;
- }
- }
- BranchOrBacktrack(not_equal, on_failure);
- }
-}
-
-
void RegExpMacroAssemblerIA32::CheckGreedyLoop(Label* on_equal) {
Label fallthrough;
__ cmp(edi, Operand(backtrack_stackpointer(), 0));
@@ -344,7 +265,15 @@ void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase(
__ or_(eax, 0x20); // Convert match character to lower-case.
__ lea(ecx, Operand(eax, -'a'));
__ cmp(ecx, static_cast<int32_t>('z' - 'a')); // Is eax a lowercase letter?
- __ j(above, &fail);
+ Label convert_capture;
+ __ j(below_equal, &convert_capture); // In range 'a'-'z'.
+ // Latin-1: Check for values in range [224,254] but not 247.
+ __ sub(ecx, Immediate(224 - 'a'));
+ __ cmp(ecx, Immediate(254 - 224));
+ __ j(above, &fail); // Weren't Latin-1 letters.
+ __ cmp(ecx, Immediate(247 - 224)); // Check for 247.
+ __ j(equal, &fail);
+ __ bind(&convert_capture);
// Also convert capture character.
__ movzx_b(ecx, Operand(edx, 0));
__ or_(ecx, 0x20);
@@ -393,7 +322,7 @@ void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase(
// Set isolate.
__ mov(Operand(esp, 3 * kPointerSize),
- Immediate(ExternalReference::isolate_address()));
+ Immediate(ExternalReference::isolate_address(isolate())));
// Set byte_length.
__ mov(Operand(esp, 2 * kPointerSize), ebx);
// Set byte_offset2.
@@ -409,7 +338,7 @@ void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase(
{
AllowExternalCallThatCantCauseGC scope(masm_);
ExternalReference compare =
- ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate());
+ ExternalReference::re_case_insensitive_compare_uc16(isolate());
__ CallCFunction(compare, argument_count);
}
// Pop original values before reacting on result value.
@@ -569,7 +498,7 @@ void RegExpMacroAssemblerIA32::CheckBitInTable(
Label* on_bit_set) {
__ mov(eax, Immediate(table));
Register index = current_character();
- if (mode_ != ASCII || kTableMask != String::kMaxAsciiCharCode) {
+ if (mode_ != ASCII || kTableMask != String::kMaxOneByteCharCode) {
__ mov(ebx, kTableSize - 1);
__ and_(ebx, current_character());
index = ebx;
@@ -587,29 +516,23 @@ bool RegExpMacroAssemblerIA32::CheckSpecialCharacterClass(uc16 type,
case 's':
// Match space-characters
if (mode_ == ASCII) {
- // ASCII space characters are '\t'..'\r' and ' '.
+ // One byte space characters are '\t'..'\r', ' ' and \u00a0.
Label success;
__ cmp(current_character(), ' ');
- __ j(equal, &success);
+ __ j(equal, &success, Label::kNear);
// Check range 0x09..0x0d
__ lea(eax, Operand(current_character(), -'\t'));
__ cmp(eax, '\r' - '\t');
- BranchOrBacktrack(above, on_no_match);
+ __ j(below_equal, &success, Label::kNear);
+ // \u00a0 (NBSP).
+ __ cmp(eax, 0x00a0 - '\t');
+ BranchOrBacktrack(not_equal, on_no_match);
__ bind(&success);
return true;
}
return false;
case 'S':
- // Match non-space characters.
- if (mode_ == ASCII) {
- // ASCII space characters are '\t'..'\r' and ' '.
- __ cmp(current_character(), ' ');
- BranchOrBacktrack(equal, on_no_match);
- __ lea(eax, Operand(current_character(), -'\t'));
- __ cmp(eax, '\r' - '\t');
- BranchOrBacktrack(below_equal, on_no_match);
- return true;
- }
+ // The emitted code for generic character classes is good enough.
return false;
case 'd':
// Match ASCII digits ('0'..'9')
@@ -743,7 +666,7 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
Label stack_ok;
ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit(masm_->isolate());
+ ExternalReference::address_of_stack_limit(isolate());
__ mov(ecx, esp);
__ sub(ecx, Operand::StaticVariable(stack_limit));
// Handle it if the stack pointer is already below the stack limit.
@@ -788,7 +711,7 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
// position registers.
__ mov(Operand(ebp, kInputStartMinusOne), eax);
-#ifdef WIN32
+#if V8_OS_WIN
// Ensure that we write to each stack page, in order. Skipping a page
// on Windows can cause segmentation faults. Assuming page size is 4k.
const int kPageSize = 4096;
@@ -798,7 +721,7 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
i += kRegistersPerPage) {
__ mov(register_location(i), eax); // One write every page.
}
-#endif // WIN32
+#endif // V8_OS_WIN
Label load_char_start_regexp, start_regexp;
// Load newline if index is at start, previous character otherwise.
@@ -970,12 +893,12 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
static const int num_arguments = 3;
__ PrepareCallCFunction(num_arguments, ebx);
__ mov(Operand(esp, 2 * kPointerSize),
- Immediate(ExternalReference::isolate_address()));
+ Immediate(ExternalReference::isolate_address(isolate())));
__ lea(eax, Operand(ebp, kStackHighEnd));
__ mov(Operand(esp, 1 * kPointerSize), eax);
__ mov(Operand(esp, 0 * kPointerSize), backtrack_stackpointer());
ExternalReference grow_stack =
- ExternalReference::re_grow_stack(masm_->isolate());
+ ExternalReference::re_grow_stack(isolate());
__ CallCFunction(grow_stack, num_arguments);
// If return NULL, we have failed to grow the stack, and
// must exit with a stack-overflow exception.
@@ -1000,10 +923,10 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
CodeDesc code_desc;
masm_->GetCode(&code_desc);
Handle<Code> code =
- masm_->isolate()->factory()->NewCode(code_desc,
- Code::ComputeFlags(Code::REGEXP),
- masm_->CodeObject());
- PROFILE(masm_->isolate(), RegExpCodeCreateEvent(*code, *source));
+ isolate()->factory()->NewCode(code_desc,
+ Code::ComputeFlags(Code::REGEXP),
+ masm_->CodeObject());
+ PROFILE(isolate(), RegExpCodeCreateEvent(*code, *source));
return Handle<HeapObject>::cast(code);
}
@@ -1107,6 +1030,7 @@ void RegExpMacroAssemblerIA32::SetCurrentPositionFromEnd(int by) {
__ bind(&after_position);
}
+
void RegExpMacroAssemblerIA32::SetRegister(int register_index, int to) {
ASSERT(register_index >= num_saved_registers_); // Reserved for positions!
__ mov(register_location(register_index), Immediate(to));
@@ -1159,7 +1083,7 @@ void RegExpMacroAssemblerIA32::CallCheckStackGuardState(Register scratch) {
__ lea(eax, Operand(esp, -kPointerSize));
__ mov(Operand(esp, 0 * kPointerSize), eax);
ExternalReference check_stack_guard =
- ExternalReference::re_check_stack_guard_state(masm_->isolate());
+ ExternalReference::re_check_stack_guard_state(isolate());
__ CallCFunction(check_stack_guard, num_arguments);
}
@@ -1175,7 +1099,6 @@ int RegExpMacroAssemblerIA32::CheckStackGuardState(Address* return_address,
Code* re_code,
Address re_frame) {
Isolate* isolate = frame_entry<Isolate*>(re_frame, kIsolate);
- ASSERT(isolate == Isolate::Current());
if (isolate->stack_guard()->IsStackOverflow()) {
isolate->StackOverflow();
return EXCEPTION;
@@ -1197,7 +1120,7 @@ int RegExpMacroAssemblerIA32::CheckStackGuardState(Address* return_address,
Handle<String> subject(frame_entry<String*>(re_frame, kInputString));
// Current string.
- bool is_ascii = subject->IsAsciiRepresentationUnderneath();
+ bool is_ascii = subject->IsOneByteRepresentationUnderneath();
ASSERT(re_code->instruction_start() <= *return_address);
ASSERT(*return_address <=
@@ -1228,7 +1151,7 @@ int RegExpMacroAssemblerIA32::CheckStackGuardState(Address* return_address,
}
// String might have changed.
- if (subject_tmp->IsAsciiRepresentation() != is_ascii) {
+ if (subject_tmp->IsOneByteRepresentation() != is_ascii) {
// If we changed between an ASCII and an UC16 string, the specialized
// code cannot be used, and we need to restart regexp matching from
// scratch (including, potentially, compiling a new version of the code).
@@ -1351,7 +1274,7 @@ void RegExpMacroAssemblerIA32::CheckPreemption() {
// Check for preemption.
Label no_preempt;
ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit(masm_->isolate());
+ ExternalReference::address_of_stack_limit(isolate());
__ cmp(esp, Operand::StaticVariable(stack_limit));
__ j(above, &no_preempt);
@@ -1364,7 +1287,7 @@ void RegExpMacroAssemblerIA32::CheckPreemption() {
void RegExpMacroAssemblerIA32::CheckStackLimit() {
Label no_stack_overflow;
ExternalReference stack_limit =
- ExternalReference::address_of_regexp_stack_limit(masm_->isolate());
+ ExternalReference::address_of_regexp_stack_limit(isolate());
__ cmp(backtrack_stackpointer(), Operand::StaticVariable(stack_limit));
__ j(above, &no_stack_overflow);
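Illustrative note: the CheckNotBackReferenceIgnoreCase hunk above extends the lowercase-letter check from ASCII to Latin-1. A scalar equivalent of the condition the emitted code tests after OR-ing the character with 0x20, shown only to clarify the range arithmetic:

// True for 'a'..'z' and for Latin-1 letters 0xE0..0xFE, excluding 0xF7
// (the division sign), which is not a letter.
static bool IsLatin1LowercaseLetter(uint8_t ch) {
  if (ch >= 'a' && ch <= 'z') return true;
  return ch >= 224 && ch <= 254 && ch != 247;
}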
diff --git a/deps/v8/src/ia32/regexp-macro-assembler-ia32.h b/deps/v8/src/ia32/regexp-macro-assembler-ia32.h
index 7aea38586..393333600 100644
--- a/deps/v8/src/ia32/regexp-macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/regexp-macro-assembler-ia32.h
@@ -30,6 +30,7 @@
#include "ia32/assembler-ia32.h"
#include "ia32/assembler-ia32-inl.h"
+#include "macro-assembler.h"
namespace v8 {
namespace internal {
@@ -51,10 +52,6 @@ class RegExpMacroAssemblerIA32: public NativeRegExpMacroAssembler {
Label* on_equal);
virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
virtual void CheckCharacterLT(uc16 limit, Label* on_less);
- virtual void CheckCharacters(Vector<const uc16> str,
- int cp_offset,
- Label* on_failure,
- bool check_end_of_string);
// A "greedy loop" is a loop that is both greedy and with a simple
// body. It has a particularly simple implementation.
virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
@@ -196,6 +193,8 @@ class RegExpMacroAssemblerIA32: public NativeRegExpMacroAssembler {
// (ecx) and increments it by a word size.
inline void Pop(Register target);
+ Isolate* isolate() const { return masm_->isolate(); }
+
MacroAssembler* masm_;
// Which mode to generate code for (ASCII or UC16).
diff --git a/deps/v8/src/ia32/stub-cache-ia32.cc b/deps/v8/src/ia32/stub-cache-ia32.cc
index 11efb72bb..0648833dc 100644
--- a/deps/v8/src/ia32/stub-cache-ia32.cc
+++ b/deps/v8/src/ia32/stub-cache-ia32.cc
@@ -27,7 +27,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_IA32)
+#if V8_TARGET_ARCH_IA32
#include "ic-inl.h"
#include "codegen.h"
@@ -137,38 +137,34 @@ static void ProbeTable(Isolate* isolate,
}
-// Helper function used to check that the dictionary doesn't contain
-// the property. This function may return false negatives, so miss_label
-// must always call a backup property check that is complete.
-// This function is safe to call if the receiver has fast properties.
-// Name must be a symbol and receiver must be a heap object.
-static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
- Label* miss_label,
- Register receiver,
- Handle<String> name,
- Register r0,
- Register r1) {
- ASSERT(name->IsSymbol());
+void StubCompiler::GenerateDictionaryNegativeLookup(MacroAssembler* masm,
+ Label* miss_label,
+ Register receiver,
+ Handle<Name> name,
+ Register scratch0,
+ Register scratch1) {
+ ASSERT(name->IsUniqueName());
+ ASSERT(!receiver.is(scratch0));
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->negative_lookups(), 1);
__ IncrementCounter(counters->negative_lookups_miss(), 1);
- __ mov(r0, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ mov(scratch0, FieldOperand(receiver, HeapObject::kMapOffset));
const int kInterceptorOrAccessCheckNeededMask =
(1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
// Bail out if the receiver has a named interceptor or requires access checks.
- __ test_b(FieldOperand(r0, Map::kBitFieldOffset),
+ __ test_b(FieldOperand(scratch0, Map::kBitFieldOffset),
kInterceptorOrAccessCheckNeededMask);
__ j(not_zero, miss_label);
// Check that receiver is a JSObject.
- __ CmpInstanceType(r0, FIRST_SPEC_OBJECT_TYPE);
+ __ CmpInstanceType(scratch0, FIRST_SPEC_OBJECT_TYPE);
__ j(below, miss_label);
// Load properties array.
- Register properties = r0;
+ Register properties = scratch0;
__ mov(properties, FieldOperand(receiver, JSObject::kPropertiesOffset));
// Check that the properties array is a dictionary.
@@ -177,12 +173,12 @@ static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
__ j(not_equal, miss_label);
Label done;
- StringDictionaryLookupStub::GenerateNegativeLookup(masm,
- miss_label,
- &done,
- properties,
- name,
- r1);
+ NameDictionaryLookupStub::GenerateNegativeLookup(masm,
+ miss_label,
+ &done,
+ properties,
+ name,
+ scratch1);
__ bind(&done);
__ DecrementCounter(counters->negative_lookups_miss(), 1);
}
@@ -227,7 +223,7 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
__ JumpIfSmi(receiver, &miss);
// Get the map of the receiver and compute the hash.
- __ mov(offset, FieldOperand(name, String::kHashFieldOffset));
+ __ mov(offset, FieldOperand(name, Name::kHashFieldOffset));
__ add(offset, FieldOperand(receiver, HeapObject::kMapOffset));
__ xor_(offset, flags);
// We mask out the last two bits because they are not part of the hash and
@@ -241,7 +237,7 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
ProbeTable(isolate(), masm, flags, kPrimary, name, receiver, offset, extra);
// Primary miss: Compute hash for secondary probe.
- __ mov(offset, FieldOperand(name, String::kHashFieldOffset));
+ __ mov(offset, FieldOperand(name, Name::kHashFieldOffset));
__ add(offset, FieldOperand(receiver, HeapObject::kMapOffset));
__ xor_(offset, flags);
__ and_(offset, (kPrimaryTableSize - 1) << kHeapObjectTagSize);
@@ -329,32 +325,28 @@ void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
Register receiver,
Register scratch1,
Register scratch2,
- Label* miss,
- bool support_wrappers) {
+ Label* miss) {
Label check_wrapper;
// Check if the object is a string leaving the instance type in the
// scratch register.
- GenerateStringCheck(masm, receiver, scratch1, miss,
- support_wrappers ? &check_wrapper : miss);
+ GenerateStringCheck(masm, receiver, scratch1, miss, &check_wrapper);
// Load length from the string and convert to a smi.
__ mov(eax, FieldOperand(receiver, String::kLengthOffset));
__ ret(0);
- if (support_wrappers) {
- // Check if the object is a JSValue wrapper.
- __ bind(&check_wrapper);
- __ cmp(scratch1, JS_VALUE_TYPE);
- __ j(not_equal, miss);
+ // Check if the object is a JSValue wrapper.
+ __ bind(&check_wrapper);
+ __ cmp(scratch1, JS_VALUE_TYPE);
+ __ j(not_equal, miss);
- // Check if the wrapped value is a string and load the length
- // directly if it is.
- __ mov(scratch2, FieldOperand(receiver, JSValue::kValueOffset));
- GenerateStringCheck(masm, scratch2, scratch1, miss, miss);
- __ mov(eax, FieldOperand(scratch2, String::kLengthOffset));
- __ ret(0);
- }
+ // Check if the wrapped value is a string and load the length
+ // directly if it is.
+ __ mov(scratch2, FieldOperand(receiver, JSValue::kValueOffset));
+ GenerateStringCheck(masm, scratch2, scratch1, miss, miss);
+ __ mov(eax, FieldOperand(scratch2, String::kLengthOffset));
+ __ ret(0);
}
@@ -369,26 +361,21 @@ void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
}
-// Load a fast property out of a holder object (src). In-object properties
-// are loaded directly otherwise the property is loaded from the properties
-// fixed array.
void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
Register dst,
Register src,
- Handle<JSObject> holder,
- int index) {
- // Adjust for the number of properties stored in the holder.
- index -= holder->map()->inobject_properties();
- if (index < 0) {
- // Get the property straight out of the holder.
- int offset = holder->map()->instance_size() + (index * kPointerSize);
- __ mov(dst, FieldOperand(src, offset));
- } else {
+ bool inobject,
+ int index,
+ Representation representation) {
+ ASSERT(!FLAG_track_double_fields || !representation.IsDouble());
+ int offset = index * kPointerSize;
+ if (!inobject) {
// Calculate the offset into the properties array.
- int offset = index * kPointerSize + FixedArray::kHeaderSize;
+ offset = offset + FixedArray::kHeaderSize;
__ mov(dst, FieldOperand(src, JSObject::kPropertiesOffset));
- __ mov(dst, FieldOperand(dst, offset));
+ src = dst;
}
+ __ mov(dst, FieldOperand(src, offset));
}
@@ -397,6 +384,11 @@ static void PushInterceptorArguments(MacroAssembler* masm,
Register holder,
Register name,
Handle<JSObject> holder_obj) {
+ STATIC_ASSERT(StubCache::kInterceptorArgsNameIndex == 0);
+ STATIC_ASSERT(StubCache::kInterceptorArgsInfoIndex == 1);
+ STATIC_ASSERT(StubCache::kInterceptorArgsThisIndex == 2);
+ STATIC_ASSERT(StubCache::kInterceptorArgsHolderIndex == 3);
+ STATIC_ASSERT(StubCache::kInterceptorArgsLength == 4);
__ push(name);
Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor));
@@ -405,8 +397,6 @@ static void PushInterceptorArguments(MacroAssembler* masm,
__ push(scratch);
__ push(receiver);
__ push(holder);
- __ push(FieldOperand(scratch, InterceptorInfo::kDataOffset));
- __ push(Immediate(reinterpret_cast<int>(masm->isolate())));
}
@@ -420,12 +410,12 @@ static void CompileCallLoadPropertyWithInterceptor(
__ CallExternalReference(
ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly),
masm->isolate()),
- 6);
+ StubCache::kInterceptorArgsLength);
}
// Number of pointers to be reserved on stack for fast API call.
-static const int kFastApiCallArguments = 4;
+static const int kFastApiCallArguments = FunctionCallbackArguments::kArgsLength;
// Reserves space for the extra arguments to API function in the
@@ -464,66 +454,126 @@ static void FreeSpaceForFastApiCall(MacroAssembler* masm, Register scratch) {
// Generates call to API function.
static void GenerateFastApiCall(MacroAssembler* masm,
const CallOptimization& optimization,
- int argc) {
+ int argc,
+ bool restore_context) {
// ----------- S t a t e -------------
// -- esp[0] : return address
- // -- esp[4] : object passing the type check
- // (last fast api call extra argument,
- // set by CheckPrototypes)
- // -- esp[8] : api function
- // (first fast api call extra argument)
- // -- esp[12] : api call data
- // -- esp[16] : isolate
- // -- esp[20] : last argument
+ // -- esp[4] - esp[28] : FunctionCallbackInfo, incl.
+ // : object passing the type check
+ // (set by CheckPrototypes)
+ // -- esp[32] : last argument
// -- ...
- // -- esp[(argc + 4) * 4] : first argument
- // -- esp[(argc + 5) * 4] : receiver
+ // -- esp[(argc + 7) * 4] : first argument
+ // -- esp[(argc + 8) * 4] : receiver
// -----------------------------------
+
+ typedef FunctionCallbackArguments FCA;
+ // Save calling context.
+ __ mov(Operand(esp, (1 + FCA::kContextSaveIndex) * kPointerSize), esi);
+
// Get the function and setup the context.
Handle<JSFunction> function = optimization.constant_function();
__ LoadHeapObject(edi, function);
__ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
- // Pass the additional arguments.
- __ mov(Operand(esp, 2 * kPointerSize), edi);
+ // Construct the FunctionCallbackInfo.
+ __ mov(Operand(esp, (1 + FCA::kCalleeIndex) * kPointerSize), edi);
Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
- Handle<Object> call_data(api_call_info->data());
+ Handle<Object> call_data(api_call_info->data(), masm->isolate());
if (masm->isolate()->heap()->InNewSpace(*call_data)) {
__ mov(ecx, api_call_info);
__ mov(ebx, FieldOperand(ecx, CallHandlerInfo::kDataOffset));
- __ mov(Operand(esp, 3 * kPointerSize), ebx);
+ __ mov(Operand(esp, (1 + FCA::kDataIndex) * kPointerSize), ebx);
} else {
- __ mov(Operand(esp, 3 * kPointerSize), Immediate(call_data));
+ __ mov(Operand(esp, (1 + FCA::kDataIndex) * kPointerSize),
+ Immediate(call_data));
}
- __ mov(Operand(esp, 4 * kPointerSize),
+ __ mov(Operand(esp, (1 + FCA::kIsolateIndex) * kPointerSize),
Immediate(reinterpret_cast<int>(masm->isolate())));
+ __ mov(Operand(esp, (1 + FCA::kReturnValueOffset) * kPointerSize),
+ masm->isolate()->factory()->undefined_value());
+ __ mov(Operand(esp, (1 + FCA::kReturnValueDefaultValueIndex) * kPointerSize),
+ masm->isolate()->factory()->undefined_value());
// Prepare arguments.
- __ lea(eax, Operand(esp, 4 * kPointerSize));
+ STATIC_ASSERT(kFastApiCallArguments == 7);
+ __ lea(eax, Operand(esp, 1 * kPointerSize));
- const int kApiArgc = 1; // API function gets reference to the v8::Arguments.
+ // API function gets reference to the v8::Arguments. If the CPU profiler
+ // is enabled, the wrapper function will be called and we need to pass the
+ // address of the callback as an additional parameter, so always allocate
+ // space for it.
+ const int kApiArgc = 1 + 1;
// Allocate the v8::Arguments structure in the arguments' space since
// it's not controlled by GC.
const int kApiStackSpace = 4;
+ // Function address is a foreign pointer outside V8's heap.
+ Address function_address = v8::ToCData<Address>(api_call_info->callback());
__ PrepareCallApiFunction(kApiArgc + kApiStackSpace);
- __ mov(ApiParameterOperand(1), eax); // v8::Arguments::implicit_args_.
- __ add(eax, Immediate(argc * kPointerSize));
- __ mov(ApiParameterOperand(2), eax); // v8::Arguments::values_.
- __ Set(ApiParameterOperand(3), Immediate(argc)); // v8::Arguments::length_.
- // v8::Arguments::is_construct_call_.
- __ Set(ApiParameterOperand(4), Immediate(0));
+ // FunctionCallbackInfo::implicit_args_.
+ __ mov(ApiParameterOperand(2), eax);
+ __ add(eax, Immediate((argc + kFastApiCallArguments - 1) * kPointerSize));
+ // FunctionCallbackInfo::values_.
+ __ mov(ApiParameterOperand(3), eax);
+ // FunctionCallbackInfo::length_.
+ __ Set(ApiParameterOperand(4), Immediate(argc));
+ // FunctionCallbackInfo::is_construct_call_.
+ __ Set(ApiParameterOperand(5), Immediate(0));
// v8::InvocationCallback's argument.
- __ lea(eax, ApiParameterOperand(1));
+ __ lea(eax, ApiParameterOperand(2));
__ mov(ApiParameterOperand(0), eax);
- // Function address is a foreign pointer outside V8's heap.
- Address function_address = v8::ToCData<Address>(api_call_info->callback());
+ Address thunk_address = FUNCTION_ADDR(&InvokeFunctionCallback);
+
+ Operand context_restore_operand(ebp,
+ (2 + FCA::kContextSaveIndex) * kPointerSize);
+ Operand return_value_operand(ebp,
+ (2 + FCA::kReturnValueOffset) * kPointerSize);
__ CallApiFunctionAndReturn(function_address,
- argc + kFastApiCallArguments + 1);
+ thunk_address,
+ ApiParameterOperand(1),
+ argc + kFastApiCallArguments + 1,
+ return_value_operand,
+ restore_context ?
+ &context_restore_operand : NULL);
+}
+
+
+// Generate call to api function.
+static void GenerateFastApiCall(MacroAssembler* masm,
+ const CallOptimization& optimization,
+ Register receiver,
+ Register scratch,
+ int argc,
+ Register* values) {
+ ASSERT(optimization.is_simple_api_call());
+ ASSERT(!receiver.is(scratch));
+
+ const int stack_space = kFastApiCallArguments + argc + 1;
+ const int kHolderIndex = FunctionCallbackArguments::kHolderIndex + 1;
+ // Copy return value.
+ __ mov(scratch, Operand(esp, 0));
+ // Assign stack space for the call arguments.
+ __ sub(esp, Immediate(stack_space * kPointerSize));
+ // Move the return address on top of the stack.
+ __ mov(Operand(esp, 0), scratch);
+ // Write holder to stack frame.
+ __ mov(Operand(esp, kHolderIndex * kPointerSize), receiver);
+ // Write receiver to stack frame.
+ int index = stack_space;
+ __ mov(Operand(esp, index-- * kPointerSize), receiver);
+ // Write the arguments to stack frame.
+ for (int i = 0; i < argc; i++) {
+ ASSERT(!receiver.is(values[i]));
+ ASSERT(!scratch.is(values[i]));
+ __ mov(Operand(esp, index-- * kPointerSize), values[i]);
+ }
+
+ GenerateFastApiCall(masm, optimization, argc, true);
}
@@ -541,7 +591,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
void Compile(MacroAssembler* masm,
Handle<JSObject> object,
Handle<JSObject> holder,
- Handle<String> name,
+ Handle<Name> name,
LookupResult* lookup,
Register receiver,
Register scratch1,
@@ -573,7 +623,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
Register scratch3,
Handle<JSObject> interceptor_holder,
LookupResult* lookup,
- Handle<String> name,
+ Handle<Name> name,
const CallOptimization& optimization,
Label* miss_label) {
ASSERT(optimization.is_constant_call());
@@ -637,12 +687,14 @@ class CallInterceptorCompiler BASE_EMBEDDED {
// Invoke function.
if (can_do_fast_api_call) {
- GenerateFastApiCall(masm, optimization, arguments_.immediate());
+ GenerateFastApiCall(masm, optimization, arguments_.immediate(), false);
} else {
CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
? CALL_AS_FUNCTION
: CALL_AS_METHOD;
- __ InvokeFunction(optimization.constant_function(), arguments_,
+ Handle<JSFunction> function = optimization.constant_function();
+ ParameterCount expected(function);
+ __ InvokeFunction(function, expected, arguments_,
JUMP_FUNCTION, NullCallWrapper(), call_kind);
}
@@ -666,7 +718,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
Register scratch1,
Register scratch2,
Register scratch3,
- Handle<String> name,
+ Handle<Name> name,
Handle<JSObject> interceptor_holder,
Label* miss_label) {
Register holder =
@@ -683,7 +735,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
__ CallExternalReference(
ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
masm->isolate()),
- 6);
+ StubCache::kInterceptorArgsLength);
// Restore the name_ register.
__ pop(name_);
@@ -723,78 +775,118 @@ class CallInterceptorCompiler BASE_EMBEDDED {
};
-void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
- ASSERT(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC);
- Handle<Code> code = (kind == Code::LOAD_IC)
- ? masm->isolate()->builtins()->LoadIC_Miss()
- : masm->isolate()->builtins()->KeyedLoadIC_Miss();
- __ jmp(code, RelocInfo::CODE_TARGET);
-}
-
-
-void StubCompiler::GenerateKeyedLoadMissForceGeneric(MacroAssembler* masm) {
- Handle<Code> code =
- masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
- __ jmp(code, RelocInfo::CODE_TARGET);
+void StoreStubCompiler::GenerateRestoreName(MacroAssembler* masm,
+ Label* label,
+ Handle<Name> name) {
+ if (!label->is_unused()) {
+ __ bind(label);
+ __ mov(this->name(), Immediate(name));
+ }
}
-// Both name_reg and receiver_reg are preserved on jumps to miss_label,
-// but may be destroyed if store is successful.
-void StubCompiler::GenerateStoreField(MacroAssembler* masm,
- Handle<JSObject> object,
- int index,
- Handle<Map> transition,
- Handle<String> name,
- Register receiver_reg,
- Register name_reg,
- Register scratch1,
- Register scratch2,
- Label* miss_label) {
- LookupResult lookup(masm->isolate());
- object->Lookup(*name, &lookup);
- if (lookup.IsFound() && (lookup.IsReadOnly() || !lookup.IsCacheable())) {
- // In sloppy mode, we could just return the value and be done. However, we
- // might be in strict mode, where we have to throw. Since we cannot tell,
- // go into slow case unconditionally.
- __ jmp(miss_label);
- return;
+// Generate code to check that a global property cell is empty. Create
+// the property cell at compilation time if no cell exists for the
+// property.
+void StubCompiler::GenerateCheckPropertyCell(MacroAssembler* masm,
+ Handle<JSGlobalObject> global,
+ Handle<Name> name,
+ Register scratch,
+ Label* miss) {
+ Handle<PropertyCell> cell =
+ JSGlobalObject::EnsurePropertyCell(global, name);
+ ASSERT(cell->value()->IsTheHole());
+ Handle<Oddball> the_hole = masm->isolate()->factory()->the_hole_value();
+ if (Serializer::enabled()) {
+ __ mov(scratch, Immediate(cell));
+ __ cmp(FieldOperand(scratch, PropertyCell::kValueOffset),
+ Immediate(the_hole));
+ } else {
+ __ cmp(Operand::ForCell(cell), Immediate(the_hole));
}
+ __ j(not_equal, miss);
+}
- // Check that the map of the object hasn't changed.
- CompareMapMode mode = transition.is_null() ? ALLOW_ELEMENT_TRANSITION_MAPS
- : REQUIRE_EXACT_MAP;
- __ CheckMap(receiver_reg, Handle<Map>(object->map()),
- miss_label, DO_SMI_CHECK, mode);
- // Perform global security token check if needed.
- if (object->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(receiver_reg, scratch1, miss_label);
- }
+void StoreStubCompiler::GenerateNegativeHolderLookup(
+ MacroAssembler* masm,
+ Handle<JSObject> holder,
+ Register holder_reg,
+ Handle<Name> name,
+ Label* miss) {
+ if (holder->IsJSGlobalObject()) {
+ GenerateCheckPropertyCell(
+ masm, Handle<JSGlobalObject>::cast(holder), name, scratch1(), miss);
+ } else if (!holder->HasFastProperties() && !holder->IsJSGlobalProxy()) {
+ GenerateDictionaryNegativeLookup(
+ masm, miss, holder_reg, name, scratch1(), scratch2());
+ }
+}
+
+
+// Receiver_reg is preserved on jumps to miss_label, but may be destroyed if
+// store is successful.
+void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
+ Handle<JSObject> object,
+ LookupResult* lookup,
+ Handle<Map> transition,
+ Handle<Name> name,
+ Register receiver_reg,
+ Register storage_reg,
+ Register value_reg,
+ Register scratch1,
+ Register scratch2,
+ Register unused,
+ Label* miss_label,
+ Label* slow) {
+ int descriptor = transition->LastAdded();
+ DescriptorArray* descriptors = transition->instance_descriptors();
+ PropertyDetails details = descriptors->GetDetails(descriptor);
+ Representation representation = details.representation();
+ ASSERT(!representation.IsNone());
+
+ if (details.type() == CONSTANT) {
+ Handle<Object> constant(descriptors->GetValue(descriptor), masm->isolate());
+ __ CmpObject(value_reg, constant);
+ __ j(not_equal, miss_label);
+ } else if (FLAG_track_fields && representation.IsSmi()) {
+ __ JumpIfNotSmi(value_reg, miss_label);
+ } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
+ __ JumpIfSmi(value_reg, miss_label);
+ } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ Label do_store, heap_number;
+ __ AllocateHeapNumber(storage_reg, scratch1, scratch2, slow);
+
+ __ JumpIfNotSmi(value_reg, &heap_number);
+ __ SmiUntag(value_reg);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatureScope use_sse2(masm, SSE2);
+ __ Cvtsi2sd(xmm0, value_reg);
+ } else {
+ __ push(value_reg);
+ __ fild_s(Operand(esp, 0));
+ __ pop(value_reg);
+ }
+ __ SmiTag(value_reg);
+ __ jmp(&do_store);
+
+ __ bind(&heap_number);
+ __ CheckMap(value_reg, masm->isolate()->factory()->heap_number_map(),
+ miss_label, DONT_DO_SMI_CHECK);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatureScope use_sse2(masm, SSE2);
+ __ movsd(xmm0, FieldOperand(value_reg, HeapNumber::kValueOffset));
+ } else {
+ __ fld_d(FieldOperand(value_reg, HeapNumber::kValueOffset));
+ }
- // Check that we are allowed to write this.
- if (!transition.is_null() && object->GetPrototype()->IsJSObject()) {
- JSObject* holder;
- if (lookup.IsFound()) {
- holder = lookup.holder();
+ __ bind(&do_store);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatureScope use_sse2(masm, SSE2);
+ __ movsd(FieldOperand(storage_reg, HeapNumber::kValueOffset), xmm0);
} else {
- // Find the top object.
- holder = *object;
- do {
- holder = JSObject::cast(holder->GetPrototype());
- } while (holder->GetPrototype()->IsJSObject());
+ __ fstp_d(FieldOperand(storage_reg, HeapNumber::kValueOffset));
}
- // We need an extra register, push
- __ push(name_reg);
- Label miss_pop, done_check;
- CheckPrototypes(object, receiver_reg, Handle<JSObject>(holder), name_reg,
- scratch1, scratch2, name, &miss_pop);
- __ jmp(&done_check);
- __ bind(&miss_pop);
- __ pop(name_reg);
- __ jmp(miss_label);
- __ bind(&done_check);
- __ pop(name_reg);
}
// Stub never generated for non-global objects that require access
@@ -802,13 +894,14 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
// Perform map transition for the receiver if necessary.
- if (!transition.is_null() && (object->map()->unused_property_fields() == 0)) {
+ if (details.type() == FIELD &&
+ object->map()->unused_property_fields() == 0) {
// The properties must be extended before we can store the value.
// We jump to a runtime call that extends the properties array.
__ pop(scratch1); // Return address.
__ push(receiver_reg);
__ push(Immediate(transition));
- __ push(eax);
+ __ push(value_reg);
__ push(scratch1);
__ TailCallExternalReference(
ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
@@ -818,98 +911,224 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
return;
}
- if (!transition.is_null()) {
- // Update the map of the object.
- __ mov(scratch1, Immediate(transition));
- __ mov(FieldOperand(receiver_reg, HeapObject::kMapOffset), scratch1);
-
- // Update the write barrier for the map field and pass the now unused
- // name_reg as scratch register.
- __ RecordWriteField(receiver_reg,
- HeapObject::kMapOffset,
- scratch1,
- name_reg,
- kDontSaveFPRegs,
- OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
+ // Update the map of the object.
+ __ mov(scratch1, Immediate(transition));
+ __ mov(FieldOperand(receiver_reg, HeapObject::kMapOffset), scratch1);
+
+ // Update the write barrier for the map field.
+ __ RecordWriteField(receiver_reg,
+ HeapObject::kMapOffset,
+ scratch1,
+ scratch2,
+ kDontSaveFPRegs,
+ OMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+
+ if (details.type() == CONSTANT) {
+ ASSERT(value_reg.is(eax));
+ __ ret(0);
+ return;
}
+ int index = transition->instance_descriptors()->GetFieldIndex(
+ transition->LastAdded());
+
// Adjust for the number of properties stored in the object. Even in the
// face of a transition we can use the old map here because the size of the
// object and the number of in-object properties is not going to change.
index -= object->map()->inobject_properties();
+ SmiCheck smi_check = representation.IsTagged()
+ ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
+ // TODO(verwaest): Share this code as a code stub.
if (index < 0) {
// Set the property straight into the object.
int offset = object->map()->instance_size() + (index * kPointerSize);
- __ mov(FieldOperand(receiver_reg, offset), eax);
-
- // Update the write barrier for the array address.
- // Pass the value being stored in the now unused name_reg.
- __ mov(name_reg, eax);
- __ RecordWriteField(receiver_reg,
- offset,
- name_reg,
- scratch1,
- kDontSaveFPRegs);
+ if (FLAG_track_double_fields && representation.IsDouble()) {
+ __ mov(FieldOperand(receiver_reg, offset), storage_reg);
+ } else {
+ __ mov(FieldOperand(receiver_reg, offset), value_reg);
+ }
+
+ if (!FLAG_track_fields || !representation.IsSmi()) {
+ // Update the write barrier for the array address.
+ if (!FLAG_track_double_fields || !representation.IsDouble()) {
+ __ mov(storage_reg, value_reg);
+ }
+ __ RecordWriteField(receiver_reg,
+ offset,
+ storage_reg,
+ scratch1,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ smi_check);
+ }
} else {
// Write to the properties array.
int offset = index * kPointerSize + FixedArray::kHeaderSize;
// Get the properties array (optimistically).
__ mov(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
- __ mov(FieldOperand(scratch1, offset), eax);
-
- // Update the write barrier for the array address.
- // Pass the value being stored in the now unused name_reg.
- __ mov(name_reg, eax);
- __ RecordWriteField(scratch1,
- offset,
- name_reg,
- receiver_reg,
- kDontSaveFPRegs);
+ if (FLAG_track_double_fields && representation.IsDouble()) {
+ __ mov(FieldOperand(scratch1, offset), storage_reg);
+ } else {
+ __ mov(FieldOperand(scratch1, offset), value_reg);
+ }
+
+ if (!FLAG_track_fields || !representation.IsSmi()) {
+ // Update the write barrier for the array address.
+ if (!FLAG_track_double_fields || !representation.IsDouble()) {
+ __ mov(storage_reg, value_reg);
+ }
+ __ RecordWriteField(scratch1,
+ offset,
+ storage_reg,
+ receiver_reg,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ smi_check);
+ }
}
// Return the value (register eax).
+ ASSERT(value_reg.is(eax));
__ ret(0);
}
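
Editorial aside, not part of the patch: the two decisions the field stores above derive from the tracked representation can be summarized in plain C++. The struct and helper below are illustrative names, not V8 API.

// Sketch only: which write-barrier work the field stores above emit,
// given the tracked representation of the stored field.
struct BarrierPlan {
  bool emit_write_barrier;  // RecordWriteField emitted at all
  bool inline_smi_check;    // INLINE_SMI_CHECK vs OMIT_SMI_CHECK
};

inline BarrierPlan PlanFieldWriteBarrier(bool field_is_smi_only,
                                         bool field_is_tagged) {
  BarrierPlan plan;
  // A smi is never a heap pointer, so a smi-only field needs no barrier;
  // this mirrors the "!FLAG_track_fields || !representation.IsSmi()" guard.
  plan.emit_write_barrier = !field_is_smi_only;
  // Only a field that may still hold a smi at runtime (a tagged field)
  // keeps the inline smi check inside the barrier.
  plan.inline_smi_check = field_is_tagged;
  return plan;
}
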
-// Generate code to check that a global property cell is empty. Create
-// the property cell at compilation time if no cell exists for the
-// property.
-static void GenerateCheckPropertyCell(MacroAssembler* masm,
- Handle<GlobalObject> global,
- Handle<String> name,
- Register scratch,
- Label* miss) {
- Handle<JSGlobalPropertyCell> cell =
- GlobalObject::EnsurePropertyCell(global, name);
- ASSERT(cell->value()->IsTheHole());
- Handle<Oddball> the_hole = masm->isolate()->factory()->the_hole_value();
- if (Serializer::enabled()) {
- __ mov(scratch, Immediate(cell));
- __ cmp(FieldOperand(scratch, JSGlobalPropertyCell::kValueOffset),
- Immediate(the_hole));
+// Both name_reg and receiver_reg are preserved on jumps to miss_label,
+// but may be destroyed if store is successful.
+void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
+ Handle<JSObject> object,
+ LookupResult* lookup,
+ Register receiver_reg,
+ Register name_reg,
+ Register value_reg,
+ Register scratch1,
+ Register scratch2,
+ Label* miss_label) {
+ // Stub never generated for non-global objects that require access
+ // checks.
+ ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
+
+ int index = lookup->GetFieldIndex().field_index();
+
+ // Adjust for the number of properties stored in the object. Even in the
+ // face of a transition we can use the old map here because the size of the
+ // object and the number of in-object properties are not going to change.
+ index -= object->map()->inobject_properties();
+
+ Representation representation = lookup->representation();
+ ASSERT(!representation.IsNone());
+ if (FLAG_track_fields && representation.IsSmi()) {
+ __ JumpIfNotSmi(value_reg, miss_label);
+ } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
+ __ JumpIfSmi(value_reg, miss_label);
+ } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ // Load the double storage.
+ if (index < 0) {
+ int offset = object->map()->instance_size() + (index * kPointerSize);
+ __ mov(scratch1, FieldOperand(receiver_reg, offset));
+ } else {
+ __ mov(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
+ int offset = index * kPointerSize + FixedArray::kHeaderSize;
+ __ mov(scratch1, FieldOperand(scratch1, offset));
+ }
+
+ // Store the value into the storage.
+ Label do_store, heap_number;
+ __ JumpIfNotSmi(value_reg, &heap_number);
+ __ SmiUntag(value_reg);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatureScope use_sse2(masm, SSE2);
+ __ Cvtsi2sd(xmm0, value_reg);
+ } else {
+ __ push(value_reg);
+ __ fild_s(Operand(esp, 0));
+ __ pop(value_reg);
+ }
+ __ SmiTag(value_reg);
+ __ jmp(&do_store);
+ __ bind(&heap_number);
+ __ CheckMap(value_reg, masm->isolate()->factory()->heap_number_map(),
+ miss_label, DONT_DO_SMI_CHECK);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatureScope use_sse2(masm, SSE2);
+ __ movsd(xmm0, FieldOperand(value_reg, HeapNumber::kValueOffset));
+ } else {
+ __ fld_d(FieldOperand(value_reg, HeapNumber::kValueOffset));
+ }
+ __ bind(&do_store);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatureScope use_sse2(masm, SSE2);
+ __ movsd(FieldOperand(scratch1, HeapNumber::kValueOffset), xmm0);
+ } else {
+ __ fstp_d(FieldOperand(scratch1, HeapNumber::kValueOffset));
+ }
+ // Return the value (register eax).
+ ASSERT(value_reg.is(eax));
+ __ ret(0);
+ return;
+ }
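
As a reading aid only (not part of the patch), the double-field path above amounts to the following plain C++ over a toy tagged value; the type and helper names are hypothetical.

#include <cstdint>

// Toy stand-in for a tagged value; illustrative only.
struct TaggedValue {
  bool is_smi;          // small integer, stored untagged below
  int32_t smi;
  bool is_heap_number;  // boxed double
  double number;
};

// Returns false when the value is neither a smi nor a heap number, which
// corresponds to the jump to miss_label in the stub above.
inline bool StoreIntoDoubleField(double* storage, const TaggedValue& v) {
  if (v.is_smi) {                  // JumpIfNotSmi / SmiUntag / Cvtsi2sd
    *storage = static_cast<double>(v.smi);
    return true;
  }
  if (v.is_heap_number) {          // CheckMap(heap_number_map) / movsd
    *storage = v.number;
    return true;
  }
  return false;
}
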
+
+ ASSERT(!FLAG_track_double_fields || !representation.IsDouble());
+ // TODO(verwaest): Share this code as a code stub.
+ SmiCheck smi_check = representation.IsTagged()
+ ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
+ if (index < 0) {
+ // Set the property straight into the object.
+ int offset = object->map()->instance_size() + (index * kPointerSize);
+ __ mov(FieldOperand(receiver_reg, offset), value_reg);
+
+ if (!FLAG_track_fields || !representation.IsSmi()) {
+ // Update the write barrier for the array address.
+ // Pass the value being stored in the now unused name_reg.
+ __ mov(name_reg, value_reg);
+ __ RecordWriteField(receiver_reg,
+ offset,
+ name_reg,
+ scratch1,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ smi_check);
+ }
} else {
- __ cmp(Operand::Cell(cell), Immediate(the_hole));
+ // Write to the properties array.
+ int offset = index * kPointerSize + FixedArray::kHeaderSize;
+ // Get the properties array (optimistically).
+ __ mov(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
+ __ mov(FieldOperand(scratch1, offset), value_reg);
+
+ if (!FLAG_track_fields || !representation.IsSmi()) {
+ // Update the write barrier for the array address.
+ // Pass the value being stored in the now unused name_reg.
+ __ mov(name_reg, value_reg);
+ __ RecordWriteField(scratch1,
+ offset,
+ name_reg,
+ receiver_reg,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ smi_check);
+ }
}
- __ j(not_equal, miss);
+
+ // Return the value (register eax).
+ ASSERT(value_reg.is(eax));
+ __ ret(0);
}
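
Another editorial sketch, not part of the patch: the index adjustment above (index -= inobject_properties) and the two addressing cases it selects, with the layout constants passed in rather than taken from V8.

// Sketch only: byte offset of a named field. A negative adjusted index selects
// an in-object slot; a non-negative one selects a slot in the external
// properties array (offsets shown untagged, i.e. without the heap-object tag).
inline int FieldByteOffset(int descriptor_index, int inobject_properties,
                           int instance_size, int pointer_size,
                           int fixed_array_header_size) {
  int index = descriptor_index - inobject_properties;
  if (index < 0) {
    // In-object slot: counted back from the end of the instance.
    return instance_size + index * pointer_size;
  }
  // Slot in the properties FixedArray, after its header.
  return fixed_array_header_size + index * pointer_size;
}
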
-// Calls GenerateCheckPropertyCell for each global object in the prototype chain
-// from object to (but not including) holder.
-static void GenerateCheckPropertyCells(MacroAssembler* masm,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<String> name,
- Register scratch,
- Label* miss) {
+void StubCompiler::GenerateCheckPropertyCells(MacroAssembler* masm,
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ Register scratch,
+ Label* miss) {
Handle<JSObject> current = object;
while (!current.is_identical_to(holder)) {
- if (current->IsGlobalObject()) {
+ if (current->IsJSGlobalObject()) {
GenerateCheckPropertyCell(masm,
- Handle<GlobalObject>::cast(current),
+ Handle<JSGlobalObject>::cast(current),
name,
scratch,
miss);
@@ -918,6 +1137,12 @@ static void GenerateCheckPropertyCells(MacroAssembler* masm,
}
}
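
Editorial sketch, not part of the patch: the run-time invariant the generated cell checks enforce can be stated in plain C++ over toy types (the chain is assumed to run from the receiver up to, but not including, the holder).

#include <string>
#include <vector>

// Toy model, illustrative only: one object on the prototype chain, with the
// names for which a global object currently has a non-hole property cell.
struct ChainObject {
  bool is_global;
  std::vector<std::string> present_names;
};

// The stub must miss if any global object between the receiver and the holder
// has acquired the property since compilation (its cell no longer holds the hole).
inline bool ChainStillLacksProperty(const std::vector<ChainObject>& chain,
                                    const std::string& name) {
  for (const ChainObject& obj : chain) {
    if (!obj.is_global) continue;
    for (const std::string& n : obj.present_names) {
      if (n == name) return false;  // property appeared -> miss
    }
  }
  return true;
}
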
+
+void StubCompiler::GenerateTailCall(MacroAssembler* masm, Handle<Code> code) {
+ __ jmp(code, RelocInfo::CODE_TARGET);
+}
+
+
#undef __
#define __ ACCESS_MASM(masm())
@@ -928,9 +1153,16 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
Register holder_reg,
Register scratch1,
Register scratch2,
- Handle<String> name,
+ Handle<Name> name,
int save_at_depth,
- Label* miss) {
+ Label* miss,
+ PrototypeCheckType check) {
+ const int kHolderIndex = FunctionCallbackArguments::kHolderIndex + 1;
+ // Make sure that the type feedback oracle harvests the receiver map.
+ // TODO(svenpanne) Remove this hack when all ICs are reworked.
+ __ mov(scratch1, Handle<Map>(object->map()));
+
+ Handle<JSObject> first = object;
// Make sure there's no overlap between holder and object registers.
ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg)
@@ -942,7 +1174,7 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
int depth = 0;
if (save_at_depth == depth) {
- __ mov(Operand(esp, kPointerSize), reg);
+ __ mov(Operand(esp, kHolderIndex * kPointerSize), reg);
}
// Traverse the prototype chain and check the maps in the prototype chain for
@@ -958,11 +1190,12 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
if (!current->HasFastProperties() &&
!current->IsJSGlobalObject() &&
!current->IsJSGlobalProxy()) {
- if (!name->IsSymbol()) {
- name = factory()->LookupSymbol(name);
+ if (!name->IsUniqueName()) {
+ ASSERT(name->IsString());
+ name = factory()->InternalizeString(Handle<String>::cast(name));
}
ASSERT(current->property_dictionary()->FindEntry(*name) ==
- StringDictionary::kNotFound);
+ NameDictionary::kNotFound);
GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
scratch1, scratch2);
@@ -973,19 +1206,22 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
} else {
bool in_new_space = heap()->InNewSpace(*prototype);
Handle<Map> current_map(current->map());
- if (in_new_space) {
- // Save the map in scratch1 for later.
- __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
+ if (!current.is_identical_to(first) || check == CHECK_ALL_MAPS) {
+ __ CheckMap(reg, current_map, miss, DONT_DO_SMI_CHECK);
}
- __ CheckMap(reg, current_map, miss, DONT_DO_SMI_CHECK,
- ALLOW_ELEMENT_TRANSITION_MAPS);
// Check access rights to the global object. This has to happen after
// the map check so that we know that the object is actually a global
// object.
if (current->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(reg, scratch2, miss);
+ __ CheckAccessGlobalProxy(reg, scratch1, scratch2, miss);
+ }
+
+ if (in_new_space) {
+ // Save the map in scratch1 for later.
+ __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
}
+
reg = holder_reg; // From now on the object will be in holder_reg.
if (in_new_space) {
@@ -999,7 +1235,7 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
}
if (save_at_depth == depth) {
- __ mov(Operand(esp, kPointerSize), reg);
+ __ mov(Operand(esp, kHolderIndex * kPointerSize), reg);
}
// Go to the next object in the prototype chain.
@@ -1010,14 +1246,15 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
// Log the check depth.
LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
- // Check the holder map.
- __ CheckMap(reg, Handle<Map>(holder->map()),
- miss, DONT_DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
+ if (!holder.is_identical_to(first) || check == CHECK_ALL_MAPS) {
+ // Check the holder map.
+ __ CheckMap(reg, Handle<Map>(holder->map()), miss, DONT_DO_SMI_CHECK);
+ }
// Perform security check for access to the global object.
ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
if (holder->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(reg, scratch1, miss);
+ __ CheckAccessGlobalProxy(reg, scratch1, scratch2, miss);
}
// If we've skipped any global objects, it's not enough to verify that
@@ -1030,134 +1267,159 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
}
-void StubCompiler::GenerateLoadField(Handle<JSObject> object,
- Handle<JSObject> holder,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- int index,
- Handle<String> name,
- Label* miss) {
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss);
+void LoadStubCompiler::HandlerFrontendFooter(Handle<Name> name,
+ Label* success,
+ Label* miss) {
+ if (!miss->is_unused()) {
+ __ jmp(success);
+ __ bind(miss);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+ }
+}
- // Check the prototype chain.
- Register reg = CheckPrototypes(
- object, receiver, holder, scratch1, scratch2, scratch3, name, miss);
- // Get the value from the properties.
- GenerateFastPropertyLoad(masm(), eax, reg, holder, index);
- __ ret(0);
+void StoreStubCompiler::HandlerFrontendFooter(Handle<Name> name,
+ Label* success,
+ Label* miss) {
+ if (!miss->is_unused()) {
+ __ jmp(success);
+ GenerateRestoreName(masm(), miss, name);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+ }
}
-void StubCompiler::GenerateDictionaryLoadCallback(Register receiver,
- Register name_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Handle<AccessorInfo> callback,
- Handle<String> name,
- Label* miss) {
- ASSERT(!receiver.is(scratch2));
- ASSERT(!receiver.is(scratch3));
- Register dictionary = scratch1;
- bool must_preserve_dictionary_reg = receiver.is(dictionary);
-
- // Load the properties dictionary.
- if (must_preserve_dictionary_reg) {
- __ push(dictionary);
- }
- __ mov(dictionary, FieldOperand(receiver, JSObject::kPropertiesOffset));
+Register LoadStubCompiler::CallbackHandlerFrontend(
+ Handle<JSObject> object,
+ Register object_reg,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ Label* success,
+ Handle<Object> callback) {
+ Label miss;
+
+ Register reg = HandlerFrontendHeader(object, object_reg, holder, name, &miss);
+
+ if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) {
+ ASSERT(!reg.is(scratch2()));
+ ASSERT(!reg.is(scratch3()));
+ Register dictionary = scratch1();
+ bool must_preserve_dictionary_reg = reg.is(dictionary);
+
+ // Load the properties dictionary.
+ if (must_preserve_dictionary_reg) {
+ __ push(dictionary);
+ }
+ __ mov(dictionary, FieldOperand(reg, JSObject::kPropertiesOffset));
- // Probe the dictionary.
- Label probe_done, pop_and_miss;
- StringDictionaryLookupStub::GeneratePositiveLookup(masm(),
+ // Probe the dictionary.
+ Label probe_done, pop_and_miss;
+ NameDictionaryLookupStub::GeneratePositiveLookup(masm(),
&pop_and_miss,
&probe_done,
dictionary,
- name_reg,
- scratch2,
- scratch3);
- __ bind(&pop_and_miss);
- if (must_preserve_dictionary_reg) {
- __ pop(dictionary);
- }
- __ jmp(miss);
- __ bind(&probe_done);
-
- // If probing finds an entry in the dictionary, scratch2 contains the
- // index into the dictionary. Check that the value is the callback.
- Register index = scratch2;
- const int kElementsStartOffset =
- StringDictionary::kHeaderSize +
- StringDictionary::kElementsStartIndex * kPointerSize;
- const int kValueOffset = kElementsStartOffset + kPointerSize;
- __ mov(scratch3,
- Operand(dictionary, index, times_4, kValueOffset - kHeapObjectTag));
- if (must_preserve_dictionary_reg) {
- __ pop(dictionary);
+ this->name(),
+ scratch2(),
+ scratch3());
+ __ bind(&pop_and_miss);
+ if (must_preserve_dictionary_reg) {
+ __ pop(dictionary);
+ }
+ __ jmp(&miss);
+ __ bind(&probe_done);
+
+ // If probing finds an entry in the dictionary, scratch2 contains the
+ // index into the dictionary. Check that the value is the callback.
+ Register index = scratch2();
+ const int kElementsStartOffset =
+ NameDictionary::kHeaderSize +
+ NameDictionary::kElementsStartIndex * kPointerSize;
+ const int kValueOffset = kElementsStartOffset + kPointerSize;
+ __ mov(scratch3(),
+ Operand(dictionary, index, times_4, kValueOffset - kHeapObjectTag));
+ if (must_preserve_dictionary_reg) {
+ __ pop(dictionary);
+ }
+ __ cmp(scratch3(), callback);
+ __ j(not_equal, &miss);
}
- __ cmp(scratch3, callback);
- __ j(not_equal, miss);
+
+ HandlerFrontendFooter(name, success, &miss);
+ return reg;
}
-void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
- Handle<JSObject> holder,
- Register receiver,
- Register name_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Handle<AccessorInfo> callback,
- Handle<String> name,
- Label* miss) {
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss);
+void LoadStubCompiler::GenerateLoadField(Register reg,
+ Handle<JSObject> holder,
+ PropertyIndex field,
+ Representation representation) {
+ if (!reg.is(receiver())) __ mov(receiver(), reg);
+ if (kind() == Code::LOAD_IC) {
+ LoadFieldStub stub(field.is_inobject(holder),
+ field.translate(holder),
+ representation);
+ GenerateTailCall(masm(), stub.GetCode(isolate()));
+ } else {
+ KeyedLoadFieldStub stub(field.is_inobject(holder),
+ field.translate(holder),
+ representation);
+ GenerateTailCall(masm(), stub.GetCode(isolate()));
+ }
+}
- // Check that the maps haven't changed.
- Register reg = CheckPrototypes(object, receiver, holder, scratch1,
- scratch2, scratch3, name, miss);
- if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) {
- GenerateDictionaryLoadCallback(
- reg, name_reg, scratch1, scratch2, scratch3, callback, name, miss);
- }
+void LoadStubCompiler::GenerateLoadCallback(
+ const CallOptimization& call_optimization) {
+ GenerateFastApiCall(
+ masm(), call_optimization, receiver(), scratch3(), 0, NULL);
+}
- // Insert additional parameters into the stack frame above return address.
- ASSERT(!scratch3.is(reg));
- __ pop(scratch3); // Get return address to place it below.
- __ push(receiver); // receiver
- __ mov(scratch2, esp);
- ASSERT(!scratch2.is(reg));
- __ push(reg); // holder
- // Push data from AccessorInfo.
+void LoadStubCompiler::GenerateLoadCallback(
+ Register reg,
+ Handle<ExecutableAccessorInfo> callback) {
+ // Insert additional parameters into the stack frame above return address.
+ ASSERT(!scratch3().is(reg));
+ __ pop(scratch3()); // Get return address to place it below.
+
+ STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 0);
+ STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 1);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 2);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 3);
+ STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4);
+ STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5);
+ __ push(receiver()); // receiver
+ // Push data from ExecutableAccessorInfo.
if (isolate()->heap()->InNewSpace(callback->data())) {
- __ mov(scratch1, Immediate(callback));
- __ push(FieldOperand(scratch1, AccessorInfo::kDataOffset));
+ ASSERT(!scratch2().is(reg));
+ __ mov(scratch2(), Immediate(callback));
+ __ push(FieldOperand(scratch2(), ExecutableAccessorInfo::kDataOffset));
} else {
- __ push(Immediate(Handle<Object>(callback->data())));
+ __ push(Immediate(Handle<Object>(callback->data(), isolate())));
}
+ __ push(Immediate(isolate()->factory()->undefined_value())); // ReturnValue
+ // ReturnValue default value
+ __ push(Immediate(isolate()->factory()->undefined_value()));
__ push(Immediate(reinterpret_cast<int>(isolate())));
+ __ push(reg); // holder
- // Save a pointer to where we pushed the arguments pointer.
- // This will be passed as the const AccessorInfo& to the C++ callback.
- __ push(scratch2);
+ // Save a pointer to where we pushed the arguments. This will be
+ // passed as the const PropertyAccessorInfo& to the C++ callback.
+ __ push(esp);
- __ push(name_reg); // name
+ __ push(name()); // name
__ mov(ebx, esp); // esp points to reference to name (handler).
- __ push(scratch3); // Restore return address.
+ __ push(scratch3()); // Restore return address.
- // 4 elements array for v8::Arguments::values_, handler for name and pointer
+ // Array for v8::Arguments::values_, handler for name and pointer
 // to the values (it is considered a smi by the GC).
- const int kStackSpace = 6;
- const int kApiArgc = 2;
+ const int kStackSpace = PropertyCallbackArguments::kArgsLength + 2;
+ // Allocate space for optional callback address parameter in case
+ // the CPU profiler is active.
+ const int kApiArgc = 2 + 1;
+ Address getter_address = v8::ToCData<Address>(callback->getter());
__ PrepareCallApiFunction(kApiArgc);
__ mov(ApiParameterOperand(0), ebx); // name.
__ add(ebx, Immediate(kPointerSize));
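
A reading aid, not part of the patch: the slot order the pushes above build for the property callback, index 0 on top of the block (pushed last). The values mirror the STATIC_ASSERTed PropertyCallbackArguments layout; the enum itself is illustrative.

// Sketch only: argument slots as seen by the property callback.
enum PropertyCallbackSlot {
  kSlotHolder = 0,              // holder object (pushed last)
  kSlotIsolate = 1,             // raw isolate pointer
  kSlotReturnValueDefault = 2,  // undefined
  kSlotReturnValue = 3,         // undefined
  kSlotData = 4,                // callback->data()
  kSlotThis = 5                 // receiver (pushed first)
};
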
@@ -1167,49 +1429,34 @@ void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
// already generated). Do not allow the assembler to perform a
// garbage collection but instead return the allocation failure
// object.
- Address getter_address = v8::ToCData<Address>(callback->getter());
- __ CallApiFunctionAndReturn(getter_address, kStackSpace);
-}
+ Address thunk_address = FUNCTION_ADDR(&InvokeAccessorGetterCallback);
-void StubCompiler::GenerateLoadConstant(Handle<JSObject> object,
- Handle<JSObject> holder,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Handle<JSFunction> value,
- Handle<String> name,
- Label* miss) {
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss);
+ __ CallApiFunctionAndReturn(getter_address,
+ thunk_address,
+ ApiParameterOperand(2),
+ kStackSpace,
+ Operand(ebp, 7 * kPointerSize),
+ NULL);
+}
- // Check that the maps haven't changed.
- CheckPrototypes(
- object, receiver, holder, scratch1, scratch2, scratch3, name, miss);
+void LoadStubCompiler::GenerateLoadConstant(Handle<Object> value) {
// Return the constant value.
- __ LoadHeapObject(eax, value);
+ __ LoadObject(eax, value);
__ ret(0);
}
-void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
- Handle<JSObject> interceptor_holder,
- LookupResult* lookup,
- Register receiver,
- Register name_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Handle<String> name,
- Label* miss) {
+void LoadStubCompiler::GenerateLoadInterceptor(
+ Register holder_reg,
+ Handle<JSObject> object,
+ Handle<JSObject> interceptor_holder,
+ LookupResult* lookup,
+ Handle<Name> name) {
ASSERT(interceptor_holder->HasNamedInterceptor());
ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss);
-
 // So far the most popular follow-ups for interceptor loads are FIELD
 // and CALLBACKS, so inline only them; other cases may be added
// later.
@@ -1218,8 +1465,9 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
if (lookup->IsField()) {
compile_followup_inline = true;
} else if (lookup->type() == CALLBACKS &&
- lookup->GetCallbackObject()->IsAccessorInfo()) {
- AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
+ lookup->GetCallbackObject()->IsExecutableAccessorInfo()) {
+ ExecutableAccessorInfo* callback =
+ ExecutableAccessorInfo::cast(lookup->GetCallbackObject());
compile_followup_inline = callback->getter() != NULL &&
callback->IsCompatibleReceiver(*object);
}
@@ -1229,17 +1477,14 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
// Compile the interceptor call, followed by inline code to load the
// property from further up the prototype chain if the call fails.
// Check that the maps haven't changed.
- Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, scratch3,
- name, miss);
- ASSERT(holder_reg.is(receiver) || holder_reg.is(scratch1));
+ ASSERT(holder_reg.is(receiver()) || holder_reg.is(scratch1()));
// Preserve the receiver register explicitly whenever it is different from
// the holder and it is needed should the interceptor return without any
 // result. The CALLBACKS case needs the receiver to be passed into C++ code;
// the FIELD case might cause a miss during the prototype check.
bool must_perfrom_prototype_check = *interceptor_holder != lookup->holder();
- bool must_preserve_receiver_reg = !receiver.is(holder_reg) &&
+ bool must_preserve_receiver_reg = !receiver().is(holder_reg) &&
(lookup->type() == CALLBACKS || must_perfrom_prototype_check);
// Save necessary data before invoking an interceptor.
@@ -1248,18 +1493,18 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
FrameScope frame_scope(masm(), StackFrame::INTERNAL);
if (must_preserve_receiver_reg) {
- __ push(receiver);
+ __ push(receiver());
}
__ push(holder_reg);
- __ push(name_reg);
+ __ push(this->name());
// Invoke an interceptor. Note: map checks from receiver to
 // interceptor's holder have been compiled before (see a caller
 // of this method).
CompileCallLoadPropertyWithInterceptor(masm(),
- receiver,
+ receiver(),
holder_reg,
- name_reg,
+ this->name(),
interceptor_holder);
// Check if interceptor provided a value for property. If it's
@@ -1273,86 +1518,38 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
// Clobber registers when generating debug-code to provoke errors.
__ bind(&interceptor_failed);
if (FLAG_debug_code) {
- __ mov(receiver, Immediate(BitCast<int32_t>(kZapValue)));
+ __ mov(receiver(), Immediate(BitCast<int32_t>(kZapValue)));
__ mov(holder_reg, Immediate(BitCast<int32_t>(kZapValue)));
- __ mov(name_reg, Immediate(BitCast<int32_t>(kZapValue)));
+ __ mov(this->name(), Immediate(BitCast<int32_t>(kZapValue)));
}
- __ pop(name_reg);
+ __ pop(this->name());
__ pop(holder_reg);
if (must_preserve_receiver_reg) {
- __ pop(receiver);
+ __ pop(receiver());
}
// Leave the internal frame.
}
- // Check that the maps from interceptor's holder to lookup's holder
- // haven't changed. And load lookup's holder into holder_reg.
- if (must_perfrom_prototype_check) {
- holder_reg = CheckPrototypes(interceptor_holder,
- holder_reg,
- Handle<JSObject>(lookup->holder()),
- scratch1,
- scratch2,
- scratch3,
- name,
- miss);
- }
-
- if (lookup->IsField()) {
- // We found FIELD property in prototype chain of interceptor's holder.
- // Retrieve a field from field's holder.
- GenerateFastPropertyLoad(masm(), eax, holder_reg,
- Handle<JSObject>(lookup->holder()),
- lookup->GetFieldIndex());
- __ ret(0);
- } else {
- // We found CALLBACKS property in prototype chain of interceptor's
- // holder.
- ASSERT(lookup->type() == CALLBACKS);
- Handle<AccessorInfo> callback(
- AccessorInfo::cast(lookup->GetCallbackObject()));
- ASSERT(callback->getter() != NULL);
-
- // Tail call to runtime.
- // Important invariant in CALLBACKS case: the code above must be
- // structured to never clobber |receiver| register.
- __ pop(scratch2); // return address
- __ push(receiver);
- __ push(holder_reg);
- __ mov(holder_reg, Immediate(callback));
- __ push(FieldOperand(holder_reg, AccessorInfo::kDataOffset));
- __ push(Immediate(reinterpret_cast<int>(isolate())));
- __ push(holder_reg);
- __ push(name_reg);
- __ push(scratch2); // restore return address
-
- ExternalReference ref =
- ExternalReference(IC_Utility(IC::kLoadCallbackProperty),
- masm()->isolate());
- __ TailCallExternalReference(ref, 6, 1);
- }
+ GenerateLoadPostInterceptor(holder_reg, interceptor_holder, name, lookup);
} else { // !compile_followup_inline
// Call the runtime system to load the interceptor.
// Check that the maps haven't changed.
- Register holder_reg =
- CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, scratch3, name, miss);
- __ pop(scratch2); // save old return address
- PushInterceptorArguments(masm(), receiver, holder_reg,
- name_reg, interceptor_holder);
- __ push(scratch2); // restore old return address
+ __ pop(scratch2()); // save old return address
+ PushInterceptorArguments(masm(), receiver(), holder_reg,
+ this->name(), interceptor_holder);
+ __ push(scratch2()); // restore old return address
ExternalReference ref =
ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForLoad),
isolate());
- __ TailCallExternalReference(ref, 6, 1);
+ __ TailCallExternalReference(ref, StubCache::kInterceptorArgsLength, 1);
}
}
-void CallStubCompiler::GenerateNameCheck(Handle<String> name, Label* miss) {
+void CallStubCompiler::GenerateNameCheck(Handle<Name> name, Label* miss) {
if (kind_ == Code::KEYED_CALL_IC) {
__ cmp(ecx, Immediate(name));
__ j(not_equal, miss);
@@ -1362,7 +1559,7 @@ void CallStubCompiler::GenerateNameCheck(Handle<String> name, Label* miss) {
void CallStubCompiler::GenerateGlobalReceiverCheck(Handle<JSObject> object,
Handle<JSObject> holder,
- Handle<String> name,
+ Handle<Name> name,
Label* miss) {
ASSERT(holder->IsGlobalObject());
@@ -1380,15 +1577,15 @@ void CallStubCompiler::GenerateGlobalReceiverCheck(Handle<JSObject> object,
void CallStubCompiler::GenerateLoadFunctionFromCell(
- Handle<JSGlobalPropertyCell> cell,
+ Handle<Cell> cell,
Handle<JSFunction> function,
Label* miss) {
// Get the value from the cell.
if (Serializer::enabled()) {
__ mov(edi, Immediate(cell));
- __ mov(edi, FieldOperand(edi, JSGlobalPropertyCell::kValueOffset));
+ __ mov(edi, FieldOperand(edi, Cell::kValueOffset));
} else {
- __ mov(edi, Operand::Cell(cell));
+ __ mov(edi, Operand::ForCell(cell));
}
// Check that the cell contains the same function.
@@ -1423,8 +1620,8 @@ void CallStubCompiler::GenerateMissBranch() {
Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
Handle<JSObject> holder,
- int index,
- Handle<String> name) {
+ PropertyIndex index,
+ Handle<Name> name) {
// ----------- S t a t e -------------
// -- ecx : name
// -- esp[0] : return address
@@ -1447,7 +1644,9 @@ Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
Register reg = CheckPrototypes(object, edx, holder, ebx, eax, edi,
name, &miss);
- GenerateFastPropertyLoad(masm(), edi, reg, holder, index);
+ GenerateFastPropertyLoad(
+ masm(), edi, reg, index.is_inobject(holder),
+ index.translate(holder), Representation::Tagged());
// Check that the function really is a function.
__ JumpIfSmi(edi, &miss);
@@ -1477,12 +1676,59 @@ Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
}
+Handle<Code> CallStubCompiler::CompileArrayCodeCall(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<Cell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name,
+ Code::StubType type) {
+ Label miss;
+
+ // Check that the function is still the Array function.
+ const int argc = arguments().immediate();
+ GenerateNameCheck(name, &miss);
+
+ if (cell.is_null()) {
+ // Get the receiver from the stack.
+ __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(edx, &miss);
+ CheckPrototypes(Handle<JSObject>::cast(object), edx, holder, ebx, eax, edi,
+ name, &miss);
+ } else {
+ ASSERT(cell->value() == *function);
+ GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
+ &miss);
+ GenerateLoadFunctionFromCell(cell, function, &miss);
+ }
+
+ Handle<AllocationSite> site = isolate()->factory()->NewAllocationSite();
+ site->set_transition_info(Smi::FromInt(GetInitialFastElementsKind()));
+ Handle<Cell> site_feedback_cell = isolate()->factory()->NewCell(site);
+ __ mov(eax, Immediate(argc));
+ __ mov(ebx, site_feedback_cell);
+ __ mov(edi, function);
+
+ ArrayConstructorStub stub(isolate());
+ __ TailCallStub(&stub);
+
+ __ bind(&miss);
+ GenerateMissBranch();
+
+ // Return the generated code.
+ return GetCode(type, name);
+}
+
+
Handle<Code> CallStubCompiler::CompileArrayPushCall(
Handle<Object> object,
Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<Cell> cell,
Handle<JSFunction> function,
- Handle<String> name) {
+ Handle<String> name,
+ Code::StubType type) {
// ----------- S t a t e -------------
// -- ecx : name
// -- esp[0] : return address
@@ -1518,7 +1764,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
Label call_builtin;
if (argc == 1) { // Otherwise fall through to call builtin.
- Label attempt_to_grow_elements, with_write_barrier;
+ Label attempt_to_grow_elements, with_write_barrier, check_double;
// Get the elements array of the object.
__ mov(edi, FieldOperand(edx, JSArray::kElementsOffset));
@@ -1526,7 +1772,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
// Check that the elements are in fast mode and writable.
__ cmp(FieldOperand(edi, HeapObject::kMapOffset),
Immediate(factory()->fixed_array_map()));
- __ j(not_equal, &call_builtin);
+ __ j(not_equal, &check_double);
// Get the array's length into eax and calculate new length.
__ mov(eax, FieldOperand(edx, JSArray::kLengthOffset));
@@ -1557,17 +1803,49 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ ret((argc + 1) * kPointerSize);
+ __ bind(&check_double);
+
+ // Check that the elements are in double mode.
+ __ cmp(FieldOperand(edi, HeapObject::kMapOffset),
+ Immediate(factory()->fixed_double_array_map()));
+ __ j(not_equal, &call_builtin);
+
+ // Get the array's length into eax and calculate new length.
+ __ mov(eax, FieldOperand(edx, JSArray::kLengthOffset));
+ STATIC_ASSERT(kSmiTagSize == 1);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ add(eax, Immediate(Smi::FromInt(argc)));
+
+ // Get the elements' length into ecx.
+ __ mov(ecx, FieldOperand(edi, FixedArray::kLengthOffset));
+
+ // Check if we could survive without allocation.
+ __ cmp(eax, ecx);
+ __ j(greater, &call_builtin);
+
+ __ mov(ecx, Operand(esp, argc * kPointerSize));
+ __ StoreNumberToDoubleElements(
+ ecx, edi, eax, ecx, xmm0, &call_builtin, true, argc * kDoubleSize);
+
+ // Save new length.
+ __ mov(FieldOperand(edx, JSArray::kLengthOffset), eax);
+ __ ret((argc + 1) * kPointerSize);
+
__ bind(&with_write_barrier);
__ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
- if (FLAG_smi_only_arrays && !FLAG_trace_elements_transitions) {
+ if (FLAG_smi_only_arrays && !FLAG_trace_elements_transitions) {
Label fast_object, not_fast_object;
__ CheckFastObjectElements(ebx, &not_fast_object, Label::kNear);
__ jmp(&fast_object);
// In case of fast smi-only, convert to fast object, otherwise bail out.
__ bind(&not_fast_object);
__ CheckFastSmiElements(ebx, &call_builtin);
+ __ cmp(FieldOperand(ecx, HeapObject::kMapOffset),
+ Immediate(factory()->heap_number_map()));
+ __ j(equal, &call_builtin);
// edi: elements array
// edx: receiver
// ebx: map
@@ -1579,7 +1857,9 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
&try_holey_map);
ElementsTransitionGenerator::
- GenerateMapChangeElementsTransition(masm());
+ GenerateMapChangeElementsTransition(masm(),
+ DONT_TRACK_ALLOCATION_SITE,
+ NULL);
// Restore edi.
__ mov(edi, FieldOperand(edx, JSArray::kElementsOffset));
__ jmp(&fast_object);
@@ -1591,7 +1871,9 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
edi,
&call_builtin);
ElementsTransitionGenerator::
- GenerateMapChangeElementsTransition(masm());
+ GenerateMapChangeElementsTransition(masm(),
+ DONT_TRACK_ALLOCATION_SITE,
+ NULL);
// Restore edi.
__ mov(edi, FieldOperand(edx, JSArray::kElementsOffset));
__ bind(&fast_object);
@@ -1695,16 +1977,17 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
GenerateMissBranch();
// Return the generated code.
- return GetCode(function);
+ return GetCode(type, name);
}
Handle<Code> CallStubCompiler::CompileArrayPopCall(
Handle<Object> object,
Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<Cell> cell,
Handle<JSFunction> function,
- Handle<String> name) {
+ Handle<String> name,
+ Code::StubType type) {
// ----------- S t a t e -------------
// -- ecx : name
// -- esp[0] : return address
@@ -1777,16 +2060,17 @@ Handle<Code> CallStubCompiler::CompileArrayPopCall(
GenerateMissBranch();
// Return the generated code.
- return GetCode(function);
+ return GetCode(type, name);
}
Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
Handle<Object> object,
Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<Cell> cell,
Handle<JSFunction> function,
- Handle<String> name) {
+ Handle<String> name,
+ Code::StubType type) {
// ----------- S t a t e -------------
// -- ecx : function name
// -- esp[0] : return address
@@ -1821,8 +2105,9 @@ Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
eax,
&miss);
ASSERT(!object.is_identical_to(holder));
- CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())),
- eax, holder, ebx, edx, edi, name, &miss);
+ CheckPrototypes(
+ Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
+ eax, holder, ebx, edx, edi, name, &miss);
Register receiver = ebx;
Register index = edi;
@@ -1860,16 +2145,17 @@ Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
GenerateMissBranch();
// Return the generated code.
- return GetCode(function);
+ return GetCode(type, name);
}
Handle<Code> CallStubCompiler::CompileStringCharAtCall(
Handle<Object> object,
Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<Cell> cell,
Handle<JSFunction> function,
- Handle<String> name) {
+ Handle<String> name,
+ Code::StubType type) {
// ----------- S t a t e -------------
// -- ecx : function name
// -- esp[0] : return address
@@ -1904,8 +2190,9 @@ Handle<Code> CallStubCompiler::CompileStringCharAtCall(
eax,
&miss);
ASSERT(!object.is_identical_to(holder));
- CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())),
- eax, holder, ebx, edx, edi, name, &miss);
+ CheckPrototypes(
+ Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
+ eax, holder, ebx, edx, edi, name, &miss);
Register receiver = eax;
Register index = edi;
@@ -1945,16 +2232,17 @@ Handle<Code> CallStubCompiler::CompileStringCharAtCall(
GenerateMissBranch();
// Return the generated code.
- return GetCode(function);
+ return GetCode(type, name);
}
Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
Handle<Object> object,
Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<Cell> cell,
Handle<JSFunction> function,
- Handle<String> name) {
+ Handle<String> name,
+ Code::StubType type) {
// ----------- S t a t e -------------
// -- ecx : function name
// -- esp[0] : return address
@@ -2012,24 +2300,26 @@ Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
? CALL_AS_FUNCTION
: CALL_AS_METHOD;
- __ InvokeFunction(function, arguments(), JUMP_FUNCTION,
- NullCallWrapper(), call_kind);
+ ParameterCount expected(function);
+ __ InvokeFunction(function, expected, arguments(),
+ JUMP_FUNCTION, NullCallWrapper(), call_kind);
__ bind(&miss);
// ecx: function name.
GenerateMissBranch();
// Return the generated code.
- return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
+ return GetCode(type, name);
}
Handle<Code> CallStubCompiler::CompileMathFloorCall(
Handle<Object> object,
Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<Cell> cell,
Handle<JSFunction> function,
- Handle<String> name) {
+ Handle<String> name,
+ Code::StubType type) {
// ----------- S t a t e -------------
// -- ecx : name
// -- esp[0] : return address
@@ -2042,7 +2332,7 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
return Handle<Code>::null();
}
- CpuFeatures::Scope use_sse2(SSE2);
+ CpuFeatureScope use_sse2(masm(), SSE2);
const int argc = arguments().immediate();
@@ -2081,7 +2371,7 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
// Check if the argument is a heap number and load its value into xmm0.
Label slow;
__ CheckMap(eax, factory()->heap_number_map(), &slow, DONT_DO_SMI_CHECK);
- __ movdbl(xmm0, FieldOperand(eax, HeapNumber::kValueOffset));
+ __ movsd(xmm0, FieldOperand(eax, HeapNumber::kValueOffset));
// Check if the argument is strictly positive. Note this also
// discards NaN.
@@ -2131,7 +2421,7 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
// Return a new heap number.
__ AllocateHeapNumber(eax, ebx, edx, &slow);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+ __ movsd(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
__ ret(2 * kPointerSize);
// Return the argument (when it's an already round heap number).
@@ -2142,24 +2432,26 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
// Tail call the full function. We do not have to patch the receiver
// because the function makes no use of it.
__ bind(&slow);
- __ InvokeFunction(function, arguments(), JUMP_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
+ ParameterCount expected(function);
+ __ InvokeFunction(function, expected, arguments(),
+ JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
__ bind(&miss);
// ecx: function name.
GenerateMissBranch();
// Return the generated code.
- return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
+ return GetCode(type, name);
}
Handle<Code> CallStubCompiler::CompileMathAbsCall(
Handle<Object> object,
Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<Cell> cell,
Handle<JSFunction> function,
- Handle<String> name) {
+ Handle<String> name,
+ Code::StubType type) {
// ----------- S t a t e -------------
// -- ecx : name
// -- esp[0] : return address
@@ -2202,6 +2494,8 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall(
STATIC_ASSERT(kSmiTag == 0);
__ JumpIfNotSmi(eax, &not_smi);
+ // Branchless abs implementation; see:
+ // http://graphics.stanford.edu/~seander/bithacks.html#IntegerAbs
// Set ebx to 1...1 (== -1) if the argument is negative, or to 0...0
// otherwise.
__ mov(ebx, eax);
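
An editorial aside, not part of the patch: the bit trick referenced above, written out in plain C++ for the smi case.

#include <cstdint>

// Sketch only: branchless absolute value. Assumes arithmetic right shift for
// negative values (as on ia32) and an input other than INT32_MIN.
inline int32_t BranchlessAbs(int32_t x) {
  int32_t mask = x >> 31;    // 0...0 if x >= 0, 1...1 (== -1) if x < 0
  return (x ^ mask) - mask;  // unchanged if non-negative, negated otherwise
}
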
@@ -2247,15 +2541,16 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall(
// Tail call the full function. We do not have to patch the receiver
// because the function makes no use of it.
__ bind(&slow);
- __ InvokeFunction(function, arguments(), JUMP_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
+ ParameterCount expected(function);
+ __ InvokeFunction(function, expected, arguments(),
+ JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
__ bind(&miss);
// ecx: function name.
GenerateMissBranch();
// Return the generated code.
- return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
+ return GetCode(type, name);
}
@@ -2263,7 +2558,7 @@ Handle<Code> CallStubCompiler::CompileFastApiCall(
const CallOptimization& optimization,
Handle<Object> object,
Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<Cell> cell,
Handle<JSFunction> function,
Handle<String> name) {
ASSERT(optimization.is_simple_api_call());
@@ -2300,12 +2595,12 @@ Handle<Code> CallStubCompiler::CompileFastApiCall(
name, depth, &miss);
// Move the return address on top of the stack.
- __ mov(eax, Operand(esp, 4 * kPointerSize));
+ __ mov(eax, Operand(esp, kFastApiCallArguments * kPointerSize));
__ mov(Operand(esp, 0 * kPointerSize), eax);
// esp[2 * kPointerSize] is uninitialized, esp[3 * kPointerSize] contains
// duplicate of return address and will be overwritten.
- GenerateFastApiCall(masm(), optimization, argc);
+ GenerateFastApiCall(masm(), optimization, argc, false);
__ bind(&miss);
__ add(esp, Immediate(kFastApiCallArguments * kPointerSize));
@@ -2318,11 +2613,11 @@ Handle<Code> CallStubCompiler::CompileFastApiCall(
}
-Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object,
- Handle<JSObject> holder,
- Handle<JSFunction> function,
- Handle<String> name,
- CheckType check) {
+void CallStubCompiler::CompileHandlerFrontend(Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ CheckType check,
+ Label* success) {
// ----------- S t a t e -------------
// -- ecx : name
// -- esp[0] : return address
@@ -2330,15 +2625,6 @@ Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object,
// -- ...
// -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
-
- if (HasCustomCallGenerator(function)) {
- Handle<Code> code = CompileCustomCall(object, holder,
- Handle<JSGlobalPropertyCell>::null(),
- function, name);
- // A null handle means bail out to the regular compiler code below.
- if (!code.is_null()) return code;
- }
-
Label miss;
GenerateNameCheck(name, &miss);
@@ -2371,76 +2657,101 @@ Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object,
break;
case STRING_CHECK:
- if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
- // Check that the object is a string or a symbol.
- __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, eax);
- __ j(above_equal, &miss);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::STRING_FUNCTION_INDEX, eax, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype())),
- eax, holder, ebx, edx, edi, name, &miss);
- } else {
- // Calling non-strict non-builtins with a value as the receiver
- // requires boxing.
- __ jmp(&miss);
- }
+ // Check that the object is a string.
+ __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, eax);
+ __ j(above_equal, &miss);
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateDirectLoadGlobalFunctionPrototype(
+ masm(), Context::STRING_FUNCTION_INDEX, eax, &miss);
+ CheckPrototypes(
+ Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
+ eax, holder, ebx, edx, edi, name, &miss);
break;
- case NUMBER_CHECK:
- if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
- Label fast;
- // Check that the object is a smi or a heap number.
- __ JumpIfSmi(edx, &fast);
- __ CmpObjectType(edx, HEAP_NUMBER_TYPE, eax);
- __ j(not_equal, &miss);
- __ bind(&fast);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::NUMBER_FUNCTION_INDEX, eax, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype())),
- eax, holder, ebx, edx, edi, name, &miss);
- } else {
- // Calling non-strict non-builtins with a value as the receiver
- // requires boxing.
- __ jmp(&miss);
- }
+ case SYMBOL_CHECK:
+ // Check that the object is a symbol.
+ __ CmpObjectType(edx, SYMBOL_TYPE, eax);
+ __ j(not_equal, &miss);
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateDirectLoadGlobalFunctionPrototype(
+ masm(), Context::SYMBOL_FUNCTION_INDEX, eax, &miss);
+ CheckPrototypes(
+ Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
+ eax, holder, ebx, edx, edi, name, &miss);
break;
- case BOOLEAN_CHECK:
- if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
- Label fast;
- // Check that the object is a boolean.
- __ cmp(edx, factory()->true_value());
- __ j(equal, &fast);
- __ cmp(edx, factory()->false_value());
- __ j(not_equal, &miss);
- __ bind(&fast);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::BOOLEAN_FUNCTION_INDEX, eax, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype())),
- eax, holder, ebx, edx, edi, name, &miss);
- } else {
- // Calling non-strict non-builtins with a value as the receiver
- // requires boxing.
- __ jmp(&miss);
- }
+ case NUMBER_CHECK: {
+ Label fast;
+ // Check that the object is a smi or a heap number.
+ __ JumpIfSmi(edx, &fast);
+ __ CmpObjectType(edx, HEAP_NUMBER_TYPE, eax);
+ __ j(not_equal, &miss);
+ __ bind(&fast);
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateDirectLoadGlobalFunctionPrototype(
+ masm(), Context::NUMBER_FUNCTION_INDEX, eax, &miss);
+ CheckPrototypes(
+ Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
+ eax, holder, ebx, edx, edi, name, &miss);
+ break;
+ }
+ case BOOLEAN_CHECK: {
+ Label fast;
+ // Check that the object is a boolean.
+ __ cmp(edx, factory()->true_value());
+ __ j(equal, &fast);
+ __ cmp(edx, factory()->false_value());
+ __ j(not_equal, &miss);
+ __ bind(&fast);
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateDirectLoadGlobalFunctionPrototype(
+ masm(), Context::BOOLEAN_FUNCTION_INDEX, eax, &miss);
+ CheckPrototypes(
+ Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
+ eax, holder, ebx, edx, edi, name, &miss);
break;
+ }
}
- CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- __ InvokeFunction(function, arguments(), JUMP_FUNCTION,
- NullCallWrapper(), call_kind);
+ __ jmp(success);
// Handle call cache miss.
__ bind(&miss);
GenerateMissBranch();
+}
+
+
+void CallStubCompiler::CompileHandlerBackend(Handle<JSFunction> function) {
+ CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
+ ? CALL_AS_FUNCTION
+ : CALL_AS_METHOD;
+ ParameterCount expected(function);
+ __ InvokeFunction(function, expected, arguments(),
+ JUMP_FUNCTION, NullCallWrapper(), call_kind);
+}
+
+
+Handle<Code> CallStubCompiler::CompileCallConstant(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ CheckType check,
+ Handle<JSFunction> function) {
+
+ if (HasCustomCallGenerator(function)) {
+ Handle<Code> code = CompileCustomCall(object, holder,
+ Handle<Cell>::null(),
+ function, Handle<String>::cast(name),
+ Code::CONSTANT);
+ // A null handle means bail out to the regular compiler code below.
+ if (!code.is_null()) return code;
+ }
+
+ Label success;
+
+ CompileHandlerFrontend(object, holder, name, check, &success);
+ __ bind(&success);
+ CompileHandlerBackend(function);
// Return the generated code.
return GetCode(function);
@@ -2449,7 +2760,7 @@ Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object,
Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
Handle<JSObject> holder,
- Handle<String> name) {
+ Handle<Name> name) {
// ----------- S t a t e -------------
// -- ecx : name
// -- esp[0] : return address
@@ -2509,9 +2820,9 @@ Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
Handle<Code> CallStubCompiler::CompileCallGlobal(
Handle<JSObject> object,
Handle<GlobalObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<PropertyCell> cell,
Handle<JSFunction> function,
- Handle<String> name) {
+ Handle<Name> name) {
// ----------- S t a t e -------------
// -- ecx : name
// -- esp[0] : return address
@@ -2521,7 +2832,9 @@ Handle<Code> CallStubCompiler::CompileCallGlobal(
// -----------------------------------
if (HasCustomCallGenerator(function)) {
- Handle<Code> code = CompileCustomCall(object, holder, cell, function, name);
+ Handle<Code> code = CompileCustomCall(
+ object, holder, cell, function, Handle<String>::cast(name),
+ Code::NORMAL);
// A null handle means bail out to the regular compiler code below.
if (!code.is_null()) return code;
}
@@ -2567,80 +2880,47 @@ Handle<Code> CallStubCompiler::CompileCallGlobal(
}
-Handle<Code> StoreStubCompiler::CompileStoreField(Handle<JSObject> object,
- int index,
- Handle<Map> transition,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
+Handle<Code> StoreStubCompiler::CompileStoreCallback(
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ Handle<ExecutableAccessorInfo> callback) {
+ Label success;
+ HandlerFrontend(object, receiver(), holder, name, &success);
+ __ bind(&success);
+
+ __ pop(scratch1()); // remove the return address
+ __ push(receiver());
+ __ Push(callback);
+ __ Push(name);
+ __ push(value());
+ __ push(scratch1()); // restore return address
- // Generate store field code. Trashes the name register.
- GenerateStoreField(masm(),
- object,
- index,
- transition,
- name,
- edx, ecx, ebx, edi,
- &miss);
- // Handle store cache miss.
- __ bind(&miss);
- __ mov(ecx, Immediate(name)); // restore name
- Handle<Code> ic = isolate()->builtins()->StoreIC_Miss();
- __ jmp(ic, RelocInfo::CODE_TARGET);
+ // Do tail-call to the runtime system.
+ ExternalReference store_callback_property =
+ ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate());
+ __ TailCallExternalReference(store_callback_property, 4, 1);
// Return the generated code.
- return GetCode(transition.is_null()
- ? Code::FIELD
- : Code::MAP_TRANSITION, name);
+ return GetCode(kind(), Code::CALLBACKS, name);
}
Handle<Code> StoreStubCompiler::CompileStoreCallback(
- Handle<String> name,
- Handle<JSObject> receiver,
+ Handle<JSObject> object,
Handle<JSObject> holder,
- Handle<AccessorInfo> callback) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
- // Check that the maps haven't changed, preserving the value register.
- __ push(eax);
- __ JumpIfSmi(edx, &miss);
- CheckPrototypes(receiver, edx, holder, ebx, eax, edi, name, &miss);
- __ pop(eax); // restore value
-
- // Stub never generated for non-global objects that require access checks.
- ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
-
- __ pop(ebx); // remove the return address
- __ push(edx); // receiver
- __ push(Immediate(callback)); // callback info
- __ push(ecx); // name
- __ push(eax); // value
- __ push(ebx); // restore return address
-
- // Do tail-call to the runtime system.
- ExternalReference store_callback_property =
- ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate());
- __ TailCallExternalReference(store_callback_property, 4, 1);
+ Handle<Name> name,
+ const CallOptimization& call_optimization) {
+ Label success;
+ HandlerFrontend(object, receiver(), holder, name, &success);
+ __ bind(&success);
- // Handle store cache miss.
- __ bind(&miss);
- __ pop(eax);
- Handle<Code> ic = isolate()->builtins()->StoreIC_Miss();
- __ jmp(ic, RelocInfo::CODE_TARGET);
+ Register values[] = { value() };
+ GenerateFastApiCall(
+ masm(), call_optimization, receiver(), scratch1(), 1, values);
// Return the generated code.
- return GetCode(Code::CALLBACKS, name);
+ return GetCode(kind(), Code::CALLBACKS, name);
}
@@ -2668,8 +2948,9 @@ void StoreStubCompiler::GenerateStoreViaSetter(
__ push(edx);
__ push(eax);
ParameterCount actual(1);
- __ InvokeFunction(setter, actual, CALL_FUNCTION, NullCallWrapper(),
- CALL_AS_METHOD);
+ ParameterCount expected(setter);
+ __ InvokeFunction(setter, expected, actual,
+ CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -2690,192 +2971,23 @@ void StoreStubCompiler::GenerateStoreViaSetter(
#define __ ACCESS_MASM(masm())
-Handle<Code> StoreStubCompiler::CompileStoreViaSetter(
- Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<JSFunction> setter) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
-
- // Check that the maps haven't changed, preserving the name register.
- __ push(ecx);
- __ JumpIfSmi(edx, &miss);
- CheckPrototypes(receiver, edx, holder, ebx, ecx, edi, name, &miss);
- __ pop(ecx);
-
- GenerateStoreViaSetter(masm(), setter);
-
- __ bind(&miss);
- __ pop(ecx);
- Handle<Code> ic = isolate()->builtins()->StoreIC_Miss();
- __ jmp(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(Code::CALLBACKS, name);
-}
-
-
Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
- Handle<JSObject> receiver,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
-
- // Check that the map of the object hasn't changed.
- __ CheckMap(edx, Handle<Map>(receiver->map()),
- &miss, DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
-
- // Perform global security token check if needed.
- if (receiver->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(edx, ebx, &miss);
- }
-
- // Stub never generated for non-global objects that require access
- // checks.
- ASSERT(receiver->IsJSGlobalProxy() || !receiver->IsAccessCheckNeeded());
-
- __ pop(ebx); // remove the return address
- __ push(edx); // receiver
- __ push(ecx); // name
- __ push(eax); // value
- __ push(Immediate(Smi::FromInt(strict_mode_)));
- __ push(ebx); // restore return address
+ Handle<JSObject> object,
+ Handle<Name> name) {
+ __ pop(scratch1()); // remove the return address
+ __ push(receiver());
+ __ push(this->name());
+ __ push(value());
+ __ push(Immediate(Smi::FromInt(strict_mode())));
+ __ push(scratch1()); // restore return address
// Do tail-call to the runtime system.
ExternalReference store_ic_property =
ExternalReference(IC_Utility(IC::kStoreInterceptorProperty), isolate());
__ TailCallExternalReference(store_ic_property, 4, 1);
- // Handle store cache miss.
- __ bind(&miss);
- Handle<Code> ic = isolate()->builtins()->StoreIC_Miss();
- __ jmp(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(Code::INTERCEPTOR, name);
-}
-
-
-Handle<Code> StoreStubCompiler::CompileStoreGlobal(
- Handle<GlobalObject> object,
- Handle<JSGlobalPropertyCell> cell,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
-
- // Check that the map of the global has not changed.
- __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
- Immediate(Handle<Map>(object->map())));
- __ j(not_equal, &miss);
-
- // Compute the cell operand to use.
- __ mov(ebx, Immediate(cell));
- Operand cell_operand = FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset);
-
- // Check that the value in the cell is not the hole. If it is, this
- // cell could have been deleted and reintroducing the global needs
- // to update the property details in the property dictionary of the
- // global object. We bail out to the runtime system to do that.
- __ cmp(cell_operand, factory()->the_hole_value());
- __ j(equal, &miss);
-
- // Store the value in the cell.
- __ mov(cell_operand, eax);
- // No write barrier here, because cells are always rescanned.
-
- // Return the value (register eax).
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->named_store_global_inline(), 1);
- __ ret(0);
-
- // Handle store cache miss.
- __ bind(&miss);
- __ IncrementCounter(counters->named_store_global_inline_miss(), 1);
- Handle<Code> ic = isolate()->builtins()->StoreIC_Miss();
- __ jmp(ic, RelocInfo::CODE_TARGET);
-
// Return the generated code.
- return GetCode(Code::NORMAL, name);
-}
-
-
-Handle<Code> KeyedStoreStubCompiler::CompileStoreField(Handle<JSObject> object,
- int index,
- Handle<Map> transition,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
-
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->keyed_store_field(), 1);
-
- // Check that the name has not changed.
- __ cmp(ecx, Immediate(name));
- __ j(not_equal, &miss);
-
- // Generate store field code. Trashes the name register.
- GenerateStoreField(masm(),
- object,
- index,
- transition,
- name,
- edx, ecx, ebx, edi,
- &miss);
-
- // Handle store cache miss.
- __ bind(&miss);
- __ DecrementCounter(counters->keyed_store_field(), 1);
- Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
- __ jmp(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(transition.is_null()
- ? Code::FIELD
- : Code::MAP_TRANSITION, name);
-}
-
-
-Handle<Code> KeyedStoreStubCompiler::CompileStoreElement(
- Handle<Map> receiver_map) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- ElementsKind elements_kind = receiver_map->elements_kind();
- bool is_jsarray = receiver_map->instance_type() == JS_ARRAY_TYPE;
- Handle<Code> stub =
- KeyedStoreElementStub(is_jsarray, elements_kind, grow_mode_).GetCode();
-
- __ DispatchMap(edx, receiver_map, stub, DO_SMI_CHECK);
-
- Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
- __ jmp(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(Code::NORMAL, factory()->empty_string());
+ return GetCode(kind(), Code::INTERCEPTOR, name);
}
@@ -2883,116 +2995,91 @@ Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
MapHandleList* receiver_maps,
CodeHandleList* handler_stubs,
MapHandleList* transitioned_maps) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
Label miss;
- __ JumpIfSmi(edx, &miss, Label::kNear);
- __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
- // edi: receiver->map().
+ __ JumpIfSmi(receiver(), &miss, Label::kNear);
+ __ mov(scratch1(), FieldOperand(receiver(), HeapObject::kMapOffset));
for (int i = 0; i < receiver_maps->length(); ++i) {
- __ cmp(edi, receiver_maps->at(i));
+ __ cmp(scratch1(), receiver_maps->at(i));
if (transitioned_maps->at(i).is_null()) {
__ j(equal, handler_stubs->at(i));
} else {
Label next_map;
__ j(not_equal, &next_map, Label::kNear);
- __ mov(ebx, Immediate(transitioned_maps->at(i)));
+ __ mov(transition_map(), Immediate(transitioned_maps->at(i)));
__ jmp(handler_stubs->at(i), RelocInfo::CODE_TARGET);
__ bind(&next_map);
}
}
__ bind(&miss);
- Handle<Code> miss_ic = isolate()->builtins()->KeyedStoreIC_Miss();
- __ jmp(miss_ic, RelocInfo::CODE_TARGET);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
// Return the generated code.
- return GetCode(Code::NORMAL, factory()->empty_string(), MEGAMORPHIC);
+ return GetICCode(
+ kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
}
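
The polymorphic keyed-store stub above emits one map comparison per known receiver map: on a match it either jumps straight to the corresponding handler or, when a transitioned map was recorded for that entry, first loads the target map into transition_map(). Roughly the same dispatch in plain C++, with stand-in types rather than the real V8 classes:

#include <cstddef>

struct Map {};
typedef void (*Handler)(Map* transition_target);

// Compare the receiver map against every known map; a null transition entry
// means "jump straight to the handler", otherwise the new map is passed along.
void DispatchKeyedStore(Map* receiver_map,
                        Map* const* maps,
                        Map* const* transitions,
                        Handler const* handlers,
                        std::size_t count,
                        Handler miss) {
  for (std::size_t i = 0; i < count; ++i) {
    if (receiver_map != maps[i]) continue;
    handlers[i](transitions[i]);  // a null transition means no map change
    return;
  }
  miss(0);  // no map matched: fall through to the miss builtin
}
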
-Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<String> name,
- Handle<JSObject> object,
- Handle<JSObject> last) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(edx, &miss);
-
- ASSERT(last->IsGlobalObject() || last->HasFastProperties());
-
- // Check the maps of the full prototype chain. Also check that
- // global property cells up to (but not including) the last object
- // in the prototype chain are empty.
- CheckPrototypes(object, edx, last, ebx, eax, edi, name, &miss);
+Handle<Code> LoadStubCompiler::CompileLoadNonexistent(
+ Handle<JSObject> object,
+ Handle<JSObject> last,
+ Handle<Name> name,
+ Handle<JSGlobalObject> global) {
+ Label success;
- // If the last object in the prototype chain is a global object,
- // check that the global property cell is empty.
- if (last->IsGlobalObject()) {
- GenerateCheckPropertyCell(
- masm(), Handle<GlobalObject>::cast(last), name, eax, &miss);
- }
+ NonexistentHandlerFrontend(object, last, name, &success, global);
+ __ bind(&success);
// Return undefined if maps of the full prototype chain are still the
// same and no global property with this name contains a value.
__ mov(eax, isolate()->factory()->undefined_value());
__ ret(0);
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
-
// Return the generated code.
- return GetCode(Code::NONEXISTENT, factory()->empty_string());
+ return GetCode(kind(), Code::NONEXISTENT, name);
}
-Handle<Code> LoadStubCompiler::CompileLoadField(Handle<JSObject> object,
- Handle<JSObject> holder,
- int index,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
+Register* LoadStubCompiler::registers() {
+ // receiver, name, scratch1, scratch2, scratch3, scratch4.
+ static Register registers[] = { edx, ecx, ebx, eax, edi, no_reg };
+ return registers;
+}
- GenerateLoadField(object, holder, edx, ebx, eax, edi, index, name, &miss);
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
- // Return the generated code.
- return GetCode(Code::FIELD, name);
+Register* KeyedLoadStubCompiler::registers() {
+ // receiver, name, scratch1, scratch2, scratch3, scratch4.
+ static Register registers[] = { edx, ecx, ebx, eax, edi, no_reg };
+ return registers;
}
-Handle<Code> LoadStubCompiler::CompileLoadCallback(
- Handle<String> name,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<AccessorInfo> callback) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
+Register* StoreStubCompiler::registers() {
+ // receiver, name, value, scratch1, scratch2, scratch3.
+ static Register registers[] = { edx, ecx, eax, ebx, edi, no_reg };
+ return registers;
+}
- GenerateLoadCallback(object, holder, edx, ecx, ebx, eax, edi, no_reg,
- callback, name, &miss);
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
- // Return the generated code.
- return GetCode(Code::CALLBACKS, name);
+Register* KeyedStoreStubCompiler::registers() {
+ // receiver, name, value, scratch1, scratch2, scratch3.
+ static Register registers[] = { edx, ecx, eax, ebx, edi, no_reg };
+ return registers;
+}
+
+
+void KeyedLoadStubCompiler::GenerateNameCheck(Handle<Name> name,
+ Register name_reg,
+ Label* miss) {
+ __ cmp(name_reg, Immediate(name));
+ __ j(not_equal, miss);
+}
+
+
+void KeyedStoreStubCompiler::GenerateNameCheck(Handle<Name> name,
+ Register name_reg,
+ Label* miss) {
+ __ cmp(name_reg, Immediate(name));
+ __ j(not_equal, miss);
}
@@ -3001,21 +3088,18 @@ Handle<Code> LoadStubCompiler::CompileLoadCallback(
void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
+ Register receiver,
Handle<JSFunction> getter) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
{
FrameScope scope(masm, StackFrame::INTERNAL);
if (!getter.is_null()) {
// Call the JavaScript getter with the receiver on the stack.
- __ push(edx);
+ __ push(receiver);
ParameterCount actual(0);
- __ InvokeFunction(getter, actual, CALL_FUNCTION, NullCallWrapper(),
- CALL_AS_METHOD);
+ ParameterCount expected(getter);
+ __ InvokeFunction(getter, expected, actual,
+ CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -3033,507 +3117,81 @@ void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
#define __ ACCESS_MASM(masm())
-Handle<Code> LoadStubCompiler::CompileLoadViaGetter(
- Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<JSFunction> getter) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
-
- // Check that the maps haven't changed.
- __ JumpIfSmi(edx, &miss);
- CheckPrototypes(receiver, edx, holder, ebx, eax, edi, name, &miss);
-
- GenerateLoadViaGetter(masm(), getter);
-
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
-
- // Return the generated code.
- return GetCode(Code::CALLBACKS, name);
-}
-
-
-Handle<Code> LoadStubCompiler::CompileLoadConstant(Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<JSFunction> value,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
-
- GenerateLoadConstant(object, holder, edx, ebx, eax, edi, value, name, &miss);
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
-
- // Return the generated code.
- return GetCode(Code::CONSTANT_FUNCTION, name);
-}
-
-
-Handle<Code> LoadStubCompiler::CompileLoadInterceptor(Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
-
- LookupResult lookup(isolate());
- LookupPostInterceptor(holder, name, &lookup);
-
- // TODO(368): Compile in the whole chain: all the interceptors in
- // prototypes and ultimate answer.
- GenerateLoadInterceptor(receiver, holder, &lookup, edx, ecx, eax, ebx, edi,
- name, &miss);
-
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
-
- // Return the generated code.
- return GetCode(Code::INTERCEPTOR, name);
-}
-
-
Handle<Code> LoadStubCompiler::CompileLoadGlobal(
Handle<JSObject> object,
- Handle<GlobalObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<String> name,
+ Handle<GlobalObject> global,
+ Handle<PropertyCell> cell,
+ Handle<Name> name,
bool is_dont_delete) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
-
- // Check that the maps haven't changed.
- __ JumpIfSmi(edx, &miss);
- CheckPrototypes(object, edx, holder, ebx, eax, edi, name, &miss);
+ Label success, miss;
+ __ CheckMap(receiver(), Handle<Map>(object->map()), &miss, DO_SMI_CHECK);
+ HandlerFrontendHeader(
+ object, receiver(), Handle<JSObject>::cast(global), name, &miss);
// Get the value from the cell.
if (Serializer::enabled()) {
- __ mov(ebx, Immediate(cell));
- __ mov(ebx, FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset));
+ __ mov(eax, Immediate(cell));
+ __ mov(eax, FieldOperand(eax, PropertyCell::kValueOffset));
} else {
- __ mov(ebx, Operand::Cell(cell));
+ __ mov(eax, Operand::ForCell(cell));
}
// Check for deleted property if property can actually be deleted.
if (!is_dont_delete) {
- __ cmp(ebx, factory()->the_hole_value());
+ __ cmp(eax, factory()->the_hole_value());
__ j(equal, &miss);
} else if (FLAG_debug_code) {
- __ cmp(ebx, factory()->the_hole_value());
- __ Check(not_equal, "DontDelete cells can't contain the hole");
+ __ cmp(eax, factory()->the_hole_value());
+ __ Check(not_equal, kDontDeleteCellsCannotContainTheHole);
}
+ HandlerFrontendFooter(name, &success, &miss);
+ __ bind(&success);
+
Counters* counters = isolate()->counters();
__ IncrementCounter(counters->named_load_global_stub(), 1);
- __ mov(eax, ebx);
+ // The code above already loads the result into the return register.
__ ret(0);
- __ bind(&miss);
- __ IncrementCounter(counters->named_load_global_stub_miss(), 1);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
-
- // Return the generated code.
- return GetCode(Code::NORMAL, name);
-}
-
-
-Handle<Code> KeyedLoadStubCompiler::CompileLoadField(Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- int index) {
- // ----------- S t a t e -------------
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
-
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->keyed_load_field(), 1);
-
- // Check that the name has not changed.
- __ cmp(ecx, Immediate(name));
- __ j(not_equal, &miss);
-
- GenerateLoadField(receiver, holder, edx, ebx, eax, edi, index, name, &miss);
-
- __ bind(&miss);
- __ DecrementCounter(counters->keyed_load_field(), 1);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
// Return the generated code.
- return GetCode(Code::FIELD, name);
+ return GetICCode(kind(), Code::NORMAL, name);
}
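
The global load above reads straight out of the property cell and only misses when the cell holds the hole, which marks a deleted property whose details would have to be recreated in the global object's dictionary. A minimal sketch of that check with stand-in types (not the V8 heap objects):

struct Object {};
struct PropertyCell { Object* value; };

static Object the_hole_sentinel;  // stand-in for the hole value in the roots

// Returns the cell's value, or 0 to signal a miss. DontDelete properties can
// never hold the hole, so the check is skipped for them.
Object* LoadGlobalFromCell(PropertyCell* cell, bool is_dont_delete) {
  Object* value = cell->value;
  if (!is_dont_delete && value == &the_hole_sentinel) return 0;
  return value;
}
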
-Handle<Code> KeyedLoadStubCompiler::CompileLoadCallback(
- Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<AccessorInfo> callback) {
- // ----------- S t a t e -------------
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
-
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->keyed_load_callback(), 1);
-
- // Check that the name has not changed.
- __ cmp(ecx, Immediate(name));
- __ j(not_equal, &miss);
-
- GenerateLoadCallback(receiver, holder, edx, ecx, ebx, eax, edi, no_reg,
- callback, name, &miss);
-
- __ bind(&miss);
- __ DecrementCounter(counters->keyed_load_callback(), 1);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- // Return the generated code.
- return GetCode(Code::CALLBACKS, name);
-}
-
-
-Handle<Code> KeyedLoadStubCompiler::CompileLoadConstant(
- Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<JSFunction> value) {
- // ----------- S t a t e -------------
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
-
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->keyed_load_constant_function(), 1);
-
- // Check that the name has not changed.
- __ cmp(ecx, Immediate(name));
- __ j(not_equal, &miss);
-
- GenerateLoadConstant(
- receiver, holder, edx, ebx, eax, edi, value, name, &miss);
- __ bind(&miss);
- __ DecrementCounter(counters->keyed_load_constant_function(), 1);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- // Return the generated code.
- return GetCode(Code::CONSTANT_FUNCTION, name);
-}
-
-
-Handle<Code> KeyedLoadStubCompiler::CompileLoadInterceptor(
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
-
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->keyed_load_interceptor(), 1);
-
- // Check that the name has not changed.
- __ cmp(ecx, Immediate(name));
- __ j(not_equal, &miss);
-
- LookupResult lookup(isolate());
- LookupPostInterceptor(holder, name, &lookup);
- GenerateLoadInterceptor(receiver, holder, &lookup, edx, ecx, eax, ebx, edi,
- name, &miss);
- __ bind(&miss);
- __ DecrementCounter(counters->keyed_load_interceptor(), 1);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- // Return the generated code.
- return GetCode(Code::INTERCEPTOR, name);
-}
-
-
-Handle<Code> KeyedLoadStubCompiler::CompileLoadArrayLength(
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
-
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->keyed_load_array_length(), 1);
-
- // Check that the name has not changed.
- __ cmp(ecx, Immediate(name));
- __ j(not_equal, &miss);
-
- GenerateLoadArrayLength(masm(), edx, eax, &miss);
- __ bind(&miss);
- __ DecrementCounter(counters->keyed_load_array_length(), 1);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- // Return the generated code.
- return GetCode(Code::CALLBACKS, name);
-}
-
-
-Handle<Code> KeyedLoadStubCompiler::CompileLoadStringLength(
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
-
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->keyed_load_string_length(), 1);
-
- // Check that the name has not changed.
- __ cmp(ecx, Immediate(name));
- __ j(not_equal, &miss);
-
- GenerateLoadStringLength(masm(), edx, eax, ebx, &miss, true);
- __ bind(&miss);
- __ DecrementCounter(counters->keyed_load_string_length(), 1);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- // Return the generated code.
- return GetCode(Code::CALLBACKS, name);
-}
-
-
-Handle<Code> KeyedLoadStubCompiler::CompileLoadFunctionPrototype(
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
-
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->keyed_load_function_prototype(), 1);
-
- // Check that the name has not changed.
- __ cmp(ecx, Immediate(name));
- __ j(not_equal, &miss);
-
- GenerateLoadFunctionPrototype(masm(), edx, eax, ebx, &miss);
- __ bind(&miss);
- __ DecrementCounter(counters->keyed_load_function_prototype(), 1);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- // Return the generated code.
- return GetCode(Code::CALLBACKS, name);
-}
-
-
-Handle<Code> KeyedLoadStubCompiler::CompileLoadElement(
- Handle<Map> receiver_map) {
- // ----------- S t a t e -------------
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
-
- ElementsKind elements_kind = receiver_map->elements_kind();
- Handle<Code> stub = KeyedLoadElementStub(elements_kind).GetCode();
-
- __ DispatchMap(edx, receiver_map, stub, DO_SMI_CHECK);
-
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- // Return the generated code.
- return GetCode(Code::NORMAL, factory()->empty_string());
-}
-
-
-Handle<Code> KeyedLoadStubCompiler::CompileLoadPolymorphic(
+Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC(
MapHandleList* receiver_maps,
- CodeHandleList* handler_ics) {
- // ----------- S t a t e -------------
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
+ CodeHandleList* handlers,
+ Handle<Name> name,
+ Code::StubType type,
+ IcCheckType check) {
Label miss;
- __ JumpIfSmi(edx, &miss);
- Register map_reg = ebx;
- __ mov(map_reg, FieldOperand(edx, HeapObject::kMapOffset));
- int receiver_count = receiver_maps->length();
- for (int current = 0; current < receiver_count; ++current) {
- __ cmp(map_reg, receiver_maps->at(current));
- __ j(equal, handler_ics->at(current));
+ if (check == PROPERTY) {
+ GenerateNameCheck(name, this->name(), &miss);
}
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- // Return the generated code.
- return GetCode(Code::NORMAL, factory()->empty_string(), MEGAMORPHIC);
-}
-
-
-// Specialized stub for constructing objects from functions which have only
-// simple assignments of the form this.x = ...; in their body.
-Handle<Code> ConstructStubCompiler::CompileConstructStub(
- Handle<JSFunction> function) {
- // ----------- S t a t e -------------
- // -- eax : argc
- // -- edi : constructor
- // -- esp[0] : return address
- // -- esp[4] : last argument
- // -----------------------------------
- Label generic_stub_call;
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Check to see whether there are any break points in the function code. If
- // there are, jump to the generic constructor stub, which calls the actual
- // code for the function, thereby hitting the break points.
- __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kDebugInfoOffset));
- __ cmp(ebx, factory()->undefined_value());
- __ j(not_equal, &generic_stub_call);
-#endif
-
- // Load the initial map and verify that it is in fact a map.
- // edi: constructor
- __ mov(ebx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
- // The Smi check below catches both a NULL and a Smi.
- __ JumpIfSmi(ebx, &generic_stub_call);
- __ CmpObjectType(ebx, MAP_TYPE, ecx);
- __ j(not_equal, &generic_stub_call);
-
-#ifdef DEBUG
- // Cannot construct functions this way.
- // ebx: initial map
- __ CmpInstanceType(ebx, JS_FUNCTION_TYPE);
- __ Check(not_equal, "Function constructed by construct stub.");
-#endif
-
- // Now allocate the JSObject on the heap by moving the new space allocation
- // top forward.
- // ebx: initial map
- ASSERT(function->has_initial_map());
- int instance_size = function->initial_map()->instance_size();
-#ifdef DEBUG
- __ movzx_b(ecx, FieldOperand(ebx, Map::kInstanceSizeOffset));
- __ shl(ecx, kPointerSizeLog2);
- __ cmp(ecx, Immediate(instance_size));
- __ Check(equal, "Instance size of initial map changed.");
-#endif
- __ AllocateInNewSpace(instance_size, edx, ecx, no_reg,
- &generic_stub_call, NO_ALLOCATION_FLAGS);
-
- // Allocated the JSObject, now initialize the fields and add the heap tag.
- // ebx: initial map
- // edx: JSObject (untagged)
- __ mov(Operand(edx, JSObject::kMapOffset), ebx);
- __ mov(ebx, factory()->empty_fixed_array());
- __ mov(Operand(edx, JSObject::kPropertiesOffset), ebx);
- __ mov(Operand(edx, JSObject::kElementsOffset), ebx);
-
- // Push the allocated object to the stack. This is the object that will be
- // returned (after it is tagged).
- __ push(edx);
-
- // eax: argc
- // edx: JSObject (untagged)
- // Load the address of the first in-object property into edx.
- __ lea(edx, Operand(edx, JSObject::kHeaderSize));
- // Calculate the location of the first argument. The stack contains the
- // allocated object and the return address on top of the argc arguments.
- __ lea(ecx, Operand(esp, eax, times_4, 1 * kPointerSize));
-
- // Use edi for holding undefined which is used in several places below.
- __ mov(edi, factory()->undefined_value());
-
- // eax: argc
- // ecx: first argument
- // edx: first in-object property of the JSObject
- // edi: undefined
- // Fill the initialized properties with a constant value or a passed argument
- // depending on the this.x = ...; assignment in the function.
- Handle<SharedFunctionInfo> shared(function->shared());
- for (int i = 0; i < shared->this_property_assignments_count(); i++) {
- if (shared->IsThisPropertyAssignmentArgument(i)) {
- // Check if the argument assigned to the property is actually passed.
- // If argument is not passed the property is set to undefined,
- // otherwise find it on the stack.
- int arg_number = shared->GetThisPropertyAssignmentArgument(i);
- __ mov(ebx, edi);
- __ cmp(eax, arg_number);
- if (CpuFeatures::IsSupported(CMOV)) {
- CpuFeatures::Scope use_cmov(CMOV);
- __ cmov(above, ebx, Operand(ecx, arg_number * -kPointerSize));
- } else {
- Label not_passed;
- __ j(below_equal, &not_passed);
- __ mov(ebx, Operand(ecx, arg_number * -kPointerSize));
- __ bind(&not_passed);
- }
- // Store value in the property.
- __ mov(Operand(edx, i * kPointerSize), ebx);
- } else {
- // Set the property to the constant value.
- Handle<Object> constant(shared->GetThisPropertyAssignmentConstant(i));
- __ mov(Operand(edx, i * kPointerSize), Immediate(constant));
+ __ JumpIfSmi(receiver(), &miss);
+ Register map_reg = scratch1();
+ __ mov(map_reg, FieldOperand(receiver(), HeapObject::kMapOffset));
+ int receiver_count = receiver_maps->length();
+ int number_of_handled_maps = 0;
+ for (int current = 0; current < receiver_count; ++current) {
+ Handle<Map> map = receiver_maps->at(current);
+ if (!map->is_deprecated()) {
+ number_of_handled_maps++;
+ __ cmp(map_reg, map);
+ __ j(equal, handlers->at(current));
}
}
+ ASSERT(number_of_handled_maps != 0);
- // Fill the unused in-object property fields with undefined.
- for (int i = shared->this_property_assignments_count();
- i < function->initial_map()->inobject_properties();
- i++) {
- __ mov(Operand(edx, i * kPointerSize), edi);
- }
-
- // Move argc to ebx and retrieve and tag the JSObject to return.
- __ mov(ebx, eax);
- __ pop(eax);
- __ or_(eax, Immediate(kHeapObjectTag));
-
- // Remove caller arguments and receiver from the stack and return.
- __ pop(ecx);
- __ lea(esp, Operand(esp, ebx, times_pointer_size, 1 * kPointerSize));
- __ push(ecx);
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->constructed_objects(), 1);
- __ IncrementCounter(counters->constructed_objects_stub(), 1);
- __ ret(0);
-
- // Jump to the generic stub in case the specialized code cannot handle the
- // construction.
- __ bind(&generic_stub_call);
- Handle<Code> code = isolate()->builtins()->JSConstructStubGeneric();
- __ jmp(code, RelocInfo::CODE_TARGET);
+ __ bind(&miss);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
// Return the generated code.
- return GetCode();
+ InlineCacheState state =
+ number_of_handled_maps > 1 ? POLYMORPHIC : MONOMORPHIC;
+ return GetICCode(kind(), type, name, state);
}
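
CompilePolymorphicIC skips deprecated maps entirely, so fewer comparisons may be emitted than there are receiver maps; the stub is then tagged MONOMORPHIC when exactly one map ended up handled and POLYMORPHIC otherwise. A compact sketch of that bookkeeping, again with stand-in types:

#include <cstddef>
#include <vector>

struct Map { bool is_deprecated; };
enum InlineCacheState { MONOMORPHIC, POLYMORPHIC };

// Count the maps that actually get a compare-and-jump emitted and derive the
// IC state from that count (the real code asserts the count is nonzero).
InlineCacheState StateForHandledMaps(const std::vector<const Map*>& maps) {
  int handled = 0;
  for (std::size_t i = 0; i < maps.size(); ++i) {
    if (!maps[i]->is_deprecated) ++handled;
  }
  return handled > 1 ? POLYMORPHIC : MONOMORPHIC;
}
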
@@ -3573,789 +3231,15 @@ void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
+ TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Slow);
- Handle<Code> slow_ic =
- masm->isolate()->builtins()->KeyedLoadIC_Slow();
- __ jmp(slow_ic, RelocInfo::CODE_TARGET);
-
- __ bind(&miss_force_generic);
- // ----------- S t a t e -------------
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
-
- Handle<Code> miss_force_generic_ic =
- masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
- __ jmp(miss_force_generic_ic, RelocInfo::CODE_TARGET);
-}
-
-
-static void GenerateSmiKeyCheck(MacroAssembler* masm,
- Register key,
- Register scratch,
- XMMRegister xmm_scratch0,
- XMMRegister xmm_scratch1,
- Label* fail) {
- // Check that key is a smi and if SSE2 is available a heap number
- // containing a smi and branch if the check fails.
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- Label key_ok;
- __ JumpIfSmi(key, &key_ok);
- __ cmp(FieldOperand(key, HeapObject::kMapOffset),
- Immediate(Handle<Map>(masm->isolate()->heap()->heap_number_map())));
- __ j(not_equal, fail);
- __ movdbl(xmm_scratch0, FieldOperand(key, HeapNumber::kValueOffset));
- __ cvttsd2si(scratch, Operand(xmm_scratch0));
- __ cvtsi2sd(xmm_scratch1, scratch);
- __ ucomisd(xmm_scratch1, xmm_scratch0);
- __ j(not_equal, fail);
- __ j(parity_even, fail); // NaN.
- // Check if the key fits in the smi range.
- __ cmp(scratch, 0xc0000000);
- __ j(sign, fail);
- __ SmiTag(scratch);
- __ mov(key, scratch);
- __ bind(&key_ok);
- } else {
- __ JumpIfNotSmi(key, fail);
- }
-}
-
-
-void KeyedLoadStubCompiler::GenerateLoadExternalArray(
- MacroAssembler* masm,
- ElementsKind elements_kind) {
- // ----------- S t a t e -------------
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss_force_generic, failed_allocation, slow;
-
- // This stub is meant to be tail-jumped to; the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, ecx, eax, xmm0, xmm1, &miss_force_generic);
-
- // Check that the index is in range.
- __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
- __ cmp(ecx, FieldOperand(ebx, ExternalArray::kLengthOffset));
- // Unsigned comparison catches both negative and too-large values.
- __ j(above_equal, &miss_force_generic);
- __ mov(ebx, FieldOperand(ebx, ExternalArray::kExternalPointerOffset));
- // ebx: base pointer of external storage
- switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
- __ SmiUntag(ecx); // Untag the index.
- __ movsx_b(eax, Operand(ebx, ecx, times_1, 0));
- break;
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_PIXEL_ELEMENTS:
- __ SmiUntag(ecx); // Untag the index.
- __ movzx_b(eax, Operand(ebx, ecx, times_1, 0));
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- __ movsx_w(eax, Operand(ebx, ecx, times_1, 0));
- break;
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ movzx_w(eax, Operand(ebx, ecx, times_1, 0));
- break;
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_INT_ELEMENTS:
- __ mov(eax, Operand(ebx, ecx, times_2, 0));
- break;
- case EXTERNAL_FLOAT_ELEMENTS:
- __ fld_s(Operand(ebx, ecx, times_2, 0));
- break;
- case EXTERNAL_DOUBLE_ELEMENTS:
- __ fld_d(Operand(ebx, ecx, times_4, 0));
- break;
- default:
- UNREACHABLE();
- break;
- }
-
- // For integer array types:
- // eax: value
- // For floating-point array type:
- // FP(0): value
-
- if (elements_kind == EXTERNAL_INT_ELEMENTS ||
- elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) {
- // For the Int and UnsignedInt array types, we need to see whether
- // the value can be represented in a Smi. If not, we need to convert
- // it to a HeapNumber.
- Label box_int;
- if (elements_kind == EXTERNAL_INT_ELEMENTS) {
- __ cmp(eax, 0xc0000000);
- __ j(sign, &box_int);
- } else {
- ASSERT_EQ(EXTERNAL_UNSIGNED_INT_ELEMENTS, elements_kind);
- // The test is different for unsigned int values. Since we need
- // the value to be in the range of a positive smi, we can't
- // handle either of the top two bits being set in the value.
- __ test(eax, Immediate(0xc0000000));
- __ j(not_zero, &box_int);
- }
-
- __ SmiTag(eax);
- __ ret(0);
-
- __ bind(&box_int);
-
- // Allocate a HeapNumber for the int and perform int-to-double
- // conversion.
- if (elements_kind == EXTERNAL_INT_ELEMENTS) {
- __ push(eax);
- __ fild_s(Operand(esp, 0));
- __ pop(eax);
- } else {
- ASSERT_EQ(EXTERNAL_UNSIGNED_INT_ELEMENTS, elements_kind);
- // Need to zero-extend the value.
- // There's no fild variant for unsigned values, so zero-extend
- // to a 64-bit int manually.
- __ push(Immediate(0));
- __ push(eax);
- __ fild_d(Operand(esp, 0));
- __ pop(eax);
- __ pop(eax);
- }
- // FP(0): value
- __ AllocateHeapNumber(eax, ebx, edi, &failed_allocation);
- // Set the value.
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ ret(0);
- } else if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
- elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- // For the floating-point array type, we need to always allocate a
- // HeapNumber.
- __ AllocateHeapNumber(eax, ebx, edi, &failed_allocation);
- // Set the value.
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ ret(0);
- } else {
- __ SmiTag(eax);
- __ ret(0);
- }
-
- // If we fail allocation of the HeapNumber, we still have a value on
- // top of the FPU stack. Remove it.
- __ bind(&failed_allocation);
- __ fstp(0);
- // Fall through to slow case.
-
- // Slow case: Jump to runtime.
- __ bind(&slow);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->keyed_load_external_array_slow(), 1);
-
- // ----------- S t a t e -------------
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
-
- Handle<Code> ic = masm->isolate()->builtins()->KeyedLoadIC_Slow();
- __ jmp(ic, RelocInfo::CODE_TARGET);
-
- // ----------- S t a t e -------------
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
-
- // Miss case: Jump to runtime.
__ bind(&miss_force_generic);
- Handle<Code> miss_ic =
- masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
- __ jmp(miss_ic, RelocInfo::CODE_TARGET);
-}
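
The removed external-array load relies on two bit tricks to decide whether a 32-bit element can be returned as a smi (a 31-bit signed payload on ia32): for signed values, "cmp eax, 0xc0000000" plus a sign-flag branch tests whether value + 2^30 wraps negative, and for unsigned values both of the top two bits must be clear. The same checks written out in portable C++ (a sketch, not V8 code):

#include <stdint.h>

// A value fits in an ia32 smi iff it lies in [-2^30, 2^30 - 1]; one bit is
// lost to the tag. Adding 2^30 (mod 2^32) maps exactly the out-of-range
// values onto results with the sign bit set, which is what the stub's
// "cmp eax, 0xc0000000; j(sign, ...)" sequence checks.
static bool SignedValueFitsInSmi(int32_t value) {
  return ((static_cast<uint32_t>(value) + 0x40000000u) & 0x80000000u) == 0;
}

// Unsigned values must also be non-negative as a smi, so neither of the top
// two bits may be set ("test eax, 0xc0000000" in the stub).
static bool UnsignedValueFitsInSmi(uint32_t value) {
  return (value & 0xc0000000u) == 0;
}

// Tagging simply shifts the payload up by one; the tag bit stays zero.
static int32_t SmiTag(int32_t value) {
  return static_cast<int32_t>(static_cast<uint32_t>(value) << 1);
}
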
-
-
-void KeyedStoreStubCompiler::GenerateStoreExternalArray(
- MacroAssembler* masm,
- ElementsKind elements_kind) {
// ----------- S t a t e -------------
- // -- eax : value
// -- ecx : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
- Label miss_force_generic, slow, check_heap_number;
-
- // This stub is meant to be tail-jumped to; the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, ecx, ebx, xmm0, xmm1, &miss_force_generic);
-
- // Check that the index is in range.
- __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
- __ cmp(ecx, FieldOperand(edi, ExternalArray::kLengthOffset));
- // Unsigned comparison catches both negative and too-large values.
- __ j(above_equal, &slow);
-
- // Handle both smis and HeapNumbers in the fast path. Go to the
- // runtime for all other kinds of values.
- // eax: value
- // edx: receiver
- // ecx: key
- // edi: elements array
- if (elements_kind == EXTERNAL_PIXEL_ELEMENTS) {
- __ JumpIfNotSmi(eax, &slow);
- } else {
- __ JumpIfNotSmi(eax, &check_heap_number);
- }
-
- // smi case
- __ mov(ebx, eax); // Preserve the value in eax as the return value.
- __ SmiUntag(ebx);
- __ mov(edi, FieldOperand(edi, ExternalArray::kExternalPointerOffset));
- // edi: base pointer of external storage
- switch (elements_kind) {
- case EXTERNAL_PIXEL_ELEMENTS:
- __ ClampUint8(ebx);
- __ SmiUntag(ecx);
- __ mov_b(Operand(edi, ecx, times_1, 0), ebx);
- break;
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ SmiUntag(ecx);
- __ mov_b(Operand(edi, ecx, times_1, 0), ebx);
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ mov_w(Operand(edi, ecx, times_1, 0), ebx);
- break;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ mov(Operand(edi, ecx, times_2, 0), ebx);
- break;
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- // Need to perform int-to-float conversion.
- __ push(ebx);
- __ fild_s(Operand(esp, 0));
- __ pop(ebx);
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- __ fstp_s(Operand(edi, ecx, times_2, 0));
- } else { // elements_kind == EXTERNAL_DOUBLE_ELEMENTS.
- __ fstp_d(Operand(edi, ecx, times_4, 0));
- }
- break;
- default:
- UNREACHABLE();
- break;
- }
- __ ret(0); // Return the original value.
-
- // TODO(danno): handle heap number -> pixel array conversion
- if (elements_kind != EXTERNAL_PIXEL_ELEMENTS) {
- __ bind(&check_heap_number);
- // eax: value
- // edx: receiver
- // ecx: key
- // edi: elements array
- __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
- Immediate(masm->isolate()->factory()->heap_number_map()));
- __ j(not_equal, &slow);
-
- // The WebGL specification leaves the behavior of storing NaN and
- // +/-Infinity into integer arrays basically undefined. For more
- // reproducible behavior, convert these to zero.
- __ mov(edi, FieldOperand(edi, ExternalArray::kExternalPointerOffset));
- // edi: base pointer of external storage
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ fstp_s(Operand(edi, ecx, times_2, 0));
- __ ret(0);
- } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ fstp_d(Operand(edi, ecx, times_4, 0));
- __ ret(0);
- } else {
- // Perform float-to-int conversion with truncation (round-to-zero)
- // behavior.
-
- // For the moment we make the slow call to the runtime on
- // processors that don't support SSE2. The code in IntegerConvert
- // (code-stubs-ia32.cc) is roughly what is needed here though the
- // conversion failure case does not need to be handled.
- if (CpuFeatures::IsSupported(SSE2)) {
- if ((elements_kind == EXTERNAL_INT_ELEMENTS ||
- elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) &&
- CpuFeatures::IsSupported(SSE3)) {
- CpuFeatures::Scope scope(SSE3);
- // fisttp stores values as signed integers. To represent the
- // entire range of int and unsigned int arrays, store as a
- // 64-bit int and discard the high 32 bits.
- __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ sub(esp, Immediate(2 * kPointerSize));
- __ fisttp_d(Operand(esp, 0));
-
- // If conversion failed (NaN, infinity, or a number outside
- // signed int64 range), the result is 0x8000000000000000, and
- // we must handle this case in the runtime.
- Label ok;
- __ cmp(Operand(esp, kPointerSize), Immediate(0x80000000u));
- __ j(not_equal, &ok);
- __ cmp(Operand(esp, 0), Immediate(0));
- __ j(not_equal, &ok);
- __ add(esp, Immediate(2 * kPointerSize)); // Restore the stack.
- __ jmp(&slow);
-
- __ bind(&ok);
- __ pop(ebx);
- __ add(esp, Immediate(kPointerSize));
- __ mov(Operand(edi, ecx, times_2, 0), ebx);
- } else {
- ASSERT(CpuFeatures::IsSupported(SSE2));
- CpuFeatures::Scope scope(SSE2);
- __ cvttsd2si(ebx, FieldOperand(eax, HeapNumber::kValueOffset));
- __ cmp(ebx, 0x80000000u);
- __ j(equal, &slow);
- // ebx: untagged integer value
- switch (elements_kind) {
- case EXTERNAL_PIXEL_ELEMENTS:
- __ ClampUint8(ebx);
- // Fall through.
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ SmiUntag(ecx);
- __ mov_b(Operand(edi, ecx, times_1, 0), ebx);
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ mov_w(Operand(edi, ecx, times_1, 0), ebx);
- break;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ mov(Operand(edi, ecx, times_2, 0), ebx);
- break;
- default:
- UNREACHABLE();
- break;
- }
- }
- __ ret(0); // Return original value.
- }
- }
- }
-
- // Slow case: call runtime.
- __ bind(&slow);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->keyed_store_external_array_slow(), 1);
-
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
-
- Handle<Code> ic = masm->isolate()->builtins()->KeyedStoreIC_Slow();
- __ jmp(ic, RelocInfo::CODE_TARGET);
-
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
-
- __ bind(&miss_force_generic);
- Handle<Code> miss_ic =
- masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
- __ jmp(miss_ic, RelocInfo::CODE_TARGET);
-}
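
On the store side the conversions are equally mechanical: pixel (clamped uint8) arrays clamp to [0, 255], the other integer kinds truncate toward zero, and, as the comment above notes, NaN and +/-Infinity are mapped to zero instead of being left implementation-defined. A sketch with assumed helper names (not the V8 API):

#include <stdint.h>
#include <cmath>

// Clamp used for EXTERNAL_PIXEL_ELEMENTS stores.
static uint8_t ClampToUint8(int32_t value) {
  if (value < 0) return 0;
  if (value > 255) return 255;
  return static_cast<uint8_t>(value);
}

// Truncation for the int/uint external array kinds. NaN and infinities become
// 0 per the comment above; other out-of-range doubles go to the runtime in
// the real stub, so this sketch simply maps them to 0 as well.
static int32_t TruncateDoubleToInt32(double value) {
  if (std::isnan(value) || std::isinf(value)) return 0;
  if (value >= 2147483648.0 || value <= -2147483649.0) return 0;
  return static_cast<int32_t>(value);  // rounds toward zero
}
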
-
-
-void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss_force_generic;
-
- // This stub is meant to be tail-jumped to; the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, ecx, eax, xmm0, xmm1, &miss_force_generic);
-
- // Get the elements array.
- __ mov(eax, FieldOperand(edx, JSObject::kElementsOffset));
- __ AssertFastElements(eax);
-
- // Check that the key is within bounds.
- __ cmp(ecx, FieldOperand(eax, FixedArray::kLengthOffset));
- __ j(above_equal, &miss_force_generic);
-
- // Load the result and make sure it's not the hole.
- __ mov(ebx, Operand(eax, ecx, times_2,
- FixedArray::kHeaderSize - kHeapObjectTag));
- __ cmp(ebx, masm->isolate()->factory()->the_hole_value());
- __ j(equal, &miss_force_generic);
- __ mov(eax, ebx);
- __ ret(0);
-
- __ bind(&miss_force_generic);
- Handle<Code> miss_ic =
- masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
- __ jmp(miss_ic, RelocInfo::CODE_TARGET);
-}
-
-
-void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
- MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss_force_generic, slow_allocate_heapnumber;
-
- // This stub is meant to be tail-jumped to; the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, ecx, eax, xmm0, xmm1, &miss_force_generic);
-
- // Get the elements array.
- __ mov(eax, FieldOperand(edx, JSObject::kElementsOffset));
- __ AssertFastElements(eax);
-
- // Check that the key is within bounds.
- __ cmp(ecx, FieldOperand(eax, FixedDoubleArray::kLengthOffset));
- __ j(above_equal, &miss_force_generic);
-
- // Check for the hole
- uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
- __ cmp(FieldOperand(eax, ecx, times_4, offset), Immediate(kHoleNanUpper32));
- __ j(equal, &miss_force_generic);
-
- // Always allocate a heap number for the result.
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- __ movdbl(xmm0, FieldOperand(eax, ecx, times_4,
- FixedDoubleArray::kHeaderSize));
- } else {
- __ fld_d(FieldOperand(eax, ecx, times_4, FixedDoubleArray::kHeaderSize));
- }
- __ AllocateHeapNumber(eax, ebx, edi, &slow_allocate_heapnumber);
- // Set the value.
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- } else {
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- }
- __ ret(0);
-
- __ bind(&slow_allocate_heapnumber);
- // A value was pushed on the floating point stack before the allocation, if
- // the allocation fails it needs to be removed.
- if (!CpuFeatures::IsSupported(SSE2)) {
- __ fstp(0);
- }
- Handle<Code> slow_ic =
- masm->isolate()->builtins()->KeyedLoadIC_Slow();
- __ jmp(slow_ic, RelocInfo::CODE_TARGET);
-
- __ bind(&miss_force_generic);
- Handle<Code> miss_ic =
- masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
- __ jmp(miss_ic, RelocInfo::CODE_TARGET);
-}
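
Fast double arrays mark holes with one specific NaN bit pattern; since no arithmetic result ever carries that payload, the loader above only has to compare the upper 32 bits of the element (kHoleNanUpper32). The same test on a raw double, using a stand-in constant rather than the actual V8 value:

#include <stdint.h>
#include <string.h>

static const uint32_t kHoleNanUpper32Sketch = 0x7FF7FFFFu;  // stand-in value

// True when the element is the hole marker rather than a real number.
static bool IsTheHole(double element) {
  uint64_t bits;
  memcpy(&bits, &element, sizeof(bits));
  return static_cast<uint32_t>(bits >> 32) == kHoleNanUpper32Sketch;
}
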
-
-
-void KeyedStoreStubCompiler::GenerateStoreFastElement(
- MacroAssembler* masm,
- bool is_js_array,
- ElementsKind elements_kind,
- KeyedAccessGrowMode grow_mode) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss_force_generic, grow, slow, transition_elements_kind;
- Label check_capacity, prepare_slow, finish_store, commit_backing_store;
-
- // This stub is meant to be tail-jumped to; the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, ecx, ebx, xmm0, xmm1, &miss_force_generic);
-
- if (IsFastSmiElementsKind(elements_kind)) {
- __ JumpIfNotSmi(eax, &transition_elements_kind);
- }
-
- // Get the elements array and make sure it is a fast element array, not 'cow'.
- __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
- if (is_js_array) {
- // Check that the key is within bounds.
- __ cmp(ecx, FieldOperand(edx, JSArray::kLengthOffset)); // smis.
- if (grow_mode == ALLOW_JSARRAY_GROWTH) {
- __ j(above_equal, &grow);
- } else {
- __ j(above_equal, &miss_force_generic);
- }
- } else {
- // Check that the key is within bounds.
- __ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset)); // smis.
- __ j(above_equal, &miss_force_generic);
- }
-
- __ cmp(FieldOperand(edi, HeapObject::kMapOffset),
- Immediate(masm->isolate()->factory()->fixed_array_map()));
- __ j(not_equal, &miss_force_generic);
-
- __ bind(&finish_store);
- if (IsFastSmiElementsKind(elements_kind)) {
- // ecx is a smi, use times_half_pointer_size instead of
- // times_pointer_size
- __ mov(FieldOperand(edi,
- ecx,
- times_half_pointer_size,
- FixedArray::kHeaderSize), eax);
- } else {
- ASSERT(IsFastObjectElementsKind(elements_kind));
- // Do the store and update the write barrier.
- // ecx is a smi, use times_half_pointer_size instead of
- // times_pointer_size
- __ lea(ecx, FieldOperand(edi,
- ecx,
- times_half_pointer_size,
- FixedArray::kHeaderSize));
- __ mov(Operand(ecx, 0), eax);
- // Make sure to preserve the value in register eax.
- __ mov(ebx, eax);
- __ RecordWrite(edi, ecx, ebx, kDontSaveFPRegs);
- }
-
- // Done.
- __ ret(0);
-
- // Handle store cache miss, replacing the ic with the generic stub.
- __ bind(&miss_force_generic);
- Handle<Code> ic_force_generic =
- masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
- __ jmp(ic_force_generic, RelocInfo::CODE_TARGET);
-
- // Handle transition to other elements kinds without using the generic stub.
- __ bind(&transition_elements_kind);
- Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
- __ jmp(ic_miss, RelocInfo::CODE_TARGET);
-
- if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
- // Handle transition requiring the array to grow.
- __ bind(&grow);
-
- // Make sure the array is only growing by a single element; anything else
- // must be handled by the runtime. Flags are already set by the previous
- // compare.
- __ j(not_equal, &miss_force_generic);
-
- // Check for the empty array, and preallocate a small backing store if
- // possible.
- __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
- __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
- __ j(not_equal, &check_capacity);
-
- int size = FixedArray::SizeFor(JSArray::kPreallocatedArrayElements);
- __ AllocateInNewSpace(size, edi, ebx, ecx, &prepare_slow, TAG_OBJECT);
- // Restore the key, which is known to be the array length.
-
- // eax: value
- // ecx: key
- // edx: receiver
- // edi: elements
- // Make sure that the backing store can hold additional elements.
- __ mov(FieldOperand(edi, JSObject::kMapOffset),
- Immediate(masm->isolate()->factory()->fixed_array_map()));
- __ mov(FieldOperand(edi, FixedArray::kLengthOffset),
- Immediate(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
- __ mov(ebx, Immediate(masm->isolate()->factory()->the_hole_value()));
- for (int i = 1; i < JSArray::kPreallocatedArrayElements; ++i) {
- __ mov(FieldOperand(edi, FixedArray::SizeFor(i)), ebx);
- }
-
- // Store the element at index zero.
- __ mov(FieldOperand(edi, FixedArray::SizeFor(0)), eax);
-
- // Install the new backing store in the JSArray.
- __ mov(FieldOperand(edx, JSObject::kElementsOffset), edi);
- __ RecordWriteField(edx, JSObject::kElementsOffset, edi, ebx,
- kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
- // Increment the length of the array.
- __ mov(FieldOperand(edx, JSArray::kLengthOffset),
- Immediate(Smi::FromInt(1)));
- __ ret(0);
-
- __ bind(&check_capacity);
- __ cmp(FieldOperand(edi, HeapObject::kMapOffset),
- Immediate(masm->isolate()->factory()->fixed_cow_array_map()));
- __ j(equal, &miss_force_generic);
-
- // eax: value
- // ecx: key
- // edx: receiver
- // edi: elements
- // Make sure that the backing store can hold additional elements.
- __ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset));
- __ j(above_equal, &slow);
-
- // Grow the array and finish the store.
- __ add(FieldOperand(edx, JSArray::kLengthOffset),
- Immediate(Smi::FromInt(1)));
- __ jmp(&finish_store);
-
- __ bind(&prepare_slow);
- // Restore the key, which is known to be the array length.
- __ mov(ecx, Immediate(0));
-
- __ bind(&slow);
- Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow();
- __ jmp(ic_slow, RelocInfo::CODE_TARGET);
- }
-}
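
When growth is allowed, the fast-element store above grows the array by at most one element: the key must equal the current length (hence the reuse of the flags from the bounds compare), an empty array first gets a small hole-filled backing store, and everything else is punted to the runtime. The policy restated over a plain container (a sketch, not the JSArray code):

#include <cstddef>
#include <vector>

// std::vector stands in for the JSArray backing store; its size() plays the
// role of the array length.
template <typename T>
bool StoreWithGrowth(std::vector<T>* elements, std::size_t key,
                     const T& value) {
  if (key < elements->size()) {                 // in bounds: plain store
    (*elements)[key] = value;
    return true;
  }
  if (key != elements->size()) return false;    // growing by more than one: miss
  if (elements->empty()) elements->reserve(4);  // small preallocated store
  elements->push_back(value);                   // length becomes length + 1
  return true;
}
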
-
-
-void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
- MacroAssembler* masm,
- bool is_js_array,
- KeyedAccessGrowMode grow_mode) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss_force_generic, transition_elements_kind, grow, slow;
- Label check_capacity, prepare_slow, finish_store, commit_backing_store;
-
- // This stub is meant to be tail-jumped to; the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, ecx, ebx, xmm0, xmm1, &miss_force_generic);
-
- // Get the elements array.
- __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
- __ AssertFastElements(edi);
-
- if (is_js_array) {
- // Check that the key is within bounds.
- __ cmp(ecx, FieldOperand(edx, JSArray::kLengthOffset)); // smis.
- if (grow_mode == ALLOW_JSARRAY_GROWTH) {
- __ j(above_equal, &grow);
- } else {
- __ j(above_equal, &miss_force_generic);
- }
- } else {
- // Check that the key is within bounds.
- __ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset)); // smis.
- __ j(above_equal, &miss_force_generic);
- }
-
- __ bind(&finish_store);
- __ StoreNumberToDoubleElements(eax, edi, ecx, edx, xmm0,
- &transition_elements_kind, true);
- __ ret(0);
-
- // Handle store cache miss, replacing the ic with the generic stub.
- __ bind(&miss_force_generic);
- Handle<Code> ic_force_generic =
- masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
- __ jmp(ic_force_generic, RelocInfo::CODE_TARGET);
-
- // Handle transition to other elements kinds without using the generic stub.
- __ bind(&transition_elements_kind);
- Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
- __ jmp(ic_miss, RelocInfo::CODE_TARGET);
-
- if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
- // Handle transition requiring the array to grow.
- __ bind(&grow);
-
- // Make sure the array is only growing by a single element; anything else
- // must be handled by the runtime. Flags are already set by the previous
- // compare.
- __ j(not_equal, &miss_force_generic);
-
- // Transition on values that can't be stored in a FixedDoubleArray.
- Label value_is_smi;
- __ JumpIfSmi(eax, &value_is_smi);
- __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
- Immediate(Handle<Map>(masm->isolate()->heap()->heap_number_map())));
- __ j(not_equal, &transition_elements_kind);
- __ bind(&value_is_smi);
-
- // Check for the empty array, and preallocate a small backing store if
- // possible.
- __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
- __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
- __ j(not_equal, &check_capacity);
-
- int size = FixedDoubleArray::SizeFor(JSArray::kPreallocatedArrayElements);
- __ AllocateInNewSpace(size, edi, ebx, ecx, &prepare_slow, TAG_OBJECT);
-
- // Restore the key, which is known to be the array length.
- __ mov(ecx, Immediate(0));
-
- // eax: value
- // ecx: key
- // edx: receiver
- // edi: elements
- // Initialize the new FixedDoubleArray. Leave elements uninitialized for
- // efficiency, they are guaranteed to be initialized before use.
- __ mov(FieldOperand(edi, JSObject::kMapOffset),
- Immediate(masm->isolate()->factory()->fixed_double_array_map()));
- __ mov(FieldOperand(edi, FixedDoubleArray::kLengthOffset),
- Immediate(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
-
- // Install the new backing store in the JSArray.
- __ mov(FieldOperand(edx, JSObject::kElementsOffset), edi);
- __ RecordWriteField(edx, JSObject::kElementsOffset, edi, ebx,
- kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
- // Increment the length of the array.
- __ add(FieldOperand(edx, JSArray::kLengthOffset),
- Immediate(Smi::FromInt(1)));
- __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
- __ jmp(&finish_store);
-
- __ bind(&check_capacity);
- // eax: value
- // ecx: key
- // edx: receiver
- // edi: elements
- // Make sure that the backing store can hold additional elements.
- __ cmp(ecx, FieldOperand(edi, FixedDoubleArray::kLengthOffset));
- __ j(above_equal, &slow);
-
- // Grow the array and finish the store.
- __ add(FieldOperand(edx, JSArray::kLengthOffset),
- Immediate(Smi::FromInt(1)));
- __ jmp(&finish_store);
-
- __ bind(&prepare_slow);
- // Restore the key, which is known to be the array length.
- __ mov(ecx, Immediate(0));
-
- __ bind(&slow);
- Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow();
- __ jmp(ic_slow, RelocInfo::CODE_TARGET);
- }
+ TailCallBuiltin(masm, Builtins::kKeyedLoadIC_MissForceGeneric);
}
diff --git a/deps/v8/src/ic-inl.h b/deps/v8/src/ic-inl.h
index 49b6ef9d0..06cbf2e11 100644
--- a/deps/v8/src/ic-inl.h
+++ b/deps/v8/src/ic-inl.h
@@ -43,7 +43,7 @@ Address IC::address() const {
Address result = Assembler::target_address_from_return_address(pc());
#ifdef ENABLE_DEBUGGER_SUPPORT
- Debug* debug = Isolate::Current()->debug();
+ Debug* debug = isolate()->debug();
// First check if any break points are active if not just return the address
// of the call.
if (!debug->has_break_points()) return result;
@@ -102,34 +102,20 @@ void IC::SetTargetAtAddress(Address address, Code* target) {
InlineCacheHolderFlag IC::GetCodeCacheForObject(Object* object,
JSObject* holder) {
- if (object->IsJSObject()) {
- return GetCodeCacheForObject(JSObject::cast(object), holder);
- }
+ if (object->IsJSObject()) return OWN_MAP;
+
// If the object is a value, we use the prototype map for the cache.
- ASSERT(object->IsString() || object->IsNumber() || object->IsBoolean());
+ ASSERT(object->IsString() || object->IsSymbol() ||
+ object->IsNumber() || object->IsBoolean());
return PROTOTYPE_MAP;
}
-InlineCacheHolderFlag IC::GetCodeCacheForObject(JSObject* object,
- JSObject* holder) {
- // Fast-properties and global objects store stubs in their own maps.
- // Slow properties objects use prototype's map (unless the property is its own
- // when holder == object). It works because slow properties objects having
- // the same prototype (or a prototype with the same map) and not having
- // the property are interchangeable for such a stub.
- if (holder != object &&
- !object->HasFastProperties() &&
- !object->IsJSGlobalProxy() &&
- !object->IsJSGlobalObject()) {
- return PROTOTYPE_MAP;
- }
- return OWN_MAP;
-}
-
-
-JSObject* IC::GetCodeCacheHolder(Object* object, InlineCacheHolderFlag holder) {
- Object* map_owner = (holder == OWN_MAP ? object : object->GetPrototype());
+JSObject* IC::GetCodeCacheHolder(Isolate* isolate,
+ Object* object,
+ InlineCacheHolderFlag holder) {
+ Object* map_owner =
+ holder == OWN_MAP ? object : object->GetPrototype(isolate);
ASSERT(map_owner->IsJSObject());
return JSObject::cast(map_owner);
}
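
The rewritten GetCodeCacheForObject/GetCodeCacheHolder pair reduces to a simple rule: JSObject receivers cache stubs on their own map, primitive receivers (strings, symbols, numbers, booleans) on their prototype's map, and the holder is accordingly either the receiver itself or its prototype. A stand-in sketch of that rule, not the V8 object model:

enum InlineCacheHolderFlag { OWN_MAP, PROTOTYPE_MAP };

// Stand-in receiver description for illustration only.
struct ReceiverInfo {
  bool is_js_object;   // false for string/symbol/number/boolean values
  void* self;          // the receiver itself
  void* prototype;     // its prototype
};

static InlineCacheHolderFlag CacheFlagFor(const ReceiverInfo& r) {
  return r.is_js_object ? OWN_MAP : PROTOTYPE_MAP;
}

static void* CacheHolderFor(const ReceiverInfo& r, InlineCacheHolderFlag f) {
  return f == OWN_MAP ? r.self : r.prototype;
}
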
diff --git a/deps/v8/src/ic.cc b/deps/v8/src/ic.cc
index dd0bb10e1..55d7ba936 100644
--- a/deps/v8/src/ic.cc
+++ b/deps/v8/src/ic.cc
@@ -43,36 +43,44 @@ namespace internal {
char IC::TransitionMarkFromState(IC::State state) {
switch (state) {
case UNINITIALIZED: return '0';
- case PREMONOMORPHIC: return 'P';
+ case PREMONOMORPHIC: return '.';
case MONOMORPHIC: return '1';
case MONOMORPHIC_PROTOTYPE_FAILURE: return '^';
- case MEGAMORPHIC: return IsGeneric() ? 'G' : 'N';
+ case POLYMORPHIC: return 'P';
+ case MEGAMORPHIC: return 'N';
+ case GENERIC: return 'G';
// We never see the debugger states here, because the state is
// computed from the original code - not the patched code. Let
// these cases fall through to the unreachable code below.
- case DEBUG_BREAK: break;
- case DEBUG_PREPARE_STEP_IN: break;
+ case DEBUG_STUB: break;
}
UNREACHABLE();
return 0;
}
+
+const char* GetTransitionMarkModifier(KeyedAccessStoreMode mode) {
+ if (mode == STORE_NO_TRANSITION_HANDLE_COW) return ".COW";
+ if (mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS) {
+ return ".IGNORE_OOB";
+ }
+ if (IsGrowStoreMode(mode)) return ".GROW";
+ return "";
+}
+
+
void IC::TraceIC(const char* type,
- Handle<Object> name,
- State old_state,
- Code* new_target) {
+ Handle<Object> name) {
if (FLAG_trace_ic) {
- State new_state = StateFrom(new_target,
- HEAP->undefined_value(),
- HEAP->undefined_value());
- PrintF("[%s in ", type);
- StackFrameIterator it;
+ Code* new_target = raw_target();
+ State new_state = new_target->ic_state();
+ PrintF("[%s%s in ", new_target->is_keyed_stub() ? "Keyed" : "", type);
+ StackFrameIterator it(isolate());
while (it.frame()->fp() != this->fp()) it.Advance();
StackFrame* raw_frame = it.frame();
if (raw_frame->is_internal()) {
- Isolate* isolate = new_target->GetIsolate();
- Code* apply_builtin = isolate->builtins()->builtin(
+ Code* apply_builtin = isolate()->builtins()->builtin(
Builtins::kFunctionApply);
if (raw_frame->unchecked_code() == apply_builtin) {
PrintF("apply from ");
@@ -80,77 +88,80 @@ void IC::TraceIC(const char* type,
raw_frame = it.frame();
}
}
- JavaScriptFrame::PrintTop(stdout, false, true);
- bool new_can_grow =
- Code::GetKeyedAccessGrowMode(new_target->extra_ic_state()) ==
- ALLOW_JSARRAY_GROWTH;
+ JavaScriptFrame::PrintTop(isolate(), stdout, false, true);
+ Code::ExtraICState extra_state = new_target->extra_ic_state();
+ const char* modifier =
+ GetTransitionMarkModifier(Code::GetKeyedAccessStoreMode(extra_state));
PrintF(" (%c->%c%s)",
- TransitionMarkFromState(old_state),
+ TransitionMarkFromState(state()),
TransitionMarkFromState(new_state),
- new_can_grow ? ".GROW" : "");
+ modifier);
name->Print();
PrintF("]\n");
}
}
-#define TRACE_GENERIC_IC(type, reason) \
+#define TRACE_GENERIC_IC(isolate, type, reason) \
do { \
if (FLAG_trace_ic) { \
PrintF("[%s patching generic stub in ", type); \
- JavaScriptFrame::PrintTop(stdout, false, true); \
+ JavaScriptFrame::PrintTop(isolate, stdout, false, true); \
PrintF(" (%s)]\n", reason); \
} \
} while (false)
#else
-#define TRACE_GENERIC_IC(type, reason)
+#define TRACE_GENERIC_IC(isolate, type, reason)
#endif // DEBUG
-#define TRACE_IC(type, name, old_state, new_target) \
- ASSERT((TraceIC(type, name, old_state, new_target), true))
+#define TRACE_IC(type, name) \
+ ASSERT((TraceIC(type, name), true))
-IC::IC(FrameDepth depth, Isolate* isolate) : isolate_(isolate) {
- ASSERT(isolate == Isolate::Current());
- // To improve the performance of the (much used) IC code, we unfold
- // a few levels of the stack frame iteration code. This yields a
- // ~35% speedup when running DeltaBlue with the '--nouse-ic' flag.
+IC::IC(FrameDepth depth, Isolate* isolate)
+ : isolate_(isolate),
+ target_set_(false) {
+ // To improve the performance of the (much used) IC code, we unfold a few
+ // levels of the stack frame iteration code. This yields a ~35% speedup when
+ // running DeltaBlue and a ~25% speedup of gbemu with the '--nouse-ic' flag.
const Address entry =
Isolate::c_entry_fp(isolate->thread_local_top());
Address* pc_address =
reinterpret_cast<Address*>(entry + ExitFrameConstants::kCallerPCOffset);
Address fp = Memory::Address_at(entry + ExitFrameConstants::kCallerFPOffset);
- // If there's another JavaScript frame on the stack, we need to look
- // one frame further down the stack to find the frame pointer and
- // the return address stack slot.
+ // If there's another JavaScript frame on the stack or a
+ // StubFailureTrampoline, we need to look one frame further down the stack to
+ // find the frame pointer and the return address stack slot.
if (depth == EXTRA_CALL_FRAME) {
const int kCallerPCOffset = StandardFrameConstants::kCallerPCOffset;
pc_address = reinterpret_cast<Address*>(fp + kCallerPCOffset);
fp = Memory::Address_at(fp + StandardFrameConstants::kCallerFPOffset);
}
#ifdef DEBUG
- StackFrameIterator it;
+ StackFrameIterator it(isolate);
for (int i = 0; i < depth + 1; i++) it.Advance();
StackFrame* frame = it.frame();
ASSERT(fp == frame->fp() && pc_address == frame->pc_address());
#endif
fp_ = fp;
- pc_address_ = pc_address;
+ pc_address_ = StackFrame::ResolveReturnAddressLocation(pc_address);
+ target_ = handle(raw_target(), isolate);
+ state_ = target_->ic_state();
}
#ifdef ENABLE_DEBUGGER_SUPPORT
Address IC::OriginalCodeAddress() const {
- HandleScope scope;
+ HandleScope scope(isolate());
// Compute the JavaScript frame for the frame pointer of this IC
// structure. We need this to be able to find the function
// corresponding to the frame.
- StackFrameIterator it;
+ StackFrameIterator it(isolate());
while (it.frame()->fp() != this->fp()) it.Advance();
JavaScriptFrame* frame = JavaScriptFrame::cast(it.frame());
// Find the function on the stack and both the active code for the
// function and the original code.
- JSFunction* function = JSFunction::cast(frame->function());
- Handle<SharedFunctionInfo> shared(function->shared());
+ JSFunction* function = frame->function();
+ Handle<SharedFunctionInfo> shared(function->shared(), isolate());
Code* code = shared->code();
ASSERT(Debug::HasDebugInfo(shared));
Code* original_code = Debug::GetDebugInfo(shared)->original_code();
@@ -169,42 +180,130 @@ Address IC::OriginalCodeAddress() const {
#endif
-static bool HasNormalObjectsInPrototypeChain(Isolate* isolate,
- LookupResult* lookup,
- Object* receiver) {
- Object* end = lookup->IsProperty()
- ? lookup->holder() : Object::cast(isolate->heap()->null_value());
- for (Object* current = receiver;
- current != end;
- current = current->GetPrototype()) {
- if (current->IsJSObject() &&
- !JSObject::cast(current)->HasFastProperties() &&
- !current->IsJSGlobalProxy() &&
- !current->IsJSGlobalObject()) {
- return true;
+static bool HasInterceptorGetter(JSObject* object) {
+ return !object->GetNamedInterceptor()->getter()->IsUndefined();
+}
+
+
+static bool HasInterceptorSetter(JSObject* object) {
+ return !object->GetNamedInterceptor()->setter()->IsUndefined();
+}
+
+
+static void LookupForRead(Handle<Object> object,
+ Handle<String> name,
+ LookupResult* lookup) {
+ // Skip all the objects with named interceptors, but
+ // without actual getter.
+ while (true) {
+ object->Lookup(*name, lookup);
+ // Besides normal conditions (property not found or it's not
+ // an interceptor), bail out if lookup is not cacheable: we won't
+ // be able to IC it anyway and regular lookup should work fine.
+ if (!lookup->IsInterceptor() || !lookup->IsCacheable()) {
+ return;
+ }
+
+ Handle<JSObject> holder(lookup->holder(), lookup->isolate());
+ if (HasInterceptorGetter(*holder)) {
+ return;
+ }
+
+ holder->LocalLookupRealNamedProperty(*name, lookup);
+ if (lookup->IsFound()) {
+ ASSERT(!lookup->IsInterceptor());
+ return;
+ }
+
+ Handle<Object> proto(holder->GetPrototype(), lookup->isolate());
+ if (proto->IsNull()) {
+ ASSERT(!lookup->IsFound());
+ return;
}
+
+ object = proto;
}
+}
+
+
+bool CallIC::TryUpdateExtraICState(LookupResult* lookup,
+ Handle<Object> object) {
+ if (!lookup->IsConstantFunction()) return false;
+ JSFunction* function = lookup->GetConstantFunction();
+ if (!function->shared()->HasBuiltinFunctionId()) return false;
+ // Fetch the arguments passed to the called function.
+ const int argc = target()->arguments_count();
+ Address entry = isolate()->c_entry_fp(isolate()->thread_local_top());
+ Address fp = Memory::Address_at(entry + ExitFrameConstants::kCallerFPOffset);
+ Arguments args(argc + 1,
+ &Memory::Object_at(fp +
+ StandardFrameConstants::kCallerSPOffset +
+ argc * kPointerSize));
+ switch (function->shared()->builtin_function_id()) {
+ case kStringCharCodeAt:
+ case kStringCharAt:
+ if (object->IsString()) {
+ String* string = String::cast(*object);
+ // Check there's the right string value or wrapper in the receiver slot.
+ ASSERT(string == args[0] || string == JSValue::cast(args[0])->value());
+ // If we're in the default (fastest) state and the index is
+ // out of bounds, update the state to record this fact.
+ if (StringStubState::decode(extra_ic_state()) == DEFAULT_STRING_STUB &&
+ argc >= 1 && args[1]->IsNumber()) {
+ double index = DoubleToInteger(args.number_at(1));
+ if (index < 0 || index >= string->length()) {
+ extra_ic_state_ =
+ StringStubState::update(extra_ic_state(),
+ STRING_INDEX_OUT_OF_BOUNDS);
+ return true;
+ }
+ }
+ }
+ break;
+ default:
+ return false;
+ }
return false;
}
-static bool TryRemoveInvalidPrototypeDependentStub(Code* target,
- Object* receiver,
- Object* name) {
+bool IC::TryRemoveInvalidPrototypeDependentStub(Handle<Object> receiver,
+ Handle<String> name) {
+ DisallowHeapAllocation no_gc;
+
+ if (target()->is_call_stub()) {
+ LookupResult lookup(isolate());
+ LookupForRead(receiver, name, &lookup);
+ if (static_cast<CallIC*>(this)->TryUpdateExtraICState(&lookup, receiver)) {
+ return true;
+ }
+ }
+
+ if (target()->is_keyed_stub()) {
+ // Determine whether the failure is due to a name failure.
+ if (!name->IsName()) return false;
+ Name* stub_name = target()->FindFirstName();
+ if (*name != stub_name) return false;
+ }
+
InlineCacheHolderFlag cache_holder =
- Code::ExtractCacheHolderFromFlags(target->flags());
+ Code::ExtractCacheHolderFromFlags(target()->flags());
- if (cache_holder == OWN_MAP && !receiver->IsJSObject()) {
- // The stub was generated for JSObject but called for non-JSObject.
- // IC::GetCodeCacheHolder is not applicable.
- return false;
- } else if (cache_holder == PROTOTYPE_MAP &&
- receiver->GetPrototype()->IsNull()) {
- // IC::GetCodeCacheHolder is not applicable.
- return false;
+ switch (cache_holder) {
+ case OWN_MAP:
+ // The stub was generated for JSObject but called for non-JSObject.
+ // IC::GetCodeCacheHolder is not applicable.
+ if (!receiver->IsJSObject()) return false;
+ break;
+ case PROTOTYPE_MAP:
+ // IC::GetCodeCacheHolder is not applicable.
+ if (receiver->GetPrototype(isolate())->IsNull()) return false;
+ break;
}
- Map* map = IC::GetCodeCacheHolder(receiver, cache_holder)->map();
+
+ Handle<Map> map(
+ IC::GetCodeCacheHolder(isolate(), *receiver, cache_holder)->map());
// Decide whether the inline cache failed because of changes to the
// receiver itself or changes to one of its prototypes.
@@ -214,40 +313,79 @@ static bool TryRemoveInvalidPrototypeDependentStub(Code* target,
// the receiver map's code cache. Therefore, if the current target
// is in the receiver map's code cache, the inline cache failed due
// to prototype check failure.
- int index = map->IndexInCodeCache(name, target);
+ int index = map->IndexInCodeCache(*name, *target());
if (index >= 0) {
- map->RemoveFromCodeCache(String::cast(name), target, index);
+ map->RemoveFromCodeCache(*name, *target(), index);
+ // Handlers are stored in addition to the ICs on the map. Remove those, too.
+ TryRemoveInvalidHandlers(map, name);
return true;
}
+ // The stub is not in the cache. We've ruled out all other kinds of failure
+  // except for prototype chain changes, a deprecated map, a map that's
+ // different from the one that the stub expects, elements kind changes, or a
+  // constant global property that will become mutable. Treat all those
+ // situations as prototype failures (stay monomorphic if possible).
+
+ // If the IC is shared between multiple receivers (slow dictionary mode), then
+ // the map cannot be deprecated and the stub invalidated.
+ if (cache_holder == OWN_MAP) {
+ Map* old_map = target()->FindFirstMap();
+ if (old_map == *map) return true;
+ if (old_map != NULL) {
+ if (old_map->is_deprecated()) return true;
+ if (IsMoreGeneralElementsKindTransition(old_map->elements_kind(),
+ map->elements_kind())) {
+ return true;
+ }
+ }
+ }
+
+ if (receiver->IsGlobalObject()) {
+ LookupResult lookup(isolate());
+ GlobalObject* global = GlobalObject::cast(*receiver);
+ global->LocalLookupRealNamedProperty(*name, &lookup);
+ if (!lookup.IsFound()) return false;
+ PropertyCell* cell = global->GetPropertyCell(&lookup);
+ return cell->type()->IsConstant();
+ }
+
return false;
}
-IC::State IC::StateFrom(Code* target, Object* receiver, Object* name) {
- IC::State state = target->ic_state();
+void IC::TryRemoveInvalidHandlers(Handle<Map> map, Handle<String> name) {
+ CodeHandleList handlers;
+ target()->FindHandlers(&handlers);
+ for (int i = 0; i < handlers.length(); i++) {
+ Handle<Code> handler = handlers.at(i);
+ int index = map->IndexInCodeCache(*name, *handler);
+ if (index >= 0) {
+ map->RemoveFromCodeCache(*name, *handler, index);
+ return;
+ }
+ }
+}
- if (state != MONOMORPHIC || !name->IsString()) return state;
- if (receiver->IsUndefined() || receiver->IsNull()) return state;
- // For keyed load/store/call, the most likely cause of cache failure is
- // that the key has changed. We do not distinguish between
- // prototype and non-prototype failures for keyed access.
- Code::Kind kind = target->kind();
- if (kind == Code::KEYED_LOAD_IC ||
- kind == Code::KEYED_STORE_IC ||
- kind == Code::KEYED_CALL_IC) {
- return MONOMORPHIC;
+void IC::UpdateState(Handle<Object> receiver, Handle<Object> name) {
+ if (!name->IsString()) return;
+ if (state() != MONOMORPHIC) {
+ if (state() == POLYMORPHIC && receiver->IsHeapObject()) {
+ TryRemoveInvalidHandlers(
+ handle(Handle<HeapObject>::cast(receiver)->map()),
+ Handle<String>::cast(name));
+ }
+ return;
}
+ if (receiver->IsUndefined() || receiver->IsNull()) return;
// Remove the target from the code cache if it became invalid
// because of changes in the prototype chain to avoid hitting it
// again.
- // Call stubs handle this later to allow extra IC state
- // transitions.
- if (kind != Code::CALL_IC &&
- TryRemoveInvalidPrototypeDependentStub(target, receiver, name)) {
- return MONOMORPHIC_PROTOTYPE_FAILURE;
+ if (TryRemoveInvalidPrototypeDependentStub(
+ receiver, Handle<String>::cast(name))) {
+ return MarkMonomorphicPrototypeFailure();
}
// The builtins object is special. It only changes when JavaScript
@@ -256,24 +394,20 @@ IC::State IC::StateFrom(Code* target, Object* receiver, Object* name) {
// an inline cache miss for the builtins object after lazily loading
// JavaScript builtins, we return uninitialized as the state to
// force the inline cache back to monomorphic state.
- if (receiver->IsJSBuiltinsObject()) {
- return UNINITIALIZED;
- }
-
- return MONOMORPHIC;
+ if (receiver->IsJSBuiltinsObject()) state_ = UNINITIALIZED;
}
RelocInfo::Mode IC::ComputeMode() {
Address addr = address();
- Code* code = Code::cast(isolate()->heap()->FindCodeObject(addr));
+ Code* code = Code::cast(isolate()->FindCodeObject(addr));
for (RelocIterator it(code, RelocInfo::kCodeTargetMask);
!it.done(); it.next()) {
RelocInfo* info = it.rinfo();
if (info->pc() == addr) return info->rmode();
}
UNREACHABLE();
- return RelocInfo::NONE;
+ return RelocInfo::NONE32;
}
@@ -310,7 +444,8 @@ void IC::PostPatching(Address address, Code* target, Code* old_target) {
if (FLAG_type_info_threshold == 0 && !FLAG_watch_ic_patching) {
return;
}
- Code* host = target->GetHeap()->isolate()->
+ Isolate* isolate = target->GetHeap()->isolate();
+ Code* host = isolate->
inner_pointer_to_code_cache()->GetCacheEntry(address)->code;
if (host->kind() != Code::FUNCTION) return;
@@ -333,7 +468,7 @@ void IC::PostPatching(Address address, Code* target, Code* old_target) {
}
if (FLAG_watch_ic_patching) {
host->set_profiler_ticks(0);
- Isolate::Current()->runtime_profiler()->NotifyICChanged();
+ isolate->runtime_profiler()->NotifyICChanged();
}
// TODO(2029): When an optimized function is patched, it would
// be nice to propagate the corresponding type information to its
@@ -341,23 +476,23 @@ void IC::PostPatching(Address address, Code* target, Code* old_target) {
}
-void IC::Clear(Address address) {
+void IC::Clear(Isolate* isolate, Address address) {
Code* target = GetTargetAtAddress(address);
// Don't clear debug break inline cache as it will remove the break point.
- if (target->ic_state() == DEBUG_BREAK) return;
+ if (target->is_debug_stub()) return;
switch (target->kind()) {
- case Code::LOAD_IC: return LoadIC::Clear(address, target);
+ case Code::LOAD_IC: return LoadIC::Clear(isolate, address, target);
case Code::KEYED_LOAD_IC:
- return KeyedLoadIC::Clear(address, target);
- case Code::STORE_IC: return StoreIC::Clear(address, target);
+ return KeyedLoadIC::Clear(isolate, address, target);
+ case Code::STORE_IC: return StoreIC::Clear(isolate, address, target);
case Code::KEYED_STORE_IC:
- return KeyedStoreIC::Clear(address, target);
+ return KeyedStoreIC::Clear(isolate, address, target);
case Code::CALL_IC: return CallIC::Clear(address, target);
case Code::KEYED_CALL_IC: return KeyedCallIC::Clear(address, target);
- case Code::COMPARE_IC: return CompareIC::Clear(address, target);
- case Code::UNARY_OP_IC:
+ case Code::COMPARE_IC: return CompareIC::Clear(isolate, address, target);
+ case Code::COMPARE_NIL_IC: return CompareNilIC::Clear(address, target);
case Code::BINARY_OP_IC:
case Code::TO_BOOLEAN_IC:
// Clearing these is tricky and does not
@@ -369,10 +504,10 @@ void IC::Clear(Address address) {
void CallICBase::Clear(Address address, Code* target) {
- if (target->ic_state() == UNINITIALIZED) return;
+ if (IsCleared(target)) return;
bool contextual = CallICBase::Contextual::decode(target->extra_ic_state());
Code* code =
- Isolate::Current()->stub_cache()->FindCallInitialize(
+ target->GetIsolate()->stub_cache()->FindCallInitialize(
target->arguments_count(),
contextual ? RelocInfo::CODE_TARGET_CONTEXT : RelocInfo::CODE_TARGET,
target->kind());
@@ -380,99 +515,58 @@ void CallICBase::Clear(Address address, Code* target) {
}
-void KeyedLoadIC::Clear(Address address, Code* target) {
- if (target->ic_state() == UNINITIALIZED) return;
+void KeyedLoadIC::Clear(Isolate* isolate, Address address, Code* target) {
+ if (IsCleared(target)) return;
// Make sure to also clear the map used in inline fast cases. If we
// do not clear these maps, cached code can keep objects alive
// through the embedded maps.
- SetTargetAtAddress(address, initialize_stub());
+ SetTargetAtAddress(address, *pre_monomorphic_stub(isolate));
}
-void LoadIC::Clear(Address address, Code* target) {
- if (target->ic_state() == UNINITIALIZED) return;
- SetTargetAtAddress(address, initialize_stub());
+void LoadIC::Clear(Isolate* isolate, Address address, Code* target) {
+ if (IsCleared(target)) return;
+ SetTargetAtAddress(address, *pre_monomorphic_stub(isolate));
}
-void StoreIC::Clear(Address address, Code* target) {
- if (target->ic_state() == UNINITIALIZED) return;
+void StoreIC::Clear(Isolate* isolate, Address address, Code* target) {
+ if (IsCleared(target)) return;
SetTargetAtAddress(address,
- (Code::GetStrictMode(target->extra_ic_state()) == kStrictMode)
- ? initialize_stub_strict()
- : initialize_stub());
+ *pre_monomorphic_stub(
+ isolate, Code::GetStrictMode(target->extra_ic_state())));
}
-void KeyedStoreIC::Clear(Address address, Code* target) {
- if (target->ic_state() == UNINITIALIZED) return;
+void KeyedStoreIC::Clear(Isolate* isolate, Address address, Code* target) {
+ if (IsCleared(target)) return;
SetTargetAtAddress(address,
- (Code::GetStrictMode(target->extra_ic_state()) == kStrictMode)
- ? initialize_stub_strict()
- : initialize_stub());
+ *pre_monomorphic_stub(
+ isolate, Code::GetStrictMode(target->extra_ic_state())));
}
-void CompareIC::Clear(Address address, Code* target) {
- // Only clear ICCompareStubs, we currently cannot clear generic CompareStubs.
- if (target->major_key() != CodeStub::CompareIC) return;
+void CompareIC::Clear(Isolate* isolate, Address address, Code* target) {
+ ASSERT(target->major_key() == CodeStub::CompareIC);
+ CompareIC::State handler_state;
+ Token::Value op;
+ ICCompareStub::DecodeMinorKey(target->stub_info(), NULL, NULL,
+ &handler_state, &op);
// Only clear CompareICs that can retain objects.
- if (target->compare_state() != KNOWN_OBJECTS) return;
- Token::Value op = CompareIC::ComputeOperation(target);
- SetTargetAtAddress(address, GetRawUninitialized(op));
+ if (handler_state != KNOWN_OBJECT) return;
+ SetTargetAtAddress(address, GetRawUninitialized(isolate, op));
PatchInlinedSmiCode(address, DISABLE_INLINED_SMI_CHECK);
}
-static bool HasInterceptorGetter(JSObject* object) {
- return !object->GetNamedInterceptor()->getter()->IsUndefined();
-}
-
-
-static void LookupForRead(Handle<Object> object,
- Handle<String> name,
- LookupResult* lookup) {
- // Skip all the objects with named interceptors, but
- // without actual getter.
- while (true) {
- object->Lookup(*name, lookup);
- // Besides normal conditions (property not found or it's not
- // an interceptor), bail out if lookup is not cacheable: we won't
- // be able to IC it anyway and regular lookup should work fine.
- if (!lookup->IsInterceptor() || !lookup->IsCacheable()) {
- return;
- }
-
- Handle<JSObject> holder(lookup->holder());
- if (HasInterceptorGetter(*holder)) {
- return;
- }
-
- holder->LocalLookupRealNamedProperty(*name, lookup);
- if (lookup->IsFound()) {
- ASSERT(!lookup->IsInterceptor());
- return;
- }
-
- Handle<Object> proto(holder->GetPrototype());
- if (proto->IsNull()) {
- ASSERT(!lookup->IsFound());
- return;
- }
-
- object = proto;
- }
-}
-
-
Handle<Object> CallICBase::TryCallAsFunction(Handle<Object> object) {
- Handle<Object> delegate = Execution::GetFunctionDelegate(object);
+ Handle<Object> delegate = Execution::GetFunctionDelegate(isolate(), object);
if (delegate->IsJSFunction() && !object->IsJSFunctionProxy()) {
// Patch the receiver and use the delegate as the function to
// invoke. This is used for invoking objects as if they were functions.
const int argc = target()->arguments_count();
- StackFrameLocator locator;
+ StackFrameLocator locator(isolate());
JavaScriptFrame* frame = locator.FindJavaScriptFrame(0);
int index = frame->ComputeExpressionsCount() - (argc + 1);
frame->SetExpression(index, *object);
@@ -485,7 +579,8 @@ Handle<Object> CallICBase::TryCallAsFunction(Handle<Object> object) {
void CallICBase::ReceiverToObjectIfRequired(Handle<Object> callee,
Handle<Object> object) {
while (callee->IsJSFunctionProxy()) {
- callee = Handle<Object>(JSFunctionProxy::cast(*callee)->call_trap());
+ callee = Handle<Object>(JSFunctionProxy::cast(*callee)->call_trap(),
+ isolate());
}
if (callee->IsJSFunction()) {
@@ -500,7 +595,7 @@ void CallICBase::ReceiverToObjectIfRequired(Handle<Object> callee,
if (object->IsString() || object->IsNumber() || object->IsBoolean()) {
// Change the receiver to the result of calling ToObject on it.
const int argc = this->target()->arguments_count();
- StackFrameLocator locator;
+ StackFrameLocator locator(isolate());
JavaScriptFrame* frame = locator.FindJavaScriptFrame(0);
int index = frame->ComputeExpressionsCount() - (argc + 1);
frame->SetExpression(index, *isolate()->factory()->ToObject(object));
@@ -508,10 +603,19 @@ void CallICBase::ReceiverToObjectIfRequired(Handle<Object> callee,
}
-MaybeObject* CallICBase::LoadFunction(State state,
- Code::ExtraICState extra_ic_state,
- Handle<Object> object,
+static bool MigrateDeprecated(Handle<Object> object) {
+ if (!object->IsJSObject()) return false;
+ Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+ if (!receiver->map()->is_deprecated()) return false;
+ JSObject::MigrateInstance(Handle<JSObject>::cast(object));
+ return true;
+}
+
+
+MaybeObject* CallICBase::LoadFunction(Handle<Object> object,
Handle<String> name) {
+ bool use_ic = MigrateDeprecated(object) ? false : FLAG_use_ic;
+
// If the object is undefined or null it's illegal to try to get any
// of its properties; throw a TypeError in that case.
if (object->IsUndefined() || object->IsNull()) {
@@ -522,7 +626,7 @@ MaybeObject* CallICBase::LoadFunction(State state,
// the element if so.
uint32_t index;
if (name->AsArrayIndex(&index)) {
- Handle<Object> result = Object::GetElement(object, index);
+ Handle<Object> result = Object::GetElement(isolate(), object, index);
RETURN_IF_EMPTY_HANDLE(isolate(), result);
if (result->IsJSFunction()) return *result;
@@ -540,15 +644,13 @@ MaybeObject* CallICBase::LoadFunction(State state,
if (!lookup.IsFound()) {
// If the object does not have the requested property, check which
// exception we need to throw.
- return IsContextual(object)
+ return IsUndeclaredGlobal(object)
? ReferenceError("not_defined", name)
: TypeError("undefined_method", object, name);
}
// Lookup is valid: Update inline cache and stub cache.
- if (FLAG_use_ic) {
- UpdateCaches(&lookup, state, extra_ic_state, object, name);
- }
+ if (use_ic) UpdateCaches(&lookup, object, name);
// Get the property.
PropertyAttributes attr;
@@ -559,7 +661,7 @@ MaybeObject* CallICBase::LoadFunction(State state,
if (lookup.IsInterceptor() && attr == ABSENT) {
// If the object does not have the requested property, check which
// exception we need to throw.
- return IsContextual(object)
+ return IsUndeclaredGlobal(object)
? ReferenceError("not_defined", name)
: TypeError("undefined_method", object, name);
}
@@ -593,70 +695,25 @@ MaybeObject* CallICBase::LoadFunction(State state,
}
-bool CallICBase::TryUpdateExtraICState(LookupResult* lookup,
- Handle<Object> object,
- Code::ExtraICState* extra_ic_state) {
- ASSERT(kind_ == Code::CALL_IC);
- if (lookup->type() != CONSTANT_FUNCTION) return false;
- JSFunction* function = lookup->GetConstantFunction();
- if (!function->shared()->HasBuiltinFunctionId()) return false;
-
- // Fetch the arguments passed to the called function.
- const int argc = target()->arguments_count();
- Address entry = isolate()->c_entry_fp(isolate()->thread_local_top());
- Address fp = Memory::Address_at(entry + ExitFrameConstants::kCallerFPOffset);
- Arguments args(argc + 1,
- &Memory::Object_at(fp +
- StandardFrameConstants::kCallerSPOffset +
- argc * kPointerSize));
- switch (function->shared()->builtin_function_id()) {
- case kStringCharCodeAt:
- case kStringCharAt:
- if (object->IsString()) {
- String* string = String::cast(*object);
- // Check there's the right string value or wrapper in the receiver slot.
- ASSERT(string == args[0] || string == JSValue::cast(args[0])->value());
- // If we're in the default (fastest) state and the index is
- // out of bounds, update the state to record this fact.
- if (StringStubState::decode(*extra_ic_state) == DEFAULT_STRING_STUB &&
- argc >= 1 && args[1]->IsNumber()) {
- double index = DoubleToInteger(args.number_at(1));
- if (index < 0 || index >= string->length()) {
- *extra_ic_state =
- StringStubState::update(*extra_ic_state,
- STRING_INDEX_OUT_OF_BOUNDS);
- return true;
- }
- }
- }
- break;
- default:
- return false;
- }
- return false;
-}
-
-
Handle<Code> CallICBase::ComputeMonomorphicStub(LookupResult* lookup,
- State state,
- Code::ExtraICState extra_state,
Handle<Object> object,
Handle<String> name) {
int argc = target()->arguments_count();
- Handle<JSObject> holder(lookup->holder());
+ Handle<JSObject> holder(lookup->holder(), isolate());
switch (lookup->type()) {
case FIELD: {
- int index = lookup->GetFieldIndex();
+ PropertyIndex index = lookup->GetFieldIndex();
return isolate()->stub_cache()->ComputeCallField(
- argc, kind_, extra_state, name, object, holder, index);
+ argc, kind_, extra_ic_state(), name, object, holder, index);
}
- case CONSTANT_FUNCTION: {
+ case CONSTANT: {
+ if (!lookup->IsConstantFunction()) return Handle<Code>::null();
// Get the constant function and compute the code stub for this
// call; used for rewriting to monomorphic state and making sure
// that the code stub is in the stub cache.
- Handle<JSFunction> function(lookup->GetConstantFunction());
+ Handle<JSFunction> function(lookup->GetConstantFunction(), isolate());
return isolate()->stub_cache()->ComputeCallConstant(
- argc, kind_, extra_state, name, object, holder, function);
+ argc, kind_, extra_ic_state(), name, object, holder, function);
}
case NORMAL: {
// If we return a null handle, the IC will not be patched.
@@ -665,11 +722,13 @@ Handle<Code> CallICBase::ComputeMonomorphicStub(LookupResult* lookup,
if (holder->IsGlobalObject()) {
Handle<GlobalObject> global = Handle<GlobalObject>::cast(holder);
- Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(lookup));
+ Handle<PropertyCell> cell(
+ global->GetPropertyCell(lookup), isolate());
if (!cell->value()->IsJSFunction()) return Handle<Code>::null();
Handle<JSFunction> function(JSFunction::cast(cell->value()));
return isolate()->stub_cache()->ComputeCallGlobal(
- argc, kind_, extra_state, name, receiver, global, cell, function);
+ argc, kind_, extra_ic_state(), name,
+ receiver, global, cell, function);
} else {
// There is only one shared stub for calling normalized
// properties. It does not traverse the prototype chain, so the
@@ -677,125 +736,90 @@ Handle<Code> CallICBase::ComputeMonomorphicStub(LookupResult* lookup,
// applicable.
if (!holder.is_identical_to(receiver)) return Handle<Code>::null();
return isolate()->stub_cache()->ComputeCallNormal(
- argc, kind_, extra_state);
+ argc, kind_, extra_ic_state());
}
break;
}
case INTERCEPTOR:
ASSERT(HasInterceptorGetter(*holder));
return isolate()->stub_cache()->ComputeCallInterceptor(
- argc, kind_, extra_state, name, object, holder);
+ argc, kind_, extra_ic_state(), name, object, holder);
default:
return Handle<Code>::null();
}
}
+Handle<Code> CallICBase::megamorphic_stub() {
+ return isolate()->stub_cache()->ComputeCallMegamorphic(
+ target()->arguments_count(), kind_, extra_ic_state());
+}
+
+
+Handle<Code> CallICBase::pre_monomorphic_stub() {
+ return isolate()->stub_cache()->ComputeCallPreMonomorphic(
+ target()->arguments_count(), kind_, extra_ic_state());
+}
+
+
void CallICBase::UpdateCaches(LookupResult* lookup,
- State state,
- Code::ExtraICState extra_ic_state,
Handle<Object> object,
Handle<String> name) {
// Bail out if we didn't find a result.
if (!lookup->IsProperty() || !lookup->IsCacheable()) return;
- if (lookup->holder() != *object &&
- HasNormalObjectsInPrototypeChain(
- isolate(), lookup, object->GetPrototype())) {
- // Suppress optimization for prototype chains with slow properties objects
- // in the middle.
- return;
- }
-
// Compute the number of arguments.
- int argc = target()->arguments_count();
- bool had_proto_failure = false;
Handle<Code> code;
- if (state == UNINITIALIZED) {
- // This is the first time we execute this inline cache.
- // Set the target to the pre monomorphic stub to delay
- // setting the monomorphic state.
- code = isolate()->stub_cache()->ComputeCallPreMonomorphic(
- argc, kind_, extra_ic_state);
- } else if (state == MONOMORPHIC) {
- if (kind_ == Code::CALL_IC &&
- TryUpdateExtraICState(lookup, object, &extra_ic_state)) {
- code = ComputeMonomorphicStub(lookup, state, extra_ic_state,
- object, name);
- } else if (kind_ == Code::CALL_IC &&
- TryRemoveInvalidPrototypeDependentStub(target(),
- *object,
- *name)) {
- had_proto_failure = true;
- code = ComputeMonomorphicStub(lookup, state, extra_ic_state,
- object, name);
- } else {
- code = isolate()->stub_cache()->ComputeCallMegamorphic(
- argc, kind_, extra_ic_state);
- }
- } else {
- code = ComputeMonomorphicStub(lookup, state, extra_ic_state,
- object, name);
- }
+ code = state() == UNINITIALIZED
+ ? pre_monomorphic_stub()
+ : ComputeMonomorphicStub(lookup, object, name);
// If there's no appropriate stub we simply avoid updating the caches.
+ // TODO(verwaest): Install a slow fallback in this case to avoid not learning,
+ // and deopting Crankshaft code.
if (code.is_null()) return;
- // Patch the call site depending on the state of the cache.
- if (state == UNINITIALIZED ||
- state == PREMONOMORPHIC ||
- state == MONOMORPHIC ||
- state == MONOMORPHIC_PROTOTYPE_FAILURE) {
- set_target(*code);
- } else if (state == MEGAMORPHIC) {
- // Cache code holding map should be consistent with
- // GenerateMonomorphicCacheProbe. It is not the map which holds the stub.
- Handle<JSObject> cache_object = object->IsJSObject()
- ? Handle<JSObject>::cast(object)
- : Handle<JSObject>(JSObject::cast(object->GetPrototype()));
- // Update the stub cache.
- isolate()->stub_cache()->Set(*name, cache_object->map(), *code);
- }
+ Handle<JSObject> cache_object = object->IsJSObject()
+ ? Handle<JSObject>::cast(object)
+ : Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate())),
+ isolate());
- if (had_proto_failure) state = MONOMORPHIC_PROTOTYPE_FAILURE;
- TRACE_IC(kind_ == Code::CALL_IC ? "CallIC" : "KeyedCallIC",
- name, state, target());
+ PatchCache(cache_object, name, code);
+ TRACE_IC("CallIC", name);
}
-MaybeObject* KeyedCallIC::LoadFunction(State state,
- Handle<Object> object,
+MaybeObject* KeyedCallIC::LoadFunction(Handle<Object> object,
Handle<Object> key) {
- if (key->IsSymbol()) {
- return CallICBase::LoadFunction(state,
- Code::kNoExtraICState,
- object,
- Handle<String>::cast(key));
+ if (key->IsInternalizedString()) {
+ return CallICBase::LoadFunction(object, Handle<String>::cast(key));
}
if (object->IsUndefined() || object->IsNull()) {
return TypeError("non_object_property_call", object, key);
}
- if (FLAG_use_ic && state != MEGAMORPHIC && object->IsHeapObject()) {
+ bool use_ic = MigrateDeprecated(object)
+ ? false : FLAG_use_ic && !object->IsAccessCheckNeeded();
+
+ if (use_ic && state() != MEGAMORPHIC) {
+ ASSERT(!object->IsJSGlobalProxy());
int argc = target()->arguments_count();
- Handle<Map> map =
- isolate()->factory()->non_strict_arguments_elements_map();
- if (object->IsJSObject() &&
- Handle<JSObject>::cast(object)->elements()->map() == *map) {
- Handle<Code> code = isolate()->stub_cache()->ComputeCallArguments(
- argc, Code::KEYED_CALL_IC);
- set_target(*code);
- TRACE_IC("KeyedCallIC", key, state, target());
- } else if (!object->IsAccessCheckNeeded()) {
- Handle<Code> code = isolate()->stub_cache()->ComputeCallMegamorphic(
- argc, Code::KEYED_CALL_IC, Code::kNoExtraICState);
- set_target(*code);
- TRACE_IC("KeyedCallIC", key, state, target());
+ Handle<Code> stub = isolate()->stub_cache()->ComputeCallMegamorphic(
+ argc, Code::KEYED_CALL_IC, Code::kNoExtraICState);
+ if (object->IsJSObject()) {
+ Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+ if (receiver->elements()->map() ==
+ isolate()->heap()->non_strict_arguments_elements_map()) {
+ stub = isolate()->stub_cache()->ComputeCallArguments(argc);
+ }
}
+ ASSERT(!stub.is_null());
+ set_target(*stub);
+ TRACE_IC("CallIC", key);
}
- Handle<Object> result = GetProperty(object, key);
+ Handle<Object> result = GetProperty(isolate(), object, key);
RETURN_IF_EMPTY_HANDLE(isolate(), result);
// Make receiver an object if the callee requires it. Strict mode or builtin
@@ -811,8 +835,7 @@ MaybeObject* KeyedCallIC::LoadFunction(State state,
}
-MaybeObject* LoadIC::Load(State state,
- Handle<Object> object,
+MaybeObject* LoadIC::Load(Handle<Object> object,
Handle<String> name) {
// If the object is undefined or null it's illegal to try to get any
// of its properties; throw a TypeError in that case.
@@ -825,63 +848,41 @@ MaybeObject* LoadIC::Load(State state,
// string wrapper objects. The length property of string wrapper
// objects is read-only and therefore always returns the length of
// the underlying string value. See ECMA-262 15.5.5.1.
- if ((object->IsString() || object->IsStringWrapper()) &&
- name->Equals(isolate()->heap()->length_symbol())) {
+ if (object->IsStringWrapper() &&
+ name->Equals(isolate()->heap()->length_string())) {
Handle<Code> stub;
- if (state == UNINITIALIZED) {
+ if (state() == UNINITIALIZED) {
stub = pre_monomorphic_stub();
- } else if (state == PREMONOMORPHIC) {
- stub = object->IsString()
- ? isolate()->builtins()->LoadIC_StringLength()
- : isolate()->builtins()->LoadIC_StringWrapperLength();
- } else if (state == MONOMORPHIC && object->IsStringWrapper()) {
- stub = isolate()->builtins()->LoadIC_StringWrapperLength();
- } else if (state != MEGAMORPHIC) {
+ } else if (state() == PREMONOMORPHIC || state() == MONOMORPHIC) {
+ StringLengthStub string_length_stub(kind());
+ stub = string_length_stub.GetCode(isolate());
+ } else if (state() != MEGAMORPHIC) {
+ ASSERT(state() != GENERIC);
stub = megamorphic_stub();
}
if (!stub.is_null()) {
set_target(*stub);
#ifdef DEBUG
- if (FLAG_trace_ic) PrintF("[LoadIC : +#length /string]\n");
+ if (FLAG_trace_ic) PrintF("[LoadIC : +#length /stringwrapper]\n");
#endif
}
// Get the string if we have a string wrapper object.
- Handle<Object> string = object->IsJSValue()
- ? Handle<Object>(Handle<JSValue>::cast(object)->value())
- : object;
- return Smi::FromInt(String::cast(*string)->length());
- }
-
- // Use specialized code for getting the length of arrays.
- if (object->IsJSArray() &&
- name->Equals(isolate()->heap()->length_symbol())) {
- Handle<Code> stub;
- if (state == UNINITIALIZED) {
- stub = pre_monomorphic_stub();
- } else if (state == PREMONOMORPHIC) {
- stub = isolate()->builtins()->LoadIC_ArrayLength();
- } else if (state != MEGAMORPHIC) {
- stub = megamorphic_stub();
- }
- if (!stub.is_null()) {
- set_target(*stub);
-#ifdef DEBUG
- if (FLAG_trace_ic) PrintF("[LoadIC : +#length /array]\n");
-#endif
- }
- return JSArray::cast(*object)->length();
+ String* string = String::cast(JSValue::cast(*object)->value());
+ return Smi::FromInt(string->length());
}
// Use specialized code for getting prototype of functions.
if (object->IsJSFunction() &&
- name->Equals(isolate()->heap()->prototype_symbol()) &&
+ name->Equals(isolate()->heap()->prototype_string()) &&
Handle<JSFunction>::cast(object)->should_have_prototype()) {
Handle<Code> stub;
- if (state == UNINITIALIZED) {
+ if (state() == UNINITIALIZED) {
stub = pre_monomorphic_stub();
- } else if (state == PREMONOMORPHIC) {
- stub = isolate()->builtins()->LoadIC_FunctionPrototype();
- } else if (state != MEGAMORPHIC) {
+ } else if (state() == PREMONOMORPHIC) {
+ FunctionPrototypeStub function_prototype_stub(kind());
+ stub = function_prototype_stub.GetCode(isolate());
+ } else if (state() != MEGAMORPHIC) {
+ ASSERT(state() != GENERIC);
stub = megamorphic_stub();
}
if (!stub.is_null()) {
@@ -890,14 +891,20 @@ MaybeObject* LoadIC::Load(State state,
if (FLAG_trace_ic) PrintF("[LoadIC : +#prototype /function]\n");
#endif
}
- return Accessors::FunctionGetPrototype(*object, 0);
+ return *Accessors::FunctionGetPrototype(Handle<JSFunction>::cast(object));
}
}
// Check if the name is trivially convertible to an index and get
- // the element if so.
+ // the element or char if so.
uint32_t index;
- if (name->AsArrayIndex(&index)) return object->GetElement(index);
+ if (kind() == Code::KEYED_LOAD_IC && name->AsArrayIndex(&index)) {
+ // Rewrite to the generic keyed load stub.
+ if (FLAG_use_ic) set_target(*generic_stub());
+ return Runtime::GetElementOrCharAtOrFail(isolate(), object, index);
+ }
+
+ bool use_ic = MigrateDeprecated(object) ? false : FLAG_use_ic;
// Named lookup in the object.
LookupResult lookup(isolate());
@@ -905,306 +912,474 @@ MaybeObject* LoadIC::Load(State state,
// If we did not find a property, check if we need to throw an exception.
if (!lookup.IsFound()) {
- if (IsContextual(object)) {
+ if (IsUndeclaredGlobal(object)) {
return ReferenceError("not_defined", name);
}
LOG(isolate(), SuspectReadEvent(*name, *object));
}
// Update inline cache and stub cache.
- if (FLAG_use_ic) {
- UpdateCaches(&lookup, state, object, name);
- }
+ if (use_ic) UpdateCaches(&lookup, object, name);
PropertyAttributes attr;
- if (lookup.IsInterceptor() || lookup.IsHandler()) {
- // Get the property.
- Handle<Object> result =
- Object::GetProperty(object, object, &lookup, name, &attr);
- RETURN_IF_EMPTY_HANDLE(isolate(), result);
- // If the property is not present, check if we need to throw an
- // exception.
- if (attr == ABSENT && IsContextual(object)) {
- return ReferenceError("not_defined", name);
+ // Get the property.
+ Handle<Object> result =
+ Object::GetProperty(object, object, &lookup, name, &attr);
+ RETURN_IF_EMPTY_HANDLE(isolate(), result);
+ // If the property is not present, check if we need to throw an
+ // exception.
+ if ((lookup.IsInterceptor() || lookup.IsHandler()) &&
+ attr == ABSENT && IsUndeclaredGlobal(object)) {
+ return ReferenceError("not_defined", name);
+ }
+ return *result;
+}
+
+
+static bool AddOneReceiverMapIfMissing(MapHandleList* receiver_maps,
+ Handle<Map> new_receiver_map) {
+ ASSERT(!new_receiver_map.is_null());
+ for (int current = 0; current < receiver_maps->length(); ++current) {
+ if (!receiver_maps->at(current).is_null() &&
+ receiver_maps->at(current).is_identical_to(new_receiver_map)) {
+ return false;
}
- return *result;
}
+ receiver_maps->Add(new_receiver_map);
+ return true;
+}
- // Get the property.
- return object->GetProperty(*object, &lookup, *name, &attr);
+
+bool IC::UpdatePolymorphicIC(Handle<HeapObject> receiver,
+ Handle<String> name,
+ Handle<Code> code) {
+ if (!code->is_handler()) return false;
+
+ MapHandleList receiver_maps;
+ CodeHandleList handlers;
+
+ int number_of_valid_maps;
+ int handler_to_overwrite = -1;
+ Handle<Map> new_receiver_map(receiver->map());
+ {
+ DisallowHeapAllocation no_gc;
+ target()->FindAllMaps(&receiver_maps);
+ int number_of_maps = receiver_maps.length();
+ number_of_valid_maps = number_of_maps;
+
+ for (int i = 0; i < number_of_maps; i++) {
+ Handle<Map> map = receiver_maps.at(i);
+ // Filter out deprecated maps to ensure its instances get migrated.
+ if (map->is_deprecated()) {
+ number_of_valid_maps--;
+ // If the receiver map is already in the polymorphic IC, this indicates
+      // there was a prototype chain failure. In that case, just overwrite the
+ // handler.
+ } else if (map.is_identical_to(new_receiver_map)) {
+ number_of_valid_maps--;
+ handler_to_overwrite = i;
+ }
+ }
+
+ if (number_of_valid_maps >= 4) return false;
+ if (number_of_maps == 0) return false;
+
+ if (!target()->FindHandlers(&handlers, receiver_maps.length())) {
+ return false;
+ }
+ }
+
+ number_of_valid_maps++;
+ if (handler_to_overwrite >= 0) {
+ handlers.Set(handler_to_overwrite, code);
+ } else {
+ receiver_maps.Add(new_receiver_map);
+ handlers.Add(code);
+ }
+
+ Handle<Code> ic = isolate()->stub_cache()->ComputePolymorphicIC(
+ &receiver_maps, &handlers, number_of_valid_maps, name, strict_mode());
+ set_target(*ic);
+ return true;
+}
+
+
+void IC::UpdateMonomorphicIC(Handle<HeapObject> receiver,
+ Handle<Code> handler,
+ Handle<String> name) {
+ if (!handler->is_handler()) return set_target(*handler);
+ Handle<Code> ic = isolate()->stub_cache()->ComputeMonomorphicIC(
+ receiver, handler, name, strict_mode());
+ set_target(*ic);
+}
+
+
+void IC::CopyICToMegamorphicCache(Handle<String> name) {
+ MapHandleList receiver_maps;
+ CodeHandleList handlers;
+ {
+ DisallowHeapAllocation no_gc;
+ target()->FindAllMaps(&receiver_maps);
+ if (!target()->FindHandlers(&handlers, receiver_maps.length())) return;
+ }
+ for (int i = 0; i < receiver_maps.length(); i++) {
+ UpdateMegamorphicCache(*receiver_maps.at(i), *name, *handlers.at(i));
+ }
+}
+
+
+bool IC::IsTransitionedMapOfMonomorphicTarget(Map* receiver_map) {
+ DisallowHeapAllocation no_allocation;
+
+ Map* current_map = target()->FindFirstMap();
+ ElementsKind receiver_elements_kind = receiver_map->elements_kind();
+ bool more_general_transition =
+ IsMoreGeneralElementsKindTransition(
+ current_map->elements_kind(), receiver_elements_kind);
+ Map* transitioned_map = more_general_transition
+ ? current_map->LookupElementsTransitionMap(receiver_elements_kind)
+ : NULL;
+
+ return transitioned_map == receiver_map;
+}
+
+
+void IC::PatchCache(Handle<HeapObject> receiver,
+ Handle<String> name,
+ Handle<Code> code) {
+ switch (state()) {
+ case UNINITIALIZED:
+ case PREMONOMORPHIC:
+ case MONOMORPHIC_PROTOTYPE_FAILURE:
+ UpdateMonomorphicIC(receiver, code, name);
+ break;
+ case MONOMORPHIC:
+ // For now, call stubs are allowed to rewrite to the same stub. This
+ // happens e.g., when the field does not contain a function.
+ ASSERT(target()->is_call_stub() ||
+ target()->is_keyed_call_stub() ||
+ !target().is_identical_to(code));
+ if (!target()->is_keyed_stub()) {
+ bool is_same_handler = false;
+ {
+ DisallowHeapAllocation no_allocation;
+ Code* old_handler = target()->FindFirstHandler();
+ is_same_handler = old_handler == *code;
+ }
+ if (is_same_handler
+ && IsTransitionedMapOfMonomorphicTarget(receiver->map())) {
+ UpdateMonomorphicIC(receiver, code, name);
+ break;
+ }
+ if (UpdatePolymorphicIC(receiver, name, code)) {
+ break;
+ }
+
+ CopyICToMegamorphicCache(name);
+ }
+
+ UpdateMegamorphicCache(receiver->map(), *name, *code);
+ set_target(*megamorphic_stub());
+ break;
+ case MEGAMORPHIC:
+ UpdateMegamorphicCache(receiver->map(), *name, *code);
+ break;
+ case POLYMORPHIC:
+ if (target()->is_keyed_stub()) {
+ // When trying to patch a polymorphic keyed stub with anything other
+ // than another polymorphic stub, go generic.
+ set_target(*generic_stub());
+ } else {
+ if (UpdatePolymorphicIC(receiver, name, code)) {
+ break;
+ }
+ CopyICToMegamorphicCache(name);
+ UpdateMegamorphicCache(receiver->map(), *name, *code);
+ set_target(*megamorphic_stub());
+ }
+ break;
+ case DEBUG_STUB:
+ break;
+ case GENERIC:
+ UNREACHABLE();
+ break;
+ }
}
+Handle<Code> LoadIC::SimpleFieldLoad(int offset,
+ bool inobject,
+ Representation representation) {
+ if (kind() == Code::LOAD_IC) {
+ LoadFieldStub stub(inobject, offset, representation);
+ return stub.GetCode(isolate());
+ } else {
+ KeyedLoadFieldStub stub(inobject, offset, representation);
+ return stub.GetCode(isolate());
+ }
+}
+
void LoadIC::UpdateCaches(LookupResult* lookup,
- State state,
Handle<Object> object,
Handle<String> name) {
- // Bail out if the result is not cacheable.
- if (!lookup->IsCacheable()) return;
-
- // Loading properties from values is not common, so don't try to
- // deal with non-JS objects here.
- if (!object->IsJSObject()) return;
- Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+ // TODO(verwaest): It would be nice to support loading fields from smis as
+ // well. For now just fail to update the cache.
+ if (!object->IsHeapObject()) return;
- if (HasNormalObjectsInPrototypeChain(isolate(), lookup, *object)) return;
+ Handle<HeapObject> receiver = Handle<HeapObject>::cast(object);
- // Compute the code stub for this load.
Handle<Code> code;
- if (state == UNINITIALIZED) {
+ if (state() == UNINITIALIZED) {
// This is the first time we execute this inline cache.
// Set the target to the pre monomorphic stub to delay
// setting the monomorphic state.
code = pre_monomorphic_stub();
+ } else if (!lookup->IsCacheable()) {
+ // Bail out if the result is not cacheable.
+ code = slow_stub();
+ } else if (object->IsString() &&
+ name->Equals(isolate()->heap()->length_string())) {
+ int length_index = String::kLengthOffset / kPointerSize;
+ code = SimpleFieldLoad(length_index);
+ } else if (!object->IsJSObject()) {
+ // TODO(jkummerow): It would be nice to support non-JSObjects in
+ // ComputeLoadHandler, then we wouldn't need to go generic here.
+ code = slow_stub();
} else if (!lookup->IsProperty()) {
- // Nonexistent property. The result is undefined.
- code = isolate()->stub_cache()->ComputeLoadNonexistent(name, receiver);
+ code = kind() == Code::LOAD_IC
+ ? isolate()->stub_cache()->ComputeLoadNonexistent(
+ name, Handle<JSObject>::cast(receiver))
+ : slow_stub();
} else {
- // Compute monomorphic stub.
- Handle<JSObject> holder(lookup->holder());
- switch (lookup->type()) {
- case FIELD:
- code = isolate()->stub_cache()->ComputeLoadField(
- name, receiver, holder, lookup->GetFieldIndex());
- break;
- case CONSTANT_FUNCTION: {
- Handle<JSFunction> constant(lookup->GetConstantFunction());
- code = isolate()->stub_cache()->ComputeLoadConstant(
- name, receiver, holder, constant);
- break;
- }
- case NORMAL:
- if (holder->IsGlobalObject()) {
- Handle<GlobalObject> global = Handle<GlobalObject>::cast(holder);
- Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(lookup));
- code = isolate()->stub_cache()->ComputeLoadGlobal(
- name, receiver, global, cell, lookup->IsDontDelete());
- } else {
- // There is only one shared stub for loading normalized
- // properties. It does not traverse the prototype chain, so the
- // property must be found in the receiver for the stub to be
- // applicable.
- if (!holder.is_identical_to(receiver)) return;
- code = isolate()->stub_cache()->ComputeLoadNormal();
- }
- break;
- case CALLBACKS: {
- Handle<Object> callback(lookup->GetCallbackObject());
- if (callback->IsAccessorInfo()) {
- Handle<AccessorInfo> info = Handle<AccessorInfo>::cast(callback);
- if (v8::ToCData<Address>(info->getter()) == 0) return;
- if (!info->IsCompatibleReceiver(*receiver)) return;
- code = isolate()->stub_cache()->ComputeLoadCallback(
- name, receiver, holder, info);
- } else if (callback->IsAccessorPair()) {
- Handle<Object> getter(Handle<AccessorPair>::cast(callback)->getter());
- if (!getter->IsJSFunction()) return;
- if (holder->IsGlobalObject()) return;
- if (!holder->HasFastProperties()) return;
- code = isolate()->stub_cache()->ComputeLoadViaGetter(
- name, receiver, holder, Handle<JSFunction>::cast(getter));
- } else {
- ASSERT(callback->IsForeign());
- // No IC support for old-style native accessors.
- return;
- }
- break;
- }
- case INTERCEPTOR:
- ASSERT(HasInterceptorGetter(*holder));
- code = isolate()->stub_cache()->ComputeLoadInterceptor(
- name, receiver, holder);
- break;
- default:
- return;
- }
+ code = ComputeHandler(lookup, Handle<JSObject>::cast(receiver), name);
}
- // Patch the call site depending on the state of the cache.
- if (state == UNINITIALIZED ||
- state == PREMONOMORPHIC ||
- state == MONOMORPHIC_PROTOTYPE_FAILURE) {
- set_target(*code);
- } else if (state == MONOMORPHIC) {
- // We are transitioning from monomorphic to megamorphic case.
- // Place the current monomorphic stub and stub compiled for
- // the receiver into stub cache.
- Map* map = target()->FindFirstMap();
- if (map != NULL) {
- isolate()->stub_cache()->Set(*name, map, target());
- }
- isolate()->stub_cache()->Set(*name, receiver->map(), *code);
+ PatchCache(receiver, name, code);
+ TRACE_IC("LoadIC", name);
+}
- set_target(*megamorphic_stub());
- } else if (state == MEGAMORPHIC) {
- // Cache code holding map should be consistent with
- // GenerateMonomorphicCacheProbe.
- isolate()->stub_cache()->Set(*name, receiver->map(), *code);
- }
- TRACE_IC("LoadIC", name, state, target());
+void IC::UpdateMegamorphicCache(Map* map, Name* name, Code* code) {
+ // Cache code holding map should be consistent with
+ // GenerateMonomorphicCacheProbe.
+ isolate()->stub_cache()->Set(name, map, code);
}
-Handle<Code> KeyedLoadIC::GetElementStubWithoutMapCheck(
- bool is_js_array,
- ElementsKind elements_kind,
- KeyedAccessGrowMode grow_mode) {
- ASSERT(grow_mode == DO_NOT_ALLOW_JSARRAY_GROWTH);
- return KeyedLoadElementStub(elements_kind).GetCode();
-}
+Handle<Code> IC::ComputeHandler(LookupResult* lookup,
+ Handle<JSObject> receiver,
+ Handle<String> name,
+ Handle<Object> value) {
+ Handle<Code> code = isolate()->stub_cache()->FindHandler(
+ name, receiver, kind());
+ if (!code.is_null()) return code;
+ code = CompileHandler(lookup, receiver, name, value);
-Handle<Code> KeyedLoadIC::ComputePolymorphicStub(
- MapHandleList* receiver_maps,
- StrictModeFlag strict_mode,
- KeyedAccessGrowMode growth_mode) {
- CodeHandleList handler_ics(receiver_maps->length());
- for (int i = 0; i < receiver_maps->length(); ++i) {
- Handle<Map> receiver_map = receiver_maps->at(i);
- Handle<Code> cached_stub = ComputeMonomorphicStubWithoutMapCheck(
- receiver_map, strict_mode, growth_mode);
- handler_ics.Add(cached_stub);
+ if (code->is_handler() && code->type() != Code::NORMAL) {
+ HeapObject::UpdateMapCodeCache(receiver, name, code);
}
- KeyedLoadStubCompiler compiler(isolate());
- Handle<Code> code = compiler.CompileLoadPolymorphic(
- receiver_maps, &handler_ics);
- isolate()->counters()->keyed_load_polymorphic_stubs()->Increment();
- PROFILE(isolate(),
- CodeCreateEvent(Logger::KEYED_LOAD_MEGAMORPHIC_IC_TAG, *code, 0));
+
return code;
}
+Handle<Code> LoadIC::CompileHandler(LookupResult* lookup,
+ Handle<JSObject> receiver,
+ Handle<String> name,
+ Handle<Object> unused) {
+ Handle<JSObject> holder(lookup->holder());
+ LoadStubCompiler compiler(isolate(), kind());
+
+ switch (lookup->type()) {
+ case FIELD: {
+ PropertyIndex field = lookup->GetFieldIndex();
+ if (receiver.is_identical_to(holder)) {
+ return SimpleFieldLoad(field.translate(holder),
+ field.is_inobject(holder),
+ lookup->representation());
+ }
+ return compiler.CompileLoadField(
+ receiver, holder, name, field, lookup->representation());
+ }
+ case CONSTANT: {
+ Handle<Object> constant(lookup->GetConstant(), isolate());
+ // TODO(2803): Don't compute a stub for cons strings because they cannot
+ // be embedded into code.
+ if (constant->IsConsString()) break;
+ return compiler.CompileLoadConstant(receiver, holder, name, constant);
+ }
+ case NORMAL:
+ if (kind() != Code::LOAD_IC) break;
+ if (holder->IsGlobalObject()) {
+ Handle<GlobalObject> global = Handle<GlobalObject>::cast(holder);
+ Handle<PropertyCell> cell(
+ global->GetPropertyCell(lookup), isolate());
+ // TODO(verwaest): Turn into a handler.
+ return isolate()->stub_cache()->ComputeLoadGlobal(
+ name, receiver, global, cell, lookup->IsDontDelete());
+ }
+ // There is only one shared stub for loading normalized
+ // properties. It does not traverse the prototype chain, so the
+ // property must be found in the receiver for the stub to be
+ // applicable.
+ if (!holder.is_identical_to(receiver)) break;
+ return isolate()->builtins()->LoadIC_Normal();
+ case CALLBACKS: {
+ // Use simple field loads for some well-known callback properties.
+ int object_offset;
+ Handle<Map> map(receiver->map());
+ if (Accessors::IsJSObjectFieldAccessor(map, name, &object_offset)) {
+ PropertyIndex index =
+ PropertyIndex::NewHeaderIndex(object_offset / kPointerSize);
+ return compiler.CompileLoadField(
+ receiver, receiver, name, index, Representation::Tagged());
+ }
+
+ Handle<Object> callback(lookup->GetCallbackObject(), isolate());
+ if (callback->IsExecutableAccessorInfo()) {
+ Handle<ExecutableAccessorInfo> info =
+ Handle<ExecutableAccessorInfo>::cast(callback);
+ if (v8::ToCData<Address>(info->getter()) == 0) break;
+ if (!info->IsCompatibleReceiver(*receiver)) break;
+ return compiler.CompileLoadCallback(receiver, holder, name, info);
+ } else if (callback->IsAccessorPair()) {
+ Handle<Object> getter(Handle<AccessorPair>::cast(callback)->getter(),
+ isolate());
+ if (!getter->IsJSFunction()) break;
+ if (holder->IsGlobalObject()) break;
+ if (!holder->HasFastProperties()) break;
+ Handle<JSFunction> function = Handle<JSFunction>::cast(getter);
+ CallOptimization call_optimization(function);
+ if (call_optimization.is_simple_api_call() &&
+ call_optimization.IsCompatibleReceiver(*receiver)) {
+ return compiler.CompileLoadCallback(
+ receiver, holder, name, call_optimization);
+ }
+ return compiler.CompileLoadViaGetter(receiver, holder, name, function);
+ }
+ // TODO(dcarney): Handle correctly.
+ if (callback->IsDeclaredAccessorInfo()) break;
+ ASSERT(callback->IsForeign());
+ // No IC support for old-style native accessors.
+ break;
+ }
+ case INTERCEPTOR:
+ ASSERT(HasInterceptorGetter(*holder));
+ return compiler.CompileLoadInterceptor(receiver, holder, name);
+ default:
+ break;
+ }
+
+ return slow_stub();
+}
+
+
static Handle<Object> TryConvertKey(Handle<Object> key, Isolate* isolate) {
// This helper implements a few common fast cases for converting
// non-smi keys of keyed loads/stores to a smi or a string.
if (key->IsHeapNumber()) {
double value = Handle<HeapNumber>::cast(key)->value();
- if (isnan(value)) {
- key = isolate->factory()->nan_symbol();
+ if (std::isnan(value)) {
+ key = isolate->factory()->nan_string();
} else {
int int_value = FastD2I(value);
if (value == int_value && Smi::IsValid(int_value)) {
- key = Handle<Smi>(Smi::FromInt(int_value));
+ key = Handle<Smi>(Smi::FromInt(int_value), isolate);
}
}
} else if (key->IsUndefined()) {
- key = isolate->factory()->undefined_symbol();
+ key = isolate->factory()->undefined_string();
}
return key;
}
-MaybeObject* KeyedLoadIC::Load(State state,
- Handle<Object> object,
- Handle<Object> key,
- bool force_generic_stub) {
- // Check for values that can be converted into a symbol directly or
- // is representable as a smi.
- key = TryConvertKey(key, isolate());
+Handle<Code> KeyedLoadIC::LoadElementStub(Handle<JSObject> receiver) {
+ // Don't handle megamorphic property accesses for INTERCEPTORS or CALLBACKS
+ // via megamorphic stubs, since they don't have a map in their relocation info
+ // and so the stubs can't be harvested for the object needed for a map check.
+ if (target()->type() != Code::NORMAL) {
+ TRACE_GENERIC_IC(isolate(), "KeyedIC", "non-NORMAL target type");
+ return generic_stub();
+ }
- if (key->IsSymbol()) {
- Handle<String> name = Handle<String>::cast(key);
+ Handle<Map> receiver_map(receiver->map(), isolate());
+ MapHandleList target_receiver_maps;
+ if (state() == UNINITIALIZED || state() == PREMONOMORPHIC) {
+ // Optimistically assume that ICs that haven't reached the MONOMORPHIC state
+ // yet will do so and stay there.
+ return isolate()->stub_cache()->ComputeKeyedLoadElement(receiver_map);
+ }
- // If the object is undefined or null it's illegal to try to get any
- // of its properties; throw a TypeError in that case.
- if (object->IsUndefined() || object->IsNull()) {
- return TypeError("non_object_property_load", object, name);
+ if (target().is_identical_to(string_stub())) {
+ target_receiver_maps.Add(isolate()->factory()->string_map());
+ } else {
+ target()->FindAllMaps(&target_receiver_maps);
+ if (target_receiver_maps.length() == 0) {
+ return isolate()->stub_cache()->ComputeKeyedLoadElement(receiver_map);
}
+ }
- if (FLAG_use_ic) {
- // TODO(1073): don't ignore the current stub state.
-
- // Use specialized code for getting the length of strings.
- if (object->IsString() &&
- name->Equals(isolate()->heap()->length_symbol())) {
- Handle<String> string = Handle<String>::cast(object);
- Handle<Code> code =
- isolate()->stub_cache()->ComputeKeyedLoadStringLength(name, string);
- ASSERT(!code.is_null());
- set_target(*code);
- TRACE_IC("KeyedLoadIC", name, state, target());
- return Smi::FromInt(string->length());
- }
-
- // Use specialized code for getting the length of arrays.
- if (object->IsJSArray() &&
- name->Equals(isolate()->heap()->length_symbol())) {
- Handle<JSArray> array = Handle<JSArray>::cast(object);
- Handle<Code> code =
- isolate()->stub_cache()->ComputeKeyedLoadArrayLength(name, array);
- ASSERT(!code.is_null());
- set_target(*code);
- TRACE_IC("KeyedLoadIC", name, state, target());
- return array->length();
- }
+ // The first time a receiver is seen that is a transitioned version of the
+ // previous monomorphic receiver type, assume the new ElementsKind is the
+ // monomorphic type. This benefits global arrays that only transition
+ // once, and all call sites accessing them are faster if they remain
+ // monomorphic. If this optimistic assumption is not true, the IC will
+ // miss again and it will become polymorphic and support both the
+ // untransitioned and transitioned maps.
+ if (state() == MONOMORPHIC &&
+ IsMoreGeneralElementsKindTransition(
+ target_receiver_maps.at(0)->elements_kind(),
+ receiver->GetElementsKind())) {
+ return isolate()->stub_cache()->ComputeKeyedLoadElement(receiver_map);
+ }
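+  // A sketch of the case above: a global array created as [1, 2, 3] starts
+  // with FAST_SMI_ELEMENTS; after a.push(1.5) its map transitions to
+  // FAST_DOUBLE_ELEMENTS, and a keyed load site that was monomorphic on the
+  // smi map simply re-specializes on the double map instead of going
+  // polymorphic.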
- // Use specialized code for getting prototype of functions.
- if (object->IsJSFunction() &&
- name->Equals(isolate()->heap()->prototype_symbol()) &&
- Handle<JSFunction>::cast(object)->should_have_prototype()) {
- Handle<JSFunction> function = Handle<JSFunction>::cast(object);
- Handle<Code> code =
- isolate()->stub_cache()->ComputeKeyedLoadFunctionPrototype(
- name, function);
- ASSERT(!code.is_null());
- set_target(*code);
- TRACE_IC("KeyedLoadIC", name, state, target());
- return Accessors::FunctionGetPrototype(*object, 0);
- }
- }
+ ASSERT(state() != GENERIC);
- // Check if the name is trivially convertible to an index and get
- // the element or char if so.
- uint32_t index = 0;
- if (name->AsArrayIndex(&index)) {
- // Rewrite to the generic keyed load stub.
- if (FLAG_use_ic) set_target(*generic_stub());
- return Runtime::GetElementOrCharAt(isolate(), object, index);
- }
+ // Determine the list of receiver maps that this call site has seen,
+ // adding the map that was just encountered.
+ if (!AddOneReceiverMapIfMissing(&target_receiver_maps, receiver_map)) {
+ // If the miss wasn't due to an unseen map, a polymorphic stub
+    // won't help; use the generic stub.
+ TRACE_GENERIC_IC(isolate(), "KeyedIC", "same map added twice");
+ return generic_stub();
+ }
- // Named lookup.
- LookupResult lookup(isolate());
- LookupForRead(object, name, &lookup);
+ // If the maximum number of receiver maps has been exceeded, use the generic
+ // version of the IC.
+ if (target_receiver_maps.length() > kMaxKeyedPolymorphism) {
+ TRACE_GENERIC_IC(isolate(), "KeyedIC", "max polymorph exceeded");
+ return generic_stub();
+ }
- // If we did not find a property, check if we need to throw an exception.
- if (!lookup.IsFound() && IsContextual(object)) {
- return ReferenceError("not_defined", name);
- }
+ return isolate()->stub_cache()->ComputeLoadElementPolymorphic(
+ &target_receiver_maps);
+}
- if (FLAG_use_ic) {
- UpdateCaches(&lookup, state, object, name);
- }
- PropertyAttributes attr;
- if (lookup.IsInterceptor()) {
- // Get the property.
- Handle<Object> result =
- Object::GetProperty(object, object, &lookup, name, &attr);
- RETURN_IF_EMPTY_HANDLE(isolate(), result);
- // If the property is not present, check if we need to throw an
- // exception.
- if (attr == ABSENT && IsContextual(object)) {
- return ReferenceError("not_defined", name);
- }
- return *result;
- }
-
- return object->GetProperty(*object, &lookup, *name, &attr);
+MaybeObject* KeyedLoadIC::Load(Handle<Object> object,
+ Handle<Object> key,
+ ICMissMode miss_mode) {
+ if (MigrateDeprecated(object)) {
+ return Runtime::GetObjectPropertyOrFail(isolate(), object, key);
}
- // Do not use ICs for objects that require access checks (including
- // the global object).
- bool use_ic = FLAG_use_ic && !object->IsAccessCheckNeeded();
+ MaybeObject* maybe_object = NULL;
+ Handle<Code> stub = generic_stub();
+
+  // Check for values that can be converted into an internalized string
+  // directly or are representable as a smi.
+ key = TryConvertKey(key, isolate());
- if (use_ic) {
- Handle<Code> stub = generic_stub();
- if (!force_generic_stub) {
+ if (key->IsInternalizedString()) {
+ maybe_object = LoadIC::Load(object, Handle<String>::cast(key));
+ if (maybe_object->IsFailure()) return maybe_object;
+ } else if (FLAG_use_ic && !object->IsAccessCheckNeeded()) {
+ ASSERT(!object->IsJSGlobalProxy());
+ if (miss_mode != MISS_FORCE_GENERIC) {
if (object->IsString() && key->IsNumber()) {
- if (state == UNINITIALIZED) {
- stub = string_stub();
- }
+ if (state() == UNINITIALIZED) stub = string_stub();
} else if (object->IsJSObject()) {
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
if (receiver->elements()->map() ==
@@ -1212,600 +1387,484 @@ MaybeObject* KeyedLoadIC::Load(State state,
stub = non_strict_arguments_stub();
} else if (receiver->HasIndexedInterceptor()) {
stub = indexed_interceptor_stub();
- } else if (key->IsSmi() && (target() != *non_strict_arguments_stub())) {
- stub = ComputeStub(receiver, LOAD, kNonStrictMode, stub);
+ } else if (!key->ToSmi()->IsFailure() &&
+ (!target().is_identical_to(non_strict_arguments_stub()))) {
+ stub = LoadElementStub(receiver);
}
}
- } else {
- TRACE_GENERIC_IC("KeyedLoadIC", "force generic");
}
- if (!stub.is_null()) set_target(*stub);
}
- TRACE_IC("KeyedLoadIC", key, state, target());
-
- // Get the property.
- return Runtime::GetObjectProperty(isolate(), object, key);
-}
-
-
-void KeyedLoadIC::UpdateCaches(LookupResult* lookup,
- State state,
- Handle<Object> object,
- Handle<String> name) {
- // Bail out if we didn't find a result.
- if (!lookup->IsProperty() || !lookup->IsCacheable()) return;
-
- if (!object->IsJSObject()) return;
- Handle<JSObject> receiver = Handle<JSObject>::cast(object);
-
- if (HasNormalObjectsInPrototypeChain(isolate(), lookup, *object)) return;
-
- // Compute the code stub for this load.
- Handle<Code> code;
-
- if (state == UNINITIALIZED) {
- // This is the first time we execute this inline cache.
- // Set the target to the pre monomorphic stub to delay
- // setting the monomorphic state.
- code = pre_monomorphic_stub();
- } else {
- // Compute a monomorphic stub.
- Handle<JSObject> holder(lookup->holder());
- switch (lookup->type()) {
- case FIELD:
- code = isolate()->stub_cache()->ComputeKeyedLoadField(
- name, receiver, holder, lookup->GetFieldIndex());
- break;
- case CONSTANT_FUNCTION: {
- Handle<JSFunction> constant(lookup->GetConstantFunction());
- code = isolate()->stub_cache()->ComputeKeyedLoadConstant(
- name, receiver, holder, constant);
- break;
- }
- case CALLBACKS: {
- Handle<Object> callback_object(lookup->GetCallbackObject());
- if (!callback_object->IsAccessorInfo()) return;
- Handle<AccessorInfo> callback =
- Handle<AccessorInfo>::cast(callback_object);
- if (v8::ToCData<Address>(callback->getter()) == 0) return;
- if (!callback->IsCompatibleReceiver(*receiver)) return;
- code = isolate()->stub_cache()->ComputeKeyedLoadCallback(
- name, receiver, holder, callback);
- break;
- }
- case INTERCEPTOR:
- ASSERT(HasInterceptorGetter(lookup->holder()));
- code = isolate()->stub_cache()->ComputeKeyedLoadInterceptor(
- name, receiver, holder);
- break;
- default:
- // Always rewrite to the generic case so that we do not
- // repeatedly try to rewrite.
- code = generic_stub();
- break;
+ if (!is_target_set()) {
+ if (*stub == *generic_stub()) {
+ TRACE_GENERIC_IC(isolate(), "KeyedLoadIC", "set generic");
}
+ ASSERT(!stub.is_null());
+ set_target(*stub);
+ TRACE_IC("LoadIC", key);
}
- // Patch the call site depending on the state of the cache. Make
- // sure to always rewrite from monomorphic to megamorphic.
- ASSERT(state != MONOMORPHIC_PROTOTYPE_FAILURE);
- if (state == UNINITIALIZED || state == PREMONOMORPHIC) {
- set_target(*code);
- } else if (state == MONOMORPHIC) {
- set_target(*megamorphic_stub());
- }
-
- TRACE_IC("KeyedLoadIC", name, state, target());
-}
-
-
-static bool StoreICableLookup(LookupResult* lookup) {
- // Bail out if we didn't find a result.
- if (!lookup->IsFound()) return false;
-
- // Bail out if inline caching is not allowed.
- if (!lookup->IsCacheable()) return false;
-
- // If the property is read-only, we leave the IC in its current state.
- if (lookup->IsTransition()) {
- return !lookup->GetTransitionDetails().IsReadOnly();
- }
- return !lookup->IsReadOnly();
+ if (maybe_object != NULL) return maybe_object;
+ return Runtime::GetObjectPropertyOrFail(isolate(), object, key);
}
static bool LookupForWrite(Handle<JSObject> receiver,
Handle<String> name,
- LookupResult* lookup) {
- receiver->LocalLookup(*name, lookup);
- if (!lookup->IsFound()) {
- receiver->map()->LookupTransition(*receiver, *name, lookup);
- }
- if (!StoreICableLookup(lookup)) {
- // 2nd chance: There can be accessors somewhere in the prototype chain.
- receiver->Lookup(*name, lookup);
- return lookup->IsPropertyCallbacks() && StoreICableLookup(lookup);
- }
+ Handle<Object> value,
+ LookupResult* lookup,
+ IC* ic) {
+ Handle<JSObject> holder = receiver;
+ receiver->Lookup(*name, lookup);
+ if (lookup->IsFound()) {
+ if (lookup->IsReadOnly() || !lookup->IsCacheable()) return false;
+
+ if (lookup->holder() == *receiver) {
+ if (lookup->IsInterceptor() && !HasInterceptorSetter(*receiver)) {
+ receiver->LocalLookupRealNamedProperty(*name, lookup);
+ return lookup->IsFound() &&
+ !lookup->IsReadOnly() &&
+ lookup->CanHoldValue(value) &&
+ lookup->IsCacheable();
+ }
+ return lookup->CanHoldValue(value);
+ }
- if (lookup->IsInterceptor() &&
- receiver->GetNamedInterceptor()->setter()->IsUndefined()) {
- receiver->LocalLookupRealNamedProperty(*name, lookup);
- return StoreICableLookup(lookup);
+ if (lookup->IsPropertyCallbacks()) return true;
+
+ // Currently normal holders in the prototype chain are not supported. They
+ // would require a runtime positive lookup and verification that the details
+ // have not changed.
+ if (lookup->IsInterceptor() || lookup->IsNormal()) return false;
+ holder = Handle<JSObject>(lookup->holder(), lookup->isolate());
+ }
+
+ // While normally LookupTransition gets passed the receiver, in this case we
+ // pass the holder of the property that we overwrite. This keeps the holder in
+ // the LookupResult intact so we can later use it to generate a prototype
+ // chain check. This avoids a double lookup, but requires us to pass in the
+ // receiver when trying to fetch extra information from the transition.
+ receiver->map()->LookupTransition(*holder, *name, lookup);
+ if (!lookup->IsTransition()) return false;
+ PropertyDetails target_details =
+ lookup->GetTransitionDetails(receiver->map());
+ if (target_details.IsReadOnly()) return false;
+
+ // If the value that's being stored does not fit in the field that the
+ // instance would transition to, create a new transition that fits the value.
+ // This has to be done before generating the IC, since that IC will embed the
+ // transition target.
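+  // For example (illustrative): if the transition was created by a store of
+  // the smi 1 and so records a smi field representation, a later store of 1.5
+  // at the same site generalizes the transition target's field to a double
+  // representation here, before the IC embeds that target map.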
+ // Ensure the instance and its map were migrated before trying to update the
+ // transition target.
+ ASSERT(!receiver->map()->is_deprecated());
+ if (!value->FitsRepresentation(target_details.representation())) {
+ Handle<Map> target(lookup->GetTransitionMapFromMap(receiver->map()));
+ Map::GeneralizeRepresentation(
+ target, target->LastAdded(),
+ value->OptimalRepresentation(), FORCE_FIELD);
+ // Lookup the transition again since the transition tree may have changed
+ // entirely by the migration above.
+ receiver->map()->LookupTransition(*holder, *name, lookup);
+ if (!lookup->IsTransition()) return false;
+ ic->MarkMonomorphicPrototypeFailure();
}
-
return true;
}
-MaybeObject* StoreIC::Store(State state,
- StrictModeFlag strict_mode,
- Handle<Object> object,
+MaybeObject* StoreIC::Store(Handle<Object> object,
Handle<String> name,
- Handle<Object> value) {
- if (!object->IsJSObject()) {
- // Handle proxies.
- if (object->IsJSProxy()) {
- return JSProxy::cast(*object)->
- SetProperty(*name, *value, NONE, strict_mode);
- }
+ Handle<Object> value,
+ JSReceiver::StoreFromKeyed store_mode) {
+ if (MigrateDeprecated(object) || object->IsJSProxy()) {
+ Handle<Object> result = JSReceiver::SetProperty(
+ Handle<JSReceiver>::cast(object), name, value, NONE, strict_mode());
+ RETURN_IF_EMPTY_HANDLE(isolate(), result);
+ return *result;
+ }
- // If the object is undefined or null it's illegal to try to set any
- // properties on it; throw a TypeError in that case.
- if (object->IsUndefined() || object->IsNull()) {
- return TypeError("non_object_property_store", object, name);
- }
+ // If the object is undefined or null it's illegal to try to set any
+ // properties on it; throw a TypeError in that case.
+ if (object->IsUndefined() || object->IsNull()) {
+ return TypeError("non_object_property_store", object, name);
+ }
- // The length property of string values is read-only. Throw in strict mode.
- if (strict_mode == kStrictMode && object->IsString() &&
- name->Equals(isolate()->heap()->length_symbol())) {
- return TypeError("strict_read_only_property", object, name);
- }
- // Ignore other stores where the receiver is not a JSObject.
- // TODO(1475): Must check prototype chains of object wrappers.
- return *value;
+ // The length property of string values is read-only. Throw in strict mode.
+ if (strict_mode() == kStrictMode && object->IsString() &&
+ name->Equals(isolate()->heap()->length_string())) {
+ return TypeError("strict_read_only_property", object, name);
}
+ // Ignore other stores where the receiver is not a JSObject.
+ // TODO(1475): Must check prototype chains of object wrappers.
+ if (!object->IsJSObject()) return *value;
+
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
// Check if the given name is an array index.
uint32_t index;
if (name->AsArrayIndex(&index)) {
Handle<Object> result =
- JSObject::SetElement(receiver, index, value, NONE, strict_mode);
+ JSObject::SetElement(receiver, index, value, NONE, strict_mode());
RETURN_IF_EMPTY_HANDLE(isolate(), result);
return *value;
}
+ // Observed objects are always modified through the runtime.
+ if (FLAG_harmony_observation && receiver->map()->is_observed()) {
+ Handle<Object> result = JSReceiver::SetProperty(
+ receiver, name, value, NONE, strict_mode(), store_mode);
+ RETURN_IF_EMPTY_HANDLE(isolate(), result);
+ return *result;
+ }
+
// Use specialized code for setting the length of arrays with fast
- // properties. Slow properties might indicate redefinition of the
- // length property.
- if (receiver->IsJSArray() &&
- name->Equals(isolate()->heap()->length_symbol()) &&
+ // properties. Slow properties might indicate redefinition of the length
+ // property. Note that when redefined using Object.freeze, it's possible
+ // to have fast properties but a read-only length.
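+  // For example, after
+  //   var a = [1, 2, 3]; Object.freeze(a);
+  // the array still has fast properties but a read-only length, so the
+  // specialized length stub must not be used; the is_frozen() check below
+  // keeps such maps on the runtime path.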
+ if (FLAG_use_ic &&
+ receiver->IsJSArray() &&
+ name->Equals(isolate()->heap()->length_string()) &&
Handle<JSArray>::cast(receiver)->AllowsSetElementsLength() &&
- receiver->HasFastProperties()) {
-#ifdef DEBUG
- if (FLAG_trace_ic) PrintF("[StoreIC : +#length /array]\n");
-#endif
- Handle<Code> stub = (strict_mode == kStrictMode)
- ? isolate()->builtins()->StoreIC_ArrayLength_Strict()
- : isolate()->builtins()->StoreIC_ArrayLength();
+ receiver->HasFastProperties() &&
+ !receiver->map()->is_frozen()) {
+ Handle<Code> stub =
+ StoreArrayLengthStub(kind(), strict_mode()).GetCode(isolate());
set_target(*stub);
- return receiver->SetProperty(*name, *value, NONE, strict_mode);
+ TRACE_IC("StoreIC", name);
+ Handle<Object> result = JSReceiver::SetProperty(
+ receiver, name, value, NONE, strict_mode(), store_mode);
+ RETURN_IF_EMPTY_HANDLE(isolate(), result);
+ return *result;
}
- // Lookup the property locally in the receiver.
- if (!receiver->IsJSGlobalProxy()) {
- LookupResult lookup(isolate());
-
- if (LookupForWrite(receiver, name, &lookup)) {
- if (FLAG_use_ic) { // Generate a stub for this store.
- UpdateCaches(&lookup, state, strict_mode, receiver, name, value);
- }
- } else {
- // Strict mode doesn't allow setting non-existent global property
- // or an assignment to a read only property.
- if (strict_mode == kStrictMode) {
- if (lookup.IsProperty() && lookup.IsReadOnly()) {
- return TypeError("strict_read_only_property", object, name);
- } else if (IsContextual(object)) {
- return ReferenceError("not_defined", name);
- }
- }
+ if (receiver->IsJSGlobalProxy()) {
+ if (FLAG_use_ic && kind() != Code::KEYED_STORE_IC) {
+ // Generate a generic stub that goes to the runtime when we see a global
+ // proxy as receiver.
+ Handle<Code> stub = global_proxy_stub();
+ set_target(*stub);
+ TRACE_IC("StoreIC", name);
}
+ Handle<Object> result = JSReceiver::SetProperty(
+ receiver, name, value, NONE, strict_mode(), store_mode);
+ RETURN_IF_EMPTY_HANDLE(isolate(), result);
+ return *result;
}
- if (receiver->IsJSGlobalProxy()) {
- // TODO(ulan): find out why we patch this site even with --no-use-ic
- // Generate a generic stub that goes to the runtime when we see a global
- // proxy as receiver.
- Handle<Code> stub = (strict_mode == kStrictMode)
- ? global_proxy_stub_strict()
- : global_proxy_stub();
- if (target() != *stub) {
+ LookupResult lookup(isolate());
+ bool can_store = LookupForWrite(receiver, name, value, &lookup, this);
+ if (!can_store &&
+ strict_mode() == kStrictMode &&
+ !(lookup.IsProperty() && lookup.IsReadOnly()) &&
+ IsUndeclaredGlobal(object)) {
+ // Strict mode doesn't allow setting non-existent global property.
+ return ReferenceError("not_defined", name);
+ }
+ if (FLAG_use_ic) {
+ if (state() == UNINITIALIZED) {
+ Handle<Code> stub = pre_monomorphic_stub();
+ set_target(*stub);
+ TRACE_IC("StoreIC", name);
+ } else if (can_store) {
+ UpdateCaches(&lookup, receiver, name, value);
+ } else if (!name->IsCacheable(isolate()) ||
+ lookup.IsNormal() ||
+ (lookup.IsField() && lookup.CanHoldValue(value))) {
+ Handle<Code> stub = generic_stub();
set_target(*stub);
- TRACE_IC("StoreIC", name, state, target());
}
}
// Set the property.
- return receiver->SetProperty(*name,
- *value,
- NONE,
- strict_mode,
- JSReceiver::CERTAINLY_NOT_STORE_FROM_KEYED);
+ Handle<Object> result = JSReceiver::SetProperty(
+ receiver, name, value, NONE, strict_mode(), store_mode);
+ RETURN_IF_EMPTY_HANDLE(isolate(), result);
+ return *result;
}
void StoreIC::UpdateCaches(LookupResult* lookup,
- State state,
- StrictModeFlag strict_mode,
Handle<JSObject> receiver,
Handle<String> name,
Handle<Object> value) {
ASSERT(!receiver->IsJSGlobalProxy());
- ASSERT(StoreICableLookup(lookup));
ASSERT(lookup->IsFound());
// These are not cacheable, so we never see such LookupResults here.
ASSERT(!lookup->IsHandler());
- // If the property has a non-field type allowing map transitions
- // where there is extra room in the object, we leave the IC in its
- // current state.
- PropertyType type = lookup->type();
+ Handle<Code> code = ComputeHandler(lookup, receiver, name, value);
- // Compute the code stub for this store; used for rewriting to
- // monomorphic state and making sure that the code stub is in the
- // stub cache.
+ PatchCache(receiver, name, code);
+ TRACE_IC("StoreIC", name);
+}
+
+
+Handle<Code> StoreIC::CompileHandler(LookupResult* lookup,
+ Handle<JSObject> receiver,
+ Handle<String> name,
+ Handle<Object> value) {
Handle<JSObject> holder(lookup->holder());
- Handle<Code> code;
- switch (type) {
+ StoreStubCompiler compiler(isolate(), strict_mode(), kind());
+ switch (lookup->type()) {
case FIELD:
- code = isolate()->stub_cache()->ComputeStoreField(name,
- receiver,
- lookup->GetFieldIndex(),
- Handle<Map>::null(),
- strict_mode);
- break;
+ return compiler.CompileStoreField(receiver, lookup, name);
+ case TRANSITION: {
+ // Explicitly pass in the receiver map since LookupForWrite may have
+ // stored something else than the receiver in the holder.
+ Handle<Map> transition(
+ lookup->GetTransitionTarget(receiver->map()), isolate());
+ int descriptor = transition->LastAdded();
+
+ DescriptorArray* target_descriptors = transition->instance_descriptors();
+ PropertyDetails details = target_descriptors->GetDetails(descriptor);
+
+ if (details.type() == CALLBACKS || details.attributes() != NONE) break;
+
+ return compiler.CompileStoreTransition(
+ receiver, lookup, transition, name);
+ }
case NORMAL:
+ if (kind() == Code::KEYED_STORE_IC) break;
if (receiver->IsGlobalObject()) {
// The stub generated for the global object picks the value directly
// from the property cell. So the property must be directly on the
// global object.
Handle<GlobalObject> global = Handle<GlobalObject>::cast(receiver);
- Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(lookup));
- code = isolate()->stub_cache()->ComputeStoreGlobal(
- name, global, cell, strict_mode);
- } else {
- if (!holder.is_identical_to(receiver)) return;
- code = isolate()->stub_cache()->ComputeStoreNormal(strict_mode);
+ Handle<PropertyCell> cell(
+ global->GetPropertyCell(lookup), isolate());
+ // TODO(verwaest): Turn into a handler.
+ return isolate()->stub_cache()->ComputeStoreGlobal(
+ name, global, cell, value, strict_mode());
}
- break;
+ ASSERT(holder.is_identical_to(receiver));
+ return strict_mode() == kStrictMode
+ ? isolate()->builtins()->StoreIC_Normal_Strict()
+ : isolate()->builtins()->StoreIC_Normal();
case CALLBACKS: {
- Handle<Object> callback(lookup->GetCallbackObject());
- if (callback->IsAccessorInfo()) {
- Handle<AccessorInfo> info = Handle<AccessorInfo>::cast(callback);
- if (v8::ToCData<Address>(info->setter()) == 0) return;
- if (!holder->HasFastProperties()) return;
- if (!info->IsCompatibleReceiver(*receiver)) return;
- code = isolate()->stub_cache()->ComputeStoreCallback(
- name, receiver, holder, info, strict_mode);
+ if (kind() == Code::KEYED_STORE_IC) break;
+ Handle<Object> callback(lookup->GetCallbackObject(), isolate());
+ if (callback->IsExecutableAccessorInfo()) {
+ Handle<ExecutableAccessorInfo> info =
+ Handle<ExecutableAccessorInfo>::cast(callback);
+ if (v8::ToCData<Address>(info->setter()) == 0) break;
+ if (!holder->HasFastProperties()) break;
+ if (!info->IsCompatibleReceiver(*receiver)) break;
+ return compiler.CompileStoreCallback(receiver, holder, name, info);
} else if (callback->IsAccessorPair()) {
- Handle<Object> setter(Handle<AccessorPair>::cast(callback)->setter());
- if (!setter->IsJSFunction()) return;
- if (holder->IsGlobalObject()) return;
- if (!holder->HasFastProperties()) return;
- code = isolate()->stub_cache()->ComputeStoreViaSetter(
- name, receiver, holder, Handle<JSFunction>::cast(setter),
- strict_mode);
- } else {
- ASSERT(callback->IsForeign());
- // No IC support for old-style native accessors.
- return;
+ Handle<Object> setter(
+ Handle<AccessorPair>::cast(callback)->setter(), isolate());
+ if (!setter->IsJSFunction()) break;
+ if (holder->IsGlobalObject()) break;
+ if (!holder->HasFastProperties()) break;
+ Handle<JSFunction> function = Handle<JSFunction>::cast(setter);
+ CallOptimization call_optimization(function);
+ if (call_optimization.is_simple_api_call() &&
+ call_optimization.IsCompatibleReceiver(*receiver)) {
+ return compiler.CompileStoreCallback(
+ receiver, holder, name, call_optimization);
+ }
+ return compiler.CompileStoreViaSetter(
+ receiver, holder, name, Handle<JSFunction>::cast(setter));
}
+ // TODO(dcarney): Handle correctly.
+ if (callback->IsDeclaredAccessorInfo()) break;
+ ASSERT(callback->IsForeign());
+ // No IC support for old-style native accessors.
break;
}
case INTERCEPTOR:
- ASSERT(!receiver->GetNamedInterceptor()->setter()->IsUndefined());
- code = isolate()->stub_cache()->ComputeStoreInterceptor(
- name, receiver, strict_mode);
- break;
- case CONSTANT_FUNCTION:
- return;
- case TRANSITION: {
- Handle<Map> transition(lookup->GetTransitionTarget());
- int descriptor = transition->LastAdded();
-
- DescriptorArray* target_descriptors = transition->instance_descriptors();
- PropertyDetails details = target_descriptors->GetDetails(descriptor);
-
- if (details.type() != FIELD || details.attributes() != NONE) return;
-
- int field_index = target_descriptors->GetFieldIndex(descriptor);
- code = isolate()->stub_cache()->ComputeStoreField(
- name, receiver, field_index, transition, strict_mode);
-
+ if (kind() == Code::KEYED_STORE_IC) break;
+ ASSERT(HasInterceptorSetter(*receiver));
+ return compiler.CompileStoreInterceptor(receiver, name);
+ case CONSTANT:
break;
- }
case NONEXISTENT:
case HANDLER:
UNREACHABLE();
- return;
- }
-
- // Patch the call site depending on the state of the cache.
- if (state == UNINITIALIZED || state == MONOMORPHIC_PROTOTYPE_FAILURE) {
- set_target(*code);
- } else if (state == MONOMORPHIC) {
- // Only move to megamorphic if the target changes.
- if (target() != *code) {
- set_target((strict_mode == kStrictMode)
- ? megamorphic_stub_strict()
- : megamorphic_stub());
- }
- } else if (state == MEGAMORPHIC) {
- // Update the stub cache.
- isolate()->stub_cache()->Set(*name, receiver->map(), *code);
- }
-
- TRACE_IC("StoreIC", name, state, target());
-}
-
-
-static bool AddOneReceiverMapIfMissing(MapHandleList* receiver_maps,
- Handle<Map> new_receiver_map) {
- ASSERT(!new_receiver_map.is_null());
- for (int current = 0; current < receiver_maps->length(); ++current) {
- if (!receiver_maps->at(current).is_null() &&
- receiver_maps->at(current).is_identical_to(new_receiver_map)) {
- return false;
- }
- }
- receiver_maps->Add(new_receiver_map);
- return true;
-}
-
-
-void KeyedIC::GetReceiverMapsForStub(Handle<Code> stub,
- MapHandleList* result) {
- ASSERT(stub->is_inline_cache_stub());
- if (!string_stub().is_null() && stub.is_identical_to(string_stub())) {
- return result->Add(isolate()->factory()->string_map());
- } else if (stub->is_keyed_load_stub() || stub->is_keyed_store_stub()) {
- if (stub->ic_state() == MONOMORPHIC) {
- result->Add(Handle<Map>(stub->FindFirstMap()));
- } else {
- ASSERT(stub->ic_state() == MEGAMORPHIC);
- AssertNoAllocation no_allocation;
- int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
- for (RelocIterator it(*stub, mask); !it.done(); it.next()) {
- RelocInfo* info = it.rinfo();
- Handle<Object> object(info->target_object());
- ASSERT(object->IsMap());
- AddOneReceiverMapIfMissing(result, Handle<Map>::cast(object));
- }
- }
+ break;
}
+ return slow_stub();
}
-Handle<Code> KeyedIC::ComputeStub(Handle<JSObject> receiver,
- StubKind stub_kind,
- StrictModeFlag strict_mode,
- Handle<Code> generic_stub) {
- State ic_state = target()->ic_state();
- KeyedAccessGrowMode grow_mode = IsGrowStubKind(stub_kind)
- ? ALLOW_JSARRAY_GROWTH
- : DO_NOT_ALLOW_JSARRAY_GROWTH;
-
+Handle<Code> KeyedStoreIC::StoreElementStub(Handle<JSObject> receiver,
+ KeyedAccessStoreMode store_mode) {
// Don't handle megamorphic property accesses for INTERCEPTORS or CALLBACKS
// via megamorphic stubs, since they don't have a map in their relocation info
// and so the stubs can't be harvested for the object needed for a map check.
if (target()->type() != Code::NORMAL) {
- TRACE_GENERIC_IC("KeyedIC", "non-NORMAL target type");
- return generic_stub;
+ TRACE_GENERIC_IC(isolate(), "KeyedIC", "non-NORMAL target type");
+ return generic_stub();
}
- bool monomorphic = false;
- bool is_transition_stub = IsTransitionStubKind(stub_kind);
- Handle<Map> receiver_map(receiver->map());
- Handle<Map> monomorphic_map = receiver_map;
- MapHandleList target_receiver_maps;
- if (ic_state == UNINITIALIZED || ic_state == PREMONOMORPHIC) {
+ Handle<Map> receiver_map(receiver->map(), isolate());
+ if (state() == UNINITIALIZED || state() == PREMONOMORPHIC) {
// Optimistically assume that ICs that haven't reached the MONOMORPHIC state
// yet will do so and stay there.
- monomorphic = true;
- } else {
- GetReceiverMapsForStub(Handle<Code>(target()), &target_receiver_maps);
- if (ic_state == MONOMORPHIC && (is_transition_stub || stub_kind == LOAD)) {
- // The first time a receiver is seen that is a transitioned version of the
- // previous monomorphic receiver type, assume the new ElementsKind is the
- // monomorphic type. This benefits global arrays that only transition
- // once, and all call sites accessing them are faster if they remain
- // monomorphic. If this optimistic assumption is not true, the IC will
- // miss again and it will become polymorphic and support both the
- // untransitioned and transitioned maps.
- monomorphic = IsMoreGeneralElementsKindTransition(
- target_receiver_maps.at(0)->elements_kind(),
- receiver->GetElementsKind());
- }
+ Handle<Map> monomorphic_map = ComputeTransitionedMap(receiver, store_mode);
+ store_mode = GetNonTransitioningStoreMode(store_mode);
+ return isolate()->stub_cache()->ComputeKeyedStoreElement(
+ monomorphic_map, strict_mode(), store_mode);
}
- if (monomorphic) {
- if (is_transition_stub) {
- monomorphic_map = ComputeTransitionedMap(receiver, stub_kind);
- ASSERT(*monomorphic_map != *receiver_map);
- stub_kind = GetNoTransitionStubKind(stub_kind);
+ MapHandleList target_receiver_maps;
+ target()->FindAllMaps(&target_receiver_maps);
+ if (target_receiver_maps.length() == 0) {
+    // If a non-map-specific IC is installed (e.g. keyed stores into
+    // properties in dictionary mode), there will be no receiver maps in the
+    // target.
+ return generic_stub();
+ }
+
+ // There are several special cases where an IC that is MONOMORPHIC can still
+ // transition to a different GetNonTransitioningStoreMode IC that handles a
+ // superset of the original IC. Handle those here if the receiver map hasn't
+ // changed or it has transitioned to a more general kind.
+ KeyedAccessStoreMode old_store_mode =
+ Code::GetKeyedAccessStoreMode(target()->extra_ic_state());
+ Handle<Map> previous_receiver_map = target_receiver_maps.at(0);
+ if (state() == MONOMORPHIC) {
+ // If the "old" and "new" maps are in the same elements map family, stay
+ // MONOMORPHIC and use the map for the most generic ElementsKind.
+ Handle<Map> transitioned_receiver_map = receiver_map;
+ if (IsTransitionStoreMode(store_mode)) {
+ transitioned_receiver_map =
+ ComputeTransitionedMap(receiver, store_mode);
+ }
+ if (IsTransitionedMapOfMonomorphicTarget(*transitioned_receiver_map)) {
+ // Element family is the same, use the "worst" case map.
+ store_mode = GetNonTransitioningStoreMode(store_mode);
+ return isolate()->stub_cache()->ComputeKeyedStoreElement(
+ transitioned_receiver_map, strict_mode(), store_mode);
+ } else if (*previous_receiver_map == receiver->map() &&
+ old_store_mode == STANDARD_STORE &&
+ (IsGrowStoreMode(store_mode) ||
+ store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS ||
+ store_mode == STORE_NO_TRANSITION_HANDLE_COW)) {
+ // A "normal" IC that handles stores can switch to a version that can
+ // grow at the end of the array, handle OOB accesses or copy COW arrays
+ // and still stay MONOMORPHIC.
+ return isolate()->stub_cache()->ComputeKeyedStoreElement(
+ receiver_map, strict_mode(), store_mode);
}
- return ComputeMonomorphicStub(
- monomorphic_map, stub_kind, strict_mode, generic_stub);
}
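+  // A sketch of the second case above: a store site that has only seen
+  // in-bounds stores (STANDARD_STORE) and then executes a[a.length] = v on
+  // the same map switches to a grow-capable stub for that map and stays
+  // MONOMORPHIC rather than falling back to the generic stub.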
- ASSERT(target() != *generic_stub);
- // Determine the list of receiver maps that this call site has seen,
- // adding the map that was just encountered.
+ ASSERT(state() != GENERIC);
+
bool map_added =
AddOneReceiverMapIfMissing(&target_receiver_maps, receiver_map);
- if (IsTransitionStubKind(stub_kind)) {
- Handle<Map> new_map = ComputeTransitionedMap(receiver, stub_kind);
- map_added |= AddOneReceiverMapIfMissing(&target_receiver_maps, new_map);
+
+ if (IsTransitionStoreMode(store_mode)) {
+ Handle<Map> transitioned_receiver_map =
+ ComputeTransitionedMap(receiver, store_mode);
+ map_added |= AddOneReceiverMapIfMissing(&target_receiver_maps,
+ transitioned_receiver_map);
}
+
if (!map_added) {
// If the miss wasn't due to an unseen map, a polymorphic stub
     // won't help; use the generic stub.
- TRACE_GENERIC_IC("KeyedIC", "same map added twice");
- return generic_stub;
+ TRACE_GENERIC_IC(isolate(), "KeyedIC", "same map added twice");
+ return generic_stub();
}
// If the maximum number of receiver maps has been exceeded, use the generic
// version of the IC.
if (target_receiver_maps.length() > kMaxKeyedPolymorphism) {
- TRACE_GENERIC_IC("KeyedIC", "max polymorph exceeded");
- return generic_stub;
+ TRACE_GENERIC_IC(isolate(), "KeyedIC", "max polymorph exceeded");
+ return generic_stub();
+ }
+
+  // Make sure all polymorphic handlers have the same store mode; otherwise the
+ // generic stub must be used.
+ store_mode = GetNonTransitioningStoreMode(store_mode);
+ if (old_store_mode != STANDARD_STORE) {
+ if (store_mode == STANDARD_STORE) {
+ store_mode = old_store_mode;
+ } else if (store_mode != old_store_mode) {
+ TRACE_GENERIC_IC(isolate(), "KeyedIC", "store mode mismatch");
+ return generic_stub();
+ }
}
- if ((Code::GetKeyedAccessGrowMode(target()->extra_ic_state()) ==
- ALLOW_JSARRAY_GROWTH)) {
- grow_mode = ALLOW_JSARRAY_GROWTH;
+  // If the store mode isn't the standard mode, make sure that all polymorphic
+  // receivers are either all external arrays or all "normal" arrays.
+  // Otherwise, use the generic stub.
+ if (store_mode != STANDARD_STORE) {
+ int external_arrays = 0;
+ for (int i = 0; i < target_receiver_maps.length(); ++i) {
+ if (target_receiver_maps[i]->has_external_array_elements()) {
+ external_arrays++;
+ }
+ }
+ if (external_arrays != 0 &&
+ external_arrays != target_receiver_maps.length()) {
+ TRACE_GENERIC_IC(isolate(), "KeyedIC",
+ "unsupported combination of external and normal arrays");
+ return generic_stub();
+ }
}
- Handle<PolymorphicCodeCache> cache =
- isolate()->factory()->polymorphic_code_cache();
- Code::ExtraICState extra_state = Code::ComputeExtraICState(grow_mode,
- strict_mode);
- Code::Flags flags = Code::ComputeFlags(kind(), MEGAMORPHIC, extra_state);
- Handle<Object> probe = cache->Lookup(&target_receiver_maps, flags);
- if (probe->IsCode()) return Handle<Code>::cast(probe);
-
- Handle<Code> stub =
- ComputePolymorphicStub(&target_receiver_maps, strict_mode, grow_mode);
- PolymorphicCodeCache::Update(cache, &target_receiver_maps, flags, stub);
- return stub;
-}
-
-
-Handle<Code> KeyedIC::ComputeMonomorphicStubWithoutMapCheck(
- Handle<Map> receiver_map,
- StrictModeFlag strict_mode,
- KeyedAccessGrowMode grow_mode) {
- if ((receiver_map->instance_type() & kNotStringTag) == 0) {
- ASSERT(!string_stub().is_null());
- return string_stub();
- } else {
- ASSERT(receiver_map->has_dictionary_elements() ||
- receiver_map->has_fast_smi_or_object_elements() ||
- receiver_map->has_fast_double_elements() ||
- receiver_map->has_external_array_elements());
- bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
- return GetElementStubWithoutMapCheck(is_js_array,
- receiver_map->elements_kind(),
- grow_mode);
- }
-}
-
-
-Handle<Code> KeyedIC::ComputeMonomorphicStub(Handle<Map> receiver_map,
- StubKind stub_kind,
- StrictModeFlag strict_mode,
- Handle<Code> generic_stub) {
- ElementsKind elements_kind = receiver_map->elements_kind();
- if (IsFastElementsKind(elements_kind) ||
- IsExternalArrayElementsKind(elements_kind) ||
- IsDictionaryElementsKind(elements_kind)) {
- return isolate()->stub_cache()->ComputeKeyedLoadOrStoreElement(
- receiver_map, stub_kind, strict_mode);
- } else {
- return generic_stub;
- }
+ return isolate()->stub_cache()->ComputeStoreElementPolymorphic(
+ &target_receiver_maps, store_mode, strict_mode());
}
-Handle<Map> KeyedIC::ComputeTransitionedMap(Handle<JSObject> receiver,
- StubKind stub_kind) {
- switch (stub_kind) {
- case KeyedIC::STORE_TRANSITION_SMI_TO_OBJECT:
- case KeyedIC::STORE_TRANSITION_DOUBLE_TO_OBJECT:
- case KeyedIC::STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT:
- case KeyedIC::STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT:
+Handle<Map> KeyedStoreIC::ComputeTransitionedMap(
+ Handle<JSObject> receiver,
+ KeyedAccessStoreMode store_mode) {
+ switch (store_mode) {
+ case STORE_TRANSITION_SMI_TO_OBJECT:
+ case STORE_TRANSITION_DOUBLE_TO_OBJECT:
+ case STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT:
+ case STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT:
return JSObject::GetElementsTransitionMap(receiver, FAST_ELEMENTS);
- case KeyedIC::STORE_TRANSITION_SMI_TO_DOUBLE:
- case KeyedIC::STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE:
+ case STORE_TRANSITION_SMI_TO_DOUBLE:
+ case STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE:
return JSObject::GetElementsTransitionMap(receiver, FAST_DOUBLE_ELEMENTS);
- case KeyedIC::STORE_TRANSITION_HOLEY_SMI_TO_OBJECT:
- case KeyedIC::STORE_TRANSITION_HOLEY_DOUBLE_TO_OBJECT:
- case KeyedIC::STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_OBJECT:
- case KeyedIC::STORE_AND_GROW_TRANSITION_HOLEY_DOUBLE_TO_OBJECT:
+ case STORE_TRANSITION_HOLEY_SMI_TO_OBJECT:
+ case STORE_TRANSITION_HOLEY_DOUBLE_TO_OBJECT:
+ case STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_OBJECT:
+ case STORE_AND_GROW_TRANSITION_HOLEY_DOUBLE_TO_OBJECT:
return JSObject::GetElementsTransitionMap(receiver,
FAST_HOLEY_ELEMENTS);
- case KeyedIC::STORE_TRANSITION_HOLEY_SMI_TO_DOUBLE:
- case KeyedIC::STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_DOUBLE:
+ case STORE_TRANSITION_HOLEY_SMI_TO_DOUBLE:
+ case STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_DOUBLE:
return JSObject::GetElementsTransitionMap(receiver,
FAST_HOLEY_DOUBLE_ELEMENTS);
- case KeyedIC::LOAD:
- case KeyedIC::STORE_NO_TRANSITION:
- case KeyedIC::STORE_AND_GROW_NO_TRANSITION:
- UNREACHABLE();
- break;
+ case STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS:
+ ASSERT(receiver->map()->has_external_array_elements());
+ // Fall through
+ case STORE_NO_TRANSITION_HANDLE_COW:
+ case STANDARD_STORE:
+ case STORE_AND_GROW_NO_TRANSITION:
+ return Handle<Map>(receiver->map(), isolate());
}
return Handle<Map>::null();
}
-Handle<Code> KeyedStoreIC::GetElementStubWithoutMapCheck(
- bool is_js_array,
- ElementsKind elements_kind,
- KeyedAccessGrowMode grow_mode) {
- return KeyedStoreElementStub(is_js_array, elements_kind, grow_mode).GetCode();
-}
-
-
-Handle<Code> KeyedStoreIC::ComputePolymorphicStub(
- MapHandleList* receiver_maps,
- StrictModeFlag strict_mode,
- KeyedAccessGrowMode grow_mode) {
- // Collect MONOMORPHIC stubs for all target_receiver_maps.
- CodeHandleList handler_ics(receiver_maps->length());
- MapHandleList transitioned_maps(receiver_maps->length());
- for (int i = 0; i < receiver_maps->length(); ++i) {
- Handle<Map> receiver_map(receiver_maps->at(i));
- Handle<Code> cached_stub;
- Handle<Map> transitioned_map =
- receiver_map->FindTransitionedMap(receiver_maps);
- if (!transitioned_map.is_null()) {
- cached_stub = ElementsTransitionAndStoreStub(
- receiver_map->elements_kind(), // original elements_kind
- transitioned_map->elements_kind(),
- receiver_map->instance_type() == JS_ARRAY_TYPE, // is_js_array
- strict_mode, grow_mode).GetCode();
- } else {
- cached_stub = ComputeMonomorphicStubWithoutMapCheck(receiver_map,
- strict_mode,
- grow_mode);
- }
- ASSERT(!cached_stub.is_null());
- handler_ics.Add(cached_stub);
- transitioned_maps.Add(transitioned_map);
- }
- KeyedStoreStubCompiler compiler(isolate(), strict_mode, grow_mode);
- Handle<Code> code = compiler.CompileStorePolymorphic(
- receiver_maps, &handler_ics, &transitioned_maps);
- isolate()->counters()->keyed_store_polymorphic_stubs()->Increment();
- PROFILE(isolate(),
- CodeCreateEvent(Logger::KEYED_STORE_MEGAMORPHIC_IC_TAG, *code, 0));
- return code;
+bool IsOutOfBoundsAccess(Handle<JSObject> receiver,
+ int index) {
+ if (receiver->IsJSArray()) {
+ return JSArray::cast(*receiver)->length()->IsSmi() &&
+ index >= Smi::cast(JSArray::cast(*receiver)->length())->value();
+ }
+ return index >= receiver->elements()->length();
}
-KeyedIC::StubKind KeyedStoreIC::GetStubKind(Handle<JSObject> receiver,
- Handle<Object> key,
- Handle<Object> value) {
- ASSERT(key->IsSmi());
- int index = Smi::cast(*key)->value();
- bool allow_growth = receiver->IsJSArray() &&
- JSArray::cast(*receiver)->length()->IsSmi() &&
- index >= Smi::cast(JSArray::cast(*receiver)->length())->value();
-
+KeyedAccessStoreMode KeyedStoreIC::GetStoreMode(Handle<JSObject> receiver,
+ Handle<Object> key,
+ Handle<Object> value) {
+ ASSERT(!key->ToSmi()->IsFailure());
+ Smi* smi_key = NULL;
+ key->ToSmi()->To(&smi_key);
+ int index = smi_key->value();
+ bool oob_access = IsOutOfBoundsAccess(receiver, index);
+ bool allow_growth = receiver->IsJSArray() && oob_access;
if (allow_growth) {
// Handle growing array in stub if necessary.
if (receiver->HasFastSmiElements()) {
@@ -1858,169 +1917,87 @@ KeyedIC::StubKind KeyedStoreIC::GetStubKind(Handle<JSObject> receiver,
}
}
}
- return STORE_NO_TRANSITION;
+ if (!FLAG_trace_external_array_abuse &&
+ receiver->map()->has_external_array_elements() && oob_access) {
+ return STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS;
+ }
+ Heap* heap = receiver->GetHeap();
+ if (receiver->elements()->map() == heap->fixed_cow_array_map()) {
+ return STORE_NO_TRANSITION_HANDLE_COW;
+ } else {
+ return STANDARD_STORE;
+ }
}
}
-MaybeObject* KeyedStoreIC::Store(State state,
- StrictModeFlag strict_mode,
- Handle<Object> object,
+MaybeObject* KeyedStoreIC::Store(Handle<Object> object,
Handle<Object> key,
Handle<Object> value,
- bool force_generic) {
- // Check for values that can be converted into a symbol directly or
- // is representable as a smi.
- key = TryConvertKey(key, isolate());
-
- if (key->IsSymbol()) {
- Handle<String> name = Handle<String>::cast(key);
-
- // Handle proxies.
- if (object->IsJSProxy()) {
- return JSProxy::cast(*object)->SetProperty(
- *name, *value, NONE, strict_mode);
- }
+ ICMissMode miss_mode) {
+ if (MigrateDeprecated(object)) {
+ return Runtime::SetObjectPropertyOrFail(
+        isolate(), object, key, value, NONE, strict_mode());
+ }
- // If the object is undefined or null it's illegal to try to set any
- // properties on it; throw a TypeError in that case.
- if (object->IsUndefined() || object->IsNull()) {
- return TypeError("non_object_property_store", object, name);
- }
+  // Check for values that can be converted into an internalized string
+  // directly or are representable as a smi.
+ key = TryConvertKey(key, isolate());
- // Ignore stores where the receiver is not a JSObject.
- if (!object->IsJSObject()) return *value;
- Handle<JSObject> receiver = Handle<JSObject>::cast(object);
-
- // Check if the given name is an array index.
- uint32_t index;
- if (name->AsArrayIndex(&index)) {
- Handle<Object> result =
- JSObject::SetElement(receiver, index, value, NONE, strict_mode);
- RETURN_IF_EMPTY_HANDLE(isolate(), result);
- return *value;
- }
+ MaybeObject* maybe_object = NULL;
+ Handle<Code> stub = generic_stub();
- // Update inline cache and stub cache.
- if (FLAG_use_ic && !receiver->IsJSGlobalProxy()) {
- LookupResult lookup(isolate());
- if (LookupForWrite(receiver, name, &lookup)) {
- UpdateCaches(&lookup, state, strict_mode, receiver, name, value);
- }
+ if (key->IsInternalizedString()) {
+ maybe_object = StoreIC::Store(object,
+ Handle<String>::cast(key),
+ value,
+ JSReceiver::MAY_BE_STORE_FROM_KEYED);
+ if (maybe_object->IsFailure()) return maybe_object;
+ } else {
+ bool use_ic = FLAG_use_ic && !object->IsAccessCheckNeeded() &&
+ !(FLAG_harmony_observation && object->IsJSObject() &&
+ JSObject::cast(*object)->map()->is_observed());
+ if (use_ic && !object->IsSmi()) {
+ // Don't use ICs for maps of the objects in Array's prototype chain. We
+ // expect to be able to trap element sets to objects with those maps in
+ // the runtime to enable optimization of element hole access.
+ Handle<HeapObject> heap_object = Handle<HeapObject>::cast(object);
+ if (heap_object->map()->IsMapInArrayPrototypeChain()) use_ic = false;
}
- // Set the property.
- return receiver->SetProperty(*name, *value, NONE, strict_mode);
- }
-
- // Do not use ICs for objects that require access checks (including
- // the global object).
- bool use_ic = FLAG_use_ic && !object->IsAccessCheckNeeded();
- ASSERT(!(use_ic && object->IsJSGlobalProxy()));
-
- if (use_ic) {
- Handle<Code> stub = (strict_mode == kStrictMode)
- ? generic_stub_strict()
- : generic_stub();
- if (object->IsJSObject()) {
- Handle<JSObject> receiver = Handle<JSObject>::cast(object);
- if (receiver->elements()->map() ==
- isolate()->heap()->non_strict_arguments_elements_map()) {
- stub = non_strict_arguments_stub();
- } else if (!force_generic) {
- if (key->IsSmi() && (target() != *non_strict_arguments_stub())) {
- StubKind stub_kind = GetStubKind(receiver, key, value);
- stub = ComputeStub(receiver, stub_kind, strict_mode, stub);
+ if (use_ic) {
+ ASSERT(!object->IsJSGlobalProxy());
+
+ if (miss_mode != MISS_FORCE_GENERIC) {
+ if (object->IsJSObject()) {
+ Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+ bool key_is_smi_like = key->IsSmi() || !key->ToSmi()->IsFailure();
+ if (receiver->elements()->map() ==
+ isolate()->heap()->non_strict_arguments_elements_map()) {
+ stub = non_strict_arguments_stub();
+ } else if (key_is_smi_like &&
+ (!target().is_identical_to(non_strict_arguments_stub()))) {
+ KeyedAccessStoreMode store_mode =
+ GetStoreMode(receiver, key, value);
+ stub = StoreElementStub(receiver, store_mode);
+ }
}
- } else {
- TRACE_GENERIC_IC("KeyedStoreIC", "force generic");
}
}
- if (!stub.is_null()) set_target(*stub);
}
- TRACE_IC("KeyedStoreIC", key, state, target());
-
- // Set the property.
- return Runtime::SetObjectProperty(
- isolate(), object , key, value, NONE, strict_mode);
-}
-
-
-void KeyedStoreIC::UpdateCaches(LookupResult* lookup,
- State state,
- StrictModeFlag strict_mode,
- Handle<JSObject> receiver,
- Handle<String> name,
- Handle<Object> value) {
- ASSERT(!receiver->IsJSGlobalProxy());
- ASSERT(StoreICableLookup(lookup));
- ASSERT(lookup->IsFound());
-
- // These are not cacheable, so we never see such LookupResults here.
- ASSERT(!lookup->IsHandler());
-
- // If the property has a non-field type allowing map transitions
- // where there is extra room in the object, we leave the IC in its
- // current state.
- PropertyType type = lookup->type();
-
- // Compute the code stub for this store; used for rewriting to
- // monomorphic state and making sure that the code stub is in the
- // stub cache.
- Handle<Code> code;
-
- switch (type) {
- case FIELD:
- code = isolate()->stub_cache()->ComputeKeyedStoreField(
- name, receiver, lookup->GetFieldIndex(),
- Handle<Map>::null(), strict_mode);
- break;
- case TRANSITION: {
- Handle<Map> transition(lookup->GetTransitionTarget());
- int descriptor = transition->LastAdded();
-
- DescriptorArray* target_descriptors = transition->instance_descriptors();
- PropertyDetails details = target_descriptors->GetDetails(descriptor);
-
- if (details.type() == FIELD && details.attributes() == NONE) {
- int field_index = target_descriptors->GetFieldIndex(descriptor);
- code = isolate()->stub_cache()->ComputeKeyedStoreField(
- name, receiver, field_index, transition, strict_mode);
- break;
- }
- // fall through.
+ if (!is_target_set()) {
+ if (*stub == *generic_stub()) {
+ TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "set generic");
}
- case NORMAL:
- case CONSTANT_FUNCTION:
- case CALLBACKS:
- case INTERCEPTOR:
- // Always rewrite to the generic case so that we do not
- // repeatedly try to rewrite.
- code = (strict_mode == kStrictMode)
- ? generic_stub_strict()
- : generic_stub();
- break;
- case HANDLER:
- case NONEXISTENT:
- UNREACHABLE();
- return;
- }
-
- ASSERT(!code.is_null());
-
- // Patch the call site depending on the state of the cache. Make
- // sure to always rewrite from monomorphic to megamorphic.
- ASSERT(state != MONOMORPHIC_PROTOTYPE_FAILURE);
- if (state == UNINITIALIZED || state == PREMONOMORPHIC) {
- set_target(*code);
- } else if (state == MONOMORPHIC) {
- set_target((strict_mode == kStrictMode)
- ? *megamorphic_stub_strict()
- : *megamorphic_stub());
+ ASSERT(!stub.is_null());
+ set_target(*stub);
+ TRACE_IC("StoreIC", key);
}
- TRACE_IC("KeyedStoreIC", name, state, target());
+ if (maybe_object) return maybe_object;
+ return Runtime::SetObjectPropertyOrFail(
+      isolate(), object, key, value, NONE, strict_mode());
}
@@ -2036,19 +2013,16 @@ RUNTIME_FUNCTION(MaybeObject*, CallIC_Miss) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
CallIC ic(isolate);
- IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
- Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state();
- MaybeObject* maybe_result = ic.LoadFunction(state,
- extra_ic_state,
- args.at<Object>(0),
- args.at<String>(1));
- // Result could be a function or a failure.
- JSFunction* raw_function = NULL;
+ Handle<Object> receiver = args.at<Object>(0);
+ Handle<String> key = args.at<String>(1);
+ ic.UpdateState(receiver, key);
+ MaybeObject* maybe_result = ic.LoadFunction(receiver, key);
+ JSFunction* raw_function;
if (!maybe_result->To(&raw_function)) return maybe_result;
// The first time the inline cache is updated may be the first time the
- // function it references gets called. If the function is lazily compiled
- // then the first call will trigger a compilation. We check for this case
+ // function it references gets called. If the function is lazily compiled
+ // then the first call will trigger a compilation. We check for this case
// and we do the compilation immediately, instead of waiting for the stub
// currently attached to the JSFunction object to trigger compilation.
if (raw_function->is_compiled()) return raw_function;
@@ -2064,16 +2038,17 @@ RUNTIME_FUNCTION(MaybeObject*, KeyedCallIC_Miss) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
KeyedCallIC ic(isolate);
- IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
- MaybeObject* maybe_result =
- ic.LoadFunction(state, args.at<Object>(0), args.at<Object>(1));
+ Handle<Object> receiver = args.at<Object>(0);
+ Handle<Object> key = args.at<Object>(1);
+ ic.UpdateState(receiver, key);
+ MaybeObject* maybe_result = ic.LoadFunction(receiver, key);
// Result could be a function or a failure.
JSFunction* raw_function = NULL;
if (!maybe_result->To(&raw_function)) return maybe_result;
if (raw_function->is_compiled()) return raw_function;
- Handle<JSFunction> function(raw_function);
+ Handle<JSFunction> function(raw_function, isolate);
JSFunction::CompileLazy(function, CLEAR_EXCEPTION);
return *function;
}
@@ -2083,9 +2058,11 @@ RUNTIME_FUNCTION(MaybeObject*, KeyedCallIC_Miss) {
RUNTIME_FUNCTION(MaybeObject*, LoadIC_Miss) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
- LoadIC ic(isolate);
- IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
- return ic.Load(state, args.at<Object>(0), args.at<String>(1));
+ LoadIC ic(IC::NO_EXTRA_FRAME, isolate);
+ Handle<Object> receiver = args.at<Object>(0);
+ Handle<String> key = args.at<String>(1);
+ ic.UpdateState(receiver, key);
+ return ic.Load(receiver, key);
}
@@ -2093,38 +2070,61 @@ RUNTIME_FUNCTION(MaybeObject*, LoadIC_Miss) {
RUNTIME_FUNCTION(MaybeObject*, KeyedLoadIC_Miss) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
- KeyedLoadIC ic(isolate);
- IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
- return ic.Load(state, args.at<Object>(0), args.at<Object>(1), false);
+ KeyedLoadIC ic(IC::NO_EXTRA_FRAME, isolate);
+ Handle<Object> receiver = args.at<Object>(0);
+ Handle<Object> key = args.at<Object>(1);
+ ic.UpdateState(receiver, key);
+ return ic.Load(receiver, key, MISS);
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, KeyedLoadIC_MissFromStubFailure) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 2);
+ KeyedLoadIC ic(IC::EXTRA_CALL_FRAME, isolate);
+ Handle<Object> receiver = args.at<Object>(0);
+ Handle<Object> key = args.at<Object>(1);
+ ic.UpdateState(receiver, key);
+ return ic.Load(receiver, key, MISS);
}
RUNTIME_FUNCTION(MaybeObject*, KeyedLoadIC_MissForceGeneric) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
- KeyedLoadIC ic(isolate);
- IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
- return ic.Load(state, args.at<Object>(0), args.at<Object>(1), true);
+ KeyedLoadIC ic(IC::NO_EXTRA_FRAME, isolate);
+ Handle<Object> receiver = args.at<Object>(0);
+ Handle<Object> key = args.at<Object>(1);
+ ic.UpdateState(receiver, key);
+ return ic.Load(receiver, key, MISS_FORCE_GENERIC);
}
// Used from ic-<arch>.cc.
RUNTIME_FUNCTION(MaybeObject*, StoreIC_Miss) {
- HandleScope scope;
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 3);
+ StoreIC ic(IC::NO_EXTRA_FRAME, isolate);
+ Handle<Object> receiver = args.at<Object>(0);
+ Handle<String> key = args.at<String>(1);
+ ic.UpdateState(receiver, key);
+ return ic.Store(receiver, key, args.at<Object>(2));
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, StoreIC_MissFromStubFailure) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 3);
- StoreIC ic(isolate);
- IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
- Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state();
- return ic.Store(state,
- Code::GetStrictMode(extra_ic_state),
- args.at<Object>(0),
- args.at<String>(1),
- args.at<Object>(2));
+ StoreIC ic(IC::EXTRA_CALL_FRAME, isolate);
+ Handle<Object> receiver = args.at<Object>(0);
+ Handle<String> key = args.at<String>(1);
+ ic.UpdateState(receiver, key);
+ return ic.Store(receiver, key, args.at<Object>(2));
}
RUNTIME_FUNCTION(MaybeObject*, StoreIC_ArrayLength) {
- NoHandleAllocation nha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
JSArray* receiver = JSArray::cast(args[0]);
@@ -2136,14 +2136,14 @@ RUNTIME_FUNCTION(MaybeObject*, StoreIC_ArrayLength) {
#ifdef DEBUG
// The length property has to be a writable callback property.
LookupResult debug_lookup(isolate);
- receiver->LocalLookup(isolate->heap()->length_symbol(), &debug_lookup);
+ receiver->LocalLookup(isolate->heap()->length_string(), &debug_lookup);
ASSERT(debug_lookup.IsPropertyCallbacks() && !debug_lookup.IsReadOnly());
#endif
Object* result;
- { MaybeObject* maybe_result = receiver->SetElementsLength(len);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
+ MaybeObject* maybe_result = receiver->SetElementsLength(len);
+ if (!maybe_result->To(&result)) return maybe_result;
+
return len;
}
@@ -2152,7 +2152,7 @@ RUNTIME_FUNCTION(MaybeObject*, StoreIC_ArrayLength) {
// it is necessary to extend the properties array of a
// JSObject.
RUNTIME_FUNCTION(MaybeObject*, SharedStoreIC_ExtendStorage) {
- NoHandleAllocation na;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 3);
// Convert the parameters
@@ -2169,11 +2169,24 @@ RUNTIME_FUNCTION(MaybeObject*, SharedStoreIC_ExtendStorage) {
int new_unused = transition->unused_property_fields();
int new_size = old_storage->length() + new_unused + 1;
Object* result;
- { MaybeObject* maybe_result = old_storage->CopySize(new_size);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
+ MaybeObject* maybe_result = old_storage->CopySize(new_size);
+ if (!maybe_result->ToObject(&result)) return maybe_result;
+
FixedArray* new_storage = FixedArray::cast(result);
- new_storage->set(old_storage->length(), value);
+
+ Object* to_store = value;
+
+ if (FLAG_track_double_fields) {
+ DescriptorArray* descriptors = transition->instance_descriptors();
+ PropertyDetails details = descriptors->GetDetails(transition->LastAdded());
+ if (details.representation().IsDouble()) {
+ MaybeObject* maybe_storage =
+ isolate->heap()->AllocateHeapNumber(value->Number());
+ if (!maybe_storage->To(&to_store)) return maybe_storage;
+ }
+ }
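+  // For example, if the transition adds a field tracked with double
+  // representation and the incoming value is the smi 1, the value is boxed in
+  // a freshly allocated HeapNumber so the field always holds a number of the
+  // expected representation.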
+
+ new_storage->set(old_storage->length(), to_store);
// Set the new property value and do the map transition.
object->set_properties(new_storage);
@@ -2188,27 +2201,50 @@ RUNTIME_FUNCTION(MaybeObject*, SharedStoreIC_ExtendStorage) {
RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_Miss) {
HandleScope scope(isolate);
ASSERT(args.length() == 3);
- KeyedStoreIC ic(isolate);
- IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
- Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state();
- return ic.Store(state,
- Code::GetStrictMode(extra_ic_state),
- args.at<Object>(0),
- args.at<Object>(1),
- args.at<Object>(2),
- false);
+ KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate);
+ Handle<Object> receiver = args.at<Object>(0);
+ Handle<Object> key = args.at<Object>(1);
+ ic.UpdateState(receiver, key);
+ return ic.Store(receiver, key, args.at<Object>(2), MISS);
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_MissFromStubFailure) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 3);
+ KeyedStoreIC ic(IC::EXTRA_CALL_FRAME, isolate);
+ Handle<Object> receiver = args.at<Object>(0);
+ Handle<Object> key = args.at<Object>(1);
+ ic.UpdateState(receiver, key);
+ return ic.Store(receiver, key, args.at<Object>(2), MISS);
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, StoreIC_Slow) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 3);
+ StoreIC ic(IC::NO_EXTRA_FRAME, isolate);
+ Handle<Object> object = args.at<Object>(0);
+ Handle<Object> key = args.at<Object>(1);
+ Handle<Object> value = args.at<Object>(2);
+ StrictModeFlag strict_mode = ic.strict_mode();
+ return Runtime::SetObjectProperty(isolate,
+ object,
+ key,
+ value,
+ NONE,
+ strict_mode);
}
RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_Slow) {
- NoHandleAllocation na;
+ HandleScope scope(isolate);
ASSERT(args.length() == 3);
- KeyedStoreIC ic(isolate);
- Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state();
+ KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate);
Handle<Object> object = args.at<Object>(0);
Handle<Object> key = args.at<Object>(1);
Handle<Object> value = args.at<Object>(2);
- StrictModeFlag strict_mode = Code::GetStrictMode(extra_ic_state);
+ StrictModeFlag strict_mode = ic.strict_mode();
return Runtime::SetObjectProperty(isolate,
object,
key,
@@ -2221,469 +2257,465 @@ RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_Slow) {
RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_MissForceGeneric) {
HandleScope scope(isolate);
ASSERT(args.length() == 3);
- KeyedStoreIC ic(isolate);
- IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
- Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state();
- return ic.Store(state,
- Code::GetStrictMode(extra_ic_state),
- args.at<Object>(0),
- args.at<Object>(1),
- args.at<Object>(2),
- true);
+ KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate);
+ Handle<Object> receiver = args.at<Object>(0);
+ Handle<Object> key = args.at<Object>(1);
+ ic.UpdateState(receiver, key);
+ return ic.Store(receiver, key, args.at<Object>(2), MISS_FORCE_GENERIC);
}
-void UnaryOpIC::patch(Code* code) {
- set_target(code);
+RUNTIME_FUNCTION(MaybeObject*, ElementsTransitionAndStoreIC_Miss) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 4);
+ KeyedStoreIC ic(IC::EXTRA_CALL_FRAME, isolate);
+ Handle<Object> value = args.at<Object>(0);
+ Handle<Object> key = args.at<Object>(2);
+ Handle<Object> object = args.at<Object>(3);
+ StrictModeFlag strict_mode = ic.strict_mode();
+ return Runtime::SetObjectProperty(isolate,
+ object,
+ key,
+ value,
+ NONE,
+ strict_mode);
}
-const char* UnaryOpIC::GetName(TypeInfo type_info) {
+const char* BinaryOpIC::GetName(TypeInfo type_info) {
switch (type_info) {
case UNINITIALIZED: return "Uninitialized";
case SMI: return "Smi";
- case HEAP_NUMBER: return "HeapNumbers";
+ case INT32: return "Int32";
+ case NUMBER: return "Number";
+ case ODDBALL: return "Oddball";
+ case STRING: return "String";
case GENERIC: return "Generic";
default: return "Invalid";
}
}
-UnaryOpIC::State UnaryOpIC::ToState(TypeInfo type_info) {
- switch (type_info) {
- case UNINITIALIZED:
- return ::v8::internal::UNINITIALIZED;
- case SMI:
- case HEAP_NUMBER:
- return MONOMORPHIC;
- case GENERIC:
- return MEGAMORPHIC;
- }
- UNREACHABLE();
- return ::v8::internal::UNINITIALIZED;
-}
+MaybeObject* BinaryOpIC::Transition(Handle<Object> left, Handle<Object> right) {
+ Code::ExtraICState extra_ic_state = target()->extended_extra_ic_state();
+ BinaryOpStub stub(extra_ic_state);
-UnaryOpIC::TypeInfo UnaryOpIC::GetTypeInfo(Handle<Object> operand) {
- ::v8::internal::TypeInfo operand_type =
- ::v8::internal::TypeInfo::TypeFromValue(operand);
- if (operand_type.IsSmi()) {
- return SMI;
- } else if (operand_type.IsNumber()) {
- return HEAP_NUMBER;
- } else {
- return GENERIC;
- }
-}
+ Handle<Type> left_type = stub.GetLeftType(isolate());
+ Handle<Type> right_type = stub.GetRightType(isolate());
+ bool smi_was_enabled = left_type->Maybe(Type::Smi()) &&
+ right_type->Maybe(Type::Smi());
+ Maybe<Handle<Object> > result = stub.Result(left, right, isolate());
+ if (!result.has_value) return Failure::Exception();
-UnaryOpIC::TypeInfo UnaryOpIC::ComputeNewType(
- UnaryOpIC::TypeInfo current_type,
- UnaryOpIC::TypeInfo previous_type) {
- switch (previous_type) {
- case UnaryOpIC::UNINITIALIZED:
- return current_type;
- case UnaryOpIC::SMI:
- return (current_type == UnaryOpIC::GENERIC)
- ? UnaryOpIC::GENERIC
- : UnaryOpIC::HEAP_NUMBER;
- case UnaryOpIC::HEAP_NUMBER:
- return UnaryOpIC::GENERIC;
- case UnaryOpIC::GENERIC:
- // We should never do patching if we are in GENERIC state.
- UNREACHABLE();
- return UnaryOpIC::GENERIC;
+#ifdef DEBUG
+ if (FLAG_trace_ic) {
+ char buffer[100];
+ NoAllocationStringAllocator allocator(buffer,
+ static_cast<unsigned>(sizeof(buffer)));
+ StringStream stream(&allocator);
+ stream.Add("[");
+ stub.PrintName(&stream);
+
+ stub.UpdateStatus(left, right, result);
+
+ stream.Add(" => ");
+ stub.PrintState(&stream);
+ stream.Add(" ");
+ stream.OutputToStdOut();
+ PrintF(" @ %p <- ", static_cast<void*>(*stub.GetCode(isolate())));
+ JavaScriptFrame::PrintTop(isolate(), stdout, false, true);
+ PrintF("]\n");
+ } else {
+ stub.UpdateStatus(left, right, result);
}
- UNREACHABLE();
- return UnaryOpIC::GENERIC;
-}
-
+#else
+ stub.UpdateStatus(left, right, result);
+#endif
-void BinaryOpIC::patch(Code* code) {
- set_target(code);
-}
+ Handle<Code> code = stub.GetCode(isolate());
+ set_target(*code);
+ left_type = stub.GetLeftType(isolate());
+ right_type = stub.GetRightType(isolate());
+ bool enable_smi = left_type->Maybe(Type::Smi()) &&
+ right_type->Maybe(Type::Smi());
-const char* BinaryOpIC::GetName(TypeInfo type_info) {
- switch (type_info) {
- case UNINITIALIZED: return "Uninitialized";
- case SMI: return "SMI";
- case INT32: return "Int32s";
- case HEAP_NUMBER: return "HeapNumbers";
- case ODDBALL: return "Oddball";
- case BOTH_STRING: return "BothStrings";
- case STRING: return "Strings";
- case GENERIC: return "Generic";
- default: return "Invalid";
+ if (!smi_was_enabled && enable_smi) {
+ PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK);
+ } else if (smi_was_enabled && !enable_smi) {
+ PatchInlinedSmiCode(address(), DISABLE_INLINED_SMI_CHECK);
}
-}
-
-BinaryOpIC::State BinaryOpIC::ToState(TypeInfo type_info) {
- switch (type_info) {
- case UNINITIALIZED:
- return ::v8::internal::UNINITIALIZED;
- case SMI:
- case INT32:
- case HEAP_NUMBER:
- case ODDBALL:
- case BOTH_STRING:
- case STRING:
- return MONOMORPHIC;
- case GENERIC:
- return MEGAMORPHIC;
- }
- UNREACHABLE();
- return ::v8::internal::UNINITIALIZED;
+ ASSERT(result.has_value);
+ return static_cast<MaybeObject*>(*result.value);
}
-BinaryOpIC::TypeInfo BinaryOpIC::JoinTypes(BinaryOpIC::TypeInfo x,
- BinaryOpIC::TypeInfo y) {
- if (x == UNINITIALIZED) return y;
- if (y == UNINITIALIZED) return x;
- if (x == y) return x;
- if (x == BOTH_STRING && y == STRING) return STRING;
- if (x == STRING && y == BOTH_STRING) return STRING;
- if (x == STRING || x == BOTH_STRING || y == STRING || y == BOTH_STRING) {
- return GENERIC;
- }
- if (x > y) return x;
- return y;
+RUNTIME_FUNCTION(MaybeObject*, BinaryOpIC_Miss) {
+ HandleScope scope(isolate);
+ Handle<Object> left = args.at<Object>(0);
+ Handle<Object> right = args.at<Object>(1);
+ BinaryOpIC ic(isolate);
+ return ic.Transition(left, right);
}
-BinaryOpIC::TypeInfo BinaryOpIC::GetTypeInfo(Handle<Object> left,
- Handle<Object> right) {
- ::v8::internal::TypeInfo left_type =
- ::v8::internal::TypeInfo::TypeFromValue(left);
- ::v8::internal::TypeInfo right_type =
- ::v8::internal::TypeInfo::TypeFromValue(right);
-
- if (left_type.IsSmi() && right_type.IsSmi()) {
- return SMI;
- }
-
- if (left_type.IsInteger32() && right_type.IsInteger32()) {
- // Platforms with 32-bit Smis have no distinct INT32 type.
- if (kSmiValueSize == 32) return SMI;
- return INT32;
- }
-
- if (left_type.IsNumber() && right_type.IsNumber()) {
- return HEAP_NUMBER;
- }
-
- // Patching for fast string ADD makes sense even if only one of the
- // arguments is a string.
- if (left_type.IsString()) {
- return right_type.IsString() ? BOTH_STRING : STRING;
- } else if (right_type.IsString()) {
- return STRING;
- }
-
- // Check for oddball objects.
- if (left->IsUndefined() && right->IsNumber()) return ODDBALL;
- if (left->IsNumber() && right->IsUndefined()) return ODDBALL;
-
- return GENERIC;
+Code* CompareIC::GetRawUninitialized(Isolate* isolate, Token::Value op) {
+ ICCompareStub stub(op, UNINITIALIZED, UNINITIALIZED, UNINITIALIZED);
+ Code* code = NULL;
+ CHECK(stub.FindCodeInCache(&code, isolate));
+ return code;
}
-RUNTIME_FUNCTION(MaybeObject*, UnaryOp_Patch) {
- ASSERT(args.length() == 4);
+Handle<Code> CompareIC::GetUninitialized(Isolate* isolate, Token::Value op) {
+ ICCompareStub stub(op, UNINITIALIZED, UNINITIALIZED, UNINITIALIZED);
+ return stub.GetCode(isolate);
+}
- HandleScope scope(isolate);
- Handle<Object> operand = args.at<Object>(0);
- Token::Value op = static_cast<Token::Value>(args.smi_at(1));
- UnaryOverwriteMode mode = static_cast<UnaryOverwriteMode>(args.smi_at(2));
- UnaryOpIC::TypeInfo previous_type =
- static_cast<UnaryOpIC::TypeInfo>(args.smi_at(3));
-
- UnaryOpIC::TypeInfo type = UnaryOpIC::GetTypeInfo(operand);
- type = UnaryOpIC::ComputeNewType(type, previous_type);
-
- UnaryOpStub stub(op, mode, type);
- Handle<Code> code = stub.GetCode();
- if (!code.is_null()) {
- if (FLAG_trace_ic) {
- PrintF("[UnaryOpIC (%s->%s)#%s]\n",
- UnaryOpIC::GetName(previous_type),
- UnaryOpIC::GetName(type),
- Token::Name(op));
- }
- UnaryOpIC ic(isolate);
- ic.patch(*code);
- }
- Handle<JSBuiltinsObject> builtins = Handle<JSBuiltinsObject>(
- isolate->thread_local_top()->context_->builtins(), isolate);
- Object* builtin = NULL; // Initialization calms down the compiler.
- switch (op) {
- case Token::SUB:
- builtin = builtins->javascript_builtin(Builtins::UNARY_MINUS);
- break;
- case Token::BIT_NOT:
- builtin = builtins->javascript_builtin(Builtins::BIT_NOT);
- break;
- default:
- UNREACHABLE();
+const char* CompareIC::GetStateName(State state) {
+ switch (state) {
+ case UNINITIALIZED: return "UNINITIALIZED";
+ case SMI: return "SMI";
+ case NUMBER: return "NUMBER";
+ case INTERNALIZED_STRING: return "INTERNALIZED_STRING";
+ case STRING: return "STRING";
+ case UNIQUE_NAME: return "UNIQUE_NAME";
+ case OBJECT: return "OBJECT";
+ case KNOWN_OBJECT: return "KNOWN_OBJECT";
+ case GENERIC: return "GENERIC";
}
+ UNREACHABLE();
+ return NULL;
+}
- Handle<JSFunction> builtin_function(JSFunction::cast(builtin), isolate);
- bool caught_exception;
- Handle<Object> result = Execution::Call(builtin_function, operand, 0, NULL,
- &caught_exception);
- if (caught_exception) {
- return Failure::Exception();
+Handle<Type> CompareIC::StateToType(
+ Isolate* isolate,
+ CompareIC::State state,
+ Handle<Map> map) {
+ switch (state) {
+ case CompareIC::UNINITIALIZED:
+ return handle(Type::None(), isolate);
+ case CompareIC::SMI:
+ return handle(Type::Smi(), isolate);
+ case CompareIC::NUMBER:
+ return handle(Type::Number(), isolate);
+ case CompareIC::STRING:
+ return handle(Type::String(), isolate);
+ case CompareIC::INTERNALIZED_STRING:
+ return handle(Type::InternalizedString(), isolate);
+ case CompareIC::UNIQUE_NAME:
+ return handle(Type::UniqueName(), isolate);
+ case CompareIC::OBJECT:
+ return handle(Type::Receiver(), isolate);
+ case CompareIC::KNOWN_OBJECT:
+ return handle(
+ map.is_null() ? Type::Receiver() : Type::Class(map), isolate);
+ case CompareIC::GENERIC:
+ return handle(Type::Any(), isolate);
}
- return *result;
+ UNREACHABLE();
+ return Handle<Type>();
}
-RUNTIME_FUNCTION(MaybeObject*, BinaryOp_Patch) {
- ASSERT(args.length() == 5);
-
- HandleScope scope(isolate);
- Handle<Object> left = args.at<Object>(0);
- Handle<Object> right = args.at<Object>(1);
- int key = args.smi_at(2);
- Token::Value op = static_cast<Token::Value>(args.smi_at(3));
- BinaryOpIC::TypeInfo previous_type =
- static_cast<BinaryOpIC::TypeInfo>(args.smi_at(4));
-
- BinaryOpIC::TypeInfo type = BinaryOpIC::GetTypeInfo(left, right);
- type = BinaryOpIC::JoinTypes(type, previous_type);
- BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED;
- if ((type == BinaryOpIC::STRING || type == BinaryOpIC::BOTH_STRING) &&
- op != Token::ADD) {
- type = BinaryOpIC::GENERIC;
- }
- if (type == BinaryOpIC::SMI && previous_type == BinaryOpIC::SMI) {
- if (op == Token::DIV ||
- op == Token::MUL ||
- op == Token::SHR ||
- kSmiValueSize == 32) {
- // Arithmetic on two Smi inputs has yielded a heap number.
- // That is the only way to get here from the Smi stub.
- // With 32-bit Smis, all overflows give heap numbers, but with
- // 31-bit Smis, most operations overflow to int32 results.
- result_type = BinaryOpIC::HEAP_NUMBER;
- } else {
- // Other operations on SMIs that overflow yield int32s.
- result_type = BinaryOpIC::INT32;
- }
- }
- if (type == BinaryOpIC::INT32 && previous_type == BinaryOpIC::INT32) {
- // We must be here because an operation on two INT32 types overflowed.
- result_type = BinaryOpIC::HEAP_NUMBER;
- }
- BinaryOpStub stub(key, type, result_type);
- Handle<Code> code = stub.GetCode();
- if (!code.is_null()) {
- if (FLAG_trace_ic) {
- PrintF("[BinaryOpIC (%s->(%s->%s))#%s]\n",
- BinaryOpIC::GetName(previous_type),
- BinaryOpIC::GetName(type),
- BinaryOpIC::GetName(result_type),
- Token::Name(op));
- }
- BinaryOpIC ic(isolate);
- ic.patch(*code);
+void CompareIC::StubInfoToType(int stub_minor_key,
+ Handle<Type>* left_type,
+ Handle<Type>* right_type,
+ Handle<Type>* overall_type,
+ Handle<Map> map,
+ Isolate* isolate) {
+ State left_state, right_state, handler_state;
+ ICCompareStub::DecodeMinorKey(stub_minor_key, &left_state, &right_state,
+ &handler_state, NULL);
+ *left_type = StateToType(isolate, left_state);
+ *right_type = StateToType(isolate, right_state);
+ *overall_type = StateToType(isolate, handler_state, map);
+}
- // Activate inlined smi code.
- if (previous_type == BinaryOpIC::UNINITIALIZED) {
- PatchInlinedSmiCode(ic.address(), ENABLE_INLINED_SMI_CHECK);
- }
- }
- Handle<JSBuiltinsObject> builtins = Handle<JSBuiltinsObject>(
- isolate->thread_local_top()->context_->builtins(), isolate);
- Object* builtin = NULL; // Initialization calms down the compiler.
- switch (op) {
- case Token::ADD:
- builtin = builtins->javascript_builtin(Builtins::ADD);
- break;
- case Token::SUB:
- builtin = builtins->javascript_builtin(Builtins::SUB);
- break;
- case Token::MUL:
- builtin = builtins->javascript_builtin(Builtins::MUL);
- break;
- case Token::DIV:
- builtin = builtins->javascript_builtin(Builtins::DIV);
+CompareIC::State CompareIC::NewInputState(State old_state,
+ Handle<Object> value) {
+ switch (old_state) {
+ case UNINITIALIZED:
+ if (value->IsSmi()) return SMI;
+ if (value->IsHeapNumber()) return NUMBER;
+ if (value->IsInternalizedString()) return INTERNALIZED_STRING;
+ if (value->IsString()) return STRING;
+ if (value->IsSymbol()) return UNIQUE_NAME;
+ if (value->IsJSObject()) return OBJECT;
break;
- case Token::MOD:
- builtin = builtins->javascript_builtin(Builtins::MOD);
+ case SMI:
+ if (value->IsSmi()) return SMI;
+ if (value->IsHeapNumber()) return NUMBER;
break;
- case Token::BIT_AND:
- builtin = builtins->javascript_builtin(Builtins::BIT_AND);
+ case NUMBER:
+ if (value->IsNumber()) return NUMBER;
break;
- case Token::BIT_OR:
- builtin = builtins->javascript_builtin(Builtins::BIT_OR);
+ case INTERNALIZED_STRING:
+ if (value->IsInternalizedString()) return INTERNALIZED_STRING;
+ if (value->IsString()) return STRING;
+ if (value->IsSymbol()) return UNIQUE_NAME;
break;
- case Token::BIT_XOR:
- builtin = builtins->javascript_builtin(Builtins::BIT_XOR);
+ case STRING:
+ if (value->IsString()) return STRING;
break;
- case Token::SHR:
- builtin = builtins->javascript_builtin(Builtins::SHR);
+ case UNIQUE_NAME:
+ if (value->IsUniqueName()) return UNIQUE_NAME;
break;
- case Token::SAR:
- builtin = builtins->javascript_builtin(Builtins::SAR);
+ case OBJECT:
+ if (value->IsJSObject()) return OBJECT;
break;
- case Token::SHL:
- builtin = builtins->javascript_builtin(Builtins::SHL);
+ case GENERIC:
break;
- default:
+ case KNOWN_OBJECT:
UNREACHABLE();
+ break;
}
-
- Handle<JSFunction> builtin_function(JSFunction::cast(builtin), isolate);
-
- bool caught_exception;
- Handle<Object> builtin_args[] = { right };
- Handle<Object> result = Execution::Call(builtin_function,
- left,
- ARRAY_SIZE(builtin_args),
- builtin_args,
- &caught_exception);
- if (caught_exception) {
- return Failure::Exception();
- }
- return *result;
-}
-
-
-Code* CompareIC::GetRawUninitialized(Token::Value op) {
- ICCompareStub stub(op, UNINITIALIZED);
- Code* code = NULL;
- CHECK(stub.FindCodeInCache(&code));
- return code;
-}
-
-
-Handle<Code> CompareIC::GetUninitialized(Token::Value op) {
- ICCompareStub stub(op, UNINITIALIZED);
- return stub.GetCode();
-}
-
-
-CompareIC::State CompareIC::ComputeState(Code* target) {
- int key = target->major_key();
- if (key == CodeStub::Compare) return GENERIC;
- ASSERT(key == CodeStub::CompareIC);
- return static_cast<State>(target->compare_state());
-}
-
-
-Token::Value CompareIC::ComputeOperation(Code* target) {
- ASSERT(target->major_key() == CodeStub::CompareIC);
- return static_cast<Token::Value>(
- target->compare_operation() + Token::EQ);
-}
-
-
-const char* CompareIC::GetStateName(State state) {
- switch (state) {
- case UNINITIALIZED: return "UNINITIALIZED";
- case SMIS: return "SMIS";
- case HEAP_NUMBERS: return "HEAP_NUMBERS";
- case OBJECTS: return "OBJECTS";
- case KNOWN_OBJECTS: return "KNOWN_OBJECTS";
- case SYMBOLS: return "SYMBOLS";
- case STRINGS: return "STRINGS";
- case GENERIC: return "GENERIC";
- default:
- UNREACHABLE();
- return NULL;
- }
+ return GENERIC;
}
-CompareIC::State CompareIC::TargetState(State state,
+CompareIC::State CompareIC::TargetState(State old_state,
+ State old_left,
+ State old_right,
bool has_inlined_smi_code,
Handle<Object> x,
Handle<Object> y) {
- switch (state) {
+ switch (old_state) {
case UNINITIALIZED:
- if (x->IsSmi() && y->IsSmi()) return SMIS;
- if (x->IsNumber() && y->IsNumber()) return HEAP_NUMBERS;
+ if (x->IsSmi() && y->IsSmi()) return SMI;
+ if (x->IsNumber() && y->IsNumber()) return NUMBER;
if (Token::IsOrderedRelationalCompareOp(op_)) {
// Ordered comparisons treat undefined as NaN, so the
- // HEAP_NUMBER stub will do the right thing.
+ // NUMBER stub will do the right thing.
if ((x->IsNumber() && y->IsUndefined()) ||
(y->IsNumber() && x->IsUndefined())) {
- return HEAP_NUMBERS;
+ return NUMBER;
}
}
- if (x->IsSymbol() && y->IsSymbol()) {
- // We compare symbols as strings if we need to determine
+ if (x->IsInternalizedString() && y->IsInternalizedString()) {
+ // We compare internalized strings as plain ones if we need to determine
// the order in a non-equality compare.
- return Token::IsEqualityOp(op_) ? SYMBOLS : STRINGS;
+ return Token::IsEqualityOp(op_) ? INTERNALIZED_STRING : STRING;
}
- if (x->IsString() && y->IsString()) return STRINGS;
+ if (x->IsString() && y->IsString()) return STRING;
if (!Token::IsEqualityOp(op_)) return GENERIC;
+ if (x->IsUniqueName() && y->IsUniqueName()) return UNIQUE_NAME;
if (x->IsJSObject() && y->IsJSObject()) {
if (Handle<JSObject>::cast(x)->map() ==
- Handle<JSObject>::cast(y)->map() &&
- Token::IsEqualityOp(op_)) {
- return KNOWN_OBJECTS;
+ Handle<JSObject>::cast(y)->map()) {
+ return KNOWN_OBJECT;
} else {
- return OBJECTS;
+ return OBJECT;
}
}
return GENERIC;
- case SMIS:
- return has_inlined_smi_code && x->IsNumber() && y->IsNumber()
- ? HEAP_NUMBERS
- : GENERIC;
- case SYMBOLS:
+ case SMI:
+ return x->IsNumber() && y->IsNumber() ? NUMBER : GENERIC;
+ case INTERNALIZED_STRING:
ASSERT(Token::IsEqualityOp(op_));
- return x->IsString() && y->IsString() ? STRINGS : GENERIC;
- case HEAP_NUMBERS:
- case STRINGS:
- case OBJECTS:
- case KNOWN_OBJECTS:
+ if (x->IsString() && y->IsString()) return STRING;
+ if (x->IsUniqueName() && y->IsUniqueName()) return UNIQUE_NAME;
+ return GENERIC;
+ case NUMBER:
+ // If the failure was due to one side changing from smi to heap number,
+ // then keep the state (if other changed at the same time, we will get
+ // a second miss and then go to generic).
+ if (old_left == SMI && x->IsHeapNumber()) return NUMBER;
+ if (old_right == SMI && y->IsHeapNumber()) return NUMBER;
+ return GENERIC;
+ case KNOWN_OBJECT:
+ ASSERT(Token::IsEqualityOp(op_));
+ if (x->IsJSObject() && y->IsJSObject()) return OBJECT;
+ return GENERIC;
+ case STRING:
+ case UNIQUE_NAME:
+ case OBJECT:
case GENERIC:
return GENERIC;
}
UNREACHABLE();
- return GENERIC;
+ return GENERIC; // Make the compiler happy.
+}
+
+
+void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
+ HandleScope scope(isolate());
+ State previous_left, previous_right, previous_state;
+ ICCompareStub::DecodeMinorKey(target()->stub_info(), &previous_left,
+ &previous_right, &previous_state, NULL);
+ State new_left = NewInputState(previous_left, x);
+ State new_right = NewInputState(previous_right, y);
+ State state = TargetState(previous_state, previous_left, previous_right,
+ HasInlinedSmiCode(address()), x, y);
+ ICCompareStub stub(op_, new_left, new_right, state);
+ if (state == KNOWN_OBJECT) {
+ stub.set_known_map(
+ Handle<Map>(Handle<JSObject>::cast(x)->map(), isolate()));
+ }
+ set_target(*stub.GetCode(isolate()));
+
+#ifdef DEBUG
+ if (FLAG_trace_ic) {
+ PrintF("[CompareIC in ");
+ JavaScriptFrame::PrintTop(isolate(), stdout, false, true);
+ PrintF(" ((%s+%s=%s)->(%s+%s=%s))#%s @ %p]\n",
+ GetStateName(previous_left),
+ GetStateName(previous_right),
+ GetStateName(previous_state),
+ GetStateName(new_left),
+ GetStateName(new_right),
+ GetStateName(state),
+ Token::Name(op_),
+ static_cast<void*>(*stub.GetCode(isolate())));
+ }
+#endif
+
+ // Activate inlined smi code.
+ if (previous_state == UNINITIALIZED) {
+ PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK);
+ }
}
-// Used from ic_<arch>.cc.
+// Used from ICCompareStub::GenerateMiss in code-stubs-<arch>.cc.
RUNTIME_FUNCTION(Code*, CompareIC_Miss) {
- NoHandleAllocation na;
+ HandleScope scope(isolate);
ASSERT(args.length() == 3);
CompareIC ic(isolate, static_cast<Token::Value>(args.smi_at(2)));
ic.UpdateCaches(args.at<Object>(0), args.at<Object>(1));
- return ic.target();
+ return ic.raw_target();
}
-RUNTIME_FUNCTION(MaybeObject*, ToBoolean_Patch) {
- ASSERT(args.length() == 3);
+void CompareNilIC::Clear(Address address, Code* target) {
+ if (IsCleared(target)) return;
+ Code::ExtraICState state = target->extended_extra_ic_state();
+
+ CompareNilICStub stub(state, HydrogenCodeStub::UNINITIALIZED);
+ stub.ClearState();
+
+ Code* code = NULL;
+ CHECK(stub.FindCodeInCache(&code, target->GetIsolate()));
+
+ SetTargetAtAddress(address, code);
+}
+
+MaybeObject* CompareNilIC::DoCompareNilSlow(NilValue nil,
+ Handle<Object> object) {
+ if (object->IsNull() || object->IsUndefined()) {
+ return Smi::FromInt(true);
+ }
+ return Smi::FromInt(object->IsUndetectableObject());
+}
+
+
+MaybeObject* CompareNilIC::CompareNil(Handle<Object> object) {
+ Code::ExtraICState extra_ic_state = target()->extended_extra_ic_state();
+
+ CompareNilICStub stub(extra_ic_state);
+
+ // Extract the current supported types from the patched IC and calculate what
+ // types must be supported as a result of the miss.
+ bool already_monomorphic = stub.IsMonomorphic();
+
+ stub.UpdateStatus(object);
+
+ NilValue nil = stub.GetNilValue();
+
+ // Find or create the specialized stub to support the new set of types.
+ Handle<Code> code;
+ if (stub.IsMonomorphic()) {
+ Handle<Map> monomorphic_map(already_monomorphic
+ ? target()->FindFirstMap()
+ : HeapObject::cast(*object)->map());
+ code = isolate()->stub_cache()->ComputeCompareNil(monomorphic_map, stub);
+ } else {
+ code = stub.GetCode(isolate());
+ }
+ set_target(*code);
+ return DoCompareNilSlow(nil, object);
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, CompareNilIC_Miss) {
HandleScope scope(isolate);
Handle<Object> object = args.at<Object>(0);
- Register tos = Register::from_code(args.smi_at(1));
- ToBooleanStub::Types old_types(args.smi_at(2));
+ CompareNilIC ic(isolate);
+ return ic.CompareNil(object);
+}
- ToBooleanStub::Types new_types(old_types);
- bool to_boolean_value = new_types.Record(object);
- old_types.TraceTransition(new_types);
- ToBooleanStub stub(tos, new_types);
- Handle<Code> code = stub.GetCode();
- ToBooleanIC ic(isolate);
- ic.patch(*code);
+RUNTIME_FUNCTION(MaybeObject*, Unreachable) {
+ UNREACHABLE();
+ CHECK(false);
+ return isolate->heap()->undefined_value();
+}
+
+
+Builtins::JavaScript BinaryOpIC::TokenToJSBuiltin(Token::Value op) {
+ switch (op) {
+ default:
+ UNREACHABLE();
+ case Token::ADD:
+ return Builtins::ADD;
+ break;
+ case Token::SUB:
+ return Builtins::SUB;
+ break;
+ case Token::MUL:
+ return Builtins::MUL;
+ break;
+ case Token::DIV:
+ return Builtins::DIV;
+ break;
+ case Token::MOD:
+ return Builtins::MOD;
+ break;
+ case Token::BIT_OR:
+ return Builtins::BIT_OR;
+ break;
+ case Token::BIT_AND:
+ return Builtins::BIT_AND;
+ break;
+ case Token::BIT_XOR:
+ return Builtins::BIT_XOR;
+ break;
+ case Token::SAR:
+ return Builtins::SAR;
+ break;
+ case Token::SHR:
+ return Builtins::SHR;
+ break;
+ case Token::SHL:
+ return Builtins::SHL;
+ break;
+ }
+}
+
+
+MaybeObject* ToBooleanIC::ToBoolean(Handle<Object> object,
+ Code::ExtraICState extra_ic_state) {
+ ToBooleanStub stub(extra_ic_state);
+ bool to_boolean_value = stub.UpdateStatus(object);
+ Handle<Code> code = stub.GetCode(isolate());
+ set_target(*code);
return Smi::FromInt(to_boolean_value ? 1 : 0);
}
-void ToBooleanIC::patch(Code* code) {
- set_target(code);
+RUNTIME_FUNCTION(MaybeObject*, ToBooleanIC_Miss) {
+ ASSERT(args.length() == 1);
+ HandleScope scope(isolate);
+ Handle<Object> object = args.at<Object>(0);
+ ToBooleanIC ic(isolate);
+ Code::ExtraICState extra_ic_state = ic.target()->extended_extra_ic_state();
+ return ic.ToBoolean(object, extra_ic_state);
}
diff --git a/deps/v8/src/ic.h b/deps/v8/src/ic.h
index 8767f988a..fde4bc77a 100644
--- a/deps/v8/src/ic.h
+++ b/deps/v8/src/ic.h
@@ -45,22 +45,23 @@ namespace internal {
ICU(KeyedCallIC_Miss) \
ICU(StoreIC_Miss) \
ICU(StoreIC_ArrayLength) \
+ ICU(StoreIC_Slow) \
ICU(SharedStoreIC_ExtendStorage) \
ICU(KeyedStoreIC_Miss) \
ICU(KeyedStoreIC_MissForceGeneric) \
ICU(KeyedStoreIC_Slow) \
/* Utilities for IC stubs. */ \
- ICU(LoadCallbackProperty) \
ICU(StoreCallbackProperty) \
ICU(LoadPropertyWithInterceptorOnly) \
ICU(LoadPropertyWithInterceptorForLoad) \
ICU(LoadPropertyWithInterceptorForCall) \
ICU(KeyedLoadPropertyWithInterceptor) \
ICU(StoreInterceptorProperty) \
- ICU(UnaryOp_Patch) \
- ICU(BinaryOp_Patch) \
ICU(CompareIC_Miss) \
- ICU(ToBoolean_Patch)
+ ICU(BinaryOpIC_Miss) \
+ ICU(CompareNilIC_Miss) \
+ ICU(Unreachable) \
+ ICU(ToBooleanIC_Miss)
//
// IC is the base class for LoadIC, StoreIC, CallIC, KeyedLoadIC,
// and KeyedStoreIC.
@@ -94,16 +95,20 @@ class IC {
virtual ~IC() {}
// Get the call-site target; used for determining the state.
- Code* target() const { return GetTargetAtAddress(address()); }
- inline Address address() const;
+ Handle<Code> target() const { return target_; }
+ Code* raw_target() const { return GetTargetAtAddress(address()); }
- virtual bool IsGeneric() const { return false; }
+ State state() const { return state_; }
+ inline Address address() const;
// Compute the current IC state based on the target stub, receiver and name.
- static State StateFrom(Code* target, Object* receiver, Object* name);
+ void UpdateState(Handle<Object> receiver, Handle<Object> name);
+ void MarkMonomorphicPrototypeFailure() {
+ state_ = MONOMORPHIC_PROTOTYPE_FAILURE;
+ }
// Clear the inline cache to initial state.
- static void Clear(Address address);
+ static void Clear(Isolate* isolate, Address address);
// Computes the reloc info for this IC. This is a fairly expensive
// operation as it has to search through the heap to find the code
@@ -112,16 +117,16 @@ class IC {
// Returns if this IC is for contextual (no explicit receiver)
// access to properties.
- bool IsContextual(Handle<Object> receiver) {
+ bool IsUndeclaredGlobal(Handle<Object> receiver) {
if (receiver->IsGlobalObject()) {
- return SlowIsContextual();
+ return SlowIsUndeclaredGlobal();
} else {
- ASSERT(!SlowIsContextual());
+ ASSERT(!SlowIsUndeclaredGlobal());
return false;
}
}
- bool SlowIsContextual() {
+ bool SlowIsUndeclaredGlobal() {
return ComputeMode() == RelocInfo::CODE_TARGET_CONTEXT;
}
@@ -129,11 +134,15 @@ class IC {
// These methods should not be called with undefined or null.
static inline InlineCacheHolderFlag GetCodeCacheForObject(Object* object,
JSObject* holder);
- static inline InlineCacheHolderFlag GetCodeCacheForObject(JSObject* object,
- JSObject* holder);
- static inline JSObject* GetCodeCacheHolder(Object* object,
+ static inline JSObject* GetCodeCacheHolder(Isolate* isolate,
+ Object* object,
InlineCacheHolderFlag holder);
+ static bool IsCleared(Code* code) {
+ InlineCacheState state = code->ic_state();
+ return state == UNINITIALIZED || state == PREMONOMORPHIC;
+ }
+
protected:
Address fp() const { return fp_; }
Address pc() const { return *pc_address_; }
@@ -146,15 +155,17 @@ class IC {
#endif
// Set the call-site target.
- void set_target(Code* code) { SetTargetAtAddress(address(), code); }
+ void set_target(Code* code) {
+ SetTargetAtAddress(address(), code);
+ target_set_ = true;
+ }
+
+ bool is_target_set() { return target_set_; }
#ifdef DEBUG
char TransitionMarkFromState(IC::State state);
- void TraceIC(const char* type,
- Handle<Object> name,
- State old_state,
- Code* new_target);
+ void TraceIC(const char* type, Handle<Object> name);
#endif
Failure* TypeError(const char* type,
@@ -167,6 +178,53 @@ class IC {
static inline void SetTargetAtAddress(Address address, Code* target);
static void PostPatching(Address address, Code* target, Code* old_target);
+ // Compute the handler either by compiling or by retrieving a cached version.
+ Handle<Code> ComputeHandler(LookupResult* lookup,
+ Handle<JSObject> receiver,
+ Handle<String> name,
+ Handle<Object> value = Handle<Code>::null());
+ virtual Handle<Code> CompileHandler(LookupResult* lookup,
+ Handle<JSObject> receiver,
+ Handle<String> name,
+ Handle<Object> value) {
+ UNREACHABLE();
+ return Handle<Code>::null();
+ }
+ void UpdateMonomorphicIC(Handle<HeapObject> receiver,
+ Handle<Code> handler,
+ Handle<String> name);
+
+ bool UpdatePolymorphicIC(Handle<HeapObject> receiver,
+ Handle<String> name,
+ Handle<Code> code);
+
+ void CopyICToMegamorphicCache(Handle<String> name);
+ bool IsTransitionedMapOfMonomorphicTarget(Map* receiver_map);
+ void PatchCache(Handle<HeapObject> receiver,
+ Handle<String> name,
+ Handle<Code> code);
+ virtual void UpdateMegamorphicCache(Map* map, Name* name, Code* code);
+ virtual Code::Kind kind() const {
+ UNREACHABLE();
+ return Code::STUB;
+ }
+ virtual Handle<Code> slow_stub() const {
+ UNREACHABLE();
+ return Handle<Code>::null();
+ }
+ virtual Handle<Code> megamorphic_stub() {
+ UNREACHABLE();
+ return Handle<Code>::null();
+ }
+ virtual Handle<Code> generic_stub() const {
+ UNREACHABLE();
+ return Handle<Code>::null();
+ }
+ virtual StrictModeFlag strict_mode() const { return kNonStrictMode; }
+ bool TryRemoveInvalidPrototypeDependentStub(Handle<Object> receiver,
+ Handle<String> name);
+ void TryRemoveInvalidHandlers(Handle<Map> map, Handle<String> name);
+
private:
// Frame pointer for the frame that uses (calls) the IC.
Address fp_;
@@ -179,6 +237,11 @@ class IC {
Isolate* isolate_;
+ // The original code target that missed.
+ Handle<Code> target_;
+ State state_;
+ bool target_set_;
+
DISALLOW_IMPLICIT_CONSTRUCTORS(IC);
};
@@ -205,31 +268,23 @@ class CallICBase: public IC {
class StringStubState: public BitField<StringStubFeedback, 1, 1> {};
// Returns a JSFunction or a Failure.
- MUST_USE_RESULT MaybeObject* LoadFunction(State state,
- Code::ExtraICState extra_ic_state,
- Handle<Object> object,
+ MUST_USE_RESULT MaybeObject* LoadFunction(Handle<Object> object,
Handle<String> name);
protected:
CallICBase(Code::Kind kind, Isolate* isolate)
: IC(EXTRA_CALL_FRAME, isolate), kind_(kind) {}
- bool TryUpdateExtraICState(LookupResult* lookup,
- Handle<Object> object,
- Code::ExtraICState* extra_ic_state);
+ virtual Code::ExtraICState extra_ic_state() { return Code::kNoExtraICState; }
// Compute a monomorphic stub if possible, otherwise return a null handle.
Handle<Code> ComputeMonomorphicStub(LookupResult* lookup,
- State state,
- Code::ExtraICState extra_state,
Handle<Object> object,
Handle<String> name);
// Update the inline cache and the global stub cache based on the lookup
// result.
void UpdateCaches(LookupResult* lookup,
- State state,
- Code::ExtraICState extra_ic_state,
Handle<Object> object,
Handle<String> name);
@@ -256,6 +311,9 @@ class CallICBase: public IC {
Code::Kind kind,
Code::ExtraICState extra_state);
+ virtual Handle<Code> megamorphic_stub();
+ virtual Handle<Code> pre_monomorphic_stub();
+
Code::Kind kind_;
friend class IC;
@@ -264,7 +322,9 @@ class CallICBase: public IC {
class CallIC: public CallICBase {
public:
- explicit CallIC(Isolate* isolate) : CallICBase(Code::CALL_IC, isolate) {
+ explicit CallIC(Isolate* isolate)
+ : CallICBase(Code::CALL_IC, isolate),
+ extra_ic_state_(target()->extra_ic_state()) {
ASSERT(target()->is_call_stub());
}
@@ -289,6 +349,13 @@ class CallIC: public CallICBase {
CallICBase::GenerateNormal(masm, argc);
GenerateMiss(masm, argc, Code::kNoExtraICState);
}
+ bool TryUpdateExtraICState(LookupResult* lookup, Handle<Object> object);
+
+ protected:
+ virtual Code::ExtraICState extra_ic_state() { return extra_ic_state_; }
+
+ private:
+ Code::ExtraICState extra_ic_state_;
};
@@ -299,8 +366,7 @@ class KeyedCallIC: public CallICBase {
ASSERT(target()->is_keyed_call_stub());
}
- MUST_USE_RESULT MaybeObject* LoadFunction(State state,
- Handle<Object> object,
+ MUST_USE_RESULT MaybeObject* LoadFunction(Handle<Object> object,
Handle<Object> key);
// Code generator routines.
@@ -321,14 +387,10 @@ class KeyedCallIC: public CallICBase {
class LoadIC: public IC {
public:
- explicit LoadIC(Isolate* isolate) : IC(NO_EXTRA_FRAME, isolate) {
- ASSERT(target()->is_load_stub());
+ explicit LoadIC(FrameDepth depth, Isolate* isolate) : IC(depth, isolate) {
+ ASSERT(target()->is_load_stub() || target()->is_keyed_load_stub());
}
- MUST_USE_RESULT MaybeObject* Load(State state,
- Handle<Object> object,
- Handle<String> name);
-
// Code generator routines.
static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); }
static void GeneratePreMonomorphic(MacroAssembler* masm) {
@@ -337,164 +399,83 @@ class LoadIC: public IC {
static void GenerateMiss(MacroAssembler* masm);
static void GenerateMegamorphic(MacroAssembler* masm);
static void GenerateNormal(MacroAssembler* masm);
+ static void GenerateRuntimeGetProperty(MacroAssembler* masm);
- // Specialized code generator routines.
- static void GenerateArrayLength(MacroAssembler* masm);
- static void GenerateStringLength(MacroAssembler* masm,
- bool support_wrappers);
- static void GenerateFunctionPrototype(MacroAssembler* masm);
+ MUST_USE_RESULT MaybeObject* Load(Handle<Object> object,
+ Handle<String> name);
+
+ protected:
+ virtual Code::Kind kind() const { return Code::LOAD_IC; }
+
+ virtual Handle<Code> slow_stub() const {
+ return isolate()->builtins()->LoadIC_Slow();
+ }
+
+ virtual Handle<Code> megamorphic_stub() {
+ return isolate()->builtins()->LoadIC_Megamorphic();
+ }
- private:
// Update the inline cache and the global stub cache based on the
// lookup result.
void UpdateCaches(LookupResult* lookup,
- State state,
Handle<Object> object,
Handle<String> name);
+ virtual Handle<Code> CompileHandler(LookupResult* lookup,
+ Handle<JSObject> receiver,
+ Handle<String> name,
+ Handle<Object> unused);
+
+ private:
// Stub accessors.
- Handle<Code> megamorphic_stub() {
- return isolate()->builtins()->LoadIC_Megamorphic();
+ static Handle<Code> initialize_stub(Isolate* isolate) {
+ return isolate->builtins()->LoadIC_Initialize();
}
- static Code* initialize_stub() {
- return Isolate::Current()->builtins()->builtin(
- Builtins::kLoadIC_Initialize);
- }
- Handle<Code> pre_monomorphic_stub() {
- return isolate()->builtins()->LoadIC_PreMonomorphic();
- }
-
- static void Clear(Address address, Code* target);
-
- friend class IC;
-};
-
-
-class KeyedIC: public IC {
- public:
- enum StubKind {
- LOAD,
- STORE_NO_TRANSITION,
- STORE_TRANSITION_SMI_TO_OBJECT,
- STORE_TRANSITION_SMI_TO_DOUBLE,
- STORE_TRANSITION_DOUBLE_TO_OBJECT,
- STORE_TRANSITION_HOLEY_SMI_TO_OBJECT,
- STORE_TRANSITION_HOLEY_SMI_TO_DOUBLE,
- STORE_TRANSITION_HOLEY_DOUBLE_TO_OBJECT,
- STORE_AND_GROW_NO_TRANSITION,
- STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT,
- STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE,
- STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT,
- STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_OBJECT,
- STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_DOUBLE,
- STORE_AND_GROW_TRANSITION_HOLEY_DOUBLE_TO_OBJECT
- };
- static const int kGrowICDelta = STORE_AND_GROW_NO_TRANSITION -
- STORE_NO_TRANSITION;
- STATIC_ASSERT(kGrowICDelta ==
- STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT -
- STORE_TRANSITION_SMI_TO_OBJECT);
- STATIC_ASSERT(kGrowICDelta ==
- STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE -
- STORE_TRANSITION_SMI_TO_DOUBLE);
- STATIC_ASSERT(kGrowICDelta ==
- STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT -
- STORE_TRANSITION_DOUBLE_TO_OBJECT);
-
- explicit KeyedIC(Isolate* isolate) : IC(NO_EXTRA_FRAME, isolate) {}
- virtual ~KeyedIC() {}
-
- static inline KeyedAccessGrowMode GetGrowModeFromStubKind(
- StubKind stub_kind) {
- return (stub_kind >= STORE_AND_GROW_NO_TRANSITION)
- ? ALLOW_JSARRAY_GROWTH
- : DO_NOT_ALLOW_JSARRAY_GROWTH;
- }
-
- static inline StubKind GetGrowStubKind(StubKind stub_kind) {
- ASSERT(stub_kind != LOAD);
- if (stub_kind < STORE_AND_GROW_NO_TRANSITION) {
- stub_kind = static_cast<StubKind>(static_cast<int>(stub_kind) +
- kGrowICDelta);
- }
- return stub_kind;
+ static Handle<Code> pre_monomorphic_stub(Isolate* isolate) {
+ return isolate->builtins()->LoadIC_PreMonomorphic();
}
- virtual Handle<Code> GetElementStubWithoutMapCheck(
- bool is_js_array,
- ElementsKind elements_kind,
- KeyedAccessGrowMode grow_mode) = 0;
-
- protected:
- virtual Handle<Code> string_stub() {
- return Handle<Code>::null();
+ virtual Handle<Code> pre_monomorphic_stub() {
+ return pre_monomorphic_stub(isolate());
}
- virtual Code::Kind kind() const = 0;
-
- Handle<Code> ComputeStub(Handle<JSObject> receiver,
- StubKind stub_kind,
- StrictModeFlag strict_mode,
- Handle<Code> default_stub);
+ Handle<Code> SimpleFieldLoad(int offset,
+ bool inobject = true,
+ Representation representation =
+ Representation::Tagged());
- virtual Handle<Code> ComputePolymorphicStub(
- MapHandleList* receiver_maps,
- StrictModeFlag strict_mode,
- KeyedAccessGrowMode grow_mode) = 0;
-
- Handle<Code> ComputeMonomorphicStubWithoutMapCheck(
- Handle<Map> receiver_map,
- StrictModeFlag strict_mode,
- KeyedAccessGrowMode grow_mode);
-
- private:
- void GetReceiverMapsForStub(Handle<Code> stub, MapHandleList* result);
+ static void Clear(Isolate* isolate, Address address, Code* target);
- Handle<Code> ComputeMonomorphicStub(Handle<Map> receiver_map,
- StubKind stub_kind,
- StrictModeFlag strict_mode,
- Handle<Code> default_stub);
-
- Handle<Map> ComputeTransitionedMap(Handle<JSObject> receiver,
- StubKind stub_kind);
-
- static bool IsTransitionStubKind(StubKind stub_kind) {
- return stub_kind > STORE_NO_TRANSITION &&
- stub_kind != STORE_AND_GROW_NO_TRANSITION;
- }
+ friend class IC;
+};
- static bool IsGrowStubKind(StubKind stub_kind) {
- return stub_kind >= STORE_AND_GROW_NO_TRANSITION;
- }
- static StubKind GetNoTransitionStubKind(StubKind stub_kind) {
- if (!IsTransitionStubKind(stub_kind)) return stub_kind;
- if (IsGrowStubKind(stub_kind)) return STORE_AND_GROW_NO_TRANSITION;
- return STORE_NO_TRANSITION;
- }
+enum ICMissMode {
+ MISS_FORCE_GENERIC,
+ MISS
};
-class KeyedLoadIC: public KeyedIC {
+class KeyedLoadIC: public LoadIC {
public:
- explicit KeyedLoadIC(Isolate* isolate) : KeyedIC(isolate) {
+ explicit KeyedLoadIC(FrameDepth depth, Isolate* isolate)
+ : LoadIC(depth, isolate) {
ASSERT(target()->is_keyed_load_stub());
}
- MUST_USE_RESULT MaybeObject* Load(State state,
- Handle<Object> object,
+ MUST_USE_RESULT MaybeObject* Load(Handle<Object> object,
Handle<Object> key,
- bool force_generic_stub);
+ ICMissMode force_generic);
// Code generator routines.
- static void GenerateMiss(MacroAssembler* masm, bool force_generic);
+ static void GenerateMiss(MacroAssembler* masm, ICMissMode force_generic);
static void GenerateRuntimeGetProperty(MacroAssembler* masm);
static void GenerateInitialize(MacroAssembler* masm) {
- GenerateMiss(masm, false);
+ GenerateMiss(masm, MISS);
}
static void GeneratePreMonomorphic(MacroAssembler* masm) {
- GenerateMiss(masm, false);
+ GenerateMiss(masm, MISS);
}
static void GenerateGeneric(MacroAssembler* masm);
static void GenerateString(MacroAssembler* masm);
@@ -508,46 +489,33 @@ class KeyedLoadIC: public KeyedIC {
static const int kSlowCaseBitFieldMask =
(1 << Map::kIsAccessCheckNeeded) | (1 << Map::kHasIndexedInterceptor);
- virtual Handle<Code> GetElementStubWithoutMapCheck(
- bool is_js_array,
- ElementsKind elements_kind,
- KeyedAccessGrowMode grow_mode);
-
- virtual bool IsGeneric() const {
- return target() == *generic_stub();
- }
-
protected:
virtual Code::Kind kind() const { return Code::KEYED_LOAD_IC; }
- virtual Handle<Code> ComputePolymorphicStub(MapHandleList* receiver_maps,
- StrictModeFlag strict_mode,
- KeyedAccessGrowMode grow_mode);
+ Handle<Code> LoadElementStub(Handle<JSObject> receiver);
- virtual Handle<Code> string_stub() {
- return isolate()->builtins()->KeyedLoadIC_String();
+ virtual Handle<Code> megamorphic_stub() {
+ return isolate()->builtins()->KeyedLoadIC_Generic();
+ }
+ virtual Handle<Code> generic_stub() const {
+ return isolate()->builtins()->KeyedLoadIC_Generic();
+ }
+ virtual Handle<Code> slow_stub() const {
+ return isolate()->builtins()->KeyedLoadIC_Slow();
}
- private:
- // Update the inline cache.
- void UpdateCaches(LookupResult* lookup,
- State state,
- Handle<Object> object,
- Handle<String> name);
+ virtual void UpdateMegamorphicCache(Map* map, Name* name, Code* code) { }
+ private:
// Stub accessors.
- static Code* initialize_stub() {
- return Isolate::Current()->builtins()->builtin(
- Builtins::kKeyedLoadIC_Initialize);
+ static Handle<Code> initialize_stub(Isolate* isolate) {
+ return isolate->builtins()->KeyedLoadIC_Initialize();
}
- Handle<Code> megamorphic_stub() {
- return isolate()->builtins()->KeyedLoadIC_Generic();
+ static Handle<Code> pre_monomorphic_stub(Isolate* isolate) {
+ return isolate->builtins()->KeyedLoadIC_PreMonomorphic();
}
- Handle<Code> generic_stub() const {
- return isolate()->builtins()->KeyedLoadIC_Generic();
- }
- Handle<Code> pre_monomorphic_stub() {
- return isolate()->builtins()->KeyedLoadIC_PreMonomorphic();
+ virtual Handle<Code> pre_monomorphic_stub() {
+ return pre_monomorphic_stub(isolate());
}
Handle<Code> indexed_interceptor_stub() {
return isolate()->builtins()->KeyedLoadIC_IndexedInterceptor();
@@ -555,8 +523,11 @@ class KeyedLoadIC: public KeyedIC {
Handle<Code> non_strict_arguments_stub() {
return isolate()->builtins()->KeyedLoadIC_NonStrictArguments();
}
+ Handle<Code> string_stub() {
+ return isolate()->builtins()->KeyedLoadIC_String();
+ }
- static void Clear(Address address, Code* target);
+ static void Clear(Isolate* isolate, Address address, Code* target);
friend class IC;
};
@@ -564,36 +535,93 @@ class KeyedLoadIC: public KeyedIC {
class StoreIC: public IC {
public:
- explicit StoreIC(Isolate* isolate) : IC(NO_EXTRA_FRAME, isolate) {
- ASSERT(target()->is_store_stub());
+ StoreIC(FrameDepth depth, Isolate* isolate)
+ : IC(depth, isolate),
+ strict_mode_(Code::GetStrictMode(target()->extra_ic_state())) {
+ ASSERT(target()->is_store_stub() || target()->is_keyed_store_stub());
}
- MUST_USE_RESULT MaybeObject* Store(State state,
- StrictModeFlag strict_mode,
- Handle<Object> object,
- Handle<String> name,
- Handle<Object> value);
+ virtual StrictModeFlag strict_mode() const { return strict_mode_; }
// Code generators for stub routines. Only called once at startup.
+ static void GenerateSlow(MacroAssembler* masm);
static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); }
+ static void GeneratePreMonomorphic(MacroAssembler* masm) {
+ GenerateMiss(masm);
+ }
static void GenerateMiss(MacroAssembler* masm);
static void GenerateMegamorphic(MacroAssembler* masm,
StrictModeFlag strict_mode);
- static void GenerateArrayLength(MacroAssembler* masm);
static void GenerateNormal(MacroAssembler* masm);
- static void GenerateGlobalProxy(MacroAssembler* masm,
- StrictModeFlag strict_mode);
+ static void GenerateRuntimeSetProperty(MacroAssembler* masm,
+ StrictModeFlag strict_mode);
+
+ MUST_USE_RESULT MaybeObject* Store(
+ Handle<Object> object,
+ Handle<String> name,
+ Handle<Object> value,
+ JSReceiver::StoreFromKeyed store_mode =
+ JSReceiver::CERTAINLY_NOT_STORE_FROM_KEYED);
+
+ protected:
+ virtual Code::Kind kind() const { return Code::STORE_IC; }
+ virtual Handle<Code> megamorphic_stub() {
+ if (strict_mode() == kStrictMode) {
+ return isolate()->builtins()->StoreIC_Megamorphic_Strict();
+ } else {
+ return isolate()->builtins()->StoreIC_Megamorphic();
+ }
+ }
+ // Stub accessors.
+ virtual Handle<Code> generic_stub() const {
+ if (strict_mode() == kStrictMode) {
+ return isolate()->builtins()->StoreIC_Generic_Strict();
+ } else {
+ return isolate()->builtins()->StoreIC_Generic();
+ }
+ }
+
+ virtual Handle<Code> slow_stub() const {
+ if (strict_mode() == kStrictMode) {
+ return isolate()->builtins()->StoreIC_Slow_Strict();
+ } else {
+ return isolate()->builtins()->StoreIC_Slow();
+ }
+ }
+
+ virtual Handle<Code> pre_monomorphic_stub() {
+ return pre_monomorphic_stub(isolate(), strict_mode());
+ }
+
+ static Handle<Code> pre_monomorphic_stub(Isolate* isolate,
+ StrictModeFlag strict_mode) {
+ if (strict_mode == kStrictMode) {
+ return isolate->builtins()->StoreIC_PreMonomorphic_Strict();
+ } else {
+ return isolate->builtins()->StoreIC_PreMonomorphic();
+ }
+ }
+
+ virtual Handle<Code> global_proxy_stub() {
+ if (strict_mode() == kStrictMode) {
+ return isolate()->builtins()->StoreIC_GlobalProxy_Strict();
+ } else {
+ return isolate()->builtins()->StoreIC_GlobalProxy();
+ }
+ }
- private:
// Update the inline cache and the global stub cache based on the
// lookup result.
void UpdateCaches(LookupResult* lookup,
- State state,
- StrictModeFlag strict_mode,
Handle<JSObject> receiver,
Handle<String> name,
Handle<Object> value);
+ virtual Handle<Code> CompileHandler(LookupResult* lookup,
+ Handle<JSObject> receiver,
+ Handle<String> name,
+ Handle<Object> value);
+ private:
void set_target(Code* code) {
// Strict mode must be preserved across IC patching.
ASSERT(Code::GetStrictMode(code->extra_ic_state()) ==
@@ -601,31 +629,18 @@ class StoreIC: public IC {
IC::set_target(code);
}
- // Stub accessors.
- Code* megamorphic_stub() {
- return isolate()->builtins()->builtin(
- Builtins::kStoreIC_Megamorphic);
- }
- Code* megamorphic_stub_strict() {
- return isolate()->builtins()->builtin(
- Builtins::kStoreIC_Megamorphic_Strict);
- }
- static Code* initialize_stub() {
- return Isolate::Current()->builtins()->builtin(
- Builtins::kStoreIC_Initialize);
- }
- static Code* initialize_stub_strict() {
- return Isolate::Current()->builtins()->builtin(
- Builtins::kStoreIC_Initialize_Strict);
- }
- Handle<Code> global_proxy_stub() {
- return isolate()->builtins()->StoreIC_GlobalProxy();
- }
- Handle<Code> global_proxy_stub_strict() {
- return isolate()->builtins()->StoreIC_GlobalProxy_Strict();
+ static Handle<Code> initialize_stub(Isolate* isolate,
+ StrictModeFlag strict_mode) {
+ if (strict_mode == kStrictMode) {
+ return isolate->builtins()->StoreIC_Initialize_Strict();
+ } else {
+ return isolate->builtins()->StoreIC_Initialize();
+ }
}
- static void Clear(Address address, Code* target);
+ static void Clear(Isolate* isolate, Address address, Code* target);
+
+ StrictModeFlag strict_mode_;
friend class IC;
};
@@ -643,122 +658,105 @@ enum KeyedStoreIncrementLength {
};
-class KeyedStoreIC: public KeyedIC {
+class KeyedStoreIC: public StoreIC {
public:
- explicit KeyedStoreIC(Isolate* isolate) : KeyedIC(isolate) {
+ KeyedStoreIC(FrameDepth depth, Isolate* isolate)
+ : StoreIC(depth, isolate) {
ASSERT(target()->is_keyed_store_stub());
}
- MUST_USE_RESULT MaybeObject* Store(State state,
- StrictModeFlag strict_mode,
- Handle<Object> object,
+ MUST_USE_RESULT MaybeObject* Store(Handle<Object> object,
Handle<Object> name,
Handle<Object> value,
- bool force_generic);
+ ICMissMode force_generic);
// Code generators for stub routines. Only called once at startup.
static void GenerateInitialize(MacroAssembler* masm) {
- GenerateMiss(masm, false);
+ GenerateMiss(masm, MISS);
}
- static void GenerateMiss(MacroAssembler* masm, bool force_generic);
+ static void GeneratePreMonomorphic(MacroAssembler* masm) {
+ GenerateMiss(masm, MISS);
+ }
+ static void GenerateMiss(MacroAssembler* masm, ICMissMode force_generic);
static void GenerateSlow(MacroAssembler* masm);
static void GenerateRuntimeSetProperty(MacroAssembler* masm,
StrictModeFlag strict_mode);
static void GenerateGeneric(MacroAssembler* masm, StrictModeFlag strict_mode);
static void GenerateNonStrictArguments(MacroAssembler* masm);
- static void GenerateTransitionElementsSmiToDouble(MacroAssembler* masm);
- static void GenerateTransitionElementsDoubleToObject(MacroAssembler* masm);
-
- virtual Handle<Code> GetElementStubWithoutMapCheck(
- bool is_js_array,
- ElementsKind elements_kind,
- KeyedAccessGrowMode grow_mode);
-
- virtual bool IsGeneric() const {
- return target() == *generic_stub() ||
- target() == *generic_stub_strict();
- }
protected:
virtual Code::Kind kind() const { return Code::KEYED_STORE_IC; }
- virtual Handle<Code> ComputePolymorphicStub(MapHandleList* receiver_maps,
- StrictModeFlag strict_mode,
- KeyedAccessGrowMode grow_mode);
+ virtual void UpdateMegamorphicCache(Map* map, Name* name, Code* code) { }
- private:
- // Update the inline cache.
- void UpdateCaches(LookupResult* lookup,
- State state,
- StrictModeFlag strict_mode,
- Handle<JSObject> receiver,
- Handle<String> name,
- Handle<Object> value);
+ virtual Handle<Code> pre_monomorphic_stub() {
+ return pre_monomorphic_stub(isolate(), strict_mode());
+ }
+ static Handle<Code> pre_monomorphic_stub(Isolate* isolate,
+ StrictModeFlag strict_mode) {
+ if (strict_mode == kStrictMode) {
+ return isolate->builtins()->KeyedStoreIC_PreMonomorphic_Strict();
+ } else {
+ return isolate->builtins()->KeyedStoreIC_PreMonomorphic();
+ }
+ }
+ virtual Handle<Code> slow_stub() const {
+ if (strict_mode() == kStrictMode) {
+ return isolate()->builtins()->KeyedStoreIC_Slow_Strict();
+ } else {
+ return isolate()->builtins()->KeyedStoreIC_Slow();
+ }
+ }
+ virtual Handle<Code> megamorphic_stub() {
+ if (strict_mode() == kStrictMode) {
+ return isolate()->builtins()->KeyedStoreIC_Generic_Strict();
+ } else {
+ return isolate()->builtins()->KeyedStoreIC_Generic();
+ }
+ }
+ Handle<Code> StoreElementStub(Handle<JSObject> receiver,
+ KeyedAccessStoreMode store_mode);
+
+ private:
void set_target(Code* code) {
// Strict mode must be preserved across IC patching.
- ASSERT(Code::GetStrictMode(code->extra_ic_state()) ==
- Code::GetStrictMode(target()->extra_ic_state()));
+ ASSERT(Code::GetStrictMode(code->extra_ic_state()) == strict_mode());
IC::set_target(code);
}
// Stub accessors.
- static Code* initialize_stub() {
- return Isolate::Current()->builtins()->builtin(
- Builtins::kKeyedStoreIC_Initialize);
- }
- static Code* initialize_stub_strict() {
- return Isolate::Current()->builtins()->builtin(
- Builtins::kKeyedStoreIC_Initialize_Strict);
- }
- Handle<Code> megamorphic_stub() {
- return isolate()->builtins()->KeyedStoreIC_Generic();
- }
- Handle<Code> megamorphic_stub_strict() {
- return isolate()->builtins()->KeyedStoreIC_Generic_Strict();
- }
- Handle<Code> generic_stub() const {
- return isolate()->builtins()->KeyedStoreIC_Generic();
+ static Handle<Code> initialize_stub(Isolate* isolate,
+ StrictModeFlag strict_mode) {
+ if (strict_mode == kStrictMode) {
+ return isolate->builtins()->KeyedStoreIC_Initialize_Strict();
+ } else {
+ return isolate->builtins()->KeyedStoreIC_Initialize();
+ }
}
- Handle<Code> generic_stub_strict() const {
- return isolate()->builtins()->KeyedStoreIC_Generic_Strict();
+
+ virtual Handle<Code> generic_stub() const {
+ if (strict_mode() == kStrictMode) {
+ return isolate()->builtins()->KeyedStoreIC_Generic_Strict();
+ } else {
+ return isolate()->builtins()->KeyedStoreIC_Generic();
+ }
}
+
Handle<Code> non_strict_arguments_stub() {
return isolate()->builtins()->KeyedStoreIC_NonStrictArguments();
}
- static void Clear(Address address, Code* target);
-
- StubKind GetStubKind(Handle<JSObject> receiver,
- Handle<Object> key,
- Handle<Object> value);
-
- friend class IC;
-};
-
-
-class UnaryOpIC: public IC {
- public:
- // sorted: increasingly more unspecific (ignoring UNINITIALIZED)
- // TODO(svenpanne) Using enums+switch is an antipattern, use a class instead.
- enum TypeInfo {
- UNINITIALIZED,
- SMI,
- HEAP_NUMBER,
- GENERIC
- };
-
- explicit UnaryOpIC(Isolate* isolate) : IC(NO_EXTRA_FRAME, isolate) { }
-
- void patch(Code* code);
-
- static const char* GetName(TypeInfo type_info);
+ static void Clear(Isolate* isolate, Address address, Code* target);
- static State ToState(TypeInfo type_info);
+ KeyedAccessStoreMode GetStoreMode(Handle<JSObject> receiver,
+ Handle<Object> key,
+ Handle<Object> value);
- static TypeInfo GetTypeInfo(Handle<Object> operand);
+ Handle<Map> ComputeTransitionedMap(Handle<JSObject> receiver,
+ KeyedAccessStoreMode store_mode);
- static TypeInfo ComputeNewType(TypeInfo type, TypeInfo previous);
+ friend class IC;
};
@@ -769,71 +767,87 @@ class BinaryOpIC: public IC {
UNINITIALIZED,
SMI,
INT32,
- HEAP_NUMBER,
+ NUMBER,
ODDBALL,
- BOTH_STRING, // Only used for addition operation.
- STRING, // Only used for addition operation. At least one string operand.
+ STRING, // Only used for addition operation.
GENERIC
};
- explicit BinaryOpIC(Isolate* isolate) : IC(NO_EXTRA_FRAME, isolate) { }
+ explicit BinaryOpIC(Isolate* isolate) : IC(EXTRA_CALL_FRAME, isolate) { }
- void patch(Code* code);
+ static Builtins::JavaScript TokenToJSBuiltin(Token::Value op);
static const char* GetName(TypeInfo type_info);
- static State ToState(TypeInfo type_info);
-
- static TypeInfo GetTypeInfo(Handle<Object> left, Handle<Object> right);
-
- static TypeInfo JoinTypes(TypeInfo x, TypeInfo y);
+ MUST_USE_RESULT MaybeObject* Transition(Handle<Object> left,
+ Handle<Object> right);
};
class CompareIC: public IC {
public:
+ // The type/state lattice is defined by the following inequations:
+ // UNINITIALIZED < ...
+ // ... < GENERIC
+ // SMI < NUMBER
+ // INTERNALIZED_STRING < STRING
+ // KNOWN_OBJECT < OBJECT
enum State {
UNINITIALIZED,
- SMIS,
- HEAP_NUMBERS,
- SYMBOLS,
- STRINGS,
- OBJECTS,
- KNOWN_OBJECTS,
+ SMI,
+ NUMBER,
+ STRING,
+ INTERNALIZED_STRING,
+ UNIQUE_NAME, // Symbol or InternalizedString
+ OBJECT, // JSObject
+ KNOWN_OBJECT, // JSObject with specific map (faster check)
GENERIC
};
+ static State NewInputState(State old_state, Handle<Object> value);
+
+ static Handle<Type> StateToType(Isolate* isolate,
+ State state,
+ Handle<Map> map = Handle<Map>());
+
+ static void StubInfoToType(int stub_minor_key,
+ Handle<Type>* left_type,
+ Handle<Type>* right_type,
+ Handle<Type>* overall_type,
+ Handle<Map> map,
+ Isolate* isolate);
+
CompareIC(Isolate* isolate, Token::Value op)
: IC(EXTRA_CALL_FRAME, isolate), op_(op) { }
// Update the inline cache for the given operands.
void UpdateCaches(Handle<Object> x, Handle<Object> y);
+
// Factory method for getting an uninitialized compare stub.
- static Handle<Code> GetUninitialized(Token::Value op);
+ static Handle<Code> GetUninitialized(Isolate* isolate, Token::Value op);
// Helper function for computing the condition for a compare operation.
static Condition ComputeCondition(Token::Value op);
- // Helper function for determining the state of a compare IC.
- static State ComputeState(Code* target);
-
- // Helper function for determining the operation a compare IC is for.
- static Token::Value ComputeOperation(Code* target);
-
static const char* GetStateName(State state);
private:
- State TargetState(State state, bool has_inlined_smi_code,
- Handle<Object> x, Handle<Object> y);
+ static bool HasInlinedSmiCode(Address address);
+
+ State TargetState(State old_state,
+ State old_left,
+ State old_right,
+ bool has_inlined_smi_code,
+ Handle<Object> x,
+ Handle<Object> y);
bool strict() const { return op_ == Token::EQ_STRICT; }
Condition GetCondition() const { return ComputeCondition(op_); }
- State GetState() { return ComputeState(target()); }
- static Code* GetRawUninitialized(Token::Value op);
+ static Code* GetRawUninitialized(Isolate* isolate, Token::Value op);
- static void Clear(Address address, Code* target);
+ static void Clear(Isolate* isolate, Address address, Code* target);
Token::Value op_;
@@ -841,11 +855,26 @@ class CompareIC: public IC {
};
+class CompareNilIC: public IC {
+ public:
+ explicit CompareNilIC(Isolate* isolate) : IC(EXTRA_CALL_FRAME, isolate) {}
+
+ MUST_USE_RESULT MaybeObject* CompareNil(Handle<Object> object);
+
+ static Handle<Code> GetUninitialized();
+
+ static void Clear(Address address, Code* target);
+
+ static MUST_USE_RESULT MaybeObject* DoCompareNilSlow(NilValue nil,
+ Handle<Object> object);
+};
+
+
class ToBooleanIC: public IC {
public:
- explicit ToBooleanIC(Isolate* isolate) : IC(NO_EXTRA_FRAME, isolate) { }
+ explicit ToBooleanIC(Isolate* isolate) : IC(EXTRA_CALL_FRAME, isolate) { }
- void patch(Code* code);
+ MaybeObject* ToBoolean(Handle<Object> object, Code::ExtraICState state);
};
@@ -853,6 +882,16 @@ class ToBooleanIC: public IC {
enum InlinedSmiCheck { ENABLE_INLINED_SMI_CHECK, DISABLE_INLINED_SMI_CHECK };
void PatchInlinedSmiCode(Address address, InlinedSmiCheck check);
+DECLARE_RUNTIME_FUNCTION(MaybeObject*, KeyedLoadIC_MissFromStubFailure);
+DECLARE_RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_MissFromStubFailure);
+DECLARE_RUNTIME_FUNCTION(MaybeObject*, UnaryOpIC_Miss);
+DECLARE_RUNTIME_FUNCTION(MaybeObject*, StoreIC_MissFromStubFailure);
+DECLARE_RUNTIME_FUNCTION(MaybeObject*, ElementsTransitionAndStoreIC_Miss);
+DECLARE_RUNTIME_FUNCTION(MaybeObject*, BinaryOpIC_Miss);
+DECLARE_RUNTIME_FUNCTION(MaybeObject*, CompareNilIC_Miss);
+DECLARE_RUNTIME_FUNCTION(MaybeObject*, ToBooleanIC_Miss);
+
+
} } // namespace v8::internal
#endif // V8_IC_H_
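
For orientation, the reworked CompareIC lattice above ranges from SMI up to GENERIC. A minimal stand-alone sketch of how an input value could be mapped onto these states (this only mirrors the intent of NewInputState; the Value struct and Classify helper are hypothetical and use plain C++ types instead of V8 handles):

// Illustrative model of the CompareIC input lattice; not V8 code.
enum class CompareState {
  SMI, NUMBER, STRING, INTERNALIZED_STRING,
  UNIQUE_NAME, OBJECT, KNOWN_OBJECT, GENERIC
};

struct Value {  // hypothetical tagged value used only for this sketch
  bool is_smi, is_heap_number, is_internalized_string,
       is_string, is_symbol, is_js_object;
};

static CompareState Classify(const Value& v) {
  if (v.is_smi) return CompareState::SMI;
  if (v.is_heap_number) return CompareState::NUMBER;
  if (v.is_internalized_string) return CompareState::INTERNALIZED_STRING;
  if (v.is_string) return CompareState::STRING;
  if (v.is_symbol) return CompareState::UNIQUE_NAME;  // Symbol or InternalizedString
  if (v.is_js_object) return CompareState::OBJECT;    // KNOWN_OBJECT additionally needs a map check
  return CompareState::GENERIC;
}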
diff --git a/deps/v8/src/icu_util.cc b/deps/v8/src/icu_util.cc
new file mode 100644
index 000000000..b9bd65edc
--- /dev/null
+++ b/deps/v8/src/icu_util.cc
@@ -0,0 +1,62 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "icu_util.h"
+
+#if defined(_WIN32) && defined(V8_I18N_SUPPORT)
+#include <windows.h>
+
+#include "unicode/putil.h"
+#include "unicode/udata.h"
+
+#define ICU_UTIL_DATA_SYMBOL "icudt" U_ICU_VERSION_SHORT "_dat"
+#define ICU_UTIL_DATA_SHARED_MODULE_NAME "icudt.dll"
+#endif
+
+namespace v8 {
+
+namespace internal {
+
+bool InitializeICU() {
+#if defined(_WIN32) && defined(V8_I18N_SUPPORT)
+ // We expect to find the ICU data module alongside the current module.
+ HMODULE module = LoadLibraryA(ICU_UTIL_DATA_SHARED_MODULE_NAME);
+ if (!module) return false;
+
+ FARPROC addr = GetProcAddress(module, ICU_UTIL_DATA_SYMBOL);
+ if (!addr) return false;
+
+ UErrorCode err = U_ZERO_ERROR;
+ udata_setCommonData(reinterpret_cast<void*>(addr), &err);
+ return err == U_ZERO_ERROR;
+#else
+  // On Mac and Linux the ICU data is bundled into the binary.
+ return true;
+#endif
+}
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/platform-posix.h b/deps/v8/src/icu_util.h
index 7a982ed2e..478abce50 100644
--- a/deps/v8/src/platform-posix.h
+++ b/deps/v8/src/icu_util.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -25,15 +25,18 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#ifndef V8_PLATFORM_POSIX_H_
-#define V8_PLATFORM_POSIX_H_
+
+#ifndef V8_ICU_UTIL_H_
+#define V8_ICU_UTIL_H_
namespace v8 {
+
namespace internal {
-// Used by platform implementation files during OS::PostSetUp().
-void POSIXPostSetUp();
+// Call this function to load ICU's data tables for the current process. This
+// function should be called before ICU is used.
+bool InitializeICU();
} } // namespace v8::internal
-#endif // V8_PLATFORM_POSIX_H_
+#endif // V8_ICU_UTIL_H_
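
A possible embedder-side use of the new helper, under the assumption that the embedder builds against v8/src and that on Windows icudt.dll sits next to the executable (hedged sketch, minimal error handling):

// Sketch: load the ICU data tables once, before any ICU-backed V8 feature is used.
#include "icu_util.h"   // v8::internal::InitializeICU()
#include <cstdio>

int main() {
  if (!v8::internal::InitializeICU()) {
    // On Windows this typically means icudt.dll or its data symbol was not found.
    std::fprintf(stderr, "ICU initialization failed\n");
    return 1;
  }
  // ... create isolates/contexts and run scripts as usual ...
  return 0;
}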
diff --git a/deps/v8/src/incremental-marking-inl.h b/deps/v8/src/incremental-marking-inl.h
index bbe9a9d20..1c30383d5 100644
--- a/deps/v8/src/incremental-marking-inl.h
+++ b/deps/v8/src/incremental-marking-inl.h
@@ -37,16 +37,27 @@ namespace internal {
bool IncrementalMarking::BaseRecordWrite(HeapObject* obj,
Object** slot,
Object* value) {
- MarkBit value_bit = Marking::MarkBitFrom(HeapObject::cast(value));
+ HeapObject* value_heap_obj = HeapObject::cast(value);
+ MarkBit value_bit = Marking::MarkBitFrom(value_heap_obj);
if (Marking::IsWhite(value_bit)) {
MarkBit obj_bit = Marking::MarkBitFrom(obj);
if (Marking::IsBlack(obj_bit)) {
- BlackToGreyAndUnshift(obj, obj_bit);
- RestartIfNotMarking();
+ MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
+ if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
+ if (chunk->IsLeftOfProgressBar(slot)) {
+ WhiteToGreyAndPush(value_heap_obj, value_bit);
+ RestartIfNotMarking();
+ } else {
+ return false;
+ }
+ } else {
+ BlackToGreyAndUnshift(obj, obj_bit);
+ RestartIfNotMarking();
+ return false;
+ }
+ } else {
+ return false;
}
-
- // Object is either grey or white. It will be scanned if survives.
- return false;
}
if (!is_compacting_) return false;
MarkBit obj_bit = Marking::MarkBitFrom(obj);
@@ -83,6 +94,10 @@ void IncrementalMarking::RecordWrites(HeapObject* obj) {
if (IsMarking()) {
MarkBit obj_bit = Marking::MarkBitFrom(obj);
if (Marking::IsBlack(obj_bit)) {
+ MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
+ if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
+ chunk->set_progress_bar(0);
+ }
BlackToGreyAndUnshift(obj, obj_bit);
RestartIfNotMarking();
}
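
The barrier above only has to act on a black holder: without a progress bar the whole object is pushed back to grey, and with one, only a write into the already-scanned prefix (left of the progress bar) needs the value greyed immediately. A stand-alone sketch of that decision, with hypothetical plain types in place of MemoryChunk and MarkBit:

// Illustrative decision helper; not V8 code.
struct WriteState {
  bool holder_is_black;
  bool has_progress_bar;             // large FixedArray scanned in chunks
  bool slot_left_of_progress_bar;    // slot lies in the already-scanned prefix
};

// True if the written value must be greyed right away; false if the
// remaining incremental scan will still reach the slot.
static bool MustGreyValueNow(const WriteState& s) {
  if (!s.holder_is_black) return false;   // grey/white holders are rescanned anyway
  if (!s.has_progress_bar) return false;  // the whole holder is re-greyed instead
  return s.slot_left_of_progress_bar;
}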
diff --git a/deps/v8/src/incremental-marking.cc b/deps/v8/src/incremental-marking.cc
index e51d6c136..4223dde21 100644
--- a/deps/v8/src/incremental-marking.cc
+++ b/deps/v8/src/incremental-marking.cc
@@ -54,7 +54,8 @@ IncrementalMarking::IncrementalMarking(Heap* heap)
should_hurry_(false),
marking_speed_(0),
allocated_(0),
- no_marking_scope_depth_(0) {
+ no_marking_scope_depth_(0),
+ unscanned_bytes_of_large_object_(0) {
}
@@ -78,7 +79,7 @@ void IncrementalMarking::RecordWriteSlow(HeapObject* obj,
void IncrementalMarking::RecordWriteFromCode(HeapObject* obj,
- Object* value,
+ Object** slot,
Isolate* isolate) {
ASSERT(obj->IsHeapObject());
IncrementalMarking* marking = isolate->heap()->incremental_marking();
@@ -94,7 +95,7 @@ void IncrementalMarking::RecordWriteFromCode(HeapObject* obj,
MemoryChunk::kWriteBarrierCounterGranularity);
}
- marking->RecordWrite(obj, NULL, value);
+ marking->RecordWrite(obj, slot, *slot);
}
@@ -175,20 +176,109 @@ void IncrementalMarking::RecordWriteIntoCodeSlow(HeapObject* obj,
}
+static void MarkObjectGreyDoNotEnqueue(Object* obj) {
+ if (obj->IsHeapObject()) {
+ HeapObject* heap_obj = HeapObject::cast(obj);
+ MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::cast(obj));
+ if (Marking::IsBlack(mark_bit)) {
+ MemoryChunk::IncrementLiveBytesFromGC(heap_obj->address(),
+ -heap_obj->Size());
+ }
+ Marking::AnyToGrey(mark_bit);
+ }
+}
+
+
+static inline void MarkBlackOrKeepGrey(HeapObject* heap_object,
+ MarkBit mark_bit,
+ int size) {
+ ASSERT(!Marking::IsImpossible(mark_bit));
+ if (mark_bit.Get()) return;
+ mark_bit.Set();
+ MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(), size);
+ ASSERT(Marking::IsBlack(mark_bit));
+}
+
+
+static inline void MarkBlackOrKeepBlack(HeapObject* heap_object,
+ MarkBit mark_bit,
+ int size) {
+ ASSERT(!Marking::IsImpossible(mark_bit));
+ if (Marking::IsBlack(mark_bit)) return;
+ Marking::MarkBlack(mark_bit);
+ MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(), size);
+ ASSERT(Marking::IsBlack(mark_bit));
+}
+
+
class IncrementalMarkingMarkingVisitor
: public StaticMarkingVisitor<IncrementalMarkingMarkingVisitor> {
public:
static void Initialize() {
StaticMarkingVisitor<IncrementalMarkingMarkingVisitor>::Initialize();
-
+ table_.Register(kVisitFixedArray, &VisitFixedArrayIncremental);
+ table_.Register(kVisitNativeContext, &VisitNativeContextIncremental);
table_.Register(kVisitJSRegExp, &VisitJSRegExp);
}
- static void VisitJSWeakMap(Map* map, HeapObject* object) {
+ static const int kProgressBarScanningChunk = 32 * 1024;
+
+ static void VisitFixedArrayIncremental(Map* map, HeapObject* object) {
+ MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
+ // TODO(mstarzinger): Move setting of the flag to the allocation site of
+ // the array. The visitor should just check the flag.
+ if (FLAG_use_marking_progress_bar &&
+ chunk->owner()->identity() == LO_SPACE) {
+ chunk->SetFlag(MemoryChunk::HAS_PROGRESS_BAR);
+ }
+ if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
+ Heap* heap = map->GetHeap();
+ // When using a progress bar for large fixed arrays, scan only a chunk of
+ // the array and try to push it onto the marking deque again until it is
+ // fully scanned. Fall back to scanning it through to the end in case this
+ // fails because of a full deque.
+ int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
+ int start_offset = Max(FixedArray::BodyDescriptor::kStartOffset,
+ chunk->progress_bar());
+ int end_offset = Min(object_size,
+ start_offset + kProgressBarScanningChunk);
+ int already_scanned_offset = start_offset;
+ bool scan_until_end = false;
+ do {
+ VisitPointersWithAnchor(heap,
+ HeapObject::RawField(object, 0),
+ HeapObject::RawField(object, start_offset),
+ HeapObject::RawField(object, end_offset));
+ start_offset = end_offset;
+ end_offset = Min(object_size, end_offset + kProgressBarScanningChunk);
+ scan_until_end = heap->incremental_marking()->marking_deque()->IsFull();
+ } while (scan_until_end && start_offset < object_size);
+ chunk->set_progress_bar(start_offset);
+ if (start_offset < object_size) {
+ heap->incremental_marking()->marking_deque()->UnshiftGrey(object);
+ heap->incremental_marking()->NotifyIncompleteScanOfObject(
+ object_size - (start_offset - already_scanned_offset));
+ }
+ } else {
+ FixedArrayVisitor::Visit(map, object);
+ }
+ }
+
+ static void VisitNativeContextIncremental(Map* map, HeapObject* object) {
+ Context* context = Context::cast(object);
+
+ // We will mark cache black with a separate pass
+ // when we finish marking.
+ MarkObjectGreyDoNotEnqueue(context->normalized_map_cache());
+ VisitNativeContext(map, context);
+ }
+
+ static void VisitWeakCollection(Map* map, HeapObject* object) {
Heap* heap = map->GetHeap();
VisitPointers(heap,
- HeapObject::RawField(object, JSWeakMap::kPropertiesOffset),
- HeapObject::RawField(object, JSWeakMap::kSize));
+ HeapObject::RawField(object,
+ JSWeakCollection::kPropertiesOffset),
+ HeapObject::RawField(object, JSWeakCollection::kSize));
}
static void BeforeVisitingSharedFunctionInfo(HeapObject* object) {}
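
The progress-bar path above visits a large FixedArray in 32 KB slices, records how far it got on the page, and re-queues the object until the end is reached, falling back to a full scan only when the deque is full. A self-contained sketch of the same loop shape, with the visitor and deque reduced to stand-ins (the real code re-checks the deque after every slice):

// Stand-alone model of chunked, progress-bar driven scanning; not V8 code.
#include <algorithm>
#include <cstddef>

static const std::size_t kChunk = 32 * 1024;  // mirrors kProgressBarScanningChunk

// Scans bytes starting at `progress`; normally one chunk per call, but keeps
// going to the end when the work queue is full. Returns the new progress;
// the caller re-queues the object while the result is < object_size.
static std::size_t ScanChunked(std::size_t progress,
                               std::size_t object_size,
                               bool deque_is_full) {
  std::size_t end = std::min(object_size, progress + kChunk);
  do {
    // ... visit pointers in [progress, end) ...
    progress = end;
    end = std::min(object_size, end + kChunk);
  } while (deque_is_full && progress < object_size);
  return progress;
}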
@@ -211,15 +301,25 @@ class IncrementalMarkingMarkingVisitor
}
}
+ INLINE(static void VisitPointersWithAnchor(Heap* heap,
+ Object** anchor,
+ Object** start,
+ Object** end)) {
+ for (Object** p = start; p < end; p++) {
+ Object* obj = *p;
+ if (obj->NonFailureIsHeapObject()) {
+ heap->mark_compact_collector()->RecordSlot(anchor, p, obj);
+ MarkObject(heap, obj);
+ }
+ }
+ }
+
// Marks the object grey and pushes it on the marking stack.
INLINE(static void MarkObject(Heap* heap, Object* obj)) {
HeapObject* heap_object = HeapObject::cast(obj);
MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
if (mark_bit.data_only()) {
- if (heap->incremental_marking()->MarkBlackOrKeepGrey(mark_bit)) {
- MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(),
- heap_object->Size());
- }
+ MarkBlackOrKeepGrey(heap_object, mark_bit, heap_object->Size());
} else if (Marking::IsWhite(mark_bit)) {
heap->incremental_marking()->WhiteToGreyAndPush(heap_object, mark_bit);
}
@@ -243,10 +343,9 @@ class IncrementalMarkingMarkingVisitor
class IncrementalMarkingRootMarkingVisitor : public ObjectVisitor {
public:
- IncrementalMarkingRootMarkingVisitor(Heap* heap,
- IncrementalMarking* incremental_marking)
- : heap_(heap),
- incremental_marking_(incremental_marking) {
+ explicit IncrementalMarkingRootMarkingVisitor(
+ IncrementalMarking* incremental_marking)
+ : incremental_marking_(incremental_marking) {
}
void VisitPointer(Object** p) {
@@ -265,10 +364,7 @@ class IncrementalMarkingRootMarkingVisitor : public ObjectVisitor {
HeapObject* heap_object = HeapObject::cast(obj);
MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
if (mark_bit.data_only()) {
- if (incremental_marking_->MarkBlackOrKeepGrey(mark_bit)) {
- MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(),
- heap_object->Size());
- }
+ MarkBlackOrKeepGrey(heap_object, mark_bit, heap_object->Size());
} else {
if (Marking::IsWhite(mark_bit)) {
incremental_marking_->WhiteToGreyAndPush(heap_object, mark_bit);
@@ -276,7 +372,6 @@ class IncrementalMarkingRootMarkingVisitor : public ObjectVisitor {
}
}
- Heap* heap_;
IncrementalMarking* incremental_marking_;
};
@@ -300,6 +395,7 @@ void IncrementalMarking::SetOldSpacePageFlags(MemoryChunk* chunk,
chunk->SetFlag(MemoryChunk::RESCAN_ON_EVACUATION);
}
} else if (chunk->owner()->identity() == CELL_SPACE ||
+ chunk->owner()->identity() == PROPERTY_CELL_SPACE ||
chunk->scan_on_scavenge()) {
chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
@@ -346,6 +442,7 @@ void IncrementalMarking::DeactivateIncrementalWriteBarrier() {
DeactivateIncrementalWriteBarrierForSpace(heap_->old_pointer_space());
DeactivateIncrementalWriteBarrierForSpace(heap_->old_data_space());
DeactivateIncrementalWriteBarrierForSpace(heap_->cell_space());
+ DeactivateIncrementalWriteBarrierForSpace(heap_->property_cell_space());
DeactivateIncrementalWriteBarrierForSpace(heap_->map_space());
DeactivateIncrementalWriteBarrierForSpace(heap_->code_space());
DeactivateIncrementalWriteBarrierForSpace(heap_->new_space());
@@ -380,6 +477,7 @@ void IncrementalMarking::ActivateIncrementalWriteBarrier() {
ActivateIncrementalWriteBarrier(heap_->old_pointer_space());
ActivateIncrementalWriteBarrier(heap_->old_data_space());
ActivateIncrementalWriteBarrier(heap_->cell_space());
+ ActivateIncrementalWriteBarrier(heap_->property_cell_space());
ActivateIncrementalWriteBarrier(heap_->map_space());
ActivateIncrementalWriteBarrier(heap_->code_space());
ActivateIncrementalWriteBarrier(heap_->new_space());
@@ -400,10 +498,16 @@ bool IncrementalMarking::WorthActivating() {
// debug tests run with incremental marking and some without.
static const intptr_t kActivationThreshold = 0;
#endif
-
+ // Only start incremental marking in a safe state: 1) when expose GC is
+ // deactivated, 2) when incremental marking is turned on, 3) when we are
+ // currently not in a GC, and 4) when we are currently not serializing
+ // or deserializing the heap.
return !FLAG_expose_gc &&
FLAG_incremental_marking &&
+ FLAG_incremental_marking_steps &&
+ heap_->gc_state() == Heap::NOT_IN_GC &&
!Serializer::enabled() &&
+ heap_->isolate()->IsInitialized() &&
heap_->PromotedSpaceSizeOfObjects() > kActivationThreshold;
}
@@ -460,6 +564,7 @@ void IncrementalMarking::EnsureMarkingDequeIsCommitted() {
}
}
+
void IncrementalMarking::UncommitMarkingDeque() {
if (state_ == STOPPED && marking_deque_memory_committed_) {
bool success = marking_deque_memory_->Uncommit(
@@ -471,18 +576,21 @@ void IncrementalMarking::UncommitMarkingDeque() {
}
-void IncrementalMarking::Start() {
+void IncrementalMarking::Start(CompactionFlag flag) {
if (FLAG_trace_incremental_marking) {
PrintF("[IncrementalMarking] Start\n");
}
ASSERT(FLAG_incremental_marking);
+ ASSERT(FLAG_incremental_marking_steps);
ASSERT(state_ == STOPPED);
+ ASSERT(heap_->gc_state() == Heap::NOT_IN_GC);
+ ASSERT(!Serializer::enabled());
+ ASSERT(heap_->isolate()->IsInitialized());
ResetStepCounters();
- if (heap_->old_pointer_space()->IsSweepingComplete() &&
- heap_->old_data_space()->IsSweepingComplete()) {
- StartMarking(ALLOW_COMPACTION);
+ if (heap_->IsSweepingComplete()) {
+ StartMarking(flag);
} else {
if (FLAG_trace_incremental_marking) {
PrintF("[IncrementalMarking] Start sweeping.\n");
@@ -494,19 +602,6 @@ void IncrementalMarking::Start() {
}
-static void MarkObjectGreyDoNotEnqueue(Object* obj) {
- if (obj->IsHeapObject()) {
- HeapObject* heap_obj = HeapObject::cast(obj);
- MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::cast(obj));
- if (Marking::IsBlack(mark_bit)) {
- MemoryChunk::IncrementLiveBytesFromGC(heap_obj->address(),
- -heap_obj->Size());
- }
- Marking::AnyToGrey(mark_bit);
- }
-}
-
-
void IncrementalMarking::StartMarking(CompactionFlag flag) {
if (FLAG_trace_incremental_marking) {
PrintF("[IncrementalMarking] Start marking\n");
@@ -550,9 +645,11 @@ void IncrementalMarking::StartMarking(CompactionFlag flag) {
}
// Mark strong roots grey.
- IncrementalMarkingRootMarkingVisitor visitor(heap_, this);
+ IncrementalMarkingRootMarkingVisitor visitor(this);
heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
+ heap_->mark_compact_collector()->MarkWeakObjectToCodeTable();
+
// Ready to start incremental marking.
if (FLAG_trace_incremental_marking) {
PrintF("[IncrementalMarking] Running\n");
@@ -606,8 +703,11 @@ void IncrementalMarking::UpdateMarkingDequeAfterScavenge() {
ASSERT(new_top != marking_deque_.bottom());
#ifdef DEBUG
MarkBit mark_bit = Marking::MarkBitFrom(obj);
+ MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
ASSERT(Marking::IsGrey(mark_bit) ||
- (obj->IsFiller() && Marking::IsWhite(mark_bit)));
+ (obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
+ (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) &&
+ Marking::IsBlack(mark_bit)));
#endif
}
}
@@ -619,46 +719,80 @@ void IncrementalMarking::UpdateMarkingDequeAfterScavenge() {
}
+void IncrementalMarking::VisitObject(Map* map, HeapObject* obj, int size) {
+ MarkBit map_mark_bit = Marking::MarkBitFrom(map);
+ if (Marking::IsWhite(map_mark_bit)) {
+ WhiteToGreyAndPush(map, map_mark_bit);
+ }
+
+ IncrementalMarkingMarkingVisitor::IterateBody(map, obj);
+
+ MarkBit mark_bit = Marking::MarkBitFrom(obj);
+#if ENABLE_SLOW_ASSERTS
+ MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
+ SLOW_ASSERT(Marking::IsGrey(mark_bit) ||
+ (obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
+ (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) &&
+ Marking::IsBlack(mark_bit)));
+#endif
+ MarkBlackOrKeepBlack(obj, mark_bit, size);
+}
+
+
+void IncrementalMarking::ProcessMarkingDeque(intptr_t bytes_to_process) {
+ Map* filler_map = heap_->one_pointer_filler_map();
+ while (!marking_deque_.IsEmpty() && bytes_to_process > 0) {
+ HeapObject* obj = marking_deque_.Pop();
+
+ // Explicitly skip one word fillers. Incremental markbit patterns are
+ // correct only for objects that occupy at least two words.
+ Map* map = obj->map();
+ if (map == filler_map) continue;
+
+ int size = obj->SizeFromMap(map);
+ unscanned_bytes_of_large_object_ = 0;
+ VisitObject(map, obj, size);
+ bytes_to_process -= (size - unscanned_bytes_of_large_object_);
+ }
+}
+
+
+void IncrementalMarking::ProcessMarkingDeque() {
+ Map* filler_map = heap_->one_pointer_filler_map();
+ while (!marking_deque_.IsEmpty()) {
+ HeapObject* obj = marking_deque_.Pop();
+
+ // Explicitly skip one word fillers. Incremental markbit patterns are
+ // correct only for objects that occupy at least two words.
+ Map* map = obj->map();
+ if (map == filler_map) continue;
+
+ VisitObject(map, obj, obj->SizeFromMap(map));
+ }
+}
+
+
void IncrementalMarking::Hurry() {
if (state() == MARKING) {
double start = 0.0;
- if (FLAG_trace_incremental_marking) {
- PrintF("[IncrementalMarking] Hurry\n");
+ if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
start = OS::TimeCurrentMillis();
+ if (FLAG_trace_incremental_marking) {
+ PrintF("[IncrementalMarking] Hurry\n");
+ }
}
// TODO(gc) hurry can mark objects it encounters black as mutator
// was stopped.
- Map* filler_map = heap_->one_pointer_filler_map();
- Map* native_context_map = heap_->native_context_map();
- while (!marking_deque_.IsEmpty()) {
- HeapObject* obj = marking_deque_.Pop();
-
- // Explicitly skip one word fillers. Incremental markbit patterns are
- // correct only for objects that occupy at least two words.
- Map* map = obj->map();
- if (map == filler_map) {
- continue;
- } else if (map == native_context_map) {
- // Native contexts have weak fields.
- IncrementalMarkingMarkingVisitor::VisitNativeContext(map, obj);
- } else {
- MarkBit map_mark_bit = Marking::MarkBitFrom(map);
- if (Marking::IsWhite(map_mark_bit)) {
- WhiteToGreyAndPush(map, map_mark_bit);
- }
- IncrementalMarkingMarkingVisitor::IterateBody(map, obj);
- }
-
- MarkBit mark_bit = Marking::MarkBitFrom(obj);
- ASSERT(!Marking::IsBlack(mark_bit));
- Marking::MarkBlack(mark_bit);
- MemoryChunk::IncrementLiveBytesFromGC(obj->address(), obj->Size());
- }
+ ProcessMarkingDeque();
state_ = COMPLETE;
- if (FLAG_trace_incremental_marking) {
+ if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
double end = OS::TimeCurrentMillis();
- PrintF("[IncrementalMarking] Complete (hurry), spent %d ms.\n",
- static_cast<int>(end - start));
+ double delta = end - start;
+ heap_->AddMarkingTime(delta);
+ if (FLAG_trace_incremental_marking) {
+ PrintF("[IncrementalMarking] Complete (hurry), spent %d ms.\n",
+ static_cast<int>(delta));
+ }
}
}
@@ -748,6 +882,17 @@ void IncrementalMarking::MarkingComplete(CompletionAction action) {
}
+void IncrementalMarking::OldSpaceStep(intptr_t allocated) {
+ if (IsStopped() && WorthActivating() && heap_->NextGCIsLikelyToBeFull()) {
+ // TODO(hpayer): Let's play safe for now, but compaction should be
+ // in principle possible.
+ Start(PREVENT_COMPACTION);
+ } else {
+ Step(allocated * kFastMarking / kInitialMarkingSpeed, GC_VIA_STACK_GUARD);
+ }
+}
+
+
void IncrementalMarking::Step(intptr_t allocated_bytes,
CompletionAction action) {
if (heap_->gc_state() != Heap::NOT_IN_GC ||
@@ -774,7 +919,7 @@ void IncrementalMarking::Step(intptr_t allocated_bytes,
// allocation), so to reduce the lumpiness we don't use the write barriers
// invoked since last step directly to determine the amount of work to do.
intptr_t bytes_to_process =
- marking_speed_ * Max(allocated_, kWriteBarriersInvokedThreshold);
+ marking_speed_ * Max(allocated_, write_barriers_invoked_since_last_step_);
allocated_ = 0;
write_barriers_invoked_since_last_step_ = 0;
@@ -782,53 +927,18 @@ void IncrementalMarking::Step(intptr_t allocated_bytes,
double start = 0;
- if (FLAG_trace_incremental_marking || FLAG_trace_gc) {
+ if (FLAG_trace_incremental_marking || FLAG_trace_gc ||
+ FLAG_print_cumulative_gc_stat) {
start = OS::TimeCurrentMillis();
}
if (state_ == SWEEPING) {
- if (heap_->AdvanceSweepers(static_cast<int>(bytes_to_process))) {
+ if (heap_->EnsureSweepersProgressed(static_cast<int>(bytes_to_process))) {
bytes_scanned_ = 0;
StartMarking(PREVENT_COMPACTION);
}
} else if (state_ == MARKING) {
- Map* filler_map = heap_->one_pointer_filler_map();
- Map* native_context_map = heap_->native_context_map();
- while (!marking_deque_.IsEmpty() && bytes_to_process > 0) {
- HeapObject* obj = marking_deque_.Pop();
-
- // Explicitly skip one word fillers. Incremental markbit patterns are
- // correct only for objects that occupy at least two words.
- Map* map = obj->map();
- if (map == filler_map) continue;
-
- int size = obj->SizeFromMap(map);
- bytes_to_process -= size;
- MarkBit map_mark_bit = Marking::MarkBitFrom(map);
- if (Marking::IsWhite(map_mark_bit)) {
- WhiteToGreyAndPush(map, map_mark_bit);
- }
-
- // TODO(gc) switch to static visitor instead of normal visitor.
- if (map == native_context_map) {
- // Native contexts have weak fields.
- Context* ctx = Context::cast(obj);
-
- // We will mark cache black with a separate pass
- // when we finish marking.
- MarkObjectGreyDoNotEnqueue(ctx->normalized_map_cache());
-
- IncrementalMarkingMarkingVisitor::VisitNativeContext(map, ctx);
- } else {
- IncrementalMarkingMarkingVisitor::IterateBody(map, obj);
- }
-
- MarkBit obj_mark_bit = Marking::MarkBitFrom(obj);
- SLOW_ASSERT(Marking::IsGrey(obj_mark_bit) ||
- (obj->IsFiller() && Marking::IsWhite(obj_mark_bit)));
- Marking::MarkBlack(obj_mark_bit);
- MemoryChunk::IncrementLiveBytesFromGC(obj->address(), size);
- }
+ ProcessMarkingDeque(bytes_to_process);
if (marking_deque_.IsEmpty()) MarkingComplete(action);
}
@@ -888,7 +998,7 @@ void IncrementalMarking::Step(intptr_t allocated_bytes,
PrintPID("Postponing speeding up marking until marking starts\n");
}
} else {
- marking_speed_ += kMarkingSpeedAccellerationInterval;
+ marking_speed_ += kMarkingSpeedAccelleration;
marking_speed_ = static_cast<int>(
Min(kMaxMarkingSpeed,
static_cast<intptr_t>(marking_speed_ * 1.3)));
@@ -898,12 +1008,14 @@ void IncrementalMarking::Step(intptr_t allocated_bytes,
}
}
- if (FLAG_trace_incremental_marking || FLAG_trace_gc) {
+ if (FLAG_trace_incremental_marking || FLAG_trace_gc ||
+ FLAG_print_cumulative_gc_stat) {
double end = OS::TimeCurrentMillis();
double delta = (end - start);
longest_step_ = Max(longest_step_, delta);
steps_took_ += delta;
steps_took_since_last_gc_ += delta;
+ heap_->AddMarkingTime(delta);
}
}
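
Two accounting details in the Step() changes are easy to miss: the per-step budget now scales with the larger of bytes allocated and write barriers invoked since the last step, and a partially scanned large object only charges the bytes it actually visited (its size minus unscanned_bytes_of_large_object_). A small stand-alone model of that budget math:

// Illustrative budget accounting; not V8 code.
#include <algorithm>
#include <stdint.h>

struct StepInputs {
  intptr_t marking_speed;
  intptr_t allocated;                // bytes allocated since the last step
  intptr_t write_barriers_invoked;   // barriers recorded since the last step
};

static intptr_t BytesToProcess(const StepInputs& s) {
  // Using the max of the two signals smooths out lumpy write-barrier activity.
  return s.marking_speed * std::max(s.allocated, s.write_barriers_invoked);
}

static intptr_t ChargeObject(intptr_t budget, intptr_t object_size,
                             intptr_t unscanned_bytes) {
  // Matches ProcessMarkingDeque(): only the scanned part is charged.
  return budget - (object_size - unscanned_bytes);
}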
diff --git a/deps/v8/src/incremental-marking.h b/deps/v8/src/incremental-marking.h
index 1a86fcd44..d47c300ef 100644
--- a/deps/v8/src/incremental-marking.h
+++ b/deps/v8/src/incremental-marking.h
@@ -75,7 +75,9 @@ class IncrementalMarking {
bool WorthActivating();
- void Start();
+ enum CompactionFlag { ALLOW_COMPACTION, PREVENT_COMPACTION };
+
+ void Start(CompactionFlag flag = ALLOW_COMPACTION);
void Stop();
@@ -110,10 +112,7 @@ class IncrementalMarking {
static const intptr_t kMarkingSpeedAccelleration = 2;
static const intptr_t kMaxMarkingSpeed = 1000;
- void OldSpaceStep(intptr_t allocated) {
- Step(allocated * kFastMarking / kInitialMarkingSpeed,
- GC_VIA_STACK_GUARD);
- }
+ void OldSpaceStep(intptr_t allocated);
void Step(intptr_t allocated, CompletionAction action);
@@ -127,7 +126,7 @@ class IncrementalMarking {
}
static void RecordWriteFromCode(HeapObject* obj,
- Object* value,
+ Object** slot,
Isolate* isolate);
static void RecordWriteForEvacuationFromCode(HeapObject* obj,
@@ -164,19 +163,6 @@ class IncrementalMarking {
inline void WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit);
- // Does white->black or keeps gray or black color. Returns true if converting
- // white to black.
- inline bool MarkBlackOrKeepGrey(MarkBit mark_bit) {
- ASSERT(!Marking::IsImpossible(mark_bit));
- if (mark_bit.Get()) {
- // Grey or black: Keep the color.
- return false;
- }
- mark_bit.Set();
- ASSERT(Marking::IsBlack(mark_bit));
- return true;
- }
-
inline int steps_count() {
return steps_count_;
}
@@ -234,13 +220,15 @@ class IncrementalMarking {
void UncommitMarkingDeque();
+ void NotifyIncompleteScanOfObject(int unscanned_bytes) {
+ unscanned_bytes_of_large_object_ = unscanned_bytes;
+ }
+
private:
int64_t SpaceLeftInOldSpace();
void ResetStepCounters();
- enum CompactionFlag { ALLOW_COMPACTION, PREVENT_COMPACTION };
-
void StartMarking(CompactionFlag flag);
void ActivateIncrementalWriteBarrier(PagedSpace* space);
@@ -259,6 +247,12 @@ class IncrementalMarking {
void EnsureMarkingDequeIsCommitted();
+ INLINE(void ProcessMarkingDeque());
+
+ INLINE(void ProcessMarkingDeque(intptr_t bytes_to_process));
+
+ INLINE(void VisitObject(Map* map, HeapObject* obj, int size));
+
Heap* heap_;
State state_;
@@ -284,6 +278,8 @@ class IncrementalMarking {
int no_marking_scope_depth_;
+ int unscanned_bytes_of_large_object_;
+
DISALLOW_IMPLICIT_CONSTRUCTORS(IncrementalMarking);
};
diff --git a/deps/v8/src/interface.cc b/deps/v8/src/interface.cc
index 336be82c6..603dfe9b8 100644
--- a/deps/v8/src/interface.cc
+++ b/deps/v8/src/interface.cc
@@ -35,8 +35,8 @@ namespace internal {
static bool Match(void* key1, void* key2) {
String* name1 = *static_cast<String**>(key1);
String* name2 = *static_cast<String**>(key2);
- ASSERT(name1->IsSymbol());
- ASSERT(name2->IsSymbol());
+ ASSERT(name1->IsInternalizedString());
+ ASSERT(name2->IsInternalizedString());
return name1 == name2;
}
@@ -170,6 +170,8 @@ void Interface::DoUnify(Interface* that, bool* ok, Zone* zone) {
ASSERT(that->forward_ == NULL);
ASSERT(!this->IsValue());
ASSERT(!that->IsValue());
+ ASSERT(this->index_ == -1);
+ ASSERT(that->index_ == -1);
ASSERT(*ok);
#ifdef DEBUG
@@ -194,15 +196,6 @@ void Interface::DoUnify(Interface* that, bool* ok, Zone* zone) {
return;
}
- // Merge instance.
- if (!that->instance_.is_null()) {
- if (!this->instance_.is_null() && *this->instance_ != *that->instance_) {
- *ok = false;
- return;
- }
- this->instance_ = that->instance_;
- }
-
// Merge interfaces.
this->flags_ |= that->flags_;
that->forward_ = this;
@@ -227,7 +220,7 @@ void Interface::Print(int n) {
} else if (IsValue()) {
PrintF("value\n");
} else if (IsModule()) {
- PrintF("module %s{", IsFrozen() ? "" : "(unresolved) ");
+ PrintF("module %d %s{", Index(), IsFrozen() ? "" : "(unresolved) ");
ZoneHashMap* map = Chase()->exports_;
if (map == NULL || map->occupancy() == 0) {
PrintF("}\n");
diff --git a/deps/v8/src/interface.h b/deps/v8/src/interface.h
index 94ef11ba5..f824a9a87 100644
--- a/deps/v8/src/interface.h
+++ b/deps/v8/src/interface.h
@@ -108,18 +108,18 @@ class Interface : public ZoneObject {
if (*ok) Chase()->flags_ |= MODULE;
}
- // Set associated instance object.
- void MakeSingleton(Handle<JSModule> instance, bool* ok) {
- *ok = IsModule() && Chase()->instance_.is_null();
- if (*ok) Chase()->instance_ = instance;
- }
-
// Do not allow any further refinements, directly or through unification.
void Freeze(bool* ok) {
*ok = IsValue() || IsModule();
if (*ok) Chase()->flags_ |= FROZEN;
}
+ // Assign an index.
+ void Allocate(int index) {
+ ASSERT(IsModule() && IsFrozen() && Chase()->index_ == -1);
+ Chase()->index_ = index;
+ }
+
// ---------------------------------------------------------------------------
// Accessors.
@@ -138,7 +138,23 @@ class Interface : public ZoneObject {
// Check whether this is closed (i.e. fully determined).
bool IsFrozen() { return Chase()->flags_ & FROZEN; }
- Handle<JSModule> Instance() { return Chase()->instance_; }
+ bool IsUnified(Interface* that) {
+ return Chase() == that->Chase()
+ || (this->IsValue() == that->IsValue() &&
+ this->IsConst() == that->IsConst());
+ }
+
+ int Length() {
+ ASSERT(IsModule() && IsFrozen());
+ ZoneHashMap* exports = Chase()->exports_;
+ return exports ? exports->occupancy() : 0;
+ }
+
+ // The context slot in the hosting global context pointing to this module.
+ int Index() {
+ ASSERT(IsModule() && IsFrozen());
+ return Chase()->index_;
+ }
// Look up an exported name. Returns NULL if not (yet) defined.
Interface* Lookup(Handle<String> name, Zone* zone);
@@ -194,12 +210,13 @@ class Interface : public ZoneObject {
int flags_;
Interface* forward_; // Unification link
ZoneHashMap* exports_; // Module exports and their types (allocated lazily)
- Handle<JSModule> instance_;
+ int index_;
explicit Interface(int flags)
: flags_(flags),
forward_(NULL),
- exports_(NULL) {
+ exports_(NULL),
+ index_(-1) {
#ifdef DEBUG
if (FLAG_print_interface_details)
PrintF("# Creating %p\n", static_cast<void*>(this));
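
With the JSModule instance handle gone, a frozen module interface is now identified by a context slot index assigned exactly once via Allocate(). A stand-alone sketch of such an allocation pass (the list type and driver are hypothetical; only the frozen / index == -1 precondition comes from the header above):

// Illustrative index allocation over frozen module interfaces; not V8 code.
#include <vector>

struct ModuleInterface {   // stand-in for v8::internal::Interface
  bool frozen = false;
  int index = -1;          // -1 until an index is allocated, as asserted above
};

static void AllocateIndices(std::vector<ModuleInterface*>& modules) {
  int next_index = 0;
  for (ModuleInterface* m : modules) {
    if (m->frozen && m->index == -1) m->index = next_index++;
  }
}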
diff --git a/deps/v8/src/interpreter-irregexp.cc b/deps/v8/src/interpreter-irregexp.cc
index 3a92b8455..2fc9fd302 100644
--- a/deps/v8/src/interpreter-irregexp.cc
+++ b/deps/v8/src/interpreter-irregexp.cc
@@ -68,14 +68,20 @@ static bool BackRefMatchesNoCase(Canonicalize* interp_canonicalize,
int from,
int current,
int len,
- Vector<const char> subject) {
+ Vector<const uint8_t> subject) {
for (int i = 0; i < len; i++) {
unsigned int old_char = subject[from++];
unsigned int new_char = subject[current++];
if (old_char == new_char) continue;
- if (old_char - 'A' <= 'Z' - 'A') old_char |= 0x20;
- if (new_char - 'A' <= 'Z' - 'A') new_char |= 0x20;
+ // Convert both characters to lower case.
+ old_char |= 0x20;
+ new_char |= 0x20;
if (old_char != new_char) return false;
+    // The 0x20 fold above is only valid for letters: reject characters that
+    // are not in the ASCII or Latin-1 letter ranges (247 is the division sign).
+ if (!(old_char - 'a' <= 'z' - 'a') &&
+ !(old_char - 224 <= 254 - 224 && old_char != 247)) {
+ return false;
+ }
}
return true;
}
@@ -612,12 +618,12 @@ RegExpImpl::IrregexpResult IrregexpInterpreter::Match(
int start_position) {
ASSERT(subject->IsFlat());
- AssertNoAllocation a;
+ DisallowHeapAllocation no_gc;
const byte* code_base = code_array->GetDataStartAddress();
uc16 previous_char = '\n';
String::FlatContent subject_content = subject->GetFlatContent();
if (subject_content.IsAscii()) {
- Vector<const char> subject_vector = subject_content.ToAsciiVector();
+ Vector<const uint8_t> subject_vector = subject_content.ToOneByteVector();
if (start_position != 0) previous_char = subject_vector[start_position - 1];
return RawMatch(isolate,
code_base,
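
The rewritten BackRefMatchesNoCase relies on OR-ing in 0x20, which folds 'A'..'Z' onto 'a'..'z' and the Latin-1 uppercase letters 0xC0..0xDE onto 0xE0..0xFE; 0xF7 must be excluded because it is the division sign (the fold of 0xD7, the multiplication sign), not a letter. A stand-alone version of the per-character comparison, for illustration only:

// Illustrative single-character comparison; the real code walks Vector<const uint8_t>.
static bool CharsMatchIgnoreCase(unsigned old_char, unsigned new_char) {
  if (old_char == new_char) return true;
  old_char |= 0x20;   // fold A-Z -> a-z and 0xC0-0xDE -> 0xE0-0xFE
  new_char |= 0x20;
  if (old_char != new_char) return false;
  // The fold is only valid for letters: ASCII a-z, or Latin-1 0xE0-0xFE
  // excluding 0xF7 (the division sign).
  return (old_char - 'a' <= 'z' - 'a') ||
         (old_char - 224 <= 254 - 224 && old_char != 247);
}
// Example: 0xC4 ('Ä') vs 0xE4 ('ä') -> true; 0xD7 ('×') vs 0xF7 ('÷') -> false.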
diff --git a/deps/v8/src/isolate-inl.h b/deps/v8/src/isolate-inl.h
index 9fb16fbe9..764bcb8bf 100644
--- a/deps/v8/src/isolate-inl.h
+++ b/deps/v8/src/isolate-inl.h
@@ -28,20 +28,19 @@
#ifndef V8_ISOLATE_INL_H_
#define V8_ISOLATE_INL_H_
-#include "isolate.h"
-
#include "debug.h"
+#include "isolate.h"
+#include "utils/random-number-generator.h"
namespace v8 {
namespace internal {
-SaveContext::SaveContext(Isolate* isolate) : prev_(isolate->save_context()) {
+SaveContext::SaveContext(Isolate* isolate)
+ : isolate_(isolate),
+ prev_(isolate->save_context()) {
if (isolate->context() != NULL) {
context_ = Handle<Context>(isolate->context());
-#if __GNUC_VERSION__ >= 40100 && __GNUC_VERSION__ < 40300
- dummy_ = Handle<Context>(isolate->context());
-#endif
}
isolate->set_save_context(this);
@@ -49,6 +48,11 @@ SaveContext::SaveContext(Isolate* isolate) : prev_(isolate->save_context()) {
}
+bool Isolate::IsCodePreAgingActive() {
+ return FLAG_optimize_for_size && FLAG_age_code && !IsDebuggerActive();
+}
+
+
bool Isolate::IsDebuggerActive() {
#ifdef ENABLE_DEBUGGER_SUPPORT
if (!NoBarrier_Load(&debugger_initialized_)) return false;
@@ -68,6 +72,13 @@ bool Isolate::DebuggerHasBreakPoints() {
}
+RandomNumberGenerator* Isolate::random_number_generator() {
+ if (random_number_generator_ == NULL) {
+ random_number_generator_ = new RandomNumberGenerator;
+ }
+ return random_number_generator_;
+}
+
} } // namespace v8::internal
#endif // V8_ISOLATE_INL_H_
diff --git a/deps/v8/src/isolate.cc b/deps/v8/src/isolate.cc
index 5a5293eb7..71cd30158 100644
--- a/deps/v8/src/isolate.cc
+++ b/deps/v8/src/isolate.cc
@@ -29,26 +29,31 @@
#include "v8.h"
+#include "allocation-inl.h"
#include "ast.h"
#include "bootstrapper.h"
#include "codegen.h"
#include "compilation-cache.h"
+#include "cpu-profiler.h"
#include "debug.h"
#include "deoptimizer.h"
#include "heap-profiler.h"
#include "hydrogen.h"
-#include "isolate.h"
+#include "isolate-inl.h"
#include "lithium-allocator.h"
#include "log.h"
#include "messages.h"
#include "platform.h"
#include "regexp-stack.h"
#include "runtime-profiler.h"
+#include "sampler.h"
#include "scopeinfo.h"
#include "serialize.h"
#include "simulator.h"
#include "spaces.h"
#include "stub-cache.h"
+#include "sweeper-thread.h"
+#include "utils/random-number-generator.h"
#include "version.h"
#include "vm-state-inl.h"
@@ -90,7 +95,7 @@ void ThreadLocalTop::InitializeInternal() {
simulator_ = NULL;
#endif
js_entry_sp_ = NULL;
- external_callback_ = NULL;
+ external_callback_scope_ = NULL;
current_vm_state_ = EXTERNAL;
try_catch_handler_address_ = NULL;
context_ = NULL;
@@ -105,6 +110,7 @@ void ThreadLocalTop::InitializeInternal() {
// is complete.
pending_exception_ = NULL;
has_pending_message_ = false;
+ rethrowing_message_ = false;
pending_message_obj_ = NULL;
pending_message_script_ = NULL;
scheduled_exception_ = NULL;
@@ -114,11 +120,7 @@ void ThreadLocalTop::InitializeInternal() {
void ThreadLocalTop::Initialize() {
InitializeInternal();
#ifdef USE_SIMULATOR
-#ifdef V8_TARGET_ARCH_ARM
simulator_ = Simulator::current(isolate_);
-#elif V8_TARGET_ARCH_MIPS
- simulator_ = Simulator::current(isolate_);
-#endif
#endif
thread_id_ = ThreadId::Current();
}
@@ -129,6 +131,22 @@ v8::TryCatch* ThreadLocalTop::TryCatchHandler() {
}
+int SystemThreadManager::NumberOfParallelSystemThreads(
+ ParallelSystemComponent type) {
+ int number_of_threads = Min(CPU::NumberOfProcessorsOnline(), kMaxThreads);
+ ASSERT(number_of_threads > 0);
+ if (number_of_threads == 1) {
+ return 0;
+ }
+ if (type == PARALLEL_SWEEPING) {
+ return number_of_threads;
+ } else if (type == CONCURRENT_SWEEPING) {
+ return number_of_threads - 1;
+ }
+ return 1;
+}
+
+
// Create a dummy thread that will wait forever on a semaphore. The only
// purpose for this thread is to have some stack area to save essential data
// into for use by a stacks only core dump (aka minidump).
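
The new thread-count policy above derives the number of helper threads from the online processor count, capped by kMaxThreads (defined in isolate.h, not shown here). A small worked model, with the cap passed in as a parameter since its value is an assumption; other component types fall back to a single thread in the real code:

// Illustrative model of NumberOfParallelSystemThreads(); not V8 code.
static int SweepingThreads(int online_cpus, int max_threads, bool concurrent) {
  int n = online_cpus < max_threads ? online_cpus : max_threads;
  if (n <= 1) return 0;             // a single core gets no helper threads
  return concurrent ? n - 1 : n;    // concurrent sweeping leaves one core for the mutator
}
// E.g. with 4 usable cores: parallel sweeping -> 4 threads, concurrent sweeping -> 3.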
@@ -202,8 +220,8 @@ class PreallocatedMemoryThread: public Thread {
PreallocatedMemoryThread()
: Thread("v8:PreallocMem"),
keep_running_(true),
- wait_for_ever_semaphore_(OS::CreateSemaphore(0)),
- data_ready_semaphore_(OS::CreateSemaphore(0)),
+ wait_for_ever_semaphore_(new Semaphore(0)),
+ data_ready_semaphore_(new Semaphore(0)),
data_(NULL),
length_(0) {
}
@@ -316,56 +334,72 @@ Isolate* Isolate::default_isolate_ = NULL;
Thread::LocalStorageKey Isolate::isolate_key_;
Thread::LocalStorageKey Isolate::thread_id_key_;
Thread::LocalStorageKey Isolate::per_isolate_thread_data_key_;
-Mutex* Isolate::process_wide_mutex_ = OS::CreateMutex();
+#ifdef DEBUG
+Thread::LocalStorageKey PerThreadAssertScopeBase::thread_local_key;
+#endif // DEBUG
+Mutex Isolate::process_wide_mutex_;
+// TODO(dcarney): Remove with default isolate.
+enum DefaultIsolateStatus {
+ kDefaultIsolateUninitialized,
+ kDefaultIsolateInitialized,
+ kDefaultIsolateCrashIfInitialized
+};
+static DefaultIsolateStatus default_isolate_status_
+ = kDefaultIsolateUninitialized;
Isolate::ThreadDataTable* Isolate::thread_data_table_ = NULL;
-
-
-Isolate::PerIsolateThreadData* Isolate::AllocatePerIsolateThreadData(
- ThreadId thread_id) {
- ASSERT(!thread_id.Equals(ThreadId::Invalid()));
- PerIsolateThreadData* per_thread = new PerIsolateThreadData(this, thread_id);
- {
- ScopedLock lock(process_wide_mutex_);
- ASSERT(thread_data_table_->Lookup(this, thread_id) == NULL);
- thread_data_table_->Insert(per_thread);
- ASSERT(thread_data_table_->Lookup(this, thread_id) == per_thread);
- }
- return per_thread;
-}
-
+Atomic32 Isolate::isolate_counter_ = 0;
Isolate::PerIsolateThreadData*
Isolate::FindOrAllocatePerThreadDataForThisThread() {
ThreadId thread_id = ThreadId::Current();
PerIsolateThreadData* per_thread = NULL;
{
- ScopedLock lock(process_wide_mutex_);
+ LockGuard<Mutex> lock_guard(&process_wide_mutex_);
per_thread = thread_data_table_->Lookup(this, thread_id);
if (per_thread == NULL) {
- per_thread = AllocatePerIsolateThreadData(thread_id);
+ per_thread = new PerIsolateThreadData(this, thread_id);
+ thread_data_table_->Insert(per_thread);
}
}
+ ASSERT(thread_data_table_->Lookup(this, thread_id) == per_thread);
return per_thread;
}
Isolate::PerIsolateThreadData* Isolate::FindPerThreadDataForThisThread() {
ThreadId thread_id = ThreadId::Current();
+ return FindPerThreadDataForThread(thread_id);
+}
+
+
+Isolate::PerIsolateThreadData* Isolate::FindPerThreadDataForThread(
+ ThreadId thread_id) {
PerIsolateThreadData* per_thread = NULL;
{
- ScopedLock lock(process_wide_mutex_);
+ LockGuard<Mutex> lock_guard(&process_wide_mutex_);
per_thread = thread_data_table_->Lookup(this, thread_id);
}
return per_thread;
}
+void Isolate::SetCrashIfDefaultIsolateInitialized() {
+ LockGuard<Mutex> lock_guard(&process_wide_mutex_);
+ CHECK(default_isolate_status_ != kDefaultIsolateInitialized);
+ default_isolate_status_ = kDefaultIsolateCrashIfInitialized;
+}
+
+
void Isolate::EnsureDefaultIsolate() {
- ScopedLock lock(process_wide_mutex_);
+ LockGuard<Mutex> lock_guard(&process_wide_mutex_);
+ CHECK(default_isolate_status_ != kDefaultIsolateCrashIfInitialized);
if (default_isolate_ == NULL) {
isolate_key_ = Thread::CreateThreadLocalKey();
thread_id_key_ = Thread::CreateThreadLocalKey();
per_isolate_thread_data_key_ = Thread::CreateThreadLocalKey();
+#ifdef DEBUG
+ PerThreadAssertScopeBase::thread_local_key = Thread::CreateThreadLocalKey();
+#endif // DEBUG
thread_data_table_ = new Isolate::ThreadDataTable();
default_isolate_ = new Isolate();
}
@@ -408,9 +442,9 @@ void Isolate::EnterDefaultIsolate() {
}
-Isolate* Isolate::GetDefaultIsolateForLocking() {
+v8::Isolate* Isolate::GetDefaultIsolateForLocking() {
EnsureDefaultIsolate();
- return default_isolate_;
+ return reinterpret_cast<v8::Isolate*>(default_isolate_);
}
@@ -426,11 +460,6 @@ char* Isolate::Iterate(ObjectVisitor* v, char* thread_storage) {
}
-void Isolate::IterateThread(ThreadVisitor* v) {
- v->VisitThread(this, thread_local_top());
-}
-
-
void Isolate::IterateThread(ThreadVisitor* v, char* t) {
ThreadLocalTop* thread = reinterpret_cast<ThreadLocalTop*>(t);
v->VisitThread(this, thread);
@@ -459,7 +488,8 @@ void Isolate::Iterate(ObjectVisitor* v, ThreadLocalTop* thread) {
block != NULL;
block = TRY_CATCH_FROM_ADDRESS(block->next_)) {
v->VisitPointer(BitCast<Object**>(&(block->exception_)));
- v->VisitPointer(BitCast<Object**>(&(block->message_)));
+ v->VisitPointer(BitCast<Object**>(&(block->message_obj_)));
+ v->VisitPointer(BitCast<Object**>(&(block->message_script_)));
}
// Iterate over pointers on native execution stack.
@@ -477,6 +507,7 @@ void Isolate::Iterate(ObjectVisitor* v) {
Iterate(v, current_t);
}
+
void Isolate::IterateDeferredHandles(ObjectVisitor* visitor) {
for (DeferredHandles* deferred = deferred_handles_head_;
deferred != NULL;
@@ -486,6 +517,29 @@ void Isolate::IterateDeferredHandles(ObjectVisitor* visitor) {
}
+#ifdef DEBUG
+bool Isolate::IsDeferredHandle(Object** handle) {
+ // Each DeferredHandles instance keeps the handles to one job in the
+ // concurrent recompilation queue, containing a list of blocks. Each block
+ // contains kHandleBlockSize handles except for the first block, which may
+ // not be fully filled.
+ // We iterate through all the blocks to see whether the argument handle
+ // belongs to one of the blocks. If so, it is deferred.
+ for (DeferredHandles* deferred = deferred_handles_head_;
+ deferred != NULL;
+ deferred = deferred->next_) {
+ List<Object**>* blocks = &deferred->blocks_;
+ for (int i = 0; i < blocks->length(); i++) {
+ Object** block_limit = (i == 0) ? deferred->first_block_limit_
+ : blocks->at(i) + kHandleBlockSize;
+ if (blocks->at(i) <= handle && handle < block_limit) return true;
+ }
+ }
+ return false;
+}
+#endif // DEBUG
+
+
void Isolate::RegisterTryCatchHandler(v8::TryCatch* that) {
// The ARM simulator has a separate JS stack. We therefore register
// the C++ try catch handler with the simulator and get back an
@@ -511,11 +565,11 @@ Handle<String> Isolate::StackTraceString() {
if (stack_trace_nesting_level_ == 0) {
stack_trace_nesting_level_++;
HeapStringAllocator allocator;
- StringStream::ClearMentionedObjectCache();
+ StringStream::ClearMentionedObjectCache(this);
StringStream accumulator(&allocator);
incomplete_message_ = &accumulator;
PrintStack(&accumulator);
- Handle<String> stack_trace = accumulator.ToString();
+ Handle<String> stack_trace = accumulator.ToString(this);
incomplete_message_ = NULL;
stack_trace_nesting_level_ = 0;
return stack_trace;
@@ -526,11 +580,11 @@ Handle<String> Isolate::StackTraceString() {
OS::PrintError(
"If you are lucky you may find a partial stack dump on stdout.\n\n");
incomplete_message_->OutputToStdOut();
- return factory()->empty_symbol();
+ return factory()->empty_string();
} else {
OS::Abort();
// Unreachable
- return factory()->empty_symbol();
+ return factory()->empty_string();
}
}
@@ -541,22 +595,125 @@ void Isolate::PushStackTraceAndDie(unsigned int magic,
unsigned int magic2) {
const int kMaxStackTraceSize = 8192;
Handle<String> trace = StackTraceString();
- char buffer[kMaxStackTraceSize];
+ uint8_t buffer[kMaxStackTraceSize];
int length = Min(kMaxStackTraceSize - 1, trace->length());
String::WriteToFlat(*trace, buffer, 0, length);
buffer[length] = '\0';
+ // TODO(dcarney): convert buffer to utf8?
OS::PrintError("Stacktrace (%x-%x) %p %p: %s\n",
magic, magic2,
static_cast<void*>(object), static_cast<void*>(map),
- buffer);
+ reinterpret_cast<char*>(buffer));
OS::Abort();
}
-void Isolate::CaptureAndSetCurrentStackTraceFor(Handle<JSObject> error_object) {
+// Determines whether the given stack frame should be displayed in
+// a stack trace. The caller is the error constructor that asked
+// for the stack trace to be collected. The first time a construct
+// call to this function is encountered it is skipped. The seen_caller
+// in/out parameter is used to remember if the caller has been seen
+// yet.
+static bool IsVisibleInStackTrace(StackFrame* raw_frame,
+ Object* caller,
+ bool* seen_caller) {
+ // Only display JS frames.
+ if (!raw_frame->is_java_script()) return false;
+ JavaScriptFrame* frame = JavaScriptFrame::cast(raw_frame);
+ JSFunction* fun = frame->function();
+ if ((fun == caller) && !(*seen_caller)) {
+ *seen_caller = true;
+ return false;
+ }
+ // Skip all frames until we've seen the caller.
+ if (!(*seen_caller)) return false;
+ // Also, skip non-visible built-in functions and any call with the builtins
+ // object as receiver, so as to not reveal either the builtins object or
+ // an internal function.
+ // The --builtins-in-stack-traces command line flag allows including
+ // internal call sites in the stack trace for debugging purposes.
+ if (!FLAG_builtins_in_stack_traces) {
+ if (frame->receiver()->IsJSBuiltinsObject() ||
+ (fun->IsBuiltin() && !fun->shared()->native())) {
+ return false;
+ }
+ }
+ return true;
+}
+
+
+Handle<JSArray> Isolate::CaptureSimpleStackTrace(Handle<JSObject> error_object,
+ Handle<Object> caller,
+ int limit) {
+ limit = Max(limit, 0); // Ensure that limit is not negative.
+ int initial_size = Min(limit, 10);
+ Handle<FixedArray> elements =
+ factory()->NewFixedArrayWithHoles(initial_size * 4 + 1);
+
+ // If the caller parameter is a function we skip frames until we're
+ // under it before starting to collect.
+ bool seen_caller = !caller->IsJSFunction();
+ // First element is reserved to store the number of non-strict frames.
+ int cursor = 1;
+ int frames_seen = 0;
+ int non_strict_frames = 0;
+ bool encountered_strict_function = false;
+ for (StackFrameIterator iter(this);
+ !iter.done() && frames_seen < limit;
+ iter.Advance()) {
+ StackFrame* raw_frame = iter.frame();
+ if (IsVisibleInStackTrace(raw_frame, *caller, &seen_caller)) {
+ frames_seen++;
+ JavaScriptFrame* frame = JavaScriptFrame::cast(raw_frame);
+ // Set initial size to the maximum inlining level + 1 for the outermost
+ // function.
+ List<FrameSummary> frames(FLAG_max_inlining_levels + 1);
+ frame->Summarize(&frames);
+ for (int i = frames.length() - 1; i >= 0; i--) {
+ if (cursor + 4 > elements->length()) {
+ int new_capacity = JSObject::NewElementsCapacity(elements->length());
+ Handle<FixedArray> new_elements =
+ factory()->NewFixedArrayWithHoles(new_capacity);
+ for (int i = 0; i < cursor; i++) {
+ new_elements->set(i, elements->get(i));
+ }
+ elements = new_elements;
+ }
+ ASSERT(cursor + 4 <= elements->length());
+
+ Handle<Object> recv = frames[i].receiver();
+ Handle<JSFunction> fun = frames[i].function();
+ Handle<Code> code = frames[i].code();
+ Handle<Smi> offset(Smi::FromInt(frames[i].offset()), this);
+ // The stack trace API should not expose receivers and function
+ // objects on frames deeper than the top-most one with a strict
+ // mode function. The number of non-strict frames is stored as
+ // first element in the result array.
+ if (!encountered_strict_function) {
+ if (!fun->shared()->is_classic_mode()) {
+ encountered_strict_function = true;
+ } else {
+ non_strict_frames++;
+ }
+ }
+ elements->set(cursor++, *recv);
+ elements->set(cursor++, *fun);
+ elements->set(cursor++, *code);
+ elements->set(cursor++, *offset);
+ }
+ }
+ }
+ elements->set(0, Smi::FromInt(non_strict_frames));
+ Handle<JSArray> result = factory()->NewJSArrayWithElements(elements);
+ result->set_length(Smi::FromInt(cursor));
+ return result;
+}
+
+
+void Isolate::CaptureAndSetDetailedStackTrace(Handle<JSObject> error_object) {
if (capture_stack_trace_for_uncaught_exceptions_) {
// Capture stack trace for a detailed exception message.
- Handle<String> key = factory()->hidden_stack_trace_symbol();
+ Handle<String> key = factory()->hidden_stack_trace_string();
Handle<JSArray> stack_trace = CaptureCurrentStackTrace(
stack_trace_for_uncaught_exceptions_frame_limit_,
stack_trace_for_uncaught_exceptions_options_);
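
CaptureSimpleStackTrace above flattens the trace into a FixedArray: slot 0 holds the count of non-strict (classic mode) frames, and every visible frame then contributes four slots in order — receiver, function, code object, pc offset — with the JSArray length set to the final cursor. A stand-alone note on that layout:

// Layout produced by CaptureSimpleStackTrace (illustrative summary):
//   elements[0]            number of non-strict frames (Smi)
//   elements[1 + 4*i + 0]  receiver of frame i
//   elements[1 + 4*i + 1]  JSFunction of frame i
//   elements[1 + 4*i + 2]  Code object of frame i
//   elements[1 + 4*i + 3]  pc offset of frame i (Smi)
static int FrameCount(int array_length) {
  return (array_length - 1) / 4;   // one header slot, four slots per frame
}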
@@ -571,17 +728,23 @@ Handle<JSArray> Isolate::CaptureCurrentStackTrace(
int limit = Max(frame_limit, 0);
Handle<JSArray> stack_trace = factory()->NewJSArray(frame_limit);
- Handle<String> column_key = factory()->LookupAsciiSymbol("column");
- Handle<String> line_key = factory()->LookupAsciiSymbol("lineNumber");
- Handle<String> script_key = factory()->LookupAsciiSymbol("scriptName");
- Handle<String> name_or_source_url_key =
- factory()->LookupAsciiSymbol("nameOrSourceURL");
+ Handle<String> column_key =
+ factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("column"));
+ Handle<String> line_key =
+ factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("lineNumber"));
+ Handle<String> script_id_key =
+ factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("scriptId"));
+ Handle<String> script_name_key =
+ factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("scriptName"));
Handle<String> script_name_or_source_url_key =
- factory()->LookupAsciiSymbol("scriptNameOrSourceURL");
- Handle<String> function_key = factory()->LookupAsciiSymbol("functionName");
- Handle<String> eval_key = factory()->LookupAsciiSymbol("isEval");
+ factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("scriptNameOrSourceURL"));
+ Handle<String> function_key =
+ factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("functionName"));
+ Handle<String> eval_key =
+ factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("isEval"));
Handle<String> constructor_key =
- factory()->LookupAsciiSymbol("isConstructor");
+ factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("isConstructor"));
StackTraceFrameIterator it(this);
int frames_seen = 0;
@@ -589,7 +752,7 @@ Handle<JSArray> Isolate::CaptureCurrentStackTrace(
JavaScriptFrame* frame = it.frame();
// Set initial size to the maximum inlining level + 1 for the outermost
// function.
- List<FrameSummary> frames(Compiler::kMaxInliningLevels + 1);
+ List<FrameSummary> frames(FLAG_max_inlining_levels + 1);
frame->Summarize(&frames);
for (int i = frames.length() - 1; i >= 0 && frames_seen < limit; i--) {
// Create a JSObject to hold the information for the StackFrame.
@@ -618,35 +781,33 @@ Handle<JSArray> Isolate::CaptureCurrentStackTrace(
this,
JSObject::SetLocalPropertyIgnoreAttributes(
stack_frame, column_key,
- Handle<Smi>(Smi::FromInt(column_offset + 1)), NONE));
+ Handle<Smi>(Smi::FromInt(column_offset + 1), this), NONE));
}
CHECK_NOT_EMPTY_HANDLE(
this,
JSObject::SetLocalPropertyIgnoreAttributes(
stack_frame, line_key,
- Handle<Smi>(Smi::FromInt(line_number + 1)), NONE));
+ Handle<Smi>(Smi::FromInt(line_number + 1), this), NONE));
+ }
+
+ if (options & StackTrace::kScriptId) {
+ Handle<Smi> script_id(script->id(), this);
+ CHECK_NOT_EMPTY_HANDLE(this,
+ JSObject::SetLocalPropertyIgnoreAttributes(
+ stack_frame, script_id_key, script_id,
+ NONE));
}
if (options & StackTrace::kScriptName) {
Handle<Object> script_name(script->name(), this);
CHECK_NOT_EMPTY_HANDLE(this,
JSObject::SetLocalPropertyIgnoreAttributes(
- stack_frame, script_key, script_name, NONE));
+ stack_frame, script_name_key, script_name,
+ NONE));
}
if (options & StackTrace::kScriptNameOrSourceURL) {
- Handle<Object> script_name(script->name(), this);
- Handle<JSValue> script_wrapper = GetScriptWrapper(script);
- Handle<Object> property = GetProperty(script_wrapper,
- name_or_source_url_key);
- ASSERT(property->IsJSFunction());
- Handle<JSFunction> method = Handle<JSFunction>::cast(property);
- bool caught_exception;
- Handle<Object> result = Execution::TryCall(method, script_wrapper, 0,
- NULL, &caught_exception);
- if (caught_exception) {
- result = factory()->undefined_value();
- }
+ Handle<Object> result = GetScriptNameOrSourceURL(script);
CHECK_NOT_EMPTY_HANDLE(this,
JSObject::SetLocalPropertyIgnoreAttributes(
stack_frame, script_name_or_source_url_key,
@@ -655,7 +816,7 @@ Handle<JSArray> Isolate::CaptureCurrentStackTrace(
if (options & StackTrace::kFunctionName) {
Handle<Object> fun_name(fun->shared()->name(), this);
- if (fun_name->ToBoolean()->IsFalse()) {
+ if (!fun_name->BooleanValue()) {
fun_name = Handle<Object>(fun->shared()->inferred_name(), this);
}
CHECK_NOT_EMPTY_HANDLE(this,
@@ -664,9 +825,9 @@ Handle<JSArray> Isolate::CaptureCurrentStackTrace(
}
if (options & StackTrace::kIsEval) {
- int type = Smi::cast(script->compilation_type())->value();
- Handle<Object> is_eval = (type == Script::COMPILATION_TYPE_EVAL) ?
- factory()->true_value() : factory()->false_value();
+ Handle<Object> is_eval =
+ script->compilation_type() == Script::COMPILATION_TYPE_EVAL ?
+ factory()->true_value() : factory()->false_value();
CHECK_NOT_EMPTY_HANDLE(this,
JSObject::SetLocalPropertyIgnoreAttributes(
stack_frame, eval_key, is_eval, NONE));
@@ -693,6 +854,11 @@ Handle<JSArray> Isolate::CaptureCurrentStackTrace(
void Isolate::PrintStack() {
+ PrintStack(stdout);
+}
+
+
+void Isolate::PrintStack(FILE* out) {
if (stack_trace_nesting_level_ == 0) {
stack_trace_nesting_level_++;
@@ -703,13 +869,13 @@ void Isolate::PrintStack() {
allocator = preallocated_message_space_;
}
- StringStream::ClearMentionedObjectCache();
+ StringStream::ClearMentionedObjectCache(this);
StringStream accumulator(allocator);
incomplete_message_ = &accumulator;
PrintStack(&accumulator);
- accumulator.OutputToStdOut();
+ accumulator.OutputToFile(out);
InitializeLoggingAndCounters();
- accumulator.Log();
+ accumulator.Log(this);
incomplete_message_ = NULL;
stack_trace_nesting_level_ = 0;
if (preallocated_message_space_ == NULL) {
@@ -722,14 +888,15 @@ void Isolate::PrintStack() {
"\n\nAttempt to print stack while printing stack (double fault)\n");
OS::PrintError(
"If you are lucky you may find a partial stack dump on stdout.\n\n");
- incomplete_message_->OutputToStdOut();
+ incomplete_message_->OutputToFile(out);
}
}
-static void PrintFrames(StringStream* accumulator,
+static void PrintFrames(Isolate* isolate,
+ StringStream* accumulator,
StackFrame::PrintMode mode) {
- StackFrameIterator it;
+ StackFrameIterator it(isolate);
for (int i = 0; !it.done(); it.Advance()) {
it.frame()->Print(accumulator, mode, i++);
}
@@ -739,27 +906,27 @@ static void PrintFrames(StringStream* accumulator,
void Isolate::PrintStack(StringStream* accumulator) {
if (!IsInitialized()) {
accumulator->Add(
- "\n==== Stack trace is not available ==========================\n\n");
+ "\n==== JS stack trace is not available =======================\n\n");
accumulator->Add(
"\n==== Isolate for the thread is not initialized =============\n\n");
return;
}
// The MentionedObjectCache is not GC-proof at the moment.
- AssertNoAllocation nogc;
- ASSERT(StringStream::IsMentionedObjectCacheClear());
+ DisallowHeapAllocation no_gc;
+ ASSERT(StringStream::IsMentionedObjectCacheClear(this));
// Avoid printing anything if there are no frames.
if (c_entry_fp(thread_local_top()) == 0) return;
accumulator->Add(
- "\n==== Stack trace ============================================\n\n");
- PrintFrames(accumulator, StackFrame::OVERVIEW);
+ "\n==== JS stack trace =========================================\n\n");
+ PrintFrames(this, accumulator, StackFrame::OVERVIEW);
accumulator->Add(
"\n==== Details ================================================\n\n");
- PrintFrames(accumulator, StackFrame::DETAILS);
+ PrintFrames(this, accumulator, StackFrame::DETAILS);
- accumulator->PrintMentionedObjectCache();
+ accumulator->PrintMentionedObjectCache(this);
accumulator->Add("=====================\n\n");
}
@@ -783,10 +950,10 @@ void Isolate::ReportFailedAccessCheck(JSObject* receiver, v8::AccessType type) {
constructor->shared()->get_api_func_data()->access_check_info();
if (data_obj == heap_.undefined_value()) return;
- HandleScope scope;
+ HandleScope scope(this);
Handle<JSObject> receiver_handle(receiver);
- Handle<Object> data(AccessCheckInfo::cast(data_obj)->data());
- { VMState state(this, EXTERNAL);
+ Handle<Object> data(AccessCheckInfo::cast(data_obj)->data(), this);
+ { VMState<EXTERNAL> state(this);
thread_local_top()->failed_access_check_callback_(
v8::Utils::ToLocal(receiver_handle),
type,
@@ -830,11 +997,11 @@ bool Isolate::MayNamedAccess(JSObject* receiver, Object* key,
ASSERT(receiver->IsAccessCheckNeeded());
// The callers of this method are not expecting a GC.
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
// Skip checks for hidden properties access. Note, we do not
// require existence of a context in this case.
- if (key == heap_.hidden_symbol()) return true;
+ if (key == heap_.hidden_string()) return true;
// Check for compatibility between the security tokens in the
// current lexical context and the accessed object.
@@ -865,7 +1032,7 @@ bool Isolate::MayNamedAccess(JSObject* receiver, Object* key,
bool result = false;
{
// Leaving JavaScript.
- VMState state(this, EXTERNAL);
+ VMState<EXTERNAL> state(this);
result = callback(v8::Utils::ToLocal(receiver_handle),
v8::Utils::ToLocal(key_handle),
type,
@@ -907,7 +1074,7 @@ bool Isolate::MayIndexedAccess(JSObject* receiver,
bool result = false;
{
// Leaving JavaScript.
- VMState state(this, EXTERNAL);
+ VMState<EXTERNAL> state(this);
result = callback(v8::Utils::ToLocal(receiver_handle),
index,
type,
@@ -922,16 +1089,30 @@ const char* const Isolate::kStackOverflowMessage =
Failure* Isolate::StackOverflow() {
- HandleScope scope;
- Handle<String> key = factory()->stack_overflow_symbol();
+ HandleScope scope(this);
+  // At this point we cannot create an Error object using its JavaScript
+ // constructor. Instead, we copy the pre-constructed boilerplate and
+ // attach the stack trace as a hidden property.
+ Handle<String> key = factory()->stack_overflow_string();
Handle<JSObject> boilerplate =
- Handle<JSObject>::cast(GetProperty(js_builtins_object(), key));
- Handle<Object> exception = Copy(boilerplate);
- // TODO(1240995): To avoid having to call JavaScript code to compute
- // the message for stack overflow exceptions which is very likely to
- // double fault with another stack overflow exception, we use a
- // precomputed message.
+ Handle<JSObject>::cast(GetProperty(this, js_builtins_object(), key));
+ Handle<JSObject> exception = JSObject::Copy(boilerplate);
DoThrow(*exception, NULL);
+
+ // Get stack trace limit.
+ Handle<Object> error = GetProperty(js_builtins_object(), "$Error");
+ if (!error->IsJSObject()) return Failure::Exception();
+ Handle<Object> stack_trace_limit =
+ GetProperty(Handle<JSObject>::cast(error), "stackTraceLimit");
+ if (!stack_trace_limit->IsNumber()) return Failure::Exception();
+ double dlimit = stack_trace_limit->Number();
+ int limit = std::isnan(dlimit) ? 0 : static_cast<int>(dlimit);
+
+ Handle<JSArray> stack_trace = CaptureSimpleStackTrace(
+ exception, factory()->undefined_value(), limit);
+ JSObject::SetHiddenProperty(exception,
+ factory()->hidden_stack_trace_string(),
+ stack_trace);
return Failure::Exception();
}
@@ -942,6 +1123,23 @@ Failure* Isolate::TerminateExecution() {
}
+void Isolate::CancelTerminateExecution() {
+ if (try_catch_handler()) {
+ try_catch_handler()->has_terminated_ = false;
+ }
+ if (has_pending_exception() &&
+ pending_exception() == heap_.termination_exception()) {
+ thread_local_top()->external_caught_exception_ = false;
+ clear_pending_exception();
+ }
+ if (has_scheduled_exception() &&
+ scheduled_exception() == heap_.termination_exception()) {
+ thread_local_top()->external_caught_exception_ = false;
+ clear_scheduled_exception();
+ }
+}
+
+
Failure* Isolate::Throw(Object* exception, MessageLocation* location) {
DoThrow(exception, location);
return Failure::Exception();
@@ -964,7 +1162,7 @@ Failure* Isolate::ReThrow(MaybeObject* exception) {
Failure* Isolate::ThrowIllegalOperation() {
- return Throw(heap_.illegal_access_symbol());
+ return Throw(heap_.illegal_access_string());
}
@@ -972,9 +1170,28 @@ void Isolate::ScheduleThrow(Object* exception) {
// When scheduling a throw we first throw the exception to get the
// error reporting if it is uncaught before rescheduling it.
Throw(exception);
- thread_local_top()->scheduled_exception_ = pending_exception();
- thread_local_top()->external_caught_exception_ = false;
- clear_pending_exception();
+ PropagatePendingExceptionToExternalTryCatch();
+ if (has_pending_exception()) {
+ thread_local_top()->scheduled_exception_ = pending_exception();
+ thread_local_top()->external_caught_exception_ = false;
+ clear_pending_exception();
+ }
+}
+
+
+void Isolate::RestorePendingMessageFromTryCatch(v8::TryCatch* handler) {
+ ASSERT(handler == try_catch_handler());
+ ASSERT(handler->HasCaught());
+ ASSERT(handler->rethrow_);
+ ASSERT(handler->capture_message_);
+ Object* message = reinterpret_cast<Object*>(handler->message_obj_);
+ Object* script = reinterpret_cast<Object*>(handler->message_script_);
+ ASSERT(message->IsJSMessageObject() || message->IsTheHole());
+ ASSERT(script->IsScript() || script->IsTheHole());
+ thread_local_top()->pending_message_obj_ = message;
+ thread_local_top()->pending_message_script_ = script;
+ thread_local_top()->pending_message_start_pos_ = handler->message_start_pos_;
+ thread_local_top()->pending_message_end_pos_ = handler->message_end_pos_;
}
@@ -989,14 +1206,14 @@ Failure* Isolate::PromoteScheduledException() {
void Isolate::PrintCurrentStackTrace(FILE* out) {
StackTraceFrameIterator it(this);
while (!it.done()) {
- HandleScope scope;
+ HandleScope scope(this);
// Find code position if recorded in relocation info.
JavaScriptFrame* frame = it.frame();
int pos = frame->LookupCode()->SourcePosition(frame->pc());
- Handle<Object> pos_obj(Smi::FromInt(pos));
+ Handle<Object> pos_obj(Smi::FromInt(pos), this);
// Fetch function and receiver.
- Handle<JSFunction> fun(JSFunction::cast(frame->function()));
- Handle<Object> recv(frame->receiver());
+ Handle<JSFunction> fun(frame->function());
+ Handle<Object> recv(frame->receiver(), this);
// Advance to the next JavaScript frame and determine if the
// current frame is the top-level frame.
it.Advance();
@@ -1008,7 +1225,7 @@ void Isolate::PrintCurrentStackTrace(FILE* out) {
Execution::GetStackTraceLine(recv, fun, pos_obj, is_top_level);
if (line->length() > 0) {
line->PrintOn(out);
- fprintf(out, "\n");
+ PrintF(out, "\n");
}
}
}
@@ -1019,7 +1236,7 @@ void Isolate::ComputeLocation(MessageLocation* target) {
StackTraceFrameIterator it(this);
if (!it.done()) {
JavaScriptFrame* frame = it.frame();
- JSFunction* fun = JSFunction::cast(frame->function());
+ JSFunction* fun = frame->function();
Object* script = fun->shared()->script();
if (script->IsScript() &&
!(Script::cast(script)->source()->IsUndefined())) {
@@ -1066,12 +1283,13 @@ bool Isolate::ShouldReportException(bool* can_be_caught_externally,
bool Isolate::IsErrorObject(Handle<Object> obj) {
if (!obj->IsJSObject()) return false;
- String* error_key = *(factory()->LookupAsciiSymbol("$Error"));
+ String* error_key =
+ *(factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("$Error")));
Object* error_constructor =
js_builtins_object()->GetPropertyNoExceptionThrown(error_key);
for (Object* prototype = *obj; !prototype->IsNull();
- prototype = prototype->GetPrototype()) {
+ prototype = prototype->GetPrototype(this)) {
if (!prototype->IsJSObject()) return false;
if (JSObject::cast(prototype)->map()->constructor() == error_constructor) {
return true;
@@ -1085,8 +1303,8 @@ static int fatal_exception_depth = 0;
void Isolate::DoThrow(Object* exception, MessageLocation* location) {
ASSERT(!has_pending_exception());
- HandleScope scope;
- Handle<Object> exception_handle(exception);
+ HandleScope scope(this);
+ Handle<Object> exception_handle(exception, this);
// Determine reporting and whether the exception is caught externally.
bool catchable_by_javascript = is_catchable_by_javascript(exception);
@@ -1095,9 +1313,12 @@ void Isolate::DoThrow(Object* exception, MessageLocation* location) {
ShouldReportException(&can_be_caught_externally, catchable_by_javascript);
bool report_exception = catchable_by_javascript && should_report_exception;
bool try_catch_needs_message =
- can_be_caught_externally && try_catch_handler()->capture_message_;
+ can_be_caught_externally && try_catch_handler()->capture_message_ &&
+ !thread_local_top()->rethrowing_message_;
bool bootstrapping = bootstrapper()->IsActive();
+ thread_local_top()->rethrowing_message_ = false;
+
#ifdef ENABLE_DEBUGGER_SUPPORT
// Notify debugger of exception.
if (catchable_by_javascript) {
@@ -1123,7 +1344,7 @@ void Isolate::DoThrow(Object* exception, MessageLocation* location) {
if (capture_stack_trace_for_uncaught_exceptions_) {
if (IsErrorObject(exception_handle)) {
// We fetch the stack trace that corresponds to this error object.
- String* key = heap()->hidden_stack_trace_symbol();
+ String* key = heap()->hidden_stack_trace_string();
Object* stack_property =
JSObject::cast(*exception_handle)->GetHiddenProperty(key);
// Property lookup may have failed. In this case it's probably not
@@ -1139,10 +1360,25 @@ void Isolate::DoThrow(Object* exception, MessageLocation* location) {
stack_trace_for_uncaught_exceptions_options_);
}
}
+
+ Handle<Object> exception_arg = exception_handle;
+ // If the exception argument is a custom object, turn it into a string
+ // before throwing as uncaught exception. Note that the pending
+ // exception object to be set later must not be turned into a string.
+ if (exception_arg->IsJSObject() && !IsErrorObject(exception_arg)) {
+ bool failed = false;
+ exception_arg =
+ Execution::ToDetailString(this, exception_arg, &failed);
+ if (failed) {
+ exception_arg = factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("exception"));
+ }
+ }
Handle<Object> message_obj = MessageHandler::MakeMessageObject(
+ this,
"uncaught_exception",
location,
- HandleVector<Object>(&exception_handle, 1),
+ HandleVector<Object>(&exception_arg, 1),
stack_trace,
stack_trace_object);
thread_local_top()->pending_message_obj_ = *message_obj;
@@ -1160,8 +1396,9 @@ void Isolate::DoThrow(Object* exception, MessageLocation* location) {
FLAG_abort_on_uncaught_exception &&
(report_exception || can_be_caught_externally)) {
fatal_exception_depth++;
- fprintf(stderr, "%s\n\nFROM\n",
- *MessageHandler::GetLocalizedMessage(message_obj));
+ PrintF(stderr,
+ "%s\n\nFROM\n",
+ *MessageHandler::GetLocalizedMessage(this, message_obj));
PrintCurrentStackTrace(stderr);
OS::Abort();
}
@@ -1173,17 +1410,19 @@ void Isolate::DoThrow(Object* exception, MessageLocation* location) {
// to the console for easier debugging.
int line_number = GetScriptLineNumberSafe(location->script(),
location->start_pos());
- if (exception->IsString()) {
+ if (exception->IsString() && location->script()->name()->IsString()) {
OS::PrintError(
"Extension or internal compilation error: %s in %s at line %d.\n",
*String::cast(exception)->ToCString(),
*String::cast(location->script()->name())->ToCString(),
line_number + 1);
- } else {
+ } else if (location->script()->name()->IsString()) {
OS::PrintError(
"Extension or internal compilation error in %s at line %d.\n",
*String::cast(location->script()->name())->ToCString(),
line_number + 1);
+ } else {
+ OS::PrintError("Extension or internal compilation error.\n");
}
}
}
@@ -1250,8 +1489,8 @@ void Isolate::ReportPendingMessages() {
// the native context. Note: We have to mark the native context here
// since the GenerateThrowOutOfMemory stub cannot make a RuntimeCall to
// set it.
- HandleScope scope;
- if (thread_local_top_.pending_exception_ == Failure::OutOfMemoryException()) {
+ HandleScope scope(this);
+ if (thread_local_top_.pending_exception_->IsOutOfMemory()) {
context()->mark_out_of_memory();
} else if (thread_local_top_.pending_exception_ ==
heap()->termination_exception()) {
@@ -1261,10 +1500,12 @@ void Isolate::ReportPendingMessages() {
if (thread_local_top_.has_pending_message_) {
thread_local_top_.has_pending_message_ = false;
if (!thread_local_top_.pending_message_obj_->IsTheHole()) {
- HandleScope scope;
- Handle<Object> message_obj(thread_local_top_.pending_message_obj_);
- if (thread_local_top_.pending_message_script_ != NULL) {
- Handle<Script> script(thread_local_top_.pending_message_script_);
+ HandleScope scope(this);
+ Handle<Object> message_obj(thread_local_top_.pending_message_obj_,
+ this);
+ if (!thread_local_top_.pending_message_script_->IsTheHole()) {
+ Handle<Script> script(
+ Script::cast(thread_local_top_.pending_message_script_));
int start_pos = thread_local_top_.pending_message_start_pos_;
int end_pos = thread_local_top_.pending_message_end_pos_;
MessageLocation location(script, start_pos, end_pos);
@@ -1279,6 +1520,25 @@ void Isolate::ReportPendingMessages() {
}
+MessageLocation Isolate::GetMessageLocation() {
+ ASSERT(has_pending_exception());
+
+ if (!thread_local_top_.pending_exception_->IsOutOfMemory() &&
+ thread_local_top_.pending_exception_ != heap()->termination_exception() &&
+ thread_local_top_.has_pending_message_ &&
+ !thread_local_top_.pending_message_obj_->IsTheHole() &&
+      !thread_local_top_.pending_message_script_->IsTheHole()) {
+ Handle<Script> script(
+ Script::cast(thread_local_top_.pending_message_script_));
+ int start_pos = thread_local_top_.pending_message_start_pos_;
+ int end_pos = thread_local_top_.pending_message_end_pos_;
+ return MessageLocation(script, start_pos, end_pos);
+ }
+
+ return MessageLocation();
+}
+
+
void Isolate::TraceException(bool flag) {
FLAG_trace_exception = flag; // TODO(isolates): This is an unfortunate use.
}
@@ -1309,7 +1569,7 @@ bool Isolate::OptionalRescheduleException(bool is_bottom_call) {
ASSERT(thread_local_top()->try_catch_handler_address() != NULL);
Address external_handler_address =
thread_local_top()->try_catch_handler_address();
- JavaScriptFrameIterator it;
+ JavaScriptFrameIterator it(this);
if (it.done() || (it.frame()->sp() > external_handler_address)) {
clear_exception = true;
}
@@ -1358,19 +1618,17 @@ bool Isolate::is_out_of_memory() {
Handle<Context> Isolate::native_context() {
- GlobalObject* global = thread_local_top()->context_->global_object();
- return Handle<Context>(global->native_context());
+ return Handle<Context>(context()->global_object()->native_context());
}
Handle<Context> Isolate::global_context() {
- GlobalObject* global = thread_local_top()->context_->global_object();
- return Handle<Context>(global->global_context());
+ return Handle<Context>(context()->global_object()->global_context());
}
Handle<Context> Isolate::GetCallingNativeContext() {
- JavaScriptFrameIterator it;
+ JavaScriptFrameIterator it(this);
#ifdef ENABLE_DEBUGGER_SUPPORT
if (debug_->InDebugger()) {
while (!it.done()) {
@@ -1392,11 +1650,8 @@ Handle<Context> Isolate::GetCallingNativeContext() {
char* Isolate::ArchiveThread(char* to) {
- if (RuntimeProfiler::IsEnabled() && current_vm_state() == JS) {
- RuntimeProfiler::IsolateExitedJS(this);
- }
- memcpy(to, reinterpret_cast<char*>(thread_local_top()),
- sizeof(ThreadLocalTop));
+ OS::MemCopy(to, reinterpret_cast<char*>(thread_local_top()),
+ sizeof(ThreadLocalTop));
InitializeThreadLocal();
clear_pending_exception();
clear_pending_message();
@@ -1406,20 +1661,13 @@ char* Isolate::ArchiveThread(char* to) {
char* Isolate::RestoreThread(char* from) {
- memcpy(reinterpret_cast<char*>(thread_local_top()), from,
- sizeof(ThreadLocalTop));
+ OS::MemCopy(reinterpret_cast<char*>(thread_local_top()), from,
+ sizeof(ThreadLocalTop));
// This might be just paranoia, but it seems to be needed in case a
// thread_local_top_ is restored on a separate OS thread.
#ifdef USE_SIMULATOR
-#ifdef V8_TARGET_ARCH_ARM
- thread_local_top()->simulator_ = Simulator::current(this);
-#elif V8_TARGET_ARCH_MIPS
thread_local_top()->simulator_ = Simulator::current(this);
#endif
-#endif
- if (RuntimeProfiler::IsEnabled() && current_vm_state() == JS) {
- RuntimeProfiler::IsolateEnteredJS(this);
- }
ASSERT(context() == NULL || context()->IsContext());
return from + sizeof(ThreadLocalTop);
}
@@ -1430,6 +1678,14 @@ Isolate::ThreadDataTable::ThreadDataTable()
}
+Isolate::ThreadDataTable::~ThreadDataTable() {
+ // TODO(svenpanne) The assertion below would fire if an embedder does not
+ // cleanly dispose all Isolates before disposing v8, so we are conservative
+ // and leave it out for now.
+ // ASSERT_EQ(NULL, list_);
+}
+
+
Isolate::PerIsolateThreadData*
Isolate::ThreadDataTable::Lookup(Isolate* isolate,
ThreadId thread_id) {
@@ -1455,15 +1711,6 @@ void Isolate::ThreadDataTable::Remove(PerIsolateThreadData* data) {
}
-void Isolate::ThreadDataTable::Remove(Isolate* isolate,
- ThreadId thread_id) {
- PerIsolateThreadData* data = Lookup(isolate, thread_id);
- if (data != NULL) {
- Remove(data);
- }
-}
-
-
void Isolate::ThreadDataTable::RemoveAllThreads(Isolate* isolate) {
PerIsolateThreadData* data = list_;
while (data != NULL) {
@@ -1478,7 +1725,8 @@ void Isolate::ThreadDataTable::RemoveAllThreads(Isolate* isolate) {
#define TRACE_ISOLATE(tag) \
do { \
if (FLAG_trace_isolates) { \
- PrintF("Isolate %p " #tag "\n", reinterpret_cast<void*>(this)); \
+ PrintF("Isolate %p (id %d)" #tag "\n", \
+ reinterpret_cast<void*>(this), id()); \
} \
} while (false)
#else
@@ -1499,11 +1747,7 @@ Isolate::Isolate()
compilation_cache_(NULL),
counters_(NULL),
code_range_(NULL),
- // Must be initialized early to allow v8::SetResourceConstraints calls.
- break_access_(OS::CreateMutex()),
debugger_initialized_(false),
- // Must be initialized early to allow v8::Debug calls.
- debugger_access_(OS::CreateMutex()),
logger_(NULL),
stats_table_(NULL),
stub_cache_(NULL),
@@ -1523,8 +1767,9 @@ Isolate::Isolate()
free_list_(0),
preallocated_storage_preallocated_(false),
inner_pointer_to_code_cache_(NULL),
- write_input_buffer_(NULL),
+ write_iterator_(NULL),
global_handles_(NULL),
+ eternal_handles_(NULL),
context_switcher_(NULL),
thread_manager_(NULL),
fp_stubs_generated_(false),
@@ -1532,9 +1777,21 @@ Isolate::Isolate()
string_tracker_(NULL),
regexp_stack_(NULL),
date_cache_(NULL),
- context_exit_happened_(false),
+ code_stub_interface_descriptors_(NULL),
+ // TODO(bmeurer) Initialized lazily because it depends on flags; can
+ // be fixed once the default isolate cleanup is done.
+ random_number_generator_(NULL),
+ has_fatal_error_(false),
+ use_crankshaft_(true),
+ initialized_from_snapshot_(false),
+ cpu_profiler_(NULL),
+ heap_profiler_(NULL),
+ function_entry_hook_(NULL),
deferred_handles_head_(NULL),
- optimizing_compiler_thread_(this) {
+ optimizing_compiler_thread_(NULL),
+ sweeper_thread_(NULL),
+ stress_deopt_count_(0) {
+ id_ = NoBarrier_AtomicIncrement(&isolate_counter_, 1);
TRACE_ISOLATE(constructor);
memset(isolate_addresses_, 0,
@@ -1548,8 +1805,8 @@ Isolate::Isolate()
thread_manager_ = new ThreadManager();
thread_manager_->isolate_ = this;
-#if defined(V8_TARGET_ARCH_ARM) && !defined(__arm__) || \
- defined(V8_TARGET_ARCH_MIPS) && !defined(__mips__)
+#if V8_TARGET_ARCH_ARM && !defined(__arm__) || \
+ V8_TARGET_ARCH_MIPS && !defined(__mips__)
simulator_initialized_ = false;
simulator_i_cache_ = NULL;
simulator_redirection_ = NULL;
@@ -1580,6 +1837,7 @@ Isolate::Isolate()
#undef ISOLATE_INIT_ARRAY_EXECUTE
}
+
void Isolate::TearDown() {
TRACE_ISOLATE(tear_down);
@@ -1593,7 +1851,7 @@ void Isolate::TearDown() {
Deinit();
- { ScopedLock lock(process_wide_mutex_);
+ { LockGuard<Mutex> lock_guard(&process_wide_mutex_);
thread_data_table_->RemoveAllThreads(this);
}
@@ -1611,22 +1869,47 @@ void Isolate::TearDown() {
}
+void Isolate::GlobalTearDown() {
+ delete thread_data_table_;
+}
+
+
void Isolate::Deinit() {
if (state_ == INITIALIZED) {
TRACE_ISOLATE(deinit);
- if (FLAG_parallel_recompilation) optimizing_compiler_thread_.Stop();
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ debugger()->UnloadDebugger();
+#endif
+
+ if (FLAG_concurrent_recompilation) {
+ optimizing_compiler_thread_->Stop();
+ delete optimizing_compiler_thread_;
+ }
+
+ if (FLAG_sweeper_threads > 0) {
+ for (int i = 0; i < FLAG_sweeper_threads; i++) {
+ sweeper_thread_[i]->Stop();
+ delete sweeper_thread_[i];
+ }
+ delete[] sweeper_thread_;
+ }
- if (FLAG_hydrogen_stats) HStatistics::Instance()->Print();
+ if (FLAG_hydrogen_stats) GetHStatistics()->Print();
+
+ if (FLAG_print_deopt_stress) {
+ PrintF(stdout, "=== Stress deopt counter: %u\n", stress_deopt_count_);
+ }
// We must stop the logger before we tear down other components.
- logger_->EnsureTickerStopped();
+ Sampler* sampler = logger_->sampler();
+ if (sampler && sampler->IsActive()) sampler->Stop();
delete deoptimizer_data_;
deoptimizer_data_ = NULL;
if (FLAG_preemption) {
- v8::Locker locker;
- v8::Locker::StopPreemption();
+ v8::Locker locker(reinterpret_cast<v8::Isolate*>(this));
+ v8::Locker::StopPreemption(reinterpret_cast<v8::Isolate*>(this));
}
builtins_.TearDown();
bootstrapper_->TearDown();
@@ -1636,8 +1919,6 @@ void Isolate::Deinit() {
preallocated_message_space_ = NULL;
PreallocatedMemoryThreadStop();
- HeapProfiler::TearDown();
- CpuProfiler::TearDown();
if (runtime_profiler_ != NULL) {
runtime_profiler_->TearDown();
delete runtime_profiler_;
@@ -1646,6 +1927,11 @@ void Isolate::Deinit() {
heap_.TearDown();
logger_->TearDown();
+ delete heap_profiler_;
+ heap_profiler_ = NULL;
+ delete cpu_profiler_;
+ cpu_profiler_ = NULL;
+
// The default isolate is re-initializable due to legacy API.
state_ = UNINITIALIZED;
}
@@ -1682,9 +1968,18 @@ void Isolate::SetIsolateThreadLocals(Isolate* isolate,
Isolate::~Isolate() {
TRACE_ISOLATE(destructor);
- // Has to be called while counters_ are still alive.
+ // Has to be called while counters_ are still alive
runtime_zone_.DeleteKeptSegment();
+ // The entry stack must be empty when we get here,
+ // except for the default isolate, where it can
+ // still contain up to one entry stack item
+ ASSERT(entry_stack_ == NULL || this == default_isolate_);
+ ASSERT(entry_stack_ == NULL || entry_stack_->previous_item == NULL);
+
+ delete entry_stack_;
+ entry_stack_ = NULL;
+
delete[] assembler_spare_buffer_;
assembler_spare_buffer_ = NULL;
@@ -1694,6 +1989,9 @@ Isolate::~Isolate() {
delete date_cache_;
date_cache_ = NULL;
+ delete[] code_stub_interface_descriptors_;
+ code_stub_interface_descriptors_ = NULL;
+
delete regexp_stack_;
regexp_stack_ = NULL;
@@ -1719,10 +2017,6 @@ Isolate::~Isolate() {
delete handle_scope_implementer_;
handle_scope_implementer_ = NULL;
- delete break_access_;
- break_access_ = NULL;
- delete debugger_access_;
- debugger_access_ = NULL;
delete compilation_cache_;
compilation_cache_ = NULL;
@@ -1730,8 +2024,8 @@ Isolate::~Isolate() {
bootstrapper_ = NULL;
delete inner_pointer_to_code_cache_;
inner_pointer_to_code_cache_ = NULL;
- delete write_input_buffer_;
- write_input_buffer_ = NULL;
+ delete write_iterator_;
+ write_iterator_ = NULL;
delete context_switcher_;
context_switcher_ = NULL;
@@ -1747,10 +2041,18 @@ Isolate::~Isolate() {
code_range_ = NULL;
delete global_handles_;
global_handles_ = NULL;
+ delete eternal_handles_;
+ eternal_handles_ = NULL;
+
+ delete string_stream_debug_object_cache_;
+ string_stream_debug_object_cache_ = NULL;
delete external_reference_table_;
external_reference_table_ = NULL;
+ delete random_number_generator_;
+ random_number_generator_ = NULL;
+
#ifdef ENABLE_DEBUGGER_SUPPORT
delete debugger_;
debugger_ = NULL;
@@ -1774,38 +2076,49 @@ void Isolate::PropagatePendingExceptionToExternalTryCatch() {
if (!external_caught) return;
- if (thread_local_top_.pending_exception_ == Failure::OutOfMemoryException()) {
+ if (thread_local_top_.pending_exception_->IsOutOfMemory()) {
// Do not propagate OOM exception: we should kill VM asap.
} else if (thread_local_top_.pending_exception_ ==
heap()->termination_exception()) {
try_catch_handler()->can_continue_ = false;
+ try_catch_handler()->has_terminated_ = true;
try_catch_handler()->exception_ = heap()->null_value();
} else {
+ v8::TryCatch* handler = try_catch_handler();
// At this point all non-object (failure) exceptions have
// been dealt with so this shouldn't fail.
ASSERT(!pending_exception()->IsFailure());
- try_catch_handler()->can_continue_ = true;
- try_catch_handler()->exception_ = pending_exception();
- if (!thread_local_top_.pending_message_obj_->IsTheHole()) {
- try_catch_handler()->message_ = thread_local_top_.pending_message_obj_;
- }
+ ASSERT(thread_local_top_.pending_message_obj_->IsJSMessageObject() ||
+ thread_local_top_.pending_message_obj_->IsTheHole());
+ ASSERT(thread_local_top_.pending_message_script_->IsScript() ||
+ thread_local_top_.pending_message_script_->IsTheHole());
+ handler->can_continue_ = true;
+ handler->has_terminated_ = false;
+ handler->exception_ = pending_exception();
+ // Propagate to the external try-catch only if we got an actual message.
+ if (thread_local_top_.pending_message_obj_->IsTheHole()) return;
+
+ handler->message_obj_ = thread_local_top_.pending_message_obj_;
+ handler->message_script_ = thread_local_top_.pending_message_script_;
+ handler->message_start_pos_ = thread_local_top_.pending_message_start_pos_;
+ handler->message_end_pos_ = thread_local_top_.pending_message_end_pos_;
}
}
void Isolate::InitializeLoggingAndCounters() {
if (logger_ == NULL) {
- logger_ = new Logger;
+ logger_ = new Logger(this);
}
if (counters_ == NULL) {
- counters_ = new Counters;
+ counters_ = new Counters(this);
}
}
void Isolate::InitializeDebugger() {
#ifdef ENABLE_DEBUGGER_SUPPORT
- ScopedLock lock(debugger_access_);
+ LockGuard<RecursiveMutex> lock_guard(debugger_access());
if (NoBarrier_Load(&debugger_initialized_)) return;
InitializeLoggingAndCounters();
debug_ = new Debug(this);
@@ -1817,9 +2130,24 @@ void Isolate::InitializeDebugger() {
bool Isolate::Init(Deserializer* des) {
ASSERT(state_ != INITIALIZED);
- ASSERT(Isolate::Current() == this);
TRACE_ISOLATE(init);
+ stress_deopt_count_ = FLAG_deopt_every_n_times;
+
+ has_fatal_error_ = false;
+
+ use_crankshaft_ = FLAG_crankshaft
+ && !Serializer::enabled()
+ && CPU::SupportsCrankshaft();
+
+ if (function_entry_hook() != NULL) {
+ // When function entry hooking is in effect, we have to create the code
+ // stubs from scratch to get entry hooks, rather than loading the previously
+ // generated stubs from disk.
+ // If this assert fires, the initialization path has regressed.
+ ASSERT(des == NULL);
+ }
+
// The initialization process does not handle memory exhaustion.
DisallowAllocationFailure disallow_allocation_failure;
@@ -1830,43 +2158,44 @@ bool Isolate::Init(Deserializer* des) {
memory_allocator_ = new MemoryAllocator(this);
code_range_ = new CodeRange(this);
- // Safe after setting Heap::isolate_, initializing StackGuard and
- // ensuring that Isolate::Current() == this.
+ // Safe after setting Heap::isolate_, and initializing StackGuard
heap_.SetStackLimits();
#define ASSIGN_ELEMENT(CamelName, hacker_name) \
isolate_addresses_[Isolate::k##CamelName##Address] = \
reinterpret_cast<Address>(hacker_name##_address());
FOR_EACH_ISOLATE_ADDRESS_NAME(ASSIGN_ELEMENT)
-#undef C
+#undef ASSIGN_ELEMENT
string_tracker_ = new StringTracker();
string_tracker_->isolate_ = this;
compilation_cache_ = new CompilationCache(this);
- transcendental_cache_ = new TranscendentalCache();
+ transcendental_cache_ = new TranscendentalCache(this);
keyed_lookup_cache_ = new KeyedLookupCache();
context_slot_cache_ = new ContextSlotCache();
descriptor_lookup_cache_ = new DescriptorLookupCache();
unicode_cache_ = new UnicodeCache();
inner_pointer_to_code_cache_ = new InnerPointerToCodeCache(this);
- write_input_buffer_ = new StringInputBuffer();
+ write_iterator_ = new ConsStringIteratorOp();
global_handles_ = new GlobalHandles(this);
- bootstrapper_ = new Bootstrapper();
+ eternal_handles_ = new EternalHandles();
+ bootstrapper_ = new Bootstrapper(this);
handle_scope_implementer_ = new HandleScopeImplementer(this);
- stub_cache_ = new StubCache(this, runtime_zone());
+ stub_cache_ = new StubCache(this);
regexp_stack_ = new RegExpStack();
regexp_stack_->isolate_ = this;
date_cache_ = new DateCache();
+ code_stub_interface_descriptors_ =
+ new CodeStubInterfaceDescriptor[CodeStub::NUMBER_OF_IDS];
+ cpu_profiler_ = new CpuProfiler(this);
+ heap_profiler_ = new HeapProfiler(heap());
// Enable logging before setting up the heap
- logger_->SetUp();
-
- CpuProfiler::SetUp();
- HeapProfiler::SetUp();
+ logger_->SetUp(this);
// Initialize other runtime facilities
#if defined(USE_SIMULATOR)
-#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS)
+#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS
Simulator::Initialize(this);
#endif
#endif
@@ -1880,10 +2209,22 @@ bool Isolate::Init(Deserializer* des) {
}
// SetUp the object heap.
- const bool create_heap_objects = (des == NULL);
ASSERT(!heap_.HasBeenSetUp());
- if (!heap_.SetUp(create_heap_objects)) {
- V8::SetFatalError();
+ if (!heap_.SetUp()) {
+ V8::FatalProcessOutOfMemory("heap setup");
+ return false;
+ }
+
+ deoptimizer_data_ = new DeoptimizerData(memory_allocator_);
+
+ if (FLAG_concurrent_recompilation) {
+ optimizing_compiler_thread_ = new OptimizingCompilerThread(this);
+ optimizing_compiler_thread_->Start();
+ }
+
+ const bool create_heap_objects = (des == NULL);
+ if (create_heap_objects && !heap_.CreateHeapObjects()) {
+ V8::FatalProcessOutOfMemory("heap object creation");
return false;
}
@@ -1895,7 +2236,7 @@ bool Isolate::Init(Deserializer* des) {
InitializeThreadLocal();
bootstrapper_->Initialize(create_heap_objects);
- builtins_.SetUp(create_heap_objects);
+ builtins_.SetUp(this, create_heap_objects);
// Only preallocate on the first initialization.
if (FLAG_preallocate_message_memory && preallocated_message_space_ == NULL) {
@@ -1909,8 +2250,8 @@ bool Isolate::Init(Deserializer* des) {
}
if (FLAG_preemption) {
- v8::Locker locker;
- v8::Locker::StartPreemption(100);
+ v8::Locker locker(reinterpret_cast<v8::Isolate*>(this));
+ v8::Locker::StartPreemption(reinterpret_cast<v8::Isolate*>(this), 100);
}
#ifdef ENABLE_DEBUGGER_SUPPORT
@@ -1919,7 +2260,7 @@ bool Isolate::Init(Deserializer* des) {
// If we are deserializing, read the state into the now-empty heap.
if (!create_heap_objects) {
- des->Deserialize();
+ des->Deserialize(this);
}
stub_cache_->Initialize();
@@ -1935,21 +2276,18 @@ bool Isolate::Init(Deserializer* des) {
// Quiet the heap NaN if needed on target platform.
if (!create_heap_objects) Assembler::QuietNaN(heap_.nan_value());
- deoptimizer_data_ = new DeoptimizerData;
runtime_profiler_ = new RuntimeProfiler(this);
runtime_profiler_->SetUp();
// If we are deserializing, log non-function code objects and compiled
// functions found in the snapshot.
- if (create_heap_objects &&
+ if (!create_heap_objects &&
(FLAG_log_code || FLAG_ll_prof || logger_->is_logging_code_events())) {
- HandleScope scope;
+ HandleScope scope(this);
LOG(this, LogCodeObjects());
LOG(this, LogCompiledFunctions());
}
- CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, state_)),
- Internals::kIsolateStateOffset);
CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, embedder_data_)),
Internals::kIsolateEmbedderDataOffset);
CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, heap_.roots_)),
@@ -1957,7 +2295,51 @@ bool Isolate::Init(Deserializer* des) {
state_ = INITIALIZED;
time_millis_at_init_ = OS::TimeCurrentMillis();
- if (FLAG_parallel_recompilation) optimizing_compiler_thread_.Start();
+
+ if (!create_heap_objects) {
+ // Now that the heap is consistent, it's OK to generate the code for the
+ // deopt entry table that might have been referred to by optimized code in
+ // the snapshot.
+ HandleScope scope(this);
+ Deoptimizer::EnsureCodeForDeoptimizationEntry(
+ this,
+ Deoptimizer::LAZY,
+ kDeoptTableSerializeEntryCount - 1);
+ }
+
+ if (!Serializer::enabled()) {
+ // Ensure that all stubs which need to be generated ahead of time, but
+ // cannot be serialized into the snapshot have been generated.
+ HandleScope scope(this);
+ CodeStub::GenerateFPStubs(this);
+ StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(this);
+ StubFailureTrampolineStub::GenerateAheadOfTime(this);
+ // TODO(mstarzinger): The following is an ugly hack to make sure the
+ // interface descriptor is initialized even when stubs have been
+ // deserialized out of the snapshot without the graph builder.
+ FastCloneShallowArrayStub stub(FastCloneShallowArrayStub::CLONE_ELEMENTS,
+ DONT_TRACK_ALLOCATION_SITE, 0);
+ stub.InitializeInterfaceDescriptor(
+ this, code_stub_interface_descriptor(CodeStub::FastCloneShallowArray));
+ BinaryOpStub::InitializeForIsolate(this);
+ CompareNilICStub::InitializeForIsolate(this);
+ ToBooleanStub::InitializeForIsolate(this);
+ ArrayConstructorStubBase::InstallDescriptors(this);
+ InternalArrayConstructorStubBase::InstallDescriptors(this);
+ FastNewClosureStub::InstallDescriptors(this);
+ NumberToStringStub::InstallDescriptors(this);
+ }
+
+ if (FLAG_sweeper_threads > 0) {
+ sweeper_thread_ = new SweeperThread*[FLAG_sweeper_threads];
+ for (int i = 0; i < FLAG_sweeper_threads; i++) {
+ sweeper_thread_[i] = new SweeperThread(this);
+ sweeper_thread_[i]->Start();
+ }
+ }
+
+ initialized_from_snapshot_ = (des != NULL);
+
return true;
}
@@ -2071,6 +2453,67 @@ void Isolate::UnlinkDeferredHandles(DeferredHandles* deferred) {
}
+HStatistics* Isolate::GetHStatistics() {
+ if (hstatistics() == NULL) set_hstatistics(new HStatistics());
+ return hstatistics();
+}
+
+
+HTracer* Isolate::GetHTracer() {
+ if (htracer() == NULL) set_htracer(new HTracer(id()));
+ return htracer();
+}
+
+
+Map* Isolate::get_initial_js_array_map(ElementsKind kind) {
+ Context* native_context = context()->native_context();
+ Object* maybe_map_array = native_context->js_array_maps();
+ if (!maybe_map_array->IsUndefined()) {
+ Object* maybe_transitioned_map =
+ FixedArray::cast(maybe_map_array)->get(kind);
+ if (!maybe_transitioned_map->IsUndefined()) {
+ return Map::cast(maybe_transitioned_map);
+ }
+ }
+ return NULL;
+}
+
+
+bool Isolate::IsFastArrayConstructorPrototypeChainIntact() {
+ Map* root_array_map =
+ get_initial_js_array_map(GetInitialFastElementsKind());
+ ASSERT(root_array_map != NULL);
+ JSObject* initial_array_proto = JSObject::cast(*initial_array_prototype());
+
+ // Check that the array prototype hasn't been altered WRT empty elements.
+ if (root_array_map->prototype() != initial_array_proto) return false;
+ if (initial_array_proto->elements() != heap()->empty_fixed_array()) {
+ return false;
+ }
+
+ // Check that the object prototype hasn't been altered WRT empty elements.
+ JSObject* initial_object_proto = JSObject::cast(*initial_object_prototype());
+ Object* root_array_map_proto = initial_array_proto->GetPrototype();
+ if (root_array_map_proto != initial_object_proto) return false;
+ if (initial_object_proto->elements() != heap()->empty_fixed_array()) {
+ return false;
+ }
+
+ return initial_object_proto->GetPrototype()->IsNull();
+}
+
+
+CodeStubInterfaceDescriptor*
+ Isolate::code_stub_interface_descriptor(int index) {
+ return code_stub_interface_descriptors_ + index;
+}
+
+
+Object* Isolate::FindCodeObject(Address a) {
+ return inner_pointer_to_code_cache()->GcSafeFindCodeForInnerPointer(a);
+}
+
+
#ifdef DEBUG
#define ISOLATE_FIELD_OFFSET(type, name, ignored) \
const intptr_t Isolate::name##_debug_offset_ = OFFSET_OF(Isolate, name##_);
diff --git a/deps/v8/src/isolate.h b/deps/v8/src/isolate.h
index b90191d0e..9aa14ee02 100644
--- a/deps/v8/src/isolate.h
+++ b/deps/v8/src/isolate.h
@@ -31,6 +31,7 @@
#include "../include/v8-debug.h"
#include "allocation.h"
#include "apiutils.h"
+#include "assert-scope.h"
#include "atomicops.h"
#include "builtins.h"
#include "contexts.h"
@@ -53,6 +54,7 @@ namespace internal {
class Bootstrapper;
class CodeGenerator;
class CodeRange;
+struct CodeStubInterfaceDescriptor;
class CompilationCache;
class ContextSlotCache;
class ContextSwitcher;
@@ -62,25 +64,30 @@ class CpuProfiler;
class DeoptimizerData;
class Deserializer;
class EmptyStatement;
+class ExternalCallbackScope;
class ExternalReferenceTable;
class Factory;
class FunctionInfoListener;
class HandleScopeImplementer;
class HeapProfiler;
+class HStatistics;
+class HTracer;
class InlineRuntimeFunctionsTable;
class NoAllocationStringAllocator;
class InnerPointerToCodeCache;
class PreallocatedMemoryThread;
+class RandomNumberGenerator;
class RegExpStack;
class SaveContext;
class UnicodeCache;
-class StringInputBuffer;
+class ConsStringIteratorOp;
class StringTracker;
class StubCache;
+class SweeperThread;
class ThreadManager;
class ThreadState;
class ThreadVisitor; // Defined in v8threads.h
-class VMState;
+template <StateTag Tag> class VMState;
// 'void function pointer', used to roundtrip the
// ExternalReference::ExternalReferenceRedirector since we can not include
@@ -94,8 +101,8 @@ class Debugger;
class DebuggerAgent;
#endif
-#if !defined(__arm__) && defined(V8_TARGET_ARCH_ARM) || \
- !defined(__mips__) && defined(V8_TARGET_ARCH_MIPS)
+#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
+ !defined(__mips__) && V8_TARGET_ARCH_MIPS
class Redirection;
class Simulator;
#endif
@@ -117,6 +124,15 @@ typedef ZoneList<Handle<Object> > ZoneObjectList;
} \
} while (false)
+#define RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, T) \
+ do { \
+ Isolate* __isolate__ = (isolate); \
+ if (__isolate__->has_scheduled_exception()) { \
+ __isolate__->PromoteScheduledException(); \
+ return Handle<T>::null(); \
+ } \
+ } while (false)
+
#define RETURN_IF_EMPTY_HANDLE_VALUE(isolate, call, value) \
do { \
if ((call).is_null()) { \
@@ -239,8 +255,9 @@ class ThreadLocalTop BASE_EMBEDDED {
ThreadId thread_id_;
MaybeObject* pending_exception_;
bool has_pending_message_;
+ bool rethrowing_message_;
Object* pending_message_obj_;
- Script* pending_message_script_;
+ Object* pending_message_script_;
int pending_message_start_pos_;
int pending_message_end_pos_;
// Use a separate value for scheduled exceptions to preserve the
@@ -256,13 +273,12 @@ class ThreadLocalTop BASE_EMBEDDED {
Address handler_; // try-blocks are chained through the stack
#ifdef USE_SIMULATOR
-#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS)
Simulator* simulator_;
#endif
-#endif // USE_SIMULATOR
Address js_entry_sp_; // the stack pointer of the bottom JS entry frame
- Address external_callback_; // the external callback we're currently in
+ // the external callback we're currently in
+ ExternalCallbackScope* external_callback_scope_;
StateTag current_vm_state_;
// Generated code scratch locations.
@@ -284,10 +300,23 @@ class ThreadLocalTop BASE_EMBEDDED {
};
+class SystemThreadManager {
+ public:
+ enum ParallelSystemComponent {
+ PARALLEL_SWEEPING,
+ CONCURRENT_SWEEPING,
+ PARALLEL_RECOMPILATION
+ };
+
+ static int NumberOfParallelSystemThreads(ParallelSystemComponent type);
+
+ static const int kMaxThreads = 4;
+};
+
+
#ifdef ENABLE_DEBUGGER_SUPPORT
#define ISOLATE_DEBUGGER_INIT_LIST(V) \
- V(v8::Debug::EventCallback, debug_event_callback, NULL) \
V(DebuggerAgent*, debugger_agent_instance, NULL)
#else
@@ -327,7 +356,6 @@ typedef List<HeapObject*, PreallocatedStorageAllocationPolicy> DebugObjectCache;
V(byte*, assembler_spare_buffer, NULL) \
V(FatalErrorCallback, exception_behavior, NULL) \
V(AllowCodeGenerationFromStringsCallback, allow_code_gen_callback, NULL) \
- V(v8::Debug::MessageHandler, message_handler, NULL) \
/* To distinguish the function templates, so that we can find them in the */ \
/* function cache of the native context. */ \
V(int, next_serial_number, 0) \
@@ -337,9 +365,6 @@ typedef List<HeapObject*, PreallocatedStorageAllocationPolicy> DebugObjectCache;
V(FunctionInfoListener*, active_function_info_listener, NULL) \
/* State for Relocatable. */ \
V(Relocatable*, relocatable_top, NULL) \
- /* State for CodeEntry in profile-generator. */ \
- V(CodeGenerator*, current_code_generator, NULL) \
- V(bool, jump_target_compiling_deferred_code, false) \
V(DebugObjectCache*, string_stream_debug_object_cache, NULL) \
V(Object*, string_stream_current_security_token, NULL) \
/* TODO(isolates): Release this on destruction? */ \
@@ -349,11 +374,9 @@ typedef List<HeapObject*, PreallocatedStorageAllocationPolicy> DebugObjectCache;
/* AstNode state. */ \
V(int, ast_node_id, 0) \
V(unsigned, ast_node_count, 0) \
- /* SafeStackFrameIterator activations count. */ \
- V(int, safe_stack_iterator_counter, 0) \
- V(uint64_t, enabled_cpu_features, 0) \
- V(CpuProfiler*, cpu_profiler, NULL) \
- V(HeapProfiler*, heap_profiler, NULL) \
+ V(bool, observer_delivery_pending, false) \
+ V(HStatistics*, hstatistics, NULL) \
+ V(HTracer*, htracer, NULL) \
ISOLATE_DEBUGGER_INIT_LIST(V)
class Isolate {
@@ -374,8 +397,8 @@ class Isolate {
thread_id_(thread_id),
stack_limit_(0),
thread_state_(NULL),
-#if !defined(__arm__) && defined(V8_TARGET_ARCH_ARM) || \
- !defined(__mips__) && defined(V8_TARGET_ARCH_MIPS)
+#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
+ !defined(__mips__) && V8_TARGET_ARCH_MIPS
simulator_(NULL),
#endif
next_(NULL),
@@ -387,8 +410,8 @@ class Isolate {
ThreadState* thread_state() const { return thread_state_; }
void set_thread_state(ThreadState* value) { thread_state_ = value; }
-#if !defined(__arm__) && defined(V8_TARGET_ARCH_ARM) || \
- !defined(__mips__) && defined(V8_TARGET_ARCH_MIPS)
+#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
+ !defined(__mips__) && V8_TARGET_ARCH_MIPS
Simulator* simulator() const { return simulator_; }
void set_simulator(Simulator* simulator) {
simulator_ = simulator;
@@ -405,8 +428,8 @@ class Isolate {
uintptr_t stack_limit_;
ThreadState* thread_state_;
-#if !defined(__arm__) && defined(V8_TARGET_ARCH_ARM) || \
- !defined(__mips__) && defined(V8_TARGET_ARCH_MIPS)
+#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
+ !defined(__mips__) && V8_TARGET_ARCH_MIPS
Simulator* simulator_;
#endif
@@ -466,8 +489,11 @@ class Isolate {
// for legacy API reasons.
void TearDown();
+ static void GlobalTearDown();
+
bool IsDefaultIsolate() const { return this == default_isolate_; }
+ static void SetCrashIfDefaultIsolateInitialized();
// Ensures that process-wide resources and the default isolate have been
// allocated. It is only necessary to call this method in rare cases, for
// example if you are using V8 from within the body of a static initializer.
@@ -478,6 +504,10 @@ class Isolate {
// If one does not yet exist, return null.
PerIsolateThreadData* FindPerThreadDataForThisThread();
+ // Find the PerThread for given (isolate, thread) combination
+ // If one does not yet exist, return null.
+ PerIsolateThreadData* FindPerThreadDataForThread(ThreadId thread_id);
+
#ifdef ENABLE_DEBUGGER_SUPPORT
// Get the debugger from the default isolate. Preinitializes the
// default isolate if needed.
@@ -509,10 +539,10 @@ class Isolate {
static void EnterDefaultIsolate();
// Mutex for serializing access to break control structures.
- Mutex* break_access() { return break_access_; }
+ RecursiveMutex* break_access() { return &break_access_; }
// Mutex for serializing access to debugger.
- Mutex* debugger_access() { return debugger_access_; }
+ RecursiveMutex* debugger_access() { return &debugger_access_; }
Address get_address_from_id(AddressId id);
@@ -524,16 +554,11 @@ class Isolate {
}
Context** context_address() { return &thread_local_top_.context_; }
- SaveContext* save_context() {return thread_local_top_.save_context_; }
+ SaveContext* save_context() { return thread_local_top_.save_context_; }
void set_save_context(SaveContext* save) {
thread_local_top_.save_context_ = save;
}
- // Access to the map of "new Object()".
- Map* empty_object_map() {
- return context()->native_context()->object_function()->map();
- }
-
// Access to current thread id.
ThreadId thread_id() { return thread_local_top_.thread_id_; }
void set_thread_id(ThreadId id) { thread_local_top_.thread_id_ = id; }
@@ -564,7 +589,7 @@ class Isolate {
void clear_pending_message() {
thread_local_top_.has_pending_message_ = false;
thread_local_top_.pending_message_obj_ = heap_.the_hole_value();
- thread_local_top_.pending_message_script_ = NULL;
+ thread_local_top_.pending_message_script_ = heap_.the_hole_value();
}
v8::TryCatch* try_catch_handler() {
return thread_local_top_.TryCatchHandler();
@@ -613,7 +638,7 @@ class Isolate {
bool IsExternallyCaught();
bool is_catchable_by_javascript(MaybeObject* exception) {
- return (exception != Failure::OutOfMemoryException()) &&
+ return (!exception->IsOutOfMemory()) &&
(exception != heap()->termination_exception());
}
@@ -631,9 +656,9 @@ class Isolate {
}
inline Address* handler_address() { return &thread_local_top_.handler_; }
- // Bottom JS entry (see StackTracer::Trace in log.cc).
- static Address js_entry_sp(ThreadLocalTop* thread) {
- return thread->js_entry_sp_;
+ // Bottom JS entry.
+ Address js_entry_sp() {
+ return thread_local_top_.js_entry_sp_;
}
inline Address* js_entry_sp_address() {
return &thread_local_top_.js_entry_sp_;
@@ -672,7 +697,8 @@ class Isolate {
// Scope currently can only be used for regular exceptions, not
// failures like OOM or termination exception.
isolate_(isolate),
- pending_exception_(isolate_->pending_exception()->ToObjectUnchecked()),
+ pending_exception_(isolate_->pending_exception()->ToObjectUnchecked(),
+ isolate_),
catcher_(isolate_->catcher())
{ }
@@ -705,6 +731,7 @@ class Isolate {
void PrintCurrentStackTrace(FILE* out);
void PrintStackTrace(FILE* out, char* thread_data);
void PrintStack(StringStream* accumulator);
+ void PrintStack(FILE* out);
void PrintStack();
Handle<String> StackTraceString();
NO_INLINE(void PushStackTraceAndDie(unsigned int magic,
@@ -715,11 +742,27 @@ class Isolate {
int frame_limit,
StackTrace::StackTraceOptions options);
- void CaptureAndSetCurrentStackTraceFor(Handle<JSObject> error_object);
+ Handle<JSArray> CaptureSimpleStackTrace(Handle<JSObject> error_object,
+ Handle<Object> caller,
+ int limit);
+ void CaptureAndSetDetailedStackTrace(Handle<JSObject> error_object);
// Returns if the top context may access the given global object. If
// the result is false, the pending exception is guaranteed to be
// set.
+
+ // TODO(yangguo): temporary wrappers
+ bool MayNamedAccessWrapper(Handle<JSObject> receiver,
+ Handle<Object> key,
+ v8::AccessType type) {
+ return MayNamedAccess(*receiver, *key, type);
+ }
+ bool MayIndexedAccessWrapper(Handle<JSObject> receiver,
+ uint32_t index,
+ v8::AccessType type) {
+ return MayIndexedAccess(*receiver, index, type);
+ }
+
bool MayNamedAccess(JSObject* receiver,
Object* key,
v8::AccessType type);
@@ -738,7 +781,12 @@ class Isolate {
// originally.
Failure* ReThrow(MaybeObject* exception);
void ScheduleThrow(Object* exception);
+ // Re-set pending message, script and positions reported to the TryCatch
+ // back to the TLS for re-use when rethrowing.
+ void RestorePendingMessageFromTryCatch(v8::TryCatch* handler);
void ReportPendingMessages();
+  // Return the pending message location, if any, or an unfilled structure.
+ MessageLocation GetMessageLocation();
Failure* ThrowIllegalOperation();
// Promote a scheduled exception to pending. Asserts has_scheduled_exception.
@@ -759,12 +807,12 @@ class Isolate {
// Out of resource exception helpers.
Failure* StackOverflow();
Failure* TerminateExecution();
+ void CancelTerminateExecution();
// Administration
void Iterate(ObjectVisitor* v);
void Iterate(ObjectVisitor* v, ThreadLocalTop* t);
char* Iterate(ObjectVisitor* v, char* t);
- void IterateThread(ThreadVisitor* v);
void IterateThread(ThreadVisitor* v, char* t);
@@ -808,9 +856,12 @@ class Isolate {
ISOLATE_INIT_ARRAY_LIST(GLOBAL_ARRAY_ACCESSOR)
#undef GLOBAL_ARRAY_ACCESSOR
-#define NATIVE_CONTEXT_FIELD_ACCESSOR(index, type, name) \
- Handle<type> name() { \
- return Handle<type>(context()->native_context()->name()); \
+#define NATIVE_CONTEXT_FIELD_ACCESSOR(index, type, name) \
+ Handle<type> name() { \
+ return Handle<type>(context()->native_context()->name(), this); \
+ } \
+ bool is_##name(type* value) { \
+ return context()->native_context()->is_##name(value); \
}
NATIVE_CONTEXT_FIELDS(NATIVE_CONTEXT_FIELD_ACCESSOR)
#undef NATIVE_CONTEXT_FIELD_ACCESSOR
@@ -875,10 +926,12 @@ class Isolate {
return inner_pointer_to_code_cache_;
}
- StringInputBuffer* write_input_buffer() { return write_input_buffer_; }
+ ConsStringIteratorOp* write_iterator() { return write_iterator_; }
GlobalHandles* global_handles() { return global_handles_; }
+ EternalHandles* eternal_handles() { return eternal_handles_; }
+
ThreadManager* thread_manager() { return thread_manager_; }
ContextSwitcher* context_switcher() { return context_switcher_; }
@@ -897,16 +950,16 @@ class Isolate {
return &jsregexp_canonrange_;
}
- StringInputBuffer* objects_string_compare_buffer_a() {
- return &objects_string_compare_buffer_a_;
+ ConsStringIteratorOp* objects_string_compare_iterator_a() {
+ return &objects_string_compare_iterator_a_;
}
- StringInputBuffer* objects_string_compare_buffer_b() {
- return &objects_string_compare_buffer_b_;
+ ConsStringIteratorOp* objects_string_compare_iterator_b() {
+ return &objects_string_compare_iterator_b_;
}
- StaticResource<StringInputBuffer>* objects_string_input_buffer() {
- return &objects_string_input_buffer_;
+ StaticResource<ConsStringIteratorOp>* objects_string_iterator() {
+ return &objects_string_iterator_;
}
RuntimeState* runtime_state() { return &runtime_state_; }
@@ -917,10 +970,6 @@ class Isolate {
bool fp_stubs_generated() { return fp_stubs_generated_; }
- StaticResource<SafeStringInputBuffer>* compiler_safe_string_input_buffer() {
- return &compiler_safe_string_input_buffer_;
- }
-
Builtins* builtins() { return &builtins_; }
void NotifyExtensionInstalled() {
@@ -945,6 +994,8 @@ class Isolate {
void PreallocatedStorageDelete(void* p);
void PreallocatedStorageInit(size_t size);
+ inline bool IsCodePreAgingActive();
+
#ifdef ENABLE_DEBUGGER_SUPPORT
Debugger* debugger() {
if (!NoBarrier_Load(&debugger_initialized_)) InitializeDebugger();
@@ -959,6 +1010,9 @@ class Isolate {
inline bool IsDebuggerActive();
inline bool DebuggerHasBreakPoints();
+ CpuProfiler* cpu_profiler() const { return cpu_profiler_; }
+ HeapProfiler* heap_profiler() const { return heap_profiler_; }
+
#ifdef DEBUG
HistogramInfo* heap_histograms() { return heap_histograms_; }
@@ -969,8 +1023,8 @@ class Isolate {
int* code_kind_statistics() { return code_kind_statistics_; }
#endif
-#if defined(V8_TARGET_ARCH_ARM) && !defined(__arm__) || \
- defined(V8_TARGET_ARCH_MIPS) && !defined(__mips__)
+#if V8_TARGET_ARCH_ARM && !defined(__arm__) || \
+ V8_TARGET_ARCH_MIPS && !defined(__mips__)
bool simulator_initialized() { return simulator_initialized_; }
void set_simulator_initialized(bool initialized) {
simulator_initialized_ = initialized;
@@ -993,36 +1047,18 @@ class Isolate {
static const int kJSRegexpStaticOffsetsVectorSize = 128;
- Address external_callback() {
- return thread_local_top_.external_callback_;
+ ExternalCallbackScope* external_callback_scope() {
+ return thread_local_top_.external_callback_scope_;
}
- void set_external_callback(Address callback) {
- thread_local_top_.external_callback_ = callback;
+ void set_external_callback_scope(ExternalCallbackScope* scope) {
+ thread_local_top_.external_callback_scope_ = scope;
}
StateTag current_vm_state() {
return thread_local_top_.current_vm_state_;
}
- void SetCurrentVMState(StateTag state) {
- if (RuntimeProfiler::IsEnabled()) {
- // Make sure thread local top is initialized.
- ASSERT(thread_local_top_.isolate_ == this);
- StateTag current_state = thread_local_top_.current_vm_state_;
- if (current_state != JS && state == JS) {
- // Non-JS -> JS transition.
- RuntimeProfiler::IsolateEnteredJS(this);
- } else if (current_state == JS && state != JS) {
- // JS -> non-JS transition.
- ASSERT(RuntimeProfiler::IsSomeIsolateInJS());
- RuntimeProfiler::IsolateExitedJS(this);
- } else {
- // Other types of state transitions are not interesting to the
- // runtime profiler, because they don't affect whether we're
- // in JS or not.
- ASSERT((current_state == JS) == (state == JS));
- }
- }
+ void set_current_vm_state(StateTag state) {
thread_local_top_.current_vm_state_ = state;
}
@@ -1036,12 +1072,12 @@ class Isolate {
thread_local_top_.top_lookup_result_ = top;
}
- bool context_exit_happened() {
- return context_exit_happened_;
- }
- void set_context_exit_happened(bool context_exit_happened) {
- context_exit_happened_ = context_exit_happened;
- }
+ bool IsDead() { return has_fatal_error_; }
+ void SignalFatalError() { has_fatal_error_ = true; }
+
+ bool use_crankshaft() const { return use_crankshaft_; }
+
+ bool initialized_from_snapshot() { return initialized_from_snapshot_; }
double time_millis_since_init() {
return OS::TimeCurrentMillis() - time_millis_at_init_;
@@ -1058,14 +1094,51 @@ class Isolate {
date_cache_ = date_cache;
}
+ Map* get_initial_js_array_map(ElementsKind kind);
+
+ bool IsFastArrayConstructorPrototypeChainIntact();
+
+ CodeStubInterfaceDescriptor*
+ code_stub_interface_descriptor(int index);
+
void IterateDeferredHandles(ObjectVisitor* visitor);
void LinkDeferredHandles(DeferredHandles* deferred_handles);
void UnlinkDeferredHandles(DeferredHandles* deferred_handles);
+#ifdef DEBUG
+ bool IsDeferredHandle(Object** location);
+#endif // DEBUG
+
OptimizingCompilerThread* optimizing_compiler_thread() {
- return &optimizing_compiler_thread_;
+ return optimizing_compiler_thread_;
+ }
+
+ // PreInits and returns a default isolate. Needed when a new thread tries
+ // to create a Locker for the first time (the lock itself is in the isolate).
+ // TODO(svenpanne) This method is on death row...
+ static v8::Isolate* GetDefaultIsolateForLocking();
+
+ SweeperThread** sweeper_threads() {
+ return sweeper_thread_;
+ }
+
+ int id() const { return static_cast<int>(id_); }
+
+ HStatistics* GetHStatistics();
+ HTracer* GetHTracer();
+
+ FunctionEntryHook function_entry_hook() { return function_entry_hook_; }
+ void set_function_entry_hook(FunctionEntryHook function_entry_hook) {
+ function_entry_hook_ = function_entry_hook;
}
+ void* stress_deopt_count_address() { return &stress_deopt_count_; }
+
+ inline RandomNumberGenerator* random_number_generator();
+
+ // Given an address occupied by a live code object, return that object.
+ Object* FindCodeObject(Address a);
+
private:
Isolate();
@@ -1093,7 +1166,6 @@ class Isolate {
PerIsolateThreadData* Lookup(Isolate* isolate, ThreadId thread_id);
void Insert(PerIsolateThreadData* data);
- void Remove(Isolate* isolate, ThreadId thread_id);
void Remove(PerIsolateThreadData* data);
void RemoveAllThreads(Isolate* isolate);
@@ -1128,7 +1200,7 @@ class Isolate {
// This mutex protects highest_thread_id_, thread_data_table_ and
// default_isolate_.
- static Mutex* process_wide_mutex_;
+ static Mutex process_wide_mutex_;
static Thread::LocalStorageKey per_isolate_thread_data_key_;
static Thread::LocalStorageKey isolate_key_;
@@ -1136,23 +1208,18 @@ class Isolate {
static Isolate* default_isolate_;
static ThreadDataTable* thread_data_table_;
+ // A global counter for all generated Isolates, might overflow.
+ static Atomic32 isolate_counter_;
+
void Deinit();
static void SetIsolateThreadLocals(Isolate* isolate,
PerIsolateThreadData* data);
- // Allocate and insert PerIsolateThreadData into the ThreadDataTable
- // (regardless of whether such data already exists).
- PerIsolateThreadData* AllocatePerIsolateThreadData(ThreadId thread_id);
-
// Find the PerThread for this particular (isolate, thread) combination.
// If one does not yet exist, allocate a new one.
PerIsolateThreadData* FindOrAllocatePerThreadDataForThisThread();
- // PreInits and returns a default isolate. Needed when a new thread tries
- // to create a Locker for the first time (the lock itself is in the isolate).
- static Isolate* GetDefaultIsolateForLocking();
-
// Initializes the current thread to run this Isolate.
// Not thread-safe. Multiple threads should not Enter/Exit the same isolate
// at the same time, this should be prevented using external locking.
@@ -1184,6 +1251,7 @@ class Isolate {
// the Error object.
bool IsErrorObject(Handle<Object> obj);
+ Atomic32 id_;
EntryStackItem* entry_stack_;
int stack_trace_nesting_level_;
StringStream* incomplete_message_;
@@ -1196,9 +1264,9 @@ class Isolate {
CompilationCache* compilation_cache_;
Counters* counters_;
CodeRange* code_range_;
- Mutex* break_access_;
+ RecursiveMutex break_access_;
Atomic32 debugger_initialized_;
- Mutex* debugger_access_;
+ RecursiveMutex debugger_access_;
Logger* logger_;
StackGuard stack_guard_;
StatsTable* stats_table_;
@@ -1221,36 +1289,43 @@ class Isolate {
PreallocatedStorage free_list_;
bool preallocated_storage_preallocated_;
InnerPointerToCodeCache* inner_pointer_to_code_cache_;
- StringInputBuffer* write_input_buffer_;
+ ConsStringIteratorOp* write_iterator_;
GlobalHandles* global_handles_;
+ EternalHandles* eternal_handles_;
ContextSwitcher* context_switcher_;
ThreadManager* thread_manager_;
RuntimeState runtime_state_;
bool fp_stubs_generated_;
- StaticResource<SafeStringInputBuffer> compiler_safe_string_input_buffer_;
Builtins builtins_;
bool has_installed_extensions_;
StringTracker* string_tracker_;
unibrow::Mapping<unibrow::Ecma262UnCanonicalize> jsregexp_uncanonicalize_;
unibrow::Mapping<unibrow::CanonicalizationRange> jsregexp_canonrange_;
- StringInputBuffer objects_string_compare_buffer_a_;
- StringInputBuffer objects_string_compare_buffer_b_;
- StaticResource<StringInputBuffer> objects_string_input_buffer_;
+ ConsStringIteratorOp objects_string_compare_iterator_a_;
+ ConsStringIteratorOp objects_string_compare_iterator_b_;
+ StaticResource<ConsStringIteratorOp> objects_string_iterator_;
unibrow::Mapping<unibrow::Ecma262Canonicalize>
regexp_macro_assembler_canonicalize_;
RegExpStack* regexp_stack_;
DateCache* date_cache_;
unibrow::Mapping<unibrow::Ecma262Canonicalize> interp_canonicalize_mapping_;
+ CodeStubInterfaceDescriptor* code_stub_interface_descriptors_;
+ RandomNumberGenerator* random_number_generator_;
+
+ // True if fatal error has been signaled for this isolate.
+ bool has_fatal_error_;
- // The garbage collector should be a little more aggressive when it knows
- // that a context was recently exited.
- bool context_exit_happened_;
+ // True if we are using the Crankshaft optimizing compiler.
+ bool use_crankshaft_;
+
+ // True if this isolate was initialized from a snapshot.
+ bool initialized_from_snapshot_;
// Time stamp at initialization.
double time_millis_at_init_;
-#if defined(V8_TARGET_ARCH_ARM) && !defined(__arm__) || \
- defined(V8_TARGET_ARCH_MIPS) && !defined(__mips__)
+#if V8_TARGET_ARCH_ARM && !defined(__arm__) || \
+ V8_TARGET_ARCH_MIPS && !defined(__mips__)
bool simulator_initialized_;
HashMap* simulator_i_cache_;
Redirection* simulator_redirection_;
@@ -1267,6 +1342,9 @@ class Isolate {
Debugger* debugger_;
Debug* debug_;
#endif
+ CpuProfiler* cpu_profiler_;
+ HeapProfiler* heap_profiler_;
+ FunctionEntryHook function_entry_hook_;
#define GLOBAL_BACKING_STORE(type, name, initialvalue) \
type name##_;
@@ -1290,17 +1368,23 @@ class Isolate {
#endif
DeferredHandles* deferred_handles_head_;
- OptimizingCompilerThread optimizing_compiler_thread_;
+ OptimizingCompilerThread* optimizing_compiler_thread_;
+ SweeperThread** sweeper_thread_;
+
+ // Counts deopt points if deopt_every_n_times is enabled.
+ unsigned int stress_deopt_count_;
friend class ExecutionAccess;
friend class HandleScopeImplementer;
friend class IsolateInitializer;
friend class OptimizingCompilerThread;
+ friend class SweeperThread;
friend class ThreadManager;
friend class Simulator;
friend class StackGuard;
friend class ThreadId;
friend class TestMemoryAllocatorScope;
+ friend class TestCodeRangeScope;
friend class v8::Isolate;
friend class v8::Locker;
friend class v8::Unlocker;
@@ -1317,15 +1401,8 @@ class SaveContext BASE_EMBEDDED {
inline explicit SaveContext(Isolate* isolate);
~SaveContext() {
- if (context_.is_null()) {
- Isolate* isolate = Isolate::Current();
- isolate->set_context(NULL);
- isolate->set_save_context(prev_);
- } else {
- Isolate* isolate = context_->GetIsolate();
- isolate->set_context(*context_);
- isolate->set_save_context(prev_);
- }
+ isolate_->set_context(context_.is_null() ? NULL : *context_);
+ isolate_->set_save_context(prev_);
}
Handle<Context> context() { return context_; }
@@ -1337,10 +1414,8 @@ class SaveContext BASE_EMBEDDED {
}
private:
+ Isolate* isolate_;
Handle<Context> context_;
-#if __GNUC_VERSION__ >= 40100 && __GNUC_VERSION__ < 40300
- Handle<Context> dummy_;
-#endif
SaveContext* prev_;
Address c_entry_fp_;
};
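
The rewritten SaveContext destructor no longer branches on whether the saved context handle is null and no longer re-derives the isolate (previously via Isolate::Current() or context_->GetIsolate()); it simply restores through the isolate pointer captured at construction. A minimal standalone sketch of that RAII shape, using placeholder types rather than V8's real Isolate and Context, might look like this:

    #include <cassert>

    struct Context {};                 // placeholder for v8::internal::Context

    struct MiniIsolate {               // stand-in for the real Isolate
      Context* current_context = nullptr;
      void set_context(Context* c) { current_context = c; }
      Context* context() const { return current_context; }
    };

    // Saves the isolate's current context on entry and restores it on exit.
    // Holding the isolate pointer means the destructor never has to look up a
    // thread-local "current" isolate, which is the point of the change above.
    class ScopedContextSave {
     public:
      explicit ScopedContextSave(MiniIsolate* isolate)
          : isolate_(isolate), saved_(isolate->context()) {}
      ~ScopedContextSave() { isolate_->set_context(saved_); }
     private:
      MiniIsolate* isolate_;
      Context* saved_;
    };

    int main() {
      MiniIsolate isolate;
      Context outer;
      isolate.set_context(&outer);
      {
        ScopedContextSave save(&isolate);
        isolate.set_context(nullptr);  // run with some other (here: no) context
      }                                // destructor restores 'outer'
      assert(isolate.context() == &outer);
      return 0;
    }
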
@@ -1349,21 +1424,19 @@ class SaveContext BASE_EMBEDDED {
class AssertNoContextChange BASE_EMBEDDED {
#ifdef DEBUG
public:
- AssertNoContextChange() :
- scope_(Isolate::Current()),
- context_(Isolate::Current()->context(), Isolate::Current()) {
- }
-
+ explicit AssertNoContextChange(Isolate* isolate)
+ : isolate_(isolate),
+ context_(isolate->context(), isolate) { }
~AssertNoContextChange() {
- ASSERT(Isolate::Current()->context() == *context_);
+ ASSERT(isolate_->context() == *context_);
}
private:
- HandleScope scope_;
+ Isolate* isolate_;
Handle<Context> context_;
#else
public:
- AssertNoContextChange() { }
+ explicit AssertNoContextChange(Isolate* isolate) { }
#endif
};
@@ -1375,11 +1448,11 @@ class ExecutionAccess BASE_EMBEDDED {
}
~ExecutionAccess() { Unlock(isolate_); }
- static void Lock(Isolate* isolate) { isolate->break_access_->Lock(); }
- static void Unlock(Isolate* isolate) { isolate->break_access_->Unlock(); }
+ static void Lock(Isolate* isolate) { isolate->break_access()->Lock(); }
+ static void Unlock(Isolate* isolate) { isolate->break_access()->Unlock(); }
static bool TryLock(Isolate* isolate) {
- return isolate->break_access_->TryLock();
+ return isolate->break_access()->TryLock();
}
private:
@@ -1394,12 +1467,7 @@ class StackLimitCheck BASE_EMBEDDED {
bool HasOverflowed() const {
StackGuard* stack_guard = isolate_->stack_guard();
- // Stack has overflowed in C++ code only if stack pointer exceeds the C++
- // stack guard and the limits are not set to interrupt values.
- // TODO(214): Stack overflows are ignored if a interrupt is pending. This
- // code should probably always use the initial C++ limit.
- return (reinterpret_cast<uintptr_t>(this) < stack_guard->climit()) &&
- stack_guard->IsStackOverflow();
+ return (reinterpret_cast<uintptr_t>(this) < stack_guard->real_climit());
}
private:
Isolate* isolate_;
@@ -1428,14 +1496,6 @@ class PostponeInterruptsScope BASE_EMBEDDED {
};
-// Temporary macros for accessing current isolate and its subobjects.
-// They provide better readability, especially when used a lot in the code.
-#define HEAP (v8::internal::Isolate::Current()->heap())
-#define FACTORY (v8::internal::Isolate::Current()->factory())
-#define ISOLATE (v8::internal::Isolate::Current())
-#define LOGGER (v8::internal::Isolate::Current()->logger())
-
-
// Tells whether the native context is marked with out of memory.
inline bool Context::has_out_of_memory() {
return native_context()->out_of_memory()->IsTrue();
@@ -1444,7 +1504,7 @@ inline bool Context::has_out_of_memory() {
// Mark the native context with out of memory.
inline void Context::mark_out_of_memory() {
- native_context()->set_out_of_memory(HEAP->true_value());
+ native_context()->set_out_of_memory(GetIsolate()->heap()->true_value());
}
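
StackLimitCheck::HasOverflowed now compares the address of the check object itself, which lives on the C++ stack, against the real (uninterruptible) stack limit instead of the interrupt-sensitive climit. The address-of-a-local trick can be sketched in ordinary C++ as below; the 512 KB budget and the helper names are invented for the example, and the sketch assumes a downward-growing stack:

    #include <cstdint>
    #include <cstdio>

    // Rough stack-depth guard: record an address near the stack base at
    // startup, then compare it with the address of a fresh local variable.
    static uintptr_t stack_base = 0;
    static const uintptr_t kStackBudget = 512 * 1024;  // example value only

    static bool StackLikelyOverflowed() {
      int probe;  // lives at (approximately) the current stack pointer
      uintptr_t here = reinterpret_cast<uintptr_t>(&probe);
      return stack_base - here > kStackBudget;
    }

    static int Recurse(int depth) {
      if (StackLikelyOverflowed()) return depth;  // bail out before a real overflow
      char padding[1024];                         // consume some stack per frame
      padding[0] = static_cast<char>(depth);
      return Recurse(depth + 1) + (padding[0] & 0);
    }

    int main() {
      int anchor;
      stack_base = reinterpret_cast<uintptr_t>(&anchor);
      std::printf("bailed out at depth %d\n", Recurse(0));
      return 0;
    }
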
diff --git a/deps/v8/src/json-parser.h b/deps/v8/src/json-parser.h
index ebe3db138..72c69100d 100644
--- a/deps/v8/src/json-parser.h
+++ b/deps/v8/src/json-parser.h
@@ -43,22 +43,40 @@ namespace internal {
template <bool seq_ascii>
class JsonParser BASE_EMBEDDED {
public:
- static Handle<Object> Parse(Handle<String> source, Zone* zone) {
- return JsonParser().ParseJson(source, zone);
+ static Handle<Object> Parse(Handle<String> source) {
+ return JsonParser(source).ParseJson();
}
static const int kEndOfString = -1;
private:
+ explicit JsonParser(Handle<String> source)
+ : source_(source),
+ source_length_(source->length()),
+ isolate_(source->map()->GetHeap()->isolate()),
+ factory_(isolate_->factory()),
+ zone_(isolate_),
+ object_constructor_(isolate_->native_context()->object_function(),
+ isolate_),
+ position_(-1) {
+ FlattenString(source_);
+ pretenure_ = (source_length_ >= kPretenureTreshold) ? TENURED : NOT_TENURED;
+
+ // Optimized fast case where we only have ASCII characters.
+ if (seq_ascii) {
+ seq_source_ = Handle<SeqOneByteString>::cast(source_);
+ }
+ }
+
// Parse a string containing a single JSON value.
- Handle<Object> ParseJson(Handle<String> source, Zone* zone);
+ Handle<Object> ParseJson();
inline void Advance() {
position_++;
if (position_ >= source_length_) {
c0_ = kEndOfString;
} else if (seq_ascii) {
- c0_ = seq_source_->SeqAsciiStringGet(position_);
+ c0_ = seq_source_->SeqOneByteStringGet(position_);
} else {
c0_ = source_->Get(position_);
}
@@ -102,10 +120,38 @@ class JsonParser BASE_EMBEDDED {
Handle<String> ParseJsonString() {
return ScanJsonString<false>();
}
- Handle<String> ParseJsonSymbol() {
+
+ bool ParseJsonString(Handle<String> expected) {
+ int length = expected->length();
+ if (source_->length() - position_ - 1 > length) {
+ DisallowHeapAllocation no_gc;
+ String::FlatContent content = expected->GetFlatContent();
+ if (content.IsAscii()) {
+ ASSERT_EQ('"', c0_);
+ const uint8_t* input_chars = seq_source_->GetChars() + position_ + 1;
+ const uint8_t* expected_chars = content.ToOneByteVector().start();
+ for (int i = 0; i < length; i++) {
+ uint8_t c0 = input_chars[i];
+ if (c0 != expected_chars[i] ||
+ c0 == '"' || c0 < 0x20 || c0 == '\\') {
+ return false;
+ }
+ }
+ if (input_chars[length] == '"') {
+ position_ = position_ + length + 1;
+ AdvanceSkipWhitespace();
+ return true;
+ }
+ }
+ }
+ return false;
+ }
+
+ Handle<String> ParseJsonInternalizedString() {
return ScanJsonString<true>();
}
- template <bool is_symbol>
+
+ template <bool is_internalized>
Handle<String> ScanJsonString();
// Creates a new string and copies prefix[start..end] into the beginning
// of it. Then scans the rest of the string, adding characters after the
@@ -151,43 +197,29 @@ class JsonParser BASE_EMBEDDED {
inline Isolate* isolate() { return isolate_; }
inline Factory* factory() { return factory_; }
inline Handle<JSFunction> object_constructor() { return object_constructor_; }
- inline Zone* zone() const { return zone_; }
static const int kInitialSpecialStringLength = 1024;
+ static const int kPretenureTreshold = 100 * 1024;
private:
+ Zone* zone() { return &zone_; }
+
Handle<String> source_;
int source_length_;
- Handle<SeqAsciiString> seq_source_;
+ Handle<SeqOneByteString> seq_source_;
+ PretenureFlag pretenure_;
Isolate* isolate_;
Factory* factory_;
+ Zone zone_;
Handle<JSFunction> object_constructor_;
uc32 c0_;
int position_;
- Zone* zone_;
};
template <bool seq_ascii>
-Handle<Object> JsonParser<seq_ascii>::ParseJson(Handle<String> source,
- Zone* zone) {
- isolate_ = source->map()->GetHeap()->isolate();
- factory_ = isolate_->factory();
- object_constructor_ =
- Handle<JSFunction>(isolate()->native_context()->object_function());
- zone_ = zone;
- FlattenString(source);
- source_ = source;
- source_length_ = source_->length();
-
- // Optimized fast case where we only have ASCII characters.
- if (seq_ascii) {
- seq_source_ = Handle<SeqAsciiString>::cast(source_);
- }
-
- // Set initial position right before the string.
- position_ = -1;
+Handle<Object> JsonParser<seq_ascii>::ParseJson() {
// Advance to the first character (possibly EOS)
AdvanceSkipWhitespace();
Handle<Object> result = ParseJsonValue();
@@ -225,14 +257,15 @@ Handle<Object> JsonParser<seq_ascii>::ParseJson(Handle<String> source,
break;
default:
message = "unexpected_token";
- Handle<Object> name = LookupSingleCharacterStringFromCode(c0_);
+ Handle<Object> name =
+ LookupSingleCharacterStringFromCode(isolate_, c0_);
Handle<FixedArray> element = factory->NewFixedArray(1);
element->set(0, *name);
array = factory->NewJSArrayWithElements(element);
break;
}
- MessageLocation location(factory->NewScript(source),
+ MessageLocation location(factory->NewScript(source_),
position_,
position_ + 1);
Handle<Object> result = factory->NewSyntaxError(message, array);
@@ -287,12 +320,15 @@ Handle<Object> JsonParser<seq_ascii>::ParseJsonValue() {
// Parse a JSON object. Position must be right at '{'.
template <bool seq_ascii>
Handle<Object> JsonParser<seq_ascii>::ParseJsonObject() {
- HandleScope scope;
- Handle<Object> prototype;
+ HandleScope scope(isolate());
Handle<JSObject> json_object =
- factory()->NewJSObject(object_constructor());
+ factory()->NewJSObject(object_constructor(), pretenure_);
+ Handle<Map> map(json_object->map());
+ ZoneList<Handle<Object> > properties(8, zone());
ASSERT_EQ(c0_, '{');
+ bool transitioning = true;
+
AdvanceSkipWhitespace();
if (c0_ != '}') {
do {
@@ -336,29 +372,94 @@ Handle<Object> JsonParser<seq_ascii>::ParseJsonObject() {
c0_ = '"';
#endif
- Handle<String> key = ParseJsonSymbol();
- if (key.is_null() || c0_ != ':') return ReportUnexpectedCharacter();
+ Handle<String> key;
+ Handle<Object> value;
+
+ // Try to follow existing transitions as long as possible. Once we stop
+ // transitioning, no transition can be found anymore.
+ if (transitioning) {
+ // First check whether there is a single expected transition. If so, try
+ // to parse it first.
+ bool follow_expected = false;
+ Handle<Map> target;
+ if (seq_ascii) {
+ key = JSObject::ExpectedTransitionKey(map);
+ follow_expected = !key.is_null() && ParseJsonString(key);
+ }
+ // If the expected transition hits, follow it.
+ if (follow_expected) {
+ target = JSObject::ExpectedTransitionTarget(map);
+ } else {
+ // If the expected transition failed, parse an internalized string and
+ // try to find a matching transition.
+ key = ParseJsonInternalizedString();
+ if (key.is_null()) return ReportUnexpectedCharacter();
+
+ target = JSObject::FindTransitionToField(map, key);
+ // If a transition was found, follow it and continue.
+ transitioning = !target.is_null();
+ }
+ if (c0_ != ':') return ReportUnexpectedCharacter();
- AdvanceSkipWhitespace();
- Handle<Object> value = ParseJsonValue();
- if (value.is_null()) return ReportUnexpectedCharacter();
+ AdvanceSkipWhitespace();
+ value = ParseJsonValue();
+ if (value.is_null()) return ReportUnexpectedCharacter();
+
+ if (transitioning) {
+ int descriptor = map->NumberOfOwnDescriptors();
+ PropertyDetails details =
+ target->instance_descriptors()->GetDetails(descriptor);
+ Representation expected_representation = details.representation();
+
+ if (value->FitsRepresentation(expected_representation)) {
+            // If the target representation is double but the parsed value is
+            // a Smi, box it in a freshly allocated HeapNumber.
+ if (FLAG_track_double_fields &&
+ value->IsSmi() &&
+ expected_representation.IsDouble()) {
+ value = factory()->NewHeapNumber(
+ Handle<Smi>::cast(value)->value());
+ }
+ properties.Add(value, zone());
+ map = target;
+ continue;
+ } else {
+ transitioning = false;
+ }
+ }
- if (key->Equals(isolate()->heap()->Proto_symbol())) {
- prototype = value;
- } else {
- if (JSObject::TryTransitionToField(json_object, key)) {
- int index = json_object->LastAddedFieldIndex();
- json_object->FastPropertyAtPut(index, *value);
- } else {
- JSObject::SetLocalPropertyIgnoreAttributes(
- json_object, key, value, NONE);
+ // Commit the intermediate state to the object and stop transitioning.
+ JSObject::AllocateStorageForMap(json_object, map);
+ int length = properties.length();
+ for (int i = 0; i < length; i++) {
+ Handle<Object> value = properties[i];
+ json_object->FastPropertyAtPut(i, *value);
}
+ } else {
+ key = ParseJsonInternalizedString();
+ if (key.is_null() || c0_ != ':') return ReportUnexpectedCharacter();
+
+ AdvanceSkipWhitespace();
+ value = ParseJsonValue();
+ if (value.is_null()) return ReportUnexpectedCharacter();
}
+
+ JSObject::SetLocalPropertyIgnoreAttributes(
+ json_object, key, value, NONE);
} while (MatchSkipWhiteSpace(','));
if (c0_ != '}') {
return ReportUnexpectedCharacter();
}
- if (!prototype.is_null()) SetPrototype(json_object, prototype);
+
+ // If we transitioned until the very end, transition the map now.
+ if (transitioning) {
+ JSObject::AllocateStorageForMap(json_object, map);
+ int length = properties.length();
+ for (int i = 0; i < length; i++) {
+ Handle<Object> value = properties[i];
+ json_object->FastPropertyAtPut(i, *value);
+ }
+ }
}
AdvanceSkipWhitespace();
return scope.CloseAndEscape(json_object);
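
The rewritten ParseJsonObject leans on V8's map (hidden class) transition tree: while objects in the input keep the same key order, the parser predicts the next key from the current map (ExpectedTransitionKey), confirms it with a cheap byte comparison (ParseJsonString(expected)), and follows the existing transition, falling back to generic property insertion only once prediction fails. A toy version of the idea, with an explicit shape tree instead of V8 maps and all names invented for the illustration:

    #include <cassert>
    #include <map>
    #include <memory>
    #include <string>
    #include <vector>

    // A "shape" records which shape an object moves to when a given key is
    // added next, mimicking V8's map transition tree. Ownership of all shapes
    // lives in a flat arena so the nodes can hold plain pointers.
    struct Shape {
      std::map<std::string, Shape*> transitions;

      // The single expected next key, if exactly one outgoing transition exists.
      const std::string* ExpectedKey() const {
        return transitions.size() == 1 ? &transitions.begin()->first : nullptr;
      }
    };

    struct ShapeTree {
      Shape root;
      std::vector<std::unique_ptr<Shape>> arena;

      Shape* Transition(Shape* from, const std::string& key) {
        auto it = from->transitions.find(key);
        if (it != from->transitions.end()) return it->second;
        arena.push_back(std::unique_ptr<Shape>(new Shape()));
        Shape* child = arena.back().get();
        from->transitions[key] = child;
        return child;
      }
    };

    // "Parses" one object given as a key sequence. When the current shape has
    // exactly one expected transition and the next key matches it, the lookup
    // is skipped entirely -- the analogue of the expected-transition fast path.
    Shape* BuildObject(ShapeTree* tree, const std::vector<std::string>& keys) {
      Shape* shape = &tree->root;
      for (const std::string& key : keys) {
        const std::string* expected = shape->ExpectedKey();
        if (expected != nullptr && *expected == key) {
          shape = shape->transitions.begin()->second;  // fast path: predicted key
        } else {
          shape = tree->Transition(shape, key);        // slow path: search/insert
        }
      }
      return shape;
    }

    int main() {
      ShapeTree tree;
      Shape* a = BuildObject(&tree, {"x", "y"});
      Shape* b = BuildObject(&tree, {"x", "y"});
      assert(a == b);  // same key order ends in the same shape, like V8 maps
      return 0;
    }
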
@@ -367,8 +468,7 @@ Handle<Object> JsonParser<seq_ascii>::ParseJsonObject() {
// Parse a JSON array. Position must be right at '['.
template <bool seq_ascii>
Handle<Object> JsonParser<seq_ascii>::ParseJsonArray() {
- HandleScope scope;
- ZoneScope zone_scope(zone(), DELETE_ON_EXIT);
+ HandleScope scope(isolate());
ZoneList<Handle<Object> > elements(4, zone());
ASSERT_EQ(c0_, '[');
@@ -386,11 +486,12 @@ Handle<Object> JsonParser<seq_ascii>::ParseJsonArray() {
AdvanceSkipWhitespace();
// Allocate a fixed array with all the elements.
Handle<FixedArray> fast_elements =
- factory()->NewFixedArray(elements.length());
+ factory()->NewFixedArray(elements.length(), pretenure_);
for (int i = 0, n = elements.length(); i < n; i++) {
fast_elements->set(i, *elements[i]);
}
- Handle<Object> json_array = factory()->NewJSArrayWithElements(fast_elements);
+ Handle<Object> json_array = factory()->NewJSArrayWithElements(
+ fast_elements, FAST_ELEMENTS, pretenure_);
return scope.CloseAndEscape(json_array);
}
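
ParseJsonArray, like the object and string paths, threads the pretenure_ flag picked in the constructor into every allocation, so a document of at least kPretenureTreshold (100 KB) builds its arrays and strings directly in old space rather than having the first scavenge copy them out of the nursery. A rough standalone analogue of that size-based placement decision (plain C++, not V8's allocator; only the 100 KB threshold is taken from the diff):

    #include <cstddef>
    #include <cstdio>

    // Inputs at or above the threshold get long-lived ("tenured") allocations,
    // on the theory that a huge JSON document produces objects that will
    // survive the next young-generation GC anyway.
    enum PlacementHint { kYoung, kTenured };

    PlacementHint ChoosePlacement(size_t source_length) {
      const size_t kThreshold = 100 * 1024;
      return source_length >= kThreshold ? kTenured : kYoung;
    }

    int main() {
      std::printf("%d %d\n", ChoosePlacement(4 * 1024), ChoosePlacement(256 * 1024));
      return 0;  // prints "0 1": small input stays young, large input is tenured
    }
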
@@ -440,25 +541,25 @@ Handle<Object> JsonParser<seq_ascii>::ParseJsonNumber() {
int length = position_ - beg_pos;
double number;
if (seq_ascii) {
- Vector<const char> chars(seq_source_->GetChars() + beg_pos, length);
+ Vector<const uint8_t> chars(seq_source_->GetChars() + beg_pos, length);
number = StringToDouble(isolate()->unicode_cache(),
- chars,
+ Vector<const char>::cast(chars),
NO_FLAGS, // Hex, octal or trailing junk.
OS::nan_value());
} else {
- Vector<char> buffer = Vector<char>::New(length);
+ Vector<uint8_t> buffer = Vector<uint8_t>::New(length);
String::WriteToFlat(*source_, buffer.start(), beg_pos, position_);
- Vector<const char> result =
- Vector<const char>(reinterpret_cast<const char*>(buffer.start()),
- length);
+ Vector<const uint8_t> result =
+ Vector<const uint8_t>(buffer.start(), length);
number = StringToDouble(isolate()->unicode_cache(),
- result,
- NO_FLAGS, // Hex, octal or trailing junk.
- 0.0);
+ // TODO(dcarney): Convert StringToDouble to uint_t.
+ Vector<const char>::cast(result),
+ NO_FLAGS, // Hex, octal or trailing junk.
+ 0.0);
buffer.Dispose();
}
SkipWhitespace();
- return factory()->NewNumber(number);
+ return factory()->NewNumber(number, pretenure_);
}
@@ -471,21 +572,27 @@ inline void SeqStringSet(Handle<SeqTwoByteString> seq_str, int i, uc32 c) {
}
template <>
-inline void SeqStringSet(Handle<SeqAsciiString> seq_str, int i, uc32 c) {
- seq_str->SeqAsciiStringSet(i, c);
+inline void SeqStringSet(Handle<SeqOneByteString> seq_str, int i, uc32 c) {
+ seq_str->SeqOneByteStringSet(i, c);
}
template <typename StringType>
-inline Handle<StringType> NewRawString(Factory* factory, int length);
+inline Handle<StringType> NewRawString(Factory* factory,
+ int length,
+ PretenureFlag pretenure);
template <>
-inline Handle<SeqTwoByteString> NewRawString(Factory* factory, int length) {
- return factory->NewRawTwoByteString(length, NOT_TENURED);
+inline Handle<SeqTwoByteString> NewRawString(Factory* factory,
+ int length,
+ PretenureFlag pretenure) {
+ return factory->NewRawTwoByteString(length, pretenure);
}
template <>
-inline Handle<SeqAsciiString> NewRawString(Factory* factory, int length) {
- return factory->NewRawAsciiString(length, NOT_TENURED);
+inline Handle<SeqOneByteString> NewRawString(Factory* factory,
+ int length,
+ PretenureFlag pretenure) {
+ return factory->NewRawOneByteString(length, pretenure);
}
@@ -499,9 +606,10 @@ Handle<String> JsonParser<seq_ascii>::SlowScanJsonString(
int count = end - start;
int max_length = count + source_length_ - position_;
int length = Min(max_length, Max(kInitialSpecialStringLength, 2 * count));
- Handle<StringType> seq_str = NewRawString<StringType>(factory(), length);
+ Handle<StringType> seq_string =
+ NewRawString<StringType>(factory(), length, pretenure_);
// Copy prefix into seq_str.
- SinkChar* dest = seq_str->GetChars();
+ SinkChar* dest = seq_string->GetChars();
String::WriteToFlat(*prefix, dest, start, end);
while (c0_ != '"') {
@@ -509,7 +617,7 @@ Handle<String> JsonParser<seq_ascii>::SlowScanJsonString(
if (c0_ < 0x20) return Handle<String>::null();
if (count >= length) {
// We need to create a longer sequential string for the result.
- return SlowScanJsonString<StringType, SinkChar>(seq_str, 0, count);
+ return SlowScanJsonString<StringType, SinkChar>(seq_string, 0, count);
}
if (c0_ != '\\') {
// If the sink can contain UC16 characters, or source_ contains only
@@ -518,12 +626,12 @@ Handle<String> JsonParser<seq_ascii>::SlowScanJsonString(
// in the ASCII sink.
if (sizeof(SinkChar) == kUC16Size ||
seq_ascii ||
- c0_ <= kMaxAsciiCharCode) {
- SeqStringSet(seq_str, count++, c0_);
+ c0_ <= String::kMaxOneByteCharCode) {
+ SeqStringSet(seq_string, count++, c0_);
Advance();
} else {
- // StringType is SeqAsciiString and we just read a non-ASCII char.
- return SlowScanJsonString<SeqTwoByteString, uc16>(seq_str, 0, count);
+ // StringType is SeqOneByteString and we just read a non-ASCII char.
+ return SlowScanJsonString<SeqTwoByteString, uc16>(seq_string, 0, count);
}
} else {
Advance(); // Advance past the \.
@@ -531,22 +639,22 @@ Handle<String> JsonParser<seq_ascii>::SlowScanJsonString(
case '"':
case '\\':
case '/':
- SeqStringSet(seq_str, count++, c0_);
+ SeqStringSet(seq_string, count++, c0_);
break;
case 'b':
- SeqStringSet(seq_str, count++, '\x08');
+ SeqStringSet(seq_string, count++, '\x08');
break;
case 'f':
- SeqStringSet(seq_str, count++, '\x0c');
+ SeqStringSet(seq_string, count++, '\x0c');
break;
case 'n':
- SeqStringSet(seq_str, count++, '\x0a');
+ SeqStringSet(seq_string, count++, '\x0a');
break;
case 'r':
- SeqStringSet(seq_str, count++, '\x0d');
+ SeqStringSet(seq_string, count++, '\x0d');
break;
case 't':
- SeqStringSet(seq_str, count++, '\x09');
+ SeqStringSet(seq_string, count++, '\x09');
break;
case 'u': {
uc32 value = 0;
@@ -558,14 +666,15 @@ Handle<String> JsonParser<seq_ascii>::SlowScanJsonString(
}
value = value * 16 + digit;
}
- if (sizeof(SinkChar) == kUC16Size || value <= kMaxAsciiCharCode) {
- SeqStringSet(seq_str, count++, value);
+ if (sizeof(SinkChar) == kUC16Size ||
+ value <= String::kMaxOneByteCharCode) {
+ SeqStringSet(seq_string, count++, value);
break;
} else {
- // StringType is SeqAsciiString and we just read a non-ASCII char.
+ // StringType is SeqOneByteString and we just read a non-ASCII char.
position_ -= 6; // Rewind position_ to \ in \uxxxx.
Advance();
- return SlowScanJsonString<SeqTwoByteString, uc16>(seq_str,
+ return SlowScanJsonString<SeqTwoByteString, uc16>(seq_string,
0,
count);
}
@@ -576,40 +685,30 @@ Handle<String> JsonParser<seq_ascii>::SlowScanJsonString(
Advance();
}
}
- // Shrink seq_string length to count.
- if (isolate()->heap()->InNewSpace(*seq_str)) {
- isolate()->heap()->new_space()->
- template ShrinkStringAtAllocationBoundary<StringType>(
- *seq_str, count);
- } else {
- int string_size = StringType::SizeFor(count);
- int allocated_string_size = StringType::SizeFor(length);
- int delta = allocated_string_size - string_size;
- Address start_filler_object = seq_str->address() + string_size;
- seq_str->set_length(count);
- isolate()->heap()->CreateFillerObjectAt(start_filler_object, delta);
- }
+
ASSERT_EQ('"', c0_);
// Advance past the last '"'.
AdvanceSkipWhitespace();
- return seq_str;
+
+ // Shrink seq_string length to count and return.
+ return SeqString::Truncate(seq_string, count);
}
template <bool seq_ascii>
-template <bool is_symbol>
+template <bool is_internalized>
Handle<String> JsonParser<seq_ascii>::ScanJsonString() {
ASSERT_EQ('"', c0_);
Advance();
if (c0_ == '"') {
AdvanceSkipWhitespace();
- return Handle<String>(isolate()->heap()->empty_string());
+ return factory()->empty_string();
}
- if (seq_ascii && is_symbol) {
- // Fast path for existing symbols. If the the string being parsed is not
- // a known symbol, contains backslashes or unexpectedly reaches the end of
- // string, return with an empty handle.
+ if (seq_ascii && is_internalized) {
+    // Fast path for existing internalized strings. If the string being
+ // parsed is not a known internalized string, contains backslashes or
+ // unexpectedly reaches the end of string, return with an empty handle.
uint32_t running_hash = isolate()->heap()->HashSeed();
int position = position_;
uc32 c0 = c0_;
@@ -618,41 +717,61 @@ Handle<String> JsonParser<seq_ascii>::ScanJsonString() {
c0_ = c0;
int beg_pos = position_;
position_ = position;
- return SlowScanJsonString<SeqAsciiString, char>(source_,
- beg_pos,
- position_);
+ return SlowScanJsonString<SeqOneByteString, uint8_t>(source_,
+ beg_pos,
+ position_);
}
if (c0 < 0x20) return Handle<String>::null();
- running_hash = StringHasher::AddCharacterCore(running_hash, c0);
+ if (static_cast<uint32_t>(c0) >
+ unibrow::Utf16::kMaxNonSurrogateCharCode) {
+ running_hash =
+ StringHasher::AddCharacterCore(running_hash,
+ unibrow::Utf16::LeadSurrogate(c0));
+ running_hash =
+ StringHasher::AddCharacterCore(running_hash,
+ unibrow::Utf16::TrailSurrogate(c0));
+ } else {
+ running_hash = StringHasher::AddCharacterCore(running_hash, c0);
+ }
position++;
if (position >= source_length_) return Handle<String>::null();
- c0 = seq_source_->SeqAsciiStringGet(position);
+ c0 = seq_source_->SeqOneByteStringGet(position);
} while (c0 != '"');
int length = position - position_;
uint32_t hash = (length <= String::kMaxHashCalcLength)
? StringHasher::GetHashCore(running_hash) : length;
- Vector<const char> string_vector(
+ Vector<const uint8_t> string_vector(
seq_source_->GetChars() + position_, length);
- SymbolTable* symbol_table = isolate()->heap()->symbol_table();
- uint32_t capacity = symbol_table->Capacity();
- uint32_t entry = SymbolTable::FirstProbe(hash, capacity);
+ StringTable* string_table = isolate()->heap()->string_table();
+ uint32_t capacity = string_table->Capacity();
+ uint32_t entry = StringTable::FirstProbe(hash, capacity);
uint32_t count = 1;
+ Handle<String> result;
while (true) {
- Object* element = symbol_table->KeyAt(entry);
- if (element == isolate()->heap()->raw_unchecked_undefined_value()) {
+ Object* element = string_table->KeyAt(entry);
+ if (element == isolate()->heap()->undefined_value()) {
// Lookup failure.
+ result = factory()->InternalizeOneByteString(
+ seq_source_, position_, length);
break;
}
- if (element != isolate()->heap()->raw_unchecked_the_hole_value() &&
- String::cast(element)->IsAsciiEqualTo(string_vector)) {
- // Lookup success, update the current position.
- position_ = position;
- // Advance past the last '"'.
- AdvanceSkipWhitespace();
- return Handle<String>(String::cast(element));
+ if (element != isolate()->heap()->the_hole_value() &&
+ String::cast(element)->IsOneByteEqualTo(string_vector)) {
+ result = Handle<String>(String::cast(element), isolate());
+#ifdef DEBUG
+ uint32_t hash_field =
+ (hash << String::kHashShift) | String::kIsNotArrayIndexMask;
+ ASSERT_EQ(static_cast<int>(result->Hash()),
+ static_cast<int>(hash_field >> String::kHashShift));
+#endif
+ break;
}
- entry = SymbolTable::NextProbe(entry, count++, capacity);
+ entry = StringTable::NextProbe(entry, count++, capacity);
}
+ position_ = position;
+ // Advance past the last '"'.
+ AdvanceSkipWhitespace();
+ return result;
}
int beg_pos = position_;
@@ -661,7 +780,7 @@ Handle<String> JsonParser<seq_ascii>::ScanJsonString() {
// Check for control character (0x00-0x1f) or unterminated string (<0).
if (c0_ < 0x20) return Handle<String>::null();
if (c0_ != '\\') {
- if (seq_ascii || c0_ <= kMaxAsciiCharCode) {
+ if (seq_ascii || c0_ <= String::kMaxOneByteCharCode) {
Advance();
} else {
return SlowScanJsonString<SeqTwoByteString, uc16>(source_,
@@ -669,22 +788,16 @@ Handle<String> JsonParser<seq_ascii>::ScanJsonString() {
position_);
}
} else {
- return SlowScanJsonString<SeqAsciiString, char>(source_,
- beg_pos,
- position_);
+ return SlowScanJsonString<SeqOneByteString, uint8_t>(source_,
+ beg_pos,
+ position_);
}
} while (c0_ != '"');
int length = position_ - beg_pos;
- Handle<String> result;
- if (seq_ascii && is_symbol) {
- result = factory()->LookupAsciiSymbol(seq_source_,
- beg_pos,
- length);
- } else {
- result = factory()->NewRawAsciiString(length);
- char* dest = SeqAsciiString::cast(*result)->GetChars();
- String::WriteToFlat(*source_, dest, beg_pos, position_);
- }
+ Handle<String> result = factory()->NewRawOneByteString(length, pretenure_);
+ uint8_t* dest = SeqOneByteString::cast(*result)->GetChars();
+ String::WriteToFlat(*source_, dest, beg_pos, position_);
+
ASSERT_EQ('"', c0_);
// Advance past the last '"'.
AdvanceSkipWhitespace();
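
The internalized-string fast path in ScanJsonString hashes the key bytes while scanning for the closing quote, then probes the string table with that hash, so a key that already exists is reused without first materializing a new string. The same scan-hash-probe structure can be illustrated with an ordinary FNV-1a hash and a standard container standing in for V8's StringHasher and StringTable; the collision handling is deliberately simplified and noted in the comments:

    #include <cassert>
    #include <cstdint>
    #include <string>
    #include <unordered_map>

    // Intern table keyed by the hash computed during the scan. A real
    // implementation (like V8's StringTable) verifies the bytes on a hash hit;
    // this sketch skips that check and simply notes the assumption.
    using InternTable = std::unordered_map<uint64_t, std::string>;

    struct ScanResult {
      const std::string* interned;  // points into the intern table
      size_t end;                   // index of the closing '"'
    };

    // Scans src starting at the first key byte, hashing (FNV-1a) until the
    // closing quote, then reuses an existing entry or inserts a new one.
    ScanResult ScanAndIntern(const std::string& src, size_t pos, InternTable* table) {
      uint64_t hash = 1469598103934665603ull;       // FNV offset basis
      size_t start = pos;
      while (src[pos] != '"') {
        hash = (hash ^ static_cast<uint8_t>(src[pos])) * 1099511628211ull;
        ++pos;
      }
      auto it = table->find(hash);
      if (it == table->end()) {                     // miss: materialize the key
        it = table->emplace(hash, src.substr(start, pos - start)).first;
      }
      return ScanResult{&it->second, pos};
    }

    int main() {
      InternTable table;
      std::string json = "name\": 1, \"name\": 2";
      ScanResult a = ScanAndIntern(json, 0, &table);
      ScanResult b = ScanAndIntern(json, 11, &table);
      assert(a.interned == b.interned);             // the second "name" is reused
      return 0;
    }
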
diff --git a/deps/v8/src/json-stringifier.h b/deps/v8/src/json-stringifier.h
new file mode 100644
index 000000000..0d17b356a
--- /dev/null
+++ b/deps/v8/src/json-stringifier.h
@@ -0,0 +1,855 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_JSON_STRINGIFIER_H_
+#define V8_JSON_STRINGIFIER_H_
+
+#include "v8.h"
+#include "v8utils.h"
+#include "v8conversions.h"
+
+namespace v8 {
+namespace internal {
+
+class BasicJsonStringifier BASE_EMBEDDED {
+ public:
+ explicit BasicJsonStringifier(Isolate* isolate);
+
+ MaybeObject* Stringify(Handle<Object> object);
+
+ INLINE(static MaybeObject* StringifyString(Isolate* isolate,
+ Handle<String> object));
+
+ private:
+ static const int kInitialPartLength = 32;
+ static const int kMaxPartLength = 16 * 1024;
+ static const int kPartLengthGrowthFactor = 2;
+
+ enum Result { UNCHANGED, SUCCESS, EXCEPTION, CIRCULAR, STACK_OVERFLOW };
+
+ void Extend();
+
+ void ChangeEncoding();
+
+ INLINE(void ShrinkCurrentPart());
+
+ template <bool is_ascii, typename Char>
+ INLINE(void Append_(Char c));
+
+ template <bool is_ascii, typename Char>
+ INLINE(void Append_(const Char* chars));
+
+ INLINE(void Append(uint8_t c)) {
+ if (is_ascii_) {
+ Append_<true>(c);
+ } else {
+ Append_<false>(c);
+ }
+ }
+
+ INLINE(void AppendAscii(const char* chars)) {
+ if (is_ascii_) {
+ Append_<true>(reinterpret_cast<const uint8_t*>(chars));
+ } else {
+ Append_<false>(reinterpret_cast<const uint8_t*>(chars));
+ }
+ }
+
+ Handle<Object> ApplyToJsonFunction(Handle<Object> object,
+ Handle<Object> key);
+
+ Result SerializeGeneric(Handle<Object> object,
+ Handle<Object> key,
+ bool deferred_comma,
+ bool deferred_key);
+
+ template <typename ResultType, typename Char>
+ INLINE(static MaybeObject* StringifyString_(Isolate* isolate,
+ Vector<Char> vector,
+ Handle<String> result));
+
+ // Entry point to serialize the object.
+ INLINE(Result SerializeObject(Handle<Object> obj)) {
+ return Serialize_<false>(obj, false, factory_->empty_string());
+ }
+
+ // Serialize an array element.
+ // The index may serve as argument for the toJSON function.
+ INLINE(Result SerializeElement(Isolate* isolate,
+ Handle<Object> object,
+ int i)) {
+ return Serialize_<false>(object,
+ false,
+ Handle<Object>(Smi::FromInt(i), isolate));
+ }
+
+  // Serialize an object property.
+ // The key may or may not be serialized depending on the property.
+ // The key may also serve as argument for the toJSON function.
+ INLINE(Result SerializeProperty(Handle<Object> object,
+ bool deferred_comma,
+ Handle<String> deferred_key)) {
+ ASSERT(!deferred_key.is_null());
+ return Serialize_<true>(object, deferred_comma, deferred_key);
+ }
+
+ template <bool deferred_string_key>
+ Result Serialize_(Handle<Object> object, bool comma, Handle<Object> key);
+
+ void SerializeDeferredKey(bool deferred_comma, Handle<Object> deferred_key) {
+ if (deferred_comma) Append(',');
+ SerializeString(Handle<String>::cast(deferred_key));
+ Append(':');
+ }
+
+ Result SerializeSmi(Smi* object);
+
+ Result SerializeDouble(double number);
+ INLINE(Result SerializeHeapNumber(Handle<HeapNumber> object)) {
+ return SerializeDouble(object->value());
+ }
+
+ Result SerializeJSValue(Handle<JSValue> object);
+
+ INLINE(Result SerializeJSArray(Handle<JSArray> object));
+ INLINE(Result SerializeJSObject(Handle<JSObject> object));
+
+ Result SerializeJSArraySlow(Handle<JSArray> object, int length);
+
+ void SerializeString(Handle<String> object);
+
+ template <typename SrcChar, typename DestChar>
+ INLINE(static int SerializeStringUnchecked_(const SrcChar* src,
+ DestChar* dest,
+ int length));
+
+ template <bool is_ascii, typename Char>
+ INLINE(void SerializeString_(Handle<String> string));
+
+ template <typename Char>
+ INLINE(static bool DoNotEscape(Char c));
+
+ template <typename Char>
+ INLINE(static Vector<const Char> GetCharVector(Handle<String> string));
+
+ Result StackPush(Handle<Object> object);
+ void StackPop();
+
+ INLINE(Handle<String> accumulator()) {
+ return Handle<String>(String::cast(accumulator_store_->value()), isolate_);
+ }
+
+ INLINE(void set_accumulator(Handle<String> string)) {
+ return accumulator_store_->set_value(*string);
+ }
+
+ Isolate* isolate_;
+ Factory* factory_;
+ // We use a value wrapper for the string accumulator to keep the
+ // (indirect) handle to it in the outermost handle scope.
+ Handle<JSValue> accumulator_store_;
+ Handle<String> current_part_;
+ Handle<String> tojson_string_;
+ Handle<JSArray> stack_;
+ int current_index_;
+ int part_length_;
+ bool is_ascii_;
+
+ static const int kJsonEscapeTableEntrySize = 8;
+ static const char* const JsonEscapeTable;
+};
+
+
+// Translation table to escape ASCII characters.
+// Table entries start at a multiple of 8 and are null-terminated.
+const char* const BasicJsonStringifier::JsonEscapeTable =
+ "\\u0000\0 \\u0001\0 \\u0002\0 \\u0003\0 "
+ "\\u0004\0 \\u0005\0 \\u0006\0 \\u0007\0 "
+ "\\b\0 \\t\0 \\n\0 \\u000b\0 "
+ "\\f\0 \\r\0 \\u000e\0 \\u000f\0 "
+ "\\u0010\0 \\u0011\0 \\u0012\0 \\u0013\0 "
+ "\\u0014\0 \\u0015\0 \\u0016\0 \\u0017\0 "
+ "\\u0018\0 \\u0019\0 \\u001a\0 \\u001b\0 "
+ "\\u001c\0 \\u001d\0 \\u001e\0 \\u001f\0 "
+ " \0 !\0 \\\"\0 #\0 "
+ "$\0 %\0 &\0 '\0 "
+ "(\0 )\0 *\0 +\0 "
+ ",\0 -\0 .\0 /\0 "
+ "0\0 1\0 2\0 3\0 "
+ "4\0 5\0 6\0 7\0 "
+ "8\0 9\0 :\0 ;\0 "
+ "<\0 =\0 >\0 ?\0 "
+ "@\0 A\0 B\0 C\0 "
+ "D\0 E\0 F\0 G\0 "
+ "H\0 I\0 J\0 K\0 "
+ "L\0 M\0 N\0 O\0 "
+ "P\0 Q\0 R\0 S\0 "
+ "T\0 U\0 V\0 W\0 "
+ "X\0 Y\0 Z\0 [\0 "
+ "\\\\\0 ]\0 ^\0 _\0 "
+ "`\0 a\0 b\0 c\0 "
+ "d\0 e\0 f\0 g\0 "
+ "h\0 i\0 j\0 k\0 "
+ "l\0 m\0 n\0 o\0 "
+ "p\0 q\0 r\0 s\0 "
+ "t\0 u\0 v\0 w\0 "
+ "x\0 y\0 z\0 {\0 "
+ "|\0 }\0 ~\0 \177\0 "
+ "\200\0 \201\0 \202\0 \203\0 "
+ "\204\0 \205\0 \206\0 \207\0 "
+ "\210\0 \211\0 \212\0 \213\0 "
+ "\214\0 \215\0 \216\0 \217\0 "
+ "\220\0 \221\0 \222\0 \223\0 "
+ "\224\0 \225\0 \226\0 \227\0 "
+ "\230\0 \231\0 \232\0 \233\0 "
+ "\234\0 \235\0 \236\0 \237\0 "
+ "\240\0 \241\0 \242\0 \243\0 "
+ "\244\0 \245\0 \246\0 \247\0 "
+ "\250\0 \251\0 \252\0 \253\0 "
+ "\254\0 \255\0 \256\0 \257\0 "
+ "\260\0 \261\0 \262\0 \263\0 "
+ "\264\0 \265\0 \266\0 \267\0 "
+ "\270\0 \271\0 \272\0 \273\0 "
+ "\274\0 \275\0 \276\0 \277\0 "
+ "\300\0 \301\0 \302\0 \303\0 "
+ "\304\0 \305\0 \306\0 \307\0 "
+ "\310\0 \311\0 \312\0 \313\0 "
+ "\314\0 \315\0 \316\0 \317\0 "
+ "\320\0 \321\0 \322\0 \323\0 "
+ "\324\0 \325\0 \326\0 \327\0 "
+ "\330\0 \331\0 \332\0 \333\0 "
+ "\334\0 \335\0 \336\0 \337\0 "
+ "\340\0 \341\0 \342\0 \343\0 "
+ "\344\0 \345\0 \346\0 \347\0 "
+ "\350\0 \351\0 \352\0 \353\0 "
+ "\354\0 \355\0 \356\0 \357\0 "
+ "\360\0 \361\0 \362\0 \363\0 "
+ "\364\0 \365\0 \366\0 \367\0 "
+ "\370\0 \371\0 \372\0 \373\0 "
+ "\374\0 \375\0 \376\0 \377\0 ";
+
+
+BasicJsonStringifier::BasicJsonStringifier(Isolate* isolate)
+ : isolate_(isolate), current_index_(0), is_ascii_(true) {
+ factory_ = isolate_->factory();
+ accumulator_store_ = Handle<JSValue>::cast(
+ factory_->ToObject(factory_->empty_string()));
+ part_length_ = kInitialPartLength;
+ current_part_ = factory_->NewRawOneByteString(part_length_);
+ tojson_string_ = factory_->toJSON_string();
+ stack_ = factory_->NewJSArray(8);
+}
+
+
+MaybeObject* BasicJsonStringifier::Stringify(Handle<Object> object) {
+ switch (SerializeObject(object)) {
+ case UNCHANGED:
+ return isolate_->heap()->undefined_value();
+ case SUCCESS:
+ ShrinkCurrentPart();
+ return *factory_->NewConsString(accumulator(), current_part_);
+ case CIRCULAR:
+ return isolate_->Throw(*factory_->NewTypeError(
+ "circular_structure", HandleVector<Object>(NULL, 0)));
+ case STACK_OVERFLOW:
+ return isolate_->StackOverflow();
+ default:
+ return Failure::Exception();
+ }
+}
+
+
+MaybeObject* BasicJsonStringifier::StringifyString(Isolate* isolate,
+ Handle<String> object) {
+ static const int kJsonQuoteWorstCaseBlowup = 6;
+ static const int kSpaceForQuotes = 2;
+ int worst_case_length =
+ object->length() * kJsonQuoteWorstCaseBlowup + kSpaceForQuotes;
+
+ if (worst_case_length > 32 * KB) { // Slow path if too large.
+ BasicJsonStringifier stringifier(isolate);
+ return stringifier.Stringify(object);
+ }
+
+ FlattenString(object);
+ ASSERT(object->IsFlat());
+ if (object->IsOneByteRepresentationUnderneath()) {
+ Handle<String> result =
+ isolate->factory()->NewRawOneByteString(worst_case_length);
+ DisallowHeapAllocation no_gc;
+ return StringifyString_<SeqOneByteString>(
+ isolate,
+ object->GetFlatContent().ToOneByteVector(),
+ result);
+ } else {
+ Handle<String> result =
+ isolate->factory()->NewRawTwoByteString(worst_case_length);
+ DisallowHeapAllocation no_gc;
+ return StringifyString_<SeqTwoByteString>(
+ isolate,
+ object->GetFlatContent().ToUC16Vector(),
+ result);
+ }
+}
+
+
+template <typename ResultType, typename Char>
+MaybeObject* BasicJsonStringifier::StringifyString_(Isolate* isolate,
+ Vector<Char> vector,
+ Handle<String> result) {
+ DisallowHeapAllocation no_gc;
+ int final_size = 0;
+ ResultType* dest = ResultType::cast(*result);
+ dest->Set(final_size++, '\"');
+ final_size += SerializeStringUnchecked_(vector.start(),
+ dest->GetChars() + 1,
+ vector.length());
+ dest->Set(final_size++, '\"');
+ return *SeqString::Truncate(Handle<SeqString>::cast(result), final_size);
+}
+
+
+template <bool is_ascii, typename Char>
+void BasicJsonStringifier::Append_(Char c) {
+ if (is_ascii) {
+ SeqOneByteString::cast(*current_part_)->SeqOneByteStringSet(
+ current_index_++, c);
+ } else {
+ SeqTwoByteString::cast(*current_part_)->SeqTwoByteStringSet(
+ current_index_++, c);
+ }
+ if (current_index_ == part_length_) Extend();
+}
+
+
+template <bool is_ascii, typename Char>
+void BasicJsonStringifier::Append_(const Char* chars) {
+ for ( ; *chars != '\0'; chars++) Append_<is_ascii, Char>(*chars);
+}
+
+
+Handle<Object> BasicJsonStringifier::ApplyToJsonFunction(
+ Handle<Object> object, Handle<Object> key) {
+ LookupResult lookup(isolate_);
+ JSObject::cast(*object)->LookupRealNamedProperty(*tojson_string_, &lookup);
+ if (!lookup.IsProperty()) return object;
+ PropertyAttributes attr;
+ Handle<Object> fun =
+ Object::GetProperty(object, object, &lookup, tojson_string_, &attr);
+ if (!fun->IsJSFunction()) return object;
+
+ // Call toJSON function.
+ if (key->IsSmi()) key = factory_->NumberToString(key);
+ Handle<Object> argv[] = { key };
+ bool has_exception = false;
+ HandleScope scope(isolate_);
+ object = Execution::Call(isolate_, fun, object, 1, argv, &has_exception);
+ // Return empty handle to signal an exception.
+ if (has_exception) return Handle<Object>::null();
+ return scope.CloseAndEscape(object);
+}
+
+
+BasicJsonStringifier::Result BasicJsonStringifier::StackPush(
+ Handle<Object> object) {
+ StackLimitCheck check(isolate_);
+ if (check.HasOverflowed()) return STACK_OVERFLOW;
+
+ int length = Smi::cast(stack_->length())->value();
+ FixedArray* elements = FixedArray::cast(stack_->elements());
+ for (int i = 0; i < length; i++) {
+ if (elements->get(i) == *object) {
+ return CIRCULAR;
+ }
+ }
+ stack_->EnsureSize(length + 1);
+ FixedArray::cast(stack_->elements())->set(length, *object);
+ stack_->set_length(Smi::FromInt(length + 1));
+ return SUCCESS;
+}
+
+
+void BasicJsonStringifier::StackPop() {
+ int length = Smi::cast(stack_->length())->value();
+ stack_->set_length(Smi::FromInt(length - 1));
+}
+
+
+template <bool deferred_string_key>
+BasicJsonStringifier::Result BasicJsonStringifier::Serialize_(
+ Handle<Object> object, bool comma, Handle<Object> key) {
+ if (object->IsJSObject()) {
+ object = ApplyToJsonFunction(object, key);
+ if (object.is_null()) return EXCEPTION;
+ }
+
+ if (object->IsSmi()) {
+ if (deferred_string_key) SerializeDeferredKey(comma, key);
+ return SerializeSmi(Smi::cast(*object));
+ }
+
+ switch (HeapObject::cast(*object)->map()->instance_type()) {
+ case HEAP_NUMBER_TYPE:
+ if (deferred_string_key) SerializeDeferredKey(comma, key);
+ return SerializeHeapNumber(Handle<HeapNumber>::cast(object));
+ case ODDBALL_TYPE:
+ switch (Oddball::cast(*object)->kind()) {
+ case Oddball::kFalse:
+ if (deferred_string_key) SerializeDeferredKey(comma, key);
+ AppendAscii("false");
+ return SUCCESS;
+ case Oddball::kTrue:
+ if (deferred_string_key) SerializeDeferredKey(comma, key);
+ AppendAscii("true");
+ return SUCCESS;
+ case Oddball::kNull:
+ if (deferred_string_key) SerializeDeferredKey(comma, key);
+ AppendAscii("null");
+ return SUCCESS;
+ default:
+ return UNCHANGED;
+ }
+ case JS_ARRAY_TYPE:
+ if (object->IsAccessCheckNeeded()) break;
+ if (deferred_string_key) SerializeDeferredKey(comma, key);
+ return SerializeJSArray(Handle<JSArray>::cast(object));
+ case JS_VALUE_TYPE:
+ if (deferred_string_key) SerializeDeferredKey(comma, key);
+ return SerializeJSValue(Handle<JSValue>::cast(object));
+ case JS_FUNCTION_TYPE:
+ return UNCHANGED;
+ default:
+ if (object->IsString()) {
+ if (deferred_string_key) SerializeDeferredKey(comma, key);
+ SerializeString(Handle<String>::cast(object));
+ return SUCCESS;
+ } else if (object->IsJSObject()) {
+ if (object->IsAccessCheckNeeded()) break;
+ if (deferred_string_key) SerializeDeferredKey(comma, key);
+ return SerializeJSObject(Handle<JSObject>::cast(object));
+ }
+ }
+
+ return SerializeGeneric(object, key, comma, deferred_string_key);
+}
+
+
+BasicJsonStringifier::Result BasicJsonStringifier::SerializeGeneric(
+ Handle<Object> object,
+ Handle<Object> key,
+ bool deferred_comma,
+ bool deferred_key) {
+ Handle<JSObject> builtins(isolate_->native_context()->builtins());
+ Handle<JSFunction> builtin =
+ Handle<JSFunction>::cast(GetProperty(builtins, "JSONSerializeAdapter"));
+
+ Handle<Object> argv[] = { key, object };
+ bool has_exception = false;
+ Handle<Object> result =
+ Execution::Call(isolate_, builtin, object, 2, argv, &has_exception);
+ if (has_exception) return EXCEPTION;
+ if (result->IsUndefined()) return UNCHANGED;
+ if (deferred_key) {
+ if (key->IsSmi()) key = factory_->NumberToString(key);
+ SerializeDeferredKey(deferred_comma, key);
+ }
+
+ Handle<String> result_string = Handle<String>::cast(result);
+ // Shrink current part, attach it to the accumulator, also attach the result
+ // string to the accumulator, and allocate a new part.
+ ShrinkCurrentPart(); // Shrink.
+ part_length_ = kInitialPartLength; // Allocate conservatively.
+ Extend(); // Attach current part and allocate new part.
+ // Attach result string to the accumulator.
+ set_accumulator(factory_->NewConsString(accumulator(), result_string));
+ return SUCCESS;
+}
+
+
+BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSValue(
+ Handle<JSValue> object) {
+ bool has_exception = false;
+ String* class_name = object->class_name();
+ if (class_name == isolate_->heap()->String_string()) {
+ Handle<Object> value =
+ Execution::ToString(isolate_, object, &has_exception);
+ if (has_exception) return EXCEPTION;
+ SerializeString(Handle<String>::cast(value));
+ } else if (class_name == isolate_->heap()->Number_string()) {
+ Handle<Object> value =
+ Execution::ToNumber(isolate_, object, &has_exception);
+ if (has_exception) return EXCEPTION;
+ if (value->IsSmi()) return SerializeSmi(Smi::cast(*value));
+ SerializeHeapNumber(Handle<HeapNumber>::cast(value));
+ } else {
+ ASSERT(class_name == isolate_->heap()->Boolean_string());
+ Object* value = JSValue::cast(*object)->value();
+ ASSERT(value->IsBoolean());
+ AppendAscii(value->IsTrue() ? "true" : "false");
+ }
+ return SUCCESS;
+}
+
+
+BasicJsonStringifier::Result BasicJsonStringifier::SerializeSmi(Smi* object) {
+ static const int kBufferSize = 100;
+ char chars[kBufferSize];
+ Vector<char> buffer(chars, kBufferSize);
+ AppendAscii(IntToCString(object->value(), buffer));
+ return SUCCESS;
+}
+
+
+BasicJsonStringifier::Result BasicJsonStringifier::SerializeDouble(
+ double number) {
+ if (std::isinf(number) || std::isnan(number)) {
+ AppendAscii("null");
+ return SUCCESS;
+ }
+ static const int kBufferSize = 100;
+ char chars[kBufferSize];
+ Vector<char> buffer(chars, kBufferSize);
+ AppendAscii(DoubleToCString(number, buffer));
+ return SUCCESS;
+}
+
+
+BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSArray(
+ Handle<JSArray> object) {
+ HandleScope handle_scope(isolate_);
+ Result stack_push = StackPush(object);
+ if (stack_push != SUCCESS) return stack_push;
+ int length = Smi::cast(object->length())->value();
+ Append('[');
+ switch (object->GetElementsKind()) {
+ case FAST_SMI_ELEMENTS: {
+ Handle<FixedArray> elements(
+ FixedArray::cast(object->elements()), isolate_);
+ for (int i = 0; i < length; i++) {
+ if (i > 0) Append(',');
+ SerializeSmi(Smi::cast(elements->get(i)));
+ }
+ break;
+ }
+ case FAST_DOUBLE_ELEMENTS: {
+ Handle<FixedDoubleArray> elements(
+ FixedDoubleArray::cast(object->elements()), isolate_);
+ for (int i = 0; i < length; i++) {
+ if (i > 0) Append(',');
+ SerializeDouble(elements->get_scalar(i));
+ }
+ break;
+ }
+ case FAST_ELEMENTS: {
+ Handle<FixedArray> elements(
+ FixedArray::cast(object->elements()), isolate_);
+ for (int i = 0; i < length; i++) {
+ if (i > 0) Append(',');
+ Result result =
+ SerializeElement(isolate_,
+ Handle<Object>(elements->get(i), isolate_),
+ i);
+ if (result == SUCCESS) continue;
+ if (result == UNCHANGED) {
+ AppendAscii("null");
+ } else {
+ return result;
+ }
+ }
+ break;
+ }
+ // TODO(yangguo): The FAST_HOLEY_* cases could be handled in a faster way.
+ // They resemble the non-holey cases except that a prototype chain lookup
+ // is necessary for holes.
+ default: {
+ Result result = SerializeJSArraySlow(object, length);
+ if (result != SUCCESS) return result;
+ break;
+ }
+ }
+ Append(']');
+ StackPop();
+ current_part_ = handle_scope.CloseAndEscape(current_part_);
+ return SUCCESS;
+}
+
+
+BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSArraySlow(
+ Handle<JSArray> object, int length) {
+ for (int i = 0; i < length; i++) {
+ if (i > 0) Append(',');
+ Handle<Object> element = Object::GetElement(isolate_, object, i);
+ RETURN_IF_EMPTY_HANDLE_VALUE(isolate_, element, EXCEPTION);
+ if (element->IsUndefined()) {
+ AppendAscii("null");
+ } else {
+ Result result = SerializeElement(isolate_, element, i);
+ if (result == SUCCESS) continue;
+ if (result == UNCHANGED) {
+ AppendAscii("null");
+ } else {
+ return result;
+ }
+ }
+ }
+ return SUCCESS;
+}
+
+
+BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSObject(
+ Handle<JSObject> object) {
+ HandleScope handle_scope(isolate_);
+ Result stack_push = StackPush(object);
+ if (stack_push != SUCCESS) return stack_push;
+ if (object->IsJSGlobalProxy()) {
+ object = Handle<JSObject>(
+ JSObject::cast(object->GetPrototype()), isolate_);
+ ASSERT(object->IsGlobalObject());
+ }
+
+ Append('{');
+ bool comma = false;
+
+ if (object->HasFastProperties() &&
+ !object->HasIndexedInterceptor() &&
+ !object->HasNamedInterceptor() &&
+ object->elements()->length() == 0) {
+ Handle<Map> map(object->map());
+ for (int i = 0; i < map->NumberOfOwnDescriptors(); i++) {
+ Handle<Name> name(map->instance_descriptors()->GetKey(i), isolate_);
+ // TODO(rossberg): Should this throw?
+ if (!name->IsString()) continue;
+ Handle<String> key = Handle<String>::cast(name);
+ PropertyDetails details = map->instance_descriptors()->GetDetails(i);
+ if (details.IsDontEnum()) continue;
+ Handle<Object> property;
+ if (details.type() == FIELD && *map == object->map()) {
+ property = Handle<Object>(
+ object->RawFastPropertyAt(
+ map->instance_descriptors()->GetFieldIndex(i)),
+ isolate_);
+ } else {
+ property = GetProperty(isolate_, object, key);
+ if (property.is_null()) return EXCEPTION;
+ }
+ Result result = SerializeProperty(property, comma, key);
+ if (!comma && result == SUCCESS) comma = true;
+ if (result >= EXCEPTION) return result;
+ }
+ } else {
+ bool has_exception = false;
+ Handle<FixedArray> contents =
+ GetKeysInFixedArrayFor(object, LOCAL_ONLY, &has_exception);
+ if (has_exception) return EXCEPTION;
+
+ for (int i = 0; i < contents->length(); i++) {
+ Object* key = contents->get(i);
+ Handle<String> key_handle;
+ Handle<Object> property;
+ if (key->IsString()) {
+ key_handle = Handle<String>(String::cast(key), isolate_);
+ property = GetProperty(isolate_, object, key_handle);
+ } else {
+ ASSERT(key->IsNumber());
+ key_handle = factory_->NumberToString(Handle<Object>(key, isolate_));
+ uint32_t index;
+ if (key->IsSmi()) {
+ property = Object::GetElement(
+ isolate_, object, Smi::cast(key)->value());
+ } else if (key_handle->AsArrayIndex(&index)) {
+ property = Object::GetElement(isolate_, object, index);
+ } else {
+ property = GetProperty(isolate_, object, key_handle);
+ }
+ }
+ if (property.is_null()) return EXCEPTION;
+ Result result = SerializeProperty(property, comma, key_handle);
+ if (!comma && result == SUCCESS) comma = true;
+ if (result >= EXCEPTION) return result;
+ }
+ }
+
+ Append('}');
+ StackPop();
+ current_part_ = handle_scope.CloseAndEscape(current_part_);
+ return SUCCESS;
+}
+
+
+void BasicJsonStringifier::ShrinkCurrentPart() {
+ ASSERT(current_index_ < part_length_);
+ current_part_ = SeqString::Truncate(Handle<SeqString>::cast(current_part_),
+ current_index_);
+}
+
+
+void BasicJsonStringifier::Extend() {
+ set_accumulator(factory_->NewConsString(accumulator(), current_part_));
+ if (part_length_ <= kMaxPartLength / kPartLengthGrowthFactor) {
+ part_length_ *= kPartLengthGrowthFactor;
+ }
+ if (is_ascii_) {
+ current_part_ = factory_->NewRawOneByteString(part_length_);
+ } else {
+ current_part_ = factory_->NewRawTwoByteString(part_length_);
+ }
+ current_index_ = 0;
+}
+
+
+void BasicJsonStringifier::ChangeEncoding() {
+ ShrinkCurrentPart();
+ set_accumulator(factory_->NewConsString(accumulator(), current_part_));
+ current_part_ = factory_->NewRawTwoByteString(part_length_);
+ current_index_ = 0;
+ is_ascii_ = false;
+}
+
+
+template <typename SrcChar, typename DestChar>
+int BasicJsonStringifier::SerializeStringUnchecked_(const SrcChar* src,
+ DestChar* dest,
+ int length) {
+ DestChar* dest_start = dest;
+
+ // Assert that uc16 character is not truncated down to 8 bit.
+ // The <uc16, char> version of this method must not be called.
+ ASSERT(sizeof(*dest) >= sizeof(*src));
+
+ for (int i = 0; i < length; i++) {
+ SrcChar c = src[i];
+ if (DoNotEscape(c)) {
+ *(dest++) = static_cast<DestChar>(c);
+ } else {
+ const uint8_t* chars = reinterpret_cast<const uint8_t*>(
+ &JsonEscapeTable[c * kJsonEscapeTableEntrySize]);
+ while (*chars != '\0') *(dest++) = *(chars++);
+ }
+ }
+
+ return static_cast<int>(dest - dest_start);
+}
+
+
+template <bool is_ascii, typename Char>
+void BasicJsonStringifier::SerializeString_(Handle<String> string) {
+ int length = string->length();
+ Append_<is_ascii, char>('"');
+ // We make a rough estimate to find out if the current string can be
+ // serialized without allocating a new string part. The worst case length of
+  // an escaped character is 6. Shifting the remaining space in the current
+  // part right by 3 is a more pessimistic estimate, but faster to calculate.
+
+ if (((part_length_ - current_index_) >> 3) > length) {
+ DisallowHeapAllocation no_gc;
+ Vector<const Char> vector = GetCharVector<Char>(string);
+ if (is_ascii) {
+ current_index_ += SerializeStringUnchecked_(
+ vector.start(),
+ SeqOneByteString::cast(*current_part_)->GetChars() + current_index_,
+ length);
+ } else {
+ current_index_ += SerializeStringUnchecked_(
+ vector.start(),
+ SeqTwoByteString::cast(*current_part_)->GetChars() + current_index_,
+ length);
+ }
+ } else {
+ String* string_location = NULL;
+ Vector<const Char> vector(NULL, 0);
+ for (int i = 0; i < length; i++) {
+ // If GC moved the string, we need to refresh the vector.
+ if (*string != string_location) {
+ DisallowHeapAllocation no_gc;
+ // This does not actually prevent the string from being relocated later.
+ vector = GetCharVector<Char>(string);
+ string_location = *string;
+ }
+ Char c = vector[i];
+ if (DoNotEscape(c)) {
+ Append_<is_ascii, Char>(c);
+ } else {
+ Append_<is_ascii, uint8_t>(reinterpret_cast<const uint8_t*>(
+ &JsonEscapeTable[c * kJsonEscapeTableEntrySize]));
+ }
+ }
+ }
+
+ Append_<is_ascii, uint8_t>('"');
+}
+
+
+template <>
+bool BasicJsonStringifier::DoNotEscape(uint8_t c) {
+ return c >= '#' && c <= '~' && c != '\\';
+}
+
+
+template <>
+bool BasicJsonStringifier::DoNotEscape(uint16_t c) {
+ return c >= '#' && c != '\\' && c != 0x7f;
+}
+
+
+template <>
+Vector<const uint8_t> BasicJsonStringifier::GetCharVector(
+ Handle<String> string) {
+ String::FlatContent flat = string->GetFlatContent();
+ ASSERT(flat.IsAscii());
+ return flat.ToOneByteVector();
+}
+
+
+template <>
+Vector<const uc16> BasicJsonStringifier::GetCharVector(Handle<String> string) {
+ String::FlatContent flat = string->GetFlatContent();
+ ASSERT(flat.IsTwoByte());
+ return flat.ToUC16Vector();
+}
+
+
+void BasicJsonStringifier::SerializeString(Handle<String> object) {
+ object = FlattenGetString(object);
+ if (is_ascii_) {
+ if (object->IsOneByteRepresentationUnderneath()) {
+ SerializeString_<true, uint8_t>(object);
+ } else {
+ ChangeEncoding();
+ SerializeString(object);
+ }
+ } else {
+ if (object->IsOneByteRepresentationUnderneath()) {
+ SerializeString_<false, uint8_t>(object);
+ } else {
+ SerializeString_<false, uc16>(object);
+ }
+ }
+}
+
+} } // namespace v8::internal
+
+#endif // V8_JSON_STRINGIFIER_H_
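
Two techniques in the new stringifier are worth spelling out: JsonEscapeTable stores one 8-byte, NUL-terminated entry per byte value, so the escape text for character c starts at JsonEscapeTable + c * kJsonEscapeTableEntrySize, and output is appended into a fixed-size part that doubles (up to kMaxPartLength, 16 KB) before being chained onto a cons-string accumulator. The table-driven escaping can be sketched in standalone C++ as follows; the table here is built at runtime, std::string replaces V8's sequential strings, and all helper names are invented for the example:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>
    #include <string>

    // 256-entry escape table with the same layout as JsonEscapeTable: each
    // entry is kEntrySize bytes and NUL-terminated, so the escape text for
    // byte c starts at table[c * kEntrySize].
    static const int kEntrySize = 8;

    static void BuildEscapeTable(char table[256 * kEntrySize]) {
      std::memset(table, 0, 256 * kEntrySize);
      for (int c = 0; c < 256; c++) {
        char* entry = table + c * kEntrySize;
        if (c == '"')       { std::strcpy(entry, "\\\""); }
        else if (c == '\\') { std::strcpy(entry, "\\\\"); }
        else if (c == '\b') { std::strcpy(entry, "\\b"); }
        else if (c == '\f') { std::strcpy(entry, "\\f"); }
        else if (c == '\n') { std::strcpy(entry, "\\n"); }
        else if (c == '\r') { std::strcpy(entry, "\\r"); }
        else if (c == '\t') { std::strcpy(entry, "\\t"); }
        else if (c < 0x20)  { std::snprintf(entry, kEntrySize, "\\u%04x", c); }
        else                { entry[0] = static_cast<char>(c); }
      }
    }

    // Escapes a string by copying each byte's table entry until its NUL,
    // mirroring the copy loop in SerializeStringUnchecked_ above.
    std::string EscapeJsonString(const std::string& in) {
      static char table[256 * kEntrySize];
      static bool built = false;          // not thread-safe; fine for a demo
      if (!built) { BuildEscapeTable(table); built = true; }
      std::string out = "\"";
      for (unsigned char c : in) {
        const char* entry = table + c * kEntrySize;
        while (*entry != '\0') out.push_back(*entry++);
      }
      out.push_back('"');
      return out;
    }

    int main() {
      std::printf("%s\n", EscapeJsonString("a\"b\\c\n\x01").c_str());
      // Prints: "a\"b\\c\n\u0001"
      return 0;
    }
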
diff --git a/deps/v8/src/json.js b/deps/v8/src/json.js
index 85224b0f0..c21e6351d 100644
--- a/deps/v8/src/json.js
+++ b/deps/v8/src/json.js
@@ -25,8 +25,15 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// This file relies on the fact that the following declarations have been made
+// in runtime.js:
+// var $Array = global.Array;
+// var $String = global.String;
+
var $JSON = global.JSON;
+// -------------------------------------------------------------------
+
function Revive(holder, name, reviver) {
var val = holder[name];
if (IS_OBJECT(val)) {
@@ -174,145 +181,13 @@ function JSONSerialize(key, holder, replacer, stack, indent, gap) {
}
}
// Undefined or a callable object.
- return void 0;
-}
-
-
-function BasicSerializeArray(value, stack, builder) {
- var len = value.length;
- if (len == 0) {
- builder.push("[]");
- return;
- }
- if (!%PushIfAbsent(stack, value)) {
- throw MakeTypeError('circular_structure', $Array());
- }
- builder.push("[");
- var val = value[0];
- if (IS_STRING(val)) {
- // First entry is a string. Remaining entries are likely to be strings too.
- var array_string = %QuoteJSONStringArray(value);
- if (!IS_UNDEFINED(array_string)) {
- // array_string also includes bracket characters so we are done.
- builder[builder.length - 1] = array_string;
- stack.pop();
- return;
- } else {
- builder.push(%QuoteJSONString(val));
- for (var i = 1; i < len; i++) {
- val = value[i];
- if (IS_STRING(val)) {
- builder.push(%QuoteJSONStringComma(val));
- } else {
- builder.push(",");
- var before = builder.length;
- BasicJSONSerialize(i, val, stack, builder);
- if (before == builder.length) builder[before - 1] = ",null";
- }
- }
- }
- } else if (IS_NUMBER(val)) {
- // First entry is a number. Remaining entries are likely to be numbers too.
- builder.push(JSON_NUMBER_TO_STRING(val));
- for (var i = 1; i < len; i++) {
- builder.push(",");
- val = value[i];
- if (IS_NUMBER(val)) {
- builder.push(JSON_NUMBER_TO_STRING(val));
- } else {
- var before = builder.length;
- BasicJSONSerialize(i, val, stack, builder);
- if (before == builder.length) builder[before - 1] = ",null";
- }
- }
- } else {
- var before = builder.length;
- BasicJSONSerialize(0, val, stack, builder);
- if (before == builder.length) builder.push("null");
- for (var i = 1; i < len; i++) {
- builder.push(",");
- before = builder.length;
- BasicJSONSerialize(i, value[i], stack, builder);
- if (before == builder.length) builder[before - 1] = ",null";
- }
- }
- stack.pop();
- builder.push("]");
-}
-
-
-function BasicSerializeObject(value, stack, builder) {
- if (!%PushIfAbsent(stack, value)) {
- throw MakeTypeError('circular_structure', $Array());
- }
- builder.push("{");
- var first = true;
- var keys = %ObjectKeys(value);
- var len = keys.length;
- for (var i = 0; i < len; i++) {
- var p = keys[i];
- if (!first) {
- builder.push(%QuoteJSONStringComma(p));
- } else {
- builder.push(%QuoteJSONString(p));
- }
- builder.push(":");
- var before = builder.length;
- BasicJSONSerialize(p, value[p], stack, builder);
- if (before == builder.length) {
- builder.pop();
- builder.pop();
- } else {
- first = false;
- }
- }
- stack.pop();
- builder.push("}");
-}
-
-
-function BasicJSONSerialize(key, value, stack, builder) {
- if (IS_SPEC_OBJECT(value)) {
- var toJSON = value.toJSON;
- if (IS_SPEC_FUNCTION(toJSON)) {
- value = %_CallFunction(value, ToString(key), toJSON);
- }
- }
- if (IS_STRING(value)) {
- builder.push(value !== "" ? %QuoteJSONString(value) : '""');
- } else if (IS_NUMBER(value)) {
- builder.push(JSON_NUMBER_TO_STRING(value));
- } else if (IS_BOOLEAN(value)) {
- builder.push(value ? "true" : "false");
- } else if (IS_NULL(value)) {
- builder.push("null");
- } else if (IS_SPEC_OBJECT(value) && !(typeof value == "function")) {
- // Value is a non-callable object.
- // Unwrap value if necessary
- if (IS_NUMBER_WRAPPER(value)) {
- value = ToNumber(value);
- builder.push(JSON_NUMBER_TO_STRING(value));
- } else if (IS_STRING_WRAPPER(value)) {
- builder.push(%QuoteJSONString(ToString(value)));
- } else if (IS_BOOLEAN_WRAPPER(value)) {
- builder.push(%_ValueOf(value) ? "true" : "false");
- } else if (IS_ARRAY(value)) {
- BasicSerializeArray(value, stack, builder);
- } else {
- BasicSerializeObject(value, stack, builder);
- }
- }
+ return UNDEFINED;
}
function JSONStringify(value, replacer, space) {
if (%_ArgumentsLength() == 1) {
- var builder = new InternalArray();
- BasicJSONSerialize('', value, new InternalArray(), builder);
- if (builder.length == 0) return;
- var result = %_FastAsciiArrayJoin(builder, "");
- if (!IS_UNDEFINED(result)) return result;
- return %StringBuilderConcat(builder, builder.length, "");
+ return %BasicJSONStringify(value);
}
if (IS_OBJECT(space)) {
// Unwrap 'space' if it is wrapped
@@ -325,10 +200,10 @@ function JSONStringify(value, replacer, space) {
var gap;
if (IS_NUMBER(space)) {
space = MathMax(0, MathMin(ToInteger(space), 10));
- gap = SubString(" ", 0, space);
+ gap = %_SubString(" ", 0, space);
} else if (IS_STRING(space)) {
if (space.length > 10) {
- gap = SubString(space, 0, 10);
+ gap = %_SubString(space, 0, 10);
} else {
gap = space;
}
@@ -338,8 +213,13 @@ function JSONStringify(value, replacer, space) {
return JSONSerialize('', {'': value}, replacer, new InternalArray(), "", gap);
}
+
+// -------------------------------------------------------------------
+
function SetUpJSON() {
%CheckIsBootstrapping();
+
+ // Set up non-enumerable properties of the JSON object.
InstallFunctions($JSON, DONT_ENUM, $Array(
"parse", JSONParse,
"stringify", JSONStringify
@@ -347,3 +227,14 @@ function SetUpJSON() {
}
SetUpJSON();
+
+
+// -------------------------------------------------------------------
+// JSON Builtins
+
+function JSONSerializeAdapter(key, object) {
+ var holder = {};
+ holder[key] = object;
+ // No need to pass the actual holder since there is no replacer function.
+ return JSONSerialize(key, holder, UNDEFINED, new InternalArray(), "", "");
+}
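JSONSerializeAdapter above lets the native fast path hand an awkward value back to the generic JSONSerialize without a replacer: the value is wrapped in a one-property holder keyed by the property name, because the generic serializer always reads the value through its holder. A small standalone C++ sketch of that holder trick (hypothetical types, not the V8 runtime API):

#include <iostream>
#include <map>
#include <string>

using Value = std::string;
using Holder = std::map<std::string, Value>;

// Generic serializer: like JSONSerialize, it always looks the value up in
// its holder by key.
std::string Serialize(const std::string& key, const Holder& holder) {
  return "\"" + holder.at(key) + "\"";
}

// Adapter: manufacture a one-property holder so the generic entry point can
// be reused without threading a replacer through.
std::string SerializeAdapter(const std::string& key, const Value& value) {
  Holder holder;
  holder[key] = value;
  return Serialize(key, holder);
}

int main() {
  std::cout << SerializeAdapter("x", "handed back from the fast path") << "\n";
}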
diff --git a/deps/v8/src/jsregexp-inl.h b/deps/v8/src/jsregexp-inl.h
new file mode 100644
index 000000000..3ef07d8c5
--- /dev/null
+++ b/deps/v8/src/jsregexp-inl.h
@@ -0,0 +1,106 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#ifndef V8_JSREGEXP_INL_H_
+#define V8_JSREGEXP_INL_H_
+
+#include "allocation.h"
+#include "handles.h"
+#include "heap.h"
+#include "jsregexp.h"
+#include "objects.h"
+
+namespace v8 {
+namespace internal {
+
+
+RegExpImpl::GlobalCache::~GlobalCache() {
+ // Deallocate the register array if we allocated it in the constructor
+ // (as opposed to using the existing jsregexp_static_offsets_vector).
+ if (register_array_size_ > Isolate::kJSRegexpStaticOffsetsVectorSize) {
+ DeleteArray(register_array_);
+ }
+}
+
+
+int32_t* RegExpImpl::GlobalCache::FetchNext() {
+ current_match_index_++;
+ if (current_match_index_ >= num_matches_) {
+ // Current batch of results exhausted.
+ // Fail if last batch was not even fully filled.
+ if (num_matches_ < max_matches_) {
+ num_matches_ = 0; // Signal failed match.
+ return NULL;
+ }
+
+ int32_t* last_match =
+ &register_array_[(current_match_index_ - 1) * registers_per_match_];
+ int last_end_index = last_match[1];
+
+ if (regexp_->TypeTag() == JSRegExp::ATOM) {
+ num_matches_ = RegExpImpl::AtomExecRaw(regexp_,
+ subject_,
+ last_end_index,
+ register_array_,
+ register_array_size_);
+ } else {
+ int last_start_index = last_match[0];
+ if (last_start_index == last_end_index) last_end_index++;
+ if (last_end_index > subject_->length()) {
+ num_matches_ = 0; // Signal failed match.
+ return NULL;
+ }
+ num_matches_ = RegExpImpl::IrregexpExecRaw(regexp_,
+ subject_,
+ last_end_index,
+ register_array_,
+ register_array_size_);
+ }
+
+ if (num_matches_ <= 0) return NULL;
+ current_match_index_ = 0;
+ return register_array_;
+ } else {
+ return &register_array_[current_match_index_ * registers_per_match_];
+ }
+}
+
+
+int32_t* RegExpImpl::GlobalCache::LastSuccessfulMatch() {
+ int index = current_match_index_ * registers_per_match_;
+ if (num_matches_ == 0) {
+ // After a failed match we shift back by one result.
+ index -= registers_per_match_;
+ }
+ return &register_array_[index];
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_JSREGEXP_INL_H_
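GlobalCache::FetchNext above walks one flat register array holding a batch of matches, and only refills it, resuming just past the end of the last match, when the batch is exhausted; a batch that comes back less than full signals that nothing more can follow. A self-contained sketch of the same batching pattern (hypothetical class and callback, not V8's API):

#include <cstdint>
#include <functional>
#include <iostream>
#include <vector>

class BatchedMatches {
 public:
  // fill(start, out) writes up to max_matches * regs_per_match int32s into
  // out and returns how many matches it produced (0 means no more).
  using Fill = std::function<int(int start, int32_t* out)>;

  BatchedMatches(int regs_per_match, int max_matches, Fill fill)
      : regs_per_match_(regs_per_match),
        max_matches_(max_matches),
        registers_(regs_per_match * max_matches),
        fill_(std::move(fill)) {}

  const int32_t* FetchNext() {
    current_++;
    if (current_ < num_matches_) {
      return &registers_[current_ * regs_per_match_];  // still in this batch
    }
    // A batch that came back less than full means the subject is exhausted.
    if (num_matches_ > 0 && num_matches_ < max_matches_) return nullptr;
    // Refill, resuming just past the end of the last match (register 1).
    int resume = num_matches_ > 0
        ? registers_[(num_matches_ - 1) * regs_per_match_ + 1]
        : 0;
    num_matches_ = fill_(resume, registers_.data());
    if (num_matches_ <= 0) return nullptr;
    current_ = 0;
    return registers_.data();
  }

 private:
  int regs_per_match_;
  int max_matches_;
  int num_matches_ = 0;
  int current_ = -1;
  std::vector<int32_t> registers_;
  Fill fill_;
};

int main() {
  // Fake "regexp": consumes two characters per match over a 10-char subject,
  // producing at most three matches per batch.
  BatchedMatches matches(2, 3, [](int start, int32_t* out) {
    int n = 0;
    for (; n < 3 && start + 2 <= 10; n++, start += 2) {
      out[n * 2] = start;
      out[n * 2 + 1] = start + 2;
    }
    return n;
  });
  while (const int32_t* m = matches.FetchNext()) {
    std::cout << "[" << m[0] << "," << m[1] << ") ";
  }
  std::cout << "\n";
}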
diff --git a/deps/v8/src/jsregexp.cc b/deps/v8/src/jsregexp.cc
index e59170d5a..3a3d91599 100644
--- a/deps/v8/src/jsregexp.cc
+++ b/deps/v8/src/jsregexp.cc
@@ -32,6 +32,7 @@
#include "execution.h"
#include "factory.h"
#include "jsregexp.h"
+#include "jsregexp-inl.h"
#include "platform.h"
#include "string-search.h"
#include "runtime.h"
@@ -167,10 +168,9 @@ static bool HasFewDifferentCharacters(Handle<String> pattern) {
Handle<Object> RegExpImpl::Compile(Handle<JSRegExp> re,
Handle<String> pattern,
- Handle<String> flag_str,
- Zone* zone) {
- ZoneScope zone_scope(zone, DELETE_ON_EXIT);
+ Handle<String> flag_str) {
Isolate* isolate = re->GetIsolate();
+ Zone zone(isolate);
JSRegExp::Flags flags = RegExpFlagsFromString(flag_str);
CompilationCache* compilation_cache = isolate->compilation_cache();
Handle<FixedArray> cached = compilation_cache->LookupRegExp(pattern, flags);
@@ -187,7 +187,7 @@ Handle<Object> RegExpImpl::Compile(Handle<JSRegExp> re,
RegExpCompileData parse_result;
FlatStringReader reader(isolate, pattern);
if (!RegExpParser::ParseRegExp(&reader, flags.is_multiline(),
- &parse_result, zone)) {
+ &parse_result, &zone)) {
// Throw an exception if we fail to parse the pattern.
ThrowRegExpException(re,
pattern,
@@ -269,7 +269,7 @@ static void SetAtomLastCapture(FixedArray* array,
String* subject,
int from,
int to) {
- NoHandleAllocation no_handles;
+ SealHandleScope shs(array->GetIsolate());
RegExpImpl::SetLastCaptureCount(array, 2);
RegExpImpl::SetLastSubject(array, subject);
RegExpImpl::SetLastInput(array, subject);
@@ -289,7 +289,7 @@ int RegExpImpl::AtomExecRaw(Handle<JSRegExp> regexp,
ASSERT(index <= subject->length());
if (!subject->IsFlat()) FlattenString(subject);
- AssertNoAllocation no_heap_allocation; // ensure vectors stay valid
+ DisallowHeapAllocation no_gc; // ensure vectors stay valid
String* needle = String::cast(regexp->DataAt(JSRegExp::kAtomPatternIndex));
int needle_len = needle->length();
@@ -309,16 +309,16 @@ int RegExpImpl::AtomExecRaw(Handle<JSRegExp> regexp,
index = (needle_content.IsAscii()
? (subject_content.IsAscii()
? SearchString(isolate,
- subject_content.ToAsciiVector(),
- needle_content.ToAsciiVector(),
+ subject_content.ToOneByteVector(),
+ needle_content.ToOneByteVector(),
index)
: SearchString(isolate,
subject_content.ToUC16Vector(),
- needle_content.ToAsciiVector(),
+ needle_content.ToOneByteVector(),
index))
: (subject_content.IsAscii()
? SearchString(isolate,
- subject_content.ToAsciiVector(),
+ subject_content.ToOneByteVector(),
needle_content.ToUC16Vector(),
index)
: SearchString(isolate,
@@ -352,7 +352,7 @@ Handle<Object> RegExpImpl::AtomExec(Handle<JSRegExp> re,
if (res == RegExpImpl::RE_FAILURE) return isolate->factory()->null_value();
ASSERT_EQ(res, RegExpImpl::RE_SUCCESS);
- NoHandleAllocation no_handles;
+ SealHandleScope shs(isolate);
FixedArray* array = FixedArray::cast(last_match_info->elements());
SetAtomLastCapture(array, *subject, output_registers[0], output_registers[1]);
return last_match_info;
@@ -409,7 +409,7 @@ bool RegExpImpl::CompileIrregexp(Handle<JSRegExp> re,
bool is_ascii) {
// Compile the RegExp.
Isolate* isolate = re->GetIsolate();
- ZoneScope zone_scope(isolate->runtime_zone(), DELETE_ON_EXIT);
+ Zone zone(isolate);
PostponeInterruptsScope postpone(isolate);
// If we had a compilation error the last time this is saved at the
// saved code index.
@@ -440,10 +440,9 @@ bool RegExpImpl::CompileIrregexp(Handle<JSRegExp> re,
if (!pattern->IsFlat()) FlattenString(pattern);
RegExpCompileData compile_data;
FlatStringReader reader(isolate, pattern);
- Zone* zone = isolate->runtime_zone();
if (!RegExpParser::ParseRegExp(&reader, flags.is_multiline(),
&compile_data,
- zone)) {
+ &zone)) {
// Throw an exception if we fail to parse the pattern.
// THIS SHOULD NOT HAPPEN. We already pre-parsed it successfully once.
ThrowRegExpException(re,
@@ -460,7 +459,7 @@ bool RegExpImpl::CompileIrregexp(Handle<JSRegExp> re,
pattern,
sample_subject,
is_ascii,
- zone);
+ &zone);
if (result.error_message != NULL) {
// Unable to compile regexp.
Handle<String> error_message =
@@ -529,7 +528,7 @@ int RegExpImpl::IrregexpPrepare(Handle<JSRegExp> regexp,
if (!subject->IsFlat()) FlattenString(subject);
// Check the asciiness of the underlying storage.
- bool is_ascii = subject->IsAsciiRepresentationUnderneath();
+ bool is_ascii = subject->IsOneByteRepresentationUnderneath();
if (!EnsureCompiledIrregexp(regexp, subject, is_ascii)) return -1;
#ifdef V8_INTERPRETED_REGEXP
@@ -560,7 +559,7 @@ int RegExpImpl::IrregexpExecRaw(Handle<JSRegExp> regexp,
ASSERT(index <= subject->length());
ASSERT(subject->IsFlat());
- bool is_ascii = subject->IsAsciiRepresentationUnderneath();
+ bool is_ascii = subject->IsOneByteRepresentationUnderneath();
#ifndef V8_INTERPRETED_REGEXP
ASSERT(output_size >= (IrregexpNumberOfCaptures(*irregexp) + 1) * 2);
@@ -596,7 +595,7 @@ int RegExpImpl::IrregexpExecRaw(Handle<JSRegExp> regexp,
// being internal and external, and even between being ASCII and UC16,
// but the characters are always the same).
IrregexpPrepare(regexp, subject);
- is_ascii = subject->IsAsciiRepresentationUnderneath();
+ is_ascii = subject->IsOneByteRepresentationUnderneath();
} while (true);
UNREACHABLE();
return RE_EXCEPTION;
@@ -623,7 +622,8 @@ int RegExpImpl::IrregexpExecRaw(Handle<JSRegExp> regexp,
index);
if (result == RE_SUCCESS) {
// Copy capture results to the start of the registers array.
- memcpy(output, raw_output, number_of_capture_registers * sizeof(int32_t));
+ OS::MemCopy(
+ output, raw_output, number_of_capture_registers * sizeof(int32_t));
}
if (result == RE_EXCEPTION) {
ASSERT(!isolate->has_pending_exception());
@@ -686,9 +686,10 @@ Handle<JSArray> RegExpImpl::SetLastMatchInfo(Handle<JSArray> last_match_info,
Handle<String> subject,
int capture_count,
int32_t* match) {
+ ASSERT(last_match_info->HasFastObjectElements());
int capture_register_count = (capture_count + 1) * 2;
last_match_info->EnsureSize(capture_register_count + kLastMatchOverhead);
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_allocation;
FixedArray* array = FixedArray::cast(last_match_info->elements());
if (match != NULL) {
for (int i = 0; i < capture_register_count; i += 2) {
@@ -760,68 +761,6 @@ RegExpImpl::GlobalCache::GlobalCache(Handle<JSRegExp> regexp,
}
-RegExpImpl::GlobalCache::~GlobalCache() {
- // Deallocate the register array if we allocated it in the constructor
- // (as opposed to using the existing jsregexp_static_offsets_vector).
- if (register_array_size_ > Isolate::kJSRegexpStaticOffsetsVectorSize) {
- DeleteArray(register_array_);
- }
-}
-
-
-int32_t* RegExpImpl::GlobalCache::FetchNext() {
- current_match_index_++;
- if (current_match_index_ >= num_matches_) {
- // Current batch of results exhausted.
- // Fail if last batch was not even fully filled.
- if (num_matches_ < max_matches_) {
- num_matches_ = 0; // Signal failed match.
- return NULL;
- }
-
- int32_t* last_match =
- &register_array_[(current_match_index_ - 1) * registers_per_match_];
- int last_end_index = last_match[1];
-
- if (regexp_->TypeTag() == JSRegExp::ATOM) {
- num_matches_ = RegExpImpl::AtomExecRaw(regexp_,
- subject_,
- last_end_index,
- register_array_,
- register_array_size_);
- } else {
- int last_start_index = last_match[0];
- if (last_start_index == last_end_index) last_end_index++;
- if (last_end_index > subject_->length()) {
- num_matches_ = 0; // Signal failed match.
- return NULL;
- }
- num_matches_ = RegExpImpl::IrregexpExecRaw(regexp_,
- subject_,
- last_end_index,
- register_array_,
- register_array_size_);
- }
-
- if (num_matches_ <= 0) return NULL;
- current_match_index_ = 0;
- return register_array_;
- } else {
- return &register_array_[current_match_index_ * registers_per_match_];
- }
-}
-
-
-int32_t* RegExpImpl::GlobalCache::LastSuccessfulMatch() {
- int index = current_match_index_ * registers_per_match_;
- if (num_matches_ == 0) {
- // After a failed match we shift back by one result.
- index -= registers_per_match_;
- }
- return &register_array_[index];
-}
-
-
// -------------------------------------------------------------------
// Implementation of the Irregexp regular expression engine.
//
@@ -994,27 +933,25 @@ void RegExpText::AppendToText(RegExpText* text, Zone* zone) {
TextElement TextElement::Atom(RegExpAtom* atom) {
- TextElement result = TextElement(ATOM);
- result.data.u_atom = atom;
- return result;
+ return TextElement(ATOM, atom);
}
-TextElement TextElement::CharClass(
- RegExpCharacterClass* char_class) {
- TextElement result = TextElement(CHAR_CLASS);
- result.data.u_char_class = char_class;
- return result;
+TextElement TextElement::CharClass(RegExpCharacterClass* char_class) {
+ return TextElement(CHAR_CLASS, char_class);
}
-int TextElement::length() {
- if (type == ATOM) {
- return data.u_atom->length();
- } else {
- ASSERT(type == CHAR_CLASS);
- return 1;
+int TextElement::length() const {
+ switch (text_type()) {
+ case ATOM:
+ return atom()->length();
+
+ case CHAR_CLASS:
+ return 1;
}
+ UNREACHABLE();
+ return 0;
}
@@ -1148,8 +1085,8 @@ class RecursionCheck {
};
-static RegExpEngine::CompilationResult IrregexpRegExpTooBig() {
- return RegExpEngine::CompilationResult("RegExp too big");
+static RegExpEngine::CompilationResult IrregexpRegExpTooBig(Isolate* isolate) {
+ return RegExpEngine::CompilationResult(isolate, "RegExp too big");
}
@@ -1206,7 +1143,7 @@ RegExpEngine::CompilationResult RegExpCompiler::Assemble(
while (!work_list.is_empty()) {
work_list.RemoveLast()->Emit(this, &new_trace);
}
- if (reg_exp_too_big_) return IrregexpRegExpTooBig();
+ if (reg_exp_too_big_) return IrregexpRegExpTooBig(zone_->isolate());
Handle<HeapObject> code = macro_assembler_->GetCode(pattern);
heap->IncreaseTotalRegexpCodeGenerated(code->Size());
@@ -1224,7 +1161,7 @@ RegExpEngine::CompilationResult RegExpCompiler::Assemble(
bool Trace::DeferredAction::Mentions(int that) {
- if (type() == ActionNode::CLEAR_CAPTURES) {
+ if (action_type() == ActionNode::CLEAR_CAPTURES) {
Interval range = static_cast<DeferredClearCaptures*>(this)->range();
return range.Contains(that);
} else {
@@ -1250,7 +1187,7 @@ bool Trace::GetStoredPosition(int reg, int* cp_offset) {
action != NULL;
action = action->next()) {
if (action->Mentions(reg)) {
- if (action->type() == ActionNode::STORE_POSITION) {
+ if (action->action_type() == ActionNode::STORE_POSITION) {
*cp_offset = static_cast<DeferredCapture*>(action)->cp_offset();
return true;
} else {
@@ -1268,7 +1205,7 @@ int Trace::FindAffectedRegisters(OutSet* affected_registers,
for (DeferredAction* action = actions_;
action != NULL;
action = action->next()) {
- if (action->type() == ActionNode::CLEAR_CAPTURES) {
+ if (action->action_type() == ActionNode::CLEAR_CAPTURES) {
Interval range = static_cast<DeferredClearCaptures*>(action)->range();
for (int i = range.from(); i <= range.to(); i++)
affected_registers->Set(i, zone);
@@ -1332,7 +1269,7 @@ void Trace::PerformDeferredActions(RegExpMacroAssembler* assembler,
action != NULL;
action = action->next()) {
if (action->Mentions(reg)) {
- switch (action->type()) {
+ switch (action->action_type()) {
case ActionNode::SET_REGISTER: {
Trace::DeferredSetRegister* psr =
static_cast<Trace::DeferredSetRegister*>(action);
@@ -1681,7 +1618,7 @@ static int GetCaseIndependentLetters(Isolate* isolate,
letters[0] = character;
length = 1;
}
- if (!ascii_subject || character <= String::kMaxAsciiCharCode) {
+ if (!ascii_subject || character <= String::kMaxOneByteCharCode) {
return length;
}
// The standard requires that non-ASCII characters cannot have ASCII
@@ -1732,7 +1669,7 @@ static inline bool EmitAtomNonLetter(Isolate* isolate,
bool checked = false;
// We handle the length > 1 case in a later pass.
if (length == 1) {
- if (ascii && c > String::kMaxAsciiCharCodeU) {
+ if (ascii && c > String::kMaxOneByteCharCodeU) {
// Can't match - see above.
return false; // Bounds not checked.
}
@@ -1753,7 +1690,7 @@ static bool ShortCutEmitCharacterPair(RegExpMacroAssembler* macro_assembler,
Label* on_failure) {
uc16 char_mask;
if (ascii) {
- char_mask = String::kMaxAsciiCharCode;
+ char_mask = String::kMaxOneByteCharCode;
} else {
char_mask = String::kMaxUtf16CodeUnit;
}
@@ -1932,8 +1869,9 @@ static void EmitUseLookupTable(
for (int i = j; i < kSize; i++) {
templ[i] = bit;
}
+ Factory* factory = masm->zone()->isolate()->factory();
// TODO(erikcorry): Cache these.
- Handle<ByteArray> ba = FACTORY->NewByteArray(kSize, TENURED);
+ Handle<ByteArray> ba = factory->NewByteArray(kSize, TENURED);
for (int i = 0; i < kSize; i++) {
ba->set(i, templ[i]);
}
@@ -2007,7 +1945,7 @@ static void SplitSearchSpace(ZoneList<int>* ranges,
// range with a single not-taken branch, speeding up this important
// character range (even non-ASCII charset-based text has spaces and
// punctuation).
- if (*border - 1 > String::kMaxAsciiCharCode && // ASCII case.
+ if (*border - 1 > String::kMaxOneByteCharCode && // ASCII case.
end_index - start_index > (*new_start_index - start_index) * 2 &&
last - first > kSize * 2 &&
binary_chop_index > *new_start_index &&
@@ -2211,7 +2149,7 @@ static void EmitCharClass(RegExpMacroAssembler* macro_assembler,
int max_char;
if (ascii) {
- max_char = String::kMaxAsciiCharCode;
+ max_char = String::kMaxOneByteCharCode;
} else {
max_char = String::kMaxUtf16CodeUnit;
}
@@ -2359,91 +2297,87 @@ RegExpNode::LimitResult RegExpNode::LimitVersions(RegExpCompiler* compiler,
int ActionNode::EatsAtLeast(int still_to_find,
- int recursion_depth,
+ int budget,
bool not_at_start) {
- if (recursion_depth > RegExpCompiler::kMaxRecursion) return 0;
- if (type_ == POSITIVE_SUBMATCH_SUCCESS) return 0; // Rewinds input!
+ if (budget <= 0) return 0;
+ if (action_type_ == POSITIVE_SUBMATCH_SUCCESS) return 0; // Rewinds input!
return on_success()->EatsAtLeast(still_to_find,
- recursion_depth + 1,
+ budget - 1,
not_at_start);
}
void ActionNode::FillInBMInfo(int offset,
- int recursion_depth,
int budget,
BoyerMooreLookahead* bm,
bool not_at_start) {
- if (type_ == BEGIN_SUBMATCH) {
+ if (action_type_ == BEGIN_SUBMATCH) {
bm->SetRest(offset);
- } else if (type_ != POSITIVE_SUBMATCH_SUCCESS) {
- on_success()->FillInBMInfo(
- offset, recursion_depth + 1, budget - 1, bm, not_at_start);
+ } else if (action_type_ != POSITIVE_SUBMATCH_SUCCESS) {
+ on_success()->FillInBMInfo(offset, budget - 1, bm, not_at_start);
}
SaveBMInfo(bm, not_at_start, offset);
}
int AssertionNode::EatsAtLeast(int still_to_find,
- int recursion_depth,
+ int budget,
bool not_at_start) {
- if (recursion_depth > RegExpCompiler::kMaxRecursion) return 0;
+ if (budget <= 0) return 0;
// If we know we are not at the start and we are asked "how many characters
// will you match if you succeed?" then we can answer anything since false
// implies false. So let's just return the max answer (still_to_find) since
// that won't prevent us from preloading a lot of characters for the other
// branches in the node graph.
- if (type() == AT_START && not_at_start) return still_to_find;
+ if (assertion_type() == AT_START && not_at_start) return still_to_find;
return on_success()->EatsAtLeast(still_to_find,
- recursion_depth + 1,
+ budget - 1,
not_at_start);
}
void AssertionNode::FillInBMInfo(int offset,
- int recursion_depth,
int budget,
BoyerMooreLookahead* bm,
bool not_at_start) {
// Match the behaviour of EatsAtLeast on this node.
- if (type() == AT_START && not_at_start) return;
- on_success()->FillInBMInfo(
- offset, recursion_depth + 1, budget - 1, bm, not_at_start);
+ if (assertion_type() == AT_START && not_at_start) return;
+ on_success()->FillInBMInfo(offset, budget - 1, bm, not_at_start);
SaveBMInfo(bm, not_at_start, offset);
}
int BackReferenceNode::EatsAtLeast(int still_to_find,
- int recursion_depth,
+ int budget,
bool not_at_start) {
- if (recursion_depth > RegExpCompiler::kMaxRecursion) return 0;
+ if (budget <= 0) return 0;
return on_success()->EatsAtLeast(still_to_find,
- recursion_depth + 1,
+ budget - 1,
not_at_start);
}
int TextNode::EatsAtLeast(int still_to_find,
- int recursion_depth,
+ int budget,
bool not_at_start) {
int answer = Length();
if (answer >= still_to_find) return answer;
- if (recursion_depth > RegExpCompiler::kMaxRecursion) return answer;
+ if (budget <= 0) return answer;
// We are not at start after this node so we set the last argument to 'true'.
return answer + on_success()->EatsAtLeast(still_to_find - answer,
- recursion_depth + 1,
+ budget - 1,
true);
}
int NegativeLookaheadChoiceNode::EatsAtLeast(int still_to_find,
- int recursion_depth,
+ int budget,
bool not_at_start) {
- if (recursion_depth > RegExpCompiler::kMaxRecursion) return 0;
+ if (budget <= 0) return 0;
// Alternative 0 is the negative lookahead, alternative 1 is what comes
// afterwards.
RegExpNode* node = alternatives_->at(1).node();
- return node->EatsAtLeast(still_to_find, recursion_depth + 1, not_at_start);
+ return node->EatsAtLeast(still_to_find, budget - 1, not_at_start);
}
@@ -2460,39 +2394,40 @@ void NegativeLookaheadChoiceNode::GetQuickCheckDetails(
int ChoiceNode::EatsAtLeastHelper(int still_to_find,
- int recursion_depth,
+ int budget,
RegExpNode* ignore_this_node,
bool not_at_start) {
- if (recursion_depth > RegExpCompiler::kMaxRecursion) return 0;
+ if (budget <= 0) return 0;
int min = 100;
int choice_count = alternatives_->length();
+ budget = (budget - 1) / choice_count;
for (int i = 0; i < choice_count; i++) {
RegExpNode* node = alternatives_->at(i).node();
if (node == ignore_this_node) continue;
- int node_eats_at_least = node->EatsAtLeast(still_to_find,
- recursion_depth + 1,
- not_at_start);
+ int node_eats_at_least =
+ node->EatsAtLeast(still_to_find, budget, not_at_start);
if (node_eats_at_least < min) min = node_eats_at_least;
+ if (min == 0) return 0;
}
return min;
}
int LoopChoiceNode::EatsAtLeast(int still_to_find,
- int recursion_depth,
+ int budget,
bool not_at_start) {
return EatsAtLeastHelper(still_to_find,
- recursion_depth,
+ budget - 1,
loop_node_,
not_at_start);
}
int ChoiceNode::EatsAtLeast(int still_to_find,
- int recursion_depth,
+ int budget,
bool not_at_start) {
return EatsAtLeastHelper(still_to_find,
- recursion_depth,
+ budget,
NULL,
not_at_start);
}
@@ -2513,7 +2448,7 @@ bool QuickCheckDetails::Rationalize(bool asc) {
bool found_useful_op = false;
uint32_t char_mask;
if (asc) {
- char_mask = String::kMaxAsciiCharCode;
+ char_mask = String::kMaxOneByteCharCode;
} else {
char_mask = String::kMaxUtf16CodeUnit;
}
@@ -2522,7 +2457,7 @@ bool QuickCheckDetails::Rationalize(bool asc) {
int char_shift = 0;
for (int i = 0; i < characters_; i++) {
Position* pos = &positions_[i];
- if ((pos->mask & String::kMaxAsciiCharCode) != 0) {
+ if ((pos->mask & String::kMaxOneByteCharCode) != 0) {
found_useful_op = true;
}
mask_ |= (pos->mask & char_mask) << char_shift;
@@ -2540,7 +2475,8 @@ bool RegExpNode::EmitQuickCheck(RegExpCompiler* compiler,
QuickCheckDetails* details,
bool fall_through_on_failure) {
if (details->characters() == 0) return false;
- GetQuickCheckDetails(details, compiler, 0, trace->at_start() == Trace::FALSE);
+ GetQuickCheckDetails(
+ details, compiler, 0, trace->at_start() == Trace::FALSE_VALUE);
if (details->cannot_match()) return false;
if (!details->Rationalize(compiler->ascii())) return false;
ASSERT(details->characters() == 1 ||
@@ -2565,7 +2501,7 @@ bool RegExpNode::EmitQuickCheck(RegExpCompiler* compiler,
// load so the value is already masked down.
uint32_t char_mask;
if (compiler->ascii()) {
- char_mask = String::kMaxAsciiCharCode;
+ char_mask = String::kMaxOneByteCharCode;
} else {
char_mask = String::kMaxUtf16CodeUnit;
}
@@ -2575,7 +2511,7 @@ bool RegExpNode::EmitQuickCheck(RegExpCompiler* compiler,
// For 2-character preloads in ASCII mode or 1-character preloads in
// TWO_BYTE mode we also use a 16 bit load with zero extend.
if (details->characters() == 2 && compiler->ascii()) {
- if ((mask & 0x7f7f) == 0x7f7f) need_mask = false;
+ if ((mask & 0xffff) == 0xffff) need_mask = false;
} else if (details->characters() == 1 && !compiler->ascii()) {
if ((mask & 0xffff) == 0xffff) need_mask = false;
} else {
@@ -2612,19 +2548,19 @@ void TextNode::GetQuickCheckDetails(QuickCheckDetails* details,
RegExpCompiler* compiler,
int characters_filled_in,
bool not_at_start) {
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = compiler->macro_assembler()->zone()->isolate();
ASSERT(characters_filled_in < details->characters());
int characters = details->characters();
int char_mask;
if (compiler->ascii()) {
- char_mask = String::kMaxAsciiCharCode;
+ char_mask = String::kMaxOneByteCharCode;
} else {
char_mask = String::kMaxUtf16CodeUnit;
}
for (int k = 0; k < elms_->length(); k++) {
TextElement elm = elms_->at(k);
- if (elm.type == TextElement::ATOM) {
- Vector<const uc16> quarks = elm.data.u_atom->data();
+ if (elm.text_type() == TextElement::ATOM) {
+ Vector<const uc16> quarks = elm.atom()->data();
for (int i = 0; i < characters && i < quarks.length(); i++) {
QuickCheckDetails::Position* pos =
details->positions(characters_filled_in);
@@ -2686,7 +2622,7 @@ void TextNode::GetQuickCheckDetails(QuickCheckDetails* details,
} else {
QuickCheckDetails::Position* pos =
details->positions(characters_filled_in);
- RegExpCharacterClass* tree = elm.data.u_char_class;
+ RegExpCharacterClass* tree = elm.char_class();
ZoneList<CharacterRange>* ranges = tree->ranges(zone());
if (tree->is_negated()) {
// A quick check uses multi-character mask and compare. There is no
@@ -2834,24 +2770,41 @@ class VisitMarker {
};
-RegExpNode* SeqRegExpNode::FilterASCII(int depth) {
+RegExpNode* SeqRegExpNode::FilterASCII(int depth, bool ignore_case) {
if (info()->replacement_calculated) return replacement();
if (depth < 0) return this;
ASSERT(!info()->visited);
VisitMarker marker(info());
- return FilterSuccessor(depth - 1);
+ return FilterSuccessor(depth - 1, ignore_case);
}
-RegExpNode* SeqRegExpNode::FilterSuccessor(int depth) {
- RegExpNode* next = on_success_->FilterASCII(depth - 1);
+RegExpNode* SeqRegExpNode::FilterSuccessor(int depth, bool ignore_case) {
+ RegExpNode* next = on_success_->FilterASCII(depth - 1, ignore_case);
if (next == NULL) return set_replacement(NULL);
on_success_ = next;
return set_replacement(this);
}
-RegExpNode* TextNode::FilterASCII(int depth) {
+// We need to check for the following characters: 0x39c 0x3bc 0x178.
+static inline bool RangeContainsLatin1Equivalents(CharacterRange range) {
+ // TODO(dcarney): this could be a lot more efficient.
+ return range.Contains(0x39c) ||
+ range.Contains(0x3bc) || range.Contains(0x178);
+}
+
+
+static bool RangesContainLatin1Equivalents(ZoneList<CharacterRange>* ranges) {
+ for (int i = 0; i < ranges->length(); i++) {
+ // TODO(dcarney): this could be a lot more efficient.
+ if (RangeContainsLatin1Equivalents(ranges->at(i))) return true;
+ }
+ return false;
+}
+
+
+RegExpNode* TextNode::FilterASCII(int depth, bool ignore_case) {
if (info()->replacement_calculated) return replacement();
if (depth < 0) return this;
ASSERT(!info()->visited);
@@ -2859,19 +2812,24 @@ RegExpNode* TextNode::FilterASCII(int depth) {
int element_count = elms_->length();
for (int i = 0; i < element_count; i++) {
TextElement elm = elms_->at(i);
- if (elm.type == TextElement::ATOM) {
- Vector<const uc16> quarks = elm.data.u_atom->data();
+ if (elm.text_type() == TextElement::ATOM) {
+ Vector<const uc16> quarks = elm.atom()->data();
for (int j = 0; j < quarks.length(); j++) {
- // We don't need special handling for case independence
- // because of the rule that case independence cannot make
- // a non-ASCII character match an ASCII character.
- if (quarks[j] > String::kMaxAsciiCharCode) {
- return set_replacement(NULL);
- }
+ uint16_t c = quarks[j];
+ if (c <= String::kMaxOneByteCharCode) continue;
+ if (!ignore_case) return set_replacement(NULL);
+ // Here, we need to check for characters whose upper and lower cases
+ // are outside the Latin-1 range.
+ uint16_t converted = unibrow::Latin1::ConvertNonLatin1ToLatin1(c);
+ // Character is outside Latin-1 completely
+ if (converted == 0) return set_replacement(NULL);
+ // Convert quark to Latin-1 in place.
+ uint16_t* copy = const_cast<uint16_t*>(quarks.start());
+ copy[j] = converted;
}
} else {
- ASSERT(elm.type == TextElement::CHAR_CLASS);
- RegExpCharacterClass* cc = elm.data.u_char_class;
+ ASSERT(elm.text_type() == TextElement::CHAR_CLASS);
+ RegExpCharacterClass* cc = elm.char_class();
ZoneList<CharacterRange>* ranges = cc->ranges(zone());
if (!CharacterRange::IsCanonical(ranges)) {
CharacterRange::Canonicalize(ranges);
@@ -2881,39 +2839,44 @@ RegExpNode* TextNode::FilterASCII(int depth) {
if (cc->is_negated()) {
if (range_count != 0 &&
ranges->at(0).from() == 0 &&
- ranges->at(0).to() >= String::kMaxAsciiCharCode) {
+ ranges->at(0).to() >= String::kMaxOneByteCharCode) {
+ // This will be handled in a later filter.
+ if (ignore_case && RangesContainLatin1Equivalents(ranges)) continue;
return set_replacement(NULL);
}
} else {
if (range_count == 0 ||
- ranges->at(0).from() > String::kMaxAsciiCharCode) {
+ ranges->at(0).from() > String::kMaxOneByteCharCode) {
+ // This will be handled in a later filter.
+ if (ignore_case && RangesContainLatin1Equivalents(ranges)) continue;
return set_replacement(NULL);
}
}
}
}
- return FilterSuccessor(depth - 1);
+ return FilterSuccessor(depth - 1, ignore_case);
}
-RegExpNode* LoopChoiceNode::FilterASCII(int depth) {
+RegExpNode* LoopChoiceNode::FilterASCII(int depth, bool ignore_case) {
if (info()->replacement_calculated) return replacement();
if (depth < 0) return this;
if (info()->visited) return this;
{
VisitMarker marker(info());
- RegExpNode* continue_replacement = continue_node_->FilterASCII(depth - 1);
+ RegExpNode* continue_replacement =
+ continue_node_->FilterASCII(depth - 1, ignore_case);
// If we can't continue after the loop then there is no sense in doing the
// loop.
if (continue_replacement == NULL) return set_replacement(NULL);
}
- return ChoiceNode::FilterASCII(depth - 1);
+ return ChoiceNode::FilterASCII(depth - 1, ignore_case);
}
-RegExpNode* ChoiceNode::FilterASCII(int depth) {
+RegExpNode* ChoiceNode::FilterASCII(int depth, bool ignore_case) {
if (info()->replacement_calculated) return replacement();
if (depth < 0) return this;
if (info()->visited) return this;
@@ -2932,7 +2895,8 @@ RegExpNode* ChoiceNode::FilterASCII(int depth) {
RegExpNode* survivor = NULL;
for (int i = 0; i < choice_count; i++) {
GuardedAlternative alternative = alternatives_->at(i);
- RegExpNode* replacement = alternative.node()->FilterASCII(depth - 1);
+ RegExpNode* replacement =
+ alternative.node()->FilterASCII(depth - 1, ignore_case);
ASSERT(replacement != this); // No missing EMPTY_MATCH_CHECK.
if (replacement != NULL) {
alternatives_->at(i).set_node(replacement);
@@ -2952,7 +2916,7 @@ RegExpNode* ChoiceNode::FilterASCII(int depth) {
new(zone()) ZoneList<GuardedAlternative>(surviving, zone());
for (int i = 0; i < choice_count; i++) {
RegExpNode* replacement =
- alternatives_->at(i).node()->FilterASCII(depth - 1);
+ alternatives_->at(i).node()->FilterASCII(depth - 1, ignore_case);
if (replacement != NULL) {
alternatives_->at(i).set_node(replacement);
new_alternatives->Add(alternatives_->at(i), zone());
@@ -2963,7 +2927,8 @@ RegExpNode* ChoiceNode::FilterASCII(int depth) {
}
-RegExpNode* NegativeLookaheadChoiceNode::FilterASCII(int depth) {
+RegExpNode* NegativeLookaheadChoiceNode::FilterASCII(int depth,
+ bool ignore_case) {
if (info()->replacement_calculated) return replacement();
if (depth < 0) return this;
if (info()->visited) return this;
@@ -2971,12 +2936,12 @@ RegExpNode* NegativeLookaheadChoiceNode::FilterASCII(int depth) {
// Alternative 0 is the negative lookahead, alternative 1 is what comes
// afterwards.
RegExpNode* node = alternatives_->at(1).node();
- RegExpNode* replacement = node->FilterASCII(depth - 1);
+ RegExpNode* replacement = node->FilterASCII(depth - 1, ignore_case);
if (replacement == NULL) return set_replacement(NULL);
alternatives_->at(1).set_node(replacement);
RegExpNode* neg_node = alternatives_->at(0).node();
- RegExpNode* neg_replacement = neg_node->FilterASCII(depth - 1);
+ RegExpNode* neg_replacement = neg_node->FilterASCII(depth - 1, ignore_case);
// If the negative lookahead is always going to fail then
// we don't need to check it.
if (neg_replacement == NULL) return set_replacement(replacement);
@@ -2999,19 +2964,15 @@ void LoopChoiceNode::GetQuickCheckDetails(QuickCheckDetails* details,
void LoopChoiceNode::FillInBMInfo(int offset,
- int recursion_depth,
int budget,
BoyerMooreLookahead* bm,
bool not_at_start) {
- if (body_can_be_zero_length_ ||
- recursion_depth > RegExpCompiler::kMaxRecursion ||
- budget <= 0) {
+ if (body_can_be_zero_length_ || budget <= 0) {
bm->SetRest(offset);
SaveBMInfo(bm, not_at_start, offset);
return;
}
- ChoiceNode::FillInBMInfo(
- offset, recursion_depth + 1, budget - 1, bm, not_at_start);
+ ChoiceNode::FillInBMInfo(offset, budget - 1, bm, not_at_start);
SaveBMInfo(bm, not_at_start, offset);
}
@@ -3104,24 +3065,28 @@ static void EmitHat(RegExpCompiler* compiler,
void AssertionNode::EmitBoundaryCheck(RegExpCompiler* compiler, Trace* trace) {
RegExpMacroAssembler* assembler = compiler->macro_assembler();
Trace::TriBool next_is_word_character = Trace::UNKNOWN;
- bool not_at_start = (trace->at_start() == Trace::FALSE);
+ bool not_at_start = (trace->at_start() == Trace::FALSE_VALUE);
BoyerMooreLookahead* lookahead = bm_info(not_at_start);
if (lookahead == NULL) {
int eats_at_least =
- Min(kMaxLookaheadForBoyerMoore,
- EatsAtLeast(kMaxLookaheadForBoyerMoore, 0, not_at_start));
+ Min(kMaxLookaheadForBoyerMoore, EatsAtLeast(kMaxLookaheadForBoyerMoore,
+ kRecursionBudget,
+ not_at_start));
if (eats_at_least >= 1) {
BoyerMooreLookahead* bm =
new(zone()) BoyerMooreLookahead(eats_at_least, compiler, zone());
- FillInBMInfo(0, 0, kFillInBMBudget, bm, not_at_start);
- if (bm->at(0)->is_non_word()) next_is_word_character = Trace::FALSE;
- if (bm->at(0)->is_word()) next_is_word_character = Trace::TRUE;
+ FillInBMInfo(0, kRecursionBudget, bm, not_at_start);
+ if (bm->at(0)->is_non_word())
+ next_is_word_character = Trace::FALSE_VALUE;
+ if (bm->at(0)->is_word()) next_is_word_character = Trace::TRUE_VALUE;
}
} else {
- if (lookahead->at(0)->is_non_word()) next_is_word_character = Trace::FALSE;
- if (lookahead->at(0)->is_word()) next_is_word_character = Trace::TRUE;
+ if (lookahead->at(0)->is_non_word())
+ next_is_word_character = Trace::FALSE_VALUE;
+ if (lookahead->at(0)->is_word())
+ next_is_word_character = Trace::TRUE_VALUE;
}
- bool at_boundary = (type_ == AssertionNode::AT_BOUNDARY);
+ bool at_boundary = (assertion_type_ == AssertionNode::AT_BOUNDARY);
if (next_is_word_character == Trace::UNKNOWN) {
Label before_non_word;
Label before_word;
@@ -3139,10 +3104,10 @@ void AssertionNode::EmitBoundaryCheck(RegExpCompiler* compiler, Trace* trace) {
assembler->Bind(&before_word);
BacktrackIfPrevious(compiler, trace, at_boundary ? kIsWord : kIsNonWord);
assembler->Bind(&ok);
- } else if (next_is_word_character == Trace::TRUE) {
+ } else if (next_is_word_character == Trace::TRUE_VALUE) {
BacktrackIfPrevious(compiler, trace, at_boundary ? kIsWord : kIsNonWord);
} else {
- ASSERT(next_is_word_character == Trace::FALSE);
+ ASSERT(next_is_word_character == Trace::FALSE_VALUE);
BacktrackIfPrevious(compiler, trace, at_boundary ? kIsNonWord : kIsWord);
}
}
@@ -3184,7 +3149,7 @@ void AssertionNode::GetQuickCheckDetails(QuickCheckDetails* details,
RegExpCompiler* compiler,
int filled_in,
bool not_at_start) {
- if (type_ == AT_START && not_at_start) {
+ if (assertion_type_ == AT_START && not_at_start) {
details->set_cannot_match();
return;
}
@@ -3197,7 +3162,7 @@ void AssertionNode::GetQuickCheckDetails(QuickCheckDetails* details,
void AssertionNode::Emit(RegExpCompiler* compiler, Trace* trace) {
RegExpMacroAssembler* assembler = compiler->macro_assembler();
- switch (type_) {
+ switch (assertion_type_) {
case AT_END: {
Label ok;
assembler->CheckPosition(trace->cp_offset(), &ok);
@@ -3206,7 +3171,7 @@ void AssertionNode::Emit(RegExpCompiler* compiler, Trace* trace) {
break;
}
case AT_START: {
- if (trace->at_start() == Trace::FALSE) {
+ if (trace->at_start() == Trace::FALSE_VALUE) {
assembler->GoTo(trace->backtrack());
return;
}
@@ -3281,25 +3246,25 @@ void TextNode::TextEmitPass(RegExpCompiler* compiler,
Trace* trace,
bool first_element_checked,
int* checked_up_to) {
- Isolate* isolate = Isolate::Current();
RegExpMacroAssembler* assembler = compiler->macro_assembler();
+ Isolate* isolate = assembler->zone()->isolate();
bool ascii = compiler->ascii();
Label* backtrack = trace->backtrack();
QuickCheckDetails* quick_check = trace->quick_check_performed();
int element_count = elms_->length();
for (int i = preloaded ? 0 : element_count - 1; i >= 0; i--) {
TextElement elm = elms_->at(i);
- int cp_offset = trace->cp_offset() + elm.cp_offset;
- if (elm.type == TextElement::ATOM) {
- Vector<const uc16> quarks = elm.data.u_atom->data();
+ int cp_offset = trace->cp_offset() + elm.cp_offset();
+ if (elm.text_type() == TextElement::ATOM) {
+ Vector<const uc16> quarks = elm.atom()->data();
for (int j = preloaded ? 0 : quarks.length() - 1; j >= 0; j--) {
if (first_element_checked && i == 0 && j == 0) continue;
- if (DeterminedAlready(quick_check, elm.cp_offset + j)) continue;
+ if (DeterminedAlready(quick_check, elm.cp_offset() + j)) continue;
EmitCharacterFunction* emit_function = NULL;
switch (pass) {
case NON_ASCII_MATCH:
ASSERT(ascii);
- if (quarks[j] > String::kMaxAsciiCharCode) {
+ if (quarks[j] > String::kMaxOneByteCharCode) {
assembler->GoTo(backtrack);
return;
}
@@ -3328,11 +3293,11 @@ void TextNode::TextEmitPass(RegExpCompiler* compiler,
}
}
} else {
- ASSERT_EQ(elm.type, TextElement::CHAR_CLASS);
+ ASSERT_EQ(TextElement::CHAR_CLASS, elm.text_type());
if (pass == CHARACTER_CLASS_MATCH) {
if (first_element_checked && i == 0) continue;
- if (DeterminedAlready(quick_check, elm.cp_offset)) continue;
- RegExpCharacterClass* cc = elm.data.u_char_class;
+ if (DeterminedAlready(quick_check, elm.cp_offset())) continue;
+ RegExpCharacterClass* cc = elm.char_class();
EmitCharClass(assembler,
cc,
ascii,
@@ -3350,12 +3315,8 @@ void TextNode::TextEmitPass(RegExpCompiler* compiler,
int TextNode::Length() {
TextElement elm = elms_->last();
- ASSERT(elm.cp_offset >= 0);
- if (elm.type == TextElement::ATOM) {
- return elm.cp_offset + elm.data.u_atom->data().length();
- } else {
- return elm.cp_offset + 1;
- }
+ ASSERT(elm.cp_offset() >= 0);
+ return elm.cp_offset() + elm.length();
}
@@ -3457,8 +3418,8 @@ void TextNode::MakeCaseIndependent(bool is_ascii) {
int element_count = elms_->length();
for (int i = 0; i < element_count; i++) {
TextElement elm = elms_->at(i);
- if (elm.type == TextElement::CHAR_CLASS) {
- RegExpCharacterClass* cc = elm.data.u_char_class;
+ if (elm.text_type() == TextElement::CHAR_CLASS) {
+ RegExpCharacterClass* cc = elm.char_class();
// None of the standard character classes is different in the case
// independent case and it slows us down if we don't know that.
if (cc->is_standard(zone())) continue;
@@ -3474,11 +3435,7 @@ void TextNode::MakeCaseIndependent(bool is_ascii) {
int TextNode::GreedyLoopTextLength() {
TextElement elm = elms_->at(elms_->length() - 1);
- if (elm.type == TextElement::CHAR_CLASS) {
- return elm.cp_offset + 1;
- } else {
- return elm.cp_offset + elm.data.u_atom->data().length();
- }
+ return elm.cp_offset() + elm.length();
}
@@ -3486,8 +3443,8 @@ RegExpNode* TextNode::GetSuccessorOfOmnivorousTextNode(
RegExpCompiler* compiler) {
if (elms_->length() != 1) return NULL;
TextElement elm = elms_->at(0);
- if (elm.type != TextElement::CHAR_CLASS) return NULL;
- RegExpCharacterClass* node = elm.data.u_char_class;
+ if (elm.text_type() != TextElement::CHAR_CLASS) return NULL;
+ RegExpCharacterClass* node = elm.char_class();
ZoneList<CharacterRange>* ranges = node->ranges(zone());
if (!CharacterRange::IsCanonical(ranges)) {
CharacterRange::Canonicalize(ranges);
@@ -3498,7 +3455,7 @@ RegExpNode* TextNode::GetSuccessorOfOmnivorousTextNode(
if (ranges->length() != 1) return NULL;
uint32_t max_char;
if (compiler->ascii()) {
- max_char = String::kMaxAsciiCharCode;
+ max_char = String::kMaxOneByteCharCode;
} else {
max_char = String::kMaxUtf16CodeUnit;
}
@@ -3698,7 +3655,7 @@ BoyerMooreLookahead::BoyerMooreLookahead(
: length_(length),
compiler_(compiler) {
if (compiler->ascii()) {
- max_char_ = String::kMaxAsciiCharCode;
+ max_char_ = String::kMaxOneByteCharCode;
} else {
max_char_ = String::kMaxUtf16CodeUnit;
}
@@ -3863,8 +3820,8 @@ bool BoyerMooreLookahead::EmitSkipInstructions(RegExpMacroAssembler* masm) {
return true;
}
- Handle<ByteArray> boolean_skip_table =
- FACTORY->NewByteArray(kSize, TENURED);
+ Factory* factory = masm->zone()->isolate()->factory();
+ Handle<ByteArray> boolean_skip_table = factory->NewByteArray(kSize, TENURED);
int skip_distance = GetSkipTable(
min_lookahead, max_lookahead, boolean_skip_table);
ASSERT(skip_distance != 0);
@@ -4023,7 +3980,7 @@ void ChoiceNode::Emit(RegExpCompiler* compiler, Trace* trace) {
int first_normal_choice = greedy_loop ? 1 : 0;
- bool not_at_start = current_trace->at_start() == Trace::FALSE;
+ bool not_at_start = current_trace->at_start() == Trace::FALSE_VALUE;
const int kEatsAtLeastNotYetInitialized = -1;
int eats_at_least = kEatsAtLeastNotYetInitialized;
@@ -4045,16 +4002,17 @@ void ChoiceNode::Emit(RegExpCompiler* compiler, Trace* trace) {
ASSERT(trace->is_trivial()); // This is the case on LoopChoiceNodes.
BoyerMooreLookahead* lookahead = bm_info(not_at_start);
if (lookahead == NULL) {
- eats_at_least =
- Min(kMaxLookaheadForBoyerMoore,
- EatsAtLeast(kMaxLookaheadForBoyerMoore, 0, not_at_start));
+ eats_at_least = Min(kMaxLookaheadForBoyerMoore,
+ EatsAtLeast(kMaxLookaheadForBoyerMoore,
+ kRecursionBudget,
+ not_at_start));
if (eats_at_least >= 1) {
BoyerMooreLookahead* bm =
new(zone()) BoyerMooreLookahead(eats_at_least,
compiler,
zone());
GuardedAlternative alt0 = alternatives_->at(0);
- alt0.node()->FillInBMInfo(0, 0, kFillInBMBudget, bm, not_at_start);
+ alt0.node()->FillInBMInfo(0, kRecursionBudget, bm, not_at_start);
skip_was_emitted = bm->EmitSkipInstructions(macro_assembler);
}
} else {
@@ -4066,7 +4024,8 @@ void ChoiceNode::Emit(RegExpCompiler* compiler, Trace* trace) {
if (eats_at_least == kEatsAtLeastNotYetInitialized) {
// Save some time by looking at most one machine word ahead.
- eats_at_least = EatsAtLeast(compiler->ascii() ? 4 : 2, 0, not_at_start);
+ eats_at_least =
+ EatsAtLeast(compiler->ascii() ? 4 : 2, kRecursionBudget, not_at_start);
}
int preload_characters = CalculatePreloadCharacters(compiler, eats_at_least);
@@ -4092,7 +4051,7 @@ void ChoiceNode::Emit(RegExpCompiler* compiler, Trace* trace) {
new_trace.set_bound_checked_up_to(preload_characters);
}
new_trace.quick_check_performed()->Clear();
- if (not_at_start_) new_trace.set_at_start(Trace::FALSE);
+ if (not_at_start_) new_trace.set_at_start(Trace::FALSE_VALUE);
alt_gen->expects_preload = preload_is_current;
bool generate_full_check_inline = false;
if (FLAG_regexp_optimization &&
@@ -4192,7 +4151,7 @@ void ChoiceNode::EmitOutOfLineContinuation(RegExpCompiler* compiler,
Trace out_of_line_trace(*trace);
out_of_line_trace.set_characters_preloaded(preload_characters);
out_of_line_trace.set_quick_check_performed(&alt_gen->quick_check_details);
- if (not_at_start_) out_of_line_trace.set_at_start(Trace::FALSE);
+ if (not_at_start_) out_of_line_trace.set_at_start(Trace::FALSE_VALUE);
ZoneList<Guard*>* guards = alternative.guards();
int guard_count = (guards == NULL) ? 0 : guards->length();
if (next_expects_preload) {
@@ -4229,7 +4188,7 @@ void ActionNode::Emit(RegExpCompiler* compiler, Trace* trace) {
RecursionCheck rc(compiler);
- switch (type_) {
+ switch (action_type_) {
case STORE_POSITION: {
Trace::DeferredCapture
new_capture(data_.u_position_register.reg,
@@ -4559,13 +4518,13 @@ void DotPrinter::VisitText(TextNode* that) {
for (int i = 0; i < that->elements()->length(); i++) {
if (i > 0) stream()->Add(" ");
TextElement elm = that->elements()->at(i);
- switch (elm.type) {
+ switch (elm.text_type()) {
case TextElement::ATOM: {
- stream()->Add("'%w'", elm.data.u_atom->data());
+ stream()->Add("'%w'", elm.atom()->data());
break;
}
case TextElement::CHAR_CLASS: {
- RegExpCharacterClass* node = elm.data.u_char_class;
+ RegExpCharacterClass* node = elm.char_class();
stream()->Add("[");
if (node->is_negated())
stream()->Add("^");
@@ -4606,7 +4565,7 @@ void DotPrinter::VisitEnd(EndNode* that) {
void DotPrinter::VisitAssertion(AssertionNode* that) {
stream()->Add(" n%p [", that);
- switch (that->type()) {
+ switch (that->assertion_type()) {
case AssertionNode::AT_END:
stream()->Add("label=\"$\", shape=septagon");
break;
@@ -4633,7 +4592,7 @@ void DotPrinter::VisitAssertion(AssertionNode* that) {
void DotPrinter::VisitAction(ActionNode* that) {
stream()->Add(" n%p [", that);
- switch (that->type_) {
+ switch (that->action_type_) {
case ActionNode::SET_REGISTER:
stream()->Add("label=\"$%i:=%i\", shape=octagon",
that->data_.u_store_register.reg,
@@ -5046,7 +5005,7 @@ RegExpNode* RegExpAssertion::ToNode(RegExpCompiler* compiler,
NodeInfo info;
Zone* zone = compiler->zone();
- switch (type()) {
+ switch (assertion_type()) {
case START_OF_LINE:
return AssertionNode::AfterNewline(on_success);
case START_OF_INPUT:
@@ -5333,12 +5292,12 @@ void CharacterRange::Split(ZoneList<CharacterRange>* base,
void CharacterRange::AddCaseEquivalents(ZoneList<CharacterRange>* ranges,
bool is_ascii,
Zone* zone) {
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = zone->isolate();
uc16 bottom = from();
uc16 top = to();
- if (is_ascii) {
- if (bottom > String::kMaxAsciiCharCode) return;
- if (top > String::kMaxAsciiCharCode) top = String::kMaxAsciiCharCode;
+ if (is_ascii && !RangeContainsLatin1Equivalents(*this)) {
+ if (bottom > String::kMaxOneByteCharCode) return;
+ if (top > String::kMaxOneByteCharCode) top = String::kMaxOneByteCharCode;
}
unibrow::uchar chars[unibrow::Ecma262UnCanonicalize::kMaxWidth];
if (top == bottom) {
@@ -5721,7 +5680,7 @@ OutSet* DispatchTable::Get(uc16 value) {
void Analysis::EnsureAnalyzed(RegExpNode* that) {
- StackLimitCheck check(Isolate::Current());
+ StackLimitCheck check(that->zone()->isolate());
if (check.HasOverflowed()) {
fail("Stack overflow");
return;
@@ -5747,12 +5706,8 @@ void TextNode::CalculateOffsets() {
int cp_offset = 0;
for (int i = 0; i < element_count; i++) {
TextElement& elm = elements()->at(i);
- elm.cp_offset = cp_offset;
- if (elm.type == TextElement::ATOM) {
- cp_offset += elm.data.u_atom->data().length();
- } else {
- cp_offset++;
- }
+ elm.set_cp_offset(cp_offset);
+ cp_offset += elm.length();
}
}
@@ -5822,7 +5777,6 @@ void Analysis::VisitAssertion(AssertionNode* that) {
void BackReferenceNode::FillInBMInfo(int offset,
- int recursion_depth,
int budget,
BoyerMooreLookahead* bm,
bool not_at_start) {
@@ -5838,7 +5792,6 @@ STATIC_ASSERT(BoyerMoorePositionInfo::kMapSize ==
void ChoiceNode::FillInBMInfo(int offset,
- int recursion_depth,
int budget,
BoyerMooreLookahead* bm,
bool not_at_start) {
@@ -5851,15 +5804,13 @@ void ChoiceNode::FillInBMInfo(int offset,
SaveBMInfo(bm, not_at_start, offset);
return;
}
- alt.node()->FillInBMInfo(
- offset, recursion_depth + 1, budget, bm, not_at_start);
+ alt.node()->FillInBMInfo(offset, budget, bm, not_at_start);
}
SaveBMInfo(bm, not_at_start, offset);
}
void TextNode::FillInBMInfo(int initial_offset,
- int recursion_depth,
int budget,
BoyerMooreLookahead* bm,
bool not_at_start) {
@@ -5872,8 +5823,8 @@ void TextNode::FillInBMInfo(int initial_offset,
return;
}
TextElement text = elements()->at(i);
- if (text.type == TextElement::ATOM) {
- RegExpAtom* atom = text.data.u_atom;
+ if (text.text_type() == TextElement::ATOM) {
+ RegExpAtom* atom = text.atom();
for (int j = 0; j < atom->length(); j++, offset++) {
if (offset >= bm->length()) {
if (initial_offset == 0) set_bm_info(not_at_start, bm);
@@ -5883,9 +5834,9 @@ void TextNode::FillInBMInfo(int initial_offset,
if (bm->compiler()->ignore_case()) {
unibrow::uchar chars[unibrow::Ecma262UnCanonicalize::kMaxWidth];
int length = GetCaseIndependentLetters(
- ISOLATE,
+ Isolate::Current(),
character,
- bm->max_char() == String::kMaxAsciiCharCode,
+ bm->max_char() == String::kMaxOneByteCharCode,
chars);
for (int j = 0; j < length; j++) {
bm->Set(offset, chars[j]);
@@ -5895,8 +5846,8 @@ void TextNode::FillInBMInfo(int initial_offset,
}
}
} else {
- ASSERT(text.type == TextElement::CHAR_CLASS);
- RegExpCharacterClass* char_class = text.data.u_char_class;
+ ASSERT_EQ(TextElement::CHAR_CLASS, text.text_type());
+ RegExpCharacterClass* char_class = text.char_class();
ZoneList<CharacterRange>* ranges = char_class->ranges(zone());
if (char_class->is_negated()) {
bm->SetAll(offset);
@@ -5916,7 +5867,6 @@ void TextNode::FillInBMInfo(int initial_offset,
return;
}
on_success()->FillInBMInfo(offset,
- recursion_depth + 1,
budget - 1,
bm,
true); // Not at start after a text node.
@@ -6009,14 +5959,14 @@ void DispatchTableConstructor::AddInverse(ZoneList<CharacterRange>* ranges) {
void DispatchTableConstructor::VisitText(TextNode* that) {
TextElement elm = that->elements()->at(0);
- switch (elm.type) {
+ switch (elm.text_type()) {
case TextElement::ATOM: {
- uc16 c = elm.data.u_atom->data()[0];
+ uc16 c = elm.atom()->data()[0];
AddRange(CharacterRange(c, c));
break;
}
case TextElement::CHAR_CLASS: {
- RegExpCharacterClass* tree = elm.data.u_char_class;
+ RegExpCharacterClass* tree = elm.char_class();
ZoneList<CharacterRange>* ranges = tree->ranges(that->zone());
if (tree->is_negated()) {
AddInverse(ranges);
@@ -6049,7 +5999,7 @@ RegExpEngine::CompilationResult RegExpEngine::Compile(
bool is_ascii,
Zone* zone) {
if ((data->capture_count + 1) * 2 - 1 > RegExpMacroAssembler::kMaxRegister) {
- return IrregexpRegExpTooBig();
+ return IrregexpRegExpTooBig(zone->isolate());
}
RegExpCompiler compiler(data->capture_count, ignore_case, is_ascii, zone);
@@ -6099,10 +6049,12 @@ RegExpEngine::CompilationResult RegExpEngine::Compile(
}
}
if (is_ascii) {
- node = node->FilterASCII(RegExpCompiler::kMaxRecursion);
+ node = node->FilterASCII(RegExpCompiler::kMaxRecursion, ignore_case);
// Do it again to propagate the new nodes to places where they were not
// put because they had not been calculated yet.
- if (node != NULL) node = node->FilterASCII(RegExpCompiler::kMaxRecursion);
+ if (node != NULL) {
+ node = node->FilterASCII(RegExpCompiler::kMaxRecursion, ignore_case);
+ }
}
if (node == NULL) node = new(zone) EndNode(EndNode::BACKTRACK, zone);
@@ -6111,7 +6063,7 @@ RegExpEngine::CompilationResult RegExpEngine::Compile(
analysis.EnsureAnalyzed(node);
if (analysis.has_failed()) {
const char* error_message = analysis.error_message();
- return CompilationResult(error_message);
+ return CompilationResult(zone->isolate(), error_message);
}
// Create the correct assembler for the architecture.
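The jsregexp.cc changes above replace the recursion_depth counter threaded through EatsAtLeast and FillInBMInfo with a single work budget (kRecursionBudget): each node visited spends one unit, and a choice node divides what remains among its alternatives, so analysis cost is bounded by the number of nodes touched rather than by depth alone. A minimal standalone sketch of that budgeting (hypothetical node type, not V8's classes):

#include <algorithm>
#include <iostream>
#include <vector>

struct Node {
  int own_length = 0;                 // characters this node itself consumes
  Node* on_success = nullptr;         // linear successor
  std::vector<Node*> alternatives;    // non-empty => behaves as a choice node

  int EatsAtLeast(int still_to_find, int budget) const {
    if (budget <= 0) return 0;        // out of analysis budget: claim nothing
    if (!alternatives.empty()) {
      // Split the remaining budget across the alternatives, as the reworked
      // ChoiceNode::EatsAtLeastHelper does.
      int per_alt = (budget - 1) / static_cast<int>(alternatives.size());
      int min = still_to_find;
      for (const Node* alt : alternatives) {
        min = std::min(min, alt->EatsAtLeast(still_to_find, per_alt));
        if (min == 0) return 0;       // cannot get any lower; stop early
      }
      return min;
    }
    int eaten = own_length;
    if (eaten >= still_to_find || on_success == nullptr) {
      return std::min(eaten, still_to_find);
    }
    return eaten + on_success->EatsAtLeast(still_to_find - eaten, budget - 1);
  }
};

int main() {
  // /ab(?:cd|efg)/ as a toy graph: "ab" -> choice("cd", "efg").
  Node cd{2};
  Node efg{3};
  Node choice;
  choice.alternatives = {&cd, &efg};
  Node ab{2, &choice};
  std::cout << ab.EatsAtLeast(4, 200) << "\n";   // prints 4 ("ab" plus "cd")
}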
diff --git a/deps/v8/src/jsregexp.h b/deps/v8/src/jsregexp.h
index 96825cef2..dfd415d5a 100644
--- a/deps/v8/src/jsregexp.h
+++ b/deps/v8/src/jsregexp.h
@@ -71,8 +71,7 @@ class RegExpImpl {
// Returns false if compilation fails.
static Handle<Object> Compile(Handle<JSRegExp> re,
Handle<String> pattern,
- Handle<String> flags,
- Zone* zone);
+ Handle<String> flags);
// See ECMA-262 section 15.10.6.2.
// This function calls the garbage collector if necessary.
@@ -153,17 +152,17 @@ class RegExpImpl {
bool is_global,
Isolate* isolate);
- ~GlobalCache();
+ INLINE(~GlobalCache());
// Fetch the next entry in the cache for global regexp match results.
// This does not set the last match info. Upon failure, NULL is returned.
// The cause can be checked with Result(). The previous
// result is still available in memory when a failure happens.
- int32_t* FetchNext();
+ INLINE(int32_t* FetchNext());
- int32_t* LastSuccessfulMatch();
+ INLINE(int32_t* LastSuccessfulMatch());
- inline bool HasException() { return num_matches_ < 0; }
+ INLINE(bool HasException()) { return num_matches_ < 0; }
private:
int num_matches_;
@@ -427,20 +426,41 @@ FOR_EACH_REG_EXP_TREE_TYPE(FORWARD_DECLARE)
#undef FORWARD_DECLARE
-class TextElement {
+class TextElement V8_FINAL BASE_EMBEDDED {
public:
- enum Type {UNINITIALIZED, ATOM, CHAR_CLASS};
- TextElement() : type(UNINITIALIZED) { }
- explicit TextElement(Type t) : type(t), cp_offset(-1) { }
+ enum TextType {
+ ATOM,
+ CHAR_CLASS
+ };
+
static TextElement Atom(RegExpAtom* atom);
static TextElement CharClass(RegExpCharacterClass* char_class);
- int length();
- Type type;
- union {
- RegExpAtom* u_atom;
- RegExpCharacterClass* u_char_class;
- } data;
- int cp_offset;
+
+ int cp_offset() const { return cp_offset_; }
+ void set_cp_offset(int cp_offset) { cp_offset_ = cp_offset; }
+ int length() const;
+
+ TextType text_type() const { return text_type_; }
+
+ RegExpTree* tree() const { return tree_; }
+
+ RegExpAtom* atom() const {
+ ASSERT(text_type() == ATOM);
+ return reinterpret_cast<RegExpAtom*>(tree());
+ }
+
+ RegExpCharacterClass* char_class() const {
+ ASSERT(text_type() == CHAR_CLASS);
+ return reinterpret_cast<RegExpCharacterClass*>(tree());
+ }
+
+ private:
+ TextElement(TextType text_type, RegExpTree* tree)
+ : cp_offset_(-1), text_type_(text_type), tree_(tree) {}
+
+ int cp_offset_;
+ TextType text_type_;
+ RegExpTree* tree_;
};
@@ -582,9 +602,7 @@ class RegExpNode: public ZoneObject {
// used to indicate that we know we are not at the start of the input. In
// this case anchored branches will always fail and can be ignored when
// determining how many characters are consumed on success.
- virtual int EatsAtLeast(int still_to_find,
- int recursion_depth,
- bool not_at_start) = 0;
+ virtual int EatsAtLeast(int still_to_find, int budget, bool not_at_start) = 0;
// Emits some quick code that checks whether the preloaded characters match.
// Falls through on certain failure, jumps to the label on possible success.
// If the node cannot make a quick check it does nothing and returns false.
@@ -616,9 +634,8 @@ class RegExpNode: public ZoneObject {
// implementation. TODO(erikcorry): This should share more code with
// EatsAtLeast, GetQuickCheckDetails. The budget argument is used to limit
// the number of nodes we are willing to look at in order to create this data.
- static const int kFillInBMBudget = 200;
+ static const int kRecursionBudget = 200;
virtual void FillInBMInfo(int offset,
- int recursion_depth,
int budget,
BoyerMooreLookahead* bm,
bool not_at_start) {
@@ -628,7 +645,7 @@ class RegExpNode: public ZoneObject {
// If we know that the input is ASCII then there are some nodes that can
// never match. This method returns a node that can be substituted for
// itself, or NULL if the node can never match.
- virtual RegExpNode* FilterASCII(int depth) { return this; }
+ virtual RegExpNode* FilterASCII(int depth, bool ignore_case) { return this; }
// Helper for FilterASCII.
RegExpNode* replacement() {
ASSERT(info()->replacement_calculated);
@@ -723,19 +740,17 @@ class SeqRegExpNode: public RegExpNode {
: RegExpNode(on_success->zone()), on_success_(on_success) { }
RegExpNode* on_success() { return on_success_; }
void set_on_success(RegExpNode* node) { on_success_ = node; }
- virtual RegExpNode* FilterASCII(int depth);
+ virtual RegExpNode* FilterASCII(int depth, bool ignore_case);
virtual void FillInBMInfo(int offset,
- int recursion_depth,
int budget,
BoyerMooreLookahead* bm,
bool not_at_start) {
- on_success_->FillInBMInfo(
- offset, recursion_depth + 1, budget - 1, bm, not_at_start);
+ on_success_->FillInBMInfo(offset, budget - 1, bm, not_at_start);
if (offset == 0) set_bm_info(not_at_start, bm);
}
protected:
- RegExpNode* FilterSuccessor(int depth);
+ RegExpNode* FilterSuccessor(int depth, bool ignore_case);
private:
RegExpNode* on_success_;
@@ -744,7 +759,7 @@ class SeqRegExpNode: public RegExpNode {
class ActionNode: public SeqRegExpNode {
public:
- enum Type {
+ enum ActionType {
SET_REGISTER,
INCREMENT_REGISTER,
STORE_POSITION,
@@ -773,9 +788,7 @@ class ActionNode: public SeqRegExpNode {
RegExpNode* on_success);
virtual void Accept(NodeVisitor* visitor);
virtual void Emit(RegExpCompiler* compiler, Trace* trace);
- virtual int EatsAtLeast(int still_to_find,
- int recursion_depth,
- bool not_at_start);
+ virtual int EatsAtLeast(int still_to_find, int budget, bool not_at_start);
virtual void GetQuickCheckDetails(QuickCheckDetails* details,
RegExpCompiler* compiler,
int filled_in,
@@ -784,11 +797,10 @@ class ActionNode: public SeqRegExpNode {
details, compiler, filled_in, not_at_start);
}
virtual void FillInBMInfo(int offset,
- int recursion_depth,
int budget,
BoyerMooreLookahead* bm,
bool not_at_start);
- Type type() { return type_; }
+ ActionType action_type() { return action_type_; }
// TODO(erikcorry): We should allow some action nodes in greedy loops.
virtual int GreedyLoopTextLength() { return kNodeIsTooComplexForGreedyLoops; }
@@ -821,10 +833,10 @@ class ActionNode: public SeqRegExpNode {
int range_to;
} u_clear_captures;
} data_;
- ActionNode(Type type, RegExpNode* on_success)
+ ActionNode(ActionType action_type, RegExpNode* on_success)
: SeqRegExpNode(on_success),
- type_(type) { }
- Type type_;
+ action_type_(action_type) { }
+ ActionType action_type_;
friend class DotPrinter;
};
@@ -843,9 +855,7 @@ class TextNode: public SeqRegExpNode {
}
virtual void Accept(NodeVisitor* visitor);
virtual void Emit(RegExpCompiler* compiler, Trace* trace);
- virtual int EatsAtLeast(int still_to_find,
- int recursion_depth,
- bool not_at_start);
+ virtual int EatsAtLeast(int still_to_find, int budget, bool not_at_start);
virtual void GetQuickCheckDetails(QuickCheckDetails* details,
RegExpCompiler* compiler,
int characters_filled_in,
@@ -856,12 +866,11 @@ class TextNode: public SeqRegExpNode {
virtual RegExpNode* GetSuccessorOfOmnivorousTextNode(
RegExpCompiler* compiler);
virtual void FillInBMInfo(int offset,
- int recursion_depth,
int budget,
BoyerMooreLookahead* bm,
bool not_at_start);
void CalculateOffsets();
- virtual RegExpNode* FilterASCII(int depth);
+ virtual RegExpNode* FilterASCII(int depth, bool ignore_case);
private:
enum TextEmitPassType {
@@ -887,7 +896,7 @@ class TextNode: public SeqRegExpNode {
class AssertionNode: public SeqRegExpNode {
public:
- enum AssertionNodeType {
+ enum AssertionType {
AT_END,
AT_START,
AT_BOUNDARY,
@@ -911,20 +920,16 @@ class AssertionNode: public SeqRegExpNode {
}
virtual void Accept(NodeVisitor* visitor);
virtual void Emit(RegExpCompiler* compiler, Trace* trace);
- virtual int EatsAtLeast(int still_to_find,
- int recursion_depth,
- bool not_at_start);
+ virtual int EatsAtLeast(int still_to_find, int budget, bool not_at_start);
virtual void GetQuickCheckDetails(QuickCheckDetails* details,
RegExpCompiler* compiler,
int filled_in,
bool not_at_start);
virtual void FillInBMInfo(int offset,
- int recursion_depth,
int budget,
BoyerMooreLookahead* bm,
bool not_at_start);
- AssertionNodeType type() { return type_; }
- void set_type(AssertionNodeType type) { type_ = type; }
+ AssertionType assertion_type() { return assertion_type_; }
private:
void EmitBoundaryCheck(RegExpCompiler* compiler, Trace* trace);
@@ -932,9 +937,9 @@ class AssertionNode: public SeqRegExpNode {
void BacktrackIfPrevious(RegExpCompiler* compiler,
Trace* trace,
IfPrevious backtrack_if_previous);
- AssertionNode(AssertionNodeType t, RegExpNode* on_success)
- : SeqRegExpNode(on_success), type_(t) { }
- AssertionNodeType type_;
+ AssertionNode(AssertionType t, RegExpNode* on_success)
+ : SeqRegExpNode(on_success), assertion_type_(t) { }
+ AssertionType assertion_type_;
};
@@ -960,7 +965,6 @@ class BackReferenceNode: public SeqRegExpNode {
return;
}
virtual void FillInBMInfo(int offset,
- int recursion_depth,
int budget,
BoyerMooreLookahead* bm,
bool not_at_start);
@@ -989,7 +993,6 @@ class EndNode: public RegExpNode {
UNREACHABLE();
}
virtual void FillInBMInfo(int offset,
- int recursion_depth,
int budget,
BoyerMooreLookahead* bm,
bool not_at_start) {
@@ -1075,11 +1078,9 @@ class ChoiceNode: public RegExpNode {
ZoneList<GuardedAlternative>* alternatives() { return alternatives_; }
DispatchTable* GetTable(bool ignore_case);
virtual void Emit(RegExpCompiler* compiler, Trace* trace);
- virtual int EatsAtLeast(int still_to_find,
- int recursion_depth,
- bool not_at_start);
+ virtual int EatsAtLeast(int still_to_find, int budget, bool not_at_start);
int EatsAtLeastHelper(int still_to_find,
- int recursion_depth,
+ int budget,
RegExpNode* ignore_this_node,
bool not_at_start);
virtual void GetQuickCheckDetails(QuickCheckDetails* details,
@@ -1087,7 +1088,6 @@ class ChoiceNode: public RegExpNode {
int characters_filled_in,
bool not_at_start);
virtual void FillInBMInfo(int offset,
- int recursion_depth,
int budget,
BoyerMooreLookahead* bm,
bool not_at_start);
@@ -1097,7 +1097,7 @@ class ChoiceNode: public RegExpNode {
void set_not_at_start() { not_at_start_ = true; }
void set_being_calculated(bool b) { being_calculated_ = b; }
virtual bool try_to_emit_quick_check_for_alternative(int i) { return true; }
- virtual RegExpNode* FilterASCII(int depth);
+ virtual RegExpNode* FilterASCII(int depth, bool ignore_case);
protected:
int GreedyLoopTextLengthForAlternative(GuardedAlternative* alternative);
@@ -1133,20 +1133,17 @@ class NegativeLookaheadChoiceNode: public ChoiceNode {
AddAlternative(this_must_fail);
AddAlternative(then_do_this);
}
- virtual int EatsAtLeast(int still_to_find,
- int recursion_depth,
- bool not_at_start);
+ virtual int EatsAtLeast(int still_to_find, int budget, bool not_at_start);
virtual void GetQuickCheckDetails(QuickCheckDetails* details,
RegExpCompiler* compiler,
int characters_filled_in,
bool not_at_start);
virtual void FillInBMInfo(int offset,
- int recursion_depth,
int budget,
BoyerMooreLookahead* bm,
bool not_at_start) {
alternatives_->at(1).node()->FillInBMInfo(
- offset, recursion_depth + 1, budget - 1, bm, not_at_start);
+ offset, budget - 1, bm, not_at_start);
if (offset == 0) set_bm_info(not_at_start, bm);
}
// For a negative lookahead we don't emit the quick check for the
@@ -1155,7 +1152,7 @@ class NegativeLookaheadChoiceNode: public ChoiceNode {
// characters, but on a negative lookahead the negative branch did not take
// part in that calculation (EatsAtLeast) so the assumptions don't hold.
virtual bool try_to_emit_quick_check_for_alternative(int i) { return i != 0; }
- virtual RegExpNode* FilterASCII(int depth);
+ virtual RegExpNode* FilterASCII(int depth, bool ignore_case);
};
@@ -1169,15 +1166,12 @@ class LoopChoiceNode: public ChoiceNode {
void AddLoopAlternative(GuardedAlternative alt);
void AddContinueAlternative(GuardedAlternative alt);
virtual void Emit(RegExpCompiler* compiler, Trace* trace);
- virtual int EatsAtLeast(int still_to_find,
- int recursion_depth,
- bool not_at_start);
+ virtual int EatsAtLeast(int still_to_find, int budget, bool not_at_start);
virtual void GetQuickCheckDetails(QuickCheckDetails* details,
RegExpCompiler* compiler,
int characters_filled_in,
bool not_at_start);
virtual void FillInBMInfo(int offset,
- int recursion_depth,
int budget,
BoyerMooreLookahead* bm,
bool not_at_start);
@@ -1185,7 +1179,7 @@ class LoopChoiceNode: public ChoiceNode {
RegExpNode* continue_node() { return continue_node_; }
bool body_can_be_zero_length() { return body_can_be_zero_length_; }
virtual void Accept(NodeVisitor* visitor);
- virtual RegExpNode* FilterASCII(int depth);
+ virtual RegExpNode* FilterASCII(int depth, bool ignore_case);
private:
// AddAlternative is made private for loop nodes because alternatives
@@ -1357,19 +1351,19 @@ class Trace {
// A value for a property that is either known to be true, known to be false,
// or not known.
enum TriBool {
- UNKNOWN = -1, FALSE = 0, TRUE = 1
+ UNKNOWN = -1, FALSE_VALUE = 0, TRUE_VALUE = 1
};
class DeferredAction {
public:
- DeferredAction(ActionNode::Type type, int reg)
- : type_(type), reg_(reg), next_(NULL) { }
+ DeferredAction(ActionNode::ActionType action_type, int reg)
+ : action_type_(action_type), reg_(reg), next_(NULL) { }
DeferredAction* next() { return next_; }
bool Mentions(int reg);
int reg() { return reg_; }
- ActionNode::Type type() { return type_; }
+ ActionNode::ActionType action_type() { return action_type_; }
private:
- ActionNode::Type type_;
+ ActionNode::ActionType action_type_;
int reg_;
DeferredAction* next_;
friend class Trace;
@@ -1453,7 +1447,9 @@ class Trace {
at_start_ == UNKNOWN;
}
TriBool at_start() { return at_start_; }
- void set_at_start(bool at_start) { at_start_ = at_start ? TRUE : FALSE; }
+ void set_at_start(bool at_start) {
+ at_start_ = at_start ? TRUE_VALUE : FALSE_VALUE;
+ }
Label* backtrack() { return backtrack_; }
Label* loop_label() { return loop_label_; }
RegExpNode* stop_node() { return stop_node_; }
@@ -1619,9 +1615,9 @@ struct RegExpCompileData {
class RegExpEngine: public AllStatic {
public:
struct CompilationResult {
- explicit CompilationResult(const char* error_message)
+ CompilationResult(Isolate* isolate, const char* error_message)
: error_message(error_message),
- code(HEAP->the_hole_value()),
+ code(isolate->heap()->the_hole_value()),
num_registers(0) {}
CompilationResult(Object* code, int registers)
: error_message(NULL),
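
Aside — an illustrative sketch, not part of the patch: the TextElement rewrite above swaps a public tagged union for a private constructor, per-variant static factories, and accessors that assert the variant before downcasting. The Tree/Atom/CharClass names below are stand-ins for the V8 classes, with assert() in place of ASSERT().

#include <cassert>

struct Tree { virtual ~Tree() {} };   // stand-in for RegExpTree
struct Atom : Tree {};                // stand-in for RegExpAtom
struct CharClass : Tree {};           // stand-in for RegExpCharacterClass

class Element {
 public:
  enum TextType { ATOM, CHAR_CLASS };
  static Element MakeAtom(Atom* atom) { return Element(ATOM, atom); }
  static Element MakeCharClass(CharClass* cc) { return Element(CHAR_CLASS, cc); }
  TextType text_type() const { return text_type_; }
  Atom* atom() const {
    assert(text_type_ == ATOM);       // checked accessor instead of a raw union read
    return static_cast<Atom*>(tree_);
  }
  CharClass* char_class() const {
    assert(text_type_ == CHAR_CLASS);
    return static_cast<CharClass*>(tree_);
  }
 private:
  Element(TextType text_type, Tree* tree) : text_type_(text_type), tree_(tree) {}
  TextType text_type_;
  Tree* tree_;
};
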
diff --git a/deps/v8/src/lazy-instance.h b/deps/v8/src/lazy-instance.h
index 9d68b8cac..fc03f4d12 100644
--- a/deps/v8/src/lazy-instance.h
+++ b/deps/v8/src/lazy-instance.h
@@ -91,12 +91,13 @@
#ifndef V8_LAZY_INSTANCE_H_
#define V8_LAZY_INSTANCE_H_
+#include "checks.h"
#include "once.h"
namespace v8 {
namespace internal {
-#define LAZY_STATIC_INSTANCE_INITIALIZER { V8_ONCE_INIT, {} }
+#define LAZY_STATIC_INSTANCE_INITIALIZER { V8_ONCE_INIT, { {} } }
#define LAZY_DYNAMIC_INSTANCE_INITIALIZER { V8_ONCE_INIT, 0 }
// Default to static mode.
@@ -111,17 +112,15 @@ struct LeakyInstanceTrait {
// Traits that define how an instance is allocated and accessed.
-// TODO(kalmard): __alignof__ is only defined for GCC > 4.2. Fix alignment issue
-// on MIPS with other compilers.
-#if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ > 2))
-#define LAZY_ALIGN(x) __attribute__((aligned(__alignof__(x))))
-#else
-#define LAZY_ALIGN(x)
-#endif
template <typename T>
struct StaticallyAllocatedInstanceTrait {
- typedef char StorageType[sizeof(T)] LAZY_ALIGN(T);
+  // Fall back to 16-byte alignment here to be on the safe side.
+ struct V8_ALIGNAS(T, 16) StorageType {
+ char x[sizeof(T)];
+ };
+
+ STATIC_ASSERT(V8_ALIGNOF(StorageType) >= V8_ALIGNOF(T));
static T* MutableInstance(StorageType* storage) {
return reinterpret_cast<T*>(storage);
@@ -133,8 +132,6 @@ struct StaticallyAllocatedInstanceTrait {
}
};
-#undef LAZY_ALIGN
-
template <typename T>
struct DynamicallyAllocatedInstanceTrait {
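
Aside — an illustrative sketch, not part of the patch: the aligned-storage idea the hunk above adopts, written with standard C++11 alignas/alignof instead of the V8_ALIGNAS and V8_ALIGNOF macros.

#include <new>

template <typename T>
struct StaticStorage {
  alignas(T) char bytes[sizeof(T)];   // raw bytes carrying at least T's alignment
  T* mutable_instance() { return reinterpret_cast<T*>(bytes); }
};

static_assert(alignof(StaticStorage<double>) >= alignof(double),
              "storage must be at least as aligned as the wrapped type");

// Possible use: construct lazily into the pre-reserved, correctly aligned bytes.
//   StaticStorage<double> storage;
//   double* d = new (storage.mutable_instance()) double(1.0);
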
diff --git a/deps/v8/src/list-inl.h b/deps/v8/src/list-inl.h
index 60a033df0..143c830ee 100644
--- a/deps/v8/src/list-inl.h
+++ b/deps/v8/src/list-inl.h
@@ -29,6 +29,7 @@
#define V8_LIST_INL_H_
#include "list.h"
+#include "platform.h"
namespace v8 {
namespace internal {
@@ -85,8 +86,9 @@ void List<T, P>::ResizeAddInternal(const T& element, P alloc) {
template<typename T, class P>
void List<T, P>::Resize(int new_capacity, P alloc) {
+ ASSERT_LE(length_, new_capacity);
T* new_data = NewData(new_capacity, alloc);
- memcpy(new_data, data_, capacity_ * sizeof(T));
+ OS::MemCopy(new_data, data_, length_ * sizeof(T));
List<T, P>::DeleteData(data_);
data_ = new_data;
capacity_ = new_capacity;
@@ -102,6 +104,13 @@ Vector<T> List<T, P>::AddBlock(T value, int count, P alloc) {
template<typename T, class P>
+void List<T, P>::Set(int index, const T& elm) {
+ ASSERT(index >= 0 && index <= length_);
+ data_[index] = elm;
+}
+
+
+template<typename T, class P>
void List<T, P>::InsertAt(int index, const T& elm, P alloc) {
ASSERT(index >= 0 && index <= length_);
Add(elm, alloc);
@@ -162,6 +171,14 @@ void List<T, P>::Rewind(int pos) {
template<typename T, class P>
+void List<T, P>::Trim(P alloc) {
+ if (length_ < capacity_ / 4) {
+ Resize(capacity_ / 2, alloc);
+ }
+}
+
+
+template<typename T, class P>
void List<T, P>::Iterate(void (*callback)(T* x)) {
for (int i = 0; i < length_; i++) callback(&data_[i]);
}
@@ -206,7 +223,7 @@ void List<T, P>::Sort(int (*cmp)(const T* x, const T* y)) {
template<typename T, class P>
void List<T, P>::Sort() {
- Sort(PointerValueCompare<T>);
+ ToVector().Sort();
}
diff --git a/deps/v8/src/list.h b/deps/v8/src/list.h
index 7fd4f5cd2..41666deb2 100644
--- a/deps/v8/src/list.h
+++ b/deps/v8/src/list.h
@@ -84,7 +84,7 @@ class List {
// backing store (e.g. Add).
inline T& operator[](int i) const {
ASSERT(0 <= i);
- ASSERT(i < length_);
+ SLOW_ASSERT(i < length_);
return data_[i];
}
inline T& at(int i) const { return operator[](i); }
@@ -115,6 +115,9 @@ class List {
void InsertAt(int index, const T& element,
AllocationPolicy allocator = AllocationPolicy());
+  // Overwrites the element at the specified index.
+ void Set(int index, const T& element);
+
  // Adds 'count' elements with the value 'value' and returns a
// vector that allows access to the elements. The vector is valid
// until the next change is made to this list.
@@ -149,6 +152,9 @@ class List {
// Drop the last 'count' elements from the list.
INLINE(void RewindBy(int count)) { Rewind(length_ - count); }
+  // Halve the capacity if the fill level is less than a quarter.
+ INLINE(void Trim(AllocationPolicy allocator = AllocationPolicy()));
+
bool Contains(const T& elm) const;
int CountOccurrences(const T& elm, int start, int end) const;
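
Aside — an illustrative sketch, not part of the patch: the shrink rule behind the new Trim(), expressed as a free function over an explicit length and capacity.

// Returns the capacity to shrink to: halve it only when the list is less than
// a quarter full, otherwise keep the current backing store untouched.
static inline int TrimmedCapacity(int length, int capacity) {
  return (length < capacity / 4) ? capacity / 2 : capacity;
}
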
diff --git a/deps/v8/src/lithium-allocator-inl.h b/deps/v8/src/lithium-allocator-inl.h
index 8f660ce0e..deee98877 100644
--- a/deps/v8/src/lithium-allocator-inl.h
+++ b/deps/v8/src/lithium-allocator-inl.h
@@ -99,6 +99,7 @@ bool InputIterator::Done() { return current_ >= limit_; }
LOperand* InputIterator::Current() {
ASSERT(!Done());
+ ASSERT(instr_->InputAt(current_) != NULL);
return instr_->InputAt(current_);
}
@@ -110,7 +111,9 @@ void InputIterator::Advance() {
void InputIterator::SkipUninteresting() {
- while (current_ < limit_ && instr_->InputAt(current_)->IsConstantOperand()) {
+ while (current_ < limit_) {
+ LOperand* current = instr_->InputAt(current_);
+ if (current != NULL && !current->IsConstantOperand()) break;
++current_;
}
}
@@ -127,9 +130,11 @@ bool UseIterator::Done() {
LOperand* UseIterator::Current() {
ASSERT(!Done());
- return input_iterator_.Done()
+ LOperand* result = input_iterator_.Done()
? env_iterator_.Current()
: input_iterator_.Current();
+ ASSERT(result != NULL);
+ return result;
}
@@ -139,6 +144,18 @@ void UseIterator::Advance() {
: input_iterator_.Advance();
}
+
+void LAllocator::SetLiveRangeAssignedRegister(LiveRange* range, int reg) {
+ if (range->Kind() == DOUBLE_REGISTERS) {
+ assigned_double_registers_->Add(reg);
+ } else {
+ ASSERT(range->Kind() == GENERAL_REGISTERS);
+ assigned_registers_->Add(reg);
+ }
+ range->set_assigned_register(reg, chunk()->zone());
+}
+
+
} } // namespace v8::internal
#endif // V8_LITHIUM_ALLOCATOR_INL_H_
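
Aside — an illustrative sketch, not part of the patch: the bookkeeping shape of SetLiveRangeAssignedRegister above, with std::bitset standing in for the allocator's BitVector and a bare struct standing in for LiveRange.

#include <bitset>

enum RegisterKind { UNALLOCATED_REGISTERS, GENERAL_REGISTERS, DOUBLE_REGISTERS };

struct Range {
  RegisterKind kind;
  int assigned_register;
};

struct AllocatorSketch {
  std::bitset<32> assigned_registers;         // general registers handed out so far
  std::bitset<32> assigned_double_registers;  // double registers handed out so far

  void SetAssignedRegister(Range* range, int reg) {
    // Record the register in the bit set matching the range's kind, then let
    // the range itself remember the assignment.
    if (range->kind == DOUBLE_REGISTERS) {
      assigned_double_registers.set(reg);
    } else {
      assigned_registers.set(reg);            // GENERAL_REGISTERS is expected here
    }
    range->assigned_register = reg;
  }
};
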
diff --git a/deps/v8/src/lithium-allocator.cc b/deps/v8/src/lithium-allocator.cc
index 91a98112b..29c31942e 100644
--- a/deps/v8/src/lithium-allocator.cc
+++ b/deps/v8/src/lithium-allocator.cc
@@ -56,9 +56,11 @@ static inline LifetimePosition Max(LifetimePosition a, LifetimePosition b) {
}
-UsePosition::UsePosition(LifetimePosition pos, LOperand* operand)
+UsePosition::UsePosition(LifetimePosition pos,
+ LOperand* operand,
+ LOperand* hint)
: operand_(operand),
- hint_(NULL),
+ hint_(hint),
pos_(pos),
next_(NULL),
requires_reg_(false),
@@ -129,7 +131,7 @@ bool LiveRange::HasOverlap(UseInterval* target) const {
LiveRange::LiveRange(int id, Zone* zone)
: id_(id),
spilled_(false),
- is_double_(false),
+ kind_(UNALLOCATED_REGISTERS),
assigned_register_(kInvalidAssignment),
last_interval_(NULL),
first_interval_(NULL),
@@ -138,16 +140,14 @@ LiveRange::LiveRange(int id, Zone* zone)
next_(NULL),
current_interval_(NULL),
last_processed_use_(NULL),
+ current_hint_operand_(NULL),
spill_operand_(new(zone) LOperand()),
spill_start_index_(kMaxInt) { }
-void LiveRange::set_assigned_register(int reg,
- RegisterKind register_kind,
- Zone* zone) {
+void LiveRange::set_assigned_register(int reg, Zone* zone) {
ASSERT(!HasRegisterAssigned() && !IsSpilled());
assigned_register_ = reg;
- is_double_ = (register_kind == DOUBLE_REGISTERS);
ConvertOperands(zone);
}
@@ -196,6 +196,18 @@ UsePosition* LiveRange::NextUsePositionRegisterIsBeneficial(
}
+UsePosition* LiveRange::PreviousUsePositionRegisterIsBeneficial(
+ LifetimePosition start) {
+ UsePosition* pos = first_pos();
+ UsePosition* prev = NULL;
+ while (pos != NULL && pos->pos().Value() < start.Value()) {
+ if (pos->RegisterIsBeneficial()) prev = pos;
+ pos = pos->next();
+ }
+ return prev;
+}
+
+
UsePosition* LiveRange::NextRegisterPosition(LifetimePosition start) {
UsePosition* pos = NextUsePosition(start);
while (pos != NULL && !pos->RequiresRegister()) {
@@ -206,9 +218,6 @@ UsePosition* LiveRange::NextRegisterPosition(LifetimePosition start) {
bool LiveRange::CanBeSpilled(LifetimePosition pos) {
- // TODO(kmillikin): Comment. Now.
- if (pos.Value() <= Start().Value() && HasRegisterAssigned()) return false;
-
// We cannot spill a live range that has a use requiring a register
// at the current or the immediate next position.
UsePosition* use_pos = NextRegisterPosition(pos);
@@ -218,21 +227,19 @@ bool LiveRange::CanBeSpilled(LifetimePosition pos) {
}
-UsePosition* LiveRange::FirstPosWithHint() const {
- UsePosition* pos = first_pos_;
- while (pos != NULL && !pos->HasHint()) pos = pos->next();
- return pos;
-}
-
-
LOperand* LiveRange::CreateAssignedOperand(Zone* zone) {
LOperand* op = NULL;
if (HasRegisterAssigned()) {
ASSERT(!IsSpilled());
- if (IsDouble()) {
- op = LDoubleRegister::Create(assigned_register(), zone);
- } else {
- op = LRegister::Create(assigned_register(), zone);
+ switch (Kind()) {
+ case GENERAL_REGISTERS:
+ op = LRegister::Create(assigned_register(), zone);
+ break;
+ case DOUBLE_REGISTERS:
+ op = LDoubleRegister::Create(assigned_register(), zone);
+ break;
+ default:
+ UNREACHABLE();
}
} else if (IsSpilled()) {
ASSERT(!HasRegisterAssigned());
@@ -347,6 +354,7 @@ void LiveRange::SplitAt(LifetimePosition position,
// Link the new live range in the chain before any of the other
// ranges linked from the range before the split.
result->parent_ = (parent_ == NULL) ? this : parent_;
+ result->kind_ = result->parent_->kind_;
result->next_ = next_;
next_ = result;
@@ -366,7 +374,7 @@ bool LiveRange::ShouldBeAllocatedBefore(const LiveRange* other) const {
LifetimePosition start = Start();
LifetimePosition other_start = other->Start();
if (start.Value() == other_start.Value()) {
- UsePosition* pos = FirstPosWithHint();
+ UsePosition* pos = first_pos();
if (pos == NULL) return false;
UsePosition* other_pos = other->first_pos();
if (other_pos == NULL) return true;
@@ -440,16 +448,19 @@ void LiveRange::AddUseInterval(LifetimePosition start,
}
-UsePosition* LiveRange::AddUsePosition(LifetimePosition pos,
- LOperand* operand,
- Zone* zone) {
+void LiveRange::AddUsePosition(LifetimePosition pos,
+ LOperand* operand,
+ LOperand* hint,
+ Zone* zone) {
LAllocator::TraceAlloc("Add to live range %d use position %d\n",
id_,
pos.Value());
- UsePosition* use_pos = new(zone) UsePosition(pos, operand);
+ UsePosition* use_pos = new(zone) UsePosition(pos, operand, hint);
+ UsePosition* prev_hint = NULL;
UsePosition* prev = NULL;
UsePosition* current = first_pos_;
while (current != NULL && current->pos().Value() < pos.Value()) {
+ prev_hint = current->HasHint() ? current : prev_hint;
prev = current;
current = current->next();
}
@@ -462,7 +473,9 @@ UsePosition* LiveRange::AddUsePosition(LifetimePosition pos,
prev->next_ = use_pos;
}
- return use_pos;
+ if (prev_hint == NULL && use_pos->HasHint()) {
+ current_hint_operand_ = hint;
+ }
}
@@ -531,19 +544,19 @@ LifetimePosition LiveRange::FirstIntersection(LiveRange* other) {
LAllocator::LAllocator(int num_values, HGraph* graph)
- : zone_(graph->zone()),
+ : zone_(graph->isolate()),
chunk_(NULL),
- live_in_sets_(graph->blocks()->length(), zone_),
- live_ranges_(num_values * 2, zone_),
+ live_in_sets_(graph->blocks()->length(), zone()),
+ live_ranges_(num_values * 2, zone()),
fixed_live_ranges_(NULL),
fixed_double_live_ranges_(NULL),
- unhandled_live_ranges_(num_values * 2, zone_),
- active_live_ranges_(8, zone_),
- inactive_live_ranges_(8, zone_),
- reusable_slots_(8, zone_),
+ unhandled_live_ranges_(num_values * 2, zone()),
+ active_live_ranges_(8, zone()),
+ inactive_live_ranges_(8, zone()),
+ reusable_slots_(8, zone()),
next_virtual_register_(num_values),
first_artificial_register_(num_values),
- mode_(GENERAL_REGISTERS),
+ mode_(UNALLOCATED_REGISTERS),
num_registers_(-1),
graph_(graph),
has_osr_entry_(false),
@@ -561,7 +574,7 @@ void LAllocator::InitializeLivenessAnalysis() {
BitVector* LAllocator::ComputeLiveOut(HBasicBlock* block) {
  // Compute live out for the given block, not including backward
// successor edges.
- BitVector* live_out = new(zone_) BitVector(next_virtual_register_, zone_);
+ BitVector* live_out = new(zone()) BitVector(next_virtual_register_, zone());
// Process all successor blocks.
for (HSuccessorIterator it(block->end()); !it.Done(); it.Advance()) {
@@ -599,14 +612,14 @@ void LAllocator::AddInitialIntervals(HBasicBlock* block,
while (!iterator.Done()) {
int operand_index = iterator.Current();
LiveRange* range = LiveRangeFor(operand_index);
- range->AddUseInterval(start, end, zone_);
+ range->AddUseInterval(start, end, zone());
iterator.Advance();
}
}
int LAllocator::FixedDoubleLiveRangeID(int index) {
- return -index - 1 - Register::kNumAllocatableRegisters;
+ return -index - 1 - Register::kMaxNumAllocatableRegisters;
}
@@ -615,13 +628,13 @@ LOperand* LAllocator::AllocateFixed(LUnallocated* operand,
bool is_tagged) {
TraceAlloc("Allocating fixed reg for op %d\n", operand->virtual_register());
ASSERT(operand->HasFixedPolicy());
- if (operand->policy() == LUnallocated::FIXED_SLOT) {
- operand->ConvertTo(LOperand::STACK_SLOT, operand->fixed_index());
- } else if (operand->policy() == LUnallocated::FIXED_REGISTER) {
- int reg_index = operand->fixed_index();
+ if (operand->HasFixedSlotPolicy()) {
+ operand->ConvertTo(LOperand::STACK_SLOT, operand->fixed_slot_index());
+ } else if (operand->HasFixedRegisterPolicy()) {
+ int reg_index = operand->fixed_register_index();
operand->ConvertTo(LOperand::REGISTER, reg_index);
- } else if (operand->policy() == LUnallocated::FIXED_DOUBLE_REGISTER) {
- int reg_index = operand->fixed_index();
+ } else if (operand->HasFixedDoubleRegisterPolicy()) {
+ int reg_index = operand->fixed_register_index();
operand->ConvertTo(LOperand::DOUBLE_REGISTER, reg_index);
} else {
UNREACHABLE();
@@ -630,7 +643,7 @@ LOperand* LAllocator::AllocateFixed(LUnallocated* operand,
TraceAlloc("Fixed reg is tagged at %d\n", pos);
LInstruction* instr = InstructionAt(pos);
if (instr->HasPointerMap()) {
- instr->pointer_map()->RecordPointer(operand, zone());
+ instr->pointer_map()->RecordPointer(operand, chunk()->zone());
}
}
return operand;
@@ -638,12 +651,13 @@ LOperand* LAllocator::AllocateFixed(LUnallocated* operand,
LiveRange* LAllocator::FixedLiveRangeFor(int index) {
- ASSERT(index < Register::kNumAllocatableRegisters);
+ ASSERT(index < Register::kMaxNumAllocatableRegisters);
LiveRange* result = fixed_live_ranges_[index];
if (result == NULL) {
- result = new(zone_) LiveRange(FixedLiveRangeID(index), zone_);
+ result = new(zone()) LiveRange(FixedLiveRangeID(index), chunk()->zone());
ASSERT(result->IsFixed());
- result->set_assigned_register(index, GENERAL_REGISTERS, zone_);
+ result->kind_ = GENERAL_REGISTERS;
+ SetLiveRangeAssignedRegister(result, index);
fixed_live_ranges_[index] = result;
}
return result;
@@ -651,12 +665,14 @@ LiveRange* LAllocator::FixedLiveRangeFor(int index) {
LiveRange* LAllocator::FixedDoubleLiveRangeFor(int index) {
- ASSERT(index < DoubleRegister::kNumAllocatableRegisters);
+ ASSERT(index < DoubleRegister::NumAllocatableRegisters());
LiveRange* result = fixed_double_live_ranges_[index];
if (result == NULL) {
- result = new(zone_) LiveRange(FixedDoubleLiveRangeID(index), zone_);
+ result = new(zone()) LiveRange(FixedDoubleLiveRangeID(index),
+ chunk()->zone());
ASSERT(result->IsFixed());
- result->set_assigned_register(index, DOUBLE_REGISTERS, zone_);
+ result->kind_ = DOUBLE_REGISTERS;
+ SetLiveRangeAssignedRegister(result, index);
fixed_double_live_ranges_[index] = result;
}
return result;
@@ -669,7 +685,7 @@ LiveRange* LAllocator::LiveRangeFor(int index) {
}
LiveRange* result = live_ranges_[index];
if (result == NULL) {
- result = new(zone_) LiveRange(index, zone_);
+ result = new(zone()) LiveRange(index, chunk()->zone());
live_ranges_[index] = result;
}
return result;
@@ -715,15 +731,15 @@ void LAllocator::Define(LifetimePosition position,
if (range->IsEmpty() || range->Start().Value() > position.Value()) {
// Can happen if there is a definition without use.
- range->AddUseInterval(position, position.NextInstruction(), zone_);
- range->AddUsePosition(position.NextInstruction(), NULL, zone_);
+ range->AddUseInterval(position, position.NextInstruction(), zone());
+ range->AddUsePosition(position.NextInstruction(), NULL, NULL, zone());
} else {
range->ShortenTo(position);
}
if (operand->IsUnallocated()) {
LUnallocated* unalloc_operand = LUnallocated::cast(operand);
- range->AddUsePosition(position, unalloc_operand, zone_)->set_hint(hint);
+ range->AddUsePosition(position, unalloc_operand, hint, zone());
}
}
@@ -736,9 +752,9 @@ void LAllocator::Use(LifetimePosition block_start,
if (range == NULL) return;
if (operand->IsUnallocated()) {
LUnallocated* unalloc_operand = LUnallocated::cast(operand);
- range->AddUsePosition(position, unalloc_operand, zone_)->set_hint(hint);
+ range->AddUsePosition(position, unalloc_operand, hint, zone());
}
- range->AddUseInterval(block_start, position, zone_);
+ range->AddUseInterval(block_start, position, zone());
}
@@ -746,7 +762,8 @@ void LAllocator::AddConstraintsGapMove(int index,
LOperand* from,
LOperand* to) {
LGap* gap = GapAt(index);
- LParallelMove* move = gap->GetOrCreateParallelMove(LGap::START, zone());
+ LParallelMove* move = gap->GetOrCreateParallelMove(LGap::START,
+ chunk()->zone());
if (from->IsUnallocated()) {
const ZoneList<LMoveOperands>* move_operands = move->move_operands();
for (int i = 0; i < move_operands->length(); ++i) {
@@ -755,19 +772,20 @@ void LAllocator::AddConstraintsGapMove(int index,
if (cur_to->IsUnallocated()) {
if (LUnallocated::cast(cur_to)->virtual_register() ==
LUnallocated::cast(from)->virtual_register()) {
- move->AddMove(cur.source(), to, zone());
+ move->AddMove(cur.source(), to, chunk()->zone());
return;
}
}
}
}
- move->AddMove(from, to, zone());
+ move->AddMove(from, to, chunk()->zone());
}
void LAllocator::MeetRegisterConstraints(HBasicBlock* block) {
int start = block->first_instruction_index();
int end = block->last_instruction_index();
+ if (start == -1) return;
for (int i = start; i <= end; ++i) {
if (IsGapAt(i)) {
LInstruction* instr = NULL;
@@ -800,7 +818,8 @@ void LAllocator::MeetConstraintsBetween(LInstruction* first,
LiveRange* range = LiveRangeFor(first_output->virtual_register());
bool assigned = false;
if (first_output->HasFixedPolicy()) {
- LUnallocated* output_copy = first_output->CopyUnconstrained(zone());
+ LUnallocated* output_copy = first_output->CopyUnconstrained(
+ chunk()->zone());
bool is_tagged = HasTaggedValue(first_output->virtual_register());
AllocateFixed(first_output, gap_index, is_tagged);
@@ -821,8 +840,10 @@ void LAllocator::MeetConstraintsBetween(LInstruction* first,
  // Thus it should be inserted at a lifetime position corresponding to
// the instruction end.
LGap* gap = GapAt(gap_index);
- LParallelMove* move = gap->GetOrCreateParallelMove(LGap::BEFORE, zone());
- move->AddMove(first_output, range->GetSpillOperand(), zone());
+ LParallelMove* move = gap->GetOrCreateParallelMove(LGap::BEFORE,
+ chunk()->zone());
+ move->AddMove(first_output, range->GetSpillOperand(),
+ chunk()->zone());
}
}
@@ -831,24 +852,27 @@ void LAllocator::MeetConstraintsBetween(LInstruction* first,
for (UseIterator it(second); !it.Done(); it.Advance()) {
LUnallocated* cur_input = LUnallocated::cast(it.Current());
if (cur_input->HasFixedPolicy()) {
- LUnallocated* input_copy = cur_input->CopyUnconstrained(zone());
+ LUnallocated* input_copy = cur_input->CopyUnconstrained(
+ chunk()->zone());
bool is_tagged = HasTaggedValue(cur_input->virtual_register());
AllocateFixed(cur_input, gap_index + 1, is_tagged);
AddConstraintsGapMove(gap_index, input_copy, cur_input);
- } else if (cur_input->policy() == LUnallocated::WRITABLE_REGISTER) {
+ } else if (cur_input->HasWritableRegisterPolicy()) {
// The live range of writable input registers always goes until the end
// of the instruction.
ASSERT(!cur_input->IsUsedAtStart());
- LUnallocated* input_copy = cur_input->CopyUnconstrained(zone());
- cur_input->set_virtual_register(GetVirtualRegister());
+ LUnallocated* input_copy = cur_input->CopyUnconstrained(
+ chunk()->zone());
+ int vreg = GetVirtualRegister();
if (!AllocationOk()) return;
+ cur_input->set_virtual_register(vreg);
if (RequiredRegisterKind(input_copy->virtual_register()) ==
DOUBLE_REGISTERS) {
double_artificial_registers_.Add(
cur_input->virtual_register() - first_artificial_register_,
- zone_);
+ zone());
}
AddConstraintsGapMove(gap_index, input_copy, cur_input);
@@ -864,7 +888,8 @@ void LAllocator::MeetConstraintsBetween(LInstruction* first,
int output_vreg = second_output->virtual_register();
int input_vreg = cur_input->virtual_register();
- LUnallocated* input_copy = cur_input->CopyUnconstrained(zone());
+ LUnallocated* input_copy = cur_input->CopyUnconstrained(
+ chunk()->zone());
cur_input->set_virtual_register(second_output->virtual_register());
AddConstraintsGapMove(gap_index, input_copy, cur_input);
@@ -872,7 +897,7 @@ void LAllocator::MeetConstraintsBetween(LInstruction* first,
int index = gap_index + 1;
LInstruction* instr = InstructionAt(index);
if (instr->HasPointerMap()) {
- instr->pointer_map()->RecordPointer(input_copy, zone());
+ instr->pointer_map()->RecordPointer(input_copy, chunk()->zone());
}
} else if (!HasTaggedValue(input_vreg) && HasTaggedValue(output_vreg)) {
// The input is assumed to immediately have a tagged representation,
@@ -901,7 +926,8 @@ void LAllocator::ProcessInstructions(HBasicBlock* block, BitVector* live) {
if (IsGapAt(index)) {
// We have a gap at this position.
LGap* gap = GapAt(index);
- LParallelMove* move = gap->GetOrCreateParallelMove(LGap::START, zone());
+ LParallelMove* move = gap->GetOrCreateParallelMove(LGap::START,
+ chunk()->zone());
const ZoneList<LMoveOperands>* move_operands = move->move_operands();
for (int i = 0; i < move_operands->length(); ++i) {
LMoveOperands* cur = &move_operands->at(i);
@@ -913,7 +939,7 @@ void LAllocator::ProcessInstructions(HBasicBlock* block, BitVector* live) {
if (phi != NULL) {
// This is a phi resolving move.
if (!phi->block()->IsLoopHeader()) {
- hint = LiveRangeFor(phi->id())->FirstHint();
+ hint = LiveRangeFor(phi->id())->current_hint_operand();
}
} else {
if (to->IsUnallocated()) {
@@ -946,26 +972,26 @@ void LAllocator::ProcessInstructions(HBasicBlock* block, BitVector* live) {
Define(curr_position, output, NULL);
}
- if (instr->IsMarkedAsCall()) {
- for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
+ if (instr->ClobbersRegisters()) {
+ for (int i = 0; i < Register::kMaxNumAllocatableRegisters; ++i) {
if (output == NULL || !output->IsRegister() ||
output->index() != i) {
LiveRange* range = FixedLiveRangeFor(i);
range->AddUseInterval(curr_position,
curr_position.InstructionEnd(),
- zone_);
+ zone());
}
}
}
- if (instr->IsMarkedAsCall()) {
- for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; ++i) {
+ if (instr->ClobbersDoubleRegisters()) {
+ for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
if (output == NULL || !output->IsDoubleRegister() ||
output->index() != i) {
LiveRange* range = FixedDoubleLiveRangeFor(i);
range->AddUseInterval(curr_position,
curr_position.InstructionEnd(),
- zone_);
+ zone());
}
}
}
@@ -989,7 +1015,7 @@ void LAllocator::ProcessInstructions(HBasicBlock* block, BitVector* live) {
for (TempIterator it(instr); !it.Done(); it.Advance()) {
LOperand* temp = it.Current();
- if (instr->IsMarkedAsCall()) {
+ if (instr->ClobbersTemps()) {
if (temp->IsRegister()) continue;
if (temp->IsUnallocated()) {
LUnallocated* temp_unalloc = LUnallocated::cast(temp);
@@ -1013,7 +1039,8 @@ void LAllocator::ResolvePhis(HBasicBlock* block) {
const ZoneList<HPhi*>* phis = block->phis();
for (int i = 0; i < phis->length(); ++i) {
HPhi* phi = phis->at(i);
- LUnallocated* phi_operand = new(zone_) LUnallocated(LUnallocated::NONE);
+ LUnallocated* phi_operand =
+ new(chunk()->zone()) LUnallocated(LUnallocated::NONE);
phi_operand->set_virtual_register(phi->id());
for (int j = 0; j < phi->OperandCount(); ++j) {
HValue* op = phi->OperandAt(j);
@@ -1023,7 +1050,8 @@ void LAllocator::ResolvePhis(HBasicBlock* block) {
operand = chunk_->DefineConstantOperand(constant);
} else {
ASSERT(!op->EmitAtUses());
- LUnallocated* unalloc = new(zone_) LUnallocated(LUnallocated::ANY);
+ LUnallocated* unalloc =
+ new(chunk()->zone()) LUnallocated(LUnallocated::ANY);
unalloc->set_virtual_register(op->id());
operand = unalloc;
}
@@ -1045,18 +1073,18 @@ void LAllocator::ResolvePhis(HBasicBlock* block) {
LInstruction* branch =
InstructionAt(cur_block->last_instruction_index());
if (branch->HasPointerMap()) {
- if (phi->representation().IsTagged()) {
- branch->pointer_map()->RecordPointer(phi_operand, zone());
+ if (phi->representation().IsTagged() && !phi->type().IsSmi()) {
+ branch->pointer_map()->RecordPointer(phi_operand, chunk()->zone());
} else if (!phi->representation().IsDouble()) {
- branch->pointer_map()->RecordUntagged(phi_operand, zone());
+ branch->pointer_map()->RecordUntagged(phi_operand, chunk()->zone());
}
}
}
LiveRange* live_range = LiveRangeFor(phi->id());
LLabel* label = chunk_->GetLabel(phi->block()->block_id());
- label->GetOrCreateParallelMove(LGap::START, zone())->
- AddMove(phi_operand, live_range->GetSpillOperand(), zone());
+ label->GetOrCreateParallelMove(LGap::START, chunk()->zone())->
+ AddMove(phi_operand, live_range->GetSpillOperand(), chunk()->zone());
live_range->SetSpillStartIndex(phi->block()->first_instruction_index());
}
}
@@ -1065,6 +1093,12 @@ void LAllocator::ResolvePhis(HBasicBlock* block) {
bool LAllocator::Allocate(LChunk* chunk) {
ASSERT(chunk_ == NULL);
chunk_ = static_cast<LPlatformChunk*>(chunk);
+ assigned_registers_ =
+ new(chunk->zone()) BitVector(Register::NumAllocatableRegisters(),
+ chunk->zone());
+ assigned_double_registers_ =
+ new(chunk->zone()) BitVector(DoubleRegister::NumAllocatableRegisters(),
+ chunk->zone());
MeetRegisterConstraints();
if (!AllocationOk()) return false;
ResolvePhis();
@@ -1074,7 +1108,6 @@ bool LAllocator::Allocate(LChunk* chunk) {
AllocateDoubleRegisters();
if (!AllocationOk()) return false;
PopulatePointerMaps();
- if (has_osr_entry_) ProcessOsrEntry();
ConnectRanges();
ResolveControlFlow();
return true;
@@ -1082,7 +1115,7 @@ bool LAllocator::Allocate(LChunk* chunk) {
void LAllocator::MeetRegisterConstraints() {
- HPhase phase("L_Register constraints", chunk_);
+ LAllocatorPhase phase("L_Register constraints", this);
first_artificial_register_ = next_virtual_register_;
const ZoneList<HBasicBlock*>* blocks = graph_->blocks();
for (int i = 0; i < blocks->length(); ++i) {
@@ -1094,7 +1127,7 @@ void LAllocator::MeetRegisterConstraints() {
void LAllocator::ResolvePhis() {
- HPhase phase("L_Resolve phis", chunk_);
+ LAllocatorPhase phase("L_Resolve phis", this);
// Process the blocks in reverse order.
const ZoneList<HBasicBlock*>* blocks = graph_->blocks();
@@ -1130,8 +1163,8 @@ void LAllocator::ResolveControlFlow(LiveRange* range,
if (cur_cover->IsSpilled()) return;
ASSERT(pred_cover != NULL && cur_cover != NULL);
if (pred_cover != cur_cover) {
- LOperand* pred_op = pred_cover->CreateAssignedOperand(zone_);
- LOperand* cur_op = cur_cover->CreateAssignedOperand(zone_);
+ LOperand* pred_op = pred_cover->CreateAssignedOperand(chunk()->zone());
+ LOperand* cur_op = cur_cover->CreateAssignedOperand(chunk()->zone());
if (!pred_op->Equals(cur_op)) {
LGap* gap = NULL;
if (block->predecessors()->length() == 1) {
@@ -1151,7 +1184,7 @@ void LAllocator::ResolveControlFlow(LiveRange* range,
LInstruction* branch = InstructionAt(pred->last_instruction_index());
if (branch->HasPointerMap()) {
if (HasTaggedValue(range->id())) {
- branch->pointer_map()->RecordPointer(cur_op, zone());
+ branch->pointer_map()->RecordPointer(cur_op, chunk()->zone());
} else if (!cur_op->IsDoubleStackSlot() &&
!cur_op->IsDoubleRegister()) {
branch->pointer_map()->RemovePointer(cur_op);
@@ -1159,7 +1192,8 @@ void LAllocator::ResolveControlFlow(LiveRange* range,
}
}
gap->GetOrCreateParallelMove(
- LGap::START, zone())->AddMove(pred_op, cur_op, zone());
+ LGap::START, chunk()->zone())->AddMove(pred_op, cur_op,
+ chunk()->zone());
}
}
}
@@ -1170,11 +1204,11 @@ LParallelMove* LAllocator::GetConnectingParallelMove(LifetimePosition pos) {
if (IsGapAt(index)) {
LGap* gap = GapAt(index);
return gap->GetOrCreateParallelMove(
- pos.IsInstructionStart() ? LGap::START : LGap::END, zone());
+ pos.IsInstructionStart() ? LGap::START : LGap::END, chunk()->zone());
}
int gap_pos = pos.IsInstructionStart() ? (index - 1) : (index + 1);
return GapAt(gap_pos)->GetOrCreateParallelMove(
- (gap_pos < index) ? LGap::AFTER : LGap::BEFORE, zone());
+ (gap_pos < index) ? LGap::AFTER : LGap::BEFORE, chunk()->zone());
}
@@ -1185,7 +1219,7 @@ HBasicBlock* LAllocator::GetBlock(LifetimePosition pos) {
void LAllocator::ConnectRanges() {
- HPhase phase("L_Connect ranges", this);
+ LAllocatorPhase phase("L_Connect ranges", this);
for (int i = 0; i < live_ranges()->length(); ++i) {
LiveRange* first_range = live_ranges()->at(i);
if (first_range == NULL || first_range->parent() != NULL) continue;
@@ -1204,9 +1238,12 @@ void LAllocator::ConnectRanges() {
}
if (should_insert) {
LParallelMove* move = GetConnectingParallelMove(pos);
- LOperand* prev_operand = first_range->CreateAssignedOperand(zone_);
- LOperand* cur_operand = second_range->CreateAssignedOperand(zone_);
- move->AddMove(prev_operand, cur_operand, zone());
+ LOperand* prev_operand = first_range->CreateAssignedOperand(
+ chunk()->zone());
+ LOperand* cur_operand = second_range->CreateAssignedOperand(
+ chunk()->zone());
+ move->AddMove(prev_operand, cur_operand,
+ chunk()->zone());
}
}
}
@@ -1225,7 +1262,7 @@ bool LAllocator::CanEagerlyResolveControlFlow(HBasicBlock* block) const {
void LAllocator::ResolveControlFlow() {
- HPhase phase("L_Resolve control flow", this);
+ LAllocatorPhase phase("L_Resolve control flow", this);
const ZoneList<HBasicBlock*>* blocks = graph_->blocks();
for (int block_id = 1; block_id < blocks->length(); ++block_id) {
HBasicBlock* block = blocks->at(block_id);
@@ -1246,7 +1283,7 @@ void LAllocator::ResolveControlFlow() {
void LAllocator::BuildLiveRanges() {
- HPhase phase("L_Build live ranges", this);
+ LAllocatorPhase phase("L_Build live ranges", this);
InitializeLivenessAnalysis();
// Process the blocks in reverse order.
const ZoneList<HBasicBlock*>* blocks = graph_->blocks();
@@ -1271,7 +1308,8 @@ void LAllocator::BuildLiveRanges() {
LOperand* hint = NULL;
LOperand* phi_operand = NULL;
LGap* gap = GetLastGap(phi->block()->predecessors()->at(0));
- LParallelMove* move = gap->GetOrCreateParallelMove(LGap::START, zone());
+ LParallelMove* move = gap->GetOrCreateParallelMove(LGap::START,
+ chunk()->zone());
for (int j = 0; j < move->move_operands()->length(); ++j) {
LOperand* to = move->move_operands()->at(j).destination();
if (to->IsUnallocated() &&
@@ -1308,7 +1346,7 @@ void LAllocator::BuildLiveRanges() {
while (!iterator.Done()) {
int operand_index = iterator.Current();
LiveRange* range = LiveRangeFor(operand_index);
- range->EnsureInterval(start, end, zone_);
+ range->EnsureInterval(start, end, zone());
iterator.Advance();
}
@@ -1324,8 +1362,15 @@ void LAllocator::BuildLiveRanges() {
while (!iterator.Done()) {
found = true;
int operand_index = iterator.Current();
- PrintF("Function: %s\n",
- *chunk_->info()->function()->debug_name()->ToCString());
+ if (chunk_->info()->IsStub()) {
+ CodeStub::Major major_key = chunk_->info()->code_stub()->MajorKey();
+ PrintF("Function: %s\n", CodeStub::MajorName(major_key, false));
+ } else {
+ ASSERT(chunk_->info()->IsOptimizing());
+ AllowHandleDereference allow_deref;
+ PrintF("Function: %s\n",
+ *chunk_->info()->function()->debug_name()->ToCString());
+ }
PrintF("Value %d used before first definition!\n", operand_index);
LiveRange* range = LiveRangeFor(operand_index);
PrintF("First use is at %d\n", range->first_pos()->pos().Value());
@@ -1335,6 +1380,12 @@ void LAllocator::BuildLiveRanges() {
}
#endif
}
+
+ for (int i = 0; i < live_ranges_.length(); ++i) {
+ if (live_ranges_[i] != NULL) {
+ live_ranges_[i]->kind_ = RequiredRegisterKind(live_ranges_[i]->id());
+ }
+ }
}
@@ -1351,7 +1402,7 @@ bool LAllocator::SafePointsAreInOrder() const {
void LAllocator::PopulatePointerMaps() {
- HPhase phase("L_Populate pointer maps", this);
+ LAllocatorPhase phase("L_Populate pointer maps", this);
const ZoneList<LPointerMap*>* pointer_maps = chunk_->pointer_maps();
ASSERT(SafePointsAreInOrder());
@@ -1411,7 +1462,7 @@ void LAllocator::PopulatePointerMaps() {
LifetimePosition safe_point_pos =
LifetimePosition::FromInstructionIndex(safe_point);
LiveRange* cur = range;
- while (cur != NULL && !cur->Covers(safe_point_pos.PrevInstruction())) {
+ while (cur != NULL && !cur->Covers(safe_point_pos)) {
cur = cur->next();
}
if (cur == NULL) continue;
@@ -1422,47 +1473,16 @@ void LAllocator::PopulatePointerMaps() {
safe_point >= range->spill_start_index()) {
TraceAlloc("Pointer for range %d (spilled at %d) at safe point %d\n",
range->id(), range->spill_start_index(), safe_point);
- map->RecordPointer(range->GetSpillOperand(), zone());
+ map->RecordPointer(range->GetSpillOperand(), chunk()->zone());
}
if (!cur->IsSpilled()) {
TraceAlloc("Pointer in register for range %d (start at %d) "
"at safe point %d\n",
cur->id(), cur->Start().Value(), safe_point);
- LOperand* operand = cur->CreateAssignedOperand(zone_);
+ LOperand* operand = cur->CreateAssignedOperand(chunk()->zone());
ASSERT(!operand->IsStackSlot());
- map->RecordPointer(operand, zone());
- }
- }
- }
-}
-
-
-void LAllocator::ProcessOsrEntry() {
- const ZoneList<LInstruction*>* instrs = chunk_->instructions();
-
- // Linear search for the OSR entry instruction in the chunk.
- int index = -1;
- while (++index < instrs->length() &&
- !instrs->at(index)->IsOsrEntry()) {
- }
- ASSERT(index < instrs->length());
- LOsrEntry* instruction = LOsrEntry::cast(instrs->at(index));
-
- LifetimePosition position = LifetimePosition::FromInstructionIndex(index);
- for (int i = 0; i < live_ranges()->length(); ++i) {
- LiveRange* range = live_ranges()->at(i);
- if (range != NULL) {
- if (range->Covers(position) &&
- range->HasRegisterAssigned() &&
- range->TopLevel()->HasAllocatedSpillOperand()) {
- int reg_index = range->assigned_register();
- LOperand* spill_operand = range->TopLevel()->GetSpillOperand();
- if (range->IsDouble()) {
- instruction->MarkSpilledDoubleRegister(reg_index, spill_operand);
- } else {
- instruction->MarkSpilledRegister(reg_index, spill_operand);
- }
+ map->RecordPointer(operand, chunk()->zone());
}
}
}
@@ -1470,15 +1490,16 @@ void LAllocator::ProcessOsrEntry() {
void LAllocator::AllocateGeneralRegisters() {
- HPhase phase("L_Allocate general registers", this);
- num_registers_ = Register::kNumAllocatableRegisters;
+ LAllocatorPhase phase("L_Allocate general registers", this);
+ num_registers_ = Register::NumAllocatableRegisters();
+ mode_ = GENERAL_REGISTERS;
AllocateRegisters();
}
void LAllocator::AllocateDoubleRegisters() {
- HPhase phase("L_Allocate double registers", this);
- num_registers_ = DoubleRegister::kNumAllocatableRegisters;
+ LAllocatorPhase phase("L_Allocate double registers", this);
+ num_registers_ = DoubleRegister::NumAllocatableRegisters();
mode_ = DOUBLE_REGISTERS;
AllocateRegisters();
}
@@ -1489,7 +1510,7 @@ void LAllocator::AllocateRegisters() {
for (int i = 0; i < live_ranges_.length(); ++i) {
if (live_ranges_[i] != NULL) {
- if (RequiredRegisterKind(live_ranges_[i]->id()) == mode_) {
+ if (live_ranges_[i]->Kind() == mode_) {
AddToUnhandledUnsorted(live_ranges_[i]);
}
}
@@ -1502,13 +1523,14 @@ void LAllocator::AllocateRegisters() {
ASSERT(inactive_live_ranges_.is_empty());
if (mode_ == DOUBLE_REGISTERS) {
- for (int i = 0; i < fixed_double_live_ranges_.length(); ++i) {
+ for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
LiveRange* current = fixed_double_live_ranges_.at(i);
if (current != NULL) {
AddToInactive(current);
}
}
} else {
+ ASSERT(mode_ == GENERAL_REGISTERS);
for (int i = 0; i < fixed_live_ranges_.length(); ++i) {
LiveRange* current = fixed_live_ranges_.at(i);
if (current != NULL) {
@@ -1522,6 +1544,9 @@ void LAllocator::AllocateRegisters() {
LiveRange* current = unhandled_live_ranges_.RemoveLast();
ASSERT(UnhandledIsSorted());
LifetimePosition position = current->Start();
+#ifdef DEBUG
+ allocation_finger_ = position;
+#endif
TraceAlloc("Processing interval %d start=%d\n",
current->id(),
position.Value());
@@ -1612,7 +1637,7 @@ void LAllocator::TraceAlloc(const char* msg, ...) {
bool LAllocator::HasTaggedValue(int virtual_register) const {
HValue* value = graph_->LookupValue(virtual_register);
if (value == NULL) return false;
- return value->representation().IsTagged();
+ return value->representation().IsTagged() && !value->type().IsSmi();
}
@@ -1646,6 +1671,7 @@ void LAllocator::AddToInactive(LiveRange* range) {
void LAllocator::AddToUnhandledSorted(LiveRange* range) {
if (range == NULL || range->IsEmpty()) return;
ASSERT(!range->HasRegisterAssigned() && !range->IsSpilled());
+ ASSERT(allocation_finger_.Value() <= range->Start().Value());
for (int i = unhandled_live_ranges_.length() - 1; i >= 0; --i) {
LiveRange* cur_range = unhandled_live_ranges_.at(i);
if (range->ShouldBeAllocatedBefore(cur_range)) {
@@ -1757,14 +1783,14 @@ void LAllocator::InactiveToActive(LiveRange* range) {
// TryAllocateFreeReg and AllocateBlockedReg assume this
// when allocating local arrays.
-STATIC_ASSERT(DoubleRegister::kNumAllocatableRegisters >=
- Register::kNumAllocatableRegisters);
+STATIC_ASSERT(DoubleRegister::kMaxNumAllocatableRegisters >=
+ Register::kMaxNumAllocatableRegisters);
bool LAllocator::TryAllocateFreeReg(LiveRange* current) {
- LifetimePosition free_until_pos[DoubleRegister::kNumAllocatableRegisters];
+ LifetimePosition free_until_pos[DoubleRegister::kMaxNumAllocatableRegisters];
- for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
+ for (int i = 0; i < num_registers_; i++) {
free_until_pos[i] = LifetimePosition::MaxPosition();
}
@@ -1784,26 +1810,23 @@ bool LAllocator::TryAllocateFreeReg(LiveRange* current) {
free_until_pos[cur_reg] = Min(free_until_pos[cur_reg], next_intersection);
}
- UsePosition* hinted_use = current->FirstPosWithHint();
- if (hinted_use != NULL) {
- LOperand* hint = hinted_use->hint();
- if (hint->IsRegister() || hint->IsDoubleRegister()) {
- int register_index = hint->index();
- TraceAlloc(
- "Found reg hint %s (free until [%d) for live range %d (end %d[).\n",
- RegisterName(register_index),
- free_until_pos[register_index].Value(),
- current->id(),
- current->End().Value());
-
- // The desired register is free until the end of the current live range.
- if (free_until_pos[register_index].Value() >= current->End().Value()) {
- TraceAlloc("Assigning preferred reg %s to live range %d\n",
- RegisterName(register_index),
- current->id());
- current->set_assigned_register(register_index, mode_, zone_);
- return true;
- }
+ LOperand* hint = current->FirstHint();
+ if (hint != NULL && (hint->IsRegister() || hint->IsDoubleRegister())) {
+ int register_index = hint->index();
+ TraceAlloc(
+ "Found reg hint %s (free until [%d) for live range %d (end %d[).\n",
+ RegisterName(register_index),
+ free_until_pos[register_index].Value(),
+ current->id(),
+ current->End().Value());
+
+ // The desired register is free until the end of the current live range.
+ if (free_until_pos[register_index].Value() >= current->End().Value()) {
+ TraceAlloc("Assigning preferred reg %s to live range %d\n",
+ RegisterName(register_index),
+ current->id());
+ SetLiveRangeAssignedRegister(current, register_index);
+ return true;
}
}
@@ -1837,7 +1860,7 @@ bool LAllocator::TryAllocateFreeReg(LiveRange* current) {
TraceAlloc("Assigning free reg %s to live range %d\n",
RegisterName(reg),
current->id());
- current->set_assigned_register(reg, mode_, zone_);
+ SetLiveRangeAssignedRegister(current, reg);
return true;
}
@@ -1853,10 +1876,10 @@ void LAllocator::AllocateBlockedReg(LiveRange* current) {
}
- LifetimePosition use_pos[DoubleRegister::kNumAllocatableRegisters];
- LifetimePosition block_pos[DoubleRegister::kNumAllocatableRegisters];
+ LifetimePosition use_pos[DoubleRegister::kMaxNumAllocatableRegisters];
+ LifetimePosition block_pos[DoubleRegister::kMaxNumAllocatableRegisters];
- for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
+ for (int i = 0; i < num_registers_; i++) {
use_pos[i] = block_pos[i] = LifetimePosition::MaxPosition();
}
@@ -1903,12 +1926,6 @@ void LAllocator::AllocateBlockedReg(LiveRange* current) {
if (pos.Value() < register_use->pos().Value()) {
// All registers are blocked before the first use that requires a register.
    // Spill the starting part of the live range up to that use.
- //
- // Corner case: the first use position is equal to the start of the range.
- // In this case we have nothing to spill and SpillBetween will just return
- // this range to the list of unhandled ones. This will lead to the infinite
- // loop.
- ASSERT(current->Start().Value() < register_use->pos().Value());
SpillBetween(current, current->Start(), register_use->pos());
return;
}
@@ -1919,6 +1936,7 @@ void LAllocator::AllocateBlockedReg(LiveRange* current) {
LiveRange* tail = SplitBetween(current,
current->Start(),
block_pos[reg].InstructionStart());
+ if (!AllocationOk()) return;
AddToUnhandledSorted(tail);
}
@@ -1927,7 +1945,7 @@ void LAllocator::AllocateBlockedReg(LiveRange* current) {
TraceAlloc("Assigning blocked reg %s to live range %d\n",
RegisterName(reg),
current->id());
- current->set_assigned_register(reg, mode_, zone_);
+ SetLiveRangeAssignedRegister(current, reg);
// This register was not free. Thus we need to find and spill
// parts of active and inactive live regions that use the same register
@@ -1936,6 +1954,39 @@ void LAllocator::AllocateBlockedReg(LiveRange* current) {
}
+LifetimePosition LAllocator::FindOptimalSpillingPos(LiveRange* range,
+ LifetimePosition pos) {
+ HBasicBlock* block = GetBlock(pos.InstructionStart());
+ HBasicBlock* loop_header =
+ block->IsLoopHeader() ? block : block->parent_loop_header();
+
+ if (loop_header == NULL) return pos;
+
+ UsePosition* prev_use =
+ range->PreviousUsePositionRegisterIsBeneficial(pos);
+
+ while (loop_header != NULL) {
+    // We are going to spill the live range inside the loop.
+    // If possible, try to move the spilling position backwards to the loop
+    // header; this will reduce the number of memory moves on the back edge.
+ LifetimePosition loop_start = LifetimePosition::FromInstructionIndex(
+ loop_header->first_instruction_index());
+
+ if (range->Covers(loop_start)) {
+ if (prev_use == NULL || prev_use->pos().Value() < loop_start.Value()) {
+        // No register-beneficial use inside the loop before this position.
+ pos = loop_start;
+ }
+ }
+
+ // Try hoisting out to an outer loop.
+ loop_header = loop_header->parent_loop_header();
+ }
+
+ return pos;
+}
+
+
void LAllocator::SplitAndSpillIntersecting(LiveRange* current) {
ASSERT(current->HasRegisterAssigned());
int reg = current->assigned_register();
@@ -1944,11 +1995,21 @@ void LAllocator::SplitAndSpillIntersecting(LiveRange* current) {
LiveRange* range = active_live_ranges_[i];
if (range->assigned_register() == reg) {
UsePosition* next_pos = range->NextRegisterPosition(current->Start());
+ LifetimePosition spill_pos = FindOptimalSpillingPos(range, split_pos);
if (next_pos == NULL) {
- SpillAfter(range, split_pos);
+ SpillAfter(range, spill_pos);
} else {
- SpillBetween(range, split_pos, next_pos->pos());
+        // When spilling between spill_pos and next_pos, ensure that the range
+        // remains spilled at least until the start of the current live range.
+        // This guarantees that we will not introduce new unhandled ranges that
+        // start before the current range, as that would violate the allocation
+        // invariant and lead to an inconsistent state of the active and
+        // inactive live ranges: ranges are allocated in order of their start
+        // positions, and ranges are retired from active/inactive when the
+        // start of the current live range is larger than their end.
+ SpillBetweenUntil(range, spill_pos, current->Start(), next_pos->pos());
}
+ if (!AllocationOk()) return;
ActiveToHandled(range);
--i;
}
@@ -1967,6 +2028,7 @@ void LAllocator::SplitAndSpillIntersecting(LiveRange* current) {
next_intersection = Min(next_intersection, next_pos->pos());
SpillBetween(range, split_pos, next_intersection);
}
+ if (!AllocationOk()) return;
InactiveToHandled(range);
--i;
}
@@ -1992,9 +2054,10 @@ LiveRange* LAllocator::SplitRangeAt(LiveRange* range, LifetimePosition pos) {
ASSERT(pos.IsInstructionStart() ||
!chunk_->instructions()->at(pos.InstructionIndex())->IsControl());
- LiveRange* result = LiveRangeFor(GetVirtualRegister());
+ int vreg = GetVirtualRegister();
if (!AllocationOk()) return NULL;
- range->SplitAt(pos, result, zone_);
+ LiveRange* result = LiveRangeFor(vreg);
+ range->SplitAt(pos, result, zone());
return result;
}
@@ -2058,7 +2121,15 @@ void LAllocator::SpillAfter(LiveRange* range, LifetimePosition pos) {
void LAllocator::SpillBetween(LiveRange* range,
LifetimePosition start,
LifetimePosition end) {
- ASSERT(start.Value() < end.Value());
+ SpillBetweenUntil(range, start, start, end);
+}
+
+
+void LAllocator::SpillBetweenUntil(LiveRange* range,
+ LifetimePosition start,
+ LifetimePosition until,
+ LifetimePosition end) {
+ CHECK(start.Value() < end.Value());
LiveRange* second_part = SplitRangeAt(range, start);
if (!AllocationOk()) return;
@@ -2068,8 +2139,9 @@ void LAllocator::SpillBetween(LiveRange* range,
// and put the rest to unhandled.
LiveRange* third_part = SplitBetween(
second_part,
- second_part->Start().InstructionEnd(),
+ Max(second_part->Start().InstructionEnd(), until),
end.PrevInstruction().InstructionEnd());
+ if (!AllocationOk()) return;
ASSERT(third_part != second_part);
@@ -2090,10 +2162,10 @@ void LAllocator::Spill(LiveRange* range) {
if (!first->HasAllocatedSpillOperand()) {
LOperand* op = TryReuseSpillSlot(range);
- if (op == NULL) op = chunk_->GetNextSpillSlot(mode_ == DOUBLE_REGISTERS);
+ if (op == NULL) op = chunk_->GetNextSpillSlot(range->Kind());
first->SetSpillOperand(op);
}
- range->MakeSpilled(zone_);
+ range->MakeSpilled(chunk()->zone());
}
@@ -2116,4 +2188,32 @@ void LAllocator::Verify() const {
#endif
+LAllocatorPhase::LAllocatorPhase(const char* name, LAllocator* allocator)
+ : CompilationPhase(name, allocator->graph()->info()),
+ allocator_(allocator) {
+ if (FLAG_hydrogen_stats) {
+ allocator_zone_start_allocation_size_ =
+ allocator->zone()->allocation_size();
+ }
+}
+
+
+LAllocatorPhase::~LAllocatorPhase() {
+ if (FLAG_hydrogen_stats) {
+ unsigned size = allocator_->zone()->allocation_size() -
+ allocator_zone_start_allocation_size_;
+ isolate()->GetHStatistics()->SaveTiming(name(), TimeDelta(), size);
+ }
+
+ if (ShouldProduceTraceOutput()) {
+ isolate()->GetHTracer()->TraceLithium(name(), allocator_->chunk());
+ isolate()->GetHTracer()->TraceLiveRanges(name(), allocator_);
+ }
+
+#ifdef DEBUG
+ if (allocator_ != NULL) allocator_->Verify();
+#endif
+}
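
Aside: a minimal usage sketch for this RAII phase type; the phase name and the step wrapped here are illustrative, only the constructor/destructor behaviour comes from the code above.

    // Inside an LAllocator method (illustrative):
    {
      LAllocatorPhase phase("L_Allocate general registers", this);
      AllocateGeneralRegisters();
    }
    // Leaving the scope records the zone growth for --hydrogen-stats,
    // emits Lithium/live-range trace output when requested, and runs
    // Verify() in debug builds.
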
+
+
} } // namespace v8::internal
diff --git a/deps/v8/src/lithium-allocator.h b/deps/v8/src/lithium-allocator.h
index 5b0526357..9908ea823 100644
--- a/deps/v8/src/lithium-allocator.h
+++ b/deps/v8/src/lithium-allocator.h
@@ -146,6 +146,7 @@ class LifetimePosition {
enum RegisterKind {
+ UNALLOCATED_REGISTERS,
GENERAL_REGISTERS,
DOUBLE_REGISTERS
};
@@ -244,13 +245,12 @@ class UseInterval: public ZoneObject {
// Representation of a use position.
class UsePosition: public ZoneObject {
public:
- UsePosition(LifetimePosition pos, LOperand* operand);
+ UsePosition(LifetimePosition pos, LOperand* operand, LOperand* hint);
LOperand* operand() const { return operand_; }
bool HasOperand() const { return operand_ != NULL; }
LOperand* hint() const { return hint_; }
- void set_hint(LOperand* hint) { hint_ = hint; }
bool HasHint() const;
bool RequiresRegister() const;
bool RegisterIsBeneficial() const;
@@ -261,9 +261,9 @@ class UsePosition: public ZoneObject {
private:
void set_next(UsePosition* next) { next_ = next; }
- LOperand* operand_;
- LOperand* hint_;
- LifetimePosition pos_;
+ LOperand* const operand_;
+ LOperand* const hint_;
+ LifetimePosition const pos_;
UsePosition* next_;
bool requires_reg_;
bool register_beneficial_;
@@ -291,9 +291,7 @@ class LiveRange: public ZoneObject {
LOperand* CreateAssignedOperand(Zone* zone);
int assigned_register() const { return assigned_register_; }
int spill_start_index() const { return spill_start_index_; }
- void set_assigned_register(int reg,
- RegisterKind register_kind,
- Zone* zone);
+ void set_assigned_register(int reg, Zone* zone);
void MakeSpilled(Zone* zone);
// Returns use position in this live range that follows both start
@@ -311,6 +309,10 @@ class LiveRange: public ZoneObject {
// Modifies internal state of live range!
UsePosition* NextUsePositionRegisterIsBeneficial(LifetimePosition start);
+ // Returns the use position in this live range for which a register is
+ // beneficial and which precedes start.
+ UsePosition* PreviousUsePositionRegisterIsBeneficial(LifetimePosition start);
+
// Can this live range be spilled at this position.
bool CanBeSpilled(LifetimePosition pos);
@@ -320,15 +322,19 @@ class LiveRange: public ZoneObject {
// live range to the result live range.
void SplitAt(LifetimePosition position, LiveRange* result, Zone* zone);
- bool IsDouble() const { return is_double_; }
+ RegisterKind Kind() const { return kind_; }
bool HasRegisterAssigned() const {
return assigned_register_ != kInvalidAssignment;
}
bool IsSpilled() const { return spilled_; }
- UsePosition* FirstPosWithHint() const;
+ LOperand* current_hint_operand() const {
+ ASSERT(current_hint_operand_ == FirstHint());
+ return current_hint_operand_;
+ }
LOperand* FirstHint() const {
- UsePosition* pos = FirstPosWithHint();
+ UsePosition* pos = first_pos_;
+ while (pos != NULL && !pos->HasHint()) pos = pos->next();
if (pos != NULL) return pos->hint();
return NULL;
}
@@ -363,9 +369,10 @@ class LiveRange: public ZoneObject {
void AddUseInterval(LifetimePosition start,
LifetimePosition end,
Zone* zone);
- UsePosition* AddUsePosition(LifetimePosition pos,
- LOperand* operand,
- Zone* zone);
+ void AddUsePosition(LifetimePosition pos,
+ LOperand* operand,
+ LOperand* hint,
+ Zone* zone);
// Shorten the most recently added interval by setting a new start.
void ShortenTo(LifetimePosition start);
@@ -384,7 +391,7 @@ class LiveRange: public ZoneObject {
int id_;
bool spilled_;
- bool is_double_;
+ RegisterKind kind_;
int assigned_register_;
UseInterval* last_interval_;
UseInterval* first_interval_;
@@ -394,42 +401,12 @@ class LiveRange: public ZoneObject {
// This is used as a cache, it doesn't affect correctness.
mutable UseInterval* current_interval_;
UsePosition* last_processed_use_;
+ // This is used as a cache, it's invalid outside of BuildLiveRanges.
+ LOperand* current_hint_operand_;
LOperand* spill_operand_;
int spill_start_index_;
-};
-
-
-class GrowableBitVector BASE_EMBEDDED {
- public:
- GrowableBitVector() : bits_(NULL) { }
-
- bool Contains(int value) const {
- if (!InBitsRange(value)) return false;
- return bits_->Contains(value);
- }
-
- void Add(int value, Zone* zone) {
- EnsureCapacity(value, zone);
- bits_->Add(value);
- }
-
- private:
- static const int kInitialLength = 1024;
-
- bool InBitsRange(int value) const {
- return bits_ != NULL && bits_->length() > value;
- }
-
- void EnsureCapacity(int value, Zone* zone) {
- if (InBitsRange(value)) return;
- int new_length = bits_ == NULL ? kInitialLength : bits_->length();
- while (new_length <= value) new_length *= 2;
- BitVector* new_bits = new(zone) BitVector(new_length, zone);
- if (bits_ != NULL) new_bits->CopyFrom(*bits_);
- bits_ = new_bits;
- }
- BitVector* bits_;
+ friend class LAllocator; // Assigns to kind_.
};
@@ -457,11 +434,14 @@ class LAllocator BASE_EMBEDDED {
LPlatformChunk* chunk() const { return chunk_; }
HGraph* graph() const { return graph_; }
- Zone* zone() const { return zone_; }
+ Isolate* isolate() const { return graph_->isolate(); }
+ Zone* zone() { return &zone_; }
int GetVirtualRegister() {
- if (next_virtual_register_ > LUnallocated::kMaxVirtualRegisters) {
+ if (next_virtual_register_ >= LUnallocated::kMaxVirtualRegisters) {
allocation_ok_ = false;
+ // Maintain the invariant that we return something below the maximum.
+ return 0;
}
return next_virtual_register_++;
}
@@ -479,6 +459,13 @@ class LAllocator BASE_EMBEDDED {
void Verify() const;
#endif
+ BitVector* assigned_registers() {
+ return assigned_registers_;
+ }
+ BitVector* assigned_double_registers() {
+ return assigned_double_registers_;
+ }
+
private:
void MeetRegisterConstraints();
void ResolvePhis();
@@ -488,7 +475,6 @@ class LAllocator BASE_EMBEDDED {
void ConnectRanges();
void ResolveControlFlow();
void PopulatePointerMaps();
- void ProcessOsrEntry();
void AllocateRegisters();
bool CanEagerlyResolveControlFlow(HBasicBlock* block) const;
inline bool SafePointsAreInOrder() const;
@@ -556,13 +542,25 @@ class LAllocator BASE_EMBEDDED {
// Spill the given life range after position pos.
void SpillAfter(LiveRange* range, LifetimePosition pos);
- // Spill the given life range after position start and up to position end.
+ // Spill the given live range after position [start] and up to position [end].
void SpillBetween(LiveRange* range,
LifetimePosition start,
LifetimePosition end);
+ // Spill the given live range after position [start] and up to position [end].
+ // Range is guaranteed to be spilled at least until position [until].
+ void SpillBetweenUntil(LiveRange* range,
+ LifetimePosition start,
+ LifetimePosition until,
+ LifetimePosition end);
+
void SplitAndSpillIntersecting(LiveRange* range);
+ // If we are trying to spill a range inside a loop, try to
+ // hoist the spill position out to the point just before the loop.
+ LifetimePosition FindOptimalSpillingPos(LiveRange* range,
+ LifetimePosition pos);
+
void Spill(LiveRange* range);
bool IsBlockBoundary(LifetimePosition pos);
@@ -571,6 +569,8 @@ class LAllocator BASE_EMBEDDED {
HBasicBlock* block,
HBasicBlock* pred);
+ inline void SetLiveRangeAssignedRegister(LiveRange* range, int reg);
+
// Return parallel move that should be used to connect ranges split at the
// given position.
LParallelMove* GetConnectingParallelMove(LifetimePosition pos);
@@ -596,7 +596,7 @@ class LAllocator BASE_EMBEDDED {
inline LGap* GapAt(int index);
- Zone* zone_;
+ Zone zone_;
LPlatformChunk* chunk_;
@@ -608,9 +608,9 @@ class LAllocator BASE_EMBEDDED {
ZoneList<LiveRange*> live_ranges_;
// Lists of live ranges
- EmbeddedVector<LiveRange*, Register::kNumAllocatableRegisters>
+ EmbeddedVector<LiveRange*, Register::kMaxNumAllocatableRegisters>
fixed_live_ranges_;
- EmbeddedVector<LiveRange*, DoubleRegister::kNumAllocatableRegisters>
+ EmbeddedVector<LiveRange*, DoubleRegister::kMaxNumAllocatableRegisters>
fixed_double_live_ranges_;
ZoneList<LiveRange*> unhandled_live_ranges_;
ZoneList<LiveRange*> active_live_ranges_;
@@ -625,6 +625,9 @@ class LAllocator BASE_EMBEDDED {
RegisterKind mode_;
int num_registers_;
+ BitVector* assigned_registers_;
+ BitVector* assigned_double_registers_;
+
HGraph* graph_;
bool has_osr_entry_;
@@ -632,10 +635,27 @@ class LAllocator BASE_EMBEDDED {
// Indicates success or failure during register allocation.
bool allocation_ok_;
+#ifdef DEBUG
+ LifetimePosition allocation_finger_;
+#endif
+
DISALLOW_COPY_AND_ASSIGN(LAllocator);
};
+class LAllocatorPhase : public CompilationPhase {
+ public:
+ LAllocatorPhase(const char* name, LAllocator* allocator);
+ ~LAllocatorPhase();
+
+ private:
+ LAllocator* allocator_;
+ unsigned allocator_zone_start_allocation_size_;
+
+ DISALLOW_COPY_AND_ASSIGN(LAllocatorPhase);
+};
+
+
} } // namespace v8::internal
#endif // V8_LITHIUM_ALLOCATOR_H_
diff --git a/deps/v8/src/lithium-codegen.cc b/deps/v8/src/lithium-codegen.cc
new file mode 100644
index 000000000..19ebe7e51
--- /dev/null
+++ b/deps/v8/src/lithium-codegen.cc
@@ -0,0 +1,150 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "lithium-codegen.h"
+
+#if V8_TARGET_ARCH_IA32
+#include "ia32/lithium-ia32.h"
+#include "ia32/lithium-codegen-ia32.h"
+#elif V8_TARGET_ARCH_X64
+#include "x64/lithium-x64.h"
+#include "x64/lithium-codegen-x64.h"
+#elif V8_TARGET_ARCH_ARM
+#include "arm/lithium-arm.h"
+#include "arm/lithium-codegen-arm.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "mips/lithium-mips.h"
+#include "mips/lithium-codegen-mips.h"
+#else
+#error Unsupported target architecture.
+#endif
+
+namespace v8 {
+namespace internal {
+
+
+HGraph* LCodeGenBase::graph() const {
+ return chunk()->graph();
+}
+
+
+LCodeGenBase::LCodeGenBase(LChunk* chunk,
+ MacroAssembler* assembler,
+ CompilationInfo* info)
+ : chunk_(static_cast<LPlatformChunk*>(chunk)),
+ masm_(assembler),
+ info_(info),
+ zone_(info->zone()),
+ status_(UNUSED),
+ current_block_(-1),
+ current_instruction_(-1),
+ instructions_(chunk->instructions()),
+ last_lazy_deopt_pc_(0) {
+}
+
+
+bool LCodeGenBase::GenerateBody() {
+ ASSERT(is_generating());
+ bool emit_instructions = true;
+ LCodeGen* codegen = static_cast<LCodeGen*>(this);
+ for (current_instruction_ = 0;
+ !is_aborted() && current_instruction_ < instructions_->length();
+ current_instruction_++) {
+ LInstruction* instr = instructions_->at(current_instruction_);
+
+ // Don't emit code for basic blocks with a replacement.
+ if (instr->IsLabel()) {
+ emit_instructions = !LLabel::cast(instr)->HasReplacement() &&
+ (!FLAG_unreachable_code_elimination ||
+ instr->hydrogen_value()->block()->IsReachable());
+ if (FLAG_code_comments && !emit_instructions) {
+ Comment(
+ ";;; <@%d,#%d> -------------------- B%d (unreachable/replaced) "
+ "--------------------",
+ current_instruction_,
+ instr->hydrogen_value()->id(),
+ instr->hydrogen_value()->block()->block_id());
+ }
+ }
+ if (!emit_instructions) continue;
+
+ if (FLAG_code_comments && instr->HasInterestingComment(codegen)) {
+ Comment(";;; <@%d,#%d> %s",
+ current_instruction_,
+ instr->hydrogen_value()->id(),
+ instr->Mnemonic());
+ }
+
+ GenerateBodyInstructionPre(instr);
+
+ HValue* value = instr->hydrogen_value();
+ if (value->position() != RelocInfo::kNoPosition) {
+ ASSERT(!graph()->info()->IsOptimizing() ||
+ !FLAG_emit_opt_code_positions ||
+ value->position() != RelocInfo::kNoPosition);
+ RecordAndWritePosition(value->position());
+ }
+
+ instr->CompileToNative(codegen);
+
+ GenerateBodyInstructionPost(instr);
+ }
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+ last_lazy_deopt_pc_ = masm()->pc_offset();
+ return !is_aborted();
+}
+
+
+void LCodeGenBase::Comment(const char* format, ...) {
+ if (!FLAG_code_comments) return;
+ char buffer[4 * KB];
+ StringBuilder builder(buffer, ARRAY_SIZE(buffer));
+ va_list arguments;
+ va_start(arguments, format);
+ builder.AddFormattedList(format, arguments);
+ va_end(arguments);
+
+ // Copy the string before recording it in the assembler to avoid
+ // issues when the stack allocated buffer goes out of scope.
+ size_t length = builder.position();
+ Vector<char> copy = Vector<char>::New(static_cast<int>(length) + 1);
+ OS::MemCopy(copy.start(), builder.Finalize(), copy.length());
+ masm()->RecordComment(copy.start());
+}
+
+
+int LCodeGenBase::GetNextEmittedBlock() const {
+ for (int i = current_block_ + 1; i < graph()->blocks()->length(); ++i) {
+ if (!chunk_->GetLabel(i)->HasReplacement()) return i;
+ }
+ return -1;
+}
+
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/lithium-codegen.h b/deps/v8/src/lithium-codegen.h
new file mode 100644
index 000000000..9caab8127
--- /dev/null
+++ b/deps/v8/src/lithium-codegen.h
@@ -0,0 +1,96 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_LITHIUM_CODEGEN_H_
+#define V8_LITHIUM_CODEGEN_H_
+
+#include "v8.h"
+
+#include "compiler.h"
+
+namespace v8 {
+namespace internal {
+
+class LInstruction;
+class LPlatformChunk;
+
+class LCodeGenBase BASE_EMBEDDED {
+ public:
+ LCodeGenBase(LChunk* chunk,
+ MacroAssembler* assembler,
+ CompilationInfo* info);
+ virtual ~LCodeGenBase() {}
+
+ // Simple accessors.
+ MacroAssembler* masm() const { return masm_; }
+ CompilationInfo* info() const { return info_; }
+ Isolate* isolate() const { return info_->isolate(); }
+ Factory* factory() const { return isolate()->factory(); }
+ Heap* heap() const { return isolate()->heap(); }
+ Zone* zone() const { return zone_; }
+ LPlatformChunk* chunk() const { return chunk_; }
+ HGraph* graph() const;
+
+ void FPRINTF_CHECKING Comment(const char* format, ...);
+
+ bool GenerateBody();
+ virtual void GenerateBodyInstructionPre(LInstruction* instr) {}
+ virtual void GenerateBodyInstructionPost(LInstruction* instr) {}
+
+ virtual void EnsureSpaceForLazyDeopt(int space_needed) = 0;
+ virtual void RecordAndWritePosition(int position) = 0;
+
+ int GetNextEmittedBlock() const;
+
+ protected:
+ enum Status {
+ UNUSED,
+ GENERATING,
+ DONE,
+ ABORTED
+ };
+
+ LPlatformChunk* const chunk_;
+ MacroAssembler* const masm_;
+ CompilationInfo* const info_;
+ Zone* zone_;
+ Status status_;
+ int current_block_;
+ int current_instruction_;
+ const ZoneList<LInstruction*>* instructions_;
+ int last_lazy_deopt_pc_;
+
+ bool is_unused() const { return status_ == UNUSED; }
+ bool is_generating() const { return status_ == GENERATING; }
+ bool is_done() const { return status_ == DONE; }
+ bool is_aborted() const { return status_ == ABORTED; }
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_LITHIUM_CODEGEN_H_
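
Aside: a minimal sketch of how a platform back end plugs into LCodeGenBase. The derivation itself is implied by the static_cast<LCodeGen*> in GenerateBody() above; the member bodies and comments here are illustrative only.

    // Illustrative only.
    class LCodeGen : public LCodeGenBase {
     public:
      LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
          : LCodeGenBase(chunk, assembler, info) {}

      // Hooks invoked around every instruction by GenerateBody().
      virtual void GenerateBodyInstructionPre(LInstruction* instr) {}
      virtual void GenerateBodyInstructionPost(LInstruction* instr) {}

      // Pure virtuals each architecture must implement.
      virtual void EnsureSpaceForLazyDeopt(int space_needed) { /* pad code */ }
      virtual void RecordAndWritePosition(int position) { /* forward to masm */ }
    };
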
diff --git a/deps/v8/src/lithium.cc b/deps/v8/src/lithium.cc
index 1232bf1a6..b66a64f90 100644
--- a/deps/v8/src/lithium.cc
+++ b/deps/v8/src/lithium.cc
@@ -58,24 +58,27 @@ void LOperand::PrintTo(StringStream* stream) {
case UNALLOCATED:
unalloc = LUnallocated::cast(this);
stream->Add("v%d", unalloc->virtual_register());
- switch (unalloc->policy()) {
+ if (unalloc->basic_policy() == LUnallocated::FIXED_SLOT) {
+ stream->Add("(=%dS)", unalloc->fixed_slot_index());
+ break;
+ }
+ switch (unalloc->extended_policy()) {
case LUnallocated::NONE:
break;
case LUnallocated::FIXED_REGISTER: {
+ int reg_index = unalloc->fixed_register_index();
const char* register_name =
- Register::AllocationIndexToString(unalloc->fixed_index());
+ Register::AllocationIndexToString(reg_index);
stream->Add("(=%s)", register_name);
break;
}
case LUnallocated::FIXED_DOUBLE_REGISTER: {
+ int reg_index = unalloc->fixed_register_index();
const char* double_register_name =
- DoubleRegister::AllocationIndexToString(unalloc->fixed_index());
+ DoubleRegister::AllocationIndexToString(reg_index);
stream->Add("(=%s)", double_register_name);
break;
}
- case LUnallocated::FIXED_SLOT:
- stream->Add("(=%dS)", unalloc->fixed_index());
- break;
case LUnallocated::MUST_HAVE_REGISTER:
stream->Add("(R)");
break;
@@ -174,8 +177,11 @@ void LParallelMove::PrintDataTo(StringStream* stream) const {
void LEnvironment::PrintTo(StringStream* stream) {
stream->Add("[id=%d|", ast_id().ToInt());
- stream->Add("[parameters=%d|", parameter_count());
- stream->Add("[arguments_stack_height=%d|", arguments_stack_height());
+ if (deoptimization_index() != Safepoint::kNoDeoptimizationIndex) {
+ stream->Add("deopt_id=%d|", deoptimization_index());
+ }
+ stream->Add("parameters=%d|", parameter_count());
+ stream->Add("arguments_stack_height=%d|", arguments_stack_height());
for (int i = 0; i < values_.length(); ++i) {
if (i != 0) stream->Add(";");
if (values_[i] == NULL) {
@@ -223,7 +229,29 @@ void LPointerMap::PrintTo(StringStream* stream) {
if (i != 0) stream->Add(";");
pointer_operands_[i]->PrintTo(stream);
}
- stream->Add("} @%d", position());
+ stream->Add("}");
+}
+
+
+int StackSlotOffset(int index) {
+ if (index >= 0) {
+ // Local or spill slot. Skip the frame pointer, function, and
+ // context in the fixed part of the frame.
+ return -(index + 3) * kPointerSize;
+ } else {
+ // Incoming parameter. Skip the return address.
+ return -(index + 1) * kPointerSize + kFPOnStackSize + kPCOnStackSize;
+ }
+}
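
Aside: a worked example of the offsets computed by StackSlotOffset, assuming a target where kPointerSize, kFPOnStackSize and kPCOnStackSize are all 4 (these constants are target-dependent):

    // Local or spill slot 0 (skips the frame pointer, function and context):
    //   StackSlotOffset(0)  == -(0 + 3) * 4 == -12
    // Local or spill slot 1:
    //   StackSlotOffset(1)  == -(1 + 3) * 4 == -16
    // Incoming parameter at index -1 (skips the saved fp and return address):
    //   StackSlotOffset(-1) == -(-1 + 1) * 4 + 4 + 4 == 8
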
+
+
+LChunk::LChunk(CompilationInfo* info, HGraph* graph)
+ : spill_slot_count_(0),
+ info_(info),
+ graph_(graph),
+ instructions_(32, graph->zone()),
+ pointer_maps_(8, graph->zone()),
+ inlined_closures_(1, graph->zone()) {
}
@@ -248,8 +276,9 @@ Label* LChunk::GetAssemblyLabel(int block_id) const {
return label->label();
}
+
void LChunk::MarkEmptyBlocks() {
- HPhase phase("L_Mark empty blocks", this);
+ LPhase phase("L_Mark empty blocks", this);
for (int i = 0; i < graph()->blocks()->length(); ++i) {
HBasicBlock* block = graph()->blocks()->at(i);
int first = block->first_instruction_index();
@@ -274,7 +303,6 @@ void LChunk::MarkEmptyBlocks() {
can_eliminate = false;
}
}
-
if (can_eliminate) {
label->set_replacement(GetLabel(goto_instr->block_id()));
}
@@ -286,6 +314,7 @@ void LChunk::MarkEmptyBlocks() {
void LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
LInstructionGap* gap = new(graph_->zone()) LInstructionGap(block);
+ gap->set_hydrogen_value(instr->hydrogen_value());
int index = -1;
if (instr->IsControl()) {
instructions_.Add(gap, zone());
@@ -361,13 +390,13 @@ Representation LChunk::LookupLiteralRepresentation(
LChunk* LChunk::NewChunk(HGraph* graph) {
- NoHandleAllocation no_handles;
- AssertNoAllocation no_gc;
-
+ DisallowHandleAllocation no_handles;
+ DisallowHeapAllocation no_gc;
+ graph->DisallowAddingNewValues();
int values = graph->GetMaximumValueID();
CompilationInfo* info = graph->info();
if (values > LUnallocated::kMaxVirtualRegisters) {
- info->set_bailout_reason("not enough virtual registers for values");
+ info->set_bailout_reason(kNotEnoughVirtualRegistersForValues);
return NULL;
}
LAllocator allocator(values, graph);
@@ -376,29 +405,38 @@ LChunk* LChunk::NewChunk(HGraph* graph) {
if (chunk == NULL) return NULL;
if (!allocator.Allocate(chunk)) {
- info->set_bailout_reason("not enough virtual registers (regalloc)");
+ info->set_bailout_reason(kNotEnoughVirtualRegistersRegalloc);
return NULL;
}
+ chunk->set_allocated_double_registers(
+ allocator.assigned_double_registers());
+
return chunk;
}
Handle<Code> LChunk::Codegen() {
MacroAssembler assembler(info()->isolate(), NULL, 0);
+ LOG_CODE_EVENT(info()->isolate(),
+ CodeStartLinePosInfoRecordEvent(
+ assembler.positions_recorder()));
LCodeGen generator(this, &assembler, info());
MarkEmptyBlocks();
if (generator.GenerateCode()) {
- if (FLAG_trace_codegen) {
- PrintF("Crankshaft Compiler - ");
- }
- CodeGenerator::MakeCodePrologue(info());
- Code::Flags flags = Code::ComputeFlags(Code::OPTIMIZED_FUNCTION);
+ CodeGenerator::MakeCodePrologue(info(), "optimized");
+ Code::Flags flags = info()->flags();
Handle<Code> code =
CodeGenerator::MakeCodeEpilogue(&assembler, flags, info());
generator.FinishCode(code);
+ code->set_is_crankshafted(true);
+ void* jit_handler_data =
+ assembler.positions_recorder()->DetachJITHandlerData();
+ LOG_CODE_EVENT(info()->isolate(),
+ CodeEndLinePosInfoRecordEvent(*code, jit_handler_data));
+
CodeGenerator::PrintCode(code, info());
return code;
}
@@ -406,4 +444,36 @@ Handle<Code> LChunk::Codegen() {
}
+void LChunk::set_allocated_double_registers(BitVector* allocated_registers) {
+ allocated_double_registers_ = allocated_registers;
+ BitVector* doubles = allocated_double_registers();
+ BitVector::Iterator iterator(doubles);
+ while (!iterator.Done()) {
+ if (info()->saves_caller_doubles()) {
+ if (kDoubleSize == kPointerSize * 2) {
+ spill_slot_count_ += 2;
+ } else {
+ spill_slot_count_++;
+ }
+ }
+ iterator.Advance();
+ }
+}
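
Aside: a small worked example of the slot accounting above (the register count is illustrative). With three assigned double registers, saves_caller_doubles() set, and a target where kDoubleSize == 2 * kPointerSize, the loop reserves two slots per register:

    //   spill_slot_count_ += 3 * 2;   // 6 extra spill slots
    // On a target where a double fits in one pointer-sized slot, the other
    // branch reserves one slot per register instead:
    //   spill_slot_count_ += 3 * 1;   // 3 extra spill slots
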
+
+
+LInstruction* LChunkBuilder::CheckElideControlInstruction(
+ HControlInstruction* instr) {
+ HBasicBlock* successor;
+ if (!instr->KnownSuccessorBlock(&successor)) return NULL;
+ return new(zone()) LGoto(successor);
+}
+
+
+LPhase::~LPhase() {
+ if (ShouldProduceTraceOutput()) {
+ isolate()->GetHTracer()->TraceLithium(name(), chunk_);
+ }
+}
+
+
} } // namespace v8::internal
diff --git a/deps/v8/src/lithium.h b/deps/v8/src/lithium.h
index a29d9d07e..d4395f2d7 100644
--- a/deps/v8/src/lithium.h
+++ b/deps/v8/src/lithium.h
@@ -43,7 +43,7 @@ namespace internal {
V(DoubleRegister, DOUBLE_REGISTER)
-class LOperand: public ZoneObject {
+class LOperand : public ZoneObject {
public:
enum Kind {
INVALID,
@@ -90,14 +90,18 @@ class LOperand: public ZoneObject {
};
-class LUnallocated: public LOperand {
+class LUnallocated : public LOperand {
public:
- enum Policy {
+ enum BasicPolicy {
+ FIXED_SLOT,
+ EXTENDED_POLICY
+ };
+
+ enum ExtendedPolicy {
NONE,
ANY,
FIXED_REGISTER,
FIXED_DOUBLE_REGISTER,
- FIXED_SLOT,
MUST_HAVE_REGISTER,
WRITABLE_REGISTER,
SAME_AS_FIRST_INPUT
@@ -117,104 +121,157 @@ class LUnallocated: public LOperand {
USED_AT_END
};
- explicit LUnallocated(Policy policy) : LOperand(UNALLOCATED, 0) {
- Initialize(policy, 0, USED_AT_END);
+ explicit LUnallocated(ExtendedPolicy policy) : LOperand(UNALLOCATED, 0) {
+ value_ |= BasicPolicyField::encode(EXTENDED_POLICY);
+ value_ |= ExtendedPolicyField::encode(policy);
+ value_ |= LifetimeField::encode(USED_AT_END);
}
- LUnallocated(Policy policy, int fixed_index) : LOperand(UNALLOCATED, 0) {
- Initialize(policy, fixed_index, USED_AT_END);
+ LUnallocated(BasicPolicy policy, int index) : LOperand(UNALLOCATED, 0) {
+ ASSERT(policy == FIXED_SLOT);
+ value_ |= BasicPolicyField::encode(policy);
+ value_ |= index << FixedSlotIndexField::kShift;
+ ASSERT(this->fixed_slot_index() == index);
}
- LUnallocated(Policy policy, Lifetime lifetime) : LOperand(UNALLOCATED, 0) {
- Initialize(policy, 0, lifetime);
+ LUnallocated(ExtendedPolicy policy, int index) : LOperand(UNALLOCATED, 0) {
+ ASSERT(policy == FIXED_REGISTER || policy == FIXED_DOUBLE_REGISTER);
+ value_ |= BasicPolicyField::encode(EXTENDED_POLICY);
+ value_ |= ExtendedPolicyField::encode(policy);
+ value_ |= LifetimeField::encode(USED_AT_END);
+ value_ |= FixedRegisterField::encode(index);
}
- // The superclass has a KindField. Some policies have a signed fixed
- // index in the upper bits.
- static const int kPolicyWidth = 3;
- static const int kLifetimeWidth = 1;
- static const int kVirtualRegisterWidth = 15;
-
- static const int kPolicyShift = kKindFieldWidth;
- static const int kLifetimeShift = kPolicyShift + kPolicyWidth;
- static const int kVirtualRegisterShift = kLifetimeShift + kLifetimeWidth;
- static const int kFixedIndexShift =
- kVirtualRegisterShift + kVirtualRegisterWidth;
- static const int kFixedIndexWidth = 32 - kFixedIndexShift;
- STATIC_ASSERT(kFixedIndexWidth > 5);
-
- class PolicyField : public BitField<Policy, kPolicyShift, kPolicyWidth> { };
-
- class LifetimeField
- : public BitField<Lifetime, kLifetimeShift, kLifetimeWidth> {
- };
+ LUnallocated(ExtendedPolicy policy, Lifetime lifetime)
+ : LOperand(UNALLOCATED, 0) {
+ value_ |= BasicPolicyField::encode(EXTENDED_POLICY);
+ value_ |= ExtendedPolicyField::encode(policy);
+ value_ |= LifetimeField::encode(lifetime);
+ }
- class VirtualRegisterField
- : public BitField<unsigned,
- kVirtualRegisterShift,
- kVirtualRegisterWidth> {
- };
+ LUnallocated* CopyUnconstrained(Zone* zone) {
+ LUnallocated* result = new(zone) LUnallocated(ANY);
+ result->set_virtual_register(virtual_register());
+ return result;
+ }
- static const int kMaxVirtualRegisters = 1 << kVirtualRegisterWidth;
- static const int kMaxFixedIndex = (1 << (kFixedIndexWidth - 1)) - 1;
- static const int kMinFixedIndex = -(1 << (kFixedIndexWidth - 1));
+ static LUnallocated* cast(LOperand* op) {
+ ASSERT(op->IsUnallocated());
+ return reinterpret_cast<LUnallocated*>(op);
+ }
+ // The encoding used for LUnallocated operands depends on the policy that is
+ // stored within the operand. The FIXED_SLOT policy uses a compact encoding
+ // because it accommodates a larger payload.
+ //
+ // For FIXED_SLOT policy:
+ // +------------------------------------------+
+ // | slot_index | vreg | 0 | 001 |
+ // +------------------------------------------+
+ //
+ // For all other (extended) policies:
+ // +------------------------------------------+
+ // | reg_index | L | PPP | vreg | 1 | 001 | L ... Lifetime
+ // +------------------------------------------+ P ... Policy
+ //
+ // The slot index is a signed value which requires us to decode it manually
+ // instead of using the BitField utility class.
+
+ // The superclass has a KindField.
+ STATIC_ASSERT(kKindFieldWidth == 3);
+
+ // BitFields for all unallocated operands.
+ class BasicPolicyField : public BitField<BasicPolicy, 3, 1> {};
+ class VirtualRegisterField : public BitField<unsigned, 4, 18> {};
+
+ // BitFields specific to BasicPolicy::FIXED_SLOT.
+ class FixedSlotIndexField : public BitField<int, 22, 10> {};
+
+ // BitFields specific to BasicPolicy::EXTENDED_POLICY.
+ class ExtendedPolicyField : public BitField<ExtendedPolicy, 22, 3> {};
+ class LifetimeField : public BitField<Lifetime, 25, 1> {};
+ class FixedRegisterField : public BitField<int, 26, 6> {};
+
+ static const int kMaxVirtualRegisters = VirtualRegisterField::kMax + 1;
+ static const int kFixedSlotIndexWidth = FixedSlotIndexField::kSize;
+ static const int kMaxFixedSlotIndex = (1 << (kFixedSlotIndexWidth - 1)) - 1;
+ static const int kMinFixedSlotIndex = -(1 << (kFixedSlotIndexWidth - 1));
+
+ // Predicates for the operand policy.
bool HasAnyPolicy() const {
- return policy() == ANY;
+ return basic_policy() == EXTENDED_POLICY &&
+ extended_policy() == ANY;
}
bool HasFixedPolicy() const {
- return policy() == FIXED_REGISTER ||
- policy() == FIXED_DOUBLE_REGISTER ||
- policy() == FIXED_SLOT;
+ return basic_policy() == FIXED_SLOT ||
+ extended_policy() == FIXED_REGISTER ||
+ extended_policy() == FIXED_DOUBLE_REGISTER;
}
bool HasRegisterPolicy() const {
- return policy() == WRITABLE_REGISTER || policy() == MUST_HAVE_REGISTER;
+ return basic_policy() == EXTENDED_POLICY && (
+ extended_policy() == WRITABLE_REGISTER ||
+ extended_policy() == MUST_HAVE_REGISTER);
}
bool HasSameAsInputPolicy() const {
- return policy() == SAME_AS_FIRST_INPUT;
+ return basic_policy() == EXTENDED_POLICY &&
+ extended_policy() == SAME_AS_FIRST_INPUT;
}
- Policy policy() const { return PolicyField::decode(value_); }
- void set_policy(Policy policy) {
- value_ = PolicyField::update(value_, policy);
+ bool HasFixedSlotPolicy() const {
+ return basic_policy() == FIXED_SLOT;
}
- int fixed_index() const {
- return static_cast<int>(value_) >> kFixedIndexShift;
+ bool HasFixedRegisterPolicy() const {
+ return basic_policy() == EXTENDED_POLICY &&
+ extended_policy() == FIXED_REGISTER;
+ }
+ bool HasFixedDoubleRegisterPolicy() const {
+ return basic_policy() == EXTENDED_POLICY &&
+ extended_policy() == FIXED_DOUBLE_REGISTER;
+ }
+ bool HasWritableRegisterPolicy() const {
+ return basic_policy() == EXTENDED_POLICY &&
+ extended_policy() == WRITABLE_REGISTER;
}
- int virtual_register() const {
- return VirtualRegisterField::decode(value_);
+ // [basic_policy]: Distinguish between FIXED_SLOT and all other policies.
+ BasicPolicy basic_policy() const {
+ return BasicPolicyField::decode(value_);
}
- void set_virtual_register(unsigned id) {
- value_ = VirtualRegisterField::update(value_, id);
+ // [extended_policy]: Only for non-FIXED_SLOT. The finer-grained policy.
+ ExtendedPolicy extended_policy() const {
+ ASSERT(basic_policy() == EXTENDED_POLICY);
+ return ExtendedPolicyField::decode(value_);
}
- LUnallocated* CopyUnconstrained(Zone* zone) {
- LUnallocated* result = new(zone) LUnallocated(ANY);
- result->set_virtual_register(virtual_register());
- return result;
+ // [fixed_slot_index]: Only for FIXED_SLOT.
+ int fixed_slot_index() const {
+ ASSERT(HasFixedSlotPolicy());
+ return static_cast<int>(value_) >> FixedSlotIndexField::kShift;
}
- static LUnallocated* cast(LOperand* op) {
- ASSERT(op->IsUnallocated());
- return reinterpret_cast<LUnallocated*>(op);
+ // [fixed_register_index]: Only for FIXED_REGISTER or FIXED_DOUBLE_REGISTER.
+ int fixed_register_index() const {
+ ASSERT(HasFixedRegisterPolicy() || HasFixedDoubleRegisterPolicy());
+ return FixedRegisterField::decode(value_);
}
- bool IsUsedAtStart() {
- return LifetimeField::decode(value_) == USED_AT_START;
+ // [virtual_register]: The virtual register ID for this operand.
+ int virtual_register() const {
+ return VirtualRegisterField::decode(value_);
+ }
+ void set_virtual_register(unsigned id) {
+ value_ = VirtualRegisterField::update(value_, id);
}
- private:
- void Initialize(Policy policy, int fixed_index, Lifetime lifetime) {
- value_ |= PolicyField::encode(policy);
- value_ |= LifetimeField::encode(lifetime);
- value_ |= fixed_index << kFixedIndexShift;
- ASSERT(this->fixed_index() == fixed_index);
+ // [lifetime]: Only for non-FIXED_SLOT.
+ bool IsUsedAtStart() {
+ ASSERT(basic_policy() == EXTENDED_POLICY);
+ return LifetimeField::decode(value_) == USED_AT_START;
}
};
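
Aside: a minimal sketch of the FIXED_SLOT encoding documented above, assuming the bit layout from that comment (kind in bits 0-2, basic policy in bit 3, virtual register in bits 4-21, slot index in bits 22-31). It shows why the signed slot index is decoded with a plain arithmetic shift instead of BitField:

    // Illustrative only -- mirrors LUnallocated(FIXED_SLOT, -5) with vreg 7.
    uint32_t value = 1;                        // KindField: UNALLOCATED ("001")
    value |= 7u << 4;                          // VirtualRegisterField: vreg 7
    value |= static_cast<uint32_t>(-5) << 22;  // FixedSlotIndexField: slot -5
    // BasicPolicyField stays 0, i.e. FIXED_SLOT.
    int slot = static_cast<int>(value) >> 22;  // arithmetic shift sign-extends: -5
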
-class LMoveOperands BASE_EMBEDDED {
+class LMoveOperands V8_FINAL BASE_EMBEDDED {
public:
LMoveOperands(LOperand* source, LOperand* destination)
: source_(source), destination_(destination) {
@@ -260,7 +317,7 @@ class LMoveOperands BASE_EMBEDDED {
};
-class LConstantOperand: public LOperand {
+class LConstantOperand V8_FINAL : public LOperand {
public:
static LConstantOperand* Create(int index, Zone* zone) {
ASSERT(index >= 0);
@@ -285,7 +342,7 @@ class LConstantOperand: public LOperand {
};
-class LArgument: public LOperand {
+class LArgument V8_FINAL : public LOperand {
public:
explicit LArgument(int index) : LOperand(ARGUMENT, index) { }
@@ -296,7 +353,7 @@ class LArgument: public LOperand {
};
-class LStackSlot: public LOperand {
+class LStackSlot V8_FINAL : public LOperand {
public:
static LStackSlot* Create(int index, Zone* zone) {
ASSERT(index >= 0);
@@ -321,7 +378,7 @@ class LStackSlot: public LOperand {
};
-class LDoubleStackSlot: public LOperand {
+class LDoubleStackSlot V8_FINAL : public LOperand {
public:
static LDoubleStackSlot* Create(int index, Zone* zone) {
ASSERT(index >= 0);
@@ -346,7 +403,7 @@ class LDoubleStackSlot: public LOperand {
};
-class LRegister: public LOperand {
+class LRegister V8_FINAL : public LOperand {
public:
static LRegister* Create(int index, Zone* zone) {
ASSERT(index >= 0);
@@ -371,7 +428,7 @@ class LRegister: public LOperand {
};
-class LDoubleRegister: public LOperand {
+class LDoubleRegister V8_FINAL : public LOperand {
public:
static LDoubleRegister* Create(int index, Zone* zone) {
ASSERT(index >= 0);
@@ -396,7 +453,7 @@ class LDoubleRegister: public LOperand {
};
-class LParallelMove : public ZoneObject {
+class LParallelMove V8_FINAL : public ZoneObject {
public:
explicit LParallelMove(Zone* zone) : move_operands_(4, zone) { }
@@ -417,12 +474,11 @@ class LParallelMove : public ZoneObject {
};
-class LPointerMap: public ZoneObject {
+class LPointerMap V8_FINAL : public ZoneObject {
public:
- explicit LPointerMap(int position, Zone* zone)
+ explicit LPointerMap(Zone* zone)
: pointer_operands_(8, zone),
untagged_operands_(0, zone),
- position_(position),
lithium_position_(-1) { }
const ZoneList<LOperand*>* GetNormalizedOperands() {
@@ -432,7 +488,6 @@ class LPointerMap: public ZoneObject {
untagged_operands_.Clear();
return &pointer_operands_;
}
- int position() const { return position_; }
int lithium_position() const { return lithium_position_; }
void set_lithium_position(int pos) {
@@ -448,12 +503,11 @@ class LPointerMap: public ZoneObject {
private:
ZoneList<LOperand*> pointer_operands_;
ZoneList<LOperand*> untagged_operands_;
- int position_;
int lithium_position_;
};
-class LEnvironment: public ZoneObject {
+class LEnvironment V8_FINAL : public ZoneObject {
public:
LEnvironment(Handle<JSFunction> closure,
FrameType frame_type,
@@ -470,13 +524,13 @@ class LEnvironment: public ZoneObject {
deoptimization_index_(Safepoint::kNoDeoptimizationIndex),
translation_index_(-1),
ast_id_(ast_id),
+ translation_size_(value_count),
parameter_count_(parameter_count),
pc_offset_(-1),
values_(value_count, zone),
is_tagged_(value_count, zone),
is_uint32_(value_count, zone),
- spilled_registers_(NULL),
- spilled_double_registers_(NULL),
+ object_mapping_(0, zone),
outer_(outer),
entry_(entry),
zone_(zone) { }
@@ -487,27 +541,25 @@ class LEnvironment: public ZoneObject {
int deoptimization_index() const { return deoptimization_index_; }
int translation_index() const { return translation_index_; }
BailoutId ast_id() const { return ast_id_; }
+ int translation_size() const { return translation_size_; }
int parameter_count() const { return parameter_count_; }
int pc_offset() const { return pc_offset_; }
- LOperand** spilled_registers() const { return spilled_registers_; }
- LOperand** spilled_double_registers() const {
- return spilled_double_registers_;
- }
const ZoneList<LOperand*>* values() const { return &values_; }
LEnvironment* outer() const { return outer_; }
HEnterInlined* entry() { return entry_; }
+ Zone* zone() const { return zone_; }
void AddValue(LOperand* operand,
Representation representation,
bool is_uint32) {
values_.Add(operand, zone());
- if (representation.IsTagged()) {
+ if (representation.IsSmiOrTagged()) {
ASSERT(!is_uint32);
- is_tagged_.Add(values_.length() - 1);
+ is_tagged_.Add(values_.length() - 1, zone());
}
if (is_uint32) {
- is_uint32_.Add(values_.length() - 1);
+ is_uint32_.Add(values_.length() - 1, zone());
}
}
@@ -519,6 +571,38 @@ class LEnvironment: public ZoneObject {
return is_uint32_.Contains(index);
}
+ void AddNewObject(int length, bool is_arguments) {
+ uint32_t encoded = LengthOrDupeField::encode(length) |
+ IsArgumentsField::encode(is_arguments) |
+ IsDuplicateField::encode(false);
+ object_mapping_.Add(encoded, zone());
+ }
+
+ void AddDuplicateObject(int dupe_of) {
+ uint32_t encoded = LengthOrDupeField::encode(dupe_of) |
+ IsDuplicateField::encode(true);
+ object_mapping_.Add(encoded, zone());
+ }
+
+ int ObjectDuplicateOfAt(int index) {
+ ASSERT(ObjectIsDuplicateAt(index));
+ return LengthOrDupeField::decode(object_mapping_[index]);
+ }
+
+ int ObjectLengthAt(int index) {
+ ASSERT(!ObjectIsDuplicateAt(index));
+ return LengthOrDupeField::decode(object_mapping_[index]);
+ }
+
+ bool ObjectIsArgumentsAt(int index) {
+ ASSERT(!ObjectIsDuplicateAt(index));
+ return IsArgumentsField::decode(object_mapping_[index]);
+ }
+
+ bool ObjectIsDuplicateAt(int index) {
+ return IsDuplicateField::decode(object_mapping_[index]);
+ }
+
void Register(int deoptimization_index,
int translation_index,
int pc_offset) {
@@ -531,15 +615,15 @@ class LEnvironment: public ZoneObject {
return deoptimization_index_ != Safepoint::kNoDeoptimizationIndex;
}
- void SetSpilledRegisters(LOperand** registers,
- LOperand** double_registers) {
- spilled_registers_ = registers;
- spilled_double_registers_ = double_registers;
- }
-
void PrintTo(StringStream* stream);
- Zone* zone() const { return zone_; }
+ // Marker value indicating a de-materialized object.
+ static LOperand* materialization_marker() { return NULL; }
+
+ // Encoding used for the object_mapping map below.
+ class LengthOrDupeField : public BitField<int, 0, 30> { };
+ class IsArgumentsField : public BitField<bool, 30, 1> { };
+ class IsDuplicateField : public BitField<bool, 31, 1> { };
private:
Handle<JSFunction> closure_;
@@ -548,27 +632,27 @@ class LEnvironment: public ZoneObject {
int deoptimization_index_;
int translation_index_;
BailoutId ast_id_;
+ int translation_size_;
int parameter_count_;
int pc_offset_;
+
+ // Value array: [parameters] [locals] [expression stack] [de-materialized].
+ // |>--------- translation_size ---------<|
ZoneList<LOperand*> values_;
- BitVector is_tagged_;
- BitVector is_uint32_;
+ GrowableBitVector is_tagged_;
+ GrowableBitVector is_uint32_;
- // Allocation index indexed arrays of spill slot operands for registers
- // that are also in spill slots at an OSR entry. NULL for environments
- // that do not correspond to an OSR entry.
- LOperand** spilled_registers_;
- LOperand** spilled_double_registers_;
+ // Map with encoded information about materialization_marker operands.
+ ZoneList<uint32_t> object_mapping_;
LEnvironment* outer_;
HEnterInlined* entry_;
-
Zone* zone_;
};
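
Aside: a small sketch of the object_mapping_ encoding introduced above (the concrete values are illustrative); each entry packs either a captured object's length or the index of the entry it duplicates:

    // A captured (de-materialized) object with 3 fields, not an arguments object:
    uint32_t obj = LEnvironment::LengthOrDupeField::encode(3) |
                   LEnvironment::IsArgumentsField::encode(false) |
                   LEnvironment::IsDuplicateField::encode(false);
    // A later entry that duplicates the object recorded at mapping index 0:
    uint32_t dupe = LEnvironment::LengthOrDupeField::encode(0) |
                    LEnvironment::IsDuplicateField::encode(true);
    // Readers test IsDuplicateField first, then read LengthOrDupeField either
    // as a dupe index (ObjectDuplicateOfAt) or as a length (ObjectLengthAt).
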
// Iterates over the non-null, non-constant operands in an environment.
-class ShallowIterator BASE_EMBEDDED {
+class ShallowIterator V8_FINAL BASE_EMBEDDED {
public:
explicit ShallowIterator(LEnvironment* env)
: env_(env),
@@ -581,6 +665,7 @@ class ShallowIterator BASE_EMBEDDED {
LOperand* Current() {
ASSERT(!Done());
+ ASSERT(env_->values()->at(current_) != NULL);
return env_->values()->at(current_);
}
@@ -611,7 +696,7 @@ class ShallowIterator BASE_EMBEDDED {
// Iterator for non-null, non-constant operands incl. outer environments.
-class DeepIterator BASE_EMBEDDED {
+class DeepIterator V8_FINAL BASE_EMBEDDED {
public:
explicit DeepIterator(LEnvironment* env)
: current_iterator_(env) {
@@ -622,6 +707,7 @@ class DeepIterator BASE_EMBEDDED {
LOperand* Current() {
ASSERT(!current_iterator_.Done());
+ ASSERT(current_iterator_.Current() != NULL);
return current_iterator_.Current();
}
@@ -647,7 +733,7 @@ class LLabel;
// Superclass providing data and behavior common to all the
// arch-specific LPlatformChunk classes.
-class LChunk: public ZoneObject {
+class LChunk : public ZoneObject {
public:
static LChunk* NewChunk(HGraph* graph);
@@ -661,6 +747,7 @@ class LChunk: public ZoneObject {
int spill_slot_count() const { return spill_slot_count_; }
CompilationInfo* info() const { return info_; }
HGraph* graph() const { return graph_; }
+ Isolate* isolate() const { return graph_->isolate(); }
const ZoneList<LInstruction*>* instructions() const { return &instructions_; }
void AddGapMove(int index, LOperand* from, LOperand* to);
LGap* GetGapAt(int index) const;
@@ -684,26 +771,48 @@ class LChunk: public ZoneObject {
Handle<Code> Codegen();
+ void set_allocated_double_registers(BitVector* allocated_registers);
+ BitVector* allocated_double_registers() {
+ return allocated_double_registers_;
+ }
+
protected:
- LChunk(CompilationInfo* info, HGraph* graph)
- : spill_slot_count_(0),
- info_(info),
- graph_(graph),
- instructions_(32, graph->zone()),
- pointer_maps_(8, graph->zone()),
- inlined_closures_(1, graph->zone()) { }
+ LChunk(CompilationInfo* info, HGraph* graph);
int spill_slot_count_;
private:
CompilationInfo* info_;
HGraph* const graph_;
+ BitVector* allocated_double_registers_;
ZoneList<LInstruction*> instructions_;
ZoneList<LPointerMap*> pointer_maps_;
ZoneList<Handle<JSFunction> > inlined_closures_;
};
+int StackSlotOffset(int index);
+
+enum NumberUntagDMode {
+ NUMBER_CANDIDATE_IS_SMI,
+ NUMBER_CANDIDATE_IS_ANY_TAGGED
+};
+
+
+class LPhase : public CompilationPhase {
+ public:
+ LPhase(const char* name, LChunk* chunk)
+ : CompilationPhase(name, chunk->info()),
+ chunk_(chunk) { }
+ ~LPhase();
+
+ private:
+ LChunk* chunk_;
+
+ DISALLOW_COPY_AND_ASSIGN(LPhase);
+};
+
+
} } // namespace v8::internal
#endif // V8_LITHIUM_H_
diff --git a/deps/v8/src/liveedit-debugger.js b/deps/v8/src/liveedit-debugger.js
index cfcdb818c..4618eda36 100644
--- a/deps/v8/src/liveedit-debugger.js
+++ b/deps/v8/src/liveedit-debugger.js
@@ -76,7 +76,17 @@ Debug.LiveEdit = new function() {
try {
new_compile_info = GatherCompileInfo(new_source, script);
} catch (e) {
- throw new Failure("Failed to compile new version of script: " + e);
+ var failure =
+ new Failure("Failed to compile new version of script: " + e);
+ if (e instanceof SyntaxError) {
+ var details = {
+ type: "liveedit_compile_error",
+ syntaxErrorMessage: e.message
+ };
+ CopyErrorPositionToDetails(e, details);
+ failure.details = details;
+ }
+ throw failure;
}
var root_new_node = BuildCodeInfoTree(new_compile_info);
@@ -176,7 +186,7 @@ Debug.LiveEdit = new function() {
// to old version.
if (link_to_old_script_list.length == 0) {
%LiveEditReplaceScript(script, new_source, null);
- old_script = void 0;
+ old_script = UNDEFINED;
} else {
var old_script_name = CreateNameForOldScript(script);
@@ -211,7 +221,7 @@ Debug.LiveEdit = new function() {
change_log.push( {position_patched: position_patch_report} );
for (var i = 0; i < update_positions_list.length; i++) {
- // TODO(LiveEdit): take into account wether it's source_changed or
+ // TODO(LiveEdit): take into account whether it's source_changed or
// unchanged and whether positions changed at all.
PatchPositions(update_positions_list[i], diff_array,
position_patch_report);
@@ -256,7 +266,7 @@ Debug.LiveEdit = new function() {
// LiveEdit itself believe that any function in heap that points to a
// particular script is a regular function.
// For some functions we will restore this link later.
- %LiveEditFunctionSetScript(info.shared_function_info, void 0);
+ %LiveEditFunctionSetScript(info.shared_function_info, UNDEFINED);
compile_info.push(info);
old_index_map.push(i);
}
@@ -278,7 +288,7 @@ Debug.LiveEdit = new function() {
}
}
- // After sorting update outer_inder field using old_index_map. Also
+ // After sorting update outer_index field using old_index_map. Also
// set next_sibling_index field.
var current_index = 0;
@@ -532,16 +542,16 @@ Debug.LiveEdit = new function() {
this.children = children;
// an index in array of compile_info
this.array_index = array_index;
- this.parent = void 0;
+ this.parent = UNDEFINED;
this.status = FunctionStatus.UNCHANGED;
// Status explanation is used for debugging purposes and will be shown
// in user UI if some explanations are needed.
- this.status_explanation = void 0;
- this.new_start_pos = void 0;
- this.new_end_pos = void 0;
- this.corresponding_node = void 0;
- this.unmatched_new_nodes = void 0;
+ this.status_explanation = UNDEFINED;
+ this.new_start_pos = UNDEFINED;
+ this.new_end_pos = UNDEFINED;
+ this.corresponding_node = UNDEFINED;
+ this.unmatched_new_nodes = UNDEFINED;
// 'Textual' correspondence/matching is weaker than 'pure'
// correspondence/matching. We need 'textual' level for visual presentation
@@ -549,10 +559,10 @@ Debug.LiveEdit = new function() {
// Sometimes only function body is changed (functions in old and new script
// textually correspond), but we cannot patch the code, so we see them
// as an old function deleted and new function created.
- this.textual_corresponding_node = void 0;
- this.textually_unmatched_new_nodes = void 0;
+ this.textual_corresponding_node = UNDEFINED;
+ this.textually_unmatched_new_nodes = UNDEFINED;
- this.live_shared_function_infos = void 0;
+ this.live_shared_function_infos = UNDEFINED;
}
// From array of function infos that is implicitly a tree creates
@@ -682,10 +692,10 @@ Debug.LiveEdit = new function() {
ProcessInternals(code_info_tree);
}
- // For ecah old function (if it is not damaged) tries to find a corresponding
+ // For each old function (if it is not damaged) tries to find a corresponding
// function in new script. Typically it should succeed (non-damaged functions
// by definition may only have changes inside their bodies). However there are
- // reasons for corresponence not to be found; function with unmodified text
+ // reasons for correspondence not to be found; function with unmodified text
// in new script may become enclosed into other function; the innocent change
// inside function body may in fact be something like "} function B() {" that
// splits a function into 2 functions.
@@ -693,7 +703,13 @@ Debug.LiveEdit = new function() {
// A recursive function that tries to find a correspondence for all
// child functions and for their inner functions.
- function ProcessChildren(old_node, new_node) {
+ function ProcessNode(old_node, new_node) {
+ var scope_change_description =
+ IsFunctionContextLocalsChanged(old_node.info, new_node.info);
+ if (scope_change_description) {
+ old_node.status = FunctionStatus.CHANGED;
+ }
+
var old_children = old_node.children;
var new_children = new_node.children;
@@ -719,13 +735,20 @@ Debug.LiveEdit = new function() {
new_children[new_index];
old_children[old_index].textual_corresponding_node =
new_children[new_index];
- if (old_children[old_index].status != FunctionStatus.UNCHANGED) {
- ProcessChildren(old_children[old_index],
+ if (scope_change_description) {
+ old_children[old_index].status = FunctionStatus.DAMAGED;
+ old_children[old_index].status_explanation =
+ "Enclosing function is now incompatible. " +
+ scope_change_description;
+ old_children[old_index].corresponding_node = UNDEFINED;
+ } else if (old_children[old_index].status !=
+ FunctionStatus.UNCHANGED) {
+ ProcessNode(old_children[old_index],
new_children[new_index]);
if (old_children[old_index].status == FunctionStatus.DAMAGED) {
unmatched_new_nodes_list.push(
old_children[old_index].corresponding_node);
- old_children[old_index].corresponding_node = void 0;
+ old_children[old_index].corresponding_node = UNDEFINED;
old_node.status = FunctionStatus.CHANGED;
}
}
@@ -762,11 +785,10 @@ Debug.LiveEdit = new function() {
}
if (old_node.status == FunctionStatus.CHANGED) {
- var why_wrong_expectations =
- WhyFunctionExpectationsDiffer(old_node.info, new_node.info);
- if (why_wrong_expectations) {
+ if (old_node.info.param_num != new_node.info.param_num) {
old_node.status = FunctionStatus.DAMAGED;
- old_node.status_explanation = why_wrong_expectations;
+ old_node.status_explanation = "Changed parameter number: " +
+ old_node.info.param_num + " and " + new_node.info.param_num;
}
}
old_node.unmatched_new_nodes = unmatched_new_nodes_list;
@@ -774,7 +796,7 @@ Debug.LiveEdit = new function() {
textually_unmatched_new_nodes_list;
}
- ProcessChildren(old_code_tree, new_code_tree);
+ ProcessNode(old_code_tree, new_code_tree);
old_code_tree.corresponding_node = new_code_tree;
old_code_tree.textual_corresponding_node = new_code_tree;
@@ -846,7 +868,7 @@ Debug.LiveEdit = new function() {
this.raw_array = raw_array;
}
- // Changes positions (including all statments) in function.
+ // Changes positions (including all statements) in function.
function PatchPositions(old_info_node, diff_array, report_array) {
if (old_info_node.live_shared_function_infos) {
old_info_node.live_shared_function_infos.forEach(function (info) {
@@ -868,15 +890,9 @@ Debug.LiveEdit = new function() {
return script.name + " (old)";
}
- // Compares a function interface old and new version, whether it
+ // Compares a function scope heap structure, old and new version, whether it
// changed or not. Returns explanation if they differ.
- function WhyFunctionExpectationsDiffer(function_info1, function_info2) {
- // Check that function has the same number of parameters (there may exist
- // an adapter, that won't survive function parameter number change).
- if (function_info1.param_num != function_info2.param_num) {
- return "Changed parameter number: " + function_info1.param_num +
- " and " + function_info2.param_num;
- }
+ function IsFunctionContextLocalsChanged(function_info1, function_info2) {
var scope_info1 = function_info1.scope_info;
var scope_info2 = function_info2.scope_info;
@@ -895,8 +911,8 @@ Debug.LiveEdit = new function() {
}
if (scope_info1_text != scope_info2_text) {
- return "Incompatible variable maps: [" + scope_info1_text +
- "] and [" + scope_info2_text + "]";
+ return "Variable map changed: [" + scope_info1_text +
+ "] => [" + scope_info2_text + "]";
}
// No differences. Return undefined.
return;
@@ -978,6 +994,31 @@ Debug.LiveEdit = new function() {
return "LiveEdit Failure: " + this.message;
};
+ function CopyErrorPositionToDetails(e, details) {
+ function createPositionStruct(script, position) {
+ if (position == -1) return;
+ var location = script.locationFromPosition(position, true);
+ if (location == null) return;
+ return {
+ line: location.line + 1,
+ column: location.column + 1,
+ position: position
+ };
+ }
+
+ if (!("scriptObject" in e) || !("startPosition" in e)) {
+ return;
+ }
+
+ var script = e.scriptObject;
+
+ var position_struct = {
+ start: createPositionStruct(script, e.startPosition),
+ end: createPositionStruct(script, e.endPosition)
+ };
+ details.position = position_struct;
+ }
+
// A testing entry.
function GetPcFromSourcePos(func, source_pos) {
return %GetFunctionCodePositionFromSource(func, source_pos);
diff --git a/deps/v8/src/liveedit.cc b/deps/v8/src/liveedit.cc
index 2a3aafc1f..3d459d4ff 100644
--- a/deps/v8/src/liveedit.cc
+++ b/deps/v8/src/liveedit.cc
@@ -36,6 +36,7 @@
#include "debug.h"
#include "deoptimizer.h"
#include "global-handles.h"
+#include "messages.h"
#include "parser.h"
#include "scopeinfo.h"
#include "scopes.h"
@@ -60,6 +61,7 @@ void SetElementNonStrict(Handle<JSObject> object,
USE(no_failure);
}
+
// A simple implementation of dynamic programming algorithm. It solves
// the problem of finding the difference of 2 arrays. It uses a table of results
// of subproblems. Each cell contains a number together with 2-bit flag
@@ -348,23 +350,26 @@ static void NarrowDownInput(SubrangableInput* input,
// Each chunk is stored as 3 array elements: (pos1_begin, pos1_end, pos2_end).
class CompareOutputArrayWriter {
public:
- CompareOutputArrayWriter()
- : array_(FACTORY->NewJSArray(10)), current_size_(0) {}
+ explicit CompareOutputArrayWriter(Isolate* isolate)
+ : array_(isolate->factory()->NewJSArray(10)), current_size_(0) {}
Handle<JSArray> GetResult() {
return array_;
}
void WriteChunk(int char_pos1, int char_pos2, int char_len1, int char_len2) {
+ Isolate* isolate = array_->GetIsolate();
SetElementNonStrict(array_,
- current_size_,
- Handle<Object>(Smi::FromInt(char_pos1)));
+ current_size_,
+ Handle<Object>(Smi::FromInt(char_pos1), isolate));
SetElementNonStrict(array_,
current_size_ + 1,
- Handle<Object>(Smi::FromInt(char_pos1 + char_len1)));
+ Handle<Object>(Smi::FromInt(char_pos1 + char_len1),
+ isolate));
SetElementNonStrict(array_,
current_size_ + 2,
- Handle<Object>(Smi::FromInt(char_pos2 + char_len2)));
+ Handle<Object>(Smi::FromInt(char_pos2 + char_len2),
+ isolate));
current_size_ += 3;
}
@@ -526,7 +531,8 @@ class TokenizingLineArrayCompareOutput : public SubrangableOutput {
TokenizingLineArrayCompareOutput(LineEndsWrapper line_ends1,
LineEndsWrapper line_ends2,
Handle<String> s1, Handle<String> s2)
- : line_ends1_(line_ends1), line_ends2_(line_ends2), s1_(s1), s2_(s2),
+ : array_writer_(s1->GetIsolate()),
+ line_ends1_(line_ends1), line_ends2_(line_ends2), s1_(s1), s2_(s2),
subrange_offset1_(0), subrange_offset2_(0) {
}
@@ -541,7 +547,7 @@ class TokenizingLineArrayCompareOutput : public SubrangableOutput {
if (char_len1 < CHUNK_LEN_LIMIT && char_len2 < CHUNK_LEN_LIMIT) {
// Chunk is small enough to conduct a nested token-level diff.
- HandleScope subTaskScope;
+ HandleScope subTaskScope(s1_->GetIsolate());
TokensCompareInput tokens_input(s1_, char_pos1, char_len1,
s2_, char_pos2, char_len2);
@@ -604,7 +610,7 @@ static void CompileScriptForTracker(Isolate* isolate, Handle<Script> script) {
CompilationInfoWithZone info(script);
info.MarkAsGlobal();
// Parse and don't allow skipping lazy functions.
- if (ParserApi::Parse(&info, kNoParsingFlags)) {
+ if (Parser::Parse(&info)) {
// Compile the code.
LiveEditFunctionTracker tracker(info.isolate(), info.function());
if (Compiler::MakeCodeForLiveEdit(&info)) {
@@ -619,17 +625,17 @@ static void CompileScriptForTracker(Isolate* isolate, Handle<Script> script) {
// Unwraps JSValue object, returning its field "value"
static Handle<Object> UnwrapJSValue(Handle<JSValue> jsValue) {
- return Handle<Object>(jsValue->value());
+ return Handle<Object>(jsValue->value(), jsValue->GetIsolate());
}
// Wraps any object into a OpaqueReference, that will hide the object
// from JavaScript.
-static Handle<JSValue> WrapInJSValue(Handle<Object> object) {
- Handle<JSFunction> constructor =
- Isolate::Current()->opaque_reference_function();
+static Handle<JSValue> WrapInJSValue(Handle<HeapObject> object) {
+ Isolate* isolate = object->GetIsolate();
+ Handle<JSFunction> constructor = isolate->opaque_reference_function();
Handle<JSValue> result =
- Handle<JSValue>::cast(FACTORY->NewJSObject(constructor));
+ Handle<JSValue>::cast(isolate->factory()->NewJSObject(constructor));
result->set_value(*object);
return result;
}
@@ -656,8 +662,9 @@ static int GetArrayLength(Handle<JSArray> array) {
template<typename S>
class JSArrayBasedStruct {
public:
- static S Create() {
- Handle<JSArray> array = FACTORY->NewJSArray(S::kSize_);
+ static S Create(Isolate* isolate) {
+ Factory* factory = isolate->factory();
+ Handle<JSArray> array = factory->NewJSArray(S::kSize_);
return S(array);
}
static S cast(Object* object) {
@@ -670,6 +677,9 @@ class JSArrayBasedStruct {
Handle<JSArray> GetJSArray() {
return array_;
}
+ Isolate* isolate() const {
+ return array_->GetIsolate();
+ }
protected:
void SetField(int field_position, Handle<Object> value) {
@@ -678,10 +688,10 @@ class JSArrayBasedStruct {
void SetSmiValueField(int field_position, int value) {
SetElementNonStrict(array_,
field_position,
- Handle<Smi>(Smi::FromInt(value)));
+ Handle<Smi>(Smi::FromInt(value), isolate()));
}
Object* GetField(int field_position) {
- return array_->GetElementNoExceptionThrown(field_position);
+ return array_->GetElementNoExceptionThrown(isolate(), field_position);
}
int GetSmiValueField(int field_position) {
Object* res = GetField(field_position);
@@ -703,29 +713,34 @@ class FunctionInfoWrapper : public JSArrayBasedStruct<FunctionInfoWrapper> {
: JSArrayBasedStruct<FunctionInfoWrapper>(array) {
}
void SetInitialProperties(Handle<String> name, int start_position,
- int end_position, int param_num, int parent_index) {
- HandleScope scope;
+ int end_position, int param_num,
+ int literal_count, int parent_index) {
+ HandleScope scope(isolate());
this->SetField(kFunctionNameOffset_, name);
this->SetSmiValueField(kStartPositionOffset_, start_position);
this->SetSmiValueField(kEndPositionOffset_, end_position);
this->SetSmiValueField(kParamNumOffset_, param_num);
+ this->SetSmiValueField(kLiteralNumOffset_, literal_count);
this->SetSmiValueField(kParentIndexOffset_, parent_index);
}
void SetFunctionCode(Handle<Code> function_code,
- Handle<Object> code_scope_info) {
+ Handle<HeapObject> code_scope_info) {
Handle<JSValue> code_wrapper = WrapInJSValue(function_code);
this->SetField(kCodeOffset_, code_wrapper);
Handle<JSValue> scope_wrapper = WrapInJSValue(code_scope_info);
this->SetField(kCodeScopeInfoOffset_, scope_wrapper);
}
- void SetOuterScopeInfo(Handle<Object> scope_info_array) {
- this->SetField(kOuterScopeInfoOffset_, scope_info_array);
+ void SetFunctionScopeInfo(Handle<Object> scope_info_array) {
+ this->SetField(kFunctionScopeInfoOffset_, scope_info_array);
}
void SetSharedFunctionInfo(Handle<SharedFunctionInfo> info) {
Handle<JSValue> info_holder = WrapInJSValue(info);
this->SetField(kSharedFunctionInfoOffset_, info_holder);
}
+ int GetLiteralCount() {
+ return this->GetSmiValueField(kLiteralNumOffset_);
+ }
int GetParentIndex() {
return this->GetSmiValueField(kParentIndexOffset_);
}
@@ -756,10 +771,11 @@ class FunctionInfoWrapper : public JSArrayBasedStruct<FunctionInfoWrapper> {
static const int kParamNumOffset_ = 3;
static const int kCodeOffset_ = 4;
static const int kCodeScopeInfoOffset_ = 5;
- static const int kOuterScopeInfoOffset_ = 6;
+ static const int kFunctionScopeInfoOffset_ = 6;
static const int kParentIndexOffset_ = 7;
static const int kSharedFunctionInfoOffset_ = 8;
- static const int kSize_ = 9;
+ static const int kLiteralNumOffset_ = 9;
+ static const int kSize_ = 10;
friend class JSArrayBasedStruct<FunctionInfoWrapper>;
};
@@ -772,7 +788,8 @@ class SharedInfoWrapper : public JSArrayBasedStruct<SharedInfoWrapper> {
public:
static bool IsInstance(Handle<JSArray> array) {
return array->length() == Smi::FromInt(kSize_) &&
- array->GetElementNoExceptionThrown(kSharedInfoOffset_)->IsJSValue();
+ array->GetElementNoExceptionThrown(
+ array->GetIsolate(), kSharedInfoOffset_)->IsJSValue();
}
explicit SharedInfoWrapper(Handle<JSArray> array)
@@ -781,7 +798,7 @@ class SharedInfoWrapper : public JSArrayBasedStruct<SharedInfoWrapper> {
void SetProperties(Handle<String> name, int start_position, int end_position,
Handle<SharedFunctionInfo> info) {
- HandleScope scope;
+ HandleScope scope(isolate());
this->SetField(kFunctionNameOffset_, name);
Handle<JSValue> info_holder = WrapInJSValue(info);
this->SetField(kSharedInfoOffset_, info_holder);
@@ -808,17 +825,18 @@ class SharedInfoWrapper : public JSArrayBasedStruct<SharedInfoWrapper> {
class FunctionInfoListener {
public:
- FunctionInfoListener() {
+ explicit FunctionInfoListener(Isolate* isolate) {
current_parent_index_ = -1;
len_ = 0;
- result_ = FACTORY->NewJSArray(10);
+ result_ = isolate->factory()->NewJSArray(10);
}
void FunctionStarted(FunctionLiteral* fun) {
- HandleScope scope;
- FunctionInfoWrapper info = FunctionInfoWrapper::Create();
+ HandleScope scope(isolate());
+ FunctionInfoWrapper info = FunctionInfoWrapper::Create(isolate());
info.SetInitialProperties(fun->name(), fun->start_position(),
fun->end_position(), fun->parameter_count(),
+ fun->materialized_literal_count(),
current_parent_index_);
current_parent_index_ = len_;
SetElementNonStrict(result_, len_, info.GetJSArray());
@@ -826,10 +844,11 @@ class FunctionInfoListener {
}
void FunctionDone() {
- HandleScope scope;
+ HandleScope scope(isolate());
FunctionInfoWrapper info =
FunctionInfoWrapper::cast(
- result_->GetElementNoExceptionThrown(current_parent_index_));
+ result_->GetElementNoExceptionThrown(
+ isolate(), current_parent_index_));
current_parent_index_ = info.GetParentIndex();
}
@@ -838,8 +857,10 @@ class FunctionInfoListener {
void FunctionCode(Handle<Code> function_code) {
FunctionInfoWrapper info =
FunctionInfoWrapper::cast(
- result_->GetElementNoExceptionThrown(current_parent_index_));
- info.SetFunctionCode(function_code, Handle<Object>(HEAP->null_value()));
+ result_->GetElementNoExceptionThrown(
+ isolate(), current_parent_index_));
+ info.SetFunctionCode(function_code,
+ Handle<HeapObject>(isolate()->heap()->null_value()));
}
// Saves full information about a function: its code, its scope info
@@ -851,35 +872,37 @@ class FunctionInfoListener {
}
FunctionInfoWrapper info =
FunctionInfoWrapper::cast(
- result_->GetElementNoExceptionThrown(current_parent_index_));
+ result_->GetElementNoExceptionThrown(
+ isolate(), current_parent_index_));
info.SetFunctionCode(Handle<Code>(shared->code()),
- Handle<Object>(shared->scope_info()));
+ Handle<HeapObject>(shared->scope_info()));
info.SetSharedFunctionInfo(shared);
- Handle<Object> scope_info_list(SerializeFunctionScope(scope, zone));
- info.SetOuterScopeInfo(scope_info_list);
+ Handle<Object> scope_info_list(SerializeFunctionScope(scope, zone),
+ isolate());
+ info.SetFunctionScopeInfo(scope_info_list);
}
Handle<JSArray> GetResult() { return result_; }
private:
+ Isolate* isolate() const { return result_->GetIsolate(); }
+
Object* SerializeFunctionScope(Scope* scope, Zone* zone) {
- HandleScope handle_scope;
+ HandleScope handle_scope(isolate());
- Handle<JSArray> scope_info_list = FACTORY->NewJSArray(10);
+ Handle<JSArray> scope_info_list = isolate()->factory()->NewJSArray(10);
int scope_info_length = 0;
// Saves some description of scope. It stores names and indexes of
// variables in the whole scope chain. Null-named slots delimit
// scopes of this chain.
- Scope* outer_scope = scope->outer_scope();
- if (outer_scope == NULL) {
- return HEAP->undefined_value();
- }
- do {
- ZoneList<Variable*> stack_list(outer_scope->StackLocalCount(), zone);
- ZoneList<Variable*> context_list(outer_scope->ContextLocalCount(), zone);
- outer_scope->CollectStackAndContextLocals(&stack_list, &context_list);
+ Scope* current_scope = scope;
+ while (current_scope != NULL) {
+ ZoneList<Variable*> stack_list(current_scope->StackLocalCount(), zone);
+ ZoneList<Variable*> context_list(
+ current_scope->ContextLocalCount(), zone);
+ current_scope->CollectStackAndContextLocals(&stack_list, &context_list);
context_list.Sort(&Variable::CompareIndex);
for (int i = 0; i < context_list.length(); i++) {
@@ -890,16 +913,17 @@ class FunctionInfoListener {
SetElementNonStrict(
scope_info_list,
scope_info_length,
- Handle<Smi>(Smi::FromInt(context_list[i]->index())));
+ Handle<Smi>(Smi::FromInt(context_list[i]->index()), isolate()));
scope_info_length++;
}
SetElementNonStrict(scope_info_list,
scope_info_length,
- Handle<Object>(HEAP->null_value()));
+ Handle<Object>(isolate()->heap()->null_value(),
+ isolate()));
scope_info_length++;
- outer_scope = outer_scope->outer_scope();
- } while (outer_scope != NULL);
+ current_scope = current_scope->outer_scope();
+ }
return *scope_info_list;
}
@@ -912,27 +936,80 @@ class FunctionInfoListener {
JSArray* LiveEdit::GatherCompileInfo(Handle<Script> script,
Handle<String> source) {
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = script->GetIsolate();
- FunctionInfoListener listener;
- Handle<Object> original_source = Handle<Object>(script->source());
+ FunctionInfoListener listener(isolate);
+ Handle<Object> original_source =
+ Handle<Object>(script->source(), isolate);
script->set_source(*source);
isolate->set_active_function_info_listener(&listener);
- CompileScriptForTracker(isolate, script);
+
+ {
+ // Creating verbose TryCatch from public API is currently the only way to
+    // force code save location. We do not use the object directly.
+ v8::TryCatch try_catch;
+ try_catch.SetVerbose(true);
+
+ // A logical 'try' section.
+ CompileScriptForTracker(isolate, script);
+ }
+
+ // A logical 'catch' section.
+ Handle<JSObject> rethrow_exception;
+ if (isolate->has_pending_exception()) {
+ Handle<Object> exception(isolate->pending_exception()->ToObjectChecked(),
+ isolate);
+ MessageLocation message_location = isolate->GetMessageLocation();
+
+ isolate->clear_pending_message();
+ isolate->clear_pending_exception();
+
+ // If possible, copy positions from message object to exception object.
+ if (exception->IsJSObject() && !message_location.script().is_null()) {
+ rethrow_exception = Handle<JSObject>::cast(exception);
+
+ Factory* factory = isolate->factory();
+ Handle<String> start_pos_key = factory->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("startPosition"));
+ Handle<String> end_pos_key = factory->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("endPosition"));
+ Handle<String> script_obj_key = factory->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("scriptObject"));
+ Handle<Smi> start_pos(
+ Smi::FromInt(message_location.start_pos()), isolate);
+ Handle<Smi> end_pos(Smi::FromInt(message_location.end_pos()), isolate);
+ Handle<JSValue> script_obj = GetScriptWrapper(message_location.script());
+ JSReceiver::SetProperty(
+ rethrow_exception, start_pos_key, start_pos, NONE, kNonStrictMode);
+ JSReceiver::SetProperty(
+ rethrow_exception, end_pos_key, end_pos, NONE, kNonStrictMode);
+ JSReceiver::SetProperty(
+ rethrow_exception, script_obj_key, script_obj, NONE, kNonStrictMode);
+ }
+ }
+
+ // A logical 'finally' section.
isolate->set_active_function_info_listener(NULL);
script->set_source(*original_source);
- return *(listener.GetResult());
+ if (rethrow_exception.is_null()) {
+ return *(listener.GetResult());
+ } else {
+ isolate->Throw(*rethrow_exception);
+ return 0;
+ }
}
void LiveEdit::WrapSharedFunctionInfos(Handle<JSArray> array) {
- HandleScope scope;
+ Isolate* isolate = array->GetIsolate();
+ HandleScope scope(isolate);
int len = GetArrayLength(array);
for (int i = 0; i < len; i++) {
Handle<SharedFunctionInfo> info(
- SharedFunctionInfo::cast(array->GetElementNoExceptionThrown(i)));
- SharedInfoWrapper info_wrapper = SharedInfoWrapper::Create();
+ SharedFunctionInfo::cast(
+ array->GetElementNoExceptionThrown(isolate, i)));
+ SharedInfoWrapper info_wrapper = SharedInfoWrapper::Create(isolate);
Handle<String> name_handle(String::cast(info->name()));
info_wrapper.SetProperties(name_handle, info->start_position(),
info->end_position(), info);
@@ -991,29 +1068,153 @@ static void ReplaceCodeObject(Handle<Code> original,
// Since we are not in an incremental marking phase we can write pointers
// to code objects (that are never in new space) without worrying about
// write barriers.
- HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask,
+ Heap* heap = original->GetHeap();
+ heap->CollectAllGarbage(Heap::kMakeHeapIterableMask,
"liveedit.cc ReplaceCodeObject");
- ASSERT(!HEAP->InNewSpace(*substitution));
+ ASSERT(!heap->InNewSpace(*substitution));
- AssertNoAllocation no_allocations_please;
+ DisallowHeapAllocation no_allocation;
ReplacingVisitor visitor(*original, *substitution);
// Iterate over all roots. Stack frames may have pointers into original code,
// so temporarily replace the pointers with offset numbers
// in prologue/epilogue.
- HEAP->IterateRoots(&visitor, VISIT_ALL);
+ heap->IterateRoots(&visitor, VISIT_ALL);
// Now iterate over all pointers of all objects, including code_target
// implicit pointers.
- HeapIterator iterator;
+ HeapIterator iterator(heap);
for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
obj->Iterate(&visitor);
}
}
+// Patches function literals.
+// The name 'literals' is a misnomer: it is really a cache for complex object
+// boilerplates and for a native context. We must clear the cached values.
+// Additionally, we may need to allocate a new array if the number of
+// literals has changed.
+class LiteralFixer {
+ public:
+ static void PatchLiterals(FunctionInfoWrapper* compile_info_wrapper,
+ Handle<SharedFunctionInfo> shared_info,
+ Isolate* isolate) {
+ int new_literal_count = compile_info_wrapper->GetLiteralCount();
+ if (new_literal_count > 0) {
+ new_literal_count += JSFunction::kLiteralsPrefixSize;
+ }
+ int old_literal_count = shared_info->num_literals();
+
+ if (old_literal_count == new_literal_count) {
+      // If the literal count didn't change, simply go over all functions
+      // and clear their literal arrays.
+ ClearValuesVisitor visitor;
+ IterateJSFunctions(*shared_info, &visitor);
+ } else {
+      // When the literal count changes, we have to create new array instances.
+      // Since we cannot create instances while iterating the heap, we first
+      // collect all functions and then fix their literal arrays.
+ Handle<FixedArray> function_instances =
+ CollectJSFunctions(shared_info, isolate);
+ for (int i = 0; i < function_instances->length(); i++) {
+ Handle<JSFunction> fun(JSFunction::cast(function_instances->get(i)));
+ Handle<FixedArray> old_literals(fun->literals());
+ Handle<FixedArray> new_literals =
+ isolate->factory()->NewFixedArray(new_literal_count);
+ if (new_literal_count > 0) {
+ Handle<Context> native_context;
+ if (old_literals->length() >
+ JSFunction::kLiteralNativeContextIndex) {
+ native_context = Handle<Context>(
+ JSFunction::NativeContextFromLiterals(fun->literals()));
+ } else {
+ native_context = Handle<Context>(fun->context()->native_context());
+ }
+ new_literals->set(JSFunction::kLiteralNativeContextIndex,
+ *native_context);
+ }
+ fun->set_literals(*new_literals);
+ }
+
+ shared_info->set_num_literals(new_literal_count);
+ }
+ }
+
+ private:
+  // Iterates over all function instances in the heap that refer to the
+ // provided shared_info.
+ template<typename Visitor>
+ static void IterateJSFunctions(SharedFunctionInfo* shared_info,
+ Visitor* visitor) {
+ DisallowHeapAllocation no_allocation;
+
+ HeapIterator iterator(shared_info->GetHeap());
+ for (HeapObject* obj = iterator.next(); obj != NULL;
+ obj = iterator.next()) {
+ if (obj->IsJSFunction()) {
+ JSFunction* function = JSFunction::cast(obj);
+ if (function->shared() == shared_info) {
+ visitor->visit(function);
+ }
+ }
+ }
+ }
+
+  // Finds all instances of JSFunction that refer to the provided shared_info
+  // and returns an array containing them.
+ static Handle<FixedArray> CollectJSFunctions(
+ Handle<SharedFunctionInfo> shared_info, Isolate* isolate) {
+ CountVisitor count_visitor;
+ count_visitor.count = 0;
+ IterateJSFunctions(*shared_info, &count_visitor);
+ int size = count_visitor.count;
+
+ Handle<FixedArray> result = isolate->factory()->NewFixedArray(size);
+ if (size > 0) {
+ CollectVisitor collect_visitor(result);
+ IterateJSFunctions(*shared_info, &collect_visitor);
+ }
+ return result;
+ }
+
+ class ClearValuesVisitor {
+ public:
+ void visit(JSFunction* fun) {
+ FixedArray* literals = fun->literals();
+ int len = literals->length();
+ for (int j = JSFunction::kLiteralsPrefixSize; j < len; j++) {
+ literals->set_undefined(j);
+ }
+ }
+ };
+
+ class CountVisitor {
+ public:
+ void visit(JSFunction* fun) {
+ count++;
+ }
+ int count;
+ };
+
+ class CollectVisitor {
+ public:
+ explicit CollectVisitor(Handle<FixedArray> output)
+ : m_output(output), m_pos(0) {}
+
+ void visit(JSFunction* fun) {
+ m_output->set(m_pos, fun);
+ m_pos++;
+ }
+ private:
+ Handle<FixedArray> m_output;
+ int m_pos;
+ };
+};
+
+
// Check whether the code is natural function code (not a lazy-compile stub
// code).
static bool IsJSFunctionCode(Code* code) {
@@ -1023,14 +1224,16 @@ static bool IsJSFunctionCode(Code* code) {
// Returns true if an instance of candidate was inlined into function's code.
static bool IsInlined(JSFunction* function, SharedFunctionInfo* candidate) {
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
if (function->code()->kind() != Code::OPTIMIZED_FUNCTION) return false;
DeoptimizationInputData* data =
DeoptimizationInputData::cast(function->code()->deoptimization_data());
- if (data == HEAP->empty_fixed_array()) return false;
+ if (data == function->GetIsolate()->heap()->empty_fixed_array()) {
+ return false;
+ }
FixedArray* literals = data->LiteralArray();
@@ -1044,45 +1247,52 @@ static bool IsInlined(JSFunction* function, SharedFunctionInfo* candidate) {
}
-class DependentFunctionsDeoptimizingVisitor : public OptimizedFunctionVisitor {
+// Marks code that shares the same shared function info or has inlined
+// code that shares the same function info.
+class DependentFunctionMarker: public OptimizedFunctionVisitor {
public:
- explicit DependentFunctionsDeoptimizingVisitor(
- SharedFunctionInfo* function_info)
- : function_info_(function_info) {}
+ SharedFunctionInfo* shared_info_;
+ bool found_;
- virtual void EnterContext(Context* context) {
- }
+ explicit DependentFunctionMarker(SharedFunctionInfo* shared_info)
+ : shared_info_(shared_info), found_(false) { }
+ virtual void EnterContext(Context* context) { } // Don't care.
+ virtual void LeaveContext(Context* context) { } // Don't care.
virtual void VisitFunction(JSFunction* function) {
- if (function->shared() == function_info_ ||
- IsInlined(function, function_info_)) {
- Deoptimizer::DeoptimizeFunction(function);
+ // It should be guaranteed by the iterator that everything is optimized.
+ ASSERT(function->code()->kind() == Code::OPTIMIZED_FUNCTION);
+ if (shared_info_ == function->shared() ||
+ IsInlined(function, shared_info_)) {
+ // Mark the code for deoptimization.
+ function->code()->set_marked_for_deoptimization(true);
+ found_ = true;
}
}
-
- virtual void LeaveContext(Context* context) {
- }
-
- private:
- SharedFunctionInfo* function_info_;
};
static void DeoptimizeDependentFunctions(SharedFunctionInfo* function_info) {
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_allocation;
+ DependentFunctionMarker marker(function_info);
+ // TODO(titzer): need to traverse all optimized code to find OSR code here.
+ Deoptimizer::VisitAllOptimizedFunctions(function_info->GetIsolate(), &marker);
- DependentFunctionsDeoptimizingVisitor visitor(function_info);
- Deoptimizer::VisitAllOptimizedFunctions(&visitor);
+ if (marker.found_) {
+ // Only go through with the deoptimization if something was found.
+ Deoptimizer::DeoptimizeMarkedCode(function_info->GetIsolate());
+ }
}
MaybeObject* LiveEdit::ReplaceFunctionCode(
Handle<JSArray> new_compile_info_array,
Handle<JSArray> shared_info_array) {
- HandleScope scope;
+ Isolate* isolate = new_compile_info_array->GetIsolate();
+ HandleScope scope(isolate);
if (!SharedInfoWrapper::IsInstance(shared_info_array)) {
- return Isolate::Current()->ThrowIllegalOperation();
+ return isolate->ThrowIllegalOperation();
}
FunctionInfoWrapper compile_info_wrapper(new_compile_info_array);
@@ -1090,7 +1300,7 @@ MaybeObject* LiveEdit::ReplaceFunctionCode(
Handle<SharedFunctionInfo> shared_info = shared_info_wrapper.GetInfo();
- HEAP->EnsureHeapIsIterable();
+ isolate->heap()->EnsureHeapIsIterable();
if (IsJSFunctionCode(shared_info->code())) {
Handle<Code> code = compile_info_wrapper.GetFunctionCode();
@@ -1099,12 +1309,13 @@ MaybeObject* LiveEdit::ReplaceFunctionCode(
if (code_scope_info->IsFixedArray()) {
shared_info->set_scope_info(ScopeInfo::cast(*code_scope_info));
}
+ shared_info->DisableOptimization(kLiveEdit);
}
if (shared_info->debug_info()->IsDebugInfo()) {
Handle<DebugInfo> debug_info(DebugInfo::cast(shared_info->debug_info()));
Handle<Code> new_original_code =
- FACTORY->CopyCode(compile_info_wrapper.GetFunctionCode());
+ isolate->factory()->CopyCode(compile_info_wrapper.GetFunctionCode());
debug_info->set_original_code(*new_original_code);
}
@@ -1113,32 +1324,34 @@ MaybeObject* LiveEdit::ReplaceFunctionCode(
shared_info->set_start_position(start_position);
shared_info->set_end_position(end_position);
+ LiteralFixer::PatchLiterals(&compile_info_wrapper, shared_info, isolate);
+
shared_info->set_construct_stub(
- Isolate::Current()->builtins()->builtin(
- Builtins::kJSConstructStubGeneric));
+ isolate->builtins()->builtin(Builtins::kJSConstructStubGeneric));
DeoptimizeDependentFunctions(*shared_info);
- Isolate::Current()->compilation_cache()->Remove(shared_info);
+ isolate->compilation_cache()->Remove(shared_info);
- return HEAP->undefined_value();
+ return isolate->heap()->undefined_value();
}
MaybeObject* LiveEdit::FunctionSourceUpdated(
Handle<JSArray> shared_info_array) {
- HandleScope scope;
+ Isolate* isolate = shared_info_array->GetIsolate();
+ HandleScope scope(isolate);
if (!SharedInfoWrapper::IsInstance(shared_info_array)) {
- return Isolate::Current()->ThrowIllegalOperation();
+ return isolate->ThrowIllegalOperation();
}
SharedInfoWrapper shared_info_wrapper(shared_info_array);
Handle<SharedFunctionInfo> shared_info = shared_info_wrapper.GetInfo();
DeoptimizeDependentFunctions(*shared_info);
- Isolate::Current()->compilation_cache()->Remove(shared_info);
+ isolate->compilation_cache()->Remove(shared_info);
- return HEAP->undefined_value();
+ return isolate->heap()->undefined_value();
}
@@ -1149,7 +1362,7 @@ void LiveEdit::SetFunctionScript(Handle<JSValue> function_wrapper,
CHECK(script_handle->IsScript() || script_handle->IsUndefined());
shared_info->set_script(*script_handle);
- Isolate::Current()->compilation_cache()->Remove(shared_info);
+ function_wrapper->GetIsolate()->compilation_cache()->Remove(shared_info);
}
@@ -1166,20 +1379,24 @@ static int TranslatePosition(int original_position,
Handle<JSArray> position_change_array) {
int position_diff = 0;
int array_len = GetArrayLength(position_change_array);
+ Isolate* isolate = position_change_array->GetIsolate();
// TODO(635): binary search may be used here
for (int i = 0; i < array_len; i += 3) {
- Object* element = position_change_array->GetElementNoExceptionThrown(i);
+ Object* element =
+ position_change_array->GetElementNoExceptionThrown(isolate, i);
CHECK(element->IsSmi());
int chunk_start = Smi::cast(element)->value();
if (original_position < chunk_start) {
break;
}
- element = position_change_array->GetElementNoExceptionThrown(i + 1);
+ element = position_change_array->GetElementNoExceptionThrown(isolate,
+ i + 1);
CHECK(element->IsSmi());
int chunk_end = Smi::cast(element)->value();
// Position mustn't be inside a chunk.
ASSERT(original_position >= chunk_end);
- element = position_change_array->GetElementNoExceptionThrown(i + 2);
+ element = position_change_array->GetElementNoExceptionThrown(isolate,
+ i + 2);
CHECK(element->IsSmi());
int chunk_changed_end = Smi::cast(element)->value();
position_diff = chunk_changed_end - chunk_end;
@@ -1244,8 +1461,8 @@ class RelocInfoBuffer {
// Copy the data.
int curently_used_size =
static_cast<int>(buffer_ + buffer_size_ - reloc_info_writer_.pos());
- memmove(new_buffer + new_buffer_size - curently_used_size,
- reloc_info_writer_.pos(), curently_used_size);
+ OS::MemMove(new_buffer + new_buffer_size - curently_used_size,
+ reloc_info_writer_.pos(), curently_used_size);
reloc_info_writer_.Reposition(
new_buffer + new_buffer_size - curently_used_size,
@@ -1264,17 +1481,19 @@ class RelocInfoBuffer {
static const int kMaximalBufferSize = 512*MB;
};
+
// Patches positions in code (changes the relocation info section) and possibly
// returns a new instance of code.
static Handle<Code> PatchPositionsInCode(
Handle<Code> code,
Handle<JSArray> position_change_array) {
+ Isolate* isolate = code->GetIsolate();
RelocInfoBuffer buffer_writer(code->relocation_size(),
code->instruction_start());
{
- AssertNoAllocation no_allocations_please;
+ DisallowHeapAllocation no_allocation;
for (RelocIterator it(*code); !it.done(); it.next()) {
RelocInfo* rinfo = it.rinfo();
if (RelocInfo::IsPosition(rinfo->rmode())) {
@@ -1287,7 +1506,9 @@ static Handle<Code> PatchPositionsInCode(
continue;
}
}
- buffer_writer.Write(it.rinfo());
+ if (RelocInfo::IsRealRelocMode(rinfo->rmode())) {
+ buffer_writer.Write(it.rinfo());
+ }
}
}
@@ -1295,13 +1516,13 @@ static Handle<Code> PatchPositionsInCode(
if (buffer.length() == code->relocation_size()) {
// Simply patch relocation area of code.
- memcpy(code->relocation_start(), buffer.start(), buffer.length());
+ OS::MemCopy(code->relocation_start(), buffer.start(), buffer.length());
return code;
} else {
// Relocation info section now has different size. We cannot simply
// rewrite it inside code object. Instead we have to create a new
// code object.
- Handle<Code> result(FACTORY->CopyCode(code, buffer));
+ Handle<Code> result(isolate->factory()->CopyCode(code, buffer));
return result;
}
}
@@ -1310,7 +1531,7 @@ static Handle<Code> PatchPositionsInCode(
MaybeObject* LiveEdit::PatchFunctionPositions(
Handle<JSArray> shared_info_array, Handle<JSArray> position_change_array) {
if (!SharedInfoWrapper::IsInstance(shared_info_array)) {
- return Isolate::Current()->ThrowIllegalOperation();
+ return shared_info_array->GetIsolate()->ThrowIllegalOperation();
}
SharedInfoWrapper shared_info_wrapper(shared_info_array);
@@ -1328,7 +1549,7 @@ MaybeObject* LiveEdit::PatchFunctionPositions(
info->set_end_position(new_function_end);
info->set_function_token_position(new_function_token_pos);
- HEAP->EnsureHeapIsIterable();
+ info->GetIsolate()->heap()->EnsureHeapIsIterable();
if (IsJSFunctionCode(info->code())) {
// Patch relocation info section of the code.
@@ -1344,14 +1565,15 @@ MaybeObject* LiveEdit::PatchFunctionPositions(
}
}
- return HEAP->undefined_value();
+ return info->GetIsolate()->heap()->undefined_value();
}
static Handle<Script> CreateScriptCopy(Handle<Script> original) {
- Handle<String> original_source(String::cast(original->source()));
+ Isolate* isolate = original->GetIsolate();
- Handle<Script> copy = FACTORY->NewScript(original_source);
+ Handle<String> original_source(String::cast(original->source()));
+ Handle<Script> copy = isolate->factory()->NewScript(original_source);
copy->set_name(original->name());
copy->set_line_offset(original->line_offset());
@@ -1359,11 +1581,14 @@ static Handle<Script> CreateScriptCopy(Handle<Script> original) {
copy->set_data(original->data());
copy->set_type(original->type());
copy->set_context_data(original->context_data());
- copy->set_compilation_type(original->compilation_type());
copy->set_eval_from_shared(original->eval_from_shared());
copy->set_eval_from_instructions_offset(
original->eval_from_instructions_offset());
+ // Copy all the flags, but clear compilation state.
+ copy->set_flags(original->flags());
+ copy->set_compilation_state(Script::COMPILATION_STATE_INITIAL);
+
return copy;
}
@@ -1371,21 +1596,22 @@ static Handle<Script> CreateScriptCopy(Handle<Script> original) {
Object* LiveEdit::ChangeScriptSource(Handle<Script> original_script,
Handle<String> new_source,
Handle<Object> old_script_name) {
+ Isolate* isolate = original_script->GetIsolate();
Handle<Object> old_script_object;
if (old_script_name->IsString()) {
Handle<Script> old_script = CreateScriptCopy(original_script);
old_script->set_name(String::cast(*old_script_name));
old_script_object = old_script;
- Isolate::Current()->debugger()->OnAfterCompile(
+ isolate->debugger()->OnAfterCompile(
old_script, Debugger::SEND_WHEN_DEBUGGING);
} else {
- old_script_object = Handle<Object>(HEAP->null_value());
+ old_script_object = isolate->factory()->null_value();
}
original_script->set_source(*new_source);
// Drop line ends so that they will be recalculated.
- original_script->set_line_ends(HEAP->undefined_value());
+ original_script->set_line_ends(isolate->heap()->undefined_value());
return *old_script_object;
}
@@ -1422,19 +1648,21 @@ static bool CheckActivation(Handle<JSArray> shared_info_array,
LiveEdit::FunctionPatchabilityStatus status) {
if (!frame->is_java_script()) return false;
- Handle<JSFunction> function(
- JSFunction::cast(JavaScriptFrame::cast(frame)->function()));
+ Handle<JSFunction> function(JavaScriptFrame::cast(frame)->function());
+ Isolate* isolate = shared_info_array->GetIsolate();
int len = GetArrayLength(shared_info_array);
for (int i = 0; i < len; i++) {
- Object* element = shared_info_array->GetElementNoExceptionThrown(i);
+ Object* element =
+ shared_info_array->GetElementNoExceptionThrown(isolate, i);
CHECK(element->IsJSValue());
Handle<JSValue> jsvalue(JSValue::cast(element));
Handle<SharedFunctionInfo> shared =
UnwrapSharedFunctionInfoFromJSValue(jsvalue);
if (function->shared() == *shared || IsInlined(*function, *shared)) {
- SetElementNonStrict(result, i, Handle<Smi>(Smi::FromInt(status)));
+ SetElementNonStrict(result, i, Handle<Smi>(Smi::FromInt(status),
+ isolate));
return true;
}
}
@@ -1447,7 +1675,7 @@ static bool CheckActivation(Handle<JSArray> shared_info_array,
static bool FixTryCatchHandler(StackFrame* top_frame,
StackFrame* bottom_frame) {
Address* pointer_address =
- &Memory::Address_at(Isolate::Current()->get_address_from_id(
+ &Memory::Address_at(top_frame->isolate()->get_address_from_id(
Isolate::kHandlerAddress));
while (*pointer_address < top_frame->sp()) {
@@ -1483,11 +1711,11 @@ static const char* DropFrames(Vector<StackFrame*> frames,
ASSERT(bottom_js_frame->is_java_script());
// Check the nature of the top frame.
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = bottom_js_frame->isolate();
Code* pre_top_frame_code = pre_top_frame->LookupCode();
bool frame_has_padding;
if (pre_top_frame_code->is_inline_cache_stub() &&
- pre_top_frame_code->ic_state() == DEBUG_BREAK) {
+ pre_top_frame_code->is_debug_stub()) {
// OK, we can drop inline cache calls.
*mode = Debug::FRAME_DROPPED_IN_IC_CALL;
frame_has_padding = Debug::FramePaddingLayout::kIsSupported;
@@ -1565,9 +1793,9 @@ static const char* DropFrames(Vector<StackFrame*> frames,
StackFrame* pre_pre_frame = frames[top_frame_index - 2];
- memmove(padding_start + kPointerSize - shortage_bytes,
- padding_start + kPointerSize,
- Debug::FramePaddingLayout::kFrameBaseSize * kPointerSize);
+ OS::MemMove(padding_start + kPointerSize - shortage_bytes,
+ padding_start + kPointerSize,
+ Debug::FramePaddingLayout::kFrameBaseSize * kPointerSize);
pre_top_frame->UpdateFp(pre_top_frame->fp() - shortage_bytes);
pre_pre_frame->SetCallerFp(pre_top_frame->fp());
@@ -1586,7 +1814,7 @@ static const char* DropFrames(Vector<StackFrame*> frames,
// Make sure FixTryCatchHandler is idempotent.
ASSERT(!FixTryCatchHandler(pre_top_frame, bottom_js_frame));
- Handle<Code> code = Isolate::Current()->builtins()->FrameDropper_LiveEdit();
+ Handle<Code> code = isolate->builtins()->FrameDropper_LiveEdit();
*top_frame_pc_address = code->entry();
pre_top_frame->SetCallerFp(bottom_js_frame->fp());
@@ -1630,14 +1858,14 @@ class MultipleFunctionTarget {
Handle<JSArray> m_result;
};
+
// Drops all call frames matched by target and all frames above them.
template<typename TARGET>
static const char* DropActivationsInActiveThreadImpl(
- TARGET& target, bool do_drop, Zone* zone) {
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate, TARGET& target, bool do_drop) {
Debug* debug = isolate->debug();
- ZoneScope scope(zone, DELETE_ON_EXIT);
- Vector<StackFrame*> frames = CreateStackMap(zone);
+ Zone zone(isolate);
+ Vector<StackFrame*> frames = CreateStackMap(isolate, &zone);
int top_frame_index = -1;
@@ -1726,27 +1954,28 @@ static const char* DropActivationsInActiveThreadImpl(
return NULL;
}
+
// Fills result array with statuses of functions. Modifies the stack,
// removing all listed functions if possible and if do_drop is true.
static const char* DropActivationsInActiveThread(
- Handle<JSArray> shared_info_array, Handle<JSArray> result, bool do_drop,
- Zone* zone) {
+ Handle<JSArray> shared_info_array, Handle<JSArray> result, bool do_drop) {
MultipleFunctionTarget target(shared_info_array, result);
- const char* message =
- DropActivationsInActiveThreadImpl(target, do_drop, zone);
+ const char* message = DropActivationsInActiveThreadImpl(
+ shared_info_array->GetIsolate(), target, do_drop);
if (message) {
return message;
}
+ Isolate* isolate = shared_info_array->GetIsolate();
int array_len = GetArrayLength(shared_info_array);
// Replace "blocked on active" with "replaced on active" status.
for (int i = 0; i < array_len; i++) {
- if (result->GetElement(i) ==
+ if (result->GetElement(result->GetIsolate(), i) ==
Smi::FromInt(LiveEdit::FUNCTION_BLOCKED_ON_ACTIVE_STACK)) {
Handle<Object> replaced(
- Smi::FromInt(LiveEdit::FUNCTION_REPLACED_ON_ACTIVE_STACK));
+ Smi::FromInt(LiveEdit::FUNCTION_REPLACED_ON_ACTIVE_STACK), isolate);
SetElementNonStrict(result, i, replaced);
}
}
@@ -1780,24 +2009,25 @@ class InactiveThreadActivationsChecker : public ThreadVisitor {
Handle<JSArray> LiveEdit::CheckAndDropActivations(
- Handle<JSArray> shared_info_array, bool do_drop, Zone* zone) {
+ Handle<JSArray> shared_info_array, bool do_drop) {
+ Isolate* isolate = shared_info_array->GetIsolate();
int len = GetArrayLength(shared_info_array);
- Handle<JSArray> result = FACTORY->NewJSArray(len);
+ Handle<JSArray> result = isolate->factory()->NewJSArray(len);
// Fill the default values.
for (int i = 0; i < len; i++) {
SetElementNonStrict(
result,
i,
- Handle<Smi>(Smi::FromInt(FUNCTION_AVAILABLE_FOR_PATCH)));
+ Handle<Smi>(Smi::FromInt(FUNCTION_AVAILABLE_FOR_PATCH), isolate));
}
// First check inactive threads. Fail if some functions are blocked there.
InactiveThreadActivationsChecker inactive_threads_checker(shared_info_array,
result);
- Isolate::Current()->thread_manager()->IterateArchivedThreads(
+ isolate->thread_manager()->IterateArchivedThreads(
&inactive_threads_checker);
if (inactive_threads_checker.HasBlockedFunctions()) {
return result;
@@ -1805,11 +2035,11 @@ Handle<JSArray> LiveEdit::CheckAndDropActivations(
// Try to drop activations from the current stack.
const char* error_message =
- DropActivationsInActiveThread(shared_info_array, result, do_drop, zone);
+ DropActivationsInActiveThread(shared_info_array, result, do_drop);
if (error_message != NULL) {
// Add the error message as an extra element of the array.
Vector<const char> vector_message(error_message, StrLength(error_message));
- Handle<String> str = FACTORY->NewStringFromAscii(vector_message);
+ Handle<String> str = isolate->factory()->NewStringFromAscii(vector_message);
SetElementNonStrict(result, len, str);
}
return result;
@@ -1846,10 +2076,11 @@ class SingleFrameTarget {
// Finds and drops the required frame and all frames above it.
// Returns error message or NULL.
-const char* LiveEdit::RestartFrame(JavaScriptFrame* frame, Zone* zone) {
+const char* LiveEdit::RestartFrame(JavaScriptFrame* frame) {
SingleFrameTarget target(frame);
- const char* result = DropActivationsInActiveThreadImpl(target, true, zone);
+ const char* result = DropActivationsInActiveThreadImpl(
+ frame->isolate(), target, true);
if (result != NULL) {
return result;
}
diff --git a/deps/v8/src/liveedit.h b/deps/v8/src/liveedit.h
index 5b12854d8..0efbb95cc 100644
--- a/deps/v8/src/liveedit.h
+++ b/deps/v8/src/liveedit.h
@@ -121,11 +121,11 @@ class LiveEdit : AllStatic {
// has restart the lowest found frames and drops all other frames above
// if possible and if do_drop is true.
static Handle<JSArray> CheckAndDropActivations(
- Handle<JSArray> shared_info_array, bool do_drop, Zone* zone);
+ Handle<JSArray> shared_info_array, bool do_drop);
// Restarts the call frame and completely drops all frames above it.
// Returns error message or NULL.
- static const char* RestartFrame(JavaScriptFrame* frame, Zone* zone);
+ static const char* RestartFrame(JavaScriptFrame* frame);
// A copy of this is in liveedit-debugger.js.
enum FunctionPatchabilityStatus {
diff --git a/deps/v8/src/liveobjectlist-inl.h b/deps/v8/src/liveobjectlist-inl.h
deleted file mode 100644
index 2bc2296e2..000000000
--- a/deps/v8/src/liveobjectlist-inl.h
+++ /dev/null
@@ -1,126 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_LIVEOBJECTLIST_INL_H_
-#define V8_LIVEOBJECTLIST_INL_H_
-
-#include "v8.h"
-
-#include "liveobjectlist.h"
-
-namespace v8 {
-namespace internal {
-
-#ifdef LIVE_OBJECT_LIST
-
-void LiveObjectList::GCEpilogue() {
- if (!NeedLOLProcessing()) return;
- GCEpiloguePrivate();
-}
-
-
-void LiveObjectList::GCPrologue() {
- if (!NeedLOLProcessing()) return;
-#ifdef VERIFY_LOL
- if (FLAG_verify_lol) {
- Verify();
- }
-#endif
-}
-
-
-void LiveObjectList::IterateElements(ObjectVisitor* v) {
- if (!NeedLOLProcessing()) return;
- IterateElementsPrivate(v);
-}
-
-
-void LiveObjectList::ProcessNonLive(HeapObject* obj) {
- // Only do work if we have at least one list to process.
- if (last()) DoProcessNonLive(obj);
-}
-
-
-void LiveObjectList::UpdateReferencesForScavengeGC() {
- if (LiveObjectList::NeedLOLProcessing()) {
- UpdateLiveObjectListVisitor update_visitor;
- LiveObjectList::IterateElements(&update_visitor);
- }
-}
-
-
-LiveObjectList* LiveObjectList::FindLolForId(int id,
- LiveObjectList* start_lol) {
- if (id != 0) {
- LiveObjectList* lol = start_lol;
- while (lol != NULL) {
- if (lol->id() == id) {
- return lol;
- }
- lol = lol->prev_;
- }
- }
- return NULL;
-}
-
-
-// Iterates the elements in every lol and returns the one that matches the
-// specified key. If no matching element is found, then it returns NULL.
-template <typename T>
-inline LiveObjectList::Element*
-LiveObjectList::FindElementFor(T (*GetValue)(LiveObjectList::Element*), T key) {
- LiveObjectList* lol = last();
- while (lol != NULL) {
- Element* elements = lol->elements_;
- for (int i = 0; i < lol->obj_count_; i++) {
- Element* element = &elements[i];
- if (GetValue(element) == key) {
- return element;
- }
- }
- lol = lol->prev_;
- }
- return NULL;
-}
-
-
-inline int LiveObjectList::GetElementId(LiveObjectList::Element* element) {
- return element->id_;
-}
-
-
-inline HeapObject*
-LiveObjectList::GetElementObj(LiveObjectList::Element* element) {
- return element->obj_;
-}
-
-#endif // LIVE_OBJECT_LIST
-
-} } // namespace v8::internal
-
-#endif // V8_LIVEOBJECTLIST_INL_H_
-
diff --git a/deps/v8/src/liveobjectlist.cc b/deps/v8/src/liveobjectlist.cc
deleted file mode 100644
index 6b89cf683..000000000
--- a/deps/v8/src/liveobjectlist.cc
+++ /dev/null
@@ -1,2631 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifdef LIVE_OBJECT_LIST
-
-#include <ctype.h>
-#include <stdlib.h>
-
-#include "v8.h"
-
-#include "checks.h"
-#include "global-handles.h"
-#include "heap.h"
-#include "inspector.h"
-#include "isolate.h"
-#include "list-inl.h"
-#include "liveobjectlist-inl.h"
-#include "string-stream.h"
-#include "v8utils.h"
-#include "v8conversions.h"
-
-namespace v8 {
-namespace internal {
-
-
-typedef int (*RawComparer)(const void*, const void*);
-
-
-#ifdef CHECK_ALL_OBJECT_TYPES
-
-#define DEBUG_LIVE_OBJECT_TYPES(v) \
- v(Smi, "unexpected: Smi") \
- \
- v(CodeCache, "unexpected: CodeCache") \
- v(BreakPointInfo, "unexpected: BreakPointInfo") \
- v(DebugInfo, "unexpected: DebugInfo") \
- v(TypeSwitchInfo, "unexpected: TypeSwitchInfo") \
- v(SignatureInfo, "unexpected: SignatureInfo") \
- v(Script, "unexpected: Script") \
- v(ObjectTemplateInfo, "unexpected: ObjectTemplateInfo") \
- v(FunctionTemplateInfo, "unexpected: FunctionTemplateInfo") \
- v(CallHandlerInfo, "unexpected: CallHandlerInfo") \
- v(InterceptorInfo, "unexpected: InterceptorInfo") \
- v(AccessCheckInfo, "unexpected: AccessCheckInfo") \
- v(AccessorInfo, "unexpected: AccessorInfo") \
- v(ExternalTwoByteString, "unexpected: ExternalTwoByteString") \
- v(ExternalAsciiString, "unexpected: ExternalAsciiString") \
- v(ExternalString, "unexpected: ExternalString") \
- v(SeqTwoByteString, "unexpected: SeqTwoByteString") \
- v(SeqAsciiString, "unexpected: SeqAsciiString") \
- v(SeqString, "unexpected: SeqString") \
- v(JSFunctionResultCache, "unexpected: JSFunctionResultCache") \
- v(NativeContext, "unexpected: NativeContext") \
- v(MapCache, "unexpected: MapCache") \
- v(CodeCacheHashTable, "unexpected: CodeCacheHashTable") \
- v(CompilationCacheTable, "unexpected: CompilationCacheTable") \
- v(SymbolTable, "unexpected: SymbolTable") \
- v(Dictionary, "unexpected: Dictionary") \
- v(HashTable, "unexpected: HashTable") \
- v(DescriptorArray, "unexpected: DescriptorArray") \
- v(ExternalFloatArray, "unexpected: ExternalFloatArray") \
- v(ExternalUnsignedIntArray, "unexpected: ExternalUnsignedIntArray") \
- v(ExternalIntArray, "unexpected: ExternalIntArray") \
- v(ExternalUnsignedShortArray, "unexpected: ExternalUnsignedShortArray") \
- v(ExternalShortArray, "unexpected: ExternalShortArray") \
- v(ExternalUnsignedByteArray, "unexpected: ExternalUnsignedByteArray") \
- v(ExternalByteArray, "unexpected: ExternalByteArray") \
- v(JSValue, "unexpected: JSValue")
-
-#else
-#define DEBUG_LIVE_OBJECT_TYPES(v)
-#endif
-
-
-#define FOR_EACH_LIVE_OBJECT_TYPE(v) \
- DEBUG_LIVE_OBJECT_TYPES(v) \
- \
- v(JSArray, "JSArray") \
- v(JSRegExp, "JSRegExp") \
- v(JSFunction, "JSFunction") \
- v(JSGlobalObject, "JSGlobal") \
- v(JSBuiltinsObject, "JSBuiltins") \
- v(GlobalObject, "Global") \
- v(JSGlobalProxy, "JSGlobalProxy") \
- v(JSObject, "JSObject") \
- \
- v(Context, "meta: Context") \
- v(ByteArray, "meta: ByteArray") \
- v(ExternalPixelArray, "meta: PixelArray") \
- v(ExternalArray, "meta: ExternalArray") \
- v(FixedArray, "meta: FixedArray") \
- v(String, "String") \
- v(HeapNumber, "HeapNumber") \
- \
- v(Code, "meta: Code") \
- v(Map, "meta: Map") \
- v(Oddball, "Oddball") \
- v(Foreign, "meta: Foreign") \
- v(SharedFunctionInfo, "meta: SharedFunctionInfo") \
- v(Struct, "meta: Struct") \
- \
- v(HeapObject, "HeapObject")
-
-
-enum /* LiveObjectType */ {
-#define DECLARE_OBJECT_TYPE_ENUM(type, name) kType##type,
- FOR_EACH_LIVE_OBJECT_TYPE(DECLARE_OBJECT_TYPE_ENUM)
- kInvalidLiveObjType,
- kNumberOfTypes
-#undef DECLARE_OBJECT_TYPE_ENUM
-};
-
-
-LiveObjectType GetObjectType(HeapObject* heap_obj) {
- // TODO(mlam): investigate usint Map::instance_type() instead.
-#define CHECK_FOR_OBJECT_TYPE(type, name) \
- if (heap_obj->Is##type()) return kType##type;
- FOR_EACH_LIVE_OBJECT_TYPE(CHECK_FOR_OBJECT_TYPE)
-#undef CHECK_FOR_OBJECT_TYPE
-
- UNREACHABLE();
- return kInvalidLiveObjType;
-}
-
-
-inline const char* GetObjectTypeDesc(LiveObjectType type) {
- static const char* const name[kNumberOfTypes] = {
- #define DEFINE_OBJECT_TYPE_NAME(type, name) name,
- FOR_EACH_LIVE_OBJECT_TYPE(DEFINE_OBJECT_TYPE_NAME)
- "invalid"
- #undef DEFINE_OBJECT_TYPE_NAME
- };
- ASSERT(type < kNumberOfTypes);
- return name[type];
-}
-
-
-const char* GetObjectTypeDesc(HeapObject* heap_obj) {
- LiveObjectType type = GetObjectType(heap_obj);
- return GetObjectTypeDesc(type);
-}
-
-
-bool IsOfType(LiveObjectType type, HeapObject* obj) {
- // Note: there are types that are more general (e.g. JSObject) that would
- // have passed the Is##type_() test for more specialized types (e.g.
- // JSFunction). If we find a more specialized match but we're looking for
- // the general type, then we should reject the ones that matches the
- // specialized type.
-#define CHECK_OBJECT_TYPE(type_, name) \
- if (obj->Is##type_()) return (type == kType##type_);
-
- FOR_EACH_LIVE_OBJECT_TYPE(CHECK_OBJECT_TYPE)
-#undef CHECK_OBJECT_TYPE
-
- return false;
-}
-
-
-const AllocationSpace kInvalidSpace = static_cast<AllocationSpace>(-1);
-
-static AllocationSpace FindSpaceFor(String* space_str) {
- SmartArrayPointer<char> s =
- space_str->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
-
- const char* key_str = *s;
- switch (key_str[0]) {
- case 'c':
- if (strcmp(key_str, "cell") == 0) return CELL_SPACE;
- if (strcmp(key_str, "code") == 0) return CODE_SPACE;
- break;
- case 'l':
- if (strcmp(key_str, "lo") == 0) return LO_SPACE;
- break;
- case 'm':
- if (strcmp(key_str, "map") == 0) return MAP_SPACE;
- break;
- case 'n':
- if (strcmp(key_str, "new") == 0) return NEW_SPACE;
- break;
- case 'o':
- if (strcmp(key_str, "old-pointer") == 0) return OLD_POINTER_SPACE;
- if (strcmp(key_str, "old-data") == 0) return OLD_DATA_SPACE;
- break;
- }
- return kInvalidSpace;
-}
-
-
-static bool InSpace(AllocationSpace space, HeapObject* heap_obj) {
- Heap* heap = ISOLATE->heap();
- if (space != LO_SPACE) {
- return heap->InSpace(heap_obj, space);
- }
-
- // This is an optimization to speed up the check for an object in the LO
- // space by exclusion because we know that all object pointers passed in
- // here are guaranteed to be in the heap. Hence, it is safe to infer
- // using an exclusion test.
- // Note: calling Heap::InSpace(heap_obj, LO_SPACE) is too slow for our
- // filters.
- int first_space = static_cast<int>(FIRST_SPACE);
- int last_space = static_cast<int>(LO_SPACE);
- for (int sp = first_space; sp < last_space; sp++) {
- if (heap->InSpace(heap_obj, static_cast<AllocationSpace>(sp))) {
- return false;
- }
- }
- SLOW_ASSERT(heap->InSpace(heap_obj, LO_SPACE));
- return true;
-}
-
-
-static LiveObjectType FindTypeFor(String* type_str) {
- SmartArrayPointer<char> s =
- type_str->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
-
-#define CHECK_OBJECT_TYPE(type_, name) { \
- const char* type_desc = GetObjectTypeDesc(kType##type_); \
- const char* key_str = *s; \
- if (strstr(type_desc, key_str) != NULL) return kType##type_; \
- }
- FOR_EACH_LIVE_OBJECT_TYPE(CHECK_OBJECT_TYPE)
-#undef CHECK_OBJECT_TYPE
-
- return kInvalidLiveObjType;
-}
-
-
-class LolFilter {
- public:
- explicit LolFilter(Handle<JSObject> filter_obj);
-
- inline bool is_active() const { return is_active_; }
- inline bool Matches(HeapObject* obj) {
- return !is_active() || MatchesSlow(obj);
- }
-
- private:
- void InitTypeFilter(Handle<JSObject> filter_obj);
- void InitSpaceFilter(Handle<JSObject> filter_obj);
- void InitPropertyFilter(Handle<JSObject> filter_obj);
- bool MatchesSlow(HeapObject* obj);
-
- bool is_active_;
- LiveObjectType type_;
- AllocationSpace space_;
- Handle<String> prop_;
-};
-
-
-LolFilter::LolFilter(Handle<JSObject> filter_obj)
- : is_active_(false),
- type_(kInvalidLiveObjType),
- space_(kInvalidSpace),
- prop_() {
- if (filter_obj.is_null()) return;
-
- InitTypeFilter(filter_obj);
- InitSpaceFilter(filter_obj);
- InitPropertyFilter(filter_obj);
-}
-
-
-void LolFilter::InitTypeFilter(Handle<JSObject> filter_obj) {
- Handle<String> type_sym = FACTORY->LookupAsciiSymbol("type");
- MaybeObject* maybe_result = filter_obj->GetProperty(*type_sym);
- Object* type_obj;
- if (maybe_result->ToObject(&type_obj)) {
- if (type_obj->IsString()) {
- String* type_str = String::cast(type_obj);
- type_ = FindTypeFor(type_str);
- if (type_ != kInvalidLiveObjType) {
- is_active_ = true;
- }
- }
- }
-}
-
-
-void LolFilter::InitSpaceFilter(Handle<JSObject> filter_obj) {
- Handle<String> space_sym = FACTORY->LookupAsciiSymbol("space");
- MaybeObject* maybe_result = filter_obj->GetProperty(*space_sym);
- Object* space_obj;
- if (maybe_result->ToObject(&space_obj)) {
- if (space_obj->IsString()) {
- String* space_str = String::cast(space_obj);
- space_ = FindSpaceFor(space_str);
- if (space_ != kInvalidSpace) {
- is_active_ = true;
- }
- }
- }
-}
-
-
-void LolFilter::InitPropertyFilter(Handle<JSObject> filter_obj) {
- Handle<String> prop_sym = FACTORY->LookupAsciiSymbol("prop");
- MaybeObject* maybe_result = filter_obj->GetProperty(*prop_sym);
- Object* prop_obj;
- if (maybe_result->ToObject(&prop_obj)) {
- if (prop_obj->IsString()) {
- prop_ = Handle<String>(String::cast(prop_obj));
- is_active_ = true;
- }
- }
-}
-
-
-bool LolFilter::MatchesSlow(HeapObject* obj) {
- if ((type_ != kInvalidLiveObjType) && !IsOfType(type_, obj)) {
- return false; // Fail because obj is not of the type of interest.
- }
- if ((space_ != kInvalidSpace) && !InSpace(space_, obj)) {
- return false; // Fail because obj is not in the space of interest.
- }
- if (!prop_.is_null() && obj->IsJSObject()) {
- LookupResult result;
- obj->Lookup(*prop_, &result);
- if (!result.IsProperty()) {
- return false; // Fail because obj does not have the property of interest.
- }
- }
- return true;
-}
-
-
-class LolIterator {
- public:
- LolIterator(LiveObjectList* older, LiveObjectList* newer)
- : older_(older),
- newer_(newer),
- curr_(0),
- elements_(0),
- count_(0),
- index_(0) { }
-
- inline void Init() {
- SetCurrent(newer_);
- // If the elements_ list is empty, then move on to the next list as long
- // as we're not at the last list (indicated by done()).
- while ((elements_ == NULL) && !Done()) {
- SetCurrent(curr_->prev_);
- }
- }
-
- inline bool Done() const {
- return (curr_ == older_);
- }
-
- // Object level iteration.
- inline void Next() {
- index_++;
- if (index_ >= count_) {
- // Iterate backwards until we get to the oldest list.
- while (!Done()) {
- SetCurrent(curr_->prev_);
- // If we have elements to process, we're good to go.
- if (elements_ != NULL) break;
-
- // Else, we should advance to the next older list.
- }
- }
- }
-
- inline int Id() const {
- return elements_[index_].id_;
- }
- inline HeapObject* Obj() const {
- return elements_[index_].obj_;
- }
-
- inline int LolObjCount() const {
- if (curr_ != NULL) return curr_->obj_count_;
- return 0;
- }
-
- protected:
- inline void SetCurrent(LiveObjectList* new_curr) {
- curr_ = new_curr;
- if (curr_ != NULL) {
- elements_ = curr_->elements_;
- count_ = curr_->obj_count_;
- index_ = 0;
- }
- }
-
- LiveObjectList* older_;
- LiveObjectList* newer_;
- LiveObjectList* curr_;
- LiveObjectList::Element* elements_;
- int count_;
- int index_;
-};
-
-
-class LolForwardIterator : public LolIterator {
- public:
- LolForwardIterator(LiveObjectList* first, LiveObjectList* last)
- : LolIterator(first, last) {
- }
-
- inline void Init() {
- SetCurrent(older_);
- // If the elements_ list is empty, then move on to the next list as long
- // as we're not at the last list (indicated by Done()).
- while ((elements_ == NULL) && !Done()) {
- SetCurrent(curr_->next_);
- }
- }
-
- inline bool Done() const {
- return (curr_ == newer_);
- }
-
- // Object level iteration.
- inline void Next() {
- index_++;
- if (index_ >= count_) {
- // Done with current list. Move on to the next.
- while (!Done()) { // If not at the last list already, ...
- SetCurrent(curr_->next_);
- // If we have elements to process, we're good to go.
- if (elements_ != NULL) break;
-
- // Else, we should advance to the next list.
- }
- }
- }
-};
-
-
-// Minimizes the white space in a string. Tabs and newlines are replaced
-// with a space where appropriate.
-static int CompactString(char* str) {
- char* src = str;
- char* dst = str;
- char prev_ch = 0;
- while (*dst != '\0') {
- char ch = *src++;
- // We will treat non-ASCII chars as '?'.
- if ((ch & 0x80) != 0) {
- ch = '?';
- }
- // Compact contiguous whitespace chars into a single ' '.
- if (isspace(ch)) {
- if (prev_ch != ' ') *dst++ = ' ';
- prev_ch = ' ';
- continue;
- }
- *dst++ = ch;
- prev_ch = ch;
- }
- return (dst - str);
-}
-
-
-// Generates a custom description based on the specific type of
-// object we're looking at. We only generate specialized
-// descriptions where we can. In all other cases, we emit the
-// generic info.
-static void GenerateObjectDesc(HeapObject* obj,
- char* buffer,
- int buffer_size) {
- Vector<char> buffer_v(buffer, buffer_size);
- ASSERT(obj != NULL);
- if (obj->IsJSArray()) {
- JSArray* jsarray = JSArray::cast(obj);
- double length = jsarray->length()->Number();
- OS::SNPrintF(buffer_v,
- "%p <%s> len %g",
- reinterpret_cast<void*>(obj),
- GetObjectTypeDesc(obj),
- length);
-
- } else if (obj->IsString()) {
- String* str = String::cast(obj);
- // Only grab up to 160 chars in case they are double byte.
- // We'll only dump 80 of them after we compact them.
- const int kMaxCharToDump = 80;
- const int kMaxBufferSize = kMaxCharToDump * 2;
- SmartArrayPointer<char> str_sp = str->ToCString(DISALLOW_NULLS,
- ROBUST_STRING_TRAVERSAL,
- 0,
- kMaxBufferSize);
- char* str_cstr = *str_sp;
- int length = CompactString(str_cstr);
- OS::SNPrintF(buffer_v,
- "%p <%s> '%.80s%s'",
- reinterpret_cast<void*>(obj),
- GetObjectTypeDesc(obj),
- str_cstr,
- (length > kMaxCharToDump) ? "..." : "");
-
- } else if (obj->IsJSFunction() || obj->IsSharedFunctionInfo()) {
- SharedFunctionInfo* sinfo;
- if (obj->IsJSFunction()) {
- JSFunction* func = JSFunction::cast(obj);
- sinfo = func->shared();
- } else {
- sinfo = SharedFunctionInfo::cast(obj);
- }
-
- String* name = sinfo->DebugName();
- SmartArrayPointer<char> name_sp =
- name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- char* name_cstr = *name_sp;
-
- HeapStringAllocator string_allocator;
- StringStream stream(&string_allocator);
- sinfo->SourceCodePrint(&stream, 50);
- SmartArrayPointer<const char> source_sp = stream.ToCString();
- const char* source_cstr = *source_sp;
-
- OS::SNPrintF(buffer_v,
- "%p <%s> '%s' %s",
- reinterpret_cast<void*>(obj),
- GetObjectTypeDesc(obj),
- name_cstr,
- source_cstr);
-
- } else if (obj->IsFixedArray()) {
- FixedArray* fixed = FixedArray::cast(obj);
-
- OS::SNPrintF(buffer_v,
- "%p <%s> len %d",
- reinterpret_cast<void*>(obj),
- GetObjectTypeDesc(obj),
- fixed->length());
-
- } else {
- OS::SNPrintF(buffer_v,
- "%p <%s>",
- reinterpret_cast<void*>(obj),
- GetObjectTypeDesc(obj));
- }
-}
-
-
-// Utility function for filling in a line of detail in a verbose dump.
-static bool AddObjDetail(Handle<FixedArray> arr,
- int index,
- int obj_id,
- Handle<HeapObject> target,
- const char* desc_str,
- Handle<String> id_sym,
- Handle<String> desc_sym,
- Handle<String> size_sym,
- Handle<JSObject> detail,
- Handle<String> desc,
- Handle<Object> error) {
- Isolate* isolate = Isolate::Current();
- Factory* factory = isolate->factory();
- detail = factory->NewJSObject(isolate->object_function());
- if (detail->IsFailure()) {
- error = detail;
- return false;
- }
-
- int size = 0;
- char buffer[512];
- if (desc_str == NULL) {
- ASSERT(!target.is_null());
- HeapObject* obj = *target;
- GenerateObjectDesc(obj, buffer, sizeof(buffer));
- desc_str = buffer;
- size = obj->Size();
- }
- desc = factory->NewStringFromAscii(CStrVector(desc_str));
- if (desc->IsFailure()) {
- error = desc;
- return false;
- }
-
- { MaybeObject* maybe_result = detail->SetProperty(*id_sym,
- Smi::FromInt(obj_id),
- NONE,
- kNonStrictMode);
- if (maybe_result->IsFailure()) return false;
- }
- { MaybeObject* maybe_result = detail->SetProperty(*desc_sym,
- *desc,
- NONE,
- kNonStrictMode);
- if (maybe_result->IsFailure()) return false;
- }
- { MaybeObject* maybe_result = detail->SetProperty(*size_sym,
- Smi::FromInt(size),
- NONE,
- kNonStrictMode);
- if (maybe_result->IsFailure()) return false;
- }
-
- arr->set(index, *detail);
- return true;
-}
-
-
-class DumpWriter {
- public:
- virtual ~DumpWriter() {}
-
- virtual void ComputeTotalCountAndSize(LolFilter* filter,
- int* count,
- int* size) = 0;
- virtual bool Write(Handle<FixedArray> elements_arr,
- int start,
- int dump_limit,
- LolFilter* filter,
- Handle<Object> error) = 0;
-};
-
-
-class LolDumpWriter: public DumpWriter {
- public:
- LolDumpWriter(LiveObjectList* older, LiveObjectList* newer)
- : older_(older), newer_(newer) {
- }
-
- void ComputeTotalCountAndSize(LolFilter* filter, int* count, int* size) {
- *count = 0;
- *size = 0;
-
- LolIterator it(older_, newer_);
- for (it.Init(); !it.Done(); it.Next()) {
- HeapObject* heap_obj = it.Obj();
- if (!filter->Matches(heap_obj)) {
- continue;
- }
-
- *size += heap_obj->Size();
- (*count)++;
- }
- }
-
- bool Write(Handle<FixedArray> elements_arr,
- int start,
- int dump_limit,
- LolFilter* filter,
- Handle<Object> error) {
- // The lols are listed from latest to earliest. We want to dump from
- // earliest to latest. So, compute the last element to start with.
- int index = 0;
- int count = 0;
-
- Isolate* isolate = Isolate::Current();
- Factory* factory = isolate->factory();
-
- // Prefetch some needed symbols.
- Handle<String> id_sym = factory->LookupAsciiSymbol("id");
- Handle<String> desc_sym = factory->LookupAsciiSymbol("desc");
- Handle<String> size_sym = factory->LookupAsciiSymbol("size");
-
- // Fill the array with the lol object details.
- Handle<JSObject> detail;
- Handle<String> desc;
- Handle<HeapObject> target;
-
- LiveObjectList* first_lol = (older_ != NULL) ?
- older_->next_ : LiveObjectList::first_;
- LiveObjectList* last_lol = (newer_ != NULL) ? newer_->next_ : NULL;
-
- LolForwardIterator it(first_lol, last_lol);
- for (it.Init(); !it.Done() && (index < dump_limit); it.Next()) {
- HeapObject* heap_obj = it.Obj();
-
- // Skip objects that have been filtered out.
- if (!filter->Matches(heap_obj)) {
- continue;
- }
-
- // Only report objects that are in the section of interest.
- if (count >= start) {
- target = Handle<HeapObject>(heap_obj);
- bool success = AddObjDetail(elements_arr,
- index++,
- it.Id(),
- target,
- NULL,
- id_sym,
- desc_sym,
- size_sym,
- detail,
- desc,
- error);
- if (!success) return false;
- }
- count++;
- }
- return true;
- }
-
- private:
- LiveObjectList* older_;
- LiveObjectList* newer_;
-};
-
-
-class RetainersDumpWriter: public DumpWriter {
- public:
- RetainersDumpWriter(Handle<HeapObject> target,
- Handle<JSObject> instance_filter,
- Handle<JSFunction> args_function)
- : target_(target),
- instance_filter_(instance_filter),
- args_function_(args_function) {
- }
-
- void ComputeTotalCountAndSize(LolFilter* filter, int* count, int* size) {
- Handle<FixedArray> retainers_arr;
- Handle<Object> error;
-
- *size = -1;
- LiveObjectList::GetRetainers(target_,
- instance_filter_,
- retainers_arr,
- 0,
- Smi::kMaxValue,
- count,
- filter,
- NULL,
- *args_function_,
- error);
- }
-
- bool Write(Handle<FixedArray> elements_arr,
- int start,
- int dump_limit,
- LolFilter* filter,
- Handle<Object> error) {
- int dummy;
- int count;
-
- // Fill the retainer objects.
- count = LiveObjectList::GetRetainers(target_,
- instance_filter_,
- elements_arr,
- start,
- dump_limit,
- &dummy,
- filter,
- NULL,
- *args_function_,
- error);
- if (count < 0) {
- return false;
- }
- return true;
- }
-
- private:
- Handle<HeapObject> target_;
- Handle<JSObject> instance_filter_;
- Handle<JSFunction> args_function_;
-};
-
-
-class LiveObjectSummary {
- public:
- explicit LiveObjectSummary(LolFilter* filter)
- : total_count_(0),
- total_size_(0),
- found_root_(false),
- found_weak_root_(false),
- filter_(filter) {
- memset(counts_, 0, sizeof(counts_[0]) * kNumberOfEntries);
- memset(sizes_, 0, sizeof(sizes_[0]) * kNumberOfEntries);
- }
-
- void Add(HeapObject* heap_obj) {
- int size = heap_obj->Size();
- LiveObjectType type = GetObjectType(heap_obj);
- ASSERT(type != kInvalidLiveObjType);
- counts_[type]++;
- sizes_[type] += size;
- total_count_++;
- total_size_ += size;
- }
-
- void set_found_root() { found_root_ = true; }
- void set_found_weak_root() { found_weak_root_ = true; }
-
- inline int Count(LiveObjectType type) {
- return counts_[type];
- }
- inline int Size(LiveObjectType type) {
- return sizes_[type];
- }
- inline int total_count() {
- return total_count_;
- }
- inline int total_size() {
- return total_size_;
- }
- inline bool found_root() {
- return found_root_;
- }
- inline bool found_weak_root() {
- return found_weak_root_;
- }
- int GetNumberOfEntries() {
- int entries = 0;
- for (int i = 0; i < kNumberOfEntries; i++) {
- if (counts_[i]) entries++;
- }
- return entries;
- }
-
- inline LolFilter* filter() { return filter_; }
-
- static const int kNumberOfEntries = kNumberOfTypes;
-
- private:
- int counts_[kNumberOfEntries];
- int sizes_[kNumberOfEntries];
- int total_count_;
- int total_size_;
- bool found_root_;
- bool found_weak_root_;
-
- LolFilter* filter_;
-};
-
-
-// Abstraction for a summary writer.
-class SummaryWriter {
- public:
- virtual ~SummaryWriter() {}
- virtual void Write(LiveObjectSummary* summary) = 0;
-};
-
-
-// A summary writer for filling in a summary of lol lists and diffs.
-class LolSummaryWriter: public SummaryWriter {
- public:
- LolSummaryWriter(LiveObjectList* older_lol,
- LiveObjectList* newer_lol)
- : older_(older_lol), newer_(newer_lol) {
- }
-
- void Write(LiveObjectSummary* summary) {
- LolFilter* filter = summary->filter();
-
- // Fill the summary with the lol object details.
- LolIterator it(older_, newer_);
- for (it.Init(); !it.Done(); it.Next()) {
- HeapObject* heap_obj = it.Obj();
- if (!filter->Matches(heap_obj)) {
- continue;
- }
- summary->Add(heap_obj);
- }
- }
-
- private:
- LiveObjectList* older_;
- LiveObjectList* newer_;
-};
-
-
-// A summary writer for filling in a retainers list.
-class RetainersSummaryWriter: public SummaryWriter {
- public:
- RetainersSummaryWriter(Handle<HeapObject> target,
- Handle<JSObject> instance_filter,
- Handle<JSFunction> args_function)
- : target_(target),
- instance_filter_(instance_filter),
- args_function_(args_function) {
- }
-
- void Write(LiveObjectSummary* summary) {
- Handle<FixedArray> retainers_arr;
- Handle<Object> error;
- int dummy_total_count;
- LiveObjectList::GetRetainers(target_,
- instance_filter_,
- retainers_arr,
- 0,
- Smi::kMaxValue,
- &dummy_total_count,
- summary->filter(),
- summary,
- *args_function_,
- error);
- }
-
- private:
- Handle<HeapObject> target_;
- Handle<JSObject> instance_filter_;
- Handle<JSFunction> args_function_;
-};
-
-
-uint32_t LiveObjectList::next_element_id_ = 1;
-int LiveObjectList::list_count_ = 0;
-int LiveObjectList::last_id_ = 0;
-LiveObjectList* LiveObjectList::first_ = NULL;
-LiveObjectList* LiveObjectList::last_ = NULL;
-
-
-LiveObjectList::LiveObjectList(LiveObjectList* prev, int capacity)
- : prev_(prev),
- next_(NULL),
- capacity_(capacity),
- obj_count_(0) {
- elements_ = NewArray<Element>(capacity);
- id_ = ++last_id_;
-
- list_count_++;
-}
-
-
-LiveObjectList::~LiveObjectList() {
- DeleteArray<Element>(elements_);
- delete prev_;
-}
-
-
-int LiveObjectList::GetTotalObjCountAndSize(int* size_p) {
- int size = 0;
- int count = 0;
- LiveObjectList* lol = this;
- do {
- // Only compute total size if requested i.e. when size_p is not null.
- if (size_p != NULL) {
- Element* elements = lol->elements_;
- for (int i = 0; i < lol->obj_count_; i++) {
- HeapObject* heap_obj = elements[i].obj_;
- size += heap_obj->Size();
- }
- }
- count += lol->obj_count_;
- lol = lol->prev_;
- } while (lol != NULL);
-
- if (size_p != NULL) {
- *size_p = size;
- }
- return count;
-}
-
-
-// Adds an object to the lol.
-// Returns true if successful, else returns false.
-bool LiveObjectList::Add(HeapObject* obj) {
- // If the object is already accounted for in the prev list which we inherit
- // from, then no need to add it to this list.
- if ((prev() != NULL) && (prev()->Find(obj) != NULL)) {
- return true;
- }
- ASSERT(obj_count_ <= capacity_);
- if (obj_count_ == capacity_) {
- // The heap must have grown and we have more objects than capacity to store
- // them.
- return false; // Fail this addition.
- }
- Element& element = elements_[obj_count_++];
- element.id_ = next_element_id_++;
- element.obj_ = obj;
- return true;
-}
-
-
-// Comparator used for sorting and searching the lol.
-int LiveObjectList::CompareElement(const Element* a, const Element* b) {
- const HeapObject* obj1 = a->obj_;
- const HeapObject* obj2 = b->obj_;
- // For lol elements, it doesn't matter which comes first if 2 elements point
- // to the same object (which gets culled later). Hence, we only care about
- // the greater than / less than relationships.
- return (obj1 > obj2) ? 1 : (obj1 == obj2) ? 0 : -1;
-}
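
A minimal standalone sketch (not part of the original file) of the comparator pattern used throughout this file: one three-way compare function shared by the typed Vector sort and the C-library bsearch()/qsort() calls. Record, CompareRecord and FindRecord are names invented here for illustration; the original code keeps the typed Element* signature and casts it to the C comparator type (RawComparer) at the call sites.

#include <cstdint>
#include <cstdlib>

struct Record {
  const void* key;  // sort/search key (an address, like Element::obj_)
  int payload;
};

// Three-way comparator usable by both std::qsort() and std::bsearch().
static int CompareRecord(const void* a, const void* b) {
  std::uintptr_t k1 =
      reinterpret_cast<std::uintptr_t>(static_cast<const Record*>(a)->key);
  std::uintptr_t k2 =
      reinterpret_cast<std::uintptr_t>(static_cast<const Record*>(b)->key);
  return (k1 > k2) ? 1 : (k1 == k2) ? 0 : -1;
}

static Record* FindRecord(Record* records, std::size_t count, const void* key) {
  // Precondition: records[] has already been sorted with the same comparator,
  // e.g. std::qsort(records, count, sizeof(Record), CompareRecord);
  Record probe = { key, 0 };
  return static_cast<Record*>(
      std::bsearch(&probe, records, count, sizeof(Record), CompareRecord));
}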
-
-
-// Looks for the specified object in the lol, and returns its element if found.
-LiveObjectList::Element* LiveObjectList::Find(HeapObject* obj) {
- LiveObjectList* lol = this;
- Element key;
- Element* result = NULL;
-
- key.obj_ = obj;
- // Iterate through the chain of lol's to look for the object.
- while ((result == NULL) && (lol != NULL)) {
- result = reinterpret_cast<Element*>(
- bsearch(&key, lol->elements_, lol->obj_count_,
- sizeof(Element),
- reinterpret_cast<RawComparer>(CompareElement)));
- lol = lol->prev_;
- }
- return result;
-}
-
-
-// "Nullifies" (convert the HeapObject* into an SMI) so that it will get cleaned
-// up in the GCEpilogue, while preserving the sort order of the lol.
-// NOTE: the lols need to be already sorted before NullifyMostRecent() is
-// called.
-void LiveObjectList::NullifyMostRecent(HeapObject* obj) {
- LiveObjectList* lol = last();
- Element key;
- Element* result = NULL;
-
- key.obj_ = obj;
- // Iterate through the chain of lol's to look for the object.
- while (lol != NULL) {
- result = reinterpret_cast<Element*>(
- bsearch(&key, lol->elements_, lol->obj_count_,
- sizeof(Element),
- reinterpret_cast<RawComparer>(CompareElement)));
- if (result != NULL) {
- // Since there may be more than one (we are nullifying dup's after all),
- // find the first in the current lol, and nullify that. The lol should
- // be sorted already to make this easy (see the use of SortAll()).
- int i = result - lol->elements_;
-
- // NOTE: we sort the lol in increasing order. So, if an object has been
- // "nullified" (its lowest bit will be cleared to make it look like an
- // SMI), it would/should show up before the equivalent dups that have not
- // yet been "nullified". Hence, we should be searching backwards for the
- // first occurrence of a matching object and nullify that instance. This
- // will ensure that we preserve the expected sorting order.
- for (i--; i > 0; i--) {
- Element* element = &lol->elements_[i];
- HeapObject* curr_obj = element->obj_;
- if (curr_obj != obj) {
- break; // No more matches. Let's move on.
- }
- result = element; // Let this earlier match be the result.
- }
-
- // Nullify the object.
- NullifyNonLivePointer(&result->obj_);
- return;
- }
- lol = lol->prev_;
- }
-}
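
The "nullification" above relies on the pointer-tagging convention the comments describe: a live HeapObject pointer carries a set low bit, while an SMI reads with that bit clear, so clearing the bit marks the slot as dead and also makes it compare just below still-live duplicates of the same address. A small illustrative sketch of that idea follows; kTagBit, LooksLikeHeapObject and Nullify are names invented here, not part of the original file.

#include <cstdint>

// Assumed tagging convention, matching the comments above: heap object
// pointers have the low bit set, SMIs have it clear.
const std::uintptr_t kTagBit = 1;

inline bool LooksLikeHeapObject(std::uintptr_t slot) {
  return (slot & kTagBit) != 0;
}

// "Nullify" a slot in place: clear the tag bit so later passes treat it as a
// dead (SMI-looking) value. The value drops by exactly 1, so it sorts
// immediately before any still-live entries holding the same tagged address.
inline void Nullify(std::uintptr_t* slot) {
  *slot &= ~kTagBit;
}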
-
-
-// Sorts the lol.
-void LiveObjectList::Sort() {
- if (obj_count_ > 0) {
- Vector<Element> elements_v(elements_, obj_count_);
- elements_v.Sort(CompareElement);
- }
-}
-
-
-// Sorts all captured lols starting from the latest.
-void LiveObjectList::SortAll() {
- LiveObjectList* lol = last();
- while (lol != NULL) {
- lol->Sort();
- lol = lol->prev_;
- }
-}
-
-
-// Counts the number of objects in the heap.
-static int CountHeapObjects() {
- int count = 0;
- // Iterate over all the heap spaces and count the number of objects.
- HeapIterator iterator;
- HeapObject* heap_obj = NULL;
- while ((heap_obj = iterator.next()) != NULL) {
- count++;
- }
- return count;
-}
-
-
-// Captures a current snapshot of all objects in the heap.
-MaybeObject* LiveObjectList::Capture() {
- Isolate* isolate = Isolate::Current();
- Factory* factory = isolate->factory();
- HandleScope scope(isolate);
-
- // Count the number of objects in the heap.
- int total_count = CountHeapObjects();
- int count = total_count;
- int size = 0;
-
- LiveObjectList* last_lol = last();
- if (last_lol != NULL) {
- count -= last_lol->TotalObjCount();
- }
-
- LiveObjectList* lol;
-
- // Create a lol large enough to track all the objects.
- lol = new LiveObjectList(last_lol, count);
- if (lol == NULL) {
- return NULL; // No memory to proceed.
- }
-
- // The HeapIterator needs to be in its own scope because it disables
- // allocation, and we need to allocate below.
- {
- // Iterate over all the heap spaces and add the objects.
- HeapIterator iterator;
- HeapObject* heap_obj = NULL;
- bool failed = false;
- while (!failed && (heap_obj = iterator.next()) != NULL) {
- failed = !lol->Add(heap_obj);
- size += heap_obj->Size();
- }
- ASSERT(!failed);
-
- lol->Sort();
-
- // Add the current lol to the list of lols.
- if (last_ != NULL) {
- last_->next_ = lol;
- } else {
- first_ = lol;
- }
- last_ = lol;
-
-#ifdef VERIFY_LOL
- if (FLAG_verify_lol) {
- Verify(true);
- }
-#endif
- }
-
- Handle<String> id_sym = factory->LookupAsciiSymbol("id");
- Handle<String> count_sym = factory->LookupAsciiSymbol("count");
- Handle<String> size_sym = factory->LookupAsciiSymbol("size");
-
- Handle<JSObject> result = factory->NewJSObject(isolate->object_function());
- if (result->IsFailure()) return Object::cast(*result);
-
- { MaybeObject* maybe_result = result->SetProperty(*id_sym,
- Smi::FromInt(lol->id()),
- NONE,
- kNonStrictMode);
- if (maybe_result->IsFailure()) return maybe_result;
- }
- { MaybeObject* maybe_result = result->SetProperty(*count_sym,
- Smi::FromInt(total_count),
- NONE,
- kNonStrictMode);
- if (maybe_result->IsFailure()) return maybe_result;
- }
- { MaybeObject* maybe_result = result->SetProperty(*size_sym,
- Smi::FromInt(size),
- NONE,
- kNonStrictMode);
- if (maybe_result->IsFailure()) return maybe_result;
- }
-
- return *result;
-}
-
-
- // Delete doesn't actually delete an lol. It just marks it as invisible since
-// its contents are considered to be part of subsequent lists as well. The
-// only time we'll actually delete the lol is when we Reset() or if the lol is
-// invisible, and its element count reaches 0.
-bool LiveObjectList::Delete(int id) {
- LiveObjectList* lol = last();
- while (lol != NULL) {
- if (lol->id() == id) {
- break;
- }
- lol = lol->prev_;
- }
-
- // If no lol is found for this id, then we fail to delete.
- if (lol == NULL) return false;
-
- // Else, mark the lol as invisible i.e. id == 0.
- lol->id_ = 0;
- list_count_--;
- ASSERT(list_count_ >= 0);
- if (lol->obj_count_ == 0) {
- // Point the next lol's prev to this lol's prev.
- LiveObjectList* next = lol->next_;
- LiveObjectList* prev = lol->prev_;
- // Point next's prev to prev.
- if (next != NULL) {
- next->prev_ = lol->prev_;
- } else {
- last_ = lol->prev_;
- }
- // Point prev's next to next.
- if (prev != NULL) {
- prev->next_ = lol->next_;
- } else {
- first_ = lol->next_;
- }
-
- lol->prev_ = NULL;
- lol->next_ = NULL;
-
- // Delete this now empty and invisible lol.
- delete lol;
- }
-
- // Just in case we've marked everything invisible, then clean up completely.
- if (list_count_ == 0) {
- Reset();
- }
-
- return true;
-}
-
-
-MaybeObject* LiveObjectList::Dump(int older_id,
- int newer_id,
- int start_idx,
- int dump_limit,
- Handle<JSObject> filter_obj) {
- if ((older_id < 0) || (newer_id < 0) || (last() == NULL)) {
- return Failure::Exception(); // Fail: invalid lol id or no lols captured yet.
- }
- if (newer_id < older_id) {
- // They are not in the expected order. Swap them.
- int temp = older_id;
- older_id = newer_id;
- newer_id = temp;
- }
-
- LiveObjectList* newer_lol = FindLolForId(newer_id, last());
- LiveObjectList* older_lol = FindLolForId(older_id, newer_lol);
-
- // If the id is defined, and we can't find a LOL for it, then we have an
- // invalid id.
- if ((newer_id != 0) && (newer_lol == NULL)) {
- return Failure::Exception(); // Fail: the newer lol id is invalid.
- }
- if ((older_id != 0) && (older_lol == NULL)) {
- return Failure::Exception(); // Fail: the older lol id is invalid.
- }
-
- LolFilter filter(filter_obj);
- LolDumpWriter writer(older_lol, newer_lol);
- return DumpPrivate(&writer, start_idx, dump_limit, &filter);
-}
-
-
-MaybeObject* LiveObjectList::DumpPrivate(DumpWriter* writer,
- int start,
- int dump_limit,
- LolFilter* filter) {
- Isolate* isolate = Isolate::Current();
- Factory* factory = isolate->factory();
-
- HandleScope scope(isolate);
-
- // Calculate the number of entries of the dump.
- int count = -1;
- int size = -1;
- writer->ComputeTotalCountAndSize(filter, &count, &size);
-
- // Adjust for where to start the dump.
- if ((start < 0) || (start >= count)) {
- return Failure::Exception(); // invalid start.
- }
-
- int remaining_count = count - start;
- if (dump_limit > remaining_count) {
- dump_limit = remaining_count;
- }
-
- // Allocate an array to hold the result.
- Handle<FixedArray> elements_arr = factory->NewFixedArray(dump_limit);
- if (elements_arr->IsFailure()) return Object::cast(*elements_arr);
-
- // Fill in the dump.
- Handle<Object> error;
- bool success = writer->Write(elements_arr,
- start,
- dump_limit,
- filter,
- error);
- if (!success) return Object::cast(*error);
-
- MaybeObject* maybe_result;
-
- // Allocate the result body.
- Handle<JSObject> body = factory->NewJSObject(isolate->object_function());
- if (body->IsFailure()) return Object::cast(*body);
-
- // Set the updated body.count.
- Handle<String> count_sym = factory->LookupAsciiSymbol("count");
- maybe_result = body->SetProperty(*count_sym,
- Smi::FromInt(count),
- NONE,
- kNonStrictMode);
- if (maybe_result->IsFailure()) return maybe_result;
-
- // Set the updated body.size if appropriate.
- if (size >= 0) {
- Handle<String> size_sym = factory->LookupAsciiSymbol("size");
- maybe_result = body->SetProperty(*size_sym,
- Smi::FromInt(size),
- NONE,
- kNonStrictMode);
- if (maybe_result->IsFailure()) return maybe_result;
- }
-
- // Set body.first_index.
- Handle<String> first_sym = factory->LookupAsciiSymbol("first_index");
- maybe_result = body->SetProperty(*first_sym,
- Smi::FromInt(start),
- NONE,
- kNonStrictMode);
- if (maybe_result->IsFailure()) return maybe_result;
-
- // Allocate the JSArray of the elements.
- Handle<JSObject> elements = factory->NewJSObject(isolate->array_function());
- if (elements->IsFailure()) return Object::cast(*elements);
-
- maybe_result = Handle<JSArray>::cast(elements)->SetContent(*elements_arr);
- if (maybe_result->IsFailure()) return maybe_result;
-
- // Set body.elements.
- Handle<String> elements_sym = factory->LookupAsciiSymbol("elements");
- maybe_result = body->SetProperty(*elements_sym,
- *elements,
- NONE,
- kNonStrictMode);
- if (maybe_result->IsFailure()) return maybe_result;
-
- return *body;
-}
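
For orientation, the dump body assembled here ends up with roughly the following shape (placeholder values, written in the same comment style as the summary-body sketch later in this file):

// body: {
//   count: <number of matching objects>,
//   size: <total size in bytes>,        // only present when size >= 0
//   first_index: <start>,
//   elements: [
//     { id: <obj id>, desc: "<object description>", size: <object size> },
//     ...
//   ]
// }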
-
-
-MaybeObject* LiveObjectList::Summarize(int older_id,
- int newer_id,
- Handle<JSObject> filter_obj) {
- if ((older_id < 0) || (newer_id < 0) || (last() == NULL)) {
- return Failure::Exception(); // Fail: invalid lol id or no lols captured yet.
- }
- if (newer_id < older_id) {
- // They are not in the expected order. Swap them.
- int temp = older_id;
- older_id = newer_id;
- newer_id = temp;
- }
-
- LiveObjectList* newer_lol = FindLolForId(newer_id, last());
- LiveObjectList* older_lol = FindLolForId(older_id, newer_lol);
-
- // If the id is defined, and we can't find a LOL for it, then we have an
- // invalid id.
- if ((newer_id != 0) && (newer_lol == NULL)) {
- return Failure::Exception(); // Fail: the newer lol id is invalid.
- }
- if ((older_id != 0) && (older_lol == NULL)) {
- return Failure::Exception(); // Fail: the older lol id is invalid.
- }
-
- LolFilter filter(filter_obj);
- LolSummaryWriter writer(older_lol, newer_lol);
- return SummarizePrivate(&writer, &filter, false);
-}
-
-
-// Creates a summary report for the debugger.
-// Note: the SummaryWriter takes care of iterating over objects and filling in
-// the summary.
-MaybeObject* LiveObjectList::SummarizePrivate(SummaryWriter* writer,
- LolFilter* filter,
- bool is_tracking_roots) {
- HandleScope scope;
- MaybeObject* maybe_result;
-
- LiveObjectSummary summary(filter);
- writer->Write(&summary);
-
- Isolate* isolate = Isolate::Current();
- Factory* factory = isolate->factory();
-
- // The result body will look like this:
- // body: {
- // count: <total_count>,
- // size: <total_size>,
- // found_root: <boolean>, // optional.
- // found_weak_root: <boolean>, // optional.
- // summary: [
- // {
- // desc: "<object type name>",
- // count: <count>,
- // size: <size>
- // },
- // ...
- // ]
- // }
-
- // Prefetch some needed symbols.
- Handle<String> desc_sym = factory->LookupAsciiSymbol("desc");
- Handle<String> count_sym = factory->LookupAsciiSymbol("count");
- Handle<String> size_sym = factory->LookupAsciiSymbol("size");
- Handle<String> summary_sym = factory->LookupAsciiSymbol("summary");
-
- // Allocate the summary array.
- int entries_count = summary.GetNumberOfEntries();
- Handle<FixedArray> summary_arr =
- factory->NewFixedArray(entries_count);
- if (summary_arr->IsFailure()) return Object::cast(*summary_arr);
-
- int idx = 0;
- for (int i = 0; i < LiveObjectSummary::kNumberOfEntries; i++) {
- // Allocate the summary record.
- Handle<JSObject> detail = factory->NewJSObject(isolate->object_function());
- if (detail->IsFailure()) return Object::cast(*detail);
-
- // Fill in the summary record.
- LiveObjectType type = static_cast<LiveObjectType>(i);
- int count = summary.Count(type);
- if (count) {
- const char* desc_cstr = GetObjectTypeDesc(type);
- Handle<String> desc = factory->LookupAsciiSymbol(desc_cstr);
- int size = summary.Size(type);
-
- maybe_result = detail->SetProperty(*desc_sym,
- *desc,
- NONE,
- kNonStrictMode);
- if (maybe_result->IsFailure()) return maybe_result;
- maybe_result = detail->SetProperty(*count_sym,
- Smi::FromInt(count),
- NONE,
- kNonStrictMode);
- if (maybe_result->IsFailure()) return maybe_result;
- maybe_result = detail->SetProperty(*size_sym,
- Smi::FromInt(size),
- NONE,
- kNonStrictMode);
- if (maybe_result->IsFailure()) return maybe_result;
-
- summary_arr->set(idx++, *detail);
- }
- }
-
- // Wrap the summary fixed array in a JS array.
- Handle<JSObject> summary_obj =
- factory->NewJSObject(isolate->array_function());
- if (summary_obj->IsFailure()) return Object::cast(*summary_obj);
-
- maybe_result = Handle<JSArray>::cast(summary_obj)->SetContent(*summary_arr);
- if (maybe_result->IsFailure()) return maybe_result;
-
- // Create the body object.
- Handle<JSObject> body = factory->NewJSObject(isolate->object_function());
- if (body->IsFailure()) return Object::cast(*body);
-
- // Fill out the body object.
- int total_count = summary.total_count();
- int total_size = summary.total_size();
- maybe_result = body->SetProperty(*count_sym,
- Smi::FromInt(total_count),
- NONE,
- kNonStrictMode);
- if (maybe_result->IsFailure()) return maybe_result;
-
- maybe_result = body->SetProperty(*size_sym,
- Smi::FromInt(total_size),
- NONE,
- kNonStrictMode);
- if (maybe_result->IsFailure()) return maybe_result;
-
- if (is_tracking_roots) {
- int found_root = summary.found_root();
- int found_weak_root = summary.found_weak_root();
- Handle<String> root_sym = factory->LookupAsciiSymbol("found_root");
- Handle<String> weak_root_sym =
- factory->LookupAsciiSymbol("found_weak_root");
- maybe_result = body->SetProperty(*root_sym,
- Smi::FromInt(found_root),
- NONE,
- kNonStrictMode);
- if (maybe_result->IsFailure()) return maybe_result;
- maybe_result = body->SetProperty(*weak_root_sym,
- Smi::FromInt(found_weak_root),
- NONE,
- kNonStrictMode);
- if (maybe_result->IsFailure()) return maybe_result;
- }
-
- maybe_result = body->SetProperty(*summary_sym,
- *summary_obj,
- NONE,
- kNonStrictMode);
- if (maybe_result->IsFailure()) return maybe_result;
-
- return *body;
-}
-
-
-// Returns an array listing the captured lols.
-// Note: only dumps the section starting at start_idx and only up to
-// dump_limit entries.
-MaybeObject* LiveObjectList::Info(int start_idx, int dump_limit) {
- Isolate* isolate = Isolate::Current();
- Factory* factory = isolate->factory();
-
- HandleScope scope(isolate);
- MaybeObject* maybe_result;
-
- int total_count = LiveObjectList::list_count();
- int dump_count = total_count;
-
- // Adjust for where to start the dump.
- if (total_count == 0) {
- start_idx = 0; // Ensure we return an empty list.
- } else if ((start_idx < 0) || (start_idx >= total_count)) {
- return Failure::Exception(); // invalid start.
- }
- dump_count -= start_idx;
-
- // Adjust for the dump limit.
- if (dump_count > dump_limit) {
- dump_count = dump_limit;
- }
-
- // Allocate an array to hold the result.
- Handle<FixedArray> list = factory->NewFixedArray(dump_count);
- if (list->IsFailure()) return Object::cast(*list);
-
- // Prefetch some needed symbols.
- Handle<String> id_sym = factory->LookupAsciiSymbol("id");
- Handle<String> count_sym = factory->LookupAsciiSymbol("count");
- Handle<String> size_sym = factory->LookupAsciiSymbol("size");
-
- // Fill the array with the lol details.
- int idx = 0;
- LiveObjectList* lol = first_;
- while ((lol != NULL) && (idx < start_idx)) { // Skip entries before start_idx.
- if (lol->id() != 0) {
- idx++;
- }
- lol = lol->next();
- }
- idx = 0;
- while ((lol != NULL) && (dump_limit != 0)) {
- if (lol->id() != 0) {
- int count;
- int size;
- count = lol->GetTotalObjCountAndSize(&size);
-
- Handle<JSObject> detail =
- factory->NewJSObject(isolate->object_function());
- if (detail->IsFailure()) return Object::cast(*detail);
-
- maybe_result = detail->SetProperty(*id_sym,
- Smi::FromInt(lol->id()),
- NONE,
- kNonStrictMode);
- if (maybe_result->IsFailure()) return maybe_result;
- maybe_result = detail->SetProperty(*count_sym,
- Smi::FromInt(count),
- NONE,
- kNonStrictMode);
- if (maybe_result->IsFailure()) return maybe_result;
- maybe_result = detail->SetProperty(*size_sym,
- Smi::FromInt(size),
- NONE,
- kNonStrictMode);
- if (maybe_result->IsFailure()) return maybe_result;
- list->set(idx++, *detail);
- dump_limit--;
- }
- lol = lol->next();
- }
-
- // Return the result as a JS array.
- Handle<JSObject> lols = factory->NewJSObject(isolate->array_function());
-
- maybe_result = Handle<JSArray>::cast(lols)->SetContent(*list);
- if (maybe_result->IsFailure()) return maybe_result;
-
- Handle<JSObject> result = factory->NewJSObject(isolate->object_function());
- if (result->IsFailure()) return Object::cast(*result);
-
- maybe_result = result->SetProperty(*count_sym,
- Smi::FromInt(total_count),
- NONE,
- kNonStrictMode);
- if (maybe_result->IsFailure()) return maybe_result;
-
- Handle<String> first_sym = factory->LookupAsciiSymbol("first_index");
- maybe_result = result->SetProperty(*first_sym,
- Smi::FromInt(start_idx),
- NONE,
- kNonStrictMode);
- if (maybe_result->IsFailure()) return maybe_result;
-
- Handle<String> lists_sym = factory->LookupAsciiSymbol("lists");
- maybe_result = result->SetProperty(*lists_sym,
- *lols,
- NONE,
- kNonStrictMode);
- if (maybe_result->IsFailure()) return maybe_result;
-
- return *result;
-}
-
-
-// Deletes all captured lols.
-void LiveObjectList::Reset() {
- LiveObjectList* lol = last();
- // Just delete the last. Each lol will delete its prev automatically.
- delete lol;
-
- next_element_id_ = 1;
- list_count_ = 0;
- last_id_ = 0;
- first_ = NULL;
- last_ = NULL;
-}
-
-
-// Gets the object for the specified obj id.
-Object* LiveObjectList::GetObj(int obj_id) {
- Element* element = FindElementFor<int>(GetElementId, obj_id);
- if (element != NULL) {
- return Object::cast(element->obj_);
- }
- return HEAP->undefined_value();
-}
-
-
-// Gets the obj id for the specified address if valid.
-int LiveObjectList::GetObjId(Object* obj) {
- // Make a heap object pointer from the address.
- HeapObject* hobj = HeapObject::cast(obj);
- Element* element = FindElementFor<HeapObject*>(GetElementObj, hobj);
- if (element != NULL) {
- return element->id_;
- }
- return 0; // Invalid address.
-}
-
-
-// Gets the obj id for the specified address if valid.
-Object* LiveObjectList::GetObjId(Handle<String> address) {
- SmartArrayPointer<char> addr_str =
- address->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
-
- Isolate* isolate = Isolate::Current();
-
- // Extract the address value from the string.
- int value =
- static_cast<int>(StringToInt(isolate->unicode_cache(), *address, 16));
- Object* obj = reinterpret_cast<Object*>(value);
- return Smi::FromInt(GetObjId(obj));
-}
-
-
- // Helper visitor for detecting references to a target HeapObject.
-class LolVisitor: public ObjectVisitor {
- public:
- LolVisitor(HeapObject* target, Handle<HeapObject> handle_to_skip)
- : target_(target), handle_to_skip_(handle_to_skip), found_(false) {}
-
- void VisitPointer(Object** p) { CheckPointer(p); }
-
- void VisitPointers(Object** start, Object** end) {
- // Check all HeapObject pointers in [start, end).
- for (Object** p = start; !found() && p < end; p++) CheckPointer(p);
- }
-
- inline bool found() const { return found_; }
- inline bool reset() { return found_ = false; }
-
- private:
- inline void CheckPointer(Object** p) {
- Object* object = *p;
- if (HeapObject::cast(object) == target_) {
- // We may want to skip this handle because the handle may be a local
- // handle in a handle scope in one of our callers. Once we return,
- // that handle will be popped. Hence, we don't want to count it as
- // a root that would have kept the target object alive.
- if (!handle_to_skip_.is_null() &&
- handle_to_skip_.location() == reinterpret_cast<HeapObject**>(p)) {
- return; // Skip this handle.
- }
- found_ = true;
- }
- }
-
- HeapObject* target_;
- Handle<HeapObject> handle_to_skip_;
- bool found_;
-};
-
-
-inline bool AddRootRetainerIfFound(const LolVisitor& visitor,
- LolFilter* filter,
- LiveObjectSummary* summary,
- void (*SetRootFound)(LiveObjectSummary* s),
- int start,
- int dump_limit,
- int* total_count,
- Handle<FixedArray> retainers_arr,
- int* count,
- int* index,
- const char* root_name,
- Handle<String> id_sym,
- Handle<String> desc_sym,
- Handle<String> size_sym,
- Handle<Object> error) {
- HandleScope scope;
-
- // Scratch handles.
- Handle<JSObject> detail;
- Handle<String> desc;
- Handle<HeapObject> retainer;
-
- if (visitor.found()) {
- if (!filter->is_active()) {
- (*total_count)++;
- if (summary) {
- SetRootFound(summary);
- } else if ((*total_count > start) && ((*index) < dump_limit)) {
- (*count)++;
- if (!retainers_arr.is_null()) {
- return AddObjDetail(retainers_arr,
- (*index)++,
- 0,
- retainer,
- root_name,
- id_sym,
- desc_sym,
- size_sym,
- detail,
- desc,
- error);
- }
- }
- }
- }
- return true;
-}
-
-
-inline void SetFoundRoot(LiveObjectSummary* summary) {
- summary->set_found_root();
-}
-
-
-inline void SetFoundWeakRoot(LiveObjectSummary* summary) {
- summary->set_found_weak_root();
-}
-
-
-int LiveObjectList::GetRetainers(Handle<HeapObject> target,
- Handle<JSObject> instance_filter,
- Handle<FixedArray> retainers_arr,
- int start,
- int dump_limit,
- int* total_count,
- LolFilter* filter,
- LiveObjectSummary* summary,
- JSFunction* arguments_function,
- Handle<Object> error) {
- HandleScope scope;
-
- // Scratch handles.
- Handle<JSObject> detail;
- Handle<String> desc;
- Handle<HeapObject> retainer;
-
- Isolate* isolate = Isolate::Current();
- Factory* factory = isolate->factory();
-
- // Prefetch some needed symbols.
- Handle<String> id_sym = factory->LookupAsciiSymbol("id");
- Handle<String> desc_sym = factory->LookupAsciiSymbol("desc");
- Handle<String> size_sym = factory->LookupAsciiSymbol("size");
-
- NoHandleAllocation ha;
- int count = 0;
- int index = 0;
- Handle<JSObject> last_obj;
-
- *total_count = 0;
-
- // Iterate roots.
- LolVisitor lol_visitor(*target, target);
- isolate->heap()->IterateStrongRoots(&lol_visitor, VISIT_ALL);
- if (!AddRootRetainerIfFound(lol_visitor,
- filter,
- summary,
- SetFoundRoot,
- start,
- dump_limit,
- total_count,
- retainers_arr,
- &count,
- &index,
- "<root>",
- id_sym,
- desc_sym,
- size_sym,
- error)) {
- return -1;
- }
-
- lol_visitor.reset();
- isolate->heap()->IterateWeakRoots(&lol_visitor, VISIT_ALL);
- if (!AddRootRetainerIfFound(lol_visitor,
- filter,
- summary,
- SetFoundWeakRoot,
- start,
- dump_limit,
- total_count,
- retainers_arr,
- &count,
- &index,
- "<weak root>",
- id_sym,
- desc_sym,
- size_sym,
- error)) {
- return -1;
- }
-
- // Iterate the live object lists.
- LolIterator it(NULL, last());
- for (it.Init(); !it.Done() && (index < dump_limit); it.Next()) {
- HeapObject* heap_obj = it.Obj();
-
- // Only look at JSObjects.
- if (heap_obj->IsJSObject()) {
- // Skip context extension objects and argument arrays as these are
- // checked in the context of functions using them.
- JSObject* obj = JSObject::cast(heap_obj);
- if (obj->IsJSContextExtensionObject() ||
- obj->map()->constructor() == arguments_function) {
- continue;
- }
-
- // Check if the JS object has a reference to the object looked for.
- if (obj->ReferencesObject(*target)) {
- // Check instance filter if supplied. This is normally used to avoid
- // references from mirror objects (see Runtime_IsInPrototypeChain).
- if (!instance_filter->IsUndefined()) {
- Object* V = obj;
- while (true) {
- Object* prototype = V->GetPrototype();
- if (prototype->IsNull()) {
- break;
- }
- if (*instance_filter == prototype) {
- obj = NULL; // Don't add this object.
- break;
- }
- V = prototype;
- }
- }
-
- if (obj != NULL) {
- // Skip objects that have been filtered out.
- if (filter->Matches(heap_obj)) {
- continue;
- }
-
- // Valid reference found. Add it to the instance array if supplied,
- // and update the count.
- last_obj = Handle<JSObject>(obj);
- (*total_count)++;
-
- if (summary != NULL) {
- summary->Add(heap_obj);
- } else if ((*total_count > start) && (index < dump_limit)) {
- count++;
- if (!retainers_arr.is_null()) {
- retainer = Handle<HeapObject>(heap_obj);
- bool success = AddObjDetail(retainers_arr,
- index++,
- it.Id(),
- retainer,
- NULL,
- id_sym,
- desc_sym,
- size_sym,
- detail,
- desc,
- error);
- if (!success) return -1;
- }
- }
- }
- }
- }
- }
-
- // Check for circular reference only. This can happen when the object is only
- // referenced from mirrors and has a circular reference in which case the
- // object is not really alive and would have been garbage collected if not
- // referenced from the mirror.
-
- if (*total_count == 1 && !last_obj.is_null() && *last_obj == *target) {
- count = 0;
- *total_count = 0;
- }
-
- return count;
-}
-
-
-MaybeObject* LiveObjectList::GetObjRetainers(int obj_id,
- Handle<JSObject> instance_filter,
- bool verbose,
- int start,
- int dump_limit,
- Handle<JSObject> filter_obj) {
- Isolate* isolate = Isolate::Current();
- Factory* factory = isolate->factory();
- Heap* heap = isolate->heap();
-
- HandleScope scope(isolate);
-
- // Get the target object.
- HeapObject* heap_obj = HeapObject::cast(GetObj(obj_id));
- if (heap_obj == heap->undefined_value()) {
- return heap_obj;
- }
-
- Handle<HeapObject> target = Handle<HeapObject>(heap_obj);
-
- // Get the constructor function for context extension and arguments array.
- JSObject* arguments_boilerplate =
- isolate->context()->native_context()->arguments_boilerplate();
- JSFunction* arguments_function =
- JSFunction::cast(arguments_boilerplate->map()->constructor());
-
- Handle<JSFunction> args_function = Handle<JSFunction>(arguments_function);
- LolFilter filter(filter_obj);
-
- if (!verbose) {
- RetainersSummaryWriter writer(target, instance_filter, args_function);
- return SummarizePrivate(&writer, &filter, true);
-
- } else {
- RetainersDumpWriter writer(target, instance_filter, args_function);
- Object* body_obj;
- MaybeObject* maybe_result =
- DumpPrivate(&writer, start, dump_limit, &filter);
- if (!maybe_result->ToObject(&body_obj)) {
- return maybe_result;
- }
-
- // Set body.id.
- Handle<JSObject> body = Handle<JSObject>(JSObject::cast(body_obj));
- Handle<String> id_sym = factory->LookupAsciiSymbol("id");
- maybe_result = body->SetProperty(*id_sym,
- Smi::FromInt(obj_id),
- NONE,
- kNonStrictMode);
- if (maybe_result->IsFailure()) return maybe_result;
-
- return *body;
- }
-}
-
-
-Object* LiveObjectList::PrintObj(int obj_id) {
- Object* obj = GetObj(obj_id);
- if (!obj) {
- return HEAP->undefined_value();
- }
-
- EmbeddedVector<char, 128> temp_filename;
- static int temp_count = 0;
- const char* path_prefix = ".";
-
- Isolate* isolate = Isolate::Current();
- Factory* factory = isolate->factory();
- Heap* heap = isolate->heap();
-
- if (FLAG_lol_workdir) {
- path_prefix = FLAG_lol_workdir;
- }
- OS::SNPrintF(temp_filename, "%s/lol-print-%d", path_prefix, ++temp_count);
-
- FILE* f = OS::FOpen(temp_filename.start(), "w+");
-
- PrintF(f, "@%d ", LiveObjectList::GetObjId(obj));
-#ifdef OBJECT_PRINT
-#ifdef INSPECTOR
- Inspector::DumpObjectType(f, obj);
-#endif // INSPECTOR
- PrintF(f, "\n");
- obj->Print(f);
-#else // !OBJECT_PRINT
- obj->ShortPrint(f);
-#endif // !OBJECT_PRINT
- PrintF(f, "\n");
- Flush(f);
- fclose(f);
-
- // Create a string from the temp_file.
- // Note: the mmapped resource will take care of closing the file.
- MemoryMappedExternalResource* resource =
- new MemoryMappedExternalResource(temp_filename.start(), true);
- if (resource->exists() && !resource->is_empty()) {
- ASSERT(resource->IsAscii());
- Handle<String> dump_string =
- factory->NewExternalStringFromAscii(resource);
- heap->external_string_table()->AddString(*dump_string);
- return *dump_string;
- } else {
- delete resource;
- }
- return HEAP->undefined_value();
-}
-
-
-class LolPathTracer: public PathTracer {
- public:
- LolPathTracer(FILE* out,
- Object* search_target,
- WhatToFind what_to_find)
- : PathTracer(search_target, what_to_find, VISIT_ONLY_STRONG), out_(out) {}
-
- private:
- void ProcessResults();
-
- FILE* out_;
-};
-
-
-void LolPathTracer::ProcessResults() {
- if (found_target_) {
- PrintF(out_, "=====================================\n");
- PrintF(out_, "==== Path to object ====\n");
- PrintF(out_, "=====================================\n\n");
-
- ASSERT(!object_stack_.is_empty());
- Object* prev = NULL;
- for (int i = 0, index = 0; i < object_stack_.length(); i++) {
- Object* obj = object_stack_[i];
-
- // Skip this object if it is basically the internals of the
- // previous object (which would have dumped its details already).
- if (prev && prev->IsJSObject() &&
- (obj != search_target_)) {
- JSObject* jsobj = JSObject::cast(prev);
- if (obj->IsFixedArray() &&
- jsobj->properties() == FixedArray::cast(obj)) {
- // Skip this one because it would have been printed as the
- // properties of the last object already.
- continue;
- } else if (obj->IsHeapObject() &&
- jsobj->elements() == HeapObject::cast(obj)) {
- // Skip this one because it would have been printed as the
- // elements of the last object already.
- continue;
- }
- }
-
- // Print a connecting arrow.
- if (i > 0) PrintF(out_, "\n |\n |\n V\n\n");
-
- // Print the object index.
- PrintF(out_, "[%d] ", ++index);
-
- // Print the LOL object ID:
- int id = LiveObjectList::GetObjId(obj);
- if (id > 0) PrintF(out_, "@%d ", id);
-
-#ifdef OBJECT_PRINT
-#ifdef INSPECTOR
- Inspector::DumpObjectType(out_, obj);
-#endif // INSPECTOR
- PrintF(out_, "\n");
- obj->Print(out_);
-#else // !OBJECT_PRINT
- obj->ShortPrint(out_);
- PrintF(out_, "\n");
-#endif // !OBJECT_PRINT
- Flush(out_);
- }
- PrintF(out_, "\n");
- PrintF(out_, "=====================================\n\n");
- Flush(out_);
- }
-}
-
-
-Object* LiveObjectList::GetPathPrivate(HeapObject* obj1, HeapObject* obj2) {
- EmbeddedVector<char, 128> temp_filename;
- static int temp_count = 0;
- const char* path_prefix = ".";
-
- if (FLAG_lol_workdir) {
- path_prefix = FLAG_lol_workdir;
- }
- OS::SNPrintF(temp_filename, "%s/lol-getpath-%d", path_prefix, ++temp_count);
-
- FILE* f = OS::FOpen(temp_filename.start(), "w+");
-
- Isolate* isolate = Isolate::Current();
- Factory* factory = isolate->factory();
- Heap* heap = isolate->heap();
-
- // Save the previous verbosity.
- bool prev_verbosity = FLAG_use_verbose_printer;
- FLAG_use_verbose_printer = false;
-
- // Dump the paths.
- {
- // The tracer needs to be scoped because its usage asserts no allocation,
- // and we need to allocate the result string below.
- LolPathTracer tracer(f, obj2, LolPathTracer::FIND_FIRST);
-
- bool found = false;
- if (obj1 == NULL) {
- // Check for ObjectGroups that reference this object.
- // TODO(mlam): refactor this to be more modular.
- {
- List<ObjectGroup*>* groups = isolate->global_handles()->object_groups();
- for (int i = 0; i < groups->length(); i++) {
- ObjectGroup* group = groups->at(i);
- if (group == NULL) continue;
-
- bool found_group = false;
- for (size_t j = 0; j < group->length_; j++) {
- Object* object = *(group->objects_[j]);
- HeapObject* hobj = HeapObject::cast(object);
- if (obj2 == hobj) {
- found_group = true;
- break;
- }
- }
-
- if (found_group) {
- PrintF(f,
- "obj %p is a member of object group %p {\n",
- reinterpret_cast<void*>(obj2),
- reinterpret_cast<void*>(group));
- for (size_t j = 0; j < group->length_; j++) {
- Object* object = *(group->objects_[j]);
- if (!object->IsHeapObject()) continue;
-
- HeapObject* hobj = HeapObject::cast(object);
- int id = GetObjId(hobj);
- if (id != 0) {
- PrintF(f, " @%d:", id);
- } else {
- PrintF(f, " <no id>:");
- }
-
- char buffer[512];
- GenerateObjectDesc(hobj, buffer, sizeof(buffer));
- PrintF(f, " %s", buffer);
- if (hobj == obj2) {
- PrintF(f, " <===");
- }
- PrintF(f, "\n");
- }
- PrintF(f, "}\n");
- }
- }
- }
-
- PrintF(f, "path from roots to obj %p\n", reinterpret_cast<void*>(obj2));
- heap->IterateRoots(&tracer, VISIT_ONLY_STRONG);
- found = tracer.found();
-
- if (!found) {
- PrintF(f, " No paths found. Checking symbol tables ...\n");
- SymbolTable* symbol_table = HEAP->raw_unchecked_symbol_table();
- tracer.VisitPointers(reinterpret_cast<Object**>(&symbol_table),
- reinterpret_cast<Object**>(&symbol_table)+1);
- found = tracer.found();
- if (!found) {
- symbol_table->IteratePrefix(&tracer);
- found = tracer.found();
- }
- }
-
- if (!found) {
- PrintF(f, " No paths found. Checking weak roots ...\n");
- // Check weak refs next.
- isolate->global_handles()->IterateWeakRoots(&tracer);
- found = tracer.found();
- }
-
- } else {
- PrintF(f, "path from obj %p to obj %p:\n",
- reinterpret_cast<void*>(obj1), reinterpret_cast<void*>(obj2));
- tracer.TracePathFrom(reinterpret_cast<Object**>(&obj1));
- found = tracer.found();
- }
-
- if (!found) {
- PrintF(f, " No paths found\n\n");
- }
- }
-
- // Flush and clean up the dumped file.
- Flush(f);
- fclose(f);
-
- // Restore the previous verbosity.
- FLAG_use_verbose_printer = prev_verbosity;
-
- // Create a string from the temp_file.
- // Note: the mmapped resource will take care of closing the file.
- MemoryMappedExternalResource* resource =
- new MemoryMappedExternalResource(temp_filename.start(), true);
- if (resource->exists() && !resource->is_empty()) {
- ASSERT(resource->IsAscii());
- Handle<String> path_string =
- factory->NewExternalStringFromAscii(resource);
- heap->external_string_table()->AddString(*path_string);
- return *path_string;
- } else {
- delete resource;
- }
- return heap->undefined_value();
-}
-
-
-Object* LiveObjectList::GetPath(int obj_id1,
- int obj_id2,
- Handle<JSObject> instance_filter) {
- HandleScope scope;
-
- // Get the target object.
- HeapObject* obj1 = NULL;
- if (obj_id1 != 0) {
- obj1 = HeapObject::cast(GetObj(obj_id1));
- if (obj1 == HEAP->undefined_value()) {
- return obj1;
- }
- }
-
- HeapObject* obj2 = HeapObject::cast(GetObj(obj_id2));
- if (obj2 == HEAP->undefined_value()) {
- return obj2;
- }
-
- return GetPathPrivate(obj1, obj2);
-}
-
-
-void LiveObjectList::DoProcessNonLive(HeapObject* obj) {
- // We should only be called if we have at least one lol to search.
- ASSERT(last() != NULL);
- Element* element = last()->Find(obj);
- if (element != NULL) {
- NullifyNonLivePointer(&element->obj_);
- }
-}
-
-
-void LiveObjectList::IterateElementsPrivate(ObjectVisitor* v) {
- LiveObjectList* lol = last();
- while (lol != NULL) {
- Element* elements = lol->elements_;
- int count = lol->obj_count_;
- for (int i = 0; i < count; i++) {
- HeapObject** p = &elements[i].obj_;
- v->VisitPointer(reinterpret_cast<Object** >(p));
- }
- lol = lol->prev_;
- }
-}
-
-
-// Purpose: Called by GCEpilogue to purge duplicates. Not to be called by
-// anyone else.
-void LiveObjectList::PurgeDuplicates() {
- bool is_sorted = false;
- LiveObjectList* lol = last();
- if (!lol) {
- return; // Nothing to purge.
- }
-
- int total_count = lol->TotalObjCount();
- if (!total_count) {
- return; // Nothing to purge.
- }
-
- Element* elements = NewArray<Element>(total_count);
- int count = 0;
-
- // Copy all the object elements into a consecutive array.
- while (lol) {
- memcpy(&elements[count], lol->elements_, lol->obj_count_ * sizeof(Element));
- count += lol->obj_count_;
- lol = lol->prev_;
- }
- qsort(elements, total_count, sizeof(Element),
- reinterpret_cast<RawComparer>(CompareElement));
-
- ASSERT(count == total_count);
-
- // Iterate over all objects in the consolidated list and check for dups.
- total_count--;
- for (int i = 0; i < total_count; ) {
- Element* curr = &elements[i];
- HeapObject* curr_obj = curr->obj_;
- int j = i+1;
- bool done = false;
-
- while (!done && (j < total_count)) {
- // Process if the element's object is still live after the current GC.
- // Non-live objects will be converted to SMIs i.e. not HeapObjects.
- if (curr_obj->IsHeapObject()) {
- Element* next = &elements[j];
- HeapObject* next_obj = next->obj_;
- if (next_obj->IsHeapObject()) {
- if (curr_obj != next_obj) {
- done = true;
- continue; // Live object but no match. Move on.
- }
-
- // NOTE: we've just GCed the LOLs. Hence, they are no longer sorted.
- // Since we've detected at least one duplicate to search for, we'll
- // sort the lols to enable the use of NullifyMostRecent() below. We only
- // need to sort once (with one exception ... see below).
- if (!is_sorted) {
- SortAll();
- is_sorted = true;
- }
-
- // We have a match. Need to nullify the most recent ref to this
- // object. We'll keep the oldest ref:
- // Note: we will nullify the element record in the LOL
- // database, not in the local sorted copy of the elements.
- NullifyMostRecent(curr_obj);
- }
- }
- // Either the object was already marked for purging, or we just marked
- // it. Either way, if there's more than one dup, then we need to check
- // the next element for another possible dup against the current as well
- // before we move on. So, here we go.
- j++;
- }
-
- // We can move on to checking the match on the next element.
- i = j;
- }
-
- DeleteArray<Element>(elements);
-}
-
-
- // Purpose: Purges dead objects and re-sorts the LOLs.
-void LiveObjectList::GCEpiloguePrivate() {
- // Note: During the GC, ConsStrings may be collected and pointers may be
- // forwarded to their constituent strings. As a result, we may find duplicate
- // object references in the LOL list.
- // Another common way we get dups is that free chunks that have been swept
- // in the oldGen heap may be kept as ByteArray objects in a free list.
- //
- // When we promote live objects from the youngGen, the object may be moved
- // to the start of these free chunks. Since there is no free or move event
- // for the free chunks, their addresses will show up 2 times: once for their
- // original free ByteArray selves, and once for the newly promoted youngGen
- // object. Hence, we can get a duplicate address in the LOL again.
- //
- // We need to eliminate these dups because the LOL implementation expects to
- // only have at most one unique LOL reference to any object at any time.
- PurgeDuplicates();
-
- // After the GC, sweep away all free'd Elements and compact.
- LiveObjectList* prev = NULL;
- LiveObjectList* next = NULL;
-
- // Iterating from the youngest lol to the oldest lol.
- for (LiveObjectList* lol = last(); lol; lol = prev) {
- Element* elements = lol->elements_;
- prev = lol->prev(); // Save the prev.
-
- // Remove any references to collected objects.
- int i = 0;
- while (i < lol->obj_count_) {
- Element& element = elements[i];
- if (!element.obj_->IsHeapObject()) {
- // If the HeapObject address was converted into an SMI, then this
- // is a dead object. Copy the last element over this one.
- element = elements[lol->obj_count_ - 1];
- lol->obj_count_--;
- // We've just moved the last element into this index. We'll revisit
- // this index again. Hence, no need to increment the iterator.
- } else {
- i++; // Look at the next element next.
- }
- }
-
- int new_count = lol->obj_count_;
-
- // Check if there are any more elements to keep after purging the dead ones.
- if (new_count == 0) {
- DeleteArray<Element>(elements);
- lol->elements_ = NULL;
- lol->capacity_ = 0;
- ASSERT(lol->obj_count_ == 0);
-
- // If the list is also invisible, then clean up the list as well.
- if (lol->id_ == 0) {
- // Point the next lol's prev to this lol's prev.
- if (next) {
- next->prev_ = lol->prev_;
- } else {
- last_ = lol->prev_;
- }
-
- // Delete this now empty and invisible lol.
- delete lol;
-
- // Don't make 'next' point to this lol since it has just been deleted.
- // Leave 'next' pointing at the lol it already refers to.
- continue;
- }
-
- } else {
- // If the obj_count_ is less than the capacity and the difference is
- // greater than a specified threshold, then we should shrink the list.
- int diff = lol->capacity_ - new_count;
- const int kMaxUnusedSpace = 64;
- if (diff > kMaxUnusedSpace) { // Threshold for shrinking.
- // Shrink the list.
- Element* new_elements = NewArray<Element>(new_count);
- memcpy(new_elements, elements, new_count * sizeof(Element));
-
- DeleteArray<Element>(elements);
- lol->elements_ = new_elements;
- lol->capacity_ = new_count;
- }
- ASSERT(lol->obj_count_ == new_count);
-
- lol->Sort(); // We've moved objects. Re-sort in case.
- }
-
- // Save the next (for the previous link) in case we need it later.
- next = lol;
- }
-
-#ifdef VERIFY_LOL
- if (FLAG_verify_lol) {
- Verify();
- }
-#endif
-}
-
-
-#ifdef VERIFY_LOL
-void LiveObjectList::Verify(bool match_heap_exactly) {
- OS::Print("Verifying the LiveObjectList database:\n");
-
- LiveObjectList* lol = last();
- if (lol == NULL) {
- OS::Print(" No lol database to verify\n");
- return;
- }
-
- OS::Print(" Preparing the lol database ...\n");
- int total_count = lol->TotalObjCount();
-
- Element* elements = NewArray<Element>(total_count);
- int count = 0;
-
- // Copy all the object elements into a consecutive array.
- OS::Print(" Copying the lol database ...\n");
- while (lol != NULL) {
- memcpy(&elements[count], lol->elements_, lol->obj_count_ * sizeof(Element));
- count += lol->obj_count_;
- lol = lol->prev_;
- }
- qsort(elements, total_count, sizeof(Element),
- reinterpret_cast<RawComparer>(CompareElement));
-
- ASSERT(count == total_count);
-
- // Iterate over all objects in the heap and check for:
- // 1. object in LOL but not in heap i.e. error.
- // 2. object in heap but not in LOL (possibly not an error). Usually
- // just means that we don't have a capture of the latest heap.
- // That is unless we did this verify immediately after a capture,
- // and specified match_heap_exactly = true.
-
- int number_of_heap_objects = 0;
- int number_of_matches = 0;
- int number_not_in_heap = total_count;
- int number_not_in_lol = 0;
-
- OS::Print(" Start verify ...\n");
- OS::Print(" Verifying ...");
- Flush();
- HeapIterator iterator;
- HeapObject* heap_obj = NULL;
- while ((heap_obj = iterator.next()) != NULL) {
- number_of_heap_objects++;
-
- // Check if the heap_obj is in the lol.
- Element key;
- key.obj_ = heap_obj;
-
- Element* result = reinterpret_cast<Element*>(
- bsearch(&key, elements, total_count, sizeof(Element),
- reinterpret_cast<RawComparer>(CompareElement)));
-
- if (result != NULL) {
- number_of_matches++;
- number_not_in_heap--;
- // Mark it as found by changing it into a SMI (mask off low bit).
- // Note: we cannot use HeapObject::cast() here because it asserts that
- // the HeapObject bit is set on the address, but we're unsetting it on
- // purpose here for our marking.
- result->obj_ = reinterpret_cast<HeapObject*>(heap_obj->address());
-
- } else {
- number_not_in_lol++;
- if (match_heap_exactly) {
- OS::Print("heap object %p NOT in lol database\n", heap_obj);
- }
- }
- // Show some sign of life.
- if (number_of_heap_objects % 1000 == 0) {
- OS::Print(".");
- fflush(stdout);
- }
- }
- OS::Print("\n");
-
- // Reporting lol objects not found in the heap.
- if (number_not_in_heap) {
- int found = 0;
- for (int i = 0; (i < total_count) && (found < number_not_in_heap); i++) {
- Element& element = elements[i];
- if (element.obj_->IsHeapObject()) {
- OS::Print("lol database object [%d of %d] %p NOT in heap\n",
- i, total_count, element.obj_);
- found++;
- }
- }
- }
-
- DeleteArray<Element>(elements);
-
- OS::Print("number of objects in lol database %d\n", total_count);
- OS::Print("number of heap objects .......... %d\n", number_of_heap_objects);
- OS::Print("number of matches ............... %d\n", number_of_matches);
- OS::Print("number NOT in heap .............. %d\n", number_not_in_heap);
- OS::Print("number NOT in lol database ...... %d\n", number_not_in_lol);
-
- if (number_of_matches != total_count) {
- OS::Print(" *** ERROR: "
- "NOT all lol database objects match heap objects.\n");
- }
- if (number_not_in_heap != 0) {
- OS::Print(" *** ERROR: %d lol database objects not found in heap.\n",
- number_not_in_heap);
- }
- if (match_heap_exactly) {
- if (!(number_not_in_lol == 0)) {
- OS::Print(" *** ERROR: %d heap objects NOT found in lol database.\n",
- number_not_in_lol);
- }
- }
-
- ASSERT(number_of_matches == total_count);
- ASSERT(number_not_in_heap == 0);
- ASSERT(number_not_in_lol == (number_of_heap_objects - total_count));
- if (match_heap_exactly) {
- ASSERT(total_count == number_of_heap_objects);
- ASSERT(number_not_in_lol == 0);
- }
-
- OS::Print(" Verify the lol database is sorted ...\n");
- lol = last();
- while (lol != NULL) {
- Element* elements = lol->elements_;
- for (int i = 0; i < lol->obj_count_ - 1; i++) {
- if (elements[i].obj_ >= elements[i+1].obj_) {
- OS::Print(" *** ERROR: lol %p obj[%d] %p > obj[%d] %p\n",
- lol, i, elements[i].obj_, i+1, elements[i+1].obj_);
- }
- }
- lol = lol->prev_;
- }
-
- OS::Print(" DONE verifying.\n\n\n");
-}
-
-
-void LiveObjectList::VerifyNotInFromSpace() {
- OS::Print("VerifyNotInFromSpace() ...\n");
- LolIterator it(NULL, last());
- Heap* heap = ISOLATE->heap();
- int i = 0;
- for (it.Init(); !it.Done(); it.Next()) {
- HeapObject* heap_obj = it.Obj();
- if (heap->InFromSpace(heap_obj)) {
- OS::Print(" ERROR: VerifyNotInFromSpace: [%d] obj %p in From space %p\n",
- i++, heap_obj, Heap::new_space()->FromSpaceStart());
- }
- }
-}
-#endif // VERIFY_LOL
-
-
-} } // namespace v8::internal
-
-#endif // LIVE_OBJECT_LIST
diff --git a/deps/v8/src/liveobjectlist.h b/deps/v8/src/liveobjectlist.h
deleted file mode 100644
index 1aa919605..000000000
--- a/deps/v8/src/liveobjectlist.h
+++ /dev/null
@@ -1,319 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_LIVEOBJECTLIST_H_
-#define V8_LIVEOBJECTLIST_H_
-
-#include "v8.h"
-
-#include "checks.h"
-#include "heap.h"
-#include "objects.h"
-#include "globals.h"
-
-namespace v8 {
-namespace internal {
-
-#ifdef LIVE_OBJECT_LIST
-
-#ifdef DEBUG
-// The following symbol when defined enables thorough verification of lol data.
-// FLAG_verify_lol will also need to set to true to enable the verification.
-#define VERIFY_LOL
-#endif
-
-
-typedef int LiveObjectType;
-class LolFilter;
-class LiveObjectSummary;
-class DumpWriter;
-class SummaryWriter;
-
-
-// The LiveObjectList is both a mechanism for tracking a live capture of
-// objects in the JS heap, as well as is the data structure which represents
-// each of those captures. Unlike a snapshot, the lol is live. For example,
-// if an object in a captured lol dies and is collected by the GC, the lol
-// will reflect that the object is no longer available. The term
-// LiveObjectList (and lol) is used to describe both the mechanism and the
-// data structure depending on context of use.
-//
-// In captured lols, objects are tracked using their address and an object id.
-// The object id is unique. Once assigned to an object, the object id can never
-// be assigned to another object. That is unless all captured lols are deleted
-// which allows the user to start over with a fresh set of lols and object ids.
-// The uniqueness of the object ids allows the user to track specific objects
-// and inspect its longevity while debugging JS code in execution.
-//
-// The lol comes with utility functions to capture, dump, summarize, and diff
-// captured lols amongst other functionality. These functionality are
-// accessible via the v8 debugger interface.
-class LiveObjectList {
- public:
- inline static void GCEpilogue();
- inline static void GCPrologue();
- inline static void IterateElements(ObjectVisitor* v);
- inline static void ProcessNonLive(HeapObject* obj);
- inline static void UpdateReferencesForScavengeGC();
-
- // Note: LOLs can be listed by calling Dump(0, <lol id>), and 2 LOLs can be
- // compared/diff'ed using Dump(<lol id1>, <lol id2>, ...). This will yield
- // a verbose dump of all the objects in the resultant lists.
- // Similarly, a summarized result of a LOL listing or a diff can be
- // attained using the Summarize(0, <lol id>) and Summarize(<lol id1,
- // <lol id2>, ...) respectively.
-
- static MaybeObject* Capture();
- static bool Delete(int id);
- static MaybeObject* Dump(int id1,
- int id2,
- int start_idx,
- int dump_limit,
- Handle<JSObject> filter_obj);
- static MaybeObject* Info(int start_idx, int dump_limit);
- static MaybeObject* Summarize(int id1, int id2, Handle<JSObject> filter_obj);
-
- static void Reset();
- static Object* GetObj(int obj_id);
- static int GetObjId(Object* obj);
- static Object* GetObjId(Handle<String> address);
- static MaybeObject* GetObjRetainers(int obj_id,
- Handle<JSObject> instance_filter,
- bool verbose,
- int start,
- int count,
- Handle<JSObject> filter_obj);
-
- static Object* GetPath(int obj_id1,
- int obj_id2,
- Handle<JSObject> instance_filter);
- static Object* PrintObj(int obj_id);
-
- private:
- struct Element {
- int id_;
- HeapObject* obj_;
- };
-
- explicit LiveObjectList(LiveObjectList* prev, int capacity);
- ~LiveObjectList();
-
- static void GCEpiloguePrivate();
- static void IterateElementsPrivate(ObjectVisitor* v);
-
- static void DoProcessNonLive(HeapObject* obj);
-
- static int CompareElement(const Element* a, const Element* b);
-
- static Object* GetPathPrivate(HeapObject* obj1, HeapObject* obj2);
-
- static int GetRetainers(Handle<HeapObject> target,
- Handle<JSObject> instance_filter,
- Handle<FixedArray> retainers_arr,
- int start,
- int dump_limit,
- int* total_count,
- LolFilter* filter,
- LiveObjectSummary* summary,
- JSFunction* arguments_function,
- Handle<Object> error);
-
- static MaybeObject* DumpPrivate(DumpWriter* writer,
- int start,
- int dump_limit,
- LolFilter* filter);
- static MaybeObject* SummarizePrivate(SummaryWriter* writer,
- LolFilter* filter,
- bool is_tracking_roots);
-
- static bool NeedLOLProcessing() { return (last() != NULL); }
- static void NullifyNonLivePointer(HeapObject** p) {
- // Mask out the low bit that marks this as a heap object. We'll use this
- // cleared bit as an indicator that this pointer needs to be collected.
- //
- // Meanwhile, we still preserve its approximate value so that we don't
- // have to resort the elements list all the time.
- //
- // Note: Doing so also makes this HeapObject* look like an SMI. Hence,
- // GC pointer updater will ignore it when it gets scanned.
- *p = reinterpret_cast<HeapObject*>((*p)->address());
- }
-
- LiveObjectList* prev() { return prev_; }
- LiveObjectList* next() { return next_; }
- int id() { return id_; }
-
- static int list_count() { return list_count_; }
- static LiveObjectList* last() { return last_; }
-
- inline static LiveObjectList* FindLolForId(int id, LiveObjectList* start_lol);
- int TotalObjCount() { return GetTotalObjCountAndSize(NULL); }
- int GetTotalObjCountAndSize(int* size_p);
-
- bool Add(HeapObject* obj);
- Element* Find(HeapObject* obj);
- static void NullifyMostRecent(HeapObject* obj);
- void Sort();
- static void SortAll();
-
- static void PurgeDuplicates(); // Only to be called by GCEpilogue.
-
-#ifdef VERIFY_LOL
- static void Verify(bool match_heap_exactly = false);
- static void VerifyNotInFromSpace();
-#endif
-
- // Iterates the elements in every lol and returns the one that matches the
- // specified key. If no matching element is found, then it returns NULL.
- template <typename T>
- inline static LiveObjectList::Element*
- FindElementFor(T (*GetValue)(LiveObjectList::Element*), T key);
-
- inline static int GetElementId(Element* element);
- inline static HeapObject* GetElementObj(Element* element);
-
- // Instance fields.
- LiveObjectList* prev_;
- LiveObjectList* next_;
- int id_;
- int capacity_;
- int obj_count_;
- Element* elements_;
-
- // Statics for managing all the lists.
- static uint32_t next_element_id_;
- static int list_count_;
- static int last_id_;
- static LiveObjectList* first_;
- static LiveObjectList* last_;
-
- friend class LolIterator;
- friend class LolForwardIterator;
- friend class LolDumpWriter;
- friend class RetainersDumpWriter;
- friend class RetainersSummaryWriter;
- friend class UpdateLiveObjectListVisitor;
-};
-
-
-// Helper class for updating the LiveObjectList HeapObject pointers.
-class UpdateLiveObjectListVisitor: public ObjectVisitor {
- public:
- void VisitPointer(Object** p) { UpdatePointer(p); }
-
- void VisitPointers(Object** start, Object** end) {
- // Copy all HeapObject pointers in [start, end).
- for (Object** p = start; p < end; p++) UpdatePointer(p);
- }
-
- private:
- // Based on Heap::ScavengeObject() but only does forwarding of pointers
- // to live new space objects, and not actually keep them alive.
- void UpdatePointer(Object** p) {
- Object* object = *p;
- if (!HEAP->InNewSpace(object)) return;
-
- HeapObject* heap_obj = HeapObject::cast(object);
- ASSERT(HEAP->InFromSpace(heap_obj));
-
- // We use the first word (where the map pointer usually is) of a heap
- // object to record the forwarding pointer. A forwarding pointer can
- // point to an old space, the code space, or the to space of the new
- // generation.
- MapWord first_word = heap_obj->map_word();
-
- // If the first word is a forwarding address, the object has already been
- // copied.
- if (first_word.IsForwardingAddress()) {
- *p = first_word.ToForwardingAddress();
- return;
-
- // Else, it's a dead object.
- } else {
- LiveObjectList::NullifyNonLivePointer(reinterpret_cast<HeapObject**>(p));
- }
- }
-};
-
-
-#else // !LIVE_OBJECT_LIST
-
-
-class LiveObjectList {
- public:
- inline static void GCEpilogue() {}
- inline static void GCPrologue() {}
- inline static void IterateElements(ObjectVisitor* v) {}
- inline static void ProcessNonLive(HeapObject* obj) {}
- inline static void UpdateReferencesForScavengeGC() {}
-
- inline static MaybeObject* Capture() { return HEAP->undefined_value(); }
- inline static bool Delete(int id) { return false; }
- inline static MaybeObject* Dump(int id1,
- int id2,
- int start_idx,
- int dump_limit,
- Handle<JSObject> filter_obj) {
- return HEAP->undefined_value();
- }
- inline static MaybeObject* Info(int start_idx, int dump_limit) {
- return HEAP->undefined_value();
- }
- inline static MaybeObject* Summarize(int id1,
- int id2,
- Handle<JSObject> filter_obj) {
- return HEAP->undefined_value();
- }
-
- inline static void Reset() {}
- inline static Object* GetObj(int obj_id) { return HEAP->undefined_value(); }
- inline static Object* GetObjId(Handle<String> address) {
- return HEAP->undefined_value();
- }
- inline static MaybeObject* GetObjRetainers(int obj_id,
- Handle<JSObject> instance_filter,
- bool verbose,
- int start,
- int count,
- Handle<JSObject> filter_obj) {
- return HEAP->undefined_value();
- }
-
- inline static Object* GetPath(int obj_id1,
- int obj_id2,
- Handle<JSObject> instance_filter) {
- return HEAP->undefined_value();
- }
- inline static Object* PrintObj(int obj_id) { return HEAP->undefined_value(); }
-};
-
-
-#endif // LIVE_OBJECT_LIST
-
-} } // namespace v8::internal
-
-#endif // V8_LIVEOBJECTLIST_H_
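
The NullifyNonLivePointer() helper removed above leans on V8's pointer tagging: heap object pointers carry a 1 in their low bit while small integers (SMIs) carry a 0, so rewriting a slot with the untagged address() makes the GC's pointer updater skip it. A minimal standalone sketch of that trick follows; the tag constant and helper names are illustrative assumptions, not V8 code.

#include <cassert>
#include <cstdint>

// Assumes the usual V8 convention: a 1-bit heap-object tag in the low bit.
const uintptr_t kHeapObjectTag = 1;

static bool LooksLikeHeapObject(uintptr_t word) {
  return (word & kHeapObjectTag) != 0;
}

int main() {
  uintptr_t tagged = 0x10000 | kHeapObjectTag;     // a fake tagged heap pointer
  uintptr_t nullified = tagged & ~kHeapObjectTag;  // what address() yields
  assert(LooksLikeHeapObject(tagged));
  assert(!LooksLikeHeapObject(nullified));  // reads as an SMI, so the GC
  return 0;                                 // pointer updater ignores it
}
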
diff --git a/deps/v8/src/log-inl.h b/deps/v8/src/log-inl.h
index 8aebbc7dd..7f653cb72 100644
--- a/deps/v8/src/log-inl.h
+++ b/deps/v8/src/log-inl.h
@@ -29,7 +29,6 @@
#define V8_LOG_INL_H_
#include "log.h"
-#include "cpu-profiler.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/log-utils.cc b/deps/v8/src/log-utils.cc
index 7bd7baa2d..909d4a513 100644
--- a/deps/v8/src/log-utils.cc
+++ b/deps/v8/src/log-utils.cc
@@ -35,27 +35,18 @@ namespace internal {
const char* const Log::kLogToTemporaryFile = "&";
+const char* const Log::kLogToConsole = "-";
Log::Log(Logger* logger)
: is_stopped_(false),
output_handle_(NULL),
- ll_output_handle_(NULL),
- mutex_(NULL),
message_buffer_(NULL),
logger_(logger) {
}
-static void AddIsolateIdIfNeeded(StringStream* stream) {
- Isolate* isolate = Isolate::Current();
- if (isolate->IsDefaultIsolate()) return;
- stream->Add("isolate-%p-", isolate);
-}
-
-
-void Log::Initialize() {
- mutex_ = OS::CreateMutex();
+void Log::Initialize(const char* log_file_name) {
message_buffer_ = NewArray<char>(kMessageBufferSize);
// --log-all enables all the log flags.
@@ -67,69 +58,20 @@ void Log::Initialize() {
FLAG_log_suspect = true;
FLAG_log_handles = true;
FLAG_log_regexp = true;
+ FLAG_log_internal_timer_events = true;
}
// --prof implies --log-code.
if (FLAG_prof) FLAG_log_code = true;
- // --prof_lazy controls --log-code, implies --noprof_auto.
- if (FLAG_prof_lazy) {
- FLAG_log_code = false;
- FLAG_prof_auto = false;
- }
-
- bool open_log_file = FLAG_log || FLAG_log_runtime || FLAG_log_api
- || FLAG_log_code || FLAG_log_gc || FLAG_log_handles || FLAG_log_suspect
- || FLAG_log_regexp || FLAG_log_state_changes || FLAG_ll_prof;
-
// If we're logging anything, we need to open the log file.
- if (open_log_file) {
- if (strcmp(FLAG_logfile, "-") == 0) {
+ if (Log::InitLogAtStart()) {
+ if (strcmp(log_file_name, kLogToConsole) == 0) {
OpenStdout();
- } else if (strcmp(FLAG_logfile, kLogToTemporaryFile) == 0) {
+ } else if (strcmp(log_file_name, kLogToTemporaryFile) == 0) {
OpenTemporaryFile();
} else {
- if (strchr(FLAG_logfile, '%') != NULL ||
- !Isolate::Current()->IsDefaultIsolate()) {
- // If there's a '%' in the log file name we have to expand
- // placeholders.
- HeapStringAllocator allocator;
- StringStream stream(&allocator);
- AddIsolateIdIfNeeded(&stream);
- for (const char* p = FLAG_logfile; *p; p++) {
- if (*p == '%') {
- p++;
- switch (*p) {
- case '\0':
- // If there's a % at the end of the string we back up
- // one character so we can escape the loop properly.
- p--;
- break;
- case 't': {
- // %t expands to the current time in milliseconds.
- double time = OS::TimeCurrentMillis();
- stream.Add("%.0f", FmtElm(time));
- break;
- }
- case '%':
- // %% expands (contracts really) to %.
- stream.Put('%');
- break;
- default:
- // All other %'s expand to themselves.
- stream.Put('%');
- stream.Put(*p);
- break;
- }
- } else {
- stream.Put(*p);
- }
- }
- SmartArrayPointer<const char> expanded = stream.ToCString();
- OpenFile(*expanded);
- } else {
- OpenFile(FLAG_logfile);
- }
+ OpenFile(log_file_name);
}
}
}
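
The removed branch above expanded placeholders in --logfile names: "%t" became the current time in milliseconds, "%%" collapsed to "%", and any other "%x" was kept verbatim; the new code simply passes log_file_name through. A hedged sketch of that expansion (the function name and time argument are illustrative, not V8's):

#include <cstdio>
#include <string>

std::string ExpandLogFileName(const std::string& name, double time_ms) {
  std::string out;
  for (size_t i = 0; i < name.size(); ++i) {
    if (name[i] != '%') { out += name[i]; continue; }
    if (++i >= name.size()) break;  // a trailing '%' is dropped
    if (name[i] == 't') {           // %t -> current time in milliseconds
      char buf[32];
      std::snprintf(buf, sizeof(buf), "%.0f", time_ms);
      out += buf;
    } else if (name[i] == '%') {    // %% -> %
      out += '%';
    } else {                        // any other %x stays as-is
      out += '%';
      out += name[i];
    }
  }
  return out;
}

// ExpandLogFileName("v8-%t.log", 1375000000000.0) -> "v8-1375000000000.log"
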
@@ -147,26 +89,9 @@ void Log::OpenTemporaryFile() {
}
-// Extension added to V8 log file name to get the low-level log name.
-static const char kLowLevelLogExt[] = ".ll";
-
-// File buffer size of the low-level log. We don't use the default to
-// minimize the associated overhead.
-static const int kLowLevelLogBufferSize = 2 * MB;
-
-
void Log::OpenFile(const char* name) {
ASSERT(!IsEnabled());
output_handle_ = OS::FOpen(name, OS::LogFileOpenMode);
- if (FLAG_ll_prof) {
- // Open the low-level log file.
- size_t len = strlen(name);
- ScopedVector<char> ll_name(static_cast<int>(len + sizeof(kLowLevelLogExt)));
- memcpy(ll_name.start(), name, len);
- memcpy(ll_name.start() + len, kLowLevelLogExt, sizeof(kLowLevelLogExt));
- ll_output_handle_ = OS::FOpen(ll_name.start(), OS::LogFileOpenMode);
- setvbuf(ll_output_handle_, NULL, _IOFBF, kLowLevelLogBufferSize);
- }
}
@@ -180,29 +105,24 @@ FILE* Log::Close() {
}
}
output_handle_ = NULL;
- if (ll_output_handle_ != NULL) fclose(ll_output_handle_);
- ll_output_handle_ = NULL;
DeleteArray(message_buffer_);
message_buffer_ = NULL;
- delete mutex_;
- mutex_ = NULL;
-
is_stopped_ = false;
return result;
}
-LogMessageBuilder::LogMessageBuilder(Logger* logger)
- : log_(logger->log_),
- sl(log_->mutex_),
+Log::MessageBuilder::MessageBuilder(Log* log)
+ : log_(log),
+ lock_guard_(&log_->mutex_),
pos_(0) {
ASSERT(log_->message_buffer_ != NULL);
}
-void LogMessageBuilder::Append(const char* format, ...) {
+void Log::MessageBuilder::Append(const char* format, ...) {
Vector<char> buf(log_->message_buffer_ + pos_,
Log::kMessageBufferSize - pos_);
va_list args;
@@ -213,7 +133,7 @@ void LogMessageBuilder::Append(const char* format, ...) {
}
-void LogMessageBuilder::AppendVA(const char* format, va_list args) {
+void Log::MessageBuilder::AppendVA(const char* format, va_list args) {
Vector<char> buf(log_->message_buffer_ + pos_,
Log::kMessageBufferSize - pos_);
int result = v8::internal::OS::VSNPrintF(buf, format, args);
@@ -228,7 +148,7 @@ void LogMessageBuilder::AppendVA(const char* format, va_list args) {
}
-void LogMessageBuilder::Append(const char c) {
+void Log::MessageBuilder::Append(const char c) {
if (pos_ < Log::kMessageBufferSize) {
log_->message_buffer_[pos_++] = c;
}
@@ -236,8 +156,20 @@ void LogMessageBuilder::Append(const char c) {
}
-void LogMessageBuilder::Append(String* str) {
- AssertNoAllocation no_heap_allocation; // Ensure string stay valid.
+void Log::MessageBuilder::AppendDoubleQuotedString(const char* string) {
+ Append('"');
+ for (const char* p = string; *p != '\0'; p++) {
+ if (*p == '"') {
+ Append('\\');
+ }
+ Append(*p);
+ }
+ Append('"');
+}
+
+
+void Log::MessageBuilder::Append(String* str) {
+ DisallowHeapAllocation no_gc; // Ensure string stays valid.
int length = str->length();
for (int i = 0; i < length; i++) {
Append(static_cast<char>(str->Get(i)));
@@ -245,22 +177,34 @@ void LogMessageBuilder::Append(String* str) {
}
-void LogMessageBuilder::AppendAddress(Address addr) {
+void Log::MessageBuilder::AppendAddress(Address addr) {
Append("0x%" V8PRIxPTR, addr);
}
-void LogMessageBuilder::AppendDetailed(String* str, bool show_impl_info) {
+void Log::MessageBuilder::AppendSymbolName(Symbol* symbol) {
+ ASSERT(symbol);
+ Append("symbol(");
+ if (!symbol->name()->IsUndefined()) {
+ Append("\"");
+ AppendDetailed(String::cast(symbol->name()), false);
+ Append("\" ");
+ }
+ Append("hash %x)", symbol->Hash());
+}
+
+
+void Log::MessageBuilder::AppendDetailed(String* str, bool show_impl_info) {
if (str == NULL) return;
- AssertNoAllocation no_heap_allocation; // Ensure string stay valid.
+ DisallowHeapAllocation no_gc; // Ensure string stays valid.
int len = str->length();
if (len > 0x1000)
len = 0x1000;
if (show_impl_info) {
- Append(str->IsAsciiRepresentation() ? 'a' : '2');
+ Append(str->IsOneByteRepresentation() ? 'a' : '2');
if (StringShape(str).IsExternal())
Append('e');
- if (StringShape(str).IsSymbol())
+ if (StringShape(str).IsInternalized())
Append('#');
Append(":%i:", str->length());
}
@@ -283,7 +227,7 @@ void LogMessageBuilder::AppendDetailed(String* str, bool show_impl_info) {
}
-void LogMessageBuilder::AppendStringPart(const char* str, int len) {
+void Log::MessageBuilder::AppendStringPart(const char* str, int len) {
if (pos_ + len > Log::kMessageBufferSize) {
len = Log::kMessageBufferSize - pos_;
ASSERT(len >= 0);
@@ -297,7 +241,7 @@ void LogMessageBuilder::AppendStringPart(const char* str, int len) {
}
-void LogMessageBuilder::WriteToLogFile() {
+void Log::MessageBuilder::WriteToLogFile() {
ASSERT(pos_ <= Log::kMessageBufferSize);
const int written = log_->WriteToFile(log_->message_buffer_, pos_);
if (written != pos_) {
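
The new AppendDoubleQuotedString() above escapes only the double-quote character; backslashes and everything else pass through unchanged. A standalone sketch of the same rule (names are illustrative):

#include <iostream>
#include <string>

std::string DoubleQuote(const std::string& s) {
  std::string out = "\"";
  for (size_t i = 0; i < s.size(); ++i) {
    if (s[i] == '"') out += '\\';  // escape embedded quotes only
    out += s[i];
  }
  out += '"';
  return out;
}

int main() {
  std::cout << DoubleQuote("say \"hi\"") << "\n";  // prints "say \"hi\""
  return 0;
}
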
diff --git a/deps/v8/src/log-utils.h b/deps/v8/src/log-utils.h
index d0cb82898..ec8415e4b 100644
--- a/deps/v8/src/log-utils.h
+++ b/deps/v8/src/log-utils.h
@@ -39,11 +39,17 @@ class Logger;
class Log {
public:
// Performs process-wide initialization.
- void Initialize();
+ void Initialize(const char* log_file_name);
// Disables logging, but preserves acquired resources.
void stop() { is_stopped_ = true; }
+ static bool InitLogAtStart() {
+ return FLAG_log || FLAG_log_runtime || FLAG_log_api
+ || FLAG_log_code || FLAG_log_gc || FLAG_log_handles || FLAG_log_suspect
+ || FLAG_log_regexp || FLAG_ll_prof || FLAG_log_internal_timer_events;
+ }
+
// Frees all resources acquired in Initialize and Open... functions.
// When a temporary file is used for the log, returns its stream descriptor,
// leaving the file open.
@@ -60,6 +66,50 @@ class Log {
// This mode is only used in tests, as temporary files are automatically
// deleted on close and thus can't be accessed afterwards.
static const char* const kLogToTemporaryFile;
+ static const char* const kLogToConsole;
+
+ // Utility class for formatting log messages. It fills the message into the
+ // static buffer in Log.
+ class MessageBuilder BASE_EMBEDDED {
+ public:
+ // Create a message builder starting from position 0.
+ // This acquires the mutex in the log as well.
+ explicit MessageBuilder(Log* log);
+ ~MessageBuilder() { }
+
+ // Append string data to the log message.
+ void Append(const char* format, ...);
+
+ // Append string data to the log message.
+ void AppendVA(const char* format, va_list args);
+
+ // Append a character to the log message.
+ void Append(const char c);
+
+ // Append double quoted string to the log message.
+ void AppendDoubleQuotedString(const char* string);
+
+ // Append a heap string.
+ void Append(String* str);
+
+ // Appends an address.
+ void AppendAddress(Address addr);
+
+ void AppendSymbolName(Symbol* symbol);
+
+ void AppendDetailed(String* str, bool show_impl_info);
+
+ // Append a portion of a string.
+ void AppendStringPart(const char* str, int len);
+
+ // Write the log message to the log file currently opened.
+ void WriteToLogFile();
+
+ private:
+ Log* log_;
+ LockGuard<Mutex> lock_guard_;
+ int pos_;
+ };
private:
explicit Log(Logger* logger);
@@ -90,12 +140,9 @@ class Log {
// destination. mutex_ should be acquired before using output_handle_.
FILE* output_handle_;
- // Used when low-level profiling is active.
- FILE* ll_output_handle_;
-
// mutex_ is a Mutex used for enforcing exclusive
// access to the formatting buffer and the log file or log memory buffer.
- Mutex* mutex_;
+ Mutex mutex_;
// Buffer used for formatting log messages. This is a singleton buffer and
// mutex_ should be acquired before using it.
@@ -104,48 +151,9 @@ class Log {
Logger* logger_;
friend class Logger;
- friend class LogMessageBuilder;
};
-// Utility class for formatting log messages. It fills the message into the
-// static buffer in Log.
-class LogMessageBuilder BASE_EMBEDDED {
- public:
- // Create a message builder starting from position 0. This acquires the mutex
- // in the log as well.
- explicit LogMessageBuilder(Logger* logger);
- ~LogMessageBuilder() { }
-
- // Append string data to the log message.
- void Append(const char* format, ...);
-
- // Append string data to the log message.
- void AppendVA(const char* format, va_list args);
-
- // Append a character to the log message.
- void Append(const char c);
-
- // Append a heap string.
- void Append(String* str);
-
- // Appends an address.
- void AppendAddress(Address addr);
-
- void AppendDetailed(String* str, bool show_impl_info);
-
- // Append a portion of a string.
- void AppendStringPart(const char* str, int len);
-
- // Write the log message to the log file currently opened.
- void WriteToLogFile();
-
- private:
- Log* log_;
- ScopedLock sl;
- int pos_;
-};
-
} } // namespace v8::internal
#endif // V8_LOG_UTILS_H_
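
The header change above replaces the heap-allocated Mutex* with a Mutex held by value and moves the message builder into Log, where a LockGuard member keeps the mutex held for the builder's whole lifetime, so formatting plus writing a message is a single critical section. A sketch of that RAII pattern using std::mutex in place of V8's Mutex/LockGuard (all names here are illustrative):

#include <mutex>

struct SketchLog {
  std::mutex mutex_;  // owned by value, like Log::mutex_ above
  char buffer_[4096];
};

class SketchMessageBuilder {
 public:
  explicit SketchMessageBuilder(SketchLog* log)
      : log_(log), guard_(log->mutex_), pos_(0) {}
  void Append(char c) {
    if (pos_ < static_cast<int>(sizeof(log_->buffer_))) log_->buffer_[pos_++] = c;
  }
 private:
  SketchLog* log_;
  std::lock_guard<std::mutex> guard_;  // held until the builder is destroyed
  int pos_;
};
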
diff --git a/deps/v8/src/log.cc b/deps/v8/src/log.cc
index b049ffe4e..b353f548f 100644
--- a/deps/v8/src/log.cc
+++ b/deps/v8/src/log.cc
@@ -31,9 +31,11 @@
#include "bootstrapper.h"
#include "code-stubs.h"
+#include "cpu-profiler.h"
#include "deoptimizer.h"
#include "global-handles.h"
#include "log.h"
+#include "log-utils.h"
#include "macro-assembler.h"
#include "platform.h"
#include "runtime-profiler.h"
@@ -44,37 +46,495 @@
namespace v8 {
namespace internal {
-//
-// Sliding state window. Updates counters to keep track of the last
-// window of kBufferSize states. This is useful to track where we
-// spent our time.
-//
-class SlidingStateWindow {
+
+#define DECLARE_EVENT(ignore1, name) name,
+static const char* const kLogEventsNames[Logger::NUMBER_OF_LOG_EVENTS] = {
+ LOG_EVENTS_AND_TAGS_LIST(DECLARE_EVENT)
+};
+#undef DECLARE_EVENT
+
+
+#define CALL_LISTENERS(Call) \
+for (int i = 0; i < listeners_.length(); ++i) { \
+ listeners_[i]->Call; \
+}
+
+#define PROFILER_LOG(Call) \
+ do { \
+ CpuProfiler* cpu_profiler = isolate_->cpu_profiler(); \
+ if (cpu_profiler->is_profiling()) { \
+ cpu_profiler->Call; \
+ } \
+ } while (false);
+
+// ComputeMarker must only be used when SharedFunctionInfo is known.
+static const char* ComputeMarker(Code* code) {
+ switch (code->kind()) {
+ case Code::FUNCTION: return code->optimizable() ? "~" : "";
+ case Code::OPTIMIZED_FUNCTION: return "*";
+ default: return "";
+ }
+}
+
+
+class CodeEventLogger::NameBuffer {
public:
- explicit SlidingStateWindow(Isolate* isolate);
- ~SlidingStateWindow();
- void AddState(StateTag state);
+ NameBuffer() { Reset(); }
+
+ void Reset() {
+ utf8_pos_ = 0;
+ }
+
+ void Init(Logger::LogEventsAndTags tag) {
+ Reset();
+ AppendBytes(kLogEventsNames[tag]);
+ AppendByte(':');
+ }
+
+ void AppendName(Name* name) {
+ if (name->IsString()) {
+ AppendString(String::cast(name));
+ } else {
+ Symbol* symbol = Symbol::cast(name);
+ AppendBytes("symbol(");
+ if (!symbol->name()->IsUndefined()) {
+ AppendBytes("\"");
+ AppendString(String::cast(symbol->name()));
+ AppendBytes("\" ");
+ }
+ AppendBytes("hash ");
+ AppendHex(symbol->Hash());
+ AppendByte(')');
+ }
+ }
+
+ void AppendString(String* str) {
+ if (str == NULL) return;
+ int uc16_length = Min(str->length(), kUtf16BufferSize);
+ String::WriteToFlat(str, utf16_buffer, 0, uc16_length);
+ int previous = unibrow::Utf16::kNoPreviousCharacter;
+ for (int i = 0; i < uc16_length && utf8_pos_ < kUtf8BufferSize; ++i) {
+ uc16 c = utf16_buffer[i];
+ if (c <= unibrow::Utf8::kMaxOneByteChar) {
+ utf8_buffer_[utf8_pos_++] = static_cast<char>(c);
+ } else {
+ int char_length = unibrow::Utf8::Length(c, previous);
+ if (utf8_pos_ + char_length > kUtf8BufferSize) break;
+ unibrow::Utf8::Encode(utf8_buffer_ + utf8_pos_, c, previous);
+ utf8_pos_ += char_length;
+ }
+ previous = c;
+ }
+ }
+
+ void AppendBytes(const char* bytes, int size) {
+ size = Min(size, kUtf8BufferSize - utf8_pos_);
+ OS::MemCopy(utf8_buffer_ + utf8_pos_, bytes, size);
+ utf8_pos_ += size;
+ }
+
+ void AppendBytes(const char* bytes) {
+ AppendBytes(bytes, StrLength(bytes));
+ }
+
+ void AppendByte(char c) {
+ if (utf8_pos_ >= kUtf8BufferSize) return;
+ utf8_buffer_[utf8_pos_++] = c;
+ }
+
+ void AppendInt(int n) {
+ Vector<char> buffer(utf8_buffer_ + utf8_pos_,
+ kUtf8BufferSize - utf8_pos_);
+ int size = OS::SNPrintF(buffer, "%d", n);
+ if (size > 0 && utf8_pos_ + size <= kUtf8BufferSize) {
+ utf8_pos_ += size;
+ }
+ }
+
+ void AppendHex(uint32_t n) {
+ Vector<char> buffer(utf8_buffer_ + utf8_pos_,
+ kUtf8BufferSize - utf8_pos_);
+ int size = OS::SNPrintF(buffer, "%x", n);
+ if (size > 0 && utf8_pos_ + size <= kUtf8BufferSize) {
+ utf8_pos_ += size;
+ }
+ }
+
+ const char* get() { return utf8_buffer_; }
+ int size() const { return utf8_pos_; }
private:
- static const int kBufferSize = 256;
- Counters* counters_;
- int current_index_;
- bool is_full_;
- byte buffer_[kBufferSize];
+ static const int kUtf8BufferSize = 512;
+ static const int kUtf16BufferSize = 128;
+
+ int utf8_pos_;
+ char utf8_buffer_[kUtf8BufferSize];
+ uc16 utf16_buffer[kUtf16BufferSize];
+};
+
+
+CodeEventLogger::CodeEventLogger() : name_buffer_(new NameBuffer) { }
+
+CodeEventLogger::~CodeEventLogger() { delete name_buffer_; }
+
+
+void CodeEventLogger::CodeCreateEvent(Logger::LogEventsAndTags tag,
+ Code* code,
+ const char* comment) {
+ name_buffer_->Init(tag);
+ name_buffer_->AppendBytes(comment);
+ LogRecordedBuffer(code, NULL, name_buffer_->get(), name_buffer_->size());
+}
- void IncrementStateCounter(StateTag state) {
- counters_->state_counters(state)->Increment();
+void CodeEventLogger::CodeCreateEvent(Logger::LogEventsAndTags tag,
+ Code* code,
+ Name* name) {
+ name_buffer_->Init(tag);
+ name_buffer_->AppendName(name);
+ LogRecordedBuffer(code, NULL, name_buffer_->get(), name_buffer_->size());
+}
+
+
+void CodeEventLogger::CodeCreateEvent(Logger::LogEventsAndTags tag,
+ Code* code,
+ SharedFunctionInfo* shared,
+ CompilationInfo* info,
+ Name* name) {
+ name_buffer_->Init(tag);
+ name_buffer_->AppendBytes(ComputeMarker(code));
+ name_buffer_->AppendName(name);
+ LogRecordedBuffer(code, shared, name_buffer_->get(), name_buffer_->size());
+}
+
+
+void CodeEventLogger::CodeCreateEvent(Logger::LogEventsAndTags tag,
+ Code* code,
+ SharedFunctionInfo* shared,
+ CompilationInfo* info,
+ Name* source, int line, int column) {
+ name_buffer_->Init(tag);
+ name_buffer_->AppendBytes(ComputeMarker(code));
+ name_buffer_->AppendString(shared->DebugName());
+ name_buffer_->AppendByte(' ');
+ if (source->IsString()) {
+ name_buffer_->AppendString(String::cast(source));
+ } else {
+ name_buffer_->AppendBytes("symbol(hash ");
+ name_buffer_->AppendHex(Name::cast(source)->Hash());
+ name_buffer_->AppendByte(')');
}
+ name_buffer_->AppendByte(':');
+ name_buffer_->AppendInt(line);
+ LogRecordedBuffer(code, shared, name_buffer_->get(), name_buffer_->size());
+}
+
+
+void CodeEventLogger::CodeCreateEvent(Logger::LogEventsAndTags tag,
+ Code* code,
+ int args_count) {
+ name_buffer_->Init(tag);
+ name_buffer_->AppendInt(args_count);
+ LogRecordedBuffer(code, NULL, name_buffer_->get(), name_buffer_->size());
+}
+
+
+void CodeEventLogger::RegExpCodeCreateEvent(Code* code, String* source) {
+ name_buffer_->Init(Logger::REG_EXP_TAG);
+ name_buffer_->AppendString(source);
+ LogRecordedBuffer(code, NULL, name_buffer_->get(), name_buffer_->size());
+}
+
+
+// Low-level logging support.
+#define LL_LOG(Call) if (ll_logger_) ll_logger_->Call;
+
+class LowLevelLogger : public CodeEventLogger {
+ public:
+ explicit LowLevelLogger(const char* file_name);
+ virtual ~LowLevelLogger();
+
+ virtual void CodeMoveEvent(Address from, Address to);
+ virtual void CodeDeleteEvent(Address from);
+ virtual void SnapshotPositionEvent(Address addr, int pos);
+ virtual void CodeMovingGCEvent();
+
+ private:
+ virtual void LogRecordedBuffer(Code* code,
+ SharedFunctionInfo* shared,
+ const char* name,
+ int length);
+
+ // Low-level profiling event structures.
+ struct CodeCreateStruct {
+ static const char kTag = 'C';
+
+ int32_t name_size;
+ Address code_address;
+ int32_t code_size;
+ };
- void DecrementStateCounter(StateTag state) {
- counters_->state_counters(state)->Decrement();
+ struct CodeMoveStruct {
+ static const char kTag = 'M';
+
+ Address from_address;
+ Address to_address;
+ };
+
+
+ struct CodeDeleteStruct {
+ static const char kTag = 'D';
+
+ Address address;
+ };
+
+
+ struct SnapshotPositionStruct {
+ static const char kTag = 'P';
+
+ Address address;
+ int32_t position;
+ };
+
+
+ static const char kCodeMovingGCTag = 'G';
+
+
+ // Extension added to V8 log file name to get the low-level log name.
+ static const char kLogExt[];
+
+ // File buffer size of the low-level log. We don't use the default to
+ // minimize the associated overhead.
+ static const int kLogBufferSize = 2 * MB;
+
+ void LogCodeInfo();
+ void LogWriteBytes(const char* bytes, int size);
+
+ template <typename T>
+ void LogWriteStruct(const T& s) {
+ char tag = T::kTag;
+ LogWriteBytes(reinterpret_cast<const char*>(&tag), sizeof(tag));
+ LogWriteBytes(reinterpret_cast<const char*>(&s), sizeof(s));
}
+
+ FILE* ll_output_handle_;
};
+const char LowLevelLogger::kLogExt[] = ".ll";
+
+LowLevelLogger::LowLevelLogger(const char* name)
+ : ll_output_handle_(NULL) {
+ // Open the low-level log file.
+ size_t len = strlen(name);
+ ScopedVector<char> ll_name(static_cast<int>(len + sizeof(kLogExt)));
+ OS::MemCopy(ll_name.start(), name, len);
+ OS::MemCopy(ll_name.start() + len, kLogExt, sizeof(kLogExt));
+ ll_output_handle_ = OS::FOpen(ll_name.start(), OS::LogFileOpenMode);
+ setvbuf(ll_output_handle_, NULL, _IOFBF, kLogBufferSize);
+
+ LogCodeInfo();
+}
+
+
+LowLevelLogger::~LowLevelLogger() {
+ fclose(ll_output_handle_);
+ ll_output_handle_ = NULL;
+}
+
+
+void LowLevelLogger::LogCodeInfo() {
+#if V8_TARGET_ARCH_IA32
+ const char arch[] = "ia32";
+#elif V8_TARGET_ARCH_X64
+ const char arch[] = "x64";
+#elif V8_TARGET_ARCH_ARM
+ const char arch[] = "arm";
+#elif V8_TARGET_ARCH_MIPS
+ const char arch[] = "mips";
+#else
+ const char arch[] = "unknown";
+#endif
+ LogWriteBytes(arch, sizeof(arch));
+}
+
+
+void LowLevelLogger::LogRecordedBuffer(Code* code,
+ SharedFunctionInfo*,
+ const char* name,
+ int length) {
+ CodeCreateStruct event;
+ event.name_size = length;
+ event.code_address = code->instruction_start();
+ ASSERT(event.code_address == code->address() + Code::kHeaderSize);
+ event.code_size = code->instruction_size();
+ LogWriteStruct(event);
+ LogWriteBytes(name, length);
+ LogWriteBytes(
+ reinterpret_cast<const char*>(code->instruction_start()),
+ code->instruction_size());
+}
+
+
+void LowLevelLogger::CodeMoveEvent(Address from, Address to) {
+ CodeMoveStruct event;
+ event.from_address = from + Code::kHeaderSize;
+ event.to_address = to + Code::kHeaderSize;
+ LogWriteStruct(event);
+}
+
+
+void LowLevelLogger::CodeDeleteEvent(Address from) {
+ CodeDeleteStruct event;
+ event.address = from + Code::kHeaderSize;
+ LogWriteStruct(event);
+}
+
+
+void LowLevelLogger::SnapshotPositionEvent(Address addr, int pos) {
+ SnapshotPositionStruct event;
+ event.address = addr + Code::kHeaderSize;
+ event.position = pos;
+ LogWriteStruct(event);
+}
+
+
+void LowLevelLogger::LogWriteBytes(const char* bytes, int size) {
+ size_t rv = fwrite(bytes, 1, size, ll_output_handle_);
+ ASSERT(static_cast<size_t>(size) == rv);
+ USE(rv);
+}
+
+
+void LowLevelLogger::CodeMovingGCEvent() {
+ const char tag = kCodeMovingGCTag;
+
+ LogWriteBytes(&tag, sizeof(tag));
+}
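
The LowLevelLogger above writes fixed binary records: LogWriteStruct() emits a one-byte tag ('C', 'M', 'D', 'P', or the bare 'G' for moving GCs) followed by the raw bytes of the event struct, and LogCodeInfo() records the target architecture first so offline tooling can decode the field widths. A minimal sketch of that record format (struct and function names are illustrative, not V8's):

#include <cstdint>
#include <cstdio>

struct MoveRecord {
  static const char kTag = 'M';
  uintptr_t from_address;
  uintptr_t to_address;
};

template <typename T>
void WriteRecord(FILE* out, const T& record) {
  char tag = T::kTag;
  std::fwrite(&tag, 1, sizeof(tag), out);        // one-byte record tag
  std::fwrite(&record, 1, sizeof(record), out);  // raw struct bytes
}
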
+
+
+#define JIT_LOG(Call) if (jit_logger_) jit_logger_->Call;
+
+
+class JitLogger : public CodeEventLogger {
+ public:
+ explicit JitLogger(JitCodeEventHandler code_event_handler);
+
+ virtual void CodeMoveEvent(Address from, Address to);
+ virtual void CodeDeleteEvent(Address from);
+ virtual void AddCodeLinePosInfoEvent(
+ void* jit_handler_data,
+ int pc_offset,
+ int position,
+ JitCodeEvent::PositionType position_type);
+
+ void* StartCodePosInfoEvent();
+ void EndCodePosInfoEvent(Code* code, void* jit_handler_data);
+
+ private:
+ virtual void LogRecordedBuffer(Code* code,
+ SharedFunctionInfo* shared,
+ const char* name,
+ int length);
+
+ JitCodeEventHandler code_event_handler_;
+};
+
+
+JitLogger::JitLogger(JitCodeEventHandler code_event_handler)
+ : code_event_handler_(code_event_handler) {
+}
+
+
+void JitLogger::LogRecordedBuffer(Code* code,
+ SharedFunctionInfo* shared,
+ const char* name,
+ int length) {
+ JitCodeEvent event;
+ memset(&event, 0, sizeof(event));
+ event.type = JitCodeEvent::CODE_ADDED;
+ event.code_start = code->instruction_start();
+ event.code_len = code->instruction_size();
+ Handle<Script> script_handle;
+ if (shared && shared->script()->IsScript()) {
+ script_handle = Handle<Script>(Script::cast(shared->script()));
+ }
+ event.script = ToApiHandle<v8::Script>(script_handle);
+ event.name.str = name;
+ event.name.len = length;
+ code_event_handler_(&event);
+}
+
+
+void JitLogger::CodeMoveEvent(Address from, Address to) {
+ Code* from_code = Code::cast(HeapObject::FromAddress(from));
+
+ JitCodeEvent event;
+ event.type = JitCodeEvent::CODE_MOVED;
+ event.code_start = from_code->instruction_start();
+ event.code_len = from_code->instruction_size();
+
+ // Calculate the header size.
+ const size_t header_size =
+ from_code->instruction_start() - reinterpret_cast<byte*>(from_code);
+
+ // Calculate the new start address of the instructions.
+ event.new_code_start =
+ reinterpret_cast<byte*>(HeapObject::FromAddress(to)) + header_size;
+
+ code_event_handler_(&event);
+}
+
+
+void JitLogger::CodeDeleteEvent(Address from) {
+ Code* from_code = Code::cast(HeapObject::FromAddress(from));
+
+ JitCodeEvent event;
+ event.type = JitCodeEvent::CODE_REMOVED;
+ event.code_start = from_code->instruction_start();
+ event.code_len = from_code->instruction_size();
+
+ code_event_handler_(&event);
+}
+
+void JitLogger::AddCodeLinePosInfoEvent(
+ void* jit_handler_data,
+ int pc_offset,
+ int position,
+ JitCodeEvent::PositionType position_type) {
+ JitCodeEvent event;
+ memset(&event, 0, sizeof(event));
+ event.type = JitCodeEvent::CODE_ADD_LINE_POS_INFO;
+ event.user_data = jit_handler_data;
+ event.line_info.offset = pc_offset;
+ event.line_info.pos = position;
+ event.line_info.position_type = position_type;
+
+ code_event_handler_(&event);
+}
+
+
+void* JitLogger::StartCodePosInfoEvent() {
+ JitCodeEvent event;
+ memset(&event, 0, sizeof(event));
+ event.type = JitCodeEvent::CODE_START_LINE_INFO_RECORDING;
+
+ code_event_handler_(&event);
+ return event.user_data;
+}
+
+
+void JitLogger::EndCodePosInfoEvent(Code* code, void* jit_handler_data) {
+ JitCodeEvent event;
+ memset(&event, 0, sizeof(event));
+ event.type = JitCodeEvent::CODE_END_LINE_INFO_RECORDING;
+ event.code_start = code->instruction_start();
+ event.user_data = jit_handler_data;
+
+ code_event_handler_(&event);
+}
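
JitLogger above forwards every code event to an embedder-supplied JitCodeEventHandler. Below is a hedged sketch of a consumer for those events; the handler only touches fields used in the diff (type, name.str/len, code_start, code_len), and the registration call shown in the comment (v8::V8::SetJitCodeEventHandler) is assumed to be the public entry point that feeds this logger.

#include <cstdio>

#include "v8.h"

static void HandleJitEvent(const v8::JitCodeEvent* event) {
  if (event->type != v8::JitCodeEvent::CODE_ADDED) return;
  std::fprintf(stderr, "jit code: %.*s (%d bytes at %p)\n",
               static_cast<int>(event->name.len), event->name.str,
               static_cast<int>(event->code_len), event->code_start);
}

// During embedder startup (assumed API):
//   v8::V8::SetJitCodeEventHandler(v8::kJitCodeEventDefault, HandleJitEvent);
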
+
-//
// The Profiler samples pc and sp values for the main thread.
// Each sample is appended to a circular buffer.
// An independent thread removes data and writes it to the log.
@@ -96,13 +556,20 @@ class Profiler: public Thread {
} else {
buffer_[head_] = *sample;
head_ = Succ(head_);
- buffer_semaphore_->Signal(); // Tell we have an element.
+ buffer_semaphore_.Signal(); // Tell we have an element.
}
}
+ virtual void Run();
+
+ // Pause and Resume TickSample data collection.
+ void pause() { paused_ = true; }
+ void resume() { paused_ = false; }
+
+ private:
// Waits for a signal and removes profiling data.
bool Remove(TickSample* sample) {
- buffer_semaphore_->Wait(); // Wait for an element.
+ buffer_semaphore_.Wait(); // Wait for an element.
*sample = buffer_[tail_];
bool result = overflow_;
tail_ = Succ(tail_);
@@ -110,14 +577,6 @@ class Profiler: public Thread {
return result;
}
- void Run();
-
- // Pause and Resume TickSample data collection.
- bool paused() const { return paused_; }
- void pause() { paused_ = true; }
- void resume() { paused_ = false; }
-
- private:
// Returns the next index in the cyclic buffer.
int Succ(int index) { return (index + 1) % kBufferSize; }
@@ -129,7 +588,8 @@ class Profiler: public Thread {
int head_; // Index to the buffer head.
int tail_; // Index to the buffer tail.
bool overflow_; // Tell whether a buffer overflow has occurred.
- Semaphore* buffer_semaphore_; // Sempahore used for buffer synchronization.
+ // Semaphore used for buffer synchronization.
+ Semaphore buffer_semaphore_;
// Tells whether profiler is engaged, that is, processing thread is started.
bool engaged_;
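
The Profiler above is a classic single-producer/single-consumer ring: the sampler thread Insert()s ticks and signals a semaphore, the logging thread blocks in Remove() until an element arrives, and overflow simply drops samples. A compact sketch of the same scheme, substituting a condition variable for V8's Semaphore (types and names are illustrative):

#include <condition_variable>
#include <mutex>

struct Sample { void* pc; void* sp; };  // stand-in for TickSample

class SampleRing {
 public:
  void Insert(const Sample& s) {
    std::lock_guard<std::mutex> lock(mutex_);
    if (Succ(head_) == tail_) { overflow_ = true; return; }  // buffer full: drop
    buffer_[head_] = s;
    head_ = Succ(head_);
    ready_.notify_one();  // tell the consumer we have an element
  }
  Sample Remove() {
    std::unique_lock<std::mutex> lock(mutex_);
    ready_.wait(lock, [this] { return head_ != tail_; });  // wait for an element
    Sample s = buffer_[tail_];
    tail_ = Succ(tail_);
    return s;
  }
 private:
  static const int kBufferSize = 128;
  int Succ(int i) const { return (i + 1) % kBufferSize; }
  Sample buffer_[kBufferSize];
  int head_ = 0, tail_ = 0;
  bool overflow_ = false;
  std::mutex mutex_;
  std::condition_variable ready_;
};
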
@@ -143,45 +603,6 @@ class Profiler: public Thread {
//
-// StackTracer implementation
-//
-DISABLE_ASAN void StackTracer::Trace(Isolate* isolate, TickSample* sample) {
- ASSERT(isolate->IsInitialized());
-
- // Avoid collecting traces while doing GC.
- if (sample->state == GC) return;
-
- const Address js_entry_sp =
- Isolate::js_entry_sp(isolate->thread_local_top());
- if (js_entry_sp == 0) {
- // Not executing JS now.
- return;
- }
-
- const Address callback = isolate->external_callback();
- if (callback != NULL) {
- sample->external_callback = callback;
- sample->has_external_callback = true;
- } else {
- // Sample potential return address value for frameless invocation of
- // stubs (we'll figure out later, if this value makes sense).
- sample->tos = Memory::Address_at(sample->sp);
- sample->has_external_callback = false;
- }
-
- SafeStackTraceFrameIterator it(isolate,
- sample->fp, sample->sp,
- sample->sp, js_entry_sp);
- int i = 0;
- while (!it.done() && i < TickSample::kMaxFramesCount) {
- sample->stack[i++] = it.frame()->pc();
- it.Advance();
- }
- sample->frames_count = i;
-}
-
-
-//
// Ticker used to provide ticks to the profiler and the sliding state
// window.
//
@@ -189,81 +610,33 @@ class Ticker: public Sampler {
public:
Ticker(Isolate* isolate, int interval):
Sampler(isolate, interval),
- window_(NULL),
profiler_(NULL) {}
~Ticker() { if (IsActive()) Stop(); }
virtual void Tick(TickSample* sample) {
if (profiler_) profiler_->Insert(sample);
- if (window_) window_->AddState(sample->state);
- }
-
- void SetWindow(SlidingStateWindow* window) {
- window_ = window;
- if (!IsActive()) Start();
- }
-
- void ClearWindow() {
- window_ = NULL;
- if (!profiler_ && IsActive() && !RuntimeProfiler::IsEnabled()) Stop();
}
void SetProfiler(Profiler* profiler) {
ASSERT(profiler_ == NULL);
profiler_ = profiler;
IncreaseProfilingDepth();
- if (!FLAG_prof_lazy && !IsActive()) Start();
+ if (!IsActive()) Start();
}
void ClearProfiler() {
- DecreaseProfilingDepth();
profiler_ = NULL;
- if (!window_ && IsActive() && !RuntimeProfiler::IsEnabled()) Stop();
- }
-
- protected:
- virtual void DoSampleStack(TickSample* sample) {
- StackTracer::Trace(isolate(), sample);
+ if (IsActive()) Stop();
+ DecreaseProfilingDepth();
}
private:
- SlidingStateWindow* window_;
Profiler* profiler_;
};
//
-// SlidingStateWindow implementation.
-//
-SlidingStateWindow::SlidingStateWindow(Isolate* isolate)
- : counters_(isolate->counters()), current_index_(0), is_full_(false) {
- for (int i = 0; i < kBufferSize; i++) {
- buffer_[i] = static_cast<byte>(OTHER);
- }
- isolate->logger()->ticker_->SetWindow(this);
-}
-
-
-SlidingStateWindow::~SlidingStateWindow() {
- LOGGER->ticker_->ClearWindow();
-}
-
-
-void SlidingStateWindow::AddState(StateTag state) {
- if (is_full_) {
- DecrementStateCounter(static_cast<StateTag>(buffer_[current_index_]));
- } else if (current_index_ == kBufferSize - 1) {
- is_full_ = true;
- }
- buffer_[current_index_] = static_cast<byte>(state);
- IncrementStateCounter(state);
- ASSERT(IsPowerOf2(kBufferSize));
- current_index_ = (current_index_ + 1) & (kBufferSize - 1);
-}
-
-
-//
// Profiler implementation.
//
Profiler::Profiler(Isolate* isolate)
@@ -272,7 +645,7 @@ Profiler::Profiler(Isolate* isolate)
head_(0),
tail_(0),
overflow_(false),
- buffer_semaphore_(OS::CreateSemaphore(0)),
+ buffer_semaphore_(0),
engaged_(false),
running_(false),
paused_(false) {
@@ -283,20 +656,17 @@ void Profiler::Engage() {
if (engaged_) return;
engaged_ = true;
- // TODO(mnaganov): This is actually "Chromium" mode. Flags need to be revised.
- // http://code.google.com/p/v8/issues/detail?id=487
- if (!FLAG_prof_lazy) {
- OS::LogSharedLibraryAddresses();
- }
+ OS::LogSharedLibraryAddresses(isolate_);
// Start thread processing the profiler buffer.
running_ = true;
Start();
// Register to get ticks.
- LOGGER->ticker_->SetProfiler(this);
+ Logger* logger = isolate_->logger();
+ logger->ticker_->SetProfiler(this);
- LOGGER->ProfilerBeginEvent();
+ logger->ProfilerBeginEvent();
}
@@ -304,7 +674,7 @@ void Profiler::Disengage() {
if (!engaged_) return;
// Stop receiving ticks.
- LOGGER->ticker_->ClearProfiler();
+ isolate_->logger()->ticker_->ClearProfiler();
// Terminate the worker thread by setting running_ to false,
// inserting a fake element in the queue and then wait for
@@ -316,7 +686,7 @@ void Profiler::Disengage() {
Insert(&sample);
Join();
- LOG(ISOLATE, UncheckedStringEvent("profiler", "end"));
+ LOG(isolate_, UncheckedStringEvent("profiler", "end"));
}
@@ -330,274 +700,49 @@ void Profiler::Run() {
}
-// Low-level profiling event structures.
-
-struct LowLevelCodeCreateStruct {
- static const char kTag = 'C';
-
- int32_t name_size;
- Address code_address;
- int32_t code_size;
-};
-
-
-struct LowLevelCodeMoveStruct {
- static const char kTag = 'M';
-
- Address from_address;
- Address to_address;
-};
-
-
-struct LowLevelCodeDeleteStruct {
- static const char kTag = 'D';
-
- Address address;
-};
-
-
-struct LowLevelSnapshotPositionStruct {
- static const char kTag = 'P';
-
- Address address;
- int32_t position;
-};
-
-
-static const char kCodeMovingGCTag = 'G';
-
-
//
// Logger class implementation.
//
-class Logger::NameMap {
- public:
- NameMap() : impl_(&PointerEquals) {}
-
- ~NameMap() {
- for (HashMap::Entry* p = impl_.Start(); p != NULL; p = impl_.Next(p)) {
- DeleteArray(static_cast<const char*>(p->value));
- }
- }
-
- void Insert(Address code_address, const char* name, int name_size) {
- HashMap::Entry* entry = FindOrCreateEntry(code_address);
- if (entry->value == NULL) {
- entry->value = CopyName(name, name_size);
- }
- }
-
- const char* Lookup(Address code_address) {
- HashMap::Entry* entry = FindEntry(code_address);
- return (entry != NULL) ? static_cast<const char*>(entry->value) : NULL;
- }
-
- void Remove(Address code_address) {
- HashMap::Entry* entry = FindEntry(code_address);
- if (entry != NULL) {
- DeleteArray(static_cast<char*>(entry->value));
- RemoveEntry(entry);
- }
- }
-
- void Move(Address from, Address to) {
- if (from == to) return;
- HashMap::Entry* from_entry = FindEntry(from);
- ASSERT(from_entry != NULL);
- void* value = from_entry->value;
- RemoveEntry(from_entry);
- HashMap::Entry* to_entry = FindOrCreateEntry(to);
- ASSERT(to_entry->value == NULL);
- to_entry->value = value;
- }
-
- private:
- static bool PointerEquals(void* lhs, void* rhs) {
- return lhs == rhs;
- }
-
- static char* CopyName(const char* name, int name_size) {
- char* result = NewArray<char>(name_size + 1);
- for (int i = 0; i < name_size; ++i) {
- char c = name[i];
- if (c == '\0') c = ' ';
- result[i] = c;
- }
- result[name_size] = '\0';
- return result;
- }
-
- HashMap::Entry* FindOrCreateEntry(Address code_address) {
- return impl_.Lookup(code_address, ComputePointerHash(code_address), true);
- }
-
- HashMap::Entry* FindEntry(Address code_address) {
- return impl_.Lookup(code_address, ComputePointerHash(code_address), false);
- }
-
- void RemoveEntry(HashMap::Entry* entry) {
- impl_.Remove(entry->key, entry->hash);
- }
-
- HashMap impl_;
-
- DISALLOW_COPY_AND_ASSIGN(NameMap);
-};
-
-
-class Logger::NameBuffer {
- public:
- NameBuffer() { Reset(); }
-
- void Reset() {
- utf8_pos_ = 0;
- }
-
- void AppendString(String* str) {
- if (str == NULL) return;
- if (str->HasOnlyAsciiChars()) {
- int utf8_length = Min(str->length(), kUtf8BufferSize - utf8_pos_);
- String::WriteToFlat(str, utf8_buffer_ + utf8_pos_, 0, utf8_length);
- utf8_pos_ += utf8_length;
- return;
- }
- int uc16_length = Min(str->length(), kUtf16BufferSize);
- String::WriteToFlat(str, utf16_buffer, 0, uc16_length);
- int previous = unibrow::Utf16::kNoPreviousCharacter;
- for (int i = 0; i < uc16_length && utf8_pos_ < kUtf8BufferSize; ++i) {
- uc16 c = utf16_buffer[i];
- if (c <= String::kMaxAsciiCharCodeU) {
- utf8_buffer_[utf8_pos_++] = static_cast<char>(c);
- } else {
- int char_length = unibrow::Utf8::Length(c, previous);
- if (utf8_pos_ + char_length > kUtf8BufferSize) break;
- unibrow::Utf8::Encode(utf8_buffer_ + utf8_pos_, c, previous);
- utf8_pos_ += char_length;
- }
- previous = c;
- }
- }
-
- void AppendBytes(const char* bytes, int size) {
- size = Min(size, kUtf8BufferSize - utf8_pos_);
- memcpy(utf8_buffer_ + utf8_pos_, bytes, size);
- utf8_pos_ += size;
- }
-
- void AppendBytes(const char* bytes) {
- AppendBytes(bytes, StrLength(bytes));
- }
-
- void AppendByte(char c) {
- if (utf8_pos_ >= kUtf8BufferSize) return;
- utf8_buffer_[utf8_pos_++] = c;
- }
-
- void AppendInt(int n) {
- Vector<char> buffer(utf8_buffer_ + utf8_pos_, kUtf8BufferSize - utf8_pos_);
- int size = OS::SNPrintF(buffer, "%d", n);
- if (size > 0 && utf8_pos_ + size <= kUtf8BufferSize) {
- utf8_pos_ += size;
- }
- }
-
- const char* get() { return utf8_buffer_; }
- int size() const { return utf8_pos_; }
-
- private:
- static const int kUtf8BufferSize = 512;
- static const int kUtf16BufferSize = 128;
-
- int utf8_pos_;
- char utf8_buffer_[kUtf8BufferSize];
- uc16 utf16_buffer[kUtf16BufferSize];
-};
-
-
-Logger::Logger()
- : ticker_(NULL),
+Logger::Logger(Isolate* isolate)
+ : isolate_(isolate),
+ ticker_(NULL),
profiler_(NULL),
- sliding_state_window_(NULL),
log_events_(NULL),
- logging_nesting_(0),
- cpu_profiler_nesting_(0),
+ is_logging_(false),
log_(new Log(this)),
- name_buffer_(new NameBuffer),
- address_to_name_map_(NULL),
- is_initialized_(false),
- code_event_handler_(NULL),
- last_address_(NULL),
- prev_sp_(NULL),
- prev_function_(NULL),
- prev_to_(NULL),
- prev_code_(NULL) {
+ ll_logger_(NULL),
+ jit_logger_(NULL),
+ listeners_(5),
+ is_initialized_(false) {
}
Logger::~Logger() {
- delete address_to_name_map_;
- delete name_buffer_;
delete log_;
}
-void Logger::IssueCodeAddedEvent(Code* code,
- const char* name,
- size_t name_len) {
- JitCodeEvent event;
- event.type = JitCodeEvent::CODE_ADDED;
- event.code_start = code->instruction_start();
- event.code_len = code->instruction_size();
- event.name.str = name;
- event.name.len = name_len;
-
- code_event_handler_(&event);
+void Logger::addCodeEventListener(CodeEventListener* listener) {
+ ASSERT(!hasCodeEventListener(listener));
+ listeners_.Add(listener);
}
-void Logger::IssueCodeMovedEvent(Address from, Address to) {
- Code* from_code = Code::cast(HeapObject::FromAddress(from));
-
- JitCodeEvent event;
- event.type = JitCodeEvent::CODE_MOVED;
- event.code_start = from_code->instruction_start();
- event.code_len = from_code->instruction_size();
-
- // Calculate the header size.
- const size_t header_size =
- from_code->instruction_start() - reinterpret_cast<byte*>(from_code);
-
- // Calculate the new start address of the instructions.
- event.new_code_start =
- reinterpret_cast<byte*>(HeapObject::FromAddress(to)) + header_size;
-
- code_event_handler_(&event);
+void Logger::removeCodeEventListener(CodeEventListener* listener) {
+ ASSERT(hasCodeEventListener(listener));
+ listeners_.RemoveElement(listener);
}
-void Logger::IssueCodeRemovedEvent(Address from) {
- Code* from_code = Code::cast(HeapObject::FromAddress(from));
-
- JitCodeEvent event;
- event.type = JitCodeEvent::CODE_REMOVED;
- event.code_start = from_code->instruction_start();
- event.code_len = from_code->instruction_size();
-
- code_event_handler_(&event);
+bool Logger::hasCodeEventListener(CodeEventListener* listener) {
+ return listeners_.Contains(listener);
}
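
The new addCodeEventListener/removeCodeEventListener/hasCodeEventListener above implement a small observer registry that the CALL_LISTENERS macro fans events out to. A sketch of the same pattern with std::vector in place of V8's List (names are illustrative):

#include <algorithm>
#include <cassert>
#include <vector>

struct CodeEventListener {};  // stand-in for the real interface

class ListenerRegistry {
 public:
  void Add(CodeEventListener* l) {
    assert(!Has(l));
    listeners_.push_back(l);
  }
  void Remove(CodeEventListener* l) {
    assert(Has(l));
    listeners_.erase(std::find(listeners_.begin(), listeners_.end(), l));
  }
  bool Has(CodeEventListener* l) const {
    return std::find(listeners_.begin(), listeners_.end(), l) != listeners_.end();
  }
 private:
  std::vector<CodeEventListener*> listeners_;
};
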
-#define DECLARE_EVENT(ignore1, name) name,
-static const char* const kLogEventsNames[Logger::NUMBER_OF_LOG_EVENTS] = {
- LOG_EVENTS_AND_TAGS_LIST(DECLARE_EVENT)
-};
-#undef DECLARE_EVENT
-
-
void Logger::ProfilerBeginEvent() {
if (!log_->IsEnabled()) return;
- LogMessageBuilder msg(this);
+ Log::MessageBuilder msg(log_);
msg.Append("profiler,\"begin\",%d\n", kSamplingIntervalMs);
msg.WriteToLogFile();
}
@@ -610,7 +755,7 @@ void Logger::StringEvent(const char* name, const char* value) {
void Logger::UncheckedStringEvent(const char* name, const char* value) {
if (!log_->IsEnabled()) return;
- LogMessageBuilder msg(this);
+ Log::MessageBuilder msg(log_);
msg.Append("%s,\"%s\"\n", name, value);
msg.WriteToLogFile();
}
@@ -628,7 +773,7 @@ void Logger::IntPtrTEvent(const char* name, intptr_t value) {
void Logger::UncheckedIntEvent(const char* name, int value) {
if (!log_->IsEnabled()) return;
- LogMessageBuilder msg(this);
+ Log::MessageBuilder msg(log_);
msg.Append("%s,%d\n", name, value);
msg.WriteToLogFile();
}
@@ -636,7 +781,7 @@ void Logger::UncheckedIntEvent(const char* name, int value) {
void Logger::UncheckedIntPtrTEvent(const char* name, intptr_t value) {
if (!log_->IsEnabled()) return;
- LogMessageBuilder msg(this);
+ Log::MessageBuilder msg(log_);
msg.Append("%s,%" V8_PTR_PREFIX "d\n", name, value);
msg.WriteToLogFile();
}
@@ -644,7 +789,7 @@ void Logger::UncheckedIntPtrTEvent(const char* name, intptr_t value) {
void Logger::HandleEvent(const char* name, Object** location) {
if (!log_->IsEnabled() || !FLAG_log_handles) return;
- LogMessageBuilder msg(this);
+ Log::MessageBuilder msg(log_);
msg.Append("%s,0x%" V8PRIxPTR "\n", name, location);
msg.WriteToLogFile();
}
@@ -655,7 +800,7 @@ void Logger::HandleEvent(const char* name, Object** location) {
// FLAG_log_api is true.
void Logger::ApiEvent(const char* format, ...) {
ASSERT(log_->IsEnabled() && FLAG_log_api);
- LogMessageBuilder msg(this);
+ Log::MessageBuilder msg(log_);
va_list ap;
va_start(ap, format);
msg.AppendVA(format, ap);
@@ -670,6 +815,18 @@ void Logger::ApiNamedSecurityCheck(Object* key) {
SmartArrayPointer<char> str =
String::cast(key)->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
ApiEvent("api,check-security,\"%s\"\n", *str);
+ } else if (key->IsSymbol()) {
+ Symbol* symbol = Symbol::cast(key);
+ if (symbol->name()->IsUndefined()) {
+ ApiEvent("api,check-security,symbol(hash %x)\n",
+ Symbol::cast(key)->Hash());
+ } else {
+ SmartArrayPointer<char> str = String::cast(symbol->name())->ToCString(
+ DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
+ ApiEvent("api,check-security,symbol(\"%s\" hash %x)\n",
+ *str,
+ Symbol::cast(key)->Hash());
+ }
} else if (key->IsUndefined()) {
ApiEvent("api,check-security,undefined\n");
} else {
@@ -682,7 +839,7 @@ void Logger::SharedLibraryEvent(const char* library_path,
uintptr_t start,
uintptr_t end) {
if (!log_->IsEnabled() || !FLAG_prof) return;
- LogMessageBuilder msg(this);
+ Log::MessageBuilder msg(log_);
msg.Append("shared-library,\"%s\",0x%08" V8PRIxPTR ",0x%08" V8PRIxPTR "\n",
library_path,
start,
@@ -695,7 +852,7 @@ void Logger::SharedLibraryEvent(const wchar_t* library_path,
uintptr_t start,
uintptr_t end) {
if (!log_->IsEnabled() || !FLAG_prof) return;
- LogMessageBuilder msg(this);
+ Log::MessageBuilder msg(log_);
msg.Append("shared-library,\"%ls\",0x%08" V8PRIxPTR ",0x%08" V8PRIxPTR "\n",
library_path,
start,
@@ -704,10 +861,61 @@ void Logger::SharedLibraryEvent(const wchar_t* library_path,
}
+void Logger::CodeDeoptEvent(Code* code) {
+ if (!log_->IsEnabled()) return;
+ ASSERT(FLAG_log_internal_timer_events);
+ Log::MessageBuilder msg(log_);
+ int since_epoch = static_cast<int>(timer_.Elapsed().InMicroseconds());
+ msg.Append("code-deopt,%ld,%d\n", since_epoch, code->CodeSize());
+ msg.WriteToLogFile();
+}
+
+
+void Logger::TimerEvent(StartEnd se, const char* name) {
+ if (!log_->IsEnabled()) return;
+ ASSERT(FLAG_log_internal_timer_events);
+ Log::MessageBuilder msg(log_);
+ int since_epoch = static_cast<int>(timer_.Elapsed().InMicroseconds());
+ const char* format = (se == START) ? "timer-event-start,\"%s\",%ld\n"
+ : "timer-event-end,\"%s\",%ld\n";
+ msg.Append(format, name, since_epoch);
+ msg.WriteToLogFile();
+}
+
+
+void Logger::EnterExternal(Isolate* isolate) {
+ LOG(isolate, TimerEvent(START, TimerEventScope::v8_external));
+ ASSERT(isolate->current_vm_state() == JS);
+ isolate->set_current_vm_state(EXTERNAL);
+}
+
+
+void Logger::LeaveExternal(Isolate* isolate) {
+ LOG(isolate, TimerEvent(END, TimerEventScope::v8_external));
+ ASSERT(isolate->current_vm_state() == EXTERNAL);
+ isolate->set_current_vm_state(JS);
+}
+
+
+void Logger::TimerEventScope::LogTimerEvent(StartEnd se) {
+ LOG(isolate_, TimerEvent(se, name_));
+}
+
+
+const char* Logger::TimerEventScope::v8_recompile_synchronous =
+ "V8.RecompileSynchronous";
+const char* Logger::TimerEventScope::v8_recompile_concurrent =
+ "V8.RecompileConcurrent";
+const char* Logger::TimerEventScope::v8_compile_full_code =
+ "V8.CompileFullCode";
+const char* Logger::TimerEventScope::v8_execute = "V8.Execute";
+const char* Logger::TimerEventScope::v8_external = "V8.External";
+
+
void Logger::LogRegExpSource(Handle<JSRegExp> regexp) {
// Prints "/" + re.source + "/" +
// (re.global?"g":"") + (re.ignorecase?"i":"") + (re.multiline?"m":"")
- LogMessageBuilder msg(this);
+ Log::MessageBuilder msg(log_);
Handle<Object> source = GetProperty(regexp, "source");
if (!source->IsString()) {
@@ -748,7 +956,7 @@ void Logger::LogRegExpSource(Handle<JSRegExp> regexp) {
void Logger::RegExpCompileEvent(Handle<JSRegExp> regexp, bool in_cache) {
if (!log_->IsEnabled() || !FLAG_log_regexp) return;
- LogMessageBuilder msg(this);
+ Log::MessageBuilder msg(log_);
msg.Append("regexp-compile,");
LogRegExpSource(regexp);
msg.Append(in_cache ? ",hit\n" : ",miss\n");
@@ -756,16 +964,17 @@ void Logger::RegExpCompileEvent(Handle<JSRegExp> regexp, bool in_cache) {
}
-void Logger::LogRuntime(Vector<const char> format, JSArray* args) {
+void Logger::LogRuntime(Vector<const char> format,
+ JSArray* args) {
if (!log_->IsEnabled() || !FLAG_log_runtime) return;
- HandleScope scope;
- LogMessageBuilder msg(this);
+ HandleScope scope(isolate_);
+ Log::MessageBuilder msg(log_);
for (int i = 0; i < format.length(); i++) {
char c = format[i];
if (c == '%' && i <= format.length() - 2) {
i++;
ASSERT('0' <= format[i] && format[i] <= '9');
- MaybeObject* maybe = args->GetElement(format[i] - '0');
+ MaybeObject* maybe = args->GetElement(isolate_, format[i] - '0');
Object* obj;
if (!maybe->ToObject(&obj)) {
msg.Append("<exception>");
@@ -809,14 +1018,27 @@ void Logger::ApiIndexedSecurityCheck(uint32_t index) {
void Logger::ApiNamedPropertyAccess(const char* tag,
JSObject* holder,
Object* name) {
- ASSERT(name->IsString());
+ ASSERT(name->IsName());
if (!log_->IsEnabled() || !FLAG_log_api) return;
String* class_name_obj = holder->class_name();
SmartArrayPointer<char> class_name =
class_name_obj->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- SmartArrayPointer<char> property_name =
- String::cast(name)->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- ApiEvent("api,%s,\"%s\",\"%s\"\n", tag, *class_name, *property_name);
+ if (name->IsString()) {
+ SmartArrayPointer<char> property_name =
+ String::cast(name)->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
+ ApiEvent("api,%s,\"%s\",\"%s\"\n", tag, *class_name, *property_name);
+ } else {
+ Symbol* symbol = Symbol::cast(name);
+ uint32_t hash = symbol->Hash();
+ if (symbol->name()->IsUndefined()) {
+ ApiEvent("api,%s,\"%s\",symbol(hash %x)\n", tag, *class_name, hash);
+ } else {
+ SmartArrayPointer<char> str = String::cast(symbol->name())->ToCString(
+ DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
+ ApiEvent("api,%s,\"%s\",symbol(\"%s\" hash %x)\n",
+ tag, *class_name, *str, hash);
+ }
+ }
}
void Logger::ApiIndexedPropertyAccess(const char* tag,
@@ -829,6 +1051,7 @@ void Logger::ApiIndexedPropertyAccess(const char* tag,
ApiEvent("api,%s,\"%s\",%u\n", tag, *class_name, index);
}
+
void Logger::ApiObjectAccess(const char* tag, JSObject* object) {
if (!log_->IsEnabled() || !FLAG_log_api) return;
String* class_name_obj = object->class_name();
@@ -846,7 +1069,7 @@ void Logger::ApiEntryCall(const char* name) {
void Logger::NewEvent(const char* name, void* object, size_t size) {
if (!log_->IsEnabled() || !FLAG_log) return;
- LogMessageBuilder msg(this);
+ Log::MessageBuilder msg(log_);
msg.Append("new,%s,0x%" V8PRIxPTR ",%u\n", name, object,
static_cast<unsigned int>(size));
msg.WriteToLogFile();
@@ -855,93 +1078,92 @@ void Logger::NewEvent(const char* name, void* object, size_t size) {
void Logger::DeleteEvent(const char* name, void* object) {
if (!log_->IsEnabled() || !FLAG_log) return;
- LogMessageBuilder msg(this);
+ Log::MessageBuilder msg(log_);
msg.Append("delete,%s,0x%" V8PRIxPTR "\n", name, object);
msg.WriteToLogFile();
}
void Logger::NewEventStatic(const char* name, void* object, size_t size) {
- LOGGER->NewEvent(name, object, size);
+ Isolate::Current()->logger()->NewEvent(name, object, size);
}
void Logger::DeleteEventStatic(const char* name, void* object) {
- LOGGER->DeleteEvent(name, object);
+ Isolate::Current()->logger()->DeleteEvent(name, object);
}
-void Logger::CallbackEventInternal(const char* prefix, const char* name,
+
+void Logger::CallbackEventInternal(const char* prefix, Name* name,
Address entry_point) {
- if (!log_->IsEnabled() || !FLAG_log_code) return;
- LogMessageBuilder msg(this);
- msg.Append("%s,%s,",
+ if (!FLAG_log_code || !log_->IsEnabled()) return;
+ Log::MessageBuilder msg(log_);
+ msg.Append("%s,%s,-2,",
kLogEventsNames[CODE_CREATION_EVENT],
kLogEventsNames[CALLBACK_TAG]);
msg.AppendAddress(entry_point);
- msg.Append(",1,\"%s%s\"", prefix, name);
+ if (name->IsString()) {
+ SmartArrayPointer<char> str =
+ String::cast(name)->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
+ msg.Append(",1,\"%s%s\"", prefix, *str);
+ } else {
+ Symbol* symbol = Symbol::cast(name);
+ if (symbol->name()->IsUndefined()) {
+ msg.Append(",1,symbol(hash %x)", prefix, symbol->Hash());
+ } else {
+ SmartArrayPointer<char> str = String::cast(symbol->name())->ToCString(
+ DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
+ msg.Append(",1,symbol(\"%s\" hash %x)", prefix, *str, symbol->Hash());
+ }
+ }
msg.Append('\n');
msg.WriteToLogFile();
}
-void Logger::CallbackEvent(String* name, Address entry_point) {
- if (!log_->IsEnabled() || !FLAG_log_code) return;
- SmartArrayPointer<char> str =
- name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- CallbackEventInternal("", *str, entry_point);
+void Logger::CallbackEvent(Name* name, Address entry_point) {
+ PROFILER_LOG(CallbackEvent(name, entry_point));
+ CallbackEventInternal("", name, entry_point);
}
-void Logger::GetterCallbackEvent(String* name, Address entry_point) {
- if (!log_->IsEnabled() || !FLAG_log_code) return;
- SmartArrayPointer<char> str =
- name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- CallbackEventInternal("get ", *str, entry_point);
+void Logger::GetterCallbackEvent(Name* name, Address entry_point) {
+ PROFILER_LOG(GetterCallbackEvent(name, entry_point));
+ CallbackEventInternal("get ", name, entry_point);
}
-void Logger::SetterCallbackEvent(String* name, Address entry_point) {
- if (!log_->IsEnabled() || !FLAG_log_code) return;
- SmartArrayPointer<char> str =
- name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- CallbackEventInternal("set ", *str, entry_point);
+void Logger::SetterCallbackEvent(Name* name, Address entry_point) {
+ PROFILER_LOG(SetterCallbackEvent(name, entry_point));
+ CallbackEventInternal("set ", name, entry_point);
+}
+
+
+static void AppendCodeCreateHeader(Log::MessageBuilder* msg,
+ Logger::LogEventsAndTags tag,
+ Code* code) {
+ ASSERT(msg);
+ msg->Append("%s,%s,%d,",
+ kLogEventsNames[Logger::CODE_CREATION_EVENT],
+ kLogEventsNames[tag],
+ code->kind());
+ msg->AppendAddress(code->address());
+ msg->Append(",%d,", code->ExecutableSize());
}
void Logger::CodeCreateEvent(LogEventsAndTags tag,
Code* code,
const char* comment) {
+ PROFILER_LOG(CodeCreateEvent(tag, code, comment));
+
if (!is_logging_code_events()) return;
- if (FLAG_ll_prof || Serializer::enabled() || code_event_handler_ != NULL) {
- name_buffer_->Reset();
- name_buffer_->AppendBytes(kLogEventsNames[tag]);
- name_buffer_->AppendByte(':');
- name_buffer_->AppendBytes(comment);
- }
- if (code_event_handler_ != NULL) {
- IssueCodeAddedEvent(code, name_buffer_->get(), name_buffer_->size());
- }
- if (!log_->IsEnabled()) return;
- if (FLAG_ll_prof) {
- LowLevelCodeCreateEvent(code, name_buffer_->get(), name_buffer_->size());
- }
- if (Serializer::enabled()) {
- RegisterSnapshotCodeName(code, name_buffer_->get(), name_buffer_->size());
- }
- if (!FLAG_log_code) return;
- LogMessageBuilder msg(this);
- msg.Append("%s,%s,",
- kLogEventsNames[CODE_CREATION_EVENT],
- kLogEventsNames[tag]);
- msg.AppendAddress(code->address());
- msg.Append(",%d,\"", code->ExecutableSize());
- for (const char* p = comment; *p != '\0'; p++) {
- if (*p == '"') {
- msg.Append('\\');
- }
- msg.Append(*p);
- }
- msg.Append('"');
+ CALL_LISTENERS(CodeCreateEvent(tag, code, comment));
+
+ if (!FLAG_log_code || !log_->IsEnabled()) return;
+ Log::MessageBuilder msg(log_);
+ AppendCodeCreateHeader(&msg, tag, code);
+ msg.AppendDoubleQuotedString(comment);
msg.Append('\n');
msg.WriteToLogFile();
}
@@ -949,83 +1171,52 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
void Logger::CodeCreateEvent(LogEventsAndTags tag,
Code* code,
- String* name) {
+ Name* name) {
+ PROFILER_LOG(CodeCreateEvent(tag, code, name));
+
if (!is_logging_code_events()) return;
- if (FLAG_ll_prof || Serializer::enabled() || code_event_handler_ != NULL) {
- name_buffer_->Reset();
- name_buffer_->AppendBytes(kLogEventsNames[tag]);
- name_buffer_->AppendByte(':');
- name_buffer_->AppendString(name);
- }
- if (code_event_handler_ != NULL) {
- IssueCodeAddedEvent(code, name_buffer_->get(), name_buffer_->size());
- }
- if (!log_->IsEnabled()) return;
- if (FLAG_ll_prof) {
- LowLevelCodeCreateEvent(code, name_buffer_->get(), name_buffer_->size());
- }
- if (Serializer::enabled()) {
- RegisterSnapshotCodeName(code, name_buffer_->get(), name_buffer_->size());
+ CALL_LISTENERS(CodeCreateEvent(tag, code, name));
+
+ if (!FLAG_log_code || !log_->IsEnabled()) return;
+ Log::MessageBuilder msg(log_);
+ AppendCodeCreateHeader(&msg, tag, code);
+ if (name->IsString()) {
+ msg.Append('"');
+ msg.AppendDetailed(String::cast(name), false);
+ msg.Append('"');
+ } else {
+ msg.AppendSymbolName(Symbol::cast(name));
}
- if (!FLAG_log_code) return;
- LogMessageBuilder msg(this);
- msg.Append("%s,%s,",
- kLogEventsNames[CODE_CREATION_EVENT],
- kLogEventsNames[tag]);
- msg.AppendAddress(code->address());
- msg.Append(",%d,\"", code->ExecutableSize());
- msg.AppendDetailed(name, false);
- msg.Append('"');
msg.Append('\n');
msg.WriteToLogFile();
}
-// ComputeMarker must only be used when SharedFunctionInfo is known.
-static const char* ComputeMarker(Code* code) {
- switch (code->kind()) {
- case Code::FUNCTION: return code->optimizable() ? "~" : "";
- case Code::OPTIMIZED_FUNCTION: return "*";
- default: return "";
- }
-}
-
-
void Logger::CodeCreateEvent(LogEventsAndTags tag,
Code* code,
SharedFunctionInfo* shared,
- String* name) {
+ CompilationInfo* info,
+ Name* name) {
+ PROFILER_LOG(CodeCreateEvent(tag, code, shared, info, name));
+
if (!is_logging_code_events()) return;
- if (FLAG_ll_prof || Serializer::enabled() || code_event_handler_ != NULL) {
- name_buffer_->Reset();
- name_buffer_->AppendBytes(kLogEventsNames[tag]);
- name_buffer_->AppendByte(':');
- name_buffer_->AppendBytes(ComputeMarker(code));
- name_buffer_->AppendString(name);
- }
- if (code_event_handler_ != NULL) {
- IssueCodeAddedEvent(code, name_buffer_->get(), name_buffer_->size());
- }
- if (!log_->IsEnabled()) return;
- if (FLAG_ll_prof) {
- LowLevelCodeCreateEvent(code, name_buffer_->get(), name_buffer_->size());
- }
- if (Serializer::enabled()) {
- RegisterSnapshotCodeName(code, name_buffer_->get(), name_buffer_->size());
- }
- if (!FLAG_log_code) return;
- if (code == Isolate::Current()->builtins()->builtin(
+ CALL_LISTENERS(CodeCreateEvent(tag, code, shared, info, name));
+
+ if (!FLAG_log_code || !log_->IsEnabled()) return;
+ if (code == isolate_->builtins()->builtin(
Builtins::kLazyCompile))
return;
- LogMessageBuilder msg(this);
- SmartArrayPointer<char> str =
- name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- msg.Append("%s,%s,",
- kLogEventsNames[CODE_CREATION_EVENT],
- kLogEventsNames[tag]);
- msg.AppendAddress(code->address());
- msg.Append(",%d,\"%s\",", code->ExecutableSize(), *str);
+ Log::MessageBuilder msg(log_);
+ AppendCodeCreateHeader(&msg, tag, code);
+ if (name->IsString()) {
+ SmartArrayPointer<char> str =
+ String::cast(name)->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
+ msg.Append("\"%s\"", *str);
+ } else {
+ msg.AppendSymbolName(Symbol::cast(name));
+ }
+ msg.Append(',');
msg.AppendAddress(shared->address());
msg.Append(",%s", ComputeMarker(code));
msg.Append('\n');
@@ -1039,44 +1230,28 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
void Logger::CodeCreateEvent(LogEventsAndTags tag,
Code* code,
SharedFunctionInfo* shared,
- String* source, int line) {
+ CompilationInfo* info,
+ Name* source, int line, int column) {
+ PROFILER_LOG(CodeCreateEvent(tag, code, shared, info, source, line, column));
+
if (!is_logging_code_events()) return;
- if (FLAG_ll_prof || Serializer::enabled() || code_event_handler_ != NULL) {
- name_buffer_->Reset();
- name_buffer_->AppendBytes(kLogEventsNames[tag]);
- name_buffer_->AppendByte(':');
- name_buffer_->AppendBytes(ComputeMarker(code));
- name_buffer_->AppendString(shared->DebugName());
- name_buffer_->AppendByte(' ');
- name_buffer_->AppendString(source);
- name_buffer_->AppendByte(':');
- name_buffer_->AppendInt(line);
- }
- if (code_event_handler_ != NULL) {
- IssueCodeAddedEvent(code, name_buffer_->get(), name_buffer_->size());
- }
- if (!log_->IsEnabled()) return;
- if (FLAG_ll_prof) {
- LowLevelCodeCreateEvent(code, name_buffer_->get(), name_buffer_->size());
- }
- if (Serializer::enabled()) {
- RegisterSnapshotCodeName(code, name_buffer_->get(), name_buffer_->size());
- }
- if (!FLAG_log_code) return;
- LogMessageBuilder msg(this);
+ CALL_LISTENERS(CodeCreateEvent(tag, code, shared, info, source, line,
+ column));
+
+ if (!FLAG_log_code || !log_->IsEnabled()) return;
+ Log::MessageBuilder msg(log_);
+ AppendCodeCreateHeader(&msg, tag, code);
SmartArrayPointer<char> name =
shared->DebugName()->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- SmartArrayPointer<char> sourcestr =
- source->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- msg.Append("%s,%s,",
- kLogEventsNames[CODE_CREATION_EVENT],
- kLogEventsNames[tag]);
- msg.AppendAddress(code->address());
- msg.Append(",%d,\"%s %s:%d\",",
- code->ExecutableSize(),
- *name,
- *sourcestr,
- line);
+ msg.Append("\"%s ", *name);
+ if (source->IsString()) {
+ SmartArrayPointer<char> sourcestr =
+ String::cast(source)->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
+ msg.Append("%s", *sourcestr);
+ } else {
+ msg.AppendSymbolName(Symbol::cast(source));
+ }
+ msg.Append(":%d:%d\",", line, column);
msg.AppendAddress(shared->address());
msg.Append(",%s", ComputeMarker(code));
msg.Append('\n');
@@ -1084,114 +1259,122 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
}
-void Logger::CodeCreateEvent(LogEventsAndTags tag, Code* code, int args_count) {
+void Logger::CodeCreateEvent(LogEventsAndTags tag,
+ Code* code,
+ int args_count) {
+ PROFILER_LOG(CodeCreateEvent(tag, code, args_count));
+
if (!is_logging_code_events()) return;
- if (FLAG_ll_prof || Serializer::enabled() || code_event_handler_ != NULL) {
- name_buffer_->Reset();
- name_buffer_->AppendBytes(kLogEventsNames[tag]);
- name_buffer_->AppendByte(':');
- name_buffer_->AppendInt(args_count);
- }
- if (code_event_handler_ != NULL) {
- IssueCodeAddedEvent(code, name_buffer_->get(), name_buffer_->size());
- }
- if (!log_->IsEnabled()) return;
- if (FLAG_ll_prof) {
- LowLevelCodeCreateEvent(code, name_buffer_->get(), name_buffer_->size());
- }
- if (Serializer::enabled()) {
- RegisterSnapshotCodeName(code, name_buffer_->get(), name_buffer_->size());
- }
- if (!FLAG_log_code) return;
- LogMessageBuilder msg(this);
- msg.Append("%s,%s,",
- kLogEventsNames[CODE_CREATION_EVENT],
- kLogEventsNames[tag]);
- msg.AppendAddress(code->address());
- msg.Append(",%d,\"args_count: %d\"", code->ExecutableSize(), args_count);
+ CALL_LISTENERS(CodeCreateEvent(tag, code, args_count));
+
+ if (!FLAG_log_code || !log_->IsEnabled()) return;
+ Log::MessageBuilder msg(log_);
+ AppendCodeCreateHeader(&msg, tag, code);
+ msg.Append("\"args_count: %d\"", args_count);
msg.Append('\n');
msg.WriteToLogFile();
}
void Logger::CodeMovingGCEvent() {
+ PROFILER_LOG(CodeMovingGCEvent());
+
+ if (!is_logging_code_events()) return;
if (!log_->IsEnabled() || !FLAG_ll_prof) return;
- LowLevelLogWriteBytes(&kCodeMovingGCTag, sizeof(kCodeMovingGCTag));
+ CALL_LISTENERS(CodeMovingGCEvent());
OS::SignalCodeMovingGC();
}
void Logger::RegExpCodeCreateEvent(Code* code, String* source) {
+ PROFILER_LOG(RegExpCodeCreateEvent(code, source));
+
if (!is_logging_code_events()) return;
- if (FLAG_ll_prof || Serializer::enabled() || code_event_handler_ != NULL) {
- name_buffer_->Reset();
- name_buffer_->AppendBytes(kLogEventsNames[REG_EXP_TAG]);
- name_buffer_->AppendByte(':');
- name_buffer_->AppendString(source);
- }
- if (code_event_handler_ != NULL) {
- IssueCodeAddedEvent(code, name_buffer_->get(), name_buffer_->size());
- }
- if (!log_->IsEnabled()) return;
- if (FLAG_ll_prof) {
- LowLevelCodeCreateEvent(code, name_buffer_->get(), name_buffer_->size());
- }
- if (Serializer::enabled()) {
- RegisterSnapshotCodeName(code, name_buffer_->get(), name_buffer_->size());
- }
- if (!FLAG_log_code) return;
- LogMessageBuilder msg(this);
- msg.Append("%s,%s,",
- kLogEventsNames[CODE_CREATION_EVENT],
- kLogEventsNames[REG_EXP_TAG]);
- msg.AppendAddress(code->address());
- msg.Append(",%d,\"", code->ExecutableSize());
+ CALL_LISTENERS(RegExpCodeCreateEvent(code, source));
+
+ if (!FLAG_log_code || !log_->IsEnabled()) return;
+ Log::MessageBuilder msg(log_);
+ AppendCodeCreateHeader(&msg, REG_EXP_TAG, code);
+ msg.Append('"');
msg.AppendDetailed(source, false);
- msg.Append('\"');
+ msg.Append('"');
msg.Append('\n');
msg.WriteToLogFile();
}
void Logger::CodeMoveEvent(Address from, Address to) {
- if (code_event_handler_ != NULL) IssueCodeMovedEvent(from, to);
- if (!log_->IsEnabled()) return;
- if (FLAG_ll_prof) LowLevelCodeMoveEvent(from, to);
- if (Serializer::enabled() && address_to_name_map_ != NULL) {
- address_to_name_map_->Move(from, to);
- }
+ PROFILER_LOG(CodeMoveEvent(from, to));
+
+ if (!is_logging_code_events()) return;
+ CALL_LISTENERS(CodeMoveEvent(from, to));
MoveEventInternal(CODE_MOVE_EVENT, from, to);
}
void Logger::CodeDeleteEvent(Address from) {
- if (code_event_handler_ != NULL) IssueCodeRemovedEvent(from);
- if (!log_->IsEnabled()) return;
- if (FLAG_ll_prof) LowLevelCodeDeleteEvent(from);
- if (Serializer::enabled() && address_to_name_map_ != NULL) {
- address_to_name_map_->Remove(from);
+ PROFILER_LOG(CodeDeleteEvent(from));
+
+ if (!is_logging_code_events()) return;
+ CALL_LISTENERS(CodeDeleteEvent(from));
+
+ if (!FLAG_log_code || !log_->IsEnabled()) return;
+ Log::MessageBuilder msg(log_);
+ msg.Append("%s,", kLogEventsNames[CODE_DELETE_EVENT]);
+ msg.AppendAddress(from);
+ msg.Append('\n');
+ msg.WriteToLogFile();
+}
+
+
+void Logger::CodeLinePosInfoAddPositionEvent(void* jit_handler_data,
+ int pc_offset,
+ int position) {
+ JIT_LOG(AddCodeLinePosInfoEvent(jit_handler_data,
+ pc_offset,
+ position,
+ JitCodeEvent::POSITION));
+}
+
+
+void Logger::CodeLinePosInfoAddStatementPositionEvent(void* jit_handler_data,
+ int pc_offset,
+ int position) {
+ JIT_LOG(AddCodeLinePosInfoEvent(jit_handler_data,
+ pc_offset,
+ position,
+ JitCodeEvent::STATEMENT_POSITION));
+}
+
+
+void Logger::CodeStartLinePosInfoRecordEvent(PositionsRecorder* pos_recorder) {
+ if (jit_logger_ != NULL) {
+ pos_recorder->AttachJITHandlerData(jit_logger_->StartCodePosInfoEvent());
}
- DeleteEventInternal(CODE_DELETE_EVENT, from);
+}
+
+
+void Logger::CodeEndLinePosInfoRecordEvent(Code* code,
+ void* jit_handler_data) {
+ JIT_LOG(EndCodePosInfoEvent(code, jit_handler_data));
+}
+
+
+void Logger::CodeNameEvent(Address addr, int pos, const char* code_name) {
+ if (code_name == NULL) return; // Not a code object.
+ Log::MessageBuilder msg(log_);
+ msg.Append("%s,%d,", kLogEventsNames[SNAPSHOT_CODE_NAME_EVENT], pos);
+ msg.AppendDoubleQuotedString(code_name);
+ msg.Append("\n");
+ msg.WriteToLogFile();
}
void Logger::SnapshotPositionEvent(Address addr, int pos) {
if (!log_->IsEnabled()) return;
- if (FLAG_ll_prof) LowLevelSnapshotPositionEvent(addr, pos);
- if (Serializer::enabled() && address_to_name_map_ != NULL) {
- const char* code_name = address_to_name_map_->Lookup(addr);
- if (code_name == NULL) return; // Not a code object.
- LogMessageBuilder msg(this);
- msg.Append("%s,%d,\"", kLogEventsNames[SNAPSHOT_CODE_NAME_EVENT], pos);
- for (const char* p = code_name; *p != '\0'; ++p) {
- if (*p == '"') msg.Append('\\');
- msg.Append(*p);
- }
- msg.Append("\"\n");
- msg.WriteToLogFile();
- }
+ LL_LOG(SnapshotPositionEvent(addr, pos));
if (!FLAG_log_snapshot_positions) return;
- LogMessageBuilder msg(this);
+ Log::MessageBuilder msg(log_);
msg.Append("%s,", kLogEventsNames[SNAPSHOT_POSITION_EVENT]);
msg.AppendAddress(addr);
msg.Append(",%d", pos);
@@ -1201,6 +1384,9 @@ void Logger::SnapshotPositionEvent(Address addr, int pos) {
void Logger::SharedFunctionInfoMoveEvent(Address from, Address to) {
+ PROFILER_LOG(SharedFunctionInfoMoveEvent(from, to));
+
+ if (!is_logging_code_events()) return;
MoveEventInternal(SHARED_FUNC_MOVE_EVENT, from, to);
}
@@ -1208,8 +1394,8 @@ void Logger::SharedFunctionInfoMoveEvent(Address from, Address to) {
void Logger::MoveEventInternal(LogEventsAndTags event,
Address from,
Address to) {
- if (!log_->IsEnabled() || !FLAG_log_code) return;
- LogMessageBuilder msg(this);
+ if (!FLAG_log_code || !log_->IsEnabled()) return;
+ Log::MessageBuilder msg(log_);
msg.Append("%s,", kLogEventsNames[event]);
msg.AppendAddress(from);
msg.Append(',');
@@ -1219,19 +1405,9 @@ void Logger::MoveEventInternal(LogEventsAndTags event,
}
-void Logger::DeleteEventInternal(LogEventsAndTags event, Address from) {
- if (!log_->IsEnabled() || !FLAG_log_code) return;
- LogMessageBuilder msg(this);
- msg.Append("%s,", kLogEventsNames[event]);
- msg.AppendAddress(from);
- msg.Append('\n');
- msg.WriteToLogFile();
-}
-
-
void Logger::ResourceEvent(const char* name, const char* tag) {
if (!log_->IsEnabled() || !FLAG_log) return;
- LogMessageBuilder msg(this);
+ Log::MessageBuilder msg(log_);
msg.Append("%s,%s,", name, tag);
uint32_t sec, usec;
@@ -1245,18 +1421,22 @@ void Logger::ResourceEvent(const char* name, const char* tag) {
}
-void Logger::SuspectReadEvent(String* name, Object* obj) {
+void Logger::SuspectReadEvent(Name* name, Object* obj) {
if (!log_->IsEnabled() || !FLAG_log_suspect) return;
- LogMessageBuilder msg(this);
+ Log::MessageBuilder msg(log_);
String* class_name = obj->IsJSObject()
? JSObject::cast(obj)->class_name()
- : HEAP->empty_string();
+ : isolate_->heap()->empty_string();
msg.Append("suspect-read,");
msg.Append(class_name);
msg.Append(',');
- msg.Append('"');
- msg.Append(name);
- msg.Append('"');
+ if (name->IsString()) {
+ msg.Append('"');
+ msg.Append(String::cast(name));
+ msg.Append('"');
+ } else {
+ msg.AppendSymbolName(Symbol::cast(name));
+ }
msg.Append('\n');
msg.WriteToLogFile();
}
@@ -1264,7 +1444,7 @@ void Logger::SuspectReadEvent(String* name, Object* obj) {
void Logger::HeapSampleBeginEvent(const char* space, const char* kind) {
if (!log_->IsEnabled() || !FLAG_log_gc) return;
- LogMessageBuilder msg(this);
+ Log::MessageBuilder msg(log_);
// Using non-relative system time in order to be able to synchronize with
// external memory profiling events (e.g. DOM memory size).
msg.Append("heap-sample-begin,\"%s\",\"%s\",%.0f\n",
@@ -1275,7 +1455,7 @@ void Logger::HeapSampleBeginEvent(const char* space, const char* kind) {
void Logger::HeapSampleEndEvent(const char* space, const char* kind) {
if (!log_->IsEnabled() || !FLAG_log_gc) return;
- LogMessageBuilder msg(this);
+ Log::MessageBuilder msg(log_);
msg.Append("heap-sample-end,\"%s\",\"%s\"\n", space, kind);
msg.WriteToLogFile();
}
@@ -1283,7 +1463,7 @@ void Logger::HeapSampleEndEvent(const char* space, const char* kind) {
void Logger::HeapSampleItemEvent(const char* type, int number, int bytes) {
if (!log_->IsEnabled() || !FLAG_log_gc) return;
- LogMessageBuilder msg(this);
+ Log::MessageBuilder msg(log_);
msg.Append("heap-sample-item,%s,%d,%d\n", type, number, bytes);
msg.WriteToLogFile();
}
@@ -1291,7 +1471,7 @@ void Logger::HeapSampleItemEvent(const char* type, int number, int bytes) {
void Logger::DebugTag(const char* call_site_tag) {
if (!log_->IsEnabled() || !FLAG_log) return;
- LogMessageBuilder msg(this);
+ Log::MessageBuilder msg(log_);
msg.Append("debug-tag,%s\n", call_site_tag);
msg.WriteToLogFile();
}
@@ -1304,7 +1484,7 @@ void Logger::DebugEvent(const char* event_type, Vector<uint16_t> parameter) {
s.AddCharacter(static_cast<char>(parameter[i]));
}
char* parameter_string = s.Finalize();
- LogMessageBuilder msg(this);
+ Log::MessageBuilder msg(log_);
msg.Append("debug-queue-event,%s,%15.3f,%s\n",
event_type,
OS::TimeCurrentMillis(),
@@ -1316,11 +1496,10 @@ void Logger::DebugEvent(const char* event_type, Vector<uint16_t> parameter) {
void Logger::TickEvent(TickSample* sample, bool overflow) {
if (!log_->IsEnabled() || !FLAG_prof) return;
- LogMessageBuilder msg(this);
+ Log::MessageBuilder msg(log_);
msg.Append("%s,", kLogEventsNames[TICK_EVENT]);
msg.AppendAddress(sample->pc);
- msg.Append(',');
- msg.AppendAddress(sample->sp);
+ msg.Append(",%ld", static_cast<int>(timer_.Elapsed().InMicroseconds()));
if (sample->has_external_callback) {
msg.Append(",1,");
msg.AppendAddress(sample->external_callback);
@@ -1341,47 +1520,11 @@ void Logger::TickEvent(TickSample* sample, bool overflow) {
}
-bool Logger::IsProfilerPaused() {
- return profiler_ == NULL || profiler_->paused();
-}
-
-
-void Logger::PauseProfiler() {
- if (!log_->IsEnabled()) return;
- if (profiler_ != NULL) {
- // It is OK to have negative nesting.
- if (--cpu_profiler_nesting_ == 0) {
- profiler_->pause();
- if (FLAG_prof_lazy) {
- if (!FLAG_sliding_state_window && !RuntimeProfiler::IsEnabled()) {
- ticker_->Stop();
- }
- FLAG_log_code = false;
- LOG(ISOLATE, UncheckedStringEvent("profiler", "pause"));
- }
- --logging_nesting_;
- }
- }
-}
-
-
-void Logger::ResumeProfiler() {
+void Logger::StopProfiler() {
if (!log_->IsEnabled()) return;
if (profiler_ != NULL) {
- if (cpu_profiler_nesting_++ == 0) {
- ++logging_nesting_;
- if (FLAG_prof_lazy) {
- profiler_->Engage();
- LOG(ISOLATE, UncheckedStringEvent("profiler", "resume"));
- FLAG_log_code = true;
- LogCompiledFunctions();
- LogAccessorCallbacks();
- if (!FLAG_sliding_state_window && !ticker_->IsActive()) {
- ticker_->Start();
- }
- }
- profiler_->resume();
- }
+ profiler_->pause();
+ is_logging_ = false;
}
}
@@ -1389,12 +1532,7 @@ void Logger::ResumeProfiler() {
// This function can be called when Log's mutex is acquired,
// either from main or Profiler's thread.
void Logger::LogFailure() {
- PauseProfiler();
-}
-
-
-bool Logger::IsProfilerSamplerActive() {
- return ticker_->IsActive();
+ StopProfiler();
}
@@ -1430,10 +1568,11 @@ class EnumerateOptimizedFunctionsVisitor: public OptimizedFunctionVisitor {
};
-static int EnumerateCompiledFunctions(Handle<SharedFunctionInfo>* sfis,
+static int EnumerateCompiledFunctions(Heap* heap,
+ Handle<SharedFunctionInfo>* sfis,
Handle<Code>* code_objects) {
- HeapIterator iterator;
- AssertNoAllocation no_alloc;
+ HeapIterator iterator(heap);
+ DisallowHeapAllocation no_gc;
int compiled_funcs_count = 0;
// Iterate the heap to find shared function info objects and record
@@ -1458,149 +1597,85 @@ static int EnumerateCompiledFunctions(Handle<SharedFunctionInfo>* sfis,
EnumerateOptimizedFunctionsVisitor visitor(sfis,
code_objects,
&compiled_funcs_count);
- Deoptimizer::VisitAllOptimizedFunctions(&visitor);
+ Deoptimizer::VisitAllOptimizedFunctions(heap->isolate(), &visitor);
return compiled_funcs_count;
}
void Logger::LogCodeObject(Object* object) {
- if (FLAG_log_code || FLAG_ll_prof || is_logging_code_events()) {
- Code* code_object = Code::cast(object);
- LogEventsAndTags tag = Logger::STUB_TAG;
- const char* description = "Unknown code from the snapshot";
- switch (code_object->kind()) {
- case Code::FUNCTION:
- case Code::OPTIMIZED_FUNCTION:
- return; // We log this later using LogCompiledFunctions.
- case Code::UNARY_OP_IC: // fall through
- case Code::BINARY_OP_IC: // fall through
- case Code::COMPARE_IC: // fall through
- case Code::TO_BOOLEAN_IC: // fall through
- case Code::STUB:
- description =
- CodeStub::MajorName(CodeStub::GetMajorKey(code_object), true);
- if (description == NULL)
- description = "A stub from the snapshot";
- tag = Logger::STUB_TAG;
- break;
- case Code::BUILTIN:
- description = "A builtin from the snapshot";
- tag = Logger::BUILTIN_TAG;
- break;
- case Code::KEYED_LOAD_IC:
- description = "A keyed load IC from the snapshot";
- tag = Logger::KEYED_LOAD_IC_TAG;
- break;
- case Code::LOAD_IC:
- description = "A load IC from the snapshot";
- tag = Logger::LOAD_IC_TAG;
- break;
- case Code::STORE_IC:
- description = "A store IC from the snapshot";
- tag = Logger::STORE_IC_TAG;
- break;
- case Code::KEYED_STORE_IC:
- description = "A keyed store IC from the snapshot";
- tag = Logger::KEYED_STORE_IC_TAG;
- break;
- case Code::CALL_IC:
- description = "A call IC from the snapshot";
- tag = Logger::CALL_IC_TAG;
- break;
- case Code::KEYED_CALL_IC:
- description = "A keyed call IC from the snapshot";
- tag = Logger::KEYED_CALL_IC_TAG;
- break;
+ Code* code_object = Code::cast(object);
+ LogEventsAndTags tag = Logger::STUB_TAG;
+ const char* description = "Unknown code from the snapshot";
+ switch (code_object->kind()) {
+ case Code::FUNCTION:
+ case Code::OPTIMIZED_FUNCTION:
+ return; // We log this later using LogCompiledFunctions.
+ case Code::BINARY_OP_IC: {
+ BinaryOpStub stub(code_object->extended_extra_ic_state());
+ description = stub.GetName().Detach();
+ tag = Logger::STUB_TAG;
+ break;
}
- PROFILE(ISOLATE, CodeCreateEvent(tag, code_object, description));
- }
-}
-
-
-void Logger::LogCodeInfo() {
- if (!log_->IsEnabled() || !FLAG_ll_prof) return;
-#if V8_TARGET_ARCH_IA32
- const char arch[] = "ia32";
-#elif V8_TARGET_ARCH_X64
- const char arch[] = "x64";
-#elif V8_TARGET_ARCH_ARM
- const char arch[] = "arm";
-#elif V8_TARGET_ARCH_MIPS
- const char arch[] = "mips";
-#else
- const char arch[] = "unknown";
-#endif
- LowLevelLogWriteBytes(arch, sizeof(arch));
-}
-
-
-void Logger::RegisterSnapshotCodeName(Code* code,
- const char* name,
- int name_size) {
- ASSERT(Serializer::enabled());
- if (address_to_name_map_ == NULL) {
- address_to_name_map_ = new NameMap;
+ case Code::COMPARE_IC: // fall through
+ case Code::COMPARE_NIL_IC: // fall through
+ case Code::TO_BOOLEAN_IC: // fall through
+ case Code::STUB:
+ description =
+ CodeStub::MajorName(CodeStub::GetMajorKey(code_object), true);
+ if (description == NULL)
+ description = "A stub from the snapshot";
+ tag = Logger::STUB_TAG;
+ break;
+ case Code::REGEXP:
+ description = "Regular expression code";
+ tag = Logger::REG_EXP_TAG;
+ break;
+ case Code::BUILTIN:
+ description = "A builtin from the snapshot";
+ tag = Logger::BUILTIN_TAG;
+ break;
+ case Code::HANDLER:
+ description = "An IC handler from the snapshot";
+ tag = Logger::HANDLER_TAG;
+ break;
+ case Code::KEYED_LOAD_IC:
+ description = "A keyed load IC from the snapshot";
+ tag = Logger::KEYED_LOAD_IC_TAG;
+ break;
+ case Code::LOAD_IC:
+ description = "A load IC from the snapshot";
+ tag = Logger::LOAD_IC_TAG;
+ break;
+ case Code::STORE_IC:
+ description = "A store IC from the snapshot";
+ tag = Logger::STORE_IC_TAG;
+ break;
+ case Code::KEYED_STORE_IC:
+ description = "A keyed store IC from the snapshot";
+ tag = Logger::KEYED_STORE_IC_TAG;
+ break;
+ case Code::CALL_IC:
+ description = "A call IC from the snapshot";
+ tag = Logger::CALL_IC_TAG;
+ break;
+ case Code::KEYED_CALL_IC:
+ description = "A keyed call IC from the snapshot";
+ tag = Logger::KEYED_CALL_IC_TAG;
+ break;
+ case Code::NUMBER_OF_KINDS:
+ break;
}
- address_to_name_map_->Insert(code->address(), name, name_size);
-}
-
-
-void Logger::LowLevelCodeCreateEvent(Code* code,
- const char* name,
- int name_size) {
- if (log_->ll_output_handle_ == NULL) return;
- LowLevelCodeCreateStruct event;
- event.name_size = name_size;
- event.code_address = code->instruction_start();
- ASSERT(event.code_address == code->address() + Code::kHeaderSize);
- event.code_size = code->instruction_size();
- LowLevelLogWriteStruct(event);
- LowLevelLogWriteBytes(name, name_size);
- LowLevelLogWriteBytes(
- reinterpret_cast<const char*>(code->instruction_start()),
- code->instruction_size());
-}
-
-
-void Logger::LowLevelCodeMoveEvent(Address from, Address to) {
- if (log_->ll_output_handle_ == NULL) return;
- LowLevelCodeMoveStruct event;
- event.from_address = from + Code::kHeaderSize;
- event.to_address = to + Code::kHeaderSize;
- LowLevelLogWriteStruct(event);
-}
-
-
-void Logger::LowLevelCodeDeleteEvent(Address from) {
- if (log_->ll_output_handle_ == NULL) return;
- LowLevelCodeDeleteStruct event;
- event.address = from + Code::kHeaderSize;
- LowLevelLogWriteStruct(event);
-}
-
-
-void Logger::LowLevelSnapshotPositionEvent(Address addr, int pos) {
- if (log_->ll_output_handle_ == NULL) return;
- LowLevelSnapshotPositionStruct event;
- event.address = addr + Code::kHeaderSize;
- event.position = pos;
- LowLevelLogWriteStruct(event);
-}
-
-
-void Logger::LowLevelLogWriteBytes(const char* bytes, int size) {
- size_t rv = fwrite(bytes, 1, size, log_->ll_output_handle_);
- ASSERT(static_cast<size_t>(size) == rv);
- USE(rv);
+ PROFILE(isolate_, CodeCreateEvent(tag, code_object, description));
}
void Logger::LogCodeObjects() {
- HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask,
+ Heap* heap = isolate_->heap();
+ heap->CollectAllGarbage(Heap::kMakeHeapIterableMask,
"Logger::LogCodeObjects");
- HeapIterator iterator;
- AssertNoAllocation no_alloc;
+ HeapIterator iterator(heap);
+ DisallowHeapAllocation no_gc;
for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
if (obj->IsCode()) LogCodeObject(obj);
}
@@ -1612,27 +1687,30 @@ void Logger::LogExistingFunction(Handle<SharedFunctionInfo> shared,
Handle<String> func_name(shared->DebugName());
if (shared->script()->IsScript()) {
Handle<Script> script(Script::cast(shared->script()));
+ int line_num = GetScriptLineNumber(script, shared->start_position()) + 1;
+ int column_num =
+ GetScriptColumnNumber(script, shared->start_position()) + 1;
if (script->name()->IsString()) {
Handle<String> script_name(String::cast(script->name()));
- int line_num = GetScriptLineNumber(script, shared->start_position());
if (line_num > 0) {
- PROFILE(ISOLATE,
+ PROFILE(isolate_,
CodeCreateEvent(
Logger::ToNativeByScript(Logger::LAZY_COMPILE_TAG, *script),
- *code, *shared,
- *script_name, line_num + 1));
+ *code, *shared, NULL,
+ *script_name, line_num, column_num));
} else {
// Can't distinguish eval and script here, so always use Script.
- PROFILE(ISOLATE,
+ PROFILE(isolate_,
CodeCreateEvent(
Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script),
- *code, *shared, *script_name));
+ *code, *shared, NULL, *script_name));
}
} else {
- PROFILE(ISOLATE,
+ PROFILE(isolate_,
CodeCreateEvent(
Logger::ToNativeByScript(Logger::LAZY_COMPILE_TAG, *script),
- *code, *shared, *func_name));
+ *code, *shared, NULL,
+ isolate_->heap()->empty_string(), line_num, column_num));
}
} else if (shared->IsApiFunction()) {
// API function.
@@ -1642,29 +1720,30 @@ void Logger::LogExistingFunction(Handle<SharedFunctionInfo> shared,
CallHandlerInfo* call_data = CallHandlerInfo::cast(raw_call_data);
Object* callback_obj = call_data->callback();
Address entry_point = v8::ToCData<Address>(callback_obj);
- PROFILE(ISOLATE, CallbackEvent(*func_name, entry_point));
+ PROFILE(isolate_, CallbackEvent(*func_name, entry_point));
}
} else {
- PROFILE(ISOLATE,
+ PROFILE(isolate_,
CodeCreateEvent(
- Logger::LAZY_COMPILE_TAG, *code, *shared, *func_name));
+ Logger::LAZY_COMPILE_TAG, *code, *shared, NULL, *func_name));
}
}
void Logger::LogCompiledFunctions() {
- HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask,
+ Heap* heap = isolate_->heap();
+ heap->CollectAllGarbage(Heap::kMakeHeapIterableMask,
"Logger::LogCompiledFunctions");
- HandleScope scope;
- const int compiled_funcs_count = EnumerateCompiledFunctions(NULL, NULL);
+ HandleScope scope(isolate_);
+ const int compiled_funcs_count = EnumerateCompiledFunctions(heap, NULL, NULL);
ScopedVector< Handle<SharedFunctionInfo> > sfis(compiled_funcs_count);
ScopedVector< Handle<Code> > code_objects(compiled_funcs_count);
- EnumerateCompiledFunctions(sfis.start(), code_objects.start());
+ EnumerateCompiledFunctions(heap, sfis.start(), code_objects.start());
// During iteration, there can be heap allocation due to
// GetScriptLineNumber call.
for (int i = 0; i < compiled_funcs_count; ++i) {
- if (*code_objects[i] == Isolate::Current()->builtins()->builtin(
+ if (*code_objects[i] == isolate_->builtins()->builtin(
Builtins::kLazyCompile))
continue;
LogExistingFunction(sfis[i], code_objects[i]);
@@ -1673,28 +1752,85 @@ void Logger::LogCompiledFunctions() {
void Logger::LogAccessorCallbacks() {
- HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask,
+ Heap* heap = isolate_->heap();
+ heap->CollectAllGarbage(Heap::kMakeHeapIterableMask,
"Logger::LogAccessorCallbacks");
- HeapIterator iterator;
- AssertNoAllocation no_alloc;
+ HeapIterator iterator(heap);
+ DisallowHeapAllocation no_gc;
for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
- if (!obj->IsAccessorInfo()) continue;
- AccessorInfo* ai = AccessorInfo::cast(obj);
- if (!ai->name()->IsString()) continue;
- String* name = String::cast(ai->name());
+ if (!obj->IsExecutableAccessorInfo()) continue;
+ ExecutableAccessorInfo* ai = ExecutableAccessorInfo::cast(obj);
+ if (!ai->name()->IsName()) continue;
Address getter_entry = v8::ToCData<Address>(ai->getter());
+ Name* name = Name::cast(ai->name());
if (getter_entry != 0) {
- PROFILE(ISOLATE, GetterCallbackEvent(name, getter_entry));
+ PROFILE(isolate_, GetterCallbackEvent(name, getter_entry));
}
Address setter_entry = v8::ToCData<Address>(ai->setter());
if (setter_entry != 0) {
- PROFILE(ISOLATE, SetterCallbackEvent(name, setter_entry));
+ PROFILE(isolate_, SetterCallbackEvent(name, setter_entry));
}
}
}
-bool Logger::SetUp() {
+static void AddIsolateIdIfNeeded(Isolate* isolate, StringStream* stream) {
+ if (isolate->IsDefaultIsolate() || !FLAG_logfile_per_isolate) return;
+ stream->Add("isolate-%p-", isolate);
+}
+
+
+static SmartArrayPointer<const char> PrepareLogFileName(
+ Isolate* isolate, const char* file_name) {
+ if (strchr(file_name, '%') != NULL || !isolate->IsDefaultIsolate()) {
+ // If there's a '%' in the log file name we have to expand
+ // placeholders.
+ HeapStringAllocator allocator;
+ StringStream stream(&allocator);
+ AddIsolateIdIfNeeded(isolate, &stream);
+ for (const char* p = file_name; *p; p++) {
+ if (*p == '%') {
+ p++;
+ switch (*p) {
+ case '\0':
+ // If there's a % at the end of the string we back up
+ // one character so we can escape the loop properly.
+ p--;
+ break;
+ case 'p':
+ stream.Add("%d", OS::GetCurrentProcessId());
+ break;
+ case 't': {
+ // %t expands to the current time in milliseconds.
+ double time = OS::TimeCurrentMillis();
+ stream.Add("%.0f", FmtElm(time));
+ break;
+ }
+ case '%':
+ // %% expands (contracts really) to %.
+ stream.Put('%');
+ break;
+ default:
+ // All other %'s expand to themselves.
+ stream.Put('%');
+ stream.Put(*p);
+ break;
+ }
+ } else {
+ stream.Put(*p);
+ }
+ }
+ return SmartArrayPointer<const char>(stream.ToCString());
+ }
+ int length = StrLength(file_name);
+ char* str = NewArray<char>(length + 1);
+ OS::MemCopy(str, file_name, length);
+ str[length] = '\0';
+ return SmartArrayPointer<const char>(str);
+}
+
+
+bool Logger::SetUp(Isolate* isolate) {
// Tests and EnsureInitialize() can call this twice in a row. It's harmless.
if (is_initialized_) return true;
is_initialized_ = true;
@@ -1704,59 +1840,49 @@ bool Logger::SetUp() {
FLAG_log_snapshot_positions = true;
}
- // --prof_lazy controls --log-code, implies --noprof_auto.
- if (FLAG_prof_lazy) {
- FLAG_log_code = false;
- FLAG_prof_auto = false;
- }
-
- // TODO(isolates): this assert introduces cyclic dependency (logger
- // -> thread local top -> heap -> logger).
- // ASSERT(VMState::is_outermost_external());
-
- log_->Initialize();
+ SmartArrayPointer<const char> log_file_name =
+ PrepareLogFileName(isolate, FLAG_logfile);
+ log_->Initialize(*log_file_name);
- if (FLAG_ll_prof) LogCodeInfo();
-
- Isolate* isolate = Isolate::Current();
- ticker_ = new Ticker(isolate, kSamplingIntervalMs);
-
- if (FLAG_sliding_state_window && sliding_state_window_ == NULL) {
- sliding_state_window_ = new SlidingStateWindow(isolate);
+ if (FLAG_ll_prof) {
+ ll_logger_ = new LowLevelLogger(*log_file_name);
+ addCodeEventListener(ll_logger_);
}
- bool start_logging = FLAG_log || FLAG_log_runtime || FLAG_log_api
- || FLAG_log_code || FLAG_log_gc || FLAG_log_handles || FLAG_log_suspect
- || FLAG_log_regexp || FLAG_log_state_changes || FLAG_ll_prof;
+ ticker_ = new Ticker(isolate, kSamplingIntervalMs);
- if (start_logging) {
- logging_nesting_ = 1;
+ if (Log::InitLogAtStart()) {
+ is_logging_ = true;
}
if (FLAG_prof) {
profiler_ = new Profiler(isolate);
- if (!FLAG_prof_auto) {
- profiler_->pause();
- } else {
- logging_nesting_ = 1;
- }
- if (!FLAG_prof_lazy) {
- profiler_->Engage();
- }
+ is_logging_ = true;
+ profiler_->Engage();
}
+ if (FLAG_log_internal_timer_events || FLAG_prof) timer_.Start();
+
return true;
}
void Logger::SetCodeEventHandler(uint32_t options,
JitCodeEventHandler event_handler) {
- code_event_handler_ = event_handler;
-
- if (code_event_handler_ != NULL && (options & kJitCodeEventEnumExisting)) {
- HandleScope scope;
- LogCodeObjects();
- LogCompiledFunctions();
+ if (jit_logger_) {
+ removeCodeEventListener(jit_logger_);
+ delete jit_logger_;
+ jit_logger_ = NULL;
+ }
+
+ if (event_handler) {
+ jit_logger_ = new JitLogger(event_handler);
+ addCodeEventListener(jit_logger_);
+ if (options & kJitCodeEventEnumExisting) {
+ HandleScope scope(isolate_);
+ LogCodeObjects();
+ LogCompiledFunctions();
+ }
}
}
@@ -1766,17 +1892,6 @@ Sampler* Logger::sampler() {
}
-void Logger::EnsureTickerStarted() {
- ASSERT(ticker_ != NULL);
- if (!ticker_->IsActive()) ticker_->Start();
-}
-
-
-void Logger::EnsureTickerStopped() {
- if (ticker_ != NULL && ticker_->IsActive()) ticker_->Stop();
-}
-
-
FILE* Logger::TearDown() {
if (!is_initialized_) return NULL;
is_initialized_ = false;
@@ -1788,90 +1903,22 @@ FILE* Logger::TearDown() {
profiler_ = NULL;
}
- delete sliding_state_window_;
- sliding_state_window_ = NULL;
-
delete ticker_;
ticker_ = NULL;
- return log_->Close();
-}
-
-
-void Logger::EnableSlidingStateWindow() {
- // If the ticker is NULL, Logger::SetUp has not been called yet. In
- // that case, we set the sliding_state_window flag so that the
- // sliding window computation will be started when Logger::SetUp is
- // called.
- if (ticker_ == NULL) {
- FLAG_sliding_state_window = true;
- return;
- }
- // Otherwise, if the sliding state window computation has not been
- // started we do it now.
- if (sliding_state_window_ == NULL) {
- sliding_state_window_ = new SlidingStateWindow(Isolate::Current());
+ if (ll_logger_) {
+ removeCodeEventListener(ll_logger_);
+ delete ll_logger_;
+ ll_logger_ = NULL;
}
-}
-
-// Protects the state below.
-static Mutex* active_samplers_mutex = NULL;
-
-List<Sampler*>* SamplerRegistry::active_samplers_ = NULL;
-
-
-void SamplerRegistry::SetUp() {
- if (!active_samplers_mutex) {
- active_samplers_mutex = OS::CreateMutex();
- }
-}
-
-bool SamplerRegistry::IterateActiveSamplers(VisitSampler func, void* param) {
- ScopedLock lock(active_samplers_mutex);
- for (int i = 0;
- ActiveSamplersExist() && i < active_samplers_->length();
- ++i) {
- func(active_samplers_->at(i), param);
+ if (jit_logger_) {
+ removeCodeEventListener(jit_logger_);
+ delete jit_logger_;
+ jit_logger_ = NULL;
}
- return ActiveSamplersExist();
-}
-
-
-static void ComputeCpuProfiling(Sampler* sampler, void* flag_ptr) {
- bool* flag = reinterpret_cast<bool*>(flag_ptr);
- *flag |= sampler->IsProfiling();
-}
-
-SamplerRegistry::State SamplerRegistry::GetState() {
- bool flag = false;
- if (!IterateActiveSamplers(&ComputeCpuProfiling, &flag)) {
- return HAS_NO_SAMPLERS;
- }
- return flag ? HAS_CPU_PROFILING_SAMPLERS : HAS_SAMPLERS;
-}
-
-
-void SamplerRegistry::AddActiveSampler(Sampler* sampler) {
- ASSERT(sampler->IsActive());
- ScopedLock lock(active_samplers_mutex);
- if (active_samplers_ == NULL) {
- active_samplers_ = new List<Sampler*>;
- } else {
- ASSERT(!active_samplers_->Contains(sampler));
- }
- active_samplers_->Add(sampler);
-}
-
-
-void SamplerRegistry::RemoveActiveSampler(Sampler* sampler) {
- ASSERT(sampler->IsActive());
- ScopedLock lock(active_samplers_mutex);
- ASSERT(active_samplers_ != NULL);
- bool removed = active_samplers_->RemoveElement(sampler);
- ASSERT(removed);
- USE(removed);
+ return log_->Close();
}
} } // namespace v8::internal
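
As a usage sketch (not part of the patch), the scoped timer events introduced above are meant to be used like this from inside v8::internal; the surrounding function is hypothetical, and only the TimerEventScope constructor and name constants come from this change:

  void CompileSomething(Isolate* isolate) {
    // Emits a START timer event here and the matching END event when the
    // scope exits, but only when FLAG_log_internal_timer_events is set.
    Logger::TimerEventScope timer(
        isolate, Logger::TimerEventScope::v8_compile_full_code);
    // ... work being timed ...
  }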
diff --git a/deps/v8/src/log.h b/deps/v8/src/log.h
index 33f359a7f..c0efd6504 100644
--- a/deps/v8/src/log.h
+++ b/deps/v8/src/log.h
@@ -31,7 +31,7 @@
#include "allocation.h"
#include "objects.h"
#include "platform.h"
-#include "log-utils.h"
+#include "platform/elapsed-timer.h"
namespace v8 {
namespace internal {
@@ -71,11 +71,16 @@ namespace internal {
// tick profiler requires code events, so --prof implies --log-code.
// Forward declarations.
-class LogMessageBuilder;
+class CodeEventListener;
+class CompilationInfo;
+class CpuProfiler;
+class Isolate;
+class Log;
+class PositionsRecorder;
class Profiler;
class Semaphore;
-class SlidingStateWindow;
class Ticker;
+struct TickSample;
#undef LOG
#define LOG(isolate, Call) \
@@ -126,17 +131,20 @@ class Ticker;
V(CALLBACK_TAG, "Callback") \
V(EVAL_TAG, "Eval") \
V(FUNCTION_TAG, "Function") \
+ V(HANDLER_TAG, "Handler") \
V(KEYED_LOAD_IC_TAG, "KeyedLoadIC") \
- V(KEYED_LOAD_MEGAMORPHIC_IC_TAG, "KeyedLoadMegamorphicIC") \
+ V(KEYED_LOAD_POLYMORPHIC_IC_TAG, "KeyedLoadPolymorphicIC") \
V(KEYED_EXTERNAL_ARRAY_LOAD_IC_TAG, "KeyedExternalArrayLoadIC") \
V(KEYED_STORE_IC_TAG, "KeyedStoreIC") \
- V(KEYED_STORE_MEGAMORPHIC_IC_TAG, "KeyedStoreMegamorphicIC") \
+ V(KEYED_STORE_POLYMORPHIC_IC_TAG, "KeyedStorePolymorphicIC") \
V(KEYED_EXTERNAL_ARRAY_STORE_IC_TAG, "KeyedExternalArrayStoreIC") \
V(LAZY_COMPILE_TAG, "LazyCompile") \
V(LOAD_IC_TAG, "LoadIC") \
+ V(LOAD_POLYMORPHIC_IC_TAG, "LoadPolymorphicIC") \
V(REG_EXP_TAG, "RegExp") \
V(SCRIPT_TAG, "Script") \
V(STORE_IC_TAG, "StoreIC") \
+ V(STORE_POLYMORPHIC_IC_TAG, "StorePolymorphicIC") \
V(STUB_TAG, "Stub") \
V(NATIVE_FUNCTION_TAG, "Function") \
V(NATIVE_LAZY_COMPILE_TAG, "LazyCompile") \
@@ -145,9 +153,10 @@ class Ticker;
// original tags when writing to the log.
+class JitLogger;
+class LowLevelLogger;
class Sampler;
-
class Logger {
public:
#define DECLARE_ENUM(enum_item, ignore) enum_item,
@@ -158,15 +167,12 @@ class Logger {
#undef DECLARE_ENUM
// Acquires resources for logging if the right flags are set.
- bool SetUp();
+ bool SetUp(Isolate* isolate);
// Sets the current code event handler.
void SetCodeEventHandler(uint32_t options,
JitCodeEventHandler event_handler);
- void EnsureTickerStarted();
- void EnsureTickerStopped();
-
Sampler* sampler();
// Frees resources acquired in SetUp.
@@ -174,9 +180,6 @@ class Logger {
// leaving the file open.
FILE* TearDown();
- // Enable the computation of a sliding window of states.
- void EnableSlidingStateWindow();
-
// Emits an event with a string value -> (name, value).
void StringEvent(const char* name, const char* value);
@@ -204,7 +207,7 @@ class Logger {
// Emits an event that an undefined property was read from an
// object.
- void SuspectReadEvent(String* name, Object* obj);
+ void SuspectReadEvent(Name* name, Object* obj);
// Emits an event when a message is put on or read from a debugging queue.
// DebugTag lets us put a call-site specific label on the event.
@@ -224,23 +227,30 @@ class Logger {
// ==== Events logged by --log-code. ====
+ void addCodeEventListener(CodeEventListener* listener);
+ void removeCodeEventListener(CodeEventListener* listener);
+ bool hasCodeEventListener(CodeEventListener* listener);
+
+
// Emits a code event for a callback function.
- void CallbackEvent(String* name, Address entry_point);
- void GetterCallbackEvent(String* name, Address entry_point);
- void SetterCallbackEvent(String* name, Address entry_point);
+ void CallbackEvent(Name* name, Address entry_point);
+ void GetterCallbackEvent(Name* name, Address entry_point);
+ void SetterCallbackEvent(Name* name, Address entry_point);
// Emits a code create event.
void CodeCreateEvent(LogEventsAndTags tag,
Code* code, const char* source);
void CodeCreateEvent(LogEventsAndTags tag,
- Code* code, String* name);
+ Code* code, Name* name);
void CodeCreateEvent(LogEventsAndTags tag,
Code* code,
SharedFunctionInfo* shared,
- String* name);
+ CompilationInfo* info,
+ Name* name);
void CodeCreateEvent(LogEventsAndTags tag,
Code* code,
SharedFunctionInfo* shared,
- String* source, int line);
+ CompilationInfo* info,
+ Name* source, int line, int column);
void CodeCreateEvent(LogEventsAndTags tag, Code* code, int args_count);
void CodeMovingGCEvent();
// Emits a code create event for a RegExp.
@@ -249,9 +259,23 @@ class Logger {
void CodeMoveEvent(Address from, Address to);
// Emits a code delete event.
void CodeDeleteEvent(Address from);
+ // Emits a code line info add event with Position type.
+ void CodeLinePosInfoAddPositionEvent(void* jit_handler_data,
+ int pc_offset,
+ int position);
+ // Emits a code line info add event with StatementPosition type.
+ void CodeLinePosInfoAddStatementPositionEvent(void* jit_handler_data,
+ int pc_offset,
+ int position);
+ // Emits a code line info start record event.
+ void CodeStartLinePosInfoRecordEvent(PositionsRecorder* pos_recorder);
+ // Emits a code line info finish record event.
+ // It's the callee's responsibility to dispose of the parameter jit_handler_data.
+ void CodeEndLinePosInfoRecordEvent(Code* code, void* jit_handler_data);
void SharedFunctionInfoMoveEvent(Address from, Address to);
+ void CodeNameEvent(Address addr, int pos, const char* code_name);
void SnapshotPositionEvent(Address addr, int pos);
// ==== Events logged by --log-gc. ====
@@ -275,6 +299,40 @@ class Logger {
uintptr_t start,
uintptr_t end);
+ // ==== Events logged by --log-timer-events. ====
+ enum StartEnd { START, END };
+
+ void CodeDeoptEvent(Code* code);
+
+ void TimerEvent(StartEnd se, const char* name);
+
+ static void EnterExternal(Isolate* isolate);
+ static void LeaveExternal(Isolate* isolate);
+
+ class TimerEventScope {
+ public:
+ TimerEventScope(Isolate* isolate, const char* name)
+ : isolate_(isolate), name_(name) {
+ if (FLAG_log_internal_timer_events) LogTimerEvent(START);
+ }
+
+ ~TimerEventScope() {
+ if (FLAG_log_internal_timer_events) LogTimerEvent(END);
+ }
+
+ void LogTimerEvent(StartEnd se);
+
+ static const char* v8_recompile_synchronous;
+ static const char* v8_recompile_concurrent;
+ static const char* v8_compile_full_code;
+ static const char* v8_execute;
+ static const char* v8_external;
+
+ private:
+ Isolate* isolate_;
+ const char* name_;
+ };
+
// ==== Events logged by --log-regexp ====
// Regexp compilation and execution events.
@@ -284,19 +342,16 @@ class Logger {
void LogRuntime(Vector<const char> format, JSArray* args);
bool is_logging() {
- return logging_nesting_ > 0;
+ return is_logging_;
}
bool is_logging_code_events() {
- return is_logging() || code_event_handler_ != NULL;
+ return is_logging() || jit_logger_ != NULL;
}
- // Pause/Resume collection of profiling data.
- // When data collection is paused, CPU Tick events are discarded until
- // data collection is Resumed.
- void PauseProfiler();
- void ResumeProfiler();
- bool IsProfilerPaused();
+ // Stop collection of profiling data.
+ // When data collection is paused, CPU Tick events are discarded.
+ void StopProfiler();
void LogExistingFunction(Handle<SharedFunctionInfo> shared,
Handle<Code> code);
@@ -323,60 +378,28 @@ class Logger {
void LogFailure();
private:
- class NameBuffer;
- class NameMap;
-
- Logger();
+ explicit Logger(Isolate* isolate);
~Logger();
- // Issue code notifications.
- void IssueCodeAddedEvent(Code* code, const char* name, size_t name_len);
- void IssueCodeMovedEvent(Address from, Address to);
- void IssueCodeRemovedEvent(Address from);
-
// Emits the profiler's first message.
void ProfilerBeginEvent();
// Emits callback event messages.
void CallbackEventInternal(const char* prefix,
- const char* name,
+ Name* name,
Address entry_point);
// Internal configurable move event.
void MoveEventInternal(LogEventsAndTags event, Address from, Address to);
- // Internal configurable move event.
- void DeleteEventInternal(LogEventsAndTags event, Address from);
-
// Emits the source code of a regexp. Used by regexp events.
void LogRegExpSource(Handle<JSRegExp> regexp);
// Used for logging stubs found in the snapshot.
void LogCodeObject(Object* code_object);
- // Emits general information about generated code.
- void LogCodeInfo();
-
- void RegisterSnapshotCodeName(Code* code, const char* name, int name_size);
-
- // Low-level logging support.
-
- void LowLevelCodeCreateEvent(Code* code, const char* name, int name_size);
-
- void LowLevelCodeMoveEvent(Address from, Address to);
-
- void LowLevelCodeDeleteEvent(Address from);
-
- void LowLevelSnapshotPositionEvent(Address addr, int pos);
-
- void LowLevelLogWriteBytes(const char* bytes, int size);
-
- template <typename T>
- void LowLevelLogWriteStruct(const T& s) {
- char tag = T::kTag;
- LowLevelLogWriteBytes(reinterpret_cast<const char*>(&tag), sizeof(tag));
- LowLevelLogWriteBytes(reinterpret_cast<const char*>(&s), sizeof(s));
- }
+ // Helper method. It resets name_buffer_ and adds the tag name to it.
+ void InitNameBuffer(LogEventsAndTags tag);
// Emits a profiler tick event. Used by the profiler thread.
void TickEvent(TickSample* sample, bool overflow);
@@ -390,8 +413,7 @@ class Logger {
void UncheckedIntEvent(const char* name, int value);
void UncheckedIntPtrTEvent(const char* name, intptr_t value);
- // Returns whether profiler's sampler is active.
- bool IsProfilerSamplerActive();
+ Isolate* isolate_;
// The sampler used by the profiler and the sliding state window.
Ticker* ticker_;
@@ -401,10 +423,6 @@ class Logger {
// of samples.
Profiler* profiler_;
- // SlidingStateWindow instance keeping a sliding window of the most
- // recent VM states.
- SlidingStateWindow* sliding_state_window_;
-
// An array of log events names.
const char* const* log_events_;
@@ -412,87 +430,107 @@ class Logger {
// private members.
friend class EventLog;
friend class Isolate;
- friend class LogMessageBuilder;
friend class TimeLog;
friend class Profiler;
- friend class SlidingStateWindow;
- friend class StackTracer;
- friend class VMState;
-
+ template <StateTag Tag> friend class VMState;
friend class LoggerTestHelper;
-
- int logging_nesting_;
- int cpu_profiler_nesting_;
-
+ bool is_logging_;
Log* log_;
-
- NameBuffer* name_buffer_;
-
- NameMap* address_to_name_map_;
+ LowLevelLogger* ll_logger_;
+ JitLogger* jit_logger_;
+ List<CodeEventListener*> listeners_;
// Guards against multiple calls to TearDown() that can happen in some tests.
// 'true' between SetUp() and TearDown().
bool is_initialized_;
- // The code event handler - if any.
- JitCodeEventHandler code_event_handler_;
-
- // Support for 'incremental addresses' in compressed logs:
- // LogMessageBuilder::AppendAddress(Address addr)
- Address last_address_;
- // Logger::TickEvent(...)
- Address prev_sp_;
- Address prev_function_;
- // Logger::MoveEventInternal(...)
- Address prev_to_;
- // Logger::FunctionCreateEvent(...)
- Address prev_code_;
+ ElapsedTimer timer_;
friend class CpuProfiler;
};
-// Process wide registry of samplers.
-class SamplerRegistry : public AllStatic {
+class CodeEventListener {
public:
- enum State {
- HAS_NO_SAMPLERS,
- HAS_SAMPLERS,
- HAS_CPU_PROFILING_SAMPLERS
- };
-
- static void SetUp();
-
- typedef void (*VisitSampler)(Sampler*, void*);
-
- static State GetState();
+ virtual ~CodeEventListener() {}
+
+ virtual void CodeCreateEvent(Logger::LogEventsAndTags tag,
+ Code* code,
+ const char* comment) = 0;
+ virtual void CodeCreateEvent(Logger::LogEventsAndTags tag,
+ Code* code,
+ Name* name) = 0;
+ virtual void CodeCreateEvent(Logger::LogEventsAndTags tag,
+ Code* code,
+ SharedFunctionInfo* shared,
+ CompilationInfo* info,
+ Name* name) = 0;
+ virtual void CodeCreateEvent(Logger::LogEventsAndTags tag,
+ Code* code,
+ SharedFunctionInfo* shared,
+ CompilationInfo* info,
+ Name* source,
+ int line, int column) = 0;
+ virtual void CodeCreateEvent(Logger::LogEventsAndTags tag,
+ Code* code,
+ int args_count) = 0;
+ virtual void CallbackEvent(Name* name, Address entry_point) = 0;
+ virtual void GetterCallbackEvent(Name* name, Address entry_point) = 0;
+ virtual void SetterCallbackEvent(Name* name, Address entry_point) = 0;
+ virtual void RegExpCodeCreateEvent(Code* code, String* source) = 0;
+ virtual void CodeMoveEvent(Address from, Address to) = 0;
+ virtual void CodeDeleteEvent(Address from) = 0;
+ virtual void SharedFunctionInfoMoveEvent(Address from, Address to) = 0;
+ virtual void CodeMovingGCEvent() = 0;
+};
- // Iterates over all active samplers keeping the internal lock held.
- // Returns whether there are any active samplers.
- static bool IterateActiveSamplers(VisitSampler func, void* param);
- // Adds/Removes an active sampler.
- static void AddActiveSampler(Sampler* sampler);
- static void RemoveActiveSampler(Sampler* sampler);
+class CodeEventLogger : public CodeEventListener {
+ public:
+ CodeEventLogger();
+ virtual ~CodeEventLogger();
+
+ virtual void CodeCreateEvent(Logger::LogEventsAndTags tag,
+ Code* code,
+ const char* comment);
+ virtual void CodeCreateEvent(Logger::LogEventsAndTags tag,
+ Code* code,
+ Name* name);
+ virtual void CodeCreateEvent(Logger::LogEventsAndTags tag,
+ Code* code,
+ int args_count);
+ virtual void CodeCreateEvent(Logger::LogEventsAndTags tag,
+ Code* code,
+ SharedFunctionInfo* shared,
+ CompilationInfo* info,
+ Name* name);
+ virtual void CodeCreateEvent(Logger::LogEventsAndTags tag,
+ Code* code,
+ SharedFunctionInfo* shared,
+ CompilationInfo* info,
+ Name* source,
+ int line, int column);
+ virtual void RegExpCodeCreateEvent(Code* code, String* source);
+
+ virtual void CallbackEvent(Name* name, Address entry_point) { }
+ virtual void GetterCallbackEvent(Name* name, Address entry_point) { }
+ virtual void SetterCallbackEvent(Name* name, Address entry_point) { }
+ virtual void SharedFunctionInfoMoveEvent(Address from, Address to) { }
+ virtual void CodeMovingGCEvent() { }
private:
- static bool ActiveSamplersExist() {
- return active_samplers_ != NULL && !active_samplers_->is_empty();
- }
+ class NameBuffer;
- static List<Sampler*>* active_samplers_;
+ virtual void LogRecordedBuffer(Code* code,
+ SharedFunctionInfo* shared,
+ const char* name,
+ int length) = 0;
- DISALLOW_IMPLICIT_CONSTRUCTORS(SamplerRegistry);
+ NameBuffer* name_buffer_;
};
-// Class that extracts stack trace, used for profiling.
-class StackTracer : public AllStatic {
- public:
- static void Trace(Isolate* isolate, TickSample* sample);
-};
-
} } // namespace v8::internal
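
For illustration, a minimal listener built on the CodeEventLogger base class declared above (the class name CountingCodeLogger is hypothetical; for such a subclass only LogRecordedBuffer, CodeMoveEvent and CodeDeleteEvent still need to be implemented):

  class CountingCodeLogger : public CodeEventLogger {
   public:
    CountingCodeLogger() : created_(0) {}
    virtual void CodeMoveEvent(Address from, Address to) {}
    virtual void CodeDeleteEvent(Address from) {}
    int created() const { return created_; }
   private:
    // CodeEventLogger funnels each CodeCreateEvent overload into this hook,
    // passing the name it accumulated for the code object.
    virtual void LogRecordedBuffer(Code* code,
                                   SharedFunctionInfo* shared,
                                   const char* name,
                                   int length) {
      created_++;
    }
    int created_;
  };

Such a listener would be attached via Logger::addCodeEventListener and detached via removeCodeEventListener, mirroring what SetUp and TearDown do for ll_logger_ and jit_logger_.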
diff --git a/deps/v8/src/macro-assembler.h b/deps/v8/src/macro-assembler.h
index 11e2217e0..9fdf2ee7d 100644
--- a/deps/v8/src/macro-assembler.h
+++ b/deps/v8/src/macro-assembler.h
@@ -36,6 +36,27 @@ enum InvokeFlag {
};
+// Flags used for the AllocateInNewSpace functions.
+enum AllocationFlags {
+ // No special flags.
+ NO_ALLOCATION_FLAGS = 0,
+ // Return the pointer to the allocated memory already tagged as a heap object.
+ TAG_OBJECT = 1 << 0,
+ // The content of the result register already contains the allocation top in
+ // new space.
+ RESULT_CONTAINS_TOP = 1 << 1,
+ // Specify that the requested size of the space to allocate is specified in
+ // words instead of bytes.
+ SIZE_IN_WORDS = 1 << 2,
+ // Align the allocation to a multiple of kDoubleSize
+ DOUBLE_ALIGNMENT = 1 << 3,
+ // Directly allocate in old pointer space
+ PRETENURE_OLD_POINTER_SPACE = 1 << 4,
+ // Directly allocate in old data space
+ PRETENURE_OLD_DATA_SPACE = 1 << 5
+};
+
+
// Invalid depth in prototype chain.
const int kInvalidProtoDepth = -1;
@@ -151,6 +172,35 @@ class Comment {
#endif // DEBUG
+
+class AllocationUtils {
+ public:
+ static ExternalReference GetAllocationTopReference(
+ Isolate* isolate, AllocationFlags flags) {
+ if ((flags & PRETENURE_OLD_POINTER_SPACE) != 0) {
+ return ExternalReference::old_pointer_space_allocation_top_address(
+ isolate);
+ } else if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
+ return ExternalReference::old_data_space_allocation_top_address(isolate);
+ }
+ return ExternalReference::new_space_allocation_top_address(isolate);
+ }
+
+
+ static ExternalReference GetAllocationLimitReference(
+ Isolate* isolate, AllocationFlags flags) {
+ if ((flags & PRETENURE_OLD_POINTER_SPACE) != 0) {
+ return ExternalReference::old_pointer_space_allocation_limit_address(
+ isolate);
+ } else if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
+ return ExternalReference::old_data_space_allocation_limit_address(
+ isolate);
+ }
+ return ExternalReference::new_space_allocation_limit_address(isolate);
+ }
+};
+
+
} } // namespace v8::internal
#endif // V8_MACRO_ASSEMBLER_H_
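
AllocationUtils exists so every port's macro assembler selects the allocation top/limit external reference the same way: the pretenure flags pick old pointer or old data space, and everything else falls back to new space. A hedged stand-alone sketch of that dispatch, with ExternalReference replaced by a string so it compiles outside V8:

    #include <cstdio>

    // Bit values mirror the enum introduced in this patch.
    const int PRETENURE_OLD_POINTER_SPACE = 1 << 4;
    const int PRETENURE_OLD_DATA_SPACE    = 1 << 5;

    // Stand-in for the ExternalReference returned by the real helper.
    const char* GetAllocationTopReference(int flags) {
      if ((flags & PRETENURE_OLD_POINTER_SPACE) != 0)
        return "old_pointer_space_allocation_top_address";
      if ((flags & PRETENURE_OLD_DATA_SPACE) != 0)
        return "old_data_space_allocation_top_address";
      return "new_space_allocation_top_address";  // the default, as before this patch
    }

    int main() {
      std::printf("%s\n", GetAllocationTopReference(0));
      std::printf("%s\n", GetAllocationTopReference(PRETENURE_OLD_DATA_SPACE));
      return 0;
    }
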
diff --git a/deps/v8/src/macros.py b/deps/v8/src/macros.py
index 08fa82e68..1785d44a8 100644
--- a/deps/v8/src/macros.py
+++ b/deps/v8/src/macros.py
@@ -32,6 +32,8 @@ const NONE = 0;
const READ_ONLY = 1;
const DONT_ENUM = 2;
const DONT_DELETE = 4;
+const NEW_ONE_BYTE_STRING = true;
+const NEW_TWO_BYTE_STRING = false;
# Constants used for getter and setter operations.
const GETTER = 0;
@@ -40,8 +42,8 @@ const SETTER = 1;
# These definitions must match the index of the properties in objects.h.
const kApiTagOffset = 0;
const kApiPropertyListOffset = 1;
-const kApiSerialNumberOffset = 2;
-const kApiConstructorOffset = 2;
+const kApiSerialNumberOffset = 3;
+const kApiConstructorOffset = 3;
const kApiPrototypeTemplateOffset = 5;
const kApiParentTemplateOffset = 6;
const kApiFlagOffset = 14;
@@ -65,7 +67,9 @@ const msPerMonth = 2592000000;
# For apinatives.js
const kUninitialized = -1;
-const kReadOnlyPrototypeBit = 3; # For FunctionTemplateInfo, matches objects.h
+const kReadOnlyPrototypeBit = 3;
+const kRemovePrototypeBit = 4; # For FunctionTemplateInfo, matches objects.h
+const kDoNotCacheBit = 5; # For FunctionTemplateInfo, matches objects.h
# Note: kDayZeroInJulianDay = ToJulianDay(1970, 0, 1).
const kInvalidDate = 'Invalid Date';
@@ -97,6 +101,7 @@ macro IS_UNDEFINED(arg) = (typeof(arg) === 'undefined');
macro IS_NUMBER(arg) = (typeof(arg) === 'number');
macro IS_STRING(arg) = (typeof(arg) === 'string');
macro IS_BOOLEAN(arg) = (typeof(arg) === 'boolean');
+macro IS_SYMBOL(arg) = (typeof(arg) === 'symbol');
macro IS_OBJECT(arg) = (%_IsObject(arg));
macro IS_ARRAY(arg) = (%_IsArray(arg));
macro IS_FUNCTION(arg) = (%_IsFunction(arg));
@@ -104,14 +109,19 @@ macro IS_REGEXP(arg) = (%_IsRegExp(arg));
macro IS_SET(arg) = (%_ClassOf(arg) === 'Set');
macro IS_MAP(arg) = (%_ClassOf(arg) === 'Map');
macro IS_WEAKMAP(arg) = (%_ClassOf(arg) === 'WeakMap');
+macro IS_WEAKSET(arg) = (%_ClassOf(arg) === 'WeakSet');
macro IS_DATE(arg) = (%_ClassOf(arg) === 'Date');
macro IS_NUMBER_WRAPPER(arg) = (%_ClassOf(arg) === 'Number');
macro IS_STRING_WRAPPER(arg) = (%_ClassOf(arg) === 'String');
+macro IS_SYMBOL_WRAPPER(arg) = (%_ClassOf(arg) === 'Symbol');
macro IS_BOOLEAN_WRAPPER(arg) = (%_ClassOf(arg) === 'Boolean');
macro IS_ERROR(arg) = (%_ClassOf(arg) === 'Error');
macro IS_SCRIPT(arg) = (%_ClassOf(arg) === 'Script');
macro IS_ARGUMENTS(arg) = (%_ClassOf(arg) === 'Arguments');
macro IS_GLOBAL(arg) = (%_ClassOf(arg) === 'global');
+macro IS_ARRAYBUFFER(arg) = (%_ClassOf(arg) === 'ArrayBuffer');
+macro IS_DATAVIEW(arg) = (%_ClassOf(arg) === 'DataView');
+macro IS_GENERATOR(arg) = (%_ClassOf(arg) === 'Generator');
macro IS_UNDETECTABLE(arg) = (%_IsUndetectableObject(arg));
macro FLOOR(arg) = $floor(arg);
@@ -138,6 +148,7 @@ const kBoundArgumentsStartIndex = 2;
macro NUMBER_IS_NAN(arg) = (!%_IsSmi(%IS_VAR(arg)) && !(arg == arg));
macro NUMBER_IS_FINITE(arg) = (%_IsSmi(%IS_VAR(arg)) || ((arg == arg) && (arg != 1/0) && (arg != -1/0)));
macro TO_INTEGER(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : %NumberToInteger(ToNumber(arg)));
+macro TO_INTEGER_FOR_SIDE_EFFECT(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : ToNumber(arg));
macro TO_INTEGER_MAP_MINUS_ZERO(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : %NumberToIntegerMapMinusZero(ToNumber(arg)));
macro TO_INT32(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : (arg >> 0));
macro TO_UINT32(arg) = (arg >>> 0);
@@ -146,6 +157,11 @@ macro TO_NUMBER_INLINE(arg) = (IS_NUMBER(%IS_VAR(arg)) ? arg : NonNumberToNumber
macro TO_OBJECT_INLINE(arg) = (IS_SPEC_OBJECT(%IS_VAR(arg)) ? arg : ToObject(arg));
macro JSON_NUMBER_TO_STRING(arg) = ((%_IsSmi(%IS_VAR(arg)) || arg - arg == 0) ? %_NumberToString(arg) : "null");
+# Constants. The compiler constant folds them.
+const NAN = $NaN;
+const INFINITY = (1/0);
+const UNDEFINED = (void 0);
+
# Macros implemented in Python.
python macro CHAR_CODE(str) = ord(str[1]);
diff --git a/deps/v8/src/mark-compact-inl.h b/deps/v8/src/mark-compact-inl.h
index 10773e720..321309c60 100644
--- a/deps/v8/src/mark-compact-inl.h
+++ b/deps/v8/src/mark-compact-inl.h
@@ -58,7 +58,7 @@ void MarkCompactCollector::MarkObject(HeapObject* obj, MarkBit mark_bit) {
mark_bit.Set();
MemoryChunk::IncrementLiveBytesFromGC(obj->address(), obj->Size());
ASSERT(IsMarked(obj));
- ASSERT(HEAP->Contains(obj));
+ ASSERT(obj->GetIsolate()->heap()->Contains(obj));
marking_deque_.PushBlack(obj);
}
}
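
The assertion now derives the heap from the object itself instead of going through the process-global HEAP macro, which is what keeps the check valid when several isolates coexist in one process. A toy illustration of that per-object lookup (the types below are stand-ins, not V8's):

    #include <cassert>

    struct Heap {};
    struct Isolate {
      Heap* heap_;
      Heap* heap() { return heap_; }
    };

    struct HeapObject {
      Isolate* owner;  // V8 recovers this from the object's page header
      Isolate* GetIsolate() { return owner; }
    };

    // Per-object heap lookup: no global "current heap" involved.
    bool HeapContains(Heap* heap, HeapObject* obj) {
      return obj->GetIsolate()->heap() == heap;
    }

    int main() {
      Heap heap_a, heap_b;
      Isolate a{&heap_a};
      Isolate b{&heap_b};
      HeapObject obj{&a};
      assert(HeapContains(a.heap(), &obj));   // the object belongs to isolate a's heap
      assert(!HeapContains(b.heap(), &obj));  // and not to isolate b's
      return 0;
    }
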
diff --git a/deps/v8/src/mark-compact.cc b/deps/v8/src/mark-compact.cc
index 24730c6c0..b75ddb382 100644
--- a/deps/v8/src/mark-compact.cc
+++ b/deps/v8/src/mark-compact.cc
@@ -29,6 +29,7 @@
#include "code-stubs.h"
#include "compilation-cache.h"
+#include "cpu-profiler.h"
#include "deoptimizer.h"
#include "execution.h"
#include "gdb-jit.h"
@@ -36,11 +37,11 @@
#include "heap-profiler.h"
#include "ic-inl.h"
#include "incremental-marking.h"
-#include "liveobjectlist-inl.h"
#include "mark-compact.h"
#include "objects-visiting.h"
#include "objects-visiting-inl.h"
#include "stub-cache.h"
+#include "sweeper-thread.h"
namespace v8 {
namespace internal {
@@ -62,31 +63,47 @@ MarkCompactCollector::MarkCompactCollector() : // NOLINT
sweep_precisely_(false),
reduce_memory_footprint_(false),
abort_incremental_marking_(false),
+ marking_parity_(ODD_MARKING_PARITY),
compacting_(false),
was_marked_incrementally_(false),
+ sweeping_pending_(false),
+ sequential_sweeping_(false),
tracer_(NULL),
migration_slots_buffer_(NULL),
heap_(NULL),
code_flusher_(NULL),
- encountered_weak_maps_(NULL) { }
-
+ encountered_weak_collections_(NULL),
+ have_code_to_deoptimize_(false) { }
#ifdef VERIFY_HEAP
class VerifyMarkingVisitor: public ObjectVisitor {
public:
+ explicit VerifyMarkingVisitor(Heap* heap) : heap_(heap) {}
+
void VisitPointers(Object** start, Object** end) {
for (Object** current = start; current < end; current++) {
if ((*current)->IsHeapObject()) {
HeapObject* object = HeapObject::cast(*current);
- CHECK(HEAP->mark_compact_collector()->IsMarked(object));
+ CHECK(heap_->mark_compact_collector()->IsMarked(object));
}
}
}
+
+ void VisitEmbeddedPointer(RelocInfo* rinfo) {
+ ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
+ if (!Code::IsWeakEmbeddedObject(rinfo->host()->kind(),
+ rinfo->target_object())) {
+ VisitPointer(rinfo->target_object_address());
+ }
+ }
+
+ private:
+ Heap* heap_;
};
-static void VerifyMarking(Address bottom, Address top) {
- VerifyMarkingVisitor visitor;
+static void VerifyMarking(Heap* heap, Address bottom, Address top) {
+ VerifyMarkingVisitor visitor(heap);
HeapObject* object;
Address next_object_must_be_here_or_later = bottom;
@@ -114,7 +131,7 @@ static void VerifyMarking(NewSpace* space) {
NewSpacePage* page = it.next();
Address limit = it.has_next() ? page->area_end() : end;
CHECK(limit == end || !page->Contains(end));
- VerifyMarking(page->area_start(), limit);
+ VerifyMarking(space->heap(), page->area_start(), limit);
}
}
@@ -124,7 +141,7 @@ static void VerifyMarking(PagedSpace* space) {
while (it.has_next()) {
Page* p = it.next();
- VerifyMarking(p->area_start(), p->area_end());
+ VerifyMarking(space->heap(), p->area_start(), p->area_end());
}
}
@@ -134,10 +151,11 @@ static void VerifyMarking(Heap* heap) {
VerifyMarking(heap->old_data_space());
VerifyMarking(heap->code_space());
VerifyMarking(heap->cell_space());
+ VerifyMarking(heap->property_cell_space());
VerifyMarking(heap->map_space());
VerifyMarking(heap->new_space());
- VerifyMarkingVisitor visitor;
+ VerifyMarkingVisitor visitor(heap);
LargeObjectIterator it(heap->lo_space());
for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
@@ -215,6 +233,7 @@ static void VerifyEvacuation(Heap* heap) {
VerifyEvacuation(heap->old_data_space());
VerifyEvacuation(heap->code_space());
VerifyEvacuation(heap->cell_space());
+ VerifyEvacuation(heap->property_cell_space());
VerifyEvacuation(heap->map_space());
VerifyEvacuation(heap->new_space());
@@ -269,13 +288,14 @@ class VerifyNativeContextSeparationVisitor: public ObjectVisitor {
array->set_length(length);
}
break;
- case JS_GLOBAL_PROPERTY_CELL_TYPE:
+ case CELL_TYPE:
case JS_PROXY_TYPE:
case JS_VALUE_TYPE:
case TYPE_FEEDBACK_INFO_TYPE:
object->Iterate(this);
break;
- case ACCESSOR_INFO_TYPE:
+ case DECLARED_ACCESSOR_INFO_TYPE:
+ case EXECUTABLE_ACCESSOR_INFO_TYPE:
case BYTE_ARRAY_TYPE:
case CALL_HANDLER_INFO_TYPE:
case CODE_TYPE:
@@ -319,6 +339,11 @@ static void VerifyNativeContextSeparation(Heap* heap) {
#endif
+void MarkCompactCollector::TearDown() {
+ AbortCompaction();
+}
+
+
void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
p->MarkEvacuationCandidate();
evacuation_candidates_.Add(p);
@@ -360,6 +385,7 @@ bool MarkCompactCollector::StartCompaction(CompactionMode mode) {
if (FLAG_trace_fragmentation) {
TraceFragmentation(heap()->map_space());
TraceFragmentation(heap()->cell_space());
+ TraceFragmentation(heap()->property_cell_space());
}
heap()->old_pointer_space()->EvictEvacuationCandidatesFromFreeLists();
@@ -377,14 +403,16 @@ void MarkCompactCollector::CollectGarbage() {
// Make sure that Prepare() has been called. The individual steps below will
// update the state as they proceed.
ASSERT(state_ == PREPARE_GC);
- ASSERT(encountered_weak_maps_ == Smi::FromInt(0));
+ ASSERT(encountered_weak_collections_ == Smi::FromInt(0));
+
+ heap()->allocation_mementos_found_ = 0;
MarkLiveObjects();
ASSERT(heap_->incremental_marking()->IsStopped());
- if (FLAG_collect_maps) ClearNonLiveTransitions();
+ if (FLAG_collect_maps) ClearNonLiveReferences();
- ClearWeakMaps();
+ ClearWeakCollections();
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
@@ -402,8 +430,29 @@ void MarkCompactCollector::CollectGarbage() {
}
#endif
+#ifdef VERIFY_HEAP
+ if (heap()->weak_embedded_objects_verification_enabled()) {
+ VerifyWeakEmbeddedObjectsInOptimizedCode();
+ }
+ if (FLAG_collect_maps && FLAG_omit_map_checks_for_leaf_maps) {
+ VerifyOmittedMapChecks();
+ }
+#endif
+
Finish();
+ if (marking_parity_ == EVEN_MARKING_PARITY) {
+ marking_parity_ = ODD_MARKING_PARITY;
+ } else {
+ ASSERT(marking_parity_ == ODD_MARKING_PARITY);
+ marking_parity_ = EVEN_MARKING_PARITY;
+ }
+
+ if (FLAG_trace_track_allocation_sites &&
+ heap()->allocation_mementos_found_ > 0) {
+ PrintF("AllocationMementos found during mark-sweep = %d\n",
+ heap()->allocation_mementos_found_);
+ }
tracer_ = NULL;
}
@@ -436,6 +485,7 @@ void MarkCompactCollector::VerifyMarkbitsAreClean() {
VerifyMarkbitsAreClean(heap_->old_data_space());
VerifyMarkbitsAreClean(heap_->code_space());
VerifyMarkbitsAreClean(heap_->cell_space());
+ VerifyMarkbitsAreClean(heap_->property_cell_space());
VerifyMarkbitsAreClean(heap_->map_space());
VerifyMarkbitsAreClean(heap_->new_space());
@@ -446,6 +496,30 @@ void MarkCompactCollector::VerifyMarkbitsAreClean() {
CHECK_EQ(0, Page::FromAddress(obj->address())->LiveBytes());
}
}
+
+
+void MarkCompactCollector::VerifyWeakEmbeddedObjectsInOptimizedCode() {
+ HeapObjectIterator code_iterator(heap()->code_space());
+ for (HeapObject* obj = code_iterator.Next();
+ obj != NULL;
+ obj = code_iterator.Next()) {
+ Code* code = Code::cast(obj);
+ if (code->kind() != Code::OPTIMIZED_FUNCTION) continue;
+ if (WillBeDeoptimized(code)) continue;
+ code->VerifyEmbeddedObjectsDependency();
+ }
+}
+
+
+void MarkCompactCollector::VerifyOmittedMapChecks() {
+ HeapObjectIterator iterator(heap()->map_space());
+ for (HeapObject* obj = iterator.Next();
+ obj != NULL;
+ obj = iterator.Next()) {
+ Map* map = Map::cast(obj);
+ map->VerifyOmittedMapChecks();
+ }
+}
#endif // VERIFY_HEAP
@@ -473,6 +547,7 @@ void MarkCompactCollector::ClearMarkbits() {
ClearMarkbitsInPagedSpace(heap_->old_pointer_space());
ClearMarkbitsInPagedSpace(heap_->old_data_space());
ClearMarkbitsInPagedSpace(heap_->cell_space());
+ ClearMarkbitsInPagedSpace(heap_->property_cell_space());
ClearMarkbitsInNewSpace(heap_->new_space());
LargeObjectIterator it(heap_->lo_space());
@@ -480,11 +555,55 @@ void MarkCompactCollector::ClearMarkbits() {
MarkBit mark_bit = Marking::MarkBitFrom(obj);
mark_bit.Clear();
mark_bit.Next().Clear();
+ Page::FromAddress(obj->address())->ResetProgressBar();
Page::FromAddress(obj->address())->ResetLiveBytes();
}
}
+void MarkCompactCollector::StartSweeperThreads() {
+ sweeping_pending_ = true;
+ for (int i = 0; i < FLAG_sweeper_threads; i++) {
+ isolate()->sweeper_threads()[i]->StartSweeping();
+ }
+}
+
+
+void MarkCompactCollector::WaitUntilSweepingCompleted() {
+ ASSERT(sweeping_pending_ == true);
+ for (int i = 0; i < FLAG_sweeper_threads; i++) {
+ isolate()->sweeper_threads()[i]->WaitForSweeperThread();
+ }
+ sweeping_pending_ = false;
+ StealMemoryFromSweeperThreads(heap()->paged_space(OLD_DATA_SPACE));
+ StealMemoryFromSweeperThreads(heap()->paged_space(OLD_POINTER_SPACE));
+ heap()->paged_space(OLD_DATA_SPACE)->ResetUnsweptFreeBytes();
+ heap()->paged_space(OLD_POINTER_SPACE)->ResetUnsweptFreeBytes();
+}
+
+
+intptr_t MarkCompactCollector::
+ StealMemoryFromSweeperThreads(PagedSpace* space) {
+ intptr_t freed_bytes = 0;
+ for (int i = 0; i < FLAG_sweeper_threads; i++) {
+ freed_bytes += isolate()->sweeper_threads()[i]->StealMemory(space);
+ }
+ space->AddToAccountingStats(freed_bytes);
+ space->DecrementUnsweptFreeBytes(freed_bytes);
+ return freed_bytes;
+}
+
+
+bool MarkCompactCollector::AreSweeperThreadsActivated() {
+ return isolate()->sweeper_threads() != NULL;
+}
+
+
+bool MarkCompactCollector::IsConcurrentSweepingInProgress() {
+ return sweeping_pending_;
+}
+
+
bool Marking::TransferMark(Address old_start, Address new_start) {
// This is only used when resizing an object.
ASSERT(MemoryChunk::FromAddress(old_start) ==
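
StartSweeperThreads() and WaitUntilSweepingCompleted() form a simple start/join protocol around concurrent sweeping: sweeping_pending_ is raised before the workers start, and a later GC (see the Prepare() hunk further down) first waits for them and folds the freed bytes back into the space accounting. A rough sketch of the same protocol using std::thread instead of V8's SweeperThread class (illustrative only):

    #include <atomic>
    #include <cstdio>
    #include <thread>
    #include <vector>

    std::atomic<long> freed_bytes{0};

    // Stand-in for the per-thread sweeping work started by StartSweeping().
    void SweepSpace(int id) {
      freed_bytes += 1000 * (id + 1);  // pretend each worker frees some memory
    }

    int main() {
      bool sweeping_pending = false;
      std::vector<std::thread> sweeper_threads;

      // StartSweeperThreads(): mark sweeping as pending, then start the workers.
      sweeping_pending = true;
      for (int i = 0; i < 2; i++) sweeper_threads.emplace_back(SweepSpace, i);

      // WaitUntilSweepingCompleted(): join every worker, then account for the
      // memory they freed before the next GC is allowed to proceed.
      for (std::thread& t : sweeper_threads) t.join();
      sweeping_pending = false;
      std::printf("freed %ld bytes, pending=%d\n", freed_bytes.load(), sweeping_pending);
      return 0;
    }
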
@@ -534,6 +653,8 @@ const char* AllocationSpaceName(AllocationSpace space) {
case CODE_SPACE: return "CODE_SPACE";
case MAP_SPACE: return "MAP_SPACE";
case CELL_SPACE: return "CELL_SPACE";
+ case PROPERTY_CELL_SPACE:
+ return "PROPERTY_CELL_SPACE";
case LO_SPACE: return "LO_SPACE";
default:
UNREACHABLE();
@@ -558,8 +679,8 @@ static int FreeListFragmentation(PagedSpace* space, Page* p) {
return 0;
}
- FreeList::SizeStats sizes;
- space->CountFreeListItems(p, &sizes);
+ PagedSpace::SizeStats sizes;
+ space->ObtainFreeListStatistics(p, &sizes);
intptr_t ratio;
intptr_t ratio_threshold;
@@ -659,10 +780,12 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
}
if (FLAG_trace_fragmentation && mode == REDUCE_MEMORY_FOOTPRINT) {
- PrintF("Estimated over reserved memory: %.1f / %.1f MB (threshold %d)\n",
+ PrintF("Estimated over reserved memory: %.1f / %.1f MB (threshold %d), "
+ "evacuation candidate limit: %d\n",
static_cast<double>(over_reserved) / MB,
static_cast<double>(reserved) / MB,
- static_cast<int>(kFreenessThreshold));
+ static_cast<int>(kFreenessThreshold),
+ max_evacuation_candidates);
}
intptr_t estimated_release = 0;
@@ -689,7 +812,7 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
if ((counter & 1) == (page_number & 1)) fragmentation = 1;
} else if (mode == REDUCE_MEMORY_FOOTPRINT) {
// Don't try to release too many pages.
- if (estimated_release >= ((over_reserved * 3) / 4)) {
+ if (estimated_release >= over_reserved) {
continue;
}
@@ -698,15 +821,15 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
if (!p->WasSwept()) {
free_bytes = (p->area_size() - p->LiveBytes());
} else {
- FreeList::SizeStats sizes;
- space->CountFreeListItems(p, &sizes);
+ PagedSpace::SizeStats sizes;
+ space->ObtainFreeListStatistics(p, &sizes);
free_bytes = sizes.Total();
}
int free_pct = static_cast<int>(free_bytes * 100) / p->area_size();
if (free_pct >= kFreenessThreshold) {
- estimated_release += 2 * p->area_size() - free_bytes;
+ estimated_release += free_bytes;
fragmentation = free_pct;
} else {
fragmentation = 0;
@@ -787,6 +910,11 @@ void MarkCompactCollector::Prepare(GCTracer* tracer) {
ASSERT(!FLAG_never_compact || !FLAG_always_compact);
+ if (IsConcurrentSweepingInProgress()) {
+ // Instead of waiting we could also abort the sweeper threads here.
+ WaitUntilSweepingCompleted();
+ }
+
// Clear marking bits if incremental marking is aborted.
if (was_marked_incrementally_ && abort_incremental_marking_) {
heap()->incremental_marking()->Abort();
@@ -801,7 +929,7 @@ void MarkCompactCollector::Prepare(GCTracer* tracer) {
StartCompaction(NON_INCREMENTAL_COMPACTION);
}
- PagedSpaces spaces;
+ PagedSpaces spaces(heap());
for (PagedSpace* space = spaces.next();
space != NULL;
space = spaces.next()) {
@@ -825,9 +953,13 @@ void MarkCompactCollector::Finish() {
// force lazy re-initialization of it. This must be done after the
// GC, because it relies on the new address of certain old space
// objects (empty string, illegal builtin).
- heap()->isolate()->stub_cache()->Clear();
+ isolate()->stub_cache()->Clear();
- heap()->external_string_table_.CleanUp();
+ if (have_code_to_deoptimize_) {
+ // Some code objects were marked for deoptimization during the GC.
+ Deoptimizer::DeoptimizeMarkedCode(isolate());
+ have_code_to_deoptimize_ = false;
+ }
}
@@ -874,10 +1006,15 @@ void CodeFlusher::ProcessJSFunctionCandidates() {
Code* code = shared->code();
MarkBit code_mark = Marking::MarkBitFrom(code);
if (!code_mark.Get()) {
+ if (FLAG_trace_code_flushing && shared->is_compiled()) {
+ PrintF("[code-flushing clears: ");
+ shared->ShortPrint();
+ PrintF(" - age: %d]\n", code->GetAge());
+ }
shared->set_code(lazy_compile);
candidate->set_code(lazy_compile);
- } else if (code == lazy_compile) {
- candidate->set_code(lazy_compile);
+ } else {
+ candidate->set_code(code);
}
// We are in the middle of a GC cycle so the write barrier in the code
@@ -911,6 +1048,11 @@ void CodeFlusher::ProcessSharedFunctionInfoCandidates() {
Code* code = candidate->code();
MarkBit code_mark = Marking::MarkBitFrom(code);
if (!code_mark.Get()) {
+ if (FLAG_trace_code_flushing && candidate->is_compiled()) {
+ PrintF("[code-flushing clears: ");
+ candidate->ShortPrint();
+ PrintF(" - age: %d]\n", code->GetAge());
+ }
candidate->set_code(lazy_compile);
}
@@ -926,6 +1068,227 @@ void CodeFlusher::ProcessSharedFunctionInfoCandidates() {
}
+void CodeFlusher::ProcessOptimizedCodeMaps() {
+ static const int kEntriesStart = SharedFunctionInfo::kEntriesStart;
+ static const int kEntryLength = SharedFunctionInfo::kEntryLength;
+ static const int kContextOffset = 0;
+ static const int kCodeOffset = 1;
+ static const int kLiteralsOffset = 2;
+ STATIC_ASSERT(kEntryLength == 3);
+
+ SharedFunctionInfo* holder = optimized_code_map_holder_head_;
+ SharedFunctionInfo* next_holder;
+ while (holder != NULL) {
+ next_holder = GetNextCodeMap(holder);
+ ClearNextCodeMap(holder);
+
+ FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
+ int new_length = kEntriesStart;
+ int old_length = code_map->length();
+ for (int i = kEntriesStart; i < old_length; i += kEntryLength) {
+ Code* code = Code::cast(code_map->get(i + kCodeOffset));
+ MarkBit code_mark = Marking::MarkBitFrom(code);
+ if (!code_mark.Get()) {
+ continue;
+ }
+
+ // Update and record the context slot in the optimized code map.
+ Object** context_slot = HeapObject::RawField(code_map,
+ FixedArray::OffsetOfElementAt(new_length));
+ code_map->set(new_length++, code_map->get(i + kContextOffset));
+ ASSERT(Marking::IsBlack(
+ Marking::MarkBitFrom(HeapObject::cast(*context_slot))));
+ isolate_->heap()->mark_compact_collector()->
+ RecordSlot(context_slot, context_slot, *context_slot);
+
+ // Update and record the code slot in the optimized code map.
+ Object** code_slot = HeapObject::RawField(code_map,
+ FixedArray::OffsetOfElementAt(new_length));
+ code_map->set(new_length++, code_map->get(i + kCodeOffset));
+ ASSERT(Marking::IsBlack(
+ Marking::MarkBitFrom(HeapObject::cast(*code_slot))));
+ isolate_->heap()->mark_compact_collector()->
+ RecordSlot(code_slot, code_slot, *code_slot);
+
+ // Update and record the literals slot in the optimized code map.
+ Object** literals_slot = HeapObject::RawField(code_map,
+ FixedArray::OffsetOfElementAt(new_length));
+ code_map->set(new_length++, code_map->get(i + kLiteralsOffset));
+ ASSERT(Marking::IsBlack(
+ Marking::MarkBitFrom(HeapObject::cast(*literals_slot))));
+ isolate_->heap()->mark_compact_collector()->
+ RecordSlot(literals_slot, literals_slot, *literals_slot);
+ }
+
+ // Trim the optimized code map if entries have been removed.
+ if (new_length < old_length) {
+ holder->TrimOptimizedCodeMap(old_length - new_length);
+ }
+
+ holder = next_holder;
+ }
+
+ optimized_code_map_holder_head_ = NULL;
+}
+
+
+void CodeFlusher::EvictCandidate(SharedFunctionInfo* shared_info) {
+ // Make sure previous flushing decisions are revisited.
+ isolate_->heap()->incremental_marking()->RecordWrites(shared_info);
+
+ if (FLAG_trace_code_flushing) {
+ PrintF("[code-flushing abandons function-info: ");
+ shared_info->ShortPrint();
+ PrintF("]\n");
+ }
+
+ SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
+ SharedFunctionInfo* next_candidate;
+ if (candidate == shared_info) {
+ next_candidate = GetNextCandidate(shared_info);
+ shared_function_info_candidates_head_ = next_candidate;
+ ClearNextCandidate(shared_info);
+ } else {
+ while (candidate != NULL) {
+ next_candidate = GetNextCandidate(candidate);
+
+ if (next_candidate == shared_info) {
+ next_candidate = GetNextCandidate(shared_info);
+ SetNextCandidate(candidate, next_candidate);
+ ClearNextCandidate(shared_info);
+ break;
+ }
+
+ candidate = next_candidate;
+ }
+ }
+}
+
+
+void CodeFlusher::EvictCandidate(JSFunction* function) {
+ ASSERT(!function->next_function_link()->IsUndefined());
+ Object* undefined = isolate_->heap()->undefined_value();
+
+ // Make sure previous flushing decisions are revisited.
+ isolate_->heap()->incremental_marking()->RecordWrites(function);
+ isolate_->heap()->incremental_marking()->RecordWrites(function->shared());
+
+ if (FLAG_trace_code_flushing) {
+ PrintF("[code-flushing abandons closure: ");
+ function->shared()->ShortPrint();
+ PrintF("]\n");
+ }
+
+ JSFunction* candidate = jsfunction_candidates_head_;
+ JSFunction* next_candidate;
+ if (candidate == function) {
+ next_candidate = GetNextCandidate(function);
+ jsfunction_candidates_head_ = next_candidate;
+ ClearNextCandidate(function, undefined);
+ } else {
+ while (candidate != NULL) {
+ next_candidate = GetNextCandidate(candidate);
+
+ if (next_candidate == function) {
+ next_candidate = GetNextCandidate(function);
+ SetNextCandidate(candidate, next_candidate);
+ ClearNextCandidate(function, undefined);
+ break;
+ }
+
+ candidate = next_candidate;
+ }
+ }
+}
+
+
+void CodeFlusher::EvictOptimizedCodeMap(SharedFunctionInfo* code_map_holder) {
+ ASSERT(!FixedArray::cast(code_map_holder->optimized_code_map())->
+ get(SharedFunctionInfo::kNextMapIndex)->IsUndefined());
+
+ // Make sure previous flushing decisions are revisited.
+ isolate_->heap()->incremental_marking()->RecordWrites(code_map_holder);
+
+ if (FLAG_trace_code_flushing) {
+ PrintF("[code-flushing abandons code-map: ");
+ code_map_holder->ShortPrint();
+ PrintF("]\n");
+ }
+
+ SharedFunctionInfo* holder = optimized_code_map_holder_head_;
+ SharedFunctionInfo* next_holder;
+ if (holder == code_map_holder) {
+ next_holder = GetNextCodeMap(code_map_holder);
+ optimized_code_map_holder_head_ = next_holder;
+ ClearNextCodeMap(code_map_holder);
+ } else {
+ while (holder != NULL) {
+ next_holder = GetNextCodeMap(holder);
+
+ if (next_holder == code_map_holder) {
+ next_holder = GetNextCodeMap(code_map_holder);
+ SetNextCodeMap(holder, next_holder);
+ ClearNextCodeMap(code_map_holder);
+ break;
+ }
+
+ holder = next_holder;
+ }
+ }
+}
+
+
+void CodeFlusher::EvictJSFunctionCandidates() {
+ JSFunction* candidate = jsfunction_candidates_head_;
+ JSFunction* next_candidate;
+ while (candidate != NULL) {
+ next_candidate = GetNextCandidate(candidate);
+ EvictCandidate(candidate);
+ candidate = next_candidate;
+ }
+ ASSERT(jsfunction_candidates_head_ == NULL);
+}
+
+
+void CodeFlusher::EvictSharedFunctionInfoCandidates() {
+ SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
+ SharedFunctionInfo* next_candidate;
+ while (candidate != NULL) {
+ next_candidate = GetNextCandidate(candidate);
+ EvictCandidate(candidate);
+ candidate = next_candidate;
+ }
+ ASSERT(shared_function_info_candidates_head_ == NULL);
+}
+
+
+void CodeFlusher::EvictOptimizedCodeMaps() {
+ SharedFunctionInfo* holder = optimized_code_map_holder_head_;
+ SharedFunctionInfo* next_holder;
+ while (holder != NULL) {
+ next_holder = GetNextCodeMap(holder);
+ EvictOptimizedCodeMap(holder);
+ holder = next_holder;
+ }
+ ASSERT(optimized_code_map_holder_head_ == NULL);
+}
+
+
+void CodeFlusher::IteratePointersToFromSpace(ObjectVisitor* v) {
+ Heap* heap = isolate_->heap();
+
+ JSFunction** slot = &jsfunction_candidates_head_;
+ JSFunction* candidate = jsfunction_candidates_head_;
+ while (candidate != NULL) {
+ if (heap->InFromSpace(candidate)) {
+ v->VisitPointer(reinterpret_cast<Object**>(slot));
+ }
+ candidate = GetNextCandidate(*slot);
+ slot = GetNextCandidateSlot(*slot);
+ }
+}
+
+
MarkCompactCollector::~MarkCompactCollector() {
if (code_flusher_ != NULL) {
delete code_flusher_;
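
All of the Evict* helpers above unlink one element from an intrusive singly-linked list whose "next" pointer is stored on the candidate itself: the head element is special-cased, otherwise the list is walked until the predecessor is found. A generic sketch of that unlink pattern with plain structs (not V8's SharedFunctionInfo/JSFunction):

    #include <cstdio>

    struct Candidate {
      const char* name;
      Candidate* next;  // in V8 this link is smuggled into an existing field
    };

    // Remove 'victim' from the list headed by *head, mirroring EvictCandidate().
    void Evict(Candidate** head, Candidate* victim) {
      if (*head == victim) {        // victim is the head: just advance the head
        *head = victim->next;
        victim->next = nullptr;
        return;
      }
      for (Candidate* c = *head; c != nullptr; c = c->next) {
        if (c->next == victim) {    // splice victim out of the middle or tail
          c->next = victim->next;
          victim->next = nullptr;
          return;
        }
      }
    }

    int main() {
      Candidate c{"c", nullptr}, b{"b", &c}, a{"a", &b};
      Candidate* head = &a;
      Evict(&head, &b);
      for (Candidate* p = head; p != nullptr; p = p->next) std::printf("%s ", p->name);
      std::printf("\n");  // prints: a c
      return 0;
    }
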
@@ -935,7 +1298,7 @@ MarkCompactCollector::~MarkCompactCollector() {
static inline HeapObject* ShortCircuitConsString(Object** p) {
- // Optimization: If the heap object pointed to by p is a non-symbol
+ // Optimization: If the heap object pointed to by p is a non-internalized
// cons string whose right substring is HEAP->empty_string, update
// it in place to its left substring. Return the updated value.
//
@@ -943,7 +1306,7 @@ static inline HeapObject* ShortCircuitConsString(Object** p) {
// (i.e., the left substring of a cons string is always a heap object).
//
// The check performed is:
- // object->IsConsString() && !object->IsSymbol() &&
+ // object->IsConsString() && !object->IsInternalizedString() &&
// (ConsString::cast(object)->second() == HEAP->empty_string())
// except the maps for the object and its possible substrings might be
// marked.
@@ -953,7 +1316,7 @@ static inline HeapObject* ShortCircuitConsString(Object** p) {
InstanceType type = map->instance_type();
if ((type & kShortcutTypeMask) != kShortcutTypeTag) return object;
- Object* second = reinterpret_cast<ConsString*>(object)->unchecked_second();
+ Object* second = reinterpret_cast<ConsString*>(object)->second();
Heap* heap = map->GetHeap();
if (second != heap->empty_string()) {
return object;
@@ -962,7 +1325,7 @@ static inline HeapObject* ShortCircuitConsString(Object** p) {
// Since we don't have the object's start, it is impossible to update the
// page dirty marks. Therefore, we only replace the string with its left
// substring when page dirty marks do not change.
- Object* first = reinterpret_cast<ConsString*>(object)->unchecked_first();
+ Object* first = reinterpret_cast<ConsString*>(object)->first();
if (!heap->InNewSpace(object) && heap->InNewSpace(first)) return object;
*p = first;
@@ -1039,8 +1402,8 @@ class MarkCompactMarkingVisitor
INLINE(static void VisitUnmarkedObject(MarkCompactCollector* collector,
HeapObject* obj)) {
#ifdef DEBUG
- ASSERT(Isolate::Current()->heap()->Contains(obj));
- ASSERT(!HEAP->mark_compact_collector()->IsMarked(obj));
+ ASSERT(collector->heap()->Contains(obj));
+ ASSERT(!collector->heap()->mark_compact_collector()->IsMarked(obj));
#endif
Map* map = obj->map();
Heap* heap = obj->GetHeap();
@@ -1054,9 +1417,9 @@ class MarkCompactMarkingVisitor
// Visit all unmarked objects pointed to by [start, end).
// Returns false if the operation fails (lack of stack space).
- static inline bool VisitUnmarkedObjects(Heap* heap,
+ INLINE(static bool VisitUnmarkedObjects(Heap* heap,
Object** start,
- Object** end) {
+ Object** end)) {
// Return false if we are close to the stack limit.
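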
StackLimitCheck check(heap->isolate());
if (check.HasOverflowed()) return false;
@@ -1080,35 +1443,36 @@ class MarkCompactMarkingVisitor
shared->BeforeVisitingPointers();
}
- static void VisitJSWeakMap(Map* map, HeapObject* object) {
+ static void VisitWeakCollection(Map* map, HeapObject* object) {
MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector();
- JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(object);
+ JSWeakCollection* weak_collection =
+ reinterpret_cast<JSWeakCollection*>(object);
// Enqueue weak map in linked list of encountered weak maps.
- if (weak_map->next() == Smi::FromInt(0)) {
- weak_map->set_next(collector->encountered_weak_maps());
- collector->set_encountered_weak_maps(weak_map);
+ if (weak_collection->next() == Smi::FromInt(0)) {
+ weak_collection->set_next(collector->encountered_weak_collections());
+ collector->set_encountered_weak_collections(weak_collection);
}
// Skip visiting the backing hash table containing the mappings.
- int object_size = JSWeakMap::BodyDescriptor::SizeOf(map, object);
+ int object_size = JSWeakCollection::BodyDescriptor::SizeOf(map, object);
BodyVisitorBase<MarkCompactMarkingVisitor>::IteratePointers(
map->GetHeap(),
object,
- JSWeakMap::BodyDescriptor::kStartOffset,
- JSWeakMap::kTableOffset);
+ JSWeakCollection::BodyDescriptor::kStartOffset,
+ JSWeakCollection::kTableOffset);
BodyVisitorBase<MarkCompactMarkingVisitor>::IteratePointers(
map->GetHeap(),
object,
- JSWeakMap::kTableOffset + kPointerSize,
+ JSWeakCollection::kTableOffset + kPointerSize,
object_size);
// Mark the backing hash table without pushing it on the marking stack.
- Object* table_object = weak_map->table();
+ Object* table_object = weak_collection->table();
if (!table_object->IsHashTable()) return;
- ObjectHashTable* table = ObjectHashTable::cast(table_object);
+ WeakHashTable* table = WeakHashTable::cast(table_object);
Object** table_slot =
- HeapObject::RawField(weak_map, JSWeakMap::kTableOffset);
+ HeapObject::RawField(weak_collection, JSWeakCollection::kTableOffset);
MarkBit table_mark = Marking::MarkBitFrom(table);
collector->RecordSlot(table_slot, table_slot, table);
if (!table_mark.Get()) collector->SetMark(table, table_mark);
@@ -1134,15 +1498,13 @@ class MarkCompactMarkingVisitor
FIXED_ARRAY_TYPE) return;
// Make sure this is a RegExp that actually contains code.
- if (re->TypeTagUnchecked() != JSRegExp::IRREGEXP) return;
+ if (re->TypeTag() != JSRegExp::IRREGEXP) return;
- Object* code = re->DataAtUnchecked(JSRegExp::code_index(is_ascii));
+ Object* code = re->DataAt(JSRegExp::code_index(is_ascii));
if (!code->IsSmi() &&
HeapObject::cast(code)->map()->instance_type() == CODE_TYPE) {
// Save a copy that can be reinstated if we need the code again.
- re->SetDataAtUnchecked(JSRegExp::saved_code_index(is_ascii),
- code,
- heap);
+ re->SetDataAt(JSRegExp::saved_code_index(is_ascii), code);
// Saving a copy might create a pointer into compaction candidate
// that was not observed by marker. This might happen if JSRegExp data
@@ -1154,9 +1516,8 @@ class MarkCompactMarkingVisitor
RecordSlot(slot, slot, code);
// Set a number in the 0-255 range to guarantee no smi overflow.
- re->SetDataAtUnchecked(JSRegExp::code_index(is_ascii),
- Smi::FromInt(heap->sweep_generation() & 0xff),
- heap);
+ re->SetDataAt(JSRegExp::code_index(is_ascii),
+ Smi::FromInt(heap->sweep_generation() & 0xff));
} else if (code->IsSmi()) {
int value = Smi::cast(code)->value();
// The regexp has not been compiled yet or there was a compilation error.
@@ -1167,12 +1528,10 @@ class MarkCompactMarkingVisitor
// Check if we should flush now.
if (value == ((heap->sweep_generation() - kRegExpCodeThreshold) & 0xff)) {
- re->SetDataAtUnchecked(JSRegExp::code_index(is_ascii),
- Smi::FromInt(JSRegExp::kUninitializedValue),
- heap);
- re->SetDataAtUnchecked(JSRegExp::saved_code_index(is_ascii),
- Smi::FromInt(JSRegExp::kUninitializedValue),
- heap);
+ re->SetDataAt(JSRegExp::code_index(is_ascii),
+ Smi::FromInt(JSRegExp::kUninitializedValue));
+ re->SetDataAt(JSRegExp::saved_code_index(is_ascii),
+ Smi::FromInt(JSRegExp::kUninitializedValue));
}
}
}
@@ -1211,13 +1570,11 @@ void MarkCompactMarkingVisitor::ObjectStatsCountFixedArray(
fixed_array->map() != heap->fixed_double_array_map() &&
fixed_array != heap->empty_fixed_array()) {
if (fixed_array->IsDictionary()) {
- heap->RecordObjectStats(FIXED_ARRAY_TYPE,
- dictionary_type,
- fixed_array->Size());
+ heap->RecordFixedArraySubTypeStats(dictionary_type,
+ fixed_array->Size());
} else {
- heap->RecordObjectStats(FIXED_ARRAY_TYPE,
- fast_type,
- fixed_array->Size());
+ heap->RecordFixedArraySubTypeStats(fast_type,
+ fixed_array->Size());
}
}
}
@@ -1227,7 +1584,7 @@ void MarkCompactMarkingVisitor::ObjectStatsVisitBase(
MarkCompactMarkingVisitor::VisitorId id, Map* map, HeapObject* obj) {
Heap* heap = map->GetHeap();
int object_size = obj->Size();
- heap->RecordObjectStats(map->instance_type(), -1, object_size);
+ heap->RecordObjectStats(map->instance_type(), object_size);
non_count_table_.GetVisitorById(id)(map, obj);
if (obj->IsJSObject()) {
JSObject* object = JSObject::cast(obj);
@@ -1260,21 +1617,23 @@ class MarkCompactMarkingVisitor::ObjectStatsTracker<
if (map_obj->owns_descriptors() &&
array != heap->empty_descriptor_array()) {
int fixed_array_size = array->Size();
- heap->RecordObjectStats(FIXED_ARRAY_TYPE,
- DESCRIPTOR_ARRAY_SUB_TYPE,
- fixed_array_size);
+ heap->RecordFixedArraySubTypeStats(DESCRIPTOR_ARRAY_SUB_TYPE,
+ fixed_array_size);
}
if (map_obj->HasTransitionArray()) {
int fixed_array_size = map_obj->transitions()->Size();
- heap->RecordObjectStats(FIXED_ARRAY_TYPE,
- TRANSITION_ARRAY_SUB_TYPE,
- fixed_array_size);
+ heap->RecordFixedArraySubTypeStats(TRANSITION_ARRAY_SUB_TYPE,
+ fixed_array_size);
}
- if (map_obj->code_cache() != heap->empty_fixed_array()) {
- heap->RecordObjectStats(
- FIXED_ARRAY_TYPE,
- MAP_CODE_CACHE_SUB_TYPE,
- FixedArray::cast(map_obj->code_cache())->Size());
+ if (map_obj->has_code_cache()) {
+ CodeCache* cache = CodeCache::cast(map_obj->code_cache());
+ heap->RecordFixedArraySubTypeStats(MAP_CODE_CACHE_SUB_TYPE,
+ cache->default_cache()->Size());
+ if (!cache->normal_type_cache()->IsUndefined()) {
+ heap->RecordFixedArraySubTypeStats(
+ MAP_CODE_CACHE_SUB_TYPE,
+ FixedArray::cast(cache->normal_type_cache())->Size());
+ }
}
ObjectStatsVisitBase(kVisitMap, map, obj);
}
@@ -1289,7 +1648,9 @@ class MarkCompactMarkingVisitor::ObjectStatsTracker<
Heap* heap = map->GetHeap();
int object_size = obj->Size();
ASSERT(map->instance_type() == CODE_TYPE);
- heap->RecordObjectStats(CODE_TYPE, Code::cast(obj)->kind(), object_size);
+ Code* code_obj = Code::cast(obj);
+ heap->RecordCodeSubTypeStats(code_obj->kind(), code_obj->GetAge(),
+ object_size);
ObjectStatsVisitBase(kVisitCode, map, obj);
}
};
@@ -1303,8 +1664,7 @@ class MarkCompactMarkingVisitor::ObjectStatsTracker<
Heap* heap = map->GetHeap();
SharedFunctionInfo* sfi = SharedFunctionInfo::cast(obj);
if (sfi->scope_info() != heap->empty_fixed_array()) {
- heap->RecordObjectStats(
- FIXED_ARRAY_TYPE,
+ heap->RecordFixedArraySubTypeStats(
SCOPE_INFO_SUB_TYPE,
FixedArray::cast(sfi->scope_info())->Size());
}
@@ -1320,10 +1680,9 @@ class MarkCompactMarkingVisitor::ObjectStatsTracker<
static inline void Visit(Map* map, HeapObject* obj) {
Heap* heap = map->GetHeap();
FixedArray* fixed_array = FixedArray::cast(obj);
- if (fixed_array == heap->symbol_table()) {
- heap->RecordObjectStats(
- FIXED_ARRAY_TYPE,
- SYMBOL_TABLE_SUB_TYPE,
+ if (fixed_array == heap->string_table()) {
+ heap->RecordFixedArraySubTypeStats(
+ STRING_TABLE_SUB_TYPE,
fixed_array->Size());
}
ObjectStatsVisitBase(kVisitFixedArray, map, obj);
@@ -1352,23 +1711,6 @@ VisitorDispatchTable<MarkCompactMarkingVisitor::Callback>
MarkCompactMarkingVisitor::non_count_table_;
-class MarkingVisitor : public ObjectVisitor {
- public:
- explicit MarkingVisitor(Heap* heap) : heap_(heap) { }
-
- void VisitPointer(Object** p) {
- MarkCompactMarkingVisitor::VisitPointer(heap_, p);
- }
-
- void VisitPointers(Object** start, Object** end) {
- MarkCompactMarkingVisitor::VisitPointers(heap_, start, end);
- }
-
- private:
- Heap* heap_;
-};
-
-
class CodeMarkingVisitor : public ThreadVisitor {
public:
explicit CodeMarkingVisitor(MarkCompactCollector* collector)
@@ -1428,23 +1770,13 @@ void MarkCompactCollector::PrepareThreadForCodeFlushing(Isolate* isolate,
void MarkCompactCollector::PrepareForCodeFlushing() {
- ASSERT(heap() == Isolate::Current()->heap());
-
- // TODO(1609) Currently incremental marker does not support code flushing.
- if (!FLAG_flush_code || was_marked_incrementally_) {
- EnableCodeFlushing(false);
- return;
- }
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- if (heap()->isolate()->debug()->IsLoaded() ||
- heap()->isolate()->debug()->has_break_points()) {
- EnableCodeFlushing(false);
- return;
+ // Enable code flushing for non-incremental cycles.
+ if (FLAG_flush_code && !FLAG_flush_code_incrementally) {
+ EnableCodeFlushing(!was_marked_incrementally_);
}
-#endif
- EnableCodeFlushing(true);
+ // If code flushing is disabled, there is no need to prepare for it.
+ if (!is_code_flushing_enabled()) return;
// Ensure that empty descriptor array is marked. Method MarkDescriptorArray
// relies on it being marked before any other descriptor array.
@@ -1512,10 +1844,10 @@ class RootMarkingVisitor : public ObjectVisitor {
};
-// Helper class for pruning the symbol table.
-class SymbolTableCleaner : public ObjectVisitor {
+// Helper class for pruning the string table.
+class StringTableCleaner : public ObjectVisitor {
public:
- explicit SymbolTableCleaner(Heap* heap)
+ explicit StringTableCleaner(Heap* heap)
: heap_(heap), pointers_removed_(0) { }
virtual void VisitPointers(Object** start, Object** end) {
@@ -1524,8 +1856,8 @@ class SymbolTableCleaner : public ObjectVisitor {
Object* o = *p;
if (o->IsHeapObject() &&
!Marking::MarkBitFrom(HeapObject::cast(o)).Get()) {
- // Check if the symbol being pruned is an external symbol. We need to
- // delete the associated external data as this symbol is going away.
+ // Check if the internalized string being pruned is external. We need to
+ // delete the associated external data as this string is going away.
// Since no objects have yet been moved we can safely access the map of
// the object.
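
StringTableCleaner runs over the string table after marking: entries whose string was not marked are dropped, external string data is released at that point, and the count of removed pointers is later used to shrink the table. A rough stand-alone equivalent, with the table reduced to a vector and the external resource to a heap buffer (details beyond this hunk are paraphrased, not quoted from the patch):

    #include <cstdio>
    #include <string>
    #include <vector>

    struct TableEntry {
      std::string value;
      bool marked;             // did the string survive marking?
      char* external_payload;  // non-null for "external" strings
    };

    int CleanStringTable(std::vector<TableEntry*>& table) {
      int pointers_removed = 0;
      for (TableEntry*& slot : table) {
        if (slot == nullptr || slot->marked) continue;
        // Dead external strings must release their out-of-heap data now,
        // because nothing will reference them after this pass.
        delete[] slot->external_payload;
        delete slot;
        slot = nullptr;  // the hole
        pointers_removed++;
      }
      return pointers_removed;  // the caller shrinks the table by this amount
    }

    int main() {
      std::vector<TableEntry*> table = {
          new TableEntry{"kept", true, nullptr},
          new TableEntry{"dropped", false, new char[8]},
      };
      std::printf("removed %d entries\n", CleanStringTable(table));
      return 0;
    }
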
@@ -1592,47 +1924,36 @@ static void DiscoverGreyObjectsWithIterator(Heap* heap,
static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts);
-static void DiscoverGreyObjectsOnPage(MarkingDeque* marking_deque, Page* p) {
+static void DiscoverGreyObjectsOnPage(MarkingDeque* marking_deque,
+ MemoryChunk* p) {
ASSERT(!marking_deque->IsFull());
ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
- MarkBit::CellType* cells = p->markbits()->cells();
-
- int last_cell_index =
- Bitmap::IndexToCell(
- Bitmap::CellAlignIndex(
- p->AddressToMarkbitIndex(p->area_end())));
-
- Address cell_base = p->area_start();
- int cell_index = Bitmap::IndexToCell(
- Bitmap::CellAlignIndex(
- p->AddressToMarkbitIndex(cell_base)));
+ for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
+ Address cell_base = it.CurrentCellBase();
+ MarkBit::CellType* cell = it.CurrentCell();
-
- for (;
- cell_index < last_cell_index;
- cell_index++, cell_base += 32 * kPointerSize) {
- ASSERT((unsigned)cell_index ==
- Bitmap::IndexToCell(
- Bitmap::CellAlignIndex(
- p->AddressToMarkbitIndex(cell_base))));
-
- const MarkBit::CellType current_cell = cells[cell_index];
+ const MarkBit::CellType current_cell = *cell;
if (current_cell == 0) continue;
- const MarkBit::CellType next_cell = cells[cell_index + 1];
- MarkBit::CellType grey_objects = current_cell &
- ((current_cell >> 1) | (next_cell << (Bitmap::kBitsPerCell - 1)));
+ MarkBit::CellType grey_objects;
+ if (it.HasNext()) {
+ const MarkBit::CellType next_cell = *(cell+1);
+ grey_objects = current_cell &
+ ((current_cell >> 1) | (next_cell << (Bitmap::kBitsPerCell - 1)));
+ } else {
+ grey_objects = current_cell & (current_cell >> 1);
+ }
int offset = 0;
while (grey_objects != 0) {
int trailing_zeros = CompilerIntrinsics::CountTrailingZeros(grey_objects);
grey_objects >>= trailing_zeros;
offset += trailing_zeros;
- MarkBit markbit(&cells[cell_index], 1 << offset, false);
+ MarkBit markbit(cell, 1 << offset, false);
ASSERT(Marking::IsGrey(markbit));
Marking::GreyToBlack(markbit);
Address addr = cell_base + offset * kPointerSize;
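
The rewritten loop keeps the original bit trick: an object's colour is a pair of adjacent mark bits (white 00, black 10, grey 11), so the grey objects in a cell are exactly the positions where both a bit and its successor are set, the successor possibly spilling into the next cell. A worked stand-alone example of that extraction over one 32-bit cell (CountTrailingZeros is approximated with a GCC/Clang builtin):

    #include <cstdint>
    #include <cstdio>

    // Stand-in for CompilerIntrinsics::CountTrailingZeros (GCC/Clang builtin).
    static int CountTrailingZeros(uint32_t x) { return __builtin_ctz(x); }

    int main() {
      const int kBitsPerCell = 32;
      // Bit pairs starting at offsets 0 and 6 are grey (11); the pair at 3 is black (10).
      uint32_t current_cell = (1u << 0) | (1u << 1) |  // grey
                              (1u << 3) |              // black
                              (1u << 6) | (1u << 7);   // grey
      uint32_t next_cell = 0;  // the second bit of a pair can spill into the next cell

      uint32_t grey_objects =
          current_cell &
          ((current_cell >> 1) | (next_cell << (kBitsPerCell - 1)));

      // Walk the set bits exactly as DiscoverGreyObjectsOnPage does.
      int offset = 0;
      while (grey_objects != 0) {
        int trailing_zeros = CountTrailingZeros(grey_objects);
        grey_objects >>= trailing_zeros;
        offset += trailing_zeros;
        std::printf("grey object at bit offset %d\n", offset);  // prints 0, then 6
        grey_objects >>= 1;
        offset++;
      }
      return 0;
    }
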
@@ -1649,6 +1970,74 @@ static void DiscoverGreyObjectsOnPage(MarkingDeque* marking_deque, Page* p) {
}
+int MarkCompactCollector::DiscoverAndPromoteBlackObjectsOnPage(
+ NewSpace* new_space,
+ NewSpacePage* p) {
+ ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+ ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+ ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
+ ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
+
+ MarkBit::CellType* cells = p->markbits()->cells();
+ int survivors_size = 0;
+
+ for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
+ Address cell_base = it.CurrentCellBase();
+ MarkBit::CellType* cell = it.CurrentCell();
+
+ MarkBit::CellType current_cell = *cell;
+ if (current_cell == 0) continue;
+
+ int offset = 0;
+ while (current_cell != 0) {
+ int trailing_zeros = CompilerIntrinsics::CountTrailingZeros(current_cell);
+ current_cell >>= trailing_zeros;
+ offset += trailing_zeros;
+ Address address = cell_base + offset * kPointerSize;
+ HeapObject* object = HeapObject::FromAddress(address);
+
+ int size = object->Size();
+ survivors_size += size;
+
+ if (FLAG_trace_track_allocation_sites && object->IsJSObject()) {
+ if (AllocationMemento::FindForJSObject(JSObject::cast(object), true)
+ != NULL) {
+ heap()->allocation_mementos_found_++;
+ }
+ }
+
+ offset++;
+ current_cell >>= 1;
+ // Aggressively promote young survivors to the old space.
+ if (TryPromoteObject(object, size)) {
+ continue;
+ }
+
+ // Promotion failed. Just migrate object to another semispace.
+ MaybeObject* allocation = new_space->AllocateRaw(size);
+ if (allocation->IsFailure()) {
+ if (!new_space->AddFreshPage()) {
+ // Shouldn't happen. We are sweeping linearly, and to-space
+ // has the same number of pages as from-space, so there is
+ // always room.
+ UNREACHABLE();
+ }
+ allocation = new_space->AllocateRaw(size);
+ ASSERT(!allocation->IsFailure());
+ }
+ Object* target = allocation->ToObjectUnchecked();
+
+ MigrateObject(HeapObject::cast(target)->address(),
+ object->address(),
+ size,
+ NEW_SPACE);
+ }
+ *cells = 0;
+ }
+ return survivors_size;
+}
+
+
static void DiscoverGreyObjectsInSpace(Heap* heap,
MarkingDeque* marking_deque,
PagedSpace* space) {
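
DiscoverAndPromoteBlackObjectsOnPage applies one policy to every live new-space object: try to promote it to old space first, and only if that fails copy it into to-space, adding a fresh page when the bump allocation runs out. A schematic version of that decision with the allocators reduced to stubs (the size thresholds below are invented for the example):

    #include <cstdio>

    // Stubs standing in for TryPromoteObject / AllocateRaw / AddFreshPage.
    bool TryPromoteObject(int size)  { return size >= 256; }   // pretend large objects promote
    bool AllocateInToSpace(int size) { return size <= 4096; }  // pretend small objects fit
    bool AddFreshPage()              { return true; }

    void EvacuateLiveObject(int size) {
      if (TryPromoteObject(size)) {       // aggressively promote young survivors
        std::printf("size %d: promoted to old space\n", size);
        return;
      }
      if (!AllocateInToSpace(size)) {     // bump allocation failed
        if (!AddFreshPage()) {
          // Cannot happen in V8: to-space has as many pages as from-space.
          return;
        }
        AllocateInToSpace(size);
      }
      std::printf("size %d: migrated within new space\n", size);
    }

    int main() {
      EvacuateLiveObject(64);    // small: stays in new space
      EvacuateLiveObject(1024);  // large: promoted
      return 0;
    }
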
@@ -1666,6 +2055,18 @@ static void DiscoverGreyObjectsInSpace(Heap* heap,
}
+static void DiscoverGreyObjectsInNewSpace(Heap* heap,
+ MarkingDeque* marking_deque) {
+ NewSpace* space = heap->new_space();
+ NewSpacePageIterator it(space->bottom(), space->top());
+ while (it.has_next()) {
+ NewSpacePage* page = it.next();
+ DiscoverGreyObjectsOnPage(marking_deque, page);
+ if (marking_deque->IsFull()) return;
+ }
+}
+
+
bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
Object* o = *p;
if (!o->IsHeapObject()) return false;
@@ -1675,14 +2076,23 @@ bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
}
-void MarkCompactCollector::MarkSymbolTable() {
- SymbolTable* symbol_table = heap()->symbol_table();
- // Mark the symbol table itself.
- MarkBit symbol_table_mark = Marking::MarkBitFrom(symbol_table);
- SetMark(symbol_table, symbol_table_mark);
+bool MarkCompactCollector::IsUnmarkedHeapObjectWithHeap(Heap* heap,
+ Object** p) {
+ Object* o = *p;
+ ASSERT(o->IsHeapObject());
+ HeapObject* heap_object = HeapObject::cast(o);
+ MarkBit mark = Marking::MarkBitFrom(heap_object);
+ return !mark.Get();
+}
+
+
+void MarkCompactCollector::MarkStringTable(RootMarkingVisitor* visitor) {
+ StringTable* string_table = heap()->string_table();
+ // Mark the string table itself.
+ MarkBit string_table_mark = Marking::MarkBitFrom(string_table);
+ SetMark(string_table, string_table_mark);
// Explicitly mark the prefix.
- MarkingVisitor marker(heap());
- symbol_table->IteratePrefix(&marker);
+ string_table->IteratePrefix(visitor);
ProcessMarkingDeque();
}
@@ -1692,8 +2102,10 @@ void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
// etc., and all objects reachable from them.
heap()->IterateStrongRoots(visitor, VISIT_ONLY_STRONG);
- // Handle the symbol table specially.
- MarkSymbolTable();
+ // Handle the string table specially.
+ MarkStringTable(visitor);
+
+ MarkWeakObjectToCodeTable();
// There may be overflowed objects in the heap. Visit them now.
while (marking_deque_.overflowed()) {
@@ -1703,71 +2115,23 @@ void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
}
-void MarkCompactCollector::MarkObjectGroups() {
- List<ObjectGroup*>* object_groups =
- heap()->isolate()->global_handles()->object_groups();
-
- int last = 0;
- for (int i = 0; i < object_groups->length(); i++) {
- ObjectGroup* entry = object_groups->at(i);
- ASSERT(entry != NULL);
-
- Object*** objects = entry->objects_;
- bool group_marked = false;
- for (size_t j = 0; j < entry->length_; j++) {
- Object* object = *objects[j];
- if (object->IsHeapObject()) {
- HeapObject* heap_object = HeapObject::cast(object);
- MarkBit mark = Marking::MarkBitFrom(heap_object);
- if (mark.Get()) {
- group_marked = true;
- break;
- }
- }
- }
-
- if (!group_marked) {
- (*object_groups)[last++] = entry;
- continue;
- }
-
- // An object in the group is marked, so mark as grey all white heap
- // objects in the group.
- for (size_t j = 0; j < entry->length_; ++j) {
- Object* object = *objects[j];
- if (object->IsHeapObject()) {
- HeapObject* heap_object = HeapObject::cast(object);
- MarkBit mark = Marking::MarkBitFrom(heap_object);
- MarkObject(heap_object, mark);
- }
- }
-
- // Once the entire group has been colored grey, set the object group
- // to NULL so it won't be processed again.
- entry->Dispose();
- object_groups->at(i) = NULL;
- }
- object_groups->Rewind(last);
-}
-
-
void MarkCompactCollector::MarkImplicitRefGroups() {
List<ImplicitRefGroup*>* ref_groups =
- heap()->isolate()->global_handles()->implicit_ref_groups();
+ isolate()->global_handles()->implicit_ref_groups();
int last = 0;
for (int i = 0; i < ref_groups->length(); i++) {
ImplicitRefGroup* entry = ref_groups->at(i);
ASSERT(entry != NULL);
- if (!IsMarked(*entry->parent_)) {
+ if (!IsMarked(*entry->parent)) {
(*ref_groups)[last++] = entry;
continue;
}
- Object*** children = entry->children_;
+ Object*** children = entry->children;
// A parent object is marked, so mark all child heap objects.
- for (size_t j = 0; j < entry->length_; ++j) {
+ for (size_t j = 0; j < entry->length; ++j) {
if ((*children[j])->IsHeapObject()) {
HeapObject* child = HeapObject::cast(*children[j]);
MarkBit mark = Marking::MarkBitFrom(child);
@@ -1777,34 +2141,38 @@ void MarkCompactCollector::MarkImplicitRefGroups() {
// Once the entire group has been marked, dispose it because it's
// not needed anymore.
- entry->Dispose();
+ delete entry;
}
ref_groups->Rewind(last);
}
+void MarkCompactCollector::MarkWeakObjectToCodeTable() {
+ HeapObject* weak_object_to_code_table =
+ HeapObject::cast(heap()->weak_object_to_code_table());
+ if (!IsMarked(weak_object_to_code_table)) {
+ MarkBit mark = Marking::MarkBitFrom(weak_object_to_code_table);
+ SetMark(weak_object_to_code_table, mark);
+ }
+}
+
+
// Mark all objects reachable from the objects on the marking stack.
// Before: the marking stack contains zero or more heap object pointers.
// After: the marking stack is empty, and all objects reachable from the
// marking stack have been marked, or are overflowed in the heap.
void MarkCompactCollector::EmptyMarkingDeque() {
while (!marking_deque_.IsEmpty()) {
- while (!marking_deque_.IsEmpty()) {
- HeapObject* object = marking_deque_.Pop();
- ASSERT(object->IsHeapObject());
- ASSERT(heap()->Contains(object));
- ASSERT(Marking::IsBlack(Marking::MarkBitFrom(object)));
-
- Map* map = object->map();
- MarkBit map_mark = Marking::MarkBitFrom(map);
- MarkObject(map, map_mark);
+ HeapObject* object = marking_deque_.Pop();
+ ASSERT(object->IsHeapObject());
+ ASSERT(heap()->Contains(object));
+ ASSERT(Marking::IsBlack(Marking::MarkBitFrom(object)));
- MarkCompactMarkingVisitor::IterateBody(map, object);
- }
+ Map* map = object->map();
+ MarkBit map_mark = Marking::MarkBitFrom(map);
+ MarkObject(map, map_mark);
- // Process encountered weak maps, mark objects only reachable by those
- // weak maps and repeat until fix-point is reached.
- ProcessWeakMaps();
+ MarkCompactMarkingVisitor::IterateBody(map, object);
}
}
@@ -1817,8 +2185,7 @@ void MarkCompactCollector::EmptyMarkingDeque() {
void MarkCompactCollector::RefillMarkingDeque() {
ASSERT(marking_deque_.overflowed());
- SemiSpaceIterator new_it(heap()->new_space());
- DiscoverGreyObjectsWithIterator(heap(), &marking_deque_, &new_it);
+ DiscoverGreyObjectsInNewSpace(heap(), &marking_deque_);
if (marking_deque_.IsFull()) return;
DiscoverGreyObjectsInSpace(heap(),
@@ -1846,6 +2213,11 @@ void MarkCompactCollector::RefillMarkingDeque() {
heap()->cell_space());
if (marking_deque_.IsFull()) return;
+ DiscoverGreyObjectsInSpace(heap(),
+ &marking_deque_,
+ heap()->property_cell_space());
+ if (marking_deque_.IsFull()) return;
+
LargeObjectIterator lo_it(heap()->lo_space());
DiscoverGreyObjectsWithIterator(heap(),
&marking_deque_,
@@ -1869,24 +2241,46 @@ void MarkCompactCollector::ProcessMarkingDeque() {
}
-void MarkCompactCollector::ProcessExternalMarking() {
+// Mark all objects reachable (transitively) from objects on the marking
+// stack including references only considered in the atomic marking pause.
+void MarkCompactCollector::ProcessEphemeralMarking(ObjectVisitor* visitor) {
bool work_to_do = true;
ASSERT(marking_deque_.IsEmpty());
while (work_to_do) {
- MarkObjectGroups();
+ isolate()->global_handles()->IterateObjectGroups(
+ visitor, &IsUnmarkedHeapObjectWithHeap);
MarkImplicitRefGroups();
+ ProcessWeakCollections();
work_to_do = !marking_deque_.IsEmpty();
ProcessMarkingDeque();
}
}
+void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
+ for (StackFrameIterator it(isolate(), isolate()->thread_local_top());
+ !it.done(); it.Advance()) {
+ if (it.frame()->type() == StackFrame::JAVA_SCRIPT) {
+ return;
+ }
+ if (it.frame()->type() == StackFrame::OPTIMIZED) {
+ Code* code = it.frame()->LookupCode();
+ if (!code->CanDeoptAt(it.frame()->pc())) {
+ code->CodeIterateBody(visitor);
+ }
+ ProcessMarkingDeque();
+ return;
+ }
+ }
+}
+
+
void MarkCompactCollector::MarkLiveObjects() {
GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_MARK);
// The recursive GC marker detects when it is nearing stack overflow,
// and switches to a different marking system. JS interrupts interfere
// with the C stack limit check.
- PostponeInterruptsScope postpone(heap()->isolate());
+ PostponeInterruptsScope postpone(isolate());
bool incremental_marking_overflowed = false;
IncrementalMarking* incremental_marking = heap_->incremental_marking();
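
ProcessTopOptimizedFrame walks the stack from the top and stops at the first JavaScript frame; if that frame is optimized and its code cannot be deoptimized at the current pc, the code body is scanned as a strong root. A reduced sketch of that control flow (frame types and the can-deopt flag are stand-ins for V8's StackFrame API):

    #include <cstdio>
    #include <vector>

    enum FrameType { JAVA_SCRIPT, OPTIMIZED, INTERNAL };

    struct Frame { FrameType type; bool can_deopt_at_pc; };

    // Mirrors the shape of ProcessTopOptimizedFrame(): only the innermost
    // JavaScript/optimized frame is considered.
    void ProcessTopOptimizedFrame(const std::vector<Frame>& stack_top_first) {
      for (const Frame& f : stack_top_first) {
        if (f.type == JAVA_SCRIPT) return;  // plain JS frame: nothing to pin
        if (f.type == OPTIMIZED) {
          if (!f.can_deopt_at_pc) {
            // Execution will return into this code and it cannot be lazily
            // deoptimized here, so treat its embedded pointers as strong roots.
            std::printf("marking top optimized code as a root\n");
          }
          return;  // deeper frames are handled by the normal deopt machinery
        }
        // Internal/stub frames: keep walking toward the first JS frame.
      }
    }

    int main() {
      ProcessTopOptimizedFrame({{INTERNAL, false}, {OPTIMIZED, false}, {JAVA_SCRIPT, false}});
      return 0;
    }
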
@@ -1935,28 +2329,41 @@ void MarkCompactCollector::MarkLiveObjects() {
HeapObjectIterator cell_iterator(heap()->cell_space());
HeapObject* cell;
while ((cell = cell_iterator.Next()) != NULL) {
- ASSERT(cell->IsJSGlobalPropertyCell());
+ ASSERT(cell->IsCell());
if (IsMarked(cell)) {
- int offset = JSGlobalPropertyCell::kValueOffset;
+ int offset = Cell::kValueOffset;
MarkCompactMarkingVisitor::VisitPointer(
heap(),
reinterpret_cast<Object**>(cell->address() + offset));
}
}
}
+ {
+ HeapObjectIterator js_global_property_cell_iterator(
+ heap()->property_cell_space());
+ HeapObject* cell;
+ while ((cell = js_global_property_cell_iterator.Next()) != NULL) {
+ ASSERT(cell->IsPropertyCell());
+ if (IsMarked(cell)) {
+ MarkCompactMarkingVisitor::VisitPropertyCell(cell->map(), cell);
+ }
+ }
+ }
}
RootMarkingVisitor root_visitor(heap());
MarkRoots(&root_visitor);
+ ProcessTopOptimizedFrame(&root_visitor);
+
// The objects reachable from the roots are marked, yet unreachable
// objects are unmarked. Mark objects reachable due to host
- // application specific logic.
- ProcessExternalMarking();
+ // application specific logic or through Harmony weak maps.
+ ProcessEphemeralMarking(&root_visitor);
- // The objects reachable from the roots or object groups are marked,
- // yet unreachable objects are unmarked. Mark objects reachable
- // only from weak global handles.
+ // The objects reachable from the roots, weak maps or object groups
+ // are marked, yet unreachable objects are unmarked. Mark objects
+ // reachable only from weak global handles.
//
// First we identify nonlive weak handles and mark them as pending
// destruction.
@@ -1969,28 +2376,28 @@ void MarkCompactCollector::MarkLiveObjects() {
EmptyMarkingDeque();
}
- // Repeat host application specific marking to mark unmarked objects
- // reachable from the weak roots.
- ProcessExternalMarking();
+ // Repeat host application specific and Harmony weak maps marking to
+ // mark unmarked objects reachable from the weak roots.
+ ProcessEphemeralMarking(&root_visitor);
AfterMarking();
}
void MarkCompactCollector::AfterMarking() {
- // Object literal map caches reference symbols (cache keys) and maps
+ // Object literal map caches reference strings (cache keys) and maps
// (cache values). At this point still useful maps have already been
// marked. Mark the keys for the alive values before we process the
- // symbol table.
+ // string table.
ProcessMapCaches();
- // Prune the symbol table removing all symbols only pointed to by the
- // symbol table. Cannot use symbol_table() here because the symbol
+ // Prune the string table removing all strings only pointed to by the
+ // string table. Cannot use string_table() here because the string
// table is marked.
- SymbolTable* symbol_table = heap()->symbol_table();
- SymbolTableCleaner v(heap());
- symbol_table->IterateElements(&v);
- symbol_table->ElementsRemoved(v.PointersRemoved());
+ StringTable* string_table = heap()->string_table();
+ StringTableCleaner v(heap());
+ string_table->IterateElements(&v);
+ string_table->ElementsRemoved(v.PointersRemoved());
heap()->external_string_table_.Iterate(&v);
heap()->external_string_table_.CleanUp();
@@ -2005,9 +2412,11 @@ void MarkCompactCollector::AfterMarking() {
// Flush code from collected candidates.
if (is_code_flushing_enabled()) {
code_flusher_->ProcessCandidates();
- // TODO(1609) Currently incremental marker does not support code flushing,
- // we need to disable it before incremental marking steps for next cycle.
- EnableCodeFlushing(false);
+ // If incremental marker does not support code flushing, we need to
+ // disable it before incremental marking steps for next cycle.
+ if (FLAG_flush_code && !FLAG_flush_code_incrementally) {
+ EnableCodeFlushing(false);
+ }
}
if (!FLAG_watch_ic_patching) {
@@ -2077,7 +2486,6 @@ void MarkCompactCollector::ReattachInitialMaps() {
for (HeapObject* obj = map_iterator.Next();
obj != NULL;
obj = map_iterator.Next()) {
- if (obj->IsFreeSpace()) continue;
Map* map = Map::cast(obj);
STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
@@ -2090,32 +2498,71 @@ void MarkCompactCollector::ReattachInitialMaps() {
}
-void MarkCompactCollector::ClearNonLiveTransitions() {
- HeapObjectIterator map_iterator(heap()->map_space());
+void MarkCompactCollector::ClearNonLiveReferences() {
// Iterate over the map space, setting map transitions that go from
// a marked map to an unmarked map to null transitions. This action
// is carried out only on maps of JSObjects and related subtypes.
+ HeapObjectIterator map_iterator(heap()->map_space());
for (HeapObject* obj = map_iterator.Next();
- obj != NULL; obj = map_iterator.Next()) {
- Map* map = reinterpret_cast<Map*>(obj);
- MarkBit map_mark = Marking::MarkBitFrom(map);
- if (map->IsFreeSpace()) continue;
+ obj != NULL;
+ obj = map_iterator.Next()) {
+ Map* map = Map::cast(obj);
- ASSERT(map->IsMap());
- // Only JSObject and subtypes have map transitions and back pointers.
- STATIC_ASSERT(LAST_TYPE == LAST_JS_OBJECT_TYPE);
- if (map->instance_type() < FIRST_JS_OBJECT_TYPE) continue;
+ if (!map->CanTransition()) continue;
- if (map_mark.Get() &&
- map->attached_to_shared_function_info()) {
+ MarkBit map_mark = Marking::MarkBitFrom(map);
+ if (map_mark.Get() && map->attached_to_shared_function_info()) {
// This map is used for inobject slack tracking and has been detached
// from SharedFunctionInfo during the mark phase.
// Since it survived the GC, reattach it now.
- map->unchecked_constructor()->unchecked_shared()->AttachInitialMap(map);
+ JSFunction::cast(map->constructor())->shared()->AttachInitialMap(map);
}
ClearNonLivePrototypeTransitions(map);
ClearNonLiveMapTransitions(map, map_mark);
+
+ if (map_mark.Get()) {
+ ClearNonLiveDependentCode(map->dependent_code());
+ } else {
+ ClearAndDeoptimizeDependentCode(map->dependent_code());
+ map->set_dependent_code(DependentCode::cast(heap()->empty_fixed_array()));
+ }
+ }
+
+ // Iterate over property cell space, removing dependent code that is not
+ // otherwise kept alive by strong references.
+ HeapObjectIterator cell_iterator(heap_->property_cell_space());
+ for (HeapObject* cell = cell_iterator.Next();
+ cell != NULL;
+ cell = cell_iterator.Next()) {
+ if (IsMarked(cell)) {
+ ClearNonLiveDependentCode(PropertyCell::cast(cell)->dependent_code());
+ }
+ }
+
+ if (heap_->weak_object_to_code_table()->IsHashTable()) {
+ WeakHashTable* table =
+ WeakHashTable::cast(heap_->weak_object_to_code_table());
+ uint32_t capacity = table->Capacity();
+ for (uint32_t i = 0; i < capacity; i++) {
+ uint32_t key_index = table->EntryToIndex(i);
+ Object* key = table->get(key_index);
+ if (!table->IsKey(key)) continue;
+ uint32_t value_index = table->EntryToValueIndex(i);
+ Object* value = table->get(value_index);
+ if (IsMarked(key)) {
+ if (!IsMarked(value)) {
+ HeapObject* obj = HeapObject::cast(value);
+ MarkBit mark = Marking::MarkBitFrom(obj);
+ SetMark(obj, mark);
+ }
+ ClearNonLiveDependentCode(DependentCode::cast(value));
+ } else {
+ ClearAndDeoptimizeDependentCode(DependentCode::cast(value));
+ table->set(key_index, heap_->the_hole_value());
+ table->set(value_index, heap_->the_hole_value());
+ }
+ }
}
}
@@ -2136,13 +2583,11 @@ void MarkCompactCollector::ClearNonLivePrototypeTransitions(Map* map) {
int proto_index = proto_offset + new_number_of_transitions * step;
int map_index = map_offset + new_number_of_transitions * step;
if (new_number_of_transitions != i) {
- prototype_transitions->set_unchecked(
- heap_,
+ prototype_transitions->set(
proto_index,
prototype,
UPDATE_WRITE_BARRIER);
- prototype_transitions->set_unchecked(
- heap_,
+ prototype_transitions->set(
map_index,
cached_map,
SKIP_WRITE_BARRIER);
@@ -2163,7 +2608,7 @@ void MarkCompactCollector::ClearNonLivePrototypeTransitions(Map* map) {
for (int i = new_number_of_transitions * step;
i < number_of_transitions * step;
i++) {
- prototype_transitions->set_undefined(heap_, header + i);
+ prototype_transitions->set_undefined(header + i);
}
}
@@ -2184,12 +2629,71 @@ void MarkCompactCollector::ClearNonLiveMapTransitions(Map* map,
}
-void MarkCompactCollector::ProcessWeakMaps() {
- Object* weak_map_obj = encountered_weak_maps();
- while (weak_map_obj != Smi::FromInt(0)) {
- ASSERT(MarkCompactCollector::IsMarked(HeapObject::cast(weak_map_obj)));
- JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(weak_map_obj);
- ObjectHashTable* table = ObjectHashTable::cast(weak_map->table());
+void MarkCompactCollector::ClearAndDeoptimizeDependentCode(
+ DependentCode* entries) {
+ DisallowHeapAllocation no_allocation;
+ DependentCode::GroupStartIndexes starts(entries);
+ int number_of_entries = starts.number_of_entries();
+ if (number_of_entries == 0) return;
+ for (int i = 0; i < number_of_entries; i++) {
+ // If the entry is compilation info then the map must be alive,
+ // and ClearAndDeoptimizeDependentCode shouldn't be called.
+ ASSERT(entries->is_code_at(i));
+ Code* code = entries->code_at(i);
+
+ if (IsMarked(code) && !code->marked_for_deoptimization()) {
+ code->set_marked_for_deoptimization(true);
+ have_code_to_deoptimize_ = true;
+ }
+ entries->clear_at(i);
+ }
+}
+
+
+void MarkCompactCollector::ClearNonLiveDependentCode(DependentCode* entries) {
+ DisallowHeapAllocation no_allocation;
+ DependentCode::GroupStartIndexes starts(entries);
+ int number_of_entries = starts.number_of_entries();
+ if (number_of_entries == 0) return;
+ int new_number_of_entries = 0;
+ // Go through all groups, remove dead code objects, and compact.
+ for (int g = 0; g < DependentCode::kGroupCount; g++) {
+ int group_number_of_entries = 0;
+ for (int i = starts.at(g); i < starts.at(g + 1); i++) {
+ Object* obj = entries->object_at(i);
+ ASSERT(obj->IsCode() || IsMarked(obj));
+ if (IsMarked(obj) &&
+ (!obj->IsCode() || !WillBeDeoptimized(Code::cast(obj)))) {
+ if (new_number_of_entries + group_number_of_entries != i) {
+ entries->set_object_at(
+ new_number_of_entries + group_number_of_entries, obj);
+ }
+ Object** slot = entries->slot_at(new_number_of_entries +
+ group_number_of_entries);
+ RecordSlot(slot, slot, obj);
+ group_number_of_entries++;
+ }
+ }
+ entries->set_number_of_entries(
+ static_cast<DependentCode::DependencyGroup>(g),
+ group_number_of_entries);
+ new_number_of_entries += group_number_of_entries;
+ }
+ for (int i = new_number_of_entries; i < number_of_entries; i++) {
+ entries->clear_at(i);
+ }
+}
+
+
+void MarkCompactCollector::ProcessWeakCollections() {
+ GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_WEAKCOLLECTION_PROCESS);
+ Object* weak_collection_obj = encountered_weak_collections();
+ while (weak_collection_obj != Smi::FromInt(0)) {
+ ASSERT(MarkCompactCollector::IsMarked(
+ HeapObject::cast(weak_collection_obj)));
+ JSWeakCollection* weak_collection =
+ reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
+ ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table());
Object** anchor = reinterpret_cast<Object**>(table->address());
for (int i = 0; i < table->Capacity(); i++) {
if (MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) {
@@ -2204,26 +2708,29 @@ void MarkCompactCollector::ProcessWeakMaps() {
this, anchor, value_slot);
}
}
- weak_map_obj = weak_map->next();
+ weak_collection_obj = weak_collection->next();
}
}
-void MarkCompactCollector::ClearWeakMaps() {
- Object* weak_map_obj = encountered_weak_maps();
- while (weak_map_obj != Smi::FromInt(0)) {
- ASSERT(MarkCompactCollector::IsMarked(HeapObject::cast(weak_map_obj)));
- JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(weak_map_obj);
- ObjectHashTable* table = ObjectHashTable::cast(weak_map->table());
+void MarkCompactCollector::ClearWeakCollections() {
+ GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_WEAKCOLLECTION_CLEAR);
+ Object* weak_collection_obj = encountered_weak_collections();
+ while (weak_collection_obj != Smi::FromInt(0)) {
+ ASSERT(MarkCompactCollector::IsMarked(
+ HeapObject::cast(weak_collection_obj)));
+ JSWeakCollection* weak_collection =
+ reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
+ ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table());
for (int i = 0; i < table->Capacity(); i++) {
if (!MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) {
table->RemoveEntry(i);
}
}
- weak_map_obj = weak_map->next();
- weak_map->set_next(Smi::FromInt(0));
+ weak_collection_obj = weak_collection->next();
+ weak_collection->set_next(Smi::FromInt(0));
}
- set_encountered_weak_maps(Smi::FromInt(0));
+ set_encountered_weak_collections(Smi::FromInt(0));
}
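ProcessWeakCollections and ClearWeakCollections above split the weak-collection handling into a marking-time half and a cleanup half: a value is marked only when its key is already marked, and once marking has finished, entries whose keys stayed unmarked are removed. Because a freshly marked value can itself be the key of another weak entry, the weak-collection marking pass above is repeated until nothing new gets marked. The following standalone sketch is not V8 code; it uses plain STL types with invented names purely to show that pattern in isolation.

#include <cstdio>
#include <set>
#include <vector>

struct Obj { int id; std::vector<Obj*> strong_refs; };
struct WeakEntry { Obj* key; Obj* value; };
struct WeakCollection { std::vector<WeakEntry> entries; };

// Transitive marking through strong references (stand-in for the marking deque).
static void Mark(Obj* obj, std::set<Obj*>* marked) {
  if (!marked->insert(obj).second) return;
  for (Obj* ref : obj->strong_refs) Mark(ref, marked);
}

// One pass: mark values whose keys are already marked; report progress so the
// caller can rerun the pass until a fixpoint is reached.
static bool ProcessWeakCollections(const std::vector<WeakCollection*>& weak,
                                   std::set<Obj*>* marked) {
  bool progress = false;
  for (WeakCollection* wc : weak) {
    for (const WeakEntry& e : wc->entries) {
      if (marked->count(e.key) && !marked->count(e.value)) {
        Mark(e.value, marked);
        progress = true;
      }
    }
  }
  return progress;
}

// After the marking fixpoint: drop entries whose keys did not survive.
static void ClearWeakCollections(const std::vector<WeakCollection*>& weak,
                                 const std::set<Obj*>& marked) {
  for (WeakCollection* wc : weak) {
    std::vector<WeakEntry> live;
    for (const WeakEntry& e : wc->entries) {
      if (marked.count(e.key)) live.push_back(e);
    }
    wc->entries.swap(live);
  }
}

int main() {
  Obj root = {0, {}}, key = {1, {}}, value = {2, {}};
  Obj dead_key = {3, {}}, dead_value = {4, {}};
  root.strong_refs.push_back(&key);           // only "key" is strongly reachable

  WeakCollection wc = {{{&key, &value}, {&dead_key, &dead_value}}};
  std::vector<WeakCollection*> weak = {&wc};

  std::set<Obj*> marked;
  Mark(&root, &marked);                               // roots first
  while (ProcessWeakCollections(weak, &marked)) {}    // ephemeron fixpoint
  ClearWeakCollections(weak, marked);

  std::printf("surviving entries: %d, value kept alive: %d\n",
              (int)wc.entries.size(), (int)marked.count(&value));
  return 0;
}

Running it leaves only the entry whose key was reachable and keeps its value marked, while the entry keyed by the unreachable object is dropped, which is exactly the division of labour between the two routines above.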
@@ -2245,8 +2752,13 @@ void MarkCompactCollector::MigrateObject(Address dst,
Address src,
int size,
AllocationSpace dest) {
- HEAP_PROFILE(heap(), ObjectMoveEvent(src, dst));
- if (dest == OLD_POINTER_SPACE || dest == LO_SPACE) {
+ HeapProfiler* heap_profiler = heap()->isolate()->heap_profiler();
+ if (heap_profiler->is_profiling()) {
+ heap_profiler->ObjectMoveEvent(src, dst, size);
+ }
+ ASSERT(heap()->AllowedToBeMigrated(HeapObject::FromAddress(src), dest));
+ ASSERT(dest != LO_SPACE && size <= Page::kMaxNonCodeHeapObjectSize);
+ if (dest == OLD_POINTER_SPACE) {
Address src_slot = src;
Address dst_slot = dst;
ASSERT(IsAligned(size, kPointerSize));
@@ -2282,7 +2794,7 @@ void MarkCompactCollector::MigrateObject(Address dst,
}
}
} else if (dest == CODE_SPACE) {
- PROFILE(heap()->isolate(), CodeMoveEvent(src, dst));
+ PROFILE(isolate(), CodeMoveEvent(src, dst));
heap()->MoveBlock(dst, src, size);
SlotsBuffer::AddTo(&slots_buffer_allocator_,
&migration_slots_buffer_,
@@ -2334,6 +2846,16 @@ class PointersUpdatingVisitor: public ObjectVisitor {
}
}
+ void VisitCodeAgeSequence(RelocInfo* rinfo) {
+ ASSERT(RelocInfo::IsCodeAgeSequence(rinfo->rmode()));
+ Object* stub = rinfo->code_age_stub();
+ ASSERT(stub != NULL);
+ VisitPointer(&stub);
+ if (stub != rinfo->code_age_stub()) {
+ rinfo->set_code_age_stub(Code::cast(stub));
+ }
+ }
+
void VisitDebugTarget(RelocInfo* rinfo) {
ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
rinfo->IsPatchedReturnSequence()) ||
@@ -2408,37 +2930,26 @@ static String* UpdateReferenceInExternalStringTableEntry(Heap* heap,
bool MarkCompactCollector::TryPromoteObject(HeapObject* object,
int object_size) {
- Object* result;
+ // TODO(hpayer): Replace that check with an assert.
+ CHECK(object_size <= Page::kMaxNonCodeHeapObjectSize);
- if (object_size > Page::kMaxNonCodeHeapObjectSize) {
- MaybeObject* maybe_result =
- heap()->lo_space()->AllocateRaw(object_size, NOT_EXECUTABLE);
- if (maybe_result->ToObject(&result)) {
- HeapObject* target = HeapObject::cast(result);
- MigrateObject(target->address(),
- object->address(),
- object_size,
- LO_SPACE);
- heap()->mark_compact_collector()->tracer()->
- increment_promoted_objects_size(object_size);
- return true;
- }
- } else {
- OldSpace* target_space = heap()->TargetSpace(object);
-
- ASSERT(target_space == heap()->old_pointer_space() ||
- target_space == heap()->old_data_space());
- MaybeObject* maybe_result = target_space->AllocateRaw(object_size);
- if (maybe_result->ToObject(&result)) {
- HeapObject* target = HeapObject::cast(result);
- MigrateObject(target->address(),
- object->address(),
- object_size,
- target_space->identity());
- heap()->mark_compact_collector()->tracer()->
- increment_promoted_objects_size(object_size);
- return true;
- }
+ OldSpace* target_space = heap()->TargetSpace(object);
+
+ ASSERT(target_space == heap()->old_pointer_space() ||
+ target_space == heap()->old_data_space());
+ Object* result;
+ MaybeObject* maybe_result = target_space->AllocateRaw(
+ object_size,
+ PagedSpace::MOVE_OBJECT);
+ if (maybe_result->ToObject(&result)) {
+ HeapObject* target = HeapObject::cast(result);
+ MigrateObject(target->address(),
+ object->address(),
+ object_size,
+ target_space->identity());
+ heap()->mark_compact_collector()->tracer()->
+ increment_promoted_objects_size(object_size);
+ return true;
}
return false;
@@ -2469,48 +2980,10 @@ void MarkCompactCollector::EvacuateNewSpace() {
// migrate live objects and write forwarding addresses. This stage puts
// new entries in the store buffer and may cause some pages to be marked
// scan-on-scavenge.
- SemiSpaceIterator from_it(from_bottom, from_top);
- for (HeapObject* object = from_it.Next();
- object != NULL;
- object = from_it.Next()) {
- MarkBit mark_bit = Marking::MarkBitFrom(object);
- if (mark_bit.Get()) {
- mark_bit.Clear();
- // Don't bother decrementing live bytes count. We'll discard the
- // entire page at the end.
- int size = object->Size();
- survivors_size += size;
-
- // Aggressively promote young survivors to the old space.
- if (TryPromoteObject(object, size)) {
- continue;
- }
-
- // Promotion failed. Just migrate object to another semispace.
- MaybeObject* allocation = new_space->AllocateRaw(size);
- if (allocation->IsFailure()) {
- if (!new_space->AddFreshPage()) {
- // Shouldn't happen. We are sweeping linearly, and to-space
- // has the same number of pages as from-space, so there is
- // always room.
- UNREACHABLE();
- }
- allocation = new_space->AllocateRaw(size);
- ASSERT(!allocation->IsFailure());
- }
- Object* target = allocation->ToObjectUnchecked();
-
- MigrateObject(HeapObject::cast(target)->address(),
- object->address(),
- size,
- NEW_SPACE);
- } else {
- // Process the dead object before we write a NULL into its header.
- LiveObjectList::ProcessNonLive(object);
-
- // Mark dead objects in the new space with null in their map field.
- Memory::Address_at(object->address()) = NULL;
- }
+ NewSpacePageIterator it(from_bottom, from_top);
+ while (it.has_next()) {
+ NewSpacePage* p = it.next();
+ survivors_size += DiscoverAndPromoteBlackObjectsOnPage(new_space, p);
}
heap_->IncrementYoungSurvivorsCounter(survivors_size);
@@ -2522,31 +2995,17 @@ void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) {
AlwaysAllocateScope always_allocate;
PagedSpace* space = static_cast<PagedSpace*>(p->owner());
ASSERT(p->IsEvacuationCandidate() && !p->WasSwept());
- MarkBit::CellType* cells = p->markbits()->cells();
p->MarkSweptPrecisely();
- int last_cell_index =
- Bitmap::IndexToCell(
- Bitmap::CellAlignIndex(
- p->AddressToMarkbitIndex(p->area_end())));
-
- Address cell_base = p->area_start();
- int cell_index = Bitmap::IndexToCell(
- Bitmap::CellAlignIndex(
- p->AddressToMarkbitIndex(cell_base)));
-
int offsets[16];
- for (;
- cell_index < last_cell_index;
- cell_index++, cell_base += 32 * kPointerSize) {
- ASSERT((unsigned)cell_index ==
- Bitmap::IndexToCell(
- Bitmap::CellAlignIndex(
- p->AddressToMarkbitIndex(cell_base))));
- if (cells[cell_index] == 0) continue;
+ for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
+ Address cell_base = it.CurrentCellBase();
+ MarkBit::CellType* cell = it.CurrentCell();
+
+ if (*cell == 0) continue;
- int live_objects = MarkWordToObjectStarts(cells[cell_index], offsets);
+ int live_objects = MarkWordToObjectStarts(*cell, offsets);
for (int i = 0; i < live_objects; i++) {
Address object_addr = cell_base + offsets[i] * kPointerSize;
HeapObject* object = HeapObject::FromAddress(object_addr);
@@ -2554,7 +3013,7 @@ void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) {
int size = object->Size();
- MaybeObject* target = space->AllocateRaw(size);
+ MaybeObject* target = space->AllocateRaw(size, PagedSpace::MOVE_OBJECT);
if (target->IsFailure()) {
// OS refused to give us memory.
V8::FatalProcessOutOfMemory("Evacuation");
@@ -2571,7 +3030,7 @@ void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) {
}
// Clear marking bits for current cell.
- cells[cell_index] = 0;
+ *cell = 0;
}
p->ResetLiveBytes();
}
@@ -2596,6 +3055,7 @@ void MarkCompactCollector::EvacuatePages() {
slots_buffer_allocator_.DeallocateChain(page->slots_buffer_address());
page->ClearEvacuationCandidate();
page->SetFlag(Page::RESCAN_ON_EVACUATION);
+ page->InsertAfter(static_cast<PagedSpace*>(page->owner())->anchor());
}
return;
}
@@ -2619,13 +3079,14 @@ class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
};
-static inline void UpdateSlot(ObjectVisitor* v,
+static inline void UpdateSlot(Isolate* isolate,
+ ObjectVisitor* v,
SlotsBuffer::SlotType slot_type,
Address addr) {
switch (slot_type) {
case SlotsBuffer::CODE_TARGET_SLOT: {
RelocInfo rinfo(addr, RelocInfo::CODE_TARGET, 0, NULL);
- rinfo.Visit(v);
+ rinfo.Visit(isolate, v);
break;
}
case SlotsBuffer::CODE_ENTRY_SLOT: {
@@ -2639,17 +3100,17 @@ static inline void UpdateSlot(ObjectVisitor* v,
}
case SlotsBuffer::DEBUG_TARGET_SLOT: {
RelocInfo rinfo(addr, RelocInfo::DEBUG_BREAK_SLOT, 0, NULL);
- if (rinfo.IsPatchedDebugBreakSlotSequence()) rinfo.Visit(v);
+ if (rinfo.IsPatchedDebugBreakSlotSequence()) rinfo.Visit(isolate, v);
break;
}
case SlotsBuffer::JS_RETURN_SLOT: {
RelocInfo rinfo(addr, RelocInfo::JS_RETURN, 0, NULL);
- if (rinfo.IsPatchedReturnSequence()) rinfo.Visit(v);
+ if (rinfo.IsPatchedReturnSequence()) rinfo.Visit(isolate, v);
break;
}
case SlotsBuffer::EMBEDDED_OBJECT_SLOT: {
RelocInfo rinfo(addr, RelocInfo::EMBEDDED_OBJECT, 0, NULL);
- rinfo.Visit(v);
+ rinfo.Visit(isolate, v);
break;
}
default:
@@ -2686,22 +3147,15 @@ static void SweepPrecisely(PagedSpace* space,
space->identity() == CODE_SPACE);
ASSERT((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST));
- MarkBit::CellType* cells = p->markbits()->cells();
- p->MarkSweptPrecisely();
+ double start_time = 0.0;
+ if (FLAG_print_cumulative_gc_stat) {
+ start_time = OS::TimeCurrentMillis();
+ }
- int last_cell_index =
- Bitmap::IndexToCell(
- Bitmap::CellAlignIndex(
- p->AddressToMarkbitIndex(p->area_end())));
+ p->MarkSweptPrecisely();
Address free_start = p->area_start();
- int cell_index =
- Bitmap::IndexToCell(
- Bitmap::CellAlignIndex(
- p->AddressToMarkbitIndex(free_start)));
-
ASSERT(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
- Address object_address = free_start;
int offsets[16];
SkipList* skip_list = p->skip_list();
@@ -2710,19 +3164,20 @@ static void SweepPrecisely(PagedSpace* space,
skip_list->Clear();
}
- for (;
- cell_index < last_cell_index;
- cell_index++, object_address += 32 * kPointerSize) {
- ASSERT((unsigned)cell_index ==
- Bitmap::IndexToCell(
- Bitmap::CellAlignIndex(
- p->AddressToMarkbitIndex(object_address))));
- int live_objects = MarkWordToObjectStarts(cells[cell_index], offsets);
+ for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
+ Address cell_base = it.CurrentCellBase();
+ MarkBit::CellType* cell = it.CurrentCell();
+ int live_objects = MarkWordToObjectStarts(*cell, offsets);
int live_index = 0;
for ( ; live_objects != 0; live_objects--) {
- Address free_end = object_address + offsets[live_index++] * kPointerSize;
+ Address free_end = cell_base + offsets[live_index++] * kPointerSize;
if (free_end != free_start) {
space->Free(free_start, static_cast<int>(free_end - free_start));
+#ifdef ENABLE_GDB_JIT_INTERFACE
+ if (FLAG_gdbjit && space->identity() == CODE_SPACE) {
+ GDBJITInterface::RemoveCodeRange(free_start, free_end);
+ }
+#endif
}
HeapObject* live_object = HeapObject::FromAddress(free_end);
ASSERT(Marking::IsBlack(Marking::MarkBitFrom(live_object)));
@@ -2745,12 +3200,20 @@ static void SweepPrecisely(PagedSpace* space,
free_start = free_end + size;
}
// Clear marking bits for current cell.
- cells[cell_index] = 0;
+ *cell = 0;
}
if (free_start != p->area_end()) {
space->Free(free_start, static_cast<int>(p->area_end() - free_start));
+#ifdef ENABLE_GDB_JIT_INTERFACE
+ if (FLAG_gdbjit && space->identity() == CODE_SPACE) {
+ GDBJITInterface::RemoveCodeRange(free_start, p->area_end());
+ }
+#endif
}
p->ResetLiveBytes();
+ if (FLAG_print_cumulative_gc_stat) {
+ space->heap()->AddSweepingTime(OS::TimeCurrentMillis() - start_time);
+ }
}
@@ -2833,6 +3296,12 @@ void MarkCompactCollector::InvalidateCode(Code* code) {
}
+// Return true if the given code is deoptimized or will be deoptimized.
+bool MarkCompactCollector::WillBeDeoptimized(Code* code) {
+ return code->marked_for_deoptimization();
+}
+
+
bool MarkCompactCollector::MarkInvalidatedCode() {
bool code_marked = false;
@@ -2876,11 +3345,9 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
bool code_slots_filtering_required;
{ GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE);
code_slots_filtering_required = MarkInvalidatedCode();
-
EvacuateNewSpace();
}
-
{ GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_EVACUATE_PAGES);
EvacuatePages();
}
@@ -2907,7 +3374,6 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
GCTracer::Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS);
// Update roots.
heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
- LiveObjectList::IterateElements(&updating_visitor);
}
{ GCTracer::Scope gc_scope(tracer_,
@@ -2915,7 +3381,8 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
StoreBufferRebuildScope scope(heap_,
heap_->store_buffer(),
&Heap::ScavengeStoreBufferCallback);
- heap_->store_buffer()->IteratePointersToNewSpace(&UpdatePointer);
+ heap_->store_buffer()->IteratePointersToNewSpaceAndClearMaps(
+ &UpdatePointer);
}
{ GCTracer::Scope gc_scope(tracer_,
@@ -2978,7 +3445,7 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
switch (space->identity()) {
case OLD_DATA_SPACE:
- SweepConservatively(space, p);
+ SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
break;
case OLD_POINTER_SPACE:
SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, IGNORE_SKIP_LIST>(
@@ -3003,18 +3470,32 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
for (HeapObject* cell = cell_iterator.Next();
cell != NULL;
cell = cell_iterator.Next()) {
- if (cell->IsJSGlobalPropertyCell()) {
- Address value_address =
- reinterpret_cast<Address>(cell) +
- (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag);
- updating_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
+ if (cell->IsCell()) {
+ Cell::BodyDescriptor::IterateBody(cell, &updating_visitor);
}
}
- // Update pointer from the native contexts list.
+ HeapObjectIterator js_global_property_cell_iterator(
+ heap_->property_cell_space());
+ for (HeapObject* cell = js_global_property_cell_iterator.Next();
+ cell != NULL;
+ cell = js_global_property_cell_iterator.Next()) {
+ if (cell->IsPropertyCell()) {
+ PropertyCell::BodyDescriptor::IterateBody(cell, &updating_visitor);
+ }
+ }
+
+ // Update the head of the native contexts list in the heap.
updating_visitor.VisitPointer(heap_->native_contexts_list_address());
- heap_->symbol_table()->Iterate(&updating_visitor);
+ heap_->string_table()->Iterate(&updating_visitor);
+ updating_visitor.VisitPointer(heap_->weak_object_to_code_table_address());
+ if (heap_->weak_object_to_code_table()->IsHashTable()) {
+ WeakHashTable* table =
+ WeakHashTable::cast(heap_->weak_object_to_code_table());
+ table->Iterate(&updating_visitor);
+ table->Rehash(heap_->undefined_value());
+ }
// Update pointers from external string table.
heap_->UpdateReferencesInExternalStringTable(
@@ -3043,6 +3524,23 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
slots_buffer_allocator_.DeallocateChain(&migration_slots_buffer_);
ASSERT(migration_slots_buffer_ == NULL);
+}
+
+
+void MarkCompactCollector::UnlinkEvacuationCandidates() {
+ int npages = evacuation_candidates_.length();
+ for (int i = 0; i < npages; i++) {
+ Page* p = evacuation_candidates_[i];
+ if (!p->IsEvacuationCandidate()) continue;
+ p->Unlink();
+ p->ClearSweptPrecisely();
+ p->ClearSweptConservatively();
+ }
+}
+
+
+void MarkCompactCollector::ReleaseEvacuationCandidates() {
+ int npages = evacuation_candidates_.length();
for (int i = 0; i < npages; i++) {
Page* p = evacuation_candidates_[i];
if (!p->IsEvacuationCandidate()) continue;
@@ -3051,10 +3549,11 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
p->set_scan_on_scavenge(false);
slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
p->ResetLiveBytes();
- space->ReleasePage(p);
+ space->ReleasePage(p, false);
}
evacuation_candidates_.Rewind(0);
compacting_ = false;
+ heap()->FreeQueuedChunks();
}
@@ -3334,6 +3833,33 @@ static inline Address StartOfLiveObject(Address block_address, uint32_t cell) {
}
+template<MarkCompactCollector::SweepingParallelism mode>
+static intptr_t Free(PagedSpace* space,
+ FreeList* free_list,
+ Address start,
+ int size) {
+ if (mode == MarkCompactCollector::SWEEP_SEQUENTIALLY) {
+ return space->Free(start, size);
+ } else {
+ return size - free_list->Free(start, size);
+ }
+}
+
+
+// Force instantiation of templatized SweepConservatively method for
+// SWEEP_SEQUENTIALLY mode.
+template intptr_t MarkCompactCollector::
+ SweepConservatively<MarkCompactCollector::SWEEP_SEQUENTIALLY>(
+ PagedSpace*, FreeList*, Page*);
+
+
+// Force instantiation of templatized SweepConservatively method for
+// SWEEP_IN_PARALLEL mode.
+template intptr_t MarkCompactCollector::
+ SweepConservatively<MarkCompactCollector::SWEEP_IN_PARALLEL>(
+ PagedSpace*, FreeList*, Page*);
+
+
// Sweeps a space conservatively. After this has been done the larger free
// spaces have been put on the free list and the smaller ones have been
// ignored and left untouched. A free space is always either ignored or put
@@ -3341,91 +3867,87 @@ static inline Address StartOfLiveObject(Address block_address, uint32_t cell) {
// because it means that any FreeSpace maps left actually describe a region of
// memory that can be ignored when scanning. Dead objects other than free
// spaces will not contain the free space map.
-intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) {
+template<MarkCompactCollector::SweepingParallelism mode>
+intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space,
+ FreeList* free_list,
+ Page* p) {
ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept());
- MarkBit::CellType* cells = p->markbits()->cells();
- p->MarkSweptConservatively();
-
- int last_cell_index =
- Bitmap::IndexToCell(
- Bitmap::CellAlignIndex(
- p->AddressToMarkbitIndex(p->area_end())));
+ ASSERT((mode == MarkCompactCollector::SWEEP_IN_PARALLEL &&
+ free_list != NULL) ||
+ (mode == MarkCompactCollector::SWEEP_SEQUENTIALLY &&
+ free_list == NULL));
- int cell_index =
- Bitmap::IndexToCell(
- Bitmap::CellAlignIndex(
- p->AddressToMarkbitIndex(p->area_start())));
+ p->MarkSweptConservatively();
intptr_t freed_bytes = 0;
-
- // This is the start of the 32 word block that we are currently looking at.
- Address block_address = p->area_start();
+ size_t size = 0;
// Skip over all the dead objects at the start of the page and mark them free.
- for (;
- cell_index < last_cell_index;
- cell_index++, block_address += 32 * kPointerSize) {
- if (cells[cell_index] != 0) break;
- }
- size_t size = block_address - p->area_start();
- if (cell_index == last_cell_index) {
- freed_bytes += static_cast<int>(space->Free(p->area_start(),
- static_cast<int>(size)));
+ Address cell_base = 0;
+ MarkBit::CellType* cell = NULL;
+ MarkBitCellIterator it(p);
+ for (; !it.Done(); it.Advance()) {
+ cell_base = it.CurrentCellBase();
+ cell = it.CurrentCell();
+ if (*cell != 0) break;
+ }
+
+ if (it.Done()) {
+ size = p->area_end() - p->area_start();
+ freed_bytes += Free<mode>(space, free_list, p->area_start(),
+ static_cast<int>(size));
ASSERT_EQ(0, p->LiveBytes());
return freed_bytes;
}
+
// Grow the size of the start-of-page free space a little to get up to the
// first live object.
- Address free_end = StartOfLiveObject(block_address, cells[cell_index]);
+ Address free_end = StartOfLiveObject(cell_base, *cell);
// Free the first free space.
size = free_end - p->area_start();
- freed_bytes += space->Free(p->area_start(),
- static_cast<int>(size));
+ freed_bytes += Free<mode>(space, free_list, p->area_start(),
+ static_cast<int>(size));
+
// The start of the current free area is represented in undigested form by
// the address of the last 32-word section that contained a live object and
// the marking bitmap for that cell, which describes where the live object
// started. Unless we find a large free space in the bitmap we will not
// digest this pair into a real address. We start the iteration here at the
// first word in the marking bit map that indicates a live object.
- Address free_start = block_address;
- uint32_t free_start_cell = cells[cell_index];
-
- for ( ;
- cell_index < last_cell_index;
- cell_index++, block_address += 32 * kPointerSize) {
- ASSERT((unsigned)cell_index ==
- Bitmap::IndexToCell(
- Bitmap::CellAlignIndex(
- p->AddressToMarkbitIndex(block_address))));
- uint32_t cell = cells[cell_index];
- if (cell != 0) {
+ Address free_start = cell_base;
+ MarkBit::CellType free_start_cell = *cell;
+
+ for (; !it.Done(); it.Advance()) {
+ cell_base = it.CurrentCellBase();
+ cell = it.CurrentCell();
+ if (*cell != 0) {
// We have a live object. Check approximately whether it is more than 32
// words since the last live object.
- if (block_address - free_start > 32 * kPointerSize) {
+ if (cell_base - free_start > 32 * kPointerSize) {
free_start = DigestFreeStart(free_start, free_start_cell);
- if (block_address - free_start > 32 * kPointerSize) {
+ if (cell_base - free_start > 32 * kPointerSize) {
// Now that we know the exact start of the free space it still looks
// like we have a large enough free space to be worth bothering with,
// so now we need to find the start of the first live object at the
// end of the free space.
- free_end = StartOfLiveObject(block_address, cell);
- freed_bytes += space->Free(free_start,
- static_cast<int>(free_end - free_start));
+ free_end = StartOfLiveObject(cell_base, *cell);
+ freed_bytes += Free<mode>(space, free_list, free_start,
+ static_cast<int>(free_end - free_start));
}
}
// Update our undigested record of where the current free area started.
- free_start = block_address;
- free_start_cell = cell;
+ free_start = cell_base;
+ free_start_cell = *cell;
// Clear marking bits for current cell.
- cells[cell_index] = 0;
+ *cell = 0;
}
}
// Handle the free space at the end of the page.
- if (block_address - free_start > 32 * kPointerSize) {
+ if (cell_base - free_start > 32 * kPointerSize) {
free_start = DigestFreeStart(free_start, free_start_cell);
- freed_bytes += space->Free(free_start,
- static_cast<int>(block_address - free_start));
+ freed_bytes += Free<mode>(space, free_list, free_start,
+ static_cast<int>(p->area_end() - free_start));
}
p->ResetLiveBytes();
@@ -3433,34 +3955,48 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) {
}
+void MarkCompactCollector::SweepInParallel(PagedSpace* space,
+ FreeList* private_free_list,
+ FreeList* free_list) {
+ PageIterator it(space);
+ while (it.has_next()) {
+ Page* p = it.next();
+
+ if (p->TryParallelSweeping()) {
+ SweepConservatively<SWEEP_IN_PARALLEL>(space, private_free_list, p);
+ free_list->Concatenate(private_free_list);
+ }
+ }
+}
+
+
void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
space->set_was_swept_conservatively(sweeper == CONSERVATIVE ||
- sweeper == LAZY_CONSERVATIVE);
-
+ sweeper == LAZY_CONSERVATIVE ||
+ sweeper == PARALLEL_CONSERVATIVE ||
+ sweeper == CONCURRENT_CONSERVATIVE);
space->ClearStats();
PageIterator it(space);
- intptr_t freed_bytes = 0;
int pages_swept = 0;
- intptr_t newspace_size = space->heap()->new_space()->Size();
bool lazy_sweeping_active = false;
bool unused_page_present = false;
+ bool parallel_sweeping_active = false;
while (it.has_next()) {
Page* p = it.next();
+ ASSERT(p->parallel_sweeping() == 0);
+ ASSERT(!p->IsEvacuationCandidate());
+
// Clear sweeping flags indicating that marking bits are still intact.
p->ClearSweptPrecisely();
p->ClearSweptConservatively();
- if (p->IsEvacuationCandidate()) {
- ASSERT(evacuation_candidates_.length() > 0);
- continue;
- }
-
if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
// Will be processed in EvacuateNewSpaceAndCandidates.
+ ASSERT(evacuation_candidates_.length() > 0);
continue;
}
@@ -3474,46 +4010,58 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
// Adjust unswept free bytes because releasing a page expects said
// counter to be accurate for unswept pages.
space->IncreaseUnsweptFreeBytes(p);
- space->ReleasePage(p);
+ space->ReleasePage(p, true);
continue;
}
unused_page_present = true;
}
- if (lazy_sweeping_active) {
- if (FLAG_gc_verbose) {
- PrintF("Sweeping 0x%" V8PRIxPTR " lazily postponed.\n",
- reinterpret_cast<intptr_t>(p));
- }
- space->IncreaseUnsweptFreeBytes(p);
- continue;
- }
-
switch (sweeper) {
case CONSERVATIVE: {
if (FLAG_gc_verbose) {
PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n",
reinterpret_cast<intptr_t>(p));
}
- SweepConservatively(space, p);
+ SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
pages_swept++;
break;
}
case LAZY_CONSERVATIVE: {
- if (FLAG_gc_verbose) {
- PrintF("Sweeping 0x%" V8PRIxPTR " conservatively as needed.\n",
- reinterpret_cast<intptr_t>(p));
- }
- freed_bytes += SweepConservatively(space, p);
- pages_swept++;
- if (freed_bytes > 2 * newspace_size) {
+ if (lazy_sweeping_active) {
+ if (FLAG_gc_verbose) {
+ PrintF("Sweeping 0x%" V8PRIxPTR " lazily postponed.\n",
+ reinterpret_cast<intptr_t>(p));
+ }
+ space->IncreaseUnsweptFreeBytes(p);
+ } else {
+ if (FLAG_gc_verbose) {
+ PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n",
+ reinterpret_cast<intptr_t>(p));
+ }
+ SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
+ pages_swept++;
space->SetPagesToSweep(p->next_page());
lazy_sweeping_active = true;
+ }
+ break;
+ }
+ case CONCURRENT_CONSERVATIVE:
+ case PARALLEL_CONSERVATIVE: {
+ if (!parallel_sweeping_active) {
+ if (FLAG_gc_verbose) {
+ PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n",
+ reinterpret_cast<intptr_t>(p));
+ }
+ SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
+ pages_swept++;
+ parallel_sweeping_active = true;
} else {
if (FLAG_gc_verbose) {
- PrintF("Only %" V8PRIdPTR " bytes freed. Still sweeping.\n",
- freed_bytes);
+ PrintF("Sweeping 0x%" V8PRIxPTR " conservatively in parallel.\n",
+ reinterpret_cast<intptr_t>(p));
}
+ p->set_parallel_sweeping(1);
+ space->IncreaseUnsweptFreeBytes(p);
}
break;
}
@@ -3554,20 +4102,39 @@ void MarkCompactCollector::SweepSpaces() {
#endif
SweeperType how_to_sweep =
FLAG_lazy_sweeping ? LAZY_CONSERVATIVE : CONSERVATIVE;
+ if (FLAG_parallel_sweeping) how_to_sweep = PARALLEL_CONSERVATIVE;
+ if (FLAG_concurrent_sweeping) how_to_sweep = CONCURRENT_CONSERVATIVE;
if (FLAG_expose_gc) how_to_sweep = CONSERVATIVE;
if (sweep_precisely_) how_to_sweep = PRECISE;
+
+ // Unlink evacuation candidates before sweeper threads access the list of
+ // pages to avoid a race condition.
+ UnlinkEvacuationCandidates();
+
// Noncompacting collections simply sweep the spaces to clear the mark
// bits and free the nonlive blocks (for old and map spaces). We sweep
// the map space last because freeing non-live maps overwrites them and
// the other spaces rely on possibly non-live maps to get the sizes for
// non-live objects.
+ SequentialSweepingScope scope(this);
SweepSpace(heap()->old_pointer_space(), how_to_sweep);
SweepSpace(heap()->old_data_space(), how_to_sweep);
+ if (how_to_sweep == PARALLEL_CONSERVATIVE ||
+ how_to_sweep == CONCURRENT_CONSERVATIVE) {
+ // TODO(hpayer): fix race with concurrent sweeper
+ StartSweeperThreads();
+ }
+
+ if (how_to_sweep == PARALLEL_CONSERVATIVE) {
+ WaitUntilSweepingCompleted();
+ }
+
RemoveDeadInvalidatedCode();
SweepSpace(heap()->code_space(), PRECISE);
SweepSpace(heap()->cell_space(), PRECISE);
+ SweepSpace(heap()->property_cell_space(), PRECISE);
EvacuateNewSpaceAndCandidates();
@@ -3578,18 +4145,33 @@ void MarkCompactCollector::SweepSpaces() {
// Deallocate unmarked objects and clear marked bits for marked objects.
heap_->lo_space()->FreeUnmarkedObjects();
+
+ // Deallocate evacuated candidate pages.
+ ReleaseEvacuationCandidates();
}
void MarkCompactCollector::EnableCodeFlushing(bool enable) {
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ if (isolate()->debug()->IsLoaded() ||
+ isolate()->debug()->has_break_points()) {
+ enable = false;
+ }
+#endif
+
if (enable) {
if (code_flusher_ != NULL) return;
- code_flusher_ = new CodeFlusher(heap()->isolate());
+ code_flusher_ = new CodeFlusher(isolate());
} else {
if (code_flusher_ == NULL) return;
+ code_flusher_->EvictAllCandidates();
delete code_flusher_;
code_flusher_ = NULL;
}
+
+ if (FLAG_trace_code_flushing) {
+ PrintF("[code-flushing is now %s]\n", enable ? "on" : "off");
+ }
}
@@ -3609,6 +4191,11 @@ void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj,
}
+Isolate* MarkCompactCollector::isolate() const {
+ return heap_->isolate();
+}
+
+
void MarkCompactCollector::Initialize() {
MarkCompactMarkingVisitor::Initialize();
IncrementalMarking::Initialize();
@@ -3690,7 +4277,7 @@ void MarkCompactCollector::RecordCodeEntrySlot(Address slot, Code* target) {
void MarkCompactCollector::RecordCodeTargetPatch(Address pc, Code* target) {
ASSERT(heap()->gc_state() == Heap::MARK_COMPACT);
if (is_compacting()) {
- Code* host = heap()->isolate()->inner_pointer_to_code_cache()->
+ Code* host = isolate()->inner_pointer_to_code_cache()->
GcSafeFindCodeForInnerPointer(pc);
MarkBit mark_bit = Marking::MarkBitFrom(host);
if (Marking::IsBlack(mark_bit)) {
@@ -3717,7 +4304,8 @@ void SlotsBuffer::UpdateSlots(Heap* heap) {
} else {
++slot_idx;
ASSERT(slot_idx < idx_);
- UpdateSlot(&v,
+ UpdateSlot(heap->isolate(),
+ &v,
DecodeSlotType(slot),
reinterpret_cast<Address>(slots_[slot_idx]));
}
@@ -3739,7 +4327,8 @@ void SlotsBuffer::UpdateSlotsWithFilter(Heap* heap) {
ASSERT(slot_idx < idx_);
Address pc = reinterpret_cast<Address>(slots_[slot_idx]);
if (!IsOnInvalidatedCodeObject(pc)) {
- UpdateSlot(&v,
+ UpdateSlot(heap->isolate(),
+ &v,
DecodeSlotType(slot),
reinterpret_cast<Address>(slots_[slot_idx]));
}
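Earlier in this file the diff adds SweepInParallel together with the templated Free helper, so a sweeper either returns memory straight to the space (SWEEP_SEQUENTIALLY) or accumulates it in a private FreeList that is later concatenated into the shared one (SWEEP_IN_PARALLEL). The standalone sketch below shows that claim-a-page, sweep-privately, merge-once pattern; it is not V8 code, the FreeList and Page types are invented stand-ins, and the mutex-based page claiming replaces Page::TryParallelSweeping purely to keep the example short.

#include <cstdio>
#include <mutex>
#include <thread>
#include <vector>

// Stand-in free list: only tracks how many bytes it holds.
struct FreeList {
  long free_bytes;
  FreeList() : free_bytes(0) {}
  void Free(long size) { free_bytes += size; }
  void Concatenate(FreeList* other) {
    free_bytes += other->free_bytes;
    other->free_bytes = 0;
  }
};

// Stand-in page: how many dead bytes a conservative sweep would recover.
struct Page {
  long dead_bytes;
  bool claimed;
};

// Each sweeper claims pages, sweeps them into a private free list, and only
// merges into the shared list at the end, so most work needs no shared state.
static void SweepInParallel(std::vector<Page>* pages,
                            std::mutex* claim_mutex,
                            FreeList* shared_free_list,
                            std::mutex* merge_mutex) {
  FreeList private_free_list;
  for (size_t i = 0; i < pages->size(); i++) {
    {
      std::lock_guard<std::mutex> lock(*claim_mutex);
      if ((*pages)[i].claimed) continue;   // another sweeper already took it
      (*pages)[i].claimed = true;
    }
    private_free_list.Free((*pages)[i].dead_bytes);  // "sweep" the page
  }
  std::lock_guard<std::mutex> lock(*merge_mutex);
  shared_free_list->Concatenate(&private_free_list);
}

int main() {
  std::vector<Page> pages;
  long sizes[] = {100, 250, 0, 4096};
  for (long s : sizes) {
    Page p = {s, false};
    pages.push_back(p);
  }
  FreeList shared;
  std::mutex claim_mutex, merge_mutex;
  std::thread t1(SweepInParallel, &pages, &claim_mutex, &shared, &merge_mutex);
  std::thread t2(SweepInParallel, &pages, &claim_mutex, &shared, &merge_mutex);
  t1.join();
  t2.join();
  std::printf("reclaimed %ld bytes\n", shared.free_bytes);  // 4446
  return 0;
}

Keeping the per-page work on a private list is what lets the two sweepers run without contending on the space's shared free list until the final Concatenate.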
diff --git a/deps/v8/src/mark-compact.h b/deps/v8/src/mark-compact.h
index 7c648000d..aea5e1cf6 100644
--- a/deps/v8/src/mark-compact.h
+++ b/deps/v8/src/mark-compact.h
@@ -53,59 +53,59 @@ class Marking {
: heap_(heap) {
}
- static inline MarkBit MarkBitFrom(Address addr);
+ INLINE(static MarkBit MarkBitFrom(Address addr));
- static inline MarkBit MarkBitFrom(HeapObject* obj) {
+ INLINE(static MarkBit MarkBitFrom(HeapObject* obj)) {
return MarkBitFrom(reinterpret_cast<Address>(obj));
}
// Impossible markbits: 01
static const char* kImpossibleBitPattern;
- static inline bool IsImpossible(MarkBit mark_bit) {
+ INLINE(static bool IsImpossible(MarkBit mark_bit)) {
return !mark_bit.Get() && mark_bit.Next().Get();
}
// Black markbits: 10 - this is required by the sweeper.
static const char* kBlackBitPattern;
- static inline bool IsBlack(MarkBit mark_bit) {
+ INLINE(static bool IsBlack(MarkBit mark_bit)) {
return mark_bit.Get() && !mark_bit.Next().Get();
}
// White markbits: 00 - this is required by the mark bit clearer.
static const char* kWhiteBitPattern;
- static inline bool IsWhite(MarkBit mark_bit) {
+ INLINE(static bool IsWhite(MarkBit mark_bit)) {
return !mark_bit.Get();
}
// Grey markbits: 11
static const char* kGreyBitPattern;
- static inline bool IsGrey(MarkBit mark_bit) {
+ INLINE(static bool IsGrey(MarkBit mark_bit)) {
return mark_bit.Get() && mark_bit.Next().Get();
}
- static inline void MarkBlack(MarkBit mark_bit) {
+ INLINE(static void MarkBlack(MarkBit mark_bit)) {
mark_bit.Set();
mark_bit.Next().Clear();
}
- static inline void BlackToGrey(MarkBit markbit) {
+ INLINE(static void BlackToGrey(MarkBit markbit)) {
markbit.Next().Set();
}
- static inline void WhiteToGrey(MarkBit markbit) {
+ INLINE(static void WhiteToGrey(MarkBit markbit)) {
markbit.Set();
markbit.Next().Set();
}
- static inline void GreyToBlack(MarkBit markbit) {
+ INLINE(static void GreyToBlack(MarkBit markbit)) {
markbit.Next().Clear();
}
- static inline void BlackToGrey(HeapObject* obj) {
+ INLINE(static void BlackToGrey(HeapObject* obj)) {
BlackToGrey(MarkBitFrom(obj));
}
- static inline void AnyToGrey(MarkBit markbit) {
+ INLINE(static void AnyToGrey(MarkBit markbit)) {
markbit.Set();
markbit.Next().Set();
}
@@ -194,7 +194,7 @@ class MarkingDeque {
// Push the (marked) object on the marking stack if there is room,
// otherwise mark the object as overflowed and wait for a rescan of the
// heap.
- inline void PushBlack(HeapObject* object) {
+ INLINE(void PushBlack(HeapObject* object)) {
ASSERT(object->IsHeapObject());
if (IsFull()) {
Marking::BlackToGrey(object);
@@ -206,7 +206,7 @@ class MarkingDeque {
}
}
- inline void PushGrey(HeapObject* object) {
+ INLINE(void PushGrey(HeapObject* object)) {
ASSERT(object->IsHeapObject());
if (IsFull()) {
SetOverflowed();
@@ -216,7 +216,7 @@ class MarkingDeque {
}
}
- inline HeapObject* Pop() {
+ INLINE(HeapObject* Pop()) {
ASSERT(!IsEmpty());
top_ = ((top_ - 1) & mask_);
HeapObject* object = array_[top_];
@@ -224,7 +224,7 @@ class MarkingDeque {
return object;
}
- inline void UnshiftGrey(HeapObject* object) {
+ INLINE(void UnshiftGrey(HeapObject* object)) {
ASSERT(object->IsHeapObject());
if (IsFull()) {
SetOverflowed();
@@ -366,10 +366,10 @@ class SlotsBuffer {
return buffer != NULL && buffer->chain_length_ >= kChainLengthThreshold;
}
- static bool AddTo(SlotsBufferAllocator* allocator,
- SlotsBuffer** buffer_address,
- ObjectSlot slot,
- AdditionMode mode) {
+ INLINE(static bool AddTo(SlotsBufferAllocator* allocator,
+ SlotsBuffer** buffer_address,
+ ObjectSlot slot,
+ AdditionMode mode)) {
SlotsBuffer* buffer = *buffer_address;
if (buffer == NULL || buffer->IsFull()) {
if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
@@ -406,9 +406,10 @@ class SlotsBuffer {
// CodeFlusher collects candidates for code flushing during marking and
// processes those candidates after marking has completed in order to
// reset those functions referencing code objects that would otherwise
-// be unreachable. Code objects can be referenced in two ways:
+// be unreachable. Code objects can be referenced in three ways:
// - SharedFunctionInfo references unoptimized code.
// - JSFunction references either unoptimized or optimized code.
+// - OptimizedCodeMap references optimized code.
// We are not allowed to flush unoptimized code for functions that got
// optimized or inlined into optimized code, because we might bailout
// into the unoptimized code again during deoptimization.
@@ -417,28 +418,61 @@ class CodeFlusher {
explicit CodeFlusher(Isolate* isolate)
: isolate_(isolate),
jsfunction_candidates_head_(NULL),
- shared_function_info_candidates_head_(NULL) {}
+ shared_function_info_candidates_head_(NULL),
+ optimized_code_map_holder_head_(NULL) {}
void AddCandidate(SharedFunctionInfo* shared_info) {
- SetNextCandidate(shared_info, shared_function_info_candidates_head_);
- shared_function_info_candidates_head_ = shared_info;
+ if (GetNextCandidate(shared_info) == NULL) {
+ SetNextCandidate(shared_info, shared_function_info_candidates_head_);
+ shared_function_info_candidates_head_ = shared_info;
+ }
}
void AddCandidate(JSFunction* function) {
ASSERT(function->code() == function->shared()->code());
- ASSERT(function->next_function_link()->IsUndefined());
- SetNextCandidate(function, jsfunction_candidates_head_);
- jsfunction_candidates_head_ = function;
+ if (GetNextCandidate(function)->IsUndefined()) {
+ SetNextCandidate(function, jsfunction_candidates_head_);
+ jsfunction_candidates_head_ = function;
+ }
}
+ void AddOptimizedCodeMap(SharedFunctionInfo* code_map_holder) {
+ if (GetNextCodeMap(code_map_holder)->IsUndefined()) {
+ SetNextCodeMap(code_map_holder, optimized_code_map_holder_head_);
+ optimized_code_map_holder_head_ = code_map_holder;
+ }
+ }
+
+ void EvictOptimizedCodeMap(SharedFunctionInfo* code_map_holder);
+ void EvictCandidate(SharedFunctionInfo* shared_info);
+ void EvictCandidate(JSFunction* function);
+
void ProcessCandidates() {
+ ProcessOptimizedCodeMaps();
ProcessSharedFunctionInfoCandidates();
ProcessJSFunctionCandidates();
}
+ void EvictAllCandidates() {
+ EvictOptimizedCodeMaps();
+ EvictJSFunctionCandidates();
+ EvictSharedFunctionInfoCandidates();
+ }
+
+ void IteratePointersToFromSpace(ObjectVisitor* v);
+
private:
+ void ProcessOptimizedCodeMaps();
void ProcessJSFunctionCandidates();
void ProcessSharedFunctionInfoCandidates();
+ void EvictOptimizedCodeMaps();
+ void EvictJSFunctionCandidates();
+ void EvictSharedFunctionInfoCandidates();
+
+ static JSFunction** GetNextCandidateSlot(JSFunction* candidate) {
+ return reinterpret_cast<JSFunction**>(
+ HeapObject::RawField(candidate, JSFunction::kNextFunctionLinkOffset));
+ }
static JSFunction* GetNextCandidate(JSFunction* candidate) {
Object* next_candidate = candidate->next_function_link();
@@ -469,9 +503,27 @@ class CodeFlusher {
candidate->code()->set_gc_metadata(NULL, SKIP_WRITE_BARRIER);
}
+ static SharedFunctionInfo* GetNextCodeMap(SharedFunctionInfo* holder) {
+ FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
+ Object* next_map = code_map->get(SharedFunctionInfo::kNextMapIndex);
+ return reinterpret_cast<SharedFunctionInfo*>(next_map);
+ }
+
+ static void SetNextCodeMap(SharedFunctionInfo* holder,
+ SharedFunctionInfo* next_holder) {
+ FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
+ code_map->set(SharedFunctionInfo::kNextMapIndex, next_holder);
+ }
+
+ static void ClearNextCodeMap(SharedFunctionInfo* holder) {
+ FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
+ code_map->set_undefined(SharedFunctionInfo::kNextMapIndex);
+ }
+
Isolate* isolate_;
JSFunction* jsfunction_candidates_head_;
SharedFunctionInfo* shared_function_info_candidates_head_;
+ SharedFunctionInfo* optimized_code_map_holder_head_;
DISALLOW_COPY_AND_ASSIGN(CodeFlusher);
};
@@ -519,6 +571,8 @@ class MarkCompactCollector {
static void Initialize();
+ void TearDown();
+
void CollectEvacuationCandidates(PagedSpace* space);
void AddEvacuationCandidate(Page* p);
@@ -560,6 +614,7 @@ class MarkCompactCollector {
static inline bool IsMarked(Object* obj);
inline Heap* heap() const { return heap_; }
+ inline Isolate* isolate() const;
CodeFlusher* code_flusher() { return code_flusher_; }
inline bool is_code_flushing_enabled() const { return code_flusher_ != NULL; }
@@ -568,18 +623,30 @@ class MarkCompactCollector {
enum SweeperType {
CONSERVATIVE,
LAZY_CONSERVATIVE,
+ PARALLEL_CONSERVATIVE,
+ CONCURRENT_CONSERVATIVE,
PRECISE
};
+ enum SweepingParallelism {
+ SWEEP_SEQUENTIALLY,
+ SWEEP_IN_PARALLEL
+ };
+
#ifdef VERIFY_HEAP
void VerifyMarkbitsAreClean();
static void VerifyMarkbitsAreClean(PagedSpace* space);
static void VerifyMarkbitsAreClean(NewSpace* space);
+ void VerifyWeakEmbeddedObjectsInOptimizedCode();
+ void VerifyOmittedMapChecks();
#endif
// Sweep a single page from the given space conservatively.
// Returns the number of reclaimed bytes.
- static intptr_t SweepConservatively(PagedSpace* space, Page* p);
+ template<SweepingParallelism type>
+ static intptr_t SweepConservatively(PagedSpace* space,
+ FreeList* free_list,
+ Page* p);
INLINE(static bool ShouldSkipEvacuationSlotRecording(Object** anchor)) {
return Page::FromAddress(reinterpret_cast<Address>(anchor))->
@@ -596,7 +663,7 @@ class MarkCompactCollector {
IsEvacuationCandidate();
}
- void EvictEvacuationCandidate(Page* page) {
+ INLINE(void EvictEvacuationCandidate(Page* page)) {
if (FLAG_trace_fragmentation) {
PrintF("Page %p is too popular. Disabling evacuation.\n",
reinterpret_cast<void*>(page));
@@ -630,25 +697,61 @@ class MarkCompactCollector {
bool TryPromoteObject(HeapObject* object, int object_size);
- inline Object* encountered_weak_maps() { return encountered_weak_maps_; }
- inline void set_encountered_weak_maps(Object* weak_map) {
- encountered_weak_maps_ = weak_map;
+ inline Object* encountered_weak_collections() {
+ return encountered_weak_collections_;
+ }
+ inline void set_encountered_weak_collections(Object* weak_collection) {
+ encountered_weak_collections_ = weak_collection;
}
void InvalidateCode(Code* code);
void ClearMarkbits();
+ bool abort_incremental_marking() const { return abort_incremental_marking_; }
+
bool is_compacting() const { return compacting_; }
+ MarkingParity marking_parity() { return marking_parity_; }
+
+ // Concurrent and parallel sweeping support.
+ void SweepInParallel(PagedSpace* space,
+ FreeList* private_free_list,
+ FreeList* free_list);
+
+ void WaitUntilSweepingCompleted();
+
+ intptr_t StealMemoryFromSweeperThreads(PagedSpace* space);
+
+ bool AreSweeperThreadsActivated();
+
+ bool IsConcurrentSweepingInProgress();
+
+ void set_sequential_sweeping(bool sequential_sweeping) {
+ sequential_sweeping_ = sequential_sweeping;
+ }
+
+ bool sequential_sweeping() const {
+ return sequential_sweeping_;
+ }
+
+ // Mark the global table which maps weak objects to dependent code without
+ // marking its contents.
+ void MarkWeakObjectToCodeTable();
+
private:
MarkCompactCollector();
~MarkCompactCollector();
bool MarkInvalidatedCode();
+ bool WillBeDeoptimized(Code* code);
void RemoveDeadInvalidatedCode();
void ProcessInvalidatedCode(ObjectVisitor* visitor);
+ void UnlinkEvacuationCandidates();
+ void ReleaseEvacuationCandidates();
+
+ void StartSweeperThreads();
#ifdef DEBUG
enum CollectorState {
@@ -673,12 +776,19 @@ class MarkCompactCollector {
bool abort_incremental_marking_;
+ MarkingParity marking_parity_;
+
// True if we are collecting slots to perform evacuation from evacuation
// candidates.
bool compacting_;
bool was_marked_incrementally_;
+ // True if concurrent or parallel sweeping is currently in progress.
+ bool sweeping_pending_;
+
+ bool sequential_sweeping_;
+
// A pointer to the current stack-allocated GC tracer object during a full
// collection (NULL before and after).
GCTracer* tracer_;
@@ -727,26 +837,31 @@ class MarkCompactCollector {
// Mark the heap roots and all objects reachable from them.
void MarkRoots(RootMarkingVisitor* visitor);
- // Mark the symbol table specially. References to symbols from the
- // symbol table are weak.
- void MarkSymbolTable();
-
- // Mark objects in object groups that have at least one object in the
- // group marked.
- void MarkObjectGroups();
+ // Mark the string table specially. References to internalized strings from
+ // the string table are weak.
+ void MarkStringTable(RootMarkingVisitor* visitor);
// Mark objects in implicit references groups if their parent object
// is marked.
void MarkImplicitRefGroups();
- // Mark all objects which are reachable due to host application
- // logic like object groups or implicit references' groups.
- void ProcessExternalMarking();
-
// Mark objects reachable (transitively) from objects in the marking stack
// or overflowed in the heap.
void ProcessMarkingDeque();
+ // Mark objects reachable (transitively) from objects in the marking stack
+ // or overflowed in the heap. This respects references that are considered
+ // only in the final atomic marking pause, including the following:
+ // - Processing of objects reachable through Harmony WeakMaps.
+ // - Objects reachable due to host application logic like object groups
+ // or implicit references' groups.
+ void ProcessEphemeralMarking(ObjectVisitor* visitor);
+
+ // If the call-site of the top optimized code was not prepared for
+ // deoptimization, then treat the maps in the code as strong pointers,
+ // otherwise a map can die and deoptimize the code.
+ void ProcessTopOptimizedFrame(ObjectVisitor* visitor);
+
// Mark objects reachable (transitively) from objects in the marking
// stack. This function empties the marking stack, but may leave
// overflowed objects in the heap, in which case the marking stack's
@@ -765,28 +880,32 @@ class MarkCompactCollector {
// Callback function for telling whether the object *p is an unmarked
// heap object.
static bool IsUnmarkedHeapObject(Object** p);
+ static bool IsUnmarkedHeapObjectWithHeap(Heap* heap, Object** p);
// Map transitions from a live map to a dead map must be killed.
// We replace them with a null descriptor, with the same key.
- void ClearNonLiveTransitions();
+ void ClearNonLiveReferences();
void ClearNonLivePrototypeTransitions(Map* map);
void ClearNonLiveMapTransitions(Map* map, MarkBit map_mark);
+ void ClearAndDeoptimizeDependentCode(DependentCode* dependent_code);
+ void ClearNonLiveDependentCode(DependentCode* dependent_code);
+
// Marking detaches initial maps from SharedFunctionInfo objects
// to make this reference weak. We need to reattach initial maps
// after collection. This is done either during the
// ClearNonLiveReferences pass or by calling this function.
void ReattachInitialMaps();
- // Mark all values associated with reachable keys in weak maps encountered
- // so far. This might push new object or even new weak maps onto the
- // marking stack.
- void ProcessWeakMaps();
+ // Mark all values associated with reachable keys in weak collections
+ // encountered so far. This might push new objects or even new weak
+ // collections onto the marking stack.
+ void ProcessWeakCollections();
// After all reachable objects have been marked, those weak map entries
// with an unreachable key are removed from all encountered weak maps.
// The linked list of all encountered weak maps is destroyed.
- void ClearWeakMaps();
+ void ClearWeakCollections();
// -----------------------------------------------------------------------
// Phase 2: Sweeping to clear mark bits and free non-live objects for
@@ -804,6 +923,9 @@ class MarkCompactCollector {
// regions to each space's free list.
void SweepSpaces();
+ int DiscoverAndPromoteBlackObjectsOnPage(NewSpace* new_space,
+ NewSpacePage* p);
+
void EvacuateNewSpace();
void EvacuateLiveObjectsFromPage(Page* p);
@@ -825,7 +947,8 @@ class MarkCompactCollector {
Heap* heap_;
MarkingDeque marking_deque_;
CodeFlusher* code_flusher_;
- Object* encountered_weak_maps_;
+ Object* encountered_weak_collections_;
+ bool have_code_to_deoptimize_;
List<Page*> evacuation_candidates_;
List<Code*> invalidated_code_;
@@ -834,6 +957,66 @@ class MarkCompactCollector {
};
+class MarkBitCellIterator BASE_EMBEDDED {
+ public:
+ explicit MarkBitCellIterator(MemoryChunk* chunk)
+ : chunk_(chunk) {
+ last_cell_index_ = Bitmap::IndexToCell(
+ Bitmap::CellAlignIndex(
+ chunk_->AddressToMarkbitIndex(chunk_->area_end())));
+ cell_base_ = chunk_->area_start();
+ cell_index_ = Bitmap::IndexToCell(
+ Bitmap::CellAlignIndex(
+ chunk_->AddressToMarkbitIndex(cell_base_)));
+ cells_ = chunk_->markbits()->cells();
+ }
+
+ inline bool Done() { return cell_index_ == last_cell_index_; }
+
+ inline bool HasNext() { return cell_index_ < last_cell_index_ - 1; }
+
+ inline MarkBit::CellType* CurrentCell() {
+ ASSERT(cell_index_ == Bitmap::IndexToCell(Bitmap::CellAlignIndex(
+ chunk_->AddressToMarkbitIndex(cell_base_))));
+ return &cells_[cell_index_];
+ }
+
+ inline Address CurrentCellBase() {
+ ASSERT(cell_index_ == Bitmap::IndexToCell(Bitmap::CellAlignIndex(
+ chunk_->AddressToMarkbitIndex(cell_base_))));
+ return cell_base_;
+ }
+
+ inline void Advance() {
+ cell_index_++;
+ cell_base_ += 32 * kPointerSize;
+ }
+
+ private:
+ MemoryChunk* chunk_;
+ MarkBit::CellType* cells_;
+ unsigned int last_cell_index_;
+ unsigned int cell_index_;
+ Address cell_base_;
+};
+
+
+class SequentialSweepingScope BASE_EMBEDDED {
+ public:
+ explicit SequentialSweepingScope(MarkCompactCollector *collector) :
+ collector_(collector) {
+ collector_->set_sequential_sweeping(true);
+ }
+
+ ~SequentialSweepingScope() {
+ collector_->set_sequential_sweeping(false);
+ }
+
+ private:
+ MarkCompactCollector* collector_;
+};
+
+
const char* AllocationSpaceName(AllocationSpace space);
} } // namespace v8::internal
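The MarkBitCellIterator added above packages the bookkeeping that the sweep loops in mark-compact.cc previously repeated by hand: walk a page's mark bitmap one 32-bit cell at a time, keep the cell's base address in step (each cell covers 32 * kPointerSize bytes), and decode the set bits of a non-empty cell into object start offsets. Below is a minimal standalone sketch of that traversal; it is not V8 code, the 8-byte word size and the FakePage type are assumptions for illustration, and CellToObjectStarts is a simplified stand-in for MarkWordToObjectStarts.

#include <cstdint>
#include <cstdio>
#include <vector>

static const int kPointerSize = 8;   // assumed word size
static const int kBitsPerCell = 32;  // one mark bit per word, 32 words per cell

struct FakePage {
  uintptr_t area_start;
  std::vector<uint32_t> cells;       // mark bitmap, one 32-bit cell per 32 words
};

// Decode the set bits of one cell into word offsets
// (simplified stand-in for MarkWordToObjectStarts).
static int CellToObjectStarts(uint32_t cell, int offsets[]) {
  int count = 0;
  for (int bit = 0; bit < kBitsPerCell; bit++) {
    if (cell & (1u << bit)) offsets[count++] = bit;
  }
  return count;
}

int main() {
  FakePage page;
  page.area_start = 0x10000;
  page.cells.assign(4, 0);
  page.cells[0] = (1u << 0) | (1u << 5);   // two live objects in the first cell
  page.cells[2] = (1u << 17);              // one live object in the third cell

  // Advance cell by cell, moving the base address by 32 words per step,
  // the same stride MarkBitCellIterator::Advance() uses.
  uintptr_t cell_base = page.area_start;
  for (size_t i = 0; i < page.cells.size();
       i++, cell_base += kBitsPerCell * kPointerSize) {
    uint32_t cell = page.cells[i];
    if (cell == 0) continue;               // nothing live under this cell
    int offsets[kBitsPerCell];
    int live = CellToObjectStarts(cell, offsets);
    for (int j = 0; j < live; j++) {
      uintptr_t object_addr = cell_base + offsets[j] * kPointerSize;
      std::printf("live object starts at 0x%lx\n", (unsigned long)object_addr);
    }
    page.cells[i] = 0;                     // clear the marking bits for this cell
  }
  return 0;
}

The loop bound over page.cells plays the role of Done() in the real iterator, which compares the running cell index against the last cell for the page's area.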
diff --git a/deps/v8/src/math.js b/deps/v8/src/math.js
index aee56af4f..efab63a18 100644
--- a/deps/v8/src/math.js
+++ b/deps/v8/src/math.js
@@ -25,6 +25,9 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// This file relies on the fact that the following declarations have been made
+// in runtime.js:
+// var $Object = global.Object;
// Keep reference to original values of some global properties. This
// has the added benefit that the code in this file is isolated from
@@ -35,67 +38,58 @@ var $abs = MathAbs;
// Instance class name can only be set on functions. That is the only
// purpose for MathConstructor.
function MathConstructor() {}
-%FunctionSetInstanceClassName(MathConstructor, 'Math');
var $Math = new MathConstructor();
-$Math.__proto__ = $Object.prototype;
-%SetProperty(global, "Math", $Math, DONT_ENUM);
+
+// -------------------------------------------------------------------
// ECMA 262 - 15.8.2.1
function MathAbs(x) {
if (%_IsSmi(x)) return x >= 0 ? x : -x;
- if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
+ x = TO_NUMBER_INLINE(x);
if (x === 0) return 0; // To handle -0.
return x > 0 ? x : -x;
}
// ECMA 262 - 15.8.2.2
function MathAcos(x) {
- if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
- return %Math_acos(x);
+ return %Math_acos(TO_NUMBER_INLINE(x));
}
// ECMA 262 - 15.8.2.3
function MathAsin(x) {
- if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
- return %Math_asin(x);
+ return %Math_asin(TO_NUMBER_INLINE(x));
}
// ECMA 262 - 15.8.2.4
function MathAtan(x) {
- if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
- return %Math_atan(x);
+ return %Math_atan(TO_NUMBER_INLINE(x));
}
// ECMA 262 - 15.8.2.5
// The naming of y and x matches the spec, as does the order in which
// ToNumber (valueOf) is called.
function MathAtan2(y, x) {
- if (!IS_NUMBER(y)) y = NonNumberToNumber(y);
- if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
- return %Math_atan2(y, x);
+ return %Math_atan2(TO_NUMBER_INLINE(y), TO_NUMBER_INLINE(x));
}
// ECMA 262 - 15.8.2.6
function MathCeil(x) {
- if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
- return %Math_ceil(x);
+ return %Math_ceil(TO_NUMBER_INLINE(x));
}
// ECMA 262 - 15.8.2.7
function MathCos(x) {
- if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
- return %_MathCos(x);
+ return %_MathCos(TO_NUMBER_INLINE(x));
}
// ECMA 262 - 15.8.2.8
function MathExp(x) {
- if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
- return %Math_exp(x);
+ return %Math_exp(TO_NUMBER_INLINE(x));
}
// ECMA 262 - 15.8.2.9
function MathFloor(x) {
- if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
+ x = TO_NUMBER_INLINE(x);
// It's more common to call this with a positive number that's out
// of range than negative numbers; check the upper bound first.
if (x < 0x80000000 && x > 0) {
@@ -111,16 +105,15 @@ function MathFloor(x) {
// ECMA 262 - 15.8.2.10
function MathLog(x) {
- if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
- return %_MathLog(x);
+ return %_MathLog(TO_NUMBER_INLINE(x));
}
// ECMA 262 - 15.8.2.11
function MathMax(arg1, arg2) { // length == 2
var length = %_ArgumentsLength();
if (length == 2) {
- if (!IS_NUMBER(arg1)) arg1 = NonNumberToNumber(arg1);
- if (!IS_NUMBER(arg2)) arg2 = NonNumberToNumber(arg2);
+ arg1 = TO_NUMBER_INLINE(arg1);
+ arg2 = TO_NUMBER_INLINE(arg2);
if (arg2 > arg1) return arg2;
if (arg1 > arg2) return arg1;
if (arg1 == arg2) {
@@ -129,21 +122,18 @@ function MathMax(arg1, arg2) { // length == 2
return (arg1 == 0 && !%_IsSmi(arg1) && 1 / arg1 < 0) ? arg2 : arg1;
}
// All comparisons failed, one of the arguments must be NaN.
- return 0/0; // Compiler constant-folds this to NaN.
- }
- if (length == 0) {
- return -1/0; // Compiler constant-folds this to -Infinity.
+ return NAN;
}
- var r = arg1;
- if (!IS_NUMBER(r)) r = NonNumberToNumber(r);
- if (NUMBER_IS_NAN(r)) return r;
- for (var i = 1; i < length; i++) {
+ var r = -INFINITY;
+ for (var i = 0; i < length; i++) {
var n = %_Arguments(i);
if (!IS_NUMBER(n)) n = NonNumberToNumber(n);
- if (NUMBER_IS_NAN(n)) return n;
// Make sure +0 is considered greater than -0. -0 is never a Smi, +0 can be
// a Smi or heap number.
- if (n > r || (r == 0 && n == 0 && !%_IsSmi(r) && 1 / r < 0)) r = n;
+ if (NUMBER_IS_NAN(n) || n > r ||
+ (r == 0 && n == 0 && !%_IsSmi(r) && 1 / r < 0)) {
+ r = n;
+ }
}
return r;
}
@@ -152,8 +142,8 @@ function MathMax(arg1, arg2) { // length == 2
function MathMin(arg1, arg2) { // length == 2
var length = %_ArgumentsLength();
if (length == 2) {
- if (!IS_NUMBER(arg1)) arg1 = NonNumberToNumber(arg1);
- if (!IS_NUMBER(arg2)) arg2 = NonNumberToNumber(arg2);
+ arg1 = TO_NUMBER_INLINE(arg1);
+ arg2 = TO_NUMBER_INLINE(arg2);
if (arg2 > arg1) return arg1;
if (arg1 > arg2) return arg2;
if (arg1 == arg2) {
@@ -162,30 +152,25 @@ function MathMin(arg1, arg2) { // length == 2
return (arg1 == 0 && !%_IsSmi(arg1) && 1 / arg1 < 0) ? arg1 : arg2;
}
// All comparisons failed, one of the arguments must be NaN.
- return 0/0; // Compiler constant-folds this to NaN.
- }
- if (length == 0) {
- return 1/0; // Compiler constant-folds this to Infinity.
+ return NAN;
}
- var r = arg1;
- if (!IS_NUMBER(r)) r = NonNumberToNumber(r);
- if (NUMBER_IS_NAN(r)) return r;
- for (var i = 1; i < length; i++) {
+ var r = INFINITY;
+ for (var i = 0; i < length; i++) {
var n = %_Arguments(i);
if (!IS_NUMBER(n)) n = NonNumberToNumber(n);
- if (NUMBER_IS_NAN(n)) return n;
// Make sure -0 is considered less than +0. -0 is never a Smi, +0 can be a
// Smi or a heap number.
- if (n < r || (r == 0 && n == 0 && !%_IsSmi(n) && 1 / n < 0)) r = n;
+ if (NUMBER_IS_NAN(n) || n < r ||
+ (r == 0 && n == 0 && !%_IsSmi(n) && 1 / n < 0)) {
+ r = n;
+ }
}
return r;
}
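
A quick illustration of the semantics the rewritten loops above preserve (plain user-level JavaScript, shown only for reference):

  // NaN anywhere poisons the result, and -0 sorts below +0.
  Math.max(1, 2, NaN);   // NaN
  Math.min(NaN, 1, 2);   // NaN
  Math.max(-0, +0);      // +0  (1 / result === Infinity)
  Math.min(+0, -0);      // -0  (1 / result === -Infinity)
  Math.max();            // -Infinity, the identity for max
  Math.min();            // +Infinity, the identity for min
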
// ECMA 262 - 15.8.2.13
function MathPow(x, y) {
- if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
- if (!IS_NUMBER(y)) y = NonNumberToNumber(y);
- return %_MathPow(x, y);
+ return %_MathPow(TO_NUMBER_INLINE(x), TO_NUMBER_INLINE(y));
}
// ECMA 262 - 15.8.2.14
@@ -195,26 +180,27 @@ function MathRandom() {
// ECMA 262 - 15.8.2.15
function MathRound(x) {
- if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
- return %RoundNumber(x);
+ return %RoundNumber(TO_NUMBER_INLINE(x));
}
// ECMA 262 - 15.8.2.16
function MathSin(x) {
- if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
- return %_MathSin(x);
+ return %_MathSin(TO_NUMBER_INLINE(x));
}
// ECMA 262 - 15.8.2.17
function MathSqrt(x) {
- if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
- return %_MathSqrt(x);
+ return %_MathSqrt(TO_NUMBER_INLINE(x));
}
// ECMA 262 - 15.8.2.18
function MathTan(x) {
- if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
- return %_MathTan(x);
+ return %_MathTan(TO_NUMBER_INLINE(x));
+}
+
+// Non-standard extension.
+function MathImul(x, y) {
+ return %NumberImul(TO_NUMBER_INLINE(x), TO_NUMBER_INLINE(y));
}
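
For reference, Math.imul (a non-standard extension at this point) performs C-style 32-bit signed integer multiplication. A minimal sketch of the observable behaviour, with a rough user-land equivalent:

  Math.imul(3, 4);            // 12
  Math.imul(0xffffffff, 5);   // -5, since 0xffffffff is treated as int32 -1
  Math.imul(0x7fffffff, 2);   // -2, the product wraps modulo 2^32

  // Approximate user-land definition (split into 16-bit halves to stay exact):
  function imul(a, b) {
    var ah = (a >>> 16) & 0xffff, al = a & 0xffff;
    var bh = (b >>> 16) & 0xffff, bl = b & 0xffff;
    return (al * bl + (((ah * bl + al * bh) << 16) >>> 0)) | 0;
  }
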
@@ -222,6 +208,11 @@ function MathTan(x) {
function SetUpMath() {
%CheckIsBootstrapping();
+
+ %SetPrototype($Math, $Object.prototype);
+ %SetProperty(global, "Math", $Math, DONT_ENUM);
+ %FunctionSetInstanceClassName(MathConstructor, 'Math');
+
// Set up math constants.
// ECMA-262, section 15.8.1.1.
%OptimizeObjectForAddingMultipleProperties($Math, 8);
@@ -282,7 +273,8 @@ function SetUpMath() {
"atan2", MathAtan2,
"pow", MathPow,
"max", MathMax,
- "min", MathMin
+ "min", MathMin,
+ "imul", MathImul
));
}
diff --git a/deps/v8/src/messages.cc b/deps/v8/src/messages.cc
index a041770d1..9eae67a72 100644
--- a/deps/v8/src/messages.cc
+++ b/deps/v8/src/messages.cc
@@ -38,14 +38,15 @@ namespace internal {
// If no message listeners have been registered this one is called
// by default.
-void MessageHandler::DefaultMessageReport(const MessageLocation* loc,
+void MessageHandler::DefaultMessageReport(Isolate* isolate,
+ const MessageLocation* loc,
Handle<Object> message_obj) {
- SmartArrayPointer<char> str = GetLocalizedMessage(message_obj);
+ SmartArrayPointer<char> str = GetLocalizedMessage(isolate, message_obj);
if (loc == NULL) {
PrintF("%s\n", *str);
} else {
- HandleScope scope;
- Handle<Object> data(loc->script()->name());
+ HandleScope scope(isolate);
+ Handle<Object> data(loc->script()->name(), isolate);
SmartArrayPointer<char> data_str;
if (data->IsString())
data_str = Handle<String>::cast(data)->ToCString(DISALLOW_NULLS);
@@ -56,23 +57,25 @@ void MessageHandler::DefaultMessageReport(const MessageLocation* loc,
Handle<JSMessageObject> MessageHandler::MakeMessageObject(
+ Isolate* isolate,
const char* type,
MessageLocation* loc,
Vector< Handle<Object> > args,
Handle<String> stack_trace,
Handle<JSArray> stack_frames) {
- Handle<String> type_handle = FACTORY->LookupAsciiSymbol(type);
+ Factory* factory = isolate->factory();
+ Handle<String> type_handle = factory->InternalizeUtf8String(type);
Handle<FixedArray> arguments_elements =
- FACTORY->NewFixedArray(args.length());
+ factory->NewFixedArray(args.length());
for (int i = 0; i < args.length(); i++) {
arguments_elements->set(i, *args[i]);
}
Handle<JSArray> arguments_handle =
- FACTORY->NewJSArrayWithElements(arguments_elements);
+ factory->NewJSArrayWithElements(arguments_elements);
int start = 0;
int end = 0;
- Handle<Object> script_handle = FACTORY->undefined_value();
+ Handle<Object> script_handle = factory->undefined_value();
if (loc) {
start = loc->start_pos();
end = loc->end_pos();
@@ -80,15 +83,15 @@ Handle<JSMessageObject> MessageHandler::MakeMessageObject(
}
Handle<Object> stack_trace_handle = stack_trace.is_null()
- ? Handle<Object>::cast(FACTORY->undefined_value())
+ ? Handle<Object>::cast(factory->undefined_value())
: Handle<Object>::cast(stack_trace);
Handle<Object> stack_frames_handle = stack_frames.is_null()
- ? Handle<Object>::cast(FACTORY->undefined_value())
+ ? Handle<Object>::cast(factory->undefined_value())
: Handle<Object>::cast(stack_frames);
Handle<JSMessageObject> message =
- FACTORY->NewJSMessageObject(type_handle,
+ factory->NewJSMessageObject(type_handle,
arguments_handle,
start,
end,
@@ -112,7 +115,7 @@ void MessageHandler::ReportMessage(Isolate* isolate,
if (isolate->has_pending_exception()) {
isolate->pending_exception()->ToObject(&exception_object);
}
- Handle<Object> exception_handle(exception_object);
+ Handle<Object> exception_handle(exception_object, isolate);
Isolate::ExceptionScope exception_scope(isolate);
isolate->clear_pending_exception();
@@ -121,10 +124,10 @@ void MessageHandler::ReportMessage(Isolate* isolate,
v8::Local<v8::Message> api_message_obj = v8::Utils::MessageToLocal(message);
v8::Local<v8::Value> api_exception_obj = v8::Utils::ToLocal(exception_handle);
- v8::NeanderArray global_listeners(FACTORY->message_listeners());
+ v8::NeanderArray global_listeners(isolate->factory()->message_listeners());
int global_length = global_listeners.length();
if (global_length == 0) {
- DefaultMessageReport(loc, message);
+ DefaultMessageReport(isolate, loc, message);
if (isolate->has_scheduled_exception()) {
isolate->clear_scheduled_exception();
}
@@ -152,25 +155,30 @@ void MessageHandler::ReportMessage(Isolate* isolate,
}
-Handle<String> MessageHandler::GetMessage(Handle<Object> data) {
- Handle<String> fmt_str = FACTORY->LookupAsciiSymbol("FormatMessage");
+Handle<String> MessageHandler::GetMessage(Isolate* isolate,
+ Handle<Object> data) {
+ Factory* factory = isolate->factory();
+ Handle<String> fmt_str =
+ factory->InternalizeOneByteString(STATIC_ASCII_VECTOR("FormatMessage"));
Handle<JSFunction> fun =
Handle<JSFunction>(
JSFunction::cast(
- Isolate::Current()->js_builtins_object()->
+ isolate->js_builtins_object()->
GetPropertyNoExceptionThrown(*fmt_str)));
- Handle<Object> argv[] = { data };
+ Handle<JSMessageObject> message = Handle<JSMessageObject>::cast(data);
+ Handle<Object> argv[] = { Handle<Object>(message->type(), isolate),
+ Handle<Object>(message->arguments(), isolate) };
bool caught_exception;
Handle<Object> result =
Execution::TryCall(fun,
- Isolate::Current()->js_builtins_object(),
+ isolate->js_builtins_object(),
ARRAY_SIZE(argv),
argv,
&caught_exception);
if (caught_exception || !result->IsString()) {
- return FACTORY->LookupAsciiSymbol("<error>");
+ return factory->InternalizeOneByteString(STATIC_ASCII_VECTOR("<error>"));
}
Handle<String> result_string = Handle<String>::cast(result);
// A string that has been obtained from JS code in this way is
@@ -184,9 +192,10 @@ Handle<String> MessageHandler::GetMessage(Handle<Object> data) {
SmartArrayPointer<char> MessageHandler::GetLocalizedMessage(
+ Isolate* isolate,
Handle<Object> data) {
- HandleScope scope;
- return GetMessage(data)->ToCString(DISALLOW_NULLS);
+ HandleScope scope(isolate);
+ return GetMessage(isolate, data)->ToCString(DISALLOW_NULLS);
}
diff --git a/deps/v8/src/messages.h b/deps/v8/src/messages.h
index 358509ec3..5d84e46ca 100644
--- a/deps/v8/src/messages.h
+++ b/deps/v8/src/messages.h
@@ -91,6 +91,7 @@ class MessageHandler {
public:
// Returns a message object for the API to use.
static Handle<JSMessageObject> MakeMessageObject(
+ Isolate* isolate,
const char* type,
MessageLocation* loc,
Vector< Handle<Object> > args,
@@ -102,10 +103,12 @@ class MessageHandler {
MessageLocation* loc,
Handle<Object> message);
- static void DefaultMessageReport(const MessageLocation* loc,
+ static void DefaultMessageReport(Isolate* isolate,
+ const MessageLocation* loc,
Handle<Object> message_obj);
- static Handle<String> GetMessage(Handle<Object> data);
- static SmartArrayPointer<char> GetLocalizedMessage(Handle<Object> data);
+ static Handle<String> GetMessage(Isolate* isolate, Handle<Object> data);
+ static SmartArrayPointer<char> GetLocalizedMessage(Isolate* isolate,
+ Handle<Object> data);
};
} } // namespace v8::internal
diff --git a/deps/v8/src/messages.js b/deps/v8/src/messages.js
index fe894b578..0a301228d 100644
--- a/deps/v8/src/messages.js
+++ b/deps/v8/src/messages.js
@@ -26,18 +26,165 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// -------------------------------------------------------------------
-//
-// If this object gets passed to an error constructor the error will
-// get an accessor for .message that constructs a descriptive error
-// message on access.
-var kAddMessageAccessorsMarker = { };
-
-// This will be lazily initialized when first needed (and forcibly
-// overwritten even though it's const).
-var kMessages = 0;
-function FormatString(format, message) {
- var args = %MessageGetArguments(message);
+var kMessages = {
+ // Error
+ cyclic_proto: ["Cyclic __proto__ value"],
+ code_gen_from_strings: ["%0"],
+ generator_running: ["Generator is already running"],
+ generator_finished: ["Generator has already finished"],
+ // TypeError
+ unexpected_token: ["Unexpected token ", "%0"],
+ unexpected_token_number: ["Unexpected number"],
+ unexpected_token_string: ["Unexpected string"],
+ unexpected_token_identifier: ["Unexpected identifier"],
+ unexpected_reserved: ["Unexpected reserved word"],
+ unexpected_strict_reserved: ["Unexpected strict mode reserved word"],
+ unexpected_eos: ["Unexpected end of input"],
+ malformed_regexp: ["Invalid regular expression: /", "%0", "/: ", "%1"],
+ unterminated_regexp: ["Invalid regular expression: missing /"],
+ regexp_flags: ["Cannot supply flags when constructing one RegExp from another"],
+ incompatible_method_receiver: ["Method ", "%0", " called on incompatible receiver ", "%1"],
+ invalid_lhs_in_assignment: ["Invalid left-hand side in assignment"],
+ invalid_lhs_in_for_in: ["Invalid left-hand side in for-in"],
+ invalid_lhs_in_postfix_op: ["Invalid left-hand side expression in postfix operation"],
+ invalid_lhs_in_prefix_op: ["Invalid left-hand side expression in prefix operation"],
+ multiple_defaults_in_switch: ["More than one default clause in switch statement"],
+ newline_after_throw: ["Illegal newline after throw"],
+ redeclaration: ["%0", " '", "%1", "' has already been declared"],
+ no_catch_or_finally: ["Missing catch or finally after try"],
+ unknown_label: ["Undefined label '", "%0", "'"],
+ uncaught_exception: ["Uncaught ", "%0"],
+ stack_trace: ["Stack Trace:\n", "%0"],
+ called_non_callable: ["%0", " is not a function"],
+ undefined_method: ["Object ", "%1", " has no method '", "%0", "'"],
+ property_not_function: ["Property '", "%0", "' of object ", "%1", " is not a function"],
+ cannot_convert_to_primitive: ["Cannot convert object to primitive value"],
+ not_constructor: ["%0", " is not a constructor"],
+ not_defined: ["%0", " is not defined"],
+ non_object_property_load: ["Cannot read property '", "%0", "' of ", "%1"],
+ non_object_property_store: ["Cannot set property '", "%0", "' of ", "%1"],
+ non_object_property_call: ["Cannot call method '", "%0", "' of ", "%1"],
+ with_expression: ["%0", " has no properties"],
+ illegal_invocation: ["Illegal invocation"],
+ no_setter_in_callback: ["Cannot set property ", "%0", " of ", "%1", " which has only a getter"],
+ apply_non_function: ["Function.prototype.apply was called on ", "%0", ", which is a ", "%1", " and not a function"],
+ apply_wrong_args: ["Function.prototype.apply: Arguments list has wrong type"],
+ invalid_in_operator_use: ["Cannot use 'in' operator to search for '", "%0", "' in ", "%1"],
+ instanceof_function_expected: ["Expecting a function in instanceof check, but got ", "%0"],
+ instanceof_nonobject_proto: ["Function has non-object prototype '", "%0", "' in instanceof check"],
+ undefined_or_null_to_object: ["Cannot convert undefined or null to object"],
+ reduce_no_initial: ["Reduce of empty array with no initial value"],
+ getter_must_be_callable: ["Getter must be a function: ", "%0"],
+ setter_must_be_callable: ["Setter must be a function: ", "%0"],
+ value_and_accessor: ["Invalid property. A property cannot both have accessors and be writable or have a value, ", "%0"],
+ proto_object_or_null: ["Object prototype may only be an Object or null"],
+ property_desc_object: ["Property description must be an object: ", "%0"],
+ redefine_disallowed: ["Cannot redefine property: ", "%0"],
+ define_disallowed: ["Cannot define property:", "%0", ", object is not extensible."],
+ non_extensible_proto: ["%0", " is not extensible"],
+ handler_non_object: ["Proxy.", "%0", " called with non-object as handler"],
+ proto_non_object: ["Proxy.", "%0", " called with non-object as prototype"],
+ trap_function_expected: ["Proxy.", "%0", " called with non-function for '", "%1", "' trap"],
+ handler_trap_missing: ["Proxy handler ", "%0", " has no '", "%1", "' trap"],
+ handler_trap_must_be_callable: ["Proxy handler ", "%0", " has non-callable '", "%1", "' trap"],
+ handler_returned_false: ["Proxy handler ", "%0", " returned false from '", "%1", "' trap"],
+ handler_returned_undefined: ["Proxy handler ", "%0", " returned undefined from '", "%1", "' trap"],
+ proxy_prop_not_configurable: ["Proxy handler ", "%0", " returned non-configurable descriptor for property '", "%2", "' from '", "%1", "' trap"],
+ proxy_non_object_prop_names: ["Trap '", "%1", "' returned non-object ", "%0"],
+ proxy_repeated_prop_name: ["Trap '", "%1", "' returned repeated property name '", "%2", "'"],
+ invalid_weakmap_key: ["Invalid value used as weak map key"],
+ invalid_weakset_value: ["Invalid value used in weak set"],
+ not_date_object: ["this is not a Date object."],
+ observe_non_object: ["Object.", "%0", " cannot ", "%0", " non-object"],
+ observe_non_function: ["Object.", "%0", " cannot deliver to non-function"],
+ observe_callback_frozen: ["Object.observe cannot deliver to a frozen function object"],
+ observe_invalid_accept: ["Object.observe accept must be an array of strings."],
+ observe_type_non_string: ["Invalid changeRecord with non-string 'type' property"],
+ observe_perform_non_string: ["Invalid non-string changeType"],
+ observe_perform_non_function: ["Cannot perform non-function"],
+ observe_notify_non_notifier: ["notify called on non-notifier object"],
+ proto_poison_pill: ["Generic use of __proto__ accessor not allowed"],
+ not_typed_array: ["this is not a typed array."],
+ invalid_argument: ["invalid_argument"],
+ data_view_not_array_buffer: ["First argument to DataView constructor must be an ArrayBuffer"],
+ constructor_not_function: ["Constructor ", "%0", " requires 'new'"],
+ // RangeError
+ invalid_array_length: ["Invalid array length"],
+ invalid_array_buffer_length: ["Invalid array buffer length"],
+ invalid_typed_array_offset: ["Start offset is too large:"],
+ invalid_typed_array_length: ["Invalid typed array length"],
+  invalid_typed_array_alignment: ["%0", " of ", "%1", " should be a multiple of ", "%3"],
+ typed_array_set_source_too_large:
+ ["Source is too large"],
+ typed_array_set_negative_offset:
+ ["Start offset is negative"],
+ invalid_data_view_offset: ["Start offset is outside the bounds of the buffer"],
+ invalid_data_view_length: ["Invalid data view length"],
+ invalid_data_view_accessor_offset:
+ ["Offset is outside the bounds of the DataView"],
+
+ stack_overflow: ["Maximum call stack size exceeded"],
+ invalid_time_value: ["Invalid time value"],
+ invalid_count_value: ["Invalid count value"],
+ // SyntaxError
+ paren_in_arg_string: ["Function arg string contains parenthesis"],
+ not_isvar: ["builtin %IS_VAR: not a variable"],
+ single_function_literal: ["Single function literal required"],
+ invalid_regexp_flags: ["Invalid flags supplied to RegExp constructor '", "%0", "'"],
+ invalid_regexp: ["Invalid RegExp pattern /", "%0", "/"],
+ illegal_break: ["Illegal break statement"],
+ illegal_continue: ["Illegal continue statement"],
+ illegal_return: ["Illegal return statement"],
+ illegal_let: ["Illegal let declaration outside extended mode"],
+ error_loading_debugger: ["Error loading debugger"],
+ no_input_to_regexp: ["No input to ", "%0"],
+ invalid_json: ["String '", "%0", "' is not valid JSON"],
+ circular_structure: ["Converting circular structure to JSON"],
+ called_on_non_object: ["%0", " called on non-object"],
+ called_on_null_or_undefined: ["%0", " called on null or undefined"],
+ array_indexof_not_defined: ["Array.getIndexOf: Argument undefined"],
+ object_not_extensible: ["Can't add property ", "%0", ", object is not extensible"],
+ illegal_access: ["Illegal access"],
+ invalid_preparser_data: ["Invalid preparser data for function ", "%0"],
+ strict_mode_with: ["Strict mode code may not include a with statement"],
+ strict_catch_variable: ["Catch variable may not be eval or arguments in strict mode"],
+ too_many_arguments: ["Too many arguments in function call (only 32766 allowed)"],
+ too_many_parameters: ["Too many parameters in function definition (only 32766 allowed)"],
+ too_many_variables: ["Too many variables declared (only 131071 allowed)"],
+ strict_param_name: ["Parameter name eval or arguments is not allowed in strict mode"],
+ strict_param_dupe: ["Strict mode function may not have duplicate parameter names"],
+ strict_var_name: ["Variable name may not be eval or arguments in strict mode"],
+ strict_function_name: ["Function name may not be eval or arguments in strict mode"],
+ strict_octal_literal: ["Octal literals are not allowed in strict mode."],
+ strict_duplicate_property: ["Duplicate data property in object literal not allowed in strict mode"],
+ accessor_data_property: ["Object literal may not have data and accessor property with the same name"],
+ accessor_get_set: ["Object literal may not have multiple get/set accessors with the same name"],
+ strict_lhs_assignment: ["Assignment to eval or arguments is not allowed in strict mode"],
+ strict_lhs_postfix: ["Postfix increment/decrement may not have eval or arguments operand in strict mode"],
+ strict_lhs_prefix: ["Prefix increment/decrement may not have eval or arguments operand in strict mode"],
+ strict_reserved_word: ["Use of future reserved word in strict mode"],
+ strict_delete: ["Delete of an unqualified identifier in strict mode."],
+ strict_delete_property: ["Cannot delete property '", "%0", "' of ", "%1"],
+ strict_const: ["Use of const in strict mode."],
+ strict_function: ["In strict mode code, functions can only be declared at top level or immediately within another function." ],
+ strict_read_only_property: ["Cannot assign to read only property '", "%0", "' of ", "%1"],
+ strict_cannot_assign: ["Cannot assign to read only '", "%0", "' in strict mode"],
+ strict_poison_pill: ["'caller', 'callee', and 'arguments' properties may not be accessed on strict mode functions or the arguments objects for calls to them"],
+ strict_caller: ["Illegal access to a strict mode caller function."],
+ unprotected_let: ["Illegal let declaration in unprotected statement context."],
+ unprotected_const: ["Illegal const declaration in unprotected statement context."],
+ cant_prevent_ext_external_array_elements: ["Cannot prevent extension of an object with external array elements"],
+ redef_external_array_element: ["Cannot redefine a property of an object with external array elements"],
+ harmony_const_assign: ["Assignment to constant variable."],
+ symbol_to_string: ["Conversion from symbol to string"],
+ invalid_module_path: ["Module does not export '", "%0", "', or export is not itself a module"],
+ module_type_error: ["Module '", "%0", "' used improperly"],
+ module_export_undefined: ["Export '", "%0", "' is not defined in module"]
+};
+
+
+function FormatString(format, args) {
var result = "";
var arg_num = 0;
for (var i = 0; i < format.length; i++) {
@@ -48,7 +195,7 @@ function FormatString(format, message) {
if (arg_num < 4) {
// str is one of %0, %1, %2 or %3.
try {
- str = ToDetailString(args[arg_num]);
+ str = NoSideEffectToString(args[arg_num]);
} catch (e) {
if (%IsJSModule(args[arg_num]))
str = "module";
@@ -65,11 +212,34 @@ function FormatString(format, message) {
}
-// To check if something is a native error we need to check the
-// concrete native error types. It is not sufficient to use instanceof
-// since it possible to create an object that has Error.prototype on
-// its prototype chain. This is the case for DOMException for example.
-function IsNativeErrorObject(obj) {
+function NoSideEffectToString(obj) {
+ if (IS_STRING(obj)) return obj;
+ if (IS_NUMBER(obj)) return %_NumberToString(obj);
+  if (IS_BOOLEAN(obj)) return obj ? 'true' : 'false';
+ if (IS_UNDEFINED(obj)) return 'undefined';
+ if (IS_NULL(obj)) return 'null';
+ if (IS_FUNCTION(obj)) return %_CallFunction(obj, FunctionToString);
+ if (IS_OBJECT(obj) && %GetDataProperty(obj, "toString") === ObjectToString) {
+ var constructor = %GetDataProperty(obj, "constructor");
+ if (typeof constructor == "function") {
+ var constructorName = constructor.name;
+ if (IS_STRING(constructorName) && constructorName !== "") {
+ return "#<" + constructorName + ">";
+ }
+ }
+ }
+ if (CanBeSafelyTreatedAsAnErrorObject(obj)) {
+ return %_CallFunction(obj, ErrorToString);
+ }
+ return %_CallFunction(obj, ObjectToString);
+}
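
The point of NoSideEffectToString is to stringify arbitrary values for error messages without running user code (getters or toString overrides). A rough user-land analogue of the idea, for illustration only:

  function noSideEffectToString(obj) {
    if (typeof obj === 'string') return obj;
    if (typeof obj === 'number' || typeof obj === 'boolean') return String(obj);
    if (obj === undefined) return 'undefined';
    if (obj === null) return 'null';
    if (typeof obj === 'function') return Function.prototype.toString.call(obj);
    // Prefer "#<ConstructorName>" over invoking obj.toString(); the real code
    // reads 'constructor' as a data property so no getter can run here.
    var ctor = obj.constructor;
    if (typeof ctor === 'function' && ctor.name) return '#<' + ctor.name + '>';
    return Object.prototype.toString.call(obj);   // e.g. "[object Object]"
  }
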
+
+// To determine whether we can safely stringify an object using ErrorToString
+// without the risk of side-effects, we need to check whether the object is
+// either an instance of a native error type (via '%_ClassOf'), or has $Error
+// in its prototype chain and hasn't overwritten 'toString' with something
+// strange and unusual.
+function CanBeSafelyTreatedAsAnErrorObject(obj) {
switch (%_ClassOf(obj)) {
case 'Error':
case 'EvalError':
@@ -80,7 +250,9 @@ function IsNativeErrorObject(obj) {
case 'URIError':
return true;
}
- return false;
+
+ var objToString = %GetDataProperty(obj, "toString");
+ return obj instanceof $Error && objToString === ErrorToString;
}
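
Why instanceof alone is not enough: any object can put $Error.prototype on its chain while carrying a hostile toString, as in this small illustration:

  var fake = Object.create(Error.prototype);
  fake.toString = function () { throw new Error('side effect'); };
  fake instanceof Error;   // true, yet calling ErrorToString on it is not safe
  // The check above therefore also requires the original Error.prototype.toString,
  // so an object like `fake` falls back to Object.prototype.toString instead.
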
@@ -89,7 +261,7 @@ function IsNativeErrorObject(obj) {
// the error to string method. This is to avoid leaking error
// objects between script tags in a browser setting.
function ToStringCheckErrorObject(obj) {
- if (IsNativeErrorObject(obj)) {
+ if (CanBeSafelyTreatedAsAnErrorObject(obj)) {
return %_CallFunction(obj, ErrorToString);
} else {
return ToString(obj);
@@ -112,13 +284,8 @@ function ToDetailString(obj) {
function MakeGenericError(constructor, type, args) {
- if (IS_UNDEFINED(args)) {
- args = [];
- }
- var e = new constructor(kAddMessageAccessorsMarker);
- e.type = type;
- e.arguments = args;
- return e;
+ if (IS_UNDEFINED(args)) args = [];
+ return new constructor(FormatMessage(type, args));
}
@@ -135,156 +302,10 @@ function MakeGenericError(constructor, type, args) {
// Helper functions; called from the runtime system.
-function FormatMessage(message) {
- if (kMessages === 0) {
- var messagesDictionary = [
- // Error
- "cyclic_proto", ["Cyclic __proto__ value"],
- "code_gen_from_strings", ["%0"],
- // TypeError
- "unexpected_token", ["Unexpected token ", "%0"],
- "unexpected_token_number", ["Unexpected number"],
- "unexpected_token_string", ["Unexpected string"],
- "unexpected_token_identifier", ["Unexpected identifier"],
- "unexpected_reserved", ["Unexpected reserved word"],
- "unexpected_strict_reserved", ["Unexpected strict mode reserved word"],
- "unexpected_eos", ["Unexpected end of input"],
- "malformed_regexp", ["Invalid regular expression: /", "%0", "/: ", "%1"],
- "unterminated_regexp", ["Invalid regular expression: missing /"],
- "regexp_flags", ["Cannot supply flags when constructing one RegExp from another"],
- "incompatible_method_receiver", ["Method ", "%0", " called on incompatible receiver ", "%1"],
- "invalid_lhs_in_assignment", ["Invalid left-hand side in assignment"],
- "invalid_lhs_in_for_in", ["Invalid left-hand side in for-in"],
- "invalid_lhs_in_postfix_op", ["Invalid left-hand side expression in postfix operation"],
- "invalid_lhs_in_prefix_op", ["Invalid left-hand side expression in prefix operation"],
- "multiple_defaults_in_switch", ["More than one default clause in switch statement"],
- "newline_after_throw", ["Illegal newline after throw"],
- "redeclaration", ["%0", " '", "%1", "' has already been declared"],
- "no_catch_or_finally", ["Missing catch or finally after try"],
- "unknown_label", ["Undefined label '", "%0", "'"],
- "uncaught_exception", ["Uncaught ", "%0"],
- "stack_trace", ["Stack Trace:\n", "%0"],
- "called_non_callable", ["%0", " is not a function"],
- "undefined_method", ["Object ", "%1", " has no method '", "%0", "'"],
- "property_not_function", ["Property '", "%0", "' of object ", "%1", " is not a function"],
- "cannot_convert_to_primitive", ["Cannot convert object to primitive value"],
- "not_constructor", ["%0", " is not a constructor"],
- "not_defined", ["%0", " is not defined"],
- "non_object_property_load", ["Cannot read property '", "%0", "' of ", "%1"],
- "non_object_property_store", ["Cannot set property '", "%0", "' of ", "%1"],
- "non_object_property_call", ["Cannot call method '", "%0", "' of ", "%1"],
- "with_expression", ["%0", " has no properties"],
- "illegal_invocation", ["Illegal invocation"],
- "no_setter_in_callback", ["Cannot set property ", "%0", " of ", "%1", " which has only a getter"],
- "apply_non_function", ["Function.prototype.apply was called on ", "%0", ", which is a ", "%1", " and not a function"],
- "apply_wrong_args", ["Function.prototype.apply: Arguments list has wrong type"],
- "invalid_in_operator_use", ["Cannot use 'in' operator to search for '", "%0", "' in ", "%1"],
- "instanceof_function_expected", ["Expecting a function in instanceof check, but got ", "%0"],
- "instanceof_nonobject_proto", ["Function has non-object prototype '", "%0", "' in instanceof check"],
- "null_to_object", ["Cannot convert null to object"],
- "reduce_no_initial", ["Reduce of empty array with no initial value"],
- "getter_must_be_callable", ["Getter must be a function: ", "%0"],
- "setter_must_be_callable", ["Setter must be a function: ", "%0"],
- "value_and_accessor", ["Invalid property. A property cannot both have accessors and be writable or have a value, ", "%0"],
- "proto_object_or_null", ["Object prototype may only be an Object or null"],
- "property_desc_object", ["Property description must be an object: ", "%0"],
- "redefine_disallowed", ["Cannot redefine property: ", "%0"],
- "define_disallowed", ["Cannot define property:", "%0", ", object is not extensible."],
- "non_extensible_proto", ["%0", " is not extensible"],
- "handler_non_object", ["Proxy.", "%0", " called with non-object as handler"],
- "proto_non_object", ["Proxy.", "%0", " called with non-object as prototype"],
- "trap_function_expected", ["Proxy.", "%0", " called with non-function for '", "%1", "' trap"],
- "handler_trap_missing", ["Proxy handler ", "%0", " has no '", "%1", "' trap"],
- "handler_trap_must_be_callable", ["Proxy handler ", "%0", " has non-callable '", "%1", "' trap"],
- "handler_returned_false", ["Proxy handler ", "%0", " returned false from '", "%1", "' trap"],
- "handler_returned_undefined", ["Proxy handler ", "%0", " returned undefined from '", "%1", "' trap"],
- "proxy_prop_not_configurable", ["Proxy handler ", "%0", " returned non-configurable descriptor for property '", "%2", "' from '", "%1", "' trap"],
- "proxy_non_object_prop_names", ["Trap '", "%1", "' returned non-object ", "%0"],
- "proxy_repeated_prop_name", ["Trap '", "%1", "' returned repeated property name '", "%2", "'"],
- "invalid_weakmap_key", ["Invalid value used as weak map key"],
- "not_date_object", ["this is not a Date object."],
- // RangeError
- "invalid_array_length", ["Invalid array length"],
- "stack_overflow", ["Maximum call stack size exceeded"],
- "invalid_time_value", ["Invalid time value"],
- // SyntaxError
- "unable_to_parse", ["Parse error"],
- "invalid_regexp_flags", ["Invalid flags supplied to RegExp constructor '", "%0", "'"],
- "invalid_regexp", ["Invalid RegExp pattern /", "%0", "/"],
- "illegal_break", ["Illegal break statement"],
- "illegal_continue", ["Illegal continue statement"],
- "illegal_return", ["Illegal return statement"],
- "illegal_let", ["Illegal let declaration outside extended mode"],
- "error_loading_debugger", ["Error loading debugger"],
- "no_input_to_regexp", ["No input to ", "%0"],
- "invalid_json", ["String '", "%0", "' is not valid JSON"],
- "circular_structure", ["Converting circular structure to JSON"],
- "called_on_non_object", ["%0", " called on non-object"],
- "called_on_null_or_undefined", ["%0", " called on null or undefined"],
- "array_indexof_not_defined", ["Array.getIndexOf: Argument undefined"],
- "object_not_extensible", ["Can't add property ", "%0", ", object is not extensible"],
- "illegal_access", ["Illegal access"],
- "invalid_preparser_data", ["Invalid preparser data for function ", "%0"],
- "strict_mode_with", ["Strict mode code may not include a with statement"],
- "strict_catch_variable", ["Catch variable may not be eval or arguments in strict mode"],
- "too_many_arguments", ["Too many arguments in function call (only 32766 allowed)"],
- "too_many_parameters", ["Too many parameters in function definition (only 32766 allowed)"],
- "too_many_variables", ["Too many variables declared (only 131071 allowed)"],
- "strict_param_name", ["Parameter name eval or arguments is not allowed in strict mode"],
- "strict_param_dupe", ["Strict mode function may not have duplicate parameter names"],
- "strict_var_name", ["Variable name may not be eval or arguments in strict mode"],
- "strict_function_name", ["Function name may not be eval or arguments in strict mode"],
- "strict_octal_literal", ["Octal literals are not allowed in strict mode."],
- "strict_duplicate_property", ["Duplicate data property in object literal not allowed in strict mode"],
- "accessor_data_property", ["Object literal may not have data and accessor property with the same name"],
- "accessor_get_set", ["Object literal may not have multiple get/set accessors with the same name"],
- "strict_lhs_assignment", ["Assignment to eval or arguments is not allowed in strict mode"],
- "strict_lhs_postfix", ["Postfix increment/decrement may not have eval or arguments operand in strict mode"],
- "strict_lhs_prefix", ["Prefix increment/decrement may not have eval or arguments operand in strict mode"],
- "strict_reserved_word", ["Use of future reserved word in strict mode"],
- "strict_delete", ["Delete of an unqualified identifier in strict mode."],
- "strict_delete_property", ["Cannot delete property '", "%0", "' of ", "%1"],
- "strict_const", ["Use of const in strict mode."],
- "strict_function", ["In strict mode code, functions can only be declared at top level or immediately within another function." ],
- "strict_read_only_property", ["Cannot assign to read only property '", "%0", "' of ", "%1"],
- "strict_cannot_assign", ["Cannot assign to read only '", "%0", "' in strict mode"],
- "strict_poison_pill", ["'caller', 'callee', and 'arguments' properties may not be accessed on strict mode functions or the arguments objects for calls to them"],
- "strict_caller", ["Illegal access to a strict mode caller function."],
- "unprotected_let", ["Illegal let declaration in unprotected statement context."],
- "unprotected_const", ["Illegal const declaration in unprotected statement context."],
- "cant_prevent_ext_external_array_elements", ["Cannot prevent extension of an object with external array elements"],
- "redef_external_array_element", ["Cannot redefine a property of an object with external array elements"],
- "harmony_const_assign", ["Assignment to constant variable."],
- "invalid_module_path", ["Module does not export '", "%0", "', or export is not itself a module"],
- "module_type_error", ["Module '", "%0", "' used improperly"],
- "module_export_undefined", ["Export '", "%0", "' is not defined in module"],
- ];
- var messages = { __proto__ : null };
- for (var i = 0; i < messagesDictionary.length; i += 2) {
- var key = messagesDictionary[i];
- var format = messagesDictionary[i + 1];
-
- for (var j = 0; j < format.length; j++) {
- %IgnoreAttributesAndSetProperty(format, %_NumberToString(j), format[j],
- DONT_DELETE | READ_ONLY | DONT_ENUM);
- }
- %IgnoreAttributesAndSetProperty(format, 'length', format.length,
- DONT_DELETE | READ_ONLY | DONT_ENUM);
- %PreventExtensions(format);
- %IgnoreAttributesAndSetProperty(messages,
- key,
- format,
- DONT_DELETE | DONT_ENUM | READ_ONLY);
- }
- %PreventExtensions(messages);
- %IgnoreAttributesAndSetProperty(builtins, "kMessages",
- messages,
- DONT_DELETE | DONT_ENUM | READ_ONLY);
- }
- var message_type = %MessageGetType(message);
- var format = kMessages[message_type];
- if (!format) return "<unknown message " + message_type + ">";
- return FormatString(format, message);
+function FormatMessage(type, args) {
+ var format = kMessages[type];
+ if (!format) return "<unknown message " + type + ">";
+ return FormatString(format, args);
}
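
FormatMessage only exists inside the natives context, but the data flow is easy to see with the templates above: each entry is an array of literal parts and %0-%3 placeholders that FormatString stitches together with the argument list.

  FormatMessage('unknown_label', ['outer']);
  // -> "Undefined label 'outer'"
  FormatMessage('redeclaration', ['Identifier', 'x']);
  // -> "Identifier 'x' has already been declared"
  FormatMessage('no_such_type', []);
  // -> "<unknown message no_such_type>"
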
@@ -535,11 +556,11 @@ function ScriptLineCount() {
* If sourceURL comment is available and script starts at zero returns sourceURL
* comment contents. Otherwise, script name is returned. See
* http://fbug.googlecode.com/svn/branches/firebug1.1/docs/ReleaseNotes_1.1.txt
- * for details on using //@ sourceURL comment to identify scritps that don't
- * have name.
+ * and Source Map Revision 3 proposal for details on using //# sourceURL and
+ * deprecated //@ sourceURL comment to identify scripts that don't have name.
*
- * @return {?string} script name if present, value for //@ sourceURL comment
- * otherwise.
+ * @return {?string} script name if present, value for //# sourceURL or
+ * deprecated //@ sourceURL comment otherwise.
*/
function ScriptNameOrSourceURL() {
if (this.line_offset > 0 || this.column_offset > 0) {
@@ -564,7 +585,7 @@ function ScriptNameOrSourceURL() {
this.cachedNameOrSourceURL = this.name;
if (sourceUrlPos > 4) {
var sourceUrlPattern =
- /\/\/@[\040\t]sourceURL=[\040\t]*([^\s\'\"]*)[\040\t]*$/gm;
+ /\/\/[#@][\040\t]sourceURL=[\040\t]*([^\s\'\"]*)[\040\t]*$/gm;
// Don't reuse lastMatchInfo here, so we create a new array with room
// for four captures (array with length one longer than the index
// of the fourth capture, where the numbering is zero-based).
@@ -573,7 +594,7 @@ function ScriptNameOrSourceURL() {
%_RegExpExec(sourceUrlPattern, source, sourceUrlPos - 4, matchInfo);
if (match) {
this.cachedNameOrSourceURL =
- SubString(source, matchInfo[CAPTURE(2)], matchInfo[CAPTURE(3)]);
+ %_SubString(source, matchInfo[CAPTURE(2)], matchInfo[CAPTURE(3)]);
}
}
return this.cachedNameOrSourceURL;
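
The relaxed pattern accepts both the //# form and the deprecated //@ form, so evaluated code can name itself for stack traces and debuggers:

  eval('function traced() { throw new Error("boom"); }\n' +
       '//# sourceURL=injected-script.js');
  // Frames from this eval now report "injected-script.js" rather than an
  // anonymous eval location; the older "//@ sourceURL=..." spelling still works.
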
@@ -756,87 +777,70 @@ function GetPositionInLine(message) {
function GetStackTraceLine(recv, fun, pos, isGlobal) {
- return new CallSite(recv, fun, pos).toString();
+ return new CallSite(recv, fun, pos, false).toString();
}
// ----------------------------------------------------------------------------
// Error implementation
-// Defines accessors for a property that is calculated the first time
-// the property is read.
-function DefineOneShotAccessor(obj, name, fun) {
- // Note that the accessors consistently operate on 'obj', not 'this'.
- // Since the object may occur in someone else's prototype chain we
- // can't rely on 'this' being the same as 'obj'.
- var value;
- var value_factory = fun;
- var getter = function() {
- if (value_factory == null) {
- return value;
- }
- value = value_factory(obj);
- value_factory = null;
- return value;
- };
- var setter = function(v) {
- value_factory = null;
- value = v;
- };
- %DefineOrRedefineAccessorProperty(obj, name, getter, setter, DONT_ENUM);
-}
+var CallSiteReceiverKey = %CreateSymbol("receiver");
+var CallSiteFunctionKey = %CreateSymbol("function");
+var CallSitePositionKey = %CreateSymbol("position");
+var CallSiteStrictModeKey = %CreateSymbol("strict mode");
-function CallSite(receiver, fun, pos) {
- this.receiver = receiver;
- this.fun = fun;
- this.pos = pos;
+function CallSite(receiver, fun, pos, strict_mode) {
+ this[CallSiteReceiverKey] = receiver;
+ this[CallSiteFunctionKey] = fun;
+ this[CallSitePositionKey] = pos;
+ this[CallSiteStrictModeKey] = strict_mode;
}
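
Storing the CallSite state under private symbols keeps it out of reach of user script (no enumeration, no accidental clobbering). The same pattern expressed with standard symbols, as a user-land approximation of %CreateSymbol:

  var kReceiver = Symbol('receiver');
  var kPosition = Symbol('position');
  function Site(receiver, pos) {
    this[kReceiver] = receiver;   // invisible to for-in, Object.keys and JSON
    this[kPosition] = pos;
  }
  Object.keys(new Site({}, 42));  // [] -- the state is not reachable by name
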
function CallSiteGetThis() {
- return this.receiver;
+ return this[CallSiteStrictModeKey] ? UNDEFINED : this[CallSiteReceiverKey];
}
function CallSiteGetTypeName() {
- return GetTypeName(this, false);
+ return GetTypeName(this[CallSiteReceiverKey], false);
}
function CallSiteIsToplevel() {
- if (this.receiver == null) {
+ if (this[CallSiteReceiverKey] == null) {
return true;
}
- return IS_GLOBAL(this.receiver);
+ return IS_GLOBAL(this[CallSiteReceiverKey]);
}
function CallSiteIsEval() {
- var script = %FunctionGetScript(this.fun);
+ var script = %FunctionGetScript(this[CallSiteFunctionKey]);
return script && script.compilation_type == COMPILATION_TYPE_EVAL;
}
function CallSiteGetEvalOrigin() {
- var script = %FunctionGetScript(this.fun);
+ var script = %FunctionGetScript(this[CallSiteFunctionKey]);
return FormatEvalOrigin(script);
}
function CallSiteGetScriptNameOrSourceURL() {
- var script = %FunctionGetScript(this.fun);
+ var script = %FunctionGetScript(this[CallSiteFunctionKey]);
return script ? script.nameOrSourceURL() : null;
}
function CallSiteGetFunction() {
- return this.fun;
+ return this[CallSiteStrictModeKey] ? UNDEFINED : this[CallSiteFunctionKey];
}
function CallSiteGetFunctionName() {
// See if the function knows its own name
- var name = this.fun.name;
+ var name = this[CallSiteFunctionKey].name;
if (name) {
return name;
}
- name = %FunctionGetInferredName(this.fun);
+ name = %FunctionGetInferredName(this[CallSiteFunctionKey]);
if (name) {
return name;
}
// Maybe this is an evaluation?
- var script = %FunctionGetScript(this.fun);
+ var script = %FunctionGetScript(this[CallSiteFunctionKey]);
if (script && script.compilation_type == COMPILATION_TYPE_EVAL) {
return "eval";
}
@@ -846,25 +850,22 @@ function CallSiteGetFunctionName() {
function CallSiteGetMethodName() {
// See if we can find a unique property on the receiver that holds
// this function.
- var ownName = this.fun.name;
- if (ownName && this.receiver &&
- (%_CallFunction(this.receiver,
- ownName,
- ObjectLookupGetter) === this.fun ||
- %_CallFunction(this.receiver,
- ownName,
- ObjectLookupSetter) === this.fun ||
- this.receiver[ownName] === this.fun)) {
+ var receiver = this[CallSiteReceiverKey];
+ var fun = this[CallSiteFunctionKey];
+ var ownName = fun.name;
+ if (ownName && receiver &&
+ (%_CallFunction(receiver, ownName, ObjectLookupGetter) === fun ||
+ %_CallFunction(receiver, ownName, ObjectLookupSetter) === fun ||
+ (IS_OBJECT(receiver) && %GetDataProperty(receiver, ownName) === fun))) {
// To handle DontEnum properties we guess that the method has
// the same name as the function.
return ownName;
}
var name = null;
- for (var prop in this.receiver) {
- if (%_CallFunction(this.receiver, prop, ObjectLookupGetter) === this.fun ||
- %_CallFunction(this.receiver, prop, ObjectLookupSetter) === this.fun ||
- (!%_CallFunction(this.receiver, prop, ObjectLookupGetter) &&
- this.receiver[prop] === this.fun)) {
+ for (var prop in receiver) {
+ if (%_CallFunction(receiver, prop, ObjectLookupGetter) === fun ||
+ %_CallFunction(receiver, prop, ObjectLookupSetter) === fun ||
+ (IS_OBJECT(receiver) && %GetDataProperty(receiver, prop) === fun)) {
// If we find more than one match bail out to avoid confusion.
if (name) {
return null;
@@ -879,49 +880,49 @@ function CallSiteGetMethodName() {
}
function CallSiteGetFileName() {
- var script = %FunctionGetScript(this.fun);
+ var script = %FunctionGetScript(this[CallSiteFunctionKey]);
return script ? script.name : null;
}
function CallSiteGetLineNumber() {
- if (this.pos == -1) {
+ if (this[CallSitePositionKey] == -1) {
return null;
}
- var script = %FunctionGetScript(this.fun);
+ var script = %FunctionGetScript(this[CallSiteFunctionKey]);
var location = null;
if (script) {
- location = script.locationFromPosition(this.pos, true);
+ location = script.locationFromPosition(this[CallSitePositionKey], true);
}
return location ? location.line + 1 : null;
}
function CallSiteGetColumnNumber() {
- if (this.pos == -1) {
+ if (this[CallSitePositionKey] == -1) {
return null;
}
- var script = %FunctionGetScript(this.fun);
+ var script = %FunctionGetScript(this[CallSiteFunctionKey]);
var location = null;
if (script) {
- location = script.locationFromPosition(this.pos, true);
+ location = script.locationFromPosition(this[CallSitePositionKey], true);
}
return location ? location.column + 1: null;
}
function CallSiteIsNative() {
- var script = %FunctionGetScript(this.fun);
+ var script = %FunctionGetScript(this[CallSiteFunctionKey]);
return script ? (script.type == TYPE_NATIVE) : false;
}
function CallSiteGetPosition() {
- return this.pos;
+ return this[CallSitePositionKey];
}
function CallSiteIsConstructor() {
- var constructor = this.receiver ? this.receiver.constructor : null;
- if (!constructor) {
- return false;
- }
- return this.fun === constructor;
+ var receiver = this[CallSiteReceiverKey];
+ var constructor = (receiver != null && IS_OBJECT(receiver))
+ ? %GetDataProperty(receiver, "constructor") : null;
+ if (!constructor) return false;
+ return this[CallSiteFunctionKey] === constructor;
}
function CallSiteToString() {
@@ -964,15 +965,17 @@ function CallSiteToString() {
var isConstructor = this.isConstructor();
var isMethodCall = !(this.isToplevel() || isConstructor);
if (isMethodCall) {
- var typeName = GetTypeName(this, true);
+ var typeName = GetTypeName(this[CallSiteReceiverKey], true);
var methodName = this.getMethodName();
if (functionName) {
- if (typeName && functionName.indexOf(typeName) != 0) {
+ if (typeName &&
+ %_CallFunction(functionName, typeName, StringIndexOf) != 0) {
line += typeName + ".";
}
line += functionName;
- if (methodName && functionName.lastIndexOf("." + methodName) !=
- functionName.length - methodName.length - 1) {
+ if (methodName &&
+ (%_CallFunction(functionName, "." + methodName, StringIndexOf) !=
+ functionName.length - methodName.length - 1)) {
line += " [as " + methodName + "]";
}
} else {
@@ -1050,17 +1053,58 @@ function FormatEvalOrigin(script) {
return eval_origin;
}
-function FormatStackTrace(error, frames) {
- var lines = [];
+
+function FormatErrorString(error) {
try {
- lines.push(error.toString());
+ return %_CallFunction(error, ErrorToString);
} catch (e) {
try {
- lines.push("<error: " + e + ">");
+ return "<error: " + e + ">";
} catch (ee) {
- lines.push("<error>");
+ return "<error>";
+ }
+ }
+}
+
+
+function GetStackFrames(raw_stack) {
+ var frames = new InternalArray();
+ var non_strict_frames = raw_stack[0];
+ for (var i = 1; i < raw_stack.length; i += 4) {
+ var recv = raw_stack[i];
+ var fun = raw_stack[i + 1];
+ var code = raw_stack[i + 2];
+ var pc = raw_stack[i + 3];
+ var pos = %FunctionGetPositionForOffset(code, pc);
+ non_strict_frames--;
+ frames.push(new CallSite(recv, fun, pos, (non_strict_frames < 0)));
+ }
+ return frames;
+}
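
The raw array handed over by %CollectStackTrace is a leading non-strict frame count followed by flat groups of four slots per frame. A sketch of the decode step over a mocked array, where positionFor stands in for %FunctionGetPositionForOffset:

  // raw layout: [non_strict_count, recv0, fun0, code0, pc0, recv1, fun1, ...]
  function decodeFrames(rawStack, positionFor) {
    var frames = [];
    var nonStrict = rawStack[0];
    for (var i = 1; i < rawStack.length; i += 4) {
      frames.push({
        receiver: rawStack[i],
        fn:       rawStack[i + 1],
        position: positionFor(rawStack[i + 2], rawStack[i + 3]),
        strict:   --nonStrict < 0   // strict frames hide receiver and function
      });
    }
    return frames;
  }
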
+
+
+// Flag to prevent recursive call of Error.prepareStackTrace.
+var formatting_custom_stack_trace = false;
+
+
+function FormatStackTrace(obj, error_string, frames) {
+ if (IS_FUNCTION($Error.prepareStackTrace) && !formatting_custom_stack_trace) {
+ var array = [];
+ %MoveArrayContents(frames, array);
+ formatting_custom_stack_trace = true;
+ var stack_trace = UNDEFINED;
+ try {
+ stack_trace = $Error.prepareStackTrace(obj, array);
+ } catch (e) {
+ throw e; // The custom formatting function threw. Rethrow.
+ } finally {
+ formatting_custom_stack_trace = false;
}
+ return stack_trace;
}
+
+ var lines = new InternalArray();
+ lines.push(error_string);
for (var i = 0; i < frames.length; i++) {
var frame = frames[i];
var line;
@@ -1076,52 +1120,62 @@ function FormatStackTrace(error, frames) {
}
lines.push(" at " + line);
}
- return lines.join("\n");
+ return %_CallFunction(lines, "\n", ArrayJoin);
}
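
The hook being guarded against re-entry here is the public Error.prepareStackTrace API: whatever it returns becomes the error's .stack value, and the frames it receives are the CallSite objects built above.

  Error.prepareStackTrace = function (error, callSites) {
    return callSites.map(function (site) {
      return site.getFunctionName() + ' (' +
             site.getFileName() + ':' + site.getLineNumber() + ')';
    });
  };
  try { null.f(); } catch (e) { console.log(e.stack); }  // array of "name (file:line)"
  Error.prepareStackTrace = undefined;                   // restore default formatting
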
-function FormatRawStackTrace(error, raw_stack) {
- var frames = [ ];
- for (var i = 0; i < raw_stack.length; i += 4) {
- var recv = raw_stack[i];
- var fun = raw_stack[i + 1];
- var code = raw_stack[i + 2];
- var pc = raw_stack[i + 3];
- var pos = %FunctionGetPositionForOffset(code, pc);
- frames.push(new CallSite(recv, fun, pos));
- }
- if (IS_FUNCTION($Error.prepareStackTrace)) {
- return $Error.prepareStackTrace(error, frames);
- } else {
- return FormatStackTrace(error, frames);
- }
-}
-function GetTypeName(obj, requireConstructor) {
- var constructor = obj.receiver.constructor;
+function GetTypeName(receiver, requireConstructor) {
+ var constructor = receiver.constructor;
if (!constructor) {
return requireConstructor ? null :
- %_CallFunction(obj.receiver, ObjectToString);
+ %_CallFunction(receiver, ObjectToString);
}
var constructorName = constructor.name;
if (!constructorName) {
return requireConstructor ? null :
- %_CallFunction(obj.receiver, ObjectToString);
+ %_CallFunction(receiver, ObjectToString);
}
return constructorName;
}
+
function captureStackTrace(obj, cons_opt) {
var stackTraceLimit = $Error.stackTraceLimit;
if (!stackTraceLimit || !IS_NUMBER(stackTraceLimit)) return;
if (stackTraceLimit < 0 || stackTraceLimit > 10000) {
stackTraceLimit = 10000;
}
- var raw_stack = %CollectStackTrace(obj,
- cons_opt ? cons_opt : captureStackTrace,
- stackTraceLimit);
- DefineOneShotAccessor(obj, 'stack', function (obj) {
- return FormatRawStackTrace(obj, raw_stack);
- });
+ var stack = %CollectStackTrace(obj,
+ cons_opt ? cons_opt : captureStackTrace,
+ stackTraceLimit);
+
+ var error_string = FormatErrorString(obj);
+ // The holder of this getter ('obj') may not be the receiver ('this').
+ // When this getter is called the first time, we use the context values to
+ // format a stack trace string and turn this accessor pair into a data
+ // property (on the holder).
+ var getter = function() {
+    // Stack is still a raw array awaiting formatting.
+ var result = FormatStackTrace(obj, error_string, GetStackFrames(stack));
+ // Turn this accessor into a data property.
+ %DefineOrRedefineDataProperty(obj, 'stack', result, NONE);
+ // Release context values.
+ stack = error_string = UNDEFINED;
+ return result;
+ };
+
+ // Set the 'stack' property on the receiver. If the receiver is the same as
+ // holder of this setter, the accessor pair is turned into a data property.
+ var setter = function(v) {
+ // Set data property on the receiver (not necessarily holder).
+ %DefineOrRedefineDataProperty(this, 'stack', v, NONE);
+ if (this === obj) {
+ // Release context values if holder is the same as the receiver.
+ stack = error_string = UNDEFINED;
+ }
+ };
+
+ %DefineOrRedefineAccessorProperty(obj, 'stack', getter, setter, DONT_ENUM);
}
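
The public entry point for all of this is Error.captureStackTrace. Reading .stack for the first time runs the getter defined above, which formats the collected frames and then turns the accessor into a plain data property:

  function MyError(message) {
    this.message = message;
    // Frames above and including MyError itself are omitted from the trace.
    Error.captureStackTrace(this, MyError);
  }
  MyError.prototype = Object.create(Error.prototype);

  var err = new MyError('boom');
  typeof err.stack;   // "string", formatted lazily on this first access
  // From now on 'stack' is an ordinary own data property of err.
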
@@ -1159,16 +1213,8 @@ function SetUpError() {
// Define all the expected properties directly on the error
// object. This avoids going through getters and setters defined
// on prototype objects.
- %IgnoreAttributesAndSetProperty(this, 'stack', void 0, DONT_ENUM);
- %IgnoreAttributesAndSetProperty(this, 'arguments', void 0, DONT_ENUM);
- %IgnoreAttributesAndSetProperty(this, 'type', void 0, DONT_ENUM);
- if (m === kAddMessageAccessorsMarker) {
- // DefineOneShotAccessor always inserts a message property and
- // ignores setters.
- DefineOneShotAccessor(this, 'message', function (obj) {
- return FormatMessage(%NewMessageObject(obj.type, obj.arguments));
- });
- } else if (!IS_UNDEFINED(m)) {
+ %IgnoreAttributesAndSetProperty(this, 'stack', UNDEFINED, DONT_ENUM);
+ if (!IS_UNDEFINED(m)) {
%IgnoreAttributesAndSetProperty(
this, 'message', ToString(m), DONT_ENUM);
}
@@ -1203,9 +1249,9 @@ var cyclic_error_marker = new $Object();
function GetPropertyWithoutInvokingMonkeyGetters(error, name) {
// Climb the prototype chain until we find the holder.
while (error && !%HasLocalProperty(error, name)) {
- error = error.__proto__;
+ error = %GetPrototype(error);
}
- if (error === null) return void 0;
+ if (IS_NULL(error)) return UNDEFINED;
if (!IS_OBJECT(error)) return error[name];
// If the property is an accessor on one of the predefined errors that can be
// generated statically by the compiler, don't touch it. This is to address
@@ -1214,11 +1260,11 @@ function GetPropertyWithoutInvokingMonkeyGetters(error, name) {
if (desc && desc[IS_ACCESSOR_INDEX]) {
var isName = name === "name";
if (error === $ReferenceError.prototype)
- return isName ? "ReferenceError" : void 0;
+ return isName ? "ReferenceError" : UNDEFINED;
if (error === $SyntaxError.prototype)
- return isName ? "SyntaxError" : void 0;
+ return isName ? "SyntaxError" : UNDEFINED;
if (error === $TypeError.prototype)
- return isName ? "TypeError" : void 0;
+ return isName ? "TypeError" : UNDEFINED;
}
// Otherwise, read normally.
return error[name];
@@ -1227,15 +1273,9 @@ function GetPropertyWithoutInvokingMonkeyGetters(error, name) {
function ErrorToStringDetectCycle(error) {
if (!%PushIfAbsent(visited_errors, error)) throw cyclic_error_marker;
try {
- var type = GetPropertyWithoutInvokingMonkeyGetters(error, "type");
var name = GetPropertyWithoutInvokingMonkeyGetters(error, "name");
name = IS_UNDEFINED(name) ? "Error" : TO_STRING_INLINE(name);
var message = GetPropertyWithoutInvokingMonkeyGetters(error, "message");
- var hasMessage = %_CallFunction(error, "message", ObjectHasOwnProperty);
- if (type && !hasMessage) {
- var args = GetPropertyWithoutInvokingMonkeyGetters(error, "arguments");
- message = FormatMessage(%NewMessageObject(type, args));
- }
message = IS_UNDEFINED(message) ? "" : TO_STRING_INLINE(message);
if (name === "") return message;
if (message === "") return name;
@@ -1267,4 +1307,44 @@ InstallFunctions($Error.prototype, DONT_ENUM, ['toString', ErrorToString]);
// Boilerplate for exceptions for stack overflows. Used from
// Isolate::StackOverflow().
-var kStackOverflowBoilerplate = MakeRangeError('stack_overflow', []);
+function SetUpStackOverflowBoilerplate() {
+ var boilerplate = MakeRangeError('stack_overflow', []);
+
+ var error_string = boilerplate.name + ": " + boilerplate.message;
+
+ // The raw stack trace is stored as a hidden property on the holder of this
+ // getter, which may not be the same as the receiver. Find the holder to
+ // retrieve the raw stack trace and then turn this accessor pair into a
+ // data property.
+ var getter = function() {
+ var holder = this;
+ while (!IS_ERROR(holder)) {
+ holder = %GetPrototype(holder);
+ if (IS_NULL(holder)) return MakeSyntaxError('illegal_access', []);
+ }
+ var stack = %GetAndClearOverflowedStackTrace(holder);
+ // We may not have captured any stack trace.
+ if (IS_UNDEFINED(stack)) return stack;
+
+ var result = FormatStackTrace(holder, error_string, GetStackFrames(stack));
+ // Replace this accessor with a data property.
+ %DefineOrRedefineDataProperty(holder, 'stack', result, NONE);
+ return result;
+ };
+
+ // Set the 'stack' property on the receiver. If the receiver is the same as
+ // holder of this setter, the accessor pair is turned into a data property.
+ var setter = function(v) {
+ %DefineOrRedefineDataProperty(this, 'stack', v, NONE);
+ // Tentatively clear the hidden property. If the receiver is the same as
+ // holder, we release the raw stack trace this way.
+ %GetAndClearOverflowedStackTrace(this);
+ };
+
+ %DefineOrRedefineAccessorProperty(
+ boilerplate, 'stack', getter, setter, DONT_ENUM);
+
+ return boilerplate;
+}
+
+var kStackOverflowBoilerplate = SetUpStackOverflowBoilerplate();
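
The stack-overflow boilerplate relies on the same self-replacing accessor trick as captureStackTrace. A generic user-land sketch of the pattern (defineLazy is an illustrative name, not part of the patch):

  function defineLazy(obj, name, compute) {
    Object.defineProperty(obj, name, {
      configurable: true,
      get: function () {
        var value = compute();
        // First read: swap the accessor for a plain data property.
        Object.defineProperty(this, name,
                              { value: value, writable: true, configurable: true });
        return value;
      },
      set: function (v) {
        Object.defineProperty(this, name,
                              { value: v, writable: true, configurable: true });
      }
    });
  }
  var holder = {};
  defineLazy(holder, 'stack', function () { return 'formatted on demand'; });
  holder.stack;   // runs compute() once; later reads hit the data property
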
diff --git a/deps/v8/src/mips/OWNERS b/deps/v8/src/mips/OWNERS
new file mode 100644
index 000000000..38473b56d
--- /dev/null
+++ b/deps/v8/src/mips/OWNERS
@@ -0,0 +1,2 @@
+plind44@gmail.com
+gergely@homejinni.com
diff --git a/deps/v8/src/mips/assembler-mips-inl.h b/deps/v8/src/mips/assembler-mips-inl.h
index 3e726a754..de91051ed 100644
--- a/deps/v8/src/mips/assembler-mips-inl.h
+++ b/deps/v8/src/mips/assembler-mips-inl.h
@@ -1,3 +1,4 @@
+
// Copyright (c) 1994-2006 Sun Microsystems Inc.
// All Rights Reserved.
//
@@ -65,7 +66,7 @@ Operand::Operand(const ExternalReference& f) {
Operand::Operand(Smi* value) {
rm_ = no_reg;
imm32_ = reinterpret_cast<intptr_t>(value);
- rmode_ = RelocInfo::NONE;
+ rmode_ = RelocInfo::NONE32;
}
@@ -79,9 +80,24 @@ bool Operand::is_reg() const {
}
+int Register::NumAllocatableRegisters() {
+ return kMaxNumAllocatableRegisters;
+}
+
+
+int DoubleRegister::NumRegisters() {
+ return FPURegister::kMaxNumRegisters;
+}
+
+
+int DoubleRegister::NumAllocatableRegisters() {
+ return FPURegister::kMaxNumAllocatableRegisters;
+}
+
+
int FPURegister::ToAllocationIndex(FPURegister reg) {
ASSERT(reg.code() % 2 == 0);
- ASSERT(reg.code() / 2 < kNumAllocatableRegisters);
+ ASSERT(reg.code() / 2 < kMaxNumAllocatableRegisters);
ASSERT(reg.is_valid());
ASSERT(!reg.is(kDoubleRegZero));
ASSERT(!reg.is(kLithiumScratchDouble));
@@ -111,14 +127,14 @@ void RelocInfo::apply(intptr_t delta) {
Address RelocInfo::target_address() {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
+ ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
return Assembler::target_address_at(pc_);
}
Address RelocInfo::target_address_address() {
ASSERT(IsCodeTarget(rmode_) ||
- rmode_ == RUNTIME_ENTRY ||
+ IsRuntimeEntry(rmode_) ||
rmode_ == EMBEDDED_OBJECT ||
rmode_ == EXTERNAL_REFERENCE);
// Read the address of the word containing the target_address in an
@@ -146,7 +162,7 @@ int RelocInfo::target_address_size() {
void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
+ ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
Assembler::set_target_address_at(pc_, target);
if (mode == UPDATE_WRITE_BARRIER && host() != NULL && IsCodeTarget(rmode_)) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
@@ -186,6 +202,7 @@ Object** RelocInfo::target_object_address() {
void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ ASSERT(!target->IsConsString());
Assembler::set_target_address_at(pc_, reinterpret_cast<Address>(target));
if (mode == UPDATE_WRITE_BARRIER &&
host() != NULL &&
@@ -203,24 +220,35 @@ Address* RelocInfo::target_reference_address() {
}
-Handle<JSGlobalPropertyCell> RelocInfo::target_cell_handle() {
- ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
+Address RelocInfo::target_runtime_entry(Assembler* origin) {
+ ASSERT(IsRuntimeEntry(rmode_));
+ return target_address();
+}
+
+
+void RelocInfo::set_target_runtime_entry(Address target,
+ WriteBarrierMode mode) {
+ ASSERT(IsRuntimeEntry(rmode_));
+ if (target_address() != target) set_target_address(target, mode);
+}
+
+
+Handle<Cell> RelocInfo::target_cell_handle() {
+ ASSERT(rmode_ == RelocInfo::CELL);
Address address = Memory::Address_at(pc_);
- return Handle<JSGlobalPropertyCell>(
- reinterpret_cast<JSGlobalPropertyCell**>(address));
+ return Handle<Cell>(reinterpret_cast<Cell**>(address));
}
-JSGlobalPropertyCell* RelocInfo::target_cell() {
- ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
- return JSGlobalPropertyCell::FromValueAddress(Memory::Address_at(pc_));
+Cell* RelocInfo::target_cell() {
+ ASSERT(rmode_ == RelocInfo::CELL);
+ return Cell::FromValueAddress(Memory::Address_at(pc_));
}
-void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell,
- WriteBarrierMode mode) {
- ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
- Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
+void RelocInfo::set_target_cell(Cell* cell, WriteBarrierMode mode) {
+ ASSERT(rmode_ == RelocInfo::CELL);
+ Address address = cell->address() + Cell::kValueOffset;
Memory::Address_at(pc_) = address;
if (mode == UPDATE_WRITE_BARRIER && host() != NULL) {
// TODO(1550) We are passing NULL as a slot because cell can never be on
@@ -231,6 +259,31 @@ void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell,
}
+static const int kNoCodeAgeSequenceLength = 7;
+
+
+Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
+ UNREACHABLE(); // This should never be reached on MIPS.
+ return Handle<Object>();
+}
+
+
+Code* RelocInfo::code_age_stub() {
+ ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+ return Code::GetCodeFromTargetAddress(
+ Memory::Address_at(pc_ + Assembler::kInstrSize *
+ (kNoCodeAgeSequenceLength - 1)));
+}
+
+
+void RelocInfo::set_code_age_stub(Code* stub) {
+ ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+ Memory::Address_at(pc_ + Assembler::kInstrSize *
+ (kNoCodeAgeSequenceLength - 1)) =
+ stub->instruction_start();
+}
+
+
Address RelocInfo::call_address() {
ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
@@ -292,26 +345,27 @@ bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
}
-void RelocInfo::Visit(ObjectVisitor* visitor) {
+void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
visitor->VisitEmbeddedPointer(this);
} else if (RelocInfo::IsCodeTarget(mode)) {
visitor->VisitCodeTarget(this);
- } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
- visitor->VisitGlobalPropertyCell(this);
+ } else if (mode == RelocInfo::CELL) {
+ visitor->VisitCell(this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
visitor->VisitExternalReference(this);
+ } else if (RelocInfo::IsCodeAgeSequence(mode)) {
+ visitor->VisitCodeAgeSequence(this);
#ifdef ENABLE_DEBUGGER_SUPPORT
- // TODO(isolates): Get a cached isolate below.
} else if (((RelocInfo::IsJSReturn(mode) &&
IsPatchedReturnSequence()) ||
(RelocInfo::IsDebugBreakSlot(mode) &&
IsPatchedDebugBreakSlotSequence())) &&
- Isolate::Current()->debug()->has_break_points()) {
+ isolate->debug()->has_break_points()) {
visitor->VisitDebugTarget(this);
#endif
- } else if (mode == RelocInfo::RUNTIME_ENTRY) {
+ } else if (RelocInfo::IsRuntimeEntry(mode)) {
visitor->VisitRuntimeEntry(this);
}
}
@@ -324,10 +378,12 @@ void RelocInfo::Visit(Heap* heap) {
StaticVisitor::VisitEmbeddedPointer(heap, this);
} else if (RelocInfo::IsCodeTarget(mode)) {
StaticVisitor::VisitCodeTarget(heap, this);
- } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
- StaticVisitor::VisitGlobalPropertyCell(heap, this);
+ } else if (mode == RelocInfo::CELL) {
+ StaticVisitor::VisitCell(heap, this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
StaticVisitor::VisitExternalReference(this);
+ } else if (RelocInfo::IsCodeAgeSequence(mode)) {
+ StaticVisitor::VisitCodeAgeSequence(heap, this);
#ifdef ENABLE_DEBUGGER_SUPPORT
} else if (heap->isolate()->debug()->has_break_points() &&
((RelocInfo::IsJSReturn(mode) &&
@@ -336,7 +392,7 @@ void RelocInfo::Visit(Heap* heap) {
IsPatchedDebugBreakSlotSequence()))) {
StaticVisitor::VisitDebugTarget(heap, this);
#endif
- } else if (mode == RelocInfo::RUNTIME_ENTRY) {
+ } else if (RelocInfo::IsRuntimeEntry(mode)) {
StaticVisitor::VisitRuntimeEntry(this);
}
}
diff --git a/deps/v8/src/mips/assembler-mips.cc b/deps/v8/src/mips/assembler-mips.cc
index a4563a64f..0972a8295 100644
--- a/deps/v8/src/mips/assembler-mips.cc
+++ b/deps/v8/src/mips/assembler-mips.cc
@@ -35,7 +35,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_MIPS)
+#if V8_TARGET_ARCH_MIPS
#include "mips/assembler-mips-inl.h"
#include "serialize.h"
@@ -47,7 +47,14 @@ namespace internal {
bool CpuFeatures::initialized_ = false;
#endif
unsigned CpuFeatures::supported_ = 0;
-unsigned CpuFeatures::found_by_runtime_probing_ = 0;
+unsigned CpuFeatures::found_by_runtime_probing_only_ = 0;
+unsigned CpuFeatures::cross_compile_ = 0;
+
+
+ExternalReference ExternalReference::cpu_features() {
+ ASSERT(CpuFeatures::initialized_);
+ return ExternalReference(&CpuFeatures::supported_);
+}
// Get the CPU features enabled by the build. For cross compilation the
@@ -57,7 +64,7 @@ unsigned CpuFeatures::found_by_runtime_probing_ = 0;
static uint64_t CpuFeaturesImpliedByCompiler() {
uint64_t answer = 0;
#ifdef CAN_USE_FPU_INSTRUCTIONS
- answer |= 1u << FPU;
+ answer |= static_cast<uint64_t>(1) << FPU;
#endif // def CAN_USE_FPU_INSTRUCTIONS
#ifdef __mips__
@@ -65,7 +72,7 @@ static uint64_t CpuFeaturesImpliedByCompiler() {
// generation even when generating snapshots. This won't work for cross
// compilation.
#if(defined(__mips_hard_float) && __mips_hard_float != 0)
- answer |= 1u << FPU;
+ answer |= static_cast<uint64_t>(1) << FPU;
#endif // defined(__mips_hard_float) && __mips_hard_float != 0
#endif // def __mips__
@@ -73,6 +80,28 @@ static uint64_t CpuFeaturesImpliedByCompiler() {
}
+const char* DoubleRegister::AllocationIndexToString(int index) {
+ ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
+ const char* const names[] = {
+ "f0",
+ "f2",
+ "f4",
+ "f6",
+ "f8",
+ "f10",
+ "f12",
+ "f14",
+ "f16",
+ "f18",
+ "f20",
+ "f22",
+ "f24",
+ "f26"
+ };
+ return names[index];
+}
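
The table above lists only even-numbered FPU registers because MIPS doubles live in even/odd register pairs; the allocation index is simply the register code divided by two. A minimal standalone sketch of that mapping (illustrative helper names, assuming the 14 allocatable doubles f0..f26 used above):

    #include <cassert>

    // Hypothetical helpers mirroring ToAllocationIndex/FromAllocationIndex for
    // MIPS double registers: only even codes are valid, f28/f30 are reserved.
    static const int kMaxAllocatableDoubles = 14;

    int DoubleCodeToAllocationIndex(int code) {
      assert(code % 2 == 0 && code / 2 < kMaxAllocatableDoubles);
      return code / 2;   // f0 -> 0, f2 -> 1, ..., f26 -> 13
    }

    int AllocationIndexToDoubleCode(int index) {
      assert(index >= 0 && index < kMaxAllocatableDoubles);
      return index * 2;  // 0 -> f0, 1 -> f2, ..., 13 -> f26
    }
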
+
+
void CpuFeatures::Probe() {
unsigned standard_features = (OS::CpuFeaturesImpliedByPlatform() |
CpuFeaturesImpliedByCompiler());
@@ -94,17 +123,16 @@ void CpuFeatures::Probe() {
// If the compiler is allowed to use fpu then we can use fpu too in our
// code generation.
#if !defined(__mips__)
- // For the simulator=mips build, use FPU when FLAG_enable_fpu is enabled.
- if (FLAG_enable_fpu) {
- supported_ |= 1u << FPU;
- }
+ // For the simulator build, use FPU.
+ supported_ |= static_cast<uint64_t>(1) << FPU;
#else
// Probe for additional features not already known to be available.
- if (OS::MipsCpuHasFeature(FPU)) {
+ CPU cpu;
+ if (cpu.has_fpu()) {
// This implementation also sets the FPU flags if
// runtime detection of FPU returns true.
- supported_ |= 1u << FPU;
- found_by_runtime_probing_ |= 1u << FPU;
+ supported_ |= static_cast<uint64_t>(1) << FPU;
+ found_by_runtime_probing_only_ |= static_cast<uint64_t>(1) << FPU;
}
#endif
}
@@ -211,17 +239,18 @@ void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
// See assembler-mips-inl.h for inlined constructors.
Operand::Operand(Handle<Object> handle) {
+ AllowDeferredHandleDereference using_raw_address;
rm_ = no_reg;
// Verify all Objects referred by code are NOT in new space.
Object* obj = *handle;
- ASSERT(!HEAP->InNewSpace(obj));
if (obj->IsHeapObject()) {
+ ASSERT(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
imm32_ = reinterpret_cast<intptr_t>(handle.location());
rmode_ = RelocInfo::EMBEDDED_OBJECT;
} else {
// No relocation needed.
imm32_ = reinterpret_cast<intptr_t>(obj);
- rmode_ = RelocInfo::NONE;
+ rmode_ = RelocInfo::NONE32;
}
}
@@ -267,45 +296,11 @@ const Instr kLwSwInstrArgumentMask = ~kLwSwInstrTypeMask;
const Instr kLwSwOffsetMask = kImm16Mask;
-// Spare buffer.
-static const int kMinimalBufferSize = 4 * KB;
-
-
-Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
- : AssemblerBase(arg_isolate),
+Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
+ : AssemblerBase(isolate, buffer, buffer_size),
recorded_ast_id_(TypeFeedbackId::None()),
- positions_recorder_(this),
- emit_debug_code_(FLAG_debug_code) {
- if (buffer == NULL) {
- // Do our own buffer management.
- if (buffer_size <= kMinimalBufferSize) {
- buffer_size = kMinimalBufferSize;
-
- if (isolate()->assembler_spare_buffer() != NULL) {
- buffer = isolate()->assembler_spare_buffer();
- isolate()->set_assembler_spare_buffer(NULL);
- }
- }
- if (buffer == NULL) {
- buffer_ = NewArray<byte>(buffer_size);
- } else {
- buffer_ = static_cast<byte*>(buffer);
- }
- buffer_size_ = buffer_size;
- own_buffer_ = true;
-
- } else {
- // Use externally provided buffer instead.
- ASSERT(buffer_size > 0);
- buffer_ = static_cast<byte*>(buffer);
- buffer_size_ = buffer_size;
- own_buffer_ = false;
- }
-
- // Set up buffer pointers.
- ASSERT(buffer_ != NULL);
- pc_ = buffer_;
- reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
+ positions_recorder_(this) {
+ reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
last_trampoline_pool_end_ = 0;
no_trampoline_pool_before_ = 0;
@@ -324,18 +319,6 @@ Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
}
-Assembler::~Assembler() {
- if (own_buffer_) {
- if (isolate()->assembler_spare_buffer() == NULL &&
- buffer_size_ == kMinimalBufferSize) {
- isolate()->set_assembler_spare_buffer(buffer_);
- } else {
- DeleteArray(buffer_);
- }
- }
-}
-
-
void Assembler::GetCode(CodeDesc* desc) {
ASSERT(pc_ <= reloc_info_writer.pos()); // No overlap.
// Set up code descriptor.
@@ -503,7 +486,6 @@ bool Assembler::IsBranch(Instr instr) {
uint32_t opcode = GetOpcodeField(instr);
uint32_t rt_field = GetRtField(instr);
uint32_t rs_field = GetRsField(instr);
- uint32_t label_constant = GetLabelConst(instr);
// Checks if the instruction is a branch.
return opcode == BEQ ||
opcode == BNE ||
@@ -515,8 +497,13 @@ bool Assembler::IsBranch(Instr instr) {
opcode == BGTZL ||
(opcode == REGIMM && (rt_field == BLTZ || rt_field == BGEZ ||
rt_field == BLTZAL || rt_field == BGEZAL)) ||
- (opcode == COP1 && rs_field == BC1) || // Coprocessor branch.
- label_constant == 0; // Emitted label const in reg-exp engine.
+ (opcode == COP1 && rs_field == BC1); // Coprocessor branch.
+}
+
+
+bool Assembler::IsEmittedConstant(Instr instr) {
+ uint32_t label_constant = GetLabelConst(instr);
+ return label_constant == 0; // Emitted label const in reg-exp engine.
}
@@ -553,10 +540,12 @@ bool Assembler::IsJal(Instr instr) {
return GetOpcodeField(instr) == JAL;
}
+
bool Assembler::IsJr(Instr instr) {
return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JR;
}
+
bool Assembler::IsJalr(Instr instr) {
return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JALR;
}
@@ -602,7 +591,7 @@ bool Assembler::IsNop(Instr instr, unsigned int type) {
int32_t Assembler::GetBranchOffset(Instr instr) {
ASSERT(IsBranch(instr));
- return ((int16_t)(instr & kImm16Mask)) << 2;
+ return (static_cast<int16_t>(instr & kImm16Mask)) << 2;
}
@@ -735,7 +724,7 @@ void Assembler::target_at_put(int32_t pos, int32_t target_pos) {
Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
ASSERT(IsOri(instr_ori));
- uint32_t imm = (uint32_t)buffer_ + target_pos;
+ uint32_t imm = reinterpret_cast<uint32_t>(buffer_) + target_pos;
ASSERT((imm & 3) == 0);
instr_lui &= ~kImm16Mask;
@@ -746,7 +735,7 @@ void Assembler::target_at_put(int32_t pos, int32_t target_pos) {
instr_at_put(pos + 1 * Assembler::kInstrSize,
instr_ori | (imm & kImm16Mask));
} else {
- uint32_t imm28 = (uint32_t)buffer_ + target_pos;
+ uint32_t imm28 = reinterpret_cast<uint32_t>(buffer_) + target_pos;
imm28 &= kImm28Mask;
ASSERT((imm28 & 3) == 0);
@@ -809,7 +798,7 @@ void Assembler::bind_to(Label* L, int pos) {
}
target_at_put(fixup_pos, pos);
} else {
- ASSERT(IsJ(instr) || IsLui(instr));
+ ASSERT(IsJ(instr) || IsLui(instr) || IsEmittedConstant(instr));
target_at_put(fixup_pos, pos);
}
}
@@ -839,6 +828,7 @@ void Assembler::next(Label* L) {
}
}
+
bool Assembler::is_near(Label* L) {
if (L->is_bound()) {
return ((pc_offset() - L->pos()) < kMaxBranchOffset - 4 * kInstrSize);
@@ -846,12 +836,13 @@ bool Assembler::is_near(Label* L) {
return false;
}
+
// We have to use a temporary register for things that can be relocated even
// if they can be encoded in the MIPS's 16 bits of immediate-offset instruction
// space. There is no guarantee that the relocated location can be similarly
// encoded.
bool Assembler::MustUseReg(RelocInfo::Mode rmode) {
- return rmode != RelocInfo::NONE;
+ return !RelocInfo::IsNone(rmode);
}
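
The comment above is the reason MustUseReg() exists: an operand that carries relocation info has to be materialized with li() into a scratch register, because the relocator may later patch it to a value that no longer fits the 16-bit immediate field. A self-contained sketch of the decision an emitter makes with this predicate (toy types, not the real Assembler API):

    #include <cstdint>

    struct ToyOperand {
      int32_t imm;
      bool has_reloc_info;   // Stand-in for !RelocInfo::IsNone(rmode_).
    };

    bool FitsImm16(int32_t v) { return v >= -32768 && v <= 32767; }

    // True: emit e.g. addiu(rd, rs, imm) directly.
    // False: emit li(at, operand) first, then addu(rd, rs, at).
    bool CanEncodeInline(const ToyOperand& op) {
      return FitsImm16(op.imm) && !op.has_reloc_info;
    }
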
void Assembler::GenInstrRegister(Opcode opcode,
@@ -887,7 +878,6 @@ void Assembler::GenInstrRegister(Opcode opcode,
FPURegister fd,
SecondaryField func) {
ASSERT(fd.is_valid() && fs.is_valid() && ft.is_valid());
- ASSERT(CpuFeatures::IsEnabled(FPU));
Instr instr = opcode | fmt | (ft.code() << kFtShift) | (fs.code() << kFsShift)
| (fd.code() << kFdShift) | func;
emit(instr);
@@ -895,13 +885,25 @@ void Assembler::GenInstrRegister(Opcode opcode,
void Assembler::GenInstrRegister(Opcode opcode,
+ FPURegister fr,
+ FPURegister ft,
+ FPURegister fs,
+ FPURegister fd,
+ SecondaryField func) {
+ ASSERT(fd.is_valid() && fr.is_valid() && fs.is_valid() && ft.is_valid());
+ Instr instr = opcode | (fr.code() << kFrShift) | (ft.code() << kFtShift)
+ | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
+ emit(instr);
+}
+
+
+void Assembler::GenInstrRegister(Opcode opcode,
SecondaryField fmt,
Register rt,
FPURegister fs,
FPURegister fd,
SecondaryField func) {
ASSERT(fd.is_valid() && fs.is_valid() && rt.is_valid());
- ASSERT(CpuFeatures::IsEnabled(FPU));
Instr instr = opcode | fmt | (rt.code() << kRtShift)
| (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
emit(instr);
@@ -914,7 +916,6 @@ void Assembler::GenInstrRegister(Opcode opcode,
FPUControlRegister fs,
SecondaryField func) {
ASSERT(fs.is_valid() && rt.is_valid());
- ASSERT(CpuFeatures::IsEnabled(FPU));
Instr instr =
opcode | fmt | (rt.code() << kRtShift) | (fs.code() << kFsShift) | func;
emit(instr);
@@ -949,7 +950,6 @@ void Assembler::GenInstrImmediate(Opcode opcode,
FPURegister ft,
int32_t j) {
ASSERT(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j)));
- ASSERT(CpuFeatures::IsEnabled(FPU));
Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift)
| (j & kImm16Mask);
emit(instr);
@@ -998,7 +998,7 @@ uint32_t Assembler::jump_address(Label* L) {
}
}
- uint32_t imm = (uint32_t)buffer_ + target_pos;
+ uint32_t imm = reinterpret_cast<uint32_t>(buffer_) + target_pos;
ASSERT((imm & 3) == 0);
return imm;
@@ -1133,7 +1133,8 @@ void Assembler::j(int32_t target) {
#if DEBUG
// Get pc of delay slot.
uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
- bool in_range = ((uint32_t)(ipc^target) >> (kImm26Bits+kImmFieldShift)) == 0;
+ bool in_range = ((ipc ^ static_cast<uint32_t>(target)) >>
+ (kImm26Bits + kImmFieldShift)) == 0;
ASSERT(in_range && ((target & 3) == 0));
#endif
GenInstrJump(J, target >> 2);
@@ -1154,7 +1155,8 @@ void Assembler::jal(int32_t target) {
#ifdef DEBUG
// Get pc of delay slot.
uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
- bool in_range = ((uint32_t)(ipc^target) >> (kImm26Bits+kImmFieldShift)) == 0;
+ bool in_range = ((ipc ^ static_cast<uint32_t>(target)) >>
+ (kImm26Bits + kImmFieldShift)) == 0;
ASSERT(in_range && ((target & 3) == 0));
#endif
positions_recorder()->WriteRecordedPositions();
@@ -1173,8 +1175,8 @@ void Assembler::jalr(Register rs, Register rd) {
void Assembler::j_or_jr(int32_t target, Register rs) {
// Get pc of delay slot.
uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
- bool in_range = ((uint32_t)(ipc^target) >> (kImm26Bits+kImmFieldShift)) == 0;
-
+ bool in_range = ((ipc ^ static_cast<uint32_t>(target)) >>
+ (kImm26Bits + kImmFieldShift)) == 0;
if (in_range) {
j(target);
} else {
@@ -1186,8 +1188,8 @@ void Assembler::j_or_jr(int32_t target, Register rs) {
void Assembler::jal_or_jalr(int32_t target, Register rs) {
// Get pc of delay slot.
uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
- bool in_range = ((uint32_t)(ipc^target) >> (kImm26Bits+kImmFieldShift)) == 0;
-
+ bool in_range = ((ipc ^ static_cast<uint32_t>(target)) >>
+ (kImm26Bits + kImmFieldShift)) == 0;
if (in_range) {
jal(target);
} else {
@@ -1344,7 +1346,7 @@ void Assembler::rotrv(Register rd, Register rt, Register rs) {
// Helper for base-reg + offset, when offset is larger than int16.
void Assembler::LoadRegPlusOffsetToAt(const MemOperand& src) {
ASSERT(!src.rm().is(at));
- lui(at, src.offset_ >> kLuiShift);
+ lui(at, (src.offset_ >> kLuiShift) & kImm16Mask);
ori(at, at, src.offset_ & kImm16Mask); // Load 32-bit offset.
addu(at, at, src.rm()); // Add base register.
}
@@ -1478,7 +1480,7 @@ void Assembler::break_(uint32_t code, bool break_as_stop) {
void Assembler::stop(const char* msg, uint32_t code) {
ASSERT(code > kMaxWatchpointCode);
ASSERT(code <= kMaxStopCode);
-#if defined(V8_HOST_ARCH_MIPS)
+#if V8_HOST_ARCH_MIPS
break_(0x54321);
#else // V8_HOST_ARCH_MIPS
BlockTrampolinePoolFor(2);
@@ -1672,14 +1674,16 @@ void Assembler::cfc1(Register rt, FPUControlRegister fs) {
GenInstrRegister(COP1, CFC1, rt, fs);
}
+
void Assembler::DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
uint64_t i;
- memcpy(&i, &d, 8);
+ OS::MemCopy(&i, &d, 8);
*lo = i & 0xffffffff;
*hi = i >> 32;
}
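
DoubleAsTwoUInt32() simply exposes the raw IEEE 754 bit pattern of a double as two 32-bit halves. A small self-contained illustration (assumes IEEE 754 doubles, which V8 requires anyway):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    static void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
      uint64_t i;
      std::memcpy(&i, &d, 8);     // Reinterpret the IEEE 754 bit pattern.
      *lo = i & 0xffffffff;
      *hi = i >> 32;
    }

    int main() {
      uint32_t lo, hi;
      DoubleAsTwoUInt32(1.0, &lo, &hi);
      // 1.0 is 0x3FF0000000000000, so this prints "hi=3ff00000 lo=00000000".
      std::printf("hi=%08x lo=%08x\n", hi, lo);
      return 0;
    }
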
+
// Arithmetic.
void Assembler::add_d(FPURegister fd, FPURegister fs, FPURegister ft) {
@@ -1697,6 +1701,12 @@ void Assembler::mul_d(FPURegister fd, FPURegister fs, FPURegister ft) {
}
+void Assembler::madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
+ FPURegister ft) {
+ GenInstrRegister(COP1X, fr, ft, fs, fd, MADD_D);
+}
+
+
void Assembler::div_d(FPURegister fd, FPURegister fs, FPURegister ft) {
GenInstrRegister(COP1, D, ft, fs, fd, DIV_D);
}
@@ -1863,7 +1873,6 @@ void Assembler::cvt_d_s(FPURegister fd, FPURegister fs) {
// Conditions.
void Assembler::c(FPUCondition cond, SecondaryField fmt,
FPURegister fs, FPURegister ft, uint16_t cc) {
- ASSERT(CpuFeatures::IsEnabled(FPU));
ASSERT(is_uint3(cc));
ASSERT((fmt & ~(31 << kRsShift)) == 0);
Instr instr = COP1 | fmt | ft.code() << 16 | fs.code() << kFsShift
@@ -1874,7 +1883,6 @@ void Assembler::c(FPUCondition cond, SecondaryField fmt,
void Assembler::fcmp(FPURegister src1, const double src2,
FPUCondition cond) {
- ASSERT(CpuFeatures::IsEnabled(FPU));
ASSERT(src2 == 0.0);
mtc1(zero_reg, f14);
cvt_d_w(f14, f14);
@@ -1883,7 +1891,6 @@ void Assembler::fcmp(FPURegister src1, const double src2,
void Assembler::bc1f(int16_t offset, uint16_t cc) {
- ASSERT(CpuFeatures::IsEnabled(FPU));
ASSERT(is_uint3(cc));
Instr instr = COP1 | BC1 | cc << 18 | 0 << 16 | (offset & kImm16Mask);
emit(instr);
@@ -1891,7 +1898,6 @@ void Assembler::bc1f(int16_t offset, uint16_t cc) {
void Assembler::bc1t(int16_t offset, uint16_t cc) {
- ASSERT(CpuFeatures::IsEnabled(FPU));
ASSERT(is_uint3(cc));
Instr instr = COP1 | BC1 | cc << 18 | 1 << 16 | (offset & kImm16Mask);
emit(instr);
@@ -1946,7 +1952,7 @@ int Assembler::RelocateInternalReference(byte* pc, intptr_t pc_delta) {
return 2; // Number of instructions patched.
} else {
uint32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
- if ((int32_t)imm28 == kEndOfJumpChain) {
+ if (static_cast<int32_t>(imm28) == kEndOfJumpChain) {
return 0; // Number of instructions patched.
}
imm28 += pc_delta;
@@ -1986,9 +1992,9 @@ void Assembler::GrowBuffer() {
// Copy the data.
int pc_delta = desc.buffer - buffer_;
int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
- memmove(desc.buffer, buffer_, desc.instr_size);
- memmove(reloc_info_writer.pos() + rc_delta,
- reloc_info_writer.pos(), desc.reloc_size);
+ OS::MemMove(desc.buffer, buffer_, desc.instr_size);
+ OS::MemMove(reloc_info_writer.pos() + rc_delta,
+ reloc_info_writer.pos(), desc.reloc_size);
// Switch buffers.
DeleteArray(buffer_);
@@ -2036,7 +2042,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
|| RelocInfo::IsPosition(rmode));
// These modes do not need an entry in the constant pool.
}
- if (rinfo.rmode() != RelocInfo::NONE) {
+ if (!RelocInfo::IsNone(rinfo.rmode())) {
// Don't record external references unless the heap will be serialized.
if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
#ifdef DEBUG
@@ -2196,9 +2202,9 @@ void Assembler::set_target_address_at(Address pc, Address target) {
Instr instr3 = instr_at(pc + 2 * kInstrSize);
uint32_t ipc = reinterpret_cast<uint32_t>(pc + 3 * kInstrSize);
- bool in_range =
- ((uint32_t)(ipc ^ itarget) >> (kImm26Bits + kImmFieldShift)) == 0;
- uint32_t target_field = (uint32_t)(itarget & kJumpAddrMask) >> kImmFieldShift;
+ bool in_range = ((ipc ^ itarget) >> (kImm26Bits + kImmFieldShift)) == 0;
+ uint32_t target_field =
+ static_cast<uint32_t>(itarget & kJumpAddrMask) >> kImmFieldShift;
bool patched_jump = false;
#ifndef ALLOW_JAL_IN_BOUNDARY_REGION
@@ -2257,6 +2263,7 @@ void Assembler::set_target_address_at(Address pc, Address target) {
CPU::FlushICache(pc, (patched_jump ? 3 : 2) * sizeof(int32_t));
}
+
void Assembler::JumpLabelToJumpRegister(Address pc) {
// Address pc points to lui/ori instructions.
// Jump to label may follow at pc + 2 * kInstrSize.
diff --git a/deps/v8/src/mips/assembler-mips.h b/deps/v8/src/mips/assembler-mips.h
index 59c45c927..2468c3c34 100644
--- a/deps/v8/src/mips/assembler-mips.h
+++ b/deps/v8/src/mips/assembler-mips.h
@@ -72,20 +72,29 @@ namespace internal {
// Core register.
struct Register {
static const int kNumRegisters = v8::internal::kNumRegisters;
- static const int kNumAllocatableRegisters = 14; // v0 through t7.
+ static const int kMaxNumAllocatableRegisters = 14; // v0 through t6 and cp.
static const int kSizeInBytes = 4;
+ static const int kCpRegister = 23; // cp (s7) is the 23rd register.
+
+ inline static int NumAllocatableRegisters();
static int ToAllocationIndex(Register reg) {
- return reg.code() - 2; // zero_reg and 'at' are skipped.
+ ASSERT((reg.code() - 2) < (kMaxNumAllocatableRegisters - 1) ||
+ reg.is(from_code(kCpRegister)));
+ return reg.is(from_code(kCpRegister)) ?
+ kMaxNumAllocatableRegisters - 1 : // Return last index for 'cp'.
+ reg.code() - 2; // zero_reg and 'at' are skipped.
}
static Register FromAllocationIndex(int index) {
- ASSERT(index >= 0 && index < kNumAllocatableRegisters);
- return from_code(index + 2); // zero_reg and 'at' are skipped.
+ ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
+ return index == kMaxNumAllocatableRegisters - 1 ?
+ from_code(kCpRegister) : // Last index is always the 'cp' register.
+ from_code(index + 2); // zero_reg and 'at' are skipped.
}
static const char* AllocationIndexToString(int index) {
- ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+ ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
const char* const names[] = {
"v0",
"v1",
@@ -100,7 +109,7 @@ struct Register {
"t4",
"t5",
"t6",
- "t7",
+ "s7",
};
return names[index];
}
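
With this change the allocatable core registers become v0..t6 plus cp (s7), and cp is folded in as the last allocation index. A worked example of the new mapping (standalone restatement, using the MIPS register codes v0 = 2 and s7 = 23):

    // Codes 2..14 (v0..t6) keep the old "code - 2" mapping (zero_reg and 'at'
    // are skipped); cp (s7, code 23) takes the final index, 13.
    static const int kCpCode = 23;
    static const int kMaxAllocatableCore = 14;

    int CoreCodeToAllocationIndex(int code) {
      return code == kCpCode ? kMaxAllocatableCore - 1 : code - 2;
    }

    int AllocationIndexToCoreCode(int index) {
      return index == kMaxAllocatableCore - 1 ? kCpCode : index + 2;
    }

    // CoreCodeToAllocationIndex(2)  == 0    (v0)
    // CoreCodeToAllocationIndex(23) == 13   (cp / s7)
    // AllocationIndexToCoreCode(13) == 23
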
@@ -186,7 +195,7 @@ Register ToRegister(int num);
// Coprocessor register.
struct FPURegister {
- static const int kNumRegisters = v8::internal::kNumFPURegisters;
+ static const int kMaxNumRegisters = v8::internal::kNumFPURegisters;
// TODO(plind): Warning, inconsistent numbering here. kNumFPURegisters refers
// to number of 32-bit FPU regs, but kNumAllocatableRegisters refers to
@@ -197,44 +206,25 @@ struct FPURegister {
// f28: 0.0
// f30: scratch register.
static const int kNumReservedRegisters = 2;
- static const int kNumAllocatableRegisters = kNumRegisters / 2 -
+ static const int kMaxNumAllocatableRegisters = kMaxNumRegisters / 2 -
kNumReservedRegisters;
-
+ inline static int NumRegisters();
+ inline static int NumAllocatableRegisters();
inline static int ToAllocationIndex(FPURegister reg);
+ static const char* AllocationIndexToString(int index);
static FPURegister FromAllocationIndex(int index) {
- ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+ ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
return from_code(index * 2);
}
- static const char* AllocationIndexToString(int index) {
- ASSERT(index >= 0 && index < kNumAllocatableRegisters);
- const char* const names[] = {
- "f0",
- "f2",
- "f4",
- "f6",
- "f8",
- "f10",
- "f12",
- "f14",
- "f16",
- "f18",
- "f20",
- "f22",
- "f24",
- "f26"
- };
- return names[index];
- }
-
static FPURegister from_code(int code) {
FPURegister r = { code };
return r;
}
- bool is_valid() const { return 0 <= code_ && code_ < kNumFPURegisters ; }
+ bool is_valid() const { return 0 <= code_ && code_ < kMaxNumRegisters ; }
bool is(FPURegister creg) const { return code_ == creg.code_; }
FPURegister low() const {
// Find low reg of a Double-reg pair, which is the reg itself.
@@ -361,7 +351,7 @@ class Operand BASE_EMBEDDED {
public:
// Immediate.
INLINE(explicit Operand(int32_t immediate,
- RelocInfo::Mode rmode = RelocInfo::NONE));
+ RelocInfo::Mode rmode = RelocInfo::NONE32));
INLINE(explicit Operand(const ExternalReference& f));
INLINE(explicit Operand(const char* s));
INLINE(explicit Operand(Object** opp));
@@ -375,6 +365,11 @@ class Operand BASE_EMBEDDED {
// Return true if this is a register operand.
INLINE(bool is_reg() const);
+ inline int32_t immediate() const {
+ ASSERT(!is_reg());
+ return imm32_;
+ }
+
Register rm() const { return rm_; }
private:
@@ -406,7 +401,7 @@ class MemOperand : public Operand {
// CpuFeatures keeps track of which features are supported by the target CPU.
-// Supported features must be enabled by a Scope before use.
+// Supported features must be enabled by a CpuFeatureScope before use.
class CpuFeatures : public AllStatic {
public:
// Detect features of the target CPU. Set safe defaults if the serializer
@@ -416,93 +411,49 @@ class CpuFeatures : public AllStatic {
// Check whether a feature is supported by the target CPU.
static bool IsSupported(CpuFeature f) {
ASSERT(initialized_);
- if (f == FPU && !FLAG_enable_fpu) return false;
- return (supported_ & (1u << f)) != 0;
+ return Check(f, supported_);
}
-
-#ifdef DEBUG
- // Check whether a feature is currently enabled.
- static bool IsEnabled(CpuFeature f) {
+ static bool IsFoundByRuntimeProbingOnly(CpuFeature f) {
ASSERT(initialized_);
- Isolate* isolate = Isolate::UncheckedCurrent();
- if (isolate == NULL) {
- // When no isolate is available, work as if we're running in
- // release mode.
- return IsSupported(f);
- }
- unsigned enabled = static_cast<unsigned>(isolate->enabled_cpu_features());
- return (enabled & (1u << f)) != 0;
+ return Check(f, found_by_runtime_probing_only_);
}
-#endif
- // Enable a specified feature within a scope.
- class Scope BASE_EMBEDDED {
-#ifdef DEBUG
-
- public:
- explicit Scope(CpuFeature f) {
- unsigned mask = 1u << f;
- ASSERT(CpuFeatures::IsSupported(f));
- ASSERT(!Serializer::enabled() ||
- (CpuFeatures::found_by_runtime_probing_ & mask) == 0);
- isolate_ = Isolate::UncheckedCurrent();
- old_enabled_ = 0;
- if (isolate_ != NULL) {
- old_enabled_ = static_cast<unsigned>(isolate_->enabled_cpu_features());
- isolate_->set_enabled_cpu_features(old_enabled_ | mask);
- }
- }
- ~Scope() {
- ASSERT_EQ(Isolate::UncheckedCurrent(), isolate_);
- if (isolate_ != NULL) {
- isolate_->set_enabled_cpu_features(old_enabled_);
- }
- }
-
- private:
- Isolate* isolate_;
- unsigned old_enabled_;
-#else
-
- public:
- explicit Scope(CpuFeature f) {}
-#endif
- };
+ static bool IsSafeForSnapshot(CpuFeature f) {
+ return Check(f, cross_compile_) ||
+ (IsSupported(f) &&
+ (!Serializer::enabled() || !IsFoundByRuntimeProbingOnly(f)));
+ }
- class TryForceFeatureScope BASE_EMBEDDED {
- public:
- explicit TryForceFeatureScope(CpuFeature f)
- : old_supported_(CpuFeatures::supported_) {
- if (CanForce()) {
- CpuFeatures::supported_ |= (1u << f);
- }
- }
+ static bool VerifyCrossCompiling() {
+ return cross_compile_ == 0;
+ }
- ~TryForceFeatureScope() {
- if (CanForce()) {
- CpuFeatures::supported_ = old_supported_;
- }
- }
+ static bool VerifyCrossCompiling(CpuFeature f) {
+ unsigned mask = flag2set(f);
+ return cross_compile_ == 0 ||
+ (cross_compile_ & mask) == mask;
+ }
- private:
- static bool CanForce() {
- // It's only safe to temporarily force support of CPU features
- // when there's only a single isolate, which is guaranteed when
- // the serializer is enabled.
- return Serializer::enabled();
- }
+ private:
+ static bool Check(CpuFeature f, unsigned set) {
+ return (set & flag2set(f)) != 0;
+ }
- const unsigned old_supported_;
- };
+ static unsigned flag2set(CpuFeature f) {
+ return 1u << f;
+ }
- private:
#ifdef DEBUG
static bool initialized_;
#endif
static unsigned supported_;
- static unsigned found_by_runtime_probing_;
+ static unsigned found_by_runtime_probing_only_;
+
+ static unsigned cross_compile_;
+ friend class ExternalReference;
+ friend class PlatformFeatureScope;
DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
};
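
The rewritten CpuFeatures drops the per-isolate Scope bookkeeping and keeps three plain bit sets: supported_, found_by_runtime_probing_only_ and cross_compile_. A self-contained sketch of how IsSafeForSnapshot() combines them (serializer state is passed in as a flag here instead of reading Serializer::enabled()):

    enum ToyFeature { kFPU = 1 };                       // Example bit position.
    inline unsigned Flag2Set(ToyFeature f) { return 1u << f; }

    bool IsSafeForSnapshot(ToyFeature f,
                           unsigned supported,          // Build + runtime probing.
                           unsigned probed_only,        // Found only by probing.
                           unsigned cross_compile,      // Forced for cross builds.
                           bool serializer_enabled) {
      if ((cross_compile & Flag2Set(f)) != 0) return true;
      if ((supported & Flag2Set(f)) == 0) return false;
      // A snapshot must not bake in features the build host merely probed at
      // runtime, since the machine loading the snapshot may not have them.
      return !serializer_enabled || (probed_only & Flag2Set(f)) == 0;
    }
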
@@ -523,13 +474,7 @@ class Assembler : public AssemblerBase {
// is too small, a fatal error occurs. No deallocation of the buffer is done
// upon destruction of the assembler.
Assembler(Isolate* isolate, void* buffer, int buffer_size);
- ~Assembler();
-
- // Overrides the default provided by FLAG_debug_code.
- void set_emit_debug_code(bool value) { emit_debug_code_ = value; }
-
- // Dummy for cross platform compatibility.
- void set_predictable_code_size(bool value) { }
+ virtual ~Assembler() { }
// GetCode emits any pending (non-emitted) code and fills the descriptor
// desc. GetCode() is idempotent; it returns the same result if no other
@@ -669,7 +614,10 @@ class Assembler : public AssemblerBase {
PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE,
// Helper values.
LAST_CODE_MARKER,
- FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED
+ FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED,
+ // Code aging
+ CODE_AGE_MARKER_NOP = 6,
+ CODE_AGE_SEQUENCE_NOP
};
// Type == 0 is the default non-marking nop. For mips this is a
@@ -822,6 +770,7 @@ class Assembler : public AssemblerBase {
void add_d(FPURegister fd, FPURegister fs, FPURegister ft);
void sub_d(FPURegister fd, FPURegister fs, FPURegister ft);
void mul_d(FPURegister fd, FPURegister fs, FPURegister ft);
+ void madd_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft);
void div_d(FPURegister fd, FPURegister fs, FPURegister ft);
void abs_d(FPURegister fd, FPURegister fs);
void mov_d(FPURegister fd, FPURegister fs);
@@ -947,8 +896,6 @@ class Assembler : public AssemblerBase {
void db(uint8_t data);
void dd(uint32_t data);
- int32_t pc_offset() const { return pc_ - buffer_; }
-
PositionsRecorder* positions_recorder() { return &positions_recorder_; }
// Postpone the generation of the trampoline pool for the specified number of
@@ -1024,6 +971,7 @@ class Assembler : public AssemblerBase {
static Instr SetAddImmediateOffset(Instr instr, int16_t offset);
static bool IsAndImmediate(Instr instr);
+ static bool IsEmittedConstant(Instr instr);
void CheckTrampolinePool();
@@ -1033,8 +981,6 @@ class Assembler : public AssemblerBase {
// the relocation info.
TypeFeedbackId recorded_ast_id_;
- bool emit_debug_code() const { return emit_debug_code_; }
-
int32_t buffer_space() const { return reloc_info_writer.pos() - pc_; }
// Decode branch instruction at pos and return branch target pos.
@@ -1093,13 +1039,6 @@ class Assembler : public AssemblerBase {
}
private:
- // Code buffer:
- // The buffer into which code and relocation info are generated.
- byte* buffer_;
- int buffer_size_;
- // True if the assembler owns the buffer, false if buffer is external.
- bool own_buffer_;
-
// Buffer size and constant pool distance are checked together at regular
// intervals of kBufferCheckInterval emitted bytes.
static const int kBufferCheckInterval = 1*KB/2;
@@ -1110,7 +1049,6 @@ class Assembler : public AssemblerBase {
// not have to check for overflow. The same is true for writes of large
// relocation info entries.
static const int kGap = 32;
- byte* pc_; // The program counter - moves forward.
// Repeated checking whether the trampoline pool should be emitted is rather
@@ -1175,6 +1113,13 @@ class Assembler : public AssemblerBase {
SecondaryField func = NULLSF);
void GenInstrRegister(Opcode opcode,
+ FPURegister fr,
+ FPURegister ft,
+ FPURegister fs,
+ FPURegister fd,
+ SecondaryField func = NULLSF);
+
+ void GenInstrRegister(Opcode opcode,
SecondaryField fmt,
Register rt,
FPURegister fs,
@@ -1285,7 +1230,6 @@ class Assembler : public AssemblerBase {
friend class BlockTrampolinePoolScope;
PositionsRecorder positions_recorder_;
- bool emit_debug_code_;
friend class PositionsRecorder;
friend class EnsureSpace;
};
diff --git a/deps/v8/src/mips/builtins-mips.cc b/deps/v8/src/mips/builtins-mips.cc
index 0342e6505..0b495831b 100644
--- a/deps/v8/src/mips/builtins-mips.cc
+++ b/deps/v8/src/mips/builtins-mips.cc
@@ -29,7 +29,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_MIPS)
+#if V8_TARGET_ARCH_MIPS
#include "codegen.h"
#include "debug.h"
@@ -108,378 +108,6 @@ static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
}
-// Allocate an empty JSArray. The allocated array is put into the result
-// register. An elements backing store is allocated with size initial_capacity
-// and filled with the hole values.
-static void AllocateEmptyJSArray(MacroAssembler* masm,
- Register array_function,
- Register result,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required) {
- const int initial_capacity = JSArray::kPreallocatedArrayElements;
- STATIC_ASSERT(initial_capacity >= 0);
- __ LoadInitialArrayMap(array_function, scratch2, scratch1, false);
-
- // Allocate the JSArray object together with space for a fixed array with the
- // requested elements.
- int size = JSArray::kSize;
- if (initial_capacity > 0) {
- size += FixedArray::SizeFor(initial_capacity);
- }
- __ AllocateInNewSpace(size,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
- // Allocated the JSArray. Now initialize the fields except for the elements
- // array.
- // result: JSObject
- // scratch1: initial map
- // scratch2: start of next object
- __ sw(scratch1, FieldMemOperand(result, JSObject::kMapOffset));
- __ LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
- __ sw(scratch1, FieldMemOperand(result, JSArray::kPropertiesOffset));
- // Field JSArray::kElementsOffset is initialized later.
- __ mov(scratch3, zero_reg);
- __ sw(scratch3, FieldMemOperand(result, JSArray::kLengthOffset));
-
- if (initial_capacity == 0) {
- __ sw(scratch1, FieldMemOperand(result, JSArray::kElementsOffset));
- return;
- }
-
- // Calculate the location of the elements array and set elements array member
- // of the JSArray.
- // result: JSObject
- // scratch2: start of next object
- __ Addu(scratch1, result, Operand(JSArray::kSize));
- __ sw(scratch1, FieldMemOperand(result, JSArray::kElementsOffset));
-
- // Clear the heap tag on the elements array.
- __ And(scratch1, scratch1, Operand(~kHeapObjectTagMask));
-
- // Initialize the FixedArray and fill it with holes. FixedArray length is
- // stored as a smi.
- // result: JSObject
- // scratch1: elements array (untagged)
- // scratch2: start of next object
- __ LoadRoot(scratch3, Heap::kFixedArrayMapRootIndex);
- STATIC_ASSERT(0 * kPointerSize == FixedArray::kMapOffset);
- __ sw(scratch3, MemOperand(scratch1));
- __ Addu(scratch1, scratch1, kPointerSize);
- __ li(scratch3, Operand(Smi::FromInt(initial_capacity)));
- STATIC_ASSERT(1 * kPointerSize == FixedArray::kLengthOffset);
- __ sw(scratch3, MemOperand(scratch1));
- __ Addu(scratch1, scratch1, kPointerSize);
-
- // Fill the FixedArray with the hole value. Inline the code if short.
- STATIC_ASSERT(2 * kPointerSize == FixedArray::kHeaderSize);
- __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
- static const int kLoopUnfoldLimit = 4;
- if (initial_capacity <= kLoopUnfoldLimit) {
- for (int i = 0; i < initial_capacity; i++) {
- __ sw(scratch3, MemOperand(scratch1, i * kPointerSize));
- }
- } else {
- Label loop, entry;
- __ Addu(scratch2, scratch1, Operand(initial_capacity * kPointerSize));
- __ Branch(&entry);
- __ bind(&loop);
- __ sw(scratch3, MemOperand(scratch1));
- __ Addu(scratch1, scratch1, kPointerSize);
- __ bind(&entry);
- __ Branch(&loop, lt, scratch1, Operand(scratch2));
- }
-}
-
-
-// Allocate a JSArray with the number of elements stored in a register. The
-// register array_function holds the built-in Array function and the register
-// array_size holds the size of the array as a smi. The allocated array is put
-// into the result register and beginning and end of the FixedArray elements
-// storage is put into registers elements_array_storage and elements_array_end
-// (see below for when that is not the case). If the parameter fill_with_holes
-// is true the allocated elements backing store is filled with the hole values
-// otherwise it is left uninitialized. When the backing store is filled the
-// register elements_array_storage is scratched.
-static void AllocateJSArray(MacroAssembler* masm,
- Register array_function, // Array function.
- Register array_size, // As a smi, cannot be 0.
- Register result,
- Register elements_array_storage,
- Register elements_array_end,
- Register scratch1,
- Register scratch2,
- bool fill_with_hole,
- Label* gc_required) {
- // Load the initial map from the array function.
- __ LoadInitialArrayMap(array_function, scratch2,
- elements_array_storage, fill_with_hole);
-
- if (FLAG_debug_code) { // Assert that array size is not zero.
- __ Assert(
- ne, "array size is unexpectedly 0", array_size, Operand(zero_reg));
- }
-
- // Allocate the JSArray object together with space for a FixedArray with the
- // requested number of elements.
- STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
- __ li(elements_array_end,
- (JSArray::kSize + FixedArray::kHeaderSize) / kPointerSize);
- __ sra(scratch1, array_size, kSmiTagSize);
- __ Addu(elements_array_end, elements_array_end, scratch1);
- __ AllocateInNewSpace(
- elements_array_end,
- result,
- scratch1,
- scratch2,
- gc_required,
- static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
-
- // Allocated the JSArray. Now initialize the fields except for the elements
- // array.
- // result: JSObject
- // elements_array_storage: initial map
- // array_size: size of array (smi)
- __ sw(elements_array_storage, FieldMemOperand(result, JSObject::kMapOffset));
- __ LoadRoot(elements_array_storage, Heap::kEmptyFixedArrayRootIndex);
- __ sw(elements_array_storage,
- FieldMemOperand(result, JSArray::kPropertiesOffset));
- // Field JSArray::kElementsOffset is initialized later.
- __ sw(array_size, FieldMemOperand(result, JSArray::kLengthOffset));
-
- // Calculate the location of the elements array and set elements array member
- // of the JSArray.
- // result: JSObject
- // array_size: size of array (smi)
- __ Addu(elements_array_storage, result, Operand(JSArray::kSize));
- __ sw(elements_array_storage,
- FieldMemOperand(result, JSArray::kElementsOffset));
-
- // Clear the heap tag on the elements array.
- __ And(elements_array_storage,
- elements_array_storage,
- Operand(~kHeapObjectTagMask));
- // Initialize the fixed array and fill it with holes. FixedArray length is
- // stored as a smi.
- // result: JSObject
- // elements_array_storage: elements array (untagged)
- // array_size: size of array (smi)
- __ LoadRoot(scratch1, Heap::kFixedArrayMapRootIndex);
- ASSERT_EQ(0 * kPointerSize, FixedArray::kMapOffset);
- __ sw(scratch1, MemOperand(elements_array_storage));
- __ Addu(elements_array_storage, elements_array_storage, kPointerSize);
-
- // Length of the FixedArray is the number of pre-allocated elements if
- // the actual JSArray has length 0 and the size of the JSArray for non-empty
- // JSArrays. The length of a FixedArray is stored as a smi.
- STATIC_ASSERT(kSmiTag == 0);
-
- ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
- __ sw(array_size, MemOperand(elements_array_storage));
- __ Addu(elements_array_storage, elements_array_storage, kPointerSize);
-
- // Calculate elements array and elements array end.
- // result: JSObject
- // elements_array_storage: elements array element storage
- // array_size: smi-tagged size of elements array
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
- __ sll(elements_array_end, array_size, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(elements_array_end, elements_array_storage, elements_array_end);
-
- // Fill the allocated FixedArray with the hole value if requested.
- // result: JSObject
- // elements_array_storage: elements array element storage
- // elements_array_end: start of next object
- if (fill_with_hole) {
- Label loop, entry;
- __ LoadRoot(scratch1, Heap::kTheHoleValueRootIndex);
- __ Branch(&entry);
- __ bind(&loop);
- __ sw(scratch1, MemOperand(elements_array_storage));
- __ Addu(elements_array_storage, elements_array_storage, kPointerSize);
-
- __ bind(&entry);
- __ Branch(&loop, lt, elements_array_storage, Operand(elements_array_end));
- }
-}
-
-
-// Create a new array for the built-in Array function. This function allocates
-// the JSArray object and the FixedArray elements array and initializes these.
-// If the Array cannot be constructed in native code the runtime is called. This
-// function assumes the following state:
-// a0: argc
-// a1: constructor (built-in Array function)
-// ra: return address
-// sp[0]: last argument
-// This function is used for both construct and normal calls of Array. The only
-// difference between handling a construct call and a normal call is that for a
-// construct call the constructor function in a1 needs to be preserved for
-// entering the generic code. In both cases argc in a0 needs to be preserved.
-// Both registers are preserved by this code so no need to differentiate between
-// construct call and normal call.
-static void ArrayNativeCode(MacroAssembler* masm,
- Label* call_generic_code) {
- Counters* counters = masm->isolate()->counters();
- Label argc_one_or_more, argc_two_or_more, not_empty_array, empty_array,
- has_non_smi_element, finish, cant_transition_map, not_double;
-
- // Check for array construction with zero arguments or one.
- __ Branch(&argc_one_or_more, ne, a0, Operand(zero_reg));
- // Handle construction of an empty array.
- __ bind(&empty_array);
- AllocateEmptyJSArray(masm,
- a1,
- a2,
- a3,
- t0,
- t1,
- call_generic_code);
- __ IncrementCounter(counters->array_function_native(), 1, a3, t0);
- // Set up return value, remove receiver from stack and return.
- __ mov(v0, a2);
- __ Addu(sp, sp, Operand(kPointerSize));
- __ Ret();
-
- // Check for one argument. Bail out if argument is not smi or if it is
- // negative.
- __ bind(&argc_one_or_more);
- __ Branch(&argc_two_or_more, ne, a0, Operand(1));
-
- STATIC_ASSERT(kSmiTag == 0);
- __ lw(a2, MemOperand(sp)); // Get the argument from the stack.
- __ Branch(&not_empty_array, ne, a2, Operand(zero_reg));
- __ Drop(1); // Adjust stack.
- __ mov(a0, zero_reg); // Treat this as a call with argc of zero.
- __ Branch(&empty_array);
-
- __ bind(&not_empty_array);
- __ And(a3, a2, Operand(kIntptrSignBit | kSmiTagMask));
- __ Branch(call_generic_code, eq, a3, Operand(zero_reg));
-
- // Handle construction of an empty array of a certain size. Bail out if size
- // is too large to actually allocate an elements array.
- STATIC_ASSERT(kSmiTag == 0);
- __ Branch(call_generic_code, Ugreater_equal, a2,
- Operand(JSObject::kInitialMaxFastElementArray << kSmiTagSize));
-
- // a0: argc
- // a1: constructor
- // a2: array_size (smi)
- // sp[0]: argument
- AllocateJSArray(masm,
- a1,
- a2,
- a3,
- t0,
- t1,
- t2,
- t3,
- true,
- call_generic_code);
- __ IncrementCounter(counters->array_function_native(), 1, a2, t0);
-
- // Set up return value, remove receiver and argument from stack and return.
- __ mov(v0, a3);
- __ Addu(sp, sp, Operand(2 * kPointerSize));
- __ Ret();
-
- // Handle construction of an array from a list of arguments.
- __ bind(&argc_two_or_more);
- __ sll(a2, a0, kSmiTagSize); // Convert argc to a smi.
-
- // a0: argc
- // a1: constructor
- // a2: array_size (smi)
- // sp[0]: last argument
- AllocateJSArray(masm,
- a1,
- a2,
- a3,
- t0,
- t1,
- t2,
- t3,
- false,
- call_generic_code);
- __ IncrementCounter(counters->array_function_native(), 1, a2, t2);
-
- // Fill arguments as array elements. Copy from the top of the stack (last
- // element) to the array backing store filling it backwards. Note:
- // elements_array_end points after the backing store.
- // a0: argc
- // a3: JSArray
- // t0: elements_array storage start (untagged)
- // t1: elements_array_end (untagged)
- // sp[0]: last argument
-
- Label loop, entry;
- __ Branch(USE_DELAY_SLOT, &entry);
- __ mov(t3, sp);
- __ bind(&loop);
- __ lw(a2, MemOperand(t3));
- if (FLAG_smi_only_arrays) {
- __ JumpIfNotSmi(a2, &has_non_smi_element);
- }
- __ Addu(t3, t3, kPointerSize);
- __ Addu(t1, t1, -kPointerSize);
- __ sw(a2, MemOperand(t1));
- __ bind(&entry);
- __ Branch(&loop, lt, t0, Operand(t1));
-
- __ bind(&finish);
- __ mov(sp, t3);
-
- // Remove caller arguments and receiver from the stack, setup return value and
- // return.
- // a0: argc
- // a3: JSArray
- // sp[0]: receiver
- __ Addu(sp, sp, Operand(kPointerSize));
- __ mov(v0, a3);
- __ Ret();
-
- __ bind(&has_non_smi_element);
- // Double values are handled by the runtime.
- __ CheckMap(
- a2, t5, Heap::kHeapNumberMapRootIndex, &not_double, DONT_DO_SMI_CHECK);
- __ bind(&cant_transition_map);
- __ UndoAllocationInNewSpace(a3, t0);
- __ Branch(call_generic_code);
-
- __ bind(&not_double);
- // Transition FAST_SMI_ELEMENTS to FAST_ELEMENTS.
- // a3: JSArray
- __ lw(a2, FieldMemOperand(a3, HeapObject::kMapOffset));
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_ELEMENTS,
- a2,
- t5,
- &cant_transition_map);
- __ sw(a2, FieldMemOperand(a3, HeapObject::kMapOffset));
- __ RecordWriteField(a3,
- HeapObject::kMapOffset,
- a2,
- t5,
- kRAHasNotBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- Label loop2;
- __ bind(&loop2);
- __ lw(a2, MemOperand(t3));
- __ Addu(t3, t3, kPointerSize);
- __ Subu(t1, t1, kPointerSize);
- __ sw(a2, MemOperand(t1));
- __ Branch(&loop2, lt, t0, Operand(t1));
- __ Branch(&finish);
-}
-
-
void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : number of arguments
@@ -495,24 +123,18 @@ void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
// Initial map for the builtin InternalArray functions should be maps.
__ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
__ And(t0, a2, Operand(kSmiTagMask));
- __ Assert(ne, "Unexpected initial map for InternalArray function",
+ __ Assert(ne, kUnexpectedInitialMapForInternalArrayFunction,
t0, Operand(zero_reg));
__ GetObjectType(a2, a3, t0);
- __ Assert(eq, "Unexpected initial map for InternalArray function",
+ __ Assert(eq, kUnexpectedInitialMapForInternalArrayFunction,
t0, Operand(MAP_TYPE));
}
// Run the native code for the InternalArray function called as a normal
// function.
- ArrayNativeCode(masm, &generic_array_code);
-
- // Jump to the generic array code if the specialized code cannot handle the
- // construction.
- __ bind(&generic_array_code);
-
- Handle<Code> array_code =
- masm->isolate()->builtins()->InternalArrayCodeGeneric();
- __ Jump(array_code, RelocInfo::CODE_TARGET);
+ // Tail call a stub.
+ InternalArrayConstructorStub stub(masm->isolate());
+ __ TailCallStub(&stub);
}
@@ -531,58 +153,21 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
// Initial map for the builtin Array functions should be maps.
__ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
__ And(t0, a2, Operand(kSmiTagMask));
- __ Assert(ne, "Unexpected initial map for Array function (1)",
+ __ Assert(ne, kUnexpectedInitialMapForArrayFunction1,
t0, Operand(zero_reg));
__ GetObjectType(a2, a3, t0);
- __ Assert(eq, "Unexpected initial map for Array function (2)",
+ __ Assert(eq, kUnexpectedInitialMapForArrayFunction2,
t0, Operand(MAP_TYPE));
}
// Run the native code for the Array function called as a normal function.
- ArrayNativeCode(masm, &generic_array_code);
-
- // Jump to the generic array code if the specialized code cannot handle
- // the construction.
- __ bind(&generic_array_code);
-
- Handle<Code> array_code =
- masm->isolate()->builtins()->ArrayCodeGeneric();
- __ Jump(array_code, RelocInfo::CODE_TARGET);
-}
-
-
-void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a0 : number of arguments
- // -- a1 : constructor function
- // -- ra : return address
- // -- sp[...]: constructor arguments
- // -----------------------------------
- Label generic_constructor;
-
- if (FLAG_debug_code) {
- // The array construct code is only set for the builtin and internal
- // Array functions which always have a map.
- // Initial map for the builtin Array function should be a map.
- __ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
- __ And(t0, a2, Operand(kSmiTagMask));
- __ Assert(ne, "Unexpected initial map for Array function (3)",
- t0, Operand(zero_reg));
- __ GetObjectType(a2, a3, t0);
- __ Assert(eq, "Unexpected initial map for Array function (4)",
- t0, Operand(MAP_TYPE));
- }
-
- // Run the native code for the Array function called as a constructor.
- ArrayNativeCode(masm, &generic_constructor);
-
- // Jump to the generic construct code in case the specialized code cannot
- // handle the construction.
- __ bind(&generic_constructor);
-
- Handle<Code> generic_construct_stub =
- masm->isolate()->builtins()->JSConstructStubGeneric();
- __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
+ // Tail call a stub.
+ Handle<Object> undefined_sentinel(
+ masm->isolate()->heap()->undefined_value(),
+ masm->isolate());
+ __ li(a2, Operand(undefined_sentinel));
+ ArrayConstructorStub stub(masm->isolate());
+ __ TailCallStub(&stub);
}
@@ -600,7 +185,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
Register function = a1;
if (FLAG_debug_code) {
__ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, a2);
- __ Assert(eq, "Unexpected String function", function, Operand(a2));
+ __ Assert(eq, kUnexpectedStringFunction, function, Operand(a2));
}
// Load the first arguments in a0 and get rid of the rest.
@@ -616,15 +201,12 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
Register argument = a2;
Label not_cached, argument_is_string;
- NumberToStringStub::GenerateLookupNumberStringCache(
- masm,
- a0, // Input.
- argument, // Result.
- a3, // Scratch.
- t0, // Scratch.
- t1, // Scratch.
- false, // Is it a Smi?
- &not_cached);
+ __ LookupNumberStringCache(a0, // Input.
+ argument, // Result.
+ a3, // Scratch.
+ t0, // Scratch.
+ t1, // Scratch.
+ &not_cached);
__ IncrementCounter(counters->string_ctor_cached_number(), 1, a3, t0);
__ bind(&argument_is_string);
@@ -635,22 +217,22 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
// -----------------------------------
Label gc_required;
- __ AllocateInNewSpace(JSValue::kSize,
- v0, // Result.
- a3, // Scratch.
- t0, // Scratch.
- &gc_required,
- TAG_OBJECT);
+ __ Allocate(JSValue::kSize,
+ v0, // Result.
+ a3, // Scratch.
+ t0, // Scratch.
+ &gc_required,
+ TAG_OBJECT);
// Initialising the String Object.
Register map = a3;
__ LoadGlobalFunctionInitialMap(function, map, t0);
if (FLAG_debug_code) {
__ lbu(t0, FieldMemOperand(map, Map::kInstanceSizeOffset));
- __ Assert(eq, "Unexpected string wrapper instance size",
+ __ Assert(eq, kUnexpectedStringWrapperInstanceSize,
t0, Operand(JSValue::kSize >> kPointerSizeLog2));
__ lbu(t0, FieldMemOperand(map, Map::kUnusedPropertyFieldsOffset));
- __ Assert(eq, "Unexpected unused properties of string wrapper",
+ __ Assert(eq, kUnexpectedUnusedPropertiesOfStringWrapper,
t0, Operand(zero_reg));
}
__ sw(map, FieldMemOperand(v0, HeapObject::kMapOffset));
@@ -688,7 +270,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
__ IncrementCounter(counters->string_ctor_conversions(), 1, a3, t0);
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(v0);
+ __ push(a0);
__ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
}
__ pop(function);
@@ -698,7 +280,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
// Load the empty string into a2, remove the receiver from the
// stack, and jump back to the case where the argument is a string.
__ bind(&no_arguments);
- __ LoadRoot(argument, Heap::kEmptyStringRootIndex);
+ __ LoadRoot(argument, Heap::kempty_stringRootIndex);
__ Drop(1);
__ Branch(&argument_is_string);
@@ -715,6 +297,24 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
}
+static void CallRuntimePassFunction(MacroAssembler* masm,
+ Runtime::FunctionId function_id) {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // Push a copy of the function onto the stack.
+ __ push(a1);
+ // Push call kind information.
+ __ push(t1);
+ // Function is also the parameter to the runtime call.
+ __ push(a1);
+
+ __ CallRuntime(function_id, 1);
+ // Restore call kind information.
+ __ pop(t1);
+ // Restore receiver.
+ __ pop(a1);
+}
+
+
static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
__ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kCodeOffset));
@@ -724,30 +324,27 @@ static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
void Builtins::Generate_InRecompileQueue(MacroAssembler* masm) {
+ // Checking whether the queued function is ready for install is optional,
+ // since we come across interrupts and stack checks elsewhere. However,
+ // not checking may delay installing ready functions, and always checking
+ // would be quite expensive. A good compromise is to first check against the
+ // stack limit as a cue for an interrupt signal.
+ Label ok;
+ __ LoadRoot(t0, Heap::kStackLimitRootIndex);
+ __ Branch(&ok, hs, sp, Operand(t0));
+
+ CallRuntimePassFunction(masm, Runtime::kTryInstallRecompiledCode);
+ // Tail call to returned code.
+ __ Addu(at, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(at);
+
+ __ bind(&ok);
GenerateTailCallToSharedCode(masm);
}
-void Builtins::Generate_ParallelRecompile(MacroAssembler* masm) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Push a copy of the function onto the stack.
- __ push(a1);
- // Push call kind information.
- __ push(t1);
-
- __ push(a1); // Function is also the parameter to the runtime call.
- __ CallRuntime(Runtime::kParallelRecompile, 1);
-
- // Restore call kind information.
- __ pop(t1);
- // Restore receiver.
- __ pop(a1);
-
- // Tear down internal frame.
- }
-
+void Builtins::Generate_ConcurrentRecompile(MacroAssembler* masm) {
+ CallRuntimePassFunction(masm, Runtime::kConcurrentRecompile);
GenerateTailCallToSharedCode(masm);
}
@@ -840,7 +437,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// a1: constructor function
// a2: initial map
__ lbu(a3, FieldMemOperand(a2, Map::kInstanceSizeOffset));
- __ AllocateInNewSpace(a3, t4, t5, t6, &rt_call, SIZE_IN_WORDS);
+ __ Allocate(a3, t4, t5, t6, &rt_call, SIZE_IN_WORDS);
// Allocated the JSObject, now initialize the fields. Map is set to
// initial map and properties and elements are set to empty fixed array.
@@ -876,7 +473,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ addu(a0, t5, t0);
// a0: offset of first field after pre-allocated fields
if (FLAG_debug_code) {
- __ Assert(le, "Unexpected number of pre-allocated property fields.",
+ __ Assert(le, kUnexpectedNumberOfPreAllocatedPropertyFields,
a0, Operand(t6));
}
__ InitializeFieldsWithFiller(t5, a0, t7);
@@ -909,7 +506,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Done if no extra properties are to be allocated.
__ Branch(&allocated, eq, a3, Operand(zero_reg));
- __ Assert(greater_equal, "Property allocation count failed.",
+ __ Assert(greater_equal, kPropertyAllocationCountFailed,
a3, Operand(zero_reg));
// Scale the number of elements by pointer size and add the header for
@@ -919,7 +516,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// t4: JSObject
// t5: start of next object
__ Addu(a0, a3, Operand(FixedArray::kHeaderSize / kPointerSize));
- __ AllocateInNewSpace(
+ __ Allocate(
a0,
t5,
t6,
@@ -956,7 +553,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
} else if (FLAG_debug_code) {
__ LoadRoot(t8, Heap::kUndefinedValueRootIndex);
- __ Assert(eq, "Undefined value not loaded.", t7, Operand(t8));
+ __ Assert(eq, kUndefinedValueNotLoaded, t7, Operand(t8));
}
__ jmp(&entry);
__ bind(&loop);
@@ -1072,7 +669,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// If the type of the result (stored in its map) is less than
// FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
- __ GetObjectType(v0, a3, a3);
+ __ GetObjectType(v0, a1, a3);
__ Branch(&exit, greater_equal, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
// Throw away the result of the constructor invocation and use the
@@ -1126,6 +723,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// -- a3: argc
// -- s0: argv
// -----------------------------------
+ ProfileEntryHookStub::MaybeCallEntryHook(masm);
// Clear the context before we push it when entering the JS frame.
__ mov(cp, zero_reg);
@@ -1171,6 +769,10 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Invoke the code and pass argc as a0.
__ mov(a0, a3);
if (is_construct) {
+ // No type feedback cell is available
+ Handle<Object> undefined_sentinel(
+ masm->isolate()->heap()->undefined_value(), masm->isolate());
+ __ li(a2, Operand(undefined_sentinel));
CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
__ CallStub(&stub);
} else {
@@ -1197,61 +799,122 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
+ CallRuntimePassFunction(masm, Runtime::kLazyCompile);
+ // Do a tail-call of the compiled function.
+ __ Addu(t9, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(t9);
+}
- // Preserve the function.
- __ push(a1);
- // Push call kind information.
- __ push(t1);
- // Push the function on the stack as the argument to the runtime function.
- __ push(a1);
- // Call the runtime function.
- __ CallRuntime(Runtime::kLazyCompile, 1);
- // Calculate the entry point.
- __ addiu(t9, v0, Code::kHeaderSize - kHeapObjectTag);
+void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
+ CallRuntimePassFunction(masm, Runtime::kLazyRecompile);
+ // Do a tail-call of the compiled function.
+ __ Addu(t9, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(t9);
+}
- // Restore call kind information.
- __ pop(t1);
- // Restore saved function.
- __ pop(a1);
- // Tear down temporary frame.
- }
+static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
+  // For now, we are relying on the fact that make_code_young doesn't do any
+  // garbage collection, which allows us to save/restore the registers without
+  // worrying about which of them contain pointers. To keep the code fast we
+  // also don't build an internal frame, since we shouldn't have to do stack
+  // crawls in MakeCodeYoung. This seems a bit fragile.
+
+ __ mov(a0, ra);
+ // Adjust a0 to point to the head of the PlatformCodeAge sequence
+ __ Subu(a0, a0,
+ Operand((kNoCodeAgeSequenceLength - 1) * Assembler::kInstrSize));
+ // Restore the original return address of the function
+ __ mov(ra, at);
+
+ // The following registers must be saved and restored when calling through to
+ // the runtime:
+ // a0 - contains return address (beginning of patch sequence)
+ // a1 - isolate
+ RegList saved_regs =
+ (a0.bit() | a1.bit() | ra.bit() | fp.bit()) & ~sp.bit();
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ MultiPush(saved_regs);
+ __ PrepareCallCFunction(1, 0, a2);
+ __ li(a1, Operand(ExternalReference::isolate_address(masm->isolate())));
+ __ CallCFunction(
+ ExternalReference::get_make_code_young_function(masm->isolate()), 2);
+ __ MultiPop(saved_regs);
+ __ Jump(a0);
+}
- // Do a tail-call of the compiled function.
- __ Jump(t9);
+#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
+void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
+ MacroAssembler* masm) { \
+ GenerateMakeCodeYoungAgainCommon(masm); \
+} \
+void Builtins::Generate_Make##C##CodeYoungAgainOddMarking( \
+ MacroAssembler* masm) { \
+ GenerateMakeCodeYoungAgainCommon(masm); \
}
+CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
+#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
+
+
+void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
+  // For now, as in GenerateMakeCodeYoungAgainCommon, we rely on the fact that
+  // the mark_code_as_executed runtime entry (like make_code_young) doesn't do
+  // any garbage collection, which allows us to save/restore the registers
+  // without worrying about which of them contain pointers.
+
+ __ mov(a0, ra);
+ // Adjust a0 to point to the head of the PlatformCodeAge sequence
+ __ Subu(a0, a0,
+ Operand((kNoCodeAgeSequenceLength - 1) * Assembler::kInstrSize));
+ // Restore the original return address of the function
+ __ mov(ra, at);
+
+ // The following registers must be saved and restored when calling through to
+ // the runtime:
+ // a0 - contains return address (beginning of patch sequence)
+ // a1 - isolate
+ RegList saved_regs =
+ (a0.bit() | a1.bit() | ra.bit() | fp.bit()) & ~sp.bit();
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ MultiPush(saved_regs);
+ __ PrepareCallCFunction(1, 0, a2);
+ __ li(a1, Operand(ExternalReference::isolate_address(masm->isolate())));
+ __ CallCFunction(
+ ExternalReference::get_mark_code_as_executed_function(masm->isolate()),
+ 2);
+ __ MultiPop(saved_regs);
+ // Perform prologue operations usually performed by the young code stub.
+ __ Push(ra, fp, cp, a1);
+ __ Addu(fp, sp, Operand(2 * kPointerSize));
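+  // fp now points at the saved caller fp, two words above sp (with cp and the
+  // function below it), as a normal JS frame prologue would leave it.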
-void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
+ // Jump to point after the code-age stub.
+ __ Addu(a0, a0, Operand((kNoCodeAgeSequenceLength) * Assembler::kInstrSize));
+ __ Jump(a0);
+}
- // Preserve the function.
- __ push(a1);
- // Push call kind information.
- __ push(t1);
- // Push the function on the stack as the argument to the runtime function.
- __ push(a1);
- __ CallRuntime(Runtime::kLazyRecompile, 1);
- // Calculate the entry point.
- __ Addu(t9, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
+void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
+ GenerateMakeCodeYoungAgainCommon(masm);
+}
+
- // Restore call kind information.
- __ pop(t1);
- // Restore saved function.
- __ pop(a1);
+void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
- // Tear down temporary frame.
+    // Preserve registers across the notification; this is important for
+    // compiled stubs that tail-call the runtime on deopt, passing their
+    // parameters in registers.
+ __ MultiPush(kJSCallerSaved | kCalleeSaved);
+ // Pass the function and deoptimization type to the runtime system.
+ __ CallRuntime(Runtime::kNotifyStubFailure, 0);
+ __ MultiPop(kJSCallerSaved | kCalleeSaved);
}
- // Do a tail-call of the compiled function.
- __ Jump(t9);
+ __ Addu(sp, sp, Operand(kPointerSize)); // Ignore state
+ __ Jump(ra); // Jump to miss handler
}
@@ -1272,15 +935,17 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
Label with_tos_register, unknown_state;
__ Branch(&with_tos_register,
ne, t2, Operand(FullCodeGenerator::NO_REGISTERS));
+ __ Ret(USE_DELAY_SLOT);
+  // Safe to fill the delay slot: Addu emits a single instruction.
__ Addu(sp, sp, Operand(1 * kPointerSize)); // Remove state.
- __ Ret();
__ bind(&with_tos_register);
__ lw(v0, MemOperand(sp, 1 * kPointerSize));
__ Branch(&unknown_state, ne, t2, Operand(FullCodeGenerator::TOS_REG));
+ __ Ret(USE_DELAY_SLOT);
+  // Safe to fill the delay slot: Addu emits a single instruction.
__ Addu(sp, sp, Operand(2 * kPointerSize)); // Remove state.
- __ Ret();
__ bind(&unknown_state);
__ stop("no cases left");
@@ -1292,56 +957,72 @@ void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
}
+void Builtins::Generate_NotifySoftDeoptimized(MacroAssembler* masm) {
+ Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
+}
+
+
void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
}
-void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
- // For now, we are relying on the fact that Runtime::NotifyOSR
- // doesn't do any garbage collection which allows us to save/restore
- // the registers without worrying about which of them contain
- // pointers. This seems a bit fragile.
- RegList saved_regs =
- (kJSCallerSaved | kCalleeSaved | ra.bit() | fp.bit()) & ~sp.bit();
- __ MultiPush(saved_regs);
+void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
+ // Lookup the function in the JavaScript frame.
+ __ lw(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kNotifyOSR, 0);
+ // Lookup and calculate pc offset.
+ __ lw(a1, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
+ __ lw(a2, FieldMemOperand(a0, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kCodeOffset));
+ __ Subu(a1, a1, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Subu(a1, a1, a2);
+ __ SmiTag(a1);
+
+ // Pass both function and pc offset as arguments.
+ __ push(a0);
+ __ push(a1);
+ __ CallRuntime(Runtime::kCompileForOnStackReplacement, 2);
}
- __ MultiPop(saved_regs);
+
+ // If the code object is null, just return to the unoptimized code.
+ __ Ret(eq, v0, Operand(Smi::FromInt(0)));
+
+ // Load deoptimization data from the code object.
+ // <deopt_data> = <code>[#deoptimization_data_offset]
+ __ lw(a1, MemOperand(v0, Code::kDeoptimizationDataOffset - kHeapObjectTag));
+
+ // Load the OSR entrypoint offset from the deoptimization data.
+ // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
+ __ lw(a1, MemOperand(a1, FixedArray::OffsetOfElementAt(
+ DeoptimizationInputData::kOsrPcOffsetIndex) - kHeapObjectTag));
+ __ SmiUntag(a1);
+
+ // Compute the target address = code_obj + header_size + osr_offset
+ // <entry_addr> = <code_obj> + #header_size + <osr_offset>
+ __ addu(v0, v0, a1);
+ __ addiu(ra, v0, Code::kHeaderSize - kHeapObjectTag);
+
+ // And "return" to the OSR entry point of the function.
__ Ret();
}
-void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
- CpuFeatures::TryForceFeatureScope scope(VFP3);
- if (!CpuFeatures::IsSupported(FPU)) {
- __ Abort("Unreachable code: Cannot optimize without FPU support.");
- return;
- }
-
- // Lookup the function in the JavaScript frame and push it as an
- // argument to the on-stack replacement function.
- __ lw(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
+  // We check the stack limit as an indicator that recompilation might be done.
+ Label ok;
+ __ LoadRoot(at, Heap::kStackLimitRootIndex);
+ __ Branch(&ok, hs, sp, Operand(at));
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(a0);
- __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
+ __ CallRuntime(Runtime::kStackGuard, 0);
}
+ __ Jump(masm->isolate()->builtins()->OnStackReplacement(),
+ RelocInfo::CODE_TARGET);
- // If the result was -1 it means that we couldn't optimize the
- // function. Just return and continue in the unoptimized version.
- __ Ret(eq, v0, Operand(Smi::FromInt(-1)));
-
- // Untag the AST id and push it on the stack.
- __ SmiUntag(v0);
- __ push(v0);
-
- // Generate the code for doing the frame-to-frame translation using
- // the deoptimizer infrastructure.
- Deoptimizer::EntryGenerator generator(masm, Deoptimizer::OSR);
- generator.Generate();
+ __ bind(&ok);
+ __ Ret();
}
@@ -1371,7 +1052,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// a0: actual number of arguments
// a1: function
Label shift_arguments;
- __ li(t0, Operand(0, RelocInfo::NONE)); // Indicate regular JS_FUNCTION.
+ __ li(t0, Operand(0, RelocInfo::NONE32)); // Indicate regular JS_FUNCTION.
{ Label convert_to_object, use_global_receiver, patch_receiver;
// Change context eagerly in case we need the global receiver.
__ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
@@ -1425,7 +1106,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ sll(at, a0, kPointerSizeLog2);
__ addu(at, sp, at);
__ lw(a1, MemOperand(at));
- __ li(t0, Operand(0, RelocInfo::NONE));
+ __ li(t0, Operand(0, RelocInfo::NONE32));
__ Branch(&patch_receiver);
// Use the global receiver object from the called function as the
@@ -1448,11 +1129,11 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// 3b. Check for function proxy.
__ bind(&slow);
- __ li(t0, Operand(1, RelocInfo::NONE)); // Indicate function proxy.
+ __ li(t0, Operand(1, RelocInfo::NONE32)); // Indicate function proxy.
__ Branch(&shift_arguments, eq, a2, Operand(JS_FUNCTION_PROXY_TYPE));
__ bind(&non_function);
- __ li(t0, Operand(2, RelocInfo::NONE)); // Indicate non-function.
+ __ li(t0, Operand(2, RelocInfo::NONE32)); // Indicate non-function.
// 3c. Patch the first argument when calling a non-function. The
// CALL_NON_FUNCTION builtin expects the non-function callee as
@@ -1683,7 +1364,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ bind(&call_proxy);
__ push(a1); // Add function proxy as last argument.
__ Addu(a0, a0, Operand(1));
- __ li(a2, Operand(0, RelocInfo::NONE));
+ __ li(a2, Operand(0, RelocInfo::NONE32));
__ SetCallKind(t1, CALL_AS_METHOD);
__ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY);
__ Call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
diff --git a/deps/v8/src/mips/code-stubs-mips.cc b/deps/v8/src/mips/code-stubs-mips.cc
index ca3182645..e334b2896 100644
--- a/deps/v8/src/mips/code-stubs-mips.cc
+++ b/deps/v8/src/mips/code-stubs-mips.cc
@@ -27,197 +27,308 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_MIPS)
+#if V8_TARGET_ARCH_MIPS
#include "bootstrapper.h"
#include "code-stubs.h"
#include "codegen.h"
#include "regexp-macro-assembler.h"
+#include "stub-cache.h"
namespace v8 {
namespace internal {
-#define __ ACCESS_MASM(masm)
+void FastNewClosureStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
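+  // Each descriptor records the registers that carry the stub's parameters
+  // and the runtime entry to call if the stub has to bail out to the runtime.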
+ static Register registers[] = { a2 };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kNewClosureFromStubFailure)->entry;
+}
-static void EmitIdenticalObjectComparison(MacroAssembler* masm,
- Label* slow,
- Condition cc,
- bool never_nan_nan);
-static void EmitSmiNonsmiComparison(MacroAssembler* masm,
- Register lhs,
- Register rhs,
- Label* rhs_not_nan,
- Label* slow,
- bool strict);
-static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc);
-static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
- Register lhs,
- Register rhs);
+void ToNumberStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { a0 };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
-// Check if the operand is a heap number.
-static void EmitCheckForHeapNumber(MacroAssembler* masm, Register operand,
- Register scratch1, Register scratch2,
- Label* not_a_heap_number) {
- __ lw(scratch1, FieldMemOperand(operand, HeapObject::kMapOffset));
- __ LoadRoot(scratch2, Heap::kHeapNumberMapRootIndex);
- __ Branch(not_a_heap_number, ne, scratch1, Operand(scratch2));
+
+void NumberToStringStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { a0 };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kNumberToString)->entry;
}
-void ToNumberStub::Generate(MacroAssembler* masm) {
- // The ToNumber stub takes one argument in a0.
- Label check_heap_number, call_builtin;
- __ JumpIfNotSmi(a0, &check_heap_number);
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, a0);
+void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { a3, a2, a1 };
+ descriptor->register_param_count_ = 3;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kCreateArrayLiteralShallow)->entry;
+}
- __ bind(&check_heap_number);
- EmitCheckForHeapNumber(masm, a0, a1, t0, &call_builtin);
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, a0);
- __ bind(&call_builtin);
- __ push(a0);
- __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
+void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { a3, a2, a1, a0 };
+ descriptor->register_param_count_ = 4;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry;
}
-void FastNewClosureStub::Generate(MacroAssembler* masm) {
- // Create a new closure from the given function info in new
- // space. Set the context to the current context in cp.
- Counters* counters = masm->isolate()->counters();
+void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { a2 };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
- Label gc;
- // Pop the function info from the stack.
- __ pop(a3);
+void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { a1, a0 };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
+}
- // Attempt to allocate new JSFunction in new space.
- __ AllocateInNewSpace(JSFunction::kSize,
- v0,
- a1,
- a2,
- &gc,
- TAG_OBJECT);
- __ IncrementCounter(counters->fast_new_closure_total(), 1, t2, t3);
+void LoadFieldStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { a0 };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
- int map_index = (language_mode_ == CLASSIC_MODE)
- ? Context::FUNCTION_MAP_INDEX
- : Context::STRICT_MODE_FUNCTION_MAP_INDEX;
- // Compute the function map in the current native context and set that
- // as the map of the allocated object.
- __ lw(a2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ lw(a2, FieldMemOperand(a2, GlobalObject::kNativeContextOffset));
- __ lw(t1, MemOperand(a2, Context::SlotOffset(map_index)));
- __ sw(t1, FieldMemOperand(v0, HeapObject::kMapOffset));
+void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { a1 };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
- // Initialize the rest of the function. We don't have to update the
- // write barrier because the allocated object is in new space.
- __ LoadRoot(a1, Heap::kEmptyFixedArrayRootIndex);
- __ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
- __ sw(a1, FieldMemOperand(v0, JSObject::kPropertiesOffset));
- __ sw(a1, FieldMemOperand(v0, JSObject::kElementsOffset));
- __ sw(t1, FieldMemOperand(v0, JSFunction::kPrototypeOrInitialMapOffset));
- __ sw(a3, FieldMemOperand(v0, JSFunction::kSharedFunctionInfoOffset));
- __ sw(cp, FieldMemOperand(v0, JSFunction::kContextOffset));
- __ sw(a1, FieldMemOperand(v0, JSFunction::kLiteralsOffset));
-
- // Initialize the code pointer in the function to be the one
- // found in the shared function info object.
- // But first check if there is an optimized version for our context.
- Label check_optimized;
- Label install_unoptimized;
- if (FLAG_cache_optimized_code) {
- __ lw(a1,
- FieldMemOperand(a3, SharedFunctionInfo::kOptimizedCodeMapOffset));
- __ And(at, a1, a1);
- __ Branch(&check_optimized, ne, at, Operand(zero_reg));
+
+void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { a2, a1, a0 };
+ descriptor->register_param_count_ = 3;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(KeyedStoreIC_MissFromStubFailure);
+}
+
+
+void TransitionElementsKindStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { a0, a1 };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ Address entry =
+ Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry;
+ descriptor->deoptimization_handler_ = FUNCTION_ADDR(entry);
+}
+
+
+void CompareNilICStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { a0 };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(CompareNilIC_Miss);
+ descriptor->SetMissHandler(
+ ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate));
+}
+
+
+static void InitializeArrayConstructorDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor,
+ int constant_stack_parameter_count) {
+ // register state
+ // a0 -- number of arguments
+ // a1 -- function
+ // a2 -- type info cell with elements kind
+ static Register registers[] = { a1, a2 };
+ descriptor->register_param_count_ = 2;
+ if (constant_stack_parameter_count != 0) {
+    // The stack parameter count must account for the constructor pointer and
+    // the single argument; it is passed dynamically in a0.
+ descriptor->stack_parameter_count_ = a0;
}
- __ bind(&install_unoptimized);
- __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
- __ sw(t0, FieldMemOperand(v0, JSFunction::kNextFunctionLinkOffset));
- __ lw(a3, FieldMemOperand(a3, SharedFunctionInfo::kCodeOffset));
- __ Addu(a3, a3, Operand(Code::kHeaderSize - kHeapObjectTag));
+ descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
+ descriptor->register_params_ = registers;
+ descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kArrayConstructor)->entry;
+}
- // Return result. The argument function info has been popped already.
- __ sw(a3, FieldMemOperand(v0, JSFunction::kCodeEntryOffset));
- __ Ret();
- __ bind(&check_optimized);
+static void InitializeInternalArrayConstructorDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor,
+ int constant_stack_parameter_count) {
+ // register state
+ // a0 -- number of arguments
+ // a1 -- constructor function
+ static Register registers[] = { a1 };
+ descriptor->register_param_count_ = 1;
+
+ if (constant_stack_parameter_count != 0) {
+    // The stack parameter count must account for the constructor pointer and
+    // the single argument; it is passed dynamically in a0.
+ descriptor->stack_parameter_count_ = a0;
+ }
+ descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
+ descriptor->register_params_ = registers;
+ descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kInternalArrayConstructor)->entry;
+}
- __ IncrementCounter(counters->fast_new_closure_try_optimized(), 1, t2, t3);
- // a2 holds native context, a1 points to fixed array of 3-element entries
- // (native context, optimized code, literals).
- // The optimized code map must never be empty, so check the first elements.
- Label install_optimized;
- // Speculatively move code object into t0.
- __ lw(t0, FieldMemOperand(a1, FixedArray::kHeaderSize + kPointerSize));
- __ lw(t1, FieldMemOperand(a1, FixedArray::kHeaderSize));
- __ Branch(&install_optimized, eq, a2, Operand(t1));
+void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate, descriptor, 0);
+}
- // Iterate through the rest of map backwards. t0 holds an index as a Smi.
- Label loop;
- __ lw(t0, FieldMemOperand(a1, FixedArray::kLengthOffset));
- __ bind(&loop);
- // Do not double check first entry.
-
- __ Branch(&install_unoptimized, eq, t0,
- Operand(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
- __ Subu(t0, t0, Operand(
- Smi::FromInt(SharedFunctionInfo::kEntryLength))); // Skip an entry.
- __ Addu(t1, a1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ sll(at, t0, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(t1, t1, Operand(at));
- __ lw(t1, MemOperand(t1));
- __ Branch(&loop, ne, a2, Operand(t1));
- // Hit: fetch the optimized code.
- __ Addu(t1, a1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ sll(at, t0, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(t1, t1, Operand(at));
- __ Addu(t1, t1, Operand(kPointerSize));
- __ lw(t0, MemOperand(t1));
-
- __ bind(&install_optimized);
- __ IncrementCounter(counters->fast_new_closure_install_optimized(),
- 1, t2, t3);
-
- // TODO(fschneider): Idea: store proper code pointers in the map and either
- // unmangle them on marking or do nothing as the whole map is discarded on
- // major GC anyway.
- __ Addu(t0, t0, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ sw(t0, FieldMemOperand(v0, JSFunction::kCodeEntryOffset));
-
- // Now link a function into a list of optimized functions.
- __ lw(t0, ContextOperand(a2, Context::OPTIMIZED_FUNCTIONS_LIST));
-
- __ sw(t0, FieldMemOperand(v0, JSFunction::kNextFunctionLinkOffset));
- // No need for write barrier as JSFunction (eax) is in the new space.
-
- __ sw(v0, ContextOperand(a2, Context::OPTIMIZED_FUNCTIONS_LIST));
- // Store JSFunction (eax) into edx before issuing write barrier as
- // it clobbers all the registers passed.
- __ mov(t0, v0);
- __ RecordWriteContextSlot(
- a2,
- Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST),
- t0,
- a1,
- kRAHasNotBeenSaved,
- kDontSaveFPRegs);
-
- // Return result. The argument function info has been popped already.
- __ Ret();
- // Create a new closure through the slower runtime call.
- __ bind(&gc);
- __ LoadRoot(t0, Heap::kFalseValueRootIndex);
- __ Push(cp, a3, t0);
- __ TailCallRuntime(Runtime::kNewClosure, 3, 1);
+void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate, descriptor, 1);
+}
+
+
+void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate, descriptor, -1);
+}
+
+
+void ToBooleanStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { a0 };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(ToBooleanIC_Miss);
+ descriptor->SetMissHandler(
+ ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate));
+}
+
+
+void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 0);
+}
+
+
+void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 1);
+}
+
+
+void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(isolate, descriptor, -1);
+}
+
+
+void StoreGlobalStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { a1, a2, a0 };
+ descriptor->register_param_count_ = 3;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(StoreIC_MissFromStubFailure);
+}
+
+
+void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { a0, a3, a1, a2 };
+ descriptor->register_param_count_ = 4;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(ElementsTransitionAndStoreIC_Miss);
+}
+
+
+#define __ ACCESS_MASM(masm)
+
+
+static void EmitIdenticalObjectComparison(MacroAssembler* masm,
+ Label* slow,
+ Condition cc);
+static void EmitSmiNonsmiComparison(MacroAssembler* masm,
+ Register lhs,
+ Register rhs,
+ Label* rhs_not_nan,
+ Label* slow,
+ bool strict);
+static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
+ Register lhs,
+ Register rhs);
+
+
+void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
+ // Update the static counter each time a new code stub is generated.
+ Isolate* isolate = masm->isolate();
+ isolate->counters()->code_stubs()->Increment();
+
+ CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor(isolate);
+ int param_count = descriptor->register_param_count_;
+ {
+ // Call the runtime system in a fresh internal frame.
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ ASSERT(descriptor->register_param_count_ == 0 ||
+ a0.is(descriptor->register_params_[param_count - 1]));
+ // Push arguments
+ for (int i = 0; i < param_count; ++i) {
+ __ push(descriptor->register_params_[i]);
+ }
+ ExternalReference miss = descriptor->miss_handler();
+ __ CallExternalReference(miss, descriptor->register_param_count_);
+ }
+
+ __ Ret();
}
@@ -227,12 +338,7 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
int length = slots_ + Context::MIN_CONTEXT_SLOTS;
// Attempt to allocate the context in new space.
- __ AllocateInNewSpace(FixedArray::SizeFor(length),
- v0,
- a1,
- a2,
- &gc,
- TAG_OBJECT);
+ __ Allocate(FixedArray::SizeFor(length), v0, a1, a2, &gc, TAG_OBJECT);
// Load the function from the stack.
__ lw(a3, MemOperand(sp, 0));
@@ -276,8 +382,7 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
// Try to allocate the context in new space.
Label gc;
int length = slots_ + Context::MIN_CONTEXT_SLOTS;
- __ AllocateInNewSpace(FixedArray::SizeFor(length),
- v0, a1, a2, &gc, TAG_OBJECT);
+ __ Allocate(FixedArray::SizeFor(length), v0, a1, a2, &gc, TAG_OBJECT);
// Load the function from the stack.
__ lw(a3, MemOperand(sp, 0));
@@ -298,8 +403,7 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
Label after_sentinel;
__ JumpIfNotSmi(a3, &after_sentinel);
if (FLAG_debug_code) {
- const char* message = "Expected 0 as a Smi sentinel";
- __ Assert(eq, message, a3, Operand(zero_reg));
+ __ Assert(eq, kExpected0AsASmiSentinel, a3, Operand(zero_reg));
}
__ lw(a3, GlobalObjectOperand());
__ lw(a3, FieldMemOperand(a3, GlobalObject::kNativeContextOffset));
@@ -329,179 +433,12 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
}
-static void GenerateFastCloneShallowArrayCommon(
- MacroAssembler* masm,
- int length,
- FastCloneShallowArrayStub::Mode mode,
- Label* fail) {
- // Registers on entry:
- // a3: boilerplate literal array.
- ASSERT(mode != FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS);
-
- // All sizes here are multiples of kPointerSize.
- int elements_size = 0;
- if (length > 0) {
- elements_size = mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
- ? FixedDoubleArray::SizeFor(length)
- : FixedArray::SizeFor(length);
- }
- int size = JSArray::kSize + elements_size;
-
- // Allocate both the JS array and the elements array in one big
- // allocation. This avoids multiple limit checks.
- __ AllocateInNewSpace(size,
- v0,
- a1,
- a2,
- fail,
- TAG_OBJECT);
-
- // Copy the JS array part.
- for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
- if ((i != JSArray::kElementsOffset) || (length == 0)) {
- __ lw(a1, FieldMemOperand(a3, i));
- __ sw(a1, FieldMemOperand(v0, i));
- }
- }
-
- if (length > 0) {
- // Get hold of the elements array of the boilerplate and setup the
- // elements pointer in the resulting object.
- __ lw(a3, FieldMemOperand(a3, JSArray::kElementsOffset));
- __ Addu(a2, v0, Operand(JSArray::kSize));
- __ sw(a2, FieldMemOperand(v0, JSArray::kElementsOffset));
-
- // Copy the elements array.
- ASSERT((elements_size % kPointerSize) == 0);
- __ CopyFields(a2, a3, a1.bit(), elements_size / kPointerSize);
- }
-}
-
-void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
- // Stack layout on entry:
- //
- // [sp]: constant elements.
- // [sp + kPointerSize]: literal index.
- // [sp + (2 * kPointerSize)]: literals array.
-
- // Load boilerplate object into r3 and check if we need to create a
- // boilerplate.
- Label slow_case;
- __ lw(a3, MemOperand(sp, 2 * kPointerSize));
- __ lw(a0, MemOperand(sp, 1 * kPointerSize));
- __ Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ sll(t0, a0, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(t0, a3, t0);
- __ lw(a3, MemOperand(t0));
- __ LoadRoot(t1, Heap::kUndefinedValueRootIndex);
- __ Branch(&slow_case, eq, a3, Operand(t1));
-
- FastCloneShallowArrayStub::Mode mode = mode_;
- if (mode == CLONE_ANY_ELEMENTS) {
- Label double_elements, check_fast_elements;
- __ lw(v0, FieldMemOperand(a3, JSArray::kElementsOffset));
- __ lw(v0, FieldMemOperand(v0, HeapObject::kMapOffset));
- __ LoadRoot(t1, Heap::kFixedCOWArrayMapRootIndex);
- __ Branch(&check_fast_elements, ne, v0, Operand(t1));
- GenerateFastCloneShallowArrayCommon(masm, 0,
- COPY_ON_WRITE_ELEMENTS, &slow_case);
- // Return and remove the on-stack parameters.
- __ DropAndRet(3);
-
- __ bind(&check_fast_elements);
- __ LoadRoot(t1, Heap::kFixedArrayMapRootIndex);
- __ Branch(&double_elements, ne, v0, Operand(t1));
- GenerateFastCloneShallowArrayCommon(masm, length_,
- CLONE_ELEMENTS, &slow_case);
- // Return and remove the on-stack parameters.
- __ DropAndRet(3);
-
- __ bind(&double_elements);
- mode = CLONE_DOUBLE_ELEMENTS;
- // Fall through to generate the code to handle double elements.
- }
-
- if (FLAG_debug_code) {
- const char* message;
- Heap::RootListIndex expected_map_index;
- if (mode == CLONE_ELEMENTS) {
- message = "Expected (writable) fixed array";
- expected_map_index = Heap::kFixedArrayMapRootIndex;
- } else if (mode == CLONE_DOUBLE_ELEMENTS) {
- message = "Expected (writable) fixed double array";
- expected_map_index = Heap::kFixedDoubleArrayMapRootIndex;
- } else {
- ASSERT(mode == COPY_ON_WRITE_ELEMENTS);
- message = "Expected copy-on-write fixed array";
- expected_map_index = Heap::kFixedCOWArrayMapRootIndex;
- }
- __ push(a3);
- __ lw(a3, FieldMemOperand(a3, JSArray::kElementsOffset));
- __ lw(a3, FieldMemOperand(a3, HeapObject::kMapOffset));
- __ LoadRoot(at, expected_map_index);
- __ Assert(eq, message, a3, Operand(at));
- __ pop(a3);
- }
-
- GenerateFastCloneShallowArrayCommon(masm, length_, mode, &slow_case);
-
- // Return and remove the on-stack parameters.
- __ DropAndRet(3);
-
- __ bind(&slow_case);
- __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
-}
-
-
-void FastCloneShallowObjectStub::Generate(MacroAssembler* masm) {
- // Stack layout on entry:
- //
- // [sp]: object literal flags.
- // [sp + kPointerSize]: constant properties.
- // [sp + (2 * kPointerSize)]: literal index.
- // [sp + (3 * kPointerSize)]: literals array.
-
- // Load boilerplate object into a3 and check if we need to create a
- // boilerplate.
- Label slow_case;
- __ lw(a3, MemOperand(sp, 3 * kPointerSize));
- __ lw(a0, MemOperand(sp, 2 * kPointerSize));
- __ Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ sll(t0, a0, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(a3, t0, a3);
- __ lw(a3, MemOperand(a3));
- __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
- __ Branch(&slow_case, eq, a3, Operand(t0));
-
- // Check that the boilerplate contains only fast properties and we can
- // statically determine the instance size.
- int size = JSObject::kHeaderSize + length_ * kPointerSize;
- __ lw(a0, FieldMemOperand(a3, HeapObject::kMapOffset));
- __ lbu(a0, FieldMemOperand(a0, Map::kInstanceSizeOffset));
- __ Branch(&slow_case, ne, a0, Operand(size >> kPointerSizeLog2));
-
- // Allocate the JS object and copy header together with all in-object
- // properties from the boilerplate.
- __ AllocateInNewSpace(size, v0, a1, a2, &slow_case, TAG_OBJECT);
- for (int i = 0; i < size; i += kPointerSize) {
- __ lw(a1, FieldMemOperand(a3, i));
- __ sw(a1, FieldMemOperand(v0, i));
- }
-
- // Return and remove the on-stack parameters.
- __ DropAndRet(4);
-
- __ bind(&slow_case);
- __ TailCallRuntime(Runtime::kCreateObjectLiteralShallow, 4, 1);
-}
-
-
// Takes a Smi and converts to an IEEE 64 bit floating point value in two
// registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and
// 52 fraction bits (20 in the first word, 32 in the second). Zeros is a
// scratch register. Destroys the source register. No GC occurs during this
// stub so you don't have to set up the frame.
-class ConvertToDoubleStub : public CodeStub {
+class ConvertToDoubleStub : public PlatformCodeStub {
public:
ConvertToDoubleStub(Register result_reg_1,
Register result_reg_2,
@@ -595,510 +532,141 @@ void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
}
-void FloatingPointHelper::LoadSmis(MacroAssembler* masm,
- FloatingPointHelper::Destination destination,
- Register scratch1,
- Register scratch2) {
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
- __ sra(scratch1, a0, kSmiTagSize);
- __ mtc1(scratch1, f14);
- __ cvt_d_w(f14, f14);
- __ sra(scratch1, a1, kSmiTagSize);
- __ mtc1(scratch1, f12);
- __ cvt_d_w(f12, f12);
- if (destination == kCoreRegisters) {
- __ Move(a2, a3, f14);
- __ Move(a0, a1, f12);
- }
- } else {
- ASSERT(destination == kCoreRegisters);
- // Write Smi from a0 to a3 and a2 in double format.
- __ mov(scratch1, a0);
- ConvertToDoubleStub stub1(a3, a2, scratch1, scratch2);
- __ push(ra);
- __ Call(stub1.GetCode());
- // Write Smi from a1 to a1 and a0 in double format.
- __ mov(scratch1, a1);
- ConvertToDoubleStub stub2(a1, a0, scratch1, scratch2);
- __ Call(stub2.GetCode());
- __ pop(ra);
- }
-}
-
-
-void FloatingPointHelper::LoadOperands(
- MacroAssembler* masm,
- FloatingPointHelper::Destination destination,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* slow) {
-
- // Load right operand (a0) to f12 or a2/a3.
- LoadNumber(masm, destination,
- a0, f14, a2, a3, heap_number_map, scratch1, scratch2, slow);
-
- // Load left operand (a1) to f14 or a0/a1.
- LoadNumber(masm, destination,
- a1, f12, a0, a1, heap_number_map, scratch1, scratch2, slow);
-}
-
-
-void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
- Destination destination,
- Register object,
- FPURegister dst,
- Register dst1,
- Register dst2,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* not_number) {
- __ AssertRootValue(heap_number_map,
- Heap::kHeapNumberMapRootIndex,
- "HeapNumberMap register clobbered.");
-
- Label is_smi, done;
-
- // Smi-check
- __ UntagAndJumpIfSmi(scratch1, object, &is_smi);
- // Heap number check
- __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
-
- // Handle loading a double from a heap number.
- if (CpuFeatures::IsSupported(FPU) &&
- destination == kFPURegisters) {
- CpuFeatures::Scope scope(FPU);
- // Load the double from tagged HeapNumber to double register.
-
- // ARM uses a workaround here because of the unaligned HeapNumber
- // kValueOffset. On MIPS this workaround is built into ldc1 so there's no
- // point in generating even more instructions.
- __ ldc1(dst, FieldMemOperand(object, HeapNumber::kValueOffset));
- } else {
- ASSERT(destination == kCoreRegisters);
- // Load the double from heap number to dst1 and dst2 in double format.
- __ lw(dst1, FieldMemOperand(object, HeapNumber::kValueOffset));
- __ lw(dst2, FieldMemOperand(object,
- HeapNumber::kValueOffset + kPointerSize));
- }
- __ Branch(&done);
-
- // Handle loading a double from a smi.
- __ bind(&is_smi);
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
- // Convert smi to double using FPU instructions.
- __ mtc1(scratch1, dst);
- __ cvt_d_w(dst, dst);
- if (destination == kCoreRegisters) {
- // Load the converted smi to dst1 and dst2 in double format.
- __ Move(dst1, dst2, dst);
- }
- } else {
- ASSERT(destination == kCoreRegisters);
- // Write smi to dst1 and dst2 double format.
- __ mov(scratch1, object);
- ConvertToDoubleStub stub(dst2, dst1, scratch1, scratch2);
- __ push(ra);
- __ Call(stub.GetCode());
- __ pop(ra);
- }
-
- __ bind(&done);
-}
-
-
-void FloatingPointHelper::ConvertNumberToInt32(MacroAssembler* masm,
- Register object,
- Register dst,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- FPURegister double_scratch,
- Label* not_number) {
- __ AssertRootValue(heap_number_map,
- Heap::kHeapNumberMapRootIndex,
- "HeapNumberMap register clobbered.");
- Label done;
- Label not_in_int32_range;
-
- __ UntagAndJumpIfSmi(dst, object, &done);
- __ lw(scratch1, FieldMemOperand(object, HeapNumber::kMapOffset));
- __ Branch(not_number, ne, scratch1, Operand(heap_number_map));
- __ ConvertToInt32(object,
- dst,
- scratch1,
- scratch2,
- double_scratch,
- &not_in_int32_range);
- __ jmp(&done);
-
- __ bind(&not_in_int32_range);
- __ lw(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
- __ lw(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset));
-
- __ EmitOutOfInt32RangeTruncate(dst,
- scratch1,
- scratch2,
- scratch3);
-
- __ bind(&done);
-}
-
+void DoubleToIStub::Generate(MacroAssembler* masm) {
+ Label out_of_range, only_low, negate, done;
+ Register input_reg = source();
+ Register result_reg = destination();
-void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm,
- Register int_scratch,
- Destination destination,
- FPURegister double_dst,
- Register dst1,
- Register dst2,
- Register scratch2,
- FPURegister single_scratch) {
- ASSERT(!int_scratch.is(scratch2));
- ASSERT(!int_scratch.is(dst1));
- ASSERT(!int_scratch.is(dst2));
-
- Label done;
+ int double_offset = offset();
+ // Account for saved regs if input is sp.
+ if (input_reg.is(sp)) double_offset += 3 * kPointerSize;
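+  // (The Push of scratch, scratch2 and scratch3 below moves sp down by three
+  // words before the double is loaded.)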
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
- __ mtc1(int_scratch, single_scratch);
- __ cvt_d_w(double_dst, single_scratch);
- if (destination == kCoreRegisters) {
- __ Move(dst1, dst2, double_dst);
- }
- } else {
- Label fewer_than_20_useful_bits;
- // Expected output:
- // | dst2 | dst1 |
- // | s | exp | mantissa |
-
- // Check for zero.
- __ mov(dst2, int_scratch);
- __ mov(dst1, int_scratch);
- __ Branch(&done, eq, int_scratch, Operand(zero_reg));
-
- // Preload the sign of the value.
- __ And(dst2, int_scratch, Operand(HeapNumber::kSignMask));
- // Get the absolute value of the object (as an unsigned integer).
- Label skip_sub;
- __ Branch(&skip_sub, ge, dst2, Operand(zero_reg));
- __ Subu(int_scratch, zero_reg, int_scratch);
- __ bind(&skip_sub);
-
- // Get mantissa[51:20].
-
- // Get the position of the first set bit.
- __ Clz(dst1, int_scratch);
- __ li(scratch2, 31);
- __ Subu(dst1, scratch2, dst1);
-
- // Set the exponent.
- __ Addu(scratch2, dst1, Operand(HeapNumber::kExponentBias));
- __ Ins(dst2, scratch2,
- HeapNumber::kExponentShift, HeapNumber::kExponentBits);
-
- // Clear the first non null bit.
- __ li(scratch2, Operand(1));
- __ sllv(scratch2, scratch2, dst1);
- __ li(at, -1);
- __ Xor(scratch2, scratch2, at);
- __ And(int_scratch, int_scratch, scratch2);
-
- // Get the number of bits to set in the lower part of the mantissa.
- __ Subu(scratch2, dst1, Operand(HeapNumber::kMantissaBitsInTopWord));
- __ Branch(&fewer_than_20_useful_bits, lt, scratch2, Operand(zero_reg));
- // Set the higher 20 bits of the mantissa.
- __ srlv(at, int_scratch, scratch2);
- __ or_(dst2, dst2, at);
- __ li(at, 32);
- __ subu(scratch2, at, scratch2);
- __ sllv(dst1, int_scratch, scratch2);
- __ Branch(&done);
-
- __ bind(&fewer_than_20_useful_bits);
- __ li(at, HeapNumber::kMantissaBitsInTopWord);
- __ subu(scratch2, at, dst1);
- __ sllv(scratch2, int_scratch, scratch2);
- __ Or(dst2, dst2, scratch2);
- // Set dst1 to 0.
- __ mov(dst1, zero_reg);
- }
- __ bind(&done);
-}
+ Register scratch =
+ GetRegisterThatIsNotOneOf(input_reg, result_reg);
+ Register scratch2 =
+ GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch);
+ Register scratch3 =
+ GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch2);
+ DoubleRegister double_scratch = kLithiumScratchDouble;
+ __ Push(scratch, scratch2, scratch3);
-void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
- Register object,
- Destination destination,
- DoubleRegister double_dst,
- Register dst1,
- Register dst2,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- FPURegister single_scratch,
- Label* not_int32) {
- ASSERT(!scratch1.is(object) && !scratch2.is(object));
- ASSERT(!scratch1.is(scratch2));
- ASSERT(!heap_number_map.is(object) &&
- !heap_number_map.is(scratch1) &&
- !heap_number_map.is(scratch2));
-
- Label done, obj_is_not_smi;
-
- __ JumpIfNotSmi(object, &obj_is_not_smi);
- __ SmiUntag(scratch1, object);
- ConvertIntToDouble(masm, scratch1, destination, double_dst, dst1, dst2,
- scratch2, single_scratch);
- __ Branch(&done);
+ if (!skip_fastpath()) {
+ // Load double input.
+ __ ldc1(double_scratch, MemOperand(input_reg, double_offset));
- __ bind(&obj_is_not_smi);
- __ AssertRootValue(heap_number_map,
- Heap::kHeapNumberMapRootIndex,
- "HeapNumberMap register clobbered.");
- __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
-
- // Load the number.
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
- // Load the double value.
- __ ldc1(double_dst, FieldMemOperand(object, HeapNumber::kValueOffset));
-
- Register except_flag = scratch2;
- __ EmitFPUTruncate(kRoundToZero,
- single_scratch,
- double_dst,
- scratch1,
- except_flag,
- kCheckForInexactConversion);
+ // Clear cumulative exception flags and save the FCSR.
+ __ cfc1(scratch2, FCSR);
+ __ ctc1(zero_reg, FCSR);
- // Jump to not_int32 if the operation did not succeed.
- __ Branch(not_int32, ne, except_flag, Operand(zero_reg));
+ // Try a conversion to a signed integer.
+ __ Trunc_w_d(double_scratch, double_scratch);
+ // Move the converted value into the result register.
+ __ mfc1(result_reg, double_scratch);
- if (destination == kCoreRegisters) {
- __ Move(dst1, dst2, double_dst);
- }
+ // Retrieve and restore the FCSR.
+ __ cfc1(scratch, FCSR);
+ __ ctc1(scratch2, FCSR);
- } else {
- ASSERT(!scratch1.is(object) && !scratch2.is(object));
- // Load the double value in the destination registers.
- __ lw(dst2, FieldMemOperand(object, HeapNumber::kExponentOffset));
- __ lw(dst1, FieldMemOperand(object, HeapNumber::kMantissaOffset));
-
- // Check for 0 and -0.
- __ And(scratch1, dst1, Operand(~HeapNumber::kSignMask));
- __ Or(scratch1, scratch1, Operand(dst2));
- __ Branch(&done, eq, scratch1, Operand(zero_reg));
-
- // Check that the value can be exactly represented by a 32-bit integer.
- // Jump to not_int32 if that's not the case.
- DoubleIs32BitInteger(masm, dst1, dst2, scratch1, scratch2, not_int32);
-
- // dst1 and dst2 were trashed. Reload the double value.
- __ lw(dst2, FieldMemOperand(object, HeapNumber::kExponentOffset));
- __ lw(dst1, FieldMemOperand(object, HeapNumber::kMantissaOffset));
+ // Check for overflow and NaNs.
+ __ And(
+ scratch, scratch,
+ kFCSROverflowFlagMask | kFCSRUnderflowFlagMask
+ | kFCSRInvalidOpFlagMask);
+ // If we had no exceptions we are done.
+ __ Branch(&done, eq, scratch, Operand(zero_reg));
}
- __ bind(&done);
-}
+ // Load the double value and perform a manual truncation.
+ Register input_high = scratch2;
+ Register input_low = scratch3;
+ __ lw(input_low, MemOperand(input_reg, double_offset));
+ __ lw(input_high, MemOperand(input_reg, double_offset + kIntSize));
-void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
- Register object,
- Register dst,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- DoubleRegister double_scratch,
- Label* not_int32) {
- ASSERT(!dst.is(object));
- ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object));
- ASSERT(!scratch1.is(scratch2) &&
- !scratch1.is(scratch3) &&
- !scratch2.is(scratch3));
+ Label normal_exponent, restore_sign;
+ // Extract the biased exponent in result.
+ __ Ext(result_reg,
+ input_high,
+ HeapNumber::kExponentShift,
+ HeapNumber::kExponentBits);
- Label done;
+ // Check for Infinity and NaNs, which should return 0.
+ __ Subu(scratch, result_reg, HeapNumber::kExponentMask);
+ __ Movz(result_reg, zero_reg, scratch);
+ __ Branch(&done, eq, scratch, Operand(zero_reg));
- __ UntagAndJumpIfSmi(dst, object, &done);
+ // Express exponent as delta to (number of mantissa bits + 31).
+ __ Subu(result_reg,
+ result_reg,
+ Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31));
- __ AssertRootValue(heap_number_map,
- Heap::kHeapNumberMapRootIndex,
- "HeapNumberMap register clobbered.");
- __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
+ // If the delta is strictly positive, all bits would be shifted away,
+ // which means that we can return 0.
+ __ Branch(&normal_exponent, le, result_reg, Operand(zero_reg));
+ __ mov(result_reg, zero_reg);
+ __ Branch(&done);
- // Object is a heap number.
- // Convert the floating point value to a 32-bit integer.
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
- // Load the double value.
- __ ldc1(double_scratch, FieldMemOperand(object, HeapNumber::kValueOffset));
+ __ bind(&normal_exponent);
+ const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
+ // Calculate shift.
+ __ Addu(scratch, result_reg, Operand(kShiftBase + HeapNumber::kMantissaBits));
+
+ // Save the sign.
+ Register sign = result_reg;
+ result_reg = no_reg;
+ __ And(sign, input_high, Operand(HeapNumber::kSignMask));
+
+ // On ARM shifts > 31 bits are valid and will result in zero. On MIPS we need
+ // to check for this specific case.
+ Label high_shift_needed, high_shift_done;
+ __ Branch(&high_shift_needed, lt, scratch, Operand(32));
+ __ mov(input_high, zero_reg);
+ __ Branch(&high_shift_done);
+ __ bind(&high_shift_needed);
+
+ // Set the implicit 1 before the mantissa part in input_high.
+ __ Or(input_high,
+ input_high,
+ Operand(1 << HeapNumber::kMantissaBitsInTopWord));
+ // Shift the mantissa bits to the correct position.
+ // We don't need to clear non-mantissa bits as they will be shifted away.
+  // If they weren't, it would mean that the answer is in the 32-bit range.
+ __ sllv(input_high, input_high, scratch);
+
+ __ bind(&high_shift_done);
+
+ // Replace the shifted bits with bits from the lower mantissa word.
+ Label pos_shift, shift_done;
+ __ li(at, 32);
+ __ subu(scratch, at, scratch);
+ __ Branch(&pos_shift, ge, scratch, Operand(zero_reg));
- FPURegister single_scratch = double_scratch.low();
- Register except_flag = scratch2;
- __ EmitFPUTruncate(kRoundToZero,
- single_scratch,
- double_scratch,
- scratch1,
- except_flag,
- kCheckForInexactConversion);
+ // Negate scratch.
+ __ Subu(scratch, zero_reg, scratch);
+ __ sllv(input_low, input_low, scratch);
+ __ Branch(&shift_done);
- // Jump to not_int32 if the operation did not succeed.
- __ Branch(not_int32, ne, except_flag, Operand(zero_reg));
- // Get the result in the destination register.
- __ mfc1(dst, single_scratch);
+ __ bind(&pos_shift);
+ __ srlv(input_low, input_low, scratch);
- } else {
- // Load the double value in the destination registers.
- __ lw(scratch2, FieldMemOperand(object, HeapNumber::kExponentOffset));
- __ lw(scratch1, FieldMemOperand(object, HeapNumber::kMantissaOffset));
-
- // Check for 0 and -0.
- __ And(dst, scratch1, Operand(~HeapNumber::kSignMask));
- __ Or(dst, scratch2, Operand(dst));
- __ Branch(&done, eq, dst, Operand(zero_reg));
-
- DoubleIs32BitInteger(masm, scratch1, scratch2, dst, scratch3, not_int32);
-
- // Registers state after DoubleIs32BitInteger.
- // dst: mantissa[51:20].
- // scratch2: 1
-
- // Shift back the higher bits of the mantissa.
- __ srlv(dst, dst, scratch3);
- // Set the implicit first bit.
- __ li(at, 32);
- __ subu(scratch3, at, scratch3);
- __ sllv(scratch2, scratch2, scratch3);
- __ Or(dst, dst, scratch2);
- // Set the sign.
- __ lw(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
- __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
- Label skip_sub;
- __ Branch(&skip_sub, ge, scratch1, Operand(zero_reg));
- __ Subu(dst, zero_reg, dst);
- __ bind(&skip_sub);
- }
+ __ bind(&shift_done);
+ __ Or(input_high, input_high, Operand(input_low));
+ // Restore sign if necessary.
+ __ mov(scratch, sign);
+ result_reg = sign;
+ sign = no_reg;
+ __ Subu(result_reg, zero_reg, input_high);
+ __ Movz(result_reg, input_high, scratch);
__ bind(&done);
-}
-
-
-void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm,
- Register src1,
- Register src2,
- Register dst,
- Register scratch,
- Label* not_int32) {
- // Get exponent alone in scratch.
- __ Ext(scratch,
- src1,
- HeapNumber::kExponentShift,
- HeapNumber::kExponentBits);
- // Substract the bias from the exponent.
- __ Subu(scratch, scratch, Operand(HeapNumber::kExponentBias));
-
- // src1: higher (exponent) part of the double value.
- // src2: lower (mantissa) part of the double value.
- // scratch: unbiased exponent.
-
- // Fast cases. Check for obvious non 32-bit integer values.
- // Negative exponent cannot yield 32-bit integers.
- __ Branch(not_int32, lt, scratch, Operand(zero_reg));
- // Exponent greater than 31 cannot yield 32-bit integers.
- // Also, a positive value with an exponent equal to 31 is outside of the
- // signed 32-bit integer range.
- // Another way to put it is that if (exponent - signbit) > 30 then the
- // number cannot be represented as an int32.
- Register tmp = dst;
- __ srl(at, src1, 31);
- __ subu(tmp, scratch, at);
- __ Branch(not_int32, gt, tmp, Operand(30));
- // - Bits [21:0] in the mantissa are not null.
- __ And(tmp, src2, 0x3fffff);
- __ Branch(not_int32, ne, tmp, Operand(zero_reg));
-
- // Otherwise the exponent needs to be big enough to shift left all the
- // non zero bits left. So we need the (30 - exponent) last bits of the
- // 31 higher bits of the mantissa to be null.
- // Because bits [21:0] are null, we can check instead that the
- // (32 - exponent) last bits of the 32 higher bits of the mantissa are null.
-
- // Get the 32 higher bits of the mantissa in dst.
- __ Ext(dst,
- src2,
- HeapNumber::kMantissaBitsInTopWord,
- 32 - HeapNumber::kMantissaBitsInTopWord);
- __ sll(at, src1, HeapNumber::kNonMantissaBitsInTopWord);
- __ or_(dst, dst, at);
-
- // Create the mask and test the lower bits (of the higher bits).
- __ li(at, 32);
- __ subu(scratch, at, scratch);
- __ li(src2, 1);
- __ sllv(src1, src2, scratch);
- __ Subu(src1, src1, Operand(1));
- __ And(src1, dst, src1);
- __ Branch(not_int32, ne, src1, Operand(zero_reg));
-}
-
-
-void FloatingPointHelper::CallCCodeForDoubleOperation(
- MacroAssembler* masm,
- Token::Value op,
- Register heap_number_result,
- Register scratch) {
- // Using core registers:
- // a0: Left value (least significant part of mantissa).
- // a1: Left value (sign, exponent, top of mantissa).
- // a2: Right value (least significant part of mantissa).
- // a3: Right value (sign, exponent, top of mantissa).
-
- // Assert that heap_number_result is saved.
- // We currently always use s0 to pass it.
- ASSERT(heap_number_result.is(s0));
-
- // Push the current return address before the C call.
- __ push(ra);
- __ PrepareCallCFunction(4, scratch); // Two doubles are 4 arguments.
- if (!IsMipsSoftFloatABI) {
- CpuFeatures::Scope scope(FPU);
- // We are not using MIPS FPU instructions, and parameters for the runtime
- // function call are prepaired in a0-a3 registers, but function we are
- // calling is compiled with hard-float flag and expecting hard float ABI
- // (parameters in f12/f14 registers). We need to copy parameters from
- // a0-a3 registers to f12/f14 register pairs.
- __ Move(f12, a0, a1);
- __ Move(f14, a2, a3);
- }
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(
- ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2);
- }
- // Store answer in the overwritable heap number.
- if (!IsMipsSoftFloatABI) {
- CpuFeatures::Scope scope(FPU);
- // Double returned in register f0.
- __ sdc1(f0, FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
- } else {
- // Double returned in registers v0 and v1.
- __ sw(v1, FieldMemOperand(heap_number_result, HeapNumber::kExponentOffset));
- __ sw(v0, FieldMemOperand(heap_number_result, HeapNumber::kMantissaOffset));
- }
- // Place heap_number_result in v0 and return to the pushed return address.
- __ pop(ra);
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, heap_number_result);
+ __ Pop(scratch, scratch2, scratch3);
+ __ Ret();
}
-bool WriteInt32ToHeapNumberStub::IsPregenerated() {
+bool WriteInt32ToHeapNumberStub::IsPregenerated(Isolate* isolate) {
// These variants are compiled ahead of time. See next method.
if (the_int_.is(a1) &&
the_heap_number_.is(v0) &&
@@ -1119,11 +687,12 @@ bool WriteInt32ToHeapNumberStub::IsPregenerated() {
}
-void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime() {
+void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(
+ Isolate* isolate) {
WriteInt32ToHeapNumberStub stub1(a1, v0, a2, a3);
WriteInt32ToHeapNumberStub stub2(a2, v0, a3, a0);
- stub1.GetCode()->set_is_pregenerated(true);
- stub2.GetCode()->set_is_pregenerated(true);
+ stub1.GetCode(isolate)->set_is_pregenerated(true);
+ stub2.GetCode(isolate)->set_is_pregenerated(true);
}
@@ -1158,9 +727,9 @@ void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
__ sw(scratch_, FieldMemOperand(the_heap_number_,
HeapNumber::kExponentOffset));
__ sll(scratch_, the_int_, 32 - shift_distance);
+ __ Ret(USE_DELAY_SLOT);
__ sw(scratch_, FieldMemOperand(the_heap_number_,
HeapNumber::kMantissaOffset));
- __ Ret();
__ bind(&max_negative_int);
// The max negative int32 is stored as a positive number in the mantissa of
@@ -1172,9 +741,9 @@ void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
__ sw(scratch_,
FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset));
__ mov(scratch_, zero_reg);
+ __ Ret(USE_DELAY_SLOT);
__ sw(scratch_,
FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));
- __ Ret();
}
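
The stub above assembles the exponent and mantissa words of the resulting double by hand, including the special case for the most negative int32 (whose mantissa word is all zeros). As a stand-alone reference, here is a minimal C++ sketch that produces the same two 32-bit words by letting the compiler perform the int32-to-double conversion and then splitting the bit pattern; only the word split mirrors the stub, the rest is illustrative:

#include <cstdint>
#include <cstring>

// Returns the high word (sign, exponent, top of mantissa) and the low word
// (rest of the mantissa) of the IEEE-754 double equal to |value|. On
// little-endian MIPS these are the words stored at kExponentOffset and
// kMantissaOffset respectively.
static void Int32ToDoubleWords(int32_t value,
                               uint32_t* exponent_word,
                               uint32_t* mantissa_word) {
  double d = static_cast<double>(value);  // exact: every int32 fits in a double
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  *exponent_word = static_cast<uint32_t>(bits >> 32);
  *mantissa_word = static_cast<uint32_t>(bits);
}
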
@@ -1183,54 +752,51 @@ void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
// for "identity and not NaN".
static void EmitIdenticalObjectComparison(MacroAssembler* masm,
Label* slow,
- Condition cc,
- bool never_nan_nan) {
+ Condition cc) {
Label not_identical;
Label heap_number, return_equal;
Register exp_mask_reg = t5;
__ Branch(&not_identical, ne, a0, Operand(a1));
- // The two objects are identical. If we know that one of them isn't NaN then
- // we now know they test equal.
- if (cc != eq || !never_nan_nan) {
- __ li(exp_mask_reg, Operand(HeapNumber::kExponentMask));
-
- // Test for NaN. Sadly, we can't just compare to factory->nan_value(),
- // so we do the second best thing - test it ourselves.
- // They are both equal and they are not both Smis so both of them are not
- // Smis. If it's not a heap number, then return equal.
- if (cc == less || cc == greater) {
- __ GetObjectType(a0, t4, t4);
- __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
- } else {
- __ GetObjectType(a0, t4, t4);
- __ Branch(&heap_number, eq, t4, Operand(HEAP_NUMBER_TYPE));
- // Comparing JS objects with <=, >= is complicated.
- if (cc != eq) {
- __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
- // Normally here we fall through to return_equal, but undefined is
- // special: (undefined == undefined) == true, but
- // (undefined <= undefined) == false! See ECMAScript 11.8.5.
- if (cc == less_equal || cc == greater_equal) {
- __ Branch(&return_equal, ne, t4, Operand(ODDBALL_TYPE));
- __ LoadRoot(t2, Heap::kUndefinedValueRootIndex);
- __ Branch(&return_equal, ne, a0, Operand(t2));
- if (cc == le) {
- // undefined <= undefined should fail.
- __ li(v0, Operand(GREATER));
- } else {
- // undefined >= undefined should fail.
- __ li(v0, Operand(LESS));
- }
- __ Ret();
+ __ li(exp_mask_reg, Operand(HeapNumber::kExponentMask));
+
+ // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
+ // so we do the second best thing - test it ourselves.
+ // They are both equal and they are not both Smis so both of them are not
+ // Smis. If it's not a heap number, then return equal.
+ if (cc == less || cc == greater) {
+ __ GetObjectType(a0, t4, t4);
+ __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
+ } else {
+ __ GetObjectType(a0, t4, t4);
+ __ Branch(&heap_number, eq, t4, Operand(HEAP_NUMBER_TYPE));
+ // Comparing JS objects with <=, >= is complicated.
+ if (cc != eq) {
+ __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
+ // Normally here we fall through to return_equal, but undefined is
+ // special: (undefined == undefined) == true, but
+ // (undefined <= undefined) == false! See ECMAScript 11.8.5.
+ if (cc == less_equal || cc == greater_equal) {
+ __ Branch(&return_equal, ne, t4, Operand(ODDBALL_TYPE));
+ __ LoadRoot(t2, Heap::kUndefinedValueRootIndex);
+ __ Branch(&return_equal, ne, a0, Operand(t2));
+ ASSERT(is_int16(GREATER) && is_int16(LESS));
+ __ Ret(USE_DELAY_SLOT);
+ if (cc == le) {
+ // undefined <= undefined should fail.
+ __ li(v0, Operand(GREATER));
+ } else {
+ // undefined >= undefined should fail.
+ __ li(v0, Operand(LESS));
}
}
}
}
__ bind(&return_equal);
-
+ ASSERT(is_int16(GREATER) && is_int16(LESS));
+ __ Ret(USE_DELAY_SLOT);
if (cc == less) {
__ li(v0, Operand(GREATER)); // Things aren't less than themselves.
} else if (cc == greater) {
@@ -1238,48 +804,46 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
} else {
__ mov(v0, zero_reg); // Things are <=, >=, ==, === themselves.
}
- __ Ret();
- if (cc != eq || !never_nan_nan) {
- // For less and greater we don't have to check for NaN since the result of
- // x < x is false regardless. For the others here is some code to check
- // for NaN.
- if (cc != lt && cc != gt) {
- __ bind(&heap_number);
- // It is a heap number, so return non-equal if it's NaN and equal if it's
- // not NaN.
-
- // The representation of NaN values has all exponent bits (52..62) set,
- // and not all mantissa bits (0..51) clear.
- // Read top bits of double representation (second word of value).
- __ lw(t2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
- // Test that exponent bits are all set.
- __ And(t3, t2, Operand(exp_mask_reg));
- // If all bits not set (ne cond), then not a NaN, objects are equal.
- __ Branch(&return_equal, ne, t3, Operand(exp_mask_reg));
-
- // Shift out flag and all exponent bits, retaining only mantissa.
- __ sll(t2, t2, HeapNumber::kNonMantissaBitsInTopWord);
- // Or with all low-bits of mantissa.
- __ lw(t3, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
- __ Or(v0, t3, Operand(t2));
- // For equal we already have the right value in v0: Return zero (equal)
- // if all bits in mantissa are zero (it's an Infinity) and non-zero if
- // not (it's a NaN). For <= and >= we need to load v0 with the failing
- // value if it's a NaN.
- if (cc != eq) {
- // All-zero means Infinity means equal.
- __ Ret(eq, v0, Operand(zero_reg));
- if (cc == le) {
- __ li(v0, Operand(GREATER)); // NaN <= NaN should fail.
- } else {
- __ li(v0, Operand(LESS)); // NaN >= NaN should fail.
- }
+ // For less and greater we don't have to check for NaN since the result of
+ // x < x is false regardless. For the others here is some code to check
+ // for NaN.
+ if (cc != lt && cc != gt) {
+ __ bind(&heap_number);
+ // It is a heap number, so return non-equal if it's NaN and equal if it's
+ // not NaN.
+
+ // The representation of NaN values has all exponent bits (52..62) set,
+ // and not all mantissa bits (0..51) clear.
+ // Read top bits of double representation (second word of value).
+ __ lw(t2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
+ // Test that exponent bits are all set.
+ __ And(t3, t2, Operand(exp_mask_reg));
+ // If all bits not set (ne cond), then not a NaN, objects are equal.
+ __ Branch(&return_equal, ne, t3, Operand(exp_mask_reg));
+
+ // Shift out flag and all exponent bits, retaining only mantissa.
+ __ sll(t2, t2, HeapNumber::kNonMantissaBitsInTopWord);
+ // Or with all low-bits of mantissa.
+ __ lw(t3, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
+ __ Or(v0, t3, Operand(t2));
+ // For equal we already have the right value in v0: Return zero (equal)
+ // if all bits in mantissa are zero (it's an Infinity) and non-zero if
+ // not (it's a NaN). For <= and >= we need to load v0 with the failing
+ // value if it's a NaN.
+ if (cc != eq) {
+ // All-zero means Infinity means equal.
+ __ Ret(eq, v0, Operand(zero_reg));
+ ASSERT(is_int16(GREATER) && is_int16(LESS));
+ __ Ret(USE_DELAY_SLOT);
+ if (cc == le) {
+ __ li(v0, Operand(GREATER)); // NaN <= NaN should fail.
+ } else {
+ __ li(v0, Operand(LESS)); // NaN >= NaN should fail.
}
- __ Ret();
}
- // No fall through here.
}
+ // No fall through here.
__ bind(&not_identical);
}
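
The heap-number path above never touches the FPU: it detects NaN directly on the two words of the value by checking that every exponent bit is set and at least one mantissa bit is set (a full exponent with an all-zero mantissa is an Infinity, which compares equal to itself). A minimal stand-alone C++ sketch of the same bit test, with the exponent mask written out rather than taken from HeapNumber:

#include <cstdint>
#include <cstring>

static bool IsNaNBitPattern(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  uint32_t hi = static_cast<uint32_t>(bits >> 32);  // sign, exponent, mantissa top
  uint32_t lo = static_cast<uint32_t>(bits);        // mantissa low word
  const uint32_t kExponentMask = 0x7FF00000u;       // exponent bits 52..62
  if ((hi & kExponentMask) != kExponentMask) return false;  // exponent not all ones
  // Shift out the sign and exponent (12 bits); any remaining bit in either
  // word means NaN rather than Infinity.
  return ((hi << 12) | lo) != 0;
}
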
@@ -1312,25 +876,10 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
// Rhs is a smi, lhs is a number.
// Convert smi rhs to double.
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
- __ sra(at, rhs, kSmiTagSize);
- __ mtc1(at, f14);
- __ cvt_d_w(f14, f14);
- __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
- } else {
- // Load lhs to a double in a2, a3.
- __ lw(a3, FieldMemOperand(lhs, HeapNumber::kValueOffset + 4));
- __ lw(a2, FieldMemOperand(lhs, HeapNumber::kValueOffset));
-
- // Write Smi from rhs to a1 and a0 in double format. t5 is scratch.
- __ mov(t6, rhs);
- ConvertToDoubleStub stub1(a1, a0, t6, t5);
- __ push(ra);
- __ Call(stub1.GetCode());
-
- __ pop(ra);
- }
+ __ sra(at, rhs, kSmiTagSize);
+ __ mtc1(at, f14);
+ __ cvt_d_w(f14, f14);
+ __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
// We now have both loaded as doubles.
__ jmp(both_loaded_as_doubles);
@@ -1351,179 +900,14 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
// Lhs is a smi, rhs is a number.
// Convert smi lhs to double.
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
- __ sra(at, lhs, kSmiTagSize);
- __ mtc1(at, f12);
- __ cvt_d_w(f12, f12);
- __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
- } else {
- // Convert lhs to a double format. t5 is scratch.
- __ mov(t6, lhs);
- ConvertToDoubleStub stub2(a3, a2, t6, t5);
- __ push(ra);
- __ Call(stub2.GetCode());
- __ pop(ra);
- // Load rhs to a double in a1, a0.
- if (rhs.is(a0)) {
- __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
- __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
- } else {
- __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
- __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
- }
- }
+ __ sra(at, lhs, kSmiTagSize);
+ __ mtc1(at, f12);
+ __ cvt_d_w(f12, f12);
+ __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
// Fall through to both_loaded_as_doubles.
}
-void EmitNanCheck(MacroAssembler* masm, Condition cc) {
- bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
- // Lhs and rhs are already loaded to f12 and f14 register pairs.
- __ Move(t0, t1, f14);
- __ Move(t2, t3, f12);
- } else {
- // Lhs and rhs are already loaded to GP registers.
- __ mov(t0, a0); // a0 has LS 32 bits of rhs.
- __ mov(t1, a1); // a1 has MS 32 bits of rhs.
- __ mov(t2, a2); // a2 has LS 32 bits of lhs.
- __ mov(t3, a3); // a3 has MS 32 bits of lhs.
- }
- Register rhs_exponent = exp_first ? t0 : t1;
- Register lhs_exponent = exp_first ? t2 : t3;
- Register rhs_mantissa = exp_first ? t1 : t0;
- Register lhs_mantissa = exp_first ? t3 : t2;
- Label one_is_nan, neither_is_nan;
- Label lhs_not_nan_exp_mask_is_loaded;
-
- Register exp_mask_reg = t4;
- __ li(exp_mask_reg, HeapNumber::kExponentMask);
- __ and_(t5, lhs_exponent, exp_mask_reg);
- __ Branch(&lhs_not_nan_exp_mask_is_loaded, ne, t5, Operand(exp_mask_reg));
-
- __ sll(t5, lhs_exponent, HeapNumber::kNonMantissaBitsInTopWord);
- __ Branch(&one_is_nan, ne, t5, Operand(zero_reg));
-
- __ Branch(&one_is_nan, ne, lhs_mantissa, Operand(zero_reg));
-
- __ li(exp_mask_reg, HeapNumber::kExponentMask);
- __ bind(&lhs_not_nan_exp_mask_is_loaded);
- __ and_(t5, rhs_exponent, exp_mask_reg);
-
- __ Branch(&neither_is_nan, ne, t5, Operand(exp_mask_reg));
-
- __ sll(t5, rhs_exponent, HeapNumber::kNonMantissaBitsInTopWord);
- __ Branch(&one_is_nan, ne, t5, Operand(zero_reg));
-
- __ Branch(&neither_is_nan, eq, rhs_mantissa, Operand(zero_reg));
-
- __ bind(&one_is_nan);
- // NaN comparisons always fail.
- // Load whatever we need in v0 to make the comparison fail.
-
- if (cc == lt || cc == le) {
- __ li(v0, Operand(GREATER));
- } else {
- __ li(v0, Operand(LESS));
- }
- __ Ret();
-
- __ bind(&neither_is_nan);
-}
-
-
-static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) {
- // f12 and f14 have the two doubles. Neither is a NaN.
- // Call a native function to do a comparison between two non-NaNs.
- // Call C routine that may not cause GC or other trouble.
- // We use a call_was and return manually because we need arguments slots to
- // be freed.
-
- Label return_result_not_equal, return_result_equal;
- if (cc == eq) {
- // Doubles are not equal unless they have the same bit pattern.
- // Exception: 0 and -0.
- bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
- // Lhs and rhs are already loaded to f12 and f14 register pairs.
- __ Move(t0, t1, f14);
- __ Move(t2, t3, f12);
- } else {
- // Lhs and rhs are already loaded to GP registers.
- __ mov(t0, a0); // a0 has LS 32 bits of rhs.
- __ mov(t1, a1); // a1 has MS 32 bits of rhs.
- __ mov(t2, a2); // a2 has LS 32 bits of lhs.
- __ mov(t3, a3); // a3 has MS 32 bits of lhs.
- }
- Register rhs_exponent = exp_first ? t0 : t1;
- Register lhs_exponent = exp_first ? t2 : t3;
- Register rhs_mantissa = exp_first ? t1 : t0;
- Register lhs_mantissa = exp_first ? t3 : t2;
-
- __ xor_(v0, rhs_mantissa, lhs_mantissa);
- __ Branch(&return_result_not_equal, ne, v0, Operand(zero_reg));
-
- __ subu(v0, rhs_exponent, lhs_exponent);
- __ Branch(&return_result_equal, eq, v0, Operand(zero_reg));
- // 0, -0 case.
- __ sll(rhs_exponent, rhs_exponent, kSmiTagSize);
- __ sll(lhs_exponent, lhs_exponent, kSmiTagSize);
- __ or_(t4, rhs_exponent, lhs_exponent);
- __ or_(t4, t4, rhs_mantissa);
-
- __ Branch(&return_result_not_equal, ne, t4, Operand(zero_reg));
-
- __ bind(&return_result_equal);
-
- __ li(v0, Operand(EQUAL));
- __ Ret();
- }
-
- __ bind(&return_result_not_equal);
-
- if (!CpuFeatures::IsSupported(FPU)) {
- __ push(ra);
- __ PrepareCallCFunction(0, 2, t4);
- if (!IsMipsSoftFloatABI) {
- // We are not using MIPS FPU instructions, and parameters for the runtime
- // function call are prepaired in a0-a3 registers, but function we are
- // calling is compiled with hard-float flag and expecting hard float ABI
- // (parameters in f12/f14 registers). We need to copy parameters from
- // a0-a3 registers to f12/f14 register pairs.
- __ Move(f12, a0, a1);
- __ Move(f14, a2, a3);
- }
-
- AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()),
- 0, 2);
- __ pop(ra); // Because this function returns int, result is in v0.
- __ Ret();
- } else {
- CpuFeatures::Scope scope(FPU);
- Label equal, less_than;
- __ BranchF(&equal, NULL, eq, f12, f14);
- __ BranchF(&less_than, NULL, lt, f12, f14);
-
- // Not equal, not less, not NaN, must be greater.
-
- __ li(v0, Operand(GREATER));
- __ Ret();
-
- __ bind(&equal);
- __ li(v0, Operand(EQUAL));
- __ Ret();
-
- __ bind(&less_than);
- __ li(v0, Operand(LESS));
- __ Ret();
- }
-}
-
-
static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
Register lhs,
Register rhs) {
@@ -1553,13 +937,12 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
// Check for oddballs: true, false, null, undefined.
__ Branch(&return_not_equal, eq, a3, Operand(ODDBALL_TYPE));
- // Now that we have the types we might as well check for symbol-symbol.
- // Ensure that no non-strings have the symbol bit set.
- STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask);
- STATIC_ASSERT(kSymbolTag != 0);
- __ And(t2, a2, Operand(a3));
- __ And(t0, t2, Operand(kIsSymbolMask));
- __ Branch(&return_not_equal, ne, t0, Operand(zero_reg));
+ // Now that we have the types we might as well check for
+ // internalized-internalized.
+ STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
+ __ Or(a2, a2, Operand(a3));
+ __ And(at, a2, Operand(kIsNotStringMask | kIsNotInternalizedMask));
+ __ Branch(&return_not_equal, eq, at, Operand(zero_reg));
}
@@ -1577,49 +960,36 @@ static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
// Both are heap numbers. Load them up then jump to the code we have
// for that.
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
- __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
- __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
- } else {
- __ lw(a2, FieldMemOperand(lhs, HeapNumber::kValueOffset));
- __ lw(a3, FieldMemOperand(lhs, HeapNumber::kValueOffset + 4));
- if (rhs.is(a0)) {
- __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
- __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
- } else {
- __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
- __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
- }
- }
+ __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
+ __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
+
__ jmp(both_loaded_as_doubles);
}
-// Fast negative check for symbol-to-symbol equality.
-static void EmitCheckForSymbolsOrObjects(MacroAssembler* masm,
- Register lhs,
- Register rhs,
- Label* possible_strings,
- Label* not_both_strings) {
+// Fast negative check for internalized-to-internalized equality.
+static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
+ Register lhs,
+ Register rhs,
+ Label* possible_strings,
+ Label* not_both_strings) {
ASSERT((lhs.is(a0) && rhs.is(a1)) ||
(lhs.is(a1) && rhs.is(a0)));
- // a2 is object type of lhs.
- // Ensure that no non-strings have the symbol bit set.
+ // a2 is object type of rhs.
Label object_test;
- STATIC_ASSERT(kSymbolTag != 0);
+ STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
__ And(at, a2, Operand(kIsNotStringMask));
__ Branch(&object_test, ne, at, Operand(zero_reg));
- __ And(at, a2, Operand(kIsSymbolMask));
- __ Branch(possible_strings, eq, at, Operand(zero_reg));
+ __ And(at, a2, Operand(kIsNotInternalizedMask));
+ __ Branch(possible_strings, ne, at, Operand(zero_reg));
__ GetObjectType(rhs, a3, a3);
__ Branch(not_both_strings, ge, a3, Operand(FIRST_NONSTRING_TYPE));
- __ And(at, a3, Operand(kIsSymbolMask));
- __ Branch(possible_strings, eq, at, Operand(zero_reg));
+ __ And(at, a3, Operand(kIsNotInternalizedMask));
+ __ Branch(possible_strings, ne, at, Operand(zero_reg));
- // Both are symbols. We already checked they weren't the same pointer
- // so they are not equal.
+ // Both are internalized strings. We already checked they weren't the same
+ // pointer so they are not equal.
__ Ret(USE_DELAY_SLOT);
__ li(v0, Operand(1)); // Non-zero indicates not equal.
@@ -1641,154 +1011,61 @@ static void EmitCheckForSymbolsOrObjects(MacroAssembler* masm,
}
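
The fast negative check relies on the instance-type encoding asserted above: with kStringTag == 0 and kInternalizedTag == 0, an internalized string has both the "not a string" and "not internalized" bits clear, so one mask-and-test per operand is enough. A stand-alone sketch of the predicate, with the masks passed in rather than taken from objects.h:

#include <cstdint>

// True when both instance types describe internalized strings. Two distinct
// internalized strings can never be equal, because equal contents always map
// to the same internalized object, so the stub can answer "not equal" without
// comparing characters.
static bool BothInternalizedStrings(uint32_t lhs_type, uint32_t rhs_type,
                                    uint32_t is_not_string_mask,
                                    uint32_t is_not_internalized_mask) {
  const uint32_t mask = is_not_string_mask | is_not_internalized_mask;
  return ((lhs_type | rhs_type) & mask) == 0;
}
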
-void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
- Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- bool object_is_smi,
- Label* not_found) {
- // Use of registers. Register result is used as a temporary.
- Register number_string_cache = result;
- Register mask = scratch3;
-
- // Load the number string cache.
- __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
-
- // Make the hash mask from the length of the number string cache. It
- // contains two elements (number and string) for each cache entry.
- __ lw(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
- // Divide length by two (length is a smi).
- __ sra(mask, mask, kSmiTagSize + 1);
- __ Addu(mask, mask, -1); // Make mask.
-
- // Calculate the entry in the number string cache. The hash value in the
- // number string cache for smis is just the smi value, and the hash for
- // doubles is the xor of the upper and lower words. See
- // Heap::GetNumberStringCache.
- Isolate* isolate = masm->isolate();
- Label is_smi;
- Label load_result_from_cache;
- if (!object_is_smi) {
- __ JumpIfSmi(object, &is_smi);
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
- __ CheckMap(object,
- scratch1,
- Heap::kHeapNumberMapRootIndex,
- not_found,
- DONT_DO_SMI_CHECK);
-
- STATIC_ASSERT(8 == kDoubleSize);
- __ Addu(scratch1,
- object,
- Operand(HeapNumber::kValueOffset - kHeapObjectTag));
- __ lw(scratch2, MemOperand(scratch1, kPointerSize));
- __ lw(scratch1, MemOperand(scratch1, 0));
- __ Xor(scratch1, scratch1, Operand(scratch2));
- __ And(scratch1, scratch1, Operand(mask));
-
- // Calculate address of entry in string cache: each entry consists
- // of two pointer sized fields.
- __ sll(scratch1, scratch1, kPointerSizeLog2 + 1);
- __ Addu(scratch1, number_string_cache, scratch1);
-
- Register probe = mask;
- __ lw(probe,
- FieldMemOperand(scratch1, FixedArray::kHeaderSize));
- __ JumpIfSmi(probe, not_found);
- __ ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset));
- __ ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset));
- __ BranchF(&load_result_from_cache, NULL, eq, f12, f14);
- __ Branch(not_found);
- } else {
- // Note that there is no cache check for non-FPU case, even though
- // it seems there could be. May be a tiny opimization for non-FPU
- // cores.
- __ Branch(not_found);
- }
+static void ICCompareStub_CheckInputType(MacroAssembler* masm,
+ Register input,
+ Register scratch,
+ CompareIC::State expected,
+ Label* fail) {
+ Label ok;
+ if (expected == CompareIC::SMI) {
+ __ JumpIfNotSmi(input, fail);
+ } else if (expected == CompareIC::NUMBER) {
+ __ JumpIfSmi(input, &ok);
+ __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
+ DONT_DO_SMI_CHECK);
}
-
- __ bind(&is_smi);
- Register scratch = scratch1;
- __ sra(scratch, object, 1); // Shift away the tag.
- __ And(scratch, mask, Operand(scratch));
-
- // Calculate address of entry in string cache: each entry consists
- // of two pointer sized fields.
- __ sll(scratch, scratch, kPointerSizeLog2 + 1);
- __ Addu(scratch, number_string_cache, scratch);
-
- // Check if the entry is the smi we are looking for.
- Register probe = mask;
- __ lw(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
- __ Branch(not_found, ne, object, Operand(probe));
-
- // Get the result from the cache.
- __ bind(&load_result_from_cache);
- __ lw(result,
- FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
-
- __ IncrementCounter(isolate->counters()->number_to_string_native(),
- 1,
- scratch1,
- scratch2);
+ // We could be strict about internalized/string here, but as long as
+ // hydrogen doesn't care, the stub doesn't have to care either.
+ __ bind(&ok);
}
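
The guard above only filters out inputs that contradict the state recorded by the CompareIC; everything else falls through to the generic code. A sketch of the decision it encodes (the enum is illustrative; the real states live on CompareIC):

// Returns true when the input is acceptable for the recorded state, false
// when the stub should miss and let the IC patch itself to a wider state.
enum class ExpectedInput { kSmi, kNumber, kAnything };

static bool InputAcceptable(ExpectedInput expected,
                            bool is_smi,
                            bool is_heap_number) {
  switch (expected) {
    case ExpectedInput::kSmi:      return is_smi;                    // SMI: smis only
    case ExpectedInput::kNumber:   return is_smi || is_heap_number;  // NUMBER: smi or heap number
    case ExpectedInput::kAnything: return true;                      // no check emitted
  }
  return true;
}
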
-void NumberToStringStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- __ lw(a1, MemOperand(sp, 0));
-
- // Generate code to lookup number in the number string cache.
- GenerateLookupNumberStringCache(masm, a1, v0, a2, a3, t0, false, &runtime);
- __ DropAndRet(1);
-
- __ bind(&runtime);
- // Handle number to string in the runtime system if not found in the cache.
- __ TailCallRuntime(Runtime::kNumberToString, 1, 1);
-}
+// On entry a1 (lhs) and a0 (rhs) are the values to be compared.
+// On exit v0 is 0, positive or negative to indicate the result of
+// the comparison.
+void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
+ Register lhs = a1;
+ Register rhs = a0;
+ Condition cc = GetCondition();
+ Label miss;
+ ICCompareStub_CheckInputType(masm, lhs, a2, left_, &miss);
+ ICCompareStub_CheckInputType(masm, rhs, a3, right_, &miss);
-// On entry lhs_ (lhs) and rhs_ (rhs) are the things to be compared.
-// On exit, v0 is 0, positive, or negative (smi) to indicate the result
-// of the comparison.
-void CompareStub::Generate(MacroAssembler* masm) {
Label slow; // Call builtin.
Label not_smis, both_loaded_as_doubles;
-
- if (include_smi_compare_) {
- Label not_two_smis, smi_done;
- __ Or(a2, a1, a0);
- __ JumpIfNotSmi(a2, &not_two_smis);
- __ sra(a1, a1, 1);
- __ sra(a0, a0, 1);
- __ Ret(USE_DELAY_SLOT);
- __ subu(v0, a1, a0);
- __ bind(&not_two_smis);
- } else if (FLAG_debug_code) {
- __ Or(a2, a1, a0);
- __ And(a2, a2, kSmiTagMask);
- __ Assert(ne, "CompareStub: unexpected smi operands.",
- a2, Operand(zero_reg));
- }
-
+ Label not_two_smis, smi_done;
+ __ Or(a2, a1, a0);
+ __ JumpIfNotSmi(a2, &not_two_smis);
+ __ sra(a1, a1, 1);
+ __ sra(a0, a0, 1);
+ __ Ret(USE_DELAY_SLOT);
+ __ subu(v0, a1, a0);
+ __ bind(&not_two_smis);
// NOTICE! This code is only reached after a smi-fast-case check, so
// it is certain that at least one operand isn't a smi.
// Handle the case where the objects are identical. Either returns the answer
// or goes to slow. Only falls through if the objects were not identical.
- EmitIdenticalObjectComparison(masm, &slow, cc_, never_nan_nan_);
+ EmitIdenticalObjectComparison(masm, &slow, cc);
// If either is a Smi (we know that not both are), then they can only
// be strictly equal if the other is a HeapNumber.
STATIC_ASSERT(kSmiTag == 0);
ASSERT_EQ(0, Smi::FromInt(0));
- __ And(t2, lhs_, Operand(rhs_));
+ __ And(t2, lhs, Operand(rhs));
__ JumpIfNotSmi(t2, &not_smis, t0);
// One operand is a smi. EmitSmiNonsmiComparison generates code that can:
// 1) Return the answer.
@@ -1798,8 +1075,8 @@ void CompareStub::Generate(MacroAssembler* masm) {
// In cases 3 and 4 we have found out we were dealing with a number-number
// comparison and the numbers have been loaded into f12 and f14 as doubles,
// or in GP registers (a0, a1, a2, a3) depending on the presence of the FPU.
- EmitSmiNonsmiComparison(masm, lhs_, rhs_,
- &both_loaded_as_doubles, &slow, strict_);
+ EmitSmiNonsmiComparison(masm, lhs, rhs,
+ &both_loaded_as_doubles, &slow, strict());
__ bind(&both_loaded_as_doubles);
// f12, f14 are the double representations of the left hand side
@@ -1807,98 +1084,92 @@ void CompareStub::Generate(MacroAssembler* masm) {
// left hand side and a0, a1 represent right hand side.
Isolate* isolate = masm->isolate();
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
- Label nan;
- __ li(t0, Operand(LESS));
- __ li(t1, Operand(GREATER));
- __ li(t2, Operand(EQUAL));
-
- // Check if either rhs or lhs is NaN.
- __ BranchF(NULL, &nan, eq, f12, f14);
-
- // Check if LESS condition is satisfied. If true, move conditionally
- // result to v0.
- __ c(OLT, D, f12, f14);
- __ Movt(v0, t0);
- // Use previous check to store conditionally to v0 oposite condition
- // (GREATER). If rhs is equal to lhs, this will be corrected in next
- // check.
- __ Movf(v0, t1);
- // Check if EQUAL condition is satisfied. If true, move conditionally
- // result to v0.
- __ c(EQ, D, f12, f14);
- __ Movt(v0, t2);
+ Label nan;
+ __ li(t0, Operand(LESS));
+ __ li(t1, Operand(GREATER));
+ __ li(t2, Operand(EQUAL));
+
+ // Check if either rhs or lhs is NaN.
+ __ BranchF(NULL, &nan, eq, f12, f14);
+
+  // Check if the LESS condition is satisfied. If true, conditionally move
+  // the result to v0.
+ __ c(OLT, D, f12, f14);
+ __ Movt(v0, t0);
+  // Use the previous check to conditionally store the opposite condition
+  // (GREATER) in v0. If rhs is equal to lhs, this will be corrected in the
+  // next check.
+ __ Movf(v0, t1);
+  // Check if the EQUAL condition is satisfied. If true, conditionally move
+  // the result to v0.
+ __ c(EQ, D, f12, f14);
+ __ Movt(v0, t2);
- __ Ret();
+ __ Ret();
- __ bind(&nan);
- // NaN comparisons always fail.
- // Load whatever we need in v0 to make the comparison fail.
- if (cc_ == lt || cc_ == le) {
- __ li(v0, Operand(GREATER));
- } else {
- __ li(v0, Operand(LESS));
- }
- __ Ret();
+ __ bind(&nan);
+ // NaN comparisons always fail.
+ // Load whatever we need in v0 to make the comparison fail.
+ ASSERT(is_int16(GREATER) && is_int16(LESS));
+ __ Ret(USE_DELAY_SLOT);
+ if (cc == lt || cc == le) {
+ __ li(v0, Operand(GREATER));
} else {
- // Checks for NaN in the doubles we have loaded. Can return the answer or
- // fall through if neither is a NaN. Also binds rhs_not_nan.
- EmitNanCheck(masm, cc_);
-
- // Compares two doubles that are not NaNs. Returns the answer.
- // Never falls through.
- EmitTwoNonNanDoubleComparison(masm, cc_);
+ __ li(v0, Operand(LESS));
}
+
__ bind(&not_smis);
// At this point we know we are dealing with two different objects,
// and neither of them is a Smi. The objects are in lhs_ and rhs_.
- if (strict_) {
+ if (strict()) {
// This returns non-equal for some object types, or falls through if it
// was not lucky.
- EmitStrictTwoHeapObjectCompare(masm, lhs_, rhs_);
+ EmitStrictTwoHeapObjectCompare(masm, lhs, rhs);
}
- Label check_for_symbols;
+ Label check_for_internalized_strings;
Label flat_string_check;
// Check for heap-number-heap-number comparison. Can jump to slow case,
// or load both doubles and jump to the code that handles
- // that case. If the inputs are not doubles then jumps to check_for_symbols.
+ // that case. If the inputs are not doubles then jumps to
+ // check_for_internalized_strings.
// In this case a2 will contain the type of lhs_.
EmitCheckForTwoHeapNumbers(masm,
- lhs_,
- rhs_,
+ lhs,
+ rhs,
&both_loaded_as_doubles,
- &check_for_symbols,
+ &check_for_internalized_strings,
&flat_string_check);
- __ bind(&check_for_symbols);
- if (cc_ == eq && !strict_) {
- // Returns an answer for two symbols or two detectable objects.
+ __ bind(&check_for_internalized_strings);
+ if (cc == eq && !strict()) {
+ // Returns an answer for two internalized strings or two
+ // detectable objects.
// Otherwise jumps to string case or not both strings case.
// Assumes that a2 is the type of lhs_ on entry.
- EmitCheckForSymbolsOrObjects(masm, lhs_, rhs_, &flat_string_check, &slow);
+ EmitCheckForInternalizedStringsOrObjects(
+ masm, lhs, rhs, &flat_string_check, &slow);
}
// Check for both being sequential ASCII strings, and inline if that is the
// case.
__ bind(&flat_string_check);
- __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs_, rhs_, a2, a3, &slow);
+ __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs, rhs, a2, a3, &slow);
__ IncrementCounter(isolate->counters()->string_compare_native(), 1, a2, a3);
- if (cc_ == eq) {
+ if (cc == eq) {
StringCompareStub::GenerateFlatAsciiStringEquals(masm,
- lhs_,
- rhs_,
+ lhs,
+ rhs,
a2,
a3,
t0);
} else {
StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
- lhs_,
- rhs_,
+ lhs,
+ rhs,
a2,
a3,
t0,
@@ -1909,18 +1180,18 @@ void CompareStub::Generate(MacroAssembler* masm) {
__ bind(&slow);
// Prepare for call to builtin. Push object pointers, a0 (lhs) first,
// a1 (rhs) second.
- __ Push(lhs_, rhs_);
+ __ Push(lhs, rhs);
// Figure out which native to call and setup the arguments.
Builtins::JavaScript native;
- if (cc_ == eq) {
- native = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
+ if (cc == eq) {
+ native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
} else {
native = Builtins::COMPARE;
int ncr; // NaN compare result.
- if (cc_ == lt || cc_ == le) {
+ if (cc == lt || cc == le) {
ncr = GREATER;
} else {
- ASSERT(cc_ == gt || cc_ == ge); // Remaining cases.
+ ASSERT(cc == gt || cc == ge); // Remaining cases.
ncr = LESS;
}
__ li(a0, Operand(Smi::FromInt(ncr)));
@@ -1930,119 +1201,9 @@ void CompareStub::Generate(MacroAssembler* masm) {
// Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
__ InvokeBuiltin(native, JUMP_FUNCTION);
-}
-
-// The stub expects its argument in the tos_ register and returns its result in
-// it, too: zero for false, and a non-zero value for true.
-void ToBooleanStub::Generate(MacroAssembler* masm) {
- // This stub uses FPU instructions.
- CpuFeatures::Scope scope(FPU);
-
- Label patch;
- const Register map = t5.is(tos_) ? t3 : t5;
-
- // undefined -> false.
- CheckOddball(masm, UNDEFINED, Heap::kUndefinedValueRootIndex, false);
-
- // Boolean -> its value.
- CheckOddball(masm, BOOLEAN, Heap::kFalseValueRootIndex, false);
- CheckOddball(masm, BOOLEAN, Heap::kTrueValueRootIndex, true);
-
- // 'null' -> false.
- CheckOddball(masm, NULL_TYPE, Heap::kNullValueRootIndex, false);
-
- if (types_.Contains(SMI)) {
- // Smis: 0 -> false, all other -> true
- __ And(at, tos_, kSmiTagMask);
- // tos_ contains the correct return value already
- __ Ret(eq, at, Operand(zero_reg));
- } else if (types_.NeedsMap()) {
- // If we need a map later and have a Smi -> patch.
- __ JumpIfSmi(tos_, &patch);
- }
-
- if (types_.NeedsMap()) {
- __ lw(map, FieldMemOperand(tos_, HeapObject::kMapOffset));
-
- if (types_.CanBeUndetectable()) {
- __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
- __ And(at, at, Operand(1 << Map::kIsUndetectable));
- // Undetectable -> false.
- __ Movn(tos_, zero_reg, at);
- __ Ret(ne, at, Operand(zero_reg));
- }
- }
-
- if (types_.Contains(SPEC_OBJECT)) {
- // Spec object -> true.
- __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
- // tos_ contains the correct non-zero return value already.
- __ Ret(ge, at, Operand(FIRST_SPEC_OBJECT_TYPE));
- }
-
- if (types_.Contains(STRING)) {
- // String value -> false iff empty.
- __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
- Label skip;
- __ Branch(&skip, ge, at, Operand(FIRST_NONSTRING_TYPE));
- __ Ret(USE_DELAY_SLOT); // the string length is OK as the return value
- __ lw(tos_, FieldMemOperand(tos_, String::kLengthOffset));
- __ bind(&skip);
- }
-
- if (types_.Contains(HEAP_NUMBER)) {
- // Heap number -> false iff +0, -0, or NaN.
- Label not_heap_number;
- __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
- __ Branch(&not_heap_number, ne, map, Operand(at));
- Label zero_or_nan, number;
- __ ldc1(f2, FieldMemOperand(tos_, HeapNumber::kValueOffset));
- __ BranchF(&number, &zero_or_nan, ne, f2, kDoubleRegZero);
- // "tos_" is a register, and contains a non zero value by default.
- // Hence we only need to overwrite "tos_" with zero to return false for
- // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
- __ bind(&zero_or_nan);
- __ mov(tos_, zero_reg);
- __ bind(&number);
- __ Ret();
- __ bind(&not_heap_number);
- }
-
- __ bind(&patch);
- GenerateTypeTransition(masm);
-}
-
-
-void ToBooleanStub::CheckOddball(MacroAssembler* masm,
- Type type,
- Heap::RootListIndex value,
- bool result) {
- if (types_.Contains(type)) {
- // If we see an expected oddball, return its ToBoolean value tos_.
- __ LoadRoot(at, value);
- __ Subu(at, at, tos_); // This is a check for equality for the movz below.
- // The value of a root is never NULL, so we can avoid loading a non-null
- // value into tos_ when we want to return 'true'.
- if (!result) {
- __ Movz(tos_, zero_reg, at);
- }
- __ Ret(eq, at, Operand(zero_reg));
- }
-}
-
-
-void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) {
- __ Move(a3, tos_);
- __ li(a2, Operand(Smi::FromInt(tos_.code())));
- __ li(a1, Operand(Smi::FromInt(types_.ToByte())));
- __ Push(a3, a2, a1);
- // Patch the caller to an appropriate specialized stub and return the
- // operation result to the caller of the stub.
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kToBoolean_Patch), masm->isolate()),
- 3,
- 1);
+ __ bind(&miss);
+ GenerateMiss(masm);
}
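
Both the inline NaN path and the ncr value passed to Builtins::COMPARE use the same convention: when a NaN is involved, report whichever ordering makes the requested relation come out false. A one-line sketch, assuming the usual LESS = -1, EQUAL = 0, GREATER = 1 encoding that the "0, positive or negative" result in v0 relies on:

// For cc in {lt, le} a NaN operand must make the comparison fail, so report
// "greater"; for {gt, ge} report "less".
static int NaNCompareResult(bool cc_is_lt_or_le) {
  return cc_is_lt_or_le ? 1 /* GREATER */ : -1 /* LESS */;
}
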
@@ -2052,7 +1213,6 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
// restore them.
__ MultiPush(kJSCallerSaved | ra.bit());
if (save_doubles_ == kSaveFPRegs) {
- CpuFeatures::Scope scope(FPU);
__ MultiPushFPU(kCallerSavedFPU);
}
const int argument_count = 1;
@@ -2061,12 +1221,11 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
AllowExternalCallThatCantCauseGC scope(masm);
__ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
- __ li(a0, Operand(ExternalReference::isolate_address()));
+ __ li(a0, Operand(ExternalReference::isolate_address(masm->isolate())));
__ CallCFunction(
ExternalReference::store_buffer_overflow_function(masm->isolate()),
argument_count);
if (save_doubles_ == kSaveFPRegs) {
- CpuFeatures::Scope scope(FPU);
__ MultiPopFPU(kCallerSavedFPU);
}
@@ -2075,1331 +1234,18 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
}
-void UnaryOpStub::PrintName(StringStream* stream) {
- const char* op_name = Token::Name(op_);
- const char* overwrite_name = NULL; // Make g++ happy.
- switch (mode_) {
- case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break;
- case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break;
- }
- stream->Add("UnaryOpStub_%s_%s_%s",
- op_name,
- overwrite_name,
- UnaryOpIC::GetName(operand_type_));
-}
-
-
-// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::Generate(MacroAssembler* masm) {
- switch (operand_type_) {
- case UnaryOpIC::UNINITIALIZED:
- GenerateTypeTransition(masm);
- break;
- case UnaryOpIC::SMI:
- GenerateSmiStub(masm);
- break;
- case UnaryOpIC::HEAP_NUMBER:
- GenerateHeapNumberStub(masm);
- break;
- case UnaryOpIC::GENERIC:
- GenerateGenericStub(masm);
- break;
- }
-}
-
-
-void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
- // Argument is in a0 and v0 at this point, so we can overwrite a0.
- __ li(a2, Operand(Smi::FromInt(op_)));
- __ li(a1, Operand(Smi::FromInt(mode_)));
- __ li(a0, Operand(Smi::FromInt(operand_type_)));
- __ Push(v0, a2, a1, a0);
-
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kUnaryOp_Patch), masm->isolate()), 4, 1);
-}
-
-
-// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
- switch (op_) {
- case Token::SUB:
- GenerateSmiStubSub(masm);
- break;
- case Token::BIT_NOT:
- GenerateSmiStubBitNot(masm);
- break;
- default:
- UNREACHABLE();
- }
+void BinaryOpStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { a1, a0 };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
+ descriptor->SetMissHandler(
+ ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate));
}
-void UnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) {
- Label non_smi, slow;
- GenerateSmiCodeSub(masm, &non_smi, &slow);
- __ bind(&non_smi);
- __ bind(&slow);
- GenerateTypeTransition(masm);
-}
-
-
-void UnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) {
- Label non_smi;
- GenerateSmiCodeBitNot(masm, &non_smi);
- __ bind(&non_smi);
- GenerateTypeTransition(masm);
-}
-
-
-void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
- Label* non_smi,
- Label* slow) {
- __ JumpIfNotSmi(a0, non_smi);
-
- // The result of negating zero or the smallest negative smi is not a smi.
- __ And(t0, a0, ~0x80000000);
- __ Branch(slow, eq, t0, Operand(zero_reg));
-
- // Return '0 - value'.
- __ Ret(USE_DELAY_SLOT);
- __ subu(v0, zero_reg, a0);
-}
-
-
-void UnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm,
- Label* non_smi) {
- __ JumpIfNotSmi(a0, non_smi);
-
- // Flip bits and revert inverted smi-tag.
- __ Neg(v0, a0);
- __ And(v0, v0, ~kSmiTagMask);
- __ Ret();
-}
-
-
-// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
- switch (op_) {
- case Token::SUB:
- GenerateHeapNumberStubSub(masm);
- break;
- case Token::BIT_NOT:
- GenerateHeapNumberStubBitNot(masm);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void UnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) {
- Label non_smi, slow, call_builtin;
- GenerateSmiCodeSub(masm, &non_smi, &call_builtin);
- __ bind(&non_smi);
- GenerateHeapNumberCodeSub(masm, &slow);
- __ bind(&slow);
- GenerateTypeTransition(masm);
- __ bind(&call_builtin);
- GenerateGenericCodeFallback(masm);
-}
-
-
-void UnaryOpStub::GenerateHeapNumberStubBitNot(MacroAssembler* masm) {
- Label non_smi, slow;
- GenerateSmiCodeBitNot(masm, &non_smi);
- __ bind(&non_smi);
- GenerateHeapNumberCodeBitNot(masm, &slow);
- __ bind(&slow);
- GenerateTypeTransition(masm);
-}
-
-
-void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
- Label* slow) {
- EmitCheckForHeapNumber(masm, a0, a1, t2, slow);
- // a0 is a heap number. Get a new heap number in a1.
- if (mode_ == UNARY_OVERWRITE) {
- __ lw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
- __ Xor(a2, a2, Operand(HeapNumber::kSignMask)); // Flip sign.
- __ sw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
- } else {
- Label slow_allocate_heapnumber, heapnumber_allocated;
- __ AllocateHeapNumber(a1, a2, a3, t2, &slow_allocate_heapnumber);
- __ jmp(&heapnumber_allocated);
-
- __ bind(&slow_allocate_heapnumber);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(a0);
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- __ mov(a1, v0);
- __ pop(a0);
- }
-
- __ bind(&heapnumber_allocated);
- __ lw(a3, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
- __ lw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
- __ sw(a3, FieldMemOperand(a1, HeapNumber::kMantissaOffset));
- __ Xor(a2, a2, Operand(HeapNumber::kSignMask)); // Flip sign.
- __ sw(a2, FieldMemOperand(a1, HeapNumber::kExponentOffset));
- __ mov(v0, a1);
- }
- __ Ret();
-}
-
-
-void UnaryOpStub::GenerateHeapNumberCodeBitNot(
- MacroAssembler* masm,
- Label* slow) {
- Label impossible;
-
- EmitCheckForHeapNumber(masm, a0, a1, t2, slow);
- // Convert the heap number in a0 to an untagged integer in a1.
- __ ConvertToInt32(a0, a1, a2, a3, f0, slow);
-
- // Do the bitwise operation and check if the result fits in a smi.
- Label try_float;
- __ Neg(a1, a1);
- __ Addu(a2, a1, Operand(0x40000000));
- __ Branch(&try_float, lt, a2, Operand(zero_reg));
-
- // Tag the result as a smi and we're done.
- __ SmiTag(v0, a1);
- __ Ret();
-
- // Try to store the result in a heap number.
- __ bind(&try_float);
- if (mode_ == UNARY_NO_OVERWRITE) {
- Label slow_allocate_heapnumber, heapnumber_allocated;
- // Allocate a new heap number without zapping v0, which we need if it fails.
- __ AllocateHeapNumber(a2, a3, t0, t2, &slow_allocate_heapnumber);
- __ jmp(&heapnumber_allocated);
-
- __ bind(&slow_allocate_heapnumber);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(v0); // Push the heap number, not the untagged int32.
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- __ mov(a2, v0); // Move the new heap number into a2.
- // Get the heap number into v0, now that the new heap number is in a2.
- __ pop(v0);
- }
-
- // Convert the heap number in v0 to an untagged integer in a1.
- // This can't go slow-case because it's the same number we already
- // converted once again.
- __ ConvertToInt32(v0, a1, a3, t0, f0, &impossible);
- // Negate the result.
- __ Xor(a1, a1, -1);
-
- __ bind(&heapnumber_allocated);
- __ mov(v0, a2); // Move newly allocated heap number to v0.
- }
-
- if (CpuFeatures::IsSupported(FPU)) {
- // Convert the int32 in a1 to the heap number in v0. a2 is corrupted.
- CpuFeatures::Scope scope(FPU);
- __ mtc1(a1, f0);
- __ cvt_d_w(f0, f0);
- __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
- __ Ret();
- } else {
- // WriteInt32ToHeapNumberStub does not trigger GC, so we do not
- // have to set up a frame.
- WriteInt32ToHeapNumberStub stub(a1, v0, a2, a3);
- __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
- }
-
- __ bind(&impossible);
- if (FLAG_debug_code) {
- __ stop("Incorrect assumption in bit-not stub");
- }
-}
-
-
-// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::GenerateGenericStub(MacroAssembler* masm) {
- switch (op_) {
- case Token::SUB:
- GenerateGenericStubSub(masm);
- break;
- case Token::BIT_NOT:
- GenerateGenericStubBitNot(masm);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void UnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) {
- Label non_smi, slow;
- GenerateSmiCodeSub(masm, &non_smi, &slow);
- __ bind(&non_smi);
- GenerateHeapNumberCodeSub(masm, &slow);
- __ bind(&slow);
- GenerateGenericCodeFallback(masm);
-}
-
-
-void UnaryOpStub::GenerateGenericStubBitNot(MacroAssembler* masm) {
- Label non_smi, slow;
- GenerateSmiCodeBitNot(masm, &non_smi);
- __ bind(&non_smi);
- GenerateHeapNumberCodeBitNot(masm, &slow);
- __ bind(&slow);
- GenerateGenericCodeFallback(masm);
-}
-
-
-void UnaryOpStub::GenerateGenericCodeFallback(
- MacroAssembler* masm) {
- // Handle the slow case by jumping to the JavaScript builtin.
- __ push(a0);
- switch (op_) {
- case Token::SUB:
- __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
- break;
- case Token::BIT_NOT:
- __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
- Label get_result;
-
- __ Push(a1, a0);
-
- __ li(a2, Operand(Smi::FromInt(MinorKey())));
- __ li(a1, Operand(Smi::FromInt(op_)));
- __ li(a0, Operand(Smi::FromInt(operands_type_)));
- __ Push(a2, a1, a0);
-
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
- masm->isolate()),
- 5,
- 1);
-}
-
-
-void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(
- MacroAssembler* masm) {
- UNIMPLEMENTED();
-}
-
-
-void BinaryOpStub::Generate(MacroAssembler* masm) {
- // Explicitly allow generation of nested stubs. It is safe here because
- // generation code does not use any raw pointers.
- AllowStubCallsScope allow_stub_calls(masm, true);
- switch (operands_type_) {
- case BinaryOpIC::UNINITIALIZED:
- GenerateTypeTransition(masm);
- break;
- case BinaryOpIC::SMI:
- GenerateSmiStub(masm);
- break;
- case BinaryOpIC::INT32:
- GenerateInt32Stub(masm);
- break;
- case BinaryOpIC::HEAP_NUMBER:
- GenerateHeapNumberStub(masm);
- break;
- case BinaryOpIC::ODDBALL:
- GenerateOddballStub(masm);
- break;
- case BinaryOpIC::BOTH_STRING:
- GenerateBothStringStub(masm);
- break;
- case BinaryOpIC::STRING:
- GenerateStringStub(masm);
- break;
- case BinaryOpIC::GENERIC:
- GenerateGeneric(masm);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void BinaryOpStub::PrintName(StringStream* stream) {
- const char* op_name = Token::Name(op_);
- const char* overwrite_name;
- switch (mode_) {
- case NO_OVERWRITE: overwrite_name = "Alloc"; break;
- case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
- case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
- default: overwrite_name = "UnknownOverwrite"; break;
- }
- stream->Add("BinaryOpStub_%s_%s_%s",
- op_name,
- overwrite_name,
- BinaryOpIC::GetName(operands_type_));
-}
-
-
-
-void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) {
- Register left = a1;
- Register right = a0;
-
- Register scratch1 = t0;
- Register scratch2 = t1;
-
- ASSERT(right.is(a0));
- STATIC_ASSERT(kSmiTag == 0);
-
- Label not_smi_result;
- switch (op_) {
- case Token::ADD:
- __ AdduAndCheckForOverflow(v0, left, right, scratch1);
- __ RetOnNoOverflow(scratch1);
- // No need to revert anything - right and left are intact.
- break;
- case Token::SUB:
- __ SubuAndCheckForOverflow(v0, left, right, scratch1);
- __ RetOnNoOverflow(scratch1);
- // No need to revert anything - right and left are intact.
- break;
- case Token::MUL: {
- // Remove tag from one of the operands. This way the multiplication result
- // will be a smi if it fits the smi range.
- __ SmiUntag(scratch1, right);
- // Do multiplication.
- // lo = lower 32 bits of scratch1 * left.
- // hi = higher 32 bits of scratch1 * left.
- __ Mult(left, scratch1);
- // Check for overflowing the smi range - no overflow if higher 33 bits of
- // the result are identical.
- __ mflo(scratch1);
- __ mfhi(scratch2);
- __ sra(scratch1, scratch1, 31);
- __ Branch(&not_smi_result, ne, scratch1, Operand(scratch2));
- // Go slow on zero result to handle -0.
- __ mflo(v0);
- __ Ret(ne, v0, Operand(zero_reg));
- // We need -0 if we were multiplying a negative number with 0 to get 0.
- // We know one of them was zero.
- __ Addu(scratch2, right, left);
- Label skip;
- // ARM uses the 'pl' condition, which is 'ge'.
- // Negating it results in 'lt'.
- __ Branch(&skip, lt, scratch2, Operand(zero_reg));
- ASSERT(Smi::FromInt(0) == 0);
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, zero_reg); // Return smi 0 if the non-zero one was positive.
- __ bind(&skip);
- // We fall through here if we multiplied a negative number with 0, because
- // that would mean we should produce -0.
- }
- break;
- case Token::DIV: {
- Label done;
- __ SmiUntag(scratch2, right);
- __ SmiUntag(scratch1, left);
- __ Div(scratch1, scratch2);
- // A minor optimization: div may be calculated asynchronously, so we check
- // for division by zero before getting the result.
- __ Branch(&not_smi_result, eq, scratch2, Operand(zero_reg));
- // If the result is 0, we need to make sure the dividsor (right) is
- // positive, otherwise it is a -0 case.
- // Quotient is in 'lo', remainder is in 'hi'.
- // Check for no remainder first.
- __ mfhi(scratch1);
- __ Branch(&not_smi_result, ne, scratch1, Operand(zero_reg));
- __ mflo(scratch1);
- __ Branch(&done, ne, scratch1, Operand(zero_reg));
- __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
- __ bind(&done);
- // Check that the signed result fits in a Smi.
- __ Addu(scratch2, scratch1, Operand(0x40000000));
- __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
- __ SmiTag(v0, scratch1);
- __ Ret();
- }
- break;
- case Token::MOD: {
- Label done;
- __ SmiUntag(scratch2, right);
- __ SmiUntag(scratch1, left);
- __ Div(scratch1, scratch2);
- // A minor optimization: div may be calculated asynchronously, so we check
- // for division by 0 before calling mfhi.
- // Check for zero on the right hand side.
- __ Branch(&not_smi_result, eq, scratch2, Operand(zero_reg));
- // If the result is 0, we need to make sure the dividend (left) is
- // positive (or 0), otherwise it is a -0 case.
- // Remainder is in 'hi'.
- __ mfhi(scratch2);
- __ Branch(&done, ne, scratch2, Operand(zero_reg));
- __ Branch(&not_smi_result, lt, scratch1, Operand(zero_reg));
- __ bind(&done);
- // Check that the signed result fits in a Smi.
- __ Addu(scratch1, scratch2, Operand(0x40000000));
- __ Branch(&not_smi_result, lt, scratch1, Operand(zero_reg));
- __ SmiTag(v0, scratch2);
- __ Ret();
- }
- break;
- case Token::BIT_OR:
- __ Ret(USE_DELAY_SLOT);
- __ or_(v0, left, right);
- break;
- case Token::BIT_AND:
- __ Ret(USE_DELAY_SLOT);
- __ and_(v0, left, right);
- break;
- case Token::BIT_XOR:
- __ Ret(USE_DELAY_SLOT);
- __ xor_(v0, left, right);
- break;
- case Token::SAR:
- // Remove tags from right operand.
- __ GetLeastBitsFromSmi(scratch1, right, 5);
- __ srav(scratch1, left, scratch1);
- // Smi tag result.
- __ And(v0, scratch1, ~kSmiTagMask);
- __ Ret();
- break;
- case Token::SHR:
- // Remove tags from operands. We can't do this on a 31 bit number
- // because then the 0s get shifted into bit 30 instead of bit 31.
- __ SmiUntag(scratch1, left);
- __ GetLeastBitsFromSmi(scratch2, right, 5);
- __ srlv(v0, scratch1, scratch2);
- // Unsigned shift is not allowed to produce a negative number, so
- // check the sign bit and the sign bit after Smi tagging.
- __ And(scratch1, v0, Operand(0xc0000000));
- __ Branch(&not_smi_result, ne, scratch1, Operand(zero_reg));
- // Smi tag result.
- __ SmiTag(v0);
- __ Ret();
- break;
- case Token::SHL:
- // Remove tags from operands.
- __ SmiUntag(scratch1, left);
- __ GetLeastBitsFromSmi(scratch2, right, 5);
- __ sllv(scratch1, scratch1, scratch2);
- // Check that the signed result fits in a Smi.
- __ Addu(scratch2, scratch1, Operand(0x40000000));
- __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
- __ SmiTag(v0, scratch1);
- __ Ret();
- break;
- default:
- UNREACHABLE();
- }
- __ bind(&not_smi_result);
-}
-
-
-void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
- bool smi_operands,
- Label* not_numbers,
- Label* gc_required) {
- Register left = a1;
- Register right = a0;
- Register scratch1 = t3;
- Register scratch2 = t5;
- Register scratch3 = t0;
-
- ASSERT(smi_operands || (not_numbers != NULL));
- if (smi_operands) {
- __ AssertSmi(left);
- __ AssertSmi(right);
- }
-
- Register heap_number_map = t2;
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD: {
- // Load left and right operands into f12 and f14 or a0/a1 and a2/a3
- // depending on whether FPU is available or not.
- FloatingPointHelper::Destination destination =
- CpuFeatures::IsSupported(FPU) &&
- op_ != Token::MOD ?
- FloatingPointHelper::kFPURegisters :
- FloatingPointHelper::kCoreRegisters;
-
- // Allocate new heap number for result.
- Register result = s0;
- GenerateHeapResultAllocation(
- masm, result, heap_number_map, scratch1, scratch2, gc_required);
-
- // Load the operands.
- if (smi_operands) {
- FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2);
- } else {
- FloatingPointHelper::LoadOperands(masm,
- destination,
- heap_number_map,
- scratch1,
- scratch2,
- not_numbers);
- }
-
- // Calculate the result.
- if (destination == FloatingPointHelper::kFPURegisters) {
- // Using FPU registers:
- // f12: Left value.
- // f14: Right value.
- CpuFeatures::Scope scope(FPU);
- switch (op_) {
- case Token::ADD:
- __ add_d(f10, f12, f14);
- break;
- case Token::SUB:
- __ sub_d(f10, f12, f14);
- break;
- case Token::MUL:
- __ mul_d(f10, f12, f14);
- break;
- case Token::DIV:
- __ div_d(f10, f12, f14);
- break;
- default:
- UNREACHABLE();
- }
-
- // ARM uses a workaround here because of the unaligned HeapNumber
- // kValueOffset. On MIPS this workaround is built into sdc1 so
- // there's no point in generating even more instructions.
- __ sdc1(f10, FieldMemOperand(result, HeapNumber::kValueOffset));
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, result);
- } else {
- // Call the C function to handle the double operation.
- FloatingPointHelper::CallCCodeForDoubleOperation(masm,
- op_,
- result,
- scratch1);
- if (FLAG_debug_code) {
- __ stop("Unreachable code.");
- }
- }
- break;
- }
- case Token::BIT_OR:
- case Token::BIT_XOR:
- case Token::BIT_AND:
- case Token::SAR:
- case Token::SHR:
- case Token::SHL: {
- if (smi_operands) {
- __ SmiUntag(a3, left);
- __ SmiUntag(a2, right);
- } else {
- // Convert operands to 32-bit integers. Right in a2 and left in a3.
- FloatingPointHelper::ConvertNumberToInt32(masm,
- left,
- a3,
- heap_number_map,
- scratch1,
- scratch2,
- scratch3,
- f0,
- not_numbers);
- FloatingPointHelper::ConvertNumberToInt32(masm,
- right,
- a2,
- heap_number_map,
- scratch1,
- scratch2,
- scratch3,
- f0,
- not_numbers);
- }
- Label result_not_a_smi;
- switch (op_) {
- case Token::BIT_OR:
- __ Or(a2, a3, Operand(a2));
- break;
- case Token::BIT_XOR:
- __ Xor(a2, a3, Operand(a2));
- break;
- case Token::BIT_AND:
- __ And(a2, a3, Operand(a2));
- break;
- case Token::SAR:
- // Use only the 5 least significant bits of the shift count.
- __ GetLeastBitsFromInt32(a2, a2, 5);
- __ srav(a2, a3, a2);
- break;
- case Token::SHR:
- // Use only the 5 least significant bits of the shift count.
- __ GetLeastBitsFromInt32(a2, a2, 5);
- __ srlv(a2, a3, a2);
- // SHR is special because it is required to produce a positive answer.
- // The code below for writing into heap numbers isn't capable of
- // writing the register as an unsigned int so we go to slow case if we
- // hit this case.
- if (CpuFeatures::IsSupported(FPU)) {
- __ Branch(&result_not_a_smi, lt, a2, Operand(zero_reg));
- } else {
- __ Branch(not_numbers, lt, a2, Operand(zero_reg));
- }
- break;
- case Token::SHL:
- // Use only the 5 least significant bits of the shift count.
- __ GetLeastBitsFromInt32(a2, a2, 5);
- __ sllv(a2, a3, a2);
- break;
- default:
- UNREACHABLE();
- }
- // Check that the *signed* result fits in a smi.
- __ Addu(a3, a2, Operand(0x40000000));
- __ Branch(&result_not_a_smi, lt, a3, Operand(zero_reg));
- __ SmiTag(v0, a2);
- __ Ret();
-
- // Allocate new heap number for result.
- __ bind(&result_not_a_smi);
- Register result = t1;
- if (smi_operands) {
- __ AllocateHeapNumber(
- result, scratch1, scratch2, heap_number_map, gc_required);
- } else {
- GenerateHeapResultAllocation(
- masm, result, heap_number_map, scratch1, scratch2, gc_required);
- }
-
- // a2: Answer as signed int32.
- // t1: Heap number to write answer into.
-
- // Nothing can go wrong now, so move the heap number to v0, which is the
- // result.
- __ mov(v0, t1);
-
- if (CpuFeatures::IsSupported(FPU)) {
- // Convert the int32 in a2 to the heap number in a0. As
- // mentioned above SHR needs to always produce a positive result.
- CpuFeatures::Scope scope(FPU);
- __ mtc1(a2, f0);
- if (op_ == Token::SHR) {
- __ Cvt_d_uw(f0, f0, f22);
- } else {
- __ cvt_d_w(f0, f0);
- }
- // ARM uses a workaround here because of the unaligned HeapNumber
- // kValueOffset. On MIPS this workaround is built into sdc1 so
- // there's no point in generating even more instructions.
- __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
- __ Ret();
- } else {
- // Tail call that writes the int32 in a2 to the heap number in v0, using
- // a3 and a0 as scratch. v0 is preserved and returned.
- WriteInt32ToHeapNumberStub stub(a2, v0, a3, a0);
- __ TailCallStub(&stub);
- }
- break;
- }
- default:
- UNREACHABLE();
- }
-}
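The smi-range check above (Addu with 0x40000000 followed by a branch on the sign) works because V8's smis on 32-bit targets carry a 31-bit payload: a value fits exactly when it lies in [-2^30, 2^30). A minimal C++ sketch of the check and of SmiTag, assuming the layout asserted elsewhere in this file (kSmiTag == 0, kSmiTagSize == 1); the helpers are illustrative, not V8 API:

#include <cstdint>

// Sketch: mirrors the Addu(a3, a2, 0x40000000) / Branch(lt, zero_reg) test.
// Adding 2^30 maps the valid smi payload range [-2^30, 2^30) onto [0, 2^31),
// so any out-of-range value yields a negative 32-bit sum.
static bool FitsInSmi(int32_t value) {
  return static_cast<int32_t>(static_cast<uint32_t>(value) + 0x40000000u) >= 0;
}

// SmiTag under the assumed layout: shift the payload left by one,
// leaving the (zero) tag bit in the least significant position.
static int32_t SmiTag(int32_t value) {
  return static_cast<int32_t>(static_cast<uint32_t>(value) << 1);
}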
-
-
-// Generate the smi code. If the operation on smis is successful, a return is
-// generated. If the result is not a smi and heap number allocation is not
-// requested, the code falls through. If number allocation is requested but a
-// heap number cannot be allocated, the code jumps to the label gc_required.
-void BinaryOpStub::GenerateSmiCode(
- MacroAssembler* masm,
- Label* use_runtime,
- Label* gc_required,
- SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
- Label not_smis;
-
- Register left = a1;
- Register right = a0;
- Register scratch1 = t3;
-
- // Perform combined smi check on both operands.
- __ Or(scratch1, left, Operand(right));
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfNotSmi(scratch1, &not_smis);
-
-  // If the smi-smi operation results in a smi, a return is generated.
- GenerateSmiSmiOperation(masm);
-
-  // If heap number results are possible, generate the result in an allocated
- // heap number.
- if (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) {
- GenerateFPOperation(masm, true, use_runtime, gc_required);
- }
- __ bind(&not_smis);
-}
-
-
-void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
- Label not_smis, call_runtime;
-
- if (result_type_ == BinaryOpIC::UNINITIALIZED ||
- result_type_ == BinaryOpIC::SMI) {
- // Only allow smi results.
- GenerateSmiCode(masm, &call_runtime, NULL, NO_HEAPNUMBER_RESULTS);
- } else {
- // Allow heap number result and don't make a transition if a heap number
- // cannot be allocated.
- GenerateSmiCode(masm,
- &call_runtime,
- &call_runtime,
- ALLOW_HEAPNUMBER_RESULTS);
- }
-
- // Code falls through if the result is not returned as either a smi or heap
- // number.
- GenerateTypeTransition(masm);
-
- __ bind(&call_runtime);
- GenerateCallRuntime(masm);
-}
-
-
-void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
- ASSERT(operands_type_ == BinaryOpIC::STRING);
-  // Try to add arguments as strings; otherwise, transition to the generic
- // BinaryOpIC type.
- GenerateAddStrings(masm);
- GenerateTypeTransition(masm);
-}
-
-
-void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
- Label call_runtime;
- ASSERT(operands_type_ == BinaryOpIC::BOTH_STRING);
- ASSERT(op_ == Token::ADD);
- // If both arguments are strings, call the string add stub.
- // Otherwise, do a transition.
-
- // Registers containing left and right operands respectively.
- Register left = a1;
- Register right = a0;
-
- // Test if left operand is a string.
- __ JumpIfSmi(left, &call_runtime);
- __ GetObjectType(left, a2, a2);
- __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));
-
- // Test if right operand is a string.
- __ JumpIfSmi(right, &call_runtime);
- __ GetObjectType(right, a2, a2);
- __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));
-
- StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
- GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_stub);
-
- __ bind(&call_runtime);
- GenerateTypeTransition(masm);
-}
-
-
-void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
- ASSERT(operands_type_ == BinaryOpIC::INT32);
-
- Register left = a1;
- Register right = a0;
- Register scratch1 = t3;
- Register scratch2 = t5;
- FPURegister double_scratch = f0;
- FPURegister single_scratch = f6;
-
- Register heap_number_result = no_reg;
- Register heap_number_map = t2;
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-
- Label call_runtime;
- // Labels for type transition, used for wrong input or output types.
-  // Both labels are currently bound to the same position. We use two
-  // different labels to differentiate the cause leading to the type transition.
- Label transition;
-
- // Smi-smi fast case.
- Label skip;
- __ Or(scratch1, left, right);
- __ JumpIfNotSmi(scratch1, &skip);
- GenerateSmiSmiOperation(masm);
- // Fall through if the result is not a smi.
- __ bind(&skip);
-
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD: {
- // Load both operands and check that they are 32-bit integer.
- // Jump to type transition if they are not. The registers a0 and a1 (right
- // and left) are preserved for the runtime call.
- FloatingPointHelper::Destination destination =
- (CpuFeatures::IsSupported(FPU) && op_ != Token::MOD)
- ? FloatingPointHelper::kFPURegisters
- : FloatingPointHelper::kCoreRegisters;
-
- FloatingPointHelper::LoadNumberAsInt32Double(masm,
- right,
- destination,
- f14,
- a2,
- a3,
- heap_number_map,
- scratch1,
- scratch2,
- f2,
- &transition);
- FloatingPointHelper::LoadNumberAsInt32Double(masm,
- left,
- destination,
- f12,
- t0,
- t1,
- heap_number_map,
- scratch1,
- scratch2,
- f2,
- &transition);
-
- if (destination == FloatingPointHelper::kFPURegisters) {
- CpuFeatures::Scope scope(FPU);
- Label return_heap_number;
- switch (op_) {
- case Token::ADD:
- __ add_d(f10, f12, f14);
- break;
- case Token::SUB:
- __ sub_d(f10, f12, f14);
- break;
- case Token::MUL:
- __ mul_d(f10, f12, f14);
- break;
- case Token::DIV:
- __ div_d(f10, f12, f14);
- break;
- default:
- UNREACHABLE();
- }
-
- if (op_ != Token::DIV) {
- // These operations produce an integer result.
- // Try to return a smi if we can.
- // Otherwise return a heap number if allowed, or jump to type
- // transition.
-
- Register except_flag = scratch2;
- __ EmitFPUTruncate(kRoundToZero,
- single_scratch,
- f10,
- scratch1,
- except_flag);
-
- if (result_type_ <= BinaryOpIC::INT32) {
- // If except_flag != 0, result does not fit in a 32-bit integer.
- __ Branch(&transition, ne, except_flag, Operand(zero_reg));
- }
-
- // Check if the result fits in a smi.
- __ mfc1(scratch1, single_scratch);
- __ Addu(scratch2, scratch1, Operand(0x40000000));
-          // If not, try to return a heap number.
- __ Branch(&return_heap_number, lt, scratch2, Operand(zero_reg));
- // Check for minus zero. Return heap number for minus zero.
- Label not_zero;
- __ Branch(&not_zero, ne, scratch1, Operand(zero_reg));
- __ mfc1(scratch2, f11);
- __ And(scratch2, scratch2, HeapNumber::kSignMask);
- __ Branch(&return_heap_number, ne, scratch2, Operand(zero_reg));
- __ bind(&not_zero);
-
- // Tag the result and return.
- __ SmiTag(v0, scratch1);
- __ Ret();
- } else {
- // DIV just falls through to allocating a heap number.
- }
-
- __ bind(&return_heap_number);
- // Return a heap number, or fall through to type transition or runtime
- // call if we can't.
- if (result_type_ >= ((op_ == Token::DIV) ? BinaryOpIC::HEAP_NUMBER
- : BinaryOpIC::INT32)) {
- // We are using FPU registers so s0 is available.
- heap_number_result = s0;
- GenerateHeapResultAllocation(masm,
- heap_number_result,
- heap_number_map,
- scratch1,
- scratch2,
- &call_runtime);
- __ mov(v0, heap_number_result);
- __ sdc1(f10, FieldMemOperand(v0, HeapNumber::kValueOffset));
- __ Ret();
- }
-
- // A DIV operation expecting an integer result falls through
- // to type transition.
-
- } else {
- // We preserved a0 and a1 to be able to call runtime.
- // Save the left value on the stack.
- __ Push(t1, t0);
-
- Label pop_and_call_runtime;
-
- // Allocate a heap number to store the result.
- heap_number_result = s0;
- GenerateHeapResultAllocation(masm,
- heap_number_result,
- heap_number_map,
- scratch1,
- scratch2,
- &pop_and_call_runtime);
-
- // Load the left value from the value saved on the stack.
- __ Pop(a1, a0);
-
- // Call the C function to handle the double operation.
- FloatingPointHelper::CallCCodeForDoubleOperation(
- masm, op_, heap_number_result, scratch1);
- if (FLAG_debug_code) {
- __ stop("Unreachable code.");
- }
-
- __ bind(&pop_and_call_runtime);
- __ Drop(2);
- __ Branch(&call_runtime);
- }
-
- break;
- }
-
- case Token::BIT_OR:
- case Token::BIT_XOR:
- case Token::BIT_AND:
- case Token::SAR:
- case Token::SHR:
- case Token::SHL: {
- Label return_heap_number;
- Register scratch3 = t1;
- // Convert operands to 32-bit integers. Right in a2 and left in a3. The
- // registers a0 and a1 (right and left) are preserved for the runtime
- // call.
- FloatingPointHelper::LoadNumberAsInt32(masm,
- left,
- a3,
- heap_number_map,
- scratch1,
- scratch2,
- scratch3,
- f0,
- &transition);
- FloatingPointHelper::LoadNumberAsInt32(masm,
- right,
- a2,
- heap_number_map,
- scratch1,
- scratch2,
- scratch3,
- f0,
- &transition);
-
- // The ECMA-262 standard specifies that, for shift operations, only the
- // 5 least significant bits of the shift value should be used.
- switch (op_) {
- case Token::BIT_OR:
- __ Or(a2, a3, Operand(a2));
- break;
- case Token::BIT_XOR:
- __ Xor(a2, a3, Operand(a2));
- break;
- case Token::BIT_AND:
- __ And(a2, a3, Operand(a2));
- break;
- case Token::SAR:
- __ And(a2, a2, Operand(0x1f));
- __ srav(a2, a3, a2);
- break;
- case Token::SHR:
- __ And(a2, a2, Operand(0x1f));
- __ srlv(a2, a3, a2);
- // SHR is special because it is required to produce a positive answer.
- // We only get a negative result if the shift value (a2) is 0.
-        // This result cannot be represented as a signed 32-bit integer, so try
-        // to return a heap number if we can.
-        // The non-FPU code does not support this special case, so jump to
- // runtime if we don't support it.
- if (CpuFeatures::IsSupported(FPU)) {
- __ Branch((result_type_ <= BinaryOpIC::INT32)
- ? &transition
- : &return_heap_number,
- lt,
- a2,
- Operand(zero_reg));
- } else {
- __ Branch((result_type_ <= BinaryOpIC::INT32)
- ? &transition
- : &call_runtime,
- lt,
- a2,
- Operand(zero_reg));
- }
- break;
- case Token::SHL:
- __ And(a2, a2, Operand(0x1f));
- __ sllv(a2, a3, a2);
- break;
- default:
- UNREACHABLE();
- }
-
- // Check if the result fits in a smi.
- __ Addu(scratch1, a2, Operand(0x40000000));
-      // If not, try to return a heap number. (We know the result is an int32.)
- __ Branch(&return_heap_number, lt, scratch1, Operand(zero_reg));
- // Tag the result and return.
- __ SmiTag(v0, a2);
- __ Ret();
-
- __ bind(&return_heap_number);
- heap_number_result = t1;
- GenerateHeapResultAllocation(masm,
- heap_number_result,
- heap_number_map,
- scratch1,
- scratch2,
- &call_runtime);
-
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
-
- if (op_ != Token::SHR) {
- // Convert the result to a floating point value.
- __ mtc1(a2, double_scratch);
- __ cvt_d_w(double_scratch, double_scratch);
- } else {
- // The result must be interpreted as an unsigned 32-bit integer.
- __ mtc1(a2, double_scratch);
- __ Cvt_d_uw(double_scratch, double_scratch, single_scratch);
- }
-
- // Store the result.
- __ mov(v0, heap_number_result);
- __ sdc1(double_scratch, FieldMemOperand(v0, HeapNumber::kValueOffset));
- __ Ret();
- } else {
- // Tail call that writes the int32 in a2 to the heap number in v0, using
- // a3 and a0 as scratch. v0 is preserved and returned.
- __ mov(v0, t1);
- WriteInt32ToHeapNumberStub stub(a2, v0, a3, a0);
- __ TailCallStub(&stub);
- }
-
- break;
- }
-
- default:
- UNREACHABLE();
- }
-
- // We never expect DIV to yield an integer result, so we always generate
- // type transition code for DIV operations expecting an integer result: the
- // code will fall through to this type transition.
- if (transition.is_linked() ||
- ((op_ == Token::DIV) && (result_type_ <= BinaryOpIC::INT32))) {
- __ bind(&transition);
- GenerateTypeTransition(masm);
- }
-
- __ bind(&call_runtime);
- GenerateCallRuntime(masm);
-}
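As the comments above note, ECMA-262 uses only the five least significant bits of a shift count, and SHR (>>>) is the one operator whose result is unsigned and may not fit in a signed 32-bit integer, which is why the stub boxes such results in a heap number. A hedged C++ restatement of those semantics (helper names are illustrative, and an arithmetic right shift for negative values is assumed):

#include <cstdint>

// JS shift semantics mirrored by the stub above: mask the count to 5 bits.
static int32_t JsSar(int32_t lhs, int32_t count) {
  return lhs >> (count & 0x1f);  // assumes arithmetic shift of negative values
}
static int32_t JsShl(int32_t lhs, int32_t count) {
  return static_cast<int32_t>(static_cast<uint32_t>(lhs) << (count & 0x1f));
}
static uint32_t JsShr(int32_t lhs, int32_t count) {
  // Unsigned result; e.g. JsShr(-1, 0) == 4294967295, which does not fit in
  // a signed 32-bit integer and therefore needs a heap number.
  return static_cast<uint32_t>(lhs) >> (count & 0x1f);
}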
-
-
-void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
- Label call_runtime;
-
- if (op_ == Token::ADD) {
- // Handle string addition here, because it is the only operation
- // that does not do a ToNumber conversion on the operands.
- GenerateAddStrings(masm);
- }
-
- // Convert oddball arguments to numbers.
- Label check, done;
- __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
- __ Branch(&check, ne, a1, Operand(t0));
- if (Token::IsBitOp(op_)) {
- __ li(a1, Operand(Smi::FromInt(0)));
- } else {
- __ LoadRoot(a1, Heap::kNanValueRootIndex);
- }
- __ jmp(&done);
- __ bind(&check);
- __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
- __ Branch(&done, ne, a0, Operand(t0));
- if (Token::IsBitOp(op_)) {
- __ li(a0, Operand(Smi::FromInt(0)));
- } else {
- __ LoadRoot(a0, Heap::kNanValueRootIndex);
- }
- __ bind(&done);
-
- GenerateHeapNumberStub(masm);
-}
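A compact restatement of the oddball handling above: for ToNumber purposes undefined behaves as NaN, except for bitwise and shift operators, where ToInt32(undefined) is 0. Sketch only; the helper is not part of the stub:

#include <cmath>

// Value substituted for an undefined operand by the stub above:
// Smi 0 for bit/shift ops, the NaN heap value otherwise.
static double UndefinedOperandValue(bool is_bit_or_shift_op) {
  return is_bit_or_shift_op ? 0.0 : std::nan("");
}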
-
-
-void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
- Label call_runtime;
- GenerateFPOperation(masm, false, &call_runtime, &call_runtime);
-
- __ bind(&call_runtime);
- GenerateCallRuntime(masm);
-}
-
-
-void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
- Label call_runtime, call_string_add_or_runtime;
-
- GenerateSmiCode(masm, &call_runtime, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
-
- GenerateFPOperation(masm, false, &call_string_add_or_runtime, &call_runtime);
-
- __ bind(&call_string_add_or_runtime);
- if (op_ == Token::ADD) {
- GenerateAddStrings(masm);
- }
-
- __ bind(&call_runtime);
- GenerateCallRuntime(masm);
-}
-
-
-void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
- ASSERT(op_ == Token::ADD);
- Label left_not_string, call_runtime;
-
- Register left = a1;
- Register right = a0;
-
- // Check if left argument is a string.
- __ JumpIfSmi(left, &left_not_string);
- __ GetObjectType(left, a2, a2);
- __ Branch(&left_not_string, ge, a2, Operand(FIRST_NONSTRING_TYPE));
-
- StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
- GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_left_stub);
-
- // Left operand is not a string, test right.
- __ bind(&left_not_string);
- __ JumpIfSmi(right, &call_runtime);
- __ GetObjectType(right, a2, a2);
- __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));
-
- StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
- GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_right_stub);
-
- // At least one argument is not a string.
- __ bind(&call_runtime);
-}
-
-
-void BinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
- GenerateRegisterArgsPush(masm);
- switch (op_) {
- case Token::ADD:
- __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
- break;
- case Token::SUB:
- __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
- break;
- case Token::MUL:
- __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
- break;
- case Token::DIV:
- __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
- break;
- case Token::MOD:
- __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
- break;
- case Token::BIT_OR:
- __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
- break;
- case Token::BIT_AND:
- __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
- break;
- case Token::BIT_XOR:
- __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
- break;
- case Token::SAR:
- __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
- break;
- case Token::SHR:
- __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
- break;
- case Token::SHL:
- __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void BinaryOpStub::GenerateHeapResultAllocation(
- MacroAssembler* masm,
- Register result,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
-
- // Code below will scratch result if allocation fails. To keep both arguments
-  // intact for the runtime call, result cannot be one of these.
- ASSERT(!result.is(a0) && !result.is(a1));
-
- if (mode_ == OVERWRITE_LEFT || mode_ == OVERWRITE_RIGHT) {
- Label skip_allocation, allocated;
- Register overwritable_operand = mode_ == OVERWRITE_LEFT ? a1 : a0;
- // If the overwritable operand is already an object, we skip the
- // allocation of a heap number.
- __ JumpIfNotSmi(overwritable_operand, &skip_allocation);
- // Allocate a heap number for the result.
- __ AllocateHeapNumber(
- result, scratch1, scratch2, heap_number_map, gc_required);
- __ Branch(&allocated);
- __ bind(&skip_allocation);
- // Use object holding the overwritable operand for result.
- __ mov(result, overwritable_operand);
- __ bind(&allocated);
- } else {
- ASSERT(mode_ == NO_OVERWRITE);
- __ AllocateHeapNumber(
- result, scratch1, scratch2, heap_number_map, gc_required);
- }
-}
-
-
-void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
- __ Push(a1, a0);
-}
-
-
-
void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
// Untagged case: double input in f4, double result goes
// into f4.
@@ -3415,107 +1261,102 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
const Register cache_entry = a0;
const bool tagged = (argument_type_ == TAGGED);
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
-
- if (tagged) {
- // Argument is a number and is on stack and in a0.
- // Load argument and check if it is a smi.
- __ JumpIfNotSmi(a0, &input_not_smi);
-
- // Input is a smi. Convert to double and load the low and high words
- // of the double into a2, a3.
- __ sra(t0, a0, kSmiTagSize);
- __ mtc1(t0, f4);
- __ cvt_d_w(f4, f4);
- __ Move(a2, a3, f4);
- __ Branch(&loaded);
-
- __ bind(&input_not_smi);
- // Check if input is a HeapNumber.
- __ CheckMap(a0,
- a1,
- Heap::kHeapNumberMapRootIndex,
- &calculate,
- DONT_DO_SMI_CHECK);
- // Input is a HeapNumber. Store the
- // low and high words into a2, a3.
- __ lw(a2, FieldMemOperand(a0, HeapNumber::kValueOffset));
- __ lw(a3, FieldMemOperand(a0, HeapNumber::kValueOffset + 4));
- } else {
- // Input is untagged double in f4. Output goes to f4.
- __ Move(a2, a3, f4);
- }
- __ bind(&loaded);
- // a2 = low 32 bits of double value.
- // a3 = high 32 bits of double value.
- // Compute hash (the shifts are arithmetic):
- // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
- __ Xor(a1, a2, a3);
- __ sra(t0, a1, 16);
- __ Xor(a1, a1, t0);
- __ sra(t0, a1, 8);
- __ Xor(a1, a1, t0);
- ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
- __ And(a1, a1, Operand(TranscendentalCache::SubCache::kCacheSize - 1));
-
- // a2 = low 32 bits of double value.
- // a3 = high 32 bits of double value.
- // a1 = TranscendentalCache::hash(double value).
- __ li(cache_entry, Operand(
- ExternalReference::transcendental_cache_array_address(
- masm->isolate())));
- // a0 points to cache array.
- __ lw(cache_entry, MemOperand(cache_entry, type_ * sizeof(
- Isolate::Current()->transcendental_cache()->caches_[0])));
- // a0 points to the cache for the type type_.
- // If NULL, the cache hasn't been initialized yet, so go through runtime.
- __ Branch(&invalid_cache, eq, cache_entry, Operand(zero_reg));
+ if (tagged) {
+ // Argument is a number and is on stack and in a0.
+ // Load argument and check if it is a smi.
+ __ JumpIfNotSmi(a0, &input_not_smi);
+
+ // Input is a smi. Convert to double and load the low and high words
+ // of the double into a2, a3.
+ __ sra(t0, a0, kSmiTagSize);
+ __ mtc1(t0, f4);
+ __ cvt_d_w(f4, f4);
+ __ Move(a2, a3, f4);
+ __ Branch(&loaded);
+
+ __ bind(&input_not_smi);
+ // Check if input is a HeapNumber.
+ __ CheckMap(a0,
+ a1,
+ Heap::kHeapNumberMapRootIndex,
+ &calculate,
+ DONT_DO_SMI_CHECK);
+ // Input is a HeapNumber. Store the
+ // low and high words into a2, a3.
+ __ lw(a2, FieldMemOperand(a0, HeapNumber::kValueOffset));
+ __ lw(a3, FieldMemOperand(a0, HeapNumber::kValueOffset + 4));
+ } else {
+ // Input is untagged double in f4. Output goes to f4.
+ __ Move(a2, a3, f4);
+ }
+ __ bind(&loaded);
+ // a2 = low 32 bits of double value.
+ // a3 = high 32 bits of double value.
+ // Compute hash (the shifts are arithmetic):
+ // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
+ __ Xor(a1, a2, a3);
+ __ sra(t0, a1, 16);
+ __ Xor(a1, a1, t0);
+ __ sra(t0, a1, 8);
+ __ Xor(a1, a1, t0);
+ ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
+ __ And(a1, a1, Operand(TranscendentalCache::SubCache::kCacheSize - 1));
+
+ // a2 = low 32 bits of double value.
+ // a3 = high 32 bits of double value.
+ // a1 = TranscendentalCache::hash(double value).
+ __ li(cache_entry, Operand(
+ ExternalReference::transcendental_cache_array_address(
+ masm->isolate())));
+ // a0 points to cache array.
+ __ lw(cache_entry, MemOperand(cache_entry, type_ * sizeof(
+ Isolate::Current()->transcendental_cache()->caches_[0])));
+ // a0 points to the cache for the type type_.
+ // If NULL, the cache hasn't been initialized yet, so go through runtime.
+ __ Branch(&invalid_cache, eq, cache_entry, Operand(zero_reg));
#ifdef DEBUG
- // Check that the layout of cache elements match expectations.
- { TranscendentalCache::SubCache::Element test_elem[2];
- char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
- char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
- char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
- char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
- char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
- CHECK_EQ(12, elem2_start - elem_start); // Two uint_32's and a pointer.
- CHECK_EQ(0, elem_in0 - elem_start);
- CHECK_EQ(kIntSize, elem_in1 - elem_start);
- CHECK_EQ(2 * kIntSize, elem_out - elem_start);
- }
+ // Check that the layout of cache elements match expectations.
+ { TranscendentalCache::SubCache::Element test_elem[2];
+ char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
+ char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
+ char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
+ char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
+ char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
+ CHECK_EQ(12, elem2_start - elem_start); // Two uint_32's and a pointer.
+ CHECK_EQ(0, elem_in0 - elem_start);
+ CHECK_EQ(kIntSize, elem_in1 - elem_start);
+ CHECK_EQ(2 * kIntSize, elem_out - elem_start);
+ }
#endif
- // Find the address of the a1'st entry in the cache, i.e., &a0[a1*12].
- __ sll(t0, a1, 1);
- __ Addu(a1, a1, t0);
- __ sll(t0, a1, 2);
- __ Addu(cache_entry, cache_entry, t0);
-
- // Check if cache matches: Double value is stored in uint32_t[2] array.
- __ lw(t0, MemOperand(cache_entry, 0));
- __ lw(t1, MemOperand(cache_entry, 4));
- __ lw(t2, MemOperand(cache_entry, 8));
- __ Branch(&calculate, ne, a2, Operand(t0));
- __ Branch(&calculate, ne, a3, Operand(t1));
- // Cache hit. Load result, cleanup and return.
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(
- counters->transcendental_cache_hit(), 1, scratch0, scratch1);
- if (tagged) {
- // Pop input value from stack and load result into v0.
- __ Drop(1);
- __ mov(v0, t2);
- } else {
- // Load result into f4.
- __ ldc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset));
- }
- __ Ret();
- } // if (CpuFeatures::IsSupported(FPU))
+ // Find the address of the a1'st entry in the cache, i.e., &a0[a1*12].
+ __ sll(t0, a1, 1);
+ __ Addu(a1, a1, t0);
+ __ sll(t0, a1, 2);
+ __ Addu(cache_entry, cache_entry, t0);
+
+ // Check if cache matches: Double value is stored in uint32_t[2] array.
+ __ lw(t0, MemOperand(cache_entry, 0));
+ __ lw(t1, MemOperand(cache_entry, 4));
+ __ lw(t2, MemOperand(cache_entry, 8));
+ __ Branch(&calculate, ne, a2, Operand(t0));
+ __ Branch(&calculate, ne, a3, Operand(t1));
+ // Cache hit. Load result, cleanup and return.
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(
+ counters->transcendental_cache_hit(), 1, scratch0, scratch1);
+ if (tagged) {
+ // Pop input value from stack and load result into v0.
+ __ Drop(1);
+ __ mov(v0, t2);
+ } else {
+ // Load result into f4.
+ __ ldc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset));
+ }
+ __ Ret();
__ bind(&calculate);
- Counters* counters = masm->isolate()->counters();
__ IncrementCounter(
counters->transcendental_cache_miss(), 1, scratch0, scratch1);
if (tagged) {
@@ -3525,9 +1366,6 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
1,
1);
} else {
- ASSERT(CpuFeatures::IsSupported(FPU));
- CpuFeatures::Scope scope(FPU);
-
Label no_update;
Label skip_cache;
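The cache lookup earlier in this hunk hashes the two 32-bit halves of the input double down to an index into a power-of-two-sized table, exactly as described by the `h = (low ^ high); ...` comment. A standalone C++ sketch of that hash; the word order assumes little-endian storage, and the signed shifts mirror the sra sequence:

#include <cstdint>
#include <cstring>

// Sketch of the cache hash computed above (assumptions noted in the text):
// fold the low and high words together, then mask to the cache size.
static uint32_t TranscendentalHash(double d, uint32_t cache_size) {
  uint32_t words[2];
  std::memcpy(words, &d, sizeof(words));  // words[0] = low, words[1] = high
  int32_t h = static_cast<int32_t>(words[0] ^ words[1]);
  h ^= h >> 16;  // arithmetic shifts, as in the sra sequence
  h ^= h >> 8;
  return static_cast<uint32_t>(h) & (cache_size - 1);  // power-of-two size
}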
@@ -3643,18 +1481,7 @@ Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
}
-void StackCheckStub::Generate(MacroAssembler* masm) {
- __ TailCallRuntime(Runtime::kStackGuard, 0, 1);
-}
-
-
-void InterruptStub::Generate(MacroAssembler* masm) {
- __ TailCallRuntime(Runtime::kInterrupt, 0, 1);
-}
-
-
void MathPowStub::Generate(MacroAssembler* masm) {
- CpuFeatures::Scope fpu_scope(FPU);
const Register base = a1;
const Register exponent = a2;
const Register heapnumbermap = t1;
@@ -3708,9 +1535,10 @@ void MathPowStub::Generate(MacroAssembler* masm) {
Label int_exponent_convert;
// Detect integer exponents stored as double.
__ EmitFPUTruncate(kRoundToMinusInf,
- single_scratch,
- double_exponent,
scratch,
+ double_exponent,
+ at,
+ double_scratch,
scratch2,
kCheckForInexactConversion);
// scratch2 == 0 means there was no conversion error.
@@ -3768,7 +1596,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ push(ra);
{
AllowExternalCallThatCantCauseGC scope(masm);
- __ PrepareCallCFunction(0, 2, scratch);
+ __ PrepareCallCFunction(0, 2, scratch2);
__ SetCallCDoubleArguments(double_base, double_exponent);
__ CallCFunction(
ExternalReference::power_double_double_function(masm->isolate()),
@@ -3779,7 +1607,6 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ jmp(&done);
__ bind(&int_exponent_convert);
- __ mfc1(scratch, single_scratch);
}
// Calculate power with integer exponent.
@@ -3874,37 +1701,63 @@ bool CEntryStub::NeedsImmovableCode() {
}
-bool CEntryStub::IsPregenerated() {
- return (!save_doubles_ || ISOLATE->fp_stubs_generated()) &&
+bool CEntryStub::IsPregenerated(Isolate* isolate) {
+ return (!save_doubles_ || isolate->fp_stubs_generated()) &&
result_size_ == 1;
}
-void CodeStub::GenerateStubsAheadOfTime() {
- CEntryStub::GenerateAheadOfTime();
- WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime();
- StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime();
- RecordWriteStub::GenerateFixedRegStubsAheadOfTime();
+void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
+ CEntryStub::GenerateAheadOfTime(isolate);
+ WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(isolate);
+ StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
+ StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
+ RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate);
+ ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
+ CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
+ BinaryOpStub::GenerateAheadOfTime(isolate);
}
-void CodeStub::GenerateFPStubs() {
- CEntryStub save_doubles(1, kSaveFPRegs);
- Handle<Code> code = save_doubles.GetCode();
- code->set_is_pregenerated(true);
- StoreBufferOverflowStub stub(kSaveFPRegs);
- stub.GetCode()->set_is_pregenerated(true);
- code->GetIsolate()->set_fp_stubs_generated(true);
+void CodeStub::GenerateFPStubs(Isolate* isolate) {
+ SaveFPRegsMode mode = kSaveFPRegs;
+ CEntryStub save_doubles(1, mode);
+ StoreBufferOverflowStub stub(mode);
+  // These stubs might already be in the snapshot; detect that and don't
+  // regenerate them, since regenerating would leave the code stub
+  // initialization state inconsistent.
+ Code* save_doubles_code;
+ if (!save_doubles.FindCodeInCache(&save_doubles_code, isolate)) {
+ save_doubles_code = *save_doubles.GetCode(isolate);
+ }
+ Code* store_buffer_overflow_code;
+ if (!stub.FindCodeInCache(&store_buffer_overflow_code, isolate)) {
+ store_buffer_overflow_code = *stub.GetCode(isolate);
+ }
+ save_doubles_code->set_is_pregenerated(true);
+ store_buffer_overflow_code->set_is_pregenerated(true);
+ isolate->set_fp_stubs_generated(true);
}
-void CEntryStub::GenerateAheadOfTime() {
+void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
CEntryStub stub(1, kDontSaveFPRegs);
- Handle<Code> code = stub.GetCode();
+ Handle<Code> code = stub.GetCode(isolate);
code->set_is_pregenerated(true);
}
+static void JumpIfOOM(MacroAssembler* masm,
+ Register value,
+ Register scratch,
+ Label* oom_label) {
+ STATIC_ASSERT(Failure::OUT_OF_MEMORY_EXCEPTION == 3);
+ STATIC_ASSERT(kFailureTag == 3);
+ __ andi(scratch, value, 0xf);
+ __ Branch(oom_label, eq, scratch, Operand(0xf));
+}
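The 0xf mask in JumpIfOOM follows from the two STATIC_ASSERTs: failures carry kFailureTag (0b11) in the low two bits and the failure type in the next two, so an out-of-memory failure (type 3) has 0b1111 in its low nibble. A hedged C++ restatement under that assumed layout:

#include <cstdint>

// Sketch: with kFailureTag == 3 in bits 0..1 and the failure type in
// bits 2..3, OUT_OF_MEMORY_EXCEPTION (== 3) makes the low nibble 0xf.
static bool IsOutOfMemoryFailure(uint32_t raw_value) {
  const uint32_t kFailureTag = 3;            // 0b11 in bits 0..1
  const uint32_t kOutOfMemoryException = 3;  // 0b11 in bits 2..3
  const uint32_t oom_nibble = (kOutOfMemoryException << 2) | kFailureTag;
  return (raw_value & 0xf) == oom_nibble;    // oom_nibble == 0xf
}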
+
+
void CEntryStub::GenerateCore(MacroAssembler* masm,
Label* throw_normal_exception,
Label* throw_termination_exception,
@@ -3921,8 +1774,9 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
if (do_gc) {
// Move result passed in v0 into a0 to call PerformGC.
__ mov(a0, v0);
- __ PrepareCallCFunction(1, 0, a1);
- __ CallCFunction(ExternalReference::perform_gc_function(isolate), 1, 0);
+ __ PrepareCallCFunction(2, 0, a1);
+ __ li(a1, Operand(ExternalReference::isolate_address(masm->isolate())));
+ __ CallCFunction(ExternalReference::perform_gc_function(isolate), 2, 0);
}
ExternalReference scope_depth =
@@ -3944,7 +1798,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
__ AssertStackIsAligned();
- __ li(a2, Operand(ExternalReference::isolate_address()));
+ __ li(a2, Operand(ExternalReference::isolate_address(isolate)));
// To let the GC traverse the return address of the exit frames, we need to
// know where the return address is. The CEntryStub is unmovable, so
@@ -4001,7 +1855,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// v0:v1: result
// sp: stack pointer
// fp: frame pointer
- __ LeaveExitFrame(save_doubles_, s0, true);
+ __ LeaveExitFrame(save_doubles_, s0, true, EMIT_RETURN);
// Check if we should retry or throw exception.
Label retry;
@@ -4011,20 +1865,20 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
__ Branch(&retry, eq, t0, Operand(zero_reg));
// Special handling of out of memory exceptions.
- Failure* out_of_memory = Failure::OutOfMemoryException();
- __ Branch(USE_DELAY_SLOT,
- throw_out_of_memory_exception,
- eq,
- v0,
- Operand(reinterpret_cast<int32_t>(out_of_memory)));
- // If we throw the OOM exception, the value of a3 doesn't matter.
- // Any instruction can be in the delay slot that's not a jump.
+ JumpIfOOM(masm, v0, t0, throw_out_of_memory_exception);
- // Retrieve the pending exception and clear the variable.
- __ LoadRoot(a3, Heap::kTheHoleValueRootIndex);
+ // Retrieve the pending exception.
__ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
isolate)));
__ lw(v0, MemOperand(t0));
+
+ // See if we just retrieved an OOM exception.
+ JumpIfOOM(masm, v0, t0, throw_out_of_memory_exception);
+
+ // Clear the pending exception.
+ __ li(a3, Operand(isolate->factory()->the_hole_value()));
+ __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
+ isolate)));
__ sw(a3, MemOperand(t0));
// Special handling of termination exceptions which are uncatchable
@@ -4049,6 +1903,8 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// sp: stack pointer (restored as callee's sp after C call)
// cp: current context (C callee-saved)
+ ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
// NOTE: Invocations of builtins may return failure objects
// instead of a proper result. The builtin entry handles
// this by performing a garbage collection and retrying the
@@ -4105,13 +1961,16 @@ void CEntryStub::Generate(MacroAssembler* masm) {
Isolate* isolate = masm->isolate();
ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
isolate);
- __ li(a0, Operand(false, RelocInfo::NONE));
+ __ li(a0, Operand(false, RelocInfo::NONE32));
__ li(a2, Operand(external_caught));
__ sw(a0, MemOperand(a2));
// Set pending exception and v0 to out of memory exception.
- Failure* out_of_memory = Failure::OutOfMemoryException();
+ Label already_have_failure;
+ JumpIfOOM(masm, v0, t0, &already_have_failure);
+ Failure* out_of_memory = Failure::OutOfMemoryException(0x1);
__ li(v0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
+ __ bind(&already_have_failure);
__ li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
isolate)));
__ sw(v0, MemOperand(a2));
@@ -4139,23 +1998,20 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// 4 args slots
// args
+ ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
// Save callee saved registers on the stack.
__ MultiPush(kCalleeSaved | ra.bit());
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
- // Save callee-saved FPU registers.
- __ MultiPushFPU(kCalleeSavedFPU);
- // Set up the reserved register for 0.0.
- __ Move(kDoubleRegZero, 0.0);
- }
+ // Save callee-saved FPU registers.
+ __ MultiPushFPU(kCalleeSavedFPU);
+ // Set up the reserved register for 0.0.
+ __ Move(kDoubleRegZero, 0.0);
// Load argv in s0 register.
int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
- if (CpuFeatures::IsSupported(FPU)) {
- offset_to_argv += kNumCalleeSavedFPU * kDoubleSize;
- }
+ offset_to_argv += kNumCalleeSavedFPU * kDoubleSize;
__ InitializeRootRegister();
__ lw(s0, MemOperand(sp, offset_to_argv + kCArgsSlotsSize));
@@ -4291,11 +2147,8 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// Reset the stack to the callee saved registers.
__ addiu(sp, sp, -EntryFrameConstants::kCallerFPOffset);
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
- // Restore callee-saved fpu registers.
- __ MultiPopFPU(kCalleeSavedFPU);
- }
+ // Restore callee-saved fpu registers.
+ __ MultiPopFPU(kCalleeSavedFPU);
// Restore callee saved registers from the stack.
__ MultiPop(kCalleeSaved | ra.bit());
@@ -4375,7 +2228,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ Subu(inline_site, ra, scratch);
// Get the map location in scratch and patch it.
__ GetRelocatedValue(inline_site, scratch, v1); // v1 used as scratch.
- __ sw(map, FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
+ __ sw(map, FieldMemOperand(scratch, Cell::kValueOffset));
}
// Register mapping: a3 is object map and t0 is function prototype.
@@ -4482,6 +2335,138 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
}
+void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
+ Label miss;
+ Register receiver;
+ if (kind() == Code::KEYED_LOAD_IC) {
+ // ----------- S t a t e -------------
+ // -- ra : return address
+ // -- a0 : key
+ // -- a1 : receiver
+ // -----------------------------------
+ __ Branch(&miss, ne, a0,
+ Operand(masm->isolate()->factory()->prototype_string()));
+ receiver = a1;
+ } else {
+ ASSERT(kind() == Code::LOAD_IC);
+ // ----------- S t a t e -------------
+ // -- a2 : name
+ // -- ra : return address
+ // -- a0 : receiver
+ // -- sp[0] : receiver
+ // -----------------------------------
+ receiver = a0;
+ }
+
+ StubCompiler::GenerateLoadFunctionPrototype(masm, receiver, a3, t0, &miss);
+ __ bind(&miss);
+ StubCompiler::TailCallBuiltin(
+ masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
+}
+
+
+void StringLengthStub::Generate(MacroAssembler* masm) {
+ Label miss;
+ Register receiver;
+ if (kind() == Code::KEYED_LOAD_IC) {
+ // ----------- S t a t e -------------
+ // -- ra : return address
+ // -- a0 : key
+ // -- a1 : receiver
+ // -----------------------------------
+ __ Branch(&miss, ne, a0,
+ Operand(masm->isolate()->factory()->length_string()));
+ receiver = a1;
+ } else {
+ ASSERT(kind() == Code::LOAD_IC);
+ // ----------- S t a t e -------------
+ // -- a2 : name
+ // -- ra : return address
+ // -- a0 : receiver
+ // -- sp[0] : receiver
+ // -----------------------------------
+ receiver = a0;
+ }
+
+ StubCompiler::GenerateLoadStringLength(masm, receiver, a3, t0, &miss);
+
+ __ bind(&miss);
+ StubCompiler::TailCallBuiltin(
+ masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
+}
+
+
+void StoreArrayLengthStub::Generate(MacroAssembler* masm) {
+ // This accepts as a receiver anything JSArray::SetElementsLength accepts
+ // (currently anything except for external arrays which means anything with
+ // elements of FixedArray type). Value must be a number, but only smis are
+ // accepted as the most common case.
+ Label miss;
+
+ Register receiver;
+ Register value;
+ if (kind() == Code::KEYED_STORE_IC) {
+ // ----------- S t a t e -------------
+ // -- ra : return address
+ // -- a0 : value
+ // -- a1 : key
+ // -- a2 : receiver
+ // -----------------------------------
+ __ Branch(&miss, ne, a1,
+ Operand(masm->isolate()->factory()->length_string()));
+ receiver = a2;
+ value = a0;
+ } else {
+ ASSERT(kind() == Code::STORE_IC);
+ // ----------- S t a t e -------------
+ // -- ra : return address
+ // -- a0 : value
+ // -- a1 : receiver
+ // -- a2 : key
+ // -----------------------------------
+ receiver = a1;
+ value = a0;
+ }
+ Register scratch = a3;
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, &miss);
+
+ // Check that the object is a JS array.
+ __ GetObjectType(receiver, scratch, scratch);
+ __ Branch(&miss, ne, scratch, Operand(JS_ARRAY_TYPE));
+
+ // Check that elements are FixedArray.
+ // We rely on StoreIC_ArrayLength below to deal with all types of
+ // fast elements (including COW).
+ __ lw(scratch, FieldMemOperand(receiver, JSArray::kElementsOffset));
+ __ GetObjectType(scratch, scratch, scratch);
+ __ Branch(&miss, ne, scratch, Operand(FIXED_ARRAY_TYPE));
+
+ // Check that the array has fast properties, otherwise the length
+ // property might have been redefined.
+ __ lw(scratch, FieldMemOperand(receiver, JSArray::kPropertiesOffset));
+ __ lw(scratch, FieldMemOperand(scratch, FixedArray::kMapOffset));
+ __ LoadRoot(at, Heap::kHashTableMapRootIndex);
+ __ Branch(&miss, eq, scratch, Operand(at));
+
+ // Check that value is a smi.
+ __ JumpIfNotSmi(value, &miss);
+
+ // Prepare tail call to StoreIC_ArrayLength.
+ __ Push(receiver, value);
+
+ ExternalReference ref =
+ ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength), masm->isolate());
+ __ TailCallExternalReference(ref, 2, 1);
+
+ __ bind(&miss);
+
+ StubCompiler::TailCallBuiltin(
+ masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
+}
+
+
Register InstanceofStub::left() { return a0; }
@@ -4516,8 +2501,8 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
__ subu(a3, a0, a1);
__ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
__ Addu(a3, fp, Operand(t3));
+ __ Ret(USE_DELAY_SLOT);
__ lw(v0, MemOperand(a3, kDisplacement));
- __ Ret();
// Arguments adaptor case: Check index (a1) against actual arguments
// limit found in the arguments adaptor frame. Use unsigned
@@ -4530,8 +2515,8 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
__ subu(a3, a0, a1);
__ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
__ Addu(a3, a2, Operand(t3));
+ __ Ret(USE_DELAY_SLOT);
__ lw(v0, MemOperand(a3, kDisplacement));
- __ Ret();
// Slow-case: Handle non-smi or out-of-bounds access to arguments
// by calling the runtime system.
@@ -4634,7 +2619,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ Addu(t5, t5, Operand(Heap::kArgumentsObjectSize));
// Do the allocation of all three objects in one go.
- __ AllocateInNewSpace(t5, v0, a3, t0, &runtime, TAG_OBJECT);
+ __ Allocate(t5, v0, a3, t0, &runtime, TAG_OBJECT);
// v0 = address of new object(s) (tagged)
// a2 = argument count (tagged)
@@ -4825,13 +2810,8 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ Addu(a1, a1, Operand(Heap::kArgumentsObjectSizeStrict / kPointerSize));
// Do the allocation of both objects in one go.
- __ AllocateInNewSpace(a1,
- v0,
- a2,
- a3,
- &runtime,
- static_cast<AllocationFlags>(TAG_OBJECT |
- SIZE_IN_WORDS));
+ __ Allocate(a1, v0, a2, a3, &runtime,
+ static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
// Get the arguments boilerplate from the current native context.
__ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
@@ -4910,8 +2890,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
Isolate* isolate = masm->isolate();
- Label runtime, invoke_regexp;
-
+ Label runtime;
// Allocation of registers for this function. These are in callee save
// registers and will be preserved by the call to the native RegExp code, as
// this code is called using the normal C calling convention. When calling
@@ -4944,12 +2923,12 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
if (FLAG_debug_code) {
__ And(t0, regexp_data, Operand(kSmiTagMask));
__ Check(nz,
- "Unexpected type for RegExp data, FixedArray expected",
+ kUnexpectedTypeForRegExpDataFixedArrayExpected,
t0,
Operand(zero_reg));
__ GetObjectType(regexp_data, a0, a0);
__ Check(eq,
- "Unexpected type for RegExp data, FixedArray expected",
+ kUnexpectedTypeForRegExpDataFixedArrayExpected,
a0,
Operand(FIXED_ARRAY_TYPE));
}
@@ -4963,149 +2942,111 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Check that the number of captures fit in the static offsets vector buffer.
__ lw(a2,
FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
- // Calculate number of capture registers (number_of_captures + 1) * 2. This
-  // uses the assumption that smis are 2 * their untagged value.
+ // Check (number_of_captures + 1) * 2 <= offsets vector size
+ // Or number_of_captures * 2 <= offsets vector size - 2
+ // Multiplying by 2 comes for free since a2 is smi-tagged.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
- __ Addu(a2, a2, Operand(2)); // a2 was a smi.
- // Check that the static offsets vector buffer is large enough.
+ STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
__ Branch(
- &runtime, hi, a2, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize));
-
- // a2: Number of capture registers
- // regexp_data: RegExp data (FixedArray)
- // Check that the second argument is a string.
- __ lw(subject, MemOperand(sp, kSubjectOffset));
- __ JumpIfSmi(subject, &runtime);
- __ GetObjectType(subject, a0, a0);
- __ And(a0, a0, Operand(kIsNotStringMask));
- STATIC_ASSERT(kStringTag == 0);
- __ Branch(&runtime, ne, a0, Operand(zero_reg));
-
- // Get the length of the string to r3.
- __ lw(a3, FieldMemOperand(subject, String::kLengthOffset));
-
- // a2: Number of capture registers
- // a3: Length of subject string as a smi
- // subject: Subject string
- // regexp_data: RegExp data (FixedArray)
- // Check that the third argument is a positive smi less than the subject
- // string length. A negative value will be greater (unsigned comparison).
- __ lw(a0, MemOperand(sp, kPreviousIndexOffset));
- __ JumpIfNotSmi(a0, &runtime);
- __ Branch(&runtime, ls, a3, Operand(a0));
-
- // a2: Number of capture registers
- // subject: Subject string
- // regexp_data: RegExp data (FixedArray)
- // Check that the fourth object is a JSArray object.
- __ lw(a0, MemOperand(sp, kLastMatchInfoOffset));
- __ JumpIfSmi(a0, &runtime);
- __ GetObjectType(a0, a1, a1);
- __ Branch(&runtime, ne, a1, Operand(JS_ARRAY_TYPE));
- // Check that the JSArray is in fast case.
- __ lw(last_match_info_elements,
- FieldMemOperand(a0, JSArray::kElementsOffset));
- __ lw(a0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
- __ Branch(&runtime, ne, a0, Operand(
- isolate->factory()->fixed_array_map()));
- // Check that the last match info has space for the capture registers and the
- // additional information.
- __ lw(a0,
- FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
- __ Addu(a2, a2, Operand(RegExpImpl::kLastMatchOverhead));
- __ sra(at, a0, kSmiTagSize); // Untag length for comparison.
- __ Branch(&runtime, gt, a2, Operand(at));
+ &runtime, hi, a2, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize - 2));
// Reset offset for possibly sliced string.
__ mov(t0, zero_reg);
- // subject: Subject string
- // regexp_data: RegExp data (FixedArray)
- // Check the representation and encoding of the subject string.
- Label seq_string;
+ __ lw(subject, MemOperand(sp, kSubjectOffset));
+ __ JumpIfSmi(subject, &runtime);
+ __ mov(a3, subject); // Make a copy of the original subject string.
__ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
__ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
- // First check for flat string. None of the following string type tests will
- // succeed if subject is not a string or a short external string.
+ // subject: subject string
+ // a3: subject string
+ // a0: subject string instance type
+ // regexp_data: RegExp data (FixedArray)
+ // Handle subject string according to its encoding and representation:
+ // (1) Sequential string? If yes, go to (5).
+ // (2) Anything but sequential or cons? If yes, go to (6).
+ // (3) Cons string. If the string is flat, replace subject with first string.
+ // Otherwise bailout.
+ // (4) Is subject external? If yes, go to (7).
+ // (5) Sequential string. Load regexp code according to encoding.
+ // (E) Carry on.
+ /// [...]
+
+ // Deferred code at the end of the stub:
+ // (6) Not a long external string? If yes, go to (8).
+ // (7) External string. Make it, offset-wise, look like a sequential string.
+ // Go to (5).
+ // (8) Short external string or not a string? If yes, bail out to runtime.
+ // (9) Sliced string. Replace subject with parent. Go to (4).
+
+ Label seq_string /* 5 */, external_string /* 7 */,
+ check_underlying /* 4 */, not_seq_nor_cons /* 6 */,
+ not_long_external /* 8 */;
+
+ // (1) Sequential string? If yes, go to (5).
__ And(a1,
a0,
Operand(kIsNotStringMask |
kStringRepresentationMask |
kShortExternalStringMask));
STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
- __ Branch(&seq_string, eq, a1, Operand(zero_reg));
+ __ Branch(&seq_string, eq, a1, Operand(zero_reg)); // Go to (5).
- // subject: Subject string
- // a0: instance type if Subject string
- // regexp_data: RegExp data (FixedArray)
- // a1: whether subject is a string and if yes, its string representation
- // Check for flat cons string or sliced string.
- // A flat cons string is a cons string where the second part is the empty
- // string. In that case the subject string is just the first part of the cons
- // string. Also in this case the first part of the cons string is known to be
- // a sequential string or an external string.
- // In the case of a sliced string its offset has to be taken into account.
- Label cons_string, external_string, check_encoding;
+ // (2) Anything but sequential or cons? If yes, go to (6).
STATIC_ASSERT(kConsStringTag < kExternalStringTag);
STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
- __ Branch(&cons_string, lt, a1, Operand(kExternalStringTag));
- __ Branch(&external_string, eq, a1, Operand(kExternalStringTag));
+ // Go to (6).
+ __ Branch(&not_seq_nor_cons, ge, a1, Operand(kExternalStringTag));
- // Catch non-string subject or short external string.
- STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag !=0);
- __ And(at, a1, Operand(kIsNotStringMask | kShortExternalStringMask));
- __ Branch(&runtime, ne, at, Operand(zero_reg));
-
- // String is sliced.
- __ lw(t0, FieldMemOperand(subject, SlicedString::kOffsetOffset));
- __ sra(t0, t0, kSmiTagSize);
- __ lw(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
- // t5: offset of sliced string, smi-tagged.
- __ jmp(&check_encoding);
- // String is a cons string, check whether it is flat.
- __ bind(&cons_string);
+ // (3) Cons string. Check that it's flat.
+ // Replace subject with first string and reload instance type.
__ lw(a0, FieldMemOperand(subject, ConsString::kSecondOffset));
- __ LoadRoot(a1, Heap::kEmptyStringRootIndex);
+ __ LoadRoot(a1, Heap::kempty_stringRootIndex);
__ Branch(&runtime, ne, a0, Operand(a1));
__ lw(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
- // Is first part of cons or parent of slice a flat string?
- __ bind(&check_encoding);
+
+ // (4) Is subject external? If yes, go to (7).
+ __ bind(&check_underlying);
__ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
__ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
STATIC_ASSERT(kSeqStringTag == 0);
__ And(at, a0, Operand(kStringRepresentationMask));
- __ Branch(&external_string, ne, at, Operand(zero_reg));
+ // The underlying external string is never a short external string.
+ STATIC_CHECK(ExternalString::kMaxShortLength < ConsString::kMinLength);
+ STATIC_CHECK(ExternalString::kMaxShortLength < SlicedString::kMinLength);
+ __ Branch(&external_string, ne, at, Operand(zero_reg)); // Go to (7).
+ // (5) Sequential string. Load regexp code according to encoding.
__ bind(&seq_string);
- // subject: Subject string
- // regexp_data: RegExp data (FixedArray)
- // a0: Instance type of subject string
+ // subject: sequential subject string (or look-alike, external string)
+ // a3: original subject string
+ // Load previous index and check range before a3 is overwritten. We have to
+ // use a3 instead of subject here because subject might have been only made
+ // to look like a sequential string when it actually is an external string.
+ __ lw(a1, MemOperand(sp, kPreviousIndexOffset));
+ __ JumpIfNotSmi(a1, &runtime);
+ __ lw(a3, FieldMemOperand(a3, String::kLengthOffset));
+ __ Branch(&runtime, ls, a3, Operand(a1));
+ __ sra(a1, a1, kSmiTagSize); // Untag the Smi.
+
STATIC_ASSERT(kStringEncodingMask == 4);
- STATIC_ASSERT(kAsciiStringTag == 4);
+ STATIC_ASSERT(kOneByteStringTag == 4);
STATIC_ASSERT(kTwoByteStringTag == 0);
- // Find the code object based on the assumptions above.
__ And(a0, a0, Operand(kStringEncodingMask)); // Non-zero for ASCII.
__ lw(t9, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset));
__ sra(a3, a0, 2); // a3 is 1 for ASCII, 0 for UC16 (used below).
__ lw(t1, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset));
__ Movz(t9, t1, a0); // If UC16 (a0 is 0), replace t9 w/kDataUC16CodeOffset.
+ // (E) Carry on. String handling is done.
+ // t9: irregexp code
// Check that the irregexp code has been generated for the actual string
+  // encoding. If it has, the field contains a code object; otherwise it contains
// a smi (code flushing support).
__ JumpIfSmi(t9, &runtime);
- // a3: encoding of subject string (1 if ASCII, 0 if two_byte);
- // t9: code
- // subject: Subject string
- // regexp_data: RegExp data (FixedArray)
- // Load used arguments before starting to push arguments for call to native
- // RegExp code to avoid handling changing stack height.
- __ lw(a1, MemOperand(sp, kPreviousIndexOffset));
- __ sra(a1, a1, kSmiTagSize); // Untag the Smi.
-
// a1: previous index
// a3: encoding of subject string (1 if ASCII, 0 if two_byte);
// t9: code
@@ -5136,7 +3077,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Argument 9: Pass current isolate address.
// CFunctionArgumentOperand handles MIPS stack argument slots.
- __ li(a0, Operand(ExternalReference::isolate_address()));
+ __ li(a0, Operand(ExternalReference::isolate_address(isolate)));
__ sw(a0, MemOperand(sp, 5 * kPointerSize));
// Argument 8: Indicate that this is a direct call from JavaScript.
@@ -5194,15 +3135,13 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
DirectCEntryStub stub;
stub.GenerateCall(masm, t9);
- __ LeaveExitFrame(false, no_reg);
+ __ LeaveExitFrame(false, no_reg, true);
// v0: result
// subject: subject string (callee saved)
// regexp_data: RegExp data (callee saved)
// last_match_info_elements: Last match info elements (callee saved)
-
// Check the result.
-
Label success;
__ Branch(&success, eq, v0, Operand(1));
// We expect exactly one result since we force the called regexp to behave
@@ -5243,10 +3182,29 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ lw(a1,
FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
// Calculate number of capture registers (number_of_captures + 1) * 2.
+  // Multiplying by 2 comes for free since a1 is smi-tagged.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
__ Addu(a1, a1, Operand(2)); // a1 was a smi.
+ __ lw(a0, MemOperand(sp, kLastMatchInfoOffset));
+ __ JumpIfSmi(a0, &runtime);
+ __ GetObjectType(a0, a2, a2);
+ __ Branch(&runtime, ne, a2, Operand(JS_ARRAY_TYPE));
+ // Check that the JSArray is in fast case.
+ __ lw(last_match_info_elements,
+ FieldMemOperand(a0, JSArray::kElementsOffset));
+ __ lw(a0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
+ __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
+ __ Branch(&runtime, ne, a0, Operand(at));
+ // Check that the last match info has space for the capture registers and the
+ // additional information.
+ __ lw(a0,
+ FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
+ __ Addu(a2, a1, Operand(RegExpImpl::kLastMatchOverhead));
+ __ sra(at, a0, kSmiTagSize);
+ __ Branch(&runtime, gt, a2, Operand(at));
+
// a1: number of capture registers
// subject: subject string
// Store the capture count.
@@ -5260,10 +3218,11 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ mov(a2, subject);
__ RecordWriteField(last_match_info_elements,
RegExpImpl::kLastSubjectOffset,
- a2,
+ subject,
t3,
kRAHasNotBeenSaved,
kDontSaveFPRegs);
+ __ mov(subject, a2);
__ sw(subject,
FieldMemOperand(last_match_info_elements,
RegExpImpl::kLastInputOffset));
@@ -5305,8 +3264,17 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ lw(v0, MemOperand(sp, kLastMatchInfoOffset));
__ DropAndRet(4);
- // External string. Short external strings have already been ruled out.
- // a0: scratch
+ // Do the runtime call to execute the regexp.
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+
+ // Deferred code for string handling.
+ // (6) Not a long external string? If yes, go to (8).
+ __ bind(&not_seq_nor_cons);
+ // Go to (8).
+ __ Branch(&not_long_external, gt, a1, Operand(kExternalStringTag));
+
+ // (7) External string. Make it, offset-wise, look like a sequential string.
__ bind(&external_string);
__ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
__ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
@@ -5315,22 +3283,31 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Sequential strings have already been ruled out.
__ And(at, a0, Operand(kIsIndirectStringMask));
__ Assert(eq,
- "external string expected, but not found",
+ kExternalStringExpectedButNotFound,
at,
Operand(zero_reg));
}
__ lw(subject,
FieldMemOperand(subject, ExternalString::kResourceDataOffset));
// Move the pointer so that offset-wise, it looks like a sequential string.
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
__ Subu(subject,
subject,
SeqTwoByteString::kHeaderSize - kHeapObjectTag);
- __ jmp(&seq_string);
+ __ jmp(&seq_string); // Go to (5).
- // Do the runtime call to execute the regexp.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ // (8) Short external string or not a string? If yes, bail out to runtime.
+ __ bind(&not_long_external);
+ STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag !=0);
+ __ And(at, a1, Operand(kIsNotStringMask | kShortExternalStringMask));
+ __ Branch(&runtime, ne, at, Operand(zero_reg));
+
+ // (9) Sliced string. Replace subject with parent. Go to (4).
+ // Load offset into t0 and replace subject string with parent.
+ __ lw(t0, FieldMemOperand(subject, SlicedString::kOffsetOffset));
+ __ sra(t0, t0, kSmiTagSize);
+ __ lw(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
+ __ jmp(&check_underlying); // Go to (4).
#endif // V8_INTERPRETED_REGEXP
}
@@ -5354,7 +3331,7 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
(JSRegExpResult::kSize + FixedArray::kHeaderSize) / kPointerSize;
__ srl(t1, a1, kSmiTagSize + kSmiShiftSize);
__ Addu(a2, t1, Operand(objects_size));
- __ AllocateInNewSpace(
+ __ Allocate(
a2, // In: Size, in words.
v0, // Out: Start of allocation (tagged).
a3, // Scratch register.
@@ -5426,9 +3403,10 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// Cache the called function in a global property cell. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
+ // a0 : number of arguments to the construct function
// a1 : the function to call
// a2 : cache cell for call target
- Label done;
+ Label initialize, done, miss, megamorphic, not_array_function;
ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
masm->isolate()->heap()->undefined_value());
@@ -5436,29 +3414,69 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
masm->isolate()->heap()->the_hole_value());
// Load the cache state into a3.
- __ lw(a3, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
+ __ lw(a3, FieldMemOperand(a2, Cell::kValueOffset));
// A monomorphic cache hit or an already megamorphic state: invoke the
// function without changing the state.
__ Branch(&done, eq, a3, Operand(a1));
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ Branch(&done, eq, a3, Operand(at));
+
+ // If we came here, we need to see if we are the array function.
+ // If we didn't have a matching function, and we didn't find the megamorphic
+ // sentinel, then we have in the cell either some other function or an
+ // AllocationSite. Do a map check on the object in a3.
+ __ lw(t1, FieldMemOperand(a3, 0));
+ __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
+ __ Branch(&miss, ne, t1, Operand(at));
+
+ // Make sure the function is the Array() function
+ __ LoadArrayFunction(a3);
+ __ Branch(&megamorphic, ne, a1, Operand(a3));
+ __ jmp(&done);
+
+ __ bind(&miss);
// A monomorphic miss (i.e., here the cache is not uninitialized) goes
// megamorphic.
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
-
- __ Branch(USE_DELAY_SLOT, &done, eq, a3, Operand(at));
- // An uninitialized cache is patched with the function.
- // Store a1 in the delay slot. This may or may not get overwritten depending
- // on the result of the comparison.
- __ sw(a1, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
- // No need for a write barrier here - cells are rescanned.
-
+ __ Branch(&initialize, eq, a3, Operand(at));
// MegamorphicSentinel is an immortal immovable object (undefined) so no
// write-barrier is needed.
+ __ bind(&megamorphic);
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ sw(at, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
+ __ sw(at, FieldMemOperand(a2, Cell::kValueOffset));
+ __ jmp(&done);
+
+ // An uninitialized cache is patched with the function or sentinel to
+ // indicate the ElementsKind if function is the Array constructor.
+ __ bind(&initialize);
+ // Make sure the function is the Array() function
+ __ LoadArrayFunction(a3);
+ __ Branch(&not_array_function, ne, a1, Operand(a3));
+
+ // The target function is the Array constructor.
+ // Create an AllocationSite if we don't already have it, store it in the cell.
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ const RegList kSavedRegs =
+ 1 << 4 | // a0
+ 1 << 5 | // a1
+ 1 << 6; // a2
+
+ // Arguments register must be smi-tagged to call out.
+ __ SmiTag(a0);
+ __ MultiPush(kSavedRegs);
+
+ CreateAllocationSiteStub create_stub;
+ __ CallStub(&create_stub);
+
+ __ MultiPop(kSavedRegs);
+ __ SmiUntag(a0);
+ }
+ __ Branch(&done);
+
+ __ bind(&not_array_function);
+ __ sw(a1, FieldMemOperand(a2, Cell::kValueOffset));
+ // No need for a write barrier here - cells are rescanned.
__ bind(&done);
}
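The comments in GenerateRecordCallTarget describe a small state machine kept in the type-feedback cell: it starts out uninitialized (the-hole), is patched on the first call with either the callee or, for the Array constructor, an AllocationSite, and collapses to the megamorphic sentinel (undefined) as soon as a different target shows up. A rough plain-C++ sketch of those transitions, with made-up names rather than V8's heap values:

enum class CellState { Uninitialized, Monomorphic, ArrayAllocationSite, Megamorphic };

// Illustrative transition function; the stub encodes the same decisions with
// branches on the cell contents (the-hole, a JSFunction, an AllocationSite, undefined).
CellState RecordCallTarget(CellState state, bool same_target, bool target_is_array_ctor) {
  if (state == CellState::Megamorphic) return state;             // stays generic
  if (state == CellState::Uninitialized) {                        // first call: initialize
    return target_is_array_ctor ? CellState::ArrayAllocationSite
                                : CellState::Monomorphic;
  }
  return same_target ? state : CellState::Megamorphic;            // any miss goes megamorphic
}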
@@ -5529,13 +3547,13 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
masm->isolate()->heap()->undefined_value());
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ sw(at, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
+ __ sw(at, FieldMemOperand(a2, Cell::kValueOffset));
}
// Check for function proxy.
__ Branch(&non_function, ne, a3, Operand(JS_FUNCTION_PROXY_TYPE));
__ push(a1); // Put proxy as additional argument.
- __ li(a0, Operand(argc_ + 1, RelocInfo::NONE));
- __ li(a2, Operand(0, RelocInfo::NONE));
+ __ li(a0, Operand(argc_ + 1, RelocInfo::NONE32));
+ __ li(a2, Operand(0, RelocInfo::NONE32));
__ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY);
__ SetCallKind(t1, CALL_AS_METHOD);
{
@@ -5574,9 +3592,11 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
}
// Jump to the function-specific construct stub.
- __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kConstructStubOffset));
- __ Addu(at, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
+ Register jmp_reg = a3;
+ __ lw(jmp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(jmp_reg, FieldMemOperand(jmp_reg,
+ SharedFunctionInfo::kConstructStubOffset));
+ __ Addu(at, jmp_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(at);
// a0: number of arguments
@@ -5592,52 +3612,13 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
__ bind(&do_call);
// Set expected number of arguments to zero (not changing a0).
- __ li(a2, Operand(0, RelocInfo::NONE));
+ __ li(a2, Operand(0, RelocInfo::NONE32));
__ SetCallKind(t1, CALL_AS_METHOD);
__ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
}
-// Unfortunately you have to run without snapshots to see most of these
-// names in the profile since most compare stubs end up in the snapshot.
-void CompareStub::PrintName(StringStream* stream) {
- ASSERT((lhs_.is(a0) && rhs_.is(a1)) ||
- (lhs_.is(a1) && rhs_.is(a0)));
- const char* cc_name;
- switch (cc_) {
- case lt: cc_name = "LT"; break;
- case gt: cc_name = "GT"; break;
- case le: cc_name = "LE"; break;
- case ge: cc_name = "GE"; break;
- case eq: cc_name = "EQ"; break;
- case ne: cc_name = "NE"; break;
- default: cc_name = "UnknownCondition"; break;
- }
- bool is_equality = cc_ == eq || cc_ == ne;
- stream->Add("CompareStub_%s", cc_name);
- stream->Add(lhs_.is(a0) ? "_a0" : "_a1");
- stream->Add(rhs_.is(a0) ? "_a0" : "_a1");
- if (strict_ && is_equality) stream->Add("_STRICT");
- if (never_nan_nan_ && is_equality) stream->Add("_NO_NAN");
- if (!include_number_compare_) stream->Add("_NO_NUMBER");
- if (!include_smi_compare_) stream->Add("_NO_SMI");
-}
-
-
-int CompareStub::MinorKey() {
- // Encode the two parameters in a unique 16 bit value.
- ASSERT(static_cast<unsigned>(cc_) < (1 << 14));
- ASSERT((lhs_.is(a0) && rhs_.is(a1)) ||
- (lhs_.is(a1) && rhs_.is(a0)));
- return ConditionField::encode(static_cast<unsigned>(cc_))
- | RegisterField::encode(lhs_.is(a0))
- | StrictField::encode(strict_)
- | NeverNanNanField::encode(cc_ == eq ? never_nan_nan_ : false)
- | IncludeSmiCompareField::encode(include_smi_compare_);
-}
-
-
// StringCharCodeAtGenerator.
void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
Label flat_string;
@@ -5684,7 +3665,7 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
void StringCharCodeAtGenerator::GenerateSlow(
MacroAssembler* masm,
const RuntimeCallHelper& call_helper) {
- __ Abort("Unexpected fallthrough to CharCodeAt slow case");
+ __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
// Index is not a smi.
__ bind(&index_not_smi_);
@@ -5733,7 +3714,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
call_helper.AfterCall(masm);
__ jmp(&exit_);
- __ Abort("Unexpected fallthrough from CharCodeAt slow case");
+ __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
}
@@ -5748,11 +3729,11 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiShiftSize == 0);
- ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
+ ASSERT(IsPowerOf2(String::kMaxOneByteCharCode + 1));
__ And(t0,
code_,
Operand(kSmiTagMask |
- ((~String::kMaxAsciiCharCode) << kSmiTagSize)));
+ ((~String::kMaxOneByteCharCode) << kSmiTagSize)));
__ Branch(&slow_case_, ne, t0, Operand(zero_reg));
__ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
@@ -5770,7 +3751,7 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
void StringCharFromCodeGenerator::GenerateSlow(
MacroAssembler* masm,
const RuntimeCallHelper& call_helper) {
- __ Abort("Unexpected fallthrough to CharFromCode slow case");
+ __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
__ bind(&slow_case_);
call_helper.BeforeCall(masm);
@@ -5781,24 +3762,7 @@ void StringCharFromCodeGenerator::GenerateSlow(
call_helper.AfterCall(masm);
__ Branch(&exit_);
- __ Abort("Unexpected fallthrough from CharFromCode slow case");
-}
-
-
-// -------------------------------------------------------------------------
-// StringCharAtGenerator
-
-void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
- char_code_at_generator_.GenerateFast(masm);
- char_from_code_generator_.GenerateFast(masm);
-}
-
-
-void StringCharAtGenerator::GenerateSlow(
- MacroAssembler* masm,
- const RuntimeCallHelper& call_helper) {
- char_code_at_generator_.GenerateSlow(masm, call_helper);
- char_from_code_generator_.GenerateSlow(masm, call_helper);
+ __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
}
@@ -5853,7 +3817,7 @@ void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm,
// that it is.
__ And(scratch4, dest, Operand(kPointerAlignmentMask));
__ Check(eq,
- "Destination of copy not aligned.",
+ kDestinationOfCopyNotAligned,
scratch4,
Operand(zero_reg));
}
@@ -5947,7 +3911,7 @@ void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm,
}
-void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
+void StringHelper::GenerateTwoCharacterStringTableProbe(MacroAssembler* masm,
Register c1,
Register c2,
Register scratch1,
@@ -5960,7 +3924,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
Register scratch = scratch3;
// Make sure that both characters are not digits, as such strings have a
- // different hash algorithm. Don't try to look for these in the symbol table.
+ // different hash algorithm. Don't try to look for these in the string table.
Label not_array_index;
__ Subu(scratch, c1, Operand(static_cast<int>('0')));
__ Branch(&not_array_index,
@@ -5995,43 +3959,43 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
// chars: two character string, char 1 in byte 0 and char 2 in byte 1.
// hash: hash of two character string.
- // Load symbol table.
- // Load address of first element of the symbol table.
- Register symbol_table = c2;
- __ LoadRoot(symbol_table, Heap::kSymbolTableRootIndex);
+ // Load string table.
+ // Load address of first element of the string table.
+ Register string_table = c2;
+ __ LoadRoot(string_table, Heap::kStringTableRootIndex);
Register undefined = scratch4;
__ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
- // Calculate capacity mask from the symbol table capacity.
+ // Calculate capacity mask from the string table capacity.
Register mask = scratch2;
- __ lw(mask, FieldMemOperand(symbol_table, SymbolTable::kCapacityOffset));
+ __ lw(mask, FieldMemOperand(string_table, StringTable::kCapacityOffset));
__ sra(mask, mask, 1);
__ Addu(mask, mask, -1);
- // Calculate untagged address of the first element of the symbol table.
- Register first_symbol_table_element = symbol_table;
- __ Addu(first_symbol_table_element, symbol_table,
- Operand(SymbolTable::kElementsStartOffset - kHeapObjectTag));
+ // Calculate untagged address of the first element of the string table.
+ Register first_string_table_element = string_table;
+ __ Addu(first_string_table_element, string_table,
+ Operand(StringTable::kElementsStartOffset - kHeapObjectTag));
// Registers.
// chars: two character string, char 1 in byte 0 and char 2 in byte 1.
// hash: hash of two character string
// mask: capacity mask
- // first_symbol_table_element: address of the first element of
- // the symbol table
+ // first_string_table_element: address of the first element of
+ // the string table
// undefined: the undefined object
// scratch: -
- // Perform a number of probes in the symbol table.
+ // Perform a number of probes in the string table.
const int kProbes = 4;
- Label found_in_symbol_table;
+ Label found_in_string_table;
Label next_probe[kProbes];
Register candidate = scratch5; // Scratch register contains candidate.
for (int i = 0; i < kProbes; i++) {
- // Calculate entry in symbol table.
+ // Calculate entry in string table.
if (i > 0) {
- __ Addu(candidate, hash, Operand(SymbolTable::GetProbeOffset(i)));
+ __ Addu(candidate, hash, Operand(StringTable::GetProbeOffset(i)));
} else {
__ mov(candidate, hash);
}
@@ -6039,9 +4003,9 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
__ And(candidate, candidate, Operand(mask));
// Load the entry from the string table.
- STATIC_ASSERT(SymbolTable::kEntrySize == 1);
+ STATIC_ASSERT(StringTable::kEntrySize == 1);
__ sll(scratch, candidate, kPointerSizeLog2);
- __ Addu(scratch, scratch, first_symbol_table_element);
+ __ Addu(scratch, scratch, first_string_table_element);
__ lw(candidate, MemOperand(scratch));
// If entry is undefined no string with this hash can be found.
@@ -6053,7 +4017,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
// Must be the hole (deleted entry).
if (FLAG_debug_code) {
__ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
- __ Assert(eq, "oddball in symbol table is not undefined or the hole",
+ __ Assert(eq, kOddballInStringTableIsNotUndefinedOrTheHole,
scratch, Operand(candidate));
}
__ jmp(&next_probe[i]);
@@ -6071,8 +4035,8 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
// Check if the two characters match.
// Assumes that word load is little endian.
- __ lhu(scratch, FieldMemOperand(candidate, SeqAsciiString::kHeaderSize));
- __ Branch(&found_in_symbol_table, eq, chars, Operand(scratch));
+ __ lhu(scratch, FieldMemOperand(candidate, SeqOneByteString::kHeaderSize));
+ __ Branch(&found_in_string_table, eq, chars, Operand(scratch));
__ bind(&next_probe[i]);
}
@@ -6081,7 +4045,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
// Scratch register contains result when we fall through to here.
Register result = candidate;
- __ bind(&found_in_symbol_table);
+ __ bind(&found_in_string_table);
__ mov(v0, result);
}
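GenerateTwoCharacterStringTableProbe hashes the character pair, masks the hash with the table capacity, and tries kProbes = 4 slots before giving up and letting the caller allocate a fresh two-character string. A minimal open-addressing sketch of that lookup in plain C++ (the probe offsets and the encoding of entries are placeholders, not V8's):

#include <cstdint>
#include <optional>
#include <vector>

// Illustrative probe loop; an entry of 0 stands in for "undefined" (empty slot).
std::optional<uint32_t> ProbeTwoCharString(const std::vector<uint32_t>& table,
                                           uint32_t hash, uint32_t chars) {
  const uint32_t mask = static_cast<uint32_t>(table.size()) - 1;  // capacity is a power of two
  for (uint32_t i = 0; i < 4; ++i) {
    uint32_t index = (hash + i) & mask;     // the stub adds StringTable::GetProbeOffset(i)
    uint32_t entry = table[index];
    if (entry == 0) return std::nullopt;    // undefined: the string is not in the table
    if (entry == chars) return index;       // found an existing two-character string
    // otherwise: the-hole or a different string, continue with the next probe
  }
  return std::nullopt;                      // fall through to allocating a new string
}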
@@ -6182,6 +4146,9 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ Branch(&runtime, ne, t0, Operand(zero_reg));
+ Label single_char;
+ __ Branch(&single_char, eq, a2, Operand(1));
+
// Short-cut for the case of trivial substring.
Label return_v0;
// v0: original string
@@ -6211,7 +4178,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ Branch(&sliced_string, ne, t0, Operand(zero_reg));
// Cons string. Check whether it is flat, then fetch first part.
__ lw(t1, FieldMemOperand(v0, ConsString::kSecondOffset));
- __ LoadRoot(t0, Heap::kEmptyStringRootIndex);
+ __ LoadRoot(t0, Heap::kempty_stringRootIndex);
__ Branch(&runtime, ne, t1, Operand(t0));
__ lw(t1, FieldMemOperand(v0, ConsString::kFirstOffset));
// Update instance type.
@@ -6250,7 +4217,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// string's encoding is wrong because we always have to recheck encoding of
// the newly created string's parent anyways due to externalized strings.
Label two_byte_slice, set_slice_header;
- STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
+ STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
__ And(t0, a1, Operand(kStringEncodingMask));
__ Branch(&two_byte_slice, eq, t0, Operand(zero_reg));
@@ -6288,12 +4255,12 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ bind(&sequential_string);
// Locate first character of underlying subject string.
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
- __ Addu(t1, t1, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
+ __ Addu(t1, t1, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
__ bind(&allocate_result);
// Sequential ASCII string. Allocate the result.
- STATIC_ASSERT((kAsciiStringTag & kStringEncodingMask) != 0);
+ STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
__ And(t0, a1, Operand(kStringEncodingMask));
__ Branch(&two_byte_sequential, eq, t0, Operand(zero_reg));
@@ -6304,13 +4271,13 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ Addu(t1, t1, a3);
// Locate first character of result.
- __ Addu(a1, v0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ Addu(a1, v0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
// v0: result string
// a1: first character of result string
// a2: result string length
// t1: first character of substring to copy
- STATIC_ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
+ STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
StringHelper::GenerateCopyCharactersLong(
masm, a1, t1, a2, a3, t0, t2, t3, t4, COPY_ASCII | DEST_ALWAYS_ALIGNED);
__ jmp(&return_v0);
@@ -6342,6 +4309,18 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Just jump to runtime to create the sub string.
__ bind(&runtime);
__ TailCallRuntime(Runtime::kSubString, 3, 1);
+
+ __ bind(&single_char);
+ // v0: original string
+ // a1: instance type
+ // a2: length
+ // a3: from index (untagged)
+ __ SmiTag(a3, a3);
+ StringCharAtGenerator generator(
+ v0, a3, a2, v0, &runtime, &runtime, &runtime, STRING_INDEX_IS_NUMBER);
+ generator.GenerateFast(masm);
+ __ DropAndRet(3);
+ generator.SkipSlow(masm, &runtime);
}
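The new single_char path short-circuits substrings of length one: instead of allocating a sliced or sequential substring, the stub re-uses StringCharAtGenerator to fetch the one character and return the cached single-character string. A trivial illustration of the same shortcut in plain C++ (not the stub's actual allocation strategy):

#include <string>

std::string SubString(const std::string& s, size_t from, size_t to) {
  if (to - from == 1) return std::string(1, s[from]);  // single_char fast path
  return s.substr(from, to - from);                    // general path (runtime call)
}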
@@ -6359,16 +4338,18 @@ void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
__ lw(scratch2, FieldMemOperand(right, String::kLengthOffset));
__ Branch(&check_zero_length, eq, length, Operand(scratch2));
__ bind(&strings_not_equal);
+ ASSERT(is_int16(NOT_EQUAL));
+ __ Ret(USE_DELAY_SLOT);
__ li(v0, Operand(Smi::FromInt(NOT_EQUAL)));
- __ Ret();
// Check if the length is zero.
Label compare_chars;
__ bind(&check_zero_length);
STATIC_ASSERT(kSmiTag == 0);
__ Branch(&compare_chars, ne, length, Operand(zero_reg));
+ ASSERT(is_int16(EQUAL));
+ __ Ret(USE_DELAY_SLOT);
__ li(v0, Operand(Smi::FromInt(EQUAL)));
- __ Ret();
// Compare characters.
__ bind(&compare_chars);
@@ -6378,8 +4359,8 @@ void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
&strings_not_equal);
// Characters are equal.
+ __ Ret(USE_DELAY_SLOT);
__ li(v0, Operand(Smi::FromInt(EQUAL)));
- __ Ret();
}
@@ -6442,7 +4423,7 @@ void StringCompareStub::GenerateAsciiCharsCompareLoop(
// doesn't need an additional compare.
__ SmiUntag(length);
__ Addu(scratch1, length,
- Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
__ Addu(left, left, Operand(scratch1));
__ Addu(right, right, Operand(scratch1));
__ Subu(length, zero_reg, length);
@@ -6511,7 +4492,11 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ lw(a1, MemOperand(sp, 0 * kPointerSize)); // Second argument.
// Make sure that both arguments are strings if not known in advance.
- if (flags_ == NO_STRING_ADD_FLAGS) {
+ // Otherwise, at least one of the arguments is definitely a string,
+ // and we convert the one that is not known to be a string.
+ if ((flags_ & STRING_ADD_CHECK_BOTH) == STRING_ADD_CHECK_BOTH) {
+ ASSERT((flags_ & STRING_ADD_CHECK_LEFT) == STRING_ADD_CHECK_LEFT);
+ ASSERT((flags_ & STRING_ADD_CHECK_RIGHT) == STRING_ADD_CHECK_RIGHT);
__ JumpIfEitherSmi(a0, a1, &call_runtime);
// Load instance types.
__ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
@@ -6523,20 +4508,16 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ Or(t4, t0, Operand(t1));
__ And(t4, t4, Operand(kIsNotStringMask));
__ Branch(&call_runtime, ne, t4, Operand(zero_reg));
- } else {
- // Here at least one of the arguments is definitely a string.
- // We convert the one that is not known to be a string.
- if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) {
- ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0);
- GenerateConvertArgument(
- masm, 1 * kPointerSize, a0, a2, a3, t0, t1, &call_builtin);
- builtin_id = Builtins::STRING_ADD_RIGHT;
- } else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) {
- ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0);
- GenerateConvertArgument(
- masm, 0 * kPointerSize, a1, a2, a3, t0, t1, &call_builtin);
- builtin_id = Builtins::STRING_ADD_LEFT;
- }
+ } else if ((flags_ & STRING_ADD_CHECK_LEFT) == STRING_ADD_CHECK_LEFT) {
+ ASSERT((flags_ & STRING_ADD_CHECK_RIGHT) == 0);
+ GenerateConvertArgument(
+ masm, 1 * kPointerSize, a0, a2, a3, t0, t1, &call_builtin);
+ builtin_id = Builtins::STRING_ADD_RIGHT;
+ } else if ((flags_ & STRING_ADD_CHECK_RIGHT) == STRING_ADD_CHECK_RIGHT) {
+ ASSERT((flags_ & STRING_ADD_CHECK_LEFT) == 0);
+ GenerateConvertArgument(
+ masm, 0 * kPointerSize, a1, a2, a3, t0, t1, &call_builtin);
+ builtin_id = Builtins::STRING_ADD_LEFT;
}
// Both arguments are strings.
@@ -6582,12 +4563,12 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// Adding two lengths can't overflow.
STATIC_ASSERT(String::kMaxLength < String::kMaxLength * 2);
__ Addu(t2, a2, Operand(a3));
- // Use the symbol table when adding two one character strings, as it
- // helps later optimizations to return a symbol here.
+ // Use the string table when adding two one character strings, as it
+ // helps later optimizations to return a string here.
__ Branch(&longer_than_two, ne, t2, Operand(2));
// Check that both strings are non-external ASCII strings.
- if (flags_ != NO_STRING_ADD_FLAGS) {
+ if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) {
__ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
__ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
__ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
@@ -6597,13 +4578,13 @@ void StringAddStub::Generate(MacroAssembler* masm) {
&call_runtime);
// Get the two characters forming the sub string.
- __ lbu(a2, FieldMemOperand(a0, SeqAsciiString::kHeaderSize));
- __ lbu(a3, FieldMemOperand(a1, SeqAsciiString::kHeaderSize));
+ __ lbu(a2, FieldMemOperand(a0, SeqOneByteString::kHeaderSize));
+ __ lbu(a3, FieldMemOperand(a1, SeqOneByteString::kHeaderSize));
- // Try to lookup two character string in symbol table. If it is not found
+ // Try to lookup two character string in string table. If it is not found
// just allocate a new one.
Label make_two_character_string;
- StringHelper::GenerateTwoCharacterSymbolTableProbe(
+ StringHelper::GenerateTwoCharacterStringTableProbe(
masm, a2, a3, t2, t3, t0, t1, t5, &make_two_character_string);
__ IncrementCounter(counters->string_add_native(), 1, a2, a3);
__ DropAndRet(2);
@@ -6616,7 +4597,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// in a little endian mode).
__ li(t2, Operand(2));
__ AllocateAsciiString(v0, t2, t0, t1, t5, &call_runtime);
- __ sh(a2, FieldMemOperand(v0, SeqAsciiString::kHeaderSize));
+ __ sh(a2, FieldMemOperand(v0, SeqOneByteString::kHeaderSize));
__ IncrementCounter(counters->string_add_native(), 1, a2, a3);
__ DropAndRet(2);
@@ -6631,7 +4612,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// If result is not supposed to be flat, allocate a cons string object.
// If both strings are ASCII the result is an ASCII cons string.
- if (flags_ != NO_STRING_ADD_FLAGS) {
+ if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) {
__ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
__ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
__ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
@@ -6649,25 +4630,53 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ AllocateAsciiConsString(v0, t2, t0, t1, &call_runtime);
__ bind(&allocated);
// Fill the fields of the cons string.
+ Label skip_write_barrier, after_writing;
+ ExternalReference high_promotion_mode = ExternalReference::
+ new_space_high_promotion_mode_active_address(masm->isolate());
+ __ li(t0, Operand(high_promotion_mode));
+ __ lw(t0, MemOperand(t0, 0));
+ __ Branch(&skip_write_barrier, eq, t0, Operand(zero_reg));
+
+ __ mov(t3, v0);
+ __ sw(a0, FieldMemOperand(t3, ConsString::kFirstOffset));
+ __ RecordWriteField(t3,
+ ConsString::kFirstOffset,
+ a0,
+ t0,
+ kRAHasNotBeenSaved,
+ kDontSaveFPRegs);
+ __ sw(a1, FieldMemOperand(t3, ConsString::kSecondOffset));
+ __ RecordWriteField(t3,
+ ConsString::kSecondOffset,
+ a1,
+ t0,
+ kRAHasNotBeenSaved,
+ kDontSaveFPRegs);
+ __ jmp(&after_writing);
+
+ __ bind(&skip_write_barrier);
__ sw(a0, FieldMemOperand(v0, ConsString::kFirstOffset));
__ sw(a1, FieldMemOperand(v0, ConsString::kSecondOffset));
+
+ __ bind(&after_writing);
+
__ IncrementCounter(counters->string_add_native(), 1, a2, a3);
__ DropAndRet(2);
__ bind(&non_ascii);
// At least one of the strings is two-byte. Check whether it happens
- // to contain only ASCII characters.
+ // to contain only one byte characters.
// t0: first instance type.
// t1: second instance type.
- // Branch to if _both_ instances have kAsciiDataHintMask set.
- __ And(at, t0, Operand(kAsciiDataHintMask));
+ // Branch to if _both_ instances have kOneByteDataHintMask set.
+ __ And(at, t0, Operand(kOneByteDataHintMask));
__ and_(at, at, t1);
__ Branch(&ascii_data, ne, at, Operand(zero_reg));
-
- __ xor_(t0, t0, t1);
- STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
- __ And(t0, t0, Operand(kAsciiStringTag | kAsciiDataHintTag));
- __ Branch(&ascii_data, eq, t0, Operand(kAsciiStringTag | kAsciiDataHintTag));
+ __ Xor(t0, t0, Operand(t1));
+ STATIC_ASSERT(kOneByteStringTag != 0 && kOneByteDataHintTag != 0);
+ __ And(t0, t0, Operand(kOneByteStringTag | kOneByteDataHintTag));
+ __ Branch(&ascii_data, eq, t0,
+ Operand(kOneByteStringTag | kOneByteDataHintTag));
// Allocate a two byte cons string.
__ AllocateTwoByteConsString(v0, t2, t0, t1, &call_runtime);
@@ -6686,7 +4695,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// t2: sum of lengths.
Label first_prepared, second_prepared;
__ bind(&string_add_flat_result);
- if (flags_ != NO_STRING_ADD_FLAGS) {
+ if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) {
__ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
__ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
__ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
@@ -6700,11 +4709,11 @@ void StringAddStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(kSeqStringTag == 0);
__ And(t4, t0, Operand(kStringRepresentationMask));
- STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
+ STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
Label skip_first_add;
__ Branch(&skip_first_add, ne, t4, Operand(zero_reg));
__ Branch(USE_DELAY_SLOT, &first_prepared);
- __ addiu(t3, a0, SeqAsciiString::kHeaderSize - kHeapObjectTag);
+ __ addiu(t3, a0, SeqOneByteString::kHeaderSize - kHeapObjectTag);
__ bind(&skip_first_add);
// External string: rule out short external string and load string resource.
STATIC_ASSERT(kShortExternalStringTag != 0);
@@ -6715,11 +4724,11 @@ void StringAddStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(kSeqStringTag == 0);
__ And(t4, t1, Operand(kStringRepresentationMask));
- STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
+ STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
Label skip_second_add;
__ Branch(&skip_second_add, ne, t4, Operand(zero_reg));
__ Branch(USE_DELAY_SLOT, &second_prepared);
- __ addiu(a1, a1, SeqAsciiString::kHeaderSize - kHeapObjectTag);
+ __ addiu(a1, a1, SeqOneByteString::kHeaderSize - kHeapObjectTag);
__ bind(&skip_second_add);
// External string: rule out short external string and load string resource.
STATIC_ASSERT(kShortExternalStringTag != 0);
@@ -6740,7 +4749,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ Branch(&non_ascii_string_add_flat_result, eq, t4, Operand(zero_reg));
__ AllocateAsciiString(v0, t2, t0, t1, t5, &call_runtime);
- __ Addu(t2, v0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ Addu(t2, v0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
// v0: result string.
// t3: first character of first string.
// a1: first character of second string
@@ -6781,6 +4790,18 @@ void StringAddStub::Generate(MacroAssembler* masm) {
}
+void StringAddStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
+ __ push(a0);
+ __ push(a1);
+}
+
+
+void StringAddStub::GenerateRegisterArgsPop(MacroAssembler* masm) {
+ __ pop(a1);
+ __ pop(a0);
+}
+
+
void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
int stack_offset,
Register arg,
@@ -6796,115 +4817,115 @@ void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
__ Branch(&done, lt, scratch1, Operand(FIRST_NONSTRING_TYPE));
// Check the number to string cache.
- Label not_cached;
__ bind(&not_string);
// Puts the cached result into scratch1.
- NumberToStringStub::GenerateLookupNumberStringCache(masm,
- arg,
- scratch1,
- scratch2,
- scratch3,
- scratch4,
- false,
- &not_cached);
+ __ LookupNumberStringCache(arg, scratch1, scratch2, scratch3, scratch4, slow);
__ mov(arg, scratch1);
__ sw(arg, MemOperand(sp, stack_offset));
- __ jmp(&done);
-
- // Check if the argument is a safe string wrapper.
- __ bind(&not_cached);
- __ JumpIfSmi(arg, slow);
- __ GetObjectType(arg, scratch1, scratch2); // map -> scratch1.
- __ Branch(slow, ne, scratch2, Operand(JS_VALUE_TYPE));
- __ lbu(scratch2, FieldMemOperand(scratch1, Map::kBitField2Offset));
- __ li(scratch4, 1 << Map::kStringWrapperSafeForDefaultValueOf);
- __ And(scratch2, scratch2, scratch4);
- __ Branch(slow, ne, scratch2, Operand(scratch4));
- __ lw(arg, FieldMemOperand(arg, JSValue::kValueOffset));
- __ sw(arg, MemOperand(sp, stack_offset));
-
__ bind(&done);
}
void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::SMIS);
+ ASSERT(state_ == CompareIC::SMI);
Label miss;
__ Or(a2, a1, a0);
__ JumpIfNotSmi(a2, &miss);
if (GetCondition() == eq) {
// For equality we do not care about the sign of the result.
+ __ Ret(USE_DELAY_SLOT);
__ Subu(v0, a0, a1);
} else {
// Untag before subtracting to avoid handling overflow.
__ SmiUntag(a1);
__ SmiUntag(a0);
+ __ Ret(USE_DELAY_SLOT);
__ Subu(v0, a1, a0);
}
- __ Ret();
__ bind(&miss);
GenerateMiss(masm);
}
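The Smi case leans on tagged-integer arithmetic: for equality the tagged difference a0 - a1 is zero exactly when the operands are equal, and for ordered comparisons both values are untagged first so the subtraction cannot overflow the 31-bit payload. A small worked example, assuming 32-bit Smis with a one-bit tag as on MIPS32:

#include <cassert>
#include <cstdint>

int32_t SmiTag(int32_t value) { return value << 1; }   // low bit 0 marks a Smi
int32_t SmiUntag(int32_t smi) { return smi >> 1; }

int main() {
  int32_t a = SmiTag(7), b = SmiTag(7), c = SmiTag(3);
  assert(a - b == 0);                      // equality: tagged difference is zero iff equal
  assert(SmiUntag(a) - SmiUntag(c) > 0);   // ordering: untag first to avoid overflow
  return 0;
}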
-void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::HEAP_NUMBERS);
+void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
+ ASSERT(state_ == CompareIC::NUMBER);
Label generic_stub;
Label unordered, maybe_undefined1, maybe_undefined2;
Label miss;
- __ And(a2, a1, Operand(a0));
- __ JumpIfSmi(a2, &generic_stub);
- __ GetObjectType(a0, a2, a2);
- __ Branch(&maybe_undefined1, ne, a2, Operand(HEAP_NUMBER_TYPE));
- __ GetObjectType(a1, a2, a2);
- __ Branch(&maybe_undefined2, ne, a2, Operand(HEAP_NUMBER_TYPE));
+ if (left_ == CompareIC::SMI) {
+ __ JumpIfNotSmi(a1, &miss);
+ }
+ if (right_ == CompareIC::SMI) {
+ __ JumpIfNotSmi(a0, &miss);
+ }
// Inlining the double comparison and falling back to the general compare
- // stub if NaN is involved or FPU is unsupported.
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
+ // stub if NaN is involved.
+ // Load left and right operand.
+ Label done, left, left_smi, right_smi;
+ __ JumpIfSmi(a0, &right_smi);
+ __ CheckMap(a0, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
+ DONT_DO_SMI_CHECK);
+ __ Subu(a2, a0, Operand(kHeapObjectTag));
+ __ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset));
+ __ Branch(&left);
+ __ bind(&right_smi);
+ __ SmiUntag(a2, a0); // Can't clobber a0 yet.
+ FPURegister single_scratch = f6;
+ __ mtc1(a2, single_scratch);
+ __ cvt_d_w(f2, single_scratch);
- // Load left and right operand.
- __ Subu(a2, a1, Operand(kHeapObjectTag));
- __ ldc1(f0, MemOperand(a2, HeapNumber::kValueOffset));
- __ Subu(a2, a0, Operand(kHeapObjectTag));
- __ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset));
+ __ bind(&left);
+ __ JumpIfSmi(a1, &left_smi);
+ __ CheckMap(a1, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
+ DONT_DO_SMI_CHECK);
+ __ Subu(a2, a1, Operand(kHeapObjectTag));
+ __ ldc1(f0, MemOperand(a2, HeapNumber::kValueOffset));
+ __ Branch(&done);
+ __ bind(&left_smi);
+ __ SmiUntag(a2, a1); // Can't clobber a1 yet.
+ single_scratch = f8;
+ __ mtc1(a2, single_scratch);
+ __ cvt_d_w(f0, single_scratch);
- // Return a result of -1, 0, or 1, or use CompareStub for NaNs.
- Label fpu_eq, fpu_lt;
- // Test if equal, and also handle the unordered/NaN case.
- __ BranchF(&fpu_eq, &unordered, eq, f0, f2);
+ __ bind(&done);
- // Test if less (unordered case is already handled).
- __ BranchF(&fpu_lt, NULL, lt, f0, f2);
+ // Return a result of -1, 0, or 1, or use CompareStub for NaNs.
+ Label fpu_eq, fpu_lt;
+ // Test if equal, and also handle the unordered/NaN case.
+ __ BranchF(&fpu_eq, &unordered, eq, f0, f2);
- // Otherwise it's greater, so just fall thru, and return.
- __ li(v0, Operand(GREATER));
- __ Ret();
+ // Test if less (unordered case is already handled).
+ __ BranchF(&fpu_lt, NULL, lt, f0, f2);
- __ bind(&fpu_eq);
- __ li(v0, Operand(EQUAL));
- __ Ret();
+ // Otherwise it's greater, so just fall thru, and return.
+ ASSERT(is_int16(GREATER) && is_int16(EQUAL) && is_int16(LESS));
+ __ Ret(USE_DELAY_SLOT);
+ __ li(v0, Operand(GREATER));
- __ bind(&fpu_lt);
- __ li(v0, Operand(LESS));
- __ Ret();
- }
+ __ bind(&fpu_eq);
+ __ Ret(USE_DELAY_SLOT);
+ __ li(v0, Operand(EQUAL));
- __ bind(&unordered);
+ __ bind(&fpu_lt);
+ __ Ret(USE_DELAY_SLOT);
+ __ li(v0, Operand(LESS));
- CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, a1, a0);
+ __ bind(&unordered);
__ bind(&generic_stub);
- __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+ ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC,
+ CompareIC::GENERIC);
+ __ Jump(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
__ bind(&maybe_undefined1);
if (Token::IsOrderedRelationalCompareOp(op_)) {
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
__ Branch(&miss, ne, a0, Operand(at));
+ __ JumpIfSmi(a1, &unordered);
__ GetObjectType(a1, a2, a2);
__ Branch(&maybe_undefined2, ne, a2, Operand(HEAP_NUMBER_TYPE));
__ jmp(&unordered);
@@ -6921,8 +4942,8 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
}
-void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::SYMBOLS);
+void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
+ ASSERT(state_ == CompareIC::INTERNALIZED_STRING);
Label miss;
// Registers containing left and right operands respectively.
@@ -6934,24 +4955,70 @@ void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
// Check that both operands are heap objects.
__ JumpIfEitherSmi(left, right, &miss);
- // Check that both operands are symbols.
+ // Check that both operands are internalized strings.
__ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
__ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
__ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
__ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kSymbolTag != 0);
- __ And(tmp1, tmp1, Operand(tmp2));
- __ And(tmp1, tmp1, kIsSymbolMask);
- __ Branch(&miss, eq, tmp1, Operand(zero_reg));
+ STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
+ __ Or(tmp1, tmp1, Operand(tmp2));
+ __ And(at, tmp1, Operand(kIsNotStringMask | kIsNotInternalizedMask));
+ __ Branch(&miss, ne, at, Operand(zero_reg));
+
// Make sure a0 is non-zero. At this point input operands are
// guaranteed to be non-zero.
ASSERT(right.is(a0));
STATIC_ASSERT(EQUAL == 0);
STATIC_ASSERT(kSmiTag == 0);
__ mov(v0, right);
- // Symbols are compared by identity.
+ // Internalized strings are compared by identity.
__ Ret(ne, left, Operand(right));
+ ASSERT(is_int16(EQUAL));
+ __ Ret(USE_DELAY_SLOT);
+ __ li(v0, Operand(Smi::FromInt(EQUAL)));
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
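Because every internalized string is stored exactly once, the stub can compare internalized strings by identity; all it has to verify first is that neither operand has the "not a string" or "not internalized" instance-type bit set, which it does with a single OR and mask. A small sketch of that bit test in plain C++ (the mask values here are stand-ins, not the real instance-type layout):

#include <cassert>
#include <cstdint>

const uint32_t kIsNotStringMask       = 1u << 7;  // placeholder bit positions
const uint32_t kIsNotInternalizedMask = 1u << 6;

bool BothInternalizedStrings(uint32_t type1, uint32_t type2) {
  uint32_t combined = type1 | type2;  // a disqualifying bit in either operand survives the OR
  return (combined & (kIsNotStringMask | kIsNotInternalizedMask)) == 0;
}

int main() {
  assert(BothInternalizedStrings(0, 0));
  assert(!BothInternalizedStrings(kIsNotInternalizedMask, 0));
  return 0;
}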
+
+
+void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
+ ASSERT(state_ == CompareIC::UNIQUE_NAME);
+ ASSERT(GetCondition() == eq);
+ Label miss;
+
+ // Registers containing left and right operands respectively.
+ Register left = a1;
+ Register right = a0;
+ Register tmp1 = a2;
+ Register tmp2 = a3;
+
+ // Check that both operands are heap objects.
+ __ JumpIfEitherSmi(left, right, &miss);
+
+ // Check that both operands are unique names. This leaves the instance
+ // types loaded in tmp1 and tmp2.
+ __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
+ __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
+ __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
+ __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
+
+ __ JumpIfNotUniqueName(tmp1, &miss);
+ __ JumpIfNotUniqueName(tmp2, &miss);
+
+ // Use a0 as result
+ __ mov(v0, a0);
+
+ // Unique names are compared by identity.
+ Label done;
+ __ Branch(&done, ne, left, Operand(right));
+ // Make sure a0 is non-zero. At this point input operands are
+ // guaranteed to be non-zero.
+ ASSERT(right.is(a0));
+ STATIC_ASSERT(EQUAL == 0);
+ STATIC_ASSERT(kSmiTag == 0);
__ li(v0, Operand(Smi::FromInt(EQUAL)));
+ __ bind(&done);
__ Ret();
__ bind(&miss);
@@ -6960,7 +5027,7 @@ void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::STRINGS);
+ ASSERT(state_ == CompareIC::STRING);
Label miss;
bool equality = Token::IsEqualityOp(op_);
@@ -6999,15 +5066,16 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
// Handle not identical strings.
- // Check that both strings are symbols. If they are, we're done
- // because we already know they are not identical.
+ // Check that both strings are internalized strings. If they are, we're done
+ // because we already know they are not identical. We know they are both
+ // strings.
if (equality) {
ASSERT(GetCondition() == eq);
- STATIC_ASSERT(kSymbolTag != 0);
- __ And(tmp3, tmp1, Operand(tmp2));
- __ And(tmp5, tmp3, Operand(kIsSymbolMask));
+ STATIC_ASSERT(kInternalizedTag == 0);
+ __ Or(tmp3, tmp1, Operand(tmp2));
+ __ And(tmp5, tmp3, Operand(kIsNotInternalizedMask));
Label is_symbol;
- __ Branch(&is_symbol, eq, tmp5, Operand(zero_reg));
+ __ Branch(&is_symbol, ne, tmp5, Operand(zero_reg));
// Make sure a0 is non-zero. At this point input operands are
// guaranteed to be non-zero.
ASSERT(right.is(a0));
@@ -7045,7 +5113,7 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::OBJECTS);
+ ASSERT(state_ == CompareIC::OBJECT);
Label miss;
__ And(a2, a1, Operand(a0));
__ JumpIfSmi(a2, &miss);
@@ -7080,6 +5148,7 @@ void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
GenerateMiss(masm);
}
+
void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
{
// Call the runtime system in a fresh internal frame.
@@ -7103,9 +5172,16 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
void DirectCEntryStub::Generate(MacroAssembler* masm) {
- // No need to pop or drop anything, LeaveExitFrame will restore the old
- // stack, thus dropping the allocated space for the return value.
- // The saved ra is after the reserved stack space for the 4 args.
+ // Make room for arguments to fit the C calling convention. Most of the callers
+ // of DirectCEntryStub::GenerateCall are using EnterExitFrame/LeaveExitFrame
+ // so they handle stack restoring and we don't have to do that here.
+ // Any caller of DirectCEntryStub::GenerateCall must take care of dropping
+ // kCArgsSlotsSize stack space after the call.
+ __ Subu(sp, sp, Operand(kCArgsSlotsSize));
+ // Place the return address on the stack, making the call
+ // GC safe. The RegExp backend also relies on this.
+ __ sw(ra, MemOperand(sp, kCArgsSlotsSize));
+ __ Call(t9); // Call the C++ function.
__ lw(t9, MemOperand(sp, kCArgsSlotsSize));
if (FLAG_debug_code && FLAG_enable_slow_asserts) {
@@ -7113,7 +5189,7 @@ void DirectCEntryStub::Generate(MacroAssembler* masm) {
// filled with kZapValue by the GC.
// Dereference the address and check for this.
__ lw(t0, MemOperand(t9));
- __ Assert(ne, "Received invalid return address.", t0,
+ __ Assert(ne, kReceivedInvalidReturnAddress, t0,
Operand(reinterpret_cast<uint32_t>(kZapValue)));
}
__ Jump(t9);
@@ -7121,52 +5197,23 @@ void DirectCEntryStub::Generate(MacroAssembler* masm) {
void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
- ExternalReference function) {
- __ li(t9, Operand(function));
- this->GenerateCall(masm, t9);
-}
-
-
-void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
Register target) {
+ intptr_t loc =
+ reinterpret_cast<intptr_t>(GetCode(masm->isolate()).location());
__ Move(t9, target);
- __ AssertStackIsAligned();
- // Allocate space for arg slots.
- __ Subu(sp, sp, kCArgsSlotsSize);
-
- // Block the trampoline pool through the whole function to make sure the
- // number of generated instructions is constant.
- Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
-
- // We need to get the current 'pc' value, which is not available on MIPS.
- Label find_ra;
- masm->bal(&find_ra); // ra = pc + 8.
- masm->nop(); // Branch delay slot nop.
- masm->bind(&find_ra);
-
- const int kNumInstructionsToJump = 6;
- masm->addiu(ra, ra, kNumInstructionsToJump * kPointerSize);
- // Push return address (accessible to GC through exit frame pc).
- // This spot for ra was reserved in EnterExitFrame.
- masm->sw(ra, MemOperand(sp, kCArgsSlotsSize));
- masm->li(ra,
- Operand(reinterpret_cast<intptr_t>(GetCode().location()),
- RelocInfo::CODE_TARGET),
- CONSTANT_SIZE);
- // Call the function.
- masm->Jump(t9);
- // Make sure the stored 'ra' points to this position.
- ASSERT_EQ(kNumInstructionsToJump, masm->InstructionsGeneratedSince(&find_ra));
+ __ li(ra, Operand(loc, RelocInfo::CODE_TARGET), CONSTANT_SIZE);
+ __ Call(ra);
}
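With this change DirectCEntryStub::Generate itself reserves the four MIPS o32 argument slots and stores ra just above them so the return address stays visible to the GC through the exit frame, which means every caller of GenerateCall is responsible for dropping kCArgsSlotsSize afterwards. The arithmetic below is only an illustration of that layout, not V8 code:

#include <cstdint>

const uint32_t kPointerSize = 4;
const uint32_t kCArgsSlotsSize = 4 * kPointerSize;  // four o32 argument slots

struct Frame { uint32_t sp; uint32_t saved_ra_at; };

// Mirrors the Subu(sp, sp, kCArgsSlotsSize) / sw(ra, MemOperand(sp, kCArgsSlotsSize)) pair.
Frame EnterDirectCEntry(uint32_t sp) {
  uint32_t new_sp = sp - kCArgsSlotsSize;
  return { new_sp, new_sp + kCArgsSlotsSize };
}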
-void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register receiver,
- Register properties,
- Handle<String> name,
- Register scratch0) {
+void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register receiver,
+ Register properties,
+ Handle<Name> name,
+ Register scratch0) {
+ ASSERT(name->IsUniqueName());
// If names of slots in range from 1 to kProbes - 1 for the hash value are
// not equal to the name and kProbes-th slot is not used (its name is the
// undefined value), it guarantees the hash table doesn't contain the
@@ -7180,10 +5227,10 @@ void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
__ lw(index, FieldMemOperand(properties, kCapacityOffset));
__ Subu(index, index, Operand(1));
__ And(index, index, Operand(
- Smi::FromInt(name->Hash() + StringDictionary::GetProbeOffset(i))));
+ Smi::FromInt(name->Hash() + NameDictionary::GetProbeOffset(i))));
// Scale the index by multiplying by the entry size.
- ASSERT(StringDictionary::kEntrySize == 3);
+ ASSERT(NameDictionary::kEntrySize == 3);
__ sll(at, index, 1);
__ Addu(index, index, at);
@@ -7199,29 +5246,25 @@ void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
__ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
__ Branch(done, eq, entity_name, Operand(tmp));
- if (i != kInlinedProbes - 1) {
- // Load the hole ready for use below:
- __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex);
+ // Load the hole ready for use below:
+ __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex);
- // Stop if found the property.
- __ Branch(miss, eq, entity_name, Operand(Handle<String>(name)));
-
- Label the_hole;
- __ Branch(&the_hole, eq, entity_name, Operand(tmp));
+ // Stop if found the property.
+ __ Branch(miss, eq, entity_name, Operand(Handle<Name>(name)));
- // Check if the entry name is not a symbol.
- __ lw(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
- __ lbu(entity_name,
- FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
- __ And(scratch0, entity_name, Operand(kIsSymbolMask));
- __ Branch(miss, eq, scratch0, Operand(zero_reg));
+ Label good;
+ __ Branch(&good, eq, entity_name, Operand(tmp));
- __ bind(&the_hole);
+ // Check if the entry name is not a unique name.
+ __ lw(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
+ __ lbu(entity_name,
+ FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
+ __ JumpIfNotUniqueName(entity_name, miss);
+ __ bind(&good);
- // Restore the properties.
- __ lw(properties,
- FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- }
+ // Restore the properties.
+ __ lw(properties,
+ FieldMemOperand(receiver, JSObject::kPropertiesOffset));
}
const int spill_mask =
@@ -7230,8 +5273,8 @@ void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
__ MultiPush(spill_mask);
__ lw(a0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- __ li(a1, Operand(Handle<String>(name)));
- StringDictionaryLookupStub stub(NEGATIVE_LOOKUP);
+ __ li(a1, Operand(Handle<Name>(name)));
+ NameDictionaryLookupStub stub(NEGATIVE_LOOKUP);
__ CallStub(&stub);
__ mov(at, v0);
__ MultiPop(spill_mask);
@@ -7241,23 +5284,23 @@ void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
}
-// Probe the string dictionary in the |elements| register. Jump to the
+// Probe the name dictionary in the |elements| register. Jump to the
// |done| label if a property with the given name is found. Jump to
// the |miss| label otherwise.
// If lookup was successful |scratch2| will be equal to elements + 4 * index.
-void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register elements,
- Register name,
- Register scratch1,
- Register scratch2) {
+void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register elements,
+ Register name,
+ Register scratch1,
+ Register scratch2) {
ASSERT(!elements.is(scratch1));
ASSERT(!elements.is(scratch2));
ASSERT(!name.is(scratch1));
ASSERT(!name.is(scratch2));
- __ AssertString(name);
+ __ AssertName(name);
// Compute the capacity mask.
__ lw(scratch1, FieldMemOperand(elements, kCapacityOffset));
@@ -7269,21 +5312,21 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
// cover ~93% of loads from dictionaries.
for (int i = 0; i < kInlinedProbes; i++) {
// Compute the masked index: (hash + i + i * i) & mask.
- __ lw(scratch2, FieldMemOperand(name, String::kHashFieldOffset));
+ __ lw(scratch2, FieldMemOperand(name, Name::kHashFieldOffset));
if (i > 0) {
// Add the probe offset (i + i * i) left shifted to avoid right shifting
// the hash in a separate instruction. The value hash + i + i * i is right
// shifted in the following and instruction.
- ASSERT(StringDictionary::GetProbeOffset(i) <
- 1 << (32 - String::kHashFieldOffset));
+ ASSERT(NameDictionary::GetProbeOffset(i) <
+ 1 << (32 - Name::kHashFieldOffset));
__ Addu(scratch2, scratch2, Operand(
- StringDictionary::GetProbeOffset(i) << String::kHashShift));
+ NameDictionary::GetProbeOffset(i) << Name::kHashShift));
}
- __ srl(scratch2, scratch2, String::kHashShift);
+ __ srl(scratch2, scratch2, Name::kHashShift);
__ And(scratch2, scratch1, scratch2);
// Scale the index by multiplying by the element size.
- ASSERT(StringDictionary::kEntrySize == 3);
+ ASSERT(NameDictionary::kEntrySize == 3);
// scratch2 = scratch2 * 3.
__ sll(at, scratch2, 1);
@@ -7310,7 +5353,7 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
__ Move(a0, elements);
__ Move(a1, name);
}
- StringDictionaryLookupStub stub(POSITIVE_LOOKUP);
+ NameDictionaryLookupStub stub(POSITIVE_LOOKUP);
__ CallStub(&stub);
__ mov(scratch2, a2);
__ mov(at, v0);
@@ -7321,15 +5364,15 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
}
-void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
+void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
// This stub overrides SometimesSetsUpAFrame() to return false. That means
// we cannot call anything that could cause a GC from this stub.
// Registers:
- // result: StringDictionary to probe
+ // result: NameDictionary to probe
// a1: key
- // : StringDictionary to probe.
- // index_: will hold an index of entry if lookup is successful.
- // might alias with result_.
+ // dictionary: NameDictionary to probe.
+ // index: will hold an index of entry if lookup is successful.
+ // might alias with result_.
// Returns:
// result_ is zero if lookup failed, non zero otherwise.
@@ -7348,7 +5391,7 @@ void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
__ sra(mask, mask, kSmiTagSize);
__ Subu(mask, mask, Operand(1));
- __ lw(hash, FieldMemOperand(key, String::kHashFieldOffset));
+ __ lw(hash, FieldMemOperand(key, Name::kHashFieldOffset));
__ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
@@ -7359,18 +5402,18 @@ void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
// Add the probe offset (i + i * i) left shifted to avoid right shifting
// the hash in a separate instruction. The value hash + i + i * i is right
// shifted in the following and instruction.
- ASSERT(StringDictionary::GetProbeOffset(i) <
- 1 << (32 - String::kHashFieldOffset));
+ ASSERT(NameDictionary::GetProbeOffset(i) <
+ 1 << (32 - Name::kHashFieldOffset));
__ Addu(index, hash, Operand(
- StringDictionary::GetProbeOffset(i) << String::kHashShift));
+ NameDictionary::GetProbeOffset(i) << Name::kHashShift));
} else {
__ mov(index, hash);
}
- __ srl(index, index, String::kHashShift);
+ __ srl(index, index, Name::kHashShift);
__ And(index, mask, index);
// Scale the index by multiplying by the entry size.
- ASSERT(StringDictionary::kEntrySize == 3);
+ ASSERT(NameDictionary::kEntrySize == 3);
// index *= 3.
__ mov(at, index);
__ sll(index, index, 1);
@@ -7389,12 +5432,11 @@ void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
__ Branch(&in_dictionary, eq, entry_key, Operand(key));
if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
- // Check if the entry name is not a symbol.
+ // Check if the entry name is not a unique name.
__ lw(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
__ lbu(entry_key,
FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
- __ And(result, entry_key, Operand(kIsSymbolMask));
- __ Branch(&maybe_in_dictionary, eq, result, Operand(zero_reg));
+ __ JumpIfNotUniqueName(entry_key, &maybe_in_dictionary);
}
}
@@ -7422,18 +5464,16 @@ struct AheadOfTimeWriteBarrierStubList {
RememberedSetAction action;
};
+
#define REG(Name) { kRegister_ ## Name ## _Code }
static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
// Used in RegExpExecStub.
{ REG(s2), REG(s0), REG(t3), EMIT_REMEMBERED_SET },
- { REG(s2), REG(a2), REG(t3), EMIT_REMEMBERED_SET },
// Used in CompileArrayPushCall.
// Also used in StoreIC::GenerateNormal via GenerateDictionaryStore.
// Also used in KeyedStoreIC::GenerateGeneric.
{ REG(a3), REG(t0), REG(t1), EMIT_REMEMBERED_SET },
- // Used in CompileStoreGlobal.
- { REG(t0), REG(a1), REG(a2), OMIT_REMEMBERED_SET },
// Used in StoreStubCompiler::CompileStoreField via GenerateStoreField.
{ REG(a1), REG(a2), REG(a3), EMIT_REMEMBERED_SET },
{ REG(a3), REG(a2), REG(a1), EMIT_REMEMBERED_SET },
@@ -7455,6 +5495,9 @@ static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
{ REG(t1), REG(a0), REG(t2), EMIT_REMEMBERED_SET },
// FastNewClosureStub::Generate
{ REG(a2), REG(t0), REG(a1), EMIT_REMEMBERED_SET },
+ // StringAddStub::Generate
+ { REG(t3), REG(a1), REG(t0), EMIT_REMEMBERED_SET },
+ { REG(t3), REG(a0), REG(t0), EMIT_REMEMBERED_SET },
// Null termination.
{ REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
};
@@ -7462,7 +5505,7 @@ static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
#undef REG
-bool RecordWriteStub::IsPregenerated() {
+bool RecordWriteStub::IsPregenerated(Isolate* isolate) {
for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
!entry->object.is(no_reg);
entry++) {
@@ -7478,18 +5521,17 @@ bool RecordWriteStub::IsPregenerated() {
}
-bool StoreBufferOverflowStub::IsPregenerated() {
- return save_doubles_ == kDontSaveFPRegs || ISOLATE->fp_stubs_generated();
-}
-
-
-void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime() {
+void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
+ Isolate* isolate) {
StoreBufferOverflowStub stub1(kDontSaveFPRegs);
- stub1.GetCode()->set_is_pregenerated(true);
+ stub1.GetCode(isolate)->set_is_pregenerated(true);
+ // Hydrogen code stubs need stub2 at snapshot time.
+ StoreBufferOverflowStub stub2(kSaveFPRegs);
+ stub2.GetCode(isolate)->set_is_pregenerated(true);
}
-void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
+void RecordWriteStub::GenerateFixedRegStubsAheadOfTime(Isolate* isolate) {
for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
!entry->object.is(no_reg);
entry++) {
@@ -7498,13 +5540,13 @@ void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
entry->address,
entry->action,
kDontSaveFPRegs);
- stub.GetCode()->set_is_pregenerated(true);
+ stub.GetCode(isolate)->set_is_pregenerated(true);
}
}
bool CodeStub::CanUseFPRegisters() {
- return CpuFeatures::IsSupported(FPU);
+ return true; // FPU is a base requirement for V8.
}
@@ -7600,13 +5642,8 @@ void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
ASSERT(!address.is(a0));
__ Move(address, regs_.address());
__ Move(a0, regs_.object());
- if (mode == INCREMENTAL_COMPACTION) {
- __ Move(a1, address);
- } else {
- ASSERT(mode == INCREMENTAL);
- __ lw(a1, MemOperand(address, 0));
- }
- __ li(a2, Operand(ExternalReference::isolate_address()));
+ __ Move(a1, address);
+ __ li(a2, Operand(ExternalReference::isolate_address(masm->isolate())));
AllowExternalCallThatCantCauseGC scope(masm);
if (mode == INCREMENTAL_COMPACTION) {
@@ -7714,10 +5751,10 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : element value to store
- // -- a1 : array literal
- // -- a2 : map of array literal
// -- a3 : element index as smi
- // -- t0 : array literal index in function as smi
+ // -- sp[0] : array literal index in function as smi
+ // -- sp[4] : array literal
+ // clobbers a1, a2, t0
// -----------------------------------
Label element_done;
@@ -7726,6 +5763,11 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
Label slow_elements;
Label fast_elements;
+ // Get array literal index, array literal and its map.
+ __ lw(t0, MemOperand(sp, 0 * kPointerSize));
+ __ lw(a1, MemOperand(sp, 1 * kPointerSize));
+ __ lw(a2, FieldMemOperand(a1, JSObject::kMapOffset));
+
__ CheckFastElements(a2, t1, &double_elements);
// Check for FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS elements
__ JumpIfSmi(a0, &smi_element);
@@ -7767,17 +5809,31 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
// Array literal has ElementsKind of FAST_*_DOUBLE_ELEMENTS.
__ bind(&double_elements);
__ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
- __ StoreNumberToDoubleElements(a0, a3, a1,
- // Overwrites all regs after this.
- t1, t2, t3, t5, a2,
- &slow_elements);
+ __ StoreNumberToDoubleElements(a0, a3, t1, t3, t5, a2, &slow_elements);
__ Ret(USE_DELAY_SLOT);
__ mov(v0, a0);
}
+void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
+ CEntryStub ces(1, fp_registers_ ? kSaveFPRegs : kDontSaveFPRegs);
+ __ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
+ int parameter_count_offset =
+ StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
+ __ lw(a1, MemOperand(fp, parameter_count_offset));
+ if (function_mode_ == JS_FUNCTION_STUB_MODE) {
+ __ Addu(a1, a1, Operand(1));
+ }
+ masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
+ __ sll(a1, a1, kPointerSizeLog2);
+ __ Ret(USE_DELAY_SLOT);
+ __ Addu(sp, sp, a1);
+}
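The trampoline pops the caller's stack parameters by shifting the count left by kPointerSizeLog2 and adding the result to sp; in JS_FUNCTION_STUB_MODE one extra slot is popped for the receiver. A worked example of that arithmetic (illustrative only):

#include <cassert>

const int kPointerSizeLog2 = 2;  // 4-byte pointers on MIPS32

int StackBytesToPop(int parameter_count, bool js_function_stub_mode) {
  if (js_function_stub_mode) parameter_count += 1;   // also pop the receiver
  return parameter_count << kPointerSizeLog2;
}

int main() {
  assert(StackBytesToPop(3, false) == 12);
  assert(StackBytesToPop(3, true) == 16);
  return 0;
}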
+
+
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
- if (entry_hook_ != NULL) {
+ if (masm->isolate()->function_entry_hook() != NULL) {
+ AllowStubCallsScope allow_stub_calls(masm, true);
ProfileEntryHookStub stub;
__ push(ra);
__ CallStub(&stub);
@@ -7792,9 +5848,16 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
const int32_t kReturnAddressDistanceFromFunctionStart =
Assembler::kCallTargetAddressOffset + (2 * Assembler::kInstrSize);
- // Save live volatile registers.
- __ Push(ra, t1, a1);
- const int32_t kNumSavedRegs = 3;
+ // This should contain all kJSCallerSaved registers.
+ const RegList kSavedRegs =
+ kJSCallerSaved | // Caller saved registers.
+ s5.bit(); // Saved stack pointer.
+
+ // We also save ra, so the count here is one higher than the mask indicates.
+ const int32_t kNumSavedRegs = kNumJSCallerSaved + 2;
+
+ // Save all caller-save registers as this may be called from anywhere.
+ __ MultiPush(kSavedRegs | ra.bit());
// Compute the function's address for the first argument.
__ Subu(a0, ra, Operand(kReturnAddressDistanceFromFunctionStart));
@@ -7806,20 +5869,22 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
// Align the stack if necessary.
int frame_alignment = masm->ActivationFrameAlignment();
if (frame_alignment > kPointerSize) {
- __ mov(t1, sp);
+ __ mov(s5, sp);
ASSERT(IsPowerOf2(frame_alignment));
__ And(sp, sp, Operand(-frame_alignment));
}
#if defined(V8_HOST_ARCH_MIPS)
- __ li(at, Operand(reinterpret_cast<int32_t>(&entry_hook_)));
- __ lw(at, MemOperand(at));
+ int32_t entry_hook =
+ reinterpret_cast<int32_t>(masm->isolate()->function_entry_hook());
+ __ li(at, Operand(entry_hook));
#else
// Under the simulator we need to indirect the entry hook through a
// trampoline function at a known address.
- Address trampoline_address = reinterpret_cast<Address>(
- reinterpret_cast<intptr_t>(EntryHookTrampoline));
- ApiFunction dispatcher(trampoline_address);
+ // It additionally takes an isolate as a third parameter.
+ __ li(a2, Operand(ExternalReference::isolate_address(masm->isolate())));
+
+ ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
__ li(at, Operand(ExternalReference(&dispatcher,
ExternalReference::BUILTIN_CALL,
masm->isolate())));
@@ -7828,14 +5893,331 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
// Restore the stack pointer if needed.
if (frame_alignment > kPointerSize) {
- __ mov(sp, t1);
+ __ mov(sp, s5);
}
- __ Pop(ra, t1, a1);
+ // Also pop ra to get Ret(0).
+ __ MultiPop(kSavedRegs | ra.bit());
__ Ret();
}
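
The frame-alignment handling above saves the incoming sp in s5, masks sp down to the required power-of-two boundary before the C call, and restores it afterwards. A small sketch of just that masking step, assuming only that the alignment is a power of two (the helper name is not from V8):

#include <cassert>
#include <cstdint>

// Equivalent of `__ And(sp, sp, Operand(-frame_alignment))` in plain C++.
uintptr_t AlignStackDown(uintptr_t sp, uintptr_t frame_alignment) {
  assert((frame_alignment & (frame_alignment - 1)) == 0);  // IsPowerOf2.
  return sp & ~(frame_alignment - 1);  // Same bits as sp & -frame_alignment.
}
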
+template<class T>
+static void CreateArrayDispatch(MacroAssembler* masm,
+ AllocationSiteOverrideMode mode) {
+ if (mode == DISABLE_ALLOCATION_SITES) {
+ T stub(GetInitialFastElementsKind(),
+ CONTEXT_CHECK_REQUIRED,
+ mode);
+ __ TailCallStub(&stub);
+ } else if (mode == DONT_OVERRIDE) {
+ int last_index = GetSequenceIndexFromFastElementsKind(
+ TERMINAL_FAST_ELEMENTS_KIND);
+ for (int i = 0; i <= last_index; ++i) {
+ Label next;
+ ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
+ __ Branch(&next, ne, a3, Operand(kind));
+ T stub(kind);
+ __ TailCallStub(&stub);
+ __ bind(&next);
+ }
+
+ // If we reached this point there is a problem.
+ __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
+ AllocationSiteOverrideMode mode) {
+ // a2 - type info cell (if mode != DISABLE_ALLOCATION_SITES)
+ // a3 - kind (if mode != DISABLE_ALLOCATION_SITES)
+ // a0 - number of arguments
+ // a1 - constructor?
+ // sp[0] - last argument
+ Label normal_sequence;
+ if (mode == DONT_OVERRIDE) {
+ ASSERT(FAST_SMI_ELEMENTS == 0);
+ ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ ASSERT(FAST_ELEMENTS == 2);
+ ASSERT(FAST_HOLEY_ELEMENTS == 3);
+ ASSERT(FAST_DOUBLE_ELEMENTS == 4);
+ ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
+
+    // Is the low bit set? If so, we are holey and that is good.
+ __ And(at, a3, Operand(1));
+ __ Branch(&normal_sequence, ne, at, Operand(zero_reg));
+ }
+
+  // Look at the first argument.
+ __ lw(t1, MemOperand(sp, 0));
+ __ Branch(&normal_sequence, eq, t1, Operand(zero_reg));
+
+ if (mode == DISABLE_ALLOCATION_SITES) {
+ ElementsKind initial = GetInitialFastElementsKind();
+ ElementsKind holey_initial = GetHoleyElementsKind(initial);
+
+ ArraySingleArgumentConstructorStub stub_holey(holey_initial,
+ CONTEXT_CHECK_REQUIRED,
+ DISABLE_ALLOCATION_SITES);
+ __ TailCallStub(&stub_holey);
+
+ __ bind(&normal_sequence);
+ ArraySingleArgumentConstructorStub stub(initial,
+ CONTEXT_CHECK_REQUIRED,
+ DISABLE_ALLOCATION_SITES);
+ __ TailCallStub(&stub);
+ } else if (mode == DONT_OVERRIDE) {
+ // We are going to create a holey array, but our kind is non-holey.
+ // Fix kind and retry (only if we have an allocation site in the cell).
+ __ Addu(a3, a3, Operand(1));
+ __ lw(t1, FieldMemOperand(a2, Cell::kValueOffset));
+
+ if (FLAG_debug_code) {
+ __ lw(t1, FieldMemOperand(t1, 0));
+ __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
+ __ Assert(eq, kExpectedAllocationSiteInCell, t1, Operand(at));
+ __ lw(t1, FieldMemOperand(a2, Cell::kValueOffset));
+ }
+
+ // Save the resulting elements kind in type info
+ __ SmiTag(a3);
+ __ lw(t1, FieldMemOperand(a2, Cell::kValueOffset));
+ __ sw(a3, FieldMemOperand(t1, AllocationSite::kTransitionInfoOffset));
+ __ SmiUntag(a3);
+
+ __ bind(&normal_sequence);
+ int last_index = GetSequenceIndexFromFastElementsKind(
+ TERMINAL_FAST_ELEMENTS_KIND);
+ for (int i = 0; i <= last_index; ++i) {
+ Label next;
+ ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
+ __ Branch(&next, ne, a3, Operand(kind));
+ ArraySingleArgumentConstructorStub stub(kind);
+ __ TailCallStub(&stub);
+ __ bind(&next);
+ }
+
+ // If we reached this point there is a problem.
+ __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ } else {
+ UNREACHABLE();
+ }
+}
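
CreateArrayDispatchOneArgument leans on the kind numbering asserted above: packed and holey kinds alternate, so the low bit marks holeyness and adding one turns a packed kind into its holey counterpart. A sketch using only the values from those ASSERTs; the two helper names are illustrative, not V8's:

// Values copied from the ASSERTs above; the full ElementsKind enum is larger.
enum ElementsKind {
  FAST_SMI_ELEMENTS = 0,
  FAST_HOLEY_SMI_ELEMENTS = 1,
  FAST_ELEMENTS = 2,
  FAST_HOLEY_ELEMENTS = 3,
  FAST_DOUBLE_ELEMENTS = 4,
  FAST_HOLEY_DOUBLE_ELEMENTS = 5
};

inline bool IsHoley(ElementsKind kind) { return (kind & 1) != 0; }

inline ElementsKind ToHoley(ElementsKind kind) {
  // Packed -> holey is a +1 step; holey kinds map to themselves.
  return IsHoley(kind) ? kind : static_cast<ElementsKind>(kind + 1);
}
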
+
+
+template<class T>
+static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
+ ElementsKind initial_kind = GetInitialFastElementsKind();
+ ElementsKind initial_holey_kind = GetHoleyElementsKind(initial_kind);
+
+ int to_index = GetSequenceIndexFromFastElementsKind(
+ TERMINAL_FAST_ELEMENTS_KIND);
+ for (int i = 0; i <= to_index; ++i) {
+ ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
+ T stub(kind);
+ stub.GetCode(isolate)->set_is_pregenerated(true);
+ if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE ||
+ (!FLAG_track_allocation_sites &&
+ (kind == initial_kind || kind == initial_holey_kind))) {
+ T stub1(kind, CONTEXT_CHECK_REQUIRED, DISABLE_ALLOCATION_SITES);
+ stub1.GetCode(isolate)->set_is_pregenerated(true);
+ }
+ }
+}
+
+
+void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
+ ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
+ isolate);
+ ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
+ isolate);
+ ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
+ isolate);
+}
+
+
+void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
+ Isolate* isolate) {
+ ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
+ for (int i = 0; i < 2; i++) {
+ // For internal arrays we only need a few things.
+ InternalArrayNoArgumentConstructorStub stubh1(kinds[i]);
+ stubh1.GetCode(isolate)->set_is_pregenerated(true);
+ InternalArraySingleArgumentConstructorStub stubh2(kinds[i]);
+ stubh2.GetCode(isolate)->set_is_pregenerated(true);
+ InternalArrayNArgumentsConstructorStub stubh3(kinds[i]);
+ stubh3.GetCode(isolate)->set_is_pregenerated(true);
+ }
+}
+
+
+void ArrayConstructorStub::GenerateDispatchToArrayStub(
+ MacroAssembler* masm,
+ AllocationSiteOverrideMode mode) {
+ if (argument_count_ == ANY) {
+ Label not_zero_case, not_one_case;
+ __ And(at, a0, a0);
+ __ Branch(&not_zero_case, ne, at, Operand(zero_reg));
+ CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
+
+ __ bind(&not_zero_case);
+ __ Branch(&not_one_case, gt, a0, Operand(1));
+ CreateArrayDispatchOneArgument(masm, mode);
+
+ __ bind(&not_one_case);
+ CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+ } else if (argument_count_ == NONE) {
+ CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
+ } else if (argument_count_ == ONE) {
+ CreateArrayDispatchOneArgument(masm, mode);
+ } else if (argument_count_ == MORE_THAN_ONE) {
+ CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+void ArrayConstructorStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : argc (only if argument_count_ == ANY)
+ // -- a1 : constructor
+ // -- a2 : type info cell
+ // -- sp[0] : return address
+ // -- sp[4] : last argument
+ // -----------------------------------
+ if (FLAG_debug_code) {
+ // The array construct code is only set for the global and natives
+ // builtin Array functions which always have maps.
+
+ // Initial map for the builtin Array function should be a map.
+ __ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+    // The following check will catch both a NULL and a Smi.
+ __ And(at, a3, Operand(kSmiTagMask));
+ __ Assert(ne, kUnexpectedInitialMapForArrayFunction,
+ at, Operand(zero_reg));
+ __ GetObjectType(a3, a3, t0);
+ __ Assert(eq, kUnexpectedInitialMapForArrayFunction,
+ t0, Operand(MAP_TYPE));
+
+ // We should either have undefined in a2 or a valid cell.
+ Label okay_here;
+ Handle<Map> cell_map = masm->isolate()->factory()->cell_map();
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ __ Branch(&okay_here, eq, a2, Operand(at));
+ __ lw(a3, FieldMemOperand(a2, 0));
+ __ Assert(eq, kExpectedPropertyCellInRegisterA2,
+ a3, Operand(cell_map));
+ __ bind(&okay_here);
+ }
+
+ Label no_info;
+ // Get the elements kind and case on that.
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ __ Branch(&no_info, eq, a2, Operand(at));
+ __ lw(a3, FieldMemOperand(a2, Cell::kValueOffset));
+
+ // If the type cell is undefined, or contains anything other than an
+ // AllocationSite, call an array constructor that doesn't use AllocationSites.
+ __ lw(t0, FieldMemOperand(a3, 0));
+ __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
+ __ Branch(&no_info, ne, t0, Operand(at));
+
+ __ lw(a3, FieldMemOperand(a3, AllocationSite::kTransitionInfoOffset));
+ __ SmiUntag(a3);
+ GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
+
+ __ bind(&no_info);
+ GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
+}
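
Restated as ordinary control flow, the dispatch decision above is: an undefined type-info cell, or a cell whose value is not an AllocationSite, routes to the site-less constructors; otherwise the elements kind is read from the site's transition info. The helper below is purely illustrative, and its boolean parameters stand in for the checks done in assembly:

enum AllocationSiteOverrideMode { DONT_OVERRIDE, DISABLE_ALLOCATION_SITES };

AllocationSiteOverrideMode ChooseArrayDispatchMode(
    bool cell_is_undefined, bool cell_holds_allocation_site) {
  if (cell_is_undefined || !cell_holds_allocation_site) {
    return DISABLE_ALLOCATION_SITES;  // The &no_info path above.
  }
  return DONT_OVERRIDE;  // Kind comes from kTransitionInfoOffset, smi-untagged.
}
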
+
+
+void InternalArrayConstructorStub::GenerateCase(
+ MacroAssembler* masm, ElementsKind kind) {
+ Label not_zero_case, not_one_case;
+ Label normal_sequence;
+
+ __ Branch(&not_zero_case, ne, a0, Operand(zero_reg));
+ InternalArrayNoArgumentConstructorStub stub0(kind);
+ __ TailCallStub(&stub0);
+
+ __ bind(&not_zero_case);
+ __ Branch(&not_one_case, gt, a0, Operand(1));
+
+ if (IsFastPackedElementsKind(kind)) {
+    // We might need to create a holey array;
+    // look at the first argument.
+ __ lw(at, MemOperand(sp, 0));
+ __ Branch(&normal_sequence, eq, at, Operand(zero_reg));
+
+ InternalArraySingleArgumentConstructorStub
+ stub1_holey(GetHoleyElementsKind(kind));
+ __ TailCallStub(&stub1_holey);
+ }
+
+ __ bind(&normal_sequence);
+ InternalArraySingleArgumentConstructorStub stub1(kind);
+ __ TailCallStub(&stub1);
+
+ __ bind(&not_one_case);
+ InternalArrayNArgumentsConstructorStub stubN(kind);
+ __ TailCallStub(&stubN);
+}
+
+
+void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : argc
+ // -- a1 : constructor
+ // -- sp[0] : return address
+ // -- sp[4] : last argument
+ // -----------------------------------
+
+ if (FLAG_debug_code) {
+ // The array construct code is only set for the global and natives
+ // builtin Array functions which always have maps.
+
+ // Initial map for the builtin Array function should be a map.
+ __ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+    // The following check will catch both a NULL and a Smi.
+ __ And(at, a3, Operand(kSmiTagMask));
+ __ Assert(ne, kUnexpectedInitialMapForArrayFunction,
+ at, Operand(zero_reg));
+ __ GetObjectType(a3, a3, t0);
+ __ Assert(eq, kUnexpectedInitialMapForArrayFunction,
+ t0, Operand(MAP_TYPE));
+ }
+
+ // Figure out the right elements kind.
+ __ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+
+ // Load the map's "bit field 2" into a3. We only need the first byte,
+ // but the following bit field extraction takes care of that anyway.
+ __ lbu(a3, FieldMemOperand(a3, Map::kBitField2Offset));
+ // Retrieve elements_kind from bit field 2.
+ __ Ext(a3, a3, Map::kElementsKindShift, Map::kElementsKindBitCount);
+
+ if (FLAG_debug_code) {
+ Label done;
+ __ Branch(&done, eq, a3, Operand(FAST_ELEMENTS));
+ __ Assert(
+ eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray,
+ a3, Operand(FAST_HOLEY_ELEMENTS));
+ __ bind(&done);
+ }
+
+ Label fast_elements_case;
+ __ Branch(&fast_elements_case, eq, a3, Operand(FAST_ELEMENTS));
+ GenerateCase(masm, FAST_HOLEY_ELEMENTS);
+
+ __ bind(&fast_elements_case);
+ GenerateCase(masm, FAST_ELEMENTS);
+}
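
The lbu/Ext pair above is a plain bit-field extraction: load the map's second bit-field byte, then pull out the elements-kind bits. The same operation in C++, with the shift and width passed in (ExtractBitField is an illustrative name, not a V8 helper):

#include <cstdint>

// What `Ext(rt, rs, pos, size)` computes on MIPS, in plain C++.
uint32_t ExtractBitField(uint32_t word, int shift, int count) {
  return (word >> shift) & ((1u << count) - 1u);
}

// Usage sketch:
//   elements_kind = ExtractBitField(bit_field2, Map::kElementsKindShift,
//                                   Map::kElementsKindBitCount);
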
+
+
#undef __
} } // namespace v8::internal
diff --git a/deps/v8/src/mips/code-stubs-mips.h b/deps/v8/src/mips/code-stubs-mips.h
index e0954d837..10531a800 100644
--- a/deps/v8/src/mips/code-stubs-mips.h
+++ b/deps/v8/src/mips/code-stubs-mips.h
@@ -35,9 +35,12 @@ namespace v8 {
namespace internal {
+void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code);
+
+
// Compute a transcendental math function natively, or call the
// TranscendentalCache runtime function.
-class TranscendentalCacheStub: public CodeStub {
+class TranscendentalCacheStub: public PlatformCodeStub {
public:
enum ArgumentType {
TAGGED = 0 << TranscendentalCache::kTranscendentalTypeBits,
@@ -59,15 +62,15 @@ class TranscendentalCacheStub: public CodeStub {
};
-class StoreBufferOverflowStub: public CodeStub {
+class StoreBufferOverflowStub: public PlatformCodeStub {
public:
explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
- : save_doubles_(save_fp) { }
+ : save_doubles_(save_fp) {}
void Generate(MacroAssembler* masm);
- virtual bool IsPregenerated();
- static void GenerateFixedRegStubsAheadOfTime();
+ virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE { return true; }
+ static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
virtual bool SometimesSetsUpAFrame() { return false; }
private:
@@ -78,173 +81,6 @@ class StoreBufferOverflowStub: public CodeStub {
};
-class UnaryOpStub: public CodeStub {
- public:
- UnaryOpStub(Token::Value op,
- UnaryOverwriteMode mode,
- UnaryOpIC::TypeInfo operand_type = UnaryOpIC::UNINITIALIZED)
- : op_(op),
- mode_(mode),
- operand_type_(operand_type) {
- }
-
- private:
- Token::Value op_;
- UnaryOverwriteMode mode_;
-
- // Operand type information determined at runtime.
- UnaryOpIC::TypeInfo operand_type_;
-
- virtual void PrintName(StringStream* stream);
-
- class ModeBits: public BitField<UnaryOverwriteMode, 0, 1> {};
- class OpBits: public BitField<Token::Value, 1, 7> {};
- class OperandTypeInfoBits: public BitField<UnaryOpIC::TypeInfo, 8, 3> {};
-
- Major MajorKey() { return UnaryOp; }
- int MinorKey() {
- return ModeBits::encode(mode_)
- | OpBits::encode(op_)
- | OperandTypeInfoBits::encode(operand_type_);
- }
-
- // Note: A lot of the helper functions below will vanish when we use virtual
- // function instead of switch more often.
- void Generate(MacroAssembler* masm);
-
- void GenerateTypeTransition(MacroAssembler* masm);
-
- void GenerateSmiStub(MacroAssembler* masm);
- void GenerateSmiStubSub(MacroAssembler* masm);
- void GenerateSmiStubBitNot(MacroAssembler* masm);
- void GenerateSmiCodeSub(MacroAssembler* masm, Label* non_smi, Label* slow);
- void GenerateSmiCodeBitNot(MacroAssembler* masm, Label* slow);
-
- void GenerateHeapNumberStub(MacroAssembler* masm);
- void GenerateHeapNumberStubSub(MacroAssembler* masm);
- void GenerateHeapNumberStubBitNot(MacroAssembler* masm);
- void GenerateHeapNumberCodeSub(MacroAssembler* masm, Label* slow);
- void GenerateHeapNumberCodeBitNot(MacroAssembler* masm, Label* slow);
-
- void GenerateGenericStub(MacroAssembler* masm);
- void GenerateGenericStubSub(MacroAssembler* masm);
- void GenerateGenericStubBitNot(MacroAssembler* masm);
- void GenerateGenericCodeFallback(MacroAssembler* masm);
-
- virtual int GetCodeKind() { return Code::UNARY_OP_IC; }
-
- virtual InlineCacheState GetICState() {
- return UnaryOpIC::ToState(operand_type_);
- }
-
- virtual void FinishCode(Handle<Code> code) {
- code->set_unary_op_type(operand_type_);
- }
-};
-
-
-class BinaryOpStub: public CodeStub {
- public:
- BinaryOpStub(Token::Value op, OverwriteMode mode)
- : op_(op),
- mode_(mode),
- operands_type_(BinaryOpIC::UNINITIALIZED),
- result_type_(BinaryOpIC::UNINITIALIZED) {
- use_fpu_ = CpuFeatures::IsSupported(FPU);
- ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
- }
-
- BinaryOpStub(
- int key,
- BinaryOpIC::TypeInfo operands_type,
- BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED)
- : op_(OpBits::decode(key)),
- mode_(ModeBits::decode(key)),
- use_fpu_(FPUBits::decode(key)),
- operands_type_(operands_type),
- result_type_(result_type) { }
-
- private:
- enum SmiCodeGenerateHeapNumberResults {
- ALLOW_HEAPNUMBER_RESULTS,
- NO_HEAPNUMBER_RESULTS
- };
-
- Token::Value op_;
- OverwriteMode mode_;
- bool use_fpu_;
-
- // Operand type information determined at runtime.
- BinaryOpIC::TypeInfo operands_type_;
- BinaryOpIC::TypeInfo result_type_;
-
- virtual void PrintName(StringStream* stream);
-
- // Minor key encoding in 16 bits RRRTTTVOOOOOOOMM.
- class ModeBits: public BitField<OverwriteMode, 0, 2> {};
- class OpBits: public BitField<Token::Value, 2, 7> {};
- class FPUBits: public BitField<bool, 9, 1> {};
- class OperandTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 10, 3> {};
- class ResultTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 13, 3> {};
-
- Major MajorKey() { return BinaryOp; }
- int MinorKey() {
- return OpBits::encode(op_)
- | ModeBits::encode(mode_)
- | FPUBits::encode(use_fpu_)
- | OperandTypeInfoBits::encode(operands_type_)
- | ResultTypeInfoBits::encode(result_type_);
- }
-
- void Generate(MacroAssembler* masm);
- void GenerateGeneric(MacroAssembler* masm);
- void GenerateSmiSmiOperation(MacroAssembler* masm);
- void GenerateFPOperation(MacroAssembler* masm,
- bool smi_operands,
- Label* not_numbers,
- Label* gc_required);
- void GenerateSmiCode(MacroAssembler* masm,
- Label* use_runtime,
- Label* gc_required,
- SmiCodeGenerateHeapNumberResults heapnumber_results);
- void GenerateLoadArguments(MacroAssembler* masm);
- void GenerateReturn(MacroAssembler* masm);
- void GenerateUninitializedStub(MacroAssembler* masm);
- void GenerateSmiStub(MacroAssembler* masm);
- void GenerateInt32Stub(MacroAssembler* masm);
- void GenerateHeapNumberStub(MacroAssembler* masm);
- void GenerateOddballStub(MacroAssembler* masm);
- void GenerateStringStub(MacroAssembler* masm);
- void GenerateBothStringStub(MacroAssembler* masm);
- void GenerateGenericStub(MacroAssembler* masm);
- void GenerateAddStrings(MacroAssembler* masm);
- void GenerateCallRuntime(MacroAssembler* masm);
-
- void GenerateHeapResultAllocation(MacroAssembler* masm,
- Register result,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
- void GenerateRegisterArgsPush(MacroAssembler* masm);
- void GenerateTypeTransition(MacroAssembler* masm);
- void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm);
-
- virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
-
- virtual InlineCacheState GetICState() {
- return BinaryOpIC::ToState(operands_type_);
- }
-
- virtual void FinishCode(Handle<Code> code) {
- code->set_binary_op_type(operands_type_);
- code->set_binary_op_result_type(result_type_);
- }
-
- friend class CodeGenerator;
-};
-
-
class StringHelper : public AllStatic {
public:
// Generate code for copying characters using a simple loop. This should only
@@ -275,14 +111,14 @@ class StringHelper : public AllStatic {
int flags);
- // Probe the symbol table for a two character string. If the string is
+ // Probe the string table for a two character string. If the string is
// not found by probing a jump to the label not_found is performed. This jump
- // does not guarantee that the string is not in the symbol table. If the
+ // does not guarantee that the string is not in the string table. If the
// string is found the code falls through with the string in register r0.
// Contents of both c1 and c2 registers are modified. At the exit c1 is
// guaranteed to contain halfword with low and high bytes equal to
// initial contents of c1 and c2 respectively.
- static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
+ static void GenerateTwoCharacterStringTableProbe(MacroAssembler* masm,
Register c1,
Register c2,
Register scratch1,
@@ -309,20 +145,7 @@ class StringHelper : public AllStatic {
};
-// Flag that indicates how to generate code for the stub StringAddStub.
-enum StringAddFlags {
- NO_STRING_ADD_FLAGS = 0,
- // Omit left string check in stub (left is definitely a string).
- NO_STRING_CHECK_LEFT_IN_STUB = 1 << 0,
- // Omit right string check in stub (right is definitely a string).
- NO_STRING_CHECK_RIGHT_IN_STUB = 1 << 1,
- // Omit both string checks in stub.
- NO_STRING_CHECK_IN_STUB =
- NO_STRING_CHECK_LEFT_IN_STUB | NO_STRING_CHECK_RIGHT_IN_STUB
-};
-
-
-class StringAddStub: public CodeStub {
+class StringAddStub: public PlatformCodeStub {
public:
explicit StringAddStub(StringAddFlags flags) : flags_(flags) {}
@@ -341,11 +164,14 @@ class StringAddStub: public CodeStub {
Register scratch4,
Label* slow);
+ void GenerateRegisterArgsPush(MacroAssembler* masm);
+ void GenerateRegisterArgsPop(MacroAssembler* masm);
+
const StringAddFlags flags_;
};
-class SubStringStub: public CodeStub {
+class SubStringStub: public PlatformCodeStub {
public:
SubStringStub() {}
@@ -357,7 +183,7 @@ class SubStringStub: public CodeStub {
};
-class StringCompareStub: public CodeStub {
+class StringCompareStub: public PlatformCodeStub {
public:
StringCompareStub() { }
@@ -398,7 +224,7 @@ class StringCompareStub: public CodeStub {
// This stub can convert a signed int32 to a heap number (double). It does
// not work for int32s that are in Smi range! No GC occurs during this stub
// so you don't have to set up the frame.
-class WriteInt32ToHeapNumberStub : public CodeStub {
+class WriteInt32ToHeapNumberStub : public PlatformCodeStub {
public:
WriteInt32ToHeapNumberStub(Register the_int,
Register the_heap_number,
@@ -414,8 +240,8 @@ class WriteInt32ToHeapNumberStub : public CodeStub {
ASSERT(SignRegisterBits::is_valid(sign_.code()));
}
- bool IsPregenerated();
- static void GenerateFixedRegStubsAheadOfTime();
+ virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE;
+ static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
private:
Register the_int_;
@@ -442,33 +268,7 @@ class WriteInt32ToHeapNumberStub : public CodeStub {
};
-class NumberToStringStub: public CodeStub {
- public:
- NumberToStringStub() { }
-
- // Generate code to do a lookup in the number string cache. If the number in
- // the register object is found in the cache the generated code falls through
- // with the result in the result register. The object and the result register
- // can be the same. If the number is not found in the cache the code jumps to
- // the label not_found with only the content of register object unchanged.
- static void GenerateLookupNumberStringCache(MacroAssembler* masm,
- Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- bool object_is_smi,
- Label* not_found);
-
- private:
- Major MajorKey() { return NumberToString; }
- int MinorKey() { return 0; }
-
- void Generate(MacroAssembler* masm);
-};
-
-
-class RecordWriteStub: public CodeStub {
+class RecordWriteStub: public PlatformCodeStub {
public:
RecordWriteStub(Register object,
Register value,
@@ -491,8 +291,8 @@ class RecordWriteStub: public CodeStub {
INCREMENTAL_COMPACTION
};
- virtual bool IsPregenerated();
- static void GenerateFixedRegStubsAheadOfTime();
+ virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE;
+ static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
virtual bool SometimesSetsUpAFrame() { return false; }
static void PatchBranchIntoNop(MacroAssembler* masm, int pos) {
@@ -566,7 +366,7 @@ class RecordWriteStub: public CodeStub {
address_(address),
scratch0_(scratch0) {
ASSERT(!AreAliased(scratch0, object, address, no_reg));
- scratch1_ = GetRegThatIsNotOneOf(object_, address_, scratch0_);
+ scratch1_ = GetRegisterThatIsNotOneOf(object_, address_, scratch0_);
}
void Save(MacroAssembler* masm) {
@@ -586,7 +386,6 @@ class RecordWriteStub: public CodeStub {
void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
masm->MultiPush((kJSCallerSaved | ra.bit()) & ~scratch1_.bit());
if (mode == kSaveFPRegs) {
- CpuFeatures::Scope scope(FPU);
masm->MultiPushFPU(kCallerSavedFPU);
}
}
@@ -594,7 +393,6 @@ class RecordWriteStub: public CodeStub {
inline void RestoreCallerSaveRegisters(MacroAssembler*masm,
SaveFPRegsMode mode) {
if (mode == kSaveFPRegs) {
- CpuFeatures::Scope scope(FPU);
masm->MultiPopFPU(kCallerSavedFPU);
}
masm->MultiPop((kJSCallerSaved | ra.bit()) & ~scratch1_.bit());
@@ -611,19 +409,6 @@ class RecordWriteStub: public CodeStub {
Register scratch0_;
Register scratch1_;
- Register GetRegThatIsNotOneOf(Register r1,
- Register r2,
- Register r3) {
- for (int i = 0; i < Register::kNumAllocatableRegisters; i++) {
- Register candidate = Register::FromAllocationIndex(i);
- if (candidate.is(r1)) continue;
- if (candidate.is(r2)) continue;
- if (candidate.is(r3)) continue;
- return candidate;
- }
- UNREACHABLE();
- return no_reg;
- }
friend class RecordWriteStub;
};
@@ -670,33 +455,15 @@ class RecordWriteStub: public CodeStub {
};
-// Enter C code from generated RegExp code in a way that allows
-// the C code to fix the return address in case of a GC.
-// Currently only needed on ARM and MIPS.
-class RegExpCEntryStub: public CodeStub {
- public:
- RegExpCEntryStub() {}
- virtual ~RegExpCEntryStub() {}
- void Generate(MacroAssembler* masm);
-
- private:
- Major MajorKey() { return RegExpCEntry; }
- int MinorKey() { return 0; }
-
- bool NeedsImmovableCode() { return true; }
-};
-
// Trampoline stub to call into native code. To call safely into native code
// in the presence of compacting GC (which can move code objects) we need to
// keep the code which called into native pinned in the memory. Currently the
// simplest approach is to generate such stub early enough so it can never be
// moved by GC
-class DirectCEntryStub: public CodeStub {
+class DirectCEntryStub: public PlatformCodeStub {
public:
DirectCEntryStub() {}
void Generate(MacroAssembler* masm);
- void GenerateCall(MacroAssembler* masm,
- ExternalReference function);
void GenerateCall(MacroAssembler* masm, Register target);
private:
@@ -706,153 +473,12 @@ class DirectCEntryStub: public CodeStub {
bool NeedsImmovableCode() { return true; }
};
-class FloatingPointHelper : public AllStatic {
- public:
- enum Destination {
- kFPURegisters,
- kCoreRegisters
- };
-
-
- // Loads smis from a0 and a1 (right and left in binary operations) into
- // floating point registers. Depending on the destination the values ends up
- // either f14 and f12 or in a2/a3 and a0/a1 respectively. If the destination
- // is floating point registers FPU must be supported. If core registers are
- // requested when FPU is supported f12 and f14 will be scratched.
- static void LoadSmis(MacroAssembler* masm,
- Destination destination,
- Register scratch1,
- Register scratch2);
-
- // Loads objects from a0 and a1 (right and left in binary operations) into
- // floating point registers. Depending on the destination the values ends up
- // either f14 and f12 or in a2/a3 and a0/a1 respectively. If the destination
- // is floating point registers FPU must be supported. If core registers are
- // requested when FPU is supported f12 and f14 will still be scratched. If
- // either a0 or a1 is not a number (not smi and not heap number object) the
- // not_number label is jumped to with a0 and a1 intact.
- static void LoadOperands(MacroAssembler* masm,
- FloatingPointHelper::Destination destination,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* not_number);
-
- // Convert the smi or heap number in object to an int32 using the rules
- // for ToInt32 as described in ECMAScript 9.5.: the value is truncated
- // and brought into the range -2^31 .. +2^31 - 1.
- static void ConvertNumberToInt32(MacroAssembler* masm,
- Register object,
- Register dst,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- FPURegister double_scratch,
- Label* not_int32);
-
- // Converts the integer (untagged smi) in |int_scratch| to a double, storing
- // the result either in |double_dst| or |dst2:dst1|, depending on
- // |destination|.
- // Warning: The value in |int_scratch| will be changed in the process!
- static void ConvertIntToDouble(MacroAssembler* masm,
- Register int_scratch,
- Destination destination,
- FPURegister double_dst,
- Register dst1,
- Register dst2,
- Register scratch2,
- FPURegister single_scratch);
-
- // Load the number from object into double_dst in the double format.
- // Control will jump to not_int32 if the value cannot be exactly represented
- // by a 32-bit integer.
- // Floating point value in the 32-bit integer range that are not exact integer
- // won't be loaded.
- static void LoadNumberAsInt32Double(MacroAssembler* masm,
- Register object,
- Destination destination,
- FPURegister double_dst,
- Register dst1,
- Register dst2,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- FPURegister single_scratch,
- Label* not_int32);
-
- // Loads the number from object into dst as a 32-bit integer.
- // Control will jump to not_int32 if the object cannot be exactly represented
- // by a 32-bit integer.
- // Floating point value in the 32-bit integer range that are not exact integer
- // won't be converted.
- // scratch3 is not used when FPU is supported.
- static void LoadNumberAsInt32(MacroAssembler* masm,
- Register object,
- Register dst,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- FPURegister double_scratch,
- Label* not_int32);
-
- // Generate non FPU code to check if a double can be exactly represented by a
- // 32-bit integer. This does not check for 0 or -0, which need
- // to be checked for separately.
- // Control jumps to not_int32 if the value is not a 32-bit integer, and falls
- // through otherwise.
- // src1 and src2 will be cloberred.
- //
- // Expected input:
- // - src1: higher (exponent) part of the double value.
- // - src2: lower (mantissa) part of the double value.
- // Output status:
- // - dst: 32 higher bits of the mantissa. (mantissa[51:20])
- // - src2: contains 1.
- // - other registers are clobbered.
- static void DoubleIs32BitInteger(MacroAssembler* masm,
- Register src1,
- Register src2,
- Register dst,
- Register scratch,
- Label* not_int32);
-
- // Generates code to call a C function to do a double operation using core
- // registers. (Used when FPU is not supported.)
- // This code never falls through, but returns with a heap number containing
- // the result in v0.
- // Register heapnumber_result must be a heap number in which the
- // result of the operation will be stored.
- // Requires the following layout on entry:
- // a0: Left value (least significant part of mantissa).
- // a1: Left value (sign, exponent, top of mantissa).
- // a2: Right value (least significant part of mantissa).
- // a3: Right value (sign, exponent, top of mantissa).
- static void CallCCodeForDoubleOperation(MacroAssembler* masm,
- Token::Value op,
- Register heap_number_result,
- Register scratch);
-
- private:
- static void LoadNumber(MacroAssembler* masm,
- FloatingPointHelper::Destination destination,
- Register object,
- FPURegister dst,
- Register dst1,
- Register dst2,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* not_number);
-};
-
-class StringDictionaryLookupStub: public CodeStub {
+class NameDictionaryLookupStub: public PlatformCodeStub {
public:
enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
- explicit StringDictionaryLookupStub(LookupMode mode) : mode_(mode) { }
+ explicit NameDictionaryLookupStub(LookupMode mode) : mode_(mode) { }
void Generate(MacroAssembler* masm);
@@ -861,7 +487,7 @@ class StringDictionaryLookupStub: public CodeStub {
Label* done,
Register receiver,
Register properties,
- Handle<String> name,
+ Handle<Name> name,
Register scratch0);
static void GeneratePositiveLookup(MacroAssembler* masm,
@@ -879,14 +505,14 @@ class StringDictionaryLookupStub: public CodeStub {
static const int kTotalProbes = 20;
static const int kCapacityOffset =
- StringDictionary::kHeaderSize +
- StringDictionary::kCapacityIndex * kPointerSize;
+ NameDictionary::kHeaderSize +
+ NameDictionary::kCapacityIndex * kPointerSize;
static const int kElementsStartOffset =
- StringDictionary::kHeaderSize +
- StringDictionary::kElementsStartIndex * kPointerSize;
+ NameDictionary::kHeaderSize +
+ NameDictionary::kElementsStartIndex * kPointerSize;
- Major MajorKey() { return StringDictionaryLookup; }
+ Major MajorKey() { return NameDictionaryLookup; }
int MinorKey() {
return LookupModeBits::encode(mode_);
diff --git a/deps/v8/src/mips/codegen-mips.cc b/deps/v8/src/mips/codegen-mips.cc
index 44e0359e4..ec6649533 100644
--- a/deps/v8/src/mips/codegen-mips.cc
+++ b/deps/v8/src/mips/codegen-mips.cc
@@ -27,15 +27,15 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_MIPS)
+#if V8_TARGET_ARCH_MIPS
#include "codegen.h"
#include "macro-assembler.h"
+#include "simulator-mips.h"
namespace v8 {
namespace internal {
-#define __ ACCESS_MASM(masm)
UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
switch (type) {
@@ -49,10 +49,78 @@ UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
}
+#define __ masm.
+
+
+#if defined(USE_SIMULATOR)
+byte* fast_exp_mips_machine_code = NULL;
+double fast_exp_simulator(double x) {
+ return Simulator::current(Isolate::Current())->CallFP(
+ fast_exp_mips_machine_code, x, 0);
+}
+#endif
+
+
+UnaryMathFunction CreateExpFunction() {
+ if (!FLAG_fast_math) return &exp;
+ size_t actual_size;
+ byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
+ if (buffer == NULL) return &exp;
+ ExternalReference::InitializeMathExpData();
+
+ MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+
+ {
+ DoubleRegister input = f12;
+ DoubleRegister result = f0;
+ DoubleRegister double_scratch1 = f4;
+ DoubleRegister double_scratch2 = f6;
+ Register temp1 = t0;
+ Register temp2 = t1;
+ Register temp3 = t2;
+
+ if (!IsMipsSoftFloatABI) {
+ // Input value is in f12 anyway, nothing to do.
+ } else {
+ __ Move(input, a0, a1);
+ }
+ __ Push(temp3, temp2, temp1);
+ MathExpGenerator::EmitMathExp(
+ &masm, input, result, double_scratch1, double_scratch2,
+ temp1, temp2, temp3);
+ __ Pop(temp3, temp2, temp1);
+ if (!IsMipsSoftFloatABI) {
+ // Result is already in f0, nothing to do.
+ } else {
+ __ Move(v0, v1, result);
+ }
+ __ Ret();
+ }
+
+ CodeDesc desc;
+ masm.GetCode(&desc);
+ ASSERT(!RelocInfo::RequiresRelocation(desc));
+
+ CPU::FlushICache(buffer, actual_size);
+ OS::ProtectCode(buffer, actual_size);
+
+#if !defined(USE_SIMULATOR)
+ return FUNCTION_CAST<UnaryMathFunction>(buffer);
+#else
+ fast_exp_mips_machine_code = buffer;
+ return &fast_exp_simulator;
+#endif
+}
+
+
+#undef __
+
+
UnaryMathFunction CreateSqrtFunction() {
return &sqrt;
}
+
// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.
@@ -69,11 +137,15 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
masm->set_has_frame(false);
}
+
// -------------------------------------------------------------------------
// Code generators
+#define __ ACCESS_MASM(masm)
+
void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
- MacroAssembler* masm) {
+ MacroAssembler* masm, AllocationSiteMode mode,
+ Label* allocation_memento_found) {
// ----------- S t a t e -------------
// -- a0 : value
// -- a1 : key
@@ -82,6 +154,11 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
// -- a3 : target map, scratch for subsequent call
// -- t0 : scratch (elements)
// -----------------------------------
+ if (mode == TRACK_ALLOCATION_SITE) {
+ ASSERT(allocation_memento_found != NULL);
+ __ JumpIfJSArrayHasAllocationMemento(a2, t0, allocation_memento_found);
+ }
+
// Set transitioned map.
__ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
__ RecordWriteField(a2,
@@ -96,7 +173,7 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
void ElementsTransitionGenerator::GenerateSmiToDouble(
- MacroAssembler* masm, Label* fail) {
+ MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
// ----------- S t a t e -------------
// -- a0 : value
// -- a1 : key
@@ -106,10 +183,13 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// -- t0 : scratch (elements)
// -----------------------------------
Label loop, entry, convert_hole, gc_required, only_change_map, done;
- bool fpu_supported = CpuFeatures::IsSupported(FPU);
Register scratch = t6;
+ if (mode == TRACK_ALLOCATION_SITE) {
+ __ JumpIfJSArrayHasAllocationMemento(a2, t0, fail);
+ }
+
// Check for empty arrays, which only require a map transition and no changes
// to the backing store.
__ lw(t0, FieldMemOperand(a2, JSObject::kElementsOffset));
@@ -124,8 +204,9 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// Allocate new FixedDoubleArray.
__ sll(scratch, t1, 2);
__ Addu(scratch, scratch, FixedDoubleArray::kHeaderSize);
- __ AllocateInNewSpace(scratch, t2, t3, t5, &gc_required, NO_ALLOCATION_FLAGS);
+ __ Allocate(scratch, t2, t3, t5, &gc_required, DOUBLE_ALIGNMENT);
// t2: destination FixedDoubleArray, not tagged as heap object
+
// Set destination FixedDoubleArray's length and map.
__ LoadRoot(t5, Heap::kFixedDoubleArrayMapRootIndex);
__ sw(t1, MemOperand(t2, FixedDoubleArray::kLengthOffset));
@@ -166,8 +247,6 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// t2: end of destination FixedDoubleArray, not tagged
// t3: begin of FixedDoubleArray element fields, not tagged
- if (!fpu_supported) __ Push(a1, a0);
-
__ Branch(&entry);
__ bind(&only_change_map);
@@ -176,7 +255,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
HeapObject::kMapOffset,
a3,
t5,
- kRAHasBeenSaved,
+ kRAHasNotBeenSaved,
kDontSaveFPRegs,
OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
@@ -195,25 +274,11 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
__ UntagAndJumpIfNotSmi(t5, t5, &convert_hole);
// Normal smi, convert to double and store.
- if (fpu_supported) {
- CpuFeatures::Scope scope(FPU);
- __ mtc1(t5, f0);
- __ cvt_d_w(f0, f0);
- __ sdc1(f0, MemOperand(t3));
- __ Addu(t3, t3, kDoubleSize);
- } else {
- FloatingPointHelper::ConvertIntToDouble(masm,
- t5,
- FloatingPointHelper::kCoreRegisters,
- f0,
- a0,
- a1,
- t7,
- f0);
- __ sw(a0, MemOperand(t3)); // mantissa
- __ sw(a1, MemOperand(t3, kIntSize)); // exponent
- __ Addu(t3, t3, kDoubleSize);
- }
+ __ mtc1(t5, f0);
+ __ cvt_d_w(f0, f0);
+ __ sdc1(f0, MemOperand(t3));
+ __ Addu(t3, t3, kDoubleSize);
+
__ Branch(&entry);
// Hole found, store the-hole NaN.
@@ -223,7 +288,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
__ SmiTag(t5);
__ Or(t5, t5, Operand(1));
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- __ Assert(eq, "object found in smi-only array", at, Operand(t5));
+ __ Assert(eq, kObjectFoundInSmiOnlyArray, at, Operand(t5));
}
__ sw(t0, MemOperand(t3)); // mantissa
__ sw(t1, MemOperand(t3, kIntSize)); // exponent
@@ -232,14 +297,13 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
__ bind(&entry);
__ Branch(&loop, lt, t3, Operand(t2));
- if (!fpu_supported) __ Pop(a1, a0);
__ pop(ra);
__ bind(&done);
}
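
With FPU support now assumed, each element conversion above reduces to: untag the smi (one-bit tag on 32-bit V8), move it to an FPU register, and widen to double (mtc1/cvt_d_w). The same logic as a C++ sketch, assuming the usual 32-bit smi layout; the helper name is not from V8:

#include <cstdint>

// Smi layout assumed: low bit 0 for smis, value in the upper 31 bits.
// Values with the low bit set are heap objects and take the hole path above.
bool SmiToDouble(int32_t tagged, double* out) {
  if ((tagged & 1) != 0) return false;       // Not a smi.
  *out = static_cast<double>(tagged >> 1);   // Untag, then int -> double.
  return true;
}
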
void ElementsTransitionGenerator::GenerateDoubleToObject(
- MacroAssembler* masm, Label* fail) {
+ MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
// ----------- S t a t e -------------
// -- a0 : value
// -- a1 : key
@@ -250,6 +314,10 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
// -----------------------------------
Label entry, loop, convert_hole, gc_required, only_change_map;
+ if (mode == TRACK_ALLOCATION_SITE) {
+ __ JumpIfJSArrayHasAllocationMemento(a2, t0, fail);
+ }
+
// Check for empty arrays, which only require a map transition and no changes
// to the backing store.
__ lw(t0, FieldMemOperand(a2, JSObject::kElementsOffset));
@@ -265,7 +333,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
// Allocate new FixedArray.
__ sll(a0, t1, 1);
__ Addu(a0, a0, FixedDoubleArray::kHeaderSize);
- __ AllocateInNewSpace(a0, t2, t3, t5, &gc_required, NO_ALLOCATION_FLAGS);
+ __ Allocate(a0, t2, t3, t5, &gc_required, NO_ALLOCATION_FLAGS);
// t2: destination FixedArray, not tagged as heap object
// Set destination FixedDoubleArray's length and map.
__ LoadRoot(t5, Heap::kFixedArrayMapRootIndex);
@@ -389,7 +457,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
// the string.
__ bind(&cons_string);
__ lw(result, FieldMemOperand(string, ConsString::kSecondOffset));
- __ LoadRoot(at, Heap::kEmptyStringRootIndex);
+ __ LoadRoot(at, Heap::kempty_stringRootIndex);
__ Branch(call_runtime, ne, result, Operand(at));
// Get the first of the two strings and load its instance type.
__ lw(string, FieldMemOperand(string, ConsString::kFirstOffset));
@@ -408,7 +476,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ Branch(&external_string, ne, at, Operand(zero_reg));
// Prepare sequential strings
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
__ Addu(string,
string,
SeqTwoByteString::kHeaderSize - kHeapObjectTag);
@@ -420,7 +488,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
// Assert that we do not have a cons or slice (indirect strings) here.
// Sequential strings have already been ruled out.
__ And(at, result, Operand(kIsIndirectStringMask));
- __ Assert(eq, "external string expected, but not found",
+ __ Assert(eq, kExternalStringExpectedButNotFound,
at, Operand(zero_reg));
}
// Rule out short external strings.
@@ -446,6 +514,168 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ bind(&done);
}
+
+static MemOperand ExpConstant(int index, Register base) {
+ return MemOperand(base, index * kDoubleSize);
+}
+
+
+void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
+ DoubleRegister input,
+ DoubleRegister result,
+ DoubleRegister double_scratch1,
+ DoubleRegister double_scratch2,
+ Register temp1,
+ Register temp2,
+ Register temp3) {
+ ASSERT(!input.is(result));
+ ASSERT(!input.is(double_scratch1));
+ ASSERT(!input.is(double_scratch2));
+ ASSERT(!result.is(double_scratch1));
+ ASSERT(!result.is(double_scratch2));
+ ASSERT(!double_scratch1.is(double_scratch2));
+ ASSERT(!temp1.is(temp2));
+ ASSERT(!temp1.is(temp3));
+ ASSERT(!temp2.is(temp3));
+ ASSERT(ExternalReference::math_exp_constants(0).address() != NULL);
+
+ Label zero, infinity, done;
+
+ __ li(temp3, Operand(ExternalReference::math_exp_constants(0)));
+
+ __ ldc1(double_scratch1, ExpConstant(0, temp3));
+ __ BranchF(&zero, NULL, ge, double_scratch1, input);
+
+ __ ldc1(double_scratch2, ExpConstant(1, temp3));
+ __ BranchF(&infinity, NULL, ge, input, double_scratch2);
+
+ __ ldc1(double_scratch1, ExpConstant(3, temp3));
+ __ ldc1(result, ExpConstant(4, temp3));
+ __ mul_d(double_scratch1, double_scratch1, input);
+ __ add_d(double_scratch1, double_scratch1, result);
+ __ FmoveLow(temp2, double_scratch1);
+ __ sub_d(double_scratch1, double_scratch1, result);
+ __ ldc1(result, ExpConstant(6, temp3));
+ __ ldc1(double_scratch2, ExpConstant(5, temp3));
+ __ mul_d(double_scratch1, double_scratch1, double_scratch2);
+ __ sub_d(double_scratch1, double_scratch1, input);
+ __ sub_d(result, result, double_scratch1);
+ __ mul_d(double_scratch2, double_scratch1, double_scratch1);
+ __ mul_d(result, result, double_scratch2);
+ __ ldc1(double_scratch2, ExpConstant(7, temp3));
+ __ mul_d(result, result, double_scratch2);
+ __ sub_d(result, result, double_scratch1);
+  // Move 1 into double_scratch2 as math_exp_constants_array[8] == 1.
+ ASSERT(*reinterpret_cast<double*>
+ (ExternalReference::math_exp_constants(8).address()) == 1);
+ __ Move(double_scratch2, 1);
+ __ add_d(result, result, double_scratch2);
+ __ srl(temp1, temp2, 11);
+ __ Ext(temp2, temp2, 0, 11);
+ __ Addu(temp1, temp1, Operand(0x3ff));
+
+ // Must not call ExpConstant() after overwriting temp3!
+ __ li(temp3, Operand(ExternalReference::math_exp_log_table()));
+ __ sll(at, temp2, 3);
+ __ Addu(temp3, temp3, Operand(at));
+ __ lw(temp2, MemOperand(temp3, 0));
+ __ lw(temp3, MemOperand(temp3, kPointerSize));
+  // The first word loaded goes into the lower-numbered register.
+ if (temp2.code() < temp3.code()) {
+ __ sll(at, temp1, 20);
+ __ Or(temp1, temp3, at);
+ __ Move(double_scratch1, temp2, temp1);
+ } else {
+ __ sll(at, temp1, 20);
+ __ Or(temp1, temp2, at);
+ __ Move(double_scratch1, temp3, temp1);
+ }
+ __ mul_d(result, result, double_scratch1);
+ __ Branch(&done);
+
+ __ bind(&zero);
+ __ Move(result, kDoubleRegZero);
+ __ Branch(&done);
+
+ __ bind(&infinity);
+ __ ldc1(result, ExpConstant(2, temp3));
+
+ __ bind(&done);
+}
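
The scaling step near the end of EmitMathExp builds a power of two directly from IEEE-754 bits: the biased exponent (k + 0x3ff) is shifted into the high word of a double and the result is multiplied into the polynomial value. A standalone illustration of just that trick, leaving out the polynomial and the log-table lookup:

#include <cstdint>
#include <cstring>

// Returns 2^k for k in the normal double range by placing (k + 1023) in the
// exponent field. Mirrors the `Addu(temp1, temp1, Operand(0x3ff))` and
// `sll(at, temp1, 20)` sequence above, which operates on the high 32-bit word.
double PowerOfTwo(int k) {
  uint64_t bits = static_cast<uint64_t>(k + 1023) << 52;
  double result;
  std::memcpy(&result, &bits, sizeof result);
  return result;
}
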
+
+
+// nop(CODE_AGE_MARKER_NOP)
+static const uint32_t kCodeAgePatchFirstInstruction = 0x00010180;
+
+static byte* GetNoCodeAgeSequence(uint32_t* length) {
+ // The sequence of instructions that is patched out for aging code is the
+ // following boilerplate stack-building prologue that is found in FUNCTIONS
+ static bool initialized = false;
+ static uint32_t sequence[kNoCodeAgeSequenceLength];
+ byte* byte_sequence = reinterpret_cast<byte*>(sequence);
+ *length = kNoCodeAgeSequenceLength * Assembler::kInstrSize;
+ if (!initialized) {
+ CodePatcher patcher(byte_sequence, kNoCodeAgeSequenceLength);
+ patcher.masm()->Push(ra, fp, cp, a1);
+ patcher.masm()->nop(Assembler::CODE_AGE_SEQUENCE_NOP);
+ patcher.masm()->Addu(fp, sp, Operand(2 * kPointerSize));
+ initialized = true;
+ }
+ return byte_sequence;
+}
+
+
+bool Code::IsYoungSequence(byte* sequence) {
+ uint32_t young_length;
+ byte* young_sequence = GetNoCodeAgeSequence(&young_length);
+ bool result = !memcmp(sequence, young_sequence, young_length);
+ ASSERT(result ||
+ Memory::uint32_at(sequence) == kCodeAgePatchFirstInstruction);
+ return result;
+}
+
+
+void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
+ MarkingParity* parity) {
+ if (IsYoungSequence(sequence)) {
+ *age = kNoAgeCodeAge;
+ *parity = NO_MARKING_PARITY;
+ } else {
+ Address target_address = Memory::Address_at(
+ sequence + Assembler::kInstrSize * (kNoCodeAgeSequenceLength - 1));
+ Code* stub = GetCodeFromTargetAddress(target_address);
+ GetCodeAgeAndParity(stub, age, parity);
+ }
+}
+
+
+void Code::PatchPlatformCodeAge(Isolate* isolate,
+ byte* sequence,
+ Code::Age age,
+ MarkingParity parity) {
+ uint32_t young_length;
+ byte* young_sequence = GetNoCodeAgeSequence(&young_length);
+ if (age == kNoAgeCodeAge) {
+ CopyBytes(sequence, young_sequence, young_length);
+ CPU::FlushICache(sequence, young_length);
+ } else {
+ Code* stub = GetCodeAgeStub(isolate, age, parity);
+ CodePatcher patcher(sequence, young_length / Assembler::kInstrSize);
+ // Mark this code sequence for FindPlatformCodeAgeSequence()
+ patcher.masm()->nop(Assembler::CODE_AGE_MARKER_NOP);
+ // Save the function's original return address
+ // (it will be clobbered by Call(t9))
+ patcher.masm()->mov(at, ra);
+ // Load the stub address to t9 and call it
+ patcher.masm()->li(t9,
+ Operand(reinterpret_cast<uint32_t>(stub->instruction_start())));
+ patcher.masm()->Call(t9);
+ // Record the stub address in the empty space for GetCodeAgeAndParity()
+ patcher.masm()->dd(reinterpret_cast<uint32_t>(stub->instruction_start()));
+ }
+}
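
PatchPlatformCodeAge ends the patched prologue with a `dd` of the stub's entry point, which is exactly the word GetCodeAgeAndParity reads back. A sketch of that read-back; the sequence length below is an assumed placeholder, since kNoCodeAgeSequenceLength is defined in the assembler headers rather than in this patch:

#include <cstdint>
#include <cstring>

const int kInstrSize = 4;
const int kNoCodeAgeSequenceLength = 7;  // Placeholder, not taken from this patch.

// Reads the code-age stub address stored after the call, as in
// Code::GetCodeAgeAndParity above.
uint32_t ReadCodeAgeStubEntry(const uint8_t* sequence) {
  uint32_t entry;
  std::memcpy(&entry,
              sequence + kInstrSize * (kNoCodeAgeSequenceLength - 1),
              sizeof entry);
  return entry;
}
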
+
+
#undef __
} } // namespace v8::internal
diff --git a/deps/v8/src/mips/codegen-mips.h b/deps/v8/src/mips/codegen-mips.h
index e704c4f56..822b94ad7 100644
--- a/deps/v8/src/mips/codegen-mips.h
+++ b/deps/v8/src/mips/codegen-mips.h
@@ -46,10 +46,14 @@ enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
class CodeGenerator: public AstVisitor {
public:
+ explicit CodeGenerator(Isolate* isolate) {
+ InitializeAstVisitor(isolate);
+ }
+
static bool MakeCode(CompilationInfo* info);
// Printing of AST, etc. as requested by flags.
- static void MakeCodePrologue(CompilationInfo* info);
+ static void MakeCodePrologue(CompilationInfo* info, const char* kind);
// Allocate and install the code.
static Handle<Code> MakeCodeEpilogue(MacroAssembler* masm,
@@ -59,7 +63,7 @@ class CodeGenerator: public AstVisitor {
// Print the code after compiling it.
static void PrintCode(Handle<Code> code, CompilationInfo* info);
- static bool ShouldGenerateLog(Expression* type);
+ static bool ShouldGenerateLog(Isolate* isolate, Expression* type);
static void SetFunctionInfo(Handle<JSFunction> fun,
FunctionLiteral* lit,
@@ -70,6 +74,8 @@ class CodeGenerator: public AstVisitor {
int pos,
bool right_here = false);
+ DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
+
private:
DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
};
@@ -90,6 +96,23 @@ class StringCharLoadGenerator : public AllStatic {
DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
};
+
+class MathExpGenerator : public AllStatic {
+ public:
+ // Register input isn't modified. All other registers are clobbered.
+ static void EmitMathExp(MacroAssembler* masm,
+ DoubleRegister input,
+ DoubleRegister result,
+ DoubleRegister double_scratch1,
+ DoubleRegister double_scratch2,
+ Register temp1,
+ Register temp2,
+ Register temp3);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MathExpGenerator);
+};
+
} } // namespace v8::internal
#endif // V8_MIPS_CODEGEN_MIPS_H_
diff --git a/deps/v8/src/mips/constants-mips.cc b/deps/v8/src/mips/constants-mips.cc
index 7d654f6d6..2dd7a31f3 100644
--- a/deps/v8/src/mips/constants-mips.cc
+++ b/deps/v8/src/mips/constants-mips.cc
@@ -27,7 +27,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_MIPS)
+#if V8_TARGET_ARCH_MIPS
#include "constants-mips.h"
@@ -58,6 +58,7 @@ const char* Registers::names_[kNumSimuRegisters] = {
"pc"
};
+
// List of alias names which can be used when referring to MIPS registers.
const Registers::RegisterAlias Registers::aliases_[] = {
{0, "zero"},
@@ -67,6 +68,7 @@ const Registers::RegisterAlias Registers::aliases_[] = {
{kInvalidRegister, NULL}
};
+
const char* Registers::Name(int reg) {
const char* result;
if ((0 <= reg) && (reg < kNumSimuRegisters)) {
@@ -106,11 +108,13 @@ const char* FPURegisters::names_[kNumFPURegisters] = {
"f22", "f23", "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31"
};
+
// List of alias names which can be used when referring to MIPS registers.
const FPURegisters::RegisterAlias FPURegisters::aliases_[] = {
{kInvalidRegister, NULL}
};
+
const char* FPURegisters::Name(int creg) {
const char* result;
if ((0 <= creg) && (creg < kNumFPURegisters)) {
@@ -302,6 +306,8 @@ Instruction::Type Instruction::InstructionType() const {
return kRegisterType;
};
break;
+ case COP1X:
+ return kRegisterType;
// 16 bits Immediate type instructions. e.g.: addi dest, src, imm16.
case REGIMM:
case BEQ:
diff --git a/deps/v8/src/mips/constants-mips.h b/deps/v8/src/mips/constants-mips.h
index 3d585717c..5a0870fd2 100644
--- a/deps/v8/src/mips/constants-mips.h
+++ b/deps/v8/src/mips/constants-mips.h
@@ -61,8 +61,9 @@ enum ArchVariants {
// -mhard-float is passed to the compiler.
const bool IsMipsSoftFloatABI = false;
#elif(defined(__mips_soft_float) && __mips_soft_float != 0)
-// Not using floating-point coprocessor instructions. This flag is raised when
-// -msoft-float is passed to the compiler.
+// This flag is raised when -msoft-float is passed to the compiler.
+// Although an FPU is a base requirement for V8, the soft-float ABI is used
+// on soft-float systems with FPU kernel emulation.
const bool IsMipsSoftFloatABI = true;
#else
const bool IsMipsSoftFloatABI = true;
@@ -99,7 +100,7 @@ const int kInvalidFPURegister = -1;
// FPU (coprocessor 1) control registers. Currently only FCSR is implemented.
const int kFCSRRegister = 31;
const int kInvalidFPUControlRegister = -1;
-const uint32_t kFPUInvalidResult = (uint32_t) (1 << 31) - 1;
+const uint32_t kFPUInvalidResult = static_cast<uint32_t>(1 << 31) - 1;
// FCSR constants.
const uint32_t kFCSRInexactFlagBit = 2;
@@ -216,6 +217,8 @@ const int kImm28Bits = 28;
// and are therefore shifted by 2.
const int kImmFieldShift = 2;
+const int kFrBits = 5;
+const int kFrShift = 21;
const int kFsShift = 11;
const int kFsBits = 5;
const int kFtShift = 16;
@@ -295,7 +298,9 @@ enum Opcode {
LDC1 = ((6 << 3) + 5) << kOpcodeShift,
SWC1 = ((7 << 3) + 1) << kOpcodeShift,
- SDC1 = ((7 << 3) + 5) << kOpcodeShift
+ SDC1 = ((7 << 3) + 5) << kOpcodeShift,
+
+ COP1X = ((1 << 4) + 3) << kOpcodeShift
};
enum SecondaryField {
@@ -416,6 +421,8 @@ enum SecondaryField {
CVT_S_L = ((4 << 3) + 0),
CVT_D_L = ((4 << 3) + 1),
// COP1 Encoding of Function Field When rs=PS.
+ // COP1X Encoding of Function Field.
+ MADD_D = ((4 << 3) + 1),
NULLSF = 0
};
@@ -423,7 +430,9 @@ enum SecondaryField {
// ----- Emulated conditions.
 // On MIPS we use this enum to abstract from conditional branch instructions.
-// the 'U' prefix is used to specify unsigned comparisons.
+// The 'U' prefix is used to specify unsigned comparisons.
+// Opposite conditions must be paired as odd/even numbers
+// because 'NegateCondition' function flips LSB to negate condition.
enum Condition {
// Any value < 0 is considered no_condition.
kNoCondition = -1,
@@ -444,8 +453,10 @@ enum Condition {
greater_equal = 13,
less_equal = 14,
greater = 15,
+ ueq = 16, // Unordered or Equal.
+ nue = 17, // Not (Unordered or Equal).
- cc_always = 16,
+ cc_always = 18,
// Aliases.
carry = Uless,
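
The odd/even pairing rule stated in the comment above exists because negating a condition is just an XOR with the lowest bit; that is also why cc_always had to move past the new ueq/nue pair. A sketch using only values visible in this hunk (the rest of the enum is omitted):

// Partial enum; values copied from the hunk above.
enum Condition {
  less_equal = 14,
  greater = 15,
  ueq = 16,        // Unordered or Equal.
  nue = 17,        // Not (Unordered or Equal).
  cc_always = 18   // Moved from 16 to make room for the ueq/nue pair.
};

inline Condition NegateCondition(Condition cc) {
  return static_cast<Condition>(cc ^ 1);  // less_equal <-> greater, ueq <-> nue.
}
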
@@ -677,6 +688,10 @@ class Instruction {
return Bits(kFtShift + kFtBits - 1, kFtShift);
}
+ inline int FrValue() const {
+    return Bits(kFrShift + kFrBits - 1, kFrShift);
+ }
+
// Float Compare condition code instruction bits.
inline int FCccValue() const {
return Bits(kFCccShift + kFCccBits - 1, kFCccShift);
diff --git a/deps/v8/src/mips/cpu-mips.cc b/deps/v8/src/mips/cpu-mips.cc
index 93ebeda80..49d0b377e 100644
--- a/deps/v8/src/mips/cpu-mips.cc
+++ b/deps/v8/src/mips/cpu-mips.cc
@@ -36,7 +36,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_MIPS)
+#if V8_TARGET_ARCH_MIPS
#include "cpu.h"
#include "macro-assembler.h"
@@ -87,14 +87,6 @@ void CPU::FlushICache(void* start, size_t size) {
#endif // USE_SIMULATOR.
}
-
-void CPU::DebugBreak() {
-#ifdef __mips
- asm volatile("break");
-#endif // #ifdef __mips
-}
-
-
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/mips/debug-mips.cc b/deps/v8/src/mips/debug-mips.cc
index 3be1e4d8b..1535231dd 100644
--- a/deps/v8/src/mips/debug-mips.cc
+++ b/deps/v8/src/mips/debug-mips.cc
@@ -29,7 +29,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_MIPS)
+#if V8_TARGET_ARCH_MIPS
#include "codegen.h"
#include "debug.h"
@@ -60,7 +60,7 @@ void BreakLocationIterator::SetDebugBreakAtReturn() {
// li and Call pseudo-instructions emit two instructions each.
patcher.masm()->li(v8::internal::t9,
Operand(reinterpret_cast<int32_t>(
- Isolate::Current()->debug()->debug_break_return()->entry())));
+ debug_info_->GetIsolate()->debug()->debug_break_return()->entry())));
patcher.masm()->Call(v8::internal::t9);
patcher.masm()->nop();
patcher.masm()->nop();
@@ -105,7 +105,7 @@ void BreakLocationIterator::SetDebugBreakAtSlot() {
// call t9 (jalr t9 / nop instruction pair)
CodePatcher patcher(rinfo()->pc(), Assembler::kDebugBreakSlotInstructions);
patcher.masm()->li(v8::internal::t9, Operand(reinterpret_cast<int32_t>(
- Isolate::Current()->debug()->debug_break_slot()->entry())));
+ debug_info_->GetIsolate()->debug()->debug_break_slot()->entry())));
patcher.masm()->Call(v8::internal::t9);
}
@@ -142,8 +142,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
if ((non_object_regs & (1 << r)) != 0) {
if (FLAG_debug_code) {
__ And(at, reg, 0xc0000000);
- __ Assert(
- eq, "Unable to encode value as smi", at, Operand(zero_reg));
+ __ Assert(eq, kUnableToEncodeValueAsSmi, at, Operand(zero_reg));
}
__ sll(reg, reg, kSmiTagSize);
}
@@ -236,6 +235,15 @@ void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
}
+void Debug::GenerateCompareNilICDebugBreak(MacroAssembler* masm) {
+ // Register state for CompareNil IC
+ // ----------- S t a t e -------------
+ // -- a0 : value
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, a0.bit(), 0);
+}
+
+
void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) {
// Calling convention for IC call (from ic-mips.cc).
// ----------- S t a t e -------------
@@ -316,12 +324,12 @@ void Debug::GenerateSlotDebugBreak(MacroAssembler* masm) {
void Debug::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
- masm->Abort("LiveEdit frame dropping is not supported on mips");
+ masm->Abort(kLiveEditFrameDroppingIsNotSupportedOnMips);
}
void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
- masm->Abort("LiveEdit frame dropping is not supported on mips");
+ masm->Abort(kLiveEditFrameDroppingIsNotSupportedOnMips);
}
diff --git a/deps/v8/src/mips/deoptimizer-mips.cc b/deps/v8/src/mips/deoptimizer-mips.cc
index 9fd815bb4..d31990be5 100644
--- a/deps/v8/src/mips/deoptimizer-mips.cc
+++ b/deps/v8/src/mips/deoptimizer-mips.cc
@@ -1,3 +1,4 @@
+
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
@@ -42,20 +43,8 @@ int Deoptimizer::patch_size() {
}
-void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
- HandleScope scope;
- AssertNoAllocation no_allocation;
-
- if (!function->IsOptimized()) return;
-
- // The optimized code is going to be patched, so we cannot use it
- // any more. Play safe and reset the whole cache.
- function->shared()->ClearOptimizedCodeMap();
-
- // Get the optimized code.
- Code* code = function->code();
+void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
Address code_start_address = code->instruction_start();
-
// Invalidate the relocation information, as it will become invalid by the
// code patching below, and is not needed any more.
code->InvalidateRelocation();
@@ -70,14 +59,14 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
for (int i = 0; i < deopt_data->DeoptCount(); i++) {
if (deopt_data->Pc(i)->value() == -1) continue;
Address call_address = code_start_address + deopt_data->Pc(i)->value();
- Address deopt_entry = GetDeoptimizationEntry(i, LAZY);
+ Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY);
int call_size_in_bytes = MacroAssembler::CallSize(deopt_entry,
- RelocInfo::NONE);
+ RelocInfo::NONE32);
int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize;
ASSERT(call_size_in_bytes % Assembler::kInstrSize == 0);
ASSERT(call_size_in_bytes <= patch_size());
CodePatcher patcher(call_address, call_size_in_words);
- patcher.masm()->Call(deopt_entry, RelocInfo::NONE);
+ patcher.masm()->Call(deopt_entry, RelocInfo::NONE32);
ASSERT(prev_call_address == NULL ||
call_address >= prev_call_address + patch_size());
ASSERT(call_address + patch_size() <= code->instruction_end());
@@ -86,781 +75,9 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
prev_call_address = call_address;
#endif
}
-
- Isolate* isolate = code->GetIsolate();
-
- // Add the deoptimizing code to the list.
- DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
- DeoptimizerData* data = isolate->deoptimizer_data();
- node->set_next(data->deoptimizing_code_list_);
- data->deoptimizing_code_list_ = node;
-
- // We might be in the middle of incremental marking with compaction.
- // Tell collector to treat this code object in a special way and
- // ignore all slots that might have been recorded on it.
- isolate->heap()->mark_compact_collector()->InvalidateCode(code);
-
- ReplaceCodeForRelatedFunctions(function, code);
-
- if (FLAG_trace_deopt) {
- PrintF("[forced deoptimization: ");
- function->PrintName();
- PrintF(" / %x]\n", reinterpret_cast<uint32_t>(function));
-#ifdef DEBUG
- if (FLAG_print_code) {
- code->PrintLn();
- }
-#endif
- }
-}
-
-
-void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
- Address pc_after,
- Code* check_code,
- Code* replacement_code) {
- const int kInstrSize = Assembler::kInstrSize;
- // This structure comes from FullCodeGenerator::EmitStackCheck.
- // The call of the stack guard check has the following form:
- // sltu at, sp, t0 / slt at, a3, zero_reg (in case of count based interrupts)
- // beq at, zero_reg, ok
- // lui t9, <stack guard address> upper
- // ori t9, <stack guard address> lower
- // jalr t9
- // nop
- // ----- pc_after points here
-
- ASSERT(Assembler::IsBeq(Assembler::instr_at(pc_after - 5 * kInstrSize)));
-
- // Replace the sltu instruction with load-imm 1 to at, so beq is not taken.
- CodePatcher patcher(pc_after - 6 * kInstrSize, 1);
- patcher.masm()->addiu(at, zero_reg, 1);
-
- // Replace the stack check address in the load-immediate (lui/ori pair)
- // with the entry address of the replacement code.
- ASSERT(reinterpret_cast<uint32_t>(
- Assembler::target_address_at(pc_after - 4 * kInstrSize)) ==
- reinterpret_cast<uint32_t>(check_code->entry()));
- Assembler::set_target_address_at(pc_after - 4 * kInstrSize,
- replacement_code->entry());
-
- // We patched the code to the following form:
- // addiu at, zero_reg, 1
- // beq at, zero_reg, ok ;; Not changed
- // lui t9, <on-stack replacement address> upper
- // ori t9, <on-stack replacement address> lower
- // jalr t9 ;; Not changed
- // nop ;; Not changed
- // ----- pc_after points here
-
- unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
- unoptimized_code, pc_after - 4 * kInstrSize, replacement_code);
}
-void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
- Address pc_after,
- Code* check_code,
- Code* replacement_code) {
- // Exact opposite of the function above.
- const int kInstrSize = Assembler::kInstrSize;
- ASSERT(Assembler::IsAddImmediate(
- Assembler::instr_at(pc_after - 6 * kInstrSize)));
- ASSERT(Assembler::IsBeq(Assembler::instr_at(pc_after - 5 * kInstrSize)));
-
- // Restore the sltu instruction so beq can be taken again.
- CodePatcher patcher(pc_after - 6 * kInstrSize, 1);
- if (FLAG_count_based_interrupts) {
- patcher.masm()->slt(at, a3, zero_reg);
- } else {
- patcher.masm()->sltu(at, sp, t0);
- }
-
- // Replace the on-stack replacement address in the load-immediate (lui/ori
- // pair) with the entry address of the normal stack-check code.
- ASSERT(reinterpret_cast<uint32_t>(
- Assembler::target_address_at(pc_after - 4 * kInstrSize)) ==
- reinterpret_cast<uint32_t>(replacement_code->entry()));
- Assembler::set_target_address_at(pc_after - 4 * kInstrSize,
- check_code->entry());
-
- check_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
- unoptimized_code, pc_after - 4 * kInstrSize, check_code);
-}
-
-
-static int LookupBailoutId(DeoptimizationInputData* data, BailoutId ast_id) {
- ByteArray* translations = data->TranslationByteArray();
- int length = data->DeoptCount();
- for (int i = 0; i < length; i++) {
- if (data->AstId(i) == ast_id) {
- TranslationIterator it(translations, data->TranslationIndex(i)->value());
- int value = it.Next();
- ASSERT(Translation::BEGIN == static_cast<Translation::Opcode>(value));
- // Read the number of frames.
- value = it.Next();
- if (value == 1) return i;
- }
- }
- UNREACHABLE();
- return -1;
-}
-
-
-void Deoptimizer::DoComputeOsrOutputFrame() {
- DeoptimizationInputData* data = DeoptimizationInputData::cast(
- optimized_code_->deoptimization_data());
- unsigned ast_id = data->OsrAstId()->value();
-
- int bailout_id = LookupBailoutId(data, BailoutId(ast_id));
- unsigned translation_index = data->TranslationIndex(bailout_id)->value();
- ByteArray* translations = data->TranslationByteArray();
-
- TranslationIterator iterator(translations, translation_index);
- Translation::Opcode opcode =
- static_cast<Translation::Opcode>(iterator.Next());
- ASSERT(Translation::BEGIN == opcode);
- USE(opcode);
- int count = iterator.Next();
- iterator.Skip(1); // Drop JS frame count.
- ASSERT(count == 1);
- USE(count);
-
- opcode = static_cast<Translation::Opcode>(iterator.Next());
- USE(opcode);
- ASSERT(Translation::JS_FRAME == opcode);
- unsigned node_id = iterator.Next();
- USE(node_id);
- ASSERT(node_id == ast_id);
- int closure_id = iterator.Next();
- USE(closure_id);
- ASSERT_EQ(Translation::kSelfLiteralId, closure_id);
- unsigned height = iterator.Next();
- unsigned height_in_bytes = height * kPointerSize;
- USE(height_in_bytes);
-
- unsigned fixed_size = ComputeFixedSize(function_);
- unsigned input_frame_size = input_->GetFrameSize();
- ASSERT(fixed_size + height_in_bytes == input_frame_size);
-
- unsigned stack_slot_size = optimized_code_->stack_slots() * kPointerSize;
- unsigned outgoing_height = data->ArgumentsStackHeight(bailout_id)->value();
- unsigned outgoing_size = outgoing_height * kPointerSize;
- unsigned output_frame_size = fixed_size + stack_slot_size + outgoing_size;
- ASSERT(outgoing_size == 0); // OSR does not happen in the middle of a call.
-
- if (FLAG_trace_osr) {
- PrintF("[on-stack replacement: begin 0x%08" V8PRIxPTR " ",
- reinterpret_cast<intptr_t>(function_));
- function_->PrintName();
- PrintF(" => node=%u, frame=%d->%d]\n",
- ast_id,
- input_frame_size,
- output_frame_size);
- }
-
- // There's only one output frame in the OSR case.
- output_count_ = 1;
- output_ = new FrameDescription*[1];
- output_[0] = new(output_frame_size) FrameDescription(
- output_frame_size, function_);
- output_[0]->SetFrameType(StackFrame::JAVA_SCRIPT);
-
- // Clear the incoming parameters in the optimized frame to avoid
- // confusing the garbage collector.
- unsigned output_offset = output_frame_size - kPointerSize;
- int parameter_count = function_->shared()->formal_parameter_count() + 1;
- for (int i = 0; i < parameter_count; ++i) {
- output_[0]->SetFrameSlot(output_offset, 0);
- output_offset -= kPointerSize;
- }
-
- // Translate the incoming parameters. This may overwrite some of the
- // incoming argument slots we've just cleared.
- int input_offset = input_frame_size - kPointerSize;
- bool ok = true;
- int limit = input_offset - (parameter_count * kPointerSize);
- while (ok && input_offset > limit) {
- ok = DoOsrTranslateCommand(&iterator, &input_offset);
- }
-
- // There are no translation commands for the caller's pc and fp, the
- // context, and the function. Set them up explicitly.
- for (int i = StandardFrameConstants::kCallerPCOffset;
- ok && i >= StandardFrameConstants::kMarkerOffset;
- i -= kPointerSize) {
- uint32_t input_value = input_->GetFrameSlot(input_offset);
- if (FLAG_trace_osr) {
- const char* name = "UNKNOWN";
- switch (i) {
- case StandardFrameConstants::kCallerPCOffset:
- name = "caller's pc";
- break;
- case StandardFrameConstants::kCallerFPOffset:
- name = "fp";
- break;
- case StandardFrameConstants::kContextOffset:
- name = "context";
- break;
- case StandardFrameConstants::kMarkerOffset:
- name = "function";
- break;
- }
- PrintF(" [sp + %d] <- 0x%08x ; [sp + %d] (fixed part - %s)\n",
- output_offset,
- input_value,
- input_offset,
- name);
- }
-
- output_[0]->SetFrameSlot(output_offset, input_->GetFrameSlot(input_offset));
- input_offset -= kPointerSize;
- output_offset -= kPointerSize;
- }
-
- // Translate the rest of the frame.
- while (ok && input_offset >= 0) {
- ok = DoOsrTranslateCommand(&iterator, &input_offset);
- }
-
- // If translation of any command failed, continue using the input frame.
- if (!ok) {
- delete output_[0];
- output_[0] = input_;
- output_[0]->SetPc(reinterpret_cast<uint32_t>(from_));
- } else {
- // Set up the frame pointer and the context pointer.
- output_[0]->SetRegister(fp.code(), input_->GetRegister(fp.code()));
- output_[0]->SetRegister(cp.code(), input_->GetRegister(cp.code()));
-
- unsigned pc_offset = data->OsrPcOffset()->value();
- uint32_t pc = reinterpret_cast<uint32_t>(
- optimized_code_->entry() + pc_offset);
- output_[0]->SetPc(pc);
- }
- Code* continuation = isolate_->builtins()->builtin(Builtins::kNotifyOSR);
- output_[0]->SetContinuation(
- reinterpret_cast<uint32_t>(continuation->entry()));
-
- if (FLAG_trace_osr) {
- PrintF("[on-stack replacement translation %s: 0x%08" V8PRIxPTR " ",
- ok ? "finished" : "aborted",
- reinterpret_cast<intptr_t>(function_));
- function_->PrintName();
- PrintF(" => pc=0x%0x]\n", output_[0]->GetPc());
- }
-}
-
-
-void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
- int frame_index) {
- JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
- unsigned height = iterator->Next();
- unsigned height_in_bytes = height * kPointerSize;
- if (FLAG_trace_deopt) {
- PrintF(" translating arguments adaptor => height=%d\n", height_in_bytes);
- }
-
- unsigned fixed_frame_size = ArgumentsAdaptorFrameConstants::kFrameSize;
- unsigned output_frame_size = height_in_bytes + fixed_frame_size;
-
- // Allocate and store the output frame description.
- FrameDescription* output_frame =
- new(output_frame_size) FrameDescription(output_frame_size, function);
- output_frame->SetFrameType(StackFrame::ARGUMENTS_ADAPTOR);
-
- // Arguments adaptor can not be topmost or bottommost.
- ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
- ASSERT(output_[frame_index] == NULL);
- output_[frame_index] = output_frame;
-
- // The top address of the frame is computed from the previous
- // frame's top and this frame's size.
- uint32_t top_address;
- top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
- output_frame->SetTop(top_address);
-
- // Compute the incoming parameter translation.
- int parameter_count = height;
- unsigned output_offset = output_frame_size;
- for (int i = 0; i < parameter_count; ++i) {
- output_offset -= kPointerSize;
- DoTranslateCommand(iterator, frame_index, output_offset);
- }
-
- // Read caller's PC from the previous frame.
- output_offset -= kPointerSize;
- intptr_t callers_pc = output_[frame_index - 1]->GetPc();
- output_frame->SetFrameSlot(output_offset, callers_pc);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
- top_address + output_offset, output_offset, callers_pc);
- }
-
- // Read caller's FP from the previous frame, and set this frame's FP.
- output_offset -= kPointerSize;
- intptr_t value = output_[frame_index - 1]->GetFp();
- output_frame->SetFrameSlot(output_offset, value);
- intptr_t fp_value = top_address + output_offset;
- output_frame->SetFp(fp_value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
- fp_value, output_offset, value);
- }
-
- // A marker value is used in place of the context.
- output_offset -= kPointerSize;
- intptr_t context = reinterpret_cast<intptr_t>(
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- output_frame->SetFrameSlot(output_offset, context);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context (adaptor sentinel)\n",
- top_address + output_offset, output_offset, context);
- }
-
- // The function was mentioned explicitly in the ARGUMENTS_ADAPTOR_FRAME.
- output_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(function);
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function\n",
- top_address + output_offset, output_offset, value);
- }
-
- // Number of incoming arguments.
- output_offset -= kPointerSize;
- value = reinterpret_cast<uint32_t>(Smi::FromInt(height - 1));
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; argc (%d)\n",
- top_address + output_offset, output_offset, value, height - 1);
- }
-
- ASSERT(0 == output_offset);
-
- Builtins* builtins = isolate_->builtins();
- Code* adaptor_trampoline =
- builtins->builtin(Builtins::kArgumentsAdaptorTrampoline);
- uint32_t pc = reinterpret_cast<uint32_t>(
- adaptor_trampoline->instruction_start() +
- isolate_->heap()->arguments_adaptor_deopt_pc_offset()->value());
- output_frame->SetPc(pc);
-}
-
-
-void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
- int frame_index) {
- Builtins* builtins = isolate_->builtins();
- Code* construct_stub = builtins->builtin(Builtins::kJSConstructStubGeneric);
- JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
- unsigned height = iterator->Next();
- unsigned height_in_bytes = height * kPointerSize;
- if (FLAG_trace_deopt) {
- PrintF(" translating construct stub => height=%d\n", height_in_bytes);
- }
-
- unsigned fixed_frame_size = 8 * kPointerSize;
- unsigned output_frame_size = height_in_bytes + fixed_frame_size;
-
- // Allocate and store the output frame description.
- FrameDescription* output_frame =
- new(output_frame_size) FrameDescription(output_frame_size, function);
- output_frame->SetFrameType(StackFrame::CONSTRUCT);
-
- // Construct stub can not be topmost or bottommost.
- ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
- ASSERT(output_[frame_index] == NULL);
- output_[frame_index] = output_frame;
-
- // The top address of the frame is computed from the previous
- // frame's top and this frame's size.
- uint32_t top_address;
- top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
- output_frame->SetTop(top_address);
-
- // Compute the incoming parameter translation.
- int parameter_count = height;
- unsigned output_offset = output_frame_size;
- for (int i = 0; i < parameter_count; ++i) {
- output_offset -= kPointerSize;
- DoTranslateCommand(iterator, frame_index, output_offset);
- }
-
- // Read caller's PC from the previous frame.
- output_offset -= kPointerSize;
- intptr_t callers_pc = output_[frame_index - 1]->GetPc();
- output_frame->SetFrameSlot(output_offset, callers_pc);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
- top_address + output_offset, output_offset, callers_pc);
- }
-
- // Read caller's FP from the previous frame, and set this frame's FP.
- output_offset -= kPointerSize;
- intptr_t value = output_[frame_index - 1]->GetFp();
- output_frame->SetFrameSlot(output_offset, value);
- intptr_t fp_value = top_address + output_offset;
- output_frame->SetFp(fp_value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
- fp_value, output_offset, value);
- }
-
- // The context can be gotten from the previous frame.
- output_offset -= kPointerSize;
- value = output_[frame_index - 1]->GetContext();
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context\n",
- top_address + output_offset, output_offset, value);
- }
-
- // A marker value is used in place of the function.
- output_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::CONSTRUCT));
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function (construct sentinel)\n",
- top_address + output_offset, output_offset, value);
- }
-
- // The output frame reflects a JSConstructStubGeneric frame.
- output_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(construct_stub);
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; code object\n",
- top_address + output_offset, output_offset, value);
- }
-
- // Number of incoming arguments.
- output_offset -= kPointerSize;
- value = reinterpret_cast<uint32_t>(Smi::FromInt(height - 1));
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; argc (%d)\n",
- top_address + output_offset, output_offset, value, height - 1);
- }
-
- // Constructor function being invoked by the stub.
- output_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(function);
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; constructor function\n",
- top_address + output_offset, output_offset, value);
- }
-
- // The newly allocated object was passed as receiver in the artificial
- // constructor stub environment created by HEnvironment::CopyForInlining().
- output_offset -= kPointerSize;
- value = output_frame->GetFrameSlot(output_frame_size - kPointerSize);
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; allocated receiver\n",
- top_address + output_offset, output_offset, value);
- }
-
- ASSERT(0 == output_offset);
-
- uint32_t pc = reinterpret_cast<uint32_t>(
- construct_stub->instruction_start() +
- isolate_->heap()->construct_stub_deopt_pc_offset()->value());
- output_frame->SetPc(pc);
-}
-
-
-void Deoptimizer::DoComputeAccessorStubFrame(TranslationIterator* iterator,
- int frame_index,
- bool is_setter_stub_frame) {
- JSFunction* accessor = JSFunction::cast(ComputeLiteral(iterator->Next()));
- // The receiver (and the implicit return value, if any) are expected in
- // registers by the LoadIC/StoreIC, so they don't belong to the output stack
- // frame. This means that we have to use a height of 0.
- unsigned height = 0;
- unsigned height_in_bytes = height * kPointerSize;
- const char* kind = is_setter_stub_frame ? "setter" : "getter";
- if (FLAG_trace_deopt) {
- PrintF(" translating %s stub => height=%u\n", kind, height_in_bytes);
- }
-
- // We need 5 stack entries from StackFrame::INTERNAL (ra, fp, cp, frame type,
- // code object, see MacroAssembler::EnterFrame). For a setter stub frame we
- // need one additional entry for the implicit return value, see
- // StoreStubCompiler::CompileStoreViaSetter.
- unsigned fixed_frame_entries = 5 + (is_setter_stub_frame ? 1 : 0);
- unsigned fixed_frame_size = fixed_frame_entries * kPointerSize;
- unsigned output_frame_size = height_in_bytes + fixed_frame_size;
-
- // Allocate and store the output frame description.
- FrameDescription* output_frame =
- new(output_frame_size) FrameDescription(output_frame_size, accessor);
- output_frame->SetFrameType(StackFrame::INTERNAL);
-
- // A frame for an accessor stub can not be the topmost or bottommost one.
- ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
- ASSERT(output_[frame_index] == NULL);
- output_[frame_index] = output_frame;
-
- // The top address of the frame is computed from the previous frame's top and
- // this frame's size.
- uint32_t top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
- output_frame->SetTop(top_address);
-
- unsigned output_offset = output_frame_size;
-
- // Read caller's PC from the previous frame.
- output_offset -= kPointerSize;
- intptr_t value = output_[frame_index - 1]->GetPc();
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
- " ; caller's pc\n",
- top_address + output_offset, output_offset, value);
- }
-
- // Read caller's FP from the previous frame, and set this frame's FP.
- output_offset -= kPointerSize;
- value = output_[frame_index - 1]->GetFp();
- output_frame->SetFrameSlot(output_offset, value);
- intptr_t fp_value = top_address + output_offset;
- output_frame->SetFp(fp_value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
- " ; caller's fp\n",
- fp_value, output_offset, value);
- }
-
- // The context can be gotten from the previous frame.
- output_offset -= kPointerSize;
- value = output_[frame_index - 1]->GetContext();
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
- " ; context\n",
- top_address + output_offset, output_offset, value);
- }
-
- // A marker value is used in place of the function.
- output_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::INTERNAL));
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
- " ; function (%s sentinel)\n",
- top_address + output_offset, output_offset, value, kind);
- }
-
- // Get Code object from accessor stub.
- output_offset -= kPointerSize;
- Builtins::Name name = is_setter_stub_frame ?
- Builtins::kStoreIC_Setter_ForDeopt :
- Builtins::kLoadIC_Getter_ForDeopt;
- Code* accessor_stub = isolate_->builtins()->builtin(name);
- value = reinterpret_cast<intptr_t>(accessor_stub);
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
- " ; code object\n",
- top_address + output_offset, output_offset, value);
- }
-
- // Skip receiver.
- Translation::Opcode opcode =
- static_cast<Translation::Opcode>(iterator->Next());
- iterator->Skip(Translation::NumberOfOperandsFor(opcode));
-
- if (is_setter_stub_frame) {
- // The implicit return value was part of the artificial setter stub
- // environment.
- output_offset -= kPointerSize;
- DoTranslateCommand(iterator, frame_index, output_offset);
- }
-
- ASSERT(0 == output_offset);
-
- Smi* offset = is_setter_stub_frame ?
- isolate_->heap()->setter_stub_deopt_pc_offset() :
- isolate_->heap()->getter_stub_deopt_pc_offset();
- intptr_t pc = reinterpret_cast<intptr_t>(
- accessor_stub->instruction_start() + offset->value());
- output_frame->SetPc(pc);
-}
-
-
-// This code is very similar to ia32/arm code, but relies on register names
-// (fp, sp) and how the frame is laid out.
-void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
- int frame_index) {
- // Read the ast node id, function, and frame height for this output frame.
- BailoutId node_id = BailoutId(iterator->Next());
- JSFunction* function;
- if (frame_index != 0) {
- function = JSFunction::cast(ComputeLiteral(iterator->Next()));
- } else {
- int closure_id = iterator->Next();
- USE(closure_id);
- ASSERT_EQ(Translation::kSelfLiteralId, closure_id);
- function = function_;
- }
- unsigned height = iterator->Next();
- unsigned height_in_bytes = height * kPointerSize;
- if (FLAG_trace_deopt) {
- PrintF(" translating ");
- function->PrintName();
- PrintF(" => node=%d, height=%d\n", node_id.ToInt(), height_in_bytes);
- }
-
- // The 'fixed' part of the frame consists of the incoming parameters and
- // the part described by JavaScriptFrameConstants.
- unsigned fixed_frame_size = ComputeFixedSize(function);
- unsigned input_frame_size = input_->GetFrameSize();
- unsigned output_frame_size = height_in_bytes + fixed_frame_size;
-
- // Allocate and store the output frame description.
- FrameDescription* output_frame =
- new(output_frame_size) FrameDescription(output_frame_size, function);
- output_frame->SetFrameType(StackFrame::JAVA_SCRIPT);
-
- bool is_bottommost = (0 == frame_index);
- bool is_topmost = (output_count_ - 1 == frame_index);
- ASSERT(frame_index >= 0 && frame_index < output_count_);
- ASSERT(output_[frame_index] == NULL);
- output_[frame_index] = output_frame;
-
- // The top address for the bottommost output frame can be computed from
- // the input frame pointer and the output frame's height. For all
- // subsequent output frames, it can be computed from the previous one's
- // top address and the current frame's size.
- uint32_t top_address;
- if (is_bottommost) {
- // 2 = context and function in the frame.
- top_address =
- input_->GetRegister(fp.code()) - (2 * kPointerSize) - height_in_bytes;
- } else {
- top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
- }
- output_frame->SetTop(top_address);
-
- // Compute the incoming parameter translation.
- int parameter_count = function->shared()->formal_parameter_count() + 1;
- unsigned output_offset = output_frame_size;
- unsigned input_offset = input_frame_size;
- for (int i = 0; i < parameter_count; ++i) {
- output_offset -= kPointerSize;
- DoTranslateCommand(iterator, frame_index, output_offset);
- }
- input_offset -= (parameter_count * kPointerSize);
-
- // There are no translation commands for the caller's pc and fp, the
- // context, and the function. Synthesize their values and set them up
- // explicitly.
- //
- // The caller's pc for the bottommost output frame is the same as in the
- // input frame. For all subsequent output frames, it can be read from the
- // previous one. This frame's pc can be computed from the non-optimized
- // function code and AST id of the bailout.
- output_offset -= kPointerSize;
- input_offset -= kPointerSize;
- intptr_t value;
- if (is_bottommost) {
- value = input_->GetFrameSlot(input_offset);
- } else {
- value = output_[frame_index - 1]->GetPc();
- }
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
- top_address + output_offset, output_offset, value);
- }
-
- // The caller's frame pointer for the bottommost output frame is the same
- // as in the input frame. For all subsequent output frames, it can be
- // read from the previous one. Also compute and set this frame's frame
- // pointer.
- output_offset -= kPointerSize;
- input_offset -= kPointerSize;
- if (is_bottommost) {
- value = input_->GetFrameSlot(input_offset);
- } else {
- value = output_[frame_index - 1]->GetFp();
- }
- output_frame->SetFrameSlot(output_offset, value);
- intptr_t fp_value = top_address + output_offset;
- ASSERT(!is_bottommost || input_->GetRegister(fp.code()) == fp_value);
- output_frame->SetFp(fp_value);
- if (is_topmost) {
- output_frame->SetRegister(fp.code(), fp_value);
- }
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
- fp_value, output_offset, value);
- }
-
- // For the bottommost output frame the context can be gotten from the input
- // frame. For all subsequent output frames it can be gotten from the function
- // so long as we don't inline functions that need local contexts.
- output_offset -= kPointerSize;
- input_offset -= kPointerSize;
- if (is_bottommost) {
- value = input_->GetFrameSlot(input_offset);
- } else {
- value = reinterpret_cast<intptr_t>(function->context());
- }
- output_frame->SetFrameSlot(output_offset, value);
- output_frame->SetContext(value);
- if (is_topmost) output_frame->SetRegister(cp.code(), value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context\n",
- top_address + output_offset, output_offset, value);
- }
-
- // The function was mentioned explicitly in the BEGIN_FRAME.
- output_offset -= kPointerSize;
- input_offset -= kPointerSize;
- value = reinterpret_cast<uint32_t>(function);
- // The function for the bottommost output frame should also agree with the
- // input frame.
- ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function\n",
- top_address + output_offset, output_offset, value);
- }
-
- // Translate the rest of the frame.
- for (unsigned i = 0; i < height; ++i) {
- output_offset -= kPointerSize;
- DoTranslateCommand(iterator, frame_index, output_offset);
- }
- ASSERT(0 == output_offset);
-
- // Compute this frame's PC, state, and continuation.
- Code* non_optimized_code = function->shared()->code();
- FixedArray* raw_data = non_optimized_code->deoptimization_data();
- DeoptimizationOutputData* data = DeoptimizationOutputData::cast(raw_data);
- Address start = non_optimized_code->instruction_start();
- unsigned pc_and_state = GetOutputInfo(data, node_id, function->shared());
- unsigned pc_offset = FullCodeGenerator::PcField::decode(pc_and_state);
- uint32_t pc_value = reinterpret_cast<uint32_t>(start + pc_offset);
- output_frame->SetPc(pc_value);
-
- FullCodeGenerator::State state =
- FullCodeGenerator::StateField::decode(pc_and_state);
- output_frame->SetState(Smi::FromInt(state));
-
-
- // Set the continuation for the topmost frame.
- if (is_topmost && bailout_type_ != DEBUGGER) {
- Builtins* builtins = isolate_->builtins();
- Code* continuation = (bailout_type_ == EAGER)
- ? builtins->builtin(Builtins::kNotifyDeoptimized)
- : builtins->builtin(Builtins::kNotifyLazyDeoptimized);
- output_frame->SetContinuation(
- reinterpret_cast<uint32_t>(continuation->entry()));
- }
-}
-
void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
// Set the register values. The values are not important as there are no
// callee saved registers in JavaScript frames, so all registers are
@@ -871,7 +88,7 @@ void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
}
input_->SetRegister(sp.code(), reinterpret_cast<intptr_t>(frame->sp()));
input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));
- for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
+ for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
input_->SetDoubleRegister(i, 0.0);
}
@@ -882,6 +99,32 @@ void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
}
+void Deoptimizer::SetPlatformCompiledStubRegisters(
+ FrameDescription* output_frame, CodeStubInterfaceDescriptor* descriptor) {
+ ApiFunction function(descriptor->deoptimization_handler_);
+ ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_);
+ intptr_t handler = reinterpret_cast<intptr_t>(xref.address());
+ int params = descriptor->environment_length();
+ output_frame->SetRegister(s0.code(), params);
+ output_frame->SetRegister(s1.code(), (params - 1) * kPointerSize);
+ output_frame->SetRegister(s2.code(), handler);
+}
+
+
+void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
+ for (int i = 0; i < DoubleRegister::kMaxNumRegisters; ++i) {
+ double double_value = input_->GetDoubleRegister(i);
+ output_frame->SetDoubleRegister(i, double_value);
+ }
+}
+
+
+bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
+ // There is no dynamic alignment padding on MIPS in the input frame.
+ return false;
+}
+
+
#define __ masm()->
@@ -890,9 +133,6 @@ void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
void Deoptimizer::EntryGenerator::Generate() {
GeneratePrologue();
- Isolate* isolate = masm()->isolate();
-
- CpuFeatures::Scope scope(FPU);
// Unlike on ARM we don't save all the registers, just the useful ones.
// For the rest, there are gaps on the stack, so the offsets remain the same.
const int kNumberOfRegisters = Register::kNumRegisters;
@@ -901,11 +141,11 @@ void Deoptimizer::EntryGenerator::Generate() {
RegList saved_regs = restored_regs | sp.bit() | ra.bit();
const int kDoubleRegsSize =
- kDoubleSize * FPURegister::kNumAllocatableRegisters;
+ kDoubleSize * FPURegister::kMaxNumAllocatableRegisters;
// Save all FPU registers before messing with them.
__ Subu(sp, sp, Operand(kDoubleRegsSize));
- for (int i = 0; i < FPURegister::kNumAllocatableRegisters; ++i) {
+ for (int i = 0; i < FPURegister::kMaxNumAllocatableRegisters; ++i) {
FPURegister fpu_reg = FPURegister::FromAllocationIndex(i);
int offset = i * kDoubleSize;
__ sdc1(fpu_reg, MemOperand(sp, offset));
@@ -926,22 +166,12 @@ void Deoptimizer::EntryGenerator::Generate() {
// Get the bailout id from the stack.
__ lw(a2, MemOperand(sp, kSavedRegistersAreaSize));
- // Get the address of the location in the code object if possible (a3) (return
+ // Get the address of the location in the code object (a3) (return
// address for lazy deoptimization) and compute the fp-to-sp delta in
// register t0.
- if (type() == EAGER) {
- __ mov(a3, zero_reg);
- // Correct one word for bailout id.
- __ Addu(t0, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
- } else if (type() == OSR) {
- __ mov(a3, ra);
- // Correct one word for bailout id.
- __ Addu(t0, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
- } else {
- __ mov(a3, ra);
- // Correct two words for bailout id and return address.
- __ Addu(t0, sp, Operand(kSavedRegistersAreaSize + (2 * kPointerSize)));
- }
+ __ mov(a3, ra);
+ // Correct one word for bailout id.
+ __ Addu(t0, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
__ Subu(t0, fp, t0);
@@ -953,12 +183,12 @@ void Deoptimizer::EntryGenerator::Generate() {
// a2: bailout id already loaded.
// a3: code address or 0 already loaded.
__ sw(t0, CFunctionArgumentOperand(5)); // Fp-to-sp delta.
- __ li(t1, Operand(ExternalReference::isolate_address()));
+ __ li(t1, Operand(ExternalReference::isolate_address(isolate())));
__ sw(t1, CFunctionArgumentOperand(6)); // Isolate.
// Call Deoptimizer::New().
{
AllowExternalCallThatCantCauseGC scope(masm());
- __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
+ __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate()), 6);
}
// Preserve "deoptimizer" object in register v0 and get the input
@@ -980,23 +210,18 @@ void Deoptimizer::EntryGenerator::Generate() {
}
}
+ int double_regs_offset = FrameDescription::double_registers_offset();
// Copy FPU registers to
// double_registers_[DoubleRegister::kNumAllocatableRegisters]
- int double_regs_offset = FrameDescription::double_registers_offset();
- for (int i = 0; i < FPURegister::kNumAllocatableRegisters; ++i) {
+ for (int i = 0; i < FPURegister::NumAllocatableRegisters(); ++i) {
int dst_offset = i * kDoubleSize + double_regs_offset;
int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
__ ldc1(f0, MemOperand(sp, src_offset));
__ sdc1(f0, MemOperand(a1, dst_offset));
}
- // Remove the bailout id, eventually return address, and the saved registers
- // from the stack.
- if (type() == EAGER || type() == OSR) {
- __ Addu(sp, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
- } else {
- __ Addu(sp, sp, Operand(kSavedRegistersAreaSize + (2 * kPointerSize)));
- }
+ // Remove the bailout id and the saved registers from the stack.
+ __ Addu(sp, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
// Compute a pointer to the unwinding limit in register a2; that is
// the first stack slot not part of the input frame.
@@ -1008,11 +233,14 @@ void Deoptimizer::EntryGenerator::Generate() {
// frame description.
__ Addu(a3, a1, Operand(FrameDescription::frame_content_offset()));
Label pop_loop;
+ Label pop_loop_header;
+ __ Branch(&pop_loop_header);
__ bind(&pop_loop);
__ pop(t0);
__ sw(t0, MemOperand(a3, 0));
- __ Branch(USE_DELAY_SLOT, &pop_loop, ne, a2, Operand(sp));
- __ addiu(a3, a3, sizeof(uint32_t)); // In delay slot.
+ __ addiu(a3, a3, sizeof(uint32_t));
+ __ bind(&pop_loop_header);
+ __ Branch(&pop_loop, ne, a2, Operand(sp));
// Compute the output frame in the deoptimizer.
__ push(a0); // Preserve deoptimizer object across call.
@@ -1022,38 +250,47 @@ void Deoptimizer::EntryGenerator::Generate() {
{
AllowExternalCallThatCantCauseGC scope(masm());
__ CallCFunction(
- ExternalReference::compute_output_frames_function(isolate), 1);
+ ExternalReference::compute_output_frames_function(isolate()), 1);
}
__ pop(a0); // Restore deoptimizer object (class Deoptimizer).
// Replace the current (input) frame with the output frames.
- Label outer_push_loop, inner_push_loop;
- // Outer loop state: a0 = current "FrameDescription** output_",
+ Label outer_push_loop, inner_push_loop,
+ outer_loop_header, inner_loop_header;
+ // Outer loop state: t0 = current "FrameDescription** output_",
// a1 = one past the last FrameDescription**.
__ lw(a1, MemOperand(a0, Deoptimizer::output_count_offset()));
- __ lw(a0, MemOperand(a0, Deoptimizer::output_offset())); // a0 is output_.
+ __ lw(t0, MemOperand(a0, Deoptimizer::output_offset())); // t0 is output_.
__ sll(a1, a1, kPointerSizeLog2); // Count to offset.
- __ addu(a1, a0, a1); // a1 = one past the last FrameDescription**.
+ __ addu(a1, t0, a1); // a1 = one past the last FrameDescription**.
+ __ jmp(&outer_loop_header);
__ bind(&outer_push_loop);
// Inner loop state: a2 = current FrameDescription*, a3 = loop index.
- __ lw(a2, MemOperand(a0, 0)); // output_[ix]
+ __ lw(a2, MemOperand(t0, 0)); // output_[ix]
__ lw(a3, MemOperand(a2, FrameDescription::frame_size_offset()));
+ __ jmp(&inner_loop_header);
__ bind(&inner_push_loop);
__ Subu(a3, a3, Operand(sizeof(uint32_t)));
__ Addu(t2, a2, Operand(a3));
__ lw(t3, MemOperand(t2, FrameDescription::frame_content_offset()));
__ push(t3);
+ __ bind(&inner_loop_header);
__ Branch(&inner_push_loop, ne, a3, Operand(zero_reg));
- __ Addu(a0, a0, Operand(kPointerSize));
- __ Branch(&outer_push_loop, lt, a0, Operand(a1));
+ __ Addu(t0, t0, Operand(kPointerSize));
+ __ bind(&outer_loop_header);
+ __ Branch(&outer_push_loop, lt, t0, Operand(a1));
+ __ lw(a1, MemOperand(a0, Deoptimizer::input_offset()));
+ for (int i = 0; i < FPURegister::kMaxNumAllocatableRegisters; ++i) {
+ const FPURegister fpu_reg = FPURegister::FromAllocationIndex(i);
+ int src_offset = i * kDoubleSize + double_regs_offset;
+ __ ldc1(fpu_reg, MemOperand(a1, src_offset));
+ }
// Push state, pc, and continuation from the last output frame.
- if (type() != OSR) {
- __ lw(t2, MemOperand(a2, FrameDescription::state_offset()));
- __ push(t2);
- }
+ __ lw(t2, MemOperand(a2, FrameDescription::state_offset()));
+ __ push(t2);
__ lw(t2, MemOperand(a2, FrameDescription::pc_offset()));
__ push(t2);
@@ -1083,25 +320,19 @@ void Deoptimizer::EntryGenerator::Generate() {
// Maximum size of a table entry generated below.
-const int Deoptimizer::table_entry_size_ = 9 * Assembler::kInstrSize;
+const int Deoptimizer::table_entry_size_ = 7 * Assembler::kInstrSize;
void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());
- // Create a sequence of deoptimization entries. Note that any
- // registers may be still live.
+ // Create a sequence of deoptimization entries.
+ // Note that registers are still live when jumping to an entry.
Label table_start;
__ bind(&table_start);
for (int i = 0; i < count(); i++) {
Label start;
__ bind(&start);
- if (type() != EAGER) {
- // Emulate ia32 like call by pushing return address to stack.
- __ addiu(sp, sp, -2 * kPointerSize);
- __ sw(ra, MemOperand(sp, 1 * kPointerSize));
- } else {
- __ addiu(sp, sp, -1 * kPointerSize);
- }
+ __ addiu(sp, sp, -1 * kPointerSize);
// Jump over the remaining deopt entries (including this one).
// This code is always reached by calling Jump, which puts the target (label
// start) into t9.
@@ -1124,6 +355,17 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
count() * table_entry_size_);
}
+
+void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
+ SetFrameSlot(offset, value);
+}
+
+
+void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
+ SetFrameSlot(offset, value);
+}
+
+
#undef __
diff --git a/deps/v8/src/mips/disasm-mips.cc b/deps/v8/src/mips/disasm-mips.cc
index 1d40c2c82..691df940f 100644
--- a/deps/v8/src/mips/disasm-mips.cc
+++ b/deps/v8/src/mips/disasm-mips.cc
@@ -50,13 +50,10 @@
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
-#ifndef WIN32
-#include <stdint.h>
-#endif
#include "v8.h"
-#if defined(V8_TARGET_ARCH_MIPS)
+#if V8_TARGET_ARCH_MIPS
#include "mips/constants-mips.h"
#include "disasm.h"
@@ -350,6 +347,10 @@ int Decoder::FormatFPURegister(Instruction* instr, const char* format) {
int reg = instr->FdValue();
PrintFPURegister(reg);
return 2;
+ } else if (format[1] == 'r') { // 'fr: fr register.
+ int reg = instr->FrValue();
+ PrintFPURegister(reg);
+ return 2;
}
UNREACHABLE();
return -1;
@@ -618,6 +619,15 @@ void Decoder::DecodeTypeRegister(Instruction* instr) {
UNREACHABLE();
}
break;
+ case COP1X:
+ switch (instr->FunctionFieldRaw()) {
+ case MADD_D:
+ Format(instr, "madd.d 'fd, 'fr, 'fs, 'ft");
+ break;
+ default:
+ UNREACHABLE();
+ };
+ break;
case SPECIAL:
switch (instr->FunctionFieldRaw()) {
case JR:
@@ -1038,8 +1048,8 @@ void Disassembler::Disassemble(FILE* f, byte* begin, byte* end) {
buffer[0] = '\0';
byte* prev_pc = pc;
pc += d.InstructionDecode(buffer, pc);
- fprintf(f, "%p %08x %s\n",
- prev_pc, *reinterpret_cast<int32_t*>(prev_pc), buffer.start());
+ v8::internal::PrintF(f, "%p %08x %s\n",
+ prev_pc, *reinterpret_cast<int32_t*>(prev_pc), buffer.start());
}
}
diff --git a/deps/v8/src/mips/frames-mips.cc b/deps/v8/src/mips/frames-mips.cc
index faaa0e0f4..1bd511654 100644
--- a/deps/v8/src/mips/frames-mips.cc
+++ b/deps/v8/src/mips/frames-mips.cc
@@ -28,18 +28,23 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_MIPS)
+#if V8_TARGET_ARCH_MIPS
-#include "frames-inl.h"
-#include "mips/assembler-mips-inl.h"
+#include "assembler.h"
+#include "assembler-mips.h"
+#include "assembler-mips-inl.h"
+#include "frames.h"
namespace v8 {
namespace internal {
-Address ExitFrame::ComputeStackPointer(Address fp) {
- return Memory::Address_at(fp + ExitFrameConstants::kSPOffset);
-}
+Register JavaScriptFrame::fp_register() { return v8::internal::fp; }
+Register JavaScriptFrame::context_register() { return cp; }
+
+
+Register StubFailureTrampolineFrame::fp_register() { return v8::internal::fp; }
+Register StubFailureTrampolineFrame::context_register() { return cp; }
} } // namespace v8::internal
diff --git a/deps/v8/src/mips/frames-mips.h b/deps/v8/src/mips/frames-mips.h
index 2ed358a91..437bf3a9f 100644
--- a/deps/v8/src/mips/frames-mips.h
+++ b/deps/v8/src/mips/frames-mips.h
@@ -152,18 +152,6 @@ const int kSafepointRegisterStackIndexMap[kNumRegs] = {
// ----------------------------------------------------
-class StackHandlerConstants : public AllStatic {
- public:
- static const int kNextOffset = 0 * kPointerSize;
- static const int kCodeOffset = 1 * kPointerSize;
- static const int kStateOffset = 2 * kPointerSize;
- static const int kContextOffset = 3 * kPointerSize;
- static const int kFPOffset = 4 * kPointerSize;
-
- static const int kSize = kFPOffset + kPointerSize;
-};
-
-
class EntryFrameConstants : public AllStatic {
public:
static const int kCallerFPOffset = -3 * kPointerSize;
@@ -193,30 +181,6 @@ class ExitFrameConstants : public AllStatic {
};
-class StandardFrameConstants : public AllStatic {
- public:
- // Fixed part of the frame consists of return address, caller fp,
- // context and function.
- static const int kFixedFrameSize = 4 * kPointerSize;
- static const int kExpressionsOffset = -3 * kPointerSize;
- static const int kMarkerOffset = -2 * kPointerSize;
- static const int kContextOffset = -1 * kPointerSize;
- static const int kCallerFPOffset = 0 * kPointerSize;
- static const int kCallerPCOffset = +1 * kPointerSize;
- static const int kCallerSPOffset = +2 * kPointerSize;
-
- // Size of the MIPS 4 32-bit argument slots.
- // This is just an alias with a shorter name. Use it from now on.
- static const int kRArgsSlotsSize = 4 * kPointerSize;
- static const int kRegularArgsSlotsSize = kRArgsSlotsSize;
-
- // JS argument slots size.
- static const int kJSArgsSlotsSize = 0 * kPointerSize;
- // Assembly builtins argument slots size.
- static const int kBArgsSlotsSize = 0 * kPointerSize;
-};
-
-
class JavaScriptFrameConstants : public AllStatic {
public:
// FP-relative.
@@ -232,14 +196,30 @@ class JavaScriptFrameConstants : public AllStatic {
class ArgumentsAdaptorFrameConstants : public AllStatic {
public:
+ // FP-relative.
static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset;
+
static const int kFrameSize =
StandardFrameConstants::kFixedFrameSize + kPointerSize;
};
+class ConstructFrameConstants : public AllStatic {
+ public:
+ // FP-relative.
+ static const int kImplicitReceiverOffset = -6 * kPointerSize;
+ static const int kConstructorOffset = -5 * kPointerSize;
+ static const int kLengthOffset = -4 * kPointerSize;
+ static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
+
+ static const int kFrameSize =
+ StandardFrameConstants::kFixedFrameSize + 4 * kPointerSize;
+};
+
+
class InternalFrameConstants : public AllStatic {
public:
+ // FP-relative.
static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
};
@@ -250,6 +230,11 @@ inline Object* JavaScriptFrame::function_slot_object() const {
}
+inline void StackHandler::SetFp(Address slot, Address fp) {
+ Memory::Address_at(slot) = fp;
+}
+
+
} } // namespace v8::internal
#endif
diff --git a/deps/v8/src/mips/full-codegen-mips.cc b/deps/v8/src/mips/full-codegen-mips.cc
index 3e89fb43b..cbd078812 100644
--- a/deps/v8/src/mips/full-codegen-mips.cc
+++ b/deps/v8/src/mips/full-codegen-mips.cc
@@ -27,7 +27,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_MIPS)
+#if V8_TARGET_ARCH_MIPS
// Note on Mips implementation:
//
@@ -138,8 +138,8 @@ void FullCodeGenerator::Generate() {
CompilationInfo* info = info_;
handler_table_ =
isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
- profiling_counter_ = isolate()->factory()->NewJSGlobalPropertyCell(
- Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget)));
+ profiling_counter_ = isolate()->factory()->NewCell(
+ Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
SetFunctionPosition(function());
Comment cmnt(masm_, "[ function compiled by full code generator");
@@ -147,7 +147,7 @@ void FullCodeGenerator::Generate() {
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
- info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
+ info->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
__ stop("stop-at");
}
#endif
@@ -170,20 +170,19 @@ void FullCodeGenerator::Generate() {
// the frame (that is done below).
FrameScope frame_scope(masm_, StackFrame::MANUAL);
- int locals_count = info->scope()->num_stack_slots();
-
- __ Push(ra, fp, cp, a1);
- if (locals_count > 0) {
- // Load undefined value here, so the value is ready for the loop
- // below.
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- }
- // Adjust fp to point to caller's fp.
- __ Addu(fp, sp, Operand(2 * kPointerSize));
+ info->set_prologue_offset(masm_->pc_offset());
+ __ Prologue(BUILD_FUNCTION_FRAME);
+ info->AddNoFrameRange(0, masm_->pc_offset());
{ Comment cmnt(masm_, "[ Allocate locals");
- for (int i = 0; i < locals_count; i++) {
- __ push(at);
+ int locals_count = info->scope()->num_stack_slots();
+ // Generators allocate locals, if any, in context slots.
+ ASSERT(!info->function()->is_generator() || locals_count == 0);
+ if (locals_count > 0) {
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ for (int i = 0; i < locals_count; i++) {
+ __ push(at);
+ }
}
}
@@ -294,8 +293,7 @@ void FullCodeGenerator::Generate() {
Label ok;
__ LoadRoot(t0, Heap::kStackLimitRootIndex);
__ Branch(&ok, hs, sp, Operand(t0));
- StackCheckStub stub;
- __ CallStub(&stub);
+ __ Call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
__ bind(&ok);
}
@@ -323,9 +321,9 @@ void FullCodeGenerator::ClearAccumulator() {
void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) {
__ li(a2, Operand(profiling_counter_));
- __ lw(a3, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
+ __ lw(a3, FieldMemOperand(a2, Cell::kValueOffset));
__ Subu(a3, a3, Operand(Smi::FromInt(delta)));
- __ sw(a3, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
+ __ sw(a3, FieldMemOperand(a2, Cell::kValueOffset));
}
@@ -341,49 +339,37 @@ void FullCodeGenerator::EmitProfilingCounterReset() {
}
__ li(a2, Operand(profiling_counter_));
__ li(a3, Operand(Smi::FromInt(reset_value)));
- __ sw(a3, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
+ __ sw(a3, FieldMemOperand(a2, Cell::kValueOffset));
}
-void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
- Label* back_edge_target) {
+void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
+ Label* back_edge_target) {
// The generated code is used in Deoptimizer::PatchStackCheckCodeAt so we need
// to make sure it is constant. Branch may emit a skip-or-jump sequence
// instead of the normal Branch. It seems that the "skip" part of that
// sequence is about as long as this Branch would be so it is safe to ignore
// that.
Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
- Comment cmnt(masm_, "[ Stack check");
+ Comment cmnt(masm_, "[ Back edge bookkeeping");
Label ok;
- if (FLAG_count_based_interrupts) {
- int weight = 1;
- if (FLAG_weighted_back_edges) {
- ASSERT(back_edge_target->is_bound());
- int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
- weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kBackEdgeDistanceUnit));
- }
- EmitProfilingCounterDecrement(weight);
- __ slt(at, a3, zero_reg);
- __ beq(at, zero_reg, &ok);
- // CallStub will emit a li t9 first, so it is safe to use the delay slot.
- InterruptStub stub;
- __ CallStub(&stub);
- } else {
- __ LoadRoot(t0, Heap::kStackLimitRootIndex);
- __ sltu(at, sp, t0);
- __ beq(at, zero_reg, &ok);
- // CallStub will emit a li t9 first, so it is safe to use the delay slot.
- StackCheckStub stub;
- __ CallStub(&stub);
+ int weight = 1;
+ if (FLAG_weighted_back_edges) {
+ ASSERT(back_edge_target->is_bound());
+ int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
+ weight = Min(kMaxBackEdgeWeight,
+ Max(1, distance / kCodeSizeMultiplier));
}
+ EmitProfilingCounterDecrement(weight);
+ __ slt(at, a3, zero_reg);
+ __ beq(at, zero_reg, &ok);
+ // Call will emit a li t9 first, so it is safe to use the delay slot.
+ __ Call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET);
// Record a mapping of this PC offset to the OSR id. This is used to find
// the AST id from the unoptimized code in order to use it as a key into
// the deoptimization input data found in the optimized code.
- RecordStackCheck(stmt->OsrEntryId());
- if (FLAG_count_based_interrupts) {
- EmitProfilingCounterReset();
- }
+ RecordBackEdge(stmt->OsrEntryId());
+ EmitProfilingCounterReset();
__ bind(&ok);
PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
@@ -414,7 +400,7 @@ void FullCodeGenerator::EmitReturnSequence() {
} else if (FLAG_weighted_back_edges) {
int distance = masm_->pc_offset();
weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kBackEdgeDistanceUnit));
+ Max(1, distance / kCodeSizeMultiplier));
}
EmitProfilingCounterDecrement(weight);
Label ok;
@@ -425,8 +411,8 @@ void FullCodeGenerator::EmitReturnSequence() {
__ push(a2);
__ CallRuntime(Runtime::kOptimizeFunctionOnNextCall, 1);
} else {
- InterruptStub stub;
- __ CallStub(&stub);
+ __ Call(isolate()->builtins()->InterruptCheck(),
+ RelocInfo::CODE_TARGET);
}
__ pop(v0);
EmitProfilingCounterReset();
@@ -447,9 +433,11 @@ void FullCodeGenerator::EmitReturnSequence() {
CodeGenerator::RecordPositions(masm_, function()->end_position() - 1);
__ RecordJSReturn();
masm_->mov(sp, fp);
+ int no_frame_start = masm_->pc_offset();
masm_->MultiPop(static_cast<RegList>(fp.bit() | ra.bit()));
masm_->Addu(sp, sp, Operand(sp_delta));
masm_->Jump(ra);
+ info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
}
#ifdef DEBUG
@@ -683,17 +671,10 @@ void FullCodeGenerator::DoTest(Expression* condition,
Label* if_true,
Label* if_false,
Label* fall_through) {
- if (CpuFeatures::IsSupported(FPU)) {
- ToBooleanStub stub(result_register());
- __ CallStub(&stub);
- __ mov(at, zero_reg);
- } else {
- // Call the runtime to find the boolean value of the source and then
- // translate it into control flow to the pair of labels.
- __ push(result_register());
- __ CallRuntime(Runtime::kToBool, 1);
- __ LoadRoot(at, Heap::kFalseValueRootIndex);
- }
+ __ mov(a0, result_register());
+ Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
+ CallIC(ic, RelocInfo::CODE_TARGET, condition->test_id());
+ __ mov(at, zero_reg);
Split(ne, v0, Operand(at), if_true, if_false, fall_through);
}
@@ -798,10 +779,10 @@ void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
// Check that we're not inside a with or catch context.
__ lw(a1, FieldMemOperand(cp, HeapObject::kMapOffset));
__ LoadRoot(t0, Heap::kWithContextMapRootIndex);
- __ Check(ne, "Declaration in with context.",
+ __ Check(ne, kDeclarationInWithContext,
a1, Operand(t0));
__ LoadRoot(t0, Heap::kCatchContextMapRootIndex);
- __ Check(ne, "Declaration in catch context.",
+ __ Check(ne, kDeclarationInCatchContext,
a1, Operand(t0));
}
}
@@ -929,34 +910,33 @@ void FullCodeGenerator::VisitFunctionDeclaration(
void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
- VariableProxy* proxy = declaration->proxy();
- Variable* variable = proxy->var();
- Handle<JSModule> instance = declaration->module()->interface()->Instance();
- ASSERT(!instance.is_null());
+ Variable* variable = declaration->proxy()->var();
+ ASSERT(variable->location() == Variable::CONTEXT);
+ ASSERT(variable->interface()->IsFrozen());
- switch (variable->location()) {
- case Variable::UNALLOCATED: {
- Comment cmnt(masm_, "[ ModuleDeclaration");
- globals_->Add(variable->name(), zone());
- globals_->Add(instance, zone());
- Visit(declaration->module());
- break;
- }
+ Comment cmnt(masm_, "[ ModuleDeclaration");
+ EmitDebugCheckDeclarationContext(variable);
- case Variable::CONTEXT: {
- Comment cmnt(masm_, "[ ModuleDeclaration");
- EmitDebugCheckDeclarationContext(variable);
- __ li(a1, Operand(instance));
- __ sw(a1, ContextOperand(cp, variable->index()));
- Visit(declaration->module());
- break;
- }
+ // Load instance object.
+ __ LoadContext(a1, scope_->ContextChainLength(scope_->GlobalScope()));
+ __ lw(a1, ContextOperand(a1, variable->interface()->Index()));
+ __ lw(a1, ContextOperand(a1, Context::EXTENSION_INDEX));
- case Variable::PARAMETER:
- case Variable::LOCAL:
- case Variable::LOOKUP:
- UNREACHABLE();
- }
+ // Assign it.
+ __ sw(a1, ContextOperand(cp, variable->index()));
+ // We know that we have written a module, which is not a smi.
+ __ RecordWriteContextSlot(cp,
+ Context::SlotOffset(variable->index()),
+ a1,
+ a3,
+ kRAHasBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ PrepareForBailoutForId(declaration->proxy()->id(), NO_REGISTERS);
+
+ // Traverse into body.
+ Visit(declaration->module());
}
@@ -999,6 +979,14 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
}
+void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
+ // Call the runtime to declare the modules.
+ __ Push(descriptions);
+ __ CallRuntime(Runtime::kDeclareModules, 1);
+ // Return value is ignored.
+}
+
+
void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
Comment cmnt(masm_, "[ SwitchStatement");
Breakable nested_statement(this, stmt);
@@ -1049,7 +1037,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Record position before stub call for type feedback.
SetSourcePosition(clause->position());
- Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
+ Handle<Code> ic = CompareIC::GetUninitialized(isolate(), Token::EQ_STRICT);
CallIC(ic, RelocInfo::CODE_TARGET, clause->CompareId());
patch_site.EmitPatchInfo();
@@ -1090,9 +1078,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
ForIn loop_statement(this, stmt);
increment_loop_depth();
- // Get the object to enumerate over. Both SpiderMonkey and JSC
- // ignore null and undefined in contrast to the specification; see
- // ECMA-262 section 12.6.4.
+ // Get the object to enumerate over. If the object is null or undefined, skip
+ // over the loop. See ECMA-262 version 5, section 12.6.4.
VisitForAccumulatorValue(stmt->enumerable());
__ mov(a0, result_register()); // Result as param to InvokeBuiltin below.
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
@@ -1171,14 +1158,13 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Label non_proxy;
__ bind(&fixed_array);
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(
- Handle<Object>(
- Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker)));
+ Handle<Cell> cell = isolate()->factory()->NewCell(
+ Handle<Object>(Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker),
+ isolate()));
RecordTypeFeedbackCell(stmt->ForInFeedbackId(), cell);
__ LoadHeapObject(a1, cell);
__ li(a2, Operand(Smi::FromInt(TypeFeedbackCells::kForInSlowCaseMarker)));
- __ sw(a2, FieldMemOperand(a1, JSGlobalPropertyCell::kValueOffset));
+ __ sw(a2, FieldMemOperand(a1, Cell::kValueOffset));
__ li(a1, Operand(Smi::FromInt(1))); // Smi indicates slow check
__ lw(a2, MemOperand(sp, 0 * kPointerSize)); // Get enumerated object
@@ -1251,7 +1237,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ Addu(a0, a0, Operand(Smi::FromInt(1)));
__ push(a0);
- EmitStackCheck(stmt, &loop);
+ EmitBackEdgeBookkeeping(stmt, &loop);
__ Branch(&loop);
// Remove the pointers stored on the stack.
@@ -1265,6 +1251,67 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
}
+void FullCodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
+ Comment cmnt(masm_, "[ ForOfStatement");
+ SetStatementPosition(stmt);
+
+ Iteration loop_statement(this, stmt);
+ increment_loop_depth();
+
+ // var iterator = iterable[@@iterator]()
+ VisitForAccumulatorValue(stmt->assign_iterator());
+ __ mov(a0, v0);
+
+ // As with for-in, skip the loop if the iterator is null or undefined.
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ __ Branch(loop_statement.break_label(), eq, a0, Operand(at));
+ __ LoadRoot(at, Heap::kNullValueRootIndex);
+ __ Branch(loop_statement.break_label(), eq, a0, Operand(at));
+
+ // Convert the iterator to a JS object.
+ Label convert, done_convert;
+ __ JumpIfSmi(a0, &convert);
+ __ GetObjectType(a0, a1, a1);
+ __ Branch(&done_convert, ge, a1, Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ bind(&convert);
+ __ push(a0);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ mov(a0, v0);
+ __ bind(&done_convert);
+ __ push(a0);
+
+ // Loop entry.
+ __ bind(loop_statement.continue_label());
+
+ // result = iterator.next()
+ VisitForEffect(stmt->next_result());
+
+ // if (result.done) break;
+ Label result_not_done;
+ VisitForControl(stmt->result_done(),
+ loop_statement.break_label(),
+ &result_not_done,
+ &result_not_done);
+ __ bind(&result_not_done);
+
+ // each = result.value
+ VisitForEffect(stmt->assign_each());
+
+ // Generate code for the body of the loop.
+ Visit(stmt->body());
+
+ // Check stack before looping.
+ PrepareForBailoutForId(stmt->BackEdgeId(), NO_REGISTERS);
+ EmitBackEdgeBookkeeping(stmt, loop_statement.continue_label());
+ __ jmp(loop_statement.continue_label());
+
+ // Exit and decrement the loop depth.
+ PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+ __ bind(loop_statement.break_label());
+ decrement_loop_depth();
+}
+
+
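The visitor above lowers for-of into an explicit iterator-protocol loop; the comments in the emitted code name the sub-expressions it drives (assign_iterator, next_result, result_done, assign_each). A minimal sketch of that control flow, in illustrative C++ rather than V8 code (the Iterator/IterResult types are stand-ins):

#include <cstddef>
#include <cstdio>
#include <vector>

struct IterResult { bool done; int value; };

struct Iterator {
  const std::vector<int>* data;
  std::size_t pos;
  IterResult next() {                      // stmt->next_result()
    if (pos >= data->size()) return {true, 0};
    return {false, (*data)[pos++]};
  }
};

int main() {
  std::vector<int> iterable{1, 2, 3};
  Iterator iterator{&iterable, 0};         // stmt->assign_iterator()
  for (;;) {
    IterResult result = iterator.next();
    if (result.done) break;                // stmt->result_done()
    int each = result.value;               // stmt->assign_each()
    std::printf("%d\n", each);             // stmt->body()
  }
  return 0;
}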
void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
bool pretenure) {
// Use the fast case closure allocation code that allocates in new
@@ -1278,9 +1325,8 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
!pretenure &&
scope()->is_function_scope() &&
info->num_literals() == 0) {
- FastNewClosureStub stub(info->language_mode());
- __ li(a0, Operand(info));
- __ push(a0);
+ FastNewClosureStub stub(info->language_mode(), info->is_generator());
+ __ li(a2, Operand(info));
__ CallStub(&stub);
} else {
__ li(a0, Operand(info));
@@ -1399,9 +1445,9 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
} else if (var->mode() == DYNAMIC_LOCAL) {
Variable* local = var->local_if_not_shadowed();
__ lw(v0, ContextSlotOperandCheckExtensions(local, slow));
- if (local->mode() == CONST ||
- local->mode() == CONST_HARMONY ||
- local->mode() == LET) {
+ if (local->mode() == LET ||
+ local->mode() == CONST ||
+ local->mode() == CONST_HARMONY) {
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
__ subu(at, v0, at); // Sub as compare: at == 0 on eq.
if (local->mode() == CONST) {
@@ -1555,7 +1601,7 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
__ bind(&materialized);
int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
Label allocated, runtime_allocate;
- __ AllocateInNewSpace(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT);
+ __ Allocate(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT);
__ jmp(&allocated);
__ bind(&runtime_allocate);
@@ -1589,7 +1635,7 @@ void FullCodeGenerator::EmitAccessor(Expression* expression) {
void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
Handle<FixedArray> constant_properties = expr->constant_properties();
- __ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ lw(a3, FieldMemOperand(a3, JSFunction::kLiteralsOffset));
__ li(a2, Operand(Smi::FromInt(expr->literal_index())));
__ li(a1, Operand(constant_properties));
@@ -1600,13 +1646,13 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
? ObjectLiteral::kHasFunction
: ObjectLiteral::kNoFlags;
__ li(a0, Operand(Smi::FromInt(flags)));
- __ Push(a3, a2, a1, a0);
int properties_count = constant_properties->length() / 2;
- if (expr->depth() > 1) {
- __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
- } else if (flags != ObjectLiteral::kFastElements ||
+ if ((FLAG_track_double_fields && expr->may_store_doubles()) ||
+ expr->depth() > 1 || Serializer::enabled() ||
+ flags != ObjectLiteral::kFastElements ||
properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
- __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
+ __ Push(a3, a2, a1, a0);
+ __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
} else {
FastCloneShallowObjectStub stub(properties_count);
__ CallStub(&stub);
@@ -1639,11 +1685,11 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
ASSERT(!CompileTimeValue::IsCompileTimeValue(property->value()));
// Fall through.
case ObjectLiteral::Property::COMPUTED:
- if (key->handle()->IsSymbol()) {
+ if (key->value()->IsInternalizedString()) {
if (property->emit_store()) {
VisitForAccumulatorValue(value);
__ mov(a0, result_register());
- __ li(a2, Operand(key->handle()));
+ __ li(a2, Operand(key->value()));
__ lw(a1, MemOperand(sp));
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
@@ -1655,8 +1701,6 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
}
break;
}
- // Fall through.
- case ObjectLiteral::Property::PROTOTYPE:
// Duplicate receiver on stack.
__ lw(a0, MemOperand(sp));
__ push(a0);
@@ -1670,6 +1714,17 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ Drop(3);
}
break;
+ case ObjectLiteral::Property::PROTOTYPE:
+ // Duplicate receiver on stack.
+ __ lw(a0, MemOperand(sp));
+ __ push(a0);
+ VisitForStackValue(value);
+ if (property->emit_store()) {
+ __ CallRuntime(Runtime::kSetPrototype, 2);
+ } else {
+ __ Drop(2);
+ }
+ break;
case ObjectLiteral::Property::GETTER:
accessor_table.lookup(key)->second->getter = value;
break;
@@ -1729,25 +1784,36 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ lw(a3, FieldMemOperand(a3, JSFunction::kLiteralsOffset));
__ li(a2, Operand(Smi::FromInt(expr->literal_index())));
__ li(a1, Operand(constant_elements));
- __ Push(a3, a2, a1);
if (has_fast_elements && constant_elements_values->map() ==
isolate()->heap()->fixed_cow_array_map()) {
FastCloneShallowArrayStub stub(
- FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
+ FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS,
+ DONT_TRACK_ALLOCATION_SITE,
+ length);
__ CallStub(&stub);
__ IncrementCounter(isolate()->counters()->cow_arrays_created_stub(),
1, a1, a2);
} else if (expr->depth() > 1) {
+ __ Push(a3, a2, a1);
__ CallRuntime(Runtime::kCreateArrayLiteral, 3);
- } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
+ } else if (Serializer::enabled() ||
+ length > FastCloneShallowArrayStub::kMaximumClonedLength) {
+ __ Push(a3, a2, a1);
__ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
} else {
ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) ||
FLAG_smi_only_arrays);
- FastCloneShallowArrayStub::Mode mode = has_fast_elements
- ? FastCloneShallowArrayStub::CLONE_ELEMENTS
- : FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
- FastCloneShallowArrayStub stub(mode, length);
+ FastCloneShallowArrayStub::Mode mode =
+ FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
+ AllocationSiteMode allocation_site_mode = FLAG_track_allocation_sites
+ ? TRACK_ALLOCATION_SITE : DONT_TRACK_ALLOCATION_SITE;
+
+ if (has_fast_elements) {
+ mode = FastCloneShallowArrayStub::CLONE_ELEMENTS;
+ allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
+ }
+
+ FastCloneShallowArrayStub stub(mode, allocation_site_mode, length);
__ CallStub(&stub);
}
@@ -1759,13 +1825,11 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Expression* subexpr = subexprs->at(i);
// If the subexpression is a literal or a simple materialized literal it
// is already set in the cloned array.
- if (subexpr->AsLiteral() != NULL ||
- CompileTimeValue::IsCompileTimeValue(subexpr)) {
- continue;
- }
+ if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
if (!result_saved) {
- __ push(v0);
+ __ push(v0); // array literal
+ __ Push(Smi::FromInt(expr->literal_index()));
result_saved = true;
}
@@ -1773,7 +1837,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
if (IsFastObjectElementsKind(constant_elements_kind)) {
int offset = FixedArray::kHeaderSize + (i * kPointerSize);
- __ lw(t2, MemOperand(sp)); // Copy of array literal.
+ __ lw(t2, MemOperand(sp, kPointerSize)); // Copy of array literal.
__ lw(a1, FieldMemOperand(t2, JSObject::kElementsOffset));
__ sw(result_register(), FieldMemOperand(a1, offset));
// Update the write barrier for the array store.
@@ -1781,10 +1845,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
kRAHasBeenSaved, kDontSaveFPRegs,
EMIT_REMEMBERED_SET, INLINE_SMI_CHECK);
} else {
- __ lw(a1, MemOperand(sp)); // Copy of array literal.
- __ lw(a2, FieldMemOperand(a1, JSObject::kMapOffset));
__ li(a3, Operand(Smi::FromInt(i)));
- __ li(t0, Operand(Smi::FromInt(expr->literal_index())));
__ mov(a0, result_register());
StoreArrayLiteralElementStub stub;
__ CallStub(&stub);
@@ -1793,6 +1854,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
}
if (result_saved) {
+ __ Pop(); // literal index
context()->PlugTOS();
} else {
context()->Plug(v0);
@@ -1914,11 +1976,289 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
}
+void FullCodeGenerator::VisitYield(Yield* expr) {
+ Comment cmnt(masm_, "[ Yield");
+ // Evaluate yielded value first; the initial iterator definition depends on
+ // this. It stays on the stack while we update the iterator.
+ VisitForStackValue(expr->expression());
+
+ switch (expr->yield_kind()) {
+ case Yield::SUSPEND:
+ // Pop value from top-of-stack slot; box result into result register.
+ EmitCreateIteratorResult(false);
+ __ push(result_register());
+ // Fall through.
+ case Yield::INITIAL: {
+ Label suspend, continuation, post_runtime, resume;
+
+ __ jmp(&suspend);
+
+ __ bind(&continuation);
+ __ jmp(&resume);
+
+ __ bind(&suspend);
+ VisitForAccumulatorValue(expr->generator_object());
+ ASSERT(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
+ __ li(a1, Operand(Smi::FromInt(continuation.pos())));
+ __ sw(a1, FieldMemOperand(v0, JSGeneratorObject::kContinuationOffset));
+ __ sw(cp, FieldMemOperand(v0, JSGeneratorObject::kContextOffset));
+ __ mov(a1, cp);
+ __ RecordWriteField(v0, JSGeneratorObject::kContextOffset, a1, a2,
+ kRAHasBeenSaved, kDontSaveFPRegs);
+ __ Addu(a1, fp, Operand(StandardFrameConstants::kExpressionsOffset));
+ __ Branch(&post_runtime, eq, sp, Operand(a1));
+ __ push(v0); // generator object
+ __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+ __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ bind(&post_runtime);
+ __ pop(result_register());
+ EmitReturnSequence();
+
+ __ bind(&resume);
+ context()->Plug(result_register());
+ break;
+ }
+
+ case Yield::FINAL: {
+ VisitForAccumulatorValue(expr->generator_object());
+ __ li(a1, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorClosed)));
+ __ sw(a1, FieldMemOperand(result_register(),
+ JSGeneratorObject::kContinuationOffset));
+ // Pop value from top-of-stack slot, box result into result register.
+ EmitCreateIteratorResult(true);
+ EmitUnwindBeforeReturn();
+ EmitReturnSequence();
+ break;
+ }
+
+ case Yield::DELEGATING: {
+ VisitForStackValue(expr->generator_object());
+
+ // Initial stack layout is as follows:
+ // [sp + 1 * kPointerSize] iter
+ // [sp + 0 * kPointerSize] g
+
+ Label l_catch, l_try, l_suspend, l_continuation, l_resume;
+ Label l_next, l_call, l_loop;
+ // Initial send value is undefined.
+ __ LoadRoot(a0, Heap::kUndefinedValueRootIndex);
+ __ Branch(&l_next);
+
+ // catch (e) { receiver = iter; f = 'throw'; arg = e; goto l_call; }
+ __ bind(&l_catch);
+ __ mov(a0, v0);
+ handler_table()->set(expr->index(), Smi::FromInt(l_catch.pos()));
+ __ LoadRoot(a2, Heap::kthrow_stringRootIndex); // "throw"
+ __ lw(a3, MemOperand(sp, 1 * kPointerSize)); // iter
+ __ push(a3); // iter
+ __ push(a0); // exception
+ __ jmp(&l_call);
+
+ // try { received = %yield result }
+ // Shuffle the received result above a try handler and yield it without
+ // re-boxing.
+ __ bind(&l_try);
+ __ pop(a0); // result
+ __ PushTryHandler(StackHandler::CATCH, expr->index());
+ const int handler_size = StackHandlerConstants::kSize;
+ __ push(a0); // result
+ __ jmp(&l_suspend);
+ __ bind(&l_continuation);
+ __ mov(a0, v0);
+ __ jmp(&l_resume);
+ __ bind(&l_suspend);
+ const int generator_object_depth = kPointerSize + handler_size;
+ __ lw(a0, MemOperand(sp, generator_object_depth));
+ __ push(a0); // g
+ ASSERT(l_continuation.pos() > 0 && Smi::IsValid(l_continuation.pos()));
+ __ li(a1, Operand(Smi::FromInt(l_continuation.pos())));
+ __ sw(a1, FieldMemOperand(a0, JSGeneratorObject::kContinuationOffset));
+ __ sw(cp, FieldMemOperand(a0, JSGeneratorObject::kContextOffset));
+ __ mov(a1, cp);
+ __ RecordWriteField(a0, JSGeneratorObject::kContextOffset, a1, a2,
+ kRAHasBeenSaved, kDontSaveFPRegs);
+ __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+ __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ pop(v0); // result
+ EmitReturnSequence();
+ __ mov(a0, v0);
+ __ bind(&l_resume); // received in a0
+ __ PopTryHandler();
+
+ // receiver = iter; f = 'next'; arg = received;
+ __ bind(&l_next);
+ __ LoadRoot(a2, Heap::knext_stringRootIndex); // "next"
+ __ lw(a3, MemOperand(sp, 1 * kPointerSize)); // iter
+ __ push(a3); // iter
+ __ push(a0); // received
+
+ // result = receiver[f](arg);
+ __ bind(&l_call);
+ Handle<Code> ic = isolate()->stub_cache()->ComputeKeyedCallInitialize(1);
+ CallIC(ic);
+ __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+
+ // if (!result.done) goto l_try;
+ __ bind(&l_loop);
+ __ mov(a0, v0);
+ __ push(a0); // save result
+ __ LoadRoot(a2, Heap::kdone_stringRootIndex); // "done"
+ Handle<Code> done_ic = isolate()->builtins()->LoadIC_Initialize();
+ CallIC(done_ic); // result.done in v0
+ __ mov(a0, v0);
+ Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
+ CallIC(bool_ic);
+ __ Branch(&l_try, eq, v0, Operand(zero_reg));
+
+ // result.value
+ __ pop(a0); // result
+ __ LoadRoot(a2, Heap::kvalue_stringRootIndex); // "value"
+ Handle<Code> value_ic = isolate()->builtins()->LoadIC_Initialize();
+ CallIC(value_ic); // result.value in v0
+ context()->DropAndPlug(2, v0); // drop iter and g
+ break;
+ }
+ }
+}
+
+
+void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
+ Expression *value,
+ JSGeneratorObject::ResumeMode resume_mode) {
+ // The value stays in a0, and is ultimately read by the resumed generator, as
+ // if the CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. a1
+ // will hold the generator object until the activation has been resumed.
+ VisitForStackValue(generator);
+ VisitForAccumulatorValue(value);
+ __ pop(a1);
+
+ // Check generator state.
+ Label wrong_state, done;
+ __ lw(a3, FieldMemOperand(a1, JSGeneratorObject::kContinuationOffset));
+ STATIC_ASSERT(JSGeneratorObject::kGeneratorExecuting <= 0);
+ STATIC_ASSERT(JSGeneratorObject::kGeneratorClosed <= 0);
+ __ Branch(&wrong_state, le, a3, Operand(zero_reg));
+
+ // Load suspended function and context.
+ __ lw(cp, FieldMemOperand(a1, JSGeneratorObject::kContextOffset));
+ __ lw(t0, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
+
+ // Load receiver and store as the first argument.
+ __ lw(a2, FieldMemOperand(a1, JSGeneratorObject::kReceiverOffset));
+ __ push(a2);
+
+ // Push holes for the rest of the arguments to the generator function.
+ __ lw(a3, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(a3,
+ FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ LoadRoot(a2, Heap::kTheHoleValueRootIndex);
+ Label push_argument_holes, push_frame;
+ __ bind(&push_argument_holes);
+ __ Subu(a3, a3, Operand(Smi::FromInt(1)));
+ __ Branch(&push_frame, lt, a3, Operand(zero_reg));
+ __ push(a2);
+ __ jmp(&push_argument_holes);
+
+ // Enter a new JavaScript frame, and initialize its slots as they were when
+ // the generator was suspended.
+ Label resume_frame;
+ __ bind(&push_frame);
+ __ Call(&resume_frame);
+ __ jmp(&done);
+ __ bind(&resume_frame);
+ __ push(ra); // Return address.
+ __ push(fp); // Caller's frame pointer.
+ __ mov(fp, sp);
+ __ push(cp); // Callee's context.
+ __ push(t0); // Callee's JS Function.
+
+ // Load the operand stack size.
+ __ lw(a3, FieldMemOperand(a1, JSGeneratorObject::kOperandStackOffset));
+ __ lw(a3, FieldMemOperand(a3, FixedArray::kLengthOffset));
+ __ SmiUntag(a3);
+
+ // If we are sending a value and there is no operand stack, we can jump back
+ // in directly.
+ if (resume_mode == JSGeneratorObject::NEXT) {
+ Label slow_resume;
+ __ Branch(&slow_resume, ne, a3, Operand(zero_reg));
+ __ lw(a3, FieldMemOperand(t0, JSFunction::kCodeEntryOffset));
+ __ lw(a2, FieldMemOperand(a1, JSGeneratorObject::kContinuationOffset));
+ __ SmiUntag(a2);
+ __ Addu(a3, a3, Operand(a2));
+ __ li(a2, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting)));
+ __ sw(a2, FieldMemOperand(a1, JSGeneratorObject::kContinuationOffset));
+ __ Jump(a3);
+ __ bind(&slow_resume);
+ }
+
+ // Otherwise, we push holes for the operand stack and call the runtime to fix
+ // up the stack and the handlers.
+ Label push_operand_holes, call_resume;
+ __ bind(&push_operand_holes);
+ __ Subu(a3, a3, Operand(1));
+ __ Branch(&call_resume, lt, a3, Operand(zero_reg));
+ __ push(a2);
+ __ Branch(&push_operand_holes);
+ __ bind(&call_resume);
+ __ push(a1);
+ __ push(result_register());
+ __ Push(Smi::FromInt(resume_mode));
+ __ CallRuntime(Runtime::kResumeJSGeneratorObject, 3);
+ // Not reached: the runtime call returns elsewhere.
+ __ stop("not-reached");
+
+ // Throw error if we attempt to operate on a running generator.
+ __ bind(&wrong_state);
+ __ push(a1);
+ __ CallRuntime(Runtime::kThrowGeneratorStateError, 1);
+
+ __ bind(&done);
+ context()->Plug(result_register());
+}
+
+
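Before rebuilding the frame, the resume path above splits three ways: a non-positive continuation means the generator is executing or closed and throws, an empty operand stack with a plain next() resume jumps straight back into the generated code, and everything else calls Runtime::kResumeJSGeneratorObject to restore the operand stack and handlers. A hedged sketch of that decision (parameter names are illustrative, not the real V8 declarations):

#include <cstddef>

enum ResumePathSketch { WRONG_STATE, FAST_RESUME, SLOW_RESUME };

// continuation is non-positive for executing/closed generators (see the
// STATIC_ASSERTs above), otherwise the saved continuation offset.
ResumePathSketch ChooseResumePath(int continuation,
                                  std::size_t operand_stack_size,
                                  bool resume_is_next) {
  if (continuation <= 0) return WRONG_STATE;                      // throw
  if (resume_is_next && operand_stack_size == 0) return FAST_RESUME;
  return SLOW_RESUME;  // runtime restores operand stack and handlers
}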
+void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
+ Label gc_required;
+ Label allocated;
+
+ Handle<Map> map(isolate()->native_context()->generator_result_map());
+
+ __ Allocate(map->instance_size(), v0, a2, a3, &gc_required, TAG_OBJECT);
+ __ jmp(&allocated);
+
+ __ bind(&gc_required);
+ __ Push(Smi::FromInt(map->instance_size()));
+ __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+ __ lw(context_register(),
+ MemOperand(fp, StandardFrameConstants::kContextOffset));
+
+ __ bind(&allocated);
+ __ li(a1, Operand(map));
+ __ pop(a2);
+ __ li(a3, Operand(isolate()->factory()->ToBoolean(done)));
+ __ li(t0, Operand(isolate()->factory()->empty_fixed_array()));
+ ASSERT_EQ(map->instance_size(), 5 * kPointerSize);
+ __ sw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
+ __ sw(t0, FieldMemOperand(v0, JSObject::kPropertiesOffset));
+ __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
+ __ sw(a2,
+ FieldMemOperand(v0, JSGeneratorObject::kResultValuePropertyOffset));
+ __ sw(a3,
+ FieldMemOperand(v0, JSGeneratorObject::kResultDonePropertyOffset));
+
+ // Only the value field needs a write barrier, as the other values are in the
+ // root set.
+ __ RecordWriteField(v0, JSGeneratorObject::kResultValuePropertyOffset,
+ a2, a3, kRAHasBeenSaved, kDontSaveFPRegs);
+}
+
+
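EmitCreateIteratorResult above fills exactly five pointer-sized fields, which is what the ASSERT_EQ on the instance size checks. A rough stand-in for that layout (plain C++, not the real V8 object model; field names mirror the stores):

struct IteratorResultSketch {
  void* map;         // HeapObject::kMapOffset        <- generator_result_map
  void* properties;  // JSObject::kPropertiesOffset   <- empty_fixed_array
  void* elements;    // JSObject::kElementsOffset     <- empty_fixed_array
  void* value;       // kResultValuePropertyOffset    <- popped yielded value
  void* done;        // kResultDonePropertyOffset     <- ToBoolean(done)
};

static_assert(sizeof(IteratorResultSketch) == 5 * sizeof(void*),
              "mirrors the 5 * kPointerSize instance size asserted above");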
void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Literal* key = prop->key()->AsLiteral();
__ mov(a0, result_register());
- __ li(a2, Operand(key->handle()));
+ __ li(a2, Operand(key->value()));
// Call load IC. It has arguments receiver and property name a0 and a2.
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
CallIC(ic, RelocInfo::CODE_TARGET, prop->PropertyFeedbackId());
@@ -1958,7 +2298,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ bind(&stub_call);
BinaryOpStub stub(op, mode);
- CallIC(stub.GetCode(), RelocInfo::CODE_TARGET,
+ CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET,
expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
__ jmp(&done);
@@ -2042,7 +2382,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
__ pop(a1);
BinaryOpStub stub(op, mode);
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
- CallIC(stub.GetCode(), RelocInfo::CODE_TARGET,
+ CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET,
expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
context()->Plug(v0);
@@ -2050,7 +2390,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
void FullCodeGenerator::EmitAssignment(Expression* expr) {
- // Invalid left-hand sides are rewritten to have a 'throw
+ // Invalid left-hand sides are rewritten by the parser to have a 'throw
// ReferenceError' on the left-hand side.
if (!expr->IsValidLeftHandSide()) {
VisitForEffect(expr);
@@ -2080,7 +2420,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
VisitForAccumulatorValue(prop->obj());
__ mov(a1, result_register());
__ pop(a0); // Restore value.
- __ li(a2, Operand(prop->key()->AsLiteral()->handle()));
+ __ li(a2, Operand(prop->key()->AsLiteral()->value()));
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
: isolate()->builtins()->StoreIC_Initialize_Strict();
@@ -2179,7 +2519,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
// Check for an uninitialized let binding.
__ lw(a2, location);
__ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
- __ Check(eq, "Let binding re-initialization.", a2, Operand(t0));
+ __ Check(eq, kLetBindingReInitialization, a2, Operand(t0));
}
// Perform the assignment.
__ sw(v0, location);
@@ -2211,7 +2551,7 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
// Record source code position before IC call.
SetSourcePosition(expr->position());
__ mov(a0, result_register()); // Load the value.
- __ li(a2, Operand(prop->key()->AsLiteral()->handle()));
+ __ li(a2, Operand(prop->key()->AsLiteral()->value()));
__ pop(a1);
Handle<Code> ic = is_classic_mode()
@@ -2349,14 +2689,13 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
flags = static_cast<CallFunctionFlags>(flags | RECORD_CALL_TARGET);
Handle<Object> uninitialized =
TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
+ Handle<Cell> cell = isolate()->factory()->NewCell(uninitialized);
RecordTypeFeedbackCell(expr->CallFeedbackId(), cell);
__ li(a2, Operand(cell));
CallFunctionStub stub(arg_count, flags);
__ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
- __ CallStub(&stub);
+ __ CallStub(&stub, expr->CallFeedbackId());
RecordJSReturnSite(expr);
// Restore context register.
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2402,7 +2741,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
VariableProxy* proxy = callee->AsVariableProxy();
Property* property = callee->AsProperty();
- if (proxy != NULL && proxy->var()->is_possibly_eval()) {
+ if (proxy != NULL && proxy->var()->is_possibly_eval(isolate())) {
// In a call to eval, we first call %ResolvePossiblyDirectEval to
// resolve the function we need to call and the receiver of the
// call. Then we call the resolved function using the given
@@ -2490,7 +2829,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
}
if (property->key()->IsPropertyName()) {
EmitCallWithIC(expr,
- property->key()->AsLiteral()->handle(),
+ property->key()->AsLiteral()->value(),
RelocInfo::CODE_TARGET);
} else {
EmitKeyedCallWithIC(expr, property->key());
@@ -2544,13 +2883,12 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Record call targets in unoptimized code.
Handle<Object> uninitialized =
TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
+ Handle<Cell> cell = isolate()->factory()->NewCell(uninitialized);
RecordTypeFeedbackCell(expr->CallNewFeedbackId(), cell);
__ li(a2, Operand(cell));
CallConstructStub stub(RECORD_CALL_TARGET);
- __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+ __ Call(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL);
PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
context()->Plug(v0);
}
@@ -2683,7 +3021,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
VisitForAccumulatorValue(args->at(0));
- Label materialize_true, materialize_false;
+ Label materialize_true, materialize_false, skip_lookup;
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
@@ -2695,7 +3033,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
__ lbu(t0, FieldMemOperand(a1, Map::kBitField2Offset));
__ And(t0, t0, 1 << Map::kStringWrapperSafeForDefaultValueOf);
- __ Branch(if_true, ne, t0, Operand(zero_reg));
+ __ Branch(&skip_lookup, ne, t0, Operand(zero_reg));
// Check for fast case object. Generate false result for slow case object.
__ lw(a2, FieldMemOperand(v0, JSObject::kPropertiesOffset));
@@ -2703,7 +3041,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ LoadRoot(t0, Heap::kHashTableMapRootIndex);
__ Branch(if_false, eq, a2, Operand(t0));
- // Look for valueOf symbol in the descriptor array, and indicate false if
+ // Look for valueOf name in the descriptor array, and indicate false if
// found. Since we omit an enumeration index check, if it is added via a
// transition that shares its descriptor array, this is a false positive.
Label entry, loop, done;
@@ -2728,10 +3066,10 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ Addu(a2, a2, t1);
// Loop through all the keys in the descriptor array. If one of these is the
- // symbol valueOf the result is false.
- // The use of t2 to store the valueOf symbol asumes that it is not otherwise
+ // string "valueOf" the result is false.
+ // The use of t2 to store the valueOf string assumes that it is not otherwise
// used in the loop below.
- __ LoadRoot(t2, Heap::kvalue_of_symbolRootIndex);
+ __ li(t2, Operand(isolate()->factory()->value_of_string()));
__ jmp(&entry);
__ bind(&loop);
__ lw(a3, MemOperand(t0, 0));
@@ -2741,6 +3079,14 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ Branch(&loop, ne, t0, Operand(a2));
__ bind(&done);
+
+ // Set the bit in the map to indicate that there is no local valueOf field.
+ __ lbu(a2, FieldMemOperand(a1, Map::kBitField2Offset));
+ __ Or(a2, a2, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
+ __ sb(a2, FieldMemOperand(a1, Map::kBitField2Offset));
+
+ __ bind(&skip_lookup);
+
// If a valueOf property is not found on the object check that its
// prototype is the un-modified String prototype. If not result is false.
__ lw(a2, FieldMemOperand(a1, Map::kPrototypeOffset));
@@ -2749,16 +3095,9 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ lw(a3, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
__ lw(a3, FieldMemOperand(a3, GlobalObject::kNativeContextOffset));
__ lw(a3, ContextOperand(a3, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
- __ Branch(if_false, ne, a2, Operand(a3));
-
- // Set the bit in the map to indicate that it has been checked safe for
- // default valueOf and set true result.
- __ lbu(a2, FieldMemOperand(a1, Map::kBitField2Offset));
- __ Or(a2, a2, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
- __ sb(a2, FieldMemOperand(a1, Map::kBitField2Offset));
- __ jmp(if_true);
-
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(eq, a2, Operand(a3), if_true, if_false, fall_through);
+
context()->Plug(if_true, if_false);
}
@@ -2962,12 +3301,12 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
// Functions have class 'Function'.
__ bind(&function);
- __ LoadRoot(v0, Heap::kfunction_class_symbolRootIndex);
+ __ LoadRoot(v0, Heap::kfunction_class_stringRootIndex);
__ jmp(&done);
// Objects with a non-function constructor have class 'Object'.
__ bind(&non_function_constructor);
- __ LoadRoot(v0, Heap::kObject_symbolRootIndex);
+ __ LoadRoot(v0, Heap::kObject_stringRootIndex);
__ jmp(&done);
// Non-JS objects have class null.
@@ -2991,7 +3330,7 @@ void FullCodeGenerator::EmitLog(CallRuntime* expr) {
// 2 (array): Arguments to the format string.
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(args->length(), 3);
- if (CodeGenerator::ShouldGenerateLog(args->at(0))) {
+ if (CodeGenerator::ShouldGenerateLog(isolate(), args->at(0))) {
VisitForStackValue(args->at(1));
VisitForStackValue(args->at(2));
__ CallRuntime(Runtime::kLog, 2);
@@ -3025,31 +3364,21 @@ void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) {
// Convert 32 random bits in v0 to 0.(32 random bits) in a double
// by computing:
// ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
- if (CpuFeatures::IsSupported(FPU)) {
- __ PrepareCallCFunction(1, a0);
- __ lw(a0, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
- __ lw(a0, FieldMemOperand(a0, GlobalObject::kNativeContextOffset));
- __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
-
- CpuFeatures::Scope scope(FPU);
- // 0x41300000 is the top half of 1.0 x 2^20 as a double.
- __ li(a1, Operand(0x41300000));
- // Move 0x41300000xxxxxxxx (x = random bits in v0) to FPU.
- __ Move(f12, v0, a1);
- // Move 0x4130000000000000 to FPU.
- __ Move(f14, zero_reg, a1);
- // Subtract and store the result in the heap number.
- __ sub_d(f0, f12, f14);
- __ sdc1(f0, FieldMemOperand(s0, HeapNumber::kValueOffset));
- __ mov(v0, s0);
- } else {
- __ PrepareCallCFunction(2, a0);
- __ mov(a0, s0);
- __ lw(a1, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
- __ lw(a1, FieldMemOperand(a1, GlobalObject::kNativeContextOffset));
- __ CallCFunction(
- ExternalReference::fill_heap_number_with_random_function(isolate()), 2);
- }
+ __ PrepareCallCFunction(1, a0);
+ __ lw(a0, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
+ __ lw(a0, FieldMemOperand(a0, GlobalObject::kNativeContextOffset));
+ __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
+
+ // 0x41300000 is the top half of 1.0 x 2^20 as a double.
+ __ li(a1, Operand(0x41300000));
+ // Move 0x41300000xxxxxxxx (x = random bits in v0) to FPU.
+ __ Move(f12, v0, a1);
+ // Move 0x4130000000000000 to FPU.
+ __ Move(f14, zero_reg, a1);
+ // Subtract and store the result in the heap number.
+ __ sub_d(f0, f12, f14);
+ __ sdc1(f0, FieldMemOperand(s0, HeapNumber::kValueOffset));
+ __ mov(v0, s0);
context()->Plug(v0);
}
@@ -3106,7 +3435,7 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
ASSERT_NE(NULL, args->at(1)->AsLiteral());
- Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->handle()));
+ Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->value()));
VisitForAccumulatorValue(args->at(0)); // Load the object.
@@ -3149,18 +3478,97 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitSeqStringSetCharCheck(Register string,
+ Register index,
+ Register value,
+ uint32_t encoding_mask) {
+ __ And(at, index, Operand(kSmiTagMask));
+ __ Check(eq, kNonSmiIndex, at, Operand(zero_reg));
+ __ And(at, value, Operand(kSmiTagMask));
+ __ Check(eq, kNonSmiValue, at, Operand(zero_reg));
+
+ __ lw(at, FieldMemOperand(string, String::kLengthOffset));
+ __ Check(lt, kIndexIsTooLarge, index, Operand(at));
+
+ __ Check(ge, kIndexIsNegative, index, Operand(zero_reg));
+
+ __ lw(at, FieldMemOperand(string, HeapObject::kMapOffset));
+ __ lbu(at, FieldMemOperand(at, Map::kInstanceTypeOffset));
+
+ __ And(at, at, Operand(kStringRepresentationMask | kStringEncodingMask));
+ __ Subu(at, at, Operand(encoding_mask));
+ __ Check(eq, kUnexpectedStringType, at, Operand(zero_reg));
+}
+
+
+void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT_EQ(3, args->length());
+
+ Register string = v0;
+ Register index = a1;
+ Register value = a2;
+
+ VisitForStackValue(args->at(1)); // index
+ VisitForStackValue(args->at(2)); // value
+ __ pop(value);
+ __ pop(index);
+ VisitForAccumulatorValue(args->at(0)); // string
+
+ if (FLAG_debug_code) {
+ static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+ EmitSeqStringSetCharCheck(string, index, value, one_byte_seq_type);
+ }
+
+ __ SmiUntag(value, value);
+ __ Addu(at,
+ string,
+ Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+ __ SmiUntag(index);
+ __ Addu(at, at, index);
+ __ sb(value, MemOperand(at));
+ context()->Plug(string);
+}
+
+
+void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT_EQ(3, args->length());
+
+ Register string = v0;
+ Register index = a1;
+ Register value = a2;
+
+ VisitForStackValue(args->at(1)); // index
+ VisitForStackValue(args->at(2)); // value
+ __ pop(value);
+ __ pop(index);
+ VisitForAccumulatorValue(args->at(0)); // string
+
+ if (FLAG_debug_code) {
+ static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+ EmitSeqStringSetCharCheck(string, index, value, two_byte_seq_type);
+ }
+
+ __ SmiUntag(value, value);
+ __ Addu(at,
+ string,
+ Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ __ Addu(at, at, index);
+ STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+ __ sh(value, MemOperand(at));
+ context()->Plug(string);
+}
+
+
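The two-byte store above adds the still-tagged smi index directly to the string payload address: with a one-bit smi tag of zero, a tagged index equals index * 2, which is exactly the byte offset of a two-byte character, and that is what the STATIC_ASSERT guards. A small self-check of that identity (constants restated locally, not taken from V8 headers):

#include <cassert>
#include <cstdint>

int main() {
  const int kSmiTagSizeSketch = 1;  // matches the STATIC_ASSERT above
  const int kSmiTagSketch = 0;
  for (std::int32_t index = 0; index < 16; ++index) {
    std::int32_t tagged = (index << kSmiTagSizeSketch) | kSmiTagSketch;
    assert(tagged == index * 2);    // smi-tagged index == two-byte char offset
  }
  return 0;
}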
void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
// Load the arguments on the stack and call the runtime function.
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
- if (CpuFeatures::IsSupported(FPU)) {
- MathPowStub stub(MathPowStub::ON_STACK);
- __ CallStub(&stub);
- } else {
- __ CallRuntime(Runtime::kMath_pow, 2);
- }
+ MathPowStub stub(MathPowStub::ON_STACK);
+ __ CallStub(&stub);
context()->Plug(v0);
}
@@ -3198,8 +3606,9 @@ void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(args->length(), 1);
- // Load the argument on the stack and call the stub.
- VisitForStackValue(args->at(0));
+ // Load the argument into a0 and call the stub.
+ VisitForAccumulatorValue(args->at(0));
+ __ mov(a0, result_register());
NumberToStringStub stub;
__ CallStub(&stub);
@@ -3305,7 +3714,7 @@ void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
__ bind(&index_out_of_range);
// When the index is out of range, the spec requires us to return
// the empty string.
- __ LoadRoot(result, Heap::kEmptyStringRootIndex);
+ __ LoadRoot(result, Heap::kempty_stringRootIndex);
__ jmp(&done);
__ bind(&need_conversion);
@@ -3328,7 +3737,7 @@ void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
- StringAddStub stub(NO_STRING_ADD_FLAGS);
+ StringAddStub stub(STRING_ADD_CHECK_BOTH);
__ CallStub(&stub);
context()->Plug(v0);
}
@@ -3459,12 +3868,12 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
ASSERT_EQ(2, args->length());
ASSERT_NE(NULL, args->at(0)->AsLiteral());
- int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
+ int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->value()))->value();
Handle<FixedArray> jsfunction_result_caches(
isolate()->native_context()->jsfunction_result_caches());
if (jsfunction_result_caches->length() <= cache_id) {
- __ Abort("Attempt to use undefined cache.");
+ __ Abort(kAttemptToUseUndefinedCache);
__ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
context()->Plug(v0);
return;
@@ -3620,7 +4029,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ lw(array_length, FieldMemOperand(array, JSArray::kLengthOffset));
__ SmiUntag(array_length);
__ Branch(&non_trivial_array, ne, array_length, Operand(zero_reg));
- __ LoadRoot(v0, Heap::kEmptyStringRootIndex);
+ __ LoadRoot(v0, Heap::kempty_stringRootIndex);
__ Branch(&done);
__ bind(&non_trivial_array);
@@ -3646,7 +4055,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// element: Current array element.
// elements_end: Array end.
if (generate_debug_code_) {
- __ Assert(gt, "No empty arrays here in EmitFastAsciiArrayJoin",
+ __ Assert(gt, kNoEmptyArraysHereInEmitFastAsciiArrayJoin,
array_length, Operand(zero_reg));
}
__ bind(&loop);
@@ -3656,7 +4065,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ lw(scratch1, FieldMemOperand(string, HeapObject::kMapOffset));
__ lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
__ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
- __ lw(scratch1, FieldMemOperand(string, SeqAsciiString::kLengthOffset));
+ __ lw(scratch1, FieldMemOperand(string, SeqOneByteString::kLengthOffset));
__ AdduAndCheckForOverflow(string_length, string_length, scratch1, scratch3);
__ BranchOnOverflow(&bailout, scratch3);
__ Branch(&loop, lt, element, Operand(elements_end));
@@ -3683,7 +4092,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// Add (separator length times array_length) - separator length to the
// string_length to get the length of the result string. array_length is not
// smi but the other values are, so the result is a smi.
- __ lw(scratch1, FieldMemOperand(separator, SeqAsciiString::kLengthOffset));
+ __ lw(scratch1, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
__ Subu(string_length, string_length, Operand(scratch1));
__ Mult(array_length, scratch1);
// Check for smi overflow. No overflow if higher 33 bits of 64-bit result are
@@ -3723,10 +4132,10 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
array_length = no_reg;
__ Addu(result_pos,
result,
- Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
// Check the length of the separator.
- __ lw(scratch1, FieldMemOperand(separator, SeqAsciiString::kLengthOffset));
+ __ lw(scratch1, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
__ li(at, Operand(Smi::FromInt(1)));
__ Branch(&one_char_separator, eq, scratch1, Operand(at));
__ Branch(&long_separator, gt, scratch1, Operand(at));
@@ -3743,7 +4152,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ Addu(element, element, kPointerSize);
__ lw(string_length, FieldMemOperand(string, String::kLengthOffset));
__ SmiUntag(string_length);
- __ Addu(string, string, SeqAsciiString::kHeaderSize - kHeapObjectTag);
+ __ Addu(string, string, SeqOneByteString::kHeaderSize - kHeapObjectTag);
__ CopyBytes(string, result_pos, string_length, scratch1);
// End while (element < elements_end).
__ Branch(&empty_separator_loop, lt, element, Operand(elements_end));
@@ -3753,7 +4162,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// One-character separator case.
__ bind(&one_char_separator);
// Replace separator with its ASCII character value.
- __ lbu(separator, FieldMemOperand(separator, SeqAsciiString::kHeaderSize));
+ __ lbu(separator, FieldMemOperand(separator, SeqOneByteString::kHeaderSize));
// Jump into the loop after the code that copies the separator, so the first
// element is not preceded by a separator.
__ jmp(&one_char_separator_loop_entry);
@@ -3775,7 +4184,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ Addu(element, element, kPointerSize);
__ lw(string_length, FieldMemOperand(string, String::kLengthOffset));
__ SmiUntag(string_length);
- __ Addu(string, string, SeqAsciiString::kHeaderSize - kHeapObjectTag);
+ __ Addu(string, string, SeqOneByteString::kHeaderSize - kHeapObjectTag);
__ CopyBytes(string, result_pos, string_length, scratch1);
// End while (element < elements_end).
__ Branch(&one_char_separator_loop, lt, element, Operand(elements_end));
@@ -3796,7 +4205,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ SmiUntag(string_length);
__ Addu(string,
separator,
- Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
__ CopyBytes(string, result_pos, string_length, scratch1);
__ bind(&long_separator);
@@ -3804,7 +4213,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ Addu(element, element, kPointerSize);
__ lw(string_length, FieldMemOperand(string, String::kLengthOffset));
__ SmiUntag(string_length);
- __ Addu(string, string, SeqAsciiString::kHeaderSize - kHeapObjectTag);
+ __ Addu(string, string, SeqOneByteString::kHeaderSize - kHeapObjectTag);
__ CopyBytes(string, result_pos, string_length, scratch1);
// End while (element < elements_end).
__ Branch(&long_separator_loop, lt, element, Operand(elements_end));
@@ -3965,51 +4374,12 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
break;
}
- case Token::ADD: {
- Comment cmt(masm_, "[ UnaryOperation (ADD)");
- VisitForAccumulatorValue(expr->expression());
- Label no_conversion;
- __ JumpIfSmi(result_register(), &no_conversion);
- __ mov(a0, result_register());
- ToNumberStub convert_stub;
- __ CallStub(&convert_stub);
- __ bind(&no_conversion);
- context()->Plug(result_register());
- break;
- }
-
- case Token::SUB:
- EmitUnaryOperation(expr, "[ UnaryOperation (SUB)");
- break;
-
- case Token::BIT_NOT:
- EmitUnaryOperation(expr, "[ UnaryOperation (BIT_NOT)");
- break;
-
default:
UNREACHABLE();
}
}
-void FullCodeGenerator::EmitUnaryOperation(UnaryOperation* expr,
- const char* comment) {
- // TODO(svenpanne): Allowing format strings in Comment would be nice here...
- Comment cmt(masm_, comment);
- bool can_overwrite = expr->expression()->ResultOverwriteAllowed();
- UnaryOverwriteMode overwrite =
- can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
- UnaryOpStub stub(expr->op(), overwrite);
- // GenericUnaryOpStub expects the argument to be in a0.
- VisitForAccumulatorValue(expr->expression());
- SetSourcePosition(expr->position());
- __ mov(a0, result_register());
- CallIC(stub.GetCode(), RelocInfo::CODE_TARGET,
- expr->UnaryOperationFeedbackId());
- context()->Plug(v0);
-}
-
-
void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Comment cmnt(masm_, "[ CountOperation");
SetSourcePosition(expr->position());
@@ -4068,7 +4438,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// Call ToNumber only if operand is not a smi.
Label no_conversion;
- __ JumpIfSmi(v0, &no_conversion);
+ if (ShouldInlineSmiCase(expr->op())) {
+ __ JumpIfSmi(v0, &no_conversion);
+ }
__ mov(a0, v0);
ToNumberStub convert_stub;
__ CallStub(&convert_stub);
@@ -4100,9 +4472,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
JumpPatchSite patch_site(masm_);
int count_value = expr->op() == Token::INC ? 1 : -1;
- __ li(a1, Operand(Smi::FromInt(count_value)));
-
if (ShouldInlineSmiCase(expr->op())) {
+ __ li(a1, Operand(Smi::FromInt(count_value)));
__ AdduAndCheckForOverflow(v0, a0, a1, t0);
__ BranchOnOverflow(&stub_call, t0); // Do stub on overflow.
@@ -4111,12 +4482,16 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
patch_site.EmitJumpIfSmi(v0, &done);
__ bind(&stub_call);
}
+ __ mov(a1, a0);
+ __ li(a0, Operand(Smi::FromInt(count_value)));
// Record position before stub call.
SetSourcePosition(expr->position());
BinaryOpStub stub(Token::ADD, NO_OVERWRITE);
- CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountBinOpFeedbackId());
+ CallIC(stub.GetCode(isolate()),
+ RelocInfo::CODE_TARGET,
+ expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
__ bind(&done);
@@ -4144,7 +4519,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
break;
case NAMED_PROPERTY: {
__ mov(a0, result_register()); // Value.
- __ li(a2, Operand(prop->key()->AsLiteral()->handle())); // Name.
+ __ li(a2, Operand(prop->key()->AsLiteral()->value())); // Name.
__ pop(a1); // Receiver.
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
@@ -4232,12 +4607,12 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
}
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- if (check->Equals(isolate()->heap()->number_symbol())) {
+ if (check->Equals(isolate()->heap()->number_string())) {
__ JumpIfSmi(v0, if_true);
__ lw(v0, FieldMemOperand(v0, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
Split(eq, v0, Operand(at), if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->string_symbol())) {
+ } else if (check->Equals(isolate()->heap()->string_string())) {
__ JumpIfSmi(v0, if_false);
// Check for undetectable objects => false.
__ GetObjectType(v0, v0, a1);
@@ -4246,16 +4621,20 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
__ And(a1, a1, Operand(1 << Map::kIsUndetectable));
Split(eq, a1, Operand(zero_reg),
if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->boolean_symbol())) {
+ } else if (check->Equals(isolate()->heap()->symbol_string())) {
+ __ JumpIfSmi(v0, if_false);
+ __ GetObjectType(v0, v0, a1);
+ Split(eq, a1, Operand(SYMBOL_TYPE), if_true, if_false, fall_through);
+ } else if (check->Equals(isolate()->heap()->boolean_string())) {
__ LoadRoot(at, Heap::kTrueValueRootIndex);
__ Branch(if_true, eq, v0, Operand(at));
__ LoadRoot(at, Heap::kFalseValueRootIndex);
Split(eq, v0, Operand(at), if_true, if_false, fall_through);
} else if (FLAG_harmony_typeof &&
- check->Equals(isolate()->heap()->null_symbol())) {
+ check->Equals(isolate()->heap()->null_string())) {
__ LoadRoot(at, Heap::kNullValueRootIndex);
Split(eq, v0, Operand(at), if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->undefined_symbol())) {
+ } else if (check->Equals(isolate()->heap()->undefined_string())) {
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
__ Branch(if_true, eq, v0, Operand(at));
__ JumpIfSmi(v0, if_false);
@@ -4264,14 +4643,14 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
__ lbu(a1, FieldMemOperand(v0, Map::kBitFieldOffset));
__ And(a1, a1, Operand(1 << Map::kIsUndetectable));
Split(ne, a1, Operand(zero_reg), if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->function_symbol())) {
+ } else if (check->Equals(isolate()->heap()->function_string())) {
__ JumpIfSmi(v0, if_false);
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
__ GetObjectType(v0, v0, a1);
__ Branch(if_true, eq, a1, Operand(JS_FUNCTION_TYPE));
Split(eq, a1, Operand(JS_FUNCTION_PROXY_TYPE),
if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->object_symbol())) {
+ } else if (check->Equals(isolate()->heap()->object_string())) {
__ JumpIfSmi(v0, if_false);
if (!FLAG_harmony_typeof) {
__ LoadRoot(at, Heap::kNullValueRootIndex);
@@ -4333,29 +4712,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
default: {
VisitForAccumulatorValue(expr->right());
- Condition cc = eq;
- switch (op) {
- case Token::EQ_STRICT:
- case Token::EQ:
- cc = eq;
- break;
- case Token::LT:
- cc = lt;
- break;
- case Token::GT:
- cc = gt;
- break;
- case Token::LTE:
- cc = le;
- break;
- case Token::GTE:
- cc = ge;
- break;
- case Token::IN:
- case Token::INSTANCEOF:
- default:
- UNREACHABLE();
- }
+ Condition cc = CompareIC::ComputeCondition(op);
__ mov(a0, result_register());
__ pop(a1);
@@ -4370,7 +4727,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
}
// Record position and call the compare IC.
SetSourcePosition(expr->position());
- Handle<Code> ic = CompareIC::GetUninitialized(op);
+ Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
CallIC(ic, RelocInfo::CODE_TARGET, expr->CompareOperationFeedbackId());
patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
@@ -4396,26 +4753,17 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
VisitForAccumulatorValue(sub_expr);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Heap::RootListIndex nil_value = nil == kNullValue ?
- Heap::kNullValueRootIndex :
- Heap::kUndefinedValueRootIndex;
__ mov(a0, result_register());
- __ LoadRoot(a1, nil_value);
if (expr->op() == Token::EQ_STRICT) {
+ Heap::RootListIndex nil_value = nil == kNullValue ?
+ Heap::kNullValueRootIndex :
+ Heap::kUndefinedValueRootIndex;
+ __ LoadRoot(a1, nil_value);
Split(eq, a0, Operand(a1), if_true, if_false, fall_through);
} else {
- Heap::RootListIndex other_nil_value = nil == kNullValue ?
- Heap::kUndefinedValueRootIndex :
- Heap::kNullValueRootIndex;
- __ Branch(if_true, eq, a0, Operand(a1));
- __ LoadRoot(a1, other_nil_value);
- __ Branch(if_true, eq, a0, Operand(a1));
- __ JumpIfSmi(a0, if_false);
- // It can be an undetectable object.
- __ lw(a1, FieldMemOperand(a0, HeapObject::kMapOffset));
- __ lbu(a1, FieldMemOperand(a1, Map::kBitFieldOffset));
- __ And(a1, a1, Operand(1 << Map::kIsUndetectable));
- Split(ne, a1, Operand(zero_reg), if_true, if_false, fall_through);
+ Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
+ CallIC(ic, RelocInfo::CODE_TARGET, expr->CompareOperationFeedbackId());
+ Split(ne, v0, Operand(zero_reg), if_true, if_false, fall_through);
}
context()->Plug(if_true, if_false);
}
@@ -4572,6 +4920,83 @@ FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
#undef __
+
+void BackEdgeTable::PatchAt(Code* unoptimized_code,
+ Address pc,
+ BackEdgeState target_state,
+ Code* replacement_code) {
+ static const int kInstrSize = Assembler::kInstrSize;
+ Address branch_address = pc - 6 * kInstrSize;
+ CodePatcher patcher(branch_address, 1);
+
+ switch (target_state) {
+ case INTERRUPT:
+ // slt at, a3, zero_reg (in case of count based interrupts)
+ // beq at, zero_reg, ok
+ // lui t9, <interrupt stub address> upper
+ // ori t9, <interrupt stub address> lower
+ // jalr t9
+ // nop
+ // ok-label ----- pc_after points here
+ patcher.masm()->slt(at, a3, zero_reg);
+ break;
+ case ON_STACK_REPLACEMENT:
+ case OSR_AFTER_STACK_CHECK:
+ // addiu at, zero_reg, 1
+ // beq at, zero_reg, ok ;; Not changed
+ // lui t9, <on-stack replacement address> upper
+ // ori t9, <on-stack replacement address> lower
+ // jalr t9 ;; Not changed
+ // nop ;; Not changed
+ // ok-label ----- pc_after points here
+ patcher.masm()->addiu(at, zero_reg, 1);
+ break;
+ }
+ Address pc_immediate_load_address = pc - 4 * kInstrSize;
+ // Replace the stack check address in the load-immediate (lui/ori pair)
+ // with the entry address of the replacement code.
+ Assembler::set_target_address_at(pc_immediate_load_address,
+ replacement_code->entry());
+
+ unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
+ unoptimized_code, pc_immediate_load_address, replacement_code);
+}
+
+
+BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
+ Isolate* isolate,
+ Code* unoptimized_code,
+ Address pc) {
+ static const int kInstrSize = Assembler::kInstrSize;
+ Address branch_address = pc - 6 * kInstrSize;
+ Address pc_immediate_load_address = pc - 4 * kInstrSize;
+
+ ASSERT(Assembler::IsBeq(Assembler::instr_at(pc - 5 * kInstrSize)));
+ if (!Assembler::IsAddImmediate(Assembler::instr_at(branch_address))) {
+ ASSERT(reinterpret_cast<uint32_t>(
+ Assembler::target_address_at(pc_immediate_load_address)) ==
+ reinterpret_cast<uint32_t>(
+ isolate->builtins()->InterruptCheck()->entry()));
+ return INTERRUPT;
+ }
+
+ ASSERT(Assembler::IsAddImmediate(Assembler::instr_at(branch_address)));
+
+ if (reinterpret_cast<uint32_t>(
+ Assembler::target_address_at(pc_immediate_load_address)) ==
+ reinterpret_cast<uint32_t>(
+ isolate->builtins()->OnStackReplacement()->entry())) {
+ return ON_STACK_REPLACEMENT;
+ }
+
+ ASSERT(reinterpret_cast<uint32_t>(
+ Assembler::target_address_at(pc_immediate_load_address)) ==
+ reinterpret_cast<uint32_t>(
+ isolate->builtins()->OsrAfterStackCheck()->entry()));
+ return OSR_AFTER_STACK_CHECK;
+}
+
+
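GetBackEdgeState above keys off two facts: which instruction the patcher left in the branch slot (slt for interrupts, addiu for either OSR state) and which builtin entry the lui/ori pair targets. A condensed sketch of that classification (the boolean and address parameters stand in for the assembler queries and are not real V8 APIs):

#include <cstdint>

enum BackEdgeStateSketch { INTERRUPT, ON_STACK_REPLACEMENT, OSR_AFTER_STACK_CHECK };

BackEdgeStateSketch ClassifyBackEdge(bool branch_slot_is_add_immediate,
                                     std::uintptr_t call_target,
                                     std::uintptr_t on_stack_replacement_entry) {
  if (!branch_slot_is_add_immediate) return INTERRUPT;  // still slt at, a3, zero_reg
  if (call_target == on_stack_replacement_entry) return ON_STACK_REPLACEMENT;
  return OSR_AFTER_STACK_CHECK;  // only remaining patched target
}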
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/mips/ic-mips.cc b/deps/v8/src/mips/ic-mips.cc
index cf706815e..aa2773462 100644
--- a/deps/v8/src/mips/ic-mips.cc
+++ b/deps/v8/src/mips/ic-mips.cc
@@ -29,7 +29,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_MIPS)
+#if V8_TARGET_ARCH_MIPS
#include "codegen.h"
#include "code-stubs.h"
@@ -61,12 +61,12 @@ static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
// Generated code falls through if the receiver is a regular non-global
// JS object with slow properties and no interceptors.
-static void GenerateStringDictionaryReceiverCheck(MacroAssembler* masm,
- Register receiver,
- Register elements,
- Register scratch0,
- Register scratch1,
- Label* miss) {
+static void GenerateNameDictionaryReceiverCheck(MacroAssembler* masm,
+ Register receiver,
+ Register elements,
+ Register scratch0,
+ Register scratch1,
+ Label* miss) {
// Register usage:
// receiver: holds the receiver on entry and is unchanged.
// elements: holds the property dictionary on fall through.
@@ -129,19 +129,19 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
Label done;
// Probe the dictionary.
- StringDictionaryLookupStub::GeneratePositiveLookup(masm,
- miss,
- &done,
- elements,
- name,
- scratch1,
- scratch2);
+ NameDictionaryLookupStub::GeneratePositiveLookup(masm,
+ miss,
+ &done,
+ elements,
+ name,
+ scratch1,
+ scratch2);
// If probing finds an entry check that the value is a normal
// property.
__ bind(&done); // scratch2 == elements + 4 * index.
- const int kElementsStartOffset = StringDictionary::kHeaderSize +
- StringDictionary::kElementsStartIndex * kPointerSize;
+ const int kElementsStartOffset = NameDictionary::kHeaderSize +
+ NameDictionary::kElementsStartIndex * kPointerSize;
const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
__ lw(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
__ And(at,
@@ -182,19 +182,19 @@ static void GenerateDictionaryStore(MacroAssembler* masm,
Label done;
// Probe the dictionary.
- StringDictionaryLookupStub::GeneratePositiveLookup(masm,
- miss,
- &done,
- elements,
- name,
- scratch1,
- scratch2);
+ NameDictionaryLookupStub::GeneratePositiveLookup(masm,
+ miss,
+ &done,
+ elements,
+ name,
+ scratch1,
+ scratch2);
// If probing finds an entry in the dictionary check that the value
// is a normal property that is not read only.
__ bind(&done); // scratch2 == elements + 4 * index.
- const int kElementsStartOffset = StringDictionary::kHeaderSize +
- StringDictionary::kElementsStartIndex * kPointerSize;
+ const int kElementsStartOffset = NameDictionary::kHeaderSize +
+ NameDictionary::kElementsStartIndex * kPointerSize;
const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
const int kTypeAndReadOnlyMask =
(PropertyDetails::TypeField::kMask |
@@ -215,53 +215,6 @@ static void GenerateDictionaryStore(MacroAssembler* masm,
}
-void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a2 : name
- // -- ra : return address
- // -- a0 : receiver
- // -- sp[0] : receiver
- // -----------------------------------
- Label miss;
-
- StubCompiler::GenerateLoadArrayLength(masm, a0, a3, &miss);
- __ bind(&miss);
- StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
-}
-
-
-void LoadIC::GenerateStringLength(MacroAssembler* masm, bool support_wrappers) {
- // ----------- S t a t e -------------
- // -- a2 : name
- // -- lr : return address
- // -- a0 : receiver
- // -- sp[0] : receiver
- // -----------------------------------
- Label miss;
-
- StubCompiler::GenerateLoadStringLength(masm, a0, a1, a3, &miss,
- support_wrappers);
- // Cache miss: Jump to runtime.
- __ bind(&miss);
- StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
-}
-
-
-void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a2 : name
- // -- lr : return address
- // -- a0 : receiver
- // -- sp[0] : receiver
- // -----------------------------------
- Label miss;
-
- StubCompiler::GenerateLoadFunctionPrototype(masm, a0, a1, a3, &miss);
- __ bind(&miss);
- StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
-}
-
-
// Checks the receiver for special cases (value type, slow case bits).
// Falls through for regular JS object.
static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
@@ -352,30 +305,36 @@ static void GenerateFastArrayLoad(MacroAssembler* masm,
}
-// Checks whether a key is an array index string or a symbol string.
-// Falls through if a key is a symbol.
-static void GenerateKeyStringCheck(MacroAssembler* masm,
- Register key,
- Register map,
- Register hash,
- Label* index_string,
- Label* not_symbol) {
+// Checks whether a key is an array index string or a unique name.
+// Falls through if a key is a unique name.
+static void GenerateKeyNameCheck(MacroAssembler* masm,
+ Register key,
+ Register map,
+ Register hash,
+ Label* index_string,
+ Label* not_unique) {
// The key is not a smi.
- // Is it a string?
+ Label unique;
+ // Is it a name?
__ GetObjectType(key, map, hash);
- __ Branch(not_symbol, ge, hash, Operand(FIRST_NONSTRING_TYPE));
+ __ Branch(not_unique, hi, hash, Operand(LAST_UNIQUE_NAME_TYPE));
+ STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
+ __ Branch(&unique, eq, hash, Operand(LAST_UNIQUE_NAME_TYPE));
// Is the string an array index, with cached numeric value?
- __ lw(hash, FieldMemOperand(key, String::kHashFieldOffset));
- __ And(at, hash, Operand(String::kContainsCachedArrayIndexMask));
+ __ lw(hash, FieldMemOperand(key, Name::kHashFieldOffset));
+ __ And(at, hash, Operand(Name::kContainsCachedArrayIndexMask));
__ Branch(index_string, eq, at, Operand(zero_reg));
- // Is the string a symbol?
+ // Is the string internalized? We know it's a string, so a single
+ // bit test is enough.
// map: key map
__ lbu(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kSymbolTag != 0);
- __ And(at, hash, Operand(kIsSymbolMask));
- __ Branch(not_symbol, eq, at, Operand(zero_reg));
+ STATIC_ASSERT(kInternalizedTag == 0);
+ __ And(at, hash, Operand(kIsNotInternalizedMask));
+ __ Branch(not_unique, ne, at, Operand(zero_reg));
+
+ __ bind(&unique);
}
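GenerateKeyNameCheck sorts a non-smi key into three buckets: array-index strings (routed through the cached index), unique names (symbols and internalized strings, which fall through), and everything else. A rough standalone restatement of that decision tree; the type bound and bit masks are passed in as parameters since the real layouts live in V8's headers:

    #include <cstdint>

    enum KeyKind { INDEX_STRING, UNIQUE_NAME, NOT_UNIQUE };

    // instance_type comes from the key's map; hash_field from Name::kHashFieldOffset.
    // The *_type / *_mask parameters stand in for the constants used above.
    KeyKind ClassifyKey(uint32_t instance_type,
                        uint32_t hash_field,
                        uint32_t last_unique_name_type,
                        uint32_t contains_cached_array_index_mask,
                        uint32_t is_not_internalized_mask) {
      if (instance_type > last_unique_name_type) return NOT_UNIQUE;    // Not a Name at all.
      if (instance_type == last_unique_name_type) return UNIQUE_NAME;  // A Symbol.
      // The key is a string; array-index strings go to the smi-index path.
      if ((hash_field & contains_cached_array_index_mask) == 0) return INDEX_STRING;
      // Of the remaining strings, only internalized ones count as unique names.
      return (instance_type & is_not_internalized_mask) == 0 ? UNIQUE_NAME : NOT_UNIQUE;
    }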
@@ -400,7 +359,7 @@ void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
extra_state,
Code::NORMAL,
argc);
- Isolate::Current()->stub_cache()->GenerateProbe(
+ masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, a1, a2, a3, t0, t1, t2);
// If the stub cache probing failed, the receiver might be a value.
@@ -436,7 +395,7 @@ void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
// Probe the stub cache for the value object.
__ bind(&probe);
- Isolate::Current()->stub_cache()->GenerateProbe(
+ masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, a1, a2, a3, t0, t1, t2);
__ bind(&miss);
@@ -473,7 +432,7 @@ void CallICBase::GenerateNormal(MacroAssembler* masm, int argc) {
// Get the receiver of the function from the stack into a1.
__ lw(a1, MemOperand(sp, argc * kPointerSize));
- GenerateStringDictionaryReceiverCheck(masm, a1, a0, a3, t0, &miss);
+ GenerateNameDictionaryReceiverCheck(masm, a1, a0, a3, t0, &miss);
// a0: elements
// Search the dictionary - put result in register a1.
@@ -576,11 +535,11 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
__ lw(a1, MemOperand(sp, argc * kPointerSize));
Label do_call, slow_call, slow_load, slow_reload_receiver;
- Label check_number_dictionary, check_string, lookup_monomorphic_cache;
- Label index_smi, index_string;
+ Label check_number_dictionary, check_name, lookup_monomorphic_cache;
+ Label index_smi, index_name;
// Check that the key is a smi.
- __ JumpIfNotSmi(a2, &check_string);
+ __ JumpIfNotSmi(a2, &check_name);
__ bind(&index_smi);
// Now the key is known to be a smi. This place is also jumped to from below
// where a numeric string is converted to a smi.
@@ -627,10 +586,10 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
__ mov(a1, v0);
__ jmp(&do_call);
- __ bind(&check_string);
- GenerateKeyStringCheck(masm, a2, a0, a3, &index_string, &slow_call);
+ __ bind(&check_name);
+ GenerateKeyNameCheck(masm, a2, a0, a3, &index_name, &slow_call);
- // The key is known to be a symbol.
+ // The key is known to be a unique name.
// If the receiver is a regular JS object with slow properties then do
// a quick inline probe of the receiver's dictionary.
// Otherwise do the monomorphic cache probe.
@@ -657,14 +616,14 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
__ bind(&slow_call);
// This branch is taken if:
// - the receiver requires boxing or access check,
- // - the key is neither smi nor symbol,
+ // - the key is neither smi nor a unique name,
// - the value loaded is not a function,
// - there is hope that the runtime will create a monomorphic call stub,
// that will get fetched next time.
__ IncrementCounter(counters->keyed_call_generic_slow(), 1, a0, a3);
GenerateMiss(masm, argc);
- __ bind(&index_string);
+ __ bind(&index_name);
__ IndexFromHash(a3, a2);
// Now jump to the place where smi keys are handled.
__ jmp(&index_smi);
@@ -677,10 +636,10 @@ void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
// -- ra : return address
// -----------------------------------
- // Check if the name is a string.
+ // Check if the name is really a name.
Label miss;
__ JumpIfSmi(a2, &miss);
- __ IsObjectJSStringType(a2, a0, &miss);
+ __ IsObjectNameType(a2, a0, &miss);
CallICBase::GenerateNormal(masm, argc);
__ bind(&miss);
@@ -688,20 +647,18 @@ void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
}
-// Defined in ic.cc.
-Object* LoadIC_Miss(Arguments args);
-
void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a2 : name
// -- ra : return address
// -- a0 : receiver
- // -- sp[0] : receiver
// -----------------------------------
// Probe the stub cache.
- Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC, MONOMORPHIC);
- Isolate::Current()->stub_cache()->GenerateProbe(
+ Code::Flags flags = Code::ComputeFlags(
+ Code::HANDLER, MONOMORPHIC, Code::kNoExtraICState,
+ Code::NORMAL, Code::LOAD_IC);
+ masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, a0, a2, a3, t0, t1, t2);
// Cache miss: Jump to runtime.
@@ -714,11 +671,10 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
// -- a2 : name
// -- lr : return address
// -- a0 : receiver
- // -- sp[0] : receiver
// -----------------------------------
Label miss;
- GenerateStringDictionaryReceiverCheck(masm, a0, a1, a3, t0, &miss);
+ GenerateNameDictionaryReceiverCheck(masm, a0, a1, a3, t0, &miss);
// a1: elements
GenerateDictionaryLoad(masm, &miss, a1, a2, v0, a3, t0);
@@ -735,7 +691,6 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
// -- a2 : name
// -- ra : return address
// -- a0 : receiver
- // -- sp[0] : receiver
// -----------------------------------
Isolate* isolate = masm->isolate();
@@ -750,6 +705,20 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
}
+void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- a2 : name
+ // -- ra : return address
+ // -- a0 : receiver
+ // -----------------------------------
+
+ __ mov(a3, a0);
+ __ Push(a3, a2);
+
+ __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
+}
+
+
static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
Register object,
Register key,
@@ -858,7 +827,7 @@ void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
__ Ret(USE_DELAY_SLOT);
__ mov(v0, a2);
__ bind(&slow);
- GenerateMiss(masm, false);
+ GenerateMiss(masm, MISS);
}
@@ -893,7 +862,7 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
__ Ret(USE_DELAY_SLOT);
__ mov(v0, a0); // (In delay slot) return the value stored in v0.
__ bind(&slow);
- GenerateMiss(masm, false);
+ GenerateMiss(masm, MISS);
}
@@ -923,10 +892,7 @@ void KeyedCallIC::GenerateNonStrictArguments(MacroAssembler* masm,
}
-Object* KeyedLoadIC_Miss(Arguments args);
-
-
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
+void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
// ---------- S t a t e --------------
// -- ra : return address
// -- a0 : key
@@ -939,7 +905,7 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
__ Push(a1, a0);
// Perform tail call to the entry.
- ExternalReference ref = force_generic
+ ExternalReference ref = miss_mode == MISS_FORCE_GENERIC
? ExternalReference(IC_Utility(kKeyedLoadIC_MissForceGeneric), isolate)
: ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
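The former force_generic boolean becomes an explicit ICMissMode, so call sites read GenerateMiss(masm, MISS) rather than GenerateMiss(masm, false). The selection pattern, sketched with the two runtime-entry names reduced to plain strings:

    #include <string>

    enum ICMissMode { MISS, MISS_FORCE_GENERIC };

    // Illustrative helper: name of the runtime entry the miss stub tail-calls.
    std::string KeyedLoadMissEntry(ICMissMode mode) {
      return mode == MISS_FORCE_GENERIC ? "kKeyedLoadIC_MissForceGeneric"
                                        : "kKeyedLoadIC_Miss";
    }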
@@ -966,7 +932,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// -- a0 : key
// -- a1 : receiver
// -----------------------------------
- Label slow, check_string, index_smi, index_string, property_array_property;
+ Label slow, check_name, index_smi, index_name, property_array_property;
Label probe_dictionary, check_number_dictionary;
Register key = a0;
@@ -975,7 +941,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
Isolate* isolate = masm->isolate();
// Check that the key is a smi.
- __ JumpIfNotSmi(key, &check_string);
+ __ JumpIfNotSmi(key, &check_name);
__ bind(&index_smi);
// Now the key is known to be a smi. This place is also jumped to from below
// where a numeric string is converted to a smi.
@@ -1014,8 +980,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
a3);
GenerateRuntimeGetProperty(masm);
- __ bind(&check_string);
- GenerateKeyStringCheck(masm, key, a2, a3, &index_string, &slow);
+ __ bind(&check_name);
+ GenerateKeyNameCheck(masm, key, a2, a3, &index_name, &slow);
GenerateKeyedLoadReceiverCheck(
masm, receiver, a2, a3, Map::kHasIndexedInterceptor, &slow);
@@ -1029,16 +995,16 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ Branch(&probe_dictionary, eq, t0, Operand(at));
// Load the map of the receiver, compute the keyed lookup cache hash
- // based on 32 bits of the map pointer and the string hash.
+ // based on 32 bits of the map pointer and the name hash.
__ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
__ sra(a3, a2, KeyedLookupCache::kMapHashShift);
- __ lw(t0, FieldMemOperand(a0, String::kHashFieldOffset));
- __ sra(at, t0, String::kHashShift);
+ __ lw(t0, FieldMemOperand(a0, Name::kHashFieldOffset));
+ __ sra(at, t0, Name::kHashShift);
__ xor_(a3, a3, at);
int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask;
__ And(a3, a3, Operand(mask));
- // Load the key (consisting of map and symbol) from the cache and
+ // Load the key (consisting of map and unique name) from the cache and
// check for match.
Label load_in_object_property;
static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
@@ -1131,7 +1097,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
a3);
__ Ret();
- __ bind(&index_string);
+ __ bind(&index_name);
__ IndexFromHash(a3, key);
// Now jump to the place where smi keys are handled.
__ Branch(&index_smi);
@@ -1166,7 +1132,7 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
char_at_generator.GenerateSlow(masm, call_helper);
__ bind(&miss);
- GenerateMiss(masm, false);
+ GenerateMiss(masm, MISS);
}
@@ -1268,12 +1234,10 @@ static void KeyedStoreGenerateGenericHelper(
__ bind(&fast_double_without_map_check);
__ StoreNumberToDoubleElements(value,
key,
- receiver,
elements, // Overwritten.
a3, // Scratch regs...
t0,
t1,
- t2,
&transition_double_elements);
if (increment_length == kIncrementLength) {
// Add 1 to receiver->length.
@@ -1296,7 +1260,9 @@ static void KeyedStoreGenerateGenericHelper(
t0,
slow);
ASSERT(receiver_map.is(a3)); // Transition code expects map in a3
- ElementsTransitionGenerator::GenerateSmiToDouble(masm, slow);
+ AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS,
+ FAST_DOUBLE_ELEMENTS);
+ ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, slow);
__ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ jmp(&fast_double_without_map_check);
@@ -1308,7 +1274,9 @@ static void KeyedStoreGenerateGenericHelper(
t0,
slow);
ASSERT(receiver_map.is(a3)); // Transition code expects map in a3
- ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm);
+ mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
+ ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm, mode,
+ slow);
__ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ jmp(&finish_object_store);
@@ -1322,7 +1290,8 @@ static void KeyedStoreGenerateGenericHelper(
t0,
slow);
ASSERT(receiver_map.is(a3)); // Transition code expects map in a3
- ElementsTransitionGenerator::GenerateDoubleToObject(masm, slow);
+ mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
+ ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, slow);
__ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ jmp(&finish_object_store);
}
@@ -1453,11 +1422,11 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
IC_Utility(kKeyedLoadPropertyWithInterceptor), masm->isolate()), 2, 1);
__ bind(&slow);
- GenerateMiss(masm, false);
+ GenerateMiss(masm, MISS);
}
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
+void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
// ---------- S t a t e --------------
// -- a0 : value
// -- a1 : key
@@ -1468,7 +1437,7 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
// Push receiver, key and value for runtime call.
__ Push(a2, a1, a0);
- ExternalReference ref = force_generic
+ ExternalReference ref = miss_mode == MISS_FORCE_GENERIC
? ExternalReference(IC_Utility(kKeyedStoreIC_MissForceGeneric),
masm->isolate())
: ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
@@ -1476,65 +1445,43 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
}
-void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
+void StoreIC::GenerateSlow(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- a0 : value
- // -- a1 : key
- // -- a2 : receiver
+ // -- a2 : key
+ // -- a1 : receiver
// -- ra : return address
// -----------------------------------
// Push receiver, key and value for runtime call.
- // We can't use MultiPush as the order of the registers is important.
- __ Push(a2, a1, a0);
+ __ Push(a1, a2, a0);
// The slow case calls into the runtime to complete the store without causing
// an IC miss that would otherwise cause a transition to the generic stub.
ExternalReference ref =
- ExternalReference(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
-
+ ExternalReference(IC_Utility(kStoreIC_Slow), masm->isolate());
__ TailCallExternalReference(ref, 3, 1);
}
-void KeyedStoreIC::GenerateTransitionElementsSmiToDouble(MacroAssembler* masm) {
+void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
// ---------- S t a t e --------------
+ // -- a0 : value
+ // -- a1 : key
// -- a2 : receiver
- // -- a3 : target map
// -- ra : return address
// -----------------------------------
- // Must return the modified receiver in v0.
- if (!FLAG_trace_elements_transitions) {
- Label fail;
- ElementsTransitionGenerator::GenerateSmiToDouble(masm, &fail);
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, a2);
- __ bind(&fail);
- }
-
- __ push(a2);
- __ TailCallRuntime(Runtime::kTransitionElementsSmiToDouble, 1, 1);
-}
+ // Push receiver, key and value for runtime call.
+ // We can't use MultiPush as the order of the registers is important.
+ __ Push(a2, a1, a0);
-void KeyedStoreIC::GenerateTransitionElementsDoubleToObject(
- MacroAssembler* masm) {
- // ---------- S t a t e --------------
- // -- a2 : receiver
- // -- a3 : target map
- // -- ra : return address
- // -----------------------------------
- // Must return the modified receiver in v0.
- if (!FLAG_trace_elements_transitions) {
- Label fail;
- ElementsTransitionGenerator::GenerateDoubleToObject(masm, &fail);
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, a2);
- __ bind(&fail);
- }
+ // The slow case calls into the runtime to complete the store without causing
+ // an IC miss that would otherwise cause a transition to the generic stub.
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
- __ push(a2);
- __ TailCallRuntime(Runtime::kTransitionElementsDoubleToObject, 1, 1);
+ __ TailCallExternalReference(ref, 3, 1);
}
@@ -1548,9 +1495,10 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
// -----------------------------------
// Get the receiver from the stack and probe the stub cache.
- Code::Flags flags =
- Code::ComputeFlags(Code::STORE_IC, MONOMORPHIC, strict_mode);
- Isolate::Current()->stub_cache()->GenerateProbe(
+ Code::Flags flags = Code::ComputeFlags(
+ Code::HANDLER, MONOMORPHIC, strict_mode,
+ Code::NORMAL, Code::STORE_IC);
+ masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, a1, a2, a3, t0, t1, t2);
// Cache miss: Jump to runtime.
@@ -1574,62 +1522,6 @@ void StoreIC::GenerateMiss(MacroAssembler* masm) {
}
-void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a0 : value
- // -- a1 : receiver
- // -- a2 : name
- // -- ra : return address
- // -----------------------------------
- //
- // This accepts as a receiver anything JSArray::SetElementsLength accepts
- // (currently anything except for external arrays which means anything with
- // elements of FixedArray type). Value must be a number, but only smis are
- // accepted as the most common case.
-
- Label miss;
-
- Register receiver = a1;
- Register value = a0;
- Register scratch = a3;
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Check that the object is a JS array.
- __ GetObjectType(receiver, scratch, scratch);
- __ Branch(&miss, ne, scratch, Operand(JS_ARRAY_TYPE));
-
- // Check that elements are FixedArray.
- // We rely on StoreIC_ArrayLength below to deal with all types of
- // fast elements (including COW).
- __ lw(scratch, FieldMemOperand(receiver, JSArray::kElementsOffset));
- __ GetObjectType(scratch, scratch, scratch);
- __ Branch(&miss, ne, scratch, Operand(FIXED_ARRAY_TYPE));
-
- // Check that the array has fast properties, otherwise the length
- // property might have been redefined.
- __ lw(scratch, FieldMemOperand(receiver, JSArray::kPropertiesOffset));
- __ lw(scratch, FieldMemOperand(scratch, FixedArray::kMapOffset));
- __ LoadRoot(at, Heap::kHashTableMapRootIndex);
- __ Branch(&miss, eq, scratch, Operand(at));
-
- // Check that value is a smi.
- __ JumpIfNotSmi(value, &miss);
-
- // Prepare tail call to StoreIC_ArrayLength.
- __ Push(receiver, value);
-
- ExternalReference ref = ExternalReference(IC_Utility(kStoreIC_ArrayLength),
- masm->isolate());
- __ TailCallExternalReference(ref, 2, 1);
-
- __ bind(&miss);
-
- GenerateMiss(masm);
-}
-
-
void StoreIC::GenerateNormal(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : value
@@ -1639,7 +1531,7 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
// -----------------------------------
Label miss;
- GenerateStringDictionaryReceiverCheck(masm, a1, a3, t0, t1, &miss);
+ GenerateNameDictionaryReceiverCheck(masm, a1, a3, t0, t1, &miss);
GenerateDictionaryStore(masm, &miss, a3, a2, a0, t0, t1);
Counters* counters = masm->isolate()->counters();
@@ -1652,8 +1544,8 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
}
-void StoreIC::GenerateGlobalProxy(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
+void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
+ StrictModeFlag strict_mode) {
// ----------- S t a t e -------------
// -- a0 : value
// -- a1 : receiver
@@ -1695,36 +1587,16 @@ Condition CompareIC::ComputeCondition(Token::Value op) {
}
-void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
- HandleScope scope;
- Handle<Code> rewritten;
- State previous_state = GetState();
- State state = TargetState(previous_state, false, x, y);
- if (state == GENERIC) {
- CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, a1, a0);
- rewritten = stub.GetCode();
- } else {
- ICCompareStub stub(op_, state);
- if (state == KNOWN_OBJECTS) {
- stub.set_known_map(Handle<Map>(Handle<JSObject>::cast(x)->map()));
- }
- rewritten = stub.GetCode();
- }
- set_target(*rewritten);
-
-#ifdef DEBUG
- if (FLAG_trace_ic) {
- PrintF("[CompareIC (%s->%s)#%s]\n",
- GetStateName(previous_state),
- GetStateName(state),
- Token::Name(op_));
- }
-#endif
+bool CompareIC::HasInlinedSmiCode(Address address) {
+ // The address of the instruction following the call.
+ Address andi_instruction_address =
+ address + Assembler::kCallTargetAddressOffset;
- // Activate inlined smi code.
- if (previous_state == UNINITIALIZED) {
- PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK);
- }
+ // If the instruction following the call is not an andi at, rx, #yyy, nothing
+ // was inlined.
+ Instr instr = Assembler::instr_at(andi_instruction_address);
+ return Assembler::IsAndImmediate(instr) &&
+ Assembler::GetRt(instr) == static_cast<uint32_t>(zero_reg.code());
}
@@ -1736,7 +1608,7 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
// was inlined.
Instr instr = Assembler::instr_at(andi_instruction_address);
if (!(Assembler::IsAndImmediate(instr) &&
- Assembler::GetRt(instr) == (uint32_t)zero_reg.code())) {
+ Assembler::GetRt(instr) == static_cast<uint32_t>(zero_reg.code()))) {
return;
}
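Both HasInlinedSmiCode above and PatchInlinedSmiCode recognise an inlined smi check by decoding the instruction that follows the IC call and verifying it is an andi with the expected rt register. A sketch of that decode using the standard MIPS32 I-type field layout (opcode in bits 31..26, rt in bits 20..16); the expected register number is left as a parameter rather than hard-coding V8's choice:

    #include <cstdint>

    constexpr uint32_t kAndiOpcode = 0x0C;  // MIPS32 I-type "andi rt, rs, imm".

    bool IsAndImmediate(uint32_t instr) { return (instr >> 26) == kAndiOpcode; }

    uint32_t GetRt(uint32_t instr) { return (instr >> 16) & 0x1F; }

    // True if the word is "andi <expected_rt>, rs, imm", i.e. the marker the
    // smi-check patching code expects right after the IC call.
    bool LooksLikeInlinedSmiCheck(uint32_t instr, uint32_t expected_rt) {
      return IsAndImmediate(instr) && GetRt(instr) == expected_rt;
    }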
diff --git a/deps/v8/src/mips/lithium-codegen-mips.cc b/deps/v8/src/mips/lithium-codegen-mips.cc
index 21fd2ce48..4484dc634 100644
--- a/deps/v8/src/mips/lithium-codegen-mips.cc
+++ b/deps/v8/src/mips/lithium-codegen-mips.cc
@@ -31,12 +31,13 @@
#include "mips/lithium-gap-resolver-mips.h"
#include "code-stubs.h"
#include "stub-cache.h"
+#include "hydrogen-osr.h"
namespace v8 {
namespace internal {
-class SafepointGenerator : public CallWrapper {
+class SafepointGenerator V8_FINAL : public CallWrapper {
public:
SafepointGenerator(LCodeGen* codegen,
LPointerMap* pointers,
@@ -44,11 +45,11 @@ class SafepointGenerator : public CallWrapper {
: codegen_(codegen),
pointers_(pointers),
deopt_mode_(mode) { }
- virtual ~SafepointGenerator() { }
+ virtual ~SafepointGenerator() {}
- virtual void BeforeCall(int call_size) const { }
+ virtual void BeforeCall(int call_size) const V8_OVERRIDE {}
- virtual void AfterCall() const {
+ virtual void AfterCall() const V8_OVERRIDE {
codegen_->RecordSafepoint(pointers_, deopt_mode_);
}
@@ -62,12 +63,9 @@ class SafepointGenerator : public CallWrapper {
#define __ masm()->
bool LCodeGen::GenerateCode() {
- HPhase phase("Z_Code generation", chunk());
+ LPhase phase("Z_Code generation", chunk());
ASSERT(is_unused());
status_ = GENERATING;
- CpuFeatures::Scope scope(FPU);
-
- CodeStub::GenerateFPStubs();
// Open a frame scope to indicate that there is a frame on the stack. The
// NONE indicates that the scope shouldn't actually generate code to set up
@@ -77,6 +75,7 @@ bool LCodeGen::GenerateCode() {
return GeneratePrologue() &&
GenerateBody() &&
GenerateDeferredCode() &&
+ GenerateDeoptJumpTable() &&
GenerateSafepointTable();
}
@@ -85,86 +84,96 @@ void LCodeGen::FinishCode(Handle<Code> code) {
ASSERT(is_done());
code->set_stack_slots(GetStackSlotCount());
code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
+ if (FLAG_weak_embedded_maps_in_optimized_code) {
+ RegisterDependentCodeForEmbeddedMaps(code);
+ }
PopulateDeoptimizationData(code);
+ info()->CommitDependencies(code);
}
-void LChunkBuilder::Abort(const char* reason) {
+void LChunkBuilder::Abort(BailoutReason reason) {
info()->set_bailout_reason(reason);
status_ = ABORTED;
}
-void LCodeGen::Comment(const char* format, ...) {
- if (!FLAG_code_comments) return;
- char buffer[4 * KB];
- StringBuilder builder(buffer, ARRAY_SIZE(buffer));
- va_list arguments;
- va_start(arguments, format);
- builder.AddFormattedList(format, arguments);
- va_end(arguments);
-
- // Copy the string before recording it in the assembler to avoid
- // issues when the stack allocated buffer goes out of scope.
- size_t length = builder.position();
- Vector<char> copy = Vector<char>::New(length + 1);
- memcpy(copy.start(), builder.Finalize(), copy.length());
- masm()->RecordComment(copy.start());
-}
-
-
bool LCodeGen::GeneratePrologue() {
ASSERT(is_generating());
- ProfileEntryHookStub::MaybeCallEntryHook(masm_);
+ if (info()->IsOptimizing()) {
+ ProfileEntryHookStub::MaybeCallEntryHook(masm_);
#ifdef DEBUG
- if (strlen(FLAG_stop_at) > 0 &&
- info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
- __ stop("stop_at");
- }
+ if (strlen(FLAG_stop_at) > 0 &&
+ info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
+ __ stop("stop_at");
+ }
#endif
- // a1: Callee's JS function.
- // cp: Callee's context.
- // fp: Caller's frame pointer.
- // lr: Caller's pc.
-
- // Strict mode functions and builtins need to replace the receiver
- // with undefined when called as functions (without an explicit
- // receiver object). r5 is zero for method calls and non-zero for
- // function calls.
- if (!info_->is_classic_mode() || info_->is_native()) {
- Label ok;
- __ Branch(&ok, eq, t1, Operand(zero_reg));
-
- int receiver_offset = scope()->num_parameters() * kPointerSize;
- __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
- __ sw(a2, MemOperand(sp, receiver_offset));
- __ bind(&ok);
+ // a1: Callee's JS function.
+ // cp: Callee's context.
+ // fp: Caller's frame pointer.
+ // lr: Caller's pc.
+
+ // Strict mode functions and builtins need to replace the receiver
+ // with undefined when called as functions (without an explicit
+ // receiver object). r5 is zero for method calls and non-zero for
+ // function calls.
+ if (!info_->is_classic_mode() || info_->is_native()) {
+ Label ok;
+ __ Branch(&ok, eq, t1, Operand(zero_reg));
+
+ int receiver_offset = scope()->num_parameters() * kPointerSize;
+ __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+ __ sw(a2, MemOperand(sp, receiver_offset));
+ __ bind(&ok);
+ }
}
- __ Push(ra, fp, cp, a1);
- __ Addu(fp, sp, Operand(2 * kPointerSize)); // Adj. FP to point to saved FP.
+ info()->set_prologue_offset(masm_->pc_offset());
+ if (NeedsEagerFrame()) {
+ __ Prologue(info()->IsStub() ? BUILD_STUB_FRAME : BUILD_FUNCTION_FRAME);
+ frame_is_built_ = true;
+ info_->AddNoFrameRange(0, masm_->pc_offset());
+ }
// Reserve space for the stack slots needed by the code.
int slots = GetStackSlotCount();
if (slots > 0) {
if (FLAG_debug_code) {
- __ li(a0, Operand(slots));
- __ li(a2, Operand(kSlotsZapValue));
+ __ Subu(sp, sp, Operand(slots * kPointerSize));
+ __ push(a0);
+ __ push(a1);
+ __ Addu(a0, sp, Operand(slots * kPointerSize));
+ __ li(a1, Operand(kSlotsZapValue));
Label loop;
__ bind(&loop);
- __ push(a2);
- __ Subu(a0, a0, 1);
- __ Branch(&loop, ne, a0, Operand(zero_reg));
+ __ Subu(a0, a0, Operand(kPointerSize));
+ __ sw(a1, MemOperand(a0, 2 * kPointerSize));
+ __ Branch(&loop, ne, a0, Operand(sp));
+ __ pop(a1);
+ __ pop(a0);
} else {
__ Subu(sp, sp, Operand(slots * kPointerSize));
}
}
+ if (info()->saves_caller_doubles()) {
+ Comment(";;; Save clobbered callee double registers");
+ int count = 0;
+ BitVector* doubles = chunk()->allocated_double_registers();
+ BitVector::Iterator save_iterator(doubles);
+ while (!save_iterator.Done()) {
+ __ sdc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
+ MemOperand(sp, count * kDoubleSize));
+ save_iterator.Advance();
+ count++;
+ }
+ }
+
// Possibly allocate a local context.
- int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (heap_slots > 0) {
Comment(";;; Allocate local context");
// Argument to NewContext is the function, which is in a1.
@@ -193,39 +202,34 @@ bool LCodeGen::GeneratePrologue() {
__ sw(a0, target);
// Update the write barrier. This clobbers a3 and a0.
__ RecordWriteContextSlot(
- cp, target.offset(), a0, a3, kRAHasBeenSaved, kSaveFPRegs);
+ cp, target.offset(), a0, a3, GetRAState(), kSaveFPRegs);
}
}
Comment(";;; End allocate local context");
}
// Trace the call.
- if (FLAG_trace) {
+ if (FLAG_trace && info()->IsOptimizing()) {
+ // We have not executed any compiled code yet, so cp still holds the
+ // incoming context.
__ CallRuntime(Runtime::kTraceEnter, 0);
}
- EnsureSpaceForLazyDeopt();
return !is_aborted();
}
-bool LCodeGen::GenerateBody() {
- ASSERT(is_generating());
- bool emit_instructions = true;
- for (current_instruction_ = 0;
- !is_aborted() && current_instruction_ < instructions_->length();
- current_instruction_++) {
- LInstruction* instr = instructions_->at(current_instruction_);
- if (instr->IsLabel()) {
- LLabel* label = LLabel::cast(instr);
- emit_instructions = !label->HasReplacement();
- }
+void LCodeGen::GenerateOsrPrologue() {
+ // Generate the OSR entry prologue at the first unknown OSR value, or if there
+ // are none, at the OSR entrypoint instruction.
+ if (osr_pc_offset_ >= 0) return;
- if (emit_instructions) {
- Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
- instr->CompileToNative(this);
- }
- }
- return !is_aborted();
+ osr_pc_offset_ = masm()->pc_offset();
+
+ // Adjust the frame size, subsuming the unoptimized frame into the
+ // optimized frame.
+ int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
+ ASSERT(slots >= 0);
+ __ Subu(sp, sp, Operand(slots * kPointerSize));
}
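GenerateOsrPrologue grows the stack only by the difference between the optimized frame's slot count and the slots the unoptimized frame already occupies, so the unoptimized frame is subsumed rather than rebuilt. The arithmetic, as a small sketch (a 4-byte pointer size assumed for MIPS32):

    #include <cassert>

    // Bytes to subtract from sp when entering optimized code through OSR:
    // the unoptimized frame's slots are reused, only the difference is allocated.
    int OsrStackAdjustment(int optimized_slots, int unoptimized_frame_slots,
                           int pointer_size = 4 /* MIPS32 */) {
      int extra_slots = optimized_slots - unoptimized_frame_slots;
      assert(extra_slots >= 0);
      return extra_slots * pointer_size;
    }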
@@ -234,11 +238,36 @@ bool LCodeGen::GenerateDeferredCode() {
if (deferred_.length() > 0) {
for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
LDeferredCode* code = deferred_[i];
- __ bind(code->entry());
- Comment(";;; Deferred code @%d: %s.",
+
+ HValue* value =
+ instructions_->at(code->instruction_index())->hydrogen_value();
+ RecordAndWritePosition(value->position());
+
+ Comment(";;; <@%d,#%d> "
+ "-------------------- Deferred %s --------------------",
code->instruction_index(),
+ code->instr()->hydrogen_value()->id(),
code->instr()->Mnemonic());
+ __ bind(code->entry());
+ if (NeedsDeferredFrame()) {
+ Comment(";;; Build frame");
+ ASSERT(!frame_is_built_);
+ ASSERT(info()->IsStub());
+ frame_is_built_ = true;
+ __ MultiPush(cp.bit() | fp.bit() | ra.bit());
+ __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
+ __ push(scratch0());
+ __ Addu(fp, sp, Operand(2 * kPointerSize));
+ Comment(";;; Deferred code");
+ }
code->Generate();
+ if (NeedsDeferredFrame()) {
+ Comment(";;; Destroy frame");
+ ASSERT(frame_is_built_);
+ __ pop(at);
+ __ MultiPop(cp.bit() | fp.bit() | ra.bit());
+ frame_is_built_ = false;
+ }
__ jmp(code->exit());
}
}
@@ -250,10 +279,49 @@ bool LCodeGen::GenerateDeferredCode() {
bool LCodeGen::GenerateDeoptJumpTable() {
- // TODO(plind): not clear that this will have advantage for MIPS.
- // Skipping it for now. Raised issue #100 for this.
- Abort("Unimplemented: GenerateDeoptJumpTable");
- return false;
+ if (deopt_jump_table_.length() > 0) {
+ Comment(";;; -------------------- Jump table --------------------");
+ }
+ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+ Label table_start;
+ __ bind(&table_start);
+ Label needs_frame;
+ for (int i = 0; i < deopt_jump_table_.length(); i++) {
+ __ bind(&deopt_jump_table_[i].label);
+ Address entry = deopt_jump_table_[i].address;
+ Deoptimizer::BailoutType type = deopt_jump_table_[i].bailout_type;
+ int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
+ if (id == Deoptimizer::kNotDeoptimizationEntry) {
+ Comment(";;; jump table entry %d.", i);
+ } else {
+ Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
+ }
+ __ li(t9, Operand(ExternalReference::ForDeoptEntry(entry)));
+ if (deopt_jump_table_[i].needs_frame) {
+ if (needs_frame.is_bound()) {
+ __ Branch(&needs_frame);
+ } else {
+ __ bind(&needs_frame);
+ __ MultiPush(cp.bit() | fp.bit() | ra.bit());
+ // This variant of deopt can only be used with stubs. Since we don't
+ // have a function pointer to install in the stack frame that we're
+ // building, install a special marker there instead.
+ ASSERT(info()->IsStub());
+ __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
+ __ push(scratch0());
+ __ Addu(fp, sp, Operand(2 * kPointerSize));
+ __ Call(t9);
+ }
+ } else {
+ __ Call(t9);
+ }
+ }
+ __ RecordComment("]");
+
+ // The deoptimization jump table is the last part of the instruction
+ // sequence. Mark the generated code as done unless we bailed out.
+ if (!is_aborted()) status_ = DONE;
+ return !is_aborted();
}
@@ -286,20 +354,19 @@ Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
} else if (op->IsConstantOperand()) {
LConstantOperand* const_op = LConstantOperand::cast(op);
HConstant* constant = chunk_->LookupConstant(const_op);
- Handle<Object> literal = constant->handle();
+ Handle<Object> literal = constant->handle(isolate());
Representation r = chunk_->LookupLiteralRepresentation(const_op);
if (r.IsInteger32()) {
ASSERT(literal->IsNumber());
__ li(scratch, Operand(static_cast<int32_t>(literal->Number())));
+ } else if (r.IsSmi()) {
+ ASSERT(constant->HasSmiValue());
+ __ li(scratch, Operand(Smi::FromInt(constant->Integer32Value())));
} else if (r.IsDouble()) {
- Abort("EmitLoadRegister: Unsupported double immediate.");
+ Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
} else {
- ASSERT(r.IsTagged());
- if (literal->IsSmi()) {
- __ li(scratch, Operand(literal));
- } else {
- __ LoadHeapObject(scratch, Handle<HeapObject>::cast(literal));
- }
+ ASSERT(r.IsSmiOrTagged());
+ __ LoadObject(scratch, literal);
}
return scratch;
} else if (op->IsStackSlot() || op->IsArgument()) {
@@ -325,7 +392,7 @@ DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
} else if (op->IsConstantOperand()) {
LConstantOperand* const_op = LConstantOperand::cast(op);
HConstant* constant = chunk_->LookupConstant(const_op);
- Handle<Object> literal = constant->handle();
+ Handle<Object> literal = constant->handle(isolate());
Representation r = chunk_->LookupLiteralRepresentation(const_op);
if (r.IsInteger32()) {
ASSERT(literal->IsNumber());
@@ -334,9 +401,9 @@ DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
__ cvt_d_w(dbl_scratch, flt_scratch);
return dbl_scratch;
} else if (r.IsDouble()) {
- Abort("unsupported double immediate");
+ Abort(kUnsupportedDoubleImmediate);
} else if (r.IsTagged()) {
- Abort("unsupported tagged immediate");
+ Abort(kUnsupportedTaggedImmediate);
}
} else if (op->IsStackSlot() || op->IsArgument()) {
MemOperand mem_op = ToMemOperand(op);
@@ -350,21 +417,39 @@ DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
HConstant* constant = chunk_->LookupConstant(op);
- ASSERT(chunk_->LookupLiteralRepresentation(op).IsTagged());
- return constant->handle();
+ ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
+ return constant->handle(isolate());
}
bool LCodeGen::IsInteger32(LConstantOperand* op) const {
- return chunk_->LookupLiteralRepresentation(op).IsInteger32();
+ return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
+}
+
+
+bool LCodeGen::IsSmi(LConstantOperand* op) const {
+ return chunk_->LookupLiteralRepresentation(op).IsSmi();
+}
+
+
+int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
+ return ToRepresentation(op, Representation::Integer32());
}
-int LCodeGen::ToInteger32(LConstantOperand* op) const {
+int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
+ const Representation& r) const {
HConstant* constant = chunk_->LookupConstant(op);
- ASSERT(chunk_->LookupLiteralRepresentation(op).IsInteger32());
- ASSERT(constant->HasInteger32Value());
- return constant->Integer32Value();
+ int32_t value = constant->Integer32Value();
+ if (r.IsInteger32()) return value;
+ ASSERT(r.IsSmiOrTagged());
+ return reinterpret_cast<int32_t>(Smi::FromInt(value));
+}
+
+
+Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
+ HConstant* constant = chunk_->LookupConstant(op);
+ return Smi::FromInt(constant->Integer32Value());
}
@@ -380,18 +465,21 @@ Operand LCodeGen::ToOperand(LOperand* op) {
LConstantOperand* const_op = LConstantOperand::cast(op);
HConstant* constant = chunk()->LookupConstant(const_op);
Representation r = chunk_->LookupLiteralRepresentation(const_op);
- if (r.IsInteger32()) {
+ if (r.IsSmi()) {
+ ASSERT(constant->HasSmiValue());
+ return Operand(Smi::FromInt(constant->Integer32Value()));
+ } else if (r.IsInteger32()) {
ASSERT(constant->HasInteger32Value());
return Operand(constant->Integer32Value());
} else if (r.IsDouble()) {
- Abort("ToOperand Unsupported double immediate.");
+ Abort(kToOperandUnsupportedDoubleImmediate);
}
ASSERT(r.IsTagged());
- return Operand(constant->handle());
+ return Operand(constant->handle(isolate()));
} else if (op->IsRegister()) {
return Operand(ToRegister(op));
} else if (op->IsDoubleRegister()) {
- Abort("ToOperand IsDoubleRegister unimplemented");
+ Abort(kToOperandIsDoubleRegisterUnimplemented);
return Operand(0);
}
// Stack slots not implemented, use ToMemOperand instead.
@@ -404,56 +492,29 @@ MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
ASSERT(!op->IsRegister());
ASSERT(!op->IsDoubleRegister());
ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
- int index = op->index();
- if (index >= 0) {
- // Local or spill slot. Skip the frame pointer, function, and
- // context in the fixed part of the frame.
- return MemOperand(fp, -(index + 3) * kPointerSize);
- } else {
- // Incoming parameter. Skip the return address.
- return MemOperand(fp, -(index - 1) * kPointerSize);
- }
+ return MemOperand(fp, StackSlotOffset(op->index()));
}
MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
ASSERT(op->IsDoubleStackSlot());
- int index = op->index();
- if (index >= 0) {
- // Local or spill slot. Skip the frame pointer, function, context,
- // and the first word of the double in the fixed part of the frame.
- return MemOperand(fp, -(index + 3) * kPointerSize + kPointerSize);
- } else {
- // Incoming parameter. Skip the return address and the first word of
- // the double.
- return MemOperand(fp, -(index - 1) * kPointerSize + kPointerSize);
- }
+ return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize);
}
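The inline offset arithmetic removed above is now centralised in StackSlotOffset(). Going only by the deleted comments, non-negative indices addressed locals or spill slots below the fixed frame (frame pointer, function, context) and negative indices addressed incoming parameters above the return address. A reconstruction of that rule; the actual helper may be defined differently:

    const int kPointerSize = 4;  // MIPS32.

    // Sketch of the offset rule the removed code implemented inline.
    int StackSlotOffsetSketch(int index) {
      if (index >= 0) {
        // Local or spill slot: skip fp, function and context in the fixed frame.
        return -(index + 3) * kPointerSize;
      }
      // Incoming parameter: skip the return address.
      return -(index - 1) * kPointerSize;
    }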
void LCodeGen::WriteTranslation(LEnvironment* environment,
- Translation* translation,
- int* arguments_index,
- int* arguments_count) {
+ Translation* translation) {
if (environment == NULL) return;
// The translation includes one command per value in the environment.
- int translation_size = environment->values()->length();
+ int translation_size = environment->translation_size();
// The output frame height does not include the parameters.
int height = translation_size - environment->parameter_count();
- // Function parameters are arguments to the outermost environment. The
- // arguments index points to the first element of a sequence of tagged
- // values on the stack that represent the arguments. This needs to be
- // kept in sync with the LArgumentsElements implementation.
- *arguments_index = -environment->parameter_count();
- *arguments_count = environment->parameter_count();
-
- WriteTranslation(environment->outer(),
- translation,
- arguments_index,
- arguments_count);
- int closure_id = *info()->closure() != *environment->closure()
+ WriteTranslation(environment->outer(), translation);
+ bool has_closure_id = !info()->closure().is_null() &&
+ !info()->closure().is_identical_to(environment->closure());
+ int closure_id = has_closure_id
? DefineDeoptimizationLiteral(environment->closure())
: Translation::kSelfLiteralId;
@@ -474,71 +535,66 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
ASSERT(height == 0);
translation->BeginSetterStubFrame(closure_id);
break;
+ case STUB:
+ translation->BeginCompiledStubFrame();
+ break;
case ARGUMENTS_ADAPTOR:
translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
break;
}
- // Inlined frames which push their arguments cause the index to be
- // bumped and a new stack area to be used for materialization.
- if (environment->entry() != NULL &&
- environment->entry()->arguments_pushed()) {
- *arguments_index = *arguments_index < 0
- ? GetStackSlotCount()
- : *arguments_index + *arguments_count;
- *arguments_count = environment->entry()->arguments_count() + 1;
- }
-
+ int object_index = 0;
+ int dematerialized_index = 0;
for (int i = 0; i < translation_size; ++i) {
LOperand* value = environment->values()->at(i);
- // spilled_registers_ and spilled_double_registers_ are either
- // both NULL or both set.
- if (environment->spilled_registers() != NULL && value != NULL) {
- if (value->IsRegister() &&
- environment->spilled_registers()[value->index()] != NULL) {
- translation->MarkDuplicate();
- AddToTranslation(translation,
- environment->spilled_registers()[value->index()],
- environment->HasTaggedValueAt(i),
- environment->HasUint32ValueAt(i),
- *arguments_index,
- *arguments_count);
- } else if (
- value->IsDoubleRegister() &&
- environment->spilled_double_registers()[value->index()] != NULL) {
- translation->MarkDuplicate();
- AddToTranslation(
- translation,
- environment->spilled_double_registers()[value->index()],
- false,
- false,
- *arguments_index,
- *arguments_count);
- }
- }
-
- AddToTranslation(translation,
+ AddToTranslation(environment,
+ translation,
value,
environment->HasTaggedValueAt(i),
environment->HasUint32ValueAt(i),
- *arguments_index,
- *arguments_count);
+ &object_index,
+ &dematerialized_index);
}
}
-void LCodeGen::AddToTranslation(Translation* translation,
+void LCodeGen::AddToTranslation(LEnvironment* environment,
+ Translation* translation,
LOperand* op,
bool is_tagged,
bool is_uint32,
- int arguments_index,
- int arguments_count) {
- if (op == NULL) {
- // TODO(twuerthinger): Introduce marker operands to indicate that this value
- // is not present and must be reconstructed from the deoptimizer. Currently
- // this is only used for the arguments object.
- translation->StoreArgumentsObject(arguments_index, arguments_count);
- } else if (op->IsStackSlot()) {
+ int* object_index_pointer,
+ int* dematerialized_index_pointer) {
+ if (op == LEnvironment::materialization_marker()) {
+ int object_index = (*object_index_pointer)++;
+ if (environment->ObjectIsDuplicateAt(object_index)) {
+ int dupe_of = environment->ObjectDuplicateOfAt(object_index);
+ translation->DuplicateObject(dupe_of);
+ return;
+ }
+ int object_length = environment->ObjectLengthAt(object_index);
+ if (environment->ObjectIsArgumentsAt(object_index)) {
+ translation->BeginArgumentsObject(object_length);
+ } else {
+ translation->BeginCapturedObject(object_length);
+ }
+ int dematerialized_index = *dematerialized_index_pointer;
+ int env_offset = environment->translation_size() + dematerialized_index;
+ *dematerialized_index_pointer += object_length;
+ for (int i = 0; i < object_length; ++i) {
+ LOperand* value = environment->values()->at(env_offset + i);
+ AddToTranslation(environment,
+ translation,
+ value,
+ environment->HasTaggedValueAt(env_offset + i),
+ environment->HasUint32ValueAt(env_offset + i),
+ object_index_pointer,
+ dematerialized_index_pointer);
+ }
+ return;
+ }
+
+ if (op->IsStackSlot()) {
if (is_tagged) {
translation->StoreStackSlot(op->index());
} else if (is_uint32) {
@@ -566,7 +622,7 @@ void LCodeGen::AddToTranslation(Translation* translation,
translation->StoreDoubleRegister(reg);
} else if (op->IsConstantOperand()) {
HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
- int src_index = DefineDeoptimizationLiteral(constant->handle());
+ int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
translation->StoreLiteral(src_index);
} else {
UNREACHABLE();
@@ -585,9 +641,8 @@ void LCodeGen::CallCodeGeneric(Handle<Code> code,
RelocInfo::Mode mode,
LInstruction* instr,
SafepointMode safepoint_mode) {
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
ASSERT(instr != NULL);
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
__ Call(code, mode);
RecordSafepointWithLazyDeopt(instr, safepoint_mode);
}
@@ -595,20 +650,36 @@ void LCodeGen::CallCodeGeneric(Handle<Code> code,
void LCodeGen::CallRuntime(const Runtime::Function* function,
int num_arguments,
- LInstruction* instr) {
+ LInstruction* instr,
+ SaveFPRegsMode save_doubles) {
ASSERT(instr != NULL);
- LPointerMap* pointers = instr->pointer_map();
- ASSERT(pointers != NULL);
- RecordPosition(pointers->position());
- __ CallRuntime(function, num_arguments);
+ __ CallRuntime(function, num_arguments, save_doubles);
+
RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}
+void LCodeGen::LoadContextFromDeferred(LOperand* context) {
+ if (context->IsRegister()) {
+ __ Move(cp, ToRegister(context));
+ } else if (context->IsStackSlot()) {
+ __ lw(cp, ToMemOperand(context));
+ } else if (context->IsConstantOperand()) {
+ HConstant* constant =
+ chunk_->LookupConstant(LConstantOperand::cast(context));
+ __ LoadObject(cp, Handle<Object>::cast(constant->handle(isolate())));
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
int argc,
- LInstruction* instr) {
+ LInstruction* instr,
+ LOperand* context) {
+ LoadContextFromDeferred(context);
__ CallRuntimeSaveDoubles(id);
RecordSafepointWithRegisters(
instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
@@ -633,8 +704,6 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
int frame_count = 0;
int jsframe_count = 0;
- int args_index = 0;
- int args_count = 0;
for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
++frame_count;
if (e->frame_type() == JS_FUNCTION) {
@@ -642,7 +711,7 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
}
}
Translation translation(&translations_, frame_count, jsframe_count, zone());
- WriteTranslation(environment, &translation, &args_index, &args_count);
+ WriteTranslation(environment, &translation);
int deoptimization_index = deoptimizations_.length();
int pc_offset = masm()->pc_offset();
environment->Register(deoptimization_index,
@@ -653,39 +722,98 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
}
-void LCodeGen::DeoptimizeIf(Condition cc,
+void LCodeGen::DeoptimizeIf(Condition condition,
LEnvironment* environment,
+ Deoptimizer::BailoutType bailout_type,
Register src1,
const Operand& src2) {
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
ASSERT(environment->HasBeenRegistered());
int id = environment->deoptimization_index();
- Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
+ ASSERT(info()->IsOptimizing() || info()->IsStub());
+ Address entry =
+ Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
if (entry == NULL) {
- Abort("bailout was not prepared");
+ Abort(kBailoutWasNotPrepared);
return;
}
ASSERT(FLAG_deopt_every_n_times < 2); // Other values not supported on MIPS.
-
if (FLAG_deopt_every_n_times == 1 &&
- info_->shared_info()->opt_count() == id) {
- __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
+ !info()->IsStub() &&
+ info()->opt_count() == id) {
+ ASSERT(frame_is_built_);
+ __ Call(entry, RelocInfo::RUNTIME_ENTRY);
return;
}
- if (FLAG_trap_on_deopt) {
+ if (info()->ShouldTrapOnDeopt()) {
Label skip;
- if (cc != al) {
- __ Branch(&skip, NegateCondition(cc), src1, src2);
+ if (condition != al) {
+ __ Branch(&skip, NegateCondition(condition), src1, src2);
}
__ stop("trap_on_deopt");
__ bind(&skip);
}
- // TODO(plind): The Arm port is a little different here, due to their
- // DeOpt jump table, which is not used for Mips yet.
- __ Jump(entry, RelocInfo::RUNTIME_ENTRY, cc, src1, src2);
+ ASSERT(info()->IsStub() || frame_is_built_);
+ if (condition == al && frame_is_built_) {
+ __ Call(entry, RelocInfo::RUNTIME_ENTRY, condition, src1, src2);
+ } else {
+ // We often have several deopts to the same entry, reuse the last
+ // jump entry if this is the case.
+ if (deopt_jump_table_.is_empty() ||
+ (deopt_jump_table_.last().address != entry) ||
+ (deopt_jump_table_.last().bailout_type != bailout_type) ||
+ (deopt_jump_table_.last().needs_frame != !frame_is_built_)) {
+ Deoptimizer::JumpTableEntry table_entry(entry,
+ bailout_type,
+ !frame_is_built_);
+ deopt_jump_table_.Add(table_entry, zone());
+ }
+ __ Branch(&deopt_jump_table_.last().label, condition, src1, src2);
+ }
+}
+
+
+void LCodeGen::DeoptimizeIf(Condition condition,
+ LEnvironment* environment,
+ Register src1,
+ const Operand& src2) {
+ Deoptimizer::BailoutType bailout_type = info()->IsStub()
+ ? Deoptimizer::LAZY
+ : Deoptimizer::EAGER;
+ DeoptimizeIf(condition, environment, bailout_type, src1, src2);
+}
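DeoptimizeIf appends a new jump-table entry only when the entry address, bailout type, or frame requirement differs from the most recently added one, so consecutive deopts to the same target share a slot and its label. The reuse test, sketched over a plain std::vector (the struct fields mirror the diff; the container and function name are illustrative):

    #include <cstdint>
    #include <vector>

    enum BailoutType { EAGER, LAZY };

    struct JumpTableEntry {
      uintptr_t address;
      BailoutType bailout_type;
      bool needs_frame;
    };

    // Index of the table entry this deopt should branch to; a new entry is
    // appended only when the last one does not already match.
    size_t AddOrReuseEntry(std::vector<JumpTableEntry>* table,
                           uintptr_t address, BailoutType type, bool needs_frame) {
      if (table->empty() ||
          table->back().address != address ||
          table->back().bailout_type != type ||
          table->back().needs_frame != needs_frame) {
        table->push_back({address, type, needs_frame});
      }
      return table->size() - 1;
    }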
+
+
+void LCodeGen::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) {
+ ZoneList<Handle<Map> > maps(1, zone());
+ ZoneList<Handle<JSObject> > objects(1, zone());
+ int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
+ for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
+ if (Code::IsWeakEmbeddedObject(code->kind(), it.rinfo()->target_object())) {
+ if (it.rinfo()->target_object()->IsMap()) {
+ Handle<Map> map(Map::cast(it.rinfo()->target_object()));
+ maps.Add(map, zone());
+ } else if (it.rinfo()->target_object()->IsJSObject()) {
+ Handle<JSObject> object(JSObject::cast(it.rinfo()->target_object()));
+ objects.Add(object, zone());
+ }
+ }
+ }
+#ifdef VERIFY_HEAP
+ // This disables verification of weak embedded objects after full GC.
+ // AddDependentCode can cause a GC, which would observe the state where
+ // this code is not yet in the depended code lists of the embedded maps.
+ NoWeakObjectVerificationScope disable_verification_of_embedded_objects;
+#endif
+ for (int i = 0; i < maps.length(); i++) {
+ maps.at(i)->AddDependentCode(DependentCode::kWeaklyEmbeddedGroup, code);
+ }
+ for (int i = 0; i < objects.length(); i++) {
+ AddWeakObjectToCodeDependency(isolate()->heap(), objects.at(i), code);
+ }
}
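RegisterDependentCodeForEmbeddedMaps walks the code object's embedded-object relocation entries and splits the weak targets into maps, which get dependent-code registration, and JS objects, which go into the weak-object-to-code table. The partitioning step only, sketched with stand-in types since the RelocIterator plumbing is internal to V8:

    #include <vector>

    struct EmbeddedObject { bool is_weak; bool is_map; bool is_js_object; };

    // Stand-in for the RelocIterator loop: weak maps and weak JS objects are
    // collected separately so each can be registered with its own mechanism.
    void PartitionWeakEmbeddedObjects(const std::vector<EmbeddedObject>& embedded,
                                      std::vector<EmbeddedObject>* maps,
                                      std::vector<EmbeddedObject>* js_objects) {
      for (const EmbeddedObject& obj : embedded) {
        if (!obj.is_weak) continue;
        if (obj.is_map) maps->push_back(obj);
        else if (obj.is_js_object) js_objects->push_back(obj);
      }
    }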
@@ -695,16 +823,19 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
Handle<DeoptimizationInputData> data =
factory()->NewDeoptimizationInputData(length, TENURED);
- Handle<ByteArray> translations = translations_.CreateByteArray();
+ Handle<ByteArray> translations =
+ translations_.CreateByteArray(isolate()->factory());
data->SetTranslationByteArray(*translations);
data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
Handle<FixedArray> literals =
factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
- for (int i = 0; i < deoptimization_literals_.length(); i++) {
- literals->set(i, *deoptimization_literals_[i]);
+ { AllowDeferredHandleDereference copy_handles;
+ for (int i = 0; i < deoptimization_literals_.length(); i++) {
+ literals->set(i, *deoptimization_literals_[i]);
+ }
+ data->SetLiteralArray(*literals);
}
- data->SetLiteralArray(*literals);
data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
@@ -778,10 +909,6 @@ void LCodeGen::RecordSafepoint(
safepoint.DefinePointerRegister(ToRegister(pointer), zone());
}
}
- if (kind & Safepoint::kWithRegisters) {
- // Register cp always contains a pointer to the context.
- safepoint.DefinePointerRegister(cp, zone());
- }
}
@@ -792,7 +919,7 @@ void LCodeGen::RecordSafepoint(LPointerMap* pointers,
void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
- LPointerMap empty_pointers(RelocInfo::kNoPosition, zone());
+ LPointerMap empty_pointers(zone());
RecordSafepoint(&empty_pointers, deopt_mode);
}
@@ -814,18 +941,26 @@ void LCodeGen::RecordSafepointWithRegistersAndDoubles(
}
-void LCodeGen::RecordPosition(int position) {
+void LCodeGen::RecordAndWritePosition(int position) {
if (position == RelocInfo::kNoPosition) return;
masm()->positions_recorder()->RecordPosition(position);
+ masm()->positions_recorder()->WriteRecordedPositions();
+}
+
+
+static const char* LabelType(LLabel* label) {
+ if (label->is_loop_header()) return " (loop header)";
+ if (label->is_osr_entry()) return " (OSR entry)";
+ return "";
}
void LCodeGen::DoLabel(LLabel* label) {
- if (label->is_loop_header()) {
- Comment(";;; B%d - LOOP entry", label->block_id());
- } else {
- Comment(";;; B%d", label->block_id());
- }
+ Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
+ current_instruction_,
+ label->hydrogen_value()->id(),
+ label->block_id(),
+ LabelType(label));
__ bind(label->label());
current_block_ = label->block_id();
DoGap(label);
@@ -859,43 +994,34 @@ void LCodeGen::DoParameter(LParameter* instr) {
void LCodeGen::DoCallStub(LCallStub* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->result()).is(v0));
switch (instr->hydrogen()->major_key()) {
case CodeStub::RegExpConstructResult: {
RegExpConstructResultStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::RegExpExec: {
RegExpExecStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::SubString: {
SubStringStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::NumberToString: {
- NumberToStringStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::StringAdd: {
- StringAddStub stub(NO_STRING_ADD_FLAGS);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::StringCompare: {
StringCompareStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::TranscendentalCache: {
__ lw(a0, MemOperand(sp, 0));
TranscendentalCacheStub stub(instr->transcendental_type(),
TranscendentalCacheStub::TAGGED);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
default:
@@ -905,56 +1031,203 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
- // Nothing to do.
+ GenerateOsrPrologue();
}
void LCodeGen::DoModI(LModI* instr) {
- Register scratch = scratch0();
- const Register left = ToRegister(instr->left());
- const Register result = ToRegister(instr->result());
+ HMod* hmod = instr->hydrogen();
+ HValue* left = hmod->left();
+ HValue* right = hmod->right();
+ if (hmod->HasPowerOf2Divisor()) {
+ const Register left_reg = ToRegister(instr->left());
+ const Register result_reg = ToRegister(instr->result());
+
+ // Note: The code below even works when right contains kMinInt.
+ int32_t divisor = Abs(right->GetInteger32Constant());
+
+ Label left_is_not_negative, done;
+ if (left->CanBeNegative()) {
+ __ Branch(left_reg.is(result_reg) ? PROTECT : USE_DELAY_SLOT,
+ &left_is_not_negative, ge, left_reg, Operand(zero_reg));
+ __ subu(result_reg, zero_reg, left_reg);
+ __ And(result_reg, result_reg, divisor - 1);
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(eq, instr->environment(), result_reg, Operand(zero_reg));
+ }
+ __ Branch(USE_DELAY_SLOT, &done);
+ __ subu(result_reg, zero_reg, result_reg);
+ }
- Label done;
+ __ bind(&left_is_not_negative);
+ __ And(result_reg, left_reg, divisor - 1);
+ __ bind(&done);
- if (instr->hydrogen()->HasPowerOf2Divisor()) {
- Register scratch = scratch0();
- ASSERT(!left.is(scratch));
- __ mov(scratch, left);
- int32_t p2constant = HConstant::cast(
- instr->hydrogen()->right())->Integer32Value();
- ASSERT(p2constant != 0);
- // Result always takes the sign of the dividend (left).
- p2constant = abs(p2constant);
-
- Label positive_dividend;
- __ Branch(USE_DELAY_SLOT, &positive_dividend, ge, left, Operand(zero_reg));
- __ subu(result, zero_reg, left);
- __ And(result, result, p2constant - 1);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg));
+ } else if (hmod->fixed_right_arg().has_value) {
+ const Register left_reg = ToRegister(instr->left());
+ const Register result_reg = ToRegister(instr->result());
+ const Register right_reg = ToRegister(instr->right());
+
+ int32_t divisor = hmod->fixed_right_arg().value;
+ ASSERT(IsPowerOf2(divisor));
+
+ // Check if our assumption of a fixed right operand still holds.
+ DeoptimizeIf(ne, instr->environment(), right_reg, Operand(divisor));
+
+ Label left_is_not_negative, done;
+ if (left->CanBeNegative()) {
+ __ Branch(left_reg.is(result_reg) ? PROTECT : USE_DELAY_SLOT,
+ &left_is_not_negative, ge, left_reg, Operand(zero_reg));
+ __ subu(result_reg, zero_reg, left_reg);
+ __ And(result_reg, result_reg, divisor - 1);
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(eq, instr->environment(), result_reg, Operand(zero_reg));
+ }
+ __ Branch(USE_DELAY_SLOT, &done);
+ __ subu(result_reg, zero_reg, result_reg);
}
- __ Branch(USE_DELAY_SLOT, &done);
- __ subu(result, zero_reg, result);
- __ bind(&positive_dividend);
- __ And(result, scratch, p2constant - 1);
+
+ __ bind(&left_is_not_negative);
+ __ And(result_reg, left_reg, divisor - 1);
+ __ bind(&done);
+
} else {
+ const Register scratch = scratch0();
+ const Register left_reg = ToRegister(instr->left());
+ const Register result_reg = ToRegister(instr->result());
+
// div runs in the background while we check for special cases.
- Register right = EmitLoadRegister(instr->right(), scratch);
- __ div(left, right);
+ Register right_reg = EmitLoadRegister(instr->right(), scratch);
+ __ div(left_reg, right_reg);
- // Check for x % 0.
- if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
- DeoptimizeIf(eq, instr->environment(), right, Operand(zero_reg));
+ Label done;
+ // Check for x % 0; we have to deopt in this case because we can't return
+ // a NaN.
+ if (right->CanBeZero()) {
+ DeoptimizeIf(eq, instr->environment(), right_reg, Operand(zero_reg));
}
- __ Branch(USE_DELAY_SLOT, &done, ge, left, Operand(zero_reg));
- __ mfhi(result);
+ // Check for kMinInt % -1; we have to deopt if we care about -0, because we
+ // can't return that.
+ if (left->RangeCanInclude(kMinInt) && right->RangeCanInclude(-1)) {
+ Label left_not_min_int;
+ __ Branch(&left_not_min_int, ne, left_reg, Operand(kMinInt));
+ // TODO(svenpanne) Don't deopt when we don't care about -0.
+ DeoptimizeIf(eq, instr->environment(), right_reg, Operand(-1));
+ __ bind(&left_not_min_int);
+ }
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg));
+ // TODO(svenpanne) Only emit the test/deopt if we have to.
+ __ Branch(USE_DELAY_SLOT, &done, ge, left_reg, Operand(zero_reg));
+ __ mfhi(result_reg);
+
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(eq, instr->environment(), result_reg, Operand(zero_reg));
}
+ __ bind(&done);
+ }
+}
+
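The power-of-2 path above reduces left % divisor to a bitwise AND with divisor - 1; only the sign of the dividend has to be restored afterwards, which is what the negate/mask/negate sequence around left_is_not_negative does. A minimal C++ sketch of the same computation (illustrative only, not the generated code; unsigned arithmetic sidesteps signed-overflow issues for kMinInt):

    #include <cstdint>

    // left % divisor for a power-of-two |divisor|; the result takes the sign
    // of the dividend, matching the truncating semantics implemented above.
    int32_t ModPowerOfTwo(int32_t left, int32_t divisor) {
      uint32_t abs_divisor = divisor < 0 ? 0u - static_cast<uint32_t>(divisor)
                                         : static_cast<uint32_t>(divisor);
      uint32_t mask = abs_divisor - 1;
      if (left < 0) {
        uint32_t magnitude = (0u - static_cast<uint32_t>(left)) & mask;
        // A zero result here is the -0 case the generated code deopts on
        // when kBailoutOnMinusZero is set.
        return -static_cast<int32_t>(magnitude);
      }
      return static_cast<int32_t>(static_cast<uint32_t>(left) & mask);
    }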
+
+void LCodeGen::EmitSignedIntegerDivisionByConstant(
+ Register result,
+ Register dividend,
+ int32_t divisor,
+ Register remainder,
+ Register scratch,
+ LEnvironment* environment) {
+ ASSERT(!AreAliased(dividend, scratch, at, no_reg));
+
+ uint32_t divisor_abs = abs(divisor);
+
+ int32_t power_of_2_factor =
+ CompilerIntrinsics::CountTrailingZeros(divisor_abs);
+
+ switch (divisor_abs) {
+ case 0:
+ DeoptimizeIf(al, environment);
+ return;
+
+ case 1:
+ if (divisor > 0) {
+ __ Move(result, dividend);
+ } else {
+ __ SubuAndCheckForOverflow(result, zero_reg, dividend, scratch);
+ DeoptimizeIf(lt, environment, scratch, Operand(zero_reg));
+ }
+ // Compute the remainder.
+ __ Move(remainder, zero_reg);
+ return;
+
+ default:
+ if (IsPowerOf2(divisor_abs)) {
+ // Branch- and condition-free code for integer division by a power
+ // of two.
+ int32_t power = WhichPowerOf2(divisor_abs);
+ if (power > 1) {
+ __ sra(scratch, dividend, power - 1);
+ }
+ __ srl(scratch, scratch, 32 - power);
+ __ Addu(scratch, dividend, Operand(scratch));
+ __ sra(result, scratch, power);
+ // Negate if necessary.
+ // We don't need to check for overflow because the case '-1' is
+ // handled separately.
+ if (divisor < 0) {
+ ASSERT(divisor != -1);
+ __ Subu(result, zero_reg, Operand(result));
+ }
+ // Compute the remainder.
+ if (divisor > 0) {
+ __ sll(scratch, result, power);
+ __ Subu(remainder, dividend, Operand(scratch));
+ } else {
+ __ sll(scratch, result, power);
+ __ Addu(remainder, dividend, Operand(scratch));
+ }
+ return;
+ } else if (LChunkBuilder::HasMagicNumberForDivisor(divisor)) {
+ // Use magic numbers for a few specific divisors.
+ // Details and proofs can be found in:
+ // - Hacker's Delight, Henry S. Warren, Jr.
+ // - The PowerPC Compiler Writer's Guide
+ // and probably many others.
+ //
+ // We handle
+ // <divisor with magic numbers> * <power of 2>
+ // but not
+ // <divisor with magic numbers> * <other divisor with magic numbers>
+ DivMagicNumbers magic_numbers =
+ DivMagicNumberFor(divisor_abs >> power_of_2_factor);
+ // Branch- and condition-free code for integer division by a constant,
+ // using the magic numbers computed above.
+ const int32_t M = magic_numbers.M;
+ const int32_t s = magic_numbers.s + power_of_2_factor;
+
+ __ li(scratch, Operand(M));
+ __ mult(dividend, scratch);
+ __ mfhi(scratch);
+ if (M < 0) {
+ __ Addu(scratch, scratch, Operand(dividend));
+ }
+ if (s > 0) {
+ __ sra(scratch, scratch, s);
+ __ mov(scratch, scratch);
+ }
+ __ srl(at, dividend, 31);
+ __ Addu(result, scratch, Operand(at));
+ if (divisor < 0) __ Subu(result, zero_reg, Operand(result));
+ // Compute the remainder.
+ __ li(scratch, Operand(divisor));
+ __ Mul(scratch, result, Operand(scratch));
+ __ Subu(remainder, dividend, Operand(scratch));
+ } else {
+ __ li(scratch, Operand(divisor));
+ __ div(dividend, scratch);
+ __ mfhi(remainder);
+ __ mflo(result);
+ }
}
- __ bind(&done);
}
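EmitSignedIntegerDivisionByConstant follows the reciprocal-multiplication scheme from Hacker's Delight: for a power-of-two divisor it biases negative dividends by divisor - 1 before the arithmetic shift so the shift truncates toward zero, and for other divisors it multiplies by a precomputed magic number M, keeps the high half of the 64-bit product, adds the dividend back when M is negative, shifts by s, and adds the dividend's sign bit. A C++ sketch of both constant paths follows; both assume arithmetic right shifts of signed values (true on the MIPS targets here), and the (0x92492493, 2) pair in the example is the published magic pair for dividing by 7, illustrative only and not the table DivMagicNumberFor consults:

    #include <cstdint>

    // Truncating n / 2^power without branches.
    int32_t DivideByPowerOfTwo(int32_t n, int power) {
      // 2^power - 1 if n is negative, 0 otherwise.
      uint32_t bias = static_cast<uint32_t>(n >> 31) >> (32 - power);
      return (n + static_cast<int32_t>(bias)) >> power;
    }

    // Truncating n / d via a magic pair (M, s) precomputed for |d|.
    int32_t DivideByMagic(int32_t n, int32_t M, int s, bool divisor_negative) {
      int64_t product = static_cast<int64_t>(M) * n;
      int32_t q = static_cast<int32_t>(
          static_cast<uint64_t>(product) >> 32);   // mfhi
      if (M < 0) q += n;                            // the Addu(dividend) step
      q >>= s;                                      // sra by s
      q += static_cast<uint32_t>(n) >> 31;          // round toward zero
      return divisor_negative ? -q : q;
    }

    // Example: n / 7 with the published constants for that divisor.
    int32_t DivideBy7(int32_t n) {
      return DivideByMagic(n, static_cast<int32_t>(0x92492493u), 2, false);
    }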
@@ -980,7 +1253,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
__ bind(&left_not_zero);
}
- // Check for (-kMinInt / -1).
+ // Check for (kMinInt / -1).
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
Label left_not_min_int;
__ Branch(&left_not_min_int, ne, left, Operand(kMinInt));
@@ -988,12 +1261,90 @@ void LCodeGen::DoDivI(LDivI* instr) {
__ bind(&left_not_min_int);
}
- __ mfhi(result);
- DeoptimizeIf(ne, instr->environment(), result, Operand(zero_reg));
+ if (!instr->hydrogen()->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+ __ mfhi(result);
+ DeoptimizeIf(ne, instr->environment(), result, Operand(zero_reg));
+ }
__ mflo(result);
}
+void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
+ DoubleRegister addend = ToDoubleRegister(instr->addend());
+ DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
+ DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());
+
+ // This is computed in-place.
+ ASSERT(addend.is(ToDoubleRegister(instr->result())));
+
+ __ madd_d(addend, addend, multiplier, multiplicand);
+}
+
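For reference, the single madd_d above evaluates result = addend + multiplier * multiplicand, which is why the result operand is asserted to alias the addend register. In plain C++ terms (operand naming only, not a claim about the FPU's rounding behaviour):

    // The arithmetic performed by DoMultiplyAddD, with result aliasing addend.
    double MultiplyAdd(double addend, double multiplier, double multiplicand) {
      return addend + multiplier * multiplicand;
    }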
+
+void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
+ const Register result = ToRegister(instr->result());
+ const Register left = ToRegister(instr->left());
+ const Register remainder = ToRegister(instr->temp());
+ const Register scratch = scratch0();
+
+ if (instr->right()->IsConstantOperand()) {
+ Label done;
+ int32_t divisor = ToInteger32(LConstantOperand::cast(instr->right()));
+ if (divisor < 0) {
+ DeoptimizeIf(eq, instr->environment(), left, Operand(zero_reg));
+ }
+ EmitSignedIntegerDivisionByConstant(result,
+ left,
+ divisor,
+ remainder,
+ scratch,
+ instr->environment());
+ // We performed a truncating division. Correct the result if necessary.
+ __ Branch(&done, eq, remainder, Operand(zero_reg), USE_DELAY_SLOT);
+ __ Xor(scratch, remainder, Operand(divisor));
+ __ Branch(&done, ge, scratch, Operand(zero_reg));
+ __ Subu(result, result, Operand(1));
+ __ bind(&done);
+ } else {
+ Label done;
+ const Register right = ToRegister(instr->right());
+
+ // On MIPS, div runs in the background while we
+ // check for special cases.
+ __ div(left, right);
+
+ // Check for x / 0.
+ DeoptimizeIf(eq, instr->environment(), right, Operand(zero_reg));
+
+ // Check for (0 / -x) that will produce negative zero.
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label left_not_zero;
+ __ Branch(&left_not_zero, ne, left, Operand(zero_reg));
+ DeoptimizeIf(lt, instr->environment(), right, Operand(zero_reg));
+ __ bind(&left_not_zero);
+ }
+
+ // Check for (kMinInt / -1).
+ if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+ Label left_not_min_int;
+ __ Branch(&left_not_min_int, ne, left, Operand(kMinInt));
+ DeoptimizeIf(eq, instr->environment(), right, Operand(-1));
+ __ bind(&left_not_min_int);
+ }
+
+ __ mfhi(remainder);
+ __ mflo(result);
+
+ // We performed a truncating division. Correct the result if necessary.
+ __ Branch(&done, eq, remainder, Operand(zero_reg), USE_DELAY_SLOT);
+ __ Xor(scratch, remainder, Operand(right));
+ __ Branch(&done, ge, scratch, Operand(zero_reg));
+ __ Subu(result, result, Operand(1));
+ __ bind(&done);
+ }
+}
+
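DoMathFloorOfDiv turns the machine's truncating quotient into a flooring one: when the remainder is non-zero and its sign differs from the divisor's, the exact quotient was negative and truncation rounded it the wrong way, so one is subtracted. That is what the Xor/Branch/Subu sequence after each division does. A small C++ sketch of the correction (the x / 0 and kMinInt / -1 cases are excluded, matching the deopts above):

    #include <cstdint>

    int32_t FloorDiv(int32_t left, int32_t right) {
      int32_t quotient = left / right;    // truncates toward zero (mflo)
      int32_t remainder = left % right;   // (mfhi)
      // The signs differ iff the xor is negative, exactly the scratch test.
      if (remainder != 0 && ((remainder ^ right) < 0)) {
        quotient -= 1;
      }
      return quotient;
    }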
+
void LCodeGen::DoMulI(LMulI* instr) {
Register scratch = scratch0();
Register result = ToRegister(instr->result());
@@ -1001,12 +1352,11 @@ void LCodeGen::DoMulI(LMulI* instr) {
Register left = ToRegister(instr->left());
LOperand* right_op = instr->right();
- bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
bool bailout_on_minus_zero =
instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
+ bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
- if (right_op->IsConstantOperand() && !can_overflow) {
- // Use optimized code for specific constants.
+ if (right_op->IsConstantOperand()) {
int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
if (bailout_on_minus_zero && (constant < 0)) {
@@ -1017,7 +1367,12 @@ void LCodeGen::DoMulI(LMulI* instr) {
switch (constant) {
case -1:
- __ Subu(result, zero_reg, left);
+ if (overflow) {
+ __ SubuAndCheckForOverflow(result, zero_reg, left, scratch);
+ DeoptimizeIf(lt, instr->environment(), scratch, Operand(zero_reg));
+ } else {
+ __ Subu(result, zero_reg, left);
+ }
break;
case 0:
if (bailout_on_minus_zero) {
@@ -1038,27 +1393,23 @@ void LCodeGen::DoMulI(LMulI* instr) {
int32_t mask = constant >> 31;
uint32_t constant_abs = (constant + mask) ^ mask;
- if (IsPowerOf2(constant_abs) ||
- IsPowerOf2(constant_abs - 1) ||
- IsPowerOf2(constant_abs + 1)) {
- if (IsPowerOf2(constant_abs)) {
- int32_t shift = WhichPowerOf2(constant_abs);
- __ sll(result, left, shift);
- } else if (IsPowerOf2(constant_abs - 1)) {
- int32_t shift = WhichPowerOf2(constant_abs - 1);
- __ sll(result, left, shift);
- __ Addu(result, result, left);
- } else if (IsPowerOf2(constant_abs + 1)) {
- int32_t shift = WhichPowerOf2(constant_abs + 1);
- __ sll(result, left, shift);
- __ Subu(result, result, left);
- }
-
- // Correct the sign of the result is the constant is negative.
- if (constant < 0) {
- __ Subu(result, zero_reg, result);
- }
-
+ if (IsPowerOf2(constant_abs)) {
+ int32_t shift = WhichPowerOf2(constant_abs);
+ __ sll(result, left, shift);
+ // Correct the sign of the result if the constant is negative.
+ if (constant < 0) __ Subu(result, zero_reg, result);
+ } else if (IsPowerOf2(constant_abs - 1)) {
+ int32_t shift = WhichPowerOf2(constant_abs - 1);
+ __ sll(scratch, left, shift);
+ __ Addu(result, scratch, left);
+ // Correct the sign of the result if the constant is negative.
+ if (constant < 0) __ Subu(result, zero_reg, result);
+ } else if (IsPowerOf2(constant_abs + 1)) {
+ int32_t shift = WhichPowerOf2(constant_abs + 1);
+ __ sll(scratch, left, shift);
+ __ Subu(result, scratch, left);
+ // Correct the sign of the result if the constant is negative.
+ if (constant < 0) __ Subu(result, zero_reg, result);
} else {
// Generate standard code.
__ li(at, constant);
@@ -1067,29 +1418,40 @@ void LCodeGen::DoMulI(LMulI* instr) {
}
} else {
- Register right = EmitLoadRegister(right_op, scratch);
- if (bailout_on_minus_zero) {
- __ Or(ToRegister(instr->temp()), left, right);
- }
+ ASSERT(right_op->IsRegister());
+ Register right = ToRegister(right_op);
- if (can_overflow) {
+ if (overflow) {
// hi:lo = left * right.
- __ mult(left, right);
- __ mfhi(scratch);
- __ mflo(result);
+ if (instr->hydrogen()->representation().IsSmi()) {
+ __ SmiUntag(result, left);
+ __ mult(result, right);
+ __ mfhi(scratch);
+ __ mflo(result);
+ } else {
+ __ mult(left, right);
+ __ mfhi(scratch);
+ __ mflo(result);
+ }
__ sra(at, result, 31);
DeoptimizeIf(ne, instr->environment(), scratch, Operand(at));
} else {
- __ Mul(result, left, right);
+ if (instr->hydrogen()->representation().IsSmi()) {
+ __ SmiUntag(result, left);
+ __ Mul(result, result, right);
+ } else {
+ __ Mul(result, left, right);
+ }
}
if (bailout_on_minus_zero) {
- // Bail out if the result is supposed to be negative zero.
Label done;
- __ Branch(&done, ne, result, Operand(zero_reg));
- DeoptimizeIf(lt,
+ __ Xor(at, left, right);
+ __ Branch(&done, ge, at, Operand(zero_reg));
+ // Bail out if the result is minus zero.
+ DeoptimizeIf(eq,
instr->environment(),
- ToRegister(instr->temp()),
+ result,
Operand(zero_reg));
__ bind(&done);
}
@@ -1120,7 +1482,11 @@ void LCodeGen::DoBitI(LBitI* instr) {
__ Or(result, left, right);
break;
case Token::BIT_XOR:
- __ Xor(result, left, right);
+ if (right_op->IsConstantOperand() && right.immediate() == int32_t(~0)) {
+ __ Nor(result, zero_reg, left);
+ } else {
+ __ Xor(result, left, right);
+ }
break;
default:
UNREACHABLE();
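The BIT_XOR special case above uses the identity x ^ ~0 == ~x; MIPS has no dedicated bitwise-not instruction, so the complement is emitted as a single nor against the zero register, since ~(0 | x) == ~x. In C++ terms:

    #include <cstdint>

    uint32_t BitwiseNot(uint32_t x) {
      // x ^ 0xFFFFFFFFu, ~x and ~(0u | x) are the same value; the last form
      // is what Nor(result, zero_reg, left) computes in one instruction.
      return ~(0u | x);
    }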
@@ -1135,11 +1501,15 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
LOperand* right_op = instr->right();
Register left = ToRegister(instr->left());
Register result = ToRegister(instr->result());
+ Register scratch = scratch0();
if (right_op->IsRegister()) {
// No need to mask the right operand on MIPS, it is built into the variable
// shift instructions.
switch (instr->op()) {
+ case Token::ROR:
+ __ Ror(result, left, Operand(ToRegister(right_op)));
+ break;
case Token::SAR:
__ srav(result, left, ToRegister(right_op));
break;
@@ -1161,6 +1531,13 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
int value = ToInteger32(LConstantOperand::cast(right_op));
uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
switch (instr->op()) {
+ case Token::ROR:
+ if (shift_count != 0) {
+ __ Ror(result, left, Operand(shift_count));
+ } else {
+ __ Move(result, left);
+ }
+ break;
case Token::SAR:
if (shift_count != 0) {
__ sra(result, left, shift_count);
@@ -1181,7 +1558,18 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
break;
case Token::SHL:
if (shift_count != 0) {
- __ sll(result, left, shift_count);
+ if (instr->hydrogen_value()->representation().IsSmi() &&
+ instr->can_deopt()) {
+ if (shift_count != 1) {
+ __ sll(result, left, shift_count - 1);
+ __ SmiTagCheckOverflow(result, result, scratch);
+ } else {
+ __ SmiTagCheckOverflow(result, left, scratch);
+ }
+ DeoptimizeIf(lt, instr->environment(), scratch, Operand(zero_reg));
+ } else {
+ __ sll(result, left, shift_count);
+ }
} else {
__ Move(result, left);
}
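In the SHL-by-constant case, a Smi-represented operand already holds 2 * value, so the code shifts the tagged word by shift_count - 1 and lets SmiTagCheckOverflow supply the final doubling; that last doubling both restores the tag scale and provides the overflow check that drives the deopt. A sketch under the assumption of 32-bit words with a one-bit Smi tag (so a tagged Smi is simply value * 2):

    #include <cstdint>
    #include <optional>

    // tagged_smi holds 2 * value. Returns the tagged result of value << count,
    // or nullopt where the generated code would deoptimize.
    std::optional<int32_t> SmiShiftLeft(int32_t tagged_smi, int count) {
      int32_t shifted = static_cast<int32_t>(
          static_cast<uint32_t>(tagged_smi) << (count - 1));
      int64_t doubled = 2 * static_cast<int64_t>(shifted);  // SmiTagCheckOverflow
      if (doubled < INT32_MIN || doubled > INT32_MAX) return std::nullopt;
      return static_cast<int32_t>(doubled);
    }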
@@ -1234,7 +1622,11 @@ void LCodeGen::DoSubI(LSubI* instr) {
void LCodeGen::DoConstantI(LConstantI* instr) {
- ASSERT(instr->result()->IsRegister());
+ __ li(ToRegister(instr->result()), Operand(instr->value()));
+}
+
+
+void LCodeGen::DoConstantS(LConstantS* instr) {
__ li(ToRegister(instr->result()), Operand(instr->value()));
}
@@ -1247,28 +1639,15 @@ void LCodeGen::DoConstantD(LConstantD* instr) {
}
-void LCodeGen::DoConstantT(LConstantT* instr) {
- Handle<Object> value = instr->value();
- if (value->IsSmi()) {
- __ li(ToRegister(instr->result()), Operand(value));
- } else {
- __ LoadHeapObject(ToRegister(instr->result()),
- Handle<HeapObject>::cast(value));
- }
-}
-
-
-void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
- Register result = ToRegister(instr->result());
- Register array = ToRegister(instr->value());
- __ lw(result, FieldMemOperand(array, JSArray::kLengthOffset));
+void LCodeGen::DoConstantE(LConstantE* instr) {
+ __ li(ToRegister(instr->result()), Operand(instr->value()));
}
-void LCodeGen::DoFixedArrayBaseLength(LFixedArrayBaseLength* instr) {
- Register result = ToRegister(instr->result());
- Register array = ToRegister(instr->value());
- __ lw(result, FieldMemOperand(array, FixedArrayBase::kLengthOffset));
+void LCodeGen::DoConstantT(LConstantT* instr) {
+ Handle<Object> value = instr->value(isolate());
+ AllowDeferredHandleDereference smi_check;
+ __ LoadObject(ToRegister(instr->result()), value);
}
@@ -1299,9 +1678,11 @@ void LCodeGen::DoValueOf(LValueOf* instr) {
Register map = ToRegister(instr->temp());
Label done;
- // If the object is a smi return the object.
- __ Move(result, input);
- __ JumpIfSmi(input, &done);
+ if (!instr->hydrogen()->value()->IsHeapObject()) {
+ // If the object is a smi return the object.
+ __ Move(result, input);
+ __ JumpIfSmi(input, &done);
+ }
// If the object is not a value type, return the object.
__ GetObjectType(input, map, map);
@@ -1350,16 +1731,53 @@ void LCodeGen::DoDateField(LDateField* instr) {
}
-void LCodeGen::DoBitNotI(LBitNotI* instr) {
- Register input = ToRegister(instr->value());
- Register result = ToRegister(instr->result());
- __ Nor(result, zero_reg, Operand(input));
+void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
+ Register string = ToRegister(instr->string());
+ LOperand* index_op = instr->index();
+ Register value = ToRegister(instr->value());
+ Register scratch = scratch0();
+ String::Encoding encoding = instr->encoding();
+
+ if (FLAG_debug_code) {
+ __ lw(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
+ __ lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+
+ __ And(scratch, scratch,
+ Operand(kStringRepresentationMask | kStringEncodingMask));
+ static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+ static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+ __ Subu(at, scratch, Operand(encoding == String::ONE_BYTE_ENCODING
+ ? one_byte_seq_type : two_byte_seq_type));
+ __ Check(eq, kUnexpectedStringType, at, Operand(zero_reg));
+ }
+
+ if (index_op->IsConstantOperand()) {
+ int constant_index = ToInteger32(LConstantOperand::cast(index_op));
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ __ sb(value,
+ FieldMemOperand(string, SeqString::kHeaderSize + constant_index));
+ } else {
+ __ sh(value,
+ FieldMemOperand(string, SeqString::kHeaderSize + constant_index * 2));
+ }
+ } else {
+ Register index = ToRegister(index_op);
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ __ Addu(scratch, string, Operand(index));
+ __ sb(value, FieldMemOperand(scratch, SeqString::kHeaderSize));
+ } else {
+ __ sll(scratch, index, 1);
+ __ Addu(scratch, string, scratch);
+ __ sh(value, FieldMemOperand(scratch, SeqString::kHeaderSize));
+ }
+ }
}
void LCodeGen::DoThrow(LThrow* instr) {
Register input_reg = EmitLoadRegister(instr->value(), at);
__ push(input_reg);
+ ASSERT(ToRegister(instr->context()).is(cp));
CallRuntime(Runtime::kThrow, 1, instr);
if (FLAG_debug_code) {
@@ -1412,7 +1830,7 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
LOperand* right = instr->right();
HMathMinMax::Operation operation = instr->hydrogen()->operation();
Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
- if (instr->hydrogen()->representation().IsInteger32()) {
+ if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
Register left_reg = ToRegister(left);
Operand right_op = (right->IsRegister() || right->IsConstantOperand())
? ToOperand(right)
@@ -1511,116 +1929,145 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->left()).is(a1));
ASSERT(ToRegister(instr->right()).is(a0));
ASSERT(ToRegister(instr->result()).is(v0));
BinaryOpStub stub(instr->op(), NO_OVERWRITE);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
// Other arch use a nop here, to signal that there is no inlined
// patchable code. Mips does not need the nop, since our marker
// instruction (andi zero_reg) will never be used in normal code.
}
-int LCodeGen::GetNextEmittedBlock(int block) {
- for (int i = block + 1; i < graph()->blocks()->length(); ++i) {
- LLabel* label = chunk_->GetLabel(i);
- if (!label->HasReplacement()) return i;
- }
- return -1;
-}
+template<class InstrType>
+void LCodeGen::EmitBranch(InstrType instr,
+ Condition condition,
+ Register src1,
+ const Operand& src2) {
+ int left_block = instr->TrueDestination(chunk_);
+ int right_block = instr->FalseDestination(chunk_);
-
-void LCodeGen::EmitBranch(int left_block, int right_block,
- Condition cc, Register src1, const Operand& src2) {
- int next_block = GetNextEmittedBlock(current_block_);
- right_block = chunk_->LookupDestination(right_block);
- left_block = chunk_->LookupDestination(left_block);
- if (right_block == left_block) {
+ int next_block = GetNextEmittedBlock();
+ if (right_block == left_block || condition == al) {
EmitGoto(left_block);
} else if (left_block == next_block) {
__ Branch(chunk_->GetAssemblyLabel(right_block),
- NegateCondition(cc), src1, src2);
+ NegateCondition(condition), src1, src2);
} else if (right_block == next_block) {
- __ Branch(chunk_->GetAssemblyLabel(left_block), cc, src1, src2);
+ __ Branch(chunk_->GetAssemblyLabel(left_block), condition, src1, src2);
} else {
- __ Branch(chunk_->GetAssemblyLabel(left_block), cc, src1, src2);
+ __ Branch(chunk_->GetAssemblyLabel(left_block), condition, src1, src2);
__ Branch(chunk_->GetAssemblyLabel(right_block));
}
}
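The rewritten EmitBranch above (and the EmitBranchF variant that follows) picks the cheapest branch shape based on which successor block is emitted next: identical successors, or an always-taken condition, become a plain goto, a successor that falls through needs only one conditional branch on the (possibly negated) condition, and otherwise a conditional branch plus an unconditional jump are emitted. A small sketch of that selection, returning a description instead of emitting code:

    #include <string>

    std::string BranchPlan(int true_block, int false_block, int next_block) {
      if (true_block == false_block) return "goto common successor";
      if (true_block == next_block)
        return "branch on negated condition to false_block; fall through";
      if (false_block == next_block)
        return "branch on condition to true_block; fall through";
      return "branch on condition to true_block; jump to false_block";
    }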
-void LCodeGen::EmitBranchF(int left_block, int right_block,
- Condition cc, FPURegister src1, FPURegister src2) {
- int next_block = GetNextEmittedBlock(current_block_);
- right_block = chunk_->LookupDestination(right_block);
- left_block = chunk_->LookupDestination(left_block);
+template<class InstrType>
+void LCodeGen::EmitBranchF(InstrType instr,
+ Condition condition,
+ FPURegister src1,
+ FPURegister src2) {
+ int right_block = instr->FalseDestination(chunk_);
+ int left_block = instr->TrueDestination(chunk_);
+
+ int next_block = GetNextEmittedBlock();
if (right_block == left_block) {
EmitGoto(left_block);
} else if (left_block == next_block) {
__ BranchF(chunk_->GetAssemblyLabel(right_block), NULL,
- NegateCondition(cc), src1, src2);
+ NegateCondition(condition), src1, src2);
} else if (right_block == next_block) {
- __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL, cc, src1, src2);
+ __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL,
+ condition, src1, src2);
} else {
- __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL, cc, src1, src2);
+ __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL,
+ condition, src1, src2);
__ Branch(chunk_->GetAssemblyLabel(right_block));
}
}
-void LCodeGen::DoBranch(LBranch* instr) {
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
+template<class InstrType>
+void LCodeGen::EmitFalseBranchF(InstrType instr,
+ Condition condition,
+ FPURegister src1,
+ FPURegister src2) {
+ int false_block = instr->FalseDestination(chunk_);
+ __ BranchF(chunk_->GetAssemblyLabel(false_block), NULL,
+ condition, src1, src2);
+}
+
+void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
+ __ stop("LDebugBreak");
+}
+
+
+void LCodeGen::DoBranch(LBranch* instr) {
Representation r = instr->hydrogen()->value()->representation();
- if (r.IsInteger32()) {
+ if (r.IsInteger32() || r.IsSmi()) {
+ ASSERT(!info()->IsStub());
Register reg = ToRegister(instr->value());
- EmitBranch(true_block, false_block, ne, reg, Operand(zero_reg));
+ EmitBranch(instr, ne, reg, Operand(zero_reg));
} else if (r.IsDouble()) {
+ ASSERT(!info()->IsStub());
DoubleRegister reg = ToDoubleRegister(instr->value());
// Test the double value. Zero and NaN are false.
- EmitBranchF(true_block, false_block, ne, reg, kDoubleRegZero);
+ EmitBranchF(instr, nue, reg, kDoubleRegZero);
} else {
ASSERT(r.IsTagged());
Register reg = ToRegister(instr->value());
HType type = instr->hydrogen()->value()->type();
if (type.IsBoolean()) {
+ ASSERT(!info()->IsStub());
__ LoadRoot(at, Heap::kTrueValueRootIndex);
- EmitBranch(true_block, false_block, eq, reg, Operand(at));
+ EmitBranch(instr, eq, reg, Operand(at));
} else if (type.IsSmi()) {
- EmitBranch(true_block, false_block, ne, reg, Operand(zero_reg));
+ ASSERT(!info()->IsStub());
+ EmitBranch(instr, ne, reg, Operand(zero_reg));
+ } else if (type.IsJSArray()) {
+ ASSERT(!info()->IsStub());
+ EmitBranch(instr, al, zero_reg, Operand(zero_reg));
+ } else if (type.IsHeapNumber()) {
+ ASSERT(!info()->IsStub());
+ DoubleRegister dbl_scratch = double_scratch0();
+ __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
+ // Test the double value. Zero and NaN are false.
+ EmitBranchF(instr, nue, dbl_scratch, kDoubleRegZero);
+ } else if (type.IsString()) {
+ ASSERT(!info()->IsStub());
+ __ lw(at, FieldMemOperand(reg, String::kLengthOffset));
+ EmitBranch(instr, ne, at, Operand(zero_reg));
} else {
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
// Avoid deopts in the case where we've never executed this path before.
- if (expected.IsEmpty()) expected = ToBooleanStub::all_types();
+ if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
if (expected.Contains(ToBooleanStub::UNDEFINED)) {
// undefined -> false.
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ Branch(false_label, eq, reg, Operand(at));
+ __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
}
if (expected.Contains(ToBooleanStub::BOOLEAN)) {
// Boolean -> its value.
__ LoadRoot(at, Heap::kTrueValueRootIndex);
- __ Branch(true_label, eq, reg, Operand(at));
+ __ Branch(instr->TrueLabel(chunk_), eq, reg, Operand(at));
__ LoadRoot(at, Heap::kFalseValueRootIndex);
- __ Branch(false_label, eq, reg, Operand(at));
+ __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
}
if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
// 'null' -> false.
__ LoadRoot(at, Heap::kNullValueRootIndex);
- __ Branch(false_label, eq, reg, Operand(at));
+ __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
}
if (expected.Contains(ToBooleanStub::SMI)) {
// Smis: 0 -> false, all other -> true.
- __ Branch(false_label, eq, reg, Operand(zero_reg));
- __ JumpIfSmi(reg, true_label);
+ __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(zero_reg));
+ __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
} else if (expected.NeedsMap()) {
// If we need a map later and have a Smi -> deopt.
__ And(at, reg, Operand(kSmiTagMask));
@@ -1634,14 +2081,15 @@ void LCodeGen::DoBranch(LBranch* instr) {
// Undetectable -> false.
__ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
__ And(at, at, Operand(1 << Map::kIsUndetectable));
- __ Branch(false_label, ne, at, Operand(zero_reg));
+ __ Branch(instr->FalseLabel(chunk_), ne, at, Operand(zero_reg));
}
}
if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
// spec object -> true.
__ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
- __ Branch(true_label, ge, at, Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ Branch(instr->TrueLabel(chunk_),
+ ge, at, Operand(FIRST_SPEC_OBJECT_TYPE));
}
if (expected.Contains(ToBooleanStub::STRING)) {
@@ -1650,11 +2098,18 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
__ Branch(&not_string, ge, at, Operand(FIRST_NONSTRING_TYPE));
__ lw(at, FieldMemOperand(reg, String::kLengthOffset));
- __ Branch(true_label, ne, at, Operand(zero_reg));
- __ Branch(false_label);
+ __ Branch(instr->TrueLabel(chunk_), ne, at, Operand(zero_reg));
+ __ Branch(instr->FalseLabel(chunk_));
__ bind(&not_string);
}
+ if (expected.Contains(ToBooleanStub::SYMBOL)) {
+ // Symbol value -> true.
+ const Register scratch = scratch1();
+ __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ __ Branch(instr->TrueLabel(chunk_), eq, scratch, Operand(SYMBOL_TYPE));
+ }
+
if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
// heap number -> false iff +0, -0, or NaN.
DoubleRegister dbl_scratch = double_scratch0();
@@ -1662,24 +2117,26 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
__ Branch(&not_heap_number, ne, map, Operand(at));
__ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
- __ BranchF(true_label, false_label, ne, dbl_scratch, kDoubleRegZero);
+ __ BranchF(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
+ ne, dbl_scratch, kDoubleRegZero);
// Falls through if dbl_scratch == 0.
- __ Branch(false_label);
+ __ Branch(instr->FalseLabel(chunk_));
__ bind(&not_heap_number);
}
- // We've seen something for the first time -> deopt.
- DeoptimizeIf(al, instr->environment(), zero_reg, Operand(zero_reg));
+ if (!expected.IsGeneric()) {
+ // We've seen something for the first time -> deopt.
+ // This can only happen if we are not generic already.
+ DeoptimizeIf(al, instr->environment(), zero_reg, Operand(zero_reg));
+ }
}
}
}
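Taken together, the branches in DoBranch implement the JavaScript ToBoolean table, specialized by the types ToBooleanStub has already observed. The table itself, written out as ordinary C++ over a minimal stand-in value type (the struct below is illustrative, not a V8 type):

    #include <cmath>
    #include <cstdint>
    #include <string>

    struct JSValue {
      enum Kind { kUndefined, kNull, kBoolean, kSmi, kHeapNumber, kString,
                  kSymbol, kUndetectable, kOtherObject } kind;
      bool boolean = false;
      int32_t smi = 0;
      double number = 0.0;
      std::string string;
    };

    bool ToBoolean(const JSValue& v) {
      switch (v.kind) {
        case JSValue::kUndefined:
        case JSValue::kNull:
        case JSValue::kUndetectable: return false;   // undetectable -> false
        case JSValue::kBoolean:      return v.boolean;
        case JSValue::kSmi:          return v.smi != 0;
        case JSValue::kString:       return !v.string.empty();
        case JSValue::kSymbol:       return true;
        case JSValue::kHeapNumber:   // +0, -0 and NaN are the only false numbers
          return v.number != 0.0 && !std::isnan(v.number);
        case JSValue::kOtherObject:  return true;    // remaining spec objects
      }
      return true;
    }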
void LCodeGen::EmitGoto(int block) {
- block = chunk_->LookupDestination(block);
- int next_block = GetNextEmittedBlock(current_block_);
- if (block != next_block) {
- __ jmp(chunk_->GetAssemblyLabel(block));
+ if (!IsNextEmittedBlock(block)) {
+ __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
}
}
@@ -1696,6 +2153,10 @@ Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
case Token::EQ_STRICT:
cond = eq;
break;
+ case Token::NE:
+ case Token::NE_STRICT:
+ cond = ne;
+ break;
case Token::LT:
cond = is_unsigned ? lo : lt;
break;
@@ -1717,21 +2178,17 @@ Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
}
-void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
+void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
LOperand* left = instr->left();
LOperand* right = instr->right();
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
-
Condition cond = TokenToCondition(instr->op(), false);
if (left->IsConstantOperand() && right->IsConstantOperand()) {
// We can statically evaluate the comparison.
double left_val = ToDouble(LConstantOperand::cast(left));
double right_val = ToDouble(LConstantOperand::cast(right));
- int next_block =
- EvalComparison(instr->op(), left_val, right_val) ? true_block
- : false_block;
+ int next_block = EvalComparison(instr->op(), left_val, right_val) ?
+ instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
EmitGoto(next_block);
} else {
if (instr->is_double()) {
@@ -1742,20 +2199,32 @@ void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
// If a NaN is involved, i.e. the result is unordered,
// jump to false block label.
- __ BranchF(NULL, chunk_->GetAssemblyLabel(false_block), eq,
+ __ BranchF(NULL, instr->FalseLabel(chunk_), eq,
left_reg, right_reg);
- EmitBranchF(true_block, false_block, cond, left_reg, right_reg);
+ EmitBranchF(instr, cond, left_reg, right_reg);
} else {
Register cmp_left;
Operand cmp_right = Operand(0);
if (right->IsConstantOperand()) {
- cmp_left = ToRegister(left);
- cmp_right = Operand(ToInteger32(LConstantOperand::cast(right)));
+ int32_t value = ToInteger32(LConstantOperand::cast(right));
+ if (instr->hydrogen_value()->representation().IsSmi()) {
+ cmp_left = ToRegister(left);
+ cmp_right = Operand(Smi::FromInt(value));
+ } else {
+ cmp_left = ToRegister(left);
+ cmp_right = Operand(value);
+ }
} else if (left->IsConstantOperand()) {
- cmp_left = ToRegister(right);
- cmp_right = Operand(ToInteger32(LConstantOperand::cast(left)));
+ int32_t value = ToInteger32(LConstantOperand::cast(left));
+ if (instr->hydrogen_value()->representation().IsSmi()) {
+ cmp_left = ToRegister(right);
+ cmp_right = Operand(Smi::FromInt(value));
+ } else {
+ cmp_left = ToRegister(right);
+ cmp_right = Operand(value);
+ }
// We transposed the operands. Reverse the condition.
cond = ReverseCondition(cond);
} else {
@@ -1763,7 +2232,7 @@ void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
cmp_right = Operand(ToRegister(right));
}
- EmitBranch(true_block, false_block, cond, cmp_left, cmp_right);
+ EmitBranch(instr, cond, cmp_left, cmp_right);
}
}
}
@@ -1772,62 +2241,25 @@ void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
Register left = ToRegister(instr->left());
Register right = ToRegister(instr->right());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
-
- EmitBranch(true_block, false_block, eq, left, Operand(right));
-}
-
-
-void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
- Register left = ToRegister(instr->left());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- EmitBranch(true_block, false_block, eq, left,
- Operand(instr->hydrogen()->right()));
+ EmitBranch(instr, eq, left, Operand(right));
}
-
-void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) {
- Register scratch = scratch0();
- Register reg = ToRegister(instr->value());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- // If the expression is known to be untagged or a smi, then it's definitely
- // not null, and it can't be a an undetectable object.
- if (instr->hydrogen()->representation().IsSpecialization() ||
- instr->hydrogen()->type().IsSmi()) {
- EmitGoto(false_block);
+void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
+ if (instr->hydrogen()->representation().IsTagged()) {
+ Register input_reg = ToRegister(instr->object());
+ __ li(at, Operand(factory()->the_hole_value()));
+ EmitBranch(instr, eq, input_reg, Operand(at));
return;
}
- int true_block = chunk_->LookupDestination(instr->true_block_id());
+ DoubleRegister input_reg = ToDoubleRegister(instr->object());
+ EmitFalseBranchF(instr, eq, input_reg, input_reg);
- Heap::RootListIndex nil_value = instr->nil() == kNullValue ?
- Heap::kNullValueRootIndex :
- Heap::kUndefinedValueRootIndex;
- __ LoadRoot(at, nil_value);
- if (instr->kind() == kStrictEquality) {
- EmitBranch(true_block, false_block, eq, reg, Operand(at));
- } else {
- Heap::RootListIndex other_nil_value = instr->nil() == kNullValue ?
- Heap::kUndefinedValueRootIndex :
- Heap::kNullValueRootIndex;
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
- __ Branch(USE_DELAY_SLOT, true_label, eq, reg, Operand(at));
- __ LoadRoot(at, other_nil_value); // In the delay slot.
- __ Branch(USE_DELAY_SLOT, true_label, eq, reg, Operand(at));
- __ JumpIfSmi(reg, false_label); // In the delay slot.
- // Check for undetectable objects by looking in the bit field in
- // the map. The object has already been smi checked.
- __ lw(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
- __ lbu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
- __ And(scratch, scratch, 1 << Map::kIsUndetectable);
- EmitBranch(true_block, false_block, ne, scratch, Operand(zero_reg));
- }
+ Register scratch = scratch0();
+ __ FmoveHigh(scratch, input_reg);
+ EmitBranch(instr, eq, scratch, Operand(kHoleNanUpper32));
}
@@ -1862,23 +2294,22 @@ void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
Register temp1 = ToRegister(instr->temp());
Register temp2 = scratch0();
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
Condition true_cond =
- EmitIsObject(reg, temp1, temp2, false_label, true_label);
+ EmitIsObject(reg, temp1, temp2,
+ instr->FalseLabel(chunk_), instr->TrueLabel(chunk_));
- EmitBranch(true_block, false_block, true_cond, temp2,
+ EmitBranch(instr, true_cond, temp2,
Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
}
Condition LCodeGen::EmitIsString(Register input,
Register temp1,
- Label* is_not_string) {
- __ JumpIfSmi(input, is_not_string);
+ Label* is_not_string,
+ SmiCheck check_needed = INLINE_SMI_CHECK) {
+ if (check_needed == INLINE_SMI_CHECK) {
+ __ JumpIfSmi(input, is_not_string);
+ }
__ GetObjectType(input, temp1, temp1);
return lt;
@@ -1889,25 +2320,21 @@ void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
Register reg = ToRegister(instr->value());
Register temp1 = ToRegister(instr->temp());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
+ SmiCheck check_needed =
+ instr->hydrogen()->value()->IsHeapObject()
+ ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
Condition true_cond =
- EmitIsString(reg, temp1, false_label);
+ EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed);
- EmitBranch(true_block, false_block, true_cond, temp1,
+ EmitBranch(instr, true_cond, temp1,
Operand(FIRST_NONSTRING_TYPE));
}
void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
Register input_reg = EmitLoadRegister(instr->value(), at);
__ And(at, input_reg, kSmiTagMask);
- EmitBranch(true_block, false_block, eq, at, Operand(zero_reg));
+ EmitBranch(instr, eq, at, Operand(zero_reg));
}
@@ -1915,14 +2342,13 @@ void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
Register input = ToRegister(instr->value());
Register temp = ToRegister(instr->temp());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- __ JumpIfSmi(input, chunk_->GetAssemblyLabel(false_block));
+ if (!instr->hydrogen()->value()->IsHeapObject()) {
+ __ JumpIfSmi(input, instr->FalseLabel(chunk_));
+ }
__ lw(temp, FieldMemOperand(input, HeapObject::kMapOffset));
__ lbu(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
__ And(at, temp, Operand(1 << Map::kIsUndetectable));
- EmitBranch(true_block, false_block, ne, at, Operand(zero_reg));
+ EmitBranch(instr, ne, at, Operand(zero_reg));
}
@@ -1947,16 +2373,15 @@ static Condition ComputeCompareCondition(Token::Value op) {
void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
Token::Value op = instr->op();
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- Handle<Code> ic = CompareIC::GetUninitialized(op);
+ Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
CallCode(ic, RelocInfo::CODE_TARGET, instr);
Condition condition = ComputeCompareCondition(op);
- EmitBranch(true_block, false_block, condition, v0, Operand(zero_reg));
+ EmitBranch(instr, condition, v0, Operand(zero_reg));
}
@@ -1984,16 +2409,12 @@ void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
Register scratch = scratch0();
Register input = ToRegister(instr->value());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
- __ JumpIfSmi(input, false_label);
+ if (!instr->hydrogen()->value()->IsHeapObject()) {
+ __ JumpIfSmi(input, instr->FalseLabel(chunk_));
+ }
__ GetObjectType(input, scratch, scratch);
- EmitBranch(true_block,
- false_block,
+ EmitBranch(instr,
BranchCondition(instr->hydrogen()),
scratch,
Operand(TestType(instr->hydrogen())));
@@ -2016,13 +2437,10 @@ void LCodeGen::DoHasCachedArrayIndexAndBranch(
Register input = ToRegister(instr->value());
Register scratch = scratch0();
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
__ lw(scratch,
FieldMemOperand(input, String::kHashFieldOffset));
__ And(at, scratch, Operand(String::kContainsCachedArrayIndexMask));
- EmitBranch(true_block, false_block, eq, at, Operand(zero_reg));
+ EmitBranch(instr, eq, at, Operand(zero_reg));
}
@@ -2040,7 +2458,7 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
__ JumpIfSmi(input, is_false);
- if (class_name->IsEqualTo(CStrVector("Function"))) {
+ if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Function"))) {
// Assuming the following assertions, we can use the same compares to test
// for both being a function type and being in the object type range.
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
@@ -2069,7 +2487,7 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
// Objects with a non-function constructor have class 'Object'.
__ GetObjectType(temp, temp2, temp2);
- if (class_name->IsEqualTo(CStrVector("Object"))) {
+ if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Object"))) {
__ Branch(is_true, ne, temp2, Operand(JS_FUNCTION_TYPE));
} else {
__ Branch(is_false, ne, temp2, Operand(JS_FUNCTION_TYPE));
@@ -2080,12 +2498,12 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
__ lw(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
__ lw(temp, FieldMemOperand(temp,
SharedFunctionInfo::kInstanceClassNameOffset));
- // The class name we are testing against is a symbol because it's a literal.
- // The name in the constructor is a symbol because of the way the context is
- // booted. This routine isn't expected to work for random API-created
+ // The class name we are testing against is internalized since it's a literal.
+ // The name in the constructor is internalized because of the way the context
+ // is booted. This routine isn't expected to work for random API-created
// classes and it doesn't have to because you can't access it with natives
- // syntax. Since both sides are symbols it is sufficient to use an identity
- // comparison.
+ // syntax. Since both sides are internalized it is sufficient to use an
+ // identity comparison.
// End with the address of this class_name instance in temp register.
// On MIPS, the caller must do the comparison with Handle<String>class_name.
@@ -2098,30 +2516,24 @@ void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
Register temp2 = ToRegister(instr->temp());
Handle<String> class_name = instr->hydrogen()->class_name();
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
+ EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
+ class_name, input, temp, temp2);
- EmitClassOfTest(true_label, false_label, class_name, input, temp, temp2);
-
- EmitBranch(true_block, false_block, eq, temp, Operand(class_name));
+ EmitBranch(instr, eq, temp, Operand(class_name));
}
void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
Register reg = ToRegister(instr->value());
Register temp = ToRegister(instr->temp());
- int true_block = instr->true_block_id();
- int false_block = instr->false_block_id();
__ lw(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
- EmitBranch(true_block, false_block, eq, temp, Operand(instr->map()));
+ EmitBranch(instr, eq, temp, Operand(instr->map()));
}
void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
Label true_label, done;
ASSERT(ToRegister(instr->left()).is(a0)); // Object is in a0.
ASSERT(ToRegister(instr->right()).is(a1)); // Function is in a1.
@@ -2129,7 +2541,7 @@ void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
ASSERT(result.is(v0));
InstanceofStub stub(InstanceofStub::kArgsInRegisters);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
__ Branch(&true_label, eq, result, Operand(zero_reg));
__ li(result, Operand(factory()->false_value()));
@@ -2141,15 +2553,15 @@ void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
- class DeferredInstanceOfKnownGlobal: public LDeferredCode {
+ class DeferredInstanceOfKnownGlobal V8_FINAL : public LDeferredCode {
public:
DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
LInstanceOfKnownGlobal* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
+ virtual void Generate() V8_OVERRIDE {
codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
}
- virtual LInstruction* instr() { return instr_; }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
Label* map_check() { return &map_check_; }
private:
@@ -2183,10 +2595,9 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
// We use Factory::the_hole_value() on purpose instead of loading from the
// root array to force relocation to be able to later patch with
// the cached map.
- Handle<JSGlobalPropertyCell> cell =
- factory()->NewJSGlobalPropertyCell(factory()->the_hole_value());
+ Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
__ li(at, Operand(Handle<Object>(cell)));
- __ lw(at, FieldMemOperand(at, JSGlobalPropertyCell::kValueOffset));
+ __ lw(at, FieldMemOperand(at, PropertyCell::kValueOffset));
__ Branch(&cache_miss, ne, map, Operand(at));
// We use Factory::the_hole_value() on purpose instead of loading from the
// root array to force relocation to be able to later patch
@@ -2233,6 +2644,7 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
InstanceofStub stub(flags);
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ LoadContextFromDeferred(instr->context());
// Get the temp register reserved by the instruction. This needs to be t0 as
// its slot of the pushing of safepoint registers is used to communicate the
@@ -2249,7 +2661,7 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
__ li(temp, Operand(delta * kPointerSize), CONSTANT_SIZE);
__ StoreToSafepointRegisterSlot(temp, temp);
}
- CallCodeGeneric(stub.GetCode(),
+ CallCodeGeneric(stub.GetCode(isolate()),
RelocInfo::CODE_TARGET,
instr,
RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
@@ -2262,9 +2674,10 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
void LCodeGen::DoCmpT(LCmpT* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
Token::Value op = instr->op();
- Handle<Code> ic = CompareIC::GetUninitialized(op);
+ Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
CallCode(ic, RelocInfo::CODE_TARGET, instr);
// On MIPS there is no need for a "no inlined smi code" marker (nop).
@@ -2272,34 +2685,70 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
// A minor optimization that relies on LoadRoot always emitting one
// instruction.
Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());
- Label done;
+ Label done, check;
__ Branch(USE_DELAY_SLOT, &done, condition, v0, Operand(zero_reg));
+ __ bind(&check);
__ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
+ ASSERT_EQ(1, masm()->InstructionsGeneratedSince(&check));
__ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
- ASSERT_EQ(3, masm()->InstructionsGeneratedSince(&done));
__ bind(&done);
}
void LCodeGen::DoReturn(LReturn* instr) {
- if (FLAG_trace) {
+ if (FLAG_trace && info()->IsOptimizing()) {
// Push the return value on the stack as the parameter.
- // Runtime::TraceExit returns its parameter in v0.
+ // Runtime::TraceExit returns its parameter in v0. We're leaving the code
+ // managed by the register allocator and tearing down the frame, so it's
+ // safe to write to the context register.
__ push(v0);
+ __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ CallRuntime(Runtime::kTraceExit, 1);
}
- int32_t sp_delta = (GetParameterCount() + 1) * kPointerSize;
- __ mov(sp, fp);
- __ Pop(ra, fp);
- __ Addu(sp, sp, Operand(sp_delta));
+ if (info()->saves_caller_doubles()) {
+ ASSERT(NeedsEagerFrame());
+ BitVector* doubles = chunk()->allocated_double_registers();
+ BitVector::Iterator save_iterator(doubles);
+ int count = 0;
+ while (!save_iterator.Done()) {
+ __ ldc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
+ MemOperand(sp, count * kDoubleSize));
+ save_iterator.Advance();
+ count++;
+ }
+ }
+ int no_frame_start = -1;
+ if (NeedsEagerFrame()) {
+ __ mov(sp, fp);
+ no_frame_start = masm_->pc_offset();
+ __ Pop(ra, fp);
+ }
+ if (instr->has_constant_parameter_count()) {
+ int parameter_count = ToInteger32(instr->constant_parameter_count());
+ int32_t sp_delta = (parameter_count + 1) * kPointerSize;
+ if (sp_delta != 0) {
+ __ Addu(sp, sp, Operand(sp_delta));
+ }
+ } else {
+ Register reg = ToRegister(instr->parameter_count());
+ // The argument count parameter is a smi.
+ __ SmiUntag(reg);
+ __ sll(at, reg, kPointerSizeLog2);
+ __ Addu(sp, sp, at);
+ }
+
__ Jump(ra);
+
+ if (no_frame_start != -1) {
+ info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
+ }
}
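The epilogue above pops (parameter_count + 1) * kPointerSize bytes in the constant case, the extra slot being the receiver; in the dynamic case the count arrives as a Smi, so it is untagged and scaled by kPointerSizeLog2 before being added to sp. A sketch of the constant case (the kPointerSize of 4 is an assumption of this sketch, matching MIPS32):

    #include <cstdint>

    int32_t ReturnSpDelta(int32_t parameter_count) {
      const int32_t kPointerSize = 4;               // MIPS32 word size
      return (parameter_count + 1) * kPointerSize;  // +1 pops the receiver
    }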
void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
Register result = ToRegister(instr->result());
- __ li(at, Operand(Handle<Object>(instr->hydrogen()->cell())));
- __ lw(result, FieldMemOperand(at, JSGlobalPropertyCell::kValueOffset));
+ __ li(at, Operand(Handle<Object>(instr->hydrogen()->cell().handle())));
+ __ lw(result, FieldMemOperand(at, Cell::kValueOffset));
if (instr->hydrogen()->RequiresHoleCheck()) {
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
DeoptimizeIf(eq, instr->environment(), result, Operand(at));
@@ -2308,6 +2757,7 @@ void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->global_object()).is(a0));
ASSERT(ToRegister(instr->result()).is(v0));
@@ -2324,7 +2774,7 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
Register cell = scratch0();
// Load the cell.
- __ li(cell, Operand(instr->hydrogen()->cell()));
+ __ li(cell, Operand(instr->hydrogen()->cell().handle()));
// If the cell we are storing to contains the hole it could have
// been deleted from the property dictionary. In that case, we need
@@ -2333,18 +2783,19 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
// We use a temp to check the payload.
Register payload = ToRegister(instr->temp());
- __ lw(payload, FieldMemOperand(cell, JSGlobalPropertyCell::kValueOffset));
+ __ lw(payload, FieldMemOperand(cell, Cell::kValueOffset));
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
DeoptimizeIf(eq, instr->environment(), payload, Operand(at));
}
// Store the value.
- __ sw(value, FieldMemOperand(cell, JSGlobalPropertyCell::kValueOffset));
+ __ sw(value, FieldMemOperand(cell, Cell::kValueOffset));
// Cells are always rescanned, so no write barrier here.
}
void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->global_object()).is(a1));
ASSERT(ToRegister(instr->value()).is(a0));
@@ -2397,14 +2848,14 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
__ sw(value, target);
if (instr->hydrogen()->NeedsWriteBarrier()) {
- HType type = instr->hydrogen()->value()->type();
SmiCheck check_needed =
- type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+ instr->hydrogen()->value()->IsHeapObject()
+ ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
__ RecordWriteContextSlot(context,
target.offset(),
value,
scratch0(),
- kRAHasBeenSaved,
+ GetRAState(),
kSaveFPRegs,
EMIT_REMEMBERED_SET,
check_needed);
@@ -2415,104 +2866,43 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
+ HObjectAccess access = instr->hydrogen()->access();
+ int offset = access.offset();
Register object = ToRegister(instr->object());
- Register result = ToRegister(instr->result());
- if (instr->hydrogen()->is_in_object()) {
- __ lw(result, FieldMemOperand(object, instr->hydrogen()->offset()));
- } else {
- __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
- __ lw(result, FieldMemOperand(result, instr->hydrogen()->offset()));
- }
-}
-
-
-void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
- Register object,
- Handle<Map> type,
- Handle<String> name,
- LEnvironment* env) {
- LookupResult lookup(isolate());
- type->LookupDescriptor(NULL, *name, &lookup);
- ASSERT(lookup.IsFound() || lookup.IsCacheable());
- if (lookup.IsField()) {
- int index = lookup.GetLocalFieldIndexFromMap(*type);
- int offset = index * kPointerSize;
- if (index < 0) {
- // Negative property indices are in-object properties, indexed
- // from the end of the fixed part of the object.
- __ lw(result, FieldMemOperand(object, offset + type->instance_size()));
+
+ if (access.IsExternalMemory()) {
+ Register result = ToRegister(instr->result());
+ MemOperand operand = MemOperand(object, offset);
+ if (access.representation().IsByte()) {
+ __ lb(result, operand);
} else {
- // Non-negative property indices are in the properties array.
- __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
- __ lw(result, FieldMemOperand(result, offset + FixedArray::kHeaderSize));
- }
- } else if (lookup.IsConstantFunction()) {
- Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
- __ LoadHeapObject(result, function);
- } else {
- // Negative lookup.
- // Check prototypes.
- Handle<HeapObject> current(HeapObject::cast((*type)->prototype()));
- Heap* heap = type->GetHeap();
- while (*current != heap->null_value()) {
- __ LoadHeapObject(result, current);
- __ lw(result, FieldMemOperand(result, HeapObject::kMapOffset));
- DeoptimizeIf(ne, env, result, Operand(Handle<Map>(current->map())));
- current =
- Handle<HeapObject>(HeapObject::cast(current->map()->prototype()));
+ __ lw(result, operand);
}
- __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
+ return;
}
-}
-
-
-void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
- Register object = ToRegister(instr->object());
- Register result = ToRegister(instr->result());
- Register object_map = scratch0();
-
- int map_count = instr->hydrogen()->types()->length();
- bool need_generic = instr->hydrogen()->need_generic();
- if (map_count == 0 && !need_generic) {
- DeoptimizeIf(al, instr->environment());
+ if (instr->hydrogen()->representation().IsDouble()) {
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ __ ldc1(result, FieldMemOperand(object, offset));
return;
}
- Handle<String> name = instr->hydrogen()->name();
- Label done;
- __ lw(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
- for (int i = 0; i < map_count; ++i) {
- bool last = (i == map_count - 1);
- Handle<Map> map = instr->hydrogen()->types()->at(i);
- Label check_passed;
- __ CompareMapAndBranch(
- object_map, map, &check_passed,
- eq, &check_passed, ALLOW_ELEMENT_TRANSITION_MAPS);
- if (last && !need_generic) {
- DeoptimizeIf(al, instr->environment());
- __ bind(&check_passed);
- EmitLoadFieldOrConstantFunction(
- result, object, map, name, instr->environment());
- } else {
- Label next;
- __ Branch(&next);
- __ bind(&check_passed);
- EmitLoadFieldOrConstantFunction(
- result, object, map, name, instr->environment());
- __ Branch(&done);
- __ bind(&next);
- }
+
+ Register result = ToRegister(instr->result());
+ if (!access.IsInobject()) {
+ __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
+ object = result;
}
- if (need_generic) {
- __ li(a2, Operand(name));
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ MemOperand operand = FieldMemOperand(object, offset);
+ if (access.representation().IsByte()) {
+ __ lb(result, operand);
+ } else {
+ __ lw(result, operand);
}
- __ bind(&done);
}
void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->object()).is(a0));
ASSERT(ToRegister(instr->result()).is(v0));
@@ -2566,35 +2956,9 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
}
-void LCodeGen::DoLoadElements(LLoadElements* instr) {
+void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
Register result = ToRegister(instr->result());
- Register input = ToRegister(instr->object());
- Register scratch = scratch0();
-
- __ lw(result, FieldMemOperand(input, JSObject::kElementsOffset));
- if (FLAG_debug_code) {
- Label done, fail;
- __ lw(scratch, FieldMemOperand(result, HeapObject::kMapOffset));
- __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
- __ Branch(USE_DELAY_SLOT, &done, eq, scratch, Operand(at));
- __ LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex); // In the delay slot.
- __ Branch(&done, eq, scratch, Operand(at));
- // |scratch| still contains |input|'s map.
- __ lbu(scratch, FieldMemOperand(scratch, Map::kBitField2Offset));
- __ Ext(scratch, scratch, Map::kElementsKindShift,
- Map::kElementsKindBitCount);
- __ Branch(&fail, lt, scratch,
- Operand(GetInitialFastElementsKind()));
- __ Branch(&done, le, scratch,
- Operand(TERMINAL_FAST_ELEMENTS_KIND));
- __ Branch(&fail, lt, scratch,
- Operand(FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND));
- __ Branch(&done, le, scratch,
- Operand(LAST_EXTERNAL_ARRAY_ELEMENTS_KIND));
- __ bind(&fail);
- __ Abort("Check for fast or external elements failed.");
- __ bind(&done);
- }
+ __ LoadRoot(result, instr->index());
}
@@ -2609,20 +2973,147 @@ void LCodeGen::DoLoadExternalArrayPointer(
void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
Register arguments = ToRegister(instr->arguments());
- Register length = ToRegister(instr->length());
- Register index = ToRegister(instr->index());
Register result = ToRegister(instr->result());
- // There are two words between the frame pointer and the last argument.
- // Subtracting from length accounts for one of them, add one more.
- __ subu(length, length, index);
- __ Addu(length, length, Operand(1));
- __ sll(length, length, kPointerSizeLog2);
- __ Addu(at, arguments, Operand(length));
- __ lw(result, MemOperand(at, 0));
+ if (instr->length()->IsConstantOperand() &&
+ instr->index()->IsConstantOperand()) {
+ int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+ int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
+ int index = (const_length - const_index) + 1;
+ __ lw(result, MemOperand(arguments, index * kPointerSize));
+ } else {
+ Register length = ToRegister(instr->length());
+ Register index = ToRegister(instr->index());
+ // There are two words between the frame pointer and the last argument.
+ // Subtracting from length accounts for one of them, add one more.
+ __ subu(length, length, index);
+ __ Addu(length, length, Operand(1));
+ __ sll(length, length, kPointerSizeLog2);
+ __ Addu(at, arguments, Operand(length));
+ __ lw(result, MemOperand(at, 0));
+ }
+}
+
+
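
The constant-operand fast path added to DoAccessArgumentsAt folds the arithmetic the register path still performs with subu/Addu/sll into a single immediate: ((length - index) + 1) * kPointerSize bytes off the arguments pointer, the +1 accounting for the second of the two words between the frame pointer and the last argument. A small sketch of just that offset math, assuming the MIPS32 word size:

    #include <cstdio>

    int main() {
      const int kPointerSize = 4;  // MIPS32
      const int length = 3;        // example: three stacked arguments
      const int index = 0;         // access arguments[0]
      // Same value the register path builds step by step before the lw.
      int byte_offset = ((length - index) + 1) * kPointerSize;
      std::printf("lw result, %d(arguments)\n", byte_offset);  // 16(arguments)
      return 0;
    }
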
+void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
+ Register external_pointer = ToRegister(instr->elements());
+ Register key = no_reg;
+ ElementsKind elements_kind = instr->elements_kind();
+ bool key_is_constant = instr->key()->IsConstantOperand();
+ int constant_key = 0;
+ if (key_is_constant) {
+ constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+ if (constant_key & 0xF0000000) {
+ Abort(kArrayIndexConstantValueTooBig);
+ }
+ } else {
+ key = ToRegister(instr->key());
+ }
+ int element_size_shift = ElementsKindToShiftSize(elements_kind);
+ int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
+ ? (element_size_shift - kSmiTagSize) : element_size_shift;
+ int additional_offset = instr->additional_index() << element_size_shift;
+
+ if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
+ elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
+ FPURegister result = ToDoubleRegister(instr->result());
+ if (key_is_constant) {
+ __ Addu(scratch0(), external_pointer, constant_key << element_size_shift);
+ } else {
+ __ sll(scratch0(), key, shift_size);
+ __ Addu(scratch0(), scratch0(), external_pointer);
+ }
+ if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
+ __ lwc1(result, MemOperand(scratch0(), additional_offset));
+ __ cvt_d_s(result, result);
+ } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
+ __ ldc1(result, MemOperand(scratch0(), additional_offset));
+ }
+ } else {
+ Register result = ToRegister(instr->result());
+ MemOperand mem_operand = PrepareKeyedOperand(
+ key, external_pointer, key_is_constant, constant_key,
+ element_size_shift, shift_size,
+ instr->additional_index(), additional_offset);
+ switch (elements_kind) {
+ case EXTERNAL_BYTE_ELEMENTS:
+ __ lb(result, mem_operand);
+ break;
+ case EXTERNAL_PIXEL_ELEMENTS:
+ case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ __ lbu(result, mem_operand);
+ break;
+ case EXTERNAL_SHORT_ELEMENTS:
+ __ lh(result, mem_operand);
+ break;
+ case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ __ lhu(result, mem_operand);
+ break;
+ case EXTERNAL_INT_ELEMENTS:
+ __ lw(result, mem_operand);
+ break;
+ case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ __ lw(result, mem_operand);
+ if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
+ DeoptimizeIf(Ugreater_equal, instr->environment(),
+ result, Operand(0x80000000));
+ }
+ break;
+ case EXTERNAL_FLOAT_ELEMENTS:
+ case EXTERNAL_DOUBLE_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS:
+ case FAST_ELEMENTS:
+ case FAST_SMI_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case DICTIONARY_ELEMENTS:
+ case NON_STRICT_ARGUMENTS_ELEMENTS:
+ UNREACHABLE();
+ break;
+ }
+ }
+}
+
+
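
The element address in DoLoadKeyedExternalArray is base + (key << element_size_shift) + (additional_index << element_size_shift); when the key is still a smi, the shift is reduced by kSmiTagSize so the tag bit is scaled away instead of being untagged first. A sketch of that arithmetic, assuming 32-bit V8 tagging (kSmiTagSize = 1):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const int kSmiTagSize = 1;         // 32-bit V8: smi == value << 1
      const int element_size_shift = 2;  // e.g. EXTERNAL_INT_ELEMENTS, 4-byte elements
      const int additional_index = 0;

      int32_t key = 5;
      int32_t smi_key = key << kSmiTagSize;  // how a tagged key sits in a register

      int32_t off_untagged = key << element_size_shift;
      int32_t off_smi = smi_key << (element_size_shift - kSmiTagSize);

      // Both print the same byte offset (20) once the additional index is added.
      std::printf("%d %d\n",
                  off_untagged + (additional_index << element_size_shift),
                  off_smi + (additional_index << element_size_shift));
      return 0;
    }
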
+void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
+ Register elements = ToRegister(instr->elements());
+ bool key_is_constant = instr->key()->IsConstantOperand();
+ Register key = no_reg;
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ Register scratch = scratch0();
+
+ int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
+
+ int base_offset =
+ FixedDoubleArray::kHeaderSize - kHeapObjectTag +
+ (instr->additional_index() << element_size_shift);
+ if (key_is_constant) {
+ int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+ if (constant_key & 0xF0000000) {
+ Abort(kArrayIndexConstantValueTooBig);
+ }
+ base_offset += constant_key << element_size_shift;
+ }
+ __ Addu(scratch, elements, Operand(base_offset));
+
+ if (!key_is_constant) {
+ key = ToRegister(instr->key());
+ int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
+ ? (element_size_shift - kSmiTagSize) : element_size_shift;
+ __ sll(at, key, shift_size);
+ __ Addu(scratch, scratch, at);
+ }
+
+ __ ldc1(result, MemOperand(scratch));
+
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ __ lw(scratch, MemOperand(scratch, sizeof(kHoleNanLower32)));
+ DeoptimizeIf(eq, instr->environment(), scratch, Operand(kHoleNanUpper32));
+ }
}
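
The hole check after the ldc1 above reads only the upper 32 bits of the element (the word at offset sizeof(kHoleNanLower32)) and deoptimizes when it equals kHoleNanUpper32, because "the hole" is one specific NaN bit pattern. A hedged sketch of that test follows; the actual sentinel is V8's kHoleNanUpper32 constant and is not reproduced here.

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // hole_upper stands in for V8's kHoleNanUpper32 constant.
    bool UpperWordIsHole(double element, uint32_t hole_upper) {
      uint64_t bits;
      std::memcpy(&bits, &element, sizeof(bits));
      // The generated code loads the word at byte offset 4, the upper word on
      // little-endian MIPS; bits >> 32 selects the same word here.
      return static_cast<uint32_t>(bits >> 32) == hole_upper;
    }

    int main() {
      const uint32_t kFakeHoleUpper = 0xDEADBEEFu;  // placeholder, not V8's value
      std::printf("%d\n", UpperWordIsHole(1.5, kFakeHoleUpper));  // 0
      return 0;
    }
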
-void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
+void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
Register elements = ToRegister(instr->elements());
Register result = ToRegister(instr->result());
Register scratch = scratch0();
@@ -2635,12 +3126,12 @@ void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
instr->additional_index());
store_base = elements;
} else {
- Register key = EmitLoadRegister(instr->key(), scratch);
- // Even though the HLoadKeyedFastElement instruction forces the input
+ Register key = ToRegister(instr->key());
+ // Even though the HLoadKeyed instruction forces the input
// representation for the key to be an integer, the input gets replaced
// during bound check elimination with the index argument to the bounds
// check, which can be tagged, so that case must be handled here, too.
- if (instr->hydrogen()->key()->representation().IsTagged()) {
+ if (instr->hydrogen()->key()->representation().IsSmi()) {
__ sll(scratch, key, kPointerSizeLog2 - kSmiTagSize);
__ addu(scratch, elements, scratch);
} else {
@@ -2664,46 +3155,14 @@ void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
}
-void LCodeGen::DoLoadKeyedFastDoubleElement(
- LLoadKeyedFastDoubleElement* instr) {
- Register elements = ToRegister(instr->elements());
- bool key_is_constant = instr->key()->IsConstantOperand();
- Register key = no_reg;
- DoubleRegister result = ToDoubleRegister(instr->result());
- Register scratch = scratch0();
-
- int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
- int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
- ? (element_size_shift - kSmiTagSize) : element_size_shift;
- int constant_key = 0;
- if (key_is_constant) {
- constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
- if (constant_key & 0xF0000000) {
- Abort("array index constant value too big.");
- }
+void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
+ if (instr->is_external()) {
+ DoLoadKeyedExternalArray(instr);
+ } else if (instr->hydrogen()->representation().IsDouble()) {
+ DoLoadKeyedFixedDoubleArray(instr);
} else {
- key = ToRegister(instr->key());
+ DoLoadKeyedFixedArray(instr);
}
-
- if (key_is_constant) {
- __ Addu(elements, elements,
- Operand(((constant_key + instr->additional_index()) <<
- element_size_shift) +
- FixedDoubleArray::kHeaderSize - kHeapObjectTag));
- } else {
- __ sll(scratch, key, shift_size);
- __ Addu(elements, elements, Operand(scratch));
- __ Addu(elements, elements,
- Operand((FixedDoubleArray::kHeaderSize - kHeapObjectTag) +
- (instr->additional_index() << element_size_shift)));
- }
-
- if (instr->hydrogen()->RequiresHoleCheck()) {
- __ lw(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
- DeoptimizeIf(eq, instr->environment(), scratch, Operand(kHoleNanUpper32));
- }
-
- __ ldc1(result, MemOperand(elements));
}
@@ -2751,90 +3210,8 @@ MemOperand LCodeGen::PrepareKeyedOperand(Register key,
}
-void LCodeGen::DoLoadKeyedSpecializedArrayElement(
- LLoadKeyedSpecializedArrayElement* instr) {
- Register external_pointer = ToRegister(instr->external_pointer());
- Register key = no_reg;
- ElementsKind elements_kind = instr->elements_kind();
- bool key_is_constant = instr->key()->IsConstantOperand();
- int constant_key = 0;
- if (key_is_constant) {
- constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
- if (constant_key & 0xF0000000) {
- Abort("array index constant value too big.");
- }
- } else {
- key = ToRegister(instr->key());
- }
- int element_size_shift = ElementsKindToShiftSize(elements_kind);
- int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
- ? (element_size_shift - kSmiTagSize) : element_size_shift;
- int additional_offset = instr->additional_index() << element_size_shift;
-
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
- elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- FPURegister result = ToDoubleRegister(instr->result());
- if (key_is_constant) {
- __ Addu(scratch0(), external_pointer, constant_key << element_size_shift);
- } else {
- __ sll(scratch0(), key, shift_size);
- __ Addu(scratch0(), scratch0(), external_pointer);
- }
-
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- __ lwc1(result, MemOperand(scratch0(), additional_offset));
- __ cvt_d_s(result, result);
- } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
- __ ldc1(result, MemOperand(scratch0(), additional_offset));
- }
- } else {
- Register result = ToRegister(instr->result());
- MemOperand mem_operand = PrepareKeyedOperand(
- key, external_pointer, key_is_constant, constant_key,
- element_size_shift, shift_size,
- instr->additional_index(), additional_offset);
- switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
- __ lb(result, mem_operand);
- break;
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ lbu(result, mem_operand);
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- __ lh(result, mem_operand);
- break;
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ lhu(result, mem_operand);
- break;
- case EXTERNAL_INT_ELEMENTS:
- __ lw(result, mem_operand);
- break;
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ lw(result, mem_operand);
- if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
- DeoptimizeIf(Ugreater_equal, instr->environment(),
- result, Operand(0x80000000));
- }
- break;
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
- }
-}
-
-
void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->object()).is(a1));
ASSERT(ToRegister(instr->key()).is(a0));
@@ -2932,6 +3309,7 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
__ bind(&receiver_ok);
}
+
void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
Register receiver = ToRegister(instr->receiver());
Register function = ToRegister(instr->function());
@@ -2971,7 +3349,6 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
__ bind(&invoke);
ASSERT(instr->HasPointerMap());
LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
SafepointGenerator safepoint_generator(
this, pointers, Safepoint::kLazyDeopt);
// The number of arguments is stored in receiver which is a0, as expected
@@ -2979,14 +3356,13 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
ParameterCount actual(receiver);
__ InvokeFunction(function, actual, CALL_FUNCTION,
safepoint_generator, CALL_AS_METHOD);
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
void LCodeGen::DoPushArgument(LPushArgument* instr) {
LOperand* argument = instr->value();
if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
- Abort("DoPushArgument not implemented for double type.");
+ Abort(kDoPushArgumentNotImplementedForDoubleType);
} else {
Register argument_reg = EmitLoadRegister(argument, at);
__ push(argument_reg);
@@ -3006,8 +3382,14 @@ void LCodeGen::DoThisFunction(LThisFunction* instr) {
void LCodeGen::DoContext(LContext* instr) {
+ // If there is a non-return use, the context must be moved to a register.
Register result = ToRegister(instr->result());
- __ mov(result, cp);
+ if (info()->IsOptimizing()) {
+ __ lw(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ } else {
+ // If there is no frame, the context must be in cp.
+ ASSERT(result.is(cp));
+ }
}
@@ -3020,6 +3402,7 @@ void LCodeGen::DoOuterContext(LOuterContext* instr) {
void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
__ LoadHeapObject(scratch0(), instr->hydrogen()->pairs());
__ li(scratch1(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
// The context is the first argument.
@@ -3029,8 +3412,9 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
+ Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
- __ lw(result, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
+ __ lw(result, ContextOperand(context, Context::GLOBAL_OBJECT_INDEX));
}
@@ -3042,15 +3426,17 @@ void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
+ int formal_parameter_count,
int arity,
LInstruction* instr,
CallKind call_kind,
A1State a1_state) {
- bool can_invoke_directly = !function->NeedsArgumentsAdaption() ||
- function->shared()->formal_parameter_count() == arity;
+ bool dont_adapt_arguments =
+ formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
+ bool can_invoke_directly =
+ dont_adapt_arguments || formal_parameter_count == arity;
LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
if (can_invoke_directly) {
if (a1_state == A1_UNINITIALIZED) {
@@ -3062,7 +3448,7 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
// Set r0 to arguments count if adaption is not needed. Assumes that r0
// is available to write to at this point.
- if (!function->NeedsArgumentsAdaption()) {
+ if (dont_adapt_arguments) {
__ li(a0, Operand(arity));
}
@@ -3076,18 +3462,18 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
} else {
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
ParameterCount count(arity);
- __ InvokeFunction(function, count, CALL_FUNCTION, generator, call_kind);
+ ParameterCount expected(formal_parameter_count);
+ __ InvokeFunction(
+ function, expected, count, CALL_FUNCTION, generator, call_kind);
}
-
- // Restore context.
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
ASSERT(ToRegister(instr->result()).is(v0));
__ mov(a0, v0);
- CallKnownFunction(instr->function(),
+ CallKnownFunction(instr->hydrogen()->function(),
+ instr->hydrogen()->formal_parameter_count(),
instr->arity(),
instr,
CALL_AS_METHOD,
@@ -3095,7 +3481,9 @@ void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
}
-void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
+void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
+ ASSERT(instr->context() != NULL);
+ ASSERT(ToRegister(instr->context()).is(cp));
Register input = ToRegister(instr->value());
Register result = ToRegister(instr->result());
Register scratch = scratch0();
@@ -3137,7 +3525,8 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
// Slow case: Call the runtime system to do the number allocation.
__ bind(&slow);
- CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
+ CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
+ instr->context());
// Set the pointer to the new heap number in tmp.
if (!tmp1.is(v0))
__ mov(tmp1, v0);
@@ -3160,14 +3549,13 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
}
-void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
+void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
Register input = ToRegister(instr->value());
Register result = ToRegister(instr->result());
Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
Label done;
__ Branch(USE_DELAY_SLOT, &done, ge, input, Operand(zero_reg));
__ mov(result, input);
- ASSERT_EQ(2, masm()->InstructionsGeneratedSince(&done));
__ subu(result, zero_reg, input);
// Overflow if result is still negative, i.e. 0x80000000.
DeoptimizeIf(lt, instr->environment(), result, Operand(zero_reg));
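
The deopt after the subu covers the one input a 32-bit integer abs cannot represent: negating kMinInt leaves the value negative, which is what the lt-against-zero check above detects. A tiny sketch:

    #include <cstdint>
    #include <cstdio>

    int main() {
      int32_t input = INT32_MIN;  // 0x80000000
      // subu result, zero_reg, input; computed via unsigned to avoid C++ UB.
      int32_t result = static_cast<int32_t>(0u - static_cast<uint32_t>(input));
      std::printf("%d\n", result < 0);  // prints 1: still negative, so deoptimize
      return 0;
    }
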
@@ -3175,19 +3563,18 @@ void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
}
-void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
+void LCodeGen::DoMathAbs(LMathAbs* instr) {
// Class for deferred case.
- class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
+ class DeferredMathAbsTaggedHeapNumber V8_FINAL : public LDeferredCode {
public:
- DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
- LUnaryMathOperation* instr)
+ DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
+ virtual void Generate() V8_OVERRIDE {
codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
}
- virtual LInstruction* instr() { return instr_; }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
- LUnaryMathOperation* instr_;
+ LMathAbs* instr_;
};
Representation r = instr->hydrogen()->value()->representation();
@@ -3195,7 +3582,7 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
FPURegister input = ToDoubleRegister(instr->value());
FPURegister result = ToDoubleRegister(instr->result());
__ abs_d(result, input);
- } else if (r.IsInteger32()) {
+ } else if (r.IsSmiOrInteger32()) {
EmitIntegerMathAbs(instr);
} else {
// Representation is tagged.
@@ -3211,25 +3598,22 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
}
-void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
+void LCodeGen::DoMathFloor(LMathFloor* instr) {
DoubleRegister input = ToDoubleRegister(instr->value());
Register result = ToRegister(instr->result());
- FPURegister single_scratch = double_scratch0().low();
Register scratch1 = scratch0();
Register except_flag = ToRegister(instr->temp());
__ EmitFPUTruncate(kRoundToMinusInf,
- single_scratch,
+ result,
input,
scratch1,
+ double_scratch0(),
except_flag);
// Deopt if the operation did not succeed.
DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
- // Load the result.
- __ mfc1(result, single_scratch);
-
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
// Test for -0.
Label done;
@@ -3242,9 +3626,10 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
}
-void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
+void LCodeGen::DoMathRound(LMathRound* instr) {
DoubleRegister input = ToDoubleRegister(instr->value());
Register result = ToRegister(instr->result());
+ DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp());
Register scratch = scratch0();
Label done, check_sign_on_zero;
@@ -3296,17 +3681,15 @@ void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
}
Register except_flag = scratch;
-
__ EmitFPUTruncate(kRoundToMinusInf,
- double_scratch0().low(),
- double_scratch0(),
result,
+ double_scratch0(),
+ at,
+ double_scratch1,
except_flag);
DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
- __ mfc1(result, double_scratch0().low());
-
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
// Test for -0.
__ Branch(&done, ne, result, Operand(zero_reg));
@@ -3319,14 +3702,14 @@ void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
}
-void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
+void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
DoubleRegister input = ToDoubleRegister(instr->value());
DoubleRegister result = ToDoubleRegister(instr->result());
__ sqrt_d(result, input);
}
-void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
+void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
DoubleRegister input = ToDoubleRegister(instr->value());
DoubleRegister result = ToDoubleRegister(instr->result());
DoubleRegister temp = ToDoubleRegister(instr->temp());
@@ -3361,7 +3744,10 @@ void LCodeGen::DoPower(LPower* instr) {
ASSERT(ToDoubleRegister(instr->left()).is(f2));
ASSERT(ToDoubleRegister(instr->result()).is(f0));
- if (exponent_type.IsTagged()) {
+ if (exponent_type.IsSmi()) {
+ MathPowStub stub(MathPowStub::TAGGED);
+ __ CallStub(&stub);
+ } else if (exponent_type.IsTagged()) {
Label no_deopt;
__ JumpIfSmi(a2, &no_deopt);
__ lw(t3, FieldMemOperand(a2, HeapObject::kMapOffset));
@@ -3381,162 +3767,139 @@ void LCodeGen::DoPower(LPower* instr) {
void LCodeGen::DoRandom(LRandom* instr) {
- class DeferredDoRandom: public LDeferredCode {
- public:
- DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredRandom(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LRandom* instr_;
- };
-
- DeferredDoRandom* deferred = new(zone()) DeferredDoRandom(this, instr);
- // Having marked this instruction as a call we can use any
- // registers.
- ASSERT(ToDoubleRegister(instr->result()).is(f0));
- ASSERT(ToRegister(instr->global_object()).is(a0));
-
+ // Assert that the register size is indeed the size of each seed.
static const int kSeedSize = sizeof(uint32_t);
STATIC_ASSERT(kPointerSize == kSeedSize);
- __ lw(a0, FieldMemOperand(a0, GlobalObject::kNativeContextOffset));
+ // Load native context.
+ Register global_object = ToRegister(instr->global_object());
+ Register native_context = global_object;
+ __ lw(native_context, FieldMemOperand(
+ global_object, GlobalObject::kNativeContextOffset));
+
+ // Load state (FixedArray of the native context's random seeds).
static const int kRandomSeedOffset =
FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
- __ lw(a2, FieldMemOperand(a0, kRandomSeedOffset));
- // a2: FixedArray of the native context's random seeds
+ Register state = native_context;
+ __ lw(state, FieldMemOperand(native_context, kRandomSeedOffset));
// Load state[0].
- __ lw(a1, FieldMemOperand(a2, ByteArray::kHeaderSize));
- __ Branch(deferred->entry(), eq, a1, Operand(zero_reg));
+ Register state0 = ToRegister(instr->scratch());
+ __ lw(state0, FieldMemOperand(state, ByteArray::kHeaderSize));
// Load state[1].
- __ lw(a0, FieldMemOperand(a2, ByteArray::kHeaderSize + kSeedSize));
- // a1: state[0].
- // a0: state[1].
+ Register state1 = ToRegister(instr->scratch2());
+ __ lw(state1, FieldMemOperand(state, ByteArray::kHeaderSize + kSeedSize));
// state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
- __ And(a3, a1, Operand(0xFFFF));
- __ li(t0, Operand(18273));
- __ Mul(a3, a3, t0);
- __ srl(a1, a1, 16);
- __ Addu(a1, a3, a1);
+ Register scratch3 = ToRegister(instr->scratch3());
+ Register scratch4 = scratch0();
+ __ And(scratch3, state0, Operand(0xFFFF));
+ __ li(scratch4, Operand(18273));
+ __ Mul(scratch3, scratch3, scratch4);
+ __ srl(state0, state0, 16);
+ __ Addu(state0, scratch3, state0);
// Save state[0].
- __ sw(a1, FieldMemOperand(a2, ByteArray::kHeaderSize));
+ __ sw(state0, FieldMemOperand(state, ByteArray::kHeaderSize));
// state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
- __ And(a3, a0, Operand(0xFFFF));
- __ li(t0, Operand(36969));
- __ Mul(a3, a3, t0);
- __ srl(a0, a0, 16),
- __ Addu(a0, a3, a0);
+ __ And(scratch3, state1, Operand(0xFFFF));
+ __ li(scratch4, Operand(36969));
+ __ Mul(scratch3, scratch3, scratch4);
+  __ srl(state1, state1, 16);
+ __ Addu(state1, scratch3, state1);
// Save state[1].
- __ sw(a0, FieldMemOperand(a2, ByteArray::kHeaderSize + kSeedSize));
+ __ sw(state1, FieldMemOperand(state, ByteArray::kHeaderSize + kSeedSize));
// Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
- __ And(a0, a0, Operand(0x3FFFF));
- __ sll(a1, a1, 14);
- __ Addu(v0, a0, a1);
-
- __ bind(deferred->exit());
+ Register random = scratch4;
+ __ And(random, state1, Operand(0x3FFFF));
+ __ sll(state0, state0, 14);
+ __ Addu(random, random, state0);
// 0x41300000 is the top half of 1.0 x 2^20 as a double.
- __ li(a2, Operand(0x41300000));
+ __ li(scratch3, Operand(0x41300000));
// Move 0x41300000xxxxxxxx (x = random bits in v0) to FPU.
- __ Move(f12, v0, a2);
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ __ Move(result, random, scratch3);
// Move 0x4130000000000000 to FPU.
- __ Move(f14, zero_reg, a2);
- // Subtract to get the result.
- __ sub_d(f0, f12, f14);
+ DoubleRegister scratch5 = double_scratch0();
+ __ Move(scratch5, zero_reg, scratch3);
+ __ sub_d(result, result, scratch5);
}
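
DoRandom now emits the whole multiply-with-carry update inline on allocated registers instead of fixed ones, and the final double is built by placing the 32 random bits under the exponent of 1.0 * 2^20 and subtracting that constant. A standalone sketch of the same arithmetic, using only the formulas stated in the comments above:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    double NextRandom(uint32_t state[2]) {
      // state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
      state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16);
      // state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
      state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16);
      // Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
      uint32_t random = (state[0] << 14) + (state[1] & 0x3FFFF);
      // 0x41300000 xxxxxxxx is 1.0 * 2^20 with the random bits in the mantissa.
      uint64_t bits = (uint64_t{0x41300000} << 32) | random;
      double d;
      std::memcpy(&d, &bits, sizeof(d));
      return d - 1048576.0;  // subtract 1.0 * 2^20, leaving a value in [0, 1)
    }

    int main() {
      uint32_t state[2] = {1, 2};  // arbitrary nonzero seeds for the demo
      std::printf("%f\n", NextRandom(state));
      return 0;
    }
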
-void LCodeGen::DoDeferredRandom(LRandom* instr) {
- __ PrepareCallCFunction(1, scratch0());
- __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
- // Return value is in v0.
+
+void LCodeGen::DoMathExp(LMathExp* instr) {
+ DoubleRegister input = ToDoubleRegister(instr->value());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ DoubleRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
+ DoubleRegister double_scratch2 = double_scratch0();
+ Register temp1 = ToRegister(instr->temp1());
+ Register temp2 = ToRegister(instr->temp2());
+
+ MathExpGenerator::EmitMathExp(
+ masm(), input, result, double_scratch1, double_scratch2,
+ temp1, temp2, scratch0());
}
-void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
+void LCodeGen::DoMathLog(LMathLog* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(f4));
+ // Set the context register to a GC-safe fake value. Clobbering it is
+ // OK because this instruction is marked as a call.
+ __ mov(cp, zero_reg);
TranscendentalCacheStub stub(TranscendentalCache::LOG,
TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
-void LCodeGen::DoMathTan(LUnaryMathOperation* instr) {
+void LCodeGen::DoMathTan(LMathTan* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(f4));
+ // Set the context register to a GC-safe fake value. Clobbering it is
+ // OK because this instruction is marked as a call.
+ __ mov(cp, zero_reg);
TranscendentalCacheStub stub(TranscendentalCache::TAN,
TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
-void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
+void LCodeGen::DoMathCos(LMathCos* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(f4));
+ // Set the context register to a GC-safe fake value. Clobbering it is
+ // OK because this instruction is marked as a call.
+ __ mov(cp, zero_reg);
TranscendentalCacheStub stub(TranscendentalCache::COS,
TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
-void LCodeGen::DoMathSin(LUnaryMathOperation* instr) {
+void LCodeGen::DoMathSin(LMathSin* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(f4));
+ // Set the context register to a GC-safe fake value. Clobbering it is
+ // OK because this instruction is marked as a call.
+ __ mov(cp, zero_reg);
TranscendentalCacheStub stub(TranscendentalCache::SIN,
TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
- switch (instr->op()) {
- case kMathAbs:
- DoMathAbs(instr);
- break;
- case kMathFloor:
- DoMathFloor(instr);
- break;
- case kMathRound:
- DoMathRound(instr);
- break;
- case kMathSqrt:
- DoMathSqrt(instr);
- break;
- case kMathPowHalf:
- DoMathPowHalf(instr);
- break;
- case kMathCos:
- DoMathCos(instr);
- break;
- case kMathSin:
- DoMathSin(instr);
- break;
- case kMathTan:
- DoMathTan(instr);
- break;
- case kMathLog:
- DoMathLog(instr);
- break;
- default:
- Abort("Unimplemented type of LUnaryMathOperation.");
- UNREACHABLE();
- }
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->function()).is(a1));
ASSERT(instr->HasPointerMap());
- if (instr->known_function().is_null()) {
+ Handle<JSFunction> known_function = instr->hydrogen()->known_function();
+ if (known_function.is_null()) {
LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
ParameterCount count(instr->arity());
__ InvokeFunction(a1, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
} else {
- CallKnownFunction(instr->known_function(),
+ CallKnownFunction(known_function,
+ instr->hydrogen()->formal_parameter_count(),
instr->arity(),
instr,
CALL_AS_METHOD,
@@ -3546,17 +3909,18 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->result()).is(v0));
int arity = instr->arity();
Handle<Code> ic =
isolate()->stub_cache()->ComputeKeyedCallInitialize(arity);
CallCode(ic, RelocInfo::CODE_TARGET, instr);
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
void LCodeGen::DoCallNamed(LCallNamed* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->result()).is(v0));
int arity = instr->arity();
@@ -3565,23 +3929,22 @@ void LCodeGen::DoCallNamed(LCallNamed* instr) {
isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
__ li(a2, Operand(instr->name()));
CallCode(ic, mode, instr);
- // Restore context register.
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
void LCodeGen::DoCallFunction(LCallFunction* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->function()).is(a1));
ASSERT(ToRegister(instr->result()).is(v0));
int arity = instr->arity();
CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->result()).is(v0));
int arity = instr->arity();
@@ -3590,13 +3953,13 @@ void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
__ li(a2, Operand(instr->name()));
CallCode(ic, mode, instr);
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
ASSERT(ToRegister(instr->result()).is(v0));
- CallKnownFunction(instr->target(),
+ CallKnownFunction(instr->hydrogen()->target(),
+ instr->hydrogen()->formal_parameter_count(),
instr->arity(),
instr,
CALL_AS_FUNCTION,
@@ -3605,12 +3968,60 @@ void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
void LCodeGen::DoCallNew(LCallNew* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->constructor()).is(a1));
ASSERT(ToRegister(instr->result()).is(v0));
+ __ li(a0, Operand(instr->arity()));
+ // No cell in a2 for construct type feedback in optimized code
+ Handle<Object> undefined_value(isolate()->factory()->undefined_value());
+ __ li(a2, Operand(undefined_value));
CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+}
+
+
+void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ ASSERT(ToRegister(instr->constructor()).is(a1));
+ ASSERT(ToRegister(instr->result()).is(v0));
+
__ li(a0, Operand(instr->arity()));
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ __ li(a2, Operand(instr->hydrogen()->property_cell()));
+ ElementsKind kind = instr->hydrogen()->elements_kind();
+ AllocationSiteOverrideMode override_mode =
+ (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
+ ? DISABLE_ALLOCATION_SITES
+ : DONT_OVERRIDE;
+ ContextCheckMode context_mode = CONTEXT_CHECK_NOT_REQUIRED;
+
+ if (instr->arity() == 0) {
+ ArrayNoArgumentConstructorStub stub(kind, context_mode, override_mode);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+ } else if (instr->arity() == 1) {
+ Label done;
+ if (IsFastPackedElementsKind(kind)) {
+ Label packed_case;
+      // We might need a holey array: check whether the single (length)
+      // argument on the stack is zero.
+ __ lw(t1, MemOperand(sp, 0));
+ __ Branch(&packed_case, eq, t1, Operand(zero_reg));
+
+ ElementsKind holey_kind = GetHoleyElementsKind(kind);
+ ArraySingleArgumentConstructorStub stub(holey_kind, context_mode,
+ override_mode);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+ __ jmp(&done);
+ __ bind(&packed_case);
+ }
+
+ ArraySingleArgumentConstructorStub stub(kind, context_mode, override_mode);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+ __ bind(&done);
+ } else {
+ ArrayNArgumentsConstructorStub stub(kind, context_mode, override_mode);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+ }
}
@@ -3619,16 +4030,61 @@ void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
}
+void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
+ Register function = ToRegister(instr->function());
+ Register code_object = ToRegister(instr->code_object());
+ __ Addu(code_object, code_object,
+ Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ sw(code_object,
+ FieldMemOperand(function, JSFunction::kCodeEntryOffset));
+}
+
+
+void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
+ Register result = ToRegister(instr->result());
+ Register base = ToRegister(instr->base_object());
+ __ Addu(result, base, Operand(instr->offset()));
+}
+
+
void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
+ Representation representation = instr->representation();
+
Register object = ToRegister(instr->object());
- Register value = ToRegister(instr->value());
Register scratch = scratch0();
- int offset = instr->offset();
+ HObjectAccess access = instr->hydrogen()->access();
+ int offset = access.offset();
+
+ if (access.IsExternalMemory()) {
+ Register value = ToRegister(instr->value());
+ MemOperand operand = MemOperand(object, offset);
+ if (representation.IsByte()) {
+ __ sb(value, operand);
+ } else {
+ __ sw(value, operand);
+ }
+ return;
+ }
- ASSERT(!object.is(value));
+ Handle<Map> transition = instr->transition();
+
+ if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
+ Register value = ToRegister(instr->value());
+ if (!instr->hydrogen()->value()->type().IsHeapObject()) {
+ __ And(scratch, value, Operand(kSmiTagMask));
+ DeoptimizeIf(eq, instr->environment(), scratch, Operand(zero_reg));
+ }
+ } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ ASSERT(transition.is_null());
+ ASSERT(access.IsInobject());
+ ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+ DoubleRegister value = ToDoubleRegister(instr->value());
+ __ sdc1(value, FieldMemOperand(object, offset));
+ return;
+ }
- if (!instr->transition().is_null()) {
- __ li(scratch, Operand(instr->transition()));
+ if (!transition.is_null()) {
+ __ li(scratch, Operand(transition));
__ sw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
Register temp = ToRegister(instr->temp());
@@ -3637,7 +4093,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
HeapObject::kMapOffset,
scratch,
temp,
- kRAHasBeenSaved,
+ GetRAState(),
kSaveFPRegs,
OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
@@ -3645,25 +4101,37 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
}
// Do the store.
- HType type = instr->hydrogen()->value()->type();
+ Register value = ToRegister(instr->value());
+ ASSERT(!object.is(value));
SmiCheck check_needed =
- type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
- if (instr->is_in_object()) {
- __ sw(value, FieldMemOperand(object, offset));
+ instr->hydrogen()->value()->IsHeapObject()
+ ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+ if (access.IsInobject()) {
+ MemOperand operand = FieldMemOperand(object, offset);
+ if (representation.IsByte()) {
+ __ sb(value, operand);
+ } else {
+ __ sw(value, operand);
+ }
if (instr->hydrogen()->NeedsWriteBarrier()) {
// Update the write barrier for the object for in-object properties.
__ RecordWriteField(object,
offset,
value,
scratch,
- kRAHasBeenSaved,
+ GetRAState(),
kSaveFPRegs,
EMIT_REMEMBERED_SET,
check_needed);
}
} else {
__ lw(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
- __ sw(value, FieldMemOperand(scratch, offset));
+ MemOperand operand = FieldMemOperand(scratch, offset);
+ if (representation.IsByte()) {
+ __ sb(value, operand);
+ } else {
+ __ sw(value, operand);
+ }
if (instr->hydrogen()->NeedsWriteBarrier()) {
// Update the write barrier for the properties array.
// object is used as a scratch register.
@@ -3671,7 +4139,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
offset,
value,
object,
- kRAHasBeenSaved,
+ GetRAState(),
kSaveFPRegs,
EMIT_REMEMBERED_SET,
check_needed);
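
check_needed above downgrades to OMIT_SMI_CHECK only when the stored value is statically known to be a heap object; otherwise the write barrier first tests the smi tag, the same test the FLAG_track_heap_object_fields path emits with And/DeoptimizeIf. A sketch of that tag test, assuming 32-bit V8 tagging:

    #include <cstdint>
    #include <cstdio>

    const intptr_t kSmiTagMask = 1;  // 32-bit V8: bit 0 clear means smi

    bool IsSmi(intptr_t tagged_value) {
      return (tagged_value & kSmiTagMask) == 0;  // heap pointers have bit 0 set
    }

    int main() {
      intptr_t smi_42 = 42 << 1;    // a smi keeps its value in the upper 31 bits
      intptr_t heap_ptr = 0x1001;   // hypothetical tagged heap object pointer
      std::printf("%d %d\n", IsSmi(smi_42), IsSmi(heap_ptr));  // 1 0
      return 0;
    }
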
@@ -3681,6 +4149,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->object()).is(a1));
ASSERT(ToRegister(instr->value()).is(a0));
@@ -3693,152 +4162,48 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
}
-void LCodeGen::DeoptIfTaggedButNotSmi(LEnvironment* environment,
- HValue* value,
- LOperand* operand) {
- if (value->representation().IsTagged() && !value->type().IsSmi()) {
- if (operand->IsRegister()) {
- __ And(at, ToRegister(operand), Operand(kSmiTagMask));
- DeoptimizeIf(ne, environment, at, Operand(zero_reg));
- } else {
- __ li(at, ToOperand(operand));
- __ And(at, at, Operand(kSmiTagMask));
- DeoptimizeIf(ne, environment, at, Operand(zero_reg));
- }
+void LCodeGen::ApplyCheckIf(Condition condition,
+ LBoundsCheck* check,
+ Register src1,
+ const Operand& src2) {
+ if (FLAG_debug_code && check->hydrogen()->skip_check()) {
+ Label done;
+ __ Branch(&done, NegateCondition(condition), src1, src2);
+ __ stop("eliminated bounds check failed");
+ __ bind(&done);
+ } else {
+ DeoptimizeIf(condition, check->environment(), src1, src2);
}
}
void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
- DeoptIfTaggedButNotSmi(instr->environment(),
- instr->hydrogen()->length(),
- instr->length());
- DeoptIfTaggedButNotSmi(instr->environment(),
- instr->hydrogen()->index(),
- instr->index());
+ if (instr->hydrogen()->skip_check()) return;
+
+ Condition condition = instr->hydrogen()->allow_equality() ? hi : hs;
if (instr->index()->IsConstantOperand()) {
int constant_index =
ToInteger32(LConstantOperand::cast(instr->index()));
- if (instr->hydrogen()->length()->representation().IsTagged()) {
+ if (instr->hydrogen()->length()->representation().IsSmi()) {
__ li(at, Operand(Smi::FromInt(constant_index)));
} else {
__ li(at, Operand(constant_index));
}
- DeoptimizeIf(hs,
- instr->environment(),
+ ApplyCheckIf(condition,
+ instr,
at,
Operand(ToRegister(instr->length())));
} else {
- DeoptimizeIf(hs,
- instr->environment(),
+ ApplyCheckIf(condition,
+ instr,
ToRegister(instr->index()),
Operand(ToRegister(instr->length())));
}
}
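
DoBoundsCheck deopts on a single unsigned comparison (hs, or hi when equality is allowed), which also catches negative indices because they reinterpret as very large unsigned values. A one-function sketch:

    #include <cstdint>
    #include <cstdio>

    bool InBounds(int32_t index, int32_t length) {
      // "hs" deoptimizes when index >= length as unsigned values.
      return static_cast<uint32_t>(index) < static_cast<uint32_t>(length);
    }

    int main() {
      std::printf("%d %d %d\n", InBounds(3, 10), InBounds(10, 10), InBounds(-1, 10));
      // prints: 1 0 0 (the negative index fails the same unsigned check)
      return 0;
    }
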
-void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
- Register value = ToRegister(instr->value());
- Register elements = ToRegister(instr->object());
- Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
- Register scratch = scratch0();
- Register store_base = scratch;
- int offset = 0;
-
- // Do the store.
- if (instr->key()->IsConstantOperand()) {
- ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
- LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
- offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
- instr->additional_index());
- store_base = elements;
- } else {
- // Even though the HLoadKeyedFastElement instruction forces the input
- // representation for the key to be an integer, the input gets replaced
- // during bound check elimination with the index argument to the bounds
- // check, which can be tagged, so that case must be handled here, too.
- if (instr->hydrogen()->key()->representation().IsTagged()) {
- __ sll(scratch, key, kPointerSizeLog2 - kSmiTagSize);
- __ addu(scratch, elements, scratch);
- } else {
- __ sll(scratch, key, kPointerSizeLog2);
- __ addu(scratch, elements, scratch);
- }
- offset = FixedArray::OffsetOfElementAt(instr->additional_index());
- }
- __ sw(value, FieldMemOperand(store_base, offset));
-
- if (instr->hydrogen()->NeedsWriteBarrier()) {
- HType type = instr->hydrogen()->value()->type();
- SmiCheck check_needed =
- type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
- // Compute address of modified element and store it into key register.
- __ Addu(key, store_base, Operand(offset - kHeapObjectTag));
- __ RecordWrite(elements,
- key,
- value,
- kRAHasBeenSaved,
- kSaveFPRegs,
- EMIT_REMEMBERED_SET,
- check_needed);
- }
-}
-
-
-void LCodeGen::DoStoreKeyedFastDoubleElement(
- LStoreKeyedFastDoubleElement* instr) {
- DoubleRegister value = ToDoubleRegister(instr->value());
- Register elements = ToRegister(instr->elements());
- Register key = no_reg;
- Register scratch = scratch0();
- bool key_is_constant = instr->key()->IsConstantOperand();
- int constant_key = 0;
- Label not_nan;
-
- // Calculate the effective address of the slot in the array to store the
- // double value.
- if (key_is_constant) {
- constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
- if (constant_key & 0xF0000000) {
- Abort("array index constant value too big.");
- }
- } else {
- key = ToRegister(instr->key());
- }
- int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
- int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
- ? (element_size_shift - kSmiTagSize) : element_size_shift;
- if (key_is_constant) {
- __ Addu(scratch, elements, Operand((constant_key << element_size_shift) +
- FixedDoubleArray::kHeaderSize - kHeapObjectTag));
- } else {
- __ sll(scratch, key, shift_size);
- __ Addu(scratch, elements, Operand(scratch));
- __ Addu(scratch, scratch,
- Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
- }
-
- if (instr->NeedsCanonicalization()) {
- Label is_nan;
- // Check for NaN. All NaNs must be canonicalized.
- __ BranchF(NULL, &is_nan, eq, value, value);
- __ Branch(&not_nan);
-
- // Only load canonical NaN if the comparison above set the overflow.
- __ bind(&is_nan);
- __ Move(value, FixedDoubleArray::canonical_not_the_hole_nan_as_double());
- }
-
- __ bind(&not_nan);
- __ sdc1(value, MemOperand(scratch, instr->additional_index() <<
- element_size_shift));
-}
-
-
-void LCodeGen::DoStoreKeyedSpecializedArrayElement(
- LStoreKeyedSpecializedArrayElement* instr) {
-
- Register external_pointer = ToRegister(instr->external_pointer());
+void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
+ Register external_pointer = ToRegister(instr->elements());
Register key = no_reg;
ElementsKind elements_kind = instr->elements_kind();
bool key_is_constant = instr->key()->IsConstantOperand();
@@ -3846,32 +4211,37 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
if (key_is_constant) {
constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
if (constant_key & 0xF0000000) {
- Abort("array index constant value too big.");
+ Abort(kArrayIndexConstantValueTooBig);
}
} else {
key = ToRegister(instr->key());
}
int element_size_shift = ElementsKindToShiftSize(elements_kind);
- int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
+ int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
? (element_size_shift - kSmiTagSize) : element_size_shift;
int additional_offset = instr->additional_index() << element_size_shift;
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
+ Register address = scratch0();
FPURegister value(ToDoubleRegister(instr->value()));
if (key_is_constant) {
- __ Addu(scratch0(), external_pointer, constant_key <<
- element_size_shift);
+ if (constant_key != 0) {
+ __ Addu(address, external_pointer,
+ Operand(constant_key << element_size_shift));
+ } else {
+ address = external_pointer;
+ }
} else {
- __ sll(scratch0(), key, shift_size);
- __ Addu(scratch0(), scratch0(), external_pointer);
+ __ sll(address, key, shift_size);
+ __ Addu(address, external_pointer, address);
}
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
__ cvt_s_d(double_scratch0(), value);
- __ swc1(double_scratch0(), MemOperand(scratch0(), additional_offset));
+ __ swc1(double_scratch0(), MemOperand(address, additional_offset));
} else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
- __ sdc1(value, MemOperand(scratch0(), additional_offset));
+ __ sdc1(value, MemOperand(address, additional_offset));
}
} else {
Register value(ToRegister(instr->value()));
@@ -3909,7 +4279,120 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
}
}
+
+void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
+ DoubleRegister value = ToDoubleRegister(instr->value());
+ Register elements = ToRegister(instr->elements());
+ Register scratch = scratch0();
+ DoubleRegister double_scratch = double_scratch0();
+ bool key_is_constant = instr->key()->IsConstantOperand();
+ Label not_nan, done;
+
+ // Calculate the effective address of the slot in the array to store the
+ // double value.
+ int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
+ if (key_is_constant) {
+ int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+ if (constant_key & 0xF0000000) {
+ Abort(kArrayIndexConstantValueTooBig);
+ }
+ __ Addu(scratch, elements,
+ Operand((constant_key << element_size_shift) +
+ FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+ } else {
+ int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
+ ? (element_size_shift - kSmiTagSize) : element_size_shift;
+ __ Addu(scratch, elements,
+ Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+ __ sll(at, ToRegister(instr->key()), shift_size);
+ __ Addu(scratch, scratch, at);
+ }
+
+ if (instr->NeedsCanonicalization()) {
+ Label is_nan;
+ // Check for NaN. All NaNs must be canonicalized.
+ __ BranchF(NULL, &is_nan, eq, value, value);
+ __ Branch(&not_nan);
+
+ // Only load canonical NaN if the comparison above set the overflow.
+ __ bind(&is_nan);
+ __ Move(double_scratch,
+ FixedDoubleArray::canonical_not_the_hole_nan_as_double());
+ __ sdc1(double_scratch, MemOperand(scratch, instr->additional_index() <<
+ element_size_shift));
+ __ Branch(&done);
+ }
+
+ __ bind(&not_nan);
+ __ sdc1(value, MemOperand(scratch, instr->additional_index() <<
+ element_size_shift));
+ __ bind(&done);
+}
+
+
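
NeedsCanonicalization above replaces every NaN being stored with one canonical NaN, so arbitrary NaN payloads (including one that would look like the hole) never reach a fast double array; NaN-ness is detected by the value failing an equality compare with itself (the BranchF). A hedged sketch; canonical_nan stands in for FixedDoubleArray::canonical_not_the_hole_nan_as_double():

    #include <cmath>
    #include <cstdio>
    #include <limits>

    double CanonicalizeForStore(double value, double canonical_nan) {
      // BranchF(NULL, &is_nan, eq, value, value) branches to is_nan exactly when
      // value is NaN, i.e. when value == value is false.
      return (value == value) ? value : canonical_nan;
    }

    int main() {
      double canonical = std::numeric_limits<double>::quiet_NaN();  // stand-in only
      std::printf("%f\n", CanonicalizeForStore(1.5, canonical));    // stored unchanged
      std::printf("%d\n", std::isnan(CanonicalizeForStore(std::nan("1"), canonical)));
      return 0;
    }
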
+void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
+ Register value = ToRegister(instr->value());
+ Register elements = ToRegister(instr->elements());
+ Register key = instr->key()->IsRegister() ? ToRegister(instr->key())
+ : no_reg;
+ Register scratch = scratch0();
+ Register store_base = scratch;
+ int offset = 0;
+
+ // Do the store.
+ if (instr->key()->IsConstantOperand()) {
+ ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+ LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
+ offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
+ instr->additional_index());
+ store_base = elements;
+ } else {
+ // Even though the HLoadKeyed instruction forces the input
+ // representation for the key to be an integer, the input gets replaced
+ // during bound check elimination with the index argument to the bounds
+ // check, which can be tagged, so that case must be handled here, too.
+ if (instr->hydrogen()->key()->representation().IsSmi()) {
+ __ sll(scratch, key, kPointerSizeLog2 - kSmiTagSize);
+ __ addu(scratch, elements, scratch);
+ } else {
+ __ sll(scratch, key, kPointerSizeLog2);
+ __ addu(scratch, elements, scratch);
+ }
+ offset = FixedArray::OffsetOfElementAt(instr->additional_index());
+ }
+ __ sw(value, FieldMemOperand(store_base, offset));
+
+ if (instr->hydrogen()->NeedsWriteBarrier()) {
+ SmiCheck check_needed =
+ instr->hydrogen()->value()->IsHeapObject()
+ ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+ // Compute address of modified element and store it into key register.
+ __ Addu(key, store_base, Operand(offset - kHeapObjectTag));
+ __ RecordWrite(elements,
+ key,
+ value,
+ GetRAState(),
+ kSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ check_needed);
+ }
+}
+
+
+void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
+ // By cases: external, fast double
+ if (instr->is_external()) {
+ DoStoreKeyedExternalArray(instr);
+ } else if (instr->hydrogen()->value()->representation().IsDouble()) {
+ DoStoreKeyedFixedDoubleArray(instr);
+ } else {
+ DoStoreKeyedFixedArray(instr);
+ }
+}
+
+
void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->object()).is(a2));
ASSERT(ToRegister(instr->key()).is(a1));
ASSERT(ToRegister(instr->value()).is(a0));
@@ -3923,64 +4406,68 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
Register object_reg = ToRegister(instr->object());
- Register new_map_reg = ToRegister(instr->new_map_temp());
Register scratch = scratch0();
Handle<Map> from_map = instr->original_map();
Handle<Map> to_map = instr->transitioned_map();
- ElementsKind from_kind = from_map->elements_kind();
- ElementsKind to_kind = to_map->elements_kind();
-
- __ mov(ToRegister(instr->result()), object_reg);
+ ElementsKind from_kind = instr->from_kind();
+ ElementsKind to_kind = instr->to_kind();
Label not_applicable;
__ lw(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
__ Branch(&not_applicable, ne, scratch, Operand(from_map));
- __ li(new_map_reg, Operand(to_map));
if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
+ Register new_map_reg = ToRegister(instr->new_map_temp());
+ __ li(new_map_reg, Operand(to_map));
__ sw(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
// Write barrier.
__ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
- scratch, kRAHasBeenSaved, kDontSaveFPRegs);
- } else if (IsFastSmiElementsKind(from_kind) &&
- IsFastDoubleElementsKind(to_kind)) {
- Register fixed_object_reg = ToRegister(instr->temp());
- ASSERT(fixed_object_reg.is(a2));
- ASSERT(new_map_reg.is(a3));
- __ mov(fixed_object_reg, object_reg);
- CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
- RelocInfo::CODE_TARGET, instr);
- } else if (IsFastDoubleElementsKind(from_kind) &&
- IsFastObjectElementsKind(to_kind)) {
- Register fixed_object_reg = ToRegister(instr->temp());
- ASSERT(fixed_object_reg.is(a2));
- ASSERT(new_map_reg.is(a3));
- __ mov(fixed_object_reg, object_reg);
- CallCode(isolate()->builtins()->TransitionElementsDoubleToObject(),
- RelocInfo::CODE_TARGET, instr);
+ scratch, GetRAState(), kDontSaveFPRegs);
} else {
- UNREACHABLE();
+ ASSERT(ToRegister(instr->context()).is(cp));
+ PushSafepointRegistersScope scope(
+ this, Safepoint::kWithRegistersAndDoubles);
+ __ mov(a0, object_reg);
+ __ li(a1, Operand(to_map));
+ TransitionElementsKindStub stub(from_kind, to_kind);
+ __ CallStub(&stub);
+ RecordSafepointWithRegistersAndDoubles(
+ instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
}
__ bind(&not_applicable);
}
+void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
+ Register object = ToRegister(instr->object());
+ Register temp = ToRegister(instr->temp());
+ Label no_memento_found;
+ __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found,
+ ne, &no_memento_found);
+ DeoptimizeIf(al, instr->environment());
+ __ bind(&no_memento_found);
+}
+
+
void LCodeGen::DoStringAdd(LStringAdd* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
__ push(ToRegister(instr->left()));
__ push(ToRegister(instr->right()));
- StringAddStub stub(NO_STRING_CHECK_IN_STUB);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ StringAddStub stub(instr->hydrogen()->flags());
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
- class DeferredStringCharCodeAt: public LDeferredCode {
+ class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode {
public:
DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
- virtual LInstruction* instr() { return instr_; }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredStringCharCodeAt(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LStringCharCodeAt* instr_;
};
@@ -4019,7 +4506,8 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
__ SmiTag(index);
__ push(index);
}
- CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr);
+ CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr,
+ instr->context());
__ AssertSmi(v0);
__ SmiUntag(v0);
__ StoreToSafepointRegisterSlot(v0, result);
@@ -4027,12 +4515,14 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
- class DeferredStringCharFromCode: public LDeferredCode {
+ class DeferredStringCharFromCode V8_FINAL : public LDeferredCode {
public:
DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
- virtual LInstruction* instr() { return instr_; }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredStringCharFromCode(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LStringCharFromCode* instr_;
};
@@ -4047,7 +4537,7 @@ void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
ASSERT(!char_code.is(result));
__ Branch(deferred->entry(), hi,
- char_code, Operand(String::kMaxAsciiCharCode));
+ char_code, Operand(String::kMaxOneByteCharCode));
__ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
__ sll(scratch, char_code, kPointerSizeLog2);
__ Addu(result, result, scratch);
@@ -4070,18 +4560,11 @@ void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
__ SmiTag(char_code);
__ push(char_code);
- CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr);
+ CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
__ StoreToSafepointRegisterSlot(v0, result);
}
-void LCodeGen::DoStringLength(LStringLength* instr) {
- Register string = ToRegister(instr->string());
- Register result = ToRegister(instr->result());
- __ lw(result, FieldMemOperand(string, String::kLengthOffset));
-}
-
-
void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
LOperand* input = instr->value();
ASSERT(input->IsRegister() || input->IsStackSlot());
@@ -4099,6 +4582,19 @@ void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
}
+void LCodeGen::DoInteger32ToSmi(LInteger32ToSmi* instr) {
+ LOperand* input = instr->value();
+ LOperand* output = instr->result();
+ Register scratch = scratch0();
+
+ __ SmiTagCheckOverflow(ToRegister(output), ToRegister(input), scratch);
+ if (!instr->hydrogen()->value()->HasRange() ||
+ !instr->hydrogen()->value()->range()->IsInSmiRange()) {
+ DeoptimizeIf(lt, instr->environment(), scratch, Operand(zero_reg));
+ }
+}
+
+
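
DoInteger32ToSmi only emits the deoptimization when the value's range is not already known to fit in a Smi. On a 32-bit target a Smi is the integer shifted left by one bit, so only 31-bit payloads survive the tag; a minimal sketch of that check (SmiTag31 is an illustrative name, not a V8 helper):

    #include <cstdint>
    #include <optional>

    // Illustrative stand-in for SmiTagCheckOverflow on a 32-bit target: a Smi
    // holds the value shifted left by one, so anything outside [-2^30, 2^30)
    // overflows and the caller must deoptimize (here: nullopt).
    std::optional<int32_t> SmiTag31(int32_t value) {
      if (value < -(1 << 30) || value >= (1 << 30)) return std::nullopt;
      return value * 2;  // kSmiTag == 0, kSmiTagSize == 1
    }
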
void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
LOperand* input = instr->value();
LOperand* output = instr->result();
@@ -4109,17 +4605,30 @@ void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
}
+void LCodeGen::DoUint32ToSmi(LUint32ToSmi* instr) {
+ LOperand* input = instr->value();
+ LOperand* output = instr->result();
+ if (!instr->hydrogen()->value()->HasRange() ||
+ !instr->hydrogen()->value()->range()->IsInSmiRange()) {
+ Register scratch = scratch0();
+ __ And(scratch, ToRegister(input), Operand(0xc0000000));
+ DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
+ }
+ __ SmiTag(ToRegister(output), ToRegister(input));
+}
+
+
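
The 0xc0000000 mask in DoUint32ToSmi is the unsigned counterpart of the same range check: a uint32 fits in a 31-bit Smi payload only if its top two bits are clear, i.e. it is below 2^30. A sketch under that assumption (UintToSmi31 is a made-up name):

    #include <cstdint>
    #include <optional>

    // Made-up helper mirroring DoUint32ToSmi: deoptimize (nullopt) unless the
    // top two bits are clear, then tag by shifting left one bit.
    std::optional<int32_t> UintToSmi31(uint32_t value) {
      if ((value & 0xc0000000u) != 0) return std::nullopt;  // >= 2^30
      return static_cast<int32_t>(value << 1);
    }
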
void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
- class DeferredNumberTagI: public LDeferredCode {
+ class DeferredNumberTagI V8_FINAL : public LDeferredCode {
public:
DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
+ virtual void Generate() V8_OVERRIDE {
codegen()->DoDeferredNumberTagI(instr_,
instr_->value(),
SIGNED_INT32);
}
- virtual LInstruction* instr() { return instr_; }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LNumberTagI* instr_;
};
@@ -4136,16 +4645,16 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
- class DeferredNumberTagU: public LDeferredCode {
+ class DeferredNumberTagU V8_FINAL : public LDeferredCode {
public:
DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
+ virtual void Generate() V8_OVERRIDE {
codegen()->DoDeferredNumberTagI(instr_,
instr_->value(),
UNSIGNED_INT32);
}
- virtual LInstruction* instr() { return instr_; }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LNumberTagU* instr_;
};
@@ -4167,7 +4676,7 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
Label slow;
Register src = ToRegister(value);
Register dst = ToRegister(instr->result());
- FPURegister dbl_scratch = double_scratch0();
+ DoubleRegister dbl_scratch = double_scratch0();
// Preserve the value of all registers.
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
@@ -4189,8 +4698,8 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
}
if (FLAG_inline_new) {
- __ LoadRoot(t2, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(t1, a3, t0, t2, &slow);
+ __ LoadRoot(scratch0(), Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(t1, a3, t0, scratch0(), &slow, DONT_TAG_RESULT);
__ Move(dst, t1);
__ Branch(&done);
}
@@ -4202,24 +4711,36 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
// register is stored, as this register is in the pointer map, but contains an
// integer value.
__ StoreToSafepointRegisterSlot(zero_reg, dst);
- CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
+ // NumberTagI and NumberTagD use the context from the frame, rather than
+ // the environment's HContext or HInlinedContext value.
+ // They only call Runtime::kAllocateHeapNumber.
+ // The corresponding HChange instructions are added in a phase that does
+ // not have easy access to the local context.
+ __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
__ Move(dst, v0);
+ __ Subu(dst, dst, kHeapObjectTag);
// Done. Store the value in dbl_scratch into the value field of the allocated
// heap number.
__ bind(&done);
- __ sdc1(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
+ __ sdc1(dbl_scratch, MemOperand(dst, HeapNumber::kValueOffset));
+ __ Addu(dst, dst, kHeapObjectTag);
__ StoreToSafepointRegisterSlot(dst, dst);
}
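
The DONT_TAG_RESULT flag and the explicit Subu/Addu by kHeapObjectTag around the sdc1 store are two sides of the same change: the double payload is written through a plain MemOperand while the address is still untagged, and the pointer only becomes a tagged heap reference afterwards. A minimal sketch, assuming kHeapObjectTag == 1 and using an illustrative TagHeapNumber name:

    #include <cstdint>
    #include <cstring>

    // Illustrative only: store the double while the address is untagged, then
    // tag the pointer by adding kHeapObjectTag (1) as the very last step.
    uintptr_t TagHeapNumber(uintptr_t untagged, int value_offset, double value) {
      std::memcpy(reinterpret_cast<void*>(untagged + value_offset),
                  &value, sizeof(value));  // plain MemOperand store, no -1 bias
      return untagged + 1;                 // kHeapObjectTag
    }
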
void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
- class DeferredNumberTagD: public LDeferredCode {
+ class DeferredNumberTagD V8_FINAL : public LDeferredCode {
public:
DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
- virtual LInstruction* instr() { return instr_; }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredNumberTagD(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LNumberTagD* instr_;
};
@@ -4233,12 +4754,16 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
if (FLAG_inline_new) {
__ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry());
+ // We want the untagged address first for performance; the tag is added after the store below.
+ __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(),
+ DONT_TAG_RESULT);
} else {
__ Branch(deferred->entry());
}
__ bind(deferred->exit());
- __ sdc1(input_reg, FieldMemOperand(reg, HeapNumber::kValueOffset));
+ __ sdc1(input_reg, MemOperand(reg, HeapNumber::kValueOffset));
+ // Now that we have finished with the object's real address, tag it.
+ __ Addu(reg, reg, kHeapObjectTag);
}
@@ -4250,7 +4775,16 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
__ mov(reg, zero_reg);
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
- CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
+ // NumberTagI and NumberTagD use the context from the frame, rather than
+ // the environment's HContext or HInlinedContext value.
+ // They only call Runtime::kAllocateHeapNumber.
+ // The corresponding HChange instructions are added in a phase that does
+ // not have easy access to the local context.
+ __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+ __ Subu(v0, v0, kHeapObjectTag);
__ StoreToSafepointRegisterSlot(v0, reg);
}
@@ -4279,45 +4813,45 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
void LCodeGen::EmitNumberUntagD(Register input_reg,
DoubleRegister result_reg,
- bool deoptimize_on_undefined,
+ bool can_convert_undefined_to_nan,
bool deoptimize_on_minus_zero,
- LEnvironment* env) {
+ LEnvironment* env,
+ NumberUntagDMode mode) {
Register scratch = scratch0();
-
- Label load_smi, heap_number, done;
-
- // Smi check.
- __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
-
- // Heap number map check.
- __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
- __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
- if (deoptimize_on_undefined) {
- DeoptimizeIf(ne, env, scratch, Operand(at));
- } else {
- Label heap_number;
- __ Branch(&heap_number, eq, scratch, Operand(at));
-
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- DeoptimizeIf(ne, env, input_reg, Operand(at));
-
- // Convert undefined to NaN.
- __ LoadRoot(at, Heap::kNanValueRootIndex);
- __ ldc1(result_reg, FieldMemOperand(at, HeapNumber::kValueOffset));
+ Label convert, load_smi, done;
+ if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
+ // Smi check.
+ __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
+ // Heap number map check.
+ __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
+ __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+ if (can_convert_undefined_to_nan) {
+ __ Branch(&convert, ne, scratch, Operand(at));
+ } else {
+ DeoptimizeIf(ne, env, scratch, Operand(at));
+ }
+ // Load heap number.
+ __ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
+ if (deoptimize_on_minus_zero) {
+ __ mfc1(at, result_reg.low());
+ __ Branch(&done, ne, at, Operand(zero_reg));
+ __ mfc1(scratch, result_reg.high());
+ DeoptimizeIf(eq, env, scratch, Operand(HeapNumber::kSignMask));
+ }
__ Branch(&done);
-
- __ bind(&heap_number);
- }
- // Heap number to double register conversion.
- __ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
- if (deoptimize_on_minus_zero) {
- __ mfc1(at, result_reg.low());
- __ Branch(&done, ne, at, Operand(zero_reg));
- __ mfc1(scratch, result_reg.high());
- DeoptimizeIf(eq, env, scratch, Operand(HeapNumber::kSignMask));
+ if (can_convert_undefined_to_nan) {
+ __ bind(&convert);
+ // Convert undefined (and hole) to NaN.
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ DeoptimizeIf(ne, env, input_reg, Operand(at));
+ __ LoadRoot(scratch, Heap::kNanValueRootIndex);
+ __ ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
+ __ Branch(&done);
+ }
+ } else {
+ __ SmiUntag(scratch, input_reg);
+ ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
}
- __ Branch(&done);
-
// Smi to double register conversion
__ bind(&load_smi);
// scratch: untagged value of input_reg
@@ -4332,7 +4866,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
Register scratch1 = scratch0();
Register scratch2 = ToRegister(instr->temp());
DoubleRegister double_scratch = double_scratch0();
- FPURegister single_scratch = double_scratch.low();
+ DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp2());
ASSERT(!scratch1.is(input_reg) && !scratch1.is(scratch2));
ASSERT(!scratch2.is(input_reg) && !scratch2.is(scratch1));
@@ -4347,32 +4881,34 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
// of the if.
if (instr->truncating()) {
- Register scratch3 = ToRegister(instr->temp2());
- DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp3());
- ASSERT(!scratch3.is(input_reg) &&
- !scratch3.is(scratch1) &&
- !scratch3.is(scratch2));
// Performs a truncating conversion of a floating point number as used by
// the JS bitwise operations.
- Label heap_number;
- __ Branch(&heap_number, eq, scratch1, Operand(at)); // HeapNumber map?
- // Check for undefined. Undefined is converted to zero for truncating
- // conversions.
+ Label no_heap_number, check_bools, check_false;
+ __ Branch(&no_heap_number, ne, scratch1, Operand(at)); // HeapNumber map?
+ __ mov(scratch2, input_reg);
+ __ TruncateHeapNumberToI(input_reg, scratch2);
+ __ Branch(&done);
+
+ // Check for oddballs. Undefined and False are converted to zero and True to
+ // one for truncating conversions.
+ __ bind(&no_heap_number);
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- DeoptimizeIf(ne, instr->environment(), input_reg, Operand(at));
+ __ Branch(&check_bools, ne, input_reg, Operand(at));
ASSERT(ToRegister(instr->result()).is(input_reg));
- __ mov(input_reg, zero_reg);
- __ Branch(&done);
+ __ Branch(USE_DELAY_SLOT, &done);
+ __ mov(input_reg, zero_reg); // In delay slot.
- __ bind(&heap_number);
- __ ldc1(double_scratch2,
- FieldMemOperand(input_reg, HeapNumber::kValueOffset));
- __ EmitECMATruncate(input_reg,
- double_scratch2,
- single_scratch,
- scratch1,
- scratch2,
- scratch3);
+ __ bind(&check_bools);
+ __ LoadRoot(at, Heap::kTrueValueRootIndex);
+ __ Branch(&check_false, ne, scratch2, Operand(at));
+ __ Branch(USE_DELAY_SLOT, &done);
+ __ li(input_reg, Operand(1)); // In delay slot.
+
+ __ bind(&check_false);
+ __ LoadRoot(at, Heap::kFalseValueRootIndex);
+ DeoptimizeIf(ne, instr->environment(), scratch2, Operand(at));
+ __ Branch(USE_DELAY_SLOT, &done);
+ __ mov(input_reg, zero_reg); // In delay slot.
} else {
// Deoptimize if we don't have a heap number.
DeoptimizeIf(ne, instr->environment(), scratch1, Operand(at));
@@ -4383,18 +4919,16 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
Register except_flag = scratch2;
__ EmitFPUTruncate(kRoundToZero,
- single_scratch,
+ input_reg,
double_scratch,
scratch1,
+ double_scratch2,
except_flag,
kCheckForInexactConversion);
// Deopt if the operation did not succeed.
DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
- // Load the result.
- __ mfc1(input_reg, single_scratch);
-
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ Branch(&done, ne, input_reg, Operand(zero_reg));
@@ -4408,12 +4942,14 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
- class DeferredTaggedToI: public LDeferredCode {
+ class DeferredTaggedToI V8_FINAL : public LDeferredCode {
public:
DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
- virtual LInstruction* instr() { return instr_; }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredTaggedToI(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LTaggedToI* instr_;
};
@@ -4424,14 +4960,18 @@ void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
Register input_reg = ToRegister(input);
- DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
+ if (instr->hydrogen()->value()->representation().IsSmi()) {
+ __ SmiUntag(input_reg);
+ } else {
+ DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
- // Let the deferred code handle the HeapObject case.
- __ JumpIfNotSmi(input_reg, deferred->entry());
+ // Let the deferred code handle the HeapObject case.
+ __ JumpIfNotSmi(input_reg, deferred->entry());
- // Smi to int32 conversion.
- __ SmiUntag(input_reg);
- __ bind(deferred->exit());
+ // Smi to int32 conversion.
+ __ SmiUntag(input_reg);
+ __ bind(deferred->exit());
+ }
}
@@ -4444,44 +4984,83 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
Register input_reg = ToRegister(input);
DoubleRegister result_reg = ToDoubleRegister(result);
+ HValue* value = instr->hydrogen()->value();
+ NumberUntagDMode mode = value->representation().IsSmi()
+ ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
+
EmitNumberUntagD(input_reg, result_reg,
- instr->hydrogen()->deoptimize_on_undefined(),
+ instr->hydrogen()->can_convert_undefined_to_nan(),
instr->hydrogen()->deoptimize_on_minus_zero(),
- instr->environment());
+ instr->environment(),
+ mode);
}
void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
Register result_reg = ToRegister(instr->result());
Register scratch1 = scratch0();
- Register scratch2 = ToRegister(instr->temp());
DoubleRegister double_input = ToDoubleRegister(instr->value());
- FPURegister single_scratch = double_scratch0().low();
if (instr->truncating()) {
- Register scratch3 = ToRegister(instr->temp2());
- __ EmitECMATruncate(result_reg,
- double_input,
- single_scratch,
- scratch1,
- scratch2,
- scratch3);
+ __ TruncateDoubleToI(result_reg, double_input);
} else {
- Register except_flag = scratch2;
+ Register except_flag = LCodeGen::scratch1();
+
+ __ EmitFPUTruncate(kRoundToMinusInf,
+ result_reg,
+ double_input,
+ scratch1,
+ double_scratch0(),
+ except_flag,
+ kCheckForInexactConversion);
+
+ // Deopt if the operation did not succeed (except_flag != 0).
+ DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
+
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label done;
+ __ Branch(&done, ne, result_reg, Operand(zero_reg));
+ __ mfc1(scratch1, double_input.high());
+ __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
+ DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
+ __ bind(&done);
+ }
+ }
+}
+
+
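
In the non-truncating branch, EmitFPUTruncate runs with kRoundToMinusInf and kCheckForInexactConversion, so any fractional input (and, with kBailoutOnMinusZero, a negative zero) forces a deoptimization instead of a silent rounding. A rough scalar model of that contract, not V8 code:

    #include <cmath>
    #include <cstdint>
    #include <optional>

    // Rough model of the non-truncating double->int32 path; nullopt stands for
    // "deoptimize".
    std::optional<int32_t> DoubleToInt32Exact(double input,
                                              bool bailout_on_minus_zero) {
      double floored = std::floor(input);               // kRoundToMinusInf
      if (floored != input) return std::nullopt;        // inexact conversion
      if (floored < INT32_MIN || floored > INT32_MAX) return std::nullopt;
      if (bailout_on_minus_zero && floored == 0.0 && std::signbit(input))
        return std::nullopt;                            // -0: check the sign bit
      return static_cast<int32_t>(floored);
    }
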
+void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
+ Register result_reg = ToRegister(instr->result());
+ Register scratch1 = LCodeGen::scratch0();
+ DoubleRegister double_input = ToDoubleRegister(instr->value());
+
+ if (instr->truncating()) {
+ __ TruncateDoubleToI(result_reg, double_input);
+ } else {
+ Register except_flag = LCodeGen::scratch1();
__ EmitFPUTruncate(kRoundToMinusInf,
- single_scratch,
+ result_reg,
double_input,
scratch1,
+ double_scratch0(),
except_flag,
kCheckForInexactConversion);
// Deopt if the operation did not succeed (except_flag != 0).
DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
- // Load the result.
- __ mfc1(result_reg, single_scratch);
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label done;
+ __ Branch(&done, ne, result_reg, Operand(zero_reg));
+ __ mfc1(scratch1, double_input.high());
+ __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
+ DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
+ __ bind(&done);
+ }
}
+ __ SmiTagCheckOverflow(result_reg, result_reg, scratch1);
+ DeoptimizeIf(lt, instr->environment(), scratch1, Operand(zero_reg));
}
@@ -4493,9 +5072,11 @@ void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
- LOperand* input = instr->value();
- __ And(at, ToRegister(input), Operand(kSmiTagMask));
- DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
+ if (!instr->hydrogen()->value()->IsHeapObject()) {
+ LOperand* input = instr->value();
+ __ And(at, ToRegister(input), Operand(kSmiTagMask));
+ DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
+ }
}
@@ -4538,50 +5119,84 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
}
-void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
+void LCodeGen::DoCheckValue(LCheckValue* instr) {
Register reg = ToRegister(instr->value());
- Handle<JSFunction> target = instr->hydrogen()->target();
- if (isolate()->heap()->InNewSpace(*target)) {
+ Handle<HeapObject> object = instr->hydrogen()->object().handle();
+ AllowDeferredHandleDereference smi_check;
+ if (isolate()->heap()->InNewSpace(*object)) {
Register reg = ToRegister(instr->value());
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(target);
+ Handle<Cell> cell = isolate()->factory()->NewCell(object);
__ li(at, Operand(Handle<Object>(cell)));
- __ lw(at, FieldMemOperand(at, JSGlobalPropertyCell::kValueOffset));
+ __ lw(at, FieldMemOperand(at, Cell::kValueOffset));
DeoptimizeIf(ne, instr->environment(), reg,
Operand(at));
} else {
DeoptimizeIf(ne, instr->environment(), reg,
- Operand(target));
+ Operand(object));
}
}
-void LCodeGen::DoCheckMapCommon(Register reg,
- Register scratch,
- Handle<Map> map,
- CompareMapMode mode,
- LEnvironment* env) {
- Label success;
- __ CompareMapAndBranch(reg, scratch, map, &success, eq, &success, mode);
- DeoptimizeIf(al, env);
- __ bind(&success);
+void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
+ {
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ __ push(object);
+ __ mov(cp, zero_reg);
+ __ CallRuntimeSaveDoubles(Runtime::kMigrateInstance);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
+ __ StoreToSafepointRegisterSlot(v0, scratch0());
+ }
+ __ And(at, scratch0(), Operand(kSmiTagMask));
+ DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
}
void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
- Register scratch = scratch0();
+ class DeferredCheckMaps V8_FINAL : public LDeferredCode {
+ public:
+ DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
+ : LDeferredCode(codegen), instr_(instr), object_(object) {
+ SetExit(check_maps());
+ }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredInstanceMigration(instr_, object_);
+ }
+ Label* check_maps() { return &check_maps_; }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ private:
+ LCheckMaps* instr_;
+ Label check_maps_;
+ Register object_;
+ };
+
+ if (instr->hydrogen()->CanOmitMapChecks()) return;
+ Register map_reg = scratch0();
LOperand* input = instr->value();
ASSERT(input->IsRegister());
Register reg = ToRegister(input);
+ __ lw(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
+
+ DeferredCheckMaps* deferred = NULL;
+ if (instr->hydrogen()->has_migration_target()) {
+ deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
+ __ bind(deferred->check_maps());
+ }
+
+ UniqueSet<Map> map_set = instr->hydrogen()->map_set();
Label success;
- SmallMapList* map_set = instr->hydrogen()->map_set();
- for (int i = 0; i < map_set->length() - 1; i++) {
- Handle<Map> map = map_set->at(i);
- __ CompareMapAndBranch(
- reg, scratch, map, &success, eq, &success, REQUIRE_EXACT_MAP);
- }
- Handle<Map> map = map_set->last();
- DoCheckMapCommon(reg, scratch, map, REQUIRE_EXACT_MAP, instr->environment());
+ for (int i = 0; i < map_set.size() - 1; i++) {
+ Handle<Map> map = map_set.at(i).handle();
+ __ CompareMapAndBranch(map_reg, map, &success, eq, &success);
+ }
+ Handle<Map> map = map_set.at(map_set.size() - 1).handle();
+ // Do the CompareMap() directly within the Branch() and DeoptimizeIf().
+ if (instr->hydrogen()->has_migration_target()) {
+ __ Branch(deferred->entry(), ne, map_reg, Operand(map));
+ } else {
+ DeoptimizeIf(ne, instr->environment(), map_reg, Operand(map));
+ }
+
__ bind(&success);
}
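
DoCheckMaps now compares against every map in the set and, when one of them is a migration target, routes the final mismatch through deferred code that calls Runtime::kMigrateInstance and deoptimizes only if migration fails (the runtime returns a Smi). An outline of that control flow with invented scaffolding; only the branching structure mirrors the generated code:

    #include <vector>

    struct Map { int id; };  // invented stand-in for a v8::internal::Map

    // migrate() stands in for the Runtime::kMigrateInstance call and reports
    // whether it produced a usable (non-Smi) result.
    bool CheckMapsOrMigrate(const Map* object_map,
                            const std::vector<const Map*>& maps,
                            bool has_migration_target, bool (*migrate)()) {
      for (const Map* allowed : maps)
        if (object_map == allowed) return true;  // branch to &success
      if (!has_migration_target) return false;   // DeoptimizeIf(ne, ...) on last map
      return migrate();                          // deferred path; deopt on failure
    }
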
@@ -4636,103 +5251,77 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
}
-void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
- Register temp1 = ToRegister(instr->temp());
- Register temp2 = ToRegister(instr->temp2());
-
- Handle<JSObject> holder = instr->holder();
- Handle<JSObject> current_prototype = instr->prototype();
-
- // Load prototype object.
- __ LoadHeapObject(temp1, current_prototype);
-
- // Check prototype maps up to the holder.
- while (!current_prototype.is_identical_to(holder)) {
- DoCheckMapCommon(temp1, temp2,
- Handle<Map>(current_prototype->map()),
- ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
- current_prototype =
- Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
- // Load next prototype object.
- __ LoadHeapObject(temp1, current_prototype);
- }
-
- // Check the holder map.
- DoCheckMapCommon(temp1, temp2,
- Handle<Map>(current_prototype->map()),
- ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
-}
-
-
-void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
- class DeferredAllocateObject: public LDeferredCode {
+void LCodeGen::DoAllocate(LAllocate* instr) {
+ class DeferredAllocate V8_FINAL : public LDeferredCode {
public:
- DeferredAllocateObject(LCodeGen* codegen, LAllocateObject* instr)
+ DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredAllocateObject(instr_); }
- virtual LInstruction* instr() { return instr_; }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredAllocate(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
- LAllocateObject* instr_;
+ LAllocate* instr_;
};
- DeferredAllocateObject* deferred =
- new(zone()) DeferredAllocateObject(this, instr);
+ DeferredAllocate* deferred =
+ new(zone()) DeferredAllocate(this, instr);
Register result = ToRegister(instr->result());
- Register scratch = ToRegister(instr->temp());
+ Register scratch = ToRegister(instr->temp1());
Register scratch2 = ToRegister(instr->temp2());
- Handle<JSFunction> constructor = instr->hydrogen()->constructor();
- Handle<Map> initial_map(constructor->initial_map());
- int instance_size = initial_map->instance_size();
- ASSERT(initial_map->pre_allocated_property_fields() +
- initial_map->unused_property_fields() -
- initial_map->inobject_properties() == 0);
-
- // Allocate memory for the object. The initial map might change when
- // the constructor's prototype changes, but instance size and property
- // counts remain unchanged (if slack tracking finished).
- ASSERT(!constructor->shared()->IsInobjectSlackTrackingInProgress());
- __ AllocateInNewSpace(instance_size,
- result,
- scratch,
- scratch2,
- deferred->entry(),
- TAG_OBJECT);
+
+ // Allocate memory for the object.
+ AllocationFlags flags = TAG_OBJECT;
+ if (instr->hydrogen()->MustAllocateDoubleAligned()) {
+ flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
+ }
+ if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
+ ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
+ ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
+ flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
+ } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
+ ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
+ flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
+ }
+ if (instr->size()->IsConstantOperand()) {
+ int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+ __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
+ } else {
+ Register size = ToRegister(instr->size());
+ __ Allocate(size,
+ result,
+ scratch,
+ scratch2,
+ deferred->entry(),
+ flags);
+ }
__ bind(deferred->exit());
- if (FLAG_debug_code) {
- Label is_in_new_space;
- __ JumpIfInNewSpace(result, scratch, &is_in_new_space);
- __ Abort("Allocated object is not in new-space");
- __ bind(&is_in_new_space);
- }
-
- // Load the initial map.
- Register map = scratch;
- __ LoadHeapObject(map, constructor);
- __ lw(map, FieldMemOperand(map, JSFunction::kPrototypeOrInitialMapOffset));
-
- // Initialize map and fields of the newly allocated object.
- ASSERT(initial_map->instance_type() == JS_OBJECT_TYPE);
- __ sw(map, FieldMemOperand(result, JSObject::kMapOffset));
- __ LoadRoot(scratch, Heap::kEmptyFixedArrayRootIndex);
- __ sw(scratch, FieldMemOperand(result, JSObject::kElementsOffset));
- __ sw(scratch, FieldMemOperand(result, JSObject::kPropertiesOffset));
- if (initial_map->inobject_properties() != 0) {
- __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
- for (int i = 0; i < initial_map->inobject_properties(); i++) {
- int property_offset = JSObject::kHeaderSize + i * kPointerSize;
- __ sw(scratch, FieldMemOperand(result, property_offset));
+
+ if (instr->hydrogen()->MustPrefillWithFiller()) {
+ if (instr->size()->IsConstantOperand()) {
+ int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+ __ li(scratch, Operand(size));
+ } else {
+ scratch = ToRegister(instr->size());
}
+ __ Subu(scratch, scratch, Operand(kPointerSize));
+ __ Subu(result, result, Operand(kHeapObjectTag));
+ Label loop;
+ __ bind(&loop);
+ __ li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
+ __ Addu(at, result, Operand(scratch));
+ __ sw(scratch2, MemOperand(at));
+ __ Subu(scratch, scratch, Operand(kPointerSize));
+ __ Branch(&loop, ge, scratch, Operand(zero_reg));
+ __ Addu(result, result, Operand(kHeapObjectTag));
}
}
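
The MustPrefillWithFiller loop walks from the end of the freshly allocated (temporarily untagged) region back to offset 0, storing the one-word filler map at every pointer-sized slot so the GC never observes uninitialized memory inside the new object. A host-side sketch of the same loop; filler_map stands in for isolate()->factory()->one_pointer_filler_map():

    #include <cstdint>
    #include <cstring>

    // Sketch of the prefill loop over an untagged allocation of 'size' bytes.
    void PrefillWithFiller(uintptr_t untagged_start, int32_t size,
                           uintptr_t filler_map) {
      const int32_t kPointerSize = static_cast<int32_t>(sizeof(void*));
      for (int32_t offset = size - kPointerSize; offset >= 0;
           offset -= kPointerSize) {
        std::memcpy(reinterpret_cast<void*>(untagged_start + offset),
                    &filler_map, sizeof(filler_map));
      }
    }
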
-void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
+void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
Register result = ToRegister(instr->result());
- Handle<JSFunction> constructor = instr->hydrogen()->constructor();
- Handle<Map> initial_map(constructor->initial_map());
- int instance_size = initial_map->instance_size();
// TODO(3095996): Get rid of this. For now, we need to make the
// result register contain a valid pointer because it is already
@@ -4740,238 +5329,30 @@ void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
__ mov(result, zero_reg);
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
- __ li(a0, Operand(Smi::FromInt(instance_size)));
- __ push(a0);
- CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
- __ StoreToSafepointRegisterSlot(v0, result);
-}
-
-
-void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
- Handle<FixedArray> literals(instr->environment()->closure()->literals());
- ElementsKind boilerplate_elements_kind =
- instr->hydrogen()->boilerplate_elements_kind();
-
- // Deopt if the array literal boilerplate ElementsKind is of a type different
- // than the expected one. The check isn't necessary if the boilerplate has
- // already been converted to TERMINAL_FAST_ELEMENTS_KIND.
- if (CanTransitionToMoreGeneralFastElementsKind(
- boilerplate_elements_kind, true)) {
- __ LoadHeapObject(a1, instr->hydrogen()->boilerplate_object());
- // Load map into a2.
- __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
- // Load the map's "bit field 2".
- __ lbu(a2, FieldMemOperand(a2, Map::kBitField2Offset));
- // Retrieve elements_kind from bit field 2.
- __ Ext(a2, a2, Map::kElementsKindShift, Map::kElementsKindBitCount);
- DeoptimizeIf(ne,
- instr->environment(),
- a2,
- Operand(boilerplate_elements_kind));
- }
-
- // Set up the parameters to the stub/runtime call.
- __ LoadHeapObject(a3, literals);
- __ li(a2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
- // Boilerplate already exists, constant elements are never accessed.
- // Pass an empty fixed array.
- __ li(a1, Operand(isolate()->factory()->empty_fixed_array()));
- __ Push(a3, a2, a1);
-
- // Pick the right runtime function or stub to call.
- int length = instr->hydrogen()->length();
- if (instr->hydrogen()->IsCopyOnWrite()) {
- ASSERT(instr->hydrogen()->depth() == 1);
- FastCloneShallowArrayStub::Mode mode =
- FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
- FastCloneShallowArrayStub stub(mode, length);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- } else if (instr->hydrogen()->depth() > 1) {
- CallRuntime(Runtime::kCreateArrayLiteral, 3, instr);
- } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
- CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
+ if (instr->size()->IsRegister()) {
+ Register size = ToRegister(instr->size());
+ ASSERT(!size.is(result));
+ __ SmiTag(size);
+ __ push(size);
} else {
- FastCloneShallowArrayStub::Mode mode =
- boilerplate_elements_kind == FAST_DOUBLE_ELEMENTS
- ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
- : FastCloneShallowArrayStub::CLONE_ELEMENTS;
- FastCloneShallowArrayStub stub(mode, length);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- }
-}
-
-
-void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
- Register result,
- Register source,
- int* offset) {
- ASSERT(!source.is(a2));
- ASSERT(!result.is(a2));
-
- // Only elements backing stores for non-COW arrays need to be copied.
- Handle<FixedArrayBase> elements(object->elements());
- bool has_elements = elements->length() > 0 &&
- elements->map() != isolate()->heap()->fixed_cow_array_map();
-
- // Increase the offset so that subsequent objects end up right after
- // this object and its backing store.
- int object_offset = *offset;
- int object_size = object->map()->instance_size();
- int elements_offset = *offset + object_size;
- int elements_size = has_elements ? elements->Size() : 0;
- *offset += object_size + elements_size;
-
- // Copy object header.
- ASSERT(object->properties()->length() == 0);
- int inobject_properties = object->map()->inobject_properties();
- int header_size = object_size - inobject_properties * kPointerSize;
- for (int i = 0; i < header_size; i += kPointerSize) {
- if (has_elements && i == JSObject::kElementsOffset) {
- __ Addu(a2, result, Operand(elements_offset));
- } else {
- __ lw(a2, FieldMemOperand(source, i));
- }
- __ sw(a2, FieldMemOperand(result, object_offset + i));
- }
-
- // Copy in-object properties.
- for (int i = 0; i < inobject_properties; i++) {
- int total_offset = object_offset + object->GetInObjectPropertyOffset(i);
- Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i));
- if (value->IsJSObject()) {
- Handle<JSObject> value_object = Handle<JSObject>::cast(value);
- __ Addu(a2, result, Operand(*offset));
- __ sw(a2, FieldMemOperand(result, total_offset));
- __ LoadHeapObject(source, value_object);
- EmitDeepCopy(value_object, result, source, offset);
- } else if (value->IsHeapObject()) {
- __ LoadHeapObject(a2, Handle<HeapObject>::cast(value));
- __ sw(a2, FieldMemOperand(result, total_offset));
- } else {
- __ li(a2, Operand(value));
- __ sw(a2, FieldMemOperand(result, total_offset));
- }
- }
-
-
- if (has_elements) {
- // Copy elements backing store header.
- __ LoadHeapObject(source, elements);
- for (int i = 0; i < FixedArray::kHeaderSize; i += kPointerSize) {
- __ lw(a2, FieldMemOperand(source, i));
- __ sw(a2, FieldMemOperand(result, elements_offset + i));
- }
-
- // Copy elements backing store content.
- int elements_length = has_elements ? elements->length() : 0;
- if (elements->IsFixedDoubleArray()) {
- Handle<FixedDoubleArray> double_array =
- Handle<FixedDoubleArray>::cast(elements);
- for (int i = 0; i < elements_length; i++) {
- int64_t value = double_array->get_representation(i);
- // We only support little endian mode...
- int32_t value_low = static_cast<int32_t>(value & 0xFFFFFFFF);
- int32_t value_high = static_cast<int32_t>(value >> 32);
- int total_offset =
- elements_offset + FixedDoubleArray::OffsetOfElementAt(i);
- __ li(a2, Operand(value_low));
- __ sw(a2, FieldMemOperand(result, total_offset));
- __ li(a2, Operand(value_high));
- __ sw(a2, FieldMemOperand(result, total_offset + 4));
- }
- } else if (elements->IsFixedArray()) {
- Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
- for (int i = 0; i < elements_length; i++) {
- int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
- Handle<Object> value(fast_elements->get(i));
- if (value->IsJSObject()) {
- Handle<JSObject> value_object = Handle<JSObject>::cast(value);
- __ Addu(a2, result, Operand(*offset));
- __ sw(a2, FieldMemOperand(result, total_offset));
- __ LoadHeapObject(source, value_object);
- EmitDeepCopy(value_object, result, source, offset);
- } else if (value->IsHeapObject()) {
- __ LoadHeapObject(a2, Handle<HeapObject>::cast(value));
- __ sw(a2, FieldMemOperand(result, total_offset));
- } else {
- __ li(a2, Operand(value));
- __ sw(a2, FieldMemOperand(result, total_offset));
- }
- }
- } else {
- UNREACHABLE();
- }
- }
-}
-
-
-void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
- int size = instr->hydrogen()->total_size();
- ElementsKind boilerplate_elements_kind =
- instr->hydrogen()->boilerplate()->GetElementsKind();
-
- // Deopt if the array literal boilerplate ElementsKind is of a type different
- // than the expected one. The check isn't necessary if the boilerplate has
- // already been converted to TERMINAL_FAST_ELEMENTS_KIND.
- if (CanTransitionToMoreGeneralFastElementsKind(
- boilerplate_elements_kind, true)) {
- __ LoadHeapObject(a1, instr->hydrogen()->boilerplate());
- // Load map into a2.
- __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
- // Load the map's "bit field 2".
- __ lbu(a2, FieldMemOperand(a2, Map::kBitField2Offset));
- // Retrieve elements_kind from bit field 2.
- __ Ext(a2, a2, Map::kElementsKindShift, Map::kElementsKindBitCount);
- DeoptimizeIf(ne, instr->environment(), a2,
- Operand(boilerplate_elements_kind));
- }
-
- // Allocate all objects that are part of the literal in one big
- // allocation. This avoids multiple limit checks.
- Label allocated, runtime_allocate;
- __ AllocateInNewSpace(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT);
- __ jmp(&allocated);
-
- __ bind(&runtime_allocate);
- __ li(a0, Operand(Smi::FromInt(size)));
- __ push(a0);
- CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
-
- __ bind(&allocated);
- int offset = 0;
- __ LoadHeapObject(a1, instr->hydrogen()->boilerplate());
- EmitDeepCopy(instr->hydrogen()->boilerplate(), v0, a1, &offset);
- ASSERT_EQ(size, offset);
-}
-
-
-void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
- ASSERT(ToRegister(instr->result()).is(v0));
- Handle<FixedArray> literals(instr->environment()->closure()->literals());
- Handle<FixedArray> constant_properties =
- instr->hydrogen()->constant_properties();
-
- // Set up the parameters to the stub/runtime call.
- __ LoadHeapObject(t0, literals);
- __ li(a3, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
- __ li(a2, Operand(constant_properties));
- int flags = instr->hydrogen()->fast_elements()
- ? ObjectLiteral::kFastElements
- : ObjectLiteral::kNoFlags;
- __ li(a1, Operand(Smi::FromInt(flags)));
- __ Push(t0, a3, a2, a1);
-
- // Pick the right runtime function or stub to call.
- int properties_count = constant_properties->length() / 2;
- if (instr->hydrogen()->depth() > 1) {
- CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
- } else if (flags != ObjectLiteral::kFastElements ||
- properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
- CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
+ int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+ __ Push(Smi::FromInt(size));
+ }
+
+ if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
+ ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
+ ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
+ CallRuntimeFromDeferred(Runtime::kAllocateInOldPointerSpace, 1, instr,
+ instr->context());
+ } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
+ ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
+ CallRuntimeFromDeferred(Runtime::kAllocateInOldDataSpace, 1, instr,
+ instr->context());
} else {
- FastCloneShallowObjectStub stub(properties_count);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr,
+ instr->context());
}
+ __ StoreToSafepointRegisterSlot(v0, result);
}
@@ -4984,6 +5365,7 @@ void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
Label materialized;
// Registers will be used as follows:
// t3 = literals array.
@@ -5010,7 +5392,7 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
Label allocated, runtime_allocate;
- __ AllocateInNewSpace(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT);
+ __ Allocate(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT);
__ jmp(&allocated);
__ bind(&runtime_allocate);
@@ -5036,20 +5418,19 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
// Use the fast case closure allocation code that allocates in new
// space for nested functions that don't need literals cloning.
- Handle<SharedFunctionInfo> shared_info = instr->shared_info();
bool pretenure = instr->hydrogen()->pretenure();
- if (!pretenure && shared_info->num_literals() == 0) {
- FastNewClosureStub stub(shared_info->language_mode());
- __ li(a1, Operand(shared_info));
- __ push(a1);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ if (!pretenure && instr->hydrogen()->has_no_literals()) {
+ FastNewClosureStub stub(instr->hydrogen()->language_mode(),
+ instr->hydrogen()->is_generator());
+ __ li(a2, Operand(instr->hydrogen()->shared_info()));
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
} else {
- __ li(a2, Operand(shared_info));
- __ li(a1, Operand(pretenure
- ? factory()->true_value()
- : factory()->false_value()));
+ __ li(a2, Operand(instr->hydrogen()->shared_info()));
+ __ li(a1, Operand(pretenure ? factory()->true_value()
+ : factory()->false_value()));
__ Push(cp, a2, a1);
CallRuntime(Runtime::kNewClosure, 3, instr);
}
@@ -5066,16 +5447,12 @@ void LCodeGen::DoTypeof(LTypeof* instr) {
void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
Register input = ToRegister(instr->value());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
Register cmp1 = no_reg;
Operand cmp2 = Operand(no_reg);
- Condition final_branch_condition = EmitTypeofIs(true_label,
- false_label,
+ Condition final_branch_condition = EmitTypeofIs(instr->TrueLabel(chunk_),
+ instr->FalseLabel(chunk_),
input,
instr->type_literal(),
cmp1,
@@ -5085,7 +5462,7 @@ void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
ASSERT(!cmp2.is_reg() || cmp2.rm().is_valid());
if (final_branch_condition != kNoCondition) {
- EmitBranch(true_block, false_block, final_branch_condition, cmp1, cmp2);
+ EmitBranch(instr, final_branch_condition, cmp1, cmp2);
}
}
@@ -5101,7 +5478,7 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
// register.
Condition final_branch_condition = kNoCondition;
Register scratch = scratch0();
- if (type_name->Equals(heap()->number_symbol())) {
+ if (type_name->Equals(heap()->number_string())) {
__ JumpIfSmi(input, true_label);
__ lw(input, FieldMemOperand(input, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
@@ -5109,7 +5486,7 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
cmp2 = Operand(at);
final_branch_condition = eq;
- } else if (type_name->Equals(heap()->string_symbol())) {
+ } else if (type_name->Equals(heap()->string_string())) {
__ JumpIfSmi(input, false_label);
__ GetObjectType(input, input, scratch);
__ Branch(USE_DELAY_SLOT, false_label,
@@ -5122,7 +5499,14 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
cmp2 = Operand(zero_reg);
final_branch_condition = eq;
- } else if (type_name->Equals(heap()->boolean_symbol())) {
+ } else if (type_name->Equals(heap()->symbol_string())) {
+ __ JumpIfSmi(input, false_label);
+ __ GetObjectType(input, input, scratch);
+ cmp1 = scratch;
+ cmp2 = Operand(SYMBOL_TYPE);
+ final_branch_condition = eq;
+
+ } else if (type_name->Equals(heap()->boolean_string())) {
__ LoadRoot(at, Heap::kTrueValueRootIndex);
__ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
__ LoadRoot(at, Heap::kFalseValueRootIndex);
@@ -5130,13 +5514,13 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
cmp2 = Operand(input);
final_branch_condition = eq;
- } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_symbol())) {
+ } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_string())) {
__ LoadRoot(at, Heap::kNullValueRootIndex);
cmp1 = at;
cmp2 = Operand(input);
final_branch_condition = eq;
- } else if (type_name->Equals(heap()->undefined_symbol())) {
+ } else if (type_name->Equals(heap()->undefined_string())) {
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
__ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
// The first instruction of JumpIfSmi is an And - it is safe in the delay
@@ -5150,7 +5534,7 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
cmp2 = Operand(zero_reg);
final_branch_condition = ne;
- } else if (type_name->Equals(heap()->function_symbol())) {
+ } else if (type_name->Equals(heap()->function_string())) {
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
__ JumpIfSmi(input, false_label);
__ GetObjectType(input, scratch, input);
@@ -5159,23 +5543,21 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
cmp2 = Operand(JS_FUNCTION_PROXY_TYPE);
final_branch_condition = eq;
- } else if (type_name->Equals(heap()->object_symbol())) {
+ } else if (type_name->Equals(heap()->object_string())) {
__ JumpIfSmi(input, false_label);
if (!FLAG_harmony_typeof) {
__ LoadRoot(at, Heap::kNullValueRootIndex);
__ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
}
- // input is an object, it is safe to use GetObjectType in the delay slot.
- __ GetObjectType(input, input, scratch);
- __ Branch(USE_DELAY_SLOT, false_label,
+ Register map = input;
+ __ GetObjectType(input, map, scratch);
+ __ Branch(false_label,
lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- // Still an object, so the InstanceType can be loaded.
- __ lbu(scratch, FieldMemOperand(input, Map::kInstanceTypeOffset));
__ Branch(USE_DELAY_SLOT, false_label,
gt, scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
- // Still an object, so the BitField can be loaded.
+ // map is still valid, so the BitField can be loaded in delay slot.
// Check for undetectable objects => false.
- __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
+ __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
__ And(at, at, 1 << Map::kIsUndetectable);
cmp1 = at;
cmp2 = Operand(zero_reg);
@@ -5193,12 +5575,10 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
Register temp1 = ToRegister(instr->temp());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
EmitIsConstructCall(temp1, scratch0());
- EmitBranch(true_block, false_block, eq, temp1,
+ EmitBranch(instr, eq, temp1,
Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
}
@@ -5221,25 +5601,25 @@ void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
}
-void LCodeGen::EnsureSpaceForLazyDeopt() {
+void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
+ if (info()->IsStub()) return;
// Ensure that we have enough space after the previous lazy-bailout
// instruction for patching the code here.
int current_pc = masm()->pc_offset();
- int patch_size = Deoptimizer::patch_size();
- if (current_pc < last_lazy_deopt_pc_ + patch_size) {
- int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
+ if (current_pc < last_lazy_deopt_pc_ + space_needed) {
+ int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
while (padding_size > 0) {
__ nop();
padding_size -= Assembler::kInstrSize;
}
}
- last_lazy_deopt_pc_ = masm()->pc_offset();
}
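
EnsureSpaceForLazyDeopt now takes the required patch size as a parameter and pads with nops whenever too little code has been emitted since the last lazy-deopt point, so the deoptimizer can later patch a call sequence over the site; updating last_lazy_deopt_pc_ moves to the callers. A small sketch of the padding rule (the buffer type is illustrative):

    #include <cstdint>
    #include <vector>

    // Pad with nops until at least 'space_needed' bytes separate the current
    // pc from the previous lazy-deopt point.
    void EnsureSpace(std::vector<uint32_t>* code, int current_pc,
                     int last_lazy_deopt_pc, int space_needed, int instr_size) {
      if (current_pc < last_lazy_deopt_pc + space_needed) {
        int padding = last_lazy_deopt_pc + space_needed - current_pc;
        for (; padding > 0; padding -= instr_size) {
          code->push_back(0u);  // MIPS nop encodes as all zeros
        }
      }
    }
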
void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
- EnsureSpaceForLazyDeopt();
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+ last_lazy_deopt_pc_ = masm()->pc_offset();
ASSERT(instr->HasEnvironment());
LEnvironment* env = instr->environment();
RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
@@ -5248,39 +5628,33 @@ void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
- DeoptimizeIf(al, instr->environment(), zero_reg, Operand(zero_reg));
+ Deoptimizer::BailoutType type = instr->hydrogen()->type();
+ // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
+ // needed return address), even though the implementation of LAZY and EAGER is
+ // now identical. When LAZY is eventually completely folded into EAGER, remove
+ // the special case below.
+ if (info()->IsStub() && type == Deoptimizer::EAGER) {
+ type = Deoptimizer::LAZY;
+ }
+
+ Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
+ DeoptimizeIf(al, instr->environment(), type, zero_reg, Operand(zero_reg));
}
-void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
- Register object = ToRegister(instr->object());
- Register key = ToRegister(instr->key());
- Register strict = scratch0();
- __ li(strict, Operand(Smi::FromInt(strict_mode_flag())));
- __ Push(object, key, strict);
- ASSERT(instr->HasPointerMap());
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
- SafepointGenerator safepoint_generator(
- this, pointers, Safepoint::kLazyDeopt);
- __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, safepoint_generator);
+void LCodeGen::DoDummy(LDummy* instr) {
+ // Nothing to see here, move on!
}
-void LCodeGen::DoIn(LIn* instr) {
- Register obj = ToRegister(instr->object());
- Register key = ToRegister(instr->key());
- __ Push(key, obj);
- ASSERT(instr->HasPointerMap());
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
- SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
- __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION, safepoint_generator);
+void LCodeGen::DoDummyUse(LDummyUse* instr) {
+ // Nothing to see here, move on!
}
void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ LoadContextFromDeferred(instr->context());
__ CallRuntimeSaveDoubles(Runtime::kStackGuard);
RecordSafepointWithLazyDeopt(
instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
@@ -5291,12 +5665,14 @@ void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
void LCodeGen::DoStackCheck(LStackCheck* instr) {
- class DeferredStackCheck: public LDeferredCode {
+ class DeferredStackCheck V8_FINAL : public LDeferredCode {
public:
DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
- virtual LInstruction* instr() { return instr_; }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredStackCheck(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LStackCheck* instr_;
};
@@ -5310,9 +5686,13 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
Label done;
__ LoadRoot(at, Heap::kStackLimitRootIndex);
__ Branch(&done, hs, sp, Operand(at));
- StackCheckStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- EnsureSpaceForLazyDeopt();
+ ASSERT(instr->context()->IsRegister());
+ ASSERT(ToRegister(instr->context()).is(cp));
+ CallCode(isolate()->builtins()->StackCheck(),
+ RelocInfo::CODE_TARGET,
+ instr);
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+ last_lazy_deopt_pc_ = masm()->pc_offset();
__ bind(&done);
RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
@@ -5323,7 +5703,8 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
new(zone()) DeferredStackCheck(this, instr);
__ LoadRoot(at, Heap::kStackLimitRootIndex);
__ Branch(deferred_stack_check->entry(), lo, sp, Operand(at));
- EnsureSpaceForLazyDeopt();
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+ last_lazy_deopt_pc_ = masm()->pc_offset();
__ bind(instr->done_label());
deferred_stack_check->SetExit(instr->done_label());
RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
@@ -5339,15 +5720,13 @@ void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
// properly registered for deoptimization and records the assembler's PC
// offset.
LEnvironment* environment = instr->environment();
- environment->SetSpilledRegisters(instr->SpilledRegisterArray(),
- instr->SpilledDoubleRegisterArray());
// If the environment were already registered, we would have no way of
// backpatching it with the spill slot operands.
ASSERT(!environment->HasBeenRegistered());
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
- ASSERT(osr_pc_offset_ == -1);
- osr_pc_offset_ = masm()->pc_offset();
+
+ GenerateOsrPrologue();
}
diff --git a/deps/v8/src/mips/lithium-codegen-mips.h b/deps/v8/src/mips/lithium-codegen-mips.h
index 38c5255a4..f643d0219 100644
--- a/deps/v8/src/mips/lithium-codegen-mips.h
+++ b/deps/v8/src/mips/lithium-codegen-mips.h
@@ -28,11 +28,13 @@
#ifndef V8_MIPS_LITHIUM_CODEGEN_MIPS_H_
#define V8_MIPS_LITHIUM_CODEGEN_MIPS_H_
-#include "mips/lithium-mips.h"
-#include "mips/lithium-gap-resolver-mips.h"
#include "deoptimizer.h"
+#include "mips/lithium-gap-resolver-mips.h"
+#include "mips/lithium-mips.h"
+#include "lithium-codegen.h"
#include "safepoint-table.h"
#include "scopes.h"
+#include "v8utils.h"
namespace v8 {
namespace internal {
@@ -41,26 +43,19 @@ namespace internal {
class LDeferredCode;
class SafepointGenerator;
-class LCodeGen BASE_EMBEDDED {
+class LCodeGen: public LCodeGenBase {
public:
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
- : zone_(info->zone()),
- chunk_(static_cast<LPlatformChunk*>(chunk)),
- masm_(assembler),
- info_(info),
- current_block_(-1),
- current_instruction_(-1),
- instructions_(chunk->instructions()),
+ : LCodeGenBase(chunk, assembler, info),
deoptimizations_(4, info->zone()),
deopt_jump_table_(4, info->zone()),
deoptimization_literals_(8, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
- status_(UNUSED),
translations_(info->zone()),
deferred_(8, info->zone()),
osr_pc_offset_(-1),
- last_lazy_deopt_pc_(0),
+ frame_is_built_(false),
safepoints_(info->zone()),
resolver_(this),
expected_safepoint_kind_(Safepoint::kSimple) {
@@ -68,13 +63,27 @@ class LCodeGen BASE_EMBEDDED {
}
- // Simple accessors.
- MacroAssembler* masm() const { return masm_; }
- CompilationInfo* info() const { return info_; }
- Isolate* isolate() const { return info_->isolate(); }
- Factory* factory() const { return isolate()->factory(); }
- Heap* heap() const { return isolate()->heap(); }
- Zone* zone() const { return zone_; }
+ int LookupDestination(int block_id) const {
+ return chunk()->LookupDestination(block_id);
+ }
+
+ bool IsNextEmittedBlock(int block_id) const {
+ return LookupDestination(block_id) == GetNextEmittedBlock();
+ }
+
+ bool NeedsEagerFrame() const {
+ return GetStackSlotCount() > 0 ||
+ info()->is_non_deferred_calling() ||
+ !info()->IsStub() ||
+ info()->requires_frame();
+ }
+ bool NeedsDeferredFrame() const {
+ return !NeedsEagerFrame() && info()->is_deferred_calling();
+ }
+
+ RAStatus GetRAState() const {
+ return frame_is_built_ ? kRAHasBeenSaved : kRAHasNotBeenSaved;
+ }
// Support for converting LOperands to assembler types.
// LOperand must be a register.
@@ -90,7 +99,9 @@ class LCodeGen BASE_EMBEDDED {
DoubleRegister EmitLoadDoubleRegister(LOperand* op,
FloatRegister flt_scratch,
DoubleRegister dbl_scratch);
- int ToInteger32(LConstantOperand* op) const;
+ int32_t ToRepresentation(LConstantOperand* op, const Representation& r) const;
+ int32_t ToInteger32(LConstantOperand* op) const;
+ Smi* ToSmi(LConstantOperand* op) const;
double ToDouble(LConstantOperand* op) const;
Operand ToOperand(LOperand* op);
MemOperand ToMemOperand(LOperand* op) const;
@@ -98,6 +109,7 @@ class LCodeGen BASE_EMBEDDED {
MemOperand ToHighMemOperand(LOperand* op) const;
bool IsInteger32(LConstantOperand* op) const;
+ bool IsSmi(LConstantOperand* op) const;
Handle<Object> ToHandle(LConstantOperand* op) const;
// Try to generate code for the entire chunk, but it may fail if the
@@ -117,17 +129,15 @@ class LCodeGen BASE_EMBEDDED {
IntegerSignedness signedness);
void DoDeferredTaggedToI(LTaggedToI* instr);
- void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
+ void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
void DoDeferredStackCheck(LStackCheck* instr);
- void DoDeferredRandom(LRandom* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
- void DoDeferredAllocateObject(LAllocateObject* instr);
+ void DoDeferredAllocate(LAllocate* instr);
void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label* map_check);
- void DoCheckMapCommon(Register reg, Register scratch, Handle<Map> map,
- CompareMapMode mode, LEnvironment* env);
+ void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
// Parallel move support.
void DoParallelMove(LParallelMove* move);
@@ -143,10 +153,7 @@ class LCodeGen BASE_EMBEDDED {
int additional_offset);
// Emit frame translation commands for an environment.
- void WriteTranslation(LEnvironment* environment,
- Translation* translation,
- int* arguments_index,
- int* arguments_count);
+ void WriteTranslation(LEnvironment* environment, Translation* translation);
// Declare methods that deal with the individual node types.
#define DECLARE_DO(type) void Do##type(L##type* node);
@@ -154,31 +161,16 @@ class LCodeGen BASE_EMBEDDED {
#undef DECLARE_DO
private:
- enum Status {
- UNUSED,
- GENERATING,
- DONE,
- ABORTED
- };
-
- bool is_unused() const { return status_ == UNUSED; }
- bool is_generating() const { return status_ == GENERATING; }
- bool is_done() const { return status_ == DONE; }
- bool is_aborted() const { return status_ == ABORTED; }
-
StrictModeFlag strict_mode_flag() const {
return info()->is_classic_mode() ? kNonStrictMode : kStrictMode;
}
- LPlatformChunk* chunk() const { return chunk_; }
Scope* scope() const { return scope_; }
- HGraph* graph() const { return chunk_->graph(); }
Register scratch0() { return kLithiumScratchReg; }
Register scratch1() { return kLithiumScratchReg2; }
DoubleRegister double_scratch0() { return kLithiumScratchDouble; }
- int GetNextEmittedBlock(int block);
LInstruction* GetNextInstruction();
void EmitClassOfTest(Label* if_true,
@@ -189,21 +181,21 @@ class LCodeGen BASE_EMBEDDED {
Register temporary2);
int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
- int GetParameterCount() const { return scope()->num_parameters(); }
- void Abort(const char* reason);
- void Comment(const char* format, ...);
+ void Abort(BailoutReason reason);
void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
// Code generation passes. Returns true if code generation should
// continue.
bool GeneratePrologue();
- bool GenerateBody();
bool GenerateDeferredCode();
bool GenerateDeoptJumpTable();
bool GenerateSafepointTable();
+ // Generates the custom OSR entrypoint and sets the osr_pc_offset.
+ void GenerateOsrPrologue();
+
enum SafepointMode {
RECORD_SIMPLE_SAFEPOINT,
RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
@@ -220,7 +212,8 @@ class LCodeGen BASE_EMBEDDED {
void CallRuntime(const Runtime::Function* function,
int num_arguments,
- LInstruction* instr);
+ LInstruction* instr,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs);
void CallRuntime(Runtime::FunctionId id,
int num_arguments,
@@ -229,9 +222,11 @@ class LCodeGen BASE_EMBEDDED {
CallRuntime(function, num_arguments, instr);
}
+ void LoadContextFromDeferred(LOperand* context);
void CallRuntimeFromDeferred(Runtime::FunctionId id,
int argc,
- LInstruction* instr);
+ LInstruction* instr,
+ LOperand* context);
enum A1State {
A1_UNINITIALIZED,
@@ -241,6 +236,7 @@ class LCodeGen BASE_EMBEDDED {
// Generate a direct call to a known function. Expects the function
// to be in a1.
void CallKnownFunction(Handle<JSFunction> function,
+ int formal_parameter_count,
int arity,
LInstruction* instr,
CallKind call_kind,
@@ -253,17 +249,28 @@ class LCodeGen BASE_EMBEDDED {
void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode);
- void DeoptimizeIf(Condition cc,
+ void DeoptimizeIf(Condition condition,
LEnvironment* environment,
+ Deoptimizer::BailoutType bailout_type,
+ Register src1 = zero_reg,
+ const Operand& src2 = Operand(zero_reg));
+ void DeoptimizeIf(Condition condition,
+ LEnvironment* environment,
+ Register src1 = zero_reg,
+ const Operand& src2 = Operand(zero_reg));
+ void ApplyCheckIf(Condition condition,
+ LBoundsCheck* check,
Register src1 = zero_reg,
const Operand& src2 = Operand(zero_reg));
- void AddToTranslation(Translation* translation,
+ void AddToTranslation(LEnvironment* environment,
+ Translation* translation,
LOperand* op,
bool is_tagged,
bool is_uint32,
- int arguments_index,
- int arguments_count);
+ int* object_index_pointer,
+ int* dematerialized_index_pointer);
+ void RegisterDependentCodeForEmbeddedMaps(Handle<Code> code);
void PopulateDeoptimizationData(Handle<Code> code);
int DefineDeoptimizationLiteral(Handle<Object> literal);
@@ -272,17 +279,7 @@ class LCodeGen BASE_EMBEDDED {
Register ToRegister(int index) const;
DoubleRegister ToDoubleRegister(int index) const;
- // Specific math operations - used from DoUnaryMathOperation.
- void EmitIntegerMathAbs(LUnaryMathOperation* instr);
- void DoMathAbs(LUnaryMathOperation* instr);
- void DoMathFloor(LUnaryMathOperation* instr);
- void DoMathRound(LUnaryMathOperation* instr);
- void DoMathSqrt(LUnaryMathOperation* instr);
- void DoMathPowHalf(LUnaryMathOperation* instr);
- void DoMathLog(LUnaryMathOperation* instr);
- void DoMathTan(LUnaryMathOperation* instr);
- void DoMathCos(LUnaryMathOperation* instr);
- void DoMathSin(LUnaryMathOperation* instr);
+ void EmitIntegerMathAbs(LMathAbs* instr);
// Support for recording safepoint and position information.
void RecordSafepoint(LPointerMap* pointers,
@@ -297,30 +294,33 @@ class LCodeGen BASE_EMBEDDED {
void RecordSafepointWithRegistersAndDoubles(LPointerMap* pointers,
int arguments,
Safepoint::DeoptMode mode);
- void RecordPosition(int position);
+
+ void RecordAndWritePosition(int position) V8_OVERRIDE;
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block);
- void EmitBranch(int left_block,
- int right_block,
- Condition cc,
+ template<class InstrType>
+ void EmitBranch(InstrType instr,
+ Condition condition,
Register src1,
const Operand& src2);
- void EmitBranchF(int left_block,
- int right_block,
- Condition cc,
+ template<class InstrType>
+ void EmitBranchF(InstrType instr,
+ Condition condition,
FPURegister src1,
FPURegister src2);
+ template<class InstrType>
+ void EmitFalseBranchF(InstrType instr,
+ Condition condition,
+ FPURegister src1,
+ FPURegister src2);
void EmitCmpI(LOperand* left, LOperand* right);
void EmitNumberUntagD(Register input,
DoubleRegister result,
- bool deoptimize_on_undefined,
+ bool allow_undefined_as_nan,
bool deoptimize_on_minus_zero,
- LEnvironment* env);
-
- void DeoptIfTaggedButNotSmi(LEnvironment* environment,
- HValue* value,
- LOperand* operand);
+ LEnvironment* env,
+ NumberUntagDMode mode);
// Emits optimized code for typeof x == "y". Modifies input register.
// Returns the condition on which a final split to
@@ -348,53 +348,49 @@ class LCodeGen BASE_EMBEDDED {
// true and false label should be made, to optimize fallthrough.
Condition EmitIsString(Register input,
Register temp1,
- Label* is_not_string);
+ Label* is_not_string,
+ SmiCheck check_needed);
// Emits optimized code for %_IsConstructCall().
// Caller should branch on equal condition.
void EmitIsConstructCall(Register temp1, Register temp2);
- void EmitLoadFieldOrConstantFunction(Register result,
- Register object,
- Handle<Map> type,
- Handle<String> name,
- LEnvironment* env);
-
// Emits optimized code to deep-copy the contents of statically known
// object graphs (e.g. object literal boilerplate).
void EmitDeepCopy(Handle<JSObject> object,
Register result,
Register source,
- int* offset);
-
- struct JumpTableEntry {
- explicit inline JumpTableEntry(Address entry)
- : label(),
- address(entry) { }
- Label label;
- Address address;
- };
-
- void EnsureSpaceForLazyDeopt();
-
- Zone* zone_;
- LPlatformChunk* const chunk_;
- MacroAssembler* const masm_;
- CompilationInfo* const info_;
+ int* offset,
+ AllocationSiteMode mode);
+ // Emit optimized code for integer division.
+ // Inputs are signed.
+ // All registers are clobbered.
+ // If 'remainder' is no_reg, it is not computed.
+ void EmitSignedIntegerDivisionByConstant(Register result,
+ Register dividend,
+ int32_t divisor,
+ Register remainder,
+ Register scratch,
+ LEnvironment* environment);
+
+
+ void EnsureSpaceForLazyDeopt(int space_needed) V8_OVERRIDE;
+ void DoLoadKeyedExternalArray(LLoadKeyed* instr);
+ void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
+ void DoLoadKeyedFixedArray(LLoadKeyed* instr);
+ void DoStoreKeyedExternalArray(LStoreKeyed* instr);
+ void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
+ void DoStoreKeyedFixedArray(LStoreKeyed* instr);
- int current_block_;
- int current_instruction_;
- const ZoneList<LInstruction*>* instructions_;
ZoneList<LEnvironment*> deoptimizations_;
- ZoneList<JumpTableEntry> deopt_jump_table_;
+ ZoneList<Deoptimizer::JumpTableEntry> deopt_jump_table_;
ZoneList<Handle<Object> > deoptimization_literals_;
int inlined_function_count_;
Scope* const scope_;
- Status status_;
TranslationBuffer translations_;
ZoneList<LDeferredCode*> deferred_;
int osr_pc_offset_;
- int last_lazy_deopt_pc_;
+ bool frame_is_built_;
// Builder that keeps track of safepoints in the code. The table
// itself is emitted at the end of the generated code.
@@ -405,11 +401,12 @@ class LCodeGen BASE_EMBEDDED {
Safepoint::Kind expected_safepoint_kind_;
- class PushSafepointRegistersScope BASE_EMBEDDED {
+ class PushSafepointRegistersScope V8_FINAL BASE_EMBEDDED {
public:
PushSafepointRegistersScope(LCodeGen* codegen,
Safepoint::Kind kind)
: codegen_(codegen) {
+ ASSERT(codegen_->info()->is_calling());
ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
codegen_->expected_safepoint_kind_ = kind;
@@ -452,7 +449,7 @@ class LCodeGen BASE_EMBEDDED {
};
-class LDeferredCode: public ZoneObject {
+class LDeferredCode : public ZoneObject {
public:
explicit LDeferredCode(LCodeGen* codegen)
: codegen_(codegen),
@@ -461,7 +458,7 @@ class LDeferredCode: public ZoneObject {
codegen->AddDeferredCode(this);
}
- virtual ~LDeferredCode() { }
+ virtual ~LDeferredCode() {}
virtual void Generate() = 0;
virtual LInstruction* instr() = 0;
diff --git a/deps/v8/src/mips/lithium-gap-resolver-mips.cc b/deps/v8/src/mips/lithium-gap-resolver-mips.cc
index 87efae5f4..460e13bf0 100644
--- a/deps/v8/src/mips/lithium-gap-resolver-mips.cc
+++ b/deps/v8/src/mips/lithium-gap-resolver-mips.cc
@@ -222,7 +222,6 @@ void LGapResolver::EmitMove(int index) {
ASSERT(destination->IsStackSlot());
__ sw(source_register, cgen_->ToMemOperand(destination));
}
-
} else if (source->IsStackSlot()) {
MemOperand source_operand = cgen_->ToMemOperand(source);
if (destination->IsRegister()) {
@@ -252,17 +251,25 @@ void LGapResolver::EmitMove(int index) {
LConstantOperand* constant_source = LConstantOperand::cast(source);
if (destination->IsRegister()) {
Register dst = cgen_->ToRegister(destination);
+ Representation r = cgen_->IsSmi(constant_source)
+ ? Representation::Smi() : Representation::Integer32();
if (cgen_->IsInteger32(constant_source)) {
- __ li(dst, Operand(cgen_->ToInteger32(constant_source)));
+ __ li(dst, Operand(cgen_->ToRepresentation(constant_source, r)));
} else {
__ LoadObject(dst, cgen_->ToHandle(constant_source));
}
+ } else if (destination->IsDoubleRegister()) {
+ DoubleRegister result = cgen_->ToDoubleRegister(destination);
+ double v = cgen_->ToDouble(constant_source);
+ __ Move(result, v);
} else {
ASSERT(destination->IsStackSlot());
ASSERT(!in_cycle_); // Constant moves happen after all cycles are gone.
+ Representation r = cgen_->IsSmi(constant_source)
+ ? Representation::Smi() : Representation::Integer32();
if (cgen_->IsInteger32(constant_source)) {
__ li(kLithiumScratchReg,
- Operand(cgen_->ToInteger32(constant_source)));
+ Operand(cgen_->ToRepresentation(constant_source, r)));
} else {
__ LoadObject(kLithiumScratchReg,
cgen_->ToHandle(constant_source));
diff --git a/deps/v8/src/mips/lithium-gap-resolver-mips.h b/deps/v8/src/mips/lithium-gap-resolver-mips.h
index 2506e38c3..ea1ea3cbb 100644
--- a/deps/v8/src/mips/lithium-gap-resolver-mips.h
+++ b/deps/v8/src/mips/lithium-gap-resolver-mips.h
@@ -38,7 +38,7 @@ namespace internal {
class LCodeGen;
class LGapResolver;
-class LGapResolver BASE_EMBEDDED {
+class LGapResolver V8_FINAL BASE_EMBEDDED {
public:
explicit LGapResolver(LCodeGen* owner);
diff --git a/deps/v8/src/mips/lithium-mips.cc b/deps/v8/src/mips/lithium-mips.cc
index 0b6dcaea5..6b76ff742 100644
--- a/deps/v8/src/mips/lithium-mips.cc
+++ b/deps/v8/src/mips/lithium-mips.cc
@@ -30,6 +30,7 @@
#include "lithium-allocator-inl.h"
#include "mips/lithium-mips.h"
#include "mips/lithium-codegen-mips.h"
+#include "hydrogen-osr.h"
namespace v8 {
namespace internal {
@@ -41,24 +42,6 @@ namespace internal {
LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
#undef DEFINE_COMPILE
-LOsrEntry::LOsrEntry() {
- for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
- register_spills_[i] = NULL;
- }
- for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; ++i) {
- double_register_spills_[i] = NULL;
- }
-}
-
-
-void LOsrEntry::MarkSpilledRegister(int allocation_index,
- LOperand* spill_operand) {
- ASSERT(spill_operand->IsStackSlot());
- ASSERT(register_spills_[allocation_index] == NULL);
- register_spills_[allocation_index] = spill_operand;
-}
-
-
#ifdef DEBUG
void LInstruction::VerifyCall() {
// Call instructions can use only fixed registers as temporaries and
@@ -81,14 +64,6 @@ void LInstruction::VerifyCall() {
#endif
-void LOsrEntry::MarkSpilledDoubleRegister(int allocation_index,
- LOperand* spill_operand) {
- ASSERT(spill_operand->IsDoubleStackSlot());
- ASSERT(double_register_spills_[allocation_index] == NULL);
- double_register_spills_[allocation_index] = spill_operand;
-}
-
-
void LInstruction::PrintTo(StringStream* stream) {
stream->Add("%s ", this->Mnemonic());
@@ -112,7 +87,11 @@ void LInstruction::PrintDataTo(StringStream* stream) {
stream->Add("= ");
for (int i = 0; i < InputCount(); i++) {
if (i > 0) stream->Add(" ");
- InputAt(i)->PrintTo(stream);
+ if (InputAt(i) == NULL) {
+ stream->Add("NULL");
+ } else {
+ InputAt(i)->PrintTo(stream);
+ }
}
}
@@ -177,6 +156,7 @@ const char* LArithmeticT::Mnemonic() const {
case Token::BIT_AND: return "bit-and-t";
case Token::BIT_OR: return "bit-or-t";
case Token::BIT_XOR: return "bit-xor-t";
+ case Token::ROR: return "ror-t";
case Token::SHL: return "sll-t";
case Token::SAR: return "sra-t";
case Token::SHR: return "srl-t";
@@ -187,6 +167,11 @@ const char* LArithmeticT::Mnemonic() const {
}
+bool LGoto::HasInterestingComment(LCodeGen* gen) const {
+ return !gen->IsNextEmittedBlock(block_id());
+}
+
+
void LGoto::PrintDataTo(StringStream* stream) {
stream->Add("B%d", block_id());
}
@@ -198,20 +183,16 @@ void LBranch::PrintDataTo(StringStream* stream) {
}
-void LCmpIDAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if ");
- left()->PrintTo(stream);
- stream->Add(" %s ", Token::String(op()));
- right()->PrintTo(stream);
- stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
+LInstruction* LChunkBuilder::DoDebugBreak(HDebugBreak* instr) {
+ return new(zone()) LDebugBreak();
}
-void LIsNilAndBranch::PrintDataTo(StringStream* stream) {
+void LCompareNumericAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if ");
- value()->PrintTo(stream);
- stream->Add(kind() == kStrictEquality ? " === " : " == ");
- stream->Add(nil() == kNullValue ? "null" : "undefined");
+ left()->PrintTo(stream);
+ stream->Add(" %s ", Token::String(op()));
+ right()->PrintTo(stream);
stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
}
@@ -285,14 +266,23 @@ void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
}
-void LCallConstantFunction::PrintDataTo(StringStream* stream) {
- stream->Add("#%d / ", arity());
+void LStoreCodeEntry::PrintDataTo(StringStream* stream) {
+ stream->Add(" = ");
+ function()->PrintTo(stream);
+ stream->Add(".code_entry = ");
+ code_object()->PrintTo(stream);
}
-void LUnaryMathOperation::PrintDataTo(StringStream* stream) {
- stream->Add("/%s ", hydrogen()->OpName());
- value()->PrintTo(stream);
+void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
+ stream->Add(" = ");
+ base_object()->PrintTo(stream);
+ stream->Add(" + %d", offset());
+}
+
+
+void LCallConstantFunction::PrintDataTo(StringStream* stream) {
+ stream->Add("#%d / ", arity());
}
@@ -345,6 +335,15 @@ void LCallNew::PrintDataTo(StringStream* stream) {
}
+void LCallNewArray::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ constructor()->PrintTo(stream);
+ stream->Add(" #%d / ", arity());
+ ElementsKind kind = hydrogen()->elements_kind();
+ stream->Add(" (%s) ", ElementsKindToString(kind));
+}
+
+
void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
arguments()->PrintTo(stream);
stream->Add(" length ");
@@ -356,8 +355,7 @@ void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
void LStoreNamedField::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
- stream->Add(".");
- stream->Add(*String::cast(*name())->ToCString());
+ hydrogen()->access().PrintTo(stream);
stream->Add(" <- ");
value()->PrintTo(stream);
}
@@ -372,21 +370,35 @@ void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
}
-void LStoreKeyedFastElement::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
+void LLoadKeyed::PrintDataTo(StringStream* stream) {
+ elements()->PrintTo(stream);
stream->Add("[");
key()->PrintTo(stream);
- stream->Add("] <- ");
- value()->PrintTo(stream);
+ if (hydrogen()->IsDehoisted()) {
+ stream->Add(" + %d]", additional_index());
+ } else {
+ stream->Add("]");
+ }
}
-void LStoreKeyedFastDoubleElement::PrintDataTo(StringStream* stream) {
+void LStoreKeyed::PrintDataTo(StringStream* stream) {
elements()->PrintTo(stream);
stream->Add("[");
key()->PrintTo(stream);
- stream->Add("] <- ");
- value()->PrintTo(stream);
+ if (hydrogen()->IsDehoisted()) {
+ stream->Add(" + %d] <-", additional_index());
+ } else {
+ stream->Add("] <- ");
+ }
+
+ if (value() == NULL) {
+ ASSERT(hydrogen()->IsConstantHoleStore() &&
+ hydrogen()->value()->representation().IsDouble());
+ stream->Add("<the hole(nan)>");
+ } else {
+ value()->PrintTo(stream);
+ }
}
@@ -405,18 +417,19 @@ void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
}
-int LPlatformChunk::GetNextSpillIndex(bool is_double) {
+int LPlatformChunk::GetNextSpillIndex(RegisterKind kind) {
  // Skip a slot if allocating a double-width slot.
- if (is_double) spill_slot_count_++;
+ if (kind == DOUBLE_REGISTERS) spill_slot_count_++;
return spill_slot_count_++;
}
-LOperand* LPlatformChunk::GetNextSpillSlot(bool is_double) {
- int index = GetNextSpillIndex(is_double);
- if (is_double) {
+LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) {
+ int index = GetNextSpillIndex(kind);
+ if (kind == DOUBLE_REGISTERS) {
return LDoubleStackSlot::Create(index, zone());
} else {
+ ASSERT(kind == GENERAL_REGISTERS);
return LStackSlot::Create(index, zone());
}
}
@@ -425,8 +438,17 @@ LOperand* LPlatformChunk::GetNextSpillSlot(bool is_double) {
LPlatformChunk* LChunkBuilder::Build() {
ASSERT(is_unused());
chunk_ = new(zone()) LPlatformChunk(info(), graph());
- HPhase phase("L_Building chunk", chunk_);
+ LPhase phase("L_Building chunk", chunk_);
status_ = BUILDING;
+
+ // If compiling for OSR, reserve space for the unoptimized frame,
+ // which will be subsumed into this frame.
+ if (graph()->has_osr()) {
+ for (int i = graph()->osr()->UnoptimizedFrameSlots(); i > 0; i--) {
+ chunk_->GetNextSpillIndex(GENERAL_REGISTERS);
+ }
+ }
+
const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
for (int i = 0; i < blocks->length(); i++) {
HBasicBlock* next = NULL;
@@ -439,7 +461,7 @@ LPlatformChunk* LChunkBuilder::Build() {
}
-void LCodeGen::Abort(const char* reason) {
+void LCodeGen::Abort(BailoutReason reason) {
info()->set_bailout_reason(reason);
status_ = ABORTED;
}
@@ -523,6 +545,11 @@ LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) {
}
+LOperand* LChunkBuilder::UseConstant(HValue* value) {
+ return chunk_->DefineConstantOperand(HConstant::cast(value));
+}
+
+
LOperand* LChunkBuilder::UseAny(HValue* value) {
return value->IsConstant()
? chunk_->DefineConstantOperand(HConstant::cast(value))
@@ -590,8 +617,10 @@ LInstruction* LChunkBuilder::DefineFixedDouble(
LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
HEnvironment* hydrogen_env = current_block_->last_environment();
int argument_index_accumulator = 0;
+ ZoneList<HValue*> objects_to_materialize(0, zone());
instr->set_environment(CreateEnvironment(hydrogen_env,
- &argument_index_accumulator));
+ &argument_index_accumulator,
+ &objects_to_materialize));
return instr;
}
@@ -599,6 +628,7 @@ LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
HInstruction* hinstr,
CanDeoptimize can_deoptimize) {
+ info()->MarkAsNonDeferredCalling();
#ifdef DEBUG
instr->VerifyCall();
#endif
@@ -631,7 +661,7 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
ASSERT(!instr->HasPointerMap());
- instr->set_pointer_map(new(zone()) LPointerMap(position_, zone()));
+ instr->set_pointer_map(new(zone()) LPointerMap(zone()));
return instr;
}
@@ -639,8 +669,12 @@ LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
LUnallocated* LChunkBuilder::TempRegister() {
LUnallocated* operand =
new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
- operand->set_virtual_register(allocator_->GetVirtualRegister());
- if (!allocator_->AllocationOk()) Abort("Not enough virtual registers.");
+ int vreg = allocator_->GetVirtualRegister();
+ if (!allocator_->AllocationOk()) {
+ Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister);
+ vreg = 0;
+ }
+ operand->set_virtual_register(vreg);
return operand;
}
@@ -664,8 +698,14 @@ LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
}
-LInstruction* LChunkBuilder::DoSoftDeoptimize(HSoftDeoptimize* instr) {
- return AssignEnvironment(new(zone()) LDeoptimize);
+LInstruction* LChunkBuilder::DoDummyUse(HDummyUse* instr) {
+ return DefineAsRegister(new(zone()) LDummyUse(UseAny(instr->value())));
+}
+
+
+LInstruction* LChunkBuilder::DoEnvironmentMarker(HEnvironmentMarker* instr) {
+ UNREACHABLE();
+ return NULL;
}
@@ -676,53 +716,44 @@ LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
LInstruction* LChunkBuilder::DoShift(Token::Value op,
HBitwiseBinaryOperation* instr) {
- if (instr->representation().IsTagged()) {
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
-
- LOperand* left = UseFixed(instr->left(), a1);
- LOperand* right = UseFixed(instr->right(), a0);
- LArithmeticT* result = new(zone()) LArithmeticT(op, left, right);
- return MarkAsCall(DefineFixed(result, v0), instr);
- }
-
- ASSERT(instr->representation().IsInteger32());
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->left());
-
- HValue* right_value = instr->right();
- LOperand* right = NULL;
- int constant_value = 0;
- if (right_value->IsConstant()) {
- HConstant* constant = HConstant::cast(right_value);
- right = chunk_->DefineConstantOperand(constant);
- constant_value = constant->Integer32Value() & 0x1f;
- } else {
- right = UseRegisterAtStart(right_value);
- }
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* left = UseRegisterAtStart(instr->left());
- bool does_deopt = false;
+ HValue* right_value = instr->right();
+ LOperand* right = NULL;
+ int constant_value = 0;
+ bool does_deopt = false;
+ if (right_value->IsConstant()) {
+ HConstant* constant = HConstant::cast(right_value);
+ right = chunk_->DefineConstantOperand(constant);
+ constant_value = constant->Integer32Value() & 0x1f;
+ // Left shifts can deoptimize if we shift by > 0 and the result cannot be
+ // truncated to smi.
+ if (instr->representation().IsSmi() && constant_value > 0) {
+ does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToSmi);
+ }
+ } else {
+ right = UseRegisterAtStart(right_value);
+ }
- if (FLAG_opt_safe_uint32_operations) {
- does_deopt = !instr->CheckFlag(HInstruction::kUint32);
- } else {
// Shift operations can only deoptimize if we do a logical shift
// by 0 and the result cannot be truncated to int32.
- bool may_deopt = (op == Token::SHR && constant_value == 0);
- if (may_deopt) {
- for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
- if (!it.value()->CheckFlag(HValue::kTruncatingToInt32)) {
- does_deopt = true;
- break;
- }
+ if (op == Token::SHR && constant_value == 0) {
+ if (FLAG_opt_safe_uint32_operations) {
+ does_deopt = !instr->CheckFlag(HInstruction::kUint32);
+ } else {
+ does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToInt32);
}
}
- }
- LInstruction* result =
- DefineAsRegister(new(zone()) LShiftI(op, left, right, does_deopt));
- return does_deopt ? AssignEnvironment(result) : result;
+ LInstruction* result =
+ DefineAsRegister(new(zone()) LShiftI(op, left, right, does_deopt));
+ return does_deopt ? AssignEnvironment(result) : result;
+ } else {
+ return DoArithmeticT(op, instr);
+ }
}
@@ -731,29 +762,34 @@ LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
ASSERT(instr->representation().IsDouble());
ASSERT(instr->left()->representation().IsDouble());
ASSERT(instr->right()->representation().IsDouble());
- ASSERT(op != Token::MOD);
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
- LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
- return DefineAsRegister(result);
+ if (op == Token::MOD) {
+ LOperand* left = UseFixedDouble(instr->left(), f2);
+ LOperand* right = UseFixedDouble(instr->right(), f4);
+ LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
+ // We call a C function for double modulo. It can't trigger a GC. We need
+ // to use a fixed result register for the call.
+ // TODO(fschneider): Allow any register as input registers.
+ return MarkAsCall(DefineFixedDouble(result, f2), instr);
+ } else {
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseRegisterAtStart(instr->right());
+ LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
+ return DefineAsRegister(result);
+ }
}
LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
- HArithmeticBinaryOperation* instr) {
- ASSERT(op == Token::ADD ||
- op == Token::DIV ||
- op == Token::MOD ||
- op == Token::MUL ||
- op == Token::SUB);
+ HBinaryOperation* instr) {
HValue* left = instr->left();
HValue* right = instr->right();
ASSERT(left->representation().IsTagged());
ASSERT(right->representation().IsTagged());
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* left_operand = UseFixed(left, a1);
LOperand* right_operand = UseFixed(right, a0);
LArithmeticT* result =
- new(zone()) LArithmeticT(op, left_operand, right_operand);
+ new(zone()) LArithmeticT(op, context, left_operand, right_operand);
return MarkAsCall(DefineFixed(result, v0), instr);
}
@@ -791,11 +827,15 @@ void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
HEnvironment* last_environment = pred->last_environment();
for (int i = 0; i < block->phis()->length(); ++i) {
HPhi* phi = block->phis()->at(i);
- last_environment->SetValueAt(phi->merged_index(), phi);
+ if (phi->HasMergedIndex()) {
+ last_environment->SetValueAt(phi->merged_index(), phi);
+ }
}
for (int i = 0; i < block->deleted_phis()->length(); ++i) {
- last_environment->SetValueAt(block->deleted_phis()->at(i),
- graph_->GetConstantUndefined());
+ if (block->deleted_phis()->at(i) < last_environment->length()) {
+ last_environment->SetValueAt(block->deleted_phis()->at(i),
+ graph_->GetConstantUndefined());
+ }
}
block->UpdateEnvironment(last_environment);
// Pick up the outgoing argument count of one of the predecessors.
@@ -825,16 +865,68 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
HInstruction* old_current = current_instruction_;
current_instruction_ = current;
if (current->has_position()) position_ = current->position();
- LInstruction* instr = current->CompileToLithium(this);
+
+ LInstruction* instr = NULL;
+ if (current->CanReplaceWithDummyUses()) {
+ if (current->OperandCount() == 0) {
+ instr = DefineAsRegister(new(zone()) LDummy());
+ } else {
+ instr = DefineAsRegister(new(zone())
+ LDummyUse(UseAny(current->OperandAt(0))));
+ }
+ for (int i = 1; i < current->OperandCount(); ++i) {
+ LInstruction* dummy =
+ new(zone()) LDummyUse(UseAny(current->OperandAt(i)));
+ dummy->set_hydrogen_value(current);
+ chunk_->AddInstruction(dummy, current_block_);
+ }
+ } else {
+ instr = current->CompileToLithium(this);
+ }
+
+ argument_count_ += current->argument_delta();
+ ASSERT(argument_count_ >= 0);
if (instr != NULL) {
+ // Associate the hydrogen instruction first, since we may need it for
+ // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below.
+ instr->set_hydrogen_value(current);
+
+#if DEBUG
+ // Make sure that the lithium instruction has either no fixed register
+ // constraints in temps or the result OR no uses that are only used at
+ // start. If this invariant doesn't hold, the register allocator can decide
+ // to insert a split of a range immediately before the instruction due to an
+ // already allocated register needing to be used for the instruction's fixed
+ // register constraint. In this case, the register allocator won't see an
+ // interference between the split child and the use-at-start (it would if
+ // it was just a plain use), so it is free to move the split child into
+ // the same register that is used for the use-at-start.
+ // See https://code.google.com/p/chromium/issues/detail?id=201590
+ if (!(instr->ClobbersRegisters() && instr->ClobbersDoubleRegisters())) {
+ int fixed = 0;
+ int used_at_start = 0;
+ for (UseIterator it(instr); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
+ if (operand->IsUsedAtStart()) ++used_at_start;
+ }
+ if (instr->Output() != NULL) {
+ if (LUnallocated::cast(instr->Output())->HasFixedPolicy()) ++fixed;
+ }
+ for (TempIterator it(instr); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
+ if (operand->HasFixedPolicy()) ++fixed;
+ }
+ ASSERT(fixed == 0 || used_at_start == 0);
+ }
+#endif
+
if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
instr = AssignPointerMap(instr);
}
if (FLAG_stress_environments && !instr->HasEnvironment()) {
instr = AssignEnvironment(instr);
}
- instr->set_hydrogen_value(current);
chunk_->AddInstruction(instr, current_block_);
}
current_instruction_ = old_current;
@@ -843,15 +935,17 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
LEnvironment* LChunkBuilder::CreateEnvironment(
HEnvironment* hydrogen_env,
- int* argument_index_accumulator) {
+ int* argument_index_accumulator,
+ ZoneList<HValue*>* objects_to_materialize) {
if (hydrogen_env == NULL) return NULL;
- LEnvironment* outer =
- CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
+ LEnvironment* outer = CreateEnvironment(hydrogen_env->outer(),
+ argument_index_accumulator,
+ objects_to_materialize);
BailoutId ast_id = hydrogen_env->ast_id();
ASSERT(!ast_id.IsNone() ||
hydrogen_env->frame_type() != JS_FUNCTION);
- int value_count = hydrogen_env->length();
+ int value_count = hydrogen_env->length() - hydrogen_env->specials_count();
LEnvironment* result = new(zone()) LEnvironment(
hydrogen_env->closure(),
hydrogen_env->frame_type(),
@@ -863,13 +957,15 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
hydrogen_env->entry(),
zone());
int argument_index = *argument_index_accumulator;
- for (int i = 0; i < value_count; ++i) {
+ int object_index = objects_to_materialize->length();
+ for (int i = 0; i < hydrogen_env->length(); ++i) {
if (hydrogen_env->is_special_index(i)) continue;
+ LOperand* op;
HValue* value = hydrogen_env->values()->at(i);
- LOperand* op = NULL;
- if (value->IsArgumentsObject()) {
- op = NULL;
+ if (value->IsArgumentsObject() || value->IsCapturedObject()) {
+ objects_to_materialize->Add(value, zone());
+ op = LEnvironment::materialization_marker();
} else if (value->IsPushArgument()) {
op = new(zone()) LArgument(argument_index++);
} else {
@@ -880,6 +976,39 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
value->CheckFlag(HInstruction::kUint32));
}
+ for (int i = object_index; i < objects_to_materialize->length(); ++i) {
+ HValue* object_to_materialize = objects_to_materialize->at(i);
+ int previously_materialized_object = -1;
+ for (int prev = 0; prev < i; ++prev) {
+ if (objects_to_materialize->at(prev) == objects_to_materialize->at(i)) {
+ previously_materialized_object = prev;
+ break;
+ }
+ }
+ int length = object_to_materialize->OperandCount();
+ bool is_arguments = object_to_materialize->IsArgumentsObject();
+ if (previously_materialized_object >= 0) {
+ result->AddDuplicateObject(previously_materialized_object);
+ continue;
+ } else {
+ result->AddNewObject(is_arguments ? length - 1 : length, is_arguments);
+ }
+ for (int i = is_arguments ? 1 : 0; i < length; ++i) {
+ LOperand* op;
+ HValue* value = object_to_materialize->OperandAt(i);
+ if (value->IsArgumentsObject() || value->IsCapturedObject()) {
+ objects_to_materialize->Add(value, zone());
+ op = LEnvironment::materialization_marker();
+ } else {
+ ASSERT(!value->IsPushArgument());
+ op = UseAny(value);
+ }
+ result->AddValue(op,
+ value->representation(),
+ value->CheckFlag(HInstruction::kUint32));
+ }
+ }
+
if (hydrogen_env->frame_type() == JS_FUNCTION) {
*argument_index_accumulator = argument_index;
}
@@ -889,25 +1018,24 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
- return new(zone()) LGoto(instr->FirstSuccessor()->block_id());
+ return new(zone()) LGoto(instr->FirstSuccessor());
}
LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
- HValue* value = instr->value();
- if (value->EmitAtUses()) {
- HBasicBlock* successor = HConstant::cast(value)->ToBoolean()
- ? instr->FirstSuccessor()
- : instr->SecondSuccessor();
- return new(zone()) LGoto(successor->block_id());
- }
+ LInstruction* goto_instr = CheckElideControlInstruction(instr);
+ if (goto_instr != NULL) return goto_instr;
+ HValue* value = instr->value();
LBranch* result = new(zone()) LBranch(UseRegister(value));
// Tagged values that are not known smis or booleans require a
- // deoptimization environment.
+ // deoptimization environment. If the instruction is generic no
+ // environment is needed since all cases are handled.
Representation rep = value->representation();
HType type = value->type();
- if (rep.IsTagged() && !type.IsSmi() && !type.IsBoolean()) {
+ ToBooleanStub::Types expected = instr->expected_input_types();
+ if (rep.IsTagged() && !type.IsSmi() && !type.IsBoolean() &&
+ !expected.IsGeneric()) {
return AssignEnvironment(result);
}
return result;
@@ -923,19 +1051,22 @@ LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* length) {
+ info()->MarkAsRequiresFrame();
return DefineAsRegister(
new(zone()) LArgumentsLength(UseRegister(length->value())));
}
LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
+ info()->MarkAsRequiresFrame();
return DefineAsRegister(new(zone()) LArgumentsElements);
}
LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LInstanceOf* result =
- new(zone()) LInstanceOf(UseFixed(instr->left(), a0),
+ new(zone()) LInstanceOf(context, UseFixed(instr->left(), a0),
UseFixed(instr->right(), a1));
return MarkAsCall(DefineFixed(result, v0), instr);
}
@@ -944,8 +1075,10 @@ LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
HInstanceOfKnownGlobal* instr) {
LInstanceOfKnownGlobal* result =
- new(zone()) LInstanceOfKnownGlobal(UseFixed(instr->left(), a0),
- FixedTemp(t0));
+ new(zone()) LInstanceOfKnownGlobal(
+ UseFixed(instr->context(), cp),
+ UseFixed(instr->left(), a0),
+ FixedTemp(t0));
return MarkAsCall(DefineFixed(result, v0), instr);
}
@@ -972,12 +1105,28 @@ LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
- ++argument_count_;
LOperand* argument = Use(instr->argument());
return new(zone()) LPushArgument(argument);
}
+LInstruction* LChunkBuilder::DoStoreCodeEntry(
+ HStoreCodeEntry* store_code_entry) {
+ LOperand* function = UseRegister(store_code_entry->function());
+ LOperand* code_object = UseTempRegister(store_code_entry->code_object());
+ return new(zone()) LStoreCodeEntry(function, code_object);
+}
+
+
+LInstruction* LChunkBuilder::DoInnerAllocatedObject(
+ HInnerAllocatedObject* inner_object) {
+ LOperand* base_object = UseRegisterAtStart(inner_object->base_object());
+ LInnerAllocatedObject* result =
+ new(zone()) LInnerAllocatedObject(base_object);
+ return DefineAsRegister(result);
+}
+
+
LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
return instr->HasNoUses()
? NULL
@@ -986,7 +1135,13 @@ LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
LInstruction* LChunkBuilder::DoContext(HContext* instr) {
- return instr->HasNoUses() ? NULL : DefineAsRegister(new(zone()) LContext);
+ if (instr->HasNoUses()) return NULL;
+
+ if (info()->IsStub()) {
+ return DefineFixed(new(zone()) LContext, cp);
+ }
+
+ return DefineAsRegister(new(zone()) LContext);
}
@@ -997,7 +1152,8 @@ LInstruction* LChunkBuilder::DoOuterContext(HOuterContext* instr) {
LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
- return MarkAsCall(new(zone()) LDeclareGlobals, instr);
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(new(zone()) LDeclareGlobals(context), instr);
}
@@ -1015,98 +1171,178 @@ LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) {
LInstruction* LChunkBuilder::DoCallConstantFunction(
HCallConstantFunction* instr) {
- argument_count_ -= instr->argument_count();
return MarkAsCall(DefineFixed(new(zone()) LCallConstantFunction, v0), instr);
}
LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* function = UseFixed(instr->function(), a1);
- argument_count_ -= instr->argument_count();
- LInvokeFunction* result = new(zone()) LInvokeFunction(function);
+ LInvokeFunction* result = new(zone()) LInvokeFunction(context, function);
return MarkAsCall(DefineFixed(result, v0), instr, CANNOT_DEOPTIMIZE_EAGERLY);
}
LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
- BuiltinFunctionId op = instr->op();
- if (op == kMathLog || op == kMathSin || op == kMathCos || op == kMathTan) {
- LOperand* input = UseFixedDouble(instr->value(), f4);
- LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input, NULL);
- return MarkAsCall(DefineFixedDouble(result, f4), instr);
- } else if (op == kMathPowHalf) {
- // Input cannot be the same as the result.
- // See lithium-codegen-mips.cc::DoMathPowHalf.
- LOperand* input = UseFixedDouble(instr->value(), f8);
- LOperand* temp = FixedTemp(f6);
- LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input, temp);
- return DefineFixedDouble(result, f4);
- } else {
- LOperand* input = UseRegisterAtStart(instr->value());
- LOperand* temp = (op == kMathFloor) ? TempRegister() : NULL;
- LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input, temp);
- switch (op) {
- case kMathAbs:
- return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
- case kMathFloor:
- return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
- case kMathSqrt:
- return DefineAsRegister(result);
- case kMathRound:
- return AssignEnvironment(DefineAsRegister(result));
- default:
- UNREACHABLE();
- return NULL;
- }
+ switch (instr->op()) {
+ case kMathFloor: return DoMathFloor(instr);
+ case kMathRound: return DoMathRound(instr);
+ case kMathAbs: return DoMathAbs(instr);
+ case kMathLog: return DoMathLog(instr);
+ case kMathSin: return DoMathSin(instr);
+ case kMathCos: return DoMathCos(instr);
+ case kMathTan: return DoMathTan(instr);
+ case kMathExp: return DoMathExp(instr);
+ case kMathSqrt: return DoMathSqrt(instr);
+ case kMathPowHalf: return DoMathPowHalf(instr);
+ default:
+ UNREACHABLE();
+ return NULL;
}
}
+LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) {
+ LOperand* input = UseFixedDouble(instr->value(), f4);
+ LMathLog* result = new(zone()) LMathLog(input);
+ return MarkAsCall(DefineFixedDouble(result, f4), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoMathSin(HUnaryMathOperation* instr) {
+ LOperand* input = UseFixedDouble(instr->value(), f4);
+ LMathSin* result = new(zone()) LMathSin(input);
+ return MarkAsCall(DefineFixedDouble(result, f4), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoMathCos(HUnaryMathOperation* instr) {
+ LOperand* input = UseFixedDouble(instr->value(), f4);
+ LMathCos* result = new(zone()) LMathCos(input);
+ return MarkAsCall(DefineFixedDouble(result, f4), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoMathTan(HUnaryMathOperation* instr) {
+ LOperand* input = UseFixedDouble(instr->value(), f4);
+ LMathTan* result = new(zone()) LMathTan(input);
+ return MarkAsCall(DefineFixedDouble(result, f4), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->value()->representation().IsDouble());
+ LOperand* input = UseRegister(instr->value());
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LOperand* double_temp = FixedTemp(f6); // Chosen by fair dice roll.
+ LMathExp* result = new(zone()) LMathExp(input, double_temp, temp1, temp2);
+ return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) {
+ // Input cannot be the same as the result, see LCodeGen::DoMathPowHalf.
+ LOperand* input = UseFixedDouble(instr->value(), f8);
+ LOperand* temp = FixedTemp(f6);
+ LMathPowHalf* result = new(zone()) LMathPowHalf(input, temp);
+ return DefineFixedDouble(result, f4);
+}
+
+
+LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) {
+ Representation r = instr->value()->representation();
+ LOperand* context = (r.IsDouble() || r.IsSmiOrInteger32())
+ ? NULL
+ : UseFixed(instr->context(), cp);
+ LOperand* input = UseRegister(instr->value());
+ LMathAbs* result = new(zone()) LMathAbs(context, input);
+ return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+}
+
+
+LInstruction* LChunkBuilder::DoMathFloor(HUnaryMathOperation* instr) {
+ LOperand* input = UseRegister(instr->value());
+ LOperand* temp = TempRegister();
+ LMathFloor* result = new(zone()) LMathFloor(input, temp);
+ return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+}
+
+
+LInstruction* LChunkBuilder::DoMathSqrt(HUnaryMathOperation* instr) {
+ LOperand* input = UseRegister(instr->value());
+ LMathSqrt* result = new(zone()) LMathSqrt(input);
+ return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) {
+ LOperand* input = UseRegister(instr->value());
+ LOperand* temp = FixedTemp(f6);
+ LMathRound* result = new(zone()) LMathRound(input, temp);
+ return AssignEnvironment(DefineAsRegister(result));
+}
+
+
LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) {
ASSERT(instr->key()->representation().IsTagged());
- argument_count_ -= instr->argument_count();
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* key = UseFixed(instr->key(), a2);
- return MarkAsCall(DefineFixed(new(zone()) LCallKeyed(key), v0), instr);
+ return MarkAsCall(
+ DefineFixed(new(zone()) LCallKeyed(context, key), v0), instr);
}
LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new(zone()) LCallNamed, v0), instr);
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(DefineFixed(new(zone()) LCallNamed(context), v0), instr);
}
LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) {
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new(zone()) LCallGlobal, v0), instr);
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(DefineFixed(new(zone()) LCallGlobal(context), v0), instr);
}
LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) {
- argument_count_ -= instr->argument_count();
return MarkAsCall(DefineFixed(new(zone()) LCallKnownGlobal, v0), instr);
}
LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* constructor = UseFixed(instr->constructor(), a1);
+ LCallNew* result = new(zone()) LCallNew(context, constructor);
+ return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* constructor = UseFixed(instr->constructor(), a1);
- argument_count_ -= instr->argument_count();
- LCallNew* result = new(zone()) LCallNew(constructor);
+ LCallNewArray* result = new(zone()) LCallNewArray(context, constructor);
return MarkAsCall(DefineFixed(result, v0), instr);
}
LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* function = UseFixed(instr->function(), a1);
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new(zone()) LCallFunction(function), v0),
- instr);
+ return MarkAsCall(
+ DefineFixed(new(zone()) LCallFunction(context, function), v0), instr);
}
LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new(zone()) LCallRuntime, v0), instr);
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(DefineFixed(new(zone()) LCallRuntime(context), v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoRor(HRor* instr) {
+ return DoShift(Token::ROR, instr);
}
@@ -1126,126 +1362,186 @@ LInstruction* LChunkBuilder::DoShl(HShl* instr) {
LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ ASSERT(instr->CheckFlag(HValue::kTruncatingToInt32));
- LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
- LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+ LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
return DefineAsRegister(new(zone()) LBitI(left, right));
} else {
- ASSERT(instr->representation().IsTagged());
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
-
- LOperand* left = UseFixed(instr->left(), a1);
- LOperand* right = UseFixed(instr->right(), a0);
- LArithmeticT* result = new(zone()) LArithmeticT(instr->op(), left, right);
- return MarkAsCall(DefineFixed(result, v0), instr);
+ return DoArithmeticT(instr->op(), instr);
}
}
-LInstruction* LChunkBuilder::DoBitNot(HBitNot* instr) {
- ASSERT(instr->value()->representation().IsInteger32());
- ASSERT(instr->representation().IsInteger32());
- if (instr->HasNoUses()) return NULL;
- LOperand* value = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LBitNotI(value));
-}
-
-
LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
- if (instr->representation().IsDouble()) {
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ LOperand* divisor = UseRegister(instr->right());
+ LDivI* div = new(zone()) LDivI(dividend, divisor);
+ return AssignEnvironment(DefineAsRegister(div));
+ } else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::DIV, instr);
- } else if (instr->representation().IsInteger32()) {
- // TODO(1042) The fixed register allocation
- // is needed because we call TypeRecordingBinaryOpStub from
- // the generated code, which requires registers a0
- // and a1 to be used. We should remove that
- // when we provide a native implementation.
- LOperand* dividend = UseFixed(instr->left(), a0);
- LOperand* divisor = UseFixed(instr->right(), a1);
- return AssignEnvironment(AssignPointerMap(
- DefineFixed(new(zone()) LDivI(dividend, divisor), v0)));
} else {
return DoArithmeticT(Token::DIV, instr);
}
}
-LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
- UNIMPLEMENTED();
+bool LChunkBuilder::HasMagicNumberForDivisor(int32_t divisor) {
+ uint32_t divisor_abs = abs(divisor);
+ // Dividing by 0, 1, and powers of 2 is easy.
+ // Note that IsPowerOf2(0) returns true:
+ ASSERT(IsPowerOf2(0) == true);
+ if (IsPowerOf2(divisor_abs)) return true;
+
+ // We have magic numbers for a few specific divisors.
+ // Details and proofs can be found in:
+ // - Hacker's Delight, Henry S. Warren, Jr.
+ // - The PowerPC Compiler Writer's Guide
+ // and probably many others.
+ //
+ // We handle
+ // <divisor with magic numbers> * <power of 2>
+ // but not
+ // <divisor with magic numbers> * <other divisor with magic numbers>
+ int32_t power_of_2_factor =
+ CompilerIntrinsics::CountTrailingZeros(divisor_abs);
+ DivMagicNumbers magic_numbers =
+ DivMagicNumberFor(divisor_abs >> power_of_2_factor);
+ if (magic_numbers.M != InvalidDivMagicNumber.M) return true;
+
+ return false;
+}
+
+
+HValue* LChunkBuilder::SimplifiedDivisorForMathFloorOfDiv(HValue* divisor) {
+ // Only optimize when we have magic numbers for the divisor.
+ // The standard integer division routine is usually slower than transitionning
+ // to FPU.
+ if (divisor->IsConstant() &&
+ HConstant::cast(divisor)->HasInteger32Value()) {
+ HConstant* constant_val = HConstant::cast(divisor);
+ return constant_val->CopyToRepresentation(Representation::Integer32(),
+ divisor->block()->zone());
+ }
return NULL;
}
-LInstruction* LChunkBuilder::DoMod(HMod* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
+LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
+ HValue* right = instr->right();
+ LOperand* dividend = UseRegister(instr->left());
+ LOperand* divisor = UseRegisterOrConstant(right);
+ LOperand* remainder = TempRegister();
+ return AssignEnvironment(DefineAsRegister(
+ new(zone()) LMathFloorOfDiv(dividend, divisor, remainder)));
+}
- LModI* mod;
- if (instr->HasPowerOf2Divisor()) {
- ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
- LOperand* value = UseRegisterAtStart(instr->left());
- mod = new(zone()) LModI(value, UseOrConstant(instr->right()));
- } else {
- LOperand* dividend = UseRegister(instr->left());
- LOperand* divisor = UseRegister(instr->right());
- mod = new(zone()) LModI(dividend,
- divisor,
- TempRegister(),
- FixedTemp(f20),
- FixedTemp(f22));
- }
- if (instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
- instr->CheckFlag(HValue::kCanBeDivByZero)) {
+LInstruction* LChunkBuilder::DoMod(HMod* instr) {
+ HValue* left = instr->left();
+ HValue* right = instr->right();
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ if (instr->HasPowerOf2Divisor()) {
+ ASSERT(!right->CanBeZero());
+ LModI* mod = new(zone()) LModI(UseRegisterAtStart(left),
+ UseOrConstant(right));
+ LInstruction* result = DefineAsRegister(mod);
+ return (left->CanBeNegative() &&
+ instr->CheckFlag(HValue::kBailoutOnMinusZero))
+ ? AssignEnvironment(result)
+ : result;
+ } else if (instr->fixed_right_arg().has_value) {
+ LModI* mod = new(zone()) LModI(UseRegisterAtStart(left),
+ UseRegisterAtStart(right));
return AssignEnvironment(DefineAsRegister(mod));
} else {
- return DefineAsRegister(mod);
+ LModI* mod = new(zone()) LModI(UseRegister(left),
+ UseRegister(right),
+ TempRegister(),
+ FixedTemp(f20),
+ FixedTemp(f22));
+ LInstruction* result = DefineAsRegister(mod);
+ return (right->CanBeZero() ||
+ (left->RangeCanInclude(kMinInt) &&
+ right->RangeCanInclude(-1)) ||
+ instr->CheckFlag(HValue::kBailoutOnMinusZero))
+ ? AssignEnvironment(result)
+ : result;
}
- } else if (instr->representation().IsTagged()) {
- return DoArithmeticT(Token::MOD, instr);
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::MOD, instr);
} else {
- ASSERT(instr->representation().IsDouble());
- // We call a C function for double modulo. It can't trigger a GC.
- // We need to use fixed result register for the call.
- // TODO(fschneider): Allow any register as input registers.
- LOperand* left = UseFixedDouble(instr->left(), f2);
- LOperand* right = UseFixedDouble(instr->right(), f4);
- LArithmeticD* result = new(zone()) LArithmeticD(Token::MOD, left, right);
- return MarkAsCall(DefineFixedDouble(result, f2), instr);
+ return DoArithmeticT(Token::MOD, instr);
}
}
LInstruction* LChunkBuilder::DoMul(HMul* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left;
- LOperand* right = UseOrConstant(instr->MostConstantOperand());
- LOperand* temp = NULL;
- if (instr->CheckFlag(HValue::kBailoutOnMinusZero) &&
- (instr->CheckFlag(HValue::kCanOverflow) ||
- !right->IsConstantOperand())) {
- left = UseRegister(instr->LeastConstantOperand());
- temp = TempRegister();
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ HValue* left = instr->BetterLeftOperand();
+ HValue* right = instr->BetterRightOperand();
+ LOperand* left_op;
+ LOperand* right_op;
+ bool can_overflow = instr->CheckFlag(HValue::kCanOverflow);
+ bool bailout_on_minus_zero = instr->CheckFlag(HValue::kBailoutOnMinusZero);
+
+ if (right->IsConstant()) {
+ HConstant* constant = HConstant::cast(right);
+ int32_t constant_value = constant->Integer32Value();
+ // Constants -1, 0 and 1 can be optimized even if the result can overflow.
+ // Other constants can be optimized only when the result cannot overflow.
+ if (!can_overflow || ((constant_value >= -1) && (constant_value <= 1))) {
+ left_op = UseRegisterAtStart(left);
+ right_op = UseConstant(right);
+ } else {
+ if (bailout_on_minus_zero) {
+ left_op = UseRegister(left);
+ } else {
+ left_op = UseRegisterAtStart(left);
+ }
+ right_op = UseRegister(right);
+ }
} else {
- left = UseRegisterAtStart(instr->LeastConstantOperand());
+ if (bailout_on_minus_zero) {
+ left_op = UseRegister(left);
+ } else {
+ left_op = UseRegisterAtStart(left);
+ }
+ right_op = UseRegister(right);
}
- LMulI* mul = new(zone()) LMulI(left, right, temp);
- if (instr->CheckFlag(HValue::kCanOverflow) ||
- instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ LMulI* mul = new(zone()) LMulI(left_op, right_op);
+ if (can_overflow || bailout_on_minus_zero) {
AssignEnvironment(mul);
}
return DefineAsRegister(mul);
} else if (instr->representation().IsDouble()) {
+ if (kArchVariant == kMips32r2) {
+ if (instr->UseCount() == 1 && instr->uses().value()->IsAdd()) {
+ HAdd* add = HAdd::cast(instr->uses().value());
+ if (instr == add->left()) {
+ // This mul is the lhs of an add. The add and mul will be folded
+ // into a multiply-add.
+ return NULL;
+ }
+ if (instr == add->right() && !add->left()->IsMul()) {
+ // This mul is the rhs of an add, where the lhs is not another mul.
+ // The add and mul will be folded into a multiply-add.
+ return NULL;
+ }
+ }
+ }
return DoArithmeticD(Token::MUL, instr);
-
} else {
return DoArithmeticT(Token::MUL, instr);
}
@@ -1253,9 +1549,9 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) {
LInstruction* LChunkBuilder::DoSub(HSub* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseOrConstantAtStart(instr->right());
LSubI* sub = new(zone()) LSubI(left, right);
@@ -1272,12 +1568,21 @@ LInstruction* LChunkBuilder::DoSub(HSub* instr) {
}
+LInstruction* LChunkBuilder::DoMultiplyAdd(HMul* mul, HValue* addend) {
+ LOperand* multiplier_op = UseRegisterAtStart(mul->left());
+ LOperand* multiplicand_op = UseRegisterAtStart(mul->right());
+ LOperand* addend_op = UseRegisterAtStart(addend);
+ return DefineSameAsFirst(new(zone()) LMultiplyAddD(addend_op, multiplier_op,
+ multiplicand_op));
+}
+
+
LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
- LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
LAddI* add = new(zone()) LAddI(left, right);
LInstruction* result = DefineAsRegister(add);
if (instr->CheckFlag(HValue::kCanOverflow)) {
@@ -1285,9 +1590,17 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
}
return result;
} else if (instr->representation().IsDouble()) {
+ if (kArchVariant == kMips32r2) {
+ if (instr->left()->IsMul())
+ return DoMultiplyAdd(HMul::cast(instr->left()), instr->right());
+
+ if (instr->right()->IsMul()) {
+ ASSERT(!instr->left()->IsMul());
+ return DoMultiplyAdd(HMul::cast(instr->right()), instr->left());
+ }
+ }
return DoArithmeticD(Token::ADD, instr);
} else {
- ASSERT(instr->representation().IsTagged());
return DoArithmeticT(Token::ADD, instr);
}
}
@@ -1296,11 +1609,11 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) {
LOperand* left = NULL;
LOperand* right = NULL;
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
- left = UseRegisterAtStart(instr->LeastConstantOperand());
- right = UseOrConstantAtStart(instr->MostConstantOperand());
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ left = UseRegisterAtStart(instr->BetterLeftOperand());
+ right = UseOrConstantAtStart(instr->BetterRightOperand());
} else {
ASSERT(instr->representation().IsDouble());
ASSERT(instr->left()->representation().IsDouble());
@@ -1332,60 +1645,61 @@ LInstruction* LChunkBuilder::DoPower(HPower* instr) {
LInstruction* LChunkBuilder::DoRandom(HRandom* instr) {
ASSERT(instr->representation().IsDouble());
ASSERT(instr->global_object()->representation().IsTagged());
- LOperand* global_object = UseFixed(instr->global_object(), a0);
- LRandom* result = new(zone()) LRandom(global_object);
- return MarkAsCall(DefineFixedDouble(result, f0), instr);
+ LOperand* global_object = UseTempRegister(instr->global_object());
+ LOperand* scratch = TempRegister();
+ LOperand* scratch2 = TempRegister();
+ LOperand* scratch3 = TempRegister();
+ LRandom* result = new(zone()) LRandom(
+ global_object, scratch, scratch2, scratch3);
+ return DefineFixedDouble(result, f0);
}
LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
ASSERT(instr->left()->representation().IsTagged());
ASSERT(instr->right()->representation().IsTagged());
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* left = UseFixed(instr->left(), a1);
LOperand* right = UseFixed(instr->right(), a0);
- LCmpT* result = new(zone()) LCmpT(left, right);
+ LCmpT* result = new(zone()) LCmpT(context, left, right);
return MarkAsCall(DefineFixed(result, v0), instr);
}
-LInstruction* LChunkBuilder::DoCompareIDAndBranch(
- HCompareIDAndBranch* instr) {
- Representation r = instr->GetInputRepresentation();
- if (r.IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
+LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
+ HCompareNumericAndBranch* instr) {
+ Representation r = instr->representation();
+ if (r.IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(r));
+ ASSERT(instr->right()->representation().Equals(r));
LOperand* left = UseRegisterOrConstantAtStart(instr->left());
LOperand* right = UseRegisterOrConstantAtStart(instr->right());
- return new(zone()) LCmpIDAndBranch(left, right);
+ return new(zone()) LCompareNumericAndBranch(left, right);
} else {
ASSERT(r.IsDouble());
ASSERT(instr->left()->representation().IsDouble());
ASSERT(instr->right()->representation().IsDouble());
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
- return new(zone()) LCmpIDAndBranch(left, right);
+ return new(zone()) LCompareNumericAndBranch(left, right);
}
}
LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
HCompareObjectEqAndBranch* instr) {
+ LInstruction* goto_instr = CheckElideControlInstruction(instr);
+ if (goto_instr != NULL) return goto_instr;
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
return new(zone()) LCmpObjectEqAndBranch(left, right);
}
-LInstruction* LChunkBuilder::DoCompareConstantEqAndBranch(
- HCompareConstantEqAndBranch* instr) {
- return new(zone()) LCmpConstantEqAndBranch(
- UseRegisterAtStart(instr->value()));
-}
-
-
-LInstruction* LChunkBuilder::DoIsNilAndBranch(HIsNilAndBranch* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- return new(zone()) LIsNilAndBranch(UseRegisterAtStart(instr->value()));
+LInstruction* LChunkBuilder::DoCompareHoleAndBranch(
+ HCompareHoleAndBranch* instr) {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return new(zone()) LCmpHoleAndBranch(value);
}
@@ -1423,10 +1737,11 @@ LInstruction* LChunkBuilder::DoStringCompareAndBranch(
HStringCompareAndBranch* instr) {
ASSERT(instr->left()->representation().IsTagged());
ASSERT(instr->right()->representation().IsTagged());
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* left = UseFixed(instr->left(), a1);
LOperand* right = UseFixed(instr->right(), a0);
LStringCompareAndBranch* result =
- new(zone()) LStringCompareAndBranch(left, right);
+ new(zone()) LStringCompareAndBranch(context, left, right);
return MarkAsCall(result, instr);
}
@@ -1464,19 +1779,6 @@ LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
}
-LInstruction* LChunkBuilder::DoJSArrayLength(HJSArrayLength* instr) {
- LOperand* array = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LJSArrayLength(array));
-}
-
-
-LInstruction* LChunkBuilder::DoFixedArrayBaseLength(
- HFixedArrayBaseLength* instr) {
- LOperand* array = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LFixedArrayBaseLength(array));
-}
-
-
LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
LOperand* map = UseRegisterAtStart(instr->value());
return DefineAsRegister(new(zone()) LMapEnumLength(map));
@@ -1504,6 +1806,14 @@ LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
}
+LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
+ LOperand* string = UseRegister(instr->string());
+ LOperand* index = UseRegisterOrConstant(instr->index());
+ LOperand* value = UseRegister(instr->value());
+ return new(zone()) LSeqStringSetChar(instr->encoding(), string, index, value);
+}
+
+
LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
LOperand* value = UseRegisterOrConstantAtStart(instr->index());
LOperand* length = UseRegister(instr->length());
@@ -1511,6 +1821,13 @@ LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
}
+LInstruction* LChunkBuilder::DoBoundsCheckBaseIndexInformation(
+ HBoundsCheckBaseIndexInformation* instr) {
+ UNREACHABLE();
+ return NULL;
+}
+
+
LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
// The control instruction marking the end of a block that completed
// abruptly (e.g., threw an exception). There is nothing specific to do.
@@ -1519,8 +1836,9 @@ LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* value = UseFixed(instr->value(), a0);
- return MarkAsCall(new(zone()) LThrow(value), instr);
+ return MarkAsCall(new(zone()) LThrow(context, value), instr);
}
@@ -1540,33 +1858,47 @@ LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) {
LInstruction* LChunkBuilder::DoChange(HChange* instr) {
Representation from = instr->from();
Representation to = instr->to();
+ if (from.IsSmi()) {
+ if (to.IsTagged()) {
+ LOperand* value = UseRegister(instr->value());
+ return DefineSameAsFirst(new(zone()) LDummyUse(value));
+ }
+ from = Representation::Tagged();
+ }
if (from.IsTagged()) {
if (to.IsDouble()) {
LOperand* value = UseRegister(instr->value());
LNumberUntagD* res = new(zone()) LNumberUntagD(value);
return AssignEnvironment(DefineAsRegister(res));
+ } else if (to.IsSmi()) {
+ HValue* val = instr->value();
+ LOperand* value = UseRegister(val);
+ if (val->type().IsSmi()) {
+ return DefineSameAsFirst(new(zone()) LDummyUse(value));
+ }
+ return AssignEnvironment(DefineSameAsFirst(new(zone()) LCheckSmi(value)));
} else {
ASSERT(to.IsInteger32());
- LOperand* value = UseRegisterAtStart(instr->value());
+ LOperand* value = NULL;
LInstruction* res = NULL;
- if (instr->value()->type().IsSmi()) {
+ HValue* val = instr->value();
+ if (val->type().IsSmi() || val->representation().IsSmi()) {
+ value = UseRegisterAtStart(val);
res = DefineAsRegister(new(zone()) LSmiUntag(value, false));
} else {
+ value = UseRegister(val);
LOperand* temp1 = TempRegister();
- LOperand* temp2 = instr->CanTruncateToInt32() ? TempRegister()
- : NULL;
- LOperand* temp3 = instr->CanTruncateToInt32() ? FixedTemp(f22)
- : NULL;
+ LOperand* temp2 = FixedTemp(f22);
res = DefineSameAsFirst(new(zone()) LTaggedToI(value,
temp1,
- temp2,
- temp3));
+ temp2));
res = AssignEnvironment(res);
}
return res;
}
} else if (from.IsDouble()) {
if (to.IsTagged()) {
+ info()->MarkAsDeferredCalling();
LOperand* value = UseRegister(instr->value());
LOperand* temp1 = TempRegister();
LOperand* temp2 = TempRegister();
@@ -1577,15 +1909,18 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
LNumberTagD* result = new(zone()) LNumberTagD(value, temp1, temp2);
Define(result, result_temp);
return AssignPointerMap(result);
+ } else if (to.IsSmi()) {
+ LOperand* value = UseRegister(instr->value());
+ return AssignEnvironment(
+ DefineAsRegister(new(zone()) LDoubleToSmi(value)));
} else {
ASSERT(to.IsInteger32());
LOperand* value = UseRegister(instr->value());
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = instr->CanTruncateToInt32() ? TempRegister() : NULL;
- LDoubleToI* res = new(zone()) LDoubleToI(value, temp1, temp2);
+ LDoubleToI* res = new(zone()) LDoubleToI(value);
return AssignEnvironment(DefineAsRegister(res));
}
} else if (from.IsInteger32()) {
+ info()->MarkAsDeferredCalling();
if (to.IsTagged()) {
HValue* val = instr->value();
LOperand* value = UseRegisterAtStart(val);
@@ -1598,6 +1933,16 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
LNumberTagI* result = new(zone()) LNumberTagI(value);
return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
}
+ } else if (to.IsSmi()) {
+ HValue* val = instr->value();
+ LOperand* value = UseRegister(val);
+ LInstruction* result = val->CheckFlag(HInstruction::kUint32)
+ ? DefineSameAsFirst(new(zone()) LUint32ToSmi(value))
+ : DefineSameAsFirst(new(zone()) LInteger32ToSmi(value));
+ if (val->HasRange() && val->range()->IsInSmiRange()) {
+ return result;
+ }
+ return AssignEnvironment(result);
} else {
ASSERT(to.IsDouble());
if (instr->value()->CheckFlag(HInstruction::kUint32)) {
@@ -1614,43 +1959,43 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
}
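
In the Integer32 to Smi branch above, the bailout environment is dropped only when the value's known range already fits in a Smi. On this 32-bit target a Smi holds a 31-bit signed payload (the low bit is the tag); a minimal standalone version of that range test, with the constants restated here rather than taken from the patch:

#include <cstdint>

// 31-bit signed Smi payload on a 32-bit target; the low bit is the tag.
const int32_t kSmiMinValue = -(1 << 30);
const int32_t kSmiMaxValue = (1 << 30) - 1;

// Analogue of val->range()->IsInSmiRange(): when every value in [lower, upper]
// can be tagged as a Smi, the Integer32ToSmi conversion needs no bailout.
bool RangeIsInSmiRange(int32_t lower, int32_t upper) {
  return lower >= kSmiMinValue && upper <= kSmiMaxValue;
}
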
-LInstruction* LChunkBuilder::DoCheckNonSmi(HCheckNonSmi* instr) {
+LInstruction* LChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
return AssignEnvironment(new(zone()) LCheckNonSmi(value));
}
-LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
+LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
- LInstruction* result = new(zone()) LCheckInstanceType(value);
- return AssignEnvironment(result);
-}
-
-
-LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
- LInstruction* result = new(zone()) LCheckPrototypeMaps(temp1, temp2);
- return AssignEnvironment(result);
+ return AssignEnvironment(new(zone()) LCheckSmi(value));
}
-LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
+LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new(zone()) LCheckSmi(value));
+ LInstruction* result = new(zone()) LCheckInstanceType(value);
+ return AssignEnvironment(result);
}
-LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
+LInstruction* LChunkBuilder::DoCheckValue(HCheckValue* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new(zone()) LCheckFunction(value));
+ return AssignEnvironment(new(zone()) LCheckValue(value));
}
LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- LInstruction* result = new(zone()) LCheckMaps(value);
- return AssignEnvironment(result);
+ LOperand* value = NULL;
+ if (!instr->CanOmitMapChecks()) {
+ value = UseRegisterAtStart(instr->value());
+ if (instr->has_migration_target()) info()->MarkAsDeferredCalling();
+ }
+ LCheckMaps* result = new(zone()) LCheckMaps(value);
+ if (!instr->CanOmitMapChecks()) {
+ AssignEnvironment(result);
+ if (instr->has_migration_target()) return AssignPointerMap(result);
+ }
+ return result;
}
@@ -1664,7 +2009,7 @@ LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
} else if (input_rep.IsInteger32()) {
return DefineAsRegister(new(zone()) LClampIToUint8(reg));
} else {
- ASSERT(input_rep.IsTagged());
+ ASSERT(input_rep.IsSmiOrTagged());
// Register allocator doesn't (yet) support allocation of double
// temps. Reserve f22 explicitly.
LClampTToUint8* result = new(zone()) LClampTToUint8(reg, FixedTemp(f22));
@@ -1674,16 +2019,25 @@ LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
- return new(zone()) LReturn(UseFixed(instr->value(), v0));
+ LOperand* context = info()->IsStub()
+ ? UseFixed(instr->context(), cp)
+ : NULL;
+ LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count());
+ return new(zone()) LReturn(UseFixed(instr->value(), v0), context,
+ parameter_count);
}
LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
Representation r = instr->representation();
- if (r.IsInteger32()) {
+ if (r.IsSmi()) {
+ return DefineAsRegister(new(zone()) LConstantS);
+ } else if (r.IsInteger32()) {
return DefineAsRegister(new(zone()) LConstantI);
} else if (r.IsDouble()) {
return DefineAsRegister(new(zone()) LConstantD);
+ } else if (r.IsExternal()) {
+ return DefineAsRegister(new(zone()) LConstantE);
} else if (r.IsTagged()) {
return DefineAsRegister(new(zone()) LConstantT);
} else {
@@ -1702,8 +2056,10 @@ LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* global_object = UseFixed(instr->global_object(), a0);
- LLoadGlobalGeneric* result = new(zone()) LLoadGlobalGeneric(global_object);
+ LLoadGlobalGeneric* result =
+ new(zone()) LLoadGlobalGeneric(context, global_object);
return MarkAsCall(DefineFixed(result, v0), instr);
}
@@ -1719,10 +2075,11 @@ LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
LInstruction* LChunkBuilder::DoStoreGlobalGeneric(HStoreGlobalGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* global_object = UseFixed(instr->global_object(), a1);
LOperand* value = UseFixed(instr->value(), a0);
LStoreGlobalGeneric* result =
- new(zone()) LStoreGlobalGeneric(global_object, value);
+ new(zone()) LStoreGlobalGeneric(context, global_object, value);
return MarkAsCall(result, instr);
}
@@ -1751,31 +2108,16 @@ LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
- return DefineAsRegister(
- new(zone()) LLoadNamedField(UseRegisterAtStart(instr->object())));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadNamedFieldPolymorphic(
- HLoadNamedFieldPolymorphic* instr) {
- ASSERT(instr->representation().IsTagged());
- if (instr->need_generic()) {
- LOperand* obj = UseFixed(instr->object(), a0);
- LLoadNamedFieldPolymorphic* result =
- new(zone()) LLoadNamedFieldPolymorphic(obj);
- return MarkAsCall(DefineFixed(result, v0), instr);
- } else {
- LOperand* obj = UseRegisterAtStart(instr->object());
- LLoadNamedFieldPolymorphic* result =
- new(zone()) LLoadNamedFieldPolymorphic(obj);
- return AssignEnvironment(DefineAsRegister(result));
- }
+ LOperand* obj = UseRegisterAtStart(instr->object());
+ return DefineAsRegister(new(zone()) LLoadNamedField(obj));
}
LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* object = UseFixed(instr->object(), a0);
- LInstruction* result = DefineFixed(new(zone()) LLoadNamedGeneric(object), v0);
+ LInstruction* result =
+ DefineFixed(new(zone()) LLoadNamedGeneric(context, object), v0);
return MarkAsCall(result, instr);
}
@@ -1787,9 +2129,8 @@ LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
}
-LInstruction* LChunkBuilder::DoLoadElements(HLoadElements* instr) {
- LOperand* input = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LLoadElements(input));
+LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) {
+ return DefineAsRegister(new(zone()) LLoadRoot);
}
@@ -1800,130 +2141,99 @@ LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
}
-LInstruction* LChunkBuilder::DoLoadKeyedFastElement(
- HLoadKeyedFastElement* instr) {
- ASSERT(instr->representation().IsTagged());
- ASSERT(instr->key()->representation().IsInteger32() ||
- instr->key()->representation().IsTagged());
- LOperand* obj = UseRegisterAtStart(instr->object());
- LOperand* key = UseRegisterOrConstantAtStart(instr->key());
- LLoadKeyedFastElement* result = new(zone()) LLoadKeyedFastElement(obj, key);
- if (instr->RequiresHoleCheck()) AssignEnvironment(result);
- return DefineAsRegister(result);
-}
-
-
-LInstruction* LChunkBuilder::DoLoadKeyedFastDoubleElement(
- HLoadKeyedFastDoubleElement* instr) {
- ASSERT(instr->representation().IsDouble());
- ASSERT(instr->key()->representation().IsInteger32() ||
- instr->key()->representation().IsTagged());
- LOperand* elements = UseTempRegister(instr->elements());
+LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
+ ASSERT(instr->key()->representation().IsSmiOrInteger32());
+ ElementsKind elements_kind = instr->elements_kind();
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
- LLoadKeyedFastDoubleElement* result =
- new(zone()) LLoadKeyedFastDoubleElement(elements, key);
- return AssignEnvironment(DefineAsRegister(result));
-}
+ LLoadKeyed* result = NULL;
+ if (!instr->is_external()) {
+ LOperand* obj = NULL;
+ if (instr->representation().IsDouble()) {
+ obj = UseRegister(instr->elements());
+ } else {
+ ASSERT(instr->representation().IsSmiOrTagged());
+ obj = UseRegisterAtStart(instr->elements());
+ }
+ result = new(zone()) LLoadKeyed(obj, key);
+ } else {
+ ASSERT(
+ (instr->representation().IsInteger32() &&
+ (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
+ (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
+ (instr->representation().IsDouble() &&
+ ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
+ (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
+ LOperand* external_pointer = UseRegister(instr->elements());
+ result = new(zone()) LLoadKeyed(external_pointer, key);
+ }
-LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement(
- HLoadKeyedSpecializedArrayElement* instr) {
- ElementsKind elements_kind = instr->elements_kind();
- ASSERT(
- (instr->representation().IsInteger32() &&
- (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
- (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
- (instr->representation().IsDouble() &&
- ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
- (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
- ASSERT(instr->key()->representation().IsInteger32() ||
- instr->key()->representation().IsTagged());
- LOperand* external_pointer = UseRegister(instr->external_pointer());
- LOperand* key = UseRegisterOrConstant(instr->key());
- LLoadKeyedSpecializedArrayElement* result =
- new(zone()) LLoadKeyedSpecializedArrayElement(external_pointer, key);
- LInstruction* load_instr = DefineAsRegister(result);
+ DefineAsRegister(result);
// An unsigned int array load might overflow and cause a deopt, make sure it
// has an environment.
- return (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) ?
- AssignEnvironment(load_instr) : load_instr;
+ bool can_deoptimize = instr->RequiresHoleCheck() ||
+ (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS);
+ return can_deoptimize ? AssignEnvironment(result) : result;
}
LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* object = UseFixed(instr->object(), a1);
LOperand* key = UseFixed(instr->key(), a0);
LInstruction* result =
- DefineFixed(new(zone()) LLoadKeyedGeneric(object, key), v0);
+ DefineFixed(new(zone()) LLoadKeyedGeneric(context, object, key), v0);
return MarkAsCall(result, instr);
}
-LInstruction* LChunkBuilder::DoStoreKeyedFastElement(
- HStoreKeyedFastElement* instr) {
- bool needs_write_barrier = instr->NeedsWriteBarrier();
- ASSERT(instr->value()->representation().IsTagged());
- ASSERT(instr->object()->representation().IsTagged());
- ASSERT(instr->key()->representation().IsInteger32() ||
- instr->key()->representation().IsTagged());
-
- LOperand* obj = UseTempRegister(instr->object());
- LOperand* val = needs_write_barrier
- ? UseTempRegister(instr->value())
- : UseRegisterAtStart(instr->value());
- LOperand* key = needs_write_barrier
- ? UseTempRegister(instr->key())
- : UseRegisterOrConstantAtStart(instr->key());
- return new(zone()) LStoreKeyedFastElement(obj, key, val);
-}
-
-
-LInstruction* LChunkBuilder::DoStoreKeyedFastDoubleElement(
- HStoreKeyedFastDoubleElement* instr) {
- ASSERT(instr->value()->representation().IsDouble());
- ASSERT(instr->elements()->representation().IsTagged());
- ASSERT(instr->key()->representation().IsInteger32() ||
- instr->key()->representation().IsTagged());
-
- LOperand* elements = UseRegisterAtStart(instr->elements());
- LOperand* val = UseTempRegister(instr->value());
- LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
+ if (!instr->is_external()) {
+ ASSERT(instr->elements()->representation().IsTagged());
+ bool needs_write_barrier = instr->NeedsWriteBarrier();
+ LOperand* object = NULL;
+ LOperand* val = NULL;
+ LOperand* key = NULL;
- return new(zone()) LStoreKeyedFastDoubleElement(elements, key, val);
-}
+ if (instr->value()->representation().IsDouble()) {
+ object = UseRegisterAtStart(instr->elements());
+ key = UseRegisterOrConstantAtStart(instr->key());
+ val = UseRegister(instr->value());
+ } else {
+ ASSERT(instr->value()->representation().IsSmiOrTagged());
+ if (needs_write_barrier) {
+ object = UseTempRegister(instr->elements());
+ val = UseTempRegister(instr->value());
+ key = UseTempRegister(instr->key());
+ } else {
+ object = UseRegisterAtStart(instr->elements());
+ val = UseRegisterAtStart(instr->value());
+ key = UseRegisterOrConstantAtStart(instr->key());
+ }
+ }
+ return new(zone()) LStoreKeyed(object, key, val);
+ }
-LInstruction* LChunkBuilder::DoStoreKeyedSpecializedArrayElement(
- HStoreKeyedSpecializedArrayElement* instr) {
- ElementsKind elements_kind = instr->elements_kind();
ASSERT(
(instr->value()->representation().IsInteger32() &&
- (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
- (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
+ (instr->elements_kind() != EXTERNAL_FLOAT_ELEMENTS) &&
+ (instr->elements_kind() != EXTERNAL_DOUBLE_ELEMENTS)) ||
(instr->value()->representation().IsDouble() &&
- ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
- (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
- ASSERT(instr->external_pointer()->representation().IsExternal());
- ASSERT(instr->key()->representation().IsInteger32() ||
- instr->key()->representation().IsTagged());
-
- LOperand* external_pointer = UseRegister(instr->external_pointer());
- bool val_is_temp_register =
- elements_kind == EXTERNAL_PIXEL_ELEMENTS ||
- elements_kind == EXTERNAL_FLOAT_ELEMENTS;
- LOperand* val = val_is_temp_register
- ? UseTempRegister(instr->value())
- : UseRegister(instr->value());
- LOperand* key = UseRegisterOrConstant(instr->key());
+ ((instr->elements_kind() == EXTERNAL_FLOAT_ELEMENTS) ||
+ (instr->elements_kind() == EXTERNAL_DOUBLE_ELEMENTS))));
+ ASSERT(instr->elements()->representation().IsExternal());
+ LOperand* val = UseRegister(instr->value());
+ LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+ LOperand* external_pointer = UseRegister(instr->elements());
- return new(zone()) LStoreKeyedSpecializedArrayElement(external_pointer,
- key,
- val);
+ return new(zone()) LStoreKeyed(external_pointer, key, val);
}
LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* obj = UseFixed(instr->object(), a2);
LOperand* key = UseFixed(instr->key(), a1);
LOperand* val = UseFixed(instr->value(), a0);
@@ -1932,41 +2242,47 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
ASSERT(instr->key()->representation().IsTagged());
ASSERT(instr->value()->representation().IsTagged());
- return MarkAsCall(new(zone()) LStoreKeyedGeneric(obj, key, val), instr);
+ return MarkAsCall(
+ new(zone()) LStoreKeyedGeneric(context, obj, key, val), instr);
}
LInstruction* LChunkBuilder::DoTransitionElementsKind(
HTransitionElementsKind* instr) {
- ElementsKind from_kind = instr->original_map()->elements_kind();
- ElementsKind to_kind = instr->transitioned_map()->elements_kind();
- if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
- LOperand* object = UseRegister(instr->object());
+ LOperand* object = UseRegister(instr->object());
+ if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
LOperand* new_map_reg = TempRegister();
LTransitionElementsKind* result =
- new(zone()) LTransitionElementsKind(object, new_map_reg, NULL);
- return DefineSameAsFirst(result);
+ new(zone()) LTransitionElementsKind(object, NULL, new_map_reg);
+ return result;
} else {
- LOperand* object = UseFixed(instr->object(), a0);
- LOperand* fixed_object_reg = FixedTemp(a2);
- LOperand* new_map_reg = FixedTemp(a3);
+ LOperand* context = UseFixed(instr->context(), cp);
LTransitionElementsKind* result =
- new(zone()) LTransitionElementsKind(object,
- new_map_reg,
- fixed_object_reg);
- return MarkAsCall(DefineFixed(result, v0), instr);
+ new(zone()) LTransitionElementsKind(object, context, NULL);
+ return AssignPointerMap(result);
}
}
+LInstruction* LChunkBuilder::DoTrapAllocationMemento(
+ HTrapAllocationMemento* instr) {
+ LOperand* object = UseRegister(instr->object());
+ LOperand* temp = TempRegister();
+ LTrapAllocationMemento* result =
+ new(zone()) LTrapAllocationMemento(object, temp);
+ return AssignEnvironment(result);
+}
+
+
LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
+ bool is_in_object = instr->access().IsInobject();
bool needs_write_barrier = instr->NeedsWriteBarrier();
- bool needs_write_barrier_for_map = !instr->transition().is_null() &&
+ bool needs_write_barrier_for_map = instr->has_transition() &&
instr->NeedsWriteBarrierForMap();
LOperand* obj;
if (needs_write_barrier) {
- obj = instr->is_in_object()
+ obj = is_in_object
? UseRegister(instr->object())
: UseTempRegister(instr->object());
} else {
@@ -1975,92 +2291,94 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
: UseRegisterAtStart(instr->object());
}
- LOperand* val = needs_write_barrier
- ? UseTempRegister(instr->value())
- : UseRegister(instr->value());
+ LOperand* val;
+ if (needs_write_barrier ||
+ (FLAG_track_fields && instr->field_representation().IsSmi())) {
+ val = UseTempRegister(instr->value());
+ } else if (FLAG_track_double_fields &&
+ instr->field_representation().IsDouble()) {
+ val = UseRegisterAtStart(instr->value());
+ } else {
+ val = UseRegister(instr->value());
+ }
// We need a temporary register for write barrier of the map field.
LOperand* temp = needs_write_barrier_for_map ? TempRegister() : NULL;
- return new(zone()) LStoreNamedField(obj, val, temp);
+ LStoreNamedField* result = new(zone()) LStoreNamedField(obj, val, temp);
+ if (FLAG_track_heap_object_fields &&
+ instr->field_representation().IsHeapObject()) {
+ if (!instr->value()->type().IsHeapObject()) {
+ return AssignEnvironment(result);
+ }
+ }
+ return result;
}
LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* obj = UseFixed(instr->object(), a1);
LOperand* val = UseFixed(instr->value(), a0);
- LInstruction* result = new(zone()) LStoreNamedGeneric(obj, val);
+ LInstruction* result = new(zone()) LStoreNamedGeneric(context, obj, val);
return MarkAsCall(result, instr);
}
LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
- return MarkAsCall(DefineFixed(new(zone()) LStringAdd(left, right), v0),
- instr);
+ return MarkAsCall(
+ DefineFixed(new(zone()) LStringAdd(context, left, right), v0),
+ instr);
}
LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
LOperand* string = UseTempRegister(instr->string());
LOperand* index = UseTempRegister(instr->index());
- LStringCharCodeAt* result = new(zone()) LStringCharCodeAt(string, index);
+ LOperand* context = UseAny(instr->context());
+ LStringCharCodeAt* result =
+ new(zone()) LStringCharCodeAt(context, string, index);
return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
}
LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
LOperand* char_code = UseRegister(instr->value());
- LStringCharFromCode* result = new(zone()) LStringCharFromCode(char_code);
+ LOperand* context = UseAny(instr->context());
+ LStringCharFromCode* result =
+ new(zone()) LStringCharFromCode(context, char_code);
return AssignPointerMap(DefineAsRegister(result));
}
-LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
- LOperand* string = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LStringLength(string));
-}
-
-
-LInstruction* LChunkBuilder::DoAllocateObject(HAllocateObject* instr) {
- LAllocateObject* result =
- new(zone()) LAllocateObject(TempRegister(), TempRegister());
+LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
+ info()->MarkAsDeferredCalling();
+ LOperand* context = UseAny(instr->context());
+ LOperand* size = instr->size()->IsConstant()
+ ? UseConstant(instr->size())
+ : UseTempRegister(instr->size());
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LAllocate* result = new(zone()) LAllocate(context, size, temp1, temp2);
return AssignPointerMap(DefineAsRegister(result));
}
-LInstruction* LChunkBuilder::DoFastLiteral(HFastLiteral* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LFastLiteral, v0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LArrayLiteral, v0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoObjectLiteral(HObjectLiteral* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LObjectLiteral, v0), instr);
-}
-
-
LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LRegExpLiteral, v0), instr);
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(
+ DefineFixed(new(zone()) LRegExpLiteral(context), v0), instr);
}
LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LFunctionLiteral, v0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) {
- LOperand* object = UseFixed(instr->object(), a0);
- LOperand* key = UseFixed(instr->key(), a1);
- LDeleteProperty* result = new(zone()) LDeleteProperty(object, key);
- return MarkAsCall(DefineFixed(result, v0), instr);
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(
+ DefineFixed(new(zone()) LFunctionLiteral(context), v0), instr);
}
@@ -2073,24 +2391,42 @@ LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
- int spill_index = chunk()->GetParameterStackSlot(instr->index());
- return DefineAsSpilled(new(zone()) LParameter, spill_index);
+ LParameter* result = new(zone()) LParameter;
+ if (instr->kind() == HParameter::STACK_PARAMETER) {
+ int spill_index = chunk()->GetParameterStackSlot(instr->index());
+ return DefineAsSpilled(result, spill_index);
+ } else {
+ ASSERT(info()->IsStub());
+ CodeStubInterfaceDescriptor* descriptor =
+ info()->code_stub()->GetInterfaceDescriptor(info()->isolate());
+ int index = static_cast<int>(instr->index());
+ Register reg = DESCRIPTOR_GET_PARAMETER_REGISTER(descriptor, index);
+ return DefineFixed(result, reg);
+ }
}
LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
- int spill_index = chunk()->GetNextSpillIndex(false); // Not double-width.
- if (spill_index > LUnallocated::kMaxFixedIndex) {
- Abort("Too many spill slots needed for OSR");
- spill_index = 0;
+ // Use an index that corresponds to the location in the unoptimized frame,
+ // which the optimized frame will subsume.
+ int env_index = instr->index();
+ int spill_index = 0;
+ if (instr->environment()->is_parameter_index(env_index)) {
+ spill_index = chunk()->GetParameterStackSlot(env_index);
+ } else {
+ spill_index = env_index - instr->environment()->first_local_index();
+ if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
+ Abort(kTooManySpillSlotsNeededForOSR);
+ spill_index = 0;
+ }
}
return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
}
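
A hypothetical standalone restatement of the OSR spill-slot mapping above (the names and the parameter-slot table are stand-ins, not the V8 API): parameters keep their parameter stack slots, locals are numbered from the first local index, and an index past the fixed-slot limit is the abort case.

#include <vector>

// Maps an environment index from the unoptimized frame onto a spill slot of
// the optimized frame that will subsume it.
int OsrSpillIndex(int env_index,
                  int parameter_count,
                  int first_local_index,
                  const std::vector<int>& parameter_stack_slots,
                  int max_fixed_slot_index) {
  if (env_index < parameter_count) {
    // A parameter keeps the stack slot it already occupies.
    return parameter_stack_slots[env_index];
  }
  int spill_index = env_index - first_local_index;
  if (spill_index > max_fixed_slot_index) {
    // The real builder aborts compilation here
    // (kTooManySpillSlotsNeededForOSR) and falls back to slot 0.
    return 0;
  }
  return spill_index;
}
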
LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new(zone()) LCallStub, v0), instr);
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(DefineFixed(new(zone()) LCallStub(context), v0), instr);
}
@@ -2103,10 +2439,26 @@ LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
}
+LInstruction* LChunkBuilder::DoCapturedObject(HCapturedObject* instr) {
+ instr->ReplayEnvironment(current_block_->last_environment());
+
+ // There are no real uses of a captured object.
+ return NULL;
+}
+
+
LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
+ info()->MarkAsRequiresFrame();
LOperand* args = UseRegister(instr->arguments());
- LOperand* length = UseTempRegister(instr->length());
- LOperand* index = UseRegister(instr->index());
+ LOperand* length;
+ LOperand* index;
+ if (instr->length()->IsConstant() && instr->index()->IsConstant()) {
+ length = UseRegisterOrConstant(instr->length());
+ index = UseOrConstant(instr->index());
+ } else {
+ length = UseTempRegister(instr->length());
+ index = UseRegisterAtStart(instr->index());
+ }
return DefineAsRegister(new(zone()) LAccessArgumentsAt(args, length, index));
}
@@ -2119,7 +2471,8 @@ LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
- LTypeof* result = new(zone()) LTypeof(UseFixed(instr->value(), a0));
+ LOperand* context = UseFixed(instr->context(), cp);
+ LTypeof* result = new(zone()) LTypeof(context, UseFixed(instr->value(), a0));
return MarkAsCall(DefineFixed(result, v0), instr);
}
@@ -2136,20 +2489,7 @@ LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
- HEnvironment* env = current_block_->last_environment();
- ASSERT(env != NULL);
-
- env->set_ast_id(instr->ast_id());
-
- env->Drop(instr->pop_count());
- for (int i = 0; i < instr->values()->length(); ++i) {
- HValue* value = instr->values()->at(i);
- if (instr->HasAssignedIndexAt(i)) {
- env->Bind(instr->GetAssignedIndexAt(i), value);
- } else {
- env->Push(value);
- }
- }
+ instr->ReplayEnvironment(current_block_->last_environment());
// If there is an instruction pending deoptimization environment create a
// lazy bailout instruction to capture the environment.
@@ -2171,10 +2511,13 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
if (instr->is_function_entry()) {
- return MarkAsCall(new(zone()) LStackCheck, instr);
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(new(zone()) LStackCheck(context), instr);
} else {
ASSERT(instr->is_backwards_branch());
- return AssignEnvironment(AssignPointerMap(new(zone()) LStackCheck));
+ LOperand* context = UseAny(instr->context());
+ return AssignEnvironment(
+ AssignPointerMap(new(zone()) LStackCheck(context)));
}
}
@@ -2186,10 +2529,11 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
instr->arguments_count(),
instr->function(),
undefined,
- instr->call_kind(),
- instr->inlining_kind());
- if (instr->arguments_var() != NULL) {
- inner->Bind(instr->arguments_var(), graph()->GetArgumentsObject());
+ instr->inlining_kind(),
+ instr->undefined_receiver());
+ // Only replay binding of arguments object if it wasn't removed from graph.
+ if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) {
+ inner->Bind(instr->arguments_var(), instr->arguments_object());
}
inner->set_entry(instr);
current_block_->UpdateEnvironment(inner);
@@ -2206,7 +2550,7 @@ LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
if (env->entry()->arguments_pushed()) {
int argument_count = env->arguments_environment()->parameter_count();
pop = new(zone()) LDrop(argument_count);
- argument_count_ -= argument_count;
+ ASSERT(instr->argument_delta() == -argument_count);
}
HEnvironment* outer = current_block_->last_environment()->
@@ -2217,17 +2561,10 @@ LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
}
-LInstruction* LChunkBuilder::DoIn(HIn* instr) {
- LOperand* key = UseRegisterAtStart(instr->key());
- LOperand* object = UseRegisterAtStart(instr->object());
- LIn* result = new(zone()) LIn(key, object);
- return MarkAsCall(DefineFixed(result, v0), instr);
-}
-
-
LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* object = UseFixed(instr->enumerable(), a0);
- LForInPrepareMap* result = new(zone()) LForInPrepareMap(object);
+ LForInPrepareMap* result = new(zone()) LForInPrepareMap(context, object);
return MarkAsCall(DefineFixed(result, v0), instr, CAN_DEOPTIMIZE_EAGERLY);
}
diff --git a/deps/v8/src/mips/lithium-mips.h b/deps/v8/src/mips/lithium-mips.h
index 3a9aa7acc..518a3b8c6 100644
--- a/deps/v8/src/mips/lithium-mips.h
+++ b/deps/v8/src/mips/lithium-mips.h
@@ -40,24 +40,16 @@ namespace internal {
// Forward declarations.
class LCodeGen;
-#define LITHIUM_ALL_INSTRUCTION_LIST(V) \
- V(ControlInstruction) \
- V(Call) \
- LITHIUM_CONCRETE_INSTRUCTION_LIST(V)
-
-
#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
V(AccessArgumentsAt) \
V(AddI) \
- V(AllocateObject) \
+ V(Allocate) \
V(ApplyArguments) \
V(ArgumentsElements) \
V(ArgumentsLength) \
V(ArithmeticD) \
V(ArithmeticT) \
- V(ArrayLiteral) \
V(BitI) \
- V(BitNotI) \
V(BoundsCheck) \
V(Branch) \
V(CallConstantFunction) \
@@ -67,35 +59,43 @@ class LCodeGen;
V(CallKnownGlobal) \
V(CallNamed) \
V(CallNew) \
+ V(CallNewArray) \
V(CallRuntime) \
V(CallStub) \
- V(CheckFunction) \
V(CheckInstanceType) \
V(CheckMaps) \
+ V(CheckMapValue) \
V(CheckNonSmi) \
- V(CheckPrototypeMaps) \
V(CheckSmi) \
+ V(CheckValue) \
V(ClampDToUint8) \
V(ClampIToUint8) \
V(ClampTToUint8) \
V(ClassOfTestAndBranch) \
- V(CmpConstantEqAndBranch) \
- V(CmpIDAndBranch) \
+ V(CompareNumericAndBranch) \
V(CmpObjectEqAndBranch) \
+ V(CmpHoleAndBranch) \
V(CmpMapAndBranch) \
V(CmpT) \
V(ConstantD) \
+ V(ConstantE) \
V(ConstantI) \
+ V(ConstantS) \
V(ConstantT) \
V(Context) \
+ V(DateField) \
+ V(DebugBreak) \
V(DeclareGlobals) \
- V(DeleteProperty) \
V(Deoptimize) \
V(DivI) \
V(DoubleToI) \
+ V(DoubleToSmi) \
+ V(Drop) \
+ V(Dummy) \
+ V(DummyUse) \
V(ElementsKind) \
- V(FastLiteral) \
- V(FixedArrayBaseLength) \
+ V(ForInCacheArray) \
+ V(ForInPrepareMap) \
V(FunctionLiteral) \
V(GetCachedArrayIndex) \
V(GlobalObject) \
@@ -103,44 +103,51 @@ class LCodeGen;
V(Goto) \
V(HasCachedArrayIndexAndBranch) \
V(HasInstanceTypeAndBranch) \
- V(In) \
+ V(InnerAllocatedObject) \
V(InstanceOf) \
V(InstanceOfKnownGlobal) \
V(InstructionGap) \
V(Integer32ToDouble) \
- V(Uint32ToDouble) \
+ V(Integer32ToSmi) \
V(InvokeFunction) \
V(IsConstructCallAndBranch) \
- V(IsNilAndBranch) \
V(IsObjectAndBranch) \
V(IsStringAndBranch) \
V(IsSmiAndBranch) \
V(IsUndetectableAndBranch) \
- V(JSArrayLength) \
V(Label) \
V(LazyBailout) \
V(LoadContextSlot) \
- V(LoadElements) \
V(LoadExternalArrayPointer) \
+ V(LoadRoot) \
+ V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
V(LoadGlobalCell) \
V(LoadGlobalGeneric) \
- V(LoadKeyedFastDoubleElement) \
- V(LoadKeyedFastElement) \
+ V(LoadKeyed) \
V(LoadKeyedGeneric) \
- V(LoadKeyedSpecializedArrayElement) \
V(LoadNamedField) \
- V(LoadNamedFieldPolymorphic) \
V(LoadNamedGeneric) \
V(MapEnumLength) \
+ V(MathAbs) \
+ V(MathCos) \
+ V(MathExp) \
+ V(MathFloor) \
+ V(MathFloorOfDiv) \
+ V(MathLog) \
V(MathMinMax) \
+ V(MathPowHalf) \
+ V(MathRound) \
+ V(MathSin) \
+ V(MathSqrt) \
+ V(MathTan) \
V(ModI) \
V(MulI) \
+ V(MultiplyAddD) \
V(NumberTagD) \
V(NumberTagI) \
V(NumberTagU) \
V(NumberUntagD) \
- V(ObjectLiteral) \
V(OsrEntry) \
V(OuterContext) \
V(Parameter) \
@@ -149,50 +156,49 @@ class LCodeGen;
V(Random) \
V(RegExpLiteral) \
V(Return) \
+ V(SeqStringSetChar) \
V(ShiftI) \
V(SmiTag) \
V(SmiUntag) \
V(StackCheck) \
+ V(StoreCodeEntry) \
V(StoreContextSlot) \
V(StoreGlobalCell) \
V(StoreGlobalGeneric) \
- V(StoreKeyedFastDoubleElement) \
- V(StoreKeyedFastElement) \
+ V(StoreKeyed) \
V(StoreKeyedGeneric) \
- V(StoreKeyedSpecializedArrayElement) \
V(StoreNamedField) \
V(StoreNamedGeneric) \
V(StringAdd) \
V(StringCharCodeAt) \
V(StringCharFromCode) \
V(StringCompareAndBranch) \
- V(StringLength) \
V(SubI) \
V(TaggedToI) \
V(ThisFunction) \
V(Throw) \
V(ToFastProperties) \
V(TransitionElementsKind) \
+ V(TrapAllocationMemento) \
V(Typeof) \
V(TypeofIsAndBranch) \
- V(UnaryMathOperation) \
+ V(Uint32ToDouble) \
+ V(Uint32ToSmi) \
V(UnknownOSRValue) \
V(ValueOf) \
- V(ForInPrepareMap) \
- V(ForInCacheArray) \
- V(CheckMapValue) \
- V(LoadFieldByIndex) \
- V(DateField) \
- V(WrapReceiver) \
- V(Drop)
+ V(WrapReceiver)
-#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
- virtual Opcode opcode() const { return LInstruction::k##type; } \
- virtual void CompileToNative(LCodeGen* generator); \
- virtual const char* Mnemonic() const { return mnemonic; } \
- static L##type* cast(LInstruction* instr) { \
- ASSERT(instr->Is##type()); \
- return reinterpret_cast<L##type*>(instr); \
+#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
+ virtual Opcode opcode() const V8_FINAL V8_OVERRIDE { \
+ return LInstruction::k##type; \
+ } \
+ virtual void CompileToNative(LCodeGen* generator) V8_FINAL V8_OVERRIDE; \
+ virtual const char* Mnemonic() const V8_FINAL V8_OVERRIDE { \
+ return mnemonic; \
+ } \
+ static L##type* cast(LInstruction* instr) { \
+ ASSERT(instr->Is##type()); \
+ return reinterpret_cast<L##type*>(instr); \
}
@@ -202,13 +208,15 @@ class LCodeGen;
}
-class LInstruction: public ZoneObject {
+class LInstruction : public ZoneObject {
public:
LInstruction()
- : environment_(NULL),
- hydrogen_value_(NULL),
- is_call_(false) { }
- virtual ~LInstruction() { }
+ : environment_(NULL),
+ hydrogen_value_(NULL),
+ bit_field_(IsCallBits::encode(false)) {
+ }
+
+ virtual ~LInstruction() {}
virtual void CompileToNative(LCodeGen* generator) = 0;
virtual const char* Mnemonic() const = 0;
@@ -251,17 +259,25 @@ class LInstruction: public ZoneObject {
virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) { }
- void MarkAsCall() { is_call_ = true; }
+ void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
+ bool IsCall() const { return IsCallBits::decode(bit_field_); }
+
+ // Interface to the register allocator and iterators.
+ bool ClobbersTemps() const { return IsCall(); }
+ bool ClobbersRegisters() const { return IsCall(); }
+ virtual bool ClobbersDoubleRegisters() const { return IsCall(); }
// Interface to the register allocator and iterators.
- bool IsMarkedAsCall() const { return is_call_; }
+ bool IsMarkedAsCall() const { return IsCall(); }
virtual bool HasResult() const = 0;
- virtual LOperand* result() = 0;
+ virtual LOperand* result() const = 0;
LOperand* FirstInput() { return InputAt(0); }
LOperand* Output() { return HasResult() ? result() : NULL; }
+ virtual bool HasInterestingComment(LCodeGen* gen) const { return true; }
+
#ifdef DEBUG
void VerifyCall();
#endif
@@ -276,11 +292,12 @@ class LInstruction: public ZoneObject {
virtual int TempCount() = 0;
virtual LOperand* TempAt(int i) = 0;
+ class IsCallBits: public BitField<bool, 0, 1> {};
+
LEnvironment* environment_;
SetOncePointer<LPointerMap> pointer_map_;
HValue* hydrogen_value_;
- bool is_call_;
- bool is_save_doubles_;
+ int bit_field_;
};
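
The two booleans previously stored on LInstruction are packed into a single bit_field_ word accessed through a BitField helper, as MarkAsCall and IsCall show above. A simplified standalone version of that encode/decode/update pattern (V8's real BitField template is more general than this sketch):

#include <cstdint>

// One boolean flag stored at bit kShift of an integer word.
template <int kShift>
struct BoolBitField {
  static uint32_t encode(bool value) {
    return static_cast<uint32_t>(value) << kShift;
  }
  static bool decode(uint32_t field) {
    return ((field >> kShift) & 1u) != 0;
  }
  static uint32_t update(uint32_t field, bool value) {
    return (field & ~(1u << kShift)) | encode(value);
  }
};

// Analogue of IsCallBits above; a further flag would claim the next bit.
typedef BoolBitField<0> IsCallBit;

// Usage matching MarkAsCall() and IsCall():
//   bit_field = IsCallBit::update(bit_field, true);
//   bool is_call = IsCallBit::decode(bit_field);
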
@@ -288,13 +305,15 @@ class LInstruction: public ZoneObject {
// I = number of input operands.
// T = number of temporary operands.
template<int R, int I, int T>
-class LTemplateInstruction: public LInstruction {
+class LTemplateInstruction : public LInstruction {
public:
// Allow 0 or 1 output operands.
STATIC_ASSERT(R == 0 || R == 1);
- virtual bool HasResult() const { return R != 0; }
+ virtual bool HasResult() const V8_FINAL V8_OVERRIDE {
+ return R != 0 && result() != NULL;
+ }
void set_result(LOperand* operand) { results_[0] = operand; }
- LOperand* result() { return results_[0]; }
+ LOperand* result() const { return results_[0]; }
protected:
EmbeddedContainer<LOperand*, R> results_;
@@ -302,15 +321,15 @@ class LTemplateInstruction: public LInstruction {
EmbeddedContainer<LOperand*, T> temps_;
private:
- virtual int InputCount() { return I; }
- virtual LOperand* InputAt(int i) { return inputs_[i]; }
+ virtual int InputCount() V8_FINAL V8_OVERRIDE { return I; }
+ virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; }
- virtual int TempCount() { return T; }
- virtual LOperand* TempAt(int i) { return temps_[i]; }
+ virtual int TempCount() V8_FINAL V8_OVERRIDE { return T; }
+ virtual LOperand* TempAt(int i) V8_FINAL V8_OVERRIDE { return temps_[i]; }
};
-class LGap: public LTemplateInstruction<0, 0, 0> {
+class LGap : public LTemplateInstruction<0, 0, 0> {
public:
explicit LGap(HBasicBlock* block)
: block_(block) {
@@ -321,8 +340,8 @@ class LGap: public LTemplateInstruction<0, 0, 0> {
}
// Can't use the DECLARE-macro here because of sub-classes.
- virtual bool IsGap() const { return true; }
- virtual void PrintDataTo(StringStream* stream);
+ virtual bool IsGap() const V8_FINAL V8_OVERRIDE { return true; }
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
static LGap* cast(LInstruction* instr) {
ASSERT(instr->IsGap());
return reinterpret_cast<LGap*>(instr);
@@ -358,30 +377,35 @@ class LGap: public LTemplateInstruction<0, 0, 0> {
};
-class LInstructionGap: public LGap {
+class LInstructionGap V8_FINAL : public LGap {
public:
explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ return !IsRedundant();
+ }
+
DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap")
};
-class LGoto: public LTemplateInstruction<0, 0, 0> {
+class LGoto V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
- explicit LGoto(int block_id) : block_id_(block_id) { }
+ explicit LGoto(HBasicBlock* block) : block_(block) { }
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
- virtual void PrintDataTo(StringStream* stream);
- virtual bool IsControl() const { return true; }
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual bool IsControl() const V8_OVERRIDE { return true; }
- int block_id() const { return block_id_; }
+ int block_id() const { return block_->block_id(); }
private:
- int block_id_;
+ HBasicBlock* block_;
};
-class LLazyBailout: public LTemplateInstruction<0, 0, 0> {
+class LLazyBailout V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
LLazyBailout() : gap_instructions_size_(0) { }
@@ -397,23 +421,44 @@ class LLazyBailout: public LTemplateInstruction<0, 0, 0> {
};
-class LDeoptimize: public LTemplateInstruction<0, 0, 0> {
+class LDummy V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ explicit LDummy() { }
+ DECLARE_CONCRETE_INSTRUCTION(Dummy, "dummy")
+};
+
+
+class LDummyUse V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LDummyUse(LOperand* value) {
+ inputs_[0] = value;
+ }
+ DECLARE_CONCRETE_INSTRUCTION(DummyUse, "dummy-use")
+};
+
+
+class LDeoptimize V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
+ DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
};
-class LLabel: public LGap {
+class LLabel V8_FINAL : public LGap {
public:
explicit LLabel(HBasicBlock* block)
: LGap(block), replacement_(NULL) { }
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ return false;
+ }
DECLARE_CONCRETE_INSTRUCTION(Label, "label")
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
int block_id() const { return block()->block_id(); }
bool is_loop_header() const { return block()->IsLoopHeader(); }
+ bool is_osr_entry() const { return block()->is_osr_entry(); }
Label* label() { return &label_; }
LLabel* replacement() const { return replacement_; }
void set_replacement(LLabel* label) { replacement_ = label; }
@@ -425,14 +470,23 @@ class LLabel: public LGap {
};
-class LParameter: public LTemplateInstruction<1, 0, 0> {
+class LParameter V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ return false;
+ }
DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
};
-class LCallStub: public LTemplateInstruction<1, 0, 0> {
+class LCallStub V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
+ explicit LCallStub(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
DECLARE_HYDROGEN_ACCESSOR(CallStub)
@@ -442,30 +496,60 @@ class LCallStub: public LTemplateInstruction<1, 0, 0> {
};
-class LUnknownOSRValue: public LTemplateInstruction<1, 0, 0> {
+class LUnknownOSRValue V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ return false;
+ }
DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
};
template<int I, int T>
-class LControlInstruction: public LTemplateInstruction<0, I, T> {
+class LControlInstruction : public LTemplateInstruction<0, I, T> {
public:
- virtual bool IsControl() const { return true; }
+ LControlInstruction() : false_label_(NULL), true_label_(NULL) { }
+
+ virtual bool IsControl() const V8_FINAL V8_OVERRIDE { return true; }
int SuccessorCount() { return hydrogen()->SuccessorCount(); }
HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
- int true_block_id() { return hydrogen()->SuccessorAt(0)->block_id(); }
- int false_block_id() { return hydrogen()->SuccessorAt(1)->block_id(); }
+
+ int TrueDestination(LChunk* chunk) {
+ return chunk->LookupDestination(true_block_id());
+ }
+ int FalseDestination(LChunk* chunk) {
+ return chunk->LookupDestination(false_block_id());
+ }
+
+ Label* TrueLabel(LChunk* chunk) {
+ if (true_label_ == NULL) {
+ true_label_ = chunk->GetAssemblyLabel(TrueDestination(chunk));
+ }
+ return true_label_;
+ }
+ Label* FalseLabel(LChunk* chunk) {
+ if (false_label_ == NULL) {
+ false_label_ = chunk->GetAssemblyLabel(FalseDestination(chunk));
+ }
+ return false_label_;
+ }
+
+ protected:
+ int true_block_id() { return SuccessorAt(0)->block_id(); }
+ int false_block_id() { return SuccessorAt(1)->block_id(); }
private:
HControlInstruction* hydrogen() {
return HControlInstruction::cast(this->hydrogen_value());
}
+
+ Label* false_label_;
+ Label* true_label_;
};
-class LWrapReceiver: public LTemplateInstruction<1, 2, 0> {
+class LWrapReceiver V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LWrapReceiver(LOperand* receiver, LOperand* function) {
inputs_[0] = receiver;
@@ -479,7 +563,7 @@ class LWrapReceiver: public LTemplateInstruction<1, 2, 0> {
};
-class LApplyArguments: public LTemplateInstruction<1, 4, 0> {
+class LApplyArguments V8_FINAL : public LTemplateInstruction<1, 4, 0> {
public:
LApplyArguments(LOperand* function,
LOperand* receiver,
@@ -500,7 +584,7 @@ class LApplyArguments: public LTemplateInstruction<1, 4, 0> {
};
-class LAccessArgumentsAt: public LTemplateInstruction<1, 3, 0> {
+class LAccessArgumentsAt V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) {
inputs_[0] = arguments;
@@ -514,11 +598,11 @@ class LAccessArgumentsAt: public LTemplateInstruction<1, 3, 0> {
LOperand* length() { return inputs_[1]; }
LOperand* index() { return inputs_[2]; }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LArgumentsLength: public LTemplateInstruction<1, 1, 0> {
+class LArgumentsLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LArgumentsLength(LOperand* elements) {
inputs_[0] = elements;
@@ -530,14 +614,14 @@ class LArgumentsLength: public LTemplateInstruction<1, 1, 0> {
};
-class LArgumentsElements: public LTemplateInstruction<1, 0, 0> {
+class LArgumentsElements V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements)
};
-class LModI: public LTemplateInstruction<1, 2, 3> {
+class LModI V8_FINAL : public LTemplateInstruction<1, 2, 3> {
public:
// Used when the right hand is a constant power of 2.
LModI(LOperand* left,
@@ -573,7 +657,7 @@ class LModI: public LTemplateInstruction<1, 2, 3> {
};
-class LDivI: public LTemplateInstruction<1, 2, 0> {
+class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LDivI(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -588,9 +672,11 @@ class LDivI: public LTemplateInstruction<1, 2, 0> {
};
-class LMulI: public LTemplateInstruction<1, 2, 1> {
+class LMathFloorOfDiv V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
- LMulI(LOperand* left, LOperand* right, LOperand* temp) {
+ LMathFloorOfDiv(LOperand* left,
+ LOperand* right,
+ LOperand* temp = NULL) {
inputs_[0] = left;
inputs_[1] = right;
temps_[0] = temp;
@@ -600,14 +686,53 @@ class LMulI: public LTemplateInstruction<1, 2, 1> {
LOperand* right() { return inputs_[1]; }
LOperand* temp() { return temps_[0]; }
+ DECLARE_CONCRETE_INSTRUCTION(MathFloorOfDiv, "math-floor-of-div")
+ DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+};
+
+
+class LMulI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LMulI(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i")
DECLARE_HYDROGEN_ACCESSOR(Mul)
};
-class LCmpIDAndBranch: public LControlInstruction<2, 0> {
+// Instruction for computing multiplier * multiplicand + addend.
+class LMultiplyAddD V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+ LMultiplyAddD(LOperand* addend, LOperand* multiplier,
+ LOperand* multiplicand) {
+ inputs_[0] = addend;
+ inputs_[1] = multiplier;
+ inputs_[2] = multiplicand;
+ }
+
+ LOperand* addend() { return inputs_[0]; }
+ LOperand* multiplier() { return inputs_[1]; }
+ LOperand* multiplicand() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MultiplyAddD, "multiply-add-d")
+};
+
+
+class LDebugBreak V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(DebugBreak, "break")
+};
+
+
+class LCompareNumericAndBranch V8_FINAL : public LControlInstruction<2, 0> {
public:
- LCmpIDAndBranch(LOperand* left, LOperand* right) {
+ LCompareNumericAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
}
@@ -615,21 +740,22 @@ class LCmpIDAndBranch: public LControlInstruction<2, 0> {
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
- DECLARE_CONCRETE_INSTRUCTION(CmpIDAndBranch, "cmp-id-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareIDAndBranch)
+ DECLARE_CONCRETE_INSTRUCTION(CompareNumericAndBranch,
+ "compare-numeric-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareNumericAndBranch)
Token::Value op() const { return hydrogen()->token(); }
bool is_double() const {
- return hydrogen()->GetInputRepresentation().IsDouble();
+ return hydrogen()->representation().IsDouble();
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LUnaryMathOperation: public LTemplateInstruction<1, 1, 1> {
+class LMathFloor V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
- LUnaryMathOperation(LOperand* value, LOperand* temp) {
+ LMathFloor(LOperand* value, LOperand* temp) {
inputs_[0] = value;
temps_[0] = temp;
}
@@ -637,63 +763,166 @@ class LUnaryMathOperation: public LTemplateInstruction<1, 1, 1> {
LOperand* value() { return inputs_[0]; }
LOperand* temp() { return temps_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(UnaryMathOperation, "unary-math-operation")
+ DECLARE_CONCRETE_INSTRUCTION(MathFloor, "math-floor")
DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
- virtual void PrintDataTo(StringStream* stream);
- BuiltinFunctionId op() const { return hydrogen()->op(); }
+
+class LMathRound V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+ public:
+ LMathRound(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathRound, "math-round")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
};
-class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
+class LMathAbs V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
+ LMathAbs(LOperand* context, LOperand* value) {
+ inputs_[1] = context;
+ inputs_[0] = value;
}
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
+ LOperand* context() { return inputs_[1]; }
+ LOperand* value() { return inputs_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch,
- "cmp-object-eq-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareObjectEqAndBranch)
+ DECLARE_CONCRETE_INSTRUCTION(MathAbs, "math-abs")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
};
-class LCmpConstantEqAndBranch: public LControlInstruction<1, 0> {
+class LMathLog V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
- explicit LCmpConstantEqAndBranch(LOperand* left) {
- inputs_[0] = left;
+ explicit LMathLog(LOperand* value) {
+ inputs_[0] = value;
}
- LOperand* left() { return inputs_[0]; }
+ LOperand* value() { return inputs_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(CmpConstantEqAndBranch,
- "cmp-constant-eq-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareConstantEqAndBranch)
+ DECLARE_CONCRETE_INSTRUCTION(MathLog, "math-log")
};
-class LIsNilAndBranch: public LControlInstruction<1, 0> {
+class LMathSin V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
- explicit LIsNilAndBranch(LOperand* value) {
+ explicit LMathSin(LOperand* value) {
inputs_[0] = value;
}
LOperand* value() { return inputs_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(IsNilAndBranch, "is-nil-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsNilAndBranch)
+ DECLARE_CONCRETE_INSTRUCTION(MathSin, "math-sin")
+};
- EqualityKind kind() const { return hydrogen()->kind(); }
- NilValue nil() const { return hydrogen()->nil(); }
- virtual void PrintDataTo(StringStream* stream);
+class LMathCos V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathCos(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathCos, "math-cos")
+};
+
+
+class LMathTan V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathTan(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathTan, "math-tan")
+};
+
+
+class LMathExp V8_FINAL : public LTemplateInstruction<1, 1, 3> {
+ public:
+ LMathExp(LOperand* value,
+ LOperand* double_temp,
+ LOperand* temp1,
+ LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ temps_[2] = double_temp;
+ ExternalReference::InitializeMathExpData();
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+ LOperand* double_temp() { return temps_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp")
+};
+
+
+class LMathSqrt V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathSqrt(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathSqrt, "math-sqrt")
+};
+
+
+class LMathPowHalf V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+ public:
+ LMathPowHalf(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathPowHalf, "math-pow-half")
};
-class LIsObjectAndBranch: public LControlInstruction<1, 1> {
+class LCmpObjectEqAndBranch V8_FINAL : public LControlInstruction<2, 0> {
+ public:
+ LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch, "cmp-object-eq-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareObjectEqAndBranch)
+};
+
+
+class LCmpHoleAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+ public:
+ explicit LCmpHoleAndBranch(LOperand* object) {
+ inputs_[0] = object;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpHoleAndBranch, "cmp-hole-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareHoleAndBranch)
+};
+
+
+class LIsObjectAndBranch V8_FINAL : public LControlInstruction<1, 1> {
public:
LIsObjectAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -710,7 +939,7 @@ class LIsObjectAndBranch: public LControlInstruction<1, 1> {
};
-class LIsStringAndBranch: public LControlInstruction<1, 1> {
+class LIsStringAndBranch V8_FINAL : public LControlInstruction<1, 1> {
public:
LIsStringAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -723,11 +952,11 @@ class LIsStringAndBranch: public LControlInstruction<1, 1> {
DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LIsSmiAndBranch: public LControlInstruction<1, 0> {
+class LIsSmiAndBranch V8_FINAL : public LControlInstruction<1, 0> {
public:
explicit LIsSmiAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -738,11 +967,11 @@ class LIsSmiAndBranch: public LControlInstruction<1, 0> {
DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LIsUndetectableAndBranch: public LControlInstruction<1, 1> {
+class LIsUndetectableAndBranch V8_FINAL : public LControlInstruction<1, 1> {
public:
explicit LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -756,19 +985,21 @@ class LIsUndetectableAndBranch: public LControlInstruction<1, 1> {
"is-undetectable-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LStringCompareAndBranch: public LControlInstruction<2, 0> {
+class LStringCompareAndBranch V8_FINAL : public LControlInstruction<3, 0> {
public:
- LStringCompareAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
+ LStringCompareAndBranch(LOperand* context, LOperand* left, LOperand* right) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
}
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch,
"string-compare-and-branch")
@@ -776,11 +1007,11 @@ class LStringCompareAndBranch: public LControlInstruction<2, 0> {
Token::Value op() const { return hydrogen()->token(); }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LHasInstanceTypeAndBranch: public LControlInstruction<1, 0> {
+class LHasInstanceTypeAndBranch V8_FINAL : public LControlInstruction<1, 0> {
public:
explicit LHasInstanceTypeAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -792,11 +1023,11 @@ class LHasInstanceTypeAndBranch: public LControlInstruction<1, 0> {
"has-instance-type-and-branch")
DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LGetCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
+class LGetCachedArrayIndex V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LGetCachedArrayIndex(LOperand* value) {
inputs_[0] = value;
@@ -809,7 +1040,8 @@ class LGetCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
};
-class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
+class LHasCachedArrayIndexAndBranch V8_FINAL
+ : public LControlInstruction<1, 0> {
public:
explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -821,11 +1053,11 @@ class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
"has-cached-array-index-and-branch")
DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LClassOfTestAndBranch: public LControlInstruction<1, 1> {
+class LClassOfTestAndBranch V8_FINAL : public LControlInstruction<1, 1> {
public:
LClassOfTestAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -839,19 +1071,21 @@ class LClassOfTestAndBranch: public LControlInstruction<1, 1> {
"class-of-test-and-branch")
DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LCmpT: public LTemplateInstruction<1, 2, 0> {
+class LCmpT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
- LCmpT(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
+ LCmpT(LOperand* context, LOperand* left, LOperand* right) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
}
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
@@ -860,28 +1094,32 @@ class LCmpT: public LTemplateInstruction<1, 2, 0> {
};
-class LInstanceOf: public LTemplateInstruction<1, 2, 0> {
+class LInstanceOf V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
- LInstanceOf(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
+ LInstanceOf(LOperand* context, LOperand* left, LOperand* right) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
}
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
};
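
A pattern that repeats through the rest of the diff: instructions that may call out to generic code (LCmpT, LInstanceOf, LThrow, the LCall* family, the generic loads and stores) grow an explicit context operand, usually as inputs_[0], instead of assuming the context is implicitly available. The matching builder change is not shown here; presumably the LChunkBuilder call sites now look roughly like this (a sketch under that assumption; UseFixed, DefineFixed, MarkAsCall and the cp/r0/r1 register choices come from the surrounding V8 code, not from this hunk):

// Hypothetical builder call site for the three-operand LInstanceOf.
LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
  LOperand* context = UseFixed(instr->context(), cp);  // cp = ARM context register
  LInstanceOf* result =
      new(zone()) LInstanceOf(context,
                              UseFixed(instr->left(), r0),
                              UseFixed(instr->right(), r1));
  return MarkAsCall(DefineFixed(result, r0), instr);
}
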
-class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> {
+class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
- LInstanceOfKnownGlobal(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
+ LInstanceOfKnownGlobal(LOperand* context, LOperand* value, LOperand* temp) {
+ inputs_[0] = context;
+ inputs_[1] = value;
temps_[0] = temp;
}
- LOperand* value() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal,
@@ -892,7 +1130,8 @@ class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> {
LEnvironment* GetDeferredLazyDeoptimizationEnvironment() {
return lazy_deopt_env_;
}
- virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) {
+ virtual void SetDeferredLazyDeoptimizationEnvironment(
+ LEnvironment* env) V8_OVERRIDE {
lazy_deopt_env_ = env;
}
@@ -901,7 +1140,7 @@ class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> {
};
-class LBoundsCheck: public LTemplateInstruction<0, 2, 0> {
+class LBoundsCheck V8_FINAL : public LTemplateInstruction<0, 2, 0> {
public:
LBoundsCheck(LOperand* index, LOperand* length) {
inputs_[0] = index;
@@ -916,7 +1155,7 @@ class LBoundsCheck: public LTemplateInstruction<0, 2, 0> {
};
-class LBitI: public LTemplateInstruction<1, 2, 0> {
+class LBitI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LBitI(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -933,7 +1172,7 @@ class LBitI: public LTemplateInstruction<1, 2, 0> {
};
-class LShiftI: public LTemplateInstruction<1, 2, 0> {
+class LShiftI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
: op_(op), can_deopt_(can_deopt) {
@@ -954,7 +1193,7 @@ class LShiftI: public LTemplateInstruction<1, 2, 0> {
};
-class LSubI: public LTemplateInstruction<1, 2, 0> {
+class LSubI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LSubI(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -969,7 +1208,7 @@ class LSubI: public LTemplateInstruction<1, 2, 0> {
};
-class LConstantI: public LTemplateInstruction<1, 0, 0> {
+class LConstantI V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -978,7 +1217,16 @@ class LConstantI: public LTemplateInstruction<1, 0, 0> {
};
-class LConstantD: public LTemplateInstruction<1, 0, 0> {
+class LConstantS V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ConstantS, "constant-s")
+ DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+ Smi* value() const { return Smi::FromInt(hydrogen()->Integer32Value()); }
+};
+
+
+class LConstantD V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -987,16 +1235,29 @@ class LConstantD: public LTemplateInstruction<1, 0, 0> {
};
-class LConstantT: public LTemplateInstruction<1, 0, 0> {
+class LConstantE V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ConstantE, "constant-e")
+ DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+ ExternalReference value() const {
+ return hydrogen()->ExternalReferenceValue();
+ }
+};
+
+
+class LConstantT V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
DECLARE_HYDROGEN_ACCESSOR(Constant)
- Handle<Object> value() const { return hydrogen()->handle(); }
+ Handle<Object> value(Isolate* isolate) const {
+ return hydrogen()->handle(isolate);
+ }
};
-class LBranch: public LControlInstruction<1, 0> {
+class LBranch V8_FINAL : public LControlInstruction<1, 0> {
public:
explicit LBranch(LOperand* value) {
inputs_[0] = value;
@@ -1007,11 +1268,11 @@ class LBranch: public LControlInstruction<1, 0> {
DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
DECLARE_HYDROGEN_ACCESSOR(Branch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LCmpMapAndBranch: public LTemplateInstruction<0, 1, 1> {
+class LCmpMapAndBranch V8_FINAL : public LControlInstruction<1, 1> {
public:
LCmpMapAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1024,46 +1285,11 @@ class LCmpMapAndBranch: public LTemplateInstruction<0, 1, 1> {
DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
DECLARE_HYDROGEN_ACCESSOR(CompareMap)
- virtual bool IsControl() const { return true; }
-
- Handle<Map> map() const { return hydrogen()->map(); }
- int true_block_id() const {
- return hydrogen()->FirstSuccessor()->block_id();
- }
- int false_block_id() const {
- return hydrogen()->SecondSuccessor()->block_id();
- }
-};
-
-
-class LJSArrayLength: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LJSArrayLength(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(JSArrayLength, "js-array-length")
- DECLARE_HYDROGEN_ACCESSOR(JSArrayLength)
+ Handle<Map> map() const { return hydrogen()->map().handle(); }
};
-class LFixedArrayBaseLength: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LFixedArrayBaseLength(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(FixedArrayBaseLength,
- "fixed-array-base-length")
- DECLARE_HYDROGEN_ACCESSOR(FixedArrayBaseLength)
-};
-
-
-class LMapEnumLength: public LTemplateInstruction<1, 1, 0> {
+class LMapEnumLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMapEnumLength(LOperand* value) {
inputs_[0] = value;
@@ -1075,7 +1301,7 @@ class LMapEnumLength: public LTemplateInstruction<1, 1, 0> {
};
-class LElementsKind: public LTemplateInstruction<1, 1, 0> {
+class LElementsKind V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LElementsKind(LOperand* value) {
inputs_[0] = value;
@@ -1088,7 +1314,7 @@ class LElementsKind: public LTemplateInstruction<1, 1, 0> {
};
-class LValueOf: public LTemplateInstruction<1, 1, 1> {
+class LValueOf V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LValueOf(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1103,7 +1329,7 @@ class LValueOf: public LTemplateInstruction<1, 1, 1> {
};
-class LDateField: public LTemplateInstruction<1, 1, 1> {
+class LDateField V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LDateField(LOperand* date, LOperand* temp, Smi* index) : index_(index) {
inputs_[0] = date;
@@ -1114,39 +1340,53 @@ class LDateField: public LTemplateInstruction<1, 1, 1> {
LOperand* temp() { return temps_[0]; }
Smi* index() const { return index_; }
- DECLARE_CONCRETE_INSTRUCTION(ValueOf, "date-field")
- DECLARE_HYDROGEN_ACCESSOR(ValueOf)
+ DECLARE_CONCRETE_INSTRUCTION(DateField, "date-field")
+ DECLARE_HYDROGEN_ACCESSOR(DateField)
private:
Smi* index_;
};
-class LThrow: public LTemplateInstruction<0, 1, 0> {
+class LSeqStringSetChar V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
- explicit LThrow(LOperand* value) {
- inputs_[0] = value;
+ LSeqStringSetChar(String::Encoding encoding,
+ LOperand* string,
+ LOperand* index,
+ LOperand* value) : encoding_(encoding) {
+ inputs_[0] = string;
+ inputs_[1] = index;
+ inputs_[2] = value;
}
- LOperand* value() { return inputs_[0]; }
+ String::Encoding encoding() { return encoding_; }
+ LOperand* string() { return inputs_[0]; }
+ LOperand* index() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
- DECLARE_CONCRETE_INSTRUCTION(Throw, "throw")
+ DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char")
+ DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar)
+
+ private:
+ String::Encoding encoding_;
};
-class LBitNotI: public LTemplateInstruction<1, 1, 0> {
+class LThrow V8_FINAL : public LTemplateInstruction<0, 2, 0> {
public:
- explicit LBitNotI(LOperand* value) {
- inputs_[0] = value;
+ LThrow(LOperand* context, LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = value;
}
- LOperand* value() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
- DECLARE_CONCRETE_INSTRUCTION(BitNotI, "bit-not-i")
+ DECLARE_CONCRETE_INSTRUCTION(Throw, "throw")
};
-class LAddI: public LTemplateInstruction<1, 2, 0> {
+class LAddI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LAddI(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1161,7 +1401,7 @@ class LAddI: public LTemplateInstruction<1, 2, 0> {
};
-class LMathMinMax: public LTemplateInstruction<1, 2, 0> {
+class LMathMinMax V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LMathMinMax(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1171,12 +1411,12 @@ class LMathMinMax: public LTemplateInstruction<1, 2, 0> {
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
- DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "min-max")
+ DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "math-min-max")
DECLARE_HYDROGEN_ACCESSOR(MathMinMax)
};
-class LPower: public LTemplateInstruction<1, 2, 0> {
+class LPower V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LPower(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1191,20 +1431,29 @@ class LPower: public LTemplateInstruction<1, 2, 0> {
};
-class LRandom: public LTemplateInstruction<1, 1, 0> {
+class LRandom V8_FINAL : public LTemplateInstruction<1, 1, 3> {
public:
- explicit LRandom(LOperand* global_object) {
+ LRandom(LOperand* global_object,
+ LOperand* scratch,
+ LOperand* scratch2,
+ LOperand* scratch3) {
inputs_[0] = global_object;
+ temps_[0] = scratch;
+ temps_[1] = scratch2;
+ temps_[2] = scratch3;
}
- LOperand* global_object() { return inputs_[0]; }
+ LOperand* global_object() const { return inputs_[0]; }
+ LOperand* scratch() const { return temps_[0]; }
+ LOperand* scratch2() const { return temps_[1]; }
+ LOperand* scratch3() const { return temps_[2]; }
DECLARE_CONCRETE_INSTRUCTION(Random, "random")
DECLARE_HYDROGEN_ACCESSOR(Random)
};
-class LArithmeticD: public LTemplateInstruction<1, 2, 0> {
+class LArithmeticD V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
: op_(op) {
@@ -1216,49 +1465,67 @@ class LArithmeticD: public LTemplateInstruction<1, 2, 0> {
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
- virtual Opcode opcode() const { return LInstruction::kArithmeticD; }
- virtual void CompileToNative(LCodeGen* generator);
- virtual const char* Mnemonic() const;
+ virtual Opcode opcode() const V8_OVERRIDE {
+ return LInstruction::kArithmeticD;
+ }
+ virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
+ virtual const char* Mnemonic() const V8_OVERRIDE;
private:
Token::Value op_;
};
-class LArithmeticT: public LTemplateInstruction<1, 2, 0> {
+class LArithmeticT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
- LArithmeticT(Token::Value op, LOperand* left, LOperand* right)
+ LArithmeticT(Token::Value op,
+ LOperand* context,
+ LOperand* left,
+ LOperand* right)
: op_(op) {
- inputs_[0] = left;
- inputs_[1] = right;
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
}
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
Token::Value op() const { return op_; }
- virtual Opcode opcode() const { return LInstruction::kArithmeticT; }
- virtual void CompileToNative(LCodeGen* generator);
- virtual const char* Mnemonic() const;
+ virtual Opcode opcode() const V8_FINAL { return LInstruction::kArithmeticT; }
+ virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
+ virtual const char* Mnemonic() const V8_OVERRIDE;
private:
Token::Value op_;
};
-class LReturn: public LTemplateInstruction<0, 1, 0> {
+class LReturn V8_FINAL : public LTemplateInstruction<0, 3, 0> {
public:
- explicit LReturn(LOperand* value) {
+ LReturn(LOperand* value, LOperand* context, LOperand* parameter_count) {
inputs_[0] = value;
+ inputs_[1] = context;
+ inputs_[2] = parameter_count;
}
LOperand* value() { return inputs_[0]; }
+ bool has_constant_parameter_count() {
+ return parameter_count()->IsConstantOperand();
+ }
+ LConstantOperand* constant_parameter_count() {
+ ASSERT(has_constant_parameter_count());
+ return LConstantOperand::cast(parameter_count());
+ }
+ LOperand* parameter_count() { return inputs_[2]; }
+
DECLARE_CONCRETE_INSTRUCTION(Return, "return")
};
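
LReturn now carries the parameter count as a third operand, so the epilogue can drop either a compile-time-constant number of argument slots or a count held in a register. The accessors above make that choice explicit; a hedged sketch of how a code generator might use them (illustrative only, not the actual LCodeGen::DoReturn):

// Uses only the accessors declared on LReturn above.
static void EmitReturnSketch(LReturn* instr) {
  if (instr->has_constant_parameter_count()) {
    LConstantOperand* count = instr->constant_parameter_count();
    // Drop a statically known number of argument slots based on 'count'.
    (void) count;
  } else {
    LOperand* count = instr->parameter_count();
    // Drop a register-supplied number of argument slots based on 'count'.
    (void) count;
  }
}
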
-class LLoadNamedField: public LTemplateInstruction<1, 1, 0> {
+class LLoadNamedField V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadNamedField(LOperand* object) {
inputs_[0] = object;
@@ -1271,26 +1538,15 @@ class LLoadNamedField: public LTemplateInstruction<1, 1, 0> {
};
-class LLoadNamedFieldPolymorphic: public LTemplateInstruction<1, 1, 0> {
+class LLoadNamedGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LLoadNamedFieldPolymorphic(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field-polymorphic")
- DECLARE_HYDROGEN_ACCESSOR(LoadNamedFieldPolymorphic)
-};
-
-
-class LLoadNamedGeneric: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadNamedGeneric(LOperand* object) {
- inputs_[0] = object;
+ LLoadNamedGeneric(LOperand* context, LOperand* object) {
+ inputs_[0] = context;
+ inputs_[1] = object;
}
- LOperand* object() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
@@ -1299,7 +1555,7 @@ class LLoadNamedGeneric: public LTemplateInstruction<1, 1, 0> {
};
-class LLoadFunctionPrototype: public LTemplateInstruction<1, 1, 0> {
+class LLoadFunctionPrototype V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadFunctionPrototype(LOperand* function) {
inputs_[0] = function;
@@ -1312,19 +1568,17 @@ class LLoadFunctionPrototype: public LTemplateInstruction<1, 1, 0> {
};
-class LLoadElements: public LTemplateInstruction<1, 1, 0> {
+class LLoadRoot V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
- explicit LLoadElements(LOperand* object) {
- inputs_[0] = object;
- }
+ DECLARE_CONCRETE_INSTRUCTION(LoadRoot, "load-root")
+ DECLARE_HYDROGEN_ACCESSOR(LoadRoot)
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadElements, "load-elements")
+ Heap::RootListIndex index() const { return hydrogen()->index(); }
};
-class LLoadExternalArrayPointer: public LTemplateInstruction<1, 1, 0> {
+class LLoadExternalArrayPointer V8_FINAL
+ : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadExternalArrayPointer(LOperand* object) {
inputs_[0] = object;
@@ -1337,91 +1591,62 @@ class LLoadExternalArrayPointer: public LTemplateInstruction<1, 1, 0> {
};
-class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> {
+class LLoadKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- LLoadKeyedFastElement(LOperand* elements, LOperand* key) {
+ LLoadKeyed(LOperand* elements, LOperand* key) {
inputs_[0] = elements;
inputs_[1] = key;
}
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastElement, "load-keyed-fast-element")
- DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastElement)
-
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
-};
-
-
-class LLoadKeyedFastDoubleElement: public LTemplateInstruction<1, 2, 0> {
- public:
- LLoadKeyedFastDoubleElement(LOperand* elements, LOperand* key) {
- inputs_[0] = elements;
- inputs_[1] = key;
+ ElementsKind elements_kind() const {
+ return hydrogen()->elements_kind();
}
-
- LOperand* elements() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastDoubleElement,
- "load-keyed-fast-double-element")
- DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastDoubleElement)
-
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
-};
-
-
-class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> {
- public:
- LLoadKeyedSpecializedArrayElement(LOperand* external_pointer,
- LOperand* key) {
- inputs_[0] = external_pointer;
- inputs_[1] = key;
+ bool is_external() const {
+ return hydrogen()->is_external();
}
- LOperand* external_pointer() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
+ DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
+ DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedSpecializedArrayElement,
- "load-keyed-specialized-array-element")
- DECLARE_HYDROGEN_ACCESSOR(LoadKeyedSpecializedArrayElement)
-
- ElementsKind elements_kind() const {
- return hydrogen()->elements_kind();
- }
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
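
The three earlier keyed-load instructions (fast element, fast double element, specialized/external array element) collapse into a single LLoadKeyed; what used to be encoded in the instruction type is now recovered from the hydrogen instruction through is_external() and elements_kind(). A code generator can therefore dispatch on those accessors rather than on distinct classes; roughly (sketch, not the actual DoLoadKeyed; IsFastDoubleElementsKind comes from V8's elements-kind helpers, not from this hunk):

// Sketch of dispatching on the merged instruction.
static void EmitLoadKeyedSketch(LLoadKeyed* instr) {
  if (instr->is_external()) {
    // External/typed-array backing store: element width follows elements_kind().
    ElementsKind kind = instr->elements_kind();
    (void) kind;
  } else if (IsFastDoubleElementsKind(instr->elements_kind())) {
    // Unboxed double elements.
  } else {
    // Tagged (fast or holey) elements.
  }
}
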
-class LLoadKeyedGeneric: public LTemplateInstruction<1, 2, 0> {
+class LLoadKeyedGeneric V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
- LLoadKeyedGeneric(LOperand* object, LOperand* key) {
- inputs_[0] = object;
- inputs_[1] = key;
+ LLoadKeyedGeneric(LOperand* context, LOperand* object, LOperand* key) {
+ inputs_[0] = context;
+ inputs_[1] = object;
+ inputs_[2] = key;
}
- LOperand* object() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+ LOperand* key() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
};
-class LLoadGlobalCell: public LTemplateInstruction<1, 0, 0> {
+class LLoadGlobalCell V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
};
-class LLoadGlobalGeneric: public LTemplateInstruction<1, 1, 0> {
+class LLoadGlobalGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LLoadGlobalGeneric(LOperand* global_object) {
- inputs_[0] = global_object;
+ LLoadGlobalGeneric(LOperand* context, LOperand* global_object) {
+ inputs_[0] = context;
+ inputs_[1] = global_object;
}
- LOperand* global_object() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* global_object() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
@@ -1431,7 +1656,7 @@ class LLoadGlobalGeneric: public LTemplateInstruction<1, 1, 0> {
};
-class LStoreGlobalCell: public LTemplateInstruction<0, 1, 1> {
+class LStoreGlobalCell V8_FINAL : public LTemplateInstruction<0, 1, 1> {
public:
LStoreGlobalCell(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1446,16 +1671,19 @@ class LStoreGlobalCell: public LTemplateInstruction<0, 1, 1> {
};
-class LStoreGlobalGeneric: public LTemplateInstruction<0, 2, 0> {
+class LStoreGlobalGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
public:
- explicit LStoreGlobalGeneric(LOperand* global_object,
- LOperand* value) {
- inputs_[0] = global_object;
- inputs_[1] = value;
+ LStoreGlobalGeneric(LOperand* context,
+ LOperand* global_object,
+ LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = global_object;
+ inputs_[2] = value;
}
- LOperand* global_object() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* global_object() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(StoreGlobalGeneric, "store-global-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreGlobalGeneric)
@@ -1465,7 +1693,7 @@ class LStoreGlobalGeneric: public LTemplateInstruction<0, 2, 0> {
};
-class LLoadContextSlot: public LTemplateInstruction<1, 1, 0> {
+class LLoadContextSlot V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadContextSlot(LOperand* context) {
inputs_[0] = context;
@@ -1482,7 +1710,7 @@ class LLoadContextSlot: public LTemplateInstruction<1, 1, 0> {
};
-class LStoreContextSlot: public LTemplateInstruction<0, 2, 0> {
+class LStoreContextSlot V8_FINAL : public LTemplateInstruction<0, 2, 0> {
public:
LStoreContextSlot(LOperand* context, LOperand* value) {
inputs_[0] = context;
@@ -1497,11 +1725,11 @@ class LStoreContextSlot: public LTemplateInstruction<0, 2, 0> {
int slot_index() { return hydrogen()->slot_index(); }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LPushArgument: public LTemplateInstruction<0, 1, 0> {
+class LPushArgument V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LPushArgument(LOperand* value) {
inputs_[0] = value;
@@ -1513,7 +1741,7 @@ class LPushArgument: public LTemplateInstruction<0, 1, 0> {
};
-class LDrop: public LTemplateInstruction<0, 0, 0> {
+class LDrop V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
explicit LDrop(int count) : count_(count) { }
@@ -1526,20 +1754,54 @@ class LDrop: public LTemplateInstruction<0, 0, 0> {
};
-class LThisFunction: public LTemplateInstruction<1, 0, 0> {
+class LStoreCodeEntry V8_FINAL : public LTemplateInstruction<0, 1, 1> {
+ public:
+ LStoreCodeEntry(LOperand* function, LOperand* code_object) {
+ inputs_[0] = function;
+ temps_[0] = code_object;
+ }
+
+ LOperand* function() { return inputs_[0]; }
+ LOperand* code_object() { return temps_[0]; }
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreCodeEntry, "store-code-entry")
+ DECLARE_HYDROGEN_ACCESSOR(StoreCodeEntry)
+};
+
+
+class LInnerAllocatedObject V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LInnerAllocatedObject(LOperand* base_object) {
+ inputs_[0] = base_object;
+ }
+
+ LOperand* base_object() { return inputs_[0]; }
+ int offset() { return hydrogen()->offset(); }
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "sub-allocated-object")
+ DECLARE_HYDROGEN_ACCESSOR(InnerAllocatedObject)
+};
+
+
+class LThisFunction V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
};
-class LContext: public LTemplateInstruction<1, 0, 0> {
+class LContext V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(Context, "context")
+ DECLARE_HYDROGEN_ACCESSOR(Context)
};
-class LOuterContext: public LTemplateInstruction<1, 1, 0> {
+class LOuterContext V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LOuterContext(LOperand* context) {
inputs_[0] = context;
@@ -1551,14 +1813,20 @@ class LOuterContext: public LTemplateInstruction<1, 1, 0> {
};
-class LDeclareGlobals: public LTemplateInstruction<0, 0, 0> {
+class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
+ explicit LDeclareGlobals(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals")
DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals)
};
-class LGlobalObject: public LTemplateInstruction<1, 1, 0> {
+class LGlobalObject V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LGlobalObject(LOperand* context) {
inputs_[0] = context;
@@ -1570,7 +1838,7 @@ class LGlobalObject: public LTemplateInstruction<1, 1, 0> {
};
-class LGlobalReceiver: public LTemplateInstruction<1, 1, 0> {
+class LGlobalReceiver V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LGlobalReceiver(LOperand* global_object) {
inputs_[0] = global_object;
@@ -1582,73 +1850,84 @@ class LGlobalReceiver: public LTemplateInstruction<1, 1, 0> {
};
-class LCallConstantFunction: public LTemplateInstruction<1, 0, 0> {
+class LCallConstantFunction V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallConstantFunction, "call-constant-function")
DECLARE_HYDROGEN_ACCESSOR(CallConstantFunction)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<JSFunction> function() { return hydrogen()->function(); }
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LInvokeFunction: public LTemplateInstruction<1, 1, 0> {
+class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LInvokeFunction(LOperand* function) {
- inputs_[0] = function;
+ LInvokeFunction(LOperand* context, LOperand* function) {
+ inputs_[0] = context;
+ inputs_[1] = function;
}
- LOperand* function() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* function() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
- Handle<JSFunction> known_function() { return hydrogen()->known_function(); }
};
-class LCallKeyed: public LTemplateInstruction<1, 1, 0> {
+class LCallKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LCallKeyed(LOperand* key) {
- inputs_[0] = key;
+ LCallKeyed(LOperand* context, LOperand* key) {
+ inputs_[0] = context;
+ inputs_[1] = key;
}
- LOperand* key() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call-keyed")
DECLARE_HYDROGEN_ACCESSOR(CallKeyed)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallNamed: public LTemplateInstruction<1, 0, 0> {
+class LCallNamed V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
+ explicit LCallNamed(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CallNamed, "call-named")
DECLARE_HYDROGEN_ACCESSOR(CallNamed)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<String> name() const { return hydrogen()->name(); }
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallFunction: public LTemplateInstruction<1, 1, 0> {
+class LCallFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LCallFunction(LOperand* function) {
- inputs_[0] = function;
+ LCallFunction(LOperand* context, LOperand* function) {
+ inputs_[0] = context;
+ inputs_[1] = function;
}
- LOperand* function() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* function() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
DECLARE_HYDROGEN_ACCESSOR(CallFunction)
@@ -1657,8 +1936,14 @@ class LCallFunction: public LTemplateInstruction<1, 1, 0> {
};
-class LCallGlobal: public LTemplateInstruction<1, 0, 0> {
+class LCallGlobal V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
+ explicit LCallGlobal(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call-global")
DECLARE_HYDROGEN_ACCESSOR(CallGlobal)
@@ -1669,46 +1954,77 @@ class LCallGlobal: public LTemplateInstruction<1, 0, 0> {
};
-class LCallKnownGlobal: public LTemplateInstruction<1, 0, 0> {
+class LCallKnownGlobal V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallKnownGlobal, "call-known-global")
DECLARE_HYDROGEN_ACCESSOR(CallKnownGlobal)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- Handle<JSFunction> target() const { return hydrogen()->target(); }
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallNew: public LTemplateInstruction<1, 1, 0> {
+class LCallNew V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LCallNew(LOperand* constructor) {
- inputs_[0] = constructor;
+ LCallNew(LOperand* context, LOperand* constructor) {
+ inputs_[0] = context;
+ inputs_[1] = constructor;
}
- LOperand* constructor() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* constructor() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
DECLARE_HYDROGEN_ACCESSOR(CallNew)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallNewArray V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LCallNewArray(LOperand* context, LOperand* constructor) {
+ inputs_[0] = context;
+ inputs_[1] = constructor;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* constructor() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
+ DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallRuntime: public LTemplateInstruction<1, 0, 0> {
+class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
+ explicit LCallRuntime(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
+ virtual bool ClobbersDoubleRegisters() const V8_OVERRIDE {
+ return save_doubles() == kDontSaveFPRegs;
+ }
+
const Runtime::Function* function() const { return hydrogen()->function(); }
int arity() const { return hydrogen()->argument_count(); }
+ SaveFPRegsMode save_doubles() const { return hydrogen()->save_doubles(); }
};
-class LInteger32ToDouble: public LTemplateInstruction<1, 1, 0> {
+class LInteger32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LInteger32ToDouble(LOperand* value) {
inputs_[0] = value;
@@ -1720,7 +2036,20 @@ class LInteger32ToDouble: public LTemplateInstruction<1, 1, 0> {
};
-class LUint32ToDouble: public LTemplateInstruction<1, 1, 0> {
+class LInteger32ToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LInteger32ToSmi(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Integer32ToSmi, "int32-to-smi")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
+class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LUint32ToDouble(LOperand* value) {
inputs_[0] = value;
@@ -1732,7 +2061,20 @@ class LUint32ToDouble: public LTemplateInstruction<1, 1, 0> {
};
-class LNumberTagI: public LTemplateInstruction<1, 1, 0> {
+class LUint32ToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LUint32ToSmi(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Uint32ToSmi, "uint32-to-smi")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
+class LNumberTagI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LNumberTagI(LOperand* value) {
inputs_[0] = value;
@@ -1744,7 +2086,7 @@ class LNumberTagI: public LTemplateInstruction<1, 1, 0> {
};
-class LNumberTagU: public LTemplateInstruction<1, 1, 0> {
+class LNumberTagU V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LNumberTagU(LOperand* value) {
inputs_[0] = value;
@@ -1756,7 +2098,7 @@ class LNumberTagU: public LTemplateInstruction<1, 1, 0> {
};
-class LNumberTagD: public LTemplateInstruction<1, 1, 2> {
+class LNumberTagD V8_FINAL : public LTemplateInstruction<1, 1, 2> {
public:
LNumberTagD(LOperand* value, LOperand* temp, LOperand* temp2) {
inputs_[0] = value;
@@ -1769,21 +2111,33 @@ class LNumberTagD: public LTemplateInstruction<1, 1, 2> {
LOperand* temp2() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
+class LDoubleToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LDoubleToSmi(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DoubleToSmi, "double-to-smi")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+
+ bool truncating() { return hydrogen()->CanTruncateToInt32(); }
};
// Sometimes truncating conversion from a double to an int32.
-class LDoubleToI: public LTemplateInstruction<1, 1, 2> {
+class LDoubleToI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
- LDoubleToI(LOperand* value, LOperand* temp, LOperand* temp2) {
+ explicit LDoubleToI(LOperand* value) {
inputs_[0] = value;
- temps_[0] = temp;
- temps_[1] = temp2;
}
LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
@@ -1793,31 +2147,28 @@ class LDoubleToI: public LTemplateInstruction<1, 1, 2> {
// Truncating conversion from a tagged value to an int32.
-class LTaggedToI: public LTemplateInstruction<1, 1, 3> {
+class LTaggedToI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
public:
LTaggedToI(LOperand* value,
LOperand* temp,
- LOperand* temp2,
- LOperand* temp3) {
+ LOperand* temp2) {
inputs_[0] = value;
temps_[0] = temp;
temps_[1] = temp2;
- temps_[2] = temp3;
}
LOperand* value() { return inputs_[0]; }
LOperand* temp() { return temps_[0]; }
LOperand* temp2() { return temps_[1]; }
- LOperand* temp3() { return temps_[2]; }
DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
- DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+ DECLARE_HYDROGEN_ACCESSOR(Change)
bool truncating() { return hydrogen()->CanTruncateToInt32(); }
};
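
LDoubleToSmi, LDoubleToI and LTaggedToI all expose truncating(), which reports whether the hydrogen instruction may truncate to an int32 instead of deoptimizing when the conversion is lossy. At its core this is the familiar round-toward-zero double-to-int conversion (out-of-range and non-numeric inputs get extra handling in the generated code):

// Plain C++ illustration of the truncation these instructions perform
// in the common in-range case.
#include <stdint.h>
static int32_t TruncateToInt32Sketch(double value) {
  return static_cast<int32_t>(value);  // 3.9 -> 3, -3.9 -> -3
}
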
-class LSmiTag: public LTemplateInstruction<1, 1, 0> {
+class LSmiTag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LSmiTag(LOperand* value) {
inputs_[0] = value;
@@ -1829,7 +2180,7 @@ class LSmiTag: public LTemplateInstruction<1, 1, 0> {
};
-class LNumberUntagD: public LTemplateInstruction<1, 1, 0> {
+class LNumberUntagD V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LNumberUntagD(LOperand* value) {
inputs_[0] = value;
@@ -1842,7 +2193,7 @@ class LNumberUntagD: public LTemplateInstruction<1, 1, 0> {
};
-class LSmiUntag: public LTemplateInstruction<1, 1, 0> {
+class LSmiUntag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
LSmiUntag(LOperand* value, bool needs_check)
: needs_check_(needs_check) {
@@ -1859,7 +2210,7 @@ class LSmiUntag: public LTemplateInstruction<1, 1, 0> {
};
-class LStoreNamedField: public LTemplateInstruction<0, 2, 1> {
+class LStoreNamedField V8_FINAL : public LTemplateInstruction<0, 2, 1> {
public:
LStoreNamedField(LOperand* object, LOperand* value, LOperand* temp) {
inputs_[0] = object;
@@ -1874,222 +2225,197 @@ class LStoreNamedField: public LTemplateInstruction<0, 2, 1> {
DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- Handle<Object> name() const { return hydrogen()->name(); }
- bool is_in_object() { return hydrogen()->is_in_object(); }
- int offset() { return hydrogen()->offset(); }
- Handle<Map> transition() const { return hydrogen()->transition(); }
+ Handle<Map> transition() const { return hydrogen()->transition_map(); }
+ Representation representation() const {
+ return hydrogen()->field_representation();
+ }
};
-class LStoreNamedGeneric: public LTemplateInstruction<0, 2, 0> {
+class LStoreNamedGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
public:
- LStoreNamedGeneric(LOperand* object, LOperand* value) {
- inputs_[0] = object;
- inputs_[1] = value;
+ LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = object;
+ inputs_[2] = value;
}
- LOperand* object() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<Object> name() const { return hydrogen()->name(); }
StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
};
-class LStoreKeyedFastElement: public LTemplateInstruction<0, 3, 0> {
+class LStoreKeyed V8_FINAL : public LTemplateInstruction<0, 3, 0> {
public:
- LStoreKeyedFastElement(LOperand* object, LOperand* key, LOperand* value) {
+ LStoreKeyed(LOperand* object, LOperand* key, LOperand* value) {
inputs_[0] = object;
inputs_[1] = key;
inputs_[2] = value;
}
- LOperand* object() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement,
- "store-keyed-fast-element")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastElement)
-
- virtual void PrintDataTo(StringStream* stream);
-
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
-};
-
-
-class LStoreKeyedFastDoubleElement: public LTemplateInstruction<0, 3, 0> {
- public:
- LStoreKeyedFastDoubleElement(LOperand* elements,
- LOperand* key,
- LOperand* value) {
- inputs_[0] = elements;
- inputs_[1] = key;
- inputs_[2] = value;
- }
-
+ bool is_external() const { return hydrogen()->is_external(); }
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
+ ElementsKind elements_kind() const {
+ return hydrogen()->elements_kind();
+ }
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastDoubleElement,
- "store-keyed-fast-double-element")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastDoubleElement)
-
- virtual void PrintDataTo(StringStream* stream);
-
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed")
+ DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
+ uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
-class LStoreKeyedGeneric: public LTemplateInstruction<0, 3, 0> {
+class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 4, 0> {
public:
- LStoreKeyedGeneric(LOperand* obj, LOperand* key, LOperand* value) {
- inputs_[0] = obj;
- inputs_[1] = key;
- inputs_[2] = value;
+ LStoreKeyedGeneric(LOperand* context,
+ LOperand* obj,
+ LOperand* key,
+ LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = obj;
+ inputs_[2] = key;
+ inputs_[3] = value;
}
- LOperand* object() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+ LOperand* key() { return inputs_[2]; }
+ LOperand* value() { return inputs_[3]; }
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
};
-class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 0> {
- public:
- LStoreKeyedSpecializedArrayElement(LOperand* external_pointer,
- LOperand* key,
- LOperand* value) {
- inputs_[0] = external_pointer;
- inputs_[1] = key;
- inputs_[2] = value;
- }
-
- LOperand* external_pointer() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedSpecializedArrayElement,
- "store-keyed-specialized-array-element")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyedSpecializedArrayElement)
-
- ElementsKind elements_kind() const { return hydrogen()->elements_kind(); }
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
-};
-
-class LTransitionElementsKind: public LTemplateInstruction<1, 1, 2> {
+class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 2, 1> {
public:
LTransitionElementsKind(LOperand* object,
- LOperand* new_map_temp,
- LOperand* temp) {
+ LOperand* context,
+ LOperand* new_map_temp) {
inputs_[0] = object;
+ inputs_[1] = context;
temps_[0] = new_map_temp;
- temps_[1] = temp;
}
+ LOperand* context() { return inputs_[1]; }
LOperand* object() { return inputs_[0]; }
LOperand* new_map_temp() { return temps_[0]; }
- LOperand* temp() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind,
"transition-elements-kind")
DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- Handle<Map> original_map() { return hydrogen()->original_map(); }
- Handle<Map> transitioned_map() { return hydrogen()->transitioned_map(); }
+ Handle<Map> original_map() { return hydrogen()->original_map().handle(); }
+ Handle<Map> transitioned_map() {
+ return hydrogen()->transitioned_map().handle();
+ }
+ ElementsKind from_kind() { return hydrogen()->from_kind(); }
+ ElementsKind to_kind() { return hydrogen()->to_kind(); }
};
-class LStringAdd: public LTemplateInstruction<1, 2, 0> {
+class LTrapAllocationMemento V8_FINAL : public LTemplateInstruction<0, 1, 1> {
public:
- LStringAdd(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
+ LTrapAllocationMemento(LOperand* object,
+ LOperand* temp) {
+ inputs_[0] = object;
+ temps_[0] = temp;
}
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
+ LOperand* object() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
- DECLARE_HYDROGEN_ACCESSOR(StringAdd)
+ DECLARE_CONCRETE_INSTRUCTION(TrapAllocationMemento,
+ "trap-allocation-memento")
};
-
-class LStringCharCodeAt: public LTemplateInstruction<1, 2, 0> {
+class LStringAdd V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
- LStringCharCodeAt(LOperand* string, LOperand* index) {
- inputs_[0] = string;
- inputs_[1] = index;
+ LStringAdd(LOperand* context, LOperand* left, LOperand* right) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
}
- LOperand* string() { return inputs_[0]; }
- LOperand* index() { return inputs_[1]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
- DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
- DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
+ DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
+ DECLARE_HYDROGEN_ACCESSOR(StringAdd)
};
-class LStringCharFromCode: public LTemplateInstruction<1, 1, 0> {
+
+class LStringCharCodeAt V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
- explicit LStringCharFromCode(LOperand* char_code) {
- inputs_[0] = char_code;
+ LStringCharCodeAt(LOperand* context, LOperand* string, LOperand* index) {
+ inputs_[0] = context;
+ inputs_[1] = string;
+ inputs_[2] = index;
}
- LOperand* char_code() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* string() { return inputs_[1]; }
+ LOperand* index() { return inputs_[2]; }
- DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code")
- DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode)
+ DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
+ DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
};
-class LStringLength: public LTemplateInstruction<1, 1, 0> {
+class LStringCharFromCode V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LStringLength(LOperand* string) {
- inputs_[0] = string;
+ LStringCharFromCode(LOperand* context, LOperand* char_code) {
+ inputs_[0] = context;
+ inputs_[1] = char_code;
}
- LOperand* string() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* char_code() { return inputs_[1]; }
- DECLARE_CONCRETE_INSTRUCTION(StringLength, "string-length")
- DECLARE_HYDROGEN_ACCESSOR(StringLength)
+ DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code")
+ DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode)
};
-class LCheckFunction: public LTemplateInstruction<0, 1, 0> {
+class LCheckValue V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
- explicit LCheckFunction(LOperand* value) {
+ explicit LCheckValue(LOperand* value) {
inputs_[0] = value;
}
LOperand* value() { return inputs_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check-function")
- DECLARE_HYDROGEN_ACCESSOR(CheckFunction)
+ DECLARE_CONCRETE_INSTRUCTION(CheckValue, "check-value")
+ DECLARE_HYDROGEN_ACCESSOR(CheckValue)
};
-class LCheckInstanceType: public LTemplateInstruction<0, 1, 0> {
+class LCheckInstanceType V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LCheckInstanceType(LOperand* value) {
inputs_[0] = value;
@@ -2102,7 +2428,7 @@ class LCheckInstanceType: public LTemplateInstruction<0, 1, 0> {
};
-class LCheckMaps: public LTemplateInstruction<0, 1, 0> {
+class LCheckMaps V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LCheckMaps(LOperand* value) {
inputs_[0] = value;
@@ -2115,25 +2441,7 @@ class LCheckMaps: public LTemplateInstruction<0, 1, 0> {
};
-class LCheckPrototypeMaps: public LTemplateInstruction<0, 0, 2> {
- public:
- LCheckPrototypeMaps(LOperand* temp, LOperand* temp2) {
- temps_[0] = temp;
- temps_[1] = temp2;
- }
-
- LOperand* temp() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps, "check-prototype-maps")
- DECLARE_HYDROGEN_ACCESSOR(CheckPrototypeMaps)
-
- Handle<JSObject> prototype() const { return hydrogen()->prototype(); }
- Handle<JSObject> holder() const { return hydrogen()->holder(); }
-};
-
-
-class LCheckSmi: public LTemplateInstruction<0, 1, 0> {
+class LCheckSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCheckSmi(LOperand* value) {
inputs_[0] = value;
@@ -2145,7 +2453,7 @@ class LCheckSmi: public LTemplateInstruction<0, 1, 0> {
};
-class LCheckNonSmi: public LTemplateInstruction<0, 1, 0> {
+class LCheckNonSmi V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LCheckNonSmi(LOperand* value) {
inputs_[0] = value;
@@ -2154,10 +2462,11 @@ class LCheckNonSmi: public LTemplateInstruction<0, 1, 0> {
LOperand* value() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi")
+ DECLARE_HYDROGEN_ACCESSOR(CheckHeapObject)
};
-class LClampDToUint8: public LTemplateInstruction<1, 1, 1> {
+class LClampDToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LClampDToUint8(LOperand* unclamped, LOperand* temp) {
inputs_[0] = unclamped;
@@ -2171,7 +2480,7 @@ class LClampDToUint8: public LTemplateInstruction<1, 1, 1> {
};
-class LClampIToUint8: public LTemplateInstruction<1, 1, 0> {
+class LClampIToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LClampIToUint8(LOperand* unclamped) {
inputs_[0] = unclamped;
@@ -2183,7 +2492,7 @@ class LClampIToUint8: public LTemplateInstruction<1, 1, 0> {
};
-class LClampTToUint8: public LTemplateInstruction<1, 1, 1> {
+class LClampTToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LClampTToUint8(LOperand* unclamped, LOperand* temp) {
inputs_[0] = unclamped;
@@ -2197,59 +2506,55 @@ class LClampTToUint8: public LTemplateInstruction<1, 1, 1> {
};
-class LAllocateObject: public LTemplateInstruction<1, 0, 2> {
+class LAllocate V8_FINAL : public LTemplateInstruction<1, 2, 2> {
public:
- LAllocateObject(LOperand* temp, LOperand* temp2) {
- temps_[0] = temp;
+ LAllocate(LOperand* context,
+ LOperand* size,
+ LOperand* temp1,
+ LOperand* temp2) {
+ inputs_[0] = context;
+ inputs_[1] = size;
+ temps_[0] = temp1;
temps_[1] = temp2;
}
- LOperand* temp() { return temps_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* size() { return inputs_[1]; }
+ LOperand* temp1() { return temps_[0]; }
LOperand* temp2() { return temps_[1]; }
- DECLARE_CONCRETE_INSTRUCTION(AllocateObject, "allocate-object")
- DECLARE_HYDROGEN_ACCESSOR(AllocateObject)
-};
-
-
-class LFastLiteral: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(FastLiteral, "fast-literal")
- DECLARE_HYDROGEN_ACCESSOR(FastLiteral)
-};
-
-
-class LArrayLiteral: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral, "array-literal")
- DECLARE_HYDROGEN_ACCESSOR(ArrayLiteral)
+ DECLARE_CONCRETE_INSTRUCTION(Allocate, "allocate")
+ DECLARE_HYDROGEN_ACCESSOR(Allocate)
};
-class LObjectLiteral: public LTemplateInstruction<1, 0, 0> {
+class LRegExpLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
- DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral, "object-literal")
- DECLARE_HYDROGEN_ACCESSOR(ObjectLiteral)
-};
+ explicit LRegExpLiteral(LOperand* context) {
+ inputs_[0] = context;
+ }
+ LOperand* context() { return inputs_[0]; }
-class LRegExpLiteral: public LTemplateInstruction<1, 0, 0> {
- public:
DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal")
DECLARE_HYDROGEN_ACCESSOR(RegExpLiteral)
};
-class LFunctionLiteral: public LTemplateInstruction<1, 0, 0> {
+class LFunctionLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
+ explicit LFunctionLiteral(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal")
DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral)
-
- Handle<SharedFunctionInfo> shared_info() { return hydrogen()->shared_info(); }
};
-class LToFastProperties: public LTemplateInstruction<1, 1, 0> {
+class LToFastProperties V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LToFastProperties(LOperand* value) {
inputs_[0] = value;
@@ -2262,19 +2567,21 @@ class LToFastProperties: public LTemplateInstruction<1, 1, 0> {
};
-class LTypeof: public LTemplateInstruction<1, 1, 0> {
+class LTypeof V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LTypeof(LOperand* value) {
- inputs_[0] = value;
+ LTypeof(LOperand* context, LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = value;
}
- LOperand* value() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
};
-class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
+class LTypeofIsAndBranch V8_FINAL : public LControlInstruction<1, 0> {
public:
explicit LTypeofIsAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -2287,11 +2594,11 @@ class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
Handle<String> type_literal() { return hydrogen()->type_literal(); }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LIsConstructCallAndBranch: public LControlInstruction<0, 1> {
+class LIsConstructCallAndBranch V8_FINAL : public LControlInstruction<0, 1> {
public:
explicit LIsConstructCallAndBranch(LOperand* temp) {
temps_[0] = temp;
@@ -2304,45 +2611,25 @@ class LIsConstructCallAndBranch: public LControlInstruction<0, 1> {
};
-class LDeleteProperty: public LTemplateInstruction<1, 2, 0> {
+class LOsrEntry V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
- LDeleteProperty(LOperand* object, LOperand* key) {
- inputs_[0] = object;
- inputs_[1] = key;
- }
+ LOsrEntry() {}
- LOperand* object() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(DeleteProperty, "delete-property")
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ return false;
+ }
+ DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
};
-class LOsrEntry: public LTemplateInstruction<0, 0, 0> {
+class LStackCheck V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
- LOsrEntry();
-
- DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
-
- LOperand** SpilledRegisterArray() { return register_spills_; }
- LOperand** SpilledDoubleRegisterArray() { return double_register_spills_; }
-
- void MarkSpilledRegister(int allocation_index, LOperand* spill_operand);
- void MarkSpilledDoubleRegister(int allocation_index,
- LOperand* spill_operand);
-
- private:
- // Arrays of spill slot operands for registers with an assigned spill
- // slot, i.e., that must also be restored to the spill slot on OSR entry.
- // NULL if the register has no assigned spill slot. Indexed by allocation
- // index.
- LOperand* register_spills_[Register::kNumAllocatableRegisters];
- LOperand* double_register_spills_[DoubleRegister::kNumAllocatableRegisters];
-};
+ explicit LStackCheck(LOperand* context) {
+ inputs_[0] = context;
+ }
+ LOperand* context() { return inputs_[0]; }
-class LStackCheck: public LTemplateInstruction<0, 0, 0> {
- public:
DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
DECLARE_HYDROGEN_ACCESSOR(StackCheck)
@@ -2353,33 +2640,21 @@ class LStackCheck: public LTemplateInstruction<0, 0, 0> {
};
-class LIn: public LTemplateInstruction<1, 2, 0> {
+class LForInPrepareMap V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- LIn(LOperand* key, LOperand* object) {
- inputs_[0] = key;
+ LForInPrepareMap(LOperand* context, LOperand* object) {
+ inputs_[0] = context;
inputs_[1] = object;
}
- LOperand* key() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
LOperand* object() { return inputs_[1]; }
- DECLARE_CONCRETE_INSTRUCTION(In, "in")
-};
-
-
-class LForInPrepareMap: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LForInPrepareMap(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap, "for-in-prepare-map")
};
-class LForInCacheArray: public LTemplateInstruction<1, 1, 0> {
+class LForInCacheArray V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LForInCacheArray(LOperand* map) {
inputs_[0] = map;
@@ -2395,7 +2670,7 @@ class LForInCacheArray: public LTemplateInstruction<1, 1, 0> {
};
-class LCheckMapValue: public LTemplateInstruction<0, 2, 0> {
+class LCheckMapValue V8_FINAL : public LTemplateInstruction<0, 2, 0> {
public:
LCheckMapValue(LOperand* value, LOperand* map) {
inputs_[0] = value;
@@ -2409,7 +2684,7 @@ class LCheckMapValue: public LTemplateInstruction<0, 2, 0> {
};
-class LLoadFieldByIndex: public LTemplateInstruction<1, 2, 0> {
+class LLoadFieldByIndex V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LLoadFieldByIndex(LOperand* object, LOperand* index) {
inputs_[0] = object;
@@ -2424,17 +2699,17 @@ class LLoadFieldByIndex: public LTemplateInstruction<1, 2, 0> {
class LChunkBuilder;
-class LPlatformChunk: public LChunk {
+class LPlatformChunk V8_FINAL : public LChunk {
public:
LPlatformChunk(CompilationInfo* info, HGraph* graph)
: LChunk(info, graph) { }
- int GetNextSpillIndex(bool is_double);
- LOperand* GetNextSpillSlot(bool is_double);
+ int GetNextSpillIndex(RegisterKind kind);
+ LOperand* GetNextSpillSlot(RegisterKind kind);
};
-class LChunkBuilder BASE_EMBEDDED {
+class LChunkBuilder V8_FINAL BASE_EMBEDDED {
public:
LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
: chunk_(NULL),
@@ -2454,11 +2729,29 @@ class LChunkBuilder BASE_EMBEDDED {
// Build the sequence for the graph.
LPlatformChunk* Build();
+ LInstruction* CheckElideControlInstruction(HControlInstruction* instr);
+
// Declare methods that deal with the individual node types.
#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
#undef DECLARE_DO
+ LInstruction* DoMultiplyAdd(HMul* mul, HValue* addend);
+
+ static bool HasMagicNumberForDivisor(int32_t divisor);
+ static HValue* SimplifiedDivisorForMathFloorOfDiv(HValue* val);
+
+ LInstruction* DoMathFloor(HUnaryMathOperation* instr);
+ LInstruction* DoMathRound(HUnaryMathOperation* instr);
+ LInstruction* DoMathAbs(HUnaryMathOperation* instr);
+ LInstruction* DoMathLog(HUnaryMathOperation* instr);
+ LInstruction* DoMathSin(HUnaryMathOperation* instr);
+ LInstruction* DoMathCos(HUnaryMathOperation* instr);
+ LInstruction* DoMathTan(HUnaryMathOperation* instr);
+ LInstruction* DoMathExp(HUnaryMathOperation* instr);
+ LInstruction* DoMathSqrt(HUnaryMathOperation* instr);
+ LInstruction* DoMathPowHalf(HUnaryMathOperation* instr);
+
private:
enum Status {
UNUSED,
@@ -2477,7 +2770,7 @@ class LChunkBuilder BASE_EMBEDDED {
bool is_done() const { return status_ == DONE; }
bool is_aborted() const { return status_ == ABORTED; }
- void Abort(const char* reason);
+ void Abort(BailoutReason reason);
// Methods for getting operands for Use / Define / Temp.
LUnallocated* ToUnallocated(Register reg);
@@ -2515,6 +2808,9 @@ class LChunkBuilder BASE_EMBEDDED {
MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value);
MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value);
+ // An input operand in a constant operand.
+ MUST_USE_RESULT LOperand* UseConstant(HValue* value);
+
// An input operand in register, stack slot or a constant operand.
// Will not be moved to a register even if one is freely available.
MUST_USE_RESULT LOperand* UseAny(HValue* value);
@@ -2556,7 +2852,8 @@ class LChunkBuilder BASE_EMBEDDED {
CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
- int* argument_index_accumulator);
+ int* argument_index_accumulator,
+ ZoneList<HValue*>* objects_to_materialize);
void VisitInstruction(HInstruction* current);
@@ -2566,7 +2863,7 @@ class LChunkBuilder BASE_EMBEDDED {
LInstruction* DoArithmeticD(Token::Value op,
HArithmeticBinaryOperation* instr);
LInstruction* DoArithmeticT(Token::Value op,
- HArithmeticBinaryOperation* instr);
+ HBinaryOperation* instr);
LPlatformChunk* chunk_;
CompilationInfo* info_;
diff --git a/deps/v8/src/mips/macro-assembler-mips.cc b/deps/v8/src/mips/macro-assembler-mips.cc
index 052387ab0..e0cb1ba82 100644
--- a/deps/v8/src/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/mips/macro-assembler-mips.cc
@@ -29,11 +29,13 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_MIPS)
+#if V8_TARGET_ARCH_MIPS
#include "bootstrapper.h"
#include "codegen.h"
+#include "cpu-profiler.h"
#include "debug.h"
+#include "isolate-inl.h"
#include "runtime.h"
namespace v8 {
@@ -83,11 +85,11 @@ void MacroAssembler::StoreRoot(Register source,
void MacroAssembler::LoadHeapObject(Register result,
Handle<HeapObject> object) {
+ AllowDeferredHandleDereference using_raw_address;
if (isolate()->heap()->InNewSpace(*object)) {
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(object);
+ Handle<Cell> cell = isolate()->factory()->NewCell(object);
li(result, Operand(cell));
- lw(result, FieldMemOperand(result, JSGlobalPropertyCell::kValueOffset));
+ lw(result, FieldMemOperand(result, Cell::kValueOffset));
} else {
li(result, Operand(object));
}
@@ -118,8 +120,8 @@ void MacroAssembler::PopSafepointRegisters() {
void MacroAssembler::PushSafepointRegistersAndDoubles() {
PushSafepointRegisters();
- Subu(sp, sp, Operand(FPURegister::kNumAllocatableRegisters * kDoubleSize));
- for (int i = 0; i < FPURegister::kNumAllocatableRegisters; i+=2) {
+ Subu(sp, sp, Operand(FPURegister::NumAllocatableRegisters() * kDoubleSize));
+ for (int i = 0; i < FPURegister::NumAllocatableRegisters(); i+=2) {
FPURegister reg = FPURegister::FromAllocationIndex(i);
sdc1(reg, MemOperand(sp, i * kDoubleSize));
}
@@ -127,11 +129,11 @@ void MacroAssembler::PushSafepointRegistersAndDoubles() {
void MacroAssembler::PopSafepointRegistersAndDoubles() {
- for (int i = 0; i < FPURegister::kNumAllocatableRegisters; i+=2) {
+ for (int i = 0; i < FPURegister::NumAllocatableRegisters(); i+=2) {
FPURegister reg = FPURegister::FromAllocationIndex(i);
ldc1(reg, MemOperand(sp, i * kDoubleSize));
}
- Addu(sp, sp, Operand(FPURegister::kNumAllocatableRegisters * kDoubleSize));
+ Addu(sp, sp, Operand(FPURegister::NumAllocatableRegisters() * kDoubleSize));
PopSafepointRegisters();
}
@@ -167,7 +169,7 @@ MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
UNIMPLEMENTED_MIPS();
// General purpose registers are pushed last on the stack.
- int doubles_size = FPURegister::kNumAllocatableRegisters * kDoubleSize;
+ int doubles_size = FPURegister::NumAllocatableRegisters() * kDoubleSize;
int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
return MemOperand(sp, doubles_size + register_offset);
}
@@ -247,15 +249,11 @@ void MacroAssembler::RecordWrite(Register object,
SmiCheck smi_check) {
ASSERT(!AreAliased(object, address, value, t8));
ASSERT(!AreAliased(object, address, value, t9));
- // The compiled code assumes that record write doesn't change the
- // context register, so we check that none of the clobbered
- // registers are cp.
- ASSERT(!address.is(cp) && !value.is(cp));
if (emit_debug_code()) {
lw(at, MemOperand(address));
Assert(
- eq, "Wrong address or value passed to RecordWrite", at, Operand(value));
+ eq, kWrongAddressOrValuePassedToRecordWrite, at, Operand(value));
}
Label done;
@@ -357,7 +355,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
lw(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
// In debug mode, make sure the lexical context is set.
#ifdef DEBUG
- Check(ne, "we should not have an empty lexical context",
+ Check(ne, kWeShouldNotHaveAnEmptyLexicalContext,
scratch, Operand(zero_reg));
#endif
@@ -369,12 +367,11 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
// Check the context is a native context.
if (emit_debug_code()) {
- // TODO(119): Avoid push(holder_reg)/pop(holder_reg).
push(holder_reg); // Temporarily save holder on the stack.
// Read the first word and compare to the native_context_map.
lw(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
LoadRoot(at, Heap::kNativeContextMapRootIndex);
- Check(eq, "JSGlobalObject::native_context should be a native context.",
+ Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext,
holder_reg, Operand(at));
pop(holder_reg); // Restore holder.
}
@@ -385,16 +382,15 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
// Check the context is a native context.
if (emit_debug_code()) {
- // TODO(119): Avoid push(holder_reg)/pop(holder_reg).
push(holder_reg); // Temporarily save holder on the stack.
mov(holder_reg, at); // Move at to its holding place.
LoadRoot(at, Heap::kNullValueRootIndex);
- Check(ne, "JSGlobalProxy::context() should not be null.",
+ Check(ne, kJSGlobalProxyContextShouldNotBeNull,
holder_reg, Operand(at));
lw(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
LoadRoot(at, Heap::kNativeContextMapRootIndex);
- Check(eq, "JSGlobalObject::native_context should be a native context.",
+ Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext,
holder_reg, Operand(at));
// Restore at is not needed. at is reloaded below.
pop(holder_reg); // Restore holder.
@@ -769,6 +765,7 @@ void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
}
}
+
//------------Pseudo-instructions-------------
void MacroAssembler::li(Register rd, Operand j, LiFlags mode) {
@@ -853,7 +850,6 @@ void MacroAssembler::MultiPopReversed(RegList regs) {
void MacroAssembler::MultiPushFPU(RegList regs) {
- CpuFeatures::Scope scope(FPU);
int16_t num_to_push = NumberOfBitsSet(regs);
int16_t stack_offset = num_to_push * kDoubleSize;
@@ -868,7 +864,6 @@ void MacroAssembler::MultiPushFPU(RegList regs) {
void MacroAssembler::MultiPushReversedFPU(RegList regs) {
- CpuFeatures::Scope scope(FPU);
int16_t num_to_push = NumberOfBitsSet(regs);
int16_t stack_offset = num_to_push * kDoubleSize;
@@ -883,7 +878,6 @@ void MacroAssembler::MultiPushReversedFPU(RegList regs) {
void MacroAssembler::MultiPopFPU(RegList regs) {
- CpuFeatures::Scope scope(FPU);
int16_t stack_offset = 0;
for (int16_t i = 0; i < kNumRegisters; i++) {
@@ -897,7 +891,6 @@ void MacroAssembler::MultiPopFPU(RegList regs) {
void MacroAssembler::MultiPopReversedFPU(RegList regs) {
- CpuFeatures::Scope scope(FPU);
int16_t stack_offset = 0;
for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
@@ -1026,6 +1019,7 @@ void MacroAssembler::Trunc_uw_d(FPURegister fd,
mtc1(t8, fd);
}
+
void MacroAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) {
if (kArchVariant == kLoongson && fd.is(fs)) {
mfc1(t8, FPURegister::from_code(fs.code() + 1));
@@ -1036,6 +1030,7 @@ void MacroAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) {
}
}
+
void MacroAssembler::Round_w_d(FPURegister fd, FPURegister fs) {
if (kArchVariant == kLoongson && fd.is(fs)) {
mfc1(t8, FPURegister::from_code(fs.code() + 1));
@@ -1108,6 +1103,7 @@ void MacroAssembler::BranchF(Label* target,
FPURegister cmp1,
FPURegister cmp2,
BranchDelaySlot bd) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
if (cc == al) {
Branch(bd, target);
return;
@@ -1125,23 +1121,19 @@ void MacroAssembler::BranchF(Label* target,
// have been handled by the caller.
// Unsigned conditions are treated as their signed counterpart.
switch (cc) {
- case Uless:
- case less:
+ case lt:
c(OLT, D, cmp1, cmp2);
bc1t(target);
break;
- case Ugreater:
- case greater:
+ case gt:
c(ULE, D, cmp1, cmp2);
bc1f(target);
break;
- case Ugreater_equal:
- case greater_equal:
+ case ge:
c(ULT, D, cmp1, cmp2);
bc1f(target);
break;
- case Uless_equal:
- case less_equal:
+ case le:
c(OLE, D, cmp1, cmp2);
bc1t(target);
break;
@@ -1149,10 +1141,18 @@ void MacroAssembler::BranchF(Label* target,
c(EQ, D, cmp1, cmp2);
bc1t(target);
break;
+ case ueq:
+ c(UEQ, D, cmp1, cmp2);
+ bc1t(target);
+ break;
case ne:
c(EQ, D, cmp1, cmp2);
bc1f(target);
break;
+ case nue:
+ c(UEQ, D, cmp1, cmp2);
+ bc1f(target);
+ break;
default:
CHECK(0);
};
@@ -1165,7 +1165,6 @@ void MacroAssembler::BranchF(Label* target,
void MacroAssembler::Move(FPURegister dst, double imm) {
- ASSERT(CpuFeatures::IsEnabled(FPU));
static const DoubleRepresentation minus_zero(-0.0);
static const DoubleRepresentation zero(0.0);
DoubleRepresentation value(imm);
@@ -1296,250 +1295,76 @@ void MacroAssembler::Clz(Register rd, Register rs) {
}
-// Tries to get a signed int32 out of a double precision floating point heap
-// number. Rounds towards 0. Branch to 'not_int32' if the double is out of the
-// 32bits signed integer range.
-// This method implementation differs from the ARM version for performance
-// reasons.
-void MacroAssembler::ConvertToInt32(Register source,
- Register dest,
- Register scratch,
- Register scratch2,
- FPURegister double_scratch,
- Label *not_int32) {
- Label right_exponent, done;
- // Get exponent word (ENDIAN issues).
- lw(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset));
- // Get exponent alone in scratch2.
- And(scratch2, scratch, Operand(HeapNumber::kExponentMask));
- // Load dest with zero. We use this either for the final shift or
- // for the answer.
- mov(dest, zero_reg);
- // Check whether the exponent matches a 32 bit signed int that is not a Smi.
- // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). This is
- // the exponent that we are fastest at and also the highest exponent we can
- // handle here.
- const uint32_t non_smi_exponent =
- (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
- // If we have a match of the int32-but-not-Smi exponent then skip some logic.
- Branch(&right_exponent, eq, scratch2, Operand(non_smi_exponent));
- // If the exponent is higher than that then go to not_int32 case. This
- // catches numbers that don't fit in a signed int32, infinities and NaNs.
- Branch(not_int32, gt, scratch2, Operand(non_smi_exponent));
-
- // We know the exponent is smaller than 30 (biased). If it is less than
- // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, i.e.
- // it rounds to zero.
- const uint32_t zero_exponent =
- (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
- Subu(scratch2, scratch2, Operand(zero_exponent));
- // Dest already has a Smi zero.
- Branch(&done, lt, scratch2, Operand(zero_reg));
- if (!CpuFeatures::IsSupported(FPU)) {
- // We have a shifted exponent between 0 and 30 in scratch2.
- srl(dest, scratch2, HeapNumber::kExponentShift);
- // We now have the exponent in dest. Subtract from 30 to get
- // how much to shift down.
- li(at, Operand(30));
- subu(dest, at, dest);
- }
- bind(&right_exponent);
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
- // MIPS FPU instructions implementing double precision to integer
- // conversion using round to zero. Since the FP value was qualified
- // above, the resulting integer should be a legal int32.
- // The original 'Exponent' word is still in scratch.
- lwc1(double_scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset));
- mtc1(scratch, FPURegister::from_code(double_scratch.code() + 1));
- trunc_w_d(double_scratch, double_scratch);
- mfc1(dest, double_scratch);
- } else {
- // On entry, dest has final downshift, scratch has original sign/exp/mant.
- // Save sign bit in top bit of dest.
- And(scratch2, scratch, Operand(0x80000000));
- Or(dest, dest, Operand(scratch2));
- // Put back the implicit 1, just above mantissa field.
- Or(scratch, scratch, Operand(1 << HeapNumber::kExponentShift));
-
- // Shift up the mantissa bits to take up the space the exponent used to
- // take. We just orred in the implicit bit so that took care of one and
- // we want to leave the sign bit 0 so we subtract 2 bits from the shift
- // distance. But we want to clear the sign-bit so shift one more bit
- // left, then shift right one bit.
- const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
- sll(scratch, scratch, shift_distance + 1);
- srl(scratch, scratch, 1);
-
- // Get the second half of the double. For some exponents we don't
- // actually need this because the bits get shifted out again, but
- // it's probably slower to test than just to do it.
- lw(scratch2, FieldMemOperand(source, HeapNumber::kMantissaOffset));
- // Extract the top 10 bits, and insert those bottom 10 bits of scratch.
- // The width of the field here is the same as the shift amount above.
- const int field_width = shift_distance;
- Ext(scratch2, scratch2, 32-shift_distance, field_width);
- Ins(scratch, scratch2, 0, field_width);
- // Move down according to the exponent.
- srlv(scratch, scratch, dest);
- // Prepare the negative version of our integer.
- subu(scratch2, zero_reg, scratch);
- // Trick to check sign bit (msb) held in dest, count leading zero.
- // 0 indicates negative, save negative version with conditional move.
- Clz(dest, dest);
- Movz(scratch, scratch2, dest);
- mov(dest, scratch);
- }
- bind(&done);
-}
-
-
void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
- FPURegister result,
+ Register result,
DoubleRegister double_input,
- Register scratch1,
+ Register scratch,
+ DoubleRegister double_scratch,
Register except_flag,
CheckForInexactConversion check_inexact) {
- ASSERT(CpuFeatures::IsSupported(FPU));
- CpuFeatures::Scope scope(FPU);
+ ASSERT(!result.is(scratch));
+ ASSERT(!double_input.is(double_scratch));
+ ASSERT(!except_flag.is(scratch));
+
+ Label done;
+
+ // Clear the except flag (0 = no exception)
+ mov(except_flag, zero_reg);
+
+ // Test for values that can be exactly represented as a signed 32-bit integer.
+ cvt_w_d(double_scratch, double_input);
+ mfc1(result, double_scratch);
+ cvt_d_w(double_scratch, double_scratch);
+ BranchF(&done, NULL, eq, double_input, double_scratch);
int32_t except_mask = kFCSRFlagMask; // Assume interested in all exceptions.
if (check_inexact == kDontCheckForInexactConversion) {
- // Ingore inexact exceptions.
+ // Ignore inexact exceptions.
except_mask &= ~kFCSRInexactFlagMask;
}
// Save FCSR.
- cfc1(scratch1, FCSR);
+ cfc1(scratch, FCSR);
// Disable FPU exceptions.
ctc1(zero_reg, FCSR);
// Do operation based on rounding mode.
switch (rounding_mode) {
case kRoundToNearest:
- Round_w_d(result, double_input);
+ Round_w_d(double_scratch, double_input);
break;
case kRoundToZero:
- Trunc_w_d(result, double_input);
+ Trunc_w_d(double_scratch, double_input);
break;
case kRoundToPlusInf:
- Ceil_w_d(result, double_input);
+ Ceil_w_d(double_scratch, double_input);
break;
case kRoundToMinusInf:
- Floor_w_d(result, double_input);
+ Floor_w_d(double_scratch, double_input);
break;
} // End of switch-statement.
// Retrieve FCSR.
cfc1(except_flag, FCSR);
// Restore FCSR.
- ctc1(scratch1, FCSR);
+ ctc1(scratch, FCSR);
+ // Move the converted value into the result register.
+ mfc1(result, double_scratch);
// Check for fpu exceptions.
And(except_flag, except_flag, Operand(except_mask));
-}
-
-void MacroAssembler::EmitOutOfInt32RangeTruncate(Register result,
- Register input_high,
- Register input_low,
- Register scratch) {
- Label done, normal_exponent, restore_sign;
- // Extract the biased exponent in result.
- Ext(result,
- input_high,
- HeapNumber::kExponentShift,
- HeapNumber::kExponentBits);
-
- // Check for Infinity and NaNs, which should return 0.
- Subu(scratch, result, HeapNumber::kExponentMask);
- Movz(result, zero_reg, scratch);
- Branch(&done, eq, scratch, Operand(zero_reg));
-
- // Express exponent as delta to (number of mantissa bits + 31).
- Subu(result,
- result,
- Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31));
-
- // If the delta is strictly positive, all bits would be shifted away,
- // which means that we can return 0.
- Branch(&normal_exponent, le, result, Operand(zero_reg));
- mov(result, zero_reg);
- Branch(&done);
-
- bind(&normal_exponent);
- const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
- // Calculate shift.
- Addu(scratch, result, Operand(kShiftBase + HeapNumber::kMantissaBits));
-
- // Save the sign.
- Register sign = result;
- result = no_reg;
- And(sign, input_high, Operand(HeapNumber::kSignMask));
-
- // On ARM shifts > 31 bits are valid and will result in zero. On MIPS we need
- // to check for this specific case.
- Label high_shift_needed, high_shift_done;
- Branch(&high_shift_needed, lt, scratch, Operand(32));
- mov(input_high, zero_reg);
- Branch(&high_shift_done);
- bind(&high_shift_needed);
-
- // Set the implicit 1 before the mantissa part in input_high.
- Or(input_high,
- input_high,
- Operand(1 << HeapNumber::kMantissaBitsInTopWord));
- // Shift the mantissa bits to the correct position.
- // We don't need to clear non-mantissa bits as they will be shifted away.
- // If they weren't, it would mean that the answer is in the 32bit range.
- sllv(input_high, input_high, scratch);
-
- bind(&high_shift_done);
-
- // Replace the shifted bits with bits from the lower mantissa word.
- Label pos_shift, shift_done;
- li(at, 32);
- subu(scratch, at, scratch);
- Branch(&pos_shift, ge, scratch, Operand(zero_reg));
-
- // Negate scratch.
- Subu(scratch, zero_reg, scratch);
- sllv(input_low, input_low, scratch);
- Branch(&shift_done);
-
- bind(&pos_shift);
- srlv(input_low, input_low, scratch);
-
- bind(&shift_done);
- Or(input_high, input_high, Operand(input_low));
- // Restore sign if necessary.
- mov(scratch, sign);
- result = sign;
- sign = no_reg;
- Subu(result, zero_reg, input_high);
- Movz(result, input_high, scratch);
bind(&done);
}
-void MacroAssembler::EmitECMATruncate(Register result,
- FPURegister double_input,
- FPURegister single_scratch,
- Register scratch,
- Register scratch2,
- Register scratch3) {
- CpuFeatures::Scope scope(FPU);
- ASSERT(!scratch2.is(result));
- ASSERT(!scratch3.is(result));
- ASSERT(!scratch3.is(scratch2));
- ASSERT(!scratch.is(result) &&
- !scratch.is(scratch2) &&
- !scratch.is(scratch3));
- ASSERT(!single_scratch.is(double_input));
-
- Label done;
- Label manual;
+void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
+ DoubleRegister double_input,
+ Label* done) {
+ DoubleRegister single_scratch = kLithiumScratchDouble.low();
+ Register scratch = at;
+ Register scratch2 = t9;
// Clear cumulative exception flags and save the FCSR.
cfc1(scratch2, FCSR);
@@ -1555,16 +1380,66 @@ void MacroAssembler::EmitECMATruncate(Register result,
scratch,
kFCSROverflowFlagMask | kFCSRUnderflowFlagMask | kFCSRInvalidOpFlagMask);
// If we had no exceptions we are done.
- Branch(&done, eq, scratch, Operand(zero_reg));
-
- // Load the double value and perform a manual truncation.
- Register input_high = scratch2;
- Register input_low = scratch3;
- Move(input_low, input_high, double_input);
- EmitOutOfInt32RangeTruncate(result,
- input_high,
- input_low,
- scratch);
+ Branch(done, eq, scratch, Operand(zero_reg));
+}
+
+
+void MacroAssembler::TruncateDoubleToI(Register result,
+ DoubleRegister double_input) {
+ Label done;
+
+ TryInlineTruncateDoubleToI(result, double_input, &done);
+
+ // If we fell through, the inline version didn't succeed - call stub instead.
+ push(ra);
+ Subu(sp, sp, Operand(kDoubleSize)); // Put input on stack.
+ sdc1(double_input, MemOperand(sp, 0));
+
+ DoubleToIStub stub(sp, result, 0, true, true);
+ CallStub(&stub);
+
+ Addu(sp, sp, Operand(kDoubleSize));
+ pop(ra);
+
+ bind(&done);
+}
+
+
+void MacroAssembler::TruncateHeapNumberToI(Register result, Register object) {
+ Label done;
+ DoubleRegister double_scratch = f12;
+ ASSERT(!result.is(object));
+
+ ldc1(double_scratch,
+ MemOperand(object, HeapNumber::kValueOffset - kHeapObjectTag));
+ TryInlineTruncateDoubleToI(result, double_scratch, &done);
+
+ // If we fell through, the inline version didn't succeed - call stub instead.
+ push(ra);
+ DoubleToIStub stub(object,
+ result,
+ HeapNumber::kValueOffset - kHeapObjectTag,
+ true,
+ true);
+ CallStub(&stub);
+ pop(ra);
+
+ bind(&done);
+}
+
+
+void MacroAssembler::TruncateNumberToI(Register object,
+ Register result,
+ Register heap_number_map,
+ Register scratch,
+ Label* not_number) {
+ Label done;
+ ASSERT(!result.is(object));
+
+ UntagAndJumpIfSmi(result, object, &done);
+ JumpIfNotHeapNumber(object, heap_number_map, scratch, not_number);
+ TruncateHeapNumberToI(result, object);
+
bind(&done);
}
@@ -1677,6 +1552,7 @@ void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
if (rt.is_reg()) {
// NOTE: 'at' can be clobbered by Branch but it is legal to use it as rs or
// rt.
+ BlockTrampolinePoolScope block_trampoline_pool(this);
r2 = rt.rm_;
switch (cond) {
case cc_always:
@@ -1762,6 +1638,7 @@ void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
// Be careful to always use shifted_branch_offset only just before the
    // branch instruction, as the location will be remembered for patching the
// target.
+ BlockTrampolinePoolScope block_trampoline_pool(this);
switch (cond) {
case cc_always:
b(offset);
@@ -1902,10 +1779,11 @@ void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
BranchDelaySlot bdslot) {
BRANCH_ARGS_CHECK(cond, rs, rt);
- int32_t offset;
+ int32_t offset = 0;
Register r2 = no_reg;
Register scratch = at;
if (rt.is_reg()) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
r2 = rt.rm_;
// Be careful to always use shifted_branch_offset only just before the
    // branch instruction, as the location will be remembered for patching the
@@ -2012,6 +1890,7 @@ void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
// Be careful to always use shifted_branch_offset only just before the
  // branch instruction, as the location will be remembered for patching the
// target.
+ BlockTrampolinePoolScope block_trampoline_pool(this);
switch (cond) {
case cc_always:
offset = shifted_branch_offset(L, false);
@@ -2248,67 +2127,70 @@ void MacroAssembler::BranchAndLinkShort(int16_t offset, Condition cond,
li(r2, rt);
}
- switch (cond) {
- case cc_always:
- bal(offset);
- break;
- case eq:
- bne(rs, r2, 2);
- nop();
- bal(offset);
- break;
- case ne:
- beq(rs, r2, 2);
- nop();
- bal(offset);
- break;
+ {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ switch (cond) {
+ case cc_always:
+ bal(offset);
+ break;
+ case eq:
+ bne(rs, r2, 2);
+ nop();
+ bal(offset);
+ break;
+ case ne:
+ beq(rs, r2, 2);
+ nop();
+ bal(offset);
+ break;
- // Signed comparison.
- case greater:
- slt(scratch, r2, rs);
- addiu(scratch, scratch, -1);
- bgezal(scratch, offset);
- break;
- case greater_equal:
- slt(scratch, rs, r2);
- addiu(scratch, scratch, -1);
- bltzal(scratch, offset);
- break;
- case less:
- slt(scratch, rs, r2);
- addiu(scratch, scratch, -1);
- bgezal(scratch, offset);
- break;
- case less_equal:
- slt(scratch, r2, rs);
- addiu(scratch, scratch, -1);
- bltzal(scratch, offset);
- break;
+ // Signed comparison.
+ case greater:
+ slt(scratch, r2, rs);
+ addiu(scratch, scratch, -1);
+ bgezal(scratch, offset);
+ break;
+ case greater_equal:
+ slt(scratch, rs, r2);
+ addiu(scratch, scratch, -1);
+ bltzal(scratch, offset);
+ break;
+ case less:
+ slt(scratch, rs, r2);
+ addiu(scratch, scratch, -1);
+ bgezal(scratch, offset);
+ break;
+ case less_equal:
+ slt(scratch, r2, rs);
+ addiu(scratch, scratch, -1);
+ bltzal(scratch, offset);
+ break;
- // Unsigned comparison.
- case Ugreater:
- sltu(scratch, r2, rs);
- addiu(scratch, scratch, -1);
- bgezal(scratch, offset);
- break;
- case Ugreater_equal:
- sltu(scratch, rs, r2);
- addiu(scratch, scratch, -1);
- bltzal(scratch, offset);
- break;
- case Uless:
- sltu(scratch, rs, r2);
- addiu(scratch, scratch, -1);
- bgezal(scratch, offset);
- break;
- case Uless_equal:
- sltu(scratch, r2, rs);
- addiu(scratch, scratch, -1);
- bltzal(scratch, offset);
- break;
+ // Unsigned comparison.
+ case Ugreater:
+ sltu(scratch, r2, rs);
+ addiu(scratch, scratch, -1);
+ bgezal(scratch, offset);
+ break;
+ case Ugreater_equal:
+ sltu(scratch, rs, r2);
+ addiu(scratch, scratch, -1);
+ bltzal(scratch, offset);
+ break;
+ case Uless:
+ sltu(scratch, rs, r2);
+ addiu(scratch, scratch, -1);
+ bgezal(scratch, offset);
+ break;
+ case Uless_equal:
+ sltu(scratch, r2, rs);
+ addiu(scratch, scratch, -1);
+ bltzal(scratch, offset);
+ break;
- default:
- UNREACHABLE();
+ default:
+ UNREACHABLE();
+ }
}
// Emit a nop in the branch delay slot if required.
if (bdslot == PROTECT)
@@ -2330,7 +2212,7 @@ void MacroAssembler::BranchAndLinkShort(Label* L, Condition cond, Register rs,
BranchDelaySlot bdslot) {
BRANCH_ARGS_CHECK(cond, rs, rt);
- int32_t offset;
+ int32_t offset = 0;
Register r2 = no_reg;
Register scratch = at;
if (rt.is_reg()) {
@@ -2340,80 +2222,82 @@ void MacroAssembler::BranchAndLinkShort(Label* L, Condition cond, Register rs,
li(r2, rt);
}
- switch (cond) {
- case cc_always:
- offset = shifted_branch_offset(L, false);
- bal(offset);
- break;
- case eq:
- bne(rs, r2, 2);
- nop();
- offset = shifted_branch_offset(L, false);
- bal(offset);
- break;
- case ne:
- beq(rs, r2, 2);
- nop();
- offset = shifted_branch_offset(L, false);
- bal(offset);
- break;
+ {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ switch (cond) {
+ case cc_always:
+ offset = shifted_branch_offset(L, false);
+ bal(offset);
+ break;
+ case eq:
+ bne(rs, r2, 2);
+ nop();
+ offset = shifted_branch_offset(L, false);
+ bal(offset);
+ break;
+ case ne:
+ beq(rs, r2, 2);
+ nop();
+ offset = shifted_branch_offset(L, false);
+ bal(offset);
+ break;
- // Signed comparison.
- case greater:
- slt(scratch, r2, rs);
- addiu(scratch, scratch, -1);
- offset = shifted_branch_offset(L, false);
- bgezal(scratch, offset);
- break;
- case greater_equal:
- slt(scratch, rs, r2);
- addiu(scratch, scratch, -1);
- offset = shifted_branch_offset(L, false);
- bltzal(scratch, offset);
- break;
- case less:
- slt(scratch, rs, r2);
- addiu(scratch, scratch, -1);
- offset = shifted_branch_offset(L, false);
- bgezal(scratch, offset);
- break;
- case less_equal:
- slt(scratch, r2, rs);
- addiu(scratch, scratch, -1);
- offset = shifted_branch_offset(L, false);
- bltzal(scratch, offset);
- break;
+ // Signed comparison.
+ case greater:
+ slt(scratch, r2, rs);
+ addiu(scratch, scratch, -1);
+ offset = shifted_branch_offset(L, false);
+ bgezal(scratch, offset);
+ break;
+ case greater_equal:
+ slt(scratch, rs, r2);
+ addiu(scratch, scratch, -1);
+ offset = shifted_branch_offset(L, false);
+ bltzal(scratch, offset);
+ break;
+ case less:
+ slt(scratch, rs, r2);
+ addiu(scratch, scratch, -1);
+ offset = shifted_branch_offset(L, false);
+ bgezal(scratch, offset);
+ break;
+ case less_equal:
+ slt(scratch, r2, rs);
+ addiu(scratch, scratch, -1);
+ offset = shifted_branch_offset(L, false);
+ bltzal(scratch, offset);
+ break;
- // Unsigned comparison.
- case Ugreater:
- sltu(scratch, r2, rs);
- addiu(scratch, scratch, -1);
- offset = shifted_branch_offset(L, false);
- bgezal(scratch, offset);
- break;
- case Ugreater_equal:
- sltu(scratch, rs, r2);
- addiu(scratch, scratch, -1);
- offset = shifted_branch_offset(L, false);
- bltzal(scratch, offset);
- break;
- case Uless:
- sltu(scratch, rs, r2);
- addiu(scratch, scratch, -1);
- offset = shifted_branch_offset(L, false);
- bgezal(scratch, offset);
- break;
- case Uless_equal:
- sltu(scratch, r2, rs);
- addiu(scratch, scratch, -1);
- offset = shifted_branch_offset(L, false);
- bltzal(scratch, offset);
- break;
+ // Unsigned comparison.
+ case Ugreater:
+ sltu(scratch, r2, rs);
+ addiu(scratch, scratch, -1);
+ offset = shifted_branch_offset(L, false);
+ bgezal(scratch, offset);
+ break;
+ case Ugreater_equal:
+ sltu(scratch, rs, r2);
+ addiu(scratch, scratch, -1);
+ offset = shifted_branch_offset(L, false);
+ bltzal(scratch, offset);
+ break;
+ case Uless:
+ sltu(scratch, rs, r2);
+ addiu(scratch, scratch, -1);
+ offset = shifted_branch_offset(L, false);
+ bgezal(scratch, offset);
+ break;
+ case Uless_equal:
+ sltu(scratch, r2, rs);
+ addiu(scratch, scratch, -1);
+ offset = shifted_branch_offset(L, false);
+ bltzal(scratch, offset);
+ break;
- default:
- UNREACHABLE();
+ default:
+ UNREACHABLE();
+ }
}
-
  // Check that the offset could actually fit in an int16_t.
ASSERT(is_int16(offset));
@@ -2478,6 +2362,7 @@ void MacroAssembler::Jump(Handle<Code> code,
const Operand& rt,
BranchDelaySlot bd) {
ASSERT(RelocInfo::IsCodeTarget(rmode));
+ AllowDeferredHandleDereference embedding_raw_address;
Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond, rs, rt, bd);
}
@@ -2565,6 +2450,7 @@ int MacroAssembler::CallSize(Handle<Code> code,
Register rs,
const Operand& rt,
BranchDelaySlot bd) {
+ AllowDeferredHandleDereference using_raw_address;
return CallSize(reinterpret_cast<Address>(code.location()),
rmode, cond, rs, rt, bd);
}
@@ -2585,6 +2471,7 @@ void MacroAssembler::Call(Handle<Code> code,
SetRecordedAstId(ast_id);
rmode = RelocInfo::CODE_TARGET_WITH_ID;
}
+ AllowDeferredHandleDereference embedding_raw_address;
Call(reinterpret_cast<Address>(code.location()), rmode, cond, rs, rt, bd);
ASSERT_EQ(CallSize(code, rmode, ast_id, cond, rs, rt, bd),
SizeOfCodeGeneratedSince(&start));
@@ -2656,6 +2543,7 @@ void MacroAssembler::Jalr(Label* L, BranchDelaySlot bdslot) {
nop();
}
+
void MacroAssembler::DropAndRet(int drop) {
Ret(USE_DELAY_SLOT);
addiu(sp, sp, drop * kPointerSize);
@@ -2736,7 +2624,7 @@ void MacroAssembler::DebugBreak() {
PrepareCEntryFunction(ExternalReference(Runtime::kDebugBreak, isolate()));
CEntryStub ces(1);
ASSERT(AllowThisStubCall(&ces));
- Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
+ Call(ces.GetCode(isolate()), RelocInfo::DEBUG_BREAK);
}
#endif // ENABLE_DEBUGGER_SUPPORT
@@ -2889,12 +2777,13 @@ void MacroAssembler::ThrowUncatchable(Register value) {
}
-void MacroAssembler::AllocateInNewSpace(int object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags) {
+void MacroAssembler::Allocate(int object_size,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ AllocationFlags flags) {
+ ASSERT(object_size <= Page::kMaxNonCodeHeapObjectSize);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -2922,21 +2811,20 @@ void MacroAssembler::AllocateInNewSpace(int object_size,
// Check relative positions of allocation top and limit addresses.
// ARM adds additional checks to make sure the ldm instruction can be
// used. On MIPS we don't have ldm so we don't need additional checks either.
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
- ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address(isolate());
+ ExternalReference allocation_top =
+ AllocationUtils::GetAllocationTopReference(isolate(), flags);
+ ExternalReference allocation_limit =
+ AllocationUtils::GetAllocationLimitReference(isolate(), flags);
+
intptr_t top =
- reinterpret_cast<intptr_t>(new_space_allocation_top.address());
+ reinterpret_cast<intptr_t>(allocation_top.address());
intptr_t limit =
- reinterpret_cast<intptr_t>(new_space_allocation_limit.address());
+ reinterpret_cast<intptr_t>(allocation_limit.address());
ASSERT((limit - top) == kPointerSize);
// Set up allocation top address and object size registers.
Register topaddr = scratch1;
- Register obj_size_reg = scratch2;
- li(topaddr, Operand(new_space_allocation_top));
- li(obj_size_reg, Operand(object_size));
+ li(topaddr, Operand(allocation_top));
// This code stores a temporary value in t9.
if ((flags & RESULT_CONTAINS_TOP) == 0) {
@@ -2949,15 +2837,32 @@ void MacroAssembler::AllocateInNewSpace(int object_size,
// immediately below so this use of t9 does not cause difference with
// respect to register content between debug and release mode.
lw(t9, MemOperand(topaddr));
- Check(eq, "Unexpected allocation top", result, Operand(t9));
+ Check(eq, kUnexpectedAllocationTop, result, Operand(t9));
}
// Load allocation limit into t9. Result already contains allocation top.
lw(t9, MemOperand(topaddr, limit - top));
}
+ if ((flags & DOUBLE_ALIGNMENT) != 0) {
+ // Align the next allocation. Storing the filler map without checking top is
+ // safe in new-space because the limit of the heap is aligned there.
+ ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
+ ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
+ And(scratch2, result, Operand(kDoubleAlignmentMask));
+ Label aligned;
+ Branch(&aligned, eq, scratch2, Operand(zero_reg));
+ if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
+ Branch(gc_required, Ugreater_equal, result, Operand(t9));
+ }
+ li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
+ sw(scratch2, MemOperand(result));
+ Addu(result, result, Operand(kDoubleSize / 2));
+ bind(&aligned);
+ }
+
// Calculate new top and bail out if new space is exhausted. Use result
// to calculate the new top.
- Addu(scratch2, result, Operand(obj_size_reg));
+ Addu(scratch2, result, Operand(object_size));
Branch(gc_required, Ugreater, scratch2, Operand(t9));
sw(scratch2, MemOperand(topaddr));
@@ -2968,12 +2873,12 @@ void MacroAssembler::AllocateInNewSpace(int object_size,
}
-void MacroAssembler::AllocateInNewSpace(Register object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags) {
+void MacroAssembler::Allocate(Register object_size,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ AllocationFlags flags) {
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -2994,19 +2899,19 @@ void MacroAssembler::AllocateInNewSpace(Register object_size,
// Check relative positions of allocation top and limit addresses.
// ARM adds additional checks to make sure the ldm instruction can be
// used. On MIPS we don't have ldm so we don't need additional checks either.
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
- ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address(isolate());
+ ExternalReference allocation_top =
+ AllocationUtils::GetAllocationTopReference(isolate(), flags);
+ ExternalReference allocation_limit =
+ AllocationUtils::GetAllocationLimitReference(isolate(), flags);
intptr_t top =
- reinterpret_cast<intptr_t>(new_space_allocation_top.address());
+ reinterpret_cast<intptr_t>(allocation_top.address());
intptr_t limit =
- reinterpret_cast<intptr_t>(new_space_allocation_limit.address());
+ reinterpret_cast<intptr_t>(allocation_limit.address());
ASSERT((limit - top) == kPointerSize);
// Set up allocation top address and object size registers.
Register topaddr = scratch1;
- li(topaddr, Operand(new_space_allocation_top));
+ li(topaddr, Operand(allocation_top));
// This code stores a temporary value in t9.
if ((flags & RESULT_CONTAINS_TOP) == 0) {
@@ -3019,12 +2924,29 @@ void MacroAssembler::AllocateInNewSpace(Register object_size,
// immediately below so this use of t9 does not cause difference with
// respect to register content between debug and release mode.
lw(t9, MemOperand(topaddr));
- Check(eq, "Unexpected allocation top", result, Operand(t9));
+ Check(eq, kUnexpectedAllocationTop, result, Operand(t9));
}
// Load allocation limit into t9. Result already contains allocation top.
lw(t9, MemOperand(topaddr, limit - top));
}
+ if ((flags & DOUBLE_ALIGNMENT) != 0) {
+ // Align the next allocation. Storing the filler map without checking top is
+ // safe in new-space because the limit of the heap is aligned there.
+ ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
+ ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
+ And(scratch2, result, Operand(kDoubleAlignmentMask));
+ Label aligned;
+ Branch(&aligned, eq, scratch2, Operand(zero_reg));
+ if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
+ Branch(gc_required, Ugreater_equal, result, Operand(t9));
+ }
+ li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
+ sw(scratch2, MemOperand(result));
+ Addu(result, result, Operand(kDoubleSize / 2));
+ bind(&aligned);
+ }
+
// Calculate new top and bail out if new space is exhausted. Use result
// to calculate the new top. Object size may be in words so a shift is
// required to get the number of bytes.
@@ -3039,7 +2961,7 @@ void MacroAssembler::AllocateInNewSpace(Register object_size,
// Update allocation top. result temporarily holds the new top.
if (emit_debug_code()) {
And(t9, scratch2, Operand(kObjectAlignmentMask));
- Check(eq, "Unaligned allocation in new space", t9, Operand(zero_reg));
+ Check(eq, kUnalignedAllocationInNewSpace, t9, Operand(zero_reg));
}
sw(scratch2, MemOperand(topaddr));
@@ -3061,7 +2983,7 @@ void MacroAssembler::UndoAllocationInNewSpace(Register object,
// Check that the object un-allocated is below the current top.
li(scratch, Operand(new_space_allocation_top));
lw(scratch, MemOperand(scratch));
- Check(less, "Undo allocation of non allocated memory",
+ Check(less, kUndoAllocationOfNonAllocatedMemory,
object, Operand(scratch));
#endif
// Write the address of the object to un-allocate as the current top.
@@ -3085,12 +3007,12 @@ void MacroAssembler::AllocateTwoByteString(Register result,
And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
// Allocate two-byte string in new space.
- AllocateInNewSpace(scratch1,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
+ Allocate(scratch1,
+ result,
+ scratch2,
+ scratch3,
+ gc_required,
+ TAG_OBJECT);
// Set the map, length and hash field.
InitializeNewString(result,
@@ -3109,18 +3031,18 @@ void MacroAssembler::AllocateAsciiString(Register result,
Label* gc_required) {
// Calculate the number of bytes needed for the characters in the string
// while observing object alignment.
- ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
+ ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
ASSERT(kCharSize == 1);
- addiu(scratch1, length, kObjectAlignmentMask + SeqAsciiString::kHeaderSize);
+ addiu(scratch1, length, kObjectAlignmentMask + SeqOneByteString::kHeaderSize);
And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
// Allocate ASCII string in new space.
- AllocateInNewSpace(scratch1,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
+ Allocate(scratch1,
+ result,
+ scratch2,
+ scratch3,
+ gc_required,
+ TAG_OBJECT);
// Set the map, length and hash field.
InitializeNewString(result,
@@ -3136,12 +3058,8 @@ void MacroAssembler::AllocateTwoByteConsString(Register result,
Register scratch1,
Register scratch2,
Label* gc_required) {
- AllocateInNewSpace(ConsString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
+ Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
+ TAG_OBJECT);
InitializeNewString(result,
length,
Heap::kConsStringMapRootIndex,
@@ -3155,12 +3073,34 @@ void MacroAssembler::AllocateAsciiConsString(Register result,
Register scratch1,
Register scratch2,
Label* gc_required) {
- AllocateInNewSpace(ConsString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
+ Label allocate_new_space, install_map;
+ AllocationFlags flags = TAG_OBJECT;
+
+ ExternalReference high_promotion_mode = ExternalReference::
+ new_space_high_promotion_mode_active_address(isolate());
+ li(scratch1, Operand(high_promotion_mode));
+ lw(scratch1, MemOperand(scratch1, 0));
+ Branch(&allocate_new_space, eq, scratch1, Operand(zero_reg));
+
+ Allocate(ConsString::kSize,
+ result,
+ scratch1,
+ scratch2,
+ gc_required,
+ static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE));
+
+ jmp(&install_map);
+
+ bind(&allocate_new_space);
+ Allocate(ConsString::kSize,
+ result,
+ scratch1,
+ scratch2,
+ gc_required,
+ flags);
+
+ bind(&install_map);
+
InitializeNewString(result,
length,
Heap::kConsAsciiStringMapRootIndex,
@@ -3174,12 +3114,8 @@ void MacroAssembler::AllocateTwoByteSlicedString(Register result,
Register scratch1,
Register scratch2,
Label* gc_required) {
- AllocateInNewSpace(SlicedString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
+ Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
+ TAG_OBJECT);
InitializeNewString(result,
length,
@@ -3194,12 +3130,8 @@ void MacroAssembler::AllocateAsciiSlicedString(Register result,
Register scratch1,
Register scratch2,
Label* gc_required) {
- AllocateInNewSpace(SlicedString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
+ Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
+ TAG_OBJECT);
InitializeNewString(result,
length,
@@ -3209,25 +3141,38 @@ void MacroAssembler::AllocateAsciiSlicedString(Register result,
}
+void MacroAssembler::JumpIfNotUniqueName(Register reg,
+ Label* not_unique_name) {
+ STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
+ Label succeed;
+ And(at, reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
+ Branch(&succeed, eq, at, Operand(zero_reg));
+ Branch(not_unique_name, ne, reg, Operand(SYMBOL_TYPE));
+
+ bind(&succeed);
+}
+
+
// Allocates a heap number or jumps to the label if the young space is full and
// a scavenge is needed.
void MacroAssembler::AllocateHeapNumber(Register result,
Register scratch1,
Register scratch2,
Register heap_number_map,
- Label* need_gc) {
+ Label* need_gc,
+ TaggingMode tagging_mode) {
// Allocate an object in the heap for the heap number and tag it as a heap
// object.
- AllocateInNewSpace(HeapNumber::kSize,
- result,
- scratch1,
- scratch2,
- need_gc,
- TAG_OBJECT);
+ Allocate(HeapNumber::kSize, result, scratch1, scratch2, need_gc,
+ tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS);
// Store heap number map in the allocated object.
- AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- sw(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
+ AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ if (tagging_mode == TAG_RESULT) {
+ sw(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
+ } else {
+ sw(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
+ }
}
@@ -3272,11 +3217,10 @@ void MacroAssembler::CopyBytes(Register src,
Register dst,
Register length,
Register scratch) {
- Label align_loop, align_loop_1, word_loop, byte_loop, byte_loop_1, done;
+ Label align_loop_1, word_loop, byte_loop, byte_loop_1, done;
// Align src before copying in word size chunks.
- bind(&align_loop);
- Branch(&done, eq, length, Operand(zero_reg));
+ Branch(&byte_loop, le, length, Operand(kPointerSize));
bind(&align_loop_1);
And(scratch, src, kPointerSize - 1);
Branch(&word_loop, eq, scratch, Operand(zero_reg));
@@ -3285,13 +3229,13 @@ void MacroAssembler::CopyBytes(Register src,
sb(scratch, MemOperand(dst));
Addu(dst, dst, 1);
Subu(length, length, Operand(1));
- Branch(&byte_loop_1, ne, length, Operand(zero_reg));
+ Branch(&align_loop_1, ne, length, Operand(zero_reg));
// Copy bytes in word size chunks.
bind(&word_loop);
if (emit_debug_code()) {
And(scratch, src, kPointerSize - 1);
- Assert(eq, "Expecting alignment for CopyBytes",
+ Assert(eq, kExpectingAlignmentForCopyBytes,
scratch, Operand(zero_reg));
}
Branch(&byte_loop, lt, length, Operand(kPointerSize));
@@ -3380,13 +3324,12 @@ void MacroAssembler::CheckFastSmiElements(Register map,
void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
Register key_reg,
- Register receiver_reg,
Register elements_reg,
Register scratch1,
Register scratch2,
Register scratch3,
- Register scratch4,
- Label* fail) {
+ Label* fail,
+ int elements_offset) {
Label smi_value, maybe_nan, have_double_value, is_nan, done;
Register mantissa_reg = scratch2;
Register exponent_reg = scratch3;
@@ -3412,8 +3355,10 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
bind(&have_double_value);
sll(scratch1, key_reg, kDoubleSizeLog2 - kSmiTagSize);
Addu(scratch1, scratch1, elements_reg);
- sw(mantissa_reg, FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize));
- uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
+ sw(mantissa_reg, FieldMemOperand(
+ scratch1, FixedDoubleArray::kHeaderSize - elements_offset));
+ uint32_t offset = FixedDoubleArray::kHeaderSize - elements_offset +
+ sizeof(kHoleNanLower32);
sw(exponent_reg, FieldMemOperand(scratch1, offset));
jmp(&done);
@@ -3433,35 +3378,17 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
bind(&smi_value);
Addu(scratch1, elements_reg,
- Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+ Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag -
+ elements_offset));
sll(scratch2, key_reg, kDoubleSizeLog2 - kSmiTagSize);
Addu(scratch1, scratch1, scratch2);
// scratch1 is now effective address of the double element
- FloatingPointHelper::Destination destination;
- if (CpuFeatures::IsSupported(FPU)) {
- destination = FloatingPointHelper::kFPURegisters;
- } else {
- destination = FloatingPointHelper::kCoreRegisters;
- }
-
Register untagged_value = elements_reg;
SmiUntag(untagged_value, value_reg);
- FloatingPointHelper::ConvertIntToDouble(this,
- untagged_value,
- destination,
- f0,
- mantissa_reg,
- exponent_reg,
- scratch4,
- f2);
- if (destination == FloatingPointHelper::kFPURegisters) {
- CpuFeatures::Scope scope(FPU);
- sdc1(f0, MemOperand(scratch1, 0));
- } else {
- sw(mantissa_reg, MemOperand(scratch1, 0));
- sw(exponent_reg, MemOperand(scratch1, Register::kSizeInBytes));
- }
+ mtc1(untagged_value, f2);
+ cvt_d_w(f0, f2);
+ sdc1(f0, MemOperand(scratch1, 0));
bind(&done);
}
@@ -3471,10 +3398,9 @@ void MacroAssembler::CompareMapAndBranch(Register obj,
Handle<Map> map,
Label* early_success,
Condition cond,
- Label* branch_to,
- CompareMapMode mode) {
+ Label* branch_to) {
lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
- CompareMapAndBranch(scratch, map, early_success, cond, branch_to, mode);
+ CompareMapAndBranch(scratch, map, early_success, cond, branch_to);
}
@@ -3482,25 +3408,8 @@ void MacroAssembler::CompareMapAndBranch(Register obj_map,
Handle<Map> map,
Label* early_success,
Condition cond,
- Label* branch_to,
- CompareMapMode mode) {
- Operand right = Operand(map);
- if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) {
- ElementsKind kind = map->elements_kind();
- if (IsFastElementsKind(kind)) {
- bool packed = IsFastPackedElementsKind(kind);
- Map* current_map = *map;
- while (CanTransitionToMoreGeneralFastElementsKind(kind, packed)) {
- kind = GetNextMoreGeneralFastElementsKind(kind, packed);
- current_map = current_map->LookupElementsTransitionMap(kind);
- if (!current_map) break;
- Branch(early_success, eq, obj_map, right);
- right = Operand(Handle<Map>(current_map));
- }
- }
- }
-
- Branch(branch_to, cond, obj_map, right);
+ Label* branch_to) {
+ Branch(branch_to, cond, obj_map, Operand(map));
}
@@ -3508,13 +3417,12 @@ void MacroAssembler::CheckMap(Register obj,
Register scratch,
Handle<Map> map,
Label* fail,
- SmiCheckType smi_check_type,
- CompareMapMode mode) {
+ SmiCheckType smi_check_type) {
if (smi_check_type == DO_SMI_CHECK) {
JumpIfSmi(obj, fail);
}
Label success;
- CompareMapAndBranch(obj, scratch, map, &success, ne, fail, mode);
+ CompareMapAndBranch(obj, scratch, map, &success, ne, fail);
bind(&success);
}
@@ -3549,7 +3457,6 @@ void MacroAssembler::CheckMap(Register obj,
void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) {
- CpuFeatures::Scope scope(FPU);
if (IsMipsSoftFloatABI) {
Move(dst, v0, v1);
} else {
@@ -3559,7 +3466,6 @@ void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) {
void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg) {
- CpuFeatures::Scope scope(FPU);
if (!IsMipsSoftFloatABI) {
Move(f12, dreg);
} else {
@@ -3570,7 +3476,6 @@ void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg) {
void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg1,
DoubleRegister dreg2) {
- CpuFeatures::Scope scope(FPU);
if (!IsMipsSoftFloatABI) {
if (dreg2.is(f12)) {
ASSERT(!dreg1.is(f14));
@@ -3589,7 +3494,6 @@ void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg1,
void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg,
Register reg) {
- CpuFeatures::Scope scope(FPU);
if (!IsMipsSoftFloatABI) {
Move(f12, dreg);
Move(a2, reg);
@@ -3784,6 +3688,7 @@ void MacroAssembler::InvokeFunction(Register function,
void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
+ const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
const CallWrapper& call_wrapper,
@@ -3795,7 +3700,6 @@ void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
LoadHeapObject(a1, function);
lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
- ParameterCount expected(function->shared()->formal_parameter_count());
// We call indirectly through the code field in the function to
// allow recompilation to take effect without changing any of the
// call sites.
@@ -3834,6 +3738,15 @@ void MacroAssembler::IsObjectJSStringType(Register object,
}
+void MacroAssembler::IsObjectNameType(Register object,
+ Register scratch,
+ Label* fail) {
+ lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+ lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+ Branch(fail, hi, scratch, Operand(LAST_NAME_TYPE));
+}
+
+
// ---------------------------------------------------------------------------
// Support functions.
@@ -3907,19 +3820,21 @@ void MacroAssembler::GetObjectType(Register object,
// Runtime calls.
void MacroAssembler::CallStub(CodeStub* stub,
+ TypeFeedbackId ast_id,
Condition cond,
Register r1,
const Operand& r2,
BranchDelaySlot bd) {
ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
- Call(stub->GetCode(), RelocInfo::CODE_TARGET, TypeFeedbackId::None(),
+ Call(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, ast_id,
cond, r1, r2, bd);
}
void MacroAssembler::TailCallStub(CodeStub* stub) {
- ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe());
- Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
+ ASSERT(allow_stub_calls_ ||
+ stub->CompilingCallsToThisStubIsGCSafe(isolate()));
+ Jump(stub->GetCode(isolate()), RelocInfo::CODE_TARGET);
}
@@ -3928,16 +3843,22 @@ static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
}
-void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
- int stack_space) {
+void MacroAssembler::CallApiFunctionAndReturn(
+ ExternalReference function,
+ Address function_address,
+ ExternalReference thunk_ref,
+ Register thunk_last_arg,
+ int stack_space,
+ MemOperand return_value_operand,
+ MemOperand* context_restore_operand) {
ExternalReference next_address =
- ExternalReference::handle_scope_next_address();
+ ExternalReference::handle_scope_next_address(isolate());
const int kNextOffset = 0;
const int kLimitOffset = AddressOffset(
- ExternalReference::handle_scope_limit_address(),
+ ExternalReference::handle_scope_limit_address(isolate()),
next_address);
const int kLevelOffset = AddressOffset(
- ExternalReference::handle_scope_level_address(),
+ ExternalReference::handle_scope_level_address(isolate()),
next_address);
// Allocate HandleScope in callee-save registers.
@@ -3948,41 +3869,65 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
Addu(s2, s2, Operand(1));
sw(s2, MemOperand(s3, kLevelOffset));
- // The O32 ABI requires us to pass a pointer in a0 where the returned struct
- // (4 bytes) will be placed. This is also built into the Simulator.
- // Set up the pointer to the returned value (a0). It was allocated in
- // EnterExitFrame.
- addiu(a0, fp, ExitFrameConstants::kStackSpaceOffset);
+ if (FLAG_log_timer_events) {
+ FrameScope frame(this, StackFrame::MANUAL);
+ PushSafepointRegisters();
+ PrepareCallCFunction(1, a0);
+ li(a0, Operand(ExternalReference::isolate_address(isolate())));
+ CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1);
+ PopSafepointRegisters();
+ }
+
+ Label profiler_disabled;
+ Label end_profiler_check;
+ bool* is_profiling_flag =
+ isolate()->cpu_profiler()->is_profiling_address();
+ STATIC_ASSERT(sizeof(*is_profiling_flag) == 1);
+ li(t9, reinterpret_cast<int32_t>(is_profiling_flag));
+ lb(t9, MemOperand(t9, 0));
+ beq(t9, zero_reg, &profiler_disabled);
+
+ // Third parameter is the address of the actual getter function.
+ li(thunk_last_arg, reinterpret_cast<int32_t>(function_address));
+ li(t9, Operand(thunk_ref));
+ jmp(&end_profiler_check);
+
+ bind(&profiler_disabled);
+ li(t9, Operand(function));
+
+ bind(&end_profiler_check);
// Native call returns to the DirectCEntry stub which redirects to the
// return address pushed on stack (could have moved after GC).
// DirectCEntry stub itself is generated early and never moves.
DirectCEntryStub stub;
- stub.GenerateCall(this, function);
+ stub.GenerateCall(this, t9);
- // As mentioned above, on MIPS a pointer is returned - we need to dereference
- // it to get the actual return value (which is also a pointer).
- lw(v0, MemOperand(v0));
+ if (FLAG_log_timer_events) {
+ FrameScope frame(this, StackFrame::MANUAL);
+ PushSafepointRegisters();
+ PrepareCallCFunction(1, a0);
+ li(a0, Operand(ExternalReference::isolate_address(isolate())));
+ CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1);
+ PopSafepointRegisters();
+ }
Label promote_scheduled_exception;
+ Label exception_handled;
Label delete_allocated_handles;
Label leave_exit_frame;
+ Label return_value_loaded;
- // If result is non-zero, dereference to get the result value
- // otherwise set it to undefined.
- Label skip;
- LoadRoot(a0, Heap::kUndefinedValueRootIndex);
- Branch(&skip, eq, v0, Operand(zero_reg));
- lw(a0, MemOperand(v0));
- bind(&skip);
- mov(v0, a0);
+ // Load value from ReturnValue.
+ lw(v0, return_value_operand);
+ bind(&return_value_loaded);
// No more valid handles (the result handle was the last one). Restore
// previous handle scope.
sw(s0, MemOperand(s3, kNextOffset));
if (emit_debug_code()) {
lw(a1, MemOperand(s3, kLevelOffset));
- Check(eq, "Unexpected level after return from api call", a1, Operand(s2));
+ Check(eq, kUnexpectedLevelAfterReturnFromApiCall, a1, Operand(s2));
}
Subu(s2, s2, Operand(1));
sw(s2, MemOperand(s3, kLevelOffset));
@@ -3995,14 +3940,23 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
li(at, Operand(ExternalReference::scheduled_exception_address(isolate())));
lw(t1, MemOperand(at));
Branch(&promote_scheduled_exception, ne, t0, Operand(t1));
+ bind(&exception_handled);
+
+ bool restore_context = context_restore_operand != NULL;
+ if (restore_context) {
+ lw(cp, *context_restore_operand);
+ }
li(s0, Operand(stack_space));
- LeaveExitFrame(false, s0, true);
+ LeaveExitFrame(false, s0, !restore_context, EMIT_RETURN);
bind(&promote_scheduled_exception);
- TailCallExternalReference(
- ExternalReference(Runtime::kPromoteScheduledException, isolate()),
- 0,
- 1);
+ {
+ FrameScope frame(this, StackFrame::INTERNAL);
+ CallExternalReference(
+ ExternalReference(Runtime::kPromoteScheduledException, isolate()),
+ 0);
+ }
+ jmp(&exception_handled);
// HandleScope limit has changed. Delete allocated extensions.
bind(&delete_allocated_handles);
@@ -4010,7 +3964,7 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
mov(s0, v0);
mov(a0, v0);
PrepareCallCFunction(1, s1);
- li(a0, Operand(ExternalReference::isolate_address()));
+ li(a0, Operand(ExternalReference::isolate_address(isolate())));
CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate()),
1);
mov(v0, s0);
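The profiler check added above selects the call target at run time: if the CPU profiler's one-byte is-profiling flag is set, the call goes through the profiling thunk (with the real function address passed as the thunk's last argument); otherwise the API function is called directly. A hedged C++ sketch of that selection, with illustrative names:

#include <cstdint>

typedef void (*ApiCallTarget)();

// The flag is a single byte the profiler toggles while sampling; the real
// address comes from isolate()->cpu_profiler()->is_profiling_address().
ApiCallTarget SelectApiCallTarget(const uint8_t* is_profiling_flag,
                                  ApiCallTarget direct_function,
                                  ApiCallTarget profiling_thunk) {
  return (*is_profiling_flag != 0) ? profiling_thunk : direct_function;
}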
@@ -4020,7 +3974,7 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
- return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe();
+ return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe(isolate());
}
@@ -4179,7 +4133,8 @@ void MacroAssembler::SubuAndCheckForOverflow(Register dst,
void MacroAssembler::CallRuntime(const Runtime::Function* f,
- int num_arguments) {
+ int num_arguments,
+ SaveFPRegsMode save_doubles) {
// All parameters are on the stack. v0 has the return value after call.
// If the expected number of arguments of the runtime function is
@@ -4196,25 +4151,11 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f,
// smarter.
PrepareCEntryArgs(num_arguments);
PrepareCEntryFunction(ExternalReference(f, isolate()));
- CEntryStub stub(1);
- CallStub(&stub);
-}
-
-
-void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
- const Runtime::Function* function = Runtime::FunctionForId(id);
- PrepareCEntryArgs(function->nargs);
- PrepareCEntryFunction(ExternalReference(function, isolate()));
- CEntryStub stub(1, kSaveFPRegs);
+ CEntryStub stub(1, save_doubles);
CallStub(&stub);
}
-void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) {
- CallRuntime(Runtime::FunctionForId(fid), num_arguments);
-}
-
-
void MacroAssembler::CallExternalReference(const ExternalReference& ext,
int num_arguments,
BranchDelaySlot bd) {
@@ -4222,7 +4163,7 @@ void MacroAssembler::CallExternalReference(const ExternalReference& ext,
PrepareCEntryFunction(ext);
CEntryStub stub(1);
- CallStub(&stub, al, zero_reg, Operand(zero_reg), bd);
+ CallStub(&stub, TypeFeedbackId::None(), al, zero_reg, Operand(zero_reg), bd);
}
@@ -4251,7 +4192,7 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
BranchDelaySlot bd) {
PrepareCEntryFunction(builtin);
CEntryStub stub(1);
- Jump(stub.GetCode(),
+ Jump(stub.GetCode(isolate()),
RelocInfo::CODE_TARGET,
al,
zero_reg,
@@ -4336,19 +4277,10 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
// -----------------------------------------------------------------------------
// Debugging.
-void MacroAssembler::Assert(Condition cc, const char* msg,
+void MacroAssembler::Assert(Condition cc, BailoutReason reason,
Register rs, Operand rt) {
if (emit_debug_code())
- Check(cc, msg, rs, rt);
-}
-
-
-void MacroAssembler::AssertRegisterIsRoot(Register reg,
- Heap::RootListIndex index) {
- if (emit_debug_code()) {
- LoadRoot(at, index);
- Check(eq, "Register did not match expected root", reg, Operand(at));
- }
+ Check(cc, reason, rs, rt);
}
@@ -4364,24 +4296,24 @@ void MacroAssembler::AssertFastElements(Register elements) {
Branch(&ok, eq, elements, Operand(at));
LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex);
Branch(&ok, eq, elements, Operand(at));
- Abort("JSObject with fast elements map has slow elements");
+ Abort(kJSObjectWithFastElementsMapHasSlowElements);
bind(&ok);
pop(elements);
}
}
-void MacroAssembler::Check(Condition cc, const char* msg,
+void MacroAssembler::Check(Condition cc, BailoutReason reason,
Register rs, Operand rt) {
Label L;
Branch(&L, cc, rs, rt);
- Abort(msg);
+ Abort(reason);
// Will not return here.
bind(&L);
}
-void MacroAssembler::Abort(const char* msg) {
+void MacroAssembler::Abort(BailoutReason reason) {
Label abort_start;
bind(&abort_start);
// We want to pass the msg string like a smi to avoid GC
@@ -4389,6 +4321,7 @@ void MacroAssembler::Abort(const char* msg) {
// properly. Instead, we pass an aligned pointer that is
// a proper v8 smi, but also pass the alignment difference
// from the real pointer as a smi.
+ const char* msg = GetBailoutReason(reason);
intptr_t p1 = reinterpret_cast<intptr_t>(msg);
intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
@@ -4397,6 +4330,11 @@ void MacroAssembler::Abort(const char* msg) {
RecordComment("Abort message: ");
RecordComment(msg);
}
+
+ if (FLAG_trap_on_abort) {
+ stop(msg);
+ return;
+ }
#endif
li(a0, Operand(p0));
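The comment above describes passing the abort message pointer in smi form. With kSmiTag == 0 and kSmiTagMask == 1 (smis are even-valued words), the pointer is split into an even, smi-looking part plus a 0-or-1 delta. A small sketch of that encoding:

#include <cstdint>

void EncodeAbortMessage(const char* msg, intptr_t* aligned, intptr_t* delta) {
  intptr_t p1 = reinterpret_cast<intptr_t>(msg);
  intptr_t p0 = p1 & ~static_cast<intptr_t>(1);  // Clear the low bit: valid smi pattern.
  *aligned = p0;     // Passed as if it were a smi.
  *delta = p1 - p0;  // 0 or 1, passed as Smi::FromInt(p1 - p0).
}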
@@ -4509,6 +4447,19 @@ void MacroAssembler::LoadGlobalFunction(int index, Register function) {
}
+void MacroAssembler::LoadArrayFunction(Register function) {
+ // Load the global or builtins object from the current context.
+ lw(function,
+ MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ // Load the global context from the global or builtins object.
+ lw(function,
+ FieldMemOperand(function, GlobalObject::kGlobalContextOffset));
+ // Load the array function from the native context.
+ lw(function,
+ MemOperand(function, Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
+}
+
+
void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
Register map,
Register scratch) {
@@ -4519,12 +4470,156 @@ void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
Branch(&ok);
bind(&fail);
- Abort("Global functions must have initial map");
+ Abort(kGlobalFunctionsMustHaveInitialMap);
bind(&ok);
}
}
+void MacroAssembler::LoadNumber(Register object,
+ FPURegister dst,
+ Register heap_number_map,
+ Register scratch,
+ Label* not_number) {
+ Label is_smi, done;
+
+ UntagAndJumpIfSmi(scratch, object, &is_smi);
+ JumpIfNotHeapNumber(object, heap_number_map, scratch, not_number);
+
+ ldc1(dst, FieldMemOperand(object, HeapNumber::kValueOffset));
+ Branch(&done);
+
+ bind(&is_smi);
+ mtc1(scratch, dst);
+ cvt_d_w(dst, dst);
+
+ bind(&done);
+}
+
+
+void MacroAssembler::LoadNumberAsInt32Double(Register object,
+ DoubleRegister double_dst,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ FPURegister double_scratch,
+ Label* not_int32) {
+ ASSERT(!scratch1.is(object) && !scratch2.is(object));
+ ASSERT(!scratch1.is(scratch2));
+ ASSERT(!heap_number_map.is(object) &&
+ !heap_number_map.is(scratch1) &&
+ !heap_number_map.is(scratch2));
+
+ Label done, obj_is_not_smi;
+
+ UntagAndJumpIfNotSmi(scratch1, object, &obj_is_not_smi);
+ mtc1(scratch1, double_scratch);
+ cvt_d_w(double_dst, double_scratch);
+ Branch(&done);
+
+ bind(&obj_is_not_smi);
+ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
+
+ // Load the number.
+ // Load the double value.
+ ldc1(double_dst, FieldMemOperand(object, HeapNumber::kValueOffset));
+
+ Register except_flag = scratch2;
+ EmitFPUTruncate(kRoundToZero,
+ scratch1,
+ double_dst,
+ at,
+ double_scratch,
+ except_flag,
+ kCheckForInexactConversion);
+
+ // Jump to not_int32 if the operation did not succeed.
+ Branch(not_int32, ne, except_flag, Operand(zero_reg));
+ bind(&done);
+}
+
+
+void MacroAssembler::LoadNumberAsInt32(Register object,
+ Register dst,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ FPURegister double_scratch0,
+ FPURegister double_scratch1,
+ Label* not_int32) {
+ ASSERT(!dst.is(object));
+ ASSERT(!scratch1.is(object) && !scratch2.is(object));
+ ASSERT(!scratch1.is(scratch2));
+
+ Label done, maybe_undefined;
+
+ UntagAndJumpIfSmi(dst, object, &done);
+
+ JumpIfNotHeapNumber(object, heap_number_map, scratch1, &maybe_undefined);
+
+ // Object is a heap number.
+ // Convert the floating point value to a 32-bit integer.
+ // Load the double value.
+ ldc1(double_scratch0, FieldMemOperand(object, HeapNumber::kValueOffset));
+
+ Register except_flag = scratch2;
+ EmitFPUTruncate(kRoundToZero,
+ dst,
+ double_scratch0,
+ scratch1,
+ double_scratch1,
+ except_flag,
+ kCheckForInexactConversion);
+
+ // Jump to not_int32 if the operation did not succeed.
+ Branch(not_int32, ne, except_flag, Operand(zero_reg));
+ Branch(&done);
+
+ bind(&maybe_undefined);
+ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ Branch(not_int32, ne, object, Operand(at));
+ // |undefined| is truncated to 0.
+ li(dst, Operand(Smi::FromInt(0)));
+ // Fall through.
+
+ bind(&done);
+}
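Both LoadNumberAsInt32Double and LoadNumberAsInt32 reject heap numbers whose value does not convert to a 32-bit integer exactly (the truncation above raises except_flag on an inexact or out-of-range conversion). A host-side sketch of the acceptance rule this implies:

#include <cmath>
#include <cstdint>

bool DoubleToInt32Exact(double value, int32_t* out) {
  double truncated = std::trunc(value);
  if (truncated != value) return false;  // Inexact conversion (also rejects NaN).
  if (truncated < -2147483648.0 || truncated > 2147483647.0) return false;
  *out = static_cast<int32_t>(truncated);
  return true;
}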
+
+
+void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
+ if (frame_mode == BUILD_STUB_FRAME) {
+ Push(ra, fp, cp);
+ Push(Smi::FromInt(StackFrame::STUB));
+ // Adjust FP to point to saved FP.
+ Addu(fp, sp, Operand(2 * kPointerSize));
+ } else {
+ PredictableCodeSizeScope predictible_code_size_scope(
+ this, kNoCodeAgeSequenceLength * Assembler::kInstrSize);
+ // The following three instructions must remain together and unmodified
+ // for code aging to work properly.
+ if (isolate()->IsCodePreAgingActive()) {
+ // Pre-age the code.
+ Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
+ nop(Assembler::CODE_AGE_MARKER_NOP);
+ // Save the function's original return address
+ // (it will be clobbered by Call(t9))
+ mov(at, ra);
+ // Load the stub address to t9 and call it
+ li(t9,
+ Operand(reinterpret_cast<uint32_t>(stub->instruction_start())));
+ Call(t9);
+ // Record the stub address in the empty space for GetCodeAgeAndParity()
+ dd(reinterpret_cast<uint32_t>(stub->instruction_start()));
+ } else {
+ Push(ra, fp, cp, a1);
+ nop(Assembler::CODE_AGE_SEQUENCE_NOP);
+ // Adjust fp to point to caller's fp.
+ Addu(fp, sp, Operand(2 * kPointerSize));
+ }
+ }
+}
+
+
void MacroAssembler::EnterFrame(StackFrame::Type type) {
addiu(sp, sp, -5 * kPointerSize);
li(t8, Operand(Smi::FromInt(type)));
@@ -4590,10 +4685,10 @@ void MacroAssembler::EnterExitFrame(bool save_doubles,
ASSERT(IsPowerOf2(frame_alignment));
And(sp, sp, Operand(-frame_alignment)); // Align stack.
}
- int space = FPURegister::kNumRegisters * kDoubleSize;
+ int space = FPURegister::kMaxNumRegisters * kDoubleSize;
Subu(sp, sp, Operand(space));
// Remember: we only need to save every 2nd double FPU value.
- for (int i = 0; i < FPURegister::kNumRegisters; i+=2) {
+ for (int i = 0; i < FPURegister::kMaxNumRegisters; i+=2) {
FPURegister reg = FPURegister::from_code(i);
sdc1(reg, MemOperand(sp, i * kDoubleSize));
}
@@ -4618,12 +4713,13 @@ void MacroAssembler::EnterExitFrame(bool save_doubles,
void MacroAssembler::LeaveExitFrame(bool save_doubles,
Register argument_count,
+ bool restore_context,
bool do_return) {
// Optionally restore all double registers.
if (save_doubles) {
// Remember: we only need to restore every 2nd double FPU value.
lw(t8, MemOperand(fp, ExitFrameConstants::kSPOffset));
- for (int i = 0; i < FPURegister::kNumRegisters; i+=2) {
+ for (int i = 0; i < FPURegister::kMaxNumRegisters; i+=2) {
FPURegister reg = FPURegister::from_code(i);
ldc1(reg, MemOperand(t8, i * kDoubleSize + kPointerSize));
}
@@ -4634,9 +4730,12 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles,
sw(zero_reg, MemOperand(t8));
// Restore current context from top and clear it in debug mode.
- li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
- lw(cp, MemOperand(t8));
+ if (restore_context) {
+ li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
+ lw(cp, MemOperand(t8));
+ }
#ifdef DEBUG
+ li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
sw(a3, MemOperand(t8));
#endif
@@ -4673,19 +4772,19 @@ void MacroAssembler::InitializeNewString(Register string,
int MacroAssembler::ActivationFrameAlignment() {
-#if defined(V8_HOST_ARCH_MIPS)
+#if V8_HOST_ARCH_MIPS
// Running on the real platform. Use the alignment as mandated by the local
// environment.
// Note: This will break if we ever start generating snapshots on one Mips
// platform for another Mips platform with a different alignment.
return OS::ActivationFrameAlignment();
-#else // defined(V8_HOST_ARCH_MIPS)
+#else // V8_HOST_ARCH_MIPS
// If we are using the simulator then we should always align to the expected
// alignment. As the simulator is used to generate snapshots we do not know
// if the target platform will need alignment, so this is controlled from a
// flag.
return FLAG_sim_stack_alignment;
-#endif // defined(V8_HOST_ARCH_MIPS)
+#endif // V8_HOST_ARCH_MIPS
}
@@ -4802,7 +4901,7 @@ void MacroAssembler::AssertNotSmi(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
andi(at, object, kSmiTagMask);
- Check(ne, "Operand is a smi", at, Operand(zero_reg));
+ Check(ne, kOperandIsASmi, at, Operand(zero_reg));
}
}
@@ -4811,7 +4910,7 @@ void MacroAssembler::AssertSmi(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
andi(at, object, kSmiTagMask);
- Check(eq, "Operand is a smi", at, Operand(zero_reg));
+ Check(eq, kOperandIsASmi, at, Operand(zero_reg));
}
}
@@ -4820,23 +4919,35 @@ void MacroAssembler::AssertString(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
And(t0, object, Operand(kSmiTagMask));
- Check(ne, "Operand is a smi and not a string", t0, Operand(zero_reg));
+ Check(ne, kOperandIsASmiAndNotAString, t0, Operand(zero_reg));
push(object);
lw(object, FieldMemOperand(object, HeapObject::kMapOffset));
lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset));
- Check(lo, "Operand is not a string", object, Operand(FIRST_NONSTRING_TYPE));
+ Check(lo, kOperandIsNotAString, object, Operand(FIRST_NONSTRING_TYPE));
pop(object);
}
}
-void MacroAssembler::AssertRootValue(Register src,
- Heap::RootListIndex root_value_index,
- const char* message) {
+void MacroAssembler::AssertName(Register object) {
if (emit_debug_code()) {
- ASSERT(!src.is(at));
- LoadRoot(at, root_value_index);
- Check(eq, message, src, Operand(at));
+ STATIC_ASSERT(kSmiTag == 0);
+ And(t0, object, Operand(kSmiTagMask));
+ Check(ne, kOperandIsASmiAndNotAName, t0, Operand(zero_reg));
+ push(object);
+ lw(object, FieldMemOperand(object, HeapObject::kMapOffset));
+ lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset));
+ Check(le, kOperandIsNotAName, object, Operand(LAST_NAME_TYPE));
+ pop(object);
+ }
+}
+
+
+void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
+ if (emit_debug_code()) {
+ ASSERT(!reg.is(at));
+ LoadRoot(at, index);
+ Check(eq, kHeapNumberMapRegisterClobbered, reg, Operand(at));
}
}
@@ -4846,11 +4957,91 @@ void MacroAssembler::JumpIfNotHeapNumber(Register object,
Register scratch,
Label* on_not_heap_number) {
lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
- AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
Branch(on_not_heap_number, ne, scratch, Operand(heap_number_map));
}
+void MacroAssembler::LookupNumberStringCache(Register object,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* not_found) {
+ // Use of registers. Register result is used as a temporary.
+ Register number_string_cache = result;
+ Register mask = scratch3;
+
+ // Load the number string cache.
+ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
+
+ // Make the hash mask from the length of the number string cache. It
+ // contains two elements (number and string) for each cache entry.
+ lw(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
+ // Divide length by two (length is a smi).
+ sra(mask, mask, kSmiTagSize + 1);
+ Addu(mask, mask, -1); // Make mask.
+
+ // Calculate the entry in the number string cache. The hash value in the
+ // number string cache for smis is just the smi value, and the hash for
+ // doubles is the xor of the upper and lower words. See
+ // Heap::GetNumberStringCache.
+ Label is_smi;
+ Label load_result_from_cache;
+ JumpIfSmi(object, &is_smi);
+ CheckMap(object,
+ scratch1,
+ Heap::kHeapNumberMapRootIndex,
+ not_found,
+ DONT_DO_SMI_CHECK);
+
+ STATIC_ASSERT(8 == kDoubleSize);
+ Addu(scratch1,
+ object,
+ Operand(HeapNumber::kValueOffset - kHeapObjectTag));
+ lw(scratch2, MemOperand(scratch1, kPointerSize));
+ lw(scratch1, MemOperand(scratch1, 0));
+ Xor(scratch1, scratch1, Operand(scratch2));
+ And(scratch1, scratch1, Operand(mask));
+
+ // Calculate address of entry in string cache: each entry consists
+ // of two pointer sized fields.
+ sll(scratch1, scratch1, kPointerSizeLog2 + 1);
+ Addu(scratch1, number_string_cache, scratch1);
+
+ Register probe = mask;
+ lw(probe, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
+ JumpIfSmi(probe, not_found);
+ ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset));
+ ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset));
+ BranchF(&load_result_from_cache, NULL, eq, f12, f14);
+ Branch(not_found);
+
+ bind(&is_smi);
+ Register scratch = scratch1;
+ sra(scratch, object, 1); // Shift away the tag.
+ And(scratch, mask, Operand(scratch));
+
+ // Calculate address of entry in string cache: each entry consists
+ // of two pointer sized fields.
+ sll(scratch, scratch, kPointerSizeLog2 + 1);
+ Addu(scratch, number_string_cache, scratch);
+
+ // Check if the entry is the smi we are looking for.
+ lw(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
+ Branch(not_found, ne, object, Operand(probe));
+
+ // Get the result from the cache.
+ bind(&load_result_from_cache);
+ lw(result, FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
+
+ IncrementCounter(isolate()->counters()->number_to_string_native(),
+ 1,
+ scratch1,
+ scratch2);
+}
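As the comment above notes, the cache index for a heap number is the xor of the double's upper and lower 32-bit words, masked by (cache length / 2) - 1; for a smi it is simply the untagged value under the same mask. A standalone sketch of the heap-number case:

#include <cstdint>
#include <cstring>

uint32_t NumberStringCacheIndex(double value, uint32_t mask) {
  uint64_t bits;
  memcpy(&bits, &value, sizeof(bits));
  uint32_t hash = static_cast<uint32_t>(bits) ^
                  static_cast<uint32_t>(bits >> 32);
  return hash & mask;  // Each entry spans two pointer-sized slots.
}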
+
+
void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
Register first,
Register second,
@@ -4895,9 +5086,10 @@ void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
Register scratch1,
Register scratch2,
Label* failure) {
- int kFlatAsciiStringMask =
+ const int kFlatAsciiStringMask =
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
- int kFlatAsciiStringTag = ASCII_STRING_TYPE;
+ const int kFlatAsciiStringTag =
+ kStringTag | kOneByteStringTag | kSeqStringTag;
ASSERT(kFlatAsciiStringTag <= 0xffff); // Ensure this fits 16-bit immed.
andi(scratch1, first, kFlatAsciiStringMask);
Branch(failure, ne, scratch1, Operand(kFlatAsciiStringTag));
@@ -4909,9 +5101,10 @@ void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
Register scratch,
Label* failure) {
- int kFlatAsciiStringMask =
+ const int kFlatAsciiStringMask =
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
- int kFlatAsciiStringTag = ASCII_STRING_TYPE;
+ const int kFlatAsciiStringTag =
+ kStringTag | kOneByteStringTag | kSeqStringTag;
And(scratch, type, Operand(kFlatAsciiStringMask));
Branch(failure, ne, scratch, Operand(kFlatAsciiStringTag));
}
@@ -5002,7 +5195,7 @@ void MacroAssembler::CallCFunctionHelper(Register function,
// The argument slots are presumed to have been set up by
// PrepareCallCFunction. The C function must be called via t9, for mips ABI.
-#if defined(V8_HOST_ARCH_MIPS)
+#if V8_HOST_ARCH_MIPS
if (emit_debug_code()) {
int frame_alignment = OS::ActivationFrameAlignment();
int frame_alignment_mask = frame_alignment - 1;
@@ -5051,7 +5244,7 @@ void MacroAssembler::PatchRelocatedValue(Register li_location,
// At this point scratch is a lui(at, ...) instruction.
if (emit_debug_code()) {
And(scratch, scratch, kOpcodeMask);
- Check(eq, "The instruction to patch should be a lui.",
+ Check(eq, kTheInstructionToPatchShouldBeALui,
scratch, Operand(LUI));
lw(scratch, MemOperand(li_location));
}
@@ -5063,7 +5256,7 @@ void MacroAssembler::PatchRelocatedValue(Register li_location,
// scratch is now ori(at, ...).
if (emit_debug_code()) {
And(scratch, scratch, kOpcodeMask);
- Check(eq, "The instruction to patch should be an ori.",
+ Check(eq, kTheInstructionToPatchShouldBeAnOri,
scratch, Operand(ORI));
lw(scratch, MemOperand(li_location, kInstrSize));
}
@@ -5080,7 +5273,7 @@ void MacroAssembler::GetRelocatedValue(Register li_location,
lw(value, MemOperand(li_location));
if (emit_debug_code()) {
And(value, value, kOpcodeMask);
- Check(eq, "The instruction should be a lui.",
+ Check(eq, kTheInstructionShouldBeALui,
value, Operand(LUI));
lw(value, MemOperand(li_location));
}
@@ -5091,7 +5284,7 @@ void MacroAssembler::GetRelocatedValue(Register li_location,
lw(scratch, MemOperand(li_location, kInstrSize));
if (emit_debug_code()) {
And(scratch, scratch, kOpcodeMask);
- Check(eq, "The instruction should be an ori.",
+ Check(eq, kTheInstructionShouldBeAnOri,
scratch, Operand(ORI));
lw(scratch, MemOperand(li_location, kInstrSize));
}
@@ -5116,6 +5309,18 @@ void MacroAssembler::CheckPageFlag(
}
+void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
+ Register scratch,
+ Label* if_deprecated) {
+ if (map->CanBeDeprecated()) {
+ li(scratch, Operand(map));
+ lw(scratch, FieldMemOperand(scratch, Map::kBitField3Offset));
+ And(scratch, scratch, Operand(Smi::FromInt(Map::Deprecated::kMask)));
+ Branch(if_deprecated, ne, scratch, Operand(zero_reg));
+ }
+}
+
+
void MacroAssembler::JumpIfBlack(Register object,
Register scratch0,
Register scratch1,
@@ -5272,7 +5477,7 @@ void MacroAssembler::EnsureNotWhite(
// For ASCII (char-size of 1) we shift the smi tag away to get the length.
// For UC16 (char-size of 2) we just leave the smi tag in place, thereby
// getting the length multiplied by 2.
- ASSERT(kAsciiStringTag == 4 && kStringEncodingMask == 4);
+ ASSERT(kOneByteStringTag == 4 && kStringEncodingMask == 4);
ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
lw(t9, FieldMemOperand(value, String::kLengthOffset));
And(t8, instance_type, Operand(kStringEncodingMask));
@@ -5397,6 +5602,54 @@ void MacroAssembler::ClampDoubleToUint8(Register result_reg,
}
+void MacroAssembler::TestJSArrayForAllocationMemento(
+ Register receiver_reg,
+ Register scratch_reg,
+ Label* no_memento_found,
+ Condition cond,
+ Label* allocation_memento_present) {
+ ExternalReference new_space_start =
+ ExternalReference::new_space_start(isolate());
+ ExternalReference new_space_allocation_top =
+ ExternalReference::new_space_allocation_top_address(isolate());
+ Addu(scratch_reg, receiver_reg,
+ Operand(JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
+ Branch(no_memento_found, lt, scratch_reg, Operand(new_space_start));
+ li(at, Operand(new_space_allocation_top));
+ lw(at, MemOperand(at));
+ Branch(no_memento_found, gt, scratch_reg, Operand(at));
+ lw(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize));
+ if (allocation_memento_present) {
+ Branch(allocation_memento_present, cond, scratch_reg,
+ Operand(isolate()->factory()->allocation_memento_map()));
+ }
+}
+
+
+Register GetRegisterThatIsNotOneOf(Register reg1,
+ Register reg2,
+ Register reg3,
+ Register reg4,
+ Register reg5,
+ Register reg6) {
+ RegList regs = 0;
+ if (reg1.is_valid()) regs |= reg1.bit();
+ if (reg2.is_valid()) regs |= reg2.bit();
+ if (reg3.is_valid()) regs |= reg3.bit();
+ if (reg4.is_valid()) regs |= reg4.bit();
+ if (reg5.is_valid()) regs |= reg5.bit();
+ if (reg6.is_valid()) regs |= reg6.bit();
+
+ for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
+ Register candidate = Register::FromAllocationIndex(i);
+ if (regs & candidate.bit()) continue;
+ return candidate;
+ }
+ UNREACHABLE();
+ return no_reg;
+}
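GetRegisterThatIsNotOneOf simply walks the allocatable registers and returns the first one whose bit is absent from the excluded set. A possible call site (register names here are illustrative, not taken from this patch):

// Pick a scratch register guaranteed to be distinct from the ones in use.
Register scratch = GetRegisterThatIsNotOneOf(receiver_reg, key_reg, value_reg);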
+
+
bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
if (r1.is(r2)) return true;
if (r1.is(r3)) return true;
@@ -5410,7 +5663,6 @@ bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
CodePatcher::CodePatcher(byte* address, int instructions)
: address_(address),
- instructions_(instructions),
size_(instructions * Assembler::kInstrSize),
masm_(NULL, address, size_ + Assembler::kGap) {
// Create a new macro assembler pointing to the address of the code to patch.
diff --git a/deps/v8/src/mips/macro-assembler-mips.h b/deps/v8/src/mips/macro-assembler-mips.h
index b57e51486..0805bb967 100644
--- a/deps/v8/src/mips/macro-assembler-mips.h
+++ b/deps/v8/src/mips/macro-assembler-mips.h
@@ -51,18 +51,18 @@ class JumpTarget;
// MIPS generated code calls C code, it must be via t9 register.
-// Flags used for the AllocateInNewSpace functions.
-enum AllocationFlags {
- // No special flags.
- NO_ALLOCATION_FLAGS = 0,
- // Return the pointer to the allocated already tagged as a heap object.
- TAG_OBJECT = 1 << 0,
- // The content of the result register already contains the allocation top in
- // new space.
- RESULT_CONTAINS_TOP = 1 << 1,
- // Specify that the requested size of the space to allocate is specified in
- // words instead of bytes.
- SIZE_IN_WORDS = 1 << 2
+// Flags used for LeaveExitFrame function.
+enum LeaveExitFrameMode {
+ EMIT_RETURN = true,
+ NO_EMIT_RETURN = false
+};
+
+// Flags used for AllocateHeapNumber
+enum TaggingMode {
+ // Tag the result.
+ TAG_RESULT,
+ // Don't tag the result.
+ DONT_TAG_RESULT
};
// Flags used for the ObjectToDoubleFPURegister function.
@@ -96,6 +96,13 @@ enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum RAStatus { kRAHasNotBeenSaved, kRAHasBeenSaved };
+Register GetRegisterThatIsNotOneOf(Register reg1,
+ Register reg2 = no_reg,
+ Register reg3 = no_reg,
+ Register reg4 = no_reg,
+ Register reg5 = no_reg,
+ Register reg6 = no_reg);
+
bool AreAliased(Register r1, Register r2, Register r3, Register r4);
@@ -180,10 +187,10 @@ class MacroAssembler: public Assembler {
void Call(Register target, COND_ARGS);
static int CallSize(Address target, RelocInfo::Mode rmode, COND_ARGS);
void Call(Address target, RelocInfo::Mode rmode, COND_ARGS);
- static int CallSize(Handle<Code> code,
- RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
- TypeFeedbackId ast_id = TypeFeedbackId::None(),
- COND_ARGS);
+ int CallSize(Handle<Code> code,
+ RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
+ TypeFeedbackId ast_id = TypeFeedbackId::None(),
+ COND_ARGS);
void Call(Handle<Code> code,
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
TypeFeedbackId ast_id = TypeFeedbackId::None(),
@@ -241,6 +248,14 @@ class MacroAssembler: public Assembler {
mfc1(dst_high, FPURegister::from_code(src.code() + 1));
}
+ inline void FmoveHigh(Register dst_high, FPURegister src) {
+ mfc1(dst_high, FPURegister::from_code(src.code() + 1));
+ }
+
+ inline void FmoveLow(Register dst_low, FPURegister src) {
+ mfc1(dst_low, src);
+ }
+
inline void Move(FPURegister dst, Register src_low, Register src_high) {
mtc1(src_low, dst);
mtc1(src_high, FPURegister::from_code(dst.code() + 1));
@@ -281,6 +296,7 @@ class MacroAssembler: public Assembler {
void LoadHeapObject(Register dst, Handle<HeapObject> object);
void LoadObject(Register result, Handle<Object> object) {
+ AllowDeferredHandleDereference heap_object_check;
if (object->IsHeapObject()) {
LoadHeapObject(result, Handle<HeapObject>::cast(object));
} else {
@@ -316,6 +332,10 @@ class MacroAssembler: public Assembler {
Condition cc,
Label* condition_met);
+ void CheckMapDeprecated(Handle<Map> map,
+ Register scratch,
+ Label* if_deprecated);
+
// Check if object is in new space. Jumps if the object is not in new space.
// The register scratch can be object itself, but it will be clobbered.
void JumpIfNotInNewSpace(Register object,
@@ -469,25 +489,26 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// Allocation support.
- // Allocate an object in new space. The object_size is specified
- // either in bytes or in words if the allocation flag SIZE_IN_WORDS
- // is passed. If the new space is exhausted control continues at the
- // gc_required label. The allocated object is returned in result. If
- // the flag tag_allocated_object is true the result is tagged as as
- // a heap object. All registers are clobbered also when control
- // continues at the gc_required label.
- void AllocateInNewSpace(int object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags);
- void AllocateInNewSpace(Register object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags);
+ // Allocate an object in new space or old pointer space. The object_size is
+ // specified either in bytes or in words if the allocation flag SIZE_IN_WORDS
+ // is passed. If the space is exhausted control continues at the gc_required
+ // label. The allocated object is returned in result. If the flag
+ // tag_allocated_object is true the result is tagged as a heap object.
+ // All registers are clobbered also when control continues at the gc_required
+ // label.
+ void Allocate(int object_size,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ AllocationFlags flags);
+
+ void Allocate(Register object_size,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ AllocationFlags flags);
// Undo allocation in new space. The object passed and objects allocated after
// it will no longer be allocated. The caller must make sure that no pointers
@@ -536,7 +557,8 @@ class MacroAssembler: public Assembler {
Register scratch1,
Register scratch2,
Register heap_number_map,
- Label* gc_required);
+ Label* gc_required,
+ TaggingMode tagging_mode = TAG_RESULT);
void AllocateHeapNumberWithValue(Register result,
FPURegister value,
Register scratch1,
@@ -612,14 +634,15 @@ class MacroAssembler: public Assembler {
void MultiPushFPU(RegList regs);
void MultiPushReversedFPU(RegList regs);
- // Lower case push() for compatibility with arch-independent code.
void push(Register src) {
Addu(sp, sp, Operand(-kPointerSize));
sw(src, MemOperand(sp, 0));
}
+ void Push(Register src) { push(src); }
// Push a handle.
void Push(Handle<Object> handle);
+ void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }
// Push two registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2) {
@@ -660,11 +683,11 @@ class MacroAssembler: public Assembler {
void MultiPopFPU(RegList regs);
void MultiPopReversedFPU(RegList regs);
- // Lower case pop() for compatibility with arch-independent code.
void pop(Register dst) {
lw(dst, MemOperand(sp, 0));
Addu(sp, sp, Operand(kPointerSize));
}
+ void Pop(Register dst) { pop(dst); }
// Pop two registers. Pops rightmost register first (from lower address).
void Pop(Register src1, Register src2) {
@@ -741,49 +764,85 @@ class MacroAssembler: public Assembler {
BranchF(target, nan, cc, cmp1, cmp2, bd);
};
- // Convert the HeapNumber pointed to by source to a 32bits signed integer
- // dest. If the HeapNumber does not fit into a 32bits signed integer branch
- // to not_int32 label. If FPU is available double_scratch is used but not
- // scratch2.
- void ConvertToInt32(Register source,
- Register dest,
- Register scratch,
- Register scratch2,
- FPURegister double_scratch,
- Label *not_int32);
-
- // Truncates a double using a specific rounding mode.
+ // Truncates a double using a specific rounding mode, and writes the value
+ // to the result register.
// The except_flag will contain any exceptions caused by the instruction.
- // If check_inexact is kDontCheckForInexactConversion, then the inexacat
+ // If check_inexact is kDontCheckForInexactConversion, then the inexact
// exception is masked.
void EmitFPUTruncate(FPURoundingMode rounding_mode,
- FPURegister result,
+ Register result,
DoubleRegister double_input,
- Register scratch1,
+ Register scratch,
+ DoubleRegister double_scratch,
Register except_flag,
CheckForInexactConversion check_inexact
= kDontCheckForInexactConversion);
- // Helper for EmitECMATruncate.
- // This will truncate a floating-point value outside of the singed 32bit
- // integer range to a 32bit signed integer.
- // Expects the double value loaded in input_high and input_low.
- // Exits with the answer in 'result'.
- // Note that this code does not work for values in the 32bit range!
- void EmitOutOfInt32RangeTruncate(Register result,
- Register input_high,
- Register input_low,
- Register scratch);
+ // Performs a truncating conversion of a floating point number as used by
+ // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
+ // succeeds, otherwise falls through if result is saturated. On return
+ // 'result' either holds answer, or is clobbered on fall through.
+ //
+ // Only public for the test code in test-code-stubs-arm.cc.
+ void TryInlineTruncateDoubleToI(Register result,
+ DoubleRegister input,
+ Label* done);
// Performs a truncating conversion of a floating point number as used by
// the JS bitwise operations. See ECMA-262 9.5: ToInt32.
- // Exits with 'result' holding the answer and all other registers clobbered.
- void EmitECMATruncate(Register result,
- FPURegister double_input,
- FPURegister single_scratch,
- Register scratch,
- Register scratch2,
- Register scratch3);
+ // Exits with 'result' holding the answer.
+ void TruncateDoubleToI(Register result, DoubleRegister double_input);
+
+ // Performs a truncating conversion of a heap number as used by
+ // the JS bitwise operations. See ECMA-262 9.5: ToInt32. 'result' and 'input'
+ // must be different registers. Exits with 'result' holding the answer.
+ void TruncateHeapNumberToI(Register result, Register object);
+
+ // Converts the smi or heap number in object to an int32 using the rules
+ // for ToInt32 as described in ECMAScript 9.5.: the value is truncated
+ // and brought into the range -2^31 .. +2^31 - 1. 'result' and 'input' must be
+ // different registers.
+ void TruncateNumberToI(Register object,
+ Register result,
+ Register heap_number_map,
+ Register scratch,
+ Label* not_int32);
+
+ // Loads the number from object into the dst register.
+ // If |object| is neither a smi nor a heap number, control jumps to
+ // |not_number| with |object| left intact.
+ void LoadNumber(Register object,
+ FPURegister dst,
+ Register heap_number_map,
+ Register scratch,
+ Label* not_number);
+
+ // Loads the number from object into double_dst in the double format.
+ // Control will jump to not_int32 if the value cannot be exactly represented
+ // by a 32-bit integer.
+ // Floating point values in the 32-bit integer range that are not exact
+ // integers won't be loaded.
+ void LoadNumberAsInt32Double(Register object,
+ DoubleRegister double_dst,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ FPURegister double_scratch,
+ Label* not_int32);
+
+ // Loads the number from object into dst as a 32-bit integer.
+ // Control will jump to not_int32 if the object cannot be exactly represented
+ // by a 32-bit integer.
+ // Floating point values in the 32-bit integer range that are not exact
+ // integers won't be converted.
+ void LoadNumberAsInt32(Register object,
+ Register dst,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ FPURegister double_scratch0,
+ FPURegister double_scratch1,
+ Label* not_int32);
// Enter exit frame.
// argc - argument count to be dropped by LeaveExitFrame.
@@ -795,7 +854,8 @@ class MacroAssembler: public Assembler {
// Leave the current exit frame.
void LeaveExitFrame(bool save_doubles,
Register arg_count,
- bool do_return = false);
+ bool restore_context,
+ bool do_return = NO_EMIT_RETURN);
// Get the actual activation frame alignment for target environment.
static int ActivationFrameAlignment();
@@ -823,6 +883,7 @@ class MacroAssembler: public Assembler {
bool can_have_holes);
void LoadGlobalFunction(int index, Register function);
+ void LoadArrayFunction(Register function);
// Load the initial map from the global function. The registers
// function and map can be the same, function is then overwritten.
@@ -868,6 +929,7 @@ class MacroAssembler: public Assembler {
CallKind call_kind);
void InvokeFunction(Handle<JSFunction> function,
+ const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
const CallWrapper& call_wrapper,
@@ -887,6 +949,10 @@ class MacroAssembler: public Assembler {
Register scratch,
Label* fail);
+ void IsObjectNameType(Register object,
+ Register scratch,
+ Label* fail);
+
#ifdef ENABLE_DEBUGGER_SUPPORT
// -------------------------------------------------------------------------
// Debugger Support.
@@ -968,18 +1034,15 @@ class MacroAssembler: public Assembler {
// Check to see if maybe_number can be stored as a double in
// FastDoubleElements. If it can, store it at the index specified by key in
- // the FastDoubleElements array elements. Otherwise jump to fail, in which
- // case scratch2, scratch3 and scratch4 are unmodified.
+ // the FastDoubleElements array elements. Otherwise jump to fail.
void StoreNumberToDoubleElements(Register value_reg,
Register key_reg,
- Register receiver_reg,
- // All regs below here overwritten.
Register elements_reg,
Register scratch1,
Register scratch2,
Register scratch3,
- Register scratch4,
- Label* fail);
+ Label* fail,
+ int elements_offset = 0);
// Compare an object's map with the specified map and its transitioned
// elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Jumps to
@@ -990,8 +1053,7 @@ class MacroAssembler: public Assembler {
Handle<Map> map,
Label* early_success,
Condition cond,
- Label* branch_to,
- CompareMapMode mode = REQUIRE_EXACT_MAP);
+ Label* branch_to);
// As above, but the map of the object is already loaded into the register
// which is preserved by the code generated.
@@ -999,8 +1061,7 @@ class MacroAssembler: public Assembler {
Handle<Map> map,
Label* early_success,
Condition cond,
- Label* branch_to,
- CompareMapMode mode = REQUIRE_EXACT_MAP);
+ Label* branch_to);
// Check if the map of an object is equal to a specified map and branch to
// label if not. Skip the smi check if not required (object is known to be a
@@ -1010,8 +1071,7 @@ class MacroAssembler: public Assembler {
Register scratch,
Handle<Map> map,
Label* fail,
- SmiCheckType smi_check_type,
- CompareMapMode mode = REQUIRE_EXACT_MAP);
+ SmiCheckType smi_check_type);
void CheckMap(Register obj,
@@ -1129,6 +1189,7 @@ class MacroAssembler: public Assembler {
// Call a code stub.
void CallStub(CodeStub* stub,
+ TypeFeedbackId ast_id = TypeFeedbackId::None(),
Condition cond = cc_always,
Register r1 = zero_reg,
const Operand& r2 = Operand(zero_reg),
@@ -1140,11 +1201,18 @@ class MacroAssembler: public Assembler {
void CallJSExitStub(CodeStub* stub);
// Call a runtime routine.
- void CallRuntime(const Runtime::Function* f, int num_arguments);
- void CallRuntimeSaveDoubles(Runtime::FunctionId id);
+ void CallRuntime(const Runtime::Function* f,
+ int num_arguments,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs);
+ void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
+ const Runtime::Function* function = Runtime::FunctionForId(id);
+ CallRuntime(function, function->nargs, kSaveFPRegs);
+ }
// Convenience function: Same as above, but takes the fid instead.
- void CallRuntime(Runtime::FunctionId fid, int num_arguments);
+ void CallRuntime(Runtime::FunctionId id, int num_arguments) {
+ CallRuntime(Runtime::FunctionForId(id), num_arguments);
+ }
// Convenience function: call an external reference.
void CallExternalReference(const ExternalReference& ext,
@@ -1212,7 +1280,13 @@ class MacroAssembler: public Assembler {
// from handle and propagates exceptions. Restores context. stack_space
// - space to be unwound on exit (includes the call JS arguments space and
// the additional space allocated for the fast call).
- void CallApiFunctionAndReturn(ExternalReference function, int stack_space);
+ void CallApiFunctionAndReturn(ExternalReference function,
+ Address function_address,
+ ExternalReference thunk_ref,
+ Register thunk_last_arg,
+ int stack_space,
+ MemOperand return_value_operand,
+ MemOperand* context_restore_operand);
// Jump to the builtin routine.
void JumpToExternalReference(const ExternalReference& builtin,
@@ -1258,15 +1332,14 @@ class MacroAssembler: public Assembler {
// Calls Abort(msg) if the condition cc is not satisfied.
// Use --debug_code to enable.
- void Assert(Condition cc, const char* msg, Register rs, Operand rt);
- void AssertRegisterIsRoot(Register reg, Heap::RootListIndex index);
+ void Assert(Condition cc, BailoutReason reason, Register rs, Operand rt);
void AssertFastElements(Register elements);
// Like Assert(), but always enabled.
- void Check(Condition cc, const char* msg, Register rs, Operand rt);
+ void Check(Condition cc, BailoutReason reason, Register rs, Operand rt);
// Print a message to stdout and abort execution.
- void Abort(const char* msg);
+ void Abort(BailoutReason msg);
// Verify restrictions about code generated in stubs.
void set_generating_stub(bool value) { generating_stub_ = value; }
@@ -1343,11 +1416,12 @@ class MacroAssembler: public Assembler {
// Abort execution if argument is not a string, enabled via --debug-code.
void AssertString(Register object);
- // Abort execution if argument is not the root value with the given index,
+ // Abort execution if argument is not a name, enabled via --debug-code.
+ void AssertName(Register object);
+
+ // Abort execution if reg is not the root value with the given index,
// enabled via --debug-code.
- void AssertRootValue(Register src,
- Heap::RootListIndex root_value_index,
- const char* message);
+ void AssertIsRoot(Register reg, Heap::RootListIndex index);
// ---------------------------------------------------------------------------
// HeapNumber utilities.
@@ -1360,6 +1434,18 @@ class MacroAssembler: public Assembler {
// -------------------------------------------------------------------------
// String utilities.
+ // Generate code to do a lookup in the number string cache. If the number in
+ // the register object is found in the cache the generated code falls through
+ // with the result in the result register. The object and the result register
+ // can be the same. If the number is not found in the cache the code jumps to
+ // the label not_found with only the content of register object unchanged.
+ void LookupNumberStringCache(Register object,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* not_found);
+
// Checks if both instance types are sequential ASCII strings and jumps to
// label if either is not.
void JumpIfBothInstanceTypesAreNotSequentialAscii(
@@ -1375,6 +1461,8 @@ class MacroAssembler: public Assembler {
Register scratch,
Label* failure);
+ void JumpIfNotUniqueName(Register reg, Label* not_unique_name);
+
// Test that both first and second are sequential ASCII strings.
// Assume that they are non-smis.
void JumpIfNonSmisNotBothSequentialAsciiStrings(Register first,
@@ -1410,6 +1498,9 @@ class MacroAssembler: public Assembler {
And(reg, reg, Operand(mask));
}
+ // Generates function and stub prologue code.
+ void Prologue(PrologueFrameMode frame_mode);
+
// Activation support.
void EnterFrame(StackFrame::Type type);
void LeaveFrame(StackFrame::Type type);
@@ -1427,6 +1518,28 @@ class MacroAssembler: public Assembler {
// in a0. Assumes that any other register can be used as a scratch.
void CheckEnumCache(Register null_value, Label* call_runtime);
+ // AllocationMemento support. Arrays may have an associated
+ // AllocationMemento object that can be checked for in order to pretransition
+ // to another type.
+ // On entry, receiver_reg should point to the array object.
+ // scratch_reg gets clobbered.
+ // If allocation info is present, jump to allocation_memento_present.
+ void TestJSArrayForAllocationMemento(
+ Register receiver_reg,
+ Register scratch_reg,
+ Label* no_memento_found,
+ Condition cond = al,
+ Label* allocation_memento_present = NULL);
+
+ void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
+ Register scratch_reg,
+ Label* memento_found) {
+ Label no_memento_found;
+ TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
+ &no_memento_found, eq, memento_found);
+ bind(&no_memento_found);
+ }
+
private:
void CallCFunctionHelper(Register function,
int num_reg_arguments,
@@ -1501,9 +1614,9 @@ class MacroAssembler: public Assembler {
// This handle will be patched with the code object on installation.
Handle<Object> code_object_;
- // Needs access to SafepointRegisterStackIndex for optimized frame
+ // Needs access to SafepointRegisterStackIndex for compiled frame
// traversal.
- friend class OptimizedFrame;
+ friend class StandardFrame;
};
@@ -1532,7 +1645,6 @@ class CodePatcher {
private:
byte* address_; // The address of the code being patched.
- int instructions_; // Number of instructions of the expected patch size.
int size_; // Number of bytes of the expected patch size.
MacroAssembler masm_; // Macro assembler used to generate the code.
};
diff --git a/deps/v8/src/mips/regexp-macro-assembler-mips.cc b/deps/v8/src/mips/regexp-macro-assembler-mips.cc
index 672ba0eee..49dec3c02 100644
--- a/deps/v8/src/mips/regexp-macro-assembler-mips.cc
+++ b/deps/v8/src/mips/regexp-macro-assembler-mips.cc
@@ -27,7 +27,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_MIPS)
+#if V8_TARGET_ARCH_MIPS
#include "unicode.h"
#include "log.h"
@@ -122,7 +122,7 @@ RegExpMacroAssemblerMIPS::RegExpMacroAssemblerMIPS(
int registers_to_save,
Zone* zone)
: NativeRegExpMacroAssembler(zone),
- masm_(new MacroAssembler(Isolate::Current(), NULL, kRegExpCodeSize)),
+ masm_(new MacroAssembler(zone->isolate(), NULL, kRegExpCodeSize)),
mode_(mode),
num_registers_(registers_to_save),
num_saved_registers_(registers_to_save),
@@ -235,55 +235,6 @@ void RegExpMacroAssemblerMIPS::CheckCharacterLT(uc16 limit, Label* on_less) {
}
-void RegExpMacroAssemblerMIPS::CheckCharacters(Vector<const uc16> str,
- int cp_offset,
- Label* on_failure,
- bool check_end_of_string) {
- if (on_failure == NULL) {
- // Instead of inlining a backtrack for each test, (re)use the global
- // backtrack target.
- on_failure = &backtrack_label_;
- }
-
- if (check_end_of_string) {
- // Is last character of required match inside string.
- CheckPosition(cp_offset + str.length() - 1, on_failure);
- }
-
- __ Addu(a0, end_of_input_address(), Operand(current_input_offset()));
- if (cp_offset != 0) {
- int byte_offset = cp_offset * char_size();
- __ Addu(a0, a0, Operand(byte_offset));
- }
-
- // a0 : Address of characters to match against str.
- int stored_high_byte = 0;
- for (int i = 0; i < str.length(); i++) {
- if (mode_ == ASCII) {
- __ lbu(a1, MemOperand(a0, 0));
- __ addiu(a0, a0, char_size());
- ASSERT(str[i] <= String::kMaxAsciiCharCode);
- BranchOrBacktrack(on_failure, ne, a1, Operand(str[i]));
- } else {
- __ lhu(a1, MemOperand(a0, 0));
- __ addiu(a0, a0, char_size());
- uc16 match_char = str[i];
- int match_high_byte = (match_char >> 8);
- if (match_high_byte == 0) {
- BranchOrBacktrack(on_failure, ne, a1, Operand(str[i]));
- } else {
- if (match_high_byte != stored_high_byte) {
- __ li(a2, Operand(match_high_byte));
- stored_high_byte = match_high_byte;
- }
- __ Addu(a3, a2, Operand(match_char & 0xff));
- BranchOrBacktrack(on_failure, ne, a1, Operand(a3));
- }
- }
- }
-}
-
-
void RegExpMacroAssemblerMIPS::CheckGreedyLoop(Label* on_equal) {
Label backtrack_non_equal;
__ lw(a0, MemOperand(backtrack_stackpointer(), 0));
@@ -341,7 +292,13 @@ void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase(
__ Or(t0, t0, Operand(0x20)); // Also convert input character.
__ Branch(&fail, ne, t0, Operand(a3));
__ Subu(a3, a3, Operand('a'));
- __ Branch(&fail, hi, a3, Operand('z' - 'a')); // Is a3 a lowercase letter?
+ __ Branch(&loop_check, ls, a3, Operand('z' - 'a'));
+ // Latin-1: Check for values in range [224,254] but not 247.
+ __ Subu(a3, a3, Operand(224 - 'a'));
+ // Not Latin-1 letters if outside this range.
+ __ Branch(&fail, hi, a3, Operand(254 - 224));
+ // Check for 247.
+ __ Branch(&fail, eq, a3, Operand(247 - 224));
__ bind(&loop_check);
__ Branch(&loop, lt, a0, Operand(a1));
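The hunk above widens the case-insensitive back-reference comparison from ASCII to Latin-1: after OR-ing both characters with 0x20 and subtracting 'a', a character passes if it falls in 'a'..'z' or, once rebased by 224, in the Latin-1 letter block [224, 254] with 247 (the division sign) excluded. The following standalone C++ sketch restates that predicate for illustration only; it is not part of the patch.

    #include <cstdint>

    // Sketch of the Latin-1 test emitted above. 'c' is assumed to have been
    // lower-cased already by OR-ing with 0x20, as in the generated code.
    static bool IsLatin1CaseFoldableLetter(uint8_t c) {
      uint8_t rebased = static_cast<uint8_t>(c - 'a');
      if (rebased <= 'z' - 'a') return true;                  // 'a'..'z'
      rebased = static_cast<uint8_t>(rebased - (224 - 'a'));  // now c - 224
      if (rebased > 254 - 224) return false;                  // outside [224, 254]
      return rebased != 247 - 224;                            // 0xF7 is not a letter
    }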
@@ -382,7 +339,7 @@ void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase(
// Address of current input position.
__ Addu(a1, current_input_offset(), Operand(end_of_input_address()));
// Isolate.
- __ li(a3, Operand(ExternalReference::isolate_address()));
+ __ li(a3, Operand(ExternalReference::isolate_address(masm_->isolate())));
{
AllowExternalCallThatCantCauseGC scope(masm_);
@@ -511,7 +468,7 @@ void RegExpMacroAssemblerMIPS::CheckBitInTable(
Handle<ByteArray> table,
Label* on_bit_set) {
__ li(a0, Operand(table));
- if (mode_ != ASCII || kTableMask != String::kMaxAsciiCharCode) {
+ if (mode_ != ASCII || kTableMask != String::kMaxOneByteCharCode) {
__ And(a1, current_character(), Operand(kTableSize - 1));
__ Addu(a0, a0, a1);
} else {
@@ -531,25 +488,20 @@ bool RegExpMacroAssemblerMIPS::CheckSpecialCharacterClass(uc16 type,
case 's':
// Match space-characters.
if (mode_ == ASCII) {
- // ASCII space characters are '\t'..'\r' and ' '.
+ // One byte space characters are '\t'..'\r', ' ' and \u00a0.
Label success;
__ Branch(&success, eq, current_character(), Operand(' '));
// Check range 0x09..0x0d.
__ Subu(a0, current_character(), Operand('\t'));
- BranchOrBacktrack(on_no_match, hi, a0, Operand('\r' - '\t'));
+ __ Branch(&success, ls, a0, Operand('\r' - '\t'));
+ // \u00a0 (NBSP).
+ BranchOrBacktrack(on_no_match, ne, a0, Operand(0x00a0 - '\t'));
__ bind(&success);
return true;
}
return false;
case 'S':
- // Match non-space characters.
- if (mode_ == ASCII) {
- // ASCII space characters are '\t'..'\r' and ' '.
- BranchOrBacktrack(on_no_match, eq, current_character(), Operand(' '));
- __ Subu(a0, current_character(), Operand('\t'));
- BranchOrBacktrack(on_no_match, ls, a0, Operand('\r' - '\t'));
- return true;
- }
+ // The emitted code for generic character classes is good enough.
return false;
case 'd':
// Match ASCII digits ('0'..'9').
@@ -900,7 +852,7 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
__ PrepareCallCFunction(num_arguments, a0);
__ mov(a0, backtrack_stackpointer());
__ Addu(a1, frame_pointer(), Operand(kStackHighEnd));
- __ li(a2, Operand(ExternalReference::isolate_address()));
+ __ li(a2, Operand(ExternalReference::isolate_address(masm_->isolate())));
ExternalReference grow_stack =
ExternalReference::re_grow_stack(masm_->isolate());
__ CallCFunction(grow_stack, num_arguments);
@@ -928,10 +880,9 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
CodeDesc code_desc;
masm_->GetCode(&code_desc);
- Handle<Code> code = FACTORY->NewCode(code_desc,
- Code::ComputeFlags(Code::REGEXP),
- masm_->CodeObject());
- LOG(Isolate::Current(), RegExpCodeCreateEvent(*code, *source));
+ Handle<Code> code = isolate()->factory()->NewCode(
+ code_desc, Code::ComputeFlags(Code::REGEXP), masm_->CodeObject());
+ LOG(masm_->isolate(), RegExpCodeCreateEvent(*code, *source));
return Handle<HeapObject>::cast(code);
}
@@ -1004,6 +955,7 @@ void RegExpMacroAssemblerMIPS::PushBacktrack(Label* label) {
int target = label->pos();
__ li(a0, Operand(target + Code::kHeaderSize - kHeapObjectTag));
} else {
+ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
Label after_constant;
__ Branch(&after_constant);
int offset = masm_->pc_offset();
@@ -1111,15 +1063,56 @@ bool RegExpMacroAssemblerMIPS::CanReadUnaligned() {
// Private methods:
void RegExpMacroAssemblerMIPS::CallCheckStackGuardState(Register scratch) {
- static const int num_arguments = 3;
- __ PrepareCallCFunction(num_arguments, scratch);
+ int stack_alignment = OS::ActivationFrameAlignment();
+
+ // Align the stack pointer and save the original sp value on the stack.
+ __ mov(scratch, sp);
+ __ Subu(sp, sp, Operand(kPointerSize));
+ ASSERT(IsPowerOf2(stack_alignment));
+ __ And(sp, sp, Operand(-stack_alignment));
+ __ sw(scratch, MemOperand(sp));
+
__ mov(a2, frame_pointer());
// Code* of self.
__ li(a1, Operand(masm_->CodeObject()), CONSTANT_SIZE);
- // a0 becomes return address pointer.
+
+ // We need to make room for the return address on the stack.
+ ASSERT(IsAligned(stack_alignment, kPointerSize));
+ __ Subu(sp, sp, Operand(stack_alignment));
+
+ // The stack pointer now points to the cell where the return address goes.
+ // Arguments are in registers, meaning we treat the return address as
+ // argument 5. Since DirectCEntryStub will handle allocating space for the C
+ // argument slots, we don't need to care about that here. This is how the
+ // stack will look (sp meaning the value of sp at this moment):
+ // [sp + 3] - empty slot if needed for alignment.
+ // [sp + 2] - saved sp.
+ // [sp + 1] - second word reserved for return value.
+ // [sp + 0] - first word reserved for return value.
+
+ // a0 will point to the return address, placed by DirectCEntry.
+ __ mov(a0, sp);
+
ExternalReference stack_guard_check =
ExternalReference::re_check_stack_guard_state(masm_->isolate());
- CallCFunctionUsingStub(stack_guard_check, num_arguments);
+ __ li(t9, Operand(stack_guard_check));
+ DirectCEntryStub stub;
+ stub.GenerateCall(masm_, t9);
+
+ // DirectCEntryStub allocated space for the C argument slots, so we have to
+ // drop them, along with the return address, by reloading the saved sp.
+ // At this point the stack must look like this:
+ // [sp + 7] - empty slot if needed for alignment.
+ // [sp + 6] - saved sp.
+ // [sp + 5] - second word reserved for return value.
+ // [sp + 4] - first word reserved for return value.
+ // [sp + 3] - C argument slot.
+ // [sp + 2] - C argument slot.
+ // [sp + 1] - C argument slot.
+ // [sp + 0] - C argument slot.
+ __ lw(sp, MemOperand(sp, stack_alignment + kCArgsSlotsSize));
+
+ __ li(code_pointer(), Operand(masm_->CodeObject()));
}
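For readers tracing the bookkeeping in CallCheckStackGuardState above: the code reserves one word, rounds sp down to the OS activation-frame alignment, stashes the original sp in that reserved slot, and the final lw restores it after the call. A hedged host-side sketch of just the alignment arithmetic, with kPointerSize taken as 4 purely for illustration:

    #include <cassert>
    #include <cstdint>

    // Illustrative only: mirrors the sp manipulation done with Subu/And above.
    uint32_t AlignSpAndRememberOriginal(uint32_t sp, uint32_t stack_alignment,
                                        uint32_t* saved_sp) {
      const uint32_t kPointerSize = 4;                         // assumption for MIPS32
      assert((stack_alignment & (stack_alignment - 1)) == 0);  // power of two
      *saved_sp = sp;                                          // __ mov(scratch, sp)
      sp -= kPointerSize;                                      // make room for saved sp
      sp &= ~(stack_alignment - 1);                            // __ And(sp, sp, -alignment)
      // The generated code then stores *saved_sp at [sp] so a single lw can
      // undo both the alignment and the reserved slots later.
      return sp;
    }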
@@ -1134,7 +1127,6 @@ int RegExpMacroAssemblerMIPS::CheckStackGuardState(Address* return_address,
Code* re_code,
Address re_frame) {
Isolate* isolate = frame_entry<Isolate*>(re_frame, kIsolate);
- ASSERT(isolate == Isolate::Current());
if (isolate->stack_guard()->IsStackOverflow()) {
isolate->StackOverflow();
return EXCEPTION;
@@ -1155,7 +1147,7 @@ int RegExpMacroAssemblerMIPS::CheckStackGuardState(Address* return_address,
Handle<String> subject(frame_entry<String*>(re_frame, kInputString));
// Current string.
- bool is_ascii = subject->IsAsciiRepresentationUnderneath();
+ bool is_ascii = subject->IsOneByteRepresentationUnderneath();
ASSERT(re_code->instruction_start() <= *return_address);
ASSERT(*return_address <=
@@ -1186,7 +1178,7 @@ int RegExpMacroAssemblerMIPS::CheckStackGuardState(Address* return_address,
}
// String might have changed.
- if (subject_tmp->IsAsciiRepresentation() != is_ascii) {
+ if (subject_tmp->IsOneByteRepresentation() != is_ascii) {
// If we changed between an ASCII and an UC16 string, the specialized
// code cannot be used, and we need to restart regexp matching from
// scratch (including, potentially, compiling a new version of the code).
@@ -1325,21 +1317,6 @@ void RegExpMacroAssemblerMIPS::CheckStackLimit() {
}
-void RegExpMacroAssemblerMIPS::CallCFunctionUsingStub(
- ExternalReference function,
- int num_arguments) {
- // Must pass all arguments in registers. The stub pushes on the stack.
- ASSERT(num_arguments <= 4);
- __ li(code_pointer(), Operand(function));
- RegExpCEntryStub stub;
- __ CallStub(&stub);
- if (OS::ActivationFrameAlignment() != 0) {
- __ lw(sp, MemOperand(sp, 16));
- }
- __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
-}
-
-
void RegExpMacroAssemblerMIPS::LoadCurrentCharacterUnchecked(int cp_offset,
int characters) {
Register offset = current_input_offset();
@@ -1361,23 +1338,6 @@ void RegExpMacroAssemblerMIPS::LoadCurrentCharacterUnchecked(int cp_offset,
}
-void RegExpCEntryStub::Generate(MacroAssembler* masm_) {
- int stack_alignment = OS::ActivationFrameAlignment();
- if (stack_alignment < kPointerSize) stack_alignment = kPointerSize;
- // Stack is already aligned for call, so decrement by alignment
- // to make room for storing the return address.
- __ Subu(sp, sp, Operand(stack_alignment + kCArgsSlotsSize));
- const int return_address_offset = kCArgsSlotsSize;
- __ Addu(a0, sp, return_address_offset);
- __ sw(ra, MemOperand(a0, 0));
- __ mov(t9, t1);
- __ Call(t9);
- __ lw(ra, MemOperand(sp, return_address_offset));
- __ Addu(sp, sp, Operand(stack_alignment + kCArgsSlotsSize));
- __ Jump(ra);
-}
-
-
#undef __
#endif // V8_INTERPRETED_REGEXP
diff --git a/deps/v8/src/mips/regexp-macro-assembler-mips.h b/deps/v8/src/mips/regexp-macro-assembler-mips.h
index 8dd52a484..063582c64 100644
--- a/deps/v8/src/mips/regexp-macro-assembler-mips.h
+++ b/deps/v8/src/mips/regexp-macro-assembler-mips.h
@@ -55,10 +55,6 @@ class RegExpMacroAssemblerMIPS: public NativeRegExpMacroAssembler {
Label* on_equal);
virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
virtual void CheckCharacterLT(uc16 limit, Label* on_less);
- virtual void CheckCharacters(Vector<const uc16> str,
- int cp_offset,
- Label* on_failure,
- bool check_end_of_string);
// A "greedy loop" is a loop that is both greedy and with a simple
// body. It has a particularly simple implementation.
virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
@@ -221,14 +217,7 @@ class RegExpMacroAssemblerMIPS: public NativeRegExpMacroAssembler {
// and increments it by a word size.
inline void Pop(Register target);
- // Calls a C function and cleans up the frame alignment done by
- // by FrameAlign. The called function *is* allowed to trigger a garbage
- // collection, but may not take more than four arguments (no arguments
- // passed on the stack), and the first argument will be a pointer to the
- // return address.
- inline void CallCFunctionUsingStub(ExternalReference function,
- int num_arguments);
-
+ Isolate* isolate() const { return masm_->isolate(); }
MacroAssembler* masm_;
diff --git a/deps/v8/src/mips/simulator-mips.cc b/deps/v8/src/mips/simulator-mips.cc
index cf87f9360..5a96efe9c 100644
--- a/deps/v8/src/mips/simulator-mips.cc
+++ b/deps/v8/src/mips/simulator-mips.cc
@@ -26,12 +26,12 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <stdlib.h>
-#include <math.h>
#include <limits.h>
+#include <cmath>
#include <cstdarg>
#include "v8.h"
-#if defined(V8_TARGET_ARCH_MIPS)
+#if V8_TARGET_ARCH_MIPS
#include "cpu.h"
#include "disasm.h"
@@ -132,8 +132,8 @@ void MipsDebugger::Stop(Instruction* instr) {
ASSERT(msg != NULL);
// Update this stop description.
- if (!watched_stops[code].desc) {
- watched_stops[code].desc = msg;
+ if (!watched_stops_[code].desc) {
+ watched_stops_[code].desc = msg;
}
if (strlen(msg) > 0) {
@@ -163,8 +163,8 @@ void MipsDebugger::Stop(Instruction* instr) {
char* msg = *reinterpret_cast<char**>(sim_->get_pc() +
Instruction::kInstrSize);
// Update this stop description.
- if (!sim_->watched_stops[code].desc) {
- sim_->watched_stops[code].desc = msg;
+ if (!sim_->watched_stops_[code].desc) {
+ sim_->watched_stops_[code].desc = msg;
}
PrintF("Simulator hit %s (%u)\n", msg, code);
sim_->set_pc(sim_->get_pc() + 2 * Instruction::kInstrSize);
@@ -513,7 +513,7 @@ void MipsDebugger::Debug() {
int32_t words;
if (argc == next_arg) {
words = 10;
- } else if (argc == next_arg + 1) {
+ } else {
if (!GetValue(argv[next_arg], &words)) {
words = 10;
}
@@ -526,7 +526,7 @@ void MipsDebugger::Debug() {
HeapObject* obj = reinterpret_cast<HeapObject*>(*cur);
int value = *cur;
Heap* current_heap = v8::internal::Isolate::Current()->heap();
- if (current_heap->Contains(obj) || ((value & 1) == 0)) {
+ if (((value & 1) == 0) || current_heap->Contains(obj)) {
PrintF(" (");
if ((value & 1) == 0) {
PrintF("smi %d", value / 2);
@@ -867,7 +867,7 @@ void Simulator::CheckICache(v8::internal::HashMap* i_cache,
Instruction::kInstrSize) == 0);
} else {
// Cache miss. Load memory into the cache.
- memcpy(cached_line, line, CachePage::kLineLength);
+ OS::MemCopy(cached_line, line, CachePage::kLineLength);
*cache_valid_byte = CachePage::LINE_VALID;
}
}
@@ -1016,6 +1016,13 @@ void Simulator::set_register(int reg, int32_t value) {
}
+void Simulator::set_dw_register(int reg, const int* dbl) {
+ ASSERT((reg >= 0) && (reg < kNumSimuRegisters));
+ registers_[reg] = dbl[0];
+ registers_[reg + 1] = dbl[1];
+}
+
+
void Simulator::set_fpu_register(int fpureg, int32_t value) {
ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters));
FPUregisters_[fpureg] = value;
@@ -1045,6 +1052,19 @@ int32_t Simulator::get_register(int reg) const {
}
+double Simulator::get_double_from_register_pair(int reg) {
+ ASSERT((reg >= 0) && (reg < kNumSimuRegisters) && ((reg % 2) == 0));
+
+ double dm_val = 0.0;
+ // Read the bits from the unsigned integer registers_[] array
+ // into the double precision floating point value and return it.
+ char buffer[2 * sizeof(registers_[0])];
+ OS::MemCopy(buffer, &registers_[reg], 2 * sizeof(registers_[0]));
+ OS::MemCopy(&dm_val, buffer, 2 * sizeof(registers_[0]));
+ return(dm_val);
+}
+
+
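The new get_double_from_register_pair reassembles a double from two consecutive 32-bit simulator registers by copying bytes through a buffer, which avoids strict-aliasing casts. A minimal sketch of the same idiom (word order here assumes the little-endian layout the simulator uses; the helper name is ours, not V8's):

    #include <cstdint>
    #include <cstring>

    // Illustrative: rebuild a double from an even/odd register pair the way
    // get_double_from_register_pair does, without type-punning casts.
    double DoubleFromWordPair(int32_t lo, int32_t hi) {
      int32_t words[2] = { lo, hi };
      double result = 0.0;
      static_assert(sizeof(words) == sizeof(result), "expect a 64-bit double");
      std::memcpy(&result, words, sizeof(result));  // byte-wise copy only
      return result;
    }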
int32_t Simulator::get_fpu_register(int fpureg) const {
ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters));
return FPUregisters_[fpureg];
@@ -1071,12 +1091,14 @@ double Simulator::get_fpu_register_double(int fpureg) const {
}
-// For use in calls that take two double values, constructed either
+// Runtime FP routines take up to two double arguments and zero
+// or one integer argument. All are constructed here
+// from a0-a3 or f12 and f14.
-void Simulator::GetFpArgs(double* x, double* y) {
+void Simulator::GetFpArgs(double* x, double* y, int32_t* z) {
if (!IsMipsSoftFloatABI) {
*x = get_fpu_register_double(12);
*y = get_fpu_register_double(14);
+ *z = get_register(a2);
} else {
// We use a char buffer to get around the strict-aliasing rules which
// otherwise allow the compiler to optimize away the copy.
@@ -1086,53 +1108,14 @@ void Simulator::GetFpArgs(double* x, double* y) {
// Registers a0 and a1 -> x.
reg_buffer[0] = get_register(a0);
reg_buffer[1] = get_register(a1);
- memcpy(x, buffer, sizeof(buffer));
-
+ OS::MemCopy(x, buffer, sizeof(buffer));
// Registers a2 and a3 -> y.
reg_buffer[0] = get_register(a2);
reg_buffer[1] = get_register(a3);
- memcpy(y, buffer, sizeof(buffer));
- }
-}
-
-
-// For use in calls that take one double value, constructed either
-// from a0 and a1 or f12.
-void Simulator::GetFpArgs(double* x) {
- if (!IsMipsSoftFloatABI) {
- *x = get_fpu_register_double(12);
- } else {
- // We use a char buffer to get around the strict-aliasing rules which
- // otherwise allow the compiler to optimize away the copy.
- char buffer[sizeof(*x)];
- int32_t* reg_buffer = reinterpret_cast<int32_t*>(buffer);
- // Registers a0 and a1 -> x.
- reg_buffer[0] = get_register(a0);
- reg_buffer[1] = get_register(a1);
- memcpy(x, buffer, sizeof(buffer));
- }
-}
-
-
-// For use in calls that take one double value constructed either
-// from a0 and a1 or f12 and one integer value.
-void Simulator::GetFpArgs(double* x, int32_t* y) {
- if (!IsMipsSoftFloatABI) {
- *x = get_fpu_register_double(12);
- *y = get_register(a2);
- } else {
- // We use a char buffer to get around the strict-aliasing rules which
- // otherwise allow the compiler to optimize away the copy.
- char buffer[sizeof(*x)];
- int32_t* reg_buffer = reinterpret_cast<int32_t*>(buffer);
- // Registers 0 and 1 -> x.
- reg_buffer[0] = get_register(a0);
- reg_buffer[1] = get_register(a1);
- memcpy(x, buffer, sizeof(buffer));
-
- // Register 2 -> y.
+ OS::MemCopy(y, buffer, sizeof(buffer));
+ // Register 2 -> z.
reg_buffer[0] = get_register(a2);
- memcpy(y, buffer, sizeof(*y));
+ OS::MemCopy(z, buffer, sizeof(*z));
}
}
@@ -1144,7 +1127,7 @@ void Simulator::SetFpResult(const double& result) {
} else {
char buffer[2 * sizeof(registers_[0])];
int32_t* reg_buffer = reinterpret_cast<int32_t*>(buffer);
- memcpy(buffer, &result, sizeof(buffer));
+ OS::MemCopy(buffer, &result, sizeof(buffer));
// Copy result to v0 and v1.
set_register(v0, reg_buffer[0]);
set_register(v1, reg_buffer[1]);
@@ -1172,7 +1155,7 @@ bool Simulator::test_fcsr_bit(uint32_t cc) {
bool Simulator::set_fcsr_round_error(double original, double rounded) {
bool ret = false;
- if (!isfinite(original) || !isfinite(rounded)) {
+ if (!std::isfinite(original) || !std::isfinite(rounded)) {
set_fcsr_bit(kFCSRInvalidOpFlagBit, true);
ret = true;
}
@@ -1395,18 +1378,22 @@ typedef int64_t (*SimulatorRuntimeCall)(int32_t arg0,
int32_t arg3,
int32_t arg4,
int32_t arg5);
-typedef double (*SimulatorRuntimeFPCall)(int32_t arg0,
- int32_t arg1,
- int32_t arg2,
- int32_t arg3);
+
+// These prototypes handle the four types of FP calls.
+typedef int64_t (*SimulatorRuntimeCompareCall)(double darg0, double darg1);
+typedef double (*SimulatorRuntimeFPFPCall)(double darg0, double darg1);
+typedef double (*SimulatorRuntimeFPCall)(double darg0);
+typedef double (*SimulatorRuntimeFPIntCall)(double darg0, int32_t arg0);
// This signature supports direct call in to API function native callback
// (refer to InvocationCallback in v8.h).
-typedef v8::Handle<v8::Value> (*SimulatorRuntimeDirectApiCall)(int32_t arg0);
+typedef void (*SimulatorRuntimeDirectApiCall)(int32_t arg0);
+typedef void (*SimulatorRuntimeProfilingApiCall)(int32_t arg0, int32_t arg1);
// This signature supports direct call to accessor getter callback.
-typedef v8::Handle<v8::Value> (*SimulatorRuntimeDirectGetterCall)(int32_t arg0,
- int32_t arg1);
+typedef void (*SimulatorRuntimeDirectGetterCall)(int32_t arg0, int32_t arg1);
+typedef void (*SimulatorRuntimeProfilingGetterCall)(
+ int32_t arg0, int32_t arg1, int32_t arg2);
// Software interrupt instructions are used by the simulator to call into the
// C-based V8 runtime. They are also used for debugging with simulator.
@@ -1475,69 +1462,117 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
// simulator. Soft-float has additional abstraction of ExternalReference,
// to support serialization.
if (fp_call) {
- SimulatorRuntimeFPCall target =
- reinterpret_cast<SimulatorRuntimeFPCall>(external);
+ double dval0, dval1; // one or two double parameters
+ int32_t ival; // zero or one integer parameter
+ int64_t iresult = 0; // integer return value
+ double dresult = 0; // double return value
+ GetFpArgs(&dval0, &dval1, &ival);
+ SimulatorRuntimeCall generic_target =
+ reinterpret_cast<SimulatorRuntimeCall>(external);
if (::v8::internal::FLAG_trace_sim) {
- double dval0, dval1;
- int32_t ival;
switch (redirection->type()) {
case ExternalReference::BUILTIN_FP_FP_CALL:
case ExternalReference::BUILTIN_COMPARE_CALL:
- GetFpArgs(&dval0, &dval1);
PrintF("Call to host function at %p with args %f, %f",
- FUNCTION_ADDR(target), dval0, dval1);
+ FUNCTION_ADDR(generic_target), dval0, dval1);
break;
case ExternalReference::BUILTIN_FP_CALL:
- GetFpArgs(&dval0);
PrintF("Call to host function at %p with arg %f",
- FUNCTION_ADDR(target), dval0);
+ FUNCTION_ADDR(generic_target), dval0);
break;
case ExternalReference::BUILTIN_FP_INT_CALL:
- GetFpArgs(&dval0, &ival);
PrintF("Call to host function at %p with args %f, %d",
- FUNCTION_ADDR(target), dval0, ival);
+ FUNCTION_ADDR(generic_target), dval0, ival);
break;
default:
UNREACHABLE();
break;
}
}
- if (redirection->type() != ExternalReference::BUILTIN_COMPARE_CALL) {
+ switch (redirection->type()) {
+ case ExternalReference::BUILTIN_COMPARE_CALL: {
+ SimulatorRuntimeCompareCall target =
+ reinterpret_cast<SimulatorRuntimeCompareCall>(external);
+ iresult = target(dval0, dval1);
+ set_register(v0, static_cast<int32_t>(iresult));
+ set_register(v1, static_cast<int32_t>(iresult >> 32));
+ break;
+ }
+ case ExternalReference::BUILTIN_FP_FP_CALL: {
+ SimulatorRuntimeFPFPCall target =
+ reinterpret_cast<SimulatorRuntimeFPFPCall>(external);
+ dresult = target(dval0, dval1);
+ SetFpResult(dresult);
+ break;
+ }
+ case ExternalReference::BUILTIN_FP_CALL: {
SimulatorRuntimeFPCall target =
- reinterpret_cast<SimulatorRuntimeFPCall>(external);
- double result = target(arg0, arg1, arg2, arg3);
- SetFpResult(result);
- } else {
- SimulatorRuntimeCall target =
- reinterpret_cast<SimulatorRuntimeCall>(external);
- uint64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5);
- int32_t gpreg_pair[2];
- memcpy(&gpreg_pair[0], &result, 2 * sizeof(int32_t));
- set_register(v0, gpreg_pair[0]);
- set_register(v1, gpreg_pair[1]);
+ reinterpret_cast<SimulatorRuntimeFPCall>(external);
+ dresult = target(dval0);
+ SetFpResult(dresult);
+ break;
+ }
+ case ExternalReference::BUILTIN_FP_INT_CALL: {
+ SimulatorRuntimeFPIntCall target =
+ reinterpret_cast<SimulatorRuntimeFPIntCall>(external);
+ dresult = target(dval0, ival);
+ SetFpResult(dresult);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+ if (::v8::internal::FLAG_trace_sim) {
+ switch (redirection->type()) {
+ case ExternalReference::BUILTIN_COMPARE_CALL:
+ PrintF("Returned %08x\n", static_cast<int32_t>(iresult));
+ break;
+ case ExternalReference::BUILTIN_FP_FP_CALL:
+ case ExternalReference::BUILTIN_FP_CALL:
+ case ExternalReference::BUILTIN_FP_INT_CALL:
+ PrintF("Returned %f\n", dresult);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
}
} else if (redirection->type() == ExternalReference::DIRECT_API_CALL) {
- // See DirectCEntryStub::GenerateCall for explanation of register usage.
- SimulatorRuntimeDirectApiCall target =
- reinterpret_cast<SimulatorRuntimeDirectApiCall>(external);
if (::v8::internal::FLAG_trace_sim) {
PrintF("Call to host function at %p args %08x\n",
- FUNCTION_ADDR(target), arg1);
+ reinterpret_cast<void*>(external), arg0);
}
- v8::Handle<v8::Value> result = target(arg1);
- *(reinterpret_cast<int*>(arg0)) = (int32_t) *result;
- set_register(v0, arg0);
- } else if (redirection->type() == ExternalReference::DIRECT_GETTER_CALL) {
- // See DirectCEntryStub::GenerateCall for explanation of register usage.
- SimulatorRuntimeDirectGetterCall target =
- reinterpret_cast<SimulatorRuntimeDirectGetterCall>(external);
+ SimulatorRuntimeDirectApiCall target =
+ reinterpret_cast<SimulatorRuntimeDirectApiCall>(external);
+ target(arg0);
+ } else if (
+ redirection->type() == ExternalReference::PROFILING_API_CALL) {
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("Call to host function at %p args %08x %08x\n",
+ reinterpret_cast<void*>(external), arg0, arg1);
+ }
+ SimulatorRuntimeProfilingApiCall target =
+ reinterpret_cast<SimulatorRuntimeProfilingApiCall>(external);
+ target(arg0, arg1);
+ } else if (
+ redirection->type() == ExternalReference::DIRECT_GETTER_CALL) {
if (::v8::internal::FLAG_trace_sim) {
PrintF("Call to host function at %p args %08x %08x\n",
- FUNCTION_ADDR(target), arg1, arg2);
+ reinterpret_cast<void*>(external), arg0, arg1);
+ }
+ SimulatorRuntimeDirectGetterCall target =
+ reinterpret_cast<SimulatorRuntimeDirectGetterCall>(external);
+ target(arg0, arg1);
+ } else if (
+ redirection->type() == ExternalReference::PROFILING_GETTER_CALL) {
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("Call to host function at %p args %08x %08x %08x\n",
+ reinterpret_cast<void*>(external), arg0, arg1, arg2);
}
- v8::Handle<v8::Value> result = target(arg1, arg2);
- *(reinterpret_cast<int*>(arg0)) = (int32_t) *result;
- set_register(v0, arg0);
+ SimulatorRuntimeProfilingGetterCall target =
+ reinterpret_cast<SimulatorRuntimeProfilingGetterCall>(external);
+ target(arg0, arg1, arg2);
} else {
SimulatorRuntimeCall target =
reinterpret_cast<SimulatorRuntimeCall>(external);
@@ -1616,33 +1651,33 @@ bool Simulator::IsStopInstruction(Instruction* instr) {
bool Simulator::IsEnabledStop(uint32_t code) {
ASSERT(code <= kMaxStopCode);
ASSERT(code > kMaxWatchpointCode);
- return !(watched_stops[code].count & kStopDisabledBit);
+ return !(watched_stops_[code].count & kStopDisabledBit);
}
void Simulator::EnableStop(uint32_t code) {
if (!IsEnabledStop(code)) {
- watched_stops[code].count &= ~kStopDisabledBit;
+ watched_stops_[code].count &= ~kStopDisabledBit;
}
}
void Simulator::DisableStop(uint32_t code) {
if (IsEnabledStop(code)) {
- watched_stops[code].count |= kStopDisabledBit;
+ watched_stops_[code].count |= kStopDisabledBit;
}
}
void Simulator::IncreaseStopCounter(uint32_t code) {
ASSERT(code <= kMaxStopCode);
- if ((watched_stops[code].count & ~(1 << 31)) == 0x7fffffff) {
+ if ((watched_stops_[code].count & ~(1 << 31)) == 0x7fffffff) {
PrintF("Stop counter for code %i has overflowed.\n"
"Enabling this code and reseting the counter to 0.\n", code);
- watched_stops[code].count = 0;
+ watched_stops_[code].count = 0;
EnableStop(code);
} else {
- watched_stops[code].count++;
+ watched_stops_[code].count++;
}
}
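The watched_stops_ rename does not change the encoding: bit 31 of count marks a disabled stop and the low 31 bits count hits, with the counter reset (and the stop re-enabled) when it would overflow. A small sketch of that encoding, detached from the Simulator class and given here only as a reading aid:

    #include <cstdint>

    // Illustrative encoding of watched_stops_[code].count:
    // bit 31 = stop disabled, bits 0..30 = hit counter.
    constexpr uint32_t kStopDisabledBit = 1u << 31;

    inline bool IsEnabledStop(uint32_t count) { return (count & kStopDisabledBit) == 0; }
    inline uint32_t DisableStop(uint32_t count) { return count | kStopDisabledBit; }
    inline uint32_t EnableStop(uint32_t count) { return count & ~kStopDisabledBit; }
    inline uint32_t IncreaseStopCounter(uint32_t count) {
      // Mirrors the code above: on overflow, reset to 0, which also re-enables.
      if ((count & ~kStopDisabledBit) == 0x7fffffff) return 0;
      return count + 1;
    }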
@@ -1657,12 +1692,12 @@ void Simulator::PrintStopInfo(uint32_t code) {
return;
}
const char* state = IsEnabledStop(code) ? "Enabled" : "Disabled";
- int32_t count = watched_stops[code].count & ~kStopDisabledBit;
+ int32_t count = watched_stops_[code].count & ~kStopDisabledBit;
// Don't print the state of unused breakpoints.
if (count != 0) {
- if (watched_stops[code].desc) {
+ if (watched_stops_[code].desc) {
PrintF("stop %i - 0x%x: \t%s, \tcounter = %i, \t%s\n",
- code, code, state, count, watched_stops[code].desc);
+ code, code, state, count, watched_stops_[code].desc);
} else {
PrintF("stop %i - 0x%x: \t%s, \tcounter = %i\n",
code, code, state, count);
@@ -1740,6 +1775,8 @@ void Simulator::ConfigureTypeRegister(Instruction* instr,
UNIMPLEMENTED_MIPS();
};
break;
+ case COP1X:
+ break;
case SPECIAL:
switch (instr->FunctionFieldRaw()) {
case JR:
@@ -1929,6 +1966,7 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
const uint32_t rt_u = static_cast<uint32_t>(rt);
const int32_t rd_reg = instr->RdValue();
+ const int32_t fr_reg = instr->FrValue();
const int32_t fs_reg = instr->FsValue();
const int32_t ft_reg = instr->FtValue();
const int32_t fd_reg = instr->FdValue();
@@ -2032,7 +2070,7 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
set_fpu_register_double(fd_reg, fs / ft);
break;
case ABS_D:
- set_fpu_register_double(fd_reg, fs < 0 ? -fs : fs);
+ set_fpu_register_double(fd_reg, fabs(fs));
break;
case MOV_D:
set_fpu_register_double(fd_reg, fs);
@@ -2044,25 +2082,28 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
set_fpu_register_double(fd_reg, sqrt(fs));
break;
case C_UN_D:
- set_fcsr_bit(fcsr_cc, isnan(fs) || isnan(ft));
+ set_fcsr_bit(fcsr_cc, std::isnan(fs) || std::isnan(ft));
break;
case C_EQ_D:
set_fcsr_bit(fcsr_cc, (fs == ft));
break;
case C_UEQ_D:
- set_fcsr_bit(fcsr_cc, (fs == ft) || (isnan(fs) || isnan(ft)));
+ set_fcsr_bit(fcsr_cc,
+ (fs == ft) || (std::isnan(fs) || std::isnan(ft)));
break;
case C_OLT_D:
set_fcsr_bit(fcsr_cc, (fs < ft));
break;
case C_ULT_D:
- set_fcsr_bit(fcsr_cc, (fs < ft) || (isnan(fs) || isnan(ft)));
+ set_fcsr_bit(fcsr_cc,
+ (fs < ft) || (std::isnan(fs) || std::isnan(ft)));
break;
case C_OLE_D:
set_fcsr_bit(fcsr_cc, (fs <= ft));
break;
case C_ULE_D:
- set_fcsr_bit(fcsr_cc, (fs <= ft) || (isnan(fs) || isnan(ft)));
+ set_fcsr_bit(fcsr_cc,
+ (fs <= ft) || (std::isnan(fs) || std::isnan(ft)));
break;
case CVT_W_D: // Convert double to word.
// Rounding modes are not yet supported.
@@ -2173,8 +2214,8 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
case CVT_D_L: // Mips32r2 instruction.
// Watch the signs here, we want 2 32-bit vals
// to make a signed 64-bit value.
- i64 = (uint32_t) get_fpu_register(fs_reg);
- i64 |= ((int64_t) get_fpu_register(fs_reg + 1) << 32);
+ i64 = static_cast<uint32_t>(get_fpu_register(fs_reg));
+ i64 |= static_cast<int64_t>(get_fpu_register(fs_reg + 1)) << 32;
set_fpu_register_double(fd_reg, static_cast<double>(i64));
break;
case CVT_S_L:
@@ -2190,6 +2231,19 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
UNREACHABLE();
};
break;
+ case COP1X:
+ switch (instr->FunctionFieldRaw()) {
+ case MADD_D:
+ double fr, ft, fs;
+ fr = get_fpu_register_double(fr_reg);
+ fs = get_fpu_register_double(fs_reg);
+ ft = get_fpu_register_double(ft_reg);
+ set_fpu_register_double(fd_reg, fs * ft + fr);
+ break;
+ default:
+ UNREACHABLE();
+ };
+ break;
case SPECIAL:
switch (instr->FunctionFieldRaw()) {
case JR: {
@@ -2219,10 +2273,14 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
set_register(HI, static_cast<int32_t>(u64hilo >> 32));
break;
case DIV:
- // Divide by zero was not checked in the configuration step - div and
- // divu do not raise exceptions. On division by 0, the result will
- // be UNPREDICTABLE.
- if (rt != 0) {
+ // Divide by zero and overflow were not checked in the configuration
+ // step - div and divu do not raise exceptions. On division by 0
+ // the result will be UNPREDICTABLE. On overflow (INT_MIN/-1),
+ // return INT_MIN which is what the hardware does.
+ if (rs == INT_MIN && rt == -1) {
+ set_register(LO, INT_MIN);
+ set_register(HI, 0);
+ } else if (rt != 0) {
set_register(LO, rs / rt);
set_register(HI, rs % rt);
}
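The new INT_MIN / -1 guard above keeps the simulator away from signed-overflow behaviour that is undefined in C++ while returning what the hardware produces (LO = INT_MIN, HI = 0). A hedged host-side sketch of the same decision tree; the struct and function names are invented for illustration:

    #include <climits>
    #include <cstdint>

    struct DivResult { int32_t lo; int32_t hi; bool defined; };

    // Illustrative model of the MIPS div handling above. Division by zero is
    // left UNPREDICTABLE, so it is reported as not defined here.
    DivResult MipsDiv(int32_t rs, int32_t rt) {
      if (rs == INT_MIN && rt == -1) {
        return { INT_MIN, 0, true };        // overflow: hardware yields INT_MIN, 0
      }
      if (rt != 0) {
        return { rs / rt, rs % rt, true };  // quotient in LO, remainder in HI
      }
      return { 0, 0, false };               // divide by zero: UNPREDICTABLE
    }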
@@ -2718,34 +2776,7 @@ void Simulator::Execute() {
}
-int32_t Simulator::Call(byte* entry, int argument_count, ...) {
- va_list parameters;
- va_start(parameters, argument_count);
- // Set up arguments.
-
- // First four arguments passed in registers.
- ASSERT(argument_count >= 4);
- set_register(a0, va_arg(parameters, int32_t));
- set_register(a1, va_arg(parameters, int32_t));
- set_register(a2, va_arg(parameters, int32_t));
- set_register(a3, va_arg(parameters, int32_t));
-
- // Remaining arguments passed on stack.
- int original_stack = get_register(sp);
- // Compute position of stack on entry to generated code.
- int entry_stack = (original_stack - (argument_count - 4) * sizeof(int32_t)
- - kCArgsSlotsSize);
- if (OS::ActivationFrameAlignment() != 0) {
- entry_stack &= -OS::ActivationFrameAlignment();
- }
- // Store remaining arguments on stack, from low to high memory.
- intptr_t* stack_argument = reinterpret_cast<intptr_t*>(entry_stack);
- for (int i = 4; i < argument_count; i++) {
- stack_argument[i - 4 + kCArgSlotCount] = va_arg(parameters, int32_t);
- }
- va_end(parameters);
- set_register(sp, entry_stack);
-
+void Simulator::CallInternal(byte* entry) {
// Prepare to execute the code at entry.
set_register(pc, reinterpret_cast<int32_t>(entry));
// Put down marker for end of simulation. The simulator will stop simulation
@@ -2809,6 +2840,38 @@ int32_t Simulator::Call(byte* entry, int argument_count, ...) {
set_register(gp, gp_val);
set_register(sp, sp_val);
set_register(fp, fp_val);
+}
+
+
+int32_t Simulator::Call(byte* entry, int argument_count, ...) {
+ va_list parameters;
+ va_start(parameters, argument_count);
+ // Set up arguments.
+
+ // First four arguments passed in registers.
+ ASSERT(argument_count >= 4);
+ set_register(a0, va_arg(parameters, int32_t));
+ set_register(a1, va_arg(parameters, int32_t));
+ set_register(a2, va_arg(parameters, int32_t));
+ set_register(a3, va_arg(parameters, int32_t));
+
+ // Remaining arguments passed on stack.
+ int original_stack = get_register(sp);
+ // Compute position of stack on entry to generated code.
+ int entry_stack = (original_stack - (argument_count - 4) * sizeof(int32_t)
+ - kCArgsSlotsSize);
+ if (OS::ActivationFrameAlignment() != 0) {
+ entry_stack &= -OS::ActivationFrameAlignment();
+ }
+ // Store remaining arguments on stack, from low to high memory.
+ intptr_t* stack_argument = reinterpret_cast<intptr_t*>(entry_stack);
+ for (int i = 4; i < argument_count; i++) {
+ stack_argument[i - 4 + kCArgSlotCount] = va_arg(parameters, int32_t);
+ }
+ va_end(parameters);
+ set_register(sp, entry_stack);
+
+ CallInternal(entry);
// Pop stack passed arguments.
CHECK_EQ(entry_stack, get_register(sp));
@@ -2819,6 +2882,27 @@ int32_t Simulator::Call(byte* entry, int argument_count, ...) {
}
+double Simulator::CallFP(byte* entry, double d0, double d1) {
+ if (!IsMipsSoftFloatABI) {
+ set_fpu_register_double(f12, d0);
+ set_fpu_register_double(f14, d1);
+ } else {
+ int buffer[2];
+ ASSERT(sizeof(buffer[0]) * 2 == sizeof(d0));
+ OS::MemCopy(buffer, &d0, sizeof(d0));
+ set_dw_register(a0, buffer);
+ OS::MemCopy(buffer, &d1, sizeof(d1));
+ set_dw_register(a2, buffer);
+ }
+ CallInternal(entry);
+ if (!IsMipsSoftFloatABI) {
+ return get_fpu_register_double(f0);
+ } else {
+ return get_double_from_register_pair(v0);
+ }
+}
+
+
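CallFP gives tests a direct entry point for generated code that takes two doubles: hard-float builds pass them in f12/f14 and read the result from f0, while the soft-float path splits each double across an even/odd register pair via set_dw_register. The packing half of that split looks roughly like the sketch below (the helper is hypothetical; only the memcpy idiom is taken from the patch):

    #include <cstdint>
    #include <cstring>

    // Illustrative: split a double into the two 32-bit words that CallFP feeds
    // to set_dw_register(a0, ...) and set_dw_register(a2, ...) on soft-float.
    void SplitDoubleForRegisterPair(double d, int32_t out_pair[2]) {
      static_assert(sizeof(d) == 2 * sizeof(out_pair[0]), "expect a 64-bit double");
      std::memcpy(out_pair, &d, sizeof(d));  // same buffer trick as the patch
    }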
uintptr_t Simulator::PushAddress(uintptr_t address) {
int new_sp = get_register(sp) - sizeof(uintptr_t);
uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(new_sp);
diff --git a/deps/v8/src/mips/simulator-mips.h b/deps/v8/src/mips/simulator-mips.h
index 776badc29..601cd6d99 100644
--- a/deps/v8/src/mips/simulator-mips.h
+++ b/deps/v8/src/mips/simulator-mips.h
@@ -184,7 +184,9 @@ class Simulator {
// architecture specification and is off by 8 from the currently executing
// instruction.
void set_register(int reg, int32_t value);
+ void set_dw_register(int dreg, const int* dbl);
int32_t get_register(int reg) const;
+ double get_double_from_register_pair(int reg);
// Same for FPURegisters.
void set_fpu_register(int fpureg, int32_t value);
void set_fpu_register_float(int fpureg, float value);
@@ -214,6 +216,8 @@ class Simulator {
// generated RegExp code with 7 parameters. This is a convenience function,
// which sets up the simulator state and grabs the result on return.
int32_t Call(byte* entry, int argument_count, ...);
+ // Alternative: call a 2-argument double function.
+ double CallFP(byte* entry, double d0, double d1);
// Push an address onto the JS stack.
uintptr_t PushAddress(uintptr_t address);
@@ -312,8 +316,6 @@ class Simulator {
if (instr->InstructionBits() == nopInstr) {
// Short-cut generic nop instructions. They are always valid and they
// never change the simulator state.
- set_register(pc, reinterpret_cast<int32_t>(instr) +
- Instruction::kInstrSize);
return;
}
@@ -347,12 +349,11 @@ class Simulator {
static void* RedirectExternalReference(void* external_function,
ExternalReference::Type type);
- // For use in calls that take double value arguments.
- void GetFpArgs(double* x, double* y);
- void GetFpArgs(double* x);
- void GetFpArgs(double* x, int32_t* y);
+ // Handle arguments and return value for runtime FP functions.
+ void GetFpArgs(double* x, double* y, int32_t* z);
void SetFpResult(const double& result);
+ void CallInternal(byte* entry);
// Architecture state.
// Registers.
@@ -386,14 +387,14 @@ class Simulator {
static const uint32_t kStopDisabledBit = 1 << 31;
// A stop is enabled, meaning the simulator will stop when meeting the
- // instruction, if bit 31 of watched_stops[code].count is unset.
- // The value watched_stops[code].count & ~(1 << 31) indicates how many times
+ // instruction, if bit 31 of watched_stops_[code].count is unset.
+ // The value watched_stops_[code].count & ~(1 << 31) indicates how many times
// the breakpoint was hit or gone through.
struct StopCountAndDesc {
uint32_t count;
char* desc;
};
- StopCountAndDesc watched_stops[kMaxStopCode + 1];
+ StopCountAndDesc watched_stops_[kMaxStopCode + 1];
};
diff --git a/deps/v8/src/mips/stub-cache-mips.cc b/deps/v8/src/mips/stub-cache-mips.cc
index bd15775d4..17aa4aade 100644
--- a/deps/v8/src/mips/stub-cache-mips.cc
+++ b/deps/v8/src/mips/stub-cache-mips.cc
@@ -27,7 +27,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_MIPS)
+#if V8_TARGET_ARCH_MIPS
#include "ic-inl.h"
#include "codegen.h"
@@ -117,18 +117,14 @@ static void ProbeTable(Isolate* isolate,
}
-// Helper function used to check that the dictionary doesn't contain
-// the property. This function may return false negatives, so miss_label
-// must always call a backup property check that is complete.
-// This function is safe to call if the receiver has fast properties.
-// Name must be a symbol and receiver must be a heap object.
-static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
- Label* miss_label,
- Register receiver,
- Handle<String> name,
- Register scratch0,
- Register scratch1) {
- ASSERT(name->IsSymbol());
+void StubCompiler::GenerateDictionaryNegativeLookup(MacroAssembler* masm,
+ Label* miss_label,
+ Register receiver,
+ Handle<Name> name,
+ Register scratch0,
+ Register scratch1) {
+ ASSERT(name->IsUniqueName());
+ ASSERT(!receiver.is(scratch0));
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1);
__ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
@@ -162,13 +158,13 @@ static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
__ lw(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- StringDictionaryLookupStub::GenerateNegativeLookup(masm,
- miss_label,
- &done,
- receiver,
- properties,
- name,
- scratch1);
+ NameDictionaryLookupStub::GenerateNegativeLookup(masm,
+ miss_label,
+ &done,
+ receiver,
+ properties,
+ name,
+ scratch1);
__ bind(&done);
__ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
}
@@ -217,7 +213,7 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
__ JumpIfSmi(receiver, &miss);
// Get the map of the receiver and compute the hash.
- __ lw(scratch, FieldMemOperand(name, String::kHashFieldOffset));
+ __ lw(scratch, FieldMemOperand(name, Name::kHashFieldOffset));
__ lw(at, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ Addu(scratch, scratch, at);
uint32_t mask = kPrimaryTableSize - 1;
@@ -307,26 +303,21 @@ void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
}
-// Load a fast property out of a holder object (src). In-object properties
-// are loaded directly otherwise the property is loaded from the properties
-// fixed array.
void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
Register dst,
Register src,
- Handle<JSObject> holder,
- int index) {
- // Adjust for the number of properties stored in the holder.
- index -= holder->map()->inobject_properties();
- if (index < 0) {
- // Get the property straight out of the holder.
- int offset = holder->map()->instance_size() + (index * kPointerSize);
- __ lw(dst, FieldMemOperand(src, offset));
- } else {
+ bool inobject,
+ int index,
+ Representation representation) {
+ ASSERT(!FLAG_track_double_fields || !representation.IsDouble());
+ int offset = index * kPointerSize;
+ if (!inobject) {
// Calculate the offset into the properties array.
- int offset = index * kPointerSize + FixedArray::kHeaderSize;
+ offset = offset + FixedArray::kHeaderSize;
__ lw(dst, FieldMemOperand(src, JSObject::kPropertiesOffset));
- __ lw(dst, FieldMemOperand(dst, offset));
+ src = dst;
}
+ __ lw(dst, FieldMemOperand(src, offset));
}
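After this rewrite GenerateFastPropertyLoad no longer derives anything from the holder: the caller passes an explicit inobject flag and a field index, and the emitted load reduces to one offset computation plus, for out-of-object fields, an extra load of the properties array. A trivial sketch of that offset logic (the header-size constant is passed in rather than asserted, since its value is not spelled out in this hunk):

    // Illustrative: offset used by the lw emitted in GenerateFastPropertyLoad.
    // In-object fields sit inside the JSObject instance; out-of-object fields
    // sit in the properties FixedArray, after that array's header.
    int FastPropertyLoadOffset(bool inobject, int index, int kPointerSize,
                               int fixed_array_header_size) {
      int offset = index * kPointerSize;
      if (!inobject) {
        offset += fixed_array_header_size;  // and 'src' becomes the properties array
      }
      return offset;
    }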
@@ -342,8 +333,8 @@ void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
__ Branch(miss_label, ne, scratch, Operand(JS_ARRAY_TYPE));
// Load length directly from the JS array.
+ __ Ret(USE_DELAY_SLOT);
__ lw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
- __ Ret();
}
@@ -379,30 +370,26 @@ void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
Register receiver,
Register scratch1,
Register scratch2,
- Label* miss,
- bool support_wrappers) {
+ Label* miss) {
Label check_wrapper;
// Check if the object is a string leaving the instance type in the
// scratch1 register.
- GenerateStringCheck(masm, receiver, scratch1, scratch2, miss,
- support_wrappers ? &check_wrapper : miss);
+ GenerateStringCheck(masm, receiver, scratch1, scratch2, miss, &check_wrapper);
// Load length directly from the string.
+ __ Ret(USE_DELAY_SLOT);
__ lw(v0, FieldMemOperand(receiver, String::kLengthOffset));
- __ Ret();
- if (support_wrappers) {
- // Check if the object is a JSValue wrapper.
- __ bind(&check_wrapper);
- __ Branch(miss, ne, scratch1, Operand(JS_VALUE_TYPE));
+ // Check if the object is a JSValue wrapper.
+ __ bind(&check_wrapper);
+ __ Branch(miss, ne, scratch1, Operand(JS_VALUE_TYPE));
- // Unwrap the value and check if the wrapped value is a string.
- __ lw(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset));
- GenerateStringCheck(masm, scratch1, scratch2, scratch2, miss, miss);
- __ lw(v0, FieldMemOperand(scratch1, String::kLengthOffset));
- __ Ret();
- }
+ // Unwrap the value and check if the wrapped value is a string.
+ __ lw(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset));
+ GenerateStringCheck(masm, scratch1, scratch2, scratch2, miss, miss);
+ __ Ret(USE_DELAY_SLOT);
+ __ lw(v0, FieldMemOperand(scratch1, String::kLengthOffset));
}
@@ -412,71 +399,92 @@ void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
Register scratch2,
Label* miss_label) {
__ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
+ __ Ret(USE_DELAY_SLOT);
__ mov(v0, scratch1);
- __ Ret();
}
-// Generate StoreField code, value is passed in a0 register.
-// After executing generated code, the receiver_reg and name_reg
-// may be clobbered.
-void StubCompiler::GenerateStoreField(MacroAssembler* masm,
- Handle<JSObject> object,
- int index,
- Handle<Map> transition,
- Handle<String> name,
- Register receiver_reg,
- Register name_reg,
- Register scratch1,
- Register scratch2,
- Label* miss_label) {
- // a0 : value.
- Label exit;
+void StubCompiler::GenerateCheckPropertyCell(MacroAssembler* masm,
+ Handle<JSGlobalObject> global,
+ Handle<Name> name,
+ Register scratch,
+ Label* miss) {
+ Handle<Cell> cell = JSGlobalObject::EnsurePropertyCell(global, name);
+ ASSERT(cell->value()->IsTheHole());
+ __ li(scratch, Operand(cell));
+ __ lw(scratch, FieldMemOperand(scratch, Cell::kValueOffset));
+ __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+ __ Branch(miss, ne, scratch, Operand(at));
+}
- LookupResult lookup(masm->isolate());
- object->Lookup(*name, &lookup);
- if (lookup.IsFound() && (lookup.IsReadOnly() || !lookup.IsCacheable())) {
- // In sloppy mode, we could just return the value and be done. However, we
- // might be in strict mode, where we have to throw. Since we cannot tell,
- // go into slow case unconditionally.
- __ jmp(miss_label);
- return;
+
+void StoreStubCompiler::GenerateNegativeHolderLookup(
+ MacroAssembler* masm,
+ Handle<JSObject> holder,
+ Register holder_reg,
+ Handle<Name> name,
+ Label* miss) {
+ if (holder->IsJSGlobalObject()) {
+ GenerateCheckPropertyCell(
+ masm, Handle<JSGlobalObject>::cast(holder), name, scratch1(), miss);
+ } else if (!holder->HasFastProperties() && !holder->IsJSGlobalProxy()) {
+ GenerateDictionaryNegativeLookup(
+ masm, miss, holder_reg, name, scratch1(), scratch2());
}
+}
- // Check that the map of the object hasn't changed.
- CompareMapMode mode = transition.is_null() ? ALLOW_ELEMENT_TRANSITION_MAPS
- : REQUIRE_EXACT_MAP;
- __ CheckMap(receiver_reg, scratch1, Handle<Map>(object->map()), miss_label,
- DO_SMI_CHECK, mode);
- // Perform global security token check if needed.
- if (object->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(receiver_reg, scratch1, miss_label);
- }
+// Generate StoreTransition code, value is passed in a0 register.
+// After executing generated code, the receiver_reg and name_reg
+// may be clobbered.
+void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
+ Handle<JSObject> object,
+ LookupResult* lookup,
+ Handle<Map> transition,
+ Handle<Name> name,
+ Register receiver_reg,
+ Register storage_reg,
+ Register value_reg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* miss_label,
+ Label* slow) {
+ // a0 : value.
+ Label exit;
- // Check that we are allowed to write this.
- if (!transition.is_null() && object->GetPrototype()->IsJSObject()) {
- JSObject* holder;
- if (lookup.IsFound()) {
- holder = lookup.holder();
- } else {
- // Find the top object.
- holder = *object;
- do {
- holder = JSObject::cast(holder->GetPrototype());
- } while (holder->GetPrototype()->IsJSObject());
- }
- // We need an extra register, push
- __ push(name_reg);
- Label miss_pop, done_check;
- CheckPrototypes(object, receiver_reg, Handle<JSObject>(holder), name_reg,
- scratch1, scratch2, name, &miss_pop);
- __ jmp(&done_check);
- __ bind(&miss_pop);
- __ pop(name_reg);
- __ jmp(miss_label);
- __ bind(&done_check);
- __ pop(name_reg);
+ int descriptor = transition->LastAdded();
+ DescriptorArray* descriptors = transition->instance_descriptors();
+ PropertyDetails details = descriptors->GetDetails(descriptor);
+ Representation representation = details.representation();
+ ASSERT(!representation.IsNone());
+
+ if (details.type() == CONSTANT) {
+ Handle<Object> constant(descriptors->GetValue(descriptor), masm->isolate());
+ __ LoadObject(scratch1, constant);
+ __ Branch(miss_label, ne, value_reg, Operand(scratch1));
+ } else if (FLAG_track_fields && representation.IsSmi()) {
+ __ JumpIfNotSmi(value_reg, miss_label);
+ } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
+ __ JumpIfSmi(value_reg, miss_label);
+ } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ Label do_store, heap_number;
+ __ LoadRoot(scratch3, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(storage_reg, scratch1, scratch2, scratch3, slow);
+
+ __ JumpIfNotSmi(value_reg, &heap_number);
+ __ SmiUntag(scratch1, value_reg);
+ __ mtc1(scratch1, f6);
+ __ cvt_d_w(f4, f6);
+ __ jmp(&do_store);
+
+ __ bind(&heap_number);
+ __ CheckMap(value_reg, scratch1, Heap::kHeapNumberMapRootIndex,
+ miss_label, DONT_DO_SMI_CHECK);
+ __ ldc1(f4, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
+
+ __ bind(&do_store);
+ __ sdc1(f4, FieldMemOperand(storage_reg, HeapNumber::kValueOffset));
}
// Stub never generated for non-global objects that require access
@@ -484,7 +492,8 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
// Perform map transition for the receiver if necessary.
- if (!transition.is_null() && (object->map()->unused_property_fields() == 0)) {
+ if (details.type() == FIELD &&
+ object->map()->unused_property_fields() == 0) {
// The properties must be extended before we can store the value.
// We jump to a runtime call that extends the properties array.
__ push(receiver_reg);
@@ -497,80 +506,229 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
return;
}
- if (!transition.is_null()) {
- // Update the map of the object.
- __ li(scratch1, Operand(transition));
- __ sw(scratch1, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
-
- // Update the write barrier for the map field and pass the now unused
- // name_reg as scratch register.
- __ RecordWriteField(receiver_reg,
- HeapObject::kMapOffset,
- scratch1,
- name_reg,
- kRAHasNotBeenSaved,
- kDontSaveFPRegs,
- OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
+ // Update the map of the object.
+ __ li(scratch1, Operand(transition));
+ __ sw(scratch1, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
+
+ // Update the write barrier for the map field.
+ __ RecordWriteField(receiver_reg,
+ HeapObject::kMapOffset,
+ scratch1,
+ scratch2,
+ kRAHasNotBeenSaved,
+ kDontSaveFPRegs,
+ OMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+
+ if (details.type() == CONSTANT) {
+ ASSERT(value_reg.is(a0));
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, a0);
+ return;
+ }
+
+ int index = transition->instance_descriptors()->GetFieldIndex(
+ transition->LastAdded());
+
+ // Adjust for the number of properties stored in the object. Even in the
+ // face of a transition we can use the old map here because the size of the
+ // object and the number of in-object properties is not going to change.
+ index -= object->map()->inobject_properties();
+
+ // TODO(verwaest): Share this code as a code stub.
+ SmiCheck smi_check = representation.IsTagged()
+ ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
+ if (index < 0) {
+ // Set the property straight into the object.
+ int offset = object->map()->instance_size() + (index * kPointerSize);
+ if (FLAG_track_double_fields && representation.IsDouble()) {
+ __ sw(storage_reg, FieldMemOperand(receiver_reg, offset));
+ } else {
+ __ sw(value_reg, FieldMemOperand(receiver_reg, offset));
+ }
+
+ if (!FLAG_track_fields || !representation.IsSmi()) {
+ // Update the write barrier for the array address.
+ if (!FLAG_track_double_fields || !representation.IsDouble()) {
+ __ mov(storage_reg, value_reg);
+ }
+ __ RecordWriteField(receiver_reg,
+ offset,
+ storage_reg,
+ scratch1,
+ kRAHasNotBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ smi_check);
+ }
+ } else {
+ // Write to the properties array.
+ int offset = index * kPointerSize + FixedArray::kHeaderSize;
+ // Get the properties array
+ __ lw(scratch1,
+ FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
+ if (FLAG_track_double_fields && representation.IsDouble()) {
+ __ sw(storage_reg, FieldMemOperand(scratch1, offset));
+ } else {
+ __ sw(value_reg, FieldMemOperand(scratch1, offset));
+ }
+
+ if (!FLAG_track_fields || !representation.IsSmi()) {
+ // Update the write barrier for the array address.
+ if (!FLAG_track_double_fields || !representation.IsDouble()) {
+ __ mov(storage_reg, value_reg);
+ }
+ __ RecordWriteField(scratch1,
+ offset,
+ storage_reg,
+ receiver_reg,
+ kRAHasNotBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ smi_check);
+ }
}
+ // Return the value (register v0).
+ ASSERT(value_reg.is(a0));
+ __ bind(&exit);
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, a0);
+}
+
+
+// Generate StoreField code, value is passed in a0 register.
+// When leaving generated code after success, the receiver_reg and name_reg
+// may be clobbered. Upon branch to miss_label, the receiver and name
+// registers have their original values.
+void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
+ Handle<JSObject> object,
+ LookupResult* lookup,
+ Register receiver_reg,
+ Register name_reg,
+ Register value_reg,
+ Register scratch1,
+ Register scratch2,
+ Label* miss_label) {
+ // a0 : value
+ Label exit;
+
+ // Stub never generated for non-global objects that require access
+ // checks.
+ ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
+
+ int index = lookup->GetFieldIndex().field_index();
+
// Adjust for the number of properties stored in the object. Even in the
// face of a transition we can use the old map here because the size of the
// object and the number of in-object properties is not going to change.
index -= object->map()->inobject_properties();
+ Representation representation = lookup->representation();
+ ASSERT(!representation.IsNone());
+ if (FLAG_track_fields && representation.IsSmi()) {
+ __ JumpIfNotSmi(value_reg, miss_label);
+ } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
+ __ JumpIfSmi(value_reg, miss_label);
+ } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ // Load the double storage.
+ if (index < 0) {
+ int offset = object->map()->instance_size() + (index * kPointerSize);
+ __ lw(scratch1, FieldMemOperand(receiver_reg, offset));
+ } else {
+ __ lw(scratch1,
+ FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
+ int offset = index * kPointerSize + FixedArray::kHeaderSize;
+ __ lw(scratch1, FieldMemOperand(scratch1, offset));
+ }
+
+ // Store the value into the storage.
+ Label do_store, heap_number;
+ __ JumpIfNotSmi(value_reg, &heap_number);
+ __ SmiUntag(scratch2, value_reg);
+ __ mtc1(scratch2, f6);
+ __ cvt_d_w(f4, f6);
+ __ jmp(&do_store);
+
+ __ bind(&heap_number);
+ __ CheckMap(value_reg, scratch2, Heap::kHeapNumberMapRootIndex,
+ miss_label, DONT_DO_SMI_CHECK);
+ __ ldc1(f4, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
+
+ __ bind(&do_store);
+ __ sdc1(f4, FieldMemOperand(scratch1, HeapNumber::kValueOffset));
+ // Return the value (register v0).
+ ASSERT(value_reg.is(a0));
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, a0);
+ return;
+ }
+
+ // TODO(verwaest): Share this code as a code stub.
+ SmiCheck smi_check = representation.IsTagged()
+ ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
if (index < 0) {
// Set the property straight into the object.
int offset = object->map()->instance_size() + (index * kPointerSize);
- __ sw(a0, FieldMemOperand(receiver_reg, offset));
-
- // Skip updating write barrier if storing a smi.
- __ JumpIfSmi(a0, &exit, scratch1);
-
- // Update the write barrier for the array address.
- // Pass the now unused name_reg as a scratch register.
- __ mov(name_reg, a0);
- __ RecordWriteField(receiver_reg,
- offset,
- name_reg,
- scratch1,
- kRAHasNotBeenSaved,
- kDontSaveFPRegs);
+ __ sw(value_reg, FieldMemOperand(receiver_reg, offset));
+
+ if (!FLAG_track_fields || !representation.IsSmi()) {
+ // Skip updating write barrier if storing a smi.
+ __ JumpIfSmi(value_reg, &exit);
+
+ // Update the write barrier for the array address.
+ // Pass the now unused name_reg as a scratch register.
+ __ mov(name_reg, value_reg);
+ __ RecordWriteField(receiver_reg,
+ offset,
+ name_reg,
+ scratch1,
+ kRAHasNotBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ smi_check);
+ }
} else {
// Write to the properties array.
int offset = index * kPointerSize + FixedArray::kHeaderSize;
// Get the properties array.
__ lw(scratch1,
FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
- __ sw(a0, FieldMemOperand(scratch1, offset));
-
- // Skip updating write barrier if storing a smi.
- __ JumpIfSmi(a0, &exit);
-
- // Update the write barrier for the array address.
- // Ok to clobber receiver_reg and name_reg, since we return.
- __ mov(name_reg, a0);
- __ RecordWriteField(scratch1,
- offset,
- name_reg,
- receiver_reg,
- kRAHasNotBeenSaved,
- kDontSaveFPRegs);
+ __ sw(value_reg, FieldMemOperand(scratch1, offset));
+
+ if (!FLAG_track_fields || !representation.IsSmi()) {
+ // Skip updating write barrier if storing a smi.
+ __ JumpIfSmi(value_reg, &exit);
+
+ // Update the write barrier for the array address.
+ // Ok to clobber receiver_reg and name_reg, since we return.
+ __ mov(name_reg, value_reg);
+ __ RecordWriteField(scratch1,
+ offset,
+ name_reg,
+ receiver_reg,
+ kRAHasNotBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ smi_check);
+ }
}
// Return the value (register v0).
+ ASSERT(value_reg.is(a0));
__ bind(&exit);
+ __ Ret(USE_DELAY_SLOT);
__ mov(v0, a0);
- __ Ret();
}
-void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
- ASSERT(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC);
- Handle<Code> code = (kind == Code::LOAD_IC)
- ? masm->isolate()->builtins()->LoadIC_Miss()
- : masm->isolate()->builtins()->KeyedLoadIC_Miss();
- __ Jump(code, RelocInfo::CODE_TARGET);
+void StoreStubCompiler::GenerateRestoreName(MacroAssembler* masm,
+ Label* label,
+ Handle<Name> name) {
+ if (!label->is_unused()) {
+ __ bind(label);
+ __ li(this->name(), Operand(name));
+ }
}
@@ -608,16 +766,17 @@ static void PushInterceptorArguments(MacroAssembler* masm,
Register holder,
Register name,
Handle<JSObject> holder_obj) {
+ STATIC_ASSERT(StubCache::kInterceptorArgsNameIndex == 0);
+ STATIC_ASSERT(StubCache::kInterceptorArgsInfoIndex == 1);
+ STATIC_ASSERT(StubCache::kInterceptorArgsThisIndex == 2);
+ STATIC_ASSERT(StubCache::kInterceptorArgsHolderIndex == 3);
+ STATIC_ASSERT(StubCache::kInterceptorArgsLength == 4);
__ push(name);
Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor));
Register scratch = name;
__ li(scratch, Operand(interceptor));
__ Push(scratch, receiver, holder);
- __ lw(scratch, FieldMemOperand(scratch, InterceptorInfo::kDataOffset));
- __ push(scratch);
- __ li(scratch, Operand(ExternalReference::isolate_address()));
- __ push(scratch);
}
@@ -632,7 +791,7 @@ static void CompileCallLoadPropertyWithInterceptor(
ExternalReference ref =
ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly),
masm->isolate());
- __ PrepareCEntryArgs(6);
+ __ PrepareCEntryArgs(StubCache::kInterceptorArgsLength);
__ PrepareCEntryFunction(ref);
CEntryStub stub(1);
@@ -640,8 +799,7 @@ static void CompileCallLoadPropertyWithInterceptor(
}
-static const int kFastApiCallArguments = 4;
-
+static const int kFastApiCallArguments = FunctionCallbackArguments::kArgsLength;
// Reserves space for the extra arguments to API function in the
// caller's frame.
@@ -664,40 +822,46 @@ static void FreeSpaceForFastApiCall(MacroAssembler* masm) {
static void GenerateFastApiDirectCall(MacroAssembler* masm,
const CallOptimization& optimization,
- int argc) {
+ int argc,
+ bool restore_context) {
// ----------- S t a t e -------------
- // -- sp[0] : holder (set by CheckPrototypes)
- // -- sp[4] : callee JS function
- // -- sp[8] : call data
- // -- sp[12] : isolate
- // -- sp[16] : last JS argument
+ // -- sp[0] - sp[24] : FunctionCallbackInfo, incl.
+ // : holder (set by CheckPrototypes)
+ // -- sp[28] : last JS argument
// -- ...
- // -- sp[(argc + 3) * 4] : first JS argument
- // -- sp[(argc + 4) * 4] : receiver
+ // -- sp[(argc + 6) * 4] : first JS argument
+ // -- sp[(argc + 7) * 4] : receiver
// -----------------------------------
+ typedef FunctionCallbackArguments FCA;
+ // Save calling context.
+ __ sw(cp, MemOperand(sp, FCA::kContextSaveIndex * kPointerSize));
// Get the function and setup the context.
Handle<JSFunction> function = optimization.constant_function();
__ LoadHeapObject(t1, function);
__ lw(cp, FieldMemOperand(t1, JSFunction::kContextOffset));
+ __ sw(t1, MemOperand(sp, FCA::kCalleeIndex * kPointerSize));
- // Pass the additional arguments.
+ // Construct the FunctionCallbackInfo.
Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
- Handle<Object> call_data(api_call_info->data());
+ Handle<Object> call_data(api_call_info->data(), masm->isolate());
if (masm->isolate()->heap()->InNewSpace(*call_data)) {
__ li(a0, api_call_info);
__ lw(t2, FieldMemOperand(a0, CallHandlerInfo::kDataOffset));
} else {
__ li(t2, call_data);
}
-
- __ li(t3, Operand(ExternalReference::isolate_address()));
- // Store JS function, call data and isolate.
- __ sw(t1, MemOperand(sp, 1 * kPointerSize));
- __ sw(t2, MemOperand(sp, 2 * kPointerSize));
- __ sw(t3, MemOperand(sp, 3 * kPointerSize));
+ // Store call data.
+ __ sw(t2, MemOperand(sp, FCA::kDataIndex * kPointerSize));
+ // Store isolate.
+ __ li(t3, Operand(ExternalReference::isolate_address(masm->isolate())));
+ __ sw(t3, MemOperand(sp, FCA::kIsolateIndex * kPointerSize));
+ // Store ReturnValue default and ReturnValue.
+ __ LoadRoot(t1, Heap::kUndefinedValueRootIndex);
+ __ sw(t1, MemOperand(sp, FCA::kReturnValueOffset * kPointerSize));
+ __ sw(t1, MemOperand(sp, FCA::kReturnValueDefaultValueIndex * kPointerSize));
// Prepare arguments.
- __ Addu(a2, sp, Operand(3 * kPointerSize));
+ __ Move(a2, sp);
// Allocate the v8::Arguments structure in the arguments' space since
// it's not controlled by GC.
@@ -706,37 +870,81 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(false, kApiStackSpace);
- // NOTE: the O32 abi requires a0 to hold a special pointer when returning a
- // struct from the function (which is currently the case). This means we pass
- // the first argument in a1 instead of a0. TryCallApiFunctionAndReturn
- // will handle setting up a0.
-
- // a1 = v8::Arguments&
+ // a0 = FunctionCallbackInfo&
// Arguments is built at sp + 1 (sp is a reserved spot for ra).
- __ Addu(a1, sp, kPointerSize);
-
- // v8::Arguments::implicit_args_
- __ sw(a2, MemOperand(a1, 0 * kPointerSize));
- // v8::Arguments::values_
- __ Addu(t0, a2, Operand(argc * kPointerSize));
- __ sw(t0, MemOperand(a1, 1 * kPointerSize));
- // v8::Arguments::length_ = argc
+ __ Addu(a0, sp, kPointerSize);
+ // FunctionCallbackInfo::implicit_args_
+ __ sw(a2, MemOperand(a0, 0 * kPointerSize));
+ // FunctionCallbackInfo::values_
+ __ Addu(t0, a2, Operand((kFastApiCallArguments - 1 + argc) * kPointerSize));
+ __ sw(t0, MemOperand(a0, 1 * kPointerSize));
+ // FunctionCallbackInfo::length_ = argc
__ li(t0, Operand(argc));
- __ sw(t0, MemOperand(a1, 2 * kPointerSize));
- // v8::Arguments::is_construct_call = 0
- __ sw(zero_reg, MemOperand(a1, 3 * kPointerSize));
+ __ sw(t0, MemOperand(a0, 2 * kPointerSize));
+ // FunctionCallbackInfo::is_construct_call = 0
+ __ sw(zero_reg, MemOperand(a0, 3 * kPointerSize));
const int kStackUnwindSpace = argc + kFastApiCallArguments + 1;
Address function_address = v8::ToCData<Address>(api_call_info->callback());
ApiFunction fun(function_address);
+ ExternalReference::Type type = ExternalReference::DIRECT_API_CALL;
ExternalReference ref =
ExternalReference(&fun,
- ExternalReference::DIRECT_API_CALL,
+ type,
masm->isolate());
+ Address thunk_address = FUNCTION_ADDR(&InvokeFunctionCallback);
+ ExternalReference::Type thunk_type = ExternalReference::PROFILING_API_CALL;
+ ApiFunction thunk_fun(thunk_address);
+ ExternalReference thunk_ref = ExternalReference(&thunk_fun, thunk_type,
+ masm->isolate());
+
AllowExternalCallThatCantCauseGC scope(masm);
- __ CallApiFunctionAndReturn(ref, kStackUnwindSpace);
+ MemOperand context_restore_operand(
+ fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
+ MemOperand return_value_operand(
+ fp, (2 + FCA::kReturnValueOffset) * kPointerSize);
+
+ __ CallApiFunctionAndReturn(ref,
+ function_address,
+ thunk_ref,
+ a1,
+ kStackUnwindSpace,
+ return_value_operand,
+ restore_context ?
+ &context_restore_operand : NULL);
+}
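
GenerateFastApiDirectCall now fills every implicit FunctionCallbackInfo slot itself (context save, callee, call data, isolate, the return-value pair; the holder slot was already written by CheckPrototypes) and then lays out the implicit_args_/values_/length_/is_construct_call_ header inside the exit frame. On the other side of the thunk the callback reads all of this through the public API; a minimal usage sketch from the embedder's point of view (the callback name is hypothetical, API as of this V8 line):

    #include <v8.h>

    // Hypothetical fast-API callback: everything it touches below comes from the
    // slots this stub wrote (values_, length_, holder slot, return-value slot).
    static void MyCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
      if (info.Length() > 0) {                 // length_ written by the stub
        info.GetReturnValue().Set(info[0]);    // fills the kReturnValueOffset slot
      }
      v8::Local<v8::Object> holder = info.Holder();  // the kHolderIndex slot
      (void) holder;
    }
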
+
+
+// Generate call to api function.
+static void GenerateFastApiCall(MacroAssembler* masm,
+ const CallOptimization& optimization,
+ Register receiver,
+ Register scratch,
+ int argc,
+ Register* values) {
+ ASSERT(optimization.is_simple_api_call());
+ ASSERT(!receiver.is(scratch));
+
+ typedef FunctionCallbackArguments FCA;
+ const int stack_space = kFastApiCallArguments + argc + 1;
+ // Assign stack space for the call arguments.
+ __ Subu(sp, sp, Operand(stack_space * kPointerSize));
+ // Write holder to stack frame.
+ __ sw(receiver, MemOperand(sp, FCA::kHolderIndex * kPointerSize));
+ // Write receiver to stack frame.
+ int index = stack_space - 1;
+ __ sw(receiver, MemOperand(sp, index * kPointerSize));
+ // Write the arguments to stack frame.
+ for (int i = 0; i < argc; i++) {
+ ASSERT(!receiver.is(values[i]));
+ ASSERT(!scratch.is(values[i]));
+ __ sw(receiver, MemOperand(sp, index-- * kPointerSize));
+ }
+
+ GenerateFastApiDirectCall(masm, optimization, argc, true);
}
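
GenerateFastApiCall is the frontend used by the new callback load handler further down: it reserves kFastApiCallArguments (seven words, per the register-state comment in GenerateFastApiDirectCall) plus argc plus one, writes the holder both at FCA::kHolderIndex and into the receiver slot, then delegates with restore_context set. The only caller visible in this patch passes argc == 0 and values == NULL, so the argument-copy loop never executes; note that its body stores receiver rather than values[i], which would need revisiting if a caller with argc > 0 were ever added. A worked layout for the argc == 0 case:

    // argc == 0  ->  stack_space = kFastApiCallArguments + 0 + 1 = 8 words
    //   sp + 7 * kPointerSize : receiver (the JS receiver slot)
    //   sp + 0 .. 6 words     : FunctionCallbackInfo implicit slots; kHolderIndex
    //                           is written here, the rest are filled immediately
    //                           afterwards by GenerateFastApiDirectCall.
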
+
class CallInterceptorCompiler BASE_EMBEDDED {
public:
CallInterceptorCompiler(StubCompiler* stub_compiler,
@@ -751,7 +959,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
void Compile(MacroAssembler* masm,
Handle<JSObject> object,
Handle<JSObject> holder,
- Handle<String> name,
+ Handle<Name> name,
LookupResult* lookup,
Register receiver,
Register scratch1,
@@ -782,7 +990,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
Register scratch3,
Handle<JSObject> interceptor_holder,
LookupResult* lookup,
- Handle<String> name,
+ Handle<Name> name,
const CallOptimization& optimization,
Label* miss_label) {
ASSERT(optimization.is_constant_call());
@@ -847,12 +1055,15 @@ class CallInterceptorCompiler BASE_EMBEDDED {
// Invoke function.
if (can_do_fast_api_call) {
- GenerateFastApiDirectCall(masm, optimization, arguments_.immediate());
+ GenerateFastApiDirectCall(
+ masm, optimization, arguments_.immediate(), false);
} else {
CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
? CALL_AS_FUNCTION
: CALL_AS_METHOD;
- __ InvokeFunction(optimization.constant_function(), arguments_,
+ Handle<JSFunction> function = optimization.constant_function();
+ ParameterCount expected(function);
+ __ InvokeFunction(function, expected, arguments_,
JUMP_FUNCTION, NullCallWrapper(), call_kind);
}
@@ -876,7 +1087,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
Register scratch1,
Register scratch2,
Register scratch3,
- Handle<String> name,
+ Handle<Name> name,
Handle<JSObject> interceptor_holder,
Label* miss_label) {
Register holder =
@@ -895,7 +1106,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
ExternalReference(
IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
masm->isolate()),
- 6);
+ StubCache::kInterceptorArgsLength);
// Restore the name_ register.
__ pop(name_);
// Leave the internal frame.
@@ -931,39 +1142,17 @@ class CallInterceptorCompiler BASE_EMBEDDED {
};
-
-// Generate code to check that a global property cell is empty. Create
-// the property cell at compilation time if no cell exists for the
-// property.
-static void GenerateCheckPropertyCell(MacroAssembler* masm,
- Handle<GlobalObject> global,
- Handle<String> name,
- Register scratch,
- Label* miss) {
- Handle<JSGlobalPropertyCell> cell =
- GlobalObject::EnsurePropertyCell(global, name);
- ASSERT(cell->value()->IsTheHole());
- __ li(scratch, Operand(cell));
- __ lw(scratch,
- FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
- __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- __ Branch(miss, ne, scratch, Operand(at));
-}
-
-
-// Calls GenerateCheckPropertyCell for each global object in the prototype chain
-// from object to (but not including) holder.
-static void GenerateCheckPropertyCells(MacroAssembler* masm,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<String> name,
- Register scratch,
- Label* miss) {
+void StubCompiler::GenerateCheckPropertyCells(MacroAssembler* masm,
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ Register scratch,
+ Label* miss) {
Handle<JSObject> current = object;
while (!current.is_identical_to(holder)) {
- if (current->IsGlobalObject()) {
+ if (current->IsJSGlobalObject()) {
GenerateCheckPropertyCell(masm,
- Handle<GlobalObject>::cast(current),
+ Handle<JSGlobalObject>::cast(current),
name,
scratch,
miss);
@@ -973,119 +1162,8 @@ static void GenerateCheckPropertyCells(MacroAssembler* masm,
}
-// Convert and store int passed in register ival to IEEE 754 single precision
-// floating point value at memory location (dst + 4 * wordoffset)
-// If FPU is available use it for conversion.
-static void StoreIntAsFloat(MacroAssembler* masm,
- Register dst,
- Register wordoffset,
- Register ival,
- Register fval,
- Register scratch1,
- Register scratch2) {
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
- __ mtc1(ival, f0);
- __ cvt_s_w(f0, f0);
- __ sll(scratch1, wordoffset, 2);
- __ addu(scratch1, dst, scratch1);
- __ swc1(f0, MemOperand(scratch1, 0));
- } else {
- // FPU is not available, do manual conversions.
-
- Label not_special, done;
- // Move sign bit from source to destination. This works because the sign
- // bit in the exponent word of the double has the same position and polarity
- // as the 2's complement sign bit in a Smi.
- ASSERT(kBinary32SignMask == 0x80000000u);
-
- __ And(fval, ival, Operand(kBinary32SignMask));
- // Negate value if it is negative.
- __ subu(scratch1, zero_reg, ival);
- __ Movn(ival, scratch1, fval);
-
- // We have -1, 0 or 1, which we treat specially. Register ival contains
- // absolute value: it is either equal to 1 (special case of -1 and 1),
- // greater than 1 (not a special case) or less than 1 (special case of 0).
- __ Branch(&not_special, gt, ival, Operand(1));
-
- // For 1 or -1 we need to or in the 0 exponent (biased).
- static const uint32_t exponent_word_for_1 =
- kBinary32ExponentBias << kBinary32ExponentShift;
-
- __ Xor(scratch1, ival, Operand(1));
- __ li(scratch2, exponent_word_for_1);
- __ or_(scratch2, fval, scratch2);
- __ Movz(fval, scratch2, scratch1); // Only if ival is equal to 1.
- __ Branch(&done);
-
- __ bind(&not_special);
- // Count leading zeros.
- // Gets the wrong answer for 0, but we already checked for that case above.
- Register zeros = scratch2;
- __ Clz(zeros, ival);
-
- // Compute exponent and or it into the exponent register.
- __ li(scratch1, (kBitsPerInt - 1) + kBinary32ExponentBias);
- __ subu(scratch1, scratch1, zeros);
-
- __ sll(scratch1, scratch1, kBinary32ExponentShift);
- __ or_(fval, fval, scratch1);
-
- // Shift up the source chopping the top bit off.
- __ Addu(zeros, zeros, Operand(1));
- // This wouldn't work for 1 and -1 as the shift would be 32 which means 0.
- __ sllv(ival, ival, zeros);
- // And the top (top 20 bits).
- __ srl(scratch1, ival, kBitsPerInt - kBinary32MantissaBits);
- __ or_(fval, fval, scratch1);
-
- __ bind(&done);
-
- __ sll(scratch1, wordoffset, 2);
- __ addu(scratch1, dst, scratch1);
- __ sw(fval, MemOperand(scratch1, 0));
- }
-}
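
The deleted StoreIntAsFloat helper (and GenerateUInt2Double below it) hand-built IEEE 754 encodings for the pre-FPU path that this patch retires. For reference, a worked instance of the special case its comments describe, using only IEEE 754 single-precision facts:

    // |ival| == 1: the mantissa is all zeros, so only the biased zero exponent is
    // ORed into the sign word.  With kBinary32ExponentBias == 127 and
    // kBinary32ExponentShift == 23:
    static const uint32_t kExponentWordForOne = 127u << 23;  // 0x3F800000, bits of 1.0f
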
-
-
-// Convert unsigned integer with specified number of leading zeroes in binary
-// representation to IEEE 754 double.
-// Integer to convert is passed in register hiword.
-// Resulting double is returned in registers hiword:loword.
-// This functions does not work correctly for 0.
-static void GenerateUInt2Double(MacroAssembler* masm,
- Register hiword,
- Register loword,
- Register scratch,
- int leading_zeroes) {
- const int meaningful_bits = kBitsPerInt - leading_zeroes - 1;
- const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits;
-
- const int mantissa_shift_for_hi_word =
- meaningful_bits - HeapNumber::kMantissaBitsInTopWord;
-
- const int mantissa_shift_for_lo_word =
- kBitsPerInt - mantissa_shift_for_hi_word;
-
- __ li(scratch, biased_exponent << HeapNumber::kExponentShift);
- if (mantissa_shift_for_hi_word > 0) {
- __ sll(loword, hiword, mantissa_shift_for_lo_word);
- __ srl(hiword, hiword, mantissa_shift_for_hi_word);
- __ or_(hiword, scratch, hiword);
- } else {
- __ mov(loword, zero_reg);
- __ sll(hiword, hiword, mantissa_shift_for_hi_word);
- __ or_(hiword, scratch, hiword);
- }
-
- // If least significant bit of biased exponent was not 1 it was corrupted
- // by most significant bit of mantissa so we should fix that.
- if (!(biased_exponent & 1)) {
- __ li(scratch, 1 << HeapNumber::kExponentShift);
- __ nor(scratch, scratch, scratch);
- __ and_(hiword, hiword, scratch);
- }
+void StubCompiler::GenerateTailCall(MacroAssembler* masm, Handle<Code> code) {
+ __ Jump(code, RelocInfo::CODE_TARGET);
}
@@ -1099,9 +1177,15 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
Register holder_reg,
Register scratch1,
Register scratch2,
- Handle<String> name,
+ Handle<Name> name,
int save_at_depth,
- Label* miss) {
+ Label* miss,
+ PrototypeCheckType check) {
+ // Make sure that the type feedback oracle harvests the receiver map.
+ // TODO(svenpanne) Remove this hack when all ICs are reworked.
+ __ li(scratch1, Operand(Handle<Map>(object->map())));
+
+ Handle<JSObject> first = object;
// Make sure there's no overlap between holder and object registers.
ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg)
@@ -1111,8 +1195,9 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
Register reg = object_reg;
int depth = 0;
+ typedef FunctionCallbackArguments FCA;
if (save_at_depth == depth) {
- __ sw(reg, MemOperand(sp));
+ __ sw(reg, MemOperand(sp, FCA::kHolderIndex * kPointerSize));
}
// Check the maps in the prototype chain.
@@ -1129,11 +1214,12 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
if (!current->HasFastProperties() &&
!current->IsJSGlobalObject() &&
!current->IsJSGlobalProxy()) {
- if (!name->IsSymbol()) {
- name = factory()->LookupSymbol(name);
+ if (!name->IsUniqueName()) {
+ ASSERT(name->IsString());
+ name = factory()->InternalizeString(Handle<String>::cast(name));
}
ASSERT(current->property_dictionary()->FindEntry(*name) ==
- StringDictionary::kNotFound);
+ NameDictionary::kNotFound);
GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
scratch1, scratch2);
@@ -1142,9 +1228,14 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
reg = holder_reg; // From now on the object will be in holder_reg.
__ lw(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
} else {
- Handle<Map> current_map(current->map());
- __ CheckMap(reg, scratch1, current_map, miss, DONT_DO_SMI_CHECK,
- ALLOW_ELEMENT_TRANSITION_MAPS);
+ Register map_reg = scratch1;
+ if (!current.is_identical_to(first) || check == CHECK_ALL_MAPS) {
+ Handle<Map> current_map(current->map());
+ // CheckMap implicitly loads the map of |reg| into |map_reg|.
+ __ CheckMap(reg, map_reg, current_map, miss, DONT_DO_SMI_CHECK);
+ } else {
+ __ lw(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
+ }
// Check access rights to the global object. This has to happen after
// the map check so that we know that the object is actually a global
// object.
@@ -1156,7 +1247,7 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
if (heap()->InNewSpace(*prototype)) {
// The prototype is in new space; we cannot store a reference to it
// in the code. Load it from the map.
- __ lw(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
+ __ lw(reg, FieldMemOperand(map_reg, Map::kPrototypeOffset));
} else {
// The prototype is in old space; load it directly.
__ li(reg, Operand(prototype));
@@ -1164,7 +1255,7 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
}
if (save_at_depth == depth) {
- __ sw(reg, MemOperand(sp));
+ __ sw(reg, MemOperand(sp, FCA::kHolderIndex * kPointerSize));
}
// Go to the next object in the prototype chain.
@@ -1172,11 +1263,13 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
}
// Log the check depth.
- LOG(masm()->isolate(), IntEvent("check-maps-depth", depth + 1));
+ LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
- // Check the holder map.
- __ CheckMap(reg, scratch1, Handle<Map>(current->map()), miss,
- DONT_DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
+ if (!holder.is_identical_to(first) || check == CHECK_ALL_MAPS) {
+ // Check the holder map.
+ __ CheckMap(reg, scratch1, Handle<Map>(holder->map()), miss,
+ DONT_DO_SMI_CHECK);
+ }
// Perform security check for access to the global object.
ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
@@ -1194,171 +1287,186 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
}
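
CheckPrototypes gains a PrototypeCheckType parameter: when the IC dispatch has already verified the receiver map, the redundant first map check is dropped and only a plain map load remains (both to reach the prototype and to keep the type-feedback hack above working). The enum itself lives in stub-cache.h, outside this diff; its assumed shape:

    // Assumed declaration (not part of this file):
    enum PrototypeCheckType { CHECK_ALL_MAPS, SKIP_RECEIVER };
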
-void StubCompiler::GenerateLoadField(Handle<JSObject> object,
- Handle<JSObject> holder,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- int index,
- Handle<String> name,
- Label* miss) {
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss);
-
- // Check that the maps haven't changed.
- Register reg = CheckPrototypes(
- object, receiver, holder, scratch1, scratch2, scratch3, name, miss);
- GenerateFastPropertyLoad(masm(), v0, reg, holder, index);
- __ Ret();
+void LoadStubCompiler::HandlerFrontendFooter(Handle<Name> name,
+ Label* success,
+ Label* miss) {
+ if (!miss->is_unused()) {
+ __ Branch(success);
+ __ bind(miss);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+ }
}
-void StubCompiler::GenerateLoadConstant(Handle<JSObject> object,
- Handle<JSObject> holder,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Handle<JSFunction> value,
- Handle<String> name,
- Label* miss) {
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss, scratch1);
+void StoreStubCompiler::HandlerFrontendFooter(Handle<Name> name,
+ Label* success,
+ Label* miss) {
+ if (!miss->is_unused()) {
+ __ b(success);
+ GenerateRestoreName(masm(), miss, name);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+ }
+}
- // Check that the maps haven't changed.
- CheckPrototypes(object, receiver, holder,
- scratch1, scratch2, scratch3, name, miss);
- // Return the constant value.
- __ LoadHeapObject(v0, value);
- __ Ret();
-}
+Register LoadStubCompiler::CallbackHandlerFrontend(
+ Handle<JSObject> object,
+ Register object_reg,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ Label* success,
+ Handle<Object> callback) {
+ Label miss;
+ Register reg = HandlerFrontendHeader(object, object_reg, holder, name, &miss);
-void StubCompiler::GenerateDictionaryLoadCallback(Register receiver,
- Register name_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Handle<AccessorInfo> callback,
- Handle<String> name,
- Label* miss) {
- ASSERT(!receiver.is(scratch1));
- ASSERT(!receiver.is(scratch2));
- ASSERT(!receiver.is(scratch3));
-
- // Load the properties dictionary.
- Register dictionary = scratch1;
- __ lw(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
-
- // Probe the dictionary.
- Label probe_done;
- StringDictionaryLookupStub::GeneratePositiveLookup(masm(),
- miss,
+ if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) {
+ ASSERT(!reg.is(scratch2()));
+ ASSERT(!reg.is(scratch3()));
+ ASSERT(!reg.is(scratch4()));
+
+ // Load the properties dictionary.
+ Register dictionary = scratch4();
+ __ lw(dictionary, FieldMemOperand(reg, JSObject::kPropertiesOffset));
+
+ // Probe the dictionary.
+ Label probe_done;
+ NameDictionaryLookupStub::GeneratePositiveLookup(masm(),
+ &miss,
&probe_done,
dictionary,
- name_reg,
- scratch2,
- scratch3);
- __ bind(&probe_done);
-
- // If probing finds an entry in the dictionary, scratch3 contains the
- // pointer into the dictionary. Check that the value is the callback.
- Register pointer = scratch3;
- const int kElementsStartOffset = StringDictionary::kHeaderSize +
- StringDictionary::kElementsStartIndex * kPointerSize;
- const int kValueOffset = kElementsStartOffset + kPointerSize;
- __ lw(scratch2, FieldMemOperand(pointer, kValueOffset));
- __ Branch(miss, ne, scratch2, Operand(callback));
+ this->name(),
+ scratch2(),
+ scratch3());
+ __ bind(&probe_done);
+
+ // If probing finds an entry in the dictionary, scratch3 contains the
+ // pointer into the dictionary. Check that the value is the callback.
+ Register pointer = scratch3();
+ const int kElementsStartOffset = NameDictionary::kHeaderSize +
+ NameDictionary::kElementsStartIndex * kPointerSize;
+ const int kValueOffset = kElementsStartOffset + kPointerSize;
+ __ lw(scratch2(), FieldMemOperand(pointer, kValueOffset));
+ __ Branch(&miss, ne, scratch2(), Operand(callback));
+ }
+
+ HandlerFrontendFooter(name, success, &miss);
+ return reg;
}
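
A dictionary-mode holder does not change its map when a property is redefined, so the map checks alone cannot guarantee the accessor is still the one this handler was compiled for; the probe above therefore re-validates the callback identity at run time. Conceptually (runtime-style view of the same check; FindEntry appears elsewhere in this file, ValueAt is assumed per NameDictionary):

    // int entry = holder->property_dictionary()->FindEntry(*name);
    // if (holder->property_dictionary()->ValueAt(entry) != *callback) goto miss;
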
-void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
- Handle<JSObject> holder,
- Register receiver,
- Register name_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Handle<AccessorInfo> callback,
- Handle<String> name,
- Label* miss) {
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss, scratch1);
+void LoadStubCompiler::GenerateLoadField(Register reg,
+ Handle<JSObject> holder,
+ PropertyIndex field,
+ Representation representation) {
+ if (!reg.is(receiver())) __ mov(receiver(), reg);
+ if (kind() == Code::LOAD_IC) {
+ LoadFieldStub stub(field.is_inobject(holder),
+ field.translate(holder),
+ representation);
+ GenerateTailCall(masm(), stub.GetCode(isolate()));
+ } else {
+ KeyedLoadFieldStub stub(field.is_inobject(holder),
+ field.translate(holder),
+ representation);
+ GenerateTailCall(masm(), stub.GetCode(isolate()));
+ }
+}
- // Check that the maps haven't changed.
- Register reg = CheckPrototypes(object, receiver, holder, scratch1,
- scratch2, scratch3, name, miss);
- if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) {
- GenerateDictionaryLoadCallback(
- reg, name_reg, scratch2, scratch3, scratch4, callback, name, miss);
- }
+void LoadStubCompiler::GenerateLoadConstant(Handle<Object> value) {
+ // Return the constant value.
+ __ LoadObject(v0, value);
+ __ Ret();
+}
+
+
+void LoadStubCompiler::GenerateLoadCallback(
+ const CallOptimization& call_optimization) {
+ GenerateFastApiCall(
+ masm(), call_optimization, receiver(), scratch3(), 0, NULL);
+}
+
+void LoadStubCompiler::GenerateLoadCallback(
+ Register reg,
+ Handle<ExecutableAccessorInfo> callback) {
// Build AccessorInfo::args_ list on the stack and push property name below
// the exit frame to make GC aware of them and store pointers to them.
- __ push(receiver);
- __ mov(scratch2, sp); // scratch2 = AccessorInfo::args_
+ STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 0);
+ STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 1);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 2);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 3);
+ STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4);
+ STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5);
+ STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 6);
+ ASSERT(!scratch2().is(reg));
+ ASSERT(!scratch3().is(reg));
+ ASSERT(!scratch4().is(reg));
+ __ push(receiver());
if (heap()->InNewSpace(callback->data())) {
- __ li(scratch3, callback);
- __ lw(scratch3, FieldMemOperand(scratch3, AccessorInfo::kDataOffset));
+ __ li(scratch3(), callback);
+ __ lw(scratch3(), FieldMemOperand(scratch3(),
+ ExecutableAccessorInfo::kDataOffset));
} else {
- __ li(scratch3, Handle<Object>(callback->data()));
- }
- __ Subu(sp, sp, 4 * kPointerSize);
- __ sw(reg, MemOperand(sp, 3 * kPointerSize));
- __ sw(scratch3, MemOperand(sp, 2 * kPointerSize));
- __ li(scratch3, Operand(ExternalReference::isolate_address()));
- __ sw(scratch3, MemOperand(sp, 1 * kPointerSize));
- __ sw(name_reg, MemOperand(sp, 0 * kPointerSize));
-
- __ mov(a2, scratch2); // Saved in case scratch2 == a1.
- __ mov(a1, sp); // a1 (first argument - see note below) = Handle<String>
-
- // NOTE: the O32 abi requires a0 to hold a special pointer when returning a
- // struct from the function (which is currently the case). This means we pass
- // the arguments in a1-a2 instead of a0-a1. TryCallApiFunctionAndReturn
- // will handle setting up a0.
+ __ li(scratch3(), Handle<Object>(callback->data(), isolate()));
+ }
+ __ Subu(sp, sp, 6 * kPointerSize);
+ __ sw(scratch3(), MemOperand(sp, 5 * kPointerSize));
+ __ LoadRoot(scratch3(), Heap::kUndefinedValueRootIndex);
+ __ sw(scratch3(), MemOperand(sp, 4 * kPointerSize));
+ __ sw(scratch3(), MemOperand(sp, 3 * kPointerSize));
+ __ li(scratch4(),
+ Operand(ExternalReference::isolate_address(isolate())));
+ __ sw(scratch4(), MemOperand(sp, 2 * kPointerSize));
+ __ sw(reg, MemOperand(sp, 1 * kPointerSize));
+ __ sw(name(), MemOperand(sp, 0 * kPointerSize));
+ __ Addu(scratch2(), sp, 1 * kPointerSize);
+
+ __ mov(a2, scratch2()); // Saved in case scratch2 == a1.
+ __ mov(a0, sp); // (first argument - a0) = Handle<Name>
const int kApiStackSpace = 1;
FrameScope frame_scope(masm(), StackFrame::MANUAL);
__ EnterExitFrame(false, kApiStackSpace);
- // Create AccessorInfo instance on the stack above the exit frame with
+  // Create PropertyCallbackInfo instance on the stack above the exit frame with
// scratch2 (internal::Object** args_) as the data.
__ sw(a2, MemOperand(sp, kPointerSize));
- // a2 (second argument - see note above) = AccessorInfo&
- __ Addu(a2, sp, kPointerSize);
+ // (second argument - a1) = AccessorInfo&
+ __ Addu(a1, sp, kPointerSize);
- const int kStackUnwindSpace = 5;
+ const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
Address getter_address = v8::ToCData<Address>(callback->getter());
ApiFunction fun(getter_address);
- ExternalReference ref =
- ExternalReference(&fun,
- ExternalReference::DIRECT_GETTER_CALL,
- masm()->isolate());
- __ CallApiFunctionAndReturn(ref, kStackUnwindSpace);
-}
-
-
-void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
- Handle<JSObject> interceptor_holder,
- LookupResult* lookup,
- Register receiver,
- Register name_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Handle<String> name,
- Label* miss) {
+ ExternalReference::Type type = ExternalReference::DIRECT_GETTER_CALL;
+ ExternalReference ref = ExternalReference(&fun, type, isolate());
+
+ Address thunk_address = FUNCTION_ADDR(&InvokeAccessorGetterCallback);
+ ExternalReference::Type thunk_type =
+ ExternalReference::PROFILING_GETTER_CALL;
+ ApiFunction thunk_fun(thunk_address);
+ ExternalReference thunk_ref = ExternalReference(&thunk_fun, thunk_type,
+ isolate());
+ __ CallApiFunctionAndReturn(ref,
+ getter_address,
+ thunk_ref,
+ a2,
+ kStackUnwindSpace,
+ MemOperand(fp, 6 * kPointerSize),
+ NULL);
+}
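
The generic callback path builds the PropertyCallbackArguments array by hand. scratch2() = sp + kPointerSize becomes args_, so the slots line up with the indices asserted at the top of this function, and a0 = sp doubles as the Handle<Name> first argument:

    // Stack as built above (one word per slot):
    //   sp + 6 * kPointerSize : receiver              args_[5]  kThisIndex (pushed first)
    //   sp + 5 * kPointerSize : callback data         args_[4]  kDataIndex
    //   sp + 4 * kPointerSize : return value          args_[3]  kReturnValueOffset
    //   sp + 3 * kPointerSize : return value default  args_[2]  kReturnValueDefaultValueIndex
    //   sp + 2 * kPointerSize : isolate               args_[1]  kIsolateIndex
    //   sp + 1 * kPointerSize : holder (reg)          args_[0]  kHolderIndex
    //   sp + 0                : property name         read via a0 == sp as Handle<Name>
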
+
+
+void LoadStubCompiler::GenerateLoadInterceptor(
+ Register holder_reg,
+ Handle<JSObject> object,
+ Handle<JSObject> interceptor_holder,
+ LookupResult* lookup,
+ Handle<Name> name) {
ASSERT(interceptor_holder->HasNamedInterceptor());
ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss);
-
// So far the most popular follow ups for interceptor loads are FIELD
// and CALLBACKS, so inline only them, other cases may be added
// later.
@@ -1367,8 +1475,9 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
if (lookup->IsField()) {
compile_followup_inline = true;
} else if (lookup->type() == CALLBACKS &&
- lookup->GetCallbackObject()->IsAccessorInfo()) {
- AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
+ lookup->GetCallbackObject()->IsExecutableAccessorInfo()) {
+ ExecutableAccessorInfo* callback =
+ ExecutableAccessorInfo::cast(lookup->GetCallbackObject());
compile_followup_inline = callback->getter() != NULL &&
callback->IsCompatibleReceiver(*object);
}
@@ -1378,17 +1487,14 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
// Compile the interceptor call, followed by inline code to load the
// property from further up the prototype chain if the call fails.
// Check that the maps haven't changed.
- Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, scratch3,
- name, miss);
- ASSERT(holder_reg.is(receiver) || holder_reg.is(scratch1));
+ ASSERT(holder_reg.is(receiver()) || holder_reg.is(scratch1()));
// Preserve the receiver register explicitly whenever it is different from
// the holder and it is needed should the interceptor return without any
// result. The CALLBACKS case needs the receiver to be passed into C++ code,
// the FIELD case might cause a miss during the prototype check.
bool must_perfrom_prototype_check = *interceptor_holder != lookup->holder();
- bool must_preserve_receiver_reg = !receiver.is(holder_reg) &&
+ bool must_preserve_receiver_reg = !receiver().is(holder_reg) &&
(lookup->type() == CALLBACKS || must_perfrom_prototype_check);
// Save necessary data before invoking an interceptor.
@@ -1396,95 +1502,49 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
{
FrameScope frame_scope(masm(), StackFrame::INTERNAL);
if (must_preserve_receiver_reg) {
- __ Push(receiver, holder_reg, name_reg);
+ __ Push(receiver(), holder_reg, this->name());
} else {
- __ Push(holder_reg, name_reg);
+ __ Push(holder_reg, this->name());
}
// Invoke an interceptor. Note: map checks from receiver to
// interceptor's holder has been compiled before (see a caller
// of this method).
CompileCallLoadPropertyWithInterceptor(masm(),
- receiver,
+ receiver(),
holder_reg,
- name_reg,
+ this->name(),
interceptor_holder);
// Check if interceptor provided a value for property. If it's
// the case, return immediately.
Label interceptor_failed;
- __ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex);
- __ Branch(&interceptor_failed, eq, v0, Operand(scratch1));
+ __ LoadRoot(scratch1(), Heap::kNoInterceptorResultSentinelRootIndex);
+ __ Branch(&interceptor_failed, eq, v0, Operand(scratch1()));
frame_scope.GenerateLeaveFrame();
__ Ret();
__ bind(&interceptor_failed);
- __ pop(name_reg);
+ __ pop(this->name());
__ pop(holder_reg);
if (must_preserve_receiver_reg) {
- __ pop(receiver);
+ __ pop(receiver());
}
// Leave the internal frame.
}
- // Check that the maps from interceptor's holder to lookup's holder
- // haven't changed. And load lookup's holder into |holder| register.
- if (must_perfrom_prototype_check) {
- holder_reg = CheckPrototypes(interceptor_holder,
- holder_reg,
- Handle<JSObject>(lookup->holder()),
- scratch1,
- scratch2,
- scratch3,
- name,
- miss);
- }
-
- if (lookup->IsField()) {
- // We found FIELD property in prototype chain of interceptor's holder.
- // Retrieve a field from field's holder.
- GenerateFastPropertyLoad(masm(), v0, holder_reg,
- Handle<JSObject>(lookup->holder()),
- lookup->GetFieldIndex());
- __ Ret();
- } else {
- // We found CALLBACKS property in prototype chain of interceptor's
- // holder.
- ASSERT(lookup->type() == CALLBACKS);
- Handle<AccessorInfo> callback(
- AccessorInfo::cast(lookup->GetCallbackObject()));
- ASSERT(callback->getter() != NULL);
-
- // Tail call to runtime.
- // Important invariant in CALLBACKS case: the code above must be
- // structured to never clobber |receiver| register.
- __ li(scratch2, callback);
-
- __ Push(receiver, holder_reg);
- __ lw(scratch3,
- FieldMemOperand(scratch2, AccessorInfo::kDataOffset));
- __ li(scratch1, Operand(ExternalReference::isolate_address()));
- __ Push(scratch3, scratch1, scratch2, name_reg);
-
- ExternalReference ref =
- ExternalReference(IC_Utility(IC::kLoadCallbackProperty),
- masm()->isolate());
- __ TailCallExternalReference(ref, 6, 1);
- }
+ GenerateLoadPostInterceptor(holder_reg, interceptor_holder, name, lookup);
} else { // !compile_followup_inline
// Call the runtime system to load the interceptor.
// Check that the maps haven't changed.
- Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, scratch3,
- name, miss);
- PushInterceptorArguments(masm(), receiver, holder_reg,
- name_reg, interceptor_holder);
+ PushInterceptorArguments(masm(), receiver(), holder_reg,
+ this->name(), interceptor_holder);
ExternalReference ref = ExternalReference(
- IC_Utility(IC::kLoadPropertyWithInterceptorForLoad), masm()->isolate());
- __ TailCallExternalReference(ref, 6, 1);
+ IC_Utility(IC::kLoadPropertyWithInterceptorForLoad), isolate());
+ __ TailCallExternalReference(ref, StubCache::kInterceptorArgsLength, 1);
}
}
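
The inlined interceptor path calls the interceptor first and only falls through to GenerateLoadPostInterceptor when the call comes back with the no-interceptor-result sentinel. From the embedder's side that sentinel corresponds to a named interceptor that declines the lookup by not producing a value; a hypothetical getter using the public API of this V8 line:

    // Hypothetical named-property interceptor (embedder code, not part of the patch).
    static void MyNamedGetter(v8::Local<v8::String> property,
                              const v8::PropertyCallbackInfo<v8::Value>& info) {
      if (/* we handle this property */ false) {
        info.GetReturnValue().Set(v8::Integer::New(42));  // handler above returns this
      }
      // Otherwise: leave the return value untouched; the stub sees the sentinel
      // and continues with the post-interceptor (field/callback) lookup.
    }
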
-void CallStubCompiler::GenerateNameCheck(Handle<String> name, Label* miss) {
+void CallStubCompiler::GenerateNameCheck(Handle<Name> name, Label* miss) {
if (kind_ == Code::KEYED_CALL_IC) {
__ Branch(miss, ne, a2, Operand(name));
}
@@ -1493,7 +1553,7 @@ void CallStubCompiler::GenerateNameCheck(Handle<String> name, Label* miss) {
void CallStubCompiler::GenerateGlobalReceiverCheck(Handle<JSObject> object,
Handle<JSObject> holder,
- Handle<String> name,
+ Handle<Name> name,
Label* miss) {
ASSERT(holder->IsGlobalObject());
@@ -1510,12 +1570,12 @@ void CallStubCompiler::GenerateGlobalReceiverCheck(Handle<JSObject> object,
void CallStubCompiler::GenerateLoadFunctionFromCell(
- Handle<JSGlobalPropertyCell> cell,
+ Handle<Cell> cell,
Handle<JSFunction> function,
Label* miss) {
// Get the value from the cell.
__ li(a3, Operand(cell));
- __ lw(a1, FieldMemOperand(a3, JSGlobalPropertyCell::kValueOffset));
+ __ lw(a1, FieldMemOperand(a3, Cell::kValueOffset));
// Check that the cell contains the same function.
if (heap()->InNewSpace(*function)) {
@@ -1549,8 +1609,8 @@ void CallStubCompiler::GenerateMissBranch() {
Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
Handle<JSObject> holder,
- int index,
- Handle<String> name) {
+ PropertyIndex index,
+ Handle<Name> name) {
// ----------- S t a t e -------------
// -- a2 : name
// -- ra : return address
@@ -1568,7 +1628,8 @@ Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
// Do the right check and compute the holder register.
Register reg = CheckPrototypes(object, a0, holder, a1, a3, t0, name, &miss);
- GenerateFastPropertyLoad(masm(), a1, reg, holder, index);
+ GenerateFastPropertyLoad(masm(), a1, reg, index.is_inobject(holder),
+ index.translate(holder), Representation::Tagged());
GenerateCallFunction(masm(), object, arguments(), &miss, extra_state_);
@@ -1581,12 +1642,61 @@ Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
}
+Handle<Code> CallStubCompiler::CompileArrayCodeCall(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<Cell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name,
+ Code::StubType type) {
+ Label miss;
+
+  // Check that the function is still the Array function.
+ const int argc = arguments().immediate();
+ GenerateNameCheck(name, &miss);
+ Register receiver = a1;
+
+ if (cell.is_null()) {
+ __ lw(receiver, MemOperand(sp, argc * kPointerSize));
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, &miss);
+
+ // Check that the maps haven't changed.
+ CheckPrototypes(Handle<JSObject>::cast(object), receiver, holder, a3, a0,
+ t0, name, &miss);
+ } else {
+ ASSERT(cell->value() == *function);
+ GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
+ &miss);
+ GenerateLoadFunctionFromCell(cell, function, &miss);
+ }
+
+ Handle<AllocationSite> site = isolate()->factory()->NewAllocationSite();
+ site->set_transition_info(Smi::FromInt(GetInitialFastElementsKind()));
+ Handle<Cell> site_feedback_cell = isolate()->factory()->NewCell(site);
+ __ li(a0, Operand(argc));
+ __ li(a2, Operand(site_feedback_cell));
+ __ li(a1, Operand(function));
+
+ ArrayConstructorStub stub(isolate());
+ __ TailCallStub(&stub);
+
+ __ bind(&miss);
+ GenerateMissBranch();
+
+ // Return the generated code.
+ return GetCode(type, name);
+}
+
+
Handle<Code> CallStubCompiler::CompileArrayPushCall(
Handle<Object> object,
Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<Cell> cell,
Handle<JSFunction> function,
- Handle<String> name) {
+ Handle<String> name,
+ Code::StubType type) {
// ----------- S t a t e -------------
// -- a2 : name
// -- ra : return address
@@ -1618,12 +1728,11 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
if (argc == 0) {
// Nothing to do, just return the length.
__ lw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
- __ Drop(argc + 1);
- __ Ret();
+ __ DropAndRet(argc + 1);
} else {
Label call_builtin;
if (argc == 1) { // Otherwise fall through to call the builtin.
- Label attempt_to_grow_elements;
+ Label attempt_to_grow_elements, with_write_barrier, check_double;
Register elements = t2;
Register end_elements = t1;
@@ -1634,7 +1743,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ CheckMap(elements,
v0,
Heap::kFixedArrayMapRootIndex,
- &call_builtin,
+ &check_double,
DONT_DO_SMI_CHECK);
// Get the array's length into v0 and calculate new length.
@@ -1650,7 +1759,6 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ Branch(&attempt_to_grow_elements, gt, v0, Operand(t0));
// Check if value is a smi.
- Label with_write_barrier;
__ lw(t0, MemOperand(sp, (argc - 1) * kPointerSize));
__ JumpIfNotSmi(t0, &with_write_barrier);
@@ -1668,8 +1776,39 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ sw(t0, MemOperand(end_elements));
// Check for a smi.
- __ Drop(argc + 1);
- __ Ret();
+ __ DropAndRet(argc + 1);
+
+ __ bind(&check_double);
+
+ // Check that the elements are in fast mode and writable.
+ __ CheckMap(elements,
+ a0,
+ Heap::kFixedDoubleArrayMapRootIndex,
+ &call_builtin,
+ DONT_DO_SMI_CHECK);
+
+ // Get the array's length into v0 and calculate new length.
+ __ lw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ STATIC_ASSERT(kSmiTagSize == 1);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ Addu(v0, v0, Operand(Smi::FromInt(argc)));
+
+ // Get the elements' length.
+ __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
+
+ // Check if we could survive without allocation.
+ __ Branch(&call_builtin, gt, v0, Operand(t0));
+
+ __ lw(t0, MemOperand(sp, (argc - 1) * kPointerSize));
+ __ StoreNumberToDoubleElements(
+ t0, v0, elements, a3, t1, a2,
+ &call_builtin, argc * kDoubleSize);
+
+ // Save new length.
+ __ sw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+
+ // Check for a smi.
+ __ DropAndRet(argc + 1);
__ bind(&with_write_barrier);
@@ -1682,8 +1821,12 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
// In case of fast smi-only, convert to fast object, otherwise bail out.
__ bind(&not_fast_object);
__ CheckFastSmiElements(a3, t3, &call_builtin);
+
+ __ lw(t3, FieldMemOperand(t0, HeapObject::kMapOffset));
+ __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+ __ Branch(&call_builtin, eq, t3, Operand(at));
// edx: receiver
- // r3: map
+ // a3: map
Label try_holey_map;
__ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
FAST_ELEMENTS,
@@ -1692,7 +1835,9 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
&try_holey_map);
__ mov(a2, receiver);
ElementsTransitionGenerator::
- GenerateMapChangeElementsTransition(masm());
+ GenerateMapChangeElementsTransition(masm(),
+ DONT_TRACK_ALLOCATION_SITE,
+ NULL);
__ jmp(&fast_object);
__ bind(&try_holey_map);
@@ -1703,7 +1848,9 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
&call_builtin);
__ mov(a2, receiver);
ElementsTransitionGenerator::
- GenerateMapChangeElementsTransition(masm());
+ GenerateMapChangeElementsTransition(masm(),
+ DONT_TRACK_ALLOCATION_SITE,
+ NULL);
__ bind(&fast_object);
} else {
__ CheckFastObjectElements(a3, a3, &call_builtin);
@@ -1727,8 +1874,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
- __ Drop(argc + 1);
- __ Ret();
+ __ DropAndRet(argc + 1);
__ bind(&attempt_to_grow_elements);
// v0: array's length + 1.
@@ -1748,11 +1894,9 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ bind(&no_fast_elements_check);
ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(
- masm()->isolate());
+ ExternalReference::new_space_allocation_top_address(isolate());
ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address(
- masm()->isolate());
+ ExternalReference::new_space_allocation_limit_address(isolate());
const int kAllocationDelta = 4;
// Load top and check if it is the end of elements.
@@ -1785,14 +1929,11 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ sw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
// Elements are in new space, so write barrier is not required.
- __ Drop(argc + 1);
- __ Ret();
+ __ DropAndRet(argc + 1);
}
__ bind(&call_builtin);
- __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPush,
- masm()->isolate()),
- argc + 1,
- 1);
+ __ TailCallExternalReference(
+ ExternalReference(Builtins::c_ArrayPush, isolate()), argc + 1, 1);
}
// Handle call cache miss.
@@ -1800,16 +1941,17 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
GenerateMissBranch();
// Return the generated code.
- return GetCode(function);
+ return GetCode(type, name);
}
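
DropAndRet replaces the recurring Drop-then-Ret pairs throughout these custom call stubs, folding the stack adjustment into the return's delay slot (a conditional variant, used further down by CompileMathFloorCall, only drops and returns when the condition holds). Assumed shape of the unconditional helper; the real definition lives in macro-assembler-mips.cc:

    void MacroAssembler::DropAndRet(int drop) {
      Ret(USE_DELAY_SLOT);
      // Executed in the delay slot of the jr ra emitted by Ret().
      addiu(sp, sp, drop * kPointerSize);
    }
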
Handle<Code> CallStubCompiler::CompileArrayPopCall(
Handle<Object> object,
Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<Cell> cell,
Handle<JSFunction> function,
- Handle<String> name) {
+ Handle<String> name,
+ Code::StubType type) {
// ----------- S t a t e -------------
// -- a2 : name
// -- ra : return address
@@ -1867,35 +2009,32 @@ Handle<Code> CallStubCompiler::CompileArrayPopCall(
// Fill with the hole.
__ sw(t2, FieldMemOperand(elements, FixedArray::kHeaderSize));
- __ Drop(argc + 1);
- __ Ret();
+ __ DropAndRet(argc + 1);
__ bind(&return_undefined);
__ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
- __ Drop(argc + 1);
- __ Ret();
+ __ DropAndRet(argc + 1);
__ bind(&call_builtin);
- __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPop,
- masm()->isolate()),
- argc + 1,
- 1);
+ __ TailCallExternalReference(
+ ExternalReference(Builtins::c_ArrayPop, isolate()), argc + 1, 1);
// Handle call cache miss.
__ bind(&miss);
GenerateMissBranch();
// Return the generated code.
- return GetCode(function);
+ return GetCode(type, name);
}
Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
Handle<Object> object,
Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<Cell> cell,
Handle<JSFunction> function,
- Handle<String> name) {
+ Handle<String> name,
+ Code::StubType type) {
// ----------- S t a t e -------------
// -- a2 : function name
// -- ra : return address
@@ -1928,8 +2067,9 @@ Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
v0,
&miss);
ASSERT(!object.is_identical_to(holder));
- CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())),
- v0, holder, a1, a3, t0, name, &miss);
+ CheckPrototypes(
+ Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
+ v0, holder, a1, a3, t0, name, &miss);
Register receiver = a1;
Register index = t1;
@@ -1949,8 +2089,7 @@ Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
index_out_of_range_label,
STRING_INDEX_IS_NUMBER);
generator.GenerateFast(masm());
- __ Drop(argc + 1);
- __ Ret();
+ __ DropAndRet(argc + 1);
StubRuntimeCallHelper call_helper;
generator.GenerateSlow(masm(), call_helper);
@@ -1958,8 +2097,7 @@ Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
if (index_out_of_range.is_linked()) {
__ bind(&index_out_of_range);
__ LoadRoot(v0, Heap::kNanValueRootIndex);
- __ Drop(argc + 1);
- __ Ret();
+ __ DropAndRet(argc + 1);
}
__ bind(&miss);
@@ -1969,16 +2107,17 @@ Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
GenerateMissBranch();
// Return the generated code.
- return GetCode(function);
+ return GetCode(type, name);
}
Handle<Code> CallStubCompiler::CompileStringCharAtCall(
Handle<Object> object,
Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<Cell> cell,
Handle<JSFunction> function,
- Handle<String> name) {
+ Handle<String> name,
+ Code::StubType type) {
// ----------- S t a t e -------------
// -- a2 : function name
// -- ra : return address
@@ -2008,8 +2147,9 @@ Handle<Code> CallStubCompiler::CompileStringCharAtCall(
v0,
&miss);
ASSERT(!object.is_identical_to(holder));
- CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())),
- v0, holder, a1, a3, t0, name, &miss);
+ CheckPrototypes(
+ Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
+ v0, holder, a1, a3, t0, name, &miss);
Register receiver = v0;
Register index = t1;
@@ -2031,17 +2171,15 @@ Handle<Code> CallStubCompiler::CompileStringCharAtCall(
index_out_of_range_label,
STRING_INDEX_IS_NUMBER);
generator.GenerateFast(masm());
- __ Drop(argc + 1);
- __ Ret();
+ __ DropAndRet(argc + 1);
StubRuntimeCallHelper call_helper;
generator.GenerateSlow(masm(), call_helper);
if (index_out_of_range.is_linked()) {
__ bind(&index_out_of_range);
- __ LoadRoot(v0, Heap::kEmptyStringRootIndex);
- __ Drop(argc + 1);
- __ Ret();
+ __ LoadRoot(v0, Heap::kempty_stringRootIndex);
+ __ DropAndRet(argc + 1);
}
__ bind(&miss);
@@ -2051,16 +2189,17 @@ Handle<Code> CallStubCompiler::CompileStringCharAtCall(
GenerateMissBranch();
// Return the generated code.
- return GetCode(function);
+ return GetCode(type, name);
}
Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
Handle<Object> object,
Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<Cell> cell,
Handle<JSFunction> function,
- Handle<String> name) {
+ Handle<String> name,
+ Code::StubType type) {
// ----------- S t a t e -------------
// -- a2 : function name
// -- ra : return address
@@ -2107,8 +2246,7 @@ Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
StringCharFromCodeGenerator generator(code, v0);
generator.GenerateFast(masm());
- __ Drop(argc + 1);
- __ Ret();
+ __ DropAndRet(argc + 1);
StubRuntimeCallHelper call_helper;
generator.GenerateSlow(masm(), call_helper);
@@ -2116,24 +2254,26 @@ Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
// Tail call the full function. We do not have to patch the receiver
// because the function makes no use of it.
__ bind(&slow);
- __ InvokeFunction(
- function, arguments(), JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
+ ParameterCount expected(function);
+ __ InvokeFunction(function, expected, arguments(),
+ JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
__ bind(&miss);
// a2: function name.
GenerateMissBranch();
// Return the generated code.
- return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
+ return GetCode(type, name);
}
Handle<Code> CallStubCompiler::CompileMathFloorCall(
Handle<Object> object,
Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<Cell> cell,
Handle<JSFunction> function,
- Handle<String> name) {
+ Handle<String> name,
+ Code::StubType type) {
// ----------- S t a t e -------------
// -- a2 : function name
// -- ra : return address
@@ -2142,11 +2282,7 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
// -- sp[argc * 4] : receiver
// -----------------------------------
- if (!CpuFeatures::IsSupported(FPU)) {
- return Handle<Code>::null();
- }
- CpuFeatures::Scope scope_fpu(FPU);
const int argc = arguments().immediate();
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
@@ -2174,8 +2310,7 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
// If the argument is a smi, just return.
STATIC_ASSERT(kSmiTag == 0);
__ And(t0, v0, Operand(kSmiTagMask));
- __ Drop(argc + 1, eq, t0, Operand(zero_reg));
- __ Ret(eq, t0, Operand(zero_reg));
+ __ DropAndRet(argc + 1, eq, t0, Operand(zero_reg));
__ CheckMap(v0, a1, Heap::kHeapNumberMapRootIndex, &slow, DONT_DO_SMI_CHECK);
@@ -2240,8 +2375,7 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
// Restore FCSR and return.
__ ctc1(a3, FCSR);
- __ Drop(argc + 1);
- __ Ret();
+ __ DropAndRet(argc + 1);
__ bind(&wont_fit_smi);
// Restore FCSR and fall to slow case.
@@ -2250,24 +2384,26 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
__ bind(&slow);
// Tail call the full function. We do not have to patch the receiver
// because the function makes no use of it.
- __ InvokeFunction(
- function, arguments(), JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
+ ParameterCount expected(function);
+ __ InvokeFunction(function, expected, arguments(),
+ JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
__ bind(&miss);
// a2: function name.
GenerateMissBranch();
// Return the generated code.
- return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
+ return GetCode(type, name);
}
Handle<Code> CallStubCompiler::CompileMathAbsCall(
Handle<Object> object,
Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<Cell> cell,
Handle<JSFunction> function,
- Handle<String> name) {
+ Handle<String> name,
+ Code::StubType type) {
// ----------- S t a t e -------------
// -- a2 : function name
// -- ra : return address
@@ -2319,8 +2455,7 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall(
__ Branch(&slow, lt, v0, Operand(zero_reg));
// Smi case done.
- __ Drop(argc + 1);
- __ Ret();
+ __ DropAndRet(argc + 1);
// Check if the argument is a heap number and load its exponent and
// sign.
@@ -2333,8 +2468,7 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall(
Label negative_sign;
__ And(t0, a1, Operand(HeapNumber::kSignMask));
__ Branch(&negative_sign, ne, t0, Operand(zero_reg));
- __ Drop(argc + 1);
- __ Ret();
+ __ DropAndRet(argc + 1);
// If the argument is negative, clear the sign, and return a new
// number.
@@ -2345,21 +2479,21 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall(
__ AllocateHeapNumber(v0, t0, t1, t2, &slow);
__ sw(a1, FieldMemOperand(v0, HeapNumber::kExponentOffset));
__ sw(a3, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
- __ Drop(argc + 1);
- __ Ret();
+ __ DropAndRet(argc + 1);
// Tail call the full function. We do not have to patch the receiver
// because the function makes no use of it.
__ bind(&slow);
- __ InvokeFunction(
- function, arguments(), JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
+ ParameterCount expected(function);
+ __ InvokeFunction(function, expected, arguments(),
+ JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
__ bind(&miss);
// a2: function name.
GenerateMissBranch();
// Return the generated code.
- return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
+ return GetCode(type, name);
}
@@ -2367,7 +2501,7 @@ Handle<Code> CallStubCompiler::CompileFastApiCall(
const CallOptimization& optimization,
Handle<Object> object,
Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<Cell> cell,
Handle<JSFunction> function,
Handle<String> name) {
@@ -2403,7 +2537,7 @@ Handle<Code> CallStubCompiler::CompileFastApiCall(
CheckPrototypes(Handle<JSObject>::cast(object), a1, holder, a0, a3, t0, name,
depth, &miss);
- GenerateFastApiDirectCall(masm(), optimization, argc);
+ GenerateFastApiDirectCall(masm(), optimization, argc, false);
__ bind(&miss);
FreeSpaceForFastApiCall(masm());
@@ -2416,25 +2550,16 @@ Handle<Code> CallStubCompiler::CompileFastApiCall(
}
-Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object,
- Handle<JSObject> holder,
- Handle<JSFunction> function,
- Handle<String> name,
- CheckType check) {
+void CallStubCompiler::CompileHandlerFrontend(Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ CheckType check,
+ Label* success) {
// ----------- S t a t e -------------
// -- a2 : name
// -- ra : return address
// -----------------------------------
- if (HasCustomCallGenerator(function)) {
- Handle<Code> code = CompileCustomCall(object, holder,
- Handle<JSGlobalPropertyCell>::null(),
- function, name);
- // A null handle means bail out to the regular compiler code below.
- if (!code.is_null()) return code;
- }
-
Label miss;
-
GenerateNameCheck(name, &miss);
// Get the receiver from the stack.
@@ -2451,8 +2576,7 @@ Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object,
ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK);
switch (check) {
case RECEIVER_MAP_CHECK:
- __ IncrementCounter(masm()->isolate()->counters()->call_const(),
- 1, a0, a3);
+ __ IncrementCounter(isolate()->counters()->call_const(), 1, a0, a3);
// Check that the maps haven't changed.
CheckPrototypes(Handle<JSObject>::cast(object), a1, holder, a0, a3, t0,
@@ -2467,77 +2591,101 @@ Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object,
break;
case STRING_CHECK:
- if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
- // Check that the object is a two-byte string or a symbol.
- __ GetObjectType(a1, a3, a3);
- __ Branch(&miss, Ugreater_equal, a3, Operand(FIRST_NONSTRING_TYPE));
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::STRING_FUNCTION_INDEX, a0, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype())),
- a0, holder, a3, a1, t0, name, &miss);
- } else {
- // Calling non-strict non-builtins with a value as the receiver
- // requires boxing.
- __ jmp(&miss);
- }
+ // Check that the object is a string.
+ __ GetObjectType(a1, a3, a3);
+ __ Branch(&miss, Ugreater_equal, a3, Operand(FIRST_NONSTRING_TYPE));
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateDirectLoadGlobalFunctionPrototype(
+ masm(), Context::STRING_FUNCTION_INDEX, a0, &miss);
+ CheckPrototypes(
+ Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
+ a0, holder, a3, a1, t0, name, &miss);
break;
- case NUMBER_CHECK:
- if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
- Label fast;
- // Check that the object is a smi or a heap number.
- __ JumpIfSmi(a1, &fast);
- __ GetObjectType(a1, a0, a0);
- __ Branch(&miss, ne, a0, Operand(HEAP_NUMBER_TYPE));
- __ bind(&fast);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::NUMBER_FUNCTION_INDEX, a0, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype())),
- a0, holder, a3, a1, t0, name, &miss);
- } else {
- // Calling non-strict non-builtins with a value as the receiver
- // requires boxing.
- __ jmp(&miss);
- }
+ case SYMBOL_CHECK:
+ // Check that the object is a symbol.
+ __ GetObjectType(a1, a1, a3);
+ __ Branch(&miss, ne, a3, Operand(SYMBOL_TYPE));
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateDirectLoadGlobalFunctionPrototype(
+ masm(), Context::SYMBOL_FUNCTION_INDEX, a0, &miss);
+ CheckPrototypes(
+ Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
+ a0, holder, a3, a1, t0, name, &miss);
break;
- case BOOLEAN_CHECK:
- if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
- Label fast;
- // Check that the object is a boolean.
- __ LoadRoot(t0, Heap::kTrueValueRootIndex);
- __ Branch(&fast, eq, a1, Operand(t0));
- __ LoadRoot(t0, Heap::kFalseValueRootIndex);
- __ Branch(&miss, ne, a1, Operand(t0));
- __ bind(&fast);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::BOOLEAN_FUNCTION_INDEX, a0, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype())),
- a0, holder, a3, a1, t0, name, &miss);
- } else {
- // Calling non-strict non-builtins with a value as the receiver
- // requires boxing.
- __ jmp(&miss);
- }
+ case NUMBER_CHECK: {
+ Label fast;
+ // Check that the object is a smi or a heap number.
+ __ JumpIfSmi(a1, &fast);
+ __ GetObjectType(a1, a0, a0);
+ __ Branch(&miss, ne, a0, Operand(HEAP_NUMBER_TYPE));
+ __ bind(&fast);
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateDirectLoadGlobalFunctionPrototype(
+ masm(), Context::NUMBER_FUNCTION_INDEX, a0, &miss);
+ CheckPrototypes(
+ Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
+ a0, holder, a3, a1, t0, name, &miss);
break;
}
+ case BOOLEAN_CHECK: {
+ Label fast;
+ // Check that the object is a boolean.
+ __ LoadRoot(t0, Heap::kTrueValueRootIndex);
+ __ Branch(&fast, eq, a1, Operand(t0));
+ __ LoadRoot(t0, Heap::kFalseValueRootIndex);
+ __ Branch(&miss, ne, a1, Operand(t0));
+ __ bind(&fast);
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateDirectLoadGlobalFunctionPrototype(
+ masm(), Context::BOOLEAN_FUNCTION_INDEX, a0, &miss);
+ CheckPrototypes(
+ Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
+ a0, holder, a3, a1, t0, name, &miss);
+ break;
+ }
+ }
- CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- __ InvokeFunction(
- function, arguments(), JUMP_FUNCTION, NullCallWrapper(), call_kind);
+ __ jmp(success);
// Handle call cache miss.
__ bind(&miss);
GenerateMissBranch();
+}
+
+
+void CallStubCompiler::CompileHandlerBackend(Handle<JSFunction> function) {
+ CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
+ ? CALL_AS_FUNCTION
+ : CALL_AS_METHOD;
+ ParameterCount expected(function);
+ __ InvokeFunction(function, expected, arguments(),
+ JUMP_FUNCTION, NullCallWrapper(), call_kind);
+}
+
+
+Handle<Code> CallStubCompiler::CompileCallConstant(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ CheckType check,
+ Handle<JSFunction> function) {
+ if (HasCustomCallGenerator(function)) {
+ Handle<Code> code = CompileCustomCall(object, holder,
+ Handle<Cell>::null(),
+ function, Handle<String>::cast(name),
+ Code::CONSTANT);
+ // A null handle means bail out to the regular compiler code below.
+ if (!code.is_null()) return code;
+ }
+
+ Label success;
+
+ CompileHandlerFrontend(object, holder, name, check, &success);
+ __ bind(&success);
+ CompileHandlerBackend(function);
// Return the generated code.
return GetCode(function);
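
The hunk above splits the old CompileCallConstant path into a type-checking frontend (CompileHandlerFrontend) and an invoke backend (CompileHandlerBackend). As a rough illustration only, not V8 code, the same control flow can be sketched in standalone C++; Outcome, CompileHandlerFrontendSketch and CompileHandlerBackendSketch are hypothetical names used purely to show the split:

    #include <functional>
    #include <iostream>

    enum class Outcome { kSuccess, kMiss };

    // "Frontend": performs the receiver checks and reports success or miss.
    Outcome CompileHandlerFrontendSketch(bool receiver_matches) {
      return receiver_matches ? Outcome::kSuccess : Outcome::kMiss;
    }

    // "Backend": only runs once the frontend has succeeded.
    void CompileHandlerBackendSketch(const std::function<void()>& invoke) {
      invoke();
    }

    int main() {
      if (CompileHandlerFrontendSketch(/*receiver_matches=*/true) ==
          Outcome::kSuccess) {
        CompileHandlerBackendSketch([] { std::cout << "invoke function\n"; });
      } else {
        std::cout << "jump to the miss handler\n";
      }
      return 0;
    }
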
@@ -2546,7 +2694,7 @@ Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object,
Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
Handle<JSObject> holder,
- Handle<String> name) {
+ Handle<Name> name) {
// ----------- S t a t e -------------
// -- a2 : name
// -- ra : return address
@@ -2587,16 +2735,18 @@ Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
Handle<Code> CallStubCompiler::CompileCallGlobal(
Handle<JSObject> object,
Handle<GlobalObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<PropertyCell> cell,
Handle<JSFunction> function,
- Handle<String> name) {
+ Handle<Name> name) {
// ----------- S t a t e -------------
// -- a2 : name
// -- ra : return address
// -----------------------------------
if (HasCustomCallGenerator(function)) {
- Handle<Code> code = CompileCustomCall(object, holder, cell, function, name);
+ Handle<Code> code = CompileCustomCall(
+ object, holder, cell, function, Handle<String>::cast(name),
+ Code::NORMAL);
// A null handle means bail out to the regular compiler code below.
if (!code.is_null()) return code;
}
@@ -2620,7 +2770,7 @@ Handle<Code> CallStubCompiler::CompileCallGlobal(
__ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
// Jump to the cached code (tail call).
- Counters* counters = masm()->isolate()->counters();
+ Counters* counters = isolate()->counters();
__ IncrementCounter(counters->call_global_inline(), 1, a3, t0);
ParameterCount expected(function->shared()->formal_parameter_count());
CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
@@ -2643,75 +2793,50 @@ Handle<Code> CallStubCompiler::CompileCallGlobal(
}
-Handle<Code> StoreStubCompiler::CompileStoreField(Handle<JSObject> object,
- int index,
- Handle<Map> transition,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- a0 : value
- // -- a1 : receiver
- // -- a2 : name
- // -- ra : return address
- // -----------------------------------
- Label miss;
-
- // Name register might be clobbered.
- GenerateStoreField(masm(),
- object,
- index,
- transition,
- name,
- a1, a2, a3, t0,
- &miss);
- __ bind(&miss);
- __ li(a2, Operand(Handle<String>(name))); // Restore name.
- Handle<Code> ic = masm()->isolate()->builtins()->Builtins::StoreIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(transition.is_null()
- ? Code::FIELD
- : Code::MAP_TRANSITION, name);
-}
-
-
Handle<Code> StoreStubCompiler::CompileStoreCallback(
- Handle<String> name,
- Handle<JSObject> receiver,
+ Handle<JSObject> object,
Handle<JSObject> holder,
- Handle<AccessorInfo> callback) {
- // ----------- S t a t e -------------
- // -- a0 : value
- // -- a1 : receiver
- // -- a2 : name
- // -- ra : return address
- // -----------------------------------
- Label miss;
- // Check that the maps haven't changed.
- __ JumpIfSmi(a1, &miss, a3);
- CheckPrototypes(receiver, a1, holder, a3, t0, t1, name, &miss);
+ Handle<Name> name,
+ Handle<ExecutableAccessorInfo> callback) {
+ Label success;
+ HandlerFrontend(object, receiver(), holder, name, &success);
+ __ bind(&success);
// Stub never generated for non-global objects that require access
// checks.
ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
- __ push(a1); // Receiver.
- __ li(a3, Operand(callback)); // Callback info.
- __ Push(a3, a2, a0);
+ __ push(receiver()); // Receiver.
+ __ li(at, Operand(callback)); // Callback info.
+ __ push(at);
+ __ li(at, Operand(name));
+ __ Push(at, value());
// Do tail-call to the runtime system.
ExternalReference store_callback_property =
- ExternalReference(IC_Utility(IC::kStoreCallbackProperty),
- masm()->isolate());
+ ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate());
__ TailCallExternalReference(store_callback_property, 4, 1);
- // Handle store cache miss.
- __ bind(&miss);
- Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
+ // Return the generated code.
+ return GetCode(kind(), Code::CALLBACKS, name);
+}
+
+
+Handle<Code> StoreStubCompiler::CompileStoreCallback(
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ const CallOptimization& call_optimization) {
+ Label success;
+ HandlerFrontend(object, receiver(), holder, name, &success);
+ __ bind(&success);
+
+ Register values[] = { value() };
+ GenerateFastApiCall(
+ masm(), call_optimization, receiver(), scratch3(), 1, values);
// Return the generated code.
- return GetCode(Code::CALLBACKS, name);
+ return GetCode(kind(), Code::CALLBACKS, name);
}
@@ -2739,8 +2864,9 @@ void StoreStubCompiler::GenerateStoreViaSetter(
__ push(a1);
__ push(a0);
ParameterCount actual(1);
- __ InvokeFunction(setter, actual, CALL_FUNCTION, NullCallWrapper(),
- CALL_AS_METHOD);
+ ParameterCount expected(setter);
+ __ InvokeFunction(setter, expected, actual,
+ CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -2761,198 +2887,101 @@ void StoreStubCompiler::GenerateStoreViaSetter(
#define __ ACCESS_MASM(masm())
-Handle<Code> StoreStubCompiler::CompileStoreViaSetter(
- Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<JSFunction> setter) {
- // ----------- S t a t e -------------
- // -- a0 : value
- // -- a1 : receiver
- // -- a2 : name
- // -- ra : return address
- // -----------------------------------
- Label miss;
-
- // Check that the maps haven't changed.
- __ JumpIfSmi(a1, &miss);
- CheckPrototypes(receiver, a1, holder, a3, t0, t1, name, &miss);
-
- GenerateStoreViaSetter(masm(), setter);
-
- __ bind(&miss);
- Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(Code::CALLBACKS, name);
-}
-
-
Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
- Handle<JSObject> receiver,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- a0 : value
- // -- a1 : receiver
- // -- a2 : name
- // -- ra : return address
- // -----------------------------------
+ Handle<JSObject> object,
+ Handle<Name> name) {
Label miss;
// Check that the map of the object hasn't changed.
- __ CheckMap(a1, a3, Handle<Map>(receiver->map()), &miss,
- DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
+ __ CheckMap(receiver(), scratch1(), Handle<Map>(object->map()), &miss,
+ DO_SMI_CHECK);
// Perform global security token check if needed.
- if (receiver->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(a1, a3, &miss);
+ if (object->IsJSGlobalProxy()) {
+ __ CheckAccessGlobalProxy(receiver(), scratch1(), &miss);
}
// Stub is never generated for non-global objects that require access
// checks.
- ASSERT(receiver->IsJSGlobalProxy() || !receiver->IsAccessCheckNeeded());
+ ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
- __ Push(a1, a2, a0); // Receiver, name, value.
+ __ Push(receiver(), this->name(), value());
- __ li(a0, Operand(Smi::FromInt(strict_mode_)));
- __ push(a0); // Strict mode.
+ __ li(scratch1(), Operand(Smi::FromInt(strict_mode())));
+ __ push(scratch1()); // Strict mode.
// Do tail-call to the runtime system.
ExternalReference store_ic_property =
- ExternalReference(IC_Utility(IC::kStoreInterceptorProperty),
- masm()->isolate());
+ ExternalReference(IC_Utility(IC::kStoreInterceptorProperty), isolate());
__ TailCallExternalReference(store_ic_property, 4, 1);
// Handle store cache miss.
__ bind(&miss);
- Handle<Code> ic = masm()->isolate()->builtins()->Builtins::StoreIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
// Return the generated code.
- return GetCode(Code::INTERCEPTOR, name);
+ return GetCode(kind(), Code::INTERCEPTOR, name);
}
-Handle<Code> StoreStubCompiler::CompileStoreGlobal(
- Handle<GlobalObject> object,
- Handle<JSGlobalPropertyCell> cell,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- a0 : value
- // -- a1 : receiver
- // -- a2 : name
- // -- ra : return address
- // -----------------------------------
- Label miss;
+Handle<Code> LoadStubCompiler::CompileLoadNonexistent(
+ Handle<JSObject> object,
+ Handle<JSObject> last,
+ Handle<Name> name,
+ Handle<JSGlobalObject> global) {
+ Label success;
- // Check that the map of the global has not changed.
- __ lw(a3, FieldMemOperand(a1, HeapObject::kMapOffset));
- __ Branch(&miss, ne, a3, Operand(Handle<Map>(object->map())));
-
- // Check that the value in the cell is not the hole. If it is, this
- // cell could have been deleted and reintroducing the global needs
- // to update the property details in the property dictionary of the
- // global object. We bail out to the runtime system to do that.
- __ li(t0, Operand(cell));
- __ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
- __ lw(t2, FieldMemOperand(t0, JSGlobalPropertyCell::kValueOffset));
- __ Branch(&miss, eq, t1, Operand(t2));
-
- // Store the value in the cell.
- __ sw(a0, FieldMemOperand(t0, JSGlobalPropertyCell::kValueOffset));
- __ mov(v0, a0); // Stored value must be returned in v0.
- // Cells are always rescanned, so no write barrier here.
-
- Counters* counters = masm()->isolate()->counters();
- __ IncrementCounter(counters->named_store_global_inline(), 1, a1, a3);
- __ Ret();
+ NonexistentHandlerFrontend(object, last, name, &success, global);
- // Handle store cache miss.
- __ bind(&miss);
- __ IncrementCounter(counters->named_store_global_inline_miss(), 1, a1, a3);
- Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
+ __ bind(&success);
+ // Return undefined if the maps of the full prototype chain are still the same.
+ __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
+ __ Ret();
// Return the generated code.
- return GetCode(Code::NORMAL, name);
+ return GetCode(kind(), Code::NONEXISTENT, name);
}
-Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<String> name,
- Handle<JSObject> object,
- Handle<JSObject> last) {
- // ----------- S t a t e -------------
- // -- a0 : receiver
- // -- ra : return address
- // -----------------------------------
- Label miss;
-
- // Check that the receiver is not a smi.
- __ JumpIfSmi(a0, &miss);
+Register* LoadStubCompiler::registers() {
+ // receiver, name, scratch1, scratch2, scratch3, scratch4.
+ static Register registers[] = { a0, a2, a3, a1, t0, t1 };
+ return registers;
+}
- // Check the maps of the full prototype chain.
- CheckPrototypes(object, a0, last, a3, a1, t0, name, &miss);
- // If the last object in the prototype chain is a global object,
- // check that the global property cell is empty.
- if (last->IsGlobalObject()) {
- GenerateCheckPropertyCell(
- masm(), Handle<GlobalObject>::cast(last), name, a1, &miss);
- }
+Register* KeyedLoadStubCompiler::registers() {
+ // receiver, name, scratch1, scratch2, scratch3, scratch4.
+ static Register registers[] = { a1, a0, a2, a3, t0, t1 };
+ return registers;
+}
- // Return undefined if maps of the full prototype chain is still the same.
- __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
- __ Ret();
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
-
- // Return the generated code.
- return GetCode(Code::NONEXISTENT, factory()->empty_string());
+Register* StoreStubCompiler::registers() {
+ // receiver, name, value, scratch1, scratch2, scratch3.
+ static Register registers[] = { a1, a2, a0, a3, t0, t1 };
+ return registers;
}
-Handle<Code> LoadStubCompiler::CompileLoadField(Handle<JSObject> object,
- Handle<JSObject> holder,
- int index,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- a0 : receiver
- // -- a2 : name
- // -- ra : return address
- // -----------------------------------
- Label miss;
-
- __ mov(v0, a0);
+Register* KeyedStoreStubCompiler::registers() {
+ // receiver, name, value, scratch1, scratch2, scratch3.
+ static Register registers[] = { a2, a1, a0, a3, t0, t1 };
+ return registers;
+}
- GenerateLoadField(object, holder, v0, a3, a1, t0, index, name, &miss);
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
- // Return the generated code.
- return GetCode(Code::FIELD, name);
+void KeyedLoadStubCompiler::GenerateNameCheck(Handle<Name> name,
+ Register name_reg,
+ Label* miss) {
+ __ Branch(miss, ne, name_reg, Operand(name));
}
-Handle<Code> LoadStubCompiler::CompileLoadCallback(
- Handle<String> name,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<AccessorInfo> callback) {
- // ----------- S t a t e -------------
- // -- a0 : receiver
- // -- a2 : name
- // -- ra : return address
- // -----------------------------------
- Label miss;
- GenerateLoadCallback(object, holder, a0, a2, a3, a1, t0, t1, callback, name,
- &miss);
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
-
- // Return the generated code.
- return GetCode(Code::CALLBACKS, name);
+void KeyedStoreStubCompiler::GenerateNameCheck(Handle<Name> name,
+ Register name_reg,
+ Label* miss) {
+ __ Branch(miss, ne, name_reg, Operand(name));
}
@@ -2961,6 +2990,7 @@ Handle<Code> LoadStubCompiler::CompileLoadCallback(
void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
+ Register receiver,
Handle<JSFunction> getter) {
// ----------- S t a t e -------------
// -- a0 : receiver
@@ -2972,10 +3002,11 @@ void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
if (!getter.is_null()) {
// Call the JavaScript getter with the receiver on the stack.
- __ push(a0);
+ __ push(receiver);
ParameterCount actual(0);
- __ InvokeFunction(getter, actual, CALL_FUNCTION, NullCallWrapper(),
- CALL_AS_METHOD);
+ ParameterCount expected(getter);
+ __ InvokeFunction(getter, expected, actual,
+ CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -2993,95 +3024,22 @@ void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
#define __ ACCESS_MASM(masm())
-Handle<Code> LoadStubCompiler::CompileLoadViaGetter(
- Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<JSFunction> getter) {
- // ----------- S t a t e -------------
- // -- a0 : receiver
- // -- a2 : name
- // -- ra : return address
- // -----------------------------------
- Label miss;
-
- // Check that the maps haven't changed.
- __ JumpIfSmi(a0, &miss);
- CheckPrototypes(receiver, a0, holder, a3, t0, a1, name, &miss);
-
- GenerateLoadViaGetter(masm(), getter);
-
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
-
- // Return the generated code.
- return GetCode(Code::CALLBACKS, name);
-}
-
-
-Handle<Code> LoadStubCompiler::CompileLoadConstant(Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<JSFunction> value,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- a0 : receiver
- // -- a2 : name
- // -- ra : return address
- // -----------------------------------
- Label miss;
-
- GenerateLoadConstant(object, holder, a0, a3, a1, t0, value, name, &miss);
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
-
- // Return the generated code.
- return GetCode(Code::CONSTANT_FUNCTION, name);
-}
-
-
-Handle<Code> LoadStubCompiler::CompileLoadInterceptor(Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- a0 : receiver
- // -- a2 : name
- // -- ra : return address
- // -- [sp] : receiver
- // -----------------------------------
- Label miss;
-
- LookupResult lookup(isolate());
- LookupPostInterceptor(holder, name, &lookup);
- GenerateLoadInterceptor(object, holder, &lookup, a0, a2, a3, a1, t0, name,
- &miss);
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
-
- // Return the generated code.
- return GetCode(Code::INTERCEPTOR, name);
-}
-
-
Handle<Code> LoadStubCompiler::CompileLoadGlobal(
Handle<JSObject> object,
- Handle<GlobalObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<String> name,
+ Handle<GlobalObject> global,
+ Handle<PropertyCell> cell,
+ Handle<Name> name,
bool is_dont_delete) {
- // ----------- S t a t e -------------
- // -- a0 : receiver
- // -- a2 : name
- // -- ra : return address
- // -----------------------------------
- Label miss;
+ Label success, miss;
- // Check that the map of the global has not changed.
- __ JumpIfSmi(a0, &miss);
- CheckPrototypes(object, a0, holder, a3, t0, a1, name, &miss);
+ __ CheckMap(
+ receiver(), scratch1(), Handle<Map>(object->map()), &miss, DO_SMI_CHECK);
+ HandlerFrontendHeader(
+ object, receiver(), Handle<JSObject>::cast(global), name, &miss);
// Get the value from the cell.
__ li(a3, Operand(cell));
- __ lw(t0, FieldMemOperand(a3, JSGlobalPropertyCell::kValueOffset));
+ __ lw(t0, FieldMemOperand(a3, Cell::kValueOffset));
// Check for deleted property if property can actually be deleted.
if (!is_dont_delete) {
@@ -3089,293 +3047,54 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
__ Branch(&miss, eq, t0, Operand(at));
}
- __ mov(v0, t0);
- Counters* counters = masm()->isolate()->counters();
- __ IncrementCounter(counters->named_load_global_stub(), 1, a1, a3);
- __ Ret();
-
- __ bind(&miss);
- __ IncrementCounter(counters->named_load_global_stub_miss(), 1, a1, a3);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
-
- // Return the generated code.
- return GetCode(Code::NORMAL, name);
-}
-
-
-Handle<Code> KeyedLoadStubCompiler::CompileLoadField(Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- int index) {
- // ----------- S t a t e -------------
- // -- ra : return address
- // -- a0 : key
- // -- a1 : receiver
- // -----------------------------------
- Label miss;
-
- // Check the key is the cached one.
- __ Branch(&miss, ne, a0, Operand(name));
-
- GenerateLoadField(receiver, holder, a1, a2, a3, t0, index, name, &miss);
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- return GetCode(Code::FIELD, name);
-}
-
-
-Handle<Code> KeyedLoadStubCompiler::CompileLoadCallback(
- Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<AccessorInfo> callback) {
- // ----------- S t a t e -------------
- // -- ra : return address
- // -- a0 : key
- // -- a1 : receiver
- // -----------------------------------
- Label miss;
-
- // Check the key is the cached one.
- __ Branch(&miss, ne, a0, Operand(name));
-
- GenerateLoadCallback(receiver, holder, a1, a0, a2, a3, t0, t1, callback,
- name, &miss);
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- return GetCode(Code::CALLBACKS, name);
-}
-
-
-Handle<Code> KeyedLoadStubCompiler::CompileLoadConstant(
- Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<JSFunction> value) {
- // ----------- S t a t e -------------
- // -- ra : return address
- // -- a0 : key
- // -- a1 : receiver
- // -----------------------------------
- Label miss;
-
- // Check the key is the cached one.
- __ Branch(&miss, ne, a0, Operand(name));
+ HandlerFrontendFooter(name, &success, &miss);
+ __ bind(&success);
- GenerateLoadConstant(receiver, holder, a1, a2, a3, t0, value, name, &miss);
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+ Counters* counters = isolate()->counters();
+ __ IncrementCounter(counters->named_load_global_stub(), 1, a1, a3);
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, t0);
// Return the generated code.
- return GetCode(Code::CONSTANT_FUNCTION, name);
+ return GetICCode(kind(), Code::NORMAL, name);
}
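
The rewritten global load above fetches the value out of the property cell and branches to the miss path when the cell holds the hole (the global was deleted) and the property is actually deletable. A minimal standalone sketch of that check, using hypothetical PropertyCellSketch and LoadGlobal names rather than the real V8 types:

    #include <iostream>
    #include <optional>

    struct PropertyCellSketch {
      std::optional<int> value;  // empty models "the hole" (deleted property)
    };

    // Returns the cached value, or nullopt to signal a miss.
    std::optional<int> LoadGlobal(const PropertyCellSketch& cell,
                                  bool is_dont_delete) {
      // Check for a deleted property only if it can actually be deleted.
      if (!is_dont_delete && !cell.value.has_value()) {
        return std::nullopt;  // miss: fall back to the runtime
      }
      return cell.value;
    }

    int main() {
      PropertyCellSketch cell{42};
      if (auto v = LoadGlobal(cell, /*is_dont_delete=*/false)) {
        std::cout << "cached global value: " << *v << "\n";
      } else {
        std::cout << "miss\n";
      }
      return 0;
    }
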
-Handle<Code> KeyedLoadStubCompiler::CompileLoadInterceptor(
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- ra : return address
- // -- a0 : key
- // -- a1 : receiver
- // -----------------------------------
- Label miss;
-
- // Check the key is the cached one.
- __ Branch(&miss, ne, a0, Operand(name));
-
- LookupResult lookup(isolate());
- LookupPostInterceptor(holder, name, &lookup);
- GenerateLoadInterceptor(receiver, holder, &lookup, a1, a0, a2, a3, t0, name,
- &miss);
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- return GetCode(Code::INTERCEPTOR, name);
-}
-
-
-Handle<Code> KeyedLoadStubCompiler::CompileLoadArrayLength(
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- ra : return address
- // -- a0 : key
- // -- a1 : receiver
- // -----------------------------------
- Label miss;
-
- // Check the key is the cached one.
- __ Branch(&miss, ne, a0, Operand(name));
-
- GenerateLoadArrayLength(masm(), a1, a2, &miss);
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- return GetCode(Code::CALLBACKS, name);
-}
-
-
-Handle<Code> KeyedLoadStubCompiler::CompileLoadStringLength(
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- ra : return address
- // -- a0 : key
- // -- a1 : receiver
- // -----------------------------------
- Label miss;
-
- Counters* counters = masm()->isolate()->counters();
- __ IncrementCounter(counters->keyed_load_string_length(), 1, a2, a3);
-
- // Check the key is the cached one.
- __ Branch(&miss, ne, a0, Operand(name));
-
- GenerateLoadStringLength(masm(), a1, a2, a3, &miss, true);
- __ bind(&miss);
- __ DecrementCounter(counters->keyed_load_string_length(), 1, a2, a3);
-
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- return GetCode(Code::CALLBACKS, name);
-}
-
-
-Handle<Code> KeyedLoadStubCompiler::CompileLoadFunctionPrototype(
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- ra : return address
- // -- a0 : key
- // -- a1 : receiver
- // -----------------------------------
+Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC(
+ MapHandleList* receiver_maps,
+ CodeHandleList* handlers,
+ Handle<Name> name,
+ Code::StubType type,
+ IcCheckType check) {
Label miss;
- Counters* counters = masm()->isolate()->counters();
- __ IncrementCounter(counters->keyed_load_function_prototype(), 1, a2, a3);
-
- // Check the name hasn't changed.
- __ Branch(&miss, ne, a0, Operand(name));
-
- GenerateLoadFunctionPrototype(masm(), a1, a2, a3, &miss);
- __ bind(&miss);
- __ DecrementCounter(counters->keyed_load_function_prototype(), 1, a2, a3);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- return GetCode(Code::CALLBACKS, name);
-}
-
-
-Handle<Code> KeyedLoadStubCompiler::CompileLoadElement(
- Handle<Map> receiver_map) {
- // ----------- S t a t e -------------
- // -- ra : return address
- // -- a0 : key
- // -- a1 : receiver
- // -----------------------------------
- ElementsKind elements_kind = receiver_map->elements_kind();
- Handle<Code> stub = KeyedLoadElementStub(elements_kind).GetCode();
-
- __ DispatchMap(a1, a2, receiver_map, stub, DO_SMI_CHECK);
-
- Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(Code::NORMAL, factory()->empty_string());
-}
+ if (check == PROPERTY) {
+ GenerateNameCheck(name, this->name(), &miss);
+ }
-
-Handle<Code> KeyedLoadStubCompiler::CompileLoadPolymorphic(
- MapHandleList* receiver_maps,
- CodeHandleList* handler_ics) {
- // ----------- S t a t e -------------
- // -- ra : return address
- // -- a0 : key
- // -- a1 : receiver
- // -----------------------------------
- Label miss;
- __ JumpIfSmi(a1, &miss);
+ __ JumpIfSmi(receiver(), &miss);
+ Register map_reg = scratch1();
int receiver_count = receiver_maps->length();
- __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
+ int number_of_handled_maps = 0;
+ __ lw(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
for (int current = 0; current < receiver_count; ++current) {
- __ Jump(handler_ics->at(current), RelocInfo::CODE_TARGET,
- eq, a2, Operand(receiver_maps->at(current)));
+ Handle<Map> map = receiver_maps->at(current);
+ if (!map->is_deprecated()) {
+ number_of_handled_maps++;
+ __ Jump(handlers->at(current), RelocInfo::CODE_TARGET,
+ eq, map_reg, Operand(receiver_maps->at(current)));
+ }
}
+ ASSERT(number_of_handled_maps != 0);
__ bind(&miss);
- Handle<Code> miss_ic = isolate()->builtins()->KeyedLoadIC_Miss();
- __ Jump(miss_ic, RelocInfo::CODE_TARGET);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
// Return the generated code.
- return GetCode(Code::NORMAL, factory()->empty_string(), MEGAMORPHIC);
-}
-
-
-Handle<Code> KeyedStoreStubCompiler::CompileStoreField(Handle<JSObject> object,
- int index,
- Handle<Map> transition,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- a0 : value
- // -- a1 : key
- // -- a2 : receiver
- // -- ra : return address
- // -----------------------------------
-
- Label miss;
-
- Counters* counters = masm()->isolate()->counters();
- __ IncrementCounter(counters->keyed_store_field(), 1, a3, t0);
-
- // Check that the name has not changed.
- __ Branch(&miss, ne, a1, Operand(name));
-
- // a3 is used as scratch register. a1 and a2 keep their values if a jump to
- // the miss label is generated.
- GenerateStoreField(masm(),
- object,
- index,
- transition,
- name,
- a2, a1, a3, t0,
- &miss);
- __ bind(&miss);
-
- __ DecrementCounter(counters->keyed_store_field(), 1, a3, t0);
- Handle<Code> ic = masm()->isolate()->builtins()->KeyedStoreIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(transition.is_null()
- ? Code::FIELD
- : Code::MAP_TRANSITION, name);
-}
-
-
-Handle<Code> KeyedStoreStubCompiler::CompileStoreElement(
- Handle<Map> receiver_map) {
- // ----------- S t a t e -------------
- // -- a0 : value
- // -- a1 : key
- // -- a2 : receiver
- // -- ra : return address
- // -- a3 : scratch
- // -----------------------------------
- ElementsKind elements_kind = receiver_map->elements_kind();
- bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
- Handle<Code> stub =
- KeyedStoreElementStub(is_js_array, elements_kind, grow_mode_).GetCode();
-
- __ DispatchMap(a2, a3, receiver_map, stub, DO_SMI_CHECK);
-
- Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(Code::NORMAL, factory()->empty_string());
+ InlineCacheState state =
+ number_of_handled_maps > 1 ? POLYMORPHIC : MONOMORPHIC;
+ return GetICCode(kind(), type, name, state);
}
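
CompilePolymorphicIC above loads the receiver's map, compares it against each candidate map while skipping deprecated maps, tail-jumps to the matching handler, and otherwise falls through to the miss builtin. A plain standalone C++ sketch of that dispatch loop, with hypothetical MapSketch, Handler and DispatchSketch names:

    #include <iostream>
    #include <utility>
    #include <vector>

    struct MapSketch {
      int id;
      bool is_deprecated;
    };

    using Handler = void (*)();

    void HandlerA() { std::cout << "handler A\n"; }
    void Miss() { std::cout << "miss: tail-call the miss builtin\n"; }

    // Compare the receiver's map against each non-deprecated candidate and
    // dispatch to the matching handler; otherwise fall through to the miss path.
    void DispatchSketch(const MapSketch& receiver_map,
                        const std::vector<std::pair<MapSketch, Handler>>& cases) {
      for (const auto& c : cases) {
        if (!c.first.is_deprecated && c.first.id == receiver_map.id) {
          c.second();
          return;
        }
      }
      Miss();
    }

    int main() {
      DispatchSketch({1, false}, {{{1, false}, HandlerA}, {{2, true}, HandlerA}});
      return 0;
    }

In the real stub the count of handled maps also determines whether the resulting IC is recorded as monomorphic or polymorphic.
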
@@ -3383,187 +3102,30 @@ Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
MapHandleList* receiver_maps,
CodeHandleList* handler_stubs,
MapHandleList* transitioned_maps) {
- // ----------- S t a t e -------------
- // -- a0 : value
- // -- a1 : key
- // -- a2 : receiver
- // -- ra : return address
- // -- a3 : scratch
- // -----------------------------------
Label miss;
- __ JumpIfSmi(a2, &miss);
+ __ JumpIfSmi(receiver(), &miss);
int receiver_count = receiver_maps->length();
- __ lw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
+ __ lw(scratch1(), FieldMemOperand(receiver(), HeapObject::kMapOffset));
for (int i = 0; i < receiver_count; ++i) {
if (transitioned_maps->at(i).is_null()) {
__ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, eq,
- a3, Operand(receiver_maps->at(i)));
+ scratch1(), Operand(receiver_maps->at(i)));
} else {
Label next_map;
- __ Branch(&next_map, ne, a3, Operand(receiver_maps->at(i)));
- __ li(a3, Operand(transitioned_maps->at(i)));
+ __ Branch(&next_map, ne, scratch1(), Operand(receiver_maps->at(i)));
+ __ li(transition_map(), Operand(transitioned_maps->at(i)));
__ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET);
__ bind(&next_map);
}
}
__ bind(&miss);
- Handle<Code> miss_ic = isolate()->builtins()->KeyedStoreIC_Miss();
- __ Jump(miss_ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(Code::NORMAL, factory()->empty_string(), MEGAMORPHIC);
-}
-
-
-Handle<Code> ConstructStubCompiler::CompileConstructStub(
- Handle<JSFunction> function) {
- // a0 : argc
- // a1 : constructor
- // ra : return address
- // [sp] : last argument
- Label generic_stub_call;
-
- // Use t7 for holding undefined which is used in several places below.
- __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Check to see whether there are any break points in the function code. If
- // there are jump to the generic constructor stub which calls the actual
- // code for the function thereby hitting the break points.
- __ lw(t5, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ lw(a2, FieldMemOperand(t5, SharedFunctionInfo::kDebugInfoOffset));
- __ Branch(&generic_stub_call, ne, a2, Operand(t7));
-#endif
-
- // Load the initial map and verify that it is in fact a map.
- // a1: constructor function
- // t7: undefined
- __ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
- __ JumpIfSmi(a2, &generic_stub_call);
- __ GetObjectType(a2, a3, t0);
- __ Branch(&generic_stub_call, ne, t0, Operand(MAP_TYPE));
-
-#ifdef DEBUG
- // Cannot construct functions this way.
- // a0: argc
- // a1: constructor function
- // a2: initial map
- // t7: undefined
- __ lbu(a3, FieldMemOperand(a2, Map::kInstanceTypeOffset));
- __ Check(ne, "Function constructed by construct stub.",
- a3, Operand(JS_FUNCTION_TYPE));
-#endif
-
- // Now allocate the JSObject in new space.
- // a0: argc
- // a1: constructor function
- // a2: initial map
- // t7: undefined
- ASSERT(function->has_initial_map());
- __ lbu(a3, FieldMemOperand(a2, Map::kInstanceSizeOffset));
-#ifdef DEBUG
- int instance_size = function->initial_map()->instance_size();
- __ Check(eq, "Instance size of initial map changed.",
- a3, Operand(instance_size >> kPointerSizeLog2));
-#endif
- __ AllocateInNewSpace(a3, t4, t5, t6, &generic_stub_call, SIZE_IN_WORDS);
-
- // Allocated the JSObject, now initialize the fields. Map is set to initial
- // map and properties and elements are set to empty fixed array.
- // a0: argc
- // a1: constructor function
- // a2: initial map
- // a3: object size (in words)
- // t4: JSObject (not tagged)
- // t7: undefined
- __ LoadRoot(t6, Heap::kEmptyFixedArrayRootIndex);
- __ mov(t5, t4);
- __ sw(a2, MemOperand(t5, JSObject::kMapOffset));
- __ sw(t6, MemOperand(t5, JSObject::kPropertiesOffset));
- __ sw(t6, MemOperand(t5, JSObject::kElementsOffset));
- __ Addu(t5, t5, Operand(3 * kPointerSize));
- ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
- ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset);
- ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset);
-
-
- // Calculate the location of the first argument. The stack contains only the
- // argc arguments.
- __ sll(a1, a0, kPointerSizeLog2);
- __ Addu(a1, a1, sp);
-
- // Fill all the in-object properties with undefined.
- // a0: argc
- // a1: first argument
- // a3: object size (in words)
- // t4: JSObject (not tagged)
- // t5: First in-object property of JSObject (not tagged)
- // t7: undefined
- // Fill the initialized properties with a constant value or a passed argument
- // depending on the this.x = ...; assignment in the function.
- Handle<SharedFunctionInfo> shared(function->shared());
- for (int i = 0; i < shared->this_property_assignments_count(); i++) {
- if (shared->IsThisPropertyAssignmentArgument(i)) {
- Label not_passed, next;
- // Check if the argument assigned to the property is actually passed.
- int arg_number = shared->GetThisPropertyAssignmentArgument(i);
- __ Branch(&not_passed, less_equal, a0, Operand(arg_number));
- // Argument passed - find it on the stack.
- __ lw(a2, MemOperand(a1, (arg_number + 1) * -kPointerSize));
- __ sw(a2, MemOperand(t5));
- __ Addu(t5, t5, kPointerSize);
- __ jmp(&next);
- __ bind(&not_passed);
- // Set the property to undefined.
- __ sw(t7, MemOperand(t5));
- __ Addu(t5, t5, Operand(kPointerSize));
- __ bind(&next);
- } else {
- // Set the property to the constant value.
- Handle<Object> constant(shared->GetThisPropertyAssignmentConstant(i));
- __ li(a2, Operand(constant));
- __ sw(a2, MemOperand(t5));
- __ Addu(t5, t5, kPointerSize);
- }
- }
-
- // Fill the unused in-object property fields with undefined.
- for (int i = shared->this_property_assignments_count();
- i < function->initial_map()->inobject_properties();
- i++) {
- __ sw(t7, MemOperand(t5));
- __ Addu(t5, t5, kPointerSize);
- }
-
- // a0: argc
- // t4: JSObject (not tagged)
- // Move argc to a1 and the JSObject to return to v0 and tag it.
- __ mov(a1, a0);
- __ mov(v0, t4);
- __ Or(v0, v0, Operand(kHeapObjectTag));
-
- // v0: JSObject
- // a1: argc
- // Remove caller arguments and receiver from the stack and return.
- __ sll(t0, a1, kPointerSizeLog2);
- __ Addu(sp, sp, t0);
- __ Addu(sp, sp, Operand(kPointerSize));
- Counters* counters = masm()->isolate()->counters();
- __ IncrementCounter(counters->constructed_objects(), 1, a1, a2);
- __ IncrementCounter(counters->constructed_objects_stub(), 1, a1, a2);
- __ Ret();
-
- // Jump to the generic stub in case the specialized code cannot handle the
- // construction.
- __ bind(&generic_stub_call);
- Handle<Code> generic_construct_stub =
- masm()->isolate()->builtins()->JSConstructStubGeneric();
- __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
// Return the generated code.
- return GetCode();
+ return GetICCode(
+ kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
}
@@ -3600,9 +3162,7 @@ void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
// -- a0 : key
// -- a1 : receiver
// -----------------------------------
- Handle<Code> slow_ic =
- masm->isolate()->builtins()->KeyedLoadIC_Slow();
- __ Jump(slow_ic, RelocInfo::CODE_TARGET);
+ TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Slow);
// Miss case, call the runtime.
__ bind(&miss_force_generic);
@@ -3612,1234 +3172,7 @@ void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
// -- a0 : key
// -- a1 : receiver
// -----------------------------------
-
- Handle<Code> miss_ic =
- masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
- __ Jump(miss_ic, RelocInfo::CODE_TARGET);
-}
-
-
-static bool IsElementTypeSigned(ElementsKind elements_kind) {
- switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_INT_ELEMENTS:
- return true;
-
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_PIXEL_ELEMENTS:
- return false;
-
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- return false;
- }
- return false;
-}
-
-
-static void GenerateSmiKeyCheck(MacroAssembler* masm,
- Register key,
- Register scratch0,
- Register scratch1,
- FPURegister double_scratch0,
- Label* fail) {
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
- Label key_ok;
- // Check for smi or a smi inside a heap number. We convert the heap
- // number and check if the conversion is exact and fits into the smi
- // range.
- __ JumpIfSmi(key, &key_ok);
- __ CheckMap(key,
- scratch0,
- Heap::kHeapNumberMapRootIndex,
- fail,
- DONT_DO_SMI_CHECK);
- __ ldc1(double_scratch0, FieldMemOperand(key, HeapNumber::kValueOffset));
- __ EmitFPUTruncate(kRoundToZero,
- double_scratch0,
- double_scratch0,
- scratch0,
- scratch1,
- kCheckForInexactConversion);
-
- __ Branch(fail, ne, scratch1, Operand(zero_reg));
-
- __ mfc1(scratch0, double_scratch0);
- __ SmiTagCheckOverflow(key, scratch0, scratch1);
- __ BranchOnOverflow(fail, scratch1);
- __ bind(&key_ok);
- } else {
- // Check that the key is a smi.
- __ JumpIfNotSmi(key, fail);
- }
-}
-
-
-void KeyedLoadStubCompiler::GenerateLoadExternalArray(
- MacroAssembler* masm,
- ElementsKind elements_kind) {
- // ---------- S t a t e --------------
- // -- ra : return address
- // -- a0 : key
- // -- a1 : receiver
- // -----------------------------------
- Label miss_force_generic, slow, failed_allocation;
-
- Register key = a0;
- Register receiver = a1;
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, key, t0, t1, f2, &miss_force_generic);
-
- __ lw(a3, FieldMemOperand(receiver, JSObject::kElementsOffset));
- // a3: elements array
-
- // Check that the index is in range.
- __ lw(t1, FieldMemOperand(a3, ExternalArray::kLengthOffset));
- __ sra(t2, key, kSmiTagSize);
- // Unsigned comparison catches both negative and too-large values.
- __ Branch(&miss_force_generic, Ugreater_equal, key, Operand(t1));
-
- __ lw(a3, FieldMemOperand(a3, ExternalArray::kExternalPointerOffset));
- // a3: base pointer of external storage
-
- // We are not untagging smi key and instead work with it
- // as if it was premultiplied by 2.
- STATIC_ASSERT((kSmiTag == 0) && (kSmiTagSize == 1));
-
- Register value = a2;
- switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
- __ srl(t2, key, 1);
- __ addu(t3, a3, t2);
- __ lb(value, MemOperand(t3, 0));
- break;
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ srl(t2, key, 1);
- __ addu(t3, a3, t2);
- __ lbu(value, MemOperand(t3, 0));
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- __ addu(t3, a3, key);
- __ lh(value, MemOperand(t3, 0));
- break;
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ addu(t3, a3, key);
- __ lhu(value, MemOperand(t3, 0));
- break;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ sll(t2, key, 1);
- __ addu(t3, a3, t2);
- __ lw(value, MemOperand(t3, 0));
- break;
- case EXTERNAL_FLOAT_ELEMENTS:
- __ sll(t3, t2, 2);
- __ addu(t3, a3, t3);
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
- __ lwc1(f0, MemOperand(t3, 0));
- } else {
- __ lw(value, MemOperand(t3, 0));
- }
- break;
- case EXTERNAL_DOUBLE_ELEMENTS:
- __ sll(t2, key, 2);
- __ addu(t3, a3, t2);
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
- __ ldc1(f0, MemOperand(t3, 0));
- } else {
- // t3: pointer to the beginning of the double we want to load.
- __ lw(a2, MemOperand(t3, 0));
- __ lw(a3, MemOperand(t3, Register::kSizeInBytes));
- }
- break;
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
-
- // For integer array types:
- // a2: value
- // For float array type:
- // f0: value (if FPU is supported)
- // a2: value (if FPU is not supported)
- // For double array type:
- // f0: value (if FPU is supported)
- // a2/a3: value (if FPU is not supported)
-
- if (elements_kind == EXTERNAL_INT_ELEMENTS) {
- // For the Int and UnsignedInt array types, we need to see whether
- // the value can be represented in a Smi. If not, we need to convert
- // it to a HeapNumber.
- Label box_int;
- __ Subu(t3, value, Operand(0xC0000000)); // Non-smi value gives neg result.
- __ Branch(&box_int, lt, t3, Operand(zero_reg));
- // Tag integer as smi and return it.
- __ sll(v0, value, kSmiTagSize);
- __ Ret();
-
- __ bind(&box_int);
- // Allocate a HeapNumber for the result and perform int-to-double
- // conversion.
- // The arm version uses a temporary here to save r0, but we don't need to
- // (a0 is not modified).
- __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(v0, a3, t0, t1, &slow);
-
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
- __ mtc1(value, f0);
- __ cvt_d_w(f0, f0);
- __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
- __ Ret();
- } else {
- Register dst1 = t2;
- Register dst2 = t3;
- FloatingPointHelper::Destination dest =
- FloatingPointHelper::kCoreRegisters;
- FloatingPointHelper::ConvertIntToDouble(masm,
- value,
- dest,
- f0,
- dst1,
- dst2,
- t1,
- f2);
- __ sw(dst1, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
- __ sw(dst2, FieldMemOperand(v0, HeapNumber::kExponentOffset));
- __ Ret();
- }
- } else if (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) {
- // The test is different for unsigned int values. Since we need
- // the value to be in the range of a positive smi, we can't
- // handle either of the top two bits being set in the value.
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
- Label pl_box_int;
- __ And(t2, value, Operand(0xC0000000));
- __ Branch(&pl_box_int, ne, t2, Operand(zero_reg));
-
- // It can fit in an Smi.
- // Tag integer as smi and return it.
- __ sll(v0, value, kSmiTagSize);
- __ Ret();
-
- __ bind(&pl_box_int);
- // Allocate a HeapNumber for the result and perform int-to-double
- // conversion. Don't use a0 and a1 as AllocateHeapNumber clobbers all
- // registers - also when jumping due to exhausted young space.
- __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(v0, t2, t3, t6, &slow);
-
- // This is replaced by a macro:
- // __ mtc1(value, f0); // LS 32-bits.
- // __ mtc1(zero_reg, f1); // MS 32-bits are all zero.
- // __ cvt_d_l(f0, f0); // Use 64 bit conv to get correct unsigned 32-bit.
-
- __ Cvt_d_uw(f0, value, f22);
-
- __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
-
- __ Ret();
- } else {
- // Check whether unsigned integer fits into smi.
- Label box_int_0, box_int_1, done;
- __ And(t2, value, Operand(0x80000000));
- __ Branch(&box_int_0, ne, t2, Operand(zero_reg));
- __ And(t2, value, Operand(0x40000000));
- __ Branch(&box_int_1, ne, t2, Operand(zero_reg));
-
- // Tag integer as smi and return it.
- __ sll(v0, value, kSmiTagSize);
- __ Ret();
-
- Register hiword = value; // a2.
- Register loword = a3;
-
- __ bind(&box_int_0);
- // Integer does not have leading zeros.
- GenerateUInt2Double(masm, hiword, loword, t0, 0);
- __ Branch(&done);
-
- __ bind(&box_int_1);
- // Integer has one leading zero.
- GenerateUInt2Double(masm, hiword, loword, t0, 1);
-
-
- __ bind(&done);
- // Integer was converted to double in registers hiword:loword.
- // Wrap it into a HeapNumber. Don't use a0 and a1 as AllocateHeapNumber
- // clobbers all registers - also when jumping due to exhausted young
- // space.
- __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(t2, t3, t5, t6, &slow);
-
- __ sw(hiword, FieldMemOperand(t2, HeapNumber::kExponentOffset));
- __ sw(loword, FieldMemOperand(t2, HeapNumber::kMantissaOffset));
-
- __ mov(v0, t2);
- __ Ret();
- }
- } else if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- // For the floating-point array type, we need to always allocate a
- // HeapNumber.
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
- // Allocate a HeapNumber for the result. Don't use a0 and a1 as
- // AllocateHeapNumber clobbers all registers - also when jumping due to
- // exhausted young space.
- __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(v0, t3, t5, t6, &slow);
- // The float (single) value is already in fpu reg f0 (if we use float).
- __ cvt_d_s(f0, f0);
- __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
- __ Ret();
- } else {
- // Allocate a HeapNumber for the result. Don't use a0 and a1 as
- // AllocateHeapNumber clobbers all registers - also when jumping due to
- // exhausted young space.
- __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(v0, t3, t5, t6, &slow);
- // FPU is not available, do manual single to double conversion.
-
- // a2: floating point value (binary32).
- // v0: heap number for result
-
- // Extract mantissa to t4.
- __ And(t4, value, Operand(kBinary32MantissaMask));
-
- // Extract exponent to t5.
- __ srl(t5, value, kBinary32MantissaBits);
- __ And(t5, t5, Operand(kBinary32ExponentMask >> kBinary32MantissaBits));
-
- Label exponent_rebiased;
- __ Branch(&exponent_rebiased, eq, t5, Operand(zero_reg));
-
- __ li(t0, 0x7ff);
- __ Xor(t1, t5, Operand(0xFF));
- __ Movz(t5, t0, t1); // Set t5 to 0x7ff only if t5 is equal to 0xff.
- __ Branch(&exponent_rebiased, eq, t1, Operand(zero_reg));
-
- // Rebias exponent.
- __ Addu(t5,
- t5,
- Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias));
-
- __ bind(&exponent_rebiased);
- __ And(a2, value, Operand(kBinary32SignMask));
- value = no_reg;
- __ sll(t0, t5, HeapNumber::kMantissaBitsInTopWord);
- __ or_(a2, a2, t0);
-
- // Shift mantissa.
- static const int kMantissaShiftForHiWord =
- kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
-
- static const int kMantissaShiftForLoWord =
- kBitsPerInt - kMantissaShiftForHiWord;
-
- __ srl(t0, t4, kMantissaShiftForHiWord);
- __ or_(a2, a2, t0);
- __ sll(a0, t4, kMantissaShiftForLoWord);
-
- __ sw(a2, FieldMemOperand(v0, HeapNumber::kExponentOffset));
- __ sw(a0, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
- __ Ret();
- }
-
- } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
- // Allocate a HeapNumber for the result. Don't use a0 and a1 as
- // AllocateHeapNumber clobbers all registers - also when jumping due to
- // exhausted young space.
- __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(v0, t3, t5, t6, &slow);
- // The double value is already in f0
- __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
- __ Ret();
- } else {
- // Allocate a HeapNumber for the result. Don't use a0 and a1 as
- // AllocateHeapNumber clobbers all registers - also when jumping due to
- // exhausted young space.
- __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(v0, t3, t5, t6, &slow);
-
- __ sw(a2, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
- __ sw(a3, FieldMemOperand(v0, HeapNumber::kExponentOffset));
- __ Ret();
- }
-
- } else {
- // Tag integer as smi and return it.
- __ sll(v0, value, kSmiTagSize);
- __ Ret();
- }
-
- // Slow case, key and receiver still in a0 and a1.
- __ bind(&slow);
- __ IncrementCounter(
- masm->isolate()->counters()->keyed_load_external_array_slow(),
- 1, a2, a3);
-
- // ---------- S t a t e --------------
- // -- ra : return address
- // -- a0 : key
- // -- a1 : receiver
- // -----------------------------------
-
- __ Push(a1, a0);
-
- __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
-
- __ bind(&miss_force_generic);
- Handle<Code> stub =
- masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
- __ Jump(stub, RelocInfo::CODE_TARGET);
-}
-
-
-void KeyedStoreStubCompiler::GenerateStoreExternalArray(
- MacroAssembler* masm,
- ElementsKind elements_kind) {
- // ---------- S t a t e --------------
- // -- a0 : value
- // -- a1 : key
- // -- a2 : receiver
- // -- ra : return address
- // -----------------------------------
-
- Label slow, check_heap_number, miss_force_generic;
-
- // Register usage.
- Register value = a0;
- Register key = a1;
- Register receiver = a2;
- // a3 mostly holds the elements array or the destination external array.
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, key, t0, t1, f2, &miss_force_generic);
-
- __ lw(a3, FieldMemOperand(receiver, JSObject::kElementsOffset));
-
- // Check that the index is in range.
- __ lw(t1, FieldMemOperand(a3, ExternalArray::kLengthOffset));
- // Unsigned comparison catches both negative and too-large values.
- __ Branch(&miss_force_generic, Ugreater_equal, key, Operand(t1));
-
- // Handle both smis and HeapNumbers in the fast path. Go to the
- // runtime for all other kinds of values.
- // a3: external array.
-
- if (elements_kind == EXTERNAL_PIXEL_ELEMENTS) {
- // Double to pixel conversion is only implemented in the runtime for now.
- __ JumpIfNotSmi(value, &slow);
- } else {
- __ JumpIfNotSmi(value, &check_heap_number);
- }
- __ SmiUntag(t1, value);
- __ lw(a3, FieldMemOperand(a3, ExternalArray::kExternalPointerOffset));
-
- // a3: base pointer of external storage.
- // t1: value (integer).
-
- switch (elements_kind) {
- case EXTERNAL_PIXEL_ELEMENTS: {
- // Clamp the value to [0..255].
- // v0 is used as a scratch register here.
- Label done;
- __ li(v0, Operand(255));
- // Normal branch: nop in delay slot.
- __ Branch(&done, gt, t1, Operand(v0));
- // Use delay slot in this branch.
- __ Branch(USE_DELAY_SLOT, &done, lt, t1, Operand(zero_reg));
- __ mov(v0, zero_reg); // In delay slot.
- __ mov(v0, t1); // Value is in range 0..255.
- __ bind(&done);
- __ mov(t1, v0);
-
- __ srl(t8, key, 1);
- __ addu(t8, a3, t8);
- __ sb(t1, MemOperand(t8, 0));
- }
- break;
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ srl(t8, key, 1);
- __ addu(t8, a3, t8);
- __ sb(t1, MemOperand(t8, 0));
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ addu(t8, a3, key);
- __ sh(t1, MemOperand(t8, 0));
- break;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ sll(t8, key, 1);
- __ addu(t8, a3, t8);
- __ sw(t1, MemOperand(t8, 0));
- break;
- case EXTERNAL_FLOAT_ELEMENTS:
- // Perform int-to-float conversion and store to memory.
- __ SmiUntag(t0, key);
- StoreIntAsFloat(masm, a3, t0, t1, t2, t3, t4);
- break;
- case EXTERNAL_DOUBLE_ELEMENTS:
- __ sll(t8, key, 2);
- __ addu(a3, a3, t8);
- // a3: effective address of the double element
- FloatingPointHelper::Destination destination;
- if (CpuFeatures::IsSupported(FPU)) {
- destination = FloatingPointHelper::kFPURegisters;
- } else {
- destination = FloatingPointHelper::kCoreRegisters;
- }
- FloatingPointHelper::ConvertIntToDouble(
- masm, t1, destination,
- f0, t2, t3, // These are: double_dst, dst1, dst2.
- t0, f2); // These are: scratch2, single_scratch.
- if (destination == FloatingPointHelper::kFPURegisters) {
- CpuFeatures::Scope scope(FPU);
- __ sdc1(f0, MemOperand(a3, 0));
- } else {
- __ sw(t2, MemOperand(a3, 0));
- __ sw(t3, MemOperand(a3, Register::kSizeInBytes));
- }
- break;
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
-
- // Entry registers are intact, a0 holds the value which is the return value.
- __ mov(v0, a0);
- __ Ret();
-
- if (elements_kind != EXTERNAL_PIXEL_ELEMENTS) {
- // a3: external array.
- __ bind(&check_heap_number);
- __ GetObjectType(value, t1, t2);
- __ Branch(&slow, ne, t2, Operand(HEAP_NUMBER_TYPE));
-
- __ lw(a3, FieldMemOperand(a3, ExternalArray::kExternalPointerOffset));
-
- // a3: base pointer of external storage.
-
- // The WebGL specification leaves the behavior of storing NaN and
- // +/-Infinity into integer arrays basically undefined. For more
- // reproducible behavior, convert these to zero.
-
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
-
- __ ldc1(f0, FieldMemOperand(a0, HeapNumber::kValueOffset));
-
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- __ cvt_s_d(f0, f0);
- __ sll(t8, key, 1);
- __ addu(t8, a3, t8);
- __ swc1(f0, MemOperand(t8, 0));
- } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- __ sll(t8, key, 2);
- __ addu(t8, a3, t8);
- __ sdc1(f0, MemOperand(t8, 0));
- } else {
- __ EmitECMATruncate(t3, f0, f2, t2, t1, t5);
-
- switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ srl(t8, key, 1);
- __ addu(t8, a3, t8);
- __ sb(t3, MemOperand(t8, 0));
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ addu(t8, a3, key);
- __ sh(t3, MemOperand(t8, 0));
- break;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ sll(t8, key, 1);
- __ addu(t8, a3, t8);
- __ sw(t3, MemOperand(t8, 0));
- break;
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
- }
-
- // Entry registers are intact, a0 holds the value
- // which is the return value.
- __ mov(v0, a0);
- __ Ret();
- } else {
- // FPU is not available, do manual conversions.
-
- __ lw(t3, FieldMemOperand(value, HeapNumber::kExponentOffset));
- __ lw(t4, FieldMemOperand(value, HeapNumber::kMantissaOffset));
-
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- Label done, nan_or_infinity_or_zero;
- static const int kMantissaInHiWordShift =
- kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
-
- static const int kMantissaInLoWordShift =
- kBitsPerInt - kMantissaInHiWordShift;
-
- // Test for all special exponent values: zeros, subnormal numbers, NaNs
- // and infinities. All these should be converted to 0.
- __ li(t5, HeapNumber::kExponentMask);
- __ and_(t6, t3, t5);
- __ Branch(&nan_or_infinity_or_zero, eq, t6, Operand(zero_reg));
-
- __ xor_(t1, t6, t5);
- __ li(t2, kBinary32ExponentMask);
- __ Movz(t6, t2, t1); // Only if t6 is equal to t5.
- __ Branch(&nan_or_infinity_or_zero, eq, t1, Operand(zero_reg));
-
- // Rebias exponent.
- __ srl(t6, t6, HeapNumber::kExponentShift);
- __ Addu(t6,
- t6,
- Operand(kBinary32ExponentBias - HeapNumber::kExponentBias));
-
- __ li(t1, Operand(kBinary32MaxExponent));
- __ Slt(t1, t1, t6);
- __ And(t2, t3, Operand(HeapNumber::kSignMask));
- __ Or(t2, t2, Operand(kBinary32ExponentMask));
- __ Movn(t3, t2, t1); // Only if t6 is gt kBinary32MaxExponent.
- __ Branch(&done, gt, t6, Operand(kBinary32MaxExponent));
-
- __ Slt(t1, t6, Operand(kBinary32MinExponent));
- __ And(t2, t3, Operand(HeapNumber::kSignMask));
- __ Movn(t3, t2, t1); // Only if t6 is lt kBinary32MinExponent.
- __ Branch(&done, lt, t6, Operand(kBinary32MinExponent));
-
- __ And(t7, t3, Operand(HeapNumber::kSignMask));
- __ And(t3, t3, Operand(HeapNumber::kMantissaMask));
- __ sll(t3, t3, kMantissaInHiWordShift);
- __ or_(t7, t7, t3);
- __ srl(t4, t4, kMantissaInLoWordShift);
- __ or_(t7, t7, t4);
- __ sll(t6, t6, kBinary32ExponentShift);
- __ or_(t3, t7, t6);
-
- __ bind(&done);
- __ sll(t9, key, 1);
- __ addu(t9, a3, t9);
- __ sw(t3, MemOperand(t9, 0));
-
- // Entry registers are intact, a0 holds the value which is the return
- // value.
- __ mov(v0, a0);
- __ Ret();
-
- __ bind(&nan_or_infinity_or_zero);
- __ And(t7, t3, Operand(HeapNumber::kSignMask));
- __ And(t3, t3, Operand(HeapNumber::kMantissaMask));
- __ or_(t6, t6, t7);
- __ sll(t3, t3, kMantissaInHiWordShift);
- __ or_(t6, t6, t3);
- __ srl(t4, t4, kMantissaInLoWordShift);
- __ or_(t3, t6, t4);
- __ Branch(&done);
- } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- __ sll(t8, key, 2);
- __ addu(t8, a3, t8);
- // t8: effective address of destination element.
- __ sw(t4, MemOperand(t8, 0));
- __ sw(t3, MemOperand(t8, Register::kSizeInBytes));
- __ mov(v0, a0);
- __ Ret();
- } else {
- bool is_signed_type = IsElementTypeSigned(elements_kind);
- int meaningfull_bits = is_signed_type ? (kBitsPerInt - 1) : kBitsPerInt;
- int32_t min_value = is_signed_type ? 0x80000000 : 0x00000000;
-
- Label done, sign;
-
- // Test for all special exponent values: zeros, subnormal numbers, NaNs
- // and infinities. All these should be converted to 0.
- __ li(t5, HeapNumber::kExponentMask);
- __ and_(t6, t3, t5);
- __ Movz(t3, zero_reg, t6); // Only if t6 is equal to zero.
- __ Branch(&done, eq, t6, Operand(zero_reg));
-
- __ xor_(t2, t6, t5);
- __ Movz(t3, zero_reg, t2); // Only if t6 is equal to t5.
- __ Branch(&done, eq, t6, Operand(t5));
-
- // Unbias exponent.
- __ srl(t6, t6, HeapNumber::kExponentShift);
- __ Subu(t6, t6, Operand(HeapNumber::kExponentBias));
- // If exponent is negative then result is 0.
- __ slt(t2, t6, zero_reg);
- __ Movn(t3, zero_reg, t2); // Only if exponent is negative.
- __ Branch(&done, lt, t6, Operand(zero_reg));
-
- // If exponent is too big then result is minimal value.
- __ slti(t1, t6, meaningfull_bits - 1);
- __ li(t2, min_value);
- __ Movz(t3, t2, t1); // Only if t6 is ge meaningfull_bits - 1.
- __ Branch(&done, ge, t6, Operand(meaningfull_bits - 1));
-
- __ And(t5, t3, Operand(HeapNumber::kSignMask));
- __ And(t3, t3, Operand(HeapNumber::kMantissaMask));
- __ Or(t3, t3, Operand(1u << HeapNumber::kMantissaBitsInTopWord));
-
- __ li(t9, HeapNumber::kMantissaBitsInTopWord);
- __ subu(t6, t9, t6);
- __ slt(t1, t6, zero_reg);
- __ srlv(t2, t3, t6);
- __ Movz(t3, t2, t1); // Only if t6 is positive.
- __ Branch(&sign, ge, t6, Operand(zero_reg));
-
- __ subu(t6, zero_reg, t6);
- __ sllv(t3, t3, t6);
- __ li(t9, meaningfull_bits);
- __ subu(t6, t9, t6);
- __ srlv(t4, t4, t6);
- __ or_(t3, t3, t4);
-
- __ bind(&sign);
- __ subu(t2, t3, zero_reg);
- __ Movz(t3, t2, t5); // Only if t5 is zero.
-
- __ bind(&done);
-
- // Result is in t3.
- // This switch block should be exactly the same as above (FPU mode).
- switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ srl(t8, key, 1);
- __ addu(t8, a3, t8);
- __ sb(t3, MemOperand(t8, 0));
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ addu(t8, a3, key);
- __ sh(t3, MemOperand(t8, 0));
- break;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ sll(t8, key, 1);
- __ addu(t8, a3, t8);
- __ sw(t3, MemOperand(t8, 0));
- break;
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
- }
- }
- }
-
- // Slow case, key and receiver still in a0 and a1.
- __ bind(&slow);
- __ IncrementCounter(
- masm->isolate()->counters()->keyed_load_external_array_slow(),
- 1, a2, a3);
- // Entry registers are intact.
- // ---------- S t a t e --------------
- // -- ra : return address
- // -- a0 : key
- // -- a1 : receiver
- // -----------------------------------
- Handle<Code> slow_ic =
- masm->isolate()->builtins()->KeyedStoreIC_Slow();
- __ Jump(slow_ic, RelocInfo::CODE_TARGET);
-
- // Miss case, call the runtime.
- __ bind(&miss_force_generic);
-
- // ---------- S t a t e --------------
- // -- ra : return address
- // -- a0 : key
- // -- a1 : receiver
- // -----------------------------------
-
- Handle<Code> miss_ic =
- masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
- __ Jump(miss_ic, RelocInfo::CODE_TARGET);
-}
-
-
-void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- ra : return address
- // -- a0 : key
- // -- a1 : receiver
- // -----------------------------------
- Label miss_force_generic;
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, a0, t0, t1, f2, &miss_force_generic);
-
- // Get the elements array.
- __ lw(a2, FieldMemOperand(a1, JSObject::kElementsOffset));
- __ AssertFastElements(a2);
-
- // Check that the key is within bounds.
- __ lw(a3, FieldMemOperand(a2, FixedArray::kLengthOffset));
- __ Branch(USE_DELAY_SLOT, &miss_force_generic, hs, a0, Operand(a3));
-
- // Load the result and make sure it's not the hole.
- __ Addu(a3, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
- __ sll(t0, a0, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(t0, t0, a3);
- __ lw(t0, MemOperand(t0));
- __ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
- __ Branch(&miss_force_generic, eq, t0, Operand(t1));
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, t0);
-
- __ bind(&miss_force_generic);
- Handle<Code> stub =
- masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
- __ Jump(stub, RelocInfo::CODE_TARGET);
-}
-
-
-void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
- MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- ra : return address
- // -- a0 : key
- // -- a1 : receiver
- // -----------------------------------
- Label miss_force_generic, slow_allocate_heapnumber;
-
- Register key_reg = a0;
- Register receiver_reg = a1;
- Register elements_reg = a2;
- Register heap_number_reg = a2;
- Register indexed_double_offset = a3;
- Register scratch = t0;
- Register scratch2 = t1;
- Register scratch3 = t2;
- Register heap_number_map = t3;
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, key_reg, t0, t1, f2, &miss_force_generic);
-
- // Get the elements array.
- __ lw(elements_reg,
- FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
-
- // Check that the key is within bounds.
- __ lw(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
- __ Branch(&miss_force_generic, hs, key_reg, Operand(scratch));
-
- // Load the upper word of the double in the fixed array and test for NaN.
- __ sll(scratch2, key_reg, kDoubleSizeLog2 - kSmiTagSize);
- __ Addu(indexed_double_offset, elements_reg, Operand(scratch2));
- uint32_t upper_32_offset = FixedArray::kHeaderSize + sizeof(kHoleNanLower32);
- __ lw(scratch, FieldMemOperand(indexed_double_offset, upper_32_offset));
- __ Branch(&miss_force_generic, eq, scratch, Operand(kHoleNanUpper32));
-
- // Non-NaN. Allocate a new heap number and copy the double value into it.
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(heap_number_reg, scratch2, scratch3,
- heap_number_map, &slow_allocate_heapnumber);
-
- // Don't need to reload the upper 32 bits of the double, it's already in
- // scratch.
- __ sw(scratch, FieldMemOperand(heap_number_reg,
- HeapNumber::kExponentOffset));
- __ lw(scratch, FieldMemOperand(indexed_double_offset,
- FixedArray::kHeaderSize));
- __ sw(scratch, FieldMemOperand(heap_number_reg,
- HeapNumber::kMantissaOffset));
-
- __ mov(v0, heap_number_reg);
- __ Ret();
-
- __ bind(&slow_allocate_heapnumber);
- Handle<Code> slow_ic =
- masm->isolate()->builtins()->KeyedLoadIC_Slow();
- __ Jump(slow_ic, RelocInfo::CODE_TARGET);
-
- __ bind(&miss_force_generic);
- Handle<Code> miss_ic =
- masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
- __ Jump(miss_ic, RelocInfo::CODE_TARGET);
-}
-
-
-void KeyedStoreStubCompiler::GenerateStoreFastElement(
- MacroAssembler* masm,
- bool is_js_array,
- ElementsKind elements_kind,
- KeyedAccessGrowMode grow_mode) {
- // ----------- S t a t e -------------
- // -- a0 : value
- // -- a1 : key
- // -- a2 : receiver
- // -- ra : return address
- // -- a3 : scratch
- // -- a4 : scratch (elements)
- // -----------------------------------
- Label miss_force_generic, transition_elements_kind, grow, slow;
- Label finish_store, check_capacity;
-
- Register value_reg = a0;
- Register key_reg = a1;
- Register receiver_reg = a2;
- Register scratch = t0;
- Register elements_reg = a3;
- Register length_reg = t1;
- Register scratch2 = t2;
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, key_reg, t0, t1, f2, &miss_force_generic);
-
- if (IsFastSmiElementsKind(elements_kind)) {
- __ JumpIfNotSmi(value_reg, &transition_elements_kind);
- }
-
- // Check that the key is within bounds.
- __ lw(elements_reg,
- FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
- if (is_js_array) {
- __ lw(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
- } else {
- __ lw(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
- }
- // Compare smis.
- if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
- __ Branch(&grow, hs, key_reg, Operand(scratch));
- } else {
- __ Branch(&miss_force_generic, hs, key_reg, Operand(scratch));
- }
-
- // Make sure elements is a fast element array, not 'cow'.
- __ CheckMap(elements_reg,
- scratch,
- Heap::kFixedArrayMapRootIndex,
- &miss_force_generic,
- DONT_DO_SMI_CHECK);
-
- __ bind(&finish_store);
-
- if (IsFastSmiElementsKind(elements_kind)) {
- __ Addu(scratch,
- elements_reg,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
- __ sll(scratch2, key_reg, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(scratch, scratch, scratch2);
- __ sw(value_reg, MemOperand(scratch));
- } else {
- ASSERT(IsFastObjectElementsKind(elements_kind));
- __ Addu(scratch,
- elements_reg,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
- __ sll(scratch2, key_reg, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(scratch, scratch, scratch2);
- __ sw(value_reg, MemOperand(scratch));
- __ mov(receiver_reg, value_reg);
- __ RecordWrite(elements_reg, // Object.
- scratch, // Address.
- receiver_reg, // Value.
- kRAHasNotBeenSaved,
- kDontSaveFPRegs);
- }
- // value_reg (a0) is preserved.
- // Done.
- __ Ret();
-
- __ bind(&miss_force_generic);
- Handle<Code> ic =
- masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- __ bind(&transition_elements_kind);
- Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
- __ Jump(ic_miss, RelocInfo::CODE_TARGET);
-
- if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
- // Grow the array by a single element if possible.
- __ bind(&grow);
-
- // Make sure the array is only growing by a single element, anything else
- // must be handled by the runtime.
- __ Branch(&miss_force_generic, ne, key_reg, Operand(scratch));
-
- // Check for the empty array, and preallocate a small backing store if
- // possible.
- __ lw(length_reg,
- FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
- __ lw(elements_reg,
- FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
- __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
- __ Branch(&check_capacity, ne, elements_reg, Operand(at));
-
- int size = FixedArray::SizeFor(JSArray::kPreallocatedArrayElements);
- __ AllocateInNewSpace(size, elements_reg, scratch, scratch2, &slow,
- TAG_OBJECT);
-
- __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
- __ sw(scratch, FieldMemOperand(elements_reg, JSObject::kMapOffset));
- __ li(scratch, Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
- __ sw(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
- __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
- for (int i = 1; i < JSArray::kPreallocatedArrayElements; ++i) {
- __ sw(scratch, FieldMemOperand(elements_reg, FixedArray::SizeFor(i)));
- }
-
- // Store the element at index zero.
- __ sw(value_reg, FieldMemOperand(elements_reg, FixedArray::SizeFor(0)));
-
- // Install the new backing store in the JSArray.
- __ sw(elements_reg,
- FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
- __ RecordWriteField(receiver_reg, JSObject::kElementsOffset, elements_reg,
- scratch, kRAHasNotBeenSaved, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
- // Increment the length of the array.
- __ li(length_reg, Operand(Smi::FromInt(1)));
- __ sw(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
- __ Ret();
-
- __ bind(&check_capacity);
- // Check for cow elements, in general they are not handled by this stub
- __ CheckMap(elements_reg,
- scratch,
- Heap::kFixedCOWArrayMapRootIndex,
- &miss_force_generic,
- DONT_DO_SMI_CHECK);
-
- __ lw(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
- __ Branch(&slow, hs, length_reg, Operand(scratch));
-
- // Grow the array and finish the store.
- __ Addu(length_reg, length_reg, Operand(Smi::FromInt(1)));
- __ sw(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
- __ jmp(&finish_store);
-
- __ bind(&slow);
- Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow();
- __ Jump(ic_slow, RelocInfo::CODE_TARGET);
- }
-}
-
-
-void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
- MacroAssembler* masm,
- bool is_js_array,
- KeyedAccessGrowMode grow_mode) {
- // ----------- S t a t e -------------
- // -- a0 : value
- // -- a1 : key
- // -- a2 : receiver
- // -- ra : return address
- // -- a3 : scratch
- // -- t0 : scratch (elements_reg)
- // -- t1 : scratch (mantissa_reg)
- // -- t2 : scratch (exponent_reg)
- // -- t3 : scratch4
- // -----------------------------------
- Label miss_force_generic, transition_elements_kind, grow, slow;
- Label finish_store, check_capacity;
-
- Register value_reg = a0;
- Register key_reg = a1;
- Register receiver_reg = a2;
- Register elements_reg = a3;
- Register scratch1 = t0;
- Register scratch2 = t1;
- Register scratch3 = t2;
- Register scratch4 = t3;
- Register length_reg = t3;
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, key_reg, t0, t1, f2, &miss_force_generic);
-
- __ lw(elements_reg,
- FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
-
- // Check that the key is within bounds.
- if (is_js_array) {
- __ lw(scratch1, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
- } else {
- __ lw(scratch1,
- FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
- }
- // Compare smis, unsigned compare catches both negative and out-of-bound
- // indexes.
- if (grow_mode == ALLOW_JSARRAY_GROWTH) {
- __ Branch(&grow, hs, key_reg, Operand(scratch1));
- } else {
- __ Branch(&miss_force_generic, hs, key_reg, Operand(scratch1));
- }
-
- __ bind(&finish_store);
-
- __ StoreNumberToDoubleElements(value_reg,
- key_reg,
- receiver_reg,
- // All registers after this are overwritten.
- elements_reg,
- scratch1,
- scratch2,
- scratch3,
- scratch4,
- &transition_elements_kind);
-
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, value_reg); // In delay slot.
-
- // Handle store cache miss, replacing the ic with the generic stub.
- __ bind(&miss_force_generic);
- Handle<Code> ic =
- masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- __ bind(&transition_elements_kind);
- Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
- __ Jump(ic_miss, RelocInfo::CODE_TARGET);
-
- if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
- // Grow the array by a single element if possible.
- __ bind(&grow);
-
- // Make sure the array is only growing by a single element, anything else
- // must be handled by the runtime.
- __ Branch(&miss_force_generic, ne, key_reg, Operand(scratch1));
-
- // Transition on values that can't be stored in a FixedDoubleArray.
- Label value_is_smi;
- __ JumpIfSmi(value_reg, &value_is_smi);
- __ lw(scratch1, FieldMemOperand(value_reg, HeapObject::kMapOffset));
- __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
- __ Branch(&transition_elements_kind, ne, scratch1, Operand(at));
- __ bind(&value_is_smi);
-
- // Check for the empty array, and preallocate a small backing store if
- // possible.
- __ lw(length_reg,
- FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
- __ lw(elements_reg,
- FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
- __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
- __ Branch(&check_capacity, ne, elements_reg, Operand(at));
-
- int size = FixedDoubleArray::SizeFor(JSArray::kPreallocatedArrayElements);
- __ AllocateInNewSpace(size, elements_reg, scratch1, scratch2, &slow,
- TAG_OBJECT);
-
-    // Initialize the new FixedDoubleArray. Leave elements uninitialized for
-    // efficiency; they are guaranteed to be initialized before use.
- __ LoadRoot(scratch1, Heap::kFixedDoubleArrayMapRootIndex);
- __ sw(scratch1, FieldMemOperand(elements_reg, JSObject::kMapOffset));
- __ li(scratch1, Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
- __ sw(scratch1,
- FieldMemOperand(elements_reg, FixedDoubleArray::kLengthOffset));
-
- // Install the new backing store in the JSArray.
- __ sw(elements_reg,
- FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
- __ RecordWriteField(receiver_reg, JSObject::kElementsOffset, elements_reg,
- scratch1, kRAHasNotBeenSaved, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
- // Increment the length of the array.
- __ li(length_reg, Operand(Smi::FromInt(1)));
- __ sw(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
- __ lw(elements_reg,
- FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
- __ jmp(&finish_store);
-
- __ bind(&check_capacity);
- // Make sure that the backing store can hold additional elements.
- __ lw(scratch1,
- FieldMemOperand(elements_reg, FixedDoubleArray::kLengthOffset));
- __ Branch(&slow, hs, length_reg, Operand(scratch1));
-
- // Grow the array and finish the store.
- __ Addu(length_reg, length_reg, Operand(Smi::FromInt(1)));
- __ sw(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
- __ jmp(&finish_store);
-
- __ bind(&slow);
- Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow();
- __ Jump(ic_slow, RelocInfo::CODE_TARGET);
- }
+ TailCallBuiltin(masm, Builtins::kKeyedLoadIC_MissForceGeneric);
}
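
Editorial note, not part of the diff: the external-array store path removed above implements the conversion its leading comment describes — NaN and +/-Infinity written to integer external arrays become zero and finite doubles are truncated, while float and double arrays keep non-finite values. A minimal JavaScript sketch of that observable behavior:

  var ints = new Int32Array(3);
  ints[0] = NaN;        // non-finite values stored into integer arrays become 0
  ints[1] = Infinity;   // likewise stored as 0
  ints[2] = 3.9;        // finite doubles are truncated toward zero -> 3
  var floats = new Float32Array(1);
  floats[0] = NaN;      // float/double external arrays preserve NaN and infinities
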
diff --git a/deps/v8/src/mirror-debugger.js b/deps/v8/src/mirror-debugger.js
index 7f1a05aed..4277136b6 100644
--- a/deps/v8/src/mirror-debugger.js
+++ b/deps/v8/src/mirror-debugger.js
@@ -117,7 +117,7 @@ function LookupMirror(handle) {
* @returns {Mirror} the mirror reflects the undefined value
*/
function GetUndefinedMirror() {
- return MakeMirror(void 0);
+ return MakeMirror(UNDEFINED);
}
@@ -173,7 +173,7 @@ PropertyKind.Indexed = 2;
var PropertyType = {};
PropertyType.Normal = 0;
PropertyType.Field = 1;
-PropertyType.ConstantFunction = 2;
+PropertyType.Constant = 2;
PropertyType.Callbacks = 3;
PropertyType.Handler = 4;
PropertyType.Interceptor = 5;
@@ -482,7 +482,7 @@ ValueMirror.prototype.value = function() {
* @extends ValueMirror
*/
function UndefinedMirror() {
- %_CallFunction(this, UNDEFINED_TYPE, void 0, ValueMirror);
+ %_CallFunction(this, UNDEFINED_TYPE, UNDEFINED, ValueMirror);
}
inherits(UndefinedMirror, ValueMirror);
@@ -638,7 +638,7 @@ ObjectMirror.prototype.propertyNames = function(kind, limit) {
// Find all the named properties.
if (kind & PropertyKind.Named) {
// Get the local property names.
- propertyNames = %GetLocalPropertyNames(this.value_);
+ propertyNames = %GetLocalPropertyNames(this.value_, true);
total += propertyNames.length;
// Get names for named interceptor properties if any.
@@ -957,7 +957,7 @@ FunctionMirror.prototype.scopeCount = function() {
FunctionMirror.prototype.scope = function(index) {
if (this.resolved()) {
- return new ScopeMirror(void 0, this, index);
+ return new ScopeMirror(UNDEFINED, this, index);
}
};
@@ -1509,6 +1509,11 @@ FrameDetails.prototype.scopeCount = function() {
};
+FrameDetails.prototype.stepInPositionsImpl = function() {
+ return %GetStepInPositions(this.break_id_, this.frameId());
+};
+
+
/**
* Mirror object for stack frames.
* @param {number} break_id The break id in the VM for which this frame is
@@ -1665,19 +1670,41 @@ FrameMirror.prototype.scopeCount = function() {
FrameMirror.prototype.scope = function(index) {
- return new ScopeMirror(this, void 0, index);
+ return new ScopeMirror(this, UNDEFINED, index);
+};
+
+
+FrameMirror.prototype.stepInPositions = function() {
+ var script = this.func().script();
+ var funcOffset = this.func().sourcePosition_();
+
+ var stepInRaw = this.details_.stepInPositionsImpl();
+ var result = [];
+ if (stepInRaw) {
+ for (var i = 0; i < stepInRaw.length; i++) {
+ var posStruct = {};
+ var offset = script.locationFromPosition(funcOffset + stepInRaw[i],
+ true);
+ serializeLocationFields(offset, posStruct);
+ var item = {
+ position: posStruct
+ };
+ result.push(item);
+ }
+ }
+
+ return result;
};
FrameMirror.prototype.evaluate = function(source, disable_break,
opt_context_object) {
- var result = %DebugEvaluate(this.break_id_,
- this.details_.frameId(),
- this.details_.inlinedFrameIndex(),
- source,
- Boolean(disable_break),
- opt_context_object);
- return MakeMirror(result);
+ return MakeMirror(%DebugEvaluate(this.break_id_,
+ this.details_.frameId(),
+ this.details_.inlinedFrameIndex(),
+ source,
+ Boolean(disable_break),
+ opt_context_object));
};
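
Editorial usage sketch, not part of the diff, for the FrameMirror.prototype.stepInPositions() API added above. It assumes a resolved frame mirror and that the serialized location carries line/column fields (as serializeLocationFields populates them):

  var positions = frameMirror.stepInPositions();  // frameMirror obtained from the debugger
  for (var i = 0; i < positions.length; i++) {
    var loc = positions[i].position;              // serialized source location
    print('step-in target at line ' + loc.line +  // print() as in the d8 shell
          ', column ' + loc.column);
  }
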
diff --git a/deps/v8/src/mksnapshot.cc b/deps/v8/src/mksnapshot.cc
index d7775517b..95d3daada 100644
--- a/deps/v8/src/mksnapshot.cc
+++ b/deps/v8/src/mksnapshot.cc
@@ -43,49 +43,6 @@
using namespace v8;
-static const unsigned int kMaxCounters = 256;
-
-// A single counter in a counter collection.
-class Counter {
- public:
- static const int kMaxNameSize = 64;
- int32_t* Bind(const char* name) {
- int i;
- for (i = 0; i < kMaxNameSize - 1 && name[i]; i++) {
- name_[i] = name[i];
- }
- name_[i] = '\0';
- return &counter_;
- }
- private:
- int32_t counter_;
- uint8_t name_[kMaxNameSize];
-};
-
-
-// A set of counters and associated information. An instance of this
-// class is stored directly in the memory-mapped counters file if
-// the --save-counters options is used
-class CounterCollection {
- public:
- CounterCollection() {
- magic_number_ = 0xDEADFACE;
- max_counters_ = kMaxCounters;
- max_name_size_ = Counter::kMaxNameSize;
- counters_in_use_ = 0;
- }
- Counter* GetNextCounter() {
- if (counters_in_use_ == kMaxCounters) return NULL;
- return &counters_[counters_in_use_++];
- }
- private:
- uint32_t magic_number_;
- uint32_t max_counters_;
- uint32_t max_name_size_;
- uint32_t counters_in_use_;
- Counter counters_[kMaxCounters];
-};
-
class Compressor {
public:
@@ -172,7 +129,8 @@ class CppByteSink : public PartialSnapshotSink {
int data_space_used,
int code_space_used,
int map_space_used,
- int cell_space_used) {
+ int cell_space_used,
+ int property_cell_space_used) {
fprintf(fp_,
"const int Snapshot::%snew_space_used_ = %d;\n",
prefix,
@@ -197,6 +155,10 @@ class CppByteSink : public PartialSnapshotSink {
"const int Snapshot::%scell_space_used_ = %d;\n",
prefix,
cell_space_used);
+ fprintf(fp_,
+ "const int Snapshot::%sproperty_cell_space_used_ = %d;\n",
+ prefix,
+ property_cell_space_used);
}
void WritePartialSnapshot() {
@@ -291,7 +253,22 @@ class BZip2Decompressor : public StartupDataDecompressor {
#endif
+void DumpException(Handle<Message> message) {
+ String::Utf8Value message_string(message->Get());
+ String::Utf8Value message_line(message->GetSourceLine());
+ fprintf(stderr, "%s at line %d\n", *message_string, message->GetLineNumber());
+ fprintf(stderr, "%s\n", *message_line);
+ for (int i = 0; i <= message->GetEndColumn(); ++i) {
+ fprintf(stderr, "%c", i < message->GetStartColumn() ? ' ' : '^');
+ }
+ fprintf(stderr, "\n");
+}
+
+
int main(int argc, char** argv) {
+ V8::InitializeICU();
+ i::Isolate::SetCrashIfDefaultIsolateInitialized();
+
// By default, log code create information in the snapshot.
i::FLAG_log_code = true;
@@ -311,18 +288,28 @@ int main(int argc, char** argv) {
exit(1);
}
#endif
- i::Serializer::Enable();
- Persistent<Context> context = v8::Context::New();
+ i::FLAG_logfile_per_isolate = false;
+
+ Isolate* isolate = v8::Isolate::New();
+ isolate->Enter();
+ i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ i::Serializer::Enable(internal_isolate);
+ Persistent<Context> context;
+ {
+ HandleScope handle_scope(isolate);
+ context.Reset(isolate, Context::New(isolate));
+ }
+
if (context.IsEmpty()) {
fprintf(stderr,
"\nException thrown while compiling natives - see above.\n\n");
exit(1);
}
if (i::FLAG_extra_code != NULL) {
- context->Enter();
// Capture 100 frames if anything happens.
V8::SetCaptureStackTraceForUncaughtExceptions(true, 100);
- HandleScope scope;
+ HandleScope scope(isolate);
+ v8::Context::Scope(v8::Local<v8::Context>::New(isolate, context));
const char* name = i::FLAG_extra_code;
FILE* file = i::OS::FOpen(name, "rb");
if (file == NULL) {
@@ -349,49 +336,37 @@ int main(int argc, char** argv) {
TryCatch try_catch;
Local<Script> script = Script::Compile(source);
if (try_catch.HasCaught()) {
- fprintf(stderr, "Failure compiling '%s' (see above)\n", name);
+ fprintf(stderr, "Failure compiling '%s'\n", name);
+ DumpException(try_catch.Message());
exit(1);
}
script->Run();
if (try_catch.HasCaught()) {
fprintf(stderr, "Failure running '%s'\n", name);
- Local<Message> message = try_catch.Message();
- Local<String> message_string = message->Get();
- Local<String> message_line = message->GetSourceLine();
- int len = 2 + message_string->Utf8Length() + message_line->Utf8Length();
- char* buf = new char(len);
- message_string->WriteUtf8(buf);
- fprintf(stderr, "%s at line %d\n", buf, message->GetLineNumber());
- message_line->WriteUtf8(buf);
- fprintf(stderr, "%s\n", buf);
- int from = message->GetStartColumn();
- int to = message->GetEndColumn();
- int i;
- for (i = 0; i < from; i++) fprintf(stderr, " ");
- for ( ; i <= to; i++) fprintf(stderr, "^");
- fprintf(stderr, "\n");
+ DumpException(try_catch.Message());
exit(1);
}
- context->Exit();
}
// Make sure all builtin scripts are cached.
- { HandleScope scope;
+ { HandleScope scope(isolate);
for (int i = 0; i < i::Natives::GetBuiltinsCount(); i++) {
- i::Isolate::Current()->bootstrapper()->NativesSourceLookup(i);
+ internal_isolate->bootstrapper()->NativesSourceLookup(i);
}
}
// If we don't do this then we end up with a stray root pointing at the
// context even after we have disposed of the context.
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags, "mksnapshot");
- i::Object* raw_context = *(v8::Utils::OpenHandle(*context));
+ internal_isolate->heap()->CollectAllGarbage(
+ i::Heap::kNoGCFlags, "mksnapshot");
+ i::Object* raw_context = *v8::Utils::OpenPersistent(context);
context.Dispose();
CppByteSink sink(argv[1]);
// This results in a somewhat smaller snapshot, probably because it gets rid
// of some things that are cached between garbage collections.
- i::StartupSerializer ser(&sink);
+ i::StartupSerializer ser(internal_isolate, &sink);
ser.SerializeStrongReferences();
- i::PartialSerializer partial_ser(&ser, sink.partial_sink());
+ i::PartialSerializer partial_ser(
+ internal_isolate, &ser, sink.partial_sink());
partial_ser.Serialize(&raw_context);
ser.SerializeWeakReferences();
@@ -413,7 +388,8 @@ int main(int argc, char** argv) {
partial_ser.CurrentAllocationAddress(i::OLD_DATA_SPACE),
partial_ser.CurrentAllocationAddress(i::CODE_SPACE),
partial_ser.CurrentAllocationAddress(i::MAP_SPACE),
- partial_ser.CurrentAllocationAddress(i::CELL_SPACE));
+ partial_ser.CurrentAllocationAddress(i::CELL_SPACE),
+ partial_ser.CurrentAllocationAddress(i::PROPERTY_CELL_SPACE));
sink.WriteSpaceUsed(
"",
ser.CurrentAllocationAddress(i::NEW_SPACE),
@@ -421,6 +397,7 @@ int main(int argc, char** argv) {
ser.CurrentAllocationAddress(i::OLD_DATA_SPACE),
ser.CurrentAllocationAddress(i::CODE_SPACE),
ser.CurrentAllocationAddress(i::MAP_SPACE),
- ser.CurrentAllocationAddress(i::CELL_SPACE));
+ ser.CurrentAllocationAddress(i::CELL_SPACE),
+ ser.CurrentAllocationAddress(i::PROPERTY_CELL_SPACE));
return 0;
}
diff --git a/deps/v8/src/platform-tls.h b/deps/v8/src/msan.h
index 32516636b..484c9fa39 100644
--- a/deps/v8/src/platform-tls.h
+++ b/deps/v8/src/msan.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -25,26 +25,25 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Platform and architecture specific thread local store functions.
+// MemorySanitizer support.
-#ifndef V8_PLATFORM_TLS_H_
-#define V8_PLATFORM_TLS_H_
+#ifndef V8_MSAN_H_
+#define V8_MSAN_H_
-#ifndef V8_NO_FAST_TLS
-
-// When fast TLS is requested we include the appropriate
-// implementation header.
-//
-// The implementation header defines V8_FAST_TLS_SUPPORTED if it
-// provides fast TLS support for the current platform and architecture
-// combination.
+#ifndef __has_feature
+# define __has_feature(x) 0
+#endif
-#if defined(_MSC_VER) && (defined(_WIN32) || defined(_WIN64))
-#include "platform-tls-win32.h"
-#elif defined(__APPLE__)
-#include "platform-tls-mac.h"
+#if __has_feature(memory_sanitizer) && !defined(MEMORY_SANITIZER)
+# define MEMORY_SANITIZER
#endif
+#ifdef MEMORY_SANITIZER
+# include <sanitizer/msan_interface.h>
+// Marks a memory range as fully initialized.
+# define MSAN_MEMORY_IS_INITIALIZED(p, s) __msan_unpoison((p), (s))
+#else
+# define MSAN_MEMORY_IS_INITIALIZED(p, s)
#endif
-#endif // V8_PLATFORM_TLS_H_
+#endif // V8_MSAN_H_
diff --git a/deps/v8/src/object-observe.js b/deps/v8/src/object-observe.js
new file mode 100644
index 000000000..9c7ac3889
--- /dev/null
+++ b/deps/v8/src/object-observe.js
@@ -0,0 +1,568 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"use strict";
+
+// Overview:
+//
+// This file contains all of the routing and accounting for Object.observe.
+// User code will interact with these mechanisms via the Object.observe APIs
+// and, as a side effect of mutating objects which are observed. The V8 runtime
+// (both C++ and JS) will interact with these mechanisms primarily by enqueuing
+// proper change records for objects which were mutated. The Object.observe
+// routing and accounting consists primarily of three participants:
+//
+// 1) ObjectInfo. This represents the observed state of a given object. It
+// records what callbacks are observing the object, with what options, and
+// what "change types" are in progress on the object (i.e. via
+// notifier.performChange).
+//
+// 2) CallbackInfo. This represents a callback used for observation. It holds
+// the records which must be delivered to the callback, as well as the global
+// priority of the callback (which determines delivery order between
+// callbacks).
+//
+// 3) observationState.pendingObservers. This is the set of observers which
+// have change records which must be delivered. During "normal" delivery
+// (i.e. not Object.deliverChangeRecords), this is the mechanism by which
+// callbacks are invoked in the proper order until there are no more
+// change records pending to a callback.
+//
+// Note that in order to reduce allocation and processing costs, the
+// implementations of (1) and (2) have "optimized" states which represent
+// common cases which can be handled more efficiently.
+
+var observationState = %GetObservationState();
+if (IS_UNDEFINED(observationState.callbackInfoMap)) {
+ observationState.callbackInfoMap = %ObservationWeakMapCreate();
+ observationState.objectInfoMap = %ObservationWeakMapCreate();
+ observationState.notifierObjectInfoMap = %ObservationWeakMapCreate();
+ observationState.pendingObservers = null;
+ observationState.nextCallbackPriority = 0;
+}
+
+function ObservationWeakMap(map) {
+ this.map_ = map;
+}
+
+ObservationWeakMap.prototype = {
+ get: function(key) {
+ key = %UnwrapGlobalProxy(key);
+ if (!IS_SPEC_OBJECT(key)) return UNDEFINED;
+ return %WeakCollectionGet(this.map_, key);
+ },
+ set: function(key, value) {
+ key = %UnwrapGlobalProxy(key);
+ if (!IS_SPEC_OBJECT(key)) return UNDEFINED;
+ %WeakCollectionSet(this.map_, key, value);
+ },
+ has: function(key) {
+ return !IS_UNDEFINED(this.get(key));
+ }
+};
+
+var callbackInfoMap =
+ new ObservationWeakMap(observationState.callbackInfoMap);
+var objectInfoMap = new ObservationWeakMap(observationState.objectInfoMap);
+var notifierObjectInfoMap =
+ new ObservationWeakMap(observationState.notifierObjectInfoMap);
+
+function TypeMapCreate() {
+ return { __proto__: null };
+}
+
+function TypeMapAddType(typeMap, type, ignoreDuplicate) {
+ typeMap[type] = ignoreDuplicate ? 1 : (typeMap[type] || 0) + 1;
+}
+
+function TypeMapRemoveType(typeMap, type) {
+ typeMap[type]--;
+}
+
+function TypeMapCreateFromList(typeList) {
+ var typeMap = TypeMapCreate();
+ for (var i = 0; i < typeList.length; i++) {
+ TypeMapAddType(typeMap, typeList[i], true);
+ }
+ return typeMap;
+}
+
+function TypeMapHasType(typeMap, type) {
+ return !!typeMap[type];
+}
+
+function TypeMapIsDisjointFrom(typeMap1, typeMap2) {
+ if (!typeMap1 || !typeMap2)
+ return true;
+
+ for (var type in typeMap1) {
+ if (TypeMapHasType(typeMap1, type) && TypeMapHasType(typeMap2, type))
+ return false;
+ }
+
+ return true;
+}
+
+var defaultAcceptTypes = TypeMapCreateFromList([
+ 'new',
+ 'updated',
+ 'deleted',
+ 'prototype',
+ 'reconfigured'
+]);
+
+// An Observer is a registration to observe an object by a callback with
+// a given set of accept types. If the set of accept types is the default
+// set for Object.observe, the observer is represented as a direct reference
+// to the callback. An observer never changes its accept types and thus never
+// needs to "normalize".
+function ObserverCreate(callback, acceptList) {
+ return IS_UNDEFINED(acceptList) ? callback : {
+ __proto__: null,
+ callback: callback,
+ accept: TypeMapCreateFromList(acceptList)
+ };
+}
+
+function ObserverGetCallback(observer) {
+ return IS_SPEC_FUNCTION(observer) ? observer : observer.callback;
+}
+
+function ObserverGetAcceptTypes(observer) {
+ return IS_SPEC_FUNCTION(observer) ? defaultAcceptTypes : observer.accept;
+}
+
+function ObserverIsActive(observer, objectInfo) {
+ return TypeMapIsDisjointFrom(ObjectInfoGetPerformingTypes(objectInfo),
+ ObserverGetAcceptTypes(observer));
+}
+
+function ObjectInfoGet(object) {
+ var objectInfo = objectInfoMap.get(object);
+ if (IS_UNDEFINED(objectInfo)) {
+ if (!%IsJSProxy(object))
+ %SetIsObserved(object);
+
+ objectInfo = {
+ object: object,
+ changeObservers: null,
+ notifier: null,
+ performing: null,
+ performingCount: 0,
+ };
+ objectInfoMap.set(object, objectInfo);
+ }
+ return objectInfo;
+}
+
+function ObjectInfoGetFromNotifier(notifier) {
+ return notifierObjectInfoMap.get(notifier);
+}
+
+function ObjectInfoGetNotifier(objectInfo) {
+ if (IS_NULL(objectInfo.notifier)) {
+ objectInfo.notifier = { __proto__: notifierPrototype };
+ notifierObjectInfoMap.set(objectInfo.notifier, objectInfo);
+ }
+
+ return objectInfo.notifier;
+}
+
+function ObjectInfoGetObject(objectInfo) {
+ return objectInfo.object;
+}
+
+function ChangeObserversIsOptimized(changeObservers) {
+ return typeof changeObservers === 'function' ||
+ typeof changeObservers.callback === 'function';
+}
+
+// The set of observers on an object is called 'changeObservers'. The first
+// observer is referenced directly via objectInfo.changeObservers. When a second
+// is added, changeObservers "normalizes" to become a mapping of callback
+// priority -> observer and is then stored on objectInfo.changeObservers.
+function ObjectInfoNormalizeChangeObservers(objectInfo) {
+ if (ChangeObserversIsOptimized(objectInfo.changeObservers)) {
+ var observer = objectInfo.changeObservers;
+ var callback = ObserverGetCallback(observer);
+ var callbackInfo = CallbackInfoGet(callback);
+ var priority = CallbackInfoGetPriority(callbackInfo);
+ objectInfo.changeObservers = { __proto__: null };
+ objectInfo.changeObservers[priority] = observer;
+ }
+}
+
+function ObjectInfoAddObserver(objectInfo, callback, acceptList) {
+ var callbackInfo = CallbackInfoGetOrCreate(callback);
+ var observer = ObserverCreate(callback, acceptList);
+
+ if (!objectInfo.changeObservers) {
+ objectInfo.changeObservers = observer;
+ return;
+ }
+
+ ObjectInfoNormalizeChangeObservers(objectInfo);
+ var priority = CallbackInfoGetPriority(callbackInfo);
+ objectInfo.changeObservers[priority] = observer;
+}
+
+function ObjectInfoRemoveObserver(objectInfo, callback) {
+ if (!objectInfo.changeObservers)
+ return;
+
+ if (ChangeObserversIsOptimized(objectInfo.changeObservers)) {
+ if (callback === ObserverGetCallback(objectInfo.changeObservers))
+ objectInfo.changeObservers = null;
+ return;
+ }
+
+ var callbackInfo = CallbackInfoGet(callback);
+ var priority = CallbackInfoGetPriority(callbackInfo);
+ delete objectInfo.changeObservers[priority];
+}
+
+function ObjectInfoHasActiveObservers(objectInfo) {
+ if (IS_UNDEFINED(objectInfo) || !objectInfo.changeObservers)
+ return false;
+
+ if (ChangeObserversIsOptimized(objectInfo.changeObservers))
+ return ObserverIsActive(objectInfo.changeObservers, objectInfo);
+
+ for (var priority in objectInfo.changeObservers) {
+ if (ObserverIsActive(objectInfo.changeObservers[priority], objectInfo))
+ return true;
+ }
+
+ return false;
+}
+
+function ObjectInfoAddPerformingType(objectInfo, type) {
+ objectInfo.performing = objectInfo.performing || TypeMapCreate();
+ TypeMapAddType(objectInfo.performing, type);
+ objectInfo.performingCount++;
+}
+
+function ObjectInfoRemovePerformingType(objectInfo, type) {
+ objectInfo.performingCount--;
+ TypeMapRemoveType(objectInfo.performing, type);
+}
+
+function ObjectInfoGetPerformingTypes(objectInfo) {
+ return objectInfo.performingCount > 0 ? objectInfo.performing : null;
+}
+
+function AcceptArgIsValid(arg) {
+ if (IS_UNDEFINED(arg))
+ return true;
+
+ if (!IS_SPEC_OBJECT(arg) ||
+ !IS_NUMBER(arg.length) ||
+ arg.length < 0)
+ return false;
+
+ return true;
+}
+
+// CallbackInfo's optimized state is just a number which represents its global
+// priority. When a change record must be enqueued for the callback, it
+// normalizes. When delivery clears any pending change records, it re-optimizes.
+function CallbackInfoGet(callback) {
+ return callbackInfoMap.get(callback);
+}
+
+function CallbackInfoGetOrCreate(callback) {
+ var callbackInfo = callbackInfoMap.get(callback);
+ if (!IS_UNDEFINED(callbackInfo))
+ return callbackInfo;
+
+ var priority = observationState.nextCallbackPriority++
+ callbackInfoMap.set(callback, priority);
+ return priority;
+}
+
+function CallbackInfoGetPriority(callbackInfo) {
+ if (IS_NUMBER(callbackInfo))
+ return callbackInfo;
+ else
+ return callbackInfo.priority;
+}
+
+function CallbackInfoNormalize(callback) {
+ var callbackInfo = callbackInfoMap.get(callback);
+ if (IS_NUMBER(callbackInfo)) {
+ var priority = callbackInfo;
+ callbackInfo = new InternalArray;
+ callbackInfo.priority = priority;
+ callbackInfoMap.set(callback, callbackInfo);
+ }
+ return callbackInfo;
+}
+
+function ObjectObserve(object, callback, acceptList) {
+ if (!IS_SPEC_OBJECT(object))
+ throw MakeTypeError("observe_non_object", ["observe"]);
+ if (!IS_SPEC_FUNCTION(callback))
+ throw MakeTypeError("observe_non_function", ["observe"]);
+ if (ObjectIsFrozen(callback))
+ throw MakeTypeError("observe_callback_frozen");
+ if (!AcceptArgIsValid(acceptList))
+ throw MakeTypeError("observe_accept_invalid");
+
+ var objectInfo = ObjectInfoGet(object);
+ ObjectInfoAddObserver(objectInfo, callback, acceptList);
+ return object;
+}
+
+function ObjectUnobserve(object, callback) {
+ if (!IS_SPEC_OBJECT(object))
+ throw MakeTypeError("observe_non_object", ["unobserve"]);
+ if (!IS_SPEC_FUNCTION(callback))
+ throw MakeTypeError("observe_non_function", ["unobserve"]);
+
+ var objectInfo = objectInfoMap.get(object);
+ if (IS_UNDEFINED(objectInfo))
+ return object;
+
+ ObjectInfoRemoveObserver(objectInfo, callback);
+ return object;
+}
+
+function ArrayObserve(object, callback) {
+ return ObjectObserve(object, callback, ['new',
+ 'updated',
+ 'deleted',
+ 'splice']);
+}
+
+function ArrayUnobserve(object, callback) {
+ return ObjectUnobserve(object, callback);
+}
+
+function ObserverEnqueueIfActive(observer, objectInfo, changeRecord,
+ needsAccessCheck) {
+ if (!ObserverIsActive(observer, objectInfo) ||
+ !TypeMapHasType(ObserverGetAcceptTypes(observer), changeRecord.type)) {
+ return;
+ }
+
+ var callback = ObserverGetCallback(observer);
+ if (needsAccessCheck &&
+ // Drop all splice records on the floor for access-checked objects
+ (changeRecord.type == 'splice' ||
+ !%IsAccessAllowedForObserver(
+ callback, changeRecord.object, changeRecord.name))) {
+ return;
+ }
+
+ var callbackInfo = CallbackInfoNormalize(callback);
+ if (!observationState.pendingObservers)
+ observationState.pendingObservers = { __proto__: null };
+ observationState.pendingObservers[callbackInfo.priority] = callback;
+ callbackInfo.push(changeRecord);
+ %SetObserverDeliveryPending();
+}
+
+function ObjectInfoEnqueueChangeRecord(objectInfo, changeRecord,
+ skipAccessCheck) {
+ // TODO(rossberg): adjust once there is a story for symbols vs proxies.
+ if (IS_SYMBOL(changeRecord.name)) return;
+
+ var needsAccessCheck = !skipAccessCheck &&
+ %IsAccessCheckNeeded(changeRecord.object);
+
+ if (ChangeObserversIsOptimized(objectInfo.changeObservers)) {
+ var observer = objectInfo.changeObservers;
+ ObserverEnqueueIfActive(observer, objectInfo, changeRecord,
+ needsAccessCheck);
+ return;
+ }
+
+ for (var priority in objectInfo.changeObservers) {
+ var observer = objectInfo.changeObservers[priority];
+ ObserverEnqueueIfActive(observer, objectInfo, changeRecord,
+ needsAccessCheck);
+ }
+}
+
+function BeginPerformSplice(array) {
+ var objectInfo = objectInfoMap.get(array);
+ if (!IS_UNDEFINED(objectInfo))
+ ObjectInfoAddPerformingType(objectInfo, 'splice');
+}
+
+function EndPerformSplice(array) {
+ var objectInfo = objectInfoMap.get(array);
+ if (!IS_UNDEFINED(objectInfo))
+ ObjectInfoRemovePerformingType(objectInfo, 'splice');
+}
+
+function EnqueueSpliceRecord(array, index, removed, addedCount) {
+ var objectInfo = objectInfoMap.get(array);
+ if (!ObjectInfoHasActiveObservers(objectInfo))
+ return;
+
+ var changeRecord = {
+ type: 'splice',
+ object: array,
+ index: index,
+ removed: removed,
+ addedCount: addedCount
+ };
+
+ ObjectFreeze(changeRecord);
+ ObjectFreeze(changeRecord.removed);
+ ObjectInfoEnqueueChangeRecord(objectInfo, changeRecord);
+}
+
+function NotifyChange(type, object, name, oldValue) {
+ var objectInfo = objectInfoMap.get(object);
+ if (!ObjectInfoHasActiveObservers(objectInfo))
+ return;
+
+ var changeRecord = (arguments.length < 4) ?
+ { type: type, object: object, name: name } :
+ { type: type, object: object, name: name, oldValue: oldValue };
+ ObjectFreeze(changeRecord);
+ ObjectInfoEnqueueChangeRecord(objectInfo, changeRecord);
+}
+
+var notifierPrototype = {};
+
+function ObjectNotifierNotify(changeRecord) {
+ if (!IS_SPEC_OBJECT(this))
+ throw MakeTypeError("called_on_non_object", ["notify"]);
+
+ var objectInfo = ObjectInfoGetFromNotifier(this);
+ if (IS_UNDEFINED(objectInfo))
+ throw MakeTypeError("observe_notify_non_notifier");
+ if (!IS_STRING(changeRecord.type))
+ throw MakeTypeError("observe_type_non_string");
+
+ if (!ObjectInfoHasActiveObservers(objectInfo))
+ return;
+
+ var newRecord = { object: ObjectInfoGetObject(objectInfo) };
+ for (var prop in changeRecord) {
+ if (prop === 'object') continue;
+ %DefineOrRedefineDataProperty(newRecord, prop, changeRecord[prop],
+ READ_ONLY + DONT_DELETE);
+ }
+ ObjectFreeze(newRecord);
+
+ ObjectInfoEnqueueChangeRecord(objectInfo, newRecord,
+ true /* skip access check */);
+}
+
+function ObjectNotifierPerformChange(changeType, changeFn) {
+ if (!IS_SPEC_OBJECT(this))
+ throw MakeTypeError("called_on_non_object", ["performChange"]);
+
+ var objectInfo = ObjectInfoGetFromNotifier(this);
+
+ if (IS_UNDEFINED(objectInfo))
+ throw MakeTypeError("observe_notify_non_notifier");
+ if (!IS_STRING(changeType))
+ throw MakeTypeError("observe_perform_non_string");
+ if (!IS_SPEC_FUNCTION(changeFn))
+ throw MakeTypeError("observe_perform_non_function");
+
+ ObjectInfoAddPerformingType(objectInfo, changeType);
+ try {
+ %_CallFunction(UNDEFINED, changeFn);
+ } finally {
+ ObjectInfoRemovePerformingType(objectInfo, changeType);
+ }
+}
+
+function ObjectGetNotifier(object) {
+ if (!IS_SPEC_OBJECT(object))
+ throw MakeTypeError("observe_non_object", ["getNotifier"]);
+
+ if (ObjectIsFrozen(object)) return null;
+
+ var objectInfo = ObjectInfoGet(object);
+ return ObjectInfoGetNotifier(objectInfo);
+}
+
+function CallbackDeliverPending(callback) {
+ var callbackInfo = callbackInfoMap.get(callback);
+ if (IS_UNDEFINED(callbackInfo) || IS_NUMBER(callbackInfo))
+ return false;
+
+ // Clear the pending change records from callback and return it to its
+ // "optimized" state.
+ var priority = callbackInfo.priority;
+ callbackInfoMap.set(callback, priority);
+
+ if (observationState.pendingObservers)
+ delete observationState.pendingObservers[priority];
+
+ var delivered = [];
+ %MoveArrayContents(callbackInfo, delivered);
+
+ try {
+ %_CallFunction(UNDEFINED, delivered, callback);
+ } catch (ex) {}
+ return true;
+}
+
+function ObjectDeliverChangeRecords(callback) {
+ if (!IS_SPEC_FUNCTION(callback))
+ throw MakeTypeError("observe_non_function", ["deliverChangeRecords"]);
+
+ while (CallbackDeliverPending(callback)) {}
+}
+
+function DeliverChangeRecords() {
+ while (observationState.pendingObservers) {
+ var pendingObservers = observationState.pendingObservers;
+ observationState.pendingObservers = null;
+ for (var i in pendingObservers) {
+ CallbackDeliverPending(pendingObservers[i]);
+ }
+ }
+}
+
+function SetupObjectObserve() {
+ %CheckIsBootstrapping();
+ InstallFunctions($Object, DONT_ENUM, $Array(
+ "deliverChangeRecords", ObjectDeliverChangeRecords,
+ "getNotifier", ObjectGetNotifier,
+ "observe", ObjectObserve,
+ "unobserve", ObjectUnobserve
+ ));
+ InstallFunctions($Array, DONT_ENUM, $Array(
+ "observe", ArrayObserve,
+ "unobserve", ArrayUnobserve
+ ));
+ InstallFunctions(notifierPrototype, DONT_ENUM, $Array(
+ "notify", ObjectNotifierNotify,
+ "performChange", ObjectNotifierPerformChange
+ ));
+}
+
+SetupObjectObserve();
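
Editorial usage sketch, not part of the diff, tying the overview comment in object-observe.js to the API it installs; the change-record types follow the defaultAcceptTypes list defined above:

  function logger(records) {
    for (var i = 0; i < records.length; i++) {
      print(records[i].type + ' of property "' + records[i].name + '"');
    }
  }
  var target = {};
  Object.observe(target, logger);        // registered via ObjectInfoAddObserver
  target.x = 1;                          // enqueues a 'new' change record
  target.x = 2;                          // enqueues an 'updated' change record
  Object.deliverChangeRecords(logger);   // drains logger's pending records synchronously
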
diff --git a/deps/v8/src/objects-debug.cc b/deps/v8/src/objects-debug.cc
index be9659296..6ab2ddffe 100644
--- a/deps/v8/src/objects-debug.cc
+++ b/deps/v8/src/objects-debug.cc
@@ -30,6 +30,7 @@
#include "disassembler.h"
#include "disasm.h"
#include "jsregexp.h"
+#include "macro-assembler.h"
#include "objects-visiting.h"
namespace v8 {
@@ -79,6 +80,9 @@ void HeapObject::HeapObjectVerify() {
}
switch (instance_type) {
+ case SYMBOL_TYPE:
+ Symbol::cast(this)->SymbolVerify();
+ break;
case MAP_TYPE:
Map::cast(this)->MapVerify();
break;
@@ -91,6 +95,9 @@ void HeapObject::HeapObjectVerify() {
case FIXED_DOUBLE_ARRAY_TYPE:
FixedDoubleArray::cast(this)->FixedDoubleArrayVerify();
break;
+ case CONSTANT_POOL_ARRAY_TYPE:
+ ConstantPoolArray::cast(this)->ConstantPoolArrayVerify();
+ break;
case BYTE_ARRAY_TYPE:
ByteArray::cast(this)->ByteArrayVerify();
break;
@@ -135,6 +142,9 @@ void HeapObject::HeapObjectVerify() {
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
JSObject::cast(this)->JSObjectVerify();
break;
+ case JS_GENERATOR_OBJECT_TYPE:
+ JSGeneratorObject::cast(this)->JSGeneratorObjectVerify();
+ break;
case JS_MODULE_TYPE:
JSModule::cast(this)->JSModuleVerify();
break;
@@ -156,8 +166,11 @@ void HeapObject::HeapObjectVerify() {
case JS_BUILTINS_OBJECT_TYPE:
JSBuiltinsObject::cast(this)->JSBuiltinsObjectVerify();
break;
- case JS_GLOBAL_PROPERTY_CELL_TYPE:
- JSGlobalPropertyCell::cast(this)->JSGlobalPropertyCellVerify();
+ case CELL_TYPE:
+ Cell::cast(this)->CellVerify();
+ break;
+ case PROPERTY_CELL_TYPE:
+ PropertyCell::cast(this)->PropertyCellVerify();
break;
case JS_ARRAY_TYPE:
JSArray::cast(this)->JSArrayVerify();
@@ -171,6 +184,9 @@ void HeapObject::HeapObjectVerify() {
case JS_WEAK_MAP_TYPE:
JSWeakMap::cast(this)->JSWeakMapVerify();
break;
+ case JS_WEAK_SET_TYPE:
+ JSWeakSet::cast(this)->JSWeakSetVerify();
+ break;
case JS_REGEXP_TYPE:
JSRegExp::cast(this)->JSRegExpVerify();
break;
@@ -191,6 +207,15 @@ void HeapObject::HeapObjectVerify() {
case JS_MESSAGE_OBJECT_TYPE:
JSMessageObject::cast(this)->JSMessageObjectVerify();
break;
+ case JS_ARRAY_BUFFER_TYPE:
+ JSArrayBuffer::cast(this)->JSArrayBufferVerify();
+ break;
+ case JS_TYPED_ARRAY_TYPE:
+ JSTypedArray::cast(this)->JSTypedArrayVerify();
+ break;
+ case JS_DATA_VIEW_TYPE:
+ JSDataView::cast(this)->JSDataViewVerify();
+ break;
#define MAKE_STRUCT_CASE(NAME, Name, name) \
case NAME##_TYPE: \
@@ -208,7 +233,16 @@ void HeapObject::HeapObjectVerify() {
void HeapObject::VerifyHeapPointer(Object* p) {
CHECK(p->IsHeapObject());
- CHECK(HEAP->Contains(HeapObject::cast(p)));
+ HeapObject* ho = HeapObject::cast(p);
+ CHECK(ho->GetHeap()->Contains(ho));
+}
+
+
+void Symbol::SymbolVerify() {
+ CHECK(IsSymbol());
+ CHECK(HasHashCode());
+ CHECK_GT(Hash(), 0);
+ CHECK(name()->IsUndefined() || name()->IsString());
}
@@ -272,6 +306,13 @@ void ExternalDoubleArray::ExternalDoubleArrayVerify() {
}
+bool JSObject::ElementsAreSafeToExamine() {
+ return (FLAG_use_gvn && FLAG_use_allocation_folding) ||
+ reinterpret_cast<Map*>(elements()) !=
+ GetHeap()->one_pointer_filler_map();
+}
+
+
void JSObject::JSObjectVerify() {
VerifyHeapPointer(properties());
VerifyHeapPointer(elements());
@@ -285,32 +326,49 @@ void JSObject::JSObjectVerify() {
CHECK_EQ(map()->unused_property_fields(),
(map()->inobject_properties() + properties()->length() -
map()->NextFreePropertyIndex()));
+ DescriptorArray* descriptors = map()->instance_descriptors();
+ for (int i = 0; i < map()->NumberOfOwnDescriptors(); i++) {
+ if (descriptors->GetDetails(i).type() == FIELD) {
+ Representation r = descriptors->GetDetails(i).representation();
+ int field = descriptors->GetFieldIndex(i);
+ Object* value = RawFastPropertyAt(field);
+ if (r.IsDouble()) ASSERT(value->IsHeapNumber());
+ if (value->IsUninitialized()) continue;
+ if (r.IsSmi()) ASSERT(value->IsSmi());
+ if (r.IsHeapObject()) ASSERT(value->IsHeapObject());
+ }
+ }
+ }
+
+ // If a GC was caused while constructing this object, the elements
+ // pointer may point to a one pointer filler map.
+ if (ElementsAreSafeToExamine()) {
+ CHECK_EQ((map()->has_fast_smi_or_object_elements() ||
+ (elements() == GetHeap()->empty_fixed_array())),
+ (elements()->map() == GetHeap()->fixed_array_map() ||
+ elements()->map() == GetHeap()->fixed_cow_array_map()));
+ CHECK(map()->has_fast_object_elements() == HasFastObjectElements());
}
- CHECK_EQ((map()->has_fast_smi_or_object_elements() ||
- (elements() == GetHeap()->empty_fixed_array())),
- (elements()->map() == GetHeap()->fixed_array_map() ||
- elements()->map() == GetHeap()->fixed_cow_array_map()));
- CHECK(map()->has_fast_object_elements() == HasFastObjectElements());
}
void Map::MapVerify() {
- CHECK(!HEAP->InNewSpace(this));
+ Heap* heap = GetHeap();
+ CHECK(!heap->InNewSpace(this));
CHECK(FIRST_TYPE <= instance_type() && instance_type() <= LAST_TYPE);
CHECK(instance_size() == kVariableSizeSentinel ||
(kPointerSize <= instance_size() &&
- instance_size() < HEAP->Capacity()));
+ instance_size() < heap->Capacity()));
VerifyHeapPointer(prototype());
VerifyHeapPointer(instance_descriptors());
- DescriptorArray* descriptors = instance_descriptors();
- for (int i = 0; i < NumberOfOwnDescriptors(); ++i) {
- CHECK_EQ(i, descriptors->GetDetails(i).descriptor_index() - 1);
- }
SLOW_ASSERT(instance_descriptors()->IsSortedNoDuplicates());
if (HasTransitionArray()) {
SLOW_ASSERT(transitions()->IsSortedNoDuplicates());
SLOW_ASSERT(transitions()->IsConsistentWithBackPointers(this));
}
+ ASSERT(!is_observed() || instance_type() < FIRST_JS_OBJECT_TYPE ||
+ instance_type() > LAST_JS_OBJECT_TYPE ||
+ has_slow_elements_kind() || has_external_array_elements());
}
@@ -325,6 +383,18 @@ void Map::SharedMapVerify() {
}
+void Map::VerifyOmittedMapChecks() {
+ if (!FLAG_omit_map_checks_for_leaf_maps) return;
+ if (!is_stable() ||
+ is_deprecated() ||
+ HasTransitionArray() ||
+ is_dictionary_map()) {
+ CHECK_EQ(0, dependent_code()->number_of_entries(
+ DependentCode::kPrototypeCheckGroup));
+ }
+}
+
+
void CodeCache::CodeCacheVerify() {
VerifyHeapPointer(default_cache());
VerifyHeapPointer(normal_type_cache());
@@ -368,7 +438,7 @@ void FixedDoubleArray::FixedDoubleArrayVerify() {
for (int i = 0; i < length(); i++) {
if (!is_the_hole(i)) {
double value = get_scalar(i);
- CHECK(!isnan(value) ||
+ CHECK(!std::isnan(value) ||
(BitCast<uint64_t>(value) ==
BitCast<uint64_t>(canonical_not_the_hole_nan_as_double())) ||
((BitCast<uint64_t>(value) & Double::kSignMask) != 0));
@@ -377,6 +447,24 @@ void FixedDoubleArray::FixedDoubleArrayVerify() {
}
+void ConstantPoolArray::ConstantPoolArrayVerify() {
+ CHECK(IsConstantPoolArray());
+}
+
+
+void JSGeneratorObject::JSGeneratorObjectVerify() {
+ // In an expression like "new g()", there can be a point where a generator
+ // object is allocated but its fields are all undefined, as it hasn't yet been
+ // initialized by the generator. Hence these weak checks.
+ VerifyObjectField(kFunctionOffset);
+ VerifyObjectField(kContextOffset);
+ VerifyObjectField(kReceiverOffset);
+ VerifyObjectField(kOperandStackOffset);
+ VerifyObjectField(kContinuationOffset);
+ VerifyObjectField(kStackHandlerIndexOffset);
+}
+
+
void JSModule::JSModuleVerify() {
VerifyObjectField(kContextOffset);
VerifyObjectField(kScopeInfoOffset);
@@ -435,7 +523,7 @@ void JSDate::JSDateVerify() {
}
if (cache_stamp()->IsSmi()) {
CHECK(Smi::cast(cache_stamp())->value() <=
- Smi::cast(Isolate::Current()->date_cache()->stamp())->value());
+ Smi::cast(GetIsolate()->date_cache()->stamp())->value());
}
}
@@ -456,24 +544,17 @@ void JSMessageObject::JSMessageObjectVerify() {
void String::StringVerify() {
CHECK(IsString());
CHECK(length() >= 0 && length() <= Smi::kMaxValue);
- if (IsSymbol()) {
- CHECK(!HEAP->InNewSpace(this));
+ if (IsInternalizedString()) {
+ CHECK(!GetHeap()->InNewSpace(this));
}
if (IsConsString()) {
ConsString::cast(this)->ConsStringVerify();
} else if (IsSlicedString()) {
SlicedString::cast(this)->SlicedStringVerify();
- } else if (IsSeqAsciiString()) {
- SeqAsciiString::cast(this)->SeqAsciiStringVerify();
}
}
-void SeqAsciiString::SeqAsciiStringVerify() {
- CHECK(String::IsAscii(GetChars(), length()));
-}
-
-
void ConsString::ConsStringVerify() {
CHECK(this->first()->IsString());
CHECK(this->second() == GetHeap()->empty_string() ||
@@ -499,7 +580,8 @@ void JSFunction::JSFunctionVerify() {
VerifyObjectField(kPrototypeOrInitialMapOffset);
VerifyObjectField(kNextFunctionLinkOffset);
CHECK(code()->IsCode());
- CHECK(next_function_link()->IsUndefined() ||
+ CHECK(next_function_link() == NULL ||
+ next_function_link()->IsUndefined() ||
next_function_link()->IsJSFunction());
}
@@ -555,7 +637,7 @@ void Oddball::OddballVerify() {
VerifyHeapPointer(to_string());
Object* number = to_number();
if (number->IsHeapObject()) {
- CHECK(number == HEAP->nan_value());
+ CHECK(number == HeapObject::cast(number)->GetHeap()->nan_value());
} else {
CHECK(number->IsSmi());
int value = Smi::cast(number)->value();
@@ -567,9 +649,16 @@ void Oddball::OddballVerify() {
}
-void JSGlobalPropertyCell::JSGlobalPropertyCellVerify() {
- CHECK(IsJSGlobalPropertyCell());
+void Cell::CellVerify() {
+ CHECK(IsCell());
+ VerifyObjectField(kValueOffset);
+}
+
+
+void PropertyCell::PropertyCellVerify() {
+ CHECK(IsPropertyCell());
VerifyObjectField(kValueOffset);
+ VerifyObjectField(kTypeOffset);
}
@@ -589,12 +678,36 @@ void Code::CodeVerify() {
}
+void Code::VerifyEmbeddedObjectsDependency() {
+ int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
+ for (RelocIterator it(this, mode_mask); !it.done(); it.next()) {
+ Object* obj = it.rinfo()->target_object();
+ if (IsWeakEmbeddedObject(kind(), obj)) {
+ if (obj->IsMap()) {
+ Map* map = Map::cast(obj);
+ CHECK(map->dependent_code()->Contains(
+ DependentCode::kWeaklyEmbeddedGroup, this));
+ } else if (obj->IsJSObject()) {
+ Object* raw_table = GetIsolate()->heap()->weak_object_to_code_table();
+ WeakHashTable* table = WeakHashTable::cast(raw_table);
+ CHECK(DependentCode::cast(table->Lookup(obj))->Contains(
+ DependentCode::kWeaklyEmbeddedGroup, this));
+ }
+ }
+ }
+}
+
+
void JSArray::JSArrayVerify() {
JSObjectVerify();
CHECK(length()->IsNumber() || length()->IsUndefined());
- CHECK(elements()->IsUndefined() ||
- elements()->IsFixedArray() ||
- elements()->IsFixedDoubleArray());
+ // If a GC was caused while constructing this array, the elements
+ // pointer may point to a one pointer filler map.
+ if (ElementsAreSafeToExamine()) {
+ CHECK(elements()->IsUndefined() ||
+ elements()->IsFixedArray() ||
+ elements()->IsFixedDoubleArray());
+ }
}
@@ -622,6 +735,14 @@ void JSWeakMap::JSWeakMapVerify() {
}
+void JSWeakSet::JSWeakSetVerify() {
+ CHECK(IsJSWeakSet());
+ JSObjectVerify();
+ VerifyHeapPointer(table());
+ CHECK(table()->IsHashTable() || table()->IsUndefined());
+}
+
+
void JSRegExp::JSRegExpVerify() {
JSObjectVerify();
CHECK(data()->IsUndefined() || data()->IsFixedArray());
@@ -679,26 +800,93 @@ void JSFunctionProxy::JSFunctionProxyVerify() {
}
+void JSArrayBuffer::JSArrayBufferVerify() {
+ CHECK(IsJSArrayBuffer());
+ JSObjectVerify();
+ VerifyPointer(byte_length());
+ CHECK(byte_length()->IsSmi() || byte_length()->IsHeapNumber()
+ || byte_length()->IsUndefined());
+}
+
+
+void JSArrayBufferView::JSArrayBufferViewVerify() {
+ CHECK(IsJSArrayBufferView());
+ JSObjectVerify();
+ VerifyPointer(buffer());
+ CHECK(buffer()->IsJSArrayBuffer() || buffer()->IsUndefined());
+
+ VerifyPointer(byte_offset());
+ CHECK(byte_offset()->IsSmi() || byte_offset()->IsHeapNumber()
+ || byte_offset()->IsUndefined());
+
+ VerifyPointer(byte_length());
+ CHECK(byte_length()->IsSmi() || byte_length()->IsHeapNumber()
+ || byte_length()->IsUndefined());
+}
+
+
+void JSTypedArray::JSTypedArrayVerify() {
+ CHECK(IsJSTypedArray());
+ JSArrayBufferViewVerify();
+ VerifyPointer(length());
+ CHECK(length()->IsSmi() || length()->IsHeapNumber()
+ || length()->IsUndefined());
+
+ VerifyPointer(elements());
+}
+
+
+void JSDataView::JSDataViewVerify() {
+ CHECK(IsJSDataView());
+ JSArrayBufferViewVerify();
+}
+
+
void Foreign::ForeignVerify() {
CHECK(IsForeign());
}
+void Box::BoxVerify() {
+ CHECK(IsBox());
+ value()->Verify();
+}
+
+
void AccessorInfo::AccessorInfoVerify() {
- CHECK(IsAccessorInfo());
- VerifyPointer(getter());
- VerifyPointer(setter());
VerifyPointer(name());
- VerifyPointer(data());
VerifyPointer(flag());
VerifyPointer(expected_receiver_type());
}
+void ExecutableAccessorInfo::ExecutableAccessorInfoVerify() {
+ CHECK(IsExecutableAccessorInfo());
+ AccessorInfoVerify();
+ VerifyPointer(getter());
+ VerifyPointer(setter());
+ VerifyPointer(data());
+}
+
+
+void DeclaredAccessorDescriptor::DeclaredAccessorDescriptorVerify() {
+ CHECK(IsDeclaredAccessorDescriptor());
+ VerifyPointer(serialized_data());
+}
+
+
+void DeclaredAccessorInfo::DeclaredAccessorInfoVerify() {
+ CHECK(IsDeclaredAccessorInfo());
+ AccessorInfoVerify();
+ VerifyPointer(descriptor());
+}
+
+
void AccessorPair::AccessorPairVerify() {
CHECK(IsAccessorPair());
VerifyPointer(getter());
VerifyPointer(setter());
+ VerifySmiField(kAccessFlagsOffset);
}
@@ -731,14 +919,15 @@ void CallHandlerInfo::CallHandlerInfoVerify() {
void TemplateInfo::TemplateInfoVerify() {
VerifyPointer(tag());
VerifyPointer(property_list());
+ VerifyPointer(property_accessors());
}
+
void FunctionTemplateInfo::FunctionTemplateInfoVerify() {
CHECK(IsFunctionTemplateInfo());
TemplateInfoVerify();
VerifyPointer(serial_number());
VerifyPointer(call_code());
- VerifyPointer(property_accessors());
VerifyPointer(prototype_template());
VerifyPointer(parent_template());
VerifyPointer(named_property_handler());
@@ -770,6 +959,18 @@ void TypeSwitchInfo::TypeSwitchInfoVerify() {
}
+void AllocationSite::AllocationSiteVerify() {
+ CHECK(IsAllocationSite());
+}
+
+
+void AllocationMemento::AllocationMementoVerify() {
+ CHECK(IsAllocationMemento());
+ VerifyHeapPointer(allocation_site());
+ CHECK(!IsValid() || GetAllocationSite()->IsAllocationSite());
+}
+
+
void Script::ScriptVerify() {
CHECK(IsScript());
VerifyPointer(source());
@@ -855,7 +1056,7 @@ void JSObject::IncrementSpillStatistics(SpillInformation* info) {
info->number_of_fast_used_fields_ += map()->NextFreePropertyIndex();
info->number_of_fast_unused_fields_ += map()->unused_property_fields();
} else {
- StringDictionary* dict = property_dictionary();
+ NameDictionary* dict = property_dictionary();
info->number_of_slow_used_properties_ += dict->NumberOfElements();
info->number_of_slow_unused_properties_ +=
dict->Capacity() - dict->NumberOfElements();
@@ -872,7 +1073,7 @@ void JSObject::IncrementSpillStatistics(SpillInformation* info) {
int holes = 0;
FixedArray* e = FixedArray::cast(elements());
int len = e->length();
- Heap* heap = HEAP;
+ Heap* heap = GetHeap();
for (int i = 0; i < len; i++) {
if (e->get(i) == heap->the_hole_value()) holes++;
}
@@ -921,6 +1122,7 @@ void JSObject::SpillInformation::Clear() {
number_of_slow_unused_elements_ = 0;
}
+
void JSObject::SpillInformation::Print() {
PrintF("\n JSObject Spill Statistics (#%d):\n", number_of_objects_);
@@ -946,10 +1148,10 @@ void JSObject::SpillInformation::Print() {
bool DescriptorArray::IsSortedNoDuplicates(int valid_entries) {
if (valid_entries == -1) valid_entries = number_of_descriptors();
- String* current_key = NULL;
+ Name* current_key = NULL;
uint32_t current = 0;
for (int i = 0; i < number_of_descriptors(); i++) {
- String* key = GetSortedKey(i);
+ Name* key = GetSortedKey(i);
if (key == current_key) {
PrintDescriptors();
return false;
@@ -968,10 +1170,10 @@ bool DescriptorArray::IsSortedNoDuplicates(int valid_entries) {
bool TransitionArray::IsSortedNoDuplicates(int valid_entries) {
ASSERT(valid_entries == -1);
- String* current_key = NULL;
+ Name* current_key = NULL;
uint32_t current = 0;
for (int i = 0; i < number_of_transitions(); i++) {
- String* key = GetSortedKey(i);
+ Name* key = GetSortedKey(i);
if (key == current_key) {
PrintTransitions();
return false;
@@ -994,10 +1196,6 @@ static bool CheckOneBackPointer(Map* current_map, Object* target) {
bool TransitionArray::IsConsistentWithBackPointers(Map* current_map) {
- if (HasElementsTransition() &&
- !CheckOneBackPointer(current_map, elements_transition())) {
- return false;
- }
for (int i = 0; i < number_of_transitions(); ++i) {
if (!CheckOneBackPointer(current_map, GetTarget(i))) return false;
}
diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h
index 4834fa63a..deb33653f 100644
--- a/deps/v8/src/objects-inl.h
+++ b/deps/v8/src/objects-inl.h
@@ -58,7 +58,10 @@ PropertyDetails::PropertyDetails(Smi* smi) {
Smi* PropertyDetails::AsSmi() {
- return Smi::FromInt(value_);
+ // Ensure the upper 2 bits have the same value by sign extending it. This is
+ // necessary to be able to use the 31st bit of the property details.
+ int value = value_ << 1;
+ return Smi::FromInt(value >> 1);
}
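
An illustrative sketch of the sign-extension trick in PropertyDetails::AsSmi() above (standalone code, not part of this patch; FoldTopBit is a made-up name): shifting the raw value left by one and arithmetically back right forces bits 30 and 31 to agree, so a 31-bit payload survives the round trip through a Smi. It assumes the two's-complement arithmetic right shift that V8's supported compilers provide.

#include <cassert>
#include <cstdint>

// Mirror of the AsSmi() trick: drop bit 31, then copy bit 30 back into it.
// The left shift is done on an unsigned value to avoid signed overflow.
int32_t FoldTopBit(int32_t value) {
  return static_cast<int32_t>(static_cast<uint32_t>(value) << 1) >> 1;
}

int main() {
  assert(FoldTopBit(0x3FFFFFFF) == 0x3FFFFFFF);   // fits in 31 bits: unchanged
  assert(FoldTopBit(-1) == -1);                   // negative values survive
  assert(FoldTopBit(0x40000000) == -0x40000000);  // bit 30 sign-extended over bit 31
  return 0;
}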
@@ -77,7 +80,7 @@ PropertyDetails PropertyDetails::AsDeleted() {
#define CAST_ACCESSOR(type) \
type* type::cast(Object* object) { \
- ASSERT(object->Is##type()); \
+ SLOW_ASSERT(object->Is##type()); \
return reinterpret_cast<type*>(object); \
}
@@ -130,7 +133,20 @@ PropertyDetails PropertyDetails::AsDeleted() {
bool Object::IsFixedArrayBase() {
- return IsFixedArray() || IsFixedDoubleArray();
+ return IsFixedArray() || IsFixedDoubleArray() || IsConstantPoolArray();
+}
+
+
+// External objects are not extensible, so the map check is enough.
+bool Object::IsExternal() {
+ return Object::IsHeapObject() &&
+ HeapObject::cast(this)->map() ==
+ HeapObject::cast(this)->GetHeap()->external_map();
+}
+
+
+bool Object::IsAccessorInfo() {
+ return IsExecutableAccessorInfo() || IsDeclaredAccessorInfo();
}
@@ -170,6 +186,7 @@ bool Object::NonFailureIsHeapObject() {
TYPE_CHECKER(HeapNumber, HEAP_NUMBER_TYPE)
+TYPE_CHECKER(Symbol, SYMBOL_TYPE)
bool Object::IsString() {
@@ -178,6 +195,16 @@ bool Object::IsString() {
}
+bool Object::IsName() {
+ return IsString() || IsSymbol();
+}
+
+
+bool Object::IsUniqueName() {
+ return IsInternalizedString() || IsSymbol();
+}
+
+
bool Object::IsSpecObject() {
return Object::IsHeapObject()
&& HeapObject::cast(this)->map()->instance_type() >= FIRST_SPEC_OBJECT_TYPE;
@@ -191,15 +218,12 @@ bool Object::IsSpecFunction() {
}
-bool Object::IsSymbol() {
+bool Object::IsInternalizedString() {
if (!this->IsHeapObject()) return false;
uint32_t type = HeapObject::cast(this)->map()->instance_type();
- // Because the symbol tag is non-zero and no non-string types have the
- // symbol bit set we can test for symbols with a very simple test
- // operation.
- STATIC_ASSERT(kSymbolTag != 0);
- ASSERT(kNotStringTag + kIsSymbolMask > LAST_TYPE);
- return (type & kIsSymbolMask) != 0;
+ STATIC_ASSERT(kNotInternalizedTag != 0);
+ return (type & (kIsNotStringMask | kIsNotInternalizedMask)) ==
+ (kStringTag | kInternalizedTag);
}
@@ -221,10 +245,10 @@ bool Object::IsSeqString() {
}
-bool Object::IsSeqAsciiString() {
+bool Object::IsSeqOneByteString() {
if (!IsString()) return false;
return StringShape(String::cast(this)).IsSequential() &&
- String::cast(this)->IsAsciiRepresentation();
+ String::cast(this)->IsOneByteRepresentation();
}
@@ -244,7 +268,7 @@ bool Object::IsExternalString() {
bool Object::IsExternalAsciiString() {
if (!IsString()) return false;
return StringShape(String::cast(this)).IsExternal() &&
- String::cast(this)->IsAsciiRepresentation();
+ String::cast(this)->IsOneByteRepresentation();
}
@@ -259,6 +283,18 @@ bool Object::HasValidElements() {
return IsFixedArray() || IsFixedDoubleArray() || IsExternalArray();
}
+
+MaybeObject* Object::AllocateNewStorageFor(Heap* heap,
+ Representation representation) {
+ if (!FLAG_track_double_fields) return this;
+ if (!representation.IsDouble()) return this;
+ if (IsUninitialized()) {
+ return heap->AllocateHeapNumber(0);
+ }
+ return heap->AllocateHeapNumber(Number());
+}
+
+
StringShape::StringShape(String* str)
: type_(str->map()->instance_type()) {
set_valid();
@@ -280,16 +316,17 @@ StringShape::StringShape(InstanceType t)
}
-bool StringShape::IsSymbol() {
+bool StringShape::IsInternalized() {
ASSERT(valid());
- STATIC_ASSERT(kSymbolTag != 0);
- return (type_ & kIsSymbolMask) != 0;
+ STATIC_ASSERT(kNotInternalizedTag != 0);
+ return (type_ & (kIsNotStringMask | kIsNotInternalizedMask)) ==
+ (kStringTag | kInternalizedTag);
}
-bool String::IsAsciiRepresentation() {
+bool String::IsOneByteRepresentation() {
uint32_t type = map()->instance_type();
- return (type & kStringEncodingMask) == kAsciiStringTag;
+ return (type & kStringEncodingMask) == kOneByteStringTag;
}
@@ -299,18 +336,18 @@ bool String::IsTwoByteRepresentation() {
}
-bool String::IsAsciiRepresentationUnderneath() {
+bool String::IsOneByteRepresentationUnderneath() {
uint32_t type = map()->instance_type();
STATIC_ASSERT(kIsIndirectStringTag != 0);
STATIC_ASSERT((kIsIndirectStringMask & kStringEncodingMask) == 0);
ASSERT(IsFlat());
switch (type & (kIsIndirectStringMask | kStringEncodingMask)) {
- case kAsciiStringTag:
+ case kOneByteStringTag:
return true;
case kTwoByteStringTag:
return false;
default: // Cons or sliced string. Need to go deeper.
- return GetUnderlying()->IsAsciiRepresentation();
+ return GetUnderlying()->IsOneByteRepresentation();
}
}
@@ -321,7 +358,7 @@ bool String::IsTwoByteRepresentationUnderneath() {
STATIC_ASSERT((kIsIndirectStringMask & kStringEncodingMask) == 0);
ASSERT(IsFlat());
switch (type & (kIsIndirectStringMask | kStringEncodingMask)) {
- case kAsciiStringTag:
+ case kOneByteStringTag:
return false;
case kTwoByteStringTag:
return true;
@@ -331,10 +368,10 @@ bool String::IsTwoByteRepresentationUnderneath() {
}
-bool String::HasOnlyAsciiChars() {
+bool String::HasOnlyOneByteChars() {
uint32_t type = map()->instance_type();
- return (type & kStringEncodingMask) == kAsciiStringTag ||
- (type & kAsciiDataHintMask) == kAsciiDataHintTag;
+ return (type & kOneByteDataHintMask) == kOneByteDataHintTag ||
+ IsOneByteRepresentation();
}
@@ -387,7 +424,7 @@ STATIC_CHECK(static_cast<uint32_t>(kStringEncodingMask) ==
bool StringShape::IsSequentialAscii() {
- return full_representation_tag() == (kSeqStringTag | kAsciiStringTag);
+ return full_representation_tag() == (kSeqStringTag | kOneByteStringTag);
}
@@ -397,14 +434,14 @@ bool StringShape::IsSequentialTwoByte() {
bool StringShape::IsExternalAscii() {
- return full_representation_tag() == (kExternalStringTag | kAsciiStringTag);
+ return full_representation_tag() == (kExternalStringTag | kOneByteStringTag);
}
-STATIC_CHECK((kExternalStringTag | kAsciiStringTag) ==
+STATIC_CHECK((kExternalStringTag | kOneByteStringTag) ==
Internals::kExternalAsciiRepresentationTag);
-STATIC_CHECK(v8::String::ASCII_ENCODING == kAsciiStringTag);
+STATIC_CHECK(v8::String::ASCII_ENCODING == kOneByteStringTag);
bool StringShape::IsExternalTwoByte() {
@@ -493,6 +530,11 @@ bool MaybeObject::IsTheHole() {
}
+bool MaybeObject::IsUninitialized() {
+ return !IsFailure() && ToObjectUnchecked()->IsUninitialized();
+}
+
+
Failure* Failure::cast(MaybeObject* obj) {
ASSERT(HAS_FAILURE_TAG(obj));
return reinterpret_cast<Failure*>(obj);
@@ -524,10 +566,17 @@ TYPE_CHECKER(JSFunctionProxy, JS_FUNCTION_PROXY_TYPE)
TYPE_CHECKER(JSSet, JS_SET_TYPE)
TYPE_CHECKER(JSMap, JS_MAP_TYPE)
TYPE_CHECKER(JSWeakMap, JS_WEAK_MAP_TYPE)
+TYPE_CHECKER(JSWeakSet, JS_WEAK_SET_TYPE)
TYPE_CHECKER(JSContextExtensionObject, JS_CONTEXT_EXTENSION_OBJECT_TYPE)
TYPE_CHECKER(Map, MAP_TYPE)
TYPE_CHECKER(FixedArray, FIXED_ARRAY_TYPE)
TYPE_CHECKER(FixedDoubleArray, FIXED_DOUBLE_ARRAY_TYPE)
+TYPE_CHECKER(ConstantPoolArray, CONSTANT_POOL_ARRAY_TYPE)
+
+
+bool Object::IsJSWeakCollection() {
+ return IsJSWeakMap() || IsJSWeakSet();
+}
bool Object::IsDescriptorArray() {
@@ -567,6 +616,14 @@ bool Object::IsDeoptimizationOutputData() {
}
+bool Object::IsDependentCode() {
+ if (!IsFixedArray()) return false;
+ // There's actually no way to see the difference between a fixed array and
+ // a dependent codes array.
+ return true;
+}
+
+
bool Object::IsTypeFeedbackCells() {
if (!IsFixedArray()) return false;
// There's actually no way to see the difference between a fixed array and
@@ -615,8 +672,10 @@ template <> inline bool Is<JSFunction>(Object* obj) {
TYPE_CHECKER(Code, CODE_TYPE)
TYPE_CHECKER(Oddball, ODDBALL_TYPE)
-TYPE_CHECKER(JSGlobalPropertyCell, JS_GLOBAL_PROPERTY_CELL_TYPE)
+TYPE_CHECKER(Cell, CELL_TYPE)
+TYPE_CHECKER(PropertyCell, PROPERTY_CELL_TYPE)
TYPE_CHECKER(SharedFunctionInfo, SHARED_FUNCTION_INFO_TYPE)
+TYPE_CHECKER(JSGeneratorObject, JS_GENERATOR_OBJECT_TYPE)
TYPE_CHECKER(JSModule, JS_MODULE_TYPE)
TYPE_CHECKER(JSValue, JS_VALUE_TYPE)
TYPE_CHECKER(JSDate, JS_DATE_TYPE)
@@ -638,6 +697,16 @@ bool Object::IsBoolean() {
TYPE_CHECKER(JSArray, JS_ARRAY_TYPE)
+TYPE_CHECKER(JSArrayBuffer, JS_ARRAY_BUFFER_TYPE)
+TYPE_CHECKER(JSTypedArray, JS_TYPED_ARRAY_TYPE)
+TYPE_CHECKER(JSDataView, JS_DATA_VIEW_TYPE)
+
+
+bool Object::IsJSArrayBufferView() {
+ return IsJSDataView() || IsJSTypedArray();
+}
+
+
TYPE_CHECKER(JSRegExp, JS_REGEXP_TYPE)
@@ -655,13 +724,13 @@ bool Object::IsHashTable() {
bool Object::IsDictionary() {
return IsHashTable() &&
- this != HeapObject::cast(this)->GetHeap()->symbol_table();
+ this != HeapObject::cast(this)->GetHeap()->string_table();
}
-bool Object::IsSymbolTable() {
- return IsHashTable() && this ==
- HeapObject::cast(this)->GetHeap()->raw_unchecked_symbol_table();
+bool Object::IsStringTable() {
+ return IsHashTable() &&
+ this == HeapObject::cast(this)->GetHeap()->raw_unchecked_string_table();
}
@@ -718,6 +787,11 @@ bool Object::IsMapCache() {
}
+bool Object::IsObjectHashTable() {
+ return IsHashTable();
+}
+
+
bool Object::IsPrimitive() {
return IsOddball() || IsNumber() || IsString();
}
@@ -792,6 +866,11 @@ bool Object::IsTheHole() {
}
+bool Object::IsUninitialized() {
+ return IsOddball() && Oddball::cast(this)->kind() == Oddball::kUninitialized;
+}
+
+
bool Object::IsTrue() {
return IsOddball() && Oddball::cast(this)->kind() == Oddball::kTrue;
}
@@ -816,7 +895,7 @@ double Object::Number() {
bool Object::IsNaN() {
- return this->IsHeapNumber() && isnan(HeapNumber::cast(this)->value());
+ return this->IsHeapNumber() && std::isnan(HeapNumber::cast(this)->value());
}
@@ -838,17 +917,17 @@ bool Object::HasSpecificClassOf(String* name) {
}
-MaybeObject* Object::GetElement(uint32_t index) {
+MaybeObject* Object::GetElement(Isolate* isolate, uint32_t index) {
// GetElement can trigger a getter which can cause allocation.
// This was not always the case. This ASSERT is here to catch
// leftover incorrect uses.
- ASSERT(HEAP->IsAllocationAllowed());
- return GetElementWithReceiver(this, index);
+ ASSERT(AllowHeapAllocation::IsAllowed());
+ return GetElementWithReceiver(isolate, this, index);
}
-Object* Object::GetElementNoExceptionThrown(uint32_t index) {
- MaybeObject* maybe = GetElementWithReceiver(this, index);
+Object* Object::GetElementNoExceptionThrown(Isolate* isolate, uint32_t index) {
+ MaybeObject* maybe = GetElementWithReceiver(isolate, this, index);
ASSERT(!maybe->IsFailure());
Object* result = NULL; // Initialization to please compiler.
maybe->ToObject(&result);
@@ -856,13 +935,13 @@ Object* Object::GetElementNoExceptionThrown(uint32_t index) {
}
-MaybeObject* Object::GetProperty(String* key) {
+MaybeObject* Object::GetProperty(Name* key) {
PropertyAttributes attributes;
return GetPropertyWithReceiver(this, key, &attributes);
}
-MaybeObject* Object::GetProperty(String* key, PropertyAttributes* attributes) {
+MaybeObject* Object::GetProperty(Name* key, PropertyAttributes* attributes) {
return GetPropertyWithReceiver(this, key, attributes);
}
@@ -949,6 +1028,12 @@ MaybeObject* Object::GetProperty(String* key, PropertyAttributes* attributes) {
#define WRITE_UINT32_FIELD(p, offset, value) \
(*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset)) = value)
+#define READ_INT32_FIELD(p, offset) \
+ (*reinterpret_cast<int32_t*>(FIELD_ADDR(p, offset)))
+
+#define WRITE_INT32_FIELD(p, offset, value) \
+ (*reinterpret_cast<int32_t*>(FIELD_ADDR(p, offset)) = value)
+
#define READ_INT64_FIELD(p, offset) \
(*reinterpret_cast<int64_t*>(FIELD_ADDR(p, offset)))
@@ -980,10 +1065,7 @@ int Smi::value() {
Smi* Smi::FromInt(int value) {
ASSERT(Smi::IsValid(value));
- int smi_shift_bits = kSmiTagSize + kSmiShiftSize;
- intptr_t tagged_value =
- (static_cast<intptr_t>(value) << smi_shift_bits) | kSmiTag;
- return reinterpret_cast<Smi*>(tagged_value);
+ return reinterpret_cast<Smi*>(Internals::IntToSmi(value));
}
@@ -1026,8 +1108,8 @@ Failure* Failure::Exception() {
}
-Failure* Failure::OutOfMemoryException() {
- return Construct(OUT_OF_MEMORY_EXCEPTION);
+Failure* Failure::OutOfMemoryException(intptr_t value) {
+ return Construct(OUT_OF_MEMORY_EXCEPTION, value);
}
@@ -1052,33 +1134,17 @@ Failure* Failure::Construct(Type type, intptr_t value) {
uintptr_t info =
(static_cast<uintptr_t>(value) << kFailureTypeTagSize) | type;
ASSERT(((info << kFailureTagSize) >> kFailureTagSize) == info);
- return reinterpret_cast<Failure*>((info << kFailureTagSize) | kFailureTag);
+ // Fill the unused bits with a pattern that's easy to recognize in crash
+ // dumps.
+ static const int kFailureMagicPattern = 0x0BAD0000;
+ return reinterpret_cast<Failure*>(
+ (info << kFailureTagSize) | kFailureTag | kFailureMagicPattern);
}
bool Smi::IsValid(intptr_t value) {
-#ifdef DEBUG
- bool in_range = (value >= kMinValue) && (value <= kMaxValue);
-#endif
-
-#ifdef V8_TARGET_ARCH_X64
- // To be representable as a long smi, the value must be a 32-bit integer.
- bool result = (value == static_cast<int32_t>(value));
-#else
- // To be representable as an tagged small integer, the two
- // most-significant bits of 'value' must be either 00 or 11 due to
- // sign-extension. To check this we add 01 to the two
- // most-significant bits, and check if the most-significant bit is 0
- //
- // CAUTION: The original code below:
- // bool result = ((value + 0x40000000) & 0x80000000) == 0;
- // may lead to incorrect results according to the C language spec, and
- // in fact doesn't work correctly with gcc4.1.1 in some cases: The
- // compiler may produce undefined results in case of signed integer
- // overflow. The computation must be done w/ unsigned ints.
- bool result = (static_cast<uintptr_t>(value + 0x40000000U) < 0x80000000U);
-#endif
- ASSERT(result == in_range);
+ bool result = Internals::IsValidSmi(value);
+ ASSERT_EQ(result, value >= kMinValue && value <= kMaxValue);
return result;
}
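
The deleted comment above records why the range check must avoid signed overflow. A standalone sketch of that check done with unsigned arithmetic (illustrative only, not this patch's code; FitsInSmi31 is a made-up name and the bounds are the 32-bit kMinValue/kMaxValue):

#include <cassert>
#include <cstdint>

// A value fits in a 31-bit Smi iff it lies in [-0x40000000, 0x40000000);
// adding 0x40000000 with unsigned wrap-around maps exactly that range
// onto [0, 0x80000000), so one unsigned compare suffices.
bool FitsInSmi31(intptr_t value) {
  return static_cast<uintptr_t>(value) + 0x40000000u < 0x80000000u;
}

int main() {
  assert(FitsInSmi31(0));
  assert(FitsInSmi31(0x3FFFFFFF));    // kMaxValue on 32-bit targets
  assert(FitsInSmi31(-0x40000000));   // kMinValue on 32-bit targets
  assert(!FitsInSmi31(0x40000000));   // one past the top of the range
  return 0;
}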
@@ -1124,8 +1190,7 @@ void HeapObject::VerifySmiField(int offset) {
Heap* HeapObject::GetHeap() {
Heap* heap =
MemoryChunk::FromAddress(reinterpret_cast<Address>(this))->heap();
- ASSERT(heap != NULL);
- ASSERT(heap->isolate() == Isolate::Current());
+ SLOW_ASSERT(heap != NULL);
return heap;
}
@@ -1242,7 +1307,7 @@ FixedArrayBase* JSObject::elements() {
void JSObject::ValidateElements() {
-#if DEBUG
+#ifdef ENABLE_SLOW_ASSERTS
if (FLAG_enable_slow_asserts) {
ElementsAccessor* accessor = GetElementsAccessor();
accessor->Validate(this);
@@ -1251,17 +1316,67 @@ void JSObject::ValidateElements() {
}
-MaybeObject* JSObject::EnsureCanContainHeapObjectElements() {
- ValidateElements();
- ElementsKind elements_kind = map()->elements_kind();
+bool JSObject::ShouldTrackAllocationInfo() {
+ if (AllocationSite::CanTrack(map()->instance_type())) {
+ if (!IsJSArray()) {
+ return true;
+ }
+
+ return AllocationSite::GetMode(GetElementsKind()) ==
+ TRACK_ALLOCATION_SITE;
+ }
+ return false;
+}
+
+
+void AllocationSite::Initialize() {
+ SetElementsKind(GetInitialFastElementsKind());
+ set_nested_site(Smi::FromInt(0));
+ set_dependent_code(DependentCode::cast(GetHeap()->empty_fixed_array()),
+ SKIP_WRITE_BARRIER);
+}
+
+
+// Heuristic: We only need to create allocation site info if the boilerplate
+// elements kind is the initial elements kind.
+AllocationSiteMode AllocationSite::GetMode(
+ ElementsKind boilerplate_elements_kind) {
+ if (FLAG_track_allocation_sites &&
+ IsFastSmiElementsKind(boilerplate_elements_kind)) {
+ return TRACK_ALLOCATION_SITE;
+ }
+
+ return DONT_TRACK_ALLOCATION_SITE;
+}
+
+
+AllocationSiteMode AllocationSite::GetMode(ElementsKind from,
+ ElementsKind to) {
+ if (FLAG_track_allocation_sites &&
+ IsFastSmiElementsKind(from) &&
+ IsMoreGeneralElementsKindTransition(from, to)) {
+ return TRACK_ALLOCATION_SITE;
+ }
+
+ return DONT_TRACK_ALLOCATION_SITE;
+}
+
+
+inline bool AllocationSite::CanTrack(InstanceType type) {
+ return type == JS_ARRAY_TYPE;
+}
+
+
+void JSObject::EnsureCanContainHeapObjectElements(Handle<JSObject> object) {
+ object->ValidateElements();
+ ElementsKind elements_kind = object->map()->elements_kind();
if (!IsFastObjectElementsKind(elements_kind)) {
if (IsFastHoleyElementsKind(elements_kind)) {
- return TransitionElementsKind(FAST_HOLEY_ELEMENTS);
+ TransitionElementsKind(object, FAST_HOLEY_ELEMENTS);
} else {
- return TransitionElementsKind(FAST_ELEMENTS);
+ TransitionElementsKind(object, FAST_ELEMENTS);
}
}
- return this;
}
@@ -1392,62 +1507,80 @@ void JSObject::initialize_properties() {
void JSObject::initialize_elements() {
- ASSERT(map()->has_fast_smi_or_object_elements() ||
- map()->has_fast_double_elements());
- ASSERT(!GetHeap()->InNewSpace(GetHeap()->empty_fixed_array()));
- WRITE_FIELD(this, kElementsOffset, GetHeap()->empty_fixed_array());
+ if (map()->has_fast_smi_or_object_elements() ||
+ map()->has_fast_double_elements()) {
+ ASSERT(!GetHeap()->InNewSpace(GetHeap()->empty_fixed_array()));
+ WRITE_FIELD(this, kElementsOffset, GetHeap()->empty_fixed_array());
+ } else if (map()->has_external_array_elements()) {
+ ExternalArray* empty_array = GetHeap()->EmptyExternalArrayForMap(map());
+ ASSERT(!GetHeap()->InNewSpace(empty_array));
+ WRITE_FIELD(this, kElementsOffset, empty_array);
+ } else {
+ UNREACHABLE();
+ }
}
MaybeObject* JSObject::ResetElements() {
- Object* obj;
+ if (map()->is_observed()) {
+ // Maintain invariant that observed elements are always in dictionary mode.
+ SeededNumberDictionary* dictionary;
+ MaybeObject* maybe = SeededNumberDictionary::Allocate(GetHeap(), 0);
+ if (!maybe->To(&dictionary)) return maybe;
+ if (map() == GetHeap()->non_strict_arguments_elements_map()) {
+ FixedArray::cast(elements())->set(1, dictionary);
+ } else {
+ set_elements(dictionary);
+ }
+ return this;
+ }
+
ElementsKind elements_kind = GetInitialFastElementsKind();
if (!FLAG_smi_only_arrays) {
elements_kind = FastSmiToObjectElementsKind(elements_kind);
}
- MaybeObject* maybe_obj = GetElementsTransitionMap(GetIsolate(),
- elements_kind);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- set_map(Map::cast(obj));
+ MaybeObject* maybe = GetElementsTransitionMap(GetIsolate(), elements_kind);
+ Map* map;
+ if (!maybe->To(&map)) return maybe;
+ set_map(map);
initialize_elements();
+
return this;
}
-MaybeObject* JSObject::AddFastPropertyUsingMap(Map* map) {
- ASSERT(this->map()->NumberOfOwnDescriptors() + 1 ==
- map->NumberOfOwnDescriptors());
- if (this->map()->unused_property_fields() == 0) {
- int new_size = properties()->length() + map->unused_property_fields() + 1;
- FixedArray* new_properties;
- MaybeObject* maybe_properties = properties()->CopySize(new_size);
- if (!maybe_properties->To(&new_properties)) return maybe_properties;
- set_properties(new_properties);
- }
- set_map(map);
- return this;
+Handle<String> JSObject::ExpectedTransitionKey(Handle<Map> map) {
+ DisallowHeapAllocation no_gc;
+ if (!map->HasTransitionArray()) return Handle<String>::null();
+ TransitionArray* transitions = map->transitions();
+ if (!transitions->IsSimpleTransition()) return Handle<String>::null();
+ int transition = TransitionArray::kSimpleTransitionIndex;
+ PropertyDetails details = transitions->GetTargetDetails(transition);
+ Name* name = transitions->GetKey(transition);
+ if (details.type() != FIELD) return Handle<String>::null();
+ if (details.attributes() != NONE) return Handle<String>::null();
+ if (!name->IsString()) return Handle<String>::null();
+ return Handle<String>(String::cast(name));
}
-bool JSObject::TryTransitionToField(Handle<JSObject> object,
- Handle<String> key) {
- if (!object->map()->HasTransitionArray()) return false;
- Handle<TransitionArray> transitions(object->map()->transitions());
- int transition = transitions->Search(*key);
- if (transition == TransitionArray::kNotFound) return false;
- PropertyDetails target_details = transitions->GetTargetDetails(transition);
- if (target_details.type() != FIELD) return false;
- if (target_details.attributes() != NONE) return false;
- Handle<Map> target(transitions->GetTarget(transition));
- JSObject::AddFastPropertyUsingMap(object, target);
- return true;
+Handle<Map> JSObject::ExpectedTransitionTarget(Handle<Map> map) {
+ ASSERT(!ExpectedTransitionKey(map).is_null());
+ return Handle<Map>(map->transitions()->GetTarget(
+ TransitionArray::kSimpleTransitionIndex));
}
-int JSObject::LastAddedFieldIndex() {
- Map* map = this->map();
- int last_added = map->LastAdded();
- return map->instance_descriptors()->GetFieldIndex(last_added);
+Handle<Map> JSObject::FindTransitionToField(Handle<Map> map, Handle<Name> key) {
+ DisallowHeapAllocation no_allocation;
+ if (!map->HasTransitionArray()) return Handle<Map>::null();
+ TransitionArray* transitions = map->transitions();
+ int transition = transitions->Search(*key);
+ if (transition == TransitionArray::kNotFound) return Handle<Map>::null();
+ PropertyDetails target_details = transitions->GetTargetDetails(transition);
+ if (target_details.type() != FIELD) return Handle<Map>::null();
+ if (target_details.attributes() != NONE) return Handle<Map>::null();
+ return Handle<Map>(transitions->GetTarget(transition));
}
@@ -1465,17 +1598,28 @@ void Oddball::set_kind(byte value) {
}
-Object* JSGlobalPropertyCell::value() {
+Object* Cell::value() {
return READ_FIELD(this, kValueOffset);
}
-void JSGlobalPropertyCell::set_value(Object* val, WriteBarrierMode ignored) {
+void Cell::set_value(Object* val, WriteBarrierMode ignored) {
// The write barrier is not used for global property cells.
- ASSERT(!val->IsJSGlobalPropertyCell());
+ ASSERT(!val->IsPropertyCell() && !val->IsCell());
WRITE_FIELD(this, kValueOffset, val);
}
+ACCESSORS(PropertyCell, dependent_code, DependentCode, kDependentCodeOffset)
+
+Object* PropertyCell::type_raw() {
+ return READ_FIELD(this, kTypeOffset);
+}
+
+
+void PropertyCell::set_type_raw(Object* val, WriteBarrierMode ignored) {
+ WRITE_FIELD(this, kTypeOffset, val);
+}
+
int JSObject::GetHeaderSize() {
InstanceType type = map()->instance_type();
@@ -1484,6 +1628,8 @@ int JSObject::GetHeaderSize() {
// field operations considerably on average.
if (type == JS_OBJECT_TYPE) return JSObject::kHeaderSize;
switch (type) {
+ case JS_GENERATOR_OBJECT_TYPE:
+ return JSGeneratorObject::kSize;
case JS_MODULE_TYPE:
return JSModule::kSize;
case JS_GLOBAL_PROXY_TYPE:
@@ -1500,8 +1646,20 @@ int JSObject::GetHeaderSize() {
return JSDate::kSize;
case JS_ARRAY_TYPE:
return JSArray::kSize;
+ case JS_ARRAY_BUFFER_TYPE:
+ return JSArrayBuffer::kSize;
+ case JS_TYPED_ARRAY_TYPE:
+ return JSTypedArray::kSize;
+ case JS_DATA_VIEW_TYPE:
+ return JSDataView::kSize;
+ case JS_SET_TYPE:
+ return JSSet::kSize;
+ case JS_MAP_TYPE:
+ return JSMap::kSize;
case JS_WEAK_MAP_TYPE:
return JSWeakMap::kSize;
+ case JS_WEAK_SET_TYPE:
+ return JSWeakSet::kSize;
case JS_REGEXP_TYPE:
return JSRegExp::kSize;
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
@@ -1509,7 +1667,9 @@ int JSObject::GetHeaderSize() {
case JS_MESSAGE_OBJECT_TYPE:
return JSMessageObject::kSize;
default:
- UNREACHABLE();
+ // TODO(jkummerow): Re-enable this. Blink currently hits this
+ // from its CustomElementConstructorBuilder.
+ // UNREACHABLE();
return 0;
}
}
@@ -1560,10 +1720,17 @@ void JSObject::SetInternalField(int index, Smi* value) {
}
+MaybeObject* JSObject::FastPropertyAt(Representation representation,
+ int index) {
+ Object* raw_value = RawFastPropertyAt(index);
+ return raw_value->AllocateNewStorageFor(GetHeap(), representation);
+}
+
+
// Access fast-case object properties at index. The use of these routines
// is needed to correctly distinguish between properties stored in-object and
// properties stored in the properties array.
-Object* JSObject::FastPropertyAt(int index) {
+Object* JSObject::RawFastPropertyAt(int index) {
// Adjust for the number of properties stored in the object.
index -= map()->inobject_properties();
if (index < 0) {
@@ -1576,7 +1743,7 @@ Object* JSObject::FastPropertyAt(int index) {
}
-Object* JSObject::FastPropertyAtPut(int index, Object* value) {
+void JSObject::FastPropertyAtPut(int index, Object* value) {
// Adjust for the number of properties stored in the object.
index -= map()->inobject_properties();
if (index < 0) {
@@ -1587,7 +1754,6 @@ Object* JSObject::FastPropertyAtPut(int index, Object* value) {
ASSERT(index < properties()->length());
properties()->set(index, value);
}
- return value;
}
@@ -1652,14 +1818,15 @@ bool JSObject::HasFastProperties() {
}
-bool JSObject::TooManyFastProperties(int properties,
- JSObject::StoreFromKeyed store_mode) {
+bool JSObject::TooManyFastProperties(StoreFromKeyed store_mode) {
// Allow extra fast properties if the object has more than
- // kFastPropertiesSoftLimit in-object properties. When this is the case,
- // it is very unlikely that the object is being used as a dictionary
- // and there is a good chance that allowing more map transitions
- // will be worth it.
- int inobject = map()->inobject_properties();
+ // kFastPropertiesSoftLimit in-object properties. When this is the case, it is
+ // very unlikely that the object is being used as a dictionary and there is a
+ // good chance that allowing more map transitions will be worth it.
+ Map* map = this->map();
+ if (map->unused_property_fields() != 0) return false;
+
+ int inobject = map->inobject_properties();
int limit;
if (store_mode == CERTAINLY_NOT_STORE_FROM_KEYED) {
@@ -1667,7 +1834,7 @@ bool JSObject::TooManyFastProperties(int properties,
} else {
limit = Max(inobject, kFastPropertiesSoftLimit);
}
- return properties > limit;
+ return properties()->length() > limit;
}
@@ -1705,7 +1872,7 @@ bool Object::IsStringObjectWithCharacterAt(uint32_t index) {
if (!js_value->value()->IsString()) return false;
String* str = String::cast(js_value->value());
- if (index >= (uint32_t)str->length()) return false;
+ if (index >= static_cast<uint32_t>(str->length())) return false;
return true;
}
@@ -1729,13 +1896,14 @@ void Object::VerifyApiCallResultType() {
FixedArrayBase* FixedArrayBase::cast(Object* object) {
- ASSERT(object->IsFixedArray() || object->IsFixedDoubleArray());
+ ASSERT(object->IsFixedArray() || object->IsFixedDoubleArray() ||
+ object->IsConstantPoolArray());
return reinterpret_cast<FixedArrayBase*>(object);
}
Object* FixedArray::get(int index) {
- ASSERT(index >= 0 && index < this->length());
+ SLOW_ASSERT(index >= 0 && index < this->length());
return READ_FIELD(this, kHeaderSize + index * kPointerSize);
}
@@ -1746,7 +1914,7 @@ bool FixedArray::is_the_hole(int index) {
void FixedArray::set(int index, Smi* value) {
- ASSERT(map() != HEAP->fixed_cow_array_map());
+ ASSERT(map() != GetHeap()->fixed_cow_array_map());
ASSERT(index >= 0 && index < this->length());
ASSERT(reinterpret_cast<Object*>(value)->IsSmi());
int offset = kHeaderSize + index * kPointerSize;
@@ -1755,7 +1923,7 @@ void FixedArray::set(int index, Smi* value) {
void FixedArray::set(int index, Object* value) {
- ASSERT(map() != HEAP->fixed_cow_array_map());
+ ASSERT(map() != GetHeap()->fixed_cow_array_map());
ASSERT(index >= 0 && index < this->length());
int offset = kHeaderSize + index * kPointerSize;
WRITE_FIELD(this, offset, value);
@@ -1781,8 +1949,8 @@ inline double FixedDoubleArray::canonical_not_the_hole_nan_as_double() {
double FixedDoubleArray::get_scalar(int index) {
- ASSERT(map() != HEAP->fixed_cow_array_map() &&
- map() != HEAP->fixed_array_map());
+ ASSERT(map() != GetHeap()->fixed_cow_array_map() &&
+ map() != GetHeap()->fixed_array_map());
ASSERT(index >= 0 && index < this->length());
double result = READ_DOUBLE_FIELD(this, kHeaderSize + index * kDoubleSize);
ASSERT(!is_the_hole_nan(result));
@@ -1790,8 +1958,8 @@ double FixedDoubleArray::get_scalar(int index) {
}
int64_t FixedDoubleArray::get_representation(int index) {
- ASSERT(map() != HEAP->fixed_cow_array_map() &&
- map() != HEAP->fixed_array_map());
+ ASSERT(map() != GetHeap()->fixed_cow_array_map() &&
+ map() != GetHeap()->fixed_array_map());
ASSERT(index >= 0 && index < this->length());
return READ_INT64_FIELD(this, kHeaderSize + index * kDoubleSize);
}
@@ -1806,17 +1974,17 @@ MaybeObject* FixedDoubleArray::get(int index) {
void FixedDoubleArray::set(int index, double value) {
- ASSERT(map() != HEAP->fixed_cow_array_map() &&
- map() != HEAP->fixed_array_map());
+ ASSERT(map() != GetHeap()->fixed_cow_array_map() &&
+ map() != GetHeap()->fixed_array_map());
int offset = kHeaderSize + index * kDoubleSize;
- if (isnan(value)) value = canonical_not_the_hole_nan_as_double();
+ if (std::isnan(value)) value = canonical_not_the_hole_nan_as_double();
WRITE_DOUBLE_FIELD(this, offset, value);
}
void FixedDoubleArray::set_the_hole(int index) {
- ASSERT(map() != HEAP->fixed_cow_array_map() &&
- map() != HEAP->fixed_array_map());
+ ASSERT(map() != GetHeap()->fixed_cow_array_map() &&
+ map() != GetHeap()->fixed_array_map());
int offset = kHeaderSize + index * kDoubleSize;
WRITE_DOUBLE_FIELD(this, offset, hole_nan_as_double());
}
@@ -1828,7 +1996,100 @@ bool FixedDoubleArray::is_the_hole(int index) {
}
-WriteBarrierMode HeapObject::GetWriteBarrierMode(const AssertNoAllocation&) {
+SMI_ACCESSORS(ConstantPoolArray, first_ptr_index, kFirstPointerIndexOffset)
+SMI_ACCESSORS(ConstantPoolArray, first_int32_index, kFirstInt32IndexOffset)
+
+
+int ConstantPoolArray::first_int64_index() {
+ return 0;
+}
+
+
+int ConstantPoolArray::count_of_int64_entries() {
+ return first_ptr_index();
+}
+
+
+int ConstantPoolArray::count_of_ptr_entries() {
+ return first_int32_index() - first_ptr_index();
+}
+
+
+int ConstantPoolArray::count_of_int32_entries() {
+ return length() - first_int32_index();
+}
+
+
+void ConstantPoolArray::SetEntryCounts(int number_of_int64_entries,
+ int number_of_ptr_entries,
+ int number_of_int32_entries) {
+ set_first_ptr_index(number_of_int64_entries);
+ set_first_int32_index(number_of_int64_entries + number_of_ptr_entries);
+ set_length(number_of_int64_entries + number_of_ptr_entries +
+ number_of_int32_entries);
+}
+
+
+int64_t ConstantPoolArray::get_int64_entry(int index) {
+ ASSERT(map() == GetHeap()->constant_pool_array_map());
+ ASSERT(index >= 0 && index < first_ptr_index());
+ return READ_INT64_FIELD(this, OffsetOfElementAt(index));
+}
+
+double ConstantPoolArray::get_int64_entry_as_double(int index) {
+ STATIC_ASSERT(kDoubleSize == kInt64Size);
+ ASSERT(map() == GetHeap()->constant_pool_array_map());
+ ASSERT(index >= 0 && index < first_ptr_index());
+ return READ_DOUBLE_FIELD(this, OffsetOfElementAt(index));
+}
+
+
+Object* ConstantPoolArray::get_ptr_entry(int index) {
+ ASSERT(map() == GetHeap()->constant_pool_array_map());
+ ASSERT(index >= first_ptr_index() && index < first_int32_index());
+ return READ_FIELD(this, OffsetOfElementAt(index));
+}
+
+
+int32_t ConstantPoolArray::get_int32_entry(int index) {
+ ASSERT(map() == GetHeap()->constant_pool_array_map());
+ ASSERT(index >= first_int32_index() && index < length());
+ return READ_INT32_FIELD(this, OffsetOfElementAt(index));
+}
+
+
+void ConstantPoolArray::set(int index, Object* value) {
+ ASSERT(map() == GetHeap()->constant_pool_array_map());
+ ASSERT(index >= first_ptr_index() && index < first_int32_index());
+ WRITE_FIELD(this, OffsetOfElementAt(index), value);
+ WRITE_BARRIER(GetHeap(), this, OffsetOfElementAt(index), value);
+}
+
+
+void ConstantPoolArray::set(int index, int64_t value) {
+ ASSERT(map() == GetHeap()->constant_pool_array_map());
+ ASSERT(index >= first_int64_index() && index < first_ptr_index());
+ WRITE_INT64_FIELD(this, OffsetOfElementAt(index), value);
+}
+
+
+void ConstantPoolArray::set(int index, double value) {
+ STATIC_ASSERT(kDoubleSize == kInt64Size);
+ ASSERT(map() == GetHeap()->constant_pool_array_map());
+ ASSERT(index >= first_int64_index() && index < first_ptr_index());
+ WRITE_DOUBLE_FIELD(this, OffsetOfElementAt(index), value);
+}
+
+
+void ConstantPoolArray::set(int index, int32_t value) {
+ ASSERT(map() == GetHeap()->constant_pool_array_map());
+ ASSERT(index >= this->first_int32_index() && index < length());
+ WRITE_INT32_FIELD(this, OffsetOfElementAt(index), value);
+}
+
+
+WriteBarrierMode HeapObject::GetWriteBarrierMode(
+ const DisallowHeapAllocation& promise) {
Heap* heap = GetHeap();
if (heap->incremental_marking()->IsMarking()) return UPDATE_WRITE_BARRIER;
if (heap->InNewSpace(this)) return SKIP_WRITE_BARRIER;
@@ -1839,7 +2100,7 @@ WriteBarrierMode HeapObject::GetWriteBarrierMode(const AssertNoAllocation&) {
void FixedArray::set(int index,
Object* value,
WriteBarrierMode mode) {
- ASSERT(map() != HEAP->fixed_cow_array_map());
+ ASSERT(map() != GetHeap()->fixed_cow_array_map());
ASSERT(index >= 0 && index < this->length());
int offset = kHeaderSize + index * kPointerSize;
WRITE_FIELD(this, offset, value);
@@ -1850,7 +2111,7 @@ void FixedArray::set(int index,
void FixedArray::NoIncrementalWriteBarrierSet(FixedArray* array,
int index,
Object* value) {
- ASSERT(array->map() != HEAP->raw_unchecked_fixed_cow_array_map());
+ ASSERT(array->map() != array->GetHeap()->fixed_cow_array_map());
ASSERT(index >= 0 && index < array->length());
int offset = kHeaderSize + index * kPointerSize;
WRITE_FIELD(array, offset, value);
@@ -1864,70 +2125,44 @@ void FixedArray::NoIncrementalWriteBarrierSet(FixedArray* array,
void FixedArray::NoWriteBarrierSet(FixedArray* array,
int index,
Object* value) {
- ASSERT(array->map() != HEAP->raw_unchecked_fixed_cow_array_map());
+ ASSERT(array->map() != array->GetHeap()->fixed_cow_array_map());
ASSERT(index >= 0 && index < array->length());
- ASSERT(!HEAP->InNewSpace(value));
+ ASSERT(!array->GetHeap()->InNewSpace(value));
WRITE_FIELD(array, kHeaderSize + index * kPointerSize, value);
}
void FixedArray::set_undefined(int index) {
- ASSERT(map() != HEAP->fixed_cow_array_map());
- set_undefined(GetHeap(), index);
-}
-
-
-void FixedArray::set_undefined(Heap* heap, int index) {
+ ASSERT(map() != GetHeap()->fixed_cow_array_map());
ASSERT(index >= 0 && index < this->length());
- ASSERT(!heap->InNewSpace(heap->undefined_value()));
- WRITE_FIELD(this, kHeaderSize + index * kPointerSize,
- heap->undefined_value());
+ ASSERT(!GetHeap()->InNewSpace(GetHeap()->undefined_value()));
+ WRITE_FIELD(this,
+ kHeaderSize + index * kPointerSize,
+ GetHeap()->undefined_value());
}
void FixedArray::set_null(int index) {
- set_null(GetHeap(), index);
-}
-
-
-void FixedArray::set_null(Heap* heap, int index) {
ASSERT(index >= 0 && index < this->length());
- ASSERT(!heap->InNewSpace(heap->null_value()));
- WRITE_FIELD(this, kHeaderSize + index * kPointerSize, heap->null_value());
+ ASSERT(!GetHeap()->InNewSpace(GetHeap()->null_value()));
+ WRITE_FIELD(this,
+ kHeaderSize + index * kPointerSize,
+ GetHeap()->null_value());
}
void FixedArray::set_the_hole(int index) {
- ASSERT(map() != HEAP->fixed_cow_array_map());
+ ASSERT(map() != GetHeap()->fixed_cow_array_map());
ASSERT(index >= 0 && index < this->length());
- ASSERT(!HEAP->InNewSpace(HEAP->the_hole_value()));
+ ASSERT(!GetHeap()->InNewSpace(GetHeap()->the_hole_value()));
WRITE_FIELD(this,
kHeaderSize + index * kPointerSize,
GetHeap()->the_hole_value());
}
-void FixedArray::set_unchecked(int index, Smi* value) {
- ASSERT(reinterpret_cast<Object*>(value)->IsSmi());
- int offset = kHeaderSize + index * kPointerSize;
- WRITE_FIELD(this, offset, value);
-}
-
-
-void FixedArray::set_unchecked(Heap* heap,
- int index,
- Object* value,
- WriteBarrierMode mode) {
- int offset = kHeaderSize + index * kPointerSize;
- WRITE_FIELD(this, offset, value);
- CONDITIONAL_WRITE_BARRIER(heap, this, offset, value, mode);
-}
-
-
-void FixedArray::set_null_unchecked(Heap* heap, int index) {
- ASSERT(index >= 0 && index < this->length());
- ASSERT(!heap->InNewSpace(heap->null_value()));
- WRITE_FIELD(this, kHeaderSize + index * kPointerSize, heap->null_value());
+double* FixedDoubleArray::data_start() {
+ return reinterpret_cast<double*>(FIELD_ADDR(this, kHeaderSize));
}
@@ -1938,7 +2173,7 @@ Object** FixedArray::data_start() {
bool DescriptorArray::IsEmpty() {
ASSERT(length() >= kFirstIndex ||
- this == HEAP->empty_descriptor_array());
+ this == GetHeap()->empty_descriptor_array());
return length() < kFirstIndex;
}
@@ -1953,7 +2188,7 @@ void DescriptorArray::SetNumberOfDescriptors(int number_of_descriptors) {
// there are three entries in this array it should be called with low=0 and
// high=2.
template<SearchMode search_mode, typename T>
-int BinarySearch(T* array, String* name, int low, int high, int valid_entries) {
+int BinarySearch(T* array, Name* name, int low, int high, int valid_entries) {
uint32_t hash = name->Hash();
int limit = high;
@@ -1961,7 +2196,7 @@ int BinarySearch(T* array, String* name, int low, int high, int valid_entries) {
while (low != high) {
int mid = (low + high) / 2;
- String* mid_name = array->GetSortedKey(mid);
+ Name* mid_name = array->GetSortedKey(mid);
uint32_t mid_hash = mid_name->Hash();
if (mid_hash >= hash) {
@@ -1973,7 +2208,7 @@ int BinarySearch(T* array, String* name, int low, int high, int valid_entries) {
for (; low <= limit; ++low) {
int sort_index = array->GetSortedKeyIndex(low);
- String* entry = array->GetKey(sort_index);
+ Name* entry = array->GetKey(sort_index);
if (entry->Hash() != hash) break;
if (entry->Equals(name)) {
if (search_mode == ALL_ENTRIES || sort_index < valid_entries) {
@@ -1990,12 +2225,12 @@ int BinarySearch(T* array, String* name, int low, int high, int valid_entries) {
// Perform a linear search in this fixed array. len is the number of entry
// indices that are valid.
template<SearchMode search_mode, typename T>
-int LinearSearch(T* array, String* name, int len, int valid_entries) {
+int LinearSearch(T* array, Name* name, int len, int valid_entries) {
uint32_t hash = name->Hash();
if (search_mode == ALL_ENTRIES) {
for (int number = 0; number < len; number++) {
int sorted_index = array->GetSortedKeyIndex(number);
- String* entry = array->GetKey(sorted_index);
+ Name* entry = array->GetKey(sorted_index);
uint32_t current_hash = entry->Hash();
if (current_hash > hash) break;
if (current_hash == hash && entry->Equals(name)) return sorted_index;
@@ -2003,7 +2238,7 @@ int LinearSearch(T* array, String* name, int len, int valid_entries) {
} else {
ASSERT(len >= valid_entries);
for (int number = 0; number < valid_entries; number++) {
- String* entry = array->GetKey(number);
+ Name* entry = array->GetKey(number);
uint32_t current_hash = entry->Hash();
if (current_hash == hash && entry->Equals(name)) return number;
}
@@ -2013,7 +2248,7 @@ int LinearSearch(T* array, String* name, int len, int valid_entries) {
template<SearchMode search_mode, typename T>
-int Search(T* array, String* name, int valid_entries) {
+int Search(T* array, Name* name, int valid_entries) {
if (search_mode == VALID_ENTRIES) {
SLOW_ASSERT(array->IsSortedNoDuplicates(valid_entries));
} else {
@@ -2037,12 +2272,12 @@ int Search(T* array, String* name, int valid_entries) {
}
-int DescriptorArray::Search(String* name, int valid_descriptors) {
+int DescriptorArray::Search(Name* name, int valid_descriptors) {
return internal::Search<VALID_ENTRIES>(this, name, valid_descriptors);
}
-int DescriptorArray::SearchWithCache(String* name, Map* map) {
+int DescriptorArray::SearchWithCache(Name* name, Map* map) {
int number_of_own_descriptors = map->NumberOfOwnDescriptors();
if (number_of_own_descriptors == 0) return kNotFound;
@@ -2059,7 +2294,7 @@ int DescriptorArray::SearchWithCache(String* name, Map* map) {
void Map::LookupDescriptor(JSObject* holder,
- String* name,
+ Name* name,
LookupResult* result) {
DescriptorArray* descriptors = this->instance_descriptors();
int number = descriptors->SearchWithCache(name, this);
@@ -2069,7 +2304,7 @@ void Map::LookupDescriptor(JSObject* holder,
void Map::LookupTransition(JSObject* holder,
- String* name,
+ Name* name,
LookupResult* result) {
if (HasTransitionArray()) {
TransitionArray* transition_array = transitions();
@@ -2090,9 +2325,19 @@ Object** DescriptorArray::GetKeySlot(int descriptor_number) {
}
-String* DescriptorArray::GetKey(int descriptor_number) {
+Object** DescriptorArray::GetDescriptorStartSlot(int descriptor_number) {
+ return GetKeySlot(descriptor_number);
+}
+
+
+Object** DescriptorArray::GetDescriptorEndSlot(int descriptor_number) {
+ return GetValueSlot(descriptor_number - 1) + 1;
+}
+
+
+Name* DescriptorArray::GetKey(int descriptor_number) {
ASSERT(descriptor_number < number_of_descriptors());
- return String::cast(get(ToKeyIndex(descriptor_number)));
+ return Name::cast(get(ToKeyIndex(descriptor_number)));
}
@@ -2101,7 +2346,7 @@ int DescriptorArray::GetSortedKeyIndex(int descriptor_number) {
}
-String* DescriptorArray::GetSortedKey(int descriptor_number) {
+Name* DescriptorArray::GetSortedKey(int descriptor_number) {
return GetKey(GetSortedKeyIndex(descriptor_number));
}
@@ -2112,6 +2357,23 @@ void DescriptorArray::SetSortedKey(int descriptor_index, int pointer) {
}
+void DescriptorArray::SetRepresentation(int descriptor_index,
+ Representation representation) {
+ ASSERT(!representation.IsNone());
+ PropertyDetails details = GetDetails(descriptor_index);
+ set(ToDetailsIndex(descriptor_index),
+ details.CopyWithRepresentation(representation).AsSmi());
+}
+
+
+void DescriptorArray::InitializeRepresentations(Representation representation) {
+ int length = number_of_descriptors();
+ for (int i = 0; i < length; i++) {
+ SetRepresentation(i, representation);
+ }
+}
+
+
Object** DescriptorArray::GetValueSlot(int descriptor_number) {
ASSERT(descriptor_number < number_of_descriptors());
return HeapObject::RawField(
@@ -2139,12 +2401,13 @@ PropertyType DescriptorArray::GetType(int descriptor_number) {
int DescriptorArray::GetFieldIndex(int descriptor_number) {
- return Descriptor::IndexFromValue(GetValue(descriptor_number));
+ ASSERT(GetDetails(descriptor_number).type() == FIELD);
+ return GetDetails(descriptor_number).field_index();
}
-JSFunction* DescriptorArray::GetConstantFunction(int descriptor_number) {
- return JSFunction::cast(GetValue(descriptor_number));
+Object* DescriptorArray::GetConstant(int descriptor_number) {
+ return GetValue(descriptor_number);
}
@@ -2173,9 +2436,6 @@ void DescriptorArray::Set(int descriptor_number,
const WhitenessWitness&) {
// Range check.
ASSERT(descriptor_number < number_of_descriptors());
- ASSERT(desc->GetDetails().descriptor_index() <=
- number_of_descriptors());
- ASSERT(desc->GetDetails().descriptor_index() > 0);
NoIncrementalWriteBarrierSet(this,
ToKeyIndex(descriptor_number),
@@ -2192,9 +2452,6 @@ void DescriptorArray::Set(int descriptor_number,
void DescriptorArray::Set(int descriptor_number, Descriptor* desc) {
// Range check.
ASSERT(descriptor_number < number_of_descriptors());
- ASSERT(desc->GetDetails().descriptor_index() <=
- number_of_descriptors());
- ASSERT(desc->GetDetails().descriptor_index() > 0);
set(ToKeyIndex(descriptor_number), desc->GetKey());
set(ToValueIndex(descriptor_number), desc->GetValue());
@@ -2205,9 +2462,7 @@ void DescriptorArray::Set(int descriptor_number, Descriptor* desc) {
void DescriptorArray::Append(Descriptor* desc,
const WhitenessWitness& witness) {
int descriptor_number = number_of_descriptors();
- int enumeration_index = descriptor_number + 1;
SetNumberOfDescriptors(descriptor_number + 1);
- desc->SetEnumerationIndex(enumeration_index);
Set(descriptor_number, desc, witness);
uint32_t hash = desc->GetKey()->Hash();
@@ -2215,7 +2470,7 @@ void DescriptorArray::Append(Descriptor* desc,
int insertion;
for (insertion = descriptor_number; insertion > 0; --insertion) {
- String* key = GetSortedKey(insertion - 1);
+ Name* key = GetSortedKey(insertion - 1);
if (key->Hash() <= hash) break;
SetSortedKey(insertion, GetSortedKeyIndex(insertion - 1));
}
@@ -2226,9 +2481,7 @@ void DescriptorArray::Append(Descriptor* desc,
void DescriptorArray::Append(Descriptor* desc) {
int descriptor_number = number_of_descriptors();
- int enumeration_index = descriptor_number + 1;
SetNumberOfDescriptors(descriptor_number + 1);
- desc->SetEnumerationIndex(enumeration_index);
Set(descriptor_number, desc);
uint32_t hash = desc->GetKey()->Hash();
@@ -2236,7 +2489,7 @@ void DescriptorArray::Append(Descriptor* desc) {
int insertion;
for (insertion = descriptor_number; insertion > 0; --insertion) {
- String* key = GetSortedKey(insertion - 1);
+ Name* key = GetSortedKey(insertion - 1);
if (key->Hash() <= hash) break;
SetSortedKey(insertion, GetSortedKeyIndex(insertion - 1));
}
@@ -2290,7 +2543,8 @@ int HashTable<Shape, Key>::FindEntry(Isolate* isolate, Key key) {
// EnsureCapacity will guarantee the hash table is never full.
while (true) {
Object* element = KeyAt(entry);
- // Empty entry.
+ // Empty entry. Uses raw unchecked accessors because it is called by the
+ // string table during bootstrapping.
if (element == isolate->heap()->raw_unchecked_undefined_value()) break;
if (element != isolate->heap()->raw_unchecked_the_hole_value() &&
Shape::IsMatch(key, element)) return entry;
@@ -2326,11 +2580,13 @@ void SeededNumberDictionary::set_requires_slow_elements() {
CAST_ACCESSOR(FixedArray)
CAST_ACCESSOR(FixedDoubleArray)
+CAST_ACCESSOR(ConstantPoolArray)
CAST_ACCESSOR(DescriptorArray)
CAST_ACCESSOR(DeoptimizationInputData)
CAST_ACCESSOR(DeoptimizationOutputData)
+CAST_ACCESSOR(DependentCode)
CAST_ACCESSOR(TypeFeedbackCells)
-CAST_ACCESSOR(SymbolTable)
+CAST_ACCESSOR(StringTable)
CAST_ACCESSOR(JSFunctionResultCache)
CAST_ACCESSOR(NormalizedMapCache)
CAST_ACCESSOR(ScopeInfo)
@@ -2340,20 +2596,23 @@ CAST_ACCESSOR(PolymorphicCodeCacheHashTable)
CAST_ACCESSOR(MapCache)
CAST_ACCESSOR(String)
CAST_ACCESSOR(SeqString)
-CAST_ACCESSOR(SeqAsciiString)
+CAST_ACCESSOR(SeqOneByteString)
CAST_ACCESSOR(SeqTwoByteString)
CAST_ACCESSOR(SlicedString)
CAST_ACCESSOR(ConsString)
CAST_ACCESSOR(ExternalString)
CAST_ACCESSOR(ExternalAsciiString)
CAST_ACCESSOR(ExternalTwoByteString)
+CAST_ACCESSOR(Symbol)
+CAST_ACCESSOR(Name)
CAST_ACCESSOR(JSReceiver)
CAST_ACCESSOR(JSObject)
CAST_ACCESSOR(Smi)
CAST_ACCESSOR(HeapObject)
CAST_ACCESSOR(HeapNumber)
CAST_ACCESSOR(Oddball)
-CAST_ACCESSOR(JSGlobalPropertyCell)
+CAST_ACCESSOR(Cell)
+CAST_ACCESSOR(PropertyCell)
CAST_ACCESSOR(SharedFunctionInfo)
CAST_ACCESSOR(Map)
CAST_ACCESSOR(JSFunction)
@@ -2363,12 +2622,17 @@ CAST_ACCESSOR(JSGlobalObject)
CAST_ACCESSOR(JSBuiltinsObject)
CAST_ACCESSOR(Code)
CAST_ACCESSOR(JSArray)
+CAST_ACCESSOR(JSArrayBuffer)
+CAST_ACCESSOR(JSArrayBufferView)
+CAST_ACCESSOR(JSTypedArray)
+CAST_ACCESSOR(JSDataView)
CAST_ACCESSOR(JSRegExp)
CAST_ACCESSOR(JSProxy)
CAST_ACCESSOR(JSFunctionProxy)
CAST_ACCESSOR(JSSet)
CAST_ACCESSOR(JSMap)
CAST_ACCESSOR(JSWeakMap)
+CAST_ACCESSOR(JSWeakSet)
CAST_ACCESSOR(Foreign)
CAST_ACCESSOR(ByteArray)
CAST_ACCESSOR(FreeSpace)
@@ -2383,6 +2647,7 @@ CAST_ACCESSOR(ExternalFloatArray)
CAST_ACCESSOR(ExternalDoubleArray)
CAST_ACCESSOR(ExternalPixelArray)
CAST_ACCESSOR(Struct)
+CAST_ACCESSOR(AccessorInfo)
#define MAKE_STRUCT_CAST(NAME, Name, name) CAST_ACCESSOR(Name)
@@ -2403,12 +2668,12 @@ SMI_ACCESSORS(FreeSpace, size, kSizeOffset)
SMI_ACCESSORS(String, length, kLengthOffset)
-uint32_t String::hash_field() {
+uint32_t Name::hash_field() {
return READ_UINT32_FIELD(this, kHashFieldOffset);
}
-void String::set_hash_field(uint32_t value) {
+void Name::set_hash_field(uint32_t value) {
WRITE_UINT32_FIELD(this, kHashFieldOffset, value);
#if V8_HOST_ARCH_64_BIT
WRITE_UINT32_FIELD(this, kHashFieldOffset + kIntSize, 0);
@@ -2416,9 +2681,22 @@ void String::set_hash_field(uint32_t value) {
}
+bool Name::Equals(Name* other) {
+ if (other == this) return true;
+ if ((this->IsInternalizedString() && other->IsInternalizedString()) ||
+ this->IsSymbol() || other->IsSymbol()) {
+ return false;
+ }
+ return String::cast(this)->SlowEquals(String::cast(other));
+}
+
+
+ACCESSORS(Symbol, name, Object, kNameOffset)
+
+
bool String::Equals(String* other) {
if (other == this) return true;
- if (StringShape(this).IsSymbol() && StringShape(other).IsSymbol()) {
+ if (this->IsInternalizedString() && other->IsInternalizedString()) {
return false;
}
return SlowEquals(other);
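
The internalized-string fast path in Name::Equals() and String::Equals() above rests on an interning invariant: each distinct content has exactly one internalized copy, so two different internalized pointers can never hold equal contents. A standalone sketch of that invariant with an ordinary intern table (illustrative only; Internalize and the std::unordered_set table are stand-ins, not V8's string table):

#include <cassert>
#include <string>
#include <unordered_set>

// Return the single canonical copy of s held by the intern table.
// (unordered_set references stay valid across later insertions.)
const std::string* Internalize(std::unordered_set<std::string>* table,
                               const std::string& s) {
  return &*table->insert(s).first;
}

int main() {
  std::unordered_set<std::string> table;
  const std::string* a = Internalize(&table, "foo");
  const std::string* b = Internalize(&table, std::string("fo") + "o");
  const std::string* c = Internalize(&table, "bar");
  assert(a == b);  // equal contents collapse to one canonical object
  assert(a != c);  // so distinct pointers imply distinct contents
  return 0;
}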
@@ -2444,18 +2722,18 @@ String* String::TryFlattenGetString(PretenureFlag pretenure) {
uint16_t String::Get(int index) {
ASSERT(index >= 0 && index < length());
switch (StringShape(this).full_representation_tag()) {
- case kSeqStringTag | kAsciiStringTag:
- return SeqAsciiString::cast(this)->SeqAsciiStringGet(index);
+ case kSeqStringTag | kOneByteStringTag:
+ return SeqOneByteString::cast(this)->SeqOneByteStringGet(index);
case kSeqStringTag | kTwoByteStringTag:
return SeqTwoByteString::cast(this)->SeqTwoByteStringGet(index);
- case kConsStringTag | kAsciiStringTag:
+ case kConsStringTag | kOneByteStringTag:
case kConsStringTag | kTwoByteStringTag:
return ConsString::cast(this)->ConsStringGet(index);
- case kExternalStringTag | kAsciiStringTag:
+ case kExternalStringTag | kOneByteStringTag:
return ExternalAsciiString::cast(this)->ExternalAsciiStringGet(index);
case kExternalStringTag | kTwoByteStringTag:
return ExternalTwoByteString::cast(this)->ExternalTwoByteStringGet(index);
- case kSlicedStringTag | kAsciiStringTag:
+ case kSlicedStringTag | kOneByteStringTag:
case kSlicedStringTag | kTwoByteStringTag:
return SlicedString::cast(this)->SlicedStringGet(index);
default:
@@ -2471,8 +2749,8 @@ void String::Set(int index, uint16_t value) {
ASSERT(index >= 0 && index < length());
ASSERT(StringShape(this).IsSequential());
- return this->IsAsciiRepresentation()
- ? SeqAsciiString::cast(this)->SeqAsciiStringSet(index, value)
+ return this->IsOneByteRepresentation()
+ ? SeqOneByteString::cast(this)->SeqOneByteStringSet(index, value)
: SeqTwoByteString::cast(this)->SeqTwoByteStringSet(index, value);
}
@@ -2494,26 +2772,116 @@ String* String::GetUnderlying() {
}
-uint16_t SeqAsciiString::SeqAsciiStringGet(int index) {
+template<class Visitor, class ConsOp>
+void String::Visit(
+ String* string,
+ unsigned offset,
+ Visitor& visitor,
+ ConsOp& cons_op,
+ int32_t type,
+ unsigned length) {
+ ASSERT(length == static_cast<unsigned>(string->length()));
+ ASSERT(offset <= length);
+ unsigned slice_offset = offset;
+ while (true) {
+ ASSERT(type == string->map()->instance_type());
+
+ switch (type & (kStringRepresentationMask | kStringEncodingMask)) {
+ case kSeqStringTag | kOneByteStringTag:
+ visitor.VisitOneByteString(
+ SeqOneByteString::cast(string)->GetChars() + slice_offset,
+ length - offset);
+ return;
+
+ case kSeqStringTag | kTwoByteStringTag:
+ visitor.VisitTwoByteString(
+ SeqTwoByteString::cast(string)->GetChars() + slice_offset,
+ length - offset);
+ return;
+
+ case kExternalStringTag | kOneByteStringTag:
+ visitor.VisitOneByteString(
+ ExternalAsciiString::cast(string)->GetChars() + slice_offset,
+ length - offset);
+ return;
+
+ case kExternalStringTag | kTwoByteStringTag:
+ visitor.VisitTwoByteString(
+ ExternalTwoByteString::cast(string)->GetChars() + slice_offset,
+ length - offset);
+ return;
+
+ case kSlicedStringTag | kOneByteStringTag:
+ case kSlicedStringTag | kTwoByteStringTag: {
+ SlicedString* slicedString = SlicedString::cast(string);
+ slice_offset += slicedString->offset();
+ string = slicedString->parent();
+ type = string->map()->instance_type();
+ continue;
+ }
+
+ case kConsStringTag | kOneByteStringTag:
+ case kConsStringTag | kTwoByteStringTag:
+ string = cons_op.Operate(string, &offset, &type, &length);
+ if (string == NULL) return;
+ slice_offset = offset;
+ ASSERT(length == static_cast<unsigned>(string->length()));
+ continue;
+
+ default:
+ UNREACHABLE();
+ return;
+ }
+ }
+}
+
+
+// TODO(dcarney): Remove this class after conversion to VisitFlat.
+class ConsStringCaptureOp {
+ public:
+ inline ConsStringCaptureOp() : cons_string_(NULL) {}
+ inline String* Operate(String* string, unsigned*, int32_t*, unsigned*) {
+ cons_string_ = ConsString::cast(string);
+ return NULL;
+ }
+ ConsString* cons_string_;
+};
+
+
+template<class Visitor>
+ConsString* String::VisitFlat(Visitor* visitor,
+ String* string,
+ int offset,
+ int length,
+ int32_t type) {
+ ASSERT(length >= 0 && length == string->length());
+ ASSERT(offset >= 0 && offset <= length);
+ ConsStringCaptureOp op;
+ Visit(string, offset, *visitor, op, type, static_cast<unsigned>(length));
+ return op.cons_string_;
+}
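
// Illustrative sketch (not part of the diff): a minimal visitor usable with
// String::VisitFlat above. VisitFlat hands the visitor a single flat one-byte
// or two-byte slice and returns NULL, or returns the ConsString it stopped at
// so the caller can fall back to a slower path. The class name and the check
// it performs are made up for illustration.
#include <stdint.h>

class OneByteCheckVisitor {
 public:
  OneByteCheckVisitor() : all_one_byte_(true) {}
  void VisitOneByteString(const uint8_t*, unsigned) {
    // One-byte data is trivially one-byte; nothing to inspect.
  }
  void VisitTwoByteString(const uint16_t* chars, unsigned length) {
    for (unsigned i = 0; i < length; i++) {
      if (chars[i] > 0xFF) { all_one_byte_ = false; return; }
    }
  }
  bool all_one_byte() const { return all_one_byte_; }
 private:
  bool all_one_byte_;
};
// Hypothetical use, assuming str is a String*:
//   OneByteCheckVisitor visitor;
//   ConsString* cons = String::VisitFlat(&visitor, str, 0, str->length(),
//                                        str->map()->instance_type());
//   // cons == NULL means the whole string was visited in one pass.
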
+
+
+uint16_t SeqOneByteString::SeqOneByteStringGet(int index) {
ASSERT(index >= 0 && index < length());
return READ_BYTE_FIELD(this, kHeaderSize + index * kCharSize);
}
-void SeqAsciiString::SeqAsciiStringSet(int index, uint16_t value) {
- ASSERT(index >= 0 && index < length() && value <= kMaxAsciiCharCode);
+void SeqOneByteString::SeqOneByteStringSet(int index, uint16_t value) {
+ ASSERT(index >= 0 && index < length() && value <= kMaxOneByteCharCode);
WRITE_BYTE_FIELD(this, kHeaderSize + index * kCharSize,
static_cast<byte>(value));
}
-Address SeqAsciiString::GetCharsAddress() {
+Address SeqOneByteString::GetCharsAddress() {
return FIELD_ADDR(this, kHeaderSize);
}
-char* SeqAsciiString::GetChars() {
- return reinterpret_cast<char*>(GetCharsAddress());
+uint8_t* SeqOneByteString::GetChars() {
+ return reinterpret_cast<uint8_t*>(GetCharsAddress());
}
@@ -2544,7 +2912,7 @@ int SeqTwoByteString::SeqTwoByteStringSize(InstanceType instance_type) {
}
-int SeqAsciiString::SeqAsciiStringSize(InstanceType instance_type) {
+int SeqOneByteString::SeqOneByteStringSize(InstanceType instance_type) {
return SizeFor(length());
}
@@ -2623,8 +2991,8 @@ void ExternalAsciiString::set_resource(
}
-const char* ExternalAsciiString::GetChars() {
- return resource()->data();
+const uint8_t* ExternalAsciiString::GetChars() {
+ return reinterpret_cast<const uint8_t*>(resource()->data());
}
@@ -2672,6 +3040,135 @@ const uint16_t* ExternalTwoByteString::ExternalTwoByteStringGetData(
}
+String* ConsStringNullOp::Operate(String*, unsigned*, int32_t*, unsigned*) {
+ return NULL;
+}
+
+
+unsigned ConsStringIteratorOp::OffsetForDepth(unsigned depth) {
+ return depth & kDepthMask;
+}
+
+
+void ConsStringIteratorOp::PushLeft(ConsString* string) {
+ frames_[depth_++ & kDepthMask] = string;
+}
+
+
+void ConsStringIteratorOp::PushRight(ConsString* string) {
+ // In-place update.
+ frames_[(depth_-1) & kDepthMask] = string;
+}
+
+
+void ConsStringIteratorOp::AdjustMaximumDepth() {
+ if (depth_ > maximum_depth_) maximum_depth_ = depth_;
+}
+
+
+void ConsStringIteratorOp::Pop() {
+ ASSERT(depth_ > 0);
+ ASSERT(depth_ <= maximum_depth_);
+ depth_--;
+}
+
+
+bool ConsStringIteratorOp::HasMore() {
+ return depth_ != 0;
+}
+
+
+void ConsStringIteratorOp::Reset() {
+ depth_ = 0;
+}
+
+
+String* ConsStringIteratorOp::ContinueOperation(int32_t* type_out,
+ unsigned* length_out) {
+ bool blew_stack = false;
+ String* string = NextLeaf(&blew_stack, type_out, length_out);
+ // String found.
+ if (string != NULL) {
+ // Verify output.
+ ASSERT(*length_out == static_cast<unsigned>(string->length()));
+ ASSERT(*type_out == string->map()->instance_type());
+ return string;
+ }
+ // Traversal complete.
+ if (!blew_stack) return NULL;
+ // Restart search from root.
+ unsigned offset_out;
+ string = Search(&offset_out, type_out, length_out);
+ // Verify output.
+ ASSERT(string == NULL || offset_out == 0);
+ ASSERT(string == NULL ||
+ *length_out == static_cast<unsigned>(string->length()));
+ ASSERT(string == NULL || *type_out == string->map()->instance_type());
+ return string;
+}
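
// Illustrative sketch (not part of the diff): PushLeft/PushRight above index a
// fixed-size frame array with `depth_ & kDepthMask`, so the "stack" silently
// wraps once a cons tree is deeper than the window; ContinueOperation detects
// that through NextLeaf's blew_stack flag and restarts from the root. A
// standalone model of the wrapping stack, with an assumed window size:
template <typename T>
class WrappingStack {
 public:
  static const unsigned kDepth = 32;  // Assumed; must be a power of two.
  static const unsigned kDepthMask = kDepth - 1;
  WrappingStack() : depth_(0), maximum_depth_(0) {}
  void Push(T value) {
    frames_[depth_++ & kDepthMask] = value;
    if (depth_ > maximum_depth_) maximum_depth_ = depth_;
  }
  void ReplaceTop(T value) { frames_[(depth_ - 1) & kDepthMask] = value; }
  T Top() const { return frames_[(depth_ - 1) & kDepthMask]; }
  void Pop() { depth_--; }
  bool HasMore() const { return depth_ != 0; }
  // True once pushes have wrapped and older frames were overwritten.
  bool BlewStack() const { return maximum_depth_ > kDepth; }
 private:
  T frames_[kDepth];
  unsigned depth_;
  unsigned maximum_depth_;
};
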
+
+
+uint16_t StringCharacterStream::GetNext() {
+ ASSERT(buffer8_ != NULL && end_ != NULL);
+ // Advance cursor if needed.
+ // TODO(dcarney): Ensure users of the API call HasMore() first and avoid this.
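+  // Callers are expected to check HasMore() before each GetNext(); the
+  // HasMore() call below only papers over callers that do not.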
+ if (buffer8_ == end_) HasMore();
+ ASSERT(buffer8_ < end_);
+ return is_one_byte_ ? *buffer8_++ : *buffer16_++;
+}
+
+
+StringCharacterStream::StringCharacterStream(String* string,
+ ConsStringIteratorOp* op,
+ unsigned offset)
+ : is_one_byte_(false),
+ op_(op) {
+ Reset(string, offset);
+}
+
+
+void StringCharacterStream::Reset(String* string, unsigned offset) {
+ op_->Reset();
+ buffer8_ = NULL;
+ end_ = NULL;
+ int32_t type = string->map()->instance_type();
+ unsigned length = string->length();
+ String::Visit(string, offset, *this, *op_, type, length);
+}
+
+
+bool StringCharacterStream::HasMore() {
+ if (buffer8_ != end_) return true;
+ if (!op_->HasMore()) return false;
+ unsigned length;
+ int32_t type;
+ String* string = op_->ContinueOperation(&type, &length);
+ if (string == NULL) return false;
+ ASSERT(!string->IsConsString());
+ ASSERT(string->length() != 0);
+ ConsStringNullOp null_op;
+ String::Visit(string, 0, *this, null_op, type, length);
+ ASSERT(buffer8_ != end_);
+ return true;
+}
+
+
+void StringCharacterStream::VisitOneByteString(
+ const uint8_t* chars, unsigned length) {
+ is_one_byte_ = true;
+ buffer8_ = chars;
+ end_ = chars + length;
+}
+
+
+void StringCharacterStream::VisitTwoByteString(
+ const uint16_t* chars, unsigned length) {
+ is_one_byte_ = false;
+ buffer16_ = chars;
+ end_ = reinterpret_cast<const uint8_t*>(chars + length);
+}
+
+
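
// Usage sketch (not part of the diff): reading every character of a possibly
// non-flat string through the stream defined above. CountOccurrences is a
// hypothetical helper; per the TODO on GetNext(), HasMore() is called before
// every read.
static int CountOccurrences(String* string,
                            ConsStringIteratorOp* op,
                            uint16_t wanted) {
  StringCharacterStream stream(string, op, 0);
  int count = 0;
  while (stream.HasMore()) {
    if (stream.GetNext() == wanted) count++;
  }
  return count;
}
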
void JSFunctionResultCache::MakeZeroSize() {
set_finger_index(kEntriesIndex);
set_size(kEntriesIndex);
@@ -2954,16 +3451,15 @@ int Map::pre_allocated_property_fields() {
int HeapObject::SizeFromMap(Map* map) {
int instance_size = map->instance_size();
if (instance_size != kVariableSizeSentinel) return instance_size;
- // We can ignore the "symbol" bit becase it is only set for symbols
- // and implies a string type.
- int instance_type = static_cast<int>(map->instance_type()) & ~kIsSymbolMask;
// Only inline the most frequent cases.
+ int instance_type = static_cast<int>(map->instance_type());
if (instance_type == FIXED_ARRAY_TYPE) {
return FixedArray::BodyDescriptor::SizeOf(map, this);
}
- if (instance_type == ASCII_STRING_TYPE) {
- return SeqAsciiString::SizeFor(
- reinterpret_cast<SeqAsciiString*>(this)->length());
+ if (instance_type == ASCII_STRING_TYPE ||
+ instance_type == ASCII_INTERNALIZED_STRING_TYPE) {
+ return SeqOneByteString::SizeFor(
+ reinterpret_cast<SeqOneByteString*>(this)->length());
}
if (instance_type == BYTE_ARRAY_TYPE) {
return reinterpret_cast<ByteArray*>(this)->ByteArraySize();
@@ -2971,7 +3467,8 @@ int HeapObject::SizeFromMap(Map* map) {
if (instance_type == FREE_SPACE_TYPE) {
return reinterpret_cast<FreeSpace*>(this)->size();
}
- if (instance_type == STRING_TYPE) {
+ if (instance_type == STRING_TYPE ||
+ instance_type == INTERNALIZED_STRING_TYPE) {
return SeqTwoByteString::SizeFor(
reinterpret_cast<SeqTwoByteString*>(this)->length());
}
@@ -2979,6 +3476,12 @@ int HeapObject::SizeFromMap(Map* map) {
return FixedDoubleArray::SizeFor(
reinterpret_cast<FixedDoubleArray*>(this)->length());
}
+ if (instance_type == CONSTANT_POOL_ARRAY_TYPE) {
+ return ConstantPoolArray::SizeFor(
+ reinterpret_cast<ConstantPoolArray*>(this)->count_of_int64_entries(),
+ reinterpret_cast<ConstantPoolArray*>(this)->count_of_ptr_entries(),
+ reinterpret_cast<ConstantPoolArray*>(this)->count_of_int32_entries());
+ }
ASSERT(instance_type == CODE_TYPE);
return reinterpret_cast<Code*>(this)->CodeSize();
}
@@ -3121,6 +3624,7 @@ bool Map::is_shared() {
void Map::set_dictionary_map(bool value) {
+ if (value) mark_unstable();
set_bit_field3(DictionaryMap::update(bit_field3(), value));
}
@@ -3130,11 +3634,6 @@ bool Map::is_dictionary_map() {
}
-JSFunction* Map::unchecked_constructor() {
- return reinterpret_cast<JSFunction*>(READ_FIELD(this, kConstructorOffset));
-}
-
-
Code::Flags Code::flags() {
return static_cast<Flags>(READ_INT_FIELD(this, kFlagsOffset));
}
@@ -3150,6 +3649,168 @@ bool Map::owns_descriptors() {
}
+void Map::set_is_observed(bool is_observed) {
+ ASSERT(instance_type() < FIRST_JS_OBJECT_TYPE ||
+ instance_type() > LAST_JS_OBJECT_TYPE ||
+ has_slow_elements_kind() || has_external_array_elements());
+ set_bit_field3(IsObserved::update(bit_field3(), is_observed));
+}
+
+
+bool Map::is_observed() {
+ return IsObserved::decode(bit_field3());
+}
+
+
+void Map::deprecate() {
+ set_bit_field3(Deprecated::update(bit_field3(), true));
+}
+
+
+bool Map::is_deprecated() {
+ if (!FLAG_track_fields) return false;
+ return Deprecated::decode(bit_field3());
+}
+
+
+void Map::set_migration_target(bool value) {
+ set_bit_field3(IsMigrationTarget::update(bit_field3(), value));
+}
+
+
+bool Map::is_migration_target() {
+ if (!FLAG_track_fields) return false;
+ return IsMigrationTarget::decode(bit_field3());
+}
+
+
+void Map::freeze() {
+ set_bit_field3(IsFrozen::update(bit_field3(), true));
+}
+
+
+bool Map::is_frozen() {
+ return IsFrozen::decode(bit_field3());
+}
+
+
+void Map::mark_unstable() {
+ set_bit_field3(IsUnstable::update(bit_field3(), true));
+}
+
+
+bool Map::is_stable() {
+ return !IsUnstable::decode(bit_field3());
+}
+
+
+bool Map::has_code_cache() {
+ return code_cache() != GetIsolate()->heap()->empty_fixed_array();
+}
+
+
+bool Map::CanBeDeprecated() {
+ int descriptor = LastAdded();
+ for (int i = 0; i <= descriptor; i++) {
+ PropertyDetails details = instance_descriptors()->GetDetails(i);
+ if (FLAG_track_fields && details.representation().IsNone()) {
+ return true;
+ }
+ if (FLAG_track_fields && details.representation().IsSmi()) {
+ return true;
+ }
+ if (FLAG_track_double_fields && details.representation().IsDouble()) {
+ return true;
+ }
+ if (FLAG_track_heap_object_fields &&
+ details.representation().IsHeapObject()) {
+ return true;
+ }
+ if (FLAG_track_fields && details.type() == CONSTANT) {
+ return true;
+ }
+ }
+ return false;
+}
+
+
+void Map::NotifyLeafMapLayoutChange() {
+ if (is_stable()) {
+ mark_unstable();
+ dependent_code()->DeoptimizeDependentCodeGroup(
+ GetIsolate(),
+ DependentCode::kPrototypeCheckGroup);
+ }
+}
+
+
+bool Map::CanOmitMapChecks() {
+ return is_stable() && FLAG_omit_map_checks_for_leaf_maps;
+}
+
+
+int DependentCode::number_of_entries(DependencyGroup group) {
+ if (length() == 0) return 0;
+ return Smi::cast(get(group))->value();
+}
+
+
+void DependentCode::set_number_of_entries(DependencyGroup group, int value) {
+ set(group, Smi::FromInt(value));
+}
+
+
+bool DependentCode::is_code_at(int i) {
+ return get(kCodesStartIndex + i)->IsCode();
+}
+
+Code* DependentCode::code_at(int i) {
+ return Code::cast(get(kCodesStartIndex + i));
+}
+
+
+CompilationInfo* DependentCode::compilation_info_at(int i) {
+ return reinterpret_cast<CompilationInfo*>(
+ Foreign::cast(get(kCodesStartIndex + i))->foreign_address());
+}
+
+
+void DependentCode::set_object_at(int i, Object* object) {
+ set(kCodesStartIndex + i, object);
+}
+
+
+Object* DependentCode::object_at(int i) {
+ return get(kCodesStartIndex + i);
+}
+
+
+Object** DependentCode::slot_at(int i) {
+ return HeapObject::RawField(
+ this, FixedArray::OffsetOfElementAt(kCodesStartIndex + i));
+}
+
+
+void DependentCode::clear_at(int i) {
+ set_undefined(kCodesStartIndex + i);
+}
+
+
+void DependentCode::copy(int from, int to) {
+ set(kCodesStartIndex + to, get(kCodesStartIndex + from));
+}
+
+
+void DependentCode::ExtendGroup(DependencyGroup group) {
+ GroupStartIndexes starts(this);
+ for (int g = kGroupCount - 1; g > group; g--) {
+ if (starts.at(g) < starts.at(g + 1)) {
+ copy(starts.at(g), starts.at(g + 1));
+ }
+ }
+}
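
// Illustrative sketch (not part of the diff): DependentCode keeps its entries
// grouped contiguously after the per-group counts, and ExtendGroup above opens
// a slot at the end of `group` by bumping the first entry of every later group
// just past that group's end. A simplified standalone model with STL
// containers standing in for the FixedArray:
#include <cassert>
#include <vector>

// starts[g] is the index of group g's first entry and starts.back() is one
// past the last used slot; the caller guarantees one spare slot there.
static void ExtendGroupSketch(std::vector<int>& entries,
                              const std::vector<int>& starts,
                              int group) {
  assert(entries.size() > static_cast<size_t>(starts.back()));
  for (int g = static_cast<int>(starts.size()) - 2; g > group; g--) {
    if (starts[g] < starts[g + 1]) {
      // Group g is non-empty: duplicate its first entry just past its end.
      entries[starts[g + 1]] = entries[starts[g]];
    }
  }
  // entries[starts[group + 1]] is now free for the new entry of `group`; the
  // caller then bumps the stored count of `group`, which shifts the computed
  // start of every later group by one.
}
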
+
+
void Code::set_flags(Code::Flags flags) {
STATIC_ASSERT(Code::NUMBER_OF_KINDS <= KindField::kMax + 1);
// Make sure that all call stubs have an arguments count.
@@ -3172,34 +3833,59 @@ InlineCacheState Code::ic_state() {
// a call to code object has been replaced with a debug break call.
ASSERT(is_inline_cache_stub() ||
result == UNINITIALIZED ||
- result == DEBUG_BREAK ||
- result == DEBUG_PREPARE_STEP_IN);
+ result == DEBUG_STUB);
return result;
}
Code::ExtraICState Code::extra_ic_state() {
- ASSERT(is_inline_cache_stub());
+ ASSERT((is_inline_cache_stub() && !needs_extended_extra_ic_state(kind()))
+ || ic_state() == DEBUG_STUB);
return ExtractExtraICStateFromFlags(flags());
}
+Code::ExtraICState Code::extended_extra_ic_state() {
+ ASSERT(is_inline_cache_stub() || ic_state() == DEBUG_STUB);
+ ASSERT(needs_extended_extra_ic_state(kind()));
+ return ExtractExtendedExtraICStateFromFlags(flags());
+}
+
+
Code::StubType Code::type() {
return ExtractTypeFromFlags(flags());
}
int Code::arguments_count() {
- ASSERT(is_call_stub() || is_keyed_call_stub() || kind() == STUB);
+ ASSERT(is_call_stub() || is_keyed_call_stub() ||
+ kind() == STUB || is_handler());
return ExtractArgumentsCountFromFlags(flags());
}
+inline bool Code::is_crankshafted() {
+ return IsCrankshaftedField::decode(
+ READ_UINT32_FIELD(this, kKindSpecificFlags2Offset));
+}
+
+
+inline void Code::set_is_crankshafted(bool value) {
+ int previous = READ_UINT32_FIELD(this, kKindSpecificFlags2Offset);
+ int updated = IsCrankshaftedField::update(previous, value);
+ WRITE_UINT32_FIELD(this, kKindSpecificFlags2Offset, updated);
+}
+
+
int Code::major_key() {
ASSERT(kind() == STUB ||
- kind() == UNARY_OP_IC ||
+ kind() == HANDLER ||
kind() == BINARY_OP_IC ||
kind() == COMPARE_IC ||
+ kind() == COMPARE_NIL_IC ||
+ kind() == STORE_IC ||
+ kind() == LOAD_IC ||
+ kind() == KEYED_LOAD_IC ||
kind() == TO_BOOLEAN_IC);
return StubMajorKeyField::decode(
READ_UINT32_FIELD(this, kKindSpecificFlags2Offset));
@@ -3208,9 +3894,14 @@ int Code::major_key() {
void Code::set_major_key(int major) {
ASSERT(kind() == STUB ||
- kind() == UNARY_OP_IC ||
+ kind() == HANDLER ||
kind() == BINARY_OP_IC ||
kind() == COMPARE_IC ||
+ kind() == COMPARE_NIL_IC ||
+ kind() == LOAD_IC ||
+ kind() == KEYED_LOAD_IC ||
+ kind() == STORE_IC ||
+ kind() == KEYED_STORE_IC ||
kind() == TO_BOOLEAN_IC);
ASSERT(0 <= major && major < 256);
int previous = READ_UINT32_FIELD(this, kKindSpecificFlags2Offset);
@@ -3220,7 +3911,7 @@ void Code::set_major_key(int major) {
bool Code::is_pregenerated() {
- return kind() == STUB && IsPregeneratedField::decode(flags());
+ return (kind() == STUB && IsPregeneratedField::decode(flags()));
}
@@ -3316,7 +4007,7 @@ void Code::set_profiler_ticks(int ticks) {
unsigned Code::stack_slots() {
- ASSERT(kind() == OPTIMIZED_FUNCTION);
+ ASSERT(is_crankshafted());
return StackSlotsField::decode(
READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
}
@@ -3324,7 +4015,7 @@ unsigned Code::stack_slots() {
void Code::set_stack_slots(unsigned slots) {
CHECK(slots <= (1 << kStackSlotsBitCount));
- ASSERT(kind() == OPTIMIZED_FUNCTION);
+ ASSERT(is_crankshafted());
int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
int updated = StackSlotsField::update(previous, slots);
WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
@@ -3332,7 +4023,7 @@ void Code::set_stack_slots(unsigned slots) {
unsigned Code::safepoint_table_offset() {
- ASSERT(kind() == OPTIMIZED_FUNCTION);
+ ASSERT(is_crankshafted());
return SafepointTableOffsetField::decode(
READ_UINT32_FIELD(this, kKindSpecificFlags2Offset));
}
@@ -3340,7 +4031,7 @@ unsigned Code::safepoint_table_offset() {
void Code::set_safepoint_table_offset(unsigned offset) {
CHECK(offset <= (1 << kSafepointTableOffsetBitCount));
- ASSERT(kind() == OPTIMIZED_FUNCTION);
+ ASSERT(is_crankshafted());
ASSERT(IsAligned(offset, static_cast<unsigned>(kIntSize)));
int previous = READ_UINT32_FIELD(this, kKindSpecificFlags2Offset);
int updated = SafepointTableOffsetField::update(previous, offset);
@@ -3348,143 +4039,104 @@ void Code::set_safepoint_table_offset(unsigned offset) {
}
-unsigned Code::stack_check_table_offset() {
+unsigned Code::back_edge_table_offset() {
ASSERT_EQ(FUNCTION, kind());
- return StackCheckTableOffsetField::decode(
+ return BackEdgeTableOffsetField::decode(
READ_UINT32_FIELD(this, kKindSpecificFlags2Offset));
}
-void Code::set_stack_check_table_offset(unsigned offset) {
+void Code::set_back_edge_table_offset(unsigned offset) {
ASSERT_EQ(FUNCTION, kind());
ASSERT(IsAligned(offset, static_cast<unsigned>(kIntSize)));
int previous = READ_UINT32_FIELD(this, kKindSpecificFlags2Offset);
- int updated = StackCheckTableOffsetField::update(previous, offset);
+ int updated = BackEdgeTableOffsetField::update(previous, offset);
WRITE_UINT32_FIELD(this, kKindSpecificFlags2Offset, updated);
}
-CheckType Code::check_type() {
- ASSERT(is_call_stub() || is_keyed_call_stub());
- byte type = READ_BYTE_FIELD(this, kCheckTypeOffset);
- return static_cast<CheckType>(type);
-}
-
-
-void Code::set_check_type(CheckType value) {
- ASSERT(is_call_stub() || is_keyed_call_stub());
- WRITE_BYTE_FIELD(this, kCheckTypeOffset, value);
-}
-
-
-byte Code::unary_op_type() {
- ASSERT(is_unary_op_stub());
- return UnaryOpTypeField::decode(
- READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
-}
-
-
-void Code::set_unary_op_type(byte value) {
- ASSERT(is_unary_op_stub());
- int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
- int updated = UnaryOpTypeField::update(previous, value);
- WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
-}
-
-
-byte Code::binary_op_type() {
- ASSERT(is_binary_op_stub());
- return BinaryOpTypeField::decode(
- READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
+bool Code::back_edges_patched_for_osr() {
+ ASSERT_EQ(FUNCTION, kind());
+ return BackEdgesPatchedForOSRField::decode(
+ READ_UINT32_FIELD(this, kKindSpecificFlags2Offset));
}
-void Code::set_binary_op_type(byte value) {
- ASSERT(is_binary_op_stub());
- int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
- int updated = BinaryOpTypeField::update(previous, value);
- WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
+void Code::set_back_edges_patched_for_osr(bool value) {
+ ASSERT_EQ(FUNCTION, kind());
+ int previous = READ_UINT32_FIELD(this, kKindSpecificFlags2Offset);
+ int updated = BackEdgesPatchedForOSRField::update(previous, value);
+ WRITE_UINT32_FIELD(this, kKindSpecificFlags2Offset, updated);
}
-byte Code::binary_op_result_type() {
- ASSERT(is_binary_op_stub());
- return BinaryOpResultTypeField::decode(
- READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
-}
-
-void Code::set_binary_op_result_type(byte value) {
- ASSERT(is_binary_op_stub());
- int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
- int updated = BinaryOpResultTypeField::update(previous, value);
- WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
+CheckType Code::check_type() {
+ ASSERT(is_call_stub() || is_keyed_call_stub());
+ byte type = READ_BYTE_FIELD(this, kCheckTypeOffset);
+ return static_cast<CheckType>(type);
}
-byte Code::compare_state() {
- ASSERT(is_compare_ic_stub());
- return CompareStateField::decode(
- READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
+void Code::set_check_type(CheckType value) {
+ ASSERT(is_call_stub() || is_keyed_call_stub());
+ WRITE_BYTE_FIELD(this, kCheckTypeOffset, value);
}
-void Code::set_compare_state(byte value) {
- ASSERT(is_compare_ic_stub());
- int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
- int updated = CompareStateField::update(previous, value);
- WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
+byte Code::to_boolean_state() {
+ return extended_extra_ic_state();
}
-byte Code::compare_operation() {
- ASSERT(is_compare_ic_stub());
- return CompareOperationField::decode(
+bool Code::has_function_cache() {
+ ASSERT(kind() == STUB);
+ return HasFunctionCacheField::decode(
READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
}
-void Code::set_compare_operation(byte value) {
- ASSERT(is_compare_ic_stub());
+void Code::set_has_function_cache(bool flag) {
+ ASSERT(kind() == STUB);
int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
- int updated = CompareOperationField::update(previous, value);
+ int updated = HasFunctionCacheField::update(previous, flag);
WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
}
-byte Code::to_boolean_state() {
- ASSERT(is_to_boolean_ic_stub());
- return ToBooleanStateField::decode(
+bool Code::marked_for_deoptimization() {
+ ASSERT(kind() == OPTIMIZED_FUNCTION);
+ return MarkedForDeoptimizationField::decode(
READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
}
-void Code::set_to_boolean_state(byte value) {
- ASSERT(is_to_boolean_ic_stub());
+void Code::set_marked_for_deoptimization(bool flag) {
+ ASSERT(kind() == OPTIMIZED_FUNCTION);
int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
- int updated = ToBooleanStateField::update(previous, value);
+ int updated = MarkedForDeoptimizationField::update(previous, flag);
WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
}
-bool Code::has_function_cache() {
- ASSERT(kind() == STUB);
- return HasFunctionCacheField::decode(
- READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
+bool Code::is_inline_cache_stub() {
+ Kind kind = this->kind();
+ switch (kind) {
+#define CASE(name) case name: return true;
+ IC_KIND_LIST(CASE)
+#undef CASE
+ default: return false;
+ }
}
-void Code::set_has_function_cache(bool flag) {
- ASSERT(kind() == STUB);
- int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
- int updated = HasFunctionCacheField::update(previous, flag);
- WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
+bool Code::is_keyed_stub() {
+ return is_keyed_load_stub() || is_keyed_store_stub() || is_keyed_call_stub();
}
-bool Code::is_inline_cache_stub() {
- Kind kind = this->kind();
- return kind >= FIRST_IC_KIND && kind <= LAST_IC_KIND;
+bool Code::is_debug_stub() {
+ return ic_state() == DEBUG_STUB;
}
@@ -3494,29 +4146,30 @@ Code::Flags Code::ComputeFlags(Kind kind,
StubType type,
int argc,
InlineCacheHolderFlag holder) {
- // Extra IC state is only allowed for call IC stubs or for store IC
- // stubs.
- ASSERT(extra_ic_state == kNoExtraICState ||
- kind == CALL_IC ||
- kind == STORE_IC ||
- kind == KEYED_STORE_IC);
ASSERT(argc <= Code::kMaxArguments);
+ // Since the extended extra IC state overlaps with the argument count
+ // for CALL_ICs, do some checks to make sure that they don't interfere.
+ ASSERT((kind != Code::CALL_IC &&
+ kind != Code::KEYED_CALL_IC) ||
+ (ExtraICStateField::encode(extra_ic_state) | true));
// Compute the bit mask.
unsigned int bits = KindField::encode(kind)
| ICStateField::encode(ic_state)
| TypeField::encode(type)
- | ExtraICStateField::encode(extra_ic_state)
- | (argc << kArgumentsCountShift)
+ | ExtendedExtraICStateField::encode(extra_ic_state)
| CacheHolderField::encode(holder);
+ if (!Code::needs_extended_extra_ic_state(kind)) {
+ bits |= (argc << kArgumentsCountShift);
+ }
return static_cast<Flags>(bits);
}
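
// Illustrative sketch (not part of the diff): the KindField / ICStateField /
// ExtendedExtraICStateField helpers used above follow a BitField pattern --
// each field owns a fixed bit range of the 32-bit flags word, and encode,
// decode and update are plain shifts and masks. A standalone equivalent
// (the positions and widths below are made up for illustration):
#include <stdint.h>

template <class T, int kShift, int kSize>
struct BitFieldSketch {
  static const uint32_t kMask = ((1u << kSize) - 1) << kShift;
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << kShift;
  }
  static T decode(uint32_t word) {
    return static_cast<T>((word & kMask) >> kShift);
  }
  static uint32_t update(uint32_t word, T value) {
    return (word & ~kMask) | encode(value);
  }
};

// Example: a 4-bit "kind" at bit 0 and a 3-bit "state" at bit 4.
typedef BitFieldSketch<int, 0, 4> KindFieldSketch;
typedef BitFieldSketch<int, 4, 3> StateFieldSketch;
// uint32_t flags = KindFieldSketch::encode(2) | StateFieldSketch::encode(1);
// int kind = KindFieldSketch::decode(flags);  // == 2
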
Code::Flags Code::ComputeMonomorphicFlags(Kind kind,
- StubType type,
ExtraICState extra_ic_state,
- InlineCacheHolderFlag holder,
- int argc) {
+ StubType type,
+ int argc,
+ InlineCacheHolderFlag holder) {
return ComputeFlags(kind, MONOMORPHIC, extra_ic_state, type, argc, holder);
}
@@ -3536,6 +4189,12 @@ Code::ExtraICState Code::ExtractExtraICStateFromFlags(Flags flags) {
}
+Code::ExtraICState Code::ExtractExtendedExtraICStateFromFlags(
+ Flags flags) {
+ return ExtendedExtraICStateField::decode(flags);
+}
+
+
Code::StubType Code::ExtractTypeFromFlags(Flags flags) {
return TypeField::decode(flags);
}
@@ -3592,7 +4251,7 @@ static MaybeObject* EnsureHasTransitionArray(Map* map) {
TransitionArray* transitions;
MaybeObject* maybe_transitions;
if (!map->HasTransitionArray()) {
- maybe_transitions = TransitionArray::Allocate(0);
+ maybe_transitions = TransitionArray::Allocate(map->GetIsolate(), 0);
if (!maybe_transitions->To(&transitions)) return maybe_transitions;
transitions->set_back_pointer_storage(map->GetBackPointer());
} else if (!map->transitions()->IsFullTransitionArray()) {
@@ -3608,30 +4267,26 @@ static MaybeObject* EnsureHasTransitionArray(Map* map) {
void Map::InitializeDescriptors(DescriptorArray* descriptors) {
int len = descriptors->number_of_descriptors();
-#ifdef DEBUG
- ASSERT(len <= DescriptorArray::kMaxNumberOfDescriptors);
-
- bool used_indices[DescriptorArray::kMaxNumberOfDescriptors];
- for (int i = 0; i < len; ++i) used_indices[i] = false;
-
- // Ensure that all enumeration indexes between 1 and length occur uniquely in
- // the descriptor array.
- for (int i = 0; i < len; ++i) {
- int enum_index = descriptors->GetDetails(i).descriptor_index() -
- PropertyDetails::kInitialIndex;
- ASSERT(0 <= enum_index && enum_index < len);
- ASSERT(!used_indices[enum_index]);
- used_indices[enum_index] = true;
- }
-#endif
-
set_instance_descriptors(descriptors);
SetNumberOfOwnDescriptors(len);
}
ACCESSORS(Map, instance_descriptors, DescriptorArray, kDescriptorsOffset)
-SMI_ACCESSORS(Map, bit_field3, kBitField3Offset)
+
+
+void Map::set_bit_field3(uint32_t bits) {
+ // Ensure the upper 2 bits have the same value by sign extending it. This is
+ // necessary to be able to use the 31st bit.
+ int value = bits << 1;
+ WRITE_FIELD(this, kBitField3Offset, Smi::FromInt(value >> 1));
+}
+
+
+uint32_t Map::bit_field3() {
+ Object* value = READ_FIELD(this, kBitField3Offset);
+ return Smi::cast(value)->value();
+}
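
// Illustrative sketch (not part of the diff): why the shift pair above lets a
// 31-bit payload live in a Smi-sized slot. Shifting left and then
// arithmetically right makes bit 31 mirror bit 30, so bit 30 (the "31st bit"
// of the comment) survives even though only 31 bits are actually stored.
// Plain integers stand in for the Smi; an arithmetic right shift of a negative
// int is assumed, as on the compilers V8 targets.
#include <assert.h>
#include <stdint.h>

static int32_t StoreBitField3(uint32_t bits) {
  int32_t value = static_cast<int32_t>(bits << 1);
  return value >> 1;  // Sign-extends bit 30 into bit 31.
}

static uint32_t LoadBitField3(int32_t stored) {
  return static_cast<uint32_t>(stored);
}

int main() {
  uint32_t bits = 1u << 30;
  // Only the low 31 bits are meaningful; bit 31 merely mirrors bit 30.
  assert((LoadBitField3(StoreBitField3(bits)) & 0x7FFFFFFFu) == bits);
  return 0;
}
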
void Map::ClearTransitions(Heap* heap, WriteBarrierMode mode) {
@@ -3680,7 +4335,8 @@ bool Map::HasTransitionArray() {
Map* Map::elements_transition_map() {
- return transitions()->elements_transition();
+ int index = transitions()->Search(GetHeap()->elements_transition_symbol());
+ return transitions()->GetTarget(index);
}
@@ -3692,7 +4348,7 @@ bool Map::CanHaveMoreTransitions() {
}
-MaybeObject* Map::AddTransition(String* key,
+MaybeObject* Map::AddTransition(Name* key,
Map* target,
SimpleTransitionFlag flag) {
if (HasTransitionArray()) return transitions()->CopyInsert(key, target);
@@ -3711,10 +4367,14 @@ Map* Map::GetTransition(int transition_index) {
MaybeObject* Map::set_elements_transition_map(Map* transitioned_map) {
- MaybeObject* allow_elements = EnsureHasTransitionArray(this);
- if (allow_elements->IsFailure()) return allow_elements;
- transitions()->set_elements_transition(transitioned_map);
- return this;
+ TransitionArray* transitions;
+ MaybeObject* maybe_transitions = AddTransition(
+ GetHeap()->elements_transition_symbol(),
+ transitioned_map,
+ FULL_TRANSITION);
+ if (!maybe_transitions->To(&transitions)) return maybe_transitions;
+ set_transitions(transitions);
+ return transitions;
}
@@ -3730,6 +4390,7 @@ FixedArray* Map::GetPrototypeTransitions() {
MaybeObject* Map::SetPrototypeTransitions(FixedArray* proto_transitions) {
MaybeObject* allow_prototype = EnsureHasTransitionArray(this);
if (allow_prototype->IsFailure()) return allow_prototype;
+ int old_number_of_transitions = NumberOfProtoTransitions();
#ifdef DEBUG
if (HasPrototypeTransitions()) {
ASSERT(GetPrototypeTransitions() != proto_transitions);
@@ -3737,6 +4398,7 @@ MaybeObject* Map::SetPrototypeTransitions(FixedArray* proto_transitions) {
}
#endif
transitions()->SetPrototypeTransitions(proto_transitions);
+ SetNumberOfProtoTransitions(old_number_of_transitions);
return this;
}
@@ -3755,9 +4417,12 @@ TransitionArray* Map::transitions() {
void Map::set_transitions(TransitionArray* transition_array,
WriteBarrierMode mode) {
- // In release mode, only run this code if verify_heap is on.
- if (Heap::ShouldZapGarbage() && HasTransitionArray()) {
- CHECK(transitions() != transition_array);
+ // Transition arrays are not shared. When one is replaced, it should not
+ // keep referenced objects alive, so we zap it.
+ // When there is another reference to the array somewhere (e.g. a handle),
+ // not zapping turns from a waste of memory into a source of crashes.
+ if (HasTransitionArray()) {
+ ASSERT(transitions() != transition_array);
ZapTransitions();
}
@@ -3806,6 +4471,7 @@ HeapObject* Map::UncheckedPrototypeTransitions() {
ACCESSORS(Map, code_cache, Object, kCodeCacheOffset)
+ACCESSORS(Map, dependent_code, DependentCode, kDependentCodeOffset)
ACCESSORS(Map, constructor, Object, kConstructorOffset)
ACCESSORS(JSFunction, shared, SharedFunctionInfo, kSharedFunctionInfoOffset)
@@ -3819,16 +4485,26 @@ ACCESSORS(GlobalObject, global_receiver, JSObject, kGlobalReceiverOffset)
ACCESSORS(JSGlobalProxy, native_context, Object, kNativeContextOffset)
-ACCESSORS(AccessorInfo, getter, Object, kGetterOffset)
-ACCESSORS(AccessorInfo, setter, Object, kSetterOffset)
-ACCESSORS(AccessorInfo, data, Object, kDataOffset)
ACCESSORS(AccessorInfo, name, Object, kNameOffset)
ACCESSORS_TO_SMI(AccessorInfo, flag, kFlagOffset)
ACCESSORS(AccessorInfo, expected_receiver_type, Object,
kExpectedReceiverTypeOffset)
+ACCESSORS(DeclaredAccessorDescriptor, serialized_data, ByteArray,
+ kSerializedDataOffset)
+
+ACCESSORS(DeclaredAccessorInfo, descriptor, DeclaredAccessorDescriptor,
+ kDescriptorOffset)
+
+ACCESSORS(ExecutableAccessorInfo, getter, Object, kGetterOffset)
+ACCESSORS(ExecutableAccessorInfo, setter, Object, kSetterOffset)
+ACCESSORS(ExecutableAccessorInfo, data, Object, kDataOffset)
+
+ACCESSORS(Box, value, Object, kValueOffset)
+
ACCESSORS(AccessorPair, getter, Object, kGetterOffset)
ACCESSORS(AccessorPair, setter, Object, kSetterOffset)
+ACCESSORS_TO_SMI(AccessorPair, access_flags, kAccessFlagsOffset)
ACCESSORS(AccessCheckInfo, named_callback, Object, kNamedCallbackOffset)
ACCESSORS(AccessCheckInfo, indexed_callback, Object, kIndexedCallbackOffset)
@@ -3846,11 +4522,10 @@ ACCESSORS(CallHandlerInfo, data, Object, kDataOffset)
ACCESSORS(TemplateInfo, tag, Object, kTagOffset)
ACCESSORS(TemplateInfo, property_list, Object, kPropertyListOffset)
+ACCESSORS(TemplateInfo, property_accessors, Object, kPropertyAccessorsOffset)
ACCESSORS(FunctionTemplateInfo, serial_number, Object, kSerialNumberOffset)
ACCESSORS(FunctionTemplateInfo, call_code, Object, kCallCodeOffset)
-ACCESSORS(FunctionTemplateInfo, property_accessors, Object,
- kPropertyAccessorsOffset)
ACCESSORS(FunctionTemplateInfo, prototype_template, Object,
kPrototypeTemplateOffset)
ACCESSORS(FunctionTemplateInfo, parent_template, Object, kParentTemplateOffset)
@@ -3877,21 +4552,46 @@ ACCESSORS(SignatureInfo, args, Object, kArgsOffset)
ACCESSORS(TypeSwitchInfo, types, Object, kTypesOffset)
+ACCESSORS(AllocationSite, transition_info, Object, kTransitionInfoOffset)
+ACCESSORS(AllocationSite, nested_site, Object, kNestedSiteOffset)
+ACCESSORS(AllocationSite, dependent_code, DependentCode,
+ kDependentCodeOffset)
+ACCESSORS(AllocationSite, weak_next, Object, kWeakNextOffset)
+ACCESSORS(AllocationMemento, allocation_site, Object, kAllocationSiteOffset)
+
ACCESSORS(Script, source, Object, kSourceOffset)
ACCESSORS(Script, name, Object, kNameOffset)
-ACCESSORS(Script, id, Object, kIdOffset)
+ACCESSORS(Script, id, Smi, kIdOffset)
ACCESSORS_TO_SMI(Script, line_offset, kLineOffsetOffset)
ACCESSORS_TO_SMI(Script, column_offset, kColumnOffsetOffset)
ACCESSORS(Script, data, Object, kDataOffset)
ACCESSORS(Script, context_data, Object, kContextOffset)
ACCESSORS(Script, wrapper, Foreign, kWrapperOffset)
ACCESSORS_TO_SMI(Script, type, kTypeOffset)
-ACCESSORS_TO_SMI(Script, compilation_type, kCompilationTypeOffset)
-ACCESSORS_TO_SMI(Script, compilation_state, kCompilationStateOffset)
ACCESSORS(Script, line_ends, Object, kLineEndsOffset)
ACCESSORS(Script, eval_from_shared, Object, kEvalFromSharedOffset)
ACCESSORS_TO_SMI(Script, eval_from_instructions_offset,
kEvalFrominstructionsOffsetOffset)
+ACCESSORS_TO_SMI(Script, flags, kFlagsOffset)
+BOOL_ACCESSORS(Script, flags, is_shared_cross_origin, kIsSharedCrossOriginBit)
+
+Script::CompilationType Script::compilation_type() {
+ return BooleanBit::get(flags(), kCompilationTypeBit) ?
+ COMPILATION_TYPE_EVAL : COMPILATION_TYPE_HOST;
+}
+void Script::set_compilation_type(CompilationType type) {
+ set_flags(BooleanBit::set(flags(), kCompilationTypeBit,
+ type == COMPILATION_TYPE_EVAL));
+}
+Script::CompilationState Script::compilation_state() {
+ return BooleanBit::get(flags(), kCompilationStateBit) ?
+ COMPILATION_STATE_COMPILED : COMPILATION_STATE_INITIAL;
+}
+void Script::set_compilation_state(CompilationState state) {
+ set_flags(BooleanBit::set(flags(), kCompilationStateBit,
+ state == COMPILATION_STATE_COMPILED));
+}
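
// Illustrative sketch (not part of the diff): compilation type and state are
// now single bits of the Smi-backed flags field, read and written through
// BooleanBit::get/set. A standalone equivalent of that helper over a plain
// int flags word:
struct BooleanBitSketch {
  static bool get(int flags, int bit_position) {
    return (flags & (1 << bit_position)) != 0;
  }
  static int set(int flags, int bit_position, bool value) {
    int mask = 1 << bit_position;
    return value ? (flags | mask) : (flags & ~mask);
  }
};
// e.g. flags = BooleanBitSketch::set(flags, kCompilationTypeBit, true) would
// mark a script as COMPILATION_TYPE_EVAL, mirroring set_compilation_type above.
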
+
#ifdef ENABLE_DEBUGGER_SUPPORT
ACCESSORS(DebugInfo, shared, SharedFunctionInfo, kSharedFunctionInfoIndex)
@@ -3916,11 +4616,10 @@ ACCESSORS(SharedFunctionInfo, function_data, Object, kFunctionDataOffset)
ACCESSORS(SharedFunctionInfo, script, Object, kScriptOffset)
ACCESSORS(SharedFunctionInfo, debug_info, Object, kDebugInfoOffset)
ACCESSORS(SharedFunctionInfo, inferred_name, String, kInferredNameOffset)
-ACCESSORS(SharedFunctionInfo, this_property_assignments, Object,
- kThisPropertyAssignmentsOffset)
SMI_ACCESSORS(SharedFunctionInfo, ast_node_count, kAstNodeCountOffset)
+SMI_ACCESSORS(FunctionTemplateInfo, length, kLengthOffset)
BOOL_ACCESSORS(FunctionTemplateInfo, flag, hidden_prototype,
kHiddenPrototypeBit)
BOOL_ACCESSORS(FunctionTemplateInfo, flag, undetectable, kUndetectableBit)
@@ -3928,14 +4627,15 @@ BOOL_ACCESSORS(FunctionTemplateInfo, flag, needs_access_check,
kNeedsAccessCheckBit)
BOOL_ACCESSORS(FunctionTemplateInfo, flag, read_only_prototype,
kReadOnlyPrototypeBit)
+BOOL_ACCESSORS(FunctionTemplateInfo, flag, remove_prototype,
+ kRemovePrototypeBit)
+BOOL_ACCESSORS(FunctionTemplateInfo, flag, do_not_cache,
+ kDoNotCacheBit)
BOOL_ACCESSORS(SharedFunctionInfo, start_position_and_type, is_expression,
kIsExpressionBit)
BOOL_ACCESSORS(SharedFunctionInfo, start_position_and_type, is_toplevel,
kIsTopLevelBit)
-BOOL_GETTER(SharedFunctionInfo,
- compiler_hints,
- has_only_simple_this_property_assignments,
- kHasOnlySimpleThisPropertyAssignments)
+
BOOL_ACCESSORS(SharedFunctionInfo,
compiler_hints,
allows_lazy_compilation,
@@ -3968,13 +4668,10 @@ SMI_ACCESSORS(SharedFunctionInfo, function_token_position,
kFunctionTokenPositionOffset)
SMI_ACCESSORS(SharedFunctionInfo, compiler_hints,
kCompilerHintsOffset)
-SMI_ACCESSORS(SharedFunctionInfo, this_property_assignments_count,
- kThisPropertyAssignmentsCountOffset)
-SMI_ACCESSORS(SharedFunctionInfo, opt_count, kOptCountOffset)
+SMI_ACCESSORS(SharedFunctionInfo, opt_count_and_bailout_reason,
+ kOptCountAndBailoutReasonOffset)
SMI_ACCESSORS(SharedFunctionInfo, counters, kCountersOffset)
-SMI_ACCESSORS(SharedFunctionInfo,
- stress_deopt_counter,
- kStressDeoptCounterOffset)
+
#else
#define PSEUDO_SMI_ACCESSORS_LO(holder, name, offset) \
@@ -4022,14 +4719,11 @@ PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo,
kCompilerHintsOffset)
PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo,
- this_property_assignments_count,
- kThisPropertyAssignmentsCountOffset)
-PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo, opt_count, kOptCountOffset)
+ opt_count_and_bailout_reason,
+ kOptCountAndBailoutReasonOffset)
+
+PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo, counters, kCountersOffset)
-PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo, counters, kCountersOffset)
-PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo,
- stress_deopt_counter,
- kStressDeoptCounterOffset)
#endif
@@ -4123,15 +4817,11 @@ BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, dont_optimize,
kDontOptimize)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, dont_inline, kDontInline)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, dont_cache, kDontCache)
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, dont_flush, kDontFlush)
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_generator, kIsGenerator)
void SharedFunctionInfo::BeforeVisitingPointers() {
if (IsInobjectSlackTrackingInProgress()) DetachInitialMap();
-
- // Flush optimized code map on major GC.
- // Note: we may experiment with rebuilding it or retaining entries
- // which should survive as we iterate through optimized functions
- // anyway.
- set_optimized_code_map(Smi::FromInt(0));
}
@@ -4145,7 +4835,7 @@ bool Script::HasValidSource() {
if (!src->IsString()) return true;
String* src_str = String::cast(src);
if (!StringShape(src_str).IsExternal()) return true;
- if (src_str->IsAsciiRepresentation()) {
+ if (src_str->IsOneByteRepresentation()) {
return ExternalAsciiString::cast(src)->resource() != NULL;
} else if (src_str->IsTwoByteRepresentation()) {
return ExternalTwoByteString::cast(src)->resource() != NULL;
@@ -4176,17 +4866,25 @@ Code* SharedFunctionInfo::code() {
}
-Code* SharedFunctionInfo::unchecked_code() {
- return reinterpret_cast<Code*>(READ_FIELD(this, kCodeOffset));
-}
-
-
void SharedFunctionInfo::set_code(Code* value, WriteBarrierMode mode) {
WRITE_FIELD(this, kCodeOffset, value);
CONDITIONAL_WRITE_BARRIER(value->GetHeap(), this, kCodeOffset, value, mode);
}
+void SharedFunctionInfo::ReplaceCode(Code* value) {
+ // If the GC metadata field is already used then the function was
+ // enqueued as a code flushing candidate and we remove it now.
+ if (code()->gc_metadata() != NULL) {
+ CodeFlusher* flusher = GetHeap()->mark_compact_collector()->code_flusher();
+ flusher->EvictCandidate(this);
+ }
+
+ ASSERT(code()->gc_metadata() == NULL && value->gc_metadata() == NULL);
+ set_code(value);
+}
+
+
ScopeInfo* SharedFunctionInfo::scope_info() {
return reinterpret_cast<ScopeInfo*>(READ_FIELD(this, kScopeInfoOffset));
}
@@ -4205,7 +4903,7 @@ void SharedFunctionInfo::set_scope_info(ScopeInfo* value,
bool SharedFunctionInfo::is_compiled() {
return code() !=
- Isolate::Current()->builtins()->builtin(Builtins::kLazyCompile);
+ GetIsolate()->builtins()->builtin(Builtins::kLazyCompile);
}
@@ -4231,17 +4929,6 @@ BuiltinFunctionId SharedFunctionInfo::builtin_function_id() {
}
-int SharedFunctionInfo::code_age() {
- return (compiler_hints() >> kCodeAgeShift) & kCodeAgeMask;
-}
-
-
-void SharedFunctionInfo::set_code_age(int code_age) {
- int hints = compiler_hints() & ~(kCodeAgeMask << kCodeAgeShift);
- set_compiler_hints(hints | ((code_age & kCodeAgeMask) << kCodeAgeShift));
-}
-
-
int SharedFunctionInfo::ic_age() {
return ICAgeBits::decode(counters());
}
@@ -4280,6 +4967,24 @@ void SharedFunctionInfo::set_opt_reenable_tries(int tries) {
}
+int SharedFunctionInfo::opt_count() {
+ return OptCountBits::decode(opt_count_and_bailout_reason());
+}
+
+
+void SharedFunctionInfo::set_opt_count(int opt_count) {
+ set_opt_count_and_bailout_reason(
+ OptCountBits::update(opt_count_and_bailout_reason(), opt_count));
+}
+
+
+BailoutReason SharedFunctionInfo::DisableOptimizationReason() {
+ BailoutReason reason = static_cast<BailoutReason>(
+ DisabledOptimizationReasonBits::decode(opt_count_and_bailout_reason()));
+ return reason;
+}
+
+
bool SharedFunctionInfo::has_deoptimization_support() {
Code* code = this->code();
return code->kind() == Code::FUNCTION && code->has_deoptimization_support();
@@ -4326,9 +5031,9 @@ bool JSFunction::IsMarkedForLazyRecompilation() {
}
-bool JSFunction::IsMarkedForParallelRecompilation() {
- return code() ==
- GetIsolate()->builtins()->builtin(Builtins::kParallelRecompile);
+bool JSFunction::IsMarkedForConcurrentRecompilation() {
+ return code() == GetIsolate()->builtins()->builtin(
+ Builtins::kConcurrentRecompile);
}
@@ -4339,18 +5044,13 @@ bool JSFunction::IsInRecompileQueue() {
Code* JSFunction::code() {
- return Code::cast(unchecked_code());
-}
-
-
-Code* JSFunction::unchecked_code() {
- return reinterpret_cast<Code*>(
+ return Code::cast(
Code::GetObjectFromEntryAddress(FIELD_ADDR(this, kCodeEntryOffset)));
}
void JSFunction::set_code(Code* value) {
- ASSERT(!HEAP->InNewSpace(value));
+ ASSERT(!GetHeap()->InNewSpace(value));
Address entry = value->entry();
WRITE_INTPTR_FIELD(this, kCodeEntryOffset, reinterpret_cast<intptr_t>(entry));
GetHeap()->incremental_marking()->RecordWriteOfCodeEntry(
@@ -4360,6 +5060,13 @@ void JSFunction::set_code(Code* value) {
}
+void JSFunction::set_code_no_write_barrier(Code* value) {
+ ASSERT(!GetHeap()->InNewSpace(value));
+ Address entry = value->entry();
+ WRITE_INTPTR_FIELD(this, kCodeEntryOffset, reinterpret_cast<intptr_t>(entry));
+}
+
+
void JSFunction::ReplaceCode(Code* code) {
bool was_optimized = IsOptimized();
bool is_optimized = code->kind() == Code::OPTIMIZED_FUNCTION;
@@ -4372,6 +5079,7 @@ void JSFunction::ReplaceCode(Code* code) {
context()->native_context()->AddOptimizedFunction(this);
}
if (was_optimized && !is_optimized) {
+ // TODO(titzer): linear in the number of optimized functions; fix!
context()->native_context()->RemoveOptimizedFunction(this);
}
}
@@ -4382,17 +5090,6 @@ Context* JSFunction::context() {
}
-Object* JSFunction::unchecked_context() {
- return READ_FIELD(this, kContextOffset);
-}
-
-
-SharedFunctionInfo* JSFunction::unchecked_shared() {
- return reinterpret_cast<SharedFunctionInfo*>(
- READ_FIELD(this, kSharedFunctionInfoOffset));
-}
-
-
void JSFunction::set_context(Object* value) {
ASSERT(value->IsUndefined() || value->IsContext());
WRITE_FIELD(this, kContextOffset, value);
@@ -4514,7 +5211,7 @@ void JSBuiltinsObject::set_javascript_builtin_code(Builtins::JavaScript id,
Code* value) {
ASSERT(id < kJSBuiltinsCount); // id is unsigned.
WRITE_FIELD(this, OffsetOfCodeWithId(id), value);
- ASSERT(!HEAP->InNewSpace(value));
+ ASSERT(!GetHeap()->InNewSpace(value));
}
@@ -4534,8 +5231,8 @@ void JSProxy::InitializeBody(int object_size, Object* value) {
ACCESSORS(JSSet, table, Object, kTableOffset)
ACCESSORS(JSMap, table, Object, kTableOffset)
-ACCESSORS(JSWeakMap, table, Object, kTableOffset)
-ACCESSORS(JSWeakMap, next, Object, kNextOffset)
+ACCESSORS(JSWeakCollection, table, Object, kTableOffset)
+ACCESSORS(JSWeakCollection, next, Object, kNextOffset)
Address Foreign::foreign_address() {
@@ -4548,6 +5245,21 @@ void Foreign::set_foreign_address(Address value) {
}
+ACCESSORS(JSGeneratorObject, function, JSFunction, kFunctionOffset)
+ACCESSORS(JSGeneratorObject, context, Context, kContextOffset)
+ACCESSORS(JSGeneratorObject, receiver, Object, kReceiverOffset)
+SMI_ACCESSORS(JSGeneratorObject, continuation, kContinuationOffset)
+ACCESSORS(JSGeneratorObject, operand_stack, FixedArray, kOperandStackOffset)
+SMI_ACCESSORS(JSGeneratorObject, stack_handler_index, kStackHandlerIndexOffset)
+
+
+JSGeneratorObject* JSGeneratorObject::cast(Object* obj) {
+ ASSERT(obj->IsJSGeneratorObject());
+ ASSERT(HeapObject::cast(obj)->Size() == JSGeneratorObject::kSize);
+ return reinterpret_cast<JSGeneratorObject*>(obj);
+}
+
+
ACCESSORS(JSModule, context, Object, kContextOffset)
ACCESSORS(JSModule, scope_info, ScopeInfo, kScopeInfoOffset)
@@ -4604,13 +5316,71 @@ JSMessageObject* JSMessageObject::cast(Object* obj) {
INT_ACCESSORS(Code, instruction_size, kInstructionSizeOffset)
+INT_ACCESSORS(Code, prologue_offset, kPrologueOffset)
ACCESSORS(Code, relocation_info, ByteArray, kRelocationInfoOffset)
ACCESSORS(Code, handler_table, FixedArray, kHandlerTableOffset)
ACCESSORS(Code, deoptimization_data, FixedArray, kDeoptimizationDataOffset)
-ACCESSORS(Code, type_feedback_info, Object, kTypeFeedbackInfoOffset)
+
+
+// Type feedback slot: type_feedback_info for FUNCTIONs, stub_info for STUBs.
+void Code::InitializeTypeFeedbackInfoNoWriteBarrier(Object* value) {
+ WRITE_FIELD(this, kTypeFeedbackInfoOffset, value);
+}
+
+
+Object* Code::type_feedback_info() {
+ ASSERT(kind() == FUNCTION);
+ return Object::cast(READ_FIELD(this, kTypeFeedbackInfoOffset));
+}
+
+
+void Code::set_type_feedback_info(Object* value, WriteBarrierMode mode) {
+ ASSERT(kind() == FUNCTION);
+ WRITE_FIELD(this, kTypeFeedbackInfoOffset, value);
+ CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kTypeFeedbackInfoOffset,
+ value, mode);
+}
+
+
+Object* Code::next_code_link() {
+ CHECK(kind() == OPTIMIZED_FUNCTION);
+ return Object::cast(READ_FIELD(this, kTypeFeedbackInfoOffset));
+}
+
+
+void Code::set_next_code_link(Object* value, WriteBarrierMode mode) {
+ CHECK(kind() == OPTIMIZED_FUNCTION);
+ WRITE_FIELD(this, kTypeFeedbackInfoOffset, value);
+ CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kTypeFeedbackInfoOffset,
+ value, mode);
+}
+
+
+int Code::stub_info() {
+ ASSERT(kind() == COMPARE_IC || kind() == COMPARE_NIL_IC ||
+ kind() == BINARY_OP_IC || kind() == LOAD_IC);
+ Object* value = READ_FIELD(this, kTypeFeedbackInfoOffset);
+ return Smi::cast(value)->value();
+}
+
+
+void Code::set_stub_info(int value) {
+ ASSERT(kind() == COMPARE_IC ||
+ kind() == COMPARE_NIL_IC ||
+ kind() == BINARY_OP_IC ||
+ kind() == STUB ||
+ kind() == LOAD_IC ||
+ kind() == KEYED_LOAD_IC ||
+ kind() == STORE_IC ||
+ kind() == KEYED_STORE_IC);
+ WRITE_FIELD(this, kTypeFeedbackInfoOffset, Smi::FromInt(value));
+}
+
+
ACCESSORS(Code, gc_metadata, Object, kGCMetadataOffset)
INT_ACCESSORS(Code, ic_age, kICAgeOffset)
+
byte* Code::instruction_start() {
return FIELD_ADDR(this, kHeaderSize);
}
@@ -4626,12 +5396,6 @@ int Code::body_size() {
}
-FixedArray* Code::unchecked_deoptimization_data() {
- return reinterpret_cast<FixedArray*>(
- READ_FIELD(this, kDeoptimizationDataOffset));
-}
-
-
ByteArray* Code::unchecked_relocation_info() {
return reinterpret_cast<ByteArray*>(READ_FIELD(this, kRelocationInfoOffset));
}
@@ -4660,6 +5424,42 @@ bool Code::contains(byte* inner_pointer) {
ACCESSORS(JSArray, length, Object, kLengthOffset)
+void* JSArrayBuffer::backing_store() {
+ intptr_t ptr = READ_INTPTR_FIELD(this, kBackingStoreOffset);
+ return reinterpret_cast<void*>(ptr);
+}
+
+
+void JSArrayBuffer::set_backing_store(void* value, WriteBarrierMode mode) {
+ intptr_t ptr = reinterpret_cast<intptr_t>(value);
+ WRITE_INTPTR_FIELD(this, kBackingStoreOffset, ptr);
+}
+
+
+ACCESSORS(JSArrayBuffer, byte_length, Object, kByteLengthOffset)
+ACCESSORS_TO_SMI(JSArrayBuffer, flag, kFlagOffset)
+
+
+bool JSArrayBuffer::is_external() {
+ return BooleanBit::get(flag(), kIsExternalBit);
+}
+
+
+void JSArrayBuffer::set_is_external(bool value) {
+ set_flag(BooleanBit::set(flag(), kIsExternalBit, value));
+}
+
+
+ACCESSORS(JSArrayBuffer, weak_next, Object, kWeakNextOffset)
+ACCESSORS(JSArrayBuffer, weak_first_view, Object, kWeakFirstViewOffset)
+
+
+ACCESSORS(JSArrayBufferView, buffer, Object, kBufferOffset)
+ACCESSORS(JSArrayBufferView, byte_offset, Object, kByteOffsetOffset)
+ACCESSORS(JSArrayBufferView, byte_length, Object, kByteLengthOffset)
+ACCESSORS(JSArrayBufferView, weak_next, Object, kWeakNextOffset)
+ACCESSORS(JSTypedArray, length, Object, kLengthOffset)
+
ACCESSORS(JSRegExp, data, Object, kDataOffset)
@@ -4671,12 +5471,6 @@ JSRegExp::Type JSRegExp::TypeTag() {
}
-JSRegExp::Type JSRegExp::TypeTagUnchecked() {
- Smi* smi = Smi::cast(DataAtUnchecked(kTagIndex));
- return static_cast<JSRegExp::Type>(smi->value());
-}
-
-
int JSRegExp::CaptureCount() {
switch (TypeTag()) {
case ATOM:
@@ -4712,13 +5506,6 @@ Object* JSRegExp::DataAt(int index) {
}
-Object* JSRegExp::DataAtUnchecked(int index) {
- FixedArray* fa = reinterpret_cast<FixedArray*>(data());
- int offset = FixedArray::kHeaderSize + index * kPointerSize;
- return READ_FIELD(fa, offset);
-}
-
-
void JSRegExp::SetDataAt(int index, Object* value) {
ASSERT(TypeTag() != NOT_COMPILED);
ASSERT(index >= kDataIndex); // Only implementation data can be set this way.
@@ -4726,36 +5513,29 @@ void JSRegExp::SetDataAt(int index, Object* value) {
}
-void JSRegExp::SetDataAtUnchecked(int index, Object* value, Heap* heap) {
- ASSERT(index >= kDataIndex); // Only implementation data can be set this way.
- FixedArray* fa = reinterpret_cast<FixedArray*>(data());
- if (value->IsSmi()) {
- fa->set_unchecked(index, Smi::cast(value));
- } else {
- // We only do this during GC, so we don't need to notify the write barrier.
- fa->set_unchecked(heap, index, value, SKIP_WRITE_BARRIER);
- }
-}
-
-
ElementsKind JSObject::GetElementsKind() {
ElementsKind kind = map()->elements_kind();
#if DEBUG
FixedArrayBase* fixed_array =
reinterpret_cast<FixedArrayBase*>(READ_FIELD(this, kElementsOffset));
- Map* map = fixed_array->map();
- ASSERT((IsFastSmiOrObjectElementsKind(kind) &&
- (map == GetHeap()->fixed_array_map() ||
- map == GetHeap()->fixed_cow_array_map())) ||
- (IsFastDoubleElementsKind(kind) &&
- (fixed_array->IsFixedDoubleArray() ||
- fixed_array == GetHeap()->empty_fixed_array())) ||
- (kind == DICTIONARY_ELEMENTS &&
+
+ // If a GC was caused while constructing this object, the elements
+ // pointer may point to a one pointer filler map.
+ if (ElementsAreSafeToExamine()) {
+ Map* map = fixed_array->map();
+ ASSERT((IsFastSmiOrObjectElementsKind(kind) &&
+ (map == GetHeap()->fixed_array_map() ||
+ map == GetHeap()->fixed_cow_array_map())) ||
+ (IsFastDoubleElementsKind(kind) &&
+ (fixed_array->IsFixedDoubleArray() ||
+ fixed_array == GetHeap()->empty_fixed_array())) ||
+ (kind == DICTIONARY_ELEMENTS &&
fixed_array->IsFixedArray() &&
- fixed_array->IsDictionary()) ||
- (kind > DICTIONARY_ELEMENTS));
- ASSERT((kind != NON_STRICT_ARGUMENTS_ELEMENTS) ||
- (elements()->IsFixedArray() && elements()->length() >= 2));
+ fixed_array->IsDictionary()) ||
+ (kind > DICTIONARY_ELEMENTS));
+ ASSERT((kind != NON_STRICT_ARGUMENTS_ELEMENTS) ||
+ (elements()->IsFixedArray() && elements()->length() >= 2));
+ }
#endif
return kind;
}
@@ -4791,6 +5571,11 @@ bool JSObject::HasFastHoleyElements() {
}
+bool JSObject::HasFastElements() {
+ return IsFastElementsKind(GetElementsKind());
+}
+
+
bool JSObject::HasDictionaryElements() {
return GetElementsKind() == DICTIONARY_ELEMENTS;
}
@@ -4861,9 +5646,9 @@ MaybeObject* JSObject::EnsureWritableFastElements() {
}
-StringDictionary* JSObject::property_dictionary() {
+NameDictionary* JSObject::property_dictionary() {
ASSERT(!HasFastProperties());
- return StringDictionary::cast(properties());
+ return NameDictionary::cast(properties());
}
@@ -4873,22 +5658,22 @@ SeededNumberDictionary* JSObject::element_dictionary() {
}
-bool String::IsHashFieldComputed(uint32_t field) {
+bool Name::IsHashFieldComputed(uint32_t field) {
return (field & kHashNotComputedMask) == 0;
}
-bool String::HasHashCode() {
+bool Name::HasHashCode() {
return IsHashFieldComputed(hash_field());
}
-uint32_t String::Hash() {
+uint32_t Name::Hash() {
// Fast case: has hash code already been computed?
uint32_t field = hash_field();
if (IsHashFieldComputed(field)) return field >> kHashShift;
- // Slow case: compute hash code and set it.
- return ComputeAndSetHash();
+ // Slow case: compute hash code and set it. Has to be a string.
+ return String::cast(this)->ComputeAndSetHash();
}
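
// Illustrative sketch (not part of the diff): Hash() above is a memoization --
// once computed, the hash lives in the hash field, and a "not computed" bit
// makes the common case a single load and shift. A standalone model of the
// same pattern (the field layout here is made up; the real kHashShift and
// mask differ):
#include <stdint.h>

class CachedHashSketch {
 public:
  CachedHashSketch() : field_(kNotComputedBit) {}
  uint32_t Hash() {
    if ((field_ & kNotComputedBit) == 0) return field_ >> kHashShift;  // Fast.
    uint32_t hash = ComputeHash();                                     // Slow.
    field_ = hash << kHashShift;  // Shifting clears kNotComputedBit.
    return hash;
  }
 private:
  static const uint32_t kNotComputedBit = 1;
  static const int kHashShift = 2;
  // Stand-in for the real hashing work; only the bits above the flag bits of
  // the cached value are kept, as in the real hash field.
  uint32_t ComputeHash() { return 0x1234567u; }
  uint32_t field_;
};
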
@@ -4907,7 +5692,7 @@ bool StringHasher::has_trivial_hash() {
}
-uint32_t StringHasher::AddCharacterCore(uint32_t running_hash, uint32_t c) {
+uint32_t StringHasher::AddCharacterCore(uint32_t running_hash, uint16_t c) {
running_hash += c;
running_hash += (running_hash << 10);
running_hash ^= (running_hash >> 6);
@@ -4926,70 +5711,71 @@ uint32_t StringHasher::GetHashCore(uint32_t running_hash) {
}
-void StringHasher::AddCharacter(uint32_t c) {
- if (c > unibrow::Utf16::kMaxNonSurrogateCharCode) {
- AddSurrogatePair(c); // Not inlined.
- return;
- }
+void StringHasher::AddCharacter(uint16_t c) {
// Use the Jenkins one-at-a-time hash function to update the hash
// for the given character.
raw_running_hash_ = AddCharacterCore(raw_running_hash_, c);
- // Incremental array index computation.
- if (is_array_index_) {
- if (c < '0' || c > '9') {
- is_array_index_ = false;
- } else {
- int d = c - '0';
- if (is_first_char_) {
- is_first_char_ = false;
- if (c == '0' && length_ > 1) {
- is_array_index_ = false;
- return;
- }
- }
- if (array_index_ > 429496729U - ((d + 2) >> 3)) {
- is_array_index_ = false;
- } else {
- array_index_ = array_index_ * 10 + d;
- }
- }
- }
}
-void StringHasher::AddCharacterNoIndex(uint32_t c) {
- ASSERT(!is_array_index());
- if (c > unibrow::Utf16::kMaxNonSurrogateCharCode) {
- AddSurrogatePairNoIndex(c); // Not inlined.
- return;
+bool StringHasher::UpdateIndex(uint16_t c) {
+ ASSERT(is_array_index_);
+ if (c < '0' || c > '9') {
+ is_array_index_ = false;
+ return false;
}
- raw_running_hash_ = AddCharacterCore(raw_running_hash_, c);
+ int d = c - '0';
+ if (is_first_char_) {
+ is_first_char_ = false;
+ if (c == '0' && length_ > 1) {
+ is_array_index_ = false;
+ return false;
+ }
+ }
+ if (array_index_ > 429496729U - ((d + 2) >> 3)) {
+ is_array_index_ = false;
+ return false;
+ }
+ array_index_ = array_index_ * 10 + d;
+ return true;
}
-uint32_t StringHasher::GetHash() {
- // Get the calculated raw hash value and do some more bit ops to distribute
- // the hash further. Ensure that we never return zero as the hash value.
- return GetHashCore(raw_running_hash_);
+template<typename Char>
+inline void StringHasher::AddCharacters(const Char* chars, int length) {
+ ASSERT(sizeof(Char) == 1 || sizeof(Char) == 2);
+ int i = 0;
+ if (is_array_index_) {
+ for (; i < length; i++) {
+ AddCharacter(chars[i]);
+ if (!UpdateIndex(chars[i])) {
+ i++;
+ break;
+ }
+ }
+ }
+ for (; i < length; i++) {
+ ASSERT(!is_array_index_);
+ AddCharacter(chars[i]);
+ }
}
template <typename schar>
-uint32_t HashSequentialString(const schar* chars, int length, uint32_t seed) {
+uint32_t StringHasher::HashSequentialString(const schar* chars,
+ int length,
+ uint32_t seed) {
StringHasher hasher(length, seed);
- if (!hasher.has_trivial_hash()) {
- int i;
- for (i = 0; hasher.is_array_index() && (i < length); i++) {
- hasher.AddCharacter(chars[i]);
- }
- for (; i < length; i++) {
- hasher.AddCharacterNoIndex(chars[i]);
- }
- }
+ if (!hasher.has_trivial_hash()) hasher.AddCharacters(chars, length);
return hasher.GetHashField();
}
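
// Illustrative sketch (not part of the diff): AddCharacterCore above is the
// per-character step of the Jenkins one-at-a-time hash, and GetHashCore (whose
// body falls outside this hunk) applies the usual finalization. A standalone
// version of the same scheme; the real hasher starts from a per-isolate seed
// and additionally guarantees a non-zero result, both omitted here.
#include <stddef.h>
#include <stdint.h>

static uint32_t AddCharacterCoreSketch(uint32_t running_hash, uint16_t c) {
  running_hash += c;
  running_hash += (running_hash << 10);
  running_hash ^= (running_hash >> 6);
  return running_hash;
}

static uint32_t GetHashCoreSketch(uint32_t running_hash) {
  running_hash += (running_hash << 3);
  running_hash ^= (running_hash >> 11);
  running_hash += (running_hash << 15);
  return running_hash;
}

static uint32_t HashOneByteChars(const char* chars, size_t length) {
  uint32_t hash = 0;
  for (size_t i = 0; i < length; i++) {
    hash = AddCharacterCoreSketch(hash, static_cast<uint8_t>(chars[i]));
  }
  return GetHashCoreSketch(hash);
}
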
+bool Name::AsArrayIndex(uint32_t* index) {
+ return IsString() && String::cast(this)->AsArrayIndex(index);
+}
+
+
bool String::AsArrayIndex(uint32_t* index) {
uint32_t field = hash_field();
if (IsHashFieldComputed(field) && (field & kIsNotArrayIndexMask)) {
@@ -5009,26 +5795,44 @@ Object* JSReceiver::GetConstructor() {
}
-bool JSReceiver::HasProperty(String* name) {
- if (IsJSProxy()) {
- return JSProxy::cast(this)->HasPropertyWithHandler(name);
+bool JSReceiver::HasProperty(Handle<JSReceiver> object,
+ Handle<Name> name) {
+ if (object->IsJSProxy()) {
+ Handle<JSProxy> proxy = Handle<JSProxy>::cast(object);
+ return JSProxy::HasPropertyWithHandler(proxy, name);
}
- return GetPropertyAttribute(name) != ABSENT;
+ return object->GetPropertyAttribute(*name) != ABSENT;
}
-bool JSReceiver::HasLocalProperty(String* name) {
- if (IsJSProxy()) {
- return JSProxy::cast(this)->HasPropertyWithHandler(name);
+bool JSReceiver::HasLocalProperty(Handle<JSReceiver> object,
+ Handle<Name> name) {
+ if (object->IsJSProxy()) {
+ Handle<JSProxy> proxy = Handle<JSProxy>::cast(object);
+ return JSProxy::HasPropertyWithHandler(proxy, name);
}
- return GetLocalPropertyAttribute(name) != ABSENT;
+ return object->GetLocalPropertyAttribute(*name) != ABSENT;
}
-PropertyAttributes JSReceiver::GetPropertyAttribute(String* key) {
+PropertyAttributes JSReceiver::GetPropertyAttribute(Name* key) {
+ uint32_t index;
+ if (IsJSObject() && key->AsArrayIndex(&index)) {
+ return GetElementAttribute(index);
+ }
return GetPropertyAttributeWithReceiver(this, key);
}
+
+PropertyAttributes JSReceiver::GetElementAttribute(uint32_t index) {
+ if (IsJSProxy()) {
+ return JSProxy::cast(this)->GetElementAttributeWithHandler(this, index);
+ }
+ return JSObject::cast(this)->GetElementAttributeWithReceiver(
+ this, index, true);
+}
+
+
// TODO(504): this may be useful in other places too where JSGlobalProxy
// is used.
Object* JSObject::BypassGlobalProxy() {
@@ -5049,11 +5853,32 @@ MaybeObject* JSReceiver::GetIdentityHash(CreationFlag flag) {
}
-bool JSReceiver::HasElement(uint32_t index) {
+bool JSReceiver::HasElement(Handle<JSReceiver> object, uint32_t index) {
+ if (object->IsJSProxy()) {
+ Handle<JSProxy> proxy = Handle<JSProxy>::cast(object);
+ return JSProxy::HasElementWithHandler(proxy, index);
+ }
+ return Handle<JSObject>::cast(object)->GetElementAttributeWithReceiver(
+ *object, index, true) != ABSENT;
+}
+
+
+bool JSReceiver::HasLocalElement(Handle<JSReceiver> object, uint32_t index) {
+ if (object->IsJSProxy()) {
+ Handle<JSProxy> proxy = Handle<JSProxy>::cast(object);
+ return JSProxy::HasElementWithHandler(proxy, index);
+ }
+ return Handle<JSObject>::cast(object)->GetElementAttributeWithReceiver(
+ *object, index, false) != ABSENT;
+}
+
+
+PropertyAttributes JSReceiver::GetLocalElementAttribute(uint32_t index) {
if (IsJSProxy()) {
- return JSProxy::cast(this)->HasElementWithHandler(index);
+ return JSProxy::cast(this)->GetElementAttributeWithHandler(this, index);
}
- return JSObject::cast(this)->HasElementWithReceiver(this, index);
+ return JSObject::cast(this)->GetElementAttributeWithReceiver(
+ this, index, false);
}
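HasProperty(), HasLocalProperty(), HasElement() and HasLocalElement() now take Handle<JSReceiver> as static methods instead of operating on a raw receiver: any step that may allocate can move objects, so callers must hold an extra indirection that the collector can update. A toy illustration of that indirection; Handle here is a stand-in, not V8's real handle machinery:

    #include <cassert>

    // Toy handle: reads through a slot that a moving collector could rewrite.
    template <typename T>
    class Handle {
     public:
      explicit Handle(T** slot) : slot_(slot) {}
      T* operator->() const { return *slot_; }
     private:
      T** slot_;
    };

    struct Receiver { bool has_property; };

    // Static, handle-taking predicate in the style of JSReceiver::HasProperty.
    static bool HasProperty(Handle<Receiver> object) {
      // A GC-triggering call could run here; the handle stays valid because
      // only the slot it reads through would be updated, not this local.
      return object->has_property;
    }

    int main() {
      Receiver r = {true};
      Receiver* slot = &r;  // stand-in for a handle-scope slot
      assert(HasProperty(Handle<Receiver>(&slot)));
      return 0;
    }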
@@ -5104,6 +5929,36 @@ bool AccessorInfo::IsCompatibleReceiver(Object* receiver) {
}
+void AccessorPair::set_access_flags(v8::AccessControl access_control) {
+ int current = access_flags()->value();
+ current = BooleanBit::set(current,
+ kProhibitsOverwritingBit,
+ access_control & PROHIBITS_OVERWRITING);
+ current = BooleanBit::set(current,
+ kAllCanReadBit,
+ access_control & ALL_CAN_READ);
+ current = BooleanBit::set(current,
+ kAllCanWriteBit,
+ access_control & ALL_CAN_WRITE);
+ set_access_flags(Smi::FromInt(current));
+}
+
+
+bool AccessorPair::all_can_read() {
+ return BooleanBit::get(access_flags(), kAllCanReadBit);
+}
+
+
+bool AccessorPair::all_can_write() {
+ return BooleanBit::get(access_flags(), kAllCanWriteBit);
+}
+
+
+bool AccessorPair::prohibits_overwriting() {
+ return BooleanBit::get(access_flags(), kProhibitsOverwritingBit);
+}
+
+
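set_access_flags() folds the v8::AccessControl flags into a small integer with BooleanBit::set, one named bit per flag, so the whole set travels as a single Smi. A sketch of that packing; the bit positions below are made up, not the real kAllCanReadBit and friends:

    #include <cstdio>

    enum AccessControl {
      ALL_CAN_READ = 1,
      ALL_CAN_WRITE = 1 << 1,
      PROHIBITS_OVERWRITING = 1 << 2
    };

    // BooleanBit-style helpers: set or read one bit of a packed integer.
    static int SetBit(int flags, int bit, bool value) {
      return value ? (flags | (1 << bit)) : (flags & ~(1 << bit));
    }
    static bool GetBit(int flags, int bit) { return ((flags >> bit) & 1) != 0; }

    int main() {
      const int kProhibitsOverwritingBit = 0;  // illustrative positions
      const int kAllCanReadBit = 1;
      const int kAllCanWriteBit = 2;
      int access_control = ALL_CAN_READ | PROHIBITS_OVERWRITING;
      int flags = 0;
      flags = SetBit(flags, kProhibitsOverwritingBit,
                     (access_control & PROHIBITS_OVERWRITING) != 0);
      flags = SetBit(flags, kAllCanReadBit, (access_control & ALL_CAN_READ) != 0);
      flags = SetBit(flags, kAllCanWriteBit, (access_control & ALL_CAN_WRITE) != 0);
      std::printf("all_can_read=%d all_can_write=%d prohibits_overwriting=%d\n",
                  GetBit(flags, kAllCanReadBit), GetBit(flags, kAllCanWriteBit),
                  GetBit(flags, kProhibitsOverwritingBit));
    }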
template<typename Shape, typename Key>
void Dictionary<Shape, Key>::SetEntry(int entry,
Object* key,
@@ -5117,11 +5972,11 @@ void Dictionary<Shape, Key>::SetEntry(int entry,
Object* key,
Object* value,
PropertyDetails details) {
- ASSERT(!key->IsString() ||
+ ASSERT(!key->IsName() ||
details.IsDeleted() ||
details.dictionary_index() > 0);
int index = HashTable<Shape, Key>::EntryToIndex(entry);
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
WriteBarrierMode mode = FixedArray::GetWriteBarrierMode(no_gc);
FixedArray::set(index, key, mode);
FixedArray::set(index+1, value, mode);
@@ -5157,30 +6012,31 @@ uint32_t SeededNumberDictionaryShape::SeededHashForObject(uint32_t key,
return ComputeIntegerHash(static_cast<uint32_t>(other->Number()), seed);
}
-MaybeObject* NumberDictionaryShape::AsObject(uint32_t key) {
- return Isolate::Current()->heap()->NumberFromUint32(key);
+MaybeObject* NumberDictionaryShape::AsObject(Heap* heap, uint32_t key) {
+ return heap->NumberFromUint32(key);
}
-bool StringDictionaryShape::IsMatch(String* key, Object* other) {
+bool NameDictionaryShape::IsMatch(Name* key, Object* other) {
// We know that all entries in a hash table had their hash keys created.
// Use that knowledge to have fast failure.
- if (key->Hash() != String::cast(other)->Hash()) return false;
- return key->Equals(String::cast(other));
+ if (key->Hash() != Name::cast(other)->Hash()) return false;
+ return key->Equals(Name::cast(other));
}
-uint32_t StringDictionaryShape::Hash(String* key) {
+uint32_t NameDictionaryShape::Hash(Name* key) {
return key->Hash();
}
-uint32_t StringDictionaryShape::HashForObject(String* key, Object* other) {
- return String::cast(other)->Hash();
+uint32_t NameDictionaryShape::HashForObject(Name* key, Object* other) {
+ return Name::cast(other)->Hash();
}
-MaybeObject* StringDictionaryShape::AsObject(String* key) {
+MaybeObject* NameDictionaryShape::AsObject(Heap* heap, Name* key) {
+ ASSERT(key->IsUniqueName());
return key;
}
@@ -5207,7 +6063,36 @@ uint32_t ObjectHashTableShape<entrysize>::HashForObject(Object* key,
template <int entrysize>
-MaybeObject* ObjectHashTableShape<entrysize>::AsObject(Object* key) {
+MaybeObject* ObjectHashTableShape<entrysize>::AsObject(Heap* heap,
+ Object* key) {
+ return key;
+}
+
+
+template <int entrysize>
+bool WeakHashTableShape<entrysize>::IsMatch(Object* key, Object* other) {
+ return key->SameValue(other);
+}
+
+
+template <int entrysize>
+uint32_t WeakHashTableShape<entrysize>::Hash(Object* key) {
+ intptr_t hash = reinterpret_cast<intptr_t>(key);
+ return (uint32_t)(hash & 0xFFFFFFFF);
+}
+
+
+template <int entrysize>
+uint32_t WeakHashTableShape<entrysize>::HashForObject(Object* key,
+ Object* other) {
+ intptr_t hash = reinterpret_cast<intptr_t>(other);
+ return (uint32_t)(hash & 0xFFFFFFFF);
+}
+
+
+template <int entrysize>
+MaybeObject* WeakHashTableShape<entrysize>::AsObject(Heap* heap,
+ Object* key) {
return key;
}
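WeakHashTableShape hashes keys by identity: the object's address truncated to 32 bits. That is exactly right for an identity table, with the usual caveat that two distinct objects occupying the same recycled address over time hash identically. A minimal sketch of the truncation:

    #include <cstdint>
    #include <cstdio>

    // Identity hash: pointer value masked down to 32 bits.
    static uint32_t IdentityHash(const void* key) {
      intptr_t raw = reinterpret_cast<intptr_t>(key);
      return static_cast<uint32_t>(raw & 0xFFFFFFFF);
    }

    int main() {
      int a = 0, b = 0;
      std::printf("%u %u\n", IdentityHash(&a), IdentityHash(&b));
    }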
@@ -5217,8 +6102,8 @@ void Map::ClearCodeCache(Heap* heap) {
// Please note this function is used during marking:
// - MarkCompactCollector::MarkUnmarkedObject
// - IncrementalMarking::Step
- ASSERT(!heap->InNewSpace(heap->raw_unchecked_empty_fixed_array()));
- WRITE_FIELD(this, kCodeCacheOffset, heap->raw_unchecked_empty_fixed_array());
+ ASSERT(!heap->InNewSpace(heap->empty_fixed_array()));
+ WRITE_FIELD(this, kCodeCacheOffset, heap->empty_fixed_array());
}
@@ -5281,6 +6166,12 @@ MaybeObject* FixedDoubleArray::Copy() {
}
+MaybeObject* ConstantPoolArray::Copy() {
+ if (length() == 0) return this;
+ return GetHeap()->CopyConstantPoolArray(this);
+}
+
+
void TypeFeedbackCells::SetAstId(int index, TypeFeedbackId id) {
set(1 + index * 2, Smi::FromInt(id.ToInt()));
}
@@ -5291,13 +6182,13 @@ TypeFeedbackId TypeFeedbackCells::AstId(int index) {
}
-void TypeFeedbackCells::SetCell(int index, JSGlobalPropertyCell* cell) {
+void TypeFeedbackCells::SetCell(int index, Cell* cell) {
set(index * 2, cell);
}
-JSGlobalPropertyCell* TypeFeedbackCells::Cell(int index) {
- return JSGlobalPropertyCell::cast(get(index * 2));
+Cell* TypeFeedbackCells::GetCell(int index) {
+ return Cell::cast(get(index * 2));
}
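SetAstId() and SetCell()/GetCell() above address one flat backing array with a stride of two: slot 2*i holds the feedback cell and slot 2*i + 1 holds the matching AST id. A minimal mock of that interleaved layout; plain ints stand in for real Cells and Smis:

    #include <cstdio>
    #include <vector>

    int main() {
      const int count = 3;
      std::vector<int> cells(2 * count);  // [cell0, id0, cell1, id1, ...]
      for (int i = 0; i < count; i++) {
        cells[i * 2] = 100 + i;           // SetCell(i, ...)
        cells[1 + i * 2] = i;             // SetAstId(i, ...)
      }
      for (int i = 0; i < count; i++) {
        std::printf("entry %d: cell=%d ast_id=%d\n",
                    i, cells[i * 2], cells[1 + i * 2]);
      }
    }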
@@ -5311,8 +6202,14 @@ Handle<Object> TypeFeedbackCells::MegamorphicSentinel(Isolate* isolate) {
}
+Handle<Object> TypeFeedbackCells::MonomorphicArraySentinel(Isolate* isolate,
+ ElementsKind elements_kind) {
+ return Handle<Object>(Smi::FromInt(static_cast<int>(elements_kind)), isolate);
+}
+
+
Object* TypeFeedbackCells::RawUninitializedSentinel(Heap* heap) {
- return heap->raw_unchecked_the_hole_value();
+ return heap->the_hole_value();
}
@@ -5402,7 +6299,6 @@ SMI_ACCESSORS(AliasedArgumentsEntry, aliased_context_slot, kAliasedContextSlot)
Relocatable::Relocatable(Isolate* isolate) {
- ASSERT(isolate == Isolate::Current());
isolate_ = isolate;
prev_ = isolate->relocatable_top();
isolate->set_relocatable_top(this);
@@ -5410,7 +6306,6 @@ Relocatable::Relocatable(Isolate* isolate) {
Relocatable::~Relocatable() {
- ASSERT(isolate_ == Isolate::Current());
ASSERT_EQ(isolate_->relocatable_top(), this);
isolate_->set_relocatable_top(prev_);
}
diff --git a/deps/v8/src/objects-printer.cc b/deps/v8/src/objects-printer.cc
index b1118de9c..60c1ef4c3 100644
--- a/deps/v8/src/objects-printer.cc
+++ b/deps/v8/src/objects-printer.cc
@@ -37,7 +37,9 @@ namespace internal {
#ifdef OBJECT_PRINT
-static const char* TypeToString(InstanceType type);
+void MaybeObject::Print() {
+ Print(stdout);
+}
void MaybeObject::Print(FILE* out) {
@@ -55,6 +57,11 @@ void MaybeObject::Print(FILE* out) {
}
+void MaybeObject::PrintLn() {
+ PrintLn(stdout);
+}
+
+
void MaybeObject::PrintLn(FILE* out) {
Print(out);
PrintF(out, "\n");
@@ -69,13 +76,16 @@ void HeapObject::PrintHeader(FILE* out, const char* id) {
void HeapObject::HeapObjectPrint(FILE* out) {
InstanceType instance_type = map()->instance_type();
- HandleScope scope;
+ HandleScope scope(GetIsolate());
if (instance_type < FIRST_NONSTRING_TYPE) {
String::cast(this)->StringPrint(out);
return;
}
switch (instance_type) {
+ case SYMBOL_TYPE:
+ Symbol::cast(this)->SymbolPrint(out);
+ break;
case MAP_TYPE:
Map::cast(this)->MapPrint(out);
break;
@@ -85,6 +95,9 @@ void HeapObject::HeapObjectPrint(FILE* out) {
case FIXED_DOUBLE_ARRAY_TYPE:
FixedDoubleArray::cast(this)->FixedDoubleArrayPrint(out);
break;
+ case CONSTANT_POOL_ARRAY_TYPE:
+ ConstantPoolArray::cast(this)->ConstantPoolArrayPrint(out);
+ break;
case FIXED_ARRAY_TYPE:
FixedArray::cast(this)->FixedArrayPrint(out);
break;
@@ -129,6 +142,7 @@ void HeapObject::HeapObjectPrint(FILE* out) {
case JS_OBJECT_TYPE: // fall through
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
case JS_ARRAY_TYPE:
+ case JS_GENERATOR_OBJECT_TYPE:
case JS_REGEXP_TYPE:
JSObject::cast(this)->JSObjectPrint(out);
break;
@@ -166,9 +180,18 @@ void HeapObject::HeapObjectPrint(FILE* out) {
case JS_FUNCTION_PROXY_TYPE:
JSFunctionProxy::cast(this)->JSFunctionProxyPrint(out);
break;
+ case JS_SET_TYPE:
+ JSSet::cast(this)->JSSetPrint(out);
+ break;
+ case JS_MAP_TYPE:
+ JSMap::cast(this)->JSMapPrint(out);
+ break;
case JS_WEAK_MAP_TYPE:
JSWeakMap::cast(this)->JSWeakMapPrint(out);
break;
+ case JS_WEAK_SET_TYPE:
+ JSWeakSet::cast(this)->JSWeakSetPrint(out);
+ break;
case FOREIGN_TYPE:
Foreign::cast(this)->ForeignPrint(out);
break;
@@ -178,8 +201,20 @@ void HeapObject::HeapObjectPrint(FILE* out) {
case JS_MESSAGE_OBJECT_TYPE:
JSMessageObject::cast(this)->JSMessageObjectPrint(out);
break;
- case JS_GLOBAL_PROPERTY_CELL_TYPE:
- JSGlobalPropertyCell::cast(this)->JSGlobalPropertyCellPrint(out);
+ case CELL_TYPE:
+ Cell::cast(this)->CellPrint(out);
+ break;
+ case PROPERTY_CELL_TYPE:
+ PropertyCell::cast(this)->PropertyCellPrint(out);
+ break;
+ case JS_ARRAY_BUFFER_TYPE:
+ JSArrayBuffer::cast(this)->JSArrayBufferPrint(out);
+ break;
+ case JS_TYPED_ARRAY_TYPE:
+ JSTypedArray::cast(this)->JSTypedArrayPrint(out);
+ break;
+ case JS_DATA_VIEW_TYPE:
+ JSDataView::cast(this)->JSDataViewPrint(out);
break;
#define MAKE_STRUCT_CASE(NAME, Name, name) \
case NAME##_TYPE: \
@@ -256,18 +291,18 @@ void JSObject::PrintProperties(FILE* out) {
DescriptorArray* descs = map()->instance_descriptors();
for (int i = 0; i < map()->NumberOfOwnDescriptors(); i++) {
PrintF(out, " ");
- descs->GetKey(i)->StringPrint(out);
+ descs->GetKey(i)->NamePrint(out);
PrintF(out, ": ");
switch (descs->GetType(i)) {
case FIELD: {
int index = descs->GetFieldIndex(i);
- FastPropertyAt(index)->ShortPrint(out);
+ RawFastPropertyAt(index)->ShortPrint(out);
PrintF(out, " (field at offset %d)\n", index);
break;
}
- case CONSTANT_FUNCTION:
- descs->GetConstantFunction(i)->ShortPrint(out);
- PrintF(out, " (constant function)\n");
+ case CONSTANT:
+ descs->GetConstant(i)->ShortPrint(out);
+ PrintF(out, " (constant)\n");
break;
case CALLBACKS:
descs->GetCallbacksObject(i)->ShortPrint(out);
@@ -384,7 +419,7 @@ void JSObject::PrintElements(FILE* out) {
case EXTERNAL_DOUBLE_ELEMENTS: {
ExternalDoubleArray* p = ExternalDoubleArray::cast(elements());
for (int i = 0; i < p->length(); i++) {
- PrintF(out, " %d: %f\n", i, p->get_scalar(i));
+ PrintF(out, " %d: %f\n", i, p->get_scalar(i));
}
break;
}
@@ -393,11 +428,16 @@ void JSObject::PrintElements(FILE* out) {
break;
case NON_STRICT_ARGUMENTS_ELEMENTS: {
FixedArray* p = FixedArray::cast(elements());
+ PrintF(out, " parameter map:");
for (int i = 2; i < p->length(); i++) {
- PrintF(out, " %d: ", i);
+ PrintF(out, " %d:", i - 2);
p->get(i)->ShortPrint(out);
- PrintF(out, "\n");
}
+ PrintF(out, "\n context: ");
+ p->get(0)->ShortPrint(out);
+ PrintF(out, "\n arguments: ");
+ p->get(1)->ShortPrint(out);
+ PrintF(out, "\n");
break;
}
}
@@ -409,15 +449,15 @@ void JSObject::PrintTransitions(FILE* out) {
TransitionArray* transitions = map()->transitions();
for (int i = 0; i < transitions->number_of_transitions(); i++) {
PrintF(out, " ");
- transitions->GetKey(i)->StringPrint(out);
+ transitions->GetKey(i)->NamePrint(out);
PrintF(out, ": ");
switch (transitions->GetTargetDetails(i).type()) {
case FIELD: {
PrintF(out, " (transition to field)\n");
break;
}
- case CONSTANT_FUNCTION:
- PrintF(out, " (transition to constant function)\n");
+ case CONSTANT:
+ PrintF(out, " (transition to constant)\n");
break;
case CALLBACKS:
PrintF(out, " (transition to callback)\n");
@@ -454,7 +494,7 @@ void JSObject::JSObjectPrint(FILE* out) {
void JSModule::JSModulePrint(FILE* out) {
HeapObject::PrintHeader(out, "JSModule");
- PrintF(out, " - map = 0x%p\n", reinterpret_cast<void*>(map()));
+ PrintF(out, " - map = %p\n", reinterpret_cast<void*>(map()));
PrintF(out, " - context = ");
context()->Print(out);
PrintF(out, " - scope_info = ");
@@ -469,68 +509,21 @@ void JSModule::JSModulePrint(FILE* out) {
static const char* TypeToString(InstanceType type) {
switch (type) {
- case INVALID_TYPE: return "INVALID";
- case MAP_TYPE: return "MAP";
- case HEAP_NUMBER_TYPE: return "HEAP_NUMBER";
- case SYMBOL_TYPE: return "SYMBOL";
- case ASCII_SYMBOL_TYPE: return "ASCII_SYMBOL";
- case CONS_SYMBOL_TYPE: return "CONS_SYMBOL";
- case CONS_ASCII_SYMBOL_TYPE: return "CONS_ASCII_SYMBOL";
- case EXTERNAL_ASCII_SYMBOL_TYPE:
- case EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE:
- case EXTERNAL_SYMBOL_TYPE: return "EXTERNAL_SYMBOL";
- case SHORT_EXTERNAL_ASCII_SYMBOL_TYPE:
- case SHORT_EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE:
- case SHORT_EXTERNAL_SYMBOL_TYPE: return "SHORT_EXTERNAL_SYMBOL";
- case ASCII_STRING_TYPE: return "ASCII_STRING";
- case STRING_TYPE: return "TWO_BYTE_STRING";
- case CONS_STRING_TYPE:
- case CONS_ASCII_STRING_TYPE: return "CONS_STRING";
- case EXTERNAL_ASCII_STRING_TYPE:
- case EXTERNAL_STRING_WITH_ASCII_DATA_TYPE:
- case EXTERNAL_STRING_TYPE: return "EXTERNAL_STRING";
- case SHORT_EXTERNAL_ASCII_STRING_TYPE:
- case SHORT_EXTERNAL_STRING_WITH_ASCII_DATA_TYPE:
- case SHORT_EXTERNAL_STRING_TYPE: return "SHORT_EXTERNAL_STRING";
- case FIXED_ARRAY_TYPE: return "FIXED_ARRAY";
- case BYTE_ARRAY_TYPE: return "BYTE_ARRAY";
- case FREE_SPACE_TYPE: return "FREE_SPACE";
- case EXTERNAL_PIXEL_ARRAY_TYPE: return "EXTERNAL_PIXEL_ARRAY";
- case EXTERNAL_BYTE_ARRAY_TYPE: return "EXTERNAL_BYTE_ARRAY";
- case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
- return "EXTERNAL_UNSIGNED_BYTE_ARRAY";
- case EXTERNAL_SHORT_ARRAY_TYPE: return "EXTERNAL_SHORT_ARRAY";
- case EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE:
- return "EXTERNAL_UNSIGNED_SHORT_ARRAY";
- case EXTERNAL_INT_ARRAY_TYPE: return "EXTERNAL_INT_ARRAY";
- case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
- return "EXTERNAL_UNSIGNED_INT_ARRAY";
- case EXTERNAL_FLOAT_ARRAY_TYPE: return "EXTERNAL_FLOAT_ARRAY";
- case EXTERNAL_DOUBLE_ARRAY_TYPE: return "EXTERNAL_DOUBLE_ARRAY";
- case FILLER_TYPE: return "FILLER";
- case JS_OBJECT_TYPE: return "JS_OBJECT";
- case JS_CONTEXT_EXTENSION_OBJECT_TYPE: return "JS_CONTEXT_EXTENSION_OBJECT";
- case ODDBALL_TYPE: return "ODDBALL";
- case JS_GLOBAL_PROPERTY_CELL_TYPE: return "JS_GLOBAL_PROPERTY_CELL";
- case SHARED_FUNCTION_INFO_TYPE: return "SHARED_FUNCTION_INFO";
- case JS_MODULE_TYPE: return "JS_MODULE";
- case JS_FUNCTION_TYPE: return "JS_FUNCTION";
- case CODE_TYPE: return "CODE";
- case JS_ARRAY_TYPE: return "JS_ARRAY";
- case JS_PROXY_TYPE: return "JS_PROXY";
- case JS_WEAK_MAP_TYPE: return "JS_WEAK_MAP";
- case JS_REGEXP_TYPE: return "JS_REGEXP";
- case JS_VALUE_TYPE: return "JS_VALUE";
- case JS_GLOBAL_OBJECT_TYPE: return "JS_GLOBAL_OBJECT";
- case JS_BUILTINS_OBJECT_TYPE: return "JS_BUILTINS_OBJECT";
- case JS_GLOBAL_PROXY_TYPE: return "JS_GLOBAL_PROXY";
- case FOREIGN_TYPE: return "FOREIGN";
- case JS_MESSAGE_OBJECT_TYPE: return "JS_MESSAGE_OBJECT_TYPE";
-#define MAKE_STRUCT_CASE(NAME, Name, name) case NAME##_TYPE: return #NAME;
- STRUCT_LIST(MAKE_STRUCT_CASE)
-#undef MAKE_STRUCT_CASE
- default: return "UNKNOWN";
+#define TYPE_TO_STRING(TYPE) case TYPE: return #TYPE;
+ INSTANCE_TYPE_LIST(TYPE_TO_STRING)
+#undef TYPE_TO_STRING
}
+ UNREACHABLE();
+ return "UNKNOWN"; // Keep the compiler happy.
+}
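The rewritten TypeToString() relies on an X-macro: one list macro expands into both the instance-type enum and the switch cases, so the two can never drift apart the way the old hand-written table could. A pared-down version of the pattern; MY_TYPE_LIST is a made-up stand-in for INSTANCE_TYPE_LIST:

    #include <cstdio>

    #define MY_TYPE_LIST(V) V(MAP_TYPE) V(CODE_TYPE) V(ODDBALL_TYPE)

    enum InstanceType {
    #define DECLARE_TYPE(T) T,
      MY_TYPE_LIST(DECLARE_TYPE)
    #undef DECLARE_TYPE
    };

    static const char* TypeToString(InstanceType type) {
      switch (type) {
    #define TYPE_TO_STRING(T) case T: return #T;
        MY_TYPE_LIST(TYPE_TO_STRING)
    #undef TYPE_TO_STRING
      }
      return "UNKNOWN";  // unreachable for valid enum values
    }

    int main() { std::printf("%s\n", TypeToString(CODE_TYPE)); }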
+
+
+void Symbol::SymbolPrint(FILE* out) {
+ HeapObject::PrintHeader(out, "Symbol");
+ PrintF(out, " - hash: %d\n", Hash());
+ PrintF(out, " - name: ");
+ name()->ShortPrint();
+ PrintF(out, "\n");
}
@@ -564,8 +557,8 @@ void Map::MapPrint(FILE* out) {
}
PrintF(out, " - back pointer: ");
GetBackPointer()->ShortPrint(out);
- PrintF(out, "\n - instance descriptors %i #%i: ",
- owns_descriptors(),
+ PrintF(out, "\n - instance descriptors %s#%i: ",
+ owns_descriptors() ? "(own) " : "",
NumberOfOwnDescriptors());
instance_descriptors()->ShortPrint(out);
if (HasTransitionArray()) {
@@ -578,6 +571,8 @@ void Map::MapPrint(FILE* out) {
constructor()->ShortPrint(out);
PrintF(out, "\n - code cache: ");
code_cache()->ShortPrint(out);
+ PrintF(out, "\n - dependent code: ");
+ dependent_code()->ShortPrint(out);
PrintF(out, "\n");
}
@@ -638,6 +633,23 @@ void FixedDoubleArray::FixedDoubleArrayPrint(FILE* out) {
}
+void ConstantPoolArray::ConstantPoolArrayPrint(FILE* out) {
+ HeapObject::PrintHeader(out, "ConstantPoolArray");
+ PrintF(out, " - length: %d", length());
+ for (int i = 0; i < length(); i++) {
+ if (i < first_ptr_index()) {
+ PrintF(out, "\n [%d]: double: %g", i, get_int64_entry_as_double(i));
+ } else if (i < first_int32_index()) {
+ PrintF(out, "\n [%d]: pointer: %p", i,
+ reinterpret_cast<void*>(get_ptr_entry(i)));
+ } else {
+ PrintF(out, "\n [%d]: int32: %d", i, get_int32_entry(i));
+ }
+ }
+ PrintF(out, "\n");
+}
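ConstantPoolArrayPrint() walks the array as three back-to-back sections, 64-bit entries first, then tagged pointers, then 32-bit integers, with first_ptr_index() and first_int32_index() as the boundaries. A toy walk over that layout, with made-up section sizes in place of the values the real array header supplies:

    #include <cstdio>

    int main() {
      const int first_ptr_index = 2;    // [0, 2)  -> int64/double entries
      const int first_int32_index = 5;  // [2, 5)  -> pointer entries
      const int length = 7;             // [5, 7)  -> int32 entries
      for (int i = 0; i < length; i++) {
        const char* section = i < first_ptr_index ? "double"
                            : i < first_int32_index ? "pointer"
                                                    : "int32";
        std::printf("[%d]: %s entry\n", i, section);
      }
    }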
+
+
void JSValue::JSValuePrint(FILE* out) {
HeapObject::PrintHeader(out, "ValueObject");
value()->Print(out);
@@ -663,7 +675,7 @@ void JSMessageObject::JSMessageObjectPrint(FILE* out) {
void String::StringPrint(FILE* out) {
- if (StringShape(this).IsSymbol()) {
+ if (StringShape(this).IsInternalized()) {
PrintF(out, "#");
} else if (StringShape(this).IsCons()) {
PrintF(out, "c\"");
@@ -685,7 +697,15 @@ void String::StringPrint(FILE* out) {
PrintF(out, "%s", truncated_epilogue);
}
- if (!StringShape(this).IsSymbol()) PrintF(out, "\"");
+ if (!StringShape(this).IsInternalized()) PrintF(out, "\"");
+}
+
+
+void Name::NamePrint(FILE* out) {
+ if (IsString())
+ String::cast(this)->StringPrint(out);
+ else
+ ShortPrint();
}
@@ -698,7 +718,7 @@ char* String::ToAsciiArray() {
static char* buffer = NULL;
if (buffer != NULL) free(buffer);
buffer = new char[length()+1];
- WriteToFlat(this, buffer, 0, length());
+ WriteToFlat(this, reinterpret_cast<uint8_t*>(buffer), 0, length());
buffer[length()] = 0;
return buffer;
}
@@ -708,9 +728,10 @@ static const char* const weekdays[] = {
"???", "Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"
};
+
void JSDate::JSDatePrint(FILE* out) {
HeapObject::PrintHeader(out, "JSDate");
- PrintF(out, " - map = 0x%p\n", reinterpret_cast<void*>(map()));
+ PrintF(out, " - map = %p\n", reinterpret_cast<void*>(map()));
PrintF(out, " - value = ");
value()->Print(out);
if (!year()->IsSmi()) {
@@ -730,7 +751,7 @@ void JSDate::JSDatePrint(FILE* out) {
void JSProxy::JSProxyPrint(FILE* out) {
HeapObject::PrintHeader(out, "JSProxy");
- PrintF(out, " - map = 0x%p\n", reinterpret_cast<void*>(map()));
+ PrintF(out, " - map = %p\n", reinterpret_cast<void*>(map()));
PrintF(out, " - handler = ");
handler()->Print(out);
PrintF(out, " - hash = ");
@@ -741,7 +762,7 @@ void JSProxy::JSProxyPrint(FILE* out) {
void JSFunctionProxy::JSFunctionProxyPrint(FILE* out) {
HeapObject::PrintHeader(out, "JSFunctionProxy");
- PrintF(out, " - map = 0x%p\n", reinterpret_cast<void*>(map()));
+ PrintF(out, " - map = %p\n", reinterpret_cast<void*>(map()));
PrintF(out, " - handler = ");
handler()->Print(out);
PrintF(out, " - call_trap = ");
@@ -752,18 +773,84 @@ void JSFunctionProxy::JSFunctionProxyPrint(FILE* out) {
}
+void JSSet::JSSetPrint(FILE* out) {
+ HeapObject::PrintHeader(out, "JSSet");
+ PrintF(out, " - map = %p\n", reinterpret_cast<void*>(map()));
+ PrintF(out, " - table = ");
+ table()->ShortPrint(out);
+ PrintF(out, "\n");
+}
+
+
+void JSMap::JSMapPrint(FILE* out) {
+ HeapObject::PrintHeader(out, "JSMap");
+ PrintF(out, " - map = %p\n", reinterpret_cast<void*>(map()));
+ PrintF(out, " - table = ");
+ table()->ShortPrint(out);
+ PrintF(out, "\n");
+}
+
+
void JSWeakMap::JSWeakMapPrint(FILE* out) {
HeapObject::PrintHeader(out, "JSWeakMap");
- PrintF(out, " - map = 0x%p\n", reinterpret_cast<void*>(map()));
+ PrintF(out, " - map = %p\n", reinterpret_cast<void*>(map()));
PrintF(out, " - table = ");
table()->ShortPrint(out);
PrintF(out, "\n");
}
+void JSWeakSet::JSWeakSetPrint(FILE* out) {
+ HeapObject::PrintHeader(out, "JSWeakSet");
+ PrintF(out, " - map = %p\n", reinterpret_cast<void*>(map()));
+ PrintF(out, " - table = ");
+ table()->ShortPrint(out);
+ PrintF(out, "\n");
+}
+
+
+void JSArrayBuffer::JSArrayBufferPrint(FILE* out) {
+ HeapObject::PrintHeader(out, "JSArrayBuffer");
+ PrintF(out, " - map = %p\n", reinterpret_cast<void*>(map()));
+ PrintF(out, " - backing_store = %p\n", backing_store());
+ PrintF(out, " - byte_length = ");
+ byte_length()->ShortPrint(out);
+ PrintF(out, "\n");
+}
+
+
+void JSTypedArray::JSTypedArrayPrint(FILE* out) {
+ HeapObject::PrintHeader(out, "JSTypedArray");
+ PrintF(out, " - map = %p\n", reinterpret_cast<void*>(map()));
+ PrintF(out, " - buffer =");
+ buffer()->ShortPrint(out);
+ PrintF(out, "\n - byte_offset = ");
+ byte_offset()->ShortPrint(out);
+ PrintF(out, "\n - byte_length = ");
+ byte_length()->ShortPrint(out);
+ PrintF(out, "\n - length = ");
+ length()->ShortPrint(out);
+ PrintF("\n");
+ PrintElements(out);
+}
+
+
+void JSDataView::JSDataViewPrint(FILE* out) {
+ HeapObject::PrintHeader(out, "JSDataView");
+ PrintF(out, " - map = %p\n", reinterpret_cast<void*>(map()));
+ PrintF(out, " - buffer =");
+ buffer()->ShortPrint(out);
+ PrintF(out, "\n - byte_offset = ");
+ byte_offset()->ShortPrint(out);
+ PrintF(out, "\n - byte_length = ");
+ byte_length()->ShortPrint(out);
+ PrintF("\n");
+}
+
+
void JSFunction::JSFunctionPrint(FILE* out) {
HeapObject::PrintHeader(out, "Function");
- PrintF(out, " - map = 0x%p\n", reinterpret_cast<void*>(map()));
+ PrintF(out, " - map = %p\n", reinterpret_cast<void*>(map()));
PrintF(out, " - initial_map = ");
if (has_initial_map()) {
initial_map()->ShortPrint(out);
@@ -773,7 +860,7 @@ void JSFunction::JSFunctionPrint(FILE* out) {
PrintF(out, "\n - name = ");
shared()->name()->Print(out);
PrintF(out, "\n - context = ");
- unchecked_context()->ShortPrint(out);
+ context()->ShortPrint(out);
PrintF(out, "\n - literals = ");
literals()->ShortPrint(out);
PrintF(out, "\n - code = ");
@@ -817,10 +904,8 @@ void SharedFunctionInfo::SharedFunctionInfoPrint(FILE* out) {
PrintF(out, "\n - debug info = ");
debug_info()->ShortPrint(out);
PrintF(out, "\n - length = %d", length());
- PrintF(out, "\n - has_only_simple_this_property_assignments = %d",
- has_only_simple_this_property_assignments());
- PrintF(out, "\n - this_property_assignments = ");
- this_property_assignments()->ShortPrint(out);
+ PrintF(out, "\n - optimized_code_map = ");
+ optimized_code_map()->ShortPrint(out);
PrintF(out, "\n");
}
@@ -849,8 +934,13 @@ void JSBuiltinsObject::JSBuiltinsObjectPrint(FILE* out) {
}
-void JSGlobalPropertyCell::JSGlobalPropertyCellPrint(FILE* out) {
- HeapObject::PrintHeader(out, "JSGlobalPropertyCell");
+void Cell::CellPrint(FILE* out) {
+ HeapObject::PrintHeader(out, "Cell");
+}
+
+
+void PropertyCell::PropertyCellPrint(FILE* out) {
+ HeapObject::PrintHeader(out, "PropertyCell");
}
@@ -869,18 +959,43 @@ void Foreign::ForeignPrint(FILE* out) {
}
-void AccessorInfo::AccessorInfoPrint(FILE* out) {
- HeapObject::PrintHeader(out, "AccessorInfo");
+void ExecutableAccessorInfo::ExecutableAccessorInfoPrint(FILE* out) {
+ HeapObject::PrintHeader(out, "ExecutableAccessorInfo");
+ PrintF(out, "\n - name: ");
+ name()->ShortPrint(out);
+ PrintF(out, "\n - flag: ");
+ flag()->ShortPrint(out);
PrintF(out, "\n - getter: ");
getter()->ShortPrint(out);
PrintF(out, "\n - setter: ");
setter()->ShortPrint(out);
- PrintF(out, "\n - name: ");
- name()->ShortPrint(out);
PrintF(out, "\n - data: ");
data()->ShortPrint(out);
+}
+
+
+void DeclaredAccessorInfo::DeclaredAccessorInfoPrint(FILE* out) {
+ HeapObject::PrintHeader(out, "DeclaredAccessorInfo");
+ PrintF(out, "\n - name: ");
+ name()->ShortPrint(out);
PrintF(out, "\n - flag: ");
flag()->ShortPrint(out);
+ PrintF(out, "\n - descriptor: ");
+ descriptor()->ShortPrint(out);
+}
+
+
+void DeclaredAccessorDescriptor::DeclaredAccessorDescriptorPrint(FILE* out) {
+ HeapObject::PrintHeader(out, "DeclaredAccessorDescriptor");
+ PrintF(out, "\n - internal field: ");
+ serialized_data()->ShortPrint(out);
+}
+
+
+void Box::BoxPrint(FILE* out) {
+ HeapObject::PrintHeader(out, "Box");
+ PrintF(out, "\n - value: ");
+ value()->ShortPrint(out);
}
@@ -890,6 +1005,8 @@ void AccessorPair::AccessorPairPrint(FILE* out) {
getter()->ShortPrint(out);
PrintF(out, "\n - setter: ");
setter()->ShortPrint(out);
+ PrintF(out, "\n - flag: ");
+ access_flags()->ShortPrint(out);
}
@@ -973,6 +1090,8 @@ void ObjectTemplateInfo::ObjectTemplateInfoPrint(FILE* out) {
tag()->ShortPrint(out);
PrintF(out, "\n - property_list: ");
property_list()->ShortPrint(out);
+ PrintF(out, "\n - property_accessors: ");
+ property_accessors()->ShortPrint(out);
PrintF(out, "\n - constructor: ");
constructor()->ShortPrint(out);
PrintF(out, "\n - internal_field_count: ");
@@ -997,6 +1116,50 @@ void TypeSwitchInfo::TypeSwitchInfoPrint(FILE* out) {
}
+void AllocationSite::AllocationSitePrint(FILE* out) {
+ HeapObject::PrintHeader(out, "AllocationSite");
+ PrintF(out, " - weak_next: ");
+ weak_next()->ShortPrint(out);
+ PrintF(out, "\n - dependent code: ");
+ dependent_code()->ShortPrint(out);
+ PrintF(out, "\n - nested site: ");
+ nested_site()->ShortPrint(out);
+ PrintF(out, "\n - transition_info: ");
+ if (transition_info()->IsCell()) {
+ Cell* cell = Cell::cast(transition_info());
+ Object* cell_contents = cell->value();
+ if (cell_contents->IsSmi()) {
+ ElementsKind kind = static_cast<ElementsKind>(
+ Smi::cast(cell_contents)->value());
+ PrintF(out, "Array allocation with ElementsKind ");
+ PrintElementsKind(out, kind);
+ PrintF(out, "\n");
+ return;
+ }
+ } else if (transition_info()->IsJSArray()) {
+ PrintF(out, "Array literal ");
+ transition_info()->ShortPrint(out);
+ PrintF(out, "\n");
+ return;
+ }
+
+ PrintF(out, "unknown transition_info");
+ transition_info()->ShortPrint(out);
+ PrintF(out, "\n");
+}
+
+
+void AllocationMemento::AllocationMementoPrint(FILE* out) {
+ HeapObject::PrintHeader(out, "AllocationMemento");
+ PrintF(out, " - allocation site: ");
+ if (IsValid()) {
+ GetAllocationSite()->Print();
+ } else {
+ PrintF(out, "<invalid>\n");
+ }
+}
+
+
void Script::ScriptPrint(FILE* out) {
HeapObject::PrintHeader(out, "Script");
PrintF(out, "\n - source: ");
@@ -1017,8 +1180,7 @@ void Script::ScriptPrint(FILE* out) {
context_data()->ShortPrint(out);
PrintF(out, "\n - wrapper: ");
wrapper()->ShortPrint(out);
- PrintF(out, "\n - compilation type: ");
- compilation_type()->ShortPrint(out);
+ PrintF(out, "\n - compilation type: %d", compilation_type());
PrintF(out, "\n - line ends: ");
line_ends()->ShortPrint(out);
PrintF(out, "\n - eval from shared: ");
@@ -1070,15 +1232,15 @@ void TransitionArray::PrintTransitions(FILE* out) {
PrintF(out, "Transition array %d\n", number_of_transitions());
for (int i = 0; i < number_of_transitions(); i++) {
PrintF(out, " %d: ", i);
- GetKey(i)->StringPrint(out);
+ GetKey(i)->NamePrint(out);
PrintF(out, ": ");
switch (GetTargetDetails(i).type()) {
case FIELD: {
PrintF(out, " (transition to field)\n");
break;
}
- case CONSTANT_FUNCTION:
- PrintF(out, " (transition to constant function)\n");
+ case CONSTANT:
+ PrintF(out, " (transition to constant)\n");
break;
case CALLBACKS:
PrintF(out, " (transition to callback)\n");
diff --git a/deps/v8/src/objects-visiting-inl.h b/deps/v8/src/objects-visiting-inl.h
index d698a8df0..93b7cb96a 100644
--- a/deps/v8/src/objects-visiting-inl.h
+++ b/deps/v8/src/objects-visiting-inl.h
@@ -49,6 +49,11 @@ void StaticNewSpaceVisitor<StaticVisitor>::Initialize() {
SlicedString::BodyDescriptor,
int>::Visit);
+ table_.Register(kVisitSymbol,
+ &FixedBodyVisitor<StaticVisitor,
+ Symbol::BodyDescriptor,
+ int>::Visit);
+
table_.Register(kVisitFixedArray,
&FlexibleBodyVisitor<StaticVisitor,
FixedArray::BodyDescriptor,
@@ -68,16 +73,24 @@ void StaticNewSpaceVisitor<StaticVisitor>::Initialize() {
SharedFunctionInfo::BodyDescriptor,
int>::Visit);
- table_.Register(kVisitSeqAsciiString, &VisitSeqAsciiString);
+ table_.Register(kVisitSeqOneByteString, &VisitSeqOneByteString);
table_.Register(kVisitSeqTwoByteString, &VisitSeqTwoByteString);
table_.Register(kVisitJSFunction, &VisitJSFunction);
+ table_.Register(kVisitJSArrayBuffer, &VisitJSArrayBuffer);
+
+ table_.Register(kVisitJSTypedArray, &VisitJSTypedArray);
+
+ table_.Register(kVisitJSDataView, &VisitJSDataView);
+
table_.Register(kVisitFreeSpace, &VisitFreeSpace);
table_.Register(kVisitJSWeakMap, &JSObjectVisitor::Visit);
+ table_.Register(kVisitJSWeakSet, &JSObjectVisitor::Visit);
+
table_.Register(kVisitJSRegExp, &JSObjectVisitor::Visit);
table_.template RegisterSpecializations<DataObjectVisitor,
@@ -94,6 +107,59 @@ void StaticNewSpaceVisitor<StaticVisitor>::Initialize() {
template<typename StaticVisitor>
+int StaticNewSpaceVisitor<StaticVisitor>::VisitJSArrayBuffer(
+ Map* map, HeapObject* object) {
+ Heap* heap = map->GetHeap();
+
+ STATIC_ASSERT(
+ JSArrayBuffer::kWeakFirstViewOffset ==
+ JSArrayBuffer::kWeakNextOffset + kPointerSize);
+ VisitPointers(
+ heap,
+ HeapObject::RawField(object, JSArrayBuffer::BodyDescriptor::kStartOffset),
+ HeapObject::RawField(object, JSArrayBuffer::kWeakNextOffset));
+ VisitPointers(
+ heap,
+ HeapObject::RawField(object,
+ JSArrayBuffer::kWeakNextOffset + 2 * kPointerSize),
+ HeapObject::RawField(object, JSArrayBuffer::kSizeWithInternalFields));
+ return JSArrayBuffer::kSizeWithInternalFields;
+}
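VisitJSArrayBuffer() deliberately visits two disjoint pointer ranges and leaves the slots between them alone: the weak-next and weak-first-view links must not keep their targets alive, so the visitor steps straight over them (the STATIC_ASSERT pins the two weak slots next to each other). A schematic version of that split iteration; the slot names, offsets and callback are illustrative only:

    #include <cstdio>

    static void VisitRange(const char* const* slots, int from, int to) {
      for (int i = from; i < to; i++) {
        std::printf("visit slot %d: %s\n", i, slots[i]);
      }
    }

    int main() {
      // Imaginary object body: two weak links sit between the strong fields.
      const char* slots[] = {"map", "properties", "elements",
                             "weak_next", "weak_first_view",
                             "internal_field_0"};
      const int kWeakStart = 3;   // stand-in for kWeakNextOffset
      const int kWeakEnd = 5;     // weak_next and weak_first_view are skipped
      const int kSize = 6;
      VisitRange(slots, 0, kWeakStart);    // strong fields before the weak links
      VisitRange(slots, kWeakEnd, kSize);  // strong fields after the weak links
    }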
+
+
+template<typename StaticVisitor>
+int StaticNewSpaceVisitor<StaticVisitor>::VisitJSTypedArray(
+ Map* map, HeapObject* object) {
+ VisitPointers(
+ map->GetHeap(),
+ HeapObject::RawField(object, JSTypedArray::BodyDescriptor::kStartOffset),
+ HeapObject::RawField(object, JSTypedArray::kWeakNextOffset));
+ VisitPointers(
+ map->GetHeap(),
+ HeapObject::RawField(object,
+ JSTypedArray::kWeakNextOffset + kPointerSize),
+ HeapObject::RawField(object, JSTypedArray::kSizeWithInternalFields));
+ return JSTypedArray::kSizeWithInternalFields;
+}
+
+
+template<typename StaticVisitor>
+int StaticNewSpaceVisitor<StaticVisitor>::VisitJSDataView(
+ Map* map, HeapObject* object) {
+ VisitPointers(
+ map->GetHeap(),
+ HeapObject::RawField(object, JSDataView::BodyDescriptor::kStartOffset),
+ HeapObject::RawField(object, JSDataView::kWeakNextOffset));
+ VisitPointers(
+ map->GetHeap(),
+ HeapObject::RawField(object,
+ JSDataView::kWeakNextOffset + kPointerSize),
+ HeapObject::RawField(object, JSDataView::kSizeWithInternalFields));
+ return JSDataView::kSizeWithInternalFields;
+}
+
+
+template<typename StaticVisitor>
void StaticMarkingVisitor<StaticVisitor>::Initialize() {
table_.Register(kVisitShortcutCandidate,
&FixedBodyVisitor<StaticVisitor,
@@ -110,24 +176,35 @@ void StaticMarkingVisitor<StaticVisitor>::Initialize() {
SlicedString::BodyDescriptor,
void>::Visit);
- table_.Register(kVisitFixedArray,
- &FlexibleBodyVisitor<StaticVisitor,
- FixedArray::BodyDescriptor,
+ table_.Register(kVisitSymbol,
+ &FixedBodyVisitor<StaticVisitor,
+ Symbol::BodyDescriptor,
void>::Visit);
+ table_.Register(kVisitFixedArray, &FixedArrayVisitor::Visit);
+
table_.Register(kVisitFixedDoubleArray, &DataObjectVisitor::Visit);
+ table_.Register(kVisitConstantPoolArray, &VisitConstantPoolArray);
+
table_.Register(kVisitNativeContext, &VisitNativeContext);
+ table_.Register(kVisitAllocationSite,
+ &FixedBodyVisitor<StaticVisitor,
+ AllocationSite::BodyDescriptor,
+ void>::Visit);
+
table_.Register(kVisitByteArray, &DataObjectVisitor::Visit);
table_.Register(kVisitFreeSpace, &DataObjectVisitor::Visit);
- table_.Register(kVisitSeqAsciiString, &DataObjectVisitor::Visit);
+ table_.Register(kVisitSeqOneByteString, &DataObjectVisitor::Visit);
table_.Register(kVisitSeqTwoByteString, &DataObjectVisitor::Visit);
- table_.Register(kVisitJSWeakMap, &StaticVisitor::VisitJSWeakMap);
+ table_.Register(kVisitJSWeakMap, &StaticVisitor::VisitWeakCollection);
+
+ table_.Register(kVisitJSWeakSet, &StaticVisitor::VisitWeakCollection);
table_.Register(kVisitOddball,
&FixedBodyVisitor<StaticVisitor,
@@ -142,13 +219,21 @@ void StaticMarkingVisitor<StaticVisitor>::Initialize() {
table_.Register(kVisitJSFunction, &VisitJSFunction);
+ table_.Register(kVisitJSArrayBuffer, &VisitJSArrayBuffer);
+
+ table_.Register(kVisitJSTypedArray, &VisitJSTypedArray);
+
+ table_.Register(kVisitJSDataView, &VisitJSDataView);
+
// Registration for kVisitJSRegExp is done by StaticVisitor.
- table_.Register(kVisitPropertyCell,
+ table_.Register(kVisitCell,
&FixedBodyVisitor<StaticVisitor,
- JSGlobalPropertyCell::BodyDescriptor,
+ Cell::BodyDescriptor,
void>::Visit);
+ table_.Register(kVisitPropertyCell, &VisitPropertyCell);
+
table_.template RegisterSpecializations<DataObjectVisitor,
kVisitDataObject,
kVisitDataObjectGeneric>();
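Initialize() fills a function-pointer table indexed by visitor id, so IterateBody() can dispatch on an object's map with a single indexed call instead of a switch. A pared-down version of that table-driven dispatch; the ids, callbacks and sizes are stand-ins:

    #include <cstdio>

    enum VisitorId { kVisitFixedArray, kVisitCode, kVisitorIdCount };

    struct HeapObject { VisitorId visitor_id; };  // stand-in for map()->visitor_id()

    typedef int (*Callback)(HeapObject* object);

    static int VisitFixedArray(HeapObject*) { std::puts("fixed array body"); return 16; }
    static int VisitCode(HeapObject*)       { std::puts("code body");        return 64; }

    static Callback table_[kVisitorIdCount];

    static void Initialize() {
      table_[kVisitFixedArray] = &VisitFixedArray;  // table_.Register(...)
      table_[kVisitCode] = &VisitCode;
    }

    static int IterateBody(HeapObject* obj) { return table_[obj->visitor_id](obj); }

    int main() {
      Initialize();
      HeapObject code = {kVisitCode};
      std::printf("size = %d\n", IterateBody(&code));
    }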
@@ -179,15 +264,17 @@ void StaticMarkingVisitor<StaticVisitor>::VisitEmbeddedPointer(
ASSERT(!rinfo->target_object()->IsConsString());
HeapObject* object = HeapObject::cast(rinfo->target_object());
heap->mark_compact_collector()->RecordRelocSlot(rinfo, object);
- StaticVisitor::MarkObject(heap, object);
+ if (!Code::IsWeakEmbeddedObject(rinfo->host()->kind(), object)) {
+ StaticVisitor::MarkObject(heap, object);
+ }
}
template<typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitGlobalPropertyCell(
+void StaticMarkingVisitor<StaticVisitor>::VisitCell(
Heap* heap, RelocInfo* rinfo) {
- ASSERT(rinfo->rmode() == RelocInfo::GLOBAL_PROPERTY_CELL);
- JSGlobalPropertyCell* cell = rinfo->target_cell();
+ ASSERT(rinfo->rmode() == RelocInfo::CELL);
+ Cell* cell = rinfo->target_cell();
StaticVisitor::MarkObject(heap, cell);
}
@@ -214,9 +301,10 @@ void StaticMarkingVisitor<StaticVisitor>::VisitCodeTarget(
// when they might be keeping a Context alive, or when the heap is about
// to be serialized.
if (FLAG_cleanup_code_caches_at_gc && target->is_inline_cache_stub()
- && (target->ic_state() == MEGAMORPHIC || heap->flush_monomorphic_ics() ||
+ && (target->ic_state() == MEGAMORPHIC || target->ic_state() == GENERIC ||
+ target->ic_state() == POLYMORPHIC || heap->flush_monomorphic_ics() ||
Serializer::enabled() || target->ic_age() != heap->global_ic_age())) {
- IC::Clear(rinfo->pc());
+ IC::Clear(target->GetIsolate(), rinfo->pc());
target = Code::GetCodeFromTargetAddress(rinfo->target_address());
}
heap->mark_compact_collector()->RecordRelocSlot(rinfo, target);
@@ -225,6 +313,17 @@ void StaticMarkingVisitor<StaticVisitor>::VisitCodeTarget(
template<typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitCodeAgeSequence(
+ Heap* heap, RelocInfo* rinfo) {
+ ASSERT(RelocInfo::IsCodeAgeSequence(rinfo->rmode()));
+ Code* target = rinfo->code_age_stub();
+ ASSERT(target != NULL);
+ heap->mark_compact_collector()->RecordRelocSlot(rinfo, target);
+ StaticVisitor::MarkObject(heap, target);
+}
+
+
+template<typename StaticVisitor>
void StaticMarkingVisitor<StaticVisitor>::VisitNativeContext(
Map* map, HeapObject* object) {
FixedBodyVisitor<StaticVisitor,
@@ -253,12 +352,9 @@ void StaticMarkingVisitor<StaticVisitor>::VisitMap(
map_object->ClearCodeCache(heap);
}
- // When map collection is enabled we have to mark through map's
- // transitions and back pointers in a special way to make these links
- // weak. Only maps for subclasses of JSReceiver can have transitions.
- STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
- if (FLAG_collect_maps &&
- map_object->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
+ // When map collection is enabled we have to mark through map's transitions
+ // and back pointers in a special way to make these links weak.
+ if (FLAG_collect_maps && map_object->CanTransition()) {
MarkMapContents(heap, map_object);
} else {
StaticVisitor::VisitPointers(heap,
@@ -269,6 +365,30 @@ void StaticMarkingVisitor<StaticVisitor>::VisitMap(
template<typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitPropertyCell(
+ Map* map, HeapObject* object) {
+ Heap* heap = map->GetHeap();
+
+ Object** slot =
+ HeapObject::RawField(object, PropertyCell::kDependentCodeOffset);
+ if (FLAG_collect_maps) {
+ // Mark property cell dependent codes array but do not push it onto marking
+ // stack, this will make references from it weak. We will clean dead
+ // codes when we iterate over property cells in ClearNonLiveReferences.
+ HeapObject* obj = HeapObject::cast(*slot);
+ heap->mark_compact_collector()->RecordSlot(slot, slot, obj);
+ StaticVisitor::MarkObjectWithoutPush(heap, obj);
+ } else {
+ StaticVisitor::VisitPointer(heap, slot);
+ }
+
+ StaticVisitor::VisitPointers(heap,
+ HeapObject::RawField(object, PropertyCell::kPointerFieldsBeginOffset),
+ HeapObject::RawField(object, PropertyCell::kPointerFieldsEndOffset));
+}
+
+
+template<typename StaticVisitor>
void StaticMarkingVisitor<StaticVisitor>::VisitCode(
Map* map, HeapObject* object) {
Heap* heap = map->GetHeap();
@@ -276,6 +396,9 @@ void StaticMarkingVisitor<StaticVisitor>::VisitCode(
if (FLAG_cleanup_code_caches_at_gc) {
code->ClearTypeFeedbackCells(heap);
}
+ if (FLAG_age_code && !Serializer::enabled()) {
+ code->MakeOlder(heap->mark_compact_collector()->marking_parity());
+ }
code->CodeIterateBody<StaticVisitor>(heap);
}
@@ -288,8 +411,23 @@ void StaticMarkingVisitor<StaticVisitor>::VisitSharedFunctionInfo(
if (shared->ic_age() != heap->global_ic_age()) {
shared->ResetForNewContext(heap->global_ic_age());
}
+ if (FLAG_cache_optimized_code &&
+ FLAG_flush_optimized_code_cache &&
+ !shared->optimized_code_map()->IsSmi()) {
+ // Always flush the optimized code map if requested by flag.
+ shared->ClearOptimizedCodeMap();
+ }
MarkCompactCollector* collector = heap->mark_compact_collector();
if (collector->is_code_flushing_enabled()) {
+ if (FLAG_cache_optimized_code && !shared->optimized_code_map()->IsSmi()) {
+ // Add the shared function info holding an optimized code map to
+ // the code flusher for processing of code maps after marking.
+ collector->code_flusher()->AddOptimizedCodeMap(shared);
+ // Treat all references within the code map weakly by marking the
+ // code map itself but not pushing it onto the marking deque.
+ FixedArray* code_map = FixedArray::cast(shared->optimized_code_map());
+ StaticVisitor::MarkObjectWithoutPush(heap, code_map);
+ }
if (IsFlushable(heap, shared)) {
// This function's code looks flushable. But we have to postpone
// the decision until we see all functions that point to the same
@@ -302,12 +440,34 @@ void StaticMarkingVisitor<StaticVisitor>::VisitSharedFunctionInfo(
VisitSharedFunctionInfoWeakCode(heap, object);
return;
}
+ } else {
+ if (FLAG_cache_optimized_code && !shared->optimized_code_map()->IsSmi()) {
+ // Flush optimized code map on major GCs without code flushing,
+ // needed because cached code doesn't contain breakpoints.
+ shared->ClearOptimizedCodeMap();
+ }
}
VisitSharedFunctionInfoStrongCode(heap, object);
}
template<typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitConstantPoolArray(
+ Map* map, HeapObject* object) {
+ Heap* heap = map->GetHeap();
+ ConstantPoolArray* constant_pool = ConstantPoolArray::cast(object);
+ int first_ptr_offset = constant_pool->OffsetOfElementAt(
+ constant_pool->first_ptr_index());
+ int last_ptr_offset = constant_pool->OffsetOfElementAt(
+ constant_pool->first_ptr_index() + constant_pool->count_of_ptr_entries());
+ StaticVisitor::VisitPointers(
+ heap,
+ HeapObject::RawField(object, first_ptr_offset),
+ HeapObject::RawField(object, last_ptr_offset));
+}
+
+
+template<typename StaticVisitor>
void StaticMarkingVisitor<StaticVisitor>::VisitJSFunction(
Map* map, HeapObject* object) {
Heap* heap = map->GetHeap();
@@ -325,7 +485,7 @@ void StaticMarkingVisitor<StaticVisitor>::VisitJSFunction(
// Visit shared function info immediately to avoid double checking
// of its flushability later. This is just an optimization because
// the shared function info would eventually be visited.
- SharedFunctionInfo* shared = function->unchecked_shared();
+ SharedFunctionInfo* shared = function->shared();
if (StaticVisitor::MarkObjectWithoutPush(heap, shared)) {
StaticVisitor::MarkObject(heap, shared->map());
VisitSharedFunctionInfoWeakCode(heap, shared);
@@ -357,6 +517,56 @@ void StaticMarkingVisitor<StaticVisitor>::VisitJSRegExp(
template<typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitJSArrayBuffer(
+ Map* map, HeapObject* object) {
+ Heap* heap = map->GetHeap();
+
+ STATIC_ASSERT(
+ JSArrayBuffer::kWeakFirstViewOffset ==
+ JSArrayBuffer::kWeakNextOffset + kPointerSize);
+ StaticVisitor::VisitPointers(
+ heap,
+ HeapObject::RawField(object, JSArrayBuffer::BodyDescriptor::kStartOffset),
+ HeapObject::RawField(object, JSArrayBuffer::kWeakNextOffset));
+ StaticVisitor::VisitPointers(
+ heap,
+ HeapObject::RawField(object,
+ JSArrayBuffer::kWeakNextOffset + 2 * kPointerSize),
+ HeapObject::RawField(object, JSArrayBuffer::kSizeWithInternalFields));
+}
+
+
+template<typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitJSTypedArray(
+ Map* map, HeapObject* object) {
+ StaticVisitor::VisitPointers(
+ map->GetHeap(),
+ HeapObject::RawField(object, JSTypedArray::BodyDescriptor::kStartOffset),
+ HeapObject::RawField(object, JSTypedArray::kWeakNextOffset));
+ StaticVisitor::VisitPointers(
+ map->GetHeap(),
+ HeapObject::RawField(object,
+ JSTypedArray::kWeakNextOffset + kPointerSize),
+ HeapObject::RawField(object, JSTypedArray::kSizeWithInternalFields));
+}
+
+
+template<typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitJSDataView(
+ Map* map, HeapObject* object) {
+ StaticVisitor::VisitPointers(
+ map->GetHeap(),
+ HeapObject::RawField(object, JSDataView::BodyDescriptor::kStartOffset),
+ HeapObject::RawField(object, JSDataView::kWeakNextOffset));
+ StaticVisitor::VisitPointers(
+ map->GetHeap(),
+ HeapObject::RawField(object,
+ JSDataView::kWeakNextOffset + kPointerSize),
+ HeapObject::RawField(object, JSDataView::kSizeWithInternalFields));
+}
+
+
+template<typename StaticVisitor>
void StaticMarkingVisitor<StaticVisitor>::MarkMapContents(
Heap* heap, Map* map) {
// Make sure that the back pointer stored either in the map itself or
@@ -376,6 +586,34 @@ void StaticMarkingVisitor<StaticVisitor>::MarkMapContents(
ASSERT(transitions->IsMap() || transitions->IsUndefined());
}
+ // Since descriptor arrays are potentially shared, ensure that only the
+ // descriptors that belong to this map are marked. The first time a
+ // non-empty descriptor array is marked, its header is also visited. The slot
+ // holding the descriptor array will be implicitly recorded when the pointer
+ // fields of this map are visited.
+ DescriptorArray* descriptors = map->instance_descriptors();
+ if (StaticVisitor::MarkObjectWithoutPush(heap, descriptors) &&
+ descriptors->length() > 0) {
+ StaticVisitor::VisitPointers(heap,
+ descriptors->GetFirstElementAddress(),
+ descriptors->GetDescriptorEndSlot(0));
+ }
+ int start = 0;
+ int end = map->NumberOfOwnDescriptors();
+ if (start < end) {
+ StaticVisitor::VisitPointers(heap,
+ descriptors->GetDescriptorStartSlot(start),
+ descriptors->GetDescriptorEndSlot(end));
+ }
+
+ // Mark prototype dependent codes array but do not push it onto marking
+ // stack, this will make references from it weak. We will clean dead
+ // codes when we iterate over maps in ClearNonLiveTransitions.
+ Object** slot = HeapObject::RawField(map, Map::kDependentCodeOffset);
+ HeapObject* obj = HeapObject::cast(*slot);
+ heap->mark_compact_collector()->RecordSlot(slot, slot, obj);
+ StaticVisitor::MarkObjectWithoutPush(heap, obj);
+
// Mark the pointer fields of the Map. Since the transitions array has
// been marked already, it is fine that one of these fields contains a
// pointer to it.
@@ -443,28 +681,30 @@ inline static bool HasSourceCode(Heap* heap, SharedFunctionInfo* info) {
template<typename StaticVisitor>
bool StaticMarkingVisitor<StaticVisitor>::IsFlushable(
Heap* heap, JSFunction* function) {
- SharedFunctionInfo* shared_info = function->unchecked_shared();
+ SharedFunctionInfo* shared_info = function->shared();
// Code is either on stack, in compilation cache or referenced
// by optimized version of function.
MarkBit code_mark = Marking::MarkBitFrom(function->code());
if (code_mark.Get()) {
- if (!Marking::MarkBitFrom(shared_info).Get()) {
- shared_info->set_code_age(0);
- }
return false;
}
// The function must have a valid context and not be a builtin.
- if (!IsValidNonBuiltinContext(function->unchecked_context())) {
+ if (!IsValidNonBuiltinContext(function->context())) {
return false;
}
- // We do not flush code for optimized functions.
+ // We do not (yet) flush code for optimized functions.
if (function->code() != shared_info->code()) {
return false;
}
+ // Check age of optimized code.
+ if (FLAG_age_code && !function->code()->IsOld()) {
+ return false;
+ }
+
return IsFlushable(heap, shared_info);
}
@@ -501,21 +741,25 @@ bool StaticMarkingVisitor<StaticVisitor>::IsFlushable(
return false;
}
- // If this is a full script wrapped in a function we do no flush the code.
- if (shared_info->is_toplevel()) {
+ // We do not (yet?) flush code for generator functions, because we don't know
+ // if there are still live activations (generator objects) on the heap.
+ if (shared_info->is_generator()) {
return false;
}
- // TODO(mstarzinger): The following will soon be replaced by a new way of
- // aging code, that is based on an aging stub in the function prologue.
+ // If this is a full script wrapped in a function we do not flush the code.
+ if (shared_info->is_toplevel()) {
+ return false;
+ }
- // How many collections newly compiled code object will survive before being
- // flushed.
- static const int kCodeAgeThreshold = 5;
+ // If this is a function initialized with %SetCode then the one-to-one
+ // relation between SharedFunctionInfo and Code is broken.
+ if (shared_info->dont_flush()) {
+ return false;
+ }
- // Age this shared function info.
- if (shared_info->code_age() < kCodeAgeThreshold) {
- shared_info->set_code_age(shared_info->code_age() + 1);
+ // Check age of code. If code aging is disabled we never flush.
+ if (!FLAG_age_code || !shared_info->code()->IsOld()) {
return false;
}
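The flushability checks above are a chain of early-outs: generator functions, top-level scripts, functions marked dont_flush by %SetCode, and code that has not yet aged to IsOld() all keep their compiled code. A condensed sketch of that predicate shape; the struct and field names are illustrative, not V8's:

    #include <cstdio>

    struct SharedInfo {
      bool is_generator;
      bool is_toplevel;
      bool dont_flush;
      bool code_is_old;
    };

    // Mirrors the early-out structure of IsFlushable(): any reason to keep the
    // code short-circuits to false; only old, ordinary functions get flushed.
    static bool IsFlushable(const SharedInfo& shared, bool age_code_enabled) {
      if (shared.is_generator) return false;  // live activations may remain
      if (shared.is_toplevel) return false;   // full script wrapped in a function
      if (shared.dont_flush) return false;    // %SetCode broke the 1:1 relation
      if (!age_code_enabled || !shared.code_is_old) return false;
      return true;
    }

    int main() {
      SharedInfo ordinary_old = {false, false, false, true};
      SharedInfo generator = {true, false, false, true};
      std::printf("%d %d\n", IsFlushable(ordinary_old, true),
                  IsFlushable(generator, true));
    }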
@@ -606,22 +850,23 @@ void StaticMarkingVisitor<StaticVisitor>::VisitJSFunctionWeakCode(
void Code::CodeIterateBody(ObjectVisitor* v) {
int mode_mask = RelocInfo::kCodeTargetMask |
RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
- RelocInfo::ModeMask(RelocInfo::GLOBAL_PROPERTY_CELL) |
+ RelocInfo::ModeMask(RelocInfo::CELL) |
RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
RelocInfo::ModeMask(RelocInfo::JS_RETURN) |
RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT) |
RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
// There are two places where we iterate code bodies: here and the
- // templated CodeIterateBody (below). They should be kept in sync.
+ // templated CodeIterateBody (below). They should be kept in sync.
IteratePointer(v, kRelocationInfoOffset);
IteratePointer(v, kHandlerTableOffset);
IteratePointer(v, kDeoptimizationDataOffset);
IteratePointer(v, kTypeFeedbackInfoOffset);
RelocIterator it(this, mode_mask);
+ Isolate* isolate = this->GetIsolate();
for (; !it.done(); it.next()) {
- it.rinfo()->Visit(v);
+ it.rinfo()->Visit(isolate, v);
}
}
@@ -630,14 +875,14 @@ template<typename StaticVisitor>
void Code::CodeIterateBody(Heap* heap) {
int mode_mask = RelocInfo::kCodeTargetMask |
RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
- RelocInfo::ModeMask(RelocInfo::GLOBAL_PROPERTY_CELL) |
+ RelocInfo::ModeMask(RelocInfo::CELL) |
RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
RelocInfo::ModeMask(RelocInfo::JS_RETURN) |
RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT) |
RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
- // There are two places where we iterate code bodies: here and the
- // non-templated CodeIterateBody (above). They should be kept in sync.
+ // There are two places where we iterate code bodies: here and the non-
+ // templated CodeIterateBody (above). They should be kept in sync.
StaticVisitor::VisitPointer(
heap,
reinterpret_cast<Object**>(this->address() + kRelocationInfoOffset));
diff --git a/deps/v8/src/objects-visiting.cc b/deps/v8/src/objects-visiting.cc
index a2dc43e24..5ced2cf7a 100644
--- a/deps/v8/src/objects-visiting.cc
+++ b/deps/v8/src/objects-visiting.cc
@@ -45,8 +45,8 @@ StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
if (instance_type < FIRST_NONSTRING_TYPE) {
switch (instance_type & kStringRepresentationMask) {
case kSeqStringTag:
- if ((instance_type & kStringEncodingMask) == kAsciiStringTag) {
- return kVisitSeqAsciiString;
+ if ((instance_type & kStringEncodingMask) == kOneByteStringTag) {
+ return kVisitSeqOneByteString;
} else {
return kVisitSeqTwoByteString;
}
@@ -82,6 +82,9 @@ StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
case FIXED_DOUBLE_ARRAY_TYPE:
return kVisitFixedDoubleArray;
+ case CONSTANT_POOL_ARRAY_TYPE:
+ return kVisitConstantPoolArray;
+
case ODDBALL_TYPE:
return kVisitOddball;
@@ -91,7 +94,10 @@ StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
case CODE_TYPE:
return kVisitCode;
- case JS_GLOBAL_PROPERTY_CELL_TYPE:
+ case CELL_TYPE:
+ return kVisitCell;
+
+ case PROPERTY_CELL_TYPE:
return kVisitPropertyCell;
case JS_SET_TYPE:
@@ -107,6 +113,9 @@ StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
case JS_WEAK_MAP_TYPE:
return kVisitJSWeakMap;
+ case JS_WEAK_SET_TYPE:
+ return kVisitJSWeakSet;
+
case JS_REGEXP_TYPE:
return kVisitJSRegExp;
@@ -128,11 +137,24 @@ StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
kVisitDataObjectGeneric,
Foreign::kSize);
+ case SYMBOL_TYPE:
+ return kVisitSymbol;
+
case FILLER_TYPE:
return kVisitDataObjectGeneric;
+ case JS_ARRAY_BUFFER_TYPE:
+ return kVisitJSArrayBuffer;
+
+ case JS_TYPED_ARRAY_TYPE:
+ return kVisitJSTypedArray;
+
+ case JS_DATA_VIEW_TYPE:
+ return kVisitJSDataView;
+
case JS_OBJECT_TYPE:
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
+ case JS_GENERATOR_OBJECT_TYPE:
case JS_MODULE_TYPE:
case JS_VALUE_TYPE:
case JS_DATE_TYPE:
@@ -166,6 +188,10 @@ StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
case NAME##_TYPE:
STRUCT_LIST(MAKE_STRUCT_CASE)
#undef MAKE_STRUCT_CASE
+ if (instance_type == ALLOCATION_SITE_TYPE) {
+ return kVisitAllocationSite;
+ }
+
return GetVisitorIdForSize(kVisitStruct,
kVisitStructGeneric,
instance_size);
diff --git a/deps/v8/src/objects-visiting.h b/deps/v8/src/objects-visiting.h
index 26d1b121d..60e6f6747 100644
--- a/deps/v8/src/objects-visiting.h
+++ b/deps/v8/src/objects-visiting.h
@@ -47,14 +47,16 @@ namespace internal {
class StaticVisitorBase : public AllStatic {
public:
#define VISITOR_ID_LIST(V) \
- V(SeqAsciiString) \
+ V(SeqOneByteString) \
V(SeqTwoByteString) \
V(ShortcutCandidate) \
V(ByteArray) \
V(FreeSpace) \
V(FixedArray) \
V(FixedDoubleArray) \
+ V(ConstantPoolArray) \
V(NativeContext) \
+ V(AllocationSite) \
V(DataObject2) \
V(DataObject3) \
V(DataObject4) \
@@ -84,13 +86,19 @@ class StaticVisitorBase : public AllStatic {
V(StructGeneric) \
V(ConsString) \
V(SlicedString) \
+ V(Symbol) \
V(Oddball) \
V(Code) \
V(Map) \
+ V(Cell) \
V(PropertyCell) \
V(SharedFunctionInfo) \
V(JSFunction) \
V(JSWeakMap) \
+ V(JSWeakSet) \
+ V(JSArrayBuffer) \
+ V(JSTypedArray) \
+ V(JSDataView) \
V(JSRegExp)
// For data objects, JS objects and structs along with generic visitor which
@@ -134,7 +142,7 @@ class StaticVisitorBase : public AllStatic {
(base == kVisitJSObject));
ASSERT(IsAligned(object_size, kPointerSize));
ASSERT(kMinObjectSizeInWords * kPointerSize <= object_size);
- ASSERT(object_size < Page::kMaxNonCodeHeapObjectSize);
+ ASSERT(object_size <= Page::kMaxNonCodeHeapObjectSize);
const VisitorId specialization = static_cast<VisitorId>(
base + (object_size >> kPointerSizeLog2) - kMinObjectSizeInWords);
@@ -221,7 +229,7 @@ class BodyVisitorBase : public AllStatic {
template<typename StaticVisitor, typename BodyDescriptor, typename ReturnType>
class FlexibleBodyVisitor : public BodyVisitorBase<StaticVisitor> {
public:
- static inline ReturnType Visit(Map* map, HeapObject* object) {
+ INLINE(static ReturnType Visit(Map* map, HeapObject* object)) {
int object_size = BodyDescriptor::SizeOf(map, object);
BodyVisitorBase<StaticVisitor>::IteratePointers(
map->GetHeap(),
@@ -247,7 +255,7 @@ class FlexibleBodyVisitor : public BodyVisitorBase<StaticVisitor> {
template<typename StaticVisitor, typename BodyDescriptor, typename ReturnType>
class FixedBodyVisitor : public BodyVisitorBase<StaticVisitor> {
public:
- static inline ReturnType Visit(Map* map, HeapObject* object) {
+ INLINE(static ReturnType Visit(Map* map, HeapObject* object)) {
BodyVisitorBase<StaticVisitor>::IteratePointers(
map->GetHeap(),
object,
@@ -279,16 +287,16 @@ class StaticNewSpaceVisitor : public StaticVisitorBase {
public:
static void Initialize();
- static inline int IterateBody(Map* map, HeapObject* obj) {
+ INLINE(static int IterateBody(Map* map, HeapObject* obj)) {
return table_.GetVisitor(map)(map, obj);
}
- static inline void VisitPointers(Heap* heap, Object** start, Object** end) {
+ INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
for (Object** p = start; p < end; p++) StaticVisitor::VisitPointer(heap, p);
}
private:
- static inline int VisitJSFunction(Map* map, HeapObject* object) {
+ INLINE(static int VisitJSFunction(Map* map, HeapObject* object)) {
Heap* heap = map->GetHeap();
VisitPointers(heap,
HeapObject::RawField(object, JSFunction::kPropertiesOffset),
@@ -305,33 +313,37 @@ class StaticNewSpaceVisitor : public StaticVisitorBase {
return JSFunction::kSize;
}
- static inline int VisitByteArray(Map* map, HeapObject* object) {
+ INLINE(static int VisitByteArray(Map* map, HeapObject* object)) {
return reinterpret_cast<ByteArray*>(object)->ByteArraySize();
}
- static inline int VisitFixedDoubleArray(Map* map, HeapObject* object) {
+ INLINE(static int VisitFixedDoubleArray(Map* map, HeapObject* object)) {
int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
return FixedDoubleArray::SizeFor(length);
}
- static inline int VisitJSObject(Map* map, HeapObject* object) {
+ INLINE(static int VisitJSObject(Map* map, HeapObject* object)) {
return JSObjectVisitor::Visit(map, object);
}
- static inline int VisitSeqAsciiString(Map* map, HeapObject* object) {
- return SeqAsciiString::cast(object)->
- SeqAsciiStringSize(map->instance_type());
+ INLINE(static int VisitSeqOneByteString(Map* map, HeapObject* object)) {
+ return SeqOneByteString::cast(object)->
+ SeqOneByteStringSize(map->instance_type());
}
- static inline int VisitSeqTwoByteString(Map* map, HeapObject* object) {
+ INLINE(static int VisitSeqTwoByteString(Map* map, HeapObject* object)) {
return SeqTwoByteString::cast(object)->
SeqTwoByteStringSize(map->instance_type());
}
- static inline int VisitFreeSpace(Map* map, HeapObject* object) {
+ INLINE(static int VisitFreeSpace(Map* map, HeapObject* object)) {
return FreeSpace::cast(object)->Size();
}
+ INLINE(static int VisitJSArrayBuffer(Map* map, HeapObject* object));
+ INLINE(static int VisitJSTypedArray(Map* map, HeapObject* object));
+ INLINE(static int VisitJSDataView(Map* map, HeapObject* object));
+
class DataObjectVisitor {
public:
template<int object_size>
@@ -339,7 +351,7 @@ class StaticNewSpaceVisitor : public StaticVisitorBase {
return object_size;
}
- static inline int Visit(Map* map, HeapObject* object) {
+ INLINE(static int Visit(Map* map, HeapObject* object)) {
return map->instance_size();
}
};
@@ -382,20 +394,19 @@ class StaticMarkingVisitor : public StaticVisitorBase {
public:
static void Initialize();
- static inline void IterateBody(Map* map, HeapObject* obj) {
+ INLINE(static void IterateBody(Map* map, HeapObject* obj)) {
table_.GetVisitor(map)(map, obj);
}
- static inline void VisitCodeEntry(Heap* heap, Address entry_address);
- static inline void VisitEmbeddedPointer(Heap* heap, RelocInfo* rinfo);
- static inline void VisitGlobalPropertyCell(Heap* heap, RelocInfo* rinfo);
- static inline void VisitDebugTarget(Heap* heap, RelocInfo* rinfo);
- static inline void VisitCodeTarget(Heap* heap, RelocInfo* rinfo);
- static inline void VisitExternalReference(RelocInfo* rinfo) { }
- static inline void VisitRuntimeEntry(RelocInfo* rinfo) { }
-
- // TODO(mstarzinger): This should be made protected once refactoring is done.
- static inline void VisitNativeContext(Map* map, HeapObject* object);
+ INLINE(static void VisitPropertyCell(Map* map, HeapObject* object));
+ INLINE(static void VisitCodeEntry(Heap* heap, Address entry_address));
+ INLINE(static void VisitEmbeddedPointer(Heap* heap, RelocInfo* rinfo));
+ INLINE(static void VisitCell(Heap* heap, RelocInfo* rinfo));
+ INLINE(static void VisitDebugTarget(Heap* heap, RelocInfo* rinfo));
+ INLINE(static void VisitCodeTarget(Heap* heap, RelocInfo* rinfo));
+ INLINE(static void VisitCodeAgeSequence(Heap* heap, RelocInfo* rinfo));
+ INLINE(static void VisitExternalReference(RelocInfo* rinfo)) { }
+ INLINE(static void VisitRuntimeEntry(RelocInfo* rinfo)) { }
// TODO(mstarzinger): This should be made protected once refactoring is done.
// Mark non-optimized code for functions inlined into the given optimized
@@ -403,11 +414,16 @@ class StaticMarkingVisitor : public StaticVisitorBase {
static void MarkInlinedFunctionsCode(Heap* heap, Code* code);
protected:
- static inline void VisitMap(Map* map, HeapObject* object);
- static inline void VisitCode(Map* map, HeapObject* object);
- static inline void VisitSharedFunctionInfo(Map* map, HeapObject* object);
- static inline void VisitJSFunction(Map* map, HeapObject* object);
- static inline void VisitJSRegExp(Map* map, HeapObject* object);
+ INLINE(static void VisitMap(Map* map, HeapObject* object));
+ INLINE(static void VisitCode(Map* map, HeapObject* object));
+ INLINE(static void VisitSharedFunctionInfo(Map* map, HeapObject* object));
+ INLINE(static void VisitConstantPoolArray(Map* map, HeapObject* object));
+ INLINE(static void VisitJSFunction(Map* map, HeapObject* object));
+ INLINE(static void VisitJSRegExp(Map* map, HeapObject* object));
+ INLINE(static void VisitJSArrayBuffer(Map* map, HeapObject* object));
+ INLINE(static void VisitJSTypedArray(Map* map, HeapObject* object));
+ INLINE(static void VisitJSDataView(Map* map, HeapObject* object));
+ INLINE(static void VisitNativeContext(Map* map, HeapObject* object));
// Mark pointers in a Map and its TransitionArray together, possibly
// treating transitions or back pointers weak.
@@ -415,8 +431,8 @@ class StaticMarkingVisitor : public StaticVisitorBase {
static void MarkTransitionArray(Heap* heap, TransitionArray* transitions);
// Code flushing support.
- static inline bool IsFlushable(Heap* heap, JSFunction* function);
- static inline bool IsFlushable(Heap* heap, SharedFunctionInfo* shared_info);
+ INLINE(static bool IsFlushable(Heap* heap, JSFunction* function));
+ INLINE(static bool IsFlushable(Heap* heap, SharedFunctionInfo* shared_info));
// Helpers used by code flushing support that visit pointer fields and treat
// references to code objects either strongly or weakly.
@@ -431,11 +447,15 @@ class StaticMarkingVisitor : public StaticVisitorBase {
static inline void VisitSpecialized(Map* map, HeapObject* object) {
}
- static inline void Visit(Map* map, HeapObject* object) {
+ INLINE(static void Visit(Map* map, HeapObject* object)) {
}
};
typedef FlexibleBodyVisitor<StaticVisitor,
+ FixedArray::BodyDescriptor,
+ void> FixedArrayVisitor;
+
+ typedef FlexibleBodyVisitor<StaticVisitor,
JSObject::BodyDescriptor,
void> JSObjectVisitor;
diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc
index 37f8361d8..22c01bf1a 100644
--- a/deps/v8/src/objects.cc
+++ b/deps/v8/src/objects.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -27,10 +27,13 @@
#include "v8.h"
+#include "accessors.h"
+#include "allocation-site-scopes.h"
#include "api.h"
#include "arguments.h"
#include "bootstrapper.h"
#include "codegen.h"
+#include "cpu-profiler.h"
#include "debug.h"
#include "deoptimizer.h"
#include "date.h"
@@ -38,6 +41,8 @@
#include "execution.h"
#include "full-codegen.h"
#include "hydrogen.h"
+#include "isolate-inl.h"
+#include "log.h"
#include "objects-inl.h"
#include "objects-visiting.h"
#include "objects-visiting-inl.h"
@@ -46,7 +51,6 @@
#include "safepoint-table.h"
#include "string-stream.h"
#include "utils.h"
-#include "vm-state-inl.h"
#ifdef ENABLE_DISASSEMBLER
#include "disasm.h"
@@ -82,21 +86,21 @@ MaybeObject* Object::ToObject(Context* native_context) {
}
-MaybeObject* Object::ToObject() {
+MaybeObject* Object::ToObject(Isolate* isolate) {
if (IsJSReceiver()) {
return this;
} else if (IsNumber()) {
- Isolate* isolate = Isolate::Current();
Context* native_context = isolate->context()->native_context();
return CreateJSValue(native_context->number_function(), this);
} else if (IsBoolean()) {
- Isolate* isolate = HeapObject::cast(this)->GetIsolate();
Context* native_context = isolate->context()->native_context();
return CreateJSValue(native_context->boolean_function(), this);
} else if (IsString()) {
- Isolate* isolate = HeapObject::cast(this)->GetIsolate();
Context* native_context = isolate->context()->native_context();
return CreateJSValue(native_context->string_function(), this);
+ } else if (IsSymbol()) {
+ Context* native_context = isolate->context()->native_context();
+ return CreateJSValue(native_context->symbol_function(), this);
}
// Throw a type error.
@@ -104,45 +108,33 @@ MaybeObject* Object::ToObject() {
}
-Object* Object::ToBoolean() {
- if (IsTrue()) return this;
- if (IsFalse()) return this;
- if (IsSmi()) {
- return Isolate::Current()->heap()->ToBoolean(Smi::cast(this)->value() != 0);
- }
- HeapObject* heap_object = HeapObject::cast(this);
- if (heap_object->IsUndefined() || heap_object->IsNull()) {
- return heap_object->GetHeap()->false_value();
- }
- // Undetectable object is false
- if (heap_object->IsUndetectableObject()) {
- return heap_object->GetHeap()->false_value();
- }
- if (heap_object->IsString()) {
- return heap_object->GetHeap()->ToBoolean(
- String::cast(this)->length() != 0);
- }
- if (heap_object->IsHeapNumber()) {
- return HeapNumber::cast(this)->HeapNumberToBoolean();
- }
- return heap_object->GetHeap()->true_value();
+bool Object::BooleanValue() {
+ if (IsBoolean()) return IsTrue();
+ if (IsSmi()) return Smi::cast(this)->value() != 0;
+ if (IsUndefined() || IsNull()) return false;
+ if (IsUndetectableObject()) return false; // Undetectable object is false.
+ if (IsString()) return String::cast(this)->length() != 0;
+ if (IsHeapNumber()) return HeapNumber::cast(this)->HeapNumberBooleanValue();
+ return true;
}
-void Object::Lookup(String* name, LookupResult* result) {
+void Object::Lookup(Name* name, LookupResult* result) {
Object* holder = NULL;
if (IsJSReceiver()) {
holder = this;
} else {
- Context* native_context = Isolate::Current()->context()->native_context();
+ Context* native_context = result->isolate()->context()->native_context();
if (IsNumber()) {
holder = native_context->number_function()->instance_prototype();
} else if (IsString()) {
holder = native_context->string_function()->instance_prototype();
+ } else if (IsSymbol()) {
+ holder = native_context->symbol_function()->instance_prototype();
} else if (IsBoolean()) {
holder = native_context->boolean_function()->instance_prototype();
} else {
- Isolate::Current()->PushStackTraceAndDie(
+ result->isolate()->PushStackTraceAndDie(
0xDEAD0000, this, JSReceiver::cast(this)->map(), 0xDEAD0001);
}
}
@@ -151,8 +143,22 @@ void Object::Lookup(String* name, LookupResult* result) {
}
+Handle<Object> Object::GetPropertyWithReceiver(
+ Handle<Object> object,
+ Handle<Object> receiver,
+ Handle<Name> name,
+ PropertyAttributes* attributes) {
+ LookupResult lookup(name->GetIsolate());
+ object->Lookup(*name, &lookup);
+ Handle<Object> result =
+ GetProperty(object, receiver, &lookup, name, attributes);
+ ASSERT(*attributes <= ABSENT);
+ return result;
+}
+
+
MaybeObject* Object::GetPropertyWithReceiver(Object* receiver,
- String* name,
+ Name* name,
PropertyAttributes* attributes) {
LookupResult result(name->GetIsolate());
Lookup(name, &result);
@@ -162,9 +168,193 @@ MaybeObject* Object::GetPropertyWithReceiver(Object* receiver,
}
-MaybeObject* JSObject::GetPropertyWithCallback(Object* receiver,
- Object* structure,
- String* name) {
+bool Object::ToInt32(int32_t* value) {
+ if (IsSmi()) {
+ *value = Smi::cast(this)->value();
+ return true;
+ }
+ if (IsHeapNumber()) {
+ double num = HeapNumber::cast(this)->value();
+ if (FastI2D(FastD2I(num)) == num) {
+ *value = FastD2I(num);
+ return true;
+ }
+ }
+ return false;
+}
+
+
+bool Object::ToUint32(uint32_t* value) {
+ if (IsSmi()) {
+ int num = Smi::cast(this)->value();
+ if (num >= 0) {
+ *value = static_cast<uint32_t>(num);
+ return true;
+ }
+ }
+ if (IsHeapNumber()) {
+ double num = HeapNumber::cast(this)->value();
+ if (num >= 0 && FastUI2D(FastD2UI(num)) == num) {
+ *value = FastD2UI(num);
+ return true;
+ }
+ }
+ return false;
+}
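
Both conversions above rely on the same round-trip test: a double is accepted as an int32 (or uint32) only if truncating it and widening back reproduces the original value. A minimal standalone sketch of that idea, with plain casts standing in for the FastD2I/FastI2D helpers (illustrative only, not part of the patch):

    #include <cstdint>
    #include <iostream>

    // Accept a double only if it is exactly representable as an int32.
    // static_cast is a simplified stand-in for FastD2I/FastI2D; inputs are
    // kept inside int32 range so the cast is well defined.
    static bool DoubleToInt32(double num, int32_t* value) {
      int32_t truncated = static_cast<int32_t>(num);
      if (static_cast<double>(truncated) == num) {
        *value = truncated;
        return true;
      }
      return false;
    }

    int main() {
      int32_t v;
      std::cout << DoubleToInt32(42.0, &v) << "\n";  // 1: exact int32
      std::cout << DoubleToInt32(42.5, &v) << "\n";  // 0: fractional part
      std::cout << DoubleToInt32(-0.0, &v) << "\n";  // 1: -0 round-trips to 0
      return 0;
    }
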
+
+
+template<typename To>
+static inline To* CheckedCast(void *from) {
+ uintptr_t temp = reinterpret_cast<uintptr_t>(from);
+ ASSERT(temp % sizeof(To) == 0);
+ return reinterpret_cast<To*>(temp);
+}
+
+
+static MaybeObject* PerformCompare(const BitmaskCompareDescriptor& descriptor,
+ char* ptr,
+ Heap* heap) {
+ uint32_t bitmask = descriptor.bitmask;
+ uint32_t compare_value = descriptor.compare_value;
+ uint32_t value;
+ switch (descriptor.size) {
+ case 1:
+ value = static_cast<uint32_t>(*CheckedCast<uint8_t>(ptr));
+ compare_value &= 0xff;
+ bitmask &= 0xff;
+ break;
+ case 2:
+ value = static_cast<uint32_t>(*CheckedCast<uint16_t>(ptr));
+ compare_value &= 0xffff;
+ bitmask &= 0xffff;
+ break;
+ case 4:
+ value = *CheckedCast<uint32_t>(ptr);
+ break;
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+ return heap->ToBoolean((bitmask & value) == (bitmask & compare_value));
+}
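
The bitmask comparison above only inspects the bits selected by the mask; all other bits of the loaded value are ignored. A tiny standalone illustration of the predicate (not part of the patch):

    #include <cstdint>
    #include <iostream>

    // Same predicate as the BitmaskCompareDescriptor path: compare value and
    // compare_value only on the bits selected by bitmask.
    static bool BitmaskCompare(uint32_t bitmask, uint32_t compare_value,
                               uint32_t value) {
      return (bitmask & value) == (bitmask & compare_value);
    }

    int main() {
      // Ask whether bit 0x04 is set, ignoring every other bit.
      std::cout << BitmaskCompare(0x04, 0x04, 0x1C) << "\n";  // 1
      std::cout << BitmaskCompare(0x04, 0x04, 0x18) << "\n";  // 0
      return 0;
    }
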
+
+
+static MaybeObject* PerformCompare(const PointerCompareDescriptor& descriptor,
+ char* ptr,
+ Heap* heap) {
+ uintptr_t compare_value =
+ reinterpret_cast<uintptr_t>(descriptor.compare_value);
+ uintptr_t value = *CheckedCast<uintptr_t>(ptr);
+ return heap->ToBoolean(compare_value == value);
+}
+
+
+static MaybeObject* GetPrimitiveValue(
+ const PrimitiveValueDescriptor& descriptor,
+ char* ptr,
+ Heap* heap) {
+ int32_t int32_value = 0;
+ switch (descriptor.data_type) {
+ case kDescriptorInt8Type:
+ int32_value = *CheckedCast<int8_t>(ptr);
+ break;
+ case kDescriptorUint8Type:
+ int32_value = *CheckedCast<uint8_t>(ptr);
+ break;
+ case kDescriptorInt16Type:
+ int32_value = *CheckedCast<int16_t>(ptr);
+ break;
+ case kDescriptorUint16Type:
+ int32_value = *CheckedCast<uint16_t>(ptr);
+ break;
+ case kDescriptorInt32Type:
+ int32_value = *CheckedCast<int32_t>(ptr);
+ break;
+ case kDescriptorUint32Type: {
+ uint32_t value = *CheckedCast<uint32_t>(ptr);
+ return heap->NumberFromUint32(value);
+ }
+ case kDescriptorBoolType: {
+ uint8_t byte = *CheckedCast<uint8_t>(ptr);
+ return heap->ToBoolean(byte & (0x1 << descriptor.bool_offset));
+ }
+ case kDescriptorFloatType: {
+ float value = *CheckedCast<float>(ptr);
+ return heap->NumberFromDouble(value);
+ }
+ case kDescriptorDoubleType: {
+ double value = *CheckedCast<double>(ptr);
+ return heap->NumberFromDouble(value);
+ }
+ }
+ return heap->NumberFromInt32(int32_value);
+}
+
+
+static MaybeObject* GetDeclaredAccessorProperty(Object* receiver,
+ DeclaredAccessorInfo* info,
+ Isolate* isolate) {
+ char* current = reinterpret_cast<char*>(receiver);
+ DeclaredAccessorDescriptorIterator iterator(info->descriptor());
+ while (true) {
+ const DeclaredAccessorDescriptorData* data = iterator.Next();
+ switch (data->type) {
+ case kDescriptorReturnObject: {
+ ASSERT(iterator.Complete());
+ current = *CheckedCast<char*>(current);
+ return *CheckedCast<Object*>(current);
+ }
+ case kDescriptorPointerDereference:
+ ASSERT(!iterator.Complete());
+ current = *reinterpret_cast<char**>(current);
+ break;
+ case kDescriptorPointerShift:
+ ASSERT(!iterator.Complete());
+ current += data->pointer_shift_descriptor.byte_offset;
+ break;
+ case kDescriptorObjectDereference: {
+ ASSERT(!iterator.Complete());
+ Object* object = CheckedCast<Object>(current);
+ int field = data->object_dereference_descriptor.internal_field;
+ Object* smi = JSObject::cast(object)->GetInternalField(field);
+ ASSERT(smi->IsSmi());
+ current = reinterpret_cast<char*>(smi);
+ break;
+ }
+ case kDescriptorBitmaskCompare:
+ ASSERT(iterator.Complete());
+ return PerformCompare(data->bitmask_compare_descriptor,
+ current,
+ isolate->heap());
+ case kDescriptorPointerCompare:
+ ASSERT(iterator.Complete());
+ return PerformCompare(data->pointer_compare_descriptor,
+ current,
+ isolate->heap());
+ case kDescriptorPrimitiveValue:
+ ASSERT(iterator.Complete());
+ return GetPrimitiveValue(data->primitive_value_descriptor,
+ current,
+ isolate->heap());
+ }
+ }
+ UNREACHABLE();
+ return NULL;
+}
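
GetDeclaredAccessorProperty interprets a small sequence of descriptor steps (dereference a pointer, shift by a byte offset, read a primitive) against the raw receiver. A self-contained sketch of that walk over ordinary structs, with hypothetical types in place of V8 objects (illustrative only):

    #include <cstdint>
    #include <cstring>
    #include <iostream>

    // Hypothetical stand-ins for a wrapped C++ object reachable from a holder.
    struct Wrapped { int32_t payload; };
    struct Holder  { Wrapped* target; };

    int main() {
      Wrapped wrapped = { 42 };
      Holder holder = { &wrapped };

      // Start from the holder, as the real code starts from the receiver.
      char* current = reinterpret_cast<char*>(&holder);

      // kDescriptorPointerDereference analogue: follow the embedded pointer.
      Wrapped* next;
      std::memcpy(&next, current, sizeof(next));
      current = reinterpret_cast<char*>(next);

      // kDescriptorPointerShift analogue: advance to the payload field
      // (byte offset 0 in this toy layout).
      current += 0;

      // kDescriptorPrimitiveValue analogue with an int32 data type.
      int32_t value;
      std::memcpy(&value, current, sizeof(value));
      std::cout << value << "\n";  // 42
      return 0;
    }
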
+
+
+Handle<FixedArray> JSObject::EnsureWritableFastElements(
+ Handle<JSObject> object) {
+ CALL_HEAP_FUNCTION(object->GetIsolate(),
+ object->EnsureWritableFastElements(),
+ FixedArray);
+}
+
+
+Handle<Object> JSObject::GetPropertyWithCallback(Handle<JSObject> object,
+ Handle<Object> receiver,
+ Handle<Object> structure,
+ Handle<Name> name) {
Isolate* isolate = name->GetIsolate();
// To accommodate both the old and the new api we switch on the
// data structure used to store the callbacks. Eventually foreign
@@ -172,71 +362,83 @@ MaybeObject* JSObject::GetPropertyWithCallback(Object* receiver,
if (structure->IsForeign()) {
AccessorDescriptor* callback =
reinterpret_cast<AccessorDescriptor*>(
- Foreign::cast(structure)->foreign_address());
- MaybeObject* value = (callback->getter)(receiver, callback->data);
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- return value;
+ Handle<Foreign>::cast(structure)->foreign_address());
+ CALL_HEAP_FUNCTION(isolate,
+ (callback->getter)(isolate, *receiver, callback->data),
+ Object);
}
// api style callbacks.
if (structure->IsAccessorInfo()) {
- AccessorInfo* data = AccessorInfo::cast(structure);
- if (!data->IsCompatibleReceiver(receiver)) {
- Handle<Object> name_handle(name);
- Handle<Object> receiver_handle(receiver);
- Handle<Object> args[2] = { name_handle, receiver_handle };
+ Handle<AccessorInfo> accessor_info = Handle<AccessorInfo>::cast(structure);
+ if (!accessor_info->IsCompatibleReceiver(*receiver)) {
+ Handle<Object> args[2] = { name, receiver };
Handle<Object> error =
isolate->factory()->NewTypeError("incompatible_method_receiver",
HandleVector(args,
ARRAY_SIZE(args)));
- return isolate->Throw(*error);
- }
- Object* fun_obj = data->getter();
- v8::AccessorGetter call_fun = v8::ToCData<v8::AccessorGetter>(fun_obj);
- if (call_fun == NULL) return isolate->heap()->undefined_value();
+ isolate->Throw(*error);
+ return Handle<Object>::null();
+ }
+ // TODO(rossberg): Handling symbols in the API requires changing the API,
+ // so we do not support it for now.
+ if (name->IsSymbol()) return isolate->factory()->undefined_value();
+ if (structure->IsDeclaredAccessorInfo()) {
+ CALL_HEAP_FUNCTION(
+ isolate,
+ GetDeclaredAccessorProperty(*receiver,
+ DeclaredAccessorInfo::cast(*structure),
+ isolate),
+ Object);
+ }
+
+ Handle<ExecutableAccessorInfo> data =
+ Handle<ExecutableAccessorInfo>::cast(structure);
+ v8::AccessorGetterCallback call_fun =
+ v8::ToCData<v8::AccessorGetterCallback>(data->getter());
+ if (call_fun == NULL) return isolate->factory()->undefined_value();
+
HandleScope scope(isolate);
- JSObject* self = JSObject::cast(receiver);
- Handle<String> key(name);
- LOG(isolate, ApiNamedPropertyAccess("load", self, name));
- CustomArguments args(isolate, data->data(), self, this);
- v8::AccessorInfo info(args.end());
- v8::Handle<v8::Value> result;
- {
- // Leaving JavaScript.
- VMState state(isolate, EXTERNAL);
- result = call_fun(v8::Utils::ToLocal(key), info);
- }
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
+ Handle<JSObject> self = Handle<JSObject>::cast(receiver);
+ Handle<String> key = Handle<String>::cast(name);
+ LOG(isolate, ApiNamedPropertyAccess("load", *self, *name));
+ PropertyCallbackArguments args(isolate, data->data(), *self, *object);
+ v8::Handle<v8::Value> result =
+ args.Call(call_fun, v8::Utils::ToLocal(key));
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
if (result.IsEmpty()) {
- return isolate->heap()->undefined_value();
+ return isolate->factory()->undefined_value();
}
- Object* return_value = *v8::Utils::OpenHandle(*result);
+ Handle<Object> return_value = v8::Utils::OpenHandle(*result);
return_value->VerifyApiCallResultType();
- return return_value;
+ return scope.CloseAndEscape(return_value);
}
// __defineGetter__ callback
- if (structure->IsAccessorPair()) {
- Object* getter = AccessorPair::cast(structure)->getter();
- if (getter->IsSpecFunction()) {
- // TODO(rossberg): nicer would be to cast to some JSCallable here...
- return GetPropertyWithDefinedGetter(receiver, JSReceiver::cast(getter));
- }
- // Getter is not a function.
- return isolate->heap()->undefined_value();
+ Handle<Object> getter(Handle<AccessorPair>::cast(structure)->getter(),
+ isolate);
+ if (getter->IsSpecFunction()) {
+ // TODO(rossberg): nicer would be to cast to some JSCallable here...
+ CALL_HEAP_FUNCTION(
+ isolate,
+ object->GetPropertyWithDefinedGetter(*receiver,
+ JSReceiver::cast(*getter)),
+ Object);
}
-
- UNREACHABLE();
- return NULL;
+ // Getter is not a function.
+ return isolate->factory()->undefined_value();
}
MaybeObject* JSProxy::GetPropertyWithHandler(Object* receiver_raw,
- String* name_raw) {
+ Name* name_raw) {
Isolate* isolate = GetIsolate();
HandleScope scope(isolate);
- Handle<Object> receiver(receiver_raw);
- Handle<Object> name(name_raw);
+ Handle<Object> receiver(receiver_raw, isolate);
+ Handle<Object> name(name_raw, isolate);
+
+ // TODO(rossberg): adjust once there is a story for symbols vs proxies.
+ if (name->IsSymbol()) return isolate->heap()->undefined_value();
Handle<Object> args[] = { receiver, name };
Handle<Object> result = CallTrap(
@@ -247,11 +449,22 @@ MaybeObject* JSProxy::GetPropertyWithHandler(Object* receiver_raw,
}
-Handle<Object> Object::GetElement(Handle<Object> object, uint32_t index) {
- Isolate* isolate = object->IsHeapObject()
- ? Handle<HeapObject>::cast(object)->GetIsolate()
- : Isolate::Current();
- CALL_HEAP_FUNCTION(isolate, object->GetElement(index), Object);
+Handle<Object> Object::GetProperty(Handle<Object> object,
+ Handle<Name> name) {
+ // TODO(rossberg): The index test should not be here but in the GetProperty
+ // method (or somewhere else entirely). Needs more global clean-up.
+ uint32_t index;
+ Isolate* isolate = name->GetIsolate();
+ if (name->AsArrayIndex(&index))
+ return GetElement(isolate, object, index);
+ CALL_HEAP_FUNCTION(isolate, object->GetProperty(*name), Object);
+}
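
The AsArrayIndex check above routes names that are canonical array indices ("0", "42", and so on) to the element path rather than the named-property path. A rough standalone sketch of that classification, assuming the ECMAScript array-index rules (no leading zeros, value below 2^32 - 1); it is not V8's implementation:

    #include <cstdint>
    #include <iostream>
    #include <string>

    // Treat a property name as an array index only if it is a canonical
    // decimal integer in [0, 2^32 - 1).
    static bool AsArrayIndex(const std::string& name, uint32_t* index) {
      if (name.empty()) return false;
      if (name.size() > 1 && name[0] == '0') return false;  // "042" is a name
      uint64_t value = 0;
      for (char c : name) {
        if (c < '0' || c > '9') return false;
        value = value * 10 + static_cast<uint64_t>(c - '0');
        if (value > 0xFFFFFFFEu) return false;  // must stay below 2^32 - 1
      }
      *index = static_cast<uint32_t>(value);
      return true;
    }

    int main() {
      uint32_t i;
      std::cout << AsArrayIndex("42", &i)  << "\n";  // 1: element 42
      std::cout << AsArrayIndex("042", &i) << "\n";  // 0: named property
      std::cout << AsArrayIndex("foo", &i) << "\n";  // 0: named property
      return 0;
    }
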
+
+
+Handle<Object> Object::GetElement(Isolate* isolate,
+ Handle<Object> object,
+ uint32_t index) {
+ CALL_HEAP_FUNCTION(isolate, object->GetElement(isolate, index), Object);
}
@@ -264,32 +477,33 @@ MaybeObject* JSProxy::GetElementWithHandler(Object* receiver,
}
-MaybeObject* JSProxy::SetElementWithHandler(JSReceiver* receiver,
- uint32_t index,
- Object* value,
- StrictModeFlag strict_mode) {
- String* name;
- MaybeObject* maybe = GetHeap()->Uint32ToString(index);
- if (!maybe->To<String>(&name)) return maybe;
- return SetPropertyWithHandler(receiver, name, value, NONE, strict_mode);
+Handle<Object> JSProxy::SetElementWithHandler(Handle<JSProxy> proxy,
+ Handle<JSReceiver> receiver,
+ uint32_t index,
+ Handle<Object> value,
+ StrictModeFlag strict_mode) {
+ Isolate* isolate = proxy->GetIsolate();
+ Handle<String> name = isolate->factory()->Uint32ToString(index);
+ return SetPropertyWithHandler(
+ proxy, receiver, name, value, NONE, strict_mode);
}
-bool JSProxy::HasElementWithHandler(uint32_t index) {
- String* name;
- MaybeObject* maybe = GetHeap()->Uint32ToString(index);
- if (!maybe->To<String>(&name)) return maybe;
- return HasPropertyWithHandler(name);
+bool JSProxy::HasElementWithHandler(Handle<JSProxy> proxy, uint32_t index) {
+ Isolate* isolate = proxy->GetIsolate();
+ Handle<String> name = isolate->factory()->Uint32ToString(index);
+ return HasPropertyWithHandler(proxy, name);
}
MaybeObject* Object::GetPropertyWithDefinedGetter(Object* receiver,
JSReceiver* getter) {
- HandleScope scope;
+ Isolate* isolate = getter->GetIsolate();
+ HandleScope scope(isolate);
Handle<JSReceiver> fun(getter);
- Handle<Object> self(receiver);
+ Handle<Object> self(receiver, isolate);
#ifdef ENABLE_DEBUGGER_SUPPORT
- Debug* debug = fun->GetHeap()->isolate()->debug();
+ Debug* debug = isolate->debug();
// Handle stepping into a getter if step into is active.
// TODO(rossberg): should this apply to getters that are function proxies?
if (debug->StepInActive() && fun->IsJSFunction()) {
@@ -299,8 +513,8 @@ MaybeObject* Object::GetPropertyWithDefinedGetter(Object* receiver,
#endif
bool has_pending_exception;
- Handle<Object> result =
- Execution::Call(fun, self, 0, NULL, &has_pending_exception, true);
+ Handle<Object> result = Execution::Call(
+ isolate, fun, self, 0, NULL, &has_pending_exception, true);
// Check for pending exception and return the result.
if (has_pending_exception) return Failure::Exception();
return *result;
@@ -308,50 +522,51 @@ MaybeObject* Object::GetPropertyWithDefinedGetter(Object* receiver,
// Only deal with CALLBACKS and INTERCEPTOR
-MaybeObject* JSObject::GetPropertyWithFailedAccessCheck(
- Object* receiver,
+Handle<Object> JSObject::GetPropertyWithFailedAccessCheck(
+ Handle<JSObject> object,
+ Handle<Object> receiver,
LookupResult* result,
- String* name,
+ Handle<Name> name,
PropertyAttributes* attributes) {
+ Isolate* isolate = name->GetIsolate();
if (result->IsProperty()) {
switch (result->type()) {
case CALLBACKS: {
// Only allow API accessors.
- Object* obj = result->GetCallbackObject();
- if (obj->IsAccessorInfo()) {
- AccessorInfo* info = AccessorInfo::cast(obj);
- if (info->all_can_read()) {
- *attributes = result->GetAttributes();
- return result->holder()->GetPropertyWithCallback(
- receiver, result->GetCallbackObject(), name);
- }
+ Handle<Object> callback_obj(result->GetCallbackObject(), isolate);
+ if (callback_obj->IsAccessorInfo()) {
+ if (!AccessorInfo::cast(*callback_obj)->all_can_read()) break;
+ *attributes = result->GetAttributes();
+ // Fall through to GetPropertyWithCallback.
+ } else if (callback_obj->IsAccessorPair()) {
+ if (!AccessorPair::cast(*callback_obj)->all_can_read()) break;
+ // Fall through to GetPropertyWithCallback.
+ } else {
+ break;
}
- break;
+ Handle<JSObject> holder(result->holder(), isolate);
+ return GetPropertyWithCallback(holder, receiver, callback_obj, name);
}
case NORMAL:
case FIELD:
- case CONSTANT_FUNCTION: {
+ case CONSTANT: {
// Search ALL_CAN_READ accessors in prototype chain.
- LookupResult r(GetIsolate());
- result->holder()->LookupRealNamedPropertyInPrototypes(name, &r);
+ LookupResult r(isolate);
+ result->holder()->LookupRealNamedPropertyInPrototypes(*name, &r);
if (r.IsProperty()) {
- return GetPropertyWithFailedAccessCheck(receiver,
- &r,
- name,
- attributes);
+ return GetPropertyWithFailedAccessCheck(
+ object, receiver, &r, name, attributes);
}
break;
}
case INTERCEPTOR: {
// If the object has an interceptor, try real named properties.
// No access check in GetPropertyAttributeWithInterceptor.
- LookupResult r(GetIsolate());
- result->holder()->LookupRealNamedProperty(name, &r);
+ LookupResult r(isolate);
+ result->holder()->LookupRealNamedProperty(*name, &r);
if (r.IsProperty()) {
- return GetPropertyWithFailedAccessCheck(receiver,
- &r,
- name,
- attributes);
+ return GetPropertyWithFailedAccessCheck(
+ object, receiver, &r, name, attributes);
}
break;
}
@@ -362,16 +577,16 @@ MaybeObject* JSObject::GetPropertyWithFailedAccessCheck(
// No accessible property found.
*attributes = ABSENT;
- Heap* heap = name->GetHeap();
- heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_GET);
- return heap->undefined_value();
+ isolate->ReportFailedAccessCheck(*object, v8::ACCESS_GET);
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ return isolate->factory()->undefined_value();
}
PropertyAttributes JSObject::GetPropertyAttributeWithFailedAccessCheck(
Object* receiver,
LookupResult* result,
- String* name,
+ Name* name,
bool continue_search) {
if (result->IsProperty()) {
switch (result->type()) {
@@ -383,13 +598,18 @@ PropertyAttributes JSObject::GetPropertyAttributeWithFailedAccessCheck(
if (info->all_can_read()) {
return result->GetAttributes();
}
+ } else if (obj->IsAccessorPair()) {
+ AccessorPair* pair = AccessorPair::cast(obj);
+ if (pair->all_can_read()) {
+ return result->GetAttributes();
+ }
}
break;
}
case NORMAL:
case FIELD:
- case CONSTANT_FUNCTION: {
+ case CONSTANT: {
if (!continue_search) break;
// Search ALL_CAN_READ accessors in prototype chain.
LookupResult r(GetIsolate());
@@ -435,65 +655,70 @@ Object* JSObject::GetNormalizedProperty(LookupResult* result) {
ASSERT(!HasFastProperties());
Object* value = property_dictionary()->ValueAt(result->GetDictionaryEntry());
if (IsGlobalObject()) {
- value = JSGlobalPropertyCell::cast(value)->value();
+ value = PropertyCell::cast(value)->value();
}
- ASSERT(!value->IsJSGlobalPropertyCell());
+ ASSERT(!value->IsPropertyCell() && !value->IsCell());
return value;
}
-Object* JSObject::SetNormalizedProperty(LookupResult* result, Object* value) {
- ASSERT(!HasFastProperties());
- if (IsGlobalObject()) {
- JSGlobalPropertyCell* cell =
- JSGlobalPropertyCell::cast(
- property_dictionary()->ValueAt(result->GetDictionaryEntry()));
- cell->set_value(value);
+void JSObject::SetNormalizedProperty(Handle<JSObject> object,
+ LookupResult* result,
+ Handle<Object> value) {
+ ASSERT(!object->HasFastProperties());
+ NameDictionary* property_dictionary = object->property_dictionary();
+ if (object->IsGlobalObject()) {
+ Handle<PropertyCell> cell(PropertyCell::cast(
+ property_dictionary->ValueAt(result->GetDictionaryEntry())));
+ PropertyCell::SetValueInferType(cell, value);
} else {
- property_dictionary()->ValueAtPut(result->GetDictionaryEntry(), value);
+ property_dictionary->ValueAtPut(result->GetDictionaryEntry(), *value);
}
- return value;
}
-Handle<Object> JSObject::SetNormalizedProperty(Handle<JSObject> object,
- Handle<String> key,
- Handle<Object> value,
- PropertyDetails details) {
- CALL_HEAP_FUNCTION(object->GetIsolate(),
- object->SetNormalizedProperty(*key, *value, details),
- Object);
+// TODO(mstarzinger): Temporary wrapper until handlified.
+static Handle<NameDictionary> NameDictionaryAdd(Handle<NameDictionary> dict,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyDetails details) {
+ CALL_HEAP_FUNCTION(dict->GetIsolate(),
+ dict->Add(*name, *value, details),
+ NameDictionary);
}
-MaybeObject* JSObject::SetNormalizedProperty(String* name,
- Object* value,
- PropertyDetails details) {
- ASSERT(!HasFastProperties());
- int entry = property_dictionary()->FindEntry(name);
- if (entry == StringDictionary::kNotFound) {
- Object* store_value = value;
- if (IsGlobalObject()) {
- Heap* heap = name->GetHeap();
- MaybeObject* maybe_store_value =
- heap->AllocateJSGlobalPropertyCell(value);
- if (!maybe_store_value->ToObject(&store_value)) return maybe_store_value;
- }
- Object* dict;
- { MaybeObject* maybe_dict =
- property_dictionary()->Add(name, store_value, details);
- if (!maybe_dict->ToObject(&dict)) return maybe_dict;
+void JSObject::SetNormalizedProperty(Handle<JSObject> object,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyDetails details) {
+ ASSERT(!object->HasFastProperties());
+ Handle<NameDictionary> property_dictionary(object->property_dictionary());
+
+ if (!name->IsUniqueName()) {
+ name = object->GetIsolate()->factory()->InternalizedStringFromString(
+ Handle<String>::cast(name));
+ }
+
+ int entry = property_dictionary->FindEntry(*name);
+ if (entry == NameDictionary::kNotFound) {
+ Handle<Object> store_value = value;
+ if (object->IsGlobalObject()) {
+ store_value = object->GetIsolate()->factory()->NewPropertyCell(value);
}
- set_properties(StringDictionary::cast(dict));
- return value;
+
+ property_dictionary =
+ NameDictionaryAdd(property_dictionary, name, store_value, details);
+ object->set_properties(*property_dictionary);
+ return;
}
- PropertyDetails original_details = property_dictionary()->DetailsAt(entry);
+ PropertyDetails original_details = property_dictionary->DetailsAt(entry);
int enumeration_index;
// Preserve the enumeration index unless the property was deleted.
if (original_details.IsDeleted()) {
- enumeration_index = property_dictionary()->NextEnumerationIndex();
- property_dictionary()->SetNextEnumerationIndex(enumeration_index + 1);
+ enumeration_index = property_dictionary->NextEnumerationIndex();
+ property_dictionary->SetNextEnumerationIndex(enumeration_index + 1);
} else {
enumeration_index = original_details.dictionary_index();
ASSERT(enumeration_index > 0);
@@ -502,58 +727,61 @@ MaybeObject* JSObject::SetNormalizedProperty(String* name,
details = PropertyDetails(
details.attributes(), details.type(), enumeration_index);
- if (IsGlobalObject()) {
- JSGlobalPropertyCell* cell =
- JSGlobalPropertyCell::cast(property_dictionary()->ValueAt(entry));
- cell->set_value(value);
+ if (object->IsGlobalObject()) {
+ Handle<PropertyCell> cell(
+ PropertyCell::cast(property_dictionary->ValueAt(entry)));
+ PropertyCell::SetValueInferType(cell, value);
// Please note we have to update the property details.
- property_dictionary()->DetailsAtPut(entry, details);
+ property_dictionary->DetailsAtPut(entry, details);
} else {
- property_dictionary()->SetEntry(entry, name, value, details);
+ property_dictionary->SetEntry(entry, *name, *value, details);
}
- return value;
}
-MaybeObject* JSObject::DeleteNormalizedProperty(String* name, DeleteMode mode) {
- ASSERT(!HasFastProperties());
- StringDictionary* dictionary = property_dictionary();
- int entry = dictionary->FindEntry(name);
- if (entry != StringDictionary::kNotFound) {
+// TODO(mstarzinger): Temporary wrapper until target is handlified.
+Handle<NameDictionary> NameDictionaryShrink(Handle<NameDictionary> dict,
+ Handle<Name> name) {
+ CALL_HEAP_FUNCTION(dict->GetIsolate(), dict->Shrink(*name), NameDictionary);
+}
+
+
+Handle<Object> JSObject::DeleteNormalizedProperty(Handle<JSObject> object,
+ Handle<Name> name,
+ DeleteMode mode) {
+ ASSERT(!object->HasFastProperties());
+ Isolate* isolate = object->GetIsolate();
+ Handle<NameDictionary> dictionary(object->property_dictionary());
+ int entry = dictionary->FindEntry(*name);
+ if (entry != NameDictionary::kNotFound) {
// If we have a global object set the cell to the hole.
- if (IsGlobalObject()) {
+ if (object->IsGlobalObject()) {
PropertyDetails details = dictionary->DetailsAt(entry);
if (details.IsDontDelete()) {
- if (mode != FORCE_DELETION) return GetHeap()->false_value();
+ if (mode != FORCE_DELETION) return isolate->factory()->false_value();
// When forced to delete global properties, we have to make a
// map change to invalidate any ICs that think they can load
// from the DontDelete cell without checking if it contains
// the hole value.
- Map* new_map;
- MaybeObject* maybe_new_map = map()->CopyDropDescriptors();
- if (!maybe_new_map->To(&new_map)) return maybe_new_map;
-
+ Handle<Map> new_map = Map::CopyDropDescriptors(handle(object->map()));
ASSERT(new_map->is_dictionary_map());
- set_map(new_map);
+ object->set_map(*new_map);
}
- JSGlobalPropertyCell* cell =
- JSGlobalPropertyCell::cast(dictionary->ValueAt(entry));
- cell->set_value(cell->GetHeap()->the_hole_value());
+ Handle<PropertyCell> cell(PropertyCell::cast(dictionary->ValueAt(entry)));
+ Handle<Object> value = isolate->factory()->the_hole_value();
+ PropertyCell::SetValueInferType(cell, value);
dictionary->DetailsAtPut(entry, details.AsDeleted());
} else {
- Object* deleted = dictionary->DeleteProperty(entry, mode);
- if (deleted == GetHeap()->true_value()) {
- FixedArray* new_properties = NULL;
- MaybeObject* maybe_properties = dictionary->Shrink(name);
- if (!maybe_properties->To(&new_properties)) {
- return maybe_properties;
- }
- set_properties(new_properties);
+ Handle<Object> deleted(dictionary->DeleteProperty(entry, mode), isolate);
+ if (*deleted == isolate->heap()->true_value()) {
+ Handle<NameDictionary> new_properties =
+ NameDictionaryShrink(dictionary, name);
+ object->set_properties(*new_properties);
}
return deleted;
}
}
- return GetHeap()->true_value();
+ return isolate->factory()->true_value();
}
@@ -575,11 +803,9 @@ bool JSObject::IsDirty() {
Handle<Object> Object::GetProperty(Handle<Object> object,
Handle<Object> receiver,
LookupResult* result,
- Handle<String> key,
+ Handle<Name> key,
PropertyAttributes* attributes) {
- Isolate* isolate = object->IsHeapObject()
- ? Handle<HeapObject>::cast(object)->GetIsolate()
- : Isolate::Current();
+ Isolate* isolate = result->isolate();
CALL_HEAP_FUNCTION(
isolate,
object->GetProperty(*receiver, result, *key, attributes),
@@ -587,14 +813,35 @@ Handle<Object> Object::GetProperty(Handle<Object> object,
}
+MaybeObject* Object::GetPropertyOrFail(Handle<Object> object,
+ Handle<Object> receiver,
+ LookupResult* result,
+ Handle<Name> key,
+ PropertyAttributes* attributes) {
+ Isolate* isolate = result->isolate();
+ CALL_HEAP_FUNCTION_PASS_EXCEPTION(
+ isolate,
+ object->GetProperty(*receiver, result, *key, attributes));
+}
+
+
+// TODO(yangguo): handlify this and get rid of.
MaybeObject* Object::GetProperty(Object* receiver,
LookupResult* result,
- String* name,
+ Name* name,
PropertyAttributes* attributes) {
+ Isolate* isolate = name->GetIsolate();
+ Heap* heap = isolate->heap();
+
+#ifdef DEBUG
+ // TODO(mstarzinger): Only because of the AssertNoContextChange, drop as soon
+ // as this method has been fully handlified.
+ HandleScope scope(isolate);
+#endif
+
// Make sure that the top context does not change when doing
// callbacks or interceptor calls.
- AssertNoContextChange ncc;
- Heap* heap = name->GetHeap();
+ AssertNoContextChange ncc(isolate);
// Traverse the prototype chain from the current object (this) to
// the holder and check for access rights. This avoids traversing the
@@ -607,19 +854,26 @@ MaybeObject* Object::GetProperty(Object* receiver,
Object* last = result->IsProperty()
? result->holder()
: Object::cast(heap->null_value());
- ASSERT(this != this->GetPrototype());
- for (Object* current = this; true; current = current->GetPrototype()) {
+ ASSERT(this != this->GetPrototype(isolate));
+ for (Object* current = this;
+ true;
+ current = current->GetPrototype(isolate)) {
if (current->IsAccessCheckNeeded()) {
// Check if we're allowed to read from the current object. Note
// that even though we may not actually end up loading the named
// property from the current object, we still check that we have
// access to it.
JSObject* checked = JSObject::cast(current);
- if (!heap->isolate()->MayNamedAccess(checked, name, v8::ACCESS_GET)) {
- return checked->GetPropertyWithFailedAccessCheck(receiver,
- result,
- name,
- attributes);
+ if (!isolate->MayNamedAccess(checked, name, v8::ACCESS_GET)) {
+ HandleScope scope(isolate);
+ Handle<Object> value = JSObject::GetPropertyWithFailedAccessCheck(
+ handle(checked, isolate),
+ handle(receiver, isolate),
+ result,
+ handle(name, isolate),
+ attributes);
+ RETURN_IF_EMPTY_HANDLE(isolate, value);
+ return *value;
}
}
// Stop traversing the chain once we reach the last object in the
@@ -640,20 +894,38 @@ MaybeObject* Object::GetProperty(Object* receiver,
value = result->holder()->GetNormalizedProperty(result);
ASSERT(!value->IsTheHole() || result->IsReadOnly());
return value->IsTheHole() ? heap->undefined_value() : value;
- case FIELD:
- value = result->holder()->FastPropertyAt(result->GetFieldIndex());
+ case FIELD: {
+ MaybeObject* maybe_result = result->holder()->FastPropertyAt(
+ result->representation(),
+ result->GetFieldIndex().field_index());
+ if (!maybe_result->To(&value)) return maybe_result;
ASSERT(!value->IsTheHole() || result->IsReadOnly());
return value->IsTheHole() ? heap->undefined_value() : value;
- case CONSTANT_FUNCTION:
- return result->GetConstantFunction();
- case CALLBACKS:
- return result->holder()->GetPropertyWithCallback(
- receiver, result->GetCallbackObject(), name);
+ }
+ case CONSTANT:
+ return result->GetConstant();
+ case CALLBACKS: {
+ HandleScope scope(isolate);
+ Handle<Object> value = JSObject::GetPropertyWithCallback(
+ handle(result->holder(), isolate),
+ handle(receiver, isolate),
+ handle(result->GetCallbackObject(), isolate),
+ handle(name, isolate));
+ RETURN_IF_EMPTY_HANDLE(isolate, value);
+ return *value;
+ }
case HANDLER:
return result->proxy()->GetPropertyWithHandler(receiver, name);
- case INTERCEPTOR:
- return result->holder()->GetPropertyWithInterceptor(
- receiver, name, attributes);
+ case INTERCEPTOR: {
+ HandleScope scope(isolate);
+ Handle<Object> value = JSObject::GetPropertyWithInterceptor(
+ handle(result->holder(), isolate),
+ handle(receiver, isolate),
+ handle(name, isolate),
+ attributes);
+ RETURN_IF_EMPTY_HANDLE(isolate, value);
+ return *value;
+ }
case TRANSITION:
case NONEXISTENT:
UNREACHABLE();
@@ -664,24 +936,25 @@ MaybeObject* Object::GetProperty(Object* receiver,
}
-MaybeObject* Object::GetElementWithReceiver(Object* receiver, uint32_t index) {
- Heap* heap = IsSmi()
- ? Isolate::Current()->heap()
- : HeapObject::cast(this)->GetHeap();
+MaybeObject* Object::GetElementWithReceiver(Isolate* isolate,
+ Object* receiver,
+ uint32_t index) {
+ Heap* heap = isolate->heap();
Object* holder = this;
// Iterate up the prototype chain until an element is found or the null
// prototype is encountered.
for (holder = this;
holder != heap->null_value();
- holder = holder->GetPrototype()) {
+ holder = holder->GetPrototype(isolate)) {
if (!holder->IsJSObject()) {
- Isolate* isolate = heap->isolate();
Context* native_context = isolate->context()->native_context();
if (holder->IsNumber()) {
holder = native_context->number_function()->instance_prototype();
} else if (holder->IsString()) {
holder = native_context->string_function()->instance_prototype();
+ } else if (holder->IsSymbol()) {
+ holder = native_context->symbol_function()->instance_prototype();
} else if (holder->IsBoolean()) {
holder = native_context->boolean_function()->instance_prototype();
} else if (holder->IsJSProxy()) {
@@ -703,6 +976,7 @@ MaybeObject* Object::GetElementWithReceiver(Object* receiver, uint32_t index) {
Isolate* isolate = heap->isolate();
if (!isolate->MayIndexedAccess(js_object, index, v8::ACCESS_GET)) {
isolate->ReportFailedAccessCheck(js_object, v8::ACCESS_GET);
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
return heap->undefined_value();
}
}
@@ -722,10 +996,9 @@ MaybeObject* Object::GetElementWithReceiver(Object* receiver, uint32_t index) {
}
-Object* Object::GetPrototype() {
+Object* Object::GetPrototype(Isolate* isolate) {
if (IsSmi()) {
- Heap* heap = Isolate::Current()->heap();
- Context* context = heap->isolate()->context()->native_context();
+ Context* context = isolate->context()->native_context();
return context->number_function()->instance_prototype();
}
@@ -736,8 +1009,7 @@ Object* Object::GetPrototype() {
if (heap_object->IsJSReceiver()) {
return heap_object->map()->prototype();
}
- Heap* heap = heap_object->GetHeap();
- Context* context = heap->isolate()->context()->native_context();
+ Context* context = isolate->context()->native_context();
if (heap_object->IsHeapNumber()) {
return context->number_function()->instance_prototype();
@@ -745,23 +1017,26 @@ Object* Object::GetPrototype() {
if (heap_object->IsString()) {
return context->string_function()->instance_prototype();
}
+ if (heap_object->IsSymbol()) {
+ return context->symbol_function()->instance_prototype();
+ }
if (heap_object->IsBoolean()) {
return context->boolean_function()->instance_prototype();
} else {
- return heap->null_value();
+ return isolate->heap()->null_value();
}
}
MaybeObject* Object::GetHash(CreationFlag flag) {
- // The object is either a number, a string, an odd-ball,
+ // The object is either a number, a name, an odd-ball,
// a real JS object, or a Harmony proxy.
if (IsNumber()) {
uint32_t hash = ComputeLongHash(double_to_uint64(Number()));
return Smi::FromInt(hash & Smi::kMaxValue);
}
- if (IsString()) {
- uint32_t hash = String::cast(this)->Hash();
+ if (IsName()) {
+ uint32_t hash = Name::cast(this)->Hash();
return Smi::FromInt(hash);
}
if (IsOddball()) {
@@ -780,13 +1055,16 @@ MaybeObject* Object::GetHash(CreationFlag flag) {
bool Object::SameValue(Object* other) {
if (other == this) return true;
- // The object is either a number, a string, an odd-ball,
+ // The object is either a number, a name, an odd-ball,
// a real JS object, or a Harmony proxy.
if (IsNumber() && other->IsNumber()) {
double this_value = Number();
double other_value = other->Number();
- return (this_value == other_value) ||
- (isnan(this_value) && isnan(other_value));
+ bool equal = this_value == other_value;
+ // SameValue(NaN, NaN) is true.
+ if (!equal) return std::isnan(this_value) && std::isnan(other_value);
+ // SameValue(0.0, -0.0) is false.
+ return (this_value != 0) || ((1 / this_value) == (1 / other_value));
}
if (IsString() && other->IsString()) {
return String::cast(this)->Equals(String::cast(other));
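
The rewritten numeric case above captures the two places where SameValue differs from ordinary ==: NaN compares equal to itself, and +0 is distinguished from -0 by the sign of 1/x. A standalone sketch of just that numeric rule (illustrative, not part of the patch):

    #include <cmath>
    #include <iostream>

    // SameValue for doubles: NaN equals NaN, but +0 and -0 are different.
    static bool NumberSameValue(double a, double b) {
      if (a == b) {
        // +0 == -0 under ==, so tell them apart via the sign of 1/x.
        return (a != 0) || ((1 / a) == (1 / b));
      }
      return std::isnan(a) && std::isnan(b);
    }

    int main() {
      std::cout << NumberSameValue(NAN, NAN)  << "\n";  // 1
      std::cout << NumberSameValue(0.0, -0.0) << "\n";  // 0
      std::cout << NumberSameValue(1.5, 1.5)  << "\n";  // 1
      return 0;
    }
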
@@ -864,7 +1142,7 @@ MaybeObject* String::SlowTryFlatten(PretenureFlag pretenure) {
// allowed. This is to avoid an assertion failure when allocating.
// Flattening strings is the only case where we always allow
// allocation because no GC is performed if the allocation fails.
- if (!HEAP->IsAllocationAllowed()) return this;
+ if (!AllowHeapAllocation::IsAllowed()) return this;
#endif
Heap* heap = GetHeap();
@@ -881,14 +1159,15 @@ MaybeObject* String::SlowTryFlatten(PretenureFlag pretenure) {
int len = length();
Object* object;
String* result;
- if (IsAsciiRepresentation()) {
- { MaybeObject* maybe_object = heap->AllocateRawAsciiString(len, tenure);
+ if (IsOneByteRepresentation()) {
+ { MaybeObject* maybe_object =
+ heap->AllocateRawOneByteString(len, tenure);
if (!maybe_object->ToObject(&object)) return maybe_object;
}
result = String::cast(object);
String* first = cs->first();
int first_length = first->length();
- char* dest = SeqAsciiString::cast(result)->GetChars();
+ uint8_t* dest = SeqOneByteString::cast(result)->GetChars();
WriteToFlat(first, dest, 0, first_length);
String* second = cs->second();
WriteToFlat(second,
@@ -925,7 +1204,7 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
// Externalizing twice leaks the external resource, so it's
// prohibited by the API.
ASSERT(!this->IsExternalString());
-#ifdef DEBUG
+#ifdef ENABLE_SLOW_ASSERTS
if (FLAG_enable_slow_asserts) {
// Assert that the resource and the string are equivalent.
ASSERT(static_cast<size_t>(this->length()) == resource->length());
@@ -941,29 +1220,34 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
if (size < ExternalString::kShortSize) {
return false;
}
- bool is_ascii = this->IsAsciiRepresentation();
- bool is_symbol = this->IsSymbol();
+ bool is_ascii = this->IsOneByteRepresentation();
+ bool is_internalized = this->IsInternalizedString();
// Morph the object to an external string by adjusting the map and
// reinitializing the fields.
if (size >= ExternalString::kSize) {
this->set_map_no_write_barrier(
- is_symbol
- ? (is_ascii ? heap->external_symbol_with_ascii_data_map()
- : heap->external_symbol_map())
- : (is_ascii ? heap->external_string_with_ascii_data_map()
- : heap->external_string_map()));
+ is_internalized
+ ? (is_ascii
+ ? heap->external_internalized_string_with_one_byte_data_map()
+ : heap->external_internalized_string_map())
+ : (is_ascii
+ ? heap->external_string_with_one_byte_data_map()
+ : heap->external_string_map()));
} else {
this->set_map_no_write_barrier(
- is_symbol
- ? (is_ascii ? heap->short_external_symbol_with_ascii_data_map()
- : heap->short_external_symbol_map())
- : (is_ascii ? heap->short_external_string_with_ascii_data_map()
- : heap->short_external_string_map()));
+ is_internalized
+ ? (is_ascii
+ ? heap->
+ short_external_internalized_string_with_one_byte_data_map()
+ : heap->short_external_internalized_string_map())
+ : (is_ascii
+ ? heap->short_external_string_with_one_byte_data_map()
+ : heap->short_external_string_map()));
}
ExternalTwoByteString* self = ExternalTwoByteString::cast(this);
self->set_resource(resource);
- if (is_symbol) self->Hash(); // Force regeneration of the hash value.
+ if (is_internalized) self->Hash(); // Force regeneration of the hash value.
// Fill the remainder of the string with dead wood.
int new_size = this->Size(); // Byte size of the external String object.
@@ -977,10 +1261,15 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
bool String::MakeExternal(v8::String::ExternalAsciiStringResource* resource) {
-#ifdef DEBUG
+#ifdef ENABLE_SLOW_ASSERTS
if (FLAG_enable_slow_asserts) {
// Assert that the resource and the string are equivalent.
ASSERT(static_cast<size_t>(this->length()) == resource->length());
+ if (this->IsTwoByteRepresentation()) {
+ ScopedVector<uint16_t> smart_chars(this->length());
+ String::WriteToFlat(this, smart_chars.start(), 0, this->length());
+ ASSERT(String::IsOneByte(smart_chars.start(), this->length()));
+ }
ScopedVector<char> smart_chars(this->length());
String::WriteToFlat(this, smart_chars.start(), 0, this->length());
ASSERT(memcmp(smart_chars.start(),
@@ -993,22 +1282,22 @@ bool String::MakeExternal(v8::String::ExternalAsciiStringResource* resource) {
if (size < ExternalString::kShortSize) {
return false;
}
- bool is_symbol = this->IsSymbol();
+ bool is_internalized = this->IsInternalizedString();
// Morph the object to an external string by adjusting the map and
// reinitializing the fields. Use short version if space is limited.
if (size >= ExternalString::kSize) {
this->set_map_no_write_barrier(
- is_symbol ? heap->external_ascii_symbol_map()
- : heap->external_ascii_string_map());
+ is_internalized ? heap->external_ascii_internalized_string_map()
+ : heap->external_ascii_string_map());
} else {
this->set_map_no_write_barrier(
- is_symbol ? heap->short_external_ascii_symbol_map()
- : heap->short_external_ascii_string_map());
+ is_internalized ? heap->short_external_ascii_internalized_string_map()
+ : heap->short_external_ascii_string_map());
}
ExternalAsciiString* self = ExternalAsciiString::cast(this);
self->set_resource(resource);
- if (is_symbol) self->Hash(); // Force regeneration of the hash value.
+ if (is_internalized) self->Hash(); // Force regeneration of the hash value.
// Fill the remainder of the string with dead wood.
int new_size = this->Size(); // Byte size of the external String object.
@@ -1033,7 +1322,8 @@ void String::StringShortPrint(StringStream* accumulator) {
return;
}
- StringInputBuffer buf(this);
+ ConsStringIteratorOp op;
+ StringCharacterStream stream(this, &op);
bool truncated = false;
if (len > kMaxShortPrintLength) {
@@ -1042,17 +1332,17 @@ void String::StringShortPrint(StringStream* accumulator) {
}
bool ascii = true;
for (int i = 0; i < len; i++) {
- int c = buf.GetNext();
+ uint16_t c = stream.GetNext();
if (c < 32 || c >= 127) {
ascii = false;
}
}
- buf.Reset(this);
+ stream.Reset(this);
if (ascii) {
accumulator->Add("<String[%u]: ", length());
for (int i = 0; i < len; i++) {
- accumulator->Put(buf.GetNext());
+ accumulator->Put(static_cast<char>(stream.GetNext()));
}
accumulator->Put('>');
} else {
@@ -1060,7 +1350,7 @@ void String::StringShortPrint(StringStream* accumulator) {
// characters and that backslashes are therefore escaped.
accumulator->Add("<String[%u]\\: ", length());
for (int i = 0; i < len; i++) {
- int c = buf.GetNext();
+ uint16_t c = stream.GetNext();
if (c == '\n') {
accumulator->Add("\\n");
} else if (c == '\r') {
@@ -1070,7 +1360,7 @@ void String::StringShortPrint(StringStream* accumulator) {
} else if (c < 32 || c > 126) {
accumulator->Add("\\x%02x", c);
} else {
- accumulator->Put(c);
+ accumulator->Put(static_cast<char>(c));
}
}
if (truncated) {
@@ -1097,25 +1387,40 @@ void JSObject::JSObjectShortPrint(StringStream* accumulator) {
accumulator->Add("<JS WeakMap>");
break;
}
+ case JS_WEAK_SET_TYPE: {
+ accumulator->Add("<JS WeakSet>");
+ break;
+ }
case JS_REGEXP_TYPE: {
accumulator->Add("<JS RegExp>");
break;
}
case JS_FUNCTION_TYPE: {
- Object* fun_name = JSFunction::cast(this)->shared()->name();
+ JSFunction* function = JSFunction::cast(this);
+ Object* fun_name = function->shared()->DebugName();
bool printed = false;
if (fun_name->IsString()) {
String* str = String::cast(fun_name);
if (str->length() > 0) {
accumulator->Add("<JS Function ");
accumulator->Put(str);
- accumulator->Put('>');
printed = true;
}
}
if (!printed) {
- accumulator->Add("<JS Function>");
+ accumulator->Add("<JS Function");
}
+ accumulator->Add(" (SharedFunctionInfo %p)",
+ reinterpret_cast<void*>(function->shared()));
+ accumulator->Put('>');
+ break;
+ }
+ case JS_GENERATOR_OBJECT_TYPE: {
+ accumulator->Add("<JS Generator>");
+ break;
+ }
+ case JS_MODULE_TYPE: {
+ accumulator->Add("<JS Module>");
break;
}
// All other JSObjects are rather similar to each other (JSObject,
@@ -1144,6 +1449,9 @@ void JSObject::JSObjectShortPrint(StringStream* accumulator) {
global_object ? "Global Object: " : "",
vowel ? "n" : "");
accumulator->Put(str);
+ accumulator->Add(" with %smap %p",
+ map_of_this->is_deprecated() ? "deprecated " : "",
+ map_of_this);
printed = true;
}
}
@@ -1173,7 +1481,7 @@ void JSObject::PrintElementsTransition(
PrintF(file, " -> ");
PrintElementsKind(file, to_kind);
PrintF(file, "] in ");
- JavaScriptFrame::PrintTop(file, false, true);
+ JavaScriptFrame::PrintTop(GetIsolate(), file, false, true);
PrintF(file, " for ");
ShortPrint(file);
PrintF(file, " from ");
@@ -1185,6 +1493,66 @@ void JSObject::PrintElementsTransition(
}
+void Map::PrintGeneralization(FILE* file,
+ const char* reason,
+ int modify_index,
+ int split,
+ int descriptors,
+ bool constant_to_field,
+ Representation old_representation,
+ Representation new_representation) {
+ PrintF(file, "[generalizing ");
+ constructor_name()->PrintOn(file);
+ PrintF(file, "] ");
+ String::cast(instance_descriptors()->GetKey(modify_index))->PrintOn(file);
+ if (constant_to_field) {
+ PrintF(file, ":c->f");
+ } else {
+ PrintF(file, ":%s->%s",
+ old_representation.Mnemonic(),
+ new_representation.Mnemonic());
+ }
+ PrintF(file, " (");
+ if (strlen(reason) > 0) {
+ PrintF(file, "%s", reason);
+ } else {
+ PrintF(file, "+%i maps", descriptors - split);
+ }
+ PrintF(file, ") [");
+ JavaScriptFrame::PrintTop(GetIsolate(), file, false, true);
+ PrintF(file, "]\n");
+}
+
+
+void JSObject::PrintInstanceMigration(FILE* file,
+ Map* original_map,
+ Map* new_map) {
+ PrintF(file, "[migrating ");
+ map()->constructor_name()->PrintOn(file);
+ PrintF(file, "] ");
+ DescriptorArray* o = original_map->instance_descriptors();
+ DescriptorArray* n = new_map->instance_descriptors();
+ for (int i = 0; i < original_map->NumberOfOwnDescriptors(); i++) {
+ Representation o_r = o->GetDetails(i).representation();
+ Representation n_r = n->GetDetails(i).representation();
+ if (!o_r.Equals(n_r)) {
+ String::cast(o->GetKey(i))->PrintOn(file);
+ PrintF(file, ":%s->%s ", o_r.Mnemonic(), n_r.Mnemonic());
+ } else if (o->GetDetails(i).type() == CONSTANT &&
+ n->GetDetails(i).type() == FIELD) {
+ Name* name = o->GetKey(i);
+ if (name->IsString()) {
+ String::cast(name)->PrintOn(file);
+ } else {
+ PrintF(file, "???");
+ }
+ PrintF(file, " ");
+ }
+ }
+ PrintF(file, "\n");
+}
+
+
void HeapObject::HeapObjectShortPrint(StringStream* accumulator) {
Heap* heap = GetHeap();
if (!heap->Contains(this)) {
@@ -1259,9 +1627,17 @@ void HeapObject::HeapObjectShortPrint(StringStream* accumulator) {
accumulator->Add("<ExternalDoubleArray[%u]>",
ExternalDoubleArray::cast(this)->length());
break;
- case SHARED_FUNCTION_INFO_TYPE:
- accumulator->Add("<SharedFunctionInfo>");
+ case SHARED_FUNCTION_INFO_TYPE: {
+ SharedFunctionInfo* shared = SharedFunctionInfo::cast(this);
+ SmartArrayPointer<char> debug_name =
+ shared->DebugName()->ToCString();
+ if (debug_name[0] != 0) {
+ accumulator->Add("<SharedFunctionInfo %s>", *debug_name);
+ } else {
+ accumulator->Add("<SharedFunctionInfo>");
+ }
break;
+ }
case JS_MESSAGE_OBJECT_TYPE:
accumulator->Add("<JSMessageObject>");
break;
@@ -1291,6 +1667,16 @@ void HeapObject::HeapObjectShortPrint(StringStream* accumulator) {
accumulator->Add("<Odd Oddball>");
break;
}
+ case SYMBOL_TYPE: {
+ Symbol* symbol = Symbol::cast(this);
+ accumulator->Add("<Symbol: %d", symbol->Hash());
+ if (!symbol->name()->IsUndefined()) {
+ accumulator->Add(" ");
+ String::cast(symbol->name())->StringShortPrint(accumulator);
+ }
+ accumulator->Add(">");
+ break;
+ }
case HEAP_NUMBER_TYPE:
accumulator->Add("<Number: ");
HeapNumber::cast(this)->HeapNumberPrint(accumulator);
@@ -1305,9 +1691,13 @@ void HeapObject::HeapObjectShortPrint(StringStream* accumulator) {
case FOREIGN_TYPE:
accumulator->Add("<Foreign>");
break;
- case JS_GLOBAL_PROPERTY_CELL_TYPE:
+ case CELL_TYPE:
accumulator->Add("Cell for ");
- JSGlobalPropertyCell::cast(this)->value()->ShortPrint(accumulator);
+ Cell::cast(this)->value()->ShortPrint(accumulator);
+ break;
+ case PROPERTY_CELL_TYPE:
+ accumulator->Add("PropertyCell for ");
+ PropertyCell::cast(this)->value()->ShortPrint(accumulator);
break;
default:
accumulator->Add("<Other heap object (%d)>", map()->instance_type());
@@ -1340,7 +1730,7 @@ void HeapObject::IterateBody(InstanceType type, int object_size,
SlicedString::BodyDescriptor::IterateBody(this, v);
break;
case kExternalStringTag:
- if ((type & kStringEncodingMask) == kAsciiStringTag) {
+ if ((type & kStringEncodingMask) == kOneByteStringTag) {
reinterpret_cast<ExternalAsciiString*>(this)->
ExternalAsciiStringIterateBody(v);
} else {
@@ -1356,17 +1746,25 @@ void HeapObject::IterateBody(InstanceType type, int object_size,
case FIXED_ARRAY_TYPE:
FixedArray::BodyDescriptor::IterateBody(this, object_size, v);
break;
+ case CONSTANT_POOL_ARRAY_TYPE:
+ reinterpret_cast<ConstantPoolArray*>(this)->ConstantPoolIterateBody(v);
+ break;
case FIXED_DOUBLE_ARRAY_TYPE:
break;
case JS_OBJECT_TYPE:
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
+ case JS_GENERATOR_OBJECT_TYPE:
case JS_MODULE_TYPE:
case JS_VALUE_TYPE:
case JS_DATE_TYPE:
case JS_ARRAY_TYPE:
+ case JS_ARRAY_BUFFER_TYPE:
+ case JS_TYPED_ARRAY_TYPE:
+ case JS_DATA_VIEW_TYPE:
case JS_SET_TYPE:
case JS_MAP_TYPE:
case JS_WEAK_MAP_TYPE:
+ case JS_WEAK_SET_TYPE:
case JS_REGEXP_TYPE:
case JS_GLOBAL_PROXY_TYPE:
case JS_GLOBAL_OBJECT_TYPE:
@@ -1396,8 +1794,14 @@ void HeapObject::IterateBody(InstanceType type, int object_size,
case CODE_TYPE:
reinterpret_cast<Code*>(this)->CodeIterateBody(v);
break;
- case JS_GLOBAL_PROPERTY_CELL_TYPE:
- JSGlobalPropertyCell::BodyDescriptor::IterateBody(this, v);
+ case CELL_TYPE:
+ Cell::BodyDescriptor::IterateBody(this, v);
+ break;
+ case PROPERTY_CELL_TYPE:
+ PropertyCell::BodyDescriptor::IterateBody(this, v);
+ break;
+ case SYMBOL_TYPE:
+ Symbol::BodyDescriptor::IterateBody(this, v);
break;
case HEAP_NUMBER_TYPE:
case FILLER_TYPE:
@@ -1422,7 +1826,11 @@ void HeapObject::IterateBody(InstanceType type, int object_size,
case NAME##_TYPE:
STRUCT_LIST(MAKE_STRUCT_CASE)
#undef MAKE_STRUCT_CASE
- StructBodyDescriptor::IterateBody(this, object_size, v);
+ if (type == ALLOCATION_SITE_TYPE) {
+ AllocationSite::BodyDescriptor::IterateBody(this, v);
+ } else {
+ StructBodyDescriptor::IterateBody(this, object_size, v);
+ }
break;
default:
PrintF("Unknown type: %d\n", type);
@@ -1431,7 +1839,7 @@ void HeapObject::IterateBody(InstanceType type, int object_size,
}
-Object* HeapNumber::HeapNumberToBoolean() {
+bool HeapNumber::HeapNumberBooleanValue() {
// NaN, +0, and -0 should be false.
#if __BYTE_ORDER == __LITTLE_ENDIAN
union IeeeDoubleLittleEndianArchType u;
@@ -1441,15 +1849,13 @@ Object* HeapNumber::HeapNumberToBoolean() {
u.d = value();
if (u.bits.exp == 2047) {
// Detect NaN for IEEE double precision floating point.
- if ((u.bits.man_low | u.bits.man_high) != 0)
- return GetHeap()->false_value();
+ if ((u.bits.man_low | u.bits.man_high) != 0) return false;
}
if (u.bits.exp == 0) {
// Detect +0, and -0 for IEEE double precision floating point.
- if ((u.bits.man_low | u.bits.man_high) == 0)
- return GetHeap()->false_value();
+ if ((u.bits.man_low | u.bits.man_high) == 0) return false;
}
- return GetHeap()->true_value();
+ return true;
}
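
The rewritten HeapNumberBooleanValue above decides truthiness straight from the IEEE-754 bit pattern: an all-ones exponent with a non-zero mantissa is NaN, an all-zero exponent with a zero mantissa is +0 or -0, and both yield false. A minimal standalone sketch of the same check, assuming nothing from the V8 heap (the helper name is invented for illustration):

    #include <cstdint>
    #include <cstring>

    // Illustration only: mirrors the NaN / +-0 handling above using a plain
    // bit-pattern inspection of an IEEE-754 double.
    static bool DoubleBooleanValue(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof bits);            // well-defined type pun
      uint64_t exponent = (bits >> 52) & 0x7FF;       // 11 exponent bits
      uint64_t mantissa = bits & 0xFFFFFFFFFFFFFull;  // 52 mantissa bits
      if (exponent == 2047 && mantissa != 0) return false;  // NaN
      if (exponent == 0 && mantissa == 0) return false;     // +0 or -0
      return true;
    }
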
@@ -1473,277 +1879,352 @@ void HeapNumber::HeapNumberPrint(StringStream* accumulator) {
String* JSReceiver::class_name() {
if (IsJSFunction() && IsJSFunctionProxy()) {
- return GetHeap()->function_class_symbol();
+ return GetHeap()->function_class_string();
}
if (map()->constructor()->IsJSFunction()) {
JSFunction* constructor = JSFunction::cast(map()->constructor());
return String::cast(constructor->shared()->instance_class_name());
}
// If the constructor is not present, return "Object".
- return GetHeap()->Object_symbol();
+ return GetHeap()->Object_string();
}
-String* JSReceiver::constructor_name() {
- if (map()->constructor()->IsJSFunction()) {
- JSFunction* constructor = JSFunction::cast(map()->constructor());
+String* Map::constructor_name() {
+ if (constructor()->IsJSFunction()) {
+ JSFunction* constructor = JSFunction::cast(this->constructor());
String* name = String::cast(constructor->shared()->name());
if (name->length() > 0) return name;
String* inferred_name = constructor->shared()->inferred_name();
if (inferred_name->length() > 0) return inferred_name;
- Object* proto = GetPrototype();
+ Object* proto = prototype();
if (proto->IsJSObject()) return JSObject::cast(proto)->constructor_name();
}
// TODO(rossberg): what about proxies?
// If the constructor is not present, return "Object".
- return GetHeap()->Object_symbol();
+ return GetHeap()->Object_string();
}
-MaybeObject* JSObject::AddFastPropertyUsingMap(Map* new_map,
- String* name,
- Object* value,
- int field_index) {
- if (map()->unused_property_fields() == 0) {
+String* JSReceiver::constructor_name() {
+ return map()->constructor_name();
+}
+
+
+// TODO(mstarzinger): Temporary wrapper until handlified.
+static Handle<Object> NewStorageFor(Isolate* isolate,
+ Handle<Object> object,
+ Representation representation) {
+ Heap* heap = isolate->heap();
+ CALL_HEAP_FUNCTION(isolate,
+ object->AllocateNewStorageFor(heap, representation),
+ Object);
+}
+
+
+void JSObject::AddFastPropertyUsingMap(Handle<JSObject> object,
+ Handle<Map> new_map,
+ Handle<Name> name,
+ Handle<Object> value,
+ int field_index,
+ Representation representation) {
+ Isolate* isolate = object->GetIsolate();
+
+ // This method is used to transition to a field. If we are transitioning to a
+ // double field, allocate new storage.
+ Handle<Object> storage = NewStorageFor(isolate, value, representation);
+
+ if (object->map()->unused_property_fields() == 0) {
int new_unused = new_map->unused_property_fields();
- FixedArray* values;
- { MaybeObject* maybe_values =
- properties()->CopySize(properties()->length() + new_unused + 1);
- if (!maybe_values->To(&values)) return maybe_values;
- }
- set_properties(values);
+ Handle<FixedArray> properties(object->properties());
+ Handle<FixedArray> values = isolate->factory()->CopySizeFixedArray(
+ properties, properties->length() + new_unused + 1);
+ object->set_properties(*values);
}
- set_map(new_map);
- return FastPropertyAtPut(field_index, value);
+
+ object->set_map(*new_map);
+ object->FastPropertyAtPut(field_index, *storage);
}
-static bool IsIdentifier(UnicodeCache* cache,
- unibrow::CharacterStream* buffer) {
- // Checks whether the buffer contains an identifier (no escape).
- if (!buffer->has_more()) return false;
- if (!cache->IsIdentifierStart(buffer->GetNext())) {
- return false;
- }
- while (buffer->has_more()) {
- if (!cache->IsIdentifierPart(buffer->GetNext())) {
- return false;
- }
+static MaybeObject* CopyAddFieldDescriptor(Map* map,
+ Name* name,
+ int index,
+ PropertyAttributes attributes,
+ Representation representation,
+ TransitionFlag flag) {
+ Map* new_map;
+ FieldDescriptor new_field_desc(name, index, attributes, representation);
+ MaybeObject* maybe_map = map->CopyAddDescriptor(&new_field_desc, flag);
+ if (!maybe_map->To(&new_map)) return maybe_map;
+ int unused_property_fields = map->unused_property_fields() - 1;
+ if (unused_property_fields < 0) {
+ unused_property_fields += JSObject::kFieldsAdded;
}
- return true;
+ new_map->set_unused_property_fields(unused_property_fields);
+ return new_map;
}
-MaybeObject* JSObject::AddFastProperty(String* name,
- Object* value,
- PropertyAttributes attributes,
- StoreFromKeyed store_mode) {
- ASSERT(!IsJSGlobalProxy());
+static Handle<Map> CopyAddFieldDescriptor(Handle<Map> map,
+ Handle<Name> name,
+ int index,
+ PropertyAttributes attributes,
+ Representation representation,
+ TransitionFlag flag) {
+ CALL_HEAP_FUNCTION(map->GetIsolate(),
+ CopyAddFieldDescriptor(
+ *map, *name, index, attributes, representation, flag),
+ Map);
+}
+
+
+void JSObject::AddFastProperty(Handle<JSObject> object,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ StoreFromKeyed store_mode,
+ ValueType value_type,
+ TransitionFlag flag) {
+ ASSERT(!object->IsJSGlobalProxy());
ASSERT(DescriptorArray::kNotFound ==
- map()->instance_descriptors()->Search(
- name, map()->NumberOfOwnDescriptors()));
+ object->map()->instance_descriptors()->Search(
+ *name, object->map()->NumberOfOwnDescriptors()));
- // Normalize the object if the name is an actual string (not the
- // hidden symbols) and is not a real identifier.
+ // Normalize the object if the name is an actual name (not the
+ // hidden strings) and is not a real identifier.
// Normalize the object if it will have too many fast properties.
- Isolate* isolate = GetHeap()->isolate();
- StringInputBuffer buffer(name);
- if ((!IsIdentifier(isolate->unicode_cache(), &buffer)
- && name != isolate->heap()->hidden_symbol()) ||
- (map()->unused_property_fields() == 0 &&
- TooManyFastProperties(properties()->length(), store_mode))) {
- Object* obj;
- MaybeObject* maybe_obj =
- NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
-
- return AddSlowProperty(name, value, attributes);
+ Isolate* isolate = object->GetIsolate();
+ if (!name->IsCacheable(isolate) ||
+ object->TooManyFastProperties(store_mode)) {
+ NormalizeProperties(object, CLEAR_INOBJECT_PROPERTIES, 0);
+ AddSlowProperty(object, name, value, attributes);
+ return;
}
// Compute the new index for new field.
- int index = map()->NextFreePropertyIndex();
+ int index = object->map()->NextFreePropertyIndex();
// Allocate new instance descriptors with (name, index) added
- FieldDescriptor new_field(name, index, attributes, 0);
+ if (object->IsJSContextExtensionObject()) value_type = FORCE_TAGGED;
+ Representation representation = value->OptimalRepresentation(value_type);
+ Handle<Map> new_map = CopyAddFieldDescriptor(
+ handle(object->map()), name, index, attributes, representation, flag);
- ASSERT(index < map()->inobject_properties() ||
- (index - map()->inobject_properties()) < properties()->length() ||
- map()->unused_property_fields() == 0);
-
- FixedArray* values = NULL;
-
- if (map()->unused_property_fields() == 0) {
- // Make room for the new value
- MaybeObject* maybe_values =
- properties()->CopySize(properties()->length() + kFieldsAdded);
- if (!maybe_values->To(&values)) return maybe_values;
- }
+ AddFastPropertyUsingMap(object, new_map, name, value, index, representation);
+}
- // Only allow map transition if the object isn't the global object.
- TransitionFlag flag = isolate->empty_object_map() != map()
- ? INSERT_TRANSITION
- : OMIT_TRANSITION;
- Map* new_map;
- MaybeObject* maybe_new_map = map()->CopyAddDescriptor(&new_field, flag);
- if (!maybe_new_map->To(&new_map)) return maybe_new_map;
+static MaybeObject* CopyAddConstantDescriptor(Map* map,
+ Name* name,
+ Object* value,
+ PropertyAttributes attributes,
+ TransitionFlag flag) {
+ ConstantDescriptor new_constant_desc(name, value, attributes);
+ return map->CopyAddDescriptor(&new_constant_desc, flag);
+}
- if (map()->unused_property_fields() == 0) {
- ASSERT(values != NULL);
- set_properties(values);
- new_map->set_unused_property_fields(kFieldsAdded - 1);
- } else {
- new_map->set_unused_property_fields(map()->unused_property_fields() - 1);
- }
- set_map(new_map);
- return FastPropertyAtPut(index, value);
+static Handle<Map> CopyAddConstantDescriptor(Handle<Map> map,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ TransitionFlag flag) {
+ CALL_HEAP_FUNCTION(map->GetIsolate(),
+ CopyAddConstantDescriptor(
+ *map, *name, *value, attributes, flag),
+ Map);
}
-MaybeObject* JSObject::AddConstantFunctionProperty(
- String* name,
- JSFunction* function,
- PropertyAttributes attributes) {
- // Allocate new instance descriptors with (name, function) added
- ConstantFunctionDescriptor d(name, function, attributes, 0);
-
- Heap* heap = GetHeap();
+void JSObject::AddConstantProperty(Handle<JSObject> object,
+ Handle<Name> name,
+ Handle<Object> constant,
+ PropertyAttributes attributes,
+ TransitionFlag initial_flag) {
TransitionFlag flag =
- // Do not add transitions to the empty object map (map of "new Object()"),
- // nor to global objects.
- (map() == heap->isolate()->empty_object_map() || IsGlobalObject() ||
+ // Do not add transitions to global objects.
+ (object->IsGlobalObject() ||
// Don't add transitions to special properties with non-trivial
// attributes.
- // TODO(verwaest): Once we support attribute changes, these transitions
- // should be kept as well.
attributes != NONE)
? OMIT_TRANSITION
- : INSERT_TRANSITION;
+ : initial_flag;
- Map* new_map;
- MaybeObject* maybe_new_map = map()->CopyAddDescriptor(&d, flag);
- if (!maybe_new_map->To(&new_map)) return maybe_new_map;
+ // Allocate new instance descriptors with (name, constant) added.
+ Handle<Map> new_map = CopyAddConstantDescriptor(
+ handle(object->map()), name, constant, attributes, flag);
- set_map(new_map);
- return function;
+ object->set_map(*new_map);
}
-// Add property in slow mode
-MaybeObject* JSObject::AddSlowProperty(String* name,
- Object* value,
- PropertyAttributes attributes) {
- ASSERT(!HasFastProperties());
- StringDictionary* dict = property_dictionary();
- Object* store_value = value;
- if (IsGlobalObject()) {
+void JSObject::AddSlowProperty(Handle<JSObject> object,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes) {
+ ASSERT(!object->HasFastProperties());
+ Isolate* isolate = object->GetIsolate();
+ Handle<NameDictionary> dict(object->property_dictionary());
+ if (object->IsGlobalObject()) {
// In case name is an orphaned property reuse the cell.
- int entry = dict->FindEntry(name);
- if (entry != StringDictionary::kNotFound) {
- store_value = dict->ValueAt(entry);
- JSGlobalPropertyCell::cast(store_value)->set_value(value);
+ int entry = dict->FindEntry(*name);
+ if (entry != NameDictionary::kNotFound) {
+ Handle<PropertyCell> cell(PropertyCell::cast(dict->ValueAt(entry)));
+ PropertyCell::SetValueInferType(cell, value);
// Assign an enumeration index to the property and update
// SetNextEnumerationIndex.
int index = dict->NextEnumerationIndex();
PropertyDetails details = PropertyDetails(attributes, NORMAL, index);
dict->SetNextEnumerationIndex(index + 1);
- dict->SetEntry(entry, name, store_value, details);
- return value;
- }
- Heap* heap = GetHeap();
- { MaybeObject* maybe_store_value =
- heap->AllocateJSGlobalPropertyCell(value);
- if (!maybe_store_value->ToObject(&store_value)) return maybe_store_value;
+ dict->SetEntry(entry, *name, *cell, details);
+ return;
}
- JSGlobalPropertyCell::cast(store_value)->set_value(value);
- }
- PropertyDetails details = PropertyDetails(attributes, NORMAL);
- Object* result;
- { MaybeObject* maybe_result = dict->Add(name, store_value, details);
- if (!maybe_result->ToObject(&result)) return maybe_result;
+ Handle<PropertyCell> cell = isolate->factory()->NewPropertyCell(value);
+ PropertyCell::SetValueInferType(cell, value);
+ value = cell;
}
- if (dict != result) set_properties(StringDictionary::cast(result));
- return value;
+ PropertyDetails details = PropertyDetails(attributes, NORMAL, 0);
+ Handle<NameDictionary> result = NameDictionaryAdd(dict, name, value, details);
+ if (*dict != *result) object->set_properties(*result);
}
-MaybeObject* JSObject::AddProperty(String* name,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- JSReceiver::StoreFromKeyed store_mode,
- ExtensibilityCheck extensibility_check) {
- ASSERT(!IsJSGlobalProxy());
- Map* map_of_this = map();
- Heap* heap = GetHeap();
+Handle<Object> JSObject::AddProperty(Handle<JSObject> object,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ StrictModeFlag strict_mode,
+ JSReceiver::StoreFromKeyed store_mode,
+ ExtensibilityCheck extensibility_check,
+ ValueType value_type,
+ StoreMode mode,
+ TransitionFlag transition_flag) {
+ ASSERT(!object->IsJSGlobalProxy());
+ Isolate* isolate = object->GetIsolate();
+
+ if (!name->IsUniqueName()) {
+ name = isolate->factory()->InternalizedStringFromString(
+ Handle<String>::cast(name));
+ }
+
if (extensibility_check == PERFORM_EXTENSIBILITY_CHECK &&
- !map_of_this->is_extensible()) {
+ !object->map()->is_extensible()) {
if (strict_mode == kNonStrictMode) {
return value;
} else {
- Handle<Object> args[1] = {Handle<String>(name)};
- return heap->isolate()->Throw(
- *FACTORY->NewTypeError("object_not_extensible",
- HandleVector(args, 1)));
+ Handle<Object> args[1] = { name };
+ Handle<Object> error = isolate->factory()->NewTypeError(
+ "object_not_extensible", HandleVector(args, ARRAY_SIZE(args)));
+ isolate->Throw(*error);
+ return Handle<Object>();
}
}
- if (HasFastProperties()) {
+
+ if (object->HasFastProperties()) {
// Ensure the descriptor array does not get too big.
- if (map_of_this->NumberOfOwnDescriptors() <
+ if (object->map()->NumberOfOwnDescriptors() <
DescriptorArray::kMaxNumberOfDescriptors) {
+ // TODO(verwaest): Support other constants.
+ // if (mode == ALLOW_AS_CONSTANT &&
+ // !value->IsTheHole() &&
+ // !value->IsConsString()) {
if (value->IsJSFunction()) {
- return AddConstantFunctionProperty(name,
- JSFunction::cast(value),
- attributes);
+ AddConstantProperty(object, name, value, attributes, transition_flag);
} else {
- return AddFastProperty(name, value, attributes, store_mode);
+ AddFastProperty(object, name, value, attributes, store_mode,
+ value_type, transition_flag);
}
} else {
// Normalize the object to prevent very large instance descriptors.
// This eliminates unwanted N^2 allocation and lookup behavior.
- Object* obj;
- { MaybeObject* maybe_obj =
- NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
+ NormalizeProperties(object, CLEAR_INOBJECT_PROPERTIES, 0);
+ AddSlowProperty(object, name, value, attributes);
}
+ } else {
+ AddSlowProperty(object, name, value, attributes);
+ }
+
+ if (FLAG_harmony_observation &&
+ object->map()->is_observed() &&
+ *name != isolate->heap()->hidden_string()) {
+ Handle<Object> old_value = isolate->factory()->the_hole_value();
+ EnqueueChangeRecord(object, "new", name, old_value);
}
- return AddSlowProperty(name, value, attributes);
+
+ return value;
}
-MaybeObject* JSObject::SetPropertyPostInterceptor(
- String* name,
- Object* value,
+void JSObject::EnqueueChangeRecord(Handle<JSObject> object,
+ const char* type_str,
+ Handle<Name> name,
+ Handle<Object> old_value) {
+ Isolate* isolate = object->GetIsolate();
+ HandleScope scope(isolate);
+ Handle<String> type = isolate->factory()->InternalizeUtf8String(type_str);
+ if (object->IsJSGlobalObject()) {
+ object = handle(JSGlobalObject::cast(*object)->global_receiver(), isolate);
+ }
+ Handle<Object> args[] = { type, object, name, old_value };
+ bool threw;
+ Execution::Call(isolate,
+ Handle<JSFunction>(isolate->observers_notify_change()),
+ isolate->factory()->undefined_value(),
+ old_value->IsTheHole() ? 3 : 4, args,
+ &threw);
+ ASSERT(!threw);
+}
+
+
+void JSObject::DeliverChangeRecords(Isolate* isolate) {
+ ASSERT(isolate->observer_delivery_pending());
+ bool threw = false;
+ Execution::Call(
+ isolate,
+ isolate->observers_deliver_changes(),
+ isolate->factory()->undefined_value(),
+ 0,
+ NULL,
+ &threw);
+ ASSERT(!threw);
+ isolate->set_observer_delivery_pending(false);
+}
+
+
+Handle<Object> JSObject::SetPropertyPostInterceptor(
+ Handle<JSObject> object,
+ Handle<Name> name,
+ Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- ExtensibilityCheck extensibility_check) {
+ StrictModeFlag strict_mode) {
// Check local property, ignore interceptor.
- LookupResult result(GetIsolate());
- LocalLookupRealNamedProperty(name, &result);
- if (!result.IsFound()) map()->LookupTransition(this, name, &result);
+ LookupResult result(object->GetIsolate());
+ object->LocalLookupRealNamedProperty(*name, &result);
+ if (!result.IsFound()) {
+ object->map()->LookupTransition(*object, *name, &result);
+ }
if (result.IsFound()) {
// An existing property or a map transition was found. Use set property to
// handle all these cases.
- return SetProperty(&result, name, value, attributes, strict_mode);
+ return SetPropertyForResult(object, &result, name, value, attributes,
+ strict_mode, MAY_BE_STORE_FROM_KEYED);
}
bool done = false;
- MaybeObject* result_object;
- result_object =
- SetPropertyViaPrototypes(name, value, attributes, strict_mode, &done);
+ Handle<Object> result_object = SetPropertyViaPrototypes(
+ object, name, value, attributes, strict_mode, &done);
if (done) return result_object;
// Add a new real property.
- return AddProperty(name, value, attributes, strict_mode,
- MAY_BE_STORE_FROM_KEYED, extensibility_check);
+ return AddProperty(object, name, value, attributes, strict_mode);
}
-MaybeObject* JSObject::ReplaceSlowProperty(String* name,
- Object* value,
- PropertyAttributes attributes) {
- StringDictionary* dictionary = property_dictionary();
- int old_index = dictionary->FindEntry(name);
+static void ReplaceSlowProperty(Handle<JSObject> object,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes) {
+ NameDictionary* dictionary = object->property_dictionary();
+ int old_index = dictionary->FindEntry(*name);
int new_enumeration_index = 0; // 0 means "Use the next available index."
if (old_index != -1) {
// All calls to ReplaceSlowProperty have had all transitions removed.
@@ -1751,182 +2232,629 @@ MaybeObject* JSObject::ReplaceSlowProperty(String* name,
}
PropertyDetails new_details(attributes, NORMAL, new_enumeration_index);
- return SetNormalizedProperty(name, value, new_details);
+ JSObject::SetNormalizedProperty(object, name, value, new_details);
}
-MaybeObject* JSObject::ConvertTransitionToMapTransition(
- int transition_index,
- String* name,
- Object* new_value,
- PropertyAttributes attributes) {
- Map* old_map = map();
- Map* old_target = old_map->GetTransition(transition_index);
- Object* result;
+const char* Representation::Mnemonic() const {
+ switch (kind_) {
+ case kNone: return "v";
+ case kTagged: return "t";
+ case kSmi: return "s";
+ case kDouble: return "d";
+ case kInteger32: return "i";
+ case kHeapObject: return "h";
+ case kExternal: return "x";
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+}
- MaybeObject* maybe_result =
- ConvertDescriptorToField(name, new_value, attributes);
- if (!maybe_result->To(&result)) return maybe_result;
- if (!HasFastProperties()) return result;
-
- // This method should only be used to convert existing transitions. Objects
- // with the map of "new Object()" cannot have transitions in the first place.
- Map* new_map = map();
- ASSERT(new_map != GetIsolate()->empty_object_map());
-
- // TODO(verwaest): From here on we lose existing map transitions, causing
- // invalid back pointers. This will change once we can store multiple
- // transitions with the same key.
-
- bool owned_descriptors = old_map->owns_descriptors();
- if (owned_descriptors ||
- old_target->instance_descriptors() == old_map->instance_descriptors()) {
- // Since the conversion above generated a new fast map with an additional
- // property which can be shared as well, install this descriptor pointer
- // along the entire chain of smaller maps.
- Map* map;
- DescriptorArray* new_descriptors = new_map->instance_descriptors();
- DescriptorArray* old_descriptors = old_map->instance_descriptors();
- for (Object* current = old_map;
- !current->IsUndefined();
- current = map->GetBackPointer()) {
- map = Map::cast(current);
- if (map->instance_descriptors() != old_descriptors) break;
- map->SetEnumLength(Map::kInvalidEnumCache);
- map->set_instance_descriptors(new_descriptors);
- }
- old_map->set_owns_descriptors(false);
- }
-
- old_map->SetTransition(transition_index, new_map);
- new_map->SetBackPointer(old_map);
+enum RightTrimMode { FROM_GC, FROM_MUTATOR };
+
+
+static void ZapEndOfFixedArray(Address new_end, int to_trim) {
+ // If we are doing a big trim in old space then we zap the space.
+ Object** zap = reinterpret_cast<Object**>(new_end);
+ zap++; // Header of filler must be at least one word so skip that.
+ for (int i = 1; i < to_trim; i++) {
+ *zap++ = Smi::FromInt(0);
+ }
+}
+
+
+template<RightTrimMode trim_mode>
+static void RightTrimFixedArray(Heap* heap, FixedArray* elms, int to_trim) {
+ ASSERT(elms->map() != heap->fixed_cow_array_map());
+ // For now this trick is only applied to fixed arrays in new and paged space.
+ ASSERT(!heap->lo_space()->Contains(elms));
+
+ const int len = elms->length();
+
+ ASSERT(to_trim < len);
+
+ Address new_end = elms->address() + FixedArray::SizeFor(len - to_trim);
+
+ if (trim_mode != FROM_GC || Heap::ShouldZapGarbage()) {
+ ZapEndOfFixedArray(new_end, to_trim);
+ }
+
+ int size_delta = to_trim * kPointerSize;
+
+ // Technically in new space this write might be omitted (except for
+  // debug mode which iterates through the heap), but to play it safe
+ // we still do it.
+ heap->CreateFillerObjectAt(new_end, size_delta);
+
+ elms->set_length(len - to_trim);
+
+ // Maintain marking consistency for IncrementalMarking.
+ if (Marking::IsBlack(Marking::MarkBitFrom(elms))) {
+ if (trim_mode == FROM_GC) {
+ MemoryChunk::IncrementLiveBytesFromGC(elms->address(), -size_delta);
+ } else {
+ MemoryChunk::IncrementLiveBytesFromMutator(elms->address(), -size_delta);
+ }
+ }
+
+ // The array may not be moved during GC,
+ // and size has to be adjusted nevertheless.
+ HeapProfiler* profiler = heap->isolate()->heap_profiler();
+ if (profiler->is_tracking_allocations()) {
+ profiler->UpdateObjectSizeEvent(elms->address(), elms->Size());
+ }
+}
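
RightTrimFixedArray, moved up here so MigrateToMap can use it, shrinks a live array without relocating it: the freed tail is zapped, a filler object is written so the heap stays linearly parseable, the length word is lowered, and live-byte accounting is fixed up. A rough standalone sketch of the in-place trim, with the heap-specific bookkeeping reduced to comments (all names here are invented):

    #include <cassert>
    #include <cstddef>

    // Invented stand-in for a length-prefixed array of machine words.
    struct FakeArray {
      std::size_t length;
      void* slots[64];
    };

    // Sketch: zap the freed tail so no stale pointers remain, then shrink the
    // length. The real helper additionally writes a filler object over the
    // freed region and adjusts marking / live-byte counters.
    static void RightTrim(FakeArray* array, std::size_t to_trim) {
      assert(to_trim < array->length);
      for (std::size_t i = array->length - to_trim; i < array->length; ++i) {
        array->slots[i] = nullptr;  // "zap" the trimmed slots
      }
      array->length -= to_trim;
    }
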
+
+
+bool Map::InstancesNeedRewriting(Map* target,
+ int target_number_of_fields,
+ int target_inobject,
+ int target_unused) {
+ // If fields were added (or removed), rewrite the instance.
+ int number_of_fields = NumberOfFields();
+ ASSERT(target_number_of_fields >= number_of_fields);
+ if (target_number_of_fields != number_of_fields) return true;
+
+ if (FLAG_track_double_fields) {
+ // If smi descriptors were replaced by double descriptors, rewrite.
+ DescriptorArray* old_desc = instance_descriptors();
+ DescriptorArray* new_desc = target->instance_descriptors();
+ int limit = NumberOfOwnDescriptors();
+ for (int i = 0; i < limit; i++) {
+ if (new_desc->GetDetails(i).representation().IsDouble() &&
+ !old_desc->GetDetails(i).representation().IsDouble()) {
+ return true;
+ }
+ }
+ }
+
+ // If no fields were added, and no inobject properties were removed, setting
+ // the map is sufficient.
+ if (target_inobject == inobject_properties()) return false;
+ // In-object slack tracking may have reduced the object size of the new map.
+ // In that case, succeed if all existing fields were inobject, and they still
+ // fit within the new inobject size.
+ ASSERT(target_inobject < inobject_properties());
+ if (target_number_of_fields <= target_inobject) {
+ ASSERT(target_number_of_fields + target_unused == target_inobject);
+ return false;
+ }
+ // Otherwise, properties will need to be moved to the backing store.
+ return true;
+}
+
+
+// To migrate an instance to a map:
+// - First check whether the instance needs to be rewritten. If not, simply
+// change the map.
+// - Otherwise, allocate a fixed array large enough to hold all fields, in
+// addition to unused space.
+// - Copy all existing properties in, in the following order: backing store
+// properties, unused fields, inobject properties.
+// - If all allocation succeeded, commit the state atomically:
+// * Copy inobject properties from the backing store back into the object.
+// * Trim the difference in instance size of the object. This also cleanly
+// frees inobject properties that moved to the backing store.
+// * If there are properties left in the backing store, trim off the space used
+// to temporarily store the inobject properties.
+// * If there are properties left in the backing store, install the backing
+// store.
+void JSObject::MigrateToMap(Handle<JSObject> object, Handle<Map> new_map) {
+ Isolate* isolate = object->GetIsolate();
+ Handle<Map> old_map(object->map());
+ int number_of_fields = new_map->NumberOfFields();
+ int inobject = new_map->inobject_properties();
+ int unused = new_map->unused_property_fields();
+
+ // Nothing to do if no functions were converted to fields and no smis were
+ // converted to doubles.
+ if (!old_map->InstancesNeedRewriting(
+ *new_map, number_of_fields, inobject, unused)) {
+ object->set_map(*new_map);
+ return;
+ }
+
+ int total_size = number_of_fields + unused;
+ int external = total_size - inobject;
+ Handle<FixedArray> array = isolate->factory()->NewFixedArray(total_size);
+
+ Handle<DescriptorArray> old_descriptors(old_map->instance_descriptors());
+ Handle<DescriptorArray> new_descriptors(new_map->instance_descriptors());
+ int descriptors = new_map->NumberOfOwnDescriptors();
+
+ for (int i = 0; i < descriptors; i++) {
+ PropertyDetails details = new_descriptors->GetDetails(i);
+ if (details.type() != FIELD) continue;
+ PropertyDetails old_details = old_descriptors->GetDetails(i);
+ if (old_details.type() == CALLBACKS) {
+ ASSERT(details.representation().IsTagged());
+ continue;
+ }
+ ASSERT(old_details.type() == CONSTANT ||
+ old_details.type() == FIELD);
+ Object* raw_value = old_details.type() == CONSTANT
+ ? old_descriptors->GetValue(i)
+ : object->RawFastPropertyAt(old_descriptors->GetFieldIndex(i));
+ Handle<Object> value(raw_value, isolate);
+ if (FLAG_track_double_fields &&
+ !old_details.representation().IsDouble() &&
+ details.representation().IsDouble()) {
+ if (old_details.representation().IsNone()) {
+ value = handle(Smi::FromInt(0), isolate);
+ }
+ value = NewStorageFor(isolate, value, details.representation());
+ }
+ ASSERT(!(FLAG_track_double_fields &&
+ details.representation().IsDouble() &&
+ value->IsSmi()));
+ int target_index = new_descriptors->GetFieldIndex(i) - inobject;
+ if (target_index < 0) target_index += total_size;
+ array->set(target_index, *value);
+ }
+
+ // From here on we cannot fail and we shouldn't GC anymore.
+ DisallowHeapAllocation no_allocation;
+
+ // Copy (real) inobject properties. If necessary, stop at number_of_fields to
+ // avoid overwriting |one_pointer_filler_map|.
+ int limit = Min(inobject, number_of_fields);
+ for (int i = 0; i < limit; i++) {
+ object->FastPropertyAtPut(i, array->get(external + i));
+ }
+
+ // Create filler object past the new instance size.
+ int new_instance_size = new_map->instance_size();
+ int instance_size_delta = old_map->instance_size() - new_instance_size;
+ ASSERT(instance_size_delta >= 0);
+ Address address = object->address() + new_instance_size;
+ isolate->heap()->CreateFillerObjectAt(address, instance_size_delta);
+
+ // If there are properties in the new backing store, trim it to the correct
+ // size and install the backing store into the object.
+ if (external > 0) {
+ RightTrimFixedArray<FROM_MUTATOR>(isolate->heap(), *array, inobject);
+ object->set_properties(*array);
+ }
+
+ object->set_map(*new_map);
+}
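
MigrateToMap above follows a strict two-phase shape: every field value is first staged in a freshly allocated scratch FixedArray (the only step that can allocate), and only then is the new state committed under DisallowHeapAllocation by writing the in-object fields, creating a filler for the shrunk instance, trimming the scratch array, and installing the new map. A hedged sketch of that stage-then-commit pattern with an invented object layout, not the actual V8 types:

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    // Invented layout: some fields stored inline, the rest out of line.
    struct FakeObject {
      std::vector<int> inobject;
      std::vector<int> backing;
    };

    // Phase 1 gathers all current field values into a scratch buffer; phase 2
    // commits the new split between inline and out-of-line storage (in the
    // real code the commit step can no longer allocate or fail).
    static void MigrateLayout(FakeObject* obj, std::size_t new_inobject_count) {
      std::vector<int> scratch(obj->backing);                        // phase 1
      scratch.insert(scratch.end(), obj->inobject.begin(), obj->inobject.end());

      std::size_t split = std::min(new_inobject_count, scratch.size());  // phase 2
      obj->inobject.assign(scratch.begin(), scratch.begin() + split);
      obj->backing.assign(scratch.begin() + split, scratch.end());
    }
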
+
+
+Handle<TransitionArray> Map::AddTransition(Handle<Map> map,
+ Handle<Name> key,
+ Handle<Map> target,
+ SimpleTransitionFlag flag) {
+ CALL_HEAP_FUNCTION(map->GetIsolate(),
+ map->AddTransition(*key, *target, flag),
+ TransitionArray);
+}
+
+
+void JSObject::GeneralizeFieldRepresentation(Handle<JSObject> object,
+ int modify_index,
+ Representation new_representation,
+ StoreMode store_mode) {
+ Handle<Map> new_map = Map::GeneralizeRepresentation(
+ handle(object->map()), modify_index, new_representation, store_mode);
+ if (object->map() == *new_map) return;
+ return MigrateToMap(object, new_map);
+}
+
+
+int Map::NumberOfFields() {
+ DescriptorArray* descriptors = instance_descriptors();
+ int result = 0;
+ for (int i = 0; i < NumberOfOwnDescriptors(); i++) {
+ if (descriptors->GetDetails(i).type() == FIELD) result++;
+ }
return result;
}
-MaybeObject* JSObject::ConvertDescriptorToField(String* name,
- Object* new_value,
- PropertyAttributes attributes) {
- if (map()->unused_property_fields() == 0 &&
- TooManyFastProperties(properties()->length(), MAY_BE_STORE_FROM_KEYED)) {
- Object* obj;
- MaybeObject* maybe_obj = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- return ReplaceSlowProperty(name, new_value, attributes);
+Handle<Map> Map::CopyGeneralizeAllRepresentations(Handle<Map> map,
+ int modify_index,
+ StoreMode store_mode,
+ PropertyAttributes attributes,
+ const char* reason) {
+ Handle<Map> new_map = Copy(map);
+
+ DescriptorArray* descriptors = new_map->instance_descriptors();
+ descriptors->InitializeRepresentations(Representation::Tagged());
+
+ // Unless the instance is being migrated, ensure that modify_index is a field.
+ PropertyDetails details = descriptors->GetDetails(modify_index);
+ if (store_mode == FORCE_FIELD && details.type() != FIELD) {
+ FieldDescriptor d(descriptors->GetKey(modify_index),
+ new_map->NumberOfFields(),
+ attributes,
+ Representation::Tagged());
+ d.SetSortedKeyIndex(details.pointer());
+ descriptors->Set(modify_index, &d);
+ int unused_property_fields = new_map->unused_property_fields() - 1;
+ if (unused_property_fields < 0) {
+ unused_property_fields += JSObject::kFieldsAdded;
+ }
+ new_map->set_unused_property_fields(unused_property_fields);
}
- int index = map()->NextFreePropertyIndex();
- FieldDescriptor new_field(name, index, attributes, 0);
+ if (FLAG_trace_generalization) {
+ map->PrintGeneralization(stdout, reason, modify_index,
+ new_map->NumberOfOwnDescriptors(),
+ new_map->NumberOfOwnDescriptors(),
+ details.type() == CONSTANT && store_mode == FORCE_FIELD,
+ Representation::Tagged(), Representation::Tagged());
+ }
+ return new_map;
+}
- // Make a new map for the object.
- Map* new_map;
- MaybeObject* maybe_new_map = map()->CopyInsertDescriptor(&new_field,
- OMIT_TRANSITION);
- if (!maybe_new_map->To(&new_map)) return maybe_new_map;
- // Make new properties array if necessary.
- FixedArray* new_properties = NULL;
- int new_unused_property_fields = map()->unused_property_fields() - 1;
- if (map()->unused_property_fields() == 0) {
- new_unused_property_fields = kFieldsAdded - 1;
- MaybeObject* maybe_new_properties =
- properties()->CopySize(properties()->length() + kFieldsAdded);
- if (!maybe_new_properties->To(&new_properties)) return maybe_new_properties;
+void Map::DeprecateTransitionTree() {
+ if (!FLAG_track_fields) return;
+ if (is_deprecated()) return;
+ if (HasTransitionArray()) {
+ TransitionArray* transitions = this->transitions();
+ for (int i = 0; i < transitions->number_of_transitions(); i++) {
+ transitions->GetTarget(i)->DeprecateTransitionTree();
+ }
}
+ deprecate();
+ dependent_code()->DeoptimizeDependentCodeGroup(
+ GetIsolate(), DependentCode::kTransitionGroup);
+ NotifyLeafMapLayoutChange();
+}
+
- // Update pointers to commit changes.
- // Object points to the new map.
- new_map->set_unused_property_fields(new_unused_property_fields);
- set_map(new_map);
- if (new_properties != NULL) {
- set_properties(new_properties);
+// Invalidates a transition target at |key|, and installs |new_descriptors| over
+// the current instance_descriptors to ensure proper sharing of descriptor
+// arrays.
+void Map::DeprecateTarget(Name* key, DescriptorArray* new_descriptors) {
+ if (HasTransitionArray()) {
+ TransitionArray* transitions = this->transitions();
+ int transition = transitions->Search(key);
+ if (transition != TransitionArray::kNotFound) {
+ transitions->GetTarget(transition)->DeprecateTransitionTree();
+ }
}
- return FastPropertyAtPut(index, new_value);
+
+ // Don't overwrite the empty descriptor array.
+ if (NumberOfOwnDescriptors() == 0) return;
+
+ DescriptorArray* to_replace = instance_descriptors();
+ Map* current = this;
+ while (current->instance_descriptors() == to_replace) {
+ current->SetEnumLength(Map::kInvalidEnumCache);
+ current->set_instance_descriptors(new_descriptors);
+ Object* next = current->GetBackPointer();
+ if (next->IsUndefined()) break;
+ current = Map::cast(next);
+ }
+
+ set_owns_descriptors(false);
}
+Map* Map::FindRootMap() {
+ Map* result = this;
+ while (true) {
+ Object* back = result->GetBackPointer();
+ if (back->IsUndefined()) return result;
+ result = Map::cast(back);
+ }
+}
+
+
+// Returns NULL if the updated map is incompatible.
+Map* Map::FindUpdatedMap(int verbatim,
+ int length,
+ DescriptorArray* descriptors) {
+ // This can only be called on roots of transition trees.
+ ASSERT(GetBackPointer()->IsUndefined());
+
+ Map* current = this;
+
+ for (int i = verbatim; i < length; i++) {
+ if (!current->HasTransitionArray()) break;
+ Name* name = descriptors->GetKey(i);
+ TransitionArray* transitions = current->transitions();
+ int transition = transitions->Search(name);
+ if (transition == TransitionArray::kNotFound) break;
+ current = transitions->GetTarget(transition);
+ PropertyDetails details = descriptors->GetDetails(i);
+ PropertyDetails target_details =
+ current->instance_descriptors()->GetDetails(i);
+ if (details.attributes() != target_details.attributes()) return NULL;
+ if (details.type() == CALLBACKS) {
+ if (target_details.type() != CALLBACKS) return NULL;
+ if (descriptors->GetValue(i) !=
+ current->instance_descriptors()->GetValue(i)) {
+ return NULL;
+ }
+ }
+ }
+
+ return current;
+}
-MaybeObject* JSObject::SetPropertyWithInterceptor(
- String* name,
- Object* value,
+
+Map* Map::FindLastMatchMap(int verbatim,
+ int length,
+ DescriptorArray* descriptors) {
+ // This can only be called on roots of transition trees.
+ ASSERT(GetBackPointer()->IsUndefined());
+
+ Map* current = this;
+
+ for (int i = verbatim; i < length; i++) {
+ if (!current->HasTransitionArray()) break;
+ Name* name = descriptors->GetKey(i);
+ TransitionArray* transitions = current->transitions();
+ int transition = transitions->Search(name);
+ if (transition == TransitionArray::kNotFound) break;
+
+ Map* next = transitions->GetTarget(transition);
+ DescriptorArray* next_descriptors = next->instance_descriptors();
+
+ if (next_descriptors->GetValue(i) != descriptors->GetValue(i)) break;
+
+ PropertyDetails details = descriptors->GetDetails(i);
+ PropertyDetails next_details = next_descriptors->GetDetails(i);
+ if (details.type() != next_details.type()) break;
+ if (details.attributes() != next_details.attributes()) break;
+ if (!details.representation().Equals(next_details.representation())) break;
+
+ current = next;
+ }
+ return current;
+}
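
Both FindUpdatedMap and FindLastMatchMap above are walks over the transition tree: starting from the root, they follow one named transition per descriptor and stop at the first key without an edge (or, for FindLastMatchMap, the first mismatching descriptor). A small generic sketch of that walk, with a made-up node type standing in for Map and TransitionArray:

    #include <map>
    #include <string>

    // Invented node type: each map owns named transitions to specialized maps.
    struct FakeMap {
      std::map<std::string, FakeMap*> transitions;
    };

    // Sketch of the transition-tree walk: follow an edge for each key in
    // order; stop at the first missing edge and return the deepest node hit.
    static FakeMap* WalkTransitions(FakeMap* root,
                                    const std::string* keys,
                                    int count) {
      FakeMap* current = root;
      for (int i = 0; i < count; ++i) {
        auto it = current->transitions.find(keys[i]);
        if (it == current->transitions.end()) break;
        current = it->second;
      }
      return current;
    }
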
+
+
+// Generalize the representation of the descriptor at |modify_index|.
+// This method rewrites the transition tree to reflect the new change. To avoid
+// high degrees of polymorphism, and to stabilize quickly, on every rewrite
+// the new type is deduced by merging the current type with any potential new
+// (partial) version of the type in the transition tree.
+// To do this, on each rewrite:
+// - Search the root of the transition tree using FindRootMap.
+// - Find |updated|, the newest matching version of this map using
+// FindUpdatedMap. This uses the keys in the own map's descriptor array to
+// walk the transition tree.
+// - Merge/generalize the descriptor array of the current map and |updated|.
+// - Generalize the |modify_index| descriptor using |new_representation|.
+// - Walk the tree again starting from the root towards |updated|. Stop at
+// |split_map|, the first map whose descriptor array does not match the merged
+// descriptor array.
+// - If |updated| == |split_map|, |updated| is in the expected state. Return it.
+// - Otherwise, invalidate the outdated transition target from |updated|, and
+// replace its transition tree with a new branch for the updated descriptors.
+Handle<Map> Map::GeneralizeRepresentation(Handle<Map> old_map,
+ int modify_index,
+ Representation new_representation,
+ StoreMode store_mode) {
+ Handle<DescriptorArray> old_descriptors(old_map->instance_descriptors());
+ PropertyDetails old_details = old_descriptors->GetDetails(modify_index);
+ Representation old_representation = old_details.representation();
+
+ // It's fine to transition from None to anything but double without any
+ // modification to the object, because the default uninitialized value for
+ // representation None can be overwritten by both smi and tagged values.
+ // Doubles, however, would require a box allocation.
+ if (old_representation.IsNone() &&
+ !new_representation.IsNone() &&
+ !new_representation.IsDouble()) {
+ old_descriptors->SetRepresentation(modify_index, new_representation);
+ return old_map;
+ }
+
+ int descriptors = old_map->NumberOfOwnDescriptors();
+ Handle<Map> root_map(old_map->FindRootMap());
+
+ // Check the state of the root map.
+ if (!old_map->EquivalentToForTransition(*root_map)) {
+ return CopyGeneralizeAllRepresentations(old_map, modify_index, store_mode,
+ old_details.attributes(), "not equivalent");
+ }
+
+ int verbatim = root_map->NumberOfOwnDescriptors();
+
+ if (store_mode != ALLOW_AS_CONSTANT && modify_index < verbatim) {
+ return CopyGeneralizeAllRepresentations(old_map, modify_index, store_mode,
+ old_details.attributes(), "root modification");
+ }
+
+ Map* raw_updated = root_map->FindUpdatedMap(
+ verbatim, descriptors, *old_descriptors);
+ if (raw_updated == NULL) {
+ return CopyGeneralizeAllRepresentations(old_map, modify_index, store_mode,
+ old_details.attributes(), "incompatible");
+ }
+
+ Handle<Map> updated(raw_updated);
+ Handle<DescriptorArray> updated_descriptors(updated->instance_descriptors());
+
+ int valid = updated->NumberOfOwnDescriptors();
+
+ // Directly change the map if the target map is more general. Ensure that the
+ // target type of the modify_index is a FIELD, unless we are migrating.
+ if (updated_descriptors->IsMoreGeneralThan(
+ verbatim, valid, descriptors, *old_descriptors) &&
+ (store_mode == ALLOW_AS_CONSTANT ||
+ updated_descriptors->GetDetails(modify_index).type() == FIELD)) {
+ Representation updated_representation =
+ updated_descriptors->GetDetails(modify_index).representation();
+ if (new_representation.fits_into(updated_representation)) return updated;
+ }
+
+ Handle<DescriptorArray> new_descriptors = DescriptorArray::Merge(
+ updated_descriptors, verbatim, valid, descriptors, modify_index,
+ store_mode, old_descriptors);
+ ASSERT(store_mode == ALLOW_AS_CONSTANT ||
+ new_descriptors->GetDetails(modify_index).type() == FIELD);
+
+ old_representation =
+ new_descriptors->GetDetails(modify_index).representation();
+ Representation updated_representation =
+ new_representation.generalize(old_representation);
+ if (!updated_representation.Equals(old_representation)) {
+ new_descriptors->SetRepresentation(modify_index, updated_representation);
+ }
+
+ Handle<Map> split_map(root_map->FindLastMatchMap(
+ verbatim, descriptors, *new_descriptors));
+
+ int split_descriptors = split_map->NumberOfOwnDescriptors();
+ // This is shadowed by |updated_descriptors| being more general than
+ // |old_descriptors|.
+ ASSERT(descriptors != split_descriptors);
+
+ int descriptor = split_descriptors;
+ split_map->DeprecateTarget(
+ old_descriptors->GetKey(descriptor), *new_descriptors);
+
+ if (FLAG_trace_generalization) {
+ old_map->PrintGeneralization(
+ stdout, "", modify_index, descriptor, descriptors,
+ old_descriptors->GetDetails(modify_index).type() == CONSTANT &&
+ store_mode == FORCE_FIELD,
+ old_representation, updated_representation);
+ }
+
+ // Add missing transitions.
+ Handle<Map> new_map = split_map;
+ for (; descriptor < descriptors; descriptor++) {
+ new_map = Map::CopyInstallDescriptors(new_map, descriptor, new_descriptors);
+ new_map->set_migration_target(true);
+ }
+
+ new_map->set_owns_descriptors(true);
+ return new_map;
+}
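
GeneralizeRepresentation ultimately hinges on representations forming a small lattice with None at the bottom and Tagged at the top; Representation::generalize picks an upper bound of the stored and incoming representation, so repeated stores converge instead of ping-ponging between maps. A simplified, hand-written join over a reduced lattice (the real class has more kinds, e.g. Integer32 and HeapObject, which are not ordered against each other):

    // Reduced sketch of the representation lattice: None below everything,
    // Tagged at the top. This ordering is simplified for illustration.
    enum class Rep { kNone, kSmi, kDouble, kTagged };

    static Rep Generalize(Rep current, Rep incoming) {
      if (current == incoming) return current;
      if (current == Rep::kNone) return incoming;   // None can become anything
      if (incoming == Rep::kNone) return current;
      if ((current == Rep::kSmi && incoming == Rep::kDouble) ||
          (current == Rep::kDouble && incoming == Rep::kSmi)) {
        return Rep::kDouble;                        // smis fit into doubles
      }
      return Rep::kTagged;                          // otherwise go fully generic
    }
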
+
+
+// Generalize the representation of all FIELD descriptors.
+Handle<Map> Map::GeneralizeAllFieldRepresentations(
+ Handle<Map> map,
+ Representation new_representation) {
+ Handle<DescriptorArray> descriptors(map->instance_descriptors());
+ for (int i = 0; i < map->NumberOfOwnDescriptors(); i++) {
+ PropertyDetails details = descriptors->GetDetails(i);
+ if (details.type() == FIELD) {
+ map = GeneralizeRepresentation(map, i, new_representation, FORCE_FIELD);
+ }
+ }
+ return map;
+}
+
+
+Map* Map::CurrentMapForDeprecated() {
+ DisallowHeapAllocation no_allocation;
+ if (!is_deprecated()) return this;
+
+ DescriptorArray* old_descriptors = instance_descriptors();
+
+ int descriptors = NumberOfOwnDescriptors();
+ Map* root_map = FindRootMap();
+
+ // Check the state of the root map.
+ if (!EquivalentToForTransition(root_map)) return NULL;
+ int verbatim = root_map->NumberOfOwnDescriptors();
+
+ Map* updated = root_map->FindUpdatedMap(
+ verbatim, descriptors, old_descriptors);
+ if (updated == NULL) return NULL;
+
+ DescriptorArray* updated_descriptors = updated->instance_descriptors();
+ int valid = updated->NumberOfOwnDescriptors();
+ if (!updated_descriptors->IsMoreGeneralThan(
+ verbatim, valid, descriptors, old_descriptors)) {
+ return NULL;
+ }
+
+ return updated;
+}
+
+
+Handle<Object> JSObject::SetPropertyWithInterceptor(
+ Handle<JSObject> object,
+ Handle<Name> name,
+ Handle<Object> value,
PropertyAttributes attributes,
StrictModeFlag strict_mode) {
- Isolate* isolate = GetIsolate();
- HandleScope scope(isolate);
- Handle<JSObject> this_handle(this);
- Handle<String> name_handle(name);
- Handle<Object> value_handle(value, isolate);
- Handle<InterceptorInfo> interceptor(GetNamedInterceptor());
+ // TODO(rossberg): Support symbols in the API.
+ if (name->IsSymbol()) return value;
+ Isolate* isolate = object->GetIsolate();
+ Handle<String> name_string = Handle<String>::cast(name);
+ Handle<InterceptorInfo> interceptor(object->GetNamedInterceptor());
if (!interceptor->setter()->IsUndefined()) {
- LOG(isolate, ApiNamedPropertyAccess("interceptor-named-set", this, name));
- CustomArguments args(isolate, interceptor->data(), this, this);
- v8::AccessorInfo info(args.end());
- v8::NamedPropertySetter setter =
- v8::ToCData<v8::NamedPropertySetter>(interceptor->setter());
- v8::Handle<v8::Value> result;
- {
- // Leaving JavaScript.
- VMState state(isolate, EXTERNAL);
- Handle<Object> value_unhole(value->IsTheHole() ?
- isolate->heap()->undefined_value() :
- value,
- isolate);
- result = setter(v8::Utils::ToLocal(name_handle),
- v8::Utils::ToLocal(value_unhole),
- info);
- }
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- if (!result.IsEmpty()) return *value_handle;
+ LOG(isolate,
+ ApiNamedPropertyAccess("interceptor-named-set", *object, *name));
+ PropertyCallbackArguments args(
+ isolate, interceptor->data(), *object, *object);
+ v8::NamedPropertySetterCallback setter =
+ v8::ToCData<v8::NamedPropertySetterCallback>(interceptor->setter());
+ Handle<Object> value_unhole = value->IsTheHole()
+ ? Handle<Object>(isolate->factory()->undefined_value()) : value;
+ v8::Handle<v8::Value> result = args.Call(setter,
+ v8::Utils::ToLocal(name_string),
+ v8::Utils::ToLocal(value_unhole));
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ if (!result.IsEmpty()) return value;
}
- MaybeObject* raw_result =
- this_handle->SetPropertyPostInterceptor(*name_handle,
- *value_handle,
- attributes,
- strict_mode,
- PERFORM_EXTENSIBILITY_CHECK);
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- return raw_result;
+ Handle<Object> result =
+ SetPropertyPostInterceptor(object, name, value, attributes, strict_mode);
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ return result;
}
Handle<Object> JSReceiver::SetProperty(Handle<JSReceiver> object,
- Handle<String> key,
+ Handle<Name> name,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode) {
- CALL_HEAP_FUNCTION(object->GetIsolate(),
- object->SetProperty(*key, *value, attributes, strict_mode),
- Object);
-}
-
-
-MaybeObject* JSReceiver::SetProperty(String* name,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- JSReceiver::StoreFromKeyed store_mode) {
- LookupResult result(GetIsolate());
- LocalLookup(name, &result);
+ StrictModeFlag strict_mode,
+ StoreFromKeyed store_mode) {
+ LookupResult result(object->GetIsolate());
+ object->LocalLookup(*name, &result, true);
if (!result.IsFound()) {
- map()->LookupTransition(JSObject::cast(this), name, &result);
+ object->map()->LookupTransition(JSObject::cast(*object), *name, &result);
}
- return SetProperty(&result, name, value, attributes, strict_mode, store_mode);
+ return SetProperty(object, &result, name, value, attributes, strict_mode,
+ store_mode);
}
-MaybeObject* JSObject::SetPropertyWithCallback(Object* structure,
- String* name,
- Object* value,
- JSObject* holder,
- StrictModeFlag strict_mode) {
- Isolate* isolate = GetIsolate();
- HandleScope scope(isolate);
+Handle<Object> JSObject::SetPropertyWithCallback(Handle<JSObject> object,
+ Handle<Object> structure,
+ Handle<Name> name,
+ Handle<Object> value,
+ Handle<JSObject> holder,
+ StrictModeFlag strict_mode) {
+ Isolate* isolate = object->GetIsolate();
// We should never get here to initialize a const with the hole
// value since a const declaration would conflict with the setter.
ASSERT(!value->IsTheHole());
- Handle<Object> value_handle(value, isolate);
// To accommodate both the old and the new api we switch on the
// data structure used to store the callbacks. Eventually foreign
@@ -1934,88 +2862,97 @@ MaybeObject* JSObject::SetPropertyWithCallback(Object* structure,
if (structure->IsForeign()) {
AccessorDescriptor* callback =
reinterpret_cast<AccessorDescriptor*>(
- Foreign::cast(structure)->foreign_address());
- MaybeObject* obj = (callback->setter)(this, value, callback->data);
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- if (obj->IsFailure()) return obj;
- return *value_handle;
+ Handle<Foreign>::cast(structure)->foreign_address());
+ CALL_AND_RETRY_OR_DIE(isolate,
+ (callback->setter)(
+ isolate, *object, *value, callback->data),
+ break,
+ return Handle<Object>());
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ return value;
}
- if (structure->IsAccessorInfo()) {
+ if (structure->IsExecutableAccessorInfo()) {
// api style callbacks
- AccessorInfo* data = AccessorInfo::cast(structure);
- if (!data->IsCompatibleReceiver(this)) {
- Handle<Object> name_handle(name);
- Handle<Object> receiver_handle(this);
- Handle<Object> args[2] = { name_handle, receiver_handle };
+ ExecutableAccessorInfo* data = ExecutableAccessorInfo::cast(*structure);
+ if (!data->IsCompatibleReceiver(*object)) {
+ Handle<Object> args[2] = { name, object };
Handle<Object> error =
isolate->factory()->NewTypeError("incompatible_method_receiver",
HandleVector(args,
ARRAY_SIZE(args)));
- return isolate->Throw(*error);
+ isolate->Throw(*error);
+ return Handle<Object>();
}
+ // TODO(rossberg): Support symbols in the API.
+ if (name->IsSymbol()) return value;
Object* call_obj = data->setter();
- v8::AccessorSetter call_fun = v8::ToCData<v8::AccessorSetter>(call_obj);
+ v8::AccessorSetterCallback call_fun =
+ v8::ToCData<v8::AccessorSetterCallback>(call_obj);
if (call_fun == NULL) return value;
- Handle<String> key(name);
- LOG(isolate, ApiNamedPropertyAccess("store", this, name));
- CustomArguments args(isolate, data->data(), this, JSObject::cast(holder));
- v8::AccessorInfo info(args.end());
- {
- // Leaving JavaScript.
- VMState state(isolate, EXTERNAL);
- call_fun(v8::Utils::ToLocal(key),
- v8::Utils::ToLocal(value_handle),
- info);
- }
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- return *value_handle;
+ Handle<String> key = Handle<String>::cast(name);
+ LOG(isolate, ApiNamedPropertyAccess("store", *object, *name));
+ PropertyCallbackArguments args(
+ isolate, data->data(), *object, JSObject::cast(*holder));
+ args.Call(call_fun,
+ v8::Utils::ToLocal(key),
+ v8::Utils::ToLocal(value));
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ return value;
}
if (structure->IsAccessorPair()) {
- Object* setter = AccessorPair::cast(structure)->setter();
+ Handle<Object> setter(AccessorPair::cast(*structure)->setter(), isolate);
if (setter->IsSpecFunction()) {
// TODO(rossberg): nicer would be to cast to some JSCallable here...
- return SetPropertyWithDefinedSetter(JSReceiver::cast(setter), value);
+ return SetPropertyWithDefinedSetter(
+ object, Handle<JSReceiver>::cast(setter), value);
} else {
if (strict_mode == kNonStrictMode) {
return value;
}
- Handle<String> key(name);
- Handle<Object> holder_handle(holder, isolate);
- Handle<Object> args[2] = { key, holder_handle };
- return isolate->Throw(
- *isolate->factory()->NewTypeError("no_setter_in_callback",
- HandleVector(args, 2)));
+ Handle<Object> args[2] = { name, holder };
+ Handle<Object> error =
+ isolate->factory()->NewTypeError("no_setter_in_callback",
+ HandleVector(args, 2));
+ isolate->Throw(*error);
+ return Handle<Object>();
}
}
+ // TODO(dcarney): Handle correctly.
+ if (structure->IsDeclaredAccessorInfo()) {
+ return value;
+ }
+
UNREACHABLE();
- return NULL;
+ return Handle<Object>();
}
-MaybeObject* JSReceiver::SetPropertyWithDefinedSetter(JSReceiver* setter,
- Object* value) {
- Isolate* isolate = GetIsolate();
- Handle<Object> value_handle(value, isolate);
- Handle<JSReceiver> fun(setter, isolate);
- Handle<JSReceiver> self(this, isolate);
+Handle<Object> JSReceiver::SetPropertyWithDefinedSetter(
+ Handle<JSReceiver> object,
+ Handle<JSReceiver> setter,
+ Handle<Object> value) {
+ Isolate* isolate = object->GetIsolate();
+
#ifdef ENABLE_DEBUGGER_SUPPORT
Debug* debug = isolate->debug();
// Handle stepping into a setter if step into is active.
// TODO(rossberg): should this apply to getters that are function proxies?
- if (debug->StepInActive() && fun->IsJSFunction()) {
+ if (debug->StepInActive() && setter->IsJSFunction()) {
debug->HandleStepIn(
- Handle<JSFunction>::cast(fun), Handle<Object>::null(), 0, false);
+ Handle<JSFunction>::cast(setter), Handle<Object>::null(), 0, false);
}
#endif
+
bool has_pending_exception;
- Handle<Object> argv[] = { value_handle };
- Execution::Call(fun, self, ARRAY_SIZE(argv), argv, &has_pending_exception);
+ Handle<Object> argv[] = { value };
+ Execution::Call(
+ isolate, setter, object, ARRAY_SIZE(argv), argv, &has_pending_exception);
// Check for pending exception and return the result.
- if (has_pending_exception) return Failure::Exception();
- return *value_handle;
+ if (has_pending_exception) return Handle<Object>();
+ return value;
}
@@ -2027,16 +2964,18 @@ MaybeObject* JSObject::SetElementWithCallbackSetterInPrototypes(
Heap* heap = GetHeap();
for (Object* pt = GetPrototype();
pt != heap->null_value();
- pt = pt->GetPrototype()) {
+ pt = pt->GetPrototype(GetIsolate())) {
if (pt->IsJSProxy()) {
- String* name;
- MaybeObject* maybe = GetHeap()->Uint32ToString(index);
- if (!maybe->To<String>(&name)) {
- *found = true; // Force abort
- return maybe;
- }
- return JSProxy::cast(pt)->SetPropertyViaPrototypesWithHandler(
- this, name, value, NONE, strict_mode, found);
+ Isolate* isolate = GetIsolate();
+ HandleScope scope(isolate);
+ Handle<JSProxy> proxy(JSProxy::cast(pt));
+ Handle<JSObject> self(this, isolate);
+ Handle<String> name = isolate->factory()->Uint32ToString(index);
+ Handle<Object> value_handle(value, isolate);
+ Handle<Object> result = JSProxy::SetPropertyViaPrototypesWithHandler(
+ proxy, self, name, value_handle, NONE, strict_mode, found);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
}
if (!JSObject::cast(pt)->HasDictionaryElements()) {
continue;
@@ -2048,11 +2987,16 @@ MaybeObject* JSObject::SetElementWithCallbackSetterInPrototypes(
PropertyDetails details = dictionary->DetailsAt(entry);
if (details.type() == CALLBACKS) {
*found = true;
- return SetElementWithCallback(dictionary->ValueAt(entry),
- index,
- value,
- JSObject::cast(pt),
- strict_mode);
+ Isolate* isolate = GetIsolate();
+ HandleScope scope(isolate);
+ Handle<JSObject> self(this, isolate);
+ Handle<Object> structure(dictionary->ValueAt(entry), isolate);
+ Handle<Object> value_handle(value, isolate);
+ Handle<JSObject> holder(JSObject::cast(pt));
+ Handle<Object> result = SetElementWithCallback(
+ self, structure, index, value_handle, holder, strict_mode);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
}
}
}
@@ -2060,44 +3004,46 @@ MaybeObject* JSObject::SetElementWithCallbackSetterInPrototypes(
return heap->the_hole_value();
}
-MaybeObject* JSObject::SetPropertyViaPrototypes(
- String* name,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- bool* done) {
- Heap* heap = GetHeap();
- Isolate* isolate = heap->isolate();
+
+Handle<Object> JSObject::SetPropertyViaPrototypes(Handle<JSObject> object,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ StrictModeFlag strict_mode,
+ bool* done) {
+ Isolate* isolate = object->GetIsolate();
*done = false;
// We could not find a local property so let's check whether there is an
// accessor that wants to handle the property, or whether the property is
// read-only on the prototype chain.
LookupResult result(isolate);
- LookupRealNamedPropertyInPrototypes(name, &result);
+ object->LookupRealNamedPropertyInPrototypes(*name, &result);
if (result.IsFound()) {
switch (result.type()) {
case NORMAL:
case FIELD:
- case CONSTANT_FUNCTION:
+ case CONSTANT:
*done = result.IsReadOnly();
break;
case INTERCEPTOR: {
PropertyAttributes attr =
result.holder()->GetPropertyAttributeWithInterceptor(
- this, name, true);
+ *object, *name, true);
*done = !!(attr & READ_ONLY);
break;
}
case CALLBACKS: {
if (!FLAG_es5_readonly && result.IsReadOnly()) break;
*done = true;
- return SetPropertyWithCallback(result.GetCallbackObject(),
- name, value, result.holder(), strict_mode);
+ Handle<Object> callback_object(result.GetCallbackObject(), isolate);
+ return SetPropertyWithCallback(object, callback_object, name, value,
+ handle(result.holder()), strict_mode);
}
case HANDLER: {
- return result.proxy()->SetPropertyViaPrototypesWithHandler(
- this, name, value, attributes, strict_mode, done);
+ Handle<JSProxy> proxy(result.proxy());
+ return JSProxy::SetPropertyViaPrototypesWithHandler(
+ proxy, object, name, value, attributes, strict_mode, done);
}
case TRANSITION:
case NONEXISTENT:
@@ -2110,60 +3056,13 @@ MaybeObject* JSObject::SetPropertyViaPrototypes(
if (!FLAG_es5_readonly) *done = false;
if (*done) {
if (strict_mode == kNonStrictMode) return value;
- Handle<Object> args[] = { Handle<Object>(name), Handle<Object>(this)};
- return isolate->Throw(*isolate->factory()->NewTypeError(
- "strict_read_only_property", HandleVector(args, ARRAY_SIZE(args))));
- }
- return heap->the_hole_value();
-}
-
-
-enum RightTrimMode { FROM_GC, FROM_MUTATOR };
-
-
-static void ZapEndOfFixedArray(Address new_end, int to_trim) {
- // If we are doing a big trim in old space then we zap the space.
- Object** zap = reinterpret_cast<Object**>(new_end);
- zap++; // Header of filler must be at least one word so skip that.
- for (int i = 1; i < to_trim; i++) {
- *zap++ = Smi::FromInt(0);
- }
-}
-
-
-template<RightTrimMode trim_mode>
-static void RightTrimFixedArray(Heap* heap, FixedArray* elms, int to_trim) {
- ASSERT(elms->map() != HEAP->fixed_cow_array_map());
- // For now this trick is only applied to fixed arrays in new and paged space.
- ASSERT(!HEAP->lo_space()->Contains(elms));
-
- const int len = elms->length();
-
- ASSERT(to_trim < len);
-
- Address new_end = elms->address() + FixedArray::SizeFor(len - to_trim);
-
- if (trim_mode != FROM_GC || Heap::ShouldZapGarbage()) {
- ZapEndOfFixedArray(new_end, to_trim);
- }
-
- int size_delta = to_trim * kPointerSize;
-
- // Technically in new space this write might be omitted (except for
- // debug mode which iterates through the heap), but to play safer
- // we still do it.
- heap->CreateFillerObjectAt(new_end, size_delta);
-
- elms->set_length(len - to_trim);
-
- // Maintain marking consistency for IncrementalMarking.
- if (Marking::IsBlack(Marking::MarkBitFrom(elms))) {
- if (trim_mode == FROM_GC) {
- MemoryChunk::IncrementLiveBytesFromGC(elms->address(), -size_delta);
- } else {
- MemoryChunk::IncrementLiveBytesFromMutator(elms->address(), -size_delta);
- }
+ Handle<Object> args[] = { name, object };
+ Handle<Object> error = isolate->factory()->NewTypeError(
+ "strict_read_only_property", HandleVector(args, ARRAY_SIZE(args)));
+ isolate->Throw(*error);
+ return Handle<Object>();
}
+ return isolate->factory()->the_hole_value();
}
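
The hunk above is part of the handlification of the property-setting path: instead of returning a raw MaybeObject* (with Failure::Exception() on error), the function now throws on the isolate and returns an empty Handle<Object>(), which callers must test before dereferencing. The standalone sketch below illustrates only that convention; MiniIsolate, MiniHandle and SetReadOnlyProperty are invented names for illustration, not V8's real API.

#include <cassert>
#include <cstdio>
#include <string>

// Minimal stand-in for a handle: empty (null) means "an exception is pending".
template <typename T>
class MiniHandle {
 public:
  MiniHandle() : location_(nullptr) {}
  explicit MiniHandle(T* object) : location_(object) {}
  bool is_null() const { return location_ == nullptr; }
  T* operator*() const { assert(!is_null()); return location_; }
 private:
  T* location_;
};

// Minimal stand-in for an isolate that records a thrown error.
struct MiniIsolate {
  bool has_pending_exception = false;
  std::string pending_message;
  void Throw(const std::string& message) {
    has_pending_exception = true;
    pending_message = message;
  }
};

// Analogue of a handlified setter: in strict mode it throws and returns an
// empty handle; in non-strict mode the write is silently ignored and the
// existing storage is returned.
MiniHandle<int> SetReadOnlyProperty(MiniIsolate* isolate, int* storage,
                                    bool strict_mode) {
  if (strict_mode) {
    isolate->Throw("strict_read_only_property");
    return MiniHandle<int>();
  }
  return MiniHandle<int>(storage);
}

int main() {
  MiniIsolate isolate;
  int slot = 42;
  MiniHandle<int> result = SetReadOnlyProperty(&isolate, &slot, true);
  if (result.is_null()) {
    std::printf("threw: %s\n", isolate.pending_message.c_str());
  }
  return 0;
}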
@@ -2184,46 +3083,101 @@ void Map::EnsureDescriptorSlack(Handle<Map> map, int slack) {
}
-void Map::AppendCallbackDescriptors(Handle<Map> map,
- Handle<Object> descriptors) {
- Isolate* isolate = map->GetIsolate();
- Handle<DescriptorArray> array(map->instance_descriptors());
- NeanderArray callbacks(descriptors);
- int nof_callbacks = callbacks.length();
-
- ASSERT(array->NumberOfSlackDescriptors() >= nof_callbacks);
+template<class T>
+static int AppendUniqueCallbacks(NeanderArray* callbacks,
+ Handle<typename T::Array> array,
+ int valid_descriptors) {
+ int nof_callbacks = callbacks->length();
- // Ensure the keys are symbols before writing them into the instance
- // descriptor. Since it may cause a GC, it has to be done before we
+ Isolate* isolate = array->GetIsolate();
+ // Ensure the keys are unique names before writing them into the
+ // instance descriptor. Since it may cause a GC, it has to be done before we
// temporarily put the heap in an invalid state while appending descriptors.
for (int i = 0; i < nof_callbacks; ++i) {
- Handle<AccessorInfo> entry(AccessorInfo::cast(callbacks.get(i)));
+ Handle<AccessorInfo> entry(AccessorInfo::cast(callbacks->get(i)));
+ if (entry->name()->IsUniqueName()) continue;
Handle<String> key =
- isolate->factory()->SymbolFromString(
+ isolate->factory()->InternalizedStringFromString(
Handle<String>(String::cast(entry->name())));
entry->set_name(*key);
}
- int nof = map->NumberOfOwnDescriptors();
-
// Fill in new callback descriptors. Process the callbacks from
// back to front so that the last callback with a given name takes
// precedence over previously added callbacks with that name.
for (int i = nof_callbacks - 1; i >= 0; i--) {
- AccessorInfo* entry = AccessorInfo::cast(callbacks.get(i));
- String* key = String::cast(entry->name());
+ AccessorInfo* entry = AccessorInfo::cast(callbacks->get(i));
+ Name* key = Name::cast(entry->name());
// Check if a descriptor with this name already exists before writing.
- if (array->Search(key, nof) == DescriptorArray::kNotFound) {
- CallbacksDescriptor desc(key, entry, entry->property_attributes());
- array->Append(&desc);
- nof += 1;
+ if (!T::Contains(key, entry, valid_descriptors, array)) {
+ T::Insert(key, entry, valid_descriptors, array);
+ valid_descriptors++;
+ }
+ }
+
+ return valid_descriptors;
+}
+
+struct DescriptorArrayAppender {
+ typedef DescriptorArray Array;
+ static bool Contains(Name* key,
+ AccessorInfo* entry,
+ int valid_descriptors,
+ Handle<DescriptorArray> array) {
+ return array->Search(key, valid_descriptors) != DescriptorArray::kNotFound;
+ }
+ static void Insert(Name* key,
+ AccessorInfo* entry,
+ int valid_descriptors,
+ Handle<DescriptorArray> array) {
+ CallbacksDescriptor desc(key, entry, entry->property_attributes());
+ array->Append(&desc);
+ }
+};
+
+
+struct FixedArrayAppender {
+ typedef FixedArray Array;
+ static bool Contains(Name* key,
+ AccessorInfo* entry,
+ int valid_descriptors,
+ Handle<FixedArray> array) {
+ for (int i = 0; i < valid_descriptors; i++) {
+ if (key == AccessorInfo::cast(array->get(i))->name()) return true;
}
+ return false;
}
+ static void Insert(Name* key,
+ AccessorInfo* entry,
+ int valid_descriptors,
+ Handle<FixedArray> array) {
+ array->set(valid_descriptors, entry);
+ }
+};
+
+void Map::AppendCallbackDescriptors(Handle<Map> map,
+ Handle<Object> descriptors) {
+ int nof = map->NumberOfOwnDescriptors();
+ Handle<DescriptorArray> array(map->instance_descriptors());
+ NeanderArray callbacks(descriptors);
+ ASSERT(array->NumberOfSlackDescriptors() >= callbacks.length());
+ nof = AppendUniqueCallbacks<DescriptorArrayAppender>(&callbacks, array, nof);
map->SetNumberOfOwnDescriptors(nof);
}
+int AccessorInfo::AppendUnique(Handle<Object> descriptors,
+ Handle<FixedArray> array,
+ int valid_descriptors) {
+ NeanderArray callbacks(descriptors);
+ ASSERT(array->length() >= callbacks.length() + valid_descriptors);
+ return AppendUniqueCallbacks<FixedArrayAppender>(&callbacks,
+ array,
+ valid_descriptors);
+}
+
+
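
AppendUniqueCallbacks above is a small policy template: the behaviour that differs between descriptor arrays and plain fixed arrays is factored into a policy struct (DescriptorArrayAppender / FixedArrayAppender) that supplies the backing array type plus Contains() and Insert(). The sketch below shows the same static-polymorphism shape in isolation; VectorAppender, SetAppender and this AppendUnique are invented stand-ins, not V8 code.

#include <cstdio>
#include <set>
#include <string>
#include <vector>

// Appends every callback name that the policy's backing store does not yet
// contain.  Walks back to front so the last entry with a given name wins,
// mirroring the precedence rule in the code above.
template <class Policy>
int AppendUnique(const std::vector<std::string>& callbacks,
                 typename Policy::Array* array, int valid) {
  for (int i = static_cast<int>(callbacks.size()) - 1; i >= 0; --i) {
    if (!Policy::Contains(callbacks[i], *array)) {
      Policy::Insert(callbacks[i], array);
      ++valid;
    }
  }
  return valid;
}

// Policy 1: linear search over a vector.
struct VectorAppender {
  typedef std::vector<std::string> Array;
  static bool Contains(const std::string& key, const Array& array) {
    for (const std::string& existing : array) {
      if (existing == key) return true;
    }
    return false;
  }
  static void Insert(const std::string& key, Array* array) {
    array->push_back(key);
  }
};

// Policy 2: membership test via a std::set.
struct SetAppender {
  typedef std::set<std::string> Array;
  static bool Contains(const std::string& key, const Array& array) {
    return array.count(key) != 0;
  }
  static void Insert(const std::string& key, Array* array) {
    array->insert(key);
  }
};

int main() {
  std::vector<std::string> callbacks = {"length", "name", "length"};
  VectorAppender::Array vec;
  SetAppender::Array set;
  std::printf("vector: %d unique\n",
              AppendUnique<VectorAppender>(callbacks, &vec, 0));
  std::printf("set:    %d unique\n",
              AppendUnique<SetAppender>(callbacks, &set, 0));
  return 0;
}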
static bool ContainsMap(MapHandleList* maps, Handle<Map> map) {
ASSERT(!map.is_null());
for (int i = 0; i < maps->length(); ++i) {
@@ -2294,6 +3248,20 @@ Map* Map::LookupElementsTransitionMap(ElementsKind to_kind) {
}
+bool Map::IsMapInArrayPrototypeChain() {
+ Isolate* isolate = GetIsolate();
+ if (isolate->initial_array_prototype()->map() == this) {
+ return true;
+ }
+
+ if (isolate->initial_object_prototype()->map() == this) {
+ return true;
+ }
+
+ return false;
+}
+
+
static MaybeObject* AddMissingElementsTransitions(Map* map,
ElementsKind to_kind) {
ASSERT(IsFastElementsKind(map->elements_kind()));
@@ -2344,10 +3312,8 @@ MaybeObject* JSObject::GetElementsTransitionMapSlow(ElementsKind to_kind) {
}
bool allow_store_transition =
- // Only remember the map transition if the object's map is NOT equal to
- // the global object_function's map and there is not an already existing
+ // Only remember the map transition if there is not an already existing
// non-matching element transition.
- (GetIsolate()->empty_object_map() != map()) &&
!start_map->IsUndefined() && !start_map->is_shared() &&
IsFastElementsKind(from_kind);
@@ -2362,23 +3328,26 @@ MaybeObject* JSObject::GetElementsTransitionMapSlow(ElementsKind to_kind) {
return start_map->CopyAsElementsKind(to_kind, OMIT_TRANSITION);
}
- Map* closest_map = FindClosestElementsTransition(start_map, to_kind);
+ return start_map->AsElementsKind(to_kind);
+}
+
+
+MaybeObject* Map::AsElementsKind(ElementsKind kind) {
+ Map* closest_map = FindClosestElementsTransition(this, kind);
- if (closest_map->elements_kind() == to_kind) {
+ if (closest_map->elements_kind() == kind) {
return closest_map;
}
- return AddMissingElementsTransitions(closest_map, to_kind);
+ return AddMissingElementsTransitions(closest_map, kind);
}
-void JSObject::LocalLookupRealNamedProperty(String* name,
- LookupResult* result) {
+void JSObject::LocalLookupRealNamedProperty(Name* name, LookupResult* result) {
if (IsJSGlobalProxy()) {
Object* proto = GetPrototype();
if (proto->IsNull()) return result->NotFound();
ASSERT(proto->IsJSGlobalObject());
- // A GlobalProxy's prototype should always be a proper JSObject.
return JSObject::cast(proto)->LocalLookupRealNamedProperty(name, result);
}
@@ -2393,14 +3362,14 @@ void JSObject::LocalLookupRealNamedProperty(String* name,
// occur as fields.
if (result->IsField() &&
result->IsReadOnly() &&
- FastPropertyAt(result->GetFieldIndex())->IsTheHole()) {
+ RawFastPropertyAt(result->GetFieldIndex().field_index())->IsTheHole()) {
result->DisallowCaching();
}
return;
}
int entry = property_dictionary()->FindEntry(name);
- if (entry != StringDictionary::kNotFound) {
+ if (entry != NameDictionary::kNotFound) {
Object* value = property_dictionary()->ValueAt(entry);
if (IsGlobalObject()) {
PropertyDetails d = property_dictionary()->DetailsAt(entry);
@@ -2408,7 +3377,7 @@ void JSObject::LocalLookupRealNamedProperty(String* name,
result->NotFound();
return;
}
- value = JSGlobalPropertyCell::cast(value)->value();
+ value = PropertyCell::cast(value)->value();
}
// Make sure to disallow caching for uninitialized constants
// found in the dictionary-mode objects.
@@ -2421,7 +3390,7 @@ void JSObject::LocalLookupRealNamedProperty(String* name,
}
-void JSObject::LookupRealNamedProperty(String* name, LookupResult* result) {
+void JSObject::LookupRealNamedProperty(Name* name, LookupResult* result) {
LocalLookupRealNamedProperty(name, result);
if (result->IsFound()) return;
@@ -2429,12 +3398,13 @@ void JSObject::LookupRealNamedProperty(String* name, LookupResult* result) {
}
-void JSObject::LookupRealNamedPropertyInPrototypes(String* name,
+void JSObject::LookupRealNamedPropertyInPrototypes(Name* name,
LookupResult* result) {
- Heap* heap = GetHeap();
+ Isolate* isolate = GetIsolate();
+ Heap* heap = isolate->heap();
for (Object* pt = GetPrototype();
pt != heap->null_value();
- pt = pt->GetPrototype()) {
+ pt = pt->GetPrototype(isolate)) {
if (pt->IsJSProxy()) {
return result->HandlerResult(JSProxy::cast(pt));
}
@@ -2447,14 +3417,15 @@ void JSObject::LookupRealNamedPropertyInPrototypes(String* name,
// We only need to deal with CALLBACKS and INTERCEPTORS
-MaybeObject* JSObject::SetPropertyWithFailedAccessCheck(
+Handle<Object> JSObject::SetPropertyWithFailedAccessCheck(
+ Handle<JSObject> object,
LookupResult* result,
- String* name,
- Object* value,
+ Handle<Name> name,
+ Handle<Object> value,
bool check_prototype,
StrictModeFlag strict_mode) {
if (check_prototype && !result->IsProperty()) {
- LookupRealNamedPropertyInPrototypes(name, result);
+ object->LookupRealNamedPropertyInPrototypes(*name, result);
}
if (result->IsProperty()) {
@@ -2463,12 +3434,23 @@ MaybeObject* JSObject::SetPropertyWithFailedAccessCheck(
case CALLBACKS: {
Object* obj = result->GetCallbackObject();
if (obj->IsAccessorInfo()) {
- AccessorInfo* info = AccessorInfo::cast(obj);
+ Handle<AccessorInfo> info(AccessorInfo::cast(obj));
if (info->all_can_write()) {
- return SetPropertyWithCallback(result->GetCallbackObject(),
+ return SetPropertyWithCallback(object,
+ info,
name,
value,
- result->holder(),
+ handle(result->holder()),
+ strict_mode);
+ }
+ } else if (obj->IsAccessorPair()) {
+ Handle<AccessorPair> pair(AccessorPair::cast(obj));
+ if (pair->all_can_read()) {
+ return SetPropertyWithCallback(object,
+ pair,
+ name,
+ value,
+ handle(result->holder()),
strict_mode);
}
}
@@ -2477,10 +3459,11 @@ MaybeObject* JSObject::SetPropertyWithFailedAccessCheck(
case INTERCEPTOR: {
      // Try to look up real named properties. Note that the only properties
      // that can be set are callbacks marked ALL_CAN_WRITE on the prototype
      // chain.
- LookupResult r(GetIsolate());
- LookupRealNamedProperty(name, &r);
+ LookupResult r(object->GetIsolate());
+ object->LookupRealNamedProperty(*name, &r);
if (r.IsProperty()) {
- return SetPropertyWithFailedAccessCheck(&r,
+ return SetPropertyWithFailedAccessCheck(object,
+ &r,
name,
value,
check_prototype,
@@ -2495,199 +3478,213 @@ MaybeObject* JSObject::SetPropertyWithFailedAccessCheck(
}
}
- Isolate* isolate = GetIsolate();
- HandleScope scope(isolate);
- Handle<Object> value_handle(value);
- isolate->ReportFailedAccessCheck(this, v8::ACCESS_SET);
- return *value_handle;
+ Isolate* isolate = object->GetIsolate();
+ isolate->ReportFailedAccessCheck(*object, v8::ACCESS_SET);
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ return value;
}
-MaybeObject* JSReceiver::SetProperty(LookupResult* result,
- String* key,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- JSReceiver::StoreFromKeyed store_mode) {
+Handle<Object> JSReceiver::SetProperty(Handle<JSReceiver> object,
+ LookupResult* result,
+ Handle<Name> key,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ StrictModeFlag strict_mode,
+ StoreFromKeyed store_mode) {
if (result->IsHandler()) {
- return result->proxy()->SetPropertyWithHandler(
- this, key, value, attributes, strict_mode);
+ return JSProxy::SetPropertyWithHandler(handle(result->proxy()),
+ object, key, value, attributes, strict_mode);
} else {
- return JSObject::cast(this)->SetPropertyForResult(
+ return JSObject::SetPropertyForResult(Handle<JSObject>::cast(object),
result, key, value, attributes, strict_mode, store_mode);
}
}
-bool JSProxy::HasPropertyWithHandler(String* name_raw) {
- Isolate* isolate = GetIsolate();
- HandleScope scope(isolate);
- Handle<Object> receiver(this);
- Handle<Object> name(name_raw);
+bool JSProxy::HasPropertyWithHandler(Handle<JSProxy> proxy, Handle<Name> name) {
+ Isolate* isolate = proxy->GetIsolate();
+
+ // TODO(rossberg): adjust once there is a story for symbols vs proxies.
+ if (name->IsSymbol()) return false;
Handle<Object> args[] = { name };
- Handle<Object> result = CallTrap(
+ Handle<Object> result = proxy->CallTrap(
"has", isolate->derived_has_trap(), ARRAY_SIZE(args), args);
if (isolate->has_pending_exception()) return false;
- return result->ToBoolean()->IsTrue();
+ return result->BooleanValue();
}
-MUST_USE_RESULT MaybeObject* JSProxy::SetPropertyWithHandler(
- JSReceiver* receiver_raw,
- String* name_raw,
- Object* value_raw,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode) {
- Isolate* isolate = GetIsolate();
- HandleScope scope(isolate);
- Handle<JSReceiver> receiver(receiver_raw);
- Handle<Object> name(name_raw);
- Handle<Object> value(value_raw);
+Handle<Object> JSProxy::SetPropertyWithHandler(Handle<JSProxy> proxy,
+ Handle<JSReceiver> receiver,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ StrictModeFlag strict_mode) {
+ Isolate* isolate = proxy->GetIsolate();
+
+ // TODO(rossberg): adjust once there is a story for symbols vs proxies.
+ if (name->IsSymbol()) return value;
Handle<Object> args[] = { receiver, name, value };
- CallTrap("set", isolate->derived_set_trap(), ARRAY_SIZE(args), args);
- if (isolate->has_pending_exception()) return Failure::Exception();
+ proxy->CallTrap("set", isolate->derived_set_trap(), ARRAY_SIZE(args), args);
+ if (isolate->has_pending_exception()) return Handle<Object>();
- return *value;
+ return value;
}
-MUST_USE_RESULT MaybeObject* JSProxy::SetPropertyViaPrototypesWithHandler(
- JSReceiver* receiver_raw,
- String* name_raw,
- Object* value_raw,
+Handle<Object> JSProxy::SetPropertyViaPrototypesWithHandler(
+ Handle<JSProxy> proxy,
+ Handle<JSReceiver> receiver,
+ Handle<Name> name,
+ Handle<Object> value,
PropertyAttributes attributes,
StrictModeFlag strict_mode,
bool* done) {
- Isolate* isolate = GetIsolate();
- Handle<JSProxy> proxy(this);
- Handle<JSReceiver> receiver(receiver_raw);
- Handle<String> name(name_raw);
- Handle<Object> value(value_raw);
- Handle<Object> handler(this->handler()); // Trap might morph proxy.
+ Isolate* isolate = proxy->GetIsolate();
+ Handle<Object> handler(proxy->handler(), isolate); // Trap might morph proxy.
+
+ // TODO(rossberg): adjust once there is a story for symbols vs proxies.
+ if (name->IsSymbol()) {
+ *done = false;
+ return isolate->factory()->the_hole_value();
+ }
*done = true; // except where redefined...
Handle<Object> args[] = { name };
Handle<Object> result = proxy->CallTrap(
"getPropertyDescriptor", Handle<Object>(), ARRAY_SIZE(args), args);
- if (isolate->has_pending_exception()) return Failure::Exception();
+ if (isolate->has_pending_exception()) return Handle<Object>();
if (result->IsUndefined()) {
*done = false;
- return GetHeap()->the_hole_value();
+ return isolate->factory()->the_hole_value();
}
// Emulate [[GetProperty]] semantics for proxies.
bool has_pending_exception;
Handle<Object> argv[] = { result };
- Handle<Object> desc =
- Execution::Call(isolate->to_complete_property_descriptor(), result,
- ARRAY_SIZE(argv), argv, &has_pending_exception);
- if (has_pending_exception) return Failure::Exception();
+ Handle<Object> desc = Execution::Call(
+ isolate, isolate->to_complete_property_descriptor(), result,
+ ARRAY_SIZE(argv), argv, &has_pending_exception);
+ if (has_pending_exception) return Handle<Object>();
// [[GetProperty]] requires to check that all properties are configurable.
Handle<String> configurable_name =
- isolate->factory()->LookupAsciiSymbol("configurable_");
+ isolate->factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("configurable_"));
Handle<Object> configurable(
- v8::internal::GetProperty(desc, configurable_name));
+ v8::internal::GetProperty(isolate, desc, configurable_name));
ASSERT(!isolate->has_pending_exception());
ASSERT(configurable->IsTrue() || configurable->IsFalse());
if (configurable->IsFalse()) {
Handle<String> trap =
- isolate->factory()->LookupAsciiSymbol("getPropertyDescriptor");
+ isolate->factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("getPropertyDescriptor"));
Handle<Object> args[] = { handler, trap, name };
Handle<Object> error = isolate->factory()->NewTypeError(
"proxy_prop_not_configurable", HandleVector(args, ARRAY_SIZE(args)));
- return isolate->Throw(*error);
+ isolate->Throw(*error);
+ return Handle<Object>();
}
ASSERT(configurable->IsTrue());
// Check for DataDescriptor.
Handle<String> hasWritable_name =
- isolate->factory()->LookupAsciiSymbol("hasWritable_");
- Handle<Object> hasWritable(v8::internal::GetProperty(desc, hasWritable_name));
+ isolate->factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("hasWritable_"));
+ Handle<Object> hasWritable(
+ v8::internal::GetProperty(isolate, desc, hasWritable_name));
ASSERT(!isolate->has_pending_exception());
ASSERT(hasWritable->IsTrue() || hasWritable->IsFalse());
if (hasWritable->IsTrue()) {
Handle<String> writable_name =
- isolate->factory()->LookupAsciiSymbol("writable_");
- Handle<Object> writable(v8::internal::GetProperty(desc, writable_name));
+ isolate->factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("writable_"));
+ Handle<Object> writable(
+ v8::internal::GetProperty(isolate, desc, writable_name));
ASSERT(!isolate->has_pending_exception());
ASSERT(writable->IsTrue() || writable->IsFalse());
*done = writable->IsFalse();
- if (!*done) return GetHeap()->the_hole_value();
- if (strict_mode == kNonStrictMode) return *value;
+ if (!*done) return isolate->factory()->the_hole_value();
+ if (strict_mode == kNonStrictMode) return value;
Handle<Object> args[] = { name, receiver };
Handle<Object> error = isolate->factory()->NewTypeError(
"strict_read_only_property", HandleVector(args, ARRAY_SIZE(args)));
- return isolate->Throw(*error);
+ isolate->Throw(*error);
+ return Handle<Object>();
}
// We have an AccessorDescriptor.
- Handle<String> set_name = isolate->factory()->LookupAsciiSymbol("set_");
- Handle<Object> setter(v8::internal::GetProperty(desc, set_name));
+ Handle<String> set_name = isolate->factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("set_"));
+ Handle<Object> setter(v8::internal::GetProperty(isolate, desc, set_name));
ASSERT(!isolate->has_pending_exception());
if (!setter->IsUndefined()) {
// TODO(rossberg): nicer would be to cast to some JSCallable here...
- return receiver->SetPropertyWithDefinedSetter(
- JSReceiver::cast(*setter), *value);
+ return SetPropertyWithDefinedSetter(
+ receiver, Handle<JSReceiver>::cast(setter), value);
}
- if (strict_mode == kNonStrictMode) return *value;
+ if (strict_mode == kNonStrictMode) return value;
Handle<Object> args2[] = { name, proxy };
Handle<Object> error = isolate->factory()->NewTypeError(
"no_setter_in_callback", HandleVector(args2, ARRAY_SIZE(args2)));
- return isolate->Throw(*error);
+ isolate->Throw(*error);
+ return Handle<Object>();
}
-MUST_USE_RESULT MaybeObject* JSProxy::DeletePropertyWithHandler(
- String* name_raw, DeleteMode mode) {
- Isolate* isolate = GetIsolate();
- HandleScope scope(isolate);
- Handle<JSProxy> receiver(this);
- Handle<Object> name(name_raw);
+Handle<Object> JSProxy::DeletePropertyWithHandler(
+ Handle<JSProxy> proxy, Handle<Name> name, DeleteMode mode) {
+ Isolate* isolate = proxy->GetIsolate();
- Handle<Object> args[] = { name };
- Handle<Object> result = CallTrap(
- "delete", Handle<Object>(), ARRAY_SIZE(args), args);
- if (isolate->has_pending_exception()) return Failure::Exception();
+ // TODO(rossberg): adjust once there is a story for symbols vs proxies.
+ if (name->IsSymbol()) return isolate->factory()->false_value();
- Object* bool_result = result->ToBoolean();
- if (mode == STRICT_DELETION && bool_result == GetHeap()->false_value()) {
- Handle<Object> handler(receiver->handler());
- Handle<String> trap_name = isolate->factory()->LookupAsciiSymbol("delete");
+ Handle<Object> args[] = { name };
+ Handle<Object> result = proxy->CallTrap(
+ "delete", Handle<Object>(), ARRAY_SIZE(args), args);
+ if (isolate->has_pending_exception()) return Handle<Object>();
+
+ bool result_bool = result->BooleanValue();
+ if (mode == STRICT_DELETION && !result_bool) {
+ Handle<Object> handler(proxy->handler(), isolate);
+ Handle<String> trap_name = isolate->factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("delete"));
Handle<Object> args[] = { handler, trap_name };
Handle<Object> error = isolate->factory()->NewTypeError(
"handler_failed", HandleVector(args, ARRAY_SIZE(args)));
isolate->Throw(*error);
- return Failure::Exception();
+ return Handle<Object>();
}
- return bool_result;
+ return isolate->factory()->ToBoolean(result_bool);
}
-MUST_USE_RESULT MaybeObject* JSProxy::DeleteElementWithHandler(
- uint32_t index,
- DeleteMode mode) {
- Isolate* isolate = GetIsolate();
- HandleScope scope(isolate);
+Handle<Object> JSProxy::DeleteElementWithHandler(
+ Handle<JSProxy> proxy, uint32_t index, DeleteMode mode) {
+ Isolate* isolate = proxy->GetIsolate();
Handle<String> name = isolate->factory()->Uint32ToString(index);
- return JSProxy::DeletePropertyWithHandler(*name, mode);
+ return JSProxy::DeletePropertyWithHandler(proxy, name, mode);
}
MUST_USE_RESULT PropertyAttributes JSProxy::GetPropertyAttributeWithHandler(
JSReceiver* receiver_raw,
- String* name_raw) {
+ Name* name_raw) {
Isolate* isolate = GetIsolate();
HandleScope scope(isolate);
Handle<JSProxy> proxy(this);
- Handle<Object> handler(this->handler()); // Trap might morph proxy.
+ Handle<Object> handler(this->handler(), isolate); // Trap might morph proxy.
Handle<JSReceiver> receiver(receiver_raw);
- Handle<Object> name(name_raw);
+ Handle<Object> name(name_raw, isolate);
+
+ // TODO(rossberg): adjust once there is a story for symbols vs proxies.
+ if (name->IsSymbol()) return ABSENT;
Handle<Object> args[] = { name };
Handle<Object> result = CallTrap(
@@ -2698,25 +3695,35 @@ MUST_USE_RESULT PropertyAttributes JSProxy::GetPropertyAttributeWithHandler(
bool has_pending_exception;
Handle<Object> argv[] = { result };
- Handle<Object> desc =
- Execution::Call(isolate->to_complete_property_descriptor(), result,
- ARRAY_SIZE(argv), argv, &has_pending_exception);
+ Handle<Object> desc = Execution::Call(
+ isolate, isolate->to_complete_property_descriptor(), result,
+ ARRAY_SIZE(argv), argv, &has_pending_exception);
if (has_pending_exception) return NONE;
// Convert result to PropertyAttributes.
- Handle<String> enum_n = isolate->factory()->LookupAsciiSymbol("enumerable");
- Handle<Object> enumerable(v8::internal::GetProperty(desc, enum_n));
+ Handle<String> enum_n = isolate->factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("enumerable_"));
+ Handle<Object> enumerable(v8::internal::GetProperty(isolate, desc, enum_n));
if (isolate->has_pending_exception()) return NONE;
- Handle<String> conf_n = isolate->factory()->LookupAsciiSymbol("configurable");
- Handle<Object> configurable(v8::internal::GetProperty(desc, conf_n));
+ Handle<String> conf_n = isolate->factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("configurable_"));
+ Handle<Object> configurable(v8::internal::GetProperty(isolate, desc, conf_n));
if (isolate->has_pending_exception()) return NONE;
- Handle<String> writ_n = isolate->factory()->LookupAsciiSymbol("writable");
- Handle<Object> writable(v8::internal::GetProperty(desc, writ_n));
+ Handle<String> writ_n = isolate->factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("writable_"));
+ Handle<Object> writable(v8::internal::GetProperty(isolate, desc, writ_n));
if (isolate->has_pending_exception()) return NONE;
+ if (!writable->BooleanValue()) {
+ Handle<String> set_n = isolate->factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("set_"));
+ Handle<Object> setter(v8::internal::GetProperty(isolate, desc, set_n));
+ if (isolate->has_pending_exception()) return NONE;
+ writable = isolate->factory()->ToBoolean(!setter->IsUndefined());
+ }
if (configurable->IsFalse()) {
- Handle<String> trap =
- isolate->factory()->LookupAsciiSymbol("getPropertyDescriptor");
+ Handle<String> trap = isolate->factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("getPropertyDescriptor"));
Handle<Object> args[] = { handler, trap, name };
Handle<Object> error = isolate->factory()->NewTypeError(
"proxy_prop_not_configurable", HandleVector(args, ARRAY_SIZE(args)));
@@ -2725,44 +3732,42 @@ MUST_USE_RESULT PropertyAttributes JSProxy::GetPropertyAttributeWithHandler(
}
int attributes = NONE;
- if (enumerable->ToBoolean()->IsFalse()) attributes |= DONT_ENUM;
- if (configurable->ToBoolean()->IsFalse()) attributes |= DONT_DELETE;
- if (writable->ToBoolean()->IsFalse()) attributes |= READ_ONLY;
+ if (!enumerable->BooleanValue()) attributes |= DONT_ENUM;
+ if (!configurable->BooleanValue()) attributes |= DONT_DELETE;
+ if (!writable->BooleanValue()) attributes |= READ_ONLY;
return static_cast<PropertyAttributes>(attributes);
}
MUST_USE_RESULT PropertyAttributes JSProxy::GetElementAttributeWithHandler(
- JSReceiver* receiver,
+ JSReceiver* receiver_raw,
uint32_t index) {
Isolate* isolate = GetIsolate();
HandleScope scope(isolate);
+ Handle<JSProxy> proxy(this);
+ Handle<JSReceiver> receiver(receiver_raw);
Handle<String> name = isolate->factory()->Uint32ToString(index);
- return GetPropertyAttributeWithHandler(receiver, *name);
+ return proxy->GetPropertyAttributeWithHandler(*receiver, *name);
}
-void JSProxy::Fix() {
- Isolate* isolate = GetIsolate();
- HandleScope scope(isolate);
- Handle<JSProxy> self(this);
+void JSProxy::Fix(Handle<JSProxy> proxy) {
+ Isolate* isolate = proxy->GetIsolate();
// Save identity hash.
- MaybeObject* maybe_hash = GetIdentityHash(OMIT_CREATION);
+ Handle<Object> hash = JSProxy::GetIdentityHash(proxy, OMIT_CREATION);
- if (IsJSFunctionProxy()) {
- isolate->factory()->BecomeJSFunction(self);
+ if (proxy->IsJSFunctionProxy()) {
+ isolate->factory()->BecomeJSFunction(proxy);
// Code will be set on the JavaScript side.
} else {
- isolate->factory()->BecomeJSObject(self);
+ isolate->factory()->BecomeJSObject(proxy);
}
- ASSERT(self->IsJSObject());
+ ASSERT(proxy->IsJSObject());
// Inherit identity, if it was present.
- Object* hash;
- if (maybe_hash->To<Object>(&hash) && hash->IsSmi()) {
- Handle<JSObject> new_self(JSObject::cast(*self));
- isolate->factory()->SetIdentityHash(new_self, Smi::cast(hash));
+ if (hash->IsSmi()) {
+ JSObject::SetIdentityHash(Handle<JSObject>::cast(proxy), Smi::cast(*hash));
}
}
@@ -2772,10 +3777,10 @@ MUST_USE_RESULT Handle<Object> JSProxy::CallTrap(const char* name,
int argc,
Handle<Object> argv[]) {
Isolate* isolate = GetIsolate();
- Handle<Object> handler(this->handler());
+ Handle<Object> handler(this->handler(), isolate);
- Handle<String> trap_name = isolate->factory()->LookupAsciiSymbol(name);
- Handle<Object> trap(v8::internal::GetProperty(handler, trap_name));
+ Handle<String> trap_name = isolate->factory()->InternalizeUtf8String(name);
+ Handle<Object> trap(v8::internal::GetProperty(isolate, handler, trap_name));
if (isolate->has_pending_exception()) return trap;
if (trap->IsUndefined()) {
@@ -2790,153 +3795,342 @@ MUST_USE_RESULT Handle<Object> JSProxy::CallTrap(const char* name,
}
bool threw;
- return Execution::Call(trap, handler, argc, argv, &threw);
+ return Execution::Call(isolate, trap, handler, argc, argv, &threw);
}
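
JSProxy::CallTrap above fetches the trap by name from the proxy's handler, falls back to a supplied derived trap when the handler does not define one, and raises an error when neither exists. The standalone sketch below shows that lookup-with-fallback shape; Handler, Trap and this CallTrap are invented stand-ins, not V8's types.

#include <cstdio>
#include <functional>
#include <map>
#include <string>

using Trap = std::function<bool(const std::string&)>;

struct Handler {
  std::map<std::string, Trap> traps;
};

// Returns false when no trap is available (the caller treats that as a thrown
// "handler_trap_missing" error); otherwise runs the trap and stores its answer.
bool CallTrap(const Handler& handler, const std::string& name,
              const Trap& derived, const std::string& property, bool* result) {
  std::map<std::string, Trap>::const_iterator it = handler.traps.find(name);
  if (it != handler.traps.end()) {
    *result = it->second(property);  // user-supplied trap
    return true;
  }
  if (derived) {
    *result = derived(property);     // fallback ("derived") trap
    return true;
  }
  std::fprintf(stderr, "handler_trap_missing: %s\n", name.c_str());
  return false;
}

int main() {
  Handler handler;
  handler.traps["has"] = [](const std::string& p) { return p == "x"; };
  Trap derived_has = [](const std::string&) { return false; };

  bool answer = false;
  if (CallTrap(handler, "has", derived_has, "x", &answer)) {
    std::printf("has('x') -> %d\n", static_cast<int>(answer));
  }
  if (!CallTrap(handler, "delete", Trap(), "x", &answer)) {
    std::printf("missing trap reported\n");
  }
  return 0;
}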
-void JSObject::AddFastPropertyUsingMap(Handle<JSObject> object,
- Handle<Map> map) {
- CALL_HEAP_FUNCTION_VOID(
- object->GetIsolate(),
- object->AddFastPropertyUsingMap(*map));
+// TODO(mstarzinger): Temporary wrapper until handlified.
+static Handle<Map> MapAsElementsKind(Handle<Map> map, ElementsKind kind) {
+ CALL_HEAP_FUNCTION(map->GetIsolate(), map->AsElementsKind(kind), Map);
}
-MaybeObject* JSObject::SetPropertyForResult(LookupResult* result,
- String* name_raw,
- Object* value_raw,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- StoreFromKeyed store_mode) {
- Heap* heap = GetHeap();
+void JSObject::AllocateStorageForMap(Handle<JSObject> object, Handle<Map> map) {
+ ASSERT(object->map()->inobject_properties() == map->inobject_properties());
+ ElementsKind obj_kind = object->map()->elements_kind();
+ ElementsKind map_kind = map->elements_kind();
+ if (map_kind != obj_kind) {
+ ElementsKind to_kind = map_kind;
+ if (IsMoreGeneralElementsKindTransition(map_kind, obj_kind) ||
+ IsDictionaryElementsKind(obj_kind)) {
+ to_kind = obj_kind;
+ }
+ if (IsDictionaryElementsKind(to_kind)) {
+ NormalizeElements(object);
+ } else {
+ TransitionElementsKind(object, to_kind);
+ }
+ map = MapAsElementsKind(map, to_kind);
+ }
+ int total_size =
+ map->NumberOfOwnDescriptors() + map->unused_property_fields();
+ int out_of_object = total_size - map->inobject_properties();
+ if (out_of_object != object->properties()->length()) {
+ Isolate* isolate = object->GetIsolate();
+ Handle<FixedArray> new_properties = isolate->factory()->CopySizeFixedArray(
+ handle(object->properties()), out_of_object);
+ object->set_properties(*new_properties);
+ }
+ object->set_map(*map);
+}
+
+
+void JSObject::MigrateInstance(Handle<JSObject> object) {
+ // Converting any field to the most specific type will cause the
+ // GeneralizeFieldRepresentation algorithm to create the most general existing
+ // transition that matches the object. This achieves what is needed.
+ Handle<Map> original_map(object->map());
+ GeneralizeFieldRepresentation(
+ object, 0, Representation::None(), ALLOW_AS_CONSTANT);
+ if (FLAG_trace_migration) {
+ object->PrintInstanceMigration(stdout, *original_map, object->map());
+ }
+}
+
+
+Handle<Object> JSObject::TryMigrateInstance(Handle<JSObject> object) {
+ Map* new_map = object->map()->CurrentMapForDeprecated();
+ if (new_map == NULL) return Handle<Object>();
+ Handle<Map> original_map(object->map());
+ JSObject::MigrateToMap(object, handle(new_map));
+ if (FLAG_trace_migration) {
+ object->PrintInstanceMigration(stdout, *original_map, object->map());
+ }
+ return object;
+}
+
+
+Handle<Object> JSObject::SetPropertyUsingTransition(
+ Handle<JSObject> object,
+ LookupResult* lookup,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes) {
+ Handle<Map> transition_map(lookup->GetTransitionTarget());
+ int descriptor = transition_map->LastAdded();
+
+ DescriptorArray* descriptors = transition_map->instance_descriptors();
+ PropertyDetails details = descriptors->GetDetails(descriptor);
+
+ if (details.type() == CALLBACKS || attributes != details.attributes()) {
+ // AddProperty will either normalize the object, or create a new fast copy
+ // of the map. If we get a fast copy of the map, all field representations
+ // will be tagged since the transition is omitted.
+ return JSObject::AddProperty(
+ object, name, value, attributes, kNonStrictMode,
+ JSReceiver::CERTAINLY_NOT_STORE_FROM_KEYED,
+ JSReceiver::OMIT_EXTENSIBILITY_CHECK,
+ JSObject::FORCE_TAGGED, FORCE_FIELD, OMIT_TRANSITION);
+ }
+
+ // Keep the target CONSTANT if the same value is stored.
+ // TODO(verwaest): Also support keeping the placeholder
+ // (value->IsUninitialized) as constant.
+ if (details.type() == CONSTANT &&
+ descriptors->GetValue(descriptor) == *value) {
+ object->set_map(*transition_map);
+ return value;
+ }
+
+ Representation representation = details.representation();
+
+ if (!value->FitsRepresentation(representation) ||
+ details.type() == CONSTANT) {
+ transition_map = Map::GeneralizeRepresentation(transition_map,
+ descriptor, value->OptimalRepresentation(), FORCE_FIELD);
+ Object* back = transition_map->GetBackPointer();
+ if (back->IsMap()) {
+ MigrateToMap(object, handle(Map::cast(back)));
+ }
+ descriptors = transition_map->instance_descriptors();
+ representation = descriptors->GetDetails(descriptor).representation();
+ }
+
+ int field_index = descriptors->GetFieldIndex(descriptor);
+ AddFastPropertyUsingMap(
+ object, transition_map, name, value, field_index, representation);
+ return value;
+}
+
+
+static void SetPropertyToField(LookupResult* lookup,
+ Handle<Name> name,
+ Handle<Object> value) {
+ Representation representation = lookup->representation();
+ if (!value->FitsRepresentation(representation) ||
+ lookup->type() == CONSTANT) {
+ JSObject::GeneralizeFieldRepresentation(handle(lookup->holder()),
+ lookup->GetDescriptorIndex(),
+ value->OptimalRepresentation(),
+ FORCE_FIELD);
+ DescriptorArray* desc = lookup->holder()->map()->instance_descriptors();
+ int descriptor = lookup->GetDescriptorIndex();
+ representation = desc->GetDetails(descriptor).representation();
+ }
+
+ if (FLAG_track_double_fields && representation.IsDouble()) {
+ HeapNumber* storage = HeapNumber::cast(lookup->holder()->RawFastPropertyAt(
+ lookup->GetFieldIndex().field_index()));
+ storage->set_value(value->Number());
+ return;
+ }
+
+ lookup->holder()->FastPropertyAtPut(
+ lookup->GetFieldIndex().field_index(), *value);
+}
+
+
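
SetPropertyUsingTransition and SetPropertyToField above only store a value directly when it fits the representation recorded for the field; otherwise they first generalize the map's representation. The sketch below illustrates the underlying idea with a deliberately simplified, invented four-level lattice (kNone < kSmi < kDouble < kTagged); V8's actual Representation class has more states and different rules.

#include <cstdio>

// Invented, simplified lattice: a larger value can represent everything a
// smaller one can.
enum Representation { kNone = 0, kSmi = 1, kDouble = 2, kTagged = 3 };

struct Value {
  bool is_small_int;
  bool is_number;
};

// The most specific representation the value could be stored with.
Representation OptimalRepresentation(const Value& v) {
  if (v.is_small_int) return kSmi;
  if (v.is_number) return kDouble;
  return kTagged;
}

bool FitsRepresentation(const Value& v, Representation r) {
  return OptimalRepresentation(v) <= r;
}

// Generalizing moves up the lattice just far enough to hold both.
Representation Generalize(Representation field, Representation required) {
  return field >= required ? field : required;
}

int main() {
  Representation field = kSmi;  // the field has only held small integers so far
  Value v = {false, true};      // now a non-Smi number is stored
  if (!FitsRepresentation(v, field)) {
    field = Generalize(field, OptimalRepresentation(v));
  }
  std::printf("field representation is now %d (kDouble)\n",
              static_cast<int>(field));
  return 0;
}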
+static void ConvertAndSetLocalProperty(LookupResult* lookup,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes) {
+ Handle<JSObject> object(lookup->holder());
+ if (object->TooManyFastProperties()) {
+ JSObject::NormalizeProperties(object, CLEAR_INOBJECT_PROPERTIES, 0);
+ }
+
+ if (!object->HasFastProperties()) {
+ ReplaceSlowProperty(object, name, value, attributes);
+ return;
+ }
+
+ int descriptor_index = lookup->GetDescriptorIndex();
+ if (lookup->GetAttributes() == attributes) {
+ JSObject::GeneralizeFieldRepresentation(
+ object, descriptor_index, Representation::Tagged(), FORCE_FIELD);
+ } else {
+ Handle<Map> old_map(object->map());
+ Handle<Map> new_map = Map::CopyGeneralizeAllRepresentations(old_map,
+ descriptor_index, FORCE_FIELD, attributes, "attributes mismatch");
+ JSObject::MigrateToMap(object, new_map);
+ }
+
+ DescriptorArray* descriptors = object->map()->instance_descriptors();
+ int index = descriptors->GetDetails(descriptor_index).field_index();
+ object->FastPropertyAtPut(index, *value);
+}
+
+
+static void SetPropertyToFieldWithAttributes(LookupResult* lookup,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes) {
+ if (lookup->GetAttributes() == attributes) {
+ if (value->IsUninitialized()) return;
+ SetPropertyToField(lookup, name, value);
+ } else {
+ ConvertAndSetLocalProperty(lookup, name, value, attributes);
+ }
+}
+
+
+Handle<Object> JSObject::SetPropertyForResult(Handle<JSObject> object,
+ LookupResult* lookup,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ StrictModeFlag strict_mode,
+ StoreFromKeyed store_mode) {
+ Isolate* isolate = object->GetIsolate();
+
// Make sure that the top context does not change when doing callbacks or
// interceptor calls.
- AssertNoContextChange ncc;
+ AssertNoContextChange ncc(isolate);
// Optimization for 2-byte strings often used as keys in a decompression
- // dictionary. We make these short keys into symbols to avoid constantly
+ // dictionary. We internalize these short keys to avoid constantly
// reallocating them.
- if (!name_raw->IsSymbol() && name_raw->length() <= 2) {
- Object* symbol_version;
- { MaybeObject* maybe_symbol_version = heap->LookupSymbol(name_raw);
- if (maybe_symbol_version->ToObject(&symbol_version)) {
- name_raw = String::cast(symbol_version);
- }
- }
+ if (name->IsString() && !name->IsInternalizedString() &&
+ Handle<String>::cast(name)->length() <= 2) {
+ name = isolate->factory()->InternalizeString(Handle<String>::cast(name));
}
// Check access rights if needed.
- if (IsAccessCheckNeeded()) {
- if (!heap->isolate()->MayNamedAccess(this, name_raw, v8::ACCESS_SET)) {
- return SetPropertyWithFailedAccessCheck(
- result, name_raw, value_raw, true, strict_mode);
+ if (object->IsAccessCheckNeeded()) {
+ if (!isolate->MayNamedAccess(*object, *name, v8::ACCESS_SET)) {
+ return SetPropertyWithFailedAccessCheck(object, lookup, name, value,
+ true, strict_mode);
}
}
- if (IsJSGlobalProxy()) {
- Object* proto = GetPrototype();
- if (proto->IsNull()) return value_raw;
+ if (object->IsJSGlobalProxy()) {
+ Handle<Object> proto(object->GetPrototype(), isolate);
+ if (proto->IsNull()) return value;
ASSERT(proto->IsJSGlobalObject());
- return JSObject::cast(proto)->SetPropertyForResult(
- result, name_raw, value_raw, attributes, strict_mode, store_mode);
+ return SetPropertyForResult(Handle<JSObject>::cast(proto),
+ lookup, name, value, attributes, strict_mode, store_mode);
}
- // From this point on everything needs to be handlified, because
- // SetPropertyViaPrototypes might call back into JavaScript.
- HandleScope scope(GetIsolate());
- Handle<JSObject> self(this);
- Handle<String> name(name_raw);
- Handle<Object> value(value_raw);
+ ASSERT(!lookup->IsFound() || lookup->holder() == *object ||
+ lookup->holder()->map()->is_hidden_prototype());
- if (!result->IsProperty() && !self->IsJSContextExtensionObject()) {
+ if (!lookup->IsProperty() && !object->IsJSContextExtensionObject()) {
bool done = false;
- MaybeObject* result_object = self->SetPropertyViaPrototypes(
- *name, *value, attributes, strict_mode, &done);
+ Handle<Object> result_object = SetPropertyViaPrototypes(
+ object, name, value, attributes, strict_mode, &done);
if (done) return result_object;
}
- if (!result->IsFound()) {
+ if (!lookup->IsFound()) {
// Neither properties nor transitions found.
- return self->AddProperty(
- *name, *value, attributes, strict_mode, store_mode);
+ return AddProperty(
+ object, name, value, attributes, strict_mode, store_mode);
}
- if (result->IsProperty() && result->IsReadOnly()) {
+
+ if (lookup->IsProperty() && lookup->IsReadOnly()) {
if (strict_mode == kStrictMode) {
- Handle<Object> args[] = { name, self };
- return heap->isolate()->Throw(*heap->isolate()->factory()->NewTypeError(
- "strict_read_only_property", HandleVector(args, ARRAY_SIZE(args))));
+ Handle<Object> args[] = { name, object };
+ Handle<Object> error = isolate->factory()->NewTypeError(
+ "strict_read_only_property", HandleVector(args, ARRAY_SIZE(args)));
+ isolate->Throw(*error);
+ return Handle<Object>();
} else {
- return *value;
+ return value;
}
}
+ Handle<Object> old_value = isolate->factory()->the_hole_value();
+ bool is_observed = FLAG_harmony_observation &&
+ object->map()->is_observed() &&
+ *name != isolate->heap()->hidden_string();
+ if (is_observed && lookup->IsDataProperty()) {
+ old_value = Object::GetProperty(object, name);
+ }
+
// This is a real property that is not read-only, or it is a
// transition or null descriptor and there are no setters in the prototypes.
- switch (result->type()) {
+ Handle<Object> result = value;
+ switch (lookup->type()) {
case NORMAL:
- return self->SetNormalizedProperty(result, *value);
+ SetNormalizedProperty(handle(lookup->holder()), lookup, value);
+ break;
case FIELD:
- return self->FastPropertyAtPut(result->GetFieldIndex(), *value);
- case CONSTANT_FUNCTION:
- // Only replace the function if necessary.
- if (*value == result->GetConstantFunction()) return *value;
- // Preserve the attributes of this existing property.
- attributes = result->GetAttributes();
- return self->ConvertDescriptorToField(*name, *value, attributes);
+ SetPropertyToField(lookup, name, value);
+ break;
+ case CONSTANT:
+ // Only replace the constant if necessary.
+ if (*value == lookup->GetConstant()) return value;
+ SetPropertyToField(lookup, name, value);
+ break;
case CALLBACKS: {
- Object* callback_object = result->GetCallbackObject();
- return self->SetPropertyWithCallback(callback_object,
- *name,
- *value,
- result->holder(),
- strict_mode);
+ Handle<Object> callback_object(lookup->GetCallbackObject(), isolate);
+ return SetPropertyWithCallback(object, callback_object, name, value,
+ handle(lookup->holder()), strict_mode);
}
case INTERCEPTOR:
- return self->SetPropertyWithInterceptor(*name,
- *value,
- attributes,
- strict_mode);
- case TRANSITION: {
- Map* transition_map = result->GetTransitionTarget();
- int descriptor = transition_map->LastAdded();
-
- DescriptorArray* descriptors = transition_map->instance_descriptors();
- PropertyDetails details = descriptors->GetDetails(descriptor);
-
- if (details.type() == FIELD) {
- if (attributes == details.attributes()) {
- int field_index = descriptors->GetFieldIndex(descriptor);
- return self->AddFastPropertyUsingMap(transition_map,
- *name,
- *value,
- field_index);
- }
- return self->ConvertDescriptorToField(*name, *value, attributes);
- } else if (details.type() == CALLBACKS) {
- return ConvertDescriptorToField(*name, *value, attributes);
- }
+ result = SetPropertyWithInterceptor(handle(lookup->holder()), name, value,
+ attributes, strict_mode);
+ break;
+ case TRANSITION:
+ result = SetPropertyUsingTransition(handle(lookup->holder()), lookup,
+ name, value, attributes);
+ break;
+ case HANDLER:
+ case NONEXISTENT:
+ UNREACHABLE();
+ }
- ASSERT(details.type() == CONSTANT_FUNCTION);
+ RETURN_IF_EMPTY_HANDLE_VALUE(isolate, result, Handle<Object>());
- Object* constant_function = descriptors->GetValue(descriptor);
- // If the same constant function is being added we can simply
- // transition to the target map.
- if (constant_function == *value) {
- self->set_map(transition_map);
- return constant_function;
+ if (is_observed) {
+ if (lookup->IsTransition()) {
+ EnqueueChangeRecord(object, "new", name, old_value);
+ } else {
+ LookupResult new_lookup(isolate);
+ object->LocalLookup(*name, &new_lookup, true);
+ if (new_lookup.IsDataProperty()) {
+ Handle<Object> new_value = Object::GetProperty(object, name);
+ if (!new_value->SameValue(*old_value)) {
+ EnqueueChangeRecord(object, "updated", name, old_value);
+ }
}
- // Otherwise, replace with a map transition to a new map with a FIELD,
- // even if the value is a constant function.
- return ConvertTransitionToMapTransition(
- result->GetTransitionIndex(), *name, *value, attributes);
}
- case HANDLER:
- case NONEXISTENT:
- UNREACHABLE();
- return *value;
}
- UNREACHABLE(); // keep the compiler happy
- return *value;
+
+ return result;
+}
+
+
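
The is_observed blocks above record the old value before the store and afterwards enqueue a change record: "new" when the store took a transition (a property was added), "updated" only when an existing data property's value actually changed. The sketch below mimics that decision with invented types (ChangeLog, RecordPropertyStore) and a plain equality check standing in for SameValue.

#include <cstdio>
#include <map>
#include <string>
#include <vector>

struct ChangeRecord {
  std::string type;  // "new" or "updated"
  std::string name;
  int old_value;
};

struct ChangeLog {
  std::vector<ChangeRecord> records;
  void Enqueue(const std::string& type, const std::string& name, int old_value) {
    records.push_back({type, name, old_value});
  }
};

// Stores name=value on the object and enqueues a change record only when the
// store actually added a property or changed an existing data property.
void RecordPropertyStore(std::map<std::string, int>* object,
                         const std::string& name, int value, ChangeLog* log) {
  std::map<std::string, int>::iterator it = object->find(name);
  bool had_property = (it != object->end());
  int old_value = had_property ? it->second : 0;

  (*object)[name] = value;  // the actual store

  if (!had_property) {
    log->Enqueue("new", name, old_value);
  } else if (old_value != value) {  // plain equality stands in for SameValue
    log->Enqueue("updated", name, old_value);
  }
  // Unchanged value: no record is enqueued at all.
}

int main() {
  std::map<std::string, int> object;
  ChangeLog log;
  RecordPropertyStore(&object, "x", 1, &log);  // -> "new"
  RecordPropertyStore(&object, "x", 1, &log);  // -> nothing
  RecordPropertyStore(&object, "x", 2, &log);  // -> "updated"
  for (const ChangeRecord& r : log.records) {
    std::printf("%s %s (old=%d)\n", r.type.c_str(), r.name.c_str(), r.old_value);
  }
  return 0;
}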
+MaybeObject* JSObject::SetLocalPropertyIgnoreAttributesTrampoline(
+ Name* key,
+ Object* value,
+ PropertyAttributes attributes,
+ ValueType value_type,
+ StoreMode mode,
+ ExtensibilityCheck extensibility_check) {
+ // TODO(mstarzinger): The trampoline is a giant hack, don't use it anywhere
+ // else or handlification people will start hating you for all eternity.
+ HandleScope scope(GetIsolate());
+ IdempotentPointerToHandleCodeTrampoline trampoline(GetIsolate());
+ return trampoline.CallWithReturnValue(
+ &JSObject::SetLocalPropertyIgnoreAttributes,
+ Handle<JSObject>(this),
+ Handle<Name>(key),
+ Handle<Object>(value, GetIsolate()),
+ attributes,
+ value_type,
+ mode,
+ extensibility_check);
}
@@ -2951,111 +4145,122 @@ MaybeObject* JSObject::SetPropertyForResult(LookupResult* result,
// doesn't handle function prototypes correctly.
Handle<Object> JSObject::SetLocalPropertyIgnoreAttributes(
Handle<JSObject> object,
- Handle<String> key,
+ Handle<Name> name,
Handle<Object> value,
- PropertyAttributes attributes) {
- CALL_HEAP_FUNCTION(
- object->GetIsolate(),
- object->SetLocalPropertyIgnoreAttributes(*key, *value, attributes),
- Object);
-}
-
+ PropertyAttributes attributes,
+ ValueType value_type,
+ StoreMode mode,
+ ExtensibilityCheck extensibility_check) {
+ Isolate* isolate = object->GetIsolate();
-MaybeObject* JSObject::SetLocalPropertyIgnoreAttributes(
- String* name,
- Object* value,
- PropertyAttributes attributes) {
// Make sure that the top context does not change when doing callbacks or
// interceptor calls.
- AssertNoContextChange ncc;
- Isolate* isolate = GetIsolate();
- LookupResult result(isolate);
- LocalLookup(name, &result);
- if (!result.IsFound()) map()->LookupTransition(this, name, &result);
+ AssertNoContextChange ncc(isolate);
+
+ LookupResult lookup(isolate);
+ object->LocalLookup(*name, &lookup, true);
+ if (!lookup.IsFound()) {
+ object->map()->LookupTransition(*object, *name, &lookup);
+ }
+
// Check access rights if needed.
- if (IsAccessCheckNeeded()) {
- if (!isolate->MayNamedAccess(this, name, v8::ACCESS_SET)) {
- return SetPropertyWithFailedAccessCheck(&result,
- name,
- value,
- false,
- kNonStrictMode);
+ if (object->IsAccessCheckNeeded()) {
+ if (!isolate->MayNamedAccess(*object, *name, v8::ACCESS_SET)) {
+ return SetPropertyWithFailedAccessCheck(object, &lookup, name, value,
+ false, kNonStrictMode);
}
}
- if (IsJSGlobalProxy()) {
- Object* proto = GetPrototype();
+ if (object->IsJSGlobalProxy()) {
+ Handle<Object> proto(object->GetPrototype(), isolate);
if (proto->IsNull()) return value;
ASSERT(proto->IsJSGlobalObject());
- return JSObject::cast(proto)->SetLocalPropertyIgnoreAttributes(
- name,
- value,
- attributes);
+ return SetLocalPropertyIgnoreAttributes(Handle<JSObject>::cast(proto),
+ name, value, attributes, value_type, mode, extensibility_check);
+ }
+
+ if (lookup.IsFound() &&
+ (lookup.type() == INTERCEPTOR || lookup.type() == CALLBACKS)) {
+ object->LocalLookupRealNamedProperty(*name, &lookup);
}
// Check for accessor in prototype chain removed here in clone.
- if (!result.IsFound()) {
+ if (!lookup.IsFound()) {
// Neither properties nor transitions found.
- return AddProperty(name, value, attributes, kNonStrictMode);
+ return AddProperty(object, name, value, attributes, kNonStrictMode,
+ MAY_BE_STORE_FROM_KEYED, extensibility_check, value_type, mode);
+ }
+
+ Handle<Object> old_value = isolate->factory()->the_hole_value();
+ PropertyAttributes old_attributes = ABSENT;
+ bool is_observed = FLAG_harmony_observation &&
+ object->map()->is_observed() &&
+ *name != isolate->heap()->hidden_string();
+ if (is_observed && lookup.IsProperty()) {
+ if (lookup.IsDataProperty()) old_value =
+ Object::GetProperty(object, name);
+ old_attributes = lookup.GetAttributes();
}
// Check of IsReadOnly removed from here in clone.
- switch (result.type()) {
- case NORMAL: {
- PropertyDetails details = PropertyDetails(attributes, NORMAL);
- return SetNormalizedProperty(name, value, details);
- }
+ switch (lookup.type()) {
+ case NORMAL:
+ ReplaceSlowProperty(object, name, value, attributes);
+ break;
case FIELD:
- return FastPropertyAtPut(result.GetFieldIndex(), value);
- case CONSTANT_FUNCTION:
- // Only replace the function if necessary.
- if (value == result.GetConstantFunction()) return value;
- // Preserve the attributes of this existing property.
- attributes = result.GetAttributes();
- return ConvertDescriptorToField(name, value, attributes);
+ SetPropertyToFieldWithAttributes(&lookup, name, value, attributes);
+ break;
+ case CONSTANT:
+ // Only replace the constant if necessary.
+ if (lookup.GetAttributes() != attributes ||
+ *value != lookup.GetConstant()) {
+ SetPropertyToFieldWithAttributes(&lookup, name, value, attributes);
+ }
+ break;
case CALLBACKS:
- case INTERCEPTOR:
- // Override callback in clone
- return ConvertDescriptorToField(name, value, attributes);
+ ConvertAndSetLocalProperty(&lookup, name, value, attributes);
+ break;
case TRANSITION: {
- Map* transition_map = result.GetTransitionTarget();
- int descriptor = transition_map->LastAdded();
-
- DescriptorArray* descriptors = transition_map->instance_descriptors();
- PropertyDetails details = descriptors->GetDetails(descriptor);
-
- if (details.type() == FIELD) {
- if (attributes == details.attributes()) {
- int field_index = descriptors->GetFieldIndex(descriptor);
- return AddFastPropertyUsingMap(transition_map,
- name,
- value,
- field_index);
- }
- return ConvertDescriptorToField(name, value, attributes);
- } else if (details.type() == CALLBACKS) {
- return ConvertDescriptorToField(name, value, attributes);
- }
-
- ASSERT(details.type() == CONSTANT_FUNCTION);
-
- // Replace transition to CONSTANT FUNCTION with a map transition to a new
- // map with a FIELD, even if the value is a function.
- return ConvertTransitionToMapTransition(
- result.GetTransitionIndex(), name, value, attributes);
+ Handle<Object> result = SetPropertyUsingTransition(
+ handle(lookup.holder()), &lookup, name, value, attributes);
+ RETURN_IF_EMPTY_HANDLE_VALUE(isolate, result, Handle<Object>());
+ break;
}
- case HANDLER:
case NONEXISTENT:
+ case HANDLER:
+ case INTERCEPTOR:
UNREACHABLE();
}
- UNREACHABLE(); // keep the compiler happy
+
+ if (is_observed) {
+ if (lookup.IsTransition()) {
+ EnqueueChangeRecord(object, "new", name, old_value);
+ } else if (old_value->IsTheHole()) {
+ EnqueueChangeRecord(object, "reconfigured", name, old_value);
+ } else {
+ LookupResult new_lookup(isolate);
+ object->LocalLookup(*name, &new_lookup, true);
+ bool value_changed = false;
+ if (new_lookup.IsDataProperty()) {
+ Handle<Object> new_value = Object::GetProperty(object, name);
+ value_changed = !old_value->SameValue(*new_value);
+ }
+ if (new_lookup.GetAttributes() != old_attributes) {
+ if (!value_changed) old_value = isolate->factory()->the_hole_value();
+ EnqueueChangeRecord(object, "reconfigured", name, old_value);
+ } else if (value_changed) {
+ EnqueueChangeRecord(object, "updated", name, old_value);
+ }
+ }
+ }
+
return value;
}
PropertyAttributes JSObject::GetPropertyAttributePostInterceptor(
JSObject* receiver,
- String* name,
+ Name* name,
bool continue_search) {
// Check local property, ignore interceptor.
LookupResult result(GetIsolate());
@@ -3076,47 +4281,41 @@ PropertyAttributes JSObject::GetPropertyAttributePostInterceptor(
PropertyAttributes JSObject::GetPropertyAttributeWithInterceptor(
JSObject* receiver,
- String* name,
+ Name* name,
bool continue_search) {
+ // TODO(rossberg): Support symbols in the API.
+ if (name->IsSymbol()) return ABSENT;
+
Isolate* isolate = GetIsolate();
+ HandleScope scope(isolate);
// Make sure that the top context does not change when doing
// callbacks or interceptor calls.
- AssertNoContextChange ncc;
+ AssertNoContextChange ncc(isolate);
- HandleScope scope(isolate);
Handle<InterceptorInfo> interceptor(GetNamedInterceptor());
Handle<JSObject> receiver_handle(receiver);
Handle<JSObject> holder_handle(this);
- Handle<String> name_handle(name);
- CustomArguments args(isolate, interceptor->data(), receiver, this);
- v8::AccessorInfo info(args.end());
+ Handle<String> name_handle(String::cast(name));
+ PropertyCallbackArguments args(isolate, interceptor->data(), receiver, this);
if (!interceptor->query()->IsUndefined()) {
- v8::NamedPropertyQuery query =
- v8::ToCData<v8::NamedPropertyQuery>(interceptor->query());
+ v8::NamedPropertyQueryCallback query =
+ v8::ToCData<v8::NamedPropertyQueryCallback>(interceptor->query());
LOG(isolate,
ApiNamedPropertyAccess("interceptor-named-has", *holder_handle, name));
- v8::Handle<v8::Integer> result;
- {
- // Leaving JavaScript.
- VMState state(isolate, EXTERNAL);
- result = query(v8::Utils::ToLocal(name_handle), info);
- }
+ v8::Handle<v8::Integer> result =
+ args.Call(query, v8::Utils::ToLocal(name_handle));
if (!result.IsEmpty()) {
ASSERT(result->IsInt32());
return static_cast<PropertyAttributes>(result->Int32Value());
}
} else if (!interceptor->getter()->IsUndefined()) {
- v8::NamedPropertyGetter getter =
- v8::ToCData<v8::NamedPropertyGetter>(interceptor->getter());
+ v8::NamedPropertyGetterCallback getter =
+ v8::ToCData<v8::NamedPropertyGetterCallback>(interceptor->getter());
LOG(isolate,
ApiNamedPropertyAccess("interceptor-named-get-has", this, name));
- v8::Handle<v8::Value> result;
- {
- // Leaving JavaScript.
- VMState state(isolate, EXTERNAL);
- result = getter(v8::Utils::ToLocal(name_handle), info);
- }
+ v8::Handle<v8::Value> result =
+ args.Call(getter, v8::Utils::ToLocal(name_handle));
if (!result.IsEmpty()) return DONT_ENUM;
}
return holder_handle->GetPropertyAttributePostInterceptor(*receiver_handle,
@@ -3127,45 +4326,46 @@ PropertyAttributes JSObject::GetPropertyAttributeWithInterceptor(
PropertyAttributes JSReceiver::GetPropertyAttributeWithReceiver(
JSReceiver* receiver,
- String* key) {
+ Name* key) {
uint32_t index = 0;
if (IsJSObject() && key->AsArrayIndex(&index)) {
- return JSObject::cast(this)->HasElementWithReceiver(receiver, index)
- ? NONE : ABSENT;
+ return JSObject::cast(this)->GetElementAttributeWithReceiver(
+ receiver, index, true);
}
// Named property.
- LookupResult result(GetIsolate());
- Lookup(key, &result);
- return GetPropertyAttribute(receiver, &result, key, true);
+ LookupResult lookup(GetIsolate());
+ Lookup(key, &lookup);
+ return GetPropertyAttributeForResult(receiver, &lookup, key, true);
}
-PropertyAttributes JSReceiver::GetPropertyAttribute(JSReceiver* receiver,
- LookupResult* result,
- String* name,
- bool continue_search) {
+PropertyAttributes JSReceiver::GetPropertyAttributeForResult(
+ JSReceiver* receiver,
+ LookupResult* lookup,
+ Name* name,
+ bool continue_search) {
// Check access rights if needed.
if (IsAccessCheckNeeded()) {
JSObject* this_obj = JSObject::cast(this);
Heap* heap = GetHeap();
if (!heap->isolate()->MayNamedAccess(this_obj, name, v8::ACCESS_HAS)) {
return this_obj->GetPropertyAttributeWithFailedAccessCheck(
- receiver, result, name, continue_search);
+ receiver, lookup, name, continue_search);
}
}
- if (result->IsFound()) {
- switch (result->type()) {
+ if (lookup->IsFound()) {
+ switch (lookup->type()) {
case NORMAL: // fall through
case FIELD:
- case CONSTANT_FUNCTION:
+ case CONSTANT:
case CALLBACKS:
- return result->GetAttributes();
+ return lookup->GetAttributes();
case HANDLER: {
- return JSProxy::cast(result->proxy())->GetPropertyAttributeWithHandler(
+ return JSProxy::cast(lookup->proxy())->GetPropertyAttributeWithHandler(
receiver, name);
}
case INTERCEPTOR:
- return result->holder()->GetPropertyAttributeWithInterceptor(
+ return lookup->holder()->GetPropertyAttributeWithInterceptor(
JSObject::cast(receiver), name, continue_search);
case TRANSITION:
case NONEXISTENT:
@@ -3176,64 +4376,151 @@ PropertyAttributes JSReceiver::GetPropertyAttribute(JSReceiver* receiver,
}
-PropertyAttributes JSReceiver::GetLocalPropertyAttribute(String* name) {
+PropertyAttributes JSReceiver::GetLocalPropertyAttribute(Name* name) {
// Check whether the name is an array index.
uint32_t index = 0;
if (IsJSObject() && name->AsArrayIndex(&index)) {
- if (JSObject::cast(this)->HasLocalElement(index)) return NONE;
- return ABSENT;
+ return GetLocalElementAttribute(index);
}
// Named property.
- LookupResult result(GetIsolate());
- LocalLookup(name, &result);
- return GetPropertyAttribute(this, &result, name, false);
+ LookupResult lookup(GetIsolate());
+ LocalLookup(name, &lookup, true);
+ return GetPropertyAttributeForResult(this, &lookup, name, false);
+}
+
+
+PropertyAttributes JSObject::GetElementAttributeWithReceiver(
+ JSReceiver* receiver, uint32_t index, bool continue_search) {
+ Isolate* isolate = GetIsolate();
+
+ // Check access rights if needed.
+ if (IsAccessCheckNeeded()) {
+ if (!isolate->MayIndexedAccess(this, index, v8::ACCESS_HAS)) {
+ isolate->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
+ return ABSENT;
+ }
+ }
+
+ if (IsJSGlobalProxy()) {
+ Object* proto = GetPrototype();
+ if (proto->IsNull()) return ABSENT;
+ ASSERT(proto->IsJSGlobalObject());
+ return JSObject::cast(proto)->GetElementAttributeWithReceiver(
+ receiver, index, continue_search);
+ }
+
+ // Check for lookup interceptor except when bootstrapping.
+ if (HasIndexedInterceptor() && !isolate->bootstrapper()->IsActive()) {
+ return GetElementAttributeWithInterceptor(receiver, index, continue_search);
+ }
+
+ return GetElementAttributeWithoutInterceptor(
+ receiver, index, continue_search);
+}
+
+
+PropertyAttributes JSObject::GetElementAttributeWithInterceptor(
+ JSReceiver* receiver, uint32_t index, bool continue_search) {
+ Isolate* isolate = GetIsolate();
+ HandleScope scope(isolate);
+
+ // Make sure that the top context does not change when doing
+ // callbacks or interceptor calls.
+ AssertNoContextChange ncc(isolate);
+
+ Handle<InterceptorInfo> interceptor(GetIndexedInterceptor());
+ Handle<JSReceiver> hreceiver(receiver);
+ Handle<JSObject> holder(this);
+ PropertyCallbackArguments args(isolate, interceptor->data(), receiver, this);
+ if (!interceptor->query()->IsUndefined()) {
+ v8::IndexedPropertyQueryCallback query =
+ v8::ToCData<v8::IndexedPropertyQueryCallback>(interceptor->query());
+ LOG(isolate,
+ ApiIndexedPropertyAccess("interceptor-indexed-has", this, index));
+ v8::Handle<v8::Integer> result = args.Call(query, index);
+ if (!result.IsEmpty())
+ return static_cast<PropertyAttributes>(result->Int32Value());
+ } else if (!interceptor->getter()->IsUndefined()) {
+ v8::IndexedPropertyGetterCallback getter =
+ v8::ToCData<v8::IndexedPropertyGetterCallback>(interceptor->getter());
+ LOG(isolate,
+ ApiIndexedPropertyAccess("interceptor-indexed-get-has", this, index));
+ v8::Handle<v8::Value> result = args.Call(getter, index);
+ if (!result.IsEmpty()) return NONE;
+ }
+
+ return holder->GetElementAttributeWithoutInterceptor(
+ *hreceiver, index, continue_search);
}
-MaybeObject* NormalizedMapCache::Get(JSObject* obj,
- PropertyNormalizationMode mode) {
- Isolate* isolate = obj->GetIsolate();
- Map* fast = obj->map();
- int index = fast->Hash() % kEntries;
- Object* result = get(index);
+PropertyAttributes JSObject::GetElementAttributeWithoutInterceptor(
+ JSReceiver* receiver, uint32_t index, bool continue_search) {
+ PropertyAttributes attr = GetElementsAccessor()->GetAttributes(
+ receiver, this, index);
+ if (attr != ABSENT) return attr;
+
+ // Handle [] on String objects.
+ if (IsStringObjectWithCharacterAt(index)) {
+ return static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
+ }
+
+ if (!continue_search) return ABSENT;
+
+ Object* pt = GetPrototype();
+ if (pt->IsJSProxy()) {
+ // We need to follow the spec and simulate a call to [[GetOwnProperty]].
+ return JSProxy::cast(pt)->GetElementAttributeWithHandler(receiver, index);
+ }
+ if (pt->IsNull()) return ABSENT;
+ return JSObject::cast(pt)->GetElementAttributeWithReceiver(
+ receiver, index, true);
+}
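
The attribute lookup above layers three steps: the receiver's own elements, the String-wrapper special case, and then the prototype chain (including proxies). Below is a minimal standalone sketch of that ordering only; Obj, GetElementAttribute and the attribute values are illustrative stand-ins, not V8 types.

#include <cstdint>
#include <map>

enum Attributes { NONE = 0, READ_ONLY = 1, DONT_DELETE = 4, ABSENT = 64 };

struct Obj {
  std::map<uint32_t, int> elements;  // index -> attribute bits of own elements
  const Obj* prototype = nullptr;    // nullptr terminates the chain
};

// Own elements first; only walk the prototype chain if continue_search is set.
int GetElementAttribute(const Obj* obj, uint32_t index, bool continue_search) {
  for (const Obj* o = obj; o != nullptr; o = o->prototype) {
    std::map<uint32_t, int>::const_iterator it = o->elements.find(index);
    if (it != o->elements.end()) return it->second;
    if (!continue_search) break;
  }
  return ABSENT;
}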
+
+
+Handle<Map> NormalizedMapCache::Get(Handle<NormalizedMapCache> cache,
+ Handle<JSObject> obj,
+ PropertyNormalizationMode mode) {
+ int index = obj->map()->Hash() % kEntries;
+ Handle<Object> result = handle(cache->get(index), cache->GetIsolate());
if (result->IsMap() &&
- Map::cast(result)->EquivalentToForNormalization(fast, mode)) {
+ Handle<Map>::cast(result)->EquivalentToForNormalization(obj->map(),
+ mode)) {
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
- Map::cast(result)->SharedMapVerify();
+ Handle<Map>::cast(result)->SharedMapVerify();
}
#endif
-#ifdef DEBUG
+#ifdef ENABLE_SLOW_ASSERTS
if (FLAG_enable_slow_asserts) {
// The cached map should match newly created normalized map bit-by-bit,
      // except for the code cache, which can contain some ICs that can be
// applied to the shared map.
- Object* fresh;
- MaybeObject* maybe_fresh =
- fast->CopyNormalized(mode, SHARED_NORMALIZED_MAP);
- if (maybe_fresh->ToObject(&fresh)) {
- ASSERT(memcmp(Map::cast(fresh)->address(),
- Map::cast(result)->address(),
- Map::kCodeCacheOffset) == 0);
- int offset = Map::kCodeCacheOffset + kPointerSize;
- ASSERT(memcmp(Map::cast(fresh)->address() + offset,
- Map::cast(result)->address() + offset,
- Map::kSize - offset) == 0);
- }
+ Handle<Map> fresh = Map::CopyNormalized(handle(obj->map()), mode,
+ SHARED_NORMALIZED_MAP);
+
+ ASSERT(memcmp(fresh->address(),
+ Handle<Map>::cast(result)->address(),
+ Map::kCodeCacheOffset) == 0);
+ STATIC_ASSERT(Map::kDependentCodeOffset ==
+ Map::kCodeCacheOffset + kPointerSize);
+ int offset = Map::kDependentCodeOffset + kPointerSize;
+ ASSERT(memcmp(fresh->address() + offset,
+ Handle<Map>::cast(result)->address() + offset,
+ Map::kSize - offset) == 0);
}
#endif
- return result;
+ return Handle<Map>::cast(result);
}
- { MaybeObject* maybe_result =
- fast->CopyNormalized(mode, SHARED_NORMALIZED_MAP);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- ASSERT(Map::cast(result)->is_dictionary_map());
- set(index, result);
+ Isolate* isolate = cache->GetIsolate();
+ Handle<Map> map = Map::CopyNormalized(handle(obj->map()), mode,
+ SHARED_NORMALIZED_MAP);
+ ASSERT(map->is_dictionary_map());
+ cache->set(index, *map);
isolate->counters()->normalized_maps()->Increment();
- return result;
+ return map;
}
@@ -3245,95 +4532,66 @@ void NormalizedMapCache::Clear() {
}
-void JSObject::UpdateMapCodeCache(Handle<JSObject> object,
- Handle<String> name,
- Handle<Code> code) {
- Isolate* isolate = object->GetIsolate();
- CALL_HEAP_FUNCTION_VOID(isolate,
- object->UpdateMapCodeCache(*name, *code));
-}
-
-
-MaybeObject* JSObject::UpdateMapCodeCache(String* name, Code* code) {
- if (map()->is_shared()) {
- // Fast case maps are never marked as shared.
- ASSERT(!HasFastProperties());
- // Replace the map with an identical copy that can be safely modified.
- Object* obj;
- { MaybeObject* maybe_obj = map()->CopyNormalized(KEEP_INOBJECT_PROPERTIES,
- UNIQUE_NORMALIZED_MAP);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- GetIsolate()->counters()->normalized_maps()->Increment();
-
- set_map(Map::cast(obj));
- }
- return map()->UpdateCodeCache(name, code);
+void HeapObject::UpdateMapCodeCache(Handle<HeapObject> object,
+ Handle<Name> name,
+ Handle<Code> code) {
+ Handle<Map> map(object->map());
+ Map::UpdateCodeCache(map, name, code);
}
void JSObject::NormalizeProperties(Handle<JSObject> object,
PropertyNormalizationMode mode,
int expected_additional_properties) {
- CALL_HEAP_FUNCTION_VOID(object->GetIsolate(),
- object->NormalizeProperties(
- mode, expected_additional_properties));
-}
-
-
-MaybeObject* JSObject::NormalizeProperties(PropertyNormalizationMode mode,
- int expected_additional_properties) {
- if (!HasFastProperties()) return this;
+ if (!object->HasFastProperties()) return;
// The global object is always normalized.
- ASSERT(!IsGlobalObject());
+ ASSERT(!object->IsGlobalObject());
  // JSGlobalProxy must never be normalized.
- ASSERT(!IsJSGlobalProxy());
+ ASSERT(!object->IsJSGlobalProxy());
- Map* map_of_this = map();
+ Isolate* isolate = object->GetIsolate();
+ HandleScope scope(isolate);
+ Handle<Map> map(object->map());
// Allocate new content.
- int real_size = map_of_this->NumberOfOwnDescriptors();
+ int real_size = map->NumberOfOwnDescriptors();
int property_count = real_size;
if (expected_additional_properties > 0) {
property_count += expected_additional_properties;
} else {
property_count += 2; // Make space for two more properties.
}
- StringDictionary* dictionary;
- MaybeObject* maybe_dictionary = StringDictionary::Allocate(property_count);
- if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
+ Handle<NameDictionary> dictionary =
+ isolate->factory()->NewNameDictionary(property_count);
- DescriptorArray* descs = map_of_this->instance_descriptors();
+ Handle<DescriptorArray> descs(map->instance_descriptors());
for (int i = 0; i < real_size; i++) {
PropertyDetails details = descs->GetDetails(i);
switch (details.type()) {
- case CONSTANT_FUNCTION: {
- PropertyDetails d = PropertyDetails(details.attributes(),
- NORMAL,
- details.descriptor_index());
- Object* value = descs->GetConstantFunction(i);
- MaybeObject* maybe_dictionary =
- dictionary->Add(descs->GetKey(i), value, d);
- if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
+ case CONSTANT: {
+ Handle<Name> key(descs->GetKey(i));
+ Handle<Object> value(descs->GetConstant(i), isolate);
+ PropertyDetails d = PropertyDetails(
+ details.attributes(), NORMAL, i + 1);
+ dictionary = NameDictionaryAdd(dictionary, key, value, d);
break;
}
case FIELD: {
- PropertyDetails d = PropertyDetails(details.attributes(),
- NORMAL,
- details.descriptor_index());
- Object* value = FastPropertyAt(descs->GetFieldIndex(i));
- MaybeObject* maybe_dictionary =
- dictionary->Add(descs->GetKey(i), value, d);
- if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
+ Handle<Name> key(descs->GetKey(i));
+ Handle<Object> value(
+ object->RawFastPropertyAt(descs->GetFieldIndex(i)), isolate);
+ PropertyDetails d =
+ PropertyDetails(details.attributes(), NORMAL, i + 1);
+ dictionary = NameDictionaryAdd(dictionary, key, value, d);
break;
}
case CALLBACKS: {
- Object* value = descs->GetCallbacksObject(i);
- details = details.set_pointer(0);
- MaybeObject* maybe_dictionary =
- dictionary->Add(descs->GetKey(i), value, details);
- if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
+ Handle<Name> key(descs->GetKey(i));
+ Handle<Object> value(descs->GetCallbacksObject(i), isolate);
+ PropertyDetails d = PropertyDetails(
+ details.attributes(), CALLBACKS, i + 1);
+ dictionary = NameDictionaryAdd(dictionary, key, value, d);
break;
}
case INTERCEPTOR:
@@ -3347,61 +4605,100 @@ MaybeObject* JSObject::NormalizeProperties(PropertyNormalizationMode mode,
}
}
- Heap* current_heap = GetHeap();
-
// Copy the next enumeration index from instance descriptor.
dictionary->SetNextEnumerationIndex(real_size + 1);
- Map* new_map;
- MaybeObject* maybe_map =
- current_heap->isolate()->context()->native_context()->
- normalized_map_cache()->Get(this, mode);
- if (!maybe_map->To(&new_map)) return maybe_map;
+ Handle<NormalizedMapCache> cache(
+ isolate->context()->native_context()->normalized_map_cache());
+ Handle<Map> new_map = NormalizedMapCache::Get(cache, object, mode);
ASSERT(new_map->is_dictionary_map());
- // We have now successfully allocated all the necessary objects.
- // Changes can now be made with the guarantee that all of them take effect.
+ // From here on we cannot fail and we shouldn't GC anymore.
+ DisallowHeapAllocation no_allocation;
// Resize the object in the heap if necessary.
int new_instance_size = new_map->instance_size();
- int instance_size_delta = map_of_this->instance_size() - new_instance_size;
+ int instance_size_delta = map->instance_size() - new_instance_size;
ASSERT(instance_size_delta >= 0);
- current_heap->CreateFillerObjectAt(this->address() + new_instance_size,
- instance_size_delta);
- if (Marking::IsBlack(Marking::MarkBitFrom(this))) {
- MemoryChunk::IncrementLiveBytesFromMutator(this->address(),
+ isolate->heap()->CreateFillerObjectAt(object->address() + new_instance_size,
+ instance_size_delta);
+ if (Marking::IsBlack(Marking::MarkBitFrom(*object))) {
+ MemoryChunk::IncrementLiveBytesFromMutator(object->address(),
-instance_size_delta);
}
- set_map(new_map);
+ object->set_map(*new_map);
+ map->NotifyLeafMapLayoutChange();
- set_properties(dictionary);
+ object->set_properties(*dictionary);
- current_heap->isolate()->counters()->props_to_dictionary()->Increment();
+ isolate->counters()->props_to_dictionary()->Increment();
#ifdef DEBUG
if (FLAG_trace_normalization) {
PrintF("Object properties have been normalized:\n");
- Print();
+ object->Print();
}
#endif
- return this;
}
void JSObject::TransformToFastProperties(Handle<JSObject> object,
int unused_property_fields) {
+ if (object->HasFastProperties()) return;
+ ASSERT(!object->IsGlobalObject());
CALL_HEAP_FUNCTION_VOID(
object->GetIsolate(),
- object->TransformToFastProperties(unused_property_fields));
+ object->property_dictionary()->TransformPropertiesToFastFor(
+ *object, unused_property_fields));
}
-MaybeObject* JSObject::TransformToFastProperties(int unused_property_fields) {
- if (HasFastProperties()) return this;
- ASSERT(!IsGlobalObject());
- return property_dictionary()->
- TransformPropertiesToFastFor(this, unused_property_fields);
+static MUST_USE_RESULT MaybeObject* CopyFastElementsToDictionary(
+ Isolate* isolate,
+ FixedArrayBase* array,
+ int length,
+ SeededNumberDictionary* dictionary) {
+ Heap* heap = isolate->heap();
+ bool has_double_elements = array->IsFixedDoubleArray();
+ for (int i = 0; i < length; i++) {
+ Object* value = NULL;
+ if (has_double_elements) {
+ FixedDoubleArray* double_array = FixedDoubleArray::cast(array);
+ if (double_array->is_the_hole(i)) {
+ value = isolate->heap()->the_hole_value();
+ } else {
+ // Objects must be allocated in the old object space, since the
+ // overall number of HeapNumbers needed for the conversion might
+ // exceed the capacity of new space, and we would fail repeatedly
+ // trying to convert the FixedDoubleArray.
+ MaybeObject* maybe_value_object =
+ heap->AllocateHeapNumber(double_array->get_scalar(i), TENURED);
+ if (!maybe_value_object->ToObject(&value)) return maybe_value_object;
+ }
+ } else {
+ value = FixedArray::cast(array)->get(i);
+ }
+ if (!value->IsTheHole()) {
+ PropertyDetails details = PropertyDetails(NONE, NORMAL, 0);
+ MaybeObject* maybe_result =
+ dictionary->AddNumberEntry(i, value, details);
+ if (!maybe_result->To(&dictionary)) return maybe_result;
+ }
+ }
+ return dictionary;
+}
+
+
+static Handle<SeededNumberDictionary> CopyFastElementsToDictionary(
+ Handle<FixedArrayBase> array,
+ int length,
+ Handle<SeededNumberDictionary> dict) {
+ Isolate* isolate = array->GetIsolate();
+ CALL_HEAP_FUNCTION(isolate,
+ CopyFastElementsToDictionary(
+ isolate, *array, length, *dict),
+ SeededNumberDictionary);
}
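
The two CopyFastElementsToDictionary overloads above walk a dense backing store and insert every non-hole slot into an index-keyed dictionary, allocating the boxed values tenured so repeated attempts cannot exhaust new space. The following standalone sketch shows only the copy-and-skip-holes part, modelling holes as an empty std::optional; the types are illustrative, not V8's FixedDoubleArray or SeededNumberDictionary.

#include <cstdint>
#include <map>
#include <optional>
#include <vector>

std::map<uint32_t, double> CopyFastElementsToDictionary(
    const std::vector<std::optional<double>>& fast_elements, int length) {
  std::map<uint32_t, double> dictionary;
  for (int i = 0; i < length; i++) {
    if (!fast_elements[i].has_value()) continue;  // holes stay absent
    dictionary[static_cast<uint32_t>(i)] = *fast_elements[i];
  }
  return dictionary;
}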
@@ -3436,43 +4733,14 @@ MaybeObject* JSObject::NormalizeElements() {
int old_capacity = 0;
int used_elements = 0;
GetElementsCapacityAndUsage(&old_capacity, &used_elements);
- SeededNumberDictionary* dictionary = NULL;
- { Object* object;
- MaybeObject* maybe = SeededNumberDictionary::Allocate(used_elements);
- if (!maybe->ToObject(&object)) return maybe;
- dictionary = SeededNumberDictionary::cast(object);
- }
+ SeededNumberDictionary* dictionary;
+ MaybeObject* maybe_dictionary =
+ SeededNumberDictionary::Allocate(GetHeap(), used_elements);
+ if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
- // Copy the elements to the new backing store.
- bool has_double_elements = array->IsFixedDoubleArray();
- for (int i = 0; i < length; i++) {
- Object* value = NULL;
- if (has_double_elements) {
- FixedDoubleArray* double_array = FixedDoubleArray::cast(array);
- if (double_array->is_the_hole(i)) {
- value = GetIsolate()->heap()->the_hole_value();
- } else {
- // Objects must be allocated in the old object space, since the
- // overall number of HeapNumbers needed for the conversion might
- // exceed the capacity of new space, and we would fail repeatedly
- // trying to convert the FixedDoubleArray.
- MaybeObject* maybe_value_object =
- GetHeap()->AllocateHeapNumber(double_array->get_scalar(i), TENURED);
- if (!maybe_value_object->ToObject(&value)) return maybe_value_object;
- }
- } else {
- ASSERT(old_map->has_fast_smi_or_object_elements());
- value = FixedArray::cast(array)->get(i);
- }
- PropertyDetails details = PropertyDetails(NONE, NORMAL);
- if (!value->IsTheHole()) {
- Object* result;
- MaybeObject* maybe_result =
- dictionary->AddNumberEntry(i, value, details);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- dictionary = SeededNumberDictionary::cast(result);
- }
- }
+ maybe_dictionary = CopyFastElementsToDictionary(
+ GetIsolate(), array, length, dictionary);
+ if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
// Switch to using the dictionary as the backing storage for elements.
if (is_arguments) {
@@ -3480,11 +4748,11 @@ MaybeObject* JSObject::NormalizeElements() {
} else {
    // Set the new map first to satisfy the elements type assert in
// set_elements().
- Object* new_map;
+ Map* new_map;
MaybeObject* maybe = GetElementsTransitionMap(GetIsolate(),
DICTIONARY_ELEMENTS);
- if (!maybe->ToObject(&new_map)) return maybe;
- set_map(Map::cast(new_map));
+ if (!maybe->To(&new_map)) return maybe;
+ set_map(new_map);
set_elements(dictionary);
}
@@ -3511,7 +4779,7 @@ Smi* JSReceiver::GenerateIdentityHash() {
do {
// Generate a random 32-bit hash value but limit range to fit
// within a smi.
- hash_value = V8::RandomPrivate(isolate) & Smi::kMaxValue;
+ hash_value = isolate->random_number_generator()->NextInt() & Smi::kMaxValue;
attempts++;
} while (hash_value == 0 && attempts < 30);
hash_value = hash_value != 0 ? hash_value : 1; // never return 0
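
The loop above draws a random value, masks it into the positive Smi range, and retries a bounded number of times so the hash is never 0. A standalone sketch of the same pattern; the mask and the RNG are stand-ins for Smi::kMaxValue and the isolate's random_number_generator():

#include <cstdint>
#include <random>

int32_t GenerateIdentityHash(std::mt19937* rng) {
  const int32_t kMaxValue = (1 << 30) - 1;  // stand-in for the Smi range
  int32_t hash_value = 0;
  int attempts = 0;
  do {
    hash_value = static_cast<int32_t>((*rng)()) & kMaxValue;
    attempts++;
  } while (hash_value == 0 && attempts < 30);
  return hash_value != 0 ? hash_value : 1;  // never return 0
}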
@@ -3520,31 +4788,30 @@ Smi* JSReceiver::GenerateIdentityHash() {
}
-MaybeObject* JSObject::SetIdentityHash(Smi* hash, CreationFlag flag) {
- MaybeObject* maybe = SetHiddenProperty(GetHeap()->identity_hash_symbol(),
- hash);
- if (maybe->IsFailure()) return maybe;
- return this;
+void JSObject::SetIdentityHash(Handle<JSObject> object, Smi* hash) {
+ CALL_HEAP_FUNCTION_VOID(object->GetIsolate(),
+ object->SetHiddenProperty(
+ object->GetHeap()->identity_hash_string(), hash));
}
-int JSObject::GetIdentityHash(Handle<JSObject> obj) {
- CALL_AND_RETRY(obj->GetIsolate(),
- obj->GetIdentityHash(ALLOW_CREATION),
- return Smi::cast(__object__)->value(),
- return 0);
+int JSObject::GetIdentityHash(Handle<JSObject> object) {
+ CALL_AND_RETRY_OR_DIE(object->GetIsolate(),
+ object->GetIdentityHash(ALLOW_CREATION),
+ return Smi::cast(__object__)->value(),
+ return 0);
}
MaybeObject* JSObject::GetIdentityHash(CreationFlag flag) {
- Object* stored_value = GetHiddenProperty(GetHeap()->identity_hash_symbol());
+ Object* stored_value = GetHiddenProperty(GetHeap()->identity_hash_string());
if (stored_value->IsSmi()) return stored_value;
// Do not generate permanent identity hash code if not requested.
if (flag == OMIT_CREATION) return GetHeap()->undefined_value();
Smi* hash = GenerateIdentityHash();
- MaybeObject* result = SetHiddenProperty(GetHeap()->identity_hash_symbol(),
+ MaybeObject* result = SetHiddenProperty(GetHeap()->identity_hash_string(),
hash);
if (result->IsFailure()) return result;
if (result->ToObjectUnchecked()->IsUndefined()) {
@@ -3555,6 +4822,12 @@ MaybeObject* JSObject::GetIdentityHash(CreationFlag flag) {
}
+Handle<Object> JSProxy::GetIdentityHash(Handle<JSProxy> proxy,
+ CreationFlag flag) {
+ CALL_HEAP_FUNCTION(proxy->GetIsolate(), proxy->GetIdentityHash(flag), Object);
+}
+
+
MaybeObject* JSProxy::GetIdentityHash(CreationFlag flag) {
Object* hash = this->hash();
if (!hash->IsSmi() && flag == ALLOW_CREATION) {
@@ -3565,13 +4838,13 @@ MaybeObject* JSProxy::GetIdentityHash(CreationFlag flag) {
}
-Object* JSObject::GetHiddenProperty(String* key) {
- ASSERT(key->IsSymbol());
+Object* JSObject::GetHiddenProperty(Name* key) {
+ ASSERT(key->IsUniqueName());
if (IsJSGlobalProxy()) {
// For a proxy, use the prototype as target object.
Object* proxy_parent = GetPrototype();
// If the proxy is detached, return undefined.
- if (proxy_parent->IsNull()) return GetHeap()->undefined_value();
+ if (proxy_parent->IsNull()) return GetHeap()->the_hole_value();
ASSERT(proxy_parent->IsJSGlobalObject());
return JSObject::cast(proxy_parent)->GetHiddenProperty(key);
}
@@ -3582,24 +4855,23 @@ Object* JSObject::GetHiddenProperty(String* key) {
if (inline_value->IsSmi()) {
// Handle inline-stored identity hash.
- if (key == GetHeap()->identity_hash_symbol()) {
+ if (key == GetHeap()->identity_hash_string()) {
return inline_value;
} else {
- return GetHeap()->undefined_value();
+ return GetHeap()->the_hole_value();
}
}
- if (inline_value->IsUndefined()) return GetHeap()->undefined_value();
+ if (inline_value->IsUndefined()) return GetHeap()->the_hole_value();
ObjectHashTable* hashtable = ObjectHashTable::cast(inline_value);
Object* entry = hashtable->Lookup(key);
- if (entry->IsTheHole()) return GetHeap()->undefined_value();
return entry;
}
Handle<Object> JSObject::SetHiddenProperty(Handle<JSObject> obj,
- Handle<String> key,
+ Handle<Name> key,
Handle<Object> value) {
CALL_HEAP_FUNCTION(obj->GetIsolate(),
obj->SetHiddenProperty(*key, *value),
@@ -3607,8 +4879,8 @@ Handle<Object> JSObject::SetHiddenProperty(Handle<JSObject> obj,
}
-MaybeObject* JSObject::SetHiddenProperty(String* key, Object* value) {
- ASSERT(key->IsSymbol());
+MaybeObject* JSObject::SetHiddenProperty(Name* key, Object* value) {
+ ASSERT(key->IsUniqueName());
if (IsJSGlobalProxy()) {
// For a proxy, use the prototype as target object.
Object* proxy_parent = GetPrototype();
@@ -3624,7 +4896,7 @@ MaybeObject* JSObject::SetHiddenProperty(String* key, Object* value) {
// If there is no backing store yet, store the identity hash inline.
if (value->IsSmi() &&
- key == GetHeap()->identity_hash_symbol() &&
+ key == GetHeap()->identity_hash_string() &&
(inline_value->IsUndefined() || inline_value->IsSmi())) {
return SetHiddenPropertiesHashTable(value);
}
@@ -3648,36 +4920,33 @@ MaybeObject* JSObject::SetHiddenProperty(String* key, Object* value) {
}
-void JSObject::DeleteHiddenProperty(String* key) {
- ASSERT(key->IsSymbol());
- if (IsJSGlobalProxy()) {
- // For a proxy, use the prototype as target object.
- Object* proxy_parent = GetPrototype();
- // If the proxy is detached, return immediately.
- if (proxy_parent->IsNull()) return;
- ASSERT(proxy_parent->IsJSGlobalObject());
- JSObject::cast(proxy_parent)->DeleteHiddenProperty(key);
- return;
+void JSObject::DeleteHiddenProperty(Handle<JSObject> object, Handle<Name> key) {
+ Isolate* isolate = object->GetIsolate();
+ ASSERT(key->IsUniqueName());
+
+ if (object->IsJSGlobalProxy()) {
+ Handle<Object> proto(object->GetPrototype(), isolate);
+ if (proto->IsNull()) return;
+ ASSERT(proto->IsJSGlobalObject());
+ return DeleteHiddenProperty(Handle<JSObject>::cast(proto), key);
}
- ASSERT(!IsJSGlobalProxy());
+
MaybeObject* hidden_lookup =
- GetHiddenPropertiesHashTable(ONLY_RETURN_INLINE_VALUE);
+ object->GetHiddenPropertiesHashTable(ONLY_RETURN_INLINE_VALUE);
Object* inline_value = hidden_lookup->ToObjectUnchecked();
// We never delete (inline-stored) identity hashes.
- ASSERT(key != GetHeap()->identity_hash_symbol());
+ ASSERT(*key != isolate->heap()->identity_hash_string());
if (inline_value->IsUndefined() || inline_value->IsSmi()) return;
- ObjectHashTable* hashtable = ObjectHashTable::cast(inline_value);
- MaybeObject* delete_result = hashtable->Put(key, GetHeap()->the_hole_value());
- USE(delete_result);
- ASSERT(!delete_result->IsFailure()); // Delete does not cause GC.
+ Handle<ObjectHashTable> hashtable(ObjectHashTable::cast(inline_value));
+ PutIntoObjectHashTable(hashtable, key, isolate->factory()->the_hole_value());
}
bool JSObject::HasHiddenProperties() {
return GetPropertyAttributePostInterceptor(this,
- GetHeap()->hidden_symbol(),
+ GetHeap()->hidden_string(),
false) != ABSENT;
}
@@ -3688,17 +4957,19 @@ MaybeObject* JSObject::GetHiddenPropertiesHashTable(
Object* inline_value;
if (HasFastProperties()) {
// If the object has fast properties, check whether the first slot
- // in the descriptor array matches the hidden symbol. Since the
- // hidden symbols hash code is zero (and no other string has hash
+ // in the descriptor array matches the hidden string. Since the
+    // hidden string's hash code is zero (and no other name has hash
// code zero) it will always occupy the first entry if present.
DescriptorArray* descriptors = this->map()->instance_descriptors();
if (descriptors->number_of_descriptors() > 0) {
int sorted_index = descriptors->GetSortedKeyIndex(0);
- if (descriptors->GetKey(sorted_index) == GetHeap()->hidden_symbol() &&
+ if (descriptors->GetKey(sorted_index) == GetHeap()->hidden_string() &&
sorted_index < map()->NumberOfOwnDescriptors()) {
ASSERT(descriptors->GetType(sorted_index) == FIELD);
- inline_value =
- this->FastPropertyAt(descriptors->GetFieldIndex(sorted_index));
+ MaybeObject* maybe_value = this->FastPropertyAt(
+ descriptors->GetDetails(sorted_index).representation(),
+ descriptors->GetFieldIndex(sorted_index));
+ if (!maybe_value->To(&inline_value)) return maybe_value;
} else {
inline_value = GetHeap()->undefined_value();
}
@@ -3707,12 +4978,12 @@ MaybeObject* JSObject::GetHiddenPropertiesHashTable(
}
} else {
PropertyAttributes attributes;
- // You can't install a getter on a property indexed by the hidden symbol,
+ // You can't install a getter on a property indexed by the hidden string,
// so we can be sure that GetLocalPropertyPostInterceptor returns a real
// object.
inline_value =
GetLocalPropertyPostInterceptor(this,
- GetHeap()->hidden_symbol(),
+ GetHeap()->hidden_string(),
&attributes)->ToObjectUnchecked();
}
@@ -3724,7 +4995,8 @@ MaybeObject* JSObject::GetHiddenPropertiesHashTable(
ObjectHashTable* hashtable;
static const int kInitialCapacity = 4;
MaybeObject* maybe_obj =
- ObjectHashTable::Allocate(kInitialCapacity,
+ ObjectHashTable::Allocate(GetHeap(),
+ kInitialCapacity,
ObjectHashTable::USE_CUSTOM_MINIMUM_CAPACITY);
if (!maybe_obj->To<ObjectHashTable>(&hashtable)) return maybe_obj;
@@ -3732,19 +5004,20 @@ MaybeObject* JSObject::GetHiddenPropertiesHashTable(
// We were storing the identity hash inline and now allocated an actual
// dictionary. Put the identity hash into the new dictionary.
MaybeObject* insert_result =
- hashtable->Put(GetHeap()->identity_hash_symbol(), inline_value);
+ hashtable->Put(GetHeap()->identity_hash_string(), inline_value);
ObjectHashTable* new_table;
if (!insert_result->To(&new_table)) return insert_result;
// We expect no resizing for the first insert.
ASSERT_EQ(hashtable, new_table);
}
- MaybeObject* store_result =
- SetPropertyPostInterceptor(GetHeap()->hidden_symbol(),
- hashtable,
- DONT_ENUM,
- kNonStrictMode,
- OMIT_EXTENSIBILITY_CHECK);
+ MaybeObject* store_result = SetLocalPropertyIgnoreAttributesTrampoline(
+ GetHeap()->hidden_string(),
+ hashtable,
+ DONT_ENUM,
+ OPTIMAL_REPRESENTATION,
+ ALLOW_AS_CONSTANT,
+ OMIT_EXTENSIBILITY_CHECK);
if (store_result->IsFailure()) return store_result;
return hashtable;
}
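
GetHiddenPropertiesHashTable above keeps the identity hash inlined for as long as it is the only hidden value, and only allocates a real table, seeded with the previously inlined hash, once another hidden property is stored. A standalone sketch of that promotion, using std::variant in place of V8's inline slot and ObjectHashTable (names are illustrative):

#include <cstdint>
#include <map>
#include <string>
#include <variant>

class HiddenProperties {
 public:
  void Set(const std::string& key, int32_t value) {
    bool has_table = std::holds_alternative<Table>(slot_);
    if (key == kIdentityHash && !has_table) {
      slot_ = value;  // keep the bare hash inlined; no table needed yet
      return;
    }
    if (!has_table) {
      Table table;
      if (const int32_t* hash = std::get_if<int32_t>(&slot_)) {
        table[kIdentityHash] = *hash;  // carry the inlined hash over
      }
      slot_ = std::move(table);
    }
    std::get<Table>(slot_)[key] = value;
  }

 private:
  typedef std::map<std::string, int32_t> Table;
  static constexpr const char* kIdentityHash = "identity_hash";
  // Empty, an inlined identity hash, or a full hidden-property table.
  std::variant<std::monostate, int32_t, Table> slot_;
};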
@@ -3757,250 +5030,283 @@ MaybeObject* JSObject::SetHiddenPropertiesHashTable(Object* value) {
ASSERT(HasHiddenProperties() != value->IsSmi());
if (HasFastProperties()) {
// If the object has fast properties, check whether the first slot
- // in the descriptor array matches the hidden symbol. Since the
- // hidden symbols hash code is zero (and no other string has hash
+ // in the descriptor array matches the hidden string. Since the
+    // hidden string's hash code is zero (and no other name has hash
// code zero) it will always occupy the first entry if present.
DescriptorArray* descriptors = this->map()->instance_descriptors();
if (descriptors->number_of_descriptors() > 0) {
int sorted_index = descriptors->GetSortedKeyIndex(0);
- if (descriptors->GetKey(sorted_index) == GetHeap()->hidden_symbol() &&
+ if (descriptors->GetKey(sorted_index) == GetHeap()->hidden_string() &&
sorted_index < map()->NumberOfOwnDescriptors()) {
ASSERT(descriptors->GetType(sorted_index) == FIELD);
- this->FastPropertyAtPut(descriptors->GetFieldIndex(sorted_index),
- value);
+ FastPropertyAtPut(descriptors->GetFieldIndex(sorted_index), value);
return this;
}
}
}
- MaybeObject* store_result =
- SetPropertyPostInterceptor(GetHeap()->hidden_symbol(),
- value,
- DONT_ENUM,
- kNonStrictMode,
- OMIT_EXTENSIBILITY_CHECK);
+ MaybeObject* store_result = SetLocalPropertyIgnoreAttributesTrampoline(
+ GetHeap()->hidden_string(),
+ value,
+ DONT_ENUM,
+ OPTIMAL_REPRESENTATION,
+ ALLOW_AS_CONSTANT,
+ OMIT_EXTENSIBILITY_CHECK);
if (store_result->IsFailure()) return store_result;
return this;
}
-MaybeObject* JSObject::DeletePropertyPostInterceptor(String* name,
- DeleteMode mode) {
+Handle<Object> JSObject::DeletePropertyPostInterceptor(Handle<JSObject> object,
+ Handle<Name> name,
+ DeleteMode mode) {
// Check local property, ignore interceptor.
- LookupResult result(GetIsolate());
- LocalLookupRealNamedProperty(name, &result);
- if (!result.IsFound()) return GetHeap()->true_value();
+ Isolate* isolate = object->GetIsolate();
+ LookupResult result(isolate);
+ object->LocalLookupRealNamedProperty(*name, &result);
+ if (!result.IsFound()) return isolate->factory()->true_value();
// Normalize object if needed.
- Object* obj;
- { MaybeObject* maybe_obj = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
+ NormalizeProperties(object, CLEAR_INOBJECT_PROPERTIES, 0);
- return DeleteNormalizedProperty(name, mode);
+ return DeleteNormalizedProperty(object, name, mode);
}
-MaybeObject* JSObject::DeletePropertyWithInterceptor(String* name) {
- Isolate* isolate = GetIsolate();
- HandleScope scope(isolate);
- Handle<InterceptorInfo> interceptor(GetNamedInterceptor());
- Handle<String> name_handle(name);
- Handle<JSObject> this_handle(this);
+Handle<Object> JSObject::DeletePropertyWithInterceptor(Handle<JSObject> object,
+ Handle<Name> name) {
+ Isolate* isolate = object->GetIsolate();
+
+ // TODO(rossberg): Support symbols in the API.
+ if (name->IsSymbol()) return isolate->factory()->false_value();
+
+ Handle<InterceptorInfo> interceptor(object->GetNamedInterceptor());
if (!interceptor->deleter()->IsUndefined()) {
- v8::NamedPropertyDeleter deleter =
- v8::ToCData<v8::NamedPropertyDeleter>(interceptor->deleter());
+ v8::NamedPropertyDeleterCallback deleter =
+ v8::ToCData<v8::NamedPropertyDeleterCallback>(interceptor->deleter());
LOG(isolate,
- ApiNamedPropertyAccess("interceptor-named-delete", *this_handle, name));
- CustomArguments args(isolate, interceptor->data(), this, this);
- v8::AccessorInfo info(args.end());
- v8::Handle<v8::Boolean> result;
- {
- // Leaving JavaScript.
- VMState state(isolate, EXTERNAL);
- result = deleter(v8::Utils::ToLocal(name_handle), info);
- }
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
+ ApiNamedPropertyAccess("interceptor-named-delete", *object, *name));
+ PropertyCallbackArguments args(
+ isolate, interceptor->data(), *object, *object);
+ v8::Handle<v8::Boolean> result =
+ args.Call(deleter, v8::Utils::ToLocal(Handle<String>::cast(name)));
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
if (!result.IsEmpty()) {
ASSERT(result->IsBoolean());
Handle<Object> result_internal = v8::Utils::OpenHandle(*result);
result_internal->VerifyApiCallResultType();
- return *result_internal;
+ // Rebox CustomArguments::kReturnValueOffset before returning.
+ return handle(*result_internal, isolate);
}
}
- MaybeObject* raw_result =
- this_handle->DeletePropertyPostInterceptor(*name_handle, NORMAL_DELETION);
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- return raw_result;
+ Handle<Object> result =
+ DeletePropertyPostInterceptor(object, name, NORMAL_DELETION);
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ return result;
}
-MaybeObject* JSObject::DeleteElementWithInterceptor(uint32_t index) {
- Isolate* isolate = GetIsolate();
- Heap* heap = isolate->heap();
+// TODO(mstarzinger): Temporary wrapper until handlified.
+static Handle<Object> AccessorDelete(Handle<JSObject> object,
+ uint32_t index,
+ JSObject::DeleteMode mode) {
+ CALL_HEAP_FUNCTION(object->GetIsolate(),
+ object->GetElementsAccessor()->Delete(*object,
+ index,
+ mode),
+ Object);
+}
+
+
+Handle<Object> JSObject::DeleteElementWithInterceptor(Handle<JSObject> object,
+ uint32_t index) {
+ Isolate* isolate = object->GetIsolate();
+ Factory* factory = isolate->factory();
+
// Make sure that the top context does not change when doing
// callbacks or interceptor calls.
- AssertNoContextChange ncc;
- HandleScope scope(isolate);
- Handle<InterceptorInfo> interceptor(GetIndexedInterceptor());
- if (interceptor->deleter()->IsUndefined()) return heap->false_value();
- v8::IndexedPropertyDeleter deleter =
- v8::ToCData<v8::IndexedPropertyDeleter>(interceptor->deleter());
- Handle<JSObject> this_handle(this);
+ AssertNoContextChange ncc(isolate);
+
+ Handle<InterceptorInfo> interceptor(object->GetIndexedInterceptor());
+ if (interceptor->deleter()->IsUndefined()) return factory->false_value();
+ v8::IndexedPropertyDeleterCallback deleter =
+ v8::ToCData<v8::IndexedPropertyDeleterCallback>(interceptor->deleter());
LOG(isolate,
- ApiIndexedPropertyAccess("interceptor-indexed-delete", this, index));
- CustomArguments args(isolate, interceptor->data(), this, this);
- v8::AccessorInfo info(args.end());
- v8::Handle<v8::Boolean> result;
- {
- // Leaving JavaScript.
- VMState state(isolate, EXTERNAL);
- result = deleter(index, info);
- }
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
+ ApiIndexedPropertyAccess("interceptor-indexed-delete", *object, index));
+ PropertyCallbackArguments args(
+ isolate, interceptor->data(), *object, *object);
+ v8::Handle<v8::Boolean> result = args.Call(deleter, index);
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
if (!result.IsEmpty()) {
ASSERT(result->IsBoolean());
Handle<Object> result_internal = v8::Utils::OpenHandle(*result);
result_internal->VerifyApiCallResultType();
- return *result_internal;
+ // Rebox CustomArguments::kReturnValueOffset before returning.
+ return handle(*result_internal, isolate);
}
- MaybeObject* raw_result = this_handle->GetElementsAccessor()->Delete(
- *this_handle,
- index,
- NORMAL_DELETION);
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- return raw_result;
+ Handle<Object> delete_result = AccessorDelete(object, index, NORMAL_DELETION);
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ return delete_result;
}
-Handle<Object> JSObject::DeleteElement(Handle<JSObject> obj,
- uint32_t index) {
- CALL_HEAP_FUNCTION(obj->GetIsolate(),
- obj->DeleteElement(index, JSObject::NORMAL_DELETION),
- Object);
-}
-
+Handle<Object> JSObject::DeleteElement(Handle<JSObject> object,
+ uint32_t index,
+ DeleteMode mode) {
+ Isolate* isolate = object->GetIsolate();
+ Factory* factory = isolate->factory();
-MaybeObject* JSObject::DeleteElement(uint32_t index, DeleteMode mode) {
- Isolate* isolate = GetIsolate();
// Check access rights if needed.
- if (IsAccessCheckNeeded() &&
- !isolate->MayIndexedAccess(this, index, v8::ACCESS_DELETE)) {
- isolate->ReportFailedAccessCheck(this, v8::ACCESS_DELETE);
- return isolate->heap()->false_value();
+ if (object->IsAccessCheckNeeded() &&
+ !isolate->MayIndexedAccess(*object, index, v8::ACCESS_DELETE)) {
+ isolate->ReportFailedAccessCheck(*object, v8::ACCESS_DELETE);
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ return factory->false_value();
}
- if (IsStringObjectWithCharacterAt(index)) {
+ if (object->IsStringObjectWithCharacterAt(index)) {
if (mode == STRICT_DELETION) {
// Deleting a non-configurable property in strict mode.
- HandleScope scope(isolate);
- Handle<Object> holder(this);
- Handle<Object> name = isolate->factory()->NewNumberFromUint(index);
- Handle<Object> args[2] = { name, holder };
+ Handle<Object> name = factory->NewNumberFromUint(index);
+ Handle<Object> args[2] = { name, object };
Handle<Object> error =
- isolate->factory()->NewTypeError("strict_delete_property",
- HandleVector(args, 2));
- return isolate->Throw(*error);
+ factory->NewTypeError("strict_delete_property",
+ HandleVector(args, 2));
+ isolate->Throw(*error);
+ return Handle<Object>();
}
- return isolate->heap()->false_value();
+ return factory->false_value();
}
- if (IsJSGlobalProxy()) {
- Object* proto = GetPrototype();
- if (proto->IsNull()) return isolate->heap()->false_value();
+ if (object->IsJSGlobalProxy()) {
+ Handle<Object> proto(object->GetPrototype(), isolate);
+ if (proto->IsNull()) return factory->false_value();
ASSERT(proto->IsJSGlobalObject());
- return JSGlobalObject::cast(proto)->DeleteElement(index, mode);
+ return DeleteElement(Handle<JSObject>::cast(proto), index, mode);
}
- if (HasIndexedInterceptor()) {
- // Skip interceptor if forcing deletion.
- if (mode != FORCE_DELETION) {
- return DeleteElementWithInterceptor(index);
+ Handle<Object> old_value;
+ bool should_enqueue_change_record = false;
+ if (FLAG_harmony_observation && object->map()->is_observed()) {
+ should_enqueue_change_record = HasLocalElement(object, index);
+ if (should_enqueue_change_record) {
+ old_value = object->GetLocalElementAccessorPair(index) != NULL
+ ? Handle<Object>::cast(factory->the_hole_value())
+ : Object::GetElement(isolate, object, index);
}
- mode = JSReceiver::FORCE_DELETION;
}
- return GetElementsAccessor()->Delete(this, index, mode);
-}
+ // Skip interceptor if forcing deletion.
+ Handle<Object> result;
+ if (object->HasIndexedInterceptor() && mode != FORCE_DELETION) {
+ result = DeleteElementWithInterceptor(object, index);
+ } else {
+ result = AccessorDelete(object, index, mode);
+ }
+ if (should_enqueue_change_record && !HasLocalElement(object, index)) {
+ Handle<String> name = factory->Uint32ToString(index);
+ EnqueueChangeRecord(object, "deleted", name, old_value);
+ }
-Handle<Object> JSObject::DeleteProperty(Handle<JSObject> obj,
- Handle<String> prop) {
- CALL_HEAP_FUNCTION(obj->GetIsolate(),
- obj->DeleteProperty(*prop, JSObject::NORMAL_DELETION),
- Object);
+ return result;
}
-MaybeObject* JSObject::DeleteProperty(String* name, DeleteMode mode) {
- Isolate* isolate = GetIsolate();
+Handle<Object> JSObject::DeleteProperty(Handle<JSObject> object,
+ Handle<Name> name,
+ DeleteMode mode) {
+ Isolate* isolate = object->GetIsolate();
// ECMA-262, 3rd, 8.6.2.5
- ASSERT(name->IsString());
+ ASSERT(name->IsName());
// Check access rights if needed.
- if (IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(this, name, v8::ACCESS_DELETE)) {
- isolate->ReportFailedAccessCheck(this, v8::ACCESS_DELETE);
- return isolate->heap()->false_value();
+ if (object->IsAccessCheckNeeded() &&
+ !isolate->MayNamedAccess(*object, *name, v8::ACCESS_DELETE)) {
+ isolate->ReportFailedAccessCheck(*object, v8::ACCESS_DELETE);
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ return isolate->factory()->false_value();
}
- if (IsJSGlobalProxy()) {
- Object* proto = GetPrototype();
- if (proto->IsNull()) return isolate->heap()->false_value();
+ if (object->IsJSGlobalProxy()) {
+ Object* proto = object->GetPrototype();
+ if (proto->IsNull()) return isolate->factory()->false_value();
ASSERT(proto->IsJSGlobalObject());
- return JSGlobalObject::cast(proto)->DeleteProperty(name, mode);
+ return JSGlobalObject::DeleteProperty(
+ handle(JSGlobalObject::cast(proto)), name, mode);
}
uint32_t index = 0;
if (name->AsArrayIndex(&index)) {
- return DeleteElement(index, mode);
- } else {
- LookupResult result(isolate);
- LocalLookup(name, &result);
- if (!result.IsFound()) return isolate->heap()->true_value();
- // Ignore attributes if forcing a deletion.
- if (result.IsDontDelete() && mode != FORCE_DELETION) {
- if (mode == STRICT_DELETION) {
- // Deleting a non-configurable property in strict mode.
- HandleScope scope(isolate);
- Handle<Object> args[2] = { Handle<Object>(name), Handle<Object>(this) };
- return isolate->Throw(*isolate->factory()->NewTypeError(
- "strict_delete_property", HandleVector(args, 2)));
- }
- return isolate->heap()->false_value();
+ return DeleteElement(object, index, mode);
+ }
+
+ LookupResult lookup(isolate);
+ object->LocalLookup(*name, &lookup, true);
+ if (!lookup.IsFound()) return isolate->factory()->true_value();
+ // Ignore attributes if forcing a deletion.
+ if (lookup.IsDontDelete() && mode != FORCE_DELETION) {
+ if (mode == STRICT_DELETION) {
+ // Deleting a non-configurable property in strict mode.
+ Handle<Object> args[2] = { name, object };
+ Handle<Object> error = isolate->factory()->NewTypeError(
+ "strict_delete_property", HandleVector(args, ARRAY_SIZE(args)));
+ isolate->Throw(*error);
+ return Handle<Object>();
}
- // Check for interceptor.
- if (result.IsInterceptor()) {
- // Skip interceptor if forcing a deletion.
- if (mode == FORCE_DELETION) {
- return DeletePropertyPostInterceptor(name, mode);
- }
- return DeletePropertyWithInterceptor(name);
+ return isolate->factory()->false_value();
+ }
+
+ Handle<Object> old_value = isolate->factory()->the_hole_value();
+ bool is_observed = FLAG_harmony_observation &&
+ object->map()->is_observed() &&
+ *name != isolate->heap()->hidden_string();
+ if (is_observed && lookup.IsDataProperty()) {
+ old_value = Object::GetProperty(object, name);
+ }
+ Handle<Object> result;
+
+ // Check for interceptor.
+ if (lookup.IsInterceptor()) {
+ // Skip interceptor if forcing a deletion.
+ if (mode == FORCE_DELETION) {
+ result = DeletePropertyPostInterceptor(object, name, mode);
+ } else {
+ result = DeletePropertyWithInterceptor(object, name);
}
+ } else {
// Normalize object if needed.
- Object* obj;
- { MaybeObject* maybe_obj =
- NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
+ NormalizeProperties(object, CLEAR_INOBJECT_PROPERTIES, 0);
// Make sure the properties are normalized before removing the entry.
- return DeleteNormalizedProperty(name, mode);
+ result = DeleteNormalizedProperty(object, name, mode);
+ }
+
+ if (is_observed && !HasLocalProperty(object, name)) {
+ EnqueueChangeRecord(object, "deleted", name, old_value);
}
+
+ return result;
}
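
Both DeleteElement and DeleteProperty above follow the same observation pattern: if the object is observed, capture the old value first, perform the delete, and only enqueue a "deleted" change record if the property is really gone afterwards. A standalone sketch of that ordering, with a map-based object model and a callback as illustrative stand-ins for the real object and the change-record queue:

#include <functional>
#include <map>
#include <optional>
#include <string>

typedef std::map<std::string, int> ObjectModel;
typedef std::function<void(const std::string& name, int old_value)> ChangeSink;

bool DeleteProperty(ObjectModel* object, const std::string& name,
                    bool is_observed, const ChangeSink& enqueue_deleted) {
  std::optional<int> old_value;
  if (is_observed) {
    ObjectModel::const_iterator it = object->find(name);
    if (it != object->end()) old_value = it->second;
  }
  bool deleted = object->erase(name) > 0;   // the actual delete
  if (is_observed && old_value.has_value() && object->count(name) == 0) {
    enqueue_deleted(name, *old_value);      // record only if it is really gone
  }
  return deleted;
}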
-MaybeObject* JSReceiver::DeleteElement(uint32_t index, DeleteMode mode) {
- if (IsJSProxy()) {
- return JSProxy::cast(this)->DeleteElementWithHandler(index, mode);
+Handle<Object> JSReceiver::DeleteElement(Handle<JSReceiver> object,
+ uint32_t index,
+ DeleteMode mode) {
+ if (object->IsJSProxy()) {
+ return JSProxy::DeleteElementWithHandler(
+ Handle<JSProxy>::cast(object), index, mode);
}
- return JSObject::cast(this)->DeleteElement(index, mode);
+ return JSObject::DeleteElement(Handle<JSObject>::cast(object), index, mode);
}
-MaybeObject* JSReceiver::DeleteProperty(String* name, DeleteMode mode) {
- if (IsJSProxy()) {
- return JSProxy::cast(this)->DeletePropertyWithHandler(name, mode);
+Handle<Object> JSReceiver::DeleteProperty(Handle<JSReceiver> object,
+ Handle<Name> name,
+ DeleteMode mode) {
+ if (object->IsJSProxy()) {
+ return JSProxy::DeletePropertyWithHandler(
+ Handle<JSProxy>::cast(object), name, mode);
}
- return JSObject::cast(this)->DeleteProperty(name, mode);
+ return JSObject::DeleteProperty(Handle<JSObject>::cast(object), name, mode);
}
@@ -4030,7 +5336,7 @@ bool JSObject::ReferencesObjectFromElements(FixedArray* elements,
bool JSObject::ReferencesObject(Object* obj) {
Map* map_of_this = map();
Heap* heap = GetHeap();
- AssertNoAllocation no_alloc;
+ DisallowHeapAllocation no_allocation;
// Is the object the constructor for this object?
if (map_of_this->constructor() == obj) {
@@ -4136,57 +5442,482 @@ bool JSObject::ReferencesObject(Object* obj) {
Handle<Object> JSObject::PreventExtensions(Handle<JSObject> object) {
- CALL_HEAP_FUNCTION(object->GetIsolate(), object->PreventExtensions(), Object);
-}
-
-
-MaybeObject* JSObject::PreventExtensions() {
- Isolate* isolate = GetIsolate();
- if (IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(this,
+ Isolate* isolate = object->GetIsolate();
+ if (object->IsAccessCheckNeeded() &&
+ !isolate->MayNamedAccess(*object,
isolate->heap()->undefined_value(),
v8::ACCESS_KEYS)) {
- isolate->ReportFailedAccessCheck(this, v8::ACCESS_KEYS);
- return isolate->heap()->false_value();
+ isolate->ReportFailedAccessCheck(*object, v8::ACCESS_KEYS);
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ return isolate->factory()->false_value();
}
- if (IsJSGlobalProxy()) {
- Object* proto = GetPrototype();
- if (proto->IsNull()) return this;
+ if (object->IsJSGlobalProxy()) {
+ Handle<Object> proto(object->GetPrototype(), isolate);
+ if (proto->IsNull()) return object;
ASSERT(proto->IsJSGlobalObject());
- return JSObject::cast(proto)->PreventExtensions();
+ return PreventExtensions(Handle<JSObject>::cast(proto));
}
// It's not possible to seal objects with external array elements
- if (HasExternalArrayElements()) {
- HandleScope scope(isolate);
- Handle<Object> object(this);
+ if (object->HasExternalArrayElements()) {
Handle<Object> error =
isolate->factory()->NewTypeError(
"cant_prevent_ext_external_array_elements",
HandleVector(&object, 1));
- return isolate->Throw(*error);
+ isolate->Throw(*error);
+ return Handle<Object>();
}
// If there are fast elements we normalize.
- SeededNumberDictionary* dictionary = NULL;
- { MaybeObject* maybe = NormalizeElements();
- if (!maybe->To<SeededNumberDictionary>(&dictionary)) return maybe;
- }
- ASSERT(HasDictionaryElements() || HasDictionaryArgumentsElements());
+ Handle<SeededNumberDictionary> dictionary = NormalizeElements(object);
+ ASSERT(object->HasDictionaryElements() ||
+ object->HasDictionaryArgumentsElements());
+
  // Make sure that we never go back to the fast case.
dictionary->set_requires_slow_elements();
  // Do a map transition; other objects with this map may still
// be extensible.
- Map* new_map;
- MaybeObject* maybe = map()->Copy();
- if (!maybe->To(&new_map)) return maybe;
+ // TODO(adamk): Extend the NormalizedMapCache to handle non-extensible maps.
+ Handle<Map> new_map = Map::Copy(handle(object->map()));
new_map->set_is_extensible(false);
- set_map(new_map);
- ASSERT(!map()->is_extensible());
- return new_map;
+ object->set_map(*new_map);
+ ASSERT(!object->map()->is_extensible());
+ return object;
+}
+
+
+template<typename Dictionary>
+static void FreezeDictionary(Dictionary* dictionary) {
+ int capacity = dictionary->Capacity();
+ for (int i = 0; i < capacity; i++) {
+ Object* k = dictionary->KeyAt(i);
+ if (dictionary->IsKey(k)) {
+ PropertyDetails details = dictionary->DetailsAt(i);
+ int attrs = DONT_DELETE;
+ // READ_ONLY is an invalid attribute for JS setters/getters.
+ if (details.type() != CALLBACKS ||
+ !dictionary->ValueAt(i)->IsAccessorPair()) {
+ attrs |= READ_ONLY;
+ }
+ details = details.CopyAddAttributes(
+ static_cast<PropertyAttributes>(attrs));
+ dictionary->DetailsAtPut(i, details);
+ }
+ }
+}
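
FreezeDictionary above visits every live entry and ORs DONT_DELETE into its details, adding READ_ONLY only for entries that are not accessor pairs, since freezing must not make setters/getters read-only. A standalone sketch of that loop with an illustrative Entry type instead of V8's dictionary layout:

#include <vector>

enum { READ_ONLY = 1, DONT_DELETE = 4 };

struct Entry {
  bool is_live = false;           // dictionary slots can be empty or deleted
  bool is_accessor_pair = false;  // value is a getter/setter pair
  int attributes = 0;
};

void FreezeDictionary(std::vector<Entry>* dictionary) {
  for (Entry& entry : *dictionary) {
    if (!entry.is_live) continue;
    int attrs = DONT_DELETE;
    // READ_ONLY is not a valid attribute for JS setters/getters.
    if (!entry.is_accessor_pair) attrs |= READ_ONLY;
    entry.attributes |= attrs;
  }
}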
+
+
+Handle<Object> JSObject::Freeze(Handle<JSObject> object) {
+ // Freezing non-strict arguments should be handled elsewhere.
+ ASSERT(!object->HasNonStrictArgumentsElements());
+
+ if (object->map()->is_frozen()) return object;
+
+ Isolate* isolate = object->GetIsolate();
+ if (object->IsAccessCheckNeeded() &&
+ !isolate->MayNamedAccess(*object,
+ isolate->heap()->undefined_value(),
+ v8::ACCESS_KEYS)) {
+ isolate->ReportFailedAccessCheck(*object, v8::ACCESS_KEYS);
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ return isolate->factory()->false_value();
+ }
+
+ if (object->IsJSGlobalProxy()) {
+ Handle<Object> proto(object->GetPrototype(), isolate);
+ if (proto->IsNull()) return object;
+ ASSERT(proto->IsJSGlobalObject());
+ return Freeze(Handle<JSObject>::cast(proto));
+ }
+
+ // It's not possible to freeze objects with external array elements
+ if (object->HasExternalArrayElements()) {
+ Handle<Object> error =
+ isolate->factory()->NewTypeError(
+ "cant_prevent_ext_external_array_elements",
+ HandleVector(&object, 1));
+ isolate->Throw(*error);
+ return Handle<Object>();
+ }
+
+ Handle<SeededNumberDictionary> new_element_dictionary;
+ if (!object->elements()->IsDictionary()) {
+ int length = object->IsJSArray()
+ ? Smi::cast(Handle<JSArray>::cast(object)->length())->value()
+ : object->elements()->length();
+ if (length > 0) {
+ int capacity = 0;
+ int used = 0;
+ object->GetElementsCapacityAndUsage(&capacity, &used);
+ new_element_dictionary =
+ isolate->factory()->NewSeededNumberDictionary(used);
+
+      // Move elements to a dictionary without calling NormalizeElements,
+      // to avoid unnecessary transitions.
+ new_element_dictionary = CopyFastElementsToDictionary(
+ handle(object->elements()), length, new_element_dictionary);
+ } else {
+      // No existing elements; use a pre-allocated empty backing store.
+ new_element_dictionary =
+ isolate->factory()->empty_slow_element_dictionary();
+ }
+ }
+
+ LookupResult result(isolate);
+ Handle<Map> old_map(object->map());
+ old_map->LookupTransition(*object, isolate->heap()->frozen_symbol(), &result);
+ if (result.IsTransition()) {
+ Map* transition_map = result.GetTransitionTarget();
+ ASSERT(transition_map->has_dictionary_elements());
+ ASSERT(transition_map->is_frozen());
+ ASSERT(!transition_map->is_extensible());
+ object->set_map(transition_map);
+ } else if (object->HasFastProperties() && old_map->CanHaveMoreTransitions()) {
+ // Create a new descriptor array with fully-frozen properties
+ int num_descriptors = old_map->NumberOfOwnDescriptors();
+ Handle<DescriptorArray> new_descriptors =
+ DescriptorArray::CopyUpToAddAttributes(
+ handle(old_map->instance_descriptors()), num_descriptors, FROZEN);
+ Handle<Map> new_map = Map::CopyReplaceDescriptors(
+ old_map, new_descriptors, INSERT_TRANSITION,
+ isolate->factory()->frozen_symbol());
+ new_map->freeze();
+ new_map->set_is_extensible(false);
+ new_map->set_elements_kind(DICTIONARY_ELEMENTS);
+ object->set_map(*new_map);
+ } else {
+ // Slow path: need to normalize properties for safety
+ NormalizeProperties(object, CLEAR_INOBJECT_PROPERTIES, 0);
+
+ // Create a new map, since other objects with this map may be extensible.
+ // TODO(adamk): Extend the NormalizedMapCache to handle non-extensible maps.
+ Handle<Map> new_map = Map::Copy(handle(object->map()));
+ new_map->freeze();
+ new_map->set_is_extensible(false);
+ new_map->set_elements_kind(DICTIONARY_ELEMENTS);
+ object->set_map(*new_map);
+
+ // Freeze dictionary-mode properties
+ FreezeDictionary(object->property_dictionary());
+ }
+
+ ASSERT(object->map()->has_dictionary_elements());
+ if (!new_element_dictionary.is_null()) {
+ object->set_elements(*new_element_dictionary);
+ }
+
+ if (object->elements() != isolate->heap()->empty_slow_element_dictionary()) {
+ SeededNumberDictionary* dictionary = object->element_dictionary();
+ // Make sure we never go back to the fast case
+ dictionary->set_requires_slow_elements();
+ // Freeze all elements in the dictionary
+ FreezeDictionary(dictionary);
+ }
+
+ return object;
+}
+
+
+void JSObject::SetObserved(Handle<JSObject> object) {
+ Isolate* isolate = object->GetIsolate();
+
+ if (object->map()->is_observed())
+ return;
+
+ if (!object->HasExternalArrayElements()) {
+ // Go to dictionary mode, so that we don't skip map checks.
+ NormalizeElements(object);
+ ASSERT(!object->HasFastElements());
+ }
+
+ LookupResult result(isolate);
+ object->map()->LookupTransition(*object,
+ isolate->heap()->observed_symbol(),
+ &result);
+
+ Handle<Map> new_map;
+ if (result.IsTransition()) {
+ new_map = handle(result.GetTransitionTarget());
+ ASSERT(new_map->is_observed());
+ } else if (object->map()->CanHaveMoreTransitions()) {
+ new_map = Map::CopyForObserved(handle(object->map()));
+ } else {
+ new_map = Map::Copy(handle(object->map()));
+ new_map->set_is_observed(true);
+ }
+ object->set_map(*new_map);
+}
+
+
+Handle<JSObject> JSObject::Copy(Handle<JSObject> object,
+ Handle<AllocationSite> site) {
+ Isolate* isolate = object->GetIsolate();
+ CALL_HEAP_FUNCTION(isolate,
+ isolate->heap()->CopyJSObject(*object, *site), JSObject);
+}
+
+
+Handle<JSObject> JSObject::Copy(Handle<JSObject> object) {
+ Isolate* isolate = object->GetIsolate();
+ CALL_HEAP_FUNCTION(isolate,
+ isolate->heap()->CopyJSObject(*object), JSObject);
+}
+
+
+class JSObjectWalkVisitor {
+ public:
+ explicit JSObjectWalkVisitor(AllocationSiteContext* site_context) :
+ site_context_(site_context) {}
+ virtual ~JSObjectWalkVisitor() {}
+
+ Handle<JSObject> Visit(Handle<JSObject> object) {
+ return StructureWalk(object);
+ }
+
+ virtual bool is_copying() = 0;
+
+ protected:
+ Handle<JSObject> StructureWalk(Handle<JSObject> object);
+
+ // The returned handle will be used for the object in all subsequent usages.
+ // This allows VisitObject to make a copy of the object if desired.
+ virtual Handle<JSObject> VisitObject(Handle<JSObject> object) = 0;
+ virtual Handle<JSObject> VisitElementOrProperty(Handle<JSObject> object,
+ Handle<JSObject> value) = 0;
+
+ AllocationSiteContext* site_context() { return site_context_; }
+
+ private:
+ AllocationSiteContext* site_context_;
+};
+
+
+class JSObjectCopyVisitor: public JSObjectWalkVisitor {
+ public:
+ explicit JSObjectCopyVisitor(AllocationSiteContext* site_context)
+ : JSObjectWalkVisitor(site_context) {}
+
+ virtual bool is_copying() V8_OVERRIDE { return true; }
+
+ // The returned handle will be used for the object in all
+ // subsequent usages. This allows VisitObject to make a copy
+ // of the object if desired.
+ virtual Handle<JSObject> VisitObject(Handle<JSObject> object) V8_OVERRIDE {
+    // Only create a memento if
+    // 1) we have a JSArray,
+    // 2) the elements kind is palatable, and
+    // 3) allow_mementos is true.
+ Handle<JSObject> copy;
+ if (site_context()->activated() &&
+ AllocationSite::CanTrack(object->map()->instance_type()) &&
+ AllocationSite::GetMode(object->GetElementsKind()) ==
+ TRACK_ALLOCATION_SITE) {
+ copy = JSObject::Copy(object, site_context()->current());
+ } else {
+ copy = JSObject::Copy(object);
+ }
+
+ return copy;
+ }
+
+ virtual Handle<JSObject> VisitElementOrProperty(
+ Handle<JSObject> object,
+ Handle<JSObject> value) V8_OVERRIDE {
+ Handle<AllocationSite> current_site = site_context()->EnterNewScope();
+ Handle<JSObject> copy_of_value = StructureWalk(value);
+ site_context()->ExitScope(current_site, value);
+ return copy_of_value;
+ }
+};
+
+
+class JSObjectCreateAllocationSitesVisitor: public JSObjectWalkVisitor {
+ public:
+ explicit JSObjectCreateAllocationSitesVisitor(
+ AllocationSiteContext* site_context)
+ : JSObjectWalkVisitor(site_context) {}
+
+ virtual bool is_copying() V8_OVERRIDE { return false; }
+
+ // The returned handle will be used for the object in all
+ // subsequent usages. This allows VisitObject to make a copy
+ // of the object if desired.
+ virtual Handle<JSObject> VisitObject(Handle<JSObject> object) V8_OVERRIDE {
+ return object;
+ }
+
+ virtual Handle<JSObject> VisitElementOrProperty(
+ Handle<JSObject> object,
+ Handle<JSObject> value) V8_OVERRIDE {
+ Handle<AllocationSite> current_site = site_context()->EnterNewScope();
+ value = StructureWalk(value);
+ site_context()->ExitScope(current_site, value);
+ return value;
+ }
+};
+
+
+Handle<JSObject> JSObjectWalkVisitor::StructureWalk(Handle<JSObject> object) {
+ bool copying = is_copying();
+ Isolate* isolate = object->GetIsolate();
+ StackLimitCheck check(isolate);
+ if (check.HasOverflowed()) {
+ isolate->StackOverflow();
+ return Handle<JSObject>::null();
+ }
+
+ if (object->map()->is_deprecated()) {
+ JSObject::MigrateInstance(object);
+ }
+
+ Handle<JSObject> copy = VisitObject(object);
+ ASSERT(copying || copy.is_identical_to(object));
+
+ HandleScope scope(isolate);
+
+ // Deep copy local properties.
+ if (copy->HasFastProperties()) {
+ Handle<DescriptorArray> descriptors(copy->map()->instance_descriptors());
+ int limit = copy->map()->NumberOfOwnDescriptors();
+ for (int i = 0; i < limit; i++) {
+ PropertyDetails details = descriptors->GetDetails(i);
+ if (details.type() != FIELD) continue;
+ int index = descriptors->GetFieldIndex(i);
+ Handle<Object> value(object->RawFastPropertyAt(index), isolate);
+ if (value->IsJSObject()) {
+ value = VisitElementOrProperty(copy, Handle<JSObject>::cast(value));
+ RETURN_IF_EMPTY_HANDLE_VALUE(isolate, value, Handle<JSObject>());
+ } else {
+ Representation representation = details.representation();
+ value = NewStorageFor(isolate, value, representation);
+ }
+ if (copying) {
+ copy->FastPropertyAtPut(index, *value);
+ }
+ }
+ } else {
+ Handle<FixedArray> names =
+ isolate->factory()->NewFixedArray(copy->NumberOfLocalProperties());
+ copy->GetLocalPropertyNames(*names, 0);
+ for (int i = 0; i < names->length(); i++) {
+ ASSERT(names->get(i)->IsString());
+ Handle<String> key_string(String::cast(names->get(i)));
+ PropertyAttributes attributes =
+ copy->GetLocalPropertyAttribute(*key_string);
+ // Only deep copy fields from the object literal expression.
+ // In particular, don't try to copy the length attribute of
+ // an array.
+ if (attributes != NONE) continue;
+ Handle<Object> value(
+ copy->GetProperty(*key_string, &attributes)->ToObjectUnchecked(),
+ isolate);
+ if (value->IsJSObject()) {
+ Handle<JSObject> result = VisitElementOrProperty(
+ copy, Handle<JSObject>::cast(value));
+ RETURN_IF_EMPTY_HANDLE_VALUE(isolate, result, Handle<JSObject>());
+ if (copying) {
+ // Creating object copy for literals. No strict mode needed.
+ CHECK_NOT_EMPTY_HANDLE(isolate, JSObject::SetProperty(
+ copy, key_string, result, NONE, kNonStrictMode));
+ }
+ }
+ }
+ }
+
+ // Deep copy local elements.
+ // Pixel elements cannot be created using an object literal.
+ ASSERT(!copy->HasExternalArrayElements());
+ switch (copy->GetElementsKind()) {
+ case FAST_SMI_ELEMENTS:
+ case FAST_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS: {
+ Handle<FixedArray> elements(FixedArray::cast(copy->elements()));
+ if (elements->map() == isolate->heap()->fixed_cow_array_map()) {
+ isolate->counters()->cow_arrays_created_runtime()->Increment();
+#ifdef DEBUG
+ for (int i = 0; i < elements->length(); i++) {
+ ASSERT(!elements->get(i)->IsJSObject());
+ }
+#endif
+ } else {
+ for (int i = 0; i < elements->length(); i++) {
+ Handle<Object> value(elements->get(i), isolate);
+ ASSERT(value->IsSmi() ||
+ value->IsTheHole() ||
+ (IsFastObjectElementsKind(copy->GetElementsKind())));
+ if (value->IsJSObject()) {
+ Handle<JSObject> result = VisitElementOrProperty(
+ copy, Handle<JSObject>::cast(value));
+ RETURN_IF_EMPTY_HANDLE_VALUE(isolate, result, Handle<JSObject>());
+ if (copying) {
+ elements->set(i, *result);
+ }
+ }
+ }
+ }
+ break;
+ }
+ case DICTIONARY_ELEMENTS: {
+ Handle<SeededNumberDictionary> element_dictionary(
+ copy->element_dictionary());
+ int capacity = element_dictionary->Capacity();
+ for (int i = 0; i < capacity; i++) {
+ Object* k = element_dictionary->KeyAt(i);
+ if (element_dictionary->IsKey(k)) {
+ Handle<Object> value(element_dictionary->ValueAt(i), isolate);
+ if (value->IsJSObject()) {
+ Handle<JSObject> result = VisitElementOrProperty(
+ copy, Handle<JSObject>::cast(value));
+ RETURN_IF_EMPTY_HANDLE_VALUE(isolate, result, Handle<JSObject>());
+ if (copying) {
+ element_dictionary->ValueAtPut(i, *result);
+ }
+ }
+ }
+ }
+ break;
+ }
+ case NON_STRICT_ARGUMENTS_ELEMENTS:
+ UNIMPLEMENTED();
+ break;
+ case EXTERNAL_PIXEL_ELEMENTS:
+ case EXTERNAL_BYTE_ELEMENTS:
+ case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ case EXTERNAL_SHORT_ELEMENTS:
+ case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ case EXTERNAL_INT_ELEMENTS:
+ case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ case EXTERNAL_FLOAT_ELEMENTS:
+ case EXTERNAL_DOUBLE_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
+ // No contained objects, nothing to do.
+ break;
+ }
+ return copy;
+}
+
+
+Handle<JSObject> JSObject::DeepWalk(Handle<JSObject> object,
+ AllocationSiteContext* site_context) {
+ JSObjectCreateAllocationSitesVisitor v(site_context);
+ Handle<JSObject> result = v.Visit(object);
+ ASSERT(!v.is_copying() &&
+ (result.is_null() || result.is_identical_to(object)));
+ return result;
+}
+
+
+Handle<JSObject> JSObject::DeepCopy(Handle<JSObject> object,
+ AllocationSiteContext* site_context) {
+ JSObjectCopyVisitor v(site_context);
+ Handle<JSObject> copy = v.Visit(object);
+ ASSERT(v.is_copying() && !copy.is_identical_to(object));
+ return copy;
}
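
The two entry points above share the same per-object traversal but differ only in whether the visitor copies. A minimal sketch of how a caller might drive them, with the helper name and the two pre-built context arguments assumed for illustration:

Handle<JSObject> InstrumentAndCopyBoilerplate(Handle<JSObject> boilerplate,
                                              AllocationSiteContext* creation_context,
                                              AllocationSiteContext* usage_context) {
  // DeepWalk only instruments the existing object graph: on success it hands
  // back the very object it was given, on failure an empty handle.
  Handle<JSObject> walked = JSObject::DeepWalk(boilerplate, creation_context);
  if (walked.is_null()) return walked;
  // DeepCopy yields a structurally identical but distinct object (again an
  // empty handle if an allocation fails along the way).
  return JSObject::DeepCopy(boilerplate, usage_context);
}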
@@ -4222,22 +5953,15 @@ int Map::NumberOfDescribedProperties(DescriptorFlag which,
? descs->number_of_descriptors()
: NumberOfOwnDescriptors();
for (int i = 0; i < limit; i++) {
- if ((descs->GetDetails(i).attributes() & filter) == 0) result++;
+ if ((descs->GetDetails(i).attributes() & filter) == 0 &&
+ ((filter & SYMBOLIC) == 0 || !descs->GetKey(i)->IsSymbol())) {
+ result++;
+ }
}
return result;
}
-int Map::PropertyIndexFor(String* name) {
- DescriptorArray* descs = instance_descriptors();
- int limit = NumberOfOwnDescriptors();
- for (int i = 0; i < limit; i++) {
- if (name->Equals(descs->GetKey(i))) return descs->GetFieldIndex(i);
- }
- return -1;
-}
-
-
int Map::NextFreePropertyIndex() {
int max_index = -1;
int number_of_own_descriptors = NumberOfOwnDescriptors();
@@ -4252,7 +5976,7 @@ int Map::NextFreePropertyIndex() {
}
-AccessorDescriptor* Map::FindAccessor(String* name) {
+AccessorDescriptor* Map::FindAccessor(Name* name) {
DescriptorArray* descs = instance_descriptors();
int number_of_own_descriptors = NumberOfOwnDescriptors();
for (int i = 0; i < number_of_own_descriptors; i++) {
@@ -4264,8 +5988,9 @@ AccessorDescriptor* Map::FindAccessor(String* name) {
}
-void JSReceiver::LocalLookup(String* name, LookupResult* result) {
- ASSERT(name->IsString());
+void JSReceiver::LocalLookup(
+ Name* name, LookupResult* result, bool search_hidden_prototypes) {
+ ASSERT(name->IsName());
Heap* heap = GetHeap();
@@ -4273,7 +5998,8 @@ void JSReceiver::LocalLookup(String* name, LookupResult* result) {
Object* proto = GetPrototype();
if (proto->IsNull()) return result->NotFound();
ASSERT(proto->IsJSGlobalObject());
- return JSReceiver::cast(proto)->LocalLookup(name, result);
+ return JSReceiver::cast(proto)->LocalLookup(
+ name, result, search_hidden_prototypes);
}
if (IsJSProxy()) {
@@ -4289,12 +6015,6 @@ void JSReceiver::LocalLookup(String* name, LookupResult* result) {
JSObject* js_object = JSObject::cast(this);
- // Check __proto__ before interceptor.
- if (name->Equals(heap->Proto_symbol()) && !IsJSContextExtensionObject()) {
- result->ConstantResult(js_object);
- return;
- }
-
// Check for lookup interceptor except when bootstrapping.
if (js_object->HasNamedInterceptor() &&
!heap->isolate()->bootstrapper()->IsActive()) {
@@ -4303,16 +6023,24 @@ void JSReceiver::LocalLookup(String* name, LookupResult* result) {
}
js_object->LocalLookupRealNamedProperty(name, result);
+ if (result->IsFound() || !search_hidden_prototypes) return;
+
+ Object* proto = js_object->GetPrototype();
+ if (!proto->IsJSReceiver()) return;
+ JSReceiver* receiver = JSReceiver::cast(proto);
+ if (receiver->map()->is_hidden_prototype()) {
+ receiver->LocalLookup(name, result, search_hidden_prototypes);
+ }
}
-void JSReceiver::Lookup(String* name, LookupResult* result) {
+void JSReceiver::Lookup(Name* name, LookupResult* result) {
// Ecma-262 3rd 8.6.2.4
Heap* heap = GetHeap();
for (Object* current = this;
current != heap->null_value();
current = JSObject::cast(current)->GetPrototype()) {
- JSReceiver::cast(current)->LocalLookup(name, result);
+ JSReceiver::cast(current)->LocalLookup(name, result, false);
if (result->IsFound()) return;
}
result->NotFound();
@@ -4320,7 +6048,7 @@ void JSReceiver::Lookup(String* name, LookupResult* result) {
// Search object and its prototype chain for callback properties.
-void JSObject::LookupCallbackProperty(String* name, LookupResult* result) {
+void JSObject::LookupCallbackProperty(Name* name, LookupResult* result) {
Heap* heap = GetHeap();
for (Object* current = this;
current != heap->null_value() && current->IsJSObject();
@@ -4347,8 +6075,9 @@ static bool UpdateGetterSetterInDictionary(
if (details.type() == CALLBACKS && result->IsAccessorPair()) {
ASSERT(!details.IsDontDelete());
if (details.attributes() != attributes) {
- dictionary->DetailsAtPut(entry,
- PropertyDetails(attributes, CALLBACKS, index));
+ dictionary->DetailsAtPut(
+ entry,
+ PropertyDetails(attributes, CALLBACKS, index));
}
AccessorPair::cast(result)->SetComponents(getter, setter);
return true;
@@ -4358,11 +6087,13 @@ static bool UpdateGetterSetterInDictionary(
}
-MaybeObject* JSObject::DefineElementAccessor(uint32_t index,
- Object* getter,
- Object* setter,
- PropertyAttributes attributes) {
- switch (GetElementsKind()) {
+void JSObject::DefineElementAccessor(Handle<JSObject> object,
+ uint32_t index,
+ Handle<Object> getter,
+ Handle<Object> setter,
+ PropertyAttributes attributes,
+ v8::AccessControl access_control) {
+ switch (object->GetElementsKind()) {
case FAST_SMI_ELEMENTS:
case FAST_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
@@ -4380,21 +6111,21 @@ MaybeObject* JSObject::DefineElementAccessor(uint32_t index,
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
// Ignore getters and setters on pixel and external array elements.
- return GetHeap()->undefined_value();
+ return;
case DICTIONARY_ELEMENTS:
- if (UpdateGetterSetterInDictionary(element_dictionary(),
+ if (UpdateGetterSetterInDictionary(object->element_dictionary(),
index,
- getter,
- setter,
+ *getter,
+ *setter,
attributes)) {
- return GetHeap()->undefined_value();
+ return;
}
break;
case NON_STRICT_ARGUMENTS_ELEMENTS: {
// Ascertain whether we have read-only properties or an existing
// getter/setter pair in an arguments elements dictionary backing
// store.
- FixedArray* parameter_map = FixedArray::cast(elements());
+ FixedArray* parameter_map = FixedArray::cast(object->elements());
uint32_t length = parameter_map->length();
Object* probe =
index < (length - 2) ? parameter_map->get(index + 2) : NULL;
@@ -4405,10 +6136,10 @@ MaybeObject* JSObject::DefineElementAccessor(uint32_t index,
SeededNumberDictionary::cast(arguments);
if (UpdateGetterSetterInDictionary(dictionary,
index,
- getter,
- setter,
+ *getter,
+ *setter,
attributes)) {
- return GetHeap()->undefined_value();
+ return;
}
}
}
@@ -4416,19 +6147,20 @@ MaybeObject* JSObject::DefineElementAccessor(uint32_t index,
}
}
- AccessorPair* accessors;
- { MaybeObject* maybe_accessors = GetHeap()->AllocateAccessorPair();
- if (!maybe_accessors->To(&accessors)) return maybe_accessors;
- }
- accessors->SetComponents(getter, setter);
+ Isolate* isolate = object->GetIsolate();
+ Handle<AccessorPair> accessors = isolate->factory()->NewAccessorPair();
+ accessors->SetComponents(*getter, *setter);
+ accessors->set_access_flags(access_control);
- return SetElementCallback(index, accessors, attributes);
+ SetElementCallback(object, index, accessors, attributes);
}
-MaybeObject* JSObject::CreateAccessorPairFor(String* name) {
- LookupResult result(GetHeap()->isolate());
- LocalLookupRealNamedProperty(name, &result);
+Handle<AccessorPair> JSObject::CreateAccessorPairFor(Handle<JSObject> object,
+ Handle<Name> name) {
+ Isolate* isolate = object->GetIsolate();
+ LookupResult result(isolate);
+ object->LocalLookupRealNamedProperty(*name, &result);
if (result.IsPropertyCallbacks()) {
// Note that the result can actually have IsDontDelete() == true when we
// e.g. have to fall back to the slow case while adding a setter after
@@ -4438,49 +6170,42 @@ MaybeObject* JSObject::CreateAccessorPairFor(String* name) {
// DefinePropertyAccessor below.
Object* obj = result.GetCallbackObject();
if (obj->IsAccessorPair()) {
- return AccessorPair::cast(obj)->Copy();
+ return AccessorPair::Copy(handle(AccessorPair::cast(obj), isolate));
}
}
- return GetHeap()->AllocateAccessorPair();
+ return isolate->factory()->NewAccessorPair();
}
-MaybeObject* JSObject::DefinePropertyAccessor(String* name,
- Object* getter,
- Object* setter,
- PropertyAttributes attributes) {
+void JSObject::DefinePropertyAccessor(Handle<JSObject> object,
+ Handle<Name> name,
+ Handle<Object> getter,
+ Handle<Object> setter,
+ PropertyAttributes attributes,
+ v8::AccessControl access_control) {
// We could assert that the property is configurable here, but we would need
// to do a lookup, which seems to be a bit of overkill.
- Heap* heap = GetHeap();
bool only_attribute_changes = getter->IsNull() && setter->IsNull();
- if (HasFastProperties() && !only_attribute_changes) {
- MaybeObject* getterOk = heap->undefined_value();
- if (!getter->IsNull()) {
- getterOk = DefineFastAccessor(name, ACCESSOR_GETTER, getter, attributes);
- if (getterOk->IsFailure()) return getterOk;
- }
-
- MaybeObject* setterOk = heap->undefined_value();
- if (getterOk != heap->null_value() && !setter->IsNull()) {
- setterOk = DefineFastAccessor(name, ACCESSOR_SETTER, setter, attributes);
- if (setterOk->IsFailure()) return setterOk;
- }
-
- if (getterOk != heap->null_value() && setterOk != heap->null_value()) {
- return heap->undefined_value();
- }
+ if (object->HasFastProperties() && !only_attribute_changes &&
+ access_control == v8::DEFAULT &&
+ (object->map()->NumberOfOwnDescriptors() <
+ DescriptorArray::kMaxNumberOfDescriptors)) {
+ bool getterOk = getter->IsNull() ||
+ DefineFastAccessor(object, name, ACCESSOR_GETTER, getter, attributes);
+ bool setterOk = !getterOk || setter->IsNull() ||
+ DefineFastAccessor(object, name, ACCESSOR_SETTER, setter, attributes);
+ if (getterOk && setterOk) return;
}
- AccessorPair* accessors;
- MaybeObject* maybe_accessors = CreateAccessorPairFor(name);
- if (!maybe_accessors->To(&accessors)) return maybe_accessors;
+ Handle<AccessorPair> accessors = CreateAccessorPairFor(object, name);
+ accessors->SetComponents(*getter, *setter);
+ accessors->set_access_flags(access_control);
- accessors->SetComponents(getter, setter);
- return SetPropertyCallback(name, accessors, attributes);
+ SetPropertyCallback(object, name, accessors, attributes);
}
-bool JSObject::CanSetCallback(String* name) {
+bool JSObject::CanSetCallback(Name* name) {
ASSERT(!IsAccessCheckNeeded() ||
GetIsolate()->MayNamedAccess(this, name, v8::ACCESS_SET));
@@ -4494,144 +6219,166 @@ bool JSObject::CanSetCallback(String* name) {
LookupCallbackProperty(name, &callback_result);
if (callback_result.IsFound()) {
Object* obj = callback_result.GetCallbackObject();
- if (obj->IsAccessorInfo() &&
- AccessorInfo::cast(obj)->prohibits_overwriting()) {
- return false;
+ if (obj->IsAccessorInfo()) {
+ return !AccessorInfo::cast(obj)->prohibits_overwriting();
+ }
+ if (obj->IsAccessorPair()) {
+ return !AccessorPair::cast(obj)->prohibits_overwriting();
}
}
-
return true;
}
-MaybeObject* JSObject::SetElementCallback(uint32_t index,
- Object* structure,
- PropertyAttributes attributes) {
- PropertyDetails details = PropertyDetails(attributes, CALLBACKS);
+void JSObject::SetElementCallback(Handle<JSObject> object,
+ uint32_t index,
+ Handle<Object> structure,
+ PropertyAttributes attributes) {
+ Heap* heap = object->GetHeap();
+ PropertyDetails details = PropertyDetails(attributes, CALLBACKS, 0);
// Normalize elements to make this operation simple.
- SeededNumberDictionary* dictionary;
- { MaybeObject* maybe_dictionary = NormalizeElements();
- if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
- }
- ASSERT(HasDictionaryElements() || HasDictionaryArgumentsElements());
+ Handle<SeededNumberDictionary> dictionary = NormalizeElements(object);
+ ASSERT(object->HasDictionaryElements() ||
+ object->HasDictionaryArgumentsElements());
// Update the dictionary with the new CALLBACKS property.
- { MaybeObject* maybe_dictionary = dictionary->Set(index, structure, details);
- if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
- }
-
+ dictionary = SeededNumberDictionary::Set(dictionary, index, structure,
+ details);
dictionary->set_requires_slow_elements();
+
// Update the dictionary backing store on the object.
- if (elements()->map() == GetHeap()->non_strict_arguments_elements_map()) {
+ if (object->elements()->map() == heap->non_strict_arguments_elements_map()) {
// Also delete any parameter alias.
//
// TODO(kmillikin): when deleting the last parameter alias we could
// switch to a direct backing store without the parameter map. This
// would allow GC of the context.
- FixedArray* parameter_map = FixedArray::cast(elements());
+ FixedArray* parameter_map = FixedArray::cast(object->elements());
if (index < static_cast<uint32_t>(parameter_map->length()) - 2) {
- parameter_map->set(index + 2, GetHeap()->the_hole_value());
+ parameter_map->set(index + 2, heap->the_hole_value());
}
- parameter_map->set(1, dictionary);
+ parameter_map->set(1, *dictionary);
} else {
- set_elements(dictionary);
+ object->set_elements(*dictionary);
}
-
- return GetHeap()->undefined_value();
}
-MaybeObject* JSObject::SetPropertyCallback(String* name,
- Object* structure,
- PropertyAttributes attributes) {
+void JSObject::SetPropertyCallback(Handle<JSObject> object,
+ Handle<Name> name,
+ Handle<Object> structure,
+ PropertyAttributes attributes) {
// Normalize object to make this operation simple.
- MaybeObject* maybe_ok = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
- if (maybe_ok->IsFailure()) return maybe_ok;
+ NormalizeProperties(object, CLEAR_INOBJECT_PROPERTIES, 0);
// For the global object allocate a new map to invalidate the global inline
// caches which have a global property cell reference directly in the code.
- if (IsGlobalObject()) {
- Map* new_map;
- MaybeObject* maybe_new_map = map()->CopyDropDescriptors();
- if (!maybe_new_map->To(&new_map)) return maybe_new_map;
+ if (object->IsGlobalObject()) {
+ Handle<Map> new_map = Map::CopyDropDescriptors(handle(object->map()));
ASSERT(new_map->is_dictionary_map());
+ object->set_map(*new_map);
- set_map(new_map);
// When running crankshaft, changing the map is not enough. We
// need to deoptimize all functions that rely on this global
// object.
- Deoptimizer::DeoptimizeGlobalObject(this);
+ Deoptimizer::DeoptimizeGlobalObject(*object);
}
// Update the dictionary with the new CALLBACKS property.
- PropertyDetails details = PropertyDetails(attributes, CALLBACKS);
- maybe_ok = SetNormalizedProperty(name, structure, details);
- if (maybe_ok->IsFailure()) return maybe_ok;
-
- return GetHeap()->undefined_value();
+ PropertyDetails details = PropertyDetails(attributes, CALLBACKS, 0);
+ SetNormalizedProperty(object, name, structure, details);
}
void JSObject::DefineAccessor(Handle<JSObject> object,
- Handle<String> name,
+ Handle<Name> name,
Handle<Object> getter,
Handle<Object> setter,
- PropertyAttributes attributes) {
- CALL_HEAP_FUNCTION_VOID(
- object->GetIsolate(),
- object->DefineAccessor(*name, *getter, *setter, attributes));
-}
-
-MaybeObject* JSObject::DefineAccessor(String* name,
- Object* getter,
- Object* setter,
- PropertyAttributes attributes) {
- Isolate* isolate = GetIsolate();
+ PropertyAttributes attributes,
+ v8::AccessControl access_control) {
+ Isolate* isolate = object->GetIsolate();
// Check access rights if needed.
- if (IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(this, name, v8::ACCESS_SET)) {
- isolate->ReportFailedAccessCheck(this, v8::ACCESS_SET);
- return isolate->heap()->undefined_value();
+ if (object->IsAccessCheckNeeded() &&
+ !isolate->MayNamedAccess(*object, *name, v8::ACCESS_SET)) {
+ isolate->ReportFailedAccessCheck(*object, v8::ACCESS_SET);
+ return;
}
- if (IsJSGlobalProxy()) {
- Object* proto = GetPrototype();
- if (proto->IsNull()) return this;
+ if (object->IsJSGlobalProxy()) {
+ Handle<Object> proto(object->GetPrototype(), isolate);
+ if (proto->IsNull()) return;
ASSERT(proto->IsJSGlobalObject());
- return JSObject::cast(proto)->DefineAccessor(
- name, getter, setter, attributes);
+ DefineAccessor(Handle<JSObject>::cast(proto),
+ name,
+ getter,
+ setter,
+ attributes,
+ access_control);
+ return;
}
// Make sure that the top context does not change when doing callbacks or
// interceptor calls.
- AssertNoContextChange ncc;
+ AssertNoContextChange ncc(isolate);
// Try to flatten before operating on the string.
- name->TryFlatten();
+ if (name->IsString()) String::cast(*name)->TryFlatten();
- if (!CanSetCallback(name)) return isolate->heap()->undefined_value();
+ if (!object->CanSetCallback(*name)) return;
uint32_t index = 0;
- return name->AsArrayIndex(&index) ?
- DefineElementAccessor(index, getter, setter, attributes) :
- DefinePropertyAccessor(name, getter, setter, attributes);
+ bool is_element = name->AsArrayIndex(&index);
+
+ Handle<Object> old_value = isolate->factory()->the_hole_value();
+ bool is_observed = FLAG_harmony_observation &&
+ object->map()->is_observed() &&
+ *name != isolate->heap()->hidden_string();
+ bool preexists = false;
+ if (is_observed) {
+ if (is_element) {
+ preexists = HasLocalElement(object, index);
+ if (preexists && object->GetLocalElementAccessorPair(index) == NULL) {
+ old_value = Object::GetElement(isolate, object, index);
+ }
+ } else {
+ LookupResult lookup(isolate);
+ object->LocalLookup(*name, &lookup, true);
+ preexists = lookup.IsProperty();
+ if (preexists && lookup.IsDataProperty()) {
+ old_value = Object::GetProperty(object, name);
+ }
+ }
+ }
+
+ if (is_element) {
+ DefineElementAccessor(
+ object, index, getter, setter, attributes, access_control);
+ } else {
+ DefinePropertyAccessor(
+ object, name, getter, setter, attributes, access_control);
+ }
+
+ if (is_observed) {
+ const char* type = preexists ? "reconfigured" : "new";
+ EnqueueChangeRecord(object, type, name, old_value);
+ }
}
-static MaybeObject* TryAccessorTransition(JSObject* self,
- Map* transitioned_map,
- int target_descriptor,
- AccessorComponent component,
- Object* accessor,
- PropertyAttributes attributes) {
+static bool TryAccessorTransition(JSObject* self,
+ Map* transitioned_map,
+ int target_descriptor,
+ AccessorComponent component,
+ Object* accessor,
+ PropertyAttributes attributes) {
DescriptorArray* descs = transitioned_map->instance_descriptors();
PropertyDetails details = descs->GetDetails(target_descriptor);
// If the transition target was not callbacks, fall back to the slow case.
- if (details.type() != CALLBACKS) return self->GetHeap()->null_value();
+ if (details.type() != CALLBACKS) return false;
Object* descriptor = descs->GetCallbacksObject(target_descriptor);
- if (!descriptor->IsAccessorPair()) return self->GetHeap()->null_value();
+ if (!descriptor->IsAccessorPair()) return false;
Object* target_accessor = AccessorPair::cast(descriptor)->get(component);
PropertyAttributes target_attributes = details.attributes();
@@ -4639,26 +6386,47 @@ static MaybeObject* TryAccessorTransition(JSObject* self,
// Reuse transition if adding same accessor with same attributes.
if (target_accessor == accessor && target_attributes == attributes) {
self->set_map(transitioned_map);
- return self;
+ return true;
}
// If either not the same accessor, or not the same attributes, fall back to
// the slow case.
- return self->GetHeap()->null_value();
+ return false;
}
-MaybeObject* JSObject::DefineFastAccessor(String* name,
- AccessorComponent component,
- Object* accessor,
- PropertyAttributes attributes) {
+static MaybeObject* CopyInsertDescriptor(Map* map,
+ Name* name,
+ AccessorPair* accessors,
+ PropertyAttributes attributes) {
+ CallbacksDescriptor new_accessors_desc(name, accessors, attributes);
+ return map->CopyInsertDescriptor(&new_accessors_desc, INSERT_TRANSITION);
+}
+
+
+static Handle<Map> CopyInsertDescriptor(Handle<Map> map,
+ Handle<Name> name,
+ Handle<AccessorPair> accessors,
+ PropertyAttributes attributes) {
+ CALL_HEAP_FUNCTION(map->GetIsolate(),
+ CopyInsertDescriptor(*map, *name, *accessors, attributes),
+ Map);
+}
+
+
+bool JSObject::DefineFastAccessor(Handle<JSObject> object,
+ Handle<Name> name,
+ AccessorComponent component,
+ Handle<Object> accessor,
+ PropertyAttributes attributes) {
ASSERT(accessor->IsSpecFunction() || accessor->IsUndefined());
- LookupResult result(GetIsolate());
- LocalLookup(name, &result);
+ Isolate* isolate = object->GetIsolate();
+ LookupResult result(isolate);
+ object->LocalLookup(*name, &result);
- if (result.IsFound()
- && !result.IsPropertyCallbacks()
- && !result.IsTransition()) return GetHeap()->null_value();
+ if (result.IsFound() && !result.IsPropertyCallbacks()) {
+ return false;
+ }
// Return success if the same accessor with the same attributes already exists.
AccessorPair* source_accessors = NULL;
@@ -4667,101 +6435,94 @@ MaybeObject* JSObject::DefineFastAccessor(String* name,
if (callback_value->IsAccessorPair()) {
source_accessors = AccessorPair::cast(callback_value);
Object* entry = source_accessors->get(component);
- if (entry == accessor && result.GetAttributes() == attributes) {
- return this;
+ if (entry == *accessor && result.GetAttributes() == attributes) {
+ return true;
}
} else {
- return GetHeap()->null_value();
+ return false;
}
int descriptor_number = result.GetDescriptorIndex();
- map()->LookupTransition(this, name, &result);
+ object->map()->LookupTransition(*object, *name, &result);
if (result.IsFound()) {
Map* target = result.GetTransitionTarget();
ASSERT(target->NumberOfOwnDescriptors() ==
- map()->NumberOfOwnDescriptors());
+ object->map()->NumberOfOwnDescriptors());
// This works since descriptors are sorted in order of addition.
- ASSERT(map()->instance_descriptors()->GetKey(descriptor_number) == name);
- return TryAccessorTransition(
- this, target, descriptor_number, component, accessor, attributes);
+ ASSERT(object->map()->instance_descriptors()->
+ GetKey(descriptor_number) == *name);
+ return TryAccessorTransition(*object, target, descriptor_number,
+ component, *accessor, attributes);
}
} else {
// If not, lookup a transition.
- map()->LookupTransition(this, name, &result);
+ object->map()->LookupTransition(*object, *name, &result);
// If there is a transition, try to follow it.
if (result.IsFound()) {
Map* target = result.GetTransitionTarget();
int descriptor_number = target->LastAdded();
- ASSERT(target->instance_descriptors()->GetKey(descriptor_number) == name);
- return TryAccessorTransition(
- this, target, descriptor_number, component, accessor, attributes);
+ ASSERT(target->instance_descriptors()->GetKey(descriptor_number)
+ ->Equals(*name));
+ return TryAccessorTransition(*object, target, descriptor_number,
+ component, *accessor, attributes);
}
}
// If there is no transition yet, add a transition to a new accessor pair
- // containing the accessor.
- AccessorPair* accessors;
- MaybeObject* maybe_accessors;
-
- // Allocate a new pair if there were no source accessors. Otherwise, copy the
- // pair and modify the accessor.
- if (source_accessors != NULL) {
- maybe_accessors = source_accessors->Copy();
- } else {
- maybe_accessors = GetHeap()->AllocateAccessorPair();
- }
- if (!maybe_accessors->To(&accessors)) return maybe_accessors;
- accessors->set(component, accessor);
-
- CallbacksDescriptor new_accessors_desc(name, accessors, attributes);
-
- Map* new_map;
- MaybeObject* maybe_new_map =
- map()->CopyInsertDescriptor(&new_accessors_desc, INSERT_TRANSITION);
- if (!maybe_new_map->To(&new_map)) return maybe_new_map;
-
- set_map(new_map);
- return this;
+ // containing the accessor. Allocate a new pair if there were no source
+ // accessors. Otherwise, copy the pair and modify the accessor.
+ Handle<AccessorPair> accessors = source_accessors != NULL
+ ? AccessorPair::Copy(Handle<AccessorPair>(source_accessors))
+ : isolate->factory()->NewAccessorPair();
+ accessors->set(component, *accessor);
+ Handle<Map> new_map = CopyInsertDescriptor(Handle<Map>(object->map()),
+ name, accessors, attributes);
+ object->set_map(*new_map);
+ return true;
}
-MaybeObject* JSObject::DefineAccessor(AccessorInfo* info) {
- Isolate* isolate = GetIsolate();
- String* name = String::cast(info->name());
+Handle<Object> JSObject::SetAccessor(Handle<JSObject> object,
+ Handle<AccessorInfo> info) {
+ Isolate* isolate = object->GetIsolate();
+ Factory* factory = isolate->factory();
+ Handle<Name> name(Name::cast(info->name()));
+
// Check access rights if needed.
- if (IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(this, name, v8::ACCESS_SET)) {
- isolate->ReportFailedAccessCheck(this, v8::ACCESS_SET);
- return isolate->heap()->undefined_value();
+ if (object->IsAccessCheckNeeded() &&
+ !isolate->MayNamedAccess(*object, *name, v8::ACCESS_SET)) {
+ isolate->ReportFailedAccessCheck(*object, v8::ACCESS_SET);
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ return factory->undefined_value();
}
- if (IsJSGlobalProxy()) {
- Object* proto = GetPrototype();
- if (proto->IsNull()) return this;
+ if (object->IsJSGlobalProxy()) {
+ Handle<Object> proto(object->GetPrototype(), isolate);
+ if (proto->IsNull()) return object;
ASSERT(proto->IsJSGlobalObject());
- return JSObject::cast(proto)->DefineAccessor(info);
+ return SetAccessor(Handle<JSObject>::cast(proto), info);
}
// Make sure that the top context does not change when doing callbacks or
// interceptor calls.
- AssertNoContextChange ncc;
+ AssertNoContextChange ncc(isolate);
// Try to flatten before operating on the string.
- name->TryFlatten();
+ if (name->IsString()) FlattenString(Handle<String>::cast(name));
- if (!CanSetCallback(name)) return isolate->heap()->undefined_value();
+ if (!object->CanSetCallback(*name)) return factory->undefined_value();
uint32_t index = 0;
bool is_element = name->AsArrayIndex(&index);
if (is_element) {
- if (IsJSArray()) return isolate->heap()->undefined_value();
+ if (object->IsJSArray()) return factory->undefined_value();
// Accessors overwrite previous callbacks (cf. with getters/setters).
- switch (GetElementsKind()) {
+ switch (object->GetElementsKind()) {
case FAST_SMI_ELEMENTS:
case FAST_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
@@ -4780,7 +6541,7 @@ MaybeObject* JSObject::DefineAccessor(AccessorInfo* info) {
case EXTERNAL_DOUBLE_ELEMENTS:
// Ignore getters and setters on pixel and external array
// elements.
- return isolate->heap()->undefined_value();
+ return factory->undefined_value();
case DICTIONARY_ELEMENTS:
break;
case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -4788,79 +6549,80 @@ MaybeObject* JSObject::DefineAccessor(AccessorInfo* info) {
break;
}
- MaybeObject* maybe_ok =
- SetElementCallback(index, info, info->property_attributes());
- if (maybe_ok->IsFailure()) return maybe_ok;
+ SetElementCallback(object, index, info, info->property_attributes());
} else {
// Lookup the name.
LookupResult result(isolate);
- LocalLookup(name, &result);
+ object->LocalLookup(*name, &result, true);
// ES5 forbids turning a property into an accessor if it's not
// configurable (that is IsDontDelete in ES3 and v8), see 8.6.1 (Table 5).
if (result.IsFound() && (result.IsReadOnly() || result.IsDontDelete())) {
- return isolate->heap()->undefined_value();
+ return factory->undefined_value();
}
- MaybeObject* maybe_ok =
- SetPropertyCallback(name, info, info->property_attributes());
- if (maybe_ok->IsFailure()) return maybe_ok;
+ SetPropertyCallback(object, name, info, info->property_attributes());
}
- return this;
+ return object;
}
-Object* JSObject::LookupAccessor(String* name, AccessorComponent component) {
- Heap* heap = GetHeap();
+Handle<Object> JSObject::GetAccessor(Handle<JSObject> object,
+ Handle<Name> name,
+ AccessorComponent component) {
+ Isolate* isolate = object->GetIsolate();
// Make sure that the top context does not change when doing callbacks or
// interceptor calls.
- AssertNoContextChange ncc;
+ AssertNoContextChange ncc(isolate);
// Check access rights if needed.
- if (IsAccessCheckNeeded() &&
- !heap->isolate()->MayNamedAccess(this, name, v8::ACCESS_HAS)) {
- heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
- return heap->undefined_value();
+ if (object->IsAccessCheckNeeded() &&
+ !isolate->MayNamedAccess(*object, *name, v8::ACCESS_HAS)) {
+ isolate->ReportFailedAccessCheck(*object, v8::ACCESS_HAS);
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ return isolate->factory()->undefined_value();
}
// Make the lookup and include prototypes.
uint32_t index = 0;
if (name->AsArrayIndex(&index)) {
- for (Object* obj = this;
- obj != heap->null_value();
- obj = JSReceiver::cast(obj)->GetPrototype()) {
- if (obj->IsJSObject() && JSObject::cast(obj)->HasDictionaryElements()) {
- JSObject* js_object = JSObject::cast(obj);
+ for (Handle<Object> obj = object;
+ !obj->IsNull();
+ obj = handle(JSReceiver::cast(*obj)->GetPrototype(), isolate)) {
+ if (obj->IsJSObject() && JSObject::cast(*obj)->HasDictionaryElements()) {
+ JSObject* js_object = JSObject::cast(*obj);
SeededNumberDictionary* dictionary = js_object->element_dictionary();
int entry = dictionary->FindEntry(index);
if (entry != SeededNumberDictionary::kNotFound) {
Object* element = dictionary->ValueAt(entry);
if (dictionary->DetailsAt(entry).type() == CALLBACKS &&
element->IsAccessorPair()) {
- return AccessorPair::cast(element)->GetComponent(component);
+ return handle(AccessorPair::cast(element)->GetComponent(component),
+ isolate);
}
}
}
}
} else {
- for (Object* obj = this;
- obj != heap->null_value();
- obj = JSReceiver::cast(obj)->GetPrototype()) {
- LookupResult result(heap->isolate());
- JSReceiver::cast(obj)->LocalLookup(name, &result);
+ for (Handle<Object> obj = object;
+ !obj->IsNull();
+ obj = handle(JSReceiver::cast(*obj)->GetPrototype(), isolate)) {
+ LookupResult result(isolate);
+ JSReceiver::cast(*obj)->LocalLookup(*name, &result);
if (result.IsFound()) {
- if (result.IsReadOnly()) return heap->undefined_value();
+ if (result.IsReadOnly()) return isolate->factory()->undefined_value();
if (result.IsPropertyCallbacks()) {
Object* obj = result.GetCallbackObject();
if (obj->IsAccessorPair()) {
- return AccessorPair::cast(obj)->GetComponent(component);
+ return handle(AccessorPair::cast(obj)->GetComponent(component),
+ isolate);
}
}
}
}
}
- return heap->undefined_value();
+ return isolate->factory()->undefined_value();
}
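
Together with DefineAccessor above, the handle-based lookup gives callers a round trip for accessor components. A small sketch, assuming |object|, |name| and |getter| are live handles; the helper name is illustrative:

Handle<Object> DefineAndReadBackGetter(Isolate* isolate,
                                       Handle<JSObject> object,
                                       Handle<Name> name,
                                       Handle<Object> getter) {
  // Install only a getter; the setter slot stays null and no special access
  // control is requested.
  JSObject::DefineAccessor(object, name, getter,
                           isolate->factory()->null_value(),
                           NONE, v8::DEFAULT);
  // Read the component back. If the definition was rejected (failed access
  // check, non-configurable property), this yields undefined instead.
  return JSObject::GetAccessor(object, name, ACCESSOR_GETTER);
}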
@@ -4870,11 +6632,18 @@ Object* JSObject::SlowReverseLookup(Object* value) {
DescriptorArray* descs = map()->instance_descriptors();
for (int i = 0; i < number_of_own_descriptors; i++) {
if (descs->GetType(i) == FIELD) {
- if (FastPropertyAt(descs->GetFieldIndex(i)) == value) {
+ Object* property = RawFastPropertyAt(descs->GetFieldIndex(i));
+ if (FLAG_track_double_fields &&
+ descs->GetDetails(i).representation().IsDouble()) {
+ ASSERT(property->IsHeapNumber());
+ if (value->IsNumber() && property->Number() == value->Number()) {
+ return descs->GetKey(i);
+ }
+ } else if (property == value) {
return descs->GetKey(i);
}
- } else if (descs->GetType(i) == CONSTANT_FUNCTION) {
- if (descs->GetConstantFunction(i) == value) {
+ } else if (descs->GetType(i) == CONSTANT) {
+ if (descs->GetConstant(i) == value) {
return descs->GetKey(i);
}
}
@@ -4886,6 +6655,14 @@ Object* JSObject::SlowReverseLookup(Object* value) {
}
+Handle<Map> Map::RawCopy(Handle<Map> map,
+ int instance_size) {
+ CALL_HEAP_FUNCTION(map->GetIsolate(),
+ map->RawCopy(instance_size),
+ Map);
+}
+
+
MaybeObject* Map::RawCopy(int instance_size) {
Map* result;
MaybeObject* maybe_result =
@@ -4896,34 +6673,34 @@ MaybeObject* Map::RawCopy(int instance_size) {
result->set_constructor(constructor());
result->set_bit_field(bit_field());
result->set_bit_field2(bit_field2());
- result->set_bit_field3(bit_field3());
int new_bit_field3 = bit_field3();
new_bit_field3 = OwnsDescriptors::update(new_bit_field3, true);
new_bit_field3 = NumberOfOwnDescriptorsBits::update(new_bit_field3, 0);
new_bit_field3 = EnumLengthBits::update(new_bit_field3, kInvalidEnumCache);
+ new_bit_field3 = Deprecated::update(new_bit_field3, false);
+ new_bit_field3 = IsUnstable::update(new_bit_field3, false);
result->set_bit_field3(new_bit_field3);
return result;
}
-MaybeObject* Map::CopyNormalized(PropertyNormalizationMode mode,
- NormalizedMapSharingMode sharing) {
- int new_instance_size = instance_size();
+Handle<Map> Map::CopyNormalized(Handle<Map> map,
+ PropertyNormalizationMode mode,
+ NormalizedMapSharingMode sharing) {
+ int new_instance_size = map->instance_size();
if (mode == CLEAR_INOBJECT_PROPERTIES) {
- new_instance_size -= inobject_properties() * kPointerSize;
+ new_instance_size -= map->inobject_properties() * kPointerSize;
}
- Map* result;
- MaybeObject* maybe_result = RawCopy(new_instance_size);
- if (!maybe_result->To(&result)) return maybe_result;
+ Handle<Map> result = Map::RawCopy(map, new_instance_size);
if (mode != CLEAR_INOBJECT_PROPERTIES) {
- result->set_inobject_properties(inobject_properties());
+ result->set_inobject_properties(map->inobject_properties());
}
- result->set_code_cache(code_cache());
result->set_is_shared(sharing == SHARED_NORMALIZED_MAP);
result->set_dictionary_map(true);
+ result->set_migration_target(false);
#ifdef VERIFY_HEAP
if (FLAG_verify_heap && result->is_shared()) {
@@ -4935,6 +6712,11 @@ MaybeObject* Map::CopyNormalized(PropertyNormalizationMode mode,
}
+Handle<Map> Map::CopyDropDescriptors(Handle<Map> map) {
+ CALL_HEAP_FUNCTION(map->GetIsolate(), map->CopyDropDescriptors(), Map);
+}
+
+
MaybeObject* Map::CopyDropDescriptors() {
Map* result;
MaybeObject* maybe_result = RawCopy(instance_size());
@@ -4947,6 +6729,7 @@ MaybeObject* Map::CopyDropDescriptors() {
result->set_pre_allocated_property_fields(pre_allocated_property_fields());
result->set_is_shared(false);
result->ClearCodeCache(GetHeap());
+ NotifyLeafMapLayoutChange();
return result;
}
@@ -4962,7 +6745,7 @@ MaybeObject* Map::ShareDescriptor(DescriptorArray* descriptors,
MaybeObject* maybe_result = CopyDropDescriptors();
if (!maybe_result->To(&result)) return maybe_result;
- String* name = descriptor->GetKey();
+ Name* name = descriptor->GetKey();
TransitionArray* transitions;
MaybeObject* maybe_transitions =
@@ -4979,7 +6762,7 @@ MaybeObject* Map::ShareDescriptor(DescriptorArray* descriptors,
} else {
// Descriptor arrays grow by 50%.
MaybeObject* maybe_descriptors = DescriptorArray::Allocate(
- old_size, old_size < 4 ? 1 : old_size / 2);
+ GetIsolate(), old_size, old_size < 4 ? 1 : old_size / 2);
if (!maybe_descriptors->To(&new_descriptors)) return maybe_descriptors;
DescriptorArray::WhitenessWitness witness(new_descriptors);
@@ -5026,10 +6809,20 @@ MaybeObject* Map::ShareDescriptor(DescriptorArray* descriptors,
}
+Handle<Map> Map::CopyReplaceDescriptors(Handle<Map> map,
+ Handle<DescriptorArray> descriptors,
+ TransitionFlag flag,
+ Handle<Name> name) {
+ CALL_HEAP_FUNCTION(map->GetIsolate(),
+ map->CopyReplaceDescriptors(*descriptors, flag, *name),
+ Map);
+}
+
+
MaybeObject* Map::CopyReplaceDescriptors(DescriptorArray* descriptors,
- String* name,
TransitionFlag flag,
- int descriptor_index) {
+ Name* name,
+ SimpleTransitionFlag simple_flag) {
ASSERT(descriptors->IsSortedNoDuplicates());
Map* result;
@@ -5040,17 +6833,48 @@ MaybeObject* Map::CopyReplaceDescriptors(DescriptorArray* descriptors,
if (flag == INSERT_TRANSITION && CanHaveMoreTransitions()) {
TransitionArray* transitions;
- SimpleTransitionFlag simple_flag =
- (descriptor_index == descriptors->number_of_descriptors() - 1)
- ? SIMPLE_TRANSITION
- : FULL_TRANSITION;
MaybeObject* maybe_transitions = AddTransition(name, result, simple_flag);
if (!maybe_transitions->To(&transitions)) return maybe_transitions;
-
set_transitions(transitions);
result->SetBackPointer(this);
+ } else {
+ descriptors->InitializeRepresentations(Representation::Tagged());
+ }
+
+ return result;
+}
+
+
+// Since this method is used to rewrite an existing transition tree, it can
+// always insert transitions without checking.
+Handle<Map> Map::CopyInstallDescriptors(Handle<Map> map,
+ int new_descriptor,
+ Handle<DescriptorArray> descriptors) {
+ ASSERT(descriptors->IsSortedNoDuplicates());
+
+ Handle<Map> result = Map::CopyDropDescriptors(map);
+
+ result->InitializeDescriptors(*descriptors);
+ result->SetNumberOfOwnDescriptors(new_descriptor + 1);
+
+ int unused_property_fields = map->unused_property_fields();
+ if (descriptors->GetDetails(new_descriptor).type() == FIELD) {
+ unused_property_fields = map->unused_property_fields() - 1;
+ if (unused_property_fields < 0) {
+ unused_property_fields += JSObject::kFieldsAdded;
+ }
}
+ result->set_unused_property_fields(unused_property_fields);
+ result->set_owns_descriptors(false);
+
+ Handle<Name> name = handle(descriptors->GetKey(new_descriptor));
+ Handle<TransitionArray> transitions = Map::AddTransition(map, name, result,
+ SIMPLE_TRANSITION);
+
+ map->set_transitions(*transitions);
+ result->SetBackPointer(*map);
+
return result;
}
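
Because CopyInstallDescriptors always inserts a transition, a caller rewriting a transition tree typically applies it once per descriptor to grow a fresh chain of maps from a root. A sketch under that assumption; the helper name and inputs are illustrative:

Handle<Map> RebuildTransitionChain(Handle<Map> root,
                                   Handle<DescriptorArray> descriptors,
                                   int nof_descriptors) {
  Handle<Map> current = root;
  for (int i = 0; i < nof_descriptors; i++) {
    // Each step creates a map that shares |descriptors|, owns i + 1 of them,
    // and is reachable from its predecessor via a SIMPLE_TRANSITION.
    current = Map::CopyInstallDescriptors(current, i, descriptors);
  }
  return current;
}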
@@ -5107,6 +6931,38 @@ MaybeObject* Map::CopyAsElementsKind(ElementsKind kind, TransitionFlag flag) {
}
+Handle<Map> Map::CopyForObserved(Handle<Map> map) {
+ ASSERT(!map->is_observed());
+
+ Isolate* isolate = map->GetIsolate();
+
+ // In case the map owned its own descriptors, share the descriptors and
+ // transfer ownership to the new map.
+ Handle<Map> new_map;
+ if (map->owns_descriptors()) {
+ new_map = Map::CopyDropDescriptors(map);
+ } else {
+ new_map = Map::Copy(map);
+ }
+
+ Handle<TransitionArray> transitions =
+ Map::AddTransition(map, isolate->factory()->observed_symbol(), new_map,
+ FULL_TRANSITION);
+
+ map->set_transitions(*transitions);
+
+ new_map->set_is_observed(true);
+
+ if (map->owns_descriptors()) {
+ new_map->InitializeDescriptors(map->instance_descriptors());
+ map->set_owns_descriptors(false);
+ }
+
+ new_map->SetBackPointer(*map);
+ return new_map;
+}
+
+
MaybeObject* Map::CopyWithPreallocatedFieldDescriptors() {
if (pre_allocated_property_fields() == 0) return CopyDropDescriptors();
@@ -5123,7 +6979,12 @@ MaybeObject* Map::CopyWithPreallocatedFieldDescriptors() {
descriptors->CopyUpTo(number_of_own_descriptors);
if (!maybe_descriptors->To(&new_descriptors)) return maybe_descriptors;
- return CopyReplaceDescriptors(new_descriptors, NULL, OMIT_TRANSITION, 0);
+ return CopyReplaceDescriptors(new_descriptors, OMIT_TRANSITION);
+}
+
+
+Handle<Map> Map::Copy(Handle<Map> map) {
+ CALL_HEAP_FUNCTION(map->GetIsolate(), map->Copy(), Map);
}
@@ -5135,7 +6996,7 @@ MaybeObject* Map::Copy() {
descriptors->CopyUpTo(number_of_own_descriptors);
if (!maybe_descriptors->To(&new_descriptors)) return maybe_descriptors;
- return CopyReplaceDescriptors(new_descriptors, NULL, OMIT_TRANSITION, 0);
+ return CopyReplaceDescriptors(new_descriptors, OMIT_TRANSITION);
}
@@ -5143,13 +7004,12 @@ MaybeObject* Map::CopyAddDescriptor(Descriptor* descriptor,
TransitionFlag flag) {
DescriptorArray* descriptors = instance_descriptors();
- // Ensure the key is a symbol.
- MaybeObject* maybe_failure = descriptor->KeyToSymbol();
+ // Ensure the key is unique.
+ MaybeObject* maybe_failure = descriptor->KeyToUniqueName();
if (maybe_failure->IsFailure()) return maybe_failure;
int old_size = NumberOfOwnDescriptors();
int new_size = old_size + 1;
- descriptor->SetEnumerationIndex(new_size);
if (flag == INSERT_TRANSITION &&
owns_descriptors() &&
@@ -5158,7 +7018,8 @@ MaybeObject* Map::CopyAddDescriptor(Descriptor* descriptor,
}
DescriptorArray* new_descriptors;
- MaybeObject* maybe_descriptors = DescriptorArray::Allocate(old_size, 1);
+ MaybeObject* maybe_descriptors =
+ DescriptorArray::Allocate(GetIsolate(), old_size, 1);
if (!maybe_descriptors->To(&new_descriptors)) return maybe_descriptors;
DescriptorArray::WhitenessWitness witness(new_descriptors);
@@ -5176,10 +7037,8 @@ MaybeObject* Map::CopyAddDescriptor(Descriptor* descriptor,
new_descriptors->Append(descriptor, witness);
}
- String* key = descriptor->GetKey();
- int insertion_index = new_descriptors->number_of_descriptors() - 1;
-
- return CopyReplaceDescriptors(new_descriptors, key, flag, insertion_index);
+ Name* key = descriptor->GetKey();
+ return CopyReplaceDescriptors(new_descriptors, flag, key, SIMPLE_TRANSITION);
}
@@ -5187,8 +7046,8 @@ MaybeObject* Map::CopyInsertDescriptor(Descriptor* descriptor,
TransitionFlag flag) {
DescriptorArray* old_descriptors = instance_descriptors();
- // Ensure the key is a symbol.
- MaybeObject* maybe_result = descriptor->KeyToSymbol();
+ // Ensure the key is unique.
+ MaybeObject* maybe_result = descriptor->KeyToUniqueName();
if (maybe_result->IsFailure()) return maybe_result;
// We replace the key if it is already present.
@@ -5200,18 +7059,45 @@ MaybeObject* Map::CopyInsertDescriptor(Descriptor* descriptor,
}
-MaybeObject* DescriptorArray::CopyUpTo(int enumeration_index) {
+Handle<DescriptorArray> DescriptorArray::CopyUpToAddAttributes(
+ Handle<DescriptorArray> desc,
+ int enumeration_index,
+ PropertyAttributes attributes) {
+ CALL_HEAP_FUNCTION(desc->GetIsolate(),
+ desc->CopyUpToAddAttributes(enumeration_index, attributes),
+ DescriptorArray);
+}
+
+
+MaybeObject* DescriptorArray::CopyUpToAddAttributes(
+ int enumeration_index, PropertyAttributes attributes) {
if (enumeration_index == 0) return GetHeap()->empty_descriptor_array();
int size = enumeration_index;
DescriptorArray* descriptors;
- MaybeObject* maybe_descriptors = Allocate(size);
+ MaybeObject* maybe_descriptors = Allocate(GetIsolate(), size);
if (!maybe_descriptors->To(&descriptors)) return maybe_descriptors;
DescriptorArray::WhitenessWitness witness(descriptors);
- for (int i = 0; i < size; ++i) {
- descriptors->CopyFrom(i, this, i, witness);
+ if (attributes != NONE) {
+ for (int i = 0; i < size; ++i) {
+ Object* value = GetValue(i);
+ PropertyDetails details = GetDetails(i);
+ int mask = DONT_DELETE | DONT_ENUM;
+ // READ_ONLY is an invalid attribute for JS setters/getters.
+ if (details.type() != CALLBACKS || !value->IsAccessorPair()) {
+ mask |= READ_ONLY;
+ }
+ details = details.CopyAddAttributes(
+ static_cast<PropertyAttributes>(attributes & mask));
+ Descriptor desc(GetKey(i), value, details);
+ descriptors->Set(i, &desc, witness);
+ }
+ } else {
+ for (int i = 0; i < size; ++i) {
+ descriptors->CopyFrom(i, this, i, witness);
+ }
}
if (number_of_descriptors() != enumeration_index) descriptors->Sort();
@@ -5224,22 +7110,21 @@ MaybeObject* Map::CopyReplaceDescriptor(DescriptorArray* descriptors,
Descriptor* descriptor,
int insertion_index,
TransitionFlag flag) {
- // Ensure the key is a symbol.
- MaybeObject* maybe_failure = descriptor->KeyToSymbol();
+ // Ensure the key is unique.
+ MaybeObject* maybe_failure = descriptor->KeyToUniqueName();
if (maybe_failure->IsFailure()) return maybe_failure;
- String* key = descriptor->GetKey();
+ Name* key = descriptor->GetKey();
ASSERT(key == descriptors->GetKey(insertion_index));
int new_size = NumberOfOwnDescriptors();
ASSERT(0 <= insertion_index && insertion_index < new_size);
- PropertyDetails details = descriptors->GetDetails(insertion_index);
- ASSERT_LE(details.descriptor_index(), new_size);
- descriptor->SetEnumerationIndex(details.descriptor_index());
+ ASSERT_LT(insertion_index, new_size);
DescriptorArray* new_descriptors;
- MaybeObject* maybe_descriptors = DescriptorArray::Allocate(new_size);
+ MaybeObject* maybe_descriptors =
+ DescriptorArray::Allocate(GetIsolate(), new_size);
if (!maybe_descriptors->To(&new_descriptors)) return maybe_descriptors;
DescriptorArray::WhitenessWitness witness(new_descriptors);
@@ -5254,12 +7139,16 @@ MaybeObject* Map::CopyReplaceDescriptor(DescriptorArray* descriptors,
// Re-sort if descriptors were removed.
if (new_size != descriptors->length()) new_descriptors->Sort();
- return CopyReplaceDescriptors(new_descriptors, key, flag, insertion_index);
+ SimpleTransitionFlag simple_flag =
+ (insertion_index == descriptors->number_of_descriptors() - 1)
+ ? SIMPLE_TRANSITION
+ : FULL_TRANSITION;
+ return CopyReplaceDescriptors(new_descriptors, flag, key, simple_flag);
}
void Map::UpdateCodeCache(Handle<Map> map,
- Handle<String> name,
+ Handle<Name> name,
Handle<Code> code) {
Isolate* isolate = map->GetIsolate();
CALL_HEAP_FUNCTION_VOID(isolate,
@@ -5267,9 +7156,7 @@ void Map::UpdateCodeCache(Handle<Map> map,
}
-MaybeObject* Map::UpdateCodeCache(String* name, Code* code) {
- ASSERT(!is_shared() || code->allowed_in_shared_map_code_cache());
-
+MaybeObject* Map::UpdateCodeCache(Name* name, Code* code) {
// Allocate the code cache if not present.
if (code_cache()->IsFixedArray()) {
Object* result;
@@ -5284,7 +7171,7 @@ MaybeObject* Map::UpdateCodeCache(String* name, Code* code) {
}
-Object* Map::FindInCodeCache(String* name, Code::Flags flags) {
+Object* Map::FindInCodeCache(Name* name, Code::Flags flags) {
// Do a lookup if a code cache exists.
if (!code_cache()->IsFixedArray()) {
return CodeCache::cast(code_cache())->Lookup(name, flags);
@@ -5303,7 +7190,7 @@ int Map::IndexInCodeCache(Object* name, Code* code) {
}
-void Map::RemoveFromCodeCache(String* name, Code* code, int index) {
+void Map::RemoveFromCodeCache(Name* name, Code* code, int index) {
// No GC is supposed to happen between a call to IndexInCodeCache and
// RemoveFromCodeCache so the code cache must be there.
ASSERT(!code_cache()->IsFixedArray());
@@ -5336,12 +7223,6 @@ class IntrusiveMapTransitionIterator {
return transition_array_->GetTarget(index);
}
- if (index == number_of_transitions &&
- transition_array_->HasElementsTransition()) {
- Map* elements_transition = transition_array_->elements_transition();
- *TransitionArrayHeader() = Smi::FromInt(index + 1);
- return elements_transition;
- }
*TransitionArrayHeader() = transition_array_->GetHeap()->fixed_array_map();
return NULL;
}
@@ -5506,7 +7387,7 @@ void Map::TraverseTransitionTree(TraverseCallback callback, void* data) {
}
-MaybeObject* CodeCache::Update(String* name, Code* code) {
+MaybeObject* CodeCache::Update(Name* name, Code* code) {
// The number of monomorphic stubs for normal load/store/call IC's can grow to
// a large number and therefore they need to go into a hash table. They are
// used to load global properties from cells.
@@ -5515,7 +7396,8 @@ MaybeObject* CodeCache::Update(String* name, Code* code) {
if (normal_type_cache()->IsUndefined()) {
Object* result;
{ MaybeObject* maybe_result =
- CodeCacheHashTable::Allocate(CodeCacheHashTable::kInitialSize);
+ CodeCacheHashTable::Allocate(GetHeap(),
+ CodeCacheHashTable::kInitialSize);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
set_normal_type_cache(result);
@@ -5528,7 +7410,7 @@ MaybeObject* CodeCache::Update(String* name, Code* code) {
}
-MaybeObject* CodeCache::UpdateDefaultCache(String* name, Code* code) {
+MaybeObject* CodeCache::UpdateDefaultCache(Name* name, Code* code) {
// When updating the default code cache we disregard the type encoded in the
// flags. This allows call constant stubs to overwrite call field
// stubs, etc.
@@ -5551,7 +7433,7 @@ MaybeObject* CodeCache::UpdateDefaultCache(String* name, Code* code) {
cache->set(i + kCodeCacheEntryCodeOffset, code);
return this;
}
- if (name->Equals(String::cast(key))) {
+ if (name->Equals(Name::cast(key))) {
Code::Flags found =
Code::cast(cache->get(i + kCodeCacheEntryCodeOffset))->flags();
if (Code::RemoveTypeFromFlags(found) == flags) {
@@ -5588,7 +7470,7 @@ MaybeObject* CodeCache::UpdateDefaultCache(String* name, Code* code) {
}
-MaybeObject* CodeCache::UpdateNormalTypeCache(String* name, Code* code) {
+MaybeObject* CodeCache::UpdateNormalTypeCache(Name* name, Code* code) {
// Adding a new entry can cause a new cache to be allocated.
CodeCacheHashTable* cache = CodeCacheHashTable::cast(normal_type_cache());
Object* new_cache;
@@ -5600,16 +7482,15 @@ MaybeObject* CodeCache::UpdateNormalTypeCache(String* name, Code* code) {
}
-Object* CodeCache::Lookup(String* name, Code::Flags flags) {
- if (Code::ExtractTypeFromFlags(flags) == Code::NORMAL) {
- return LookupNormalTypeCache(name, flags);
- } else {
- return LookupDefaultCache(name, flags);
- }
+Object* CodeCache::Lookup(Name* name, Code::Flags flags) {
+ flags = Code::RemoveTypeFromFlags(flags);
+ Object* result = LookupDefaultCache(name, flags);
+ if (result->IsCode()) return result;
+ return LookupNormalTypeCache(name, flags);
}
-Object* CodeCache::LookupDefaultCache(String* name, Code::Flags flags) {
+Object* CodeCache::LookupDefaultCache(Name* name, Code::Flags flags) {
FixedArray* cache = default_cache();
int length = cache->length();
for (int i = 0; i < length; i += kCodeCacheEntrySize) {
@@ -5617,9 +7498,9 @@ Object* CodeCache::LookupDefaultCache(String* name, Code::Flags flags) {
// Skip deleted elements.
if (key->IsNull()) continue;
if (key->IsUndefined()) return key;
- if (name->Equals(String::cast(key))) {
+ if (name->Equals(Name::cast(key))) {
Code* code = Code::cast(cache->get(i + kCodeCacheEntryCodeOffset));
- if (code->flags() == flags) {
+ if (Code::RemoveTypeFromFlags(code->flags()) == flags) {
return code;
}
}
@@ -5628,7 +7509,7 @@ Object* CodeCache::LookupDefaultCache(String* name, Code::Flags flags) {
}
-Object* CodeCache::LookupNormalTypeCache(String* name, Code::Flags flags) {
+Object* CodeCache::LookupNormalTypeCache(Name* name, Code::Flags flags) {
if (!normal_type_cache()->IsUndefined()) {
CodeCacheHashTable* cache = CodeCacheHashTable::cast(normal_type_cache());
return cache->Lookup(name, flags);
@@ -5642,7 +7523,7 @@ int CodeCache::GetIndex(Object* name, Code* code) {
if (code->type() == Code::NORMAL) {
if (normal_type_cache()->IsUndefined()) return -1;
CodeCacheHashTable* cache = CodeCacheHashTable::cast(normal_type_cache());
- return cache->GetIndex(String::cast(name), code->flags());
+ return cache->GetIndex(Name::cast(name), code->flags());
}
FixedArray* array = default_cache();
@@ -5658,7 +7539,7 @@ void CodeCache::RemoveByIndex(Object* name, Code* code, int index) {
if (code->type() == Code::NORMAL) {
ASSERT(!normal_type_cache()->IsUndefined());
CodeCacheHashTable* cache = CodeCacheHashTable::cast(normal_type_cache());
- ASSERT(cache->GetIndex(String::cast(name), code->flags()) == index);
+ ASSERT(cache->GetIndex(Name::cast(name), code->flags()) == index);
cache->RemoveByIndex(index);
} else {
FixedArray* array = default_cache();
@@ -5679,19 +7560,17 @@ void CodeCache::RemoveByIndex(Object* name, Code* code, int index) {
// lookup not to create a new entry.
class CodeCacheHashTableKey : public HashTableKey {
public:
- CodeCacheHashTableKey(String* name, Code::Flags flags)
+ CodeCacheHashTableKey(Name* name, Code::Flags flags)
: name_(name), flags_(flags), code_(NULL) { }
- CodeCacheHashTableKey(String* name, Code* code)
- : name_(name),
- flags_(code->flags()),
- code_(code) { }
+ CodeCacheHashTableKey(Name* name, Code* code)
+ : name_(name), flags_(code->flags()), code_(code) { }
bool IsMatch(Object* other) {
if (!other->IsFixedArray()) return false;
FixedArray* pair = FixedArray::cast(other);
- String* name = String::cast(pair->get(0));
+ Name* name = Name::cast(pair->get(0));
Code::Flags flags = Code::cast(pair->get(1))->flags();
if (flags != flags_) {
return false;
@@ -5699,7 +7578,7 @@ class CodeCacheHashTableKey : public HashTableKey {
return name_->Equals(name);
}
- static uint32_t NameFlagsHashHelper(String* name, Code::Flags flags) {
+ static uint32_t NameFlagsHashHelper(Name* name, Code::Flags flags) {
return name->Hash() ^ flags;
}
@@ -5707,15 +7586,15 @@ class CodeCacheHashTableKey : public HashTableKey {
uint32_t HashForObject(Object* obj) {
FixedArray* pair = FixedArray::cast(obj);
- String* name = String::cast(pair->get(0));
+ Name* name = Name::cast(pair->get(0));
Code* code = Code::cast(pair->get(1));
return NameFlagsHashHelper(name, code->flags());
}
- MUST_USE_RESULT MaybeObject* AsObject() {
+ MUST_USE_RESULT MaybeObject* AsObject(Heap* heap) {
ASSERT(code_ != NULL);
Object* obj;
- { MaybeObject* maybe_obj = code_->GetHeap()->AllocateFixedArray(2);
+ { MaybeObject* maybe_obj = heap->AllocateFixedArray(2);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
FixedArray* pair = FixedArray::cast(obj);
@@ -5725,14 +7604,14 @@ class CodeCacheHashTableKey : public HashTableKey {
}
private:
- String* name_;
+ Name* name_;
Code::Flags flags_;
// TODO(jkummerow): We should be able to get by without this.
Code* code_;
};
-Object* CodeCacheHashTable::Lookup(String* name, Code::Flags flags) {
+Object* CodeCacheHashTable::Lookup(Name* name, Code::Flags flags) {
CodeCacheHashTableKey key(name, flags);
int entry = FindEntry(&key);
if (entry == kNotFound) return GetHeap()->undefined_value();
@@ -5740,7 +7619,7 @@ Object* CodeCacheHashTable::Lookup(String* name, Code::Flags flags) {
}
-MaybeObject* CodeCacheHashTable::Put(String* name, Code* code) {
+MaybeObject* CodeCacheHashTable::Put(Name* name, Code* code) {
CodeCacheHashTableKey key(name, code);
Object* obj;
{ MaybeObject* maybe_obj = EnsureCapacity(1, &key);
@@ -5752,7 +7631,7 @@ MaybeObject* CodeCacheHashTable::Put(String* name, Code* code) {
int entry = cache->FindInsertionEntry(key.Hash());
Object* k;
- { MaybeObject* maybe_k = key.AsObject();
+ { MaybeObject* maybe_k = key.AsObject(GetHeap());
if (!maybe_k->ToObject(&k)) return maybe_k;
}
@@ -5763,7 +7642,7 @@ MaybeObject* CodeCacheHashTable::Put(String* name, Code* code) {
}
-int CodeCacheHashTable::GetIndex(String* name, Code::Flags flags) {
+int CodeCacheHashTable::GetIndex(Name* name, Code::Flags flags) {
CodeCacheHashTableKey key(name, flags);
int entry = FindEntry(&key);
return (entry == kNotFound) ? -1 : entry;
@@ -5796,6 +7675,7 @@ MaybeObject* PolymorphicCodeCache::Update(MapHandleList* maps,
Object* result;
{ MaybeObject* maybe_result =
PolymorphicCodeCacheHashTable::Allocate(
+ GetHeap(),
PolymorphicCodeCacheHashTable::kInitialSize);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
@@ -5821,7 +7701,7 @@ Handle<Object> PolymorphicCodeCache::Lookup(MapHandleList* maps,
if (!cache()->IsUndefined()) {
PolymorphicCodeCacheHashTable* hash_table =
PolymorphicCodeCacheHashTable::cast(cache());
- return Handle<Object>(hash_table->Lookup(maps, flags));
+ return Handle<Object>(hash_table->Lookup(maps, flags), GetIsolate());
} else {
return GetIsolate()->factory()->undefined_value();
}
@@ -5884,13 +7764,13 @@ class PolymorphicCodeCacheHashTableKey : public HashTableKey {
return MapsHashHelper(&other_maps, other_flags);
}
- MUST_USE_RESULT MaybeObject* AsObject() {
+ MUST_USE_RESULT MaybeObject* AsObject(Heap* heap) {
Object* obj;
// The maps in |maps_| must be copied to a newly allocated FixedArray,
// both because the referenced MapList is short-lived, and because C++
// objects can't be stored in the heap anyway.
{ MaybeObject* maybe_obj =
- HEAP->AllocateUninitializedFixedArray(maps_->length() + 1);
+ heap->AllocateUninitializedFixedArray(maps_->length() + 1);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
FixedArray* list = FixedArray::cast(obj);
@@ -5940,7 +7820,7 @@ MaybeObject* PolymorphicCodeCacheHashTable::Put(MapHandleList* maps,
PolymorphicCodeCacheHashTable* cache =
reinterpret_cast<PolymorphicCodeCacheHashTable*>(obj);
int entry = cache->FindInsertionEntry(key.Hash());
- { MaybeObject* maybe_obj = key.AsObject();
+ { MaybeObject* maybe_obj = key.AsObject(GetHeap());
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
cache->set(EntryToIndex(entry), obj);
@@ -5956,11 +7836,11 @@ MaybeObject* FixedArray::AddKeysFromJSArray(JSArray* array) {
accessor->AddElementsToFixedArray(array, array, this);
FixedArray* result;
if (!maybe_result->To<FixedArray>(&result)) return maybe_result;
-#ifdef DEBUG
+#ifdef ENABLE_SLOW_ASSERTS
if (FLAG_enable_slow_asserts) {
for (int i = 0; i < result->length(); i++) {
Object* current = result->get(i);
- ASSERT(current->IsNumber() || current->IsString());
+ ASSERT(current->IsNumber() || current->IsName());
}
}
#endif
@@ -5974,11 +7854,11 @@ MaybeObject* FixedArray::UnionOfKeys(FixedArray* other) {
accessor->AddElementsToFixedArray(NULL, NULL, this, other);
FixedArray* result;
if (!maybe_result->To(&result)) return maybe_result;
-#ifdef DEBUG
+#ifdef ENABLE_SLOW_ASSERTS
if (FLAG_enable_slow_asserts) {
for (int i = 0; i < result->length(); i++) {
Object* current = result->get(i);
- ASSERT(current->IsNumber() || current->IsString());
+ ASSERT(current->IsNumber() || current->IsName());
}
}
#endif
@@ -5986,16 +7866,16 @@ MaybeObject* FixedArray::UnionOfKeys(FixedArray* other) {
}
-MaybeObject* FixedArray::CopySize(int new_length) {
+MaybeObject* FixedArray::CopySize(int new_length, PretenureFlag pretenure) {
Heap* heap = GetHeap();
if (new_length == 0) return heap->empty_fixed_array();
Object* obj;
- { MaybeObject* maybe_obj = heap->AllocateFixedArray(new_length);
+ { MaybeObject* maybe_obj = heap->AllocateFixedArray(new_length, pretenure);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
FixedArray* result = FixedArray::cast(obj);
// Copy the content
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
int len = length();
if (new_length < len) len = new_length;
// We are taking the map from the old fixed array so the map is sure to
@@ -6010,7 +7890,7 @@ MaybeObject* FixedArray::CopySize(int new_length) {
void FixedArray::CopyTo(int pos, FixedArray* dest, int dest_pos, int len) {
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
WriteBarrierMode mode = dest->GetWriteBarrierMode(no_gc);
for (int index = 0; index < len; index++) {
dest->set(dest_pos+index, get(pos+index), mode);
@@ -6029,8 +7909,10 @@ bool FixedArray::IsEqualTo(FixedArray* other) {
#endif
-MaybeObject* DescriptorArray::Allocate(int number_of_descriptors, int slack) {
- Heap* heap = Isolate::Current()->heap();
+MaybeObject* DescriptorArray::Allocate(Isolate* isolate,
+ int number_of_descriptors,
+ int slack) {
+ Heap* heap = isolate->heap();
// Do not use DescriptorArray::cast on incomplete object.
int size = number_of_descriptors + slack;
if (size == 0) return heap->empty_descriptor_array();
@@ -6076,6 +7958,130 @@ void DescriptorArray::CopyFrom(int dst_index,
}
+Handle<DescriptorArray> DescriptorArray::Merge(Handle<DescriptorArray> desc,
+ int verbatim,
+ int valid,
+ int new_size,
+ int modify_index,
+ StoreMode store_mode,
+ Handle<DescriptorArray> other) {
+ CALL_HEAP_FUNCTION(desc->GetIsolate(),
+ desc->Merge(verbatim, valid, new_size, modify_index,
+ store_mode, *other),
+ DescriptorArray);
+}
+
+
+// Generalize the |other| descriptor array by merging it into the (at least
+// partly) updated |this| descriptor array.
+// The method merges two descriptor arrays in three parts. Both descriptor arrays
+// are identical up to |verbatim|. They also overlap in keys up to |valid|.
+// Between |verbatim| and |valid|, the resulting descriptor type as well as the
+// representation are generalized from both |this| and |other|. Beyond |valid|,
+// the descriptors are copied verbatim from |other| up to |new_size|.
+// In case of incompatible types, the type and representation of |other| are
+// used.
+MaybeObject* DescriptorArray::Merge(int verbatim,
+ int valid,
+ int new_size,
+ int modify_index,
+ StoreMode store_mode,
+ DescriptorArray* other) {
+ ASSERT(verbatim <= valid);
+ ASSERT(valid <= new_size);
+
+ DescriptorArray* result;
+ // Allocate a new descriptor array large enough to hold the required
+ // descriptors, with at least the same size as this descriptor array.
+ MaybeObject* maybe_descriptors = DescriptorArray::Allocate(
+ GetIsolate(), new_size,
+ Max(new_size, other->number_of_descriptors()) - new_size);
+ if (!maybe_descriptors->To(&result)) return maybe_descriptors;
+ ASSERT(result->length() > length() ||
+ result->NumberOfSlackDescriptors() > 0 ||
+ result->number_of_descriptors() == other->number_of_descriptors());
+ ASSERT(result->number_of_descriptors() == new_size);
+
+ DescriptorArray::WhitenessWitness witness(result);
+
+ int descriptor;
+
+ // 0 -> |verbatim|
+ int current_offset = 0;
+ for (descriptor = 0; descriptor < verbatim; descriptor++) {
+ if (GetDetails(descriptor).type() == FIELD) current_offset++;
+ result->CopyFrom(descriptor, other, descriptor, witness);
+ }
+
+ // |verbatim| -> |valid|
+ for (; descriptor < valid; descriptor++) {
+ Name* key = GetKey(descriptor);
+ PropertyDetails details = GetDetails(descriptor);
+ PropertyDetails other_details = other->GetDetails(descriptor);
+
+ if (details.type() == FIELD || other_details.type() == FIELD ||
+ (store_mode == FORCE_FIELD && descriptor == modify_index) ||
+ (details.type() == CONSTANT &&
+ other_details.type() == CONSTANT &&
+ GetValue(descriptor) != other->GetValue(descriptor))) {
+ Representation representation =
+ details.representation().generalize(other_details.representation());
+ FieldDescriptor d(key,
+ current_offset++,
+ other_details.attributes(),
+ representation);
+ result->Set(descriptor, &d, witness);
+ } else {
+ result->CopyFrom(descriptor, other, descriptor, witness);
+ }
+ }
+
+ // |valid| -> |new_size|
+ for (; descriptor < new_size; descriptor++) {
+ PropertyDetails details = other->GetDetails(descriptor);
+ if (details.type() == FIELD ||
+ (store_mode == FORCE_FIELD && descriptor == modify_index)) {
+ Name* key = other->GetKey(descriptor);
+ FieldDescriptor d(key,
+ current_offset++,
+ details.attributes(),
+ details.representation());
+ result->Set(descriptor, &d, witness);
+ } else {
+ result->CopyFrom(descriptor, other, descriptor, witness);
+ }
+ }
+
+ result->Sort();
+ return result;
+}
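
For reference, the three-region structure described in the comment above can be shown in isolation. The sketch below is a simplified stand-in, assuming a toy Descriptor struct and integer "representations" ordered by generality; it is not V8's actual DescriptorArray API.

#include <algorithm>
#include <string>
#include <vector>

// Hypothetical, simplified descriptor; V8 packs keys, property details and
// representations into a FixedArray instead.
struct Descriptor {
  std::string key;
  int representation;  // larger value == more general, for this sketch only
};

// Merge |self| and |other| into a new array:
//   [0, verbatim)      identical in both inputs, copied as-is
//   [verbatim, valid)  representations generalized across both inputs
//   [valid, new_size)  taken verbatim from |other|
std::vector<Descriptor> MergeSketch(const std::vector<Descriptor>& self,
                                    const std::vector<Descriptor>& other,
                                    size_t verbatim, size_t valid,
                                    size_t new_size) {
  std::vector<Descriptor> result;
  result.reserve(new_size);
  for (size_t i = 0; i < verbatim; ++i) result.push_back(other[i]);
  for (size_t i = verbatim; i < valid; ++i) {
    Descriptor d = other[i];
    d.representation =
        std::max(self[i].representation, other[i].representation);
    result.push_back(d);
  }
  for (size_t i = valid; i < new_size; ++i) result.push_back(other[i]);
  return result;
}

The real implementation additionally distinguishes FIELD from CONSTANT descriptors and allocates slack up front, but the region boundaries behave as above.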
+
+
+// Checks whether a merge of |other| into |this| would return a copy of |this|.
+bool DescriptorArray::IsMoreGeneralThan(int verbatim,
+ int valid,
+ int new_size,
+ DescriptorArray* other) {
+ ASSERT(verbatim <= valid);
+ ASSERT(valid <= new_size);
+ if (valid != new_size) return false;
+
+ for (int descriptor = verbatim; descriptor < valid; descriptor++) {
+ PropertyDetails details = GetDetails(descriptor);
+ PropertyDetails other_details = other->GetDetails(descriptor);
+ if (!other_details.representation().fits_into(details.representation())) {
+ return false;
+ }
+ if (details.type() == CONSTANT) {
+ if (other_details.type() != CONSTANT) return false;
+ if (GetValue(descriptor) != other->GetValue(descriptor)) return false;
+ }
+ }
+
+ return true;
+}
+
+
// We need the whiteness witness since sort will reshuffle the entries in the
// descriptor array. If the descriptor array were to be black, the shuffling
// would move a slot that was already recorded as pointing into an evacuation
@@ -6135,14 +8141,10 @@ void DescriptorArray::Sort() {
}
-MaybeObject* AccessorPair::Copy() {
- Heap* heap = GetHeap();
- AccessorPair* copy;
- MaybeObject* maybe_copy = heap->AllocateAccessorPair();
- if (!maybe_copy->To(&copy)) return maybe_copy;
-
- copy->set_getter(getter());
- copy->set_setter(setter());
+Handle<AccessorPair> AccessorPair::Copy(Handle<AccessorPair> pair) {
+ Handle<AccessorPair> copy = pair->GetIsolate()->factory()->NewAccessorPair();
+ copy->set_getter(pair->getter());
+ copy->set_setter(pair->setter());
return copy;
}
@@ -6153,19 +8155,21 @@ Object* AccessorPair::GetComponent(AccessorComponent component) {
}
-MaybeObject* DeoptimizationInputData::Allocate(int deopt_entry_count,
+MaybeObject* DeoptimizationInputData::Allocate(Isolate* isolate,
+ int deopt_entry_count,
PretenureFlag pretenure) {
ASSERT(deopt_entry_count > 0);
- return HEAP->AllocateFixedArray(LengthFor(deopt_entry_count),
- pretenure);
+ return isolate->heap()->AllocateFixedArray(LengthFor(deopt_entry_count),
+ pretenure);
}
-MaybeObject* DeoptimizationOutputData::Allocate(int number_of_deopt_points,
+MaybeObject* DeoptimizationOutputData::Allocate(Isolate* isolate,
+ int number_of_deopt_points,
PretenureFlag pretenure) {
- if (number_of_deopt_points == 0) return HEAP->empty_fixed_array();
- return HEAP->AllocateFixedArray(LengthOfFixedArray(number_of_deopt_points),
- pretenure);
+ if (number_of_deopt_points == 0) return isolate->heap()->empty_fixed_array();
+ return isolate->heap()->AllocateFixedArray(
+ LengthOfFixedArray(number_of_deopt_points), pretenure);
}
@@ -6182,13 +8186,40 @@ bool DescriptorArray::IsEqualTo(DescriptorArray* other) {
#endif
+static bool IsIdentifier(UnicodeCache* cache, Name* name) {
+ // Checks whether the buffer contains an identifier (no escape).
+ if (!name->IsString()) return false;
+ String* string = String::cast(name);
+ if (string->length() == 0) return false;
+ ConsStringIteratorOp op;
+ StringCharacterStream stream(string, &op);
+ if (!cache->IsIdentifierStart(stream.GetNext())) {
+ return false;
+ }
+ while (stream.HasMore()) {
+ if (!cache->IsIdentifierPart(stream.GetNext())) {
+ return false;
+ }
+ }
+ return true;
+}
+
+
+bool Name::IsCacheable(Isolate* isolate) {
+ return IsSymbol() ||
+ IsIdentifier(isolate->unicode_cache(), this) ||
+ this == isolate->heap()->hidden_string();
+}
+
+
bool String::LooksValid() {
- if (!Isolate::Current()->heap()->Contains(this)) return false;
+ if (!GetIsolate()->heap()->Contains(this)) return false;
return true;
}
String::FlatContent String::GetFlatContent() {
+ ASSERT(!AllowHeapAllocation::IsAllowed());
int length = this->length();
StringShape shape(this);
String* string = this;
@@ -6209,14 +8240,14 @@ String::FlatContent String::GetFlatContent() {
ASSERT(shape.representation_tag() != kConsStringTag &&
shape.representation_tag() != kSlicedStringTag);
}
- if (shape.encoding_tag() == kAsciiStringTag) {
- const char* start;
+ if (shape.encoding_tag() == kOneByteStringTag) {
+ const uint8_t* start;
if (shape.representation_tag() == kSeqStringTag) {
- start = SeqAsciiString::cast(string)->GetChars();
+ start = SeqOneByteString::cast(string)->GetChars();
} else {
start = ExternalAsciiString::cast(string)->GetChars();
}
- return FlatContent(Vector<const char>(start + offset, length));
+ return FlatContent(Vector<const uint8_t>(start + offset, length));
} else {
ASSERT(shape.encoding_tag() == kTwoByteStringTag);
const uc16* start;
@@ -6244,14 +8275,14 @@ SmartArrayPointer<char> String::ToCString(AllowNullsFlag allow_nulls,
if (length < 0) length = kMaxInt - offset;
// Compute the size of the UTF-8 string. Start at the specified offset.
- Access<StringInputBuffer> buffer(
- heap->isolate()->objects_string_input_buffer());
- buffer->Reset(offset, this);
+ Access<ConsStringIteratorOp> op(
+ heap->isolate()->objects_string_iterator());
+ StringCharacterStream stream(this, op.value(), offset);
int character_position = offset;
int utf8_bytes = 0;
int last = unibrow::Utf16::kNoPreviousCharacter;
- while (buffer->has_more() && character_position++ < offset + length) {
- uint16_t character = buffer->GetNext();
+ while (stream.HasMore() && character_position++ < offset + length) {
+ uint16_t character = stream.GetNext();
utf8_bytes += unibrow::Utf8::Length(character, last);
last = character;
}
@@ -6263,13 +8294,12 @@ SmartArrayPointer<char> String::ToCString(AllowNullsFlag allow_nulls,
char* result = NewArray<char>(utf8_bytes + 1);
// Convert the UTF-16 string to a UTF-8 buffer. Start at the specified offset.
- buffer->Rewind();
- buffer->Seek(offset);
+ stream.Reset(this, offset);
character_position = offset;
int utf8_byte_position = 0;
last = unibrow::Utf16::kNoPreviousCharacter;
- while (buffer->has_more() && character_position++ < offset + length) {
- uint16_t character = buffer->GetNext();
+ while (stream.HasMore() && character_position++ < offset + length) {
+ uint16_t character = stream.GetNext();
if (allow_nulls == DISALLOW_NULLS && character == 0) {
character = ' ';
}
@@ -6295,7 +8325,7 @@ const uc16* String::GetTwoByteData() {
const uc16* String::GetTwoByteData(unsigned start) {
- ASSERT(!IsAsciiRepresentationUnderneath());
+ ASSERT(!IsOneByteRepresentationUnderneath());
switch (StringShape(this).representation_tag()) {
case kSeqStringTag:
return SeqTwoByteString::cast(this)->SeqTwoByteStringGetData(start);
@@ -6321,15 +8351,15 @@ SmartArrayPointer<uc16> String::ToWideCString(RobustnessFlag robust_flag) {
}
Heap* heap = GetHeap();
- Access<StringInputBuffer> buffer(
- heap->isolate()->objects_string_input_buffer());
- buffer->Reset(this);
+ Access<ConsStringIteratorOp> op(
+ heap->isolate()->objects_string_iterator());
+ StringCharacterStream stream(this, op.value());
uc16* result = NewArray<uc16>(length() + 1);
int i = 0;
- while (buffer->has_more()) {
- uint16_t character = buffer->GetNext();
+ while (stream.HasMore()) {
+ uint16_t character = stream.GetNext();
result[i++] = character;
}
result[i] = 0;
@@ -6343,254 +8373,7 @@ const uc16* SeqTwoByteString::SeqTwoByteStringGetData(unsigned start) {
}
-void SeqTwoByteString::SeqTwoByteStringReadBlockIntoBuffer(ReadBlockBuffer* rbb,
- unsigned* offset_ptr,
- unsigned max_chars) {
- unsigned chars_read = 0;
- unsigned offset = *offset_ptr;
- while (chars_read < max_chars) {
- uint16_t c = *reinterpret_cast<uint16_t*>(
- reinterpret_cast<char*>(this) -
- kHeapObjectTag + kHeaderSize + offset * kShortSize);
- if (c <= kMaxAsciiCharCode) {
- // Fast case for ASCII characters. Cursor is an input output argument.
- if (!unibrow::CharacterStream::EncodeAsciiCharacter(c,
- rbb->util_buffer,
- rbb->capacity,
- rbb->cursor)) {
- break;
- }
- } else {
- if (!unibrow::CharacterStream::EncodeNonAsciiCharacter(c,
- rbb->util_buffer,
- rbb->capacity,
- rbb->cursor)) {
- break;
- }
- }
- offset++;
- chars_read++;
- }
- *offset_ptr = offset;
- rbb->remaining += chars_read;
-}
-
-
-const unibrow::byte* SeqAsciiString::SeqAsciiStringReadBlock(
- unsigned* remaining,
- unsigned* offset_ptr,
- unsigned max_chars) {
- const unibrow::byte* b = reinterpret_cast<unibrow::byte*>(this) -
- kHeapObjectTag + kHeaderSize + *offset_ptr * kCharSize;
- *remaining = max_chars;
- *offset_ptr += max_chars;
- return b;
-}
-
-
-// This will iterate unless the block of string data spans two 'halves' of
-// a ConsString, in which case it will recurse. Since the block of string
-// data to be read has a maximum size this limits the maximum recursion
-// depth to something sane. Since C++ does not have tail call recursion
-// elimination, the iteration must be explicit. Since this is not an
-// -IntoBuffer method it can delegate to one of the efficient
-// *AsciiStringReadBlock routines.
-const unibrow::byte* ConsString::ConsStringReadBlock(ReadBlockBuffer* rbb,
- unsigned* offset_ptr,
- unsigned max_chars) {
- ConsString* current = this;
- unsigned offset = *offset_ptr;
- int offset_correction = 0;
-
- while (true) {
- String* left = current->first();
- unsigned left_length = (unsigned)left->length();
- if (left_length > offset &&
- (max_chars <= left_length - offset ||
- (rbb->capacity <= left_length - offset &&
- (max_chars = left_length - offset, true)))) { // comma operator!
- // Left hand side only - iterate unless we have reached the bottom of
- // the cons tree. The assignment on the left of the comma operator is
- // in order to make use of the fact that the -IntoBuffer routines can
- // produce at most 'capacity' characters. This enables us to postpone
- // the point where we switch to the -IntoBuffer routines (below) in order
- // to maximize the chances of delegating a big chunk of work to the
- // efficient *AsciiStringReadBlock routines.
- if (StringShape(left).IsCons()) {
- current = ConsString::cast(left);
- continue;
- } else {
- const unibrow::byte* answer =
- String::ReadBlock(left, rbb, &offset, max_chars);
- *offset_ptr = offset + offset_correction;
- return answer;
- }
- } else if (left_length <= offset) {
- // Right hand side only - iterate unless we have reached the bottom of
- // the cons tree.
- String* right = current->second();
- offset -= left_length;
- offset_correction += left_length;
- if (StringShape(right).IsCons()) {
- current = ConsString::cast(right);
- continue;
- } else {
- const unibrow::byte* answer =
- String::ReadBlock(right, rbb, &offset, max_chars);
- *offset_ptr = offset + offset_correction;
- return answer;
- }
- } else {
- // The block to be read spans two sides of the ConsString, so we call the
- // -IntoBuffer version, which will recurse. The -IntoBuffer methods
- // are able to assemble data from several part strings because they use
- // the util_buffer to store their data and never return direct pointers
- // to their storage. We don't try to read more than the buffer capacity
- // here or we can get too much recursion.
- ASSERT(rbb->remaining == 0);
- ASSERT(rbb->cursor == 0);
- current->ConsStringReadBlockIntoBuffer(
- rbb,
- &offset,
- max_chars > rbb->capacity ? rbb->capacity : max_chars);
- *offset_ptr = offset + offset_correction;
- return rbb->util_buffer;
- }
- }
-}
-
-
-const unibrow::byte* ExternalAsciiString::ExternalAsciiStringReadBlock(
- unsigned* remaining,
- unsigned* offset_ptr,
- unsigned max_chars) {
- // Cast const char* to unibrow::byte* (signedness difference).
- const unibrow::byte* b =
- reinterpret_cast<const unibrow::byte*>(GetChars()) + *offset_ptr;
- *remaining = max_chars;
- *offset_ptr += max_chars;
- return b;
-}
-
-
-void ExternalTwoByteString::ExternalTwoByteStringReadBlockIntoBuffer(
- ReadBlockBuffer* rbb,
- unsigned* offset_ptr,
- unsigned max_chars) {
- unsigned chars_read = 0;
- unsigned offset = *offset_ptr;
- const uint16_t* data = GetChars();
- while (chars_read < max_chars) {
- uint16_t c = data[offset];
- if (c <= kMaxAsciiCharCode) {
- // Fast case for ASCII characters. Cursor is an input output argument.
- if (!unibrow::CharacterStream::EncodeAsciiCharacter(c,
- rbb->util_buffer,
- rbb->capacity,
- rbb->cursor))
- break;
- } else {
- if (!unibrow::CharacterStream::EncodeNonAsciiCharacter(c,
- rbb->util_buffer,
- rbb->capacity,
- rbb->cursor))
- break;
- }
- offset++;
- chars_read++;
- }
- *offset_ptr = offset;
- rbb->remaining += chars_read;
-}
-
-
-void SeqAsciiString::SeqAsciiStringReadBlockIntoBuffer(ReadBlockBuffer* rbb,
- unsigned* offset_ptr,
- unsigned max_chars) {
- unsigned capacity = rbb->capacity - rbb->cursor;
- if (max_chars > capacity) max_chars = capacity;
- memcpy(rbb->util_buffer + rbb->cursor,
- reinterpret_cast<char*>(this) - kHeapObjectTag + kHeaderSize +
- *offset_ptr * kCharSize,
- max_chars);
- rbb->remaining += max_chars;
- *offset_ptr += max_chars;
- rbb->cursor += max_chars;
-}
-
-
-void ExternalAsciiString::ExternalAsciiStringReadBlockIntoBuffer(
- ReadBlockBuffer* rbb,
- unsigned* offset_ptr,
- unsigned max_chars) {
- unsigned capacity = rbb->capacity - rbb->cursor;
- if (max_chars > capacity) max_chars = capacity;
- memcpy(rbb->util_buffer + rbb->cursor, GetChars() + *offset_ptr, max_chars);
- rbb->remaining += max_chars;
- *offset_ptr += max_chars;
- rbb->cursor += max_chars;
-}
-
-
-// This method determines the type of string involved and then copies
-// a whole chunk of characters into a buffer, or returns a pointer to a buffer
-// where they can be found. The pointer is not necessarily valid across a GC
-// (see AsciiStringReadBlock).
-const unibrow::byte* String::ReadBlock(String* input,
- ReadBlockBuffer* rbb,
- unsigned* offset_ptr,
- unsigned max_chars) {
- ASSERT(*offset_ptr <= static_cast<unsigned>(input->length()));
- if (max_chars == 0) {
- rbb->remaining = 0;
- return NULL;
- }
- switch (StringShape(input).representation_tag()) {
- case kSeqStringTag:
- if (input->IsAsciiRepresentation()) {
- SeqAsciiString* str = SeqAsciiString::cast(input);
- return str->SeqAsciiStringReadBlock(&rbb->remaining,
- offset_ptr,
- max_chars);
- } else {
- SeqTwoByteString* str = SeqTwoByteString::cast(input);
- str->SeqTwoByteStringReadBlockIntoBuffer(rbb,
- offset_ptr,
- max_chars);
- return rbb->util_buffer;
- }
- case kConsStringTag:
- return ConsString::cast(input)->ConsStringReadBlock(rbb,
- offset_ptr,
- max_chars);
- case kExternalStringTag:
- if (input->IsAsciiRepresentation()) {
- return ExternalAsciiString::cast(input)->ExternalAsciiStringReadBlock(
- &rbb->remaining,
- offset_ptr,
- max_chars);
- } else {
- ExternalTwoByteString::cast(input)->
- ExternalTwoByteStringReadBlockIntoBuffer(rbb,
- offset_ptr,
- max_chars);
- return rbb->util_buffer;
- }
- case kSlicedStringTag:
- return SlicedString::cast(input)->SlicedStringReadBlock(rbb,
- offset_ptr,
- max_chars);
- default:
- break;
- }
-
- UNREACHABLE();
- return 0;
-}
-
-
-void Relocatable::PostGarbageCollectionProcessing() {
- Isolate* isolate = Isolate::Current();
+void Relocatable::PostGarbageCollectionProcessing(Isolate* isolate) {
Relocatable* current = isolate->relocatable_top();
while (current != NULL) {
current->PostGarbageCollection();
@@ -6601,7 +8384,7 @@ void Relocatable::PostGarbageCollectionProcessing() {
// Reserve space for statics needing saving and restoring.
int Relocatable::ArchiveSpacePerThread() {
- return sizeof(Isolate::Current()->relocatable_top());
+ return sizeof(Relocatable*); // NOLINT
}
@@ -6627,8 +8410,7 @@ char* Relocatable::Iterate(ObjectVisitor* v, char* thread_storage) {
}
-void Relocatable::Iterate(ObjectVisitor* v) {
- Isolate* isolate = Isolate::Current();
+void Relocatable::Iterate(Isolate* isolate, ObjectVisitor* v) {
Iterate(v, isolate->relocatable_top());
}
@@ -6662,172 +8444,151 @@ void FlatStringReader::PostGarbageCollection() {
if (str_ == NULL) return;
Handle<String> str(str_);
ASSERT(str->IsFlat());
+ DisallowHeapAllocation no_gc;
+ // This does not actually prevent the vector from being relocated later.
String::FlatContent content = str->GetFlatContent();
ASSERT(content.IsFlat());
is_ascii_ = content.IsAscii();
if (is_ascii_) {
- start_ = content.ToAsciiVector().start();
+ start_ = content.ToOneByteVector().start();
} else {
start_ = content.ToUC16Vector().start();
}
}
-void StringInputBuffer::Seek(unsigned pos) {
- Reset(pos, input_);
+String* ConsStringIteratorOp::Operate(String* string,
+ unsigned* offset_out,
+ int32_t* type_out,
+ unsigned* length_out) {
+ ASSERT(string->IsConsString());
+ ConsString* cons_string = ConsString::cast(string);
+ // Set up search data.
+ root_ = cons_string;
+ consumed_ = *offset_out;
+ // Now search.
+ return Search(offset_out, type_out, length_out);
}
-void SafeStringInputBuffer::Seek(unsigned pos) {
- Reset(pos, input_);
-}
-
-
-// This method determines the type of string involved and then copies
-// a whole chunk of characters into a buffer. It can be used with strings
-// that have been glued together to form a ConsString and which must cooperate
-// to fill up a buffer.
-void String::ReadBlockIntoBuffer(String* input,
- ReadBlockBuffer* rbb,
- unsigned* offset_ptr,
- unsigned max_chars) {
- ASSERT(*offset_ptr <= (unsigned)input->length());
- if (max_chars == 0) return;
-
- switch (StringShape(input).representation_tag()) {
- case kSeqStringTag:
- if (input->IsAsciiRepresentation()) {
- SeqAsciiString::cast(input)->SeqAsciiStringReadBlockIntoBuffer(rbb,
- offset_ptr,
- max_chars);
- return;
- } else {
- SeqTwoByteString::cast(input)->SeqTwoByteStringReadBlockIntoBuffer(rbb,
- offset_ptr,
- max_chars);
- return;
+String* ConsStringIteratorOp::Search(unsigned* offset_out,
+ int32_t* type_out,
+ unsigned* length_out) {
+ ConsString* cons_string = root_;
+ // Reset the stack, pushing the root string.
+ depth_ = 1;
+ maximum_depth_ = 1;
+ frames_[0] = cons_string;
+ const unsigned consumed = consumed_;
+ unsigned offset = 0;
+ while (true) {
+ // Loop until the string is found which contains the target offset.
+ String* string = cons_string->first();
+ unsigned length = string->length();
+ int32_t type;
+ if (consumed < offset + length) {
+ // Target offset is in the left branch.
+ // Keep going if we're still in a ConsString.
+ type = string->map()->instance_type();
+ if ((type & kStringRepresentationMask) == kConsStringTag) {
+ cons_string = ConsString::cast(string);
+ PushLeft(cons_string);
+ continue;
}
- case kConsStringTag:
- ConsString::cast(input)->ConsStringReadBlockIntoBuffer(rbb,
- offset_ptr,
- max_chars);
- return;
- case kExternalStringTag:
- if (input->IsAsciiRepresentation()) {
- ExternalAsciiString::cast(input)->
- ExternalAsciiStringReadBlockIntoBuffer(rbb, offset_ptr, max_chars);
- } else {
- ExternalTwoByteString::cast(input)->
- ExternalTwoByteStringReadBlockIntoBuffer(rbb,
- offset_ptr,
- max_chars);
- }
- return;
- case kSlicedStringTag:
- SlicedString::cast(input)->SlicedStringReadBlockIntoBuffer(rbb,
- offset_ptr,
- max_chars);
- return;
- default:
- break;
+ // Tell the stack we're done descending.
+ AdjustMaximumDepth();
+ } else {
+ // Descend right.
+ // Update progress through the string.
+ offset += length;
+ // Keep going if we're still in a ConsString.
+ string = cons_string->second();
+ type = string->map()->instance_type();
+ if ((type & kStringRepresentationMask) == kConsStringTag) {
+ cons_string = ConsString::cast(string);
+ PushRight(cons_string);
+ // TODO(dcarney) Add back root optimization.
+ continue;
+ }
+ // Need this to be updated for the current string.
+ length = string->length();
+ // Account for the possibility of an empty right leaf.
+ // This happens only if we have asked for an offset outside the string.
+ if (length == 0) {
+ // Reset depth so future operations will return null immediately.
+ Reset();
+ return NULL;
+ }
+ // Tell the stack we're done descending.
+ AdjustMaximumDepth();
+ // Pop stack so next iteration is in correct place.
+ Pop();
+ }
+ ASSERT(length != 0);
+ // Adjust return values and exit.
+ consumed_ = offset + length;
+ *offset_out = consumed - offset;
+ *type_out = type;
+ *length_out = length;
+ return string;
}
-
UNREACHABLE();
- return;
+ return NULL;
}
-const unibrow::byte* String::ReadBlock(String* input,
- unibrow::byte* util_buffer,
- unsigned capacity,
- unsigned* remaining,
- unsigned* offset_ptr) {
- ASSERT(*offset_ptr <= (unsigned)input->length());
- unsigned chars = input->length() - *offset_ptr;
- ReadBlockBuffer rbb(util_buffer, 0, capacity, 0);
- const unibrow::byte* answer = ReadBlock(input, &rbb, offset_ptr, chars);
- ASSERT(rbb.remaining <= static_cast<unsigned>(input->length()));
- *remaining = rbb.remaining;
- return answer;
-}
-
-
-const unibrow::byte* String::ReadBlock(String** raw_input,
- unibrow::byte* util_buffer,
- unsigned capacity,
- unsigned* remaining,
- unsigned* offset_ptr) {
- Handle<String> input(raw_input);
- ASSERT(*offset_ptr <= (unsigned)input->length());
- unsigned chars = input->length() - *offset_ptr;
- if (chars > capacity) chars = capacity;
- ReadBlockBuffer rbb(util_buffer, 0, capacity, 0);
- ReadBlockIntoBuffer(*input, &rbb, offset_ptr, chars);
- ASSERT(rbb.remaining <= static_cast<unsigned>(input->length()));
- *remaining = rbb.remaining;
- return rbb.util_buffer;
-}
-
-
-// This will iterate unless the block of string data spans two 'halves' of
-// a ConsString, in which case it will recurse. Since the block of string
-// data to be read has a maximum size this limits the maximum recursion
-// depth to something sane. Since C++ does not have tail call recursion
-// elimination, the iteration must be explicit.
-void ConsString::ConsStringReadBlockIntoBuffer(ReadBlockBuffer* rbb,
- unsigned* offset_ptr,
- unsigned max_chars) {
- ConsString* current = this;
- unsigned offset = *offset_ptr;
- int offset_correction = 0;
-
+String* ConsStringIteratorOp::NextLeaf(bool* blew_stack,
+ int32_t* type_out,
+ unsigned* length_out) {
while (true) {
- String* left = current->first();
- unsigned left_length = (unsigned)left->length();
- if (left_length > offset &&
- max_chars <= left_length - offset) {
- // Left hand side only - iterate unless we have reached the bottom of
- // the cons tree.
- if (StringShape(left).IsCons()) {
- current = ConsString::cast(left);
- continue;
- } else {
- String::ReadBlockIntoBuffer(left, rbb, &offset, max_chars);
- *offset_ptr = offset + offset_correction;
- return;
- }
- } else if (left_length <= offset) {
- // Right hand side only - iterate unless we have reached the bottom of
- // the cons tree.
- offset -= left_length;
- offset_correction += left_length;
- String* right = current->second();
- if (StringShape(right).IsCons()) {
- current = ConsString::cast(right);
- continue;
- } else {
- String::ReadBlockIntoBuffer(right, rbb, &offset, max_chars);
- *offset_ptr = offset + offset_correction;
- return;
+ // Tree traversal complete.
+ if (depth_ == 0) {
+ *blew_stack = false;
+ return NULL;
+ }
+ // We've lost track of higher nodes.
+ if (maximum_depth_ - depth_ == kStackSize) {
+ *blew_stack = true;
+ return NULL;
+ }
+ // Go right.
+ ConsString* cons_string = frames_[OffsetForDepth(depth_ - 1)];
+ String* string = cons_string->second();
+ int32_t type = string->map()->instance_type();
+ if ((type & kStringRepresentationMask) != kConsStringTag) {
+ // Pop stack so next iteration is in correct place.
+ Pop();
+ unsigned length = static_cast<unsigned>(string->length());
+ // Could be a flattened ConsString.
+ if (length == 0) continue;
+ *length_out = length;
+ *type_out = type;
+ consumed_ += length;
+ return string;
+ }
+ cons_string = ConsString::cast(string);
+ // TODO(dcarney) Add back root optimization.
+ PushRight(cons_string);
+ // Need to traverse all the way left.
+ while (true) {
+ // Continue left.
+ string = cons_string->first();
+ type = string->map()->instance_type();
+ if ((type & kStringRepresentationMask) != kConsStringTag) {
+ AdjustMaximumDepth();
+ unsigned length = static_cast<unsigned>(string->length());
+ ASSERT(length != 0);
+ *length_out = length;
+ *type_out = type;
+ consumed_ += length;
+ return string;
}
- } else {
- // The block to be read spans two sides of the ConsString, so we recurse.
- // First recurse on the left.
- max_chars -= left_length - offset;
- String::ReadBlockIntoBuffer(left, rbb, &offset, left_length - offset);
- // We may have reached the max or there may not have been enough space
- // in the buffer for the characters in the left hand side.
- if (offset == left_length) {
- // Recurse on the right.
- String* right = String::cast(current->second());
- offset -= left_length;
- offset_correction += left_length;
- String::ReadBlockIntoBuffer(right, rbb, &offset, max_chars);
- }
- *offset_ptr = offset + offset_correction;
- return;
+ cons_string = ConsString::cast(string);
+ PushLeft(cons_string);
}
}
+ UNREACHABLE();
+ return NULL;
}
@@ -6867,26 +8628,6 @@ uint16_t SlicedString::SlicedStringGet(int index) {
}
-const unibrow::byte* SlicedString::SlicedStringReadBlock(
- ReadBlockBuffer* buffer, unsigned* offset_ptr, unsigned chars) {
- unsigned offset = this->offset();
- *offset_ptr += offset;
- const unibrow::byte* answer = String::ReadBlock(String::cast(parent()),
- buffer, offset_ptr, chars);
- *offset_ptr -= offset;
- return answer;
-}
-
-
-void SlicedString::SlicedStringReadBlockIntoBuffer(
- ReadBlockBuffer* buffer, unsigned* offset_ptr, unsigned chars) {
- unsigned offset = this->offset();
- *offset_ptr += offset;
- String::ReadBlockIntoBuffer(String::cast(parent()),
- buffer, offset_ptr, chars);
- *offset_ptr -= offset;
-}
-
template <typename sinkchar>
void String::WriteToFlat(String* src,
sinkchar* sink,
@@ -6898,7 +8639,7 @@ void String::WriteToFlat(String* src,
while (true) {
ASSERT(0 <= from && from <= to && to <= source->length());
switch (StringShape(source).full_representation_tag()) {
- case kAsciiStringTag | kExternalStringTag: {
+ case kOneByteStringTag | kExternalStringTag: {
CopyChars(sink,
ExternalAsciiString::cast(source)->GetChars() + from,
to - from);
@@ -6912,9 +8653,9 @@ void String::WriteToFlat(String* src,
to - from);
return;
}
- case kAsciiStringTag | kSeqStringTag: {
+ case kOneByteStringTag | kSeqStringTag: {
CopyChars(sink,
- SeqAsciiString::cast(source)->GetChars() + from,
+ SeqOneByteString::cast(source)->GetChars() + from,
to - from);
return;
}
@@ -6924,7 +8665,7 @@ void String::WriteToFlat(String* src,
to - from);
return;
}
- case kAsciiStringTag | kConsStringTag:
+ case kOneByteStringTag | kConsStringTag:
case kTwoByteStringTag | kConsStringTag: {
ConsString* cons_string = ConsString::cast(source);
String* first = cons_string->first();
@@ -6949,9 +8690,9 @@ void String::WriteToFlat(String* src,
// common case of sequential ascii right child.
if (to - boundary == 1) {
sink[boundary - from] = static_cast<sinkchar>(second->Get(0));
- } else if (second->IsSeqAsciiString()) {
+ } else if (second->IsSeqOneByteString()) {
CopyChars(sink + boundary - from,
- SeqAsciiString::cast(second)->GetChars(),
+ SeqOneByteString::cast(second)->GetChars(),
to - boundary);
} else {
WriteToFlat(second,
@@ -6965,7 +8706,7 @@ void String::WriteToFlat(String* src,
}
break;
}
- case kAsciiStringTag | kSlicedStringTag:
+ case kOneByteStringTag | kSlicedStringTag:
case kTwoByteStringTag | kSlicedStringTag: {
SlicedString* slice = SlicedString::cast(source);
unsigned offset = slice->offset();
@@ -6977,46 +8718,28 @@ void String::WriteToFlat(String* src,
}
-template <typename IteratorA, typename IteratorB>
-static inline bool CompareStringContents(IteratorA* ia, IteratorB* ib) {
- // General slow case check. We know that the ia and ib iterators
- // have the same length.
- while (ia->has_more()) {
- uint32_t ca = ia->GetNext();
- uint32_t cb = ib->GetNext();
- ASSERT(ca <= unibrow::Utf16::kMaxNonSurrogateCharCode);
- ASSERT(cb <= unibrow::Utf16::kMaxNonSurrogateCharCode);
- if (ca != cb)
- return false;
- }
- return true;
-}
-
-
// Compares the contents of two strings by reading and comparing
// int-sized blocks of characters.
template <typename Char>
-static inline bool CompareRawStringContents(Vector<Char> a, Vector<Char> b) {
- int length = a.length();
- ASSERT_EQ(length, b.length());
- const Char* pa = a.start();
- const Char* pb = b.start();
+static inline bool CompareRawStringContents(const Char* const a,
+ const Char* const b,
+ int length) {
int i = 0;
#ifndef V8_HOST_CAN_READ_UNALIGNED
// If this architecture isn't comfortable reading unaligned ints
// then we have to check that the strings are aligned before
// comparing them blockwise.
const int kAlignmentMask = sizeof(uint32_t) - 1; // NOLINT
- uint32_t pa_addr = reinterpret_cast<uint32_t>(pa);
- uint32_t pb_addr = reinterpret_cast<uint32_t>(pb);
+ uint32_t pa_addr = reinterpret_cast<uint32_t>(a);
+ uint32_t pb_addr = reinterpret_cast<uint32_t>(b);
if (((pa_addr & kAlignmentMask) | (pb_addr & kAlignmentMask)) == 0) {
#endif
const int kStepSize = sizeof(int) / sizeof(Char); // NOLINT
int endpoint = length - kStepSize;
// Compare blocks until we reach near the end of the string.
for (; i <= endpoint; i += kStepSize) {
- uint32_t wa = *reinterpret_cast<const uint32_t*>(pa + i);
- uint32_t wb = *reinterpret_cast<const uint32_t*>(pb + i);
+ uint32_t wa = *reinterpret_cast<const uint32_t*>(a + i);
+ uint32_t wb = *reinterpret_cast<const uint32_t*>(b + i);
if (wa != wb) {
return false;
}
@@ -7034,25 +8757,147 @@ static inline bool CompareRawStringContents(Vector<Char> a, Vector<Char> b) {
}
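
CompareRawStringContents above compares word-sized blocks rather than individual characters. The same idea in a self-contained form, using memcpy to sidestep the alignment concern the patch handles explicitly (names are illustrative, not V8 API):

#include <cstdint>
#include <cstring>

// Compare len elements word-by-word, then finish with a scalar tail loop.
template <typename Char>
bool RawEqualsSketch(const Char* a, const Char* b, int len) {
  const int step = static_cast<int>(sizeof(uint32_t) / sizeof(Char));
  int i = 0;
  for (; i + step <= len; i += step) {
    uint32_t wa, wb;
    std::memcpy(&wa, a + i, sizeof(wa));  // memcpy avoids unaligned reads
    std::memcpy(&wb, b + i, sizeof(wb));
    if (wa != wb) return false;
  }
  for (; i < len; ++i) {
    if (a[i] != b[i]) return false;
  }
  return true;
}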
-template <typename IteratorA>
-static inline bool CompareStringContentsPartial(Isolate* isolate,
- IteratorA* ia,
- String* b) {
- String::FlatContent content = b->GetFlatContent();
- if (content.IsFlat()) {
- if (content.IsAscii()) {
- VectorIterator<char> ib(content.ToAsciiVector());
- return CompareStringContents(ia, &ib);
- } else {
- VectorIterator<uc16> ib(content.ToUC16Vector());
- return CompareStringContents(ia, &ib);
+template<typename Chars1, typename Chars2>
+class RawStringComparator : public AllStatic {
+ public:
+ static inline bool compare(const Chars1* a, const Chars2* b, int len) {
+ ASSERT(sizeof(Chars1) != sizeof(Chars2));
+ for (int i = 0; i < len; i++) {
+ if (a[i] != b[i]) {
+ return false;
+ }
}
- } else {
- isolate->objects_string_compare_buffer_b()->Reset(0, b);
- return CompareStringContents(ia,
- isolate->objects_string_compare_buffer_b());
+ return true;
}
-}
+};
+
+
+template<>
+class RawStringComparator<uint16_t, uint16_t> {
+ public:
+ static inline bool compare(const uint16_t* a, const uint16_t* b, int len) {
+ return CompareRawStringContents(a, b, len);
+ }
+};
+
+
+template<>
+class RawStringComparator<uint8_t, uint8_t> {
+ public:
+ static inline bool compare(const uint8_t* a, const uint8_t* b, int len) {
+ return CompareRawStringContents(a, b, len);
+ }
+};
+
+
+class StringComparator {
+ class State {
+ public:
+ explicit inline State(ConsStringIteratorOp* op)
+ : op_(op), is_one_byte_(true), length_(0), buffer8_(NULL) {}
+
+ inline void Init(String* string, unsigned len) {
+ op_->Reset();
+ int32_t type = string->map()->instance_type();
+ String::Visit(string, 0, *this, *op_, type, len);
+ }
+
+ inline void VisitOneByteString(const uint8_t* chars, unsigned length) {
+ is_one_byte_ = true;
+ buffer8_ = chars;
+ length_ = length;
+ }
+
+ inline void VisitTwoByteString(const uint16_t* chars, unsigned length) {
+ is_one_byte_ = false;
+ buffer16_ = chars;
+ length_ = length;
+ }
+
+ void Advance(unsigned consumed) {
+ ASSERT(consumed <= length_);
+ // Still in buffer.
+ if (length_ != consumed) {
+ if (is_one_byte_) {
+ buffer8_ += consumed;
+ } else {
+ buffer16_ += consumed;
+ }
+ length_ -= consumed;
+ return;
+ }
+ // Advance state.
+ ASSERT(op_->HasMore());
+ int32_t type = 0;
+ unsigned length = 0;
+ String* next = op_->ContinueOperation(&type, &length);
+ ASSERT(next != NULL);
+ ConsStringNullOp null_op;
+ String::Visit(next, 0, *this, null_op, type, length);
+ }
+
+ ConsStringIteratorOp* const op_;
+ bool is_one_byte_;
+ unsigned length_;
+ union {
+ const uint8_t* buffer8_;
+ const uint16_t* buffer16_;
+ };
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(State);
+ };
+
+ public:
+ inline StringComparator(ConsStringIteratorOp* op_1,
+ ConsStringIteratorOp* op_2)
+ : state_1_(op_1),
+ state_2_(op_2) {
+ }
+
+ template<typename Chars1, typename Chars2>
+ static inline bool Equals(State* state_1, State* state_2, unsigned to_check) {
+ const Chars1* a = reinterpret_cast<const Chars1*>(state_1->buffer8_);
+ const Chars2* b = reinterpret_cast<const Chars2*>(state_2->buffer8_);
+ return RawStringComparator<Chars1, Chars2>::compare(a, b, to_check);
+ }
+
+ bool Equals(unsigned length, String* string_1, String* string_2) {
+ ASSERT(length != 0);
+ state_1_.Init(string_1, length);
+ state_2_.Init(string_2, length);
+ while (true) {
+ unsigned to_check = Min(state_1_.length_, state_2_.length_);
+ ASSERT(to_check > 0 && to_check <= length);
+ bool is_equal;
+ if (state_1_.is_one_byte_) {
+ if (state_2_.is_one_byte_) {
+ is_equal = Equals<uint8_t, uint8_t>(&state_1_, &state_2_, to_check);
+ } else {
+ is_equal = Equals<uint8_t, uint16_t>(&state_1_, &state_2_, to_check);
+ }
+ } else {
+ if (state_2_.is_one_byte_) {
+ is_equal = Equals<uint16_t, uint8_t>(&state_1_, &state_2_, to_check);
+ } else {
+ is_equal = Equals<uint16_t, uint16_t>(&state_1_, &state_2_, to_check);
+ }
+ }
+ // Looping done.
+ if (!is_equal) return false;
+ length -= to_check;
+ // Exit condition. Strings are equal.
+ if (length == 0) return true;
+ state_1_.Advance(to_check);
+ state_2_.Advance(to_check);
+ }
+ }
+
+ private:
+ State state_1_;
+ State state_2_;
+ DISALLOW_IMPLICIT_CONSTRUCTORS(StringComparator);
+};
bool String::SlowEquals(String* other) {
@@ -7064,7 +8909,7 @@ bool String::SlowEquals(String* other) {
// Fast check: if hash code is computed for both strings
// a fast negative check can be performed.
if (HasHashCode() && other->HasHashCode()) {
-#ifdef DEBUG
+#ifdef ENABLE_SLOW_ASSERTS
if (FLAG_enable_slow_asserts) {
if (Hash() != other->Hash()) {
bool found_difference = false;
@@ -7088,63 +8933,24 @@ bool String::SlowEquals(String* other) {
String* lhs = this->TryFlattenGetString();
String* rhs = other->TryFlattenGetString();
+ // TODO(dcarney): Compare all types of flat strings with a Visitor.
if (StringShape(lhs).IsSequentialAscii() &&
StringShape(rhs).IsSequentialAscii()) {
- const char* str1 = SeqAsciiString::cast(lhs)->GetChars();
- const char* str2 = SeqAsciiString::cast(rhs)->GetChars();
- return CompareRawStringContents(Vector<const char>(str1, len),
- Vector<const char>(str2, len));
+ const uint8_t* str1 = SeqOneByteString::cast(lhs)->GetChars();
+ const uint8_t* str2 = SeqOneByteString::cast(rhs)->GetChars();
+ return CompareRawStringContents(str1, str2, len);
}
Isolate* isolate = GetIsolate();
- String::FlatContent lhs_content = lhs->GetFlatContent();
- String::FlatContent rhs_content = rhs->GetFlatContent();
- if (lhs_content.IsFlat()) {
- if (lhs_content.IsAscii()) {
- Vector<const char> vec1 = lhs_content.ToAsciiVector();
- if (rhs_content.IsFlat()) {
- if (rhs_content.IsAscii()) {
- Vector<const char> vec2 = rhs_content.ToAsciiVector();
- return CompareRawStringContents(vec1, vec2);
- } else {
- VectorIterator<char> buf1(vec1);
- VectorIterator<uc16> ib(rhs_content.ToUC16Vector());
- return CompareStringContents(&buf1, &ib);
- }
- } else {
- VectorIterator<char> buf1(vec1);
- isolate->objects_string_compare_buffer_b()->Reset(0, rhs);
- return CompareStringContents(&buf1,
- isolate->objects_string_compare_buffer_b());
- }
- } else {
- Vector<const uc16> vec1 = lhs_content.ToUC16Vector();
- if (rhs_content.IsFlat()) {
- if (rhs_content.IsAscii()) {
- VectorIterator<uc16> buf1(vec1);
- VectorIterator<char> ib(rhs_content.ToAsciiVector());
- return CompareStringContents(&buf1, &ib);
- } else {
- Vector<const uc16> vec2(rhs_content.ToUC16Vector());
- return CompareRawStringContents(vec1, vec2);
- }
- } else {
- VectorIterator<uc16> buf1(vec1);
- isolate->objects_string_compare_buffer_b()->Reset(0, rhs);
- return CompareStringContents(&buf1,
- isolate->objects_string_compare_buffer_b());
- }
- }
- } else {
- isolate->objects_string_compare_buffer_a()->Reset(0, lhs);
- return CompareStringContentsPartial(isolate,
- isolate->objects_string_compare_buffer_a(), rhs);
- }
+ StringComparator comparator(isolate->objects_string_compare_iterator_a(),
+ isolate->objects_string_compare_iterator_b());
+
+ return comparator.Equals(static_cast<unsigned>(len), lhs, rhs);
}
bool String::MarkAsUndetectable() {
- if (StringShape(this).IsSymbol()) return false;
+ if (StringShape(this).IsInternalized()) return false;
Map* map = this->map();
Heap* heap = GetHeap();
@@ -7160,15 +8966,22 @@ bool String::MarkAsUndetectable() {
}
-bool String::IsEqualTo(Vector<const char> str) {
- Isolate* isolate = GetIsolate();
+bool String::IsUtf8EqualTo(Vector<const char> str, bool allow_prefix_match) {
int slen = length();
- Access<UnicodeCache::Utf8Decoder>
- decoder(isolate->unicode_cache()->utf8_decoder());
- decoder->Reset(str.start(), str.length());
+ // Can't check exact length equality, but we can check bounds.
+ int str_len = str.length();
+ if (!allow_prefix_match &&
+ (str_len < slen ||
+ str_len > slen*static_cast<int>(unibrow::Utf8::kMaxEncodedSize))) {
+ return false;
+ }
int i;
- for (i = 0; i < slen && decoder->has_more(); i++) {
- uint32_t r = decoder->GetNext();
+ unsigned remaining_in_str = static_cast<unsigned>(str_len);
+ const uint8_t* utf8_data = reinterpret_cast<const uint8_t*>(str.start());
+ for (i = 0; i < slen && remaining_in_str > 0; i++) {
+ unsigned cursor = 0;
+ uint32_t r = unibrow::Utf8::ValueOf(utf8_data, remaining_in_str, &cursor);
+ ASSERT(cursor > 0 && cursor <= remaining_in_str);
if (r > unibrow::Utf16::kMaxNonSurrogateCharCode) {
if (i > slen - 1) return false;
if (Get(i++) != unibrow::Utf16::LeadSurrogate(r)) return false;
@@ -7176,17 +8989,20 @@ bool String::IsEqualTo(Vector<const char> str) {
} else {
if (Get(i) != r) return false;
}
+ utf8_data += cursor;
+ remaining_in_str -= cursor;
}
- return i == slen && !decoder->has_more();
+ return (allow_prefix_match || i == slen) && remaining_in_str == 0;
}
-bool String::IsAsciiEqualTo(Vector<const char> str) {
+bool String::IsOneByteEqualTo(Vector<const uint8_t> str) {
int slen = length();
if (str.length() != slen) return false;
+ DisallowHeapAllocation no_gc;
FlatContent content = GetFlatContent();
if (content.IsAscii()) {
- return CompareChars(content.ToAsciiVector().start(),
+ return CompareChars(content.ToOneByteVector().start(),
str.start(), slen) == 0;
}
for (int i = 0; i < slen; i++) {
@@ -7199,6 +9015,7 @@ bool String::IsAsciiEqualTo(Vector<const char> str) {
bool String::IsTwoByteEqualTo(Vector<const uc16> str) {
int slen = length();
if (str.length() != slen) return false;
+ DisallowHeapAllocation no_gc;
FlatContent content = GetFlatContent();
if (content.IsTwoByte()) {
return CompareChars(content.ToUC16Vector().start(), str.start(), slen) == 0;
@@ -7210,28 +9027,62 @@ bool String::IsTwoByteEqualTo(Vector<const uc16> str) {
}
+class IteratingStringHasher: public StringHasher {
+ public:
+ static inline uint32_t Hash(String* string, uint32_t seed) {
+ const unsigned len = static_cast<unsigned>(string->length());
+ IteratingStringHasher hasher(len, seed);
+ if (hasher.has_trivial_hash()) {
+ return hasher.GetHashField();
+ }
+ int32_t type = string->map()->instance_type();
+ ConsStringNullOp null_op;
+ String::Visit(string, 0, hasher, null_op, type, len);
+ // Flat strings terminate immediately.
+ if (hasher.consumed_ == len) {
+ ASSERT(!string->IsConsString());
+ return hasher.GetHashField();
+ }
+ ASSERT(string->IsConsString());
+ // This is a ConsString, iterate across it.
+ ConsStringIteratorOp op;
+ unsigned offset = 0;
+ unsigned leaf_length = len;
+ string = op.Operate(string, &offset, &type, &leaf_length);
+ while (true) {
+ ASSERT(hasher.consumed_ < len);
+ String::Visit(string, 0, hasher, null_op, type, leaf_length);
+ if (hasher.consumed_ == len) break;
+ string = op.ContinueOperation(&type, &leaf_length);
+ // This should be taken care of by the length check.
+ ASSERT(string != NULL);
+ }
+ return hasher.GetHashField();
+ }
+ inline void VisitOneByteString(const uint8_t* chars, unsigned length) {
+ AddCharacters(chars, static_cast<int>(length));
+ consumed_ += length;
+ }
+ inline void VisitTwoByteString(const uint16_t* chars, unsigned length) {
+ AddCharacters(chars, static_cast<int>(length));
+ consumed_ += length;
+ }
+
+ private:
+ inline IteratingStringHasher(int len, uint32_t seed)
+ : StringHasher(len, seed),
+ consumed_(0) {}
+ unsigned consumed_;
+ DISALLOW_COPY_AND_ASSIGN(IteratingStringHasher);
+};
+
+
uint32_t String::ComputeAndSetHash() {
// Should only be called if hash code has not yet been computed.
ASSERT(!HasHashCode());
- const int len = length();
-
- // Compute the hash code.
- uint32_t field = 0;
- if (StringShape(this).IsSequentialAscii()) {
- field = HashSequentialString(SeqAsciiString::cast(this)->GetChars(),
- len,
- GetHeap()->HashSeed());
- } else if (StringShape(this).IsSequentialTwoByte()) {
- field = HashSequentialString(SeqTwoByteString::cast(this)->GetChars(),
- len,
- GetHeap()->HashSeed());
- } else {
- StringInputBuffer buffer(this);
- field = ComputeHashField(&buffer, len, GetHeap()->HashSeed());
- }
-
// Store the hash code in the object.
+ uint32_t field = IteratingStringHasher::Hash(this, GetHeap()->HashSeed());
set_hash_field(field);
// Check the hash code is there.
@@ -7242,11 +9093,12 @@ uint32_t String::ComputeAndSetHash() {
}
-bool String::ComputeArrayIndex(unibrow::CharacterStream* buffer,
- uint32_t* index,
- int length) {
+bool String::ComputeArrayIndex(uint32_t* index) {
+ int length = this->length();
if (length == 0 || length > kMaxArrayIndexSize) return false;
- uc32 ch = buffer->GetNext();
+ ConsStringIteratorOp op;
+ StringCharacterStream stream(this, &op);
+ uint16_t ch = stream.GetNext();
// If the string begins with a '0' character, it must only consist
// of it to be a legal array index.
@@ -7259,8 +9111,8 @@ bool String::ComputeArrayIndex(unibrow::CharacterStream* buffer,
int d = ch - '0';
if (d < 0 || d > 9) return false;
uint32_t result = d;
- while (buffer->has_more()) {
- d = buffer->GetNext() - '0';
+ while (stream.HasMore()) {
+ d = stream.GetNext() - '0';
if (d < 0 || d > 9) return false;
// Check that the new result is below the 32 bit limit.
if (result > 429496729U - ((d > 5) ? 1 : 0)) return false;
@@ -7281,9 +9133,81 @@ bool String::SlowAsArrayIndex(uint32_t* index) {
*index = (kArrayIndexHashMask & field) >> kHashShift;
return true;
} else {
- StringInputBuffer buffer(this);
- return ComputeArrayIndex(&buffer, index, length());
+ return ComputeArrayIndex(index);
+ }
+}
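
The 429496729U guard in ComputeArrayIndex above is the standard overflow check for building a 32-bit value digit by digit (429496729 == 0xFFFFFFFF / 10). As a standalone sketch, with a hypothetical helper name:

#include <cstdint>

// Append decimal digit d (0..9) to *acc, failing instead of overflowing.
// If *acc > 429496729, *acc * 10 no longer fits in 32 bits; if it is
// exactly 429496729, only digits up to 5 keep the result <= 0xFFFFFFFF.
bool AppendDigitSketch(uint32_t* acc, int d) {
  if (*acc > 429496729u - ((d > 5) ? 1 : 0)) return false;
  *acc = *acc * 10 + static_cast<uint32_t>(d);
  return true;
}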
+
+
+Handle<String> SeqString::Truncate(Handle<SeqString> string, int new_length) {
+ int new_size, old_size;
+ int old_length = string->length();
+ if (old_length <= new_length) return string;
+
+ if (string->IsSeqOneByteString()) {
+ old_size = SeqOneByteString::SizeFor(old_length);
+ new_size = SeqOneByteString::SizeFor(new_length);
+ } else {
+ ASSERT(string->IsSeqTwoByteString());
+ old_size = SeqTwoByteString::SizeFor(old_length);
+ new_size = SeqTwoByteString::SizeFor(new_length);
+ }
+
+ int delta = old_size - new_size;
+ string->set_length(new_length);
+
+ Address start_of_string = string->address();
+ ASSERT_OBJECT_ALIGNED(start_of_string);
+ ASSERT_OBJECT_ALIGNED(start_of_string + new_size);
+
+ Heap* heap = string->GetHeap();
+ NewSpace* newspace = heap->new_space();
+ if (newspace->Contains(start_of_string) &&
+ newspace->top() == start_of_string + old_size) {
+ // Last allocated object in new space. Simply lower allocation top.
+ newspace->set_top(start_of_string + new_size);
+ } else {
+ // Sizes are pointer size aligned, so that we can use filler objects
+ // that are a multiple of pointer size.
+ heap->CreateFillerObjectAt(start_of_string + new_size, delta);
}
+ if (Marking::IsBlack(Marking::MarkBitFrom(start_of_string))) {
+ MemoryChunk::IncrementLiveBytesFromMutator(start_of_string, -delta);
+ }
+
+
+ if (new_length == 0) return heap->isolate()->factory()->empty_string();
+ return string;
+}
+
+
+AllocationMemento* AllocationMemento::FindForJSObject(JSObject* object,
+ bool in_GC) {
+ // Currently, AllocationMemento objects are only allocated immediately
+ // after JSArrays in NewSpace, and detecting whether a JSArray has one
+ // involves carefully checking the object immediately after the JSArray
+ // (if there is one) to see if it's an AllocationMemento.
+ if (FLAG_track_allocation_sites && object->GetHeap()->InNewSpace(object)) {
+ Address ptr_end = (reinterpret_cast<Address>(object) - kHeapObjectTag) +
+ object->Size();
+ Address top;
+ if (in_GC) {
+ top = object->GetHeap()->new_space()->FromSpacePageHigh();
+ } else {
+ top = object->GetHeap()->NewSpaceTop();
+ }
+ if ((ptr_end + AllocationMemento::kSize) <= top) {
+ // There is room in newspace for allocation info. Do we have some?
+ Map** possible_allocation_memento_map =
+ reinterpret_cast<Map**>(ptr_end);
+ if (*possible_allocation_memento_map ==
+ object->GetHeap()->allocation_memento_map()) {
+ AllocationMemento* memento = AllocationMemento::cast(
+ reinterpret_cast<Object*>(ptr_end + kHeapObjectTag));
+ return memento;
+ }
+ }
+ }
+ return NULL;
}
@@ -7305,57 +9229,64 @@ uint32_t StringHasher::MakeArrayIndexHash(uint32_t value, int length) {
}
-void StringHasher::AddSurrogatePair(uc32 c) {
- uint16_t lead = unibrow::Utf16::LeadSurrogate(c);
- AddCharacter(lead);
- uint16_t trail = unibrow::Utf16::TrailSurrogate(c);
- AddCharacter(trail);
-}
-
-
-void StringHasher::AddSurrogatePairNoIndex(uc32 c) {
- uint16_t lead = unibrow::Utf16::LeadSurrogate(c);
- AddCharacterNoIndex(lead);
- uint16_t trail = unibrow::Utf16::TrailSurrogate(c);
- AddCharacterNoIndex(trail);
-}
-
-
uint32_t StringHasher::GetHashField() {
if (length_ <= String::kMaxHashCalcLength) {
- if (is_array_index()) {
- return MakeArrayIndexHash(array_index(), length_);
+ if (is_array_index_) {
+ return MakeArrayIndexHash(array_index_, length_);
}
- return (GetHash() << String::kHashShift) | String::kIsNotArrayIndexMask;
+ return (GetHashCore(raw_running_hash_) << String::kHashShift) |
+ String::kIsNotArrayIndexMask;
} else {
return (length_ << String::kHashShift) | String::kIsNotArrayIndexMask;
}
}
-uint32_t String::ComputeHashField(unibrow::CharacterStream* buffer,
- int length,
- uint32_t seed) {
- StringHasher hasher(length, seed);
-
- // Very long strings have a trivial hash that doesn't inspect the
- // string contents.
- if (hasher.has_trivial_hash()) {
- return hasher.GetHashField();
- }
-
- // Do the iterative array index computation as long as there is a
- // chance this is an array index.
- while (buffer->has_more() && hasher.is_array_index()) {
- hasher.AddCharacter(buffer->GetNext());
- }
-
- // Process the remaining characters without updating the array
- // index.
- while (buffer->has_more()) {
- hasher.AddCharacterNoIndex(buffer->GetNext());
+uint32_t StringHasher::ComputeUtf8Hash(Vector<const char> chars,
+ uint32_t seed,
+ int* utf16_length_out) {
+ int vector_length = chars.length();
+ // Handle some edge cases
+ if (vector_length <= 1) {
+ ASSERT(vector_length == 0 ||
+ static_cast<uint8_t>(chars.start()[0]) <=
+ unibrow::Utf8::kMaxOneByteChar);
+ *utf16_length_out = vector_length;
+ return HashSequentialString(chars.start(), vector_length, seed);
+ }
+ // Start with a fake length which won't affect computation.
+ // It will be updated later.
+ StringHasher hasher(String::kMaxArrayIndexSize, seed);
+ unsigned remaining = static_cast<unsigned>(vector_length);
+ const uint8_t* stream = reinterpret_cast<const uint8_t*>(chars.start());
+ int utf16_length = 0;
+ bool is_index = true;
+ ASSERT(hasher.is_array_index_);
+ while (remaining > 0) {
+ unsigned consumed = 0;
+ uint32_t c = unibrow::Utf8::ValueOf(stream, remaining, &consumed);
+ ASSERT(consumed > 0 && consumed <= remaining);
+ stream += consumed;
+ remaining -= consumed;
+ bool is_two_characters = c > unibrow::Utf16::kMaxNonSurrogateCharCode;
+ utf16_length += is_two_characters ? 2 : 1;
+ // No need to keep hashing. But we do need to calculate utf16_length.
+ if (utf16_length > String::kMaxHashCalcLength) continue;
+ if (is_two_characters) {
+ uint16_t c1 = unibrow::Utf16::LeadSurrogate(c);
+ uint16_t c2 = unibrow::Utf16::TrailSurrogate(c);
+ hasher.AddCharacter(c1);
+ hasher.AddCharacter(c2);
+ if (is_index) is_index = hasher.UpdateIndex(c1);
+ if (is_index) is_index = hasher.UpdateIndex(c2);
+ } else {
+ hasher.AddCharacter(c);
+ if (is_index) is_index = hasher.UpdateIndex(c);
+ }
}
-
+ *utf16_length_out = static_cast<int>(utf16_length);
+ // Must set length here so that hash computation is correct.
+ hasher.length_ = utf16_length;
return hasher.GetHashField();
}
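
ComputeUtf8Hash above has to track the UTF-16 length separately because code points above U+FFFF decode to surrogate pairs. For well-formed UTF-8 that length can also be computed directly from the bytes; a small sketch (hypothetical helper, not part of unibrow):

#include <cstddef>
#include <cstdint>

// UTF-16 code units needed for well-formed UTF-8: one per lead byte,
// plus one extra for each 4-byte sequence (those become surrogate pairs).
size_t Utf16LengthSketch(const uint8_t* data, size_t length) {
  size_t units = 0;
  for (size_t i = 0; i < length; ++i) {
    if ((data[i] & 0xC0) != 0x80) ++units;  // lead byte, not a continuation
    if (data[i] >= 0xF0) ++units;           // 4-byte lead => surrogate pair
  }
  return units;
}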
@@ -7371,7 +9302,7 @@ MaybeObject* String::SubString(int start, int end, PretenureFlag pretenure) {
void String::PrintOn(FILE* file) {
int length = this->length();
for (int i = 0; i < length; i++) {
- fprintf(file, "%c", Get(i));
+ PrintF(file, "%c", Get(i));
}
}
@@ -7399,11 +9330,12 @@ static void TrimDescriptorArray(Heap* heap,
Map* map,
DescriptorArray* descriptors,
int number_of_own_descriptors) {
- int number_of_descriptors = descriptors->number_of_descriptors();
+ int number_of_descriptors = descriptors->number_of_descriptors_storage();
int to_trim = number_of_descriptors - number_of_own_descriptors;
- if (to_trim <= 0) return;
+ if (to_trim == 0) return;
- RightTrimFixedArray<FROM_GC>(heap, descriptors, to_trim);
+ RightTrimFixedArray<FROM_GC>(
+ heap, descriptors, to_trim * DescriptorArray::kDescriptorSize);
descriptors->SetNumberOfDescriptors(number_of_own_descriptors);
if (descriptors->HasEnumCache()) TrimEnumCache(heap, map, descriptors);
@@ -7443,11 +9375,10 @@ void Map::ClearNonLiveTransitions(Heap* heap) {
if (ClearBackPointer(heap, target)) {
if (target->instance_descriptors() == descriptors) {
descriptors_owner_died = true;
- descriptors_owner_died = true;
}
} else {
if (i != transition_index) {
- String* key = t->GetKey(i);
+ Name* key = t->GetKey(i);
t->SetKey(transition_index, key);
Object** key_slot = t->GetKeySlot(transition_index);
collector->RecordSlot(key_slot, key_slot, key);
@@ -7458,18 +9389,10 @@ void Map::ClearNonLiveTransitions(Heap* heap) {
}
}
- if (t->HasElementsTransition() &&
- ClearBackPointer(heap, t->elements_transition())) {
- if (t->elements_transition()->instance_descriptors() == descriptors) {
- descriptors_owner_died = true;
- }
- t->ClearElementsTransition();
- } else {
- // If there are no transitions to be cleared, return.
- // TODO(verwaest) Should be an assert, otherwise back pointers are not
- // properly cleared.
- if (transition_index == t->number_of_transitions()) return;
- }
+ // If there are no transitions to be cleared, return.
+ // TODO(verwaest) Should be an assert, otherwise back pointers are not
+ // properly cleared.
+ if (transition_index == t->number_of_transitions()) return;
int number_of_own_descriptors = NumberOfOwnDescriptors();
@@ -7477,6 +9400,7 @@ void Map::ClearNonLiveTransitions(Heap* heap) {
if (number_of_own_descriptors > 0) {
TrimDescriptorArray(heap, this, descriptors, number_of_own_descriptors);
ASSERT(descriptors->number_of_descriptors() == number_of_own_descriptors);
+ set_owns_descriptors(true);
} else {
ASSERT(descriptors == GetHeap()->empty_descriptor_array());
}
@@ -7508,18 +9432,38 @@ int Map::Hash() {
}
+static bool CheckEquivalent(Map* first, Map* second) {
+ return
+ first->constructor() == second->constructor() &&
+ first->prototype() == second->prototype() &&
+ first->instance_type() == second->instance_type() &&
+ first->bit_field() == second->bit_field() &&
+ first->bit_field2() == second->bit_field2() &&
+ first->is_observed() == second->is_observed() &&
+ first->function_with_prototype() == second->function_with_prototype();
+}
+
+
+bool Map::EquivalentToForTransition(Map* other) {
+ return CheckEquivalent(this, other);
+}
+
+
bool Map::EquivalentToForNormalization(Map* other,
PropertyNormalizationMode mode) {
- return
- constructor() == other->constructor() &&
- prototype() == other->prototype() &&
- inobject_properties() == ((mode == CLEAR_INOBJECT_PROPERTIES) ?
- 0 :
- other->inobject_properties()) &&
- instance_type() == other->instance_type() &&
- bit_field() == other->bit_field() &&
- bit_field2() == other->bit_field2() &&
- function_with_prototype() == other->function_with_prototype();
+ int properties = mode == CLEAR_INOBJECT_PROPERTIES
+ ? 0 : other->inobject_properties();
+ return CheckEquivalent(this, other) && inobject_properties() == properties;
+}
+
+
+void ConstantPoolArray::ConstantPoolIterateBody(ObjectVisitor* v) {
+ int first_ptr_offset = OffsetOfElementAt(first_ptr_index());
+ int last_ptr_offset =
+ OffsetOfElementAt(first_ptr_index() + count_of_ptr_entries());
+ v->VisitPointers(
+ HeapObject::RawField(this, first_ptr_offset),
+ HeapObject::RawField(this, last_ptr_offset));
}
@@ -7533,33 +9477,59 @@ void JSFunction::JSFunctionIterateBody(int object_size, ObjectVisitor* v) {
void JSFunction::MarkForLazyRecompilation() {
- ASSERT(is_compiled() && !IsOptimized());
+ ASSERT(is_compiled() || GetIsolate()->DebuggerHasBreakPoints());
+ ASSERT(!IsOptimized());
ASSERT(shared()->allows_lazy_compilation() ||
code()->optimizable());
- Builtins* builtins = GetIsolate()->builtins();
- ReplaceCode(builtins->builtin(Builtins::kLazyRecompile));
+ ASSERT(!shared()->is_generator());
+ set_code_no_write_barrier(
+ GetIsolate()->builtins()->builtin(Builtins::kLazyRecompile));
+ // No write barrier required, since the builtin is part of the root set.
}
-void JSFunction::MarkForParallelRecompilation() {
- ASSERT(is_compiled() && !IsOptimized());
+
+void JSFunction::MarkForConcurrentRecompilation() {
+ ASSERT(is_compiled() || GetIsolate()->DebuggerHasBreakPoints());
+ ASSERT(!IsOptimized());
ASSERT(shared()->allows_lazy_compilation() || code()->optimizable());
- Builtins* builtins = GetIsolate()->builtins();
- ReplaceCode(builtins->builtin(Builtins::kParallelRecompile));
+ ASSERT(!shared()->is_generator());
+ ASSERT(FLAG_concurrent_recompilation);
+ if (FLAG_trace_concurrent_recompilation) {
+ PrintF(" ** Marking ");
+ PrintName();
+ PrintF(" for concurrent recompilation.\n");
+ }
+ set_code_no_write_barrier(
+ GetIsolate()->builtins()->builtin(Builtins::kConcurrentRecompile));
+ // No write barrier required, since the builtin is part of the root set.
+}
- // Unlike MarkForLazyRecompilation, after queuing a function for
- // recompilation on the compiler thread, we actually tail-call into
- // the full code. We reset the profiler ticks here so that the
- // function doesn't bother the runtime profiler too much.
- shared()->code()->set_profiler_ticks(0);
+
+void JSFunction::MarkInRecompileQueue() {
+ // We can only arrive here via the concurrent-recompilation builtin. If
+ // break points were set, the code would point to the lazy-compile builtin.
+ ASSERT(!GetIsolate()->DebuggerHasBreakPoints());
+ ASSERT(IsMarkedForConcurrentRecompilation() && !IsOptimized());
+ ASSERT(shared()->allows_lazy_compilation() || code()->optimizable());
+ ASSERT(FLAG_concurrent_recompilation);
+ if (FLAG_trace_concurrent_recompilation) {
+ PrintF(" ** Queueing ");
+ PrintName();
+ PrintF(" for concurrent recompilation.\n");
+ }
+ set_code_no_write_barrier(
+ GetIsolate()->builtins()->builtin(Builtins::kInRecompileQueue));
+ // No write barrier required, since the builtin is part of the root set.
}
+
static bool CompileLazyHelper(CompilationInfo* info,
ClearExceptionFlag flag) {
// Compile the source information to a code object.
ASSERT(info->IsOptimizing() || !info->shared_info()->is_compiled());
ASSERT(!info->isolate()->has_pending_exception());
bool result = Compiler::CompileLazy(info);
- ASSERT(result != Isolate::Current()->has_pending_exception());
+ ASSERT(result != info->isolate()->has_pending_exception());
if (!result && flag == CLEAR_EXCEPTION) {
info->isolate()->clear_pending_exception();
}
@@ -7575,43 +9545,54 @@ bool SharedFunctionInfo::CompileLazy(Handle<SharedFunctionInfo> shared,
}
-void SharedFunctionInfo::ClearOptimizedCodeMap() {
- set_optimized_code_map(Smi::FromInt(0));
-}
-
-
void SharedFunctionInfo::AddToOptimizedCodeMap(
Handle<SharedFunctionInfo> shared,
Handle<Context> native_context,
Handle<Code> code,
Handle<FixedArray> literals) {
+ CALL_HEAP_FUNCTION_VOID(
+ shared->GetIsolate(),
+ shared->AddToOptimizedCodeMap(*native_context, *code, *literals));
+}
+
+
+MaybeObject* SharedFunctionInfo::AddToOptimizedCodeMap(Context* native_context,
+ Code* code,
+ FixedArray* literals) {
ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
ASSERT(native_context->IsNativeContext());
STATIC_ASSERT(kEntryLength == 3);
- Object* value = shared->optimized_code_map();
- Handle<FixedArray> new_code_map;
+ Heap* heap = GetHeap();
+ FixedArray* new_code_map;
+ Object* value = optimized_code_map();
if (value->IsSmi()) {
// No optimized code map.
ASSERT_EQ(0, Smi::cast(value)->value());
// Create 3 entries per context {context, code, literals}.
- new_code_map = FACTORY->NewFixedArray(kEntryLength);
- new_code_map->set(0, *native_context);
- new_code_map->set(1, *code);
- new_code_map->set(2, *literals);
+ MaybeObject* maybe = heap->AllocateFixedArray(kInitialLength);
+ if (!maybe->To(&new_code_map)) return maybe;
+ new_code_map->set(kEntriesStart + 0, native_context);
+ new_code_map->set(kEntriesStart + 1, code);
+ new_code_map->set(kEntriesStart + 2, literals);
} else {
// Copy old map and append one new entry.
- Handle<FixedArray> old_code_map(FixedArray::cast(value));
- ASSERT_EQ(-1, shared->SearchOptimizedCodeMap(*native_context));
+ FixedArray* old_code_map = FixedArray::cast(value);
+ ASSERT_EQ(-1, SearchOptimizedCodeMap(native_context));
int old_length = old_code_map->length();
int new_length = old_length + kEntryLength;
- new_code_map = FACTORY->NewFixedArray(new_length);
- old_code_map->CopyTo(0, *new_code_map, 0, old_length);
- new_code_map->set(old_length, *native_context);
- new_code_map->set(old_length + 1, *code);
- new_code_map->set(old_length + 2, *literals);
+ MaybeObject* maybe = old_code_map->CopySize(new_length);
+ if (!maybe->To(&new_code_map)) return maybe;
+ new_code_map->set(old_length + 0, native_context);
+ new_code_map->set(old_length + 1, code);
+ new_code_map->set(old_length + 2, literals);
+ // Zap the old map for the sake of the heap verifier.
+ if (Heap::ShouldZapGarbage()) {
+ Object** data = old_code_map->data_start();
+ MemsetPointer(data, heap->the_hole_value(), old_length);
+ }
}
#ifdef DEBUG
- for (int i = 0; i < new_code_map->length(); i += kEntryLength) {
+ for (int i = kEntriesStart; i < new_code_map->length(); i += kEntryLength) {
ASSERT(new_code_map->get(i)->IsNativeContext());
ASSERT(new_code_map->get(i + 1)->IsCode());
ASSERT(Code::cast(new_code_map->get(i + 1))->kind() ==
@@ -7619,14 +9600,14 @@ void SharedFunctionInfo::AddToOptimizedCodeMap(
ASSERT(new_code_map->get(i + 2)->IsFixedArray());
}
#endif
- shared->set_optimized_code_map(*new_code_map);
+ set_optimized_code_map(new_code_map);
+ return new_code_map;
}
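// Layout note (editor's sketch, inferred from the accessors above and the
// code-flushing comment below): the optimized code map is a FixedArray with
// a link slot at kNextMapIndex used by the code flusher, followed by entries
// of kEntryLength == 3 slots each, starting at kEntriesStart and holding
// {native context, optimized code, literals}.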
void SharedFunctionInfo::InstallFromOptimizedCodeMap(JSFunction* function,
int index) {
- ASSERT(index > 0);
- ASSERT(optimized_code_map()->IsFixedArray());
+ ASSERT(index > kEntriesStart);
FixedArray* code_map = FixedArray::cast(optimized_code_map());
if (!bound()) {
FixedArray* cached_literals = FixedArray::cast(code_map->get(index + 1));
@@ -7640,12 +9621,73 @@ void SharedFunctionInfo::InstallFromOptimizedCodeMap(JSFunction* function,
}
+void SharedFunctionInfo::ClearOptimizedCodeMap() {
+ FixedArray* code_map = FixedArray::cast(optimized_code_map());
+
+ // If the next map link slot is already used then the function was
+ // enqueued with code flushing and we remove it now.
+ if (!code_map->get(kNextMapIndex)->IsUndefined()) {
+ CodeFlusher* flusher = GetHeap()->mark_compact_collector()->code_flusher();
+ flusher->EvictOptimizedCodeMap(this);
+ }
+
+ ASSERT(code_map->get(kNextMapIndex)->IsUndefined());
+ set_optimized_code_map(Smi::FromInt(0));
+}
+
+
+void SharedFunctionInfo::EvictFromOptimizedCodeMap(Code* optimized_code,
+ const char* reason) {
+ if (optimized_code_map()->IsSmi()) return;
+
+ int i;
+ bool removed_entry = false;
+ FixedArray* code_map = FixedArray::cast(optimized_code_map());
+ for (i = kEntriesStart; i < code_map->length(); i += kEntryLength) {
+ ASSERT(code_map->get(i)->IsNativeContext());
+ if (Code::cast(code_map->get(i + 1)) == optimized_code) {
+ if (FLAG_trace_opt) {
+ PrintF("[evicting entry from optimizing code map (%s) for ", reason);
+ ShortPrint();
+ PrintF("]\n");
+ }
+ removed_entry = true;
+ break;
+ }
+ }
+ while (i < (code_map->length() - kEntryLength)) {
+ code_map->set(i, code_map->get(i + kEntryLength));
+ code_map->set(i + 1, code_map->get(i + 1 + kEntryLength));
+ code_map->set(i + 2, code_map->get(i + 2 + kEntryLength));
+ i += kEntryLength;
+ }
+ if (removed_entry) {
+ // Always trim even when array is cleared because of heap verifier.
+ RightTrimFixedArray<FROM_MUTATOR>(GetHeap(), code_map, kEntryLength);
+ if (code_map->length() == kEntriesStart) {
+ ClearOptimizedCodeMap();
+ }
+ }
+}
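+// Compaction note (describing the loop above): once the matching entry is
+// found, every later entry is slid down by one kEntryLength step, the array
+// is right-trimmed by a single entry, and the whole map is cleared when only
+// the header slots before kEntriesStart remain.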
+
+
+void SharedFunctionInfo::TrimOptimizedCodeMap(int shrink_by) {
+ FixedArray* code_map = FixedArray::cast(optimized_code_map());
+ ASSERT(shrink_by % kEntryLength == 0);
+ ASSERT(shrink_by <= code_map->length() - kEntriesStart);
+ // Always trim even when array is cleared because of heap verifier.
+ RightTrimFixedArray<FROM_GC>(GetHeap(), code_map, shrink_by);
+ if (code_map->length() == kEntriesStart) {
+ ClearOptimizedCodeMap();
+ }
+}
+
+
bool JSFunction::CompileLazy(Handle<JSFunction> function,
ClearExceptionFlag flag) {
bool result = true;
if (function->shared()->is_compiled()) {
function->ReplaceCode(function->shared()->code());
- function->shared()->set_code_age(0);
} else {
ASSERT(function->shared()->allows_lazy_compilation());
CompilationInfoWithZone info(function);
@@ -7656,11 +9698,25 @@ bool JSFunction::CompileLazy(Handle<JSFunction> function,
}
+Handle<Code> JSFunction::CompileOsr(Handle<JSFunction> function,
+ BailoutId osr_ast_id,
+ ClearExceptionFlag flag) {
+ CompilationInfoWithZone info(function);
+ info.SetOptimizing(osr_ast_id);
+ if (CompileLazyHelper(&info, flag)) {
+ // TODO(titzer): don't install the OSR code.
+ // ASSERT(function->code() != *info.code());
+ return info.code();
+ } else {
+ return Handle<Code>::null();
+ }
+}
+
+
bool JSFunction::CompileOptimized(Handle<JSFunction> function,
- BailoutId osr_ast_id,
ClearExceptionFlag flag) {
CompilationInfoWithZone info(function);
- info.SetOptimizing(osr_ast_id);
+ info.SetOptimizing(BailoutId::None());
return CompileLazyHelper(&info, flag);
}
@@ -7685,27 +9741,24 @@ bool JSFunction::IsInlineable() {
}
-MaybeObject* JSObject::OptimizeAsPrototype() {
- if (IsGlobalObject()) return this;
+void JSObject::OptimizeAsPrototype(Handle<JSObject> object) {
+ if (object->IsGlobalObject()) return;
// Make sure prototypes are fast objects and their maps have the bit set
// so they remain fast.
- if (!HasFastProperties()) {
- MaybeObject* new_proto = TransformToFastProperties(0);
- if (new_proto->IsFailure()) return new_proto;
- ASSERT(new_proto == this);
+ if (!object->HasFastProperties()) {
+ TransformToFastProperties(object, 0);
}
- return this;
}
-MUST_USE_RESULT static MaybeObject* CacheInitialJSArrayMaps(
+static MUST_USE_RESULT MaybeObject* CacheInitialJSArrayMaps(
Context* native_context, Map* initial_map) {
// Replace all of the cached initial array maps in the native context with
// the appropriate transitioned elements kind maps.
Heap* heap = native_context->GetHeap();
MaybeObject* maybe_maps =
- heap->AllocateFixedArrayWithHoles(kElementsKindCount);
+ heap->AllocateFixedArrayWithHoles(kElementsKindCount, TENURED);
FixedArray* maps;
if (!maybe_maps->To(&maps)) return maybe_maps;
@@ -7717,9 +9770,14 @@ MUST_USE_RESULT static MaybeObject* CacheInitialJSArrayMaps(
i < kFastElementsKindCount; ++i) {
Map* new_map;
ElementsKind next_kind = GetFastElementsKindFromSequenceIndex(i);
- MaybeObject* maybe_new_map =
- current_map->CopyAsElementsKind(next_kind, INSERT_TRANSITION);
- if (!maybe_new_map->To(&new_map)) return maybe_new_map;
+ if (current_map->HasElementsTransition()) {
+ new_map = current_map->elements_transition_map();
+ ASSERT(new_map->elements_kind() == next_kind);
+ } else {
+ MaybeObject* maybe_new_map =
+ current_map->CopyAsElementsKind(next_kind, INSERT_TRANSITION);
+ if (!maybe_new_map->To(&new_map)) return maybe_new_map;
+ }
maps->set(next_kind, new_map);
current_map = new_map;
}
@@ -7728,57 +9786,61 @@ MUST_USE_RESULT static MaybeObject* CacheInitialJSArrayMaps(
}
-MaybeObject* JSFunction::SetInstancePrototype(Object* value) {
+Handle<Object> CacheInitialJSArrayMaps(Handle<Context> native_context,
+ Handle<Map> initial_map) {
+ CALL_HEAP_FUNCTION(native_context->GetIsolate(),
+ CacheInitialJSArrayMaps(*native_context, *initial_map),
+ Object);
+}
+
+
+void JSFunction::SetInstancePrototype(Handle<JSFunction> function,
+ Handle<Object> value) {
ASSERT(value->IsJSReceiver());
- Heap* heap = GetHeap();
// First some logic for the map of the prototype to make sure it is in fast
// mode.
if (value->IsJSObject()) {
- MaybeObject* ok = JSObject::cast(value)->OptimizeAsPrototype();
- if (ok->IsFailure()) return ok;
+ JSObject::OptimizeAsPrototype(Handle<JSObject>::cast(value));
}
// Now some logic for the maps of the objects that are created by using this
// function as a constructor.
- if (has_initial_map()) {
+ if (function->has_initial_map()) {
// If the function has allocated the initial map replace it with a
// copy containing the new prototype. Also complete any in-object
// slack tracking that is in progress at this point because it is
// still tracking the old copy.
- if (shared()->IsInobjectSlackTrackingInProgress()) {
- shared()->CompleteInobjectSlackTracking();
+ if (function->shared()->IsInobjectSlackTrackingInProgress()) {
+ function->shared()->CompleteInobjectSlackTracking();
}
- Map* new_map;
- MaybeObject* maybe_object = initial_map()->Copy();
- if (!maybe_object->To(&new_map)) return maybe_object;
- new_map->set_prototype(value);
+ Handle<Map> new_map = Map::Copy(handle(function->initial_map()));
+ new_map->set_prototype(*value);
// If the function is used as the global Array function, cache the
// initial map (and transitioned versions) in the native context.
- Context* native_context = context()->native_context();
+ Context* native_context = function->context()->native_context();
Object* array_function = native_context->get(Context::ARRAY_FUNCTION_INDEX);
if (array_function->IsJSFunction() &&
- this == JSFunction::cast(array_function)) {
- MaybeObject* ok = CacheInitialJSArrayMaps(native_context, new_map);
- if (ok->IsFailure()) return ok;
+ *function == JSFunction::cast(array_function)) {
+ CacheInitialJSArrayMaps(handle(native_context), new_map);
}
- set_initial_map(new_map);
+ function->set_initial_map(*new_map);
} else {
// Put the value in the initial map field until an initial map is
// needed. At that point, a new initial map is created and the
// prototype is put into the initial map where it belongs.
- set_prototype_or_initial_map(value);
+ function->set_prototype_or_initial_map(*value);
}
- heap->ClearInstanceofCache();
- return value;
+ function->GetHeap()->ClearInstanceofCache();
}
-MaybeObject* JSFunction::SetPrototype(Object* value) {
- ASSERT(should_have_prototype());
- Object* construct_prototype = value;
+void JSFunction::SetPrototype(Handle<JSFunction> function,
+ Handle<Object> value) {
+ ASSERT(function->should_have_prototype());
+ Handle<Object> construct_prototype = value;
// If the value is not a JSReceiver, store the value in the map's
// constructor field so it can be accessed. Also, set the prototype
@@ -7788,22 +9850,20 @@ MaybeObject* JSFunction::SetPrototype(Object* value) {
// Copy the map so this does not affect unrelated functions.
// Remove map transitions because they point to maps with a
// different prototype.
- Map* new_map;
- MaybeObject* maybe_new_map = map()->Copy();
- if (!maybe_new_map->To(&new_map)) return maybe_new_map;
+ Handle<Map> new_map = Map::Copy(handle(function->map()));
- Heap* heap = new_map->GetHeap();
- set_map(new_map);
- new_map->set_constructor(value);
+ function->set_map(*new_map);
+ new_map->set_constructor(*value);
new_map->set_non_instance_prototype(true);
- construct_prototype =
- heap->isolate()->context()->native_context()->
- initial_object_prototype();
+ Isolate* isolate = new_map->GetIsolate();
+ construct_prototype = handle(
+ isolate->context()->native_context()->initial_object_prototype(),
+ isolate);
} else {
- map()->set_non_instance_prototype(false);
+ function->map()->set_non_instance_prototype(false);
}
- return SetInstancePrototype(construct_prototype);
+ return SetInstancePrototype(function, construct_prototype);
}
@@ -7840,15 +9900,46 @@ Context* JSFunction::NativeContextFromLiterals(FixedArray* literals) {
}
-MaybeObject* Oddball::Initialize(const char* to_string,
+// The filter is a pattern that matches function names in this way:
+// "*" all; the default
+// "-" all but the top-level function
+// "-name" all but the function "name"
+// "" only the top-level function
+// "name" only the function "name"
+// "name*" only functions starting with "name"
+bool JSFunction::PassesFilter(const char* raw_filter) {
+ if (*raw_filter == '*') return true;
+ String* name = shared()->DebugName();
+ Vector<const char> filter = CStrVector(raw_filter);
+ if (filter.length() == 0) return name->length() == 0;
+ if (filter[0] == '-') {
+ if (filter.length() == 1) {
+ return (name->length() != 0);
+ } else if (!name->IsUtf8EqualTo(filter.SubVector(1, filter.length()))) {
+ return true;
+ }
+ } else if (name->IsUtf8EqualTo(filter)) {
+ return true;
+ }
+ if (filter[filter.length() - 1] == '*' &&
+ name->IsUtf8EqualTo(filter.SubVector(0, filter.length() - 1), true)) {
+ return true;
+ }
+ return false;
+}
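+// For example, under the rules above a filter of "foo*" should match both
+// "foo" and "foobar", "-foo" should match every function except "foo", and
+// the empty filter should match only functions with an empty debug name
+// (i.e. the top-level code). Names here are illustrative only.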
+
+
+MaybeObject* Oddball::Initialize(Heap* heap,
+ const char* to_string,
Object* to_number,
byte kind) {
- String* symbol;
- { MaybeObject* maybe_symbol =
- Isolate::Current()->heap()->LookupAsciiSymbol(to_string);
- if (!maybe_symbol->To(&symbol)) return maybe_symbol;
+ String* internalized_to_string;
+ { MaybeObject* maybe_string =
+ heap->InternalizeUtf8String(
+ CStrVector(to_string));
+ if (!maybe_string->To(&internalized_to_string)) return maybe_string;
}
- set_to_string(symbol);
+ set_to_string(internalized_to_string);
set_to_number(to_number);
set_kind(kind);
return this;
@@ -7871,7 +9962,8 @@ bool SharedFunctionInfo::HasSourceCode() {
Handle<Object> SharedFunctionInfo::GetSourceCode() {
if (!HasSourceCode()) return GetIsolate()->factory()->undefined_value();
Handle<String> source(String::cast(Script::cast(script())->source()));
- return SubString(source, start_position(), end_position());
+ return GetIsolate()->factory()->NewSubString(
+ source, start_position(), end_position());
}
@@ -7896,112 +9988,6 @@ int SharedFunctionInfo::CalculateInObjectProperties() {
}
-bool SharedFunctionInfo::CanGenerateInlineConstructor(Object* prototype) {
- // Check the basic conditions for generating inline constructor code.
- if (!FLAG_inline_new
- || !has_only_simple_this_property_assignments()
- || this_property_assignments_count() == 0) {
- return false;
- }
-
- Heap* heap = GetHeap();
-
- // Traverse the proposed prototype chain looking for properties of the
- // same names as are set by the inline constructor.
- for (Object* obj = prototype;
- obj != heap->null_value();
- obj = obj->GetPrototype()) {
- JSReceiver* receiver = JSReceiver::cast(obj);
- for (int i = 0; i < this_property_assignments_count(); i++) {
- LookupResult result(heap->isolate());
- String* name = GetThisPropertyAssignmentName(i);
- receiver->LocalLookup(name, &result);
- if (result.IsFound()) {
- switch (result.type()) {
- case NORMAL:
- case FIELD:
- case CONSTANT_FUNCTION:
- break;
- case INTERCEPTOR:
- case CALLBACKS:
- case HANDLER:
- return false;
- case TRANSITION:
- case NONEXISTENT:
- UNREACHABLE();
- break;
- }
- }
- }
- }
-
- return true;
-}
-
-
-void SharedFunctionInfo::ForbidInlineConstructor() {
- set_compiler_hints(BooleanBit::set(compiler_hints(),
- kHasOnlySimpleThisPropertyAssignments,
- false));
-}
-
-
-void SharedFunctionInfo::SetThisPropertyAssignmentsInfo(
- bool only_simple_this_property_assignments,
- FixedArray* assignments) {
- set_compiler_hints(BooleanBit::set(compiler_hints(),
- kHasOnlySimpleThisPropertyAssignments,
- only_simple_this_property_assignments));
- set_this_property_assignments(assignments);
- set_this_property_assignments_count(assignments->length() / 3);
-}
-
-
-void SharedFunctionInfo::ClearThisPropertyAssignmentsInfo() {
- Heap* heap = GetHeap();
- set_compiler_hints(BooleanBit::set(compiler_hints(),
- kHasOnlySimpleThisPropertyAssignments,
- false));
- set_this_property_assignments(heap->undefined_value());
- set_this_property_assignments_count(0);
-}
-
-
-String* SharedFunctionInfo::GetThisPropertyAssignmentName(int index) {
- Object* obj = this_property_assignments();
- ASSERT(obj->IsFixedArray());
- ASSERT(index < this_property_assignments_count());
- obj = FixedArray::cast(obj)->get(index * 3);
- ASSERT(obj->IsString());
- return String::cast(obj);
-}
-
-
-bool SharedFunctionInfo::IsThisPropertyAssignmentArgument(int index) {
- Object* obj = this_property_assignments();
- ASSERT(obj->IsFixedArray());
- ASSERT(index < this_property_assignments_count());
- obj = FixedArray::cast(obj)->get(index * 3 + 1);
- return Smi::cast(obj)->value() != -1;
-}
-
-
-int SharedFunctionInfo::GetThisPropertyAssignmentArgument(int index) {
- ASSERT(IsThisPropertyAssignmentArgument(index));
- Object* obj =
- FixedArray::cast(this_property_assignments())->get(index * 3 + 1);
- return Smi::cast(obj)->value();
-}
-
-
-Object* SharedFunctionInfo::GetThisPropertyAssignmentConstant(int index) {
- ASSERT(!IsThisPropertyAssignmentArgument(index));
- Object* obj =
- FixedArray::cast(this_property_assignments())->get(index * 3 + 2);
- return obj;
-}
-
-
// Support function for printing the source code to a StringStream
// without any allocation in the heap.
void SharedFunctionInfo::SourceCodePrint(StringStream* accumulator,
@@ -8058,7 +10044,7 @@ static bool IsCodeEquivalent(Code* code, Code* recompiled) {
void SharedFunctionInfo::EnableDeoptimizationSupport(Code* recompiled) {
ASSERT(!has_deoptimization_support());
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_allocation;
Code* code = this->code();
if (IsCodeEquivalent(code, recompiled)) {
// Copy the deoptimization data from the recompiled code.
@@ -8069,13 +10055,13 @@ void SharedFunctionInfo::EnableDeoptimizationSupport(Code* recompiled) {
// old code, we have to replace it. We should try to avoid this
// altogether because it flushes valuable type feedback by
// effectively resetting all IC state.
- set_code(recompiled);
+ ReplaceCode(recompiled);
}
ASSERT(has_deoptimization_support());
}
-void SharedFunctionInfo::DisableOptimization(const char* reason) {
+void SharedFunctionInfo::DisableOptimization(BailoutReason reason) {
// Disable optimization for the shared function info and mark the
// code as non-optimizable. The marker on the shared function info
// is there because we flush non-optimized code, thereby losing the
@@ -8084,15 +10070,20 @@ void SharedFunctionInfo::DisableOptimization(const char* reason) {
// non-optimizable if optimization is disabled for the shared
// function info.
set_optimization_disabled(true);
+ set_bailout_reason(reason);
// Code should be the lazy compilation stub or else unoptimized. If the
// latter, disable optimization for the code too.
ASSERT(code()->kind() == Code::FUNCTION || code()->kind() == Code::BUILTIN);
if (code()->kind() == Code::FUNCTION) {
code()->set_optimizable(false);
}
+ PROFILE(GetIsolate(),
+ LogExistingFunction(Handle<SharedFunctionInfo>(this),
+ Handle<Code>(code())));
if (FLAG_trace_opt) {
- PrintF("[disabled optimization for %s, reason: %s]\n",
- *DebugName()->ToCString(), reason);
+ PrintF("[disabled optimization for ");
+ ShortPrint();
+ PrintF(", reason: %s]\n", GetBailoutReason(reason));
}
}
@@ -8149,7 +10140,7 @@ void SharedFunctionInfo::DetachInitialMap() {
// constructor is called. The countdown will continue and (possibly after
// several more GCs) CompleteInobjectSlackTracking will eventually be called.
Heap* heap = map->GetHeap();
- set_initial_map(heap->raw_unchecked_undefined_value());
+ set_initial_map(heap->undefined_value());
Builtins* builtins = heap->isolate()->builtins();
ASSERT_EQ(builtins->builtin(Builtins::kJSConstructStubCountdown),
*RawField(this, kConstructStubOffset));
@@ -8242,11 +10233,16 @@ int SharedFunctionInfo::SearchOptimizedCodeMap(Context* native_context) {
if (!value->IsSmi()) {
FixedArray* optimized_code_map = FixedArray::cast(value);
int length = optimized_code_map->length();
- for (int i = 0; i < length; i += 3) {
+ for (int i = kEntriesStart; i < length; i += kEntryLength) {
if (optimized_code_map->get(i) == native_context) {
return i + 1;
}
}
+ if (FLAG_trace_opt) {
+ PrintF("[didn't find optimized code in optimized code map for ");
+ ShortPrint();
+ PrintF("]\n");
+ }
}
return -1;
}
@@ -8277,6 +10273,15 @@ void ObjectVisitor::VisitCodeTarget(RelocInfo* rinfo) {
}
+void ObjectVisitor::VisitCodeAgeSequence(RelocInfo* rinfo) {
+ ASSERT(RelocInfo::IsCodeAgeSequence(rinfo->rmode()));
+ Object* stub = rinfo->code_age_stub();
+ if (stub) {
+ VisitPointer(&stub);
+ }
+}
+
+
void ObjectVisitor::VisitCodeEntry(Address entry_address) {
Object* code = Code::GetObjectFromEntryAddress(entry_address);
Object* old_code = code;
@@ -8287,13 +10292,13 @@ void ObjectVisitor::VisitCodeEntry(Address entry_address) {
}
-void ObjectVisitor::VisitGlobalPropertyCell(RelocInfo* rinfo) {
- ASSERT(rinfo->rmode() == RelocInfo::GLOBAL_PROPERTY_CELL);
+void ObjectVisitor::VisitCell(RelocInfo* rinfo) {
+ ASSERT(rinfo->rmode() == RelocInfo::CELL);
Object* cell = rinfo->target_cell();
Object* old_cell = cell;
VisitPointer(&cell);
if (cell != old_cell) {
- rinfo->set_target_cell(reinterpret_cast<JSGlobalPropertyCell*>(cell));
+ rinfo->set_target_cell(reinterpret_cast<Cell*>(cell));
}
}
@@ -8309,16 +10314,19 @@ void ObjectVisitor::VisitDebugTarget(RelocInfo* rinfo) {
CHECK_EQ(target, old_target); // VisitPointer doesn't change Code* *target.
}
+
void ObjectVisitor::VisitEmbeddedPointer(RelocInfo* rinfo) {
ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
VisitPointer(rinfo->target_object_address());
}
+
void ObjectVisitor::VisitExternalReference(RelocInfo* rinfo) {
Address* p = rinfo->target_reference_address();
- VisitExternalReferences(p, p + 1);
+ VisitExternalReference(p);
}
+
void Code::InvalidateRelocation() {
set_relocation_info(GetHeap()->empty_byte_array());
}
@@ -8336,27 +10344,31 @@ void Code::CopyFrom(const CodeDesc& desc) {
ASSERT(Marking::Color(this) == Marking::WHITE_OBJECT);
// copy code
- memmove(instruction_start(), desc.buffer, desc.instr_size);
+ CopyBytes(instruction_start(), desc.buffer,
+ static_cast<size_t>(desc.instr_size));
// copy reloc info
- memmove(relocation_start(),
- desc.buffer + desc.buffer_size - desc.reloc_size,
- desc.reloc_size);
+ CopyBytes(relocation_start(),
+ desc.buffer + desc.buffer_size - desc.reloc_size,
+ static_cast<size_t>(desc.reloc_size));
// unbox handles and relocate
intptr_t delta = instruction_start() - desc.buffer;
int mode_mask = RelocInfo::kCodeTargetMask |
RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
- RelocInfo::ModeMask(RelocInfo::GLOBAL_PROPERTY_CELL) |
+ RelocInfo::ModeMask(RelocInfo::CELL) |
+ RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY) |
RelocInfo::kApplyMask;
- Assembler* origin = desc.origin; // Needed to find target_object on X64.
+ // Needed to find target_object and runtime_entry on X64
+ Assembler* origin = desc.origin;
+ AllowDeferredHandleDereference embedding_raw_address;
for (RelocIterator it(this, mode_mask); !it.done(); it.next()) {
RelocInfo::Mode mode = it.rinfo()->rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
Handle<Object> p = it.rinfo()->target_object_handle(origin);
it.rinfo()->set_target_object(*p, SKIP_WRITE_BARRIER);
- } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
- Handle<JSGlobalPropertyCell> cell = it.rinfo()->target_cell_handle();
+ } else if (mode == RelocInfo::CELL) {
+ Handle<Cell> cell = it.rinfo()->target_cell_handle();
it.rinfo()->set_target_cell(*cell, SKIP_WRITE_BARRIER);
} else if (RelocInfo::IsCodeTarget(mode)) {
// rewrite code handles in inline cache targets to direct
@@ -8365,6 +10377,13 @@ void Code::CopyFrom(const CodeDesc& desc) {
Code* code = Code::cast(*p);
it.rinfo()->set_target_address(code->instruction_start(),
SKIP_WRITE_BARRIER);
+ } else if (RelocInfo::IsRuntimeEntry(mode)) {
+ Address p = it.rinfo()->target_runtime_entry(origin);
+ it.rinfo()->set_target_runtime_entry(p, SKIP_WRITE_BARRIER);
+ } else if (mode == RelocInfo::CODE_AGE_SEQUENCE) {
+ Handle<Object> p = it.rinfo()->code_age_stub_handle(origin);
+ Code* code = Code::cast(*p);
+ it.rinfo()->set_code_age_stub(code);
} else {
it.rinfo()->apply(delta);
}
@@ -8435,26 +10454,128 @@ SafepointEntry Code::GetSafepointEntry(Address pc) {
}
-void Code::SetNoStackCheckTable() {
- // Indicate the absence of a stack-check table by a table start after the
- // end of the instructions. Table start must be aligned, so round up.
- set_stack_check_table_offset(RoundUp(instruction_size(), kIntSize));
+Object* Code::FindNthObject(int n, Map* match_map) {
+ ASSERT(is_inline_cache_stub());
+ DisallowHeapAllocation no_allocation;
+ int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
+ for (RelocIterator it(this, mask); !it.done(); it.next()) {
+ RelocInfo* info = it.rinfo();
+ Object* object = info->target_object();
+ if (object->IsHeapObject()) {
+ if (HeapObject::cast(object)->map() == match_map) {
+ if (--n == 0) return object;
+ }
+ }
+ }
+ return NULL;
}
Map* Code::FindFirstMap() {
+ Object* result = FindNthObject(1, GetHeap()->meta_map());
+ return (result != NULL) ? Map::cast(result) : NULL;
+}
+
+
+void Code::ReplaceNthObject(int n,
+ Map* match_map,
+ Object* replace_with) {
+ ASSERT(is_inline_cache_stub());
+ DisallowHeapAllocation no_allocation;
+ int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
+ for (RelocIterator it(this, mask); !it.done(); it.next()) {
+ RelocInfo* info = it.rinfo();
+ Object* object = info->target_object();
+ if (object->IsHeapObject()) {
+ if (HeapObject::cast(object)->map() == match_map) {
+ if (--n == 0) {
+ info->set_target_object(replace_with);
+ return;
+ }
+ }
+ }
+ }
+ UNREACHABLE();
+}
+
+
+void Code::FindAllMaps(MapHandleList* maps) {
ASSERT(is_inline_cache_stub());
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_allocation;
int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
for (RelocIterator it(this, mask); !it.done(); it.next()) {
RelocInfo* info = it.rinfo();
Object* object = info->target_object();
- if (object->IsMap()) return Map::cast(object);
+ if (object->IsMap()) maps->Add(Handle<Map>(Map::cast(object)));
+ }
+}
+
+
+void Code::ReplaceFirstMap(Map* replace_with) {
+ ReplaceNthObject(1, GetHeap()->meta_map(), replace_with);
+}
+
+
+Code* Code::FindFirstHandler() {
+ ASSERT(is_inline_cache_stub());
+ DisallowHeapAllocation no_allocation;
+ int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET);
+ for (RelocIterator it(this, mask); !it.done(); it.next()) {
+ RelocInfo* info = it.rinfo();
+ Code* code = Code::GetCodeFromTargetAddress(info->target_address());
+ if (code->kind() == Code::HANDLER) return code;
}
return NULL;
}
+bool Code::FindHandlers(CodeHandleList* code_list, int length) {
+ ASSERT(is_inline_cache_stub());
+ DisallowHeapAllocation no_allocation;
+ int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET);
+ int i = 0;
+ for (RelocIterator it(this, mask); !it.done(); it.next()) {
+ if (i == length) return true;
+ RelocInfo* info = it.rinfo();
+ Code* code = Code::GetCodeFromTargetAddress(info->target_address());
+ // IC stubs with handlers never contain non-handler code objects before
+ // handler targets.
+ if (code->kind() != Code::HANDLER) break;
+ code_list->Add(Handle<Code>(code));
+ i++;
+ }
+ return i == length;
+}
+
+
+Name* Code::FindFirstName() {
+ ASSERT(is_inline_cache_stub());
+ DisallowHeapAllocation no_allocation;
+ int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
+ for (RelocIterator it(this, mask); !it.done(); it.next()) {
+ RelocInfo* info = it.rinfo();
+ Object* object = info->target_object();
+ if (object->IsName()) return Name::cast(object);
+ }
+ return NULL;
+}
+
+
+void Code::ReplaceNthCell(int n, Cell* replace_with) {
+ ASSERT(is_inline_cache_stub());
+ DisallowHeapAllocation no_allocation;
+ int mask = RelocInfo::ModeMask(RelocInfo::CELL);
+ for (RelocIterator it(this, mask); !it.done(); it.next()) {
+ RelocInfo* info = it.rinfo();
+ if (--n == 0) {
+ info->set_target_cell(replace_with);
+ return;
+ }
+ }
+ UNREACHABLE();
+}
+
+
void Code::ClearInlineCaches() {
int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
RelocInfo::ModeMask(RelocInfo::CONSTRUCT_CALL) |
@@ -8464,28 +10585,207 @@ void Code::ClearInlineCaches() {
RelocInfo* info = it.rinfo();
Code* target(Code::GetCodeFromTargetAddress(info->target_address()));
if (target->is_inline_cache_stub()) {
- IC::Clear(info->pc());
+ IC::Clear(this->GetIsolate(), info->pc());
}
}
}
void Code::ClearTypeFeedbackCells(Heap* heap) {
+ if (kind() != FUNCTION) return;
Object* raw_info = type_feedback_info();
if (raw_info->IsTypeFeedbackInfo()) {
TypeFeedbackCells* type_feedback_cells =
TypeFeedbackInfo::cast(raw_info)->type_feedback_cells();
for (int i = 0; i < type_feedback_cells->CellCount(); i++) {
- JSGlobalPropertyCell* cell = type_feedback_cells->Cell(i);
- cell->set_value(TypeFeedbackCells::RawUninitializedSentinel(heap));
+ Cell* cell = type_feedback_cells->GetCell(i);
+ // Don't clear AllocationSites
+ Object* value = cell->value();
+ if (value == NULL || !value->IsAllocationSite()) {
+ cell->set_value(TypeFeedbackCells::RawUninitializedSentinel(heap));
+ }
+ }
+ }
+}
+
+
+BailoutId Code::TranslatePcOffsetToAstId(uint32_t pc_offset) {
+ DisallowHeapAllocation no_gc;
+ ASSERT(kind() == FUNCTION);
+ BackEdgeTable back_edges(this, &no_gc);
+ for (uint32_t i = 0; i < back_edges.length(); i++) {
+ if (back_edges.pc_offset(i) == pc_offset) return back_edges.ast_id(i);
+ }
+ return BailoutId::None();
+}
+
+
+void Code::MakeCodeAgeSequenceYoung(byte* sequence, Isolate* isolate) {
+ PatchPlatformCodeAge(isolate, sequence, kNoAgeCodeAge, NO_MARKING_PARITY);
+}
+
+
+void Code::MarkCodeAsExecuted(byte* sequence, Isolate* isolate) {
+ PatchPlatformCodeAge(isolate, sequence, kExecutedOnceCodeAge,
+ NO_MARKING_PARITY);
+}
+
+
+void Code::MakeOlder(MarkingParity current_parity) {
+ byte* sequence = FindCodeAgeSequence();
+ if (sequence != NULL) {
+ Age age;
+ MarkingParity code_parity;
+ GetCodeAgeAndParity(sequence, &age, &code_parity);
+ if (age != kLastCodeAge && code_parity != current_parity) {
+ PatchPlatformCodeAge(GetIsolate(),
+ sequence,
+ static_cast<Age>(age + 1),
+ current_parity);
}
}
}
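// Aging sketch (hedged summary; only the patching side is shown here): each
// marking cycle with a new parity advances the age by one step, the parity
// check keeps code from aging twice in the same cycle, and the patched
// prologue presumably calls a Make*CodeYoungAgain builtin that resets the
// age once the function runs again.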
-bool Code::allowed_in_shared_map_code_cache() {
- return is_keyed_load_stub() || is_keyed_store_stub() ||
- (is_compare_ic_stub() && compare_state() == CompareIC::KNOWN_OBJECTS);
+bool Code::IsOld() {
+ Age age = GetAge();
+ return age >= kIsOldCodeAge;
+}
+
+
+byte* Code::FindCodeAgeSequence() {
+ return FLAG_age_code &&
+ prologue_offset() != Code::kPrologueOffsetNotSet &&
+ (kind() == OPTIMIZED_FUNCTION ||
+ (kind() == FUNCTION && !has_debug_break_slots()))
+ ? instruction_start() + prologue_offset()
+ : NULL;
+}
+
+
+Code::Age Code::GetAge() {
+ byte* sequence = FindCodeAgeSequence();
+ if (sequence == NULL) {
+ return Code::kNoAgeCodeAge;
+ }
+ Age age;
+ MarkingParity parity;
+ GetCodeAgeAndParity(sequence, &age, &parity);
+ return age;
+}
+
+
+void Code::GetCodeAgeAndParity(Code* code, Age* age,
+ MarkingParity* parity) {
+ Isolate* isolate = code->GetIsolate();
+ Builtins* builtins = isolate->builtins();
+ Code* stub = NULL;
+#define HANDLE_CODE_AGE(AGE) \
+ stub = *builtins->Make##AGE##CodeYoungAgainEvenMarking(); \
+ if (code == stub) { \
+ *age = k##AGE##CodeAge; \
+ *parity = EVEN_MARKING_PARITY; \
+ return; \
+ } \
+ stub = *builtins->Make##AGE##CodeYoungAgainOddMarking(); \
+ if (code == stub) { \
+ *age = k##AGE##CodeAge; \
+ *parity = ODD_MARKING_PARITY; \
+ return; \
+ }
+ CODE_AGE_LIST(HANDLE_CODE_AGE)
+#undef HANDLE_CODE_AGE
+ stub = *builtins->MarkCodeAsExecutedOnce();
+ if (code == stub) {
+ // Treat code that has never been executed as old immediately.
+ *age = kIsOldCodeAge;
+ *parity = NO_MARKING_PARITY;
+ return;
+ }
+ stub = *builtins->MarkCodeAsExecutedTwice();
+ if (code == stub) {
+ // Pre-age code that has only been executed once.
+ *age = kPreAgedCodeAge;
+ *parity = NO_MARKING_PARITY;
+ return;
+ }
+ UNREACHABLE();
+}
+
+
+Code* Code::GetCodeAgeStub(Isolate* isolate, Age age, MarkingParity parity) {
+ Builtins* builtins = isolate->builtins();
+ switch (age) {
+#define HANDLE_CODE_AGE(AGE) \
+ case k##AGE##CodeAge: { \
+ Code* stub = parity == EVEN_MARKING_PARITY \
+ ? *builtins->Make##AGE##CodeYoungAgainEvenMarking() \
+ : *builtins->Make##AGE##CodeYoungAgainOddMarking(); \
+ return stub; \
+ }
+ CODE_AGE_LIST(HANDLE_CODE_AGE)
+#undef HANDLE_CODE_AGE
+ case kNotExecutedCodeAge: {
+ ASSERT(parity == NO_MARKING_PARITY);
+ return *builtins->MarkCodeAsExecutedOnce();
+ }
+ case kExecutedOnceCodeAge: {
+ ASSERT(parity == NO_MARKING_PARITY);
+ return *builtins->MarkCodeAsExecutedTwice();
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+ return NULL;
+}
+
+
+void Code::PrintDeoptLocation(int bailout_id) {
+ const char* last_comment = NULL;
+ int mask = RelocInfo::ModeMask(RelocInfo::COMMENT)
+ | RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
+ for (RelocIterator it(this, mask); !it.done(); it.next()) {
+ RelocInfo* info = it.rinfo();
+ if (info->rmode() == RelocInfo::COMMENT) {
+ last_comment = reinterpret_cast<const char*>(info->data());
+ } else if (last_comment != NULL) {
+ if ((bailout_id == Deoptimizer::GetDeoptimizationId(
+ GetIsolate(), info->target_address(), Deoptimizer::EAGER)) ||
+ (bailout_id == Deoptimizer::GetDeoptimizationId(
+ GetIsolate(), info->target_address(), Deoptimizer::SOFT))) {
+ CHECK(RelocInfo::IsRuntimeEntry(info->rmode()));
+ PrintF(" %s\n", last_comment);
+ return;
+ }
+ }
+ }
+}
+
+
+bool Code::CanDeoptAt(Address pc) {
+ DeoptimizationInputData* deopt_data =
+ DeoptimizationInputData::cast(deoptimization_data());
+ Address code_start_address = instruction_start();
+ for (int i = 0; i < deopt_data->DeoptCount(); i++) {
+ if (deopt_data->Pc(i)->value() == -1) continue;
+ Address address = code_start_address + deopt_data->Pc(i)->value();
+ if (address == pc) return true;
+ }
+ return false;
+}
+
+
+// Identify kind of code.
+const char* Code::Kind2String(Kind kind) {
+ switch (kind) {
+#define CASE(name) case name: return #name;
+ CODE_KIND_LIST(CASE)
+#undef CASE
+ case NUMBER_OF_KINDS: break;
+ }
+ UNREACHABLE();
+ return NULL;
}
@@ -8548,6 +10848,12 @@ void DeoptimizationInputData::DeoptimizationInputDataPrint(FILE* out) {
break;
}
+ case Translation::COMPILED_STUB_FRAME: {
+ Code::Kind stub_kind = static_cast<Code::Kind>(iterator.Next());
+ PrintF(out, "{kind=%d}", stub_kind);
+ break;
+ }
+
case Translation::ARGUMENTS_ADAPTOR_FRAME:
case Translation::CONSTRUCT_STUB_FRAME: {
int function_id = iterator.Next();
@@ -8571,9 +10877,6 @@ void DeoptimizationInputData::DeoptimizationInputDataPrint(FILE* out) {
break;
}
- case Translation::DUPLICATE:
- break;
-
case Translation::REGISTER: {
int reg_code = iterator.Next();
PrintF(out, "{input=%s}", converter.NameOfCPURegister(reg_code));
@@ -8588,8 +10891,7 @@ void DeoptimizationInputData::DeoptimizationInputDataPrint(FILE* out) {
case Translation::UINT32_REGISTER: {
int reg_code = iterator.Next();
- PrintF(out,
- "{input=%s (unsigned)}",
+ PrintF(out, "{input=%s (unsigned)}",
converter.NameOfCPURegister(reg_code));
break;
}
@@ -8631,8 +10933,18 @@ void DeoptimizationInputData::DeoptimizationInputDataPrint(FILE* out) {
break;
}
+ case Translation::DUPLICATED_OBJECT: {
+ int object_index = iterator.Next();
+ PrintF(out, "{object_index=%d}", object_index);
+ break;
+ }
+
case Translation::ARGUMENTS_OBJECT:
+ case Translation::CAPTURED_OBJECT: {
+ int args_length = iterator.Next();
+ PrintF(out, "{length=%d}", args_length);
break;
+ }
}
PrintF(out, "\n");
}
@@ -8657,38 +10969,16 @@ void DeoptimizationOutputData::DeoptimizationOutputDataPrint(FILE* out) {
}
-// Identify kind of code.
-const char* Code::Kind2String(Kind kind) {
- switch (kind) {
- case FUNCTION: return "FUNCTION";
- case OPTIMIZED_FUNCTION: return "OPTIMIZED_FUNCTION";
- case STUB: return "STUB";
- case BUILTIN: return "BUILTIN";
- case LOAD_IC: return "LOAD_IC";
- case KEYED_LOAD_IC: return "KEYED_LOAD_IC";
- case STORE_IC: return "STORE_IC";
- case KEYED_STORE_IC: return "KEYED_STORE_IC";
- case CALL_IC: return "CALL_IC";
- case KEYED_CALL_IC: return "KEYED_CALL_IC";
- case UNARY_OP_IC: return "UNARY_OP_IC";
- case BINARY_OP_IC: return "BINARY_OP_IC";
- case COMPARE_IC: return "COMPARE_IC";
- case TO_BOOLEAN_IC: return "TO_BOOLEAN_IC";
- }
- UNREACHABLE();
- return NULL;
-}
-
-
const char* Code::ICState2String(InlineCacheState state) {
switch (state) {
case UNINITIALIZED: return "UNINITIALIZED";
case PREMONOMORPHIC: return "PREMONOMORPHIC";
case MONOMORPHIC: return "MONOMORPHIC";
case MONOMORPHIC_PROTOTYPE_FAILURE: return "MONOMORPHIC_PROTOTYPE_FAILURE";
+ case POLYMORPHIC: return "POLYMORPHIC";
case MEGAMORPHIC: return "MEGAMORPHIC";
- case DEBUG_BREAK: return "DEBUG_BREAK";
- case DEBUG_PREPARE_STEP_IN: return "DEBUG_PREPARE_STEP_IN";
+ case GENERIC: return "GENERIC";
+ case DEBUG_STUB: return "DEBUG_STUB";
}
UNREACHABLE();
return NULL;
@@ -8699,10 +10989,10 @@ const char* Code::StubType2String(StubType type) {
switch (type) {
case NORMAL: return "NORMAL";
case FIELD: return "FIELD";
- case CONSTANT_FUNCTION: return "CONSTANT_FUNCTION";
+ case CONSTANT: return "CONSTANT";
case CALLBACKS: return "CALLBACKS";
case INTERCEPTOR: return "INTERCEPTOR";
- case MAP_TRANSITION: return "MAP_TRANSITION";
+ case TRANSITION: return "TRANSITION";
case NONEXISTENT: return "NONEXISTENT";
}
UNREACHABLE(); // keep the compiler happy
@@ -8711,6 +11001,7 @@ const char* Code::StubType2String(StubType type) {
void Code::PrintExtraICState(FILE* out, Kind kind, ExtraICState extra) {
+ PrintF(out, "extra_ic_state = ");
const char* name = NULL;
switch (kind) {
case CALL_IC:
@@ -8728,9 +11019,9 @@ void Code::PrintExtraICState(FILE* out, Kind kind, ExtraICState extra) {
break;
}
if (name != NULL) {
- PrintF(out, "extra_ic_state = %s\n", name);
+ PrintF(out, "%s\n", name);
} else {
- PrintF(out, "extra_ic_state = %d\n", extra);
+ PrintF(out, "%d\n", extra);
}
}
@@ -8739,7 +11030,8 @@ void Code::Disassemble(const char* name, FILE* out) {
PrintF(out, "kind = %s\n", Kind2String(kind()));
if (is_inline_cache_stub()) {
PrintF(out, "ic_state = %s\n", ICState2String(ic_state()));
- PrintExtraICState(out, kind(), extra_ic_state());
+ PrintExtraICState(out, kind(), needs_extended_extra_ic_state(kind()) ?
+ extended_extra_ic_state() : extra_ic_state());
if (ic_state() == MONOMORPHIC) {
PrintF(out, "type = %s\n", StubType2String(type()));
}
@@ -8747,11 +11039,15 @@ void Code::Disassemble(const char* name, FILE* out) {
PrintF(out, "argc = %d\n", arguments_count());
}
if (is_compare_ic_stub()) {
- CompareIC::State state = CompareIC::ComputeState(this);
- PrintF(out, "compare_state = %s\n", CompareIC::GetStateName(state));
- }
- if (is_compare_ic_stub() && major_key() == CodeStub::CompareIC) {
- Token::Value op = CompareIC::ComputeOperation(this);
+ ASSERT(major_key() == CodeStub::CompareIC);
+ CompareIC::State left_state, right_state, handler_state;
+ Token::Value op;
+ ICCompareStub::DecodeMinorKey(stub_info(), &left_state, &right_state,
+ &handler_state, &op);
+ PrintF(out, "compare_state = %s*%s -> %s\n",
+ CompareIC::GetStateName(left_state),
+ CompareIC::GetStateName(right_state),
+ CompareIC::GetStateName(handler_state));
PrintF(out, "compare_operation = %s\n", Token::Name(op));
}
}
@@ -8777,7 +11073,7 @@ void Code::Disassemble(const char* name, FILE* out) {
}
PrintF("\n");
- if (kind() == OPTIMIZED_FUNCTION) {
+ if (is_crankshafted()) {
SafepointTable table(this);
PrintF(out, "Safepoints (size = %u)\n", table.size());
for (unsigned i = 0; i < table.length(); i++) {
@@ -8797,22 +11093,23 @@ void Code::Disassemble(const char* name, FILE* out) {
PrintF(out, "\n");
}
PrintF(out, "\n");
- // Just print if type feedback info is ever used for optimized code.
- ASSERT(type_feedback_info()->IsUndefined());
} else if (kind() == FUNCTION) {
- unsigned offset = stack_check_table_offset();
- // If there is no stack check table, the "table start" will at or after
+ unsigned offset = back_edge_table_offset();
+ // If there is no back edge table, the "table start" will be at or after
// (due to alignment) the end of the instruction stream.
if (static_cast<int>(offset) < instruction_size()) {
- unsigned* address =
- reinterpret_cast<unsigned*>(instruction_start() + offset);
- unsigned length = address[0];
- PrintF(out, "Stack checks (size = %u)\n", length);
- PrintF(out, "ast_id pc_offset\n");
- for (unsigned i = 0; i < length; ++i) {
- unsigned index = (2 * i) + 1;
- PrintF(out, "%6u %9u\n", address[index], address[index + 1]);
+ DisallowHeapAllocation no_gc;
+ BackEdgeTable back_edges(this, &no_gc);
+
+ PrintF(out, "Back edges (size = %u)\n", back_edges.length());
+ PrintF(out, "ast_id pc_offset loop_depth\n");
+
+ for (uint32_t i = 0; i < back_edges.length(); i++) {
+ PrintF(out, "%6d %9u %10u\n", back_edges.ast_id(i).ToInt(),
+ back_edges.pc_offset(i),
+ back_edges.loop_depth(i));
}
+
PrintF(out, "\n");
}
#ifdef OBJECT_PRINT
@@ -8824,7 +11121,9 @@ void Code::Disassemble(const char* name, FILE* out) {
}
PrintF("RelocInfo (size = %d)\n", relocation_size());
- for (RelocIterator it(this); !it.done(); it.next()) it.rinfo()->Print(out);
+ for (RelocIterator it(this); !it.done(); it.next()) {
+ it.rinfo()->Print(GetIsolate(), out);
+ }
PrintF(out, "\n");
}
#endif // ENABLE_DISASSEMBLER
@@ -8837,12 +11136,12 @@ MaybeObject* JSObject::SetFastElementsCapacityAndLength(
Heap* heap = GetHeap();
// We should never end in here with a pixel or external array.
ASSERT(!HasExternalArrayElements());
+ ASSERT(!map()->is_observed());
// Allocate a new fast elements backing store.
FixedArray* new_elements;
- { MaybeObject* maybe = heap->AllocateFixedArrayWithHoles(capacity);
- if (!maybe->To(&new_elements)) return maybe;
- }
+ MaybeObject* maybe = heap->AllocateUninitializedFixedArray(capacity);
+ if (!maybe->To(&new_elements)) return maybe;
ElementsKind elements_kind = GetElementsKind();
ElementsKind new_elements_kind;
@@ -8865,11 +11164,11 @@ MaybeObject* JSObject::SetFastElementsCapacityAndLength(
}
}
FixedArrayBase* old_elements = elements();
- ElementsAccessor* accessor = ElementsAccessor::ForKind(elements_kind);
- { MaybeObject* maybe_obj =
- accessor->CopyElements(this, new_elements, new_elements_kind);
- if (maybe_obj->IsFailure()) return maybe_obj;
- }
+ ElementsAccessor* accessor = ElementsAccessor::ForKind(new_elements_kind);
+ MaybeObject* maybe_obj =
+ accessor->CopyElements(this, new_elements, elements_kind);
+ if (maybe_obj->IsFailure()) return maybe_obj;
+
if (elements_kind != NON_STRICT_ARGUMENTS_ELEMENTS) {
Map* new_map = map();
if (new_elements_kind != elements_kind) {
@@ -8879,6 +11178,10 @@ MaybeObject* JSObject::SetFastElementsCapacityAndLength(
}
ValidateElements();
set_map_and_elements(new_map, new_elements);
+
+ // Transition through the allocation site as well if present.
+ maybe_obj = UpdateAllocationSite(new_elements_kind);
+ if (maybe_obj->IsFailure()) return maybe_obj;
} else {
FixedArray* parameter_map = FixedArray::cast(old_elements);
parameter_map->set(1, new_elements);
@@ -8896,12 +11199,29 @@ MaybeObject* JSObject::SetFastElementsCapacityAndLength(
}
+bool Code::IsWeakEmbeddedObject(Kind kind, Object* object) {
+ if (kind != Code::OPTIMIZED_FUNCTION) return false;
+
+ if (object->IsMap()) {
+ return Map::cast(object)->CanTransition() &&
+ FLAG_collect_maps &&
+ FLAG_weak_embedded_maps_in_optimized_code;
+ }
+
+ if (object->IsJSObject()) {
+ return FLAG_weak_embedded_objects_in_optimized_code;
+ }
+
+ return false;
+}
+
MaybeObject* JSObject::SetFastDoubleElementsCapacityAndLength(
int capacity,
int length) {
Heap* heap = GetHeap();
// We should never end in here with a pixel or external array.
ASSERT(!HasExternalArrayElements());
+ ASSERT(!map()->is_observed());
FixedArrayBase* elems;
{ MaybeObject* maybe_obj =
@@ -8924,9 +11244,9 @@ MaybeObject* JSObject::SetFastDoubleElementsCapacityAndLength(
}
FixedArrayBase* old_elements = elements();
- ElementsAccessor* accessor = ElementsAccessor::ForKind(elements_kind);
+ ElementsAccessor* accessor = ElementsAccessor::ForKind(FAST_DOUBLE_ELEMENTS);
{ MaybeObject* maybe_obj =
- accessor->CopyElements(this, elems, FAST_DOUBLE_ELEMENTS);
+ accessor->CopyElements(this, elems, elements_kind);
if (maybe_obj->IsFailure()) return maybe_obj;
}
if (elements_kind != NON_STRICT_ARGUMENTS_ELEMENTS) {
@@ -8950,19 +11270,10 @@ MaybeObject* JSObject::SetFastDoubleElementsCapacityAndLength(
}
-MaybeObject* JSArray::Initialize(int capacity) {
- Heap* heap = GetHeap();
+MaybeObject* JSArray::Initialize(int capacity, int length) {
ASSERT(capacity >= 0);
- set_length(Smi::FromInt(0));
- FixedArray* new_elements;
- if (capacity == 0) {
- new_elements = heap->empty_fixed_array();
- } else {
- MaybeObject* maybe_obj = heap->AllocateFixedArrayWithHoles(capacity);
- if (!maybe_obj->To(&new_elements)) return maybe_obj;
- }
- set_elements(new_elements);
- return this;
+ return GetHeap()->AllocateJSArrayStorage(this, length, capacity,
+ INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
}
@@ -8972,78 +11283,227 @@ void JSArray::Expand(int required_size) {
}
+// Returns false if the passed-in index is marked non-configurable,
+// which will cause the ES5 truncation operation to halt, and thus
+// no further old values need be collected.
+static bool GetOldValue(Isolate* isolate,
+ Handle<JSObject> object,
+ uint32_t index,
+ List<Handle<Object> >* old_values,
+ List<uint32_t>* indices) {
+ PropertyAttributes attributes = object->GetLocalElementAttribute(index);
+ ASSERT(attributes != ABSENT);
+ if (attributes == DONT_DELETE) return false;
+ old_values->Add(object->GetLocalElementAccessorPair(index) == NULL
+ ? Object::GetElement(isolate, object, index)
+ : Handle<Object>::cast(isolate->factory()->the_hole_value()));
+ indices->Add(index);
+ return true;
+}
+
+static void EnqueueSpliceRecord(Handle<JSArray> object,
+ uint32_t index,
+ Handle<JSArray> deleted,
+ uint32_t add_count) {
+ Isolate* isolate = object->GetIsolate();
+ HandleScope scope(isolate);
+ Handle<Object> index_object = isolate->factory()->NewNumberFromUint(index);
+ Handle<Object> add_count_object =
+ isolate->factory()->NewNumberFromUint(add_count);
+
+ Handle<Object> args[] =
+ { object, index_object, deleted, add_count_object };
+
+ bool threw;
+ Execution::Call(isolate,
+ Handle<JSFunction>(isolate->observers_enqueue_splice()),
+ isolate->factory()->undefined_value(), ARRAY_SIZE(args), args,
+ &threw);
+ ASSERT(!threw);
+}
+
+
+static void BeginPerformSplice(Handle<JSArray> object) {
+ Isolate* isolate = object->GetIsolate();
+ HandleScope scope(isolate);
+ Handle<Object> args[] = { object };
+
+ bool threw;
+ Execution::Call(isolate,
+ Handle<JSFunction>(isolate->observers_begin_perform_splice()),
+ isolate->factory()->undefined_value(), ARRAY_SIZE(args), args,
+ &threw);
+ ASSERT(!threw);
+}
+
+
+static void EndPerformSplice(Handle<JSArray> object) {
+ Isolate* isolate = object->GetIsolate();
+ HandleScope scope(isolate);
+ Handle<Object> args[] = { object };
+
+ bool threw;
+ Execution::Call(isolate,
+ Handle<JSFunction>(isolate->observers_end_perform_splice()),
+ isolate->factory()->undefined_value(), ARRAY_SIZE(args), args,
+ &threw);
+ ASSERT(!threw);
+}
+
+
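+// Observation sketch (summarizing the function below): for observed arrays
+// the old values of all elements that truncation will drop are recorded
+// first, the length is then changed, per-element "deleted" records and an
+// "updated" record for "length" are emitted inside a BeginPerformSplice/
+// EndPerformSplice pair, and finally a single splice record describes the
+// whole change.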
MaybeObject* JSArray::SetElementsLength(Object* len) {
// We should never end in here with a pixel or external array.
ASSERT(AllowsSetElementsLength());
- return GetElementsAccessor()->SetLength(this, len);
+ if (!(FLAG_harmony_observation && map()->is_observed()))
+ return GetElementsAccessor()->SetLength(this, len);
+
+ Isolate* isolate = GetIsolate();
+ HandleScope scope(isolate);
+ Handle<JSArray> self(this);
+ List<uint32_t> indices;
+ List<Handle<Object> > old_values;
+ Handle<Object> old_length_handle(self->length(), isolate);
+ Handle<Object> new_length_handle(len, isolate);
+ uint32_t old_length = 0;
+ CHECK(old_length_handle->ToArrayIndex(&old_length));
+ uint32_t new_length = 0;
+ if (!new_length_handle->ToArrayIndex(&new_length))
+ return Failure::InternalError();
+
+ // Observed arrays should always be in dictionary mode;
+ // if they were in fast mode, the below is slower than necessary
+ // as it iterates over the array backing store multiple times.
+ ASSERT(self->HasDictionaryElements());
+ static const PropertyAttributes kNoAttrFilter = NONE;
+ int num_elements = self->NumberOfLocalElements(kNoAttrFilter);
+ if (num_elements > 0) {
+ if (old_length == static_cast<uint32_t>(num_elements)) {
+ // Simple case for arrays without holes.
+ for (uint32_t i = old_length - 1; i + 1 > new_length; --i) {
+ if (!GetOldValue(isolate, self, i, &old_values, &indices)) break;
+ }
+ } else {
+ // For sparse arrays, only iterate over existing elements.
+ Handle<FixedArray> keys = isolate->factory()->NewFixedArray(num_elements);
+ self->GetLocalElementKeys(*keys, kNoAttrFilter);
+ while (num_elements-- > 0) {
+ uint32_t index = NumberToUint32(keys->get(num_elements));
+ if (index < new_length) break;
+ if (!GetOldValue(isolate, self, index, &old_values, &indices)) break;
+ }
+ }
+ }
+
+ MaybeObject* result =
+ self->GetElementsAccessor()->SetLength(*self, *new_length_handle);
+ Handle<Object> hresult;
+ if (!result->ToHandle(&hresult, isolate)) return result;
+
+ CHECK(self->length()->ToArrayIndex(&new_length));
+ if (old_length == new_length) return *hresult;
+
+ BeginPerformSplice(self);
+
+ for (int i = 0; i < indices.length(); ++i) {
+ JSObject::EnqueueChangeRecord(
+ self, "deleted", isolate->factory()->Uint32ToString(indices[i]),
+ old_values[i]);
+ }
+ JSObject::EnqueueChangeRecord(
+ self, "updated", isolate->factory()->length_string(),
+ old_length_handle);
+
+ EndPerformSplice(self);
+
+ uint32_t index = Min(old_length, new_length);
+ uint32_t add_count = new_length > old_length ? new_length - old_length : 0;
+ uint32_t delete_count = new_length < old_length ? old_length - new_length : 0;
+ Handle<JSArray> deleted = isolate->factory()->NewJSArray(0);
+ if (delete_count > 0) {
+ for (int i = indices.length() - 1; i >= 0; i--) {
+ JSObject::SetElement(deleted, indices[i] - index, old_values[i], NONE,
+ kNonStrictMode);
+ }
+
+ SetProperty(deleted, isolate->factory()->length_string(),
+ isolate->factory()->NewNumberFromUint(delete_count),
+ NONE, kNonStrictMode);
+ }
+
+ EnqueueSpliceRecord(self, index, deleted, add_count);
+
+ return *hresult;
}
-Map* Map::GetPrototypeTransition(Object* prototype) {
- FixedArray* cache = GetPrototypeTransitions();
- int number_of_transitions = NumberOfProtoTransitions();
+Handle<Map> Map::GetPrototypeTransition(Handle<Map> map,
+ Handle<Object> prototype) {
+ FixedArray* cache = map->GetPrototypeTransitions();
+ int number_of_transitions = map->NumberOfProtoTransitions();
const int proto_offset =
kProtoTransitionHeaderSize + kProtoTransitionPrototypeOffset;
const int map_offset = kProtoTransitionHeaderSize + kProtoTransitionMapOffset;
const int step = kProtoTransitionElementsPerEntry;
for (int i = 0; i < number_of_transitions; i++) {
- if (cache->get(proto_offset + i * step) == prototype) {
- Object* map = cache->get(map_offset + i * step);
- return Map::cast(map);
+ if (cache->get(proto_offset + i * step) == *prototype) {
+ Object* result = cache->get(map_offset + i * step);
+ return Handle<Map>(Map::cast(result));
}
}
- return NULL;
+ return Handle<Map>();
}
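// Cache layout sketch (inferred from the offsets above): the prototype
// transition cache is a FixedArray with kProtoTransitionHeaderSize header
// slots, followed by NumberOfProtoTransitions() entries of
// kProtoTransitionElementsPerEntry slots, each holding the prototype at
// kProtoTransitionPrototypeOffset and the transitioned map at
// kProtoTransitionMapOffset.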
-MaybeObject* Map::PutPrototypeTransition(Object* prototype, Map* map) {
- ASSERT(map->IsMap());
- ASSERT(HeapObject::cast(prototype)->map()->IsMap());
+Handle<Map> Map::PutPrototypeTransition(Handle<Map> map,
+ Handle<Object> prototype,
+ Handle<Map> target_map) {
+ ASSERT(target_map->IsMap());
+ ASSERT(HeapObject::cast(*prototype)->map()->IsMap());
// Don't cache prototype transition if this map is shared.
- if (is_shared() || !FLAG_cache_prototype_transitions) return this;
-
- FixedArray* cache = GetPrototypeTransitions();
+ if (map->is_shared() || !FLAG_cache_prototype_transitions) return map;
const int step = kProtoTransitionElementsPerEntry;
const int header = kProtoTransitionHeaderSize;
+ Handle<FixedArray> cache(map->GetPrototypeTransitions());
int capacity = (cache->length() - header) / step;
-
- int transitions = NumberOfProtoTransitions() + 1;
+ int transitions = map->NumberOfProtoTransitions() + 1;
if (transitions > capacity) {
- if (capacity > kMaxCachedPrototypeTransitions) return this;
+ if (capacity > kMaxCachedPrototypeTransitions) return map;
- FixedArray* new_cache;
// Grow array by factor 2 over and above what we need.
- { MaybeObject* maybe_cache =
- GetHeap()->AllocateFixedArray(transitions * 2 * step + header);
- if (!maybe_cache->To(&new_cache)) return maybe_cache;
- }
+ Factory* factory = map->GetIsolate()->factory();
+ cache = factory->CopySizeFixedArray(cache, transitions * 2 * step + header);
- for (int i = 0; i < capacity * step; i++) {
- new_cache->set(i + header, cache->get(i + header));
- }
- cache = new_cache;
- MaybeObject* set_result = SetPrototypeTransitions(cache);
- if (set_result->IsFailure()) return set_result;
+ CALL_AND_RETRY_OR_DIE(map->GetIsolate(),
+ map->SetPrototypeTransitions(*cache),
+ break,
+ return Handle<Map>());
}
- int last = transitions - 1;
+ // Reload number of transitions as GC might shrink them.
+ int last = map->NumberOfProtoTransitions();
+ int entry = header + last * step;
- cache->set(header + last * step + kProtoTransitionPrototypeOffset, prototype);
- cache->set(header + last * step + kProtoTransitionMapOffset, map);
- SetNumberOfProtoTransitions(transitions);
+ cache->set(entry + kProtoTransitionPrototypeOffset, *prototype);
+ cache->set(entry + kProtoTransitionMapOffset, *target_map);
+ map->SetNumberOfProtoTransitions(transitions);
- return cache;
+ return map;
}
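
GetPrototypeTransition and PutPrototypeTransition above treat the cache as a flat FixedArray: a small header followed by (prototype, map) pairs, grown by a factor of two when full. A minimal sketch of that lookup/insert behaviour using std::vector; names are hypothetical and the header bookkeeping is elided.

    #include <cassert>
    #include <utility>
    #include <vector>

    using ProtoId = int;
    using MapId = int;

    struct ProtoTransitionCache {
      std::vector<std::pair<ProtoId, MapId>> entries;  // header fields elided

      // Returns the cached map for this prototype, or -1 (the "empty handle").
      MapId Get(ProtoId proto) const {
        for (const auto& e : entries)
          if (e.first == proto) return e.second;
        return -1;
      }

      // Appends a (prototype, map) pair. The real code additionally grows the
      // backing store by 2x over what is needed and re-reads the transition
      // count, since a GC during the copy may shrink the cache.
      void Put(ProtoId proto, MapId map) { entries.emplace_back(proto, map); }
    };

    int main() {
      ProtoTransitionCache cache;
      cache.Put(/*proto=*/7, /*map=*/42);
      assert(cache.Get(7) == 42);
      assert(cache.Get(8) == -1);  // miss: caller copies the map and inserts it
    }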
void Map::ZapTransitions() {
TransitionArray* transition_array = transitions();
- MemsetPointer(transition_array->data_start(),
- GetHeap()->the_hole_value(),
- transition_array->length());
+ // TODO(mstarzinger): Temporarily use a slower version instead of the faster
+ // MemsetPointer to investigate a crasher. Switch back to MemsetPointer.
+ Object** data = transition_array->data_start();
+ Object* the_hole = GetHeap()->the_hole_value();
+ int length = transition_array->length();
+ for (int i = 0; i < length; i++) {
+ data[i] = the_hole;
+ }
}
@@ -9055,13 +11515,213 @@ void Map::ZapPrototypeTransitions() {
}
-MaybeObject* JSReceiver::SetPrototype(Object* value,
+void Map::AddDependentCompilationInfo(DependentCode::DependencyGroup group,
+ CompilationInfo* info) {
+ Handle<DependentCode> dep(dependent_code());
+ Handle<DependentCode> codes =
+ DependentCode::Insert(dep, group, info->object_wrapper());
+ if (*codes != dependent_code()) set_dependent_code(*codes);
+ info->dependencies(group)->Add(Handle<HeapObject>(this), info->zone());
+}
+
+
+void Map::AddDependentCode(DependentCode::DependencyGroup group,
+ Handle<Code> code) {
+ Handle<DependentCode> codes = DependentCode::Insert(
+ Handle<DependentCode>(dependent_code()), group, code);
+ if (*codes != dependent_code()) set_dependent_code(*codes);
+}
+
+
+DependentCode::GroupStartIndexes::GroupStartIndexes(DependentCode* entries) {
+ Recompute(entries);
+}
+
+
+void DependentCode::GroupStartIndexes::Recompute(DependentCode* entries) {
+ start_indexes_[0] = 0;
+ for (int g = 1; g <= kGroupCount; g++) {
+ int count = entries->number_of_entries(static_cast<DependencyGroup>(g - 1));
+ start_indexes_[g] = start_indexes_[g - 1] + count;
+ }
+}
+
+
+DependentCode* DependentCode::ForObject(Handle<HeapObject> object,
+ DependencyGroup group) {
+ AllowDeferredHandleDereference dependencies_are_safe;
+ if (group == DependentCode::kPropertyCellChangedGroup) {
+ return Handle<PropertyCell>::cast(object)->dependent_code();
+ }
+ return Handle<Map>::cast(object)->dependent_code();
+}
+
+
+Handle<DependentCode> DependentCode::Insert(Handle<DependentCode> entries,
+ DependencyGroup group,
+ Handle<Object> object) {
+ GroupStartIndexes starts(*entries);
+ int start = starts.at(group);
+ int end = starts.at(group + 1);
+ int number_of_entries = starts.number_of_entries();
+ if (start < end && entries->object_at(end - 1) == *object) {
+ // Do not append the compilation info if it is already in the array.
+ // It is sufficient to check only the last element because
+ // we process embedded maps of optimized code in one batch.
+ return entries;
+ }
+ if (entries->length() < kCodesStartIndex + number_of_entries + 1) {
+ Factory* factory = entries->GetIsolate()->factory();
+ int capacity = kCodesStartIndex + number_of_entries + 1;
+ if (capacity > 5) capacity = capacity * 5 / 4;
+ Handle<DependentCode> new_entries = Handle<DependentCode>::cast(
+ factory->CopySizeFixedArray(entries, capacity, TENURED));
+ // The number of codes can change after GC.
+ starts.Recompute(*entries);
+ start = starts.at(group);
+ end = starts.at(group + 1);
+ number_of_entries = starts.number_of_entries();
+ for (int i = 0; i < number_of_entries; i++) {
+ entries->clear_at(i);
+ }
+ // If the old fixed array was empty, we need to reset counters of the
+ // new array.
+ if (number_of_entries == 0) {
+ for (int g = 0; g < kGroupCount; g++) {
+ new_entries->set_number_of_entries(static_cast<DependencyGroup>(g), 0);
+ }
+ }
+ entries = new_entries;
+ }
+ entries->ExtendGroup(group);
+ entries->set_object_at(end, *object);
+ entries->set_number_of_entries(group, end + 1 - start);
+ return entries;
+}
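+
+
DependentCode keeps all entries in one flat array partitioned into dependency groups; GroupStartIndexes is a prefix sum over the per-group counts, and inserting into a group appends at that group's end after shifting later groups. A simplified sketch of the prefix-sum step; kGroupCount and the names below are assumptions, not the real enum.

    #include <array>
    #include <cassert>

    constexpr int kGroupCount = 3;  // assumption: the real enum has more groups

    std::array<int, kGroupCount + 1> GroupStartIndexes(
        const std::array<int, kGroupCount>& counts) {
      std::array<int, kGroupCount + 1> starts{};
      starts[0] = 0;
      for (int g = 1; g <= kGroupCount; g++)
        starts[g] = starts[g - 1] + counts[g - 1];
      return starts;
    }

    int main() {
      auto starts = GroupStartIndexes({2, 0, 3});
      assert(starts[1] == 2 && starts[2] == 2 && starts[3] == 5);
      // Group 1 is empty, so an insert into it lands at index 2 and shifts
      // group 2 (indexes 2..4) one slot to the right, as ExtendGroup does.
    }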
+
+
+void DependentCode::UpdateToFinishedCode(DependencyGroup group,
+ CompilationInfo* info,
+ Code* code) {
+ DisallowHeapAllocation no_gc;
+ AllowDeferredHandleDereference get_object_wrapper;
+ Foreign* info_wrapper = *info->object_wrapper();
+ GroupStartIndexes starts(this);
+ int start = starts.at(group);
+ int end = starts.at(group + 1);
+ for (int i = start; i < end; i++) {
+ if (object_at(i) == info_wrapper) {
+ set_object_at(i, code);
+ break;
+ }
+ }
+
+#ifdef DEBUG
+ for (int i = start; i < end; i++) {
+ ASSERT(is_code_at(i) || compilation_info_at(i) != info);
+ }
+#endif
+}
+
+
+void DependentCode::RemoveCompilationInfo(DependentCode::DependencyGroup group,
+ CompilationInfo* info) {
+ DisallowHeapAllocation no_allocation;
+ AllowDeferredHandleDereference get_object_wrapper;
+ Foreign* info_wrapper = *info->object_wrapper();
+ GroupStartIndexes starts(this);
+ int start = starts.at(group);
+ int end = starts.at(group + 1);
+ // Find compilation info wrapper.
+ int info_pos = -1;
+ for (int i = start; i < end; i++) {
+ if (object_at(i) == info_wrapper) {
+ info_pos = i;
+ break;
+ }
+ }
+ if (info_pos == -1) return; // Not found.
+ int gap = info_pos;
+ // Use the last of each group to fill the gap in the previous group.
+ for (int i = group; i < kGroupCount; i++) {
+ int last_of_group = starts.at(i + 1) - 1;
+ ASSERT(last_of_group >= gap);
+ if (last_of_group == gap) continue;
+ copy(last_of_group, gap);
+ gap = last_of_group;
+ }
+ ASSERT(gap == starts.number_of_entries() - 1);
+ clear_at(gap); // Clear last gap.
+ set_number_of_entries(group, end - start - 1);
+
+#ifdef DEBUG
+ for (int i = start; i < end - 1; i++) {
+ ASSERT(is_code_at(i) || compilation_info_at(i) != info);
+ }
+#endif
+}
+
+
+bool DependentCode::Contains(DependencyGroup group, Code* code) {
+ GroupStartIndexes starts(this);
+ int start = starts.at(group);
+ int end = starts.at(group + 1);
+ for (int i = start; i < end; i++) {
+ if (object_at(i) == code) return true;
+ }
+ return false;
+}
+
+
+void DependentCode::DeoptimizeDependentCodeGroup(
+ Isolate* isolate,
+ DependentCode::DependencyGroup group) {
+ ASSERT(AllowCodeDependencyChange::IsAllowed());
+ DisallowHeapAllocation no_allocation_scope;
+ DependentCode::GroupStartIndexes starts(this);
+ int start = starts.at(group);
+ int end = starts.at(group + 1);
+ int code_entries = starts.number_of_entries();
+ if (start == end) return;
+
+ // Mark all the code that needs to be deoptimized.
+ bool marked = false;
+ for (int i = start; i < end; i++) {
+ if (is_code_at(i)) {
+ Code* code = code_at(i);
+ if (!code->marked_for_deoptimization()) {
+ code->set_marked_for_deoptimization(true);
+ marked = true;
+ }
+ } else {
+ CompilationInfo* info = compilation_info_at(i);
+ info->AbortDueToDependencyChange();
+ }
+ }
+ // Compact the array by moving all subsequent groups to fill in the new holes.
+ for (int src = end, dst = start; src < code_entries; src++, dst++) {
+ copy(src, dst);
+ }
+ // Now that the holes are at the end of the array, zap them for the heap verifier.
+ int removed = end - start;
+ for (int i = code_entries - removed; i < code_entries; i++) {
+ clear_at(i);
+ }
+ set_number_of_entries(group, 0);
+
+ if (marked) Deoptimizer::DeoptimizeMarkedCode(isolate);
+}
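+
+
After marking one group's code for deoptimization, the array is compacted by copying the trailing groups into the freed range and clearing the tail. A standalone sketch of that compaction step, with plain ints standing in for the entries.

    #include <cassert>
    #include <vector>

    void RemoveGroup(std::vector<int>& entries, int start, int end) {
      int total = static_cast<int>(entries.size());
      // Move every entry after the group down into the hole.
      for (int src = end, dst = start; src < total; src++, dst++)
        entries[dst] = entries[src];
      // Zap the now-unused tail (cleared for the heap verifier in the patch).
      int removed = end - start;
      for (int i = total - removed; i < total; i++) entries[i] = 0;
    }

    int main() {
      std::vector<int> e = {1, 2, 3, 4, 5};
      RemoveGroup(e, 1, 3);  // drop the group occupying indexes 1 and 2
      assert((e == std::vector<int>{1, 4, 5, 0, 0}));
    }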
+
+
+Handle<Object> JSObject::SetPrototype(Handle<JSObject> object,
+ Handle<Object> value,
bool skip_hidden_prototypes) {
#ifdef DEBUG
- int size = Size();
+ int size = object->Size();
#endif
- Heap* heap = GetHeap();
+ Isolate* isolate = object->GetIsolate();
+ Heap* heap = isolate->heap();
// Silently ignore the change if value is not a JSObject or null.
// SpiderMonkey behaves this way.
if (!value->IsJSReceiver() && !value->IsNull()) return value;
@@ -9074,67 +11734,64 @@ MaybeObject* JSReceiver::SetPrototype(Object* value,
// Implementation specific extensions that modify [[Class]], [[Prototype]]
// or [[Extensible]] must not violate the invariants defined in the preceding
// paragraph.
- if (!this->map()->is_extensible()) {
- HandleScope scope(heap->isolate());
- Handle<Object> handle(this, heap->isolate());
- return heap->isolate()->Throw(
- *FACTORY->NewTypeError("non_extensible_proto",
- HandleVector<Object>(&handle, 1)));
+ if (!object->map()->is_extensible()) {
+ Handle<Object> args[] = { object };
+ Handle<Object> error = isolate->factory()->NewTypeError(
+ "non_extensible_proto", HandleVector(args, ARRAY_SIZE(args)));
+ isolate->Throw(*error);
+ return Handle<Object>();
}
// Before we can set the prototype we need to be sure
// prototype cycles are prevented.
// It is sufficient to validate that the receiver is not in the new prototype
// chain.
- for (Object* pt = value; pt != heap->null_value(); pt = pt->GetPrototype()) {
- if (JSReceiver::cast(pt) == this) {
+ for (Object* pt = *value;
+ pt != heap->null_value();
+ pt = pt->GetPrototype(isolate)) {
+ if (JSReceiver::cast(pt) == *object) {
// Cycle detected.
- HandleScope scope(heap->isolate());
- return heap->isolate()->Throw(
- *FACTORY->NewError("cyclic_proto", HandleVector<Object>(NULL, 0)));
+ Handle<Object> error = isolate->factory()->NewError(
+ "cyclic_proto", HandleVector<Object>(NULL, 0));
+ isolate->Throw(*error);
+ return Handle<Object>();
}
}
- JSReceiver* real_receiver = this;
+ Handle<JSObject> real_receiver = object;
if (skip_hidden_prototypes) {
// Find the first object in the chain whose prototype object is not
// hidden and set the new prototype on that object.
Object* current_proto = real_receiver->GetPrototype();
while (current_proto->IsJSObject() &&
- JSReceiver::cast(current_proto)->map()->is_hidden_prototype()) {
- real_receiver = JSReceiver::cast(current_proto);
- current_proto = current_proto->GetPrototype();
+ JSObject::cast(current_proto)->map()->is_hidden_prototype()) {
+ real_receiver = handle(JSObject::cast(current_proto), isolate);
+ current_proto = current_proto->GetPrototype(isolate);
}
}
// Set the new prototype of the object.
- Map* map = real_receiver->map();
+ Handle<Map> map(real_receiver->map());
// Nothing to do if prototype is already set.
- if (map->prototype() == value) return value;
+ if (map->prototype() == *value) return value;
if (value->IsJSObject()) {
- MaybeObject* ok = JSObject::cast(value)->OptimizeAsPrototype();
- if (ok->IsFailure()) return ok;
+ JSObject::OptimizeAsPrototype(Handle<JSObject>::cast(value));
}
- Map* new_map = map->GetPrototypeTransition(value);
- if (new_map == NULL) {
- MaybeObject* maybe_new_map = map->Copy();
- if (!maybe_new_map->To(&new_map)) return maybe_new_map;
-
- MaybeObject* maybe_new_cache =
- map->PutPrototypeTransition(value, new_map);
- if (maybe_new_cache->IsFailure()) return maybe_new_cache;
-
- new_map->set_prototype(value);
+ Handle<Map> new_map = Map::GetPrototypeTransition(map, value);
+ if (new_map.is_null()) {
+ new_map = Map::Copy(map);
+ Map::PutPrototypeTransition(map, value, new_map);
+ new_map->set_prototype(*value);
}
- ASSERT(new_map->prototype() == value);
- real_receiver->set_map(new_map);
+ ASSERT(new_map->prototype() == *value);
+ real_receiver->set_map(*new_map);
heap->ClearInstanceofCache();
- ASSERT(size == Size());
+ ASSERT(size == object->Size());
return value;
}
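
The cycle check above walks the candidate prototype chain and rejects the store if the receiver itself appears in it. A minimal standalone illustration of the same walk; the Obj struct is hypothetical and nullptr stands in for null.

    #include <cassert>

    struct Obj {
      Obj* proto = nullptr;  // nullptr plays the role of the null prototype
    };

    bool WouldCreateCycle(const Obj* receiver, const Obj* new_proto) {
      for (const Obj* pt = new_proto; pt != nullptr; pt = pt->proto) {
        if (pt == receiver) return true;  // cycle detected
      }
      return false;
    }

    int main() {
      Obj a, b;
      b.proto = &a;
      assert(!WouldCreateCycle(&a, nullptr));
      assert(WouldCreateCycle(&a, &b));  // a -> b -> a would be cyclic
    }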
@@ -9152,203 +11809,35 @@ MaybeObject* JSObject::EnsureCanContainElements(Arguments* args,
}
-bool JSObject::HasElementWithInterceptor(JSReceiver* receiver, uint32_t index) {
- Isolate* isolate = GetIsolate();
- // Make sure that the top context does not change when doing
- // callbacks or interceptor calls.
- AssertNoContextChange ncc;
- HandleScope scope(isolate);
- Handle<InterceptorInfo> interceptor(GetIndexedInterceptor());
- Handle<JSReceiver> receiver_handle(receiver);
- Handle<JSObject> holder_handle(this);
- CustomArguments args(isolate, interceptor->data(), receiver, this);
- v8::AccessorInfo info(args.end());
- if (!interceptor->query()->IsUndefined()) {
- v8::IndexedPropertyQuery query =
- v8::ToCData<v8::IndexedPropertyQuery>(interceptor->query());
- LOG(isolate,
- ApiIndexedPropertyAccess("interceptor-indexed-has", this, index));
- v8::Handle<v8::Integer> result;
- {
- // Leaving JavaScript.
- VMState state(isolate, EXTERNAL);
- result = query(index, info);
- }
- if (!result.IsEmpty()) {
- ASSERT(result->IsInt32());
- return true; // absence of property is signaled by empty handle.
- }
- } else if (!interceptor->getter()->IsUndefined()) {
- v8::IndexedPropertyGetter getter =
- v8::ToCData<v8::IndexedPropertyGetter>(interceptor->getter());
- LOG(isolate,
- ApiIndexedPropertyAccess("interceptor-indexed-has-get", this, index));
- v8::Handle<v8::Value> result;
- {
- // Leaving JavaScript.
- VMState state(isolate, EXTERNAL);
- result = getter(index, info);
- }
- if (!result.IsEmpty()) return true;
+AccessorPair* JSObject::GetLocalPropertyAccessorPair(Name* name) {
+ uint32_t index = 0;
+ if (name->AsArrayIndex(&index)) {
+ return GetLocalElementAccessorPair(index);
}
- if (holder_handle->GetElementsAccessor()->HasElement(
- *receiver_handle, *holder_handle, index)) {
- return true;
- }
+ LookupResult lookup(GetIsolate());
+ LocalLookupRealNamedProperty(name, &lookup);
- if (holder_handle->IsStringObjectWithCharacterAt(index)) return true;
- Object* pt = holder_handle->GetPrototype();
- if (pt->IsJSProxy()) {
- // We need to follow the spec and simulate a call to [[GetOwnProperty]].
- return JSProxy::cast(pt)->GetElementAttributeWithHandler(
- receiver, index) != ABSENT;
+ if (lookup.IsPropertyCallbacks() &&
+ lookup.GetCallbackObject()->IsAccessorPair()) {
+ return AccessorPair::cast(lookup.GetCallbackObject());
}
- if (pt->IsNull()) return false;
- return JSObject::cast(pt)->HasElementWithReceiver(*receiver_handle, index);
+ return NULL;
}
-JSObject::LocalElementType JSObject::HasLocalElement(uint32_t index) {
- // Check access rights if needed.
- if (IsAccessCheckNeeded()) {
- Heap* heap = GetHeap();
- if (!heap->isolate()->MayIndexedAccess(this, index, v8::ACCESS_HAS)) {
- heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
- return UNDEFINED_ELEMENT;
- }
- }
-
+AccessorPair* JSObject::GetLocalElementAccessorPair(uint32_t index) {
if (IsJSGlobalProxy()) {
Object* proto = GetPrototype();
- if (proto->IsNull()) return UNDEFINED_ELEMENT;
+ if (proto->IsNull()) return NULL;
ASSERT(proto->IsJSGlobalObject());
- return JSObject::cast(proto)->HasLocalElement(index);
- }
-
- // Check for lookup interceptor
- if (HasIndexedInterceptor()) {
- return HasElementWithInterceptor(this, index) ? INTERCEPTED_ELEMENT
- : UNDEFINED_ELEMENT;
- }
-
- // Handle [] on String objects.
- if (this->IsStringObjectWithCharacterAt(index)) {
- return STRING_CHARACTER_ELEMENT;
- }
-
- switch (GetElementsKind()) {
- case FAST_SMI_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_ELEMENTS: {
- uint32_t length = IsJSArray() ?
- static_cast<uint32_t>
- (Smi::cast(JSArray::cast(this)->length())->value()) :
- static_cast<uint32_t>(FixedArray::cast(elements())->length());
- if ((index < length) &&
- !FixedArray::cast(elements())->get(index)->IsTheHole()) {
- return FAST_ELEMENT;
- }
- break;
- }
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS: {
- uint32_t length = IsJSArray() ?
- static_cast<uint32_t>
- (Smi::cast(JSArray::cast(this)->length())->value()) :
- static_cast<uint32_t>(FixedDoubleArray::cast(elements())->length());
- if ((index < length) &&
- !FixedDoubleArray::cast(elements())->is_the_hole(index)) {
- return FAST_ELEMENT;
- }
- break;
- }
- case EXTERNAL_PIXEL_ELEMENTS: {
- ExternalPixelArray* pixels = ExternalPixelArray::cast(elements());
- if (index < static_cast<uint32_t>(pixels->length())) return FAST_ELEMENT;
- break;
- }
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS: {
- ExternalArray* array = ExternalArray::cast(elements());
- if (index < static_cast<uint32_t>(array->length())) return FAST_ELEMENT;
- break;
- }
- case DICTIONARY_ELEMENTS: {
- if (element_dictionary()->FindEntry(index) !=
- SeededNumberDictionary::kNotFound) {
- return DICTIONARY_ELEMENT;
- }
- break;
- }
- case NON_STRICT_ARGUMENTS_ELEMENTS: {
- // Aliased parameters and non-aliased elements in a fast backing store
- // behave as FAST_ELEMENT. Non-aliased elements in a dictionary
- // backing store behave as DICTIONARY_ELEMENT.
- FixedArray* parameter_map = FixedArray::cast(elements());
- uint32_t length = parameter_map->length();
- Object* probe =
- index < (length - 2) ? parameter_map->get(index + 2) : NULL;
- if (probe != NULL && !probe->IsTheHole()) return FAST_ELEMENT;
- // If not aliased, check the arguments.
- FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
- if (arguments->IsDictionary()) {
- SeededNumberDictionary* dictionary =
- SeededNumberDictionary::cast(arguments);
- if (dictionary->FindEntry(index) != SeededNumberDictionary::kNotFound) {
- return DICTIONARY_ELEMENT;
- }
- } else {
- length = arguments->length();
- probe = (index < length) ? arguments->get(index) : NULL;
- if (probe != NULL && !probe->IsTheHole()) return FAST_ELEMENT;
- }
- break;
- }
+ return JSObject::cast(proto)->GetLocalElementAccessorPair(index);
}
- return UNDEFINED_ELEMENT;
-}
+ // Check for lookup interceptor.
+ if (HasIndexedInterceptor()) return NULL;
-
-bool JSObject::HasElementWithReceiver(JSReceiver* receiver, uint32_t index) {
- // Check access rights if needed.
- if (IsAccessCheckNeeded()) {
- Heap* heap = GetHeap();
- if (!heap->isolate()->MayIndexedAccess(this, index, v8::ACCESS_HAS)) {
- heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
- return false;
- }
- }
-
- // Check for lookup interceptor
- if (HasIndexedInterceptor()) {
- return HasElementWithInterceptor(receiver, index);
- }
-
- ElementsAccessor* accessor = GetElementsAccessor();
- if (accessor->HasElement(receiver, this, index)) {
- return true;
- }
-
- // Handle [] on String objects.
- if (this->IsStringObjectWithCharacterAt(index)) return true;
-
- Object* pt = GetPrototype();
- if (pt->IsNull()) return false;
- if (pt->IsJSProxy()) {
- // We need to follow the spec and simulate a call to [[GetOwnProperty]].
- return JSProxy::cast(pt)->GetElementAttributeWithHandler(
- receiver, index) != ABSENT;
- }
- return JSObject::cast(pt)->HasElementWithReceiver(receiver, index);
+ return GetElementsAccessor()->GetAccessorPair(this, this, index);
}
@@ -9359,26 +11848,23 @@ MaybeObject* JSObject::SetElementWithInterceptor(uint32_t index,
bool check_prototype,
SetPropertyMode set_mode) {
Isolate* isolate = GetIsolate();
+ HandleScope scope(isolate);
+
// Make sure that the top context does not change when doing
// callbacks or interceptor calls.
- AssertNoContextChange ncc;
- HandleScope scope(isolate);
+ AssertNoContextChange ncc(isolate);
+
Handle<InterceptorInfo> interceptor(GetIndexedInterceptor());
Handle<JSObject> this_handle(this);
Handle<Object> value_handle(value, isolate);
if (!interceptor->setter()->IsUndefined()) {
- v8::IndexedPropertySetter setter =
- v8::ToCData<v8::IndexedPropertySetter>(interceptor->setter());
+ v8::IndexedPropertySetterCallback setter =
+ v8::ToCData<v8::IndexedPropertySetterCallback>(interceptor->setter());
LOG(isolate,
ApiIndexedPropertyAccess("interceptor-indexed-set", this, index));
- CustomArguments args(isolate, interceptor->data(), this, this);
- v8::AccessorInfo info(args.end());
- v8::Handle<v8::Value> result;
- {
- // Leaving JavaScript.
- VMState state(isolate, EXTERNAL);
- result = setter(index, v8::Utils::ToLocal(value_handle), info);
- }
+ PropertyCallbackArguments args(isolate, interceptor->data(), this, this);
+ v8::Handle<v8::Value> result =
+ args.Call(setter, index, v8::Utils::ToLocal(value_handle));
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
if (!result.IsEmpty()) return *value_handle;
}
@@ -9402,10 +11888,12 @@ MaybeObject* JSObject::GetElementWithCallback(Object* receiver,
ASSERT(!structure->IsForeign());
// api style callbacks.
- if (structure->IsAccessorInfo()) {
- Handle<AccessorInfo> data(AccessorInfo::cast(structure));
+ if (structure->IsExecutableAccessorInfo()) {
+ Handle<ExecutableAccessorInfo> data(
+ ExecutableAccessorInfo::cast(structure));
Object* fun_obj = data->getter();
- v8::AccessorGetter call_fun = v8::ToCData<v8::AccessorGetter>(fun_obj);
+ v8::AccessorGetterCallback call_fun =
+ v8::ToCData<v8::AccessorGetterCallback>(fun_obj);
if (call_fun == NULL) return isolate->heap()->undefined_value();
HandleScope scope(isolate);
Handle<JSObject> self(JSObject::cast(receiver));
@@ -9413,14 +11901,9 @@ MaybeObject* JSObject::GetElementWithCallback(Object* receiver,
Handle<Object> number = isolate->factory()->NewNumberFromUint(index);
Handle<String> key = isolate->factory()->NumberToString(number);
LOG(isolate, ApiNamedPropertyAccess("load", *self, *key));
- CustomArguments args(isolate, data->data(), *self, *holder_handle);
- v8::AccessorInfo info(args.end());
- v8::Handle<v8::Value> result;
- {
- // Leaving JavaScript.
- VMState state(isolate, EXTERNAL);
- result = call_fun(v8::Utils::ToLocal(key), info);
- }
+ PropertyCallbackArguments
+ args(isolate, data->data(), *self, *holder_handle);
+ v8::Handle<v8::Value> result = args.Call(call_fun, v8::Utils::ToLocal(key));
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
if (result.IsEmpty()) return isolate->heap()->undefined_value();
Handle<Object> result_internal = v8::Utils::OpenHandle(*result);
@@ -9439,73 +11922,78 @@ MaybeObject* JSObject::GetElementWithCallback(Object* receiver,
return isolate->heap()->undefined_value();
}
+ if (structure->IsDeclaredAccessorInfo()) {
+ return GetDeclaredAccessorProperty(receiver,
+ DeclaredAccessorInfo::cast(structure),
+ isolate);
+ }
+
UNREACHABLE();
return NULL;
}
-MaybeObject* JSObject::SetElementWithCallback(Object* structure,
- uint32_t index,
- Object* value,
- JSObject* holder,
- StrictModeFlag strict_mode) {
- Isolate* isolate = GetIsolate();
- HandleScope scope(isolate);
+Handle<Object> JSObject::SetElementWithCallback(Handle<JSObject> object,
+ Handle<Object> structure,
+ uint32_t index,
+ Handle<Object> value,
+ Handle<JSObject> holder,
+ StrictModeFlag strict_mode) {
+ Isolate* isolate = object->GetIsolate();
// We should never get here to initialize a const with the hole
// value since a const declaration would conflict with the setter.
ASSERT(!value->IsTheHole());
- Handle<Object> value_handle(value, isolate);
// To accommodate both the old and the new api we switch on the
// data structure used to store the callbacks. Eventually foreign
// callbacks should be phased out.
ASSERT(!structure->IsForeign());
- if (structure->IsAccessorInfo()) {
+ if (structure->IsExecutableAccessorInfo()) {
// api style callbacks
- Handle<JSObject> self(this);
- Handle<JSObject> holder_handle(JSObject::cast(holder));
- Handle<AccessorInfo> data(AccessorInfo::cast(structure));
+ Handle<ExecutableAccessorInfo> data =
+ Handle<ExecutableAccessorInfo>::cast(structure);
Object* call_obj = data->setter();
- v8::AccessorSetter call_fun = v8::ToCData<v8::AccessorSetter>(call_obj);
+ v8::AccessorSetterCallback call_fun =
+ v8::ToCData<v8::AccessorSetterCallback>(call_obj);
if (call_fun == NULL) return value;
Handle<Object> number = isolate->factory()->NewNumberFromUint(index);
Handle<String> key(isolate->factory()->NumberToString(number));
- LOG(isolate, ApiNamedPropertyAccess("store", *self, *key));
- CustomArguments args(isolate, data->data(), *self, *holder_handle);
- v8::AccessorInfo info(args.end());
- {
- // Leaving JavaScript.
- VMState state(isolate, EXTERNAL);
- call_fun(v8::Utils::ToLocal(key),
- v8::Utils::ToLocal(value_handle),
- info);
- }
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- return *value_handle;
+ LOG(isolate, ApiNamedPropertyAccess("store", *object, *key));
+ PropertyCallbackArguments
+ args(isolate, data->data(), *object, *holder);
+ args.Call(call_fun,
+ v8::Utils::ToLocal(key),
+ v8::Utils::ToLocal(value));
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ return value;
}
if (structure->IsAccessorPair()) {
- Handle<Object> setter(AccessorPair::cast(structure)->setter());
+ Handle<Object> setter(AccessorPair::cast(*structure)->setter(), isolate);
if (setter->IsSpecFunction()) {
// TODO(rossberg): nicer would be to cast to some JSCallable here...
- return SetPropertyWithDefinedSetter(JSReceiver::cast(*setter), value);
+ return SetPropertyWithDefinedSetter(
+ object, Handle<JSReceiver>::cast(setter), value);
} else {
if (strict_mode == kNonStrictMode) {
return value;
}
- Handle<Object> holder_handle(holder, isolate);
Handle<Object> key(isolate->factory()->NewNumberFromUint(index));
- Handle<Object> args[2] = { key, holder_handle };
- return isolate->Throw(
- *isolate->factory()->NewTypeError("no_setter_in_callback",
- HandleVector(args, 2)));
+ Handle<Object> args[2] = { key, holder };
+ Handle<Object> error = isolate->factory()->NewTypeError(
+ "no_setter_in_callback", HandleVector(args, 2));
+ isolate->Throw(*error);
+ return Handle<Object>();
}
}
+ // TODO(dcarney): Handle correctly.
+ if (structure->IsDeclaredAccessorInfo()) return value;
+
UNREACHABLE();
- return NULL;
+ return Handle<Object>();
}
@@ -9543,6 +12031,18 @@ MaybeObject* JSObject::SetFastElement(uint32_t index,
ASSERT(HasFastSmiOrObjectElements() ||
HasFastArgumentsElements());
+ // Array optimizations rely on the prototype lookups of Array objects always
+ // returning undefined. If there is a store to the initial prototype object,
+ // make sure all of these optimizations are invalidated.
+ Isolate* isolate(GetIsolate());
+ if (isolate->is_initial_object_prototype(this) ||
+ isolate->is_initial_array_prototype(this)) {
+ HandleScope scope(GetIsolate());
+ map()->dependent_code()->DeoptimizeDependentCodeGroup(
+ GetIsolate(),
+ DependentCode::kElementsCantBeAddedGroup);
+ }
+
FixedArray* backing_store = FixedArray::cast(elements());
if (backing_store->map() == GetHeap()->non_strict_arguments_elements_map()) {
backing_store = FixedArray::cast(backing_store->get(1));
@@ -9609,6 +12109,14 @@ MaybeObject* JSObject::SetFastElement(uint32_t index,
}
// Convert to fast double elements if appropriate.
if (HasFastSmiElements() && !value->IsSmi() && value->IsNumber()) {
+ // Consider fixing the boilerplate as well if we have one.
+ ElementsKind to_kind = IsHoleyElementsKind(elements_kind)
+ ? FAST_HOLEY_DOUBLE_ELEMENTS
+ : FAST_DOUBLE_ELEMENTS;
+
+ MaybeObject* maybe_failure = UpdateAllocationSite(to_kind);
+ if (maybe_failure->IsFailure()) return maybe_failure;
+
MaybeObject* maybe =
SetFastDoubleElementsCapacityAndLength(new_capacity, array_length);
if (maybe->IsFailure()) return maybe;
@@ -9622,6 +12130,10 @@ MaybeObject* JSObject::SetFastElement(uint32_t index,
ElementsKind kind = HasFastHoleyElements()
? FAST_HOLEY_ELEMENTS
: FAST_ELEMENTS;
+
+ MaybeObject* maybe_failure = UpdateAllocationSite(kind);
+ if (maybe_failure->IsFailure()) return maybe_failure;
+
MaybeObject* maybe_new_map = GetElementsTransitionMap(GetIsolate(),
kind);
if (!maybe_new_map->To(&new_map)) return maybe_new_map;
@@ -9657,7 +12169,7 @@ MaybeObject* JSObject::SetFastElement(uint32_t index,
MaybeObject* JSObject::SetDictionaryElement(uint32_t index,
- Object* value,
+ Object* value_raw,
PropertyAttributes attributes,
StrictModeFlag strict_mode,
bool check_prototype,
@@ -9665,24 +12177,26 @@ MaybeObject* JSObject::SetDictionaryElement(uint32_t index,
ASSERT(HasDictionaryElements() || HasDictionaryArgumentsElements());
Isolate* isolate = GetIsolate();
Heap* heap = isolate->heap();
+ Handle<JSObject> self(this);
+ Handle<Object> value(value_raw, isolate);
// Insert element in the dictionary.
- FixedArray* elements = FixedArray::cast(this->elements());
+ Handle<FixedArray> elements(FixedArray::cast(this->elements()));
bool is_arguments =
(elements->map() == heap->non_strict_arguments_elements_map());
- SeededNumberDictionary* dictionary = NULL;
- if (is_arguments) {
- dictionary = SeededNumberDictionary::cast(elements->get(1));
- } else {
- dictionary = SeededNumberDictionary::cast(elements);
- }
+ Handle<SeededNumberDictionary> dictionary(is_arguments
+ ? SeededNumberDictionary::cast(elements->get(1))
+ : SeededNumberDictionary::cast(*elements));
int entry = dictionary->FindEntry(index);
if (entry != SeededNumberDictionary::kNotFound) {
- Object* element = dictionary->ValueAt(entry);
+ Handle<Object> element(dictionary->ValueAt(entry), isolate);
PropertyDetails details = dictionary->DetailsAt(entry);
if (details.type() == CALLBACKS && set_mode == SET_PROPERTY) {
- return SetElementWithCallback(element, index, value, this, strict_mode);
+ Handle<Object> result = SetElementWithCallback(self, element, index,
+ value, self, strict_mode);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
} else {
dictionary->UpdateMaxNumberKey(index);
// If a value has not been initialized we allow writing to it even if it
@@ -9696,7 +12210,7 @@ MaybeObject* JSObject::SetDictionaryElement(uint32_t index,
if (strict_mode == kNonStrictMode) {
return isolate->heap()->undefined_value();
} else {
- Handle<Object> holder(this);
+ Handle<Object> holder(this, isolate);
Handle<Object> number = isolate->factory()->NewNumberFromUint(index);
Handle<Object> args[2] = { number, holder };
Handle<Object> error =
@@ -9707,28 +12221,28 @@ MaybeObject* JSObject::SetDictionaryElement(uint32_t index,
}
// Elements of the arguments object in slow mode might be slow aliases.
if (is_arguments && element->IsAliasedArgumentsEntry()) {
- AliasedArgumentsEntry* entry = AliasedArgumentsEntry::cast(element);
+ AliasedArgumentsEntry* entry = AliasedArgumentsEntry::cast(*element);
Context* context = Context::cast(elements->get(0));
int context_index = entry->aliased_context_slot();
ASSERT(!context->get(context_index)->IsTheHole());
- context->set(context_index, value);
+ context->set(context_index, *value);
// For elements that are still writable we keep slow aliasing.
if (!details.IsReadOnly()) value = element;
}
- dictionary->ValueAtPut(entry, value);
+ dictionary->ValueAtPut(entry, *value);
}
} else {
// Index not already used. Look for an accessor in the prototype chain.
+ // Can cause GC!
if (check_prototype) {
bool found;
- MaybeObject* result =
- SetElementWithCallbackSetterInPrototypes(
- index, value, &found, strict_mode);
+ MaybeObject* result = SetElementWithCallbackSetterInPrototypes(
+ index, *value, &found, strict_mode);
if (found) return result;
}
// When we set the is_extensible flag to false we always force the
// element into dictionary mode (and force them to stay there).
- if (!map()->is_extensible()) {
+ if (!self->map()->is_extensible()) {
if (strict_mode == kNonStrictMode) {
return isolate->heap()->undefined_value();
} else {
@@ -9742,31 +12256,32 @@ MaybeObject* JSObject::SetDictionaryElement(uint32_t index,
}
}
FixedArrayBase* new_dictionary;
- PropertyDetails details = PropertyDetails(attributes, NORMAL);
- MaybeObject* maybe = dictionary->AddNumberEntry(index, value, details);
+ PropertyDetails details = PropertyDetails(attributes, NORMAL, 0);
+ MaybeObject* maybe = dictionary->AddNumberEntry(index, *value, details);
if (!maybe->To(&new_dictionary)) return maybe;
- if (dictionary != SeededNumberDictionary::cast(new_dictionary)) {
+ if (*dictionary != SeededNumberDictionary::cast(new_dictionary)) {
if (is_arguments) {
elements->set(1, new_dictionary);
} else {
- set_elements(new_dictionary);
+ self->set_elements(new_dictionary);
}
- dictionary = SeededNumberDictionary::cast(new_dictionary);
+ dictionary =
+ handle(SeededNumberDictionary::cast(new_dictionary), isolate);
}
}
// Update the array length if this JSObject is an array.
- if (IsJSArray()) {
+ if (self->IsJSArray()) {
MaybeObject* result =
- JSArray::cast(this)->JSArrayUpdateLengthFromIndex(index, value);
+ JSArray::cast(*self)->JSArrayUpdateLengthFromIndex(index, *value);
if (result->IsFailure()) return result;
}
// Attempt to put this object back in fast case.
- if (ShouldConvertToFastElements()) {
+ if (self->ShouldConvertToFastElements()) {
uint32_t new_length = 0;
- if (IsJSArray()) {
- CHECK(JSArray::cast(this)->length()->ToArrayIndex(&new_length));
+ if (self->IsJSArray()) {
+ CHECK(JSArray::cast(*self)->length()->ToArrayIndex(&new_length));
} else {
new_length = dictionary->max_number_key() + 1;
}
@@ -9775,16 +12290,15 @@ MaybeObject* JSObject::SetDictionaryElement(uint32_t index,
: kDontAllowSmiElements;
bool has_smi_only_elements = false;
bool should_convert_to_fast_double_elements =
- ShouldConvertToFastDoubleElements(&has_smi_only_elements);
+ self->ShouldConvertToFastDoubleElements(&has_smi_only_elements);
if (has_smi_only_elements) {
smi_mode = kForceSmiElements;
}
MaybeObject* result = should_convert_to_fast_double_elements
- ? SetFastDoubleElementsCapacityAndLength(new_length, new_length)
- : SetFastElementsCapacityAndLength(new_length,
- new_length,
- smi_mode);
- ValidateElements();
+ ? self->SetFastDoubleElementsCapacityAndLength(new_length, new_length)
+ : self->SetFastElementsCapacityAndLength(
+ new_length, new_length, smi_mode);
+ self->ValidateElements();
if (result->IsFailure()) return result;
#ifdef DEBUG
if (FLAG_trace_normalization) {
@@ -9793,7 +12307,7 @@ MaybeObject* JSObject::SetDictionaryElement(uint32_t index,
}
#endif
}
- return value;
+ return *value;
}
@@ -9900,18 +12414,17 @@ MUST_USE_RESULT MaybeObject* JSObject::SetFastDoubleElement(
}
-MaybeObject* JSReceiver::SetElement(uint32_t index,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- bool check_proto) {
- if (IsJSProxy()) {
- return JSProxy::cast(this)->SetElementWithHandler(
- this, index, value, strict_mode);
- } else {
- return JSObject::cast(this)->SetElement(
- index, value, attributes, strict_mode, check_proto);
+Handle<Object> JSReceiver::SetElement(Handle<JSReceiver> object,
+ uint32_t index,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ StrictModeFlag strict_mode) {
+ if (object->IsJSProxy()) {
+ return JSProxy::SetElementWithHandler(
+ Handle<JSProxy>::cast(object), object, index, value, strict_mode);
}
+ return JSObject::SetElement(
+ Handle<JSObject>::cast(object), index, value, attributes, strict_mode);
}
@@ -9934,9 +12447,10 @@ Handle<Object> JSObject::SetElement(Handle<JSObject> object,
StrictModeFlag strict_mode,
SetPropertyMode set_mode) {
if (object->HasExternalArrayElements()) {
- if (!value->IsSmi() && !value->IsHeapNumber() && !value->IsUndefined()) {
+ if (!value->IsNumber() && !value->IsUndefined()) {
bool has_exception;
- Handle<Object> number = Execution::ToNumber(value, &has_exception);
+ Handle<Object> number =
+ Execution::ToNumber(object->GetIsolate(), value, &has_exception);
if (has_exception) return Handle<Object>();
value = number;
}
@@ -9949,28 +12463,28 @@ Handle<Object> JSObject::SetElement(Handle<JSObject> object,
MaybeObject* JSObject::SetElement(uint32_t index,
- Object* value,
+ Object* value_raw,
PropertyAttributes attributes,
StrictModeFlag strict_mode,
bool check_prototype,
SetPropertyMode set_mode) {
+ Isolate* isolate = GetIsolate();
+
// Check access rights if needed.
if (IsAccessCheckNeeded()) {
- Heap* heap = GetHeap();
- if (!heap->isolate()->MayIndexedAccess(this, index, v8::ACCESS_SET)) {
- HandleScope scope(heap->isolate());
- Handle<Object> value_handle(value);
- heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_SET);
- return *value_handle;
+ if (!isolate->MayIndexedAccess(this, index, v8::ACCESS_SET)) {
+ isolate->ReportFailedAccessCheck(this, v8::ACCESS_SET);
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
+ return value_raw;
}
}
if (IsJSGlobalProxy()) {
Object* proto = GetPrototype();
- if (proto->IsNull()) return value;
+ if (proto->IsNull()) return value_raw;
ASSERT(proto->IsJSGlobalObject());
return JSObject::cast(proto)->SetElement(index,
- value,
+ value_raw,
attributes,
strict_mode,
check_prototype,
@@ -9979,10 +12493,8 @@ MaybeObject* JSObject::SetElement(uint32_t index,
// Don't allow element properties to be redefined for external arrays.
if (HasExternalArrayElements() && set_mode == DEFINE_PROPERTY) {
- Isolate* isolate = GetHeap()->isolate();
- Handle<Object> receiver(this);
Handle<Object> number = isolate->factory()->NewNumberFromUint(index);
- Handle<Object> args[] = { receiver, number };
+ Handle<Object> args[] = { handle(this, isolate), number };
Handle<Object> error = isolate->factory()->NewTypeError(
"redef_external_array_element", HandleVector(args, ARRAY_SIZE(args)));
return isolate->Throw(*error);
@@ -9997,22 +12509,77 @@ MaybeObject* JSObject::SetElement(uint32_t index,
dictionary->set_requires_slow_elements();
}
+ if (!(FLAG_harmony_observation && map()->is_observed())) {
+ return HasIndexedInterceptor()
+ ? SetElementWithInterceptor(
+ index, value_raw, attributes, strict_mode, check_prototype, set_mode)
+ : SetElementWithoutInterceptor(
+ index, value_raw, attributes, strict_mode, check_prototype, set_mode);
+ }
+
+ // From here on, everything has to be handlified.
+ Handle<JSObject> self(this);
+ Handle<Object> value(value_raw, isolate);
+ PropertyAttributes old_attributes = self->GetLocalElementAttribute(index);
+ Handle<Object> old_value = isolate->factory()->the_hole_value();
+ Handle<Object> old_length_handle;
+ Handle<Object> new_length_handle;
+
+ if (old_attributes != ABSENT) {
+ if (self->GetLocalElementAccessorPair(index) == NULL)
+ old_value = Object::GetElement(isolate, self, index);
+ } else if (self->IsJSArray()) {
+ // Store old array length in case adding an element grows the array.
+ old_length_handle = handle(Handle<JSArray>::cast(self)->length(), isolate);
+ }
+
// Check for lookup interceptor
- if (HasIndexedInterceptor()) {
- return SetElementWithInterceptor(index,
- value,
- attributes,
- strict_mode,
- check_prototype,
- set_mode);
+ MaybeObject* result = self->HasIndexedInterceptor()
+ ? self->SetElementWithInterceptor(
+ index, *value, attributes, strict_mode, check_prototype, set_mode)
+ : self->SetElementWithoutInterceptor(
+ index, *value, attributes, strict_mode, check_prototype, set_mode);
+
+ Handle<Object> hresult;
+ if (!result->ToHandle(&hresult, isolate)) return result;
+
+ Handle<String> name = isolate->factory()->Uint32ToString(index);
+ PropertyAttributes new_attributes = self->GetLocalElementAttribute(index);
+ if (old_attributes == ABSENT) {
+ if (self->IsJSArray() &&
+ !old_length_handle->SameValue(Handle<JSArray>::cast(self)->length())) {
+ new_length_handle = handle(Handle<JSArray>::cast(self)->length(),
+ isolate);
+ uint32_t old_length = 0;
+ uint32_t new_length = 0;
+ CHECK(old_length_handle->ToArrayIndex(&old_length));
+ CHECK(new_length_handle->ToArrayIndex(&new_length));
+
+ BeginPerformSplice(Handle<JSArray>::cast(self));
+ EnqueueChangeRecord(self, "new", name, old_value);
+ EnqueueChangeRecord(self, "updated", isolate->factory()->length_string(),
+ old_length_handle);
+ EndPerformSplice(Handle<JSArray>::cast(self));
+ Handle<JSArray> deleted = isolate->factory()->NewJSArray(0);
+ EnqueueSpliceRecord(Handle<JSArray>::cast(self), old_length, deleted,
+ new_length - old_length);
+ } else {
+ EnqueueChangeRecord(self, "new", name, old_value);
+ }
+ } else if (old_value->IsTheHole()) {
+ EnqueueChangeRecord(self, "reconfigured", name, old_value);
+ } else {
+ Handle<Object> new_value = Object::GetElement(isolate, self, index);
+ bool value_changed = !old_value->SameValue(*new_value);
+ if (old_attributes != new_attributes) {
+ if (!value_changed) old_value = isolate->factory()->the_hole_value();
+ EnqueueChangeRecord(self, "reconfigured", name, old_value);
+ } else if (value_changed) {
+ EnqueueChangeRecord(self, "updated", name, old_value);
+ }
}
- return SetElementWithoutInterceptor(index,
- value,
- attributes,
- strict_mode,
- check_prototype,
- set_mode);
+ return *hresult;
}
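
The observation path above picks the change record type by comparing the element's attributes and value before and after the write: absent before means "new", a hole (accessor slot) or attribute change means "reconfigured", and a changed value with unchanged attributes means "updated". A sketch of that classification; the enum and flags are hypothetical, not V8 API.

    #include <cstdio>

    enum class ChangeKind { kNew, kReconfigured, kUpdated, kNone };

    ChangeKind ClassifyElementStore(bool existed_before, bool had_value_before,
                                    bool attributes_changed, bool value_changed) {
      if (!existed_before) return ChangeKind::kNew;
      if (!had_value_before) return ChangeKind::kReconfigured;  // accessor slot
      if (attributes_changed) return ChangeKind::kReconfigured;
      if (value_changed) return ChangeKind::kUpdated;
      return ChangeKind::kNone;
    }

    int main() {
      // Overwriting an existing data element with a new value -> "updated".
      std::printf("%d\n", static_cast<int>(
          ClassifyElementStore(true, true, false, true)));
    }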
@@ -10026,6 +12593,16 @@ MaybeObject* JSObject::SetElementWithoutInterceptor(uint32_t index,
HasDictionaryArgumentsElements() ||
(attr & (DONT_DELETE | DONT_ENUM | READ_ONLY)) == 0);
Isolate* isolate = GetIsolate();
+ if (FLAG_trace_external_array_abuse &&
+ IsExternalArrayElementsKind(GetElementsKind())) {
+ CheckArrayAbuse(this, "external elements write", index);
+ }
+ if (FLAG_trace_js_array_abuse &&
+ !IsExternalArrayElementsKind(GetElementsKind())) {
+ if (IsJSArray()) {
+ CheckArrayAbuse(this, "elements write", index, true);
+ }
+ }
switch (GetElementsKind()) {
case FAST_SMI_ELEMENTS:
case FAST_ELEMENTS:
@@ -10113,21 +12690,98 @@ MaybeObject* JSObject::SetElementWithoutInterceptor(uint32_t index,
}
-Handle<Object> JSObject::TransitionElementsKind(Handle<JSObject> object,
- ElementsKind to_kind) {
- CALL_HEAP_FUNCTION(object->GetIsolate(),
- object->TransitionElementsKind(to_kind),
- Object);
+void JSObject::TransitionElementsKind(Handle<JSObject> object,
+ ElementsKind to_kind) {
+ CALL_HEAP_FUNCTION_VOID(object->GetIsolate(),
+ object->TransitionElementsKind(to_kind));
+}
+
+
+bool AllocationSite::IsNestedSite() {
+ ASSERT(FLAG_trace_track_allocation_sites);
+ Object* current = GetHeap()->allocation_sites_list();
+ while (current != NULL && current->IsAllocationSite()) {
+ AllocationSite* current_site = AllocationSite::cast(current);
+ if (current_site->nested_site() == this) {
+ return true;
+ }
+ current = current_site->weak_next();
+ }
+ return false;
+}
+
+
+MaybeObject* JSObject::UpdateAllocationSite(ElementsKind to_kind) {
+ if (!FLAG_track_allocation_sites || !IsJSArray()) {
+ return this;
+ }
+
+ AllocationMemento* memento = AllocationMemento::FindForJSObject(this);
+ if (memento == NULL || !memento->IsValid()) {
+ return this;
+ }
+
+ // Walk through the memento to the allocation site.
+ AllocationSite* site = memento->GetAllocationSite();
+ if (site->SitePointsToLiteral() &&
+ site->transition_info()->IsJSArray()) {
+ JSArray* transition_info = JSArray::cast(site->transition_info());
+ ElementsKind kind = transition_info->GetElementsKind();
+ // If kind is holey, ensure that to_kind is as well.
+ if (IsHoleyElementsKind(kind)) {
+ to_kind = GetHoleyElementsKind(to_kind);
+ }
+ if (IsMoreGeneralElementsKindTransition(kind, to_kind)) {
+ // If the array is huge, it's not likely to be defined in a local
+ // function, so we shouldn't make new instances of it very often.
+ uint32_t length = 0;
+ CHECK(transition_info->length()->ToArrayIndex(&length));
+ if (length <= AllocationSite::kMaximumArrayBytesToPretransition) {
+ if (FLAG_trace_track_allocation_sites) {
+ bool is_nested = site->IsNestedSite();
+ PrintF(
+ "AllocationSite: JSArray %p boilerplate %s updated %s->%s\n",
+ reinterpret_cast<void*>(this),
+ is_nested ? "(nested)" : "",
+ ElementsKindToString(kind),
+ ElementsKindToString(to_kind));
+ }
+ return transition_info->TransitionElementsKind(to_kind);
+ }
+ }
+ } else {
+ ElementsKind kind = site->GetElementsKind();
+ // If kind is holey, ensure that to_kind is as well.
+ if (IsHoleyElementsKind(kind)) {
+ to_kind = GetHoleyElementsKind(to_kind);
+ }
+ if (IsMoreGeneralElementsKindTransition(kind, to_kind)) {
+ if (FLAG_trace_track_allocation_sites) {
+ PrintF("AllocationSite: JSArray %p site updated %s->%s\n",
+ reinterpret_cast<void*>(this),
+ ElementsKindToString(kind),
+ ElementsKindToString(to_kind));
+ }
+ site->set_transition_info(Smi::FromInt(to_kind));
+ }
+ }
+ return this;
}
MaybeObject* JSObject::TransitionElementsKind(ElementsKind to_kind) {
+ ASSERT(!map()->is_observed());
ElementsKind from_kind = map()->elements_kind();
if (IsFastHoleyElementsKind(from_kind)) {
to_kind = GetHoleyElementsKind(to_kind);
}
+ if (from_kind == to_kind) return this;
+
+ MaybeObject* maybe_failure = UpdateAllocationSite(to_kind);
+ if (maybe_failure->IsFailure()) return maybe_failure;
+
Isolate* isolate = GetIsolate();
if (elements() == isolate->heap()->empty_fixed_array() ||
(IsFastSmiOrObjectElementsKind(from_kind) &&
@@ -10223,26 +12877,23 @@ MaybeObject* JSArray::JSArrayUpdateLengthFromIndex(uint32_t index,
MaybeObject* JSObject::GetElementWithInterceptor(Object* receiver,
uint32_t index) {
Isolate* isolate = GetIsolate();
+ HandleScope scope(isolate);
+
// Make sure that the top context does not change when doing
// callbacks or interceptor calls.
- AssertNoContextChange ncc;
- HandleScope scope(isolate);
+ AssertNoContextChange ncc(isolate);
+
Handle<InterceptorInfo> interceptor(GetIndexedInterceptor(), isolate);
Handle<Object> this_handle(receiver, isolate);
Handle<JSObject> holder_handle(this, isolate);
if (!interceptor->getter()->IsUndefined()) {
- v8::IndexedPropertyGetter getter =
- v8::ToCData<v8::IndexedPropertyGetter>(interceptor->getter());
+ v8::IndexedPropertyGetterCallback getter =
+ v8::ToCData<v8::IndexedPropertyGetterCallback>(interceptor->getter());
LOG(isolate,
ApiIndexedPropertyAccess("interceptor-indexed-get", this, index));
- CustomArguments args(isolate, interceptor->data(), receiver, this);
- v8::AccessorInfo info(args.end());
- v8::Handle<v8::Value> result;
- {
- // Leaving JavaScript.
- VMState state(isolate, EXTERNAL);
- result = getter(index, info);
- }
+ PropertyCallbackArguments
+ args(isolate, interceptor->data(), receiver, this);
+ v8::Handle<v8::Value> result = args.Call(getter, index);
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
if (!result.IsEmpty()) {
Handle<Object> result_internal = v8::Utils::OpenHandle(*result);
@@ -10262,7 +12913,7 @@ MaybeObject* JSObject::GetElementWithInterceptor(Object* receiver,
Object* pt = holder_handle->GetPrototype();
if (pt == heap->null_value()) return heap->undefined_value();
- return pt->GetElementWithReceiver(*this_handle, index);
+ return pt->GetElementWithReceiver(isolate, *this_handle, index);
}
@@ -10376,6 +13027,9 @@ bool JSObject::ShouldConvertToFastElements() {
// An object requiring access checks is never allowed to have fast
// elements. If it had fast elements we would skip security checks.
if (IsAccessCheckNeeded()) return false;
+ // Observed objects may not go to fast mode because they rely on map checks,
+ // and for fast element accesses we sometimes check element kinds only.
+ if (FLAG_harmony_observation && map()->is_observed()) return false;
FixedArray* elements = FixedArray::cast(this->elements());
SeededNumberDictionary* dictionary = NULL;
@@ -10459,7 +13113,7 @@ template<typename Shape, typename Key>
void Dictionary<Shape, Key>::CopyValuesTo(FixedArray* elements) {
int pos = 0;
int capacity = HashTable<Shape, Key>::Capacity();
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
WriteBarrierMode mode = elements->GetWriteBarrierMode(no_gc);
for (int i = 0; i < capacity; i++) {
Object* k = Dictionary<Shape, Key>::KeyAt(i);
@@ -10491,27 +13145,32 @@ InterceptorInfo* JSObject::GetIndexedInterceptor() {
}
-MaybeObject* JSObject::GetPropertyPostInterceptor(
- Object* receiver,
- String* name,
+Handle<Object> JSObject::GetPropertyPostInterceptor(
+ Handle<JSObject> object,
+ Handle<Object> receiver,
+ Handle<Name> name,
PropertyAttributes* attributes) {
// Check local property in holder, ignore interceptor.
- LookupResult result(GetIsolate());
- LocalLookupRealNamedProperty(name, &result);
- if (result.IsFound()) {
- return GetProperty(receiver, &result, name, attributes);
+ Isolate* isolate = object->GetIsolate();
+ LookupResult lookup(isolate);
+ object->LocalLookupRealNamedProperty(*name, &lookup);
+ Handle<Object> result;
+ if (lookup.IsFound()) {
+ result = GetProperty(object, receiver, &lookup, name, attributes);
+ } else {
+ // Continue searching via the prototype chain.
+ Handle<Object> prototype(object->GetPrototype(), isolate);
+ *attributes = ABSENT;
+ if (prototype->IsNull()) return isolate->factory()->undefined_value();
+ result = GetPropertyWithReceiver(prototype, receiver, name, attributes);
}
- // Continue searching via the prototype chain.
- Object* pt = GetPrototype();
- *attributes = ABSENT;
- if (pt->IsNull()) return GetHeap()->undefined_value();
- return pt->GetPropertyWithReceiver(receiver, name, attributes);
+ return result;
}
MaybeObject* JSObject::GetLocalPropertyPostInterceptor(
Object* receiver,
- String* name,
+ Name* name,
PropertyAttributes* attributes) {
// Check local property in holder, ignore interceptor.
LookupResult result(GetIsolate());
@@ -10523,140 +13182,98 @@ MaybeObject* JSObject::GetLocalPropertyPostInterceptor(
}
-MaybeObject* JSObject::GetPropertyWithInterceptor(
- Object* receiver,
- String* name,
+Handle<Object> JSObject::GetPropertyWithInterceptor(
+ Handle<JSObject> object,
+ Handle<Object> receiver,
+ Handle<Name> name,
PropertyAttributes* attributes) {
- Isolate* isolate = GetIsolate();
- InterceptorInfo* interceptor = GetNamedInterceptor();
- HandleScope scope(isolate);
- Handle<Object> receiver_handle(receiver);
- Handle<JSObject> holder_handle(this);
- Handle<String> name_handle(name);
+ Isolate* isolate = object->GetIsolate();
+
+ // TODO(rossberg): Support symbols in the API.
+ if (name->IsSymbol()) return isolate->factory()->undefined_value();
+
+ Handle<InterceptorInfo> interceptor(object->GetNamedInterceptor(), isolate);
+ Handle<String> name_string = Handle<String>::cast(name);
if (!interceptor->getter()->IsUndefined()) {
- v8::NamedPropertyGetter getter =
- v8::ToCData<v8::NamedPropertyGetter>(interceptor->getter());
+ v8::NamedPropertyGetterCallback getter =
+ v8::ToCData<v8::NamedPropertyGetterCallback>(interceptor->getter());
LOG(isolate,
- ApiNamedPropertyAccess("interceptor-named-get", *holder_handle, name));
- CustomArguments args(isolate, interceptor->data(), receiver, this);
- v8::AccessorInfo info(args.end());
- v8::Handle<v8::Value> result;
- {
- // Leaving JavaScript.
- VMState state(isolate, EXTERNAL);
- result = getter(v8::Utils::ToLocal(name_handle), info);
- }
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
+ ApiNamedPropertyAccess("interceptor-named-get", *object, *name));
+ PropertyCallbackArguments
+ args(isolate, interceptor->data(), *receiver, *object);
+ v8::Handle<v8::Value> result =
+ args.Call(getter, v8::Utils::ToLocal(name_string));
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
if (!result.IsEmpty()) {
*attributes = NONE;
Handle<Object> result_internal = v8::Utils::OpenHandle(*result);
result_internal->VerifyApiCallResultType();
- return *result_internal;
+ // Rebox handle to escape this scope.
+ return handle(*result_internal, isolate);
}
}
- MaybeObject* result = holder_handle->GetPropertyPostInterceptor(
- *receiver_handle,
- *name_handle,
- attributes);
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- return result;
+ return GetPropertyPostInterceptor(object, receiver, name, attributes);
}
-bool JSObject::HasRealNamedProperty(String* key) {
+bool JSObject::HasRealNamedProperty(Handle<JSObject> object,
+ Handle<Name> key) {
+ Isolate* isolate = object->GetIsolate();
+ SealHandleScope shs(isolate);
// Check access rights if needed.
- Isolate* isolate = GetIsolate();
- if (IsAccessCheckNeeded()) {
- if (!isolate->MayNamedAccess(this, key, v8::ACCESS_HAS)) {
- isolate->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
+ if (object->IsAccessCheckNeeded()) {
+ if (!isolate->MayNamedAccess(*object, *key, v8::ACCESS_HAS)) {
+ isolate->ReportFailedAccessCheck(*object, v8::ACCESS_HAS);
return false;
}
}
LookupResult result(isolate);
- LocalLookupRealNamedProperty(key, &result);
+ object->LocalLookupRealNamedProperty(*key, &result);
return result.IsFound() && !result.IsInterceptor();
}
-bool JSObject::HasRealElementProperty(uint32_t index) {
+bool JSObject::HasRealElementProperty(Handle<JSObject> object, uint32_t index) {
+ Isolate* isolate = object->GetIsolate();
+ SealHandleScope shs(isolate);
// Check access rights if needed.
- if (IsAccessCheckNeeded()) {
- Heap* heap = GetHeap();
- if (!heap->isolate()->MayIndexedAccess(this, index, v8::ACCESS_HAS)) {
- heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
+ if (object->IsAccessCheckNeeded()) {
+ if (!isolate->MayIndexedAccess(*object, index, v8::ACCESS_HAS)) {
+ isolate->ReportFailedAccessCheck(*object, v8::ACCESS_HAS);
return false;
}
}
- // Handle [] on String objects.
- if (this->IsStringObjectWithCharacterAt(index)) return true;
-
- switch (GetElementsKind()) {
- case FAST_SMI_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_ELEMENTS: {
- uint32_t length = IsJSArray() ?
- static_cast<uint32_t>(
- Smi::cast(JSArray::cast(this)->length())->value()) :
- static_cast<uint32_t>(FixedArray::cast(elements())->length());
- return (index < length) &&
- !FixedArray::cast(elements())->get(index)->IsTheHole();
- }
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS: {
- uint32_t length = IsJSArray() ?
- static_cast<uint32_t>(
- Smi::cast(JSArray::cast(this)->length())->value()) :
- static_cast<uint32_t>(FixedDoubleArray::cast(elements())->length());
- return (index < length) &&
- !FixedDoubleArray::cast(elements())->is_the_hole(index);
- break;
- }
- case EXTERNAL_PIXEL_ELEMENTS: {
- ExternalPixelArray* pixels = ExternalPixelArray::cast(elements());
- return index < static_cast<uint32_t>(pixels->length());
- }
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS: {
- ExternalArray* array = ExternalArray::cast(elements());
- return index < static_cast<uint32_t>(array->length());
- }
- case DICTIONARY_ELEMENTS: {
- return element_dictionary()->FindEntry(index)
- != SeededNumberDictionary::kNotFound;
- }
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNIMPLEMENTED();
- break;
+ if (object->IsJSGlobalProxy()) {
+ HandleScope scope(isolate);
+ Handle<Object> proto(object->GetPrototype(), isolate);
+ if (proto->IsNull()) return false;
+ ASSERT(proto->IsJSGlobalObject());
+ return HasRealElementProperty(Handle<JSObject>::cast(proto), index);
}
- // All possibilities have been handled above already.
- UNREACHABLE();
- return GetHeap()->null_value();
+
+ return object->GetElementAttributeWithoutInterceptor(
+ *object, index, false) != ABSENT;
}
-bool JSObject::HasRealNamedCallbackProperty(String* key) {
+bool JSObject::HasRealNamedCallbackProperty(Handle<JSObject> object,
+ Handle<Name> key) {
+ Isolate* isolate = object->GetIsolate();
+ SealHandleScope shs(isolate);
// Check access rights if needed.
- Isolate* isolate = GetIsolate();
- if (IsAccessCheckNeeded()) {
- if (!isolate->MayNamedAccess(this, key, v8::ACCESS_HAS)) {
- isolate->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
+ if (object->IsAccessCheckNeeded()) {
+ if (!isolate->MayNamedAccess(*object, *key, v8::ACCESS_HAS)) {
+ isolate->ReportFailedAccessCheck(*object, v8::ACCESS_HAS);
return false;
}
}
LookupResult result(isolate);
- LocalLookupRealNamedProperty(key, &result);
+ object->LocalLookupRealNamedProperty(*key, &result);
return result.IsPropertyCallbacks();
}
@@ -10665,7 +13282,7 @@ int JSObject::NumberOfLocalProperties(PropertyAttributes filter) {
if (HasFastProperties()) {
Map* map = this->map();
if (filter == NONE) return map->NumberOfOwnDescriptors();
- if (filter == DONT_ENUM) {
+ if (filter & DONT_ENUM) {
int result = map->EnumLength();
if (result != Map::kInvalidEnumCache) return result;
}
@@ -10791,19 +13408,23 @@ void FixedArray::SortPairs(FixedArray* numbers, uint32_t len) {
// Fill in the names of local properties into the supplied storage. The main
// purpose of this function is to provide reflection information for the object
// mirrors.
-void JSObject::GetLocalPropertyNames(FixedArray* storage, int index) {
- ASSERT(storage->length() >= (NumberOfLocalProperties() - index));
+void JSObject::GetLocalPropertyNames(
+ FixedArray* storage, int index, PropertyAttributes filter) {
+ ASSERT(storage->length() >= (NumberOfLocalProperties(filter) - index));
if (HasFastProperties()) {
int real_size = map()->NumberOfOwnDescriptors();
DescriptorArray* descs = map()->instance_descriptors();
- ASSERT(storage->length() >= index + real_size);
for (int i = 0; i < real_size; i++) {
- storage->set(index + i, descs->GetKey(i));
+ if ((descs->GetDetails(i).attributes() & filter) == 0 &&
+ ((filter & SYMBOLIC) == 0 || !descs->GetKey(i)->IsSymbol())) {
+ storage->set(index++, descs->GetKey(i));
+ }
}
} else {
property_dictionary()->CopyKeysTo(storage,
index,
- StringDictionary::UNSORTED);
+ filter,
+ NameDictionary::UNSORTED);
}
}
@@ -10989,7 +13610,7 @@ class StringKey : public HashTableKey {
uint32_t HashForObject(Object* other) { return String::cast(other)->Hash(); }
- Object* AsObject() { return string_; }
+ Object* AsObject(Heap* heap) { return string_; }
String* string_;
uint32_t hash_;
@@ -11064,9 +13685,9 @@ class StringSharedKey : public HashTableKey {
source, shared, language_mode, scope_position);
}
- MUST_USE_RESULT MaybeObject* AsObject() {
+ MUST_USE_RESULT MaybeObject* AsObject(Heap* heap) {
Object* obj;
- { MaybeObject* maybe_obj = source_->GetHeap()->AllocateFixedArray(4);
+ { MaybeObject* maybe_obj = heap->AllocateFixedArray(4);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
FixedArray* other_array = FixedArray::cast(obj);
@@ -11104,7 +13725,7 @@ class RegExpKey : public HashTableKey {
uint32_t Hash() { return RegExpHash(string_, flags_); }
- Object* AsObject() {
+ Object* AsObject(Heap* heap) {
// Plain hash maps, which is where regexp keys are used, don't
// use this function.
UNREACHABLE();
@@ -11125,22 +13746,20 @@ class RegExpKey : public HashTableKey {
Smi* flags_;
};
-// Utf8SymbolKey carries a vector of chars as key.
-class Utf8SymbolKey : public HashTableKey {
+
+// Utf8StringKey carries a vector of chars as key.
+class Utf8StringKey : public HashTableKey {
public:
- explicit Utf8SymbolKey(Vector<const char> string, uint32_t seed)
+ explicit Utf8StringKey(Vector<const char> string, uint32_t seed)
: string_(string), hash_field_(0), seed_(seed) { }
bool IsMatch(Object* string) {
- return String::cast(string)->IsEqualTo(string_);
+ return String::cast(string)->IsUtf8EqualTo(string_);
}
uint32_t Hash() {
if (hash_field_ != 0) return hash_field_ >> String::kHashShift;
- unibrow::Utf8InputBuffer<> buffer(string_.start(),
- static_cast<unsigned>(string_.length()));
- chars_ = buffer.Utf16Length();
- hash_field_ = String::ComputeHashField(&buffer, chars_, seed_);
+ hash_field_ = StringHasher::ComputeUtf8Hash(string_, seed_, &chars_);
uint32_t result = hash_field_ >> String::kHashShift;
ASSERT(result != 0); // Ensure that the hash value of 0 is never computed.
return result;
@@ -11150,10 +13769,11 @@ class Utf8SymbolKey : public HashTableKey {
return String::cast(other)->Hash();
}
- MaybeObject* AsObject() {
+ MaybeObject* AsObject(Heap* heap) {
if (hash_field_ == 0) Hash();
- return Isolate::Current()->heap()->AllocateSymbol(
- string_, chars_, hash_field_);
+ return heap->AllocateInternalizedStringFromUtf8(string_,
+ chars_,
+ hash_field_);
}
Vector<const char> string_;
@@ -11164,35 +13784,15 @@ class Utf8SymbolKey : public HashTableKey {
template <typename Char>
-class SequentialSymbolKey : public HashTableKey {
+class SequentialStringKey : public HashTableKey {
public:
- explicit SequentialSymbolKey(Vector<const Char> string, uint32_t seed)
+ explicit SequentialStringKey(Vector<const Char> string, uint32_t seed)
: string_(string), hash_field_(0), seed_(seed) { }
uint32_t Hash() {
- StringHasher hasher(string_.length(), seed_);
-
- // Very long strings have a trivial hash that doesn't inspect the
- // string contents.
- if (hasher.has_trivial_hash()) {
- hash_field_ = hasher.GetHashField();
- } else {
- int i = 0;
- // Do the iterative array index computation as long as there is a
- // chance this is an array index.
- while (i < string_.length() && hasher.is_array_index()) {
- hasher.AddCharacter(static_cast<uc32>(string_[i]));
- i++;
- }
-
- // Process the remaining characters without updating the array
- // index.
- while (i < string_.length()) {
- hasher.AddCharacterNoIndex(static_cast<uc32>(string_[i]));
- i++;
- }
- hash_field_ = hasher.GetHashField();
- }
+ hash_field_ = StringHasher::HashSequentialString<Char>(string_.start(),
+ string_.length(),
+ seed_);
uint32_t result = hash_field_ >> String::kHashShift;
ASSERT(result != 0); // Ensure that the hash value of 0 is never computed.
@@ -11211,59 +13811,35 @@ class SequentialSymbolKey : public HashTableKey {
-class AsciiSymbolKey : public SequentialSymbolKey<char> {
+class OneByteStringKey : public SequentialStringKey<uint8_t> {
public:
- AsciiSymbolKey(Vector<const char> str, uint32_t seed)
- : SequentialSymbolKey<char>(str, seed) { }
+ OneByteStringKey(Vector<const uint8_t> str, uint32_t seed)
+ : SequentialStringKey<uint8_t>(str, seed) { }
bool IsMatch(Object* string) {
- return String::cast(string)->IsAsciiEqualTo(string_);
+ return String::cast(string)->IsOneByteEqualTo(string_);
}
- MaybeObject* AsObject() {
+ MaybeObject* AsObject(Heap* heap) {
if (hash_field_ == 0) Hash();
- return HEAP->AllocateAsciiSymbol(string_, hash_field_);
+ return heap->AllocateOneByteInternalizedString(string_, hash_field_);
}
};
-class SubStringAsciiSymbolKey : public HashTableKey {
+class SubStringOneByteStringKey : public HashTableKey {
public:
- explicit SubStringAsciiSymbolKey(Handle<SeqAsciiString> string,
- int from,
- int length,
- uint32_t seed)
- : string_(string), from_(from), length_(length), seed_(seed) { }
+ explicit SubStringOneByteStringKey(Handle<SeqOneByteString> string,
+ int from,
+ int length)
+ : string_(string), from_(from), length_(length) { }
uint32_t Hash() {
ASSERT(length_ >= 0);
ASSERT(from_ + length_ <= string_->length());
- StringHasher hasher(length_, string_->GetHeap()->HashSeed());
-
- // Very long strings have a trivial hash that doesn't inspect the
- // string contents.
- if (hasher.has_trivial_hash()) {
- hash_field_ = hasher.GetHashField();
- } else {
- int i = 0;
- // Do the iterative array index computation as long as there is a
- // chance this is an array index.
- while (i < length_ && hasher.is_array_index()) {
- hasher.AddCharacter(static_cast<uc32>(
- string_->SeqAsciiStringGet(i + from_)));
- i++;
- }
-
- // Process the remaining characters without updating the array
- // index.
- while (i < length_) {
- hasher.AddCharacterNoIndex(static_cast<uc32>(
- string_->SeqAsciiStringGet(i + from_)));
- i++;
- }
- hash_field_ = hasher.GetHashField();
- }
-
+ uint8_t* chars = string_->GetChars() + from_;
+ hash_field_ = StringHasher::HashSequentialString(
+ chars, length_, string_->GetHeap()->HashSeed());
uint32_t result = hash_field_ >> String::kHashShift;
ASSERT(result != 0); // Ensure that the hash value of 0 is never computed.
return result;
@@ -11275,45 +13851,44 @@ class SubStringAsciiSymbolKey : public HashTableKey {
}
bool IsMatch(Object* string) {
- Vector<const char> chars(string_->GetChars() + from_, length_);
- return String::cast(string)->IsAsciiEqualTo(chars);
+ Vector<const uint8_t> chars(string_->GetChars() + from_, length_);
+ return String::cast(string)->IsOneByteEqualTo(chars);
}
- MaybeObject* AsObject() {
+ MaybeObject* AsObject(Heap* heap) {
if (hash_field_ == 0) Hash();
- Vector<const char> chars(string_->GetChars() + from_, length_);
- return HEAP->AllocateAsciiSymbol(chars, hash_field_);
+ Vector<const uint8_t> chars(string_->GetChars() + from_, length_);
+ return heap->AllocateOneByteInternalizedString(chars, hash_field_);
}
private:
- Handle<SeqAsciiString> string_;
+ Handle<SeqOneByteString> string_;
int from_;
int length_;
uint32_t hash_field_;
- uint32_t seed_;
};
-class TwoByteSymbolKey : public SequentialSymbolKey<uc16> {
+class TwoByteStringKey : public SequentialStringKey<uc16> {
public:
- explicit TwoByteSymbolKey(Vector<const uc16> str, uint32_t seed)
- : SequentialSymbolKey<uc16>(str, seed) { }
+ explicit TwoByteStringKey(Vector<const uc16> str, uint32_t seed)
+ : SequentialStringKey<uc16>(str, seed) { }
bool IsMatch(Object* string) {
return String::cast(string)->IsTwoByteEqualTo(string_);
}
- MaybeObject* AsObject() {
+ MaybeObject* AsObject(Heap* heap) {
if (hash_field_ == 0) Hash();
- return HEAP->AllocateTwoByteSymbol(string_, hash_field_);
+ return heap->AllocateTwoByteInternalizedString(string_, hash_field_);
}
};
-// SymbolKey carries a string/symbol object as key.
-class SymbolKey : public HashTableKey {
+// InternalizedStringKey carries a string/internalized-string object as key.
+class InternalizedStringKey : public HashTableKey {
public:
- explicit SymbolKey(String* string)
+ explicit InternalizedStringKey(String* string)
: string_(string) { }
bool IsMatch(Object* string) {
@@ -11326,23 +13901,20 @@ class SymbolKey : public HashTableKey {
return String::cast(other)->Hash();
}
- MaybeObject* AsObject() {
- // Attempt to flatten the string, so that symbols will most often
- // be flat strings.
+ MaybeObject* AsObject(Heap* heap) {
+ // Attempt to flatten the string, so that internalized strings will most
+ // often be flat strings.
string_ = string_->TryFlattenGetString();
- Heap* heap = string_->GetHeap();
- // Transform string to symbol if possible.
- Map* map = heap->SymbolMapForString(string_);
+ // Internalize the string if possible.
+ Map* map = heap->InternalizedStringMapForString(string_);
if (map != NULL) {
string_->set_map_no_write_barrier(map);
- ASSERT(string_->IsSymbol());
+ ASSERT(string_->IsInternalizedString());
return string_;
}
- // Otherwise allocate a new symbol.
- StringInputBuffer buffer(string_);
- return heap->AllocateInternalSymbol(&buffer,
- string_->length(),
- string_->hash_field());
+ // Otherwise allocate a new internalized string.
+ return heap->AllocateInternalizedStringImpl(
+ string_, string_->length(), string_->hash_field());
}
static uint32_t StringHash(Object* obj) {
@@ -11368,7 +13940,8 @@ void HashTable<Shape, Key>::IterateElements(ObjectVisitor* v) {
template<typename Shape, typename Key>
-MaybeObject* HashTable<Shape, Key>::Allocate(int at_least_space_for,
+MaybeObject* HashTable<Shape, Key>::Allocate(Heap* heap,
+ int at_least_space_for,
MinimumCapacity capacity_option,
PretenureFlag pretenure) {
ASSERT(!capacity_option || IS_POWER_OF_TWO(at_least_space_for));
@@ -11376,12 +13949,12 @@ MaybeObject* HashTable<Shape, Key>::Allocate(int at_least_space_for,
? at_least_space_for
: ComputeCapacity(at_least_space_for);
if (capacity > HashTable::kMaxCapacity) {
- return Failure::OutOfMemoryException();
+ return Failure::OutOfMemoryException(0x10);
}
Object* obj;
- { MaybeObject* maybe_obj = Isolate::Current()->heap()->
- AllocateHashTable(EntryToIndex(capacity), pretenure);
+ { MaybeObject* maybe_obj =
+        heap->AllocateHashTable(EntryToIndex(capacity), pretenure);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
HashTable::cast(obj)->SetNumberOfElements(0);
@@ -11392,19 +13965,19 @@ MaybeObject* HashTable<Shape, Key>::Allocate(int at_least_space_for,
// Find entry for key otherwise return kNotFound.
-int StringDictionary::FindEntry(String* key) {
- if (!key->IsSymbol()) {
- return HashTable<StringDictionaryShape, String*>::FindEntry(key);
+int NameDictionary::FindEntry(Name* key) {
+ if (!key->IsUniqueName()) {
+ return HashTable<NameDictionaryShape, Name*>::FindEntry(key);
}
- // Optimized for symbol key. Knowledge of the key type allows:
- // 1. Move the check if the key is a symbol out of the loop.
- // 2. Avoid comparing hash codes in symbol to symbol comparison.
- // 3. Detect a case when a dictionary key is not a symbol but the key is.
- // In case of positive result the dictionary key may be replaced by
- // the symbol with minimal performance penalty. It gives a chance to
- // perform further lookups in code stubs (and significant performance boost
- // a certain style of code).
+  // Optimized for unique names. Knowledge of the key type allows:
+  // 1. Moving the check whether the key is unique out of the loop.
+  // 2. Avoiding hash code comparisons in unique-to-unique comparisons.
+  // 3. Detecting the case where a dictionary key is not unique but the key is.
+  //    In that case the dictionary key may be replaced by the internalized
+  //    string with minimal performance penalty. This makes further lookups in
+  //    code stubs possible (and gives a significant performance boost to a
+  //    certain style of code).
// EnsureCapacity will guarantee the hash table is never full.
uint32_t capacity = Capacity();
@@ -11416,15 +13989,15 @@ int StringDictionary::FindEntry(String* key) {
Object* element = get(index);
if (element->IsUndefined()) break; // Empty entry.
if (key == element) return entry;
- if (!element->IsSymbol() &&
+ if (!element->IsUniqueName() &&
!element->IsTheHole() &&
- String::cast(element)->Equals(key)) {
- // Replace a non-symbol key by the equivalent symbol for faster further
- // lookups.
+ Name::cast(element)->Equals(key)) {
+ // Replace a key that is a non-internalized string by the equivalent
+ // internalized string for faster further lookups.
set(index, key);
return entry;
}
- ASSERT(element->IsTheHole() || !String::cast(element)->Equals(key));
+ ASSERT(element->IsTheHole() || !Name::cast(element)->Equals(key));
entry = NextProbe(entry, count++, capacity);
}
return kNotFound;
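
A rough, standalone sketch of the probe loop above, assuming a power-of-two capacity, a probe step that grows on each collision (as in FirstProbe/NextProbe), and a table that is never completely full so an empty slot ends an unsuccessful search; std::string keys stand in for V8's Name/unique-name machinery and none of these types are V8's own:

// Illustrative sketch only (not V8 code): open-addressed lookup with an
// increasing probe step.
#include <cstdint>
#include <string>
#include <vector>

struct Slot {
  bool occupied = false;
  std::string key;
};

int FindEntry(const std::vector<Slot>& table, uint32_t hash,
              const std::string& key) {
  const uint32_t capacity = static_cast<uint32_t>(table.size());  // power of 2
  uint32_t entry = hash & (capacity - 1);                         // FirstProbe
  for (uint32_t count = 1; ; ++count) {
    const Slot& slot = table[entry];
    if (!slot.occupied) return -1;             // Empty slot: key is absent.
    if (slot.key == key) return static_cast<int>(entry);
    entry = (entry + count) & (capacity - 1);  // NextProbe with growing step.
  }
}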
@@ -11435,7 +14008,7 @@ template<typename Shape, typename Key>
MaybeObject* HashTable<Shape, Key>::Rehash(HashTable* new_table, Key key) {
ASSERT(NumberOfElements() < new_table->Capacity());
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
WriteBarrierMode mode = new_table->GetWriteBarrierMode(no_gc);
// Copy prefix to new array.
@@ -11466,7 +14039,77 @@ MaybeObject* HashTable<Shape, Key>::Rehash(HashTable* new_table, Key key) {
template<typename Shape, typename Key>
-MaybeObject* HashTable<Shape, Key>::EnsureCapacity(int n, Key key) {
+uint32_t HashTable<Shape, Key>::EntryForProbe(Key key,
+ Object* k,
+ int probe,
+ uint32_t expected) {
+ uint32_t hash = HashTable<Shape, Key>::HashForObject(key, k);
+ uint32_t capacity = Capacity();
+ uint32_t entry = FirstProbe(hash, capacity);
+ for (int i = 1; i < probe; i++) {
+ if (entry == expected) return expected;
+ entry = NextProbe(entry, i, capacity);
+ }
+ return entry;
+}
+
+
+template<typename Shape, typename Key>
+void HashTable<Shape, Key>::Swap(uint32_t entry1,
+ uint32_t entry2,
+ WriteBarrierMode mode) {
+ int index1 = EntryToIndex(entry1);
+ int index2 = EntryToIndex(entry2);
+ Object* temp[Shape::kEntrySize];
+ for (int j = 0; j < Shape::kEntrySize; j++) {
+ temp[j] = get(index1 + j);
+ }
+ for (int j = 0; j < Shape::kEntrySize; j++) {
+ set(index1 + j, get(index2 + j), mode);
+ }
+ for (int j = 0; j < Shape::kEntrySize; j++) {
+ set(index2 + j, temp[j], mode);
+ }
+}
+
+
+template<typename Shape, typename Key>
+void HashTable<Shape, Key>::Rehash(Key key) {
+ DisallowHeapAllocation no_gc;
+ WriteBarrierMode mode = GetWriteBarrierMode(no_gc);
+ uint32_t capacity = Capacity();
+ bool done = false;
+ for (int probe = 1; !done; probe++) {
+ // All elements at entries given by one of the first _probe_ probes
+ // are placed correctly. Other elements might need to be moved.
+ done = true;
+ for (uint32_t current = 0; current < capacity; current++) {
+ Object* current_key = get(EntryToIndex(current));
+ if (IsKey(current_key)) {
+ uint32_t target = EntryForProbe(key, current_key, probe, current);
+ if (current == target) continue;
+ Object* target_key = get(EntryToIndex(target));
+ if (!IsKey(target_key) ||
+ EntryForProbe(key, target_key, probe, target) != target) {
+ // Put the current element into the correct position.
+ Swap(current, target, mode);
+ // The other element will be processed on the next iteration.
+ current--;
+ } else {
+ // The place for the current element is occupied. Leave the element
+ // for the next probe.
+ done = false;
+ }
+ }
+ }
+ }
+}
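
EntryForProbe and the Rehash loop above implement an in-place rehash: on pass `probe`, every element whose correct slot is reachable within the first `probe` probes is swapped there, and the outer loop repeats until a pass moves nothing. A minimal standalone restatement of the slot computation, assuming the same power-of-two capacity and growing probe step; names and types are illustrative, not V8's:

// Illustrative sketch only. Returns the slot an element with the given hash
// should occupy, stopping early if it already sits at `expected` within the
// first `probe` probes (mirrors the shape of EntryForProbe above).
#include <cstdint>

uint32_t EntryForProbe(uint32_t hash, uint32_t capacity,
                       int probe, uint32_t expected) {
  uint32_t entry = hash & (capacity - 1);        // FirstProbe
  for (int i = 1; i < probe; ++i) {
    if (entry == expected) return expected;      // Already placed correctly.
    entry = (entry + static_cast<uint32_t>(i)) & (capacity - 1);  // NextProbe
  }
  return entry;
}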
+
+
+template<typename Shape, typename Key>
+MaybeObject* HashTable<Shape, Key>::EnsureCapacity(int n,
+ Key key,
+ PretenureFlag pretenure) {
int capacity = Capacity();
int nof = NumberOfElements() + n;
int nod = NumberOfDeletedElements();
@@ -11479,13 +14122,14 @@ MaybeObject* HashTable<Shape, Key>::EnsureCapacity(int n, Key key) {
}
const int kMinCapacityForPretenure = 256;
- bool pretenure =
- (capacity > kMinCapacityForPretenure) && !GetHeap()->InNewSpace(this);
+ bool should_pretenure = pretenure == TENURED ||
+ ((capacity > kMinCapacityForPretenure) && !GetHeap()->InNewSpace(this));
Object* obj;
{ MaybeObject* maybe_obj =
- Allocate(nof * 2,
+ Allocate(GetHeap(),
+ nof * 2,
USE_DEFAULT_MINIMUM_CAPACITY,
- pretenure ? TENURED : NOT_TENURED);
+ should_pretenure ? TENURED : NOT_TENURED);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
@@ -11514,7 +14158,8 @@ MaybeObject* HashTable<Shape, Key>::Shrink(Key key) {
!GetHeap()->InNewSpace(this);
Object* obj;
{ MaybeObject* maybe_obj =
- Allocate(at_least_room_for,
+ Allocate(GetHeap(),
+ at_least_room_for,
USE_DEFAULT_MINIMUM_CAPACITY,
pretenure ? TENURED : NOT_TENURED);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
@@ -11538,10 +14183,11 @@ uint32_t HashTable<Shape, Key>::FindInsertionEntry(uint32_t hash) {
return entry;
}
+
// Force instantiation of template instances class.
// Please note this list is compiler dependent.
-template class HashTable<SymbolTableShape, HashTableKey*>;
+template class HashTable<StringTableShape, HashTableKey*>;
template class HashTable<CompilationCacheShape, HashTableKey*>;
@@ -11551,20 +14197,22 @@ template class HashTable<ObjectHashTableShape<1>, Object*>;
template class HashTable<ObjectHashTableShape<2>, Object*>;
-template class Dictionary<StringDictionaryShape, String*>;
+template class HashTable<WeakHashTableShape<2>, Object*>;
+
+template class Dictionary<NameDictionaryShape, Name*>;
template class Dictionary<SeededNumberDictionaryShape, uint32_t>;
template class Dictionary<UnseededNumberDictionaryShape, uint32_t>;
template MaybeObject* Dictionary<SeededNumberDictionaryShape, uint32_t>::
- Allocate(int at_least_space_for);
+ Allocate(Heap* heap, int at_least_space_for, PretenureFlag pretenure);
template MaybeObject* Dictionary<UnseededNumberDictionaryShape, uint32_t>::
- Allocate(int at_least_space_for);
+ Allocate(Heap* heap, int at_least_space_for, PretenureFlag pretenure);
-template MaybeObject* Dictionary<StringDictionaryShape, String*>::Allocate(
- int);
+template MaybeObject* Dictionary<NameDictionaryShape, Name*>::
+ Allocate(Heap* heap, int n, PretenureFlag pretenure);
template MaybeObject* Dictionary<SeededNumberDictionaryShape, uint32_t>::AtPut(
uint32_t, Object*);
@@ -11578,7 +14226,7 @@ template Object* Dictionary<SeededNumberDictionaryShape, uint32_t>::
template Object* Dictionary<UnseededNumberDictionaryShape, uint32_t>::
SlowReverseLookup(Object* value);
-template Object* Dictionary<StringDictionaryShape, String*>::SlowReverseLookup(
+template Object* Dictionary<NameDictionaryShape, Name*>::SlowReverseLookup(
Object*);
template void Dictionary<SeededNumberDictionaryShape, uint32_t>::CopyKeysTo(
@@ -11586,32 +14234,32 @@ template void Dictionary<SeededNumberDictionaryShape, uint32_t>::CopyKeysTo(
PropertyAttributes,
Dictionary<SeededNumberDictionaryShape, uint32_t>::SortMode);
-template Object* Dictionary<StringDictionaryShape, String*>::DeleteProperty(
+template Object* Dictionary<NameDictionaryShape, Name*>::DeleteProperty(
int, JSObject::DeleteMode);
template Object* Dictionary<SeededNumberDictionaryShape, uint32_t>::
DeleteProperty(int, JSObject::DeleteMode);
-template MaybeObject* Dictionary<StringDictionaryShape, String*>::Shrink(
- String*);
+template MaybeObject* Dictionary<NameDictionaryShape, Name*>::Shrink(Name* n);
template MaybeObject* Dictionary<SeededNumberDictionaryShape, uint32_t>::Shrink(
uint32_t);
-template void Dictionary<StringDictionaryShape, String*>::CopyKeysTo(
+template void Dictionary<NameDictionaryShape, Name*>::CopyKeysTo(
FixedArray*,
int,
- Dictionary<StringDictionaryShape, String*>::SortMode);
+ PropertyAttributes,
+ Dictionary<NameDictionaryShape, Name*>::SortMode);
template int
-Dictionary<StringDictionaryShape, String*>::NumberOfElementsFilterAttributes(
+Dictionary<NameDictionaryShape, Name*>::NumberOfElementsFilterAttributes(
PropertyAttributes);
-template MaybeObject* Dictionary<StringDictionaryShape, String*>::Add(
- String*, Object*, PropertyDetails);
+template MaybeObject* Dictionary<NameDictionaryShape, Name*>::Add(
+ Name*, Object*, PropertyDetails);
template MaybeObject*
-Dictionary<StringDictionaryShape, String*>::GenerateNewEnumerationIndices();
+Dictionary<NameDictionaryShape, Name*>::GenerateNewEnumerationIndices();
template int
Dictionary<SeededNumberDictionaryShape, uint32_t>::
@@ -11629,8 +14277,8 @@ template MaybeObject* Dictionary<SeededNumberDictionaryShape, uint32_t>::
template MaybeObject* Dictionary<UnseededNumberDictionaryShape, uint32_t>::
EnsureCapacity(int, uint32_t);
-template MaybeObject* Dictionary<StringDictionaryShape, String*>::
- EnsureCapacity(int, String*);
+template MaybeObject* Dictionary<NameDictionaryShape, Name*>::
+ EnsureCapacity(int, Name*);
template MaybeObject* Dictionary<SeededNumberDictionaryShape, uint32_t>::
AddEntry(uint32_t, Object*, PropertyDetails, uint32_t);
@@ -11638,19 +14286,27 @@ template MaybeObject* Dictionary<SeededNumberDictionaryShape, uint32_t>::
template MaybeObject* Dictionary<UnseededNumberDictionaryShape, uint32_t>::
AddEntry(uint32_t, Object*, PropertyDetails, uint32_t);
-template MaybeObject* Dictionary<StringDictionaryShape, String*>::AddEntry(
- String*, Object*, PropertyDetails, uint32_t);
+template MaybeObject* Dictionary<NameDictionaryShape, Name*>::AddEntry(
+ Name*, Object*, PropertyDetails, uint32_t);
template
int Dictionary<SeededNumberDictionaryShape, uint32_t>::NumberOfEnumElements();
template
-int Dictionary<StringDictionaryShape, String*>::NumberOfEnumElements();
+int Dictionary<NameDictionaryShape, Name*>::NumberOfEnumElements();
template
int HashTable<SeededNumberDictionaryShape, uint32_t>::FindEntry(uint32_t);
+Handle<Object> JSObject::PrepareSlowElementsForSort(
+ Handle<JSObject> object, uint32_t limit) {
+ CALL_HEAP_FUNCTION(object->GetIsolate(),
+ object->PrepareSlowElementsForSort(limit),
+ Object);
+}
+
+
// Collates undefined and non-existent elements below limit from position
// zero of the elements. The object stays in Dictionary mode.
MaybeObject* JSObject::PrepareSlowElementsForSort(uint32_t limit) {
@@ -11671,12 +14327,12 @@ MaybeObject* JSObject::PrepareSlowElementsForSort(uint32_t limit) {
Object* obj;
{ MaybeObject* maybe_obj =
- SeededNumberDictionary::Allocate(dict->NumberOfElements());
+ SeededNumberDictionary::Allocate(GetHeap(), dict->NumberOfElements());
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
SeededNumberDictionary* new_dict = SeededNumberDictionary::cast(obj);
- AssertNoAllocation no_alloc;
+ DisallowHeapAllocation no_alloc;
uint32_t pos = 0;
uint32_t undefs = 0;
@@ -11690,8 +14346,9 @@ MaybeObject* JSObject::PrepareSlowElementsForSort(uint32_t limit) {
ASSERT(!k->IsHeapNumber() || HeapNumber::cast(k)->value() <= kMaxUInt32);
Object* value = dict->ValueAt(i);
PropertyDetails details = dict->DetailsAt(i);
- if (details.type() == CALLBACKS) {
+ if (details.type() == CALLBACKS || details.IsReadOnly()) {
// Bail out and do the sorting of undefineds and array holes in JS.
+ // Also bail out if the element is not supposed to be moved.
return Smi::FromInt(-1);
}
uint32_t key = NumberToUint32(k);
@@ -11722,7 +14379,7 @@ MaybeObject* JSObject::PrepareSlowElementsForSort(uint32_t limit) {
}
uint32_t result = pos;
- PropertyDetails no_details = PropertyDetails(NONE, NORMAL);
+ PropertyDetails no_details = PropertyDetails(NONE, NORMAL, 0);
Heap* heap = GetHeap();
while (undefs > 0) {
if (pos > static_cast<uint32_t>(Smi::kMaxValue)) {
@@ -11752,73 +14409,57 @@ MaybeObject* JSObject::PrepareSlowElementsForSort(uint32_t limit) {
// the start of the elements array.
// If the object is in dictionary mode, it is converted to fast elements
// mode.
-MaybeObject* JSObject::PrepareElementsForSort(uint32_t limit) {
- Heap* heap = GetHeap();
+Handle<Object> JSObject::PrepareElementsForSort(Handle<JSObject> object,
+ uint32_t limit) {
+ Isolate* isolate = object->GetIsolate();
- if (HasDictionaryElements()) {
+ ASSERT(!object->map()->is_observed());
+ if (object->HasDictionaryElements()) {
// Convert to fast elements containing only the existing properties.
// Ordering is irrelevant, since we are going to sort anyway.
- SeededNumberDictionary* dict = element_dictionary();
- if (IsJSArray() || dict->requires_slow_elements() ||
+ Handle<SeededNumberDictionary> dict(object->element_dictionary());
+ if (object->IsJSArray() || dict->requires_slow_elements() ||
dict->max_number_key() >= limit) {
- return PrepareSlowElementsForSort(limit);
+ return JSObject::PrepareSlowElementsForSort(object, limit);
}
// Convert to fast elements.
- Object* obj;
- MaybeObject* maybe_obj = GetElementsTransitionMap(GetIsolate(),
- FAST_HOLEY_ELEMENTS);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- Map* new_map = Map::cast(obj);
+ Handle<Map> new_map =
+ JSObject::GetElementsTransitionMap(object, FAST_HOLEY_ELEMENTS);
- PretenureFlag tenure = heap->InNewSpace(this) ? NOT_TENURED: TENURED;
- Object* new_array;
- { MaybeObject* maybe_new_array =
- heap->AllocateFixedArray(dict->NumberOfElements(), tenure);
- if (!maybe_new_array->ToObject(&new_array)) return maybe_new_array;
- }
- FixedArray* fast_elements = FixedArray::cast(new_array);
- dict->CopyValuesTo(fast_elements);
- ValidateElements();
+ PretenureFlag tenure = isolate->heap()->InNewSpace(*object) ?
+ NOT_TENURED: TENURED;
+ Handle<FixedArray> fast_elements =
+ isolate->factory()->NewFixedArray(dict->NumberOfElements(), tenure);
+ dict->CopyValuesTo(*fast_elements);
+ object->ValidateElements();
- set_map_and_elements(new_map, fast_elements);
- } else if (HasExternalArrayElements()) {
+ object->set_map_and_elements(*new_map, *fast_elements);
+ } else if (object->HasExternalArrayElements()) {
// External arrays cannot have holes or undefined elements.
- return Smi::FromInt(ExternalArray::cast(elements())->length());
- } else if (!HasFastDoubleElements()) {
- Object* obj;
- { MaybeObject* maybe_obj = EnsureWritableFastElements();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
+ return handle(Smi::FromInt(
+ ExternalArray::cast(object->elements())->length()), isolate);
+ } else if (!object->HasFastDoubleElements()) {
+ JSObject::EnsureWritableFastElements(object);
}
- ASSERT(HasFastSmiOrObjectElements() || HasFastDoubleElements());
+ ASSERT(object->HasFastSmiOrObjectElements() ||
+ object->HasFastDoubleElements());
// Collect holes at the end, undefined before that and the rest at the
// start, and return the number of non-hole, non-undefined values.
- FixedArrayBase* elements_base = FixedArrayBase::cast(this->elements());
+ Handle<FixedArrayBase> elements_base(object->elements());
uint32_t elements_length = static_cast<uint32_t>(elements_base->length());
if (limit > elements_length) {
    limit = elements_length;
}
if (limit == 0) {
- return Smi::FromInt(0);
- }
-
- HeapNumber* result_double = NULL;
- if (limit > static_cast<uint32_t>(Smi::kMaxValue)) {
- // Pessimistically allocate space for return value before
- // we start mutating the array.
- Object* new_double;
- { MaybeObject* maybe_new_double = heap->AllocateHeapNumber(0.0);
- if (!maybe_new_double->ToObject(&new_double)) return maybe_new_double;
- }
- result_double = HeapNumber::cast(new_double);
+ return handle(Smi::FromInt(0), isolate);
}
uint32_t result = 0;
- if (elements_base->map() == heap->fixed_double_array_map()) {
- FixedDoubleArray* elements = FixedDoubleArray::cast(elements_base);
+ if (elements_base->map() == isolate->heap()->fixed_double_array_map()) {
+ FixedDoubleArray* elements = FixedDoubleArray::cast(*elements_base);
// Split elements into defined and the_hole, in that order.
unsigned int holes = limit;
// Assume most arrays contain no holes and undefined values, so minimize the
@@ -11845,12 +14486,12 @@ MaybeObject* JSObject::PrepareElementsForSort(uint32_t limit) {
holes++;
}
} else {
- FixedArray* elements = FixedArray::cast(elements_base);
- AssertNoAllocation no_alloc;
+ FixedArray* elements = FixedArray::cast(*elements_base);
+ DisallowHeapAllocation no_gc;
// Split elements into defined, undefined and the_hole, in that order. Only
// count locations for undefined and the hole, and fill them afterwards.
- WriteBarrierMode write_barrier = elements->GetWriteBarrierMode(no_alloc);
+ WriteBarrierMode write_barrier = elements->GetWriteBarrierMode(no_gc);
unsigned int undefs = limit;
unsigned int holes = limit;
// Assume most arrays contain no holes and undefined values, so minimize the
@@ -11890,12 +14531,60 @@ MaybeObject* JSObject::PrepareElementsForSort(uint32_t limit) {
}
}
- if (result <= static_cast<uint32_t>(Smi::kMaxValue)) {
- return Smi::FromInt(static_cast<int>(result));
+ return isolate->factory()->NewNumberFromUint(result);
+}
+
+
+ExternalArrayType JSTypedArray::type() {
+ switch (elements()->map()->instance_type()) {
+ case EXTERNAL_BYTE_ARRAY_TYPE:
+ return kExternalByteArray;
+ case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
+ return kExternalUnsignedByteArray;
+ case EXTERNAL_SHORT_ARRAY_TYPE:
+ return kExternalShortArray;
+ case EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE:
+ return kExternalUnsignedShortArray;
+ case EXTERNAL_INT_ARRAY_TYPE:
+ return kExternalIntArray;
+ case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
+ return kExternalUnsignedIntArray;
+ case EXTERNAL_FLOAT_ARRAY_TYPE:
+ return kExternalFloatArray;
+ case EXTERNAL_DOUBLE_ARRAY_TYPE:
+ return kExternalDoubleArray;
+ case EXTERNAL_PIXEL_ARRAY_TYPE:
+ return kExternalPixelArray;
+ default:
+ return static_cast<ExternalArrayType>(-1);
+ }
+}
+
+
+size_t JSTypedArray::element_size() {
+ switch (elements()->map()->instance_type()) {
+ case EXTERNAL_BYTE_ARRAY_TYPE:
+ return 1;
+ case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
+ return 1;
+ case EXTERNAL_SHORT_ARRAY_TYPE:
+ return 2;
+ case EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE:
+ return 2;
+ case EXTERNAL_INT_ARRAY_TYPE:
+ return 4;
+ case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
+ return 4;
+ case EXTERNAL_FLOAT_ARRAY_TYPE:
+ return 4;
+ case EXTERNAL_DOUBLE_ARRAY_TYPE:
+ return 8;
+ case EXTERNAL_PIXEL_ARRAY_TYPE:
+ return 1;
+ default:
+ UNREACHABLE();
+ return 0;
}
- ASSERT_NE(NULL, result_double);
- result_double->set_value(static_cast<double>(result));
- return result_double;
}
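
The two switches added above derive the public external-array kind and the per-element byte width from the backing store's instance type; a typed array's byte length is then its element count times that width. A self-contained restatement of the size table for illustration (the enum and helper names are made up, not V8's):

// Illustrative sketch only. Per-element widths as encoded in element_size().
#include <cstddef>

enum class ExternalKind { Byte, UnsignedByte, Short, UnsignedShort,
                          Int, UnsignedInt, Float, Double, Pixel };

size_t ElementSize(ExternalKind kind) {
  switch (kind) {
    case ExternalKind::Byte:
    case ExternalKind::UnsignedByte:
    case ExternalKind::Pixel:         return 1;
    case ExternalKind::Short:
    case ExternalKind::UnsignedShort: return 2;
    case ExternalKind::Int:
    case ExternalKind::UnsignedInt:
    case ExternalKind::Float:         return 4;
    case ExternalKind::Double:        return 8;
  }
  return 0;
}

size_t ByteLength(size_t element_count, ExternalKind kind) {
  return element_count * ElementSize(kind);  // e.g. 10 doubles -> 80 bytes.
}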
@@ -12053,64 +14742,50 @@ MaybeObject* ExternalDoubleArray::SetValue(uint32_t index, Object* value) {
}
-JSGlobalPropertyCell* GlobalObject::GetPropertyCell(LookupResult* result) {
+PropertyCell* GlobalObject::GetPropertyCell(LookupResult* result) {
ASSERT(!HasFastProperties());
Object* value = property_dictionary()->ValueAt(result->GetDictionaryEntry());
- return JSGlobalPropertyCell::cast(value);
-}
-
-
-Handle<JSGlobalPropertyCell> GlobalObject::EnsurePropertyCell(
- Handle<GlobalObject> global,
- Handle<String> name) {
- Isolate* isolate = global->GetIsolate();
- CALL_HEAP_FUNCTION(isolate,
- global->EnsurePropertyCell(*name),
- JSGlobalPropertyCell);
+ return PropertyCell::cast(value);
}
-MaybeObject* GlobalObject::EnsurePropertyCell(String* name) {
- ASSERT(!HasFastProperties());
- int entry = property_dictionary()->FindEntry(name);
- if (entry == StringDictionary::kNotFound) {
- Heap* heap = GetHeap();
- Object* cell;
- { MaybeObject* maybe_cell =
- heap->AllocateJSGlobalPropertyCell(heap->the_hole_value());
- if (!maybe_cell->ToObject(&cell)) return maybe_cell;
- }
- PropertyDetails details(NONE, NORMAL);
+Handle<PropertyCell> JSGlobalObject::EnsurePropertyCell(
+ Handle<JSGlobalObject> global,
+ Handle<Name> name) {
+ ASSERT(!global->HasFastProperties());
+ int entry = global->property_dictionary()->FindEntry(*name);
+ if (entry == NameDictionary::kNotFound) {
+ Isolate* isolate = global->GetIsolate();
+ Handle<PropertyCell> cell = isolate->factory()->NewPropertyCell(
+ isolate->factory()->the_hole_value());
+ PropertyDetails details(NONE, NORMAL, 0);
details = details.AsDeleted();
- Object* dictionary;
- { MaybeObject* maybe_dictionary =
- property_dictionary()->Add(name, cell, details);
- if (!maybe_dictionary->ToObject(&dictionary)) return maybe_dictionary;
- }
- set_properties(StringDictionary::cast(dictionary));
+ Handle<NameDictionary> dictionary = NameDictionaryAdd(
+ handle(global->property_dictionary()), name, cell, details);
+ global->set_properties(*dictionary);
return cell;
} else {
- Object* value = property_dictionary()->ValueAt(entry);
- ASSERT(value->IsJSGlobalPropertyCell());
- return value;
+ Object* value = global->property_dictionary()->ValueAt(entry);
+ ASSERT(value->IsPropertyCell());
+ return handle(PropertyCell::cast(value));
}
}
-MaybeObject* SymbolTable::LookupString(String* string, Object** s) {
- SymbolKey key(string);
+MaybeObject* StringTable::LookupString(String* string, Object** s) {
+ InternalizedStringKey key(string);
return LookupKey(&key, s);
}
-// This class is used for looking up two character strings in the symbol table.
+// This class is used for looking up two character strings in the string table.
// If we don't have a hit we don't want to waste much time so we unroll the
// string hash calculation loop here for speed. Doesn't work if the two
// characters form a decimal integer, since such strings have a different hash
// algorithm.
class TwoCharHashTableKey : public HashTableKey {
public:
- TwoCharHashTableKey(uint32_t c1, uint32_t c2, uint32_t seed)
+ TwoCharHashTableKey(uint16_t c1, uint16_t c2, uint32_t seed)
: c1_(c1), c2_(c2) {
// Char 1.
uint32_t hash = seed;
@@ -12126,17 +14801,17 @@ class TwoCharHashTableKey : public HashTableKey {
hash ^= hash >> 11;
hash += hash << 15;
if ((hash & String::kHashBitMask) == 0) hash = StringHasher::kZeroHash;
+ hash_ = hash;
#ifdef DEBUG
- StringHasher hasher(2, seed);
- hasher.AddCharacter(c1);
- hasher.AddCharacter(c2);
// If this assert fails then we failed to reproduce the two-character
// version of the string hashing algorithm above. One reason could be
// that we were passed two digits as characters, since the hash
// algorithm is different in that case.
- ASSERT_EQ(static_cast<int>(hasher.GetHash()), static_cast<int>(hash));
+ uint16_t chars[2] = {c1, c2};
+ uint32_t check_hash = StringHasher::HashSequentialString(chars, 2, seed);
+ hash = (hash << String::kHashShift) | String::kIsNotArrayIndexMask;
+ ASSERT_EQ(static_cast<int32_t>(hash), static_cast<int32_t>(check_hash));
#endif
- hash_ = hash;
}
bool IsMatch(Object* o) {
@@ -12153,110 +14828,110 @@ class TwoCharHashTableKey : public HashTableKey {
return String::cast(key)->Hash();
}
- Object* AsObject() {
- // The TwoCharHashTableKey is only used for looking in the symbol
+ Object* AsObject(Heap* heap) {
+ // The TwoCharHashTableKey is only used for looking in the string
// table, not for adding to it.
UNREACHABLE();
return NULL;
}
private:
- uint32_t c1_;
- uint32_t c2_;
+ uint16_t c1_;
+ uint16_t c2_;
uint32_t hash_;
};
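
The constructor above unrolls the seeded string hash for exactly two characters, and the DEBUG block re-checks it against StringHasher::HashSequentialString. The finalization steps visible in the hunk (`hash ^= hash >> 11; hash += hash << 15;`) match a Jenkins-style one-at-a-time hash; a sketch of the generic loop the unrolled version is assumed to agree with follows, with the per-character mixing constants marked as assumptions since they fall outside the shown context:

// Illustrative sketch only: seeded one-at-a-time hash over UTF-16 units.
#include <cstdint>

uint32_t SequentialHash(const uint16_t* chars, int length, uint32_t seed) {
  uint32_t hash = seed;
  for (int i = 0; i < length; ++i) {
    hash += chars[i];
    hash += hash << 10;   // Assumed per-character mixing (not shown in hunk).
    hash ^= hash >> 6;
  }
  hash += hash << 3;      // Finalization, as in the unrolled code above.
  hash ^= hash >> 11;
  hash += hash << 15;
  return hash;
}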
-bool SymbolTable::LookupSymbolIfExists(String* string, String** symbol) {
- SymbolKey key(string);
+bool StringTable::LookupStringIfExists(String* string, String** result) {
+ InternalizedStringKey key(string);
int entry = FindEntry(&key);
if (entry == kNotFound) {
return false;
} else {
- String* result = String::cast(KeyAt(entry));
- ASSERT(StringShape(result).IsSymbol());
- *symbol = result;
+ *result = String::cast(KeyAt(entry));
+ ASSERT(StringShape(*result).IsInternalized());
return true;
}
}
-bool SymbolTable::LookupTwoCharsSymbolIfExists(uint32_t c1,
- uint32_t c2,
- String** symbol) {
+bool StringTable::LookupTwoCharsStringIfExists(uint16_t c1,
+ uint16_t c2,
+ String** result) {
TwoCharHashTableKey key(c1, c2, GetHeap()->HashSeed());
int entry = FindEntry(&key);
if (entry == kNotFound) {
return false;
} else {
- String* result = String::cast(KeyAt(entry));
- ASSERT(StringShape(result).IsSymbol());
- *symbol = result;
+ *result = String::cast(KeyAt(entry));
+ ASSERT(StringShape(*result).IsInternalized());
return true;
}
}
-MaybeObject* SymbolTable::LookupSymbol(Vector<const char> str,
- Object** s) {
- Utf8SymbolKey key(str, GetHeap()->HashSeed());
+MaybeObject* StringTable::LookupUtf8String(Vector<const char> str,
+ Object** s) {
+ Utf8StringKey key(str, GetHeap()->HashSeed());
return LookupKey(&key, s);
}
-MaybeObject* SymbolTable::LookupAsciiSymbol(Vector<const char> str,
- Object** s) {
- AsciiSymbolKey key(str, GetHeap()->HashSeed());
+MaybeObject* StringTable::LookupOneByteString(Vector<const uint8_t> str,
+ Object** s) {
+ OneByteStringKey key(str, GetHeap()->HashSeed());
return LookupKey(&key, s);
}
-MaybeObject* SymbolTable::LookupSubStringAsciiSymbol(Handle<SeqAsciiString> str,
- int from,
- int length,
- Object** s) {
- SubStringAsciiSymbolKey key(str, from, length, GetHeap()->HashSeed());
+MaybeObject* StringTable::LookupSubStringOneByteString(
+ Handle<SeqOneByteString> str,
+ int from,
+ int length,
+ Object** s) {
+ SubStringOneByteStringKey key(str, from, length);
return LookupKey(&key, s);
}
-MaybeObject* SymbolTable::LookupTwoByteSymbol(Vector<const uc16> str,
+MaybeObject* StringTable::LookupTwoByteString(Vector<const uc16> str,
Object** s) {
- TwoByteSymbolKey key(str, GetHeap()->HashSeed());
+ TwoByteStringKey key(str, GetHeap()->HashSeed());
return LookupKey(&key, s);
}
-MaybeObject* SymbolTable::LookupKey(HashTableKey* key, Object** s) {
+
+MaybeObject* StringTable::LookupKey(HashTableKey* key, Object** s) {
int entry = FindEntry(key);
- // Symbol already in table.
+ // String already in table.
if (entry != kNotFound) {
*s = KeyAt(entry);
return this;
}
- // Adding new symbol. Grow table if needed.
+ // Adding new string. Grow table if needed.
Object* obj;
{ MaybeObject* maybe_obj = EnsureCapacity(1, key);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
- // Create symbol object.
- Object* symbol;
- { MaybeObject* maybe_symbol = key->AsObject();
- if (!maybe_symbol->ToObject(&symbol)) return maybe_symbol;
+ // Create string object.
+ Object* string;
+ { MaybeObject* maybe_string = key->AsObject(GetHeap());
+ if (!maybe_string->ToObject(&string)) return maybe_string;
}
- // If the symbol table grew as part of EnsureCapacity, obj is not
- // the current symbol table and therefore we cannot use
- // SymbolTable::cast here.
- SymbolTable* table = reinterpret_cast<SymbolTable*>(obj);
+ // If the string table grew as part of EnsureCapacity, obj is not
+ // the current string table and therefore we cannot use
+ // StringTable::cast here.
+ StringTable* table = reinterpret_cast<StringTable*>(obj);
- // Add the new symbol and return it along with the symbol table.
+ // Add the new string and return it along with the string table.
entry = table->FindInsertionEntry(key->Hash());
- table->set(EntryToIndex(entry), symbol);
+ table->set(EntryToIndex(entry), string);
table->ElementAdded();
- *s = symbol;
+ *s = string;
return table;
}
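
LookupKey above is a classic lookup-or-insert: return the canonical entry if the key is already present, otherwise grow the table if needed, materialize the object via key->AsObject(heap), and record it. The same shape over a standard container, purely for illustration and not tied to V8's types:

// Illustrative sketch only: return the canonical copy of a string, inserting
// it first if it is not yet in the table.
#include <string>
#include <unordered_set>

const std::string& Internalize(std::unordered_set<std::string>& table,
                               const std::string& s) {
  auto it = table.find(s);
  if (it != table.end()) return *it;   // Already in table: reuse it.
  return *table.insert(s).first;       // Otherwise add it and return the copy.
}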
@@ -12320,7 +14995,7 @@ MaybeObject* CompilationCacheTable::Put(String* src,
if (!maybe_cache->To(&cache)) return maybe_cache;
Object* k;
- MaybeObject* maybe_k = key.AsObject();
+ MaybeObject* maybe_k = key.AsObject(GetHeap());
if (!maybe_k->To(&k)) return maybe_k;
int entry = cache->FindInsertionEntry(key.Hash());
@@ -12349,7 +15024,7 @@ MaybeObject* CompilationCacheTable::PutEval(String* src,
int entry = cache->FindInsertionEntry(key.Hash());
Object* k;
- { MaybeObject* maybe_k = key.AsObject();
+ { MaybeObject* maybe_k = key.AsObject(GetHeap());
if (!maybe_k->ToObject(&k)) return maybe_k;
}
@@ -12396,42 +15071,42 @@ void CompilationCacheTable::Remove(Object* value) {
}
-// SymbolsKey used for HashTable where key is array of symbols.
-class SymbolsKey : public HashTableKey {
+// StringsKey used for HashTable where key is array of internalized strings.
+class StringsKey : public HashTableKey {
public:
- explicit SymbolsKey(FixedArray* symbols) : symbols_(symbols) { }
+ explicit StringsKey(FixedArray* strings) : strings_(strings) { }
- bool IsMatch(Object* symbols) {
- FixedArray* o = FixedArray::cast(symbols);
- int len = symbols_->length();
+ bool IsMatch(Object* strings) {
+ FixedArray* o = FixedArray::cast(strings);
+ int len = strings_->length();
if (o->length() != len) return false;
for (int i = 0; i < len; i++) {
- if (o->get(i) != symbols_->get(i)) return false;
+ if (o->get(i) != strings_->get(i)) return false;
}
return true;
}
- uint32_t Hash() { return HashForObject(symbols_); }
+ uint32_t Hash() { return HashForObject(strings_); }
uint32_t HashForObject(Object* obj) {
- FixedArray* symbols = FixedArray::cast(obj);
- int len = symbols->length();
+ FixedArray* strings = FixedArray::cast(obj);
+ int len = strings->length();
uint32_t hash = 0;
for (int i = 0; i < len; i++) {
- hash ^= String::cast(symbols->get(i))->Hash();
+ hash ^= String::cast(strings->get(i))->Hash();
}
return hash;
}
- Object* AsObject() { return symbols_; }
+ Object* AsObject(Heap* heap) { return strings_; }
private:
- FixedArray* symbols_;
+ FixedArray* strings_;
};
Object* MapCache::Lookup(FixedArray* array) {
- SymbolsKey key(array);
+ StringsKey key(array);
int entry = FindEntry(&key);
if (entry == kNotFound) return GetHeap()->undefined_value();
return get(EntryToIndex(entry) + 1);
@@ -12439,7 +15114,7 @@ Object* MapCache::Lookup(FixedArray* array) {
MaybeObject* MapCache::Put(FixedArray* array, Map* value) {
- SymbolsKey key(array);
+ StringsKey key(array);
Object* obj;
{ MaybeObject* maybe_obj = EnsureCapacity(1, &key);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
@@ -12455,10 +15130,16 @@ MaybeObject* MapCache::Put(FixedArray* array, Map* value) {
template<typename Shape, typename Key>
-MaybeObject* Dictionary<Shape, Key>::Allocate(int at_least_space_for) {
+MaybeObject* Dictionary<Shape, Key>::Allocate(Heap* heap,
+ int at_least_space_for,
+ PretenureFlag pretenure) {
Object* obj;
{ MaybeObject* maybe_obj =
- HashTable<Shape, Key>::Allocate(at_least_space_for);
+ HashTable<Shape, Key>::Allocate(
+ heap,
+ at_least_space_for,
+ HashTable<Shape, Key>::USE_DEFAULT_MINIMUM_CAPACITY,
+ pretenure);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
// Initialize the next enumeration index.
@@ -12468,8 +15149,8 @@ MaybeObject* Dictionary<Shape, Key>::Allocate(int at_least_space_for) {
}
-void StringDictionary::DoGenerateNewEnumerationIndices(
- Handle<StringDictionary> dictionary) {
+void NameDictionary::DoGenerateNewEnumerationIndices(
+ Handle<NameDictionary> dictionary) {
CALL_HEAP_FUNCTION_VOID(dictionary->GetIsolate(),
dictionary->GenerateNewEnumerationIndices());
}
@@ -12522,8 +15203,8 @@ MaybeObject* Dictionary<Shape, Key>::GenerateNewEnumerationIndices() {
if (Dictionary<Shape, Key>::IsKey(Dictionary<Shape, Key>::KeyAt(i))) {
int enum_index = Smi::cast(enumeration_order->get(pos++))->value();
PropertyDetails details = DetailsAt(i);
- PropertyDetails new_details =
- PropertyDetails(details.attributes(), details.type(), enum_index);
+ PropertyDetails new_details = PropertyDetails(
+ details.attributes(), details.type(), enum_index);
DetailsAtPut(i, new_details);
}
}
@@ -12586,10 +15267,10 @@ MaybeObject* Dictionary<Shape, Key>::AtPut(Key key, Object* value) {
}
Object* k;
- { MaybeObject* maybe_k = Shape::AsObject(key);
+ { MaybeObject* maybe_k = Shape::AsObject(this->GetHeap(), key);
if (!maybe_k->ToObject(&k)) return maybe_k;
}
- PropertyDetails details = PropertyDetails(NONE, NORMAL);
+ PropertyDetails details = PropertyDetails(NONE, NORMAL, 0);
return Dictionary<Shape, Key>::cast(obj)->AddEntry(key, value, details,
Dictionary<Shape, Key>::Hash(key));
@@ -12600,8 +15281,6 @@ template<typename Shape, typename Key>
MaybeObject* Dictionary<Shape, Key>::Add(Key key,
Object* value,
PropertyDetails details) {
- ASSERT(details.dictionary_index() == details.descriptor_index());
-
  // Validate that the key is absent.
SLOW_ASSERT((this->FindEntry(key) == Dictionary<Shape, Key>::kNotFound));
// Check whether the dictionary should be extended.
@@ -12623,7 +15302,7 @@ MaybeObject* Dictionary<Shape, Key>::AddEntry(Key key,
uint32_t hash) {
// Compute the key object.
Object* k;
- { MaybeObject* maybe_k = Shape::AsObject(key);
+ { MaybeObject* maybe_k = Shape::AsObject(this->GetHeap(), key);
if (!maybe_k->ToObject(&k)) return maybe_k;
}
@@ -12639,8 +15318,8 @@ MaybeObject* Dictionary<Shape, Key>::AddEntry(Key key,
SetNextEnumerationIndex(index + 1);
}
SetEntry(entry, k, value, details);
- ASSERT((Dictionary<Shape, Key>::KeyAt(entry)->IsNumber()
- || Dictionary<Shape, Key>::KeyAt(entry)->IsString()));
+ ASSERT((Dictionary<Shape, Key>::KeyAt(entry)->IsNumber() ||
+ Dictionary<Shape, Key>::KeyAt(entry)->IsName()));
HashTable<Shape, Key>::ElementAdded();
return this;
}
@@ -12677,7 +15356,7 @@ MaybeObject* SeededNumberDictionary::AddNumberEntry(uint32_t key,
MaybeObject* UnseededNumberDictionary::AddNumberEntry(uint32_t key,
Object* value) {
SLOW_ASSERT(this->FindEntry(key) == kNotFound);
- return Add(key, value, PropertyDetails(NONE, NORMAL));
+ return Add(key, value, PropertyDetails(NONE, NORMAL, 0));
}
@@ -12723,7 +15402,8 @@ MaybeObject* SeededNumberDictionary::Set(uint32_t key,
details = PropertyDetails(details.attributes(),
details.type(),
DetailsAt(entry).dictionary_index());
- MaybeObject* maybe_object_key = SeededNumberDictionaryShape::AsObject(key);
+ MaybeObject* maybe_object_key =
+ SeededNumberDictionaryShape::AsObject(GetHeap(), key);
Object* object_key;
if (!maybe_object_key->ToObject(&object_key)) return maybe_object_key;
SetEntry(entry, object_key, value, details);
@@ -12735,7 +15415,8 @@ MaybeObject* UnseededNumberDictionary::Set(uint32_t key,
Object* value) {
int entry = FindEntry(key);
if (entry == kNotFound) return AddNumberEntry(key, value);
- MaybeObject* maybe_object_key = UnseededNumberDictionaryShape::AsObject(key);
+ MaybeObject* maybe_object_key =
+ UnseededNumberDictionaryShape::AsObject(GetHeap(), key);
Object* object_key;
if (!maybe_object_key->ToObject(&object_key)) return maybe_object_key;
SetEntry(entry, object_key, value);
@@ -12751,7 +15432,8 @@ int Dictionary<Shape, Key>::NumberOfElementsFilterAttributes(
int result = 0;
for (int i = 0; i < capacity; i++) {
Object* k = HashTable<Shape, Key>::KeyAt(i);
- if (HashTable<Shape, Key>::IsKey(k)) {
+ if (HashTable<Shape, Key>::IsKey(k) &&
+ ((filter & SYMBOLIC) == 0 || !k->IsSymbol())) {
PropertyDetails details = DetailsAt(i);
if (details.IsDeleted()) continue;
PropertyAttributes attr = details.attributes();
@@ -12793,7 +15475,7 @@ void Dictionary<Shape, Key>::CopyKeysTo(
}
-FixedArray* StringDictionary::CopyEnumKeysTo(FixedArray* storage) {
+FixedArray* NameDictionary::CopyEnumKeysTo(FixedArray* storage) {
int length = storage->length();
ASSERT(length >= NumberOfEnumElements());
Heap* heap = GetHeap();
@@ -12806,7 +15488,7 @@ FixedArray* StringDictionary::CopyEnumKeysTo(FixedArray* storage) {
// that are deleted or not enumerable.
for (int i = 0; i < capacity; i++) {
Object* k = KeyAt(i);
- if (IsKey(k)) {
+ if (IsKey(k) && !k->IsSymbol()) {
PropertyDetails details = DetailsAt(i);
if (details.IsDeleted() || details.IsDontEnum()) continue;
properties++;
@@ -12839,6 +15521,7 @@ template<typename Shape, typename Key>
void Dictionary<Shape, Key>::CopyKeysTo(
FixedArray* storage,
int index,
+ PropertyAttributes filter,
typename Dictionary<Shape, Key>::SortMode sort_mode) {
ASSERT(storage->length() >= NumberOfElementsFilterAttributes(
static_cast<PropertyAttributes>(NONE)));
@@ -12848,7 +15531,8 @@ void Dictionary<Shape, Key>::CopyKeysTo(
if (HashTable<Shape, Key>::IsKey(k)) {
PropertyDetails details = DetailsAt(i);
if (details.IsDeleted()) continue;
- storage->set(index++, k);
+ PropertyAttributes attr = details.attributes();
+ if ((attr & filter) == 0) storage->set(index++, k);
}
}
if (sort_mode == Dictionary<Shape, Key>::SORTED) {
@@ -12866,8 +15550,8 @@ Object* Dictionary<Shape, Key>::SlowReverseLookup(Object* value) {
Object* k = HashTable<Shape, Key>::KeyAt(i);
if (Dictionary<Shape, Key>::IsKey(k)) {
Object* e = ValueAt(i);
- if (e->IsJSGlobalPropertyCell()) {
- e = JSGlobalPropertyCell::cast(e)->value();
+ if (e->IsPropertyCell()) {
+ e = PropertyCell::cast(e)->value();
}
if (e == value) return k;
}
@@ -12877,7 +15561,7 @@ Object* Dictionary<Shape, Key>::SlowReverseLookup(Object* value) {
}
-MaybeObject* StringDictionary::TransformPropertiesToFastFor(
+MaybeObject* NameDictionary::TransformPropertiesToFastFor(
JSObject* obj, int unused_property_fields) {
// Make sure we preserve dictionary representation if there are too many
// descriptors.
@@ -12903,8 +15587,7 @@ MaybeObject* StringDictionary::TransformPropertiesToFastFor(
PropertyType type = DetailsAt(i).type();
ASSERT(type != FIELD);
instance_descriptor_length++;
- if (type == NORMAL &&
- (!value->IsJSFunction() || heap->InNewSpace(value))) {
+ if (type == NORMAL && !value->IsJSFunction()) {
number_of_fields += 1;
}
}
@@ -12932,7 +15615,7 @@ MaybeObject* StringDictionary::TransformPropertiesToFastFor(
// Allocate the instance descriptor.
DescriptorArray* descriptors;
MaybeObject* maybe_descriptors =
- DescriptorArray::Allocate(instance_descriptor_length);
+ DescriptorArray::Allocate(GetIsolate(), instance_descriptor_length);
if (!maybe_descriptors->To(&descriptors)) {
return maybe_descriptors;
}
@@ -12959,21 +15642,22 @@ MaybeObject* StringDictionary::TransformPropertiesToFastFor(
Object* k = KeyAt(i);
if (IsKey(k)) {
Object* value = ValueAt(i);
- // Ensure the key is a symbol before writing into the instance descriptor.
- String* key;
- MaybeObject* maybe_key = heap->LookupSymbol(String::cast(k));
- if (!maybe_key->To(&key)) return maybe_key;
+ Name* key;
+ if (k->IsSymbol()) {
+ key = Symbol::cast(k);
+ } else {
+ // Ensure the key is a unique name before writing into the
+ // instance descriptor.
+ MaybeObject* maybe_key = heap->InternalizeString(String::cast(k));
+ if (!maybe_key->To(&key)) return maybe_key;
+ }
PropertyDetails details = DetailsAt(i);
- ASSERT(details.descriptor_index() == details.dictionary_index());
- int enumeration_index = details.descriptor_index();
+ int enumeration_index = details.dictionary_index();
PropertyType type = details.type();
- if (value->IsJSFunction() && !heap->InNewSpace(value)) {
- ConstantFunctionDescriptor d(key,
- JSFunction::cast(value),
- details.attributes(),
- enumeration_index);
+ if (value->IsJSFunction()) {
+ ConstantDescriptor d(key, value, details.attributes());
descriptors->Set(enumeration_index - 1, &d, witness);
} else if (type == NORMAL) {
if (current_offset < inobject_props) {
@@ -12987,13 +15671,13 @@ MaybeObject* StringDictionary::TransformPropertiesToFastFor(
FieldDescriptor d(key,
current_offset++,
details.attributes(),
- enumeration_index);
+ // TODO(verwaest): value->OptimalRepresentation();
+ Representation::Tagged());
descriptors->Set(enumeration_index - 1, &d, witness);
} else if (type == CALLBACKS) {
CallbacksDescriptor d(key,
value,
- details.attributes(),
- enumeration_index);
+ details.attributes());
descriptors->Set(enumeration_index - 1, &d, witness);
} else {
UNREACHABLE();
@@ -13038,6 +15722,7 @@ MaybeObject* ObjectHashSet::Add(Object* key) {
int hash;
{ MaybeObject* maybe_hash = key->GetHash(ALLOW_CREATION);
if (maybe_hash->IsFailure()) return maybe_hash;
+ ASSERT(key->GetHash(OMIT_CREATION) == maybe_hash);
hash = Smi::cast(maybe_hash->ToObjectUnchecked())->value();
}
int entry = FindEntry(key);
@@ -13099,6 +15784,7 @@ MaybeObject* ObjectHashTable::Put(Object* key, Object* value) {
int hash;
{ MaybeObject* maybe_hash = key->GetHash(ALLOW_CREATION);
if (maybe_hash->IsFailure()) return maybe_hash;
+ ASSERT(key->GetHash(OMIT_CREATION) == maybe_hash);
hash = Smi::cast(maybe_hash->ToObjectUnchecked())->value();
}
int entry = FindEntry(key);
@@ -13141,6 +15827,93 @@ void ObjectHashTable::RemoveEntry(int entry) {
}
+Object* WeakHashTable::Lookup(Object* key) {
+ ASSERT(IsKey(key));
+ int entry = FindEntry(key);
+ if (entry == kNotFound) return GetHeap()->the_hole_value();
+ return get(EntryToValueIndex(entry));
+}
+
+
+MaybeObject* WeakHashTable::Put(Object* key, Object* value) {
+ ASSERT(IsKey(key));
+ int entry = FindEntry(key);
+ // Key is already in table, just overwrite value.
+ if (entry != kNotFound) {
+ set(EntryToValueIndex(entry), value);
+ return this;
+ }
+
+ // Check whether the hash table should be extended.
+ Object* obj;
+ { MaybeObject* maybe_obj = EnsureCapacity(1, key, TENURED);
+ if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+ }
+ WeakHashTable* table = WeakHashTable::cast(obj);
+ table->AddEntry(table->FindInsertionEntry(Hash(key)), key, value);
+ return table;
+}
+
+
+void WeakHashTable::AddEntry(int entry, Object* key, Object* value) {
+ set(EntryToIndex(entry), key);
+ set(EntryToValueIndex(entry), value);
+ ElementAdded();
+}
+
+
+DeclaredAccessorDescriptorIterator::DeclaredAccessorDescriptorIterator(
+ DeclaredAccessorDescriptor* descriptor)
+ : array_(descriptor->serialized_data()->GetDataStartAddress()),
+ length_(descriptor->serialized_data()->length()),
+ offset_(0) {
+}
+
+
+const DeclaredAccessorDescriptorData*
+ DeclaredAccessorDescriptorIterator::Next() {
+ ASSERT(offset_ < length_);
+ uint8_t* ptr = &array_[offset_];
+ ASSERT(reinterpret_cast<uintptr_t>(ptr) % sizeof(uintptr_t) == 0);
+ const DeclaredAccessorDescriptorData* data =
+ reinterpret_cast<const DeclaredAccessorDescriptorData*>(ptr);
+ offset_ += sizeof(*data);
+ ASSERT(offset_ <= length_);
+ return data;
+}
+
+
+Handle<DeclaredAccessorDescriptor> DeclaredAccessorDescriptor::Create(
+ Isolate* isolate,
+ const DeclaredAccessorDescriptorData& descriptor,
+ Handle<DeclaredAccessorDescriptor> previous) {
+ int previous_length =
+ previous.is_null() ? 0 : previous->serialized_data()->length();
+ int length = sizeof(descriptor) + previous_length;
+ Handle<ByteArray> serialized_descriptor =
+ isolate->factory()->NewByteArray(length);
+ Handle<DeclaredAccessorDescriptor> value =
+ isolate->factory()->NewDeclaredAccessorDescriptor();
+ value->set_serialized_data(*serialized_descriptor);
+ // Copy in the data.
+ {
+ DisallowHeapAllocation no_allocation;
+ uint8_t* array = serialized_descriptor->GetDataStartAddress();
+ if (previous_length != 0) {
+ uint8_t* previous_array =
+ previous->serialized_data()->GetDataStartAddress();
+ OS::MemCopy(array, previous_array, previous_length);
+ array += previous_length;
+ }
+ ASSERT(reinterpret_cast<uintptr_t>(array) % sizeof(uintptr_t) == 0);
+ DeclaredAccessorDescriptorData* data =
+ reinterpret_cast<DeclaredAccessorDescriptorData*>(array);
+ *data = descriptor;
+ }
+ return value;
+}
+
+
#ifdef ENABLE_DEBUGGER_SUPPORT
// Check if there is a break point at this code position.
bool DebugInfo::HasBreakPoint(int code_position) {
@@ -13169,7 +15942,8 @@ Object* DebugInfo::GetBreakPointInfo(int code_position) {
void DebugInfo::ClearBreakPoint(Handle<DebugInfo> debug_info,
int code_position,
Handle<Object> break_point_object) {
- Handle<Object> break_point_info(debug_info->GetBreakPointInfo(code_position));
+ Handle<Object> break_point_info(debug_info->GetBreakPointInfo(code_position),
+ debug_info->GetIsolate());
if (break_point_info->IsUndefined()) return;
BreakPointInfo::ClearBreakPoint(
Handle<BreakPointInfo>::cast(break_point_info),
@@ -13182,8 +15956,9 @@ void DebugInfo::SetBreakPoint(Handle<DebugInfo> debug_info,
int source_position,
int statement_position,
Handle<Object> break_point_object) {
- Isolate* isolate = Isolate::Current();
- Handle<Object> break_point_info(debug_info->GetBreakPointInfo(code_position));
+ Isolate* isolate = debug_info->GetIsolate();
+ Handle<Object> break_point_info(debug_info->GetBreakPointInfo(code_position),
+ isolate);
if (!break_point_info->IsUndefined()) {
BreakPointInfo::SetBreakPoint(
Handle<BreakPointInfo>::cast(break_point_info),
@@ -13295,7 +16070,7 @@ int DebugInfo::GetBreakPointInfoIndex(int code_position) {
// Remove the specified break point object.
void BreakPointInfo::ClearBreakPoint(Handle<BreakPointInfo> break_point_info,
Handle<Object> break_point_object) {
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = break_point_info->GetIsolate();
// If there are no break points just ignore.
if (break_point_info->break_point_objects()->IsUndefined()) return;
// If there is a single break point clear it if it is the same.
@@ -13330,6 +16105,8 @@ void BreakPointInfo::ClearBreakPoint(Handle<BreakPointInfo> break_point_info,
// Add the specified break point object.
void BreakPointInfo::SetBreakPoint(Handle<BreakPointInfo> break_point_info,
Handle<Object> break_point_object) {
+ Isolate* isolate = break_point_info->GetIsolate();
+
  // If there were no break point objects before, just set it.
if (break_point_info->break_point_objects()->IsUndefined()) {
break_point_info->set_break_point_objects(*break_point_object);
@@ -13339,7 +16116,7 @@ void BreakPointInfo::SetBreakPoint(Handle<BreakPointInfo> break_point_info,
if (break_point_info->break_point_objects() == *break_point_object) return;
// If there was one break point object before replace with array.
if (!break_point_info->break_point_objects()->IsFixedArray()) {
- Handle<FixedArray> array = FACTORY->NewFixedArray(2);
+ Handle<FixedArray> array = isolate->factory()->NewFixedArray(2);
array->set(0, break_point_info->break_point_objects());
array->set(1, *break_point_object);
break_point_info->set_break_point_objects(*array);
@@ -13350,7 +16127,7 @@ void BreakPointInfo::SetBreakPoint(Handle<BreakPointInfo> break_point_info,
Handle<FixedArray>(
FixedArray::cast(break_point_info->break_point_objects()));
Handle<FixedArray> new_array =
- FACTORY->NewFixedArray(old_array->length() + 1);
+ isolate->factory()->NewFixedArray(old_array->length() + 1);
for (int i = 0; i < old_array->length(); i++) {
// If the break point was there before just ignore.
if (old_array->get(i) == *break_point_object) return;
@@ -13430,7 +16207,7 @@ Object* JSDate::DoGetField(FieldIndex index) {
}
double time = value()->Number();
- if (isnan(time)) return GetIsolate()->heap()->nan_value();
+ if (std::isnan(time)) return GetIsolate()->heap()->nan_value();
int64_t local_time_ms = date_cache->ToLocal(static_cast<int64_t>(time));
int days = DateCache::DaysFromTime(local_time_ms);
@@ -13449,7 +16226,7 @@ Object* JSDate::GetUTCField(FieldIndex index,
DateCache* date_cache) {
ASSERT(index >= kFirstUTCField);
- if (isnan(value)) return GetIsolate()->heap()->nan_value();
+ if (std::isnan(value)) return GetIsolate()->heap()->nan_value();
int64_t time_ms = static_cast<int64_t>(value);
@@ -13523,4 +16300,106 @@ void JSDate::SetLocalFields(int64_t local_time_ms, DateCache* date_cache) {
set_sec(Smi::FromInt(sec), SKIP_WRITE_BARRIER);
}
+
+void JSArrayBuffer::Neuter() {
+ ASSERT(is_external());
+ set_backing_store(NULL);
+ set_byte_length(Smi::FromInt(0));
+}
+
+
+void JSArrayBufferView::NeuterView() {
+ set_byte_offset(Smi::FromInt(0));
+ set_byte_length(Smi::FromInt(0));
+}
+
+
+void JSDataView::Neuter() {
+ NeuterView();
+}
+
+
+void JSTypedArray::Neuter() {
+ NeuterView();
+ set_length(Smi::FromInt(0));
+ set_elements(GetHeap()->EmptyExternalArrayForMap(map()));
+}
+
+
+Type* PropertyCell::type() {
+ return static_cast<Type*>(type_raw());
+}
+
+
+void PropertyCell::set_type(Type* type, WriteBarrierMode ignored) {
+ ASSERT(IsPropertyCell());
+ set_type_raw(type, ignored);
+}
+
+
+Handle<Type> PropertyCell::UpdatedType(Handle<PropertyCell> cell,
+ Handle<Object> value) {
+ Isolate* isolate = cell->GetIsolate();
+ Handle<Type> old_type(cell->type(), isolate);
+ // TODO(2803): Do not track ConsStrings as constants because they cannot be
+ // embedded into code.
+ Handle<Type> new_type(value->IsConsString() || value->IsTheHole()
+ ? Type::Any()
+ : Type::Constant(value, isolate), isolate);
+
+ if (new_type->Is(old_type)) {
+ return old_type;
+ }
+
+ cell->dependent_code()->DeoptimizeDependentCodeGroup(
+ isolate, DependentCode::kPropertyCellChangedGroup);
+
+ if (old_type->Is(Type::None()) || old_type->Is(Type::Undefined())) {
+ return new_type;
+ }
+
+ return handle(Type::Any(), isolate);
+}
+
+
+void PropertyCell::SetValueInferType(Handle<PropertyCell> cell,
+ Handle<Object> value) {
+ cell->set_value(*value);
+ if (!Type::Any()->Is(cell->type())) {
+ Handle<Type> new_type = UpdatedType(cell, value);
+ cell->set_type(*new_type);
+ }
+}
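The widening in PropertyCell::UpdatedType above follows a small lattice: keep the old type when the new value still satisfies it, adopt the constant type of the new value when the cell previously held None or Undefined, and otherwise give up and widen to Any after deoptimizing dependent code. A minimal sketch of that decision, using hypothetical simplified types rather than V8's Type class:

enum DemoCellType { DEMO_NONE, DEMO_CONSTANT, DEMO_ANY };

// value_fits_old_type stands in for new_type->Is(old_type) above.
static DemoCellType DemoUpdatedType(DemoCellType old_type,
                                    bool value_fits_old_type) {
  if (value_fits_old_type) return old_type;
  // The real code deoptimizes kPropertyCellChangedGroup at this point.
  if (old_type == DEMO_NONE) return DEMO_CONSTANT;  // first concrete value seen
  return DEMO_ANY;                                  // conflicting constants: widen
}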
+
+
+void PropertyCell::AddDependentCompilationInfo(CompilationInfo* info) {
+ Handle<DependentCode> dep(dependent_code());
+ Handle<DependentCode> codes =
+ DependentCode::Insert(dep, DependentCode::kPropertyCellChangedGroup,
+ info->object_wrapper());
+ if (*codes != dependent_code()) set_dependent_code(*codes);
+ info->dependencies(DependentCode::kPropertyCellChangedGroup)->Add(
+ Handle<HeapObject>(this), info->zone());
+}
+
+
+void PropertyCell::AddDependentCode(Handle<Code> code) {
+ Handle<DependentCode> codes = DependentCode::Insert(
+ Handle<DependentCode>(dependent_code()),
+ DependentCode::kPropertyCellChangedGroup, code);
+ if (*codes != dependent_code()) set_dependent_code(*codes);
+}
+
+
+const char* GetBailoutReason(BailoutReason reason) {
+ ASSERT(reason < kLastErrorMessage);
+#define ERROR_MESSAGES_TEXTS(C, T) T,
+ static const char* error_messages_[] = {
+ ERROR_MESSAGES_LIST(ERROR_MESSAGES_TEXTS)
+ };
+#undef ERROR_MESSAGES_TEXTS
+ return error_messages_[reason];
+}
+
+
} } // namespace v8::internal
diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h
index 47d775781..8331a7688 100644
--- a/deps/v8/src/objects.h
+++ b/deps/v8/src/objects.h
@@ -29,8 +29,10 @@
#define V8_OBJECTS_H_
#include "allocation.h"
+#include "assert-scope.h"
#include "builtins.h"
#include "elements-kind.h"
+#include "flags.h"
#include "list.h"
#include "property-details.h"
#include "smart-pointers.h"
@@ -56,11 +58,18 @@
// - JSReceiver (suitable for property access)
// - JSObject
// - JSArray
+// - JSArrayBuffer
+// - JSArrayBufferView
+// - JSTypedArray
+// - JSDataView
// - JSSet
// - JSMap
-// - JSWeakMap
+// - JSWeakCollection
+// - JSWeakMap
+// - JSWeakSet
// - JSRegExp
// - JSFunction
+// - JSGeneratorObject
// - JSModule
// - GlobalObject
// - JSGlobalObject
@@ -77,7 +86,7 @@
// - DescriptorArray
// - HashTable
// - Dictionary
-// - SymbolTable
+// - StringTable
// - CompilationCacheTable
// - CodeCacheHashTable
// - MapCache
@@ -95,23 +104,39 @@
// - ExternalIntArray
// - ExternalUnsignedIntArray
// - ExternalFloatArray
-// - String
-// - SeqString
-// - SeqAsciiString
-// - SeqTwoByteString
-// - SlicedString
-// - ConsString
-// - ExternalString
-// - ExternalAsciiString
-// - ExternalTwoByteString
+// - Name
+// - String
+// - SeqString
+// - SeqOneByteString
+// - SeqTwoByteString
+// - SlicedString
+// - ConsString
+// - ExternalString
+// - ExternalAsciiString
+// - ExternalTwoByteString
+// - InternalizedString
+// - SeqInternalizedString
+// - SeqOneByteInternalizedString
+// - SeqTwoByteInternalizedString
+// - ConsInternalizedString
+// - ExternalInternalizedString
+// - ExternalAsciiInternalizedString
+// - ExternalTwoByteInternalizedString
+// - Symbol
// - HeapNumber
+// - Cell
+// - PropertyCell
// - Code
// - Map
// - Oddball
// - Foreign
// - SharedFunctionInfo
// - Struct
+// - Box
+// - DeclaredAccessorDescriptor
// - AccessorInfo
+// - DeclaredAccessorInfo
+// - ExecutableAccessorInfo
// - AccessorPair
// - AccessCheckInfo
// - InterceptorInfo
@@ -134,20 +159,86 @@
namespace v8 {
namespace internal {
-enum CompareMapMode {
- REQUIRE_EXACT_MAP,
- ALLOW_ELEMENT_TRANSITION_MAPS
+enum KeyedAccessStoreMode {
+ STANDARD_STORE,
+ STORE_TRANSITION_SMI_TO_OBJECT,
+ STORE_TRANSITION_SMI_TO_DOUBLE,
+ STORE_TRANSITION_DOUBLE_TO_OBJECT,
+ STORE_TRANSITION_HOLEY_SMI_TO_OBJECT,
+ STORE_TRANSITION_HOLEY_SMI_TO_DOUBLE,
+ STORE_TRANSITION_HOLEY_DOUBLE_TO_OBJECT,
+ STORE_AND_GROW_NO_TRANSITION,
+ STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT,
+ STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE,
+ STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT,
+ STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_OBJECT,
+ STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_DOUBLE,
+ STORE_AND_GROW_TRANSITION_HOLEY_DOUBLE_TO_OBJECT,
+ STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS,
+ STORE_NO_TRANSITION_HANDLE_COW
};
-enum KeyedAccessGrowMode {
- DO_NOT_ALLOW_JSARRAY_GROWTH,
- ALLOW_JSARRAY_GROWTH
-};
+
+static const int kGrowICDelta = STORE_AND_GROW_NO_TRANSITION -
+ STANDARD_STORE;
+STATIC_ASSERT(STANDARD_STORE == 0);
+STATIC_ASSERT(kGrowICDelta ==
+ STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT -
+ STORE_TRANSITION_SMI_TO_OBJECT);
+STATIC_ASSERT(kGrowICDelta ==
+ STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE -
+ STORE_TRANSITION_SMI_TO_DOUBLE);
+STATIC_ASSERT(kGrowICDelta ==
+ STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT -
+ STORE_TRANSITION_DOUBLE_TO_OBJECT);
+
+
+static inline KeyedAccessStoreMode GetGrowStoreMode(
+ KeyedAccessStoreMode store_mode) {
+ if (store_mode < STORE_AND_GROW_NO_TRANSITION) {
+ store_mode = static_cast<KeyedAccessStoreMode>(
+ static_cast<int>(store_mode) + kGrowICDelta);
+ }
+ return store_mode;
+}
+
+
+static inline bool IsTransitionStoreMode(KeyedAccessStoreMode store_mode) {
+ return store_mode > STANDARD_STORE &&
+ store_mode <= STORE_AND_GROW_TRANSITION_HOLEY_DOUBLE_TO_OBJECT &&
+ store_mode != STORE_AND_GROW_NO_TRANSITION;
+}
+
+
+static inline KeyedAccessStoreMode GetNonTransitioningStoreMode(
+ KeyedAccessStoreMode store_mode) {
+ if (store_mode >= STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS) {
+ return store_mode;
+ }
+ if (store_mode >= STORE_AND_GROW_NO_TRANSITION) {
+ return STORE_AND_GROW_NO_TRANSITION;
+ }
+ return STANDARD_STORE;
+}
+
+
+static inline bool IsGrowStoreMode(KeyedAccessStoreMode store_mode) {
+ return store_mode >= STORE_AND_GROW_NO_TRANSITION &&
+ store_mode <= STORE_AND_GROW_TRANSITION_HOLEY_DOUBLE_TO_OBJECT;
+}
+
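The STATIC_ASSERTs above pin down the invariant these helpers rely on: every STORE_AND_GROW_* mode sits exactly kGrowICDelta entries past its non-growing counterpart, so switching a keyed store IC into grow mode is a single enum addition rather than a table lookup. A self-contained sketch of the same layout trick with hypothetical, shortened mode names:

enum DemoStoreMode {
  DEMO_STANDARD_STORE,                 // 0
  DEMO_TRANSITION_SMI_TO_OBJECT,       // 1
  DEMO_TRANSITION_SMI_TO_DOUBLE,       // 2
  DEMO_GROW_NO_TRANSITION,             // 3 == DEMO_STANDARD_STORE + delta
  DEMO_GROW_TRANSITION_SMI_TO_OBJECT,  // 4
  DEMO_GROW_TRANSITION_SMI_TO_DOUBLE   // 5
};

static const int kDemoGrowDelta = DEMO_GROW_NO_TRANSITION - DEMO_STANDARD_STORE;

static inline DemoStoreMode DemoGetGrowStoreMode(DemoStoreMode mode) {
  if (mode < DEMO_GROW_NO_TRANSITION) {
    mode = static_cast<DemoStoreMode>(static_cast<int>(mode) + kDemoGrowDelta);
  }
  return mode;
}

// DemoGetGrowStoreMode(DEMO_TRANSITION_SMI_TO_DOUBLE) yields
// DEMO_GROW_TRANSITION_SMI_TO_DOUBLE; modes at or past DEMO_GROW_NO_TRANSITION
// are returned unchanged, mirroring GetGrowStoreMode above.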
// Setter that skips the write barrier if mode is SKIP_WRITE_BARRIER.
enum WriteBarrierMode { SKIP_WRITE_BARRIER, UPDATE_WRITE_BARRIER };
+// Indicates whether a value can be loaded as a constant.
+enum StoreMode {
+ ALLOW_AS_CONSTANT,
+ FORCE_FIELD
+};
+
+
// PropertyNormalizationMode is used to specify whether to keep
// inobject properties when normalizing properties of a JSObject.
enum PropertyNormalizationMode {
@@ -178,6 +269,12 @@ enum TransitionFlag {
};
+enum DebugExtraICState {
+ DEBUG_BREAK,
+ DEBUG_PREPARE_STEP_IN
+};
+
+
// Indicates whether the transition is simple: the target map of the transition
// either extends the current map with a new property, or it modifies the
// property that was added last to the current map.
@@ -194,6 +291,18 @@ enum DescriptorFlag {
OWN_DESCRIPTORS
};
+// The GC maintains a bit of information, the MarkingParity, which toggles
+// from odd to even and back every time marking is completed. Incremental
+// marking can visit an object twice during a marking phase, so algorithms that
+// piggy-back on marking can use the parity to ensure that they only
+// perform an operation on an object once per marking phase: they record the
+// MarkingParity when they visit an object, and only re-visit the object when it
+// is marked again and the MarkingParity changes.
+enum MarkingParity {
+ NO_MARKING_PARITY,
+ ODD_MARKING_PARITY,
+ EVEN_MARKING_PARITY
+};
// Instance size sentinel for objects of variable size.
const int kVariableSizeSentinel = 0;
@@ -213,8 +322,8 @@ const int kStubMinorKeyBits = kBitsPerInt - kSmiTagSize - kStubMajorKeyBits;
// encoding is mentioned explicitly in the name. Likewise, the default
// representation is considered sequential. It is not mentioned in the
// name. The other representations (e.g. CONS, EXTERNAL) are explicitly
-// mentioned. Finally, the string is either a SYMBOL_TYPE (if it is a
-// symbol) or a STRING_TYPE (if it is not a symbol).
+// mentioned. Finally, the string is either a STRING_TYPE (if it is a normal
+// string) or an INTERNALIZED_STRING_TYPE (if it is an internalized string).
//
// NOTE: The following things are some that depend on the string types having
// instance_types that are less than those of all other types:
@@ -224,34 +333,37 @@ const int kStubMinorKeyBits = kBitsPerInt - kSmiTagSize - kStubMajorKeyBits;
// NOTE: Everything following JS_VALUE_TYPE is considered a
// JSObject for GC purposes. The first four entries here have typeof
// 'object', whereas JS_FUNCTION_TYPE has typeof 'function'.
-#define INSTANCE_TYPE_LIST_ALL(V) \
- V(SYMBOL_TYPE) \
- V(ASCII_SYMBOL_TYPE) \
- V(CONS_SYMBOL_TYPE) \
- V(CONS_ASCII_SYMBOL_TYPE) \
- V(EXTERNAL_SYMBOL_TYPE) \
- V(EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE) \
- V(EXTERNAL_ASCII_SYMBOL_TYPE) \
- V(SHORT_EXTERNAL_SYMBOL_TYPE) \
- V(SHORT_EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE) \
- V(SHORT_EXTERNAL_ASCII_SYMBOL_TYPE) \
+#define INSTANCE_TYPE_LIST(V) \
V(STRING_TYPE) \
V(ASCII_STRING_TYPE) \
V(CONS_STRING_TYPE) \
V(CONS_ASCII_STRING_TYPE) \
V(SLICED_STRING_TYPE) \
+ V(SLICED_ASCII_STRING_TYPE) \
V(EXTERNAL_STRING_TYPE) \
- V(EXTERNAL_STRING_WITH_ASCII_DATA_TYPE) \
V(EXTERNAL_ASCII_STRING_TYPE) \
+ V(EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE) \
V(SHORT_EXTERNAL_STRING_TYPE) \
- V(SHORT_EXTERNAL_STRING_WITH_ASCII_DATA_TYPE) \
V(SHORT_EXTERNAL_ASCII_STRING_TYPE) \
- V(PRIVATE_EXTERNAL_ASCII_STRING_TYPE) \
+ V(SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE) \
\
+ V(INTERNALIZED_STRING_TYPE) \
+ V(ASCII_INTERNALIZED_STRING_TYPE) \
+ V(CONS_INTERNALIZED_STRING_TYPE) \
+ V(CONS_ASCII_INTERNALIZED_STRING_TYPE) \
+ V(EXTERNAL_INTERNALIZED_STRING_TYPE) \
+ V(EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE) \
+ V(EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE) \
+ V(SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE) \
+ V(SHORT_EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE) \
+ V(SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE) \
+ \
+ V(SYMBOL_TYPE) \
V(MAP_TYPE) \
V(CODE_TYPE) \
V(ODDBALL_TYPE) \
- V(JS_GLOBAL_PROPERTY_CELL_TYPE) \
+ V(CELL_TYPE) \
+ V(PROPERTY_CELL_TYPE) \
\
V(HEAP_NUMBER_TYPE) \
V(FOREIGN_TYPE) \
@@ -267,10 +379,13 @@ const int kStubMinorKeyBits = kBitsPerInt - kSmiTagSize - kStubMajorKeyBits;
V(EXTERNAL_INT_ARRAY_TYPE) \
V(EXTERNAL_UNSIGNED_INT_ARRAY_TYPE) \
V(EXTERNAL_FLOAT_ARRAY_TYPE) \
+ V(EXTERNAL_DOUBLE_ARRAY_TYPE) \
V(EXTERNAL_PIXEL_ARRAY_TYPE) \
V(FILLER_TYPE) \
\
- V(ACCESSOR_INFO_TYPE) \
+ V(DECLARED_ACCESSOR_DESCRIPTOR_TYPE) \
+ V(DECLARED_ACCESSOR_INFO_TYPE) \
+ V(EXECUTABLE_ACCESSOR_INFO_TYPE) \
V(ACCESSOR_PAIR_TYPE) \
V(ACCESS_CHECK_INFO_TYPE) \
V(INTERCEPTOR_INFO_TYPE) \
@@ -279,14 +394,18 @@ const int kStubMinorKeyBits = kBitsPerInt - kSmiTagSize - kStubMajorKeyBits;
V(OBJECT_TEMPLATE_INFO_TYPE) \
V(SIGNATURE_INFO_TYPE) \
V(TYPE_SWITCH_INFO_TYPE) \
+ V(ALLOCATION_MEMENTO_TYPE) \
+ V(ALLOCATION_SITE_TYPE) \
V(SCRIPT_TYPE) \
V(CODE_CACHE_TYPE) \
V(POLYMORPHIC_CODE_CACHE_TYPE) \
V(TYPE_FEEDBACK_INFO_TYPE) \
V(ALIASED_ARGUMENTS_ENTRY_TYPE) \
+ V(BOX_TYPE) \
\
V(FIXED_ARRAY_TYPE) \
V(FIXED_DOUBLE_ARRAY_TYPE) \
+ V(CONSTANT_POOL_ARRAY_TYPE) \
V(SHARED_FUNCTION_INFO_TYPE) \
\
V(JS_MESSAGE_OBJECT_TYPE) \
@@ -295,74 +414,31 @@ const int kStubMinorKeyBits = kBitsPerInt - kSmiTagSize - kStubMajorKeyBits;
V(JS_DATE_TYPE) \
V(JS_OBJECT_TYPE) \
V(JS_CONTEXT_EXTENSION_OBJECT_TYPE) \
+ V(JS_GENERATOR_OBJECT_TYPE) \
V(JS_MODULE_TYPE) \
V(JS_GLOBAL_OBJECT_TYPE) \
V(JS_BUILTINS_OBJECT_TYPE) \
V(JS_GLOBAL_PROXY_TYPE) \
V(JS_ARRAY_TYPE) \
+ V(JS_ARRAY_BUFFER_TYPE) \
+ V(JS_TYPED_ARRAY_TYPE) \
+ V(JS_DATA_VIEW_TYPE) \
V(JS_PROXY_TYPE) \
+ V(JS_SET_TYPE) \
+ V(JS_MAP_TYPE) \
V(JS_WEAK_MAP_TYPE) \
+ V(JS_WEAK_SET_TYPE) \
V(JS_REGEXP_TYPE) \
\
V(JS_FUNCTION_TYPE) \
V(JS_FUNCTION_PROXY_TYPE) \
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-#define INSTANCE_TYPE_LIST_DEBUGGER(V) \
V(DEBUG_INFO_TYPE) \
V(BREAK_POINT_INFO_TYPE)
-#else
-#define INSTANCE_TYPE_LIST_DEBUGGER(V)
-#endif
-
-#define INSTANCE_TYPE_LIST(V) \
- INSTANCE_TYPE_LIST_ALL(V) \
- INSTANCE_TYPE_LIST_DEBUGGER(V)
// Since string types are not consecutive, this macro is used to
// iterate over them.
#define STRING_TYPE_LIST(V) \
- V(SYMBOL_TYPE, \
- kVariableSizeSentinel, \
- symbol, \
- Symbol) \
- V(ASCII_SYMBOL_TYPE, \
- kVariableSizeSentinel, \
- ascii_symbol, \
- AsciiSymbol) \
- V(CONS_SYMBOL_TYPE, \
- ConsString::kSize, \
- cons_symbol, \
- ConsSymbol) \
- V(CONS_ASCII_SYMBOL_TYPE, \
- ConsString::kSize, \
- cons_ascii_symbol, \
- ConsAsciiSymbol) \
- V(EXTERNAL_SYMBOL_TYPE, \
- ExternalTwoByteString::kSize, \
- external_symbol, \
- ExternalSymbol) \
- V(EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE, \
- ExternalTwoByteString::kSize, \
- external_symbol_with_ascii_data, \
- ExternalSymbolWithAsciiData) \
- V(EXTERNAL_ASCII_SYMBOL_TYPE, \
- ExternalAsciiString::kSize, \
- external_ascii_symbol, \
- ExternalAsciiSymbol) \
- V(SHORT_EXTERNAL_SYMBOL_TYPE, \
- ExternalTwoByteString::kShortSize, \
- short_external_symbol, \
- ShortExternalSymbol) \
- V(SHORT_EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE, \
- ExternalTwoByteString::kShortSize, \
- short_external_symbol_with_ascii_data, \
- ShortExternalSymbolWithAsciiData) \
- V(SHORT_EXTERNAL_ASCII_SYMBOL_TYPE, \
- ExternalAsciiString::kShortSize, \
- short_external_ascii_symbol, \
- ShortExternalAsciiSymbol) \
V(STRING_TYPE, \
kVariableSizeSentinel, \
string, \
@@ -391,26 +467,67 @@ const int kStubMinorKeyBits = kBitsPerInt - kSmiTagSize - kStubMajorKeyBits;
ExternalTwoByteString::kSize, \
external_string, \
ExternalString) \
- V(EXTERNAL_STRING_WITH_ASCII_DATA_TYPE, \
- ExternalTwoByteString::kSize, \
- external_string_with_ascii_data, \
- ExternalStringWithAsciiData) \
V(EXTERNAL_ASCII_STRING_TYPE, \
ExternalAsciiString::kSize, \
external_ascii_string, \
ExternalAsciiString) \
+ V(EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE, \
+ ExternalTwoByteString::kSize, \
+ external_string_with_one_byte_data, \
+ ExternalStringWithOneByteData) \
V(SHORT_EXTERNAL_STRING_TYPE, \
ExternalTwoByteString::kShortSize, \
short_external_string, \
ShortExternalString) \
- V(SHORT_EXTERNAL_STRING_WITH_ASCII_DATA_TYPE, \
- ExternalTwoByteString::kShortSize, \
- short_external_string_with_ascii_data, \
- ShortExternalStringWithAsciiData) \
V(SHORT_EXTERNAL_ASCII_STRING_TYPE, \
ExternalAsciiString::kShortSize, \
short_external_ascii_string, \
- ShortExternalAsciiString)
+ ShortExternalAsciiString) \
+ V(SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE, \
+ ExternalTwoByteString::kShortSize, \
+ short_external_string_with_one_byte_data, \
+ ShortExternalStringWithOneByteData) \
+ \
+ V(INTERNALIZED_STRING_TYPE, \
+ kVariableSizeSentinel, \
+ internalized_string, \
+ InternalizedString) \
+ V(ASCII_INTERNALIZED_STRING_TYPE, \
+ kVariableSizeSentinel, \
+ ascii_internalized_string, \
+ AsciiInternalizedString) \
+ V(CONS_INTERNALIZED_STRING_TYPE, \
+ ConsString::kSize, \
+ cons_internalized_string, \
+ ConsInternalizedString) \
+ V(CONS_ASCII_INTERNALIZED_STRING_TYPE, \
+ ConsString::kSize, \
+ cons_ascii_internalized_string, \
+ ConsAsciiInternalizedString) \
+ V(EXTERNAL_INTERNALIZED_STRING_TYPE, \
+ ExternalTwoByteString::kSize, \
+ external_internalized_string, \
+ ExternalInternalizedString) \
+ V(EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE, \
+ ExternalAsciiString::kSize, \
+ external_ascii_internalized_string, \
+ ExternalAsciiInternalizedString) \
+ V(EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE, \
+ ExternalTwoByteString::kSize, \
+ external_internalized_string_with_one_byte_data, \
+ ExternalInternalizedStringWithOneByteData) \
+ V(SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE, \
+ ExternalTwoByteString::kShortSize, \
+ short_external_internalized_string, \
+ ShortExternalInternalizedString) \
+ V(SHORT_EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE, \
+ ExternalAsciiString::kShortSize, \
+ short_external_ascii_internalized_string, \
+ ShortExternalAsciiInternalizedString) \
+ V(SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE, \
+ ExternalTwoByteString::kShortSize, \
+ short_external_internalized_string_with_one_byte_data, \
+ ShortExternalInternalizedStringWithOneByteData) \
// A struct is a simple object with a set of object-valued fields. Including an
// object type in this causes the compiler to generate most of the boilerplate
@@ -422,7 +539,12 @@ const int kStubMinorKeyBits = kBitsPerInt - kSmiTagSize - kStubMajorKeyBits;
// type tags, elements in this list have to be added to the INSTANCE_TYPE_LIST
// manually.
#define STRUCT_LIST_ALL(V) \
- V(ACCESSOR_INFO, AccessorInfo, accessor_info) \
+ V(BOX, Box, box) \
+ V(DECLARED_ACCESSOR_DESCRIPTOR, \
+ DeclaredAccessorDescriptor, \
+ declared_accessor_descriptor) \
+ V(DECLARED_ACCESSOR_INFO, DeclaredAccessorInfo, declared_accessor_info) \
+ V(EXECUTABLE_ACCESSOR_INFO, ExecutableAccessorInfo, executable_accessor_info)\
V(ACCESSOR_PAIR, AccessorPair, accessor_pair) \
V(ACCESS_CHECK_INFO, AccessCheckInfo, access_check_info) \
V(INTERCEPTOR_INFO, InterceptorInfo, interceptor_info) \
@@ -432,6 +554,8 @@ const int kStubMinorKeyBits = kBitsPerInt - kSmiTagSize - kStubMajorKeyBits;
V(SIGNATURE_INFO, SignatureInfo, signature_info) \
V(TYPE_SWITCH_INFO, TypeSwitchInfo, type_switch_info) \
V(SCRIPT, Script, script) \
+ V(ALLOCATION_SITE, AllocationSite, allocation_site) \
+ V(ALLOCATION_MEMENTO, AllocationMemento, allocation_memento) \
V(CODE_CACHE, CodeCache, code_cache) \
V(POLYMORPHIC_CODE_CACHE, PolymorphicCodeCache, polymorphic_code_cache) \
V(TYPE_FEEDBACK_INFO, TypeFeedbackInfo, type_feedback_info) \
@@ -456,18 +580,17 @@ const uint32_t kIsNotStringMask = 0x80;
const uint32_t kStringTag = 0x0;
const uint32_t kNotStringTag = 0x80;
-// Bit 6 indicates that the object is a symbol (if set) or not (if cleared).
-// There are not enough types that the non-string types (with bit 7 set) can
-// have bit 6 set too.
-const uint32_t kIsSymbolMask = 0x40;
-const uint32_t kNotSymbolTag = 0x0;
-const uint32_t kSymbolTag = 0x40;
+// Bit 6 indicates that the object is an internalized string (if cleared) or
+// not (if set). Bit 7 has to be clear as well.
+const uint32_t kIsNotInternalizedMask = 0x40;
+const uint32_t kNotInternalizedTag = 0x40;
+const uint32_t kInternalizedTag = 0x0;
// If bit 7 is clear then bit 2 indicates whether the string consists of
// two-byte characters or one-byte characters.
const uint32_t kStringEncodingMask = 0x4;
const uint32_t kTwoByteStringTag = 0x0;
-const uint32_t kAsciiStringTag = 0x4;
+const uint32_t kOneByteStringTag = 0x4;
// If bit 7 is clear, the low-order 2 bits indicate the representation
// of the string.
@@ -493,9 +616,9 @@ const uint32_t kSlicedNotConsMask = kSlicedStringTag & ~kConsStringTag;
STATIC_ASSERT(IS_POWER_OF_TWO(kSlicedNotConsMask) && kSlicedNotConsMask != 0);
// If bit 7 is clear, then bit 3 indicates whether this two-byte
-// string actually contains ASCII data.
-const uint32_t kAsciiDataHintMask = 0x08;
-const uint32_t kAsciiDataHintTag = 0x08;
+// string actually contains one byte data.
+const uint32_t kOneByteDataHintMask = 0x08;
+const uint32_t kOneByteDataHintTag = 0x08;
// If bit 7 is clear and string representation indicates an external string,
// then bit 4 indicates whether the data pointer is cached.
@@ -504,60 +627,78 @@ const uint32_t kShortExternalStringTag = 0x10;
// A ConsString with an empty string as the right side is a candidate
-// for being shortcut by the garbage collector unless it is a
-// symbol. It's not common to have non-flat symbols, so we do not
-// shortcut them thereby avoiding turning symbols into strings. See
-// heap.cc and mark-compact.cc.
+// for being shortcut by the garbage collector unless it is internalized.
+// It's not common to have non-flat internalized strings, so we do not
+// shortcut them thereby avoiding turning internalized strings into strings.
+// See heap.cc and mark-compact.cc.
const uint32_t kShortcutTypeMask =
kIsNotStringMask |
- kIsSymbolMask |
+ kIsNotInternalizedMask |
kStringRepresentationMask;
-const uint32_t kShortcutTypeTag = kConsStringTag;
+const uint32_t kShortcutTypeTag = kConsStringTag | kNotInternalizedTag;
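Putting the masks above together: an instance type packs string-ness into bit 7, internalization into bit 6, the one-byte encoding into bit 2, and the representation into the low bits, so predicates such as the shortcut test reduce to one mask-and-compare. A sketch of that test with the bit values written out (the concrete values 0x03 for kStringRepresentationMask and 0x1 for kConsStringTag are assumed here; they are defined outside this hunk):

static inline bool DemoIsShortcutCandidate(uint32_t instance_type) {
  const uint32_t kMask = 0x80 | 0x40 | 0x03;  // not-a-string | not-internalized | representation
  const uint32_t kTag  = 0x40 | 0x01;         // non-internalized cons string
  return (instance_type & kMask) == kTag;     // i.e. (type & kShortcutTypeMask) == kShortcutTypeTag
}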
enum InstanceType {
// String types.
- SYMBOL_TYPE = kTwoByteStringTag | kSymbolTag | kSeqStringTag,
- ASCII_SYMBOL_TYPE = kAsciiStringTag | kSymbolTag | kSeqStringTag,
- CONS_SYMBOL_TYPE = kTwoByteStringTag | kSymbolTag | kConsStringTag,
- CONS_ASCII_SYMBOL_TYPE = kAsciiStringTag | kSymbolTag | kConsStringTag,
- SHORT_EXTERNAL_SYMBOL_TYPE = kTwoByteStringTag | kSymbolTag |
- kExternalStringTag | kShortExternalStringTag,
- SHORT_EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE =
- kTwoByteStringTag | kSymbolTag | kExternalStringTag |
- kAsciiDataHintTag | kShortExternalStringTag,
- SHORT_EXTERNAL_ASCII_SYMBOL_TYPE = kAsciiStringTag | kExternalStringTag |
- kSymbolTag | kShortExternalStringTag,
- EXTERNAL_SYMBOL_TYPE = kTwoByteStringTag | kSymbolTag | kExternalStringTag,
- EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE =
- kTwoByteStringTag | kSymbolTag | kExternalStringTag | kAsciiDataHintTag,
- EXTERNAL_ASCII_SYMBOL_TYPE =
- kAsciiStringTag | kSymbolTag | kExternalStringTag,
- STRING_TYPE = kTwoByteStringTag | kSeqStringTag,
- ASCII_STRING_TYPE = kAsciiStringTag | kSeqStringTag,
- CONS_STRING_TYPE = kTwoByteStringTag | kConsStringTag,
- CONS_ASCII_STRING_TYPE = kAsciiStringTag | kConsStringTag,
- SLICED_STRING_TYPE = kTwoByteStringTag | kSlicedStringTag,
- SLICED_ASCII_STRING_TYPE = kAsciiStringTag | kSlicedStringTag,
+ INTERNALIZED_STRING_TYPE = kTwoByteStringTag | kSeqStringTag
+ | kInternalizedTag,
+ ASCII_INTERNALIZED_STRING_TYPE = kOneByteStringTag | kSeqStringTag
+ | kInternalizedTag,
+ CONS_INTERNALIZED_STRING_TYPE = kTwoByteStringTag | kConsStringTag
+ | kInternalizedTag,
+ CONS_ASCII_INTERNALIZED_STRING_TYPE = kOneByteStringTag | kConsStringTag
+ | kInternalizedTag,
+ EXTERNAL_INTERNALIZED_STRING_TYPE = kTwoByteStringTag | kExternalStringTag
+ | kInternalizedTag,
+ EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE = kOneByteStringTag
+ | kExternalStringTag | kInternalizedTag,
+ EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE =
+ EXTERNAL_INTERNALIZED_STRING_TYPE | kOneByteDataHintTag
+ | kInternalizedTag,
+ SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE =
+ EXTERNAL_INTERNALIZED_STRING_TYPE | kShortExternalStringTag
+ | kInternalizedTag,
+ SHORT_EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE =
+ EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE | kShortExternalStringTag
+ | kInternalizedTag,
+ SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE =
+ EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE
+ | kShortExternalStringTag | kInternalizedTag,
+
+ STRING_TYPE = INTERNALIZED_STRING_TYPE | kNotInternalizedTag,
+ ASCII_STRING_TYPE = ASCII_INTERNALIZED_STRING_TYPE | kNotInternalizedTag,
+ CONS_STRING_TYPE = CONS_INTERNALIZED_STRING_TYPE | kNotInternalizedTag,
+ CONS_ASCII_STRING_TYPE =
+ CONS_ASCII_INTERNALIZED_STRING_TYPE | kNotInternalizedTag,
+
+ SLICED_STRING_TYPE =
+ kTwoByteStringTag | kSlicedStringTag | kNotInternalizedTag,
+ SLICED_ASCII_STRING_TYPE =
+ kOneByteStringTag | kSlicedStringTag | kNotInternalizedTag,
+ EXTERNAL_STRING_TYPE =
+ EXTERNAL_INTERNALIZED_STRING_TYPE | kNotInternalizedTag,
+ EXTERNAL_ASCII_STRING_TYPE =
+ EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE | kNotInternalizedTag,
+ EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE =
+ EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE
+ | kNotInternalizedTag,
SHORT_EXTERNAL_STRING_TYPE =
- kTwoByteStringTag | kExternalStringTag | kShortExternalStringTag,
- SHORT_EXTERNAL_STRING_WITH_ASCII_DATA_TYPE =
- kTwoByteStringTag | kExternalStringTag |
- kAsciiDataHintTag | kShortExternalStringTag,
+ SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE | kNotInternalizedTag,
SHORT_EXTERNAL_ASCII_STRING_TYPE =
- kAsciiStringTag | kExternalStringTag | kShortExternalStringTag,
- EXTERNAL_STRING_TYPE = kTwoByteStringTag | kExternalStringTag,
- EXTERNAL_STRING_WITH_ASCII_DATA_TYPE =
- kTwoByteStringTag | kExternalStringTag | kAsciiDataHintTag,
- // LAST_STRING_TYPE
- EXTERNAL_ASCII_STRING_TYPE = kAsciiStringTag | kExternalStringTag,
- PRIVATE_EXTERNAL_ASCII_STRING_TYPE = EXTERNAL_ASCII_STRING_TYPE,
+ SHORT_EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE | kNotInternalizedTag,
+ SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE =
+ SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE
+ | kNotInternalizedTag,
+
+ // Non-string names
+ SYMBOL_TYPE = kNotStringTag, // LAST_NAME_TYPE, FIRST_NONSTRING_TYPE
// Objects allocated in their own spaces (never in new space).
- MAP_TYPE = kNotStringTag, // FIRST_NONSTRING_TYPE
+ MAP_TYPE,
CODE_TYPE,
ODDBALL_TYPE,
- JS_GLOBAL_PROPERTY_CELL_TYPE,
+ CELL_TYPE,
+ PROPERTY_CELL_TYPE,
// "Data", objects that cannot contain non-map-word pointers to heap
// objects.
@@ -575,10 +716,13 @@ enum InstanceType {
EXTERNAL_DOUBLE_ARRAY_TYPE,
EXTERNAL_PIXEL_ARRAY_TYPE, // LAST_EXTERNAL_ARRAY_TYPE
FIXED_DOUBLE_ARRAY_TYPE,
+ CONSTANT_POOL_ARRAY_TYPE,
FILLER_TYPE, // LAST_DATA_TYPE
// Structs.
- ACCESSOR_INFO_TYPE,
+ DECLARED_ACCESSOR_DESCRIPTOR_TYPE,
+ DECLARED_ACCESSOR_INFO_TYPE,
+ EXECUTABLE_ACCESSOR_INFO_TYPE,
ACCESSOR_PAIR_TYPE,
ACCESS_CHECK_INFO_TYPE,
INTERCEPTOR_INFO_TYPE,
@@ -587,11 +731,14 @@ enum InstanceType {
OBJECT_TEMPLATE_INFO_TYPE,
SIGNATURE_INFO_TYPE,
TYPE_SWITCH_INFO_TYPE,
+ ALLOCATION_SITE_TYPE,
+ ALLOCATION_MEMENTO_TYPE,
SCRIPT_TYPE,
CODE_CACHE_TYPE,
POLYMORPHIC_CODE_CACHE_TYPE,
TYPE_FEEDBACK_INFO_TYPE,
ALIASED_ARGUMENTS_ENTRY_TYPE,
+ BOX_TYPE,
// The following two instance types are only used when ENABLE_DEBUGGER_SUPPORT
// is defined. However as include/v8.h contain some of the instance type
// constants always having them avoids them getting different numbers
@@ -616,14 +763,19 @@ enum InstanceType {
JS_DATE_TYPE,
JS_OBJECT_TYPE,
JS_CONTEXT_EXTENSION_OBJECT_TYPE,
+ JS_GENERATOR_OBJECT_TYPE,
JS_MODULE_TYPE,
JS_GLOBAL_OBJECT_TYPE,
JS_BUILTINS_OBJECT_TYPE,
JS_GLOBAL_PROXY_TYPE,
JS_ARRAY_TYPE,
+ JS_ARRAY_BUFFER_TYPE,
+ JS_TYPED_ARRAY_TYPE,
+ JS_DATA_VIEW_TYPE,
JS_SET_TYPE,
JS_MAP_TYPE,
JS_WEAK_MAP_TYPE,
+ JS_WEAK_SET_TYPE,
JS_REGEXP_TYPE,
@@ -632,8 +784,11 @@ enum InstanceType {
// Pseudo-types
FIRST_TYPE = 0x0,
LAST_TYPE = JS_FUNCTION_TYPE,
- INVALID_TYPE = FIRST_TYPE - 1,
- FIRST_NONSTRING_TYPE = MAP_TYPE,
+ FIRST_NAME_TYPE = FIRST_TYPE,
+ LAST_NAME_TYPE = SYMBOL_TYPE,
+ FIRST_UNIQUE_NAME_TYPE = INTERNALIZED_STRING_TYPE,
+ LAST_UNIQUE_NAME_TYPE = SYMBOL_TYPE,
+ FIRST_NONSTRING_TYPE = SYMBOL_TYPE,
// Boundaries for testing for an external array.
FIRST_EXTERNAL_ARRAY_TYPE = EXTERNAL_BYTE_ARRAY_TYPE,
LAST_EXTERNAL_ARRAY_TYPE = EXTERNAL_PIXEL_ARRAY_TYPE,
@@ -679,7 +834,7 @@ STATIC_CHECK(FOREIGN_TYPE == Internals::kForeignType);
V(DICTIONARY_PROPERTIES_SUB_TYPE) \
V(MAP_CODE_CACHE_SUB_TYPE) \
V(SCOPE_INFO_SUB_TYPE) \
- V(SYMBOL_TABLE_SUB_TYPE) \
+ V(STRING_TABLE_SUB_TYPE) \
V(DESCRIPTOR_ARRAY_SUB_TYPE) \
V(TRANSITION_ARRAY_SUB_TYPE)
@@ -710,14 +865,16 @@ enum CompareResult {
inline void set_##name(type* value, \
WriteBarrierMode mode = UPDATE_WRITE_BARRIER); \
-
class AccessorPair;
+class AllocationSite;
+class AllocationSiteContext;
class DictionaryElementsAccessor;
class ElementsAccessor;
class Failure;
class FixedArrayBase;
class ObjectVisitor;
class StringStream;
+class Type;
struct ValueInfo : public Malloced {
ValueInfo() : type(FIRST_TYPE), ptr(NULL), str(NULL), number(0) { }
@@ -729,7 +886,7 @@ struct ValueInfo : public Malloced {
// A template-ized version of the IsXXX functions.
-template <class C> static inline bool Is(Object* obj);
+template <class C> inline bool Is(Object* obj);
#ifdef VERIFY_HEAP
#define DECLARE_VERIFIER(Name) void Name##Verify();
@@ -737,6 +894,12 @@ template <class C> static inline bool Is(Object* obj);
#define DECLARE_VERIFIER(Name)
#endif
+#ifdef OBJECT_PRINT
+#define DECLARE_PRINTER(Name) void Name##Print(FILE* out = stdout);
+#else
+#define DECLARE_PRINTER(Name)
+#endif
+
class MaybeObject BASE_EMBEDDED {
public:
inline bool IsFailure();
@@ -744,6 +907,7 @@ class MaybeObject BASE_EMBEDDED {
inline bool IsOutOfMemory();
inline bool IsException();
INLINE(bool IsTheHole());
+ INLINE(bool IsUninitialized());
inline bool ToObject(Object** obj) {
if (IsFailure()) return false;
*obj = reinterpret_cast<Object*>(this);
@@ -754,7 +918,9 @@ class MaybeObject BASE_EMBEDDED {
return reinterpret_cast<Failure*>(this);
}
inline Object* ToObjectUnchecked() {
- ASSERT(!IsFailure());
+ // TODO(jkummerow): Turn this back into an ASSERT when we can be certain
+ // that it never fires in Release mode in the wild.
+ CHECK(!IsFailure());
return reinterpret_cast<Object*>(this);
}
inline Object* ToObjectChecked() {
@@ -769,15 +935,18 @@ class MaybeObject BASE_EMBEDDED {
return true;
}
+ template<typename T>
+ inline bool ToHandle(Handle<T>* obj, Isolate* isolate) {
+ if (IsFailure()) return false;
+ *obj = handle(T::cast(reinterpret_cast<Object*>(this)), isolate);
+ return true;
+ }
+
#ifdef OBJECT_PRINT
// Prints this object with details.
- inline void Print() {
- Print(stdout);
- }
- inline void PrintLn() {
- PrintLn(stdout);
- }
+ void Print();
void Print(FILE* out);
+ void PrintLn();
void PrintLn(FILE* out);
#endif
#ifdef VERIFY_HEAP
@@ -794,8 +963,9 @@ class MaybeObject BASE_EMBEDDED {
#define HEAP_OBJECT_TYPE_LIST(V) \
V(HeapNumber) \
+ V(Name) \
+ V(UniqueName) \
V(String) \
- V(Symbol) \
V(SeqString) \
V(ExternalString) \
V(ConsString) \
@@ -803,7 +973,9 @@ class MaybeObject BASE_EMBEDDED {
V(ExternalTwoByteString) \
V(ExternalAsciiString) \
V(SeqTwoByteString) \
- V(SeqAsciiString) \
+ V(SeqOneByteString) \
+ V(InternalizedString) \
+ V(Symbol) \
\
V(ExternalArray) \
V(ExternalByteArray) \
@@ -820,15 +992,18 @@ class MaybeObject BASE_EMBEDDED {
V(JSReceiver) \
V(JSObject) \
V(JSContextExtensionObject) \
+ V(JSGeneratorObject) \
V(JSModule) \
V(Map) \
V(DescriptorArray) \
V(TransitionArray) \
V(DeoptimizationInputData) \
V(DeoptimizationOutputData) \
+ V(DependentCode) \
V(TypeFeedbackCells) \
V(FixedArray) \
V(FixedDoubleArray) \
+ V(ConstantPoolArray) \
V(Context) \
V(NativeContext) \
V(ScopeInfo) \
@@ -843,15 +1018,21 @@ class MaybeObject BASE_EMBEDDED {
V(Foreign) \
V(Boolean) \
V(JSArray) \
+ V(JSArrayBuffer) \
+ V(JSArrayBufferView) \
+ V(JSTypedArray) \
+ V(JSDataView) \
V(JSProxy) \
V(JSFunctionProxy) \
V(JSSet) \
V(JSMap) \
+ V(JSWeakCollection) \
V(JSWeakMap) \
+ V(JSWeakSet) \
V(JSRegExp) \
V(HashTable) \
V(Dictionary) \
- V(SymbolTable) \
+ V(StringTable) \
V(JSFunctionResultCache) \
V(NormalizedMapCache) \
V(CompilationCacheTable) \
@@ -865,11 +1046,305 @@ class MaybeObject BASE_EMBEDDED {
V(JSGlobalProxy) \
V(UndetectableObject) \
V(AccessCheckNeeded) \
- V(JSGlobalPropertyCell) \
+ V(Cell) \
+ V(PropertyCell) \
+ V(ObjectHashTable) \
+ V(WeakHashTable)
+
+
+#define ERROR_MESSAGES_LIST(V) \
+ V(kNoReason, "no reason") \
+ \
+ V(k32BitValueInRegisterIsNotZeroExtended, \
+ "32 bit value in register is not zero-extended") \
+ V(kAlignmentMarkerExpected, "alignment marker expected") \
+ V(kAllocationIsNotDoubleAligned, "Allocation is not double aligned") \
+ V(kAPICallReturnedInvalidObject, "API call returned invalid object") \
+ V(kArgumentsObjectValueInATestContext, \
+ "arguments object value in a test context") \
+ V(kArrayBoilerplateCreationFailed, "array boilerplate creation failed") \
+ V(kArrayIndexConstantValueTooBig, "array index constant value too big") \
+ V(kAssignmentToArguments, "assignment to arguments") \
+ V(kAssignmentToLetVariableBeforeInitialization, \
+ "assignment to let variable before initialization") \
+ V(kAssignmentToLOOKUPVariable, "assignment to LOOKUP variable") \
+ V(kAssignmentToParameterFunctionUsesArgumentsObject, \
+ "assignment to parameter, function uses arguments object") \
+ V(kAssignmentToParameterInArgumentsObject, \
+ "assignment to parameter in arguments object") \
+ V(kAttemptToUseUndefinedCache, "Attempt to use undefined cache") \
+ V(kBadValueContextForArgumentsObjectValue, \
+ "bad value context for arguments object value") \
+ V(kBadValueContextForArgumentsValue, \
+ "bad value context for arguments value") \
+ V(kBailedOutDueToDependencyChange, "bailed out due to dependency change") \
+ V(kBailoutWasNotPrepared, "bailout was not prepared") \
+ V(kBinaryStubGenerateFloatingPointCode, \
+ "BinaryStub_GenerateFloatingPointCode") \
+ V(kBothRegistersWereSmisInSelectNonSmi, \
+ "Both registers were smis in SelectNonSmi") \
+ V(kCallToAJavaScriptRuntimeFunction, \
+ "call to a JavaScript runtime function") \
+ V(kCannotTranslatePositionInChangedArea, \
+ "Cannot translate position in changed area") \
+ V(kCodeGenerationFailed, "code generation failed") \
+ V(kCodeObjectNotProperlyPatched, "code object not properly patched") \
+ V(kCompoundAssignmentToLookupSlot, "compound assignment to lookup slot") \
+ V(kContextAllocatedArguments, "context-allocated arguments") \
+ V(kDebuggerIsActive, "debugger is active") \
+ V(kDebuggerStatement, "DebuggerStatement") \
+ V(kDeclarationInCatchContext, "Declaration in catch context") \
+ V(kDeclarationInWithContext, "Declaration in with context") \
+ V(kDefaultNaNModeNotSet, "Default NaN mode not set") \
+ V(kDeleteWithGlobalVariable, "delete with global variable") \
+ V(kDeleteWithNonGlobalVariable, "delete with non-global variable") \
+ V(kDestinationOfCopyNotAligned, "Destination of copy not aligned") \
+ V(kDontDeleteCellsCannotContainTheHole, \
+ "DontDelete cells can't contain the hole") \
+ V(kDoPushArgumentNotImplementedForDoubleType, \
+ "DoPushArgument not implemented for double type") \
+ V(kEmitLoadRegisterUnsupportedDoubleImmediate, \
+ "EmitLoadRegister: Unsupported double immediate") \
+ V(kEval, "eval") \
+ V(kExpected0AsASmiSentinel, "Expected 0 as a Smi sentinel") \
+ V(kExpectedAlignmentMarker, "expected alignment marker") \
+ V(kExpectedAllocationSiteInCell, \
+ "Expected AllocationSite in property cell") \
+ V(kExpectedPropertyCellInRegisterA2, \
+ "Expected property cell in register a2") \
+ V(kExpectedPropertyCellInRegisterEbx, \
+ "Expected property cell in register ebx") \
+ V(kExpectedPropertyCellInRegisterRbx, \
+ "Expected property cell in register rbx") \
+ V(kExpectingAlignmentForCopyBytes, \
+ "Expecting alignment for CopyBytes") \
+ V(kExportDeclaration, "Export declaration") \
+ V(kExternalStringExpectedButNotFound, \
+ "external string expected, but not found") \
+ V(kFailedBailedOutLastTime, "failed/bailed out last time") \
+ V(kForInStatementIsNotFastCase, "ForInStatement is not fast case") \
+ V(kForInStatementOptimizationIsDisabled, \
+ "ForInStatement optimization is disabled") \
+ V(kForInStatementWithNonLocalEachVariable, \
+ "ForInStatement with non-local each variable") \
+ V(kForOfStatement, "ForOfStatement") \
+ V(kFrameIsExpectedToBeAligned, "frame is expected to be aligned") \
+ V(kFunctionCallsEval, "function calls eval") \
+ V(kFunctionIsAGenerator, "function is a generator") \
+ V(kFunctionWithIllegalRedeclaration, "function with illegal redeclaration") \
+ V(kGeneratedCodeIsTooLarge, "Generated code is too large") \
+ V(kGeneratorFailedToResume, "Generator failed to resume") \
+ V(kGenerator, "generator") \
+ V(kGlobalFunctionsMustHaveInitialMap, \
+ "Global functions must have initial map") \
+ V(kHeapNumberMapRegisterClobbered, "HeapNumberMap register clobbered") \
+ V(kImportDeclaration, "Import declaration") \
+ V(kImproperObjectOnPrototypeChainForStore, \
+ "improper object on prototype chain for store") \
+ V(kIndexIsNegative, "Index is negative") \
+ V(kIndexIsTooLarge, "Index is too large") \
+ V(kInlinedRuntimeFunctionClassOf, "inlined runtime function: ClassOf") \
+ V(kInlinedRuntimeFunctionFastAsciiArrayJoin, \
+ "inlined runtime function: FastAsciiArrayJoin") \
+ V(kInlinedRuntimeFunctionGeneratorNext, \
+ "inlined runtime function: GeneratorNext") \
+ V(kInlinedRuntimeFunctionGeneratorThrow, \
+ "inlined runtime function: GeneratorThrow") \
+ V(kInlinedRuntimeFunctionGetFromCache, \
+ "inlined runtime function: GetFromCache") \
+ V(kInlinedRuntimeFunctionIsNonNegativeSmi, \
+ "inlined runtime function: IsNonNegativeSmi") \
+ V(kInlinedRuntimeFunctionIsRegExpEquivalent, \
+ "inlined runtime function: IsRegExpEquivalent") \
+ V(kInlinedRuntimeFunctionIsStringWrapperSafeForDefaultValueOf, \
+ "inlined runtime function: IsStringWrapperSafeForDefaultValueOf") \
+ V(kInliningBailedOut, "inlining bailed out") \
+ V(kInputGPRIsExpectedToHaveUpper32Cleared, \
+ "input GPR is expected to have upper32 cleared") \
+ V(kInstanceofStubUnexpectedCallSiteCacheCheck, \
+ "InstanceofStub unexpected call site cache (check)") \
+ V(kInstanceofStubUnexpectedCallSiteCacheCmp1, \
+ "InstanceofStub unexpected call site cache (cmp 1)") \
+ V(kInstanceofStubUnexpectedCallSiteCacheCmp2, \
+ "InstanceofStub unexpected call site cache (cmp 2)") \
+ V(kInstanceofStubUnexpectedCallSiteCacheMov, \
+ "InstanceofStub unexpected call site cache (mov)") \
+ V(kInteger32ToSmiFieldWritingToNonSmiLocation, \
+ "Integer32ToSmiField writing to non-smi location") \
+ V(kInvalidCaptureReferenced, "Invalid capture referenced") \
+ V(kInvalidElementsKindForInternalArrayOrInternalPackedArray, \
+ "Invalid ElementsKind for InternalArray or InternalPackedArray") \
+ V(kInvalidHandleScopeLevel, "Invalid HandleScope level") \
+ V(kInvalidLeftHandSideInAssignment, "invalid left-hand side in assignment") \
+ V(kInvalidLhsInCompoundAssignment, "invalid lhs in compound assignment") \
+ V(kInvalidLhsInCountOperation, "invalid lhs in count operation") \
+ V(kInvalidMinLength, "Invalid min_length") \
+ V(kJSGlobalObjectNativeContextShouldBeANativeContext, \
+ "JSGlobalObject::native_context should be a native context") \
+ V(kJSGlobalProxyContextShouldNotBeNull, \
+ "JSGlobalProxy::context() should not be null") \
+ V(kJSObjectWithFastElementsMapHasSlowElements, \
+ "JSObject with fast elements map has slow elements") \
+ V(kLetBindingReInitialization, "Let binding re-initialization") \
+ V(kLiveBytesCountOverflowChunkSize, "Live Bytes Count overflow chunk size") \
+ V(kLiveEditFrameDroppingIsNotSupportedOnArm, \
+ "LiveEdit frame dropping is not supported on arm") \
+ V(kLiveEditFrameDroppingIsNotSupportedOnMips, \
+ "LiveEdit frame dropping is not supported on mips") \
+ V(kLiveEdit, "LiveEdit") \
+ V(kLookupVariableInCountOperation, \
+ "lookup variable in count operation") \
+ V(kMapIsNoLongerInEax, "Map is no longer in eax") \
+ V(kModuleDeclaration, "Module declaration") \
+ V(kModuleLiteral, "Module literal") \
+ V(kModulePath, "Module path") \
+ V(kModuleStatement, "Module statement") \
+ V(kModuleVariable, "Module variable") \
+ V(kModuleUrl, "Module url") \
+ V(kNativeFunctionLiteral, "Native function literal") \
+ V(kNoCasesLeft, "no cases left") \
+ V(kNoEmptyArraysHereInEmitFastAsciiArrayJoin, \
+ "No empty arrays here in EmitFastAsciiArrayJoin") \
+ V(kNonInitializerAssignmentToConst, \
+ "non-initializer assignment to const") \
+ V(kNonSmiIndex, "Non-smi index") \
+ V(kNonSmiKeyInArrayLiteral, "Non-smi key in array literal") \
+ V(kNonSmiValue, "Non-smi value") \
+ V(kNotEnoughVirtualRegistersForValues, \
+ "not enough virtual registers for values") \
+ V(kNotEnoughSpillSlotsForOsr, \
+ "not enough spill slots for OSR") \
+ V(kNotEnoughVirtualRegistersRegalloc, \
+ "not enough virtual registers (regalloc)") \
+ V(kObjectFoundInSmiOnlyArray, "object found in smi-only array") \
+ V(kObjectLiteralWithComplexProperty, \
+ "Object literal with complex property") \
+ V(kOddballInStringTableIsNotUndefinedOrTheHole, \
+ "oddball in string table is not undefined or the hole") \
+ V(kOperandIsASmiAndNotAName, "Operand is a smi and not a name") \
+ V(kOperandIsASmiAndNotAString, "Operand is a smi and not a string") \
+ V(kOperandIsASmi, "Operand is a smi") \
+ V(kOperandIsNotAName, "Operand is not a name") \
+ V(kOperandIsNotANumber, "Operand is not a number") \
+ V(kOperandIsNotASmi, "Operand is not a smi") \
+ V(kOperandIsNotAString, "Operand is not a string") \
+ V(kOperandIsNotSmi, "Operand is not smi") \
+ V(kOperandNotANumber, "Operand not a number") \
+ V(kOptimizedTooManyTimes, "optimized too many times") \
+ V(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister, \
+ "Out of virtual registers while trying to allocate temp register") \
+ V(kParseScopeError, "parse/scope error") \
+ V(kPossibleDirectCallToEval, "possible direct call to eval") \
+ V(kPropertyAllocationCountFailed, "Property allocation count failed") \
+ V(kReceivedInvalidReturnAddress, "Received invalid return address") \
+ V(kReferenceToAVariableWhichRequiresDynamicLookup, \
+ "reference to a variable which requires dynamic lookup") \
+ V(kReferenceToGlobalLexicalVariable, \
+ "reference to global lexical variable") \
+ V(kReferenceToUninitializedVariable, "reference to uninitialized variable") \
+ V(kRegisterDidNotMatchExpectedRoot, "Register did not match expected root") \
+ V(kRegisterWasClobbered, "register was clobbered") \
+ V(kScopedBlock, "ScopedBlock") \
+ V(kSmiAdditionOverflow, "Smi addition overflow") \
+ V(kSmiSubtractionOverflow, "Smi subtraction overflow") \
+ V(kStackFrameTypesMustMatch, "stack frame types must match") \
+ V(kSwitchStatementMixedOrNonLiteralSwitchLabels, \
+ "SwitchStatement: mixed or non-literal switch labels") \
+ V(kSwitchStatementTooManyClauses, "SwitchStatement: too many clauses") \
+ V(kTheInstructionShouldBeALui, "The instruction should be a lui") \
+ V(kTheInstructionShouldBeAnOri, "The instruction should be an ori") \
+ V(kTheInstructionToPatchShouldBeALoadFromPc, \
+ "The instruction to patch should be a load from pc") \
+ V(kTheInstructionToPatchShouldBeALui, \
+ "The instruction to patch should be a lui") \
+ V(kTheInstructionToPatchShouldBeAnOri, \
+ "The instruction to patch should be an ori") \
+ V(kTooManyParametersLocals, "too many parameters/locals") \
+ V(kTooManyParameters, "too many parameters") \
+ V(kTooManySpillSlotsNeededForOSR, "Too many spill slots needed for OSR") \
+ V(kToOperandIsDoubleRegisterUnimplemented, \
+ "ToOperand IsDoubleRegister unimplemented") \
+ V(kToOperandUnsupportedDoubleImmediate, \
+ "ToOperand Unsupported double immediate") \
+ V(kTryCatchStatement, "TryCatchStatement") \
+ V(kTryFinallyStatement, "TryFinallyStatement") \
+ V(kUnableToEncodeValueAsSmi, "Unable to encode value as smi") \
+ V(kUnalignedAllocationInNewSpace, "Unaligned allocation in new space") \
+ V(kUndefinedValueNotLoaded, "Undefined value not loaded") \
+ V(kUndoAllocationOfNonAllocatedMemory, \
+ "Undo allocation of non allocated memory") \
+ V(kUnexpectedAllocationTop, "Unexpected allocation top") \
+ V(kUnexpectedElementsKindInArrayConstructor, \
+ "Unexpected ElementsKind in array constructor") \
+ V(kUnexpectedFallthroughFromCharCodeAtSlowCase, \
+ "Unexpected fallthrough from CharCodeAt slow case") \
+ V(kUnexpectedFallthroughFromCharFromCodeSlowCase, \
+ "Unexpected fallthrough from CharFromCode slow case") \
+ V(kUnexpectedFallThroughFromStringComparison, \
+ "Unexpected fall-through from string comparison") \
+ V(kUnexpectedFallThroughInBinaryStubGenerateFloatingPointCode, \
+ "Unexpected fall-through in BinaryStub_GenerateFloatingPointCode") \
+ V(kUnexpectedFallthroughToCharCodeAtSlowCase, \
+ "Unexpected fallthrough to CharCodeAt slow case") \
+ V(kUnexpectedFallthroughToCharFromCodeSlowCase, \
+ "Unexpected fallthrough to CharFromCode slow case") \
+ V(kUnexpectedFPUStackDepthAfterInstruction, \
+ "Unexpected FPU stack depth after instruction") \
+ V(kUnexpectedInitialMapForArrayFunction1, \
+ "Unexpected initial map for Array function (1)") \
+ V(kUnexpectedInitialMapForArrayFunction2, \
+ "Unexpected initial map for Array function (2)") \
+ V(kUnexpectedInitialMapForArrayFunction, \
+ "Unexpected initial map for Array function") \
+ V(kUnexpectedInitialMapForInternalArrayFunction, \
+ "Unexpected initial map for InternalArray function") \
+ V(kUnexpectedLevelAfterReturnFromApiCall, \
+ "Unexpected level after return from api call") \
+ V(kUnexpectedNumberOfPreAllocatedPropertyFields, \
+ "Unexpected number of pre-allocated property fields") \
+ V(kUnexpectedStringFunction, "Unexpected String function") \
+ V(kUnexpectedStringType, "Unexpected string type") \
+ V(kUnexpectedStringWrapperInstanceSize, \
+ "Unexpected string wrapper instance size") \
+ V(kUnexpectedTypeForRegExpDataFixedArrayExpected, \
+ "Unexpected type for RegExp data, FixedArray expected") \
+ V(kUnexpectedUnusedPropertiesOfStringWrapper, \
+ "Unexpected unused properties of string wrapper") \
+ V(kUninitializedKSmiConstantRegister, "Uninitialized kSmiConstantRegister") \
+ V(kUnknown, "unknown") \
+ V(kUnsupportedConstCompoundAssignment, \
+ "unsupported const compound assignment") \
+ V(kUnsupportedCountOperationWithConst, \
+ "unsupported count operation with const") \
+ V(kUnsupportedDoubleImmediate, "unsupported double immediate") \
+ V(kUnsupportedLetCompoundAssignment, "unsupported let compound assignment") \
+ V(kUnsupportedLookupSlotInDeclaration, \
+ "unsupported lookup slot in declaration") \
+ V(kUnsupportedNonPrimitiveCompare, "Unsupported non-primitive compare") \
+ V(kUnsupportedPhiUseOfArguments, "Unsupported phi use of arguments") \
+ V(kUnsupportedPhiUseOfConstVariable, \
+ "Unsupported phi use of const variable") \
+ V(kUnsupportedTaggedImmediate, "unsupported tagged immediate") \
+ V(kVariableResolvedToWithContext, "Variable resolved to with context") \
+ V(kWeShouldNotHaveAnEmptyLexicalContext, \
+ "we should not have an empty lexical context") \
+ V(kWithStatement, "WithStatement") \
+ V(kWrongAddressOrValuePassedToRecordWrite, \
+ "Wrong address or value passed to RecordWrite") \
+ V(kYield, "Yield")
+
+
+#define ERROR_MESSAGES_CONSTANTS(C, T) C,
+enum BailoutReason {
+ ERROR_MESSAGES_LIST(ERROR_MESSAGES_CONSTANTS)
+ kLastErrorMessage
+};
+#undef ERROR_MESSAGES_CONSTANTS
+
+
+const char* GetBailoutReason(BailoutReason reason);
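ERROR_MESSAGES_LIST is a classic X-macro: the same list expands once into the BailoutReason enum here and once into the string table inside GetBailoutReason in objects.cc, so the constants and their human-readable texts cannot drift apart. A standalone sketch of the pattern with hypothetical names:

#include <cstdio>

#define DEMO_REASONS(V)                            \
  V(kDemoNoReason, "no reason")                    \
  V(kDemoTooManyParameters, "too many parameters")

#define DEMO_CONSTANT(C, T) C,
enum DemoReason { DEMO_REASONS(DEMO_CONSTANT) kDemoLastReason };
#undef DEMO_CONSTANT

static const char* DemoGetReason(DemoReason reason) {
#define DEMO_TEXT(C, T) T,
  static const char* texts[] = { DEMO_REASONS(DEMO_TEXT) };
#undef DEMO_TEXT
  return texts[reason];
}

int main() {
  std::printf("%s\n", DemoGetReason(kDemoTooManyParameters));  // prints "too many parameters"
  return 0;
}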
-class JSReceiver;
-
// Object is the abstract superclass for all classes in the
// object hierarchy.
// Object does not use any virtual functions to avoid the
@@ -887,6 +1362,8 @@ class Object : public MaybeObject {
#undef IS_TYPE_FUNCTION_DECL
inline bool IsFixedArrayBase();
+ inline bool IsExternal();
+ inline bool IsAccessorInfo();
// Returns true if this object is an instance of the specified
// function template.
@@ -904,6 +1381,7 @@ class Object : public MaybeObject {
INLINE(bool IsUndefined());
INLINE(bool IsNull());
INLINE(bool IsTheHole()); // Shadows MaybeObject's implementation.
+ INLINE(bool IsUninitialized());
INLINE(bool IsTrue());
INLINE(bool IsFalse());
inline bool IsArgumentsMarker();
@@ -915,6 +1393,49 @@ class Object : public MaybeObject {
// Extract the number.
inline double Number();
inline bool IsNaN();
+ bool ToInt32(int32_t* value);
+ bool ToUint32(uint32_t* value);
+
+ // Indicates whether OptimalRepresentation can do its work, or whether it
+ // always has to return Representation::Tagged().
+ enum ValueType {
+ OPTIMAL_REPRESENTATION,
+ FORCE_TAGGED
+ };
+
+ inline Representation OptimalRepresentation(
+ ValueType type = OPTIMAL_REPRESENTATION) {
+ if (!FLAG_track_fields) return Representation::Tagged();
+ if (type == FORCE_TAGGED) return Representation::Tagged();
+ if (IsSmi()) {
+ return Representation::Smi();
+ } else if (FLAG_track_double_fields && IsHeapNumber()) {
+ return Representation::Double();
+ } else if (FLAG_track_computed_fields && IsUninitialized()) {
+ return Representation::None();
+ } else if (FLAG_track_heap_object_fields) {
+ ASSERT(IsHeapObject());
+ return Representation::HeapObject();
+ } else {
+ return Representation::Tagged();
+ }
+ }
+
+ inline bool FitsRepresentation(Representation representation) {
+ if (FLAG_track_fields && representation.IsNone()) {
+ return false;
+ } else if (FLAG_track_fields && representation.IsSmi()) {
+ return IsSmi();
+ } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ return IsNumber();
+ } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
+ return IsHeapObject();
+ }
+ return true;
+ }
+
+ inline MaybeObject* AllocateNewStorageFor(Heap* heap,
+ Representation representation);
// Returns true if the object is of the correct type to be used as an
// implementation of a JSObject's elements.
@@ -922,8 +1443,8 @@ class Object : public MaybeObject {
inline bool HasSpecificClassOf(String* name);
- MUST_USE_RESULT MaybeObject* ToObject(); // ECMA-262 9.9.
- Object* ToBoolean(); // ECMA-262 9.2.
+ MUST_USE_RESULT MaybeObject* ToObject(Isolate* isolate); // ECMA-262 9.9.
+ bool BooleanValue(); // ECMA-262 9.2.
// Convert to a JSObject if needed.
// native_context is used when creating wrapper object.
@@ -933,41 +1454,60 @@ class Object : public MaybeObject {
// Failure is returned otherwise.
MUST_USE_RESULT inline MaybeObject* ToSmi();
- void Lookup(String* name, LookupResult* result);
+ void Lookup(Name* name, LookupResult* result);
// Property access.
- MUST_USE_RESULT inline MaybeObject* GetProperty(String* key);
+ MUST_USE_RESULT inline MaybeObject* GetProperty(Name* key);
MUST_USE_RESULT inline MaybeObject* GetProperty(
- String* key,
+ Name* key,
PropertyAttributes* attributes);
+
+ // TODO(yangguo): this should eventually replace the non-handlified version.
+ static Handle<Object> GetPropertyWithReceiver(Handle<Object> object,
+ Handle<Object> receiver,
+ Handle<Name> name,
+ PropertyAttributes* attributes);
MUST_USE_RESULT MaybeObject* GetPropertyWithReceiver(
Object* receiver,
- String* key,
+ Name* key,
PropertyAttributes* attributes);
static Handle<Object> GetProperty(Handle<Object> object,
+ Handle<Name> key);
+ static Handle<Object> GetProperty(Handle<Object> object,
Handle<Object> receiver,
LookupResult* result,
- Handle<String> key,
+ Handle<Name> key,
PropertyAttributes* attributes);
+ MUST_USE_RESULT static MaybeObject* GetPropertyOrFail(
+ Handle<Object> object,
+ Handle<Object> receiver,
+ LookupResult* result,
+ Handle<Name> key,
+ PropertyAttributes* attributes);
+
MUST_USE_RESULT MaybeObject* GetProperty(Object* receiver,
LookupResult* result,
- String* key,
+ Name* key,
PropertyAttributes* attributes);
MUST_USE_RESULT MaybeObject* GetPropertyWithDefinedGetter(Object* receiver,
JSReceiver* getter);
- static Handle<Object> GetElement(Handle<Object> object, uint32_t index);
- MUST_USE_RESULT inline MaybeObject* GetElement(uint32_t index);
+ static Handle<Object> GetElement(Isolate* isolate,
+ Handle<Object> object,
+ uint32_t index);
+ MUST_USE_RESULT inline MaybeObject* GetElement(Isolate* isolate,
+ uint32_t index);
// For use when we know that no exception can be thrown.
- inline Object* GetElementNoExceptionThrown(uint32_t index);
- MUST_USE_RESULT MaybeObject* GetElementWithReceiver(Object* receiver,
+ inline Object* GetElementNoExceptionThrown(Isolate* isolate, uint32_t index);
+ MUST_USE_RESULT MaybeObject* GetElementWithReceiver(Isolate* isolate,
+ Object* receiver,
uint32_t index);
// Return the object's prototype (might be Heap::null_value()).
- Object* GetPrototype();
+ Object* GetPrototype(Isolate* isolate);
// Returns the permanent hash code associated with this object depending on
// the actual object type. Might return a failure in case no hash was
@@ -995,10 +1535,7 @@ class Object : public MaybeObject {
inline void VerifyApiCallResultType();
// Prints this object without details.
- inline void ShortPrint() {
- ShortPrint(stdout);
- }
- void ShortPrint(FILE* out);
+ void ShortPrint(FILE* out = stdout);
// Prints this object without details to a message accumulator.
void ShortPrint(StringStream* accumulator);
@@ -1037,10 +1574,7 @@ class Smi: public Object {
static inline Smi* cast(Object* object);
// Dispatched behavior.
- inline void SmiPrint() {
- SmiPrint(stdout);
- }
- void SmiPrint(FILE* out);
+ void SmiPrint(FILE* out = stdout);
void SmiPrint(StringStream* accumulator);
DECLARE_VERIFIER(Smi)
@@ -1104,15 +1638,14 @@ class Failure: public MaybeObject {
static inline Failure* RetryAfterGC(); // NEW_SPACE
static inline Failure* Exception();
static inline Failure* InternalError();
- static inline Failure* OutOfMemoryException();
+ // TODO(jkummerow): The value is temporary instrumentation. Remove it
+ // when it has served its purpose.
+ static inline Failure* OutOfMemoryException(intptr_t value);
// Casting.
static inline Failure* cast(MaybeObject* object);
// Dispatched behavior.
- inline void FailurePrint() {
- FailurePrint(stdout);
- }
- void FailurePrint(FILE* out);
+ void FailurePrint(FILE* out = stdout);
void FailurePrint(StringStream* accumulator);
DECLARE_VERIFIER(Failure)
@@ -1193,9 +1726,7 @@ class HeapObject: public Object {
// The Heap the object was allocated in. Used also to access Isolate.
inline Heap* GetHeap();
- // Convenience method to get current isolate. This method can be
- // accessed only when its result is the same as
- // Isolate::Current(), it ASSERTs this. See also comment for GetHeap.
+ // Convenience method to get current isolate.
inline Isolate* GetIsolate();
// Converts an address to a HeapObject pointer.
@@ -1227,25 +1758,30 @@ class HeapObject: public Object {
// during marking GC.
static inline Object** RawField(HeapObject* obj, int offset);
+ // Adds the |code| object related to |name| to the code cache of this map. If
+ // this map is a dictionary map that is shared, the map is copied and installed
+ // onto the object.
+ static void UpdateMapCodeCache(Handle<HeapObject> object,
+ Handle<Name> name,
+ Handle<Code> code);
+
// Casting.
static inline HeapObject* cast(Object* obj);
// Return the write barrier mode for this. Callers of this function
- // must be able to present a reference to an AssertNoAllocation
+ // must be able to present a reference to a DisallowHeapAllocation
// object as a sign that they are not going to use this function
// from code that allocates and thus invalidates the returned write
// barrier mode.
- inline WriteBarrierMode GetWriteBarrierMode(const AssertNoAllocation&);
+ inline WriteBarrierMode GetWriteBarrierMode(
+ const DisallowHeapAllocation& promise);
// Dispatched behavior.
void HeapObjectShortPrint(StringStream* accumulator);
#ifdef OBJECT_PRINT
- inline void HeapObjectPrint() {
- HeapObjectPrint(stdout);
- }
- void HeapObjectPrint(FILE* out);
void PrintHeader(FILE* out, const char* id);
#endif
+ DECLARE_PRINTER(HeapObject)
DECLARE_VERIFIER(HeapObject)
#ifdef VERIFY_HEAP
inline void VerifyObjectField(int offset);
@@ -1327,11 +1863,9 @@ class HeapNumber: public HeapObject {
static inline HeapNumber* cast(Object* obj);
// Dispatched behavior.
- Object* HeapNumberToBoolean();
- inline void HeapNumberPrint() {
- HeapNumberPrint(stdout);
- }
- void HeapNumberPrint(FILE* out);
+ bool HeapNumberBooleanValue();
+
+ void HeapNumberPrint(FILE* out = stdout);
void HeapNumberPrint(StringStream* accumulator);
DECLARE_VERIFIER(HeapNumber)
@@ -1355,6 +1889,8 @@ class HeapNumber: public HeapObject {
static const int kExponentBits = 11;
static const int kExponentBias = 1023;
static const int kExponentShift = 20;
+ static const int kInfinityOrNanExponent =
+ (kExponentMask >> kExponentShift) - kExponentBias;
static const int kMantissaBitsInTopWord = 20;
static const int kNonMantissaBitsInTopWord = 12;
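The new kInfinityOrNanExponent constant is the unbiased exponent value that IEEE-754 doubles reserve for infinities and NaNs. Assuming the standard layout (kExponentMask covering the eleven exponent bits of the high word; its definition sits outside this hunk), the arithmetic works out to 0x7FF - 1023 = 1024:

#include <cassert>

int main() {
  const int kExponentMask  = 0x7FF00000;  // exponent bits in the high word of a double
  const int kExponentShift = 20;
  const int kExponentBias  = 1023;
  assert((kExponentMask >> kExponentShift) - kExponentBias == 1024);
  return 0;
}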
@@ -1414,38 +1950,33 @@ class JSReceiver: public HeapObject {
// Casting.
static inline JSReceiver* cast(Object* obj);
+ // Implementation of [[Put]], ECMA-262 5th edition, section 8.12.5.
static Handle<Object> SetProperty(Handle<JSReceiver> object,
- Handle<String> key,
+ Handle<Name> key,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode);
- // Can cause GC.
- MUST_USE_RESULT MaybeObject* SetProperty(
- String* key,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- StoreFromKeyed store_from_keyed = MAY_BE_STORE_FROM_KEYED);
- MUST_USE_RESULT MaybeObject* SetProperty(
- LookupResult* result,
- String* key,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- StoreFromKeyed store_from_keyed = MAY_BE_STORE_FROM_KEYED);
- MUST_USE_RESULT MaybeObject* SetPropertyWithDefinedSetter(JSReceiver* setter,
- Object* value);
-
- MUST_USE_RESULT MaybeObject* DeleteProperty(String* name, DeleteMode mode);
- MUST_USE_RESULT MaybeObject* DeleteElement(uint32_t index, DeleteMode mode);
-
- // Set the index'th array element.
- // Can cause GC, or return failure if GC is required.
- MUST_USE_RESULT MaybeObject* SetElement(uint32_t index,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- bool check_prototype);
+ StrictModeFlag strict_mode,
+ StoreFromKeyed store_mode =
+ MAY_BE_STORE_FROM_KEYED);
+ static Handle<Object> SetElement(Handle<JSReceiver> object,
+ uint32_t index,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ StrictModeFlag strict_mode);
+
+ // Implementation of [[HasProperty]], ECMA-262 5th edition, section 8.12.6.
+ static inline bool HasProperty(Handle<JSReceiver> object, Handle<Name> name);
+ static inline bool HasLocalProperty(Handle<JSReceiver>, Handle<Name> name);
+ static inline bool HasElement(Handle<JSReceiver> object, uint32_t index);
+ static inline bool HasLocalElement(Handle<JSReceiver> object, uint32_t index);
+
+ // Implementation of [[Delete]], ECMA-262 5th edition, section 8.12.7.
+ static Handle<Object> DeleteProperty(Handle<JSReceiver> object,
+ Handle<Name> name,
+ DeleteMode mode = NORMAL_DELETION);
+ static Handle<Object> DeleteElement(Handle<JSReceiver> object,
+ uint32_t index,
+ DeleteMode mode = NORMAL_DELETION);
// Tests for the fast common case for property enumeration.
bool IsSimpleEnum();
@@ -1457,15 +1988,13 @@ class JSReceiver: public HeapObject {
// function that was used to instantiate the object).
String* constructor_name();
- inline PropertyAttributes GetPropertyAttribute(String* name);
+ inline PropertyAttributes GetPropertyAttribute(Name* name);
PropertyAttributes GetPropertyAttributeWithReceiver(JSReceiver* receiver,
- String* name);
- PropertyAttributes GetLocalPropertyAttribute(String* name);
+ Name* name);
+ PropertyAttributes GetLocalPropertyAttribute(Name* name);
- // Can cause a GC.
- inline bool HasProperty(String* name);
- inline bool HasLocalProperty(String* name);
- inline bool HasElement(uint32_t index);
+ inline PropertyAttributes GetElementAttribute(uint32_t index);
+ inline PropertyAttributes GetLocalElementAttribute(uint32_t index);
// Return the object's prototype (might be Heap::null_value()).
inline Object* GetPrototype();
@@ -1473,27 +2002,36 @@ class JSReceiver: public HeapObject {
// Return the constructor function (may be Heap::null_value()).
inline Object* GetConstructor();
- // Set the object's prototype (only JSReceiver and null are allowed).
- MUST_USE_RESULT MaybeObject* SetPrototype(Object* value,
- bool skip_hidden_prototypes);
-
// Retrieves a permanent object identity hash code. The undefined value might
// be returned in case no hash was created yet and OMIT_CREATION was used.
inline MUST_USE_RESULT MaybeObject* GetIdentityHash(CreationFlag flag);
// Lookup a property. If found, the result is valid and has
// detailed information.
- void LocalLookup(String* name, LookupResult* result);
- void Lookup(String* name, LookupResult* result);
+ void LocalLookup(Name* name, LookupResult* result,
+ bool search_hidden_prototypes = false);
+ void Lookup(Name* name, LookupResult* result);
protected:
Smi* GenerateIdentityHash();
+ static Handle<Object> SetPropertyWithDefinedSetter(Handle<JSReceiver> object,
+ Handle<JSReceiver> setter,
+ Handle<Object> value);
+
private:
- PropertyAttributes GetPropertyAttribute(JSReceiver* receiver,
- LookupResult* result,
- String* name,
- bool continue_search);
+ PropertyAttributes GetPropertyAttributeForResult(JSReceiver* receiver,
+ LookupResult* result,
+ Name* name,
+ bool continue_search);
+
+ static Handle<Object> SetProperty(Handle<JSReceiver> receiver,
+ LookupResult* result,
+ Handle<Name> key,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ StrictModeFlag strict_mode,
+ StoreFromKeyed store_from_keyed);
DISALLOW_IMPLICIT_CONSTRUCTORS(JSReceiver);
};
@@ -1510,7 +2048,7 @@ class JSObject: public JSReceiver {
DECL_ACCESSORS(properties, FixedArray) // Get and set fast properties.
inline void initialize_properties();
inline bool HasFastProperties();
- inline StringDictionary* property_dictionary(); // Gets slow properties.
+ inline NameDictionary* property_dictionary(); // Gets slow properties.
// [elements]: The elements (properties with names that are integers).
//
@@ -1542,6 +2080,8 @@ class JSObject: public JSReceiver {
// Returns true if an object has elements of FAST_ELEMENTS or
// FAST_SMI_ONLY_ELEMENTS.
inline bool HasFastSmiOrObjectElements();
+ // Returns true if an object has any of the fast elements kinds.
+ inline bool HasFastElements();
// Returns true if an object has elements of FAST_DOUBLE_ELEMENTS
// ElementsKind.
inline bool HasFastDoubleElements();
@@ -1564,80 +2104,95 @@ class JSObject: public JSReceiver {
bool HasDictionaryArgumentsElements();
inline SeededNumberDictionary* element_dictionary(); // Gets slow elements.
+ inline bool ShouldTrackAllocationInfo();
+
inline void set_map_and_elements(
Map* map,
FixedArrayBase* value,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
// Requires: HasFastElements().
+ static Handle<FixedArray> EnsureWritableFastElements(
+ Handle<JSObject> object);
MUST_USE_RESULT inline MaybeObject* EnsureWritableFastElements();
// Collects elements starting at index 0.
// Undefined values are placed after non-undefined values.
// Returns the number of non-undefined values.
- MUST_USE_RESULT MaybeObject* PrepareElementsForSort(uint32_t limit);
+ static Handle<Object> PrepareElementsForSort(Handle<JSObject> object,
+ uint32_t limit);
// As PrepareElementsForSort, but only on objects where elements is
// a dictionary, and it will stay a dictionary.
+ static Handle<Object> PrepareSlowElementsForSort(Handle<JSObject> object,
+ uint32_t limit);
MUST_USE_RESULT MaybeObject* PrepareSlowElementsForSort(uint32_t limit);
- MUST_USE_RESULT MaybeObject* GetPropertyWithCallback(Object* receiver,
- Object* structure,
- String* name);
+ static Handle<Object> GetPropertyWithCallback(Handle<JSObject> object,
+ Handle<Object> receiver,
+ Handle<Object> structure,
+ Handle<Name> name);
- // Can cause GC.
- MUST_USE_RESULT MaybeObject* SetPropertyForResult(LookupResult* result,
- String* key,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- StoreFromKeyed store_mode);
- MUST_USE_RESULT MaybeObject* SetPropertyWithFailedAccessCheck(
- LookupResult* result,
- String* name,
- Object* value,
- bool check_prototype,
- StrictModeFlag strict_mode);
- MUST_USE_RESULT MaybeObject* SetPropertyWithCallback(
- Object* structure,
- String* name,
- Object* value,
- JSObject* holder,
+ static Handle<Object> SetPropertyWithCallback(
+ Handle<JSObject> object,
+ Handle<Object> structure,
+ Handle<Name> name,
+ Handle<Object> value,
+ Handle<JSObject> holder,
StrictModeFlag strict_mode);
- MUST_USE_RESULT MaybeObject* SetPropertyWithInterceptor(
- String* name,
- Object* value,
+
+ static Handle<Object> SetPropertyWithInterceptor(
+ Handle<JSObject> object,
+ Handle<Name> name,
+ Handle<Object> value,
PropertyAttributes attributes,
StrictModeFlag strict_mode);
- MUST_USE_RESULT MaybeObject* SetPropertyPostInterceptor(
- String* name,
- Object* value,
+
+ static Handle<Object> SetPropertyForResult(
+ Handle<JSObject> object,
+ LookupResult* result,
+ Handle<Name> name,
+ Handle<Object> value,
PropertyAttributes attributes,
StrictModeFlag strict_mode,
- ExtensibilityCheck extensibility_check);
+ StoreFromKeyed store_mode = MAY_BE_STORE_FROM_KEYED);
static Handle<Object> SetLocalPropertyIgnoreAttributes(
Handle<JSObject> object,
- Handle<String> key,
+ Handle<Name> key,
Handle<Object> value,
- PropertyAttributes attributes);
+ PropertyAttributes attributes,
+ ValueType value_type = OPTIMAL_REPRESENTATION,
+ StoreMode mode = ALLOW_AS_CONSTANT,
+ ExtensibilityCheck extensibility_check = PERFORM_EXTENSIBILITY_CHECK);
+
+ static inline Handle<String> ExpectedTransitionKey(Handle<Map> map);
+ static inline Handle<Map> ExpectedTransitionTarget(Handle<Map> map);
// Try to follow an existing transition to a field with attributes NONE. The
// return value indicates whether the transition was successful.
- static inline bool TryTransitionToField(Handle<JSObject> object,
- Handle<String> key);
-
- inline int LastAddedFieldIndex();
+ static inline Handle<Map> FindTransitionToField(Handle<Map> map,
+ Handle<Name> key);
  // Extend the receiver with a single fast property that appears first in the
  // passed map. This also extends the property backing store if necessary.
- static void AddFastPropertyUsingMap(Handle<JSObject> object, Handle<Map> map);
- inline MUST_USE_RESULT MaybeObject* AddFastPropertyUsingMap(Map* map);
+ static void AllocateStorageForMap(Handle<JSObject> object, Handle<Map> map);
+
+ // Migrates the given object to a map whose field representations are the
+ // lowest upper bound of all known representations for that field.
+ static void MigrateInstance(Handle<JSObject> instance);
+
+ // Migrates the given object only if the target map is already available,
+ // or returns an empty handle if such a map is not yet available.
+ static Handle<Object> TryMigrateInstance(Handle<JSObject> instance);
// Can cause GC.
- MUST_USE_RESULT MaybeObject* SetLocalPropertyIgnoreAttributes(
- String* key,
+ MUST_USE_RESULT MaybeObject* SetLocalPropertyIgnoreAttributesTrampoline(
+ Name* key,
Object* value,
- PropertyAttributes attributes);
+ PropertyAttributes attributes,
+ ValueType value_type = OPTIMAL_REPRESENTATION,
+ StoreMode mode = ALLOW_AS_CONSTANT,
+ ExtensibilityCheck extensibility_check = PERFORM_EXTENSIBILITY_CHECK);
// Retrieve a value in a normalized object given a lookup result.
// Handles the special representation of JS global objects.
@@ -1645,24 +2200,18 @@ class JSObject: public JSReceiver {
// Sets the property value in a normalized object given a lookup result.
// Handles the special representation of JS global objects.
- Object* SetNormalizedProperty(LookupResult* result, Object* value);
+ static void SetNormalizedProperty(Handle<JSObject> object,
+ LookupResult* result,
+ Handle<Object> value);
// Sets the property value in a normalized object given (key, value, details).
// Handles the special representation of JS global objects.
- static Handle<Object> SetNormalizedProperty(Handle<JSObject> object,
- Handle<String> key,
- Handle<Object> value,
- PropertyDetails details);
-
- MUST_USE_RESULT MaybeObject* SetNormalizedProperty(String* name,
- Object* value,
- PropertyDetails details);
-
- // Deletes the named property in a normalized object.
- MUST_USE_RESULT MaybeObject* DeleteNormalizedProperty(String* name,
- DeleteMode mode);
+ static void SetNormalizedProperty(Handle<JSObject> object,
+ Handle<Name> key,
+ Handle<Object> value,
+ PropertyDetails details);
- MUST_USE_RESULT MaybeObject* OptimizeAsPrototype();
+ static void OptimizeAsPrototype(Handle<JSObject> object);
// Retrieve interceptors.
InterceptorInfo* GetNamedInterceptor();
@@ -1670,55 +2219,53 @@ class JSObject: public JSReceiver {
// Used from JSReceiver.
PropertyAttributes GetPropertyAttributePostInterceptor(JSObject* receiver,
- String* name,
+ Name* name,
bool continue_search);
PropertyAttributes GetPropertyAttributeWithInterceptor(JSObject* receiver,
- String* name,
+ Name* name,
bool continue_search);
PropertyAttributes GetPropertyAttributeWithFailedAccessCheck(
Object* receiver,
LookupResult* result,
- String* name,
+ Name* name,
bool continue_search);
-
+ PropertyAttributes GetElementAttributeWithReceiver(JSReceiver* receiver,
+ uint32_t index,
+ bool continue_search);
+
+ // Retrieves an AccessorPair property from the given object. Might return
+ // undefined if the property doesn't exist or is of a different kind.
+ static Handle<Object> GetAccessor(Handle<JSObject> object,
+ Handle<Name> name,
+ AccessorComponent component);
+
+ // Defines an AccessorPair property on the given object.
+ // TODO(mstarzinger): Rename to SetAccessor() and return empty handle on
+ // exception instead of letting callers check for scheduled exception.
static void DefineAccessor(Handle<JSObject> object,
- Handle<String> name,
+ Handle<Name> name,
Handle<Object> getter,
Handle<Object> setter,
- PropertyAttributes attributes);
- MUST_USE_RESULT MaybeObject* DefineAccessor(String* name,
- Object* getter,
- Object* setter,
- PropertyAttributes attributes);
- // Try to define a single accessor paying attention to map transitions.
- // Returns a JavaScript null if this was not possible and we have to use the
- // slow case. Note that we can fail due to allocations, too.
- MUST_USE_RESULT MaybeObject* DefineFastAccessor(
- String* name,
- AccessorComponent component,
- Object* accessor,
- PropertyAttributes attributes);
- Object* LookupAccessor(String* name, AccessorComponent component);
+ PropertyAttributes attributes,
+ v8::AccessControl access_control = v8::DEFAULT);
- MUST_USE_RESULT MaybeObject* DefineAccessor(AccessorInfo* info);
+ // Defines an AccessorInfo property on the given object.
+ static Handle<Object> SetAccessor(Handle<JSObject> object,
+ Handle<AccessorInfo> info);
- // Used from Object::GetProperty().
- MUST_USE_RESULT MaybeObject* GetPropertyWithFailedAccessCheck(
- Object* receiver,
- LookupResult* result,
- String* name,
- PropertyAttributes* attributes);
- MUST_USE_RESULT MaybeObject* GetPropertyWithInterceptor(
- Object* receiver,
- String* name,
+ static Handle<Object> GetPropertyWithInterceptor(
+ Handle<JSObject> object,
+ Handle<Object> receiver,
+ Handle<Name> name,
PropertyAttributes* attributes);
- MUST_USE_RESULT MaybeObject* GetPropertyPostInterceptor(
- Object* receiver,
- String* name,
+ static Handle<Object> GetPropertyPostInterceptor(
+ Handle<JSObject> object,
+ Handle<Object> receiver,
+ Handle<Name> name,
PropertyAttributes* attributes);
MUST_USE_RESULT MaybeObject* GetLocalPropertyPostInterceptor(
Object* receiver,
- String* name,
+ Name* name,
PropertyAttributes* attributes);
// Returns true if this is an instance of an api function and has
@@ -1733,7 +2280,7 @@ class JSObject: public JSReceiver {
//
// Hidden properties are not local properties of the object itself.
// Instead they are stored in an auxiliary structure kept as a local
- // property with a special name Heap::hidden_symbol(). But if the
+ // property with a special name Heap::hidden_string(). But if the
// receiver is a JSGlobalProxy then the auxiliary object is a property
// of its prototype, and if it's a detached proxy, then you can't have
// hidden properties.
@@ -1741,35 +2288,28 @@ class JSObject: public JSReceiver {
// Sets a hidden property on this object. Returns this object if successful,
// undefined if called on a detached proxy.
static Handle<Object> SetHiddenProperty(Handle<JSObject> obj,
- Handle<String> key,
+ Handle<Name> key,
Handle<Object> value);
// Returns a failure if a GC is required.
- MUST_USE_RESULT MaybeObject* SetHiddenProperty(String* key, Object* value);
- // Gets the value of a hidden property with the given key. Returns undefined
+ MUST_USE_RESULT MaybeObject* SetHiddenProperty(Name* key, Object* value);
+ // Gets the value of a hidden property with the given key. Returns the hole
// if the property doesn't exist (or if called on a detached proxy),
// otherwise returns the value set for the key.
- Object* GetHiddenProperty(String* key);
+ Object* GetHiddenProperty(Name* key);
// Deletes a hidden property. Deleting a non-existing property is
// considered successful.
- void DeleteHiddenProperty(String* key);
- // Returns true if the object has a property with the hidden symbol as name.
+ static void DeleteHiddenProperty(Handle<JSObject> object,
+ Handle<Name> key);
+ // Returns true if the object has a property with the hidden string as name.
bool HasHiddenProperties();
- static int GetIdentityHash(Handle<JSObject> obj);
- MUST_USE_RESULT MaybeObject* GetIdentityHash(CreationFlag flag);
- MUST_USE_RESULT MaybeObject* SetIdentityHash(Smi* hash, CreationFlag flag);
-
- static Handle<Object> DeleteProperty(Handle<JSObject> obj,
- Handle<String> name);
- MUST_USE_RESULT MaybeObject* DeleteProperty(String* name, DeleteMode mode);
-
- static Handle<Object> DeleteElement(Handle<JSObject> obj, uint32_t index);
- MUST_USE_RESULT MaybeObject* DeleteElement(uint32_t index, DeleteMode mode);
+ static int GetIdentityHash(Handle<JSObject> object);
+ static void SetIdentityHash(Handle<JSObject> object, Smi* hash);
inline void ValidateElements();
// Makes sure that this object can contain HeapObject as elements.
- MUST_USE_RESULT inline MaybeObject* EnsureCanContainHeapObjectElements();
+ static inline void EnsureCanContainHeapObjectElements(Handle<JSObject> obj);
// Makes sure that this object can contain the specified elements.
MUST_USE_RESULT inline MaybeObject* EnsureCanContainElements(
@@ -1799,36 +2339,15 @@ class JSObject: public JSReceiver {
// be represented as a double and not a Smi.
bool ShouldConvertToFastDoubleElements(bool* has_smi_only_elements);
- // Tells whether the index'th element is present.
- bool HasElementWithReceiver(JSReceiver* receiver, uint32_t index);
-
// Computes the new capacity when expanding the elements of a JSObject.
static int NewElementsCapacity(int old_capacity) {
// (old_capacity + 50%) + 16
return old_capacity + (old_capacity >> 1) + 16;
}
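  // For illustration, successive calls grow a backing store roughly 1.5x per
  // step plus a constant pad of 16 slots, e.g. 0 -> 16 -> 40 -> 76 -> 130.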
- // Tells whether the index'th element is present and how it is stored.
- enum LocalElementType {
- // There is no element with given index.
- UNDEFINED_ELEMENT,
-
- // Element with given index is handled by interceptor.
- INTERCEPTED_ELEMENT,
-
- // Element with given index is character in string.
- STRING_CHARACTER_ELEMENT,
-
- // Element with given index is stored in fast backing store.
- FAST_ELEMENT,
-
- // Element with given index is stored in slow backing store.
- DICTIONARY_ELEMENT
- };
-
- LocalElementType HasLocalElement(uint32_t index);
-
- bool HasElementWithInterceptor(JSReceiver* receiver, uint32_t index);
+ // These methods do not perform access checks!
+ AccessorPair* GetLocalPropertyAccessorPair(Name* name);
+ AccessorPair* GetLocalElementAccessorPair(uint32_t index);
MUST_USE_RESULT MaybeObject* SetFastElement(uint32_t index,
Object* value,
@@ -1855,7 +2374,7 @@ class JSObject: public JSReceiver {
StrictModeFlag strict_mode);
// Empty handle is returned if the element cannot be set to the given value.
- static MUST_USE_RESULT Handle<Object> SetElement(
+ static Handle<Object> SetElement(
Handle<JSObject> object,
uint32_t index,
Handle<Object> value,
@@ -1900,9 +2419,11 @@ class JSObject: public JSReceiver {
inline bool HasIndexedInterceptor();
// Support functions for v8 api (needed for correct interceptor behavior).
- bool HasRealNamedProperty(String* key);
- bool HasRealElementProperty(uint32_t index);
- bool HasRealNamedCallbackProperty(String* key);
+ static bool HasRealNamedProperty(Handle<JSObject> object,
+ Handle<Name> key);
+ static bool HasRealElementProperty(Handle<JSObject> object, uint32_t index);
+ static bool HasRealNamedCallbackProperty(Handle<JSObject> object,
+ Handle<Name> key);
// Get the header size for a JSObject. Used to compute the index of
// internal fields as well as the number of internal fields.
@@ -1915,19 +2436,18 @@ class JSObject: public JSReceiver {
inline void SetInternalField(int index, Smi* value);
// The following lookup functions skip interceptors.
- void LocalLookupRealNamedProperty(String* name, LookupResult* result);
- void LookupRealNamedProperty(String* name, LookupResult* result);
- void LookupRealNamedPropertyInPrototypes(String* name, LookupResult* result);
- MUST_USE_RESULT MaybeObject* SetElementWithCallbackSetterInPrototypes(
- uint32_t index, Object* value, bool* found, StrictModeFlag strict_mode);
- void LookupCallbackProperty(String* name, LookupResult* result);
+ void LocalLookupRealNamedProperty(Name* name, LookupResult* result);
+ void LookupRealNamedProperty(Name* name, LookupResult* result);
+ void LookupRealNamedPropertyInPrototypes(Name* name, LookupResult* result);
+ void LookupCallbackProperty(Name* name, LookupResult* result);
// Returns the number of properties on this object filtering out properties
// with the specified attributes (ignoring interceptors).
int NumberOfLocalProperties(PropertyAttributes filter = NONE);
// Fill in details for properties into storage starting at the specified
// index.
- void GetLocalPropertyNames(FixedArray* storage, int index);
+ void GetLocalPropertyNames(
+ FixedArray* storage, int index, PropertyAttributes filter = NONE);
// Returns the number of properties on this object filtering out properties
// with the specified attributes (ignoring interceptors).
@@ -1944,29 +2464,6 @@ class JSObject: public JSReceiver {
// Returns the number of enumerable elements.
int GetEnumElementKeys(FixedArray* storage);
- // Add a property to a fast-case object using a map transition to
- // new_map.
- MUST_USE_RESULT MaybeObject* AddFastPropertyUsingMap(Map* new_map,
- String* name,
- Object* value,
- int field_index);
-
- // Add a constant function property to a fast-case object.
- // This leaves a CONSTANT_TRANSITION in the old map, and
- // if it is called on a second object with this map, a
- // normal property is added instead, with a map transition.
- // This avoids the creation of many maps with the same constant
- // function, all orphaned.
- MUST_USE_RESULT MaybeObject* AddConstantFunctionProperty(
- String* name,
- JSFunction* function,
- PropertyAttributes attributes);
-
- MUST_USE_RESULT MaybeObject* ReplaceSlowProperty(
- String* name,
- Object* value,
- PropertyAttributes attributes);
-
// Returns a new map with all transitions dropped from the object's current
// map and the ElementsKind set.
static Handle<Map> GetElementsTransitionMap(Handle<JSObject> object,
@@ -1977,45 +2474,18 @@ class JSObject: public JSReceiver {
MUST_USE_RESULT MaybeObject* GetElementsTransitionMapSlow(
ElementsKind elements_kind);
- static Handle<Object> TransitionElementsKind(Handle<JSObject> object,
- ElementsKind to_kind);
+ static void TransitionElementsKind(Handle<JSObject> object,
+ ElementsKind to_kind);
MUST_USE_RESULT MaybeObject* TransitionElementsKind(ElementsKind to_kind);
+ MUST_USE_RESULT MaybeObject* UpdateAllocationSite(ElementsKind to_kind);
- // Replaces an existing transition with a transition to a map with a FIELD.
- MUST_USE_RESULT MaybeObject* ConvertTransitionToMapTransition(
- int transition_index,
- String* name,
- Object* new_value,
- PropertyAttributes attributes);
-
- // Converts a descriptor of any other type to a real field, backed by the
- // properties array.
- MUST_USE_RESULT MaybeObject* ConvertDescriptorToField(
- String* name,
- Object* new_value,
- PropertyAttributes attributes);
-
- // Add a property to a fast-case object.
- MUST_USE_RESULT MaybeObject* AddFastProperty(
- String* name,
- Object* value,
- PropertyAttributes attributes,
- StoreFromKeyed store_mode = MAY_BE_STORE_FROM_KEYED);
-
- // Add a property to a slow-case object.
- MUST_USE_RESULT MaybeObject* AddSlowProperty(String* name,
- Object* value,
- PropertyAttributes attributes);
-
- // Add a property to an object.
- MUST_USE_RESULT MaybeObject* AddProperty(
- String* name,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- StoreFromKeyed store_mode = MAY_BE_STORE_FROM_KEYED,
- ExtensibilityCheck extensibility_check = PERFORM_EXTENSIBILITY_CHECK);
+  // TODO(mstarzinger): Both public because of ConvertAndSetLocalProperty().
+ static void MigrateToMap(Handle<JSObject> object, Handle<Map> new_map);
+ static void GeneralizeFieldRepresentation(Handle<JSObject> object,
+ int modify_index,
+ Representation new_representation,
+ StoreMode store_mode);
// Convert the object to use the canonical dictionary
// representation. If the object is expected to have additional properties
@@ -2025,10 +2495,6 @@ class JSObject: public JSReceiver {
PropertyNormalizationMode mode,
int expected_additional_properties);
- MUST_USE_RESULT MaybeObject* NormalizeProperties(
- PropertyNormalizationMode mode,
- int expected_additional_properties);
-
// Convert and update the elements backing store to be a
// SeededNumberDictionary dictionary. Returns the backing after conversion.
static Handle<SeededNumberDictionary> NormalizeElements(
@@ -2036,23 +2502,16 @@ class JSObject: public JSReceiver {
MUST_USE_RESULT MaybeObject* NormalizeElements();
- static void UpdateMapCodeCache(Handle<JSObject> object,
- Handle<String> name,
- Handle<Code> code);
-
- MUST_USE_RESULT MaybeObject* UpdateMapCodeCache(String* name, Code* code);
-
// Transform slow named properties to fast variants.
- // Returns failure if allocation failed.
static void TransformToFastProperties(Handle<JSObject> object,
int unused_property_fields);
- MUST_USE_RESULT MaybeObject* TransformToFastProperties(
- int unused_property_fields);
-
// Access fast-case object properties at index.
- inline Object* FastPropertyAt(int index);
- inline Object* FastPropertyAtPut(int index, Object* value);
+ MUST_USE_RESULT inline MaybeObject* FastPropertyAt(
+ Representation representation,
+ int index);
+ inline Object* RawFastPropertyAt(int index);
+ inline void FastPropertyAtPut(int index, Object* value);
// Access to in object properties.
inline int GetInObjectPropertyOffset(int index);
@@ -2062,6 +2521,11 @@ class JSObject: public JSReceiver {
WriteBarrierMode mode
= UPDATE_WRITE_BARRIER);
+ // Set the object's prototype (only JSReceiver and null are allowed values).
+ static Handle<Object> SetPrototype(Handle<JSObject> object,
+ Handle<Object> value,
+ bool skip_hidden_prototypes = false);
+
  // Initializes the body after the properties slot; the properties slot itself
  // is initialized by set_properties. Fill the pre-allocated fields with
// pre_allocated_value and the rest with filler_value.
@@ -2074,43 +2538,43 @@ class JSObject: public JSReceiver {
// Check whether this object references another object
bool ReferencesObject(Object* obj);
- // Casting.
- static inline JSObject* cast(Object* obj);
-
  // Disallow further properties from being added to the object.
static Handle<Object> PreventExtensions(Handle<JSObject> object);
- MUST_USE_RESULT MaybeObject* PreventExtensions();
+ // ES5 Object.freeze
+ static Handle<Object> Freeze(Handle<JSObject> object);
+
+ // Called the first time an object is observed with ES7 Object.observe.
+ static void SetObserved(Handle<JSObject> object);
+
+ // Copy object.
+ static Handle<JSObject> Copy(Handle<JSObject> object,
+ Handle<AllocationSite> site);
+ static Handle<JSObject> Copy(Handle<JSObject> object);
+ static Handle<JSObject> DeepCopy(Handle<JSObject> object,
+ AllocationSiteContext* site_context);
+ static Handle<JSObject> DeepWalk(Handle<JSObject> object,
+ AllocationSiteContext* site_context);
+
+ // Casting.
+ static inline JSObject* cast(Object* obj);
// Dispatched behavior.
void JSObjectShortPrint(StringStream* accumulator);
-#ifdef OBJECT_PRINT
- inline void JSObjectPrint() {
- JSObjectPrint(stdout);
- }
- void JSObjectPrint(FILE* out);
-#endif
+ DECLARE_PRINTER(JSObject)
DECLARE_VERIFIER(JSObject)
#ifdef OBJECT_PRINT
- inline void PrintProperties() {
- PrintProperties(stdout);
- }
- void PrintProperties(FILE* out);
-
- inline void PrintElements() {
- PrintElements(stdout);
- }
- void PrintElements(FILE* out);
- inline void PrintTransitions() {
- PrintTransitions(stdout);
- }
- void PrintTransitions(FILE* out);
+ void PrintProperties(FILE* out = stdout);
+ void PrintElements(FILE* out = stdout);
+ void PrintTransitions(FILE* out = stdout);
#endif
void PrintElementsTransition(
FILE* file, ElementsKind from_kind, FixedArrayBase* from_elements,
ElementsKind to_kind, FixedArrayBase* to_elements);
+ void PrintInstanceMigration(FILE* file, Map* original_map, Map* new_map);
+
#ifdef DEBUG
// Structure for collecting spill information about JSObjects.
class SpillInformation {
@@ -2132,12 +2596,21 @@ class JSObject: public JSReceiver {
void IncrementSpillStatistics(SpillInformation* info);
#endif
+
+#ifdef VERIFY_HEAP
+ // If a GC was caused while constructing this object, the elements pointer
+ // may point to a one pointer filler map. The object won't be rooted, but
+ // our heap verification code could stumble across it.
+ bool ElementsAreSafeToExamine();
+#endif
+
Object* SlowReverseLookup(Object* value);
// Maximal number of fast properties for the JSObject. Used to
// restrict the number of map transitions to avoid an explosion in
// the number of maps for objects used as dictionaries.
- inline bool TooManyFastProperties(int properties, StoreFromKeyed store_mode);
+ inline bool TooManyFastProperties(
+ StoreFromKeyed store_mode = MAY_BE_STORE_FROM_KEYED);
// Maximal number of elements (numbered 0 .. kMaxElementCount - 1).
// Also maximal value of JSArray's length property.
@@ -2158,7 +2631,10 @@ class JSObject: public JSReceiver {
// don't want to be wasteful with long lived objects.
static const int kMaxUncheckedOldFastElementsLength = 500;
+ // Note that Heap::MaxRegularSpaceAllocationSize() puts a limit on
+ // permissible values (see the ASSERT in heap.cc).
static const int kInitialMaxFastElementArray = 100000;
+
static const int kFastPropertiesSoftLimit = 12;
static const int kMaxFastProperties = 64;
static const int kMaxInstanceSize = 255 * kPointerSize;
@@ -2179,18 +2655,46 @@ class JSObject: public JSReceiver {
static inline int SizeOf(Map* map, HeapObject* object);
};
+ // Enqueue change record for Object.observe. May cause GC.
+ static void EnqueueChangeRecord(Handle<JSObject> object,
+ const char* type,
+ Handle<Name> name,
+ Handle<Object> old_value);
+
+ // Deliver change records to observers. May cause GC.
+ static void DeliverChangeRecords(Isolate* isolate);
+
private:
friend class DictionaryElementsAccessor;
+ friend class JSReceiver;
+ friend class Object;
+
+ // Used from Object::GetProperty().
+ static Handle<Object> GetPropertyWithFailedAccessCheck(
+ Handle<JSObject> object,
+ Handle<Object> receiver,
+ LookupResult* result,
+ Handle<Name> name,
+ PropertyAttributes* attributes);
MUST_USE_RESULT MaybeObject* GetElementWithCallback(Object* receiver,
Object* structure,
uint32_t index,
Object* holder);
- MUST_USE_RESULT MaybeObject* SetElementWithCallback(
- Object* structure,
+ MUST_USE_RESULT PropertyAttributes GetElementAttributeWithInterceptor(
+ JSReceiver* receiver,
uint32_t index,
- Object* value,
- JSObject* holder,
+ bool continue_search);
+ MUST_USE_RESULT PropertyAttributes GetElementAttributeWithoutInterceptor(
+ JSReceiver* receiver,
+ uint32_t index,
+ bool continue_search);
+ static Handle<Object> SetElementWithCallback(
+ Handle<JSObject> object,
+ Handle<Object> structure,
+ uint32_t index,
+ Handle<Object> value,
+ Handle<JSObject> holder,
StrictModeFlag strict_mode);
MUST_USE_RESULT MaybeObject* SetElementWithInterceptor(
uint32_t index,
@@ -2206,27 +2710,111 @@ class JSObject: public JSReceiver {
StrictModeFlag strict_mode,
bool check_prototype,
SetPropertyMode set_mode);
+ MUST_USE_RESULT MaybeObject* SetElementWithCallbackSetterInPrototypes(
+ uint32_t index,
+ Object* value,
+ bool* found,
+ StrictModeFlag strict_mode);
// Searches the prototype chain for property 'name'. If it is found and
// has a setter, invoke it and set '*done' to true. If it is found and is
// read-only, reject and set '*done' to true. Otherwise, set '*done' to
- // false. Can cause GC and can return a failure result with '*done==true'.
- MUST_USE_RESULT MaybeObject* SetPropertyViaPrototypes(
- String* name,
- Object* value,
+ // false. Can throw and return an empty handle with '*done==true'.
+ static Handle<Object> SetPropertyViaPrototypes(
+ Handle<JSObject> object,
+ Handle<Name> name,
+ Handle<Object> value,
PropertyAttributes attributes,
StrictModeFlag strict_mode,
bool* done);
+ static Handle<Object> SetPropertyPostInterceptor(
+ Handle<JSObject> object,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ StrictModeFlag strict_mode);
+ static Handle<Object> SetPropertyUsingTransition(
+ Handle<JSObject> object,
+ LookupResult* lookup,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes);
+ static Handle<Object> SetPropertyWithFailedAccessCheck(
+ Handle<JSObject> object,
+ LookupResult* result,
+ Handle<Name> name,
+ Handle<Object> value,
+ bool check_prototype,
+ StrictModeFlag strict_mode);
- MUST_USE_RESULT MaybeObject* DeletePropertyPostInterceptor(String* name,
- DeleteMode mode);
- MUST_USE_RESULT MaybeObject* DeletePropertyWithInterceptor(String* name);
+ // Add a property to an object.
+ static Handle<Object> AddProperty(
+ Handle<JSObject> object,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ StrictModeFlag strict_mode,
+ StoreFromKeyed store_mode = MAY_BE_STORE_FROM_KEYED,
+ ExtensibilityCheck extensibility_check = PERFORM_EXTENSIBILITY_CHECK,
+ ValueType value_type = OPTIMAL_REPRESENTATION,
+ StoreMode mode = ALLOW_AS_CONSTANT,
+ TransitionFlag flag = INSERT_TRANSITION);
- MUST_USE_RESULT MaybeObject* DeleteElementWithInterceptor(uint32_t index);
+ // Add a constant function property to a fast-case object.
+ // This leaves a CONSTANT_TRANSITION in the old map, and
+ // if it is called on a second object with this map, a
+ // normal property is added instead, with a map transition.
+ // This avoids the creation of many maps with the same constant
+ // function, all orphaned.
+ static void AddConstantProperty(Handle<JSObject> object,
+ Handle<Name> name,
+ Handle<Object> constant,
+ PropertyAttributes attributes,
+ TransitionFlag flag);
+
+ // Add a property to a fast-case object.
+ static void AddFastProperty(Handle<JSObject> object,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ StoreFromKeyed store_mode,
+ ValueType value_type,
+ TransitionFlag flag);
+
+ // Add a property to a fast-case object using a map transition to
+ // new_map.
+ static void AddFastPropertyUsingMap(Handle<JSObject> object,
+ Handle<Map> new_map,
+ Handle<Name> name,
+ Handle<Object> value,
+ int field_index,
+ Representation representation);
+
+ // Add a property to a slow-case object.
+ static void AddSlowProperty(Handle<JSObject> object,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes);
+
+ static Handle<Object> DeleteProperty(Handle<JSObject> object,
+ Handle<Name> name,
+ DeleteMode mode);
+ static Handle<Object> DeletePropertyPostInterceptor(Handle<JSObject> object,
+ Handle<Name> name,
+ DeleteMode mode);
+ static Handle<Object> DeletePropertyWithInterceptor(Handle<JSObject> object,
+ Handle<Name> name);
+
+ // Deletes the named property in a normalized object.
+ static Handle<Object> DeleteNormalizedProperty(Handle<JSObject> object,
+ Handle<Name> name,
+ DeleteMode mode);
- MUST_USE_RESULT MaybeObject* DeleteFastElement(uint32_t index);
- MUST_USE_RESULT MaybeObject* DeleteDictionaryElement(uint32_t index,
- DeleteMode mode);
+ static Handle<Object> DeleteElement(Handle<JSObject> object,
+ uint32_t index,
+ DeleteMode mode);
+ static Handle<Object> DeleteElementWithInterceptor(Handle<JSObject> object,
+ uint32_t index);
bool ReferencesObjectFromElements(FixedArray* elements,
ElementsKind kind,
@@ -2238,27 +2826,37 @@ class JSObject: public JSReceiver {
// Gets the current elements capacity and the number of used elements.
void GetElementsCapacityAndUsage(int* capacity, int* used);
- bool CanSetCallback(String* name);
- MUST_USE_RESULT MaybeObject* SetElementCallback(
- uint32_t index,
- Object* structure,
- PropertyAttributes attributes);
- MUST_USE_RESULT MaybeObject* SetPropertyCallback(
- String* name,
- Object* structure,
- PropertyAttributes attributes);
- MUST_USE_RESULT MaybeObject* DefineElementAccessor(
- uint32_t index,
- Object* getter,
- Object* setter,
- PropertyAttributes attributes);
- MUST_USE_RESULT MaybeObject* CreateAccessorPairFor(String* name);
- MUST_USE_RESULT MaybeObject* DefinePropertyAccessor(
- String* name,
- Object* getter,
- Object* setter,
- PropertyAttributes attributes);
+ bool CanSetCallback(Name* name);
+ static void SetElementCallback(Handle<JSObject> object,
+ uint32_t index,
+ Handle<Object> structure,
+ PropertyAttributes attributes);
+ static void SetPropertyCallback(Handle<JSObject> object,
+ Handle<Name> name,
+ Handle<Object> structure,
+ PropertyAttributes attributes);
+ static void DefineElementAccessor(Handle<JSObject> object,
+ uint32_t index,
+ Handle<Object> getter,
+ Handle<Object> setter,
+ PropertyAttributes attributes,
+ v8::AccessControl access_control);
+ static Handle<AccessorPair> CreateAccessorPairFor(Handle<JSObject> object,
+ Handle<Name> name);
+ static void DefinePropertyAccessor(Handle<JSObject> object,
+ Handle<Name> name,
+ Handle<Object> getter,
+ Handle<Object> setter,
+ PropertyAttributes attributes,
+ v8::AccessControl access_control);
+ // Try to define a single accessor paying attention to map transitions.
+ // Returns false if this was not possible and we have to use the slow case.
+ static bool DefineFastAccessor(Handle<JSObject> object,
+ Handle<Name> name,
+ AccessorComponent component,
+ Handle<Object> accessor,
+ PropertyAttributes attributes);
enum InitializeHiddenProperties {
CREATE_NEW_IF_ABSENT,
@@ -2276,6 +2874,8 @@ class JSObject: public JSReceiver {
MUST_USE_RESULT MaybeObject* SetHiddenPropertiesHashTable(
Object* value);
+ MUST_USE_RESULT MaybeObject* GetIdentityHash(CreationFlag flag);
+
DISALLOW_IMPLICIT_CONSTRUCTORS(JSObject);
};
@@ -2310,35 +2910,26 @@ class FixedArray: public FixedArrayBase {
inline void set(int index, Object* value);
inline bool is_the_hole(int index);
- // Setter that doesn't need write barrier).
+ // Setter that doesn't need write barrier.
inline void set(int index, Smi* value);
// Setter with explicit barrier mode.
inline void set(int index, Object* value, WriteBarrierMode mode);
// Setters for frequently used oddballs located in old space.
inline void set_undefined(int index);
- // TODO(isolates): duplicate.
- inline void set_undefined(Heap* heap, int index);
inline void set_null(int index);
- // TODO(isolates): duplicate.
- inline void set_null(Heap* heap, int index);
inline void set_the_hole(int index);
- // Setters with less debug checks for the GC to use.
- inline void set_unchecked(int index, Smi* value);
- inline void set_null_unchecked(Heap* heap, int index);
- inline void set_unchecked(Heap* heap, int index, Object* value,
- WriteBarrierMode mode);
+ inline Object** GetFirstElementAddress();
+ inline bool ContainsOnlySmisOrHoles();
// Gives access to raw memory which stores the array's data.
inline Object** data_start();
- inline Object** GetFirstElementAddress();
- inline bool ContainsOnlySmisOrHoles();
-
// Copy operations.
MUST_USE_RESULT inline MaybeObject* Copy();
- MUST_USE_RESULT MaybeObject* CopySize(int new_length);
+ MUST_USE_RESULT MaybeObject* CopySize(int new_length,
+ PretenureFlag pretenure = NOT_TENURED);
// Add the elements of a JSArray to this FixedArray.
MUST_USE_RESULT MaybeObject* AddKeysFromJSArray(JSArray* array);
@@ -2366,12 +2957,7 @@ class FixedArray: public FixedArrayBase {
static const int kMaxLength = (kMaxSize - kHeaderSize) / kPointerSize;
// Dispatched behavior.
-#ifdef OBJECT_PRINT
- inline void FixedArrayPrint() {
- FixedArrayPrint(stdout);
- }
- void FixedArrayPrint(FILE* out);
-#endif
+ DECLARE_PRINTER(FixedArray)
DECLARE_VERIFIER(FixedArray)
#ifdef DEBUG
// Checks if two FixedArrays have identical contents.
@@ -2410,6 +2996,8 @@ class FixedArray: public FixedArrayBase {
Object* value);
private:
+ STATIC_CHECK(kHeaderSize == Internals::kFixedArrayHeaderSize);
+
DISALLOW_IMPLICIT_CONSTRUCTORS(FixedArray);
};
@@ -2435,6 +3023,9 @@ class FixedDoubleArray: public FixedArrayBase {
return kHeaderSize + length * kDoubleSize;
}
+ // Gives access to raw memory which stores the array's data.
+ inline double* data_start();
+
// Code Generation support.
static int OffsetOfElementAt(int index) { return SizeFor(index); }
@@ -2453,12 +3044,7 @@ class FixedDoubleArray: public FixedArrayBase {
static const int kMaxLength = (kMaxSize - kHeaderSize) / kDoubleSize;
// Dispatched behavior.
-#ifdef OBJECT_PRINT
- inline void FixedDoubleArrayPrint() {
- FixedDoubleArrayPrint(stdout);
- }
- void FixedDoubleArrayPrint(FILE* out);
-#endif
+ DECLARE_PRINTER(FixedDoubleArray)
DECLARE_VERIFIER(FixedDoubleArray)
private:
@@ -2466,6 +3052,100 @@ class FixedDoubleArray: public FixedArrayBase {
};
+// ConstantPoolArray describes a fixed-sized array containing constant pool
+// entries.
+// The format of the pool is:
+// [0]: Field holding the first index which is a pointer entry
+// [1]: Field holding the first index which is an int32 entry
+// [2] ... [first_ptr_index() - 1]: 64 bit entries
+// [first_ptr_index()] ... [first_int32_index() - 1]: pointer entries
+// [first_int32_index()] ... [length - 1]: 32 bit entries
+class ConstantPoolArray: public FixedArrayBase {
+ public:
+ // Getters for the field storing the first index for different type entries.
+ inline int first_ptr_index();
+ inline int first_int64_index();
+ inline int first_int32_index();
+
+ // Getters for counts of different type entries.
+ inline int count_of_ptr_entries();
+ inline int count_of_int64_entries();
+ inline int count_of_int32_entries();
+
+ // Setter and getter for pool elements.
+ inline Object* get_ptr_entry(int index);
+ inline int64_t get_int64_entry(int index);
+ inline int32_t get_int32_entry(int index);
+ inline double get_int64_entry_as_double(int index);
+
+ inline void set(int index, Object* value);
+ inline void set(int index, int64_t value);
+ inline void set(int index, double value);
+ inline void set(int index, int32_t value);
+
+ // Set up initial state.
+ inline void SetEntryCounts(int number_of_int64_entries,
+ int number_of_ptr_entries,
+ int number_of_int32_entries);
+
+ // Copy operations
+ MUST_USE_RESULT inline MaybeObject* Copy();
+
+ // Garbage collection support.
+ inline static int SizeFor(int number_of_int64_entries,
+ int number_of_ptr_entries,
+ int number_of_int32_entries) {
+ return RoundUp(OffsetAt(number_of_int64_entries,
+ number_of_ptr_entries,
+ number_of_int32_entries),
+ kPointerSize);
+ }
+
+ // Code Generation support.
+ inline int OffsetOfElementAt(int index) {
+ ASSERT(index < length());
+ if (index >= first_int32_index()) {
+ return OffsetAt(count_of_int64_entries(), count_of_ptr_entries(),
+ index - first_int32_index());
+ } else if (index >= first_ptr_index()) {
+ return OffsetAt(count_of_int64_entries(), index - first_ptr_index(), 0);
+ } else {
+ return OffsetAt(index, 0, 0);
+ }
+ }
+
+ // Casting.
+ static inline ConstantPoolArray* cast(Object* obj);
+
+ // Layout description.
+ static const int kFirstPointerIndexOffset = FixedArray::kHeaderSize;
+ static const int kFirstInt32IndexOffset =
+ kFirstPointerIndexOffset + kPointerSize;
+ static const int kFirstOffset = kFirstInt32IndexOffset + kPointerSize;
+
+ // Dispatched behavior.
+ void ConstantPoolIterateBody(ObjectVisitor* v);
+
+ DECLARE_PRINTER(ConstantPoolArray)
+ DECLARE_VERIFIER(ConstantPoolArray)
+
+ private:
+ inline void set_first_ptr_index(int value);
+ inline void set_first_int32_index(int value);
+
+ inline static int OffsetAt(int number_of_int64_entries,
+ int number_of_ptr_entries,
+ int number_of_int32_entries) {
+ return kFirstOffset
+ + (number_of_int64_entries * kInt64Size)
+ + (number_of_ptr_entries * kPointerSize)
+ + (number_of_int32_entries * kInt32Size);
+ }
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ConstantPoolArray);
+};
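+// As a rough illustration (entry counts chosen for illustration only): a pool
+// holding 2 int64, 3 pointer and 4 int32 entries needs
+// OffsetAt(2, 3, 4) == kFirstOffset + 2 * kInt64Size + 3 * kPointerSize +
+// 4 * kInt32Size bytes of entry data, which SizeFor() then rounds up to a
+// multiple of kPointerSize.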
+
+
// DescriptorArrays are fixed arrays used to hold instance descriptors.
// The format of these objects is:
// [0]: Number of descriptors
@@ -2560,20 +3240,25 @@ class DescriptorArray: public FixedArray {
Object* new_index_cache);
// Accessors for fetching instance descriptor at descriptor number.
- inline String* GetKey(int descriptor_number);
+ inline Name* GetKey(int descriptor_number);
inline Object** GetKeySlot(int descriptor_number);
inline Object* GetValue(int descriptor_number);
inline Object** GetValueSlot(int descriptor_number);
+ inline Object** GetDescriptorStartSlot(int descriptor_number);
+ inline Object** GetDescriptorEndSlot(int descriptor_number);
inline PropertyDetails GetDetails(int descriptor_number);
inline PropertyType GetType(int descriptor_number);
inline int GetFieldIndex(int descriptor_number);
- inline JSFunction* GetConstantFunction(int descriptor_number);
+ inline Object* GetConstant(int descriptor_number);
inline Object* GetCallbacksObject(int descriptor_number);
inline AccessorDescriptor* GetCallbacks(int descriptor_number);
- inline String* GetSortedKey(int descriptor_number);
+ inline Name* GetSortedKey(int descriptor_number);
inline int GetSortedKeyIndex(int descriptor_number);
inline void SetSortedKey(int pointer, int descriptor_number);
+ inline void InitializeRepresentations(Representation representation);
+ inline void SetRepresentation(int descriptor_number,
+ Representation representation);
// Accessor for complete descriptor.
inline void Get(int descriptor_number, Descriptor* desc);
@@ -2594,22 +3279,51 @@ class DescriptorArray: public FixedArray {
DescriptorArray* src,
int src_index,
const WhitenessWitness&);
-
- MUST_USE_RESULT MaybeObject* CopyUpTo(int enumeration_index);
+ static Handle<DescriptorArray> Merge(Handle<DescriptorArray> desc,
+ int verbatim,
+ int valid,
+ int new_size,
+ int modify_index,
+ StoreMode store_mode,
+ Handle<DescriptorArray> other);
+ MUST_USE_RESULT MaybeObject* Merge(int verbatim,
+ int valid,
+ int new_size,
+ int modify_index,
+ StoreMode store_mode,
+ DescriptorArray* other);
+
+ bool IsMoreGeneralThan(int verbatim,
+ int valid,
+ int new_size,
+ DescriptorArray* other);
+
+ MUST_USE_RESULT MaybeObject* CopyUpTo(int enumeration_index) {
+ return CopyUpToAddAttributes(enumeration_index, NONE);
+ }
+
+ static Handle<DescriptorArray> CopyUpToAddAttributes(
+ Handle<DescriptorArray> desc,
+ int enumeration_index,
+ PropertyAttributes attributes);
+ MUST_USE_RESULT MaybeObject* CopyUpToAddAttributes(
+ int enumeration_index,
+ PropertyAttributes attributes);
// Sort the instance descriptors by the hash codes of their keys.
void Sort();
// Search the instance descriptors for given name.
- INLINE(int Search(String* name, int number_of_own_descriptors));
+ INLINE(int Search(Name* name, int number_of_own_descriptors));
// As the above, but uses DescriptorLookupCache and updates it when
// necessary.
- INLINE(int SearchWithCache(String* name, Map* map));
+ INLINE(int SearchWithCache(Name* name, Map* map));
// Allocates a DescriptorArray, but returns the singleton
// empty descriptor array object if number_of_descriptors is 0.
- MUST_USE_RESULT static MaybeObject* Allocate(int number_of_descriptors,
+ MUST_USE_RESULT static MaybeObject* Allocate(Isolate* isolate,
+ int number_of_descriptors,
int slack = 0);
// Casting.
@@ -2643,10 +3357,7 @@ class DescriptorArray: public FixedArray {
#ifdef OBJECT_PRINT
// Print all the descriptors.
- inline void PrintDescriptors() {
- PrintDescriptors(stdout);
- }
- void PrintDescriptors(FILE* out);
+ void PrintDescriptors(FILE* out = stdout);
#endif
#ifdef DEBUG
@@ -2714,11 +3425,11 @@ class DescriptorArray: public FixedArray {
enum SearchMode { ALL_ENTRIES, VALID_ENTRIES };
template<SearchMode search_mode, typename T>
-inline int LinearSearch(T* array, String* name, int len, int valid_entries);
+inline int LinearSearch(T* array, Name* name, int len, int valid_entries);
template<SearchMode search_mode, typename T>
-inline int Search(T* array, String* name, int valid_entries = 0);
+inline int Search(T* array, Name* name, int valid_entries = 0);
// HashTable is a subclass of FixedArray that implements a hash table
@@ -2743,7 +3454,7 @@ inline int Search(T* array, String* name, int valid_entries = 0);
// // Returns the hash value for object.
// static uint32_t HashForObject(Key key, Object* object);
// // Convert key to an object.
-// static inline Object* AsObject(Key key);
+// static inline Object* AsObject(Heap* heap, Key key);
// // The prefix size indicates number of elements in the beginning
// // of the backing storage.
// static const int kPrefixSize = ..;
@@ -2829,6 +3540,7 @@ class HashTable: public FixedArray {
// Returns a new HashTable object. Might return Failure.
MUST_USE_RESULT static MaybeObject* Allocate(
+ Heap* heap,
int at_least_space_for,
MinimumCapacity capacity_option = USE_DEFAULT_MINIMUM_CAPACITY,
PretenureFlag pretenure = NOT_TENURED);
@@ -2883,6 +3595,9 @@ class HashTable: public FixedArray {
inline int FindEntry(Key key);
int FindEntry(Isolate* isolate, Key key);
+ // Rehashes the table in-place.
+ void Rehash(Key key);
+
protected:
// Find the entry at which to insert element with the given key that
// has the given hash value.
@@ -2929,6 +3644,13 @@ class HashTable: public FixedArray {
return (last + number) & (size - 1);
}
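  // The masking above assumes the table capacity is a power of two; for
  // example, with size == 8, last == 6 and number == 3 the probe wraps
  // around to entry (6 + 3) & 7 == 1.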
+  // Returns _expected_ if one of the entries given by the first _probe_
+  // probes is equal to _expected_. Otherwise, returns the entry given by
+  // the probe number _probe_.
+ uint32_t EntryForProbe(Key key, Object* k, int probe, uint32_t expected);
+
+ void Swap(uint32_t entry1, uint32_t entry2, WriteBarrierMode mode);
+
// Rehashes this hash-table into the new table.
MUST_USE_RESULT MaybeObject* Rehash(HashTable* new_table, Key key);
@@ -2936,7 +3658,10 @@ class HashTable: public FixedArray {
MUST_USE_RESULT MaybeObject* Shrink(Key key);
// Ensure enough space for n additional elements.
- MUST_USE_RESULT MaybeObject* EnsureCapacity(int n, Key key);
+ MUST_USE_RESULT MaybeObject* EnsureCapacity(
+ int n,
+ Key key,
+ PretenureFlag pretenure = NOT_TENURED);
};
@@ -2951,13 +3676,13 @@ class HashTableKey {
virtual uint32_t HashForObject(Object* key) = 0;
// Returns the key object for storing into the hash table.
  // If allocation fails, a failure object is returned.
- MUST_USE_RESULT virtual MaybeObject* AsObject() = 0;
+ MUST_USE_RESULT virtual MaybeObject* AsObject(Heap* heap) = 0;
// Required.
virtual ~HashTableKey() {}
};
-class SymbolTableShape : public BaseShape<HashTableKey*> {
+class StringTableShape : public BaseShape<HashTableKey*> {
public:
static inline bool IsMatch(HashTableKey* key, Object* value) {
return key->IsMatch(value);
@@ -2968,53 +3693,58 @@ class SymbolTableShape : public BaseShape<HashTableKey*> {
static inline uint32_t HashForObject(HashTableKey* key, Object* object) {
return key->HashForObject(object);
}
- MUST_USE_RESULT static inline MaybeObject* AsObject(HashTableKey* key) {
- return key->AsObject();
+ MUST_USE_RESULT static inline MaybeObject* AsObject(Heap* heap,
+ HashTableKey* key) {
+ return key->AsObject(heap);
}
static const int kPrefixSize = 0;
static const int kEntrySize = 1;
};
-class SeqAsciiString;
+class SeqOneByteString;
-// SymbolTable.
+// StringTable.
//
// No special elements in the prefix and the element size is 1
-// because only the symbol itself (the key) needs to be stored.
-class SymbolTable: public HashTable<SymbolTableShape, HashTableKey*> {
- public:
- // Find symbol in the symbol table. If it is not there yet, it is
- // added. The return value is the symbol table which might have
- // been enlarged. If the return value is not a failure, the symbol
- // pointer *s is set to the symbol found.
- MUST_USE_RESULT MaybeObject* LookupSymbol(Vector<const char> str, Object** s);
- MUST_USE_RESULT MaybeObject* LookupAsciiSymbol(Vector<const char> str,
- Object** s);
- MUST_USE_RESULT MaybeObject* LookupSubStringAsciiSymbol(
- Handle<SeqAsciiString> str,
+// because only the string itself (the key) needs to be stored.
+class StringTable: public HashTable<StringTableShape, HashTableKey*> {
+ public:
+ // Find string in the string table. If it is not there yet, it is
+ // added. The return value is the string table which might have
+ // been enlarged. If the return value is not a failure, the string
+ // pointer *s is set to the string found.
+ MUST_USE_RESULT MaybeObject* LookupUtf8String(
+ Vector<const char> str,
+ Object** s);
+ MUST_USE_RESULT MaybeObject* LookupOneByteString(
+ Vector<const uint8_t> str,
+ Object** s);
+ MUST_USE_RESULT MaybeObject* LookupSubStringOneByteString(
+ Handle<SeqOneByteString> str,
int from,
int length,
Object** s);
- MUST_USE_RESULT MaybeObject* LookupTwoByteSymbol(Vector<const uc16> str,
- Object** s);
+ MUST_USE_RESULT MaybeObject* LookupTwoByteString(
+ Vector<const uc16> str,
+ Object** s);
MUST_USE_RESULT MaybeObject* LookupString(String* key, Object** s);
- // Looks up a symbol that is equal to the given string and returns
- // true if it is found, assigning the symbol to the given output
+ // Looks up a string that is equal to the given string and returns
+ // true if it is found, assigning the string to the given output
// parameter.
- bool LookupSymbolIfExists(String* str, String** symbol);
- bool LookupTwoCharsSymbolIfExists(uint32_t c1, uint32_t c2, String** symbol);
+ bool LookupStringIfExists(String* str, String** result);
+ bool LookupTwoCharsStringIfExists(uint16_t c1, uint16_t c2, String** result);
// Casting.
- static inline SymbolTable* cast(Object* obj);
+ static inline StringTable* cast(Object* obj);
private:
MUST_USE_RESULT MaybeObject* LookupKey(HashTableKey* key, Object** s);
template <bool seq_ascii> friend class JsonParser;
- DISALLOW_IMPLICIT_CONSTRUCTORS(SymbolTable);
+ DISALLOW_IMPLICIT_CONSTRUCTORS(StringTable);
};
@@ -3031,8 +3761,9 @@ class MapCacheShape : public BaseShape<HashTableKey*> {
return key->HashForObject(object);
}
- MUST_USE_RESULT static inline MaybeObject* AsObject(HashTableKey* key) {
- return key->AsObject();
+ MUST_USE_RESULT static inline MaybeObject* AsObject(Heap* heap,
+ HashTableKey* key) {
+ return key->AsObject(heap);
}
static const int kPrefixSize = 0;
@@ -3042,11 +3773,11 @@ class MapCacheShape : public BaseShape<HashTableKey*> {
// MapCache.
//
-// Maps keys that are a fixed array of symbols to a map.
+// Maps keys that are a fixed array of unique names to a map.
// Used to canonicalize maps for object literals.
class MapCache: public HashTable<MapCacheShape, HashTableKey*> {
public:
- // Find cached value for a string key, otherwise return null.
+ // Find cached value for a name key, otherwise return null.
Object* Lookup(FixedArray* key);
MUST_USE_RESULT MaybeObject* Put(FixedArray* key, Map* value);
static inline MapCache* cast(Object* obj);
@@ -3107,7 +3838,10 @@ class Dictionary: public HashTable<Shape, Key> {
PropertyAttributes filter,
SortMode sort_mode);
// Fill in details for properties into storage.
- void CopyKeysTo(FixedArray* storage, int index, SortMode sort_mode);
+ void CopyKeysTo(FixedArray* storage,
+ int index,
+ PropertyAttributes filter,
+ SortMode sort_mode);
// Accessors for next enumeration index.
void SetNextEnumerationIndex(int index) {
@@ -3120,16 +3854,16 @@ class Dictionary: public HashTable<Shape, Key> {
}
// Returns a new array for dictionary usage. Might return Failure.
- MUST_USE_RESULT static MaybeObject* Allocate(int at_least_space_for);
+ MUST_USE_RESULT static MaybeObject* Allocate(
+ Heap* heap,
+ int at_least_space_for,
+ PretenureFlag pretenure = NOT_TENURED);
// Ensure enough space for n additional elements.
MUST_USE_RESULT MaybeObject* EnsureCapacity(int n, Key key);
#ifdef OBJECT_PRINT
- inline void Print() {
- Print(stdout);
- }
- void Print(FILE* out);
+ void Print(FILE* out = stdout);
#endif
// Returns the key (slow).
Object* SlowReverseLookup(Object* value);
@@ -3165,29 +3899,30 @@ class Dictionary: public HashTable<Shape, Key> {
};
-class StringDictionaryShape : public BaseShape<String*> {
+class NameDictionaryShape : public BaseShape<Name*> {
public:
- static inline bool IsMatch(String* key, Object* other);
- static inline uint32_t Hash(String* key);
- static inline uint32_t HashForObject(String* key, Object* object);
- MUST_USE_RESULT static inline MaybeObject* AsObject(String* key);
+ static inline bool IsMatch(Name* key, Object* other);
+ static inline uint32_t Hash(Name* key);
+ static inline uint32_t HashForObject(Name* key, Object* object);
+ MUST_USE_RESULT static inline MaybeObject* AsObject(Heap* heap,
+ Name* key);
static const int kPrefixSize = 2;
static const int kEntrySize = 3;
static const bool kIsEnumerable = true;
};
-class StringDictionary: public Dictionary<StringDictionaryShape, String*> {
+class NameDictionary: public Dictionary<NameDictionaryShape, Name*> {
public:
- static inline StringDictionary* cast(Object* obj) {
+ static inline NameDictionary* cast(Object* obj) {
ASSERT(obj->IsDictionary());
- return reinterpret_cast<StringDictionary*>(obj);
+ return reinterpret_cast<NameDictionary*>(obj);
}
// Copies enumerable keys to preallocated fixed array.
FixedArray* CopyEnumKeysTo(FixedArray* storage);
static void DoGenerateNewEnumerationIndices(
- Handle<StringDictionary> dictionary);
+ Handle<NameDictionary> dictionary);
// For transforming properties of a JSObject.
MUST_USE_RESULT MaybeObject* TransformPropertiesToFastFor(
@@ -3196,14 +3931,15 @@ class StringDictionary: public Dictionary<StringDictionaryShape, String*> {
// Find entry for key, otherwise return kNotFound. Optimized version of
// HashTable::FindEntry.
- int FindEntry(String* key);
+ int FindEntry(Name* key);
};
class NumberDictionaryShape : public BaseShape<uint32_t> {
public:
static inline bool IsMatch(uint32_t key, Object* other);
- MUST_USE_RESULT static inline MaybeObject* AsObject(uint32_t key);
+ MUST_USE_RESULT static inline MaybeObject* AsObject(Heap* heap,
+ uint32_t key);
static const int kEntrySize = 3;
static const bool kIsEnumerable = false;
};
@@ -3307,7 +4043,8 @@ class ObjectHashTableShape : public BaseShape<Object*> {
static inline bool IsMatch(Object* key, Object* other);
static inline uint32_t Hash(Object* key);
static inline uint32_t HashForObject(Object* key, Object* object);
- MUST_USE_RESULT static inline MaybeObject* AsObject(Object* key);
+ MUST_USE_RESULT static inline MaybeObject* AsObject(Heap* heap,
+ Object* key);
static const int kPrefixSize = 0;
static const int kEntrySize = entrysize;
};
@@ -3363,6 +4100,58 @@ class ObjectHashTable: public HashTable<ObjectHashTableShape<2>, Object*> {
};
+template <int entrysize>
+class WeakHashTableShape : public BaseShape<Object*> {
+ public:
+ static inline bool IsMatch(Object* key, Object* other);
+ static inline uint32_t Hash(Object* key);
+ static inline uint32_t HashForObject(Object* key, Object* object);
+ MUST_USE_RESULT static inline MaybeObject* AsObject(Heap* heap,
+ Object* key);
+ static const int kPrefixSize = 0;
+ static const int kEntrySize = entrysize;
+};
+
+
+// WeakHashTable maps keys that are arbitrary objects to object values.
+// It is used for the global weak hash table that maps objects
+// embedded in optimized code to dependent code lists.
+class WeakHashTable: public HashTable<WeakHashTableShape<2>, Object*> {
+ public:
+ static inline WeakHashTable* cast(Object* obj) {
+ ASSERT(obj->IsHashTable());
+ return reinterpret_cast<WeakHashTable*>(obj);
+ }
+
+ // Looks up the value associated with the given key. The hole value is
+ // returned in case the key is not present.
+ Object* Lookup(Object* key);
+
+ // Adds (or overwrites) the value associated with the given key. Mapping a
+ // key to the hole value causes removal of the whole entry.
+ MUST_USE_RESULT MaybeObject* Put(Object* key, Object* value);
+
+ // This function is called when heap verification is turned on.
+ void Zap(Object* value) {
+ int capacity = Capacity();
+ for (int i = 0; i < capacity; i++) {
+ set(EntryToIndex(i), value);
+ set(EntryToValueIndex(i), value);
+ }
+ }
+
+ private:
+ friend class MarkCompactCollector;
+
+ void AddEntry(int entry, Object* key, Object* value);
+
+ // Returns the index to the value of an entry.
+ static inline int EntryToValueIndex(int entry) {
+ return EntryToIndex(entry) + 1;
+ }
+};
+
+
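+// A usage sketch of the semantics documented above (illustrative only; how
+// the table and the hole value are obtained is assumed, not shown here):
+//
+//   WeakHashTable* table = ...;
+//   table->Put(key_object, dependent_list);          // add or overwrite
+//   Object* value = table->Lookup(key_object);       // the hole if absent
+//   table->Put(key_object, heap->the_hole_value());  // removes the entry
+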
// JSFunctionResultCache caches results of some JSFunction invocation.
// It is a fixed array with fixed structure:
// [0]: factory function
@@ -3411,7 +4200,7 @@ class ScopeInfo : public FixedArray {
static inline ScopeInfo* cast(Object* object);
// Return the type of this scope.
- ScopeType Type();
+ ScopeType scope_type();
// Does this scope call eval?
bool CallsEval();
@@ -3475,13 +4264,13 @@ class ScopeInfo : public FixedArray {
 // Lookup support for serialized scope info. Returns the
 // stack slot index for a given slot name if the slot is
- // present; otherwise returns a value < 0. The name must be a symbol
- // (canonicalized).
+ // present; otherwise returns a value < 0. The name must be an internalized
+ // string.
int StackSlotIndex(String* name);
// Lookup support for serialized scope info. Returns the
// context slot index for a given slot name if the slot is present; otherwise
- // returns a value < 0. The name must be a symbol (canonicalized).
+ // returns a value < 0. The name must be an internalized string.
// If the slot is present and mode != NULL, sets *mode to the corresponding
// mode for that variable.
int ContextSlotIndex(String* name,
@@ -3490,19 +4279,26 @@ class ScopeInfo : public FixedArray {
// Lookup support for serialized scope info. Returns the
// parameter index for a given parameter name if the parameter is present;
- // otherwise returns a value < 0. The name must be a symbol (canonicalized).
+ // otherwise returns a value < 0. The name must be an internalized string.
int ParameterIndex(String* name);
// Lookup support for serialized scope info. Returns the function context
// slot index if the function name is present and context-allocated (named
// function expressions, only), otherwise returns a value < 0. The name
- // must be a symbol (canonicalized).
+ // must be an internalized string.
int FunctionContextSlotIndex(String* name, VariableMode* mode);
+
+ // Copies all the context locals into an object used to materialize a scope.
+ static bool CopyContextLocalsToScopeObject(Handle<ScopeInfo> scope_info,
+ Handle<Context> context,
+ Handle<JSObject> scope_object);
+
+
static Handle<ScopeInfo> Create(Scope* scope, Zone* zone);
// Serializes empty scope info.
- static ScopeInfo* Empty();
+ static ScopeInfo* Empty(Isolate* isolate);
#ifdef DEBUG
void Print();
@@ -3587,7 +4383,7 @@ class ScopeInfo : public FixedArray {
};
// Properties of scopes.
- class TypeField: public BitField<ScopeType, 0, 3> {};
+ class ScopeTypeField: public BitField<ScopeType, 0, 3> {};
class CallsEvalField: public BitField<bool, 3, 1> {};
class LanguageModeField: public BitField<LanguageMode, 4, 2> {};
class FunctionVariableField: public BitField<FunctionVariableInfo, 6, 2> {};
@@ -3607,8 +4403,9 @@ class NormalizedMapCache: public FixedArray {
public:
static const int kEntries = 64;
- MUST_USE_RESULT MaybeObject* Get(JSObject* object,
- PropertyNormalizationMode mode);
+ static Handle<Map> Get(Handle<NormalizedMapCache> cache,
+ Handle<JSObject> object,
+ PropertyNormalizationMode mode);
void Clear();
@@ -3658,12 +4455,7 @@ class ByteArray: public FixedArrayBase {
inline int ByteArraySize() {
return SizeFor(this->length());
}
-#ifdef OBJECT_PRINT
- inline void ByteArrayPrint() {
- ByteArrayPrint(stdout);
- }
- void ByteArrayPrint(FILE* out);
-#endif
+ DECLARE_PRINTER(ByteArray)
DECLARE_VERIFIER(ByteArray)
// Layout description.
@@ -3692,12 +4484,8 @@ class FreeSpace: public HeapObject {
// Casting.
static inline FreeSpace* cast(Object* obj);
-#ifdef OBJECT_PRINT
- inline void FreeSpacePrint() {
- FreeSpacePrint(stdout);
- }
- void FreeSpacePrint(FILE* out);
-#endif
+ // Dispatched behavior.
+ DECLARE_PRINTER(FreeSpace)
DECLARE_VERIFIER(FreeSpace)
// Layout description.
@@ -3772,12 +4560,8 @@ class ExternalPixelArray: public ExternalArray {
// Casting.
static inline ExternalPixelArray* cast(Object* obj);
-#ifdef OBJECT_PRINT
- inline void ExternalPixelArrayPrint() {
- ExternalPixelArrayPrint(stdout);
- }
- void ExternalPixelArrayPrint(FILE* out);
-#endif
+ // Dispatched behavior.
+ DECLARE_PRINTER(ExternalPixelArray)
DECLARE_VERIFIER(ExternalPixelArray)
private:
@@ -3799,12 +4583,8 @@ class ExternalByteArray: public ExternalArray {
// Casting.
static inline ExternalByteArray* cast(Object* obj);
-#ifdef OBJECT_PRINT
- inline void ExternalByteArrayPrint() {
- ExternalByteArrayPrint(stdout);
- }
- void ExternalByteArrayPrint(FILE* out);
-#endif
+ // Dispatched behavior.
+ DECLARE_PRINTER(ExternalByteArray)
DECLARE_VERIFIER(ExternalByteArray)
private:
@@ -3826,12 +4606,8 @@ class ExternalUnsignedByteArray: public ExternalArray {
// Casting.
static inline ExternalUnsignedByteArray* cast(Object* obj);
-#ifdef OBJECT_PRINT
- inline void ExternalUnsignedByteArrayPrint() {
- ExternalUnsignedByteArrayPrint(stdout);
- }
- void ExternalUnsignedByteArrayPrint(FILE* out);
-#endif
+ // Dispatched behavior.
+ DECLARE_PRINTER(ExternalUnsignedByteArray)
DECLARE_VERIFIER(ExternalUnsignedByteArray)
private:
@@ -3853,12 +4629,8 @@ class ExternalShortArray: public ExternalArray {
// Casting.
static inline ExternalShortArray* cast(Object* obj);
-#ifdef OBJECT_PRINT
- inline void ExternalShortArrayPrint() {
- ExternalShortArrayPrint(stdout);
- }
- void ExternalShortArrayPrint(FILE* out);
-#endif
+ // Dispatched behavior.
+ DECLARE_PRINTER(ExternalShortArray)
DECLARE_VERIFIER(ExternalShortArray)
private:
@@ -3880,12 +4652,8 @@ class ExternalUnsignedShortArray: public ExternalArray {
// Casting.
static inline ExternalUnsignedShortArray* cast(Object* obj);
-#ifdef OBJECT_PRINT
- inline void ExternalUnsignedShortArrayPrint() {
- ExternalUnsignedShortArrayPrint(stdout);
- }
- void ExternalUnsignedShortArrayPrint(FILE* out);
-#endif
+ // Dispatched behavior.
+ DECLARE_PRINTER(ExternalUnsignedShortArray)
DECLARE_VERIFIER(ExternalUnsignedShortArray)
private:
@@ -3907,12 +4675,8 @@ class ExternalIntArray: public ExternalArray {
// Casting.
static inline ExternalIntArray* cast(Object* obj);
-#ifdef OBJECT_PRINT
- inline void ExternalIntArrayPrint() {
- ExternalIntArrayPrint(stdout);
- }
- void ExternalIntArrayPrint(FILE* out);
-#endif
+ // Dispatched behavior.
+ DECLARE_PRINTER(ExternalIntArray)
DECLARE_VERIFIER(ExternalIntArray)
private:
@@ -3934,12 +4698,8 @@ class ExternalUnsignedIntArray: public ExternalArray {
// Casting.
static inline ExternalUnsignedIntArray* cast(Object* obj);
-#ifdef OBJECT_PRINT
- inline void ExternalUnsignedIntArrayPrint() {
- ExternalUnsignedIntArrayPrint(stdout);
- }
- void ExternalUnsignedIntArrayPrint(FILE* out);
-#endif
+ // Dispatched behavior.
+ DECLARE_PRINTER(ExternalUnsignedIntArray)
DECLARE_VERIFIER(ExternalUnsignedIntArray)
private:
@@ -3961,12 +4721,8 @@ class ExternalFloatArray: public ExternalArray {
// Casting.
static inline ExternalFloatArray* cast(Object* obj);
-#ifdef OBJECT_PRINT
- inline void ExternalFloatArrayPrint() {
- ExternalFloatArrayPrint(stdout);
- }
- void ExternalFloatArrayPrint(FILE* out);
-#endif
+ // Dispatched behavior.
+ DECLARE_PRINTER(ExternalFloatArray)
DECLARE_VERIFIER(ExternalFloatArray)
private:
@@ -3988,12 +4744,8 @@ class ExternalDoubleArray: public ExternalArray {
// Casting.
static inline ExternalDoubleArray* cast(Object* obj);
-#ifdef OBJECT_PRINT
- inline void ExternalDoubleArrayPrint() {
- ExternalDoubleArrayPrint(stdout);
- }
- void ExternalDoubleArrayPrint(FILE* out);
-#endif // OBJECT_PRINT
+ // Dispatched behavior.
+ DECLARE_PRINTER(ExternalDoubleArray)
DECLARE_VERIFIER(ExternalDoubleArray)
private:
@@ -4071,7 +4823,8 @@ class DeoptimizationInputData: public FixedArray {
}
// Allocates a DeoptimizationInputData.
- MUST_USE_RESULT static MaybeObject* Allocate(int deopt_entry_count,
+ MUST_USE_RESULT static MaybeObject* Allocate(Isolate* isolate,
+ int deopt_entry_count,
PretenureFlag pretenure);
// Casting.
@@ -4117,7 +4870,8 @@ class DeoptimizationOutputData: public FixedArray {
}
// Allocates a DeoptimizationOutputData.
- MUST_USE_RESULT static MaybeObject* Allocate(int number_of_deopt_points,
+ MUST_USE_RESULT static MaybeObject* Allocate(Isolate* isolate,
+ int number_of_deopt_points,
PretenureFlag pretenure);
// Casting.
@@ -4130,7 +4884,8 @@ class DeoptimizationOutputData: public FixedArray {
// Forward declaration.
-class JSGlobalPropertyCell;
+class Cell;
+class PropertyCell;
// TypeFeedbackCells is a fixed array used to hold the association between
// cache cells and AST ids for code generated by the full compiler.
@@ -4147,8 +4902,8 @@ class TypeFeedbackCells: public FixedArray {
inline void SetAstId(int index, TypeFeedbackId id);
// Accessors for global property cells holding the cache values.
- inline JSGlobalPropertyCell* Cell(int index);
- inline void SetCell(int index, JSGlobalPropertyCell* cell);
+ inline Cell* GetCell(int index);
+ inline void SetCell(int index, Cell* cell);
// The object that indicates an uninitialized cache.
static inline Handle<Object> UninitializedSentinel(Isolate* isolate);
@@ -4156,6 +4911,11 @@ class TypeFeedbackCells: public FixedArray {
// The object that indicates a megamorphic state.
static inline Handle<Object> MegamorphicSentinel(Isolate* isolate);
+ // The object that indicates a monomorphic state of an Array with a given
+ // ElementsKind.
+ static inline Handle<Object> MonomorphicArraySentinel(Isolate* isolate,
+ ElementsKind elements_kind);
+
// A raw version of the uninitialized sentinel that's safe to read during
// garbage collection (e.g., for patching the cache).
static inline Object* RawUninitializedSentinel(Heap* heap);
@@ -4177,74 +4937,68 @@ class Code: public HeapObject {
public:
// Opaque data type for encapsulating code flags like kind, inline
// cache state, and arguments count.
- // FLAGS_MIN_VALUE and FLAGS_MAX_VALUE are specified to ensure that
- // enumeration type has correct value range (see Issue 830 for more details).
- enum Flags {
- FLAGS_MIN_VALUE = 0,
- FLAGS_MAX_VALUE = kMaxUInt32
- };
+ typedef uint32_t Flags;
+
+#define NON_IC_KIND_LIST(V) \
+ V(FUNCTION) \
+ V(OPTIMIZED_FUNCTION) \
+ V(STUB) \
+ V(HANDLER) \
+ V(BUILTIN) \
+ V(REGEXP)
+
+#define IC_KIND_LIST(V) \
+ V(LOAD_IC) \
+ V(KEYED_LOAD_IC) \
+ V(CALL_IC) \
+ V(KEYED_CALL_IC) \
+ V(STORE_IC) \
+ V(KEYED_STORE_IC) \
+ V(BINARY_OP_IC) \
+ V(COMPARE_IC) \
+ V(COMPARE_NIL_IC) \
+ V(TO_BOOLEAN_IC)
#define CODE_KIND_LIST(V) \
- V(FUNCTION) \
- V(OPTIMIZED_FUNCTION) \
- V(STUB) \
- V(BUILTIN) \
- V(LOAD_IC) \
- V(KEYED_LOAD_IC) \
- V(CALL_IC) \
- V(KEYED_CALL_IC) \
- V(STORE_IC) \
- V(KEYED_STORE_IC) \
- V(UNARY_OP_IC) \
- V(BINARY_OP_IC) \
- V(COMPARE_IC) \
- V(TO_BOOLEAN_IC)
+ NON_IC_KIND_LIST(V) \
+ IC_KIND_LIST(V)
enum Kind {
#define DEFINE_CODE_KIND_ENUM(name) name,
CODE_KIND_LIST(DEFINE_CODE_KIND_ENUM)
#undef DEFINE_CODE_KIND_ENUM
-
- // Pseudo-kinds.
- LAST_CODE_KIND = TO_BOOLEAN_IC,
- REGEXP = BUILTIN,
- FIRST_IC_KIND = LOAD_IC,
- LAST_IC_KIND = TO_BOOLEAN_IC
+ NUMBER_OF_KINDS
};
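+ // With the lists above, CODE_KIND_LIST(DEFINE_CODE_KIND_ENUM) expands to:
+ //   FUNCTION, OPTIMIZED_FUNCTION, STUB, HANDLER, BUILTIN, REGEXP,
+ //   LOAD_IC, KEYED_LOAD_IC, CALL_IC, KEYED_CALL_IC, STORE_IC,
+ //   KEYED_STORE_IC, BINARY_OP_IC, COMPARE_IC, COMPARE_NIL_IC, TO_BOOLEAN_IC,
+ // so NUMBER_OF_KINDS counts the sixteen kinds listed above.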
// No more than 16 kinds. The value is currently encoded in four bits in
// Flags.
- STATIC_ASSERT(LAST_CODE_KIND < 16);
+ STATIC_ASSERT(NUMBER_OF_KINDS <= 16);
+
+ static const char* Kind2String(Kind kind);
// Types of stubs.
enum StubType {
NORMAL,
FIELD,
- CONSTANT_FUNCTION,
+ CONSTANT,
CALLBACKS,
INTERCEPTOR,
- MAP_TRANSITION,
+ TRANSITION,
NONEXISTENT
};
- enum {
- NUMBER_OF_KINDS = LAST_IC_KIND + 1
- };
-
typedef int ExtraICState;
static const ExtraICState kNoExtraICState = 0;
+ static const int kPrologueOffsetNotSet = -1;
+
#ifdef ENABLE_DISASSEMBLER
// Printing
- static const char* Kind2String(Kind kind);
static const char* ICState2String(InlineCacheState state);
static const char* StubType2String(StubType type);
static void PrintExtraICState(FILE* out, Kind kind, ExtraICState extra);
- inline void Disassemble(const char* name) {
- Disassemble(name, stdout);
- }
- void Disassemble(const char* name, FILE* out);
+ void Disassemble(const char* name, FILE* out = stdout);
#endif // ENABLE_DISASSEMBLER
// [instruction_size]: Size of the native instructions
@@ -4261,9 +5015,19 @@ class Code: public HeapObject {
// [deoptimization_data]: Array containing data for deopt.
DECL_ACCESSORS(deoptimization_data, FixedArray)
- // [type_feedback_info]: Struct containing type feedback information.
- // Will contain either a TypeFeedbackInfo object, or undefined.
+ // [type_feedback_info]: This field stores various things, depending on the
+ // kind of the code object.
+ // FUNCTION => type feedback information.
+ // STUB => various things, e.g. a SMI
+ // OPTIMIZED_FUNCTION => the next_code_link for optimized code list.
DECL_ACCESSORS(type_feedback_info, Object)
+ inline void InitializeTypeFeedbackInfoNoWriteBarrier(Object* value);
+ inline int stub_info();
+ inline void set_stub_info(int info);
+
+ // [next_code_link]: Link for lists of optimized or deoptimized code.
+ // Note that storage for this field is overlapped with type_feedback_info.
+ DECL_ACCESSORS(next_code_link, Object)
 // [gc_metadata]: Field used to hold GC-related metadata. The contents of this
 // field do not have to be traced during garbage collection since
@@ -4275,9 +5039,13 @@ class Code: public HeapObject {
inline void set_ic_age(int count);
inline int ic_age();
+ // [prologue_offset]: Offset of the function prologue, used for aging
+ // FUNCTIONs and OPTIMIZED_FUNCTIONs.
+ inline int prologue_offset();
+ inline void set_prologue_offset(int offset);
+
// Unchecked accessors to be used during GC.
inline ByteArray* unchecked_relocation_info();
- inline FixedArray* unchecked_deoptimization_data();
inline int relocation_size();
@@ -4287,28 +5055,50 @@ class Code: public HeapObject {
// [flags]: Access to specific code flags.
inline Kind kind();
+ inline Kind handler_kind() {
+ return static_cast<Kind>(arguments_count());
+ }
inline InlineCacheState ic_state(); // Only valid for IC stubs.
inline ExtraICState extra_ic_state(); // Only valid for IC stubs.
+
+ inline ExtraICState extended_extra_ic_state(); // Only valid for
+ // non-call IC stubs.
+ static bool needs_extended_extra_ic_state(Kind kind) {
+ // TODO(danno): This is a bit of a hack right now since there are still
+ // clients of this API that pass "extra" values in for argc. These clients
+ // should be retrofitted to use ExtendedExtraICState.
+ return kind == COMPARE_NIL_IC || kind == TO_BOOLEAN_IC ||
+ kind == BINARY_OP_IC;
+ }
+
inline StubType type(); // Only valid for monomorphic IC stubs.
inline int arguments_count(); // Only valid for call IC stubs.
// Testers for IC stub kinds.
inline bool is_inline_cache_stub();
+ inline bool is_debug_stub();
+ inline bool is_handler() { return kind() == HANDLER; }
inline bool is_load_stub() { return kind() == LOAD_IC; }
inline bool is_keyed_load_stub() { return kind() == KEYED_LOAD_IC; }
inline bool is_store_stub() { return kind() == STORE_IC; }
inline bool is_keyed_store_stub() { return kind() == KEYED_STORE_IC; }
inline bool is_call_stub() { return kind() == CALL_IC; }
inline bool is_keyed_call_stub() { return kind() == KEYED_CALL_IC; }
- inline bool is_unary_op_stub() { return kind() == UNARY_OP_IC; }
inline bool is_binary_op_stub() { return kind() == BINARY_OP_IC; }
inline bool is_compare_ic_stub() { return kind() == COMPARE_IC; }
+ inline bool is_compare_nil_ic_stub() { return kind() == COMPARE_NIL_IC; }
inline bool is_to_boolean_ic_stub() { return kind() == TO_BOOLEAN_IC; }
+ inline bool is_keyed_stub();
// [major_key]: For kind STUB or BINARY_OP_IC, the major key.
inline int major_key();
inline void set_major_key(int value);
+ // For kind STUB or ICs, tells whether or not a code object was generated by
+ // the optimizing compiler (but it may not be an optimized function).
+ bool is_crankshafted();
+ inline void set_is_crankshafted(bool value);
+
// For stubs, tells whether they should always exist, so that they can be
// called from other stubs.
inline bool is_pregenerated();
@@ -4355,75 +5145,74 @@ class Code: public HeapObject {
inline unsigned safepoint_table_offset();
inline void set_safepoint_table_offset(unsigned offset);
- // [stack_check_table_start]: For kind FUNCTION, the offset in the
- // instruction stream where the stack check table starts.
- inline unsigned stack_check_table_offset();
- inline void set_stack_check_table_offset(unsigned offset);
+ // [back_edge_table_start]: For kind FUNCTION, the offset in the
+ // instruction stream where the back edge table starts.
+ inline unsigned back_edge_table_offset();
+ inline void set_back_edge_table_offset(unsigned offset);
+
+ inline bool back_edges_patched_for_osr();
+ inline void set_back_edges_patched_for_osr(bool value);
// [check type]: For kind CALL_IC, tells how to check if the
// receiver is valid for the given call.
inline CheckType check_type();
inline void set_check_type(CheckType value);
- // [type-recording unary op type]: For kind UNARY_OP_IC.
- inline byte unary_op_type();
- inline void set_unary_op_type(byte value);
-
- // [type-recording binary op type]: For kind BINARY_OP_IC.
- inline byte binary_op_type();
- inline void set_binary_op_type(byte value);
- inline byte binary_op_result_type();
- inline void set_binary_op_result_type(byte value);
-
- // [compare state]: For kind COMPARE_IC, tells what state the stub is in.
- inline byte compare_state();
- inline void set_compare_state(byte value);
-
- // [compare_operation]: For kind COMPARE_IC tells what compare operation the
- // stub was generated for.
- inline byte compare_operation();
- inline void set_compare_operation(byte value);
-
 // [to_boolean_state]: For kind TO_BOOLEAN_IC, tells what state the stub is in.
inline byte to_boolean_state();
- inline void set_to_boolean_state(byte value);
 // [has_function_cache]: For kind STUB, tells whether a function cache
 // is passed to the stub.
inline bool has_function_cache();
inline void set_has_function_cache(bool flag);
- bool allowed_in_shared_map_code_cache();
+
+ // [marked_for_deoptimization]: For kind OPTIMIZED_FUNCTION tells whether
+ // the code is going to be deoptimized because of dead embedded maps.
+ inline bool marked_for_deoptimization();
+ inline void set_marked_for_deoptimization(bool flag);
// Get the safepoint entry for the given pc.
SafepointEntry GetSafepointEntry(Address pc);
- // Mark this code object as not having a stack check table. Assumes kind
- // is FUNCTION.
- void SetNoStackCheckTable();
+ // Find an object in a stub with a specified map.
+ Object* FindNthObject(int n, Map* match_map);
+ void ReplaceNthObject(int n, Map* match_map, Object* replace_with);
// Find the first map in an IC stub.
Map* FindFirstMap();
+ void FindAllMaps(MapHandleList* maps);
+ void ReplaceFirstMap(Map* replace);
- class ExtraICStateStrictMode: public BitField<StrictModeFlag, 0, 1> {};
- class ExtraICStateKeyedAccessGrowMode:
- public BitField<KeyedAccessGrowMode, 1, 1> {}; // NOLINT
+ // Find the first handler in an IC stub.
+ Code* FindFirstHandler();
+
+ // Find |length| handlers and put them into |code_list|. Returns false if not
+ // enough handlers can be found.
+ bool FindHandlers(CodeHandleList* code_list, int length = -1);
+
+ // Find the first name in an IC stub.
+ Name* FindFirstName();
+
+ void ReplaceNthCell(int n, Cell* replace_with);
- static const int kExtraICStateGrowModeShift = 1;
+ class ExtraICStateStrictMode: public BitField<StrictModeFlag, 0, 1> {};
+ class ExtraICStateKeyedAccessStoreMode:
+ public BitField<KeyedAccessStoreMode, 1, 4> {}; // NOLINT
static inline StrictModeFlag GetStrictMode(ExtraICState extra_ic_state) {
return ExtraICStateStrictMode::decode(extra_ic_state);
}
- static inline KeyedAccessGrowMode GetKeyedAccessGrowMode(
+ static inline KeyedAccessStoreMode GetKeyedAccessStoreMode(
ExtraICState extra_ic_state) {
- return ExtraICStateKeyedAccessGrowMode::decode(extra_ic_state);
+ return ExtraICStateKeyedAccessStoreMode::decode(extra_ic_state);
}
static inline ExtraICState ComputeExtraICState(
- KeyedAccessGrowMode grow_mode,
+ KeyedAccessStoreMode store_mode,
StrictModeFlag strict_mode) {
- return ExtraICStateKeyedAccessGrowMode::encode(grow_mode) |
+ return ExtraICStateKeyedAccessStoreMode::encode(store_mode) |
ExtraICStateStrictMode::encode(strict_mode);
}
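+ // For example (assuming the STANDARD_STORE and kStrictMode enumerators
+ // declared elsewhere in this header), ComputeExtraICState(STANDARD_STORE,
+ // kStrictMode) packs the strict-mode flag into bit 0 and the store mode
+ // into bits 1-4 of the resulting ExtraICState.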
@@ -4438,16 +5227,17 @@ class Code: public HeapObject {
static inline Flags ComputeMonomorphicFlags(
Kind kind,
- StubType type,
ExtraICState extra_ic_state = kNoExtraICState,
- InlineCacheHolderFlag holder = OWN_MAP,
- int argc = -1);
+ StubType type = NORMAL,
+ int argc = -1,
+ InlineCacheHolderFlag holder = OWN_MAP);
static inline InlineCacheState ExtractICStateFromFlags(Flags flags);
static inline StubType ExtractTypeFromFlags(Flags flags);
static inline Kind ExtractKindFromFlags(Flags flags);
static inline InlineCacheHolderFlag ExtractCacheHolderFromFlags(Flags flags);
static inline ExtraICState ExtractExtraICStateFromFlags(Flags flags);
+ static inline ExtraICState ExtractExtendedExtraICStateFromFlags(Flags flags);
static inline int ExtractArgumentsCountFromFlags(Flags flags);
static inline Flags RemoveTypeFromFlags(Flags flags);
@@ -4511,17 +5301,52 @@ class Code: public HeapObject {
template<typename StaticVisitor>
inline void CodeIterateBody(Heap* heap);
-#ifdef OBJECT_PRINT
- inline void CodePrint() {
- CodePrint(stdout);
- }
- void CodePrint(FILE* out);
-#endif
+
+ DECLARE_PRINTER(Code)
DECLARE_VERIFIER(Code)
void ClearInlineCaches();
void ClearTypeFeedbackCells(Heap* heap);
+ BailoutId TranslatePcOffsetToAstId(uint32_t pc_offset);
+
+#define DECLARE_CODE_AGE_ENUM(X) k##X##CodeAge,
+ enum Age {
+ kNotExecutedCodeAge = -2,
+ kExecutedOnceCodeAge = -1,
+ kNoAgeCodeAge = 0,
+ CODE_AGE_LIST(DECLARE_CODE_AGE_ENUM)
+ kAfterLastCodeAge,
+ kLastCodeAge = kAfterLastCodeAge - 1,
+ kCodeAgeCount = kAfterLastCodeAge - 1,
+ kIsOldCodeAge = kSexagenarianCodeAge,
+ kPreAgedCodeAge = kIsOldCodeAge - 1
+ };
+#undef DECLARE_CODE_AGE_ENUM
+
+ // Code aging. Indicates how many full GCs this code has survived without
+ // being entered through the prologue. Used to determine when it is
+ // relatively safe to flush this code object and replace it with the lazy
+ // compilation stub.
+ static void MakeCodeAgeSequenceYoung(byte* sequence, Isolate* isolate);
+ static void MarkCodeAsExecuted(byte* sequence, Isolate* isolate);
+ void MakeOlder(MarkingParity);
+ static bool IsYoungSequence(byte* sequence);
+ bool IsOld();
+ Age GetAge();
+ static inline Code* GetPreAgedCodeAgeStub(Isolate* isolate) {
+ return GetCodeAgeStub(isolate, kNotExecutedCodeAge, NO_MARKING_PARITY);
+ }
+
+ void PrintDeoptLocation(int bailout_id);
+ bool CanDeoptAt(Address pc);
+
+#ifdef VERIFY_HEAP
+ void VerifyEmbeddedObjectsDependency();
+#endif
+
+ static bool IsWeakEmbeddedObject(Kind kind, Object* object);
+
 // Max loop nesting marker used to postpone OSR. We don't take loop
// nesting that is deeper than 5 levels into account.
static const int kMaxLoopNestingMarker = 6;
@@ -4534,6 +5359,7 @@ class Code: public HeapObject {
kHandlerTableOffset + kPointerSize;
static const int kTypeFeedbackInfoOffset =
kDeoptimizationDataOffset + kPointerSize;
+ static const int kNextCodeLinkOffset = kTypeFeedbackInfoOffset; // Shared.
static const int kGCMetadataOffset = kTypeFeedbackInfoOffset + kPointerSize;
static const int kICAgeOffset =
kGCMetadataOffset + kPointerSize;
@@ -4541,8 +5367,10 @@ class Code: public HeapObject {
static const int kKindSpecificFlags1Offset = kFlagsOffset + kIntSize;
static const int kKindSpecificFlags2Offset =
kKindSpecificFlags1Offset + kIntSize;
+ // Note: We might be able to squeeze this into the flags above.
+ static const int kPrologueOffset = kKindSpecificFlags2Offset + kIntSize;
- static const int kHeaderPaddingStart = kKindSpecificFlags2Offset + kIntSize;
+ static const int kHeaderPaddingStart = kPrologueOffset + kIntSize;
// Add padding to align the instruction start following right after
// the Code object header.
@@ -4567,70 +5395,51 @@ class Code: public HeapObject {
class TypeField: public BitField<StubType, 3, 3> {};
class CacheHolderField: public BitField<InlineCacheHolderFlag, 6, 1> {};
class KindField: public BitField<Kind, 7, 4> {};
- class ExtraICStateField: public BitField<ExtraICState, 11, 2> {};
- class IsPregeneratedField: public BitField<bool, 13, 1> {};
+ class IsPregeneratedField: public BitField<bool, 11, 1> {};
+ class ExtraICStateField: public BitField<ExtraICState, 12, 5> {};
+ class ExtendedExtraICStateField: public BitField<ExtraICState, 12,
+ PlatformSmiTagging::kSmiValueSize - 12 + 1> {}; // NOLINT
+ STATIC_ASSERT(ExtraICStateField::kShift == ExtendedExtraICStateField::kShift);
// KindSpecificFlags1 layout (STUB and OPTIMIZED_FUNCTION)
static const int kStackSlotsFirstBit = 0;
static const int kStackSlotsBitCount = 24;
- static const int kUnaryOpTypeFirstBit =
- kStackSlotsFirstBit + kStackSlotsBitCount;
- static const int kUnaryOpTypeBitCount = 3;
- static const int kBinaryOpTypeFirstBit =
- kStackSlotsFirstBit + kStackSlotsBitCount;
- static const int kBinaryOpTypeBitCount = 3;
- static const int kBinaryOpResultTypeFirstBit =
- kBinaryOpTypeFirstBit + kBinaryOpTypeBitCount;
- static const int kBinaryOpResultTypeBitCount = 3;
- static const int kCompareStateFirstBit =
- kStackSlotsFirstBit + kStackSlotsBitCount;
- static const int kCompareStateBitCount = 3;
- static const int kCompareOperationFirstBit =
- kCompareStateFirstBit + kCompareStateBitCount;
- static const int kCompareOperationBitCount = 4;
- static const int kToBooleanStateFirstBit =
- kStackSlotsFirstBit + kStackSlotsBitCount;
- static const int kToBooleanStateBitCount = 8;
static const int kHasFunctionCacheFirstBit =
kStackSlotsFirstBit + kStackSlotsBitCount;
static const int kHasFunctionCacheBitCount = 1;
+ static const int kMarkedForDeoptimizationFirstBit =
+ kStackSlotsFirstBit + kStackSlotsBitCount + 1;
+ static const int kMarkedForDeoptimizationBitCount = 1;
STATIC_ASSERT(kStackSlotsFirstBit + kStackSlotsBitCount <= 32);
- STATIC_ASSERT(kUnaryOpTypeFirstBit + kUnaryOpTypeBitCount <= 32);
- STATIC_ASSERT(kBinaryOpTypeFirstBit + kBinaryOpTypeBitCount <= 32);
- STATIC_ASSERT(kBinaryOpResultTypeFirstBit +
- kBinaryOpResultTypeBitCount <= 32);
- STATIC_ASSERT(kCompareStateFirstBit + kCompareStateBitCount <= 32);
- STATIC_ASSERT(kCompareOperationFirstBit + kCompareOperationBitCount <= 32);
- STATIC_ASSERT(kToBooleanStateFirstBit + kToBooleanStateBitCount <= 32);
STATIC_ASSERT(kHasFunctionCacheFirstBit + kHasFunctionCacheBitCount <= 32);
+ STATIC_ASSERT(kMarkedForDeoptimizationFirstBit +
+ kMarkedForDeoptimizationBitCount <= 32);
class StackSlotsField: public BitField<int,
kStackSlotsFirstBit, kStackSlotsBitCount> {}; // NOLINT
- class UnaryOpTypeField: public BitField<int,
- kUnaryOpTypeFirstBit, kUnaryOpTypeBitCount> {}; // NOLINT
- class BinaryOpTypeField: public BitField<int,
- kBinaryOpTypeFirstBit, kBinaryOpTypeBitCount> {}; // NOLINT
- class BinaryOpResultTypeField: public BitField<int,
- kBinaryOpResultTypeFirstBit, kBinaryOpResultTypeBitCount> {}; // NOLINT
- class CompareStateField: public BitField<int,
- kCompareStateFirstBit, kCompareStateBitCount> {}; // NOLINT
- class CompareOperationField: public BitField<int,
- kCompareOperationFirstBit, kCompareOperationBitCount> {}; // NOLINT
- class ToBooleanStateField: public BitField<int,
- kToBooleanStateFirstBit, kToBooleanStateBitCount> {}; // NOLINT
class HasFunctionCacheField: public BitField<bool,
kHasFunctionCacheFirstBit, kHasFunctionCacheBitCount> {}; // NOLINT
+ class MarkedForDeoptimizationField: public BitField<bool,
+ kMarkedForDeoptimizationFirstBit,
+ kMarkedForDeoptimizationBitCount> {}; // NOLINT
+
+ // KindSpecificFlags2 layout (ALL)
+ static const int kIsCrankshaftedBit = 0;
+ class IsCrankshaftedField: public BitField<bool,
+ kIsCrankshaftedBit, 1> {}; // NOLINT
// KindSpecificFlags2 layout (STUB and OPTIMIZED_FUNCTION)
- static const int kStubMajorKeyFirstBit = 0;
+ static const int kStubMajorKeyFirstBit = kIsCrankshaftedBit + 1;
static const int kSafepointTableOffsetFirstBit =
kStubMajorKeyFirstBit + kStubMajorKeyBits;
- static const int kSafepointTableOffsetBitCount = 26;
+ static const int kSafepointTableOffsetBitCount = 25;
STATIC_ASSERT(kStubMajorKeyFirstBit + kStubMajorKeyBits <= 32);
STATIC_ASSERT(kSafepointTableOffsetFirstBit +
kSafepointTableOffsetBitCount <= 32);
+ STATIC_ASSERT(1 + kStubMajorKeyBits +
+ kSafepointTableOffsetBitCount <= 32);
class SafepointTableOffsetField: public BitField<int,
kSafepointTableOffsetFirstBit,
@@ -4639,24 +5448,143 @@ class Code: public HeapObject {
kStubMajorKeyFirstBit, kStubMajorKeyBits> {}; // NOLINT
// KindSpecificFlags2 layout (FUNCTION)
- class StackCheckTableOffsetField: public BitField<int, 0, 31> {};
+ class BackEdgeTableOffsetField: public BitField<int,
+ kIsCrankshaftedBit + 1, 29> {}; // NOLINT
+ class BackEdgesPatchedForOSRField: public BitField<bool,
+ kIsCrankshaftedBit + 1 + 29, 1> {}; // NOLINT
// Signed field cannot be encoded using the BitField class.
- static const int kArgumentsCountShift = 14;
+ static const int kArgumentsCountShift = 17;
static const int kArgumentsCountMask = ~((1 << kArgumentsCountShift) - 1);
static const int kArgumentsBits =
PlatformSmiTagging::kSmiValueSize - Code::kArgumentsCountShift + 1;
static const int kMaxArguments = (1 << kArgumentsBits) - 1;
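+ // For instance, assuming a 32-bit configuration where
+ // PlatformSmiTagging::kSmiValueSize is 31, kArgumentsBits is
+ // 31 - 17 + 1 = 15 and kMaxArguments is (1 << 15) - 1 = 32767.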
+ // ICs can use either argument count or ExtendedExtraIC, since their storage
+ // overlaps.
+ STATIC_ASSERT(ExtraICStateField::kShift +
+ ExtraICStateField::kSize + kArgumentsBits ==
+ ExtendedExtraICStateField::kShift +
+ ExtendedExtraICStateField::kSize);
+
// This constant should be encodable in an ARM instruction.
static const int kFlagsNotUsedInLookup =
TypeField::kMask | CacheHolderField::kMask;
private:
+ friend class RelocIterator;
+
+ // Code aging
+ byte* FindCodeAgeSequence();
+ static void GetCodeAgeAndParity(Code* code, Age* age,
+ MarkingParity* parity);
+ static void GetCodeAgeAndParity(byte* sequence, Age* age,
+ MarkingParity* parity);
+ static Code* GetCodeAgeStub(Isolate* isolate, Age age, MarkingParity parity);
+
+ // Code aging -- platform-specific
+ static void PatchPlatformCodeAge(Isolate* isolate,
+ byte* sequence, Age age,
+ MarkingParity parity);
+
DISALLOW_IMPLICIT_CONSTRUCTORS(Code);
};
+class CompilationInfo;
+
+// This class describes the layout of the dependent codes array of a map. The
+// array is partitioned into several groups of dependent codes. Each group
+// contains codes with the same dependency on the map. The array has the
+// following layout for n dependency groups:
+//
+// +----+----+-----+----+---------+----------+-----+---------+-----------+
+// | C1 | C2 | ... | Cn | group 1 | group 2 | ... | group n | undefined |
+// +----+----+-----+----+---------+----------+-----+---------+-----------+
+//
+// The first n elements are Smis, each of them specifies the number of codes
+// in the corresponding group. The subsequent elements contain grouped code
+// objects. The suffix of the array can be filled with the undefined value if
+// the number of codes is less than the length of the array. The order of the
+// code objects within a group is not preserved.
+//
+// All code indexes used in the class are counted starting from the first
+// code object of the first group. In other words, code index 0 corresponds
+// to array index n = kCodesStartIndex.
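+//
+// For example, with three groups holding 2, 0 and 1 code objects the array
+// would be [2, 0, 1, g1 code, g1 code, g3 code, undefined, ...]; the single
+// group 3 code object has code index 2 and lives at array index 3 + 2 = 5.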
+
+class DependentCode: public FixedArray {
+ public:
+ enum DependencyGroup {
+ // Group of code that weakly embeds this map and depends on being
+ // deoptimized when the map is garbage collected.
+ kWeaklyEmbeddedGroup,
+ // Group of code that embeds a transition to this map, and depends on being
+ // deoptimized when the transition is replaced by a new version.
+ kTransitionGroup,
+ // Group of code that omits run-time prototype checks for prototypes
+ // described by this map. The group is deoptimized whenever an object
+ // described by this map changes shape (and transitions to a new map),
+ // possibly invalidating the assumptions embedded in the code.
+ kPrototypeCheckGroup,
+ // Group of code that depends on elements not being added to objects with
+ // this map.
+ kElementsCantBeAddedGroup,
+ // Group of code that depends on global property values in property cells
+ // not being changed.
+ kPropertyCellChangedGroup,
+ kGroupCount = kPropertyCellChangedGroup + 1
+ };
+
+ // Array for holding the index of the first code object of each group.
+ // The last element stores the total number of code objects.
+ class GroupStartIndexes {
+ public:
+ explicit GroupStartIndexes(DependentCode* entries);
+ void Recompute(DependentCode* entries);
+ int at(int i) { return start_indexes_[i]; }
+ int number_of_entries() { return start_indexes_[kGroupCount]; }
+ private:
+ int start_indexes_[kGroupCount + 1];
+ };
+
+ bool Contains(DependencyGroup group, Code* code);
+ static Handle<DependentCode> Insert(Handle<DependentCode> entries,
+ DependencyGroup group,
+ Handle<Object> object);
+ void UpdateToFinishedCode(DependencyGroup group,
+ CompilationInfo* info,
+ Code* code);
+ void RemoveCompilationInfo(DependentCode::DependencyGroup group,
+ CompilationInfo* info);
+
+ void DeoptimizeDependentCodeGroup(Isolate* isolate,
+ DependentCode::DependencyGroup group);
+
+ // The following low-level accessors should only be used by this class
+ // and the mark compact collector.
+ inline int number_of_entries(DependencyGroup group);
+ inline void set_number_of_entries(DependencyGroup group, int value);
+ inline bool is_code_at(int i);
+ inline Code* code_at(int i);
+ inline CompilationInfo* compilation_info_at(int i);
+ inline void set_object_at(int i, Object* object);
+ inline Object** slot_at(int i);
+ inline Object* object_at(int i);
+ inline void clear_at(int i);
+ inline void copy(int from, int to);
+ static inline DependentCode* cast(Object* object);
+
+ static DependentCode* ForObject(Handle<HeapObject> object,
+ DependencyGroup group);
+
+ private:
+ // Make room at the end of the given group by moving out the first
+ // code objects of the subsequent groups.
+ inline void ExtendGroup(DependencyGroup group);
+ static const int kCodesStartIndex = kGroupCount;
+};
+
+
// All heap objects have a Map that describes their structure.
// A Map contains information about:
// - Size information about the object
@@ -4695,8 +5623,8 @@ class Map: public HeapObject {
inline void set_bit_field2(byte value);
// Bit field 3.
- inline int bit_field3();
- inline void set_bit_field3(int value);
+ inline uint32_t bit_field3();
+ inline void set_bit_field3(uint32_t bits);
class EnumLengthBits: public BitField<int, 0, 11> {};
class NumberOfOwnDescriptorsBits: public BitField<int, 11, 11> {};
@@ -4704,6 +5632,11 @@ class Map: public HeapObject {
class FunctionWithPrototype: public BitField<bool, 23, 1> {};
class DictionaryMap: public BitField<bool, 24, 1> {};
class OwnsDescriptors: public BitField<bool, 25, 1> {};
+ class IsObserved: public BitField<bool, 26, 1> {};
+ class Deprecated: public BitField<bool, 27, 1> {};
+ class IsFrozen: public BitField<bool, 28, 1> {};
+ class IsUnstable: public BitField<bool, 29, 1> {};
+ class IsMigrationTarget: public BitField<bool, 30, 1> {};
// Tells whether the object in the prototype property will be used
// for instances created from this function. If the prototype
@@ -4720,7 +5653,7 @@ class Map: public HeapObject {
inline bool function_with_prototype();
// Tells whether the instance with this map should be ignored by the
- // __proto__ accessor.
+ // Object.getPrototypeOf() function and the __proto__ accessor.
inline void set_is_hidden_prototype() {
set_bit_field(bit_field() | (1 << kIsHiddenPrototype));
}
@@ -4776,6 +5709,10 @@ class Map: public HeapObject {
inline void set_elements_kind(ElementsKind elements_kind) {
ASSERT(elements_kind < kElementsKindCount);
ASSERT(kElementsKindCount <= (1 << kElementsKindBitCount));
+ ASSERT(!is_observed() ||
+ elements_kind == DICTIONARY_ELEMENTS ||
+ elements_kind == NON_STRICT_ARGUMENTS_ELEMENTS ||
+ IsExternalArrayElementsKind(elements_kind));
set_bit_field2((bit_field2() & ~kElementsKindMask) |
(elements_kind << kElementsKindShift));
ASSERT(this->elements_kind() == elements_kind);
@@ -4804,6 +5741,10 @@ class Map: public HeapObject {
return IsFastDoubleElementsKind(elements_kind());
}
+ inline bool has_fast_elements() {
+ return IsFastElementsKind(elements_kind());
+ }
+
inline bool has_non_strict_arguments_elements() {
return elements_kind() == NON_STRICT_ARGUMENTS_ELEMENTS;
}
@@ -4831,13 +5772,60 @@ class Map: public HeapObject {
Map* transitioned_map);
inline void SetTransition(int transition_index, Map* target);
inline Map* GetTransition(int transition_index);
- MUST_USE_RESULT inline MaybeObject* AddTransition(String* key,
+
+ static Handle<TransitionArray> AddTransition(Handle<Map> map,
+ Handle<Name> key,
+ Handle<Map> target,
+ SimpleTransitionFlag flag);
+
+ MUST_USE_RESULT inline MaybeObject* AddTransition(Name* key,
Map* target,
SimpleTransitionFlag flag);
DECL_ACCESSORS(transitions, TransitionArray)
inline void ClearTransitions(Heap* heap,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+ void DeprecateTransitionTree();
+ void DeprecateTarget(Name* key, DescriptorArray* new_descriptors);
+
+ Map* FindRootMap();
+ Map* FindUpdatedMap(int verbatim, int length, DescriptorArray* descriptors);
+ Map* FindLastMatchMap(int verbatim, int length, DescriptorArray* descriptors);
+
+ int NumberOfFields();
+
+ bool InstancesNeedRewriting(Map* target,
+ int target_number_of_fields,
+ int target_inobject,
+ int target_unused);
+ static Handle<Map> GeneralizeAllFieldRepresentations(
+ Handle<Map> map,
+ Representation new_representation);
+ static Handle<Map> GeneralizeRepresentation(
+ Handle<Map> map,
+ int modify_index,
+ Representation new_representation,
+ StoreMode store_mode);
+ static Handle<Map> CopyGeneralizeAllRepresentations(
+ Handle<Map> map,
+ int modify_index,
+ StoreMode store_mode,
+ PropertyAttributes attributes,
+ const char* reason);
+
+ void PrintGeneralization(FILE* file,
+ const char* reason,
+ int modify_index,
+ int split,
+ int descriptors,
+ bool constant_to_field,
+ Representation old_representation,
+ Representation new_representation);
+
+ // Returns the constructor name (the name, possibly inferred, of the
+ // function that was used to instantiate the object).
+ String* constructor_name();
+
// Tells whether the map is attached to SharedFunctionInfo
// (for inobject slack tracking).
inline void set_attached_to_shared_function_info(bool value);
@@ -4862,14 +5850,15 @@ class Map: public HeapObject {
inline void set_is_access_check_needed(bool access_check_needed);
inline bool is_access_check_needed();
+ // Returns true if map has a non-empty stub code cache.
+ inline bool has_code_cache();
+
// [prototype]: implicit prototype object.
DECL_ACCESSORS(prototype, Object)
// [constructor]: points back to the function responsible for this map.
DECL_ACCESSORS(constructor, Object)
- inline JSFunction* unchecked_constructor();
-
// [instance descriptors]: describes the object.
DECL_ACCESSORS(instance_descriptors, DescriptorArray)
inline void InitializeDescriptors(DescriptorArray* descriptors);
@@ -4877,6 +5866,9 @@ class Map: public HeapObject {
// [stub cache]: contains stubs compiled for this map.
DECL_ACCESSORS(code_cache, Object)
+ // [dependent code]: list of optimized codes that have this map embedded.
+ DECL_ACCESSORS(dependent_code, DependentCode)
+
// [back pointer]: points back to the parent map from which a transition
// leads to this map. The field overlaps with prototype transitions and the
// back pointer will be moved into the prototype transitions array if
@@ -4918,19 +5910,18 @@ class Map: public HeapObject {
inline void SetNumberOfProtoTransitions(int value) {
FixedArray* cache = GetPrototypeTransitions();
ASSERT(cache->length() != 0);
- cache->set_unchecked(kProtoTransitionNumberOfEntriesOffset,
- Smi::FromInt(value));
+ cache->set(kProtoTransitionNumberOfEntriesOffset, Smi::FromInt(value));
}
// Lookup in the map's instance descriptors and fill out the result
// with the given holder if the name is found. The holder may be
// NULL when this function is used from the compiler.
inline void LookupDescriptor(JSObject* holder,
- String* name,
+ Name* name,
LookupResult* result);
inline void LookupTransition(JSObject* holder,
- String* name,
+ Name* name,
LookupResult* result);
// The size of transition arrays are limited so they do not end up in large
@@ -4953,7 +5944,7 @@ class Map: public HeapObject {
set_bit_field3(NumberOfOwnDescriptorsBits::update(bit_field3(), number));
}
- inline JSGlobalPropertyCell* RetrieveDescriptorsPointer();
+ inline Cell* RetrieveDescriptorsPointer();
int EnumLength() {
return EnumLengthBits::decode(bit_field3());
@@ -4968,18 +5959,43 @@ class Map: public HeapObject {
set_bit_field3(EnumLengthBits::update(bit_field3(), length));
}
-
inline bool owns_descriptors();
inline void set_owns_descriptors(bool is_shared);
-
+ inline bool is_observed();
+ inline void set_is_observed(bool is_observed);
+ inline void freeze();
+ inline bool is_frozen();
+ inline void mark_unstable();
+ inline bool is_stable();
+ inline void set_migration_target(bool value);
+ inline bool is_migration_target();
+ inline void deprecate();
+ inline bool is_deprecated();
+ inline bool CanBeDeprecated();
+ // Returns a non-deprecated version of the input. If the input was not
+ // deprecated, it is directly returned. Otherwise, the non-deprecated version
+ // is found by re-transitioning from the root of the transition tree using the
+ // descriptor array of the map. Returns NULL if no updated map is found.
+ Map* CurrentMapForDeprecated();
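+ // A sketch of the intended use (names as declared in this class):
+ //   if (map->is_deprecated()) {
+ //     Map* updated = map->CurrentMapForDeprecated();
+ //     if (updated != NULL) map = updated;  // NULL when no updated map exists
+ //   }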
+
+ static Handle<Map> RawCopy(Handle<Map> map, int instance_size);
MUST_USE_RESULT MaybeObject* RawCopy(int instance_size);
MUST_USE_RESULT MaybeObject* CopyWithPreallocatedFieldDescriptors();
+ static Handle<Map> CopyDropDescriptors(Handle<Map> map);
MUST_USE_RESULT MaybeObject* CopyDropDescriptors();
+ static Handle<Map> CopyReplaceDescriptors(Handle<Map> map,
+ Handle<DescriptorArray> descriptors,
+ TransitionFlag flag,
+ Handle<Name> name);
MUST_USE_RESULT MaybeObject* CopyReplaceDescriptors(
DescriptorArray* descriptors,
- String* name,
TransitionFlag flag,
- int descriptor_index);
+ Name* name = NULL,
+ SimpleTransitionFlag simple_flag = FULL_TRANSITION);
+ static Handle<Map> CopyInstallDescriptors(
+ Handle<Map> map,
+ int new_descriptor,
+ Handle<DescriptorArray> descriptors);
MUST_USE_RESULT MaybeObject* ShareDescriptor(DescriptorArray* descriptors,
Descriptor* descriptor);
MUST_USE_RESULT MaybeObject* CopyAddDescriptor(Descriptor* descriptor,
@@ -4991,22 +6007,25 @@ class Map: public HeapObject {
Descriptor* descriptor,
int index,
TransitionFlag flag);
+ MUST_USE_RESULT MaybeObject* AsElementsKind(ElementsKind kind);
+
MUST_USE_RESULT MaybeObject* CopyAsElementsKind(ElementsKind kind,
TransitionFlag flag);
- MUST_USE_RESULT MaybeObject* CopyNormalized(PropertyNormalizationMode mode,
- NormalizedMapSharingMode sharing);
+ static Handle<Map> CopyForObserved(Handle<Map> map);
+
+ static Handle<Map> CopyNormalized(Handle<Map> map,
+ PropertyNormalizationMode mode,
+ NormalizedMapSharingMode sharing);
inline void AppendDescriptor(Descriptor* desc,
const DescriptorArray::WhitenessWitness&);
// Returns a copy of the map, with all transitions dropped from the
// instance descriptors.
+ static Handle<Map> Copy(Handle<Map> map);
MUST_USE_RESULT MaybeObject* Copy();
- // Returns the property index for name (only valid for FAST MODE).
- int PropertyIndexFor(String* name);
-
// Returns the next free property index (only valid for FAST MODE).
int NextFreePropertyIndex();
@@ -5015,11 +6034,18 @@ class Map: public HeapObject {
int NumberOfDescribedProperties(DescriptorFlag which = OWN_DESCRIPTORS,
PropertyAttributes filter = NONE);
+ // Returns the number of slots allocated for the initial properties
+ // backing storage for instances of this map.
+ int InitialPropertiesLength() {
+ return pre_allocated_property_fields() + unused_property_fields() -
+ inobject_properties();
+ }
+
// Casting.
static inline Map* cast(Object* obj);
// Locate an accessor in the instance descriptor.
- AccessorDescriptor* FindAccessor(String* name);
+ AccessorDescriptor* FindAccessor(Name* name);
// Code cache operations.
@@ -5028,9 +6054,9 @@ class Map: public HeapObject {
// Update code cache.
static void UpdateCodeCache(Handle<Map> map,
- Handle<String> name,
+ Handle<Name> name,
Handle<Code> code);
- MUST_USE_RESULT MaybeObject* UpdateCodeCache(String* name, Code* code);
+ MUST_USE_RESULT MaybeObject* UpdateCodeCache(Name* name, Code* code);
// Extend the descriptor array of the map with the list of descriptors.
// In case of duplicates, the latest descriptor is used.
@@ -5040,14 +6066,14 @@ class Map: public HeapObject {
static void EnsureDescriptorSlack(Handle<Map> map, int slack);
// Returns the found code or undefined if absent.
- Object* FindInCodeCache(String* name, Code::Flags flags);
+ Object* FindInCodeCache(Name* name, Code::Flags flags);
// Returns the non-negative index of the code object if it is in the
// cache and -1 otherwise.
int IndexInCodeCache(Object* name, Code* code);
// Removes a code object from the code cache at the given index.
- void RemoveFromCodeCache(String* name, Code* code, int index);
+ void RemoveFromCodeCache(Name* name, Code* code, int index);
// Set all map transitions from this map to dead maps to null. Also clear
// back pointers in transition targets so that we do not process this map
@@ -5057,6 +6083,8 @@ class Map: public HeapObject {
// Computes a hash value for this map, to be used in HashTables and such.
int Hash();
+ bool EquivalentToForTransition(Map* other);
+
// Compares this map to another to see if they describe equivalent objects.
// If |mode| is set to CLEAR_INOBJECT_PROPERTIES, |other| is treated as if
// it had exactly zero inobject properties.
@@ -5084,17 +6112,38 @@ class Map: public HeapObject {
void ZapPrototypeTransitions();
void ZapTransitions();
- // Dispatched behavior.
-#ifdef OBJECT_PRINT
- inline void MapPrint() {
- MapPrint(stdout);
+ bool CanTransition() {
+ // Only JSObject and subtypes have map transitions and back pointers.
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_OBJECT_TYPE);
+ return instance_type() >= FIRST_JS_OBJECT_TYPE;
}
- void MapPrint(FILE* out);
-#endif
+
+ bool IsJSObjectMap() {
+ return instance_type() >= FIRST_JS_OBJECT_TYPE;
+ }
+
+ // Fires when the layout of an object with a leaf map changes.
+ // This includes adding transitions to the leaf map or changing
+ // the descriptor array.
+ inline void NotifyLeafMapLayoutChange();
+
+ inline bool CanOmitMapChecks();
+
+ void AddDependentCompilationInfo(DependentCode::DependencyGroup group,
+ CompilationInfo* info);
+
+ void AddDependentCode(DependentCode::DependencyGroup group,
+ Handle<Code> code);
+
+ bool IsMapInArrayPrototypeChain();
+
+ // Dispatched behavior.
+ DECLARE_PRINTER(Map)
DECLARE_VERIFIER(Map)
#ifdef VERIFY_HEAP
void SharedMapVerify();
+ void VerifyOmittedMapChecks();
#endif
inline int visitor_id();
@@ -5112,11 +6161,11 @@ class Map: public HeapObject {
// transitions are in the form of a map where the keys are prototype objects
 // and the values are the maps they are transitioned to.
static const int kMaxCachedPrototypeTransitions = 256;
-
- Map* GetPrototypeTransition(Object* prototype);
-
- MUST_USE_RESULT MaybeObject* PutPrototypeTransition(Object* prototype,
- Map* map);
+ static Handle<Map> GetPrototypeTransition(Handle<Map> map,
+ Handle<Object> prototype);
+ static Handle<Map> PutPrototypeTransition(Handle<Map> map,
+ Handle<Object> prototype,
+ Handle<Map> target_map);
static const int kMaxPreAllocatedPropertyFields = 255;
@@ -5136,9 +6185,9 @@ class Map: public HeapObject {
kConstructorOffset + kPointerSize;
static const int kDescriptorsOffset =
kTransitionsOrBackPointerOffset + kPointerSize;
- static const int kCodeCacheOffset =
- kDescriptorsOffset + kPointerSize;
- static const int kBitField3Offset = kCodeCacheOffset + kPointerSize;
+ static const int kCodeCacheOffset = kDescriptorsOffset + kPointerSize;
+ static const int kDependentCodeOffset = kCodeCacheOffset + kPointerSize;
+ static const int kBitField3Offset = kDependentCodeOffset + kPointerSize;
static const int kSize = kBitField3Offset + kPointerSize;
// Layout of pointer fields. Heap iteration code relies on them
@@ -5218,6 +6267,26 @@ class Struct: public HeapObject {
};
+// A simple one-element struct, useful where smis need to be boxed.
+class Box : public Struct {
+ public:
+ // [value]: the boxed contents.
+ DECL_ACCESSORS(value, Object)
+
+ static inline Box* cast(Object* obj);
+
+ // Dispatched behavior.
+ DECLARE_PRINTER(Box)
+ DECLARE_VERIFIER(Box)
+
+ static const int kValueOffset = HeapObject::kHeaderSize;
+ static const int kSize = kValueOffset + kPointerSize;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Box);
+};
+
+
// Script describes a script which has been added to the VM.
class Script: public Struct {
public:
@@ -5247,7 +6316,7 @@ class Script: public Struct {
DECL_ACCESSORS(name, Object)
// [id]: the script id.
- DECL_ACCESSORS(id, Object)
+ DECL_ACCESSORS(id, Smi)
// [line_offset]: script line offset in resource from where it was extracted.
DECL_ACCESSORS(line_offset, Smi)
@@ -5268,12 +6337,6 @@ class Script: public Struct {
// [type]: the script type.
DECL_ACCESSORS(type, Smi)
- // [compilation]: how the the script was compiled.
- DECL_ACCESSORS(compilation_type, Smi)
-
- // [is_compiled]: determines whether the script has already been compiled.
- DECL_ACCESSORS(compilation_state, Smi)
-
// [line_ends]: FixedArray of line ends positions.
DECL_ACCESSORS(line_ends, Object)
@@ -5285,18 +6348,33 @@ class Script: public Struct {
// function from which eval was called where eval was called.
DECL_ACCESSORS(eval_from_instructions_offset, Smi)
+ // [flags]: Holds an exciting bitfield.
+ DECL_ACCESSORS(flags, Smi)
+
+ // [compilation_type]: how the script was compiled. Encoded in the
+ // 'flags' field.
+ inline CompilationType compilation_type();
+ inline void set_compilation_type(CompilationType type);
+
+ // [compilation_state]: determines whether the script has already been
+ // compiled. Encoded in the 'flags' field.
+ inline CompilationState compilation_state();
+ inline void set_compilation_state(CompilationState state);
+
+ // [is_shared_cross_origin]: An opaque boolean set by the embedder via
+ // ScriptOrigin, and used by the embedder to make decisions about the
+ // script's level of privilege. V8 just passes this through. Encoded in
+ // the 'flags' field.
+ DECL_BOOLEAN_ACCESSORS(is_shared_cross_origin)
+
static inline Script* cast(Object* obj);
// If script source is an external string, check that the underlying
// resource is accessible. Otherwise, always return true.
inline bool HasValidSource();
-#ifdef OBJECT_PRINT
- inline void ScriptPrint() {
- ScriptPrint(stdout);
- }
- void ScriptPrint(FILE* out);
-#endif
+ // Dispatched behavior.
+ DECLARE_PRINTER(Script)
DECLARE_VERIFIER(Script)
static const int kSourceOffset = HeapObject::kHeaderSize;
@@ -5307,17 +6385,21 @@ class Script: public Struct {
static const int kContextOffset = kDataOffset + kPointerSize;
static const int kWrapperOffset = kContextOffset + kPointerSize;
static const int kTypeOffset = kWrapperOffset + kPointerSize;
- static const int kCompilationTypeOffset = kTypeOffset + kPointerSize;
- static const int kCompilationStateOffset =
- kCompilationTypeOffset + kPointerSize;
- static const int kLineEndsOffset = kCompilationStateOffset + kPointerSize;
+ static const int kLineEndsOffset = kTypeOffset + kPointerSize;
static const int kIdOffset = kLineEndsOffset + kPointerSize;
static const int kEvalFromSharedOffset = kIdOffset + kPointerSize;
static const int kEvalFrominstructionsOffsetOffset =
kEvalFromSharedOffset + kPointerSize;
- static const int kSize = kEvalFrominstructionsOffsetOffset + kPointerSize;
+ static const int kFlagsOffset =
+ kEvalFrominstructionsOffsetOffset + kPointerSize;
+ static const int kSize = kFlagsOffset + kPointerSize;
private:
+ // Bit positions in the flags field.
+ static const int kCompilationTypeBit = 0;
+ static const int kCompilationStateBit = 1;
+ static const int kIsSharedCrossOriginBit = 2;
+
DISALLOW_IMPLICIT_CONSTRUCTORS(Script);
};
@@ -5331,9 +6413,6 @@ class Script: public Struct {
//
// Installation of ids for the selected builtin functions is handled
// by the bootstrapper.
-//
-// NOTE: Order is important: math functions should be at the end of
-// the list and MathFloor should be the first math function.
#define FUNCTIONS_WITH_ID_LIST(V) \
V(Array.prototype, push, ArrayPush) \
V(Array.prototype, pop, ArrayPop) \
@@ -5357,18 +6436,18 @@ class Script: public Struct {
V(Math, pow, MathPow) \
V(Math, random, MathRandom) \
V(Math, max, MathMax) \
- V(Math, min, MathMin)
-
+ V(Math, min, MathMin) \
+ V(Math, imul, MathImul)
enum BuiltinFunctionId {
+ kArrayCode,
#define DECLARE_FUNCTION_ID(ignored1, ignore2, name) \
k##name,
FUNCTIONS_WITH_ID_LIST(DECLARE_FUNCTION_ID)
#undef DECLARE_FUNCTION_ID
// Fake id for a special case of Math.pow. Note, it continues the
// list of math functions.
- kMathPowHalf,
- kFirstMathFunctionId = kMathFloor
+ kMathPowHalf
};
@@ -5381,9 +6460,10 @@ class SharedFunctionInfo: public HeapObject {
// [code]: Function code.
DECL_ACCESSORS(code, Code)
+ inline void ReplaceCode(Code* code);
// [optimized_code_map]: Map from native context to optimized code
- // and a shared literals array or Smi 0 if none.
+ // and a shared literals array or Smi(0) if none.
DECL_ACCESSORS(optimized_code_map, Object)
// Returns index i of the entry with the specified context. At position
@@ -5398,12 +6478,29 @@ class SharedFunctionInfo: public HeapObject {
// Clear optimized code map.
void ClearOptimizedCodeMap();
+ // Removes a specific optimized code object from the optimized code map.
+ void EvictFromOptimizedCodeMap(Code* optimized_code, const char* reason);
+
+ // Trims the optimized code map after entries have been removed.
+ void TrimOptimizedCodeMap(int shrink_by);
+
// Add a new entry to the optimized code map.
+ MUST_USE_RESULT MaybeObject* AddToOptimizedCodeMap(Context* native_context,
+ Code* code,
+ FixedArray* literals);
static void AddToOptimizedCodeMap(Handle<SharedFunctionInfo> shared,
Handle<Context> native_context,
Handle<Code> code,
Handle<FixedArray> literals);
+
+ // Layout description of the optimized code map.
+ static const int kNextMapIndex = 0;
+ static const int kEntriesStart = 1;
static const int kEntryLength = 3;
+ static const int kFirstContextSlot = FixedArray::kHeaderSize + kPointerSize;
+ static const int kFirstCodeSlot = FixedArray::kHeaderSize + 2 * kPointerSize;
+ static const int kSecondEntryIndex = kEntryLength + kEntriesStart;
+ static const int kInitialLength = kEntriesStart + kEntryLength;
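
As a sanity check on the constants above: the optimized code map is a flat array where slot 0 links to the next map and each entry after that spans three slots (context, code, literals). A self-contained sketch of the index arithmetic (the helper names are illustrative, not part of the header):

    #include <cassert>

    // Layout constants mirroring the optimized code map above.
    constexpr int kNextMapIndex = 0;   // slot 0: link to the next map
    constexpr int kEntriesStart = 1;   // entries begin right after the link
    constexpr int kEntryLength = 3;    // each entry: context, code, literals

    // Array index of each slot of the i-th entry.
    inline int ContextSlot(int entry)  { return kEntriesStart + entry * kEntryLength + 0; }
    inline int CodeSlot(int entry)     { return kEntriesStart + entry * kEntryLength + 1; }
    inline int LiteralsSlot(int entry) { return kEntriesStart + entry * kEntryLength + 2; }

    int main() {
      // The second entry starts where kSecondEntryIndex says it should.
      assert(ContextSlot(1) == kEntriesStart + kEntryLength);
      assert(LiteralsSlot(0) == 3);
      return 0;
    }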
// [scope_info]: Scope info.
DECL_ACCESSORS(scope_info, ScopeInfo)
@@ -5411,8 +6508,6 @@ class SharedFunctionInfo: public HeapObject {
// [construct stub]: Code stub for constructing instances of this function.
DECL_ACCESSORS(construct_stub, Code)
- inline Code* unchecked_code();
-
// Returns if this function has been compiled to native code yet.
inline bool is_compiled();
@@ -5599,11 +6694,6 @@ class SharedFunctionInfo: public HeapObject {
inline int ast_node_count();
inline void set_ast_node_count(int count);
- // A counter used to determine when to stress the deoptimizer with a
- // deopt.
- inline int stress_deopt_counter();
- inline void set_stress_deopt_counter(int counter);
-
inline int profiler_ticks();
// Inline cache age is used to infer whether the function survived a context
@@ -5611,18 +6701,6 @@ class SharedFunctionInfo: public HeapObject {
inline int ic_age();
inline void set_ic_age(int age);
- // Add information on assignments of the form this.x = ...;
- void SetThisPropertyAssignmentsInfo(
- bool has_only_simple_this_property_assignments,
- FixedArray* this_property_assignments);
-
- // Clear information on assignments of the form this.x = ...;
- void ClearThisPropertyAssignmentsInfo();
-
- // Indicate that this function only consists of assignments of the form
- // this.x = y; where y is either a constant or refers to an argument.
- inline bool has_only_simple_this_property_assignments();
-
// Indicates if this function can be lazy compiled.
// This is used to determine if we can safely flush code from a function
// when doing GC if we expect that the function will no longer be used.
@@ -5634,14 +6712,6 @@ class SharedFunctionInfo: public HeapObject {
// iteration by the debugger).
DECL_BOOLEAN_ACCESSORS(allows_lazy_compilation_without_context)
- // Indicates how many full GCs this function has survived with assigned
- // code object. Used to determine when it is relatively safe to flush
- // this code object and replace it with lazy compilation stub.
- // Age is reset when GC notices that the code object is referenced
- // from the stack or compilation cache.
- inline int code_age();
- inline void set_code_age(int age);
-
// Indicates whether optimizations have been disabled for this
// shared function info. If a function is repeatedly optimized or if
// we cannot optimize the function we disable optimization to avoid
@@ -5701,6 +6771,12 @@ class SharedFunctionInfo: public HeapObject {
// Indicates that code for this function cannot be cached.
DECL_BOOLEAN_ACCESSORS(dont_cache)
+ // Indicates that code for this function cannot be flushed.
+ DECL_BOOLEAN_ACCESSORS(dont_flush)
+
+ // Indicates that this function is a generator.
+ DECL_BOOLEAN_ACCESSORS(is_generator)
+
// Indicates whether or not the code in the shared function support
// deoptimization.
inline bool has_deoptimization_support();
@@ -5710,31 +6786,15 @@ class SharedFunctionInfo: public HeapObject {
// Disable (further) attempted optimization of all functions sharing this
// shared function info.
- void DisableOptimization(const char* reason);
+ void DisableOptimization(BailoutReason reason);
+
+ inline BailoutReason DisableOptimizationReason();
// Lookup the bailout ID and ASSERT that it exists in the non-optimized
// code, returns whether it asserted (i.e., always true if assertions are
// disabled).
bool VerifyBailoutId(BailoutId id);
- // Check whether a inlined constructor can be generated with the given
- // prototype.
- bool CanGenerateInlineConstructor(Object* prototype);
-
- // Prevents further attempts to generate inline constructors.
- // To be called if generation failed for any reason.
- void ForbidInlineConstructor();
-
- // For functions which only contains this property assignments this provides
- // access to the names for the properties assigned.
- DECL_ACCESSORS(this_property_assignments, Object)
- inline int this_property_assignments_count();
- inline void set_this_property_assignments_count(int value);
- String* GetThisPropertyAssignmentName(int index);
- bool IsThisPropertyAssignmentArgument(int index);
- int GetThisPropertyAssignmentArgument(int index);
- Object* GetThisPropertyAssignmentConstant(int index);
-
// [source code]: Source code for the function.
bool HasSourceCode();
Handle<Object> GetSourceCode();
@@ -5759,6 +6819,21 @@ class SharedFunctionInfo: public HeapObject {
inline void set_counters(int value);
inline int counters();
+ // Stores opt_count and bailout_reason as bit-fields.
+ inline void set_opt_count_and_bailout_reason(int value);
+ inline int opt_count_and_bailout_reason();
+
+ void set_bailout_reason(BailoutReason reason) {
+ set_opt_count_and_bailout_reason(
+ DisabledOptimizationReasonBits::update(opt_count_and_bailout_reason(),
+ reason));
+ }
+
+ void set_dont_optimize_reason(BailoutReason reason) {
+ set_bailout_reason(reason);
+ set_dont_optimize(reason != kNoReason);
+ }
+
// Source size of this function.
int SourceSize();
@@ -5771,12 +6846,7 @@ class SharedFunctionInfo: public HeapObject {
// Dispatched behavior.
// Set max_length to -1 for unlimited length.
void SourceCodePrint(StringStream* accumulator, int max_length);
-#ifdef OBJECT_PRINT
- inline void SharedFunctionInfoPrint() {
- SharedFunctionInfoPrint(stdout);
- }
- void SharedFunctionInfoPrint(FILE* out);
-#endif
+ DECLARE_PRINTER(SharedFunctionInfo)
DECLARE_VERIFIER(SharedFunctionInfo)
void ResetForNewContext(int new_ic_age);
@@ -5809,12 +6879,10 @@ class SharedFunctionInfo: public HeapObject {
static const int kInferredNameOffset = kDebugInfoOffset + kPointerSize;
static const int kInitialMapOffset =
kInferredNameOffset + kPointerSize;
- static const int kThisPropertyAssignmentsOffset =
- kInitialMapOffset + kPointerSize;
// ast_node_count is a Smi field. It could be grouped with another Smi field
// into a PSEUDO_SMI_ACCESSORS pair (on x64), if one becomes available.
static const int kAstNodeCountOffset =
- kThisPropertyAssignmentsOffset + kPointerSize;
+ kInitialMapOffset + kPointerSize;
#if V8_HOST_ARCH_32_BIT
// Smi fields.
static const int kLengthOffset =
@@ -5832,15 +6900,13 @@ class SharedFunctionInfo: public HeapObject {
kEndPositionOffset + kPointerSize;
static const int kCompilerHintsOffset =
kFunctionTokenPositionOffset + kPointerSize;
- static const int kThisPropertyAssignmentsCountOffset =
+ static const int kOptCountAndBailoutReasonOffset =
kCompilerHintsOffset + kPointerSize;
- static const int kOptCountOffset =
- kThisPropertyAssignmentsCountOffset + kPointerSize;
- static const int kCountersOffset = kOptCountOffset + kPointerSize;
- static const int kStressDeoptCounterOffset = kCountersOffset + kPointerSize;
+ static const int kCountersOffset =
+ kOptCountAndBailoutReasonOffset + kPointerSize;
// Total size.
- static const int kSize = kStressDeoptCounterOffset + kPointerSize;
+ static const int kSize = kCountersOffset + kPointerSize;
#else
// The only reason to use smi fields instead of int fields
// is to allow iteration without maps decoding during
@@ -5871,16 +6937,14 @@ class SharedFunctionInfo: public HeapObject {
static const int kCompilerHintsOffset =
kFunctionTokenPositionOffset + kIntSize;
- static const int kThisPropertyAssignmentsCountOffset =
+ static const int kOptCountAndBailoutReasonOffset =
kCompilerHintsOffset + kIntSize;
- static const int kOptCountOffset =
- kThisPropertyAssignmentsCountOffset + kIntSize;
- static const int kCountersOffset = kOptCountOffset + kIntSize;
- static const int kStressDeoptCounterOffset = kCountersOffset + kIntSize;
+ static const int kCountersOffset =
+ kOptCountAndBailoutReasonOffset + kIntSize;
// Total size.
- static const int kSize = kStressDeoptCounterOffset + kIntSize;
+ static const int kSize = kCountersOffset + kIntSize;
#endif
@@ -5898,28 +6962,23 @@ class SharedFunctionInfo: public HeapObject {
static const int kAlignedSize = POINTER_SIZE_ALIGN(kSize);
typedef FixedBodyDescriptor<kNameOffset,
- kThisPropertyAssignmentsOffset + kPointerSize,
+ kInitialMapOffset + kPointerSize,
kSize> BodyDescriptor;
// Bit positions in start_position_and_type.
// The source code start position is in the 30 most significant bits of
// the start_position_and_type field.
- static const int kIsExpressionBit = 0;
- static const int kIsTopLevelBit = 1;
+ static const int kIsExpressionBit = 0;
+ static const int kIsTopLevelBit = 1;
static const int kStartPositionShift = 2;
- static const int kStartPositionMask = ~((1 << kStartPositionShift) - 1);
+ static const int kStartPositionMask = ~((1 << kStartPositionShift) - 1);
// Bit positions in compiler_hints.
- static const int kCodeAgeSize = 3;
- static const int kCodeAgeMask = (1 << kCodeAgeSize) - 1;
-
enum CompilerHints {
- kHasOnlySimpleThisPropertyAssignments,
kAllowLazyCompilation,
kAllowLazyCompilationWithoutContext,
kLiveObjectsMayExist,
- kCodeAgeShift,
- kOptimizationDisabled = kCodeAgeShift + kCodeAgeSize,
+ kOptimizationDisabled,
kStrictModeFunction,
kExtendedModeFunction,
kUsesArguments,
@@ -5932,6 +6991,8 @@ class SharedFunctionInfo: public HeapObject {
kDontOptimize,
kDontInline,
kDontCache,
+ kDontFlush,
+ kIsGenerator,
kCompilerHintsCount // Pseudo entry
};
@@ -5939,6 +7000,9 @@ class SharedFunctionInfo: public HeapObject {
class OptReenableTriesBits: public BitField<int, 4, 18> {};
class ICAgeBits: public BitField<int, 22, 8> {};
+ class OptCountBits: public BitField<int, 0, 22> {};
+ class DisabledOptimizationReasonBits: public BitField<int, 22, 8> {};
+
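The two classes just above pack the optimization count and the disable reason into a single word. A minimal sketch of the BitField encode/decode/update pattern they rely on (a simplified stand-in template, not the real utils.h helper):

    #include <cassert>

    // Simplified stand-in for V8's BitField<T, shift, size> helper.
    template <typename T, int shift, int size>
    struct BitField {
      static constexpr unsigned kMask = ((1u << size) - 1u) << shift;
      static unsigned encode(T value) {
        return static_cast<unsigned>(value) << shift;
      }
      static T decode(unsigned word) {
        return static_cast<T>((word & kMask) >> shift);
      }
      // Replace this field inside an existing word, leaving other bits intact.
      static unsigned update(unsigned word, T value) {
        return (word & ~kMask) | encode(value);
      }
    };

    using OptCountBits = BitField<int, 0, 22>;
    using ReasonBits = BitField<int, 22, 8>;

    int main() {
      unsigned word = 0;
      word = OptCountBits::update(word, 5);
      word = ReasonBits::update(word, 3);       // e.g. some BailoutReason value
      assert(OptCountBits::decode(word) == 5);  // count survives the reason update
      assert(ReasonBits::decode(word) == 3);
      return 0;
    }
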
private:
#if V8_HOST_ARCH_32_BIT
// On 32 bit platforms, compiler hints is a smi.
@@ -5992,6 +7056,73 @@ class SharedFunctionInfo: public HeapObject {
};
+class JSGeneratorObject: public JSObject {
+ public:
+ // [function]: The function corresponding to this generator object.
+ DECL_ACCESSORS(function, JSFunction)
+
+ // [context]: The context of the suspended computation.
+ DECL_ACCESSORS(context, Context)
+
+ // [receiver]: The receiver of the suspended computation.
+ DECL_ACCESSORS(receiver, Object)
+
+ // [continuation]: Offset into code of continuation.
+ //
+ // A positive offset indicates a suspended generator. The special
+ // kGeneratorExecuting and kGeneratorClosed values indicate that a generator
+ // cannot be resumed.
+ inline int continuation();
+ inline void set_continuation(int continuation);
+
+ // [operand_stack]: Saved operand stack.
+ DECL_ACCESSORS(operand_stack, FixedArray)
+
+ // [stack_handler_index]: Index of first stack handler in operand_stack, or -1
+ // if the captured activation had no stack handler.
+ inline int stack_handler_index();
+ inline void set_stack_handler_index(int stack_handler_index);
+
+ // Casting.
+ static inline JSGeneratorObject* cast(Object* obj);
+
+ // Dispatched behavior.
+ DECLARE_PRINTER(JSGeneratorObject)
+ DECLARE_VERIFIER(JSGeneratorObject)
+
+ // Magic sentinel values for the continuation.
+ static const int kGeneratorExecuting = -1;
+ static const int kGeneratorClosed = 0;
+
+ // Layout description.
+ static const int kFunctionOffset = JSObject::kHeaderSize;
+ static const int kContextOffset = kFunctionOffset + kPointerSize;
+ static const int kReceiverOffset = kContextOffset + kPointerSize;
+ static const int kContinuationOffset = kReceiverOffset + kPointerSize;
+ static const int kOperandStackOffset = kContinuationOffset + kPointerSize;
+ static const int kStackHandlerIndexOffset =
+ kOperandStackOffset + kPointerSize;
+ static const int kSize = kStackHandlerIndexOffset + kPointerSize;
+
+ // Resume mode, for use by runtime functions.
+ enum ResumeMode { NEXT, THROW };
+
+ // Yielding from a generator returns an object with the following inobject
+ // properties. See Context::generator_result_map() for the map.
+ static const int kResultValuePropertyIndex = 0;
+ static const int kResultDonePropertyIndex = 1;
+ static const int kResultPropertyCount = 2;
+
+ static const int kResultValuePropertyOffset = JSObject::kHeaderSize;
+ static const int kResultDonePropertyOffset =
+ kResultValuePropertyOffset + kPointerSize;
+ static const int kResultSize = kResultDonePropertyOffset + kPointerSize;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSGeneratorObject);
+};
+
+
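The continuation field of JSGeneratorObject does triple duty: a positive value is a resumable code offset, while -1 and 0 are the "executing" and "closed" sentinels. A small self-contained sketch of how a consumer might branch on it (the enum name is illustrative):

    #include <cassert>

    constexpr int kGeneratorExecuting = -1;
    constexpr int kGeneratorClosed = 0;

    enum class GeneratorState { kSuspended, kExecuting, kClosed };

    // Positive offsets mean "suspended here"; the two sentinel values mean the
    // generator cannot be resumed.
    inline GeneratorState Classify(int continuation) {
      if (continuation > 0) return GeneratorState::kSuspended;
      if (continuation == kGeneratorExecuting) return GeneratorState::kExecuting;
      return GeneratorState::kClosed;  // continuation == kGeneratorClosed
    }

    int main() {
      assert(Classify(128) == GeneratorState::kSuspended);
      assert(Classify(kGeneratorExecuting) == GeneratorState::kExecuting);
      assert(Classify(kGeneratorClosed) == GeneratorState::kClosed);
      return 0;
    }
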
// Representation for module instance objects.
class JSModule: public JSObject {
public:
@@ -6005,12 +7136,7 @@ class JSModule: public JSObject {
static inline JSModule* cast(Object* obj);
// Dispatched behavior.
-#ifdef OBJECT_PRINT
- inline void JSModulePrint() {
- JSModulePrint(stdout);
- }
- void JSModulePrint(FILE* out);
-#endif
+ DECLARE_PRINTER(JSModule)
DECLARE_VERIFIER(JSModule)
// Layout description.
@@ -6033,11 +7159,8 @@ class JSFunction: public JSObject {
// can be shared by instances.
DECL_ACCESSORS(shared, SharedFunctionInfo)
- inline SharedFunctionInfo* unchecked_shared();
-
// [context]: The context for this function.
inline Context* context();
- inline Object* unchecked_context();
inline void set_context(Object* context);
// [code]: The generated code object for this function. Executed
@@ -6046,10 +7169,9 @@ class JSFunction: public JSObject {
// 8.6.2, page 27.
inline Code* code();
inline void set_code(Code* code);
+ inline void set_code_no_write_barrier(Code* code);
inline void ReplaceCode(Code* code);
- inline Code* unchecked_code();
-
// Tells whether this function is builtin.
inline bool IsBuiltin();
@@ -6065,7 +7187,8 @@ class JSFunction: public JSObject {
// Mark this function for lazy recompilation. The function will be
// recompiled the next time it is executed.
void MarkForLazyRecompilation();
- void MarkForParallelRecompilation();
+ void MarkForConcurrentRecompilation();
+ void MarkInRecompileQueue();
// Helpers to compile this function. Returns true on success, false on
// failure (e.g., stack overflow during compilation).
@@ -6073,17 +7196,18 @@ class JSFunction: public JSObject {
ClearExceptionFlag flag);
static bool CompileLazy(Handle<JSFunction> function,
ClearExceptionFlag flag);
+ static Handle<Code> CompileOsr(Handle<JSFunction> function,
+ BailoutId osr_ast_id,
+ ClearExceptionFlag flag);
static bool CompileOptimized(Handle<JSFunction> function,
- BailoutId osr_ast_id,
ClearExceptionFlag flag);
// Tells whether or not the function is already marked for lazy
// recompilation.
inline bool IsMarkedForLazyRecompilation();
- inline bool IsMarkedForParallelRecompilation();
+ inline bool IsMarkedForConcurrentRecompilation();
- // Tells whether or not the function is on the parallel
- // recompilation queue.
+ // Tells whether or not the function is on the concurrent recompilation queue.
inline bool IsInRecompileQueue();
// Check whether or not this function is inlineable.
@@ -6124,8 +7248,10 @@ class JSFunction: public JSObject {
inline bool has_instance_prototype();
inline Object* prototype();
inline Object* instance_prototype();
- MUST_USE_RESULT MaybeObject* SetInstancePrototype(Object* value);
- MUST_USE_RESULT MaybeObject* SetPrototype(Object* value);
+ static void SetPrototype(Handle<JSFunction> function,
+ Handle<Object> value);
+ static void SetInstancePrototype(Handle<JSFunction> function,
+ Handle<Object> value);
// After prototype is removed, it will not be created when accessed, and
// [[Construct]] from this function will not be allowed.
@@ -6146,15 +7272,14 @@ class JSFunction: public JSObject {
// Returns if this function has been compiled to native code yet.
inline bool is_compiled();
- // [next_function_link]: Field for linking functions. This list is treated as
- // a weak list by the GC.
+ // [next_function_link]: Links functions into various lists, e.g. the list
+ // of optimized functions hanging off the native_context. The CodeFlusher
+ // uses this link to chain together flushing candidates. Treated weakly
+ // by the garbage collector.
DECL_ACCESSORS(next_function_link, Object)
// Prints the name of the function using PrintF.
- inline void PrintName() {
- PrintName(stdout);
- }
- void PrintName(FILE* out);
+ void PrintName(FILE* out = stdout);
// Casting.
static inline JSFunction* cast(Object* obj);
@@ -6164,12 +7289,7 @@ class JSFunction: public JSObject {
void JSFunctionIterateBody(int object_size, ObjectVisitor* v);
// Dispatched behavior.
-#ifdef OBJECT_PRINT
- inline void JSFunctionPrint() {
- JSFunctionPrint(stdout);
- }
- void JSFunctionPrint(FILE* out);
-#endif
+ DECLARE_PRINTER(JSFunction)
DECLARE_VERIFIER(JSFunction)
// Returns the number of allocated literals.
@@ -6178,6 +7298,9 @@ class JSFunction: public JSObject {
// Retrieve the native context from a function's literal array.
static Context* NativeContextFromLiterals(FixedArray* literals);
+ // Used for flags such as --hydrogen-filter.
+ bool PassesFilter(const char* raw_filter);
+
// Layout descriptors. The last property (from kNonWeakFieldsEndOffset to
// kSize) is weak and has special handling during garbage collection.
static const int kCodeEntryOffset = JSObject::kHeaderSize;
@@ -6223,12 +7346,7 @@ class JSGlobalProxy : public JSObject {
static inline JSGlobalProxy* cast(Object* obj);
// Dispatched behavior.
-#ifdef OBJECT_PRINT
- inline void JSGlobalProxyPrint() {
- JSGlobalProxyPrint(stdout);
- }
- void JSGlobalProxyPrint(FILE* out);
-#endif
+ DECLARE_PRINTER(JSGlobalProxy)
DECLARE_VERIFIER(JSGlobalProxy)
// Layout description.
@@ -6260,25 +7378,17 @@ class GlobalObject: public JSObject {
DECL_ACCESSORS(global_receiver, JSObject)
// Retrieve the property cell used to store a property.
- JSGlobalPropertyCell* GetPropertyCell(LookupResult* result);
+ PropertyCell* GetPropertyCell(LookupResult* result);
// This is like GetProperty, but is used when you know the lookup won't fail
// by throwing an exception. This is for the debug and builtins global
// objects, where it is known which properties can be expected to be present
// on the object.
- Object* GetPropertyNoExceptionThrown(String* key) {
+ Object* GetPropertyNoExceptionThrown(Name* key) {
Object* answer = GetProperty(key)->ToObjectUnchecked();
return answer;
}
- // Ensure that the global object has a cell for the given property name.
- static Handle<JSGlobalPropertyCell> EnsurePropertyCell(
- Handle<GlobalObject> global,
- Handle<String> name);
- // TODO(kmillikin): This function can be eliminated once the stub cache is
- // full handlified (and the static helper can be written directly).
- MUST_USE_RESULT MaybeObject* EnsurePropertyCell(String* name);
-
// Casting.
static inline GlobalObject* cast(Object* obj);
@@ -6300,13 +7410,12 @@ class JSGlobalObject: public GlobalObject {
// Casting.
static inline JSGlobalObject* cast(Object* obj);
+ // Ensure that the global object has a cell for the given property name.
+ static Handle<PropertyCell> EnsurePropertyCell(Handle<JSGlobalObject> global,
+ Handle<Name> name);
+
// Dispatched behavior.
-#ifdef OBJECT_PRINT
- inline void JSGlobalObjectPrint() {
- JSGlobalObjectPrint(stdout);
- }
- void JSGlobalObjectPrint(FILE* out);
-#endif
+ DECLARE_PRINTER(JSGlobalObject)
DECLARE_VERIFIER(JSGlobalObject)
// Layout description.
@@ -6333,12 +7442,7 @@ class JSBuiltinsObject: public GlobalObject {
static inline JSBuiltinsObject* cast(Object* obj);
// Dispatched behavior.
-#ifdef OBJECT_PRINT
- inline void JSBuiltinsObjectPrint() {
- JSBuiltinsObjectPrint(stdout);
- }
- void JSBuiltinsObjectPrint(FILE* out);
-#endif
+ DECLARE_PRINTER(JSBuiltinsObject)
DECLARE_VERIFIER(JSBuiltinsObject)
// Layout description. The size of the builtins object includes
@@ -6374,12 +7478,7 @@ class JSValue: public JSObject {
static inline JSValue* cast(Object* obj);
// Dispatched behavior.
-#ifdef OBJECT_PRINT
- inline void JSValuePrint() {
- JSValuePrint(stdout);
- }
- void JSValuePrint(FILE* out);
-#endif
+ DECLARE_PRINTER(JSValue)
DECLARE_VERIFIER(JSValue)
// Layout description.
@@ -6428,12 +7527,7 @@ class JSDate: public JSObject {
// Dispatched behavior.
-#ifdef OBJECT_PRINT
- inline void JSDatePrint() {
- JSDatePrint(stdout);
- }
- void JSDatePrint(FILE* out);
-#endif
+ DECLARE_PRINTER(JSDate)
DECLARE_VERIFIER(JSDate)
// The order is important. It must be kept in sync with date macros
@@ -6525,12 +7619,7 @@ class JSMessageObject: public JSObject {
static inline JSMessageObject* cast(Object* obj);
// Dispatched behavior.
-#ifdef OBJECT_PRINT
- inline void JSMessageObjectPrint() {
- JSMessageObjectPrint(stdout);
- }
- void JSMessageObjectPrint(FILE* out);
-#endif
+ DECLARE_PRINTER(JSMessageObject)
DECLARE_VERIFIER(JSMessageObject)
// Layout description.
@@ -6596,11 +7685,6 @@ class JSRegExp: public JSObject {
// Set implementation data after the object has been prepared.
inline void SetDataAt(int index, Object* value);
- // Used during GC when flushing code or setting age.
- inline Object* DataAtUnchecked(int index);
- inline void SetDataAtUnchecked(int index, Object* value, Heap* heap);
- inline Type TypeTagUnchecked();
-
static int code_index(bool is_ascii) {
if (is_ascii) {
return kIrregexpASCIICodeIndex;
@@ -6707,8 +7791,9 @@ class CompilationCacheShape : public BaseShape<HashTableKey*> {
return key->HashForObject(object);
}
- MUST_USE_RESULT static MaybeObject* AsObject(HashTableKey* key) {
- return key->AsObject();
+ MUST_USE_RESULT static MaybeObject* AsObject(Heap* heap,
+ HashTableKey* key) {
+ return key->AsObject(heap);
}
static const int kPrefixSize = 0;
@@ -6753,11 +7838,11 @@ class CodeCache: public Struct {
DECL_ACCESSORS(normal_type_cache, Object)
// Add the code object to the cache.
- MUST_USE_RESULT MaybeObject* Update(String* name, Code* code);
+ MUST_USE_RESULT MaybeObject* Update(Name* name, Code* code);
// Lookup code object in the cache. Returns code object if found and undefined
// if not.
- Object* Lookup(String* name, Code::Flags flags);
+ Object* Lookup(Name* name, Code::Flags flags);
// Get the internal index of a code object in the cache. Returns -1 if the
// code object is not in that cache. This index can be used to later call
@@ -6770,12 +7855,8 @@ class CodeCache: public Struct {
static inline CodeCache* cast(Object* obj);
-#ifdef OBJECT_PRINT
- inline void CodeCachePrint() {
- CodeCachePrint(stdout);
- }
- void CodeCachePrint(FILE* out);
-#endif
+ // Dispatched behavior.
+ DECLARE_PRINTER(CodeCache)
DECLARE_VERIFIER(CodeCache)
static const int kDefaultCacheOffset = HeapObject::kHeaderSize;
@@ -6784,10 +7865,10 @@ class CodeCache: public Struct {
static const int kSize = kNormalTypeCacheOffset + kPointerSize;
private:
- MUST_USE_RESULT MaybeObject* UpdateDefaultCache(String* name, Code* code);
- MUST_USE_RESULT MaybeObject* UpdateNormalTypeCache(String* name, Code* code);
- Object* LookupDefaultCache(String* name, Code::Flags flags);
- Object* LookupNormalTypeCache(String* name, Code::Flags flags);
+ MUST_USE_RESULT MaybeObject* UpdateDefaultCache(Name* name, Code* code);
+ MUST_USE_RESULT MaybeObject* UpdateNormalTypeCache(Name* name, Code* code);
+ Object* LookupDefaultCache(Name* name, Code::Flags flags);
+ Object* LookupNormalTypeCache(Name* name, Code::Flags flags);
// Code cache layout of the default cache. Elements are alternating name and
// code objects for non normal load/store/call IC's.
@@ -6813,8 +7894,9 @@ class CodeCacheHashTableShape : public BaseShape<HashTableKey*> {
return key->HashForObject(object);
}
- MUST_USE_RESULT static MaybeObject* AsObject(HashTableKey* key) {
- return key->AsObject();
+ MUST_USE_RESULT static MaybeObject* AsObject(Heap* heap,
+ HashTableKey* key) {
+ return key->AsObject(heap);
}
static const int kPrefixSize = 0;
@@ -6825,10 +7907,10 @@ class CodeCacheHashTableShape : public BaseShape<HashTableKey*> {
class CodeCacheHashTable: public HashTable<CodeCacheHashTableShape,
HashTableKey*> {
public:
- Object* Lookup(String* name, Code::Flags flags);
- MUST_USE_RESULT MaybeObject* Put(String* name, Code* code);
+ Object* Lookup(Name* name, Code::Flags flags);
+ MUST_USE_RESULT MaybeObject* Put(Name* name, Code* code);
- int GetIndex(String* name, Code::Flags flags);
+ int GetIndex(Name* name, Code::Flags flags);
void RemoveByIndex(int index);
static inline CodeCacheHashTable* cast(Object* obj);
@@ -6859,12 +7941,8 @@ class PolymorphicCodeCache: public Struct {
static inline PolymorphicCodeCache* cast(Object* obj);
-#ifdef OBJECT_PRINT
- inline void PolymorphicCodeCachePrint() {
- PolymorphicCodeCachePrint(stdout);
- }
- void PolymorphicCodeCachePrint(FILE* out);
-#endif
+ // Dispatched behavior.
+ DECLARE_PRINTER(PolymorphicCodeCache)
DECLARE_VERIFIER(PolymorphicCodeCache)
static const int kCacheOffset = HeapObject::kHeaderSize;
@@ -6912,12 +7990,8 @@ class TypeFeedbackInfo: public Struct {
static inline TypeFeedbackInfo* cast(Object* obj);
-#ifdef OBJECT_PRINT
- inline void TypeFeedbackInfoPrint() {
- TypeFeedbackInfoPrint(stdout);
- }
- void TypeFeedbackInfoPrint(FILE* out);
-#endif
+ // Dispatched behavior.
+ DECLARE_PRINTER(TypeFeedbackInfo)
DECLARE_VERIFIER(TypeFeedbackInfo)
static const int kStorage1Offset = HeapObject::kHeaderSize;
@@ -6943,6 +8017,100 @@ class TypeFeedbackInfo: public Struct {
};
+enum AllocationSiteMode {
+ DONT_TRACK_ALLOCATION_SITE,
+ TRACK_ALLOCATION_SITE,
+ LAST_ALLOCATION_SITE_MODE = TRACK_ALLOCATION_SITE
+};
+
+
+class AllocationSite: public Struct {
+ public:
+ static const uint32_t kMaximumArrayBytesToPretransition = 8 * 1024;
+
+ DECL_ACCESSORS(transition_info, Object)
+ // nested_site threads a list of sites that represent nested literals
+ // walked in a particular order. So [[1, 2], 1, 2] will have one
+ // nested_site, but [[1, 2], 3, [4]] will have a list of two.
+ DECL_ACCESSORS(nested_site, Object)
+ DECL_ACCESSORS(dependent_code, DependentCode)
+ DECL_ACCESSORS(weak_next, Object)
+
+ inline void Initialize();
+
+ bool HasNestedSites() {
+ return nested_site()->IsAllocationSite();
+ }
+
+ // This method is expensive; it should only be called for reporting.
+ bool IsNestedSite();
+
+ ElementsKind GetElementsKind() {
+ ASSERT(!SitePointsToLiteral());
+ return static_cast<ElementsKind>(Smi::cast(transition_info())->value());
+ }
+
+ void SetElementsKind(ElementsKind kind) {
+ set_transition_info(Smi::FromInt(static_cast<int>(kind)));
+ }
+
+ bool SitePointsToLiteral() {
+ // If transition_info is a smi, then it represents an ElementsKind
+ // for a constructed array. Otherwise, it must be a boilerplate
+ // for an object or array literal.
+ return transition_info()->IsJSArray() || transition_info()->IsJSObject();
+ }
+
+ DECLARE_PRINTER(AllocationSite)
+ DECLARE_VERIFIER(AllocationSite)
+
+ static inline AllocationSite* cast(Object* obj);
+ static inline AllocationSiteMode GetMode(
+ ElementsKind boilerplate_elements_kind);
+ static inline AllocationSiteMode GetMode(ElementsKind from, ElementsKind to);
+ static inline bool CanTrack(InstanceType type);
+
+ static const int kTransitionInfoOffset = HeapObject::kHeaderSize;
+ static const int kNestedSiteOffset = kTransitionInfoOffset + kPointerSize;
+ static const int kDependentCodeOffset = kNestedSiteOffset + kPointerSize;
+ static const int kWeakNextOffset = kDependentCodeOffset + kPointerSize;
+ static const int kSize = kWeakNextOffset + kPointerSize;
+
+ typedef FixedBodyDescriptor<HeapObject::kHeaderSize,
+ kDependentCodeOffset + kPointerSize,
+ kSize> BodyDescriptor;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(AllocationSite);
+};
+
+
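AllocationSite's transition_info above is overloaded: a Smi payload encodes the ElementsKind of a constructed array, while anything else is a literal boilerplate object. A self-contained sketch of that smi-vs-pointer dispatch using a toy tagged value (the tagging scheme and kind values here are placeholders):

    #include <cassert>
    #include <cstdint>

    // Toy tagged slot: low bit 0 means "smi payload", low bit 1 means pointer.
    struct TaggedSlot {
      uintptr_t bits;
      static TaggedSlot FromSmi(int value) {
        return { static_cast<uintptr_t>(value) << 1 };
      }
      static TaggedSlot FromPointer(const void* p) {
        return { reinterpret_cast<uintptr_t>(p) | 1u };
      }
      bool IsSmi() const { return (bits & 1u) == 0; }
      int ToSmi() const { return static_cast<int>(bits >> 1); }
    };

    // Mirrors SitePointsToLiteral(): a smi means "ElementsKind of a constructed
    // array"; a pointer means "boilerplate for an object or array literal".
    inline bool PointsToLiteral(TaggedSlot transition_info) {
      return !transition_info.IsSmi();
    }

    int main() {
      int boilerplate = 0;                       // stands in for a JSObject
      TaggedSlot kind = TaggedSlot::FromSmi(2);  // some packed ElementsKind
      TaggedSlot lit = TaggedSlot::FromPointer(&boilerplate);
      assert(!PointsToLiteral(kind) && kind.ToSmi() == 2);
      assert(PointsToLiteral(lit));
      return 0;
    }
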
+class AllocationMemento: public Struct {
+ public:
+ static const int kAllocationSiteOffset = HeapObject::kHeaderSize;
+ static const int kSize = kAllocationSiteOffset + kPointerSize;
+
+ DECL_ACCESSORS(allocation_site, Object)
+
+ bool IsValid() { return allocation_site()->IsAllocationSite(); }
+ AllocationSite* GetAllocationSite() {
+ ASSERT(IsValid());
+ return AllocationSite::cast(allocation_site());
+ }
+
+ DECLARE_PRINTER(AllocationMemento)
+ DECLARE_VERIFIER(AllocationMemento)
+
+ // Returns NULL if no AllocationMemento is available for the object.
+ static AllocationMemento* FindForJSObject(JSObject* object,
+ bool in_GC = false);
+ static inline AllocationMemento* cast(Object* obj);
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(AllocationMemento);
+};
+
+
// Representation of a slow alias as part of a non-strict arguments objects.
// For fast aliases (if HasNonStrictArgumentsElements()):
// - the parameter map contains an index into the context
@@ -6958,12 +8126,8 @@ class AliasedArgumentsEntry: public Struct {
static inline AliasedArgumentsEntry* cast(Object* obj);
-#ifdef OBJECT_PRINT
- inline void AliasedArgumentsEntryPrint() {
- AliasedArgumentsEntryPrint(stdout);
- }
- void AliasedArgumentsEntryPrint(FILE* out);
-#endif
+ // Dispatched behavior.
+ DECLARE_PRINTER(AliasedArgumentsEntry)
DECLARE_VERIFIER(AliasedArgumentsEntry)
static const int kAliasedContextSlot = HeapObject::kHeaderSize;
@@ -6982,30 +8146,15 @@ class StringHasher {
public:
explicit inline StringHasher(int length, uint32_t seed);
- // Returns true if the hash of this string can be computed without
- // looking at the contents.
- inline bool has_trivial_hash();
-
- // Add a character to the hash and update the array index calculation.
- inline void AddCharacter(uint32_t c);
-
- // Adds a character to the hash but does not update the array index
- // calculation. This can only be called when it has been verified
- // that the input is not an array index.
- inline void AddCharacterNoIndex(uint32_t c);
-
- // Add a character above 0xffff as a surrogate pair. These can get into
- // the hasher through the routines that take a UTF-8 string and make a symbol.
- void AddSurrogatePair(uc32 c);
- void AddSurrogatePairNoIndex(uc32 c);
-
- // Returns the value to store in the hash field of a string with
- // the given length and contents.
- uint32_t GetHashField();
+ template <typename schar>
+ static inline uint32_t HashSequentialString(const schar* chars,
+ int length,
+ uint32_t seed);
- // Returns true if the characters seen so far make up a legal array
- // index.
- bool is_array_index() { return is_array_index_; }
+ // Reads all the data, even for long strings, and computes the utf16 length.
+ static uint32_t ComputeUtf8Hash(Vector<const char> chars,
+ uint32_t seed,
+ int* utf16_length_out);
// Calculated hash value for a string consisting of 1 to
// String::kMaxArrayIndexSize digits with no leading zeros (except "0").
@@ -7017,51 +8166,36 @@ class StringHasher {
// use 27 instead.
static const int kZeroHash = 27;
- private:
- uint32_t array_index() {
- ASSERT(is_array_index());
- return array_index_;
- }
-
- inline uint32_t GetHash();
-
// Reusable parts of the hashing algorithm.
- INLINE(static uint32_t AddCharacterCore(uint32_t running_hash, uint32_t c));
+ INLINE(static uint32_t AddCharacterCore(uint32_t running_hash, uint16_t c));
INLINE(static uint32_t GetHashCore(uint32_t running_hash));
- int length_;
- uint32_t raw_running_hash_;
- uint32_t array_index_;
- bool is_array_index_;
- bool is_first_char_;
- friend class TwoCharHashTableKey;
-
- template <bool seq_ascii> friend class JsonParser;
-};
-
-
-class IncrementalAsciiStringHasher {
- public:
- explicit inline IncrementalAsciiStringHasher(uint32_t seed, char first_char);
- inline void AddCharacter(uc32 c);
- inline uint32_t GetHash();
+ protected:
+ // Returns the value to store in the hash field of a string with
+ // the given length and contents.
+ uint32_t GetHashField();
+ // Returns true if the hash of this string can be computed without
+ // looking at the contents.
+ inline bool has_trivial_hash();
+ // Adds a block of characters to the hash.
+ template<typename Char>
+ inline void AddCharacters(const Char* chars, int len);
private:
+ // Add a character to the hash.
+ inline void AddCharacter(uint16_t c);
+ // Update index. Returns true if string is still an index.
+ inline bool UpdateIndex(uint16_t c);
+
int length_;
uint32_t raw_running_hash_;
uint32_t array_index_;
bool is_array_index_;
- char first_char_;
+ bool is_first_char_;
+ DISALLOW_COPY_AND_ASSIGN(StringHasher);
};
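
AddCharacterCore/GetHashCore above split a running string hash into a per-character step and a finalization step. A self-contained sketch of that pattern in the Jenkins one-at-a-time style; the exact constants and the HashSequential helper are assumptions for illustration, not the header's implementation:

    #include <cstdint>
    #include <cstdio>

    // Per-character mixing step of a running hash.
    inline uint32_t AddCharacterCore(uint32_t running_hash, uint16_t c) {
      running_hash += c;
      running_hash += running_hash << 10;
      running_hash ^= running_hash >> 6;
      return running_hash;
    }

    // Finalization step applied once all characters have been mixed in.
    inline uint32_t GetHashCore(uint32_t running_hash) {
      running_hash += running_hash << 3;
      running_hash ^= running_hash >> 11;
      running_hash += running_hash << 15;
      return running_hash;
    }

    // Hash a sequential buffer the way a HashSequentialString-style helper
    // would: seed, feed each code unit through the per-character step, finalize.
    template <typename Char>
    uint32_t HashSequential(const Char* chars, int length, uint32_t seed) {
      uint32_t hash = seed;
      for (int i = 0; i < length; i++) {
        hash = AddCharacterCore(hash, static_cast<uint16_t>(chars[i]));
      }
      return GetHashCore(hash);
    }

    int main() {
      const char* s = "hello";
      std::printf("hash: %u\n", static_cast<unsigned>(HashSequential(s, 5, 0u)));
      return 0;
    }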
-// Calculates string hash.
-template <typename schar>
-inline uint32_t HashSequentialString(const schar* chars,
- int length,
- uint32_t seed);
-
-
// The characteristics of a string are stored in its map. Retrieving these
// few bits of information is moderately expensive, involving two memory
// loads where the second is dependent on the first. To improve efficiency
@@ -7087,7 +8221,7 @@ class StringShape BASE_EMBEDDED {
inline bool IsExternalTwoByte();
inline bool IsSequentialAscii();
inline bool IsSequentialTwoByte();
- inline bool IsSymbol();
+ inline bool IsInternalized();
inline StringRepresentationTag representation_tag();
inline uint32_t encoding_tag();
inline uint32_t full_representation_tag();
@@ -7111,6 +8245,121 @@ class StringShape BASE_EMBEDDED {
};
+// The Name abstract class captures anything that can be used as a property
+// name, i.e., strings and symbols. All names store a hash value.
+class Name: public HeapObject {
+ public:
+ // Get and set the hash field of the name.
+ inline uint32_t hash_field();
+ inline void set_hash_field(uint32_t value);
+
+ // Tells whether the hash code has been computed.
+ inline bool HasHashCode();
+
+ // Returns a hash value used for the property table.
+ inline uint32_t Hash();
+
+ // Equality operations.
+ inline bool Equals(Name* other);
+
+ // Conversion.
+ inline bool AsArrayIndex(uint32_t* index);
+
+ // Casting.
+ static inline Name* cast(Object* obj);
+
+ bool IsCacheable(Isolate* isolate);
+
+ DECLARE_PRINTER(Name)
+
+ // Layout description.
+ static const int kHashFieldOffset = HeapObject::kHeaderSize;
+ static const int kSize = kHashFieldOffset + kPointerSize;
+
+ // Mask constant for checking if a name has a computed hash code
+ // and if it is a string that is an array index. The least significant bit
+ // indicates whether a hash code has been computed. If the hash code has
+ // been computed the 2nd bit tells whether the string can be used as an
+ // array index.
+ static const int kHashNotComputedMask = 1;
+ static const int kIsNotArrayIndexMask = 1 << 1;
+ static const int kNofHashBitFields = 2;
+
+ // Shift constant retrieving hash code from hash field.
+ static const int kHashShift = kNofHashBitFields;
+
+ // Only these bits are relevant in the hash, since the top two are shifted
+ // out.
+ static const uint32_t kHashBitMask = 0xffffffffu >> kHashShift;
+
+ // Array index strings this short can keep their index in the hash field.
+ static const int kMaxCachedArrayIndexLength = 7;
+
+ // For strings which are array indexes the hash value has the string length
+ // mixed into the hash, mainly to avoid a hash value of zero which would be
+ // the case for the string '0'. 24 bits are used for the array index value.
+ static const int kArrayIndexValueBits = 24;
+ static const int kArrayIndexLengthBits =
+ kBitsPerInt - kArrayIndexValueBits - kNofHashBitFields;
+
+ STATIC_CHECK((kArrayIndexLengthBits > 0));
+
+ static const int kArrayIndexHashLengthShift =
+ kArrayIndexValueBits + kNofHashBitFields;
+
+ static const int kArrayIndexHashMask = (1 << kArrayIndexHashLengthShift) - 1;
+
+ static const int kArrayIndexValueMask =
+ ((1 << kArrayIndexValueBits) - 1) << kHashShift;
+
+ // Check that kMaxCachedArrayIndexLength + 1 is a power of two so we can
+ // use a mask to test whether the length of a string is less than or equal
+ // to kMaxCachedArrayIndexLength.
+ STATIC_CHECK(IS_POWER_OF_TWO(kMaxCachedArrayIndexLength + 1));
+
+ static const int kContainsCachedArrayIndexMask =
+ (~kMaxCachedArrayIndexLength << kArrayIndexHashLengthShift) |
+ kIsNotArrayIndexMask;
+
+ // Value of empty hash field indicating that the hash is not computed.
+ static const int kEmptyHashField =
+ kIsNotArrayIndexMask | kHashNotComputedMask;
+
+ protected:
+ static inline bool IsHashFieldComputed(uint32_t field);
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Name);
+};
+
+
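The masks above carve Name's 32-bit hash field into two status bits, a 24-bit cached array-index value, and a length part. A self-contained sketch of decoding such a field (the example field value is purely illustrative):

    #include <cassert>
    #include <cstdint>

    // Hash-field layout constants, mirroring Name above.
    constexpr uint32_t kHashNotComputedMask = 1;
    constexpr uint32_t kIsNotArrayIndexMask = 1u << 1;
    constexpr int kNofHashBitFields = 2;
    constexpr int kHashShift = kNofHashBitFields;
    constexpr int kArrayIndexValueBits = 24;
    constexpr uint32_t kArrayIndexValueMask =
        ((1u << kArrayIndexValueBits) - 1) << kHashShift;

    // The least significant bit set means "hash not computed yet".
    inline bool IsHashComputed(uint32_t field) {
      return (field & kHashNotComputedMask) == 0;
    }

    // Usable as an array index only if the hash is computed and bit 1 is clear.
    inline bool IsArrayIndex(uint32_t field) {
      return IsHashComputed(field) && (field & kIsNotArrayIndexMask) == 0;
    }

    // Extract the cached index value stored above the two status bits.
    inline uint32_t ArrayIndexValue(uint32_t field) {
      return (field & kArrayIndexValueMask) >> kHashShift;
    }

    int main() {
      // Illustrative field: index value 42 cached above the two status bits,
      // both status bits clear (hash computed, usable as an array index).
      uint32_t field = 42u << kHashShift;
      assert(IsArrayIndex(field) && ArrayIndexValue(field) == 42);
      return 0;
    }
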
+// ES6 symbols.
+class Symbol: public Name {
+ public:
+ // [name]: the print name of a symbol, or undefined if none.
+ DECL_ACCESSORS(name, Object)
+
+ // Casting.
+ static inline Symbol* cast(Object* obj);
+
+ // Dispatched behavior.
+ DECLARE_PRINTER(Symbol)
+ DECLARE_VERIFIER(Symbol)
+
+ // Layout description.
+ static const int kNameOffset = Name::kSize;
+ static const int kSize = kNameOffset + kPointerSize;
+
+ typedef FixedBodyDescriptor<kNameOffset, kNameOffset + kPointerSize, kSize>
+ BodyDescriptor;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Symbol);
+};
+
+
+class ConsString;
+
// The String abstract class captures JavaScript string values:
//
// Ecma-262:
@@ -7119,8 +8368,10 @@ class StringShape BASE_EMBEDDED {
// ordered sequence of zero or more 16-bit unsigned integer values.
//
// All string values have a length field.
-class String: public HeapObject {
+class String: public Name {
public:
+ enum Encoding { ONE_BYTE_ENCODING, TWO_BYTE_ENCODING };
+
// Representation of the flat content of a String.
// A non-flat string doesn't have flat content.
// A flat string has content that's encoded as a sequence of either
@@ -7135,11 +8386,11 @@ class String: public HeapObject {
// Returns true if the structure contains two-byte content.
bool IsTwoByte() { return state_ == TWO_BYTE; }
- // Return the ASCII content of the string. Only use if IsAscii() returns
+ // Return the one byte content of the string. Only use if IsAscii() returns
// true.
- Vector<const char> ToAsciiVector() {
+ Vector<const uint8_t> ToOneByteVector() {
ASSERT_EQ(ASCII, state_);
- return Vector<const char>::cast(buffer_);
+ return buffer_;
}
// Return the two-byte content of the string. Only use if IsTwoByte()
// returns true.
@@ -7152,15 +8403,15 @@ class String: public HeapObject {
enum State { NON_FLAT, ASCII, TWO_BYTE };
// Constructors only used by String::GetFlatContent().
- explicit FlatContent(Vector<const char> chars)
- : buffer_(Vector<const byte>::cast(chars)),
+ explicit FlatContent(Vector<const uint8_t> chars)
+ : buffer_(chars),
state_(ASCII) { }
explicit FlatContent(Vector<const uc16> chars)
: buffer_(Vector<const byte>::cast(chars)),
state_(TWO_BYTE) { }
FlatContent() : buffer_(), state_(NON_FLAT) { }
- Vector<const byte> buffer_;
+ Vector<const uint8_t> buffer_;
State state_;
friend class String;
@@ -7170,26 +8421,22 @@ class String: public HeapObject {
inline int length();
inline void set_length(int value);
- // Get and set the hash field of the string.
- inline uint32_t hash_field();
- inline void set_hash_field(uint32_t value);
-
// Returns whether this string has only ASCII chars, i.e. all of them can
// be ASCII encoded. This might be the case even if the string is
// two-byte. Such strings may appear when the embedder prefers
// two-byte external representations even for ASCII data.
- inline bool IsAsciiRepresentation();
+ inline bool IsOneByteRepresentation();
inline bool IsTwoByteRepresentation();
// Cons and slices have an encoding flag that may not represent the actual
// encoding of the underlying string. This is taken into account here.
// Requires: this->IsFlat()
- inline bool IsAsciiRepresentationUnderneath();
+ inline bool IsOneByteRepresentationUnderneath();
inline bool IsTwoByteRepresentationUnderneath();
// NOTE: this should be considered only a hint. False negatives are
// possible.
- inline bool HasOnlyAsciiChars();
+ inline bool HasOnlyOneByteChars();
// Get and set individual two byte chars in the string.
inline void Set(int index, uint16_t value);
@@ -7241,8 +8488,8 @@ class String: public HeapObject {
// String equality operations.
inline bool Equals(String* other);
- bool IsEqualTo(Vector<const char> str);
- bool IsAsciiEqualTo(Vector<const char> str);
+ bool IsUtf8EqualTo(Vector<const char> str, bool allow_prefix_match = false);
+ bool IsOneByteEqualTo(Vector<const uint8_t> str);
bool IsTwoByteEqualTo(Vector<const uc16> str);
// Return a UTF8 representation of the string. The string is null
@@ -7272,19 +8519,7 @@ class String: public HeapObject {
SmartArrayPointer<uc16> ToWideCString(
RobustnessFlag robustness_flag = FAST_STRING_TRAVERSAL);
- // Tells whether the hash code has been computed.
- inline bool HasHashCode();
-
- // Returns a hash value used for the property table
- inline uint32_t Hash();
-
- static uint32_t ComputeHashField(unibrow::CharacterStream* buffer,
- int length,
- uint32_t seed);
-
- static bool ComputeArrayIndex(unibrow::CharacterStream* buffer,
- uint32_t* index,
- int length);
+ bool ComputeArrayIndex(uint32_t* index);
// Externalization.
bool MakeExternal(v8::String::ExternalStringResource* resource);
@@ -7304,81 +8539,26 @@ class String: public HeapObject {
// Dispatched behavior.
void StringShortPrint(StringStream* accumulator);
#ifdef OBJECT_PRINT
- inline void StringPrint() {
- StringPrint(stdout);
- }
- void StringPrint(FILE* out);
-
char* ToAsciiArray();
#endif
+ DECLARE_PRINTER(String)
DECLARE_VERIFIER(String)
inline bool IsFlat();
// Layout description.
- static const int kLengthOffset = HeapObject::kHeaderSize;
- static const int kHashFieldOffset = kLengthOffset + kPointerSize;
- static const int kSize = kHashFieldOffset + kPointerSize;
+ static const int kLengthOffset = Name::kSize;
+ static const int kSize = kLengthOffset + kPointerSize;
// Maximum number of characters to consider when trying to convert a string
// value into an array index.
static const int kMaxArrayIndexSize = 10;
-
- // Max ASCII char code.
- static const int kMaxAsciiCharCode = unibrow::Utf8::kMaxOneByteChar;
- static const unsigned kMaxAsciiCharCodeU = unibrow::Utf8::kMaxOneByteChar;
- static const int kMaxUtf16CodeUnit = 0xffff;
-
- // Mask constant for checking if a string has a computed hash code
- // and if it is an array index. The least significant bit indicates
- // whether a hash code has been computed. If the hash code has been
- // computed the 2nd bit tells whether the string can be used as an
- // array index.
- static const int kHashNotComputedMask = 1;
- static const int kIsNotArrayIndexMask = 1 << 1;
- static const int kNofHashBitFields = 2;
-
- // Shift constant retrieving hash code from hash field.
- static const int kHashShift = kNofHashBitFields;
-
- // Only these bits are relevant in the hash, since the top two are shifted
- // out.
- static const uint32_t kHashBitMask = 0xffffffffu >> kHashShift;
-
- // Array index strings this short can keep their index in the hash
- // field.
- static const int kMaxCachedArrayIndexLength = 7;
-
- // For strings which are array indexes the hash value has the string length
- // mixed into the hash, mainly to avoid a hash value of zero which would be
- // the case for the string '0'. 24 bits are used for the array index value.
- static const int kArrayIndexValueBits = 24;
- static const int kArrayIndexLengthBits =
- kBitsPerInt - kArrayIndexValueBits - kNofHashBitFields;
-
- STATIC_CHECK((kArrayIndexLengthBits > 0));
STATIC_CHECK(kMaxArrayIndexSize < (1 << kArrayIndexLengthBits));
- static const int kArrayIndexHashLengthShift =
- kArrayIndexValueBits + kNofHashBitFields;
-
- static const int kArrayIndexHashMask = (1 << kArrayIndexHashLengthShift) - 1;
-
- static const int kArrayIndexValueMask =
- ((1 << kArrayIndexValueBits) - 1) << kHashShift;
-
- // Check that kMaxCachedArrayIndexLength + 1 is a power of two so we
- // could use a mask to test if the length of string is less than or equal to
- // kMaxCachedArrayIndexLength.
- STATIC_CHECK(IS_POWER_OF_TWO(kMaxCachedArrayIndexLength + 1));
-
- static const int kContainsCachedArrayIndexMask =
- (~kMaxCachedArrayIndexLength << kArrayIndexHashLengthShift) |
- kIsNotArrayIndexMask;
-
- // Value of empty hash field indicating that the hash is not computed.
- static const int kEmptyHashField =
- kIsNotArrayIndexMask | kHashNotComputedMask;
+ // Max char codes.
+ static const int32_t kMaxOneByteCharCode = unibrow::Latin1::kMaxChar;
+ static const uint32_t kMaxOneByteCharCodeU = unibrow::Latin1::kMaxChar;
+ static const int kMaxUtf16CodeUnit = 0xffff;
// Value of hash field containing computed hash equal to zero.
static const int kEmptyStringHash = kIsNotArrayIndexMask;
@@ -7397,18 +8577,6 @@ class String: public HeapObject {
const uc16* GetTwoByteData();
const uc16* GetTwoByteData(unsigned start);
- // Support for StringInputBuffer
- static const unibrow::byte* ReadBlock(String* input,
- unibrow::byte* util_buffer,
- unsigned capacity,
- unsigned* remaining,
- unsigned* offset);
- static const unibrow::byte* ReadBlock(String** input,
- unibrow::byte* util_buffer,
- unsigned capacity,
- unsigned* remaining,
- unsigned* offset);
-
// Helper function for flattening strings.
template <typename sinkchar>
static void WriteToFlat(String* source,
@@ -7423,7 +8591,7 @@ class String: public HeapObject {
const char* start = chars;
const char* limit = chars + length;
#ifdef V8_HOST_CAN_READ_UNALIGNED
- ASSERT(kMaxAsciiCharCode == 0x7F);
+ ASSERT(unibrow::Utf8::kMaxOneByteChar == 0x7F);
const uintptr_t non_ascii_mask = kUintptrAllBitsSet / 0xFF * 0x80;
while (chars + sizeof(uintptr_t) <= limit) {
if (*reinterpret_cast<const uintptr_t*>(chars) & non_ascii_mask) {
@@ -7433,7 +8601,7 @@ class String: public HeapObject {
}
#endif
while (chars < limit) {
- if (static_cast<uint8_t>(*chars) > kMaxAsciiCharCodeU) {
+ if (static_cast<uint8_t>(*chars) > unibrow::Utf8::kMaxOneByteChar) {
return static_cast<int>(chars - start);
}
++chars;
@@ -7445,55 +8613,57 @@ class String: public HeapObject {
return NonAsciiStart(chars, length) >= length;
}
- static inline int NonAsciiStart(const uc16* chars, int length) {
+ static inline bool IsAscii(const uint8_t* chars, int length) {
+ return
+ NonAsciiStart(reinterpret_cast<const char*>(chars), length) >= length;
+ }
+
+ static inline int NonOneByteStart(const uc16* chars, int length) {
const uc16* limit = chars + length;
const uc16* start = chars;
while (chars < limit) {
- if (*chars > kMaxAsciiCharCodeU) return static_cast<int>(chars - start);
+ if (*chars > kMaxOneByteCharCodeU) return static_cast<int>(chars - start);
++chars;
}
return static_cast<int>(chars - start);
}
- static inline bool IsAscii(const uc16* chars, int length) {
- return NonAsciiStart(chars, length) >= length;
+ static inline bool IsOneByte(const uc16* chars, int length) {
+ return NonOneByteStart(chars, length) >= length;
}
- protected:
- class ReadBlockBuffer {
- public:
- ReadBlockBuffer(unibrow::byte* util_buffer_,
- unsigned cursor_,
- unsigned capacity_,
- unsigned remaining_) :
- util_buffer(util_buffer_),
- cursor(cursor_),
- capacity(capacity_),
- remaining(remaining_) {
- }
- unibrow::byte* util_buffer;
- unsigned cursor;
- unsigned capacity;
- unsigned remaining;
- };
+ // TODO(dcarney): Replace all instances of this with VisitFlat.
+ template<class Visitor, class ConsOp>
+ static inline void Visit(String* string,
+ unsigned offset,
+ Visitor& visitor,
+ ConsOp& cons_op,
+ int32_t type,
+ unsigned length);
- static inline const unibrow::byte* ReadBlock(String* input,
- ReadBlockBuffer* buffer,
- unsigned* offset,
- unsigned max_chars);
- static void ReadBlockIntoBuffer(String* input,
- ReadBlockBuffer* buffer,
- unsigned* offset_ptr,
- unsigned max_chars);
+ template<class Visitor>
+ static inline ConsString* VisitFlat(Visitor* visitor,
+ String* string,
+ int offset,
+ int length,
+ int32_t type);
+
+ template<class Visitor>
+ static inline ConsString* VisitFlat(Visitor* visitor,
+ String* string,
+ int offset = 0) {
+ int32_t type = string->map()->instance_type();
+ return VisitFlat(visitor, string, offset, string->length(), type);
+ }
private:
+ friend class Name;
+
// Try to flatten the top level ConsString that is hiding behind this
// string. This is a no-op unless the string is a ConsString. Flatten
// mutates the ConsString and might return a failure.
MUST_USE_RESULT MaybeObject* SlowTryFlatten(PretenureFlag pretenure);
- static inline bool IsHashFieldComputed(uint32_t field);
-
// Slow case of String::Equals. This implementation works on any strings
// but it is most efficient on strings that are almost flat.
bool SlowEquals(String* other);
@@ -7517,6 +8687,11 @@ class SeqString: public String {
// Layout description.
static const int kHeaderSize = String::kSize;
+ // Truncate the string in-place if possible and return the result.
+ // In case of new_length == 0, the empty string is returned without
+ // truncating the original string.
+ MUST_USE_RESULT static Handle<String> Truncate(Handle<SeqString> string,
+ int new_length);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(SeqString);
};
@@ -7524,26 +8699,26 @@ class SeqString: public String {
// The AsciiString class captures sequential ASCII string objects.
// Each character in the AsciiString is an ASCII character.
-class SeqAsciiString: public SeqString {
+class SeqOneByteString: public SeqString {
public:
static const bool kHasAsciiEncoding = true;
// Dispatched behavior.
- inline uint16_t SeqAsciiStringGet(int index);
- inline void SeqAsciiStringSet(int index, uint16_t value);
+ inline uint16_t SeqOneByteStringGet(int index);
+ inline void SeqOneByteStringSet(int index, uint16_t value);
// Get the address of the characters in this string.
inline Address GetCharsAddress();
- inline char* GetChars();
+ inline uint8_t* GetChars();
// Casting
- static inline SeqAsciiString* cast(Object* obj);
+ static inline SeqOneByteString* cast(Object* obj);
// Garbage collection support. This method is called by the
// garbage collector to compute the actual size of an AsciiString
// instance.
- inline int SeqAsciiStringSize(InstanceType instance_type);
+ inline int SeqOneByteStringSize(InstanceType instance_type);
// Computes the size for an AsciiString instance of a given length.
static int SizeFor(int length) {
@@ -7556,18 +8731,8 @@ class SeqAsciiString: public SeqString {
// Q.v. String::kMaxLength which is the maximal size of concatenated strings.
static const int kMaxLength = (kMaxSize - kHeaderSize);
- // Support for StringInputBuffer.
- inline void SeqAsciiStringReadBlockIntoBuffer(ReadBlockBuffer* buffer,
- unsigned* offset,
- unsigned chars);
- inline const unibrow::byte* SeqAsciiStringReadBlock(unsigned* remaining,
- unsigned* offset,
- unsigned chars);
-
- DECLARE_VERIFIER(SeqAsciiString)
-
private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(SeqAsciiString);
+ DISALLOW_IMPLICIT_CONSTRUCTORS(SeqOneByteString);
};
@@ -7608,11 +8773,6 @@ class SeqTwoByteString: public SeqString {
// Q.v. String::kMaxLength which is the maximal size of concatenated strings.
static const int kMaxLength = (kMaxSize - kHeaderSize) / sizeof(uint16_t);
- // Support for StringInputBuffer.
- inline void SeqTwoByteStringReadBlockIntoBuffer(ReadBlockBuffer* buffer,
- unsigned* offset_ptr,
- unsigned chars);
-
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(SeqTwoByteString);
};
@@ -7655,14 +8815,6 @@ class ConsString: public String {
static const int kSecondOffset = kFirstOffset + kPointerSize;
static const int kSize = kSecondOffset + kPointerSize;
- // Support for StringInputBuffer.
- inline const unibrow::byte* ConsStringReadBlock(ReadBlockBuffer* buffer,
- unsigned* offset_ptr,
- unsigned chars);
- inline void ConsStringReadBlockIntoBuffer(ReadBlockBuffer* buffer,
- unsigned* offset_ptr,
- unsigned chars);
-
// Minimum length for a cons string.
static const int kMinLength = 13;
@@ -7707,13 +8859,6 @@ class SlicedString: public String {
static const int kOffsetOffset = kParentOffset + kPointerSize;
static const int kSize = kOffsetOffset + kPointerSize;
- // Support for StringInputBuffer
- inline const unibrow::byte* SlicedStringReadBlock(ReadBlockBuffer* buffer,
- unsigned* offset_ptr,
- unsigned chars);
- inline void SlicedStringReadBlockIntoBuffer(ReadBlockBuffer* buffer,
- unsigned* offset_ptr,
- unsigned chars);
// Minimum length for a sliced string.
static const int kMinLength = 13;
@@ -7748,6 +8893,9 @@ class ExternalString: public String {
static const int kResourceDataOffset = kResourceOffset + kPointerSize;
static const int kSize = kResourceDataOffset + kPointerSize;
+ static const int kMaxShortLength =
+ (kShortSize - SeqString::kHeaderSize) / kCharSize;
+
// Return whether external string is short (data pointer is not cached).
inline bool is_short();
@@ -7776,7 +8924,7 @@ class ExternalAsciiString: public ExternalString {
// which the pointer cache has to be refreshed.
inline void update_data_cache();
- inline const char* GetChars();
+ inline const uint8_t* GetChars();
// Dispatched behavior.
inline uint16_t ExternalAsciiStringGet(int index);
@@ -7790,14 +8938,6 @@ class ExternalAsciiString: public ExternalString {
template<typename StaticVisitor>
inline void ExternalAsciiStringIterateBody();
- // Support for StringInputBuffer.
- const unibrow::byte* ExternalAsciiStringReadBlock(unsigned* remaining,
- unsigned* offset,
- unsigned chars);
- inline void ExternalAsciiStringReadBlockIntoBuffer(ReadBlockBuffer* buffer,
- unsigned* offset,
- unsigned chars);
-
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalAsciiString);
};
@@ -7838,12 +8978,6 @@ class ExternalTwoByteString: public ExternalString {
template<typename StaticVisitor>
inline void ExternalTwoByteStringIterateBody();
-
- // Support for StringInputBuffer.
- void ExternalTwoByteStringReadBlockIntoBuffer(ReadBlockBuffer* buffer,
- unsigned* offset_ptr,
- unsigned chars);
-
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalTwoByteString);
};
@@ -7859,13 +8993,14 @@ class Relocatable BASE_EMBEDDED {
virtual void IterateInstance(ObjectVisitor* v) { }
virtual void PostGarbageCollection() { }
- static void PostGarbageCollectionProcessing();
+ static void PostGarbageCollectionProcessing(Isolate* isolate);
static int ArchiveSpacePerThread();
static char* ArchiveState(Isolate* isolate, char* to);
static char* RestoreState(Isolate* isolate, char* from);
- static void Iterate(ObjectVisitor* v);
+ static void Iterate(Isolate* isolate, ObjectVisitor* v);
static void Iterate(ObjectVisitor* v, Relocatable* top);
static char* Iterate(ObjectVisitor* v, char* t);
+
private:
Isolate* isolate_;
Relocatable* prev_;
@@ -7890,32 +9025,82 @@ class FlatStringReader : public Relocatable {
};
-// Note that StringInputBuffers are not valid across a GC! To fix this
-// it would have to store a String Handle instead of a String* and
-// AsciiStringReadBlock would have to be modified to use memcpy.
-//
-// StringInputBuffer is able to traverse any string regardless of how
-// deeply nested a sequence of ConsStrings it is made of. However,
-// performance will be better if deep strings are flattened before they
-// are traversed. Since flattening requires memory allocation this is
-// not always desirable, however (esp. in debugging situations).
-class StringInputBuffer: public unibrow::InputBuffer<String, String*, 1024> {
+// A ConsStringOp that returns null.
+// Useful when the operation to apply on a ConsString
+// requires an expensive data structure.
+class ConsStringNullOp {
public:
- virtual void Seek(unsigned pos);
- inline StringInputBuffer(): unibrow::InputBuffer<String, String*, 1024>() {}
- explicit inline StringInputBuffer(String* backing):
- unibrow::InputBuffer<String, String*, 1024>(backing) {}
+ inline ConsStringNullOp() {}
+ static inline String* Operate(String*, unsigned*, int32_t*, unsigned*);
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ConsStringNullOp);
};
-class SafeStringInputBuffer
- : public unibrow::InputBuffer<String, String**, 256> {
+// This maintains an off-stack representation of the stack frames required
+// to traverse a ConsString, allowing an entirely iterative and restartable
+// traversal of the entire string.
+// Note: this class is not GC-safe.
+class ConsStringIteratorOp {
public:
- virtual void Seek(unsigned pos);
- inline SafeStringInputBuffer()
- : unibrow::InputBuffer<String, String**, 256>() {}
- explicit inline SafeStringInputBuffer(String** backing)
- : unibrow::InputBuffer<String, String**, 256>(backing) {}
+ inline ConsStringIteratorOp() {}
+ String* Operate(String* string,
+ unsigned* offset_out,
+ int32_t* type_out,
+ unsigned* length_out);
+ inline String* ContinueOperation(int32_t* type_out, unsigned* length_out);
+ inline void Reset();
+ inline bool HasMore();
+
+ private:
+ // TODO(dcarney): Templatize this out for different stack sizes.
+ static const unsigned kStackSize = 32;
+ // Use a mask instead of doing modulo operations for stack wrapping.
+ static const unsigned kDepthMask = kStackSize-1;
+ STATIC_ASSERT(IS_POWER_OF_TWO(kStackSize));
+ static inline unsigned OffsetForDepth(unsigned depth);
+
+ inline void PushLeft(ConsString* string);
+ inline void PushRight(ConsString* string);
+ inline void AdjustMaximumDepth();
+ inline void Pop();
+ String* NextLeaf(bool* blew_stack, int32_t* type_out, unsigned* length_out);
+ String* Search(unsigned* offset_out,
+ int32_t* type_out,
+ unsigned* length_out);
+
+ unsigned depth_;
+ unsigned maximum_depth_;
+ // Stack must always contain only frames for which right traversal
+ // has not yet been performed.
+ ConsString* frames_[kStackSize];
+ unsigned consumed_;
+ ConsString* root_;
+ DISALLOW_COPY_AND_ASSIGN(ConsStringIteratorOp);
+};
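
A rough standalone sketch (not V8 code; all names below are invented for illustration) of the traversal technique this class implements: an explicit, fixed-size stack of pending right children, sized to a power of two so slot indices can be masked rather than taken modulo. The real iterator's restart-from-root path for blown stacks is omitted here and replaced by an assert.

// cons_iter_sketch.cc -- illustrative only.
#include <cassert>
#include <cstddef>
#include <cstdio>
#include <string>

struct Cons {
  const char* leaf;   // non-NULL for leaves
  Cons* first;        // left child for interior nodes
  Cons* second;       // right child for interior nodes
};

static const unsigned kStackSize = 32;               // must be a power of two
static const unsigned kDepthMask = kStackSize - 1;   // mask replaces % kStackSize

std::string Concatenate(Cons* root) {
  Cons* frames[kStackSize];  // interior nodes whose right side is still pending
  unsigned depth = 0;
  std::string out;
  Cons* node = root;
  while (true) {
    // Walk down the left spine, remembering interior nodes for their right side.
    while (node->leaf == NULL) {
      assert(depth < kStackSize);        // the real code restarts from the root instead
      frames[depth & kDepthMask] = node;
      depth++;
      node = node->first;
    }
    out += node->leaf;
    if (depth == 0) return out;          // nothing left to visit
    depth--;
    node = frames[depth & kDepthMask]->second;  // resume with the deferred right child
  }
}

int main() {
  Cons a = { "foo", NULL, NULL }, b = { "bar", NULL, NULL }, c = { "baz", NULL, NULL };
  Cons ab = { NULL, &a, &b };
  Cons abc = { NULL, &ab, &c };
  std::printf("%s\n", Concatenate(&abc).c_str());   // prints foobarbaz
  return 0;
}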
+
+
+// Note: this class is not GC-safe.
+class StringCharacterStream {
+ public:
+ inline StringCharacterStream(String* string,
+ ConsStringIteratorOp* op,
+ unsigned offset = 0);
+ inline uint16_t GetNext();
+ inline bool HasMore();
+ inline void Reset(String* string, unsigned offset = 0);
+ inline void VisitOneByteString(const uint8_t* chars, unsigned length);
+ inline void VisitTwoByteString(const uint16_t* chars, unsigned length);
+
+ private:
+ bool is_one_byte_;
+ union {
+ const uint8_t* buffer8_;
+ const uint16_t* buffer16_;
+ };
+ const uint8_t* end_;
+ ConsStringIteratorOp* op_;
+ DISALLOW_COPY_AND_ASSIGN(StringCharacterStream);
};
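
A minimal sketch (not V8 code; names invented) of the pattern StringCharacterStream uses: a union of one-byte and two-byte buffer pointers plus a width flag, so the hot GetNext() path is a single test and a pointer bump.

// char_stream_sketch.cc -- illustrative only.
#include <cstdint>
#include <cstdio>

class CharStream {
 public:
  void ResetOneByte(const uint8_t* chars, unsigned length) {
    is_one_byte_ = true;  buffer8_ = chars;  end8_ = chars + length;
  }
  void ResetTwoByte(const uint16_t* chars, unsigned length) {
    is_one_byte_ = false; buffer16_ = chars; end16_ = chars + length;
  }
  bool HasMore() const {
    return is_one_byte_ ? buffer8_ != end8_ : buffer16_ != end16_;
  }
  uint16_t GetNext() {
    return is_one_byte_ ? *buffer8_++ : *buffer16_++;
  }
 private:
  bool is_one_byte_;
  union { const uint8_t* buffer8_; const uint16_t* buffer16_; };
  union { const uint8_t* end8_;    const uint16_t* end16_;    };
};

int main() {
  const uint8_t ascii[] = { 'h', 'i' };
  CharStream s;
  s.ResetOneByte(ascii, 2);
  while (s.HasMore()) std::printf("%c", static_cast<char>(s.GetNext()));
  std::printf("\n");
  return 0;
}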
@@ -7951,7 +9136,8 @@ class Oddball: public HeapObject {
DECLARE_VERIFIER(Oddball)
// Initialize the fields.
- MUST_USE_RESULT MaybeObject* Initialize(const char* to_string,
+ MUST_USE_RESULT MaybeObject* Initialize(Heap* heap,
+ const char* to_string,
Object* to_number,
byte kind);
@@ -7968,7 +9154,8 @@ class Oddball: public HeapObject {
static const byte kNull = 3;
static const byte kArgumentMarker = 4;
static const byte kUndefined = 5;
- static const byte kOther = 6;
+ static const byte kUninitialized = 6;
+ static const byte kOther = 7;
typedef FixedBodyDescriptor<kToStringOffset,
kToNumberOffset + kPointerSize,
@@ -7983,30 +9170,27 @@ class Oddball: public HeapObject {
};
-class JSGlobalPropertyCell: public HeapObject {
+class Cell: public HeapObject {
public:
// [value]: value of the global property.
DECL_ACCESSORS(value, Object)
// Casting.
- static inline JSGlobalPropertyCell* cast(Object* obj);
+ static inline Cell* cast(Object* obj);
- static inline JSGlobalPropertyCell* FromValueAddress(Address value) {
- return cast(FromAddress(value - kValueOffset));
+ static inline Cell* FromValueAddress(Address value) {
+ Object* result = FromAddress(value - kValueOffset);
+ ASSERT(result->IsCell() || result->IsPropertyCell());
+ return static_cast<Cell*>(result);
}
inline Address ValueAddress() {
return address() + kValueOffset;
}
- DECLARE_VERIFIER(JSGlobalPropertyCell)
-
-#ifdef OBJECT_PRINT
- inline void JSGlobalPropertyCellPrint() {
- JSGlobalPropertyCellPrint(stdout);
- }
- void JSGlobalPropertyCellPrint(FILE* out);
-#endif
+ // Dispatched behavior.
+ DECLARE_PRINTER(Cell)
+ DECLARE_VERIFIER(Cell)
// Layout description.
static const int kValueOffset = HeapObject::kHeaderSize;
@@ -8017,7 +9201,62 @@ class JSGlobalPropertyCell: public HeapObject {
kSize> BodyDescriptor;
private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSGlobalPropertyCell);
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Cell);
+};
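
FromValueAddress() recovers the containing object from the address of its value field by subtracting the field's offset. A tiny standalone sketch of that arithmetic (illustrative only; the struct and offsets here are stand-ins, not V8's layout):

// from_value_address_sketch.cc -- illustrative only.
#include <cassert>
#include <cstddef>
#include <cstdint>

struct CellSketch {
  int header;   // stands in for the HeapObject header word
  int value;    // stands in for the value slot at kValueOffset
};

CellSketch* FromValueAddress(int* value_address) {
  // kValueOffset in this sketch is offsetof(CellSketch, value).
  return reinterpret_cast<CellSketch*>(
      reinterpret_cast<uint8_t*>(value_address) - offsetof(CellSketch, value));
}

int main() {
  CellSketch cell = { 0, 42 };
  CellSketch* recovered = FromValueAddress(&cell.value);
  assert(recovered == &cell);
  assert(recovered->value == 42);
  return 0;
}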
+
+
+class PropertyCell: public Cell {
+ public:
+ // [type]: type of the global property.
+ Type* type();
+ void set_type(Type* value, WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+
+ // [dependent_code]: dependent code that depends on the type of the global
+ // property.
+ DECL_ACCESSORS(dependent_code, DependentCode)
+
+ // Sets the value of the cell and updates the type field to be the union
+ // of the cell's current type and the value's type. If the change causes
+ // a change of the type of the cell's contents, code dependent on the cell
+ // will be deoptimized.
+ static void SetValueInferType(Handle<PropertyCell> cell,
+ Handle<Object> value);
+
+ // Computes the new type of the cell's contents for the given value, but
+ // without actually modifying the 'type' field.
+ static Handle<Type> UpdatedType(Handle<PropertyCell> cell,
+ Handle<Object> value);
+
+ void AddDependentCompilationInfo(CompilationInfo* info);
+
+ void AddDependentCode(Handle<Code> code);
+
+ // Casting.
+ static inline PropertyCell* cast(Object* obj);
+
+ inline Address TypeAddress() {
+ return address() + kTypeOffset;
+ }
+
+ // Dispatched behavior.
+ DECLARE_PRINTER(PropertyCell)
+ DECLARE_VERIFIER(PropertyCell)
+
+ // Layout description.
+ static const int kTypeOffset = kValueOffset + kPointerSize;
+ static const int kDependentCodeOffset = kTypeOffset + kPointerSize;
+ static const int kSize = kDependentCodeOffset + kPointerSize;
+
+ static const int kPointerFieldsBeginOffset = kValueOffset;
+ static const int kPointerFieldsEndOffset = kDependentCodeOffset;
+
+ typedef FixedBodyDescriptor<kValueOffset,
+ kSize,
+ kSize> BodyDescriptor;
+
+ private:
+ DECL_ACCESSORS(type_raw, Object)
+ DISALLOW_IMPLICIT_CONSTRUCTORS(PropertyCell);
};
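
A conceptual sketch of what SetValueInferType() describes above: the cell keeps a monotonically widening type, and a store that widens it invalidates everything compiled against the old type. The three-point lattice and all names here are invented for the example, not V8's Type system.

// property_cell_type_sketch.cc -- illustrative only.
#include <cstdio>

enum CellType { kNone, kSmi, kAny };   // toy lattice: kNone <= kSmi <= kAny

CellType Join(CellType a, CellType b) { return a > b ? a : b; }

struct PropertyCellSketch {
  CellType type = kNone;
  int value = 0;

  void SetValueInferType(int new_value, CellType value_type) {
    CellType widened = Join(type, value_type);
    if (widened != type) {
      // In V8 this is where code registered in dependent_code is deoptimized.
      std::printf("type widened (%d -> %d): deoptimize dependent code\n",
                  type, widened);
      type = widened;
    }
    value = new_value;
  }
};

int main() {
  PropertyCellSketch cell;
  cell.SetValueInferType(1, kSmi);    // widens kNone -> kSmi, deopt
  cell.SetValueInferType(2, kSmi);    // no widening, no deopt
  cell.SetValueInferType(3, kAny);    // widens kSmi -> kAny, deopt
  return 0;
}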
@@ -8033,58 +9272,35 @@ class JSProxy: public JSReceiver {
// Casting.
static inline JSProxy* cast(Object* obj);
- bool HasPropertyWithHandler(String* name);
- bool HasElementWithHandler(uint32_t index);
-
MUST_USE_RESULT MaybeObject* GetPropertyWithHandler(
Object* receiver,
- String* name);
+ Name* name);
MUST_USE_RESULT MaybeObject* GetElementWithHandler(
Object* receiver,
uint32_t index);
- MUST_USE_RESULT MaybeObject* SetPropertyWithHandler(
- JSReceiver* receiver,
- String* name,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode);
- MUST_USE_RESULT MaybeObject* SetElementWithHandler(
- JSReceiver* receiver,
- uint32_t index,
- Object* value,
- StrictModeFlag strict_mode);
-
// If the handler defines an accessor property with a setter, invoke it.
// If it defines an accessor property without a setter, or a data property
// that is read-only, throw. In all these cases set '*done' to true,
// otherwise set it to false.
- MUST_USE_RESULT MaybeObject* SetPropertyViaPrototypesWithHandler(
- JSReceiver* receiver,
- String* name,
- Object* value,
+ static Handle<Object> SetPropertyViaPrototypesWithHandler(
+ Handle<JSProxy> proxy,
+ Handle<JSReceiver> receiver,
+ Handle<Name> name,
+ Handle<Object> value,
PropertyAttributes attributes,
StrictModeFlag strict_mode,
bool* done);
- MUST_USE_RESULT MaybeObject* DeletePropertyWithHandler(
- String* name,
- DeleteMode mode);
- MUST_USE_RESULT MaybeObject* DeleteElementWithHandler(
- uint32_t index,
- DeleteMode mode);
-
MUST_USE_RESULT PropertyAttributes GetPropertyAttributeWithHandler(
JSReceiver* receiver,
- String* name);
+ Name* name);
MUST_USE_RESULT PropertyAttributes GetElementAttributeWithHandler(
JSReceiver* receiver,
uint32_t index);
- MUST_USE_RESULT MaybeObject* GetIdentityHash(CreationFlag flag);
-
- // Turn this into an (empty) JSObject.
- void Fix();
+ // Turn the proxy into an (empty) JSObject.
+ static void Fix(Handle<JSProxy> proxy);
// Initializes the body after the handler slot.
inline void InitializeBody(int object_size, Object* value);
@@ -8097,12 +9313,7 @@ class JSProxy: public JSReceiver {
Handle<Object> args[]);
// Dispatched behavior.
-#ifdef OBJECT_PRINT
- inline void JSProxyPrint() {
- JSProxyPrint(stdout);
- }
- void JSProxyPrint(FILE* out);
-#endif
+ DECLARE_PRINTER(JSProxy)
DECLARE_VERIFIER(JSProxy)
// Layout description. We add padding so that a proxy has the same
@@ -8122,6 +9333,34 @@ class JSProxy: public JSReceiver {
kSize> BodyDescriptor;
private:
+ friend class JSReceiver;
+
+ static Handle<Object> SetPropertyWithHandler(Handle<JSProxy> proxy,
+ Handle<JSReceiver> receiver,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ StrictModeFlag strict_mode);
+ static Handle<Object> SetElementWithHandler(Handle<JSProxy> proxy,
+ Handle<JSReceiver> receiver,
+ uint32_t index,
+ Handle<Object> value,
+ StrictModeFlag strict_mode);
+
+ static bool HasPropertyWithHandler(Handle<JSProxy> proxy, Handle<Name> name);
+ static bool HasElementWithHandler(Handle<JSProxy> proxy, uint32_t index);
+
+ static Handle<Object> DeletePropertyWithHandler(Handle<JSProxy> proxy,
+ Handle<Name> name,
+ DeleteMode mode);
+ static Handle<Object> DeleteElementWithHandler(Handle<JSProxy> proxy,
+ uint32_t index,
+ DeleteMode mode);
+
+ MUST_USE_RESULT MaybeObject* GetIdentityHash(CreationFlag flag);
+ static Handle<Object> GetIdentityHash(Handle<JSProxy> proxy,
+ CreationFlag flag);
+
DISALLOW_IMPLICIT_CONSTRUCTORS(JSProxy);
};
@@ -8138,12 +9377,7 @@ class JSFunctionProxy: public JSProxy {
static inline JSFunctionProxy* cast(Object* obj);
// Dispatched behavior.
-#ifdef OBJECT_PRINT
- inline void JSFunctionProxyPrint() {
- JSFunctionProxyPrint(stdout);
- }
- void JSFunctionProxyPrint(FILE* out);
-#endif
+ DECLARE_PRINTER(JSFunctionProxy)
DECLARE_VERIFIER(JSFunctionProxy)
// Layout description.
@@ -8173,12 +9407,8 @@ class JSSet: public JSObject {
// Casting.
static inline JSSet* cast(Object* obj);
-#ifdef OBJECT_PRINT
- inline void JSSetPrint() {
- JSSetPrint(stdout);
- }
- void JSSetPrint(FILE* out);
-#endif
+ // Dispatched behavior.
+ DECLARE_PRINTER(JSSet)
DECLARE_VERIFIER(JSSet)
static const int kTableOffset = JSObject::kHeaderSize;
@@ -8198,12 +9428,8 @@ class JSMap: public JSObject {
// Casting.
static inline JSMap* cast(Object* obj);
-#ifdef OBJECT_PRINT
- inline void JSMapPrint() {
- JSMapPrint(stdout);
- }
- void JSMapPrint(FILE* out);
-#endif
+ // Dispatched behavior.
+ DECLARE_PRINTER(JSMap)
DECLARE_VERIFIER(JSMap)
static const int kTableOffset = JSObject::kHeaderSize;
@@ -8214,8 +9440,8 @@ class JSMap: public JSObject {
};
-// The JSWeakMap describes EcmaScript Harmony weak maps
-class JSWeakMap: public JSObject {
+// Base class for both JSWeakMap and JSWeakSet
+class JSWeakCollection: public JSObject {
public:
// [table]: the backing hash table mapping keys to values.
DECL_ACCESSORS(table, Object)
@@ -8223,26 +9449,177 @@ class JSWeakMap: public JSObject {
// [next]: linked list of encountered weak maps during GC.
DECL_ACCESSORS(next, Object)
+ static const int kTableOffset = JSObject::kHeaderSize;
+ static const int kNextOffset = kTableOffset + kPointerSize;
+ static const int kSize = kNextOffset + kPointerSize;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSWeakCollection);
+};
+
+
+// The JSWeakMap describes EcmaScript Harmony weak maps
+class JSWeakMap: public JSWeakCollection {
+ public:
// Casting.
static inline JSWeakMap* cast(Object* obj);
-#ifdef OBJECT_PRINT
- inline void JSWeakMapPrint() {
- JSWeakMapPrint(stdout);
- }
- void JSWeakMapPrint(FILE* out);
-#endif
+ // Dispatched behavior.
+ DECLARE_PRINTER(JSWeakMap)
DECLARE_VERIFIER(JSWeakMap)
- static const int kTableOffset = JSObject::kHeaderSize;
- static const int kNextOffset = kTableOffset + kPointerSize;
- static const int kSize = kNextOffset + kPointerSize;
-
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(JSWeakMap);
};
+// The JSWeakSet describes EcmaScript Harmony weak sets
+class JSWeakSet: public JSWeakCollection {
+ public:
+ // Casting.
+ static inline JSWeakSet* cast(Object* obj);
+
+ // Dispatched behavior.
+ DECLARE_PRINTER(JSWeakSet)
+ DECLARE_VERIFIER(JSWeakSet)
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSWeakSet);
+};
+
+
+class JSArrayBuffer: public JSObject {
+ public:
+ // [backing_store]: backing memory for this array
+ DECL_ACCESSORS(backing_store, void)
+
+ // [byte_length]: length in bytes
+ DECL_ACCESSORS(byte_length, Object)
+
+ // [flags]
+ DECL_ACCESSORS(flag, Smi)
+
+ inline bool is_external();
+ inline void set_is_external(bool value);
+
+ // [weak_next]: linked list of array buffers.
+ DECL_ACCESSORS(weak_next, Object)
+
+ // [weak_first_array]: weak linked list of views.
+ DECL_ACCESSORS(weak_first_view, Object)
+
+ // Casting.
+ static inline JSArrayBuffer* cast(Object* obj);
+
+ // Neutering. Only neuters the buffer, not associated typed arrays.
+ void Neuter();
+
+ // Dispatched behavior.
+ DECLARE_PRINTER(JSArrayBuffer)
+ DECLARE_VERIFIER(JSArrayBuffer)
+
+ static const int kBackingStoreOffset = JSObject::kHeaderSize;
+ static const int kByteLengthOffset = kBackingStoreOffset + kPointerSize;
+ static const int kFlagOffset = kByteLengthOffset + kPointerSize;
+ static const int kWeakNextOffset = kFlagOffset + kPointerSize;
+ static const int kWeakFirstViewOffset = kWeakNextOffset + kPointerSize;
+ static const int kSize = kWeakFirstViewOffset + kPointerSize;
+
+ static const int kSizeWithInternalFields =
+ kSize + v8::ArrayBuffer::kInternalFieldCount * kPointerSize;
+
+ private:
+ // Bit position in a flag
+ static const int kIsExternalBit = 0;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSArrayBuffer);
+};
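
A small sketch of the flag-bit convention used here (is_external stored as bit kIsExternalBit of the flag field); the struct below is a stand-in, not the real Smi-backed accessor:

// flag_bit_sketch.cc -- illustrative only.
#include <cassert>
#include <cstdint>

static const int kIsExternalBit = 0;

struct BufferFlags {
  uint32_t flag = 0;

  bool is_external() const { return (flag >> kIsExternalBit) & 1; }
  void set_is_external(bool value) {
    if (value) flag |=  (1u << kIsExternalBit);
    else       flag &= ~(1u << kIsExternalBit);
  }
};

int main() {
  BufferFlags f;
  assert(!f.is_external());
  f.set_is_external(true);
  assert(f.is_external());
  f.set_is_external(false);
  assert(!f.is_external());
  return 0;
}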
+
+
+class JSArrayBufferView: public JSObject {
+ public:
+ // [buffer]: ArrayBuffer that this typed array views.
+ DECL_ACCESSORS(buffer, Object)
+
+  // [byte_offset]: offset of typed array in bytes.
+ DECL_ACCESSORS(byte_offset, Object)
+
+ // [byte_length]: length of typed array in bytes.
+ DECL_ACCESSORS(byte_length, Object)
+
+ // [weak_next]: linked list of typed arrays over the same array buffer.
+ DECL_ACCESSORS(weak_next, Object)
+
+ // Casting.
+ static inline JSArrayBufferView* cast(Object* obj);
+
+ DECLARE_VERIFIER(JSArrayBufferView)
+
+ static const int kBufferOffset = JSObject::kHeaderSize;
+ static const int kByteOffsetOffset = kBufferOffset + kPointerSize;
+ static const int kByteLengthOffset = kByteOffsetOffset + kPointerSize;
+ static const int kWeakNextOffset = kByteLengthOffset + kPointerSize;
+ static const int kViewSize = kWeakNextOffset + kPointerSize;
+
+ protected:
+ void NeuterView();
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSArrayBufferView);
+};
+
+
+class JSTypedArray: public JSArrayBufferView {
+ public:
+ // [length]: length of typed array in elements.
+ DECL_ACCESSORS(length, Object)
+
+ // Neutering. Only neuters this typed array.
+ void Neuter();
+
+ // Casting.
+ static inline JSTypedArray* cast(Object* obj);
+
+ ExternalArrayType type();
+ size_t element_size();
+
+ // Dispatched behavior.
+ DECLARE_PRINTER(JSTypedArray)
+ DECLARE_VERIFIER(JSTypedArray)
+
+ static const int kLengthOffset = kViewSize + kPointerSize;
+ static const int kSize = kLengthOffset + kPointerSize;
+
+ static const int kSizeWithInternalFields =
+ kSize + v8::ArrayBufferView::kInternalFieldCount * kPointerSize;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSTypedArray);
+};
+
+
+class JSDataView: public JSArrayBufferView {
+ public:
+ // Only neuters this DataView
+ void Neuter();
+
+ // Casting.
+ static inline JSDataView* cast(Object* obj);
+
+ // Dispatched behavior.
+ DECLARE_PRINTER(JSDataView)
+ DECLARE_VERIFIER(JSDataView)
+
+ static const int kSize = kViewSize;
+
+ static const int kSizeWithInternalFields =
+ kSize + v8::ArrayBufferView::kInternalFieldCount * kPointerSize;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSDataView);
+};
+
+
// Foreign describes objects pointing from JavaScript to C structures.
// Since they cannot contain references to JS HeapObjects they can be
// placed in old_data_space.
@@ -8261,12 +9638,8 @@ class Foreign: public HeapObject {
template<typename StaticVisitor>
inline void ForeignIterateBody();
-#ifdef OBJECT_PRINT
- inline void ForeignPrint() {
- ForeignPrint(stdout);
- }
- void ForeignPrint(FILE* out);
-#endif
+ // Dispatched behavior.
+ DECLARE_PRINTER(Foreign)
DECLARE_VERIFIER(Foreign)
// Layout description.
@@ -8301,10 +9674,11 @@ class JSArray: public JSObject {
// Initialize the array with the given capacity. The function may
// fail due to out-of-memory situations, but only if the requested
// capacity is non-zero.
- MUST_USE_RESULT MaybeObject* Initialize(int capacity);
+ MUST_USE_RESULT MaybeObject* Initialize(int capacity, int length = 0);
// Initializes the array to a certain length.
inline bool AllowsSetElementsLength();
+ // Can cause GC.
MUST_USE_RESULT MaybeObject* SetElementsLength(Object* length);
// Set the content of the array to the content of storage.
@@ -8318,12 +9692,7 @@ class JSArray: public JSObject {
inline void EnsureSize(int minimum_size_of_backing_fixed_array);
// Dispatched behavior.
-#ifdef OBJECT_PRINT
- inline void JSArrayPrint() {
- JSArrayPrint(stdout);
- }
- void JSArrayPrint(FILE* out);
-#endif
+ DECLARE_PRINTER(JSArray)
DECLARE_VERIFIER(JSArray)
// Number of element slots to pre-allocate for an empty array.
@@ -8342,6 +9711,10 @@ class JSArray: public JSObject {
};
+Handle<Object> CacheInitialJSArrayMaps(Handle<Context> native_context,
+ Handle<Map> initial_map);
+
+
// JSRegExpResult is just a JSArray with a specific initial map.
// This initial map adds in-object properties for "index" and "input"
// properties, as assigned by RegExp.prototype.exec, which allows
@@ -8362,20 +9735,8 @@ class JSRegExpResult: public JSArray {
};
-// An accessor must have a getter, but can have no setter.
-//
-// When setting a property, V8 searches accessors in prototypes.
-// If an accessor was found and it does not have a setter,
-// the request is ignored.
-//
-// If the accessor in the prototype has the READ_ONLY property attribute, then
-// a new value is added to the local object when the property is set.
-// This shadows the accessor in the prototype.
class AccessorInfo: public Struct {
public:
- DECL_ACCESSORS(getter, Object)
- DECL_ACCESSORS(setter, Object)
- DECL_ACCESSORS(data, Object)
DECL_ACCESSORS(name, Object)
DECL_ACCESSORS(flag, Smi)
DECL_ACCESSORS(expected_receiver_type, Object)
@@ -8397,18 +9758,16 @@ class AccessorInfo: public Struct {
static inline AccessorInfo* cast(Object* obj);
-#ifdef OBJECT_PRINT
- inline void AccessorInfoPrint() {
- AccessorInfoPrint(stdout);
- }
- void AccessorInfoPrint(FILE* out);
-#endif
+ // Dispatched behavior.
DECLARE_VERIFIER(AccessorInfo)
- static const int kGetterOffset = HeapObject::kHeaderSize;
- static const int kSetterOffset = kGetterOffset + kPointerSize;
- static const int kDataOffset = kSetterOffset + kPointerSize;
- static const int kNameOffset = kDataOffset + kPointerSize;
+ // Append all descriptors to the array that are not already there.
+ // Return number added.
+ static int AppendUnique(Handle<Object> descriptors,
+ Handle<FixedArray> array,
+ int valid_descriptors);
+
+ static const int kNameOffset = HeapObject::kHeaderSize;
static const int kFlagOffset = kNameOffset + kPointerSize;
static const int kExpectedReceiverTypeOffset = kFlagOffset + kPointerSize;
static const int kSize = kExpectedReceiverTypeOffset + kPointerSize;
@@ -8424,20 +9783,168 @@ class AccessorInfo: public Struct {
};
+enum AccessorDescriptorType {
+ kDescriptorBitmaskCompare,
+ kDescriptorPointerCompare,
+ kDescriptorPrimitiveValue,
+ kDescriptorObjectDereference,
+ kDescriptorPointerDereference,
+ kDescriptorPointerShift,
+ kDescriptorReturnObject
+};
+
+
+struct BitmaskCompareDescriptor {
+ uint32_t bitmask;
+ uint32_t compare_value;
+ uint8_t size; // Must be in {1,2,4}.
+};
+
+
+struct PointerCompareDescriptor {
+ void* compare_value;
+};
+
+
+struct PrimitiveValueDescriptor {
+ v8::DeclaredAccessorDescriptorDataType data_type;
+ uint8_t bool_offset; // Must be in [0,7], used for kDescriptorBoolType.
+};
+
+
+struct ObjectDerefenceDescriptor {
+ uint8_t internal_field;
+};
+
+
+struct PointerShiftDescriptor {
+ int16_t byte_offset;
+};
+
+
+struct DeclaredAccessorDescriptorData {
+ AccessorDescriptorType type;
+ union {
+ struct BitmaskCompareDescriptor bitmask_compare_descriptor;
+ struct PointerCompareDescriptor pointer_compare_descriptor;
+ struct PrimitiveValueDescriptor primitive_value_descriptor;
+ struct ObjectDerefenceDescriptor object_dereference_descriptor;
+ struct PointerShiftDescriptor pointer_shift_descriptor;
+ };
+};
+
+
+class DeclaredAccessorDescriptor;
+
+
+class DeclaredAccessorDescriptorIterator {
+ public:
+ explicit DeclaredAccessorDescriptorIterator(
+ DeclaredAccessorDescriptor* descriptor);
+ const DeclaredAccessorDescriptorData* Next();
+ bool Complete() const { return length_ == offset_; }
+ private:
+ uint8_t* array_;
+ const int length_;
+ int offset_;
+ DISALLOW_IMPLICIT_CONSTRUCTORS(DeclaredAccessorDescriptorIterator);
+};
+
+
+class DeclaredAccessorDescriptor: public Struct {
+ public:
+ DECL_ACCESSORS(serialized_data, ByteArray)
+
+ static inline DeclaredAccessorDescriptor* cast(Object* obj);
+
+ static Handle<DeclaredAccessorDescriptor> Create(
+ Isolate* isolate,
+ const DeclaredAccessorDescriptorData& data,
+ Handle<DeclaredAccessorDescriptor> previous);
+
+ // Dispatched behavior.
+ DECLARE_PRINTER(DeclaredAccessorDescriptor)
+ DECLARE_VERIFIER(DeclaredAccessorDescriptor)
+
+ static const int kSerializedDataOffset = HeapObject::kHeaderSize;
+ static const int kSize = kSerializedDataOffset + kPointerSize;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(DeclaredAccessorDescriptor);
+};
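
A sketch of the consumption pattern DeclaredAccessorDescriptorIterator uses: walking a byte array that packs a chain of fixed-size descriptor records until the offset reaches the length. The record layout and names below are invented for illustration, not the real serialized_data format.

// declared_accessor_sketch.cc -- illustrative only.
#include <cassert>
#include <cstdint>
#include <cstdio>
#include <cstring>

enum DescType { kBitmaskCompare, kPointerShift, kReturnObject };

struct Record {           // one fixed-size serialized entry
  DescType type;
  int payload;
};

class RecordIterator {
 public:
  RecordIterator(const uint8_t* data, int length)
      : data_(data), length_(length), offset_(0) {}
  bool Complete() const { return offset_ == length_; }
  Record Next() {
    assert(!Complete());
    Record r;
    std::memcpy(&r, data_ + offset_, sizeof(Record));
    offset_ += static_cast<int>(sizeof(Record));
    return r;
  }
 private:
  const uint8_t* data_;
  const int length_;
  int offset_;
};

int main() {
  Record chain[] = { { kPointerShift, 8 }, { kBitmaskCompare, 0xff },
                     { kReturnObject, 0 } };
  RecordIterator it(reinterpret_cast<const uint8_t*>(chain), sizeof(chain));
  while (!it.Complete()) {
    Record r = it.Next();
    std::printf("type=%d payload=%d\n", r.type, r.payload);
  }
  return 0;
}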
+
+
+class DeclaredAccessorInfo: public AccessorInfo {
+ public:
+ DECL_ACCESSORS(descriptor, DeclaredAccessorDescriptor)
+
+ static inline DeclaredAccessorInfo* cast(Object* obj);
+
+ // Dispatched behavior.
+ DECLARE_PRINTER(DeclaredAccessorInfo)
+ DECLARE_VERIFIER(DeclaredAccessorInfo)
+
+ static const int kDescriptorOffset = AccessorInfo::kSize;
+ static const int kSize = kDescriptorOffset + kPointerSize;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(DeclaredAccessorInfo);
+};
+
+
+// An accessor must have a getter, but can have no setter.
+//
+// When setting a property, V8 searches accessors in prototypes.
+// If an accessor was found and it does not have a setter,
+// the request is ignored.
+//
+// If the accessor in the prototype has the READ_ONLY property attribute, then
+// a new value is added to the local object when the property is set.
+// This shadows the accessor in the prototype.
+class ExecutableAccessorInfo: public AccessorInfo {
+ public:
+ DECL_ACCESSORS(getter, Object)
+ DECL_ACCESSORS(setter, Object)
+ DECL_ACCESSORS(data, Object)
+
+ static inline ExecutableAccessorInfo* cast(Object* obj);
+
+ // Dispatched behavior.
+ DECLARE_PRINTER(ExecutableAccessorInfo)
+ DECLARE_VERIFIER(ExecutableAccessorInfo)
+
+ static const int kGetterOffset = AccessorInfo::kSize;
+ static const int kSetterOffset = kGetterOffset + kPointerSize;
+ static const int kDataOffset = kSetterOffset + kPointerSize;
+ static const int kSize = kDataOffset + kPointerSize;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ExecutableAccessorInfo);
+};
+
+
// Support for JavaScript accessors: A pair of a getter and a setter. Each
// accessor can either be
// * a pointer to a JavaScript function or proxy: a real accessor
// * undefined: considered an accessor by the spec, too, strangely enough
// * the hole: an accessor which has not been set
// * a pointer to a map: a transition used to ensure map sharing
+// access_flags provides the ability to override access checks on access check
+// failure.
class AccessorPair: public Struct {
public:
DECL_ACCESSORS(getter, Object)
DECL_ACCESSORS(setter, Object)
+ DECL_ACCESSORS(access_flags, Smi)
+
+ inline void set_access_flags(v8::AccessControl access_control);
+ inline bool all_can_read();
+ inline bool all_can_write();
+ inline bool prohibits_overwriting();
static inline AccessorPair* cast(Object* obj);
- MUST_USE_RESULT MaybeObject* Copy();
+ static Handle<AccessorPair> Copy(Handle<AccessorPair> pair);
Object* get(AccessorComponent component) {
return component == ACCESSOR_GETTER ? getter() : setter();
@@ -8464,16 +9971,20 @@ class AccessorPair: public Struct {
return IsJSAccessor(getter()) || IsJSAccessor(setter());
}
-#ifdef OBJECT_PRINT
- void AccessorPairPrint(FILE* out = stdout);
-#endif
+ // Dispatched behavior.
+ DECLARE_PRINTER(AccessorPair)
DECLARE_VERIFIER(AccessorPair)
static const int kGetterOffset = HeapObject::kHeaderSize;
static const int kSetterOffset = kGetterOffset + kPointerSize;
- static const int kSize = kSetterOffset + kPointerSize;
+ static const int kAccessFlagsOffset = kSetterOffset + kPointerSize;
+ static const int kSize = kAccessFlagsOffset + kPointerSize;
private:
+ static const int kAllCanReadBit = 0;
+ static const int kAllCanWriteBit = 1;
+ static const int kProhibitsOverwritingBit = 2;
+
// Strangely enough, in addition to functions and harmony proxies, the spec
// requires us to consider undefined as a kind of accessor, too:
// var obj = {};
@@ -8495,12 +10006,8 @@ class AccessCheckInfo: public Struct {
static inline AccessCheckInfo* cast(Object* obj);
-#ifdef OBJECT_PRINT
- inline void AccessCheckInfoPrint() {
- AccessCheckInfoPrint(stdout);
- }
- void AccessCheckInfoPrint(FILE* out);
-#endif
+ // Dispatched behavior.
+ DECLARE_PRINTER(AccessCheckInfo)
DECLARE_VERIFIER(AccessCheckInfo)
static const int kNamedCallbackOffset = HeapObject::kHeaderSize;
@@ -8524,12 +10031,8 @@ class InterceptorInfo: public Struct {
static inline InterceptorInfo* cast(Object* obj);
-#ifdef OBJECT_PRINT
- inline void InterceptorInfoPrint() {
- InterceptorInfoPrint(stdout);
- }
- void InterceptorInfoPrint(FILE* out);
-#endif
+ // Dispatched behavior.
+ DECLARE_PRINTER(InterceptorInfo)
DECLARE_VERIFIER(InterceptorInfo)
static const int kGetterOffset = HeapObject::kHeaderSize;
@@ -8552,12 +10055,8 @@ class CallHandlerInfo: public Struct {
static inline CallHandlerInfo* cast(Object* obj);
-#ifdef OBJECT_PRINT
- inline void CallHandlerInfoPrint() {
- CallHandlerInfoPrint(stdout);
- }
- void CallHandlerInfoPrint(FILE* out);
-#endif
+ // Dispatched behavior.
+ DECLARE_PRINTER(CallHandlerInfo)
DECLARE_VERIFIER(CallHandlerInfo)
static const int kCallbackOffset = HeapObject::kHeaderSize;
@@ -8573,12 +10072,15 @@ class TemplateInfo: public Struct {
public:
DECL_ACCESSORS(tag, Object)
DECL_ACCESSORS(property_list, Object)
+ DECL_ACCESSORS(property_accessors, Object)
DECLARE_VERIFIER(TemplateInfo)
- static const int kTagOffset = HeapObject::kHeaderSize;
+ static const int kTagOffset = HeapObject::kHeaderSize;
static const int kPropertyListOffset = kTagOffset + kPointerSize;
- static const int kHeaderSize = kPropertyListOffset + kPointerSize;
+ static const int kPropertyAccessorsOffset =
+ kPropertyListOffset + kPointerSize;
+ static const int kHeaderSize = kPropertyAccessorsOffset + kPointerSize;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(TemplateInfo);
@@ -8589,7 +10091,6 @@ class FunctionTemplateInfo: public TemplateInfo {
public:
DECL_ACCESSORS(serial_number, Object)
DECL_ACCESSORS(call_code, Object)
- DECL_ACCESSORS(property_accessors, Object)
DECL_ACCESSORS(prototype_template, Object)
DECL_ACCESSORS(parent_template, Object)
DECL_ACCESSORS(named_property_handler, Object)
@@ -8601,6 +10102,9 @@ class FunctionTemplateInfo: public TemplateInfo {
DECL_ACCESSORS(access_check_info, Object)
DECL_ACCESSORS(flag, Smi)
+ inline int length();
+ inline void set_length(int value);
+
// Following properties use flag bits.
DECL_BOOLEAN_ACCESSORS(hidden_prototype)
DECL_BOOLEAN_ACCESSORS(undetectable)
@@ -8608,22 +10112,19 @@ class FunctionTemplateInfo: public TemplateInfo {
// requires access check.
DECL_BOOLEAN_ACCESSORS(needs_access_check)
DECL_BOOLEAN_ACCESSORS(read_only_prototype)
+ DECL_BOOLEAN_ACCESSORS(remove_prototype)
+ DECL_BOOLEAN_ACCESSORS(do_not_cache)
static inline FunctionTemplateInfo* cast(Object* obj);
-#ifdef OBJECT_PRINT
- inline void FunctionTemplateInfoPrint() {
- FunctionTemplateInfoPrint(stdout);
- }
- void FunctionTemplateInfoPrint(FILE* out);
-#endif
+ // Dispatched behavior.
+ DECLARE_PRINTER(FunctionTemplateInfo)
DECLARE_VERIFIER(FunctionTemplateInfo)
static const int kSerialNumberOffset = TemplateInfo::kHeaderSize;
static const int kCallCodeOffset = kSerialNumberOffset + kPointerSize;
- static const int kPropertyAccessorsOffset = kCallCodeOffset + kPointerSize;
static const int kPrototypeTemplateOffset =
- kPropertyAccessorsOffset + kPointerSize;
+ kCallCodeOffset + kPointerSize;
static const int kParentTemplateOffset =
kPrototypeTemplateOffset + kPointerSize;
static const int kNamedPropertyHandlerOffset =
@@ -8638,7 +10139,8 @@ class FunctionTemplateInfo: public TemplateInfo {
static const int kAccessCheckInfoOffset =
kInstanceCallHandlerOffset + kPointerSize;
static const int kFlagOffset = kAccessCheckInfoOffset + kPointerSize;
- static const int kSize = kFlagOffset + kPointerSize;
+ static const int kLengthOffset = kFlagOffset + kPointerSize;
+ static const int kSize = kLengthOffset + kPointerSize;
private:
// Bit position in the flag, from least significant bit position.
@@ -8646,6 +10148,8 @@ class FunctionTemplateInfo: public TemplateInfo {
static const int kUndetectableBit = 1;
static const int kNeedsAccessCheckBit = 2;
static const int kReadOnlyPrototypeBit = 3;
+ static const int kRemovePrototypeBit = 4;
+ static const int kDoNotCacheBit = 5;
DISALLOW_IMPLICIT_CONSTRUCTORS(FunctionTemplateInfo);
};
@@ -8658,12 +10162,8 @@ class ObjectTemplateInfo: public TemplateInfo {
static inline ObjectTemplateInfo* cast(Object* obj);
-#ifdef OBJECT_PRINT
- inline void ObjectTemplateInfoPrint() {
- ObjectTemplateInfoPrint(stdout);
- }
- void ObjectTemplateInfoPrint(FILE* out);
-#endif
+ // Dispatched behavior.
+ DECLARE_PRINTER(ObjectTemplateInfo)
DECLARE_VERIFIER(ObjectTemplateInfo)
static const int kConstructorOffset = TemplateInfo::kHeaderSize;
@@ -8680,12 +10180,8 @@ class SignatureInfo: public Struct {
static inline SignatureInfo* cast(Object* obj);
-#ifdef OBJECT_PRINT
- inline void SignatureInfoPrint() {
- SignatureInfoPrint(stdout);
- }
- void SignatureInfoPrint(FILE* out);
-#endif
+ // Dispatched behavior.
+ DECLARE_PRINTER(SignatureInfo)
DECLARE_VERIFIER(SignatureInfo)
static const int kReceiverOffset = Struct::kHeaderSize;
@@ -8703,12 +10199,8 @@ class TypeSwitchInfo: public Struct {
static inline TypeSwitchInfo* cast(Object* obj);
-#ifdef OBJECT_PRINT
- inline void TypeSwitchInfoPrint() {
- TypeSwitchInfoPrint(stdout);
- }
- void TypeSwitchInfoPrint(FILE* out);
-#endif
+ // Dispatched behavior.
+ DECLARE_PRINTER(TypeSwitchInfo)
DECLARE_VERIFIER(TypeSwitchInfo)
static const int kTypesOffset = Struct::kHeaderSize;
@@ -8753,12 +10245,8 @@ class DebugInfo: public Struct {
static inline DebugInfo* cast(Object* obj);
-#ifdef OBJECT_PRINT
- inline void DebugInfoPrint() {
- DebugInfoPrint(stdout);
- }
- void DebugInfoPrint(FILE* out);
-#endif
+ // Dispatched behavior.
+ DECLARE_PRINTER(DebugInfo)
DECLARE_VERIFIER(DebugInfo)
static const int kSharedFunctionInfoIndex = Struct::kHeaderSize;
@@ -8809,12 +10297,8 @@ class BreakPointInfo: public Struct {
static inline BreakPointInfo* cast(Object* obj);
-#ifdef OBJECT_PRINT
- inline void BreakPointInfoPrint() {
- BreakPointInfoPrint(stdout);
- }
- void BreakPointInfoPrint(FILE* out);
-#endif
+ // Dispatched behavior.
+ DECLARE_PRINTER(BreakPointInfo)
DECLARE_VERIFIER(BreakPointInfo)
static const int kCodePositionIndex = Struct::kHeaderSize;
@@ -8836,10 +10320,10 @@ class BreakPointInfo: public Struct {
#undef DECLARE_VERIFIER
#define VISITOR_SYNCHRONIZATION_TAGS_LIST(V) \
- V(kSymbolTable, "symbol_table", "(Symbols)") \
+ V(kStringTable, "string_table", "(Internalized strings)") \
V(kExternalStringsTable, "external_strings_table", "(External strings)") \
V(kStrongRootList, "strong_root_list", "(Strong roots)") \
- V(kSymbol, "symbol", "(Symbol)") \
+ V(kInternalizedString, "internalized_string", "(Internal string)") \
V(kBootstrapper, "bootstrapper", "(Bootstrapper)") \
V(kTop, "top", "(Isolate)") \
V(kRelocatable, "relocatable", "(Relocatable)") \
@@ -8848,6 +10332,7 @@ class BreakPointInfo: public Struct {
V(kHandleScope, "handlescope", "(Handle scope)") \
V(kBuiltins, "builtins", "(Builtins)") \
V(kGlobalHandles, "globalhandles", "(Global handles)") \
+ V(kEternalHandles, "eternalhandles", "(Eternal handles)") \
V(kThreadManager, "threadmanager", "(Thread manager)") \
V(kExtensions, "Extensions", "(Extensions)")
@@ -8874,6 +10359,9 @@ class ObjectVisitor BASE_EMBEDDED {
// [start, end). Any or all of the values may be modified on return.
virtual void VisitPointers(Object** start, Object** end) = 0;
+ // Handy shorthand for visiting a single pointer.
+ virtual void VisitPointer(Object** p) { VisitPointers(p, p + 1); }
+
// To allow lazy clearing of inline caches the visitor has
// a rich interface for iterating over Code objects..
@@ -8884,7 +10372,7 @@ class ObjectVisitor BASE_EMBEDDED {
virtual void VisitCodeEntry(Address entry_address);
// Visits a global property cell reference in the instruction stream.
- virtual void VisitGlobalPropertyCell(RelocInfo* rinfo);
+ virtual void VisitCell(RelocInfo* rinfo);
// Visits a runtime entry in the instruction stream.
virtual void VisitRuntimeEntry(RelocInfo* rinfo) {}
@@ -8898,22 +10386,18 @@ class ObjectVisitor BASE_EMBEDDED {
// Visits a debug call target in the instruction stream.
virtual void VisitDebugTarget(RelocInfo* rinfo);
- // Handy shorthand for visiting a single pointer.
- virtual void VisitPointer(Object** p) { VisitPointers(p, p + 1); }
+ // Visits the byte sequence in a function's prologue that contains information
+ // about the code's age.
+ virtual void VisitCodeAgeSequence(RelocInfo* rinfo);
// Visit pointer embedded into a code object.
virtual void VisitEmbeddedPointer(RelocInfo* rinfo);
- // Visits a contiguous arrays of external references (references to the C++
- // heap) in the half-open range [start, end). Any or all of the values
- // may be modified on return.
- virtual void VisitExternalReferences(Address* start, Address* end) {}
-
+ // Visits an external reference embedded into a code object.
virtual void VisitExternalReference(RelocInfo* rinfo);
- inline void VisitExternalReference(Address* p) {
- VisitExternalReferences(p, p + 1);
- }
+ // Visits an external reference. The value may be modified on return.
+ virtual void VisitExternalReference(Address* p) {}
// Visits a handle that has an embedder-assigned class ID.
virtual void VisitEmbedderReference(Object** p, uint16_t class_id) {}
diff --git a/deps/v8/src/optimizing-compiler-thread.cc b/deps/v8/src/optimizing-compiler-thread.cc
index 06018dd1a..e9c025452 100644
--- a/deps/v8/src/optimizing-compiler-thread.cc
+++ b/deps/v8/src/optimizing-compiler-thread.cc
@@ -29,6 +29,7 @@
#include "v8.h"
+#include "full-codegen.h"
#include "hydrogen.h"
#include "isolate.h"
#include "v8threads.h"
@@ -36,89 +37,342 @@
namespace v8 {
namespace internal {
+OptimizingCompilerThread::~OptimizingCompilerThread() {
+ ASSERT_EQ(0, input_queue_length_);
+ DeleteArray(input_queue_);
+ if (FLAG_concurrent_osr) {
+#ifdef DEBUG
+ for (int i = 0; i < osr_buffer_capacity_; i++) {
+ CHECK_EQ(NULL, osr_buffer_[i]);
+ }
+#endif
+ DeleteArray(osr_buffer_);
+ }
+}
+
void OptimizingCompilerThread::Run() {
#ifdef DEBUG
- thread_id_ = ThreadId::Current().ToInteger();
+ { LockGuard<Mutex> lock_guard(&thread_id_mutex_);
+ thread_id_ = ThreadId::Current().ToInteger();
+ }
#endif
Isolate::SetIsolateThreadLocals(isolate_, NULL);
+ DisallowHeapAllocation no_allocation;
+ DisallowHandleAllocation no_handles;
+ DisallowHandleDereference no_deref;
- int64_t epoch = 0;
- if (FLAG_trace_parallel_recompilation) epoch = OS::Ticks();
+ ElapsedTimer total_timer;
+ if (FLAG_trace_concurrent_recompilation) total_timer.Start();
while (true) {
- input_queue_semaphore_->Wait();
- if (Acquire_Load(&stop_thread_)) {
- stop_semaphore_->Signal();
- if (FLAG_trace_parallel_recompilation) {
- time_spent_total_ = OS::Ticks() - epoch;
- }
- return;
+ input_queue_semaphore_.Wait();
+ Logger::TimerEventScope timer(
+ isolate_, Logger::TimerEventScope::v8_recompile_concurrent);
+
+ if (FLAG_concurrent_recompilation_delay != 0) {
+ OS::Sleep(FLAG_concurrent_recompilation_delay);
+ }
+
+ switch (static_cast<StopFlag>(Acquire_Load(&stop_thread_))) {
+ case CONTINUE:
+ break;
+ case STOP:
+ if (FLAG_trace_concurrent_recompilation) {
+ time_spent_total_ = total_timer.Elapsed();
+ }
+ stop_semaphore_.Signal();
+ return;
+ case FLUSH:
+ // The main thread is blocked, waiting for the stop semaphore.
+ { AllowHandleDereference allow_handle_dereference;
+ FlushInputQueue(true);
+ }
+ Release_Store(&stop_thread_, static_cast<AtomicWord>(CONTINUE));
+ stop_semaphore_.Signal();
+ // Return to start of consumer loop.
+ continue;
+ }
+
+ ElapsedTimer compiling_timer;
+ if (FLAG_trace_concurrent_recompilation) compiling_timer.Start();
+
+ CompileNext();
+
+ if (FLAG_trace_concurrent_recompilation) {
+ time_spent_compiling_ += compiling_timer.Elapsed();
+ }
+ }
+}
+
+
+RecompileJob* OptimizingCompilerThread::NextInput() {
+ LockGuard<Mutex> access_input_queue_(&input_queue_mutex_);
+ if (input_queue_length_ == 0) return NULL;
+ RecompileJob* job = input_queue_[InputQueueIndex(0)];
+ ASSERT_NE(NULL, job);
+ input_queue_shift_ = InputQueueIndex(1);
+ input_queue_length_--;
+ return job;
+}
+
+
+void OptimizingCompilerThread::CompileNext() {
+ RecompileJob* job = NextInput();
+ ASSERT_NE(NULL, job);
+
+ // The function may have already been optimized by OSR. Simply continue.
+ RecompileJob::Status status = job->OptimizeGraph();
+ USE(status); // Prevent an unused-variable error in release mode.
+ ASSERT(status != RecompileJob::FAILED);
+
+ // The function may have already been optimized by OSR. Simply continue.
+ // Use a mutex to make sure that functions marked for install
+ // are always also queued.
+ output_queue_.Enqueue(job);
+ isolate_->stack_guard()->RequestInstallCode();
+}
+
+
+static void DisposeRecompileJob(RecompileJob* job,
+ bool restore_function_code) {
+ // The recompile job is allocated in the CompilationInfo's zone.
+ CompilationInfo* info = job->info();
+ if (restore_function_code) {
+ if (info->is_osr()) {
+ if (!job->IsWaitingForInstall()) BackEdgeTable::RemoveStackCheck(info);
+ } else {
+ Handle<JSFunction> function = info->closure();
+ function->ReplaceCode(function->shared()->code());
}
+ }
+ delete info;
+}
- int64_t compiling_start = 0;
- if (FLAG_trace_parallel_recompilation) compiling_start = OS::Ticks();
- Heap::RelocationLock relocation_lock(isolate_->heap());
- OptimizingCompiler* optimizing_compiler = NULL;
- input_queue_.Dequeue(&optimizing_compiler);
- Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(-1));
+void OptimizingCompilerThread::FlushInputQueue(bool restore_function_code) {
+ RecompileJob* job;
+ while ((job = NextInput())) {
+ // This should not block, since we have one signal on the input queue
+ // semaphore corresponding to each element in the input queue.
+ input_queue_semaphore_.Wait();
+ // OSR jobs are dealt with separately.
+ if (!job->info()->is_osr()) {
+ DisposeRecompileJob(job, restore_function_code);
+ }
+ }
+}
- ASSERT(!optimizing_compiler->info()->closure()->IsOptimized());
- OptimizingCompiler::Status status = optimizing_compiler->OptimizeGraph();
- ASSERT(status != OptimizingCompiler::FAILED);
- // Prevent an unused-variable error in release mode.
- USE(status);
+void OptimizingCompilerThread::FlushOutputQueue(bool restore_function_code) {
+ RecompileJob* job;
+ while (output_queue_.Dequeue(&job)) {
+ // OSR jobs are dealt with separately.
+ if (!job->info()->is_osr()) {
+ DisposeRecompileJob(job, restore_function_code);
+ }
+ }
+}
- output_queue_.Enqueue(optimizing_compiler);
- isolate_->stack_guard()->RequestCodeReadyEvent();
- if (FLAG_trace_parallel_recompilation) {
- time_spent_compiling_ += OS::Ticks() - compiling_start;
+void OptimizingCompilerThread::FlushOsrBuffer(bool restore_function_code) {
+ for (int i = 0; i < osr_buffer_capacity_; i++) {
+ if (osr_buffer_[i] != NULL) {
+ DisposeRecompileJob(osr_buffer_[i], restore_function_code);
+ osr_buffer_[i] = NULL;
}
}
}
+void OptimizingCompilerThread::Flush() {
+ ASSERT(!IsOptimizerThread());
+ Release_Store(&stop_thread_, static_cast<AtomicWord>(FLUSH));
+ if (FLAG_block_concurrent_recompilation) Unblock();
+ input_queue_semaphore_.Signal();
+ stop_semaphore_.Wait();
+ FlushOutputQueue(true);
+ if (FLAG_concurrent_osr) FlushOsrBuffer(true);
+ if (FLAG_trace_concurrent_recompilation) {
+ PrintF(" ** Flushed concurrent recompilation queues.\n");
+ }
+}
+
+
void OptimizingCompilerThread::Stop() {
- Release_Store(&stop_thread_, static_cast<AtomicWord>(true));
- input_queue_semaphore_->Signal();
- stop_semaphore_->Wait();
-
- if (FLAG_trace_parallel_recompilation) {
- double compile_time = static_cast<double>(time_spent_compiling_);
- double total_time = static_cast<double>(time_spent_total_);
- double percentage = (compile_time * 100) / total_time;
+ ASSERT(!IsOptimizerThread());
+ Release_Store(&stop_thread_, static_cast<AtomicWord>(STOP));
+ if (FLAG_block_concurrent_recompilation) Unblock();
+ input_queue_semaphore_.Signal();
+ stop_semaphore_.Wait();
+
+ if (FLAG_concurrent_recompilation_delay != 0) {
+ // At this point the optimizing compiler thread's event loop has stopped.
+ // There is no need for a mutex when reading input_queue_length_.
+ while (input_queue_length_ > 0) CompileNext();
+ InstallOptimizedFunctions();
+ } else {
+ FlushInputQueue(false);
+ FlushOutputQueue(false);
+ }
+
+ if (FLAG_concurrent_osr) FlushOsrBuffer(false);
+
+ if (FLAG_trace_concurrent_recompilation) {
+ double percentage = time_spent_compiling_.PercentOf(time_spent_total_);
PrintF(" ** Compiler thread did %.2f%% useful work\n", percentage);
}
+
+ if ((FLAG_trace_osr || FLAG_trace_concurrent_recompilation) &&
+ FLAG_concurrent_osr) {
+ PrintF("[COSR hit rate %d / %d]\n", osr_hits_, osr_attempts_);
+ }
+
+ Join();
}
void OptimizingCompilerThread::InstallOptimizedFunctions() {
+ ASSERT(!IsOptimizerThread());
HandleScope handle_scope(isolate_);
- int functions_installed = 0;
- while (!output_queue_.IsEmpty()) {
- OptimizingCompiler* compiler = NULL;
- output_queue_.Dequeue(&compiler);
- Compiler::InstallOptimizedCode(compiler);
- functions_installed++;
+
+ RecompileJob* job;
+ while (output_queue_.Dequeue(&job)) {
+ CompilationInfo* info = job->info();
+ if (info->is_osr()) {
+ if (FLAG_trace_osr) {
+ PrintF("[COSR - ");
+ info->closure()->PrintName();
+ PrintF(" is ready for install and entry at AST id %d]\n",
+ info->osr_ast_id().ToInt());
+ }
+ job->WaitForInstall();
+ BackEdgeTable::RemoveStackCheck(info);
+ } else {
+ Compiler::InstallOptimizedCode(job);
+ }
+ }
+}
+
+
+void OptimizingCompilerThread::QueueForOptimization(RecompileJob* job) {
+ ASSERT(IsQueueAvailable());
+ ASSERT(!IsOptimizerThread());
+ CompilationInfo* info = job->info();
+ if (info->is_osr()) {
+ if (FLAG_trace_concurrent_recompilation) {
+ PrintF(" ** Queueing ");
+ info->closure()->PrintName();
+ PrintF(" for concurrent on-stack replacement.\n");
+ }
+ osr_attempts_++;
+ BackEdgeTable::AddStackCheck(info);
+ AddToOsrBuffer(job);
+ // Add job to the front of the input queue.
+ LockGuard<Mutex> access_input_queue(&input_queue_mutex_);
+ ASSERT_LT(input_queue_length_, input_queue_capacity_);
+    // Move input_queue_shift_ back by one.
+ input_queue_shift_ = InputQueueIndex(input_queue_capacity_ - 1);
+ input_queue_[InputQueueIndex(0)] = job;
+ input_queue_length_++;
+ } else {
+ info->closure()->MarkInRecompileQueue();
+ // Add job to the back of the input queue.
+ LockGuard<Mutex> access_input_queue(&input_queue_mutex_);
+ ASSERT_LT(input_queue_length_, input_queue_capacity_);
+ input_queue_[InputQueueIndex(input_queue_length_)] = job;
+ input_queue_length_++;
+ }
+ if (FLAG_block_concurrent_recompilation) {
+ blocked_jobs_++;
+ } else {
+ input_queue_semaphore_.Signal();
+ }
+}
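
A standalone sketch (not V8 code; names and the int "jobs" are placeholders) of the shifted circular array used for the input queue above: ordinary jobs are appended at the back, OSR jobs jump the queue by moving the shift back one slot, and every access goes through one index helper.

// input_queue_sketch.cc -- illustrative only.
#include <cassert>
#include <cstdio>

static const int kCapacity = 8;

struct CircularQueue {
  int slots[kCapacity];
  int length = 0;
  int shift = 0;

  int Index(int i) const { return (i + shift) % kCapacity; }

  void PushBack(int job) {
    assert(length < kCapacity);
    slots[Index(length++)] = job;
  }
  void PushFront(int job) {          // used for OSR jobs in the real thread
    assert(length < kCapacity);
    shift = Index(kCapacity - 1);    // move the shift back by one
    slots[Index(0)] = job;
    length++;
  }
  int PopFront() {
    assert(length > 0);
    int job = slots[Index(0)];
    shift = Index(1);
    length--;
    return job;
  }
};

int main() {
  CircularQueue q;
  q.PushBack(1); q.PushBack(2);
  q.PushFront(99);                   // jumps the queue
  std::printf("%d %d %d\n", q.PopFront(), q.PopFront(), q.PopFront());  // 99 1 2
  return 0;
}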
+
+
+void OptimizingCompilerThread::Unblock() {
+ ASSERT(!IsOptimizerThread());
+ while (blocked_jobs_ > 0) {
+ input_queue_semaphore_.Signal();
+ blocked_jobs_--;
}
- if (FLAG_trace_parallel_recompilation && functions_installed != 0) {
- PrintF(" ** Installed %d function(s).\n", functions_installed);
+}
+
+
+RecompileJob* OptimizingCompilerThread::FindReadyOSRCandidate(
+ Handle<JSFunction> function, uint32_t osr_pc_offset) {
+ ASSERT(!IsOptimizerThread());
+ for (int i = 0; i < osr_buffer_capacity_; i++) {
+ RecompileJob* current = osr_buffer_[i];
+ if (current != NULL &&
+ current->IsWaitingForInstall() &&
+ current->info()->HasSameOsrEntry(function, osr_pc_offset)) {
+ osr_hits_++;
+ osr_buffer_[i] = NULL;
+ return current;
+ }
+ }
+ return NULL;
+}
+
+
+bool OptimizingCompilerThread::IsQueuedForOSR(Handle<JSFunction> function,
+ uint32_t osr_pc_offset) {
+ ASSERT(!IsOptimizerThread());
+ for (int i = 0; i < osr_buffer_capacity_; i++) {
+ RecompileJob* current = osr_buffer_[i];
+ if (current != NULL &&
+ current->info()->HasSameOsrEntry(function, osr_pc_offset)) {
+ return !current->IsWaitingForInstall();
+ }
+ }
+ return false;
+}
+
+
+bool OptimizingCompilerThread::IsQueuedForOSR(JSFunction* function) {
+ ASSERT(!IsOptimizerThread());
+ for (int i = 0; i < osr_buffer_capacity_; i++) {
+ RecompileJob* current = osr_buffer_[i];
+ if (current != NULL && *current->info()->closure() == function) {
+ return !current->IsWaitingForInstall();
+ }
}
+ return false;
}
-void OptimizingCompilerThread::QueueForOptimization(
- OptimizingCompiler* optimizing_compiler) {
- input_queue_.Enqueue(optimizing_compiler);
- input_queue_semaphore_->Signal();
+void OptimizingCompilerThread::AddToOsrBuffer(RecompileJob* job) {
+ ASSERT(!IsOptimizerThread());
+ // Find the next slot that is empty or has a stale job.
+ while (true) {
+ RecompileJob* stale = osr_buffer_[osr_buffer_cursor_];
+ if (stale == NULL || stale->IsWaitingForInstall()) break;
+ osr_buffer_cursor_ = (osr_buffer_cursor_ + 1) % osr_buffer_capacity_;
+ }
+
+ // Add to found slot and dispose the evicted job.
+ RecompileJob* evicted = osr_buffer_[osr_buffer_cursor_];
+ if (evicted != NULL) {
+ ASSERT(evicted->IsWaitingForInstall());
+ CompilationInfo* info = evicted->info();
+ if (FLAG_trace_osr) {
+ PrintF("[COSR - Discarded ");
+ info->closure()->PrintName();
+ PrintF(", AST id %d]\n", info->osr_ast_id().ToInt());
+ }
+ DisposeRecompileJob(evicted, false);
+ }
+ osr_buffer_[osr_buffer_cursor_] = job;
+ osr_buffer_cursor_ = (osr_buffer_cursor_ + 1) % osr_buffer_capacity_;
}
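
A sketch of the cyclic-buffer policy in AddToOsrBuffer(): advance the cursor past live entries, overwrite only empty or stale ("waiting for install") slots, and dispose an evicted stale entry first. Names are invented; the real buffer is sized so a free or stale slot always exists, which this sketch simply assumes.

// osr_buffer_sketch.cc -- illustrative only.
#include <cstdio>

static const int kCapacity = 4;

struct Job { int id; bool waiting_for_install; };

struct OsrBuffer {
  Job* slots[kCapacity] = { nullptr, nullptr, nullptr, nullptr };
  int cursor = 0;

  void Add(Job* job) {
    // Find the next slot that is empty or holds a stale job.
    while (slots[cursor] != nullptr && !slots[cursor]->waiting_for_install) {
      cursor = (cursor + 1) % kCapacity;
    }
    if (slots[cursor] != nullptr) {
      std::printf("evicting stale job %d\n", slots[cursor]->id);  // dispose here
    }
    slots[cursor] = job;
    cursor = (cursor + 1) % kCapacity;
  }
};

int main() {
  OsrBuffer buffer;
  Job jobs[] = { {1, true}, {2, false}, {3, false}, {4, false}, {5, false} };
  for (Job& j : jobs) buffer.Add(&j);
  // The fifth Add wraps around: slot 0 holds job 1, which is stale
  // ("waiting for install"), so it is evicted and replaced.
  return 0;
}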
+
#ifdef DEBUG
bool OptimizingCompilerThread::IsOptimizerThread() {
- if (!FLAG_parallel_recompilation) return false;
+ if (!FLAG_concurrent_recompilation) return false;
+ LockGuard<Mutex> lock_guard(&thread_id_mutex_);
return ThreadId::Current().ToInteger() == thread_id_;
}
#endif
diff --git a/deps/v8/src/optimizing-compiler-thread.h b/deps/v8/src/optimizing-compiler-thread.h
index d5627266d..754aecebf 100644
--- a/deps/v8/src/optimizing-compiler-thread.h
+++ b/deps/v8/src/optimizing-compiler-thread.h
@@ -29,71 +29,129 @@
#define V8_OPTIMIZING_COMPILER_THREAD_H_
#include "atomicops.h"
-#include "platform.h"
#include "flags.h"
-#include "unbound-queue.h"
+#include "list.h"
+#include "platform.h"
+#include "platform/mutex.h"
+#include "platform/time.h"
+#include "unbound-queue-inl.h"
namespace v8 {
namespace internal {
-class HGraphBuilder;
-class OptimizingCompiler;
+class HOptimizedGraphBuilder;
+class RecompileJob;
+class SharedFunctionInfo;
class OptimizingCompilerThread : public Thread {
public:
explicit OptimizingCompilerThread(Isolate *isolate) :
Thread("OptimizingCompilerThread"),
+#ifdef DEBUG
+ thread_id_(0),
+#endif
isolate_(isolate),
- stop_semaphore_(OS::CreateSemaphore(0)),
- input_queue_semaphore_(OS::CreateSemaphore(0)),
- time_spent_compiling_(0),
- time_spent_total_(0) {
- NoBarrier_Store(&stop_thread_, static_cast<AtomicWord>(false));
- NoBarrier_Store(&queue_length_, static_cast<AtomicWord>(0));
+ stop_semaphore_(0),
+ input_queue_semaphore_(0),
+ input_queue_capacity_(FLAG_concurrent_recompilation_queue_length),
+ input_queue_length_(0),
+ input_queue_shift_(0),
+ osr_buffer_capacity_(FLAG_concurrent_recompilation_queue_length + 4),
+ osr_buffer_cursor_(0),
+ osr_hits_(0),
+ osr_attempts_(0),
+ blocked_jobs_(0) {
+ NoBarrier_Store(&stop_thread_, static_cast<AtomicWord>(CONTINUE));
+ input_queue_ = NewArray<RecompileJob*>(input_queue_capacity_);
+ if (FLAG_concurrent_osr) {
+ // Allocate and mark OSR buffer slots as empty.
+ osr_buffer_ = NewArray<RecompileJob*>(osr_buffer_capacity_);
+ for (int i = 0; i < osr_buffer_capacity_; i++) osr_buffer_[i] = NULL;
+ }
}
+ ~OptimizingCompilerThread();
+
void Run();
void Stop();
- void QueueForOptimization(OptimizingCompiler* optimizing_compiler);
+ void Flush();
+ void QueueForOptimization(RecompileJob* optimizing_compiler);
+ void Unblock();
void InstallOptimizedFunctions();
+ RecompileJob* FindReadyOSRCandidate(Handle<JSFunction> function,
+ uint32_t osr_pc_offset);
+ bool IsQueuedForOSR(Handle<JSFunction> function, uint32_t osr_pc_offset);
+
+ bool IsQueuedForOSR(JSFunction* function);
inline bool IsQueueAvailable() {
- // We don't need a barrier since we have a data dependency right
- // after.
- Atomic32 current_length = NoBarrier_Load(&queue_length_);
-
- // This can be queried only from the execution thread.
- ASSERT(!IsOptimizerThread());
- // Since only the execution thread increments queue_length_ and
- // only one thread can run inside an Isolate at one time, a direct
- // doesn't introduce a race -- queue_length_ may decreased in
- // meantime, but not increased.
- return (current_length < FLAG_parallel_recompilation_queue_length);
+ LockGuard<Mutex> access_input_queue(&input_queue_mutex_);
+ return input_queue_length_ < input_queue_capacity_;
+ }
+
+ inline void AgeBufferedOsrJobs() {
+ // Advance cursor of the cyclic buffer to next empty slot or stale OSR job.
+ // Dispose said OSR job in the latter case. Calling this on every GC
+ // should make sure that we do not hold onto stale jobs indefinitely.
+ AddToOsrBuffer(NULL);
}
#ifdef DEBUG
bool IsOptimizerThread();
#endif
- ~OptimizingCompilerThread() {
- delete input_queue_semaphore_;
- delete stop_semaphore_;
- }
-
private:
- Isolate* isolate_;
- Semaphore* stop_semaphore_;
- Semaphore* input_queue_semaphore_;
- UnboundQueue<OptimizingCompiler*> input_queue_;
- UnboundQueue<OptimizingCompiler*> output_queue_;
- volatile AtomicWord stop_thread_;
- volatile Atomic32 queue_length_;
- int64_t time_spent_compiling_;
- int64_t time_spent_total_;
+ enum StopFlag { CONTINUE, STOP, FLUSH };
+
+ void FlushInputQueue(bool restore_function_code);
+ void FlushOutputQueue(bool restore_function_code);
+ void FlushOsrBuffer(bool restore_function_code);
+ void CompileNext();
+ RecompileJob* NextInput();
+
+ // Add a recompilation task for OSR to the cyclic buffer, awaiting OSR entry.
+ // Tasks evicted from the cyclic buffer are discarded.
+ void AddToOsrBuffer(RecompileJob* compiler);
+
+ inline int InputQueueIndex(int i) {
+ int result = (i + input_queue_shift_) % input_queue_capacity_;
+ ASSERT_LE(0, result);
+ ASSERT_LT(result, input_queue_capacity_);
+ return result;
+ }
#ifdef DEBUG
int thread_id_;
+ Mutex thread_id_mutex_;
#endif
+
+ Isolate* isolate_;
+ Semaphore stop_semaphore_;
+ Semaphore input_queue_semaphore_;
+
+ // Circular queue of incoming recompilation tasks (including OSR).
+ RecompileJob** input_queue_;
+ int input_queue_capacity_;
+ int input_queue_length_;
+ int input_queue_shift_;
+ Mutex input_queue_mutex_;
+
+ // Queue of recompilation tasks ready to be installed (excluding OSR).
+ UnboundQueue<RecompileJob*> output_queue_;
+
+ // Cyclic buffer of recompilation tasks for OSR.
+ RecompileJob** osr_buffer_;
+ int osr_buffer_capacity_;
+ int osr_buffer_cursor_;
+
+ volatile AtomicWord stop_thread_;
+ TimeDelta time_spent_compiling_;
+ TimeDelta time_spent_total_;
+
+ int osr_hits_;
+ int osr_attempts_;
+
+ int blocked_jobs_;
};
} } // namespace v8::internal
diff --git a/deps/v8/src/parser.cc b/deps/v8/src/parser.cc
index 6da414af9..d84649d86 100644
--- a/deps/v8/src/parser.cc
+++ b/deps/v8/src/parser.cc
@@ -52,7 +52,10 @@ namespace internal {
class PositionStack {
public:
explicit PositionStack(bool* ok) : top_(NULL), ok_(ok) {}
- ~PositionStack() { ASSERT(!*ok_ || is_empty()); }
+ ~PositionStack() {
+ ASSERT(!*ok_ || is_empty());
+ USE(ok_);
+ }
class Element {
public:
@@ -199,9 +202,8 @@ RegExpTree* RegExpBuilder::ToRegExp() {
}
-void RegExpBuilder::AddQuantifierToAtom(int min,
- int max,
- RegExpQuantifier::Type type) {
+void RegExpBuilder::AddQuantifierToAtom(
+ int min, int max, RegExpQuantifier::QuantifierType quantifier_type) {
if (pending_empty_) {
pending_empty_ = false;
return;
@@ -241,7 +243,8 @@ void RegExpBuilder::AddQuantifierToAtom(int min,
UNREACHABLE();
return;
}
- terms_.Add(new(zone()) RegExpQuantifier(min, max, type, atom), zone());
+ terms_.Add(
+ new(zone()) RegExpQuantifier(min, max, quantifier_type, atom), zone());
LAST(ADD_TERM);
}
@@ -254,10 +257,10 @@ Handle<String> Parser::LookupSymbol(int symbol_id) {
if (static_cast<unsigned>(symbol_id)
>= static_cast<unsigned>(symbol_cache_.length())) {
if (scanner().is_literal_ascii()) {
- return isolate()->factory()->LookupAsciiSymbol(
- scanner().literal_ascii_string());
+ return isolate()->factory()->InternalizeOneByteString(
+ Vector<const uint8_t>::cast(scanner().literal_ascii_string()));
} else {
- return isolate()->factory()->LookupTwoByteSymbol(
+ return isolate()->factory()->InternalizeTwoByteString(
scanner().literal_utf16_string());
}
}
@@ -275,10 +278,10 @@ Handle<String> Parser::LookupCachedSymbol(int symbol_id) {
Handle<String> result = symbol_cache_.at(symbol_id);
if (result.is_null()) {
if (scanner().is_literal_ascii()) {
- result = isolate()->factory()->LookupAsciiSymbol(
- scanner().literal_ascii_string());
+ result = isolate()->factory()->InternalizeOneByteString(
+ Vector<const uint8_t>::cast(scanner().literal_ascii_string()));
} else {
- result = isolate()->factory()->LookupTwoByteSymbol(
+ result = isolate()->factory()->InternalizeTwoByteString(
scanner().literal_utf16_string());
}
symbol_cache_.at(symbol_id) = result;
@@ -368,6 +371,7 @@ const char* ScriptDataImpl::ReadString(unsigned* start, int* chars) {
return result;
}
+
Scanner::Location ScriptDataImpl::MessageLocation() {
int beg_pos = Read(PreparseDataConstants::kMessageStartPos);
int end_pos = Read(PreparseDataConstants::kMessageEndPos);
@@ -407,8 +411,8 @@ unsigned* ScriptDataImpl::ReadAddress(int position) {
}
-Scope* Parser::NewScope(Scope* parent, ScopeType type) {
- Scope* result = new(zone()) Scope(parent, type, zone());
+Scope* Parser::NewScope(Scope* parent, ScopeType scope_type) {
+ Scope* result = new(zone()) Scope(parent, scope_type, zone());
result->Initialize();
return result;
}
@@ -487,8 +491,7 @@ Parser::FunctionState::FunctionState(Parser* parser,
: next_materialized_literal_index_(JSFunction::kLiteralsPrefixSize),
next_handler_index_(0),
expected_property_count_(0),
- only_simple_this_property_assignments_(false),
- this_property_assignments_(isolate->factory()->empty_fixed_array()),
+ generator_object_variable_(NULL),
parser_(parser),
outer_function_state_(parser->current_function_state_),
outer_scope_(parser->top_scope_),
@@ -532,45 +535,45 @@ Parser::FunctionState::~FunctionState() {
// ----------------------------------------------------------------------------
// Implementation of Parser
-Parser::Parser(CompilationInfo* info,
- int parser_flags,
- v8::Extension* extension,
- ScriptDataImpl* pre_data)
- : isolate_(info->isolate()),
- symbol_cache_(pre_data ? pre_data->symbol_count() : 0, info->zone()),
+Parser::Parser(CompilationInfo* info)
+ : ParserBase(&scanner_, info->isolate()->stack_guard()->real_climit()),
+ isolate_(info->isolate()),
+ symbol_cache_(0, info->zone()),
script_(info->script()),
scanner_(isolate_->unicode_cache()),
reusable_preparser_(NULL),
top_scope_(NULL),
+ original_scope_(NULL),
current_function_state_(NULL),
target_stack_(NULL),
- extension_(extension),
- pre_data_(pre_data),
+ extension_(info->extension()),
+ pre_parse_data_(NULL),
fni_(NULL),
- allow_natives_syntax_((parser_flags & kAllowNativesSyntax) != 0),
- allow_lazy_((parser_flags & kAllowLazy) != 0),
- allow_modules_((parser_flags & kAllowModules) != 0),
- stack_overflow_(false),
parenthesized_function_(false),
zone_(info->zone()),
info_(info) {
ASSERT(!script_.is_null());
isolate_->set_ast_node_id(0);
- if ((parser_flags & kLanguageModeMask) == EXTENDED_MODE) {
- scanner().SetHarmonyScoping(true);
- }
- if ((parser_flags & kAllowModules) != 0) {
- scanner().SetHarmonyModules(true);
- }
+ set_allow_harmony_scoping(!info->is_native() && FLAG_harmony_scoping);
+ set_allow_modules(!info->is_native() && FLAG_harmony_modules);
+ set_allow_natives_syntax(FLAG_allow_natives_syntax || info->is_native());
+ set_allow_lazy(false); // Must be explicitly enabled.
+ set_allow_generators(FLAG_harmony_generators);
+ set_allow_for_of(FLAG_harmony_iteration);
+ set_allow_harmony_numeric_literals(FLAG_harmony_numeric_literals);
}
FunctionLiteral* Parser::ParseProgram() {
- ZoneScope zone_scope(zone(), DONT_DELETE_ON_EXIT);
- HistogramTimerScope timer(isolate()->counters()->parse());
+ // TODO(bmeurer): We temporarily need to pass allow_nesting = true here,
+ // see comment for HistogramTimerScope class.
+ HistogramTimerScope timer_scope(isolate()->counters()->parse(), true);
Handle<String> source(String::cast(script_->source()));
isolate()->counters()->total_parse_size()->Increment(source->length());
- int64_t start = FLAG_trace_parse ? OS::Ticks() : 0;
+ ElapsedTimer timer;
+ if (FLAG_trace_parse) {
+ timer.Start();
+ }
fni_ = new(zone()) FuncNameInferrer(isolate(), zone());
// Initialize parser state.
@@ -583,15 +586,15 @@ FunctionLiteral* Parser::ParseProgram() {
ExternalTwoByteStringUtf16CharacterStream stream(
Handle<ExternalTwoByteString>::cast(source), 0, source->length());
scanner_.Initialize(&stream);
- result = DoParseProgram(info(), source, &zone_scope);
+ result = DoParseProgram(info(), source);
} else {
GenericStringUtf16CharacterStream stream(source, 0, source->length());
scanner_.Initialize(&stream);
- result = DoParseProgram(info(), source, &zone_scope);
+ result = DoParseProgram(info(), source);
}
if (FLAG_trace_parse && result != NULL) {
- double ms = static_cast<double>(OS::Ticks() - start) / 1000;
+ double ms = timer.Elapsed().InMillisecondsF();
if (info()->is_eval()) {
PrintF("[parsing eval");
} else if (info()->script()->name()->IsString()) {
@@ -608,13 +611,12 @@ FunctionLiteral* Parser::ParseProgram() {
FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info,
- Handle<String> source,
- ZoneScope* zone_scope) {
+ Handle<String> source) {
ASSERT(top_scope_ == NULL);
ASSERT(target_stack_ == NULL);
- if (pre_data_ != NULL) pre_data_->Initialize();
+ if (pre_parse_data_ != NULL) pre_parse_data_->Initialize();
- Handle<String> no_name = isolate()->factory()->empty_symbol();
+ Handle<String> no_name = isolate()->factory()->empty_string();
FunctionLiteral* result = NULL;
{ Scope* scope = NewScope(top_scope_, GLOBAL_SCOPE);
@@ -622,6 +624,7 @@ FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info,
if (!info->context().is_null()) {
scope = Scope::DeserializeScopeChain(*info->context(), scope, zone());
}
+ original_scope_ = scope;
if (info->is_eval()) {
if (!scope->is_global_scope() || info->language_mode() != CLASSIC_MODE) {
scope = NewScope(scope, EVAL_SCOPE);
@@ -633,26 +636,40 @@ FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info,
scope->set_end_position(source->length());
// Compute the parsing mode.
- Mode mode = (FLAG_lazy && allow_lazy_) ? PARSE_LAZILY : PARSE_EAGERLY;
- if (allow_natives_syntax_ || extension_ != NULL || scope->is_eval_scope()) {
+ Mode mode = (FLAG_lazy && allow_lazy()) ? PARSE_LAZILY : PARSE_EAGERLY;
+ if (allow_natives_syntax() ||
+ extension_ != NULL ||
+ scope->is_eval_scope()) {
mode = PARSE_EAGERLY;
}
ParsingModeScope parsing_mode(this, mode);
- FunctionState function_state(this, scope, isolate()); // Enters 'scope'.
+ // Enters 'scope'.
+ FunctionState function_state(this, scope, isolate());
+
top_scope_->SetLanguageMode(info->language_mode());
ZoneList<Statement*>* body = new(zone()) ZoneList<Statement*>(16, zone());
bool ok = true;
- int beg_loc = scanner().location().beg_pos;
+ int beg_pos = scanner().location().beg_pos;
ParseSourceElements(body, Token::EOS, info->is_eval(), true, &ok);
if (ok && !top_scope_->is_classic_mode()) {
- CheckOctalLiteral(beg_loc, scanner().location().end_pos, &ok);
+ CheckOctalLiteral(beg_pos, scanner().location().end_pos, &ok);
}
if (ok && is_extended_mode()) {
CheckConflictingVarDeclarations(top_scope_, &ok);
}
+ if (ok && info->parse_restriction() == ONLY_SINGLE_FUNCTION_LITERAL) {
+ if (body->length() != 1 ||
+ !body->at(0)->IsExpressionStatement() ||
+ !body->at(0)->AsExpressionStatement()->
+ expression()->IsFunctionLiteral()) {
+ ReportMessage("single_function_literal", Vector<const char*>::empty());
+ ok = false;
+ }
+ }
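+  // Editor's note (illustrative, not part of this change): the restriction
+  // checked above accepts only sources whose entire body is one expression
+  // statement wrapping a function literal, e.g.
+  //
+  //   (function (a, b) { return a + b; })
+  //
+  // anything else is rejected with the "single_function_literal" message.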
+
if (ok) {
result = factory()->NewFunctionLiteral(
no_name,
@@ -661,15 +678,17 @@ FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info,
function_state.materialized_literal_count(),
function_state.expected_property_count(),
function_state.handler_count(),
- function_state.only_simple_this_property_assignments(),
- function_state.this_property_assignments(),
0,
FunctionLiteral::kNoDuplicateParameters,
FunctionLiteral::ANONYMOUS_EXPRESSION,
FunctionLiteral::kGlobalOrEval,
- FunctionLiteral::kNotParenthesized);
+ FunctionLiteral::kNotParenthesized,
+ FunctionLiteral::kNotGenerator,
+ 0);
result->set_ast_properties(factory()->visitor()->ast_properties());
- } else if (stack_overflow_) {
+ result->set_dont_optimize_reason(
+ factory()->visitor()->dont_optimize_reason());
+ } else if (stack_overflow()) {
isolate()->StackOverflow();
}
}
@@ -677,19 +696,18 @@ FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info,
// Make sure the target stack is empty.
ASSERT(target_stack_ == NULL);
- // If there was a syntax error we have to get rid of the AST
- // and it is not safe to do so before the scope has been deleted.
- if (result == NULL) zone_scope->DeleteOnExit();
return result;
}
FunctionLiteral* Parser::ParseLazy() {
- ZoneScope zone_scope(zone(), DONT_DELETE_ON_EXIT);
- HistogramTimerScope timer(isolate()->counters()->parse_lazy());
+ HistogramTimerScope timer_scope(isolate()->counters()->parse_lazy());
Handle<String> source(String::cast(script_->source()));
isolate()->counters()->total_parse_size()->Increment(source->length());
- int64_t start = FLAG_trace_parse ? OS::Ticks() : 0;
+ ElapsedTimer timer;
+ if (FLAG_trace_parse) {
+ timer.Start();
+ }
Handle<SharedFunctionInfo> shared_info = info()->shared_info();
// Initialize parser state.
@@ -700,16 +718,16 @@ FunctionLiteral* Parser::ParseLazy() {
Handle<ExternalTwoByteString>::cast(source),
shared_info->start_position(),
shared_info->end_position());
- result = ParseLazy(&stream, &zone_scope);
+ result = ParseLazy(&stream);
} else {
GenericStringUtf16CharacterStream stream(source,
shared_info->start_position(),
shared_info->end_position());
- result = ParseLazy(&stream, &zone_scope);
+ result = ParseLazy(&stream);
}
if (FLAG_trace_parse && result != NULL) {
- double ms = static_cast<double>(OS::Ticks() - start) / 1000;
+ double ms = timer.Elapsed().InMillisecondsF();
SmartArrayPointer<char> name_chars = result->debug_name()->ToCString();
PrintF("[parsing function: %s - took %0.3f ms]\n", *name_chars, ms);
}
@@ -717,8 +735,7 @@ FunctionLiteral* Parser::ParseLazy() {
}
-FunctionLiteral* Parser::ParseLazy(Utf16CharacterStream* source,
- ZoneScope* zone_scope) {
+FunctionLiteral* Parser::ParseLazy(Utf16CharacterStream* source) {
Handle<SharedFunctionInfo> shared_info = info()->shared_info();
scanner_.Initialize(source);
ASSERT(top_scope_ == NULL);
@@ -741,13 +758,14 @@ FunctionLiteral* Parser::ParseLazy(Utf16CharacterStream* source,
scope = Scope::DeserializeScopeChain(info()->closure()->context(), scope,
zone());
}
+ original_scope_ = scope;
FunctionState function_state(this, scope, isolate());
ASSERT(scope->language_mode() != STRICT_MODE || !info()->is_classic_mode());
ASSERT(scope->language_mode() != EXTENDED_MODE ||
info()->is_extended_mode());
ASSERT(info()->language_mode() == shared_info->language_mode());
scope->SetLanguageMode(shared_info->language_mode());
- FunctionLiteral::Type type = shared_info->is_expression()
+ FunctionLiteral::FunctionType function_type = shared_info->is_expression()
? (shared_info->is_anonymous()
? FunctionLiteral::ANONYMOUS_EXPRESSION
: FunctionLiteral::NAMED_EXPRESSION)
@@ -755,8 +773,9 @@ FunctionLiteral* Parser::ParseLazy(Utf16CharacterStream* source,
bool ok = true;
result = ParseFunctionLiteral(name,
false, // Strict mode name already checked.
+ shared_info->is_generator(),
RelocInfo::kNoPosition,
- type,
+ function_type,
&ok);
// Make sure the results agree.
ASSERT(ok == (result != NULL));
@@ -765,11 +784,8 @@ FunctionLiteral* Parser::ParseLazy(Utf16CharacterStream* source,
// Make sure the target stack is empty.
ASSERT(target_stack_ == NULL);
- // If there was a stack overflow we have to get rid of AST and it is
- // not safe to do before scope has been deleted.
if (result == NULL) {
- zone_scope->DeleteOnExit();
- if (stack_overflow_) isolate()->StackOverflow();
+ if (stack_overflow()) isolate()->StackOverflow();
} else {
Handle<String> inferred_name(shared_info->inferred_name());
result->set_inferred_name(inferred_name);
@@ -778,29 +794,29 @@ FunctionLiteral* Parser::ParseLazy(Utf16CharacterStream* source,
}
-Handle<String> Parser::GetSymbol(bool* ok) {
+Handle<String> Parser::GetSymbol() {
int symbol_id = -1;
- if (pre_data() != NULL) {
- symbol_id = pre_data()->GetSymbolIdentifier();
+ if (pre_parse_data() != NULL) {
+ symbol_id = pre_parse_data()->GetSymbolIdentifier();
}
return LookupSymbol(symbol_id);
}
-void Parser::ReportMessage(const char* type, Vector<const char*> args) {
+void Parser::ReportMessage(const char* message, Vector<const char*> args) {
Scanner::Location source_location = scanner().location();
- ReportMessageAt(source_location, type, args);
+ ReportMessageAt(source_location, message, args);
}
-void Parser::ReportMessage(const char* type, Vector<Handle<String> > args) {
+void Parser::ReportMessage(const char* message, Vector<Handle<String> > args) {
Scanner::Location source_location = scanner().location();
- ReportMessageAt(source_location, type, args);
+ ReportMessageAt(source_location, message, args);
}
void Parser::ReportMessageAt(Scanner::Location source_location,
- const char* type,
+ const char* message,
Vector<const char*> args) {
MessageLocation location(script_,
source_location.beg_pos,
@@ -812,13 +828,13 @@ void Parser::ReportMessageAt(Scanner::Location source_location,
elements->set(i, *arg_string);
}
Handle<JSArray> array = factory->NewJSArrayWithElements(elements);
- Handle<Object> result = factory->NewSyntaxError(type, array);
+ Handle<Object> result = factory->NewSyntaxError(message, array);
isolate()->Throw(*result, &location);
}
void Parser::ReportMessageAt(Scanner::Location source_location,
- const char* type,
+ const char* message,
Vector<Handle<String> > args) {
MessageLocation location(script_,
source_location.beg_pos,
@@ -829,183 +845,11 @@ void Parser::ReportMessageAt(Scanner::Location source_location,
elements->set(i, *args[i]);
}
Handle<JSArray> array = factory->NewJSArrayWithElements(elements);
- Handle<Object> result = factory->NewSyntaxError(type, array);
+ Handle<Object> result = factory->NewSyntaxError(message, array);
isolate()->Throw(*result, &location);
}
-// A ThisNamedPropertyAssignmentFinder finds and marks statements of the form
-// this.x = ...;, where x is a named property. It also determines whether a
-// function contains only assignments of this type.
-class ThisNamedPropertyAssignmentFinder {
- public:
- ThisNamedPropertyAssignmentFinder(Isolate* isolate, Zone* zone)
- : isolate_(isolate),
- only_simple_this_property_assignments_(true),
- names_(0, zone),
- assigned_arguments_(0, zone),
- assigned_constants_(0, zone),
- zone_(zone) {
- }
-
- static Assignment* AsAssignment(Statement* stat) {
- if (stat == NULL) return NULL;
- ExpressionStatement* exp_stat = stat->AsExpressionStatement();
- if (exp_stat == NULL) return NULL;
- return exp_stat->expression()->AsAssignment();
- }
-
- void Update(Scope* scope, Statement* stat) {
- // Bail out if function already has property assignment that are
- // not simple this property assignments.
- if (!only_simple_this_property_assignments_) {
- return;
- }
-
- // Check whether this statement is of the form this.x = ...;
- Assignment* assignment = AsAssignment(stat);
- if (IsThisPropertyAssignment(assignment)) {
- HandleThisPropertyAssignment(scope, assignment);
- } else {
- only_simple_this_property_assignments_ = false;
- }
- }
-
- // Returns whether only statements of the form this.x = y; where y is either a
- // constant or a function argument was encountered.
- bool only_simple_this_property_assignments() {
- return only_simple_this_property_assignments_;
- }
-
- // Returns a fixed array containing three elements for each assignment of the
- // form this.x = y;
- Handle<FixedArray> GetThisPropertyAssignments() {
- if (names_.is_empty()) {
- return isolate_->factory()->empty_fixed_array();
- }
- ASSERT_EQ(names_.length(), assigned_arguments_.length());
- ASSERT_EQ(names_.length(), assigned_constants_.length());
- Handle<FixedArray> assignments =
- isolate_->factory()->NewFixedArray(names_.length() * 3);
- for (int i = 0; i < names_.length(); ++i) {
- assignments->set(i * 3, *names_[i]);
- assignments->set(i * 3 + 1, Smi::FromInt(assigned_arguments_[i]));
- assignments->set(i * 3 + 2, *assigned_constants_[i]);
- }
- return assignments;
- }
-
- private:
- bool IsThisPropertyAssignment(Assignment* assignment) {
- if (assignment != NULL) {
- Property* property = assignment->target()->AsProperty();
- return assignment->op() == Token::ASSIGN
- && property != NULL
- && property->obj()->AsVariableProxy() != NULL
- && property->obj()->AsVariableProxy()->is_this();
- }
- return false;
- }
-
- void HandleThisPropertyAssignment(Scope* scope, Assignment* assignment) {
- // Check that the property assigned to is a named property, which is not
- // __proto__.
- Property* property = assignment->target()->AsProperty();
- ASSERT(property != NULL);
- Literal* literal = property->key()->AsLiteral();
- uint32_t dummy;
- if (literal != NULL &&
- literal->handle()->IsString() &&
- !String::cast(*(literal->handle()))->Equals(
- isolate_->heap()->Proto_symbol()) &&
- !String::cast(*(literal->handle()))->AsArrayIndex(&dummy)) {
- Handle<String> key = Handle<String>::cast(literal->handle());
-
- // Check whether the value assigned is either a constant or matches the
- // name of one of the arguments to the function.
- if (assignment->value()->AsLiteral() != NULL) {
- // Constant assigned.
- Literal* literal = assignment->value()->AsLiteral();
- AssignmentFromConstant(key, literal->handle());
- return;
- } else if (assignment->value()->AsVariableProxy() != NULL) {
- // Variable assigned.
- Handle<String> name =
- assignment->value()->AsVariableProxy()->name();
- // Check whether the variable assigned matches an argument name.
- for (int i = 0; i < scope->num_parameters(); i++) {
- if (*scope->parameter(i)->name() == *name) {
- // Assigned from function argument.
- AssignmentFromParameter(key, i);
- return;
- }
- }
- }
- }
- // It is not a simple "this.x = value;" assignment with a constant
- // or parameter value.
- AssignmentFromSomethingElse();
- }
-
-
-
-
- // We will potentially reorder the property assignments, so they must be
- // simple enough that the ordering does not matter.
- void AssignmentFromParameter(Handle<String> name, int index) {
- EnsureInitialized();
- for (int i = 0; i < names_.length(); ++i) {
- if (name->Equals(*names_[i])) {
- assigned_arguments_[i] = index;
- assigned_constants_[i] = isolate_->factory()->undefined_value();
- return;
- }
- }
- names_.Add(name, zone());
- assigned_arguments_.Add(index, zone());
- assigned_constants_.Add(isolate_->factory()->undefined_value(), zone());
- }
-
- void AssignmentFromConstant(Handle<String> name, Handle<Object> value) {
- EnsureInitialized();
- for (int i = 0; i < names_.length(); ++i) {
- if (name->Equals(*names_[i])) {
- assigned_arguments_[i] = -1;
- assigned_constants_[i] = value;
- return;
- }
- }
- names_.Add(name, zone());
- assigned_arguments_.Add(-1, zone());
- assigned_constants_.Add(value, zone());
- }
-
- void AssignmentFromSomethingElse() {
- // The this assignment is not a simple one.
- only_simple_this_property_assignments_ = false;
- }
-
- void EnsureInitialized() {
- if (names_.capacity() == 0) {
- ASSERT(assigned_arguments_.capacity() == 0);
- ASSERT(assigned_constants_.capacity() == 0);
- names_.Initialize(4, zone());
- assigned_arguments_.Initialize(4, zone());
- assigned_constants_.Initialize(4, zone());
- }
- }
-
- Zone* zone() const { return zone_; }
-
- Isolate* isolate_;
- bool only_simple_this_property_assignments_;
- ZoneStringList names_;
- ZoneList<int> assigned_arguments_;
- ZoneObjectList assigned_constants_;
- Zone* zone_;
-};
-
-
void* Parser::ParseSourceElements(ZoneList<Statement*>* processor,
int end_token,
bool is_eval,
@@ -1021,8 +865,6 @@ void* Parser::ParseSourceElements(ZoneList<Statement*>* processor,
TargetScope scope(&this->target_stack_);
ASSERT(processor != NULL);
- ThisNamedPropertyAssignmentFinder this_property_assignment_finder(isolate(),
- zone());
bool directive_prologue = true; // Parsing directive prologue.
while (peek() != end_token) {
@@ -1049,14 +891,14 @@ void* Parser::ParseSourceElements(ZoneList<Statement*>* processor,
// Still processing directive prologue?
if ((e_stat = stat->AsExpressionStatement()) != NULL &&
(literal = e_stat->expression()->AsLiteral()) != NULL &&
- literal->handle()->IsString()) {
- Handle<String> directive = Handle<String>::cast(literal->handle());
+ literal->value()->IsString()) {
+ Handle<String> directive = Handle<String>::cast(literal->value());
// Check "use strict" directive (ES5 14.1).
if (top_scope_->is_classic_mode() &&
- directive->Equals(isolate()->heap()->use_strict()) &&
+ directive->Equals(isolate()->heap()->use_strict_string()) &&
token_loc.end_pos - token_loc.beg_pos ==
- isolate()->heap()->use_strict()->length() + 2) {
+ isolate()->heap()->use_strict_string()->length() + 2) {
// TODO(mstarzinger): Global strict eval calls need their own scope
// as specified in ES5 10.4.2(3). The correct fix would be to always
// add this scope in DoParseProgram(), but that requires adaptations
@@ -1071,7 +913,7 @@ void* Parser::ParseSourceElements(ZoneList<Statement*>* processor,
mode_ = PARSE_EAGERLY;
}
// TODO(ES6): Fix entering extended mode, once it is specified.
- top_scope_->SetLanguageMode(FLAG_harmony_scoping
+ top_scope_->SetLanguageMode(allow_harmony_scoping()
? EXTENDED_MODE : STRICT_MODE);
// "use strict" is the only directive for now.
directive_prologue = false;
@@ -1082,25 +924,9 @@ void* Parser::ParseSourceElements(ZoneList<Statement*>* processor,
}
}
- // Find and mark all assignments to named properties in this (this.x =)
- if (top_scope_->is_function_scope()) {
- this_property_assignment_finder.Update(top_scope_, stat);
- }
processor->Add(stat, zone());
}
- // Propagate the collected information on this property assignments.
- if (top_scope_->is_function_scope()) {
- bool only_simple_this_property_assignments =
- this_property_assignment_finder.only_simple_this_property_assignments()
- && top_scope_->declarations()->length() == 0;
- if (only_simple_this_property_assignments) {
- current_function_state_->SetThisPropertyAssignmentInfo(
- only_simple_this_property_assignments,
- this_property_assignment_finder.GetThisPropertyAssignments());
- }
- }
-
return 0;
}
@@ -1119,6 +945,7 @@ Statement* Parser::ParseModuleElement(ZoneStringList* labels,
// ModuleDeclaration
// ImportDeclaration
// ExportDeclaration
+ // GeneratorDeclaration
switch (peek()) {
case Token::FUNCTION:
@@ -1141,7 +968,7 @@ Statement* Parser::ParseModuleElement(ZoneStringList* labels,
if (estmt != NULL &&
estmt->expression()->AsVariableProxy() != NULL &&
estmt->expression()->AsVariableProxy()->name()->Equals(
- isolate()->heap()->module_symbol()) &&
+ isolate()->heap()->module_string()) &&
!scanner().literal_contains_escapes()) {
return ParseModuleDeclaration(NULL, ok);
}
@@ -1156,6 +983,7 @@ Statement* Parser::ParseModuleDeclaration(ZoneStringList* names, bool* ok) {
// ModuleDeclaration:
// 'module' Identifier Module
+ int pos = peek_position();
Handle<String> name = ParseIdentifier(CHECK_OK);
#ifdef DEBUG
@@ -1164,9 +992,9 @@ Statement* Parser::ParseModuleDeclaration(ZoneStringList* names, bool* ok) {
#endif
Module* module = ParseModule(CHECK_OK);
- VariableProxy* proxy = NewUnresolved(name, LET, module->interface());
+ VariableProxy* proxy = NewUnresolved(name, MODULE, module->interface());
Declaration* declaration =
- factory()->NewModuleDeclaration(proxy, module, top_scope_);
+ factory()->NewModuleDeclaration(proxy, module, top_scope_, pos);
Declare(declaration, true, CHECK_OK);
#ifdef DEBUG
@@ -1181,9 +1009,9 @@ Statement* Parser::ParseModuleDeclaration(ZoneStringList* names, bool* ok) {
if (names) names->Add(name, zone());
if (module->body() == NULL)
- return factory()->NewEmptyStatement();
+ return factory()->NewEmptyStatement(pos);
else
- return module->body();
+ return factory()->NewModuleStatement(proxy, module->body(), pos);
}
@@ -1205,7 +1033,7 @@ Module* Parser::ParseModule(bool* ok) {
}
default: {
- ExpectContextualKeyword("at", CHECK_OK);
+ ExpectContextualKeyword(CStrVector("at"), CHECK_OK);
Module* result = ParseModuleUrl(CHECK_OK);
ExpectSemicolon(CHECK_OK);
return result;
@@ -1218,8 +1046,9 @@ Module* Parser::ParseModuleLiteral(bool* ok) {
// Module:
// '{' ModuleElement '}'
+ int pos = peek_position();
// Construct block expecting 16 statements.
- Block* body = factory()->NewBlock(NULL, 16, false);
+ Block* body = factory()->NewBlock(NULL, 16, false, RelocInfo::kNoPosition);
#ifdef DEBUG
if (FLAG_print_interface_details) PrintF("# Literal ");
#endif
@@ -1264,7 +1093,7 @@ Module* Parser::ParseModuleLiteral(bool* ok) {
ASSERT(*ok);
interface->Freeze(ok);
ASSERT(*ok);
- return factory()->NewModuleLiteral(body, interface);
+ return factory()->NewModuleLiteral(body, interface, pos);
}
@@ -1273,6 +1102,7 @@ Module* Parser::ParseModulePath(bool* ok) {
// Identifier
// ModulePath '.' Identifier
+ int pos = peek_position();
Module* result = ParseModuleVariable(CHECK_OK);
while (Check(Token::PERIOD)) {
Handle<String> name = ParseIdentifierName(CHECK_OK);
@@ -1280,7 +1110,7 @@ Module* Parser::ParseModulePath(bool* ok) {
if (FLAG_print_interface_details)
PrintF("# Path .%s ", name->ToAsciiArray());
#endif
- Module* member = factory()->NewModulePath(result, name);
+ Module* member = factory()->NewModulePath(result, name, pos);
result->interface()->Add(name, member->interface(), zone(), ok);
if (!*ok) {
#ifdef DEBUG
@@ -1306,6 +1136,7 @@ Module* Parser::ParseModuleVariable(bool* ok) {
// ModulePath:
// Identifier
+ int pos = peek_position();
Handle<String> name = ParseIdentifier(CHECK_OK);
#ifdef DEBUG
if (FLAG_print_interface_details)
@@ -1315,7 +1146,7 @@ Module* Parser::ParseModuleVariable(bool* ok) {
factory(), name, Interface::NewModule(zone()),
scanner().location().beg_pos);
- return factory()->NewModuleVariable(proxy);
+ return factory()->NewModuleVariable(proxy, pos);
}
@@ -1323,8 +1154,9 @@ Module* Parser::ParseModuleUrl(bool* ok) {
// Module:
// String
+ int pos = peek_position();
Expect(Token::STRING, CHECK_OK);
- Handle<String> symbol = GetSymbol(CHECK_OK);
+ Handle<String> symbol = GetSymbol();
// TODO(ES6): Request JS resource from environment...
@@ -1332,12 +1164,15 @@ Module* Parser::ParseModuleUrl(bool* ok) {
if (FLAG_print_interface_details) PrintF("# Url ");
#endif
- Module* result = factory()->NewModuleUrl(symbol);
- Interface* interface = result->interface();
+ // Create an empty literal as long as the feature isn't finished.
+ USE(symbol);
+ Scope* scope = NewScope(top_scope_, MODULE_SCOPE);
+ Block* body = factory()->NewBlock(NULL, 1, false, RelocInfo::kNoPosition);
+ body->set_scope(scope);
+ Interface* interface = scope->interface();
+ Module* result = factory()->NewModuleLiteral(body, interface, pos);
interface->Freeze(ok);
ASSERT(*ok);
- // Create dummy scope to avoid errors as long as the feature isn't finished.
- Scope* scope = NewScope(top_scope_, MODULE_SCOPE);
interface->Unify(scope->interface(), zone(), ok);
ASSERT(*ok);
return result;
@@ -1363,6 +1198,7 @@ Block* Parser::ParseImportDeclaration(bool* ok) {
//
// TODO(ES6): implement destructuring ImportSpecifiers
+ int pos = peek_position();
Expect(Token::IMPORT, CHECK_OK);
ZoneStringList names(1, zone());
@@ -1374,13 +1210,13 @@ Block* Parser::ParseImportDeclaration(bool* ok) {
names.Add(name, zone());
}
- ExpectContextualKeyword("from", CHECK_OK);
+ ExpectContextualKeyword(CStrVector("from"), CHECK_OK);
Module* module = ParseModuleSpecifier(CHECK_OK);
ExpectSemicolon(CHECK_OK);
// Generate a separate declaration for each identifier.
// TODO(ES6): once we implement destructuring, make that one declaration.
- Block* block = factory()->NewBlock(NULL, 1, true);
+ Block* block = factory()->NewBlock(NULL, 1, true, RelocInfo::kNoPosition);
for (int i = 0; i < names.length(); ++i) {
#ifdef DEBUG
if (FLAG_print_interface_details)
@@ -1401,7 +1237,7 @@ Block* Parser::ParseImportDeclaration(bool* ok) {
}
VariableProxy* proxy = NewUnresolved(names[i], LET, interface);
Declaration* declaration =
- factory()->NewImportDeclaration(proxy, module, top_scope_);
+ factory()->NewImportDeclaration(proxy, module, top_scope_, pos);
Declare(declaration, true, CHECK_OK);
}
@@ -1414,6 +1250,7 @@ Statement* Parser::ParseExportDeclaration(bool* ok) {
// 'export' Identifier (',' Identifier)* ';'
// 'export' VariableDeclaration
// 'export' FunctionDeclaration
+ // 'export' GeneratorDeclaration
// 'export' ModuleDeclaration
//
// TODO(ES6): implement destructuring ExportSpecifiers
@@ -1424,9 +1261,10 @@ Statement* Parser::ParseExportDeclaration(bool* ok) {
ZoneStringList names(1, zone());
switch (peek()) {
case Token::IDENTIFIER: {
+ int pos = position();
Handle<String> name = ParseIdentifier(CHECK_OK);
// Handle 'module' as a context-sensitive keyword.
- if (!name->IsEqualTo(CStrVector("module"))) {
+ if (!name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("module"))) {
names.Add(name, zone());
while (peek() == Token::COMMA) {
Consume(Token::COMMA);
@@ -1434,7 +1272,7 @@ Statement* Parser::ParseExportDeclaration(bool* ok) {
names.Add(name, zone());
}
ExpectSemicolon(CHECK_OK);
- result = factory()->NewEmptyStatement();
+ result = factory()->NewEmptyStatement(pos);
} else {
result = ParseModuleDeclaration(&names, CHECK_OK);
}
@@ -1473,7 +1311,7 @@ Statement* Parser::ParseExportDeclaration(bool* ok) {
// TODO(rossberg): Rethink whether we actually need to store export
// declarations (for compilation?).
// ExportDeclaration* declaration =
- // factory()->NewExportDeclaration(proxy, top_scope_);
+ // factory()->NewExportDeclaration(proxy, top_scope_, position);
// top_scope_->AddDeclaration(declaration);
}
@@ -1493,6 +1331,7 @@ Statement* Parser::ParseBlockElement(ZoneStringList* labels,
// BlockElement (aka SourceElement):
// LetDeclaration
// ConstDeclaration
+ // GeneratorDeclaration
switch (peek()) {
case Token::FUNCTION:
@@ -1530,10 +1369,6 @@ Statement* Parser::ParseStatement(ZoneStringList* labels, bool* ok) {
// labels can simply be ignored in all other cases, except for
// trivial labeled break statements 'label: break label', which are
// parsed into empty statements.
-
- // Keep the source position of the statement
- int statement_pos = scanner().peek_location().beg_pos;
- Statement* stmt = NULL;
switch (peek()) {
case Token::LBRACE:
return ParseBlock(labels, ok);
@@ -1541,52 +1376,41 @@ Statement* Parser::ParseStatement(ZoneStringList* labels, bool* ok) {
case Token::CONST: // fall through
case Token::LET:
case Token::VAR:
- stmt = ParseVariableStatement(kStatement, NULL, ok);
- break;
+ return ParseVariableStatement(kStatement, NULL, ok);
case Token::SEMICOLON:
Next();
- return factory()->NewEmptyStatement();
+ return factory()->NewEmptyStatement(RelocInfo::kNoPosition);
case Token::IF:
- stmt = ParseIfStatement(labels, ok);
- break;
+ return ParseIfStatement(labels, ok);
case Token::DO:
- stmt = ParseDoWhileStatement(labels, ok);
- break;
+ return ParseDoWhileStatement(labels, ok);
case Token::WHILE:
- stmt = ParseWhileStatement(labels, ok);
- break;
+ return ParseWhileStatement(labels, ok);
case Token::FOR:
- stmt = ParseForStatement(labels, ok);
- break;
+ return ParseForStatement(labels, ok);
case Token::CONTINUE:
- stmt = ParseContinueStatement(ok);
- break;
+ return ParseContinueStatement(ok);
case Token::BREAK:
- stmt = ParseBreakStatement(labels, ok);
- break;
+ return ParseBreakStatement(labels, ok);
case Token::RETURN:
- stmt = ParseReturnStatement(ok);
- break;
+ return ParseReturnStatement(ok);
case Token::WITH:
- stmt = ParseWithStatement(labels, ok);
- break;
+ return ParseWithStatement(labels, ok);
case Token::SWITCH:
- stmt = ParseSwitchStatement(labels, ok);
- break;
+ return ParseSwitchStatement(labels, ok);
case Token::THROW:
- stmt = ParseThrowStatement(ok);
- break;
+ return ParseThrowStatement(ok);
case Token::TRY: {
// NOTE: It is somewhat complicated to have labels on
@@ -1594,12 +1418,10 @@ Statement* Parser::ParseStatement(ZoneStringList* labels, bool* ok) {
// one must take great care not to treat it as a
// fall-through. It is much easier just to wrap the entire
// try-statement in a statement block and put the labels there
- Block* result = factory()->NewBlock(labels, 1, false);
+ Block* result =
+ factory()->NewBlock(labels, 1, false, RelocInfo::kNoPosition);
Target target(&this->target_stack_, result);
TryStatement* statement = ParseTryStatement(CHECK_OK);
- if (statement) {
- statement->set_statement_pos(statement_pos);
- }
if (result) result->AddStatement(statement, zone());
return result;
}
@@ -1612,6 +1434,10 @@ Statement* Parser::ParseStatement(ZoneStringList* labels, bool* ok) {
// FunctionDeclaration
// Common language extension is to allow function declaration in place
// of any statement. This language extension is disabled in strict mode.
+ //
+ // In Harmony mode, this case also handles the extension:
+ // Statement:
+ // GeneratorDeclaration
if (!top_scope_->is_classic_mode()) {
ReportMessageAt(scanner().peek_location(), "strict_function",
Vector<const char*>::empty());
@@ -1622,16 +1448,11 @@ Statement* Parser::ParseStatement(ZoneStringList* labels, bool* ok) {
}
case Token::DEBUGGER:
- stmt = ParseDebuggerStatement(ok);
- break;
+ return ParseDebuggerStatement(ok);
default:
- stmt = ParseExpressionOrLabelledStatement(labels, ok);
+ return ParseExpressionOrLabelledStatement(labels, ok);
}
-
- // Store the source position of the statement
- if (stmt != NULL) stmt->set_statement_pos(statement_pos);
- return stmt;
}
@@ -1643,7 +1464,7 @@ VariableProxy* Parser::NewUnresolved(
// Let/const variables in harmony mode are always added to the immediately
// enclosing scope.
return DeclarationScope(mode)->NewUnresolved(
- factory(), name, interface, scanner().location().beg_pos);
+ factory(), name, interface, position());
}
@@ -1706,13 +1527,12 @@ void Parser::Declare(Declaration* declaration, bool resolve, bool* ok) {
*ok = false;
return;
}
- const char* type =
- (var->mode() == VAR) ? "var" : var->is_const_mode() ? "const" : "let";
- Handle<String> type_string =
- isolate()->factory()->NewStringFromUtf8(CStrVector(type), TENURED);
+ Handle<String> message_string =
+ isolate()->factory()->NewStringFromUtf8(CStrVector("Variable"),
+ TENURED);
Expression* expression =
- NewThrowTypeError(isolate()->factory()->redeclaration_symbol(),
- type_string, name);
+ NewThrowTypeError(isolate()->factory()->redeclaration_string(),
+ message_string, name);
declaration_scope->SetIllegalRedeclaration(expression);
}
}
@@ -1739,24 +1559,18 @@ void Parser::Declare(Declaration* declaration, bool resolve, bool* ok) {
// For global const variables we bind the proxy to a variable.
ASSERT(resolve); // should be set by all callers
Variable::Kind kind = Variable::NORMAL;
- var = new(zone()) Variable(declaration_scope,
- name,
- mode,
- true,
- kind,
- kNeedsInitialization);
+ var = new(zone()) Variable(
+ declaration_scope, name, mode, true, kind,
+ kNeedsInitialization, proxy->interface());
} else if (declaration_scope->is_eval_scope() &&
declaration_scope->is_classic_mode()) {
// For variable declarations in a non-strict eval scope the proxy is bound
// to a lookup variable to force a dynamic declaration using the
// DeclareContextSlot runtime function.
Variable::Kind kind = Variable::NORMAL;
- var = new(zone()) Variable(declaration_scope,
- name,
- mode,
- true,
- kind,
- declaration->initialization());
+ var = new(zone()) Variable(
+ declaration_scope, name, mode, true, kind,
+ declaration->initialization(), proxy->interface());
var->AllocateTo(Variable::LOOKUP, -1);
resolve = true;
}
@@ -1817,6 +1631,7 @@ void Parser::Declare(Declaration* declaration, bool resolve, bool* ok) {
// declaration is resolved by looking up the function through a
// callback provided by the extension.
Statement* Parser::ParseNativeDeclaration(bool* ok) {
+ int pos = peek_position();
Expect(Token::FUNCTION, CHECK_OK);
Handle<String> name = ParseIdentifier(CHECK_OK);
Expect(Token::LPAREN, CHECK_OK);
@@ -1837,52 +1652,38 @@ Statement* Parser::ParseNativeDeclaration(bool* ok) {
// because of lazy compilation.
DeclarationScope(VAR)->ForceEagerCompilation();
- // Compute the function template for the native function.
- v8::Handle<v8::FunctionTemplate> fun_template =
- extension_->GetNativeFunction(v8::Utils::ToLocal(name));
- ASSERT(!fun_template.IsEmpty());
-
- // Instantiate the function and create a shared function info from it.
- Handle<JSFunction> fun = Utils::OpenHandle(*fun_template->GetFunction());
- const int literals = fun->NumberOfLiterals();
- Handle<Code> code = Handle<Code>(fun->shared()->code());
- Handle<Code> construct_stub = Handle<Code>(fun->shared()->construct_stub());
- Handle<SharedFunctionInfo> shared =
- isolate()->factory()->NewSharedFunctionInfo(name, literals, code,
- Handle<ScopeInfo>(fun->shared()->scope_info()));
- shared->set_construct_stub(*construct_stub);
-
- // Copy the function data to the shared function info.
- shared->set_function_data(fun->shared()->function_data());
- int parameters = fun->shared()->formal_parameter_count();
- shared->set_formal_parameter_count(parameters);
-
// TODO(1240846): It's weird that native function declarations are
// introduced dynamically when we meet their declarations, whereas
// other functions are set up when entering the surrounding scope.
VariableProxy* proxy = NewUnresolved(name, VAR, Interface::NewValue());
Declaration* declaration =
- factory()->NewVariableDeclaration(proxy, VAR, top_scope_);
+ factory()->NewVariableDeclaration(proxy, VAR, top_scope_, pos);
Declare(declaration, true, CHECK_OK);
- SharedFunctionInfoLiteral* lit =
- factory()->NewSharedFunctionInfoLiteral(shared);
+ NativeFunctionLiteral* lit = factory()->NewNativeFunctionLiteral(
+ name, extension_, RelocInfo::kNoPosition);
return factory()->NewExpressionStatement(
factory()->NewAssignment(
- Token::INIT_VAR, proxy, lit, RelocInfo::kNoPosition));
+ Token::INIT_VAR, proxy, lit, RelocInfo::kNoPosition),
+ pos);
}
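// Editor's note (illustrative, not part of this change): the removed block
// above instantiated the native function from its FunctionTemplate while
// parsing and wrapped it in a SharedFunctionInfoLiteral; the replacement
// NativeFunctionLiteral node only records the name and the extension, so the
// actual function can be created later, outside the parser.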
Statement* Parser::ParseFunctionDeclaration(ZoneStringList* names, bool* ok) {
// FunctionDeclaration ::
// 'function' Identifier '(' FormalParameterListopt ')' '{' FunctionBody '}'
+ // GeneratorDeclaration ::
+ // 'function' '*' Identifier '(' FormalParameterListopt ')'
+ // '{' FunctionBody '}'
Expect(Token::FUNCTION, CHECK_OK);
- int function_token_position = scanner().location().beg_pos;
+ int pos = position();
+ bool is_generator = allow_generators() && Check(Token::MUL);
bool is_strict_reserved = false;
Handle<String> name = ParseIdentifierOrStrictReservedWord(
&is_strict_reserved, CHECK_OK);
FunctionLiteral* fun = ParseFunctionLiteral(name,
is_strict_reserved,
- function_token_position,
+ is_generator,
+ pos,
FunctionLiteral::DECLARATION,
CHECK_OK);
// Even if we're not at the top-level of the global or a function
@@ -1894,10 +1695,10 @@ Statement* Parser::ParseFunctionDeclaration(ZoneStringList* names, bool* ok) {
is_extended_mode() && !top_scope_->is_global_scope() ? LET : VAR;
VariableProxy* proxy = NewUnresolved(name, mode, Interface::NewValue());
Declaration* declaration =
- factory()->NewFunctionDeclaration(proxy, mode, fun, top_scope_);
+ factory()->NewFunctionDeclaration(proxy, mode, fun, top_scope_, pos);
Declare(declaration, true, CHECK_OK);
if (names) names->Add(name, zone());
- return factory()->NewEmptyStatement();
+ return factory()->NewEmptyStatement(RelocInfo::kNoPosition);
}
@@ -1911,7 +1712,8 @@ Block* Parser::ParseBlock(ZoneStringList* labels, bool* ok) {
// (ECMA-262, 3rd, 12.2)
//
// Construct block expecting 16 statements.
- Block* result = factory()->NewBlock(labels, 16, false);
+ Block* result =
+ factory()->NewBlock(labels, 16, false, RelocInfo::kNoPosition);
Target target(&this->target_stack_, result);
Expect(Token::LBRACE, CHECK_OK);
while (peek() != Token::RBRACE) {
@@ -1932,7 +1734,8 @@ Block* Parser::ParseScopedBlock(ZoneStringList* labels, bool* ok) {
// '{' BlockElement* '}'
// Construct block expecting 16 statements.
- Block* body = factory()->NewBlock(labels, 16, false);
+ Block* body =
+ factory()->NewBlock(labels, 16, false, RelocInfo::kNoPosition);
Scope* block_scope = NewScope(top_scope_, BLOCK_SCOPE);
// Parse the statements and collect escaping labels.
@@ -1973,8 +1776,8 @@ Block* Parser::ParseVariableStatement(VariableDeclarationContext var_context,
bool Parser::IsEvalOrArguments(Handle<String> string) {
- return string.is_identical_to(isolate()->factory()->eval_symbol()) ||
- string.is_identical_to(isolate()->factory()->arguments_symbol());
+ return string.is_identical_to(isolate()->factory()->eval_string()) ||
+ string.is_identical_to(isolate()->factory()->arguments_string());
}
@@ -2002,6 +1805,8 @@ Block* Parser::ParseVariableDeclarations(
// TODO(ES6):
// ConstBinding ::
// BindingPattern '=' AssignmentExpression
+
+ int pos = peek_position();
VariableMode mode = VAR;
// True if the binding needs initialization. 'let' and 'const' declared
// bindings are created uninitialized by their declaration nodes and
@@ -2087,7 +1892,7 @@ Block* Parser::ParseVariableDeclarations(
// is inside an initializer block, it is ignored.
//
// Create new block with one expected declaration.
- Block* block = factory()->NewBlock(NULL, 1, true);
+ Block* block = factory()->NewBlock(NULL, 1, true, pos);
int nvars = 0; // the number of variables declared
Handle<String> name;
do {
@@ -2124,7 +1929,7 @@ Block* Parser::ParseVariableDeclarations(
is_const ? Interface::NewConst() : Interface::NewValue();
VariableProxy* proxy = NewUnresolved(name, mode, interface);
Declaration* declaration =
- factory()->NewVariableDeclaration(proxy, mode, top_scope_);
+ factory()->NewVariableDeclaration(proxy, mode, top_scope_, pos);
Declare(declaration, mode != VAR, CHECK_OK);
nvars++;
if (declaration_scope->num_var_or_const() > kMaxNumFunctionLocals) {
@@ -2164,11 +1969,11 @@ Block* Parser::ParseVariableDeclarations(
Scope* initialization_scope = is_const ? declaration_scope : top_scope_;
Expression* value = NULL;
- int position = -1;
+ int pos = -1;
// Harmony consts have non-optional initializers.
if (peek() == Token::ASSIGN || mode == CONST_HARMONY) {
Expect(Token::ASSIGN, CHECK_OK);
- position = scanner().location().beg_pos;
+ pos = position();
value = ParseAssignmentExpression(var_context != kForStatement, CHECK_OK);
// Don't infer if it is "a = function(){...}();"-like expression.
if (fni_ != NULL &&
@@ -2183,12 +1988,12 @@ Block* Parser::ParseVariableDeclarations(
// Record the end position of the initializer.
if (proxy->var() != NULL) {
- proxy->var()->set_initializer_position(scanner().location().end_pos);
+ proxy->var()->set_initializer_position(position());
}
// Make sure that 'const x' and 'let x' initialize 'x' to undefined.
if (value == NULL && needs_init) {
- value = GetLiteralUndefined();
+ value = GetLiteralUndefined(position());
}
// Global variable declarations must be compiled in a specific
@@ -2216,7 +2021,7 @@ Block* Parser::ParseVariableDeclarations(
ZoneList<Expression*>* arguments =
new(zone()) ZoneList<Expression*>(3, zone());
// We have at least 1 parameter.
- arguments->Add(factory()->NewLiteral(name), zone());
+ arguments->Add(factory()->NewLiteral(name, pos), zone());
CallRuntime* initialize;
if (is_const) {
@@ -2228,14 +2033,14 @@ Block* Parser::ParseVariableDeclarations(
// Note that the function does different things depending on
// the number of arguments (1 or 2).
initialize = factory()->NewCallRuntime(
- isolate()->factory()->InitializeConstGlobal_symbol(),
+ isolate()->factory()->InitializeConstGlobal_string(),
Runtime::FunctionForId(Runtime::kInitializeConstGlobal),
- arguments);
+ arguments, pos);
} else {
// Add strict mode.
// We may want to pass singleton to avoid Literal allocations.
LanguageMode language_mode = initialization_scope->language_mode();
- arguments->Add(factory()->NewNumberLiteral(language_mode), zone());
+ arguments->Add(factory()->NewNumberLiteral(language_mode, pos), zone());
// Be careful not to assign a value to the global variable if
// we're in a with. The initialization value should not
@@ -2251,13 +2056,14 @@ Block* Parser::ParseVariableDeclarations(
// Note that the function does different things depending on
// the number of arguments (2 or 3).
initialize = factory()->NewCallRuntime(
- isolate()->factory()->InitializeVarGlobal_symbol(),
+ isolate()->factory()->InitializeVarGlobal_string(),
Runtime::FunctionForId(Runtime::kInitializeVarGlobal),
- arguments);
+ arguments, pos);
}
- block->AddStatement(factory()->NewExpressionStatement(initialize),
- zone());
+ block->AddStatement(
+ factory()->NewExpressionStatement(initialize, RelocInfo::kNoPosition),
+ zone());
} else if (needs_init) {
// Constant initializations always assign to the declared constant which
// is always at the function scope level. This is only relevant for
@@ -2270,9 +2076,10 @@ Block* Parser::ParseVariableDeclarations(
ASSERT(proxy->var() != NULL);
ASSERT(value != NULL);
Assignment* assignment =
- factory()->NewAssignment(init_op, proxy, value, position);
- block->AddStatement(factory()->NewExpressionStatement(assignment),
- zone());
+ factory()->NewAssignment(init_op, proxy, value, pos);
+ block->AddStatement(
+ factory()->NewExpressionStatement(assignment, RelocInfo::kNoPosition),
+ zone());
value = NULL;
}
@@ -2286,9 +2093,10 @@ Block* Parser::ParseVariableDeclarations(
VariableProxy* proxy =
initialization_scope->NewUnresolved(factory(), name, interface);
Assignment* assignment =
- factory()->NewAssignment(init_op, proxy, value, position);
- block->AddStatement(factory()->NewExpressionStatement(assignment),
- zone());
+ factory()->NewAssignment(init_op, proxy, value, pos);
+ block->AddStatement(
+ factory()->NewExpressionStatement(assignment, RelocInfo::kNoPosition),
+ zone());
}
if (fni_ != NULL) fni_->Leave();
@@ -2320,6 +2128,7 @@ Statement* Parser::ParseExpressionOrLabelledStatement(ZoneStringList* labels,
// ExpressionStatement | LabelledStatement ::
// Expression ';'
// Identifier ':' Statement
+ int pos = peek_position();
bool starts_with_identifier = peek_any_identifier();
Expression* expr = ParseExpression(true, CHECK_OK);
if (peek() == Token::COLON && starts_with_identifier && expr != NULL &&
@@ -2363,7 +2172,7 @@ Statement* Parser::ParseExpressionOrLabelledStatement(ZoneStringList* labels,
expr != NULL &&
expr->AsVariableProxy() != NULL &&
expr->AsVariableProxy()->name()->Equals(
- isolate()->heap()->native_symbol()) &&
+ isolate()->heap()->native_string()) &&
!scanner().literal_contains_escapes()) {
return ParseNativeDeclaration(ok);
}
@@ -2375,11 +2184,11 @@ Statement* Parser::ParseExpressionOrLabelledStatement(ZoneStringList* labels,
scanner().HasAnyLineTerminatorBeforeNext() ||
expr->AsVariableProxy() == NULL ||
!expr->AsVariableProxy()->name()->Equals(
- isolate()->heap()->module_symbol()) ||
+ isolate()->heap()->module_string()) ||
scanner().literal_contains_escapes()) {
ExpectSemicolon(CHECK_OK);
}
- return factory()->NewExpressionStatement(expr);
+ return factory()->NewExpressionStatement(expr, pos);
}
@@ -2387,6 +2196,7 @@ IfStatement* Parser::ParseIfStatement(ZoneStringList* labels, bool* ok) {
// IfStatement ::
// 'if' '(' Expression ')' Statement ('else' Statement)?
+ int pos = peek_position();
Expect(Token::IF, CHECK_OK);
Expect(Token::LPAREN, CHECK_OK);
Expression* condition = ParseExpression(true, CHECK_OK);
@@ -2397,9 +2207,10 @@ IfStatement* Parser::ParseIfStatement(ZoneStringList* labels, bool* ok) {
Next();
else_statement = ParseStatement(labels, CHECK_OK);
} else {
- else_statement = factory()->NewEmptyStatement();
+ else_statement = factory()->NewEmptyStatement(RelocInfo::kNoPosition);
}
- return factory()->NewIfStatement(condition, then_statement, else_statement);
+ return factory()->NewIfStatement(
+ condition, then_statement, else_statement, pos);
}
@@ -2407,6 +2218,7 @@ Statement* Parser::ParseContinueStatement(bool* ok) {
// ContinueStatement ::
// 'continue' Identifier? ';'
+ int pos = peek_position();
Expect(Token::CONTINUE, CHECK_OK);
Handle<String> label = Handle<String>::null();
Token::Value tok = peek();
@@ -2429,7 +2241,7 @@ Statement* Parser::ParseContinueStatement(bool* ok) {
return NULL;
}
ExpectSemicolon(CHECK_OK);
- return factory()->NewContinueStatement(target);
+ return factory()->NewContinueStatement(target, pos);
}
@@ -2437,6 +2249,7 @@ Statement* Parser::ParseBreakStatement(ZoneStringList* labels, bool* ok) {
// BreakStatement ::
// 'break' Identifier? ';'
+ int pos = peek_position();
Expect(Token::BREAK, CHECK_OK);
Handle<String> label;
Token::Value tok = peek();
@@ -2448,7 +2261,7 @@ Statement* Parser::ParseBreakStatement(ZoneStringList* labels, bool* ok) {
// empty statements, e.g. 'l1: l2: l3: break l2;'
if (!label.is_null() && ContainsLabel(labels, label)) {
ExpectSemicolon(CHECK_OK);
- return factory()->NewEmptyStatement();
+ return factory()->NewEmptyStatement(pos);
}
BreakableStatement* target = NULL;
target = LookupBreakTarget(label, CHECK_OK);
@@ -2465,7 +2278,7 @@ Statement* Parser::ParseBreakStatement(ZoneStringList* labels, bool* ok) {
return NULL;
}
ExpectSemicolon(CHECK_OK);
- return factory()->NewBreakStatement(target);
+ return factory()->NewBreakStatement(target, pos);
}
@@ -2473,23 +2286,32 @@ Statement* Parser::ParseReturnStatement(bool* ok) {
// ReturnStatement ::
// 'return' Expression? ';'
- // Consume the return token. It is necessary to do the before
+ // Consume the return token. It is necessary to do that before
// reporting any errors on it, because of the way errors are
// reported (underlining).
Expect(Token::RETURN, CHECK_OK);
+ int pos = position();
Token::Value tok = peek();
Statement* result;
+ Expression* return_value;
if (scanner().HasAnyLineTerminatorBeforeNext() ||
tok == Token::SEMICOLON ||
tok == Token::RBRACE ||
tok == Token::EOS) {
- ExpectSemicolon(CHECK_OK);
- result = factory()->NewReturnStatement(GetLiteralUndefined());
+ return_value = GetLiteralUndefined(position());
} else {
- Expression* expr = ParseExpression(true, CHECK_OK);
- ExpectSemicolon(CHECK_OK);
- result = factory()->NewReturnStatement(expr);
+ return_value = ParseExpression(true, CHECK_OK);
+ }
+ ExpectSemicolon(CHECK_OK);
+ if (is_generator()) {
+ Expression* generator = factory()->NewVariableProxy(
+ current_function_state_->generator_object_variable());
+ Expression* yield = factory()->NewYield(
+ generator, return_value, Yield::FINAL, pos);
+ result = factory()->NewExpressionStatement(yield, pos);
+ } else {
+ result = factory()->NewReturnStatement(return_value, pos);
}
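// Editor's note (illustrative, not part of this change): inside a generator
// body a return is thus lowered to a final yield on the implicit generator
// object instead of a plain ReturnStatement, e.g.
//
//   function* g() { return 42; }   // "return 42" becomes a Yield::FINAL node
//
// so the value travels out through the generator machinery.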
// An ECMAScript program is considered syntactically incorrect if it
@@ -2500,9 +2322,10 @@ Statement* Parser::ParseReturnStatement(bool* ok) {
Scope* declaration_scope = top_scope_->DeclarationScope();
if (declaration_scope->is_global_scope() ||
declaration_scope->is_eval_scope()) {
- Handle<String> type = isolate()->factory()->illegal_return_symbol();
- Expression* throw_error = NewThrowSyntaxError(type, Handle<Object>::null());
- return factory()->NewExpressionStatement(throw_error);
+ Handle<String> message = isolate()->factory()->illegal_return_string();
+ Expression* throw_error =
+ NewThrowSyntaxError(message, Handle<Object>::null());
+ return factory()->NewExpressionStatement(throw_error, pos);
}
return result;
}
@@ -2513,6 +2336,7 @@ Statement* Parser::ParseWithStatement(ZoneStringList* labels, bool* ok) {
// 'with' '(' Expression ')' Statement
Expect(Token::WITH, CHECK_OK);
+ int pos = position();
if (!top_scope_->is_classic_mode()) {
ReportMessage("strict_mode_with", Vector<const char*>::empty());
@@ -2532,7 +2356,7 @@ Statement* Parser::ParseWithStatement(ZoneStringList* labels, bool* ok) {
stmt = ParseStatement(labels, CHECK_OK);
with_scope->set_end_position(scanner().location().end_pos);
}
- return factory()->NewWithStatement(expr, stmt);
+ return factory()->NewWithStatement(with_scope, expr, stmt, pos);
}
@@ -2556,7 +2380,7 @@ CaseClause* Parser::ParseCaseClause(bool* default_seen_ptr, bool* ok) {
*default_seen_ptr = true;
}
Expect(Token::COLON, CHECK_OK);
- int pos = scanner().location().beg_pos;
+ int pos = position();
ZoneList<Statement*>* statements =
new(zone()) ZoneList<Statement*>(5, zone());
while (peek() != Token::CASE &&
@@ -2566,7 +2390,7 @@ CaseClause* Parser::ParseCaseClause(bool* default_seen_ptr, bool* ok) {
statements->Add(stat, zone());
}
- return new(zone()) CaseClause(isolate(), label, statements, pos);
+ return factory()->NewCaseClause(label, statements, pos);
}
@@ -2575,7 +2399,8 @@ SwitchStatement* Parser::ParseSwitchStatement(ZoneStringList* labels,
// SwitchStatement ::
// 'switch' '(' Expression ')' '{' CaseClause* '}'
- SwitchStatement* statement = factory()->NewSwitchStatement(labels);
+ SwitchStatement* statement =
+ factory()->NewSwitchStatement(labels, peek_position());
Target target(&this->target_stack_, statement);
Expect(Token::SWITCH, CHECK_OK);
@@ -2602,7 +2427,7 @@ Statement* Parser::ParseThrowStatement(bool* ok) {
// 'throw' Expression ';'
Expect(Token::THROW, CHECK_OK);
- int pos = scanner().location().beg_pos;
+ int pos = position();
if (scanner().HasAnyLineTerminatorBeforeNext()) {
ReportMessage("newline_after_throw", Vector<const char*>::empty());
*ok = false;
@@ -2611,7 +2436,8 @@ Statement* Parser::ParseThrowStatement(bool* ok) {
Expression* exception = ParseExpression(true, CHECK_OK);
ExpectSemicolon(CHECK_OK);
- return factory()->NewExpressionStatement(factory()->NewThrow(exception, pos));
+ return factory()->NewExpressionStatement(
+ factory()->NewThrow(exception, pos), pos);
}
@@ -2628,6 +2454,7 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
// 'finally' Block
Expect(Token::TRY, CHECK_OK);
+ int pos = position();
TargetCollector try_collector(zone());
Block* try_block;
@@ -2699,9 +2526,10 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
ASSERT(catch_scope != NULL && catch_variable != NULL);
int index = current_function_state_->NextHandlerIndex();
TryCatchStatement* statement = factory()->NewTryCatchStatement(
- index, try_block, catch_scope, catch_variable, catch_block);
+ index, try_block, catch_scope, catch_variable, catch_block,
+ RelocInfo::kNoPosition);
statement->set_escaping_targets(try_collector.targets());
- try_block = factory()->NewBlock(NULL, 1, false);
+ try_block = factory()->NewBlock(NULL, 1, false, RelocInfo::kNoPosition);
try_block->AddStatement(statement, zone());
catch_block = NULL; // Clear to indicate it's been handled.
}
@@ -2712,11 +2540,12 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
ASSERT(catch_scope != NULL && catch_variable != NULL);
int index = current_function_state_->NextHandlerIndex();
result = factory()->NewTryCatchStatement(
- index, try_block, catch_scope, catch_variable, catch_block);
+ index, try_block, catch_scope, catch_variable, catch_block, pos);
} else {
ASSERT(finally_block != NULL);
int index = current_function_state_->NextHandlerIndex();
- result = factory()->NewTryFinallyStatement(index, try_block, finally_block);
+ result = factory()->NewTryFinallyStatement(
+ index, try_block, finally_block, pos);
// Combine the jump targets of the try block and the possible catch block.
try_collector.targets()->AddAll(*catch_collector.targets(), zone());
}
@@ -2731,7 +2560,8 @@ DoWhileStatement* Parser::ParseDoWhileStatement(ZoneStringList* labels,
// DoStatement ::
// 'do' Statement 'while' '(' Expression ')' ';'
- DoWhileStatement* loop = factory()->NewDoWhileStatement(labels);
+ DoWhileStatement* loop =
+ factory()->NewDoWhileStatement(labels, peek_position());
Target target(&this->target_stack_, loop);
Expect(Token::DO, CHECK_OK);
@@ -2739,11 +2569,6 @@ DoWhileStatement* Parser::ParseDoWhileStatement(ZoneStringList* labels,
Expect(Token::WHILE, CHECK_OK);
Expect(Token::LPAREN, CHECK_OK);
- if (loop != NULL) {
- int position = scanner().location().beg_pos;
- loop->set_condition_position(position);
- }
-
Expression* cond = ParseExpression(true, CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
@@ -2762,7 +2587,7 @@ WhileStatement* Parser::ParseWhileStatement(ZoneStringList* labels, bool* ok) {
// WhileStatement ::
// 'while' '(' Expression ')' Statement
- WhileStatement* loop = factory()->NewWhileStatement(labels);
+ WhileStatement* loop = factory()->NewWhileStatement(labels, peek_position());
Target target(&this->target_stack_, loop);
Expect(Token::WHILE, CHECK_OK);
@@ -2776,10 +2601,97 @@ WhileStatement* Parser::ParseWhileStatement(ZoneStringList* labels, bool* ok) {
}
+bool Parser::CheckInOrOf(bool accept_OF,
+ ForEachStatement::VisitMode* visit_mode) {
+ if (Check(Token::IN)) {
+ *visit_mode = ForEachStatement::ENUMERATE;
+ return true;
+ } else if (allow_for_of() && accept_OF &&
+ CheckContextualKeyword(CStrVector("of"))) {
+ *visit_mode = ForEachStatement::ITERATE;
+ return true;
+ }
+ return false;
+}
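+// Editor's note (illustrative, not part of this change): the helper above maps
+// the two loop heads onto the two visit modes, e.g.
+//
+//   for (x in obj)  { ... }   // Token::IN        -> ForEachStatement::ENUMERATE
+//   for (x of iter) { ... }   // contextual "of"  -> ForEachStatement::ITERATE
+//
+// "of" is only honoured when the for-of flag is on and the caller passed
+// accept_OF (e.g. the declaration part had no initializer).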
+
+
+void Parser::InitializeForEachStatement(ForEachStatement* stmt,
+ Expression* each,
+ Expression* subject,
+ Statement* body) {
+ ForOfStatement* for_of = stmt->AsForOfStatement();
+
+ if (for_of != NULL) {
+ Factory* heap_factory = isolate()->factory();
+ Handle<String> iterator_str = heap_factory->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR(".iterator"));
+ Handle<String> result_str = heap_factory->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR(".result"));
+ Variable* iterator =
+ top_scope_->DeclarationScope()->NewTemporary(iterator_str);
+ Variable* result = top_scope_->DeclarationScope()->NewTemporary(result_str);
+
+ Expression* assign_iterator;
+ Expression* next_result;
+ Expression* result_done;
+ Expression* assign_each;
+
+ // var iterator = iterable;
+ {
+ Expression* iterator_proxy = factory()->NewVariableProxy(iterator);
+ assign_iterator = factory()->NewAssignment(
+ Token::ASSIGN, iterator_proxy, subject, RelocInfo::kNoPosition);
+ }
+
+ // var result = iterator.next();
+ {
+ Expression* iterator_proxy = factory()->NewVariableProxy(iterator);
+ Expression* next_literal = factory()->NewLiteral(
+ heap_factory->next_string(), RelocInfo::kNoPosition);
+ Expression* next_property = factory()->NewProperty(
+ iterator_proxy, next_literal, RelocInfo::kNoPosition);
+ ZoneList<Expression*>* next_arguments =
+ new(zone()) ZoneList<Expression*>(0, zone());
+ Expression* next_call = factory()->NewCall(
+ next_property, next_arguments, RelocInfo::kNoPosition);
+ Expression* result_proxy = factory()->NewVariableProxy(result);
+ next_result = factory()->NewAssignment(
+ Token::ASSIGN, result_proxy, next_call, RelocInfo::kNoPosition);
+ }
+
+ // result.done
+ {
+ Expression* done_literal = factory()->NewLiteral(
+ heap_factory->done_string(), RelocInfo::kNoPosition);
+ Expression* result_proxy = factory()->NewVariableProxy(result);
+ result_done = factory()->NewProperty(
+ result_proxy, done_literal, RelocInfo::kNoPosition);
+ }
+
+ // each = result.value
+ {
+ Expression* value_literal = factory()->NewLiteral(
+ heap_factory->value_string(), RelocInfo::kNoPosition);
+ Expression* result_proxy = factory()->NewVariableProxy(result);
+ Expression* result_value = factory()->NewProperty(
+ result_proxy, value_literal, RelocInfo::kNoPosition);
+ assign_each = factory()->NewAssignment(
+ Token::ASSIGN, each, result_value, RelocInfo::kNoPosition);
+ }
+
+ for_of->Initialize(each, subject, body,
+ assign_iterator, next_result, result_done, assign_each);
+ } else {
+ stmt->Initialize(each, subject, body);
+ }
+}
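+// Editor's note (illustrative sketch, not part of this change): taken together
+// the expressions built above desugar
+//
+//   for (each of subject) body
+//
+// into roughly (using the hidden ".iterator" and ".result" temporaries)
+//
+//   .iterator = subject;
+//   while (!(.result = .iterator.next()).done) {
+//     each = .result.value;
+//     body
+//   }
+//
+// the exact loop shape is assembled later from the four expressions handed to
+// ForOfStatement::Initialize().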
+
+
Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
// ForStatement ::
// 'for' '(' Expression? ';' Expression? ';' Expression? ')' Statement
+ int pos = peek_position();
Statement* init = NULL;
// Create an in-between scope for let-bound iteration variables.
@@ -2794,24 +2706,29 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
if (peek() == Token::VAR || peek() == Token::CONST) {
bool is_const = peek() == Token::CONST;
Handle<String> name;
+ VariableDeclarationProperties decl_props = kHasNoInitializers;
Block* variable_statement =
- ParseVariableDeclarations(kForStatement, NULL, NULL, &name, CHECK_OK);
+ ParseVariableDeclarations(kForStatement, &decl_props, NULL, &name,
+ CHECK_OK);
+ bool accept_OF = decl_props == kHasNoInitializers;
+ ForEachStatement::VisitMode mode;
- if (peek() == Token::IN && !name.is_null()) {
+ if (!name.is_null() && CheckInOrOf(accept_OF, &mode)) {
Interface* interface =
is_const ? Interface::NewConst() : Interface::NewValue();
- ForInStatement* loop = factory()->NewForInStatement(labels);
+ ForEachStatement* loop =
+ factory()->NewForEachStatement(mode, labels, pos);
Target target(&this->target_stack_, loop);
- Expect(Token::IN, CHECK_OK);
Expression* enumerable = ParseExpression(true, CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
VariableProxy* each =
top_scope_->NewUnresolved(factory(), name, interface);
Statement* body = ParseStatement(NULL, CHECK_OK);
- loop->Initialize(each, enumerable, body);
- Block* result = factory()->NewBlock(NULL, 2, false);
+ InitializeForEachStatement(loop, each, enumerable, body);
+ Block* result =
+ factory()->NewBlock(NULL, 2, false, RelocInfo::kNoPosition);
result->AddStatement(variable_statement, zone());
result->AddStatement(loop, zone());
top_scope_ = saved_scope;
@@ -2830,7 +2747,10 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
ParseVariableDeclarations(kForStatement, &decl_props, NULL, &name,
CHECK_OK);
bool accept_IN = !name.is_null() && decl_props != kHasInitializers;
- if (peek() == Token::IN && accept_IN) {
+ bool accept_OF = decl_props == kHasNoInitializers;
+ ForEachStatement::VisitMode mode;
+
+ if (accept_IN && CheckInOrOf(accept_OF, &mode)) {
// Rewrite a for-in statement of the form
//
// for (let x in e) b
@@ -2848,15 +2768,15 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
// implementing stack allocated block scoped variables.
Factory* heap_factory = isolate()->factory();
Handle<String> tempstr =
- heap_factory->NewConsString(heap_factory->dot_for_symbol(), name);
- Handle<String> tempname = heap_factory->LookupSymbol(tempstr);
+ heap_factory->NewConsString(heap_factory->dot_for_string(), name);
+ Handle<String> tempname = heap_factory->InternalizeString(tempstr);
Variable* temp = top_scope_->DeclarationScope()->NewTemporary(tempname);
VariableProxy* temp_proxy = factory()->NewVariableProxy(temp);
- ForInStatement* loop = factory()->NewForInStatement(labels);
+ ForEachStatement* loop =
+ factory()->NewForEachStatement(mode, labels, pos);
Target target(&this->target_stack_, loop);
// The expression does not see the loop variable.
- Expect(Token::IN, CHECK_OK);
top_scope_ = saved_scope;
Expression* enumerable = ParseExpression(true, CHECK_OK);
top_scope_ = for_scope;
@@ -2865,15 +2785,16 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
VariableProxy* each =
top_scope_->NewUnresolved(factory(), name, Interface::NewValue());
Statement* body = ParseStatement(NULL, CHECK_OK);
- Block* body_block = factory()->NewBlock(NULL, 3, false);
+ Block* body_block =
+ factory()->NewBlock(NULL, 3, false, RelocInfo::kNoPosition);
Assignment* assignment = factory()->NewAssignment(
Token::ASSIGN, each, temp_proxy, RelocInfo::kNoPosition);
- Statement* assignment_statement =
- factory()->NewExpressionStatement(assignment);
+ Statement* assignment_statement = factory()->NewExpressionStatement(
+ assignment, RelocInfo::kNoPosition);
body_block->AddStatement(variable_statement, zone());
body_block->AddStatement(assignment_statement, zone());
body_block->AddStatement(body, zone());
- loop->Initialize(temp_proxy, enumerable, body_block);
+ InitializeForEachStatement(loop, temp_proxy, enumerable, body_block);
top_scope_ = saved_scope;
for_scope->set_end_position(scanner().location().end_pos);
for_scope = for_scope->FinalizeBlockScope();
@@ -2886,25 +2807,28 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
}
} else {
Expression* expression = ParseExpression(false, CHECK_OK);
- if (peek() == Token::IN) {
+ ForEachStatement::VisitMode mode;
+ bool accept_OF = expression->AsVariableProxy();
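+    // Without a declaration, 'of' is only offered when the left-hand side
+    // parsed as a plain variable reference (a VariableProxy).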
+
+ if (CheckInOrOf(accept_OF, &mode)) {
// Signal a reference error if the expression is an invalid
// left-hand side expression. We could report this as a syntax
// error here but for compatibility with JSC we choose to report
// the error at runtime.
if (expression == NULL || !expression->IsValidLeftHandSide()) {
- Handle<String> type =
- isolate()->factory()->invalid_lhs_in_for_in_symbol();
- expression = NewThrowReferenceError(type);
+ Handle<String> message =
+ isolate()->factory()->invalid_lhs_in_for_in_string();
+ expression = NewThrowReferenceError(message);
}
- ForInStatement* loop = factory()->NewForInStatement(labels);
+ ForEachStatement* loop =
+ factory()->NewForEachStatement(mode, labels, pos);
Target target(&this->target_stack_, loop);
- Expect(Token::IN, CHECK_OK);
Expression* enumerable = ParseExpression(true, CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
Statement* body = ParseStatement(NULL, CHECK_OK);
- if (loop) loop->Initialize(expression, enumerable, body);
+ InitializeForEachStatement(loop, expression, enumerable, body);
top_scope_ = saved_scope;
for_scope->set_end_position(scanner().location().end_pos);
for_scope = for_scope->FinalizeBlockScope();
@@ -2913,13 +2837,14 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
return loop;
} else {
- init = factory()->NewExpressionStatement(expression);
+ init = factory()->NewExpressionStatement(
+ expression, RelocInfo::kNoPosition);
}
}
}
// Standard 'for' loop
- ForStatement* loop = factory()->NewForStatement(labels);
+ ForStatement* loop = factory()->NewForStatement(labels, pos);
Target target(&this->target_stack_, loop);
// Parsed initializer at this point.
@@ -2934,7 +2859,7 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
Statement* next = NULL;
if (peek() != Token::RPAREN) {
Expression* exp = ParseExpression(true, CHECK_OK);
- next = factory()->NewExpressionStatement(exp);
+ next = factory()->NewExpressionStatement(exp, RelocInfo::kNoPosition);
}
Expect(Token::RPAREN, CHECK_OK);
@@ -2954,14 +2879,14 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
// for (; c; n) b
// }
ASSERT(init != NULL);
- Block* result = factory()->NewBlock(NULL, 2, false);
+ Block* result = factory()->NewBlock(NULL, 2, false, RelocInfo::kNoPosition);
result->AddStatement(init, zone());
result->AddStatement(loop, zone());
result->set_scope(for_scope);
- if (loop) loop->Initialize(NULL, cond, next, body);
+ loop->Initialize(NULL, cond, next, body);
return result;
} else {
- if (loop) loop->Initialize(init, cond, next, body);
+ loop->Initialize(init, cond, next, body);
return loop;
}
}
@@ -2976,10 +2901,9 @@ Expression* Parser::ParseExpression(bool accept_IN, bool* ok) {
Expression* result = ParseAssignmentExpression(accept_IN, CHECK_OK);
while (peek() == Token::COMMA) {
Expect(Token::COMMA, CHECK_OK);
- int position = scanner().location().beg_pos;
+ int pos = position();
Expression* right = ParseAssignmentExpression(accept_IN, CHECK_OK);
- result =
- factory()->NewBinaryOperation(Token::COMMA, result, right, position);
+ result = factory()->NewBinaryOperation(Token::COMMA, result, right, pos);
}
return result;
}
@@ -2989,8 +2913,13 @@ Expression* Parser::ParseExpression(bool accept_IN, bool* ok) {
Expression* Parser::ParseAssignmentExpression(bool accept_IN, bool* ok) {
// AssignmentExpression ::
// ConditionalExpression
+ // YieldExpression
// LeftHandSideExpression AssignmentOperator AssignmentExpression
+ if (peek() == Token::YIELD && is_generator()) {
+ return ParseYieldExpression(ok);
+ }
+
if (fni_ != NULL) fni_->Enter();
Expression* expression = ParseConditionalExpression(accept_IN, CHECK_OK);
@@ -3004,10 +2933,11 @@ Expression* Parser::ParseAssignmentExpression(bool accept_IN, bool* ok) {
// side expression. We could report this as a syntax error here but
// for compatibility with JSC we choose to report the error at
// runtime.
+ // TODO(ES5): Should change parsing for spec conformance.
if (expression == NULL || !expression->IsValidLeftHandSide()) {
- Handle<String> type =
- isolate()->factory()->invalid_lhs_in_assignment_symbol();
- expression = NewThrowReferenceError(type);
+ Handle<String> message =
+ isolate()->factory()->invalid_lhs_in_assignment_string();
+ expression = NewThrowReferenceError(message);
}
if (!top_scope_->is_classic_mode()) {
@@ -3017,7 +2947,7 @@ Expression* Parser::ParseAssignmentExpression(bool accept_IN, bool* ok) {
MarkAsLValue(expression);
Token::Value op = Next(); // Get assignment operator.
- int pos = scanner().location().beg_pos;
+ int pos = position();
Expression* right = ParseAssignmentExpression(accept_IN, CHECK_OK);
// TODO(1231235): We try to estimate the set of properties set by
@@ -3058,12 +2988,31 @@ Expression* Parser::ParseAssignmentExpression(bool accept_IN, bool* ok) {
}
+Expression* Parser::ParseYieldExpression(bool* ok) {
+ // YieldExpression ::
+ // 'yield' '*'? AssignmentExpression
+ int pos = peek_position();
+ Expect(Token::YIELD, CHECK_OK);
+ Yield::Kind kind =
+ Check(Token::MUL) ? Yield::DELEGATING : Yield::SUSPEND;
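+  // 'yield* expr' (the '*' consumed by Check(Token::MUL) above) delegates to
+  // another iterable and reserves a handler index below; a plain 'yield expr'
+  // simply suspends this generator.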
+ Expression* generator_object = factory()->NewVariableProxy(
+ current_function_state_->generator_object_variable());
+ Expression* expression = ParseAssignmentExpression(false, CHECK_OK);
+ Yield* yield = factory()->NewYield(generator_object, expression, kind, pos);
+ if (kind == Yield::DELEGATING) {
+ yield->set_index(current_function_state_->NextHandlerIndex());
+ }
+ return yield;
+}
+
+
// Precedence = 3
Expression* Parser::ParseConditionalExpression(bool accept_IN, bool* ok) {
// ConditionalExpression ::
// LogicalOrExpression
// LogicalOrExpression '?' AssignmentExpression ':' AssignmentExpression
+ int pos = peek_position();
// We start using the binary expression parser for prec >= 4 only!
Expression* expression = ParseBinaryExpression(4, accept_IN, CHECK_OK);
if (peek() != Token::CONDITIONAL) return expression;
@@ -3071,17 +3020,14 @@ Expression* Parser::ParseConditionalExpression(bool accept_IN, bool* ok) {
// In parsing the first assignment expression in conditional
// expressions we always accept the 'in' keyword; see ECMA-262,
// section 11.12, page 58.
- int left_position = scanner().peek_location().beg_pos;
Expression* left = ParseAssignmentExpression(true, CHECK_OK);
Expect(Token::COLON, CHECK_OK);
- int right_position = scanner().peek_location().beg_pos;
Expression* right = ParseAssignmentExpression(accept_IN, CHECK_OK);
- return factory()->NewConditional(
- expression, left, right, left_position, right_position);
+ return factory()->NewConditional(expression, left, right, pos);
}
-static int Precedence(Token::Value tok, bool accept_IN) {
+int ParserBase::Precedence(Token::Value tok, bool accept_IN) {
if (tok == Token::IN && !accept_IN)
return 0; // 0 precedence will terminate binary expression parsing
@@ -3097,58 +3043,58 @@ Expression* Parser::ParseBinaryExpression(int prec, bool accept_IN, bool* ok) {
// prec1 >= 4
while (Precedence(peek(), accept_IN) == prec1) {
Token::Value op = Next();
- int position = scanner().location().beg_pos;
+ int pos = position();
Expression* y = ParseBinaryExpression(prec1 + 1, accept_IN, CHECK_OK);
// Compute some expressions involving only number literals.
- if (x && x->AsLiteral() && x->AsLiteral()->handle()->IsNumber() &&
- y && y->AsLiteral() && y->AsLiteral()->handle()->IsNumber()) {
- double x_val = x->AsLiteral()->handle()->Number();
- double y_val = y->AsLiteral()->handle()->Number();
+ if (x && x->AsLiteral() && x->AsLiteral()->value()->IsNumber() &&
+ y && y->AsLiteral() && y->AsLiteral()->value()->IsNumber()) {
+ double x_val = x->AsLiteral()->value()->Number();
+ double y_val = y->AsLiteral()->value()->Number();
switch (op) {
case Token::ADD:
- x = factory()->NewNumberLiteral(x_val + y_val);
+ x = factory()->NewNumberLiteral(x_val + y_val, pos);
continue;
case Token::SUB:
- x = factory()->NewNumberLiteral(x_val - y_val);
+ x = factory()->NewNumberLiteral(x_val - y_val, pos);
continue;
case Token::MUL:
- x = factory()->NewNumberLiteral(x_val * y_val);
+ x = factory()->NewNumberLiteral(x_val * y_val, pos);
continue;
case Token::DIV:
- x = factory()->NewNumberLiteral(x_val / y_val);
+ x = factory()->NewNumberLiteral(x_val / y_val, pos);
continue;
case Token::BIT_OR: {
int value = DoubleToInt32(x_val) | DoubleToInt32(y_val);
- x = factory()->NewNumberLiteral(value);
+ x = factory()->NewNumberLiteral(value, pos);
continue;
}
case Token::BIT_AND: {
int value = DoubleToInt32(x_val) & DoubleToInt32(y_val);
- x = factory()->NewNumberLiteral(value);
+ x = factory()->NewNumberLiteral(value, pos);
continue;
}
case Token::BIT_XOR: {
int value = DoubleToInt32(x_val) ^ DoubleToInt32(y_val);
- x = factory()->NewNumberLiteral(value);
+ x = factory()->NewNumberLiteral(value, pos);
continue;
}
case Token::SHL: {
int value = DoubleToInt32(x_val) << (DoubleToInt32(y_val) & 0x1f);
- x = factory()->NewNumberLiteral(value);
+ x = factory()->NewNumberLiteral(value, pos);
continue;
}
case Token::SHR: {
uint32_t shift = DoubleToInt32(y_val) & 0x1f;
uint32_t value = DoubleToUint32(x_val) >> shift;
- x = factory()->NewNumberLiteral(value);
+ x = factory()->NewNumberLiteral(value, pos);
continue;
}
case Token::SAR: {
uint32_t shift = DoubleToInt32(y_val) & 0x1f;
int value = ArithmeticShiftRight(DoubleToInt32(x_val), shift);
- x = factory()->NewNumberLiteral(value);
+ x = factory()->NewNumberLiteral(value, pos);
continue;
}
default:
@@ -3167,15 +3113,15 @@ Expression* Parser::ParseBinaryExpression(int prec, bool accept_IN, bool* ok) {
case Token::NE_STRICT: cmp = Token::EQ_STRICT; break;
default: break;
}
- x = factory()->NewCompareOperation(cmp, x, y, position);
+ x = factory()->NewCompareOperation(cmp, x, y, pos);
if (cmp != op) {
// The comparison was negated - add a NOT.
- x = factory()->NewUnaryOperation(Token::NOT, x, position);
+ x = factory()->NewUnaryOperation(Token::NOT, x, pos);
}
} else {
// We have a "normal" binary operation.
- x = factory()->NewBinaryOperation(op, x, y, position);
+ x = factory()->NewBinaryOperation(op, x, y, pos);
}
}
}
@@ -3199,16 +3145,16 @@ Expression* Parser::ParseUnaryExpression(bool* ok) {
Token::Value op = peek();
if (Token::IsUnaryOp(op)) {
op = Next();
- int position = scanner().location().beg_pos;
+ int pos = position();
Expression* expression = ParseUnaryExpression(CHECK_OK);
if (expression != NULL && (expression->AsLiteral() != NULL)) {
- Handle<Object> literal = expression->AsLiteral()->handle();
+ Handle<Object> literal = expression->AsLiteral()->value();
if (op == Token::NOT) {
// Convert the literal to a boolean condition and negate it.
- bool condition = literal->ToBoolean()->IsTrue();
- Handle<Object> result(isolate()->heap()->ToBoolean(!condition));
- return factory()->NewLiteral(result);
+ bool condition = literal->BooleanValue();
+ Handle<Object> result = isolate()->factory()->ToBoolean(!condition);
+ return factory()->NewLiteral(result, pos);
} else if (literal->IsNumber()) {
// Compute some expressions involving only number literals.
double value = literal->Number();
@@ -3216,9 +3162,9 @@ Expression* Parser::ParseUnaryExpression(bool* ok) {
case Token::ADD:
return expression;
case Token::SUB:
- return factory()->NewNumberLiteral(-value);
+ return factory()->NewNumberLiteral(-value, pos);
case Token::BIT_NOT:
- return factory()->NewNumberLiteral(~DoubleToInt32(value));
+ return factory()->NewNumberLiteral(~DoubleToInt32(value), pos);
default:
break;
}
@@ -3235,7 +3181,31 @@ Expression* Parser::ParseUnaryExpression(bool* ok) {
}
}
- return factory()->NewUnaryOperation(op, expression, position);
+  // Desugar '+foo' into 'foo*1'; this enables the collection of type feedback
+  // without any special stub, and the multiplication is removed later in
+ // Crankshaft's canonicalization pass.
+ if (op == Token::ADD) {
+ return factory()->NewBinaryOperation(Token::MUL,
+ expression,
+ factory()->NewNumberLiteral(1, pos),
+ pos);
+ }
+ // The same idea for '-foo' => 'foo*(-1)'.
+ if (op == Token::SUB) {
+ return factory()->NewBinaryOperation(Token::MUL,
+ expression,
+ factory()->NewNumberLiteral(-1, pos),
+ pos);
+ }
+ // ...and one more time for '~foo' => 'foo^(~0)'.
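+  // (XOR against an all-ones mask flips every bit, so 'foo ^ ~0' yields the
+  // same int32 result as '~foo'.)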
+ if (op == Token::BIT_NOT) {
+ return factory()->NewBinaryOperation(Token::BIT_XOR,
+ expression,
+ factory()->NewNumberLiteral(~0, pos),
+ pos);
+ }
+
+ return factory()->NewUnaryOperation(op, expression, pos);
} else if (Token::IsCountOp(op)) {
op = Next();
@@ -3245,9 +3215,9 @@ Expression* Parser::ParseUnaryExpression(bool* ok) {
// error here but for compatibility with JSC we choose to report the
// error at runtime.
if (expression == NULL || !expression->IsValidLeftHandSide()) {
- Handle<String> type =
- isolate()->factory()->invalid_lhs_in_prefix_op_symbol();
- expression = NewThrowReferenceError(type);
+ Handle<String> message =
+ isolate()->factory()->invalid_lhs_in_prefix_op_string();
+ expression = NewThrowReferenceError(message);
}
if (!top_scope_->is_classic_mode()) {
@@ -3256,11 +3226,10 @@ Expression* Parser::ParseUnaryExpression(bool* ok) {
}
MarkAsLValue(expression);
- int position = scanner().location().beg_pos;
return factory()->NewCountOperation(op,
true /* prefix */,
expression,
- position);
+ position());
} else {
return ParsePostfixExpression(ok);
@@ -3280,9 +3249,9 @@ Expression* Parser::ParsePostfixExpression(bool* ok) {
// error here but for compatibility with JSC we choose to report the
// error at runtime.
if (expression == NULL || !expression->IsValidLeftHandSide()) {
- Handle<String> type =
- isolate()->factory()->invalid_lhs_in_postfix_op_symbol();
- expression = NewThrowReferenceError(type);
+ Handle<String> message =
+ isolate()->factory()->invalid_lhs_in_postfix_op_string();
+ expression = NewThrowReferenceError(message);
}
if (!top_scope_->is_classic_mode()) {
@@ -3292,12 +3261,11 @@ Expression* Parser::ParsePostfixExpression(bool* ok) {
MarkAsLValue(expression);
Token::Value next = Next();
- int position = scanner().location().beg_pos;
expression =
factory()->NewCountOperation(next,
false /* postfix */,
expression,
- position);
+ position());
}
return expression;
}
@@ -3318,7 +3286,7 @@ Expression* Parser::ParseLeftHandSideExpression(bool* ok) {
switch (peek()) {
case Token::LBRACK: {
Consume(Token::LBRACK);
- int pos = scanner().location().beg_pos;
+ int pos = position();
Expression* index = ParseExpression(true, CHECK_OK);
result = factory()->NewProperty(result, index, pos);
Expect(Token::RBRACK, CHECK_OK);
@@ -3330,14 +3298,14 @@ Expression* Parser::ParseLeftHandSideExpression(bool* ok) {
if (scanner().current_token() == Token::IDENTIFIER) {
// For call of an identifier we want to report position of
// the identifier as position of the call in the stack trace.
- pos = scanner().location().beg_pos;
+ pos = position();
} else {
// For other kinds of calls we record position of the parenthesis as
// position of the call. Note that this is extremely important for
// expressions of the form function(){...}() for which call position
// should not point to the closing brace otherwise it will intersect
// with positions recorded for function literal and confuse debugger.
- pos = scanner().peek_location().beg_pos;
+ pos = peek_position();
// Also the trailing parentheses are a hint that the function will
// be called immediately. If we happen to have parsed a preceding
// function literal eagerly, we can also compile it eagerly.
@@ -3356,19 +3324,20 @@ Expression* Parser::ParseLeftHandSideExpression(bool* ok) {
// they are actually direct calls to eval is determined at run time.
VariableProxy* callee = result->AsVariableProxy();
if (callee != NULL &&
- callee->IsVariable(isolate()->factory()->eval_symbol())) {
+ callee->IsVariable(isolate()->factory()->eval_string())) {
top_scope_->DeclarationScope()->RecordEvalCall();
}
result = factory()->NewCall(result, args, pos);
+ if (fni_ != NULL) fni_->RemoveLastFunction();
break;
}
case Token::PERIOD: {
Consume(Token::PERIOD);
- int pos = scanner().location().beg_pos;
+ int pos = position();
Handle<String> name = ParseIdentifierName(CHECK_OK);
- result =
- factory()->NewProperty(result, factory()->NewLiteral(name), pos);
+ result = factory()->NewProperty(
+ result, factory()->NewLiteral(name, pos), pos);
if (fni_ != NULL) fni_->PushLiteralName(name);
break;
}
@@ -3393,7 +3362,7 @@ Expression* Parser::ParseNewPrefix(PositionStack* stack, bool* ok) {
// member expression parser, which is only allowed to match argument
// lists as long as it has 'new' prefixes left
Expect(Token::NEW, CHECK_OK);
- PositionStack::Element pos(stack, scanner().location().beg_pos);
+ PositionStack::Element pos(stack, position());
Expression* result;
if (peek() == Token::NEW) {
@@ -3432,20 +3401,22 @@ Expression* Parser::ParseMemberWithNewPrefixesExpression(PositionStack* stack,
Expression* result = NULL;
if (peek() == Token::FUNCTION) {
Expect(Token::FUNCTION, CHECK_OK);
- int function_token_position = scanner().location().beg_pos;
+ int function_token_position = position();
+ bool is_generator = allow_generators() && Check(Token::MUL);
Handle<String> name;
bool is_strict_reserved_name = false;
if (peek_any_identifier()) {
name = ParseIdentifierOrStrictReservedWord(&is_strict_reserved_name,
CHECK_OK);
}
- FunctionLiteral::Type type = name.is_null()
+ FunctionLiteral::FunctionType function_type = name.is_null()
? FunctionLiteral::ANONYMOUS_EXPRESSION
: FunctionLiteral::NAMED_EXPRESSION;
result = ParseFunctionLiteral(name,
is_strict_reserved_name,
+ is_generator,
function_token_position,
- type,
+ function_type,
CHECK_OK);
} else {
result = ParsePrimaryExpression(CHECK_OK);
@@ -3455,7 +3426,7 @@ Expression* Parser::ParseMemberWithNewPrefixesExpression(PositionStack* stack,
switch (peek()) {
case Token::LBRACK: {
Consume(Token::LBRACK);
- int pos = scanner().location().beg_pos;
+ int pos = position();
Expression* index = ParseExpression(true, CHECK_OK);
result = factory()->NewProperty(result, index, pos);
if (fni_ != NULL) {
@@ -3463,7 +3434,7 @@ Expression* Parser::ParseMemberWithNewPrefixesExpression(PositionStack* stack,
fni_->PushLiteralName(index->AsLiteral()->AsPropertyName());
} else {
fni_->PushLiteralName(
- isolate()->factory()->anonymous_function_symbol());
+ isolate()->factory()->anonymous_function_string());
}
}
Expect(Token::RBRACK, CHECK_OK);
@@ -3471,10 +3442,10 @@ Expression* Parser::ParseMemberWithNewPrefixesExpression(PositionStack* stack,
}
case Token::PERIOD: {
Consume(Token::PERIOD);
- int pos = scanner().location().beg_pos;
+ int pos = position();
Handle<String> name = ParseIdentifierName(CHECK_OK);
- result =
- factory()->NewProperty(result, factory()->NewLiteral(name), pos);
+ result = factory()->NewProperty(
+ result, factory()->NewLiteral(name, pos), pos);
if (fni_ != NULL) fni_->PushLiteralName(name);
break;
}
@@ -3482,8 +3453,8 @@ Expression* Parser::ParseMemberWithNewPrefixesExpression(PositionStack* stack,
if ((stack == NULL) || stack->is_empty()) return result;
// Consume one of the new prefixes (already parsed).
ZoneList<Expression*>* args = ParseArguments(CHECK_OK);
- int last = stack->pop();
- result = factory()->NewCallNew(result, args, last);
+ int pos = stack->pop();
+ result = factory()->NewCallNew(result, args, pos);
break;
}
default:
@@ -3500,9 +3471,10 @@ DebuggerStatement* Parser::ParseDebuggerStatement(bool* ok) {
// DebuggerStatement ::
// 'debugger' ';'
+ int pos = peek_position();
Expect(Token::DEBUGGER, CHECK_OK);
ExpectSemicolon(CHECK_OK);
- return factory()->NewDebuggerStatement();
+ return factory()->NewDebuggerStatement(pos);
}
@@ -3510,7 +3482,7 @@ void Parser::ReportUnexpectedToken(Token::Value token) {
// We don't report stack overflows here, to avoid increasing the
// stack depth even further. Instead we report it after parsing is
// over, in ParseProgram/ParseJson.
- if (token == Token::ILLEGAL && stack_overflow_) return;
+ if (token == Token::ILLEGAL && stack_overflow()) return;
// Four of the tokens are treated specially
switch (token) {
case Token::EOS:
@@ -3527,6 +3499,7 @@ void Parser::ReportUnexpectedToken(Token::Value token) {
case Token::FUTURE_RESERVED_WORD:
return ReportMessage("unexpected_reserved",
Vector<const char*>::empty());
+ case Token::YIELD:
case Token::FUTURE_STRICT_RESERVED_WORD:
return ReportMessage(top_scope_->is_classic_mode() ?
"unexpected_token_identifier" :
@@ -3563,6 +3536,7 @@ Expression* Parser::ParsePrimaryExpression(bool* ok) {
// RegExpLiteral
// '(' Expression ')'
+ int pos = peek_position();
Expression* result = NULL;
switch (peek()) {
case Token::THIS: {
@@ -3573,20 +3547,21 @@ Expression* Parser::ParsePrimaryExpression(bool* ok) {
case Token::NULL_LITERAL:
Consume(Token::NULL_LITERAL);
- result = factory()->NewLiteral(isolate()->factory()->null_value());
+ result = factory()->NewLiteral(isolate()->factory()->null_value(), pos);
break;
case Token::TRUE_LITERAL:
Consume(Token::TRUE_LITERAL);
- result = factory()->NewLiteral(isolate()->factory()->true_value());
+ result = factory()->NewLiteral(isolate()->factory()->true_value(), pos);
break;
case Token::FALSE_LITERAL:
Consume(Token::FALSE_LITERAL);
- result = factory()->NewLiteral(isolate()->factory()->false_value());
+ result = factory()->NewLiteral(isolate()->factory()->false_value(), pos);
break;
case Token::IDENTIFIER:
+ case Token::YIELD:
case Token::FUTURE_STRICT_RESERVED_WORD: {
Handle<String> name = ParseIdentifier(CHECK_OK);
if (fni_ != NULL) fni_->PushVariableName(name);
@@ -3596,8 +3571,7 @@ Expression* Parser::ParsePrimaryExpression(bool* ok) {
PrintF("# Variable %s ", name->ToAsciiArray());
#endif
Interface* interface = Interface::NewUnknown(zone());
- result = top_scope_->NewUnresolved(
- factory(), name, interface, scanner().location().beg_pos);
+ result = top_scope_->NewUnresolved(factory(), name, interface, pos);
break;
}
@@ -3606,15 +3580,16 @@ Expression* Parser::ParsePrimaryExpression(bool* ok) {
ASSERT(scanner().is_literal_ascii());
double value = StringToDouble(isolate()->unicode_cache(),
scanner().literal_ascii_string(),
- ALLOW_HEX | ALLOW_OCTALS);
- result = factory()->NewNumberLiteral(value);
+ ALLOW_HEX | ALLOW_OCTAL |
+ ALLOW_IMPLICIT_OCTAL | ALLOW_BINARY);
+ result = factory()->NewNumberLiteral(value, pos);
break;
}
case Token::STRING: {
Consume(Token::STRING);
- Handle<String> symbol = GetSymbol(CHECK_OK);
- result = factory()->NewLiteral(symbol);
+ Handle<String> symbol = GetSymbol();
+ result = factory()->NewLiteral(symbol, pos);
if (fni_ != NULL) fni_->PushLiteralName(symbol);
break;
}
@@ -3645,7 +3620,7 @@ Expression* Parser::ParsePrimaryExpression(bool* ok) {
break;
case Token::MOD:
- if (allow_natives_syntax_ || extension_ != NULL) {
+ if (allow_natives_syntax() || extension_ != NULL) {
result = ParseV8Intrinsic(CHECK_OK);
break;
}
@@ -3664,43 +3639,17 @@ Expression* Parser::ParsePrimaryExpression(bool* ok) {
}
-void Parser::BuildArrayLiteralBoilerplateLiterals(ZoneList<Expression*>* values,
- Handle<FixedArray> literals,
- bool* is_simple,
- int* depth) {
- // Fill in the literals.
- // Accumulate output values in local variables.
- bool is_simple_acc = true;
- int depth_acc = 1;
- for (int i = 0; i < values->length(); i++) {
- MaterializedLiteral* m_literal = values->at(i)->AsMaterializedLiteral();
- if (m_literal != NULL && m_literal->depth() >= depth_acc) {
- depth_acc = m_literal->depth() + 1;
- }
- Handle<Object> boilerplate_value = GetBoilerplateValue(values->at(i));
- if (boilerplate_value->IsUndefined()) {
- literals->set_the_hole(i);
- is_simple_acc = false;
- } else {
- literals->set(i, *boilerplate_value);
- }
- }
-
- *is_simple = is_simple_acc;
- *depth = depth_acc;
-}
-
-
Expression* Parser::ParseArrayLiteral(bool* ok) {
// ArrayLiteral ::
// '[' Expression? (',' Expression?)* ']'
+ int pos = peek_position();
ZoneList<Expression*>* values = new(zone()) ZoneList<Expression*>(4, zone());
Expect(Token::LBRACK, CHECK_OK);
while (peek() != Token::RBRACK) {
Expression* elem;
if (peek() == Token::COMMA) {
- elem = GetLiteralTheHole();
+ elem = GetLiteralTheHole(peek_position());
} else {
elem = ParseAssignmentExpression(true, CHECK_OK);
}
@@ -3715,17 +3664,16 @@ Expression* Parser::ParseArrayLiteral(bool* ok) {
int literal_index = current_function_state_->NextMaterializedLiteralIndex();
// Allocate a fixed array to hold all the object literals.
- Handle<FixedArray> object_literals =
- isolate()->factory()->NewFixedArray(values->length(), TENURED);
- Handle<FixedDoubleArray> double_literals;
- ElementsKind elements_kind = FAST_SMI_ELEMENTS;
- bool has_only_undefined_values = true;
- bool has_hole_values = false;
+ Handle<JSArray> array =
+ isolate()->factory()->NewJSArray(0, FAST_HOLEY_SMI_ELEMENTS);
+ isolate()->factory()->SetElementsCapacityAndLength(
+ array, values->length(), values->length());
// Fill in the literals.
Heap* heap = isolate()->heap();
bool is_simple = true;
int depth = 1;
+ bool is_holey = false;
for (int i = 0, n = values->length(); i < n; i++) {
MaterializedLiteral* m_literal = values->at(i)->AsMaterializedLiteral();
if (m_literal != NULL && m_literal->depth() + 1 > depth) {
@@ -3733,95 +3681,37 @@ Expression* Parser::ParseArrayLiteral(bool* ok) {
}
Handle<Object> boilerplate_value = GetBoilerplateValue(values->at(i));
if (boilerplate_value->IsTheHole()) {
- has_hole_values = true;
- object_literals->set_the_hole(i);
- if (elements_kind == FAST_DOUBLE_ELEMENTS) {
- double_literals->set_the_hole(i);
- }
- } else if (boilerplate_value->IsUndefined()) {
+ is_holey = true;
+ } else if (boilerplate_value->IsUninitialized()) {
is_simple = false;
- object_literals->set(i, Smi::FromInt(0));
- if (elements_kind == FAST_DOUBLE_ELEMENTS) {
- double_literals->set(i, 0);
- }
+ JSObject::SetOwnElement(
+ array, i, handle(Smi::FromInt(0), isolate()), kNonStrictMode);
} else {
- // Examine each literal element, and adjust the ElementsKind if the
- // literal element is not of a type that can be stored in the current
- // ElementsKind. Start with FAST_SMI_ONLY_ELEMENTS, and transition to
- // FAST_DOUBLE_ELEMENTS and FAST_ELEMENTS as necessary. Always remember
- // the tagged value, no matter what the ElementsKind is in case we
- // ultimately end up in FAST_ELEMENTS.
- has_only_undefined_values = false;
- object_literals->set(i, *boilerplate_value);
- if (elements_kind == FAST_SMI_ELEMENTS) {
- // Smi only elements. Notice if a transition to FAST_DOUBLE_ELEMENTS or
- // FAST_ELEMENTS is required.
- if (!boilerplate_value->IsSmi()) {
- if (boilerplate_value->IsNumber() && FLAG_smi_only_arrays) {
- // Allocate a double array on the FAST_DOUBLE_ELEMENTS transition to
- // avoid over-allocating in TENURED space.
- double_literals = isolate()->factory()->NewFixedDoubleArray(
- values->length(), TENURED);
- // Copy the contents of the FAST_SMI_ONLY_ELEMENT array to the
- // FAST_DOUBLE_ELEMENTS array so that they are in sync.
- for (int j = 0; j < i; ++j) {
- Object* smi_value = object_literals->get(j);
- if (smi_value->IsTheHole()) {
- double_literals->set_the_hole(j);
- } else {
- double_literals->set(j, Smi::cast(smi_value)->value());
- }
- }
- double_literals->set(i, boilerplate_value->Number());
- elements_kind = FAST_DOUBLE_ELEMENTS;
- } else {
- elements_kind = FAST_ELEMENTS;
- }
- }
- } else if (elements_kind == FAST_DOUBLE_ELEMENTS) {
- // Continue to store double values in to FAST_DOUBLE_ELEMENTS arrays
- // until the first value is seen that can't be stored as a double.
- if (boilerplate_value->IsNumber()) {
- double_literals->set(i, boilerplate_value->Number());
- } else {
- elements_kind = FAST_ELEMENTS;
- }
- }
+ JSObject::SetOwnElement(array, i, boilerplate_value, kNonStrictMode);
}
}
- // Very small array literals that don't have a concrete hint about their type
- // from a constant value should default to the slow case to avoid lots of
- // elements transitions on really small objects.
- if (has_only_undefined_values && values->length() <= 2) {
- elements_kind = FAST_ELEMENTS;
- }
+ Handle<FixedArrayBase> element_values(array->elements());
// Simple and shallow arrays can be lazily copied, we transform the
// elements array to a copy-on-write array.
if (is_simple && depth == 1 && values->length() > 0 &&
- elements_kind != FAST_DOUBLE_ELEMENTS) {
- object_literals->set_map(heap->fixed_cow_array_map());
+ array->HasFastSmiOrObjectElements()) {
+ element_values->set_map(heap->fixed_cow_array_map());
}
- Handle<FixedArrayBase> element_values = elements_kind == FAST_DOUBLE_ELEMENTS
- ? Handle<FixedArrayBase>(double_literals)
- : Handle<FixedArrayBase>(object_literals);
-
// Remember both the literal's constant values as well as the ElementsKind
// in a 2-element FixedArray.
- Handle<FixedArray> literals =
- isolate()->factory()->NewFixedArray(2, TENURED);
+ Handle<FixedArray> literals = isolate()->factory()->NewFixedArray(2, TENURED);
- if (has_hole_values || !FLAG_packed_arrays) {
- elements_kind = GetHoleyElementsKind(elements_kind);
- }
+ ElementsKind kind = array->GetElementsKind();
+ kind = is_holey ? GetHoleyElementsKind(kind) : GetPackedElementsKind(kind);
- literals->set(0, Smi::FromInt(elements_kind));
+ literals->set(0, Smi::FromInt(kind));
literals->set(1, *element_values);
return factory()->NewArrayLiteral(
- literals, values, literal_index, is_simple, depth);
+ literals, values, literal_index, is_simple, depth, pos);
}
@@ -3838,43 +3728,34 @@ bool CompileTimeValue::IsCompileTimeValue(Expression* expression) {
}
-bool CompileTimeValue::ArrayLiteralElementNeedsInitialization(
- Expression* value) {
- // If value is a literal the property value is already set in the
- // boilerplate object.
- if (value->AsLiteral() != NULL) return false;
- // If value is a materialized literal the property value is already set
- // in the boilerplate object if it is simple.
- if (CompileTimeValue::IsCompileTimeValue(value)) return false;
- return true;
-}
-
-
-Handle<FixedArray> CompileTimeValue::GetValue(Expression* expression) {
+Handle<FixedArray> CompileTimeValue::GetValue(Isolate* isolate,
+ Expression* expression) {
+ Factory* factory = isolate->factory();
ASSERT(IsCompileTimeValue(expression));
- Handle<FixedArray> result = FACTORY->NewFixedArray(2, TENURED);
+ Handle<FixedArray> result = factory->NewFixedArray(2, TENURED);
ObjectLiteral* object_literal = expression->AsObjectLiteral();
if (object_literal != NULL) {
ASSERT(object_literal->is_simple());
if (object_literal->fast_elements()) {
- result->set(kTypeSlot, Smi::FromInt(OBJECT_LITERAL_FAST_ELEMENTS));
+ result->set(kLiteralTypeSlot, Smi::FromInt(OBJECT_LITERAL_FAST_ELEMENTS));
} else {
- result->set(kTypeSlot, Smi::FromInt(OBJECT_LITERAL_SLOW_ELEMENTS));
+ result->set(kLiteralTypeSlot, Smi::FromInt(OBJECT_LITERAL_SLOW_ELEMENTS));
}
result->set(kElementsSlot, *object_literal->constant_properties());
} else {
ArrayLiteral* array_literal = expression->AsArrayLiteral();
ASSERT(array_literal != NULL && array_literal->is_simple());
- result->set(kTypeSlot, Smi::FromInt(ARRAY_LITERAL));
+ result->set(kLiteralTypeSlot, Smi::FromInt(ARRAY_LITERAL));
result->set(kElementsSlot, *array_literal->constant_elements());
}
return result;
}
-CompileTimeValue::Type CompileTimeValue::GetType(Handle<FixedArray> value) {
- Smi* type_value = Smi::cast(value->get(kTypeSlot));
- return static_cast<Type>(type_value->value());
+CompileTimeValue::LiteralType CompileTimeValue::GetLiteralType(
+ Handle<FixedArray> value) {
+ Smi* literal_type = Smi::cast(value->get(kLiteralTypeSlot));
+ return static_cast<LiteralType>(literal_type->value());
}
@@ -3885,89 +3766,12 @@ Handle<FixedArray> CompileTimeValue::GetElements(Handle<FixedArray> value) {
Handle<Object> Parser::GetBoilerplateValue(Expression* expression) {
if (expression->AsLiteral() != NULL) {
- return expression->AsLiteral()->handle();
+ return expression->AsLiteral()->value();
}
if (CompileTimeValue::IsCompileTimeValue(expression)) {
- return CompileTimeValue::GetValue(expression);
+ return CompileTimeValue::GetValue(isolate(), expression);
}
- return isolate()->factory()->undefined_value();
-}
-
-// Validation per 11.1.5 Object Initialiser
-class ObjectLiteralPropertyChecker {
- public:
- ObjectLiteralPropertyChecker(Parser* parser, LanguageMode language_mode) :
- props_(Literal::Match),
- parser_(parser),
- language_mode_(language_mode) {
- }
-
- void CheckProperty(
- ObjectLiteral::Property* property,
- Scanner::Location loc,
- bool* ok);
-
- private:
- enum PropertyKind {
- kGetAccessor = 0x01,
- kSetAccessor = 0x02,
- kAccessor = kGetAccessor | kSetAccessor,
- kData = 0x04
- };
-
- static intptr_t GetPropertyKind(ObjectLiteral::Property* property) {
- switch (property->kind()) {
- case ObjectLiteral::Property::GETTER:
- return kGetAccessor;
- case ObjectLiteral::Property::SETTER:
- return kSetAccessor;
- default:
- return kData;
- }
- }
-
- HashMap props_;
- Parser* parser_;
- LanguageMode language_mode_;
-};
-
-
-void ObjectLiteralPropertyChecker::CheckProperty(
- ObjectLiteral::Property* property,
- Scanner::Location loc,
- bool* ok) {
- ASSERT(property != NULL);
- Literal* literal = property->key();
- HashMap::Entry* entry = props_.Lookup(literal, literal->Hash(), true);
- intptr_t prev = reinterpret_cast<intptr_t> (entry->value);
- intptr_t curr = GetPropertyKind(property);
-
- // Duplicate data properties are illegal in strict or extended mode.
- if (language_mode_ != CLASSIC_MODE && (curr & prev & kData) != 0) {
- parser_->ReportMessageAt(loc, "strict_duplicate_property",
- Vector<const char*>::empty());
- *ok = false;
- return;
- }
- // Data property conflicting with an accessor.
- if (((curr & kData) && (prev & kAccessor)) ||
- ((prev & kData) && (curr & kAccessor))) {
- parser_->ReportMessageAt(loc, "accessor_data_property",
- Vector<const char*>::empty());
- *ok = false;
- return;
- }
- // Two accessors of the same type conflicting
- if ((curr & prev & kAccessor) != 0) {
- parser_->ReportMessageAt(loc, "accessor_get_set",
- Vector<const char*>::empty());
- *ok = false;
- return;
- }
-
- // Update map
- entry->value = reinterpret_cast<void*> (prev | curr);
- *ok = true;
+ return isolate()->factory()->uninitialized_value();
}
@@ -3976,7 +3780,8 @@ void Parser::BuildObjectLiteralConstantProperties(
Handle<FixedArray> constant_properties,
bool* is_simple,
bool* fast_elements,
- int* depth) {
+ int* depth,
+ bool* may_store_doubles) {
int position = 0;
// Accumulate the value in local variables and store it at the end.
bool is_simple_acc = true;
@@ -3997,9 +3802,20 @@ void Parser::BuildObjectLiteralConstantProperties(
// Add CONSTANT and COMPUTED properties to boilerplate. Use undefined
// value for COMPUTED properties, the real value is filled in at
// runtime. The enumeration order is maintained.
- Handle<Object> key = property->key()->handle();
+ Handle<Object> key = property->key()->value();
Handle<Object> value = GetBoilerplateValue(property->value());
- is_simple_acc = is_simple_acc && !value->IsUndefined();
+
+ // Ensure objects that may, at any point in time, contain fields with double
+ // representation are always treated as nested objects. This is true for
+  // computed fields (value is uninitialized), and Smi and double literals
+ // (value->IsNumber()).
+ // TODO(verwaest): Remove once we can store them inline.
+ if (FLAG_track_double_fields &&
+ (value->IsNumber() || value->IsUninitialized())) {
+ *may_store_doubles = true;
+ }
+
+ is_simple_acc = is_simple_acc && !value->IsUninitialized();
// Keep track of the number of elements in the object literal and
// the largest element index. If the largest element index is
@@ -4031,40 +3847,6 @@ void Parser::BuildObjectLiteralConstantProperties(
}
-ObjectLiteral::Property* Parser::ParseObjectLiteralGetSet(bool is_getter,
- bool* ok) {
- // Special handling of getter and setter syntax:
- // { ... , get foo() { ... }, ... , set foo(v) { ... v ... } , ... }
- // We have already read the "get" or "set" keyword.
- Token::Value next = Next();
- bool is_keyword = Token::IsKeyword(next);
- if (next == Token::IDENTIFIER || next == Token::NUMBER ||
- next == Token::FUTURE_RESERVED_WORD ||
- next == Token::FUTURE_STRICT_RESERVED_WORD ||
- next == Token::STRING || is_keyword) {
- Handle<String> name;
- if (is_keyword) {
- name = isolate_->factory()->LookupAsciiSymbol(Token::String(next));
- } else {
- name = GetSymbol(CHECK_OK);
- }
- FunctionLiteral* value =
- ParseFunctionLiteral(name,
- false, // reserved words are allowed here
- RelocInfo::kNoPosition,
- FunctionLiteral::ANONYMOUS_EXPRESSION,
- CHECK_OK);
- // Allow any number of parameters for compatibilty with JSC.
- // Specification only allows zero parameters for get and one for set.
- return factory()->NewObjectLiteralProperty(is_getter, value);
- } else {
- ReportUnexpectedToken(next);
- *ok = false;
- return NULL;
- }
-}
-
-
Expression* Parser::ParseObjectLiteral(bool* ok) {
// ObjectLiteral ::
// '{' (
@@ -4072,12 +3854,13 @@ Expression* Parser::ParseObjectLiteral(bool* ok) {
// | (('get' | 'set') (IdentifierName | String | Number) FunctionLiteral)
// )*[','] '}'
+ int pos = peek_position();
ZoneList<ObjectLiteral::Property*>* properties =
new(zone()) ZoneList<ObjectLiteral::Property*>(4, zone());
int number_of_boilerplate_properties = 0;
bool has_function = false;
- ObjectLiteralPropertyChecker checker(this, top_scope_->language_mode());
+ ObjectLiteralChecker checker(this, top_scope_->language_mode());
Expect(Token::LBRACE, CHECK_OK);
@@ -4086,9 +3869,7 @@ Expression* Parser::ParseObjectLiteral(bool* ok) {
Literal* key = NULL;
Token::Value next = peek();
-
- // Location of the property name token
- Scanner::Location loc = scanner().peek_location();
+ int next_pos = peek_position();
switch (next) {
case Token::FUTURE_RESERVED_WORD:
@@ -4101,39 +3882,66 @@ Expression* Parser::ParseObjectLiteral(bool* ok) {
if (fni_ != NULL) fni_->PushLiteralName(id);
if ((is_getter || is_setter) && peek() != Token::COLON) {
- // Update loc to point to the identifier
- loc = scanner().peek_location();
- ObjectLiteral::Property* property =
- ParseObjectLiteralGetSet(is_getter, CHECK_OK);
- if (IsBoilerplateProperty(property)) {
- number_of_boilerplate_properties++;
- }
- // Validate the property.
- checker.CheckProperty(property, loc, CHECK_OK);
- properties->Add(property, zone());
- if (peek() != Token::RBRACE) Expect(Token::COMMA, CHECK_OK);
-
- if (fni_ != NULL) {
- fni_->Infer();
- fni_->Leave();
- }
- continue; // restart the while
+ // Special handling of getter and setter syntax:
+ // { ... , get foo() { ... }, ... , set foo(v) { ... v ... } , ... }
+ // We have already read the "get" or "set" keyword.
+ Token::Value next = Next();
+ bool is_keyword = Token::IsKeyword(next);
+ if (next != i::Token::IDENTIFIER &&
+ next != i::Token::FUTURE_RESERVED_WORD &&
+ next != i::Token::FUTURE_STRICT_RESERVED_WORD &&
+ next != i::Token::NUMBER &&
+ next != i::Token::STRING &&
+ !is_keyword) {
+ // Unexpected token.
+ ReportUnexpectedToken(next);
+ *ok = false;
+ return NULL;
+ }
+ // Validate the property.
+ PropertyKind type = is_getter ? kGetterProperty : kSetterProperty;
+ checker.CheckProperty(next, type, CHECK_OK);
+ Handle<String> name = is_keyword
+ ? isolate_->factory()->InternalizeUtf8String(Token::String(next))
+ : GetSymbol();
+ FunctionLiteral* value =
+ ParseFunctionLiteral(name,
+ false, // reserved words are allowed here
+ false, // not a generator
+ RelocInfo::kNoPosition,
+ FunctionLiteral::ANONYMOUS_EXPRESSION,
+ CHECK_OK);
+        // Allow any number of parameters for compatibility with JSC.
+ // Specification only allows zero parameters for get and one for set.
+ ObjectLiteral::Property* property =
+ factory()->NewObjectLiteralProperty(is_getter, value, next_pos);
+ if (IsBoilerplateProperty(property)) {
+ number_of_boilerplate_properties++;
+ }
+ properties->Add(property, zone());
+ if (peek() != Token::RBRACE) Expect(Token::COMMA, CHECK_OK);
+
+ if (fni_ != NULL) {
+ fni_->Infer();
+ fni_->Leave();
+ }
+ continue; // restart the while
}
// Failed to parse as get/set property, so it's just a property
// called "get" or "set".
- key = factory()->NewLiteral(id);
+ key = factory()->NewLiteral(id, next_pos);
break;
}
case Token::STRING: {
Consume(Token::STRING);
- Handle<String> string = GetSymbol(CHECK_OK);
+ Handle<String> string = GetSymbol();
if (fni_ != NULL) fni_->PushLiteralName(string);
uint32_t index;
if (!string.is_null() && string->AsArrayIndex(&index)) {
- key = factory()->NewNumberLiteral(index);
+ key = factory()->NewNumberLiteral(index, next_pos);
break;
}
- key = factory()->NewLiteral(string);
+ key = factory()->NewLiteral(string, next_pos);
break;
}
case Token::NUMBER: {
@@ -4141,15 +3949,16 @@ Expression* Parser::ParseObjectLiteral(bool* ok) {
ASSERT(scanner().is_literal_ascii());
double value = StringToDouble(isolate()->unicode_cache(),
scanner().literal_ascii_string(),
- ALLOW_HEX | ALLOW_OCTALS);
- key = factory()->NewNumberLiteral(value);
+ ALLOW_HEX | ALLOW_OCTAL |
+ ALLOW_IMPLICIT_OCTAL | ALLOW_BINARY);
+ key = factory()->NewNumberLiteral(value, next_pos);
break;
}
default:
if (Token::IsKeyword(next)) {
Consume(next);
- Handle<String> string = GetSymbol(CHECK_OK);
- key = factory()->NewLiteral(string);
+ Handle<String> string = GetSymbol();
+ key = factory()->NewLiteral(string, next_pos);
} else {
// Unexpected token.
Token::Value next = Next();
@@ -4159,6 +3968,9 @@ Expression* Parser::ParseObjectLiteral(bool* ok) {
}
}
+ // Validate the property
+ checker.CheckProperty(next, kValueProperty, CHECK_OK);
+
Expect(Token::COLON, CHECK_OK);
Expression* value = ParseAssignmentExpression(true, CHECK_OK);
@@ -4176,8 +3988,6 @@ Expression* Parser::ParseObjectLiteral(bool* ok) {
// Count CONSTANT or COMPUTED properties to maintain the enumeration order.
if (IsBoilerplateProperty(property)) number_of_boilerplate_properties++;
- // Validate the property
- checker.CheckProperty(property, loc, CHECK_OK);
properties->Add(property, zone());
// TODO(1240767): Consider allowing trailing comma.
@@ -4199,22 +4009,27 @@ Expression* Parser::ParseObjectLiteral(bool* ok) {
bool is_simple = true;
bool fast_elements = true;
int depth = 1;
+ bool may_store_doubles = false;
BuildObjectLiteralConstantProperties(properties,
constant_properties,
&is_simple,
&fast_elements,
- &depth);
+ &depth,
+ &may_store_doubles);
return factory()->NewObjectLiteral(constant_properties,
properties,
literal_index,
is_simple,
fast_elements,
depth,
- has_function);
+ may_store_doubles,
+ has_function,
+ pos);
}
Expression* Parser::ParseRegExpLiteral(bool seen_equal, bool* ok) {
+ int pos = peek_position();
if (!scanner().ScanRegExpPattern(seen_equal)) {
Next();
ReportMessage("unterminated_regexp", Vector<const char*>::empty());
@@ -4229,7 +4044,7 @@ Expression* Parser::ParseRegExpLiteral(bool seen_equal, bool* ok) {
Handle<String> js_flags = NextLiteralString(TENURED);
Next();
- return factory()->NewRegExpLiteral(js_pattern, js_flags, literal_index);
+ return factory()->NewRegExpLiteral(js_pattern, js_flags, literal_index, pos);
}
@@ -4350,14 +4165,19 @@ class SingletonLogger : public ParserRecorder {
};
-FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> function_name,
- bool name_is_strict_reserved,
- int function_token_position,
- FunctionLiteral::Type type,
- bool* ok) {
+FunctionLiteral* Parser::ParseFunctionLiteral(
+ Handle<String> function_name,
+ bool name_is_strict_reserved,
+ bool is_generator,
+ int function_token_pos,
+ FunctionLiteral::FunctionType function_type,
+ bool* ok) {
// Function ::
// '(' FormalParameterList? ')' '{' FunctionBody '}'
+ int pos = function_token_pos == RelocInfo::kNoPosition
+ ? peek_position() : function_token_pos;
+
// Anonymous functions were passed either the empty symbol or a null
// handle as the function name. Remember if we were passed a non-empty
// handle to decide whether to invoke function name inference.
@@ -4365,32 +4185,79 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> function_name,
// We want a non-null handle as the function name.
if (should_infer_name) {
- function_name = isolate()->factory()->empty_symbol();
+ function_name = isolate()->factory()->empty_string();
}
int num_parameters = 0;
// Function declarations are function scoped in normal mode, so they are
// hoisted. In harmony block scoping mode they are block scoped, so they
// are not hoisted.
- Scope* scope = (type == FunctionLiteral::DECLARATION && !is_extended_mode())
- ? NewScope(top_scope_->DeclarationScope(), FUNCTION_SCOPE)
- : NewScope(top_scope_, FUNCTION_SCOPE);
+ //
+  // One tricky case is function declarations in a local sloppy-mode eval:
+ // their declaration is hoisted, but they still see the local scope. E.g.,
+ //
+ // function() {
+ // var x = 0
+ // try { throw 1 } catch (x) { eval("function g() { return x }") }
+ // return g()
+ // }
+ //
+ // needs to return 1. To distinguish such cases, we need to detect
+ // (1) whether a function stems from a sloppy eval, and
+ // (2) whether it actually hoists across the eval.
+ // Unfortunately, we do not represent sloppy eval scopes, so we do not have
+ // either information available directly, especially not when lazily compiling
+ // a function like 'g'. We hence rely on the following invariants:
+ // - (1) is the case iff the innermost scope of the deserialized scope chain
+ // under which we compile is _not_ a declaration scope. This holds because
+ // in all normal cases, function declarations are fully hoisted to a
+ // declaration scope and compiled relative to that.
+ // - (2) is the case iff the current declaration scope is still the original
+ // one relative to the deserialized scope chain. Otherwise we must be
+ // compiling a function in an inner declaration scope in the eval, e.g. a
+ // nested function, and hoisting works normally relative to that.
+ Scope* declaration_scope = top_scope_->DeclarationScope();
+ Scope* original_declaration_scope = original_scope_->DeclarationScope();
+ Scope* scope =
+ function_type == FunctionLiteral::DECLARATION && !is_extended_mode() &&
+ (original_scope_ == original_declaration_scope ||
+ declaration_scope != original_declaration_scope)
+ ? NewScope(declaration_scope, FUNCTION_SCOPE)
+ : NewScope(top_scope_, FUNCTION_SCOPE);
ZoneList<Statement*>* body = NULL;
int materialized_literal_count = -1;
int expected_property_count = -1;
int handler_count = 0;
- bool only_simple_this_property_assignments;
- Handle<FixedArray> this_property_assignments;
FunctionLiteral::ParameterFlag duplicate_parameters =
FunctionLiteral::kNoDuplicateParameters;
FunctionLiteral::IsParenthesizedFlag parenthesized = parenthesized_function_
? FunctionLiteral::kIsParenthesized
: FunctionLiteral::kNotParenthesized;
+ FunctionLiteral::IsGeneratorFlag generator = is_generator
+ ? FunctionLiteral::kIsGenerator
+ : FunctionLiteral::kNotGenerator;
AstProperties ast_properties;
+ BailoutReason dont_optimize_reason = kNoReason;
// Parse function body.
{ FunctionState function_state(this, scope, isolate());
top_scope_->SetScopeName(function_name);
+ if (is_generator) {
+ // For generators, allocating variables in contexts is currently a win
+ // because it minimizes the work needed to suspend and resume an
+ // activation.
+ top_scope_->ForceContextAllocation();
+
+ // Calling a generator returns a generator object. That object is stored
+ // in a temporary variable, a definition that is used by "yield"
+ // expressions. Presence of a variable for the generator object in the
+ // FunctionState indicates that this function is a generator.
+ Handle<String> tempname = isolate()->factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR(".generator_object"));
+ Variable* temp = top_scope_->DeclarationScope()->NewTemporary(tempname);
+ function_state.set_generator_object_variable(temp);
+ }
+
// FormalParameterList ::
// '(' (Identifier)*[','] ')'
Expect(Token::LPAREN, CHECK_OK);
@@ -4441,15 +4308,15 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> function_name,
// instead of Variables and Proxies as is the case now.
Variable* fvar = NULL;
Token::Value fvar_init_op = Token::INIT_CONST;
- if (type == FunctionLiteral::NAMED_EXPRESSION) {
+ if (function_type == FunctionLiteral::NAMED_EXPRESSION) {
if (is_extended_mode()) fvar_init_op = Token::INIT_CONST_HARMONY;
VariableMode fvar_mode = is_extended_mode() ? CONST_HARMONY : CONST;
fvar = new(zone()) Variable(top_scope_,
function_name, fvar_mode, true /* is valid LHS */,
Variable::NORMAL, kCreatedInitialized, Interface::NewConst());
VariableProxy* proxy = factory()->NewVariableProxy(fvar);
- VariableDeclaration* fvar_declaration =
- factory()->NewVariableDeclaration(proxy, fvar_mode, top_scope_);
+ VariableDeclaration* fvar_declaration = factory()->NewVariableDeclaration(
+ proxy, fvar_mode, top_scope_, RelocInfo::kNoPosition);
top_scope_->DeclareFunctionVar(fvar_declaration);
}
@@ -4470,13 +4337,13 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> function_name,
parenthesized_function_ = false; // The bit was set for this function only.
if (is_lazily_compiled) {
- int function_block_pos = scanner().location().beg_pos;
+ int function_block_pos = position();
FunctionEntry entry;
- if (pre_data_ != NULL) {
- // If we have pre_data_, we use it to skip parsing the function body.
- // the preparser data contains the information we need to construct the
- // lazy function.
- entry = pre_data()->GetFunctionEntry(function_block_pos);
+ if (pre_parse_data_ != NULL) {
+ // If we have pre_parse_data_, we use it to skip parsing the function
+ // body. The preparser data contains the information we need to
+ // construct the lazy function.
+ entry = pre_parse_data()->GetFunctionEntry(function_block_pos);
if (entry.is_valid()) {
if (entry.end_pos() <= function_block_pos) {
// End position greater than end of stream is safe, and hard
@@ -4492,8 +4359,6 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> function_name,
materialized_literal_count = entry.literal_count();
expected_property_count = entry.property_count();
top_scope_->SetLanguageMode(entry.language_mode());
- only_simple_this_property_assignments = false;
- this_property_assignments = isolate()->factory()->empty_fixed_array();
} else {
is_lazily_compiled = false;
}
@@ -4502,11 +4367,10 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> function_name,
// building an AST. This gathers the data needed to build a lazy
// function.
SingletonLogger logger;
- preparser::PreParser::PreParseResult result =
- LazyParseFunctionLiteral(&logger);
- if (result == preparser::PreParser::kPreParseStackOverflow) {
+ PreParser::PreParseResult result = LazyParseFunctionLiteral(&logger);
+ if (result == PreParser::kPreParseStackOverflow) {
// Propagate stack overflow.
- stack_overflow_ = true;
+ set_stack_overflow();
*ok = false;
return NULL;
}
@@ -4528,8 +4392,6 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> function_name,
materialized_literal_count = logger.literals();
expected_property_count = logger.properties();
top_scope_->SetLanguageMode(logger.language_mode());
- only_simple_this_property_assignments = false;
- this_property_assignments = isolate()->factory()->empty_fixed_array();
}
}
@@ -4543,18 +4405,47 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> function_name,
body->Add(factory()->NewExpressionStatement(
factory()->NewAssignment(fvar_init_op,
fproxy,
- factory()->NewThisFunction(),
- RelocInfo::kNoPosition)),
- zone());
+ factory()->NewThisFunction(pos),
+ RelocInfo::kNoPosition),
+ RelocInfo::kNoPosition), zone());
+ }
+
+ // For generators, allocate and yield an iterator on function entry.
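+      // In effect the generator body receives an implicit prologue roughly of
+      // the form
+      //
+      //   .generator_object = %CreateJSGeneratorObject();
+      //   yield .generator_object;  // initial, pre-body suspension
+      //
+      // where '.generator_object' is the temporary allocated above (a sketch,
+      // not literal generated code).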
+ if (is_generator) {
+ ZoneList<Expression*>* arguments =
+ new(zone()) ZoneList<Expression*>(0, zone());
+ CallRuntime* allocation = factory()->NewCallRuntime(
+ isolate()->factory()->empty_string(),
+ Runtime::FunctionForId(Runtime::kCreateJSGeneratorObject),
+ arguments, pos);
+ VariableProxy* init_proxy = factory()->NewVariableProxy(
+ current_function_state_->generator_object_variable());
+ Assignment* assignment = factory()->NewAssignment(
+ Token::INIT_VAR, init_proxy, allocation, RelocInfo::kNoPosition);
+ VariableProxy* get_proxy = factory()->NewVariableProxy(
+ current_function_state_->generator_object_variable());
+ Yield* yield = factory()->NewYield(
+ get_proxy, assignment, Yield::INITIAL, RelocInfo::kNoPosition);
+ body->Add(factory()->NewExpressionStatement(
+ yield, RelocInfo::kNoPosition), zone());
}
+
ParseSourceElements(body, Token::RBRACE, false, false, CHECK_OK);
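+    // A generator body that runs off its end performs one final implicit
+    // 'yield undefined' (Yield::FINAL), marking the generator as completed.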
+ if (is_generator) {
+ VariableProxy* get_proxy = factory()->NewVariableProxy(
+ current_function_state_->generator_object_variable());
+ Expression *undefined = factory()->NewLiteral(
+ isolate()->factory()->undefined_value(), RelocInfo::kNoPosition);
+ Yield* yield = factory()->NewYield(
+ get_proxy, undefined, Yield::FINAL, RelocInfo::kNoPosition);
+ body->Add(factory()->NewExpressionStatement(
+ yield, RelocInfo::kNoPosition), zone());
+ }
+
materialized_literal_count = function_state.materialized_literal_count();
expected_property_count = function_state.expected_property_count();
handler_count = function_state.handler_count();
- only_simple_this_property_assignments =
- function_state.only_simple_this_property_assignments();
- this_property_assignments = function_state.this_property_assignments();
Expect(Token::RBRACE, CHECK_OK);
scope->set_end_position(scanner().location().end_pos);
@@ -4564,9 +4455,8 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> function_name,
if (!top_scope_->is_classic_mode()) {
if (IsEvalOrArguments(function_name)) {
int start_pos = scope->start_position();
- int position = function_token_position != RelocInfo::kNoPosition
- ? function_token_position
- : (start_pos > 0 ? start_pos - 1 : start_pos);
+ int position = function_token_pos != RelocInfo::kNoPosition
+ ? function_token_pos : (start_pos > 0 ? start_pos - 1 : start_pos);
Scanner::Location location = Scanner::Location(position, start_pos);
ReportMessageAt(location,
"strict_function_name", Vector<const char*>::empty());
@@ -4587,9 +4477,8 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> function_name,
}
if (name_is_strict_reserved) {
int start_pos = scope->start_position();
- int position = function_token_position != RelocInfo::kNoPosition
- ? function_token_position
- : (start_pos > 0 ? start_pos - 1 : start_pos);
+ int position = function_token_pos != RelocInfo::kNoPosition
+ ? function_token_pos : (start_pos > 0 ? start_pos - 1 : start_pos);
Scanner::Location location = Scanner::Location(position, start_pos);
ReportMessageAt(location, "strict_reserved_word",
Vector<const char*>::empty());
@@ -4607,6 +4496,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> function_name,
CHECK_OK);
}
ast_properties = *factory()->visitor()->ast_properties();
+ dont_optimize_reason = factory()->visitor()->dont_optimize_reason();
}
if (is_extended_mode()) {
@@ -4620,38 +4510,42 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> function_name,
materialized_literal_count,
expected_property_count,
handler_count,
- only_simple_this_property_assignments,
- this_property_assignments,
num_parameters,
duplicate_parameters,
- type,
+ function_type,
FunctionLiteral::kIsFunction,
- parenthesized);
- function_literal->set_function_token_position(function_token_position);
+ parenthesized,
+ generator,
+ pos);
+ function_literal->set_function_token_position(function_token_pos);
function_literal->set_ast_properties(&ast_properties);
+ function_literal->set_dont_optimize_reason(dont_optimize_reason);
if (fni_ != NULL && should_infer_name) fni_->AddFunction(function_literal);
return function_literal;
}
-preparser::PreParser::PreParseResult Parser::LazyParseFunctionLiteral(
+PreParser::PreParseResult Parser::LazyParseFunctionLiteral(
SingletonLogger* logger) {
HistogramTimerScope preparse_scope(isolate()->counters()->pre_parse());
ASSERT_EQ(Token::LBRACE, scanner().current_token());
if (reusable_preparser_ == NULL) {
intptr_t stack_limit = isolate()->stack_guard()->real_climit();
- bool do_allow_lazy = true;
- reusable_preparser_ = new preparser::PreParser(&scanner_,
- NULL,
- stack_limit,
- do_allow_lazy,
- allow_natives_syntax_,
- allow_modules_);
- }
- preparser::PreParser::PreParseResult result =
+ reusable_preparser_ = new PreParser(&scanner_, NULL, stack_limit);
+ reusable_preparser_->set_allow_harmony_scoping(allow_harmony_scoping());
+ reusable_preparser_->set_allow_modules(allow_modules());
+ reusable_preparser_->set_allow_natives_syntax(allow_natives_syntax());
+ reusable_preparser_->set_allow_lazy(true);
+ reusable_preparser_->set_allow_generators(allow_generators());
+ reusable_preparser_->set_allow_for_of(allow_for_of());
+ reusable_preparser_->set_allow_harmony_numeric_literals(
+ allow_harmony_numeric_literals());
+ }
+ PreParser::PreParseResult result =
reusable_preparser_->PreParseLazyFunction(top_scope_->language_mode(),
+ is_generator(),
logger);
return result;
}
@@ -4661,6 +4555,7 @@ Expression* Parser::ParseV8Intrinsic(bool* ok) {
// CallRuntime ::
// '%' Identifier Arguments
+ int pos = peek_position();
Expect(Token::MOD, CHECK_OK);
Handle<String> name = ParseIdentifier(CHECK_OK);
ZoneList<Expression*>* args = ParseArguments(CHECK_OK);
@@ -4671,7 +4566,7 @@ Expression* Parser::ParseV8Intrinsic(bool* ok) {
top_scope_->DeclarationScope()->ForceEagerCompilation();
}
- const Runtime::Function* function = Runtime::FunctionForSymbol(name);
+ const Runtime::Function* function = Runtime::FunctionForName(name);
// Check for built-in IS_VAR macro.
if (function != NULL &&
@@ -4683,7 +4578,7 @@ Expression* Parser::ParseV8Intrinsic(bool* ok) {
if (args->length() == 1 && args->at(0)->AsVariableProxy() != NULL) {
return args->at(0);
} else {
- ReportMessage("unable_to_parse", Vector<const char*>::empty());
+ ReportMessage("not_isvar", Vector<const char*>::empty());
*ok = false;
return NULL;
}
@@ -4706,45 +4601,30 @@ Expression* Parser::ParseV8Intrinsic(bool* ok) {
}
// We have a valid intrinsics call or a call to a builtin.
- return factory()->NewCallRuntime(name, function, args);
+ return factory()->NewCallRuntime(name, function, args, pos);
}
-bool Parser::peek_any_identifier() {
+bool ParserBase::peek_any_identifier() {
Token::Value next = peek();
return next == Token::IDENTIFIER ||
next == Token::FUTURE_RESERVED_WORD ||
- next == Token::FUTURE_STRICT_RESERVED_WORD;
-}
-
-
-void Parser::Consume(Token::Value token) {
- Token::Value next = Next();
- USE(next);
- USE(token);
- ASSERT(next == token);
+ next == Token::FUTURE_STRICT_RESERVED_WORD ||
+ next == Token::YIELD;
}
-void Parser::Expect(Token::Value token, bool* ok) {
- Token::Value next = Next();
- if (next == token) return;
- ReportUnexpectedToken(next);
- *ok = false;
-}
-
-
-bool Parser::Check(Token::Value token) {
- Token::Value next = peek();
- if (next == token) {
- Consume(next);
+bool ParserBase::CheckContextualKeyword(Vector<const char> keyword) {
+ if (peek() == Token::IDENTIFIER &&
+ scanner()->is_next_contextual_keyword(keyword)) {
+ Consume(Token::IDENTIFIER);
return true;
}
return false;
}
-void Parser::ExpectSemicolon(bool* ok) {
+void ParserBase::ExpectSemicolon(bool* ok) {
// Check for automatic semicolon insertion according to
// the rules given in ECMA-262, section 7.9, page 21.
Token::Value tok = peek();
@@ -4752,7 +4632,7 @@ void Parser::ExpectSemicolon(bool* ok) {
Next();
return;
}
- if (scanner().HasAnyLineTerminatorBeforeNext() ||
+ if (scanner()->HasAnyLineTerminatorBeforeNext() ||
tok == Token::RBRACE ||
tok == Token::EOS) {
return;
@@ -4761,38 +4641,42 @@ void Parser::ExpectSemicolon(bool* ok) {
}
-void Parser::ExpectContextualKeyword(const char* keyword, bool* ok) {
+void ParserBase::ExpectContextualKeyword(Vector<const char> keyword, bool* ok) {
Expect(Token::IDENTIFIER, ok);
if (!*ok) return;
- Handle<String> symbol = GetSymbol(ok);
- if (!*ok) return;
- if (!symbol->IsEqualTo(CStrVector(keyword))) {
+ if (!scanner()->is_literal_contextual_keyword(keyword)) {
+ ReportUnexpectedToken(scanner()->current_token());
*ok = false;
- ReportUnexpectedToken(scanner().current_token());
}
}
-Literal* Parser::GetLiteralUndefined() {
- return factory()->NewLiteral(isolate()->factory()->undefined_value());
+Literal* Parser::GetLiteralUndefined(int position) {
+ return factory()->NewLiteral(
+ isolate()->factory()->undefined_value(), position);
}
-Literal* Parser::GetLiteralTheHole() {
- return factory()->NewLiteral(isolate()->factory()->the_hole_value());
+Literal* Parser::GetLiteralTheHole(int position) {
+ return factory()->NewLiteral(
+ isolate()->factory()->the_hole_value(), RelocInfo::kNoPosition);
}
// Parses an identifier that is valid for the current scope, in particular it
// fails on strict mode future reserved keywords in a strict scope.
Handle<String> Parser::ParseIdentifier(bool* ok) {
- if (!top_scope_->is_classic_mode()) {
- Expect(Token::IDENTIFIER, ok);
- } else if (!Check(Token::IDENTIFIER)) {
- Expect(Token::FUTURE_STRICT_RESERVED_WORD, ok);
+ Token::Value next = Next();
+ if (next == Token::IDENTIFIER ||
+ (top_scope_->is_classic_mode() &&
+ (next == Token::FUTURE_STRICT_RESERVED_WORD ||
+ (next == Token::YIELD && !is_generator())))) {
+ return GetSymbol();
+ } else {
+ ReportUnexpectedToken(next);
+ *ok = false;
+ return Handle<String>();
}
- if (!*ok) return Handle<String>();
- return GetSymbol(ok);
}
@@ -4800,13 +4684,18 @@ Handle<String> Parser::ParseIdentifier(bool* ok) {
// whether it is strict mode future reserved.
Handle<String> Parser::ParseIdentifierOrStrictReservedWord(
bool* is_strict_reserved, bool* ok) {
- *is_strict_reserved = false;
- if (!Check(Token::IDENTIFIER)) {
- Expect(Token::FUTURE_STRICT_RESERVED_WORD, ok);
+ Token::Value next = Next();
+ if (next == Token::IDENTIFIER) {
+ *is_strict_reserved = false;
+ } else if (next == Token::FUTURE_STRICT_RESERVED_WORD ||
+ (next == Token::YIELD && !is_generator())) {
*is_strict_reserved = true;
+ } else {
+ ReportUnexpectedToken(next);
+ *ok = false;
+ return Handle<String>();
}
- if (!*ok) return Handle<String>();
- return GetSymbol(ok);
+ return GetSymbol();
}
@@ -4820,7 +4709,7 @@ Handle<String> Parser::ParseIdentifierName(bool* ok) {
*ok = false;
return Handle<String>();
}
- return GetSymbol(ok);
+ return GetSymbol();
}
@@ -4852,14 +4741,11 @@ void Parser::CheckStrictModeLValue(Expression* expression,
// Checks whether an octal literal was last seen between beg_pos and end_pos.
// If so, reports an error. Only called for strict mode.
-void Parser::CheckOctalLiteral(int beg_pos, int end_pos, bool* ok) {
- Scanner::Location octal = scanner().octal_position();
- if (octal.IsValid() &&
- beg_pos <= octal.beg_pos &&
- octal.end_pos <= end_pos) {
- ReportMessageAt(octal, "strict_octal_literal",
- Vector<const char*>::empty());
- scanner().clear_octal_position();
+void ParserBase::CheckOctalLiteral(int beg_pos, int end_pos, bool* ok) {
+ Scanner::Location octal = scanner()->octal_position();
+ if (octal.IsValid() && beg_pos <= octal.beg_pos && octal.end_pos <= end_pos) {
+ ReportMessageAt(octal, "strict_octal_literal");
+ scanner()->clear_octal_position();
*ok = false;
}
}
@@ -4957,22 +4843,22 @@ void Parser::RegisterTargetUse(Label* target, Target* stop) {
}
-Expression* Parser::NewThrowReferenceError(Handle<String> type) {
- return NewThrowError(isolate()->factory()->MakeReferenceError_symbol(),
- type, HandleVector<Object>(NULL, 0));
+Expression* Parser::NewThrowReferenceError(Handle<String> message) {
+ return NewThrowError(isolate()->factory()->MakeReferenceError_string(),
+ message, HandleVector<Object>(NULL, 0));
}
-Expression* Parser::NewThrowSyntaxError(Handle<String> type,
+Expression* Parser::NewThrowSyntaxError(Handle<String> message,
Handle<Object> first) {
int argc = first.is_null() ? 0 : 1;
Vector< Handle<Object> > arguments = HandleVector<Object>(&first, argc);
return NewThrowError(
- isolate()->factory()->MakeSyntaxError_symbol(), type, arguments);
+ isolate()->factory()->MakeSyntaxError_string(), message, arguments);
}
-Expression* Parser::NewThrowTypeError(Handle<String> type,
+Expression* Parser::NewThrowTypeError(Handle<String> message,
Handle<Object> first,
Handle<Object> second) {
ASSERT(!first.is_null() && !second.is_null());
@@ -4980,12 +4866,12 @@ Expression* Parser::NewThrowTypeError(Handle<String> type,
Vector< Handle<Object> > arguments =
HandleVector<Object>(elements, ARRAY_SIZE(elements));
return NewThrowError(
- isolate()->factory()->MakeTypeError_symbol(), type, arguments);
+ isolate()->factory()->MakeTypeError_string(), message, arguments);
}
Expression* Parser::NewThrowError(Handle<String> constructor,
- Handle<String> type,
+ Handle<String> message,
Vector< Handle<Object> > arguments) {
int argc = arguments.length();
Handle<FixedArray> elements = isolate()->factory()->NewFixedArray(argc,
@@ -4999,14 +4885,16 @@ Expression* Parser::NewThrowError(Handle<String> constructor,
Handle<JSArray> array = isolate()->factory()->NewJSArrayWithElements(
elements, FAST_ELEMENTS, TENURED);
+ int pos = position();
ZoneList<Expression*>* args = new(zone()) ZoneList<Expression*>(2, zone());
- args->Add(factory()->NewLiteral(type), zone());
- args->Add(factory()->NewLiteral(array), zone());
+ args->Add(factory()->NewLiteral(message, pos), zone());
+ args->Add(factory()->NewLiteral(array, pos), zone());
CallRuntime* call_constructor =
- factory()->NewCallRuntime(constructor, NULL, args);
- return factory()->NewThrow(call_constructor, scanner().location().beg_pos);
+ factory()->NewCallRuntime(constructor, NULL, args, pos);
+ return factory()->NewThrow(call_constructor, pos);
}
+
// ----------------------------------------------------------------------------
// Regular expressions
@@ -5015,7 +4903,7 @@ RegExpParser::RegExpParser(FlatStringReader* in,
Handle<String>* error,
bool multiline,
Zone* zone)
- : isolate_(Isolate::Current()),
+ : isolate_(zone->isolate()),
zone_(zone),
error_(error),
captures_(NULL),
@@ -5062,6 +4950,7 @@ void RegExpParser::Advance() {
void RegExpParser::Reset(int pos) {
next_pos_ = pos;
+ has_more_ = (pos < in()->length());
Advance();
}
@@ -5076,6 +4965,7 @@ bool RegExpParser::simple() {
return simple_;
}
+
RegExpTree* RegExpParser::ReportError(Vector<const char> message) {
failed_ = true;
*error_ = isolate()->factory()->NewStringFromAscii(message, NOT_TENURED);
@@ -5140,20 +5030,21 @@ RegExpTree* RegExpParser::ParseDisjunction() {
int end_capture_index = captures_started();
int capture_index = stored_state->capture_index();
- SubexpressionType type = stored_state->group_type();
+ SubexpressionType group_type = stored_state->group_type();
// Restore previous state.
stored_state = stored_state->previous_state();
builder = stored_state->builder();
// Build result of subexpression.
- if (type == CAPTURE) {
+ if (group_type == CAPTURE) {
RegExpCapture* capture = new(zone()) RegExpCapture(body, capture_index);
captures_->at(capture_index - 1) = capture;
body = capture;
- } else if (type != GROUPING) {
- ASSERT(type == POSITIVE_LOOKAHEAD || type == NEGATIVE_LOOKAHEAD);
- bool is_positive = (type == POSITIVE_LOOKAHEAD);
+ } else if (group_type != GROUPING) {
+ ASSERT(group_type == POSITIVE_LOOKAHEAD ||
+ group_type == NEGATIVE_LOOKAHEAD);
+ bool is_positive = (group_type == POSITIVE_LOOKAHEAD);
body = new(zone()) RegExpLookahead(body,
is_positive,
end_capture_index - capture_index,
@@ -5187,10 +5078,10 @@ RegExpTree* RegExpParser::ParseDisjunction() {
}
case '$': {
Advance();
- RegExpAssertion::Type type =
+ RegExpAssertion::AssertionType assertion_type =
multiline_ ? RegExpAssertion::END_OF_LINE :
RegExpAssertion::END_OF_INPUT;
- builder->AddAssertion(new(zone()) RegExpAssertion(type));
+ builder->AddAssertion(new(zone()) RegExpAssertion(assertion_type));
continue;
}
case '.': {
@@ -5204,18 +5095,18 @@ RegExpTree* RegExpParser::ParseDisjunction() {
break;
}
case '(': {
- SubexpressionType type = CAPTURE;
+ SubexpressionType subexpr_type = CAPTURE;
Advance();
if (current() == '?') {
switch (Next()) {
case ':':
- type = GROUPING;
+ subexpr_type = GROUPING;
break;
case '=':
- type = POSITIVE_LOOKAHEAD;
+ subexpr_type = POSITIVE_LOOKAHEAD;
break;
case '!':
- type = NEGATIVE_LOOKAHEAD;
+ subexpr_type = NEGATIVE_LOOKAHEAD;
break;
default:
ReportError(CStrVector("Invalid group") CHECK_FAILED);
@@ -5232,7 +5123,7 @@ RegExpTree* RegExpParser::ParseDisjunction() {
captures_->Add(NULL, zone());
}
// Store current state and begin new disjunction parsing.
- stored_state = new(zone()) RegExpParserState(stored_state, type,
+ stored_state = new(zone()) RegExpParserState(stored_state, subexpr_type,
captures_started(), zone());
builder = stored_state->builder();
continue;
@@ -5420,16 +5311,16 @@ RegExpTree* RegExpParser::ParseDisjunction() {
default:
continue;
}
- RegExpQuantifier::Type type = RegExpQuantifier::GREEDY;
+ RegExpQuantifier::QuantifierType quantifier_type = RegExpQuantifier::GREEDY;
if (current() == '?') {
- type = RegExpQuantifier::NON_GREEDY;
+ quantifier_type = RegExpQuantifier::NON_GREEDY;
Advance();
} else if (FLAG_regexp_possessive_quantifier && current() == '+') {
// FLAG_regexp_possessive_quantifier is a debug-only flag.
- type = RegExpQuantifier::POSSESSIVE;
+ quantifier_type = RegExpQuantifier::POSSESSIVE;
Advance();
}
- builder->AddQuantifierToAtom(min, max, type);
+ builder->AddQuantifierToAtom(min, max, quantifier_type);
}
}
@@ -5884,44 +5775,32 @@ int ScriptDataImpl::ReadNumber(byte** source) {
// Create a Scanner for the preparser to use as input, and preparse the source.
-static ScriptDataImpl* DoPreParse(Utf16CharacterStream* source,
- int flags,
- ParserRecorder* recorder) {
- Isolate* isolate = Isolate::Current();
+ScriptDataImpl* PreParserApi::PreParse(Isolate* isolate,
+ Utf16CharacterStream* source) {
+ CompleteParserRecorder recorder;
HistogramTimerScope timer(isolate->counters()->pre_parse());
Scanner scanner(isolate->unicode_cache());
- scanner.SetHarmonyScoping(FLAG_harmony_scoping);
- scanner.Initialize(source);
intptr_t stack_limit = isolate->stack_guard()->real_climit();
- preparser::PreParser::PreParseResult result =
- preparser::PreParser::PreParseProgram(&scanner,
- recorder,
- flags,
- stack_limit);
- if (result == preparser::PreParser::kPreParseStackOverflow) {
+ PreParser preparser(&scanner, &recorder, stack_limit);
+ preparser.set_allow_lazy(true);
+ preparser.set_allow_generators(FLAG_harmony_generators);
+ preparser.set_allow_for_of(FLAG_harmony_iteration);
+ preparser.set_allow_harmony_scoping(FLAG_harmony_scoping);
+ preparser.set_allow_harmony_numeric_literals(FLAG_harmony_numeric_literals);
+ scanner.Initialize(source);
+ PreParser::PreParseResult result = preparser.PreParseProgram();
+ if (result == PreParser::kPreParseStackOverflow) {
isolate->StackOverflow();
return NULL;
}
// Extract the accumulated data from the recorder as a single
// contiguous vector that we are responsible for disposing.
- Vector<unsigned> store = recorder->ExtractData();
+ Vector<unsigned> store = recorder.ExtractData();
return new ScriptDataImpl(store);
}
-ScriptDataImpl* ParserApi::PreParse(Utf16CharacterStream* source,
- v8::Extension* extension,
- int flags) {
- Handle<Script> no_script;
- if (FLAG_lazy && (extension == NULL)) {
- flags |= kAllowLazy;
- }
- CompleteParserRecorder recorder;
- return DoPreParse(source, flags, &recorder);
-}
-
-
bool RegExpParser::ParseRegExp(FlatStringReader* input,
bool multiline,
RegExpCompileData* result,
@@ -5945,48 +5824,35 @@ bool RegExpParser::ParseRegExp(FlatStringReader* input,
}
-bool ParserApi::Parse(CompilationInfo* info, int parsing_flags) {
- ASSERT(info->function() == NULL);
+bool Parser::Parse() {
+ ASSERT(info()->function() == NULL);
FunctionLiteral* result = NULL;
- ASSERT((parsing_flags & kLanguageModeMask) == CLASSIC_MODE);
- if (!info->is_native() && FLAG_harmony_scoping) {
- // Harmony scoping is requested.
- parsing_flags |= EXTENDED_MODE;
- }
- if (!info->is_native() && FLAG_harmony_modules) {
- parsing_flags |= kAllowModules;
- }
- if (FLAG_allow_natives_syntax || info->is_native()) {
- // We require %identifier(..) syntax.
- parsing_flags |= kAllowNativesSyntax;
- }
- if (info->is_lazy()) {
- ASSERT(!info->is_eval());
- Parser parser(info, parsing_flags, NULL, NULL);
- if (info->shared_info()->is_function()) {
- result = parser.ParseLazy();
+ if (info()->is_lazy()) {
+ ASSERT(!info()->is_eval());
+ if (info()->shared_info()->is_function()) {
+ result = ParseLazy();
} else {
- result = parser.ParseProgram();
+ result = ParseProgram();
}
} else {
- ScriptDataImpl* pre_data = info->pre_parse_data();
- Parser parser(info, parsing_flags, info->extension(), pre_data);
- if (pre_data != NULL && pre_data->has_error()) {
- Scanner::Location loc = pre_data->MessageLocation();
- const char* message = pre_data->BuildMessage();
- Vector<const char*> args = pre_data->BuildArgs();
- parser.ReportMessageAt(loc, message, args);
+ ScriptDataImpl* pre_parse_data = info()->pre_parse_data();
+ set_pre_parse_data(pre_parse_data);
+ if (pre_parse_data != NULL && pre_parse_data->has_error()) {
+ Scanner::Location loc = pre_parse_data->MessageLocation();
+ const char* message = pre_parse_data->BuildMessage();
+ Vector<const char*> args = pre_parse_data->BuildArgs();
+ ReportMessageAt(loc, message, args);
DeleteArray(message);
for (int i = 0; i < args.length(); i++) {
DeleteArray(args[i]);
}
DeleteArray(args.start());
- ASSERT(info->isolate()->has_pending_exception());
+ ASSERT(info()->isolate()->has_pending_exception());
} else {
- result = parser.ParseProgram();
+ result = ParseProgram();
}
}
- info->SetFunction(result);
+ info()->SetFunction(result);
return (result != NULL);
}
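
The hunk above retires the old ParserApi::Parse entry point: the explicit parsing_flags argument disappears and the Parser itself now owns the whole job, attaching the resulting FunctionLiteral to the CompilationInfo via SetFunction(). A minimal call-site sketch (illustrative only; ParseAndCheck is a hypothetical helper, and it relies on the static Parser::Parse(CompilationInfo*) wrapper declared in parser.h below):

    // Sketch, not part of the patch; assumes the v8 internal headers.
    static bool ParseAndCheck(v8::internal::CompilationInfo* info) {
      // Parser::Parse(info) builds a Parser on the stack, runs it, and on
      // success stores the FunctionLiteral on the CompilationInfo.
      if (!v8::internal::Parser::Parse(info)) return false;
      return info->function() != NULL;  // set via info->SetFunction(result)
    }
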
diff --git a/deps/v8/src/parser.h b/deps/v8/src/parser.h
index e36a9b3dc..79ce68b61 100644
--- a/deps/v8/src/parser.h
+++ b/deps/v8/src/parser.h
@@ -96,7 +96,6 @@ class FunctionEntry BASE_EMBEDDED {
private:
Vector<unsigned> backing_;
- bool owns_data_;
};
@@ -164,19 +163,18 @@ class ScriptDataImpl : public ScriptData {
};
-class ParserApi {
+class PreParserApi {
public:
- // Parses the source code represented by the compilation info and sets its
- // function literal. Returns false (and deallocates any allocated AST
- // nodes) if parsing failed.
- static bool Parse(CompilationInfo* info, int flags);
-
- // Generic preparser generating full preparse data.
- static ScriptDataImpl* PreParse(Utf16CharacterStream* source,
- v8::Extension* extension,
- int flags);
+ // Pre-parse a character stream and return full preparse data.
+ //
+ // This interface is here instead of in preparser.h because it instantiates a
+ // preparser recorder object that is suited to the parser's purposes. Also,
+ // the preparser doesn't know about ScriptDataImpl.
+ static ScriptDataImpl* PreParse(Isolate* isolate,
+ Utf16CharacterStream* source);
};
+
// ----------------------------------------------------------------------------
// REGEXP PARSING
@@ -272,7 +270,8 @@ class RegExpBuilder: public ZoneObject {
void AddAtom(RegExpTree* tree);
void AddAssertion(RegExpTree* tree);
void NewAlternative(); // '|'
- void AddQuantifierToAtom(int min, int max, RegExpQuantifier::Type type);
+ void AddQuantifierToAtom(
+ int min, int max, RegExpQuantifier::QuantifierType type);
RegExpTree* ToRegExp();
private:
@@ -296,7 +295,7 @@ class RegExpBuilder: public ZoneObject {
};
-class RegExpParser {
+class RegExpParser BASE_EMBEDDED {
public:
RegExpParser(FlatStringReader* in,
Handle<String>* error,
@@ -426,27 +425,19 @@ class RegExpParser {
// Forward declaration.
class SingletonLogger;
-class Parser {
+class Parser : public ParserBase {
public:
- Parser(CompilationInfo* info,
- int parsing_flags, // Combination of ParsingFlags
- v8::Extension* extension,
- ScriptDataImpl* pre_data);
- virtual ~Parser() {
+ explicit Parser(CompilationInfo* info);
+ ~Parser() {
delete reusable_preparser_;
reusable_preparser_ = NULL;
}
- // Returns NULL if parsing failed.
- FunctionLiteral* ParseProgram();
- FunctionLiteral* ParseLazy();
-
- void ReportMessageAt(Scanner::Location loc,
- const char* message,
- Vector<const char*> args);
- void ReportMessageAt(Scanner::Location loc,
- const char* message,
- Vector<Handle<String> > args);
+ // Parses the source code represented by the compilation info and sets its
+ // function literal. Returns false (and deallocates any allocated AST
+ // nodes) if parsing failed.
+ static bool Parse(CompilationInfo* info) { return Parser(info).Parse(); }
+ bool Parse();
private:
static const int kMaxNumFunctionLocals = 131071; // 2^17-1
@@ -488,23 +479,21 @@ class Parser {
int NextHandlerIndex() { return next_handler_index_++; }
int handler_count() { return next_handler_index_; }
- void SetThisPropertyAssignmentInfo(
- bool only_simple_this_property_assignments,
- Handle<FixedArray> this_property_assignments) {
- only_simple_this_property_assignments_ =
- only_simple_this_property_assignments;
- this_property_assignments_ = this_property_assignments;
+ void AddProperty() { expected_property_count_++; }
+ int expected_property_count() { return expected_property_count_; }
+
+ void set_generator_object_variable(Variable *variable) {
+ ASSERT(variable != NULL);
+ ASSERT(!is_generator());
+ generator_object_variable_ = variable;
}
- bool only_simple_this_property_assignments() {
- return only_simple_this_property_assignments_;
+ Variable* generator_object_variable() const {
+ return generator_object_variable_;
}
- Handle<FixedArray> this_property_assignments() {
- return this_property_assignments_;
+ bool is_generator() const {
+ return generator_object_variable_ != NULL;
}
- void AddProperty() { expected_property_count_++; }
- int expected_property_count() { return expected_property_count_; }
-
AstNodeFactory<AstConstructionVisitor>* factory() { return &factory_; }
private:
@@ -519,10 +508,10 @@ class Parser {
// Properties count estimation.
int expected_property_count_;
- // Keeps track of assignments to properties of this. Used for
- // optimizing constructors.
- bool only_simple_this_property_assignments_;
- Handle<FixedArray> this_property_assignments_;
+ // For generators, the variable that holds the generator object. This
+ // variable is used by yield expressions and return statements. NULL
+ // indicates that this function is not a generator.
+ Variable* generator_object_variable_;
Parser* parser_;
FunctionState* outer_function_state_;
@@ -547,8 +536,11 @@ class Parser {
Mode old_mode_;
};
- FunctionLiteral* ParseLazy(Utf16CharacterStream* source,
- ZoneScope* zone_scope);
+ // Returns NULL if parsing failed.
+ FunctionLiteral* ParseProgram();
+
+ FunctionLiteral* ParseLazy();
+ FunctionLiteral* ParseLazy(Utf16CharacterStream* source);
Isolate* isolate() { return isolate_; }
Zone* zone() const { return zone_; }
@@ -556,19 +548,32 @@ class Parser {
// Called by ParseProgram after setting up the scanner.
FunctionLiteral* DoParseProgram(CompilationInfo* info,
- Handle<String> source,
- ZoneScope* zone_scope);
+ Handle<String> source);
// Report syntax error
void ReportUnexpectedToken(Token::Value token);
void ReportInvalidPreparseData(Handle<String> name, bool* ok);
void ReportMessage(const char* message, Vector<const char*> args);
void ReportMessage(const char* message, Vector<Handle<String> > args);
+ void ReportMessageAt(Scanner::Location location, const char* type) {
+ ReportMessageAt(location, type, Vector<const char*>::empty());
+ }
+ void ReportMessageAt(Scanner::Location loc,
+ const char* message,
+ Vector<const char*> args);
+ void ReportMessageAt(Scanner::Location loc,
+ const char* message,
+ Vector<Handle<String> > args);
+
+ void set_pre_parse_data(ScriptDataImpl *data) {
+ pre_parse_data_ = data;
+ symbol_cache_.Initialize(data ? data->symbol_count() : 0, zone());
+ }
bool inside_with() const { return top_scope_->inside_with(); }
Scanner& scanner() { return scanner_; }
Mode mode() const { return mode_; }
- ScriptDataImpl* pre_data() const { return pre_data_; }
+ ScriptDataImpl* pre_parse_data() const { return pre_parse_data_; }
bool is_extended_mode() {
ASSERT(top_scope_ != NULL);
return top_scope_->is_extended_mode();
@@ -632,6 +637,7 @@ class Parser {
Expression* ParseExpression(bool accept_IN, bool* ok);
Expression* ParseAssignmentExpression(bool accept_IN, bool* ok);
+ Expression* ParseYieldExpression(bool* ok);
Expression* ParseConditionalExpression(bool accept_IN, bool* ok);
Expression* ParseBinaryExpression(int prec, bool accept_IN, bool* ok);
Expression* ParseUnaryExpression(bool* ok);
@@ -645,7 +651,6 @@ class Parser {
Expression* ParsePrimaryExpression(bool* ok);
Expression* ParseArrayLiteral(bool* ok);
Expression* ParseObjectLiteral(bool* ok);
- ObjectLiteral::Property* ParseObjectLiteralGetSet(bool is_getter, bool* ok);
Expression* ParseRegExpLiteral(bool seen_equal, bool* ok);
// Populate the constant properties fixed array for a materialized object
@@ -655,13 +660,8 @@ class Parser {
Handle<FixedArray> constants,
bool* is_simple,
bool* fast_elements,
- int* depth);
-
- // Populate the literals fixed array for a materialized array literal.
- void BuildArrayLiteralBoilerplateLiterals(ZoneList<Expression*>* properties,
- Handle<FixedArray> constants,
- bool* is_simple,
- int* depth);
+ int* depth,
+ bool* may_store_doubles);
// Decide if a property should be in the object boilerplate.
bool IsBoilerplateProperty(ObjectLiteral::Property* property);
@@ -672,45 +672,27 @@ class Parser {
// in the object literal boilerplate.
Handle<Object> GetBoilerplateValue(Expression* expression);
+ // Initialize the components of a for-in / for-of statement.
+ void InitializeForEachStatement(ForEachStatement* stmt,
+ Expression* each,
+ Expression* subject,
+ Statement* body);
+
ZoneList<Expression*>* ParseArguments(bool* ok);
FunctionLiteral* ParseFunctionLiteral(Handle<String> var_name,
bool name_is_reserved,
+ bool is_generator,
int function_token_position,
- FunctionLiteral::Type type,
+ FunctionLiteral::FunctionType type,
bool* ok);
// Magical syntax support.
Expression* ParseV8Intrinsic(bool* ok);
- INLINE(Token::Value peek()) {
- if (stack_overflow_) return Token::ILLEGAL;
- return scanner().peek();
- }
-
- INLINE(Token::Value Next()) {
- // BUG 1215673: Find a thread safe way to set a stack limit in
- // pre-parse mode. Otherwise, we cannot safely pre-parse from other
- // threads.
- if (stack_overflow_) {
- return Token::ILLEGAL;
- }
- if (StackLimitCheck(isolate()).HasOverflowed()) {
- // Any further calls to Next or peek will return the illegal token.
- // The current call must return the next token, which might already
- // have been peek'ed.
- stack_overflow_ = true;
- }
- return scanner().Next();
- }
+ bool is_generator() const { return current_function_state_->is_generator(); }
- bool peek_any_identifier();
-
- INLINE(void Consume(Token::Value token));
- void Expect(Token::Value token, bool* ok);
- bool Check(Token::Value token);
- void ExpectSemicolon(bool* ok);
- void ExpectContextualKeyword(const char* keyword, bool* ok);
+ bool CheckInOrOf(bool accept_OF, ForEachStatement::VisitMode* visit_mode);
Handle<String> LiteralString(PretenureFlag tenured) {
if (scanner().is_literal_ascii()) {
@@ -732,11 +714,11 @@ class Parser {
}
}
- Handle<String> GetSymbol(bool* ok);
+ Handle<String> GetSymbol();
// Get odd-ball literals.
- Literal* GetLiteralUndefined();
- Literal* GetLiteralTheHole();
+ Literal* GetLiteralUndefined(int position);
+ Literal* GetLiteralTheHole(int position);
Handle<String> ParseIdentifier(bool* ok);
Handle<String> ParseIdentifierOrStrictReservedWord(
@@ -756,9 +738,6 @@ class Parser {
const char* error,
bool* ok);
- // Strict mode octal literal validation.
- void CheckOctalLiteral(int beg_pos, int end_pos, bool* ok);
-
// For harmony block scoping mode: Check if the scope has conflicting var/let
// declarations from different scopes. It covers for example
//
@@ -809,7 +788,7 @@ class Parser {
Handle<String> type,
Vector< Handle<Object> > arguments);
- preparser::PreParser::PreParseResult LazyParseFunctionLiteral(
+ PreParser::PreParseResult LazyParseFunctionLiteral(
SingletonLogger* logger);
AstNodeFactory<AstConstructionVisitor>* factory() {
@@ -821,19 +800,16 @@ class Parser {
Handle<Script> script_;
Scanner scanner_;
- preparser::PreParser* reusable_preparser_;
+ PreParser* reusable_preparser_;
Scope* top_scope_;
+ Scope* original_scope_; // for ES5 function declarations in sloppy eval
FunctionState* current_function_state_;
Target* target_stack_; // for break, continue statements
v8::Extension* extension_;
- ScriptDataImpl* pre_data_;
+ ScriptDataImpl* pre_parse_data_;
FuncNameInferrer* fni_;
Mode mode_;
- bool allow_natives_syntax_;
- bool allow_lazy_;
- bool allow_modules_;
- bool stack_overflow_;
// If true, the next (and immediately following) function literal is
// preceded by a parenthesis.
// Heuristically that means that the function will be called immediately,
@@ -851,7 +827,7 @@ class Parser {
// can be fully handled at compile time.
class CompileTimeValue: public AllStatic {
public:
- enum Type {
+ enum LiteralType {
OBJECT_LITERAL_FAST_ELEMENTS,
OBJECT_LITERAL_SLOW_ELEMENTS,
ARRAY_LITERAL
@@ -859,19 +835,17 @@ class CompileTimeValue: public AllStatic {
static bool IsCompileTimeValue(Expression* expression);
- static bool ArrayLiteralElementNeedsInitialization(Expression* value);
-
// Get the value as a compile time value.
- static Handle<FixedArray> GetValue(Expression* expression);
+ static Handle<FixedArray> GetValue(Isolate* isolate, Expression* expression);
// Get the type of a compile time value returned by GetValue().
- static Type GetType(Handle<FixedArray> value);
+ static LiteralType GetLiteralType(Handle<FixedArray> value);
// Get the elements array of a compile time value returned by GetValue().
static Handle<FixedArray> GetElements(Handle<FixedArray> value);
private:
- static const int kTypeSlot = 0;
+ static const int kLiteralTypeSlot = 0;
static const int kElementsSlot = 1;
DISALLOW_IMPLICIT_CONSTRUCTORS(CompileTimeValue);
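
The CompileTimeValue changes above rename the nested Type enum to LiteralType, rename GetType() to GetLiteralType(), and make GetValue() take the Isolate explicitly. A caller-side sketch (illustrative only; expr and isolate stand in for whatever the surrounding compiler code already has in scope):

    // Sketch, not part of the patch.
    static void InspectBoilerplate(v8::internal::Isolate* isolate,
                                   v8::internal::Expression* expr) {
      using v8::internal::CompileTimeValue;
      if (!CompileTimeValue::IsCompileTimeValue(expr)) return;
      v8::internal::Handle<v8::internal::FixedArray> value =
          CompileTimeValue::GetValue(isolate, expr);   // now needs the isolate
      CompileTimeValue::LiteralType type =
          CompileTimeValue::GetLiteralType(value);     // was GetType()
      USE(type);
    }
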
diff --git a/deps/v8/src/platform-cygwin.cc b/deps/v8/src/platform-cygwin.cc
index 089ea38d9..0076d567f 100644
--- a/deps/v8/src/platform-cygwin.cc
+++ b/deps/v8/src/platform-cygwin.cc
@@ -41,8 +41,8 @@
#include "v8.h"
-#include "platform-posix.h"
#include "platform.h"
+#include "simulator.h"
#include "v8threads.h"
#include "vm-state-inl.h"
#include "win32-headers.h"
@@ -50,42 +50,9 @@
namespace v8 {
namespace internal {
-// 0 is never a valid thread id
-static const pthread_t kNoThread = (pthread_t) 0;
-
-
-double ceiling(double x) {
- return ceil(x);
-}
-
-
-static Mutex* limit_mutex = NULL;
-
-
-void OS::PostSetUp() {
- POSIXPostSetUp();
-}
-
-uint64_t OS::CpuFeaturesImpliedByPlatform() {
- return 0; // Nothing special about Cygwin.
-}
-
-
-int OS::ActivationFrameAlignment() {
- // With gcc 4.4 the tree vectorization optimizer can generate code
- // that requires 16 byte alignment such as movdqa on x86.
- return 16;
-}
-
-
-void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
- __asm__ __volatile__("" : : : "memory");
- // An x86 store acts as a release barrier.
- *ptr = value;
-}
const char* OS::LocalTimezone(double time) {
- if (isnan(time)) return "";
+ if (std::isnan(time)) return "";
time_t tv = static_cast<time_t>(floor(time/msPerSecond));
struct tm* t = localtime(&tv);
if (NULL == t) return "";
@@ -105,36 +72,6 @@ double OS::LocalTimeOffset() {
}
-// We keep the lowest and highest addresses mapped as a quick way of
-// determining that pointers are outside the heap (used mostly in assertions
-// and verification). The estimate is conservative, i.e., not all addresses in
-// 'allocated' space are actually allocated to our heap. The range is
-// [lowest, highest), inclusive on the low and and exclusive on the high end.
-static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
-static void* highest_ever_allocated = reinterpret_cast<void*>(0);
-
-
-static void UpdateAllocatedSpaceLimits(void* address, int size) {
- ASSERT(limit_mutex != NULL);
- ScopedLock lock(limit_mutex);
-
- lowest_ever_allocated = Min(lowest_ever_allocated, address);
- highest_ever_allocated =
- Max(highest_ever_allocated,
- reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
-}
-
-
-bool OS::IsOutsideAllocatedSpace(void* address) {
- return address < lowest_ever_allocated || address >= highest_ever_allocated;
-}
-
-
-size_t OS::AllocateAlignment() {
- return sysconf(_SC_PAGESIZE);
-}
-
-
void* OS::Allocate(const size_t requested,
size_t* allocated,
bool is_executable) {
@@ -142,52 +79,14 @@ void* OS::Allocate(const size_t requested,
int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (mbase == MAP_FAILED) {
- LOG(ISOLATE, StringEvent("OS::Allocate", "mmap failed"));
+ LOG(Isolate::Current(), StringEvent("OS::Allocate", "mmap failed"));
return NULL;
}
*allocated = msize;
- UpdateAllocatedSpaceLimits(mbase, msize);
return mbase;
}
-void OS::Free(void* address, const size_t size) {
- // TODO(1240712): munmap has a return value which is ignored here.
- int result = munmap(address, size);
- USE(result);
- ASSERT(result == 0);
-}
-
-
-void OS::ProtectCode(void* address, const size_t size) {
- DWORD old_protect;
- VirtualProtect(address, size, PAGE_EXECUTE_READ, &old_protect);
-}
-
-
-void OS::Guard(void* address, const size_t size) {
- DWORD oldprotect;
- VirtualProtect(address, size, PAGE_READONLY | PAGE_GUARD, &oldprotect);
-}
-
-
-void OS::Sleep(int milliseconds) {
- unsigned int ms = static_cast<unsigned int>(milliseconds);
- usleep(1000 * ms);
-}
-
-
-void OS::Abort() {
- // Redirect to std abort to signal abnormal program termination.
- abort();
-}
-
-
-void OS::DebugBreak() {
- asm("int $3");
-}
-
-
class PosixMemoryMappedFile : public OS::MemoryMappedFile {
public:
PosixMemoryMappedFile(FILE* file, void* memory, int size)
@@ -236,7 +135,7 @@ PosixMemoryMappedFile::~PosixMemoryMappedFile() {
}
-void OS::LogSharedLibraryAddresses() {
+void OS::LogSharedLibraryAddresses(Isolate* isolate) {
// This function assumes that the layout of the file is as follows:
// hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name]
// If we encounter an unexpected situation we abort scanning further entries.
@@ -247,7 +146,6 @@ void OS::LogSharedLibraryAddresses() {
const int kLibNameLen = FILENAME_MAX + 1;
char* lib_name = reinterpret_cast<char*>(malloc(kLibNameLen));
- i::Isolate* isolate = ISOLATE;
// This loop will terminate once the scanning hits an EOF.
while (true) {
uintptr_t start, end;
@@ -301,485 +199,159 @@ void OS::SignalCodeMovingGC() {
}
-int OS::StackWalk(Vector<OS::StackFrame> frames) {
- // Not supported on Cygwin.
- return 0;
-}
-
-
// The VirtualMemory implementation is taken from platform-win32.cc.
// The mmap-based virtual memory implementation as it is used on most posix
// platforms does not work well because Cygwin does not support MAP_FIXED.
// This causes VirtualMemory::Commit to not always commit the memory region
// specified.
-bool VirtualMemory::IsReserved() {
- return address_ != NULL;
-}
-
-
-VirtualMemory::VirtualMemory(size_t size) {
- address_ = VirtualAlloc(NULL, size, MEM_RESERVE, PAGE_NOACCESS);
- size_ = size;
-}
-
-
-VirtualMemory::~VirtualMemory() {
- if (IsReserved()) {
- if (0 == VirtualFree(address(), 0, MEM_RELEASE)) address_ = NULL;
- }
-}
-
-
-bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
- int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
- if (NULL == VirtualAlloc(address, size, MEM_COMMIT, prot)) {
- return false;
- }
-
- UpdateAllocatedSpaceLimits(address, static_cast<int>(size));
- return true;
-}
-
-
-bool VirtualMemory::Uncommit(void* address, size_t size) {
- ASSERT(IsReserved());
- return VirtualFree(address, size, MEM_DECOMMIT) != false;
-}
-
-
-bool VirtualMemory::Guard(void* address) {
- if (NULL == VirtualAlloc(address,
- OS::CommitPageSize(),
- MEM_COMMIT,
- PAGE_READONLY | PAGE_GUARD)) {
- return false;
+static void* GetRandomAddr() {
+ Isolate* isolate = Isolate::UncheckedCurrent();
+ // Note that the current isolate isn't set up in a call path via
+ // CpuFeatures::Probe. We don't care about randomization in this case because
+ // the code page is immediately freed.
+ if (isolate != NULL) {
+ // The address range used to randomize RWX allocations in OS::Allocate
+ // Try not to map pages into the default range that windows loads DLLs
+ // Use a multiple of 64k to prevent committing unused memory.
+ // Note: This does not guarantee RWX regions will be within the
+ // range kAllocationRandomAddressMin to kAllocationRandomAddressMax
+#ifdef V8_HOST_ARCH_64_BIT
+ static const intptr_t kAllocationRandomAddressMin = 0x0000000080000000;
+ static const intptr_t kAllocationRandomAddressMax = 0x000003FFFFFF0000;
+#else
+ static const intptr_t kAllocationRandomAddressMin = 0x04000000;
+ static const intptr_t kAllocationRandomAddressMax = 0x3FFF0000;
+#endif
+ uintptr_t address =
+ (isolate->random_number_generator()->NextInt() << kPageSizeBits) |
+ kAllocationRandomAddressMin;
+ address &= kAllocationRandomAddressMax;
+ return reinterpret_cast<void *>(address);
}
- return true;
-}
-
-
-class Thread::PlatformData : public Malloced {
- public:
- PlatformData() : thread_(kNoThread) {}
- pthread_t thread_; // Thread handle for pthread.
-};
-
-
-
-
-Thread::Thread(const Options& options)
- : data_(new PlatformData()),
- stack_size_(options.stack_size()) {
- set_name(options.name());
-}
-
-
-Thread::~Thread() {
- delete data_;
-}
-
-
-static void* ThreadEntry(void* arg) {
- Thread* thread = reinterpret_cast<Thread*>(arg);
- // This is also initialized by the first argument to pthread_create() but we
- // don't know which thread will run first (the original thread or the new
- // one) so we initialize it here too.
- thread->data()->thread_ = pthread_self();
- ASSERT(thread->data()->thread_ != kNoThread);
- thread->Run();
return NULL;
}
-void Thread::set_name(const char* name) {
- strncpy(name_, name, sizeof(name_));
- name_[sizeof(name_) - 1] = '\0';
-}
+static void* RandomizedVirtualAlloc(size_t size, int action, int protection) {
+ LPVOID base = NULL;
-
-void Thread::Start() {
- pthread_attr_t* attr_ptr = NULL;
- pthread_attr_t attr;
- if (stack_size_ > 0) {
- pthread_attr_init(&attr);
- pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
- attr_ptr = &attr;
+ if (protection == PAGE_EXECUTE_READWRITE || protection == PAGE_NOACCESS) {
+ // For executable pages try to randomize the allocation address
+ for (size_t attempts = 0; base == NULL && attempts < 3; ++attempts) {
+ base = VirtualAlloc(GetRandomAddr(), size, action, protection);
+ }
}
- pthread_create(&data_->thread_, attr_ptr, ThreadEntry, this);
- ASSERT(data_->thread_ != kNoThread);
-}
+ // After three attempts give up and let the OS find an address to use.
+ if (base == NULL) base = VirtualAlloc(NULL, size, action, protection);
-void Thread::Join() {
- pthread_join(data_->thread_, NULL);
+ return base;
}
-static inline Thread::LocalStorageKey PthreadKeyToLocalKey(
- pthread_key_t pthread_key) {
- // We need to cast pthread_key_t to Thread::LocalStorageKey in two steps
- // because pthread_key_t is a pointer type on Cygwin. This will probably not
- // work on 64-bit platforms, but Cygwin doesn't support 64-bit anyway.
- STATIC_ASSERT(sizeof(Thread::LocalStorageKey) == sizeof(pthread_key_t));
- intptr_t ptr_key = reinterpret_cast<intptr_t>(pthread_key);
- return static_cast<Thread::LocalStorageKey>(ptr_key);
-}
+VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
-static inline pthread_key_t LocalKeyToPthreadKey(
- Thread::LocalStorageKey local_key) {
- STATIC_ASSERT(sizeof(Thread::LocalStorageKey) == sizeof(pthread_key_t));
- intptr_t ptr_key = static_cast<intptr_t>(local_key);
- return reinterpret_cast<pthread_key_t>(ptr_key);
-}
+VirtualMemory::VirtualMemory(size_t size)
+ : address_(ReserveRegion(size)), size_(size) { }
-Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
- pthread_key_t key;
- int result = pthread_key_create(&key, NULL);
+VirtualMemory::VirtualMemory(size_t size, size_t alignment)
+ : address_(NULL), size_(0) {
+ ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
+ size_t request_size = RoundUp(size + alignment,
+ static_cast<intptr_t>(OS::AllocateAlignment()));
+ void* address = ReserveRegion(request_size);
+ if (address == NULL) return;
+ Address base = RoundUp(static_cast<Address>(address), alignment);
+ // Try reducing the size by freeing and then reallocating a specific area.
+ bool result = ReleaseRegion(address, request_size);
USE(result);
- ASSERT(result == 0);
- return PthreadKeyToLocalKey(key);
+ ASSERT(result);
+ address = VirtualAlloc(base, size, MEM_RESERVE, PAGE_NOACCESS);
+ if (address != NULL) {
+ request_size = size;
+ ASSERT(base == static_cast<Address>(address));
+ } else {
+ // Resizing failed, just go with a bigger area.
+ address = ReserveRegion(request_size);
+ if (address == NULL) return;
+ }
+ address_ = address;
+ size_ = request_size;
}
-void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
- pthread_key_t pthread_key = LocalKeyToPthreadKey(key);
- int result = pthread_key_delete(pthread_key);
- USE(result);
- ASSERT(result == 0);
+VirtualMemory::~VirtualMemory() {
+ if (IsReserved()) {
+ bool result = ReleaseRegion(address_, size_);
+ ASSERT(result);
+ USE(result);
+ }
}
-void* Thread::GetThreadLocal(LocalStorageKey key) {
- pthread_key_t pthread_key = LocalKeyToPthreadKey(key);
- return pthread_getspecific(pthread_key);
+bool VirtualMemory::IsReserved() {
+ return address_ != NULL;
}
-void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
- pthread_key_t pthread_key = LocalKeyToPthreadKey(key);
- pthread_setspecific(pthread_key, value);
+void VirtualMemory::Reset() {
+ address_ = NULL;
+ size_ = 0;
}
-void Thread::YieldCPU() {
- sched_yield();
+bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
+ return CommitRegion(address, size, is_executable);
}
-class CygwinMutex : public Mutex {
- public:
- CygwinMutex() {
- pthread_mutexattr_t attrs;
- memset(&attrs, 0, sizeof(attrs));
-
- int result = pthread_mutexattr_init(&attrs);
- ASSERT(result == 0);
- result = pthread_mutexattr_settype(&attrs, PTHREAD_MUTEX_RECURSIVE);
- ASSERT(result == 0);
- result = pthread_mutex_init(&mutex_, &attrs);
- ASSERT(result == 0);
- }
-
- virtual ~CygwinMutex() { pthread_mutex_destroy(&mutex_); }
-
- virtual int Lock() {
- int result = pthread_mutex_lock(&mutex_);
- return result;
- }
-
- virtual int Unlock() {
- int result = pthread_mutex_unlock(&mutex_);
- return result;
- }
-
- virtual bool TryLock() {
- int result = pthread_mutex_trylock(&mutex_);
- // Return false if the lock is busy and locking failed.
- if (result == EBUSY) {
- return false;
- }
- ASSERT(result == 0); // Verify no other errors.
- return true;
- }
-
- private:
- pthread_mutex_t mutex_; // Pthread mutex for POSIX platforms.
-};
-
-
-Mutex* OS::CreateMutex() {
- return new CygwinMutex();
+bool VirtualMemory::Uncommit(void* address, size_t size) {
+ ASSERT(IsReserved());
+ return UncommitRegion(address, size);
}
-class CygwinSemaphore : public Semaphore {
- public:
- explicit CygwinSemaphore(int count) { sem_init(&sem_, 0, count); }
- virtual ~CygwinSemaphore() { sem_destroy(&sem_); }
-
- virtual void Wait();
- virtual bool Wait(int timeout);
- virtual void Signal() { sem_post(&sem_); }
- private:
- sem_t sem_;
-};
-
-
-void CygwinSemaphore::Wait() {
- while (true) {
- int result = sem_wait(&sem_);
- if (result == 0) return; // Successfully got semaphore.
- CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
- }
+void* VirtualMemory::ReserveRegion(size_t size) {
+ return RandomizedVirtualAlloc(size, MEM_RESERVE, PAGE_NOACCESS);
}
-#ifndef TIMEVAL_TO_TIMESPEC
-#define TIMEVAL_TO_TIMESPEC(tv, ts) do { \
- (ts)->tv_sec = (tv)->tv_sec; \
- (ts)->tv_nsec = (tv)->tv_usec * 1000; \
-} while (false)
-#endif
-
-
-bool CygwinSemaphore::Wait(int timeout) {
- const long kOneSecondMicros = 1000000; // NOLINT
-
- // Split timeout into second and nanosecond parts.
- struct timeval delta;
- delta.tv_usec = timeout % kOneSecondMicros;
- delta.tv_sec = timeout / kOneSecondMicros;
-
- struct timeval current_time;
- // Get the current time.
- if (gettimeofday(&current_time, NULL) == -1) {
+bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
+ int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
+ if (NULL == VirtualAlloc(base, size, MEM_COMMIT, prot)) {
return false;
}
-
- // Calculate time for end of timeout.
- struct timeval end_time;
- timeradd(&current_time, &delta, &end_time);
-
- struct timespec ts;
- TIMEVAL_TO_TIMESPEC(&end_time, &ts);
- // Wait for semaphore signalled or timeout.
- while (true) {
- int result = sem_timedwait(&sem_, &ts);
- if (result == 0) return true; // Successfully got semaphore.
- if (result == -1 && errno == ETIMEDOUT) return false; // Timeout.
- CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
- }
-}
-
-
-Semaphore* OS::CreateSemaphore(int count) {
- return new CygwinSemaphore(count);
+ return true;
}
-// ----------------------------------------------------------------------------
-// Cygwin profiler support.
-//
-// On Cygwin we use the same sampler implementation as on win32.
-
-class Sampler::PlatformData : public Malloced {
- public:
- // Get a handle to the calling thread. This is the thread that we are
- // going to profile. We need to make a copy of the handle because we are
- // going to use it in the sampler thread. Using GetThreadHandle() will
- // not work in this case. We're using OpenThread because DuplicateHandle
- // for some reason doesn't work in Chrome's sandbox.
- PlatformData() : profiled_thread_(OpenThread(THREAD_GET_CONTEXT |
- THREAD_SUSPEND_RESUME |
- THREAD_QUERY_INFORMATION,
- false,
- GetCurrentThreadId())) {}
-
- ~PlatformData() {
- if (profiled_thread_ != NULL) {
- CloseHandle(profiled_thread_);
- profiled_thread_ = NULL;
- }
- }
-
- HANDLE profiled_thread() { return profiled_thread_; }
-
- private:
- HANDLE profiled_thread_;
-};
-
-
-class SamplerThread : public Thread {
- public:
- static const int kSamplerThreadStackSize = 64 * KB;
-
- explicit SamplerThread(int interval)
- : Thread(Thread::Options("SamplerThread", kSamplerThreadStackSize)),
- interval_(interval) {}
-
- static void SetUp() { if (!mutex_) mutex_ = OS::CreateMutex(); }
- static void TearDown() { delete mutex_; }
-
- static void AddActiveSampler(Sampler* sampler) {
- ScopedLock lock(mutex_);
- SamplerRegistry::AddActiveSampler(sampler);
- if (instance_ == NULL) {
- instance_ = new SamplerThread(sampler->interval());
- instance_->Start();
- } else {
- ASSERT(instance_->interval_ == sampler->interval());
- }
- }
-
- static void RemoveActiveSampler(Sampler* sampler) {
- ScopedLock lock(mutex_);
- SamplerRegistry::RemoveActiveSampler(sampler);
- if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
- RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
- delete instance_;
- instance_ = NULL;
- }
- }
-
- // Implement Thread::Run().
- virtual void Run() {
- SamplerRegistry::State state;
- while ((state = SamplerRegistry::GetState()) !=
- SamplerRegistry::HAS_NO_SAMPLERS) {
- bool cpu_profiling_enabled =
- (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
- bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
- // When CPU profiling is enabled both JavaScript and C++ code is
- // profiled. We must not suspend.
- if (!cpu_profiling_enabled) {
- if (rate_limiter_.SuspendIfNecessary()) continue;
- }
- if (cpu_profiling_enabled) {
- if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) {
- return;
- }
- }
- if (runtime_profiler_enabled) {
- if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) {
- return;
- }
- }
- OS::Sleep(interval_);
- }
- }
-
- static void DoCpuProfile(Sampler* sampler, void* raw_sampler_thread) {
- if (!sampler->isolate()->IsInitialized()) return;
- if (!sampler->IsProfiling()) return;
- SamplerThread* sampler_thread =
- reinterpret_cast<SamplerThread*>(raw_sampler_thread);
- sampler_thread->SampleContext(sampler);
- }
-
- static void DoRuntimeProfile(Sampler* sampler, void* ignored) {
- if (!sampler->isolate()->IsInitialized()) return;
- sampler->isolate()->runtime_profiler()->NotifyTick();
- }
-
- void SampleContext(Sampler* sampler) {
- HANDLE profiled_thread = sampler->platform_data()->profiled_thread();
- if (profiled_thread == NULL) return;
-
- // Context used for sampling the register state of the profiled thread.
- CONTEXT context;
- memset(&context, 0, sizeof(context));
-
- TickSample sample_obj;
- TickSample* sample = CpuProfiler::TickSampleEvent(sampler->isolate());
- if (sample == NULL) sample = &sample_obj;
-
- static const DWORD kSuspendFailed = static_cast<DWORD>(-1);
- if (SuspendThread(profiled_thread) == kSuspendFailed) return;
- sample->state = sampler->isolate()->current_vm_state();
-
- context.ContextFlags = CONTEXT_FULL;
- if (GetThreadContext(profiled_thread, &context) != 0) {
-#if V8_HOST_ARCH_X64
- sample->pc = reinterpret_cast<Address>(context.Rip);
- sample->sp = reinterpret_cast<Address>(context.Rsp);
- sample->fp = reinterpret_cast<Address>(context.Rbp);
-#else
- sample->pc = reinterpret_cast<Address>(context.Eip);
- sample->sp = reinterpret_cast<Address>(context.Esp);
- sample->fp = reinterpret_cast<Address>(context.Ebp);
-#endif
- sampler->SampleStack(sample);
- sampler->Tick(sample);
- }
- ResumeThread(profiled_thread);
+bool VirtualMemory::Guard(void* address) {
+ if (NULL == VirtualAlloc(address,
+ OS::CommitPageSize(),
+ MEM_COMMIT,
+ PAGE_NOACCESS)) {
+ return false;
}
-
- const int interval_;
- RuntimeProfilerRateLimiter rate_limiter_;
-
- // Protects the process wide state below.
- static Mutex* mutex_;
- static SamplerThread* instance_;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(SamplerThread);
-};
-
-
-Mutex* SamplerThread::mutex_ = NULL;
-SamplerThread* SamplerThread::instance_ = NULL;
-
-
-void OS::SetUp() {
- // Seed the random number generator.
- // Convert the current time to a 64-bit integer first, before converting it
- // to an unsigned. Going directly can cause an overflow and the seed to be
- // set to all ones. The seed will be identical for different instances that
- // call this setup code within the same millisecond.
- uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
- srandom(static_cast<unsigned int>(seed));
- limit_mutex = CreateMutex();
- SamplerThread::SetUp();
-}
-
-
-void OS::TearDown() {
- SamplerThread::TearDown();
- delete limit_mutex;
-}
-
-
-Sampler::Sampler(Isolate* isolate, int interval)
- : isolate_(isolate),
- interval_(interval),
- profiling_(false),
- active_(false),
- samples_taken_(0) {
- data_ = new PlatformData;
+ return true;
}
-Sampler::~Sampler() {
- ASSERT(!IsActive());
- delete data_;
+bool VirtualMemory::UncommitRegion(void* base, size_t size) {
+ return VirtualFree(base, size, MEM_DECOMMIT) != 0;
}
-void Sampler::Start() {
- ASSERT(!IsActive());
- SetActive(true);
- SamplerThread::AddActiveSampler(this);
+bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
+ return VirtualFree(base, 0, MEM_RELEASE) != 0;
}
-void Sampler::Stop() {
- ASSERT(IsActive());
- SamplerThread::RemoveActiveSampler(this);
- SetActive(false);
+bool VirtualMemory::HasLazyCommits() {
+ // TODO(alph): implement for the platform.
+ return false;
}
-
} } // namespace v8::internal
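
Taken together, the platform-cygwin.cc rewrite above drops the bespoke thread, mutex, semaphore and sampler code and reworks VirtualMemory into the Win32-style reserve/commit protocol, with RandomizedVirtualAlloc picking a randomized base for executable reservations. A usage sketch of that protocol (illustrative only; it assumes the address() accessor and the KB constant that platform.h and globals.h already provide, which this patch does not touch):

    // Sketch, not part of the patch.
    using v8::internal::VirtualMemory;
    const size_t kSize = 64 * v8::internal::KB;
    VirtualMemory reservation(kSize);           // reserve only (PAGE_NOACCESS)
    if (reservation.IsReserved()) {
      void* base = reservation.address();
      // Commit maps the pages PAGE_READWRITE, or PAGE_EXECUTE_READWRITE when
      // is_executable is true (see CommitRegion above).
      if (reservation.Commit(base, kSize, false)) {
        // ... use the committed memory ...
        reservation.Uncommit(base, kSize);
      }
    }
    // ~VirtualMemory() releases the whole reservation via ReleaseRegion().
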
diff --git a/deps/v8/src/platform-freebsd.cc b/deps/v8/src/platform-freebsd.cc
index 511759c48..75d88ec5d 100644
--- a/deps/v8/src/platform-freebsd.cc
+++ b/deps/v8/src/platform-freebsd.cc
@@ -43,7 +43,6 @@
#include <sys/fcntl.h> // open
#include <unistd.h> // getpagesize
// If you don't have execinfo.h then you need devel/libexecinfo from ports.
-#include <execinfo.h> // backtrace, backtrace_symbols
#include <strings.h> // index
#include <errno.h>
#include <stdarg.h>
@@ -54,7 +53,6 @@
#include "v8.h"
#include "v8threads.h"
-#include "platform-posix.h"
#include "platform.h"
#include "vm-state-inl.h"
@@ -62,48 +60,9 @@
namespace v8 {
namespace internal {
-// 0 is never a valid thread id on FreeBSD since tids and pids share a
-// name space and pid 0 is used to kill the group (see man 2 kill).
-static const pthread_t kNoThread = (pthread_t) 0;
-
-
-double ceiling(double x) {
- // Correct as on OS X
- if (-1.0 < x && x < 0.0) {
- return -0.0;
- } else {
- return ceil(x);
- }
-}
-
-
-static Mutex* limit_mutex = NULL;
-
-
-void OS::PostSetUp() {
- POSIXPostSetUp();
-}
-
-
-void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
- __asm__ __volatile__("" : : : "memory");
- *ptr = value;
-}
-
-
-uint64_t OS::CpuFeaturesImpliedByPlatform() {
- return 0; // FreeBSD runs on anything.
-}
-
-
-int OS::ActivationFrameAlignment() {
- // 16 byte alignment on FreeBSD
- return 16;
-}
-
const char* OS::LocalTimezone(double time) {
- if (isnan(time)) return "";
+ if (std::isnan(time)) return "";
time_t tv = static_cast<time_t>(floor(time/msPerSecond));
struct tm* t = localtime(&tv);
if (NULL == t) return "";
@@ -120,36 +79,6 @@ double OS::LocalTimeOffset() {
}
-// We keep the lowest and highest addresses mapped as a quick way of
-// determining that pointers are outside the heap (used mostly in assertions
-// and verification). The estimate is conservative, i.e., not all addresses in
-// 'allocated' space are actually allocated to our heap. The range is
-// [lowest, highest), inclusive on the low and and exclusive on the high end.
-static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
-static void* highest_ever_allocated = reinterpret_cast<void*>(0);
-
-
-static void UpdateAllocatedSpaceLimits(void* address, int size) {
- ASSERT(limit_mutex != NULL);
- ScopedLock lock(limit_mutex);
-
- lowest_ever_allocated = Min(lowest_ever_allocated, address);
- highest_ever_allocated =
- Max(highest_ever_allocated,
- reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
-}
-
-
-bool OS::IsOutsideAllocatedSpace(void* address) {
- return address < lowest_ever_allocated || address >= highest_ever_allocated;
-}
-
-
-size_t OS::AllocateAlignment() {
- return getpagesize();
-}
-
-
void* OS::Allocate(const size_t requested,
size_t* allocated,
bool executable) {
@@ -158,46 +87,14 @@ void* OS::Allocate(const size_t requested,
void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
if (mbase == MAP_FAILED) {
- LOG(ISOLATE, StringEvent("OS::Allocate", "mmap failed"));
+ LOG(Isolate::Current(), StringEvent("OS::Allocate", "mmap failed"));
return NULL;
}
*allocated = msize;
- UpdateAllocatedSpaceLimits(mbase, msize);
return mbase;
}
-void OS::Free(void* buf, const size_t length) {
- // TODO(1240712): munmap has a return value which is ignored here.
- int result = munmap(buf, length);
- USE(result);
- ASSERT(result == 0);
-}
-
-
-void OS::Sleep(int milliseconds) {
- unsigned int ms = static_cast<unsigned int>(milliseconds);
- usleep(1000 * ms);
-}
-
-
-void OS::Abort() {
- // Redirect to std abort to signal abnormal program termination.
- abort();
-}
-
-
-void OS::DebugBreak() {
-#if (defined(__arm__) || defined(__thumb__))
-# if defined(CAN_USE_ARMV5_INSTRUCTIONS)
- asm("bkpt 0");
-# endif
-#else
- asm("int $3");
-#endif
-}
-
-
class PosixMemoryMappedFile : public OS::MemoryMappedFile {
public:
PosixMemoryMappedFile(FILE* file, void* memory, int size)
@@ -251,7 +148,7 @@ static unsigned StringToLong(char* buffer) {
}
-void OS::LogSharedLibraryAddresses() {
+void OS::LogSharedLibraryAddresses(Isolate* isolate) {
static const int MAP_LENGTH = 1024;
int fd = open("/proc/self/maps", O_RDONLY);
if (fd < 0) return;
@@ -285,7 +182,7 @@ void OS::LogSharedLibraryAddresses() {
// There may be no filename in this line. Skip to next.
if (start_of_path == NULL) continue;
buffer[bytes_read] = 0;
- LOG(i::Isolate::Current(), SharedLibraryEvent(start_of_path, start, end));
+ LOG(isolate, SharedLibraryEvent(start_of_path, start, end));
}
close(fd);
}
@@ -295,44 +192,17 @@ void OS::SignalCodeMovingGC() {
}
-int OS::StackWalk(Vector<OS::StackFrame> frames) {
- int frames_size = frames.length();
- ScopedVector<void*> addresses(frames_size);
-
- int frames_count = backtrace(addresses.start(), frames_size);
-
- char** symbols = backtrace_symbols(addresses.start(), frames_count);
- if (symbols == NULL) {
- return kStackWalkError;
- }
-
- for (int i = 0; i < frames_count; i++) {
- frames[i].address = addresses[i];
- // Format a text representation of the frame based on the information
- // available.
- SNPrintF(MutableCStrVector(frames[i].text, kStackWalkMaxTextLen),
- "%s",
- symbols[i]);
- // Make sure line termination is in place.
- frames[i].text[kStackWalkMaxTextLen - 1] = '\0';
- }
-
- free(symbols);
-
- return frames_count;
-}
-
// Constants used for mmap.
static const int kMmapFd = -1;
static const int kMmapFdOffset = 0;
+
VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
-VirtualMemory::VirtualMemory(size_t size) {
- address_ = ReserveRegion(size);
- size_ = size;
-}
+
+VirtualMemory::VirtualMemory(size_t size)
+ : address_(ReserveRegion(size)), size_(size) { }
VirtualMemory::VirtualMemory(size_t size, size_t alignment)
@@ -435,8 +305,6 @@ bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
kMmapFdOffset)) {
return false;
}
-
- UpdateAllocatedSpaceLimits(base, size);
return true;
}
@@ -456,446 +324,9 @@ bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
}
-class Thread::PlatformData : public Malloced {
- public:
- pthread_t thread_; // Thread handle for pthread.
-};
-
-
-Thread::Thread(const Options& options)
- : data_(new PlatformData),
- stack_size_(options.stack_size()) {
- set_name(options.name());
-}
-
-
-Thread::~Thread() {
- delete data_;
-}
-
-
-static void* ThreadEntry(void* arg) {
- Thread* thread = reinterpret_cast<Thread*>(arg);
- // This is also initialized by the first argument to pthread_create() but we
- // don't know which thread will run first (the original thread or the new
- // one) so we initialize it here too.
- thread->data()->thread_ = pthread_self();
- ASSERT(thread->data()->thread_ != kNoThread);
- thread->Run();
- return NULL;
-}
-
-
-void Thread::set_name(const char* name) {
- strncpy(name_, name, sizeof(name_));
- name_[sizeof(name_) - 1] = '\0';
-}
-
-
-void Thread::Start() {
- pthread_attr_t* attr_ptr = NULL;
- pthread_attr_t attr;
- if (stack_size_ > 0) {
- pthread_attr_init(&attr);
- pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
- attr_ptr = &attr;
- }
- pthread_create(&data_->thread_, attr_ptr, ThreadEntry, this);
- ASSERT(data_->thread_ != kNoThread);
+bool VirtualMemory::HasLazyCommits() {
+ // TODO(alph): implement for the platform.
+ return false;
}
-
-void Thread::Join() {
- pthread_join(data_->thread_, NULL);
-}
-
-
-Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
- pthread_key_t key;
- int result = pthread_key_create(&key, NULL);
- USE(result);
- ASSERT(result == 0);
- return static_cast<LocalStorageKey>(key);
-}
-
-
-void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
- pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
- int result = pthread_key_delete(pthread_key);
- USE(result);
- ASSERT(result == 0);
-}
-
-
-void* Thread::GetThreadLocal(LocalStorageKey key) {
- pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
- return pthread_getspecific(pthread_key);
-}
-
-
-void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
- pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
- pthread_setspecific(pthread_key, value);
-}
-
-
-void Thread::YieldCPU() {
- sched_yield();
-}
-
-
-class FreeBSDMutex : public Mutex {
- public:
- FreeBSDMutex() {
- pthread_mutexattr_t attrs;
- int result = pthread_mutexattr_init(&attrs);
- ASSERT(result == 0);
- result = pthread_mutexattr_settype(&attrs, PTHREAD_MUTEX_RECURSIVE);
- ASSERT(result == 0);
- result = pthread_mutex_init(&mutex_, &attrs);
- ASSERT(result == 0);
- USE(result);
- }
-
- virtual ~FreeBSDMutex() { pthread_mutex_destroy(&mutex_); }
-
- virtual int Lock() {
- int result = pthread_mutex_lock(&mutex_);
- return result;
- }
-
- virtual int Unlock() {
- int result = pthread_mutex_unlock(&mutex_);
- return result;
- }
-
- virtual bool TryLock() {
- int result = pthread_mutex_trylock(&mutex_);
- // Return false if the lock is busy and locking failed.
- if (result == EBUSY) {
- return false;
- }
- ASSERT(result == 0); // Verify no other errors.
- return true;
- }
-
- private:
- pthread_mutex_t mutex_; // Pthread mutex for POSIX platforms.
-};
-
-
-Mutex* OS::CreateMutex() {
- return new FreeBSDMutex();
-}
-
-
-class FreeBSDSemaphore : public Semaphore {
- public:
- explicit FreeBSDSemaphore(int count) { sem_init(&sem_, 0, count); }
- virtual ~FreeBSDSemaphore() { sem_destroy(&sem_); }
-
- virtual void Wait();
- virtual bool Wait(int timeout);
- virtual void Signal() { sem_post(&sem_); }
- private:
- sem_t sem_;
-};
-
-
-void FreeBSDSemaphore::Wait() {
- while (true) {
- int result = sem_wait(&sem_);
- if (result == 0) return; // Successfully got semaphore.
- CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
- }
-}
-
-
-bool FreeBSDSemaphore::Wait(int timeout) {
- const long kOneSecondMicros = 1000000; // NOLINT
-
- // Split timeout into second and nanosecond parts.
- struct timeval delta;
- delta.tv_usec = timeout % kOneSecondMicros;
- delta.tv_sec = timeout / kOneSecondMicros;
-
- struct timeval current_time;
- // Get the current time.
- if (gettimeofday(&current_time, NULL) == -1) {
- return false;
- }
-
- // Calculate time for end of timeout.
- struct timeval end_time;
- timeradd(&current_time, &delta, &end_time);
-
- struct timespec ts;
- TIMEVAL_TO_TIMESPEC(&end_time, &ts);
- while (true) {
- int result = sem_timedwait(&sem_, &ts);
- if (result == 0) return true; // Successfully got semaphore.
- if (result == -1 && errno == ETIMEDOUT) return false; // Timeout.
- CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
- }
-}
-
-
-Semaphore* OS::CreateSemaphore(int count) {
- return new FreeBSDSemaphore(count);
-}
-
-
-static pthread_t GetThreadID() {
- pthread_t thread_id = pthread_self();
- return thread_id;
-}
-
-
-class Sampler::PlatformData : public Malloced {
- public:
- PlatformData() : vm_tid_(GetThreadID()) {}
-
- pthread_t vm_tid() const { return vm_tid_; }
-
- private:
- pthread_t vm_tid_;
-};
-
-
-static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
- USE(info);
- if (signal != SIGPROF) return;
- Isolate* isolate = Isolate::UncheckedCurrent();
- if (isolate == NULL || !isolate->IsInitialized() || !isolate->IsInUse()) {
- // We require a fully initialized and entered isolate.
- return;
- }
- if (v8::Locker::IsActive() &&
- !isolate->thread_manager()->IsLockedByCurrentThread()) {
- return;
- }
-
- Sampler* sampler = isolate->logger()->sampler();
- if (sampler == NULL || !sampler->IsActive()) return;
-
- TickSample sample_obj;
- TickSample* sample = CpuProfiler::TickSampleEvent(isolate);
- if (sample == NULL) sample = &sample_obj;
-
- // Extracting the sample from the context is extremely machine dependent.
- ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
- mcontext_t& mcontext = ucontext->uc_mcontext;
- sample->state = isolate->current_vm_state();
-#if V8_HOST_ARCH_IA32
- sample->pc = reinterpret_cast<Address>(mcontext.mc_eip);
- sample->sp = reinterpret_cast<Address>(mcontext.mc_esp);
- sample->fp = reinterpret_cast<Address>(mcontext.mc_ebp);
-#elif V8_HOST_ARCH_X64
- sample->pc = reinterpret_cast<Address>(mcontext.mc_rip);
- sample->sp = reinterpret_cast<Address>(mcontext.mc_rsp);
- sample->fp = reinterpret_cast<Address>(mcontext.mc_rbp);
-#elif V8_HOST_ARCH_ARM
- sample->pc = reinterpret_cast<Address>(mcontext.mc_r15);
- sample->sp = reinterpret_cast<Address>(mcontext.mc_r13);
- sample->fp = reinterpret_cast<Address>(mcontext.mc_r11);
-#endif
- sampler->SampleStack(sample);
- sampler->Tick(sample);
-}
-
-
-class SignalSender : public Thread {
- public:
- enum SleepInterval {
- HALF_INTERVAL,
- FULL_INTERVAL
- };
-
- static const int kSignalSenderStackSize = 64 * KB;
-
- explicit SignalSender(int interval)
- : Thread(Thread::Options("SignalSender", kSignalSenderStackSize)),
- interval_(interval) {}
-
- static void SetUp() { if (!mutex_) mutex_ = OS::CreateMutex(); }
- static void TearDown() { delete mutex_; }
-
- static void AddActiveSampler(Sampler* sampler) {
- ScopedLock lock(mutex_);
- SamplerRegistry::AddActiveSampler(sampler);
- if (instance_ == NULL) {
- // Install a signal handler.
- struct sigaction sa;
- sa.sa_sigaction = ProfilerSignalHandler;
- sigemptyset(&sa.sa_mask);
- sa.sa_flags = SA_RESTART | SA_SIGINFO;
- signal_handler_installed_ =
- (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
-
- // Start a thread that sends the SIGPROF signal to VM threads.
- instance_ = new SignalSender(sampler->interval());
- instance_->Start();
- } else {
- ASSERT(instance_->interval_ == sampler->interval());
- }
- }
-
- static void RemoveActiveSampler(Sampler* sampler) {
- ScopedLock lock(mutex_);
- SamplerRegistry::RemoveActiveSampler(sampler);
- if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
- RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
- delete instance_;
- instance_ = NULL;
-
- // Restore the old signal handler.
- if (signal_handler_installed_) {
- sigaction(SIGPROF, &old_signal_handler_, 0);
- signal_handler_installed_ = false;
- }
- }
- }
-
- // Implement Thread::Run().
- virtual void Run() {
- SamplerRegistry::State state;
- while ((state = SamplerRegistry::GetState()) !=
- SamplerRegistry::HAS_NO_SAMPLERS) {
- bool cpu_profiling_enabled =
- (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
- bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
- // When CPU profiling is enabled both JavaScript and C++ code is
- // profiled. We must not suspend.
- if (!cpu_profiling_enabled) {
- if (rate_limiter_.SuspendIfNecessary()) continue;
- }
- if (cpu_profiling_enabled && runtime_profiler_enabled) {
- if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) {
- return;
- }
- Sleep(HALF_INTERVAL);
- if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) {
- return;
- }
- Sleep(HALF_INTERVAL);
- } else {
- if (cpu_profiling_enabled) {
- if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile,
- this)) {
- return;
- }
- }
- if (runtime_profiler_enabled) {
- if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile,
- NULL)) {
- return;
- }
- }
- Sleep(FULL_INTERVAL);
- }
- }
- }
-
- static void DoCpuProfile(Sampler* sampler, void* raw_sender) {
- if (!sampler->IsProfiling()) return;
- SignalSender* sender = reinterpret_cast<SignalSender*>(raw_sender);
- sender->SendProfilingSignal(sampler->platform_data()->vm_tid());
- }
-
- static void DoRuntimeProfile(Sampler* sampler, void* ignored) {
- if (!sampler->isolate()->IsInitialized()) return;
- sampler->isolate()->runtime_profiler()->NotifyTick();
- }
-
- void SendProfilingSignal(pthread_t tid) {
- if (!signal_handler_installed_) return;
- pthread_kill(tid, SIGPROF);
- }
-
- void Sleep(SleepInterval full_or_half) {
- // Convert ms to us and subtract 100 us to compensate for delays
- // occurring during signal delivery.
- useconds_t interval = interval_ * 1000 - 100;
- if (full_or_half == HALF_INTERVAL) interval /= 2;
- int result = usleep(interval);
-#ifdef DEBUG
- if (result != 0 && errno != EINTR) {
- fprintf(stderr,
- "SignalSender usleep error; interval = %u, errno = %d\n",
- interval,
- errno);
- ASSERT(result == 0 || errno == EINTR);
- }
-#endif
- USE(result);
- }
-
- const int interval_;
- RuntimeProfilerRateLimiter rate_limiter_;
-
- // Protects the process wide state below.
- static Mutex* mutex_;
- static SignalSender* instance_;
- static bool signal_handler_installed_;
- static struct sigaction old_signal_handler_;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(SignalSender);
-};
-
-Mutex* SignalSender::mutex_ = NULL;
-SignalSender* SignalSender::instance_ = NULL;
-struct sigaction SignalSender::old_signal_handler_;
-bool SignalSender::signal_handler_installed_ = false;
-
-
-void OS::SetUp() {
- // Seed the random number generator.
- // Convert the current time to a 64-bit integer first, before converting it
- // to an unsigned. Going directly can cause an overflow and the seed to be
- // set to all ones. The seed will be identical for different instances that
- // call this setup code within the same millisecond.
- uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
- srandom(static_cast<unsigned int>(seed));
- limit_mutex = CreateMutex();
- SignalSender::SetUp();
-}
-
-
-void OS::TearDown() {
- SignalSender::TearDown();
- delete limit_mutex;
-}
-
-
-Sampler::Sampler(Isolate* isolate, int interval)
- : isolate_(isolate),
- interval_(interval),
- profiling_(false),
- active_(false),
- samples_taken_(0) {
- data_ = new PlatformData;
-}
-
-
-Sampler::~Sampler() {
- ASSERT(!IsActive());
- delete data_;
-}
-
-
-void Sampler::Start() {
- ASSERT(!IsActive());
- SetActive(true);
- SignalSender::AddActiveSampler(this);
-}
-
-
-void Sampler::Stop() {
- ASSERT(IsActive());
- SignalSender::RemoveActiveSampler(this);
- SetActive(false);
-}
-
-
} } // namespace v8::internal
diff --git a/deps/v8/src/platform-linux.cc b/deps/v8/src/platform-linux.cc
index beb2ccee2..eb2d10b3f 100644
--- a/deps/v8/src/platform-linux.cc
+++ b/deps/v8/src/platform-linux.cc
@@ -46,9 +46,6 @@
#include <sys/stat.h> // open
#include <fcntl.h> // open
#include <unistd.h> // sysconf
-#if defined(__GLIBC__) && !defined(__UCLIBC__)
-#include <execinfo.h> // backtrace, backtrace_symbols
-#endif // defined(__GLIBC__) && !defined(__UCLIBC__)
#include <strings.h> // index
#include <errno.h>
#include <stdarg.h>
@@ -64,7 +61,6 @@
#include "v8.h"
-#include "platform-posix.h"
#include "platform.h"
#include "v8threads.h"
#include "vm-state-inl.h"
@@ -73,124 +69,8 @@
namespace v8 {
namespace internal {
-// 0 is never a valid thread id on Linux since tids and pids share a
-// name space and pid 0 is reserved (see man 2 kill).
-static const pthread_t kNoThread = (pthread_t) 0;
-
-
-double ceiling(double x) {
- return ceil(x);
-}
-
-
-static Mutex* limit_mutex = NULL;
-
-
-void OS::PostSetUp() {
- POSIXPostSetUp();
-}
-
-
-uint64_t OS::CpuFeaturesImpliedByPlatform() {
- return 0; // Linux runs on anything.
-}
-
#ifdef __arm__
-static bool CPUInfoContainsString(const char * search_string) {
- const char* file_name = "/proc/cpuinfo";
- // This is written as a straight shot one pass parser
- // and not using STL string and ifstream because,
- // on Linux, it's reading from a (non-mmap-able)
- // character special device.
- FILE* f = NULL;
- const char* what = search_string;
-
- if (NULL == (f = fopen(file_name, "r")))
- return false;
-
- int k;
- while (EOF != (k = fgetc(f))) {
- if (k == *what) {
- ++what;
- while ((*what != '\0') && (*what == fgetc(f))) {
- ++what;
- }
- if (*what == '\0') {
- fclose(f);
- return true;
- } else {
- what = search_string;
- }
- }
- }
- fclose(f);
-
- // Did not find string in the proc file.
- return false;
-}
-
-
-bool OS::ArmCpuHasFeature(CpuFeature feature) {
- const char* search_string = NULL;
- // Simple detection of VFP at runtime for Linux.
- // It is based on /proc/cpuinfo, which reveals hardware configuration
- // to user-space applications. According to ARM (mid 2009), no similar
- // facility is universally available on the ARM architectures,
- // so it's up to individual OSes to provide such.
- switch (feature) {
- case VFP2:
- search_string = "vfp";
- break;
- case VFP3:
- search_string = "vfpv3";
- break;
- case ARMv7:
- search_string = "ARMv7";
- break;
- case SUDIV:
- search_string = "idiva";
- break;
- default:
- UNREACHABLE();
- }
-
- if (CPUInfoContainsString(search_string)) {
- return true;
- }
-
- if (feature == VFP3) {
- // Some old kernels will report vfp not vfpv3. Here we make a last attempt
- // to detect vfpv3 by checking for vfp *and* neon, since neon is only
- // available on architectures with vfpv3.
- // Checking neon on its own is not enough as it is possible to have neon
- // without vfp.
- if (CPUInfoContainsString("vfp") && CPUInfoContainsString("neon")) {
- return true;
- }
- }
-
- return false;
-}
-
-
-CpuImplementer OS::GetCpuImplementer() {
- static bool use_cached_value = false;
- static CpuImplementer cached_value = UNKNOWN_IMPLEMENTER;
- if (use_cached_value) {
- return cached_value;
- }
- if (CPUInfoContainsString("CPU implementer\t: 0x41")) {
- cached_value = ARM_IMPLEMENTER;
- } else if (CPUInfoContainsString("CPU implementer\t: 0x51")) {
- cached_value = QUALCOMM_IMPLEMENTER;
- } else {
- cached_value = UNKNOWN_IMPLEMENTER;
- }
- use_cached_value = true;
- return cached_value;
-}
-
bool OS::ArmUsingHardFloat() {
// GCC versions 4.6 and above define __ARM_PCS or __ARM_PCS_VFP to specify
@@ -217,7 +97,8 @@ bool OS::ArmUsingHardFloat() {
#else
#if defined(__ARM_PCS_VFP)
return true;
-#elif defined(__ARM_PCS) || defined(__SOFTFP) || !defined(__VFP_FP__)
+#elif defined(__ARM_PCS) || defined(__SOFTFP__) || defined(__SOFTFP) || \
+ !defined(__VFP_FP__)
return false;
#else
#error "Your version of GCC does not report the FP ABI compiled for." \
@@ -232,87 +113,8 @@ bool OS::ArmUsingHardFloat() {
#endif // def __arm__
-#ifdef __mips__
-bool OS::MipsCpuHasFeature(CpuFeature feature) {
- const char* search_string = NULL;
- const char* file_name = "/proc/cpuinfo";
- // Simple detection of FPU at runtime for Linux.
- // It is based on /proc/cpuinfo, which reveals hardware configuration
- // to user-space applications. According to MIPS (early 2010), no similar
- // facility is universally available on the MIPS architectures,
- // so it's up to individual OSes to provide such.
- //
- // This is written as a straight shot one pass parser
- // and not using STL string and ifstream because,
- // on Linux, it's reading from a (non-mmap-able)
- // character special device.
-
- switch (feature) {
- case FPU:
- search_string = "FPU";
- break;
- default:
- UNREACHABLE();
- }
-
- FILE* f = NULL;
- const char* what = search_string;
-
- if (NULL == (f = fopen(file_name, "r")))
- return false;
-
- int k;
- while (EOF != (k = fgetc(f))) {
- if (k == *what) {
- ++what;
- while ((*what != '\0') && (*what == fgetc(f))) {
- ++what;
- }
- if (*what == '\0') {
- fclose(f);
- return true;
- } else {
- what = search_string;
- }
- }
- }
- fclose(f);
-
- // Did not find string in the proc file.
- return false;
-}
-#endif // def __mips__
-
-
-int OS::ActivationFrameAlignment() {
-#ifdef V8_TARGET_ARCH_ARM
- // On EABI ARM targets this is required for fp correctness in the
- // runtime system.
- return 8;
-#elif V8_TARGET_ARCH_MIPS
- return 8;
-#endif
- // With gcc 4.4 the tree vectorization optimizer can generate code
- // that requires 16 byte alignment such as movdqa on x86.
- return 16;
-}
-
-
-void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
-#if (defined(V8_TARGET_ARCH_ARM) && defined(__arm__)) || \
- (defined(V8_TARGET_ARCH_MIPS) && defined(__mips__))
- // Only use on ARM or MIPS hardware.
- MemoryBarrier();
-#else
- __asm__ __volatile__("" : : : "memory");
- // An x86 store acts as a release barrier.
-#endif
- *ptr = value;
-}
-
-
const char* OS::LocalTimezone(double time) {
- if (isnan(time)) return "";
+ if (std::isnan(time)) return "";
time_t tv = static_cast<time_t>(floor(time/msPerSecond));
struct tm* t = localtime(&tv);
if (NULL == t) return "";
@@ -329,36 +131,6 @@ double OS::LocalTimeOffset() {
}
-// We keep the lowest and highest addresses mapped as a quick way of
-// determining that pointers are outside the heap (used mostly in assertions
-// and verification). The estimate is conservative, i.e., not all addresses in
-// 'allocated' space are actually allocated to our heap. The range is
-// [lowest, highest), inclusive on the low end and exclusive on the high end.
-static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
-static void* highest_ever_allocated = reinterpret_cast<void*>(0);
-
-
-static void UpdateAllocatedSpaceLimits(void* address, int size) {
- ASSERT(limit_mutex != NULL);
- ScopedLock lock(limit_mutex);
-
- lowest_ever_allocated = Min(lowest_ever_allocated, address);
- highest_ever_allocated =
- Max(highest_ever_allocated,
- reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
-}
-
-
-bool OS::IsOutsideAllocatedSpace(void* address) {
- return address < lowest_ever_allocated || address >= highest_ever_allocated;
-}
-
-
-size_t OS::AllocateAlignment() {
- return sysconf(_SC_PAGESIZE);
-}
-
-
void* OS::Allocate(const size_t requested,
size_t* allocated,
bool is_executable) {
@@ -372,49 +144,10 @@ void* OS::Allocate(const size_t requested,
return NULL;
}
*allocated = msize;
- UpdateAllocatedSpaceLimits(mbase, msize);
return mbase;
}
-void OS::Free(void* address, const size_t size) {
- // TODO(1240712): munmap has a return value which is ignored here.
- int result = munmap(address, size);
- USE(result);
- ASSERT(result == 0);
-}
-
-
-void OS::Sleep(int milliseconds) {
- unsigned int ms = static_cast<unsigned int>(milliseconds);
- usleep(1000 * ms);
-}
-
-
-void OS::Abort() {
- // Redirect to std abort to signal abnormal program termination.
- if (FLAG_break_on_abort) {
- DebugBreak();
- }
- abort();
-}
-
-
-void OS::DebugBreak() {
-// TODO(lrn): Introduce processor define for runtime system (!= V8_ARCH_x,
-// which is the architecture of generated code).
-#if (defined(__arm__) || defined(__thumb__))
-# if defined(CAN_USE_ARMV5_INSTRUCTIONS)
- asm("bkpt 0");
-# endif
-#elif defined(__mips__)
- asm("break");
-#else
- asm("int $3");
-#endif
-}
-
-
class PosixMemoryMappedFile : public OS::MemoryMappedFile {
public:
PosixMemoryMappedFile(FILE* file, void* memory, int size)
@@ -473,7 +206,7 @@ PosixMemoryMappedFile::~PosixMemoryMappedFile() {
}
-void OS::LogSharedLibraryAddresses() {
+void OS::LogSharedLibraryAddresses(Isolate* isolate) {
// This function assumes that the layout of the file is as follows:
// hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name]
// If we encounter an unexpected situation we abort scanning further entries.
@@ -484,7 +217,6 @@ void OS::LogSharedLibraryAddresses() {
const int kLibNameLen = FILENAME_MAX + 1;
char* lib_name = reinterpret_cast<char*>(malloc(kLibNameLen));
- i::Isolate* isolate = ISOLATE;
// This loop will terminate once the scanning hits an EOF.
while (true) {
uintptr_t start, end;
@@ -499,19 +231,20 @@ void OS::LogSharedLibraryAddresses() {
// the beginning of the filename or the end of the line.
do {
c = getc(fp);
- } while ((c != EOF) && (c != '\n') && (c != '/'));
+ } while ((c != EOF) && (c != '\n') && (c != '/') && (c != '['));
if (c == EOF) break; // EOF: Was unexpected, just exit.
// Process the filename if found.
- if (c == '/') {
- ungetc(c, fp); // Push the '/' back into the stream to be read below.
+ if ((c == '/') || (c == '[')) {
+ // Push the '/' or '[' back into the stream to be read below.
+ ungetc(c, fp);
// Read to the end of the line. Exit if the read fails.
if (fgets(lib_name, kLibNameLen, fp) == NULL) break;
// Drop the newline character read by fgets. We do not need to check
// for a zero-length string because we know that we at least read the
- // '/' character.
+ // '/' or '[' character.
lib_name[strlen(lib_name) - 1] = '\0';
} else {
// No library name found, just record the raw address range.
@@ -544,9 +277,19 @@ void OS::SignalCodeMovingGC() {
// kernel log.
int size = sysconf(_SC_PAGESIZE);
FILE* f = fopen(FLAG_gc_fake_mmap, "w+");
+ if (f == NULL) {
+ OS::PrintError("Failed to open %s\n", FLAG_gc_fake_mmap);
+ OS::Abort();
+ }
void* addr = mmap(OS::GetRandomMmapAddr(),
size,
+#if defined(__native_client__)
+ // The Native Client port of V8 uses an interpreter,
+ // so code pages don't need PROT_EXEC.
+ PROT_READ,
+#else
PROT_READ | PROT_EXEC,
+#endif
MAP_PRIVATE,
fileno(f),
0);
@@ -556,49 +299,16 @@ void OS::SignalCodeMovingGC() {
}
-int OS::StackWalk(Vector<OS::StackFrame> frames) {
- // backtrace is a glibc extension.
-#if defined(__GLIBC__) && !defined(__UCLIBC__)
- int frames_size = frames.length();
- ScopedVector<void*> addresses(frames_size);
-
- int frames_count = backtrace(addresses.start(), frames_size);
-
- char** symbols = backtrace_symbols(addresses.start(), frames_count);
- if (symbols == NULL) {
- return kStackWalkError;
- }
-
- for (int i = 0; i < frames_count; i++) {
- frames[i].address = addresses[i];
- // Format a text representation of the frame based on the information
- // available.
- SNPrintF(MutableCStrVector(frames[i].text, kStackWalkMaxTextLen),
- "%s",
- symbols[i]);
- // Make sure line termination is in place.
- frames[i].text[kStackWalkMaxTextLen - 1] = '\0';
- }
-
- free(symbols);
-
- return frames_count;
-#else // defined(__GLIBC__) && !defined(__UCLIBC__)
- return 0;
-#endif // defined(__GLIBC__) && !defined(__UCLIBC__)
-}
-
-
// Constants used for mmap.
static const int kMmapFd = -1;
static const int kMmapFdOffset = 0;
+
VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
-VirtualMemory::VirtualMemory(size_t size) {
- address_ = ReserveRegion(size);
- size_ = size;
-}
+
+VirtualMemory::VirtualMemory(size_t size)
+ : address_(ReserveRegion(size)), size_(size) { }
VirtualMemory::VirtualMemory(size_t size, size_t alignment)
@@ -692,7 +402,13 @@ void* VirtualMemory::ReserveRegion(size_t size) {
bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
+#if defined(__native_client__)
+ // The Native Client port of V8 uses an interpreter,
+ // so code pages don't need PROT_EXEC.
+ int prot = PROT_READ | PROT_WRITE;
+#else
int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+#endif
if (MAP_FAILED == mmap(base,
size,
prot,
@@ -702,7 +418,6 @@ bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
return false;
}
- UpdateAllocatedSpaceLimits(base, size);
return true;
}
@@ -722,596 +437,8 @@ bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
}
-class Thread::PlatformData : public Malloced {
- public:
- PlatformData() : thread_(kNoThread) {}
-
- pthread_t thread_; // Thread handle for pthread.
-};
-
-Thread::Thread(const Options& options)
- : data_(new PlatformData()),
- stack_size_(options.stack_size()) {
- set_name(options.name());
-}
-
-
-Thread::~Thread() {
- delete data_;
-}
-
-
-static void* ThreadEntry(void* arg) {
- Thread* thread = reinterpret_cast<Thread*>(arg);
- // This is also initialized by the first argument to pthread_create() but we
- // don't know which thread will run first (the original thread or the new
- // one) so we initialize it here too.
-#ifdef PR_SET_NAME
- prctl(PR_SET_NAME,
- reinterpret_cast<unsigned long>(thread->name()), // NOLINT
- 0, 0, 0);
-#endif
- thread->data()->thread_ = pthread_self();
- ASSERT(thread->data()->thread_ != kNoThread);
- thread->Run();
- return NULL;
-}
-
-
-void Thread::set_name(const char* name) {
- strncpy(name_, name, sizeof(name_));
- name_[sizeof(name_) - 1] = '\0';
-}
-
-
-void Thread::Start() {
- pthread_attr_t* attr_ptr = NULL;
- pthread_attr_t attr;
- if (stack_size_ > 0) {
- pthread_attr_init(&attr);
- pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
- attr_ptr = &attr;
- }
- int result = pthread_create(&data_->thread_, attr_ptr, ThreadEntry, this);
- CHECK_EQ(0, result);
- ASSERT(data_->thread_ != kNoThread);
-}
-
-
-void Thread::Join() {
- pthread_join(data_->thread_, NULL);
-}
-
-
-Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
- pthread_key_t key;
- int result = pthread_key_create(&key, NULL);
- USE(result);
- ASSERT(result == 0);
- return static_cast<LocalStorageKey>(key);
-}
-
-
-void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
- pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
- int result = pthread_key_delete(pthread_key);
- USE(result);
- ASSERT(result == 0);
-}
-
-
-void* Thread::GetThreadLocal(LocalStorageKey key) {
- pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
- return pthread_getspecific(pthread_key);
-}
-
-
-void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
- pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
- pthread_setspecific(pthread_key, value);
-}
-
-
-void Thread::YieldCPU() {
- sched_yield();
-}
-
-
-class LinuxMutex : public Mutex {
- public:
- LinuxMutex() {
- pthread_mutexattr_t attrs;
- int result = pthread_mutexattr_init(&attrs);
- ASSERT(result == 0);
- result = pthread_mutexattr_settype(&attrs, PTHREAD_MUTEX_RECURSIVE);
- ASSERT(result == 0);
- result = pthread_mutex_init(&mutex_, &attrs);
- ASSERT(result == 0);
- USE(result);
- }
-
- virtual ~LinuxMutex() { pthread_mutex_destroy(&mutex_); }
-
- virtual int Lock() {
- int result = pthread_mutex_lock(&mutex_);
- return result;
- }
-
- virtual int Unlock() {
- int result = pthread_mutex_unlock(&mutex_);
- return result;
- }
-
- virtual bool TryLock() {
- int result = pthread_mutex_trylock(&mutex_);
- // Return false if the lock is busy and locking failed.
- if (result == EBUSY) {
- return false;
- }
- ASSERT(result == 0); // Verify no other errors.
- return true;
- }
-
- private:
- pthread_mutex_t mutex_; // Pthread mutex for POSIX platforms.
-};
-
-
-Mutex* OS::CreateMutex() {
- return new LinuxMutex();
-}
-
-
-class LinuxSemaphore : public Semaphore {
- public:
- explicit LinuxSemaphore(int count) { sem_init(&sem_, 0, count); }
- virtual ~LinuxSemaphore() { sem_destroy(&sem_); }
-
- virtual void Wait();
- virtual bool Wait(int timeout);
- virtual void Signal() { sem_post(&sem_); }
- private:
- sem_t sem_;
-};
-
-
-void LinuxSemaphore::Wait() {
- while (true) {
- int result = sem_wait(&sem_);
- if (result == 0) return; // Successfully got semaphore.
- CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
- }
-}
-
-
-#ifndef TIMEVAL_TO_TIMESPEC
-#define TIMEVAL_TO_TIMESPEC(tv, ts) do { \
- (ts)->tv_sec = (tv)->tv_sec; \
- (ts)->tv_nsec = (tv)->tv_usec * 1000; \
-} while (false)
-#endif
-
-
-bool LinuxSemaphore::Wait(int timeout) {
- const long kOneSecondMicros = 1000000; // NOLINT
-
- // Split timeout into second and nanosecond parts.
- struct timeval delta;
- delta.tv_usec = timeout % kOneSecondMicros;
- delta.tv_sec = timeout / kOneSecondMicros;
-
- struct timeval current_time;
- // Get the current time.
- if (gettimeofday(&current_time, NULL) == -1) {
- return false;
- }
-
- // Calculate time for end of timeout.
- struct timeval end_time;
- timeradd(&current_time, &delta, &end_time);
-
- struct timespec ts;
- TIMEVAL_TO_TIMESPEC(&end_time, &ts);
- // Wait for semaphore signalled or timeout.
- while (true) {
- int result = sem_timedwait(&sem_, &ts);
- if (result == 0) return true; // Successfully got semaphore.
- if (result > 0) {
- // For glibc prior to 2.3.4 sem_timedwait returns the error instead of -1.
- errno = result;
- result = -1;
- }
- if (result == -1 && errno == ETIMEDOUT) return false; // Timeout.
- CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
- }
-}
-
-
-Semaphore* OS::CreateSemaphore(int count) {
- return new LinuxSemaphore(count);
-}
-
-
-#if defined(__ANDROID__) && !defined(__BIONIC_HAVE_UCONTEXT_T)
-
-// Not all versions of Android's C library provide ucontext_t.
-// Detect this and provide custom but compatible definitions. Note that these
-// follow the GLibc naming convention to access register values from
-// mcontext_t.
-//
-// See http://code.google.com/p/android/issues/detail?id=34784
-
-#if defined(__arm__)
-
-typedef struct sigcontext mcontext_t;
-
-typedef struct ucontext {
- uint32_t uc_flags;
- struct ucontext* uc_link;
- stack_t uc_stack;
- mcontext_t uc_mcontext;
- // Other fields are not used by V8, don't define them here.
-} ucontext_t;
-
-#elif defined(__mips__)
-// MIPS version of sigcontext, for Android bionic.
-typedef struct {
- uint32_t regmask;
- uint32_t status;
- uint64_t pc;
- uint64_t gregs[32];
- uint64_t fpregs[32];
- uint32_t acx;
- uint32_t fpc_csr;
- uint32_t fpc_eir;
- uint32_t used_math;
- uint32_t dsp;
- uint64_t mdhi;
- uint64_t mdlo;
- uint32_t hi1;
- uint32_t lo1;
- uint32_t hi2;
- uint32_t lo2;
- uint32_t hi3;
- uint32_t lo3;
-} mcontext_t;
-
-typedef struct ucontext {
- uint32_t uc_flags;
- struct ucontext* uc_link;
- stack_t uc_stack;
- mcontext_t uc_mcontext;
- // Other fields are not used by V8, don't define them here.
-} ucontext_t;
-
-#elif defined(__i386__)
-// x86 version for Android.
-typedef struct {
- uint32_t gregs[19];
- void* fpregs;
- uint32_t oldmask;
- uint32_t cr2;
-} mcontext_t;
-
-typedef uint32_t kernel_sigset_t[2]; // x86 kernel uses 64-bit signal masks
-typedef struct ucontext {
- uint32_t uc_flags;
- struct ucontext* uc_link;
- stack_t uc_stack;
- mcontext_t uc_mcontext;
- // Other fields are not used by V8, don't define them here.
-} ucontext_t;
-enum { REG_EBP = 6, REG_ESP = 7, REG_EIP = 14 };
-#endif
-
-#endif // __ANDROID__ && !defined(__BIONIC_HAVE_UCONTEXT_T)
-
-static int GetThreadID() {
-#if defined(__ANDROID__)
- // Android's C library provides gettid(2).
- return gettid();
-#else
- // Glibc doesn't provide a wrapper for gettid(2).
- return syscall(SYS_gettid);
-#endif
-}
-
-
-static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
- USE(info);
- if (signal != SIGPROF) return;
- Isolate* isolate = Isolate::UncheckedCurrent();
- if (isolate == NULL || !isolate->IsInitialized() || !isolate->IsInUse()) {
- // We require a fully initialized and entered isolate.
- return;
- }
- if (v8::Locker::IsActive() &&
- !isolate->thread_manager()->IsLockedByCurrentThread()) {
- return;
- }
-
- Sampler* sampler = isolate->logger()->sampler();
- if (sampler == NULL || !sampler->IsActive()) return;
-
- TickSample sample_obj;
- TickSample* sample = CpuProfiler::TickSampleEvent(isolate);
- if (sample == NULL) sample = &sample_obj;
-
- // Extracting the sample from the context is extremely machine dependent.
- ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
- mcontext_t& mcontext = ucontext->uc_mcontext;
- sample->state = isolate->current_vm_state();
-#if V8_HOST_ARCH_IA32
- sample->pc = reinterpret_cast<Address>(mcontext.gregs[REG_EIP]);
- sample->sp = reinterpret_cast<Address>(mcontext.gregs[REG_ESP]);
- sample->fp = reinterpret_cast<Address>(mcontext.gregs[REG_EBP]);
-#elif V8_HOST_ARCH_X64
- sample->pc = reinterpret_cast<Address>(mcontext.gregs[REG_RIP]);
- sample->sp = reinterpret_cast<Address>(mcontext.gregs[REG_RSP]);
- sample->fp = reinterpret_cast<Address>(mcontext.gregs[REG_RBP]);
-#elif V8_HOST_ARCH_ARM
-#if defined(__GLIBC__) && !defined(__UCLIBC__) && \
- (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
- // Old GLibc ARM versions used a gregs[] array to access the register
- // values from mcontext_t.
- sample->pc = reinterpret_cast<Address>(mcontext.gregs[R15]);
- sample->sp = reinterpret_cast<Address>(mcontext.gregs[R13]);
- sample->fp = reinterpret_cast<Address>(mcontext.gregs[R11]);
-#else
- sample->pc = reinterpret_cast<Address>(mcontext.arm_pc);
- sample->sp = reinterpret_cast<Address>(mcontext.arm_sp);
- sample->fp = reinterpret_cast<Address>(mcontext.arm_fp);
-#endif // defined(__GLIBC__) && !defined(__UCLIBC__) &&
- // (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
-#elif V8_HOST_ARCH_MIPS
- sample->pc = reinterpret_cast<Address>(mcontext.pc);
- sample->sp = reinterpret_cast<Address>(mcontext.gregs[29]);
- sample->fp = reinterpret_cast<Address>(mcontext.gregs[30]);
-#endif // V8_HOST_ARCH_*
- sampler->SampleStack(sample);
- sampler->Tick(sample);
-}
-
-
-class Sampler::PlatformData : public Malloced {
- public:
- PlatformData() : vm_tid_(GetThreadID()) {}
-
- int vm_tid() const { return vm_tid_; }
-
- private:
- const int vm_tid_;
-};
-
-
-class SignalSender : public Thread {
- public:
- enum SleepInterval {
- HALF_INTERVAL,
- FULL_INTERVAL
- };
-
- static const int kSignalSenderStackSize = 64 * KB;
-
- explicit SignalSender(int interval)
- : Thread(Thread::Options("SignalSender", kSignalSenderStackSize)),
- vm_tgid_(getpid()),
- interval_(interval) {}
-
- static void SetUp() { if (!mutex_) mutex_ = OS::CreateMutex(); }
- static void TearDown() { delete mutex_; }
-
- static void InstallSignalHandler() {
- struct sigaction sa;
- sa.sa_sigaction = ProfilerSignalHandler;
- sigemptyset(&sa.sa_mask);
- sa.sa_flags = SA_RESTART | SA_SIGINFO;
- signal_handler_installed_ =
- (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
- }
-
- static void RestoreSignalHandler() {
- if (signal_handler_installed_) {
- sigaction(SIGPROF, &old_signal_handler_, 0);
- signal_handler_installed_ = false;
- }
- }
-
- static void AddActiveSampler(Sampler* sampler) {
- ScopedLock lock(mutex_);
- SamplerRegistry::AddActiveSampler(sampler);
- if (instance_ == NULL) {
- // Start a thread that will send the SIGPROF signal to VM threads
- // when CPU profiling is enabled.
- instance_ = new SignalSender(sampler->interval());
- instance_->Start();
- } else {
- ASSERT(instance_->interval_ == sampler->interval());
- }
- }
-
- static void RemoveActiveSampler(Sampler* sampler) {
- ScopedLock lock(mutex_);
- SamplerRegistry::RemoveActiveSampler(sampler);
- if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
- RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
- delete instance_;
- instance_ = NULL;
- RestoreSignalHandler();
- }
- }
-
- // Implement Thread::Run().
- virtual void Run() {
- SamplerRegistry::State state;
- while ((state = SamplerRegistry::GetState()) !=
- SamplerRegistry::HAS_NO_SAMPLERS) {
- bool cpu_profiling_enabled =
- (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
- bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
- if (cpu_profiling_enabled && !signal_handler_installed_) {
- InstallSignalHandler();
- } else if (!cpu_profiling_enabled && signal_handler_installed_) {
- RestoreSignalHandler();
- }
- // When CPU profiling is enabled both JavaScript and C++ code is
- // profiled. We must not suspend.
- if (!cpu_profiling_enabled) {
- if (rate_limiter_.SuspendIfNecessary()) continue;
- }
- if (cpu_profiling_enabled && runtime_profiler_enabled) {
- if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) {
- return;
- }
- Sleep(HALF_INTERVAL);
- if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) {
- return;
- }
- Sleep(HALF_INTERVAL);
- } else {
- if (cpu_profiling_enabled) {
- if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile,
- this)) {
- return;
- }
- }
- if (runtime_profiler_enabled) {
- if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile,
- NULL)) {
- return;
- }
- }
- Sleep(FULL_INTERVAL);
- }
- }
- }
-
- static void DoCpuProfile(Sampler* sampler, void* raw_sender) {
- if (!sampler->IsProfiling()) return;
- SignalSender* sender = reinterpret_cast<SignalSender*>(raw_sender);
- sender->SendProfilingSignal(sampler->platform_data()->vm_tid());
- }
-
- static void DoRuntimeProfile(Sampler* sampler, void* ignored) {
- if (!sampler->isolate()->IsInitialized()) return;
- sampler->isolate()->runtime_profiler()->NotifyTick();
- }
-
- void SendProfilingSignal(int tid) {
- if (!signal_handler_installed_) return;
- // Glibc doesn't provide a wrapper for tgkill(2).
-#if defined(ANDROID)
- syscall(__NR_tgkill, vm_tgid_, tid, SIGPROF);
-#else
- syscall(SYS_tgkill, vm_tgid_, tid, SIGPROF);
-#endif
- }
-
- void Sleep(SleepInterval full_or_half) {
- // Convert ms to us and subtract 100 us to compensate for delays
- // occurring during signal delivery.
- useconds_t interval = interval_ * 1000 - 100;
- if (full_or_half == HALF_INTERVAL) interval /= 2;
-#if defined(ANDROID)
- usleep(interval);
-#else
- int result = usleep(interval);
-#ifdef DEBUG
- if (result != 0 && errno != EINTR) {
- fprintf(stderr,
- "SignalSender usleep error; interval = %u, errno = %d\n",
- interval,
- errno);
- ASSERT(result == 0 || errno == EINTR);
- }
-#endif // DEBUG
- USE(result);
-#endif // ANDROID
- }
-
- const int vm_tgid_;
- const int interval_;
- RuntimeProfilerRateLimiter rate_limiter_;
-
- // Protects the process wide state below.
- static Mutex* mutex_;
- static SignalSender* instance_;
- static bool signal_handler_installed_;
- static struct sigaction old_signal_handler_;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(SignalSender);
-};
-
-
-Mutex* SignalSender::mutex_ = NULL;
-SignalSender* SignalSender::instance_ = NULL;
-struct sigaction SignalSender::old_signal_handler_;
-bool SignalSender::signal_handler_installed_ = false;
-
-
-void OS::SetUp() {
- // Seed the random number generator. We preserve microsecond resolution.
- uint64_t seed = Ticks() ^ (getpid() << 16);
- srandom(static_cast<unsigned int>(seed));
- limit_mutex = CreateMutex();
-
-#ifdef __arm__
- // When running on ARM hardware check that the EABI used by V8 and
- // by the C code is the same.
- bool hard_float = OS::ArmUsingHardFloat();
- if (hard_float) {
-#if !USE_EABI_HARDFLOAT
- PrintF("ERROR: Binary compiled with -mfloat-abi=hard but without "
- "-DUSE_EABI_HARDFLOAT\n");
- exit(1);
-#endif
- } else {
-#if USE_EABI_HARDFLOAT
- PrintF("ERROR: Binary not compiled with -mfloat-abi=hard but with "
- "-DUSE_EABI_HARDFLOAT\n");
- exit(1);
-#endif
- }
-#endif
- SignalSender::SetUp();
-}
-
-
-void OS::TearDown() {
- SignalSender::TearDown();
- delete limit_mutex;
-}
-
-
-Sampler::Sampler(Isolate* isolate, int interval)
- : isolate_(isolate),
- interval_(interval),
- profiling_(false),
- active_(false),
- samples_taken_(0) {
- data_ = new PlatformData;
-}
-
-
-Sampler::~Sampler() {
- ASSERT(!IsActive());
- delete data_;
-}
-
-
-void Sampler::Start() {
- ASSERT(!IsActive());
- SetActive(true);
- SignalSender::AddActiveSampler(this);
-}
-
-
-void Sampler::Stop() {
- ASSERT(IsActive());
- SignalSender::RemoveActiveSampler(this);
- SetActive(false);
+bool VirtualMemory::HasLazyCommits() {
+ return true;
}
-
} } // namespace v8::internal
diff --git a/deps/v8/src/platform-macos.cc b/deps/v8/src/platform-macos.cc
index a216f6e4c..5ffc3fc54 100644
--- a/deps/v8/src/platform-macos.cc
+++ b/deps/v8/src/platform-macos.cc
@@ -58,76 +58,14 @@
#include "v8.h"
-#include "platform-posix.h"
#include "platform.h"
+#include "simulator.h"
#include "vm-state-inl.h"
-// Manually define these here as weak imports, rather than including execinfo.h.
-// This lets us launch on 10.4 which does not have these calls.
-extern "C" {
- extern int backtrace(void**, int) __attribute__((weak_import));
- extern char** backtrace_symbols(void* const*, int)
- __attribute__((weak_import));
- extern void backtrace_symbols_fd(void* const*, int, int)
- __attribute__((weak_import));
-}
-
namespace v8 {
namespace internal {
-// 0 is never a valid thread id on MacOSX since a pthread_t is
-// a pointer.
-static const pthread_t kNoThread = (pthread_t) 0;
-
-
-double ceiling(double x) {
- // Correct Mac OS X Leopard 'ceil' behavior.
- if (-1.0 < x && x < 0.0) {
- return -0.0;
- } else {
- return ceil(x);
- }
-}
-
-
-static Mutex* limit_mutex = NULL;
-
-
-void OS::PostSetUp() {
- POSIXPostSetUp();
-}
-
-
-// We keep the lowest and highest addresses mapped as a quick way of
-// determining that pointers are outside the heap (used mostly in assertions
-// and verification). The estimate is conservative, i.e., not all addresses in
-// 'allocated' space are actually allocated to our heap. The range is
-// [lowest, highest), inclusive on the low end and exclusive on the high end.
-static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
-static void* highest_ever_allocated = reinterpret_cast<void*>(0);
-
-
-static void UpdateAllocatedSpaceLimits(void* address, int size) {
- ASSERT(limit_mutex != NULL);
- ScopedLock lock(limit_mutex);
-
- lowest_ever_allocated = Min(lowest_ever_allocated, address);
- highest_ever_allocated =
- Max(highest_ever_allocated,
- reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
-}
-
-
-bool OS::IsOutsideAllocatedSpace(void* address) {
- return address < lowest_ever_allocated || address >= highest_ever_allocated;
-}
-
-
-size_t OS::AllocateAlignment() {
- return getpagesize();
-}
-
// Constants used for mmap.
// kMmapFd is used to pass vm_alloc flags to tag the region with the user
@@ -153,35 +91,10 @@ void* OS::Allocate(const size_t requested,
return NULL;
}
*allocated = msize;
- UpdateAllocatedSpaceLimits(mbase, msize);
return mbase;
}
-void OS::Free(void* address, const size_t size) {
- // TODO(1240712): munmap has a return value which is ignored here.
- int result = munmap(address, size);
- USE(result);
- ASSERT(result == 0);
-}
-
-
-void OS::Sleep(int milliseconds) {
- usleep(1000 * milliseconds);
-}
-
-
-void OS::Abort() {
- // Redirect to std abort to signal abnormal program termination
- abort();
-}
-
-
-void OS::DebugBreak() {
- asm("int $3");
-}
-
-
class PosixMemoryMappedFile : public OS::MemoryMappedFile {
public:
PosixMemoryMappedFile(FILE* file, void* memory, int size)
@@ -240,7 +153,7 @@ PosixMemoryMappedFile::~PosixMemoryMappedFile() {
}
-void OS::LogSharedLibraryAddresses() {
+void OS::LogSharedLibraryAddresses(Isolate* isolate) {
unsigned int images_count = _dyld_image_count();
for (unsigned int i = 0; i < images_count; ++i) {
const mach_header* header = _dyld_get_image_header(i);
@@ -259,7 +172,7 @@ void OS::LogSharedLibraryAddresses() {
if (code_ptr == NULL) continue;
const uintptr_t slide = _dyld_get_image_vmaddr_slide(i);
const uintptr_t start = reinterpret_cast<uintptr_t>(code_ptr) + slide;
- LOG(Isolate::Current(),
+ LOG(isolate,
SharedLibraryEvent(_dyld_get_image_name(i), start, start + size));
}
}
@@ -269,29 +182,8 @@ void OS::SignalCodeMovingGC() {
}
-uint64_t OS::CpuFeaturesImpliedByPlatform() {
- // MacOSX requires all these to install so we can assume they are present.
- // These constants are defined by the CPUid instructions.
- const uint64_t one = 1;
- return (one << SSE2) | (one << CMOV) | (one << RDTSC) | (one << CPUID);
-}
-
-
-int OS::ActivationFrameAlignment() {
- // OS X activation frames must be 16 byte-aligned; see "Mac OS X ABI
- // Function Call Guide".
- return 16;
-}
-
-
-void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
- OSMemoryBarrier();
- *ptr = value;
-}
-
-
const char* OS::LocalTimezone(double time) {
- if (isnan(time)) return "";
+ if (std::isnan(time)) return "";
time_t tv = static_cast<time_t>(floor(time/msPerSecond));
struct tm* t = localtime(&tv);
if (NULL == t) return "";
@@ -308,39 +200,6 @@ double OS::LocalTimeOffset() {
}
-int OS::StackWalk(Vector<StackFrame> frames) {
- // If weak link to execinfo lib has failed, ie because we are on 10.4, abort.
- if (backtrace == NULL)
- return 0;
-
- int frames_size = frames.length();
- ScopedVector<void*> addresses(frames_size);
-
- int frames_count = backtrace(addresses.start(), frames_size);
-
- char** symbols = backtrace_symbols(addresses.start(), frames_count);
- if (symbols == NULL) {
- return kStackWalkError;
- }
-
- for (int i = 0; i < frames_count; i++) {
- frames[i].address = addresses[i];
- // Format a text representation of the frame based on the information
- // available.
- SNPrintF(MutableCStrVector(frames[i].text,
- kStackWalkMaxTextLen),
- "%s",
- symbols[i]);
- // Make sure line termination is in place.
- frames[i].text[kStackWalkMaxTextLen - 1] = '\0';
- }
-
- free(symbols);
-
- return frames_count;
-}
-
-
VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
@@ -397,12 +256,33 @@ VirtualMemory::~VirtualMemory() {
}
+bool VirtualMemory::IsReserved() {
+ return address_ != NULL;
+}
+
+
void VirtualMemory::Reset() {
address_ = NULL;
size_ = 0;
}
+bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
+ return CommitRegion(address, size, is_executable);
+}
+
+
+bool VirtualMemory::Uncommit(void* address, size_t size) {
+ return UncommitRegion(address, size);
+}
+
+
+bool VirtualMemory::Guard(void* address) {
+ OS::Guard(address, OS::CommitPageSize());
+ return true;
+}
+
+
void* VirtualMemory::ReserveRegion(size_t size) {
void* result = mmap(OS::GetRandomMmapAddr(),
size,
@@ -417,22 +297,6 @@ void* VirtualMemory::ReserveRegion(size_t size) {
}
-bool VirtualMemory::IsReserved() {
- return address_ != NULL;
-}
-
-
-bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
- return CommitRegion(address, size, is_executable);
-}
-
-
-bool VirtualMemory::Guard(void* address) {
- OS::Guard(address, OS::CommitPageSize());
- return true;
-}
-
-
bool VirtualMemory::CommitRegion(void* address,
size_t size,
bool is_executable) {
@@ -445,17 +309,10 @@ bool VirtualMemory::CommitRegion(void* address,
kMmapFdOffset)) {
return false;
}
-
- UpdateAllocatedSpaceLimits(address, size);
return true;
}
-bool VirtualMemory::Uncommit(void* address, size_t size) {
- return UncommitRegion(address, size);
-}
-
-
bool VirtualMemory::UncommitRegion(void* address, size_t size) {
return mmap(address,
size,
@@ -471,457 +328,8 @@ bool VirtualMemory::ReleaseRegion(void* address, size_t size) {
}
-class Thread::PlatformData : public Malloced {
- public:
- PlatformData() : thread_(kNoThread) {}
- pthread_t thread_; // Thread handle for pthread.
-};
-
-
-Thread::Thread(const Options& options)
- : data_(new PlatformData),
- stack_size_(options.stack_size()) {
- set_name(options.name());
-}
-
-
-Thread::~Thread() {
- delete data_;
-}
-
-
-static void SetThreadName(const char* name) {
- // pthread_setname_np is only available in 10.6 or later, so test
- // for it at runtime.
- int (*dynamic_pthread_setname_np)(const char*);
- *reinterpret_cast<void**>(&dynamic_pthread_setname_np) =
- dlsym(RTLD_DEFAULT, "pthread_setname_np");
- if (!dynamic_pthread_setname_np)
- return;
-
- // Mac OS X does not expose the length limit of the name, so hardcode it.
- static const int kMaxNameLength = 63;
- USE(kMaxNameLength);
- ASSERT(Thread::kMaxThreadNameLength <= kMaxNameLength);
- dynamic_pthread_setname_np(name);
-}
-
-
-static void* ThreadEntry(void* arg) {
- Thread* thread = reinterpret_cast<Thread*>(arg);
- // This is also initialized by the first argument to pthread_create() but we
- // don't know which thread will run first (the original thread or the new
- // one) so we initialize it here too.
- thread->data()->thread_ = pthread_self();
- SetThreadName(thread->name());
- ASSERT(thread->data()->thread_ != kNoThread);
- thread->Run();
- return NULL;
-}
-
-
-void Thread::set_name(const char* name) {
- strncpy(name_, name, sizeof(name_));
- name_[sizeof(name_) - 1] = '\0';
-}
-
-
-void Thread::Start() {
- pthread_attr_t* attr_ptr = NULL;
- pthread_attr_t attr;
- if (stack_size_ > 0) {
- pthread_attr_init(&attr);
- pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
- attr_ptr = &attr;
- }
- pthread_create(&data_->thread_, attr_ptr, ThreadEntry, this);
- ASSERT(data_->thread_ != kNoThread);
-}
-
-
-void Thread::Join() {
- pthread_join(data_->thread_, NULL);
+bool VirtualMemory::HasLazyCommits() {
+ return false;
}
-
-#ifdef V8_FAST_TLS_SUPPORTED
-
-static Atomic32 tls_base_offset_initialized = 0;
-intptr_t kMacTlsBaseOffset = 0;
-
-// It's safe to do the initialization more than once, but it has to be
-// done at least once.
-static void InitializeTlsBaseOffset() {
- const size_t kBufferSize = 128;
- char buffer[kBufferSize];
- size_t buffer_size = kBufferSize;
- int ctl_name[] = { CTL_KERN , KERN_OSRELEASE };
- if (sysctl(ctl_name, 2, buffer, &buffer_size, NULL, 0) != 0) {
- V8_Fatal(__FILE__, __LINE__, "V8 failed to get kernel version");
- }
- // The buffer now contains a string of the form XX.YY.ZZ, where
- // XX is the major kernel version component.
- // Make sure the buffer is 0-terminated.
- buffer[kBufferSize - 1] = '\0';
- char* period_pos = strchr(buffer, '.');
- *period_pos = '\0';
- int kernel_version_major =
- static_cast<int>(strtol(buffer, NULL, 10)); // NOLINT
- // The constants below are taken from pthreads.s from the XNU kernel
- // sources archive at www.opensource.apple.com.
- if (kernel_version_major < 11) {
- // 8.x.x (Tiger), 9.x.x (Leopard), 10.x.x (Snow Leopard) have the
- // same offsets.
-#if defined(V8_HOST_ARCH_IA32)
- kMacTlsBaseOffset = 0x48;
-#else
- kMacTlsBaseOffset = 0x60;
-#endif
- } else {
- // 11.x.x (Lion) changed the offset.
- kMacTlsBaseOffset = 0;
- }
-
- Release_Store(&tls_base_offset_initialized, 1);
-}
-
-static void CheckFastTls(Thread::LocalStorageKey key) {
- void* expected = reinterpret_cast<void*>(0x1234CAFE);
- Thread::SetThreadLocal(key, expected);
- void* actual = Thread::GetExistingThreadLocal(key);
- if (expected != actual) {
- V8_Fatal(__FILE__, __LINE__,
- "V8 failed to initialize fast TLS on current kernel");
- }
- Thread::SetThreadLocal(key, NULL);
-}
-
-#endif // V8_FAST_TLS_SUPPORTED
-
-
-Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
-#ifdef V8_FAST_TLS_SUPPORTED
- bool check_fast_tls = false;
- if (tls_base_offset_initialized == 0) {
- check_fast_tls = true;
- InitializeTlsBaseOffset();
- }
-#endif
- pthread_key_t key;
- int result = pthread_key_create(&key, NULL);
- USE(result);
- ASSERT(result == 0);
- LocalStorageKey typed_key = static_cast<LocalStorageKey>(key);
-#ifdef V8_FAST_TLS_SUPPORTED
- // If we just initialized fast TLS support, make sure it works.
- if (check_fast_tls) CheckFastTls(typed_key);
-#endif
- return typed_key;
-}
-
-
-void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
- pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
- int result = pthread_key_delete(pthread_key);
- USE(result);
- ASSERT(result == 0);
-}
-
-
-void* Thread::GetThreadLocal(LocalStorageKey key) {
- pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
- return pthread_getspecific(pthread_key);
-}
-
-
-void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
- pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
- pthread_setspecific(pthread_key, value);
-}
-
-
-void Thread::YieldCPU() {
- sched_yield();
-}
-
-
-class MacOSMutex : public Mutex {
- public:
- MacOSMutex() {
- pthread_mutexattr_t attr;
- pthread_mutexattr_init(&attr);
- pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
- pthread_mutex_init(&mutex_, &attr);
- }
-
- virtual ~MacOSMutex() { pthread_mutex_destroy(&mutex_); }
-
- virtual int Lock() { return pthread_mutex_lock(&mutex_); }
- virtual int Unlock() { return pthread_mutex_unlock(&mutex_); }
-
- virtual bool TryLock() {
- int result = pthread_mutex_trylock(&mutex_);
- // Return false if the lock is busy and locking failed.
- if (result == EBUSY) {
- return false;
- }
- ASSERT(result == 0); // Verify no other errors.
- return true;
- }
-
- private:
- pthread_mutex_t mutex_;
-};
-
-
-Mutex* OS::CreateMutex() {
- return new MacOSMutex();
-}
-
-
-class MacOSSemaphore : public Semaphore {
- public:
- explicit MacOSSemaphore(int count) {
- int r;
- r = semaphore_create(mach_task_self(),
- &semaphore_,
- SYNC_POLICY_FIFO,
- count);
- ASSERT(r == KERN_SUCCESS);
- }
-
- ~MacOSSemaphore() {
- int r;
- r = semaphore_destroy(mach_task_self(), semaphore_);
- ASSERT(r == KERN_SUCCESS);
- }
-
- void Wait() {
- int r;
- do {
- r = semaphore_wait(semaphore_);
- ASSERT(r == KERN_SUCCESS || r == KERN_ABORTED);
- } while (r == KERN_ABORTED);
- }
-
- bool Wait(int timeout);
-
- void Signal() { semaphore_signal(semaphore_); }
-
- private:
- semaphore_t semaphore_;
-};
-
-
-bool MacOSSemaphore::Wait(int timeout) {
- mach_timespec_t ts;
- ts.tv_sec = timeout / 1000000;
- ts.tv_nsec = (timeout % 1000000) * 1000;
- return semaphore_timedwait(semaphore_, ts) != KERN_OPERATION_TIMED_OUT;
-}
-
-
-Semaphore* OS::CreateSemaphore(int count) {
- return new MacOSSemaphore(count);
-}
-
-
-class Sampler::PlatformData : public Malloced {
- public:
- PlatformData() : profiled_thread_(mach_thread_self()) {}
-
- ~PlatformData() {
- // Deallocate Mach port for thread.
- mach_port_deallocate(mach_task_self(), profiled_thread_);
- }
-
- thread_act_t profiled_thread() { return profiled_thread_; }
-
- private:
- // Note: for profiled_thread_, Mach primitives are used instead of pthreads
- // because the latter do not provide the required thread manipulation
- // primitives. For details, consult the "Mac OS X Internals" book, Section 7.3.
- thread_act_t profiled_thread_;
-};
-
-
-class SamplerThread : public Thread {
- public:
- static const int kSamplerThreadStackSize = 64 * KB;
-
- explicit SamplerThread(int interval)
- : Thread(Thread::Options("SamplerThread", kSamplerThreadStackSize)),
- interval_(interval) {}
-
- static void SetUp() { if (!mutex_) mutex_ = OS::CreateMutex(); }
- static void TearDown() { delete mutex_; }
-
- static void AddActiveSampler(Sampler* sampler) {
- ScopedLock lock(mutex_);
- SamplerRegistry::AddActiveSampler(sampler);
- if (instance_ == NULL) {
- instance_ = new SamplerThread(sampler->interval());
- instance_->Start();
- } else {
- ASSERT(instance_->interval_ == sampler->interval());
- }
- }
-
- static void RemoveActiveSampler(Sampler* sampler) {
- ScopedLock lock(mutex_);
- SamplerRegistry::RemoveActiveSampler(sampler);
- if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
- RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
- delete instance_;
- instance_ = NULL;
- }
- }
-
- // Implement Thread::Run().
- virtual void Run() {
- SamplerRegistry::State state;
- while ((state = SamplerRegistry::GetState()) !=
- SamplerRegistry::HAS_NO_SAMPLERS) {
- bool cpu_profiling_enabled =
- (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
- bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
- // When CPU profiling is enabled both JavaScript and C++ code is
- // profiled. We must not suspend.
- if (!cpu_profiling_enabled) {
- if (rate_limiter_.SuspendIfNecessary()) continue;
- }
- if (cpu_profiling_enabled) {
- if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) {
- return;
- }
- }
- if (runtime_profiler_enabled) {
- if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) {
- return;
- }
- }
- OS::Sleep(interval_);
- }
- }
-
- static void DoCpuProfile(Sampler* sampler, void* raw_sampler_thread) {
- if (!sampler->isolate()->IsInitialized()) return;
- if (!sampler->IsProfiling()) return;
- SamplerThread* sampler_thread =
- reinterpret_cast<SamplerThread*>(raw_sampler_thread);
- sampler_thread->SampleContext(sampler);
- }
-
- static void DoRuntimeProfile(Sampler* sampler, void* ignored) {
- if (!sampler->isolate()->IsInitialized()) return;
- sampler->isolate()->runtime_profiler()->NotifyTick();
- }
-
- void SampleContext(Sampler* sampler) {
- thread_act_t profiled_thread = sampler->platform_data()->profiled_thread();
- TickSample sample_obj;
- TickSample* sample = CpuProfiler::TickSampleEvent(sampler->isolate());
- if (sample == NULL) sample = &sample_obj;
-
- if (KERN_SUCCESS != thread_suspend(profiled_thread)) return;
-
-#if V8_HOST_ARCH_X64
- thread_state_flavor_t flavor = x86_THREAD_STATE64;
- x86_thread_state64_t state;
- mach_msg_type_number_t count = x86_THREAD_STATE64_COUNT;
-#if __DARWIN_UNIX03
-#define REGISTER_FIELD(name) __r ## name
-#else
-#define REGISTER_FIELD(name) r ## name
-#endif // __DARWIN_UNIX03
-#elif V8_HOST_ARCH_IA32
- thread_state_flavor_t flavor = i386_THREAD_STATE;
- i386_thread_state_t state;
- mach_msg_type_number_t count = i386_THREAD_STATE_COUNT;
-#if __DARWIN_UNIX03
-#define REGISTER_FIELD(name) __e ## name
-#else
-#define REGISTER_FIELD(name) e ## name
-#endif // __DARWIN_UNIX03
-#else
-#error Unsupported Mac OS X host architecture.
-#endif // V8_HOST_ARCH
-
- if (thread_get_state(profiled_thread,
- flavor,
- reinterpret_cast<natural_t*>(&state),
- &count) == KERN_SUCCESS) {
- sample->state = sampler->isolate()->current_vm_state();
- sample->pc = reinterpret_cast<Address>(state.REGISTER_FIELD(ip));
- sample->sp = reinterpret_cast<Address>(state.REGISTER_FIELD(sp));
- sample->fp = reinterpret_cast<Address>(state.REGISTER_FIELD(bp));
- sampler->SampleStack(sample);
- sampler->Tick(sample);
- }
- thread_resume(profiled_thread);
- }
-
- const int interval_;
- RuntimeProfilerRateLimiter rate_limiter_;
-
- // Protects the process wide state below.
- static Mutex* mutex_;
- static SamplerThread* instance_;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(SamplerThread);
-};
-
-#undef REGISTER_FIELD
-
-
-Mutex* SamplerThread::mutex_ = NULL;
-SamplerThread* SamplerThread::instance_ = NULL;
-
-
-void OS::SetUp() {
- // Seed the random number generator. We preserve microsecond resolution.
- uint64_t seed = Ticks() ^ (getpid() << 16);
- srandom(static_cast<unsigned int>(seed));
- limit_mutex = CreateMutex();
- SamplerThread::SetUp();
-}
-
-
-void OS::TearDown() {
- SamplerThread::TearDown();
- delete limit_mutex;
-}
-
-
-Sampler::Sampler(Isolate* isolate, int interval)
- : isolate_(isolate),
- interval_(interval),
- profiling_(false),
- active_(false),
- samples_taken_(0) {
- data_ = new PlatformData;
-}
-
-
-Sampler::~Sampler() {
- ASSERT(!IsActive());
- delete data_;
-}
-
-
-void Sampler::Start() {
- ASSERT(!IsActive());
- SetActive(true);
- SamplerThread::AddActiveSampler(this);
-}
-
-
-void Sampler::Stop() {
- ASSERT(IsActive());
- SamplerThread::RemoveActiveSampler(this);
- SetActive(false);
-}
-
-
} } // namespace v8::internal
diff --git a/deps/v8/src/platform-nullos.cc b/deps/v8/src/platform-nullos.cc
deleted file mode 100644
index 7aaa7b204..000000000
--- a/deps/v8/src/platform-nullos.cc
+++ /dev/null
@@ -1,511 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Platform specific code for NULLOS goes here
-
-// Minimal include to get access to abort, fprintf and friends for bootstrapping
-// messages.
-#include <stdio.h>
-#include <stdlib.h>
-
-#include "v8.h"
-
-#include "platform.h"
-#include "vm-state-inl.h"
-
-
-namespace v8 {
-namespace internal {
-
-// Give V8 the opportunity to override the default ceil behaviour.
-double ceiling(double x) {
- UNIMPLEMENTED();
- return 0;
-}
-
-
-// Give V8 the opportunity to override the default fmod behavior.
-double modulo(double x, double y) {
- UNIMPLEMENTED();
- return 0;
-}
-
-
-double fast_sin(double x) {
- UNIMPLEMENTED();
- return 0;
-}
-
-
-double fast_cos(double x) {
- UNIMPLEMENTED();
- return 0;
-}
-
-
-double fast_tan(double x) {
- UNIMPLEMENTED();
- return 0;
-}
-
-
-double fast_log(double x) {
- UNIMPLEMENTED();
- return 0;
-}
-
-
-// Initialize OS class early in the V8 startup.
-void OS::SetUp() {
- // Seed the random number generator.
- UNIMPLEMENTED();
-}
-
-
-void OS::PostSetUp() {
- UNIMPLEMENTED();
-}
-
-
-void OS::TearDown() {
- UNIMPLEMENTED();
-}
-
-
-// Returns the accumulated user time for thread.
-int OS::GetUserTime(uint32_t* secs, uint32_t* usecs) {
- UNIMPLEMENTED();
- *secs = 0;
- *usecs = 0;
- return 0;
-}
-
-
-// Returns current time as the number of milliseconds since
-// 00:00:00 UTC, January 1, 1970.
-double OS::TimeCurrentMillis() {
- UNIMPLEMENTED();
- return 0;
-}
-
-
-// Returns ticks in microsecond resolution.
-int64_t OS::Ticks() {
- UNIMPLEMENTED();
- return 0;
-}
-
-
-// Returns a string identifying the current timezone taking into
-// account daylight saving.
-const char* OS::LocalTimezone(double time) {
- UNIMPLEMENTED();
- return "<none>";
-}
-
-
-// Returns the daylight savings offset in milliseconds for the given time.
-double OS::DaylightSavingsOffset(double time) {
- UNIMPLEMENTED();
- return 0;
-}
-
-
-int OS::GetLastError() {
- UNIMPLEMENTED();
- return 0;
-}
-
-
-// Returns the local time offset in milliseconds east of UTC without
-// taking daylight savings time into account.
-double OS::LocalTimeOffset() {
- UNIMPLEMENTED();
- return 0;
-}
-
-
-// Print (debug) message to console.
-void OS::Print(const char* format, ...) {
- UNIMPLEMENTED();
-}
-
-
-// Print (debug) message to console.
-void OS::VPrint(const char* format, va_list args) {
- // Minimalistic implementation for bootstrapping.
- vfprintf(stdout, format, args);
-}
-
-
-void OS::FPrint(FILE* out, const char* format, ...) {
- va_list args;
- va_start(args, format);
- VFPrint(out, format, args);
- va_end(args);
-}
-
-
-void OS::VFPrint(FILE* out, const char* format, va_list args) {
- vfprintf(out, format, args);
-}
-
-
-// Print error message to console.
-void OS::PrintError(const char* format, ...) {
- // Minimalistic implementation for bootstrapping.
- va_list args;
- va_start(args, format);
- VPrintError(format, args);
- va_end(args);
-}
-
-
-// Print error message to console.
-void OS::VPrintError(const char* format, va_list args) {
- // Minimalistic implementation for bootstrapping.
- vfprintf(stderr, format, args);
-}
-
-
-int OS::SNPrintF(char* str, size_t size, const char* format, ...) {
- UNIMPLEMENTED();
- return 0;
-}
-
-
-int OS::VSNPrintF(char* str, size_t size, const char* format, va_list args) {
- UNIMPLEMENTED();
- return 0;
-}
-
-
-uint64_t OS::CpuFeaturesImpliedByPlatform() {
- return 0;
-}
-
-
-double OS::nan_value() {
- UNIMPLEMENTED();
- return 0;
-}
-
-
-CpuImplementer OS::GetCpuImplementer() {
- UNIMPLEMENTED();
-}
-
-
-bool OS::ArmCpuHasFeature(CpuFeature feature) {
- UNIMPLEMENTED();
-}
-
-
-bool OS::ArmUsingHardFloat() {
- UNIMPLEMENTED();
-}
-
-
-bool OS::IsOutsideAllocatedSpace(void* address) {
- UNIMPLEMENTED();
- return false;
-}
-
-
-size_t OS::AllocateAlignment() {
- UNIMPLEMENTED();
- return 0;
-}
-
-
-void* OS::Allocate(const size_t requested,
- size_t* allocated,
- bool executable) {
- UNIMPLEMENTED();
- return NULL;
-}
-
-
-void OS::Free(void* buf, const size_t length) {
- // TODO(1240712): potential system call return value which is ignored here.
- UNIMPLEMENTED();
-}
-
-
-void OS::Guard(void* address, const size_t size) {
- UNIMPLEMENTED();
-}
-
-
-void OS::Sleep(int milliseconds) {
- UNIMPLEMENTED();
-}
-
-
-void OS::Abort() {
- // Minimalistic implementation for bootstrapping.
- abort();
-}
-
-
-void OS::DebugBreak() {
- UNIMPLEMENTED();
-}
-
-
-OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
- UNIMPLEMENTED();
- return NULL;
-}
-
-
-OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
- void* initial) {
- UNIMPLEMENTED();
- return NULL;
-}
-
-
-void OS::LogSharedLibraryAddresses() {
- UNIMPLEMENTED();
-}
-
-
-void OS::SignalCodeMovingGC() {
- UNIMPLEMENTED();
-}
-
-
-int OS::StackWalk(Vector<OS::StackFrame> frames) {
- UNIMPLEMENTED();
- return 0;
-}
-
-
-VirtualMemory::VirtualMemory(size_t size, void* address_hint) {
- UNIMPLEMENTED();
-}
-
-
-VirtualMemory::~VirtualMemory() {
- UNIMPLEMENTED();
-}
-
-
-bool VirtualMemory::IsReserved() {
- UNIMPLEMENTED();
- return false;
-}
-
-
-bool VirtualMemory::Commit(void* address, size_t size, bool executable) {
- UNIMPLEMENTED();
- return false;
-}
-
-
-bool VirtualMemory::Uncommit(void* address, size_t size) {
- UNIMPLEMENTED();
- return false;
-}
-
-
-bool VirtualMemory::Guard(void* address) {
- UNIMPLEMENTED();
- return false;
-}
-
-
-class Thread::PlatformData : public Malloced {
- public:
- PlatformData() {
- UNIMPLEMENTED();
- }
-
- void* pd_data_;
-};
-
-
-Thread::Thread(const Options& options)
- : data_(new PlatformData()),
- stack_size_(options.stack_size) {
- set_name(options.name);
- UNIMPLEMENTED();
-}
-
-
-Thread::Thread(const char* name)
- : data_(new PlatformData()),
- stack_size_(0) {
- set_name(name);
- UNIMPLEMENTED();
-}
-
-
-Thread::~Thread() {
- delete data_;
- UNIMPLEMENTED();
-}
-
-
-void Thread::set_name(const char* name) {
- strncpy(name_, name, sizeof(name_));
- name_[sizeof(name_) - 1] = '\0';
-}
-
-
-void Thread::Start() {
- UNIMPLEMENTED();
-}
-
-
-void Thread::Join() {
- UNIMPLEMENTED();
-}
-
-
-Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
- UNIMPLEMENTED();
- return static_cast<LocalStorageKey>(0);
-}
-
-
-void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
- UNIMPLEMENTED();
-}
-
-
-void* Thread::GetThreadLocal(LocalStorageKey key) {
- UNIMPLEMENTED();
- return NULL;
-}
-
-
-void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
- UNIMPLEMENTED();
-}
-
-
-void Thread::YieldCPU() {
- UNIMPLEMENTED();
-}
-
-
-class NullMutex : public Mutex {
- public:
- NullMutex() : data_(NULL) {
- UNIMPLEMENTED();
- }
-
- virtual ~NullMutex() {
- UNIMPLEMENTED();
- }
-
- virtual int Lock() {
- UNIMPLEMENTED();
- return 0;
- }
-
- virtual int Unlock() {
- UNIMPLEMENTED();
- return 0;
- }
-
- private:
- void* data_;
-};
-
-
-Mutex* OS::CreateMutex() {
- UNIMPLEMENTED();
- return new NullMutex();
-}
-
-
-class NullSemaphore : public Semaphore {
- public:
- explicit NullSemaphore(int count) : data_(NULL) {
- UNIMPLEMENTED();
- }
-
- virtual ~NullSemaphore() {
- UNIMPLEMENTED();
- }
-
- virtual void Wait() {
- UNIMPLEMENTED();
- }
-
- virtual void Signal() {
- UNIMPLEMENTED();
- }
- private:
- void* data_;
-};
-
-
-Semaphore* OS::CreateSemaphore(int count) {
- UNIMPLEMENTED();
- return new NullSemaphore(count);
-}
-
-
-class ProfileSampler::PlatformData : public Malloced {
- public:
- PlatformData() {
- UNIMPLEMENTED();
- }
-};
-
-
-ProfileSampler::ProfileSampler(int interval) {
- UNIMPLEMENTED();
- // Shared setup follows.
- data_ = new PlatformData();
- interval_ = interval;
- active_ = false;
-}
-
-
-ProfileSampler::~ProfileSampler() {
- UNIMPLEMENTED();
- // Shared tear down follows.
- delete data_;
-}
-
-
-void ProfileSampler::Start() {
- UNIMPLEMENTED();
-}
-
-
-void ProfileSampler::Stop() {
- UNIMPLEMENTED();
-}
-
-
-} } // namespace v8::internal
diff --git a/deps/v8/src/platform-openbsd.cc b/deps/v8/src/platform-openbsd.cc
index 408d4dc0f..710c3904a 100644
--- a/deps/v8/src/platform-openbsd.cc
+++ b/deps/v8/src/platform-openbsd.cc
@@ -42,7 +42,6 @@
#include <sys/stat.h> // open
#include <fcntl.h> // open
#include <unistd.h> // sysconf
-#include <execinfo.h> // backtrace, backtrace_symbols
#include <strings.h> // index
#include <errno.h>
#include <stdarg.h>
@@ -51,7 +50,6 @@
#include "v8.h"
-#include "platform-posix.h"
#include "platform.h"
#include "v8threads.h"
#include "vm-state-inl.h"
@@ -60,72 +58,9 @@
namespace v8 {
namespace internal {
-// 0 is never a valid thread id on Linux and OpenBSD since tids and pids share a
-// name space and pid 0 is reserved (see man 2 kill).
-static const pthread_t kNoThread = (pthread_t) 0;
-
-
-double ceiling(double x) {
- return ceil(x);
-}
-
-
-static Mutex* limit_mutex = NULL;
-
-
-static void* GetRandomMmapAddr() {
- Isolate* isolate = Isolate::UncheckedCurrent();
- // Note that the current isolate isn't set up in a call path via
- // CpuFeatures::Probe. We don't care about randomization in this case because
- // the code page is immediately freed.
- if (isolate != NULL) {
-#ifdef V8_TARGET_ARCH_X64
- uint64_t rnd1 = V8::RandomPrivate(isolate);
- uint64_t rnd2 = V8::RandomPrivate(isolate);
- uint64_t raw_addr = (rnd1 << 32) ^ rnd2;
- // Currently available CPUs have 48 bits of virtual addressing. Truncate
- // the hint address to 46 bits to give the kernel a fighting chance of
- // fulfilling our placement request.
- raw_addr &= V8_UINT64_C(0x3ffffffff000);
-#else
- uint32_t raw_addr = V8::RandomPrivate(isolate);
- // The range 0x20000000 - 0x60000000 is relatively unpopulated across a
- // variety of ASLR modes (PAE kernel, NX compat mode, etc).
- raw_addr &= 0x3ffff000;
- raw_addr += 0x20000000;
-#endif
- return reinterpret_cast<void*>(raw_addr);
- }
- return NULL;
-}
-
-
-void OS::PostSetUp() {
- POSIXPostSetUp();
-}
-
-
-uint64_t OS::CpuFeaturesImpliedByPlatform() {
- return 0;
-}
-
-
-int OS::ActivationFrameAlignment() {
- // With gcc 4.4 the tree vectorization optimizer can generate code
- // that requires 16 byte alignment such as movdqa on x86.
- return 16;
-}
-
-
-void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
- __asm__ __volatile__("" : : : "memory");
- // An x86 store acts as a release barrier.
- *ptr = value;
-}
-
const char* OS::LocalTimezone(double time) {
- if (isnan(time)) return "";
+ if (std::isnan(time)) return "";
time_t tv = static_cast<time_t>(floor(time/msPerSecond));
struct tm* t = localtime(&tv);
if (NULL == t) return "";
@@ -142,42 +77,12 @@ double OS::LocalTimeOffset() {
}
-// We keep the lowest and highest addresses mapped as a quick way of
-// determining that pointers are outside the heap (used mostly in assertions
-// and verification). The estimate is conservative, i.e., not all addresses in
-// 'allocated' space are actually allocated to our heap. The range is
-// [lowest, highest), inclusive on the low end and exclusive on the high end.
-static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
-static void* highest_ever_allocated = reinterpret_cast<void*>(0);
-
-
-static void UpdateAllocatedSpaceLimits(void* address, int size) {
- ASSERT(limit_mutex != NULL);
- ScopedLock lock(limit_mutex);
-
- lowest_ever_allocated = Min(lowest_ever_allocated, address);
- highest_ever_allocated =
- Max(highest_ever_allocated,
- reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
-}
-
-
-bool OS::IsOutsideAllocatedSpace(void* address) {
- return address < lowest_ever_allocated || address >= highest_ever_allocated;
-}
-
-
-size_t OS::AllocateAlignment() {
- return sysconf(_SC_PAGESIZE);
-}
-
-
void* OS::Allocate(const size_t requested,
size_t* allocated,
bool is_executable) {
const size_t msize = RoundUp(requested, AllocateAlignment());
int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
- void* addr = GetRandomMmapAddr();
+ void* addr = OS::GetRandomMmapAddr();
void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
if (mbase == MAP_FAILED) {
LOG(i::Isolate::Current(),
@@ -185,36 +90,10 @@ void* OS::Allocate(const size_t requested,
return NULL;
}
*allocated = msize;
- UpdateAllocatedSpaceLimits(mbase, msize);
return mbase;
}
-void OS::Free(void* address, const size_t size) {
- // TODO(1240712): munmap has a return value which is ignored here.
- int result = munmap(address, size);
- USE(result);
- ASSERT(result == 0);
-}
-
-
-void OS::Sleep(int milliseconds) {
- unsigned int ms = static_cast<unsigned int>(milliseconds);
- usleep(1000 * ms);
-}
-
-
-void OS::Abort() {
- // Redirect to std abort to signal abnormal program termination.
- abort();
-}
-
-
-void OS::DebugBreak() {
- asm("int $3");
-}
-
-
class PosixMemoryMappedFile : public OS::MemoryMappedFile {
public:
PosixMemoryMappedFile(FILE* file, void* memory, int size)
@@ -263,7 +142,7 @@ PosixMemoryMappedFile::~PosixMemoryMappedFile() {
}
-void OS::LogSharedLibraryAddresses() {
+void OS::LogSharedLibraryAddresses(Isolate* isolate) {
// This function assumes that the layout of the file is as follows:
// hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name]
// If we encounter an unexpected situation we abort scanning further entries.
@@ -274,7 +153,6 @@ void OS::LogSharedLibraryAddresses() {
const int kLibNameLen = FILENAME_MAX + 1;
char* lib_name = reinterpret_cast<char*>(malloc(kLibNameLen));
- i::Isolate* isolate = ISOLATE;
// This loop will terminate once the scanning hits an EOF.
while (true) {
uintptr_t start, end;
@@ -334,6 +212,10 @@ void OS::SignalCodeMovingGC() {
// kernel log.
int size = sysconf(_SC_PAGESIZE);
FILE* f = fopen(FLAG_gc_fake_mmap, "w+");
+ if (f == NULL) {
+ OS::PrintError("Failed to open %s\n", FLAG_gc_fake_mmap);
+ OS::Abort();
+ }
void* addr = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_PRIVATE,
fileno(f), 0);
ASSERT(addr != MAP_FAILED);
@@ -342,45 +224,17 @@ void OS::SignalCodeMovingGC() {
}
-int OS::StackWalk(Vector<OS::StackFrame> frames) {
- // backtrace is a glibc extension.
- int frames_size = frames.length();
- ScopedVector<void*> addresses(frames_size);
-
- int frames_count = backtrace(addresses.start(), frames_size);
-
- char** symbols = backtrace_symbols(addresses.start(), frames_count);
- if (symbols == NULL) {
- return kStackWalkError;
- }
-
- for (int i = 0; i < frames_count; i++) {
- frames[i].address = addresses[i];
- // Format a text representation of the frame based on the information
- // available.
- SNPrintF(MutableCStrVector(frames[i].text, kStackWalkMaxTextLen),
- "%s",
- symbols[i]);
- // Make sure line termination is in place.
- frames[i].text[kStackWalkMaxTextLen - 1] = '\0';
- }
-
- free(symbols);
-
- return frames_count;
-}
-
// Constants used for mmap.
static const int kMmapFd = -1;
static const int kMmapFdOffset = 0;
+
VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
-VirtualMemory::VirtualMemory(size_t size) {
- address_ = ReserveRegion(size);
- size_ = size;
-}
+
+VirtualMemory::VirtualMemory(size_t size)
+ : address_(ReserveRegion(size)), size_(size) { }
VirtualMemory::VirtualMemory(size_t size, size_t alignment)
@@ -388,7 +242,7 @@ VirtualMemory::VirtualMemory(size_t size, size_t alignment)
ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
size_t request_size = RoundUp(size + alignment,
static_cast<intptr_t>(OS::AllocateAlignment()));
- void* reservation = mmap(GetRandomMmapAddr(),
+ void* reservation = mmap(OS::GetRandomMmapAddr(),
request_size,
PROT_NONE,
MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
@@ -460,7 +314,7 @@ bool VirtualMemory::Guard(void* address) {
void* VirtualMemory::ReserveRegion(size_t size) {
- void* result = mmap(GetRandomMmapAddr(),
+ void* result = mmap(OS::GetRandomMmapAddr(),
size,
PROT_NONE,
MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
@@ -483,8 +337,6 @@ bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
kMmapFdOffset)) {
return false;
}
-
- UpdateAllocatedSpaceLimits(base, size);
return true;
}
@@ -504,478 +356,9 @@ bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
}
-class Thread::PlatformData : public Malloced {
- public:
- PlatformData() : thread_(kNoThread) {}
-
- pthread_t thread_; // Thread handle for pthread.
-};
-
-Thread::Thread(const Options& options)
- : data_(new PlatformData()),
- stack_size_(options.stack_size()) {
- set_name(options.name());
-}
-
-
-Thread::~Thread() {
- delete data_;
-}
-
-
-static void* ThreadEntry(void* arg) {
- Thread* thread = reinterpret_cast<Thread*>(arg);
- // This is also initialized by the first argument to pthread_create() but we
- // don't know which thread will run first (the original thread or the new
- // one) so we initialize it here too.
-#ifdef PR_SET_NAME
- prctl(PR_SET_NAME,
- reinterpret_cast<unsigned long>(thread->name()), // NOLINT
- 0, 0, 0);
-#endif
- thread->data()->thread_ = pthread_self();
- ASSERT(thread->data()->thread_ != kNoThread);
- thread->Run();
- return NULL;
-}
-
-
-void Thread::set_name(const char* name) {
- strncpy(name_, name, sizeof(name_));
- name_[sizeof(name_) - 1] = '\0';
-}
-
-
-void Thread::Start() {
- pthread_attr_t* attr_ptr = NULL;
- pthread_attr_t attr;
- if (stack_size_ > 0) {
- pthread_attr_init(&attr);
- pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
- attr_ptr = &attr;
- }
- pthread_create(&data_->thread_, attr_ptr, ThreadEntry, this);
- ASSERT(data_->thread_ != kNoThread);
-}
-
-
-void Thread::Join() {
- pthread_join(data_->thread_, NULL);
-}
-
-
-Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
- pthread_key_t key;
- int result = pthread_key_create(&key, NULL);
- USE(result);
- ASSERT(result == 0);
- return static_cast<LocalStorageKey>(key);
-}
-
-
-void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
- pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
- int result = pthread_key_delete(pthread_key);
- USE(result);
- ASSERT(result == 0);
-}
-
-
-void* Thread::GetThreadLocal(LocalStorageKey key) {
- pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
- return pthread_getspecific(pthread_key);
-}
-
-
-void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
- pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
- pthread_setspecific(pthread_key, value);
-}
-
-
-void Thread::YieldCPU() {
- sched_yield();
-}
-
-
-class OpenBSDMutex : public Mutex {
- public:
- OpenBSDMutex() {
- pthread_mutexattr_t attrs;
- int result = pthread_mutexattr_init(&attrs);
- ASSERT(result == 0);
- result = pthread_mutexattr_settype(&attrs, PTHREAD_MUTEX_RECURSIVE);
- ASSERT(result == 0);
- result = pthread_mutex_init(&mutex_, &attrs);
- ASSERT(result == 0);
- USE(result);
- }
-
- virtual ~OpenBSDMutex() { pthread_mutex_destroy(&mutex_); }
-
- virtual int Lock() {
- int result = pthread_mutex_lock(&mutex_);
- return result;
- }
-
- virtual int Unlock() {
- int result = pthread_mutex_unlock(&mutex_);
- return result;
- }
-
- virtual bool TryLock() {
- int result = pthread_mutex_trylock(&mutex_);
- // Return false if the lock is busy and locking failed.
- if (result == EBUSY) {
- return false;
- }
- ASSERT(result == 0); // Verify no other errors.
- return true;
- }
-
- private:
- pthread_mutex_t mutex_; // Pthread mutex for POSIX platforms.
-};
-
-
-Mutex* OS::CreateMutex() {
- return new OpenBSDMutex();
-}
-
-
-class OpenBSDSemaphore : public Semaphore {
- public:
- explicit OpenBSDSemaphore(int count) { sem_init(&sem_, 0, count); }
- virtual ~OpenBSDSemaphore() { sem_destroy(&sem_); }
-
- virtual void Wait();
- virtual bool Wait(int timeout);
- virtual void Signal() { sem_post(&sem_); }
- private:
- sem_t sem_;
-};
-
-
-void OpenBSDSemaphore::Wait() {
- while (true) {
- int result = sem_wait(&sem_);
- if (result == 0) return; // Successfully got semaphore.
- CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
- }
-}
-
-
-#ifndef TIMEVAL_TO_TIMESPEC
-#define TIMEVAL_TO_TIMESPEC(tv, ts) do { \
- (ts)->tv_sec = (tv)->tv_sec; \
- (ts)->tv_nsec = (tv)->tv_usec * 1000; \
-} while (false)
-#endif
-
-
-bool OpenBSDSemaphore::Wait(int timeout) {
- const long kOneSecondMicros = 1000000; // NOLINT
-
- // Split timeout into second and microsecond parts.
- struct timeval delta;
- delta.tv_usec = timeout % kOneSecondMicros;
- delta.tv_sec = timeout / kOneSecondMicros;
-
- struct timeval current_time;
- // Get the current time.
- if (gettimeofday(&current_time, NULL) == -1) {
- return false;
- }
-
- // Calculate time for end of timeout.
- struct timeval end_time;
- timeradd(&current_time, &delta, &end_time);
-
- struct timespec ts;
- TIMEVAL_TO_TIMESPEC(&end_time, &ts);
-
- int to = ts.tv_sec;
-
- while (true) {
- int result = sem_trywait(&sem_);
- if (result == 0) return true; // Successfully got semaphore.
- if (!to) return false; // Timeout.
- CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
- usleep(ts.tv_nsec / 1000);
- to--;
- }
-}
-
-Semaphore* OS::CreateSemaphore(int count) {
- return new OpenBSDSemaphore(count);
-}
-
-
-static pthread_t GetThreadID() {
- return pthread_self();
-}
-
-static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
- USE(info);
- if (signal != SIGPROF) return;
- Isolate* isolate = Isolate::UncheckedCurrent();
- if (isolate == NULL || !isolate->IsInitialized() || !isolate->IsInUse()) {
- // We require a fully initialized and entered isolate.
- return;
- }
- if (v8::Locker::IsActive() &&
- !isolate->thread_manager()->IsLockedByCurrentThread()) {
- return;
- }
-
- Sampler* sampler = isolate->logger()->sampler();
- if (sampler == NULL || !sampler->IsActive()) return;
-
- TickSample sample_obj;
- TickSample* sample = CpuProfiler::TickSampleEvent(isolate);
- if (sample == NULL) sample = &sample_obj;
-
- // Extracting the sample from the context is extremely machine dependent.
- sample->state = isolate->current_vm_state();
- ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
-#ifdef __NetBSD__
- mcontext_t& mcontext = ucontext->uc_mcontext;
-#if V8_HOST_ARCH_IA32
- sample->pc = reinterpret_cast<Address>(mcontext.__gregs[_REG_EIP]);
- sample->sp = reinterpret_cast<Address>(mcontext.__gregs[_REG_ESP]);
- sample->fp = reinterpret_cast<Address>(mcontext.__gregs[_REG_EBP]);
-#elif V8_HOST_ARCH_X64
- sample->pc = reinterpret_cast<Address>(mcontext.__gregs[_REG_RIP]);
- sample->sp = reinterpret_cast<Address>(mcontext.__gregs[_REG_RSP]);
- sample->fp = reinterpret_cast<Address>(mcontext.__gregs[_REG_RBP]);
-#endif // V8_HOST_ARCH
-#else // OpenBSD
-#if V8_HOST_ARCH_IA32
- sample->pc = reinterpret_cast<Address>(ucontext->sc_eip);
- sample->sp = reinterpret_cast<Address>(ucontext->sc_esp);
- sample->fp = reinterpret_cast<Address>(ucontext->sc_ebp);
-#elif V8_HOST_ARCH_X64
- sample->pc = reinterpret_cast<Address>(ucontext->sc_rip);
- sample->sp = reinterpret_cast<Address>(ucontext->sc_rsp);
- sample->fp = reinterpret_cast<Address>(ucontext->sc_rbp);
-#endif // V8_HOST_ARCH
-#endif // __NetBSD__
- sampler->SampleStack(sample);
- sampler->Tick(sample);
-}
-
-
-class Sampler::PlatformData : public Malloced {
- public:
- PlatformData() : vm_tid_(GetThreadID()) {}
-
- pthread_t vm_tid() const { return vm_tid_; }
-
- private:
- pthread_t vm_tid_;
-};
-
-
-class SignalSender : public Thread {
- public:
- enum SleepInterval {
- HALF_INTERVAL,
- FULL_INTERVAL
- };
-
- static const int kSignalSenderStackSize = 64 * KB;
-
- explicit SignalSender(int interval)
- : Thread(Thread::Options("SignalSender", kSignalSenderStackSize)),
- vm_tgid_(getpid()),
- interval_(interval) {}
-
- static void SetUp() { if (!mutex_) mutex_ = OS::CreateMutex(); }
- static void TearDown() { delete mutex_; }
-
- static void InstallSignalHandler() {
- struct sigaction sa;
- sa.sa_sigaction = ProfilerSignalHandler;
- sigemptyset(&sa.sa_mask);
- sa.sa_flags = SA_RESTART | SA_SIGINFO;
- signal_handler_installed_ =
- (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
- }
-
- static void RestoreSignalHandler() {
- if (signal_handler_installed_) {
- sigaction(SIGPROF, &old_signal_handler_, 0);
- signal_handler_installed_ = false;
- }
- }
-
- static void AddActiveSampler(Sampler* sampler) {
- ScopedLock lock(mutex_);
- SamplerRegistry::AddActiveSampler(sampler);
- if (instance_ == NULL) {
- // Start a thread that will send SIGPROF signal to VM threads,
- // when CPU profiling will be enabled.
- instance_ = new SignalSender(sampler->interval());
- instance_->Start();
- } else {
- ASSERT(instance_->interval_ == sampler->interval());
- }
- }
-
- static void RemoveActiveSampler(Sampler* sampler) {
- ScopedLock lock(mutex_);
- SamplerRegistry::RemoveActiveSampler(sampler);
- if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
- RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
- delete instance_;
- instance_ = NULL;
- RestoreSignalHandler();
- }
- }
-
- // Implement Thread::Run().
- virtual void Run() {
- SamplerRegistry::State state;
- while ((state = SamplerRegistry::GetState()) !=
- SamplerRegistry::HAS_NO_SAMPLERS) {
- bool cpu_profiling_enabled =
- (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
- bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
- if (cpu_profiling_enabled && !signal_handler_installed_) {
- InstallSignalHandler();
- } else if (!cpu_profiling_enabled && signal_handler_installed_) {
- RestoreSignalHandler();
- }
- // When CPU profiling is enabled both JavaScript and C++ code is
- // profiled. We must not suspend.
- if (!cpu_profiling_enabled) {
- if (rate_limiter_.SuspendIfNecessary()) continue;
- }
- if (cpu_profiling_enabled && runtime_profiler_enabled) {
- if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) {
- return;
- }
- Sleep(HALF_INTERVAL);
- if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) {
- return;
- }
- Sleep(HALF_INTERVAL);
- } else {
- if (cpu_profiling_enabled) {
- if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile,
- this)) {
- return;
- }
- }
- if (runtime_profiler_enabled) {
- if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile,
- NULL)) {
- return;
- }
- }
- Sleep(FULL_INTERVAL);
- }
- }
- }
-
- static void DoCpuProfile(Sampler* sampler, void* raw_sender) {
- if (!sampler->IsProfiling()) return;
- SignalSender* sender = reinterpret_cast<SignalSender*>(raw_sender);
- sender->SendProfilingSignal(sampler->platform_data()->vm_tid());
- }
-
- static void DoRuntimeProfile(Sampler* sampler, void* ignored) {
- if (!sampler->isolate()->IsInitialized()) return;
- sampler->isolate()->runtime_profiler()->NotifyTick();
- }
-
- void SendProfilingSignal(pthread_t tid) {
- if (!signal_handler_installed_) return;
- pthread_kill(tid, SIGPROF);
- }
-
- void Sleep(SleepInterval full_or_half) {
- // Convert ms to us and subtract 100 us to compensate for delays
- // occurring during signal delivery.
- useconds_t interval = interval_ * 1000 - 100;
- if (full_or_half == HALF_INTERVAL) interval /= 2;
- int result = usleep(interval);
-#ifdef DEBUG
- if (result != 0 && errno != EINTR) {
- fprintf(stderr,
- "SignalSender usleep error; interval = %u, errno = %d\n",
- interval,
- errno);
- ASSERT(result == 0 || errno == EINTR);
- }
-#endif
- USE(result);
- }
-
- const int vm_tgid_;
- const int interval_;
- RuntimeProfilerRateLimiter rate_limiter_;
-
- // Protects the process wide state below.
- static Mutex* mutex_;
- static SignalSender* instance_;
- static bool signal_handler_installed_;
- static struct sigaction old_signal_handler_;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(SignalSender);
-};
-
-
-Mutex* SignalSender::mutex_ = NULL;
-SignalSender* SignalSender::instance_ = NULL;
-struct sigaction SignalSender::old_signal_handler_;
-bool SignalSender::signal_handler_installed_ = false;
-
-
-void OS::SetUp() {
- // Seed the random number generator. We preserve microsecond resolution.
- uint64_t seed = Ticks() ^ (getpid() << 16);
- srandom(static_cast<unsigned int>(seed));
- limit_mutex = CreateMutex();
- SignalSender::SetUp();
-}
-
-
-void OS::TearDown() {
- SignalSender::TearDown();
- delete limit_mutex;
+bool VirtualMemory::HasLazyCommits() {
+ // TODO(alph): implement for the platform.
+ return false;
}
-
-Sampler::Sampler(Isolate* isolate, int interval)
- : isolate_(isolate),
- interval_(interval),
- profiling_(false),
- active_(false),
- samples_taken_(0) {
- data_ = new PlatformData;
-}
-
-
-Sampler::~Sampler() {
- ASSERT(!IsActive());
- delete data_;
-}
-
-
-void Sampler::Start() {
- ASSERT(!IsActive());
- SetActive(true);
- SignalSender::AddActiveSampler(this);
-}
-
-
-void Sampler::Stop() {
- ASSERT(IsActive());
- SignalSender::RemoveActiveSampler(this);
- SetActive(false);
-}
-
-
} } // namespace v8::internal
diff --git a/deps/v8/src/platform-posix.cc b/deps/v8/src/platform-posix.cc
index ad74eba8d..797557d76 100644
--- a/deps/v8/src/platform-posix.cc
+++ b/deps/v8/src/platform-posix.cc
@@ -29,8 +29,12 @@
// own but contains the parts which are the same across POSIX platforms Linux,
// Mac OS, FreeBSD and OpenBSD.
-#include "platform-posix.h"
-
+#include <dlfcn.h>
+#include <pthread.h>
+#if defined(__DragonFly__) || defined(__FreeBSD__) || defined(__OpenBSD__)
+#include <pthread_np.h> // for pthread_set_name_np
+#endif
+#include <sched.h> // for sched_yield
#include <unistd.h>
#include <errno.h>
#include <time.h>
@@ -41,6 +45,13 @@
#include <sys/time.h>
#include <sys/types.h>
#include <sys/stat.h>
+#if defined(__linux__)
+#include <sys/prctl.h> // for prctl
+#endif
+#if defined(__APPLE__) || defined(__DragonFly__) || defined(__FreeBSD__) || \
+ defined(__NetBSD__) || defined(__OpenBSD__)
+#include <sys/sysctl.h> // for sysctl
+#endif
#include <arpa/inet.h>
#include <netinet/in.h>
@@ -56,11 +67,27 @@
#include "v8.h"
#include "codegen.h"
+#include "isolate-inl.h"
#include "platform.h"
namespace v8 {
namespace internal {
+// 0 is never a valid thread id.
+static const pthread_t kNoThread = (pthread_t) 0;
+
+
+uint64_t OS::CpuFeaturesImpliedByPlatform() {
+#if V8_OS_MACOSX
+ // Mac OS X requires all of these features in order to install, so we can
+ // assume they are present.
+ // These constants are defined by the CPUid instructions.
+ const uint64_t one = 1;
+ return (one << SSE2) | (one << CMOV);
+#else
+ return 0; // Nothing special about the other systems.
+#endif
+}
+
// Maximum size of the virtual memory. 0 means there is no artificial
// limit.
@@ -73,43 +100,126 @@ intptr_t OS::MaxVirtualMemory() {
}
+uint64_t OS::TotalPhysicalMemory() {
+#if V8_OS_MACOSX
+ int mib[2];
+ mib[0] = CTL_HW;
+ mib[1] = HW_MEMSIZE;
+ int64_t size = 0;
+ size_t len = sizeof(size);
+ if (sysctl(mib, 2, &size, &len, NULL, 0) != 0) {
+ UNREACHABLE();
+ return 0;
+ }
+ return static_cast<uint64_t>(size);
+#elif V8_OS_FREEBSD
+ int pages, page_size;
+ size_t size = sizeof(pages);
+ sysctlbyname("vm.stats.vm.v_page_count", &pages, &size, NULL, 0);
+ sysctlbyname("vm.stats.vm.v_page_size", &page_size, &size, NULL, 0);
+ if (pages == -1 || page_size == -1) {
+ UNREACHABLE();
+ return 0;
+ }
+ return static_cast<uint64_t>(pages) * page_size;
+#elif V8_OS_CYGWIN
+ MEMORYSTATUS memory_info;
+ memory_info.dwLength = sizeof(memory_info);
+ if (!GlobalMemoryStatus(&memory_info)) {
+ UNREACHABLE();
+ return 0;
+ }
+ return static_cast<uint64_t>(memory_info.dwTotalPhys);
+#else
+ intptr_t pages = sysconf(_SC_PHYS_PAGES);
+ intptr_t page_size = sysconf(_SC_PAGESIZE);
+ if (pages == -1 || page_size == -1) {
+ UNREACHABLE();
+ return 0;
+ }
+ return static_cast<uint64_t>(pages) * page_size;
+#endif
+}
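// A minimal standalone sketch of the generic sysconf() fallback above,
// assuming a libc that defines _SC_PHYS_PAGES (a common extension rather than
// strict POSIX); the helper name below is illustrative only.
static uint64_t TotalRamBytesSketch() {
  long pages = sysconf(_SC_PHYS_PAGES);    // number of physical pages
  long page_size = sysconf(_SC_PAGESIZE);  // bytes per page
  if (pages == -1 || page_size == -1) return 0;  // query failed or unsupported
  return static_cast<uint64_t>(pages) * static_cast<uint64_t>(page_size);
}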
+
+
+int OS::ActivationFrameAlignment() {
+#if V8_TARGET_ARCH_ARM
+ // On EABI ARM targets this is required for fp correctness in the
+ // runtime system.
+ return 8;
+#elif V8_TARGET_ARCH_MIPS
+ return 8;
+#else
+ // Otherwise we just assume 16 byte alignment, i.e.:
+ // - With gcc 4.4 the tree vectorization optimizer can generate code
+ // that requires 16 byte alignment such as movdqa on x86.
+ // - Mac OS X and Solaris (64-bit) activation frames must be 16 byte-aligned;
+ // see "Mac OS X ABI Function Call Guide"
+ return 16;
+#endif
+}
+
+
intptr_t OS::CommitPageSize() {
static intptr_t page_size = getpagesize();
return page_size;
}
-#ifndef __CYGWIN__
+void OS::Free(void* address, const size_t size) {
+ // TODO(1240712): munmap has a return value which is ignored here.
+ int result = munmap(address, size);
+ USE(result);
+ ASSERT(result == 0);
+}
+
+
// Get rid of writable permission on code allocations.
void OS::ProtectCode(void* address, const size_t size) {
+#if defined(__CYGWIN__)
+ DWORD old_protect;
+ VirtualProtect(address, size, PAGE_EXECUTE_READ, &old_protect);
+#elif defined(__native_client__)
+ // The Native Client port of V8 uses an interpreter, so
+ // code pages don't need PROT_EXEC.
+ mprotect(address, size, PROT_READ);
+#else
mprotect(address, size, PROT_READ | PROT_EXEC);
+#endif
}
// Create guard pages.
void OS::Guard(void* address, const size_t size) {
+#if defined(__CYGWIN__)
+ DWORD oldprotect;
+ VirtualProtect(address, size, PAGE_NOACCESS, &oldprotect);
+#else
mprotect(address, size, PROT_NONE);
+#endif
}
-#endif // __CYGWIN__
void* OS::GetRandomMmapAddr() {
+#if defined(__native_client__)
+ // TODO(bradchen): restore randomization once Native Client gets
+ // smarter about using mmap address hints.
+ // See http://code.google.com/p/nativeclient/issues/3341
+ return NULL;
+#endif
Isolate* isolate = Isolate::UncheckedCurrent();
// Note that the current isolate isn't set up in a call path via
// CpuFeatures::Probe. We don't care about randomization in this case because
// the code page is immediately freed.
if (isolate != NULL) {
-#ifdef V8_TARGET_ARCH_X64
- uint64_t rnd1 = V8::RandomPrivate(isolate);
- uint64_t rnd2 = V8::RandomPrivate(isolate);
- uint64_t raw_addr = (rnd1 << 32) ^ rnd2;
+ uintptr_t raw_addr;
+ isolate->random_number_generator()->NextBytes(&raw_addr, sizeof(raw_addr));
+#if V8_TARGET_ARCH_X64
// Currently available CPUs have 48 bits of virtual addressing. Truncate
// the hint address to 46 bits to give the kernel a fighting chance of
// fulfilling our placement request.
raw_addr &= V8_UINT64_C(0x3ffffffff000);
#else
- uint32_t raw_addr = V8::RandomPrivate(isolate);
-
raw_addr &= 0x3ffff000;
# ifdef __sun
@@ -136,9 +246,54 @@ void* OS::GetRandomMmapAddr() {
}
+size_t OS::AllocateAlignment() {
+ return getpagesize();
+}
+
+
+void OS::Sleep(int milliseconds) {
+ useconds_t ms = static_cast<useconds_t>(milliseconds);
+ usleep(1000 * ms);
+}
+
+
+void OS::Abort() {
+ // Redirect to std abort to signal abnormal program termination.
+ if (FLAG_break_on_abort) {
+ DebugBreak();
+ }
+ abort();
+}
+
+
+void OS::DebugBreak() {
+#if V8_HOST_ARCH_ARM
+ asm("bkpt 0");
+#elif V8_HOST_ARCH_MIPS
+ asm("break");
+#elif V8_HOST_ARCH_IA32
+#if defined(__native_client__)
+ asm("hlt");
+#else
+ asm("int $3");
+#endif // __native_client__
+#elif V8_HOST_ARCH_X64
+ asm("int $3");
+#else
+#error Unsupported host architecture.
+#endif
+}
+
+
// ----------------------------------------------------------------------------
// Math functions
+double ceiling(double x) {
+ // Correct buggy 'ceil' on some systems (e.g. FreeBSD, OS X 10.5).
+ return (-1.0 < x && x < 0.0) ? -0.0 : ceil(x);
+}
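// A brief sketch of the observable behaviour the (-1.0, 0.0) special case
// preserves: ECMAScript requires Math.ceil(-0.5) to be -0, and the sign of
// that zero is visible through division, whereas a buggy libm ceil() may
// return +0.0 instead. Illustrative helper, not used elsewhere.
static inline bool CeilingReturnsNegativeZeroSketch() {
  double z = ceiling(-0.5);              // -0.0 with the correction above
  return (z == 0.0) && (1.0 / z < 0.0);  // true only if the zero is negative
}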
+
+
double modulo(double x, double y) {
return fmod(x, y);
}
@@ -157,9 +312,17 @@ UNARY_MATH_FUNCTION(sin, CreateTranscendentalFunction(TranscendentalCache::SIN))
UNARY_MATH_FUNCTION(cos, CreateTranscendentalFunction(TranscendentalCache::COS))
UNARY_MATH_FUNCTION(tan, CreateTranscendentalFunction(TranscendentalCache::TAN))
UNARY_MATH_FUNCTION(log, CreateTranscendentalFunction(TranscendentalCache::LOG))
+UNARY_MATH_FUNCTION(exp, CreateExpFunction())
UNARY_MATH_FUNCTION(sqrt, CreateSqrtFunction())
-#undef MATH_FUNCTION
+#undef UNARY_MATH_FUNCTION
+
+
+void lazily_initialize_fast_exp() {
+ if (fast_exp_function == NULL) {
+ init_fast_exp_function();
+ }
+}
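// The exp stub uses the same function-pointer pattern as the other
// UNARY_MATH_FUNCTION entries above, but is generated on demand instead of in
// OS::PostSetUp(). Roughly (an illustrative sketch, not the literal macro
// expansion):
//
//   static UnaryMathFunction fast_exp_function = NULL;
//   void init_fast_exp_function() { fast_exp_function = CreateExpFunction(); }
//   double fast_exp(double x) { return (*fast_exp_function)(x); }
//
// Callers are expected to run lazily_initialize_fast_exp() before the first
// use of the generated function.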
double OS::nan_value() {
@@ -188,24 +351,12 @@ int OS::GetUserTime(uint32_t* secs, uint32_t* usecs) {
double OS::TimeCurrentMillis() {
- struct timeval tv;
- if (gettimeofday(&tv, NULL) < 0) return 0.0;
- return (static_cast<double>(tv.tv_sec) * 1000) +
- (static_cast<double>(tv.tv_usec) / 1000);
-}
-
-
-int64_t OS::Ticks() {
- // gettimeofday has microsecond resolution.
- struct timeval tv;
- if (gettimeofday(&tv, NULL) < 0)
- return 0;
- return (static_cast<int64_t>(tv.tv_sec) * 1000000) + tv.tv_usec;
+ return Time::Now().ToJsTime();
}
double OS::DaylightSavingsOffset(double time) {
- if (isnan(time)) return nan_value();
+ if (std::isnan(time)) return nan_value();
time_t tv = static_cast<time_t>(floor(time/msPerSecond));
struct tm* t = localtime(&tv);
if (NULL == t) return nan_value();
@@ -322,34 +473,69 @@ int OS::VSNPrintF(Vector<char> str,
}
-#if defined(V8_TARGET_ARCH_IA32)
-static OS::MemCopyFunction memcopy_function = NULL;
+#if V8_TARGET_ARCH_IA32
+static void MemMoveWrapper(void* dest, const void* src, size_t size) {
+ memmove(dest, src, size);
+}
+
+
+// Initialize to library version so we can call this at any time during startup.
+static OS::MemMoveFunction memmove_function = &MemMoveWrapper;
+
// Defined in codegen-ia32.cc.
-OS::MemCopyFunction CreateMemCopyFunction();
+OS::MemMoveFunction CreateMemMoveFunction();
-// Copy memory area to disjoint memory area.
-void OS::MemCopy(void* dest, const void* src, size_t size) {
+// Copy memory area. No restrictions.
+void OS::MemMove(void* dest, const void* src, size_t size) {
+ if (size == 0) return;
// Note: here we rely on dependent reads being ordered. This is true
// on all architectures we currently support.
- (*memcopy_function)(dest, src, size);
-#ifdef DEBUG
- CHECK_EQ(0, memcmp(dest, src, size));
-#endif
+ (*memmove_function)(dest, src, size);
}
-#endif // V8_TARGET_ARCH_IA32
+
+#elif defined(V8_HOST_ARCH_ARM)
+void OS::MemCopyUint16Uint8Wrapper(uint16_t* dest,
+ const uint8_t* src,
+ size_t chars) {
+ uint16_t *limit = dest + chars;
+ while (dest < limit) {
+ *dest++ = static_cast<uint16_t>(*src++);
+ }
+}
+
+
+OS::MemCopyUint8Function OS::memcopy_uint8_function = &OS::MemCopyUint8Wrapper;
+OS::MemCopyUint16Uint8Function OS::memcopy_uint16_uint8_function =
+ &OS::MemCopyUint16Uint8Wrapper;
+// Defined in codegen-arm.cc.
+OS::MemCopyUint8Function CreateMemCopyUint8Function(
+ OS::MemCopyUint8Function stub);
+OS::MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
+ OS::MemCopyUint16Uint8Function stub);
+#endif
-void POSIXPostSetUp() {
-#if defined(V8_TARGET_ARCH_IA32)
- memcopy_function = CreateMemCopyFunction();
+void OS::PostSetUp() {
+#if V8_TARGET_ARCH_IA32
+ OS::MemMoveFunction generated_memmove = CreateMemMoveFunction();
+ if (generated_memmove != NULL) {
+ memmove_function = generated_memmove;
+ }
+#elif defined(V8_HOST_ARCH_ARM)
+ OS::memcopy_uint8_function =
+ CreateMemCopyUint8Function(&OS::MemCopyUint8Wrapper);
+ OS::memcopy_uint16_uint8_function =
+ CreateMemCopyUint16Uint8Function(&OS::MemCopyUint16Uint8Wrapper);
#endif
init_fast_sin_function();
init_fast_cos_function();
init_fast_tan_function();
init_fast_log_function();
+ // fast_exp is initialized lazily.
init_fast_sqrt_function();
}
+
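// The ia32 memmove path above follows a simple late-binding pattern:
// memmove_function starts out pointing at the plain libc wrapper so that
// OS::MemMove() already works during early startup, and OS::PostSetUp() swaps
// in the generated stub only if CreateMemMoveFunction() produced one. A
// condensed sketch of the same idea, with illustrative names:
//
//   typedef void (*CopyFn)(void* dest, const void* src, size_t n);
//   static void LibcCopy(void* d, const void* s, size_t n) { memmove(d, s, n); }
//   static CopyFn copy_fn = &LibcCopy;           // safe default before setup
//   static void InstallGeneratedCopy(CopyFn g) {
//     if (g != NULL) copy_fn = g;                // otherwise keep the fallback
//   }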
// ----------------------------------------------------------------------------
// POSIX string support.
//
@@ -365,200 +551,229 @@ void OS::StrNCpy(Vector<char> dest, const char* src, size_t n) {
// ----------------------------------------------------------------------------
-// POSIX socket support.
+// POSIX thread support.
//
-class POSIXSocket : public Socket {
+class Thread::PlatformData : public Malloced {
public:
- explicit POSIXSocket() {
- // Create the socket.
- socket_ = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
- if (IsValid()) {
- // Allow rapid reuse.
- static const int kOn = 1;
- int ret = setsockopt(socket_, SOL_SOCKET, SO_REUSEADDR,
- &kOn, sizeof(kOn));
- ASSERT(ret == 0);
- USE(ret);
- }
- }
- explicit POSIXSocket(int socket): socket_(socket) { }
- virtual ~POSIXSocket() { Shutdown(); }
-
- // Server initialization.
- bool Bind(const int port);
- bool Listen(int backlog) const;
- Socket* Accept() const;
-
- // Client initialization.
- bool Connect(const char* host, const char* port);
-
- // Shutdown socket for both read and write.
- bool Shutdown();
-
- // Data Transmission
- int Send(const char* data, int len) const;
- int Receive(char* data, int len) const;
-
- bool SetReuseAddress(bool reuse_address);
-
- bool IsValid() const { return socket_ != -1; }
-
- private:
- int socket_;
+ PlatformData() : thread_(kNoThread) {}
+ pthread_t thread_; // Thread handle for pthread.
};
-
-bool POSIXSocket::Bind(const int port) {
- if (!IsValid()) {
- return false;
- }
-
- sockaddr_in addr;
- memset(&addr, 0, sizeof(addr));
- addr.sin_family = AF_INET;
- addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
- addr.sin_port = htons(port);
- int status = bind(socket_,
- BitCast<struct sockaddr *>(&addr),
- sizeof(addr));
- return status == 0;
+Thread::Thread(const Options& options)
+ : data_(new PlatformData),
+ stack_size_(options.stack_size()),
+ start_semaphore_(NULL) {
+ set_name(options.name());
+}
+
+
+Thread::~Thread() {
+ delete data_;
+}
+
+
+static void SetThreadName(const char* name) {
+#if defined(__DragonFly__) || defined(__FreeBSD__) || defined(__OpenBSD__)
+ pthread_set_name_np(pthread_self(), name);
+#elif defined(__NetBSD__)
+ STATIC_ASSERT(Thread::kMaxThreadNameLength <= PTHREAD_MAX_NAMELEN_NP);
+ pthread_setname_np(pthread_self(), "%s", name);
+#elif defined(__APPLE__)
+ // pthread_setname_np is only available in 10.6 or later, so test
+ // for it at runtime.
+ int (*dynamic_pthread_setname_np)(const char*);
+ *reinterpret_cast<void**>(&dynamic_pthread_setname_np) =
+ dlsym(RTLD_DEFAULT, "pthread_setname_np");
+ if (dynamic_pthread_setname_np == NULL)
+ return;
+
+ // Mac OS X does not expose the length limit of the name, so hardcode it.
+ static const int kMaxNameLength = 63;
+ STATIC_ASSERT(Thread::kMaxThreadNameLength <= kMaxNameLength);
+ dynamic_pthread_setname_np(name);
+#elif defined(PR_SET_NAME)
+ prctl(PR_SET_NAME,
+ reinterpret_cast<unsigned long>(name), // NOLINT
+ 0, 0, 0);
+#endif
}
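// The __APPLE__ branch above resolves pthread_setname_np() at runtime because
// the symbol only exists on Mac OS X 10.6 and later. A minimal sketch of that
// dlsym() feature-detection pattern, assuming a system that defines
// RTLD_DEFAULT; the helper name is illustrative only.
static void SetCurrentThreadNameIfAvailable(const char* name) {
  int (*set_name)(const char*);
  *reinterpret_cast<void**>(&set_name) =
      dlsym(RTLD_DEFAULT, "pthread_setname_np");
  if (set_name != NULL) set_name(name);  // silently a no-op on older systems
}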
-bool POSIXSocket::Listen(int backlog) const {
- if (!IsValid()) {
- return false;
- }
-
- int status = listen(socket_, backlog);
- return status == 0;
+static void* ThreadEntry(void* arg) {
+ Thread* thread = reinterpret_cast<Thread*>(arg);
+ // This is also initialized by the first argument to pthread_create() but we
+ // don't know which thread will run first (the original thread or the new
+ // one) so we initialize it here too.
+ thread->data()->thread_ = pthread_self();
+ SetThreadName(thread->name());
+ ASSERT(thread->data()->thread_ != kNoThread);
+ thread->NotifyStartedAndRun();
+ return NULL;
}
-Socket* POSIXSocket::Accept() const {
- if (!IsValid()) {
- return NULL;
- }
-
- int socket;
- do {
- socket = accept(socket_, NULL, NULL);
- } while (socket == -1 && errno == EINTR);
-
- if (socket == -1) {
- return NULL;
- } else {
- return new POSIXSocket(socket);
- }
+void Thread::set_name(const char* name) {
+ strncpy(name_, name, sizeof(name_));
+ name_[sizeof(name_) - 1] = '\0';
}
-bool POSIXSocket::Connect(const char* host, const char* port) {
- if (!IsValid()) {
- return false;
+void Thread::Start() {
+ int result;
+ pthread_attr_t attr;
+ memset(&attr, 0, sizeof(attr));
+ result = pthread_attr_init(&attr);
+ ASSERT_EQ(0, result);
+ // Native client uses default stack size.
+#if !defined(__native_client__)
+ if (stack_size_ > 0) {
+ result = pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
+ ASSERT_EQ(0, result);
}
+#endif
+ result = pthread_create(&data_->thread_, &attr, ThreadEntry, this);
+ ASSERT_EQ(0, result);
+ result = pthread_attr_destroy(&attr);
+ ASSERT_EQ(0, result);
+ ASSERT(data_->thread_ != kNoThread);
+ USE(result);
+}
- // Lookup host and port.
- struct addrinfo *result = NULL;
- struct addrinfo hints;
- memset(&hints, 0, sizeof(addrinfo));
- hints.ai_family = AF_INET;
- hints.ai_socktype = SOCK_STREAM;
- hints.ai_protocol = IPPROTO_TCP;
- int status = getaddrinfo(host, port, &hints, &result);
- if (status != 0) {
- return false;
- }
- // Connect.
- do {
- status = connect(socket_, result->ai_addr, result->ai_addrlen);
- } while (status == -1 && errno == EINTR);
- freeaddrinfo(result);
- return status == 0;
+void Thread::Join() {
+ pthread_join(data_->thread_, NULL);
}
-bool POSIXSocket::Shutdown() {
- if (IsValid()) {
- // Shutdown socket for both read and write.
- int status = shutdown(socket_, SHUT_RDWR);
- close(socket_);
- socket_ = -1;
- return status == 0;
- }
- return true;
-}
-
-
-int POSIXSocket::Send(const char* data, int len) const {
- if (len <= 0) return 0;
- int written = 0;
- while (written < len) {
- int status = send(socket_, data + written, len - written, 0);
- if (status == 0) {
- break;
- } else if (status > 0) {
- written += status;
- } else if (errno != EINTR) {
- return 0;
- }
- }
- return written;
+void Thread::YieldCPU() {
+ int result = sched_yield();
+ ASSERT_EQ(0, result);
+ USE(result);
}
-int POSIXSocket::Receive(char* data, int len) const {
- if (len <= 0) return 0;
- int status;
- do {
- status = recv(socket_, data, len, 0);
- } while (status == -1 && errno == EINTR);
- return (status < 0) ? 0 : status;
+static Thread::LocalStorageKey PthreadKeyToLocalKey(pthread_key_t pthread_key) {
+#if defined(__CYGWIN__)
+ // We need to cast pthread_key_t to Thread::LocalStorageKey in two steps
+ // because pthread_key_t is a pointer type on Cygwin. This will probably not
+ // work on 64-bit platforms, but Cygwin doesn't support 64-bit anyway.
+ STATIC_ASSERT(sizeof(Thread::LocalStorageKey) == sizeof(pthread_key_t));
+ intptr_t ptr_key = reinterpret_cast<intptr_t>(pthread_key);
+ return static_cast<Thread::LocalStorageKey>(ptr_key);
+#else
+ return static_cast<Thread::LocalStorageKey>(pthread_key);
+#endif
}
-bool POSIXSocket::SetReuseAddress(bool reuse_address) {
- int on = reuse_address ? 1 : 0;
- int status = setsockopt(socket_, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));
- return status == 0;
+static pthread_key_t LocalKeyToPthreadKey(Thread::LocalStorageKey local_key) {
+#if defined(__CYGWIN__)
+ STATIC_ASSERT(sizeof(Thread::LocalStorageKey) == sizeof(pthread_key_t));
+ intptr_t ptr_key = static_cast<intptr_t>(local_key);
+ return reinterpret_cast<pthread_key_t>(ptr_key);
+#else
+ return static_cast<pthread_key_t>(local_key);
+#endif
}
-bool Socket::SetUp() {
- // Nothing to do on POSIX.
- return true;
-}
+#ifdef V8_FAST_TLS_SUPPORTED
+static Atomic32 tls_base_offset_initialized = 0;
+intptr_t kMacTlsBaseOffset = 0;
-int Socket::LastError() {
- return errno;
+// It's safe to do the initialization more than once, but it has to be
+// done at least once.
+static void InitializeTlsBaseOffset() {
+ const size_t kBufferSize = 128;
+ char buffer[kBufferSize];
+ size_t buffer_size = kBufferSize;
+ int ctl_name[] = { CTL_KERN , KERN_OSRELEASE };
+ if (sysctl(ctl_name, 2, buffer, &buffer_size, NULL, 0) != 0) {
+ V8_Fatal(__FILE__, __LINE__, "V8 failed to get kernel version");
+ }
+ // The buffer now contains a string of the form XX.YY.ZZ, where
+ // XX is the major kernel version component.
+ // Make sure the buffer is 0-terminated.
+ buffer[kBufferSize - 1] = '\0';
+ char* period_pos = strchr(buffer, '.');
+ *period_pos = '\0';
+ int kernel_version_major =
+ static_cast<int>(strtol(buffer, NULL, 10)); // NOLINT
+ // The constants below are taken from pthreads.s from the XNU kernel
+ // sources archive at www.opensource.apple.com.
+ if (kernel_version_major < 11) {
+ // 8.x.x (Tiger), 9.x.x (Leopard), 10.x.x (Snow Leopard) have the
+ // same offsets.
+#if V8_HOST_ARCH_IA32
+ kMacTlsBaseOffset = 0x48;
+#else
+ kMacTlsBaseOffset = 0x60;
+#endif
+ } else {
+ // 11.x.x (Lion) changed the offset.
+ kMacTlsBaseOffset = 0;
+ }
+
+ Release_Store(&tls_base_offset_initialized, 1);
}
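// Worked example for the parsing above, assuming typical kern.osrelease
// values: "10.8.0" is truncated at the first '.' to "10", strtol() yields a
// major version of 10 (Snow Leopard), so the 0x48 (ia32) / 0x60 (x64) offset
// is used; "11.4.2" yields 11 (Lion or later), so kMacTlsBaseOffset stays 0
// and no extra base offset is applied on the fast TLS path.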
-uint16_t Socket::HToN(uint16_t value) {
- return htons(value);
+static void CheckFastTls(Thread::LocalStorageKey key) {
+ void* expected = reinterpret_cast<void*>(0x1234CAFE);
+ Thread::SetThreadLocal(key, expected);
+ void* actual = Thread::GetExistingThreadLocal(key);
+ if (expected != actual) {
+ V8_Fatal(__FILE__, __LINE__,
+ "V8 failed to initialize fast TLS on current kernel");
+ }
+ Thread::SetThreadLocal(key, NULL);
}
+#endif // V8_FAST_TLS_SUPPORTED
+
-uint16_t Socket::NToH(uint16_t value) {
- return ntohs(value);
+Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
+#ifdef V8_FAST_TLS_SUPPORTED
+ bool check_fast_tls = false;
+ if (tls_base_offset_initialized == 0) {
+ check_fast_tls = true;
+ InitializeTlsBaseOffset();
+ }
+#endif
+ pthread_key_t key;
+ int result = pthread_key_create(&key, NULL);
+ ASSERT_EQ(0, result);
+ USE(result);
+ LocalStorageKey local_key = PthreadKeyToLocalKey(key);
+#ifdef V8_FAST_TLS_SUPPORTED
+ // If we just initialized fast TLS support, make sure it works.
+ if (check_fast_tls) CheckFastTls(local_key);
+#endif
+ return local_key;
}
-uint32_t Socket::HToN(uint32_t value) {
- return htonl(value);
+void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
+ pthread_key_t pthread_key = LocalKeyToPthreadKey(key);
+ int result = pthread_key_delete(pthread_key);
+ ASSERT_EQ(0, result);
+ USE(result);
}
-uint32_t Socket::NToH(uint32_t value) {
- return ntohl(value);
+void* Thread::GetThreadLocal(LocalStorageKey key) {
+ pthread_key_t pthread_key = LocalKeyToPthreadKey(key);
+ return pthread_getspecific(pthread_key);
}
-Socket* OS::CreateSocket() {
- return new POSIXSocket();
+void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
+ pthread_key_t pthread_key = LocalKeyToPthreadKey(key);
+ int result = pthread_setspecific(pthread_key, value);
+ ASSERT_EQ(0, result);
+ USE(result);
}
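
These four wrappers are thin adapters over the POSIX thread-specific-data API; for orientation, a minimal round trip through the underlying pthread calls looks roughly like this (standalone sketch, no V8 types involved):

    #include <pthread.h>
    #include <cassert>

    // Create a key, store and read back a value on the current thread,
    // then delete the key again.
    void PthreadTlsRoundTrip() {
      pthread_key_t key;
      int result = pthread_key_create(&key, NULL);  // no destructor callback
      assert(result == 0);
      result = pthread_setspecific(key, reinterpret_cast<void*>(0x1234));
      assert(result == 0);
      assert(pthread_getspecific(key) == reinterpret_cast<void*>(0x1234));
      result = pthread_key_delete(key);
      assert(result == 0);
      (void) result;  // silence unused-variable warnings in release builds
    }
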
diff --git a/deps/v8/src/platform-solaris.cc b/deps/v8/src/platform-solaris.cc
index 07718fe50..a0590cbec 100644
--- a/deps/v8/src/platform-solaris.cc
+++ b/deps/v8/src/platform-solaris.cc
@@ -38,7 +38,6 @@
#include <ucontext.h> // walkstack(), getcontext()
#include <dlfcn.h> // dladdr
#include <pthread.h>
-#include <sched.h> // for sched_yield
#include <semaphore.h>
#include <time.h>
#include <sys/time.h> // gettimeofday(), timeradd()
@@ -52,7 +51,6 @@
#include "v8.h"
-#include "platform-posix.h"
#include "platform.h"
#include "v8threads.h"
#include "vm-state-inl.h"
@@ -62,6 +60,7 @@
// SunOS 5.10 Generic_141445-09) which make it difficult or impossible to
// access signbit() despite the availability of other C99 math functions.
#ifndef signbit
+namespace std {
// Test sign - usually defined in math.h
int signbit(double x) {
// We need to take care of the special case of both positive and negative
@@ -74,49 +73,15 @@ int signbit(double x) {
return x < 0;
}
}
+} // namespace std
#endif // signbit
namespace v8 {
namespace internal {
-// 0 is never a valid thread id on Solaris since the main thread is 1 and
-// subsequent have their ids incremented from there
-static const pthread_t kNoThread = (pthread_t) 0;
-
-
-double ceiling(double x) {
- return ceil(x);
-}
-
-
-static Mutex* limit_mutex = NULL;
-
-
-void OS::PostSetUp() {
- POSIXPostSetUp();
-}
-
-
-uint64_t OS::CpuFeaturesImpliedByPlatform() {
- return 0; // Solaris runs on a lot of things.
-}
-
-
-int OS::ActivationFrameAlignment() {
- // GCC generates code that requires 16 byte alignment such as movdqa.
- return Max(STACK_ALIGN, 16);
-}
-
-
-void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
- __asm__ __volatile__("" : : : "memory");
- *ptr = value;
-}
-
-
const char* OS::LocalTimezone(double time) {
- if (isnan(time)) return "";
+ if (std::isnan(time)) return "";
time_t tv = static_cast<time_t>(floor(time/msPerSecond));
struct tm* t = localtime(&tv);
if (NULL == t) return "";
@@ -130,36 +95,6 @@ double OS::LocalTimeOffset() {
}
-// We keep the lowest and highest addresses mapped as a quick way of
-// determining that pointers are outside the heap (used mostly in assertions
-// and verification). The estimate is conservative, i.e., not all addresses in
-// 'allocated' space are actually allocated to our heap. The range is
-// [lowest, highest), inclusive on the low and and exclusive on the high end.
-static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
-static void* highest_ever_allocated = reinterpret_cast<void*>(0);
-
-
-static void UpdateAllocatedSpaceLimits(void* address, int size) {
- ASSERT(limit_mutex != NULL);
- ScopedLock lock(limit_mutex);
-
- lowest_ever_allocated = Min(lowest_ever_allocated, address);
- highest_ever_allocated =
- Max(highest_ever_allocated,
- reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
-}
-
-
-bool OS::IsOutsideAllocatedSpace(void* address) {
- return address < lowest_ever_allocated || address >= highest_ever_allocated;
-}
-
-
-size_t OS::AllocateAlignment() {
- return static_cast<size_t>(getpagesize());
-}
-
-
void* OS::Allocate(const size_t requested,
size_t* allocated,
bool is_executable) {
@@ -168,40 +103,14 @@ void* OS::Allocate(const size_t requested,
void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
if (mbase == MAP_FAILED) {
- LOG(ISOLATE, StringEvent("OS::Allocate", "mmap failed"));
+ LOG(Isolate::Current(), StringEvent("OS::Allocate", "mmap failed"));
return NULL;
}
*allocated = msize;
- UpdateAllocatedSpaceLimits(mbase, msize);
return mbase;
}
-void OS::Free(void* address, const size_t size) {
- // TODO(1240712): munmap has a return value which is ignored here.
- int result = munmap(address, size);
- USE(result);
- ASSERT(result == 0);
-}
-
-
-void OS::Sleep(int milliseconds) {
- useconds_t ms = static_cast<useconds_t>(milliseconds);
- usleep(1000 * ms);
-}
-
-
-void OS::Abort() {
- // Redirect to std abort to signal abnormal program termination.
- abort();
-}
-
-
-void OS::DebugBreak() {
- asm("int $3");
-}
-
-
class PosixMemoryMappedFile : public OS::MemoryMappedFile {
public:
PosixMemoryMappedFile(FILE* file, void* memory, int size)
@@ -250,7 +159,7 @@ PosixMemoryMappedFile::~PosixMemoryMappedFile() {
}
-void OS::LogSharedLibraryAddresses() {
+void OS::LogSharedLibraryAddresses(Isolate* isolate) {
}
@@ -296,20 +205,6 @@ static int StackWalkCallback(uintptr_t pc, int signo, void* data) {
}
-int OS::StackWalk(Vector<OS::StackFrame> frames) {
- ucontext_t ctx;
- struct StackWalker walker = { frames, 0 };
-
- if (getcontext(&ctx) < 0) return kStackWalkError;
-
- if (!walkcontext(&ctx, StackWalkCallback, &walker)) {
- return kStackWalkError;
- }
-
- return walker.index;
-}
-
-
// Constants used for mmap.
static const int kMmapFd = -1;
static const int kMmapFdOffset = 0;
@@ -317,10 +212,9 @@ static const int kMmapFdOffset = 0;
VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
-VirtualMemory::VirtualMemory(size_t size) {
- address_ = ReserveRegion(size);
- size_ = size;
-}
+
+VirtualMemory::VirtualMemory(size_t size)
+ : address_(ReserveRegion(size)), size_(size) { }
VirtualMemory::VirtualMemory(size_t size, size_t alignment)
@@ -423,8 +317,6 @@ bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
kMmapFdOffset)) {
return false;
}
-
- UpdateAllocatedSpaceLimits(base, size);
return true;
}
@@ -444,456 +336,9 @@ bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
}
-class Thread::PlatformData : public Malloced {
- public:
- PlatformData() : thread_(kNoThread) { }
-
- pthread_t thread_; // Thread handle for pthread.
-};
-
-
-Thread::Thread(const Options& options)
- : data_(new PlatformData()),
- stack_size_(options.stack_size()) {
- set_name(options.name());
-}
-
-
-Thread::~Thread() {
- delete data_;
-}
-
-
-static void* ThreadEntry(void* arg) {
- Thread* thread = reinterpret_cast<Thread*>(arg);
- // This is also initialized by the first argument to pthread_create() but we
- // don't know which thread will run first (the original thread or the new
- // one) so we initialize it here too.
- thread->data()->thread_ = pthread_self();
- ASSERT(thread->data()->thread_ != kNoThread);
- thread->Run();
- return NULL;
-}
-
-
-void Thread::set_name(const char* name) {
- strncpy(name_, name, sizeof(name_));
- name_[sizeof(name_) - 1] = '\0';
-}
-
-
-void Thread::Start() {
- pthread_attr_t attr;
- if (stack_size_ > 0) {
- pthread_attr_init(&attr);
- pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
- }
- pthread_create(&data_->thread_, NULL, ThreadEntry, this);
- ASSERT(data_->thread_ != kNoThread);
-}
-
-
-void Thread::Join() {
- pthread_join(data_->thread_, NULL);
-}
-
-
-Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
- pthread_key_t key;
- int result = pthread_key_create(&key, NULL);
- USE(result);
- ASSERT(result == 0);
- return static_cast<LocalStorageKey>(key);
-}
-
-
-void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
- pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
- int result = pthread_key_delete(pthread_key);
- USE(result);
- ASSERT(result == 0);
-}
-
-
-void* Thread::GetThreadLocal(LocalStorageKey key) {
- pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
- return pthread_getspecific(pthread_key);
-}
-
-
-void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
- pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
- pthread_setspecific(pthread_key, value);
-}
-
-
-void Thread::YieldCPU() {
- sched_yield();
-}
-
-
-class SolarisMutex : public Mutex {
- public:
- SolarisMutex() {
- pthread_mutexattr_t attr;
- pthread_mutexattr_init(&attr);
- pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
- pthread_mutex_init(&mutex_, &attr);
- }
-
- ~SolarisMutex() { pthread_mutex_destroy(&mutex_); }
-
- int Lock() { return pthread_mutex_lock(&mutex_); }
-
- int Unlock() { return pthread_mutex_unlock(&mutex_); }
-
- virtual bool TryLock() {
- int result = pthread_mutex_trylock(&mutex_);
- // Return false if the lock is busy and locking failed.
- if (result == EBUSY) {
- return false;
- }
- ASSERT(result == 0); // Verify no other errors.
- return true;
- }
-
- private:
- pthread_mutex_t mutex_;
-};
-
-
-Mutex* OS::CreateMutex() {
- return new SolarisMutex();
-}
-
-
-class SolarisSemaphore : public Semaphore {
- public:
- explicit SolarisSemaphore(int count) { sem_init(&sem_, 0, count); }
- virtual ~SolarisSemaphore() { sem_destroy(&sem_); }
-
- virtual void Wait();
- virtual bool Wait(int timeout);
- virtual void Signal() { sem_post(&sem_); }
- private:
- sem_t sem_;
-};
-
-
-void SolarisSemaphore::Wait() {
- while (true) {
- int result = sem_wait(&sem_);
- if (result == 0) return; // Successfully got semaphore.
- CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
- }
-}
-
-
-#ifndef TIMEVAL_TO_TIMESPEC
-#define TIMEVAL_TO_TIMESPEC(tv, ts) do { \
- (ts)->tv_sec = (tv)->tv_sec; \
- (ts)->tv_nsec = (tv)->tv_usec * 1000; \
-} while (false)
-#endif
-
-
-#ifndef timeradd
-#define timeradd(a, b, result) \
- do { \
- (result)->tv_sec = (a)->tv_sec + (b)->tv_sec; \
- (result)->tv_usec = (a)->tv_usec + (b)->tv_usec; \
- if ((result)->tv_usec >= 1000000) { \
- ++(result)->tv_sec; \
- (result)->tv_usec -= 1000000; \
- } \
- } while (0)
-#endif
-
-
-bool SolarisSemaphore::Wait(int timeout) {
- const long kOneSecondMicros = 1000000; // NOLINT
-
- // Split timeout into second and nanosecond parts.
- struct timeval delta;
- delta.tv_usec = timeout % kOneSecondMicros;
- delta.tv_sec = timeout / kOneSecondMicros;
-
- struct timeval current_time;
- // Get the current time.
- if (gettimeofday(&current_time, NULL) == -1) {
- return false;
- }
-
- // Calculate time for end of timeout.
- struct timeval end_time;
- timeradd(&current_time, &delta, &end_time);
-
- struct timespec ts;
- TIMEVAL_TO_TIMESPEC(&end_time, &ts);
- // Wait for semaphore signalled or timeout.
- while (true) {
- int result = sem_timedwait(&sem_, &ts);
- if (result == 0) return true; // Successfully got semaphore.
- if (result == -1 && errno == ETIMEDOUT) return false; // Timeout.
- CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
- }
-}
-
-
-Semaphore* OS::CreateSemaphore(int count) {
- return new SolarisSemaphore(count);
-}
-
-
-static pthread_t GetThreadID() {
- return pthread_self();
-}
-
-static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
- USE(info);
- if (signal != SIGPROF) return;
- Isolate* isolate = Isolate::UncheckedCurrent();
- if (isolate == NULL || !isolate->IsInitialized() || !isolate->IsInUse()) {
- // We require a fully initialized and entered isolate.
- return;
- }
- if (v8::Locker::IsActive() &&
- !isolate->thread_manager()->IsLockedByCurrentThread()) {
- return;
- }
-
- Sampler* sampler = isolate->logger()->sampler();
- if (sampler == NULL || !sampler->IsActive()) return;
-
- TickSample sample_obj;
- TickSample* sample = CpuProfiler::TickSampleEvent(isolate);
- if (sample == NULL) sample = &sample_obj;
-
- // Extracting the sample from the context is extremely machine dependent.
- ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
- mcontext_t& mcontext = ucontext->uc_mcontext;
- sample->state = isolate->current_vm_state();
-
- sample->pc = reinterpret_cast<Address>(mcontext.gregs[REG_PC]);
- sample->sp = reinterpret_cast<Address>(mcontext.gregs[REG_SP]);
- sample->fp = reinterpret_cast<Address>(mcontext.gregs[REG_FP]);
-
- sampler->SampleStack(sample);
- sampler->Tick(sample);
-}
-
-class Sampler::PlatformData : public Malloced {
- public:
- PlatformData() : vm_tid_(GetThreadID()) {}
-
- pthread_t vm_tid() const { return vm_tid_; }
-
- private:
- pthread_t vm_tid_;
-};
-
-
-class SignalSender : public Thread {
- public:
- enum SleepInterval {
- HALF_INTERVAL,
- FULL_INTERVAL
- };
-
- static const int kSignalSenderStackSize = 64 * KB;
-
- explicit SignalSender(int interval)
- : Thread(Thread::Options("SignalSender", kSignalSenderStackSize)),
- interval_(interval) {}
-
- static void SetUp() { if (!mutex_) mutex_ = OS::CreateMutex(); }
- static void TearDown() { delete mutex_; }
-
- static void InstallSignalHandler() {
- struct sigaction sa;
- sa.sa_sigaction = ProfilerSignalHandler;
- sigemptyset(&sa.sa_mask);
- sa.sa_flags = SA_RESTART | SA_SIGINFO;
- signal_handler_installed_ =
- (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
- }
-
- static void RestoreSignalHandler() {
- if (signal_handler_installed_) {
- sigaction(SIGPROF, &old_signal_handler_, 0);
- signal_handler_installed_ = false;
- }
- }
-
- static void AddActiveSampler(Sampler* sampler) {
- ScopedLock lock(mutex_);
- SamplerRegistry::AddActiveSampler(sampler);
- if (instance_ == NULL) {
- // Start a thread that will send SIGPROF signal to VM threads,
- // when CPU profiling will be enabled.
- instance_ = new SignalSender(sampler->interval());
- instance_->Start();
- } else {
- ASSERT(instance_->interval_ == sampler->interval());
- }
- }
-
- static void RemoveActiveSampler(Sampler* sampler) {
- ScopedLock lock(mutex_);
- SamplerRegistry::RemoveActiveSampler(sampler);
- if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
- RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
- delete instance_;
- instance_ = NULL;
- RestoreSignalHandler();
- }
- }
-
- // Implement Thread::Run().
- virtual void Run() {
- SamplerRegistry::State state;
- while ((state = SamplerRegistry::GetState()) !=
- SamplerRegistry::HAS_NO_SAMPLERS) {
- bool cpu_profiling_enabled =
- (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
- bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
- if (cpu_profiling_enabled && !signal_handler_installed_) {
- InstallSignalHandler();
- } else if (!cpu_profiling_enabled && signal_handler_installed_) {
- RestoreSignalHandler();
- }
-
- // When CPU profiling is enabled both JavaScript and C++ code is
- // profiled. We must not suspend.
- if (!cpu_profiling_enabled) {
- if (rate_limiter_.SuspendIfNecessary()) continue;
- }
- if (cpu_profiling_enabled && runtime_profiler_enabled) {
- if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) {
- return;
- }
- Sleep(HALF_INTERVAL);
- if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) {
- return;
- }
- Sleep(HALF_INTERVAL);
- } else {
- if (cpu_profiling_enabled) {
- if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile,
- this)) {
- return;
- }
- }
- if (runtime_profiler_enabled) {
- if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile,
- NULL)) {
- return;
- }
- }
- Sleep(FULL_INTERVAL);
- }
- }
- }
-
- static void DoCpuProfile(Sampler* sampler, void* raw_sender) {
- if (!sampler->IsProfiling()) return;
- SignalSender* sender = reinterpret_cast<SignalSender*>(raw_sender);
- sender->SendProfilingSignal(sampler->platform_data()->vm_tid());
- }
-
- static void DoRuntimeProfile(Sampler* sampler, void* ignored) {
- if (!sampler->isolate()->IsInitialized()) return;
- sampler->isolate()->runtime_profiler()->NotifyTick();
- }
-
- void SendProfilingSignal(pthread_t tid) {
- if (!signal_handler_installed_) return;
- pthread_kill(tid, SIGPROF);
- }
-
- void Sleep(SleepInterval full_or_half) {
- // Convert ms to us and subtract 100 us to compensate delays
- // occuring during signal delivery.
- useconds_t interval = interval_ * 1000 - 100;
- if (full_or_half == HALF_INTERVAL) interval /= 2;
- int result = usleep(interval);
-#ifdef DEBUG
- if (result != 0 && errno != EINTR) {
- fprintf(stderr,
- "SignalSender usleep error; interval = %u, errno = %d\n",
- interval,
- errno);
- ASSERT(result == 0 || errno == EINTR);
- }
-#endif
- USE(result);
- }
-
- const int interval_;
- RuntimeProfilerRateLimiter rate_limiter_;
-
- // Protects the process wide state below.
- static Mutex* mutex_;
- static SignalSender* instance_;
- static bool signal_handler_installed_;
- static struct sigaction old_signal_handler_;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(SignalSender);
-};
-
-Mutex* SignalSender::mutex_ = NULL;
-SignalSender* SignalSender::instance_ = NULL;
-struct sigaction SignalSender::old_signal_handler_;
-bool SignalSender::signal_handler_installed_ = false;
-
-
-void OS::SetUp() {
- // Seed the random number generator.
- // Convert the current time to a 64-bit integer first, before converting it
- // to an unsigned. Going directly will cause an overflow and the seed to be
- // set to all ones. The seed will be identical for different instances that
- // call this setup code within the same millisecond.
- uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
- srandom(static_cast<unsigned int>(seed));
- limit_mutex = CreateMutex();
- SignalSender::SetUp();
-}
-
-
-void OS::TearDown() {
- SignalSender::TearDown();
- delete limit_mutex;
-}
-
-
-Sampler::Sampler(Isolate* isolate, int interval)
- : isolate_(isolate),
- interval_(interval),
- profiling_(false),
- active_(false),
- samples_taken_(0) {
- data_ = new PlatformData;
-}
-
-
-Sampler::~Sampler() {
- ASSERT(!IsActive());
- delete data_;
-}
-
-
-void Sampler::Start() {
- ASSERT(!IsActive());
- SetActive(true);
- SignalSender::AddActiveSampler(this);
-}
-
-
-void Sampler::Stop() {
- ASSERT(IsActive());
- SignalSender::RemoveActiveSampler(this);
- SetActive(false);
+bool VirtualMemory::HasLazyCommits() {
+ // TODO(alph): implement for the platform.
+ return false;
}
} } // namespace v8::internal
diff --git a/deps/v8/src/platform-win32.cc b/deps/v8/src/platform-win32.cc
index 49463be8e..35411bfda 100644
--- a/deps/v8/src/platform-win32.cc
+++ b/deps/v8/src/platform-win32.cc
@@ -27,13 +27,25 @@
// Platform specific code for Win32.
-#define V8_WIN32_HEADERS_FULL
+// Secure API functions are not available using MinGW with msvcrt.dll
+// on Windows XP. Make sure MINGW_HAS_SECURE_API is not defined to
+// disable definition of secure API functions in standard headers that
+// would conflict with our own implementation.
+#ifdef __MINGW32__
+#include <_mingw.h>
+#ifdef MINGW_HAS_SECURE_API
+#undef MINGW_HAS_SECURE_API
+#endif // MINGW_HAS_SECURE_API
+#endif // __MINGW32__
+
#include "win32-headers.h"
#include "v8.h"
#include "codegen.h"
+#include "isolate-inl.h"
#include "platform.h"
+#include "simulator.h"
#include "vm-state-inl.h"
#ifdef _MSC_VER
@@ -65,8 +77,6 @@ inline void MemoryBarrier() {
#endif // __MINGW64_VERSION_MAJOR
-#ifndef MINGW_HAS_SECURE_API
-
int localtime_s(tm* out_tm, const time_t* time) {
tm* posix_local_time_struct = localtime(time);
if (posix_local_time_struct == NULL) return 1;
@@ -113,17 +123,8 @@ int strncpy_s(char* dest, size_t dest_size, const char* source, size_t count) {
return 0;
}
-#endif // MINGW_HAS_SECURE_API
-
#endif // __MINGW32__
-// Generate a pseudo-random number in the range 0-2^31-1. Usually
-// defined in stdlib.h. Missing in both Microsoft Visual Studio C++ and MinGW.
-int random() {
- return rand();
-}
-
-
namespace v8 {
namespace internal {
@@ -137,22 +138,26 @@ double ceiling(double x) {
}
-static Mutex* limit_mutex = NULL;
+#if V8_TARGET_ARCH_IA32
+static void MemMoveWrapper(void* dest, const void* src, size_t size) {
+ memmove(dest, src, size);
+}
+
+
+// Initialize to library version so we can call this at any time during startup.
+static OS::MemMoveFunction memmove_function = &MemMoveWrapper;
-#if defined(V8_TARGET_ARCH_IA32)
-static OS::MemCopyFunction memcopy_function = NULL;
// Defined in codegen-ia32.cc.
-OS::MemCopyFunction CreateMemCopyFunction();
+OS::MemMoveFunction CreateMemMoveFunction();
// Copy memory area to disjoint memory area.
-void OS::MemCopy(void* dest, const void* src, size_t size) {
+void OS::MemMove(void* dest, const void* src, size_t size) {
+ if (size == 0) return;
// Note: here we rely on dependent reads being ordered. This is true
// on all architectures we currently support.
- (*memcopy_function)(dest, src, size);
-#ifdef DEBUG
- CHECK_EQ(0, memcmp(dest, src, size));
-#endif
+ (*memmove_function)(dest, src, size);
}
+
#endif // V8_TARGET_ARCH_IA32
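
The IA32 branch above is an instance of a common dispatch pattern: a function pointer starts out pointing at a portable library wrapper and is later repointed at a generated routine if one becomes available. A self-contained sketch of that pattern (the names here are illustrative, not V8's):

    #include <cstddef>
    #include <cstring>

    typedef void (*MoveFunction)(void* dest, const void* src, size_t size);

    // Portable fallback, usable before any code generation has happened.
    static void LibraryMove(void* dest, const void* src, size_t size) {
      memmove(dest, src, size);
    }

    // Starts at the library version so it is callable at any point of startup.
    static MoveFunction g_move = &LibraryMove;

    void InstallGeneratedMove(MoveFunction generated) {
      if (generated != NULL) g_move = generated;  // otherwise keep the fallback
    }

    void Move(void* dest, const void* src, size_t size) {
      if (size == 0) return;
      (*g_move)(dest, src, size);
    }
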
#ifdef _WIN64
@@ -165,6 +170,7 @@ void init_modulo_function() {
modulo_function = CreateModuloFunction();
}
+
double modulo(double x, double y) {
// Note: here we rely on dependent reads being ordered. This is true
// on all architectures we currently support.
@@ -176,8 +182,8 @@ double modulo(double x, double y) {
// Workaround MS fmod bugs. ECMA-262 says:
// dividend is finite and divisor is an infinity => result equals dividend
// dividend is a zero and divisor is nonzero finite => result equals dividend
- if (!(isfinite(x) && (!isfinite(y) && !isnan(y))) &&
- !(x == 0 && (y != 0 && isfinite(y)))) {
+ if (!(std::isfinite(x) && (!std::isfinite(y) && !std::isnan(y))) &&
+ !(x == 0 && (y != 0 && std::isfinite(y)))) {
x = fmod(x, y);
}
return x;
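
Read in positive form, the guard above says: when ECMA-262 already fixes the result to the dividend, return x without calling fmod() at all, so a buggy CRT cannot interfere. A behaviour-equivalent sketch of the same logic (not the V8 code itself):

    #include <cmath>

    double EcmaModulo(double x, double y) {
      // Finite dividend, infinite divisor: the result is the dividend.
      bool finite_over_infinity =
          std::isfinite(x) && !std::isfinite(y) && !std::isnan(y);
      // Zero dividend, nonzero finite divisor: the result is the dividend.
      bool zero_over_finite = x == 0 && y != 0 && std::isfinite(y);
      if (finite_over_infinity || zero_over_finite) return x;
      return std::fmod(x, y);
    }
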
@@ -199,9 +205,17 @@ UNARY_MATH_FUNCTION(sin, CreateTranscendentalFunction(TranscendentalCache::SIN))
UNARY_MATH_FUNCTION(cos, CreateTranscendentalFunction(TranscendentalCache::COS))
UNARY_MATH_FUNCTION(tan, CreateTranscendentalFunction(TranscendentalCache::TAN))
UNARY_MATH_FUNCTION(log, CreateTranscendentalFunction(TranscendentalCache::LOG))
+UNARY_MATH_FUNCTION(exp, CreateExpFunction())
UNARY_MATH_FUNCTION(sqrt, CreateSqrtFunction())
-#undef MATH_FUNCTION
+#undef UNARY_MATH_FUNCTION
+
+
+void lazily_initialize_fast_exp() {
+ if (fast_exp_function == NULL) {
+ init_fast_exp_function();
+ }
+}
void MathSetup() {
@@ -212,6 +226,7 @@ void MathSetup() {
init_fast_cos_function();
init_fast_tan_function();
init_fast_log_function();
+ // fast_exp is initialized lazily.
init_fast_sqrt_function();
}
@@ -222,12 +237,12 @@ void MathSetup() {
 // timestamps are represented as doubles in milliseconds since 00:00:00 UTC,
// January 1, 1970.
-class Time {
+class Win32Time {
public:
// Constructors.
- Time();
- explicit Time(double jstime);
- Time(int year, int mon, int day, int hour, int min, int sec);
+ Win32Time();
+ explicit Win32Time(double jstime);
+ Win32Time(int year, int mon, int day, int hour, int min, int sec);
// Convert timestamp to JavaScript representation.
double ToJSTime();
@@ -276,10 +291,6 @@ class Time {
// Return whether or not daylight savings time is in effect at this time.
bool InDST();
- // Return the difference (in milliseconds) between this timestamp and
- // another timestamp.
- int64_t Diff(Time* other);
-
// Accessor for FILETIME representation.
FILETIME& ft() { return time_.ft_; }
@@ -299,27 +310,28 @@ class Time {
TimeStamp time_;
};
+
// Static variables.
-bool Time::tz_initialized_ = false;
-TIME_ZONE_INFORMATION Time::tzinfo_;
-char Time::std_tz_name_[kTzNameSize];
-char Time::dst_tz_name_[kTzNameSize];
+bool Win32Time::tz_initialized_ = false;
+TIME_ZONE_INFORMATION Win32Time::tzinfo_;
+char Win32Time::std_tz_name_[kTzNameSize];
+char Win32Time::dst_tz_name_[kTzNameSize];
// Initialize timestamp to start of epoc.
-Time::Time() {
+Win32Time::Win32Time() {
t() = 0;
}
// Initialize timestamp from a JavaScript timestamp.
-Time::Time(double jstime) {
+Win32Time::Win32Time(double jstime) {
t() = static_cast<int64_t>(jstime) * kTimeScaler + kTimeEpoc;
}
// Initialize timestamp from date/time components.
-Time::Time(int year, int mon, int day, int hour, int min, int sec) {
+Win32Time::Win32Time(int year, int mon, int day, int hour, int min, int sec) {
SYSTEMTIME st;
st.wYear = year;
st.wMonth = mon;
@@ -333,14 +345,70 @@ Time::Time(int year, int mon, int day, int hour, int min, int sec) {
// Convert timestamp to JavaScript timestamp.
-double Time::ToJSTime() {
+double Win32Time::ToJSTime() {
return static_cast<double>((t() - kTimeEpoc) / kTimeScaler);
}
+// Set timestamp to current time.
+void Win32Time::SetToCurrentTime() {
+ // The default GetSystemTimeAsFileTime has a ~15.5ms resolution.
+ // Because we're fast, we like fast timers which have at least a
+ // 1ms resolution.
+ //
+ // timeGetTime() provides 1ms granularity when combined with
+ // timeBeginPeriod(). If the host application for v8 wants fast
+ // timers, it can use timeBeginPeriod to increase the resolution.
+ //
+  // Using timeGetTime() has a drawback because it is a 32-bit value
+  // and hence rolls over every ~49 days.
+ //
+ // To use the clock, we use GetSystemTimeAsFileTime as our base;
+ // and then use timeGetTime to extrapolate current time from the
+ // start time. To deal with rollovers, we resync the clock
+ // any time when more than kMaxClockElapsedTime has passed or
+ // whenever timeGetTime creates a rollover.
+
+ static bool initialized = false;
+ static TimeStamp init_time;
+ static DWORD init_ticks;
+ static const int64_t kHundredNanosecondsPerSecond = 10000000;
+ static const int64_t kMaxClockElapsedTime =
+ 60*kHundredNanosecondsPerSecond; // 1 minute
+
+ // If we are uninitialized, we need to resync the clock.
+ bool needs_resync = !initialized;
+
+ // Get the current time.
+ TimeStamp time_now;
+ GetSystemTimeAsFileTime(&time_now.ft_);
+ DWORD ticks_now = timeGetTime();
+
+ // Check if we need to resync due to clock rollover.
+ needs_resync |= ticks_now < init_ticks;
+
+ // Check if we need to resync due to elapsed time.
+ needs_resync |= (time_now.t_ - init_time.t_) > kMaxClockElapsedTime;
+
+ // Check if we need to resync due to backwards time change.
+ needs_resync |= time_now.t_ < init_time.t_;
+
+ // Resync the clock if necessary.
+ if (needs_resync) {
+ GetSystemTimeAsFileTime(&init_time.ft_);
+ init_ticks = ticks_now = timeGetTime();
+ initialized = true;
+ }
+
+ // Finally, compute the actual time. Why is this so hard.
+ DWORD elapsed = ticks_now - init_ticks;
+ this->time_.t_ = init_time.t_ + (static_cast<int64_t>(elapsed) * 10000);
+}
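
The factor of 10000 in the last line is just a unit conversion: FILETIME counts 100-nanosecond ticks, so one millisecond from timeGetTime() is 10,000 ticks, and the 32-bit millisecond counter wraps after 2^32 ms, roughly 49.7 days, which is the rollover the resync logic handles. A small helper makes the conversion explicit:

    #include <stdint.h>

    // 1 ms = 1,000,000 ns = 10,000 * 100 ns, hence the elapsed * 10000 above.
    inline int64_t MillisecondsToFiletimeTicks(int64_t elapsed_ms) {
      return elapsed_ms * 10000;
    }
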
+
+
// Guess the name of the timezone from the bias.
// The guess is very biased towards the northern hemisphere.
-const char* Time::GuessTimezoneNameFromBias(int bias) {
+const char* Win32Time::GuessTimezoneNameFromBias(int bias) {
static const int kHour = 60;
switch (-bias) {
case -9*kHour: return "Alaska";
@@ -365,7 +433,7 @@ const char* Time::GuessTimezoneNameFromBias(int bias) {
// Initialize timezone information. The timezone information is obtained from
// windows. If we cannot get the timezone information we fall back to CET.
// Please notice that this code is not thread-safe.
-void Time::TzSet() {
+void Win32Time::TzSet() {
// Just return if timezone information has already been initialized.
if (tz_initialized_) return;
@@ -414,78 +482,16 @@ void Time::TzSet() {
}
-// Return the difference in milliseconds between this and another timestamp.
-int64_t Time::Diff(Time* other) {
- return (t() - other->t()) / kTimeScaler;
-}
-
-
-// Set timestamp to current time.
-void Time::SetToCurrentTime() {
- // The default GetSystemTimeAsFileTime has a ~15.5ms resolution.
- // Because we're fast, we like fast timers which have at least a
- // 1ms resolution.
- //
- // timeGetTime() provides 1ms granularity when combined with
- // timeBeginPeriod(). If the host application for v8 wants fast
- // timers, it can use timeBeginPeriod to increase the resolution.
- //
- // Using timeGetTime() has a drawback because it is a 32bit value
- // and hence rolls-over every ~49days.
- //
- // To use the clock, we use GetSystemTimeAsFileTime as our base;
- // and then use timeGetTime to extrapolate current time from the
- // start time. To deal with rollovers, we resync the clock
- // any time when more than kMaxClockElapsedTime has passed or
- // whenever timeGetTime creates a rollover.
-
- static bool initialized = false;
- static TimeStamp init_time;
- static DWORD init_ticks;
- static const int64_t kHundredNanosecondsPerSecond = 10000000;
- static const int64_t kMaxClockElapsedTime =
- 60*kHundredNanosecondsPerSecond; // 1 minute
-
- // If we are uninitialized, we need to resync the clock.
- bool needs_resync = !initialized;
-
- // Get the current time.
- TimeStamp time_now;
- GetSystemTimeAsFileTime(&time_now.ft_);
- DWORD ticks_now = timeGetTime();
-
- // Check if we need to resync due to clock rollover.
- needs_resync |= ticks_now < init_ticks;
-
- // Check if we need to resync due to elapsed time.
- needs_resync |= (time_now.t_ - init_time.t_) > kMaxClockElapsedTime;
-
- // Check if we need to resync due to backwards time change.
- needs_resync |= time_now.t_ < init_time.t_;
-
- // Resync the clock if necessary.
- if (needs_resync) {
- GetSystemTimeAsFileTime(&init_time.ft_);
- init_ticks = ticks_now = timeGetTime();
- initialized = true;
- }
-
- // Finally, compute the actual time. Why is this so hard.
- DWORD elapsed = ticks_now - init_ticks;
- this->time_.t_ = init_time.t_ + (static_cast<int64_t>(elapsed) * 10000);
-}
-
-
// Return the local timezone offset in milliseconds east of UTC. This
// takes into account whether daylight saving is in effect at the time.
// Only times in the 32-bit Unix range may be passed to this function.
// Also, adding the time-zone offset to the input must not overflow.
// The function EquivalentTime() in date.js guarantees this.
-int64_t Time::LocalOffset() {
+int64_t Win32Time::LocalOffset() {
// Initialize timezone information, if needed.
TzSet();
- Time rounded_to_second(*this);
+ Win32Time rounded_to_second(*this);
rounded_to_second.t() = rounded_to_second.t() / 1000 / kTimeScaler *
1000 * kTimeScaler;
// Convert to local time using POSIX localtime function.
@@ -516,7 +522,7 @@ int64_t Time::LocalOffset() {
// Return whether or not daylight savings time is in effect at this time.
-bool Time::InDST() {
+bool Win32Time::InDST() {
// Initialize timezone information, if needed.
TzSet();
@@ -540,14 +546,14 @@ bool Time::InDST() {
// Return the daylight savings time offset for this time.
-int64_t Time::DaylightSavingsOffset() {
+int64_t Win32Time::DaylightSavingsOffset() {
return InDST() ? 60 * kMsPerMinute : 0;
}
// Returns a string identifying the current timezone for the
// timestamp taking into account daylight saving.
-char* Time::LocalTimezone() {
+char* Win32Time::LocalTimezone() {
// Return the standard or DST time zone name based on whether daylight
// saving is in effect at the given time.
return InDST() ? dst_tz_name_ : std_tz_name_;
@@ -558,8 +564,11 @@ void OS::PostSetUp() {
// Math functions depend on CPU features therefore they are initialized after
// CPU.
MathSetup();
-#if defined(V8_TARGET_ARCH_IA32)
- memcopy_function = CreateMemCopyFunction();
+#if V8_TARGET_ARCH_IA32
+ OS::MemMoveFunction generated_memmove = CreateMemMoveFunction();
+ if (generated_memmove != NULL) {
+ memmove_function = generated_memmove;
+ }
#endif
}
@@ -586,21 +595,14 @@ int OS::GetUserTime(uint32_t* secs, uint32_t* usecs) {
// Returns current time as the number of milliseconds since
// 00:00:00 UTC, January 1, 1970.
double OS::TimeCurrentMillis() {
- Time t;
- t.SetToCurrentTime();
- return t.ToJSTime();
-}
-
-// Returns the tickcounter based on timeGetTime.
-int64_t OS::Ticks() {
- return timeGetTime() * 1000; // Convert to microseconds.
+ return Time::Now().ToJsTime();
}
// Returns a string identifying the current timezone taking into
// account daylight saving.
const char* OS::LocalTimezone(double time) {
- return Time(time).LocalTimezone();
+ return Win32Time(time).LocalTimezone();
}
@@ -608,7 +610,7 @@ const char* OS::LocalTimezone(double time) {
// taking daylight savings time into account.
double OS::LocalTimeOffset() {
// Use current time, rounded to the millisecond.
- Time t(TimeCurrentMillis());
+ Win32Time t(TimeCurrentMillis());
   // Time::LocalOffset includes any daylight savings offset, so subtract it.
return static_cast<double>(t.LocalOffset() - t.DaylightSavingsOffset());
}
@@ -617,7 +619,7 @@ double OS::LocalTimeOffset() {
// Returns the daylight savings offset in milliseconds for the given
// time.
double OS::DaylightSavingsOffset(double time) {
- int64_t offset = Time(time).DaylightSavingsOffset();
+ int64_t offset = Win32Time(time).DaylightSavingsOffset();
return static_cast<double>(offset);
}
@@ -803,34 +805,8 @@ void OS::StrNCpy(Vector<char> dest, const char* src, size_t n) {
}
-// We keep the lowest and highest addresses mapped as a quick way of
-// determining that pointers are outside the heap (used mostly in assertions
-// and verification). The estimate is conservative, i.e., not all addresses in
-// 'allocated' space are actually allocated to our heap. The range is
-// [lowest, highest), inclusive on the low and and exclusive on the high end.
-static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
-static void* highest_ever_allocated = reinterpret_cast<void*>(0);
-
-
-static void UpdateAllocatedSpaceLimits(void* address, int size) {
- ASSERT(limit_mutex != NULL);
- ScopedLock lock(limit_mutex);
-
- lowest_ever_allocated = Min(lowest_ever_allocated, address);
- highest_ever_allocated =
- Max(highest_ever_allocated,
- reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
-}
-
-
-bool OS::IsOutsideAllocatedSpace(void* pointer) {
- if (pointer < lowest_ever_allocated || pointer >= highest_ever_allocated)
- return true;
- // Ask the Windows API
- if (IsBadWritePtr(pointer, 1))
- return true;
- return false;
-}
+#undef _TRUNCATE
+#undef STRUNCATE
// Get the system's page size used by VirtualAlloc() or the next power
@@ -860,7 +836,7 @@ size_t OS::AllocateAlignment() {
}
-static void* GetRandomAddr() {
+void* OS::GetRandomMmapAddr() {
Isolate* isolate = Isolate::UncheckedCurrent();
// Note that the current isolate isn't set up in a call path via
// CpuFeatures::Probe. We don't care about randomization in this case because
@@ -878,8 +854,9 @@ static void* GetRandomAddr() {
static const intptr_t kAllocationRandomAddressMin = 0x04000000;
static const intptr_t kAllocationRandomAddressMax = 0x3FFF0000;
#endif
- uintptr_t address = (V8::RandomPrivate(isolate) << kPageSizeBits)
- | kAllocationRandomAddressMin;
+ uintptr_t address =
+ (isolate->random_number_generator()->NextInt() << kPageSizeBits) |
+ kAllocationRandomAddressMin;
address &= kAllocationRandomAddressMax;
return reinterpret_cast<void *>(address);
}
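
The arithmetic above can be read as: take a random integer, shift it up by the page-size bits so the candidate address is page aligned, force it above the minimum with the OR, then clamp it under the maximum with the AND. A rough standalone sketch under the assumption of 4 KB pages (kPageSizeBits = 12) and the 32-bit bounds from the hunk above; it uses rand() purely for illustration rather than V8's random number generator:

    #include <stdint.h>
    #include <cstdlib>

    void* SketchRandomMmapAddr() {
      const int kPageSizeBits = 12;        // assumed: 4 KB pages
      const uintptr_t kMin = 0x04000000;   // kAllocationRandomAddressMin (32-bit)
      const uintptr_t kMax = 0x3FFF0000;   // kAllocationRandomAddressMax (32-bit)
      uintptr_t address =
          (static_cast<uintptr_t>(rand()) << kPageSizeBits) | kMin;
      address &= kMax;                     // keep the candidate inside the range
      return reinterpret_cast<void*>(address);
    }
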
@@ -893,7 +870,7 @@ static void* RandomizedVirtualAlloc(size_t size, int action, int protection) {
if (protection == PAGE_EXECUTE_READWRITE || protection == PAGE_NOACCESS) {
     // For executable pages, try to randomize the allocation address
for (size_t attempts = 0; base == NULL && attempts < 3; ++attempts) {
- base = VirtualAlloc(GetRandomAddr(), size, action, protection);
+ base = VirtualAlloc(OS::GetRandomMmapAddr(), size, action, protection);
}
}
@@ -918,14 +895,13 @@ void* OS::Allocate(const size_t requested,
prot);
if (mbase == NULL) {
- LOG(ISOLATE, StringEvent("OS::Allocate", "VirtualAlloc failed"));
+ LOG(Isolate::Current(), StringEvent("OS::Allocate", "VirtualAlloc failed"));
return NULL;
}
ASSERT(IsAligned(reinterpret_cast<size_t>(mbase), OS::AllocateAlignment()));
*allocated = msize;
- UpdateAllocatedSpaceLimits(mbase, static_cast<int>(msize));
return mbase;
}
@@ -950,7 +926,7 @@ void OS::ProtectCode(void* address, const size_t size) {
void OS::Guard(void* address, const size_t size) {
DWORD oldprotect;
- VirtualProtect(address, size, PAGE_READONLY | PAGE_GUARD, &oldprotect);
+ VirtualProtect(address, size, PAGE_NOACCESS, &oldprotect);
}
@@ -971,6 +947,9 @@ void OS::Abort() {
void OS::DebugBreak() {
#ifdef _MSC_VER
+  // To avoid depending on the Visual Studio runtime, the following code can
+  // be used instead:
+  //   __asm { int 3 }
__debugbreak();
#else
::DebugBreak();
@@ -1030,7 +1009,7 @@ OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
if (file_mapping == NULL) return NULL;
// Map a view of the file into memory
void* memory = MapViewOfFile(file_mapping, FILE_MAP_ALL_ACCESS, 0, 0, size);
- if (memory) memmove(memory, initial, size);
+ if (memory) OS::MemMove(memory, initial, size);
return new Win32MemoryMappedFile(file, file_mapping, memory, size);
}
@@ -1204,9 +1183,14 @@ TLHELP32_FUNCTION_LIST(DLL_FUNC_LOADED)
// application is closed.
}
+#undef DBGHELP_FUNCTION_LIST
+#undef TLHELP32_FUNCTION_LIST
+#undef DLL_FUNC_VAR
+#undef DLL_FUNC_TYPE
+
// Load the symbols for generating stack traces.
-static bool LoadSymbols(HANDLE process_handle) {
+static bool LoadSymbols(Isolate* isolate, HANDLE process_handle) {
static bool symbols_loaded = false;
if (symbols_loaded) return true;
@@ -1255,7 +1239,7 @@ static bool LoadSymbols(HANDLE process_handle) {
if (err != ERROR_MOD_NOT_FOUND &&
err != ERROR_INVALID_HANDLE) return false;
}
- LOG(i::Isolate::Current(),
+ LOG(isolate,
SharedLibraryEvent(
module_entry.szExePath,
reinterpret_cast<unsigned int>(module_entry.modBaseAddr),
@@ -1270,14 +1254,14 @@ static bool LoadSymbols(HANDLE process_handle) {
}
-void OS::LogSharedLibraryAddresses() {
+void OS::LogSharedLibraryAddresses(Isolate* isolate) {
// SharedLibraryEvents are logged when loading symbol information.
// Only the shared libraries loaded at the time of the call to
// LogSharedLibraryAddresses are logged. DLLs loaded after
// initialization are not accounted for.
if (!LoadDbgHelpAndTlHelp32()) return;
HANDLE process_handle = GetCurrentProcess();
- LoadSymbols(process_handle);
+ LoadSymbols(isolate, process_handle);
}
@@ -1285,132 +1269,21 @@ void OS::SignalCodeMovingGC() {
}
-// Walk the stack using the facilities in dbghelp.dll and tlhelp32.dll
-
-// Switch off warning 4748 (/GS can not protect parameters and local variables
-// from local buffer overrun because optimizations are disabled in function) as
-// it is triggered by the use of inline assembler.
-#pragma warning(push)
-#pragma warning(disable : 4748)
-int OS::StackWalk(Vector<OS::StackFrame> frames) {
- BOOL ok;
-
- // Load the required functions from DLL's.
- if (!LoadDbgHelpAndTlHelp32()) return kStackWalkError;
-
- // Get the process and thread handles.
- HANDLE process_handle = GetCurrentProcess();
- HANDLE thread_handle = GetCurrentThread();
-
- // Read the symbols.
- if (!LoadSymbols(process_handle)) return kStackWalkError;
-
- // Capture current context.
- CONTEXT context;
- RtlCaptureContext(&context);
-
- // Initialize the stack walking
- STACKFRAME64 stack_frame;
- memset(&stack_frame, 0, sizeof(stack_frame));
-#ifdef _WIN64
- stack_frame.AddrPC.Offset = context.Rip;
- stack_frame.AddrFrame.Offset = context.Rbp;
- stack_frame.AddrStack.Offset = context.Rsp;
-#else
- stack_frame.AddrPC.Offset = context.Eip;
- stack_frame.AddrFrame.Offset = context.Ebp;
- stack_frame.AddrStack.Offset = context.Esp;
-#endif
- stack_frame.AddrPC.Mode = AddrModeFlat;
- stack_frame.AddrFrame.Mode = AddrModeFlat;
- stack_frame.AddrStack.Mode = AddrModeFlat;
- int frames_count = 0;
-
- // Collect stack frames.
- int frames_size = frames.length();
- while (frames_count < frames_size) {
- ok = _StackWalk64(
- IMAGE_FILE_MACHINE_I386, // MachineType
- process_handle, // hProcess
- thread_handle, // hThread
- &stack_frame, // StackFrame
- &context, // ContextRecord
- NULL, // ReadMemoryRoutine
- _SymFunctionTableAccess64, // FunctionTableAccessRoutine
- _SymGetModuleBase64, // GetModuleBaseRoutine
- NULL); // TranslateAddress
- if (!ok) break;
-
- // Store the address.
- ASSERT((stack_frame.AddrPC.Offset >> 32) == 0); // 32-bit address.
- frames[frames_count].address =
- reinterpret_cast<void*>(stack_frame.AddrPC.Offset);
-
- // Try to locate a symbol for this frame.
- DWORD64 symbol_displacement;
- SmartArrayPointer<IMAGEHLP_SYMBOL64> symbol(
- NewArray<IMAGEHLP_SYMBOL64>(kStackWalkMaxNameLen));
- if (symbol.is_empty()) return kStackWalkError; // Out of memory.
- memset(*symbol, 0, sizeof(IMAGEHLP_SYMBOL64) + kStackWalkMaxNameLen);
- (*symbol)->SizeOfStruct = sizeof(IMAGEHLP_SYMBOL64);
- (*symbol)->MaxNameLength = kStackWalkMaxNameLen;
- ok = _SymGetSymFromAddr64(process_handle, // hProcess
- stack_frame.AddrPC.Offset, // Address
- &symbol_displacement, // Displacement
- *symbol); // Symbol
- if (ok) {
- // Try to locate more source information for the symbol.
- IMAGEHLP_LINE64 Line;
- memset(&Line, 0, sizeof(Line));
- Line.SizeOfStruct = sizeof(Line);
- DWORD line_displacement;
- ok = _SymGetLineFromAddr64(
- process_handle, // hProcess
- stack_frame.AddrPC.Offset, // dwAddr
- &line_displacement, // pdwDisplacement
- &Line); // Line
- // Format a text representation of the frame based on the information
- // available.
- if (ok) {
- SNPrintF(MutableCStrVector(frames[frames_count].text,
- kStackWalkMaxTextLen),
- "%s %s:%d:%d",
- (*symbol)->Name, Line.FileName, Line.LineNumber,
- line_displacement);
- } else {
- SNPrintF(MutableCStrVector(frames[frames_count].text,
- kStackWalkMaxTextLen),
- "%s",
- (*symbol)->Name);
- }
- // Make sure line termination is in place.
- frames[frames_count].text[kStackWalkMaxTextLen - 1] = '\0';
- } else {
- // No text representation of this frame
- frames[frames_count].text[0] = '\0';
-
- // Continue if we are just missing a module (for non C/C++ frames a
- // module will never be found).
- int err = GetLastError();
- if (err != ERROR_MOD_NOT_FOUND) {
- break;
- }
- }
-
- frames_count++;
+uint64_t OS::TotalPhysicalMemory() {
+ MEMORYSTATUSEX memory_info;
+ memory_info.dwLength = sizeof(memory_info);
+ if (!GlobalMemoryStatusEx(&memory_info)) {
+ UNREACHABLE();
+ return 0;
}
- // Return the number of frames filled in.
- return frames_count;
+ return static_cast<uint64_t>(memory_info.ullTotalPhys);
}
-// Restore warnings to previous settings.
-#pragma warning(pop)
#else // __MINGW32__
-void OS::LogSharedLibraryAddresses() { }
+void OS::LogSharedLibraryAddresses(Isolate* isolate) { }
void OS::SignalCodeMovingGC() { }
-int OS::StackWalk(Vector<OS::StackFrame> frames) { return 0; }
#endif // __MINGW32__
@@ -1434,18 +1307,16 @@ double OS::nan_value() {
int OS::ActivationFrameAlignment() {
#ifdef _WIN64
return 16; // Windows 64-bit ABI requires the stack to be 16-byte aligned.
+#elif defined(__MINGW32__)
+ // With gcc 4.4 the tree vectorization optimizer can generate code
+ // that requires 16 byte alignment such as movdqa on x86.
+ return 16;
#else
return 8; // Floating-point math runs faster with 8-byte alignment.
#endif
}
-void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
- MemoryBarrier();
- *ptr = value;
-}
-
-
VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
@@ -1481,7 +1352,7 @@ VirtualMemory::VirtualMemory(size_t size, size_t alignment)
VirtualMemory::~VirtualMemory() {
if (IsReserved()) {
- bool result = ReleaseRegion(address_, size_);
+ bool result = ReleaseRegion(address(), size());
ASSERT(result);
USE(result);
}
@@ -1500,11 +1371,7 @@ void VirtualMemory::Reset() {
bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
- if (CommitRegion(address, size, is_executable)) {
- UpdateAllocatedSpaceLimits(address, static_cast<int>(size));
- return true;
- }
- return false;
+ return CommitRegion(address, size, is_executable);
}
@@ -1514,6 +1381,17 @@ bool VirtualMemory::Uncommit(void* address, size_t size) {
}
+bool VirtualMemory::Guard(void* address) {
+ if (NULL == VirtualAlloc(address,
+ OS::CommitPageSize(),
+ MEM_COMMIT,
+ PAGE_NOACCESS)) {
+ return false;
+ }
+ return true;
+}
+
+
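
As a usage-level sketch of what the Guard() above enables: reserve a region, commit everything except its last page, and commit that last page with PAGE_NOACCESS so any touch of it faults immediately. Illustrative only; it assumes size is page aligned and larger than one page, and a real guard would also hook the resulting exception:

    #include <windows.h>

    void* ReserveWithTrailingGuard(size_t size, size_t page_size) {
      void* base = VirtualAlloc(NULL, size, MEM_RESERVE, PAGE_NOACCESS);
      if (base == NULL) return NULL;
      char* guard = static_cast<char*>(base) + size - page_size;
      if (VirtualAlloc(base, size - page_size, MEM_COMMIT, PAGE_READWRITE) == NULL ||
          VirtualAlloc(guard, page_size, MEM_COMMIT, PAGE_NOACCESS) == NULL) {
        VirtualFree(base, 0, MEM_RELEASE);
        return NULL;
      }
      return base;
    }
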
void* VirtualMemory::ReserveRegion(size_t size) {
return RandomizedVirtualAlloc(size, MEM_RESERVE, PAGE_NOACCESS);
}
@@ -1524,19 +1402,6 @@ bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
if (NULL == VirtualAlloc(base, size, MEM_COMMIT, prot)) {
return false;
}
-
- UpdateAllocatedSpaceLimits(base, static_cast<int>(size));
- return true;
-}
-
-
-bool VirtualMemory::Guard(void* address) {
- if (NULL == VirtualAlloc(address,
- OS::CommitPageSize(),
- MEM_COMMIT,
- PAGE_READONLY | PAGE_GUARD)) {
- return false;
- }
return true;
}
@@ -1551,6 +1416,12 @@ bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
}
+bool VirtualMemory::HasLazyCommits() {
+ // TODO(alph): implement for the platform.
+ return false;
+}
+
+
// ----------------------------------------------------------------------------
// Win32 thread support.
@@ -1563,7 +1434,7 @@ static const HANDLE kNoThread = INVALID_HANDLE_VALUE;
// convention.
static unsigned int __stdcall ThreadEntry(void* arg) {
Thread* thread = reinterpret_cast<Thread*>(arg);
- thread->Run();
+ thread->NotifyStartedAndRun();
return 0;
}
@@ -1580,7 +1451,8 @@ class Thread::PlatformData : public Malloced {
// handle until it is started.
Thread::Thread(const Options& options)
- : stack_size_(options.stack_size()) {
+ : stack_size_(options.stack_size()),
+ start_semaphore_(NULL) {
data_ = new PlatformData(kNoThread);
set_name(options.name());
}
@@ -1652,480 +1524,4 @@ void Thread::YieldCPU() {
Sleep(0);
}
-
-// ----------------------------------------------------------------------------
-// Win32 mutex support.
-//
-// On Win32 mutexes are implemented using CRITICAL_SECTION objects. These are
-// faster than Win32 Mutex objects because they are implemented using user mode
-// atomic instructions. Therefore we only do ring transitions if there is lock
-// contention.
-
-class Win32Mutex : public Mutex {
- public:
- Win32Mutex() { InitializeCriticalSection(&cs_); }
-
- virtual ~Win32Mutex() { DeleteCriticalSection(&cs_); }
-
- virtual int Lock() {
- EnterCriticalSection(&cs_);
- return 0;
- }
-
- virtual int Unlock() {
- LeaveCriticalSection(&cs_);
- return 0;
- }
-
-
- virtual bool TryLock() {
- // Returns non-zero if critical section is entered successfully entered.
- return TryEnterCriticalSection(&cs_);
- }
-
- private:
- CRITICAL_SECTION cs_; // Critical section used for mutex
-};
-
-
-Mutex* OS::CreateMutex() {
- return new Win32Mutex();
-}
-
-
-// ----------------------------------------------------------------------------
-// Win32 semaphore support.
-//
-// On Win32 semaphores are implemented using Win32 Semaphore objects. The
-// semaphores are anonymous. Also, the semaphores are initialized to have
-// no upper limit on count.
-
-
-class Win32Semaphore : public Semaphore {
- public:
- explicit Win32Semaphore(int count) {
- sem = ::CreateSemaphoreA(NULL, count, 0x7fffffff, NULL);
- }
-
- ~Win32Semaphore() {
- CloseHandle(sem);
- }
-
- void Wait() {
- WaitForSingleObject(sem, INFINITE);
- }
-
- bool Wait(int timeout) {
- // Timeout in Windows API is in milliseconds.
- DWORD millis_timeout = timeout / 1000;
- return WaitForSingleObject(sem, millis_timeout) != WAIT_TIMEOUT;
- }
-
- void Signal() {
- LONG dummy;
- ReleaseSemaphore(sem, 1, &dummy);
- }
-
- private:
- HANDLE sem;
-};
-
-
-Semaphore* OS::CreateSemaphore(int count) {
- return new Win32Semaphore(count);
-}
-
-
-// ----------------------------------------------------------------------------
-// Win32 socket support.
-//
-
-class Win32Socket : public Socket {
- public:
- explicit Win32Socket() {
- // Create the socket.
- socket_ = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
- }
- explicit Win32Socket(SOCKET socket): socket_(socket) { }
- virtual ~Win32Socket() { Shutdown(); }
-
- // Server initialization.
- bool Bind(const int port);
- bool Listen(int backlog) const;
- Socket* Accept() const;
-
- // Client initialization.
- bool Connect(const char* host, const char* port);
-
- // Shutdown socket for both read and write.
- bool Shutdown();
-
- // Data Transimission
- int Send(const char* data, int len) const;
- int Receive(char* data, int len) const;
-
- bool SetReuseAddress(bool reuse_address);
-
- bool IsValid() const { return socket_ != INVALID_SOCKET; }
-
- private:
- SOCKET socket_;
-};
-
-
-bool Win32Socket::Bind(const int port) {
- if (!IsValid()) {
- return false;
- }
-
- sockaddr_in addr;
- memset(&addr, 0, sizeof(addr));
- addr.sin_family = AF_INET;
- addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
- addr.sin_port = htons(port);
- int status = bind(socket_,
- reinterpret_cast<struct sockaddr *>(&addr),
- sizeof(addr));
- return status == 0;
-}
-
-
-bool Win32Socket::Listen(int backlog) const {
- if (!IsValid()) {
- return false;
- }
-
- int status = listen(socket_, backlog);
- return status == 0;
-}
-
-
-Socket* Win32Socket::Accept() const {
- if (!IsValid()) {
- return NULL;
- }
-
- SOCKET socket = accept(socket_, NULL, NULL);
- if (socket == INVALID_SOCKET) {
- return NULL;
- } else {
- return new Win32Socket(socket);
- }
-}
-
-
-bool Win32Socket::Connect(const char* host, const char* port) {
- if (!IsValid()) {
- return false;
- }
-
- // Lookup host and port.
- struct addrinfo *result = NULL;
- struct addrinfo hints;
- memset(&hints, 0, sizeof(addrinfo));
- hints.ai_family = AF_INET;
- hints.ai_socktype = SOCK_STREAM;
- hints.ai_protocol = IPPROTO_TCP;
- int status = getaddrinfo(host, port, &hints, &result);
- if (status != 0) {
- return false;
- }
-
- // Connect.
- status = connect(socket_,
- result->ai_addr,
- static_cast<int>(result->ai_addrlen));
- freeaddrinfo(result);
- return status == 0;
-}
-
-
-bool Win32Socket::Shutdown() {
- if (IsValid()) {
- // Shutdown socket for both read and write.
- int status = shutdown(socket_, SD_BOTH);
- closesocket(socket_);
- socket_ = INVALID_SOCKET;
- return status == SOCKET_ERROR;
- }
- return true;
-}
-
-
-int Win32Socket::Send(const char* data, int len) const {
- if (len <= 0) return 0;
- int written = 0;
- while (written < len) {
- int status = send(socket_, data + written, len - written, 0);
- if (status == 0) {
- break;
- } else if (status > 0) {
- written += status;
- } else {
- return 0;
- }
- }
- return written;
-}
-
-
-int Win32Socket::Receive(char* data, int len) const {
- if (len <= 0) return 0;
- int status = recv(socket_, data, len, 0);
- return (status == SOCKET_ERROR) ? 0 : status;
-}
-
-
-bool Win32Socket::SetReuseAddress(bool reuse_address) {
- BOOL on = reuse_address ? true : false;
- int status = setsockopt(socket_, SOL_SOCKET, SO_REUSEADDR,
- reinterpret_cast<char*>(&on), sizeof(on));
- return status == SOCKET_ERROR;
-}
-
-
-bool Socket::SetUp() {
- // Initialize Winsock32
- int err;
- WSADATA winsock_data;
- WORD version_requested = MAKEWORD(1, 0);
- err = WSAStartup(version_requested, &winsock_data);
- if (err != 0) {
- PrintF("Unable to initialize Winsock, err = %d\n", Socket::LastError());
- }
-
- return err == 0;
-}
-
-
-int Socket::LastError() {
- return WSAGetLastError();
-}
-
-
-uint16_t Socket::HToN(uint16_t value) {
- return htons(value);
-}
-
-
-uint16_t Socket::NToH(uint16_t value) {
- return ntohs(value);
-}
-
-
-uint32_t Socket::HToN(uint32_t value) {
- return htonl(value);
-}
-
-
-uint32_t Socket::NToH(uint32_t value) {
- return ntohl(value);
-}
-
-
-Socket* OS::CreateSocket() {
- return new Win32Socket();
-}
-
-
-// ----------------------------------------------------------------------------
-// Win32 profiler support.
-
-class Sampler::PlatformData : public Malloced {
- public:
- // Get a handle to the calling thread. This is the thread that we are
- // going to profile. We need to make a copy of the handle because we are
- // going to use it in the sampler thread. Using GetThreadHandle() will
- // not work in this case. We're using OpenThread because DuplicateHandle
- // for some reason doesn't work in Chrome's sandbox.
- PlatformData() : profiled_thread_(OpenThread(THREAD_GET_CONTEXT |
- THREAD_SUSPEND_RESUME |
- THREAD_QUERY_INFORMATION,
- false,
- GetCurrentThreadId())) {}
-
- ~PlatformData() {
- if (profiled_thread_ != NULL) {
- CloseHandle(profiled_thread_);
- profiled_thread_ = NULL;
- }
- }
-
- HANDLE profiled_thread() { return profiled_thread_; }
-
- private:
- HANDLE profiled_thread_;
-};
-
-
-class SamplerThread : public Thread {
- public:
- static const int kSamplerThreadStackSize = 64 * KB;
-
- explicit SamplerThread(int interval)
- : Thread(Thread::Options("SamplerThread", kSamplerThreadStackSize)),
- interval_(interval) {}
-
- static void SetUp() { if (!mutex_) mutex_ = OS::CreateMutex(); }
- static void TearDown() { delete mutex_; }
-
- static void AddActiveSampler(Sampler* sampler) {
- ScopedLock lock(mutex_);
- SamplerRegistry::AddActiveSampler(sampler);
- if (instance_ == NULL) {
- instance_ = new SamplerThread(sampler->interval());
- instance_->Start();
- } else {
- ASSERT(instance_->interval_ == sampler->interval());
- }
- }
-
- static void RemoveActiveSampler(Sampler* sampler) {
- ScopedLock lock(mutex_);
- SamplerRegistry::RemoveActiveSampler(sampler);
- if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
- RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
- delete instance_;
- instance_ = NULL;
- }
- }
-
- // Implement Thread::Run().
- virtual void Run() {
- SamplerRegistry::State state;
- while ((state = SamplerRegistry::GetState()) !=
- SamplerRegistry::HAS_NO_SAMPLERS) {
- bool cpu_profiling_enabled =
- (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
- bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
- // When CPU profiling is enabled both JavaScript and C++ code is
- // profiled. We must not suspend.
- if (!cpu_profiling_enabled) {
- if (rate_limiter_.SuspendIfNecessary()) continue;
- }
- if (cpu_profiling_enabled) {
- if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) {
- return;
- }
- }
- if (runtime_profiler_enabled) {
- if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) {
- return;
- }
- }
- OS::Sleep(interval_);
- }
- }
-
- static void DoCpuProfile(Sampler* sampler, void* raw_sampler_thread) {
- if (!sampler->isolate()->IsInitialized()) return;
- if (!sampler->IsProfiling()) return;
- SamplerThread* sampler_thread =
- reinterpret_cast<SamplerThread*>(raw_sampler_thread);
- sampler_thread->SampleContext(sampler);
- }
-
- static void DoRuntimeProfile(Sampler* sampler, void* ignored) {
- if (!sampler->isolate()->IsInitialized()) return;
- sampler->isolate()->runtime_profiler()->NotifyTick();
- }
-
- void SampleContext(Sampler* sampler) {
- HANDLE profiled_thread = sampler->platform_data()->profiled_thread();
- if (profiled_thread == NULL) return;
-
- // Context used for sampling the register state of the profiled thread.
- CONTEXT context;
- memset(&context, 0, sizeof(context));
-
- TickSample sample_obj;
- TickSample* sample = CpuProfiler::TickSampleEvent(sampler->isolate());
- if (sample == NULL) sample = &sample_obj;
-
- static const DWORD kSuspendFailed = static_cast<DWORD>(-1);
- if (SuspendThread(profiled_thread) == kSuspendFailed) return;
- sample->state = sampler->isolate()->current_vm_state();
-
- context.ContextFlags = CONTEXT_FULL;
- if (GetThreadContext(profiled_thread, &context) != 0) {
-#if V8_HOST_ARCH_X64
- sample->pc = reinterpret_cast<Address>(context.Rip);
- sample->sp = reinterpret_cast<Address>(context.Rsp);
- sample->fp = reinterpret_cast<Address>(context.Rbp);
-#else
- sample->pc = reinterpret_cast<Address>(context.Eip);
- sample->sp = reinterpret_cast<Address>(context.Esp);
- sample->fp = reinterpret_cast<Address>(context.Ebp);
-#endif
- sampler->SampleStack(sample);
- sampler->Tick(sample);
- }
- ResumeThread(profiled_thread);
- }
-
- const int interval_;
- RuntimeProfilerRateLimiter rate_limiter_;
-
- // Protects the process wide state below.
- static Mutex* mutex_;
- static SamplerThread* instance_;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(SamplerThread);
-};
-
-
-Mutex* SamplerThread::mutex_ = NULL;
-SamplerThread* SamplerThread::instance_ = NULL;
-
-
-void OS::SetUp() {
- // Seed the random number generator.
- // Convert the current time to a 64-bit integer first, before converting it
- // to an unsigned. Going directly can cause an overflow and the seed to be
- // set to all ones. The seed will be identical for different instances that
- // call this setup code within the same millisecond.
- uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
- srand(static_cast<unsigned int>(seed));
- limit_mutex = CreateMutex();
- SamplerThread::SetUp();
-}
-
-
-void OS::TearDown() {
- SamplerThread::TearDown();
- delete limit_mutex;
-}
-
-
-Sampler::Sampler(Isolate* isolate, int interval)
- : isolate_(isolate),
- interval_(interval),
- profiling_(false),
- active_(false),
- samples_taken_(0) {
- data_ = new PlatformData;
-}
-
-
-Sampler::~Sampler() {
- ASSERT(!IsActive());
- delete data_;
-}
-
-
-void Sampler::Start() {
- ASSERT(!IsActive());
- SetActive(true);
- SamplerThread::AddActiveSampler(this);
-}
-
-
-void Sampler::Stop() {
- ASSERT(IsActive());
- SamplerThread::RemoveActiveSampler(this);
- SetActive(false);
-}
-
-
} } // namespace v8::internal
diff --git a/deps/v8/src/platform.h b/deps/v8/src/platform.h
index de896acad..8e524aeaf 100644
--- a/deps/v8/src/platform.h
+++ b/deps/v8/src/platform.h
@@ -44,36 +44,34 @@
#ifndef V8_PLATFORM_H_
#define V8_PLATFORM_H_
+#include <cstdarg>
+
+#include "platform/mutex.h"
+#include "platform/semaphore.h"
+#include "utils.h"
+#include "v8globals.h"
+
#ifdef __sun
# ifndef signbit
+namespace std {
int signbit(double x);
+}
# endif
#endif
-// GCC specific stuff
-#ifdef __GNUC__
-
-// Needed for va_list on at least MinGW and Android.
-#include <stdarg.h>
-
-#define __GNUC_VERSION__ (__GNUC__ * 10000 + __GNUC_MINOR__ * 100)
-
-#endif // __GNUC__
-
-
-// Windows specific stuff.
-#ifdef WIN32
-
// Microsoft Visual C++ specific stuff.
-#ifdef _MSC_VER
+#if V8_CC_MSVC
+#include "win32-headers.h"
#include "win32-math.h"
int strncasecmp(const char* s1, const char* s2, int n);
+// Visual C++ 2013 and higher implement this function.
+#if (_MSC_VER < 1800)
inline int lrint(double flt) {
int intgr;
-#if defined(V8_TARGET_ARCH_IA32)
+#if V8_TARGET_ARCH_IA32
__asm {
fld flt
fistp intgr
@@ -88,46 +86,80 @@ inline int lrint(double flt) {
return intgr;
}
+#endif // _MSC_VER < 1800
-#endif // _MSC_VER
-
-// Random is missing on both Visual Studio and MinGW.
-int random();
-
-#endif // WIN32
-
-#include "atomicops.h"
-#include "lazy-instance.h"
-#include "platform-tls.h"
-#include "utils.h"
-#include "v8globals.h"
+#endif // V8_CC_MSVC
namespace v8 {
namespace internal {
-// Use AtomicWord for a machine-sized pointer. It is assumed that
-// reads and writes of naturally aligned values of this type are atomic.
-#if defined(__OpenBSD__) && defined(__i386__)
-typedef Atomic32 AtomicWord;
-#else
-typedef intptr_t AtomicWord;
-#endif
-
-class Semaphore;
-class Mutex;
-
double ceiling(double x);
double modulo(double x, double y);
-// Custom implementation of sin, cos, tan and log.
+// Custom implementation of math functions.
double fast_sin(double input);
double fast_cos(double input);
double fast_tan(double input);
double fast_log(double input);
+double fast_exp(double input);
double fast_sqrt(double input);
+// The custom exp implementation needs 16KB of lookup data; initialize it
+// on demand.
+void lazily_initialize_fast_exp();
+
+// ----------------------------------------------------------------------------
+// Fast TLS support
+
+#ifndef V8_NO_FAST_TLS
+
+#if defined(_MSC_VER) && V8_HOST_ARCH_IA32
+
+#define V8_FAST_TLS_SUPPORTED 1
+
+INLINE(intptr_t InternalGetExistingThreadLocal(intptr_t index));
+
+inline intptr_t InternalGetExistingThreadLocal(intptr_t index) {
+ const intptr_t kTibInlineTlsOffset = 0xE10;
+ const intptr_t kTibExtraTlsOffset = 0xF94;
+ const intptr_t kMaxInlineSlots = 64;
+ const intptr_t kMaxSlots = kMaxInlineSlots + 1024;
+ ASSERT(0 <= index && index < kMaxSlots);
+ if (index < kMaxInlineSlots) {
+ return static_cast<intptr_t>(__readfsdword(kTibInlineTlsOffset +
+ kPointerSize * index));
+ }
+ intptr_t extra = static_cast<intptr_t>(__readfsdword(kTibExtraTlsOffset));
+ ASSERT(extra != 0);
+ return *reinterpret_cast<intptr_t*>(extra +
+ kPointerSize * (index - kMaxInlineSlots));
+}
+
+#elif defined(__APPLE__) && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
+
+#define V8_FAST_TLS_SUPPORTED 1
+
+extern intptr_t kMacTlsBaseOffset;
+
+INLINE(intptr_t InternalGetExistingThreadLocal(intptr_t index));
+
+inline intptr_t InternalGetExistingThreadLocal(intptr_t index) {
+ intptr_t result;
+#if V8_HOST_ARCH_IA32
+ asm("movl %%gs:(%1,%2,4), %0;"
+ :"=r"(result) // Output must be a writable register.
+ :"r"(kMacTlsBaseOffset), "r"(index));
+#else
+ asm("movq %%gs:(%1,%2,8), %0;"
+ :"=r"(result)
+ :"r"(kMacTlsBaseOffset), "r"(index));
+#endif
+ return result;
+}
+
+#endif
+
+#endif // V8_NO_FAST_TLS
-// Forward declarations.
-class Socket;
// ----------------------------------------------------------------------------
// OS
@@ -138,26 +170,16 @@ class Socket;
class OS {
public:
- // Initializes the platform OS support. Called once at VM startup.
- static void SetUp();
-
   // Initializes the platform OS support that depends on CPU features. This is
// called after CPU initialization.
static void PostSetUp();
- // Clean up platform-OS-related things. Called once at VM shutdown.
- static void TearDown();
-
// Returns the accumulated user time for thread. This routine
// can be used for profiling. The implementation should
// strive for high-precision timer resolution, preferable
// micro-second resolution.
static int GetUserTime(uint32_t* secs, uint32_t* usecs);
- // Get a tick counter normalized to one tick per microsecond.
- // Used for calculating time intervals.
- static int64_t Ticks();
-
// Returns current time as the number of milliseconds since
// 00:00:00 UTC, January 1, 1970.
static double TimeCurrentMillis();
@@ -225,13 +247,6 @@ class OS {
// Get the Alignment guaranteed by Allocate().
static size_t AllocateAlignment();
- // Returns an indication of whether a pointer is in a space that
- // has been allocated by Allocate(). This method may conservatively
- // always return false, but giving more accurate information may
- // improve the robustness of the stack dump code in the presence of
- // heap corruption.
- static bool IsOutsideAllocatedSpace(void* pointer);
-
// Sleep for a number of milliseconds.
static void Sleep(const int milliseconds);
@@ -250,20 +265,6 @@ class OS {
char text[kStackWalkMaxTextLen];
};
- static int StackWalk(Vector<StackFrame> frames);
-
- // Factory method for creating platform dependent Mutex.
- // Please use delete to reclaim the storage for the returned Mutex.
- static Mutex* CreateMutex();
-
- // Factory method for creating platform dependent Semaphore.
- // Please use delete to reclaim the storage for the returned Semaphore.
- static Semaphore* CreateSemaphore(int count);
-
- // Factory method for creating platform dependent Socket.
- // Please use delete to reclaim the storage for the returned Socket.
- static Socket* CreateSocket();
-
class MemoryMappedFile {
public:
static MemoryMappedFile* open(const char* name);
@@ -285,7 +286,7 @@ class OS {
// Support for the profiler. Can do nothing, in which case ticks
   // occurring in shared libraries will not be properly accounted for.
- static void LogSharedLibraryAddresses();
+ static void LogSharedLibraryAddresses(Isolate* isolate);
// Support for the profiler. Notifies the external profiling
// process that a code moving garbage collection starts. Can do
@@ -301,6 +302,9 @@ class OS {
// positions indicated by the members of the CpuFeature enum from globals.h
static uint64_t CpuFeaturesImpliedByPlatform();
+ // The total amount of physical memory available on the current system.
+ static uint64_t TotalPhysicalMemory();
+
// Maximum size of the virtual memory. 0 means there is no artificial
// limit.
static intptr_t MaxVirtualMemory();
@@ -308,38 +312,72 @@ class OS {
// Returns the double constant NAN
static double nan_value();
- // Support runtime detection of Cpu implementer
- static CpuImplementer GetCpuImplementer();
-
- // Support runtime detection of VFP3 on ARM CPUs.
- static bool ArmCpuHasFeature(CpuFeature feature);
-
// Support runtime detection of whether the hard float option of the
// EABI is used.
static bool ArmUsingHardFloat();
- // Support runtime detection of FPU on MIPS CPUs.
- static bool MipsCpuHasFeature(CpuFeature feature);
-
// Returns the activation frame alignment constraint or zero if
// the platform doesn't care. Guaranteed to be a power of two.
static int ActivationFrameAlignment();
- static void ReleaseStore(volatile AtomicWord* ptr, AtomicWord value);
-
#if defined(V8_TARGET_ARCH_IA32)
- // Copy memory area to disjoint memory area.
- static void MemCopy(void* dest, const void* src, size_t size);
// Limit below which the extra overhead of the MemCopy function is likely
// to outweigh the benefits of faster copying.
static const int kMinComplexMemCopy = 64;
- typedef void (*MemCopyFunction)(void* dest, const void* src, size_t size);
-#else // V8_TARGET_ARCH_IA32
+ // Copy memory area. No restrictions.
+ static void MemMove(void* dest, const void* src, size_t size);
+ typedef void (*MemMoveFunction)(void* dest, const void* src, size_t size);
+
+ // Keep the distinction of "move" vs. "copy" for the benefit of other
+ // architectures.
+ static void MemCopy(void* dest, const void* src, size_t size) {
+ MemMove(dest, src, size);
+ }
+#elif defined(V8_HOST_ARCH_ARM)
+ typedef void (*MemCopyUint8Function)(uint8_t* dest,
+ const uint8_t* src,
+ size_t size);
+ static MemCopyUint8Function memcopy_uint8_function;
+ static void MemCopyUint8Wrapper(uint8_t* dest,
+ const uint8_t* src,
+ size_t chars) {
+ memcpy(dest, src, chars);
+ }
+ // For values < 16, the assembler function is slower than the inlined C code.
+ static const int kMinComplexMemCopy = 16;
+ static void MemCopy(void* dest, const void* src, size_t size) {
+ (*memcopy_uint8_function)(reinterpret_cast<uint8_t*>(dest),
+ reinterpret_cast<const uint8_t*>(src),
+ size);
+ }
+ static void MemMove(void* dest, const void* src, size_t size) {
+ memmove(dest, src, size);
+ }
+
+ typedef void (*MemCopyUint16Uint8Function)(uint16_t* dest,
+ const uint8_t* src,
+ size_t size);
+ static MemCopyUint16Uint8Function memcopy_uint16_uint8_function;
+ static void MemCopyUint16Uint8Wrapper(uint16_t* dest,
+ const uint8_t* src,
+ size_t chars);
+ // For values < 12, the assembler function is slower than the inlined C code.
+ static const int kMinComplexConvertMemCopy = 12;
+ static void MemCopyUint16Uint8(uint16_t* dest,
+ const uint8_t* src,
+ size_t size) {
+ (*memcopy_uint16_uint8_function)(dest, src, size);
+ }
+#else
+ // Copy memory area to disjoint memory area.
static void MemCopy(void* dest, const void* src, size_t size) {
memcpy(dest, src, size);
}
- static const int kMinComplexMemCopy = 256;
+ static void MemMove(void* dest, const void* src, size_t size) {
+ memmove(dest, src, size);
+ }
+ static const int kMinComplexMemCopy = 16 * kPointerSize;
#endif // V8_TARGET_ARCH_IA32
static int GetCurrentProcessId();
@@ -432,6 +470,11 @@ class VirtualMemory {
// and the same size it was reserved with.
static bool ReleaseRegion(void* base, size_t size);
+ // Returns true if OS performs lazy commits, i.e. the memory allocation call
+ // defers actual physical memory allocation till the first memory access.
+ // Otherwise returns false.
+ static bool HasLazyCommits();
+
private:
void* address_; // Start address of the virtual memory.
size_t size_; // Size of the virtual memory.
@@ -475,9 +518,18 @@ class Thread {
explicit Thread(const Options& options);
virtual ~Thread();
- // Start new thread by calling the Run() method in the new thread.
+ // Start new thread by calling the Run() method on the new thread.
void Start();
+ // Start new thread and wait until Run() method is called on the new thread.
+ void StartSynchronously() {
+ start_semaphore_ = new Semaphore(0);
+ Start();
+ start_semaphore_->Wait();
+ delete start_semaphore_;
+ start_semaphore_ = NULL;
+ }
+
// Wait until thread terminates.
void Join();
@@ -527,6 +579,11 @@ class Thread {
class PlatformData;
PlatformData* data() { return data_; }
+ void NotifyStartedAndRun() {
+ if (start_semaphore_) start_semaphore_->Signal();
+ Run();
+ }
+
private:
void set_name(const char* name);
@@ -534,260 +591,11 @@ class Thread {
char name_[kMaxThreadNameLength];
int stack_size_;
+ Semaphore* start_semaphore_;
DISALLOW_COPY_AND_ASSIGN(Thread);
};
-
-// ----------------------------------------------------------------------------
-// Mutex
-//
-// Mutexes are used for serializing access to non-reentrant sections of code.
-// The implementations of mutex should allow for nested/recursive locking.
-
-class Mutex {
- public:
- virtual ~Mutex() {}
-
- // Locks the given mutex. If the mutex is currently unlocked, it becomes
- // locked and owned by the calling thread, and immediately. If the mutex
- // is already locked by another thread, suspends the calling thread until
- // the mutex is unlocked.
- virtual int Lock() = 0;
-
- // Unlocks the given mutex. The mutex is assumed to be locked and owned by
- // the calling thread on entrance.
- virtual int Unlock() = 0;
-
- // Tries to lock the given mutex. Returns whether the mutex was
- // successfully locked.
- virtual bool TryLock() = 0;
-};
-
-struct CreateMutexTrait {
- static Mutex* Create() {
- return OS::CreateMutex();
- }
-};
-
-// POD Mutex initialized lazily (i.e. the first time Pointer() is called).
-// Usage:
-// static LazyMutex my_mutex = LAZY_MUTEX_INITIALIZER;
-//
-// void my_function() {
-// ScopedLock my_lock(my_mutex.Pointer());
-// // Do something.
-// }
-//
-typedef LazyDynamicInstance<
- Mutex, CreateMutexTrait, ThreadSafeInitOnceTrait>::type LazyMutex;
-
-#define LAZY_MUTEX_INITIALIZER LAZY_DYNAMIC_INSTANCE_INITIALIZER
-
-// ----------------------------------------------------------------------------
-// ScopedLock
-//
-// Stack-allocated ScopedLocks provide block-scoped locking and
-// unlocking of a mutex.
-class ScopedLock {
- public:
- explicit ScopedLock(Mutex* mutex): mutex_(mutex) {
- ASSERT(mutex_ != NULL);
- mutex_->Lock();
- }
- ~ScopedLock() {
- mutex_->Unlock();
- }
-
- private:
- Mutex* mutex_;
- DISALLOW_COPY_AND_ASSIGN(ScopedLock);
-};
-
-
-// ----------------------------------------------------------------------------
-// Semaphore
-//
-// A semaphore object is a synchronization object that maintains a count. The
-// count is decremented each time a thread completes a wait for the semaphore
-// object and incremented each time a thread signals the semaphore. When the
-// count reaches zero, threads waiting for the semaphore blocks until the
-// count becomes non-zero.
-
-class Semaphore {
- public:
- virtual ~Semaphore() {}
-
- // Suspends the calling thread until the semaphore counter is non zero
- // and then decrements the semaphore counter.
- virtual void Wait() = 0;
-
- // Suspends the calling thread until the counter is non zero or the timeout
- // time has passed. If timeout happens the return value is false and the
- // counter is unchanged. Otherwise the semaphore counter is decremented and
- // true is returned. The timeout value is specified in microseconds.
- virtual bool Wait(int timeout) = 0;
-
- // Increments the semaphore counter.
- virtual void Signal() = 0;
-};
-
-template <int InitialValue>
-struct CreateSemaphoreTrait {
- static Semaphore* Create() {
- return OS::CreateSemaphore(InitialValue);
- }
-};
-
-// POD Semaphore initialized lazily (i.e. the first time Pointer() is called).
-// Usage:
-// // The following semaphore starts at 0.
-// static LazySemaphore<0>::type my_semaphore = LAZY_SEMAPHORE_INITIALIZER;
-//
-// void my_function() {
-// // Do something with my_semaphore.Pointer().
-// }
-//
-template <int InitialValue>
-struct LazySemaphore {
- typedef typename LazyDynamicInstance<
- Semaphore, CreateSemaphoreTrait<InitialValue>,
- ThreadSafeInitOnceTrait>::type type;
-};
-
-#define LAZY_SEMAPHORE_INITIALIZER LAZY_DYNAMIC_INSTANCE_INITIALIZER
-
-
-// ----------------------------------------------------------------------------
-// Socket
-//
-
-class Socket {
- public:
- virtual ~Socket() {}
-
- // Server initialization.
- virtual bool Bind(const int port) = 0;
- virtual bool Listen(int backlog) const = 0;
- virtual Socket* Accept() const = 0;
-
- // Client initialization.
- virtual bool Connect(const char* host, const char* port) = 0;
-
- // Shutdown socket for both read and write. This causes blocking Send and
- // Receive calls to exit. After Shutdown the Socket object cannot be used for
- // any communication.
- virtual bool Shutdown() = 0;
-
- // Data Transimission
- // Return 0 on failure.
- virtual int Send(const char* data, int len) const = 0;
- virtual int Receive(char* data, int len) const = 0;
-
- // Set the value of the SO_REUSEADDR socket option.
- virtual bool SetReuseAddress(bool reuse_address) = 0;
-
- virtual bool IsValid() const = 0;
-
- static bool SetUp();
- static int LastError();
- static uint16_t HToN(uint16_t value);
- static uint16_t NToH(uint16_t value);
- static uint32_t HToN(uint32_t value);
- static uint32_t NToH(uint32_t value);
-};
-
-
-// ----------------------------------------------------------------------------
-// Sampler
-//
-// A sampler periodically samples the state of the VM and optionally
-// (if used for profiling) the program counter and stack pointer for
-// the thread that created it.
-
-// TickSample captures the information collected for each sample.
-class TickSample {
- public:
- TickSample()
- : state(OTHER),
- pc(NULL),
- sp(NULL),
- fp(NULL),
- tos(NULL),
- frames_count(0),
- has_external_callback(false) {}
- StateTag state; // The state of the VM.
- Address pc; // Instruction pointer.
- Address sp; // Stack pointer.
- Address fp; // Frame pointer.
- union {
- Address tos; // Top stack value (*sp).
- Address external_callback;
- };
- static const int kMaxFramesCount = 64;
- Address stack[kMaxFramesCount]; // Call stack.
- int frames_count : 8; // Number of captured frames.
- bool has_external_callback : 1;
-};
-
-class Sampler {
- public:
- // Initialize sampler.
- Sampler(Isolate* isolate, int interval);
- virtual ~Sampler();
-
- int interval() const { return interval_; }
-
- // Performs stack sampling.
- void SampleStack(TickSample* sample) {
- DoSampleStack(sample);
- IncSamplesTaken();
- }
-
- // This method is called for each sampling period with the current
- // program counter.
- virtual void Tick(TickSample* sample) = 0;
-
- // Start and stop sampler.
- void Start();
- void Stop();
-
- // Is the sampler used for profiling?
- bool IsProfiling() const { return NoBarrier_Load(&profiling_) > 0; }
- void IncreaseProfilingDepth() { NoBarrier_AtomicIncrement(&profiling_, 1); }
- void DecreaseProfilingDepth() { NoBarrier_AtomicIncrement(&profiling_, -1); }
-
- // Whether the sampler is running (that is, consumes resources).
- bool IsActive() const { return NoBarrier_Load(&active_); }
-
- Isolate* isolate() { return isolate_; }
-
- // Used in tests to make sure that stack sampling is performed.
- int samples_taken() const { return samples_taken_; }
- void ResetSamplesTaken() { samples_taken_ = 0; }
-
- class PlatformData;
- PlatformData* data() { return data_; }
-
- PlatformData* platform_data() { return data_; }
-
- protected:
- virtual void DoSampleStack(TickSample* sample) = 0;
-
- private:
- void SetActive(bool value) { NoBarrier_Store(&active_, value); }
- void IncSamplesTaken() { if (++samples_taken_ < 0) samples_taken_ = 0; }
-
- Isolate* isolate_;
- const int interval_;
- Atomic32 profiling_;
- Atomic32 active_;
- PlatformData* data_; // Platform specific data.
- int samples_taken_; // Counts stack samples taken.
- DISALLOW_IMPLICIT_CONSTRUCTORS(Sampler);
-};
-
-
} } // namespace v8::internal
#endif // V8_PLATFORM_H_
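
Illustrative usage sketch, not part of the patch above: the new Thread::StartSynchronously() added to platform.h blocks the caller until Run() is entered on the new thread. WorkerThread below is a hypothetical subclass and assumes only the declarations from platform.h as modified here.

// Hypothetical Thread subclass using the synchronous-start handshake.
class WorkerThread : public v8::internal::Thread {
 public:
  WorkerThread() : Thread(Options("v8:Worker")) {}
  virtual void Run() {
    // Thread body; the start semaphore has already been signalled by
    // NotifyStartedAndRun() before Run() is entered.
  }
};

void StartWorkerAndWait() {
  WorkerThread* worker = new WorkerThread();
  // Unlike Start(), this returns only once the new thread is running.
  worker->StartSynchronously();
}
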
diff --git a/deps/v8/src/platform/condition-variable.cc b/deps/v8/src/platform/condition-variable.cc
new file mode 100644
index 000000000..e2bf3882e
--- /dev/null
+++ b/deps/v8/src/platform/condition-variable.cc
@@ -0,0 +1,345 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "platform/condition-variable.h"
+
+#include <cerrno>
+#include <ctime>
+
+#include "platform/time.h"
+
+namespace v8 {
+namespace internal {
+
+#if V8_OS_POSIX
+
+ConditionVariable::ConditionVariable() {
+ // TODO(bmeurer): The test for V8_LIBRT_NOT_AVAILABLE is a temporary
+ // hack to support cross-compiling Chrome for Android in AOSP. Remove
+ // this once AOSP is fixed.
+#if (V8_OS_FREEBSD || V8_OS_NETBSD || V8_OS_OPENBSD || \
+ (V8_OS_LINUX && V8_LIBC_GLIBC)) && !V8_LIBRT_NOT_AVAILABLE
+ // On Free/Net/OpenBSD and Linux with glibc we can change the time
+ // source for pthread_cond_timedwait() to use the monotonic clock.
+ pthread_condattr_t attr;
+ int result = pthread_condattr_init(&attr);
+ ASSERT_EQ(0, result);
+ result = pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);
+ ASSERT_EQ(0, result);
+ result = pthread_cond_init(&native_handle_, &attr);
+ ASSERT_EQ(0, result);
+ result = pthread_condattr_destroy(&attr);
+#else
+ int result = pthread_cond_init(&native_handle_, NULL);
+#endif
+ ASSERT_EQ(0, result);
+ USE(result);
+}
+
+
+ConditionVariable::~ConditionVariable() {
+ int result = pthread_cond_destroy(&native_handle_);
+ ASSERT_EQ(0, result);
+ USE(result);
+}
+
+
+void ConditionVariable::NotifyOne() {
+ int result = pthread_cond_signal(&native_handle_);
+ ASSERT_EQ(0, result);
+ USE(result);
+}
+
+
+void ConditionVariable::NotifyAll() {
+ int result = pthread_cond_broadcast(&native_handle_);
+ ASSERT_EQ(0, result);
+ USE(result);
+}
+
+
+void ConditionVariable::Wait(Mutex* mutex) {
+ mutex->AssertHeldAndUnmark();
+ int result = pthread_cond_wait(&native_handle_, &mutex->native_handle());
+ ASSERT_EQ(0, result);
+ USE(result);
+ mutex->AssertUnheldAndMark();
+}
+
+
+bool ConditionVariable::WaitFor(Mutex* mutex, const TimeDelta& rel_time) {
+ struct timespec ts;
+ int result;
+ mutex->AssertHeldAndUnmark();
+#if V8_OS_MACOSX
+ // Mac OS X provides pthread_cond_timedwait_relative_np(), which does
+ // not depend on the real time clock, which is what you really WANT here!
+ ts = rel_time.ToTimespec();
+ ASSERT_GE(ts.tv_sec, 0);
+ ASSERT_GE(ts.tv_nsec, 0);
+ result = pthread_cond_timedwait_relative_np(
+ &native_handle_, &mutex->native_handle(), &ts);
+#else
+ // TODO(bmeurer): The test for V8_LIBRT_NOT_AVAILABLE is a temporary
+ // hack to support cross-compiling Chrome for Android in AOSP. Remove
+ // this once AOSP is fixed.
+#if (V8_OS_FREEBSD || V8_OS_NETBSD || V8_OS_OPENBSD || \
+ (V8_OS_LINUX && V8_LIBC_GLIBC)) && !V8_LIBRT_NOT_AVAILABLE
+ // On Free/Net/OpenBSD and Linux with glibc we can change the time
+ // source for pthread_cond_timedwait() to use the monotonic clock.
+ result = clock_gettime(CLOCK_MONOTONIC, &ts);
+ ASSERT_EQ(0, result);
+ Time now = Time::FromTimespec(ts);
+#else
+ // The timeout argument to pthread_cond_timedwait() is in absolute time.
+ Time now = Time::NowFromSystemTime();
+#endif
+ Time end_time = now + rel_time;
+ ASSERT_GE(end_time, now);
+ ts = end_time.ToTimespec();
+ result = pthread_cond_timedwait(
+ &native_handle_, &mutex->native_handle(), &ts);
+#endif // V8_OS_MACOSX
+ mutex->AssertUnheldAndMark();
+ if (result == ETIMEDOUT) {
+ return false;
+ }
+ ASSERT_EQ(0, result);
+ return true;
+}
+
+#elif V8_OS_WIN
+
+struct ConditionVariable::Event {
+ Event() : handle_(::CreateEventA(NULL, true, false, NULL)) {
+ ASSERT(handle_ != NULL);
+ }
+
+ ~Event() {
+ BOOL ok = ::CloseHandle(handle_);
+ ASSERT(ok);
+ USE(ok);
+ }
+
+ bool WaitFor(DWORD timeout_ms) {
+ DWORD result = ::WaitForSingleObject(handle_, timeout_ms);
+ if (result == WAIT_OBJECT_0) {
+ return true;
+ }
+ ASSERT(result == WAIT_TIMEOUT);
+ return false;
+ }
+
+ HANDLE handle_;
+ Event* next_;
+ HANDLE thread_;
+ volatile bool notified_;
+};
+
+
+ConditionVariable::NativeHandle::~NativeHandle() {
+ ASSERT(waitlist_ == NULL);
+
+ while (freelist_ != NULL) {
+ Event* event = freelist_;
+ freelist_ = event->next_;
+ delete event;
+ }
+}
+
+
+ConditionVariable::Event* ConditionVariable::NativeHandle::Pre() {
+ LockGuard<Mutex> lock_guard(&mutex_);
+
+ // Grab an event from the free list or create a new one.
+ Event* event = freelist_;
+ if (event != NULL) {
+ freelist_ = event->next_;
+ } else {
+ event = new Event;
+ }
+ event->thread_ = GetCurrentThread();
+ event->notified_ = false;
+
+#ifdef DEBUG
+ // The event must not be on the wait list.
+ for (Event* we = waitlist_; we != NULL; we = we->next_) {
+ ASSERT_NE(event, we);
+ }
+#endif
+
+ // Prepend the event to the wait list.
+ event->next_ = waitlist_;
+ waitlist_ = event;
+
+ return event;
+}
+
+
+void ConditionVariable::NativeHandle::Post(Event* event, bool result) {
+ LockGuard<Mutex> lock_guard(&mutex_);
+
+ // Remove the event from the wait list.
+ for (Event** wep = &waitlist_;; wep = &(*wep)->next_) {
+ ASSERT_NE(NULL, *wep);
+ if (*wep == event) {
+ *wep = event->next_;
+ break;
+ }
+ }
+
+#ifdef DEBUG
+ // The event must not be on the free list.
+ for (Event* fe = freelist_; fe != NULL; fe = fe->next_) {
+ ASSERT_NE(event, fe);
+ }
+#endif
+
+ // Reset the event.
+ BOOL ok = ::ResetEvent(event->handle_);
+ ASSERT(ok);
+ USE(ok);
+
+ // Insert the event into the free list.
+ event->next_ = freelist_;
+ freelist_ = event;
+
+ // Forward signals delivered after the timeout to the next waiting event.
+ if (!result && event->notified_ && waitlist_ != NULL) {
+ ok = ::SetEvent(waitlist_->handle_);
+ ASSERT(ok);
+ USE(ok);
+ waitlist_->notified_ = true;
+ }
+}
+
+
+ConditionVariable::ConditionVariable() {}
+
+
+ConditionVariable::~ConditionVariable() {}
+
+
+void ConditionVariable::NotifyOne() {
+ // Notify the thread with the highest priority in the waitlist
+ // that was not already signalled.
+ LockGuard<Mutex> lock_guard(native_handle_.mutex());
+ Event* highest_event = NULL;
+ int highest_priority = std::numeric_limits<int>::min();
+ for (Event* event = native_handle().waitlist();
+ event != NULL;
+ event = event->next_) {
+ if (event->notified_) {
+ continue;
+ }
+ int priority = GetThreadPriority(event->thread_);
+ ASSERT_NE(THREAD_PRIORITY_ERROR_RETURN, priority);
+ if (priority >= highest_priority) {
+ highest_priority = priority;
+ highest_event = event;
+ }
+ }
+ if (highest_event != NULL) {
+ ASSERT(!highest_event->notified_);
+ ::SetEvent(highest_event->handle_);
+ highest_event->notified_ = true;
+ }
+}
+
+
+void ConditionVariable::NotifyAll() {
+ // Notify all threads on the waitlist.
+ LockGuard<Mutex> lock_guard(native_handle_.mutex());
+ for (Event* event = native_handle().waitlist();
+ event != NULL;
+ event = event->next_) {
+ if (!event->notified_) {
+ ::SetEvent(event->handle_);
+ event->notified_ = true;
+ }
+ }
+}
+
+
+void ConditionVariable::Wait(Mutex* mutex) {
+ // Create and setup the wait event.
+ Event* event = native_handle_.Pre();
+
+ // Release the user mutex.
+ mutex->Unlock();
+
+ // Wait on the wait event.
+ while (!event->WaitFor(INFINITE))
+ ;
+
+  // Reacquire the user mutex.
+ mutex->Lock();
+
+ // Release the wait event (we must have been notified).
+ ASSERT(event->notified_);
+ native_handle_.Post(event, true);
+}
+
+
+bool ConditionVariable::WaitFor(Mutex* mutex, const TimeDelta& rel_time) {
+ // Create and setup the wait event.
+ Event* event = native_handle_.Pre();
+
+ // Release the user mutex.
+ mutex->Unlock();
+
+ // Wait on the wait event.
+ TimeTicks now = TimeTicks::Now();
+ TimeTicks end = now + rel_time;
+ bool result = false;
+ while (true) {
+ int64_t msec = (end - now).InMilliseconds();
+ if (msec >= static_cast<int64_t>(INFINITE)) {
+ result = event->WaitFor(INFINITE - 1);
+ if (result) {
+ break;
+ }
+ now = TimeTicks::Now();
+ } else {
+ result = event->WaitFor((msec < 0) ? 0 : static_cast<DWORD>(msec));
+ break;
+ }
+ }
+
+  // Reacquire the user mutex.
+ mutex->Lock();
+
+ // Release the wait event.
+ ASSERT(!result || event->notified_);
+ native_handle_.Post(event, result);
+
+ return result;
+}
+
+#endif // V8_OS_POSIX
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/platform/condition-variable.h b/deps/v8/src/platform/condition-variable.h
new file mode 100644
index 000000000..4d8a88aee
--- /dev/null
+++ b/deps/v8/src/platform/condition-variable.h
@@ -0,0 +1,140 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_PLATFORM_CONDITION_VARIABLE_H_
+#define V8_PLATFORM_CONDITION_VARIABLE_H_
+
+#include "platform/mutex.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class ConditionVariableEvent;
+class TimeDelta;
+
+// -----------------------------------------------------------------------------
+// ConditionVariable
+//
+// This class is a synchronization primitive that can be used to block a thread,
+// or multiple threads at the same time, until:
+// - a notification is received from another thread,
+// - a timeout expires, or
+// - a spurious wakeup occurs
+// Any thread that intends to wait on a ConditionVariable has to acquire a lock
+// on a Mutex first. The |Wait()| and |WaitFor()| operations atomically release
+// the mutex and suspend the execution of the calling thread. When the condition
+// variable is notified, the thread is awakened, and the mutex is reacquired.
+
+class ConditionVariable V8_FINAL {
+ public:
+ ConditionVariable();
+ ~ConditionVariable();
+
+ // If any threads are waiting on this condition variable, calling
+ // |NotifyOne()| unblocks one of the waiting threads.
+ void NotifyOne();
+
+ // Unblocks all threads currently waiting for this condition variable.
+ void NotifyAll();
+
+ // |Wait()| causes the calling thread to block until the condition variable is
+ // notified or a spurious wakeup occurs. Atomically releases the mutex, blocks
+ // the current executing thread, and adds it to the list of threads waiting on
+ // this condition variable. The thread will be unblocked when |NotifyAll()| or
+ // |NotifyOne()| is executed. It may also be unblocked spuriously. When
+ // unblocked, regardless of the reason, the lock on the mutex is reacquired
+ // and |Wait()| exits.
+ void Wait(Mutex* mutex);
+
+ // Atomically releases the mutex, blocks the current executing thread, and
+ // adds it to the list of threads waiting on this condition variable. The
+ // thread will be unblocked when |NotifyAll()| or |NotifyOne()| is executed,
+ // or when the relative timeout |rel_time| expires. It may also be unblocked
+ // spuriously. When unblocked, regardless of the reason, the lock on the mutex
+ // is reacquired and |WaitFor()| exits. Returns true if the condition variable
+ // was notified prior to the timeout.
+ bool WaitFor(Mutex* mutex, const TimeDelta& rel_time) V8_WARN_UNUSED_RESULT;
+
+ // The implementation-defined native handle type.
+#if V8_OS_POSIX
+ typedef pthread_cond_t NativeHandle;
+#elif V8_OS_WIN
+ struct Event;
+ class NativeHandle V8_FINAL {
+ public:
+ NativeHandle() : waitlist_(NULL), freelist_(NULL) {}
+ ~NativeHandle();
+
+ Event* Pre() V8_WARN_UNUSED_RESULT;
+ void Post(Event* event, bool result);
+
+ Mutex* mutex() { return &mutex_; }
+ Event* waitlist() { return waitlist_; }
+
+ private:
+ Event* waitlist_;
+ Event* freelist_;
+ Mutex mutex_;
+
+ DISALLOW_COPY_AND_ASSIGN(NativeHandle);
+ };
+#endif
+
+ NativeHandle& native_handle() {
+ return native_handle_;
+ }
+ const NativeHandle& native_handle() const {
+ return native_handle_;
+ }
+
+ private:
+ NativeHandle native_handle_;
+
+ DISALLOW_COPY_AND_ASSIGN(ConditionVariable);
+};
+
+
+// POD ConditionVariable initialized lazily (i.e. the first time Pointer() is
+// called).
+// Usage:
+// static LazyConditionVariable my_condvar =
+// LAZY_CONDITION_VARIABLE_INITIALIZER;
+//
+// void my_function() {
+// LockGuard<Mutex> lock_guard(&my_mutex);
+// my_condvar.Pointer()->Wait(&my_mutex);
+// }
+typedef LazyStaticInstance<ConditionVariable,
+ DefaultConstructTrait<ConditionVariable>,
+ ThreadSafeInitOnceTrait>::type LazyConditionVariable;
+
+#define LAZY_CONDITION_VARIABLE_INITIALIZER LAZY_STATIC_INSTANCE_INITIALIZER
+
+} } // namespace v8::internal
+
+#endif // V8_PLATFORM_CONDITION_VARIABLE_H_
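
Illustrative usage sketch, not part of the patch: both the POSIX and Windows implementations allow spurious wakeups, so callers of Wait()/WaitFor() are expected to re-check their predicate in a loop while holding the mutex. The queue variables below are hypothetical.

static v8::internal::Mutex queue_mutex;
static v8::internal::ConditionVariable queue_nonempty;
static int pending_items = 0;

void Produce() {
  v8::internal::LockGuard<v8::internal::Mutex> guard(&queue_mutex);
  pending_items++;
  queue_nonempty.NotifyOne();
}

void Consume() {
  v8::internal::LockGuard<v8::internal::Mutex> guard(&queue_mutex);
  while (pending_items == 0) {
    // Releases queue_mutex while blocked and reacquires it before returning;
    // the loop guards against spurious wakeups.
    queue_nonempty.Wait(&queue_mutex);
  }
  pending_items--;
}
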
diff --git a/deps/v8/src/platform/elapsed-timer.h b/deps/v8/src/platform/elapsed-timer.h
new file mode 100644
index 000000000..b61b00760
--- /dev/null
+++ b/deps/v8/src/platform/elapsed-timer.h
@@ -0,0 +1,120 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_PLATFORM_ELAPSED_TIMER_H_
+#define V8_PLATFORM_ELAPSED_TIMER_H_
+
+#include "../checks.h"
+#include "time.h"
+
+namespace v8 {
+namespace internal {
+
+class ElapsedTimer V8_FINAL BASE_EMBEDDED {
+ public:
+#ifdef DEBUG
+ ElapsedTimer() : started_(false) {}
+#endif
+
+ // Starts this timer. Once started a timer can be checked with
+ // |Elapsed()| or |HasExpired()|, and may be restarted using |Restart()|.
+ // This method must not be called on an already started timer.
+ void Start() {
+ ASSERT(!IsStarted());
+ start_ticks_ = Now();
+#ifdef DEBUG
+ started_ = true;
+#endif
+ ASSERT(IsStarted());
+ }
+
+ // Stops this timer. Must not be called on a timer that was not
+ // started before.
+ void Stop() {
+ ASSERT(IsStarted());
+ start_ticks_ = TimeTicks();
+#ifdef DEBUG
+ started_ = false;
+#endif
+ ASSERT(!IsStarted());
+ }
+
+ // Returns |true| if this timer was started previously.
+ bool IsStarted() const {
+ ASSERT(started_ || start_ticks_.IsNull());
+ ASSERT(!started_ || !start_ticks_.IsNull());
+ return !start_ticks_.IsNull();
+ }
+
+ // Restarts the timer and returns the time elapsed since the previous start.
+ // This method is equivalent to obtaining the elapsed time with |Elapsed()|
+ // and then starting the timer again, but does so in one single operation,
+ // avoiding the need to obtain the clock value twice. It may only be called
+ // on a previously started timer.
+ TimeDelta Restart() {
+ ASSERT(IsStarted());
+ TimeTicks ticks = Now();
+ TimeDelta elapsed = ticks - start_ticks_;
+ ASSERT(elapsed.InMicroseconds() >= 0);
+ start_ticks_ = ticks;
+ ASSERT(IsStarted());
+ return elapsed;
+ }
+
+ // Returns the time elapsed since the previous start. This method may only
+ // be called on a previously started timer.
+ TimeDelta Elapsed() const {
+ ASSERT(IsStarted());
+ TimeDelta elapsed = Now() - start_ticks_;
+ ASSERT(elapsed.InMicroseconds() >= 0);
+ return elapsed;
+ }
+
+ // Returns |true| if the specified |time_delta| has elapsed since the
+ // previous start, or |false| if not. This method may only be called on
+ // a previously started timer.
+ bool HasExpired(TimeDelta time_delta) const {
+ ASSERT(IsStarted());
+ return Elapsed() >= time_delta;
+ }
+
+ private:
+ static V8_INLINE TimeTicks Now() {
+ TimeTicks now = TimeTicks::HighResolutionNow();
+ ASSERT(!now.IsNull());
+ return now;
+ }
+
+ TimeTicks start_ticks_;
+#ifdef DEBUG
+ bool started_;
+#endif
+};
+
+} } // namespace v8::internal
+
+#endif // V8_PLATFORM_ELAPSED_TIMER_H_
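
Illustrative usage sketch, not part of the patch: ElapsedTimer wraps TimeTicks::HighResolutionNow() and is meant for budgeted loops and phase timing. The work loop below is hypothetical.

void DoTimedWork(const v8::internal::TimeDelta& budget) {
  v8::internal::ElapsedTimer timer;
  timer.Start();  // Must not already be started.
  while (!timer.HasExpired(budget)) {
    // ... one unit of work ...
  }
  // Restart() returns the time elapsed so far and begins a new measurement
  // in a single clock read.
  v8::internal::TimeDelta first_phase = timer.Restart();
  static_cast<void>(first_phase);
}
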
diff --git a/deps/v8/src/platform/mutex.cc b/deps/v8/src/platform/mutex.cc
new file mode 100644
index 000000000..ad9774099
--- /dev/null
+++ b/deps/v8/src/platform/mutex.cc
@@ -0,0 +1,214 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "platform/mutex.h"
+
+#include <cerrno>
+
+namespace v8 {
+namespace internal {
+
+#if V8_OS_POSIX
+
+static V8_INLINE void InitializeNativeHandle(pthread_mutex_t* mutex) {
+ int result;
+#if defined(DEBUG)
+ // Use an error checking mutex in debug mode.
+ pthread_mutexattr_t attr;
+ result = pthread_mutexattr_init(&attr);
+ ASSERT_EQ(0, result);
+ result = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
+ ASSERT_EQ(0, result);
+ result = pthread_mutex_init(mutex, &attr);
+ ASSERT_EQ(0, result);
+ result = pthread_mutexattr_destroy(&attr);
+#else
+ // Use a fast mutex (default attributes).
+ result = pthread_mutex_init(mutex, NULL);
+#endif // defined(DEBUG)
+ ASSERT_EQ(0, result);
+ USE(result);
+}
+
+
+static V8_INLINE void InitializeRecursiveNativeHandle(pthread_mutex_t* mutex) {
+ pthread_mutexattr_t attr;
+ int result = pthread_mutexattr_init(&attr);
+ ASSERT_EQ(0, result);
+ result = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
+ ASSERT_EQ(0, result);
+ result = pthread_mutex_init(mutex, &attr);
+ ASSERT_EQ(0, result);
+ result = pthread_mutexattr_destroy(&attr);
+ ASSERT_EQ(0, result);
+ USE(result);
+}
+
+
+static V8_INLINE void DestroyNativeHandle(pthread_mutex_t* mutex) {
+ int result = pthread_mutex_destroy(mutex);
+ ASSERT_EQ(0, result);
+ USE(result);
+}
+
+
+static V8_INLINE void LockNativeHandle(pthread_mutex_t* mutex) {
+ int result = pthread_mutex_lock(mutex);
+ ASSERT_EQ(0, result);
+ USE(result);
+}
+
+
+static V8_INLINE void UnlockNativeHandle(pthread_mutex_t* mutex) {
+ int result = pthread_mutex_unlock(mutex);
+ ASSERT_EQ(0, result);
+ USE(result);
+}
+
+
+static V8_INLINE bool TryLockNativeHandle(pthread_mutex_t* mutex) {
+ int result = pthread_mutex_trylock(mutex);
+ if (result == EBUSY) {
+ return false;
+ }
+ ASSERT_EQ(0, result);
+ return true;
+}
+
+#elif V8_OS_WIN
+
+static V8_INLINE void InitializeNativeHandle(PCRITICAL_SECTION cs) {
+ InitializeCriticalSection(cs);
+}
+
+
+static V8_INLINE void InitializeRecursiveNativeHandle(PCRITICAL_SECTION cs) {
+ InitializeCriticalSection(cs);
+}
+
+
+static V8_INLINE void DestroyNativeHandle(PCRITICAL_SECTION cs) {
+ DeleteCriticalSection(cs);
+}
+
+
+static V8_INLINE void LockNativeHandle(PCRITICAL_SECTION cs) {
+ EnterCriticalSection(cs);
+}
+
+
+static V8_INLINE void UnlockNativeHandle(PCRITICAL_SECTION cs) {
+ LeaveCriticalSection(cs);
+}
+
+
+static V8_INLINE bool TryLockNativeHandle(PCRITICAL_SECTION cs) {
+ return TryEnterCriticalSection(cs);
+}
+
+#endif // V8_OS_POSIX
+
+
+Mutex::Mutex() {
+ InitializeNativeHandle(&native_handle_);
+#ifdef DEBUG
+ level_ = 0;
+#endif
+}
+
+
+Mutex::~Mutex() {
+ DestroyNativeHandle(&native_handle_);
+ ASSERT_EQ(0, level_);
+}
+
+
+void Mutex::Lock() {
+ LockNativeHandle(&native_handle_);
+ AssertUnheldAndMark();
+}
+
+
+void Mutex::Unlock() {
+ AssertHeldAndUnmark();
+ UnlockNativeHandle(&native_handle_);
+}
+
+
+bool Mutex::TryLock() {
+ if (!TryLockNativeHandle(&native_handle_)) {
+ return false;
+ }
+ AssertUnheldAndMark();
+ return true;
+}
+
+
+RecursiveMutex::RecursiveMutex() {
+ InitializeRecursiveNativeHandle(&native_handle_);
+#ifdef DEBUG
+ level_ = 0;
+#endif
+}
+
+
+RecursiveMutex::~RecursiveMutex() {
+ DestroyNativeHandle(&native_handle_);
+ ASSERT_EQ(0, level_);
+}
+
+
+void RecursiveMutex::Lock() {
+ LockNativeHandle(&native_handle_);
+#ifdef DEBUG
+ ASSERT_LE(0, level_);
+ level_++;
+#endif
+}
+
+
+void RecursiveMutex::Unlock() {
+#ifdef DEBUG
+ ASSERT_LT(0, level_);
+ level_--;
+#endif
+ UnlockNativeHandle(&native_handle_);
+}
+
+
+bool RecursiveMutex::TryLock() {
+ if (!TryLockNativeHandle(&native_handle_)) {
+ return false;
+ }
+#ifdef DEBUG
+ ASSERT_LE(0, level_);
+ level_++;
+#endif
+ return true;
+}
+
+} } // namespace v8::internal
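
Illustrative usage note, not part of the patch: the plain Mutex is created with PTHREAD_MUTEX_ERRORCHECK in debug builds, so re-locking it on the owning thread trips the level_ assertions, whereas RecursiveMutex supports nested locking. Both functions below are hypothetical.

static v8::internal::RecursiveMutex allocator_mutex;

void InnerOperation() {
  // Legal on a RecursiveMutex: the ownership level is incremented here and
  // decremented when the guard is destroyed; the outer lock remains held.
  v8::internal::LockGuard<v8::internal::RecursiveMutex> guard(&allocator_mutex);
  // ... touch shared state ...
}

void OuterOperation() {
  v8::internal::LockGuard<v8::internal::RecursiveMutex> guard(&allocator_mutex);
  InnerOperation();  // Re-locks allocator_mutex on the same thread.
}
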
diff --git a/deps/v8/src/platform/mutex.h b/deps/v8/src/platform/mutex.h
new file mode 100644
index 000000000..125e9d486
--- /dev/null
+++ b/deps/v8/src/platform/mutex.h
@@ -0,0 +1,238 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_PLATFORM_MUTEX_H_
+#define V8_PLATFORM_MUTEX_H_
+
+#include "../lazy-instance.h"
+#if V8_OS_WIN
+#include "../win32-headers.h"
+#endif
+
+#if V8_OS_POSIX
+#include <pthread.h> // NOLINT
+#endif
+
+namespace v8 {
+namespace internal {
+
+// ----------------------------------------------------------------------------
+// Mutex
+//
+// This class is a synchronization primitive that can be used to protect shared
+// data from being simultaneously accessed by multiple threads. A mutex offers
+// exclusive, non-recursive ownership semantics:
+// - A calling thread owns a mutex from the time that it successfully calls
+// either |Lock()| or |TryLock()| until it calls |Unlock()|.
+// - When a thread owns a mutex, all other threads will block (for calls to
+// |Lock()|) or receive a |false| return value (for |TryLock()|) if they
+// attempt to claim ownership of the mutex.
+// A calling thread must not own the mutex prior to calling |Lock()| or
+// |TryLock()|. The behavior of a program is undefined if a mutex is destroyed
+// while still owned by some thread. The Mutex class is non-copyable.
+
+class Mutex V8_FINAL {
+ public:
+ Mutex();
+ ~Mutex();
+
+ // Locks the given mutex. If the mutex is currently unlocked, it becomes
+  // locked and owned by the calling thread immediately. If the mutex
+ // is already locked by another thread, suspends the calling thread until
+ // the mutex is unlocked.
+ void Lock();
+
+ // Unlocks the given mutex. The mutex is assumed to be locked and owned by
+ // the calling thread on entrance.
+ void Unlock();
+
+ // Tries to lock the given mutex. Returns whether the mutex was
+ // successfully locked.
+ bool TryLock() V8_WARN_UNUSED_RESULT;
+
+ // The implementation-defined native handle type.
+#if V8_OS_POSIX
+ typedef pthread_mutex_t NativeHandle;
+#elif V8_OS_WIN
+ typedef CRITICAL_SECTION NativeHandle;
+#endif
+
+ NativeHandle& native_handle() {
+ return native_handle_;
+ }
+ const NativeHandle& native_handle() const {
+ return native_handle_;
+ }
+
+ private:
+ NativeHandle native_handle_;
+#ifdef DEBUG
+ int level_;
+#endif
+
+ V8_INLINE void AssertHeldAndUnmark() {
+#ifdef DEBUG
+ ASSERT_EQ(1, level_);
+ level_--;
+#endif
+ }
+
+ V8_INLINE void AssertUnheldAndMark() {
+#ifdef DEBUG
+ ASSERT_EQ(0, level_);
+ level_++;
+#endif
+ }
+
+ friend class ConditionVariable;
+
+ DISALLOW_COPY_AND_ASSIGN(Mutex);
+};
+
+
+// POD Mutex initialized lazily (i.e. the first time Pointer() is called).
+// Usage:
+// static LazyMutex my_mutex = LAZY_MUTEX_INITIALIZER;
+//
+// void my_function() {
+// LockGuard<Mutex> guard(my_mutex.Pointer());
+// // Do something.
+// }
+//
+typedef LazyStaticInstance<Mutex,
+ DefaultConstructTrait<Mutex>,
+ ThreadSafeInitOnceTrait>::type LazyMutex;
+
+#define LAZY_MUTEX_INITIALIZER LAZY_STATIC_INSTANCE_INITIALIZER
+
+
+// -----------------------------------------------------------------------------
+// RecursiveMutex
+//
+// This class is a synchronization primitive that can be used to protect shared
+// data from being simultaneously accessed by multiple threads. A recursive
+// mutex offers exclusive, recursive ownership semantics:
+// - A calling thread owns a recursive mutex for a period of time that starts
+// when it successfully calls either |Lock()| or |TryLock()|. During this
+// period, the thread may make additional calls to |Lock()| or |TryLock()|.
+// The period of ownership ends when the thread makes a matching number of
+// calls to |Unlock()|.
+// - When a thread owns a recursive mutex, all other threads will block (for
+// calls to |Lock()|) or receive a |false| return value (for |TryLock()|) if
+// they attempt to claim ownership of the recursive mutex.
+// - The maximum number of times that a recursive mutex may be locked is
+// unspecified, but after that number is reached, calls to |Lock()| will
+// probably abort the process and calls to |TryLock()| return false.
+// The behavior of a program is undefined if a recursive mutex is destroyed
+// while still owned by some thread. The RecursiveMutex class is non-copyable.
+
+class RecursiveMutex V8_FINAL {
+ public:
+ RecursiveMutex();
+ ~RecursiveMutex();
+
+ // Locks the mutex. If another thread has already locked the mutex, a call to
+ // |Lock()| will block execution until the lock is acquired. A thread may call
+ // |Lock()| on a recursive mutex repeatedly. Ownership will only be released
+ // after the thread makes a matching number of calls to |Unlock()|.
+ // The behavior is undefined if the mutex is not unlocked before being
+ // destroyed, i.e. some thread still owns it.
+ void Lock();
+
+ // Unlocks the mutex if its level of ownership is 1 (there was exactly one
+  // more call to |Lock()| than there were calls to |Unlock()| made by this
+ // thread), reduces the level of ownership by 1 otherwise. The mutex must be
+ // locked by the current thread of execution, otherwise, the behavior is
+ // undefined.
+ void Unlock();
+
+ // Tries to lock the given mutex. Returns whether the mutex was
+ // successfully locked.
+ bool TryLock() V8_WARN_UNUSED_RESULT;
+
+ // The implementation-defined native handle type.
+ typedef Mutex::NativeHandle NativeHandle;
+
+ NativeHandle& native_handle() {
+ return native_handle_;
+ }
+ const NativeHandle& native_handle() const {
+ return native_handle_;
+ }
+
+ private:
+ NativeHandle native_handle_;
+#ifdef DEBUG
+ int level_;
+#endif
+
+ DISALLOW_COPY_AND_ASSIGN(RecursiveMutex);
+};
+
+
+// POD RecursiveMutex initialized lazily (i.e. the first time Pointer() is
+// called).
+// Usage:
+// static LazyRecursiveMutex my_mutex = LAZY_RECURSIVE_MUTEX_INITIALIZER;
+//
+// void my_function() {
+// LockGuard<RecursiveMutex> guard(my_mutex.Pointer());
+// // Do something.
+// }
+//
+typedef LazyStaticInstance<RecursiveMutex,
+ DefaultConstructTrait<RecursiveMutex>,
+ ThreadSafeInitOnceTrait>::type LazyRecursiveMutex;
+
+#define LAZY_RECURSIVE_MUTEX_INITIALIZER LAZY_STATIC_INSTANCE_INITIALIZER
+
+
+// -----------------------------------------------------------------------------
+// LockGuard
+//
+// This class is a mutex wrapper that provides a convenient RAII-style mechanism
+// for owning a mutex for the duration of a scoped block.
+// When a LockGuard object is created, it attempts to take ownership of the
+// mutex it is given. When control leaves the scope in which the LockGuard
+// object was created, the LockGuard is destructed and the mutex is released.
+// The LockGuard class is non-copyable.
+
+template <typename Mutex>
+class LockGuard V8_FINAL {
+ public:
+ explicit LockGuard(Mutex* mutex) : mutex_(mutex) { mutex_->Lock(); }
+ ~LockGuard() { mutex_->Unlock(); }
+
+ private:
+ Mutex* mutex_;
+
+ DISALLOW_COPY_AND_ASSIGN(LockGuard);
+};
+
+} } // namespace v8::internal
+
+#endif // V8_PLATFORM_MUTEX_H_
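
Illustrative sketch, not part of the patch: TryLock() is annotated V8_WARN_UNUSED_RESULT, so callers must branch on its result and pair a successful try-lock with an explicit Unlock(). The counter mutex below is hypothetical.

static v8::internal::Mutex counters_mutex;

void RecordSampleNonBlocking() {
  if (!counters_mutex.TryLock()) {
    return;  // Another thread holds the mutex; skip rather than block.
  }
  // ... update shared counters ...
  counters_mutex.Unlock();
}
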
diff --git a/deps/v8/src/platform/semaphore.cc b/deps/v8/src/platform/semaphore.cc
new file mode 100644
index 000000000..c3e5826f4
--- /dev/null
+++ b/deps/v8/src/platform/semaphore.cc
@@ -0,0 +1,214 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "platform/semaphore.h"
+
+#if V8_OS_MACOSX
+#include <mach/mach_init.h>
+#include <mach/task.h>
+#endif
+
+#include <cerrno>
+
+#include "checks.h"
+#include "platform/time.h"
+
+namespace v8 {
+namespace internal {
+
+#if V8_OS_MACOSX
+
+Semaphore::Semaphore(int count) {
+ kern_return_t result = semaphore_create(
+ mach_task_self(), &native_handle_, SYNC_POLICY_FIFO, count);
+ ASSERT_EQ(KERN_SUCCESS, result);
+ USE(result);
+}
+
+
+Semaphore::~Semaphore() {
+ kern_return_t result = semaphore_destroy(mach_task_self(), native_handle_);
+ ASSERT_EQ(KERN_SUCCESS, result);
+ USE(result);
+}
+
+
+void Semaphore::Signal() {
+ kern_return_t result = semaphore_signal(native_handle_);
+ ASSERT_EQ(KERN_SUCCESS, result);
+ USE(result);
+}
+
+
+void Semaphore::Wait() {
+ while (true) {
+ kern_return_t result = semaphore_wait(native_handle_);
+ if (result == KERN_SUCCESS) return; // Semaphore was signalled.
+ ASSERT_EQ(KERN_ABORTED, result);
+ }
+}
+
+
+bool Semaphore::WaitFor(const TimeDelta& rel_time) {
+ TimeTicks now = TimeTicks::Now();
+ TimeTicks end = now + rel_time;
+ while (true) {
+ mach_timespec_t ts;
+ if (now >= end) {
+ // Return immediately if semaphore was not signalled.
+ ts.tv_sec = 0;
+ ts.tv_nsec = 0;
+ } else {
+ ts = (end - now).ToMachTimespec();
+ }
+ kern_return_t result = semaphore_timedwait(native_handle_, ts);
+ if (result == KERN_SUCCESS) return true; // Semaphore was signalled.
+ if (result == KERN_OPERATION_TIMED_OUT) return false; // Timeout.
+ ASSERT_EQ(KERN_ABORTED, result);
+ now = TimeTicks::Now();
+ }
+}
+
+#elif V8_OS_POSIX
+
+Semaphore::Semaphore(int count) {
+ ASSERT(count >= 0);
+ int result = sem_init(&native_handle_, 0, count);
+ ASSERT_EQ(0, result);
+ USE(result);
+}
+
+
+Semaphore::~Semaphore() {
+ int result = sem_destroy(&native_handle_);
+ ASSERT_EQ(0, result);
+ USE(result);
+}
+
+
+void Semaphore::Signal() {
+ int result = sem_post(&native_handle_);
+ ASSERT_EQ(0, result);
+ USE(result);
+}
+
+
+void Semaphore::Wait() {
+ while (true) {
+ int result = sem_wait(&native_handle_);
+ if (result == 0) return; // Semaphore was signalled.
+ // Signal caused spurious wakeup.
+ ASSERT_EQ(-1, result);
+ ASSERT_EQ(EINTR, errno);
+ }
+}
+
+
+bool Semaphore::WaitFor(const TimeDelta& rel_time) {
+ // Compute the time for end of timeout.
+ const Time time = Time::NowFromSystemTime() + rel_time;
+ const struct timespec ts = time.ToTimespec();
+
+ // Wait for semaphore signalled or timeout.
+ while (true) {
+ int result = sem_timedwait(&native_handle_, &ts);
+ if (result == 0) return true; // Semaphore was signalled.
+#if V8_LIBC_GLIBC && !V8_GLIBC_PREREQ(2, 4)
+ if (result > 0) {
+ // sem_timedwait in glibc prior to 2.3.4 returns the errno instead of -1.
+ errno = result;
+ result = -1;
+ }
+#endif
+ if (result == -1 && errno == ETIMEDOUT) {
+ // Timed out while waiting for semaphore.
+ return false;
+ }
+ // Signal caused spurious wakeup.
+ ASSERT_EQ(-1, result);
+ ASSERT_EQ(EINTR, errno);
+ }
+}
+
+#elif V8_OS_WIN
+
+Semaphore::Semaphore(int count) {
+ ASSERT(count >= 0);
+ native_handle_ = ::CreateSemaphoreA(NULL, count, 0x7fffffff, NULL);
+ ASSERT(native_handle_ != NULL);
+}
+
+
+Semaphore::~Semaphore() {
+ BOOL result = CloseHandle(native_handle_);
+ ASSERT(result);
+ USE(result);
+}
+
+
+void Semaphore::Signal() {
+ LONG dummy;
+ BOOL result = ReleaseSemaphore(native_handle_, 1, &dummy);
+ ASSERT(result);
+ USE(result);
+}
+
+
+void Semaphore::Wait() {
+ DWORD result = WaitForSingleObject(native_handle_, INFINITE);
+ ASSERT(result == WAIT_OBJECT_0);
+ USE(result);
+}
+
+
+bool Semaphore::WaitFor(const TimeDelta& rel_time) {
+ TimeTicks now = TimeTicks::Now();
+ TimeTicks end = now + rel_time;
+ while (true) {
+ int64_t msec = (end - now).InMilliseconds();
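+ // WaitForSingleObject() takes a 32-bit millisecond timeout and reserves the
+ // value INFINITE, so longer waits are performed in chunks of INFINITE - 1
+ // milliseconds and the deadline is re-checked after each chunk.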
+ if (msec >= static_cast<int64_t>(INFINITE)) {
+ DWORD result = WaitForSingleObject(native_handle_, INFINITE - 1);
+ if (result == WAIT_OBJECT_0) {
+ return true;
+ }
+ ASSERT(result == WAIT_TIMEOUT);
+ now = TimeTicks::Now();
+ } else {
+ DWORD result = WaitForSingleObject(
+ native_handle_, (msec < 0) ? 0 : static_cast<DWORD>(msec));
+ if (result == WAIT_TIMEOUT) {
+ return false;
+ }
+ ASSERT(result == WAIT_OBJECT_0);
+ return true;
+ }
+ }
+}
+
+#endif // V8_OS_MACOSX
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/platform/semaphore.h b/deps/v8/src/platform/semaphore.h
new file mode 100644
index 000000000..0babe5fd6
--- /dev/null
+++ b/deps/v8/src/platform/semaphore.h
@@ -0,0 +1,126 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_PLATFORM_SEMAPHORE_H_
+#define V8_PLATFORM_SEMAPHORE_H_
+
+#include "../lazy-instance.h"
+#if V8_OS_WIN
+#include "../win32-headers.h"
+#endif
+
+#if V8_OS_MACOSX
+#include <mach/semaphore.h> // NOLINT
+#elif V8_OS_POSIX
+#include <semaphore.h> // NOLINT
+#endif
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class TimeDelta;
+
+// ----------------------------------------------------------------------------
+// Semaphore
+//
+// A semaphore object is a synchronization object that maintains a count. The
+// count is decremented each time a thread completes a wait for the semaphore
+// object and incremented each time a thread signals the semaphore. When the
+// count reaches zero, threads waiting for the semaphore block until the
+// count becomes non-zero.
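+//
+// Usage sketch (illustrative only; |queue| and |item| are hypothetical and not
+// part of this file):
+//
+//   Semaphore items_available(0);  // Starts with no items available.
+//
+//   // Producer thread:
+//   queue.push_back(item);
+//   items_available.Signal();      // Wake one waiting consumer.
+//
+//   // Consumer thread:
+//   items_available.Wait();        // Blocks until an item has been produced.
+//   item = queue.pop_front();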
+
+class Semaphore V8_FINAL {
+ public:
+ explicit Semaphore(int count);
+ ~Semaphore();
+
+ // Increments the semaphore counter.
+ void Signal();
+
+ // Suspends the calling thread until the semaphore counter is non-zero
+ // and then decrements the semaphore counter.
+ void Wait();
+
+ // Suspends the calling thread until the counter is non-zero or the timeout
+ // has expired. If the timeout expires, the return value is false and the
+ // counter is unchanged. Otherwise the semaphore counter is decremented and
+ // true is returned.
+ bool WaitFor(const TimeDelta& rel_time) V8_WARN_UNUSED_RESULT;
+
+#if V8_OS_MACOSX
+ typedef semaphore_t NativeHandle;
+#elif V8_OS_POSIX
+ typedef sem_t NativeHandle;
+#elif V8_OS_WIN
+ typedef HANDLE NativeHandle;
+#endif
+
+ NativeHandle& native_handle() {
+ return native_handle_;
+ }
+ const NativeHandle& native_handle() const {
+ return native_handle_;
+ }
+
+ private:
+ NativeHandle native_handle_;
+
+ DISALLOW_COPY_AND_ASSIGN(Semaphore);
+};
+
+
+// POD Semaphore initialized lazily (i.e. the first time Pointer() is called).
+// Usage:
+// // The following semaphore starts at 0.
+// static LazySemaphore<0>::type my_semaphore = LAZY_SEMAPHORE_INITIALIZER;
+//
+// void my_function() {
+// // Do something with my_semaphore.Pointer().
+// }
+//
+
+template <int N>
+struct CreateSemaphoreTrait {
+ static Semaphore* Create() {
+ return new Semaphore(N);
+ }
+};
+
+template <int N>
+struct LazySemaphore {
+ typedef typename LazyDynamicInstance<
+ Semaphore,
+ CreateSemaphoreTrait<N>,
+ ThreadSafeInitOnceTrait>::type type;
+};
+
+#define LAZY_SEMAPHORE_INITIALIZER LAZY_DYNAMIC_INSTANCE_INITIALIZER
+
+} } // namespace v8::internal
+
+#endif // V8_PLATFORM_SEMAPHORE_H_
diff --git a/deps/v8/src/platform/socket.cc b/deps/v8/src/platform/socket.cc
new file mode 100644
index 000000000..2fce6f299
--- /dev/null
+++ b/deps/v8/src/platform/socket.cc
@@ -0,0 +1,224 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "platform/socket.h"
+
+#if V8_OS_POSIX
+#include <sys/types.h>
+#include <sys/socket.h>
+
+#include <netinet/in.h>
+#include <netdb.h>
+
+#include <unistd.h>
+#endif
+
+#include <cerrno>
+
+#include "checks.h"
+#include "once.h"
+
+namespace v8 {
+namespace internal {
+
+#if V8_OS_WIN
+
+static V8_DECLARE_ONCE(initialize_winsock) = V8_ONCE_INIT;
+
+
+static void InitializeWinsock() {
+ WSADATA wsa_data;
+ int result = WSAStartup(MAKEWORD(1, 0), &wsa_data);
+ CHECK_EQ(0, result);
+}
+
+#endif // V8_OS_WIN
+
+
+Socket::Socket() {
+#if V8_OS_WIN
+ // Be sure to initialize the WinSock DLL first.
+ CallOnce(&initialize_winsock, &InitializeWinsock);
+#endif // V8_OS_WIN
+
+ // Create the native socket handle.
+ native_handle_ = ::socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
+}
+
+
+bool Socket::Bind(int port) {
+ ASSERT_GE(port, 0);
+ ASSERT_LT(port, 65536);
+ if (!IsValid()) return false;
+ struct sockaddr_in sin;
+ memset(&sin, 0, sizeof(sin));
+ sin.sin_family = AF_INET;
+ sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
+ sin.sin_port = htons(static_cast<uint16_t>(port));
+ int result = ::bind(
+ native_handle_, reinterpret_cast<struct sockaddr*>(&sin), sizeof(sin));
+ return result == 0;
+}
+
+
+bool Socket::Listen(int backlog) {
+ if (!IsValid()) return false;
+ int result = ::listen(native_handle_, backlog);
+ return result == 0;
+}
+
+
+Socket* Socket::Accept() {
+ if (!IsValid()) return NULL;
+ while (true) {
+ NativeHandle native_handle = ::accept(native_handle_, NULL, NULL);
+ if (native_handle == kInvalidNativeHandle) {
+#if V8_OS_POSIX
+ if (errno == EINTR) continue; // Retry after signal.
+#endif
+ return NULL;
+ }
+ return new Socket(native_handle);
+ }
+}
+
+
+bool Socket::Connect(const char* host, const char* port) {
+ ASSERT_NE(NULL, host);
+ ASSERT_NE(NULL, port);
+ if (!IsValid()) return false;
+
+ // Lookup host and port.
+ struct addrinfo* info = NULL;
+ struct addrinfo hint;
+ memset(&hint, 0, sizeof(hint));
+ hint.ai_family = AF_INET;
+ hint.ai_socktype = SOCK_STREAM;
+ hint.ai_protocol = IPPROTO_TCP;
+ int result = ::getaddrinfo(host, port, &hint, &info);
+ if (result != 0) {
+ return false;
+ }
+
+ // Connect to the host on the given port.
+ for (struct addrinfo* ai = info; ai != NULL; ai = ai->ai_next) {
+ // Try to connect using this addr info.
+ while (true) {
+ result = ::connect(
+ native_handle_, ai->ai_addr, static_cast<int>(ai->ai_addrlen));
+ if (result == 0) {
+ freeaddrinfo(info);
+ return true;
+ }
+#if V8_OS_POSIX
+ if (errno == EINTR) continue; // Retry after signal.
+#endif
+ break;
+ }
+ }
+ freeaddrinfo(info);
+ return false;
+}
+
+
+bool Socket::Shutdown() {
+ if (!IsValid()) return false;
+ // Shutdown socket for both read and write.
+#if V8_OS_POSIX
+ int result = ::shutdown(native_handle_, SHUT_RDWR);
+ ::close(native_handle_);
+#elif V8_OS_WIN
+ int result = ::shutdown(native_handle_, SD_BOTH);
+ ::closesocket(native_handle_);
+#endif
+ native_handle_ = kInvalidNativeHandle;
+ return result == 0;
+}
+
+
+int Socket::Send(const char* buffer, int length) {
+ ASSERT(length <= 0 || buffer != NULL);
+ if (!IsValid()) return 0;
+ int offset = 0;
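+ // ::send() may accept fewer bytes than requested, so keep sending until the
+ // whole buffer has been transmitted or the connection fails.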
+ while (offset < length) {
+ int result = ::send(native_handle_, buffer + offset, length - offset, 0);
+ if (result == 0) {
+ break;
+ } else if (result > 0) {
+ ASSERT(result <= length - offset);
+ offset += result;
+ } else {
+#if V8_OS_POSIX
+ if (errno == EINTR) continue; // Retry after signal.
+#endif
+ return 0;
+ }
+ }
+ return offset;
+}
+
+
+int Socket::Receive(char* buffer, int length) {
+ if (!IsValid()) return 0;
+ if (length <= 0) return 0;
+ ASSERT_NE(NULL, buffer);
+ while (true) {
+ int result = ::recv(native_handle_, buffer, length, 0);
+ if (result < 0) {
+#if V8_OS_POSIX
+ if (errno == EINTR) continue; // Retry after signal.
+#endif
+ return 0;
+ }
+ return result;
+ }
+}
+
+
+bool Socket::SetReuseAddress(bool reuse_address) {
+ if (!IsValid()) return false;
+ int v = reuse_address ? 1 : 0;
+ int result = ::setsockopt(native_handle_, SOL_SOCKET, SO_REUSEADDR,
+ reinterpret_cast<char*>(&v), sizeof(v));
+ return result == 0;
+}
+
+
+// static
+int Socket::GetLastError() {
+#if V8_OS_POSIX
+ return errno;
+#elif V8_OS_WIN
+ // Be sure to initialize the WinSock DLL first.
+ CallOnce(&initialize_winsock, &InitializeWinsock);
+
+ // Now we can safely perform WSA calls.
+ return ::WSAGetLastError();
+#endif
+}
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/platform/socket.h b/deps/v8/src/platform/socket.h
new file mode 100644
index 000000000..ff8c1de7c
--- /dev/null
+++ b/deps/v8/src/platform/socket.h
@@ -0,0 +1,101 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_PLATFORM_SOCKET_H_
+#define V8_PLATFORM_SOCKET_H_
+
+#include "globals.h"
+#if V8_OS_WIN
+#include "win32-headers.h"
+#endif
+
+namespace v8 {
+namespace internal {
+
+// ----------------------------------------------------------------------------
+// Socket
+//
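+// A rough usage sketch (error handling omitted; the port number and
+// identifiers below are illustrative only):
+//
+//   // Server side:
+//   Socket server;
+//   if (server.Bind(5858) && server.Listen(1)) {
+//     Socket* peer = server.Accept();        // Blocks until a client connects.
+//     char buf[64];
+//     int n = peer->Receive(buf, sizeof(buf));
+//     peer->Send(buf, n);                    // Echo the received bytes.
+//     delete peer;                           // Destructor shuts the socket down.
+//   }
+//
+//   // Client side:
+//   Socket client;
+//   if (client.Connect("localhost", "5858")) {
+//     int sent = client.Send("ping", 4);     // Returns 0 on failure.
+//   }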
+
+class Socket V8_FINAL {
+ public:
+ Socket();
+ ~Socket() { Shutdown(); }
+
+ // Server initialization.
+ bool Bind(int port) V8_WARN_UNUSED_RESULT;
+ bool Listen(int backlog) V8_WARN_UNUSED_RESULT;
+ Socket* Accept() V8_WARN_UNUSED_RESULT;
+
+ // Client initialization.
+ bool Connect(const char* host, const char* port) V8_WARN_UNUSED_RESULT;
+
+ // Shutdown socket for both read and write. This causes blocking Send and
+ // Receive calls to exit. After |Shutdown()| the Socket object cannot be
+ // used for any communication.
+ bool Shutdown();
+
+ // Data transmission.
+ // Return 0 on failure.
+ int Send(const char* buffer, int length) V8_WARN_UNUSED_RESULT;
+ int Receive(char* buffer, int length) V8_WARN_UNUSED_RESULT;
+
+ // Set the value of the SO_REUSEADDR socket option.
+ bool SetReuseAddress(bool reuse_address);
+
+ V8_INLINE bool IsValid() const {
+ return native_handle_ != kInvalidNativeHandle;
+ }
+
+ static int GetLastError();
+
+ // The implementation-defined native handle type.
+#if V8_OS_POSIX
+ typedef int NativeHandle;
+ static const NativeHandle kInvalidNativeHandle = -1;
+#elif V8_OS_WIN
+ typedef SOCKET NativeHandle;
+ static const NativeHandle kInvalidNativeHandle = INVALID_SOCKET;
+#endif
+
+ NativeHandle& native_handle() {
+ return native_handle_;
+ }
+ const NativeHandle& native_handle() const {
+ return native_handle_;
+ }
+
+ private:
+ explicit Socket(NativeHandle native_handle) : native_handle_(native_handle) {}
+
+ NativeHandle native_handle_;
+
+ DISALLOW_COPY_AND_ASSIGN(Socket);
+};
+
+} } // namespace v8::internal
+
+#endif // V8_PLATFORM_SOCKET_H_
diff --git a/deps/v8/src/platform/time.cc b/deps/v8/src/platform/time.cc
new file mode 100644
index 000000000..de0ca1647
--- /dev/null
+++ b/deps/v8/src/platform/time.cc
@@ -0,0 +1,591 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "platform/time.h"
+
+#if V8_OS_POSIX
+#include <sys/time.h>
+#endif
+#if V8_OS_MACOSX
+#include <mach/mach_time.h>
+#endif
+
+#include <cstring>
+
+#include "checks.h"
+#include "cpu.h"
+#include "platform.h"
+#if V8_OS_WIN
+#include "win32-headers.h"
+#endif
+
+namespace v8 {
+namespace internal {
+
+TimeDelta TimeDelta::FromDays(int days) {
+ return TimeDelta(days * Time::kMicrosecondsPerDay);
+}
+
+
+TimeDelta TimeDelta::FromHours(int hours) {
+ return TimeDelta(hours * Time::kMicrosecondsPerHour);
+}
+
+
+TimeDelta TimeDelta::FromMinutes(int minutes) {
+ return TimeDelta(minutes * Time::kMicrosecondsPerMinute);
+}
+
+
+TimeDelta TimeDelta::FromSeconds(int64_t seconds) {
+ return TimeDelta(seconds * Time::kMicrosecondsPerSecond);
+}
+
+
+TimeDelta TimeDelta::FromMilliseconds(int64_t milliseconds) {
+ return TimeDelta(milliseconds * Time::kMicrosecondsPerMillisecond);
+}
+
+
+TimeDelta TimeDelta::FromNanoseconds(int64_t nanoseconds) {
+ return TimeDelta(nanoseconds / Time::kNanosecondsPerMicrosecond);
+}
+
+
+int TimeDelta::InDays() const {
+ return static_cast<int>(delta_ / Time::kMicrosecondsPerDay);
+}
+
+
+int TimeDelta::InHours() const {
+ return static_cast<int>(delta_ / Time::kMicrosecondsPerHour);
+}
+
+
+int TimeDelta::InMinutes() const {
+ return static_cast<int>(delta_ / Time::kMicrosecondsPerMinute);
+}
+
+
+double TimeDelta::InSecondsF() const {
+ return static_cast<double>(delta_) / Time::kMicrosecondsPerSecond;
+}
+
+
+int64_t TimeDelta::InSeconds() const {
+ return delta_ / Time::kMicrosecondsPerSecond;
+}
+
+
+double TimeDelta::InMillisecondsF() const {
+ return static_cast<double>(delta_) / Time::kMicrosecondsPerMillisecond;
+}
+
+
+int64_t TimeDelta::InMilliseconds() const {
+ return delta_ / Time::kMicrosecondsPerMillisecond;
+}
+
+
+int64_t TimeDelta::InNanoseconds() const {
+ return delta_ * Time::kNanosecondsPerMicrosecond;
+}
+
+
+#if V8_OS_MACOSX
+
+TimeDelta TimeDelta::FromMachTimespec(struct mach_timespec ts) {
+ ASSERT_GE(ts.tv_nsec, 0);
+ ASSERT_LT(ts.tv_nsec,
+ static_cast<long>(Time::kNanosecondsPerSecond)); // NOLINT
+ return TimeDelta(ts.tv_sec * Time::kMicrosecondsPerSecond +
+ ts.tv_nsec / Time::kNanosecondsPerMicrosecond);
+}
+
+
+struct mach_timespec TimeDelta::ToMachTimespec() const {
+ struct mach_timespec ts;
+ ASSERT(delta_ >= 0);
+ ts.tv_sec = delta_ / Time::kMicrosecondsPerSecond;
+ ts.tv_nsec = (delta_ % Time::kMicrosecondsPerSecond) *
+ Time::kNanosecondsPerMicrosecond;
+ return ts;
+}
+
+#endif // V8_OS_MACOSX
+
+
+#if V8_OS_POSIX
+
+TimeDelta TimeDelta::FromTimespec(struct timespec ts) {
+ ASSERT_GE(ts.tv_nsec, 0);
+ ASSERT_LT(ts.tv_nsec,
+ static_cast<long>(Time::kNanosecondsPerSecond)); // NOLINT
+ return TimeDelta(ts.tv_sec * Time::kMicrosecondsPerSecond +
+ ts.tv_nsec / Time::kNanosecondsPerMicrosecond);
+}
+
+
+struct timespec TimeDelta::ToTimespec() const {
+ struct timespec ts;
+ ts.tv_sec = delta_ / Time::kMicrosecondsPerSecond;
+ ts.tv_nsec = (delta_ % Time::kMicrosecondsPerSecond) *
+ Time::kNanosecondsPerMicrosecond;
+ return ts;
+}
+
+#endif // V8_OS_POSIX
+
+
+#if V8_OS_WIN
+
+// We implement time using the high-resolution timers so that we can get
+// timeouts which are smaller than 10-15ms. To avoid any drift, we
+// periodically resync the internal clock to the system clock.
+class Clock V8_FINAL {
+ public:
+ Clock() : initial_ticks_(GetSystemTicks()), initial_time_(GetSystemTime()) {}
+
+ Time Now() {
+ // Time between resampling the un-granular clock for this API (1 minute).
+ const TimeDelta kMaxElapsedTime = TimeDelta::FromMinutes(1);
+
+ LockGuard<Mutex> lock_guard(&mutex_);
+
+ // Determine current time and ticks.
+ TimeTicks ticks = GetSystemTicks();
+ Time time = GetSystemTime();
+
+ // Check if we need to synchronize with the system clock due to a backwards
+ // time change or the amount of time elapsed.
+ TimeDelta elapsed = ticks - initial_ticks_;
+ if (time < initial_time_ || elapsed > kMaxElapsedTime) {
+ initial_ticks_ = ticks;
+ initial_time_ = time;
+ return time;
+ }
+
+ return initial_time_ + elapsed;
+ }
+
+ Time NowFromSystemTime() {
+ LockGuard<Mutex> lock_guard(&mutex_);
+ initial_ticks_ = GetSystemTicks();
+ initial_time_ = GetSystemTime();
+ return initial_time_;
+ }
+
+ private:
+ static TimeTicks GetSystemTicks() {
+ return TimeTicks::Now();
+ }
+
+ static Time GetSystemTime() {
+ FILETIME ft;
+ ::GetSystemTimeAsFileTime(&ft);
+ return Time::FromFiletime(ft);
+ }
+
+ TimeTicks initial_ticks_;
+ Time initial_time_;
+ Mutex mutex_;
+};
+
+
+static LazyStaticInstance<Clock,
+ DefaultConstructTrait<Clock>,
+ ThreadSafeInitOnceTrait>::type clock = LAZY_STATIC_INSTANCE_INITIALIZER;
+
+
+Time Time::Now() {
+ return clock.Pointer()->Now();
+}
+
+
+Time Time::NowFromSystemTime() {
+ return clock.Pointer()->NowFromSystemTime();
+}
+
+
+// Time between windows epoch and standard epoch.
+static const int64_t kTimeToEpochInMicroseconds = V8_INT64_C(11644473600000000);
+
+
+Time Time::FromFiletime(FILETIME ft) {
+ if (ft.dwLowDateTime == 0 && ft.dwHighDateTime == 0) {
+ return Time();
+ }
+ if (ft.dwLowDateTime == std::numeric_limits<DWORD>::max() &&
+ ft.dwHighDateTime == std::numeric_limits<DWORD>::max()) {
+ return Max();
+ }
+ int64_t us = (static_cast<uint64_t>(ft.dwLowDateTime) +
+ (static_cast<uint64_t>(ft.dwHighDateTime) << 32)) / 10;
+ return Time(us - kTimeToEpochInMicroseconds);
+}
+
+
+FILETIME Time::ToFiletime() const {
+ ASSERT(us_ >= 0);
+ FILETIME ft;
+ if (IsNull()) {
+ ft.dwLowDateTime = 0;
+ ft.dwHighDateTime = 0;
+ return ft;
+ }
+ if (IsMax()) {
+ ft.dwLowDateTime = std::numeric_limits<DWORD>::max();
+ ft.dwHighDateTime = std::numeric_limits<DWORD>::max();
+ return ft;
+ }
+ uint64_t us = static_cast<uint64_t>(us_ + kTimeToEpochInMicroseconds) * 10;
+ ft.dwLowDateTime = static_cast<DWORD>(us);
+ ft.dwHighDateTime = static_cast<DWORD>(us >> 32);
+ return ft;
+}
+
+#elif V8_OS_POSIX
+
+Time Time::Now() {
+ struct timeval tv;
+ int result = gettimeofday(&tv, NULL);
+ ASSERT_EQ(0, result);
+ USE(result);
+ return FromTimeval(tv);
+}
+
+
+Time Time::NowFromSystemTime() {
+ return Now();
+}
+
+
+Time Time::FromTimespec(struct timespec ts) {
+ ASSERT(ts.tv_nsec >= 0);
+ ASSERT(ts.tv_nsec < static_cast<long>(kNanosecondsPerSecond)); // NOLINT
+ if (ts.tv_nsec == 0 && ts.tv_sec == 0) {
+ return Time();
+ }
+ if (ts.tv_nsec == static_cast<long>(kNanosecondsPerSecond - 1) && // NOLINT
+ ts.tv_sec == std::numeric_limits<time_t>::max()) {
+ return Max();
+ }
+ return Time(ts.tv_sec * kMicrosecondsPerSecond +
+ ts.tv_nsec / kNanosecondsPerMicrosecond);
+}
+
+
+struct timespec Time::ToTimespec() const {
+ struct timespec ts;
+ if (IsNull()) {
+ ts.tv_sec = 0;
+ ts.tv_nsec = 0;
+ return ts;
+ }
+ if (IsMax()) {
+ ts.tv_sec = std::numeric_limits<time_t>::max();
+ ts.tv_nsec = static_cast<long>(kNanosecondsPerSecond - 1); // NOLINT
+ return ts;
+ }
+ ts.tv_sec = us_ / kMicrosecondsPerSecond;
+ ts.tv_nsec = (us_ % kMicrosecondsPerSecond) * kNanosecondsPerMicrosecond;
+ return ts;
+}
+
+
+Time Time::FromTimeval(struct timeval tv) {
+ ASSERT(tv.tv_usec >= 0);
+ ASSERT(tv.tv_usec < static_cast<suseconds_t>(kMicrosecondsPerSecond));
+ if (tv.tv_usec == 0 && tv.tv_sec == 0) {
+ return Time();
+ }
+ if (tv.tv_usec == static_cast<suseconds_t>(kMicrosecondsPerSecond - 1) &&
+ tv.tv_sec == std::numeric_limits<time_t>::max()) {
+ return Max();
+ }
+ return Time(tv.tv_sec * kMicrosecondsPerSecond + tv.tv_usec);
+}
+
+
+struct timeval Time::ToTimeval() const {
+ struct timeval tv;
+ if (IsNull()) {
+ tv.tv_sec = 0;
+ tv.tv_usec = 0;
+ return tv;
+ }
+ if (IsMax()) {
+ tv.tv_sec = std::numeric_limits<time_t>::max();
+ tv.tv_usec = static_cast<suseconds_t>(kMicrosecondsPerSecond - 1);
+ return tv;
+ }
+ tv.tv_sec = us_ / kMicrosecondsPerSecond;
+ tv.tv_usec = us_ % kMicrosecondsPerSecond;
+ return tv;
+}
+
+#endif // V8_OS_WIN
+
+
+Time Time::FromJsTime(double ms_since_epoch) {
+ // The epoch is a valid time, so this constructor doesn't interpret
+ // 0 as the null time.
+ if (ms_since_epoch == std::numeric_limits<double>::max()) {
+ return Max();
+ }
+ return Time(
+ static_cast<int64_t>(ms_since_epoch * kMicrosecondsPerMillisecond));
+}
+
+
+double Time::ToJsTime() const {
+ if (IsNull()) {
+ // Preserve 0 so the invalid result doesn't depend on the platform.
+ return 0;
+ }
+ if (IsMax()) {
+ // Preserve max without offset to prevent overflow.
+ return std::numeric_limits<double>::max();
+ }
+ return static_cast<double>(us_) / kMicrosecondsPerMillisecond;
+}
+
+
+#if V8_OS_WIN
+
+class TickClock {
+ public:
+ virtual ~TickClock() {}
+ virtual int64_t Now() = 0;
+ virtual bool IsHighResolution() = 0;
+};
+
+
+// Overview of time counters:
+// (1) CPU cycle counter. (Retrieved via RDTSC)
+// The CPU counter provides the highest resolution time stamp and is the least
+// expensive to retrieve. However, the CPU counter is unreliable and should not
+// be used in production. Its biggest issue is that it is per processor and it
+// is not synchronized between processors. Also, on some computers, the counters
+// will change frequency due to thermal and power changes, and stop in some
+// states.
+//
+// (2) QueryPerformanceCounter (QPC). The QPC counter provides a high-
+// resolution (100 nanoseconds) time stamp but is comparatively more expensive
+// to retrieve. What QueryPerformanceCounter actually does is up to the HAL
+// (with some help from ACPI).
+// According to http://blogs.msdn.com/oldnewthing/archive/2005/09/02/459952.aspx
+// in the worst case, it gets the counter from the rollover interrupt on the
+// programmable interrupt timer. In best cases, the HAL may conclude that the
+// RDTSC counter runs at a constant frequency, then it uses that instead. On
+// multiprocessor machines, it will try to verify the values returned from
+// RDTSC on each processor are consistent with each other, and apply a handful
+// of workarounds for known buggy hardware. In other words, QPC is supposed to
+// give consistent result on a multiprocessor computer, but it is unreliable in
+// reality due to bugs in BIOS or HAL on some, especially old computers.
+// With recent updates to the HAL and newer BIOSes, QPC is getting more
+// reliable, but it should still be used with caution.
+//
+// (3) System time. The system time provides a low-resolution (typically 10 to
+// 55 milliseconds) time stamp but is comparatively less expensive to
+// retrieve and more reliable.
+class HighResolutionTickClock V8_FINAL : public TickClock {
+ public:
+ explicit HighResolutionTickClock(int64_t ticks_per_second)
+ : ticks_per_second_(ticks_per_second) {
+ ASSERT_LT(0, ticks_per_second);
+ }
+ virtual ~HighResolutionTickClock() {}
+
+ virtual int64_t Now() V8_OVERRIDE {
+ LARGE_INTEGER now;
+ BOOL result = QueryPerformanceCounter(&now);
+ ASSERT(result);
+ USE(result);
+
+ // Intentionally calculate microseconds in a roundabout manner to avoid
+ // overflow and precision issues. Think twice before simplifying!
+ int64_t whole_seconds = now.QuadPart / ticks_per_second_;
+ int64_t leftover_ticks = now.QuadPart % ticks_per_second_;
+ int64_t ticks = (whole_seconds * Time::kMicrosecondsPerSecond) +
+ ((leftover_ticks * Time::kMicrosecondsPerSecond) / ticks_per_second_);
+
+ // Make sure we never return 0 here, so that TimeTicks::HighResolutionNow()
+ // will never return 0.
+ return ticks + 1;
+ }
+
+ virtual bool IsHighResolution() V8_OVERRIDE {
+ return true;
+ }
+
+ private:
+ int64_t ticks_per_second_;
+};
+
+
+class RolloverProtectedTickClock V8_FINAL : public TickClock {
+ public:
+ // We initialize rollover_ms_ to 1 to ensure that we will never
+ // return 0 from TimeTicks::HighResolutionNow() and TimeTicks::Now() below.
+ RolloverProtectedTickClock() : last_seen_now_(0), rollover_ms_(1) {}
+ virtual ~RolloverProtectedTickClock() {}
+
+ virtual int64_t Now() V8_OVERRIDE {
+ LockGuard<Mutex> lock_guard(&mutex_);
+ // We use timeGetTime() to implement TimeTicks::Now(), which rolls over
+ // every ~49.7 days. We try to track rollover ourselves, which works if
+ // TimeTicks::Now() is called at least every 49 days.
+ // Note that we do not use GetTickCount() here, since timeGetTime() gives
+ // more predictable delta values, as described here:
+ // http://blogs.msdn.com/b/larryosterman/archive/2009/09/02/what-s-the-difference-between-gettickcount-and-timegettime.aspx
+ // timeGetTime() provides 1ms granularity when combined with
+ // timeBeginPeriod(). If the host application for V8 wants fast timers, it
+ // can use timeBeginPeriod() to increase the resolution.
+ DWORD now = timeGetTime();
+ if (now < last_seen_now_) {
+ rollover_ms_ += V8_INT64_C(0x100000000); // ~49.7 days.
+ }
+ last_seen_now_ = now;
+ return (now + rollover_ms_) * Time::kMicrosecondsPerMillisecond;
+ }
+
+ virtual bool IsHighResolution() V8_OVERRIDE {
+ return false;
+ }
+
+ private:
+ Mutex mutex_;
+ DWORD last_seen_now_;
+ int64_t rollover_ms_;
+};
+
+
+static LazyStaticInstance<RolloverProtectedTickClock,
+ DefaultConstructTrait<RolloverProtectedTickClock>,
+ ThreadSafeInitOnceTrait>::type tick_clock =
+ LAZY_STATIC_INSTANCE_INITIALIZER;
+
+
+struct CreateHighResTickClockTrait {
+ static TickClock* Create() {
+ // Check if the installed hardware supports a high-resolution performance
+ // counter, and if not fallback to the low-resolution tick clock.
+ LARGE_INTEGER ticks_per_second;
+ if (!QueryPerformanceFrequency(&ticks_per_second)) {
+ return tick_clock.Pointer();
+ }
+
+ // On Athlon X2 CPUs (e.g. model 15) the QueryPerformanceCounter
+ // is unreliable, fallback to the low-resolution tick clock.
+ CPU cpu;
+ if (strcmp(cpu.vendor(), "AuthenticAMD") == 0 && cpu.family() == 15) {
+ return tick_clock.Pointer();
+ }
+
+ return new HighResolutionTickClock(ticks_per_second.QuadPart);
+ }
+};
+
+
+static LazyDynamicInstance<TickClock,
+ CreateHighResTickClockTrait,
+ ThreadSafeInitOnceTrait>::type high_res_tick_clock =
+ LAZY_DYNAMIC_INSTANCE_INITIALIZER;
+
+
+TimeTicks TimeTicks::Now() {
+ // Make sure we never return 0 here.
+ TimeTicks ticks(tick_clock.Pointer()->Now());
+ ASSERT(!ticks.IsNull());
+ return ticks;
+}
+
+
+TimeTicks TimeTicks::HighResolutionNow() {
+ // Make sure we never return 0 here.
+ TimeTicks ticks(high_res_tick_clock.Pointer()->Now());
+ ASSERT(!ticks.IsNull());
+ return ticks;
+}
+
+
+// static
+bool TimeTicks::IsHighResolutionClockWorking() {
+ return high_res_tick_clock.Pointer()->IsHighResolution();
+}
+
+#else // V8_OS_WIN
+
+TimeTicks TimeTicks::Now() {
+ return HighResolutionNow();
+}
+
+
+TimeTicks TimeTicks::HighResolutionNow() {
+ int64_t ticks;
+#if V8_OS_MACOSX
+ static struct mach_timebase_info info;
+ if (info.denom == 0) {
+ kern_return_t result = mach_timebase_info(&info);
+ ASSERT_EQ(KERN_SUCCESS, result);
+ USE(result);
+ }
+ ticks = (mach_absolute_time() / Time::kNanosecondsPerMicrosecond *
+ info.numer / info.denom);
+#elif V8_OS_SOLARIS
+ ticks = (gethrtime() / Time::kNanosecondsPerMicrosecond);
+#elif V8_LIBRT_NOT_AVAILABLE
+ // TODO(bmeurer): This is a temporary hack to support cross-compiling
+ // Chrome for Android in AOSP. Remove this once AOSP is fixed, also
+ // cleanup the tools/gyp/v8.gyp file.
+ struct timeval tv;
+ int result = gettimeofday(&tv, NULL);
+ ASSERT_EQ(0, result);
+ USE(result);
+ ticks = (tv.tv_sec * Time::kMicrosecondsPerSecond + tv.tv_usec);
+#elif V8_OS_POSIX
+ struct timespec ts;
+ int result = clock_gettime(CLOCK_MONOTONIC, &ts);
+ ASSERT_EQ(0, result);
+ USE(result);
+ ticks = (ts.tv_sec * Time::kMicrosecondsPerSecond +
+ ts.tv_nsec / Time::kNanosecondsPerMicrosecond);
+#endif // V8_OS_MACOSX
+ // Make sure we never return 0 here.
+ return TimeTicks(ticks + 1);
+}
+
+
+// static
+bool TimeTicks::IsHighResolutionClockWorking() {
+ return true;
+}
+
+#endif // V8_OS_WIN
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/platform/time.h b/deps/v8/src/platform/time.h
new file mode 100644
index 000000000..877e0203b
--- /dev/null
+++ b/deps/v8/src/platform/time.h
@@ -0,0 +1,416 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_PLATFORM_TIME_H_
+#define V8_PLATFORM_TIME_H_
+
+#include <ctime>
+#include <limits>
+
+#include "../allocation.h"
+
+// Forward declarations.
+extern "C" {
+struct _FILETIME;
+struct mach_timespec;
+struct timespec;
+struct timeval;
+}
+
+namespace v8 {
+namespace internal {
+
+class Time;
+class TimeTicks;
+
+// -----------------------------------------------------------------------------
+// TimeDelta
+//
+// This class represents a duration of time, internally represented in
+// microseconds.
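+//
+// Usage sketch (illustrative only):
+//
+//   TimeDelta timeout = TimeDelta::FromMilliseconds(250);
+//   int64_t us = timeout.InMicroseconds();             // 250000
+//   TimeDelta doubled = timeout * 2;                    // 500 ms
+//   bool longer = doubled > TimeDelta::FromSeconds(1);  // false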
+
+class TimeDelta V8_FINAL BASE_EMBEDDED {
+ public:
+ TimeDelta() : delta_(0) {}
+
+ // Converts units of time to TimeDeltas.
+ static TimeDelta FromDays(int days);
+ static TimeDelta FromHours(int hours);
+ static TimeDelta FromMinutes(int minutes);
+ static TimeDelta FromSeconds(int64_t seconds);
+ static TimeDelta FromMilliseconds(int64_t milliseconds);
+ static TimeDelta FromMicroseconds(int64_t microseconds) {
+ return TimeDelta(microseconds);
+ }
+ static TimeDelta FromNanoseconds(int64_t nanoseconds);
+
+ // Returns the time delta in some unit. The F versions return a floating
+ // point value, the "regular" versions return a rounded-down value.
+ //
+ // InMillisecondsRoundedUp() instead returns an integer that is rounded up
+ // to the next full millisecond.
+ int InDays() const;
+ int InHours() const;
+ int InMinutes() const;
+ double InSecondsF() const;
+ int64_t InSeconds() const;
+ double InMillisecondsF() const;
+ int64_t InMilliseconds() const;
+ int64_t InMillisecondsRoundedUp() const;
+ int64_t InMicroseconds() const { return delta_; }
+ int64_t InNanoseconds() const;
+
+ // Converts to/from Mach time specs.
+ static TimeDelta FromMachTimespec(struct mach_timespec ts);
+ struct mach_timespec ToMachTimespec() const;
+
+ // Converts to/from POSIX time specs.
+ static TimeDelta FromTimespec(struct timespec ts);
+ struct timespec ToTimespec() const;
+
+ TimeDelta& operator=(const TimeDelta& other) {
+ delta_ = other.delta_;
+ return *this;
+ }
+
+ // Computations with other deltas.
+ TimeDelta operator+(const TimeDelta& other) const {
+ return TimeDelta(delta_ + other.delta_);
+ }
+ TimeDelta operator-(const TimeDelta& other) const {
+ return TimeDelta(delta_ - other.delta_);
+ }
+
+ TimeDelta& operator+=(const TimeDelta& other) {
+ delta_ += other.delta_;
+ return *this;
+ }
+ TimeDelta& operator-=(const TimeDelta& other) {
+ delta_ -= other.delta_;
+ return *this;
+ }
+ TimeDelta operator-() const {
+ return TimeDelta(-delta_);
+ }
+
+ double TimesOf(const TimeDelta& other) const {
+ return static_cast<double>(delta_) / static_cast<double>(other.delta_);
+ }
+ double PercentOf(const TimeDelta& other) const {
+ return TimesOf(other) * 100.0;
+ }
+
+ // Computations with ints, note that we only allow multiplicative operations
+ // with ints, and additive operations with other deltas.
+ TimeDelta operator*(int64_t a) const {
+ return TimeDelta(delta_ * a);
+ }
+ TimeDelta operator/(int64_t a) const {
+ return TimeDelta(delta_ / a);
+ }
+ TimeDelta& operator*=(int64_t a) {
+ delta_ *= a;
+ return *this;
+ }
+ TimeDelta& operator/=(int64_t a) {
+ delta_ /= a;
+ return *this;
+ }
+ int64_t operator/(const TimeDelta& other) const {
+ return delta_ / other.delta_;
+ }
+
+ // Comparison operators.
+ bool operator==(const TimeDelta& other) const {
+ return delta_ == other.delta_;
+ }
+ bool operator!=(const TimeDelta& other) const {
+ return delta_ != other.delta_;
+ }
+ bool operator<(const TimeDelta& other) const {
+ return delta_ < other.delta_;
+ }
+ bool operator<=(const TimeDelta& other) const {
+ return delta_ <= other.delta_;
+ }
+ bool operator>(const TimeDelta& other) const {
+ return delta_ > other.delta_;
+ }
+ bool operator>=(const TimeDelta& other) const {
+ return delta_ >= other.delta_;
+ }
+
+ private:
+ // Constructs a delta given the duration in microseconds. This is private
+ // to avoid confusion by callers with an integer constructor. Use
+ // FromSeconds, FromMilliseconds, etc. instead.
+ explicit TimeDelta(int64_t delta) : delta_(delta) {}
+
+ // Delta in microseconds.
+ int64_t delta_;
+};
+
+
+// -----------------------------------------------------------------------------
+// Time
+//
+// This class represents an absolute point in time, internally represented as
+// microseconds (s/1,000,000) since 00:00:00 UTC, January 1, 1970.
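+//
+// Usage sketch (illustrative only; prefer TimeTicks for measuring durations,
+// since the wall clock may be adjusted):
+//
+//   Time start = Time::Now();
+//   // ... some work ...
+//   TimeDelta elapsed = Time::Now() - start;
+//   double ms_since_epoch = start.ToJsTime();  // Milliseconds since Jan 1, 1970.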
+
+class Time V8_FINAL BASE_EMBEDDED {
+ public:
+ static const int64_t kMillisecondsPerSecond = 1000;
+ static const int64_t kMicrosecondsPerMillisecond = 1000;
+ static const int64_t kMicrosecondsPerSecond = kMicrosecondsPerMillisecond *
+ kMillisecondsPerSecond;
+ static const int64_t kMicrosecondsPerMinute = kMicrosecondsPerSecond * 60;
+ static const int64_t kMicrosecondsPerHour = kMicrosecondsPerMinute * 60;
+ static const int64_t kMicrosecondsPerDay = kMicrosecondsPerHour * 24;
+ static const int64_t kMicrosecondsPerWeek = kMicrosecondsPerDay * 7;
+ static const int64_t kNanosecondsPerMicrosecond = 1000;
+ static const int64_t kNanosecondsPerSecond = kNanosecondsPerMicrosecond *
+ kMicrosecondsPerSecond;
+
+ // Contains the NULL time. Use Time::Now() to get the current time.
+ Time() : us_(0) {}
+
+ // Returns true if the time object has not been initialized.
+ bool IsNull() const { return us_ == 0; }
+
+ // Returns true if the time object is the maximum time.
+ bool IsMax() const { return us_ == std::numeric_limits<int64_t>::max(); }
+
+ // Returns the current time. Watch out, the system might adjust its clock
+ // in which case time will actually go backwards. We don't guarantee that
+ // times are increasing, or that two calls to Now() won't be the same.
+ static Time Now();
+
+ // Returns the current time. Same as Now() except that this function always
+ // uses system time so that there are no discrepancies between the returned
+ // time and system time even on virtual environments including our test bot.
+ // For timing sensitive unittests, this function should be used.
+ static Time NowFromSystemTime();
+
+ // Returns the time of the epoch on Unix-like systems (Jan 1, 1970).
+ static Time UnixEpoch() { return Time(0); }
+
+ // Returns the maximum time, which should be greater than any reasonable time
+ // with which we might compare it.
+ static Time Max() { return Time(std::numeric_limits<int64_t>::max()); }
+
+ // Converts to/from internal values. The meaning of the "internal value" is
+ // completely up to the implementation, so it should be treated as opaque.
+ static Time FromInternalValue(int64_t value) {
+ return Time(value);
+ }
+ int64_t ToInternalValue() const {
+ return us_;
+ }
+
+ // Converts to/from POSIX time specs.
+ static Time FromTimespec(struct timespec ts);
+ struct timespec ToTimespec() const;
+
+ // Converts to/from POSIX time values.
+ static Time FromTimeval(struct timeval tv);
+ struct timeval ToTimeval() const;
+
+ // Converts to/from Windows file times.
+ static Time FromFiletime(struct _FILETIME ft);
+ struct _FILETIME ToFiletime() const;
+
+ // Converts to/from the JavaScript convention for times, a number of
+ // milliseconds since the epoch:
+ static Time FromJsTime(double ms_since_epoch);
+ double ToJsTime() const;
+
+ Time& operator=(const Time& other) {
+ us_ = other.us_;
+ return *this;
+ }
+
+ // Compute the difference between two times.
+ TimeDelta operator-(const Time& other) const {
+ return TimeDelta::FromMicroseconds(us_ - other.us_);
+ }
+
+ // Modify by some time delta.
+ Time& operator+=(const TimeDelta& delta) {
+ us_ += delta.InMicroseconds();
+ return *this;
+ }
+ Time& operator-=(const TimeDelta& delta) {
+ us_ -= delta.InMicroseconds();
+ return *this;
+ }
+
+ // Return a new time modified by some delta.
+ Time operator+(const TimeDelta& delta) const {
+ return Time(us_ + delta.InMicroseconds());
+ }
+ Time operator-(const TimeDelta& delta) const {
+ return Time(us_ - delta.InMicroseconds());
+ }
+
+ // Comparison operators
+ bool operator==(const Time& other) const {
+ return us_ == other.us_;
+ }
+ bool operator!=(const Time& other) const {
+ return us_ != other.us_;
+ }
+ bool operator<(const Time& other) const {
+ return us_ < other.us_;
+ }
+ bool operator<=(const Time& other) const {
+ return us_ <= other.us_;
+ }
+ bool operator>(const Time& other) const {
+ return us_ > other.us_;
+ }
+ bool operator>=(const Time& other) const {
+ return us_ >= other.us_;
+ }
+
+ private:
+ explicit Time(int64_t us) : us_(us) {}
+
+ // Time in microseconds in UTC.
+ int64_t us_;
+};
+
+inline Time operator+(const TimeDelta& delta, const Time& time) {
+ return time + delta;
+}
+
+
+// -----------------------------------------------------------------------------
+// TimeTicks
+//
+// This class represents an abstract time that is almost always incrementing,
+// for use in measuring time durations. It is internally represented in
+// microseconds. It cannot be converted to a human-readable time, but is
+// guaranteed not to decrease (if the user changes the computer clock,
+// Time::Now() may actually decrease or jump). But note that TimeTicks may
+// "stand still", for example while the computer is suspended.
+
+class TimeTicks V8_FINAL BASE_EMBEDDED {
+ public:
+ TimeTicks() : ticks_(0) {}
+
+ // Platform-dependent tick count representing "right now."
+ // The resolution of this clock is ~1-15ms. Resolution varies depending
+ // on hardware/operating system configuration.
+ // This method never returns a null TimeTicks.
+ static TimeTicks Now();
+
+ // Returns a platform-dependent high-resolution tick count. Implementation
+ // is hardware dependent and may or may not return sub-millisecond
+ // resolution. THIS CALL IS GENERALLY MUCH MORE EXPENSIVE THAN Now() AND
+ // SHOULD ONLY BE USED WHEN IT IS REALLY NEEDED.
+ // This method never returns a null TimeTicks.
+ static TimeTicks HighResolutionNow();
+
+ // Returns true if the high-resolution clock is working on this system.
+ static bool IsHighResolutionClockWorking();
+
+ // Returns true if this object has not been initialized.
+ bool IsNull() const { return ticks_ == 0; }
+
+ // Converts to/from internal values. The meaning of the "internal value" is
+ // completely up to the implementation, so it should be treated as opaque.
+ static TimeTicks FromInternalValue(int64_t value) {
+ return TimeTicks(value);
+ }
+ int64_t ToInternalValue() const {
+ return ticks_;
+ }
+
+ TimeTicks& operator=(const TimeTicks other) {
+ ticks_ = other.ticks_;
+ return *this;
+ }
+
+ // Compute the difference between two times.
+ TimeDelta operator-(const TimeTicks other) const {
+ return TimeDelta::FromMicroseconds(ticks_ - other.ticks_);
+ }
+
+ // Modify by some time delta.
+ TimeTicks& operator+=(const TimeDelta& delta) {
+ ticks_ += delta.InMicroseconds();
+ return *this;
+ }
+ TimeTicks& operator-=(const TimeDelta& delta) {
+ ticks_ -= delta.InMicroseconds();
+ return *this;
+ }
+
+ // Return a new TimeTicks modified by some delta.
+ TimeTicks operator+(const TimeDelta& delta) const {
+ return TimeTicks(ticks_ + delta.InMicroseconds());
+ }
+ TimeTicks operator-(const TimeDelta& delta) const {
+ return TimeTicks(ticks_ - delta.InMicroseconds());
+ }
+
+ // Comparison operators
+ bool operator==(const TimeTicks& other) const {
+ return ticks_ == other.ticks_;
+ }
+ bool operator!=(const TimeTicks& other) const {
+ return ticks_ != other.ticks_;
+ }
+ bool operator<(const TimeTicks& other) const {
+ return ticks_ < other.ticks_;
+ }
+ bool operator<=(const TimeTicks& other) const {
+ return ticks_ <= other.ticks_;
+ }
+ bool operator>(const TimeTicks& other) const {
+ return ticks_ > other.ticks_;
+ }
+ bool operator>=(const TimeTicks& other) const {
+ return ticks_ >= other.ticks_;
+ }
+
+ private:
+ // Please use Now() to create a new object. This is for internal use
+ // and testing. Ticks is in microseconds.
+ explicit TimeTicks(int64_t ticks) : ticks_(ticks) {}
+
+ // Tick count in microseconds.
+ int64_t ticks_;
+};
+
+inline TimeTicks operator+(const TimeDelta& delta, const TimeTicks& ticks) {
+ return ticks + delta;
+}
+
+} } // namespace v8::internal
+
+#endif // V8_PLATFORM_TIME_H_
diff --git a/deps/v8/src/preparse-data.cc b/deps/v8/src/preparse-data.cc
index 98c343e79..8e0884828 100644
--- a/deps/v8/src/preparse-data.cc
+++ b/deps/v8/src/preparse-data.cc
@@ -86,6 +86,7 @@ void FunctionLoggingParserRecorder::WriteString(Vector<const char> str) {
}
}
+
// ----------------------------------------------------------------------------
// PartialParserRecorder - Record both function entries and symbols.
@@ -95,7 +96,7 @@ Vector<unsigned> PartialParserRecorder::ExtractData() {
Vector<unsigned> data = Vector<unsigned>::New(total_size);
preamble_[PreparseDataConstants::kFunctionsSizeOffset] = function_size;
preamble_[PreparseDataConstants::kSymbolCountOffset] = 0;
- memcpy(data.start(), preamble_, sizeof(preamble_));
+ OS::MemCopy(data.start(), preamble_, sizeof(preamble_));
int symbol_start = PreparseDataConstants::kHeaderSize + function_size;
if (function_size > 0) {
function_store_.WriteTo(data.SubVector(PreparseDataConstants::kHeaderSize,
@@ -113,7 +114,7 @@ CompleteParserRecorder::CompleteParserRecorder()
literal_chars_(0),
symbol_store_(0),
symbol_keys_(0),
- symbol_table_(vector_compare),
+ string_table_(vector_compare),
symbol_id_(0) {
}
@@ -123,7 +124,7 @@ void CompleteParserRecorder::LogSymbol(int start,
bool is_ascii,
Vector<const byte> literal_bytes) {
Key key = { is_ascii, literal_bytes };
- HashMap::Entry* entry = symbol_table_.Lookup(&key, hash, true);
+ HashMap::Entry* entry = string_table_.Lookup(&key, hash, true);
int id = static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
if (id == 0) {
// Copy literal contents for later comparison.
@@ -151,7 +152,7 @@ Vector<unsigned> CompleteParserRecorder::ExtractData() {
Vector<unsigned> data = Vector<unsigned>::New(total_size);
preamble_[PreparseDataConstants::kFunctionsSizeOffset] = function_size;
preamble_[PreparseDataConstants::kSymbolCountOffset] = symbol_id_;
- memcpy(data.start(), preamble_, sizeof(preamble_));
+ OS::MemCopy(data.start(), preamble_, sizeof(preamble_));
int symbol_start = PreparseDataConstants::kHeaderSize + function_size;
if (function_size > 0) {
function_store_.WriteTo(data.SubVector(PreparseDataConstants::kHeaderSize,
diff --git a/deps/v8/src/preparse-data.h b/deps/v8/src/preparse-data.h
index f34743020..3a1e99d5d 100644
--- a/deps/v8/src/preparse-data.h
+++ b/deps/v8/src/preparse-data.h
@@ -221,7 +221,7 @@ class CompleteParserRecorder: public FunctionLoggingParserRecorder {
Collector<byte> literal_chars_;
Collector<byte> symbol_store_;
Collector<Key> symbol_keys_;
- HashMap symbol_table_;
+ HashMap string_table_;
int symbol_id_;
};
diff --git a/deps/v8/src/preparser-api.cc b/deps/v8/src/preparser-api.cc
deleted file mode 100644
index 6e8556aa1..000000000
--- a/deps/v8/src/preparser-api.cc
+++ /dev/null
@@ -1,214 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifdef _MSC_VER
-#define V8_WIN32_LEAN_AND_MEAN
-#include "win32-headers.h"
-#endif
-
-#include "../include/v8-preparser.h"
-
-#include "globals.h"
-#include "checks.h"
-#include "allocation.h"
-#include "utils.h"
-#include "list.h"
-#include "hashmap.h"
-#include "preparse-data-format.h"
-#include "preparse-data.h"
-#include "preparser.h"
-
-namespace v8 {
-namespace internal {
-
-// UTF16Buffer based on a v8::UnicodeInputStream.
-class InputStreamUtf16Buffer : public Utf16CharacterStream {
- public:
- /* The InputStreamUtf16Buffer maintains an internal buffer
- * that is filled in chunks from the Utf16CharacterStream.
- * It also maintains unlimited pushback capability, but optimized
- * for small pushbacks.
- * The pushback_buffer_ pointer points to the limit of pushbacks
- * in the current buffer. There is room for a few pushback'ed chars before
- * the buffer containing the most recently read chunk. If this is overflowed,
- * an external buffer is allocated/reused to hold further pushbacks, and
- * pushback_buffer_ and buffer_cursor_/buffer_end_ now points to the
- * new buffer. When this buffer is read to the end again, the cursor is
- * switched back to the internal buffer
- */
- explicit InputStreamUtf16Buffer(v8::UnicodeInputStream* stream)
- : Utf16CharacterStream(),
- stream_(stream),
- pushback_buffer_(buffer_),
- pushback_buffer_end_cache_(NULL),
- pushback_buffer_backing_(NULL),
- pushback_buffer_backing_size_(0) {
- buffer_cursor_ = buffer_end_ = buffer_ + kPushBackSize;
- }
-
- virtual ~InputStreamUtf16Buffer() {
- if (pushback_buffer_backing_ != NULL) {
- DeleteArray(pushback_buffer_backing_);
- }
- }
-
- virtual void PushBack(uc32 ch) {
- ASSERT(pos_ > 0);
- if (ch == kEndOfInput) {
- pos_--;
- return;
- }
- if (buffer_cursor_ <= pushback_buffer_) {
- // No more room in the current buffer to do pushbacks.
- if (pushback_buffer_end_cache_ == NULL) {
- // We have overflowed the pushback space at the beginning of buffer_.
- // Switch to using a separate allocated pushback buffer.
- if (pushback_buffer_backing_ == NULL) {
- // Allocate a buffer the first time we need it.
- pushback_buffer_backing_ = NewArray<uc16>(kPushBackSize);
- pushback_buffer_backing_size_ = kPushBackSize;
- }
- pushback_buffer_ = pushback_buffer_backing_;
- pushback_buffer_end_cache_ = buffer_end_;
- buffer_end_ = pushback_buffer_backing_ + pushback_buffer_backing_size_;
- buffer_cursor_ = buffer_end_ - 1;
- } else {
- // Hit the bottom of the allocated pushback buffer.
- // Double the buffer and continue.
- uc16* new_buffer = NewArray<uc16>(pushback_buffer_backing_size_ * 2);
- memcpy(new_buffer + pushback_buffer_backing_size_,
- pushback_buffer_backing_,
- pushback_buffer_backing_size_);
- DeleteArray(pushback_buffer_backing_);
- buffer_cursor_ = new_buffer + pushback_buffer_backing_size_;
- pushback_buffer_backing_ = pushback_buffer_ = new_buffer;
- buffer_end_ = pushback_buffer_backing_ + pushback_buffer_backing_size_;
- }
- }
- pushback_buffer_[buffer_cursor_ - pushback_buffer_- 1] =
- static_cast<uc16>(ch);
- pos_--;
- }
-
- protected:
- virtual bool ReadBlock() {
- if (pushback_buffer_end_cache_ != NULL) {
- buffer_cursor_ = buffer_;
- buffer_end_ = pushback_buffer_end_cache_;
- pushback_buffer_end_cache_ = NULL;
- return buffer_end_ > buffer_cursor_;
- }
- // Copy the top of the buffer into the pushback area.
- int32_t value;
- uc16* buffer_start = buffer_ + kPushBackSize;
- buffer_cursor_ = buffer_end_ = buffer_start;
- while ((value = stream_->Next()) >= 0) {
- if (value >
- static_cast<int32_t>(unibrow::Utf16::kMaxNonSurrogateCharCode)) {
- buffer_start[buffer_end_++ - buffer_start] =
- unibrow::Utf16::LeadSurrogate(value);
- buffer_start[buffer_end_++ - buffer_start] =
- unibrow::Utf16::TrailSurrogate(value);
- } else {
- // buffer_end_ is a const pointer, but buffer_ is writable.
- buffer_start[buffer_end_++ - buffer_start] = static_cast<uc16>(value);
- }
- // Stop one before the end of the buffer in case we get a surrogate pair.
- if (buffer_end_ <= buffer_ + 1 + kPushBackSize + kBufferSize) break;
- }
- return buffer_end_ > buffer_start;
- }
-
- virtual unsigned SlowSeekForward(unsigned pos) {
- // Seeking in the input is not used by preparsing.
- // It's only used by the real parser based on preparser data.
- UNIMPLEMENTED();
- return 0;
- }
-
- private:
- static const unsigned kBufferSize = 512;
- static const unsigned kPushBackSize = 16;
- v8::UnicodeInputStream* const stream_;
- // Buffer holding first kPushBackSize characters of pushback buffer,
- // then kBufferSize chars of read-ahead.
- // The pushback buffer is only used if pushing back characters past
- // the start of a block.
- uc16 buffer_[kPushBackSize + kBufferSize];
- // Limit of pushbacks before new allocation is necessary.
- uc16* pushback_buffer_;
- // Only if that pushback buffer at the start of buffer_ isn't sufficient
- // is the following used.
- const uc16* pushback_buffer_end_cache_;
- uc16* pushback_buffer_backing_;
- unsigned pushback_buffer_backing_size_;
-};
-
-
-// Functions declared by allocation.h and implemented in both api.cc (for v8)
-// or here (for a stand-alone preparser).
-
-void FatalProcessOutOfMemory(const char* reason) {
- V8_Fatal(__FILE__, __LINE__, reason);
-}
-
-bool EnableSlowAsserts() { return true; }
-
-} // namespace internal.
-
-
-UnicodeInputStream::~UnicodeInputStream() { }
-
-
-PreParserData Preparse(UnicodeInputStream* input, size_t max_stack) {
- internal::InputStreamUtf16Buffer buffer(input);
- uintptr_t stack_limit = reinterpret_cast<uintptr_t>(&buffer) - max_stack;
- internal::UnicodeCache unicode_cache;
- internal::Scanner scanner(&unicode_cache);
- scanner.Initialize(&buffer);
- internal::CompleteParserRecorder recorder;
- preparser::PreParser::PreParseResult result =
- preparser::PreParser::PreParseProgram(&scanner,
- &recorder,
- internal::kAllowLazy,
- stack_limit);
- if (result == preparser::PreParser::kPreParseStackOverflow) {
- return PreParserData::StackOverflow();
- }
- internal::Vector<unsigned> pre_data = recorder.ExtractData();
- size_t size = pre_data.length() * sizeof(pre_data[0]);
- unsigned char* data = reinterpret_cast<unsigned char*>(pre_data.start());
- return PreParserData(size, data);
-}
-
-} // namespace v8.
-
-
-// Used by ASSERT macros and other immediate exits.
-extern "C" void V8_Fatal(const char* file, int line, const char* format, ...) {
- exit(EXIT_FAILURE);
-}
diff --git a/deps/v8/src/preparser.cc b/deps/v8/src/preparser.cc
index 21da4f80d..a87c43455 100644
--- a/deps/v8/src/preparser.cc
+++ b/deps/v8/src/preparser.cc
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include <math.h>
+#include <cmath>
#include "../include/v8stdint.h"
@@ -42,34 +42,38 @@
#include "unicode.h"
#include "utils.h"
-namespace v8 {
+#if V8_CC_MSVC && (_MSC_VER < 1800)
+namespace std {
-#ifdef _MSC_VER
-// Usually defined in math.h, but not in MSVC.
+// Usually defined in math.h, but not in MSVC until VS2013+.
// Abstracted to work
int isfinite(double value);
+
+} // namespace std
#endif
-namespace preparser {
+namespace v8 {
+namespace internal {
PreParser::PreParseResult PreParser::PreParseLazyFunction(
- i::LanguageMode mode, i::ParserRecorder* log) {
+ LanguageMode mode, bool is_generator, ParserRecorder* log) {
log_ = log;
// Lazy functions always have trivial outer scopes (no with/catch scopes).
Scope top_scope(&scope_, kTopLevelScope);
set_language_mode(mode);
Scope function_scope(&scope_, kFunctionScope);
- ASSERT_EQ(i::Token::LBRACE, scanner_->current_token());
+ function_scope.set_is_generator(is_generator);
+ ASSERT_EQ(Token::LBRACE, scanner()->current_token());
bool ok = true;
- int start_position = scanner_->peek_location().beg_pos;
+ int start_position = peek_position();
ParseLazyFunctionLiteralBody(&ok);
- if (stack_overflow_) return kPreParseStackOverflow;
+ if (stack_overflow()) return kPreParseStackOverflow;
if (!ok) {
- ReportUnexpectedToken(scanner_->current_token());
+ ReportUnexpectedToken(scanner()->current_token());
} else {
- ASSERT_EQ(i::Token::RBRACE, scanner_->peek());
+ ASSERT_EQ(Token::RBRACE, scanner()->peek());
if (!is_classic_mode()) {
- int end_pos = scanner_->location().end_pos;
+ int end_pos = scanner()->location().end_pos;
CheckOctalLiteral(start_position, end_pos, &ok);
if (ok) {
CheckDelayedStrictModeViolation(start_position, end_pos, &ok);
@@ -93,50 +97,38 @@ PreParser::PreParseResult PreParser::PreParseLazyFunction(
// That means that contextual checks (like a label being declared where
// it is used) are generally omitted.
-void PreParser::ReportUnexpectedToken(i::Token::Value token) {
+void PreParser::ReportUnexpectedToken(Token::Value token) {
// We don't report stack overflows here, to avoid increasing the
// stack depth even further. Instead we report it after parsing is
// over, in ParseProgram.
- if (token == i::Token::ILLEGAL && stack_overflow_) {
+ if (token == Token::ILLEGAL && stack_overflow()) {
return;
}
- i::Scanner::Location source_location = scanner_->location();
+ Scanner::Location source_location = scanner()->location();
// Four of the tokens are treated specially
switch (token) {
- case i::Token::EOS:
+ case Token::EOS:
return ReportMessageAt(source_location, "unexpected_eos", NULL);
- case i::Token::NUMBER:
+ case Token::NUMBER:
return ReportMessageAt(source_location, "unexpected_token_number", NULL);
- case i::Token::STRING:
+ case Token::STRING:
return ReportMessageAt(source_location, "unexpected_token_string", NULL);
- case i::Token::IDENTIFIER:
+ case Token::IDENTIFIER:
return ReportMessageAt(source_location,
"unexpected_token_identifier", NULL);
- case i::Token::FUTURE_RESERVED_WORD:
+ case Token::FUTURE_RESERVED_WORD:
return ReportMessageAt(source_location, "unexpected_reserved", NULL);
- case i::Token::FUTURE_STRICT_RESERVED_WORD:
+ case Token::FUTURE_STRICT_RESERVED_WORD:
return ReportMessageAt(source_location,
"unexpected_strict_reserved", NULL);
default:
- const char* name = i::Token::String(token);
+ const char* name = Token::String(token);
ReportMessageAt(source_location, "unexpected_token", name);
}
}
-// Checks whether octal literal last seen is between beg_pos and end_pos.
-// If so, reports an error.
-void PreParser::CheckOctalLiteral(int beg_pos, int end_pos, bool* ok) {
- i::Scanner::Location octal = scanner_->octal_position();
- if (beg_pos <= octal.beg_pos && octal.end_pos <= end_pos) {
- ReportMessageAt(octal, "strict_octal_literal", NULL);
- scanner_->clear_octal_position();
- *ok = false;
- }
-}
-
-
#define CHECK_OK ok); \
if (!*ok) return kUnknownSourceElements; \
((void)0
@@ -154,12 +146,13 @@ PreParser::Statement PreParser::ParseSourceElement(bool* ok) {
// SourceElement:
// LetDeclaration
// ConstDeclaration
+ // GeneratorDeclaration
switch (peek()) {
- case i::Token::FUNCTION:
+ case Token::FUNCTION:
return ParseFunctionDeclaration(ok);
- case i::Token::LET:
- case i::Token::CONST:
+ case Token::LET:
+ case Token::CONST:
return ParseVariableStatement(kSourceElement, ok);
default:
return ParseStatement(ok);
@@ -177,8 +170,8 @@ PreParser::SourceElements PreParser::ParseSourceElements(int end_token,
Statement statement = ParseSourceElement(CHECK_OK);
if (allow_directive_prologue) {
if (statement.IsUseStrictLiteral()) {
- set_language_mode(harmony_scoping_ ?
- i::EXTENDED_MODE : i::STRICT_MODE);
+ set_language_mode(allow_harmony_scoping() ?
+ EXTENDED_MODE : STRICT_MODE);
} else if (!statement.IsStringLiteral()) {
allow_directive_prologue = false;
}
@@ -223,55 +216,55 @@ PreParser::Statement PreParser::ParseStatement(bool* ok) {
// Keep the source position of the statement
switch (peek()) {
- case i::Token::LBRACE:
+ case Token::LBRACE:
return ParseBlock(ok);
- case i::Token::CONST:
- case i::Token::LET:
- case i::Token::VAR:
+ case Token::CONST:
+ case Token::LET:
+ case Token::VAR:
return ParseVariableStatement(kStatement, ok);
- case i::Token::SEMICOLON:
+ case Token::SEMICOLON:
Next();
return Statement::Default();
- case i::Token::IF:
+ case Token::IF:
return ParseIfStatement(ok);
- case i::Token::DO:
+ case Token::DO:
return ParseDoWhileStatement(ok);
- case i::Token::WHILE:
+ case Token::WHILE:
return ParseWhileStatement(ok);
- case i::Token::FOR:
+ case Token::FOR:
return ParseForStatement(ok);
- case i::Token::CONTINUE:
+ case Token::CONTINUE:
return ParseContinueStatement(ok);
- case i::Token::BREAK:
+ case Token::BREAK:
return ParseBreakStatement(ok);
- case i::Token::RETURN:
+ case Token::RETURN:
return ParseReturnStatement(ok);
- case i::Token::WITH:
+ case Token::WITH:
return ParseWithStatement(ok);
- case i::Token::SWITCH:
+ case Token::SWITCH:
return ParseSwitchStatement(ok);
- case i::Token::THROW:
+ case Token::THROW:
return ParseThrowStatement(ok);
- case i::Token::TRY:
+ case Token::TRY:
return ParseTryStatement(ok);
- case i::Token::FUNCTION: {
- i::Scanner::Location start_location = scanner_->peek_location();
+ case Token::FUNCTION: {
+ Scanner::Location start_location = scanner()->peek_location();
Statement statement = ParseFunctionDeclaration(CHECK_OK);
- i::Scanner::Location end_location = scanner_->location();
+ Scanner::Location end_location = scanner()->location();
if (!is_classic_mode()) {
ReportMessageAt(start_location.beg_pos, end_location.end_pos,
"strict_function", NULL);
@@ -282,7 +275,7 @@ PreParser::Statement PreParser::ParseStatement(bool* ok) {
}
}
- case i::Token::DEBUGGER:
+ case Token::DEBUGGER:
return ParseDebuggerStatement(ok);
default:
@@ -294,19 +287,23 @@ PreParser::Statement PreParser::ParseStatement(bool* ok) {
PreParser::Statement PreParser::ParseFunctionDeclaration(bool* ok) {
// FunctionDeclaration ::
// 'function' Identifier '(' FormalParameterListopt ')' '{' FunctionBody '}'
- Expect(i::Token::FUNCTION, CHECK_OK);
+ // GeneratorDeclaration ::
+ // 'function' '*' Identifier '(' FormalParameterListopt ')'
+ // '{' FunctionBody '}'
+ Expect(Token::FUNCTION, CHECK_OK);
+ bool is_generator = allow_generators() && Check(Token::MUL);
Identifier identifier = ParseIdentifier(CHECK_OK);
- i::Scanner::Location location = scanner_->location();
+ Scanner::Location location = scanner()->location();
- Expression function_value = ParseFunctionLiteral(CHECK_OK);
+ Expression function_value = ParseFunctionLiteral(is_generator, CHECK_OK);
if (function_value.IsStrictFunction() &&
!identifier.IsValidStrictVariable()) {
// Strict mode violation, using either reserved word or eval/arguments
// as name of strict function.
const char* type = "strict_function_name";
- if (identifier.IsFutureStrictReserved()) {
+ if (identifier.IsFutureStrictReserved() || identifier.IsYield()) {
type = "strict_reserved_word";
}
ReportMessageAt(location, type, NULL);
@@ -323,15 +320,15 @@ PreParser::Statement PreParser::ParseBlock(bool* ok) {
// Note that a Block does not introduce a new execution scope!
// (ECMA-262, 3rd, 12.2)
//
- Expect(i::Token::LBRACE, CHECK_OK);
- while (peek() != i::Token::RBRACE) {
+ Expect(Token::LBRACE, CHECK_OK);
+ while (peek() != Token::RBRACE) {
if (is_extended_mode()) {
ParseSourceElement(CHECK_OK);
} else {
ParseStatement(CHECK_OK);
}
}
- Expect(i::Token::RBRACE, ok);
+ Expect(Token::RBRACE, ok);
return Statement::Default();
}
@@ -375,9 +372,9 @@ PreParser::Statement PreParser::ParseVariableDeclarations(
// ConstBinding ::
// BindingPattern '=' AssignmentExpression
bool require_initializer = false;
- if (peek() == i::Token::VAR) {
- Consume(i::Token::VAR);
- } else if (peek() == i::Token::CONST) {
+ if (peek() == Token::VAR) {
+ Consume(Token::VAR);
+ } else if (peek() == Token::CONST) {
// TODO(ES6): The ES6 Draft Rev4 section 12.2.2 reads:
//
// ConstDeclaration : const ConstBinding (',' ConstBinding)* ';'
@@ -388,20 +385,20 @@ PreParser::Statement PreParser::ParseVariableDeclarations(
// However disallowing const in classic mode will break compatibility with
// existing pages. Therefore we keep allowing const with the old
// non-harmony semantics in classic mode.
- Consume(i::Token::CONST);
+ Consume(Token::CONST);
switch (language_mode()) {
- case i::CLASSIC_MODE:
+ case CLASSIC_MODE:
break;
- case i::STRICT_MODE: {
- i::Scanner::Location location = scanner_->peek_location();
+ case STRICT_MODE: {
+ Scanner::Location location = scanner()->peek_location();
ReportMessageAt(location, "strict_const", NULL);
*ok = false;
return Statement::Default();
}
- case i::EXTENDED_MODE:
+ case EXTENDED_MODE:
if (var_context != kSourceElement &&
var_context != kForStatement) {
- i::Scanner::Location location = scanner_->peek_location();
+ Scanner::Location location = scanner()->peek_location();
ReportMessageAt(location.beg_pos, location.end_pos,
"unprotected_const", NULL);
*ok = false;
@@ -410,7 +407,7 @@ PreParser::Statement PreParser::ParseVariableDeclarations(
require_initializer = true;
break;
}
- } else if (peek() == i::Token::LET) {
+ } else if (peek() == Token::LET) {
// ES6 Draft Rev4 section 12.2.1:
//
// LetDeclaration : let LetBindingList ;
@@ -418,16 +415,16 @@ PreParser::Statement PreParser::ParseVariableDeclarations(
// * It is a Syntax Error if the code that matches this production is not
// contained in extended code.
if (!is_extended_mode()) {
- i::Scanner::Location location = scanner_->peek_location();
+ Scanner::Location location = scanner()->peek_location();
ReportMessageAt(location.beg_pos, location.end_pos,
"illegal_let", NULL);
*ok = false;
return Statement::Default();
}
- Consume(i::Token::LET);
+ Consume(Token::LET);
if (var_context != kSourceElement &&
var_context != kForStatement) {
- i::Scanner::Location location = scanner_->peek_location();
+ Scanner::Location location = scanner()->peek_location();
ReportMessageAt(location.beg_pos, location.end_pos,
"unprotected_let", NULL);
*ok = false;
@@ -445,22 +442,22 @@ PreParser::Statement PreParser::ParseVariableDeclarations(
int nvars = 0; // the number of variables declared
do {
// Parse variable name.
- if (nvars > 0) Consume(i::Token::COMMA);
+ if (nvars > 0) Consume(Token::COMMA);
Identifier identifier = ParseIdentifier(CHECK_OK);
if (!is_classic_mode() && !identifier.IsValidStrictVariable()) {
- StrictModeIdentifierViolation(scanner_->location(),
+ StrictModeIdentifierViolation(scanner()->location(),
"strict_var_name",
identifier,
ok);
return Statement::Default();
}
nvars++;
- if (peek() == i::Token::ASSIGN || require_initializer) {
- Expect(i::Token::ASSIGN, CHECK_OK);
+ if (peek() == Token::ASSIGN || require_initializer) {
+ Expect(Token::ASSIGN, CHECK_OK);
ParseAssignmentExpression(var_context != kForStatement, CHECK_OK);
if (decl_props != NULL) *decl_props = kHasInitializers;
}
- } while (peek() == i::Token::COMMA);
+ } while (peek() == Token::COMMA);
if (num_decl != NULL) *num_decl = nvars;
return Statement::Default();
@@ -475,9 +472,11 @@ PreParser::Statement PreParser::ParseExpressionOrLabelledStatement(bool* ok) {
Expression expr = ParseExpression(true, CHECK_OK);
if (expr.IsRawIdentifier()) {
ASSERT(!expr.AsIdentifier().IsFutureReserved());
- ASSERT(is_classic_mode() || !expr.AsIdentifier().IsFutureStrictReserved());
- if (peek() == i::Token::COLON) {
- Consume(i::Token::COLON);
+ ASSERT(is_classic_mode() ||
+ (!expr.AsIdentifier().IsFutureStrictReserved() &&
+ !expr.AsIdentifier().IsYield()));
+ if (peek() == Token::COLON) {
+ Consume(Token::COLON);
return ParseStatement(ok);
}
// Preparsing is disabled for extensions (because the extension details
@@ -494,12 +493,12 @@ PreParser::Statement PreParser::ParseIfStatement(bool* ok) {
// IfStatement ::
// 'if' '(' Expression ')' Statement ('else' Statement)?
- Expect(i::Token::IF, CHECK_OK);
- Expect(i::Token::LPAREN, CHECK_OK);
+ Expect(Token::IF, CHECK_OK);
+ Expect(Token::LPAREN, CHECK_OK);
ParseExpression(true, CHECK_OK);
- Expect(i::Token::RPAREN, CHECK_OK);
+ Expect(Token::RPAREN, CHECK_OK);
ParseStatement(CHECK_OK);
- if (peek() == i::Token::ELSE) {
+ if (peek() == Token::ELSE) {
Next();
ParseStatement(CHECK_OK);
}
@@ -511,12 +510,12 @@ PreParser::Statement PreParser::ParseContinueStatement(bool* ok) {
// ContinueStatement ::
// 'continue' [no line terminator] Identifier? ';'
- Expect(i::Token::CONTINUE, CHECK_OK);
- i::Token::Value tok = peek();
- if (!scanner_->HasAnyLineTerminatorBeforeNext() &&
- tok != i::Token::SEMICOLON &&
- tok != i::Token::RBRACE &&
- tok != i::Token::EOS) {
+ Expect(Token::CONTINUE, CHECK_OK);
+ Token::Value tok = peek();
+ if (!scanner()->HasAnyLineTerminatorBeforeNext() &&
+ tok != Token::SEMICOLON &&
+ tok != Token::RBRACE &&
+ tok != Token::EOS) {
ParseIdentifier(CHECK_OK);
}
ExpectSemicolon(CHECK_OK);
@@ -528,12 +527,12 @@ PreParser::Statement PreParser::ParseBreakStatement(bool* ok) {
// BreakStatement ::
// 'break' [no line terminator] Identifier? ';'
- Expect(i::Token::BREAK, CHECK_OK);
- i::Token::Value tok = peek();
- if (!scanner_->HasAnyLineTerminatorBeforeNext() &&
- tok != i::Token::SEMICOLON &&
- tok != i::Token::RBRACE &&
- tok != i::Token::EOS) {
+ Expect(Token::BREAK, CHECK_OK);
+ Token::Value tok = peek();
+ if (!scanner()->HasAnyLineTerminatorBeforeNext() &&
+ tok != Token::SEMICOLON &&
+ tok != Token::RBRACE &&
+ tok != Token::EOS) {
ParseIdentifier(CHECK_OK);
}
ExpectSemicolon(CHECK_OK);
@@ -548,18 +547,18 @@ PreParser::Statement PreParser::ParseReturnStatement(bool* ok) {
   // Consume the return token. It is necessary to do that before
// reporting any errors on it, because of the way errors are
// reported (underlining).
- Expect(i::Token::RETURN, CHECK_OK);
+ Expect(Token::RETURN, CHECK_OK);
// An ECMAScript program is considered syntactically incorrect if it
// contains a return statement that is not within the body of a
// function. See ECMA-262, section 12.9, page 67.
// This is not handled during preparsing.
- i::Token::Value tok = peek();
- if (!scanner_->HasAnyLineTerminatorBeforeNext() &&
- tok != i::Token::SEMICOLON &&
- tok != i::Token::RBRACE &&
- tok != i::Token::EOS) {
+ Token::Value tok = peek();
+ if (!scanner()->HasAnyLineTerminatorBeforeNext() &&
+ tok != Token::SEMICOLON &&
+ tok != Token::RBRACE &&
+ tok != Token::EOS) {
ParseExpression(true, CHECK_OK);
}
ExpectSemicolon(CHECK_OK);
@@ -570,16 +569,16 @@ PreParser::Statement PreParser::ParseReturnStatement(bool* ok) {
PreParser::Statement PreParser::ParseWithStatement(bool* ok) {
// WithStatement ::
// 'with' '(' Expression ')' Statement
- Expect(i::Token::WITH, CHECK_OK);
+ Expect(Token::WITH, CHECK_OK);
if (!is_classic_mode()) {
- i::Scanner::Location location = scanner_->location();
+ Scanner::Location location = scanner()->location();
ReportMessageAt(location, "strict_mode_with", NULL);
*ok = false;
return Statement::Default();
}
- Expect(i::Token::LPAREN, CHECK_OK);
+ Expect(Token::LPAREN, CHECK_OK);
ParseExpression(true, CHECK_OK);
- Expect(i::Token::RPAREN, CHECK_OK);
+ Expect(Token::RPAREN, CHECK_OK);
Scope::InsideWith iw(scope_);
ParseStatement(CHECK_OK);
@@ -591,30 +590,30 @@ PreParser::Statement PreParser::ParseSwitchStatement(bool* ok) {
// SwitchStatement ::
// 'switch' '(' Expression ')' '{' CaseClause* '}'
- Expect(i::Token::SWITCH, CHECK_OK);
- Expect(i::Token::LPAREN, CHECK_OK);
+ Expect(Token::SWITCH, CHECK_OK);
+ Expect(Token::LPAREN, CHECK_OK);
ParseExpression(true, CHECK_OK);
- Expect(i::Token::RPAREN, CHECK_OK);
+ Expect(Token::RPAREN, CHECK_OK);
- Expect(i::Token::LBRACE, CHECK_OK);
- i::Token::Value token = peek();
- while (token != i::Token::RBRACE) {
- if (token == i::Token::CASE) {
- Expect(i::Token::CASE, CHECK_OK);
+ Expect(Token::LBRACE, CHECK_OK);
+ Token::Value token = peek();
+ while (token != Token::RBRACE) {
+ if (token == Token::CASE) {
+ Expect(Token::CASE, CHECK_OK);
ParseExpression(true, CHECK_OK);
} else {
- Expect(i::Token::DEFAULT, CHECK_OK);
+ Expect(Token::DEFAULT, CHECK_OK);
}
- Expect(i::Token::COLON, CHECK_OK);
+ Expect(Token::COLON, CHECK_OK);
token = peek();
- while (token != i::Token::CASE &&
- token != i::Token::DEFAULT &&
- token != i::Token::RBRACE) {
+ while (token != Token::CASE &&
+ token != Token::DEFAULT &&
+ token != Token::RBRACE) {
ParseStatement(CHECK_OK);
token = peek();
}
}
- Expect(i::Token::RBRACE, ok);
+ Expect(Token::RBRACE, ok);
return Statement::Default();
}
@@ -623,13 +622,13 @@ PreParser::Statement PreParser::ParseDoWhileStatement(bool* ok) {
// DoStatement ::
// 'do' Statement 'while' '(' Expression ')' ';'
- Expect(i::Token::DO, CHECK_OK);
+ Expect(Token::DO, CHECK_OK);
ParseStatement(CHECK_OK);
- Expect(i::Token::WHILE, CHECK_OK);
- Expect(i::Token::LPAREN, CHECK_OK);
+ Expect(Token::WHILE, CHECK_OK);
+ Expect(Token::LPAREN, CHECK_OK);
ParseExpression(true, CHECK_OK);
- Expect(i::Token::RPAREN, ok);
- if (peek() == i::Token::SEMICOLON) Consume(i::Token::SEMICOLON);
+ Expect(Token::RPAREN, ok);
+ if (peek() == Token::SEMICOLON) Consume(Token::SEMICOLON);
return Statement::Default();
}
@@ -638,45 +637,54 @@ PreParser::Statement PreParser::ParseWhileStatement(bool* ok) {
// WhileStatement ::
// 'while' '(' Expression ')' Statement
- Expect(i::Token::WHILE, CHECK_OK);
- Expect(i::Token::LPAREN, CHECK_OK);
+ Expect(Token::WHILE, CHECK_OK);
+ Expect(Token::LPAREN, CHECK_OK);
ParseExpression(true, CHECK_OK);
- Expect(i::Token::RPAREN, CHECK_OK);
+ Expect(Token::RPAREN, CHECK_OK);
ParseStatement(ok);
return Statement::Default();
}
+bool PreParser::CheckInOrOf(bool accept_OF) {
+ if (Check(Token::IN) ||
+ (allow_for_of() && accept_OF &&
+ CheckContextualKeyword(CStrVector("of")))) {
+ return true;
+ }
+ return false;
+}
+
+
PreParser::Statement PreParser::ParseForStatement(bool* ok) {
// ForStatement ::
// 'for' '(' Expression? ';' Expression? ';' Expression? ')' Statement
- Expect(i::Token::FOR, CHECK_OK);
- Expect(i::Token::LPAREN, CHECK_OK);
- if (peek() != i::Token::SEMICOLON) {
- if (peek() == i::Token::VAR || peek() == i::Token::CONST ||
- peek() == i::Token::LET) {
- bool is_let = peek() == i::Token::LET;
+ Expect(Token::FOR, CHECK_OK);
+ Expect(Token::LPAREN, CHECK_OK);
+ if (peek() != Token::SEMICOLON) {
+ if (peek() == Token::VAR || peek() == Token::CONST ||
+ peek() == Token::LET) {
+ bool is_let = peek() == Token::LET;
int decl_count;
VariableDeclarationProperties decl_props = kHasNoInitializers;
ParseVariableDeclarations(
kForStatement, &decl_props, &decl_count, CHECK_OK);
- bool accept_IN = decl_count == 1 &&
- !(is_let && decl_props == kHasInitializers);
- if (peek() == i::Token::IN && accept_IN) {
- Expect(i::Token::IN, CHECK_OK);
+ bool has_initializers = decl_props == kHasInitializers;
+ bool accept_IN = decl_count == 1 && !(is_let && has_initializers);
+ bool accept_OF = !has_initializers;
+ if (accept_IN && CheckInOrOf(accept_OF)) {
ParseExpression(true, CHECK_OK);
- Expect(i::Token::RPAREN, CHECK_OK);
+ Expect(Token::RPAREN, CHECK_OK);
ParseStatement(CHECK_OK);
return Statement::Default();
}
} else {
- ParseExpression(false, CHECK_OK);
- if (peek() == i::Token::IN) {
- Expect(i::Token::IN, CHECK_OK);
+ Expression lhs = ParseExpression(false, CHECK_OK);
+ if (CheckInOrOf(lhs.IsIdentifier())) {
ParseExpression(true, CHECK_OK);
- Expect(i::Token::RPAREN, CHECK_OK);
+ Expect(Token::RPAREN, CHECK_OK);
ParseStatement(CHECK_OK);
return Statement::Default();
@@ -685,17 +693,17 @@ PreParser::Statement PreParser::ParseForStatement(bool* ok) {
}
// Parsed initializer at this point.
- Expect(i::Token::SEMICOLON, CHECK_OK);
+ Expect(Token::SEMICOLON, CHECK_OK);
- if (peek() != i::Token::SEMICOLON) {
+ if (peek() != Token::SEMICOLON) {
ParseExpression(true, CHECK_OK);
}
- Expect(i::Token::SEMICOLON, CHECK_OK);
+ Expect(Token::SEMICOLON, CHECK_OK);
- if (peek() != i::Token::RPAREN) {
+ if (peek() != Token::RPAREN) {
ParseExpression(true, CHECK_OK);
}
- Expect(i::Token::RPAREN, CHECK_OK);
+ Expect(Token::RPAREN, CHECK_OK);
ParseStatement(ok);
return Statement::Default();
@@ -706,9 +714,9 @@ PreParser::Statement PreParser::ParseThrowStatement(bool* ok) {
// ThrowStatement ::
// 'throw' [no line terminator] Expression ';'
- Expect(i::Token::THROW, CHECK_OK);
- if (scanner_->HasAnyLineTerminatorBeforeNext()) {
- i::Scanner::Location pos = scanner_->location();
+ Expect(Token::THROW, CHECK_OK);
+ if (scanner()->HasAnyLineTerminatorBeforeNext()) {
+ Scanner::Location pos = scanner()->location();
ReportMessageAt(pos, "newline_after_throw", NULL);
*ok = false;
return Statement::Default();
@@ -734,30 +742,30 @@ PreParser::Statement PreParser::ParseTryStatement(bool* ok) {
// In preparsing, allow any number of catch/finally blocks, including zero
// of both.
- Expect(i::Token::TRY, CHECK_OK);
+ Expect(Token::TRY, CHECK_OK);
ParseBlock(CHECK_OK);
bool catch_or_finally_seen = false;
- if (peek() == i::Token::CATCH) {
- Consume(i::Token::CATCH);
- Expect(i::Token::LPAREN, CHECK_OK);
+ if (peek() == Token::CATCH) {
+ Consume(Token::CATCH);
+ Expect(Token::LPAREN, CHECK_OK);
Identifier id = ParseIdentifier(CHECK_OK);
if (!is_classic_mode() && !id.IsValidStrictVariable()) {
- StrictModeIdentifierViolation(scanner_->location(),
+ StrictModeIdentifierViolation(scanner()->location(),
"strict_catch_variable",
id,
ok);
return Statement::Default();
}
- Expect(i::Token::RPAREN, CHECK_OK);
+ Expect(Token::RPAREN, CHECK_OK);
{ Scope::InsideWith iw(scope_);
ParseBlock(CHECK_OK);
}
catch_or_finally_seen = true;
}
- if (peek() == i::Token::FINALLY) {
- Consume(i::Token::FINALLY);
+ if (peek() == Token::FINALLY) {
+ Consume(Token::FINALLY);
ParseBlock(CHECK_OK);
catch_or_finally_seen = true;
}
@@ -775,7 +783,7 @@ PreParser::Statement PreParser::ParseDebuggerStatement(bool* ok) {
// DebuggerStatement ::
// 'debugger' ';'
- Expect(i::Token::DEBUGGER, CHECK_OK);
+ Expect(Token::DEBUGGER, CHECK_OK);
ExpectSemicolon(ok);
return Statement::Default();
}
@@ -796,8 +804,8 @@ PreParser::Expression PreParser::ParseExpression(bool accept_IN, bool* ok) {
// Expression ',' AssignmentExpression
Expression result = ParseAssignmentExpression(accept_IN, CHECK_OK);
- while (peek() == i::Token::COMMA) {
- Expect(i::Token::COMMA, CHECK_OK);
+ while (peek() == Token::COMMA) {
+ Expect(Token::COMMA, CHECK_OK);
ParseAssignmentExpression(accept_IN, CHECK_OK);
result = Expression::Default();
}
@@ -810,12 +818,17 @@ PreParser::Expression PreParser::ParseAssignmentExpression(bool accept_IN,
bool* ok) {
// AssignmentExpression ::
// ConditionalExpression
+ // YieldExpression
// LeftHandSideExpression AssignmentOperator AssignmentExpression
- i::Scanner::Location before = scanner_->peek_location();
+ if (scope_->is_generator() && peek() == Token::YIELD) {
+ return ParseYieldExpression(ok);
+ }
+
+ Scanner::Location before = scanner()->peek_location();
Expression expression = ParseConditionalExpression(accept_IN, CHECK_OK);
- if (!i::Token::IsAssignmentOp(peek())) {
+ if (!Token::IsAssignmentOp(peek())) {
// Parsed conditional expression only (no assignment).
return expression;
}
@@ -823,17 +836,17 @@ PreParser::Expression PreParser::ParseAssignmentExpression(bool accept_IN,
if (!is_classic_mode() &&
expression.IsIdentifier() &&
expression.AsIdentifier().IsEvalOrArguments()) {
- i::Scanner::Location after = scanner_->location();
+ Scanner::Location after = scanner()->location();
ReportMessageAt(before.beg_pos, after.end_pos,
"strict_lhs_assignment", NULL);
*ok = false;
return Expression::Default();
}
- i::Token::Value op = Next(); // Get assignment operator.
+ Token::Value op = Next(); // Get assignment operator.
ParseAssignmentExpression(accept_IN, CHECK_OK);
- if ((op == i::Token::ASSIGN) && expression.IsThisProperty()) {
+ if ((op == Token::ASSIGN) && expression.IsThisProperty()) {
scope_->AddProperty();
}
@@ -842,6 +855,19 @@ PreParser::Expression PreParser::ParseAssignmentExpression(bool accept_IN,
// Precedence = 3
+PreParser::Expression PreParser::ParseYieldExpression(bool* ok) {
+ // YieldExpression ::
+ // 'yield' '*'? AssignmentExpression
+ Consume(Token::YIELD);
+ Check(Token::MUL);
+
+ ParseAssignmentExpression(false, CHECK_OK);
+
+ return Expression::Default();
+}
+
+
+// Precedence = 3
PreParser::Expression PreParser::ParseConditionalExpression(bool accept_IN,
bool* ok) {
// ConditionalExpression ::
@@ -850,26 +876,18 @@ PreParser::Expression PreParser::ParseConditionalExpression(bool accept_IN,
// We start using the binary expression parser for prec >= 4 only!
Expression expression = ParseBinaryExpression(4, accept_IN, CHECK_OK);
- if (peek() != i::Token::CONDITIONAL) return expression;
- Consume(i::Token::CONDITIONAL);
+ if (peek() != Token::CONDITIONAL) return expression;
+ Consume(Token::CONDITIONAL);
// In parsing the first assignment expression in conditional
// expressions we always accept the 'in' keyword; see ECMA-262,
// section 11.12, page 58.
ParseAssignmentExpression(true, CHECK_OK);
- Expect(i::Token::COLON, CHECK_OK);
+ Expect(Token::COLON, CHECK_OK);
ParseAssignmentExpression(accept_IN, CHECK_OK);
return Expression::Default();
}
-int PreParser::Precedence(i::Token::Value tok, bool accept_IN) {
- if (tok == i::Token::IN && !accept_IN)
- return 0; // 0 precedence will terminate binary expression parsing
-
- return i::Token::Precedence(tok);
-}
-
-
// Precedence >= 4
PreParser::Expression PreParser::ParseBinaryExpression(int prec,
bool accept_IN,
@@ -900,19 +918,19 @@ PreParser::Expression PreParser::ParseUnaryExpression(bool* ok) {
// '~' UnaryExpression
// '!' UnaryExpression
- i::Token::Value op = peek();
- if (i::Token::IsUnaryOp(op)) {
+ Token::Value op = peek();
+ if (Token::IsUnaryOp(op)) {
op = Next();
ParseUnaryExpression(ok);
return Expression::Default();
- } else if (i::Token::IsCountOp(op)) {
+ } else if (Token::IsCountOp(op)) {
op = Next();
- i::Scanner::Location before = scanner_->peek_location();
+ Scanner::Location before = scanner()->peek_location();
Expression expression = ParseUnaryExpression(CHECK_OK);
if (!is_classic_mode() &&
expression.IsIdentifier() &&
expression.AsIdentifier().IsEvalOrArguments()) {
- i::Scanner::Location after = scanner_->location();
+ Scanner::Location after = scanner()->location();
ReportMessageAt(before.beg_pos, after.end_pos,
"strict_lhs_prefix", NULL);
*ok = false;
@@ -928,14 +946,14 @@ PreParser::Expression PreParser::ParsePostfixExpression(bool* ok) {
// PostfixExpression ::
// LeftHandSideExpression ('++' | '--')?
- i::Scanner::Location before = scanner_->peek_location();
+ Scanner::Location before = scanner()->peek_location();
Expression expression = ParseLeftHandSideExpression(CHECK_OK);
- if (!scanner_->HasAnyLineTerminatorBeforeNext() &&
- i::Token::IsCountOp(peek())) {
+ if (!scanner()->HasAnyLineTerminatorBeforeNext() &&
+ Token::IsCountOp(peek())) {
if (!is_classic_mode() &&
expression.IsIdentifier() &&
expression.AsIdentifier().IsEvalOrArguments()) {
- i::Scanner::Location after = scanner_->location();
+ Scanner::Location after = scanner()->location();
ReportMessageAt(before.beg_pos, after.end_pos,
"strict_lhs_postfix", NULL);
*ok = false;
@@ -953,7 +971,7 @@ PreParser::Expression PreParser::ParseLeftHandSideExpression(bool* ok) {
// (NewExpression | MemberExpression) ...
Expression result = Expression::Default();
- if (peek() == i::Token::NEW) {
+ if (peek() == Token::NEW) {
result = ParseNewExpression(CHECK_OK);
} else {
result = ParseMemberExpression(CHECK_OK);
@@ -961,10 +979,10 @@ PreParser::Expression PreParser::ParseLeftHandSideExpression(bool* ok) {
while (true) {
switch (peek()) {
- case i::Token::LBRACK: {
- Consume(i::Token::LBRACK);
+ case Token::LBRACK: {
+ Consume(Token::LBRACK);
ParseExpression(true, CHECK_OK);
- Expect(i::Token::RBRACK, CHECK_OK);
+ Expect(Token::RBRACK, CHECK_OK);
if (result.IsThis()) {
result = Expression::ThisProperty();
} else {
@@ -973,14 +991,14 @@ PreParser::Expression PreParser::ParseLeftHandSideExpression(bool* ok) {
break;
}
- case i::Token::LPAREN: {
+ case Token::LPAREN: {
ParseArguments(CHECK_OK);
result = Expression::Default();
break;
}
- case i::Token::PERIOD: {
- Consume(i::Token::PERIOD);
+ case Token::PERIOD: {
+ Consume(Token::PERIOD);
ParseIdentifierName(CHECK_OK);
if (result.IsThis()) {
result = Expression::ThisProperty();
@@ -1011,9 +1029,9 @@ PreParser::Expression PreParser::ParseNewExpression(bool* ok) {
// lists as long as it has 'new' prefixes left
unsigned new_count = 0;
do {
- Consume(i::Token::NEW);
+ Consume(Token::NEW);
new_count++;
- } while (peek() == i::Token::NEW);
+ } while (peek() == Token::NEW);
return ParseMemberWithNewPrefixesExpression(new_count, ok);
}
@@ -1032,15 +1050,17 @@ PreParser::Expression PreParser::ParseMemberWithNewPrefixesExpression(
// Parse the initial primary or function expression.
Expression result = Expression::Default();
- if (peek() == i::Token::FUNCTION) {
- Consume(i::Token::FUNCTION);
+ if (peek() == Token::FUNCTION) {
+ Consume(Token::FUNCTION);
+
+ bool is_generator = allow_generators() && Check(Token::MUL);
Identifier identifier = Identifier::Default();
if (peek_any_identifier()) {
identifier = ParseIdentifier(CHECK_OK);
}
- result = ParseFunctionLiteral(CHECK_OK);
+ result = ParseFunctionLiteral(is_generator, CHECK_OK);
if (result.IsStrictFunction() && !identifier.IsValidStrictVariable()) {
- StrictModeIdentifierViolation(scanner_->location(),
+ StrictModeIdentifierViolation(scanner()->location(),
"strict_function_name",
identifier,
ok);
@@ -1052,10 +1072,10 @@ PreParser::Expression PreParser::ParseMemberWithNewPrefixesExpression(
while (true) {
switch (peek()) {
- case i::Token::LBRACK: {
- Consume(i::Token::LBRACK);
+ case Token::LBRACK: {
+ Consume(Token::LBRACK);
ParseExpression(true, CHECK_OK);
- Expect(i::Token::RBRACK, CHECK_OK);
+ Expect(Token::RBRACK, CHECK_OK);
if (result.IsThis()) {
result = Expression::ThisProperty();
} else {
@@ -1063,8 +1083,8 @@ PreParser::Expression PreParser::ParseMemberWithNewPrefixesExpression(
}
break;
}
- case i::Token::PERIOD: {
- Consume(i::Token::PERIOD);
+ case Token::PERIOD: {
+ Consume(Token::PERIOD);
ParseIdentifierName(CHECK_OK);
if (result.IsThis()) {
result = Expression::ThisProperty();
@@ -1073,7 +1093,7 @@ PreParser::Expression PreParser::ParseMemberWithNewPrefixesExpression(
}
break;
}
- case i::Token::LPAREN: {
+ case Token::LPAREN: {
if (new_count == 0) return result;
// Consume one of the new prefixes (already parsed).
ParseArguments(CHECK_OK);
@@ -1104,74 +1124,59 @@ PreParser::Expression PreParser::ParsePrimaryExpression(bool* ok) {
Expression result = Expression::Default();
switch (peek()) {
- case i::Token::THIS: {
+ case Token::THIS: {
Next();
result = Expression::This();
break;
}
- case i::Token::FUTURE_RESERVED_WORD: {
- Next();
- i::Scanner::Location location = scanner_->location();
- ReportMessageAt(location.beg_pos, location.end_pos,
- "reserved_word", NULL);
- *ok = false;
- return Expression::Default();
- }
-
- case i::Token::FUTURE_STRICT_RESERVED_WORD:
- if (!is_classic_mode()) {
- Next();
- i::Scanner::Location location = scanner_->location();
- ReportMessageAt(location, "strict_reserved_word", NULL);
- *ok = false;
- return Expression::Default();
- }
- // FALLTHROUGH
- case i::Token::IDENTIFIER: {
+ case Token::FUTURE_RESERVED_WORD:
+ case Token::FUTURE_STRICT_RESERVED_WORD:
+ case Token::YIELD:
+ case Token::IDENTIFIER: {
Identifier id = ParseIdentifier(CHECK_OK);
result = Expression::FromIdentifier(id);
break;
}
- case i::Token::NULL_LITERAL:
- case i::Token::TRUE_LITERAL:
- case i::Token::FALSE_LITERAL:
- case i::Token::NUMBER: {
+ case Token::NULL_LITERAL:
+ case Token::TRUE_LITERAL:
+ case Token::FALSE_LITERAL:
+ case Token::NUMBER: {
Next();
break;
}
- case i::Token::STRING: {
+ case Token::STRING: {
Next();
result = GetStringSymbol();
break;
}
- case i::Token::ASSIGN_DIV:
+ case Token::ASSIGN_DIV:
result = ParseRegExpLiteral(true, CHECK_OK);
break;
- case i::Token::DIV:
+ case Token::DIV:
result = ParseRegExpLiteral(false, CHECK_OK);
break;
- case i::Token::LBRACK:
+ case Token::LBRACK:
result = ParseArrayLiteral(CHECK_OK);
break;
- case i::Token::LBRACE:
+ case Token::LBRACE:
result = ParseObjectLiteral(CHECK_OK);
break;
- case i::Token::LPAREN:
- Consume(i::Token::LPAREN);
- parenthesized_function_ = (peek() == i::Token::FUNCTION);
+ case Token::LPAREN:
+ Consume(Token::LPAREN);
+ parenthesized_function_ = (peek() == Token::FUNCTION);
result = ParseExpression(true, CHECK_OK);
- Expect(i::Token::RPAREN, CHECK_OK);
+ Expect(Token::RPAREN, CHECK_OK);
result = result.Parenthesize();
break;
- case i::Token::MOD:
+ case Token::MOD:
result = ParseV8Intrinsic(CHECK_OK);
break;
@@ -1189,54 +1194,21 @@ PreParser::Expression PreParser::ParsePrimaryExpression(bool* ok) {
PreParser::Expression PreParser::ParseArrayLiteral(bool* ok) {
// ArrayLiteral ::
// '[' Expression? (',' Expression?)* ']'
- Expect(i::Token::LBRACK, CHECK_OK);
- while (peek() != i::Token::RBRACK) {
- if (peek() != i::Token::COMMA) {
+ Expect(Token::LBRACK, CHECK_OK);
+ while (peek() != Token::RBRACK) {
+ if (peek() != Token::COMMA) {
ParseAssignmentExpression(true, CHECK_OK);
}
- if (peek() != i::Token::RBRACK) {
- Expect(i::Token::COMMA, CHECK_OK);
+ if (peek() != Token::RBRACK) {
+ Expect(Token::COMMA, CHECK_OK);
}
}
- Expect(i::Token::RBRACK, CHECK_OK);
+ Expect(Token::RBRACK, CHECK_OK);
scope_->NextMaterializedLiteralIndex();
return Expression::Default();
}
-void PreParser::CheckDuplicate(DuplicateFinder* finder,
- i::Token::Value property,
- int type,
- bool* ok) {
- int old_type;
- if (property == i::Token::NUMBER) {
- old_type = finder->AddNumber(scanner_->literal_ascii_string(), type);
- } else if (scanner_->is_literal_ascii()) {
- old_type = finder->AddAsciiSymbol(scanner_->literal_ascii_string(),
- type);
- } else {
- old_type = finder->AddUtf16Symbol(scanner_->literal_utf16_string(), type);
- }
- if (HasConflict(old_type, type)) {
- if (IsDataDataConflict(old_type, type)) {
- // Both are data properties.
- if (is_classic_mode()) return;
- ReportMessageAt(scanner_->location(),
- "strict_duplicate_property", NULL);
- } else if (IsDataAccessorConflict(old_type, type)) {
- // Both a data and an accessor property with the same name.
- ReportMessageAt(scanner_->location(),
- "accessor_data_property", NULL);
- } else {
- ASSERT(IsAccessorAccessorConflict(old_type, type));
- // Both accessors of the same type.
- ReportMessageAt(scanner_->location(),
- "accessor_get_set", NULL);
- }
- *ok = false;
- }
-}
-
PreParser::Expression PreParser::ParseObjectLiteral(bool* ok) {
// ObjectLiteral ::
@@ -1245,25 +1217,26 @@ PreParser::Expression PreParser::ParseObjectLiteral(bool* ok) {
// | (('get' | 'set') (IdentifierName | String | Number) FunctionLiteral)
// )*[','] '}'
- Expect(i::Token::LBRACE, CHECK_OK);
- DuplicateFinder duplicate_finder(scanner_->unicode_cache());
- while (peek() != i::Token::RBRACE) {
- i::Token::Value next = peek();
+ ObjectLiteralChecker checker(this, language_mode());
+
+ Expect(Token::LBRACE, CHECK_OK);
+ while (peek() != Token::RBRACE) {
+ Token::Value next = peek();
switch (next) {
- case i::Token::IDENTIFIER:
- case i::Token::FUTURE_RESERVED_WORD:
- case i::Token::FUTURE_STRICT_RESERVED_WORD: {
+ case Token::IDENTIFIER:
+ case Token::FUTURE_RESERVED_WORD:
+ case Token::FUTURE_STRICT_RESERVED_WORD: {
bool is_getter = false;
bool is_setter = false;
ParseIdentifierNameOrGetOrSet(&is_getter, &is_setter, CHECK_OK);
- if ((is_getter || is_setter) && peek() != i::Token::COLON) {
- i::Token::Value name = Next();
- bool is_keyword = i::Token::IsKeyword(name);
- if (name != i::Token::IDENTIFIER &&
- name != i::Token::FUTURE_RESERVED_WORD &&
- name != i::Token::FUTURE_STRICT_RESERVED_WORD &&
- name != i::Token::NUMBER &&
- name != i::Token::STRING &&
+ if ((is_getter || is_setter) && peek() != Token::COLON) {
+ Token::Value name = Next();
+ bool is_keyword = Token::IsKeyword(name);
+ if (name != Token::IDENTIFIER &&
+ name != Token::FUTURE_RESERVED_WORD &&
+ name != Token::FUTURE_STRICT_RESERVED_WORD &&
+ name != Token::NUMBER &&
+ name != Token::STRING &&
!is_keyword) {
*ok = false;
return Expression::Default();
@@ -1271,30 +1244,30 @@ PreParser::Expression PreParser::ParseObjectLiteral(bool* ok) {
if (!is_keyword) {
LogSymbol();
}
- PropertyType type = is_getter ? kGetterProperty : kSetterProperty;
- CheckDuplicate(&duplicate_finder, name, type, CHECK_OK);
- ParseFunctionLiteral(CHECK_OK);
- if (peek() != i::Token::RBRACE) {
- Expect(i::Token::COMMA, CHECK_OK);
+ PropertyKind type = is_getter ? kGetterProperty : kSetterProperty;
+ checker.CheckProperty(name, type, CHECK_OK);
+ ParseFunctionLiteral(false, CHECK_OK);
+ if (peek() != Token::RBRACE) {
+ Expect(Token::COMMA, CHECK_OK);
}
continue; // restart the while
}
- CheckDuplicate(&duplicate_finder, next, kValueProperty, CHECK_OK);
+ checker.CheckProperty(next, kValueProperty, CHECK_OK);
break;
}
- case i::Token::STRING:
+ case Token::STRING:
Consume(next);
- CheckDuplicate(&duplicate_finder, next, kValueProperty, CHECK_OK);
+ checker.CheckProperty(next, kValueProperty, CHECK_OK);
GetStringSymbol();
break;
- case i::Token::NUMBER:
+ case Token::NUMBER:
Consume(next);
- CheckDuplicate(&duplicate_finder, next, kValueProperty, CHECK_OK);
+ checker.CheckProperty(next, kValueProperty, CHECK_OK);
break;
default:
- if (i::Token::IsKeyword(next)) {
+ if (Token::IsKeyword(next)) {
Consume(next);
- CheckDuplicate(&duplicate_finder, next, kValueProperty, CHECK_OK);
+ checker.CheckProperty(next, kValueProperty, CHECK_OK);
} else {
// Unexpected token.
*ok = false;
@@ -1302,13 +1275,13 @@ PreParser::Expression PreParser::ParseObjectLiteral(bool* ok) {
}
}
- Expect(i::Token::COLON, CHECK_OK);
+ Expect(Token::COLON, CHECK_OK);
ParseAssignmentExpression(true, CHECK_OK);
// TODO(1240767): Consider allowing trailing comma.
- if (peek() != i::Token::RBRACE) Expect(i::Token::COMMA, CHECK_OK);
+ if (peek() != Token::RBRACE) Expect(Token::COMMA, CHECK_OK);
}
- Expect(i::Token::RBRACE, CHECK_OK);
+ Expect(Token::RBRACE, CHECK_OK);
scope_->NextMaterializedLiteralIndex();
return Expression::Default();
@@ -1317,18 +1290,18 @@ PreParser::Expression PreParser::ParseObjectLiteral(bool* ok) {
PreParser::Expression PreParser::ParseRegExpLiteral(bool seen_equal,
bool* ok) {
- if (!scanner_->ScanRegExpPattern(seen_equal)) {
+ if (!scanner()->ScanRegExpPattern(seen_equal)) {
Next();
- ReportMessageAt(scanner_->location(), "unterminated_regexp", NULL);
+ ReportMessageAt(scanner()->location(), "unterminated_regexp", NULL);
*ok = false;
return Expression::Default();
}
scope_->NextMaterializedLiteralIndex();
- if (!scanner_->ScanRegExpFlags()) {
+ if (!scanner()->ScanRegExpFlags()) {
Next();
- ReportMessageAt(scanner_->location(), "invalid_regexp_flags", NULL);
+ ReportMessageAt(scanner()->location(), "invalid_regexp_flags", NULL);
*ok = false;
return Expression::Default();
}
@@ -1341,26 +1314,27 @@ PreParser::Arguments PreParser::ParseArguments(bool* ok) {
// Arguments ::
// '(' (AssignmentExpression)*[','] ')'
- Expect(i::Token::LPAREN, ok);
+ Expect(Token::LPAREN, ok);
if (!*ok) return -1;
- bool done = (peek() == i::Token::RPAREN);
+ bool done = (peek() == Token::RPAREN);
int argc = 0;
while (!done) {
ParseAssignmentExpression(true, ok);
if (!*ok) return -1;
argc++;
- done = (peek() == i::Token::RPAREN);
+ done = (peek() == Token::RPAREN);
if (!done) {
- Expect(i::Token::COMMA, ok);
+ Expect(Token::COMMA, ok);
if (!*ok) return -1;
}
}
- Expect(i::Token::RPAREN, ok);
+ Expect(Token::RPAREN, ok);
return argc;
}
-PreParser::Expression PreParser::ParseFunctionLiteral(bool* ok) {
+PreParser::Expression PreParser::ParseFunctionLiteral(bool is_generator,
+ bool* ok) {
// Function ::
// '(' FormalParameterList? ')' '{' FunctionBody '}'
@@ -1368,59 +1342,60 @@ PreParser::Expression PreParser::ParseFunctionLiteral(bool* ok) {
ScopeType outer_scope_type = scope_->type();
bool inside_with = scope_->IsInsideWith();
Scope function_scope(&scope_, kFunctionScope);
+ function_scope.set_is_generator(is_generator);
// FormalParameterList ::
// '(' (Identifier)*[','] ')'
- Expect(i::Token::LPAREN, CHECK_OK);
- int start_position = scanner_->location().beg_pos;
- bool done = (peek() == i::Token::RPAREN);
- DuplicateFinder duplicate_finder(scanner_->unicode_cache());
+ Expect(Token::LPAREN, CHECK_OK);
+ int start_position = position();
+ bool done = (peek() == Token::RPAREN);
+ DuplicateFinder duplicate_finder(scanner()->unicode_cache());
while (!done) {
Identifier id = ParseIdentifier(CHECK_OK);
if (!id.IsValidStrictVariable()) {
- StrictModeIdentifierViolation(scanner_->location(),
+ StrictModeIdentifierViolation(scanner()->location(),
"strict_param_name",
id,
CHECK_OK);
}
int prev_value;
- if (scanner_->is_literal_ascii()) {
+ if (scanner()->is_literal_ascii()) {
prev_value =
- duplicate_finder.AddAsciiSymbol(scanner_->literal_ascii_string(), 1);
+ duplicate_finder.AddAsciiSymbol(scanner()->literal_ascii_string(), 1);
} else {
prev_value =
- duplicate_finder.AddUtf16Symbol(scanner_->literal_utf16_string(), 1);
+ duplicate_finder.AddUtf16Symbol(scanner()->literal_utf16_string(), 1);
}
if (prev_value != 0) {
- SetStrictModeViolation(scanner_->location(),
+ SetStrictModeViolation(scanner()->location(),
"strict_param_dupe",
CHECK_OK);
}
- done = (peek() == i::Token::RPAREN);
+ done = (peek() == Token::RPAREN);
if (!done) {
- Expect(i::Token::COMMA, CHECK_OK);
+ Expect(Token::COMMA, CHECK_OK);
}
}
- Expect(i::Token::RPAREN, CHECK_OK);
+ Expect(Token::RPAREN, CHECK_OK);
// Determine if the function will be lazily compiled.
// Currently only happens to top-level functions.
// Optimistically assume that all top-level functions are lazily compiled.
bool is_lazily_compiled = (outer_scope_type == kTopLevelScope &&
- !inside_with && allow_lazy_ &&
+ !inside_with && allow_lazy() &&
!parenthesized_function_);
parenthesized_function_ = false;
- Expect(i::Token::LBRACE, CHECK_OK);
+ Expect(Token::LBRACE, CHECK_OK);
if (is_lazily_compiled) {
ParseLazyFunctionLiteralBody(CHECK_OK);
} else {
- ParseSourceElements(i::Token::RBRACE, ok);
+ ParseSourceElements(Token::RBRACE, ok);
}
- Expect(i::Token::RBRACE, CHECK_OK);
+ Expect(Token::RBRACE, CHECK_OK);
if (!is_classic_mode()) {
- int end_position = scanner_->location().end_pos;
+ int end_position = scanner()->location().end_pos;
CheckOctalLiteral(start_position, end_position, CHECK_OK);
CheckDelayedStrictModeViolation(start_position, end_position, CHECK_OK);
return Expression::StrictFunction();
@@ -1431,15 +1406,15 @@ PreParser::Expression PreParser::ParseFunctionLiteral(bool* ok) {
void PreParser::ParseLazyFunctionLiteralBody(bool* ok) {
- int body_start = scanner_->location().beg_pos;
+ int body_start = position();
log_->PauseRecording();
- ParseSourceElements(i::Token::RBRACE, ok);
+ ParseSourceElements(Token::RBRACE, ok);
log_->ResumeRecording();
if (!*ok) return;
// Position right after terminal '}'.
- ASSERT_EQ(i::Token::RBRACE, scanner_->peek());
- int body_end = scanner_->peek_location().end_pos;
+ ASSERT_EQ(Token::RBRACE, scanner()->peek());
+ int body_end = scanner()->peek_location().end_pos;
log_->LogFunction(body_start, body_end,
scope_->materialized_literal_count(),
scope_->expected_properties(),
@@ -1450,8 +1425,8 @@ void PreParser::ParseLazyFunctionLiteralBody(bool* ok) {
PreParser::Expression PreParser::ParseV8Intrinsic(bool* ok) {
// CallRuntime ::
// '%' Identifier Arguments
- Expect(i::Token::MOD, CHECK_OK);
- if (!allow_natives_syntax_) {
+ Expect(Token::MOD, CHECK_OK);
+ if (!allow_natives_syntax()) {
*ok = false;
return Expression::Default();
}
@@ -1464,29 +1439,12 @@ PreParser::Expression PreParser::ParseV8Intrinsic(bool* ok) {
#undef CHECK_OK
-void PreParser::ExpectSemicolon(bool* ok) {
- // Check for automatic semicolon insertion according to
- // the rules given in ECMA-262, section 7.9, page 21.
- i::Token::Value tok = peek();
- if (tok == i::Token::SEMICOLON) {
- Next();
- return;
- }
- if (scanner_->HasAnyLineTerminatorBeforeNext() ||
- tok == i::Token::RBRACE ||
- tok == i::Token::EOS) {
- return;
- }
- Expect(i::Token::SEMICOLON, ok);
-}
-
-
void PreParser::LogSymbol() {
- int identifier_pos = scanner_->location().beg_pos;
- if (scanner_->is_literal_ascii()) {
- log_->LogAsciiSymbol(identifier_pos, scanner_->literal_ascii_string());
+ int identifier_pos = position();
+ if (scanner()->is_literal_ascii()) {
+ log_->LogAsciiSymbol(identifier_pos, scanner()->literal_ascii_string());
} else {
- log_->LogUtf16Symbol(identifier_pos, scanner_->literal_utf16_string());
+ log_->LogUtf16Symbol(identifier_pos, scanner()->literal_utf16_string());
}
}
@@ -1495,10 +1453,10 @@ PreParser::Expression PreParser::GetStringSymbol() {
const int kUseStrictLength = 10;
const char* kUseStrictChars = "use strict";
LogSymbol();
- if (scanner_->is_literal_ascii() &&
- scanner_->literal_length() == kUseStrictLength &&
- !scanner_->literal_contains_escapes() &&
- !strncmp(scanner_->literal_ascii_string().start(), kUseStrictChars,
+ if (scanner()->is_literal_ascii() &&
+ scanner()->literal_length() == kUseStrictLength &&
+ !scanner()->literal_contains_escapes() &&
+ !strncmp(scanner()->literal_ascii_string().start(), kUseStrictChars,
kUseStrictLength)) {
return Expression::UseStrictStringLiteral();
}
@@ -1508,20 +1466,22 @@ PreParser::Expression PreParser::GetStringSymbol() {
PreParser::Identifier PreParser::GetIdentifierSymbol() {
LogSymbol();
- if (scanner_->current_token() == i::Token::FUTURE_RESERVED_WORD) {
+ if (scanner()->current_token() == Token::FUTURE_RESERVED_WORD) {
return Identifier::FutureReserved();
- } else if (scanner_->current_token() ==
- i::Token::FUTURE_STRICT_RESERVED_WORD) {
+ } else if (scanner()->current_token() ==
+ Token::FUTURE_STRICT_RESERVED_WORD) {
return Identifier::FutureStrictReserved();
+ } else if (scanner()->current_token() == Token::YIELD) {
+ return Identifier::Yield();
}
- if (scanner_->is_literal_ascii()) {
+ if (scanner()->is_literal_ascii()) {
// Detect strict-mode poison words.
- if (scanner_->literal_length() == 4 &&
- !strncmp(scanner_->literal_ascii_string().start(), "eval", 4)) {
+ if (scanner()->literal_length() == 4 &&
+ !strncmp(scanner()->literal_ascii_string().start(), "eval", 4)) {
return Identifier::Eval();
}
- if (scanner_->literal_length() == 9 &&
- !strncmp(scanner_->literal_ascii_string().start(), "arguments", 9)) {
+ if (scanner()->literal_length() == 9 &&
+ !strncmp(scanner()->literal_ascii_string().start(), "arguments", 9)) {
return Identifier::Arguments();
}
}
@@ -1530,24 +1490,32 @@ PreParser::Identifier PreParser::GetIdentifierSymbol() {
PreParser::Identifier PreParser::ParseIdentifier(bool* ok) {
- i::Token::Value next = Next();
+ Token::Value next = Next();
switch (next) {
- case i::Token::FUTURE_RESERVED_WORD: {
- i::Scanner::Location location = scanner_->location();
+ case Token::FUTURE_RESERVED_WORD: {
+ Scanner::Location location = scanner()->location();
ReportMessageAt(location.beg_pos, location.end_pos,
"reserved_word", NULL);
*ok = false;
return GetIdentifierSymbol();
}
- case i::Token::FUTURE_STRICT_RESERVED_WORD:
+ case Token::YIELD:
+ if (scope_->is_generator()) {
+ // 'yield' in a generator is only valid as part of a YieldExpression.
+ ReportMessageAt(scanner()->location(), "unexpected_token", "yield");
+ *ok = false;
+ return Identifier::Yield();
+ }
+ // FALLTHROUGH
+ case Token::FUTURE_STRICT_RESERVED_WORD:
if (!is_classic_mode()) {
- i::Scanner::Location location = scanner_->location();
+ Scanner::Location location = scanner()->location();
ReportMessageAt(location.beg_pos, location.end_pos,
"strict_reserved_word", NULL);
*ok = false;
}
// FALLTHROUGH
- case i::Token::IDENTIFIER:
+ case Token::IDENTIFIER:
return GetIdentifierSymbol();
default:
*ok = false;
@@ -1556,7 +1524,7 @@ PreParser::Identifier PreParser::ParseIdentifier(bool* ok) {
}
-void PreParser::SetStrictModeViolation(i::Scanner::Location location,
+void PreParser::SetStrictModeViolation(Scanner::Location location,
const char* type,
bool* ok) {
if (!is_classic_mode()) {
@@ -1580,7 +1548,7 @@ void PreParser::SetStrictModeViolation(i::Scanner::Location location,
void PreParser::CheckDelayedStrictModeViolation(int beg_pos,
int end_pos,
bool* ok) {
- i::Scanner::Location location = strict_mode_violation_location_;
+ Scanner::Location location = strict_mode_violation_location_;
if (location.IsValid() &&
location.beg_pos > beg_pos && location.end_pos < end_pos) {
ReportMessageAt(location, strict_mode_violation_type_, NULL);
@@ -1589,14 +1557,14 @@ void PreParser::CheckDelayedStrictModeViolation(int beg_pos,
}
-void PreParser::StrictModeIdentifierViolation(i::Scanner::Location location,
+void PreParser::StrictModeIdentifierViolation(Scanner::Location location,
const char* eval_args_type,
Identifier identifier,
bool* ok) {
const char* type = eval_args_type;
if (identifier.IsFutureReserved()) {
type = "reserved_word";
- } else if (identifier.IsFutureStrictReserved()) {
+ } else if (identifier.IsFutureStrictReserved() || identifier.IsYield()) {
type = "strict_reserved_word";
}
if (!is_classic_mode()) {
@@ -1610,17 +1578,16 @@ void PreParser::StrictModeIdentifierViolation(i::Scanner::Location location,
PreParser::Identifier PreParser::ParseIdentifierName(bool* ok) {
- i::Token::Value next = Next();
- if (i::Token::IsKeyword(next)) {
- int pos = scanner_->location().beg_pos;
- const char* keyword = i::Token::String(next);
- log_->LogAsciiSymbol(pos, i::Vector<const char>(keyword,
- i::StrLength(keyword)));
+ Token::Value next = Next();
+ if (Token::IsKeyword(next)) {
+ int pos = position();
+ const char* keyword = Token::String(next);
+ log_->LogAsciiSymbol(pos, Vector<const char>(keyword, StrLength(keyword)));
return Identifier::Default();
}
- if (next == i::Token::IDENTIFIER ||
- next == i::Token::FUTURE_RESERVED_WORD ||
- next == i::Token::FUTURE_STRICT_RESERVED_WORD) {
+ if (next == Token::IDENTIFIER ||
+ next == Token::FUTURE_RESERVED_WORD ||
+ next == Token::FUTURE_STRICT_RESERVED_WORD) {
return GetIdentifierSymbol();
}
*ok = false;
@@ -1637,153 +1604,46 @@ PreParser::Identifier PreParser::ParseIdentifierNameOrGetOrSet(bool* is_get,
bool* ok) {
Identifier result = ParseIdentifierName(ok);
if (!*ok) return Identifier::Default();
- if (scanner_->is_literal_ascii() &&
- scanner_->literal_length() == 3) {
- const char* token = scanner_->literal_ascii_string().start();
+ if (scanner()->is_literal_ascii() &&
+ scanner()->literal_length() == 3) {
+ const char* token = scanner()->literal_ascii_string().start();
*is_get = strncmp(token, "get", 3) == 0;
*is_set = !*is_get && strncmp(token, "set", 3) == 0;
}
return result;
}
-bool PreParser::peek_any_identifier() {
- i::Token::Value next = peek();
- return next == i::Token::IDENTIFIER ||
- next == i::Token::FUTURE_RESERVED_WORD ||
- next == i::Token::FUTURE_STRICT_RESERVED_WORD;
-}
-
-
-int DuplicateFinder::AddAsciiSymbol(i::Vector<const char> key, int value) {
- return AddSymbol(i::Vector<const byte>::cast(key), true, value);
-}
-
-int DuplicateFinder::AddUtf16Symbol(i::Vector<const uint16_t> key, int value) {
- return AddSymbol(i::Vector<const byte>::cast(key), false, value);
-}
-
-int DuplicateFinder::AddSymbol(i::Vector<const byte> key,
- bool is_ascii,
- int value) {
- uint32_t hash = Hash(key, is_ascii);
- byte* encoding = BackupKey(key, is_ascii);
- i::HashMap::Entry* entry = map_.Lookup(encoding, hash, true);
- int old_value = static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
- entry->value =
- reinterpret_cast<void*>(static_cast<intptr_t>(value | old_value));
- return old_value;
-}
-
-
-int DuplicateFinder::AddNumber(i::Vector<const char> key, int value) {
- ASSERT(key.length() > 0);
- // Quick check for already being in canonical form.
- if (IsNumberCanonical(key)) {
- return AddAsciiSymbol(key, value);
- }
-
- int flags = i::ALLOW_HEX | i::ALLOW_OCTALS;
- double double_value = StringToDouble(unicode_constants_, key, flags, 0.0);
- int length;
- const char* string;
- if (!isfinite(double_value)) {
- string = "Infinity";
- length = 8; // strlen("Infinity");
- } else {
- string = DoubleToCString(double_value,
- i::Vector<char>(number_buffer_, kBufferSize));
- length = i::StrLength(string);
- }
- return AddSymbol(i::Vector<const byte>(reinterpret_cast<const byte*>(string),
- length), true, value);
-}
-
-bool DuplicateFinder::IsNumberCanonical(i::Vector<const char> number) {
- // Test for a safe approximation of number literals that are already
- // in canonical form: max 15 digits, no leading zeroes, except an
- // integer part that is a single zero, and no trailing zeros below
- // the decimal point.
- int pos = 0;
- int length = number.length();
- if (number.length() > 15) return false;
- if (number[pos] == '0') {
- pos++;
+void PreParser::ObjectLiteralChecker::CheckProperty(Token::Value property,
+ PropertyKind type,
+ bool* ok) {
+ int old;
+ if (property == Token::NUMBER) {
+ old = finder_.AddNumber(scanner()->literal_ascii_string(), type);
+ } else if (scanner()->is_literal_ascii()) {
+ old = finder_.AddAsciiSymbol(scanner()->literal_ascii_string(), type);
} else {
- while (pos < length &&
- static_cast<unsigned>(number[pos] - '0') <= ('9' - '0')) pos++;
- }
- if (length == pos) return true;
- if (number[pos] != '.') return false;
- pos++;
- bool invalid_last_digit = true;
- while (pos < length) {
- byte digit = number[pos] - '0';
- if (digit > '9' - '0') return false;
- invalid_last_digit = (digit == 0);
- pos++;
+ old = finder_.AddUtf16Symbol(scanner()->literal_utf16_string(), type);
}
- return !invalid_last_digit;
-}
-
-
-uint32_t DuplicateFinder::Hash(i::Vector<const byte> key, bool is_ascii) {
- // Primitive hash function, almost identical to the one used
- // for strings (except that it's seeded by the length and ASCII-ness).
- int length = key.length();
- uint32_t hash = (length << 1) | (is_ascii ? 1 : 0) ;
- for (int i = 0; i < length; i++) {
- uint32_t c = key[i];
- hash = (hash + c) * 1025;
- hash ^= (hash >> 6);
- }
- return hash;
-}
-
-
-bool DuplicateFinder::Match(void* first, void* second) {
- // Decode lengths.
- // Length + ASCII-bit is encoded as base 128, most significant heptet first,
- // with a 8th bit being non-zero while there are more heptets.
- // The value encodes the number of bytes following, and whether the original
- // was ASCII.
- byte* s1 = reinterpret_cast<byte*>(first);
- byte* s2 = reinterpret_cast<byte*>(second);
- uint32_t length_ascii_field = 0;
- byte c1;
- do {
- c1 = *s1;
- if (c1 != *s2) return false;
- length_ascii_field = (length_ascii_field << 7) | (c1 & 0x7f);
- s1++;
- s2++;
- } while ((c1 & 0x80) != 0);
- int length = static_cast<int>(length_ascii_field >> 1);
- return memcmp(s1, s2, length) == 0;
-}
-
-
-byte* DuplicateFinder::BackupKey(i::Vector<const byte> bytes,
- bool is_ascii) {
- uint32_t ascii_length = (bytes.length() << 1) | (is_ascii ? 1 : 0);
- backing_store_.StartSequence();
- // Emit ascii_length as base-128 encoded number, with the 7th bit set
- // on the byte of every heptet except the last, least significant, one.
- if (ascii_length >= (1 << 7)) {
- if (ascii_length >= (1 << 14)) {
- if (ascii_length >= (1 << 21)) {
- if (ascii_length >= (1 << 28)) {
- backing_store_.Add(static_cast<byte>((ascii_length >> 28) | 0x80));
- }
- backing_store_.Add(static_cast<byte>((ascii_length >> 21) | 0x80u));
- }
- backing_store_.Add(static_cast<byte>((ascii_length >> 14) | 0x80u));
+ PropertyKind old_type = static_cast<PropertyKind>(old);
+ if (HasConflict(old_type, type)) {
+ if (IsDataDataConflict(old_type, type)) {
+ // Both are data properties.
+ if (language_mode_ == CLASSIC_MODE) return;
+ parser()->ReportMessageAt(scanner()->location(),
+ "strict_duplicate_property");
+ } else if (IsDataAccessorConflict(old_type, type)) {
+ // Both a data and an accessor property with the same name.
+ parser()->ReportMessageAt(scanner()->location(),
+ "accessor_data_property");
+ } else {
+ ASSERT(IsAccessorAccessorConflict(old_type, type));
+ // Both accessors of the same type.
+ parser()->ReportMessageAt(scanner()->location(),
+ "accessor_get_set");
}
- backing_store_.Add(static_cast<byte>((ascii_length >> 7) | 0x80u));
+ *ok = false;
}
- backing_store_.Add(static_cast<byte>(ascii_length & 0x7f));
-
- backing_store_.AddBlock(bytes);
- return backing_store_.EndSequence().start();
}
-} } // v8::preparser
+
+} } // v8::internal
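
As context for the removed DuplicateFinder above, a standalone sketch of its key layout (the names and the std::vector backing store are assumptions for illustration, not V8 API): the value (length << 1) | is_ascii is written base-128, most significant heptet first, with the continuation bit set on every byte except the last, followed by the raw key bytes. Encoding the length first keeps a shorter key from matching as a prefix of a longer one.

#include <cstdint>
#include <cstring>
#include <vector>

static void EmitLengthAndFlag(std::vector<uint8_t>* out,
                              uint32_t length, bool is_ascii) {
  uint32_t value = (length << 1) | (is_ascii ? 1 : 0);
  bool started = false;
  for (int shift = 28; shift > 0; shift -= 7) {
    uint32_t heptet = (value >> shift) & 0x7f;
    if (heptet != 0 || started) {
      out->push_back(static_cast<uint8_t>(heptet | 0x80));  // continuation bit
      started = true;
    }
  }
  out->push_back(static_cast<uint8_t>(value & 0x7f));       // last heptet
}

static bool KeysMatch(const uint8_t* a, const uint8_t* b) {
  uint32_t value = 0;
  uint8_t c;
  do {                        // decode both length prefixes in lockstep
    c = *a;
    if (c != *b) return false;
    value = (value << 7) | (c & 0x7f);
    a++;
    b++;
  } while ((c & 0x80) != 0);
  uint32_t length = value >> 1;               // drop the ASCII bit
  return memcmp(a, b, length) == 0;
}
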
diff --git a/deps/v8/src/preparser.h b/deps/v8/src/preparser.h
index ad52d74bb..e99b4b0a1 100644
--- a/deps/v8/src/preparser.h
+++ b/deps/v8/src/preparser.h
@@ -33,14 +33,178 @@
#include "scanner.h"
namespace v8 {
-
namespace internal {
-class UnicodeCache;
-}
-namespace preparser {
+// Common base class shared between parser and pre-parser.
+class ParserBase {
+ public:
+ ParserBase(Scanner* scanner, uintptr_t stack_limit)
+ : scanner_(scanner),
+ stack_limit_(stack_limit),
+ stack_overflow_(false),
+ allow_lazy_(false),
+ allow_natives_syntax_(false),
+ allow_generators_(false),
+ allow_for_of_(false) { }
+ // TODO(mstarzinger): Only virtual until message reporting has been unified.
+ virtual ~ParserBase() { }
+
+ // Getters that indicate whether certain syntactical constructs are
+ // allowed to be parsed by this instance of the parser.
+ bool allow_lazy() const { return allow_lazy_; }
+ bool allow_natives_syntax() const { return allow_natives_syntax_; }
+ bool allow_generators() const { return allow_generators_; }
+ bool allow_for_of() const { return allow_for_of_; }
+ bool allow_modules() const { return scanner()->HarmonyModules(); }
+ bool allow_harmony_scoping() const { return scanner()->HarmonyScoping(); }
+ bool allow_harmony_numeric_literals() const {
+ return scanner()->HarmonyNumericLiterals();
+ }
+
+ // Setters that determine whether certain syntactical constructs are
+ // allowed to be parsed by this instance of the parser.
+ void set_allow_lazy(bool allow) { allow_lazy_ = allow; }
+ void set_allow_natives_syntax(bool allow) { allow_natives_syntax_ = allow; }
+ void set_allow_generators(bool allow) { allow_generators_ = allow; }
+ void set_allow_for_of(bool allow) { allow_for_of_ = allow; }
+ void set_allow_modules(bool allow) { scanner()->SetHarmonyModules(allow); }
+ void set_allow_harmony_scoping(bool allow) {
+ scanner()->SetHarmonyScoping(allow);
+ }
+ void set_allow_harmony_numeric_literals(bool allow) {
+ scanner()->SetHarmonyNumericLiterals(allow);
+ }
+
+ protected:
+ Scanner* scanner() const { return scanner_; }
+ int position() { return scanner_->location().beg_pos; }
+ int peek_position() { return scanner_->peek_location().beg_pos; }
+ bool stack_overflow() const { return stack_overflow_; }
+ void set_stack_overflow() { stack_overflow_ = true; }
+
+ INLINE(Token::Value peek()) {
+ if (stack_overflow_) return Token::ILLEGAL;
+ return scanner()->peek();
+ }
+
+ INLINE(Token::Value Next()) {
+ if (stack_overflow_) return Token::ILLEGAL;
+ {
+ int marker;
+ if (reinterpret_cast<uintptr_t>(&marker) < stack_limit_) {
+ // Any further calls to Next or peek will return the illegal token.
+ // The current call must return the next token, which might already
+ // have been peek'ed.
+ stack_overflow_ = true;
+ }
+ }
+ return scanner()->Next();
+ }
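
The overflow check above relies on the address of a local variable approximating the current stack pointer. A minimal sketch of that trick under the same assumption V8 makes (a downward-growing stack); the names are illustrative, not V8 API:

#include <cstdint>

// Returns a limit 'gap' bytes below the current stack position; deeper frames
// can compare a local's address against it to notice they are running out of
// stack before they actually overflow.
static uintptr_t ComputeStackLimit(uintptr_t gap) {
  int here;
  return reinterpret_cast<uintptr_t>(&here) - gap;
}

static bool CloseToStackLimit(uintptr_t limit) {
  int marker;
  // On a downward-growing stack, deeper calls live at smaller addresses.
  return reinterpret_cast<uintptr_t>(&marker) < limit;
}
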
+
+ void Consume(Token::Value token) {
+ Token::Value next = Next();
+ USE(next);
+ USE(token);
+ ASSERT(next == token);
+ }
+
+ bool Check(Token::Value token) {
+ Token::Value next = peek();
+ if (next == token) {
+ Consume(next);
+ return true;
+ }
+ return false;
+ }
+
+ void Expect(Token::Value token, bool* ok) {
+ Token::Value next = Next();
+ if (next != token) {
+ ReportUnexpectedToken(next);
+ *ok = false;
+ }
+ }
+
+ bool peek_any_identifier();
+ void ExpectSemicolon(bool* ok);
+ bool CheckContextualKeyword(Vector<const char> keyword);
+ void ExpectContextualKeyword(Vector<const char> keyword, bool* ok);
+
+ // Strict mode octal literal validation.
+ void CheckOctalLiteral(int beg_pos, int end_pos, bool* ok);
+
+ // Determine precedence of given token.
+ static int Precedence(Token::Value token, bool accept_IN);
+
+ // Report syntax errors.
+ virtual void ReportUnexpectedToken(Token::Value token) = 0;
+ virtual void ReportMessageAt(Scanner::Location loc, const char* type) = 0;
+
+ // Used to detect duplicates in object literals. Each of the values
+ // kGetterProperty, kSetterProperty and kValueProperty represents
+ // a type of object literal property. When parsing a property, its
+ // type value is stored in the DuplicateFinder for the property name.
+  // Values are chosen so that having intersection bits means that there is
+ // an incompatibility.
+ // I.e., you can add a getter to a property that already has a setter, since
+  // kGetterProperty and kSetterProperty don't intersect, but not if it
+ // already has a getter or a value. Adding the getter to an existing
+ // setter will store the value (kGetterProperty | kSetterProperty), which
+ // is incompatible with adding any further properties.
+ enum PropertyKind {
+ kNone = 0,
+ // Bit patterns representing different object literal property types.
+ kGetterProperty = 1,
+ kSetterProperty = 2,
+ kValueProperty = 7,
+ // Helper constants.
+ kValueFlag = 4
+ };
+
+ // Validation per ECMA 262 - 11.1.5 "Object Initialiser".
+ class ObjectLiteralChecker {
+ public:
+ ObjectLiteralChecker(ParserBase* parser, LanguageMode mode)
+ : parser_(parser),
+ finder_(scanner()->unicode_cache()),
+ language_mode_(mode) { }
+
+ void CheckProperty(Token::Value property, PropertyKind type, bool* ok);
+
+ private:
+ ParserBase* parser() const { return parser_; }
+ Scanner* scanner() const { return parser_->scanner(); }
+
+ // Checks the type of conflict based on values coming from PropertyType.
+ bool HasConflict(PropertyKind type1, PropertyKind type2) {
+ return (type1 & type2) != 0;
+ }
+ bool IsDataDataConflict(PropertyKind type1, PropertyKind type2) {
+ return ((type1 & type2) & kValueFlag) != 0;
+ }
+ bool IsDataAccessorConflict(PropertyKind type1, PropertyKind type2) {
+ return ((type1 ^ type2) & kValueFlag) != 0;
+ }
+ bool IsAccessorAccessorConflict(PropertyKind type1, PropertyKind type2) {
+ return ((type1 | type2) & kValueFlag) == 0;
+ }
+
+ ParserBase* parser_;
+ DuplicateFinder finder_;
+ LanguageMode language_mode_;
+ };
+
+ private:
+ Scanner* scanner_;
+ uintptr_t stack_limit_;
+ bool stack_overflow_;
+
+ bool allow_lazy_;
+ bool allow_natives_syntax_;
+ bool allow_generators_;
+ bool allow_for_of_;
+};
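
The PropertyKind values above form a small bit algebra: kValueFlag marks data properties, the two low bits mark getters and setters, and kValueProperty sets all three so that a data property conflicts with anything of the same name. A standalone sketch of how the predicates classify conflicts (the assert-based driver is illustrative only):

#include <cassert>

enum PropertyKind {
  kNone = 0,
  kGetterProperty = 1,
  kSetterProperty = 2,
  kValueProperty = 7,   // kValueFlag plus both accessor bits
  kValueFlag = 4
};

static bool HasConflict(int a, int b) { return (a & b) != 0; }
static bool IsDataDataConflict(int a, int b) { return ((a & b) & kValueFlag) != 0; }
static bool IsDataAccessorConflict(int a, int b) { return ((a ^ b) & kValueFlag) != 0; }
static bool IsAccessorAccessorConflict(int a, int b) { return ((a | b) & kValueFlag) == 0; }

int main() {
  // A getter next to a setter shares no bits, so it is not a conflict.
  assert(!HasConflict(kGetterProperty, kSetterProperty));
  // Two data properties both carry kValueFlag: a data/data conflict.
  assert(IsDataDataConflict(kValueProperty, kValueProperty));
  // A data property next to an accessor: exactly one side has kValueFlag.
  assert(IsDataAccessorConflict(kValueProperty, kGetterProperty));
  // Two getters: neither side has kValueFlag.
  assert(IsAccessorAccessorConflict(kGetterProperty, kGetterProperty));
  return 0;
}
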
-typedef uint8_t byte;
// Preparsing checks a JavaScript program and emits preparse-data that helps
// a later parsing to be faster.
@@ -54,82 +218,22 @@ typedef uint8_t byte;
// rather it is to speed up properly written and correct programs.
// That means that contextual checks (like a label being declared where
// it is used) are generally omitted.
-
-namespace i = v8::internal;
-
-class DuplicateFinder {
- public:
- explicit DuplicateFinder(i::UnicodeCache* constants)
- : unicode_constants_(constants),
- backing_store_(16),
- map_(&Match) { }
-
- int AddAsciiSymbol(i::Vector<const char> key, int value);
- int AddUtf16Symbol(i::Vector<const uint16_t> key, int value);
-  // Add a number literal by converting it (if necessary)
-  // to the string that ToString(ToNumber(literal)) would generate,
-  // and then adding that string with AddAsciiSymbol.
- // This string is the actual value used as key in an object literal,
- // and the one that must be different from the other keys.
- int AddNumber(i::Vector<const char> key, int value);
-
- private:
- int AddSymbol(i::Vector<const byte> key, bool is_ascii, int value);
- // Backs up the key and its length in the backing store.
-  // The backup is stored with a base-128 encoding of the
- // length (plus a bit saying whether the string is ASCII),
- // followed by the bytes of the key.
- byte* BackupKey(i::Vector<const byte> key, bool is_ascii);
-
- // Compare two encoded keys (both pointing into the backing store)
-  // for having the same base-128 encoded lengths and ASCII-ness,
- // and then having the same 'length' bytes following.
- static bool Match(void* first, void* second);
- // Creates a hash from a sequence of bytes.
- static uint32_t Hash(i::Vector<const byte> key, bool is_ascii);
- // Checks whether a string containing a JS number is its canonical
- // form.
- static bool IsNumberCanonical(i::Vector<const char> key);
-
- // Size of buffer. Sufficient for using it to call DoubleToCString in
- // from conversions.h.
- static const int kBufferSize = 100;
-
- i::UnicodeCache* unicode_constants_;
- // Backing store used to store strings used as hashmap keys.
- i::SequenceCollector<unsigned char> backing_store_;
- i::HashMap map_;
- // Buffer used for string->number->canonical string conversions.
- char number_buffer_[kBufferSize];
-};
-
-
-class PreParser {
+class PreParser : public ParserBase {
public:
enum PreParseResult {
kPreParseStackOverflow,
kPreParseSuccess
};
-
- PreParser(i::Scanner* scanner,
- i::ParserRecorder* log,
- uintptr_t stack_limit,
- bool allow_lazy,
- bool allow_natives_syntax,
- bool allow_modules)
- : scanner_(scanner),
+ PreParser(Scanner* scanner,
+ ParserRecorder* log,
+ uintptr_t stack_limit)
+ : ParserBase(scanner, stack_limit),
log_(log),
scope_(NULL),
- stack_limit_(stack_limit),
- strict_mode_violation_location_(i::Scanner::Location::invalid()),
+ strict_mode_violation_location_(Scanner::Location::invalid()),
strict_mode_violation_type_(NULL),
- stack_overflow_(false),
- allow_lazy_(allow_lazy),
- allow_modules_(allow_modules),
- allow_natives_syntax_(allow_natives_syntax),
- parenthesized_function_(false),
- harmony_scoping_(scanner->HarmonyScoping()) { }
+ parenthesized_function_(false) { }
~PreParser() {}
@@ -137,68 +241,33 @@ class PreParser {
// success (even if parsing failed, the pre-parse data successfully
// captured the syntax error), and false if a stack-overflow happened
// during parsing.
- static PreParseResult PreParseProgram(i::Scanner* scanner,
- i::ParserRecorder* log,
- int flags,
- uintptr_t stack_limit) {
- bool allow_lazy = (flags & i::kAllowLazy) != 0;
- bool allow_natives_syntax = (flags & i::kAllowNativesSyntax) != 0;
- bool allow_modules = (flags & i::kAllowModules) != 0;
- return PreParser(scanner, log, stack_limit, allow_lazy,
- allow_natives_syntax, allow_modules).PreParse();
+ PreParseResult PreParseProgram() {
+ Scope top_scope(&scope_, kTopLevelScope);
+ bool ok = true;
+ int start_position = scanner()->peek_location().beg_pos;
+ ParseSourceElements(Token::EOS, &ok);
+ if (stack_overflow()) return kPreParseStackOverflow;
+ if (!ok) {
+ ReportUnexpectedToken(scanner()->current_token());
+ } else if (!scope_->is_classic_mode()) {
+ CheckOctalLiteral(start_position, scanner()->location().end_pos, &ok);
+ }
+ return kPreParseSuccess;
}
// Parses a single function literal, from the opening parentheses before
// parameters to the closing brace after the body.
// Returns a FunctionEntry describing the body of the function in enough
// detail that it can be lazily compiled.
- // The scanner is expected to have matched the "function" keyword and
- // parameters, and have consumed the initial '{'.
+ // The scanner is expected to have matched the "function" or "function*"
+ // keyword and parameters, and have consumed the initial '{'.
// At return, unless an error occurred, the scanner is positioned before the
  // final '}'.
- PreParseResult PreParseLazyFunction(i::LanguageMode mode,
- i::ParserRecorder* log);
+ PreParseResult PreParseLazyFunction(LanguageMode mode,
+ bool is_generator,
+ ParserRecorder* log);
private:
- // Used to detect duplicates in object literals. Each of the values
- // kGetterProperty, kSetterProperty and kValueProperty represents
- // a type of object literal property. When parsing a property, its
- // type value is stored in the DuplicateFinder for the property name.
- // Values are chosen so that having intersection bits means the there is
- // an incompatibility.
- // I.e., you can add a getter to a property that already has a setter, since
- // kGetterProperty and kSetterProperty doesn't intersect, but not if it
- // already has a getter or a value. Adding the getter to an existing
- // setter will store the value (kGetterProperty | kSetterProperty), which
- // is incompatible with adding any further properties.
- enum PropertyType {
- kNone = 0,
- // Bit patterns representing different object literal property types.
- kGetterProperty = 1,
- kSetterProperty = 2,
- kValueProperty = 7,
- // Helper constants.
- kValueFlag = 4
- };
-
- // Checks the type of conflict based on values coming from PropertyType.
- bool HasConflict(int type1, int type2) { return (type1 & type2) != 0; }
- bool IsDataDataConflict(int type1, int type2) {
- return ((type1 & type2) & kValueFlag) != 0;
- }
- bool IsDataAccessorConflict(int type1, int type2) {
- return ((type1 ^ type2) & kValueFlag) != 0;
- }
- bool IsAccessorAccessorConflict(int type1, int type2) {
- return ((type1 | type2) & kValueFlag) == 0;
- }
-
-
- void CheckDuplicate(DuplicateFinder* finder,
- i::Token::Value property,
- int type,
- bool* ok);
-
// These types form an algebra over syntactic categories that is just
// rich enough to let us recognize and propagate the constructs that
// are either being counted in the preparser data, or is important
@@ -240,9 +309,13 @@ class PreParser {
static Identifier FutureStrictReserved() {
return Identifier(kFutureStrictReservedIdentifier);
}
+ static Identifier Yield() {
+ return Identifier(kYieldIdentifier);
+ }
bool IsEval() { return type_ == kEvalIdentifier; }
bool IsArguments() { return type_ == kArgumentsIdentifier; }
bool IsEvalOrArguments() { return type_ >= kEvalIdentifier; }
+ bool IsYield() { return type_ == kYieldIdentifier; }
bool IsFutureReserved() { return type_ == kFutureReservedIdentifier; }
bool IsFutureStrictReserved() {
return type_ == kFutureStrictReservedIdentifier;
@@ -254,6 +327,7 @@ class PreParser {
kUnknownIdentifier,
kFutureReservedIdentifier,
kFutureStrictReservedIdentifier,
+ kYieldIdentifier,
kEvalIdentifier,
kArgumentsIdentifier
};
@@ -347,7 +421,7 @@ class PreParser {
// Identifiers and string literals can be parenthesized.
// They no longer work as labels or directive prologues,
// but are still recognized in other contexts.
- return Expression(code_ | kParentesizedExpressionFlag);
+ return Expression(code_ | kParenthesizedExpressionFlag);
}
// For other types of expressions, it's not important to remember
// the parentheses.
@@ -373,7 +447,8 @@ class PreParser {
kUseStrictString = kStringLiteralFlag | 8,
kStringLiteralMask = kUseStrictString,
- kParentesizedExpressionFlag = 4, // Only if identifier or string literal.
+ // Only if identifier or string literal.
+ kParenthesizedExpressionFlag = 4,
// Below here applies if neither identifier nor string literal.
kThisExpression = 4,
@@ -412,7 +487,7 @@ class PreParser {
}
bool IsStringLiteral() {
- return code_ != kUnknownStatement;
+ return code_ == kStringLiteralExpressionStatement;
}
bool IsUseStrictLiteral() {
@@ -451,7 +526,8 @@ class PreParser {
expected_properties_(0),
with_nesting_count_(0),
language_mode_(
- (prev_ != NULL) ? prev_->language_mode() : i::CLASSIC_MODE) {
+ (prev_ != NULL) ? prev_->language_mode() : CLASSIC_MODE),
+ is_generator_(false) {
*variable = this;
}
~Scope() { *variable_ = prev_; }
@@ -461,13 +537,15 @@ class PreParser {
int expected_properties() { return expected_properties_; }
int materialized_literal_count() { return materialized_literal_count_; }
bool IsInsideWith() { return with_nesting_count_ != 0; }
+ bool is_generator() { return is_generator_; }
+ void set_is_generator(bool is_generator) { is_generator_ = is_generator; }
bool is_classic_mode() {
- return language_mode_ == i::CLASSIC_MODE;
+ return language_mode_ == CLASSIC_MODE;
}
- i::LanguageMode language_mode() {
+ LanguageMode language_mode() {
return language_mode_;
}
- void set_language_mode(i::LanguageMode language_mode) {
+ void set_language_mode(LanguageMode language_mode) {
language_mode_ = language_mode;
}
@@ -491,28 +569,16 @@ class PreParser {
int materialized_literal_count_;
int expected_properties_;
int with_nesting_count_;
- i::LanguageMode language_mode_;
+ LanguageMode language_mode_;
+ bool is_generator_;
};
- // Preparse the program. Only called in PreParseProgram after creating
- // the instance.
- PreParseResult PreParse() {
- Scope top_scope(&scope_, kTopLevelScope);
- bool ok = true;
- int start_position = scanner_->peek_location().beg_pos;
- ParseSourceElements(i::Token::EOS, &ok);
- if (stack_overflow_) return kPreParseStackOverflow;
- if (!ok) {
- ReportUnexpectedToken(scanner_->current_token());
- } else if (!scope_->is_classic_mode()) {
- CheckOctalLiteral(start_position, scanner_->location().end_pos, &ok);
- }
- return kPreParseSuccess;
- }
-
// Report syntax error
- void ReportUnexpectedToken(i::Token::Value token);
- void ReportMessageAt(i::Scanner::Location location,
+ void ReportUnexpectedToken(Token::Value token);
+ void ReportMessageAt(Scanner::Location location, const char* type) {
+ ReportMessageAt(location, type, NULL);
+ }
+ void ReportMessageAt(Scanner::Location location,
const char* type,
const char* name_opt) {
log_->LogMessage(location.beg_pos, location.end_pos, type, name_opt);
@@ -524,8 +590,6 @@ class PreParser {
log_->LogMessage(start_pos, end_pos, type, name_opt);
}
- void CheckOctalLiteral(int beg_pos, int end_pos, bool* ok);
-
// All ParseXXX functions take as the last argument an *ok parameter
// which is set to false if parsing failed; it is unchanged otherwise.
// By making the 'exception handling' explicit, we are forced to check
@@ -557,6 +621,7 @@ class PreParser {
Expression ParseExpression(bool accept_IN, bool* ok);
Expression ParseAssignmentExpression(bool accept_IN, bool* ok);
+ Expression ParseYieldExpression(bool* ok);
Expression ParseConditionalExpression(bool accept_IN, bool* ok);
Expression ParseBinaryExpression(int prec, bool accept_IN, bool* ok);
Expression ParseUnaryExpression(bool* ok);
@@ -572,7 +637,7 @@ class PreParser {
Expression ParseV8Intrinsic(bool* ok);
Arguments ParseArguments(bool* ok);
- Expression ParseFunctionLiteral(bool* ok);
+ Expression ParseFunctionLiteral(bool is_generator, bool* ok);
void ParseLazyFunctionLiteralBody(bool* ok);
Identifier ParseIdentifier(bool* ok);
@@ -588,85 +653,40 @@ class PreParser {
// Log the currently parsed string literal.
Expression GetStringSymbol();
- i::Token::Value peek() {
- if (stack_overflow_) return i::Token::ILLEGAL;
- return scanner_->peek();
- }
-
- i::Token::Value Next() {
- if (stack_overflow_) return i::Token::ILLEGAL;
- {
- int marker;
- if (reinterpret_cast<uintptr_t>(&marker) < stack_limit_) {
- // Further calls to peek/Next will return illegal token.
- // The current one will still be returned. It might already
- // have been seen using peek.
- stack_overflow_ = true;
- }
- }
- return scanner_->Next();
- }
-
- bool peek_any_identifier();
-
- void set_language_mode(i::LanguageMode language_mode) {
+ void set_language_mode(LanguageMode language_mode) {
scope_->set_language_mode(language_mode);
}
bool is_classic_mode() {
- return scope_->language_mode() == i::CLASSIC_MODE;
+ return scope_->language_mode() == CLASSIC_MODE;
}
bool is_extended_mode() {
- return scope_->language_mode() == i::EXTENDED_MODE;
- }
-
- i::LanguageMode language_mode() { return scope_->language_mode(); }
-
- void Consume(i::Token::Value token) { Next(); }
-
- void Expect(i::Token::Value token, bool* ok) {
- if (Next() != token) {
- *ok = false;
- }
+ return scope_->language_mode() == EXTENDED_MODE;
}
- bool Check(i::Token::Value token) {
- i::Token::Value next = peek();
- if (next == token) {
- Consume(next);
- return true;
- }
- return false;
- }
- void ExpectSemicolon(bool* ok);
+ LanguageMode language_mode() { return scope_->language_mode(); }
- static int Precedence(i::Token::Value tok, bool accept_IN);
+ bool CheckInOrOf(bool accept_OF);
- void SetStrictModeViolation(i::Scanner::Location,
+ void SetStrictModeViolation(Scanner::Location,
const char* type,
bool* ok);
void CheckDelayedStrictModeViolation(int beg_pos, int end_pos, bool* ok);
- void StrictModeIdentifierViolation(i::Scanner::Location,
+ void StrictModeIdentifierViolation(Scanner::Location,
const char* eval_args_type,
Identifier identifier,
bool* ok);
- i::Scanner* scanner_;
- i::ParserRecorder* log_;
+ ParserRecorder* log_;
Scope* scope_;
- uintptr_t stack_limit_;
- i::Scanner::Location strict_mode_violation_location_;
+ Scanner::Location strict_mode_violation_location_;
const char* strict_mode_violation_type_;
- bool stack_overflow_;
- bool allow_lazy_;
- bool allow_modules_;
- bool allow_natives_syntax_;
bool parenthesized_function_;
- bool harmony_scoping_;
};
-} } // v8::preparser
+
+} } // v8::internal
#endif // V8_PREPARSER_H
diff --git a/deps/v8/src/prettyprinter.cc b/deps/v8/src/prettyprinter.cc
index 0d8dadce1..4b441b9ae 100644
--- a/deps/v8/src/prettyprinter.cc
+++ b/deps/v8/src/prettyprinter.cc
@@ -38,10 +38,11 @@ namespace internal {
#ifdef DEBUG
-PrettyPrinter::PrettyPrinter() {
+PrettyPrinter::PrettyPrinter(Isolate* isolate) {
output_ = NULL;
size_ = 0;
pos_ = 0;
+ InitializeAstVisitor(isolate);
}
@@ -122,6 +123,14 @@ void PrettyPrinter::VisitModuleUrl(ModuleUrl* node) {
}
+void PrettyPrinter::VisitModuleStatement(ModuleStatement* node) {
+ Print("module ");
+ PrintLiteral(node->proxy()->name(), false);
+ Print(" ");
+ Visit(node->body());
+}
+
+
void PrettyPrinter::VisitExpressionStatement(ExpressionStatement* node) {
Visit(node->expression());
Print(";");
@@ -191,11 +200,25 @@ void PrettyPrinter::VisitSwitchStatement(SwitchStatement* node) {
Print(") { ");
ZoneList<CaseClause*>* cases = node->cases();
for (int i = 0; i < cases->length(); i++)
- PrintCaseClause(cases->at(i));
+ Visit(cases->at(i));
Print("}");
}
+void PrettyPrinter::VisitCaseClause(CaseClause* clause) {
+ if (clause->is_default()) {
+ Print("default");
+ } else {
+ Print("case ");
+ Visit(clause->label());
+ }
+ Print(": ");
+ PrintStatements(clause->statements());
+ if (clause->statements()->length() > 0)
+ Print(" ");
+}
+
+
void PrettyPrinter::VisitDoWhileStatement(DoWhileStatement* node) {
PrintLabels(node->labels());
Print("do ");
@@ -246,6 +269,17 @@ void PrettyPrinter::VisitForInStatement(ForInStatement* node) {
}
+void PrettyPrinter::VisitForOfStatement(ForOfStatement* node) {
+ PrintLabels(node->labels());
+ Print("for (");
+ Visit(node->each());
+ Print(" of ");
+ Visit(node->iterable());
+ Print(") ");
+ Visit(node->body());
+}
+
+
void PrettyPrinter::VisitTryCatchStatement(TryCatchStatement* node) {
Print("try ");
Visit(node->try_block());
@@ -277,10 +311,9 @@ void PrettyPrinter::VisitFunctionLiteral(FunctionLiteral* node) {
}
-void PrettyPrinter::VisitSharedFunctionInfoLiteral(
- SharedFunctionInfoLiteral* node) {
+void PrettyPrinter::VisitNativeFunctionLiteral(NativeFunctionLiteral* node) {
Print("(");
- PrintLiteral(node->shared_function_info(), true);
+ PrintLiteral(node->name(), false);
Print(")");
}
@@ -295,7 +328,7 @@ void PrettyPrinter::VisitConditional(Conditional* node) {
void PrettyPrinter::VisitLiteral(Literal* node) {
- PrintLiteral(node->handle(), true);
+ PrintLiteral(node->value(), true);
}
@@ -344,6 +377,12 @@ void PrettyPrinter::VisitAssignment(Assignment* node) {
}
+void PrettyPrinter::VisitYield(Yield* node) {
+ Print("yield ");
+ Visit(node->expression());
+}
+
+
void PrettyPrinter::VisitThrow(Throw* node) {
Print("throw ");
Visit(node->exception());
@@ -353,11 +392,11 @@ void PrettyPrinter::VisitThrow(Throw* node) {
void PrettyPrinter::VisitProperty(Property* node) {
Expression* key = node->key();
Literal* literal = key->AsLiteral();
- if (literal != NULL && literal->handle()->IsSymbol()) {
+ if (literal != NULL && literal->value()->IsInternalizedString()) {
Print("(");
Visit(node->obj());
Print(").");
- PrintLiteral(literal->handle(), false);
+ PrintLiteral(literal->value(), false);
} else {
Visit(node->obj());
Print("[");
@@ -454,8 +493,8 @@ const char* PrettyPrinter::PrintProgram(FunctionLiteral* program) {
}
-void PrettyPrinter::PrintOut(AstNode* node) {
- PrettyPrinter printer;
+void PrettyPrinter::PrintOut(Isolate* isolate, AstNode* node) {
+ PrettyPrinter printer(isolate);
PrintF("%s", printer.Print(node));
}
@@ -490,7 +529,7 @@ void PrettyPrinter::Print(const char* format, ...) {
const int slack = 32;
int new_size = size_ + (size_ >> 1) + slack;
char* new_output = NewArray<char>(new_size);
- memcpy(new_output, output_, pos_);
+ OS::MemCopy(new_output, output_, pos_);
DeleteArray(output_);
output_ = new_output;
size_ = new_size;
@@ -595,29 +634,11 @@ void PrettyPrinter::PrintFunctionLiteral(FunctionLiteral* function) {
}
-void PrettyPrinter::PrintCaseClause(CaseClause* clause) {
- if (clause->is_default()) {
- Print("default");
- } else {
- Print("case ");
- Visit(clause->label());
- }
- Print(": ");
- PrintStatements(clause->statements());
- if (clause->statements()->length() > 0)
- Print(" ");
-}
-
-
//-----------------------------------------------------------------------------
class IndentedScope BASE_EMBEDDED {
public:
- explicit IndentedScope(AstPrinter* printer) : ast_printer_(printer) {
- ast_printer_->inc_indent();
- }
-
- IndentedScope(AstPrinter* printer, const char* txt, AstNode* node = NULL)
+ IndentedScope(AstPrinter* printer, const char* txt)
: ast_printer_(printer) {
ast_printer_->PrintIndented(txt);
ast_printer_->Print("\n");
@@ -636,7 +657,7 @@ class IndentedScope BASE_EMBEDDED {
//-----------------------------------------------------------------------------
-AstPrinter::AstPrinter() : indent_(0) {
+AstPrinter::AstPrinter(Isolate* isolate) : PrettyPrinter(isolate), indent_(0) {
}
@@ -678,21 +699,16 @@ void AstPrinter::PrintLiteralWithModeIndented(const char* info,
}
-void AstPrinter::PrintLabelsIndented(const char* info, ZoneStringList* labels) {
- if (labels != NULL && labels->length() > 0) {
- PrintIndented(info == NULL ? "LABELS" : info);
- Print(" ");
- PrintLabels(labels);
- Print("\n");
- } else if (info != NULL) {
- PrintIndented(info);
- Print("\n");
- }
+void AstPrinter::PrintLabelsIndented(ZoneStringList* labels) {
+ if (labels == NULL || labels->length() == 0) return;
+ PrintIndented("LABELS ");
+ PrintLabels(labels);
+ Print("\n");
}
void AstPrinter::PrintIndentedVisit(const char* s, AstNode* node) {
- IndentedScope indent(this, s, node);
+ IndentedScope indent(this, s);
Visit(node);
}
@@ -745,18 +761,6 @@ void AstPrinter::PrintArguments(ZoneList<Expression*>* arguments) {
}
-void AstPrinter::PrintCaseClause(CaseClause* clause) {
- if (clause->is_default()) {
- IndentedScope indent(this, "DEFAULT");
- PrintStatements(clause->statements());
- } else {
- IndentedScope indent(this, "CASE");
- Visit(clause->label());
- PrintStatements(clause->statements());
- }
-}
-
-
void AstPrinter::VisitBlock(Block* node) {
const char* block_txt = node->is_initializer_block() ? "BLOCK INIT" : "BLOCK";
IndentedScope indent(this, block_txt);
@@ -764,6 +768,7 @@ void AstPrinter::VisitBlock(Block* node) {
}
+// TODO(svenpanne) Start with IndentedScope.
void AstPrinter::VisitVariableDeclaration(VariableDeclaration* node) {
PrintLiteralWithModeIndented(Variable::Mode2String(node->mode()),
node->proxy()->var(),
@@ -771,6 +776,7 @@ void AstPrinter::VisitVariableDeclaration(VariableDeclaration* node) {
}
+// TODO(svenpanne) Start with IndentedScope.
void AstPrinter::VisitFunctionDeclaration(FunctionDeclaration* node) {
PrintIndented("FUNCTION ");
PrintLiteral(node->proxy()->name(), true);
@@ -801,19 +807,21 @@ void AstPrinter::VisitExportDeclaration(ExportDeclaration* node) {
void AstPrinter::VisitModuleLiteral(ModuleLiteral* node) {
+ IndentedScope indent(this, "MODULE LITERAL");
VisitBlock(node->body());
}
void AstPrinter::VisitModuleVariable(ModuleVariable* node) {
+ IndentedScope indent(this, "MODULE VARIABLE");
Visit(node->proxy());
}
void AstPrinter::VisitModulePath(ModulePath* node) {
- IndentedScope indent(this, "PATH");
- PrintIndentedVisit("MODULE", node->module());
- PrintLiteralIndented("NAME", node->name(), false);
+ IndentedScope indent(this, "MODULE PATH");
+ PrintIndentedVisit("MODULE PATH PARENT", node->module());
+ PrintLiteralIndented("NAME", node->name(), true);
}
@@ -822,18 +830,27 @@ void AstPrinter::VisitModuleUrl(ModuleUrl* node) {
}
+void AstPrinter::VisitModuleStatement(ModuleStatement* node) {
+ IndentedScope indent(this, "MODULE STATEMENT");
+ PrintLiteralIndented("NAME", node->proxy()->name(), true);
+ PrintStatements(node->body()->statements());
+}
+
+
void AstPrinter::VisitExpressionStatement(ExpressionStatement* node) {
+ IndentedScope indent(this, "EXPRESSION STATEMENT");
Visit(node->expression());
}
void AstPrinter::VisitEmptyStatement(EmptyStatement* node) {
- PrintIndented("EMPTY\n");
+ IndentedScope indent(this, "EMPTY");
}
void AstPrinter::VisitIfStatement(IfStatement* node) {
- PrintIndentedVisit("IF", node->condition());
+ IndentedScope indent(this, "IF");
+ PrintIndentedVisit("CONDITION", node->condition());
PrintIndentedVisit("THEN", node->then_statement());
if (node->HasElseStatement()) {
PrintIndentedVisit("ELSE", node->else_statement());
@@ -842,17 +859,20 @@ void AstPrinter::VisitIfStatement(IfStatement* node) {
void AstPrinter::VisitContinueStatement(ContinueStatement* node) {
- PrintLabelsIndented("CONTINUE", node->target()->labels());
+ IndentedScope indent(this, "CONTINUE");
+ PrintLabelsIndented(node->target()->labels());
}
void AstPrinter::VisitBreakStatement(BreakStatement* node) {
- PrintLabelsIndented("BREAK", node->target()->labels());
+ IndentedScope indent(this, "BREAK");
+ PrintLabelsIndented(node->target()->labels());
}
void AstPrinter::VisitReturnStatement(ReturnStatement* node) {
- PrintIndentedVisit("RETURN", node->expression());
+ IndentedScope indent(this, "RETURN");
+ Visit(node->expression());
}
@@ -865,17 +885,29 @@ void AstPrinter::VisitWithStatement(WithStatement* node) {
void AstPrinter::VisitSwitchStatement(SwitchStatement* node) {
IndentedScope indent(this, "SWITCH");
- PrintLabelsIndented(NULL, node->labels());
+ PrintLabelsIndented(node->labels());
PrintIndentedVisit("TAG", node->tag());
for (int i = 0; i < node->cases()->length(); i++) {
- PrintCaseClause(node->cases()->at(i));
+ Visit(node->cases()->at(i));
+ }
+}
+
+
+void AstPrinter::VisitCaseClause(CaseClause* clause) {
+ if (clause->is_default()) {
+ IndentedScope indent(this, "DEFAULT");
+ PrintStatements(clause->statements());
+ } else {
+ IndentedScope indent(this, "CASE");
+ Visit(clause->label());
+ PrintStatements(clause->statements());
}
}
void AstPrinter::VisitDoWhileStatement(DoWhileStatement* node) {
IndentedScope indent(this, "DO");
- PrintLabelsIndented(NULL, node->labels());
+ PrintLabelsIndented(node->labels());
PrintIndentedVisit("BODY", node->body());
PrintIndentedVisit("COND", node->cond());
}
@@ -883,7 +915,7 @@ void AstPrinter::VisitDoWhileStatement(DoWhileStatement* node) {
void AstPrinter::VisitWhileStatement(WhileStatement* node) {
IndentedScope indent(this, "WHILE");
- PrintLabelsIndented(NULL, node->labels());
+ PrintLabelsIndented(node->labels());
PrintIndentedVisit("COND", node->cond());
PrintIndentedVisit("BODY", node->body());
}
@@ -891,7 +923,7 @@ void AstPrinter::VisitWhileStatement(WhileStatement* node) {
void AstPrinter::VisitForStatement(ForStatement* node) {
IndentedScope indent(this, "FOR");
- PrintLabelsIndented(NULL, node->labels());
+ PrintLabelsIndented(node->labels());
if (node->init()) PrintIndentedVisit("INIT", node->init());
if (node->cond()) PrintIndentedVisit("COND", node->cond());
PrintIndentedVisit("BODY", node->body());
@@ -907,6 +939,14 @@ void AstPrinter::VisitForInStatement(ForInStatement* node) {
}
+void AstPrinter::VisitForOfStatement(ForOfStatement* node) {
+ IndentedScope indent(this, "FOR OF");
+ PrintIndentedVisit("FOR", node->each());
+ PrintIndentedVisit("OF", node->iterable());
+ PrintIndentedVisit("BODY", node->body());
+}
+
+
void AstPrinter::VisitTryCatchStatement(TryCatchStatement* node) {
IndentedScope indent(this, "TRY CATCH");
PrintIndentedVisit("TRY", node->try_block());
@@ -941,23 +981,23 @@ void AstPrinter::VisitFunctionLiteral(FunctionLiteral* node) {
}
-void AstPrinter::VisitSharedFunctionInfoLiteral(
- SharedFunctionInfoLiteral* node) {
- IndentedScope indent(this, "FUNC LITERAL");
- PrintLiteralIndented("SHARED INFO", node->shared_function_info(), true);
+void AstPrinter::VisitNativeFunctionLiteral(NativeFunctionLiteral* node) {
+ IndentedScope indent(this, "NATIVE FUNC LITERAL");
+ PrintLiteralIndented("NAME", node->name(), false);
}
void AstPrinter::VisitConditional(Conditional* node) {
IndentedScope indent(this, "CONDITIONAL");
- PrintIndentedVisit("?", node->condition());
+ PrintIndentedVisit("CONDITION", node->condition());
PrintIndentedVisit("THEN", node->then_expression());
PrintIndentedVisit("ELSE", node->else_expression());
}
+// TODO(svenpanne) Start with IndentedScope.
void AstPrinter::VisitLiteral(Literal* node) {
- PrintLiteralIndented("LITERAL", node->handle(), true);
+ PrintLiteralIndented("LITERAL", node->value(), true);
}
@@ -1012,6 +1052,7 @@ void AstPrinter::VisitArrayLiteral(ArrayLiteral* node) {
}
+// TODO(svenpanne) Start with IndentedScope.
void AstPrinter::VisitVariableProxy(VariableProxy* node) {
Variable* var = node->var();
EmbeddedVector<char, 128> buf;
@@ -1037,23 +1078,30 @@ void AstPrinter::VisitVariableProxy(VariableProxy* node) {
void AstPrinter::VisitAssignment(Assignment* node) {
- IndentedScope indent(this, Token::Name(node->op()), node);
+ IndentedScope indent(this, Token::Name(node->op()));
Visit(node->target());
Visit(node->value());
}
+void AstPrinter::VisitYield(Yield* node) {
+ IndentedScope indent(this, "YIELD");
+ Visit(node->expression());
+}
+
+
void AstPrinter::VisitThrow(Throw* node) {
- PrintIndentedVisit("THROW", node->exception());
+ IndentedScope indent(this, "THROW");
+ Visit(node->exception());
}
void AstPrinter::VisitProperty(Property* node) {
- IndentedScope indent(this, "PROPERTY", node);
+ IndentedScope indent(this, "PROPERTY");
Visit(node->obj());
Literal* literal = node->key()->AsLiteral();
- if (literal != NULL && literal->handle()->IsSymbol()) {
- PrintLiteralIndented("NAME", literal->handle(), false);
+ if (literal != NULL && literal->value()->IsInternalizedString()) {
+ PrintLiteralIndented("NAME", literal->value(), false);
} else {
PrintIndentedVisit("KEY", node->key());
}
@@ -1075,14 +1123,15 @@ void AstPrinter::VisitCallNew(CallNew* node) {
void AstPrinter::VisitCallRuntime(CallRuntime* node) {
- PrintLiteralIndented("CALL RUNTIME ", node->name(), false);
- IndentedScope indent(this);
+ IndentedScope indent(this, "CALL RUNTIME");
+ PrintLiteralIndented("NAME", node->name(), false);
PrintArguments(node->arguments());
}
void AstPrinter::VisitUnaryOperation(UnaryOperation* node) {
- PrintIndentedVisit(Token::Name(node->op()), node->expression());
+ IndentedScope indent(this, Token::Name(node->op()));
+ Visit(node->expression());
}
@@ -1090,19 +1139,20 @@ void AstPrinter::VisitCountOperation(CountOperation* node) {
EmbeddedVector<char, 128> buf;
OS::SNPrintF(buf, "%s %s", (node->is_prefix() ? "PRE" : "POST"),
Token::Name(node->op()));
- PrintIndentedVisit(buf.start(), node->expression());
+ IndentedScope indent(this, buf.start());
+ Visit(node->expression());
}
void AstPrinter::VisitBinaryOperation(BinaryOperation* node) {
- IndentedScope indent(this, Token::Name(node->op()), node);
+ IndentedScope indent(this, Token::Name(node->op()));
Visit(node->left());
Visit(node->right());
}
void AstPrinter::VisitCompareOperation(CompareOperation* node) {
- IndentedScope indent(this, Token::Name(node->op()), node);
+ IndentedScope indent(this, Token::Name(node->op()));
Visit(node->left());
Visit(node->right());
}
diff --git a/deps/v8/src/prettyprinter.h b/deps/v8/src/prettyprinter.h
index 9ac725764..b7ff2af5f 100644
--- a/deps/v8/src/prettyprinter.h
+++ b/deps/v8/src/prettyprinter.h
@@ -38,7 +38,7 @@ namespace internal {
class PrettyPrinter: public AstVisitor {
public:
- PrettyPrinter();
+ explicit PrettyPrinter(Isolate* isolate);
virtual ~PrettyPrinter();
// The following routines print a node into a string.
@@ -50,7 +50,7 @@ class PrettyPrinter: public AstVisitor {
void Print(const char* format, ...);
// Print a node to stdout.
- static void PrintOut(AstNode* node);
+ static void PrintOut(Isolate* isolate, AstNode* node);
// Individual nodes
#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
@@ -74,13 +74,15 @@ class PrettyPrinter: public AstVisitor {
void PrintDeclarations(ZoneList<Declaration*>* declarations);
void PrintFunctionLiteral(FunctionLiteral* function);
void PrintCaseClause(CaseClause* clause);
+
+ DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
};
// Prints the AST structure
class AstPrinter: public PrettyPrinter {
public:
- AstPrinter();
+ explicit AstPrinter(Isolate* isolate);
virtual ~AstPrinter();
const char* PrintProgram(FunctionLiteral* program);
@@ -104,7 +106,7 @@ class AstPrinter: public PrettyPrinter {
void PrintLiteralWithModeIndented(const char* info,
Variable* var,
Handle<Object> value);
- void PrintLabelsIndented(const char* info, ZoneStringList* labels);
+ void PrintLabelsIndented(ZoneStringList* labels);
void inc_indent() { indent_++; }
void dec_indent() { indent_--; }
diff --git a/deps/v8/src/profile-generator-inl.h b/deps/v8/src/profile-generator-inl.h
index 02e146f14..e363f6776 100644
--- a/deps/v8/src/profile-generator-inl.h
+++ b/deps/v8/src/profile-generator-inl.h
@@ -33,30 +33,23 @@
namespace v8 {
namespace internal {
-const char* StringsStorage::GetFunctionName(String* name) {
- return GetFunctionName(GetName(name));
-}
-
-
-const char* StringsStorage::GetFunctionName(const char* name) {
- return strlen(name) > 0 ? name : ProfileGenerator::kAnonymousFunctionName;
-}
-
-
CodeEntry::CodeEntry(Logger::LogEventsAndTags tag,
- const char* name_prefix,
const char* name,
+ const char* name_prefix,
const char* resource_name,
int line_number,
- int security_token_id)
+ int column_number)
: tag_(tag),
+ builtin_id_(Builtins::builtin_count),
name_prefix_(name_prefix),
name_(name),
resource_name_(resource_name),
line_number_(line_number),
+ column_number_(column_number),
shared_id_(0),
- security_token_id_(security_token_id) {
-}
+ script_id_(v8::Script::kNoScriptId),
+ no_frame_ranges_(NULL),
+ bailout_reason_(kEmptyBailoutReason) { }
bool CodeEntry::is_js_function_tag(Logger::LogEventsAndTags tag) {
@@ -72,77 +65,9 @@ bool CodeEntry::is_js_function_tag(Logger::LogEventsAndTags tag) {
ProfileNode::ProfileNode(ProfileTree* tree, CodeEntry* entry)
: tree_(tree),
entry_(entry),
- total_ticks_(0),
self_ticks_(0),
- children_(CodeEntriesMatch) {
-}
-
-
-CodeEntry* ProfileGenerator::EntryForVMState(StateTag tag) {
- switch (tag) {
- case GC:
- return gc_entry_;
- case JS:
- case COMPILER:
- case PARALLEL_COMPILER_PROLOGUE:
- // DOM events handlers are reported as OTHER / EXTERNAL entries.
- // To avoid confusing people, let's put all these entries into
- // one bucket.
- case OTHER:
- case EXTERNAL:
- return program_entry_;
- default: return NULL;
- }
-}
-
-
-HeapEntry* HeapGraphEdge::from() const {
- return &snapshot()->entries()[from_index_];
-}
-
-
-HeapSnapshot* HeapGraphEdge::snapshot() const {
- return to_entry_->snapshot();
-}
-
-
-int HeapEntry::index() const {
- return static_cast<int>(this - &snapshot_->entries().first());
-}
-
-
-int HeapEntry::set_children_index(int index) {
- children_index_ = index;
- int next_index = index + children_count_;
- children_count_ = 0;
- return next_index;
-}
-
-
-HeapGraphEdge** HeapEntry::children_arr() {
- ASSERT(children_index_ >= 0);
- return &snapshot_->children()[children_index_];
-}
-
-
-SnapshotObjectId HeapObjectsMap::GetNthGcSubrootId(int delta) {
- return kGcRootsFirstSubrootId + delta * kObjectIdStep;
-}
-
-
-HeapObject* V8HeapExplorer::GetNthGcSubrootObject(int delta) {
- return reinterpret_cast<HeapObject*>(
- reinterpret_cast<char*>(kFirstGcSubrootObject) +
- delta * HeapObjectsMap::kObjectIdStep);
-}
-
-
-int V8HeapExplorer::GetGcSubrootOrder(HeapObject* subroot) {
- return static_cast<int>(
- (reinterpret_cast<char*>(subroot) -
- reinterpret_cast<char*>(kFirstGcSubrootObject)) /
- HeapObjectsMap::kObjectIdStep);
-}
+ children_(CodeEntriesMatch),
+ id_(tree->next_node_id()) { }
} } // namespace v8::internal
diff --git a/deps/v8/src/profile-generator.cc b/deps/v8/src/profile-generator.cc
index b853f33cb..acf54da1c 100644
--- a/deps/v8/src/profile-generator.cc
+++ b/deps/v8/src/profile-generator.cc
@@ -29,71 +29,26 @@
#include "profile-generator-inl.h"
+#include "compiler.h"
+#include "debug.h"
+#include "sampler.h"
#include "global-handles.h"
-#include "heap-profiler.h"
#include "scopeinfo.h"
#include "unicode.h"
#include "zone-inl.h"
-#include "debug.h"
namespace v8 {
namespace internal {
-TokenEnumerator::TokenEnumerator()
- : token_locations_(4),
- token_removed_(4) {
-}
-
-
-TokenEnumerator::~TokenEnumerator() {
- Isolate* isolate = Isolate::Current();
- for (int i = 0; i < token_locations_.length(); ++i) {
- if (!token_removed_[i]) {
- isolate->global_handles()->ClearWeakness(token_locations_[i]);
- isolate->global_handles()->Destroy(token_locations_[i]);
- }
- }
-}
-
-
-int TokenEnumerator::GetTokenId(Object* token) {
- Isolate* isolate = Isolate::Current();
- if (token == NULL) return TokenEnumerator::kNoSecurityToken;
- for (int i = 0; i < token_locations_.length(); ++i) {
- if (*token_locations_[i] == token && !token_removed_[i]) return i;
- }
- Handle<Object> handle = isolate->global_handles()->Create(token);
- // handle.location() points to a memory cell holding a pointer
- // to a token object in the V8's heap.
- isolate->global_handles()->MakeWeak(handle.location(), this,
- TokenRemovedCallback);
- token_locations_.Add(handle.location());
- token_removed_.Add(false);
- return token_locations_.length() - 1;
-}
-
-
-void TokenEnumerator::TokenRemovedCallback(v8::Persistent<v8::Value> handle,
- void* parameter) {
- reinterpret_cast<TokenEnumerator*>(parameter)->TokenRemoved(
- Utils::OpenHandle(*handle).location());
- handle.Dispose();
-}
-
-
-void TokenEnumerator::TokenRemoved(Object** token_location) {
- for (int i = 0; i < token_locations_.length(); ++i) {
- if (token_locations_[i] == token_location && !token_removed_[i]) {
- token_removed_[i] = true;
- return;
- }
- }
+bool StringsStorage::StringsMatch(void* key1, void* key2) {
+ return strcmp(reinterpret_cast<char*>(key1),
+ reinterpret_cast<char*>(key2)) == 0;
}
-StringsStorage::StringsStorage()
- : names_(StringsMatch) {
+StringsStorage::StringsStorage(Heap* heap)
+ : hash_seed_(heap->HashSeed()), names_(StringsMatch) {
}
@@ -108,12 +63,15 @@ StringsStorage::~StringsStorage() {
const char* StringsStorage::GetCopy(const char* src) {
int len = static_cast<int>(strlen(src));
- Vector<char> dst = Vector<char>::New(len + 1);
- OS::StrNCpy(dst, src, len);
- dst[len] = '\0';
- uint32_t hash =
- HashSequentialString(dst.start(), len, HEAP->HashSeed());
- return AddOrDisposeString(dst.start(), hash);
+ HashMap::Entry* entry = GetEntry(src, len);
+ if (entry->value == NULL) {
+ Vector<char> dst = Vector<char>::New(len + 1);
+ OS::StrNCpy(dst, src, len);
+ dst[len] = '\0';
+ entry->key = dst.start();
+ entry->value = entry->key;
+ }
+ return reinterpret_cast<const char*>(entry->value);
}
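
The pattern in GetCopy above is plain string interning: look the string up by hash and only copy it into the table the first time it is seen, so all later calls return the same pointer. A minimal sketch using the C++ standard library instead of V8's HashMap (the class and method names are assumptions):

#include <string>
#include <unordered_set>

class StringInterner {
 public:
  // Returns a pointer shared by every equal string passed in; the pointer
  // stays valid because unordered_set nodes are never relocated on rehash.
  const char* GetCopy(const char* src) {
    return interned_.insert(std::string(src)).first->c_str();
  }

 private:
  std::unordered_set<std::string> interned_;
};
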
@@ -126,15 +84,16 @@ const char* StringsStorage::GetFormatted(const char* format, ...) {
}
-const char* StringsStorage::AddOrDisposeString(char* str, uint32_t hash) {
- HashMap::Entry* cache_entry = names_.Lookup(str, hash, true);
- if (cache_entry->value == NULL) {
+const char* StringsStorage::AddOrDisposeString(char* str, int len) {
+ HashMap::Entry* entry = GetEntry(str, len);
+ if (entry->value == NULL) {
// New entry added.
- cache_entry->value = str;
+ entry->key = str;
+ entry->value = str;
} else {
DeleteArray(str);
}
- return reinterpret_cast<const char*>(cache_entry->value);
+ return reinterpret_cast<const char*>(entry->value);
}
@@ -143,22 +102,23 @@ const char* StringsStorage::GetVFormatted(const char* format, va_list args) {
int len = OS::VSNPrintF(str, format, args);
if (len == -1) {
DeleteArray(str.start());
- return format;
+ return GetCopy(format);
}
- uint32_t hash = HashSequentialString(
- str.start(), len, HEAP->HashSeed());
- return AddOrDisposeString(str.start(), hash);
+ return AddOrDisposeString(str.start(), len);
}
-const char* StringsStorage::GetName(String* name) {
+const char* StringsStorage::GetName(Name* name) {
if (name->IsString()) {
- int length = Min(kMaxNameSize, name->length());
+ String* str = String::cast(name);
+ int length = Min(kMaxNameSize, str->length());
+ int actual_length = 0;
SmartArrayPointer<char> data =
- name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL, 0, length);
- uint32_t hash =
- HashSequentialString(*data, length, name->GetHeap()->HashSeed());
- return AddOrDisposeString(data.Detach(), hash);
+ str->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL, 0, length,
+ &actual_length);
+ return AddOrDisposeString(data.Detach(), actual_length);
+ } else if (name->IsSymbol()) {
+ return "<symbol>";
}
return "";
}
@@ -169,6 +129,21 @@ const char* StringsStorage::GetName(int index) {
}
+const char* StringsStorage::GetFunctionName(Name* name) {
+ return BeautifyFunctionName(GetName(name));
+}
+
+
+const char* StringsStorage::GetFunctionName(const char* name) {
+ return BeautifyFunctionName(GetCopy(name));
+}
+
+
+const char* StringsStorage::BeautifyFunctionName(const char* name) {
+ return (*name == 0) ? ProfileGenerator::kAnonymousFunctionName : name;
+}
+
+
size_t StringsStorage::GetUsedMemorySize() const {
size_t size = sizeof(*this);
size += sizeof(HashMap::Entry) * names_.capacity();
@@ -178,15 +153,20 @@ size_t StringsStorage::GetUsedMemorySize() const {
return size;
}
+
+HashMap::Entry* StringsStorage::GetEntry(const char* str, int len) {
+ uint32_t hash = StringHasher::HashSequentialString(str, len, hash_seed_);
+ return names_.Lookup(const_cast<char*>(str), hash, true);
+}
+
+
const char* const CodeEntry::kEmptyNamePrefix = "";
+const char* const CodeEntry::kEmptyResourceName = "";
+const char* const CodeEntry::kEmptyBailoutReason = "";
-void CodeEntry::CopyData(const CodeEntry& source) {
- tag_ = source.tag_;
- name_prefix_ = source.name_prefix_;
- name_ = source.name_;
- resource_name_ = source.resource_name_;
- line_number_ = source.line_number_;
+CodeEntry::~CodeEntry() {
+ delete no_frame_ranges_;
}
@@ -223,6 +203,12 @@ bool CodeEntry::IsSameAs(CodeEntry* entry) const {
}
+void CodeEntry::SetBuiltinId(Builtins::Name id) {
+ tag_ = Logger::BUILTIN_TAG;
+ builtin_id_ = id;
+}
+
+
ProfileNode* ProfileNode::FindChild(CodeEntry* entry) {
HashMap::Entry* map_entry =
children_.Lookup(entry, CodeEntryHash(entry), false);
@@ -244,23 +230,15 @@ ProfileNode* ProfileNode::FindOrAddChild(CodeEntry* entry) {
}
-double ProfileNode::GetSelfMillis() const {
- return tree_->TicksToMillis(self_ticks_);
-}
-
-
-double ProfileNode::GetTotalMillis() const {
- return tree_->TicksToMillis(total_ticks_);
-}
-
-
void ProfileNode::Print(int indent) {
- OS::Print("%5u %5u %*c %s%s [%d]",
- total_ticks_, self_ticks_,
+ OS::Print("%5u %*c %s%s %d #%d %s",
+ self_ticks_,
indent, ' ',
entry_->name_prefix(),
entry_->name(),
- entry_->security_token_id());
+ entry_->script_id(),
+ id(),
+ entry_->bailout_reason());
if (entry_->resource_name()[0] != '\0')
OS::Print(" %s:%d", entry_->resource_name(), entry_->line_number());
OS::Print("\n");
@@ -285,12 +263,8 @@ class DeleteNodesCallback {
ProfileTree::ProfileTree()
- : root_entry_(Logger::FUNCTION_TAG,
- "",
- "(root)",
- "",
- 0,
- TokenEnumerator::kNoSecurityToken),
+ : root_entry_(Logger::FUNCTION_TAG, "(root)"),
+ next_node_id_(1),
root_(new ProfileNode(this, &root_entry_)) {
}
@@ -301,7 +275,7 @@ ProfileTree::~ProfileTree() {
}
-void ProfileTree::AddPathFromEnd(const Vector<CodeEntry*>& path) {
+ProfileNode* ProfileTree::AddPathFromEnd(const Vector<CodeEntry*>& path) {
ProfileNode* node = root_;
for (CodeEntry** entry = path.start() + path.length() - 1;
entry != path.start() - 1;
@@ -311,6 +285,7 @@ void ProfileTree::AddPathFromEnd(const Vector<CodeEntry*>& path) {
}
}
node->IncrementSelfTicks();
+ return node;
}
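
AddPathFromEnd above records one stack sample into the call tree: the sample arrives top-frame-first, so the path is walked from the end (the root-most frame) towards the start, children are created on demand, and only the innermost node is charged a self tick. A standalone sketch with assumed, simplified types:

#include <map>
#include <memory>
#include <string>
#include <vector>

struct Node {
  unsigned self_ticks = 0;
  std::map<std::string, std::unique_ptr<Node>> children;
};

static Node* AddPathFromEnd(Node* root, const std::vector<std::string>& path) {
  Node* node = root;
  for (auto it = path.rbegin(); it != path.rend(); ++it) {
    std::unique_ptr<Node>& child = node->children[*it];   // find-or-add
    if (!child) child.reset(new Node());
    node = child.get();
  }
  node->self_ticks++;          // only the sampled top frame gets a self tick
  return node;                 // returned so the caller can also keep the sample
}
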
@@ -335,63 +310,6 @@ struct NodesPair {
};
-class FilteredCloneCallback {
- public:
- FilteredCloneCallback(ProfileNode* dst_root, int security_token_id)
- : stack_(10),
- security_token_id_(security_token_id) {
- stack_.Add(NodesPair(NULL, dst_root));
- }
-
- void BeforeTraversingChild(ProfileNode* parent, ProfileNode* child) {
- if (IsTokenAcceptable(child->entry()->security_token_id(),
- parent->entry()->security_token_id())) {
- ProfileNode* clone = stack_.last().dst->FindOrAddChild(child->entry());
- clone->IncreaseSelfTicks(child->self_ticks());
- stack_.Add(NodesPair(child, clone));
- } else {
- // Attribute ticks to parent node.
- stack_.last().dst->IncreaseSelfTicks(child->self_ticks());
- }
- }
-
- void AfterAllChildrenTraversed(ProfileNode* parent) { }
-
- void AfterChildTraversed(ProfileNode*, ProfileNode* child) {
- if (stack_.last().src == child) {
- stack_.RemoveLast();
- }
- }
-
- private:
- bool IsTokenAcceptable(int token, int parent_token) {
- if (token == TokenEnumerator::kNoSecurityToken
- || token == security_token_id_) return true;
- if (token == TokenEnumerator::kInheritsSecurityToken) {
- ASSERT(parent_token != TokenEnumerator::kInheritsSecurityToken);
- return parent_token == TokenEnumerator::kNoSecurityToken
- || parent_token == security_token_id_;
- }
- return false;
- }
-
- List<NodesPair> stack_;
- int security_token_id_;
-};
-
-void ProfileTree::FilteredClone(ProfileTree* src, int security_token_id) {
- ms_to_ticks_scale_ = src->ms_to_ticks_scale_;
- FilteredCloneCallback cb(root_, security_token_id);
- src->TraverseDepthFirst(&cb);
- CalculateTotalTicks();
-}
-
-
-void ProfileTree::SetTickRatePerMs(double ticks_per_ms) {
- ms_to_ticks_scale_ = ticks_per_ms > 0 ? 1.0 / ticks_per_ms : 1.0;
-}
-
-
class Position {
public:
explicit Position(ProfileNode* node)
@@ -434,73 +352,29 @@ void ProfileTree::TraverseDepthFirst(Callback* callback) {
}
-class CalculateTotalTicksCallback {
- public:
- void BeforeTraversingChild(ProfileNode*, ProfileNode*) { }
-
- void AfterAllChildrenTraversed(ProfileNode* node) {
- node->IncreaseTotalTicks(node->self_ticks());
- }
-
- void AfterChildTraversed(ProfileNode* parent, ProfileNode* child) {
- parent->IncreaseTotalTicks(child->total_ticks());
- }
-};
-
-
-void ProfileTree::CalculateTotalTicks() {
- CalculateTotalTicksCallback cb;
- TraverseDepthFirst(&cb);
-}
-
-
-void ProfileTree::ShortPrint() {
- OS::Print("root: %u %u %.2fms %.2fms\n",
- root_->total_ticks(), root_->self_ticks(),
- root_->GetTotalMillis(), root_->GetSelfMillis());
+CpuProfile::CpuProfile(const char* title, unsigned uid, bool record_samples)
+ : title_(title),
+ uid_(uid),
+ record_samples_(record_samples),
+ start_time_(Time::NowFromSystemTime()) {
+ timer_.Start();
}
void CpuProfile::AddPath(const Vector<CodeEntry*>& path) {
- top_down_.AddPathFromEnd(path);
- bottom_up_.AddPathFromStart(path);
+ ProfileNode* top_frame_node = top_down_.AddPathFromEnd(path);
+ if (record_samples_) samples_.Add(top_frame_node);
}
-void CpuProfile::CalculateTotalTicks() {
- top_down_.CalculateTotalTicks();
- bottom_up_.CalculateTotalTicks();
-}
-
-
-void CpuProfile::SetActualSamplingRate(double actual_sampling_rate) {
- top_down_.SetTickRatePerMs(actual_sampling_rate);
- bottom_up_.SetTickRatePerMs(actual_sampling_rate);
-}
-
-
-CpuProfile* CpuProfile::FilteredClone(int security_token_id) {
- ASSERT(security_token_id != TokenEnumerator::kNoSecurityToken);
- CpuProfile* clone = new CpuProfile(title_, uid_);
- clone->top_down_.FilteredClone(&top_down_, security_token_id);
- clone->bottom_up_.FilteredClone(&bottom_up_, security_token_id);
- return clone;
-}
-
-
-void CpuProfile::ShortPrint() {
- OS::Print("top down ");
- top_down_.ShortPrint();
- OS::Print("bottom up ");
- bottom_up_.ShortPrint();
+void CpuProfile::CalculateTotalTicksAndSamplingRate() {
+ end_time_ = start_time_ + timer_.Elapsed();
}
void CpuProfile::Print() {
OS::Print("[Top down]:\n");
top_down_.Print();
- OS::Print("[Bottom up]:\n");
- bottom_up_.Print();
}
@@ -530,13 +404,17 @@ void CodeMap::DeleteAllCoveredCode(Address start, Address end) {
}
-CodeEntry* CodeMap::FindEntry(Address addr) {
+CodeEntry* CodeMap::FindEntry(Address addr, Address* start) {
CodeTree::Locator locator;
if (tree_.FindGreatestLessThan(addr, &locator)) {
// locator.key() <= addr. Need to check that addr is within entry.
const CodeEntryInfo& entry = locator.value();
- if (addr < (locator.key() + entry.size))
+ if (addr < (locator.key() + entry.size)) {
+ if (start) {
+ *start = locator.key();
+ }
return entry.entry;
+ }
}
return NULL;
}
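
FindEntry above is an address-range lookup: find the greatest start address not above addr, then check that addr still falls inside that entry. The same idea with an ordered standard container instead of V8's splay tree (a sketch, not the actual CodeMap API):

#include <cstdint>
#include <map>

struct CodeRange {
  uintptr_t size;
  const char* name;
};

static const char* FindEntry(const std::map<uintptr_t, CodeRange>& code,
                             uintptr_t addr, uintptr_t* start = nullptr) {
  std::map<uintptr_t, CodeRange>::const_iterator it = code.upper_bound(addr);
  if (it == code.begin()) return nullptr;   // nothing starts at or below addr
  --it;                                     // now it->first <= addr
  if (addr >= it->first + it->second.size) return nullptr;  // past the entry
  if (start != nullptr) *start = it->first;
  return it->second.name;
}
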
@@ -570,7 +448,12 @@ void CodeMap::MoveCode(Address from, Address to) {
void CodeMap::CodeTreePrinter::Call(
const Address& key, const CodeMap::CodeEntryInfo& value) {
- OS::Print("%p %5d %s\n", key, value.size, value.entry->name());
+  // For shared function entries, the 'size' field is used to store their IDs.
+ if (value.entry == kSharedFunctionCodeEntry) {
+ OS::Print("%p SharedFunctionInfo %d\n", key, value.size);
+ } else {
+ OS::Print("%p %5d %s\n", key, value.size, value.entry->name());
+ }
}
@@ -580,11 +463,9 @@ void CodeMap::Print() {
}
-CpuProfilesCollection::CpuProfilesCollection()
- : profiles_uids_(UidsMatch),
- current_profiles_semaphore_(OS::CreateSemaphore(1)) {
- // Create list of unabridged profiles.
- profiles_by_token_.Add(new List<CpuProfile*>());
+CpuProfilesCollection::CpuProfilesCollection(Heap* heap)
+ : function_and_resource_names_(heap),
+ current_profiles_semaphore_(1) {
}
@@ -592,107 +473,56 @@ static void DeleteCodeEntry(CodeEntry** entry_ptr) {
delete *entry_ptr;
}
+
static void DeleteCpuProfile(CpuProfile** profile_ptr) {
delete *profile_ptr;
}
-static void DeleteProfilesList(List<CpuProfile*>** list_ptr) {
- if (*list_ptr != NULL) {
- (*list_ptr)->Iterate(DeleteCpuProfile);
- delete *list_ptr;
- }
-}
CpuProfilesCollection::~CpuProfilesCollection() {
- delete current_profiles_semaphore_;
+ finished_profiles_.Iterate(DeleteCpuProfile);
current_profiles_.Iterate(DeleteCpuProfile);
- detached_profiles_.Iterate(DeleteCpuProfile);
- profiles_by_token_.Iterate(DeleteProfilesList);
code_entries_.Iterate(DeleteCodeEntry);
}
-bool CpuProfilesCollection::StartProfiling(const char* title, unsigned uid) {
+bool CpuProfilesCollection::StartProfiling(const char* title, unsigned uid,
+ bool record_samples) {
ASSERT(uid > 0);
- current_profiles_semaphore_->Wait();
+ current_profiles_semaphore_.Wait();
if (current_profiles_.length() >= kMaxSimultaneousProfiles) {
- current_profiles_semaphore_->Signal();
+ current_profiles_semaphore_.Signal();
return false;
}
for (int i = 0; i < current_profiles_.length(); ++i) {
if (strcmp(current_profiles_[i]->title(), title) == 0) {
// Ignore attempts to start profile with the same title.
- current_profiles_semaphore_->Signal();
+ current_profiles_semaphore_.Signal();
return false;
}
}
- current_profiles_.Add(new CpuProfile(title, uid));
- current_profiles_semaphore_->Signal();
+ current_profiles_.Add(new CpuProfile(title, uid, record_samples));
+ current_profiles_semaphore_.Signal();
return true;
}
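
StartProfiling above guards the list of running profiles with a semaphore and rejects a request when the limit is reached or a profile with the same title is already active. A minimal sketch of the same guard using std::mutex (the class, the limit constant, and its value are assumptions for illustration):

#include <mutex>
#include <string>
#include <vector>

class ActiveProfiles {
 public:
  bool Start(const std::string& title) {
    std::lock_guard<std::mutex> lock(mutex_);
    if (titles_.size() >= kMaxSimultaneousProfiles) return false;
    for (const std::string& running : titles_) {
      if (running == title) return false;   // ignore a duplicate title
    }
    titles_.push_back(title);
    return true;
  }

 private:
  static constexpr size_t kMaxSimultaneousProfiles = 100;  // assumed limit
  std::mutex mutex_;
  std::vector<std::string> titles_;
};
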
-bool CpuProfilesCollection::StartProfiling(String* title, unsigned uid) {
- return StartProfiling(GetName(title), uid);
-}
-
-
-CpuProfile* CpuProfilesCollection::StopProfiling(int security_token_id,
- const char* title,
- double actual_sampling_rate) {
+CpuProfile* CpuProfilesCollection::StopProfiling(const char* title) {
const int title_len = StrLength(title);
CpuProfile* profile = NULL;
- current_profiles_semaphore_->Wait();
+ current_profiles_semaphore_.Wait();
for (int i = current_profiles_.length() - 1; i >= 0; --i) {
if (title_len == 0 || strcmp(current_profiles_[i]->title(), title) == 0) {
profile = current_profiles_.Remove(i);
break;
}
}
- current_profiles_semaphore_->Signal();
-
- if (profile != NULL) {
- profile->CalculateTotalTicks();
- profile->SetActualSamplingRate(actual_sampling_rate);
- List<CpuProfile*>* unabridged_list =
- profiles_by_token_[TokenToIndex(TokenEnumerator::kNoSecurityToken)];
- unabridged_list->Add(profile);
- HashMap::Entry* entry =
- profiles_uids_.Lookup(reinterpret_cast<void*>(profile->uid()),
- static_cast<uint32_t>(profile->uid()),
- true);
- ASSERT(entry->value == NULL);
- entry->value = reinterpret_cast<void*>(unabridged_list->length() - 1);
- return GetProfile(security_token_id, profile->uid());
- }
- return NULL;
-}
+ current_profiles_semaphore_.Signal();
-
-CpuProfile* CpuProfilesCollection::GetProfile(int security_token_id,
- unsigned uid) {
- int index = GetProfileIndex(uid);
- if (index < 0) return NULL;
- List<CpuProfile*>* unabridged_list =
- profiles_by_token_[TokenToIndex(TokenEnumerator::kNoSecurityToken)];
- if (security_token_id == TokenEnumerator::kNoSecurityToken) {
- return unabridged_list->at(index);
- }
- List<CpuProfile*>* list = GetProfilesList(security_token_id);
- if (list->at(index) == NULL) {
- (*list)[index] =
- unabridged_list->at(index)->FilteredClone(security_token_id);
- }
- return list->at(index);
-}
-
-
-int CpuProfilesCollection::GetProfileIndex(unsigned uid) {
- HashMap::Entry* entry = profiles_uids_.Lookup(reinterpret_cast<void*>(uid),
- static_cast<uint32_t>(uid),
- false);
- return entry != NULL ?
- static_cast<int>(reinterpret_cast<intptr_t>(entry->value)) : -1;
+ if (profile == NULL) return NULL;
+ profile->CalculateTotalTicksAndSamplingRate();
+ finished_profiles_.Add(profile);
+ return profile;
}
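A minimal usage sketch of the collection above, assuming the surrounding profiler types from this file; the wrapper function, the chosen uid, and the ownership handling after RemoveProfile() are illustrative assumptions, not part of the diff.

static void SampleProfileLifetime(CpuProfilesCollection* profiles) {
  // Start a profile that also retains individual samples (record_samples).
  if (!profiles->StartProfiling("startup", 1, true)) return;
  // ... ticks arrive concurrently via AddPathToCurrentProfiles() ...
  CpuProfile* profile = profiles->StopProfiling("startup");
  if (profile != NULL) {
    // StopProfiling() has already parked the profile in finished_profiles_
    // with its totals computed; detach and release it when no longer needed.
    profiles->RemoveProfile(profile);
    delete profile;  // Ownership after removal is assumed here.
  }
}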
@@ -708,129 +538,13 @@ bool CpuProfilesCollection::IsLastProfile(const char* title) {
void CpuProfilesCollection::RemoveProfile(CpuProfile* profile) {
// Called from VM thread for a completed profile.
unsigned uid = profile->uid();
- int index = GetProfileIndex(uid);
- if (index < 0) {
- detached_profiles_.RemoveElement(profile);
- return;
- }
- profiles_uids_.Remove(reinterpret_cast<void*>(uid),
- static_cast<uint32_t>(uid));
- // Decrement all indexes above the deleted one.
- for (HashMap::Entry* p = profiles_uids_.Start();
- p != NULL;
- p = profiles_uids_.Next(p)) {
- intptr_t p_index = reinterpret_cast<intptr_t>(p->value);
- if (p_index > index) {
- p->value = reinterpret_cast<void*>(p_index - 1);
- }
- }
- for (int i = 0; i < profiles_by_token_.length(); ++i) {
- List<CpuProfile*>* list = profiles_by_token_[i];
- if (list != NULL && index < list->length()) {
- // Move all filtered clones into detached_profiles_,
- // so we can know that they are still in use.
- CpuProfile* cloned_profile = list->Remove(index);
- if (cloned_profile != NULL && cloned_profile != profile) {
- detached_profiles_.Add(cloned_profile);
- }
- }
- }
-}
-
-
-int CpuProfilesCollection::TokenToIndex(int security_token_id) {
- ASSERT(TokenEnumerator::kNoSecurityToken == -1);
- return security_token_id + 1; // kNoSecurityToken -> 0, 0 -> 1, ...
-}
-
-
-List<CpuProfile*>* CpuProfilesCollection::GetProfilesList(
- int security_token_id) {
- const int index = TokenToIndex(security_token_id);
- const int lists_to_add = index - profiles_by_token_.length() + 1;
- if (lists_to_add > 0) profiles_by_token_.AddBlock(NULL, lists_to_add);
- List<CpuProfile*>* unabridged_list =
- profiles_by_token_[TokenToIndex(TokenEnumerator::kNoSecurityToken)];
- const int current_count = unabridged_list->length();
- if (profiles_by_token_[index] == NULL) {
- profiles_by_token_[index] = new List<CpuProfile*>(current_count);
- }
- List<CpuProfile*>* list = profiles_by_token_[index];
- const int profiles_to_add = current_count - list->length();
- if (profiles_to_add > 0) list->AddBlock(NULL, profiles_to_add);
- return list;
-}
-
-
-List<CpuProfile*>* CpuProfilesCollection::Profiles(int security_token_id) {
- List<CpuProfile*>* unabridged_list =
- profiles_by_token_[TokenToIndex(TokenEnumerator::kNoSecurityToken)];
- if (security_token_id == TokenEnumerator::kNoSecurityToken) {
- return unabridged_list;
- }
- List<CpuProfile*>* list = GetProfilesList(security_token_id);
- const int current_count = unabridged_list->length();
- for (int i = 0; i < current_count; ++i) {
- if (list->at(i) == NULL) {
- (*list)[i] = unabridged_list->at(i)->FilteredClone(security_token_id);
+ for (int i = 0; i < finished_profiles_.length(); i++) {
+ if (uid == finished_profiles_[i]->uid()) {
+ finished_profiles_.Remove(i);
+ return;
}
}
- return list;
-}
-
-
-CodeEntry* CpuProfilesCollection::NewCodeEntry(Logger::LogEventsAndTags tag,
- String* name,
- String* resource_name,
- int line_number) {
- CodeEntry* entry = new CodeEntry(tag,
- CodeEntry::kEmptyNamePrefix,
- GetFunctionName(name),
- GetName(resource_name),
- line_number,
- TokenEnumerator::kNoSecurityToken);
- code_entries_.Add(entry);
- return entry;
-}
-
-
-CodeEntry* CpuProfilesCollection::NewCodeEntry(Logger::LogEventsAndTags tag,
- const char* name) {
- CodeEntry* entry = new CodeEntry(tag,
- CodeEntry::kEmptyNamePrefix,
- GetFunctionName(name),
- "",
- v8::CpuProfileNode::kNoLineNumberInfo,
- TokenEnumerator::kNoSecurityToken);
- code_entries_.Add(entry);
- return entry;
-}
-
-
-CodeEntry* CpuProfilesCollection::NewCodeEntry(Logger::LogEventsAndTags tag,
- const char* name_prefix,
- String* name) {
- CodeEntry* entry = new CodeEntry(tag,
- name_prefix,
- GetName(name),
- "",
- v8::CpuProfileNode::kNoLineNumberInfo,
- TokenEnumerator::kInheritsSecurityToken);
- code_entries_.Add(entry);
- return entry;
-}
-
-
-CodeEntry* CpuProfilesCollection::NewCodeEntry(Logger::LogEventsAndTags tag,
- int args_count) {
- CodeEntry* entry = new CodeEntry(tag,
- "args_count: ",
- GetName(args_count),
- "",
- v8::CpuProfileNode::kNoLineNumberInfo,
- TokenEnumerator::kInheritsSecurityToken);
- code_entries_.Add(entry);
- return entry;
+ UNREACHABLE();
}
@@ -839,34 +553,29 @@ void CpuProfilesCollection::AddPathToCurrentProfiles(
  // As starting / stopping profiles is rare relative to how often this
  // method is called, we don't bother minimizing the duration of lock
  // holding, e.g. by copying the contents of the list into a local vector.
- current_profiles_semaphore_->Wait();
+ current_profiles_semaphore_.Wait();
for (int i = 0; i < current_profiles_.length(); ++i) {
current_profiles_[i]->AddPath(path);
}
- current_profiles_semaphore_->Signal();
-}
-
-
-void SampleRateCalculator::Tick() {
- if (--wall_time_query_countdown_ == 0)
- UpdateMeasurements(OS::TimeCurrentMillis());
+ current_profiles_semaphore_.Signal();
}
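The comment above names the alternative it rejects: copy the list under the semaphore and record the path outside it. A rough sketch of that alternative, shown only to make the trade-off concrete; the member function name is hypothetical and the parameter type is assumed to mirror AddPathToCurrentProfiles().

void CpuProfilesCollection::AddPathCopyingUnderLock(
    const Vector<CodeEntry*>& path) {
  List<CpuProfile*> local_copy;
  current_profiles_semaphore_.Wait();
  local_copy.AddAll(current_profiles_);  // Hold the semaphore only to copy.
  current_profiles_semaphore_.Signal();
  for (int i = 0; i < local_copy.length(); ++i) {
    // A profile stopped in the meantime would still receive this path;
    // the real code sidesteps that by holding the semaphore throughout,
    // which is cheap because starting / stopping profiles is rare.
    local_copy[i]->AddPath(path);
  }
}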
-void SampleRateCalculator::UpdateMeasurements(double current_time) {
- if (measurements_count_++ != 0) {
- const double measured_ticks_per_ms =
- (kWallTimeQueryIntervalMs * ticks_per_ms_) /
- (current_time - last_wall_time_);
- // Update the average value.
- ticks_per_ms_ +=
- (measured_ticks_per_ms - ticks_per_ms_) / measurements_count_;
- // Update the externally accessible result.
- result_ = static_cast<AtomicWord>(ticks_per_ms_ * kResultScale);
- }
- last_wall_time_ = current_time;
- wall_time_query_countdown_ =
- static_cast<unsigned>(kWallTimeQueryIntervalMs * ticks_per_ms_);
+CodeEntry* CpuProfilesCollection::NewCodeEntry(
+ Logger::LogEventsAndTags tag,
+ const char* name,
+ const char* name_prefix,
+ const char* resource_name,
+ int line_number,
+ int column_number) {
+ CodeEntry* code_entry = new CodeEntry(tag,
+ name,
+ name_prefix,
+ resource_name,
+ line_number,
+ column_number);
+ code_entries_.Add(code_entry);
+ return code_entry;
}
@@ -874,17 +583,26 @@ const char* const ProfileGenerator::kAnonymousFunctionName =
"(anonymous function)";
const char* const ProfileGenerator::kProgramEntryName =
"(program)";
+const char* const ProfileGenerator::kIdleEntryName =
+ "(idle)";
const char* const ProfileGenerator::kGarbageCollectorEntryName =
"(garbage collector)";
+const char* const ProfileGenerator::kUnresolvedFunctionName =
+ "(unresolved function)";
ProfileGenerator::ProfileGenerator(CpuProfilesCollection* profiles)
: profiles_(profiles),
program_entry_(
profiles->NewCodeEntry(Logger::FUNCTION_TAG, kProgramEntryName)),
+ idle_entry_(
+ profiles->NewCodeEntry(Logger::FUNCTION_TAG, kIdleEntryName)),
gc_entry_(
profiles->NewCodeEntry(Logger::BUILTIN_TAG,
- kGarbageCollectorEntryName)) {
+ kGarbageCollectorEntryName)),
+ unresolved_entry_(
+ profiles->NewCodeEntry(Logger::FUNCTION_TAG,
+ kUnresolvedFunctionName)) {
}
@@ -896,22 +614,46 @@ void ProfileGenerator::RecordTickSample(const TickSample& sample) {
CodeEntry** entry = entries.start();
memset(entry, 0, entries.length() * sizeof(*entry));
if (sample.pc != NULL) {
- *entry++ = code_map_.FindEntry(sample.pc);
-
- if (sample.has_external_callback) {
+ if (sample.has_external_callback && sample.state == EXTERNAL &&
+ sample.top_frame_type == StackFrame::EXIT) {
      // Don't use PC when in external callback code, as it can point
      // inside the callback's code, and we would erroneously report
      // that the callback calls itself.
- *(entries.start()) = NULL;
*entry++ = code_map_.FindEntry(sample.external_callback);
- } else if (sample.tos != NULL) {
- // Find out, if top of stack was pointing inside a JS function
- // meaning that we have encountered a frameless invocation.
- *entry = code_map_.FindEntry(sample.tos);
- if (*entry != NULL && !(*entry)->is_js_function()) {
- *entry = NULL;
+ } else {
+ Address start;
+ CodeEntry* pc_entry = code_map_.FindEntry(sample.pc, &start);
+      // If pc is in the function code before it has set up the stack frame
+      // or after the frame was destroyed, SafeStackFrameIterator incorrectly
+      // thinks that ebp contains the return address of the current function
+      // and skips the caller's frame. Check for this case and just skip such
+      // samples.
+ if (pc_entry) {
+ List<OffsetRange>* ranges = pc_entry->no_frame_ranges();
+ if (ranges) {
+ Code* code = Code::cast(HeapObject::FromAddress(start));
+ int pc_offset = static_cast<int>(
+ sample.pc - code->instruction_start());
+ for (int i = 0; i < ranges->length(); i++) {
+ OffsetRange& range = ranges->at(i);
+ if (range.from <= pc_offset && pc_offset < range.to) {
+ return;
+ }
+ }
+ }
+ *entry++ = pc_entry;
+
+ if (pc_entry->builtin_id() == Builtins::kFunctionCall ||
+ pc_entry->builtin_id() == Builtins::kFunctionApply) {
+          // When the current function is the FunctionCall or FunctionApply
+          // builtin, the top frame is either the frame of the calling JS
+          // function or an internal frame. In the latter case we know the
+          // caller for sure, but in the former case we don't, so we simply
+          // replace the frame with the 'unresolved' entry.
+ if (sample.top_frame_type == StackFrame::JAVA_SCRIPT) {
+ *entry++ = unresolved_entry_;
+ }
+ }
}
- entry++;
}
for (const Address* stack_pos = sample.stack,
@@ -940,2649 +682,22 @@ void ProfileGenerator::RecordTickSample(const TickSample& sample) {
}
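The no_frame_ranges check in RecordTickSample above boils down to a containment test: a sample is dropped when its pc offset falls into any half-open [from, to) range in which the function runs without a stack frame (prologue and epilogue code). A standalone restatement of that predicate; the helper name is hypothetical.

static bool PcIsInNoFrameRange(const List<OffsetRange>* ranges,
                               int pc_offset) {
  if (ranges == NULL) return false;
  for (int i = 0; i < ranges->length(); i++) {
    const OffsetRange& range = ranges->at(i);
    if (range.from <= pc_offset && pc_offset < range.to) return true;
  }
  return false;
}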
-HeapGraphEdge::HeapGraphEdge(Type type, const char* name, int from, int to)
- : type_(type),
- from_index_(from),
- to_index_(to),
- name_(name) {
- ASSERT(type == kContextVariable
- || type == kProperty
- || type == kInternal
- || type == kShortcut);
-}
-
-
-HeapGraphEdge::HeapGraphEdge(Type type, int index, int from, int to)
- : type_(type),
- from_index_(from),
- to_index_(to),
- index_(index) {
- ASSERT(type == kElement || type == kHidden || type == kWeak);
-}
-
-
-void HeapGraphEdge::ReplaceToIndexWithEntry(HeapSnapshot* snapshot) {
- to_entry_ = &snapshot->entries()[to_index_];
-}
-
-
-const int HeapEntry::kNoEntry = -1;
-
-HeapEntry::HeapEntry(HeapSnapshot* snapshot,
- Type type,
- const char* name,
- SnapshotObjectId id,
- int self_size)
- : type_(type),
- children_count_(0),
- children_index_(-1),
- self_size_(self_size),
- id_(id),
- snapshot_(snapshot),
- name_(name) { }
-
-
-void HeapEntry::SetNamedReference(HeapGraphEdge::Type type,
- const char* name,
- HeapEntry* entry) {
- HeapGraphEdge edge(type, name, this->index(), entry->index());
- snapshot_->edges().Add(edge);
- ++children_count_;
-}
-
-
-void HeapEntry::SetIndexedReference(HeapGraphEdge::Type type,
- int index,
- HeapEntry* entry) {
- HeapGraphEdge edge(type, index, this->index(), entry->index());
- snapshot_->edges().Add(edge);
- ++children_count_;
-}
-
-
-Handle<HeapObject> HeapEntry::GetHeapObject() {
- return snapshot_->collection()->FindHeapObjectById(id());
-}
-
-
-void HeapEntry::Print(
- const char* prefix, const char* edge_name, int max_depth, int indent) {
- STATIC_CHECK(sizeof(unsigned) == sizeof(id()));
- OS::Print("%6d @%6u %*c %s%s: ",
- self_size(), id(), indent, ' ', prefix, edge_name);
- if (type() != kString) {
- OS::Print("%s %.40s\n", TypeAsString(), name_);
- } else {
- OS::Print("\"");
- const char* c = name_;
- while (*c && (c - name_) <= 40) {
- if (*c != '\n')
- OS::Print("%c", *c);
- else
- OS::Print("\\n");
- ++c;
- }
- OS::Print("\"\n");
- }
- if (--max_depth == 0) return;
- Vector<HeapGraphEdge*> ch = children();
- for (int i = 0; i < ch.length(); ++i) {
- HeapGraphEdge& edge = *ch[i];
- const char* edge_prefix = "";
- EmbeddedVector<char, 64> index;
- const char* edge_name = index.start();
- switch (edge.type()) {
- case HeapGraphEdge::kContextVariable:
- edge_prefix = "#";
- edge_name = edge.name();
- break;
- case HeapGraphEdge::kElement:
- OS::SNPrintF(index, "%d", edge.index());
- break;
- case HeapGraphEdge::kInternal:
- edge_prefix = "$";
- edge_name = edge.name();
- break;
- case HeapGraphEdge::kProperty:
- edge_name = edge.name();
- break;
- case HeapGraphEdge::kHidden:
- edge_prefix = "$";
- OS::SNPrintF(index, "%d", edge.index());
- break;
- case HeapGraphEdge::kShortcut:
- edge_prefix = "^";
- edge_name = edge.name();
- break;
- case HeapGraphEdge::kWeak:
- edge_prefix = "w";
- OS::SNPrintF(index, "%d", edge.index());
- break;
- default:
- OS::SNPrintF(index, "!!! unknown edge type: %d ", edge.type());
- }
- edge.to()->Print(edge_prefix, edge_name, max_depth, indent + 2);
- }
-}
-
-
-const char* HeapEntry::TypeAsString() {
- switch (type()) {
- case kHidden: return "/hidden/";
- case kObject: return "/object/";
- case kClosure: return "/closure/";
- case kString: return "/string/";
- case kCode: return "/code/";
- case kArray: return "/array/";
- case kRegExp: return "/regexp/";
- case kHeapNumber: return "/number/";
- case kNative: return "/native/";
- case kSynthetic: return "/synthetic/";
- default: return "???";
- }
-}
-
-
-// It is very important to keep objects that form a heap snapshot
-// as small as possible.
-namespace { // Avoid littering the global namespace.
-
-template <size_t ptr_size> struct SnapshotSizeConstants;
-
-template <> struct SnapshotSizeConstants<4> {
- static const int kExpectedHeapGraphEdgeSize = 12;
- static const int kExpectedHeapEntrySize = 24;
- static const int kExpectedHeapSnapshotsCollectionSize = 96;
- static const int kExpectedHeapSnapshotSize = 136;
- static const size_t kMaxSerializableSnapshotRawSize = 256 * MB;
-};
-
-template <> struct SnapshotSizeConstants<8> {
- static const int kExpectedHeapGraphEdgeSize = 24;
- static const int kExpectedHeapEntrySize = 32;
- static const int kExpectedHeapSnapshotsCollectionSize = 144;
- static const int kExpectedHeapSnapshotSize = 168;
- static const uint64_t kMaxSerializableSnapshotRawSize =
- static_cast<uint64_t>(6000) * MB;
-};
-
-} // namespace
-
-HeapSnapshot::HeapSnapshot(HeapSnapshotsCollection* collection,
- HeapSnapshot::Type type,
- const char* title,
- unsigned uid)
- : collection_(collection),
- type_(type),
- title_(title),
- uid_(uid),
- root_index_(HeapEntry::kNoEntry),
- gc_roots_index_(HeapEntry::kNoEntry),
- natives_root_index_(HeapEntry::kNoEntry),
- max_snapshot_js_object_id_(0) {
- STATIC_CHECK(
- sizeof(HeapGraphEdge) ==
- SnapshotSizeConstants<kPointerSize>::kExpectedHeapGraphEdgeSize);
- STATIC_CHECK(
- sizeof(HeapEntry) ==
- SnapshotSizeConstants<kPointerSize>::kExpectedHeapEntrySize);
- for (int i = 0; i < VisitorSynchronization::kNumberOfSyncTags; ++i) {
- gc_subroot_indexes_[i] = HeapEntry::kNoEntry;
- }
-}
-
-
-void HeapSnapshot::Delete() {
- collection_->RemoveSnapshot(this);
- delete this;
-}
-
-
-void HeapSnapshot::RememberLastJSObjectId() {
- max_snapshot_js_object_id_ = collection_->last_assigned_id();
-}
-
-
-HeapEntry* HeapSnapshot::AddRootEntry() {
- ASSERT(root_index_ == HeapEntry::kNoEntry);
- ASSERT(entries_.is_empty()); // Root entry must be the first one.
- HeapEntry* entry = AddEntry(HeapEntry::kObject,
- "",
- HeapObjectsMap::kInternalRootObjectId,
- 0);
- root_index_ = entry->index();
- ASSERT(root_index_ == 0);
- return entry;
-}
-
-
-HeapEntry* HeapSnapshot::AddGcRootsEntry() {
- ASSERT(gc_roots_index_ == HeapEntry::kNoEntry);
- HeapEntry* entry = AddEntry(HeapEntry::kObject,
- "(GC roots)",
- HeapObjectsMap::kGcRootsObjectId,
- 0);
- gc_roots_index_ = entry->index();
- return entry;
-}
-
-
-HeapEntry* HeapSnapshot::AddGcSubrootEntry(int tag) {
- ASSERT(gc_subroot_indexes_[tag] == HeapEntry::kNoEntry);
- ASSERT(0 <= tag && tag < VisitorSynchronization::kNumberOfSyncTags);
- HeapEntry* entry = AddEntry(
- HeapEntry::kObject,
- VisitorSynchronization::kTagNames[tag],
- HeapObjectsMap::GetNthGcSubrootId(tag),
- 0);
- gc_subroot_indexes_[tag] = entry->index();
- return entry;
-}
-
-
-HeapEntry* HeapSnapshot::AddEntry(HeapEntry::Type type,
- const char* name,
- SnapshotObjectId id,
- int size) {
- HeapEntry entry(this, type, name, id, size);
- entries_.Add(entry);
- return &entries_.last();
-}
-
-
-void HeapSnapshot::FillChildren() {
- ASSERT(children().is_empty());
- children().Allocate(edges().length());
- int children_index = 0;
- for (int i = 0; i < entries().length(); ++i) {
- HeapEntry* entry = &entries()[i];
- children_index = entry->set_children_index(children_index);
- }
- ASSERT(edges().length() == children_index);
- for (int i = 0; i < edges().length(); ++i) {
- HeapGraphEdge* edge = &edges()[i];
- edge->ReplaceToIndexWithEntry(this);
- edge->from()->add_child(edge);
- }
-}
-
-
-class FindEntryById {
- public:
- explicit FindEntryById(SnapshotObjectId id) : id_(id) { }
- int operator()(HeapEntry* const* entry) {
- if ((*entry)->id() == id_) return 0;
- return (*entry)->id() < id_ ? -1 : 1;
- }
- private:
- SnapshotObjectId id_;
-};
-
-
-HeapEntry* HeapSnapshot::GetEntryById(SnapshotObjectId id) {
- List<HeapEntry*>* entries_by_id = GetSortedEntriesList();
- // Perform a binary search by id.
- int index = SortedListBSearch(*entries_by_id, FindEntryById(id));
- if (index == -1)
- return NULL;
- return entries_by_id->at(index);
-}
-
-
-template<class T>
-static int SortByIds(const T* entry1_ptr,
- const T* entry2_ptr) {
- if ((*entry1_ptr)->id() == (*entry2_ptr)->id()) return 0;
- return (*entry1_ptr)->id() < (*entry2_ptr)->id() ? -1 : 1;
-}
-
-
-List<HeapEntry*>* HeapSnapshot::GetSortedEntriesList() {
- if (sorted_entries_.is_empty()) {
- sorted_entries_.Allocate(entries_.length());
- for (int i = 0; i < entries_.length(); ++i) {
- sorted_entries_[i] = &entries_[i];
- }
- sorted_entries_.Sort(SortByIds);
- }
- return &sorted_entries_;
-}
-
-
-void HeapSnapshot::Print(int max_depth) {
- root()->Print("", "", max_depth, 0);
-}
-
-
-template<typename T, class P>
-static size_t GetMemoryUsedByList(const List<T, P>& list) {
- return list.length() * sizeof(T) + sizeof(list);
-}
-
-
-size_t HeapSnapshot::RawSnapshotSize() const {
- STATIC_CHECK(SnapshotSizeConstants<kPointerSize>::kExpectedHeapSnapshotSize ==
- sizeof(HeapSnapshot)); // NOLINT
- return
- sizeof(*this) +
- GetMemoryUsedByList(entries_) +
- GetMemoryUsedByList(edges_) +
- GetMemoryUsedByList(children_) +
- GetMemoryUsedByList(sorted_entries_);
-}
-
-
-// We split IDs on evens for embedder objects (see
-// HeapObjectsMap::GenerateId) and odds for native objects.
-const SnapshotObjectId HeapObjectsMap::kInternalRootObjectId = 1;
-const SnapshotObjectId HeapObjectsMap::kGcRootsObjectId =
- HeapObjectsMap::kInternalRootObjectId + HeapObjectsMap::kObjectIdStep;
-const SnapshotObjectId HeapObjectsMap::kGcRootsFirstSubrootId =
- HeapObjectsMap::kGcRootsObjectId + HeapObjectsMap::kObjectIdStep;
-const SnapshotObjectId HeapObjectsMap::kFirstAvailableObjectId =
- HeapObjectsMap::kGcRootsFirstSubrootId +
- VisitorSynchronization::kNumberOfSyncTags * HeapObjectsMap::kObjectIdStep;
-
-HeapObjectsMap::HeapObjectsMap()
- : next_id_(kFirstAvailableObjectId),
- entries_map_(AddressesMatch) {
- // This dummy element solves a problem with entries_map_.
- // When we do lookup in HashMap we see no difference between two cases:
- // it has an entry with NULL as the value or it has created
- // a new entry on the fly with NULL as the default value.
- // With such dummy element we have a guaranty that all entries_map_ entries
- // will have the value field grater than 0.
- // This fact is using in MoveObject method.
- entries_.Add(EntryInfo(0, NULL, 0));
-}
-
-
-void HeapObjectsMap::SnapshotGenerationFinished() {
- RemoveDeadEntries();
-}
-
-
-void HeapObjectsMap::MoveObject(Address from, Address to) {
- ASSERT(to != NULL);
- ASSERT(from != NULL);
- if (from == to) return;
- void* from_value = entries_map_.Remove(from, AddressHash(from));
- if (from_value == NULL) return;
- int from_entry_info_index =
- static_cast<int>(reinterpret_cast<intptr_t>(from_value));
- entries_.at(from_entry_info_index).addr = to;
- HashMap::Entry* to_entry = entries_map_.Lookup(to, AddressHash(to), true);
- if (to_entry->value != NULL) {
- int to_entry_info_index =
- static_cast<int>(reinterpret_cast<intptr_t>(to_entry->value));
- // Without this operation we will have two EntryInfo's with the same
- // value in addr field. It is bad because later at RemoveDeadEntries
- // one of this entry will be removed with the corresponding entries_map_
- // entry.
- entries_.at(to_entry_info_index).addr = NULL;
- }
- to_entry->value = reinterpret_cast<void*>(from_entry_info_index);
-}
-
-
-SnapshotObjectId HeapObjectsMap::FindEntry(Address addr) {
- HashMap::Entry* entry = entries_map_.Lookup(addr, AddressHash(addr), false);
- if (entry == NULL) return 0;
- int entry_index = static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
- EntryInfo& entry_info = entries_.at(entry_index);
- ASSERT(static_cast<uint32_t>(entries_.length()) > entries_map_.occupancy());
- return entry_info.id;
-}
-
-
-SnapshotObjectId HeapObjectsMap::FindOrAddEntry(Address addr,
- unsigned int size) {
- ASSERT(static_cast<uint32_t>(entries_.length()) > entries_map_.occupancy());
- HashMap::Entry* entry = entries_map_.Lookup(addr, AddressHash(addr), true);
- if (entry->value != NULL) {
- int entry_index =
- static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
- EntryInfo& entry_info = entries_.at(entry_index);
- entry_info.accessed = true;
- entry_info.size = size;
- return entry_info.id;
- }
- entry->value = reinterpret_cast<void*>(entries_.length());
- SnapshotObjectId id = next_id_;
- next_id_ += kObjectIdStep;
- entries_.Add(EntryInfo(id, addr, size));
- ASSERT(static_cast<uint32_t>(entries_.length()) > entries_map_.occupancy());
- return id;
-}
-
-
-void HeapObjectsMap::StopHeapObjectsTracking() {
- time_intervals_.Clear();
-}
-
-void HeapObjectsMap::UpdateHeapObjectsMap() {
- HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask,
- "HeapSnapshotsCollection::UpdateHeapObjectsMap");
- HeapIterator iterator;
- for (HeapObject* obj = iterator.next();
- obj != NULL;
- obj = iterator.next()) {
- FindOrAddEntry(obj->address(), obj->Size());
- }
- RemoveDeadEntries();
-}
-
-
-SnapshotObjectId HeapObjectsMap::PushHeapObjectsStats(OutputStream* stream) {
- UpdateHeapObjectsMap();
- time_intervals_.Add(TimeInterval(next_id_));
- int prefered_chunk_size = stream->GetChunkSize();
- List<v8::HeapStatsUpdate> stats_buffer;
- ASSERT(!entries_.is_empty());
- EntryInfo* entry_info = &entries_.first();
- EntryInfo* end_entry_info = &entries_.last() + 1;
- for (int time_interval_index = 0;
- time_interval_index < time_intervals_.length();
- ++time_interval_index) {
- TimeInterval& time_interval = time_intervals_[time_interval_index];
- SnapshotObjectId time_interval_id = time_interval.id;
- uint32_t entries_size = 0;
- EntryInfo* start_entry_info = entry_info;
- while (entry_info < end_entry_info && entry_info->id < time_interval_id) {
- entries_size += entry_info->size;
- ++entry_info;
- }
- uint32_t entries_count =
- static_cast<uint32_t>(entry_info - start_entry_info);
- if (time_interval.count != entries_count ||
- time_interval.size != entries_size) {
- stats_buffer.Add(v8::HeapStatsUpdate(
- time_interval_index,
- time_interval.count = entries_count,
- time_interval.size = entries_size));
- if (stats_buffer.length() >= prefered_chunk_size) {
- OutputStream::WriteResult result = stream->WriteHeapStatsChunk(
- &stats_buffer.first(), stats_buffer.length());
- if (result == OutputStream::kAbort) return last_assigned_id();
- stats_buffer.Clear();
- }
- }
- }
- ASSERT(entry_info == end_entry_info);
- if (!stats_buffer.is_empty()) {
- OutputStream::WriteResult result = stream->WriteHeapStatsChunk(
- &stats_buffer.first(), stats_buffer.length());
- if (result == OutputStream::kAbort) return last_assigned_id();
- }
- stream->EndOfStream();
- return last_assigned_id();
-}
-
-
-void HeapObjectsMap::RemoveDeadEntries() {
- ASSERT(entries_.length() > 0 &&
- entries_.at(0).id == 0 &&
- entries_.at(0).addr == NULL);
- int first_free_entry = 1;
- for (int i = 1; i < entries_.length(); ++i) {
- EntryInfo& entry_info = entries_.at(i);
- if (entry_info.accessed) {
- if (first_free_entry != i) {
- entries_.at(first_free_entry) = entry_info;
- }
- entries_.at(first_free_entry).accessed = false;
- HashMap::Entry* entry = entries_map_.Lookup(
- entry_info.addr, AddressHash(entry_info.addr), false);
- ASSERT(entry);
- entry->value = reinterpret_cast<void*>(first_free_entry);
- ++first_free_entry;
- } else {
- if (entry_info.addr) {
- entries_map_.Remove(entry_info.addr, AddressHash(entry_info.addr));
- }
- }
- }
- entries_.Rewind(first_free_entry);
- ASSERT(static_cast<uint32_t>(entries_.length()) - 1 ==
- entries_map_.occupancy());
-}
-
-
-SnapshotObjectId HeapObjectsMap::GenerateId(v8::RetainedObjectInfo* info) {
- SnapshotObjectId id = static_cast<SnapshotObjectId>(info->GetHash());
- const char* label = info->GetLabel();
- id ^= HashSequentialString(label,
- static_cast<int>(strlen(label)),
- HEAP->HashSeed());
- intptr_t element_count = info->GetElementCount();
- if (element_count != -1)
- id ^= ComputeIntegerHash(static_cast<uint32_t>(element_count),
- v8::internal::kZeroHashSeed);
- return id << 1;
-}
-
-
-size_t HeapObjectsMap::GetUsedMemorySize() const {
- return
- sizeof(*this) +
- sizeof(HashMap::Entry) * entries_map_.capacity() +
- GetMemoryUsedByList(entries_) +
- GetMemoryUsedByList(time_intervals_);
-}
-
-
-HeapSnapshotsCollection::HeapSnapshotsCollection()
- : is_tracking_objects_(false),
- snapshots_uids_(HeapSnapshotsMatch),
- token_enumerator_(new TokenEnumerator()) {
-}
-
-
-static void DeleteHeapSnapshot(HeapSnapshot** snapshot_ptr) {
- delete *snapshot_ptr;
-}
-
-
-HeapSnapshotsCollection::~HeapSnapshotsCollection() {
- delete token_enumerator_;
- snapshots_.Iterate(DeleteHeapSnapshot);
-}
-
-
-HeapSnapshot* HeapSnapshotsCollection::NewSnapshot(HeapSnapshot::Type type,
- const char* name,
- unsigned uid) {
- is_tracking_objects_ = true; // Start watching for heap objects moves.
- return new HeapSnapshot(this, type, name, uid);
-}
-
-
-void HeapSnapshotsCollection::SnapshotGenerationFinished(
- HeapSnapshot* snapshot) {
- ids_.SnapshotGenerationFinished();
- if (snapshot != NULL) {
- snapshots_.Add(snapshot);
- HashMap::Entry* entry =
- snapshots_uids_.Lookup(reinterpret_cast<void*>(snapshot->uid()),
- static_cast<uint32_t>(snapshot->uid()),
- true);
- ASSERT(entry->value == NULL);
- entry->value = snapshot;
- }
-}
-
-
-HeapSnapshot* HeapSnapshotsCollection::GetSnapshot(unsigned uid) {
- HashMap::Entry* entry = snapshots_uids_.Lookup(reinterpret_cast<void*>(uid),
- static_cast<uint32_t>(uid),
- false);
- return entry != NULL ? reinterpret_cast<HeapSnapshot*>(entry->value) : NULL;
-}
-
-
-void HeapSnapshotsCollection::RemoveSnapshot(HeapSnapshot* snapshot) {
- snapshots_.RemoveElement(snapshot);
- unsigned uid = snapshot->uid();
- snapshots_uids_.Remove(reinterpret_cast<void*>(uid),
- static_cast<uint32_t>(uid));
-}
-
-
-Handle<HeapObject> HeapSnapshotsCollection::FindHeapObjectById(
- SnapshotObjectId id) {
- // First perform a full GC in order to avoid dead objects.
- HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask,
- "HeapSnapshotsCollection::FindHeapObjectById");
- AssertNoAllocation no_allocation;
- HeapObject* object = NULL;
- HeapIterator iterator(HeapIterator::kFilterUnreachable);
- // Make sure that object with the given id is still reachable.
- for (HeapObject* obj = iterator.next();
- obj != NULL;
- obj = iterator.next()) {
- if (ids_.FindEntry(obj->address()) == id) {
- ASSERT(object == NULL);
- object = obj;
- // Can't break -- kFilterUnreachable requires full heap traversal.
- }
- }
- return object != NULL ? Handle<HeapObject>(object) : Handle<HeapObject>();
-}
-
-
-size_t HeapSnapshotsCollection::GetUsedMemorySize() const {
- STATIC_CHECK(SnapshotSizeConstants<kPointerSize>::
- kExpectedHeapSnapshotsCollectionSize ==
- sizeof(HeapSnapshotsCollection)); // NOLINT
- size_t size = sizeof(*this);
- size += names_.GetUsedMemorySize();
- size += ids_.GetUsedMemorySize();
- size += sizeof(HashMap::Entry) * snapshots_uids_.capacity();
- size += GetMemoryUsedByList(snapshots_);
- for (int i = 0; i < snapshots_.length(); ++i) {
- size += snapshots_[i]->RawSnapshotSize();
- }
- return size;
-}
-
-
-HeapEntriesMap::HeapEntriesMap()
- : entries_(HeapThingsMatch) {
-}
-
-
-int HeapEntriesMap::Map(HeapThing thing) {
- HashMap::Entry* cache_entry = entries_.Lookup(thing, Hash(thing), false);
- if (cache_entry == NULL) return HeapEntry::kNoEntry;
- return static_cast<int>(reinterpret_cast<intptr_t>(cache_entry->value));
-}
-
-
-void HeapEntriesMap::Pair(HeapThing thing, int entry) {
- HashMap::Entry* cache_entry = entries_.Lookup(thing, Hash(thing), true);
- ASSERT(cache_entry->value == NULL);
- cache_entry->value = reinterpret_cast<void*>(static_cast<intptr_t>(entry));
-}
-
-
-HeapObjectsSet::HeapObjectsSet()
- : entries_(HeapEntriesMap::HeapThingsMatch) {
-}
-
-
-void HeapObjectsSet::Clear() {
- entries_.Clear();
-}
-
-
-bool HeapObjectsSet::Contains(Object* obj) {
- if (!obj->IsHeapObject()) return false;
- HeapObject* object = HeapObject::cast(obj);
- return entries_.Lookup(object, HeapEntriesMap::Hash(object), false) != NULL;
-}
-
-
-void HeapObjectsSet::Insert(Object* obj) {
- if (!obj->IsHeapObject()) return;
- HeapObject* object = HeapObject::cast(obj);
- entries_.Lookup(object, HeapEntriesMap::Hash(object), true);
-}
-
-
-const char* HeapObjectsSet::GetTag(Object* obj) {
- HeapObject* object = HeapObject::cast(obj);
- HashMap::Entry* cache_entry =
- entries_.Lookup(object, HeapEntriesMap::Hash(object), false);
- return cache_entry != NULL
- ? reinterpret_cast<const char*>(cache_entry->value)
- : NULL;
-}
-
-
-void HeapObjectsSet::SetTag(Object* obj, const char* tag) {
- if (!obj->IsHeapObject()) return;
- HeapObject* object = HeapObject::cast(obj);
- HashMap::Entry* cache_entry =
- entries_.Lookup(object, HeapEntriesMap::Hash(object), true);
- cache_entry->value = const_cast<char*>(tag);
-}
-
-
-HeapObject* const V8HeapExplorer::kInternalRootObject =
- reinterpret_cast<HeapObject*>(
- static_cast<intptr_t>(HeapObjectsMap::kInternalRootObjectId));
-HeapObject* const V8HeapExplorer::kGcRootsObject =
- reinterpret_cast<HeapObject*>(
- static_cast<intptr_t>(HeapObjectsMap::kGcRootsObjectId));
-HeapObject* const V8HeapExplorer::kFirstGcSubrootObject =
- reinterpret_cast<HeapObject*>(
- static_cast<intptr_t>(HeapObjectsMap::kGcRootsFirstSubrootId));
-HeapObject* const V8HeapExplorer::kLastGcSubrootObject =
- reinterpret_cast<HeapObject*>(
- static_cast<intptr_t>(HeapObjectsMap::kFirstAvailableObjectId));
-
-
-V8HeapExplorer::V8HeapExplorer(
- HeapSnapshot* snapshot,
- SnapshottingProgressReportingInterface* progress)
- : heap_(Isolate::Current()->heap()),
- snapshot_(snapshot),
- collection_(snapshot_->collection()),
- progress_(progress),
- filler_(NULL) {
-}
-
-
-V8HeapExplorer::~V8HeapExplorer() {
-}
-
-
-HeapEntry* V8HeapExplorer::AllocateEntry(HeapThing ptr) {
- return AddEntry(reinterpret_cast<HeapObject*>(ptr));
-}
-
-
-HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object) {
- if (object == kInternalRootObject) {
- snapshot_->AddRootEntry();
- return snapshot_->root();
- } else if (object == kGcRootsObject) {
- HeapEntry* entry = snapshot_->AddGcRootsEntry();
- return entry;
- } else if (object >= kFirstGcSubrootObject && object < kLastGcSubrootObject) {
- HeapEntry* entry = snapshot_->AddGcSubrootEntry(GetGcSubrootOrder(object));
- return entry;
- } else if (object->IsJSFunction()) {
- JSFunction* func = JSFunction::cast(object);
- SharedFunctionInfo* shared = func->shared();
- const char* name = shared->bound() ? "native_bind" :
- collection_->names()->GetName(String::cast(shared->name()));
- return AddEntry(object, HeapEntry::kClosure, name);
- } else if (object->IsJSRegExp()) {
- JSRegExp* re = JSRegExp::cast(object);
- return AddEntry(object,
- HeapEntry::kRegExp,
- collection_->names()->GetName(re->Pattern()));
- } else if (object->IsJSObject()) {
- const char* name = collection_->names()->GetName(
- GetConstructorName(JSObject::cast(object)));
- if (object->IsJSGlobalObject()) {
- const char* tag = objects_tags_.GetTag(object);
- if (tag != NULL) {
- name = collection_->names()->GetFormatted("%s / %s", name, tag);
- }
- }
- return AddEntry(object, HeapEntry::kObject, name);
- } else if (object->IsString()) {
- return AddEntry(object,
- HeapEntry::kString,
- collection_->names()->GetName(String::cast(object)));
- } else if (object->IsCode()) {
- return AddEntry(object, HeapEntry::kCode, "");
- } else if (object->IsSharedFunctionInfo()) {
- String* name = String::cast(SharedFunctionInfo::cast(object)->name());
- return AddEntry(object,
- HeapEntry::kCode,
- collection_->names()->GetName(name));
- } else if (object->IsScript()) {
- Object* name = Script::cast(object)->name();
- return AddEntry(object,
- HeapEntry::kCode,
- name->IsString()
- ? collection_->names()->GetName(String::cast(name))
- : "");
- } else if (object->IsNativeContext()) {
- return AddEntry(object, HeapEntry::kHidden, "system / NativeContext");
- } else if (object->IsContext()) {
- return AddEntry(object, HeapEntry::kHidden, "system / Context");
- } else if (object->IsFixedArray() ||
- object->IsFixedDoubleArray() ||
- object->IsByteArray() ||
- object->IsExternalArray()) {
- return AddEntry(object, HeapEntry::kArray, "");
- } else if (object->IsHeapNumber()) {
- return AddEntry(object, HeapEntry::kHeapNumber, "number");
- }
- return AddEntry(object, HeapEntry::kHidden, GetSystemEntryName(object));
-}
-
-
-HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object,
- HeapEntry::Type type,
- const char* name) {
- int object_size = object->Size();
- SnapshotObjectId object_id =
- collection_->GetObjectId(object->address(), object_size);
- return snapshot_->AddEntry(type, name, object_id, object_size);
-}
-
-
-class GcSubrootsEnumerator : public ObjectVisitor {
- public:
- GcSubrootsEnumerator(
- SnapshotFillerInterface* filler, V8HeapExplorer* explorer)
- : filler_(filler),
- explorer_(explorer),
- previous_object_count_(0),
- object_count_(0) {
- }
- void VisitPointers(Object** start, Object** end) {
- object_count_ += end - start;
- }
- void Synchronize(VisitorSynchronization::SyncTag tag) {
- // Skip empty subroots.
- if (previous_object_count_ != object_count_) {
- previous_object_count_ = object_count_;
- filler_->AddEntry(V8HeapExplorer::GetNthGcSubrootObject(tag), explorer_);
- }
- }
- private:
- SnapshotFillerInterface* filler_;
- V8HeapExplorer* explorer_;
- intptr_t previous_object_count_;
- intptr_t object_count_;
-};
-
-
-void V8HeapExplorer::AddRootEntries(SnapshotFillerInterface* filler) {
- filler->AddEntry(kInternalRootObject, this);
- filler->AddEntry(kGcRootsObject, this);
- GcSubrootsEnumerator enumerator(filler, this);
- heap_->IterateRoots(&enumerator, VISIT_ALL);
-}
-
-
-const char* V8HeapExplorer::GetSystemEntryName(HeapObject* object) {
- switch (object->map()->instance_type()) {
- case MAP_TYPE: return "system / Map";
- case JS_GLOBAL_PROPERTY_CELL_TYPE: return "system / JSGlobalPropertyCell";
- case FOREIGN_TYPE: return "system / Foreign";
- case ODDBALL_TYPE: return "system / Oddball";
-#define MAKE_STRUCT_CASE(NAME, Name, name) \
- case NAME##_TYPE: return "system / "#Name;
- STRUCT_LIST(MAKE_STRUCT_CASE)
-#undef MAKE_STRUCT_CASE
- default: return "system";
- }
-}
-
-
-int V8HeapExplorer::EstimateObjectsCount(HeapIterator* iterator) {
- int objects_count = 0;
- for (HeapObject* obj = iterator->next();
- obj != NULL;
- obj = iterator->next()) {
- objects_count++;
- }
- return objects_count;
-}
-
-
-class IndexedReferencesExtractor : public ObjectVisitor {
- public:
- IndexedReferencesExtractor(V8HeapExplorer* generator,
- HeapObject* parent_obj,
- int parent)
- : generator_(generator),
- parent_obj_(parent_obj),
- parent_(parent),
- next_index_(1) {
- }
- void VisitPointers(Object** start, Object** end) {
- for (Object** p = start; p < end; p++) {
- if (CheckVisitedAndUnmark(p)) continue;
- generator_->SetHiddenReference(parent_obj_, parent_, next_index_++, *p);
- }
- }
- static void MarkVisitedField(HeapObject* obj, int offset) {
- if (offset < 0) return;
- Address field = obj->address() + offset;
- ASSERT(!Memory::Object_at(field)->IsFailure());
- ASSERT(Memory::Object_at(field)->IsHeapObject());
- *field |= kFailureTag;
- }
-
- private:
- bool CheckVisitedAndUnmark(Object** field) {
- if ((*field)->IsFailure()) {
- intptr_t untagged = reinterpret_cast<intptr_t>(*field) & ~kFailureTagMask;
- *field = reinterpret_cast<Object*>(untagged | kHeapObjectTag);
- ASSERT((*field)->IsHeapObject());
- return true;
- }
- return false;
- }
- V8HeapExplorer* generator_;
- HeapObject* parent_obj_;
- int parent_;
- int next_index_;
-};
-
-
-void V8HeapExplorer::ExtractReferences(HeapObject* obj) {
- HeapEntry* heap_entry = GetEntry(obj);
- if (heap_entry == NULL) return; // No interest in this object.
- int entry = heap_entry->index();
-
- bool extract_indexed_refs = true;
- if (obj->IsJSGlobalProxy()) {
- ExtractJSGlobalProxyReferences(JSGlobalProxy::cast(obj));
- } else if (obj->IsJSObject()) {
- ExtractJSObjectReferences(entry, JSObject::cast(obj));
- } else if (obj->IsString()) {
- ExtractStringReferences(entry, String::cast(obj));
- extract_indexed_refs = false;
- } else if (obj->IsContext()) {
- ExtractContextReferences(entry, Context::cast(obj));
- } else if (obj->IsMap()) {
- ExtractMapReferences(entry, Map::cast(obj));
- } else if (obj->IsSharedFunctionInfo()) {
- ExtractSharedFunctionInfoReferences(entry, SharedFunctionInfo::cast(obj));
- } else if (obj->IsScript()) {
- ExtractScriptReferences(entry, Script::cast(obj));
- } else if (obj->IsCodeCache()) {
- ExtractCodeCacheReferences(entry, CodeCache::cast(obj));
- } else if (obj->IsCode()) {
- ExtractCodeReferences(entry, Code::cast(obj));
- } else if (obj->IsJSGlobalPropertyCell()) {
- ExtractJSGlobalPropertyCellReferences(
- entry, JSGlobalPropertyCell::cast(obj));
- extract_indexed_refs = false;
- }
- if (extract_indexed_refs) {
- SetInternalReference(obj, entry, "map", obj->map(), HeapObject::kMapOffset);
- IndexedReferencesExtractor refs_extractor(this, obj, entry);
- obj->Iterate(&refs_extractor);
- }
-}
-
-
-void V8HeapExplorer::ExtractJSGlobalProxyReferences(JSGlobalProxy* proxy) {
- // We need to reference JS global objects from snapshot's root.
- // We use JSGlobalProxy because this is what embedder (e.g. browser)
- // uses for the global object.
- Object* object = proxy->map()->prototype();
- bool is_debug_object = false;
-#ifdef ENABLE_DEBUGGER_SUPPORT
- is_debug_object = object->IsGlobalObject() &&
- Isolate::Current()->debug()->IsDebugGlobal(GlobalObject::cast(object));
-#endif
- if (!is_debug_object) {
- SetUserGlobalReference(object);
- }
-}
-
-
-void V8HeapExplorer::ExtractJSObjectReferences(
- int entry, JSObject* js_obj) {
- HeapObject* obj = js_obj;
- ExtractClosureReferences(js_obj, entry);
- ExtractPropertyReferences(js_obj, entry);
- ExtractElementReferences(js_obj, entry);
- ExtractInternalReferences(js_obj, entry);
- SetPropertyReference(
- obj, entry, heap_->Proto_symbol(), js_obj->GetPrototype());
- if (obj->IsJSFunction()) {
- JSFunction* js_fun = JSFunction::cast(js_obj);
- Object* proto_or_map = js_fun->prototype_or_initial_map();
- if (!proto_or_map->IsTheHole()) {
- if (!proto_or_map->IsMap()) {
- SetPropertyReference(
- obj, entry,
- heap_->prototype_symbol(), proto_or_map,
- NULL,
- JSFunction::kPrototypeOrInitialMapOffset);
- } else {
- SetPropertyReference(
- obj, entry,
- heap_->prototype_symbol(), js_fun->prototype());
- }
- }
- SharedFunctionInfo* shared_info = js_fun->shared();
- // JSFunction has either bindings or literals and never both.
- bool bound = shared_info->bound();
- TagObject(js_fun->literals_or_bindings(),
- bound ? "(function bindings)" : "(function literals)");
- SetInternalReference(js_fun, entry,
- bound ? "bindings" : "literals",
- js_fun->literals_or_bindings(),
- JSFunction::kLiteralsOffset);
- TagObject(shared_info, "(shared function info)");
- SetInternalReference(js_fun, entry,
- "shared", shared_info,
- JSFunction::kSharedFunctionInfoOffset);
- TagObject(js_fun->unchecked_context(), "(context)");
- SetInternalReference(js_fun, entry,
- "context", js_fun->unchecked_context(),
- JSFunction::kContextOffset);
- for (int i = JSFunction::kNonWeakFieldsEndOffset;
- i < JSFunction::kSize;
- i += kPointerSize) {
- SetWeakReference(js_fun, entry, i, *HeapObject::RawField(js_fun, i), i);
- }
- } else if (obj->IsGlobalObject()) {
- GlobalObject* global_obj = GlobalObject::cast(obj);
- SetInternalReference(global_obj, entry,
- "builtins", global_obj->builtins(),
- GlobalObject::kBuiltinsOffset);
- SetInternalReference(global_obj, entry,
- "native_context", global_obj->native_context(),
- GlobalObject::kNativeContextOffset);
- SetInternalReference(global_obj, entry,
- "global_receiver", global_obj->global_receiver(),
- GlobalObject::kGlobalReceiverOffset);
- }
- TagObject(js_obj->properties(), "(object properties)");
- SetInternalReference(obj, entry,
- "properties", js_obj->properties(),
- JSObject::kPropertiesOffset);
- TagObject(js_obj->elements(), "(object elements)");
- SetInternalReference(obj, entry,
- "elements", js_obj->elements(),
- JSObject::kElementsOffset);
-}
-
-
-void V8HeapExplorer::ExtractStringReferences(int entry, String* string) {
- if (string->IsConsString()) {
- ConsString* cs = ConsString::cast(string);
- SetInternalReference(cs, entry, "first", cs->first());
- SetInternalReference(cs, entry, "second", cs->second());
- } else if (string->IsSlicedString()) {
- SlicedString* ss = SlicedString::cast(string);
- SetInternalReference(ss, entry, "parent", ss->parent());
- }
-}
-
-
-void V8HeapExplorer::ExtractContextReferences(int entry, Context* context) {
-#define EXTRACT_CONTEXT_FIELD(index, type, name) \
- SetInternalReference(context, entry, #name, context->get(Context::index), \
- FixedArray::OffsetOfElementAt(Context::index));
- EXTRACT_CONTEXT_FIELD(CLOSURE_INDEX, JSFunction, closure);
- EXTRACT_CONTEXT_FIELD(PREVIOUS_INDEX, Context, previous);
- EXTRACT_CONTEXT_FIELD(EXTENSION_INDEX, Object, extension);
- EXTRACT_CONTEXT_FIELD(GLOBAL_OBJECT_INDEX, GlobalObject, global);
- if (context->IsNativeContext()) {
- TagObject(context->jsfunction_result_caches(),
- "(context func. result caches)");
- TagObject(context->normalized_map_cache(), "(context norm. map cache)");
- TagObject(context->runtime_context(), "(runtime context)");
- TagObject(context->data(), "(context data)");
- NATIVE_CONTEXT_FIELDS(EXTRACT_CONTEXT_FIELD);
-#undef EXTRACT_CONTEXT_FIELD
- for (int i = Context::FIRST_WEAK_SLOT;
- i < Context::NATIVE_CONTEXT_SLOTS;
- ++i) {
- SetWeakReference(context, entry, i, context->get(i),
- FixedArray::OffsetOfElementAt(i));
- }
- }
-}
-
-
-void V8HeapExplorer::ExtractMapReferences(int entry, Map* map) {
- SetInternalReference(map, entry,
- "prototype", map->prototype(), Map::kPrototypeOffset);
- SetInternalReference(map, entry,
- "constructor", map->constructor(),
- Map::kConstructorOffset);
- if (map->HasTransitionArray()) {
- TransitionArray* transitions = map->transitions();
-
- Object* back_pointer = transitions->back_pointer_storage();
- TagObject(transitions->back_pointer_storage(), "(back pointer)");
- SetInternalReference(transitions, entry,
- "backpointer", back_pointer,
- TransitionArray::kBackPointerStorageOffset);
- IndexedReferencesExtractor transitions_refs(this, transitions, entry);
- transitions->Iterate(&transitions_refs);
-
- TagObject(transitions, "(transition array)");
- SetInternalReference(map, entry,
- "transitions", transitions,
- Map::kTransitionsOrBackPointerOffset);
- } else {
- Object* back_pointer = map->GetBackPointer();
- TagObject(back_pointer, "(back pointer)");
- SetInternalReference(map, entry,
- "backpointer", back_pointer,
- Map::kTransitionsOrBackPointerOffset);
- }
- DescriptorArray* descriptors = map->instance_descriptors();
- TagObject(descriptors, "(map descriptors)");
- SetInternalReference(map, entry,
- "descriptors", descriptors,
- Map::kDescriptorsOffset);
-
- SetInternalReference(map, entry,
- "code_cache", map->code_cache(),
- Map::kCodeCacheOffset);
-}
-
-
-void V8HeapExplorer::ExtractSharedFunctionInfoReferences(
- int entry, SharedFunctionInfo* shared) {
- HeapObject* obj = shared;
- SetInternalReference(obj, entry,
- "name", shared->name(),
- SharedFunctionInfo::kNameOffset);
- TagObject(shared->code(), "(code)");
- SetInternalReference(obj, entry,
- "code", shared->code(),
- SharedFunctionInfo::kCodeOffset);
- TagObject(shared->scope_info(), "(function scope info)");
- SetInternalReference(obj, entry,
- "scope_info", shared->scope_info(),
- SharedFunctionInfo::kScopeInfoOffset);
- SetInternalReference(obj, entry,
- "instance_class_name", shared->instance_class_name(),
- SharedFunctionInfo::kInstanceClassNameOffset);
- SetInternalReference(obj, entry,
- "script", shared->script(),
- SharedFunctionInfo::kScriptOffset);
- TagObject(shared->construct_stub(), "(code)");
- SetInternalReference(obj, entry,
- "construct_stub", shared->construct_stub(),
- SharedFunctionInfo::kConstructStubOffset);
- SetInternalReference(obj, entry,
- "function_data", shared->function_data(),
- SharedFunctionInfo::kFunctionDataOffset);
- SetInternalReference(obj, entry,
- "debug_info", shared->debug_info(),
- SharedFunctionInfo::kDebugInfoOffset);
- SetInternalReference(obj, entry,
- "inferred_name", shared->inferred_name(),
- SharedFunctionInfo::kInferredNameOffset);
- SetInternalReference(obj, entry,
- "this_property_assignments",
- shared->this_property_assignments(),
- SharedFunctionInfo::kThisPropertyAssignmentsOffset);
- SetWeakReference(obj, entry,
- 1, shared->initial_map(),
- SharedFunctionInfo::kInitialMapOffset);
-}
-
-
-void V8HeapExplorer::ExtractScriptReferences(int entry, Script* script) {
- HeapObject* obj = script;
- SetInternalReference(obj, entry,
- "source", script->source(),
- Script::kSourceOffset);
- SetInternalReference(obj, entry,
- "name", script->name(),
- Script::kNameOffset);
- SetInternalReference(obj, entry,
- "data", script->data(),
- Script::kDataOffset);
- SetInternalReference(obj, entry,
- "context_data", script->context_data(),
- Script::kContextOffset);
- TagObject(script->line_ends(), "(script line ends)");
- SetInternalReference(obj, entry,
- "line_ends", script->line_ends(),
- Script::kLineEndsOffset);
-}
-
-
-void V8HeapExplorer::ExtractCodeCacheReferences(
- int entry, CodeCache* code_cache) {
- TagObject(code_cache->default_cache(), "(default code cache)");
- SetInternalReference(code_cache, entry,
- "default_cache", code_cache->default_cache(),
- CodeCache::kDefaultCacheOffset);
- TagObject(code_cache->normal_type_cache(), "(code type cache)");
- SetInternalReference(code_cache, entry,
- "type_cache", code_cache->normal_type_cache(),
- CodeCache::kNormalTypeCacheOffset);
-}
-
-
-void V8HeapExplorer::ExtractCodeReferences(int entry, Code* code) {
- TagObject(code->relocation_info(), "(code relocation info)");
- SetInternalReference(code, entry,
- "relocation_info", code->relocation_info(),
- Code::kRelocationInfoOffset);
- SetInternalReference(code, entry,
- "handler_table", code->handler_table(),
- Code::kHandlerTableOffset);
- TagObject(code->deoptimization_data(), "(code deopt data)");
- SetInternalReference(code, entry,
- "deoptimization_data", code->deoptimization_data(),
- Code::kDeoptimizationDataOffset);
- SetInternalReference(code, entry,
- "type_feedback_info", code->type_feedback_info(),
- Code::kTypeFeedbackInfoOffset);
- SetInternalReference(code, entry,
- "gc_metadata", code->gc_metadata(),
- Code::kGCMetadataOffset);
-}
-
-
-void V8HeapExplorer::ExtractJSGlobalPropertyCellReferences(
- int entry, JSGlobalPropertyCell* cell) {
- SetInternalReference(cell, entry, "value", cell->value());
-}
-
-
-void V8HeapExplorer::ExtractClosureReferences(JSObject* js_obj, int entry) {
- if (!js_obj->IsJSFunction()) return;
-
- JSFunction* func = JSFunction::cast(js_obj);
- if (func->shared()->bound()) {
- FixedArray* bindings = func->function_bindings();
- SetNativeBindReference(js_obj, entry, "bound_this",
- bindings->get(JSFunction::kBoundThisIndex));
- SetNativeBindReference(js_obj, entry, "bound_function",
- bindings->get(JSFunction::kBoundFunctionIndex));
- for (int i = JSFunction::kBoundArgumentsStartIndex;
- i < bindings->length(); i++) {
- const char* reference_name = collection_->names()->GetFormatted(
- "bound_argument_%d",
- i - JSFunction::kBoundArgumentsStartIndex);
- SetNativeBindReference(js_obj, entry, reference_name,
- bindings->get(i));
- }
- } else {
- Context* context = func->context()->declaration_context();
- ScopeInfo* scope_info = context->closure()->shared()->scope_info();
- // Add context allocated locals.
- int context_locals = scope_info->ContextLocalCount();
- for (int i = 0; i < context_locals; ++i) {
- String* local_name = scope_info->ContextLocalName(i);
- int idx = Context::MIN_CONTEXT_SLOTS + i;
- SetClosureReference(js_obj, entry, local_name, context->get(idx));
- }
-
- // Add function variable.
- if (scope_info->HasFunctionName()) {
- String* name = scope_info->FunctionName();
- VariableMode mode;
- int idx = scope_info->FunctionContextSlotIndex(name, &mode);
- if (idx >= 0) {
- SetClosureReference(js_obj, entry, name, context->get(idx));
- }
- }
- }
-}
-
-
-void V8HeapExplorer::ExtractPropertyReferences(JSObject* js_obj, int entry) {
- if (js_obj->HasFastProperties()) {
- DescriptorArray* descs = js_obj->map()->instance_descriptors();
- int real_size = js_obj->map()->NumberOfOwnDescriptors();
- for (int i = 0; i < descs->number_of_descriptors(); i++) {
- if (descs->GetDetails(i).descriptor_index() > real_size) continue;
- switch (descs->GetType(i)) {
- case FIELD: {
- int index = descs->GetFieldIndex(i);
-
- String* k = descs->GetKey(i);
- if (index < js_obj->map()->inobject_properties()) {
- Object* value = js_obj->InObjectPropertyAt(index);
- if (k != heap_->hidden_symbol()) {
- SetPropertyReference(
- js_obj, entry,
- k, value,
- NULL,
- js_obj->GetInObjectPropertyOffset(index));
- } else {
- TagObject(value, "(hidden properties)");
- SetInternalReference(
- js_obj, entry,
- "hidden_properties", value,
- js_obj->GetInObjectPropertyOffset(index));
- }
- } else {
- Object* value = js_obj->FastPropertyAt(index);
- if (k != heap_->hidden_symbol()) {
- SetPropertyReference(js_obj, entry, k, value);
- } else {
- TagObject(value, "(hidden properties)");
- SetInternalReference(js_obj, entry, "hidden_properties", value);
- }
- }
- break;
- }
- case CONSTANT_FUNCTION:
- SetPropertyReference(
- js_obj, entry,
- descs->GetKey(i), descs->GetConstantFunction(i));
- break;
- case CALLBACKS: {
- Object* callback_obj = descs->GetValue(i);
- if (callback_obj->IsAccessorPair()) {
- AccessorPair* accessors = AccessorPair::cast(callback_obj);
- if (Object* getter = accessors->getter()) {
- SetPropertyReference(js_obj, entry, descs->GetKey(i),
- getter, "get-%s");
- }
- if (Object* setter = accessors->setter()) {
- SetPropertyReference(js_obj, entry, descs->GetKey(i),
- setter, "set-%s");
- }
- }
- break;
- }
- case NORMAL: // only in slow mode
- case HANDLER: // only in lookup results, not in descriptors
- case INTERCEPTOR: // only in lookup results, not in descriptors
- break;
- case TRANSITION:
- case NONEXISTENT:
- UNREACHABLE();
- break;
- }
- }
- } else {
- StringDictionary* dictionary = js_obj->property_dictionary();
- int length = dictionary->Capacity();
- for (int i = 0; i < length; ++i) {
- Object* k = dictionary->KeyAt(i);
- if (dictionary->IsKey(k)) {
- Object* target = dictionary->ValueAt(i);
- // We assume that global objects can only have slow properties.
- Object* value = target->IsJSGlobalPropertyCell()
- ? JSGlobalPropertyCell::cast(target)->value()
- : target;
- if (k != heap_->hidden_symbol()) {
- SetPropertyReference(js_obj, entry, String::cast(k), value);
- } else {
- TagObject(value, "(hidden properties)");
- SetInternalReference(js_obj, entry, "hidden_properties", value);
- }
- }
- }
- }
-}
-
-
-void V8HeapExplorer::ExtractElementReferences(JSObject* js_obj, int entry) {
- if (js_obj->HasFastObjectElements()) {
- FixedArray* elements = FixedArray::cast(js_obj->elements());
- int length = js_obj->IsJSArray() ?
- Smi::cast(JSArray::cast(js_obj)->length())->value() :
- elements->length();
- for (int i = 0; i < length; ++i) {
- if (!elements->get(i)->IsTheHole()) {
- SetElementReference(js_obj, entry, i, elements->get(i));
- }
- }
- } else if (js_obj->HasDictionaryElements()) {
- SeededNumberDictionary* dictionary = js_obj->element_dictionary();
- int length = dictionary->Capacity();
- for (int i = 0; i < length; ++i) {
- Object* k = dictionary->KeyAt(i);
- if (dictionary->IsKey(k)) {
- ASSERT(k->IsNumber());
- uint32_t index = static_cast<uint32_t>(k->Number());
- SetElementReference(js_obj, entry, index, dictionary->ValueAt(i));
- }
- }
- }
-}
-
-
-void V8HeapExplorer::ExtractInternalReferences(JSObject* js_obj, int entry) {
- int length = js_obj->GetInternalFieldCount();
- for (int i = 0; i < length; ++i) {
- Object* o = js_obj->GetInternalField(i);
- SetInternalReference(
- js_obj, entry, i, o, js_obj->GetInternalFieldOffset(i));
- }
-}
-
-
-String* V8HeapExplorer::GetConstructorName(JSObject* object) {
- Heap* heap = object->GetHeap();
- if (object->IsJSFunction()) return heap->closure_symbol();
- String* constructor_name = object->constructor_name();
- if (constructor_name == heap->Object_symbol()) {
- // Look up an immediate "constructor" property, if it is a function,
- // return its name. This is for instances of binding objects, which
- // have prototype constructor type "Object".
- Object* constructor_prop = NULL;
- LookupResult result(heap->isolate());
- object->LocalLookupRealNamedProperty(heap->constructor_symbol(), &result);
- if (!result.IsFound()) return object->constructor_name();
-
- constructor_prop = result.GetLazyValue();
- if (constructor_prop->IsJSFunction()) {
- Object* maybe_name =
- JSFunction::cast(constructor_prop)->shared()->name();
- if (maybe_name->IsString()) {
- String* name = String::cast(maybe_name);
- if (name->length() > 0) return name;
- }
- }
- }
- return object->constructor_name();
-}
-
-
-HeapEntry* V8HeapExplorer::GetEntry(Object* obj) {
- if (!obj->IsHeapObject()) return NULL;
- return filler_->FindOrAddEntry(obj, this);
-}
-
-
-class RootsReferencesExtractor : public ObjectVisitor {
- private:
- struct IndexTag {
- IndexTag(int index, VisitorSynchronization::SyncTag tag)
- : index(index), tag(tag) { }
- int index;
- VisitorSynchronization::SyncTag tag;
- };
-
- public:
- RootsReferencesExtractor()
- : collecting_all_references_(false),
- previous_reference_count_(0) {
- }
-
- void VisitPointers(Object** start, Object** end) {
- if (collecting_all_references_) {
- for (Object** p = start; p < end; p++) all_references_.Add(*p);
- } else {
- for (Object** p = start; p < end; p++) strong_references_.Add(*p);
- }
- }
-
- void SetCollectingAllReferences() { collecting_all_references_ = true; }
-
- void FillReferences(V8HeapExplorer* explorer) {
- ASSERT(strong_references_.length() <= all_references_.length());
- for (int i = 0; i < reference_tags_.length(); ++i) {
- explorer->SetGcRootsReference(reference_tags_[i].tag);
- }
- int strong_index = 0, all_index = 0, tags_index = 0;
- while (all_index < all_references_.length()) {
- if (strong_index < strong_references_.length() &&
- strong_references_[strong_index] == all_references_[all_index]) {
- explorer->SetGcSubrootReference(reference_tags_[tags_index].tag,
- false,
- all_references_[all_index++]);
- ++strong_index;
- } else {
- explorer->SetGcSubrootReference(reference_tags_[tags_index].tag,
- true,
- all_references_[all_index++]);
- }
- if (reference_tags_[tags_index].index == all_index) ++tags_index;
- }
- }
-
- void Synchronize(VisitorSynchronization::SyncTag tag) {
- if (collecting_all_references_ &&
- previous_reference_count_ != all_references_.length()) {
- previous_reference_count_ = all_references_.length();
- reference_tags_.Add(IndexTag(previous_reference_count_, tag));
- }
- }
-
- private:
- bool collecting_all_references_;
- List<Object*> strong_references_;
- List<Object*> all_references_;
- int previous_reference_count_;
- List<IndexTag> reference_tags_;
-};
-
-
-bool V8HeapExplorer::IterateAndExtractReferences(
- SnapshotFillerInterface* filler) {
- HeapIterator iterator(HeapIterator::kFilterUnreachable);
-
- filler_ = filler;
- bool interrupted = false;
-
- // Heap iteration with filtering must be finished in any case.
- for (HeapObject* obj = iterator.next();
- obj != NULL;
- obj = iterator.next(), progress_->ProgressStep()) {
- if (!interrupted) {
- ExtractReferences(obj);
- if (!progress_->ProgressReport(false)) interrupted = true;
- }
- }
- if (interrupted) {
- filler_ = NULL;
- return false;
- }
-
- SetRootGcRootsReference();
- RootsReferencesExtractor extractor;
- heap_->IterateRoots(&extractor, VISIT_ONLY_STRONG);
- extractor.SetCollectingAllReferences();
- heap_->IterateRoots(&extractor, VISIT_ALL);
- extractor.FillReferences(this);
- filler_ = NULL;
- return progress_->ProgressReport(true);
-}
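The strong/weak classification performed by RootsReferencesExtractor::FillReferences above is a two-pointer merge over the two root lists gathered here: the VISIT_ONLY_STRONG pass fills strong_references_, the VISIT_ALL pass fills all_references_, and any root seen only in the second list must be weakly held. A minimal sketch of the same idea (editorial illustration, not part of this patch):

  // Assumes VISIT_ALL reports the strong roots in the same relative order as
  // VISIT_ONLY_STRONG, so the strong list is a subsequence of the full list.
  bool IsWeakRoot(const List<Object*>& strong, const List<Object*>& all,
                  int* strong_index, int all_index) {
    if (*strong_index < strong.length() &&
        strong[*strong_index] == all[all_index]) {
      ++(*strong_index);  // Matched the next strong root: not a weak edge.
      return false;
    }
    return true;  // Seen only by the VISIT_ALL pass: report as weak.
  }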
-
-
-bool V8HeapExplorer::IsEssentialObject(Object* object) {
- // We have to use raw_unchecked_* versions because checked versions
- // would fail during iteration over object properties.
- return object->IsHeapObject()
- && !object->IsOddball()
- && object != heap_->raw_unchecked_empty_byte_array()
- && object != heap_->raw_unchecked_empty_fixed_array()
- && object != heap_->raw_unchecked_empty_descriptor_array()
- && object != heap_->raw_unchecked_fixed_array_map()
- && object != heap_->raw_unchecked_global_property_cell_map()
- && object != heap_->raw_unchecked_shared_function_info_map()
- && object != heap_->raw_unchecked_free_space_map()
- && object != heap_->raw_unchecked_one_pointer_filler_map()
- && object != heap_->raw_unchecked_two_pointer_filler_map();
-}
-
-
-void V8HeapExplorer::SetClosureReference(HeapObject* parent_obj,
- int parent_entry,
- String* reference_name,
- Object* child_obj) {
- HeapEntry* child_entry = GetEntry(child_obj);
- if (child_entry != NULL) {
- filler_->SetNamedReference(HeapGraphEdge::kContextVariable,
- parent_entry,
- collection_->names()->GetName(reference_name),
- child_entry);
- }
-}
-
-
-void V8HeapExplorer::SetNativeBindReference(HeapObject* parent_obj,
- int parent_entry,
- const char* reference_name,
- Object* child_obj) {
- HeapEntry* child_entry = GetEntry(child_obj);
- if (child_entry != NULL) {
- filler_->SetNamedReference(HeapGraphEdge::kShortcut,
- parent_entry,
- reference_name,
- child_entry);
- }
-}
-
-
-void V8HeapExplorer::SetElementReference(HeapObject* parent_obj,
- int parent_entry,
- int index,
- Object* child_obj) {
- HeapEntry* child_entry = GetEntry(child_obj);
- if (child_entry != NULL) {
- filler_->SetIndexedReference(HeapGraphEdge::kElement,
- parent_entry,
- index,
- child_entry);
- }
-}
-
-
-void V8HeapExplorer::SetInternalReference(HeapObject* parent_obj,
- int parent_entry,
- const char* reference_name,
- Object* child_obj,
- int field_offset) {
- HeapEntry* child_entry = GetEntry(child_obj);
- if (child_entry == NULL) return;
- if (IsEssentialObject(child_obj)) {
- filler_->SetNamedReference(HeapGraphEdge::kInternal,
- parent_entry,
- reference_name,
- child_entry);
- }
- IndexedReferencesExtractor::MarkVisitedField(parent_obj, field_offset);
-}
-
-
-void V8HeapExplorer::SetInternalReference(HeapObject* parent_obj,
- int parent_entry,
- int index,
- Object* child_obj,
- int field_offset) {
- HeapEntry* child_entry = GetEntry(child_obj);
- if (child_entry == NULL) return;
- if (IsEssentialObject(child_obj)) {
- filler_->SetNamedReference(HeapGraphEdge::kInternal,
- parent_entry,
- collection_->names()->GetName(index),
- child_entry);
- }
- IndexedReferencesExtractor::MarkVisitedField(parent_obj, field_offset);
-}
-
-
-void V8HeapExplorer::SetHiddenReference(HeapObject* parent_obj,
- int parent_entry,
- int index,
- Object* child_obj) {
- HeapEntry* child_entry = GetEntry(child_obj);
- if (child_entry != NULL && IsEssentialObject(child_obj)) {
- filler_->SetIndexedReference(HeapGraphEdge::kHidden,
- parent_entry,
- index,
- child_entry);
- }
-}
-
-
-void V8HeapExplorer::SetWeakReference(HeapObject* parent_obj,
- int parent_entry,
- int index,
- Object* child_obj,
- int field_offset) {
- HeapEntry* child_entry = GetEntry(child_obj);
- if (child_entry != NULL) {
- filler_->SetIndexedReference(HeapGraphEdge::kWeak,
- parent_entry,
- index,
- child_entry);
- IndexedReferencesExtractor::MarkVisitedField(parent_obj, field_offset);
- }
-}
-
-
-void V8HeapExplorer::SetPropertyReference(HeapObject* parent_obj,
- int parent_entry,
- String* reference_name,
- Object* child_obj,
- const char* name_format_string,
- int field_offset) {
- HeapEntry* child_entry = GetEntry(child_obj);
- if (child_entry != NULL) {
- HeapGraphEdge::Type type = reference_name->length() > 0 ?
- HeapGraphEdge::kProperty : HeapGraphEdge::kInternal;
- const char* name = name_format_string != NULL ?
- collection_->names()->GetFormatted(
- name_format_string,
- *reference_name->ToCString(DISALLOW_NULLS,
- ROBUST_STRING_TRAVERSAL)) :
- collection_->names()->GetName(reference_name);
-
- filler_->SetNamedReference(type,
- parent_entry,
- name,
- child_entry);
- IndexedReferencesExtractor::MarkVisitedField(parent_obj, field_offset);
- }
-}
-
-
-void V8HeapExplorer::SetRootGcRootsReference() {
- filler_->SetIndexedAutoIndexReference(
- HeapGraphEdge::kElement,
- snapshot_->root()->index(),
- snapshot_->gc_roots());
-}
-
-
-void V8HeapExplorer::SetUserGlobalReference(Object* child_obj) {
- HeapEntry* child_entry = GetEntry(child_obj);
- ASSERT(child_entry != NULL);
- filler_->SetNamedAutoIndexReference(
- HeapGraphEdge::kShortcut,
- snapshot_->root()->index(),
- child_entry);
-}
-
-
-void V8HeapExplorer::SetGcRootsReference(VisitorSynchronization::SyncTag tag) {
- filler_->SetIndexedAutoIndexReference(
- HeapGraphEdge::kElement,
- snapshot_->gc_roots()->index(),
- snapshot_->gc_subroot(tag));
-}
-
-
-void V8HeapExplorer::SetGcSubrootReference(
- VisitorSynchronization::SyncTag tag, bool is_weak, Object* child_obj) {
- HeapEntry* child_entry = GetEntry(child_obj);
- if (child_entry != NULL) {
- const char* name = GetStrongGcSubrootName(child_obj);
- if (name != NULL) {
- filler_->SetNamedReference(
- HeapGraphEdge::kInternal,
- snapshot_->gc_subroot(tag)->index(),
- name,
- child_entry);
- } else {
- filler_->SetIndexedAutoIndexReference(
- is_weak ? HeapGraphEdge::kWeak : HeapGraphEdge::kElement,
- snapshot_->gc_subroot(tag)->index(),
- child_entry);
- }
- }
-}
-
-
-const char* V8HeapExplorer::GetStrongGcSubrootName(Object* object) {
- if (strong_gc_subroot_names_.is_empty()) {
-#define NAME_ENTRY(name) strong_gc_subroot_names_.SetTag(heap_->name(), #name);
-#define ROOT_NAME(type, name, camel_name) NAME_ENTRY(name)
- STRONG_ROOT_LIST(ROOT_NAME)
-#undef ROOT_NAME
-#define STRUCT_MAP_NAME(NAME, Name, name) NAME_ENTRY(name##_map)
- STRUCT_LIST(STRUCT_MAP_NAME)
-#undef STRUCT_MAP_NAME
-#define SYMBOL_NAME(name, str) NAME_ENTRY(name)
- SYMBOL_LIST(SYMBOL_NAME)
-#undef SYMBOL_NAME
-#undef NAME_ENTRY
- CHECK(!strong_gc_subroot_names_.is_empty());
- }
- return strong_gc_subroot_names_.GetTag(object);
-}
-
-
-void V8HeapExplorer::TagObject(Object* obj, const char* tag) {
- if (IsEssentialObject(obj)) {
- HeapEntry* entry = GetEntry(obj);
- if (entry->name()[0] == '\0') {
- entry->set_name(tag);
- }
- }
-}
-
-
-class GlobalObjectsEnumerator : public ObjectVisitor {
- public:
- virtual void VisitPointers(Object** start, Object** end) {
- for (Object** p = start; p < end; p++) {
- if ((*p)->IsNativeContext()) {
- Context* context = Context::cast(*p);
- JSObject* proxy = context->global_proxy();
- if (proxy->IsJSGlobalProxy()) {
- Object* global = proxy->map()->prototype();
- if (global->IsJSGlobalObject()) {
- objects_.Add(Handle<JSGlobalObject>(JSGlobalObject::cast(global)));
- }
- }
- }
- }
- }
- int count() { return objects_.length(); }
- Handle<JSGlobalObject>& at(int i) { return objects_[i]; }
-
- private:
- List<Handle<JSGlobalObject> > objects_;
-};
-
-
-// Modifies heap. Must not be run during heap traversal.
-void V8HeapExplorer::TagGlobalObjects() {
- HandleScope scope;
- Isolate* isolate = Isolate::Current();
- GlobalObjectsEnumerator enumerator;
- isolate->global_handles()->IterateAllRoots(&enumerator);
- Handle<String> document_string =
- isolate->factory()->NewStringFromAscii(CStrVector("document"));
- Handle<String> url_string =
- isolate->factory()->NewStringFromAscii(CStrVector("URL"));
- const char** urls = NewArray<const char*>(enumerator.count());
- for (int i = 0, l = enumerator.count(); i < l; ++i) {
- urls[i] = NULL;
- HandleScope scope;
- Handle<JSGlobalObject> global_obj = enumerator.at(i);
- Object* obj_document;
- if (global_obj->GetProperty(*document_string)->ToObject(&obj_document) &&
- obj_document->IsJSObject()) {
- // FixMe: Workaround: SharedWorker's current Isolate has NULL context.
-      // As a result, GetProperty(*url_string) will crash.
- if (!Isolate::Current()->context() && obj_document->IsJSGlobalProxy())
- continue;
- JSObject* document = JSObject::cast(obj_document);
- Object* obj_url;
- if (document->GetProperty(*url_string)->ToObject(&obj_url) &&
- obj_url->IsString()) {
- urls[i] = collection_->names()->GetName(String::cast(obj_url));
- }
- }
- }
-
- AssertNoAllocation no_allocation;
- for (int i = 0, l = enumerator.count(); i < l; ++i) {
- objects_tags_.SetTag(*enumerator.at(i), urls[i]);
- }
-
- DeleteArray(urls);
-}
-
-
-class GlobalHandlesExtractor : public ObjectVisitor {
- public:
- explicit GlobalHandlesExtractor(NativeObjectsExplorer* explorer)
- : explorer_(explorer) {}
- virtual ~GlobalHandlesExtractor() {}
- virtual void VisitPointers(Object** start, Object** end) {
- UNREACHABLE();
- }
- virtual void VisitEmbedderReference(Object** p, uint16_t class_id) {
- explorer_->VisitSubtreeWrapper(p, class_id);
- }
- private:
- NativeObjectsExplorer* explorer_;
-};
-
-
-class BasicHeapEntriesAllocator : public HeapEntriesAllocator {
- public:
- BasicHeapEntriesAllocator(
- HeapSnapshot* snapshot,
- HeapEntry::Type entries_type)
- : snapshot_(snapshot),
- collection_(snapshot_->collection()),
- entries_type_(entries_type) {
- }
- virtual HeapEntry* AllocateEntry(HeapThing ptr);
- private:
- HeapSnapshot* snapshot_;
- HeapSnapshotsCollection* collection_;
- HeapEntry::Type entries_type_;
-};
-
-
-HeapEntry* BasicHeapEntriesAllocator::AllocateEntry(HeapThing ptr) {
- v8::RetainedObjectInfo* info = reinterpret_cast<v8::RetainedObjectInfo*>(ptr);
- intptr_t elements = info->GetElementCount();
- intptr_t size = info->GetSizeInBytes();
- const char* name = elements != -1
- ? collection_->names()->GetFormatted(
- "%s / %" V8_PTR_PREFIX "d entries", info->GetLabel(), elements)
- : collection_->names()->GetCopy(info->GetLabel());
- return snapshot_->AddEntry(
- entries_type_,
- name,
- HeapObjectsMap::GenerateId(info),
- size != -1 ? static_cast<int>(size) : 0);
-}
-
-
-NativeObjectsExplorer::NativeObjectsExplorer(
- HeapSnapshot* snapshot, SnapshottingProgressReportingInterface* progress)
- : snapshot_(snapshot),
- collection_(snapshot_->collection()),
- progress_(progress),
- embedder_queried_(false),
- objects_by_info_(RetainedInfosMatch),
- native_groups_(StringsMatch),
- filler_(NULL) {
- synthetic_entries_allocator_ =
- new BasicHeapEntriesAllocator(snapshot, HeapEntry::kSynthetic);
- native_entries_allocator_ =
- new BasicHeapEntriesAllocator(snapshot, HeapEntry::kNative);
-}
-
-
-NativeObjectsExplorer::~NativeObjectsExplorer() {
- for (HashMap::Entry* p = objects_by_info_.Start();
- p != NULL;
- p = objects_by_info_.Next(p)) {
- v8::RetainedObjectInfo* info =
- reinterpret_cast<v8::RetainedObjectInfo*>(p->key);
- info->Dispose();
- List<HeapObject*>* objects =
- reinterpret_cast<List<HeapObject*>* >(p->value);
- delete objects;
- }
- for (HashMap::Entry* p = native_groups_.Start();
- p != NULL;
- p = native_groups_.Next(p)) {
- v8::RetainedObjectInfo* info =
- reinterpret_cast<v8::RetainedObjectInfo*>(p->value);
- info->Dispose();
- }
- delete synthetic_entries_allocator_;
- delete native_entries_allocator_;
-}
-
-
-int NativeObjectsExplorer::EstimateObjectsCount() {
- FillRetainedObjects();
- return objects_by_info_.occupancy();
-}
-
-
-void NativeObjectsExplorer::FillRetainedObjects() {
- if (embedder_queried_) return;
- Isolate* isolate = Isolate::Current();
- // Record objects that are joined into ObjectGroups.
- isolate->heap()->CallGlobalGCPrologueCallback();
- List<ObjectGroup*>* groups = isolate->global_handles()->object_groups();
- for (int i = 0; i < groups->length(); ++i) {
- ObjectGroup* group = groups->at(i);
- if (group->info_ == NULL) continue;
- List<HeapObject*>* list = GetListMaybeDisposeInfo(group->info_);
- for (size_t j = 0; j < group->length_; ++j) {
- HeapObject* obj = HeapObject::cast(*group->objects_[j]);
- list->Add(obj);
- in_groups_.Insert(obj);
- }
- group->info_ = NULL; // Acquire info object ownership.
- }
- isolate->global_handles()->RemoveObjectGroups();
- isolate->heap()->CallGlobalGCEpilogueCallback();
- // Record objects that are not in ObjectGroups, but have class ID.
- GlobalHandlesExtractor extractor(this);
- isolate->global_handles()->IterateAllRootsWithClassIds(&extractor);
- embedder_queried_ = true;
-}
-
-void NativeObjectsExplorer::FillImplicitReferences() {
- Isolate* isolate = Isolate::Current();
- List<ImplicitRefGroup*>* groups =
- isolate->global_handles()->implicit_ref_groups();
- for (int i = 0; i < groups->length(); ++i) {
- ImplicitRefGroup* group = groups->at(i);
- HeapObject* parent = *group->parent_;
- int parent_entry =
- filler_->FindOrAddEntry(parent, native_entries_allocator_)->index();
- ASSERT(parent_entry != HeapEntry::kNoEntry);
- Object*** children = group->children_;
- for (size_t j = 0; j < group->length_; ++j) {
- Object* child = *children[j];
- HeapEntry* child_entry =
- filler_->FindOrAddEntry(child, native_entries_allocator_);
- filler_->SetNamedReference(
- HeapGraphEdge::kInternal,
- parent_entry,
- "native",
- child_entry);
- }
- }
-}
-
-List<HeapObject*>* NativeObjectsExplorer::GetListMaybeDisposeInfo(
- v8::RetainedObjectInfo* info) {
- HashMap::Entry* entry =
- objects_by_info_.Lookup(info, InfoHash(info), true);
- if (entry->value != NULL) {
- info->Dispose();
- } else {
- entry->value = new List<HeapObject*>(4);
- }
- return reinterpret_cast<List<HeapObject*>* >(entry->value);
-}
-
-
-bool NativeObjectsExplorer::IterateAndExtractReferences(
- SnapshotFillerInterface* filler) {
- filler_ = filler;
- FillRetainedObjects();
- FillImplicitReferences();
- if (EstimateObjectsCount() > 0) {
- for (HashMap::Entry* p = objects_by_info_.Start();
- p != NULL;
- p = objects_by_info_.Next(p)) {
- v8::RetainedObjectInfo* info =
- reinterpret_cast<v8::RetainedObjectInfo*>(p->key);
- SetNativeRootReference(info);
- List<HeapObject*>* objects =
- reinterpret_cast<List<HeapObject*>* >(p->value);
- for (int i = 0; i < objects->length(); ++i) {
- SetWrapperNativeReferences(objects->at(i), info);
- }
- }
- SetRootNativeRootsReference();
- }
- filler_ = NULL;
- return true;
-}
-
-
-class NativeGroupRetainedObjectInfo : public v8::RetainedObjectInfo {
- public:
- explicit NativeGroupRetainedObjectInfo(const char* label)
- : disposed_(false),
- hash_(reinterpret_cast<intptr_t>(label)),
- label_(label) {
- }
-
- virtual ~NativeGroupRetainedObjectInfo() {}
- virtual void Dispose() {
- CHECK(!disposed_);
- disposed_ = true;
- delete this;
+CodeEntry* ProfileGenerator::EntryForVMState(StateTag tag) {
+ switch (tag) {
+ case GC:
+ return gc_entry_;
+ case JS:
+ case COMPILER:
+      // DOM event handlers are reported as OTHER / EXTERNAL entries.
+ // To avoid confusing people, let's put all these entries into
+ // one bucket.
+ case OTHER:
+ case EXTERNAL:
+ return program_entry_;
+ case IDLE:
+ return idle_entry_;
+ default: return NULL;
}
- virtual bool IsEquivalent(RetainedObjectInfo* other) {
- return hash_ == other->GetHash() && !strcmp(label_, other->GetLabel());
- }
- virtual intptr_t GetHash() { return hash_; }
- virtual const char* GetLabel() { return label_; }
-
- private:
- bool disposed_;
- intptr_t hash_;
- const char* label_;
-};
-
-
-NativeGroupRetainedObjectInfo* NativeObjectsExplorer::FindOrAddGroupInfo(
- const char* label) {
- const char* label_copy = collection_->names()->GetCopy(label);
- uint32_t hash = HashSequentialString(label_copy,
- static_cast<int>(strlen(label_copy)),
- HEAP->HashSeed());
- HashMap::Entry* entry = native_groups_.Lookup(const_cast<char*>(label_copy),
- hash, true);
- if (entry->value == NULL) {
- entry->value = new NativeGroupRetainedObjectInfo(label);
- }
- return static_cast<NativeGroupRetainedObjectInfo*>(entry->value);
-}
-
-
-void NativeObjectsExplorer::SetNativeRootReference(
- v8::RetainedObjectInfo* info) {
- HeapEntry* child_entry =
- filler_->FindOrAddEntry(info, native_entries_allocator_);
- ASSERT(child_entry != NULL);
- NativeGroupRetainedObjectInfo* group_info =
- FindOrAddGroupInfo(info->GetGroupLabel());
- HeapEntry* group_entry =
- filler_->FindOrAddEntry(group_info, synthetic_entries_allocator_);
- filler_->SetNamedAutoIndexReference(
- HeapGraphEdge::kInternal,
- group_entry->index(),
- child_entry);
-}
-
-
-void NativeObjectsExplorer::SetWrapperNativeReferences(
- HeapObject* wrapper, v8::RetainedObjectInfo* info) {
- HeapEntry* wrapper_entry = filler_->FindEntry(wrapper);
- ASSERT(wrapper_entry != NULL);
- HeapEntry* info_entry =
- filler_->FindOrAddEntry(info, native_entries_allocator_);
- ASSERT(info_entry != NULL);
- filler_->SetNamedReference(HeapGraphEdge::kInternal,
- wrapper_entry->index(),
- "native",
- info_entry);
- filler_->SetIndexedAutoIndexReference(HeapGraphEdge::kElement,
- info_entry->index(),
- wrapper_entry);
-}
-
-
-void NativeObjectsExplorer::SetRootNativeRootsReference() {
- for (HashMap::Entry* entry = native_groups_.Start();
- entry;
- entry = native_groups_.Next(entry)) {
- NativeGroupRetainedObjectInfo* group_info =
- static_cast<NativeGroupRetainedObjectInfo*>(entry->value);
- HeapEntry* group_entry =
- filler_->FindOrAddEntry(group_info, native_entries_allocator_);
- ASSERT(group_entry != NULL);
- filler_->SetIndexedAutoIndexReference(
- HeapGraphEdge::kElement,
- snapshot_->root()->index(),
- group_entry);
- }
-}
-
-
-void NativeObjectsExplorer::VisitSubtreeWrapper(Object** p, uint16_t class_id) {
- if (in_groups_.Contains(*p)) return;
- Isolate* isolate = Isolate::Current();
- v8::RetainedObjectInfo* info =
- isolate->heap_profiler()->ExecuteWrapperClassCallback(class_id, p);
- if (info == NULL) return;
- GetListMaybeDisposeInfo(info)->Add(HeapObject::cast(*p));
-}
-
-
-class SnapshotFiller : public SnapshotFillerInterface {
- public:
- explicit SnapshotFiller(HeapSnapshot* snapshot, HeapEntriesMap* entries)
- : snapshot_(snapshot),
- collection_(snapshot->collection()),
- entries_(entries) { }
- HeapEntry* AddEntry(HeapThing ptr, HeapEntriesAllocator* allocator) {
- HeapEntry* entry = allocator->AllocateEntry(ptr);
- entries_->Pair(ptr, entry->index());
- return entry;
- }
- HeapEntry* FindEntry(HeapThing ptr) {
- int index = entries_->Map(ptr);
- return index != HeapEntry::kNoEntry ? &snapshot_->entries()[index] : NULL;
- }
- HeapEntry* FindOrAddEntry(HeapThing ptr, HeapEntriesAllocator* allocator) {
- HeapEntry* entry = FindEntry(ptr);
- return entry != NULL ? entry : AddEntry(ptr, allocator);
- }
- void SetIndexedReference(HeapGraphEdge::Type type,
- int parent,
- int index,
- HeapEntry* child_entry) {
- HeapEntry* parent_entry = &snapshot_->entries()[parent];
- parent_entry->SetIndexedReference(type, index, child_entry);
- }
- void SetIndexedAutoIndexReference(HeapGraphEdge::Type type,
- int parent,
- HeapEntry* child_entry) {
- HeapEntry* parent_entry = &snapshot_->entries()[parent];
- int index = parent_entry->children_count() + 1;
- parent_entry->SetIndexedReference(type, index, child_entry);
- }
- void SetNamedReference(HeapGraphEdge::Type type,
- int parent,
- const char* reference_name,
- HeapEntry* child_entry) {
- HeapEntry* parent_entry = &snapshot_->entries()[parent];
- parent_entry->SetNamedReference(type, reference_name, child_entry);
- }
- void SetNamedAutoIndexReference(HeapGraphEdge::Type type,
- int parent,
- HeapEntry* child_entry) {
- HeapEntry* parent_entry = &snapshot_->entries()[parent];
- int index = parent_entry->children_count() + 1;
- parent_entry->SetNamedReference(
- type,
- collection_->names()->GetName(index),
- child_entry);
- }
-
- private:
- HeapSnapshot* snapshot_;
- HeapSnapshotsCollection* collection_;
- HeapEntriesMap* entries_;
-};
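A small editorial note on the auto-index helpers above: the index passed to SetIndexedReference is children_count() + 1 at the time of the call, so auto-indexed children are numbered starting from 1, and SetNamedAutoIndexReference turns that same counter into a string through names()->GetName(index).

  // First auto-indexed child added to a fresh parent entry:
  //   index = parent_entry->children_count() + 1;   // == 1, not 0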
-
-
-HeapSnapshotGenerator::HeapSnapshotGenerator(HeapSnapshot* snapshot,
- v8::ActivityControl* control)
- : snapshot_(snapshot),
- control_(control),
- v8_heap_explorer_(snapshot_, this),
- dom_explorer_(snapshot_, this) {
-}
-
-
-bool HeapSnapshotGenerator::GenerateSnapshot() {
- v8_heap_explorer_.TagGlobalObjects();
-
- // TODO(1562) Profiler assumes that any object that is in the heap after
- // full GC is reachable from the root when computing dominators.
- // This is not true for weakly reachable objects.
- // As a temporary solution we call GC twice.
- Isolate::Current()->heap()->CollectAllGarbage(
- Heap::kMakeHeapIterableMask,
- "HeapSnapshotGenerator::GenerateSnapshot");
- Isolate::Current()->heap()->CollectAllGarbage(
- Heap::kMakeHeapIterableMask,
- "HeapSnapshotGenerator::GenerateSnapshot");
-
-#ifdef VERIFY_HEAP
- Heap* debug_heap = Isolate::Current()->heap();
- CHECK(!debug_heap->old_data_space()->was_swept_conservatively());
- CHECK(!debug_heap->old_pointer_space()->was_swept_conservatively());
- CHECK(!debug_heap->code_space()->was_swept_conservatively());
- CHECK(!debug_heap->cell_space()->was_swept_conservatively());
- CHECK(!debug_heap->map_space()->was_swept_conservatively());
-#endif
-
- // The following code uses heap iterators, so we want the heap to be
- // stable. It should follow TagGlobalObjects as that can allocate.
- AssertNoAllocation no_alloc;
-
-#ifdef VERIFY_HEAP
- debug_heap->Verify();
-#endif
-
- SetProgressTotal(1); // 1 pass.
-
-#ifdef VERIFY_HEAP
- debug_heap->Verify();
-#endif
-
- if (!FillReferences()) return false;
-
- snapshot_->FillChildren();
- snapshot_->RememberLastJSObjectId();
-
- progress_counter_ = progress_total_;
- if (!ProgressReport(true)) return false;
- return true;
-}
-
-
-void HeapSnapshotGenerator::ProgressStep() {
- ++progress_counter_;
-}
-
-
-bool HeapSnapshotGenerator::ProgressReport(bool force) {
- const int kProgressReportGranularity = 10000;
- if (control_ != NULL
- && (force || progress_counter_ % kProgressReportGranularity == 0)) {
- return
- control_->ReportProgressValue(progress_counter_, progress_total_) ==
- v8::ActivityControl::kContinue;
- }
- return true;
-}
-
-
-void HeapSnapshotGenerator::SetProgressTotal(int iterations_count) {
- if (control_ == NULL) return;
- HeapIterator iterator(HeapIterator::kFilterUnreachable);
- progress_total_ = iterations_count * (
- v8_heap_explorer_.EstimateObjectsCount(&iterator) +
- dom_explorer_.EstimateObjectsCount());
- progress_counter_ = 0;
-}
-
-
-bool HeapSnapshotGenerator::FillReferences() {
- SnapshotFiller filler(snapshot_, &entries_);
- v8_heap_explorer_.AddRootEntries(&filler);
- return v8_heap_explorer_.IterateAndExtractReferences(&filler)
- && dom_explorer_.IterateAndExtractReferences(&filler);
-}
-
-
-template<int bytes> struct MaxDecimalDigitsIn;
-template<> struct MaxDecimalDigitsIn<4> {
- static const int kSigned = 11;
- static const int kUnsigned = 10;
-};
-template<> struct MaxDecimalDigitsIn<8> {
- static const int kSigned = 20;
- static const int kUnsigned = 20;
-};
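The two specializations above simply hard-code the widest decimal representation each integer size can produce (an editorial aside, not part of the original source):

  // 32-bit: UINT32_MAX = 4294967295           -> 10 digits (kUnsigned)
  //         INT32_MIN  = -2147483648          -> 11 characters with sign (kSigned)
  // 64-bit: UINT64_MAX = 18446744073709551615 -> 20 digits
  //         INT64_MIN  = -9223372036854775808 -> 20 characters with sign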
-
-
-class OutputStreamWriter {
- public:
- explicit OutputStreamWriter(v8::OutputStream* stream)
- : stream_(stream),
- chunk_size_(stream->GetChunkSize()),
- chunk_(chunk_size_),
- chunk_pos_(0),
- aborted_(false) {
- ASSERT(chunk_size_ > 0);
- }
- bool aborted() { return aborted_; }
- void AddCharacter(char c) {
- ASSERT(c != '\0');
- ASSERT(chunk_pos_ < chunk_size_);
- chunk_[chunk_pos_++] = c;
- MaybeWriteChunk();
- }
- void AddString(const char* s) {
- AddSubstring(s, StrLength(s));
- }
- void AddSubstring(const char* s, int n) {
- if (n <= 0) return;
- ASSERT(static_cast<size_t>(n) <= strlen(s));
- const char* s_end = s + n;
- while (s < s_end) {
- int s_chunk_size = Min(
- chunk_size_ - chunk_pos_, static_cast<int>(s_end - s));
- ASSERT(s_chunk_size > 0);
- memcpy(chunk_.start() + chunk_pos_, s, s_chunk_size);
- s += s_chunk_size;
- chunk_pos_ += s_chunk_size;
- MaybeWriteChunk();
- }
- }
- void AddNumber(unsigned n) { AddNumberImpl<unsigned>(n, "%u"); }
- void Finalize() {
- if (aborted_) return;
- ASSERT(chunk_pos_ < chunk_size_);
- if (chunk_pos_ != 0) {
- WriteChunk();
- }
- stream_->EndOfStream();
- }
-
- private:
- template<typename T>
- void AddNumberImpl(T n, const char* format) {
- // Buffer for the longest value plus trailing \0
- static const int kMaxNumberSize =
- MaxDecimalDigitsIn<sizeof(T)>::kUnsigned + 1;
- if (chunk_size_ - chunk_pos_ >= kMaxNumberSize) {
- int result = OS::SNPrintF(
- chunk_.SubVector(chunk_pos_, chunk_size_), format, n);
- ASSERT(result != -1);
- chunk_pos_ += result;
- MaybeWriteChunk();
- } else {
- EmbeddedVector<char, kMaxNumberSize> buffer;
- int result = OS::SNPrintF(buffer, format, n);
- USE(result);
- ASSERT(result != -1);
- AddString(buffer.start());
- }
- }
- void MaybeWriteChunk() {
- ASSERT(chunk_pos_ <= chunk_size_);
- if (chunk_pos_ == chunk_size_) {
- WriteChunk();
- }
- }
- void WriteChunk() {
- if (aborted_) return;
- if (stream_->WriteAsciiChunk(chunk_.start(), chunk_pos_) ==
- v8::OutputStream::kAbort) aborted_ = true;
- chunk_pos_ = 0;
- }
-
- v8::OutputStream* stream_;
- int chunk_size_;
- ScopedVector<char> chunk_;
- int chunk_pos_;
- bool aborted_;
-};
-
-
-// type, name|index, to_node.
-const int HeapSnapshotJSONSerializer::kEdgeFieldsCount = 3;
-// type, name, id, self_size, children_index.
-const int HeapSnapshotJSONSerializer::kNodeFieldsCount = 5;
-
-void HeapSnapshotJSONSerializer::Serialize(v8::OutputStream* stream) {
- ASSERT(writer_ == NULL);
- writer_ = new OutputStreamWriter(stream);
-
- HeapSnapshot* original_snapshot = NULL;
- if (snapshot_->RawSnapshotSize() >=
- SnapshotSizeConstants<kPointerSize>::kMaxSerializableSnapshotRawSize) {
- // The snapshot is too big. Serialize a fake snapshot.
- original_snapshot = snapshot_;
- snapshot_ = CreateFakeSnapshot();
- }
-
- SerializeImpl();
-
- delete writer_;
- writer_ = NULL;
-
- if (original_snapshot != NULL) {
- delete snapshot_;
- snapshot_ = original_snapshot;
- }
-}
-
-
-HeapSnapshot* HeapSnapshotJSONSerializer::CreateFakeSnapshot() {
- HeapSnapshot* result = new HeapSnapshot(snapshot_->collection(),
- HeapSnapshot::kFull,
- snapshot_->title(),
- snapshot_->uid());
- result->AddRootEntry();
- const char* text = snapshot_->collection()->names()->GetFormatted(
- "The snapshot is too big. "
- "Maximum snapshot size is %" V8_PTR_PREFIX "u MB. "
- "Actual snapshot size is %" V8_PTR_PREFIX "u MB.",
- SnapshotSizeConstants<kPointerSize>::kMaxSerializableSnapshotRawSize / MB,
- (snapshot_->RawSnapshotSize() + MB - 1) / MB);
- HeapEntry* message = result->AddEntry(HeapEntry::kString, text, 0, 4);
- result->root()->SetIndexedReference(HeapGraphEdge::kElement, 1, message);
- result->FillChildren();
- return result;
-}
-
-
-void HeapSnapshotJSONSerializer::SerializeImpl() {
- ASSERT(0 == snapshot_->root()->index());
- writer_->AddCharacter('{');
- writer_->AddString("\"snapshot\":{");
- SerializeSnapshot();
- if (writer_->aborted()) return;
- writer_->AddString("},\n");
- writer_->AddString("\"nodes\":[");
- SerializeNodes();
- if (writer_->aborted()) return;
- writer_->AddString("],\n");
- writer_->AddString("\"edges\":[");
- SerializeEdges();
- if (writer_->aborted()) return;
- writer_->AddString("],\n");
- writer_->AddString("\"strings\":[");
- SerializeStrings();
- if (writer_->aborted()) return;
- writer_->AddCharacter(']');
- writer_->AddCharacter('}');
- writer_->Finalize();
-}
-
-
-int HeapSnapshotJSONSerializer::GetStringId(const char* s) {
- HashMap::Entry* cache_entry = strings_.Lookup(
- const_cast<char*>(s), ObjectHash(s), true);
- if (cache_entry->value == NULL) {
- cache_entry->value = reinterpret_cast<void*>(next_string_id_++);
- }
- return static_cast<int>(reinterpret_cast<intptr_t>(cache_entry->value));
-}
-
-
-static int utoa(unsigned value, const Vector<char>& buffer, int buffer_pos) {
- int number_of_digits = 0;
- unsigned t = value;
- do {
- ++number_of_digits;
- } while (t /= 10);
-
- buffer_pos += number_of_digits;
- int result = buffer_pos;
- do {
- int last_digit = value % 10;
- buffer[--buffer_pos] = '0' + last_digit;
- value /= 10;
- } while (value);
- return result;
-}
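utoa() appends the decimal form of value starting at buffer_pos, writes no terminating NUL, and returns the position just past the last digit; SerializeEdge and SerializeNode below rely on that to build their comma-separated rows. A minimal calling sketch (illustrative only):

  EmbeddedVector<char, 16> buffer;
  int pos = 0;
  pos = utoa(42, buffer, pos);  // buffer starts with "42", pos == 2
  buffer[pos++] = ',';
  pos = utoa(7, buffer, pos);   // buffer starts with "42,7", pos == 4
  buffer[pos++] = '\0';         // the caller adds the terminator itself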
-
-
-void HeapSnapshotJSONSerializer::SerializeEdge(HeapGraphEdge* edge,
- bool first_edge) {
- // The buffer needs space for 3 unsigned ints, 3 commas, \n and \0
- static const int kBufferSize =
- MaxDecimalDigitsIn<sizeof(unsigned)>::kUnsigned * 3 + 3 + 2; // NOLINT
- EmbeddedVector<char, kBufferSize> buffer;
- int edge_name_or_index = edge->type() == HeapGraphEdge::kElement
- || edge->type() == HeapGraphEdge::kHidden
- || edge->type() == HeapGraphEdge::kWeak
- ? edge->index() : GetStringId(edge->name());
- int buffer_pos = 0;
- if (!first_edge) {
- buffer[buffer_pos++] = ',';
- }
- buffer_pos = utoa(edge->type(), buffer, buffer_pos);
- buffer[buffer_pos++] = ',';
- buffer_pos = utoa(edge_name_or_index, buffer, buffer_pos);
- buffer[buffer_pos++] = ',';
- buffer_pos = utoa(entry_index(edge->to()), buffer, buffer_pos);
- buffer[buffer_pos++] = '\n';
- buffer[buffer_pos++] = '\0';
- writer_->AddString(buffer.start());
-}
-
-
-void HeapSnapshotJSONSerializer::SerializeEdges() {
- List<HeapGraphEdge*>& edges = snapshot_->children();
- for (int i = 0; i < edges.length(); ++i) {
- ASSERT(i == 0 ||
- edges[i - 1]->from()->index() <= edges[i]->from()->index());
- SerializeEdge(edges[i], i == 0);
- if (writer_->aborted()) return;
- }
-}
-
-
-void HeapSnapshotJSONSerializer::SerializeNode(HeapEntry* entry) {
- // The buffer needs space for 5 unsigned ints, 5 commas, \n and \0
- static const int kBufferSize =
- 5 * MaxDecimalDigitsIn<sizeof(unsigned)>::kUnsigned // NOLINT
- + 5 + 1 + 1;
- EmbeddedVector<char, kBufferSize> buffer;
- int buffer_pos = 0;
- if (entry_index(entry) != 0) {
- buffer[buffer_pos++] = ',';
- }
- buffer_pos = utoa(entry->type(), buffer, buffer_pos);
- buffer[buffer_pos++] = ',';
- buffer_pos = utoa(GetStringId(entry->name()), buffer, buffer_pos);
- buffer[buffer_pos++] = ',';
- buffer_pos = utoa(entry->id(), buffer, buffer_pos);
- buffer[buffer_pos++] = ',';
- buffer_pos = utoa(entry->self_size(), buffer, buffer_pos);
- buffer[buffer_pos++] = ',';
- buffer_pos = utoa(entry->children_count(), buffer, buffer_pos);
- buffer[buffer_pos++] = '\n';
- buffer[buffer_pos++] = '\0';
- writer_->AddString(buffer.start());
-}
-
-
-void HeapSnapshotJSONSerializer::SerializeNodes() {
- List<HeapEntry>& entries = snapshot_->entries();
- for (int i = 0; i < entries.length(); ++i) {
- SerializeNode(&entries[i]);
- if (writer_->aborted()) return;
- }
-}
-
-
-void HeapSnapshotJSONSerializer::SerializeSnapshot() {
- writer_->AddString("\"title\":\"");
- writer_->AddString(snapshot_->title());
- writer_->AddString("\"");
- writer_->AddString(",\"uid\":");
- writer_->AddNumber(snapshot_->uid());
- writer_->AddString(",\"meta\":");
- // The object describing node serialization layout.
- // We use a set of macros to improve readability.
-#define JSON_A(s) "[" s "]"
-#define JSON_O(s) "{" s "}"
-#define JSON_S(s) "\"" s "\""
- writer_->AddString(JSON_O(
- JSON_S("node_fields") ":" JSON_A(
- JSON_S("type") ","
- JSON_S("name") ","
- JSON_S("id") ","
- JSON_S("self_size") ","
- JSON_S("edge_count")) ","
- JSON_S("node_types") ":" JSON_A(
- JSON_A(
- JSON_S("hidden") ","
- JSON_S("array") ","
- JSON_S("string") ","
- JSON_S("object") ","
- JSON_S("code") ","
- JSON_S("closure") ","
- JSON_S("regexp") ","
- JSON_S("number") ","
- JSON_S("native") ","
- JSON_S("synthetic")) ","
- JSON_S("string") ","
- JSON_S("number") ","
- JSON_S("number") ","
- JSON_S("number") ","
- JSON_S("number") ","
- JSON_S("number")) ","
- JSON_S("edge_fields") ":" JSON_A(
- JSON_S("type") ","
- JSON_S("name_or_index") ","
- JSON_S("to_node")) ","
- JSON_S("edge_types") ":" JSON_A(
- JSON_A(
- JSON_S("context") ","
- JSON_S("element") ","
- JSON_S("property") ","
- JSON_S("internal") ","
- JSON_S("hidden") ","
- JSON_S("shortcut") ","
- JSON_S("weak")) ","
- JSON_S("string_or_number") ","
- JSON_S("node"))));
-#undef JSON_S
-#undef JSON_O
-#undef JSON_A
- writer_->AddString(",\"node_count\":");
- writer_->AddNumber(snapshot_->entries().length());
- writer_->AddString(",\"edge_count\":");
- writer_->AddNumber(snapshot_->edges().length());
-}
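The "meta" object written above is what makes the flat integer arrays decodable: node and edge records carry no per-record keys, only positional fields. An illustrative reading of one edge record (editorial note, assuming the "edge_types" ordering shown above):

  // With kEdgeFieldsCount == 3, a property edge named by string #7 becomes
  //   2,7,<to_node>
  // where 2 is the index of "property" in "edge_types" and <to_node> is the
  // value SerializeEdge obtains from entry_index() for the target node.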
-
-
-static void WriteUChar(OutputStreamWriter* w, unibrow::uchar u) {
- static const char hex_chars[] = "0123456789ABCDEF";
- w->AddString("\\u");
- w->AddCharacter(hex_chars[(u >> 12) & 0xf]);
- w->AddCharacter(hex_chars[(u >> 8) & 0xf]);
- w->AddCharacter(hex_chars[(u >> 4) & 0xf]);
- w->AddCharacter(hex_chars[u & 0xf]);
-}
-
-void HeapSnapshotJSONSerializer::SerializeString(const unsigned char* s) {
- writer_->AddCharacter('\n');
- writer_->AddCharacter('\"');
- for ( ; *s != '\0'; ++s) {
- switch (*s) {
- case '\b':
- writer_->AddString("\\b");
- continue;
- case '\f':
- writer_->AddString("\\f");
- continue;
- case '\n':
- writer_->AddString("\\n");
- continue;
- case '\r':
- writer_->AddString("\\r");
- continue;
- case '\t':
- writer_->AddString("\\t");
- continue;
- case '\"':
- case '\\':
- writer_->AddCharacter('\\');
- writer_->AddCharacter(*s);
- continue;
- default:
- if (*s > 31 && *s < 128) {
- writer_->AddCharacter(*s);
- } else if (*s <= 31) {
- // Special character with no dedicated literal.
- WriteUChar(writer_, *s);
- } else {
- // Convert UTF-8 into \u UTF-16 literal.
- unsigned length = 1, cursor = 0;
- for ( ; length <= 4 && *(s + length) != '\0'; ++length) { }
- unibrow::uchar c = unibrow::Utf8::CalculateValue(s, length, &cursor);
- if (c != unibrow::Utf8::kBadChar) {
- WriteUChar(writer_, c);
- ASSERT(cursor != 0);
- s += cursor - 1;
- } else {
- writer_->AddCharacter('?');
- }
- }
- }
- }
- writer_->AddCharacter('\"');
-}
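SerializeString() starts every string on its own line, wraps it in quotes, escapes the usual JSON control characters, and re-encodes non-ASCII UTF-8 sequences as \uXXXX escapes (emitting '?' for byte sequences that do not decode). A worked example (illustrative):

  // Input bytes:  'a'  '\t'  'b'  0xC3 0xA9     (0xC3 0xA9 is UTF-8 for 'é')
  // Output:       a newline, then  "a\tb\u00E9"  including the quotes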
-
-
-void HeapSnapshotJSONSerializer::SerializeStrings() {
- List<HashMap::Entry*> sorted_strings;
- SortHashMap(&strings_, &sorted_strings);
- writer_->AddString("\"<dummy>\"");
- for (int i = 0; i < sorted_strings.length(); ++i) {
- writer_->AddCharacter(',');
- SerializeString(
- reinterpret_cast<const unsigned char*>(sorted_strings[i]->key));
- if (writer_->aborted()) return;
- }
-}
-
-
-template<typename T>
-inline static int SortUsingEntryValue(const T* x, const T* y) {
- uintptr_t x_uint = reinterpret_cast<uintptr_t>((*x)->value);
- uintptr_t y_uint = reinterpret_cast<uintptr_t>((*y)->value);
- if (x_uint > y_uint) {
- return 1;
- } else if (x_uint == y_uint) {
- return 0;
- } else {
- return -1;
- }
-}
-
-
-void HeapSnapshotJSONSerializer::SortHashMap(
- HashMap* map, List<HashMap::Entry*>* sorted_entries) {
- for (HashMap::Entry* p = map->Start(); p != NULL; p = map->Next(p))
- sorted_entries->Add(p);
- sorted_entries->Sort(SortUsingEntryValue);
}
} } // namespace v8::internal
diff --git a/deps/v8/src/profile-generator.h b/deps/v8/src/profile-generator.h
index 04f4a1c71..6e4758bec 100644
--- a/deps/v8/src/profile-generator.h
+++ b/deps/v8/src/profile-generator.h
@@ -35,55 +35,33 @@
namespace v8 {
namespace internal {
-class TokenEnumerator {
- public:
- TokenEnumerator();
- ~TokenEnumerator();
- int GetTokenId(Object* token);
-
- static const int kNoSecurityToken = -1;
- static const int kInheritsSecurityToken = -2;
-
- private:
- static void TokenRemovedCallback(v8::Persistent<v8::Value> handle,
- void* parameter);
- void TokenRemoved(Object** token_location);
-
- List<Object**> token_locations_;
- List<bool> token_removed_;
-
- friend class TokenEnumeratorTester;
-
- DISALLOW_COPY_AND_ASSIGN(TokenEnumerator);
-};
-
+struct OffsetRange;
// Provides a storage of strings allocated in C++ heap, to hold them
// forever, even if they disappear from JS heap or external storage.
class StringsStorage {
public:
- StringsStorage();
+ explicit StringsStorage(Heap* heap);
~StringsStorage();
const char* GetCopy(const char* src);
const char* GetFormatted(const char* format, ...);
const char* GetVFormatted(const char* format, va_list args);
- const char* GetName(String* name);
+ const char* GetName(Name* name);
const char* GetName(int index);
- inline const char* GetFunctionName(String* name);
- inline const char* GetFunctionName(const char* name);
+ const char* GetFunctionName(Name* name);
+ const char* GetFunctionName(const char* name);
size_t GetUsedMemorySize() const;
private:
static const int kMaxNameSize = 1024;
- INLINE(static bool StringsMatch(void* key1, void* key2)) {
- return strcmp(reinterpret_cast<char*>(key1),
- reinterpret_cast<char*>(key2)) == 0;
- }
- const char* AddOrDisposeString(char* str, uint32_t hash);
+ static bool StringsMatch(void* key1, void* key2);
+ const char* BeautifyFunctionName(const char* name);
+ const char* AddOrDisposeString(char* str, int len);
+ HashMap::Entry* GetEntry(const char* str, int len);
- // Mapping of strings by String::Hash to const char* strings.
+ uint32_t hash_seed_;
HashMap names_;
DISALLOW_COPY_AND_ASSIGN(StringsStorage);
@@ -93,39 +71,58 @@ class StringsStorage {
class CodeEntry {
public:
// CodeEntry doesn't own name strings, just references them.
- INLINE(CodeEntry(Logger::LogEventsAndTags tag,
- const char* name_prefix,
+ inline CodeEntry(Logger::LogEventsAndTags tag,
const char* name,
- const char* resource_name,
- int line_number,
- int security_token_id));
-
- INLINE(bool is_js_function() const) { return is_js_function_tag(tag_); }
- INLINE(const char* name_prefix() const) { return name_prefix_; }
- INLINE(bool has_name_prefix() const) { return name_prefix_[0] != '\0'; }
- INLINE(const char* name() const) { return name_; }
- INLINE(const char* resource_name() const) { return resource_name_; }
- INLINE(int line_number() const) { return line_number_; }
- INLINE(int shared_id() const) { return shared_id_; }
- INLINE(void set_shared_id(int shared_id)) { shared_id_ = shared_id; }
- INLINE(int security_token_id() const) { return security_token_id_; }
-
- INLINE(static bool is_js_function_tag(Logger::LogEventsAndTags tag));
-
- void CopyData(const CodeEntry& source);
+ const char* name_prefix = CodeEntry::kEmptyNamePrefix,
+ const char* resource_name = CodeEntry::kEmptyResourceName,
+ int line_number = v8::CpuProfileNode::kNoLineNumberInfo,
+ int column_number = v8::CpuProfileNode::kNoColumnNumberInfo);
+ ~CodeEntry();
+
+ bool is_js_function() const { return is_js_function_tag(tag_); }
+ const char* name_prefix() const { return name_prefix_; }
+ bool has_name_prefix() const { return name_prefix_[0] != '\0'; }
+ const char* name() const { return name_; }
+ const char* resource_name() const { return resource_name_; }
+ int line_number() const { return line_number_; }
+ int column_number() const { return column_number_; }
+ void set_shared_id(int shared_id) { shared_id_ = shared_id; }
+ int script_id() const { return script_id_; }
+ void set_script_id(int script_id) { script_id_ = script_id; }
+ void set_bailout_reason(const char* bailout_reason) {
+ bailout_reason_ = bailout_reason;
+ }
+ const char* bailout_reason() const { return bailout_reason_; }
+
+ static inline bool is_js_function_tag(Logger::LogEventsAndTags tag);
+
+ List<OffsetRange>* no_frame_ranges() const { return no_frame_ranges_; }
+ void set_no_frame_ranges(List<OffsetRange>* ranges) {
+ no_frame_ranges_ = ranges;
+ }
+
+ void SetBuiltinId(Builtins::Name id);
+ Builtins::Name builtin_id() const { return builtin_id_; }
+
uint32_t GetCallUid() const;
bool IsSameAs(CodeEntry* entry) const;
static const char* const kEmptyNamePrefix;
+ static const char* const kEmptyResourceName;
+ static const char* const kEmptyBailoutReason;
private:
- Logger::LogEventsAndTags tag_;
+ Logger::LogEventsAndTags tag_ : 8;
+ Builtins::Name builtin_id_ : 8;
const char* name_prefix_;
const char* name_;
const char* resource_name_;
int line_number_;
+ int column_number_;
int shared_id_;
- int security_token_id_;
+ int script_id_;
+ List<OffsetRange>* no_frame_ranges_;
+ const char* bailout_reason_;
DISALLOW_COPY_AND_ASSIGN(CodeEntry);
};
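With every parameter after the name defaulted, most CodeEntry call sites now only pass a tag and a name. A hypothetical construction (the tag value and the string literals are illustrative, not taken from this patch):

  // Logger::FUNCTION_TAG is assumed here purely for illustration.
  CodeEntry entry(Logger::FUNCTION_TAG, "processRequest");
  // Spelling out the defaults gives the equivalent:
  CodeEntry same(Logger::FUNCTION_TAG,
                 "processRequest",
                 CodeEntry::kEmptyNamePrefix,
                 CodeEntry::kEmptyResourceName,
                 v8::CpuProfileNode::kNoLineNumberInfo,
                 v8::CpuProfileNode::kNoColumnNumberInfo);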
@@ -135,40 +132,37 @@ class ProfileTree;
class ProfileNode {
public:
- INLINE(ProfileNode(ProfileTree* tree, CodeEntry* entry));
+ inline ProfileNode(ProfileTree* tree, CodeEntry* entry);
ProfileNode* FindChild(CodeEntry* entry);
ProfileNode* FindOrAddChild(CodeEntry* entry);
- INLINE(void IncrementSelfTicks()) { ++self_ticks_; }
- INLINE(void IncreaseSelfTicks(unsigned amount)) { self_ticks_ += amount; }
- INLINE(void IncreaseTotalTicks(unsigned amount)) { total_ticks_ += amount; }
+ void IncrementSelfTicks() { ++self_ticks_; }
+ void IncreaseSelfTicks(unsigned amount) { self_ticks_ += amount; }
- INLINE(CodeEntry* entry() const) { return entry_; }
- INLINE(unsigned self_ticks() const) { return self_ticks_; }
- INLINE(unsigned total_ticks() const) { return total_ticks_; }
- INLINE(const List<ProfileNode*>* children() const) { return &children_list_; }
- double GetSelfMillis() const;
- double GetTotalMillis() const;
+ CodeEntry* entry() const { return entry_; }
+ unsigned self_ticks() const { return self_ticks_; }
+ const List<ProfileNode*>* children() const { return &children_list_; }
+ unsigned id() const { return id_; }
void Print(int indent);
private:
- INLINE(static bool CodeEntriesMatch(void* entry1, void* entry2)) {
+ static bool CodeEntriesMatch(void* entry1, void* entry2) {
return reinterpret_cast<CodeEntry*>(entry1)->IsSameAs(
reinterpret_cast<CodeEntry*>(entry2));
}
- INLINE(static uint32_t CodeEntryHash(CodeEntry* entry)) {
+ static uint32_t CodeEntryHash(CodeEntry* entry) {
return entry->GetCallUid();
}
ProfileTree* tree_;
CodeEntry* entry_;
- unsigned total_ticks_;
unsigned self_ticks_;
// Mapping from CodeEntry* to ProfileNode*
HashMap children_;
List<ProfileNode*> children_list_;
+ unsigned id_;
DISALLOW_COPY_AND_ASSIGN(ProfileNode);
};
@@ -179,18 +173,11 @@ class ProfileTree {
ProfileTree();
~ProfileTree();
- void AddPathFromEnd(const Vector<CodeEntry*>& path);
+ ProfileNode* AddPathFromEnd(const Vector<CodeEntry*>& path);
void AddPathFromStart(const Vector<CodeEntry*>& path);
- void CalculateTotalTicks();
- void FilteredClone(ProfileTree* src, int security_token_id);
-
- double TicksToMillis(unsigned ticks) const {
- return ticks * ms_to_ticks_scale_;
- }
ProfileNode* root() const { return root_; }
- void SetTickRatePerMs(double ticks_per_ms);
+ unsigned next_node_id() { return next_node_id_++; }
- void ShortPrint();
void Print() {
root_->Print(0);
}
@@ -200,8 +187,8 @@ class ProfileTree {
void TraverseDepthFirst(Callback* callback);
CodeEntry root_entry_;
+ unsigned next_node_id_;
ProfileNode* root_;
- double ms_to_ticks_scale_;
DISALLOW_COPY_AND_ASSIGN(ProfileTree);
};
@@ -209,30 +196,35 @@ class ProfileTree {
class CpuProfile {
public:
- CpuProfile(const char* title, unsigned uid)
- : title_(title), uid_(uid) { }
+ CpuProfile(const char* title, unsigned uid, bool record_samples);
// Add pc -> ... -> main() call path to the profile.
void AddPath(const Vector<CodeEntry*>& path);
- void CalculateTotalTicks();
- void SetActualSamplingRate(double actual_sampling_rate);
- CpuProfile* FilteredClone(int security_token_id);
+ void CalculateTotalTicksAndSamplingRate();
+
+ const char* title() const { return title_; }
+ unsigned uid() const { return uid_; }
+ const ProfileTree* top_down() const { return &top_down_; }
- INLINE(const char* title() const) { return title_; }
- INLINE(unsigned uid() const) { return uid_; }
- INLINE(const ProfileTree* top_down() const) { return &top_down_; }
- INLINE(const ProfileTree* bottom_up() const) { return &bottom_up_; }
+ int samples_count() const { return samples_.length(); }
+ ProfileNode* sample(int index) const { return samples_.at(index); }
+
+ Time start_time() const { return start_time_; }
+ Time end_time() const { return end_time_; }
void UpdateTicksScale();
- void ShortPrint();
void Print();
private:
const char* title_;
unsigned uid_;
+ bool record_samples_;
+ Time start_time_;
+ Time end_time_;
+ ElapsedTimer timer_;
+ List<ProfileNode*> samples_;
ProfileTree top_down_;
- ProfileTree bottom_up_;
DISALLOW_COPY_AND_ASSIGN(CpuProfile);
};
@@ -243,7 +235,7 @@ class CodeMap {
CodeMap() : next_shared_id_(1) { }
void AddCode(Address addr, CodeEntry* entry, unsigned size);
void MoveCode(Address from, Address to);
- CodeEntry* FindEntry(Address addr);
+ CodeEntry* FindEntry(Address addr, Address* start = NULL);
int GetSharedId(Address addr);
void Print();
@@ -286,33 +278,34 @@ class CodeMap {
class CpuProfilesCollection {
public:
- CpuProfilesCollection();
+ explicit CpuProfilesCollection(Heap* heap);
~CpuProfilesCollection();
- bool StartProfiling(const char* title, unsigned uid);
- bool StartProfiling(String* title, unsigned uid);
- CpuProfile* StopProfiling(int security_token_id,
- const char* title,
- double actual_sampling_rate);
- List<CpuProfile*>* Profiles(int security_token_id);
- const char* GetName(String* name) {
+ bool StartProfiling(const char* title, unsigned uid, bool record_samples);
+ CpuProfile* StopProfiling(const char* title);
+ List<CpuProfile*>* profiles() { return &finished_profiles_; }
+ const char* GetName(Name* name) {
return function_and_resource_names_.GetName(name);
}
const char* GetName(int args_count) {
return function_and_resource_names_.GetName(args_count);
}
- CpuProfile* GetProfile(int security_token_id, unsigned uid);
+ const char* GetFunctionName(Name* name) {
+ return function_and_resource_names_.GetFunctionName(name);
+ }
+ const char* GetFunctionName(const char* name) {
+ return function_and_resource_names_.GetFunctionName(name);
+ }
bool IsLastProfile(const char* title);
void RemoveProfile(CpuProfile* profile);
- bool HasDetachedProfiles() { return detached_profiles_.length() > 0; }
- CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag,
- String* name, String* resource_name, int line_number);
- CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag, const char* name);
- CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag,
- const char* name_prefix, String* name);
- CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag, int args_count);
- CodeEntry* NewCodeEntry(int security_token_id);
+ CodeEntry* NewCodeEntry(
+ Logger::LogEventsAndTags tag,
+ const char* name,
+ const char* name_prefix = CodeEntry::kEmptyNamePrefix,
+ const char* resource_name = CodeEntry::kEmptyResourceName,
+ int line_number = v8::CpuProfileNode::kNoLineNumberInfo,
+ int column_number = v8::CpuProfileNode::kNoColumnNumberInfo);
// Called from profile generator thread.
void AddPathToCurrentProfiles(const Vector<CodeEntry*>& path);
@@ -321,780 +314,48 @@ class CpuProfilesCollection {
static const int kMaxSimultaneousProfiles = 100;
private:
- const char* GetFunctionName(String* name) {
- return function_and_resource_names_.GetFunctionName(name);
- }
- const char* GetFunctionName(const char* name) {
- return function_and_resource_names_.GetFunctionName(name);
- }
- int GetProfileIndex(unsigned uid);
- List<CpuProfile*>* GetProfilesList(int security_token_id);
- int TokenToIndex(int security_token_id);
-
- INLINE(static bool UidsMatch(void* key1, void* key2)) {
- return key1 == key2;
- }
-
StringsStorage function_and_resource_names_;
List<CodeEntry*> code_entries_;
- List<List<CpuProfile*>* > profiles_by_token_;
- // Mapping from profiles' uids to indexes in the second nested list
- // of profiles_by_token_.
- HashMap profiles_uids_;
- List<CpuProfile*> detached_profiles_;
+ List<CpuProfile*> finished_profiles_;
// Accessed by VM thread and profile generator thread.
List<CpuProfile*> current_profiles_;
- Semaphore* current_profiles_semaphore_;
+ Semaphore current_profiles_semaphore_;
DISALLOW_COPY_AND_ASSIGN(CpuProfilesCollection);
};
-class SampleRateCalculator {
- public:
- SampleRateCalculator()
- : result_(Logger::kSamplingIntervalMs * kResultScale),
- ticks_per_ms_(Logger::kSamplingIntervalMs),
- measurements_count_(0),
- wall_time_query_countdown_(1) {
- }
-
- double ticks_per_ms() {
- return result_ / static_cast<double>(kResultScale);
- }
- void Tick();
- void UpdateMeasurements(double current_time);
-
- // Instead of querying current wall time each tick,
- // we use this constant to control query intervals.
- static const unsigned kWallTimeQueryIntervalMs = 100;
-
- private:
- // As the result needs to be accessed from a different thread, we
- // use type that guarantees atomic writes to memory. There should
- // be <= 1000 ticks per second, thus storing a value of a 10 ** 5
- // order should provide enough precision while keeping away from a
- // potential overflow.
- static const int kResultScale = 100000;
-
- AtomicWord result_;
- // All other fields are accessed only from the sampler thread.
- double ticks_per_ms_;
- unsigned measurements_count_;
- unsigned wall_time_query_countdown_;
- double last_wall_time_;
-
- DISALLOW_COPY_AND_ASSIGN(SampleRateCalculator);
-};
-
-
class ProfileGenerator {
public:
explicit ProfileGenerator(CpuProfilesCollection* profiles);
- INLINE(CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag,
- String* name,
- String* resource_name,
- int line_number)) {
- return profiles_->NewCodeEntry(tag, name, resource_name, line_number);
- }
-
- INLINE(CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag,
- const char* name)) {
- return profiles_->NewCodeEntry(tag, name);
- }
-
- INLINE(CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag,
- const char* name_prefix,
- String* name)) {
- return profiles_->NewCodeEntry(tag, name_prefix, name);
- }
-
- INLINE(CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag,
- int args_count)) {
- return profiles_->NewCodeEntry(tag, args_count);
- }
-
- INLINE(CodeEntry* NewCodeEntry(int security_token_id)) {
- return profiles_->NewCodeEntry(security_token_id);
- }
-
void RecordTickSample(const TickSample& sample);
- INLINE(CodeMap* code_map()) { return &code_map_; }
-
- INLINE(void Tick()) { sample_rate_calc_.Tick(); }
- INLINE(double actual_sampling_rate()) {
- return sample_rate_calc_.ticks_per_ms();
- }
+ CodeMap* code_map() { return &code_map_; }
static const char* const kAnonymousFunctionName;
static const char* const kProgramEntryName;
+ static const char* const kIdleEntryName;
static const char* const kGarbageCollectorEntryName;
+  // Used to represent frames for which we have no reliable way to
+  // detect the function.
+ static const char* const kUnresolvedFunctionName;
private:
- INLINE(CodeEntry* EntryForVMState(StateTag tag));
+ CodeEntry* EntryForVMState(StateTag tag);
CpuProfilesCollection* profiles_;
CodeMap code_map_;
CodeEntry* program_entry_;
+ CodeEntry* idle_entry_;
CodeEntry* gc_entry_;
- SampleRateCalculator sample_rate_calc_;
+ CodeEntry* unresolved_entry_;
DISALLOW_COPY_AND_ASSIGN(ProfileGenerator);
};
-class HeapEntry;
-class HeapSnapshot;
-
-class HeapGraphEdge BASE_EMBEDDED {
- public:
- enum Type {
- kContextVariable = v8::HeapGraphEdge::kContextVariable,
- kElement = v8::HeapGraphEdge::kElement,
- kProperty = v8::HeapGraphEdge::kProperty,
- kInternal = v8::HeapGraphEdge::kInternal,
- kHidden = v8::HeapGraphEdge::kHidden,
- kShortcut = v8::HeapGraphEdge::kShortcut,
- kWeak = v8::HeapGraphEdge::kWeak
- };
-
- HeapGraphEdge() { }
- HeapGraphEdge(Type type, const char* name, int from, int to);
- HeapGraphEdge(Type type, int index, int from, int to);
- void ReplaceToIndexWithEntry(HeapSnapshot* snapshot);
-
- Type type() const { return static_cast<Type>(type_); }
- int index() const {
- ASSERT(type_ == kElement || type_ == kHidden || type_ == kWeak);
- return index_;
- }
- const char* name() const {
- ASSERT(type_ == kContextVariable
- || type_ == kProperty
- || type_ == kInternal
- || type_ == kShortcut);
- return name_;
- }
- INLINE(HeapEntry* from() const);
- HeapEntry* to() const { return to_entry_; }
-
- private:
- INLINE(HeapSnapshot* snapshot() const);
-
- unsigned type_ : 3;
- int from_index_ : 29;
- union {
- // During entries population |to_index_| is used for storing the index,
- // afterwards it is replaced with a pointer to the entry.
- int to_index_;
- HeapEntry* to_entry_;
- };
- union {
- int index_;
- const char* name_;
- };
-};
-
-
-// HeapEntry instances represent an entity from the heap (or a special
-// virtual node, e.g. root).
-class HeapEntry BASE_EMBEDDED {
- public:
- enum Type {
- kHidden = v8::HeapGraphNode::kHidden,
- kArray = v8::HeapGraphNode::kArray,
- kString = v8::HeapGraphNode::kString,
- kObject = v8::HeapGraphNode::kObject,
- kCode = v8::HeapGraphNode::kCode,
- kClosure = v8::HeapGraphNode::kClosure,
- kRegExp = v8::HeapGraphNode::kRegExp,
- kHeapNumber = v8::HeapGraphNode::kHeapNumber,
- kNative = v8::HeapGraphNode::kNative,
- kSynthetic = v8::HeapGraphNode::kSynthetic
- };
- static const int kNoEntry;
-
- HeapEntry() { }
- HeapEntry(HeapSnapshot* snapshot,
- Type type,
- const char* name,
- SnapshotObjectId id,
- int self_size);
-
- HeapSnapshot* snapshot() { return snapshot_; }
- Type type() { return static_cast<Type>(type_); }
- const char* name() { return name_; }
- void set_name(const char* name) { name_ = name; }
- inline SnapshotObjectId id() { return id_; }
- int self_size() { return self_size_; }
- INLINE(int index() const);
- int children_count() const { return children_count_; }
- INLINE(int set_children_index(int index));
- void add_child(HeapGraphEdge* edge) {
- children_arr()[children_count_++] = edge;
- }
- Vector<HeapGraphEdge*> children() {
- return Vector<HeapGraphEdge*>(children_arr(), children_count_); }
-
- void SetIndexedReference(
- HeapGraphEdge::Type type, int index, HeapEntry* entry);
- void SetNamedReference(
- HeapGraphEdge::Type type, const char* name, HeapEntry* entry);
-
- void Print(
- const char* prefix, const char* edge_name, int max_depth, int indent);
-
- Handle<HeapObject> GetHeapObject();
-
- private:
- INLINE(HeapGraphEdge** children_arr());
- const char* TypeAsString();
-
- unsigned type_: 4;
- int children_count_: 28;
- int children_index_;
- int self_size_;
- SnapshotObjectId id_;
- HeapSnapshot* snapshot_;
- const char* name_;
-};
-
-
-class HeapSnapshotsCollection;
-
-// HeapSnapshot represents a single heap snapshot. It is stored in
-// HeapSnapshotsCollection, which is also a factory for
-// HeapSnapshots. All HeapSnapshots share strings copied from JS heap
-// to be able to return them even if they were collected.
-// HeapSnapshotGenerator fills in a HeapSnapshot.
-class HeapSnapshot {
- public:
- enum Type {
- kFull = v8::HeapSnapshot::kFull
- };
-
- HeapSnapshot(HeapSnapshotsCollection* collection,
- Type type,
- const char* title,
- unsigned uid);
- void Delete();
-
- HeapSnapshotsCollection* collection() { return collection_; }
- Type type() { return type_; }
- const char* title() { return title_; }
- unsigned uid() { return uid_; }
- size_t RawSnapshotSize() const;
- HeapEntry* root() { return &entries_[root_index_]; }
- HeapEntry* gc_roots() { return &entries_[gc_roots_index_]; }
- HeapEntry* natives_root() { return &entries_[natives_root_index_]; }
- HeapEntry* gc_subroot(int index) {
- return &entries_[gc_subroot_indexes_[index]];
- }
- List<HeapEntry>& entries() { return entries_; }
- List<HeapGraphEdge>& edges() { return edges_; }
- List<HeapGraphEdge*>& children() { return children_; }
- void RememberLastJSObjectId();
- SnapshotObjectId max_snapshot_js_object_id() const {
- return max_snapshot_js_object_id_;
- }
-
- HeapEntry* AddEntry(HeapEntry::Type type,
- const char* name,
- SnapshotObjectId id,
- int size);
- HeapEntry* AddRootEntry();
- HeapEntry* AddGcRootsEntry();
- HeapEntry* AddGcSubrootEntry(int tag);
- HeapEntry* AddNativesRootEntry();
- HeapEntry* GetEntryById(SnapshotObjectId id);
- List<HeapEntry*>* GetSortedEntriesList();
- void FillChildren();
-
- void Print(int max_depth);
- void PrintEntriesSize();
-
- private:
- HeapSnapshotsCollection* collection_;
- Type type_;
- const char* title_;
- unsigned uid_;
- int root_index_;
- int gc_roots_index_;
- int natives_root_index_;
- int gc_subroot_indexes_[VisitorSynchronization::kNumberOfSyncTags];
- List<HeapEntry> entries_;
- List<HeapGraphEdge> edges_;
- List<HeapGraphEdge*> children_;
- List<HeapEntry*> sorted_entries_;
- SnapshotObjectId max_snapshot_js_object_id_;
-
- friend class HeapSnapshotTester;
-
- DISALLOW_COPY_AND_ASSIGN(HeapSnapshot);
-};
-
-
-class HeapObjectsMap {
- public:
- HeapObjectsMap();
-
- void SnapshotGenerationFinished();
- SnapshotObjectId FindEntry(Address addr);
- SnapshotObjectId FindOrAddEntry(Address addr, unsigned int size);
- void MoveObject(Address from, Address to);
- SnapshotObjectId last_assigned_id() const {
- return next_id_ - kObjectIdStep;
- }
-
- void StopHeapObjectsTracking();
- SnapshotObjectId PushHeapObjectsStats(OutputStream* stream);
- size_t GetUsedMemorySize() const;
-
- static SnapshotObjectId GenerateId(v8::RetainedObjectInfo* info);
- static inline SnapshotObjectId GetNthGcSubrootId(int delta);
-
- static const int kObjectIdStep = 2;
- static const SnapshotObjectId kInternalRootObjectId;
- static const SnapshotObjectId kGcRootsObjectId;
- static const SnapshotObjectId kNativesRootObjectId;
- static const SnapshotObjectId kGcRootsFirstSubrootId;
- static const SnapshotObjectId kFirstAvailableObjectId;
-
- private:
- struct EntryInfo {
- EntryInfo(SnapshotObjectId id, Address addr, unsigned int size)
- : id(id), addr(addr), size(size), accessed(true) { }
- EntryInfo(SnapshotObjectId id, Address addr, unsigned int size, bool accessed)
- : id(id), addr(addr), size(size), accessed(accessed) { }
- SnapshotObjectId id;
- Address addr;
- unsigned int size;
- bool accessed;
- };
- struct TimeInterval {
- explicit TimeInterval(SnapshotObjectId id) : id(id), size(0), count(0) { }
- SnapshotObjectId id;
- uint32_t size;
- uint32_t count;
- };
-
- void UpdateHeapObjectsMap();
- void RemoveDeadEntries();
-
- static bool AddressesMatch(void* key1, void* key2) {
- return key1 == key2;
- }
-
- static uint32_t AddressHash(Address addr) {
- return ComputeIntegerHash(
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(addr)),
- v8::internal::kZeroHashSeed);
- }
-
- SnapshotObjectId next_id_;
- HashMap entries_map_;
- List<EntryInfo> entries_;
- List<TimeInterval> time_intervals_;
-
- DISALLOW_COPY_AND_ASSIGN(HeapObjectsMap);
-};
-
-
-class HeapSnapshotsCollection {
- public:
- HeapSnapshotsCollection();
- ~HeapSnapshotsCollection();
-
- bool is_tracking_objects() { return is_tracking_objects_; }
- SnapshotObjectId PushHeapObjectsStats(OutputStream* stream) {
- return ids_.PushHeapObjectsStats(stream);
- }
- void StartHeapObjectsTracking() { is_tracking_objects_ = true; }
- void StopHeapObjectsTracking() { ids_.StopHeapObjectsTracking(); }
-
- HeapSnapshot* NewSnapshot(
- HeapSnapshot::Type type, const char* name, unsigned uid);
- void SnapshotGenerationFinished(HeapSnapshot* snapshot);
- List<HeapSnapshot*>* snapshots() { return &snapshots_; }
- HeapSnapshot* GetSnapshot(unsigned uid);
- void RemoveSnapshot(HeapSnapshot* snapshot);
-
- StringsStorage* names() { return &names_; }
- TokenEnumerator* token_enumerator() { return token_enumerator_; }
-
- SnapshotObjectId FindObjectId(Address object_addr) {
- return ids_.FindEntry(object_addr);
- }
- SnapshotObjectId GetObjectId(Address object_addr, int object_size) {
- return ids_.FindOrAddEntry(object_addr, object_size);
- }
- Handle<HeapObject> FindHeapObjectById(SnapshotObjectId id);
- void ObjectMoveEvent(Address from, Address to) { ids_.MoveObject(from, to); }
- SnapshotObjectId last_assigned_id() const {
- return ids_.last_assigned_id();
- }
- size_t GetUsedMemorySize() const;
-
- private:
- INLINE(static bool HeapSnapshotsMatch(void* key1, void* key2)) {
- return key1 == key2;
- }
-
- bool is_tracking_objects_; // Whether tracking object moves is needed.
- List<HeapSnapshot*> snapshots_;
- // Mapping from snapshots' uids to HeapSnapshot* pointers.
- HashMap snapshots_uids_;
- StringsStorage names_;
- TokenEnumerator* token_enumerator_;
- // Mapping from HeapObject addresses to objects' uids.
- HeapObjectsMap ids_;
-
- DISALLOW_COPY_AND_ASSIGN(HeapSnapshotsCollection);
-};
-
-
-// A typedef for referencing anything that can be snapshotted living
-// in any kind of heap memory.
-typedef void* HeapThing;
-
-
-// An interface that creates HeapEntries by HeapThings.
-class HeapEntriesAllocator {
- public:
- virtual ~HeapEntriesAllocator() { }
- virtual HeapEntry* AllocateEntry(HeapThing ptr) = 0;
-};
-
-
-// The HeapEntriesMap instance is used to track a mapping between
-// real heap objects and their representations in heap snapshots.
-class HeapEntriesMap {
- public:
- HeapEntriesMap();
-
- int Map(HeapThing thing);
- void Pair(HeapThing thing, int entry);
-
- private:
- static uint32_t Hash(HeapThing thing) {
- return ComputeIntegerHash(
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(thing)),
- v8::internal::kZeroHashSeed);
- }
- static bool HeapThingsMatch(HeapThing key1, HeapThing key2) {
- return key1 == key2;
- }
-
- HashMap entries_;
-
- friend class HeapObjectsSet;
-
- DISALLOW_COPY_AND_ASSIGN(HeapEntriesMap);
-};
-
-
-class HeapObjectsSet {
- public:
- HeapObjectsSet();
- void Clear();
- bool Contains(Object* object);
- void Insert(Object* obj);
- const char* GetTag(Object* obj);
- void SetTag(Object* obj, const char* tag);
- bool is_empty() const { return entries_.occupancy() == 0; }
-
- private:
- HashMap entries_;
-
- DISALLOW_COPY_AND_ASSIGN(HeapObjectsSet);
-};
-
-
-// An interface used to populate a snapshot with nodes and edges.
-class SnapshotFillerInterface {
- public:
- virtual ~SnapshotFillerInterface() { }
- virtual HeapEntry* AddEntry(HeapThing ptr,
- HeapEntriesAllocator* allocator) = 0;
- virtual HeapEntry* FindEntry(HeapThing ptr) = 0;
- virtual HeapEntry* FindOrAddEntry(HeapThing ptr,
- HeapEntriesAllocator* allocator) = 0;
- virtual void SetIndexedReference(HeapGraphEdge::Type type,
- int parent_entry,
- int index,
- HeapEntry* child_entry) = 0;
- virtual void SetIndexedAutoIndexReference(HeapGraphEdge::Type type,
- int parent_entry,
- HeapEntry* child_entry) = 0;
- virtual void SetNamedReference(HeapGraphEdge::Type type,
- int parent_entry,
- const char* reference_name,
- HeapEntry* child_entry) = 0;
- virtual void SetNamedAutoIndexReference(HeapGraphEdge::Type type,
- int parent_entry,
- HeapEntry* child_entry) = 0;
-};
-
-
-class SnapshottingProgressReportingInterface {
- public:
- virtual ~SnapshottingProgressReportingInterface() { }
- virtual void ProgressStep() = 0;
- virtual bool ProgressReport(bool force) = 0;
-};
-
-
-// An implementation of V8 heap graph extractor.
-class V8HeapExplorer : public HeapEntriesAllocator {
- public:
- V8HeapExplorer(HeapSnapshot* snapshot,
- SnapshottingProgressReportingInterface* progress);
- virtual ~V8HeapExplorer();
- virtual HeapEntry* AllocateEntry(HeapThing ptr);
- void AddRootEntries(SnapshotFillerInterface* filler);
- int EstimateObjectsCount(HeapIterator* iterator);
- bool IterateAndExtractReferences(SnapshotFillerInterface* filler);
- void TagGlobalObjects();
-
- static String* GetConstructorName(JSObject* object);
-
- static HeapObject* const kInternalRootObject;
-
- private:
- HeapEntry* AddEntry(HeapObject* object);
- HeapEntry* AddEntry(HeapObject* object,
- HeapEntry::Type type,
- const char* name);
- const char* GetSystemEntryName(HeapObject* object);
-
- void ExtractReferences(HeapObject* obj);
- void ExtractJSGlobalProxyReferences(JSGlobalProxy* proxy);
- void ExtractJSObjectReferences(int entry, JSObject* js_obj);
- void ExtractStringReferences(int entry, String* obj);
- void ExtractContextReferences(int entry, Context* context);
- void ExtractMapReferences(int entry, Map* map);
- void ExtractSharedFunctionInfoReferences(int entry,
- SharedFunctionInfo* shared);
- void ExtractScriptReferences(int entry, Script* script);
- void ExtractCodeCacheReferences(int entry, CodeCache* code_cache);
- void ExtractCodeReferences(int entry, Code* code);
- void ExtractJSGlobalPropertyCellReferences(int entry,
- JSGlobalPropertyCell* cell);
- void ExtractClosureReferences(JSObject* js_obj, int entry);
- void ExtractPropertyReferences(JSObject* js_obj, int entry);
- void ExtractElementReferences(JSObject* js_obj, int entry);
- void ExtractInternalReferences(JSObject* js_obj, int entry);
- bool IsEssentialObject(Object* object);
- void SetClosureReference(HeapObject* parent_obj,
- int parent,
- String* reference_name,
- Object* child);
- void SetNativeBindReference(HeapObject* parent_obj,
- int parent,
- const char* reference_name,
- Object* child);
- void SetElementReference(HeapObject* parent_obj,
- int parent,
- int index,
- Object* child);
- void SetInternalReference(HeapObject* parent_obj,
- int parent,
- const char* reference_name,
- Object* child,
- int field_offset = -1);
- void SetInternalReference(HeapObject* parent_obj,
- int parent,
- int index,
- Object* child,
- int field_offset = -1);
- void SetHiddenReference(HeapObject* parent_obj,
- int parent,
- int index,
- Object* child);
- void SetWeakReference(HeapObject* parent_obj,
- int parent,
- int index,
- Object* child_obj,
- int field_offset);
- void SetPropertyReference(HeapObject* parent_obj,
- int parent,
- String* reference_name,
- Object* child,
- const char* name_format_string = NULL,
- int field_offset = -1);
- void SetUserGlobalReference(Object* user_global);
- void SetRootGcRootsReference();
- void SetGcRootsReference(VisitorSynchronization::SyncTag tag);
- void SetGcSubrootReference(
- VisitorSynchronization::SyncTag tag, bool is_weak, Object* child);
- const char* GetStrongGcSubrootName(Object* object);
- void TagObject(Object* obj, const char* tag);
-
- HeapEntry* GetEntry(Object* obj);
-
- static inline HeapObject* GetNthGcSubrootObject(int delta);
- static inline int GetGcSubrootOrder(HeapObject* subroot);
-
- Heap* heap_;
- HeapSnapshot* snapshot_;
- HeapSnapshotsCollection* collection_;
- SnapshottingProgressReportingInterface* progress_;
- SnapshotFillerInterface* filler_;
- HeapObjectsSet objects_tags_;
- HeapObjectsSet strong_gc_subroot_names_;
-
- static HeapObject* const kGcRootsObject;
- static HeapObject* const kFirstGcSubrootObject;
- static HeapObject* const kLastGcSubrootObject;
-
- friend class IndexedReferencesExtractor;
- friend class GcSubrootsEnumerator;
- friend class RootsReferencesExtractor;
-
- DISALLOW_COPY_AND_ASSIGN(V8HeapExplorer);
-};
-
-
-class NativeGroupRetainedObjectInfo;
-
-
-// An implementation of retained native objects extractor.
-class NativeObjectsExplorer {
- public:
- NativeObjectsExplorer(HeapSnapshot* snapshot,
- SnapshottingProgressReportingInterface* progress);
- virtual ~NativeObjectsExplorer();
- void AddRootEntries(SnapshotFillerInterface* filler);
- int EstimateObjectsCount();
- bool IterateAndExtractReferences(SnapshotFillerInterface* filler);
-
- private:
- void FillRetainedObjects();
- void FillImplicitReferences();
- List<HeapObject*>* GetListMaybeDisposeInfo(v8::RetainedObjectInfo* info);
- void SetNativeRootReference(v8::RetainedObjectInfo* info);
- void SetRootNativeRootsReference();
- void SetWrapperNativeReferences(HeapObject* wrapper,
- v8::RetainedObjectInfo* info);
- void VisitSubtreeWrapper(Object** p, uint16_t class_id);
-
- static uint32_t InfoHash(v8::RetainedObjectInfo* info) {
- return ComputeIntegerHash(static_cast<uint32_t>(info->GetHash()),
- v8::internal::kZeroHashSeed);
- }
- static bool RetainedInfosMatch(void* key1, void* key2) {
- return key1 == key2 ||
- (reinterpret_cast<v8::RetainedObjectInfo*>(key1))->IsEquivalent(
- reinterpret_cast<v8::RetainedObjectInfo*>(key2));
- }
- INLINE(static bool StringsMatch(void* key1, void* key2)) {
- return strcmp(reinterpret_cast<char*>(key1),
- reinterpret_cast<char*>(key2)) == 0;
- }
-
- NativeGroupRetainedObjectInfo* FindOrAddGroupInfo(const char* label);
-
- HeapSnapshot* snapshot_;
- HeapSnapshotsCollection* collection_;
- SnapshottingProgressReportingInterface* progress_;
- bool embedder_queried_;
- HeapObjectsSet in_groups_;
- // RetainedObjectInfo* -> List<HeapObject*>*
- HashMap objects_by_info_;
- HashMap native_groups_;
- HeapEntriesAllocator* synthetic_entries_allocator_;
- HeapEntriesAllocator* native_entries_allocator_;
- // Used during references extraction.
- SnapshotFillerInterface* filler_;
-
- static HeapThing const kNativesRootObject;
-
- friend class GlobalHandlesExtractor;
-
- DISALLOW_COPY_AND_ASSIGN(NativeObjectsExplorer);
-};
-
-
-class HeapSnapshotGenerator : public SnapshottingProgressReportingInterface {
- public:
- HeapSnapshotGenerator(HeapSnapshot* snapshot,
- v8::ActivityControl* control);
- bool GenerateSnapshot();
-
- private:
- bool FillReferences();
- void ProgressStep();
- bool ProgressReport(bool force = false);
- void SetProgressTotal(int iterations_count);
-
- HeapSnapshot* snapshot_;
- v8::ActivityControl* control_;
- V8HeapExplorer v8_heap_explorer_;
- NativeObjectsExplorer dom_explorer_;
- // Mapping from HeapThing pointers to HeapEntry* pointers.
- HeapEntriesMap entries_;
- // Used during snapshot generation.
- int progress_counter_;
- int progress_total_;
-
- DISALLOW_COPY_AND_ASSIGN(HeapSnapshotGenerator);
-};
-
-class OutputStreamWriter;
-
-class HeapSnapshotJSONSerializer {
- public:
- explicit HeapSnapshotJSONSerializer(HeapSnapshot* snapshot)
- : snapshot_(snapshot),
- strings_(ObjectsMatch),
- next_node_id_(1),
- next_string_id_(1),
- writer_(NULL) {
- }
- void Serialize(v8::OutputStream* stream);
-
- private:
- INLINE(static bool ObjectsMatch(void* key1, void* key2)) {
- return key1 == key2;
- }
-
- INLINE(static uint32_t ObjectHash(const void* key)) {
- return ComputeIntegerHash(
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(key)),
- v8::internal::kZeroHashSeed);
- }
-
- HeapSnapshot* CreateFakeSnapshot();
- int GetStringId(const char* s);
- int entry_index(HeapEntry* e) { return e->index() * kNodeFieldsCount; }
- void SerializeEdge(HeapGraphEdge* edge, bool first_edge);
- void SerializeEdges();
- void SerializeImpl();
- void SerializeNode(HeapEntry* entry);
- void SerializeNodes();
- void SerializeSnapshot();
- void SerializeString(const unsigned char* s);
- void SerializeStrings();
- void SortHashMap(HashMap* map, List<HashMap::Entry*>* sorted_entries);
-
- static const int kEdgeFieldsCount;
- static const int kNodeFieldsCount;
-
- HeapSnapshot* snapshot_;
- HashMap strings_;
- int next_node_id_;
- int next_string_id_;
- OutputStreamWriter* writer_;
-
- friend class HeapSnapshotJSONSerializerEnumerator;
- friend class HeapSnapshotJSONSerializerIterator;
-
- DISALLOW_COPY_AND_ASSIGN(HeapSnapshotJSONSerializer);
-};
-
} } // namespace v8::internal
#endif // V8_PROFILE_GENERATOR_H_
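
The HeapObjectsMap declared in the header above assigns every heap object a stable SnapshotObjectId keyed by its current address, and MoveObject() keeps that id attached when the GC relocates the object. Below is a minimal sketch of that idea (not V8's implementation), using std::unordered_map and plain integer addresses; SimpleObjectIdMap and its members are illustrative names only.

#include <cassert>
#include <cstdint>
#include <unordered_map>

// Illustrative stand-ins for V8's Address and SnapshotObjectId types.
using Address = std::uintptr_t;
using SnapshotObjectId = std::uint32_t;

class SimpleObjectIdMap {
 public:
  // Ids advance by 2, mirroring kObjectIdStep above, so intermediate values
  // stay free for synthetic entries such as GC subroots.
  static constexpr SnapshotObjectId kIdStep = 2;

  SnapshotObjectId FindOrAddEntry(Address addr) {
    auto it = ids_.find(addr);
    if (it != ids_.end()) return it->second;
    SnapshotObjectId id = next_id_;
    next_id_ += kIdStep;
    ids_[addr] = id;
    return id;
  }

  // When the GC moves an object, its id must follow it to the new address.
  void MoveObject(Address from, Address to) {
    auto it = ids_.find(from);
    if (it == ids_.end()) return;
    SnapshotObjectId id = it->second;
    ids_.erase(it);
    ids_[to] = id;
  }

 private:
  SnapshotObjectId next_id_ = 2;
  std::unordered_map<Address, SnapshotObjectId> ids_;
};

int main() {
  SimpleObjectIdMap map;
  SnapshotObjectId id = map.FindOrAddEntry(0x1000);
  map.MoveObject(0x1000, 0x2000);
  assert(map.FindOrAddEntry(0x2000) == id);  // Same object keeps its id across a move.
  return 0;
}
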
diff --git a/deps/v8/src/property-details.h b/deps/v8/src/property-details.h
index 64e320514..659fbd1da 100644
--- a/deps/v8/src/property-details.h
+++ b/deps/v8/src/property-details.h
@@ -38,6 +38,12 @@ enum PropertyAttributes {
READ_ONLY = v8::ReadOnly,
DONT_ENUM = v8::DontEnum,
DONT_DELETE = v8::DontDelete,
+
+ SEALED = DONT_DELETE,
+ FROZEN = SEALED | READ_ONLY,
+
+ SYMBOLIC = 8, // Used to filter symbol names
+ DONT_SHOW = DONT_ENUM | SYMBOLIC,
ABSENT = 16 // Used in runtime to indicate a property is absent.
// ABSENT can never be stored in or returned from a descriptor's attributes
// bitfield. It is only used as a return value meaning the attributes of
@@ -49,6 +55,8 @@ namespace v8 {
namespace internal {
class Smi;
+class Type;
+class TypeInfo;
// Type of properties.
// Order of properties is significant.
@@ -59,7 +67,7 @@ enum PropertyType {
NORMAL = 0,
// Only in fast mode.
FIELD = 1,
- CONSTANT_FUNCTION = 2,
+ CONSTANT = 2,
CALLBACKS = 3,
// Only in lookup results, not in descriptors.
HANDLER = 4,
@@ -70,30 +78,143 @@ enum PropertyType {
};
+class Representation {
+ public:
+ enum Kind {
+ kNone,
+ kByte,
+ kSmi,
+ kInteger32,
+ kDouble,
+ kHeapObject,
+ kTagged,
+ kExternal,
+ kNumRepresentations
+ };
+
+ Representation() : kind_(kNone) { }
+
+ static Representation None() { return Representation(kNone); }
+ static Representation Tagged() { return Representation(kTagged); }
+ static Representation Byte() { return Representation(kByte); }
+ static Representation Smi() { return Representation(kSmi); }
+ static Representation Integer32() { return Representation(kInteger32); }
+ static Representation Double() { return Representation(kDouble); }
+ static Representation HeapObject() { return Representation(kHeapObject); }
+ static Representation External() { return Representation(kExternal); }
+
+ static Representation FromKind(Kind kind) { return Representation(kind); }
+
+ // TODO(rossberg): this should die eventually.
+ static Representation FromType(TypeInfo info);
+ static Representation FromType(Handle<Type> type);
+
+ bool Equals(const Representation& other) const {
+ return kind_ == other.kind_;
+ }
+
+ bool IsCompatibleForLoad(const Representation& other) const {
+ return (IsDouble() && other.IsDouble()) ||
+ (!IsDouble() && !other.IsDouble());
+ }
+
+ bool IsCompatibleForStore(const Representation& other) const {
+ return Equals(other);
+ }
+
+ bool is_more_general_than(const Representation& other) const {
+ ASSERT(kind_ != kExternal);
+ ASSERT(other.kind_ != kExternal);
+ if (IsHeapObject()) return other.IsDouble() || other.IsNone();
+ return kind_ > other.kind_;
+ }
+
+ bool fits_into(const Representation& other) const {
+ return other.is_more_general_than(*this) || other.Equals(*this);
+ }
+
+ Representation generalize(Representation other) {
+ if (other.fits_into(*this)) return *this;
+ if (other.is_more_general_than(*this)) return other;
+ return Representation::Tagged();
+ }
+
+ Kind kind() const { return static_cast<Kind>(kind_); }
+ bool IsNone() const { return kind_ == kNone; }
+ bool IsByte() const { return kind_ == kByte; }
+ bool IsTagged() const { return kind_ == kTagged; }
+ bool IsSmi() const { return kind_ == kSmi; }
+ bool IsSmiOrTagged() const { return IsSmi() || IsTagged(); }
+ bool IsInteger32() const { return kind_ == kInteger32; }
+ bool IsSmiOrInteger32() const { return IsSmi() || IsInteger32(); }
+ bool IsDouble() const { return kind_ == kDouble; }
+ bool IsHeapObject() const { return kind_ == kHeapObject; }
+ bool IsExternal() const { return kind_ == kExternal; }
+ bool IsSpecialization() const {
+ return IsByte() || IsSmi() || IsInteger32() || IsDouble();
+ }
+ const char* Mnemonic() const;
+
+ private:
+ explicit Representation(Kind k) : kind_(k) { }
+
+ // Make sure kind fits in int8.
+ STATIC_ASSERT(kNumRepresentations <= (1 << kBitsPerByte));
+
+ int8_t kind_;
+};
+
+
// PropertyDetails captures type and attributes for a property.
// They are used both in property dictionaries and instance descriptors.
class PropertyDetails BASE_EMBEDDED {
public:
PropertyDetails(PropertyAttributes attributes,
PropertyType type,
- int index = 0) {
+ int index) {
value_ = TypeField::encode(type)
| AttributesField::encode(attributes)
| DictionaryStorageField::encode(index);
ASSERT(type == this->type());
ASSERT(attributes == this->attributes());
- ASSERT(index == this->dictionary_index());
+ }
+
+ PropertyDetails(PropertyAttributes attributes,
+ PropertyType type,
+ Representation representation,
+ int field_index = 0) {
+ value_ = TypeField::encode(type)
+ | AttributesField::encode(attributes)
+ | RepresentationField::encode(EncodeRepresentation(representation))
+ | FieldIndexField::encode(field_index);
}
int pointer() { return DescriptorPointer::decode(value_); }
PropertyDetails set_pointer(int i) { return PropertyDetails(value_, i); }
+ PropertyDetails CopyWithRepresentation(Representation representation) {
+ return PropertyDetails(value_, representation);
+ }
+ PropertyDetails CopyAddAttributes(PropertyAttributes new_attributes) {
+ new_attributes =
+ static_cast<PropertyAttributes>(attributes() | new_attributes);
+ return PropertyDetails(value_, new_attributes);
+ }
+
// Conversion for storing details as Object*.
explicit inline PropertyDetails(Smi* smi);
inline Smi* AsSmi();
+ static uint8_t EncodeRepresentation(Representation representation) {
+ return representation.kind();
+ }
+
+ static Representation DecodeRepresentation(uint32_t bits) {
+ return Representation::FromKind(static_cast<Representation::Kind>(bits));
+ }
+
PropertyType type() { return TypeField::decode(value_); }
PropertyAttributes attributes() const {
@@ -104,8 +225,13 @@ class PropertyDetails BASE_EMBEDDED {
return DictionaryStorageField::decode(value_);
}
- int descriptor_index() {
- return DescriptorStorageField::decode(value_);
+ Representation representation() {
+ ASSERT(type() != NORMAL);
+ return DecodeRepresentation(RepresentationField::decode(value_));
+ }
+
+ int field_index() {
+ return FieldIndexField::decode(value_);
}
inline PropertyDetails AsDeleted();
@@ -123,16 +249,28 @@ class PropertyDetails BASE_EMBEDDED {
// constants can be embedded in generated code.
class TypeField: public BitField<PropertyType, 0, 3> {};
class AttributesField: public BitField<PropertyAttributes, 3, 3> {};
+
+ // Bit fields for normalized objects.
class DeletedField: public BitField<uint32_t, 6, 1> {};
class DictionaryStorageField: public BitField<uint32_t, 7, 24> {};
- class DescriptorStorageField: public BitField<uint32_t, 7, 11> {};
- class DescriptorPointer: public BitField<uint32_t, 18, 11> {};
+
+ // Bit fields for fast objects.
+ class DescriptorPointer: public BitField<uint32_t, 6, 11> {};
+ class RepresentationField: public BitField<uint32_t, 17, 3> {};
+ class FieldIndexField: public BitField<uint32_t, 20, 11> {};
static const int kInitialIndex = 1;
private:
PropertyDetails(int value, int pointer) {
- value_ = DescriptorPointer::update(value, pointer);
+ value_ = DescriptorPointer::update(value, pointer);
+ }
+ PropertyDetails(int value, Representation representation) {
+ value_ = RepresentationField::update(
+ value, EncodeRepresentation(representation));
+ }
+ PropertyDetails(int value, PropertyAttributes attributes) {
+ value_ = AttributesField::update(value, attributes);
}
uint32_t value_;
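
The Representation class introduced in this file forms a small lattice: generalize() keeps whichever operand the other fits into and otherwise widens all the way to Tagged, with HeapObject treated specially in is_more_general_than(). The standalone sketch below mirrors that logic with a bare enum (the kExternal kind and the class wrapper are omitted); it is an illustration of the lattice, not the V8 code itself.

#include <cassert>

// Kinds ordered from least to most general, as in the enum above.
enum Kind { kNone, kByte, kSmi, kInteger32, kDouble, kHeapObject, kTagged };

// Mirrors Representation::is_more_general_than: HeapObject is only more
// general than None and Double; all other kinds compare by enum order.
inline bool IsMoreGeneralThan(Kind a, Kind b) {
  if (a == kHeapObject) return b == kDouble || b == kNone;
  return a > b;
}

inline bool FitsInto(Kind a, Kind b) {
  return IsMoreGeneralThan(b, a) || a == b;
}

// Mirrors Representation::generalize: keep the more general of the two when
// one fits into the other, otherwise fall back to Tagged.
inline Kind Generalize(Kind a, Kind b) {
  if (FitsInto(b, a)) return a;
  if (IsMoreGeneralThan(b, a)) return b;
  return kTagged;
}

int main() {
  assert(Generalize(kSmi, kDouble) == kDouble);      // Smi values fit into Double.
  assert(Generalize(kDouble, kHeapObject) == kHeapObject);
  assert(Generalize(kHeapObject, kSmi) == kTagged);  // Neither fits; widen to Tagged.
  return 0;
}
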
diff --git a/deps/v8/src/property.cc b/deps/v8/src/property.cc
index 05342eea9..83a6a365b 100644
--- a/deps/v8/src/property.cc
+++ b/deps/v8/src/property.cc
@@ -55,15 +55,15 @@ void LookupResult::Print(FILE* out) {
PrintF(out, " -type = normal\n");
PrintF(out, " -entry = %d", GetDictionaryEntry());
break;
- case CONSTANT_FUNCTION:
- PrintF(out, " -type = constant function\n");
- PrintF(out, " -function:\n");
- GetConstantFunction()->Print(out);
+ case CONSTANT:
+ PrintF(out, " -type = constant\n");
+ PrintF(out, " -value:\n");
+ GetConstant()->Print(out);
PrintF(out, "\n");
break;
case FIELD:
PrintF(out, " -type = field\n");
- PrintF(out, " -index = %d", GetFieldIndex());
+ PrintF(out, " -index = %d", GetFieldIndex().field_index());
PrintF(out, "\n");
break;
case CALLBACKS:
@@ -85,7 +85,7 @@ void LookupResult::Print(FILE* out) {
GetTransitionMap()->Print(out);
PrintF(out, "\n");
return;
- case CONSTANT_FUNCTION:
+ case CONSTANT:
PrintF(out, " -type = constant property transition\n");
PrintF(out, " -map:\n");
GetTransitionMap()->Print(out);
@@ -112,7 +112,6 @@ void Descriptor::Print(FILE* out) {
GetKey()->ShortPrint(out);
PrintF(out, " @ ");
GetValue()->ShortPrint(out);
- PrintF(out, " %d\n", GetDetails().descriptor_index());
}
diff --git a/deps/v8/src/property.h b/deps/v8/src/property.h
index 9eb4194b4..0f78ba478 100644
--- a/deps/v8/src/property.h
+++ b/deps/v8/src/property.h
@@ -44,19 +44,16 @@ namespace internal {
class Descriptor BASE_EMBEDDED {
public:
- static int IndexFromValue(Object* value) {
- return Smi::cast(value)->value();
- }
-
- MUST_USE_RESULT MaybeObject* KeyToSymbol() {
- if (!StringShape(key_).IsSymbol()) {
- MaybeObject* maybe_result = HEAP->LookupSymbol(key_);
+ MUST_USE_RESULT MaybeObject* KeyToUniqueName() {
+ if (!key_->IsUniqueName()) {
+ MaybeObject* maybe_result =
+ key_->GetIsolate()->heap()->InternalizeString(String::cast(key_));
if (!maybe_result->To(&key_)) return maybe_result;
}
return key_;
}
- String* GetKey() { return key_; }
+ Name* GetKey() { return key_; }
Object* GetValue() { return value_; }
PropertyDetails GetDetails() { return details_; }
@@ -64,39 +61,36 @@ class Descriptor BASE_EMBEDDED {
void Print(FILE* out);
#endif
- void SetEnumerationIndex(int index) {
- details_ = PropertyDetails(details_.attributes(), details_.type(), index);
- }
-
void SetSortedKeyIndex(int index) { details_ = details_.set_pointer(index); }
private:
- String* key_;
+ Name* key_;
Object* value_;
PropertyDetails details_;
protected:
Descriptor() : details_(Smi::FromInt(0)) {}
- void Init(String* key, Object* value, PropertyDetails details) {
+ void Init(Name* key, Object* value, PropertyDetails details) {
key_ = key;
value_ = value;
details_ = details;
}
- Descriptor(String* key, Object* value, PropertyDetails details)
+ Descriptor(Name* key, Object* value, PropertyDetails details)
: key_(key),
value_(value),
details_(details) { }
- Descriptor(String* key,
+ Descriptor(Name* key,
Object* value,
PropertyAttributes attributes,
PropertyType type,
- int index)
+ Representation representation,
+ int field_index = 0)
: key_(key),
value_(value),
- details_(attributes, type, index) { }
+ details_(attributes, type, representation, field_index) { }
friend class DescriptorArray;
};
@@ -104,31 +98,82 @@ class Descriptor BASE_EMBEDDED {
class FieldDescriptor: public Descriptor {
public:
- FieldDescriptor(String* key,
+ FieldDescriptor(Name* key,
int field_index,
PropertyAttributes attributes,
- int index = 0)
- : Descriptor(key, Smi::FromInt(field_index), attributes, FIELD, index) {}
+ Representation representation)
+ : Descriptor(key, Smi::FromInt(0), attributes,
+ FIELD, representation, field_index) {}
};
-class ConstantFunctionDescriptor: public Descriptor {
+class ConstantDescriptor: public Descriptor {
public:
- ConstantFunctionDescriptor(String* key,
- JSFunction* function,
- PropertyAttributes attributes,
- int index)
- : Descriptor(key, function, attributes, CONSTANT_FUNCTION, index) {}
+ ConstantDescriptor(Name* key,
+ Object* value,
+ PropertyAttributes attributes)
+ : Descriptor(key, value, attributes, CONSTANT,
+ value->OptimalRepresentation()) {}
};
class CallbacksDescriptor: public Descriptor {
public:
- CallbacksDescriptor(String* key,
+ CallbacksDescriptor(Name* key,
Object* foreign,
- PropertyAttributes attributes,
- int index = 0)
- : Descriptor(key, foreign, attributes, CALLBACKS, index) {}
+ PropertyAttributes attributes)
+ : Descriptor(key, foreign, attributes, CALLBACKS,
+ Representation::Tagged()) {}
+};
+
+
+// Holds a property index value distinguishing if it is a field index or an
+// index inside the object header.
+class PropertyIndex {
+ public:
+ static PropertyIndex NewFieldIndex(int index) {
+ return PropertyIndex(index, false);
+ }
+ static PropertyIndex NewHeaderIndex(int index) {
+ return PropertyIndex(index, true);
+ }
+
+ bool is_field_index() { return (index_ & kHeaderIndexBit) == 0; }
+ bool is_header_index() { return (index_ & kHeaderIndexBit) != 0; }
+
+ int field_index() {
+ ASSERT(is_field_index());
+ return value();
+ }
+ int header_index() {
+ ASSERT(is_header_index());
+ return value();
+ }
+
+ bool is_inobject(Handle<JSObject> holder) {
+ if (is_header_index()) return true;
+ return field_index() < holder->map()->inobject_properties();
+ }
+
+ int translate(Handle<JSObject> holder) {
+ if (is_header_index()) return header_index();
+ int index = field_index() - holder->map()->inobject_properties();
+ if (index >= 0) return index;
+ return index + holder->map()->instance_size() / kPointerSize;
+ }
+
+ private:
+ static const int kHeaderIndexBit = 1 << 31;
+ static const int kIndexMask = ~kHeaderIndexBit;
+
+ int value() { return index_ & kIndexMask; }
+
+ PropertyIndex(int index, bool is_header_based)
+ : index_(index | (is_header_based ? kHeaderIndexBit : 0)) {
+ ASSERT(index <= kIndexMask);
+ }
+
+ int index_;
};
@@ -140,15 +185,17 @@ class LookupResult BASE_EMBEDDED {
lookup_type_(NOT_FOUND),
holder_(NULL),
cacheable_(true),
- details_(NONE, NONEXISTENT) {
+ details_(NONE, NONEXISTENT, Representation::None()) {
isolate->SetTopLookupResult(this);
}
~LookupResult() {
- ASSERT(isolate_->top_lookup_result() == this);
- isolate_->SetTopLookupResult(next_);
+ ASSERT(isolate()->top_lookup_result() == this);
+ isolate()->SetTopLookupResult(next_);
}
+ Isolate* isolate() const { return isolate_; }
+
void DescriptorResult(JSObject* holder, PropertyDetails details, int number) {
lookup_type_ = DESCRIPTOR_TYPE;
holder_ = holder;
@@ -156,23 +203,19 @@ class LookupResult BASE_EMBEDDED {
number_ = number;
}
+ bool CanHoldValue(Handle<Object> value) {
+ if (IsNormal()) return true;
+ ASSERT(!IsTransition());
+ return value->FitsRepresentation(details_.representation());
+ }
+
void TransitionResult(JSObject* holder, int number) {
lookup_type_ = TRANSITION_TYPE;
- details_ = PropertyDetails(NONE, TRANSITION);
+ details_ = PropertyDetails(NONE, TRANSITION, Representation::None());
holder_ = holder;
number_ = number;
}
- void ConstantResult(JSObject* holder) {
- lookup_type_ = CONSTANT_TYPE;
- holder_ = holder;
- details_ =
- PropertyDetails(static_cast<PropertyAttributes>(DONT_ENUM |
- DONT_DELETE),
- CALLBACKS);
- number_ = -1;
- }
-
void DictionaryResult(JSObject* holder, int entry) {
lookup_type_ = DICTIONARY_TYPE;
holder_ = holder;
@@ -183,19 +226,19 @@ class LookupResult BASE_EMBEDDED {
void HandlerResult(JSProxy* proxy) {
lookup_type_ = HANDLER_TYPE;
holder_ = proxy;
- details_ = PropertyDetails(NONE, HANDLER);
+ details_ = PropertyDetails(NONE, HANDLER, Representation::Tagged());
cacheable_ = false;
}
void InterceptorResult(JSObject* holder) {
lookup_type_ = INTERCEPTOR_TYPE;
holder_ = holder;
- details_ = PropertyDetails(NONE, INTERCEPTOR);
+ details_ = PropertyDetails(NONE, INTERCEPTOR, Representation::Tagged());
}
void NotFound() {
lookup_type_ = NOT_FOUND;
- details_ = PropertyDetails(NONE, NONEXISTENT);
+ details_ = PropertyDetails(NONE, NONEXISTENT, Representation::None());
holder_ = NULL;
}
@@ -214,6 +257,13 @@ class LookupResult BASE_EMBEDDED {
return details_.type();
}
+ Representation representation() {
+ ASSERT(IsFound());
+ ASSERT(!IsTransition());
+ ASSERT(details_.type() != NONEXISTENT);
+ return details_.representation();
+ }
+
PropertyAttributes GetAttributes() {
ASSERT(!IsTransition());
ASSERT(IsFound());
@@ -254,14 +304,17 @@ class LookupResult BASE_EMBEDDED {
return details_.type() == NORMAL;
}
+ bool IsConstant() {
+ ASSERT(!(details_.type() == CONSTANT && !IsFound()));
+ return details_.type() == CONSTANT;
+ }
+
bool IsConstantFunction() {
- ASSERT(!(details_.type() == CONSTANT_FUNCTION && !IsFound()));
- return details_.type() == CONSTANT_FUNCTION;
+ return IsConstant() && GetValue()->IsJSFunction();
}
bool IsDontDelete() { return details_.IsDontDelete(); }
bool IsDontEnum() { return details_.IsDontEnum(); }
- bool IsDeleted() { return details_.IsDeleted(); }
bool IsFound() { return lookup_type_ != NOT_FOUND; }
bool IsTransition() { return lookup_type_ == TRANSITION_TYPE; }
bool IsHandler() { return lookup_type_ == HANDLER_TYPE; }
@@ -272,34 +325,64 @@ class LookupResult BASE_EMBEDDED {
return IsFound() && !IsTransition();
}
+ bool IsDataProperty() {
+ switch (type()) {
+ case FIELD:
+ case NORMAL:
+ case CONSTANT:
+ return true;
+ case CALLBACKS: {
+ Object* callback = GetCallbackObject();
+ return callback->IsAccessorInfo() || callback->IsForeign();
+ }
+ case HANDLER:
+ case INTERCEPTOR:
+ case TRANSITION:
+ case NONEXISTENT:
+ return false;
+ }
+ UNREACHABLE();
+ return false;
+ }
+
bool IsCacheable() { return cacheable_; }
void DisallowCaching() { cacheable_ = false; }
Object* GetLazyValue() {
switch (type()) {
case FIELD:
- return holder()->FastPropertyAt(GetFieldIndex());
+ return holder()->RawFastPropertyAt(GetFieldIndex().field_index());
case NORMAL: {
Object* value;
value = holder()->property_dictionary()->ValueAt(GetDictionaryEntry());
if (holder()->IsGlobalObject()) {
- value = JSGlobalPropertyCell::cast(value)->value();
+ value = PropertyCell::cast(value)->value();
}
return value;
}
- case CONSTANT_FUNCTION:
- return GetConstantFunction();
- default:
- return Smi::FromInt(0);
+ case CONSTANT:
+ return GetConstant();
+ case CALLBACKS:
+ case HANDLER:
+ case INTERCEPTOR:
+ case TRANSITION:
+ case NONEXISTENT:
+ return isolate()->heap()->the_hole_value();
}
+ UNREACHABLE();
+ return NULL;
}
- Map* GetTransitionTarget() {
+ Map* GetTransitionTarget(Map* map) {
ASSERT(IsTransition());
- TransitionArray* transitions = holder()->map()->transitions();
+ TransitionArray* transitions = map->transitions();
return transitions->GetTarget(number_);
}
+ Map* GetTransitionTarget() {
+ return GetTransitionTarget(holder()->map());
+ }
+
PropertyDetails GetTransitionDetails(Map* map) {
ASSERT(IsTransition());
TransitionArray* transitions = map->transitions();
@@ -314,6 +397,10 @@ class LookupResult BASE_EMBEDDED {
return IsTransition() && GetTransitionDetails(map).type() == FIELD;
}
+ bool IsTransitionToConstant(Map* map) {
+ return IsTransition() && GetTransitionDetails(map).type() == CONSTANT;
+ }
+
Map* GetTransitionMap() {
ASSERT(IsTransition());
return Map::cast(GetValue());
@@ -334,16 +421,13 @@ class LookupResult BASE_EMBEDDED {
return number_;
}
- int GetFieldIndex() {
+ PropertyIndex GetFieldIndex() {
ASSERT(lookup_type_ == DESCRIPTOR_TYPE);
- ASSERT(IsField());
- return Descriptor::IndexFromValue(GetValue());
+ return PropertyIndex::NewFieldIndex(GetFieldIndexFromMap(holder()->map()));
}
int GetLocalFieldIndexFromMap(Map* map) {
- ASSERT(IsField());
- return Descriptor::IndexFromValue(GetValueFromMap(map)) -
- map->inobject_properties();
+ return GetFieldIndexFromMap(map) - map->inobject_properties();
}
int GetDictionaryEntry() {
@@ -352,20 +436,26 @@ class LookupResult BASE_EMBEDDED {
}
JSFunction* GetConstantFunction() {
- ASSERT(type() == CONSTANT_FUNCTION);
+ ASSERT(type() == CONSTANT);
return JSFunction::cast(GetValue());
}
+ Object* GetConstantFromMap(Map* map) {
+ ASSERT(type() == CONSTANT);
+ return GetValueFromMap(map);
+ }
+
JSFunction* GetConstantFunctionFromMap(Map* map) {
- ASSERT(type() == CONSTANT_FUNCTION);
- return JSFunction::cast(GetValueFromMap(map));
+ return JSFunction::cast(GetConstantFromMap(map));
+ }
+
+ Object* GetConstant() {
+ ASSERT(type() == CONSTANT);
+ return GetValue();
}
Object* GetCallbackObject() {
- if (lookup_type_ == CONSTANT_TYPE) {
- return HEAP->prototype_accessors();
- }
- ASSERT(!IsTransition());
+ ASSERT(type() == CALLBACKS && !IsTransition());
return GetValue();
}
@@ -388,6 +478,12 @@ class LookupResult BASE_EMBEDDED {
return map->instance_descriptors()->GetValue(number_);
}
+ int GetFieldIndexFromMap(Map* map) const {
+ ASSERT(lookup_type_ == DESCRIPTOR_TYPE);
+ ASSERT(number_ < map->NumberOfOwnDescriptors());
+ return map->instance_descriptors()->GetFieldIndex(number_);
+ }
+
void Iterate(ObjectVisitor* visitor);
private:
@@ -401,8 +497,7 @@ class LookupResult BASE_EMBEDDED {
TRANSITION_TYPE,
DICTIONARY_TYPE,
HANDLER_TYPE,
- INTERCEPTOR_TYPE,
- CONSTANT_TYPE
+ INTERCEPTOR_TYPE
} lookup_type_;
JSReceiver* holder_;
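
The new PropertyIndex above distinguishes in-object header slots from property field slots by stealing the top bit of a single integer and keeping the index itself in the remaining bits. The following self-contained sketch shows that tag-bit packing; TaggedIndex is a hypothetical name and std::uint32_t replaces V8's signed int storage to keep the bit operations well defined.

#include <cassert>
#include <cstdint>

// Illustrative tag-bit packing in the spirit of PropertyIndex above: the high
// bit says which kind of index this is, the low 31 bits carry the value.
class TaggedIndex {
 public:
  static TaggedIndex NewFieldIndex(std::uint32_t index) { return TaggedIndex(index, false); }
  static TaggedIndex NewHeaderIndex(std::uint32_t index) { return TaggedIndex(index, true); }

  bool is_header_index() const { return (bits_ & kHeaderBit) != 0; }
  bool is_field_index() const { return !is_header_index(); }
  std::uint32_t value() const { return bits_ & kValueMask; }

 private:
  static constexpr std::uint32_t kHeaderBit = 1u << 31;
  static constexpr std::uint32_t kValueMask = ~kHeaderBit;

  TaggedIndex(std::uint32_t index, bool is_header)
      : bits_(index | (is_header ? kHeaderBit : 0u)) {
    assert(index <= kValueMask);  // The index must fit in the low 31 bits.
  }

  std::uint32_t bits_;
};

int main() {
  TaggedIndex field = TaggedIndex::NewFieldIndex(7);
  TaggedIndex header = TaggedIndex::NewHeaderIndex(7);
  assert(field.is_field_index() && field.value() == 7);
  assert(header.is_header_index() && header.value() == 7);
  return 0;
}
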
diff --git a/deps/v8/src/proxy.js b/deps/v8/src/proxy.js
index 4e86c8892..4c03f2153 100644
--- a/deps/v8/src/proxy.js
+++ b/deps/v8/src/proxy.js
@@ -27,21 +27,25 @@
"use strict";
-global.Proxy = new $Object();
+// This file relies on the fact that the following declaration has been made
+// in runtime.js:
+// var $Object = global.Object;
-var $Proxy = global.Proxy
+var $Proxy = new $Object();
-$Proxy.create = function(handler, proto) {
+// -------------------------------------------------------------------
+
+function ProxyCreate(handler, proto) {
if (!IS_SPEC_OBJECT(handler))
throw MakeTypeError("handler_non_object", ["create"])
if (IS_UNDEFINED(proto))
proto = null
- else if (!(IS_SPEC_OBJECT(proto) || proto === null))
+ else if (!(IS_SPEC_OBJECT(proto) || IS_NULL(proto)))
throw MakeTypeError("proto_non_object", ["create"])
return %CreateJSProxy(handler, proto)
}
-$Proxy.createFunction = function(handler, callTrap, constructTrap) {
+function ProxyCreateFunction(handler, callTrap, constructTrap) {
if (!IS_SPEC_OBJECT(handler))
throw MakeTypeError("handler_non_object", ["create"])
if (!IS_SPEC_FUNCTION(callTrap))
@@ -52,7 +56,7 @@ $Proxy.createFunction = function(handler, callTrap, constructTrap) {
// Make sure the trap receives 'undefined' as this.
var construct = constructTrap
constructTrap = function() {
- return %Apply(construct, void 0, arguments, 0, %_ArgumentsLength());
+ return %Apply(construct, UNDEFINED, arguments, 0, %_ArgumentsLength());
}
} else {
throw MakeTypeError("trap_function_expected",
@@ -63,17 +67,31 @@ $Proxy.createFunction = function(handler, callTrap, constructTrap) {
}
+// -------------------------------------------------------------------
+
+function SetUpProxy() {
+ %CheckIsBootstrapping()
+
+ global.Proxy = $Proxy;
+
+ // Set up non-enumerable properties of the Proxy object.
+ InstallFunctions($Proxy, DONT_ENUM, [
+ "create", ProxyCreate,
+ "createFunction", ProxyCreateFunction
+ ])
+}
+
+SetUpProxy();
+
-////////////////////////////////////////////////////////////////////////////////
-// Builtins
-////////////////////////////////////////////////////////////////////////////////
+// -------------------------------------------------------------------
+// Proxy Builtins
function DerivedConstructTrap(callTrap) {
return function() {
var proto = this.prototype
if (!IS_SPEC_OBJECT(proto)) proto = $Object.prototype
- var obj = new $Object()
- obj.__proto__ = proto
+ var obj = { __proto__: proto };
var result = %Apply(callTrap, obj, arguments, 0, %_ArgumentsLength());
return IS_SPEC_OBJECT(result) ? result : obj
}
@@ -158,6 +176,7 @@ function DerivedKeysTrap() {
var enumerableNames = []
for (var i = 0, count = 0; i < names.length; ++i) {
var name = names[i]
+ if (IS_SYMBOL(name)) continue
var desc = this.getOwnPropertyDescriptor(TO_STRING_INLINE(name))
if (!IS_UNDEFINED(desc) && desc.enumerable) {
enumerableNames[count++] = names[i]
@@ -171,9 +190,14 @@ function DerivedEnumerateTrap() {
var enumerableNames = []
for (var i = 0, count = 0; i < names.length; ++i) {
var name = names[i]
+ if (IS_SYMBOL(name)) continue
var desc = this.getPropertyDescriptor(TO_STRING_INLINE(name))
- if (!IS_UNDEFINED(desc) && desc.enumerable) {
- enumerableNames[count++] = names[i]
+ if (!IS_UNDEFINED(desc)) {
+ if (!desc.configurable) {
+ throw MakeTypeError("proxy_prop_not_configurable",
+ [this, "getPropertyDescriptor", name])
+ }
+ if (desc.enumerable) enumerableNames[count++] = names[i]
}
}
return enumerableNames
@@ -184,6 +208,6 @@ function ProxyEnumerate(proxy) {
if (IS_UNDEFINED(handler.enumerate)) {
return %Apply(DerivedEnumerateTrap, handler, [], 0, 0)
} else {
- return ToStringArray(handler.enumerate(), "enumerate")
+ return ToNameArray(handler.enumerate(), "enumerate", false)
}
}
diff --git a/deps/v8/src/regexp-macro-assembler-irregexp.cc b/deps/v8/src/regexp-macro-assembler-irregexp.cc
index 16766cab0..3b9a2f660 100644
--- a/deps/v8/src/regexp-macro-assembler-irregexp.cc
+++ b/deps/v8/src/regexp-macro-assembler-irregexp.cc
@@ -44,8 +44,8 @@ RegExpMacroAssemblerIrregexp::RegExpMacroAssemblerIrregexp(Vector<byte> buffer,
buffer_(buffer),
pc_(0),
own_buffer_(false),
- advance_current_end_(kInvalidPC) {
-}
+ advance_current_end_(kInvalidPC),
+ isolate_(zone->isolate()) { }
RegExpMacroAssemblerIrregexp::~RegExpMacroAssemblerIrregexp() {
@@ -410,28 +410,6 @@ void RegExpMacroAssemblerIrregexp::CheckNotBackReferenceIgnoreCase(
}
-void RegExpMacroAssemblerIrregexp::CheckCharacters(
- Vector<const uc16> str,
- int cp_offset,
- Label* on_failure,
- bool check_end_of_string) {
- ASSERT(cp_offset >= kMinCPOffset);
- ASSERT(cp_offset + str.length() - 1 <= kMaxCPOffset);
- // It is vital that this loop is backwards due to the unchecked character
- // load below.
- for (int i = str.length() - 1; i >= 0; i--) {
- if (check_end_of_string && i == str.length() - 1) {
- Emit(BC_LOAD_CURRENT_CHAR, cp_offset + i);
- EmitOrLink(on_failure);
- } else {
- Emit(BC_LOAD_CURRENT_CHAR_UNCHECKED, cp_offset + i);
- }
- Emit(BC_CHECK_NOT_CHAR, str[i]);
- EmitOrLink(on_failure);
- }
-}
-
-
void RegExpMacroAssemblerIrregexp::IfRegisterLT(int register_index,
int comparand,
Label* on_less_than) {
@@ -467,7 +445,7 @@ Handle<HeapObject> RegExpMacroAssemblerIrregexp::GetCode(
Handle<String> source) {
Bind(&backtrack_);
Emit(BC_POP_BT, 0);
- Handle<ByteArray> array = FACTORY->NewByteArray(length());
+ Handle<ByteArray> array = isolate_->factory()->NewByteArray(length());
Copy(array->GetDataStartAddress());
return array;
}
@@ -479,7 +457,7 @@ int RegExpMacroAssemblerIrregexp::length() {
void RegExpMacroAssemblerIrregexp::Copy(Address a) {
- memcpy(a, buffer_.start(), length());
+ OS::MemCopy(a, buffer_.start(), length());
}
@@ -488,7 +466,7 @@ void RegExpMacroAssemblerIrregexp::Expand() {
Vector<byte> old_buffer = buffer_;
buffer_ = Vector<byte>::New(old_buffer.length() * 2);
own_buffer_ = true;
- memcpy(buffer_.start(), old_buffer.start(), old_buffer.length());
+ OS::MemCopy(buffer_.start(), old_buffer.start(), old_buffer.length());
if (old_buffer_was_our_own) {
old_buffer.Dispose();
}
diff --git a/deps/v8/src/regexp-macro-assembler-irregexp.h b/deps/v8/src/regexp-macro-assembler-irregexp.h
index 4bc29809b..f8a412d4f 100644
--- a/deps/v8/src/regexp-macro-assembler-irregexp.h
+++ b/deps/v8/src/regexp-macro-assembler-irregexp.h
@@ -103,10 +103,6 @@ class RegExpMacroAssemblerIrregexp: public RegExpMacroAssembler {
virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
Label* on_no_match);
- virtual void CheckCharacters(Vector<const uc16> str,
- int cp_offset,
- Label* on_failure,
- bool check_end_of_string);
virtual void IfRegisterLT(int register_index, int comparand, Label* if_lt);
virtual void IfRegisterGE(int register_index, int comparand, Label* if_ge);
virtual void IfRegisterEqPos(int register_index, Label* if_eq);
@@ -138,6 +134,8 @@ class RegExpMacroAssemblerIrregexp: public RegExpMacroAssembler {
int advance_current_offset_;
int advance_current_end_;
+ Isolate* isolate_;
+
static const int kInvalidPC = -1;
DISALLOW_IMPLICIT_CONSTRUCTORS(RegExpMacroAssemblerIrregexp);
diff --git a/deps/v8/src/regexp-macro-assembler-tracer.cc b/deps/v8/src/regexp-macro-assembler-tracer.cc
index f878e8c46..1ce1fa4b2 100644
--- a/deps/v8/src/regexp-macro-assembler-tracer.cc
+++ b/deps/v8/src/regexp-macro-assembler-tracer.cc
@@ -383,21 +383,6 @@ void RegExpMacroAssemblerTracer::CheckNotBackReferenceIgnoreCase(
}
-void RegExpMacroAssemblerTracer::CheckCharacters(Vector<const uc16> str,
- int cp_offset,
- Label* on_failure,
- bool check_end_of_string) {
- PrintF(" %s(str=\"",
- check_end_of_string ? "CheckCharacters" : "CheckCharactersUnchecked");
- for (int i = 0; i < str.length(); i++) {
- PrintF("0x%04x", str[i]);
- }
- PrintF("\", cp_offset=%d, label[%08x])\n",
- cp_offset, LabelToInt(on_failure));
- assembler_->CheckCharacters(str, cp_offset, on_failure, check_end_of_string);
-}
-
-
bool RegExpMacroAssemblerTracer::CheckSpecialCharacterClass(
uc16 type,
Label* on_no_match) {
diff --git a/deps/v8/src/regexp-macro-assembler-tracer.h b/deps/v8/src/regexp-macro-assembler-tracer.h
index ac262df76..852fb8041 100644
--- a/deps/v8/src/regexp-macro-assembler-tracer.h
+++ b/deps/v8/src/regexp-macro-assembler-tracer.h
@@ -49,11 +49,6 @@ class RegExpMacroAssemblerTracer: public RegExpMacroAssembler {
Label* on_equal);
virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
virtual void CheckCharacterLT(uc16 limit, Label* on_less);
- virtual void CheckCharacters(
- Vector<const uc16> str,
- int cp_offset,
- Label* on_failure,
- bool check_end_of_string);
virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
virtual void CheckNotAtStart(Label* on_not_at_start);
virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
diff --git a/deps/v8/src/regexp-macro-assembler.cc b/deps/v8/src/regexp-macro-assembler.cc
index 82ba34d5c..7d027f880 100644
--- a/deps/v8/src/regexp-macro-assembler.cc
+++ b/deps/v8/src/regexp-macro-assembler.cc
@@ -77,14 +77,14 @@ const byte* NativeRegExpMacroAssembler::StringCharacterPosition(
ASSERT(subject->IsExternalString() || subject->IsSeqString());
ASSERT(start_index >= 0);
ASSERT(start_index <= subject->length());
- if (subject->IsAsciiRepresentation()) {
+ if (subject->IsOneByteRepresentation()) {
const byte* address;
if (StringShape(subject).IsExternal()) {
- const char* data = ExternalAsciiString::cast(subject)->GetChars();
+ const uint8_t* data = ExternalAsciiString::cast(subject)->GetChars();
address = reinterpret_cast<const byte*>(data);
} else {
- ASSERT(subject->IsSeqAsciiString());
- char* data = SeqAsciiString::cast(subject)->GetChars();
+ ASSERT(subject->IsSeqOneByteString());
+ const uint8_t* data = SeqOneByteString::cast(subject)->GetChars();
address = reinterpret_cast<const byte*>(data);
}
return address + start_index;
@@ -113,8 +113,8 @@ NativeRegExpMacroAssembler::Result NativeRegExpMacroAssembler::Match(
ASSERT(previous_index <= subject->length());
// No allocations before calling the regexp, but we can't use
- // AssertNoAllocation, since regexps might be preempted, and another thread
- // might do allocation anyway.
+ // DisallowHeapAllocation, since regexps might be preempted, and another
+ // thread might do allocation anyway.
String* subject_ptr = *subject;
// Character offsets into string.
@@ -133,7 +133,7 @@ NativeRegExpMacroAssembler::Result NativeRegExpMacroAssembler::Match(
slice_offset = slice->offset();
}
// Ensure that an underlying string has the same ASCII-ness.
- bool is_ascii = subject_ptr->IsAsciiRepresentation();
+ bool is_ascii = subject_ptr->IsOneByteRepresentation();
ASSERT(subject_ptr->IsExternalString() || subject_ptr->IsSeqString());
// String is now either Sequential or External
int char_size_shift = is_ascii ? 0 : 1;
@@ -163,7 +163,6 @@ NativeRegExpMacroAssembler::Result NativeRegExpMacroAssembler::Execute(
int* output,
int output_size,
Isolate* isolate) {
- ASSERT(isolate == Isolate::Current());
// Ensure that the minimum stack has been allocated.
RegExpStackScope stack_scope(isolate);
Address stack_base = stack_scope.stack()->stack_base();
@@ -210,6 +209,26 @@ const byte NativeRegExpMacroAssembler::word_character_map[] = {
0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, // 'h' - 'o'
0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, // 'p' - 'w'
0xffu, 0xffu, 0xffu, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, // 'x' - 'z'
+ // Latin-1 range
+ 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
+ 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
+ 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
+ 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
+
+ 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
+ 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
+ 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
+ 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
+
+ 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
+ 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
+ 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
+ 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
+
+ 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
+ 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
+ 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
+ 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
};
@@ -218,7 +237,6 @@ int NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16(
Address byte_offset2,
size_t byte_length,
Isolate* isolate) {
- ASSERT(isolate == Isolate::Current());
unibrow::Mapping<unibrow::Ecma262Canonicalize>* canonicalize =
isolate->regexp_macro_assembler_canonicalize();
// This function is not allowed to cause a garbage collection.
@@ -251,7 +269,6 @@ int NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16(
Address NativeRegExpMacroAssembler::GrowStack(Address stack_pointer,
Address* stack_base,
Isolate* isolate) {
- ASSERT(isolate == Isolate::Current());
RegExpStack* regexp_stack = isolate->regexp_stack();
size_t size = regexp_stack->stack_capacity();
Address old_stack_base = regexp_stack->stack_base();
diff --git a/deps/v8/src/regexp-macro-assembler.h b/deps/v8/src/regexp-macro-assembler.h
index bcf36735c..1ff8bd979 100644
--- a/deps/v8/src/regexp-macro-assembler.h
+++ b/deps/v8/src/regexp-macro-assembler.h
@@ -87,17 +87,6 @@ class RegExpMacroAssembler {
Label* on_equal) = 0;
virtual void CheckCharacterGT(uc16 limit, Label* on_greater) = 0;
virtual void CheckCharacterLT(uc16 limit, Label* on_less) = 0;
- // Check the current character for a match with a literal string. If we
- // fail to match then goto the on_failure label. If check_eos is set then
- // the end of input always fails. If check_eos is clear then it is the
- // caller's responsibility to ensure that the end of string is not hit.
- // If the label is NULL then we should pop a backtrack address off
- // the stack and go to that.
- virtual void CheckCharacters(
- Vector<const uc16> str,
- int cp_offset,
- Label* on_failure,
- bool check_eos) = 0;
virtual void CheckGreedyLoop(Label* on_tos_equals_current_position) = 0;
virtual void CheckNotAtStart(Label* on_not_at_start) = 0;
virtual void CheckNotBackReference(int start_reg, Label* on_no_match) = 0;
@@ -244,10 +233,10 @@ class NativeRegExpMacroAssembler: public RegExpMacroAssembler {
static const byte* StringCharacterPosition(String* subject, int start_index);
- // Byte map of ASCII characters with a 0xff if the character is a word
+ // Byte map of one-byte characters with a 0xff if the character is a word
// character (digit, letter or underscore) and 0x00 otherwise.
// Used by generated RegExp code.
- static const byte word_character_map[128];
+ static const byte word_character_map[256];
static Address word_character_map_address() {
return const_cast<Address>(&word_character_map[0]);
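
word_character_map grows from 128 to 256 entries in this change: one byte per Latin-1 code point, 0xff for \w characters (digits, ASCII letters, underscore) and 0x00 otherwise, with the newly added upper half left at zero. A rough sketch of how such a table can be built and queried follows; BuildWordCharacterMap is an illustrative helper, not part of V8.

#include <array>
#include <cassert>
#include <cstdint>

// One byte per Latin-1 code point: 0xff marks a \w character, 0x00 anything
// else. The upper half (0x80-0xff) stays zero, matching the table above.
std::array<std::uint8_t, 256> BuildWordCharacterMap() {
  std::array<std::uint8_t, 256> map{};  // Zero-initialized.
  for (int c = '0'; c <= '9'; ++c) map[c] = 0xff;
  for (int c = 'A'; c <= 'Z'; ++c) map[c] = 0xff;
  for (int c = 'a'; c <= 'z'; ++c) map[c] = 0xff;
  map['_'] = 0xff;
  return map;
}

int main() {
  std::array<std::uint8_t, 256> map = BuildWordCharacterMap();
  assert(map['a'] == 0xff && map['_'] == 0xff && map['7'] == 0xff);
  assert(map[' '] == 0x00);
  assert(map[0xe9] == 0x00);  // Latin-1 'e acute' is not a \w character here.
  return 0;
}
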
diff --git a/deps/v8/src/regexp-stack.cc b/deps/v8/src/regexp-stack.cc
index ff9547f3a..f3af490f1 100644
--- a/deps/v8/src/regexp-stack.cc
+++ b/deps/v8/src/regexp-stack.cc
@@ -39,7 +39,6 @@ RegExpStackScope::RegExpStackScope(Isolate* isolate)
RegExpStackScope::~RegExpStackScope() {
- ASSERT(Isolate::Current() == regexp_stack_->isolate_);
// Reset the buffer if it has grown.
regexp_stack_->Reset();
}
@@ -51,14 +50,13 @@ RegExpStack::RegExpStack()
RegExpStack::~RegExpStack() {
+ thread_local_.Free();
}
char* RegExpStack::ArchiveStack(char* to) {
size_t size = sizeof(thread_local_);
- memcpy(reinterpret_cast<void*>(to),
- &thread_local_,
- size);
+ OS::MemCopy(reinterpret_cast<void*>(to), &thread_local_, size);
thread_local_ = ThreadLocal();
return to + size;
}
@@ -66,7 +64,7 @@ char* RegExpStack::ArchiveStack(char* to) {
char* RegExpStack::RestoreStack(char* from) {
size_t size = sizeof(thread_local_);
- memcpy(&thread_local_, reinterpret_cast<void*>(from), size);
+ OS::MemCopy(&thread_local_, reinterpret_cast<void*>(from), size);
return from + size;
}
@@ -94,10 +92,11 @@ Address RegExpStack::EnsureCapacity(size_t size) {
Address new_memory = NewArray<byte>(static_cast<int>(size));
if (thread_local_.memory_size_ > 0) {
// Copy original memory into top of new memory.
- memcpy(reinterpret_cast<void*>(
- new_memory + size - thread_local_.memory_size_),
- reinterpret_cast<void*>(thread_local_.memory_),
- thread_local_.memory_size_);
+ OS::MemCopy(
+ reinterpret_cast<void*>(
+ new_memory + size - thread_local_.memory_size_),
+ reinterpret_cast<void*>(thread_local_.memory_),
+ thread_local_.memory_size_);
DeleteArray(thread_local_.memory_);
}
thread_local_.memory_ = new_memory;
diff --git a/deps/v8/src/regexp.js b/deps/v8/src/regexp.js
index a3675f033..22b08775b 100644
--- a/deps/v8/src/regexp.js
+++ b/deps/v8/src/regexp.js
@@ -25,11 +25,15 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Expect $Object = global.Object;
-// Expect $Array = global.Array;
+// This file relies on the fact that the following declaration has been made
+// in runtime.js:
+// var $Object = global.Object;
+// var $Array = global.Array;
var $RegExp = global.RegExp;
+// -------------------------------------------------------------------
+
// A recursive descent parser for Patterns according to the grammar of
// ECMA-262 15.10.1, with deviations noted below.
function DoConstructRegExp(object, pattern, flags) {
@@ -132,21 +136,13 @@ function BuildResultFromMatchInfo(lastMatchInfo, s) {
var start = lastMatchInfo[CAPTURE0];
var end = lastMatchInfo[CAPTURE1];
var result = %_RegExpConstructResult(numResults, start, s);
- if (start + 1 == end) {
- result[0] = %_StringCharAt(s, start);
- } else {
- result[0] = %_SubString(s, start, end);
- }
+ result[0] = %_SubString(s, start, end);
var j = REGEXP_FIRST_CAPTURE + 2;
for (var i = 1; i < numResults; i++) {
start = lastMatchInfo[j++];
if (start != -1) {
end = lastMatchInfo[j];
- if (start + 1 == end) {
- result[i] = %_StringCharAt(s, start);
- } else {
- result[i] = %_SubString(s, start, end);
- }
+ result[i] = %_SubString(s, start, end);
}
j++;
}
@@ -161,6 +157,7 @@ function RegExpExecNoTests(regexp, string, start) {
lastMatchInfoOverride = null;
return BuildResultFromMatchInfo(matchInfo, string);
}
+ regexp.lastIndex = 0;
return null;
}
@@ -192,8 +189,8 @@ function RegExpExec(string) {
// matchIndices is either null or the lastMatchInfo array.
var matchIndices = %_RegExpExec(this, string, i, lastMatchInfo);
- if (matchIndices === null) {
- if (global) this.lastIndex = 0;
+ if (IS_NULL(matchIndices)) {
+ this.lastIndex = 0;
return null;
}
@@ -235,7 +232,7 @@ function RegExpTest(string) {
%_Log('regexp', 'regexp-exec,%0r,%1S,%2i', [this, string, lastIndex]);
// matchIndices is either null or the lastMatchInfo array.
var matchIndices = %_RegExpExec(this, string, i, lastMatchInfo);
- if (matchIndices === null) {
+ if (IS_NULL(matchIndices)) {
this.lastIndex = 0;
return false;
}
@@ -256,7 +253,10 @@ function RegExpTest(string) {
%_Log('regexp', 'regexp-exec,%0r,%1S,%2i', [regexp, string, lastIndex]);
// matchIndices is either null or the lastMatchInfo array.
var matchIndices = %_RegExpExec(regexp, string, 0, lastMatchInfo);
- if (matchIndices === null) return false;
+ if (IS_NULL(matchIndices)) {
+ this.lastIndex = 0;
+ return false;
+ }
lastMatchInfoOverride = null;
return true;
}
@@ -266,7 +266,7 @@ function TrimRegExp(regexp) {
if (!%_ObjectEquals(regexp_key, regexp)) {
regexp_key = regexp;
regexp_val =
- new $RegExp(SubString(regexp.source, 2, regexp.source.length),
+ new $RegExp(%_SubString(regexp.source, 2, regexp.source.length),
(regexp.ignoreCase ? regexp.multiline ? "im" : "i"
: regexp.multiline ? "m" : ""));
}
@@ -296,9 +296,9 @@ function RegExpGetLastMatch() {
return OVERRIDE_MATCH(lastMatchInfoOverride);
}
var regExpSubject = LAST_SUBJECT(lastMatchInfo);
- return SubString(regExpSubject,
- lastMatchInfo[CAPTURE0],
- lastMatchInfo[CAPTURE1]);
+ return %_SubString(regExpSubject,
+ lastMatchInfo[CAPTURE0],
+ lastMatchInfo[CAPTURE1]);
}
@@ -317,7 +317,7 @@ function RegExpGetLastParen() {
var start = lastMatchInfo[CAPTURE(length - 2)];
var end = lastMatchInfo[CAPTURE(length - 1)];
if (start != -1 && end != -1) {
- return SubString(regExpSubject, start, end);
+ return %_SubString(regExpSubject, start, end);
}
return "";
}
@@ -334,7 +334,7 @@ function RegExpGetLeftContext() {
start_index = OVERRIDE_POS(override);
subject = OVERRIDE_SUBJECT(override);
}
- return SubString(subject, 0, start_index);
+ return %_SubString(subject, 0, start_index);
}
@@ -350,7 +350,7 @@ function RegExpGetRightContext() {
var match = OVERRIDE_MATCH(override);
start_index = OVERRIDE_POS(override) + match.length;
}
- return SubString(subject, start_index, subject.length);
+ return %_SubString(subject, start_index, subject.length);
}
@@ -370,7 +370,7 @@ function RegExpMakeCaptureGetter(n) {
var matchStart = lastMatchInfo[CAPTURE(index)];
var matchEnd = lastMatchInfo[CAPTURE(index + 1)];
if (matchStart == -1 || matchEnd == -1) return '';
- return SubString(LAST_SUBJECT(lastMatchInfo), matchStart, matchEnd);
+ return %_SubString(LAST_SUBJECT(lastMatchInfo), matchStart, matchEnd);
};
}
@@ -381,10 +381,10 @@ function RegExpMakeCaptureGetter(n) {
// pairs for the match and all the captured substrings), the invariant is
// that there are at least two capture indices. The array also contains
// the subject string for the last successful match.
-var lastMatchInfo = new InternalArray(
+var lastMatchInfo = new InternalPackedArray(
2, // REGEXP_NUMBER_OF_CAPTURES
"", // Last subject.
- void 0, // Last input - settable with RegExpSetInput.
+ UNDEFINED, // Last input - settable with RegExpSetInput.
0, // REGEXP_FIRST_CAPTURE + 0
0 // REGEXP_FIRST_CAPTURE + 1
);
diff --git a/deps/v8/src/rewriter.cc b/deps/v8/src/rewriter.cc
index 6541546cb..70b362fd7 100644
--- a/deps/v8/src/rewriter.cc
+++ b/deps/v8/src/rewriter.cc
@@ -43,7 +43,9 @@ class Processor: public AstVisitor {
result_assigned_(false),
is_set_(false),
in_try_(false),
- factory_(isolate(), zone) { }
+ factory_(zone->isolate(), zone) {
+ InitializeAstVisitor(zone->isolate());
+ }
virtual ~Processor() { }
@@ -86,6 +88,8 @@ class Processor: public AstVisitor {
#undef DEF_VISIT
void VisitIterationStatement(IterationStatement* stmt);
+
+ DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
};
@@ -109,6 +113,13 @@ void Processor::VisitBlock(Block* node) {
}
+void Processor::VisitModuleStatement(ModuleStatement* node) {
+ bool set_after_body = is_set_;
+ Visit(node->body());
+ is_set_ = is_set_ && set_after_body;
+}
+
+
void Processor::VisitExpressionStatement(ExpressionStatement* node) {
// Rewrite : <x>; -> .result = <x>;
if (!is_set_ && !node->expression()->IsThrow()) {
@@ -157,6 +168,11 @@ void Processor::VisitForInStatement(ForInStatement* node) {
}
+void Processor::VisitForOfStatement(ForOfStatement* node) {
+ VisitIterationStatement(node);
+}
+
+
void Processor::VisitTryCatchStatement(TryCatchStatement* node) {
// Rewrite both try and catch blocks (reversed order).
bool set_after_catch = is_set_;
@@ -191,6 +207,11 @@ void Processor::VisitSwitchStatement(SwitchStatement* node) {
}
+void Processor::VisitCaseClause(CaseClause* clause) {
+ UNREACHABLE();
+}
+
+
void Processor::VisitContinueStatement(ContinueStatement* node) {
is_set_ = false;
}
@@ -242,7 +263,7 @@ bool Rewriter::Rewrite(CompilationInfo* info) {
ZoneList<Statement*>* body = function->body();
if (!body->is_empty()) {
Variable* result = scope->NewTemporary(
- info->isolate()->factory()->result_symbol());
+ info->isolate()->factory()->result_string());
Processor processor(result, info->zone());
processor.Process(body);
if (processor.HasStackOverflow()) return false;
@@ -255,13 +276,12 @@ bool Rewriter::Rewrite(CompilationInfo* info) {
// eval('with ({x:1}) x = 1');
// the end position of the function generated for executing the eval code
// coincides with the end of the with scope which is the position of '1'.
- int position = function->end_position();
+ int pos = function->end_position();
VariableProxy* result_proxy = processor.factory()->NewVariableProxy(
- result->name(), false, Interface::NewValue(), position);
+ result->name(), false, result->interface(), pos);
result_proxy->BindTo(result);
Statement* result_statement =
- processor.factory()->NewReturnStatement(result_proxy);
- result_statement->set_statement_pos(position);
+ processor.factory()->NewReturnStatement(result_proxy, pos);
body->Add(result_statement, info->zone());
}
}
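
The rewrite performed here can be illustrated with a much simplified standalone C++ toy (not the V8 AST; Stmt and RewriteForCompletionValue are invented). The real Processor walks the AST and tracks whether .result has already been assigned; the toy just captures the last value-producing statement and appends the return, which is enough to show the shape of the transformation.

// Standalone toy: capture the value of the last value-producing statement
// in ".result" and return it, mirroring what the rewriter does for the
// completion value of eval and global code.
#include <iostream>
#include <string>
#include <vector>

struct Stmt {
  std::string text;
  bool produces_value;  // expression statements do, declarations do not
};

void RewriteForCompletionValue(std::vector<Stmt>* body) {
  // Walk backwards to the last value-producing statement and capture it.
  for (auto it = body->rbegin(); it != body->rend(); ++it) {
    if (it->produces_value) {
      it->text = ".result = (" + it->text + ")";
      body->push_back(Stmt{"return .result", false});
      return;
    }
  }
}

int main() {
  std::vector<Stmt> body = {
      {"var x = 1", false},
      {"x + 41", true},
  };
  RewriteForCompletionValue(&body);
  for (const auto& s : body) std::cout << s.text << ";\n";
  // var x = 1;
  // .result = (x + 41);
  // return .result;
}
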
diff --git a/deps/v8/src/runtime-profiler.cc b/deps/v8/src/runtime-profiler.cc
index 23f41fa7d..7c900b37d 100644
--- a/deps/v8/src/runtime-profiler.cc
+++ b/deps/v8/src/runtime-profiler.cc
@@ -30,9 +30,9 @@
#include "runtime-profiler.h"
#include "assembler.h"
+#include "bootstrapper.h"
#include "code-stubs.h"
#include "compilation-cache.h"
-#include "deoptimizer.h"
#include "execution.h"
#include "full-codegen.h"
#include "global-handles.h"
@@ -79,22 +79,17 @@ STATIC_ASSERT(kProfilerTicksBeforeOptimization < 256);
STATIC_ASSERT(kProfilerTicksBeforeReenablingOptimization < 256);
STATIC_ASSERT(kTicksWhenNotEnoughTypeInfo < 256);
+// Maximum size in bytes of generated code for a function to allow OSR.
+static const int kOSRCodeSizeAllowanceBase =
+ 100 * FullCodeGenerator::kCodeSizeMultiplier;
+
+static const int kOSRCodeSizeAllowancePerTick =
+ 3 * FullCodeGenerator::kCodeSizeMultiplier;
// Maximum size in bytes of generated code for a function to be optimized
// the very first time it is seen on the stack.
static const int kMaxSizeEarlyOpt =
- 5 * FullCodeGenerator::kBackEdgeDistanceUnit;
-
-
-Atomic32 RuntimeProfiler::state_ = 0;
-
-// TODO(isolates): Clean up the semaphore when it is no longer required.
-static LazySemaphore<0>::type semaphore = LAZY_SEMAPHORE_INITIALIZER;
-
-#ifdef DEBUG
-bool RuntimeProfiler::has_been_globally_set_up_ = false;
-#endif
-bool RuntimeProfiler::enabled_ = false;
+ 5 * FullCodeGenerator::kCodeSizeMultiplier;
RuntimeProfiler::RuntimeProfiler(Isolate* isolate)
@@ -110,23 +105,13 @@ RuntimeProfiler::RuntimeProfiler(Isolate* isolate)
}
-void RuntimeProfiler::GlobalSetUp() {
- ASSERT(!has_been_globally_set_up_);
- enabled_ = V8::UseCrankshaft() && FLAG_opt;
-#ifdef DEBUG
- has_been_globally_set_up_ = true;
-#endif
-}
-
-
-static void GetICCounts(JSFunction* function,
+static void GetICCounts(Code* shared_code,
int* ic_with_type_info_count,
int* ic_total_count,
int* percentage) {
*ic_total_count = 0;
*ic_with_type_info_count = 0;
- Object* raw_info =
- function->shared()->code()->type_feedback_info();
+ Object* raw_info = shared_code->type_feedback_info();
if (raw_info->IsTypeFeedbackInfo()) {
TypeFeedbackInfo* info = TypeFeedbackInfo::cast(raw_info);
*ic_with_type_info_count = info->ic_with_type_info_count();
@@ -140,21 +125,31 @@ static void GetICCounts(JSFunction* function,
void RuntimeProfiler::Optimize(JSFunction* function, const char* reason) {
ASSERT(function->IsOptimizable());
- if (FLAG_trace_opt) {
+
+ if (FLAG_trace_opt && function->PassesFilter(FLAG_hydrogen_filter)) {
PrintF("[marking ");
- function->PrintName();
- PrintF(" 0x%" V8PRIxPTR, reinterpret_cast<intptr_t>(function->address()));
+ function->ShortPrint();
PrintF(" for recompilation, reason: %s", reason);
if (FLAG_type_info_threshold > 0) {
int typeinfo, total, percentage;
- GetICCounts(function, &typeinfo, &total, &percentage);
+ GetICCounts(function->shared()->code(), &typeinfo, &total, &percentage);
PrintF(", ICs with typeinfo: %d/%d (%d%%)", typeinfo, total, percentage);
}
PrintF("]\n");
}
- if (FLAG_parallel_recompilation) {
- function->MarkForParallelRecompilation();
+
+ if (FLAG_concurrent_recompilation && !isolate_->bootstrapper()->IsActive()) {
+ if (FLAG_concurrent_osr &&
+ isolate_->optimizing_compiler_thread()->IsQueuedForOSR(function)) {
+ // Do not attempt regular recompilation if we already queued this for OSR.
+ // TODO(yangguo): This is necessary so that we don't install optimized
+ // code on a function that is already optimized, since OSR and regular
+ // recompilation race. This goes away as soon as OSR becomes one-shot.
+ return;
+ }
+ ASSERT(!function->IsInRecompileQueue());
+ function->MarkForConcurrentRecompilation();
} else {
// The next call to the function will trigger optimization.
function->MarkForLazyRecompilation();
@@ -165,8 +160,6 @@ void RuntimeProfiler::Optimize(JSFunction* function, const char* reason) {
void RuntimeProfiler::AttemptOnStackReplacement(JSFunction* function) {
// See AlwaysFullCompiler (in compiler.cc) comment on why we need
// Debug::has_break_points().
- ASSERT(function->IsMarkedForLazyRecompilation() ||
- function->IsMarkedForParallelRecompilation());
if (!FLAG_use_osr ||
isolate_->DebuggerHasBreakPoints() ||
function->IsBuiltin()) {
@@ -186,31 +179,12 @@ void RuntimeProfiler::AttemptOnStackReplacement(JSFunction* function) {
// any back edge in any unoptimized frame will trigger on-stack
// replacement for that frame.
if (FLAG_trace_osr) {
- PrintF("[patching stack checks in ");
+ PrintF("[OSR - patching back edges in ");
function->PrintName();
- PrintF(" for on-stack replacement]\n");
+ PrintF("]\n");
}
- // Get the stack check stub code object to match against. We aren't
- // prepared to generate it, but we don't expect to have to.
- bool found_code = false;
- Code* stack_check_code = NULL;
- if (FLAG_count_based_interrupts) {
- InterruptStub interrupt_stub;
- found_code = interrupt_stub.FindCodeInCache(&stack_check_code);
- } else // NOLINT
- { // NOLINT
- StackCheckStub check_stub;
- found_code = check_stub.FindCodeInCache(&stack_check_code);
- }
- if (found_code) {
- Code* replacement_code =
- isolate_->builtins()->builtin(Builtins::kOnStackReplacement);
- Code* unoptimized_code = shared->code();
- Deoptimizer::PatchStackCheckCode(unoptimized_code,
- stack_check_code,
- replacement_code);
- }
+ BackEdgeTable::Patch(isolate_, shared->code());
}
@@ -249,6 +223,10 @@ void RuntimeProfiler::AddSample(JSFunction* function, int weight) {
void RuntimeProfiler::OptimizeNow() {
HandleScope scope(isolate_);
+ if (isolate_->DebuggerHasBreakPoints()) return;
+
+ DisallowHeapAllocation no_gc;
+
// Run through the JavaScript frames and collect them. If we already
// have a sample of the function, we mark it for optimizations
// (eagerly or lazily).
@@ -261,7 +239,7 @@ void RuntimeProfiler::OptimizeNow() {
frame_count++ < frame_count_limit && !it.done();
it.Advance()) {
JavaScriptFrame* frame = it.frame();
- JSFunction* function = JSFunction::cast(frame->function());
+ JSFunction* function = frame->function();
if (!FLAG_watch_ic_patching) {
// Adjust threshold each time we have processed
@@ -284,13 +262,37 @@ void RuntimeProfiler::OptimizeNow() {
Code* shared_code = shared->code();
if (shared_code->kind() != Code::FUNCTION) continue;
-
- if (function->IsMarkedForLazyRecompilation() ||
- function->IsMarkedForParallelRecompilation()) {
- int nesting = shared_code->allow_osr_at_loop_nesting_level();
- if (nesting == 0) AttemptOnStackReplacement(function);
- int new_nesting = Min(nesting + 1, Code::kMaxLoopNestingMarker);
- shared_code->set_allow_osr_at_loop_nesting_level(new_nesting);
+ if (function->IsInRecompileQueue()) continue;
+
+ if (FLAG_always_osr &&
+ shared_code->allow_osr_at_loop_nesting_level() == 0) {
+ // Testing mode: always try an OSR compile for every function.
+ for (int i = 0; i < Code::kMaxLoopNestingMarker; i++) {
+ // TODO(titzer): fix AttemptOnStackReplacement to avoid this dumb loop.
+ shared_code->set_allow_osr_at_loop_nesting_level(i);
+ AttemptOnStackReplacement(function);
+ }
+ // Fall through and do a normal optimized compile as well.
+ } else if (!frame->is_optimized() &&
+ (function->IsMarkedForLazyRecompilation() ||
+ function->IsMarkedForConcurrentRecompilation() ||
+ function->IsOptimized())) {
+      // Attempt OSR if we are still running unoptimized code even though
+ // the function has long been marked or even already been optimized.
+ int ticks = shared_code->profiler_ticks();
+ int allowance = kOSRCodeSizeAllowanceBase +
+ ticks * kOSRCodeSizeAllowancePerTick;
+ if (shared_code->CodeSize() > allowance) {
+ if (ticks < 255) shared_code->set_profiler_ticks(ticks + 1);
+ } else {
+ int nesting = shared_code->allow_osr_at_loop_nesting_level();
+ if (nesting < Code::kMaxLoopNestingMarker) {
+ int new_nesting = nesting + 1;
+ shared_code->set_allow_osr_at_loop_nesting_level(new_nesting);
+ AttemptOnStackReplacement(function);
+ }
+ }
+ continue;
}
// Only record top-level code on top of the execution stack and
@@ -324,7 +326,7 @@ void RuntimeProfiler::OptimizeNow() {
if (ticks >= kProfilerTicksBeforeOptimization) {
int typeinfo, total, percentage;
- GetICCounts(function, &typeinfo, &total, &percentage);
+ GetICCounts(shared_code, &typeinfo, &total, &percentage);
if (percentage >= FLAG_type_info_threshold) {
// If this particular function hasn't had any ICs patched for enough
// ticks, optimize it now.
@@ -376,20 +378,10 @@ void RuntimeProfiler::OptimizeNow() {
}
-void RuntimeProfiler::NotifyTick() {
- if (FLAG_count_based_interrupts) return;
- isolate_->stack_guard()->RequestRuntimeProfilerTick();
-}
-
-
void RuntimeProfiler::SetUp() {
- ASSERT(has_been_globally_set_up_);
if (!FLAG_watch_ic_patching) {
ClearSampleBuffer();
}
- // If the ticker hasn't already started, make sure to do so to get
- // the ticks for the runtime profiler.
- if (IsEnabled()) isolate_->logger()->EnsureTickerStarted();
}
@@ -408,11 +400,6 @@ void RuntimeProfiler::TearDown() {
}
-int RuntimeProfiler::SamplerWindowSize() {
- return kSamplerWindowSize;
-}
-
-
// Update the pointers in the sampler window after a GC.
void RuntimeProfiler::UpdateSamplesAfterScavenge() {
for (int i = 0; i < kSamplerWindowSize; i++) {
@@ -429,53 +416,6 @@ void RuntimeProfiler::UpdateSamplesAfterScavenge() {
}
-void RuntimeProfiler::HandleWakeUp(Isolate* isolate) {
- // The profiler thread must still be waiting.
- ASSERT(NoBarrier_Load(&state_) >= 0);
- // In IsolateEnteredJS we have already incremented the counter and
- // undid the decrement done by the profiler thread. Increment again
- // to get the right count of active isolates.
- NoBarrier_AtomicIncrement(&state_, 1);
- semaphore.Pointer()->Signal();
-}
-
-
-bool RuntimeProfiler::IsSomeIsolateInJS() {
- return NoBarrier_Load(&state_) > 0;
-}
-
-
-bool RuntimeProfiler::WaitForSomeIsolateToEnterJS() {
- Atomic32 old_state = NoBarrier_CompareAndSwap(&state_, 0, -1);
- ASSERT(old_state >= -1);
- if (old_state != 0) return false;
- semaphore.Pointer()->Wait();
- return true;
-}
-
-
-void RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(Thread* thread) {
- // Do a fake increment. If the profiler is waiting on the semaphore,
- // the returned state is 0, which can be left as an initial state in
- // case profiling is restarted later. If the profiler is not
- // waiting, the increment will prevent it from waiting, but has to
- // be undone after the profiler is stopped.
- Atomic32 new_state = NoBarrier_AtomicIncrement(&state_, 1);
- ASSERT(new_state >= 0);
- if (new_state == 0) {
- // The profiler thread is waiting. Wake it up. It must check for
- // stop conditions before attempting to wait again.
- semaphore.Pointer()->Signal();
- }
- thread->Join();
- // The profiler thread is now stopped. Undo the increment in case it
- // was not waiting.
- if (new_state != 0) {
- NoBarrier_AtomicIncrement(&state_, -1);
- }
-}
-
-
void RuntimeProfiler::RemoveDeadSamples() {
for (int i = 0; i < kSamplerWindowSize; i++) {
Object* function = sampler_window_[i];
@@ -494,12 +434,4 @@ void RuntimeProfiler::UpdateSamplesAfterCompact(ObjectVisitor* visitor) {
}
-bool RuntimeProfilerRateLimiter::SuspendIfNecessary() {
- if (!RuntimeProfiler::IsSomeIsolateInJS()) {
- return RuntimeProfiler::WaitForSomeIsolateToEnterJS();
- }
- return false;
-}
-
-
} } // namespace v8::internal
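
A minimal sketch of the new OSR size check, assuming a placeholder value for FullCodeGenerator::kCodeSizeMultiplier (the real multiplier is platform dependent). The allowance names mirror the constants added above; CodeSizeOKForOSR here is a free function with an invented signature, unlike the private helper declared in runtime-profiler.h.

// Standalone sketch: decide whether a still-unoptimized function is small
// enough to attempt OSR, given how many profiler ticks it has accumulated.
#include <iostream>

static const int kCodeSizeMultiplier = 32;  // placeholder, platform dependent
static const int kOSRCodeSizeAllowanceBase = 100 * kCodeSizeMultiplier;
static const int kOSRCodeSizeAllowancePerTick = 3 * kCodeSizeMultiplier;

bool CodeSizeOKForOSR(int code_size, int profiler_ticks) {
  // The allowance grows with every tick, so a large function eventually
  // becomes eligible for OSR if it keeps running unoptimized code.
  int allowance = kOSRCodeSizeAllowanceBase +
                  profiler_ticks * kOSRCodeSizeAllowancePerTick;
  return code_size <= allowance;
}

int main() {
  std::cout << CodeSizeOKForOSR(4000, 0) << "\n";   // 0: 4000 > 3200
  std::cout << CodeSizeOKForOSR(4000, 10) << "\n";  // 1: 4000 <= 4160
}
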
diff --git a/deps/v8/src/runtime-profiler.h b/deps/v8/src/runtime-profiler.h
index ab6cb378e..28d6d322f 100644
--- a/deps/v8/src/runtime-profiler.h
+++ b/deps/v8/src/runtime-profiler.h
@@ -43,53 +43,16 @@ class RuntimeProfiler {
public:
explicit RuntimeProfiler(Isolate* isolate);
- static void GlobalSetUp();
-
- static inline bool IsEnabled() {
- ASSERT(has_been_globally_set_up_);
- return enabled_;
- }
-
void OptimizeNow();
- void NotifyTick();
-
void SetUp();
void Reset();
void TearDown();
- Object** SamplerWindowAddress();
- int SamplerWindowSize();
-
void NotifyICChanged() { any_ic_changed_ = true; }
// Rate limiting support.
- // VM thread interface.
- //
- // Called by isolates when their states change.
- static inline void IsolateEnteredJS(Isolate* isolate);
- static inline void IsolateExitedJS(Isolate* isolate);
-
- // Profiler thread interface.
- //
- // IsSomeIsolateInJS():
- // The profiler thread can query whether some isolate is currently
- // running JavaScript code.
- //
- // WaitForSomeIsolateToEnterJS():
- // When no isolates are running JavaScript code for some time the
- // profiler thread suspends itself by calling the wait function. The
- // wait function returns true after it waited or false immediately.
- // While the function was waiting the profiler may have been
- // disabled so it *must check* whether it is allowed to continue.
- static bool IsSomeIsolateInJS();
- static bool WaitForSomeIsolateToEnterJS();
-
- // Stops the runtime profiler thread when profiling support is being
- // turned off.
- static void StopRuntimeProfilerThreadBeforeShutdown(Thread* thread);
-
void UpdateSamplesAfterScavenge();
void RemoveDeadSamples();
void UpdateSamplesAfterCompact(ObjectVisitor* visitor);
@@ -99,8 +62,6 @@ class RuntimeProfiler {
private:
static const int kSamplerWindowSize = 16;
- static void HandleWakeUp(Isolate* isolate);
-
void Optimize(JSFunction* function, const char* reason);
void ClearSampleBuffer();
@@ -111,6 +72,8 @@ class RuntimeProfiler {
void AddSample(JSFunction* function, int weight);
+ bool CodeSizeOKForOSR(Code* shared_code);
+
Isolate* isolate_;
int sampler_threshold_;
@@ -123,57 +86,8 @@ class RuntimeProfiler {
bool any_ic_changed_;
bool code_generated_;
-
- // Possible state values:
- // -1 => the profiler thread is waiting on the semaphore
- // 0 or positive => the number of isolates running JavaScript code.
- static Atomic32 state_;
-
-#ifdef DEBUG
- static bool has_been_globally_set_up_;
-#endif
- static bool enabled_;
};
-
-// Rate limiter intended to be used in the profiler thread.
-class RuntimeProfilerRateLimiter BASE_EMBEDDED {
- public:
- RuntimeProfilerRateLimiter() {}
-
- // Suspends the current thread (which must be the profiler thread)
- // when not executing JavaScript to minimize CPU usage. Returns
- // whether the thread was suspended (and so must check whether
- // profiling is still active.)
- //
- // Does nothing when runtime profiling is not enabled.
- bool SuspendIfNecessary();
-
- private:
- DISALLOW_COPY_AND_ASSIGN(RuntimeProfilerRateLimiter);
-};
-
-
-// Implementation of RuntimeProfiler inline functions.
-
-void RuntimeProfiler::IsolateEnteredJS(Isolate* isolate) {
- Atomic32 new_state = NoBarrier_AtomicIncrement(&state_, 1);
- if (new_state == 0) {
- // Just incremented from -1 to 0. -1 can only be set by the
- // profiler thread before it suspends itself and starts waiting on
- // the semaphore.
- HandleWakeUp(isolate);
- }
- ASSERT(new_state >= 0);
-}
-
-
-void RuntimeProfiler::IsolateExitedJS(Isolate* isolate) {
- Atomic32 new_state = NoBarrier_AtomicIncrement(&state_, -1);
- ASSERT(new_state >= 0);
- USE(new_state);
-}
-
} } // namespace v8::internal
#endif // V8_RUNTIME_PROFILER_H_
diff --git a/deps/v8/src/runtime.cc b/deps/v8/src/runtime.cc
index 052d02307..7f37af0bd 100644
--- a/deps/v8/src/runtime.cc
+++ b/deps/v8/src/runtime.cc
@@ -26,10 +26,12 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <stdlib.h>
+#include <limits>
#include "v8.h"
#include "accessors.h"
+#include "allocation-site-scopes.h"
#include "api.h"
#include "arguments.h"
#include "bootstrapper.h"
@@ -37,17 +39,20 @@
#include "compilation-cache.h"
#include "compiler.h"
#include "cpu.h"
+#include "cpu-profiler.h"
#include "dateparser-inl.h"
#include "debug.h"
#include "deoptimizer.h"
#include "date.h"
#include "execution.h"
+#include "full-codegen.h"
#include "global-handles.h"
#include "isolate-inl.h"
#include "jsregexp.h"
+#include "jsregexp-inl.h"
#include "json-parser.h"
+#include "json-stringifier.h"
#include "liveedit.h"
-#include "liveobjectlist-inl.h"
#include "misc-intrinsics.h"
#include "parser.h"
#include "platform.h"
@@ -57,9 +62,42 @@
#include "smart-pointers.h"
#include "string-search.h"
#include "stub-cache.h"
+#include "uri.h"
+#include "v8conversions.h"
#include "v8threads.h"
#include "vm-state-inl.h"
+#ifdef V8_I18N_SUPPORT
+#include "i18n.h"
+#include "unicode/brkiter.h"
+#include "unicode/calendar.h"
+#include "unicode/coll.h"
+#include "unicode/curramt.h"
+#include "unicode/datefmt.h"
+#include "unicode/dcfmtsym.h"
+#include "unicode/decimfmt.h"
+#include "unicode/dtfmtsym.h"
+#include "unicode/dtptngen.h"
+#include "unicode/locid.h"
+#include "unicode/numfmt.h"
+#include "unicode/numsys.h"
+#include "unicode/rbbi.h"
+#include "unicode/smpdtfmt.h"
+#include "unicode/timezone.h"
+#include "unicode/uchar.h"
+#include "unicode/ucol.h"
+#include "unicode/ucurr.h"
+#include "unicode/uloc.h"
+#include "unicode/unum.h"
+#include "unicode/uversion.h"
+#endif
+
+#ifndef _STLP_VENDOR_CSTD
+// STLPort doesn't import fpclassify and isless into the std namespace.
+using std::fpclassify;
+using std::isless;
+#endif
+
namespace v8 {
namespace internal {
@@ -136,148 +174,6 @@ namespace internal {
static_cast<LanguageMode>(args.smi_at(index));
-MUST_USE_RESULT static MaybeObject* DeepCopyBoilerplate(Isolate* isolate,
- JSObject* boilerplate) {
- StackLimitCheck check(isolate);
- if (check.HasOverflowed()) return isolate->StackOverflow();
-
- Heap* heap = isolate->heap();
- Object* result;
- { MaybeObject* maybe_result = heap->CopyJSObject(boilerplate);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- JSObject* copy = JSObject::cast(result);
-
- // Deep copy local properties.
- if (copy->HasFastProperties()) {
- FixedArray* properties = copy->properties();
- for (int i = 0; i < properties->length(); i++) {
- Object* value = properties->get(i);
- if (value->IsJSObject()) {
- JSObject* js_object = JSObject::cast(value);
- { MaybeObject* maybe_result = DeepCopyBoilerplate(isolate, js_object);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- properties->set(i, result);
- }
- }
- int nof = copy->map()->inobject_properties();
- for (int i = 0; i < nof; i++) {
- Object* value = copy->InObjectPropertyAt(i);
- if (value->IsJSObject()) {
- JSObject* js_object = JSObject::cast(value);
- { MaybeObject* maybe_result = DeepCopyBoilerplate(isolate, js_object);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- copy->InObjectPropertyAtPut(i, result);
- }
- }
- } else {
- { MaybeObject* maybe_result =
- heap->AllocateFixedArray(copy->NumberOfLocalProperties());
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- FixedArray* names = FixedArray::cast(result);
- copy->GetLocalPropertyNames(names, 0);
- for (int i = 0; i < names->length(); i++) {
- ASSERT(names->get(i)->IsString());
- String* key_string = String::cast(names->get(i));
- PropertyAttributes attributes =
- copy->GetLocalPropertyAttribute(key_string);
- // Only deep copy fields from the object literal expression.
- // In particular, don't try to copy the length attribute of
- // an array.
- if (attributes != NONE) continue;
- Object* value =
- copy->GetProperty(key_string, &attributes)->ToObjectUnchecked();
- if (value->IsJSObject()) {
- JSObject* js_object = JSObject::cast(value);
- { MaybeObject* maybe_result = DeepCopyBoilerplate(isolate, js_object);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- { MaybeObject* maybe_result =
- // Creating object copy for literals. No strict mode needed.
- copy->SetProperty(key_string, result, NONE, kNonStrictMode);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- }
- }
- }
-
- // Deep copy local elements.
- // Pixel elements cannot be created using an object literal.
- ASSERT(!copy->HasExternalArrayElements());
- switch (copy->GetElementsKind()) {
- case FAST_SMI_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_ELEMENTS: {
- FixedArray* elements = FixedArray::cast(copy->elements());
- if (elements->map() == heap->fixed_cow_array_map()) {
- isolate->counters()->cow_arrays_created_runtime()->Increment();
-#ifdef DEBUG
- for (int i = 0; i < elements->length(); i++) {
- ASSERT(!elements->get(i)->IsJSObject());
- }
-#endif
- } else {
- for (int i = 0; i < elements->length(); i++) {
- Object* value = elements->get(i);
- ASSERT(value->IsSmi() ||
- value->IsTheHole() ||
- (IsFastObjectElementsKind(copy->GetElementsKind())));
- if (value->IsJSObject()) {
- JSObject* js_object = JSObject::cast(value);
- { MaybeObject* maybe_result = DeepCopyBoilerplate(isolate,
- js_object);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- elements->set(i, result);
- }
- }
- }
- break;
- }
- case DICTIONARY_ELEMENTS: {
- SeededNumberDictionary* element_dictionary = copy->element_dictionary();
- int capacity = element_dictionary->Capacity();
- for (int i = 0; i < capacity; i++) {
- Object* k = element_dictionary->KeyAt(i);
- if (element_dictionary->IsKey(k)) {
- Object* value = element_dictionary->ValueAt(i);
- if (value->IsJSObject()) {
- JSObject* js_object = JSObject::cast(value);
- { MaybeObject* maybe_result = DeepCopyBoilerplate(isolate,
- js_object);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- element_dictionary->ValueAtPut(i, result);
- }
- }
- }
- break;
- }
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNIMPLEMENTED();
- break;
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- // No contained objects, nothing to do.
- break;
- }
- return copy;
-}
-
-
static Handle<Map> ComputeObjectLiteralMap(
Handle<Context> context,
Handle<FixedArray> constant_properties,
@@ -285,40 +181,41 @@ static Handle<Map> ComputeObjectLiteralMap(
Isolate* isolate = context->GetIsolate();
int properties_length = constant_properties->length();
int number_of_properties = properties_length / 2;
- // Check that there are only symbols and array indices among keys.
- int number_of_symbol_keys = 0;
+ // Check that there are only internal strings and array indices among keys.
+ int number_of_string_keys = 0;
for (int p = 0; p != properties_length; p += 2) {
Object* key = constant_properties->get(p);
uint32_t element_index = 0;
- if (key->IsSymbol()) {
- number_of_symbol_keys++;
+ if (key->IsInternalizedString()) {
+ number_of_string_keys++;
} else if (key->ToArrayIndex(&element_index)) {
// An index key does not require space in the property backing store.
number_of_properties--;
} else {
- // Bail out as a non-symbol non-index key makes caching impossible.
+ // Bail out as a non-internalized-string non-index key makes caching
+ // impossible.
// ASSERT to make sure that the if condition after the loop is false.
- ASSERT(number_of_symbol_keys != number_of_properties);
+ ASSERT(number_of_string_keys != number_of_properties);
break;
}
}
- // If we only have symbols and array indices among keys then we can
- // use the map cache in the native context.
+ // If we only have internalized strings and array indices among keys then we
+ // can use the map cache in the native context.
const int kMaxKeys = 10;
- if ((number_of_symbol_keys == number_of_properties) &&
- (number_of_symbol_keys < kMaxKeys)) {
+ if ((number_of_string_keys == number_of_properties) &&
+ (number_of_string_keys < kMaxKeys)) {
// Create the fixed array with the key.
Handle<FixedArray> keys =
- isolate->factory()->NewFixedArray(number_of_symbol_keys);
- if (number_of_symbol_keys > 0) {
+ isolate->factory()->NewFixedArray(number_of_string_keys);
+ if (number_of_string_keys > 0) {
int index = 0;
for (int p = 0; p < properties_length; p += 2) {
Object* key = constant_properties->get(p);
- if (key->IsSymbol()) {
+ if (key->IsInternalizedString()) {
keys->set(index++, key);
}
}
- ASSERT(index == number_of_symbol_keys);
+ ASSERT(index == number_of_string_keys);
}
*is_result_from_cache = true;
return isolate->factory()->ObjectLiteralMapFromCache(context, keys);
@@ -362,7 +259,9 @@ static Handle<Object> CreateObjectLiteralBoilerplate(
constant_properties,
&is_result_from_cache);
- Handle<JSObject> boilerplate = isolate->factory()->NewJSObjectFromMap(map);
+ Handle<JSObject> boilerplate =
+ isolate->factory()->NewJSObjectFromMap(
+ map, isolate->heap()->GetPretenureMode());
// Normalize the elements of the boilerplate to save space if needed.
if (!should_have_fast_elements) JSObject::NormalizeElements(boilerplate);
@@ -379,6 +278,7 @@ static Handle<Object> CreateObjectLiteralBoilerplate(
boilerplate, KEEP_INOBJECT_PROPERTIES, length / 2);
}
+ // TODO(verwaest): Support tracking representations in the boilerplate.
for (int index = 0; index < length; index +=2) {
Handle<Object> key(constant_properties->get(index+0), isolate);
Handle<Object> value(constant_properties->get(index+1), isolate);
@@ -391,7 +291,8 @@ static Handle<Object> CreateObjectLiteralBoilerplate(
}
Handle<Object> result;
uint32_t element_index = 0;
- if (key->IsSymbol()) {
+ StoreMode mode = value->IsJSObject() ? FORCE_FIELD : ALLOW_AS_CONSTANT;
+ if (key->IsInternalizedString()) {
if (Handle<String>::cast(key)->AsArrayIndex(&element_index)) {
// Array index as string (uint32).
result = JSObject::SetOwnElement(
@@ -400,7 +301,8 @@ static Handle<Object> CreateObjectLiteralBoilerplate(
Handle<String> name(String::cast(*key));
ASSERT(!name->AsArrayIndex(&element_index));
result = JSObject::SetLocalPropertyIgnoreAttributes(
- boilerplate, name, value, NONE);
+ boilerplate, name, value, NONE,
+ Object::OPTIMAL_REPRESENTATION, mode);
}
} else if (key->ToArrayIndex(&element_index)) {
// Array index (uint32).
@@ -416,7 +318,8 @@ static Handle<Object> CreateObjectLiteralBoilerplate(
Handle<String> name =
isolate->factory()->NewStringFromAscii(CStrVector(str));
result = JSObject::SetLocalPropertyIgnoreAttributes(
- boilerplate, name, value, NONE);
+ boilerplate, name, value, NONE,
+ Object::OPTIMAL_REPRESENTATION, mode);
}
// If setting the property on the boilerplate throws an
// exception, the exception is converted to an empty handle in
@@ -446,10 +349,8 @@ MaybeObject* TransitionElements(Handle<Object> object,
ElementsKind from_kind =
Handle<JSObject>::cast(object)->map()->elements_kind();
if (Map::IsValidElementsTransition(from_kind, to_kind)) {
- Handle<Object> result = JSObject::TransitionElementsKind(
- Handle<JSObject>::cast(object), to_kind);
- if (result.is_null()) return isolate->ThrowIllegalOperation();
- return *result;
+ JSObject::TransitionElementsKind(Handle<JSObject>::cast(object), to_kind);
+ return *object;
}
return isolate->ThrowIllegalOperation();
}
@@ -465,8 +366,10 @@ Handle<Object> Runtime::CreateArrayLiteralBoilerplate(
// Create the JSArray.
Handle<JSFunction> constructor(
JSFunction::NativeContextFromLiterals(*literals)->array_function());
- Handle<JSArray> object =
- Handle<JSArray>::cast(isolate->factory()->NewJSObject(constructor));
+
+ Handle<JSArray> object = Handle<JSArray>::cast(
+ isolate->factory()->NewJSObject(
+ constructor, isolate->heap()->GetPretenureMode()));
ElementsKind constant_elements_kind =
static_cast<ElementsKind>(Smi::cast(elements->get(0))->value());
@@ -550,7 +453,7 @@ static Handle<Object> CreateLiteralBoilerplate(
Handle<FixedArray> array) {
Handle<FixedArray> elements = CompileTimeValue::GetElements(array);
const bool kHasNoFunctionLiteral = false;
- switch (CompileTimeValue::GetType(array)) {
+ switch (CompileTimeValue::GetLiteralType(array)) {
case CompileTimeValue::OBJECT_LITERAL_FAST_ELEMENTS:
return CreateObjectLiteralBoilerplate(isolate,
literals,
@@ -584,44 +487,70 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateObjectLiteral) {
bool has_function_literal = (flags & ObjectLiteral::kHasFunction) != 0;
// Check if boilerplate exists. If not, create it first.
- Handle<Object> boilerplate(literals->get(literals_index), isolate);
- if (*boilerplate == isolate->heap()->undefined_value()) {
- boilerplate = CreateObjectLiteralBoilerplate(isolate,
- literals,
- constant_properties,
- should_have_fast_elements,
- has_function_literal);
- if (boilerplate.is_null()) return Failure::Exception();
+ Handle<Object> literal_site(literals->get(literals_index), isolate);
+ Handle<AllocationSite> site;
+ Handle<JSObject> boilerplate;
+ if (*literal_site == isolate->heap()->undefined_value()) {
+ Handle<Object> raw_boilerplate = CreateObjectLiteralBoilerplate(
+ isolate,
+ literals,
+ constant_properties,
+ should_have_fast_elements,
+ has_function_literal);
+ RETURN_IF_EMPTY_HANDLE(isolate, raw_boilerplate);
+ boilerplate = Handle<JSObject>::cast(raw_boilerplate);
+
+ AllocationSiteCreationContext creation_context(isolate);
+ site = creation_context.EnterNewScope();
+ RETURN_IF_EMPTY_HANDLE(isolate,
+ JSObject::DeepWalk(boilerplate, &creation_context));
+ creation_context.ExitScope(site, boilerplate);
+
// Update the functions literal and return the boilerplate.
- literals->set(literals_index, *boilerplate);
+ literals->set(literals_index, *site);
+ } else {
+ site = Handle<AllocationSite>::cast(literal_site);
+ boilerplate = Handle<JSObject>(JSObject::cast(site->transition_info()),
+ isolate);
}
- return DeepCopyBoilerplate(isolate, JSObject::cast(*boilerplate));
+
+ AllocationSiteUsageContext usage_context(isolate, site, true);
+ usage_context.EnterNewScope();
+ Handle<Object> copy = JSObject::DeepCopy(boilerplate, &usage_context);
+ usage_context.ExitScope(site, boilerplate);
+ RETURN_IF_EMPTY_HANDLE(isolate, copy);
+ return *copy;
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateObjectLiteralShallow) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 4);
- CONVERT_ARG_HANDLE_CHECKED(FixedArray, literals, 0);
- CONVERT_SMI_ARG_CHECKED(literals_index, 1);
- CONVERT_ARG_HANDLE_CHECKED(FixedArray, constant_properties, 2);
- CONVERT_SMI_ARG_CHECKED(flags, 3);
- bool should_have_fast_elements = (flags & ObjectLiteral::kFastElements) != 0;
- bool has_function_literal = (flags & ObjectLiteral::kHasFunction) != 0;
-
+static Handle<AllocationSite> GetLiteralAllocationSite(
+ Isolate* isolate,
+ Handle<FixedArray> literals,
+ int literals_index,
+ Handle<FixedArray> elements) {
// Check if boilerplate exists. If not, create it first.
- Handle<Object> boilerplate(literals->get(literals_index), isolate);
- if (*boilerplate == isolate->heap()->undefined_value()) {
- boilerplate = CreateObjectLiteralBoilerplate(isolate,
- literals,
- constant_properties,
- should_have_fast_elements,
- has_function_literal);
- if (boilerplate.is_null()) return Failure::Exception();
- // Update the functions literal and return the boilerplate.
- literals->set(literals_index, *boilerplate);
+ Handle<Object> literal_site(literals->get(literals_index), isolate);
+ Handle<AllocationSite> site;
+ if (*literal_site == isolate->heap()->undefined_value()) {
+ ASSERT(*elements != isolate->heap()->empty_fixed_array());
+ Handle<Object> boilerplate =
+ Runtime::CreateArrayLiteralBoilerplate(isolate, literals, elements);
+ if (boilerplate.is_null()) return Handle<AllocationSite>::null();
+
+ AllocationSiteCreationContext creation_context(isolate);
+ site = creation_context.EnterNewScope();
+ if (JSObject::DeepWalk(Handle<JSObject>::cast(boilerplate),
+ &creation_context).is_null()) {
+ return Handle<AllocationSite>::null();
+ }
+ creation_context.ExitScope(site, Handle<JSObject>::cast(boilerplate));
+
+ literals->set(literals_index, *site);
+ } else {
+ site = Handle<AllocationSite>::cast(literal_site);
}
- return isolate->heap()->CopyJSObject(JSObject::cast(*boilerplate));
+
+ return site;
}
@@ -632,17 +561,17 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateArrayLiteral) {
CONVERT_SMI_ARG_CHECKED(literals_index, 1);
CONVERT_ARG_HANDLE_CHECKED(FixedArray, elements, 2);
- // Check if boilerplate exists. If not, create it first.
- Handle<Object> boilerplate(literals->get(literals_index), isolate);
- if (*boilerplate == isolate->heap()->undefined_value()) {
- ASSERT(*elements != isolate->heap()->empty_fixed_array());
- boilerplate =
- Runtime::CreateArrayLiteralBoilerplate(isolate, literals, elements);
- if (boilerplate.is_null()) return Failure::Exception();
- // Update the functions literal and return the boilerplate.
- literals->set(literals_index, *boilerplate);
- }
- return DeepCopyBoilerplate(isolate, JSObject::cast(*boilerplate));
+ Handle<AllocationSite> site = GetLiteralAllocationSite(isolate, literals,
+ literals_index, elements);
+ RETURN_IF_EMPTY_HANDLE(isolate, site);
+
+ Handle<JSObject> boilerplate(JSObject::cast(site->transition_info()));
+ AllocationSiteUsageContext usage_context(isolate, site, true);
+ usage_context.EnterNewScope();
+ Handle<JSObject> copy = JSObject::DeepCopy(boilerplate, &usage_context);
+ usage_context.ExitScope(site, boilerplate);
+ RETURN_IF_EMPTY_HANDLE(isolate, copy);
+ return *copy;
}
@@ -653,25 +582,48 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateArrayLiteralShallow) {
CONVERT_SMI_ARG_CHECKED(literals_index, 1);
CONVERT_ARG_HANDLE_CHECKED(FixedArray, elements, 2);
- // Check if boilerplate exists. If not, create it first.
- Handle<Object> boilerplate(literals->get(literals_index), isolate);
- if (*boilerplate == isolate->heap()->undefined_value()) {
- ASSERT(*elements != isolate->heap()->empty_fixed_array());
- boilerplate =
- Runtime::CreateArrayLiteralBoilerplate(isolate, literals, elements);
- if (boilerplate.is_null()) return Failure::Exception();
- // Update the functions literal and return the boilerplate.
- literals->set(literals_index, *boilerplate);
- }
- if (JSObject::cast(*boilerplate)->elements()->map() ==
+ Handle<AllocationSite> site = GetLiteralAllocationSite(isolate, literals,
+ literals_index, elements);
+ RETURN_IF_EMPTY_HANDLE(isolate, site);
+
+ JSObject* boilerplate = JSObject::cast(site->transition_info());
+ if (boilerplate->elements()->map() ==
isolate->heap()->fixed_cow_array_map()) {
isolate->counters()->cow_arrays_created_runtime()->Increment();
}
- return isolate->heap()->CopyJSObject(JSObject::cast(*boilerplate));
+
+ if (AllocationSite::GetMode(boilerplate->GetElementsKind()) ==
+ TRACK_ALLOCATION_SITE) {
+ return isolate->heap()->CopyJSObject(boilerplate, *site);
+ }
+
+ return isolate->heap()->CopyJSObject(boilerplate);
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateSymbol) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 1);
+ Handle<Object> name(args[0], isolate);
+ RUNTIME_ASSERT(name->IsString() || name->IsUndefined());
+ Symbol* symbol;
+ MaybeObject* maybe = isolate->heap()->AllocateSymbol();
+ if (!maybe->To(&symbol)) return maybe;
+ if (name->IsString()) symbol->set_name(*name);
+ return symbol;
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SymbolName) {
+ SealHandleScope shs(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_CHECKED(Symbol, symbol, 0);
+ return symbol->name();
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateJSProxy) {
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(JSReceiver, handler, 0);
Object* prototype = args[1];
@@ -682,6 +634,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateJSProxy) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateJSFunctionProxy) {
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 4);
CONVERT_ARG_CHECKED(JSReceiver, handler, 0);
Object* call_trap = args[1];
@@ -696,6 +649,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateJSFunctionProxy) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_IsJSProxy) {
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
Object* obj = args[0];
return isolate->heap()->ToBoolean(obj->IsJSProxy());
@@ -703,6 +657,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_IsJSProxy) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_IsJSFunctionProxy) {
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
Object* obj = args[0];
return isolate->heap()->ToBoolean(obj->IsJSFunctionProxy());
@@ -710,6 +665,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_IsJSFunctionProxy) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetHandler) {
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSProxy, proxy, 0);
return proxy->handler();
@@ -717,6 +673,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetHandler) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetCallTrap) {
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSFunctionProxy, proxy, 0);
return proxy->call_trap();
@@ -724,6 +681,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetCallTrap) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetConstructTrap) {
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSFunctionProxy, proxy, 0);
return proxy->construct_trap();
@@ -731,13 +689,714 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetConstructTrap) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_Fix) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
- CONVERT_ARG_CHECKED(JSProxy, proxy, 0);
- proxy->Fix();
+ CONVERT_ARG_HANDLE_CHECKED(JSProxy, proxy, 0);
+ JSProxy::Fix(proxy);
return isolate->heap()->undefined_value();
}
+void Runtime::FreeArrayBuffer(Isolate* isolate,
+ JSArrayBuffer* phantom_array_buffer) {
+ if (phantom_array_buffer->is_external()) return;
+
+ size_t allocated_length = NumberToSize(
+ isolate, phantom_array_buffer->byte_length());
+
+ isolate->heap()->AdjustAmountOfExternalAllocatedMemory(
+ -static_cast<intptr_t>(allocated_length));
+ CHECK(V8::ArrayBufferAllocator() != NULL);
+ V8::ArrayBufferAllocator()->Free(
+ phantom_array_buffer->backing_store(),
+ allocated_length);
+}
+
+
+void Runtime::SetupArrayBuffer(Isolate* isolate,
+ Handle<JSArrayBuffer> array_buffer,
+ bool is_external,
+ void* data,
+ size_t allocated_length) {
+ ASSERT(array_buffer->GetInternalFieldCount() ==
+ v8::ArrayBuffer::kInternalFieldCount);
+ for (int i = 0; i < v8::ArrayBuffer::kInternalFieldCount; i++) {
+ array_buffer->SetInternalField(i, Smi::FromInt(0));
+ }
+ array_buffer->set_backing_store(data);
+ array_buffer->set_flag(Smi::FromInt(0));
+ array_buffer->set_is_external(is_external);
+
+ Handle<Object> byte_length =
+ isolate->factory()->NewNumberFromSize(allocated_length);
+ CHECK(byte_length->IsSmi() || byte_length->IsHeapNumber());
+ array_buffer->set_byte_length(*byte_length);
+
+ array_buffer->set_weak_next(isolate->heap()->array_buffers_list());
+ isolate->heap()->set_array_buffers_list(*array_buffer);
+ array_buffer->set_weak_first_view(isolate->heap()->undefined_value());
+}
+
+
+bool Runtime::SetupArrayBufferAllocatingData(
+ Isolate* isolate,
+ Handle<JSArrayBuffer> array_buffer,
+ size_t allocated_length,
+ bool initialize) {
+ void* data;
+ CHECK(V8::ArrayBufferAllocator() != NULL);
+ if (allocated_length != 0) {
+ if (initialize) {
+ data = V8::ArrayBufferAllocator()->Allocate(allocated_length);
+ } else {
+ data =
+ V8::ArrayBufferAllocator()->AllocateUninitialized(allocated_length);
+ }
+ if (data == NULL) return false;
+ } else {
+ data = NULL;
+ }
+
+ SetupArrayBuffer(isolate, array_buffer, false, data, allocated_length);
+
+ isolate->heap()->AdjustAmountOfExternalAllocatedMemory(allocated_length);
+
+ return true;
+}
+
+
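The allocation and teardown above follow a simple bookkeeping pattern: allocate off-heap memory, report it to the GC as external memory, and undo both when the buffer dies. A self-contained C++ sketch of that pattern (FakeHeap, AllocateBackingStore and FreeBackingStore are invented stand-ins, not the V8 API):

// Standalone sketch of the backing-store bookkeeping used for ArrayBuffers.
#include <cstdlib>
#include <iostream>

struct FakeHeap {
  long long external_bytes = 0;  // what the external-memory counter tracks
  void Adjust(long long delta) { external_bytes += delta; }
};

void* AllocateBackingStore(FakeHeap* heap, size_t length, bool initialize) {
  void* data = initialize ? std::calloc(length, 1) : std::malloc(length);
  if (data == nullptr) return nullptr;  // caller reports a RangeError
  heap->Adjust(static_cast<long long>(length));
  return data;
}

void FreeBackingStore(FakeHeap* heap, void* data, size_t length) {
  std::free(data);
  heap->Adjust(-static_cast<long long>(length));
}

int main() {
  FakeHeap heap;
  void* store = AllocateBackingStore(&heap, 1024, true);
  std::cout << heap.external_bytes << "\n";  // 1024
  FreeBackingStore(&heap, store, 1024);
  std::cout << heap.external_bytes << "\n";  // 0
}
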
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayBufferInitialize) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 2);
+ CONVERT_ARG_HANDLE_CHECKED(JSArrayBuffer, holder, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, byteLength, 1);
+ size_t allocated_length;
+ if (byteLength->IsSmi()) {
+ allocated_length = Smi::cast(*byteLength)->value();
+ } else {
+ ASSERT(byteLength->IsHeapNumber());
+ double value = HeapNumber::cast(*byteLength)->value();
+
+ ASSERT(value >= 0);
+
+ if (value > std::numeric_limits<size_t>::max()) {
+ return isolate->Throw(
+ *isolate->factory()->NewRangeError("invalid_array_buffer_length",
+ HandleVector<Object>(NULL, 0)));
+ }
+
+ allocated_length = static_cast<size_t>(value);
+ }
+
+ if (!Runtime::SetupArrayBufferAllocatingData(isolate,
+ holder, allocated_length)) {
+ return isolate->Throw(*isolate->factory()->
+ NewRangeError("invalid_array_buffer_length",
+ HandleVector<Object>(NULL, 0)));
+ }
+
+ return *holder;
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayBufferGetByteLength) {
+ SealHandleScope shs(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_CHECKED(JSArrayBuffer, holder, 0);
+ return holder->byte_length();
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayBufferSliceImpl) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 3);
+ CONVERT_ARG_HANDLE_CHECKED(JSArrayBuffer, source, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSArrayBuffer, target, 1);
+ CONVERT_DOUBLE_ARG_CHECKED(first, 2);
+ size_t start = static_cast<size_t>(first);
+ size_t target_length = NumberToSize(isolate, target->byte_length());
+
+ if (target_length == 0) return isolate->heap()->undefined_value();
+
+ ASSERT(NumberToSize(isolate, source->byte_length()) - target_length >= start);
+ uint8_t* source_data = reinterpret_cast<uint8_t*>(source->backing_store());
+ uint8_t* target_data = reinterpret_cast<uint8_t*>(target->backing_store());
+ CopyBytes(target_data, source_data + start, target_length);
+ return isolate->heap()->undefined_value();
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayBufferIsView) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_CHECKED(Object, object, 0);
+ return object->IsJSArrayBufferView()
+ ? isolate->heap()->true_value()
+ : isolate->heap()->false_value();
+}
+
+
+enum TypedArrayId {
+  // arrayIds below should be synchronized with typedarray.js natives.
+ ARRAY_ID_UINT8 = 1,
+ ARRAY_ID_INT8 = 2,
+ ARRAY_ID_UINT16 = 3,
+ ARRAY_ID_INT16 = 4,
+ ARRAY_ID_UINT32 = 5,
+ ARRAY_ID_INT32 = 6,
+ ARRAY_ID_FLOAT32 = 7,
+ ARRAY_ID_FLOAT64 = 8,
+ ARRAY_ID_UINT8C = 9
+};
+
+static void ArrayIdToTypeAndSize(
+ int arrayId, ExternalArrayType* array_type, size_t* element_size) {
+ switch (arrayId) {
+ case ARRAY_ID_UINT8:
+ *array_type = kExternalUnsignedByteArray;
+ *element_size = 1;
+ break;
+ case ARRAY_ID_INT8:
+ *array_type = kExternalByteArray;
+ *element_size = 1;
+ break;
+ case ARRAY_ID_UINT16:
+ *array_type = kExternalUnsignedShortArray;
+ *element_size = 2;
+ break;
+ case ARRAY_ID_INT16:
+ *array_type = kExternalShortArray;
+ *element_size = 2;
+ break;
+ case ARRAY_ID_UINT32:
+ *array_type = kExternalUnsignedIntArray;
+ *element_size = 4;
+ break;
+ case ARRAY_ID_INT32:
+ *array_type = kExternalIntArray;
+ *element_size = 4;
+ break;
+ case ARRAY_ID_FLOAT32:
+ *array_type = kExternalFloatArray;
+ *element_size = 4;
+ break;
+ case ARRAY_ID_FLOAT64:
+ *array_type = kExternalDoubleArray;
+ *element_size = 8;
+ break;
+ case ARRAY_ID_UINT8C:
+ *array_type = kExternalPixelArray;
+ *element_size = 1;
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_TypedArrayInitialize) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 5);
+ CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, holder, 0);
+ CONVERT_SMI_ARG_CHECKED(arrayId, 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSArrayBuffer, buffer, 2);
+ CONVERT_ARG_HANDLE_CHECKED(Object, byte_offset_object, 3);
+ CONVERT_ARG_HANDLE_CHECKED(Object, byte_length_object, 4);
+
+ ASSERT(holder->GetInternalFieldCount() ==
+ v8::ArrayBufferView::kInternalFieldCount);
+ for (int i = 0; i < v8::ArrayBufferView::kInternalFieldCount; i++) {
+ holder->SetInternalField(i, Smi::FromInt(0));
+ }
+
+ ExternalArrayType array_type = kExternalByteArray; // Bogus initialization.
+ size_t element_size = 1; // Bogus initialization.
+ ArrayIdToTypeAndSize(arrayId, &array_type, &element_size);
+
+ holder->set_buffer(*buffer);
+ holder->set_byte_offset(*byte_offset_object);
+ holder->set_byte_length(*byte_length_object);
+
+ size_t byte_offset = NumberToSize(isolate, *byte_offset_object);
+ size_t byte_length = NumberToSize(isolate, *byte_length_object);
+ ASSERT(byte_length % element_size == 0);
+ size_t length = byte_length / element_size;
+
+ Handle<Object> length_obj = isolate->factory()->NewNumberFromSize(length);
+ holder->set_length(*length_obj);
+ holder->set_weak_next(buffer->weak_first_view());
+ buffer->set_weak_first_view(*holder);
+
+ Handle<ExternalArray> elements =
+ isolate->factory()->NewExternalArray(
+ static_cast<int>(length), array_type,
+ static_cast<uint8_t*>(buffer->backing_store()) + byte_offset);
+ holder->set_elements(*elements);
+ return isolate->heap()->undefined_value();
+}
+
+
+// Initializes a typed array from an array-like object.
+// If an array-like object happens to be a typed array of the same type,
+// initializes the backing store using memmove.
+//
+// Returns true if backing store was initialized or false otherwise.
+RUNTIME_FUNCTION(MaybeObject*, Runtime_TypedArrayInitializeFromArrayLike) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 4);
+ CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, holder, 0);
+ CONVERT_SMI_ARG_CHECKED(arrayId, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, source, 2);
+ CONVERT_ARG_HANDLE_CHECKED(Object, length_obj, 3);
+
+ ASSERT(holder->GetInternalFieldCount() ==
+ v8::ArrayBufferView::kInternalFieldCount);
+ for (int i = 0; i < v8::ArrayBufferView::kInternalFieldCount; i++) {
+ holder->SetInternalField(i, Smi::FromInt(0));
+ }
+
+ ExternalArrayType array_type = kExternalByteArray; // Bogus initialization.
+ size_t element_size = 1; // Bogus initialization.
+ ArrayIdToTypeAndSize(arrayId, &array_type, &element_size);
+
+ Handle<JSArrayBuffer> buffer = isolate->factory()->NewJSArrayBuffer();
+ size_t length = NumberToSize(isolate, *length_obj);
+ if (length > (kMaxInt / element_size)) {
+ return isolate->Throw(*isolate->factory()->
+ NewRangeError("invalid_array_buffer_length",
+ HandleVector<Object>(NULL, 0)));
+ }
+ size_t byte_length = length * element_size;
+
+ // NOTE: not initializing backing store.
+ // We assume that the caller of this function will initialize holder
+ // with the loop
+ // for(i = 0; i < length; i++) { holder[i] = source[i]; }
+ // We assume that the caller of this function is always a typed array
+ // constructor.
+ // If source is a typed array, this loop will always run to completion,
+ // so we are sure that the backing store will be initialized.
+ // Otherwise, the indexing operation might throw, so the loop will not
+ // run to completion and the typed array might remain partly initialized.
+  // However, we further assume that the caller of this function is a typed array
+ // constructor, and the exception will propagate out of the constructor,
+ // therefore uninitialized memory will not be accessible by a user program.
+ //
+ // TODO(dslomov): revise this once we support subclassing.
+
+ if (!Runtime::SetupArrayBufferAllocatingData(
+ isolate, buffer, byte_length, false)) {
+ return isolate->Throw(*isolate->factory()->
+ NewRangeError("invalid_array_buffer_length",
+ HandleVector<Object>(NULL, 0)));
+ }
+
+ holder->set_buffer(*buffer);
+ holder->set_byte_offset(Smi::FromInt(0));
+ Handle<Object> byte_length_obj(
+ isolate->factory()->NewNumberFromSize(byte_length));
+ holder->set_byte_length(*byte_length_obj);
+ holder->set_length(*length_obj);
+ holder->set_weak_next(buffer->weak_first_view());
+ buffer->set_weak_first_view(*holder);
+
+ Handle<ExternalArray> elements =
+ isolate->factory()->NewExternalArray(
+ static_cast<int>(length), array_type,
+ static_cast<uint8_t*>(buffer->backing_store()));
+ holder->set_elements(*elements);
+
+ if (source->IsJSTypedArray()) {
+ Handle<JSTypedArray> typed_array(JSTypedArray::cast(*source));
+
+ if (typed_array->type() == holder->type()) {
+ uint8_t* backing_store =
+ static_cast<uint8_t*>(
+ JSArrayBuffer::cast(typed_array->buffer())->backing_store());
+ size_t source_byte_offset =
+ NumberToSize(isolate, typed_array->byte_offset());
+ memcpy(
+ buffer->backing_store(),
+ backing_store + source_byte_offset,
+ byte_length);
+ return *isolate->factory()->true_value();
+ } else {
+ return *isolate->factory()->false_value();
+ }
+ }
+
+ return *isolate->factory()->false_value();
+}
+
+
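The same-type fast path at the end of the function above boils down to a single memcpy from the source view's backing store. A standalone sketch under that assumption (FastInitializeFromSameType is an invented name; the real code also wires up the buffer, length and weak-list fields first):

// Standalone sketch: one memcpy instead of an element-by-element loop when
// both typed arrays have the same element type.
#include <cstdint>
#include <cstring>
#include <iostream>
#include <vector>

bool FastInitializeFromSameType(const uint8_t* source_store,
                                size_t source_byte_offset,
                                uint8_t* target_store,
                                size_t byte_length,
                                bool same_element_type) {
  if (!same_element_type) return false;  // caller falls back to holder[i] = source[i]
  std::memcpy(target_store, source_store + source_byte_offset, byte_length);
  return true;
}

int main() {
  std::vector<uint8_t> source = {1, 2, 3, 4, 5, 6, 7, 8};
  std::vector<uint8_t> target(4);
  // Copy 4 bytes starting at byte offset 2 of the source view.
  bool done = FastInitializeFromSameType(source.data(), 2, target.data(), 4, true);
  std::cout << done << " " << int(target[0]) << ".." << int(target[3]) << "\n";  // 1 3..6
}
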
+#define TYPED_ARRAY_GETTER(getter, accessor) \
+ RUNTIME_FUNCTION(MaybeObject*, Runtime_TypedArrayGet##getter) { \
+ HandleScope scope(isolate); \
+ ASSERT(args.length() == 1); \
+ CONVERT_ARG_HANDLE_CHECKED(Object, holder, 0); \
+ if (!holder->IsJSTypedArray()) \
+ return isolate->Throw(*isolate->factory()->NewTypeError( \
+ "not_typed_array", HandleVector<Object>(NULL, 0))); \
+ Handle<JSTypedArray> typed_array(JSTypedArray::cast(*holder)); \
+ return typed_array->accessor(); \
+ }
+
+TYPED_ARRAY_GETTER(Buffer, buffer)
+TYPED_ARRAY_GETTER(ByteLength, byte_length)
+TYPED_ARRAY_GETTER(ByteOffset, byte_offset)
+TYPED_ARRAY_GETTER(Length, length)
+
+#undef TYPED_ARRAY_GETTER
+
+// Return codes for Runtime_TypedArraySetFastCases.
+// Should be synchronized with typedarray.js natives.
+enum TypedArraySetResultCodes {
+  // Set from a typed array of the same type.
+  // This is processed by TypedArraySetFastCases.
+  TYPED_ARRAY_SET_TYPED_ARRAY_SAME_TYPE = 0,
+  // Set from a typed array of a different type, overlapping in memory.
+  TYPED_ARRAY_SET_TYPED_ARRAY_OVERLAPPING = 1,
+  // Set from a typed array of a different type, non-overlapping.
+ TYPED_ARRAY_SET_TYPED_ARRAY_NONOVERLAPPING = 2,
+ // Set from non-typed array.
+ TYPED_ARRAY_SET_NON_TYPED_ARRAY = 3
+};
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_TypedArraySetFastCases) {
+ HandleScope scope(isolate);
+ CONVERT_ARG_HANDLE_CHECKED(Object, target_obj, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, source_obj, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, offset_obj, 2);
+
+ if (!target_obj->IsJSTypedArray())
+ return isolate->Throw(*isolate->factory()->NewTypeError(
+ "not_typed_array", HandleVector<Object>(NULL, 0)));
+
+ if (!source_obj->IsJSTypedArray())
+ return Smi::FromInt(TYPED_ARRAY_SET_NON_TYPED_ARRAY);
+
+ Handle<JSTypedArray> target(JSTypedArray::cast(*target_obj));
+ Handle<JSTypedArray> source(JSTypedArray::cast(*source_obj));
+ size_t offset = NumberToSize(isolate, *offset_obj);
+ size_t target_length = NumberToSize(isolate, target->length());
+ size_t source_length = NumberToSize(isolate, source->length());
+ size_t target_byte_length = NumberToSize(isolate, target->byte_length());
+ size_t source_byte_length = NumberToSize(isolate, source->byte_length());
+ if (offset > target_length ||
+ offset + source_length > target_length ||
+ offset + source_length < offset) // overflow
+ return isolate->Throw(*isolate->factory()->NewRangeError(
+ "typed_array_set_source_too_large", HandleVector<Object>(NULL, 0)));
+
+ size_t target_offset = NumberToSize(isolate, target->byte_offset());
+ size_t source_offset = NumberToSize(isolate, source->byte_offset());
+ uint8_t* target_base =
+ static_cast<uint8_t*>(
+ JSArrayBuffer::cast(target->buffer())->backing_store()) + target_offset;
+ uint8_t* source_base =
+ static_cast<uint8_t*>(
+ JSArrayBuffer::cast(source->buffer())->backing_store()) + source_offset;
+
+ // Typed arrays of the same type: use memmove.
+ if (target->type() == source->type()) {
+ memmove(target_base + offset * target->element_size(),
+ source_base, source_byte_length);
+ return Smi::FromInt(TYPED_ARRAY_SET_TYPED_ARRAY_SAME_TYPE);
+ }
+
+ // Typed arrays of different types over the same backing store
+ if ((source_base <= target_base &&
+ source_base + source_byte_length > target_base) ||
+ (target_base <= source_base &&
+ target_base + target_byte_length > source_base)) {
+ // We do not support overlapping ArrayBuffers
+ ASSERT(
+ JSArrayBuffer::cast(target->buffer())->backing_store() ==
+ JSArrayBuffer::cast(source->buffer())->backing_store());
+ return Smi::FromInt(TYPED_ARRAY_SET_TYPED_ARRAY_OVERLAPPING);
+ } else { // Non-overlapping typed arrays
+ return Smi::FromInt(TYPED_ARRAY_SET_TYPED_ARRAY_NONOVERLAPPING);
+ }
+}
+
+
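The overlap test used above is the standard half-open range check. A standalone sketch (RangesOverlap is an invented helper; the runtime function additionally handles same-type copies and validates offsets before getting here):

// Standalone sketch: two non-empty half-open byte ranges overlap iff each
// one starts before the other one ends.
#include <cstdint>
#include <iostream>

bool RangesOverlap(const uint8_t* a, size_t a_len,
                   const uint8_t* b, size_t b_len) {
  return (a <= b && a + a_len > b) || (b <= a && b + b_len > a);
}

int main() {
  uint8_t buffer[16] = {};
  std::cout << RangesOverlap(buffer, 8, buffer + 4, 8) << "\n";  // 1: overlap
  std::cout << RangesOverlap(buffer, 4, buffer + 8, 4) << "\n";  // 0: disjoint
}
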
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DataViewInitialize) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 4);
+ CONVERT_ARG_HANDLE_CHECKED(JSDataView, holder, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSArrayBuffer, buffer, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, byte_offset, 2);
+ CONVERT_ARG_HANDLE_CHECKED(Object, byte_length, 3);
+
+ ASSERT(holder->GetInternalFieldCount() ==
+ v8::ArrayBufferView::kInternalFieldCount);
+ for (int i = 0; i < v8::ArrayBufferView::kInternalFieldCount; i++) {
+ holder->SetInternalField(i, Smi::FromInt(0));
+ }
+
+ holder->set_buffer(*buffer);
+ ASSERT(byte_offset->IsNumber());
+ ASSERT(
+ NumberToSize(isolate, buffer->byte_length()) >=
+ NumberToSize(isolate, *byte_offset)
+ + NumberToSize(isolate, *byte_length));
+ holder->set_byte_offset(*byte_offset);
+ ASSERT(byte_length->IsNumber());
+ holder->set_byte_length(*byte_length);
+
+ holder->set_weak_next(buffer->weak_first_view());
+ buffer->set_weak_first_view(*holder);
+
+ return isolate->heap()->undefined_value();
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DataViewGetBuffer) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSDataView, data_view, 0);
+ return data_view->buffer();
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DataViewGetByteOffset) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSDataView, data_view, 0);
+ return data_view->byte_offset();
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DataViewGetByteLength) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSDataView, data_view, 0);
+ return data_view->byte_length();
+}
+
+
+inline static bool NeedToFlipBytes(bool is_little_endian) {
+#ifdef V8_TARGET_LITTLE_ENDIAN
+ return !is_little_endian;
+#else
+ return is_little_endian;
+#endif
+}
+
+
+template<int n>
+inline void CopyBytes(uint8_t* target, uint8_t* source) {
+ for (int i = 0; i < n; i++) {
+ *(target++) = *(source++);
+ }
+}
+
+
+template<int n>
+inline void FlipBytes(uint8_t* target, uint8_t* source) {
+ source = source + (n-1);
+ for (int i = 0; i < n; i++) {
+ *(target++) = *(source--);
+ }
+}
+
+
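DataViewGetValue and DataViewSetValue below reinterpret bytes through a union and optionally reverse them when the requested byte order differs from the host's. A standalone sketch of the read side (ReadValue is an invented name; the host byte order is passed explicitly here, whereas the real code decides it at compile time via NeedToFlipBytes):

// Standalone sketch: read a typed value from raw bytes in a requested byte
// order, flipping the bytes only when it differs from the host order.
#include <cstdint>
#include <cstring>
#include <iostream>

template <typename T>
T ReadValue(const uint8_t* bytes, bool want_little_endian,
            bool host_is_little_endian) {
  union {
    T data;
    uint8_t raw[sizeof(T)];
  } value;
  if (want_little_endian == host_is_little_endian) {
    std::memcpy(value.raw, bytes, sizeof(T));
  } else {
    for (size_t i = 0; i < sizeof(T); i++) value.raw[i] = bytes[sizeof(T) - 1 - i];
  }
  return value.data;
}

int main() {
  const uint8_t bytes[4] = {0x12, 0x34, 0x56, 0x78};
  // On a little-endian host these print 78563412 and 12345678.
  std::cout << std::hex << ReadValue<uint32_t>(bytes, true, true) << "\n";
  std::cout << std::hex << ReadValue<uint32_t>(bytes, false, true) << "\n";
}
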
+template<typename T>
+inline static bool DataViewGetValue(
+ Isolate* isolate,
+ Handle<JSDataView> data_view,
+ Handle<Object> byte_offset_obj,
+ bool is_little_endian,
+ T* result) {
+ size_t byte_offset = NumberToSize(isolate, *byte_offset_obj);
+ Handle<JSArrayBuffer> buffer(JSArrayBuffer::cast(data_view->buffer()));
+
+ size_t data_view_byte_offset =
+ NumberToSize(isolate, data_view->byte_offset());
+ size_t data_view_byte_length =
+ NumberToSize(isolate, data_view->byte_length());
+ if (byte_offset + sizeof(T) > data_view_byte_length ||
+ byte_offset + sizeof(T) < byte_offset) { // overflow
+ return false;
+ }
+
+ union Value {
+ T data;
+ uint8_t bytes[sizeof(T)];
+ };
+
+ Value value;
+ size_t buffer_offset = data_view_byte_offset + byte_offset;
+ ASSERT(
+ NumberToSize(isolate, buffer->byte_length())
+ >= buffer_offset + sizeof(T));
+ uint8_t* source =
+ static_cast<uint8_t*>(buffer->backing_store()) + buffer_offset;
+ if (NeedToFlipBytes(is_little_endian)) {
+ FlipBytes<sizeof(T)>(value.bytes, source);
+ } else {
+ CopyBytes<sizeof(T)>(value.bytes, source);
+ }
+ *result = value.data;
+ return true;
+}
+
+
+template<typename T>
+static bool DataViewSetValue(
+ Isolate* isolate,
+ Handle<JSDataView> data_view,
+ Handle<Object> byte_offset_obj,
+ bool is_little_endian,
+ T data) {
+ size_t byte_offset = NumberToSize(isolate, *byte_offset_obj);
+ Handle<JSArrayBuffer> buffer(JSArrayBuffer::cast(data_view->buffer()));
+
+ size_t data_view_byte_offset =
+ NumberToSize(isolate, data_view->byte_offset());
+ size_t data_view_byte_length =
+ NumberToSize(isolate, data_view->byte_length());
+ if (byte_offset + sizeof(T) > data_view_byte_length ||
+ byte_offset + sizeof(T) < byte_offset) { // overflow
+ return false;
+ }
+
+ union Value {
+ T data;
+ uint8_t bytes[sizeof(T)];
+ };
+
+ Value value;
+ value.data = data;
+ size_t buffer_offset = data_view_byte_offset + byte_offset;
+ ASSERT(
+ NumberToSize(isolate, buffer->byte_length())
+ >= buffer_offset + sizeof(T));
+ uint8_t* target =
+ static_cast<uint8_t*>(buffer->backing_store()) + buffer_offset;
+ if (NeedToFlipBytes(is_little_endian)) {
+ FlipBytes<sizeof(T)>(target, value.bytes);
+ } else {
+ CopyBytes<sizeof(T)>(target, value.bytes);
+ }
+ return true;
+}
+
+
+#define DATA_VIEW_GETTER(TypeName, Type, Converter) \
+ RUNTIME_FUNCTION(MaybeObject*, Runtime_DataViewGet##TypeName) { \
+ HandleScope scope(isolate); \
+ ASSERT(args.length() == 3); \
+ CONVERT_ARG_HANDLE_CHECKED(JSDataView, holder, 0); \
+ CONVERT_ARG_HANDLE_CHECKED(Object, offset, 1); \
+ CONVERT_BOOLEAN_ARG_CHECKED(is_little_endian, 2); \
+ Type result; \
+ if (DataViewGetValue( \
+ isolate, holder, offset, is_little_endian, &result)) { \
+ return isolate->heap()->Converter(result); \
+ } else { \
+ return isolate->Throw(*isolate->factory()->NewRangeError( \
+ "invalid_data_view_accessor_offset", \
+ HandleVector<Object>(NULL, 0))); \
+ } \
+ }
+
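+// Each instantiation below expands to a Runtime_DataViewGet<TypeName> entry
+// that reads a <Type> at the requested offset and boxes it with the given
+// heap <Converter>; Runtime_DataViewGetUint8, for instance, boxes a uint8_t
+// via NumberFromUint32.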
+DATA_VIEW_GETTER(Uint8, uint8_t, NumberFromUint32)
+DATA_VIEW_GETTER(Int8, int8_t, NumberFromInt32)
+DATA_VIEW_GETTER(Uint16, uint16_t, NumberFromUint32)
+DATA_VIEW_GETTER(Int16, int16_t, NumberFromInt32)
+DATA_VIEW_GETTER(Uint32, uint32_t, NumberFromUint32)
+DATA_VIEW_GETTER(Int32, int32_t, NumberFromInt32)
+DATA_VIEW_GETTER(Float32, float, NumberFromDouble)
+DATA_VIEW_GETTER(Float64, double, NumberFromDouble)
+
+#undef DATA_VIEW_GETTER
+
+
+template <typename T>
+static T DataViewConvertValue(double value);
+
+
+template <>
+int8_t DataViewConvertValue<int8_t>(double value) {
+ return static_cast<int8_t>(DoubleToInt32(value));
+}
+
+
+template <>
+int16_t DataViewConvertValue<int16_t>(double value) {
+ return static_cast<int16_t>(DoubleToInt32(value));
+}
+
+
+template <>
+int32_t DataViewConvertValue<int32_t>(double value) {
+ return DoubleToInt32(value);
+}
+
+
+template <>
+uint8_t DataViewConvertValue<uint8_t>(double value) {
+ return static_cast<uint8_t>(DoubleToUint32(value));
+}
+
+
+template <>
+uint16_t DataViewConvertValue<uint16_t>(double value) {
+ return static_cast<uint16_t>(DoubleToUint32(value));
+}
+
+
+template <>
+uint32_t DataViewConvertValue<uint32_t>(double value) {
+ return DoubleToUint32(value);
+}
+
+
+template <>
+float DataViewConvertValue<float>(double value) {
+ return static_cast<float>(value);
+}
+
+
+template <>
+double DataViewConvertValue<double>(double value) {
+ return value;
+}
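+// These specializations follow ToInt32/ToUint32-style truncation; for
+// example, DataViewConvertValue<uint8_t>(257.9) yields 1 (truncated to 257,
+// then only the low 8 bits survive the cast), and NaN converts to 0 for the
+// integral types.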
+
+
+#define DATA_VIEW_SETTER(TypeName, Type) \
+ RUNTIME_FUNCTION(MaybeObject*, Runtime_DataViewSet##TypeName) { \
+ HandleScope scope(isolate); \
+ ASSERT(args.length() == 4); \
+ CONVERT_ARG_HANDLE_CHECKED(JSDataView, holder, 0); \
+ CONVERT_ARG_HANDLE_CHECKED(Object, offset, 1); \
+ CONVERT_ARG_HANDLE_CHECKED(Object, value, 2); \
+ CONVERT_BOOLEAN_ARG_CHECKED(is_little_endian, 3); \
+ Type v = DataViewConvertValue<Type>(value->Number()); \
+ if (DataViewSetValue( \
+ isolate, holder, offset, is_little_endian, v)) { \
+ return isolate->heap()->undefined_value(); \
+ } else { \
+ return isolate->Throw(*isolate->factory()->NewRangeError( \
+ "invalid_data_view_accessor_offset", \
+ HandleVector<Object>(NULL, 0))); \
+ } \
+ }
+
+DATA_VIEW_SETTER(Uint8, uint8_t)
+DATA_VIEW_SETTER(Int8, int8_t)
+DATA_VIEW_SETTER(Uint16, uint16_t)
+DATA_VIEW_SETTER(Int16, int16_t)
+DATA_VIEW_SETTER(Uint32, uint32_t)
+DATA_VIEW_SETTER(Int32, int32_t)
+DATA_VIEW_SETTER(Float32, float)
+DATA_VIEW_SETTER(Float64, double)
+
+#undef DATA_VIEW_SETTER
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_SetInitialize) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
@@ -752,7 +1411,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetAdd) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSSet, holder, 0);
- Handle<Object> key(args[1]);
+ Handle<Object> key(args[1], isolate);
Handle<ObjectHashSet> table(ObjectHashSet::cast(holder->table()));
table = ObjectHashSetAdd(table, key);
holder->set_table(*table);
@@ -764,7 +1423,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetHas) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSSet, holder, 0);
- Handle<Object> key(args[1]);
+ Handle<Object> key(args[1], isolate);
Handle<ObjectHashSet> table(ObjectHashSet::cast(holder->table()));
return isolate->heap()->ToBoolean(table->Contains(*key));
}
@@ -774,7 +1433,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetDelete) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSSet, holder, 0);
- Handle<Object> key(args[1]);
+ Handle<Object> key(args[1], isolate);
Handle<ObjectHashSet> table(ObjectHashSet::cast(holder->table()));
table = ObjectHashSetRemove(table, key);
holder->set_table(*table);
@@ -782,6 +1441,15 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetDelete) {
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SetGetSize) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSSet, holder, 0);
+ Handle<ObjectHashSet> table(ObjectHashSet::cast(holder->table()));
+ return Smi::FromInt(table->NumberOfElements());
+}
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_MapInitialize) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
@@ -798,7 +1466,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_MapGet) {
CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
Handle<ObjectHashTable> table(ObjectHashTable::cast(holder->table()));
- Handle<Object> lookup(table->Lookup(*key));
+ Handle<Object> lookup(table->Lookup(*key), isolate);
return lookup->IsTheHole() ? isolate->heap()->undefined_value() : *lookup;
}
@@ -809,7 +1477,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_MapHas) {
CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
Handle<ObjectHashTable> table(ObjectHashTable::cast(holder->table()));
- Handle<Object> lookup(table->Lookup(*key));
+ Handle<Object> lookup(table->Lookup(*key), isolate);
return isolate->heap()->ToBoolean(!lookup->IsTheHole());
}
@@ -820,7 +1488,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_MapDelete) {
CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
Handle<ObjectHashTable> table(ObjectHashTable::cast(holder->table()));
- Handle<Object> lookup(table->Lookup(*key));
+ Handle<Object> lookup(table->Lookup(*key), isolate);
Handle<ObjectHashTable> new_table =
PutIntoObjectHashTable(table, key, isolate->factory()->the_hole_value());
holder->set_table(*new_table);
@@ -841,69 +1509,88 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_MapSet) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakMapInitialize) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_MapGetSize) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSWeakMap, weakmap, 0);
- ASSERT(weakmap->map()->inobject_properties() == 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0);
+ Handle<ObjectHashTable> table(ObjectHashTable::cast(holder->table()));
+ return Smi::FromInt(table->NumberOfElements());
+}
+
+
+static JSWeakCollection* WeakCollectionInitialize(Isolate* isolate,
+ Handle<JSWeakCollection> weak_collection) {
+ ASSERT(weak_collection->map()->inobject_properties() == 0);
Handle<ObjectHashTable> table = isolate->factory()->NewObjectHashTable(0);
- weakmap->set_table(*table);
- weakmap->set_next(Smi::FromInt(0));
- return *weakmap;
+ weak_collection->set_table(*table);
+ weak_collection->set_next(Smi::FromInt(0));
+ return *weak_collection;
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakCollectionInitialize) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, weak_collection, 0);
+ return WeakCollectionInitialize(isolate, weak_collection);
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakMapGet) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakCollectionGet) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(JSWeakMap, weakmap, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, key, 1);
- Handle<ObjectHashTable> table(ObjectHashTable::cast(weakmap->table()));
- Handle<Object> lookup(table->Lookup(*key));
+ CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, weak_collection, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
+ Handle<ObjectHashTable> table(
+ ObjectHashTable::cast(weak_collection->table()));
+ Handle<Object> lookup(table->Lookup(*key), isolate);
return lookup->IsTheHole() ? isolate->heap()->undefined_value() : *lookup;
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakMapHas) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakCollectionHas) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(JSWeakMap, weakmap, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, key, 1);
- Handle<ObjectHashTable> table(ObjectHashTable::cast(weakmap->table()));
- Handle<Object> lookup(table->Lookup(*key));
+ CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, weak_collection, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
+ Handle<ObjectHashTable> table(
+ ObjectHashTable::cast(weak_collection->table()));
+ Handle<Object> lookup(table->Lookup(*key), isolate);
return isolate->heap()->ToBoolean(!lookup->IsTheHole());
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakMapDelete) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakCollectionDelete) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(JSWeakMap, weakmap, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, key, 1);
- Handle<ObjectHashTable> table(ObjectHashTable::cast(weakmap->table()));
- Handle<Object> lookup(table->Lookup(*key));
+ CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, weak_collection, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
+ Handle<ObjectHashTable> table(ObjectHashTable::cast(
+ weak_collection->table()));
+ Handle<Object> lookup(table->Lookup(*key), isolate);
Handle<ObjectHashTable> new_table =
PutIntoObjectHashTable(table, key, isolate->factory()->the_hole_value());
- weakmap->set_table(*new_table);
+ weak_collection->set_table(*new_table);
return isolate->heap()->ToBoolean(!lookup->IsTheHole());
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakMapSet) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakCollectionSet) {
HandleScope scope(isolate);
ASSERT(args.length() == 3);
- CONVERT_ARG_HANDLE_CHECKED(JSWeakMap, weakmap, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, key, 1);
- Handle<Object> value(args[2]);
- Handle<ObjectHashTable> table(ObjectHashTable::cast(weakmap->table()));
+ CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, weak_collection, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
+ Handle<Object> value(args[2], isolate);
+ Handle<ObjectHashTable> table(
+ ObjectHashTable::cast(weak_collection->table()));
Handle<ObjectHashTable> new_table = PutIntoObjectHashTable(table, key, value);
- weakmap->set_table(*new_table);
+ weak_collection->set_table(*new_table);
return isolate->heap()->undefined_value();
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_ClassOf) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
Object* obj = args[0];
if (!obj->IsJSObject()) return isolate->heap()->null_value();
@@ -912,35 +1599,73 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ClassOf) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetPrototype) {
- NoHandleAllocation ha;
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
- CONVERT_ARG_CHECKED(JSReceiver, input_obj, 0);
- Object* obj = input_obj;
+ CONVERT_ARG_HANDLE_CHECKED(Object, obj, 0);
// We don't expect access checks to be needed on JSProxy objects.
ASSERT(!obj->IsAccessCheckNeeded() || obj->IsJSObject());
do {
if (obj->IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(JSObject::cast(obj),
- isolate->heap()->Proto_symbol(),
- v8::ACCESS_GET)) {
- isolate->ReportFailedAccessCheck(JSObject::cast(obj), v8::ACCESS_GET);
+ !isolate->MayNamedAccessWrapper(Handle<JSObject>::cast(obj),
+ isolate->factory()->proto_string(),
+ v8::ACCESS_GET)) {
+ isolate->ReportFailedAccessCheck(JSObject::cast(*obj), v8::ACCESS_GET);
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
return isolate->heap()->undefined_value();
}
- obj = obj->GetPrototype();
+ obj = handle(obj->GetPrototype(isolate), isolate);
} while (obj->IsJSObject() &&
- JSObject::cast(obj)->map()->is_hidden_prototype());
- return obj;
+ JSObject::cast(*obj)->map()->is_hidden_prototype());
+ return *obj;
+}
+
+
+static inline Object* GetPrototypeSkipHiddenPrototypes(Isolate* isolate,
+ Object* receiver) {
+ Object* current = receiver->GetPrototype(isolate);
+ while (current->IsJSObject() &&
+ JSObject::cast(current)->map()->is_hidden_prototype()) {
+ current = current->GetPrototype(isolate);
+ }
+ return current;
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SetPrototype) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 2);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, prototype, 1);
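+ // For observed objects, capture the old visible prototype first so that a
+ // "prototype" change record can be enqueued if the observable prototype
+ // actually changed.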
+ if (FLAG_harmony_observation && obj->map()->is_observed()) {
+ Handle<Object> old_value(
+ GetPrototypeSkipHiddenPrototypes(isolate, *obj), isolate);
+
+ Handle<Object> result = JSObject::SetPrototype(obj, prototype, true);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+
+ Handle<Object> new_value(
+ GetPrototypeSkipHiddenPrototypes(isolate, *obj), isolate);
+ if (!new_value->SameValue(*old_value)) {
+ JSObject::EnqueueChangeRecord(obj, "prototype",
+ isolate->factory()->proto_string(),
+ old_value);
+ }
+ return *result;
+ }
+ Handle<Object> result = JSObject::SetPrototype(obj, prototype, true);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_IsInPrototypeChain) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
// See ECMA-262, section 15.3.5.3, page 88 (steps 5 - 8).
Object* O = args[0];
Object* V = args[1];
while (true) {
- Object* prototype = V->GetPrototype();
+ Object* prototype = V->GetPrototype(isolate);
if (prototype->IsNull()) return isolate->heap()->false_value();
if (O == prototype) return isolate->heap()->true_value();
V = prototype;
@@ -948,104 +1673,109 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_IsInPrototypeChain) {
}
-// Recursively traverses hidden prototypes if property is not found
-static void GetOwnPropertyImplementation(JSObject* obj,
- String* name,
- LookupResult* result) {
- obj->LocalLookupRealNamedProperty(name, result);
-
- if (result->IsFound()) return;
-
- Object* proto = obj->GetPrototype();
- if (proto->IsJSObject() &&
- JSObject::cast(proto)->map()->is_hidden_prototype())
- GetOwnPropertyImplementation(JSObject::cast(proto),
- name, result);
+static bool CheckAccessException(Object* callback,
+ v8::AccessType access_type) {
+ DisallowHeapAllocation no_gc;
+ if (callback->IsAccessorInfo()) {
+ AccessorInfo* info = AccessorInfo::cast(callback);
+ return
+ (access_type == v8::ACCESS_HAS &&
+ (info->all_can_read() || info->all_can_write())) ||
+ (access_type == v8::ACCESS_GET && info->all_can_read()) ||
+ (access_type == v8::ACCESS_SET && info->all_can_write());
+ }
+ if (callback->IsAccessorPair()) {
+ AccessorPair* info = AccessorPair::cast(callback);
+ return
+ (access_type == v8::ACCESS_HAS &&
+ (info->all_can_read() || info->all_can_write())) ||
+ (access_type == v8::ACCESS_GET && info->all_can_read()) ||
+ (access_type == v8::ACCESS_SET && info->all_can_write());
+ }
+ return false;
}
-static bool CheckAccessException(LookupResult* result,
- v8::AccessType access_type) {
- if (result->type() == CALLBACKS) {
- Object* callback = result->GetCallbackObject();
- if (callback->IsAccessorInfo()) {
- AccessorInfo* info = AccessorInfo::cast(callback);
- bool can_access =
- (access_type == v8::ACCESS_HAS &&
- (info->all_can_read() || info->all_can_write())) ||
- (access_type == v8::ACCESS_GET && info->all_can_read()) ||
- (access_type == v8::ACCESS_SET && info->all_can_write());
- return can_access;
+template<class Key>
+static bool CheckGenericAccess(
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ Key key,
+ v8::AccessType access_type,
+ bool (Isolate::*mayAccess)(Handle<JSObject>, Key, v8::AccessType)) {
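+ // Walk the prototype chain from the receiver up to (and including) the
+ // holder; the access is allowed only if every object along the way that
+ // requires an access check passes mayAccess (MayNamedAccessWrapper or
+ // MayIndexedAccessWrapper, see CheckPropertyAccess below).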
+ Isolate* isolate = receiver->GetIsolate();
+ for (Handle<JSObject> current = receiver;
+ true;
+ current = handle(JSObject::cast(current->GetPrototype()), isolate)) {
+ if (current->IsAccessCheckNeeded() &&
+ !(isolate->*mayAccess)(current, key, access_type)) {
+ return false;
}
+ if (current.is_identical_to(holder)) break;
}
-
- return false;
+ return true;
}
-static bool CheckAccess(JSObject* obj,
- String* name,
- LookupResult* result,
- v8::AccessType access_type) {
- ASSERT(result->IsProperty());
+enum AccessCheckResult {
+ ACCESS_FORBIDDEN,
+ ACCESS_ALLOWED,
+ ACCESS_ABSENT
+};
- JSObject* holder = result->holder();
- JSObject* current = obj;
- Isolate* isolate = obj->GetIsolate();
- while (true) {
- if (current->IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(current, name, access_type)) {
- // Access check callback denied the access, but some properties
- // can have a special permissions which override callbacks descision
- // (currently see v8::AccessControl).
- break;
- }
- if (current == holder) {
- return true;
+static AccessCheckResult CheckPropertyAccess(Handle<JSObject> obj,
+ Handle<Name> name,
+ v8::AccessType access_type) {
+ uint32_t index;
+ if (name->AsArrayIndex(&index)) {
+ // TODO(1095): we should traverse the hidden prototype hierarchy as well.
+ if (CheckGenericAccess(
+ obj, obj, index, access_type, &Isolate::MayIndexedAccessWrapper)) {
+ return ACCESS_ALLOWED;
}
- current = JSObject::cast(current->GetPrototype());
+ obj->GetIsolate()->ReportFailedAccessCheck(*obj, access_type);
+ return ACCESS_FORBIDDEN;
}
+ Isolate* isolate = obj->GetIsolate();
+ LookupResult lookup(isolate);
+ obj->LocalLookup(*name, &lookup, true);
+
+ if (!lookup.IsProperty()) return ACCESS_ABSENT;
+ Handle<JSObject> holder(lookup.holder(), isolate);
+ if (CheckGenericAccess<Handle<Object> >(
+ obj, holder, name, access_type, &Isolate::MayNamedAccessWrapper)) {
+ return ACCESS_ALLOWED;
+ }
+
+ // Access check callback denied the access, but some properties
+ // can have special permissions which override the callback's decision
+ // (currently see v8::AccessControl).
// API callbacks can have per callback access exceptions.
- switch (result->type()) {
- case CALLBACKS: {
- if (CheckAccessException(result, access_type)) {
- return true;
+ switch (lookup.type()) {
+ case CALLBACKS:
+ if (CheckAccessException(lookup.GetCallbackObject(), access_type)) {
+ return ACCESS_ALLOWED;
}
break;
- }
- case INTERCEPTOR: {
+ case INTERCEPTOR:
// If the object has an interceptor, try real named properties.
// Overwrite the result to fetch the correct property later.
- holder->LookupRealNamedProperty(name, result);
- if (result->IsProperty()) {
- if (CheckAccessException(result, access_type)) {
- return true;
+ holder->LookupRealNamedProperty(*name, &lookup);
+ if (lookup.IsProperty() && lookup.IsPropertyCallbacks()) {
+ if (CheckAccessException(lookup.GetCallbackObject(), access_type)) {
+ return ACCESS_ALLOWED;
}
}
break;
- }
default:
break;
}
- isolate->ReportFailedAccessCheck(current, access_type);
- return false;
-}
-
-
-// TODO(1095): we should traverse hidden prototype hierachy as well.
-static bool CheckElementAccess(JSObject* obj,
- uint32_t index,
- v8::AccessType access_type) {
- if (obj->IsAccessCheckNeeded() &&
- !obj->GetIsolate()->MayIndexedAccess(obj, index, access_type)) {
- return false;
- }
-
- return true;
+ isolate->ReportFailedAccessCheck(*obj, access_type);
+ return ACCESS_FORBIDDEN;
}
@@ -1062,145 +1792,63 @@ enum PropertyDescriptorIndices {
};
-static MaybeObject* GetOwnProperty(Isolate* isolate,
- Handle<JSObject> obj,
- Handle<String> name) {
+static Handle<Object> GetOwnProperty(Isolate* isolate,
+ Handle<JSObject> obj,
+ Handle<Name> name) {
Heap* heap = isolate->heap();
+ Factory* factory = isolate->factory();
+ // Due to some WebKit tests, we want to make sure that we do not log
+ // more than one access failure here.
+ AccessCheckResult access_check_result =
+ CheckPropertyAccess(obj, name, v8::ACCESS_HAS);
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ switch (access_check_result) {
+ case ACCESS_FORBIDDEN: return factory->false_value();
+ case ACCESS_ALLOWED: break;
+ case ACCESS_ABSENT: return factory->undefined_value();
+ }
+
+ PropertyAttributes attrs = obj->GetLocalPropertyAttribute(*name);
+ if (attrs == ABSENT) {
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ return factory->undefined_value();
+ }
+ ASSERT(!isolate->has_scheduled_exception());
+ AccessorPair* raw_accessors = obj->GetLocalPropertyAccessorPair(*name);
+ Handle<AccessorPair> accessors(raw_accessors, isolate);
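+ // raw_accessors is NULL for data properties; in that case the descriptor
+ // below carries a value/writable pair instead of a getter/setter pair.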
Handle<FixedArray> elms = isolate->factory()->NewFixedArray(DESCRIPTOR_SIZE);
- Handle<JSArray> desc = isolate->factory()->NewJSArrayWithElements(elms);
- LookupResult result(isolate);
- // This could be an element.
- uint32_t index;
- if (name->AsArrayIndex(&index)) {
- switch (obj->HasLocalElement(index)) {
- case JSObject::UNDEFINED_ELEMENT:
- return heap->undefined_value();
-
- case JSObject::STRING_CHARACTER_ELEMENT: {
- // Special handling of string objects according to ECMAScript 5
- // 15.5.5.2. Note that this might be a string object with elements
- // other than the actual string value. This is covered by the
- // subsequent cases.
- Handle<JSValue> js_value = Handle<JSValue>::cast(obj);
- Handle<String> str(String::cast(js_value->value()));
- Handle<String> substr = SubString(str, index, index + 1, NOT_TENURED);
-
- elms->set(IS_ACCESSOR_INDEX, heap->false_value());
- elms->set(VALUE_INDEX, *substr);
- elms->set(WRITABLE_INDEX, heap->false_value());
- elms->set(ENUMERABLE_INDEX, heap->true_value());
- elms->set(CONFIGURABLE_INDEX, heap->false_value());
- return *desc;
- }
-
- case JSObject::INTERCEPTED_ELEMENT:
- case JSObject::FAST_ELEMENT: {
- elms->set(IS_ACCESSOR_INDEX, heap->false_value());
- Handle<Object> value = Object::GetElement(obj, index);
- RETURN_IF_EMPTY_HANDLE(isolate, value);
- elms->set(VALUE_INDEX, *value);
- elms->set(WRITABLE_INDEX, heap->true_value());
- elms->set(ENUMERABLE_INDEX, heap->true_value());
- elms->set(CONFIGURABLE_INDEX, heap->true_value());
- return *desc;
- }
-
- case JSObject::DICTIONARY_ELEMENT: {
- Handle<JSObject> holder = obj;
- if (obj->IsJSGlobalProxy()) {
- Object* proto = obj->GetPrototype();
- if (proto->IsNull()) return heap->undefined_value();
- ASSERT(proto->IsJSGlobalObject());
- holder = Handle<JSObject>(JSObject::cast(proto));
- }
- FixedArray* elements = FixedArray::cast(holder->elements());
- SeededNumberDictionary* dictionary = NULL;
- if (elements->map() == heap->non_strict_arguments_elements_map()) {
- dictionary = SeededNumberDictionary::cast(elements->get(1));
- } else {
- dictionary = SeededNumberDictionary::cast(elements);
- }
- int entry = dictionary->FindEntry(index);
- ASSERT(entry != SeededNumberDictionary::kNotFound);
- PropertyDetails details = dictionary->DetailsAt(entry);
- switch (details.type()) {
- case CALLBACKS: {
- // This is an accessor property with getter and/or setter.
- AccessorPair* accessors =
- AccessorPair::cast(dictionary->ValueAt(entry));
- elms->set(IS_ACCESSOR_INDEX, heap->true_value());
- if (CheckElementAccess(*obj, index, v8::ACCESS_GET)) {
- elms->set(GETTER_INDEX, accessors->GetComponent(ACCESSOR_GETTER));
- }
- if (CheckElementAccess(*obj, index, v8::ACCESS_SET)) {
- elms->set(SETTER_INDEX, accessors->GetComponent(ACCESSOR_SETTER));
- }
- break;
- }
- case NORMAL: {
- // This is a data property.
- elms->set(IS_ACCESSOR_INDEX, heap->false_value());
- Handle<Object> value = Object::GetElement(obj, index);
- ASSERT(!value.is_null());
- elms->set(VALUE_INDEX, *value);
- elms->set(WRITABLE_INDEX, heap->ToBoolean(!details.IsReadOnly()));
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
- elms->set(ENUMERABLE_INDEX, heap->ToBoolean(!details.IsDontEnum()));
- elms->set(CONFIGURABLE_INDEX, heap->ToBoolean(!details.IsDontDelete()));
- return *desc;
- }
- }
- }
-
- // Use recursive implementation to also traverse hidden prototypes
- GetOwnPropertyImplementation(*obj, *name, &result);
-
- if (!result.IsProperty()) {
- return heap->undefined_value();
- }
-
- if (!CheckAccess(*obj, *name, &result, v8::ACCESS_HAS)) {
- return heap->false_value();
- }
-
- elms->set(ENUMERABLE_INDEX, heap->ToBoolean(!result.IsDontEnum()));
- elms->set(CONFIGURABLE_INDEX, heap->ToBoolean(!result.IsDontDelete()));
-
- bool is_js_accessor = result.IsPropertyCallbacks() &&
- (result.GetCallbackObject()->IsAccessorPair());
-
- if (is_js_accessor) {
- // __defineGetter__/__defineSetter__ callback.
- elms->set(IS_ACCESSOR_INDEX, heap->true_value());
-
- AccessorPair* accessors = AccessorPair::cast(result.GetCallbackObject());
- Object* getter = accessors->GetComponent(ACCESSOR_GETTER);
- if (!getter->IsMap() && CheckAccess(*obj, *name, &result, v8::ACCESS_GET)) {
- elms->set(GETTER_INDEX, getter);
- }
- Object* setter = accessors->GetComponent(ACCESSOR_SETTER);
- if (!setter->IsMap() && CheckAccess(*obj, *name, &result, v8::ACCESS_SET)) {
- elms->set(SETTER_INDEX, setter);
- }
+ elms->set(ENUMERABLE_INDEX, heap->ToBoolean((attrs & DONT_ENUM) == 0));
+ elms->set(CONFIGURABLE_INDEX, heap->ToBoolean((attrs & DONT_DELETE) == 0));
+ elms->set(IS_ACCESSOR_INDEX, heap->ToBoolean(raw_accessors != NULL));
+
+ if (raw_accessors == NULL) {
+ elms->set(WRITABLE_INDEX, heap->ToBoolean((attrs & READ_ONLY) == 0));
+ // GetProperty does the access check and reports any violation.
+ Handle<Object> value = GetProperty(isolate, obj, name);
+ RETURN_IF_EMPTY_HANDLE_VALUE(isolate, value, Handle<Object>::null());
+ elms->set(VALUE_INDEX, *value);
} else {
- elms->set(IS_ACCESSOR_INDEX, heap->false_value());
- elms->set(WRITABLE_INDEX, heap->ToBoolean(!result.IsReadOnly()));
+ // Access checks are performed for both accessors separately.
+ // When they fail, the respective field is not set in the descriptor.
+ Handle<Object> getter(accessors->GetComponent(ACCESSOR_GETTER), isolate);
+ Handle<Object> setter(accessors->GetComponent(ACCESSOR_SETTER), isolate);
+
+ if (!getter->IsMap() && CheckPropertyAccess(obj, name, v8::ACCESS_GET)) {
+ ASSERT(!isolate->has_scheduled_exception());
+ elms->set(GETTER_INDEX, *getter);
+ } else {
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ }
- PropertyAttributes attrs;
- Object* value;
- // GetProperty will check access and report any violations.
- { MaybeObject* maybe_value = obj->GetProperty(*obj, &result, *name, &attrs);
- if (!maybe_value->ToObject(&value)) return maybe_value;
+ if (!setter->IsMap() && CheckPropertyAccess(obj, name, v8::ACCESS_SET)) {
+ ASSERT(!isolate->has_scheduled_exception());
+ elms->set(SETTER_INDEX, *setter);
+ } else {
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
}
- elms->set(VALUE_INDEX, value);
}
- return *desc;
+ return isolate->factory()->NewJSArrayWithElements(elms);
}
@@ -1212,22 +1860,28 @@ static MaybeObject* GetOwnProperty(Isolate* isolate,
// if args[1] is an accessor on args[0]
// [true, GetFunction, SetFunction, Enumerable, Configurable]
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOwnProperty) {
- ASSERT(args.length() == 2);
HandleScope scope(isolate);
+ ASSERT(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, name, 1);
- return GetOwnProperty(isolate, obj, name);
+ CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
+ Handle<Object> result = GetOwnProperty(isolate, obj, name);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_PreventExtensions) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
- CONVERT_ARG_CHECKED(JSObject, obj, 0);
- return obj->PreventExtensions();
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
+ Handle<Object> result = JSObject::PreventExtensions(obj);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_IsExtensible) {
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSObject, obj, 0);
if (obj->IsJSGlobalProxy()) {
@@ -1246,9 +1900,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpCompile) {
CONVERT_ARG_HANDLE_CHECKED(JSRegExp, re, 0);
CONVERT_ARG_HANDLE_CHECKED(String, pattern, 1);
CONVERT_ARG_HANDLE_CHECKED(String, flags, 2);
- Handle<Object> result =
- RegExpImpl::Compile(re, pattern, flags, isolate->runtime_zone());
- if (result.is_null()) return Failure::Exception();
+ Handle<Object> result = RegExpImpl::Compile(re, pattern, flags);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
return *result;
}
@@ -1262,6 +1915,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateApiFunction) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_IsTemplate) {
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
Object* arg = args[0];
bool result = arg->IsObjectTemplateInfo() || arg->IsFunctionTemplateInfo();
@@ -1270,6 +1924,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_IsTemplate) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetTemplateField) {
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(HeapObject, templ, 0);
CONVERT_SMI_ARG_CHECKED(index, 1)
@@ -1288,6 +1943,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetTemplateField) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_DisableAccessChecks) {
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(HeapObject, object, 0);
Map* old_map = object->map();
@@ -1306,6 +1962,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DisableAccessChecks) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_EnableAccessChecks) {
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(HeapObject, object, 0);
Map* old_map = object->map();
@@ -1322,6 +1979,35 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_EnableAccessChecks) {
}
+// Transform getter or setter into something DefineAccessor can handle.
+static Handle<Object> InstantiateAccessorComponent(Isolate* isolate,
+ Handle<Object> component) {
+ if (component->IsUndefined()) return isolate->factory()->null_value();
+ Handle<FunctionTemplateInfo> info =
+ Handle<FunctionTemplateInfo>::cast(component);
+ return Utils::OpenHandle(*Utils::ToLocal(info)->GetFunction());
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SetAccessorProperty) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 6);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, getter, 2);
+ CONVERT_ARG_HANDLE_CHECKED(Object, setter, 3);
+ CONVERT_SMI_ARG_CHECKED(attribute, 4);
+ CONVERT_SMI_ARG_CHECKED(access_control, 5);
+ JSObject::DefineAccessor(object,
+ name,
+ InstantiateAccessorComponent(isolate, getter),
+ InstantiateAccessorComponent(isolate, setter),
+ static_cast<PropertyAttributes>(attribute),
+ static_cast<v8::AccessControl>(access_control));
+ return isolate->heap()->undefined_value();
+}
+
+
static Failure* ThrowRedeclarationError(Isolate* isolate,
const char* type,
Handle<String> name) {
@@ -1336,8 +2022,8 @@ static Failure* ThrowRedeclarationError(Isolate* isolate,
RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareGlobals) {
- ASSERT(args.length() == 3);
HandleScope scope(isolate);
+ ASSERT(args.length() == 3);
Handle<GlobalObject> global = Handle<GlobalObject>(
isolate->context()->global_object());
@@ -1358,8 +2044,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareGlobals) {
bool is_var = value->IsUndefined();
bool is_const = value->IsTheHole();
bool is_function = value->IsSharedFunctionInfo();
- bool is_module = value->IsJSModule();
- ASSERT(is_var + is_const + is_function + is_module == 1);
+ ASSERT(is_var + is_const + is_function == 1);
if (is_var || is_const) {
// Lookup the property in the global object, and don't set the
@@ -1367,13 +2052,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareGlobals) {
// Do the lookup locally only, see ES5 erratum.
LookupResult lookup(isolate);
if (FLAG_es52_globals) {
- Object* obj = *global;
- do {
- JSObject::cast(obj)->LocalLookup(*name, &lookup);
- if (lookup.IsFound()) break;
- obj = obj->GetPrototype();
- } while (obj->IsJSObject() &&
- JSObject::cast(obj)->map()->is_hidden_prototype());
+ global->LocalLookup(*name, &lookup, true);
} else {
global->Lookup(*name, &lookup);
}
@@ -1397,30 +2076,29 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareGlobals) {
}
LookupResult lookup(isolate);
- global->LocalLookup(*name, &lookup);
+ global->LocalLookup(*name, &lookup, true);
// Compute the property attributes. According to ECMA-262,
// the property must be non-configurable except in eval.
int attr = NONE;
bool is_eval = DeclareGlobalsEvalFlag::decode(flags);
- if (!is_eval || is_module) {
+ if (!is_eval) {
attr |= DONT_DELETE;
}
bool is_native = DeclareGlobalsNativeFlag::decode(flags);
- if (is_const || is_module || (is_native && is_function)) {
+ if (is_const || (is_native && is_function)) {
attr |= READ_ONLY;
}
LanguageMode language_mode = DeclareGlobalsLanguageMode::decode(flags);
- if (!lookup.IsFound() || is_function || is_module) {
+ if (!lookup.IsFound() || is_function) {
// If the local property exists, check that we can reconfigure it
// as required for function declarations.
if (lookup.IsFound() && lookup.IsDontDelete()) {
if (lookup.IsReadOnly() || lookup.IsDontEnum() ||
lookup.IsPropertyCallbacks()) {
- return ThrowRedeclarationError(
- isolate, is_function ? "function" : "module", name);
+ return ThrowRedeclarationError(isolate, "function", name);
}
// If the existing property is not configurable, keep its attributes.
attr = lookup.GetAttributes();
@@ -1514,7 +2192,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareContextSlot) {
// Declare the property by setting it to the initial value if provided,
// or undefined, and use the correct mode (e.g. READ_ONLY attribute for
// constant declarations).
- ASSERT(!object->HasLocalProperty(*name));
+ ASSERT(!JSReceiver::HasLocalProperty(object, name));
Handle<Object> value(isolate->heap()->undefined_value(), isolate);
if (*initial_value != NULL) value = initial_value;
// Declaring a const context slot is a conflicting declaration if
@@ -1546,7 +2224,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareContextSlot) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeVarGlobal) {
- NoHandleAllocation nha;
+ HandleScope scope(isolate);
// args[0] == name
// args[1] == language_mode
// args[2] == value (optional)
@@ -1557,7 +2235,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeVarGlobal) {
bool assign = args.length() == 3;
CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
- GlobalObject* global = isolate->context()->global_object();
RUNTIME_ASSERT(args[1]->IsSmi());
CONVERT_LANGUAGE_MODE_ARG(language_mode, 1);
StrictModeFlag strict_mode_flag = (language_mode == CLASSIC_MODE)
@@ -1574,41 +2251,40 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeVarGlobal) {
// to assign to the property.
// Note that objects can have hidden prototypes, so we need to traverse
// the whole chain of hidden prototypes to do a 'local' lookup.
- Object* object = global;
LookupResult lookup(isolate);
- while (object->IsJSObject() &&
- JSObject::cast(object)->map()->is_hidden_prototype()) {
- JSObject* raw_holder = JSObject::cast(object);
- raw_holder->LocalLookup(*name, &lookup);
- if (lookup.IsInterceptor()) {
- HandleScope handle_scope(isolate);
- Handle<JSObject> holder(raw_holder);
- PropertyAttributes intercepted = holder->GetPropertyAttribute(*name);
- // Update the raw pointer in case it's changed due to GC.
- raw_holder = *holder;
- if (intercepted != ABSENT && (intercepted & READ_ONLY) == 0) {
- // Found an interceptor that's not read only.
- if (assign) {
- return raw_holder->SetProperty(
- &lookup, *name, args[2], attributes, strict_mode_flag);
- } else {
- return isolate->heap()->undefined_value();
- }
+ isolate->context()->global_object()->LocalLookup(*name, &lookup, true);
+ if (lookup.IsInterceptor()) {
+ PropertyAttributes intercepted =
+ lookup.holder()->GetPropertyAttribute(*name);
+ if (intercepted != ABSENT && (intercepted & READ_ONLY) == 0) {
+ // Found an interceptor that's not read only.
+ if (assign) {
+ CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
+ Handle<Object> result = JSObject::SetPropertyForResult(
+ handle(lookup.holder()), &lookup, name, value, attributes,
+ strict_mode_flag);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
+ } else {
+ return isolate->heap()->undefined_value();
}
}
- object = raw_holder->GetPrototype();
}
- // Reload global in case the loop above performed a GC.
- global = isolate->context()->global_object();
if (assign) {
- return global->SetProperty(*name, args[2], attributes, strict_mode_flag);
+ CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
+ Handle<GlobalObject> global(isolate->context()->global_object());
+ Handle<Object> result = JSReceiver::SetProperty(
+ global, name, value, attributes, strict_mode_flag);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
}
return isolate->heap()->undefined_value();
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstGlobal) {
+ SealHandleScope shs(isolate);
// All constants are declared with an initial value. The name
// of the constant is the first argument and the initial value
// is the second.
@@ -1632,9 +2308,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstGlobal) {
LookupResult lookup(isolate);
global->LocalLookup(*name, &lookup);
if (!lookup.IsFound()) {
- return global->SetLocalPropertyIgnoreAttributes(*name,
- *value,
- attributes);
+ HandleScope handle_scope(isolate);
+ Handle<GlobalObject> global(isolate->context()->global_object());
+ RETURN_IF_EMPTY_HANDLE(
+ isolate,
+ JSObject::SetLocalPropertyIgnoreAttributes(global, name, value,
+ attributes));
+ return *value;
}
if (!lookup.IsReadOnly()) {
@@ -1660,19 +2340,20 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstGlobal) {
// Strict mode handling not needed (const is disallowed in strict mode).
if (lookup.IsField()) {
FixedArray* properties = global->properties();
- int index = lookup.GetFieldIndex();
+ int index = lookup.GetFieldIndex().field_index();
if (properties->get(index)->IsTheHole() || !lookup.IsReadOnly()) {
properties->set(index, *value);
}
} else if (lookup.IsNormal()) {
if (global->GetNormalizedProperty(&lookup)->IsTheHole() ||
!lookup.IsReadOnly()) {
- global->SetNormalizedProperty(&lookup, *value);
+ HandleScope scope(isolate);
+ JSObject::SetNormalizedProperty(Handle<JSObject>(global), &lookup, value);
}
} else {
// Ignore re-initialization of constants that have already been
- // assigned a function value.
- ASSERT(lookup.IsReadOnly() && lookup.IsConstantFunction());
+ // assigned a constant value.
+ ASSERT(lookup.IsReadOnly() && lookup.IsConstant());
}
// Use the set value as the result of the operation.
@@ -1750,13 +2431,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstContextSlot) {
if (lookup.IsField()) {
FixedArray* properties = object->properties();
- int index = lookup.GetFieldIndex();
+ int index = lookup.GetFieldIndex().field_index();
if (properties->get(index)->IsTheHole()) {
properties->set(index, *value);
}
} else if (lookup.IsNormal()) {
if (object->GetNormalizedProperty(&lookup)->IsTheHole()) {
- object->SetNormalizedProperty(&lookup, *value);
+ JSObject::SetNormalizedProperty(object, &lookup, value);
}
} else {
// We should not reach here. Any real, named property should be
@@ -1801,7 +2482,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpExec) {
// length of a string, i.e. it is always a Smi. We check anyway for security.
CONVERT_SMI_ARG_CHECKED(index, 2);
CONVERT_ARG_HANDLE_CHECKED(JSArray, last_match_info, 3);
- RUNTIME_ASSERT(last_match_info->HasFastObjectElements());
RUNTIME_ASSERT(index >= 0);
RUNTIME_ASSERT(index <= subject->length());
isolate->counters()->regexp_entry_runtime()->Increment();
@@ -1809,12 +2489,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpExec) {
subject,
index,
last_match_info);
- if (result.is_null()) return Failure::Exception();
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
return *result;
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpConstructResult) {
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 3);
CONVERT_SMI_ARG_CHECKED(elements_count, 0);
if (elements_count < 0 ||
@@ -1824,7 +2505,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpConstructResult) {
}
Object* new_object;
{ MaybeObject* maybe_new_object =
- isolate->heap()->AllocateFixedArrayWithHoles(elements_count);
+ isolate->heap()->AllocateFixedArray(elements_count);
if (!maybe_new_object->ToObject(&new_object)) return maybe_new_object;
}
FixedArray* elements = FixedArray::cast(new_object);
@@ -1833,7 +2514,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpConstructResult) {
if (!maybe_new_object->ToObject(&new_object)) return maybe_new_object;
}
{
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
HandleScope scope(isolate);
reinterpret_cast<HeapObject*>(new_object)->
set_map(isolate->native_context()->regexp_result_map());
@@ -1850,41 +2531,41 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpConstructResult) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpInitializeObject) {
- AssertNoAllocation no_alloc;
+ HandleScope scope(isolate);
+ DisallowHeapAllocation no_allocation;
ASSERT(args.length() == 5);
- CONVERT_ARG_CHECKED(JSRegExp, regexp, 0);
- CONVERT_ARG_CHECKED(String, source, 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 0);
+ CONVERT_ARG_HANDLE_CHECKED(String, source, 1);
// If source is the empty string we set it to "(?:)" instead as
// suggested by ECMA-262, 5th, section 15.10.4.1.
- if (source->length() == 0) source = isolate->heap()->query_colon_symbol();
+ if (source->length() == 0) source = isolate->factory()->query_colon_string();
- Object* global = args[2];
- if (!global->IsTrue()) global = isolate->heap()->false_value();
+ CONVERT_ARG_HANDLE_CHECKED(Object, global, 2);
+ if (!global->IsTrue()) global = isolate->factory()->false_value();
- Object* ignoreCase = args[3];
- if (!ignoreCase->IsTrue()) ignoreCase = isolate->heap()->false_value();
+ CONVERT_ARG_HANDLE_CHECKED(Object, ignoreCase, 3);
+ if (!ignoreCase->IsTrue()) ignoreCase = isolate->factory()->false_value();
- Object* multiline = args[4];
- if (!multiline->IsTrue()) multiline = isolate->heap()->false_value();
+ CONVERT_ARG_HANDLE_CHECKED(Object, multiline, 4);
+ if (!multiline->IsTrue()) multiline = isolate->factory()->false_value();
Map* map = regexp->map();
Object* constructor = map->constructor();
if (constructor->IsJSFunction() &&
JSFunction::cast(constructor)->initial_map() == map) {
// If we still have the original map, set in-object properties directly.
- regexp->InObjectPropertyAtPut(JSRegExp::kSourceFieldIndex, source);
+ regexp->InObjectPropertyAtPut(JSRegExp::kSourceFieldIndex, *source);
// Both true and false are immovable immortal objects so no need for write
// barrier.
regexp->InObjectPropertyAtPut(
- JSRegExp::kGlobalFieldIndex, global, SKIP_WRITE_BARRIER);
+ JSRegExp::kGlobalFieldIndex, *global, SKIP_WRITE_BARRIER);
regexp->InObjectPropertyAtPut(
- JSRegExp::kIgnoreCaseFieldIndex, ignoreCase, SKIP_WRITE_BARRIER);
+ JSRegExp::kIgnoreCaseFieldIndex, *ignoreCase, SKIP_WRITE_BARRIER);
regexp->InObjectPropertyAtPut(
- JSRegExp::kMultilineFieldIndex, multiline, SKIP_WRITE_BARRIER);
- regexp->InObjectPropertyAtPut(JSRegExp::kLastIndexFieldIndex,
- Smi::FromInt(0),
- SKIP_WRITE_BARRIER); // It's a Smi.
- return regexp;
+ JSRegExp::kMultilineFieldIndex, *multiline, SKIP_WRITE_BARRIER);
+ regexp->InObjectPropertyAtPut(
+ JSRegExp::kLastIndexFieldIndex, Smi::FromInt(0), SKIP_WRITE_BARRIER);
+ return *regexp;
}
// Map has changed, so use generic, but slower, method.
@@ -1892,32 +2573,19 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpInitializeObject) {
static_cast<PropertyAttributes>(READ_ONLY | DONT_ENUM | DONT_DELETE);
PropertyAttributes writable =
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE);
- Heap* heap = isolate->heap();
- MaybeObject* result;
- result = regexp->SetLocalPropertyIgnoreAttributes(heap->source_symbol(),
- source,
- final);
- ASSERT(!result->IsFailure());
- result = regexp->SetLocalPropertyIgnoreAttributes(heap->global_symbol(),
- global,
- final);
- ASSERT(!result->IsFailure());
- result =
- regexp->SetLocalPropertyIgnoreAttributes(heap->ignore_case_symbol(),
- ignoreCase,
- final);
- ASSERT(!result->IsFailure());
- result = regexp->SetLocalPropertyIgnoreAttributes(heap->multiline_symbol(),
- multiline,
- final);
- ASSERT(!result->IsFailure());
- result =
- regexp->SetLocalPropertyIgnoreAttributes(heap->last_index_symbol(),
- Smi::FromInt(0),
- writable);
- ASSERT(!result->IsFailure());
- USE(result);
- return regexp;
+ Handle<Object> zero(Smi::FromInt(0), isolate);
+ Factory* factory = isolate->factory();
+ CHECK_NOT_EMPTY_HANDLE(isolate, JSObject::SetLocalPropertyIgnoreAttributes(
+ regexp, factory->source_string(), source, final));
+ CHECK_NOT_EMPTY_HANDLE(isolate, JSObject::SetLocalPropertyIgnoreAttributes(
+ regexp, factory->global_string(), global, final));
+ CHECK_NOT_EMPTY_HANDLE(isolate, JSObject::SetLocalPropertyIgnoreAttributes(
+ regexp, factory->ignore_case_string(), ignoreCase, final));
+ CHECK_NOT_EMPTY_HANDLE(isolate, JSObject::SetLocalPropertyIgnoreAttributes(
+ regexp, factory->multiline_string(), multiline, final));
+ CHECK_NOT_EMPTY_HANDLE(isolate, JSObject::SetLocalPropertyIgnoreAttributes(
+ regexp, factory->last_index_string(), zero, writable));
+ return *regexp;
}
@@ -1936,7 +2604,7 @@ static Handle<JSFunction> InstallBuiltin(Isolate* isolate,
Handle<JSObject> holder,
const char* name,
Builtins::Name builtin_name) {
- Handle<String> key = isolate->factory()->LookupAsciiSymbol(name);
+ Handle<String> key = isolate->factory()->InternalizeUtf8String(name);
Handle<Code> code(isolate->builtins()->builtin(builtin_name));
Handle<JSFunction> optimized =
isolate->factory()->NewFunction(key,
@@ -1967,15 +2635,34 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SpecialArrayFunctions) {
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_IsClassicModeFunction) {
+ SealHandleScope shs(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_CHECKED(JSReceiver, callable, 0);
+ if (!callable->IsJSFunction()) {
+ HandleScope scope(isolate);
+ bool threw = false;
+ Handle<Object> delegate = Execution::TryGetFunctionDelegate(
+ isolate, Handle<JSReceiver>(callable), &threw);
+ if (threw) return Failure::Exception();
+ callable = JSFunction::cast(*delegate);
+ }
+ JSFunction* function = JSFunction::cast(callable);
+ SharedFunctionInfo* shared = function->shared();
+ return isolate->heap()->ToBoolean(shared->is_classic_mode());
+}
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetDefaultReceiver) {
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSReceiver, callable, 0);
if (!callable->IsJSFunction()) {
HandleScope scope(isolate);
bool threw = false;
- Handle<Object> delegate =
- Execution::TryGetFunctionDelegate(Handle<JSReceiver>(callable), &threw);
+ Handle<Object> delegate = Execution::TryGetFunctionDelegate(
+ isolate, Handle<JSReceiver>(callable), &threw);
if (threw) return Failure::Exception();
callable = JSFunction::cast(*delegate);
}
@@ -2025,7 +2712,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_MaterializeRegExpLiteral) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetName) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSFunction, f, 0);
@@ -2034,7 +2721,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetName) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetName) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(JSFunction, f, 0);
@@ -2045,7 +2732,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetName) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionNameShouldPrintAsAnonymous) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSFunction, f, 0);
return isolate->heap()->ToBoolean(
@@ -2054,7 +2741,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionNameShouldPrintAsAnonymous) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionMarkNameShouldPrintAsAnonymous) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSFunction, f, 0);
f->shared()->set_name_should_print_as_anonymous(true);
@@ -2062,8 +2749,16 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionMarkNameShouldPrintAsAnonymous) {
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionIsGenerator) {
+ SealHandleScope shs(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_CHECKED(JSFunction, f, 0);
+ return isolate->heap()->ToBoolean(f->shared()->is_generator());
+}
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionRemovePrototype) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSFunction, f, 0);
@@ -2096,7 +2791,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetSourceCode) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetScriptSourcePosition) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSFunction, fun, 0);
@@ -2106,6 +2801,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetScriptSourcePosition) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetPositionForOffset) {
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(Code, code, 0);
@@ -2119,7 +2815,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetPositionForOffset) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetInstanceClassName) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(JSFunction, fun, 0);
@@ -2130,7 +2826,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetInstanceClassName) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetLength) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(JSFunction, fun, 0);
@@ -2141,26 +2837,23 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetLength) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetPrototype) {
- NoHandleAllocation ha;
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
- CONVERT_ARG_CHECKED(JSFunction, fun, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
ASSERT(fun->should_have_prototype());
- Object* obj;
- { MaybeObject* maybe_obj =
- Accessors::FunctionSetPrototype(fun, args[1], NULL);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
+ Accessors::FunctionSetPrototype(fun, value);
return args[0]; // return TOS
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetReadOnlyPrototype) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
RUNTIME_ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSFunction, function, 0);
- String* name = isolate->heap()->prototype_symbol();
+ String* name = isolate->heap()->prototype_string();
if (function->HasFastProperties()) {
// Construct a new field descriptor with updated attributes.
@@ -2172,8 +2865,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetReadOnlyPrototype) {
CallbacksDescriptor new_desc(name,
instance_desc->GetValue(index),
- static_cast<PropertyAttributes>(details.attributes() | READ_ONLY),
- details.descriptor_index());
+ static_cast<PropertyAttributes>(details.attributes() | READ_ONLY));
// Create a new map featuring the new field descriptors array.
Map* new_map;
@@ -2186,7 +2878,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetReadOnlyPrototype) {
} else { // Dictionary properties.
// Directly manipulate the property details.
int entry = function->property_dictionary()->FindEntry(name);
- ASSERT(entry != StringDictionary::kNotFound);
+ ASSERT(entry != NameDictionary::kNotFound);
PropertyDetails details = function->property_dictionary()->DetailsAt(entry);
PropertyDetails new_details(
static_cast<PropertyAttributes>(details.attributes() | READ_ONLY),
@@ -2199,7 +2891,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetReadOnlyPrototype) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionIsAPIFunction) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSFunction, f, 0);
@@ -2208,7 +2900,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionIsAPIFunction) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionIsBuiltin) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSFunction, f, 0);
@@ -2233,24 +2925,27 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetCode) {
return Failure::Exception();
}
+ // Mark both the source and the target as un-flushable because the
+ // shared unoptimized code makes them impossible to enqueue in a list.
+ ASSERT(target_shared->code()->gc_metadata() == NULL);
+ ASSERT(source_shared->code()->gc_metadata() == NULL);
+ target_shared->set_dont_flush(true);
+ source_shared->set_dont_flush(true);
+
// Set the code, scope info, formal parameter count, and the length
- // of the target shared function info. Set the source code of the
- // target function to undefined. SetCode is only used for built-in
- // constructors like String, Array, and Object, and some web code
- // doesn't like seeing source code for constructors.
- target_shared->set_code(source_shared->code());
+ // of the target shared function info.
+ target_shared->ReplaceCode(source_shared->code());
target_shared->set_scope_info(source_shared->scope_info());
target_shared->set_length(source_shared->length());
target_shared->set_formal_parameter_count(
source_shared->formal_parameter_count());
- target_shared->set_script(isolate->heap()->undefined_value());
-
- // Since we don't store the source we should never optimize this.
- target_shared->code()->set_optimizable(false);
-
- // Clear the optimization hints related to the compiled code as these
- // are no longer valid when the code is overwritten.
- target_shared->ClearThisPropertyAssignmentsInfo();
+ target_shared->set_script(source_shared->script());
+ target_shared->set_start_position_and_type(
+ source_shared->start_position_and_type());
+ target_shared->set_end_position(source_shared->end_position());
+ bool was_native = target_shared->native();
+ target_shared->set_compiler_hints(source_shared->compiler_hints());
+ target_shared->set_native(was_native);
// Set the code of the target function.
target->ReplaceCode(source_shared->code());
@@ -2270,7 +2965,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetCode) {
target->set_literals(*literals);
if (isolate->logger()->is_logging_code_events() ||
- CpuProfiler::is_profiling(isolate)) {
+ isolate->cpu_profiler()->is_profiling()) {
isolate->logger()->LogExistingFunction(
source_shared, Handle<Code>(source_shared->code()));
}
@@ -2282,28 +2977,186 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetCode) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_SetExpectedNumberOfProperties) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, func, 0);
CONVERT_SMI_ARG_CHECKED(num, 1);
RUNTIME_ASSERT(num >= 0);
- SetExpectedNofProperties(function, num);
+ // If objects constructed from this function exist, then changing
+ // 'estimated_nof_properties' is dangerous since the previous value might
+ // have been compiled into the fast construct stub. Moreover, the inobject
+ // slack tracking logic might have adjusted the previous value, so even
+ // passing the same value is risky.
+ if (!func->shared()->live_objects_may_exist()) {
+ func->shared()->set_expected_nof_properties(num);
+ if (func->has_initial_map()) {
+ Handle<Map> new_initial_map =
+ func->GetIsolate()->factory()->CopyMap(
+ Handle<Map>(func->initial_map()));
+ new_initial_map->set_unused_property_fields(num);
+ func->set_initial_map(*new_initial_map);
+ }
+ }
+ return isolate->heap()->undefined_value();
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateJSGeneratorObject) {
+ SealHandleScope shs(isolate);
+ ASSERT(args.length() == 0);
+
+ JavaScriptFrameIterator it(isolate);
+ JavaScriptFrame* frame = it.frame();
+ JSFunction* function = frame->function();
+ RUNTIME_ASSERT(function->shared()->is_generator());
+
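+ // In a construct call the generator object has already been allocated as
+ // the receiver; otherwise allocate a fresh one here.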
+ JSGeneratorObject* generator;
+ if (frame->IsConstructor()) {
+ generator = JSGeneratorObject::cast(frame->receiver());
+ } else {
+ MaybeObject* maybe_generator =
+ isolate->heap()->AllocateJSGeneratorObject(function);
+ if (!maybe_generator->To(&generator)) return maybe_generator;
+ }
+ generator->set_function(function);
+ generator->set_context(Context::cast(frame->context()));
+ generator->set_receiver(frame->receiver());
+ generator->set_continuation(0);
+ generator->set_operand_stack(isolate->heap()->empty_fixed_array());
+ generator->set_stack_handler_index(-1);
+
+ return generator;
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SuspendJSGeneratorObject) {
+ SealHandleScope shs(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_CHECKED(JSGeneratorObject, generator_object, 0);
+
+ JavaScriptFrameIterator stack_iterator(isolate);
+ JavaScriptFrame* frame = stack_iterator.frame();
+ RUNTIME_ASSERT(frame->function()->shared()->is_generator());
+ ASSERT_EQ(frame->function(), generator_object->function());
+
+ // The caller should have saved the context and continuation already.
+ ASSERT_EQ(generator_object->context(), Context::cast(frame->context()));
+ ASSERT_LT(0, generator_object->continuation());
+
+ // We expect there to be at least two values on the operand stack: the return
+ // value of the yield expression, and the argument to this runtime call.
+ // Neither of those should be saved.
+ int operands_count = frame->ComputeOperandsCount();
+ ASSERT_GE(operands_count, 2);
+ operands_count -= 2;
+
+ if (operands_count == 0) {
+ // Although it's semantically harmless to call this function with an
+ // operands_count of zero, it is also unnecessary.
+ ASSERT_EQ(generator_object->operand_stack(),
+ isolate->heap()->empty_fixed_array());
+ ASSERT_EQ(generator_object->stack_handler_index(), -1);
+ // If there are no operands on the stack, there shouldn't be a handler
+ // active either.
+ ASSERT(!frame->HasHandler());
+ } else {
+ int stack_handler_index = -1;
+ MaybeObject* alloc = isolate->heap()->AllocateFixedArray(operands_count);
+ FixedArray* operand_stack;
+ if (!alloc->To(&operand_stack)) return alloc;
+ frame->SaveOperandStack(operand_stack, &stack_handler_index);
+ generator_object->set_operand_stack(operand_stack);
+ generator_object->set_stack_handler_index(stack_handler_index);
+ }
+
return isolate->heap()->undefined_value();
}
+// Note that this function is the slow path for resuming generators. It is only
+// called if the suspended activation had operands on the stack, stack handlers
+// needing rewinding, or if the resume should throw an exception. The fast path
+// is handled directly in FullCodeGenerator::EmitGeneratorResume(), which is
+// inlined into GeneratorNext and GeneratorThrow. EmitGeneratorResume is
+// called in any case, as it needs to reconstruct the stack frame and make space
+// for arguments and operands.
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ResumeJSGeneratorObject) {
+ SealHandleScope shs(isolate);
+ ASSERT(args.length() == 3);
+ CONVERT_ARG_CHECKED(JSGeneratorObject, generator_object, 0);
+ CONVERT_ARG_CHECKED(Object, value, 1);
+ CONVERT_SMI_ARG_CHECKED(resume_mode_int, 2);
+ JavaScriptFrameIterator stack_iterator(isolate);
+ JavaScriptFrame* frame = stack_iterator.frame();
+
+ ASSERT_EQ(frame->function(), generator_object->function());
+ ASSERT(frame->function()->is_compiled());
+
+ STATIC_ASSERT(JSGeneratorObject::kGeneratorExecuting <= 0);
+ STATIC_ASSERT(JSGeneratorObject::kGeneratorClosed <= 0);
+
+ Address pc = generator_object->function()->code()->instruction_start();
+ int offset = generator_object->continuation();
+ ASSERT(offset > 0);
+ frame->set_pc(pc + offset);
+ generator_object->set_continuation(JSGeneratorObject::kGeneratorExecuting);
+
+ FixedArray* operand_stack = generator_object->operand_stack();
+ int operands_count = operand_stack->length();
+ if (operands_count != 0) {
+ frame->RestoreOperandStack(operand_stack,
+ generator_object->stack_handler_index());
+ generator_object->set_operand_stack(isolate->heap()->empty_fixed_array());
+ generator_object->set_stack_handler_index(-1);
+ }
+
+ JSGeneratorObject::ResumeMode resume_mode =
+ static_cast<JSGeneratorObject::ResumeMode>(resume_mode_int);
+ switch (resume_mode) {
+ case JSGeneratorObject::NEXT:
+ return value;
+ case JSGeneratorObject::THROW:
+ return isolate->Throw(value);
+ }
+
+ UNREACHABLE();
+ return isolate->ThrowIllegalOperation();
+}
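
A sketch of the kind of suspension this slow path exists for: a yield with live operands on the stack and an active try/finally handler that has to be rewound on resume. Which path a given yield actually takes is an engine-internal detail, and generators at this point still sit behind the harmony flag:

  function* worker() {
    try {
      var sum = 1 + (yield "mid-expression");  // operands stay live across the yield
      yield sum;
    } finally {
      console.log("cleanup");                  // handler rewound when the frame is rebuilt
    }
  }
  var g = worker();
  console.log(g.next().value);    // "mid-expression"
  console.log(g.next(41).value);  // 42
  g.next();                       // finishes the generator, runs the finally block
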
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ThrowGeneratorStateError) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator, 0);
+ int continuation = generator->continuation();
+ const char* message = continuation == JSGeneratorObject::kGeneratorClosed ?
+ "generator_finished" : "generator_running";
+ Vector< Handle<Object> > argv = HandleVector<Object>(NULL, 0);
+ Handle<Object> error = isolate->factory()->NewError(message, argv);
+ return isolate->Throw(*error);
+}
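
The two messages above correspond to resuming a generator that is still running or already finished. The re-entrant case is observable directly; the finished case throws under the early semantics implemented here, whereas the final spec returns { done: true } instead:

  var it;
  function* g() {
    yield it.next();  // re-entrant resume while the generator is still executing
  }
  it = g();
  try { it.next(); } catch (e) { console.log("threw: " + e); }  // generator already running
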
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ObjectFreeze) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
+ Handle<Object> result = JSObject::Freeze(object);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
+}
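
Observable behavior backed by this new entry; a minimal example, nothing V8-specific:

  var config = Object.freeze({ retries: 3 });
  config.retries = 5;                    // ignored here; throws a TypeError in strict mode
  console.log(config.retries);           // 3
  console.log(Object.isFrozen(config));  // true
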
+
+
MUST_USE_RESULT static MaybeObject* CharFromCode(Isolate* isolate,
Object* char_code) {
- uint32_t code;
- if (char_code->ToArrayIndex(&code)) {
- if (code <= 0xffff) {
- return isolate->heap()->LookupSingleCharacterStringFromCode(code);
- }
+ if (char_code->IsNumber()) {
+ return isolate->heap()->LookupSingleCharacterStringFromCode(
+ NumberToUint32(char_code) & 0xffff);
}
return isolate->heap()->empty_string();
}
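
The new path matches the ToUint16 semantics of String.fromCharCode: any numeric argument is accepted and reduced modulo 2**16, rather than only array-index values up to 0xffff. For example:

  console.log(String.fromCharCode(0x41));     // "A"
  console.log(String.fromCharCode(0x10041));  // "A"       (0x10041 & 0xffff === 0x41)
  console.log(String.fromCharCode(65.9));     // "A"       (fractions are truncated first)
  console.log(String.fromCharCode(-1));       // "\uffff"  (wraps modulo 2**16)
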
RUNTIME_FUNCTION(MaybeObject*, Runtime_StringCharCodeAt) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(String, subject, 0);
@@ -2327,7 +3180,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringCharCodeAt) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_CharFromCode) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
return CharFromCode(isolate, args[0]);
}
@@ -2402,7 +3255,8 @@ class FixedArrayBuilder {
}
Handle<JSArray> ToJSArray(Handle<JSArray> target_array) {
- FACTORY->SetContent(target_array, array_);
+ Factory* factory = target_array->GetIsolate()->factory();
+ factory->SetContent(target_array, array_);
target_array->set_length(Smi::FromInt(length_));
return target_array;
}
@@ -2442,7 +3296,7 @@ class ReplacementStringBuilder {
array_builder_(heap->isolate(), estimated_part_count),
subject_(subject),
character_count_(0),
- is_ascii_(subject->IsAsciiRepresentation()) {
+ is_ascii_(subject->IsOneByteRepresentation()) {
// Require a non-zero initial size. Ensures that doubling the size to
// extend the array will work.
ASSERT(estimated_part_count > 0);
@@ -2482,7 +3336,7 @@ class ReplacementStringBuilder {
int length = string->length();
ASSERT(length > 0);
AddElement(*string);
- if (!string->IsAsciiRepresentation()) {
+ if (!string->IsOneByteRepresentation()) {
is_ascii_ = false;
}
IncrementCharacterCount(length);
@@ -2496,9 +3350,9 @@ class ReplacementStringBuilder {
Handle<String> joined_string;
if (is_ascii_) {
- Handle<SeqAsciiString> seq = NewRawAsciiString(character_count_);
- AssertNoAllocation no_alloc;
- char* char_buffer = seq->GetChars();
+ Handle<SeqOneByteString> seq = NewRawOneByteString(character_count_);
+ DisallowHeapAllocation no_gc;
+ uint8_t* char_buffer = seq->GetChars();
StringBuilderConcatHelper(*subject_,
char_buffer,
*array_builder_.array(),
@@ -2507,7 +3361,7 @@ class ReplacementStringBuilder {
} else {
// Non-ASCII.
Handle<SeqTwoByteString> seq = NewRawTwoByteString(character_count_);
- AssertNoAllocation no_alloc;
+ DisallowHeapAllocation no_gc;
uc16* char_buffer = seq->GetChars();
StringBuilderConcatHelper(*subject_,
char_buffer,
@@ -2527,8 +3381,8 @@ class ReplacementStringBuilder {
}
private:
- Handle<SeqAsciiString> NewRawAsciiString(int length) {
- return heap_->isolate()->factory()->NewRawAsciiString(length);
+ Handle<SeqOneByteString> NewRawOneByteString(int length) {
+ return heap_->isolate()->factory()->NewRawOneByteString(length);
}
@@ -2749,13 +3603,13 @@ bool CompiledReplacement::Compile(Handle<String> replacement,
int capture_count,
int subject_length) {
{
- AssertNoAllocation no_alloc;
+ DisallowHeapAllocation no_gc;
String::FlatContent content = replacement->GetFlatContent();
ASSERT(content.IsFlat());
bool simple = false;
if (content.IsAscii()) {
simple = ParseReplacementPattern(&parts_,
- content.ToAsciiVector(),
+ content.ToOneByteVector(),
capture_count,
subject_length,
zone());
@@ -2831,7 +3685,7 @@ void CompiledReplacement::Apply(ReplacementStringBuilder* builder,
}
-void FindAsciiStringIndices(Vector<const char> subject,
+void FindAsciiStringIndices(Vector<const uint8_t> subject,
char pattern,
ZoneList<int>* indices,
unsigned int limit,
@@ -2839,11 +3693,11 @@ void FindAsciiStringIndices(Vector<const char> subject,
ASSERT(limit > 0);
// Collect indices of pattern in subject using memchr.
// Stop after finding at most limit values.
- const char* subject_start = reinterpret_cast<const char*>(subject.start());
- const char* subject_end = subject_start + subject.length();
- const char* pos = subject_start;
+ const uint8_t* subject_start = subject.start();
+ const uint8_t* subject_end = subject_start + subject.length();
+ const uint8_t* pos = subject_start;
while (limit > 0) {
- pos = reinterpret_cast<const char*>(
+ pos = reinterpret_cast<const uint8_t*>(
memchr(pos, pattern, subject_end - pos));
if (pos == NULL) return;
indices->Add(static_cast<int>(pos - subject_start), zone);
@@ -2853,6 +3707,23 @@ void FindAsciiStringIndices(Vector<const char> subject,
}
+void FindTwoByteStringIndices(const Vector<const uc16> subject,
+ uc16 pattern,
+ ZoneList<int>* indices,
+ unsigned int limit,
+ Zone* zone) {
+ ASSERT(limit > 0);
+ const uc16* subject_start = subject.start();
+ const uc16* subject_end = subject_start + subject.length();
+ for (const uc16* pos = subject_start; pos < subject_end && limit > 0; pos++) {
+ if (*pos == pattern) {
+ indices->Add(static_cast<int>(pos - subject_start), zone);
+ limit--;
+ }
+ }
+}
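
The dispatch further down picks the memchr-based helper for one-byte subjects with a single-character pattern and this plain scan for two-byte subjects; both compute the same limit-bounded index list. An engine-independent sketch of that computation (findIndices is illustrative, not an API):

  function findIndices(subject, ch, limit) {
    var indices = [];
    var pos = subject.indexOf(ch);
    while (pos !== -1 && indices.length < limit) {
      indices.push(pos);
      pos = subject.indexOf(ch, pos + 1);
    }
    return indices;
  }
  console.log(findIndices("banana", "a", 2));  // [1, 3]
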
+
+
template <typename SubjectChar, typename PatternChar>
void FindStringIndices(Isolate* isolate,
Vector<const SubjectChar> subject,
@@ -2883,15 +3754,16 @@ void FindStringIndicesDispatch(Isolate* isolate,
unsigned int limit,
Zone* zone) {
{
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
String::FlatContent subject_content = subject->GetFlatContent();
String::FlatContent pattern_content = pattern->GetFlatContent();
ASSERT(subject_content.IsFlat());
ASSERT(pattern_content.IsFlat());
if (subject_content.IsAscii()) {
- Vector<const char> subject_vector = subject_content.ToAsciiVector();
+ Vector<const uint8_t> subject_vector = subject_content.ToOneByteVector();
if (pattern_content.IsAscii()) {
- Vector<const char> pattern_vector = pattern_content.ToAsciiVector();
+ Vector<const uint8_t> pattern_vector =
+ pattern_content.ToOneByteVector();
if (pattern_vector.length() == 1) {
FindAsciiStringIndices(subject_vector,
pattern_vector[0],
@@ -2917,19 +3789,38 @@ void FindStringIndicesDispatch(Isolate* isolate,
} else {
Vector<const uc16> subject_vector = subject_content.ToUC16Vector();
if (pattern_content.IsAscii()) {
- FindStringIndices(isolate,
- subject_vector,
- pattern_content.ToAsciiVector(),
- indices,
- limit,
- zone);
+ Vector<const uint8_t> pattern_vector =
+ pattern_content.ToOneByteVector();
+ if (pattern_vector.length() == 1) {
+ FindTwoByteStringIndices(subject_vector,
+ pattern_vector[0],
+ indices,
+ limit,
+ zone);
+ } else {
+ FindStringIndices(isolate,
+ subject_vector,
+ pattern_vector,
+ indices,
+ limit,
+ zone);
+ }
} else {
- FindStringIndices(isolate,
- subject_vector,
- pattern_content.ToUC16Vector(),
- indices,
- limit,
- zone);
+ Vector<const uc16> pattern_vector = pattern_content.ToUC16Vector();
+ if (pattern_vector.length() == 1) {
+ FindTwoByteStringIndices(subject_vector,
+ pattern_vector[0],
+ indices,
+ limit,
+ zone);
+ } else {
+ FindStringIndices(isolate,
+ subject_vector,
+ pattern_vector,
+ indices,
+ limit,
+ zone);
+ }
}
}
}
@@ -2937,7 +3828,7 @@ void FindStringIndicesDispatch(Isolate* isolate,
template<typename ResultSeqString>
-MUST_USE_RESULT static MaybeObject* StringReplaceAtomRegExpWithString(
+MUST_USE_RESULT static MaybeObject* StringReplaceGlobalAtomRegExpWithString(
Isolate* isolate,
Handle<String> subject,
Handle<JSRegExp> pattern_regexp,
@@ -2946,9 +3837,8 @@ MUST_USE_RESULT static MaybeObject* StringReplaceAtomRegExpWithString(
ASSERT(subject->IsFlat());
ASSERT(replacement->IsFlat());
- Zone* zone = isolate->runtime_zone();
- ZoneScope zone_space(zone, DELETE_ON_EXIT);
- ZoneList<int> indices(8, zone);
+ ZoneScope zone_scope(isolate->runtime_zone());
+ ZoneList<int> indices(8, zone_scope.zone());
ASSERT_EQ(JSRegExp::ATOM, pattern_regexp->TypeTag());
String* pattern =
String::cast(pattern_regexp->DataAt(JSRegExp::kAtomPatternIndex));
@@ -2957,7 +3847,7 @@ MUST_USE_RESULT static MaybeObject* StringReplaceAtomRegExpWithString(
int replacement_len = replacement->length();
FindStringIndicesDispatch(
- isolate, *subject, pattern, &indices, 0xffffffff, zone);
+ isolate, *subject, pattern, &indices, 0xffffffff, zone_scope.zone());
int matches = indices.length();
if (matches == 0) return *subject;
@@ -2968,7 +3858,7 @@ MUST_USE_RESULT static MaybeObject* StringReplaceAtomRegExpWithString(
static_cast<int64_t>(pattern_len)) *
static_cast<int64_t>(matches) +
static_cast<int64_t>(subject_len);
- if (result_len_64 > INT_MAX) return Failure::OutOfMemoryException();
+ if (result_len_64 > INT_MAX) return Failure::OutOfMemoryException(0x11);
int result_len = static_cast<int>(result_len_64);
int subject_pos = 0;
@@ -2977,7 +3867,7 @@ MUST_USE_RESULT static MaybeObject* StringReplaceAtomRegExpWithString(
Handle<ResultSeqString> result;
if (ResultSeqString::kHasAsciiEncoding) {
result = Handle<ResultSeqString>::cast(
- isolate->factory()->NewRawAsciiString(result_len));
+ isolate->factory()->NewRawOneByteString(result_len));
} else {
result = Handle<ResultSeqString>::cast(
isolate->factory()->NewRawTwoByteString(result_len));
@@ -3020,7 +3910,7 @@ MUST_USE_RESULT static MaybeObject* StringReplaceAtomRegExpWithString(
}
-MUST_USE_RESULT static MaybeObject* StringReplaceRegExpWithString(
+MUST_USE_RESULT static MaybeObject* StringReplaceGlobalRegExpWithString(
Isolate* isolate,
Handle<String> subject,
Handle<JSRegExp> regexp,
@@ -3029,32 +3919,29 @@ MUST_USE_RESULT static MaybeObject* StringReplaceRegExpWithString(
ASSERT(subject->IsFlat());
ASSERT(replacement->IsFlat());
- bool is_global = regexp->GetFlags().is_global();
int capture_count = regexp->CaptureCount();
int subject_length = subject->length();
// CompiledReplacement uses zone allocation.
- Zone* zone = isolate->runtime_zone();
- ZoneScope zonescope(zone, DELETE_ON_EXIT);
- CompiledReplacement compiled_replacement(zone);
+ ZoneScope zone_scope(isolate->runtime_zone());
+ CompiledReplacement compiled_replacement(zone_scope.zone());
bool simple_replace = compiled_replacement.Compile(replacement,
capture_count,
subject_length);
// Shortcut for simple non-regexp global replacements
- if (is_global &&
- regexp->TypeTag() == JSRegExp::ATOM &&
- simple_replace) {
- if (subject->HasOnlyAsciiChars() && replacement->HasOnlyAsciiChars()) {
- return StringReplaceAtomRegExpWithString<SeqAsciiString>(
+ if (regexp->TypeTag() == JSRegExp::ATOM && simple_replace) {
+ if (subject->HasOnlyOneByteChars() &&
+ replacement->HasOnlyOneByteChars()) {
+ return StringReplaceGlobalAtomRegExpWithString<SeqOneByteString>(
isolate, subject, regexp, replacement, last_match_info);
} else {
- return StringReplaceAtomRegExpWithString<SeqTwoByteString>(
+ return StringReplaceGlobalAtomRegExpWithString<SeqTwoByteString>(
isolate, subject, regexp, replacement, last_match_info);
}
}
- RegExpImpl::GlobalCache global_cache(regexp, subject, is_global, isolate);
+ RegExpImpl::GlobalCache global_cache(regexp, subject, true, isolate);
if (global_cache.HasException()) return Failure::Exception();
int32_t* current_match = global_cache.FetchNext();
@@ -3066,8 +3953,7 @@ MUST_USE_RESULT static MaybeObject* StringReplaceRegExpWithString(
// Guessing the number of parts that the final result string is built
// from. Global regexps can match any number of times, so we guess
// conservatively.
- int expected_parts =
- (compiled_replacement.parts() + 1) * (is_global ? 4 : 1) + 1;
+ int expected_parts = (compiled_replacement.parts() + 1) * 4 + 1;
ReplacementStringBuilder builder(isolate->heap(),
subject,
expected_parts);
@@ -3099,9 +3985,6 @@ MUST_USE_RESULT static MaybeObject* StringReplaceRegExpWithString(
}
prev = end;
- // Only continue checking for global regexps.
- if (!is_global) break;
-
current_match = global_cache.FetchNext();
} while (current_match != NULL);
@@ -3122,37 +4005,26 @@ MUST_USE_RESULT static MaybeObject* StringReplaceRegExpWithString(
template <typename ResultSeqString>
-MUST_USE_RESULT static MaybeObject* StringReplaceRegExpWithEmptyString(
+MUST_USE_RESULT static MaybeObject* StringReplaceGlobalRegExpWithEmptyString(
Isolate* isolate,
Handle<String> subject,
Handle<JSRegExp> regexp,
Handle<JSArray> last_match_info) {
ASSERT(subject->IsFlat());
- bool is_global = regexp->GetFlags().is_global();
-
// Shortcut for simple non-regexp global replacements
- if (is_global &&
- regexp->TypeTag() == JSRegExp::ATOM) {
- Handle<String> empty_string(HEAP->empty_string());
- if (subject->HasOnlyAsciiChars()) {
- return StringReplaceAtomRegExpWithString<SeqAsciiString>(
- isolate,
- subject,
- regexp,
- empty_string,
- last_match_info);
+ if (regexp->TypeTag() == JSRegExp::ATOM) {
+ Handle<String> empty_string = isolate->factory()->empty_string();
+ if (subject->IsOneByteRepresentation()) {
+ return StringReplaceGlobalAtomRegExpWithString<SeqOneByteString>(
+ isolate, subject, regexp, empty_string, last_match_info);
} else {
- return StringReplaceAtomRegExpWithString<SeqTwoByteString>(
- isolate,
- subject,
- regexp,
- empty_string,
- last_match_info);
+ return StringReplaceGlobalAtomRegExpWithString<SeqTwoByteString>(
+ isolate, subject, regexp, empty_string, last_match_info);
}
}
- RegExpImpl::GlobalCache global_cache(regexp, subject, is_global, isolate);
+ RegExpImpl::GlobalCache global_cache(regexp, subject, true, isolate);
if (global_cache.HasException()) return Failure::Exception();
int32_t* current_match = global_cache.FetchNext();
@@ -3172,29 +4044,12 @@ MUST_USE_RESULT static MaybeObject* StringReplaceRegExpWithEmptyString(
Handle<ResultSeqString> answer;
if (ResultSeqString::kHasAsciiEncoding) {
answer = Handle<ResultSeqString>::cast(
- isolate->factory()->NewRawAsciiString(new_length));
+ isolate->factory()->NewRawOneByteString(new_length));
} else {
answer = Handle<ResultSeqString>::cast(
isolate->factory()->NewRawTwoByteString(new_length));
}
- if (!is_global) {
- RegExpImpl::SetLastMatchInfo(
- last_match_info, subject, capture_count, current_match);
- if (start == end) {
- return *subject;
- } else {
- if (start > 0) {
- String::WriteToFlat(*subject, answer->GetChars(), 0, start);
- }
- if (end < subject_length) {
- String::WriteToFlat(
- *subject, answer->GetChars() + start, end, subject_length);
- }
- return *answer;
- }
- }
-
int prev = 0;
int position = 0;
@@ -3203,8 +4058,7 @@ MUST_USE_RESULT static MaybeObject* StringReplaceRegExpWithEmptyString(
end = current_match[1];
if (prev < start) {
// Add substring subject[prev;start] to answer string.
- String::WriteToFlat(
- *subject, answer->GetChars() + position, prev, start);
+ String::WriteToFlat(*subject, answer->GetChars() + position, prev, start);
position += start - prev;
}
prev = end;
@@ -3246,33 +4100,32 @@ MUST_USE_RESULT static MaybeObject* StringReplaceRegExpWithEmptyString(
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_StringReplaceRegExpWithString) {
- ASSERT(args.length() == 4);
-
+RUNTIME_FUNCTION(MaybeObject*, Runtime_StringReplaceGlobalRegExpWithString) {
HandleScope scope(isolate);
+ ASSERT(args.length() == 4);
CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
CONVERT_ARG_HANDLE_CHECKED(String, replacement, 2);
CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 1);
CONVERT_ARG_HANDLE_CHECKED(JSArray, last_match_info, 3);
- if (!subject->IsFlat()) subject = FlattenGetString(subject);
-
- if (!replacement->IsFlat()) replacement = FlattenGetString(replacement);
+ ASSERT(regexp->GetFlags().is_global());
- ASSERT(last_match_info->HasFastObjectElements());
+ if (!subject->IsFlat()) subject = FlattenGetString(subject);
if (replacement->length() == 0) {
- if (subject->HasOnlyAsciiChars()) {
- return StringReplaceRegExpWithEmptyString<SeqAsciiString>(
+ if (subject->HasOnlyOneByteChars()) {
+ return StringReplaceGlobalRegExpWithEmptyString<SeqOneByteString>(
isolate, subject, regexp, last_match_info);
} else {
- return StringReplaceRegExpWithEmptyString<SeqTwoByteString>(
+ return StringReplaceGlobalRegExpWithEmptyString<SeqTwoByteString>(
isolate, subject, regexp, last_match_info);
}
}
- return StringReplaceRegExpWithString(
+ if (!replacement->IsFlat()) replacement = FlattenGetString(replacement);
+
+ return StringReplaceGlobalRegExpWithString(
isolate, subject, regexp, replacement, last_match_info);
}
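
With the non-global handling dropped, this entry now asserts the /g flag, so non-global replaces are presumably routed elsewhere by the callers the assert relies on. The observable split, roughly:

  console.log("a-b-c".replace(/-/g, "+"));  // "a+b+c"  (global: handled by this entry)
  console.log("a-b-c".replace(/-/, "+"));   // "a+b-c"  (non-global: no longer this entry)
  console.log("a-b-c".replace(/-/g, ""));   // "abc"    (empty-replacement fast path above)
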
@@ -3323,8 +4176,8 @@ Handle<String> StringReplaceOneCharWithString(Isolate* isolate,
RUNTIME_FUNCTION(MaybeObject*, Runtime_StringReplaceOneCharWithString) {
- ASSERT(args.length() == 3);
HandleScope scope(isolate);
+ ASSERT(args.length() == 3);
CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
CONVERT_ARG_HANDLE_CHECKED(String, search, 1);
CONVERT_ARG_HANDLE_CHECKED(String, replace, 2);
@@ -3368,17 +4221,17 @@ int Runtime::StringMatch(Isolate* isolate,
if (!sub->IsFlat()) FlattenString(sub);
if (!pat->IsFlat()) FlattenString(pat);
- AssertNoAllocation no_heap_allocation; // ensure vectors stay valid
+ DisallowHeapAllocation no_gc; // ensure vectors stay valid
// Extract flattened substrings of cons strings before determining asciiness.
String::FlatContent seq_sub = sub->GetFlatContent();
String::FlatContent seq_pat = pat->GetFlatContent();
// dispatch on type of strings
if (seq_pat.IsAscii()) {
- Vector<const char> pat_vector = seq_pat.ToAsciiVector();
+ Vector<const uint8_t> pat_vector = seq_pat.ToOneByteVector();
if (seq_sub.IsAscii()) {
return SearchString(isolate,
- seq_sub.ToAsciiVector(),
+ seq_sub.ToOneByteVector(),
pat_vector,
start_index);
}
@@ -3390,7 +4243,7 @@ int Runtime::StringMatch(Isolate* isolate,
Vector<const uc16> pat_vector = seq_pat.ToUC16Vector();
if (seq_sub.IsAscii()) {
return SearchString(isolate,
- seq_sub.ToAsciiVector(),
+ seq_sub.ToOneByteVector(),
pat_vector,
start_index);
}
@@ -3402,7 +4255,7 @@ int Runtime::StringMatch(Isolate* isolate,
RUNTIME_FUNCTION(MaybeObject*, Runtime_StringIndexOf) {
- HandleScope scope(isolate); // create a new handle scope
+ HandleScope scope(isolate);
ASSERT(args.length() == 3);
CONVERT_ARG_HANDLE_CHECKED(String, sub, 0);
@@ -3430,7 +4283,7 @@ static int StringMatchBackwards(Vector<const schar> subject,
if (sizeof(schar) == 1 && sizeof(pchar) > 1) {
for (int i = 0; i < pattern_length; i++) {
uc16 c = pattern[i];
- if (c > String::kMaxAsciiCharCode) {
+ if (c > String::kMaxOneByteCharCode) {
return -1;
}
}
@@ -3453,8 +4306,9 @@ static int StringMatchBackwards(Vector<const schar> subject,
return -1;
}
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_StringLastIndexOf) {
- HandleScope scope(isolate); // create a new handle scope
+ HandleScope scope(isolate);
ASSERT(args.length() == 3);
CONVERT_ARG_HANDLE_CHECKED(String, sub, 0);
@@ -3479,15 +4333,15 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringLastIndexOf) {
if (!pat->IsFlat()) FlattenString(pat);
int position = -1;
- AssertNoAllocation no_heap_allocation; // ensure vectors stay valid
+ DisallowHeapAllocation no_gc; // ensure vectors stay valid
String::FlatContent sub_content = sub->GetFlatContent();
String::FlatContent pat_content = pat->GetFlatContent();
if (pat_content.IsAscii()) {
- Vector<const char> pat_vector = pat_content.ToAsciiVector();
+ Vector<const uint8_t> pat_vector = pat_content.ToOneByteVector();
if (sub_content.IsAscii()) {
- position = StringMatchBackwards(sub_content.ToAsciiVector(),
+ position = StringMatchBackwards(sub_content.ToOneByteVector(),
pat_vector,
start_index);
} else {
@@ -3498,7 +4352,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringLastIndexOf) {
} else {
Vector<const uc16> pat_vector = pat_content.ToUC16Vector();
if (sub_content.IsAscii()) {
- position = StringMatchBackwards(sub_content.ToAsciiVector(),
+ position = StringMatchBackwards(sub_content.ToOneByteVector(),
pat_vector,
start_index);
} else {
@@ -3513,7 +4367,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringLastIndexOf) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_StringLocaleCompare) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(String, str1, 0);
@@ -3542,17 +4396,17 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringLocaleCompare) {
str1->TryFlatten();
str2->TryFlatten();
- StringInputBuffer& buf1 =
- *isolate->runtime_state()->string_locale_compare_buf1();
- StringInputBuffer& buf2 =
- *isolate->runtime_state()->string_locale_compare_buf2();
-
- buf1.Reset(str1);
- buf2.Reset(str2);
+ ConsStringIteratorOp* op1 =
+ isolate->runtime_state()->string_locale_compare_it1();
+ ConsStringIteratorOp* op2 =
+ isolate->runtime_state()->string_locale_compare_it2();
+ // TODO(dcarney) Can do array compares here more efficiently.
+ StringCharacterStream stream1(str1, op1);
+ StringCharacterStream stream2(str2, op2);
for (int i = 0; i < end; i++) {
- uint16_t char1 = buf1.GetNext();
- uint16_t char2 = buf2.GetNext();
+ uint16_t char1 = stream1.GetNext();
+ uint16_t char2 = stream2.GetNext();
if (char1 != char2) return Smi::FromInt(char1 - char2);
}
@@ -3561,7 +4415,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringLocaleCompare) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_SubString) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 3);
CONVERT_ARG_CHECKED(String, value, 0);
@@ -3583,32 +4437,35 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SubString) {
RUNTIME_ASSERT(start >= 0);
RUNTIME_ASSERT(end <= value->length());
isolate->counters()->sub_string_runtime()->Increment();
+ if (end - start == 1) {
+ return isolate->heap()->LookupSingleCharacterStringFromCode(
+ value->Get(start));
+ }
return value->SubString(start, end);
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_StringMatch) {
+ HandleScope handles(isolate);
ASSERT_EQ(3, args.length());
CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 1);
CONVERT_ARG_HANDLE_CHECKED(JSArray, regexp_info, 2);
- HandleScope handles;
RegExpImpl::GlobalCache global_cache(regexp, subject, true, isolate);
if (global_cache.HasException()) return Failure::Exception();
int capture_count = regexp->CaptureCount();
- Zone* zone = isolate->runtime_zone();
- ZoneScope zone_space(zone, DELETE_ON_EXIT);
- ZoneList<int> offsets(8, zone);
+ ZoneScope zone_scope(isolate->runtime_zone());
+ ZoneList<int> offsets(8, zone_scope.zone());
while (true) {
int32_t* match = global_cache.FetchNext();
if (match == NULL) break;
- offsets.Add(match[0], zone); // start
- offsets.Add(match[1], zone); // end
+ offsets.Add(match[0], zone_scope.zone()); // start
+ offsets.Add(match[1], zone_scope.zone()); // end
}
if (global_cache.HasException()) return Failure::Exception();
@@ -3664,7 +4521,7 @@ static MaybeObject* SearchRegExpMultiple(
isolate->heap(),
*subject,
regexp->data(),
- RegExpResultsCache::REGEXP_MULTIPLE_INDICES));
+ RegExpResultsCache::REGEXP_MULTIPLE_INDICES), isolate);
if (*cached_answer != Smi::FromInt(0)) {
Handle<FixedArray> cached_fixed_array =
Handle<FixedArray>(FixedArray::cast(*cached_answer));
@@ -3797,8 +4654,8 @@ static MaybeObject* SearchRegExpMultiple(
// lastMatchInfoOverride to maintain the last match info, so we don't need to
// set any other last match array info.
RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpExecMultiple) {
- ASSERT(args.length() == 4);
HandleScope handles(isolate);
+ ASSERT(args.length() == 4);
CONVERT_ARG_HANDLE_CHECKED(String, subject, 1);
if (!subject->IsFlat()) FlattenString(subject);
@@ -3806,7 +4663,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpExecMultiple) {
CONVERT_ARG_HANDLE_CHECKED(JSArray, last_match_info, 2);
CONVERT_ARG_HANDLE_CHECKED(JSArray, result_array, 3);
- ASSERT(last_match_info->HasFastObjectElements());
ASSERT(regexp->GetFlags().is_global());
if (regexp->CaptureCount() == 0) {
@@ -3820,7 +4676,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpExecMultiple) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToRadixString) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_SMI_ARG_CHECKED(radix, 1);
RUNTIME_ASSERT(2 <= radix && radix <= 36);
@@ -3838,110 +4694,105 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToRadixString) {
// Slow case.
CONVERT_DOUBLE_ARG_CHECKED(value, 0);
- if (isnan(value)) {
- return *isolate->factory()->nan_symbol();
+ if (std::isnan(value)) {
+ return *isolate->factory()->nan_string();
}
- if (isinf(value)) {
+ if (std::isinf(value)) {
if (value < 0) {
- return *isolate->factory()->minus_infinity_symbol();
+ return *isolate->factory()->minus_infinity_string();
}
- return *isolate->factory()->infinity_symbol();
+ return *isolate->factory()->infinity_string();
}
char* str = DoubleToRadixCString(value, radix);
MaybeObject* result =
- isolate->heap()->AllocateStringFromAscii(CStrVector(str));
+ isolate->heap()->AllocateStringFromOneByte(CStrVector(str));
DeleteArray(str);
return result;
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToFixed) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_DOUBLE_ARG_CHECKED(value, 0);
- if (isnan(value)) {
- return *isolate->factory()->nan_symbol();
- }
- if (isinf(value)) {
- if (value < 0) {
- return *isolate->factory()->minus_infinity_symbol();
- }
- return *isolate->factory()->infinity_symbol();
- }
CONVERT_DOUBLE_ARG_CHECKED(f_number, 1);
int f = FastD2IChecked(f_number);
RUNTIME_ASSERT(f >= 0);
char* str = DoubleToFixedCString(value, f);
MaybeObject* res =
- isolate->heap()->AllocateStringFromAscii(CStrVector(str));
+ isolate->heap()->AllocateStringFromOneByte(CStrVector(str));
DeleteArray(str);
return res;
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToExponential) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_DOUBLE_ARG_CHECKED(value, 0);
- if (isnan(value)) {
- return *isolate->factory()->nan_symbol();
- }
- if (isinf(value)) {
- if (value < 0) {
- return *isolate->factory()->minus_infinity_symbol();
- }
- return *isolate->factory()->infinity_symbol();
- }
CONVERT_DOUBLE_ARG_CHECKED(f_number, 1);
int f = FastD2IChecked(f_number);
RUNTIME_ASSERT(f >= -1 && f <= 20);
char* str = DoubleToExponentialCString(value, f);
MaybeObject* res =
- isolate->heap()->AllocateStringFromAscii(CStrVector(str));
+ isolate->heap()->AllocateStringFromOneByte(CStrVector(str));
DeleteArray(str);
return res;
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToPrecision) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_DOUBLE_ARG_CHECKED(value, 0);
- if (isnan(value)) {
- return *isolate->factory()->nan_symbol();
- }
- if (isinf(value)) {
- if (value < 0) {
- return *isolate->factory()->minus_infinity_symbol();
- }
- return *isolate->factory()->infinity_symbol();
- }
CONVERT_DOUBLE_ARG_CHECKED(f_number, 1);
int f = FastD2IChecked(f_number);
RUNTIME_ASSERT(f >= 1 && f <= 21);
char* str = DoubleToPrecisionCString(value, f);
MaybeObject* res =
- isolate->heap()->AllocateStringFromAscii(CStrVector(str));
+ isolate->heap()->AllocateStringFromOneByte(CStrVector(str));
DeleteArray(str);
return res;
}
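
For reference, the JS-visible conversions these entries back. The NaN/Infinity checks are now kept only in the radix path; presumably the JS builtins filter non-finite values before reaching toFixed/toExponential/toPrecision:

  console.log((255).toString(16));         // "ff"
  console.log((0 / 0).toString(16));       // "NaN"      (nan_string path above)
  console.log((3.14159).toFixed(2));       // "3.14"
  console.log((12345).toExponential(2));   // "1.23e+4"
  console.log((0.000123).toPrecision(2));  // "0.00012"
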
+RUNTIME_FUNCTION(MaybeObject*, Runtime_IsValidSmi) {
+ HandleScope shs(isolate);
+ ASSERT(args.length() == 1);
+
+ CONVERT_NUMBER_CHECKED(int32_t, number, Int32, args[0]);
+ if (Smi::IsValid(number)) {
+ return isolate->heap()->true_value();
+ } else {
+ return isolate->heap()->false_value();
+ }
+}
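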
+
+
// Returns a single character string where the first character equals
// string->Get(index).
static Handle<Object> GetCharAt(Handle<String> string, uint32_t index) {
if (index < static_cast<uint32_t>(string->length())) {
string->TryFlatten();
return LookupSingleCharacterStringFromCode(
+ string->GetIsolate(),
string->Get(index));
}
return Execution::CharAt(string, index);
}
+MaybeObject* Runtime::GetElementOrCharAtOrFail(Isolate* isolate,
+ Handle<Object> object,
+ uint32_t index) {
+ CALL_HEAP_FUNCTION_PASS_EXCEPTION(isolate,
+ GetElementOrCharAt(isolate, object, index));
+}
+
+
MaybeObject* Runtime::GetElementOrCharAt(Isolate* isolate,
Handle<Object> object,
uint32_t index) {
@@ -3960,13 +4811,47 @@ MaybeObject* Runtime::GetElementOrCharAt(Isolate* isolate,
}
if (object->IsString() || object->IsNumber() || object->IsBoolean()) {
- return object->GetPrototype()->GetElement(index);
+ return object->GetPrototype(isolate)->GetElement(isolate, index);
}
- return object->GetElement(index);
+ return object->GetElement(isolate, index);
}
+MaybeObject* Runtime::HasObjectProperty(Isolate* isolate,
+ Handle<JSReceiver> object,
+ Handle<Object> key) {
+ HandleScope scope(isolate);
+
+ // Check if the given key is an array index.
+ uint32_t index;
+ if (key->ToArrayIndex(&index)) {
+ return isolate->heap()->ToBoolean(JSReceiver::HasElement(object, index));
+ }
+
+ // Convert the key to a name - possibly by calling back into JavaScript.
+ Handle<Name> name;
+ if (key->IsName()) {
+ name = Handle<Name>::cast(key);
+ } else {
+ bool has_pending_exception = false;
+ Handle<Object> converted =
+ Execution::ToString(isolate, key, &has_pending_exception);
+ if (has_pending_exception) return Failure::Exception();
+ name = Handle<Name>::cast(converted);
+ }
+
+ return isolate->heap()->ToBoolean(JSReceiver::HasProperty(object, name));
+}
+
+MaybeObject* Runtime::GetObjectPropertyOrFail(
+ Isolate* isolate,
+ Handle<Object> object,
+ Handle<Object> key) {
+ CALL_HEAP_FUNCTION_PASS_EXCEPTION(isolate,
+ GetObjectProperty(isolate, object, key));
+}
+
MaybeObject* Runtime::GetObjectProperty(Isolate* isolate,
Handle<Object> object,
Handle<Object> key) {
@@ -3986,16 +4871,16 @@ MaybeObject* Runtime::GetObjectProperty(Isolate* isolate,
return GetElementOrCharAt(isolate, object, index);
}
- // Convert the key to a string - possibly by calling back into JavaScript.
- Handle<String> name;
- if (key->IsString()) {
- name = Handle<String>::cast(key);
+ // Convert the key to a name - possibly by calling back into JavaScript.
+ Handle<Name> name;
+ if (key->IsName()) {
+ name = Handle<Name>::cast(key);
} else {
bool has_pending_exception = false;
Handle<Object> converted =
- Execution::ToString(key, &has_pending_exception);
+ Execution::ToString(isolate, key, &has_pending_exception);
if (has_pending_exception) return Failure::Exception();
- name = Handle<String>::cast(converted);
+ name = Handle<Name>::cast(converted);
}
// Check if the name is trivially convertible to an index and get
@@ -4009,7 +4894,7 @@ MaybeObject* Runtime::GetObjectProperty(Isolate* isolate,
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetProperty) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
Handle<Object> object = args.at<Object>(0);
@@ -4019,9 +4904,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetProperty) {
}
-// KeyedStringGetProperty is called from KeyedLoadIC::GenerateGeneric.
+// KeyedGetProperty is called from KeyedLoadIC::GenerateGeneric.
RUNTIME_FUNCTION(MaybeObject*, Runtime_KeyedGetProperty) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
// Fast cases for getting named properties of the receiver JSObject
@@ -4038,16 +4923,17 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_KeyedGetProperty) {
if (args[0]->IsJSObject()) {
if (!args[0]->IsJSGlobalProxy() &&
!args[0]->IsAccessCheckNeeded() &&
- args[1]->IsString()) {
+ args[1]->IsName()) {
JSObject* receiver = JSObject::cast(args[0]);
- String* key = String::cast(args[1]);
+ Name* key = Name::cast(args[1]);
if (receiver->HasFastProperties()) {
// Attempt to use lookup cache.
Map* receiver_map = receiver->map();
KeyedLookupCache* keyed_lookup_cache = isolate->keyed_lookup_cache();
int offset = keyed_lookup_cache->Lookup(receiver_map, key);
if (offset != -1) {
- Object* value = receiver->FastPropertyAt(offset);
+ // Doubles are not cached, so it is safe to read the value raw.
+ Object* value = receiver->RawFastPropertyAt(offset);
return value->IsTheHole()
? isolate->heap()->undefined_value()
: value;
@@ -4057,25 +4943,30 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_KeyedGetProperty) {
LookupResult result(isolate);
receiver->LocalLookup(key, &result);
if (result.IsField()) {
- int offset = result.GetFieldIndex();
- keyed_lookup_cache->Update(receiver_map, key, offset);
- return receiver->FastPropertyAt(offset);
+ int offset = result.GetFieldIndex().field_index();
+ // Do not track double fields in the keyed lookup cache. Reading
+ // double values requires boxing.
+ if (!FLAG_track_double_fields ||
+ !result.representation().IsDouble()) {
+ keyed_lookup_cache->Update(receiver_map, key, offset);
+ }
+ return receiver->FastPropertyAt(result.representation(), offset);
}
} else {
// Attempt dictionary lookup.
- StringDictionary* dictionary = receiver->property_dictionary();
+ NameDictionary* dictionary = receiver->property_dictionary();
int entry = dictionary->FindEntry(key);
- if ((entry != StringDictionary::kNotFound) &&
+ if ((entry != NameDictionary::kNotFound) &&
(dictionary->DetailsAt(entry).type() == NORMAL)) {
Object* value = dictionary->ValueAt(entry);
if (!receiver->IsGlobalObject()) return value;
- value = JSGlobalPropertyCell::cast(value)->value();
+ value = PropertyCell::cast(value)->value();
if (!value->IsTheHole()) return value;
// If value is the hole do the general lookup.
}
}
} else if (FLAG_smi_only_arrays && args.at<Object>(1)->IsSmi()) {
- // JSObject without a string key. If the key is a Smi, check for a
+ // JSObject without a name key. If the key is a Smi, check for a
// definite out-of-bounds access to elements, which is a strong indicator
// that subsequent accesses will also call the runtime. Proactively
// transition elements to FAST_*_ELEMENTS to avoid excessive boxing of
@@ -4083,8 +4974,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_KeyedGetProperty) {
// become FAST_DOUBLE_ELEMENTS.
Handle<JSObject> js_object(args.at<JSObject>(0));
ElementsKind elements_kind = js_object->GetElementsKind();
- if (IsFastElementsKind(elements_kind) &&
- !IsFastObjectElementsKind(elements_kind)) {
+ if (IsFastDoubleElementsKind(elements_kind)) {
FixedArrayBase* elements = js_object->elements();
if (args.at<Smi>(1)->value() >= elements->length()) {
if (IsFastHoleyElementsKind(elements_kind)) {
@@ -4097,6 +4987,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_KeyedGetProperty) {
isolate);
if (maybe_object->IsFailure()) return maybe_object;
}
+ } else {
+ ASSERT(IsFastSmiOrObjectElementsKind(elements_kind) ||
+ !IsFastElementsKind(elements_kind));
}
}
} else if (args[0]->IsString() && args[1]->IsSmi()) {
@@ -4129,11 +5022,11 @@ static bool IsValidAccessor(Handle<Object> obj) {
// Step 12 - update an existing accessor property with an accessor or generic
// descriptor.
RUNTIME_FUNCTION(MaybeObject*, Runtime_DefineOrRedefineAccessorProperty) {
- ASSERT(args.length() == 5);
HandleScope scope(isolate);
+ ASSERT(args.length() == 5);
CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
RUNTIME_ASSERT(!obj->IsNull());
- CONVERT_ARG_HANDLE_CHECKED(String, name, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
CONVERT_ARG_HANDLE_CHECKED(Object, getter, 2);
RUNTIME_ASSERT(IsValidAccessor(getter));
CONVERT_ARG_HANDLE_CHECKED(Object, setter, 3);
@@ -4144,10 +5037,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DefineOrRedefineAccessorProperty) {
bool fast = obj->HasFastProperties();
JSObject::DefineAccessor(obj, name, getter, setter, attr);
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
if (fast) JSObject::TransformToFastProperties(obj, 0);
return isolate->heap()->undefined_value();
}
+
// Implements part of 8.12.9 DefineOwnProperty.
// There are 3 cases that lead here:
// Step 4a - define a new data property.
@@ -4155,10 +5050,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DefineOrRedefineAccessorProperty) {
// Step 12 - update an existing data property with a data or generic
// descriptor.
RUNTIME_FUNCTION(MaybeObject*, Runtime_DefineOrRedefineDataProperty) {
- ASSERT(args.length() == 4);
HandleScope scope(isolate);
+ ASSERT(args.length() == 4);
CONVERT_ARG_HANDLE_CHECKED(JSObject, js_object, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, name, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
CONVERT_ARG_HANDLE_CHECKED(Object, obj_value, 2);
CONVERT_SMI_ARG_CHECKED(unchecked, 3);
RUNTIME_ASSERT((unchecked & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0);
@@ -4181,11 +5076,15 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DefineOrRedefineDataProperty) {
// TODO(mstarzinger): So far this only works if property attributes don't
// change, this should be fixed once we cleanup the underlying code.
if (callback->IsForeign() && result.GetAttributes() == attr) {
- return js_object->SetPropertyWithCallback(callback,
- *name,
- *obj_value,
- result.holder(),
- kStrictMode);
+ Handle<Object> result_object =
+ JSObject::SetPropertyWithCallback(js_object,
+ handle(callback, isolate),
+ name,
+ obj_value,
+ handle(result.holder()),
+ kStrictMode);
+ RETURN_IF_EMPTY_HANDLE(isolate, result_object);
+ return *result_object;
}
}
@@ -4206,9 +5105,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DefineOrRedefineDataProperty) {
JSObject::NormalizeProperties(js_object, CLEAR_INOBJECT_PROPERTIES, 0);
// Use IgnoreAttributes version since a readonly property may be
// overridden and SetProperty does not allow this.
- return js_object->SetLocalPropertyIgnoreAttributes(*name,
- *obj_value,
- attr);
+ Handle<Object> result = JSObject::SetLocalPropertyIgnoreAttributes(
+ js_object, name, obj_value, attr);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
}
return Runtime::ForceSetObjectProperty(isolate,
@@ -4219,6 +5119,48 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DefineOrRedefineDataProperty) {
}
+// Return property without being observable by accessors or interceptors.
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetDataProperty) {
+ SealHandleScope shs(isolate);
+ ASSERT(args.length() == 2);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Name, key, 1);
+ LookupResult lookup(isolate);
+ object->LookupRealNamedProperty(*key, &lookup);
+ if (!lookup.IsFound()) return isolate->heap()->undefined_value();
+ switch (lookup.type()) {
+ case NORMAL:
+ return lookup.holder()->GetNormalizedProperty(&lookup);
+ case FIELD:
+ return lookup.holder()->FastPropertyAt(
+ lookup.representation(),
+ lookup.GetFieldIndex().field_index());
+ case CONSTANT:
+ return lookup.GetConstant();
+ case CALLBACKS:
+ case HANDLER:
+ case INTERCEPTOR:
+ case TRANSITION:
+ return isolate->heap()->undefined_value();
+ case NONEXISTENT:
+ UNREACHABLE();
+ }
+ return isolate->heap()->undefined_value();
+}
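
A loose JS-level analogy for what "data property only" means here: accessors have no value slot, and that is the distinction this entry preserves (the entry itself is internal):

  var o = { plain: 1, get computed() { return 2; } };
  console.log(Object.getOwnPropertyDescriptor(o, "plain").value);     // 1
  console.log(Object.getOwnPropertyDescriptor(o, "computed").value);  // undefined (accessor)
  console.log(Object.getOwnPropertyDescriptor(o, "computed").get());  // 2
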
+
+
+MaybeObject* Runtime::SetObjectPropertyOrFail(
+ Isolate* isolate,
+ Handle<Object> object,
+ Handle<Object> key,
+ Handle<Object> value,
+ PropertyAttributes attr,
+ StrictModeFlag strict_mode) {
+ CALL_HEAP_FUNCTION_PASS_EXCEPTION(isolate,
+ SetObjectProperty(isolate, object, key, value, attr, strict_mode));
+}
+
+
MaybeObject* Runtime::SetObjectProperty(Isolate* isolate,
Handle<Object> object,
Handle<Object> key,
@@ -4238,10 +5180,14 @@ MaybeObject* Runtime::SetObjectProperty(Isolate* isolate,
if (object->IsJSProxy()) {
bool has_pending_exception = false;
- Handle<Object> name = Execution::ToString(key, &has_pending_exception);
+ Handle<Object> name_object = key->IsSymbol()
+ ? key : Execution::ToString(isolate, key, &has_pending_exception);
if (has_pending_exception) return Failure::Exception();
- return JSProxy::cast(*object)->SetProperty(
- String::cast(*name), *value, attr, strict_mode);
+ Handle<Name> name = Handle<Name>::cast(name_object);
+ Handle<Object> result = JSReceiver::SetProperty(
+ Handle<JSProxy>::cast(object), name, value, attr, strict_mode);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
}
// If the object isn't a JavaScript object, we ignore the store.
@@ -4264,31 +5210,50 @@ MaybeObject* Runtime::SetObjectProperty(Isolate* isolate,
}
js_object->ValidateElements();
- Handle<Object> result = JSObject::SetElement(
- js_object, index, value, attr, strict_mode, set_mode);
+ if (js_object->HasExternalArrayElements()) {
+ if (!value->IsNumber() && !value->IsUndefined()) {
+ bool has_exception;
+ Handle<Object> number =
+ Execution::ToNumber(isolate, value, &has_exception);
+ if (has_exception) return Failure::Exception();
+ value = number;
+ }
+ }
+ MaybeObject* result = js_object->SetElement(
+ index, *value, attr, strict_mode, true, set_mode);
js_object->ValidateElements();
- if (result.is_null()) return Failure::Exception();
+ if (result->IsFailure()) return result;
return *value;
}
- if (key->IsString()) {
- Handle<Object> result;
- if (Handle<String>::cast(key)->AsArrayIndex(&index)) {
- result = JSObject::SetElement(
- js_object, index, value, attr, strict_mode, set_mode);
+ if (key->IsName()) {
+ Handle<Name> name = Handle<Name>::cast(key);
+ if (name->AsArrayIndex(&index)) {
+ if (js_object->HasExternalArrayElements()) {
+ if (!value->IsNumber() && !value->IsUndefined()) {
+ bool has_exception;
+ Handle<Object> number =
+ Execution::ToNumber(isolate, value, &has_exception);
+ if (has_exception) return Failure::Exception();
+ value = number;
+ }
+ }
+ MaybeObject* result = js_object->SetElement(
+ index, *value, attr, strict_mode, true, set_mode);
+ if (result->IsFailure()) return result;
} else {
- Handle<String> key_string = Handle<String>::cast(key);
- key_string->TryFlatten();
- result = JSReceiver::SetProperty(
- js_object, key_string, value, attr, strict_mode);
+ if (name->IsString()) Handle<String>::cast(name)->TryFlatten();
+ Handle<Object> result =
+ JSReceiver::SetProperty(js_object, name, value, attr, strict_mode);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
}
- if (result.is_null()) return Failure::Exception();
return *value;
}
// Call-back into JavaScript to convert the key to a string.
bool has_pending_exception = false;
- Handle<Object> converted = Execution::ToString(key, &has_pending_exception);
+ Handle<Object> converted =
+ Execution::ToString(isolate, key, &has_pending_exception);
if (has_pending_exception) return Failure::Exception();
Handle<String> name = Handle<String>::cast(converted);
@@ -4296,7 +5261,10 @@ MaybeObject* Runtime::SetObjectProperty(Isolate* isolate,
return js_object->SetElement(
index, *value, attr, strict_mode, true, set_mode);
} else {
- return js_object->SetProperty(*name, *value, attr, strict_mode);
+ Handle<Object> result =
+ JSReceiver::SetProperty(js_object, name, value, attr, strict_mode);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
}
}
@@ -4326,22 +5294,24 @@ MaybeObject* Runtime::ForceSetObjectProperty(Isolate* isolate,
index, *value, attr, kNonStrictMode, false, DEFINE_PROPERTY);
}
- if (key->IsString()) {
- if (Handle<String>::cast(key)->AsArrayIndex(&index)) {
+ if (key->IsName()) {
+ Handle<Name> name = Handle<Name>::cast(key);
+ if (name->AsArrayIndex(&index)) {
return js_object->SetElement(
index, *value, attr, kNonStrictMode, false, DEFINE_PROPERTY);
} else {
- Handle<String> key_string = Handle<String>::cast(key);
- key_string->TryFlatten();
- return js_object->SetLocalPropertyIgnoreAttributes(*key_string,
- *value,
- attr);
+ if (name->IsString()) Handle<String>::cast(name)->TryFlatten();
+ Handle<Object> result = JSObject::SetLocalPropertyIgnoreAttributes(
+ js_object, name, value, attr);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
}
}
// Call-back into JavaScript to convert the key to a string.
bool has_pending_exception = false;
- Handle<Object> converted = Execution::ToString(key, &has_pending_exception);
+ Handle<Object> converted =
+ Execution::ToString(isolate, key, &has_pending_exception);
if (has_pending_exception) return Failure::Exception();
Handle<String> name = Handle<String>::cast(converted);
@@ -4349,14 +5319,18 @@ MaybeObject* Runtime::ForceSetObjectProperty(Isolate* isolate,
return js_object->SetElement(
index, *value, attr, kNonStrictMode, false, DEFINE_PROPERTY);
} else {
- return js_object->SetLocalPropertyIgnoreAttributes(*name, *value, attr);
+ Handle<Object> result = JSObject::SetLocalPropertyIgnoreAttributes(
+ js_object, name, value, attr);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
}
}
-MaybeObject* Runtime::ForceDeleteObjectProperty(Isolate* isolate,
- Handle<JSReceiver> receiver,
- Handle<Object> key) {
+MaybeObject* Runtime::DeleteObjectProperty(Isolate* isolate,
+ Handle<JSReceiver> receiver,
+ Handle<Object> key,
+ JSReceiver::DeleteMode mode) {
HandleScope scope(isolate);
// Check if the given key is an array index.
@@ -4372,27 +5346,32 @@ MaybeObject* Runtime::ForceDeleteObjectProperty(Isolate* isolate,
return isolate->heap()->true_value();
}
- return receiver->DeleteElement(index, JSReceiver::FORCE_DELETION);
+ Handle<Object> result = JSReceiver::DeleteElement(receiver, index, mode);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
}
- Handle<String> key_string;
- if (key->IsString()) {
- key_string = Handle<String>::cast(key);
+ Handle<Name> name;
+ if (key->IsName()) {
+ name = Handle<Name>::cast(key);
} else {
// Call-back into JavaScript to convert the key to a string.
bool has_pending_exception = false;
- Handle<Object> converted = Execution::ToString(key, &has_pending_exception);
+ Handle<Object> converted = Execution::ToString(
+ isolate, key, &has_pending_exception);
if (has_pending_exception) return Failure::Exception();
- key_string = Handle<String>::cast(converted);
+ name = Handle<String>::cast(converted);
}
- key_string->TryFlatten();
- return receiver->DeleteProperty(*key_string, JSReceiver::FORCE_DELETION);
+ if (name->IsString()) Handle<String>::cast(name)->TryFlatten();
+ Handle<Object> result = JSReceiver::DeleteProperty(receiver, name, mode);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_SetProperty) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
RUNTIME_ASSERT(args.length() == 4 || args.length() == 5);
Handle<Object> object = args.at<Object>(0);
@@ -4420,35 +5399,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetProperty) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_TransitionElementsSmiToDouble) {
- NoHandleAllocation ha;
- RUNTIME_ASSERT(args.length() == 1);
- Handle<Object> object = args.at<Object>(0);
- if (object->IsJSObject()) {
- Handle<JSObject> js_object(Handle<JSObject>::cast(object));
- ElementsKind new_kind = js_object->HasFastHoleyElements()
- ? FAST_HOLEY_DOUBLE_ELEMENTS
- : FAST_DOUBLE_ELEMENTS;
- return TransitionElements(object, new_kind, isolate);
- } else {
- return *object;
- }
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_TransitionElementsDoubleToObject) {
- NoHandleAllocation ha;
- RUNTIME_ASSERT(args.length() == 1);
- Handle<Object> object = args.at<Object>(0);
- if (object->IsJSObject()) {
- Handle<JSObject> js_object(Handle<JSObject>::cast(object));
- ElementsKind new_kind = js_object->HasFastHoleyElements()
- ? FAST_HOLEY_ELEMENTS
- : FAST_ELEMENTS;
- return TransitionElements(object, new_kind, isolate);
- } else {
- return *object;
- }
+RUNTIME_FUNCTION(MaybeObject*, Runtime_TransitionElementsKind) {
+ HandleScope scope(isolate);
+ RUNTIME_ASSERT(args.length() == 2);
+ CONVERT_ARG_HANDLE_CHECKED(JSArray, array, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Map, map, 1);
+ JSObject::TransitionElementsKind(array, map->elements_kind());
+ return *array;
}
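
Elements kinds are engine-internal, so the new map-based form is not directly observable from script; a heavily hedged sketch of the one-way transitions the kinds model:

  var a = [1, 2, 3];  // starts as packed SMI elements (internal detail, a guess)
  a.push(4.5);        // may transition to double elements
  a.push("x");        // may transition to object elements; kinds only generalize, never revert
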
@@ -4456,7 +5413,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_TransitionElementsDoubleToObject) {
// This is used to decide if we should transform null and undefined
// into the global object when doing call and apply.
RUNTIME_FUNCTION(MaybeObject*, Runtime_SetNativeFlag) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
RUNTIME_ASSERT(args.length() == 1);
Handle<Object> object = args.at<Object>(0);
@@ -4470,16 +5427,23 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetNativeFlag) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_StoreArrayLiteralElement) {
+ HandleScope scope(isolate);
RUNTIME_ASSERT(args.length() == 5);
CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
CONVERT_SMI_ARG_CHECKED(store_index, 1);
Handle<Object> value = args.at<Object>(2);
CONVERT_ARG_HANDLE_CHECKED(FixedArray, literals, 3);
CONVERT_SMI_ARG_CHECKED(literal_index, 4);
- HandleScope scope;
- Object* raw_boilerplate_object = literals->get(literal_index);
- Handle<JSArray> boilerplate_object(JSArray::cast(raw_boilerplate_object));
+ Object* raw_literal_cell = literals->get(literal_index);
+ JSArray* boilerplate = NULL;
+ if (raw_literal_cell->IsAllocationSite()) {
+ AllocationSite* site = AllocationSite::cast(raw_literal_cell);
+ boilerplate = JSArray::cast(site->transition_info());
+ } else {
+ boilerplate = JSArray::cast(raw_literal_cell);
+ }
+ Handle<JSArray> boilerplate_object(boilerplate);
ElementsKind elements_kind = object->GetElementsKind();
ASSERT(IsFastElementsKind(elements_kind));
// Smis should never trigger transitions.
@@ -4522,6 +5486,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StoreArrayLiteralElement) {
// Check whether the debugger is about to step into the callback that is passed
// to a built-in function such as Array.forEach.
RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugCallbackSupportsStepping) {
+ SealHandleScope shs(isolate);
#ifdef ENABLE_DEBUGGER_SUPPORT
if (!isolate->IsDebuggerActive() || !isolate->debug()->StepInActive()) {
return isolate->heap()->false_value();
@@ -4541,6 +5506,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugCallbackSupportsStepping) {
// Set one shot breakpoints for the callback function that is passed to a
// built-in function such as Array.forEach to enable stepping into the callback.
RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPrepareStepInIfStepping) {
+ SealHandleScope shs(isolate);
#ifdef ENABLE_DEBUGGER_SUPPORT
Debug* debug = isolate->debug();
if (!debug->IsStepping()) return isolate->heap()->undefined_value();
@@ -4559,10 +5525,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPrepareStepInIfStepping) {
// Set a local property, even if it is READ_ONLY. If the property does not
// exist, it will be added with attributes NONE.
RUNTIME_FUNCTION(MaybeObject*, Runtime_IgnoreAttributesAndSetProperty) {
- NoHandleAllocation ha;
+ HandleScope scope(isolate);
RUNTIME_ASSERT(args.length() == 3 || args.length() == 4);
- CONVERT_ARG_CHECKED(JSObject, object, 0);
- CONVERT_ARG_CHECKED(String, name, 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
// Compute attributes.
PropertyAttributes attributes = NONE;
if (args.length() == 4) {
@@ -4572,73 +5539,82 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_IgnoreAttributesAndSetProperty) {
(unchecked_value & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0);
attributes = static_cast<PropertyAttributes>(unchecked_value);
}
-
- return object->
- SetLocalPropertyIgnoreAttributes(name, args[2], attributes);
+ Handle<Object> result = JSObject::SetLocalPropertyIgnoreAttributes(
+ object, name, value, attributes);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_DeleteProperty) {
- NoHandleAllocation ha;
+ HandleScope scope(isolate);
ASSERT(args.length() == 3);
-
- CONVERT_ARG_CHECKED(JSReceiver, object, 0);
- CONVERT_ARG_CHECKED(String, key, 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, object, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Name, key, 1);
CONVERT_STRICT_MODE_ARG_CHECKED(strict_mode, 2);
- return object->DeleteProperty(key, (strict_mode == kStrictMode)
- ? JSReceiver::STRICT_DELETION
- : JSReceiver::NORMAL_DELETION);
+ JSReceiver::DeleteMode delete_mode = (strict_mode == kStrictMode)
+ ? JSReceiver::STRICT_DELETION : JSReceiver::NORMAL_DELETION;
+ Handle<Object> result = JSReceiver::DeleteProperty(object, key, delete_mode);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
}
-static Object* HasLocalPropertyImplementation(Isolate* isolate,
- Handle<JSObject> object,
- Handle<String> key) {
- if (object->HasLocalProperty(*key)) return isolate->heap()->true_value();
+static MaybeObject* HasLocalPropertyImplementation(Isolate* isolate,
+ Handle<JSObject> object,
+ Handle<Name> key) {
+ if (JSReceiver::HasLocalProperty(object, key)) {
+ return isolate->heap()->true_value();
+ }
// Handle hidden prototypes. If there's a hidden prototype above this thing
// then we have to check it for properties, because they are supposed to
// look like they are on this object.
- Handle<Object> proto(object->GetPrototype());
+ Handle<Object> proto(object->GetPrototype(), isolate);
if (proto->IsJSObject() &&
Handle<JSObject>::cast(proto)->map()->is_hidden_prototype()) {
return HasLocalPropertyImplementation(isolate,
Handle<JSObject>::cast(proto),
key);
}
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
return isolate->heap()->false_value();
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_HasLocalProperty) {
- NoHandleAllocation ha;
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
- CONVERT_ARG_CHECKED(String, key, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Name, key, 1);
+ Handle<Object> object = args.at<Object>(0);
uint32_t index;
const bool key_is_array_index = key->AsArrayIndex(&index);
- Object* obj = args[0];
// Only JS objects can have properties.
- if (obj->IsJSObject()) {
- JSObject* object = JSObject::cast(obj);
+ if (object->IsJSObject()) {
+ Handle<JSObject> js_obj = Handle<JSObject>::cast(object);
// Fast case: either the key is a real named property or it is not
// an array index and there are no interceptors or hidden
// prototypes.
- if (object->HasRealNamedProperty(key)) return isolate->heap()->true_value();
- Map* map = object->map();
+ if (JSObject::HasRealNamedProperty(js_obj, key)) {
+ ASSERT(!isolate->has_scheduled_exception());
+ return isolate->heap()->true_value();
+ } else {
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
+ }
+ Map* map = js_obj->map();
if (!key_is_array_index &&
!map->has_named_interceptor() &&
!HeapObject::cast(map->prototype())->map()->is_hidden_prototype()) {
return isolate->heap()->false_value();
}
// Slow case.
- HandleScope scope(isolate);
return HasLocalPropertyImplementation(isolate,
- Handle<JSObject>(object),
- Handle<String>(key));
- } else if (obj->IsString() && key_is_array_index) {
+ Handle<JSObject>(js_obj),
+ Handle<Name>(key));
+ } else if (object->IsString() && key_is_array_index) {
// Well, there is one exception: Handle [] on strings.
- String* string = String::cast(obj);
+ Handle<String> string = Handle<String>::cast(object);
if (index < static_cast<uint32_t>(string->length())) {
return isolate->heap()->true_value();
}
@@ -4648,73 +5624,45 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_HasLocalProperty) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_HasProperty) {
- NoHandleAllocation na;
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
- CONVERT_ARG_CHECKED(JSReceiver, receiver, 0);
- CONVERT_ARG_CHECKED(String, key, 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Name, key, 1);
- bool result = receiver->HasProperty(key);
+ bool result = JSReceiver::HasProperty(receiver, key);
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
if (isolate->has_pending_exception()) return Failure::Exception();
return isolate->heap()->ToBoolean(result);
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_HasElement) {
- NoHandleAllocation na;
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
- CONVERT_ARG_CHECKED(JSReceiver, receiver, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0);
CONVERT_SMI_ARG_CHECKED(index, 1);
- bool result = receiver->HasElement(index);
+ bool result = JSReceiver::HasElement(receiver, index);
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
if (isolate->has_pending_exception()) return Failure::Exception();
return isolate->heap()->ToBoolean(result);
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_IsPropertyEnumerable) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(JSObject, object, 0);
- CONVERT_ARG_CHECKED(String, key, 1);
-
- uint32_t index;
- if (key->AsArrayIndex(&index)) {
- JSObject::LocalElementType type = object->HasLocalElement(index);
- switch (type) {
- case JSObject::UNDEFINED_ELEMENT:
- case JSObject::STRING_CHARACTER_ELEMENT:
- return isolate->heap()->false_value();
- case JSObject::INTERCEPTED_ELEMENT:
- case JSObject::FAST_ELEMENT:
- return isolate->heap()->true_value();
- case JSObject::DICTIONARY_ELEMENT: {
- if (object->IsJSGlobalProxy()) {
- Object* proto = object->GetPrototype();
- if (proto->IsNull()) {
- return isolate->heap()->false_value();
- }
- ASSERT(proto->IsJSGlobalObject());
- object = JSObject::cast(proto);
- }
- FixedArray* elements = FixedArray::cast(object->elements());
- SeededNumberDictionary* dictionary = NULL;
- if (elements->map() ==
- isolate->heap()->non_strict_arguments_elements_map()) {
- dictionary = SeededNumberDictionary::cast(elements->get(1));
- } else {
- dictionary = SeededNumberDictionary::cast(elements);
- }
- int entry = dictionary->FindEntry(index);
- ASSERT(entry != SeededNumberDictionary::kNotFound);
- PropertyDetails details = dictionary->DetailsAt(entry);
- return isolate->heap()->ToBoolean(!details.IsDontEnum());
- }
- }
- }
+ CONVERT_ARG_CHECKED(Name, key, 1);
PropertyAttributes att = object->GetLocalPropertyAttribute(key);
- return isolate->heap()->ToBoolean(att != ABSENT && (att & DONT_ENUM) == 0);
+ if (att == ABSENT || (att & DONT_ENUM) != 0) {
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
+ return isolate->heap()->false_value();
+ }
+ ASSERT(!isolate->has_scheduled_exception());
+ return isolate->heap()->true_value();
}
@@ -4735,6 +5683,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetPropertyNames) {
// have none, the map of the object. This is used to speed up
// the check for deletions during a for-in.
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetPropertyNamesFast) {
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSReceiver, raw_object, 0);
@@ -4774,11 +5723,13 @@ static int LocalPrototypeChainLength(JSObject* obj) {
// args[0]: object
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLocalPropertyNames) {
HandleScope scope(isolate);
- ASSERT(args.length() == 1);
+ ASSERT(args.length() == 2);
if (!args[0]->IsJSObject()) {
return isolate->heap()->undefined_value();
}
CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
+ CONVERT_BOOLEAN_ARG_CHECKED(include_symbols, 1);
+ PropertyAttributes filter = include_symbols ? NONE : SYMBOLIC;
// Skip the global proxy as it has no properties and always delegates to the
// real global object.
@@ -4789,6 +5740,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLocalPropertyNames) {
isolate->heap()->undefined_value(),
v8::ACCESS_KEYS)) {
isolate->ReportFailedAccessCheck(*obj, v8::ACCESS_KEYS);
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
return *isolate->factory()->NewJSArray(0);
}
obj = Handle<JSObject>(JSObject::cast(obj->GetPrototype()));
@@ -4808,10 +5760,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLocalPropertyNames) {
isolate->heap()->undefined_value(),
v8::ACCESS_KEYS)) {
isolate->ReportFailedAccessCheck(*jsproto, v8::ACCESS_KEYS);
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
return *isolate->factory()->NewJSArray(0);
}
int n;
- n = jsproto->NumberOfLocalProperties();
+ n = jsproto->NumberOfLocalProperties(filter);
local_property_count[i] = n;
total_property_count += n;
if (i < length - 1) {
@@ -4828,7 +5781,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLocalPropertyNames) {
int proto_with_hidden_properties = 0;
int next_copy_index = 0;
for (int i = 0; i < length; i++) {
- jsproto->GetLocalPropertyNames(*names, next_copy_index);
+ jsproto->GetLocalPropertyNames(*names, next_copy_index, filter);
next_copy_index += local_property_count[i];
if (jsproto->HasHiddenProperties()) {
proto_with_hidden_properties++;
@@ -4838,7 +5791,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLocalPropertyNames) {
}
}
- // Filter out name of hidden propeties object.
+ // Filter out name of hidden properties object.
if (proto_with_hidden_properties > 0) {
Handle<FixedArray> old_names = names;
names = isolate->factory()->NewFixedArray(
@@ -4846,7 +5799,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLocalPropertyNames) {
int dest_pos = 0;
for (int i = 0; i < total_property_count; i++) {
Object* name = old_names->get(i);
- if (name == isolate->heap()->hidden_symbol()) {
+ if (name == isolate->heap()->hidden_string()) {
continue;
}
names->set(dest_pos++, name);
@@ -4923,9 +5876,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetIndexedInterceptorElementNames) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_LocalKeys) {
+ HandleScope scope(isolate);
ASSERT_EQ(args.length(), 1);
CONVERT_ARG_CHECKED(JSObject, raw_object, 0);
- HandleScope scope(isolate);
Handle<JSObject> object(raw_object);
if (object->IsJSGlobalProxy()) {
@@ -4934,10 +5887,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LocalKeys) {
!isolate->MayNamedAccess(*object, isolate->heap()->undefined_value(),
v8::ACCESS_KEYS)) {
isolate->ReportFailedAccessCheck(*object, v8::ACCESS_KEYS);
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
return *isolate->factory()->NewJSArray(0);
}
- Handle<Object> proto(object->GetPrototype());
+ Handle<Object> proto(object->GetPrototype(), isolate);
// If proxy is detached we simply return an empty array.
if (proto->IsNull()) return *isolate->factory()->NewJSArray(0);
object = Handle<JSObject>::cast(proto);
@@ -4971,7 +5925,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LocalKeys) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetArgumentsProperty) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
// Compute the frame holding the arguments.
@@ -4989,11 +5943,17 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetArgumentsProperty) {
return frame->GetParameter(index);
}
+ if (args[0]->IsSymbol()) {
+ // Lookup in the initial Object.prototype object.
+ return isolate->initial_object_prototype()->GetProperty(
+ Symbol::cast(args[0]));
+ }
+
// Convert the key to a string.
HandleScope scope(isolate);
bool exception = false;
Handle<Object> converted =
- Execution::ToString(args.at<Object>(0), &exception);
+ Execution::ToString(isolate, args.at<Object>(0), &exception);
if (exception) return Failure::Exception();
Handle<String> key = Handle<String>::cast(converted);
@@ -5002,16 +5962,15 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetArgumentsProperty) {
if (index < n) {
return frame->GetParameter(index);
} else {
- return isolate->initial_object_prototype()->GetElement(index);
+ return isolate->initial_object_prototype()->GetElement(isolate, index);
}
}
// Handle special arguments properties.
- if (key->Equals(isolate->heap()->length_symbol())) return Smi::FromInt(n);
- if (key->Equals(isolate->heap()->callee_symbol())) {
- Object* function = frame->function();
- if (function->IsJSFunction() &&
- !JSFunction::cast(function)->shared()->is_classic_mode()) {
+ if (key->Equals(isolate->heap()->length_string())) return Smi::FromInt(n);
+ if (key->Equals(isolate->heap()->callee_string())) {
+ JSFunction* function = frame->function();
+ if (!function->shared()->is_classic_mode()) {
return isolate->Throw(*isolate->factory()->NewTypeError(
"strict_arguments_callee", HandleVector<Object>(NULL, 0)));
}
@@ -5024,65 +5983,69 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetArgumentsProperty) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_ToFastProperties) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
- Object* object = args[0];
- return (object->IsJSObject() && !object->IsGlobalObject())
- ? JSObject::cast(object)->TransformToFastProperties(0)
- : object;
+ CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
+ if (object->IsJSObject() && !object->IsGlobalObject()) {
+ JSObject::TransformToFastProperties(Handle<JSObject>::cast(object), 0);
+ }
+ return *object;
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_ToBool) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
- return args[0]->ToBoolean();
+ return isolate->heap()->ToBoolean(args[0]->BooleanValue());
}
// Returns the type string of a value; see ECMA-262, 11.4.3 (p 47).
// Possible optimizations: put the type string into the oddballs.
RUNTIME_FUNCTION(MaybeObject*, Runtime_Typeof) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
Object* obj = args[0];
- if (obj->IsNumber()) return isolate->heap()->number_symbol();
+ if (obj->IsNumber()) return isolate->heap()->number_string();
HeapObject* heap_obj = HeapObject::cast(obj);
// typeof an undetectable object is 'undefined'
if (heap_obj->map()->is_undetectable()) {
- return isolate->heap()->undefined_symbol();
+ return isolate->heap()->undefined_string();
}
InstanceType instance_type = heap_obj->map()->instance_type();
if (instance_type < FIRST_NONSTRING_TYPE) {
- return isolate->heap()->string_symbol();
+ return isolate->heap()->string_string();
}
switch (instance_type) {
case ODDBALL_TYPE:
if (heap_obj->IsTrue() || heap_obj->IsFalse()) {
- return isolate->heap()->boolean_symbol();
+ return isolate->heap()->boolean_string();
}
if (heap_obj->IsNull()) {
return FLAG_harmony_typeof
- ? isolate->heap()->null_symbol()
- : isolate->heap()->object_symbol();
+ ? isolate->heap()->null_string()
+ : isolate->heap()->object_string();
}
ASSERT(heap_obj->IsUndefined());
- return isolate->heap()->undefined_symbol();
+ return isolate->heap()->undefined_string();
+ case SYMBOL_TYPE:
+ return isolate->heap()->symbol_string();
case JS_FUNCTION_TYPE:
case JS_FUNCTION_PROXY_TYPE:
- return isolate->heap()->function_symbol();
+ return isolate->heap()->function_string();
default:
// For any kind of object not handled above, the spec rule for
// host objects gives that it is okay to return "object"
- return isolate->heap()->object_symbol();
+ return isolate->heap()->object_string();
}
}
-static bool AreDigits(const char*s, int from, int to) {
+static bool AreDigits(const uint8_t*s, int from, int to) {
for (int i = from; i < to; i++) {
if (s[i] < '0' || s[i] > '9') return false;
}
@@ -5091,7 +6054,7 @@ static bool AreDigits(const char*s, int from, int to) {
}
-static int ParseDecimalInteger(const char*s, int from, int to) {
+static int ParseDecimalInteger(const uint8_t*s, int from, int to) {
ASSERT(to - from < 10); // Overflow is not possible.
ASSERT(from < to);
int d = s[from] - '0';
@@ -5105,17 +6068,17 @@ static int ParseDecimalInteger(const char*s, int from, int to) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_StringToNumber) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(String, subject, 0);
subject->TryFlatten();
// Fast case: short integer or some sorts of junk values.
int len = subject->length();
- if (subject->IsSeqAsciiString()) {
+ if (subject->IsSeqOneByteString()) {
if (len == 0) return Smi::FromInt(0);
- char const* data = SeqAsciiString::cast(subject)->GetChars();
+ uint8_t const* data = SeqOneByteString::cast(subject)->GetChars();
bool minus = (data[0] == '-');
int start_pos = (minus ? 1 : 0);
@@ -5125,8 +6088,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringToNumber) {
// Fast check for a junk value. A valid string may start from a
// whitespace, a sign ('+' or '-'), the decimal point, a decimal digit or
// the 'I' character ('Infinity'). All of those have codes not greater than
- // '9' except 'I'.
- if (data[start_pos] != 'I') {
+ // '9' except 'I' and &nbsp;.
+ if (data[start_pos] != 'I' && data[start_pos] != 0xa0) {
return isolate->heap()->nan_value();
}
} else if (len - start_pos < 10 && AreDigits(data, start_pos, len)) {
@@ -5154,614 +6117,82 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringToNumber) {
}
// Slower case.
+ int flags = ALLOW_HEX;
+ if (FLAG_harmony_numeric_literals) {
+ // The current spec draft has not updated "ToNumber Applied to the String
+ // Type", https://bugs.ecmascript.org/show_bug.cgi?id=1584
+ flags |= ALLOW_OCTAL | ALLOW_BINARY;
+ }
return isolate->heap()->NumberFromDouble(
- StringToDouble(isolate->unicode_cache(), subject, ALLOW_HEX));
+ StringToDouble(isolate->unicode_cache(), subject, flags));
}
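// --- Editorial note, not part of the patch ----------------------------------
// The fast path above only needs a cheap "is this obviously junk?" filter on
// the first character: every valid numeric string starts with whitespace, a
// sign, '.', a digit, 'I' (as in "Infinity") or a no-break space (0xa0), and
// of those only 'I' and 0xa0 have a character code above '9'. The slow path
// then parses for real, with ALLOW_OCTAL | ALLOW_BINARY added under
// --harmony_numeric_literals so that e.g. Number("0b101") is 5 and
// Number("0o17") is 15. The first-character filter, restated on its own:
#include <cstdint>

static bool DefinitelyNotANumber(const uint8_t* data, int start_pos) {
  return data[start_pos] > '9' &&
         data[start_pos] != 'I' &&
         data[start_pos] != 0xa0;
}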
-RUNTIME_FUNCTION(MaybeObject*, Runtime_StringFromCharCodeArray) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 1);
-
- CONVERT_ARG_CHECKED(JSArray, codes, 0);
- int length = Smi::cast(codes->length())->value();
-
- // Check if the string can be ASCII.
- int i;
- for (i = 0; i < length; i++) {
- Object* element;
- { MaybeObject* maybe_element = codes->GetElement(i);
- // We probably can't get an exception here, but just in order to enforce
- // the checking of inputs in the runtime calls we check here.
- if (!maybe_element->ToObject(&element)) return maybe_element;
- }
- CONVERT_NUMBER_CHECKED(int, chr, Int32, element);
- if ((chr & 0xffff) > String::kMaxAsciiCharCode)
- break;
- }
-
- MaybeObject* maybe_object = NULL;
- if (i == length) { // The string is ASCII.
- maybe_object = isolate->heap()->AllocateRawAsciiString(length);
- } else { // The string is not ASCII.
- maybe_object = isolate->heap()->AllocateRawTwoByteString(length);
- }
-
- Object* object = NULL;
- if (!maybe_object->ToObject(&object)) return maybe_object;
- String* result = String::cast(object);
- for (int i = 0; i < length; i++) {
- Object* element;
- { MaybeObject* maybe_element = codes->GetElement(i);
- if (!maybe_element->ToObject(&element)) return maybe_element;
- }
- CONVERT_NUMBER_CHECKED(int, chr, Int32, element);
- result->Set(i, chr & 0xffff);
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NewString) {
+ SealHandleScope shs(isolate);
+ CONVERT_SMI_ARG_CHECKED(length, 0);
+ CONVERT_BOOLEAN_ARG_CHECKED(is_one_byte, 1);
+ if (length == 0) return isolate->heap()->empty_string();
+ if (is_one_byte) {
+ return isolate->heap()->AllocateRawOneByteString(length);
+ } else {
+ return isolate->heap()->AllocateRawTwoByteString(length);
}
- return result;
}
-// kNotEscaped is generated by the following:
-//
-// #!/bin/perl
-// for (my $i = 0; $i < 256; $i++) {
-// print "\n" if $i % 16 == 0;
-// my $c = chr($i);
-// my $escaped = 1;
-// $escaped = 0 if $c =~ m#[A-Za-z0-9@*_+./-]#;
-// print $escaped ? "0, " : "1, ";
-// }
-
-
-static bool IsNotEscaped(uint16_t character) {
- // Only for 8 bit characters, the rest are always escaped (in a different way)
- ASSERT(character < 256);
- static const char kNotEscaped[256] = {
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1,
- 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- };
- return kNotEscaped[character] != 0;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_TruncateString) {
+ HandleScope scope(isolate);
+ CONVERT_ARG_HANDLE_CHECKED(SeqString, string, 0);
+ CONVERT_SMI_ARG_CHECKED(new_length, 1);
+ return *SeqString::Truncate(string, new_length);
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_URIEscape) {
- const char hex_chars[] = "0123456789ABCDEF";
- NoHandleAllocation ha;
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
- CONVERT_ARG_CHECKED(String, source, 0);
-
- source->TryFlatten();
-
- int escaped_length = 0;
- int length = source->length();
- {
- Access<StringInputBuffer> buffer(
- isolate->runtime_state()->string_input_buffer());
- buffer->Reset(source);
- while (buffer->has_more()) {
- uint16_t character = buffer->GetNext();
- if (character >= 256) {
- escaped_length += 6;
- } else if (IsNotEscaped(character)) {
- escaped_length++;
- } else {
- escaped_length += 3;
- }
- // We don't allow strings that are longer than a maximal length.
- ASSERT(String::kMaxLength < 0x7fffffff - 6); // Cannot overflow.
- if (escaped_length > String::kMaxLength) {
- isolate->context()->mark_out_of_memory();
- return Failure::OutOfMemoryException();
- }
- }
- }
- // No length change implies no change. Return original string if no change.
- if (escaped_length == length) {
- return source;
- }
- Object* o;
- { MaybeObject* maybe_o =
- isolate->heap()->AllocateRawAsciiString(escaped_length);
- if (!maybe_o->ToObject(&o)) return maybe_o;
- }
- String* destination = String::cast(o);
- int dest_position = 0;
-
- Access<StringInputBuffer> buffer(
- isolate->runtime_state()->string_input_buffer());
- buffer->Rewind();
- while (buffer->has_more()) {
- uint16_t chr = buffer->GetNext();
- if (chr >= 256) {
- destination->Set(dest_position, '%');
- destination->Set(dest_position+1, 'u');
- destination->Set(dest_position+2, hex_chars[chr >> 12]);
- destination->Set(dest_position+3, hex_chars[(chr >> 8) & 0xf]);
- destination->Set(dest_position+4, hex_chars[(chr >> 4) & 0xf]);
- destination->Set(dest_position+5, hex_chars[chr & 0xf]);
- dest_position += 6;
- } else if (IsNotEscaped(chr)) {
- destination->Set(dest_position, chr);
- dest_position++;
- } else {
- destination->Set(dest_position, '%');
- destination->Set(dest_position+1, hex_chars[chr >> 4]);
- destination->Set(dest_position+2, hex_chars[chr & 0xf]);
- dest_position += 3;
- }
- }
- return destination;
-}
-
-
-static inline int TwoDigitHex(uint16_t character1, uint16_t character2) {
- static const signed char kHexValue['g'] = {
- -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
- 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -1, -1, -1, -1, -1, -1,
- -1, 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
- -1, 10, 11, 12, 13, 14, 15 };
-
- if (character1 > 'f') return -1;
- int hi = kHexValue[character1];
- if (hi == -1) return -1;
- if (character2 > 'f') return -1;
- int lo = kHexValue[character2];
- if (lo == -1) return -1;
- return (hi << 4) + lo;
-}
-
-
-static inline int Unescape(String* source,
- int i,
- int length,
- int* step) {
- uint16_t character = source->Get(i);
- int32_t hi = 0;
- int32_t lo = 0;
- if (character == '%' &&
- i <= length - 6 &&
- source->Get(i + 1) == 'u' &&
- (hi = TwoDigitHex(source->Get(i + 2),
- source->Get(i + 3))) != -1 &&
- (lo = TwoDigitHex(source->Get(i + 4),
- source->Get(i + 5))) != -1) {
- *step = 6;
- return (hi << 8) + lo;
- } else if (character == '%' &&
- i <= length - 3 &&
- (lo = TwoDigitHex(source->Get(i + 1),
- source->Get(i + 2))) != -1) {
- *step = 3;
- return lo;
- } else {
- *step = 1;
- return character;
- }
+ CONVERT_ARG_HANDLE_CHECKED(String, source, 0);
+ Handle<String> string = FlattenGetString(source);
+ ASSERT(string->IsFlat());
+ Handle<String> result = string->IsOneByteRepresentationUnderneath()
+ ? URIEscape::Escape<uint8_t>(isolate, source)
+ : URIEscape::Escape<uc16>(isolate, source);
+ if (result.is_null()) return Failure::OutOfMemoryException(0x12);
+ return *result;
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_URIUnescape) {
- NoHandleAllocation ha;
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
- CONVERT_ARG_CHECKED(String, source, 0);
-
- source->TryFlatten();
-
- bool ascii = true;
- int length = source->length();
-
- int unescaped_length = 0;
- for (int i = 0; i < length; unescaped_length++) {
- int step;
- if (Unescape(source, i, length, &step) > String::kMaxAsciiCharCode) {
- ascii = false;
- }
- i += step;
- }
-
- // No length change implies no change. Return original string if no change.
- if (unescaped_length == length)
- return source;
-
- Object* o;
- { MaybeObject* maybe_o =
- ascii ?
- isolate->heap()->AllocateRawAsciiString(unescaped_length) :
- isolate->heap()->AllocateRawTwoByteString(unescaped_length);
- if (!maybe_o->ToObject(&o)) return maybe_o;
- }
- String* destination = String::cast(o);
-
- int dest_position = 0;
- for (int i = 0; i < length; dest_position++) {
- int step;
- destination->Set(dest_position, Unescape(source, i, length, &step));
- i += step;
- }
- return destination;
-}
-
-
-static const unsigned int kQuoteTableLength = 128u;
-
-static const int kJsonQuotesCharactersPerEntry = 8;
-static const char* const JsonQuotes =
- "\\u0000 \\u0001 \\u0002 \\u0003 "
- "\\u0004 \\u0005 \\u0006 \\u0007 "
- "\\b \\t \\n \\u000b "
- "\\f \\r \\u000e \\u000f "
- "\\u0010 \\u0011 \\u0012 \\u0013 "
- "\\u0014 \\u0015 \\u0016 \\u0017 "
- "\\u0018 \\u0019 \\u001a \\u001b "
- "\\u001c \\u001d \\u001e \\u001f "
- " ! \\\" # "
- "$ % & ' "
- "( ) * + "
- ", - . / "
- "0 1 2 3 "
- "4 5 6 7 "
- "8 9 : ; "
- "< = > ? "
- "@ A B C "
- "D E F G "
- "H I J K "
- "L M N O "
- "P Q R S "
- "T U V W "
- "X Y Z [ "
- "\\\\ ] ^ _ "
- "` a b c "
- "d e f g "
- "h i j k "
- "l m n o "
- "p q r s "
- "t u v w "
- "x y z { "
- "| } ~ \177 ";
-
-
-// For a string that is less than 32k characters it should always be
-// possible to allocate it in new space.
-static const int kMaxGuaranteedNewSpaceString = 32 * 1024;
-
-
-// Doing JSON quoting cannot make the string more than this many times larger.
-static const int kJsonQuoteWorstCaseBlowup = 6;
-
-static const int kSpaceForQuotesAndComma = 3;
-static const int kSpaceForBrackets = 2;
-
-// Covers the entire ASCII range (all other characters are unchanged by JSON
-// quoting).
-static const byte JsonQuoteLengths[kQuoteTableLength] = {
- 6, 6, 6, 6, 6, 6, 6, 6,
- 2, 2, 2, 6, 2, 2, 6, 6,
- 6, 6, 6, 6, 6, 6, 6, 6,
- 6, 6, 6, 6, 6, 6, 6, 6,
- 1, 1, 2, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 2, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
-};
-
-
-template <typename StringType>
-MaybeObject* AllocateRawString(Isolate* isolate, int length);
-
-
-template <>
-MaybeObject* AllocateRawString<SeqTwoByteString>(Isolate* isolate, int length) {
- return isolate->heap()->AllocateRawTwoByteString(length);
-}
-
-
-template <>
-MaybeObject* AllocateRawString<SeqAsciiString>(Isolate* isolate, int length) {
- return isolate->heap()->AllocateRawAsciiString(length);
-}
-
-
-template <typename Char, typename StringType, bool comma>
-static MaybeObject* SlowQuoteJsonString(Isolate* isolate,
- Vector<const Char> characters) {
- int length = characters.length();
- const Char* read_cursor = characters.start();
- const Char* end = read_cursor + length;
- const int kSpaceForQuotes = 2 + (comma ? 1 :0);
- int quoted_length = kSpaceForQuotes;
- while (read_cursor < end) {
- Char c = *(read_cursor++);
- if (sizeof(Char) > 1u && static_cast<unsigned>(c) >= kQuoteTableLength) {
- quoted_length++;
- } else {
- quoted_length += JsonQuoteLengths[static_cast<unsigned>(c)];
- }
- }
- MaybeObject* new_alloc = AllocateRawString<StringType>(isolate,
- quoted_length);
- Object* new_object;
- if (!new_alloc->ToObject(&new_object)) {
- return new_alloc;
- }
- StringType* new_string = StringType::cast(new_object);
-
- Char* write_cursor = reinterpret_cast<Char*>(
- new_string->address() + SeqString::kHeaderSize);
- if (comma) *(write_cursor++) = ',';
- *(write_cursor++) = '"';
-
- read_cursor = characters.start();
- while (read_cursor < end) {
- Char c = *(read_cursor++);
- if (sizeof(Char) > 1u && static_cast<unsigned>(c) >= kQuoteTableLength) {
- *(write_cursor++) = c;
- } else {
- int len = JsonQuoteLengths[static_cast<unsigned>(c)];
- const char* replacement = JsonQuotes +
- static_cast<unsigned>(c) * kJsonQuotesCharactersPerEntry;
- for (int i = 0; i < len; i++) {
- *write_cursor++ = *replacement++;
- }
- }
- }
- *(write_cursor++) = '"';
- return new_string;
-}
-
-
-template <typename SinkChar, typename SourceChar>
-static inline SinkChar* WriteQuoteJsonString(
- Isolate* isolate,
- SinkChar* write_cursor,
- Vector<const SourceChar> characters) {
- // SinkChar is only char if SourceChar is guaranteed to be char.
- ASSERT(sizeof(SinkChar) >= sizeof(SourceChar));
- const SourceChar* read_cursor = characters.start();
- const SourceChar* end = read_cursor + characters.length();
- *(write_cursor++) = '"';
- while (read_cursor < end) {
- SourceChar c = *(read_cursor++);
- if (sizeof(SourceChar) > 1u &&
- static_cast<unsigned>(c) >= kQuoteTableLength) {
- *(write_cursor++) = static_cast<SinkChar>(c);
- } else {
- int len = JsonQuoteLengths[static_cast<unsigned>(c)];
- const char* replacement = JsonQuotes +
- static_cast<unsigned>(c) * kJsonQuotesCharactersPerEntry;
- write_cursor[0] = replacement[0];
- if (len > 1) {
- write_cursor[1] = replacement[1];
- if (len > 2) {
- ASSERT(len == 6);
- write_cursor[2] = replacement[2];
- write_cursor[3] = replacement[3];
- write_cursor[4] = replacement[4];
- write_cursor[5] = replacement[5];
- }
- }
- write_cursor += len;
- }
- }
- *(write_cursor++) = '"';
- return write_cursor;
-}
-
-
-template <typename Char, typename StringType, bool comma>
-static MaybeObject* QuoteJsonString(Isolate* isolate,
- Vector<const Char> characters) {
- int length = characters.length();
- isolate->counters()->quote_json_char_count()->Increment(length);
- int worst_case_length =
- length * kJsonQuoteWorstCaseBlowup + kSpaceForQuotesAndComma;
- if (worst_case_length > kMaxGuaranteedNewSpaceString) {
- return SlowQuoteJsonString<Char, StringType, comma>(isolate, characters);
- }
-
- MaybeObject* new_alloc = AllocateRawString<StringType>(isolate,
- worst_case_length);
- Object* new_object;
- if (!new_alloc->ToObject(&new_object)) {
- return new_alloc;
- }
- if (!isolate->heap()->new_space()->Contains(new_object)) {
- // Even if our string is small enough to fit in new space we still have to
- // handle it being allocated in old space as may happen in the third
- // attempt. See CALL_AND_RETRY in heap-inl.h and similar code in
- // CEntryStub::GenerateCore.
- return SlowQuoteJsonString<Char, StringType, comma>(isolate, characters);
- }
- StringType* new_string = StringType::cast(new_object);
- ASSERT(isolate->heap()->new_space()->Contains(new_string));
-
- Char* write_cursor = reinterpret_cast<Char*>(
- new_string->address() + SeqString::kHeaderSize);
- if (comma) *(write_cursor++) = ',';
- write_cursor = WriteQuoteJsonString<Char, Char>(isolate,
- write_cursor,
- characters);
- int final_length = static_cast<int>(
- write_cursor - reinterpret_cast<Char*>(
- new_string->address() + SeqString::kHeaderSize));
- isolate->heap()->new_space()->
- template ShrinkStringAtAllocationBoundary<StringType>(
- new_string, final_length);
- return new_string;
+ CONVERT_ARG_HANDLE_CHECKED(String, source, 0);
+ Handle<String> string = FlattenGetString(source);
+ ASSERT(string->IsFlat());
+ return string->IsOneByteRepresentationUnderneath()
+ ? *URIUnescape::Unescape<uint8_t>(isolate, source)
+ : *URIUnescape::Unescape<uc16>(isolate, source);
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_QuoteJSONString) {
- NoHandleAllocation ha;
- CONVERT_ARG_CHECKED(String, str, 0);
- if (!str->IsFlat()) {
- MaybeObject* try_flatten = str->TryFlatten();
- Object* flat;
- if (!try_flatten->ToObject(&flat)) {
- return try_flatten;
- }
- str = String::cast(flat);
- ASSERT(str->IsFlat());
- }
- String::FlatContent flat = str->GetFlatContent();
- ASSERT(flat.IsFlat());
- if (flat.IsTwoByte()) {
- return QuoteJsonString<uc16, SeqTwoByteString, false>(isolate,
- flat.ToUC16Vector());
- } else {
- return QuoteJsonString<char, SeqAsciiString, false>(isolate,
- flat.ToAsciiVector());
- }
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_QuoteJSONStringComma) {
- NoHandleAllocation ha;
- CONVERT_ARG_CHECKED(String, str, 0);
- if (!str->IsFlat()) {
- MaybeObject* try_flatten = str->TryFlatten();
- Object* flat;
- if (!try_flatten->ToObject(&flat)) {
- return try_flatten;
- }
- str = String::cast(flat);
- ASSERT(str->IsFlat());
- }
- String::FlatContent flat = str->GetFlatContent();
- if (flat.IsTwoByte()) {
- return QuoteJsonString<uc16, SeqTwoByteString, true>(isolate,
- flat.ToUC16Vector());
- } else {
- return QuoteJsonString<char, SeqAsciiString, true>(isolate,
- flat.ToAsciiVector());
- }
-}
-
-
-template <typename Char, typename StringType>
-static MaybeObject* QuoteJsonStringArray(Isolate* isolate,
- FixedArray* array,
- int worst_case_length) {
- int length = array->length();
-
- MaybeObject* new_alloc = AllocateRawString<StringType>(isolate,
- worst_case_length);
- Object* new_object;
- if (!new_alloc->ToObject(&new_object)) {
- return new_alloc;
- }
- if (!isolate->heap()->new_space()->Contains(new_object)) {
- // Even if our string is small enough to fit in new space we still have to
- // handle it being allocated in old space as may happen in the third
- // attempt. See CALL_AND_RETRY in heap-inl.h and similar code in
- // CEntryStub::GenerateCore.
- return isolate->heap()->undefined_value();
- }
- AssertNoAllocation no_gc;
- StringType* new_string = StringType::cast(new_object);
- ASSERT(isolate->heap()->new_space()->Contains(new_string));
-
- Char* write_cursor = reinterpret_cast<Char*>(
- new_string->address() + SeqString::kHeaderSize);
- *(write_cursor++) = '[';
- for (int i = 0; i < length; i++) {
- if (i != 0) *(write_cursor++) = ',';
- String* str = String::cast(array->get(i));
- String::FlatContent content = str->GetFlatContent();
- ASSERT(content.IsFlat());
- if (content.IsTwoByte()) {
- write_cursor = WriteQuoteJsonString<Char, uc16>(isolate,
- write_cursor,
- content.ToUC16Vector());
- } else {
- write_cursor = WriteQuoteJsonString<Char, char>(isolate,
- write_cursor,
- content.ToAsciiVector());
- }
- }
- *(write_cursor++) = ']';
-
- int final_length = static_cast<int>(
- write_cursor - reinterpret_cast<Char*>(
- new_string->address() + SeqString::kHeaderSize));
- isolate->heap()->new_space()->
- template ShrinkStringAtAllocationBoundary<StringType>(
- new_string, final_length);
- return new_string;
+ HandleScope scope(isolate);
+ CONVERT_ARG_HANDLE_CHECKED(String, string, 0);
+ ASSERT(args.length() == 1);
+ return BasicJsonStringifier::StringifyString(isolate, string);
}
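// --- Editorial sketch, not part of the patch --------------------------------
// The hand-rolled quoting machinery removed in this hunk (the JsonQuotes /
// JsonQuoteLengths tables and the SlowQuoteJsonString / QuoteJsonString /
// QuoteJsonStringArray helpers) implemented ordinary JSON string escaping,
// which is now delegated to BasicJsonStringifier. The essence of that
// escaping, restated in a minimal stand-alone form (this is not the V8
// implementation):
#include <cstdio>
#include <string>

static std::string JsonQuote(const std::string& in) {
  std::string out = "\"";
  for (unsigned char c : in) {
    switch (c) {
      case '"':  out += "\\\""; break;
      case '\\': out += "\\\\"; break;
      case '\b': out += "\\b";  break;
      case '\t': out += "\\t";  break;
      case '\n': out += "\\n";  break;
      case '\f': out += "\\f";  break;
      case '\r': out += "\\r";  break;
      default:
        if (c < 0x20) {  // remaining control characters become \u00XX
          char buf[8];
          std::snprintf(buf, sizeof(buf), "\\u%04x", c);
          out += buf;
        } else {
          out += static_cast<char>(c);
        }
    }
  }
  out += '"';
  return out;  // e.g. JsonQuote("a\"b") == "\"a\\\"b\""
}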
-RUNTIME_FUNCTION(MaybeObject*, Runtime_QuoteJSONStringArray) {
- NoHandleAllocation ha;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_BasicJSONStringify) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
- CONVERT_ARG_CHECKED(JSArray, array, 0);
-
- if (!array->HasFastObjectElements()) {
- return isolate->heap()->undefined_value();
- }
- FixedArray* elements = FixedArray::cast(array->elements());
- int n = elements->length();
- bool ascii = true;
- int total_length = 0;
-
- for (int i = 0; i < n; i++) {
- Object* elt = elements->get(i);
- if (!elt->IsString()) return isolate->heap()->undefined_value();
- String* element = String::cast(elt);
- if (!element->IsFlat()) return isolate->heap()->undefined_value();
- total_length += element->length();
- if (ascii && element->IsTwoByteRepresentation()) {
- ascii = false;
- }
- }
-
- int worst_case_length =
- kSpaceForBrackets + n * kSpaceForQuotesAndComma
- + total_length * kJsonQuoteWorstCaseBlowup;
-
- if (worst_case_length > kMaxGuaranteedNewSpaceString) {
- return isolate->heap()->undefined_value();
- }
-
- if (ascii) {
- return QuoteJsonStringArray<char, SeqAsciiString>(isolate,
- elements,
- worst_case_length);
- } else {
- return QuoteJsonStringArray<uc16, SeqTwoByteString>(isolate,
- elements,
- worst_case_length);
- }
+ BasicJsonStringifier stringifier(isolate);
+ return stringifier.Stringify(Handle<Object>(args[0], isolate));
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_StringParseInt) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
CONVERT_ARG_CHECKED(String, s, 0);
CONVERT_SMI_ARG_CHECKED(radix, 1);
@@ -5775,7 +6206,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringParseInt) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_StringParseFloat) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
CONVERT_ARG_CHECKED(String, str, 0);
// ECMA-262 section 15.1.2.3, empty string is NaN
@@ -5806,8 +6237,8 @@ MUST_USE_RESULT static MaybeObject* ConvertCaseHelper(
// might break in the future if we implement more context and locale
// dependent upper/lower conversions.
Object* o;
- { MaybeObject* maybe_o = s->IsAsciiRepresentation()
- ? isolate->heap()->AllocateRawAsciiString(length)
+ { MaybeObject* maybe_o = s->IsOneByteRepresentation()
+ ? isolate->heap()->AllocateRawOneByteString(length)
: isolate->heap()->AllocateRawTwoByteString(length);
if (!maybe_o->ToObject(&o)) return maybe_o;
}
@@ -5816,15 +6247,15 @@ MUST_USE_RESULT static MaybeObject* ConvertCaseHelper(
// Convert all characters to upper case, assuming that they will fit
// in the buffer
- Access<StringInputBuffer> buffer(
- isolate->runtime_state()->string_input_buffer());
- buffer->Reset(s);
+ Access<ConsStringIteratorOp> op(
+ isolate->runtime_state()->string_iterator());
+ StringCharacterStream stream(s, op.value());
unibrow::uchar chars[Converter::kMaxWidth];
// We can assume that the string is not empty
- uc32 current = buffer->GetNext();
+ uc32 current = stream.GetNext();
for (int i = 0; i < length;) {
- bool has_next = buffer->has_more();
- uc32 next = has_next ? buffer->GetNext() : 0;
+ bool has_next = stream.HasMore();
+ uc32 next = has_next ? stream.GetNext() : 0;
int char_length = mapping->get(current, next, chars);
if (char_length == 0) {
// The case conversion of this character is the character itself.
@@ -5854,8 +6285,8 @@ MUST_USE_RESULT static MaybeObject* ConvertCaseHelper(
if (next_length == 0) next_length = 1;
}
int current_length = i + char_length + next_length;
- while (buffer->has_more()) {
- current = buffer->GetNext();
+ while (stream.HasMore()) {
+ current = stream.GetNext();
// NOTE: we use 0 as the next character here because, while
// the next character may affect what a character converts to,
// it does not in any case affect the length of what it convert
@@ -5865,7 +6296,7 @@ MUST_USE_RESULT static MaybeObject* ConvertCaseHelper(
current_length += char_length;
if (current_length > Smi::kMaxValue) {
isolate->context()->mark_out_of_memory();
- return Failure::OutOfMemoryException();
+ return Failure::OutOfMemoryException(0x13);
}
}
// Try again with the real length.
@@ -5894,7 +6325,7 @@ MUST_USE_RESULT static MaybeObject* ConvertCaseHelper(
namespace {
static const uintptr_t kOneInEveryByte = kUintptrAllBitsSet / 0xFF;
-
+static const uintptr_t kAsciiMask = kOneInEveryByte << 7;
// Given a word and two range boundaries returns a word with high bit
// set in every byte iff the corresponding input byte was strictly in
@@ -5904,11 +6335,9 @@ static const uintptr_t kOneInEveryByte = kUintptrAllBitsSet / 0xFF;
// Requires: all bytes in the input word and the boundaries must be
// ASCII (less than 0x7F).
static inline uintptr_t AsciiRangeMask(uintptr_t w, char m, char n) {
- // Every byte in an ASCII string is less than or equal to 0x7F.
- ASSERT((w & (kOneInEveryByte * 0x7F)) == w);
// Use strict inequalities since in edge cases the function could be
// further simplified.
- ASSERT(0 < m && m < n && n < 0x7F);
+ ASSERT(0 < m && m < n);
// Has high bit set in every w byte less than n.
uintptr_t tmp1 = kOneInEveryByte * (0x7F + n) - w;
// Has high bit set in every w byte greater than m.
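// --- Editorial sketch, not part of the patch --------------------------------
// kOneInEveryByte is 0x0101...01 and kAsciiMask is 0x8080...80. AsciiRangeMask
// is a SWAR (SIMD-within-a-register) trick: adding a per-byte bias makes the
// high bit of each byte act as the result of a per-byte comparison, so one
// word-sized operation classifies sizeof(uintptr_t) characters at once. The
// patch can drop the "all bytes are ASCII" assert here because the new or_acc
// check in FastAsciiConverter::Convert (below) rejects non-ASCII input after
// the fact. A stand-alone restatement with a tiny self-check:
#include <cassert>
#include <cstdint>

static const uintptr_t kEveryByte = ~static_cast<uintptr_t>(0) / 0xFF;  // 0x01 repeated

// High bit set in every byte of w that is strictly between m and n.
// Only meaningful when all bytes of w are below 0x80.
static uintptr_t RangeMask(uintptr_t w, char m, char n) {
  uintptr_t below_n = kEveryByte * (0x7F + n) - w;  // byte < n  => high bit set
  uintptr_t above_m = w + kEveryByte * (0x7F - m);  // byte > m  => high bit set
  return below_n & above_m & (kEveryByte * 0x80);
}

static void RangeMaskExample() {
  // Bytes 'a' and 'z' lie in ('a'-1, 'z'+1); 'A' and '0' do not.
  uintptr_t w = 'a' | ('A' << 8) | ('z' << 16) | ('0' << 24);
  uintptr_t mask = RangeMask(w, 'a' - 1, 'z' + 1);
  assert((mask & 0x80u) != 0);        // 'a' flagged
  assert((mask & 0x8000u) == 0);      // 'A' not flagged
  assert((mask & 0x800000u) != 0);    // 'z' flagged
  assert((mask & 0x80000000u) == 0);  // '0' not flagged
}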
@@ -5925,7 +6354,7 @@ enum AsciiCaseConversion {
template <AsciiCaseConversion dir>
struct FastAsciiConverter {
- static bool Convert(char* dst, char* src, int length) {
+ static bool Convert(char* dst, char* src, int length, bool* changed_out) {
#ifdef DEBUG
char* saved_dst = dst;
char* saved_src = src;
@@ -5937,12 +6366,14 @@ struct FastAsciiConverter {
const char lo = (dir == ASCII_TO_LOWER) ? 'A' - 1 : 'a' - 1;
const char hi = (dir == ASCII_TO_LOWER) ? 'Z' + 1 : 'z' + 1;
bool changed = false;
+ uintptr_t or_acc = 0;
char* const limit = src + length;
#ifdef V8_HOST_CAN_READ_UNALIGNED
// Process the prefix of the input that requires no conversion one
// (machine) word at a time.
while (src <= limit - sizeof(uintptr_t)) {
uintptr_t w = *reinterpret_cast<uintptr_t*>(src);
+ or_acc |= w;
if (AsciiRangeMask(w, lo, hi) != 0) {
changed = true;
break;
@@ -5955,6 +6386,7 @@ struct FastAsciiConverter {
// required one word at a time.
while (src <= limit - sizeof(uintptr_t)) {
uintptr_t w = *reinterpret_cast<uintptr_t*>(src);
+ or_acc |= w;
uintptr_t m = AsciiRangeMask(w, lo, hi);
// The mask has high (7th) bit set in every byte that needs
// conversion and we know that the distance between cases is
@@ -5968,6 +6400,7 @@ struct FastAsciiConverter {
// unaligned access is not supported).
while (src < limit) {
char c = *src;
+ or_acc |= c;
if (lo < c && c < hi) {
c ^= (1 << 5);
changed = true;
@@ -5976,10 +6409,14 @@ struct FastAsciiConverter {
++src;
++dst;
}
+ if ((or_acc & kAsciiMask) != 0) {
+ return false;
+ }
#ifdef DEBUG
CheckConvert(saved_dst, saved_src, length, changed);
#endif
- return changed;
+ *changed_out = changed;
+ return true;
}
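// --- Editorial note, not part of the patch -----------------------------------
// Why or_acc: a one-byte (SeqOneByteString) source may hold character values
// above 0x7F, and the word-at-a-time case flipping above is only valid for
// 7-bit bytes. Convert() therefore ORs every byte it reads into or_acc; if any
// byte had its top bit set (or_acc & kAsciiMask), the whole result is
// abandoned (return false) and the caller falls through to the two-byte path,
// while the old boolean return value moves into *changed_out. The detection
// idiom in isolation:
#include <cstdint>

static bool IsSevenBitAscii(const uint8_t* chars, int length) {
  uint8_t or_acc = 0;
  for (int i = 0; i < length; i++) or_acc |= chars[i];
  return (or_acc & 0x80) == 0;  // no byte had the high bit set
}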
#ifdef DEBUG
@@ -6024,7 +6461,7 @@ MUST_USE_RESULT static MaybeObject* ConvertCase(
Arguments args,
Isolate* isolate,
unibrow::Mapping<typename ConvertTraits::UnibrowConverter, 128>* mapping) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
CONVERT_ARG_CHECKED(String, s, 0);
s = s->TryFlattenGetString();
@@ -6038,15 +6475,22 @@ MUST_USE_RESULT static MaybeObject* ConvertCase(
// character is also ASCII. This is currently the case, but it
// might break in the future if we implement more context and locale
// dependent upper/lower conversions.
- if (s->IsSeqAsciiString()) {
+ if (s->IsSeqOneByteString()) {
Object* o;
- { MaybeObject* maybe_o = isolate->heap()->AllocateRawAsciiString(length);
+ { MaybeObject* maybe_o = isolate->heap()->AllocateRawOneByteString(length);
if (!maybe_o->ToObject(&o)) return maybe_o;
}
- SeqAsciiString* result = SeqAsciiString::cast(o);
- bool has_changed_character = ConvertTraits::AsciiConverter::Convert(
- result->GetChars(), SeqAsciiString::cast(s)->GetChars(), length);
- return has_changed_character ? result : s;
+ SeqOneByteString* result = SeqOneByteString::cast(o);
+ bool has_changed_character;
+ bool is_ascii = ConvertTraits::AsciiConverter::Convert(
+ reinterpret_cast<char*>(result->GetChars()),
+ reinterpret_cast<char*>(SeqOneByteString::cast(s)->GetChars()),
+ length,
+ &has_changed_character);
+ // If not ASCII, we discard the result and take the 2 byte path.
+ if (is_ascii) {
+ return has_changed_character ? result : s;
+ }
}
Object* answer;
@@ -6084,7 +6528,7 @@ static inline bool IsTrimWhiteSpace(unibrow::uchar c) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_StringTrim) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 3);
CONVERT_ARG_CHECKED(String, s, 0);
@@ -6112,8 +6556,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringTrim) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_StringSplit) {
- ASSERT(args.length() == 3);
HandleScope handle_scope(isolate);
+ ASSERT(args.length() == 3);
CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
CONVERT_ARG_HANDLE_CHECKED(String, pattern, 1);
CONVERT_NUMBER_CHECKED(uint32_t, limit, Uint32, args[2]);
@@ -6123,11 +6567,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringSplit) {
RUNTIME_ASSERT(pattern_length > 0);
if (limit == 0xffffffffu) {
- Handle<Object> cached_answer(RegExpResultsCache::Lookup(
- isolate->heap(),
- *subject,
- *pattern,
- RegExpResultsCache::STRING_SPLIT_SUBSTRINGS));
+ Handle<Object> cached_answer(
+ RegExpResultsCache::Lookup(isolate->heap(),
+ *subject,
+ *pattern,
+ RegExpResultsCache::STRING_SPLIT_SUBSTRINGS),
+ isolate);
if (*cached_answer != Smi::FromInt(0)) {
// The cache FixedArray is a COW-array and can therefore be reused.
Handle<JSArray> result =
@@ -6145,18 +6590,18 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringSplit) {
static const int kMaxInitialListCapacity = 16;
- Zone* zone = isolate->runtime_zone();
- ZoneScope scope(zone, DELETE_ON_EXIT);
+ ZoneScope zone_scope(isolate->runtime_zone());
// Find (up to limit) indices of separator and end-of-string in subject
int initial_capacity = Min<uint32_t>(kMaxInitialListCapacity, limit);
- ZoneList<int> indices(initial_capacity, zone);
+ ZoneList<int> indices(initial_capacity, zone_scope.zone());
if (!pattern->IsFlat()) FlattenString(pattern);
- FindStringIndicesDispatch(isolate, *subject, *pattern, &indices, limit, zone);
+ FindStringIndicesDispatch(isolate, *subject, *pattern,
+ &indices, limit, zone_scope.zone());
if (static_cast<uint32_t>(indices.length()) < limit) {
- indices.Add(subject_length, zone);
+ indices.Add(subject_length, zone_scope.zone());
}
// The list indices now contains the end of each part to create.
@@ -6165,8 +6610,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringSplit) {
int part_count = indices.length();
Handle<JSArray> result = isolate->factory()->NewJSArray(part_count);
- MaybeObject* maybe_result = result->EnsureCanContainHeapObjectElements();
- if (maybe_result->IsFailure()) return maybe_result;
+ JSObject::EnsureCanContainHeapObjectElements(result);
result->set_length(Smi::FromInt(part_count));
ASSERT(result->HasFastObjectElements());
@@ -6179,7 +6623,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringSplit) {
Handle<FixedArray> elements(FixedArray::cast(result->elements()));
int part_start = 0;
for (int i = 0; i < part_count; i++) {
- HandleScope local_loop_handle;
+ HandleScope local_loop_handle(isolate);
int part_end = indices.at(i);
Handle<String> substring =
isolate->factory()->NewProperSubString(subject, part_start, part_end);
@@ -6206,10 +6650,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringSplit) {
// not in the cache and fills the remainder with smi zeros. Returns
// the length of the successfully copied prefix.
static int CopyCachedAsciiCharsToArray(Heap* heap,
- const char* chars,
+ const uint8_t* chars,
FixedArray* elements,
int length) {
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
FixedArray* ascii_cache = heap->single_character_string_cache();
Object* undefined = heap->undefined_value();
int i;
@@ -6247,7 +6691,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringToArray) {
Handle<FixedArray> elements;
int position = 0;
- if (s->IsFlat() && s->IsAsciiRepresentation()) {
+ if (s->IsFlat() && s->IsOneByteRepresentation()) {
// Try using cached chars where possible.
Object* obj;
{ MaybeObject* maybe_obj =
@@ -6255,9 +6699,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringToArray) {
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
elements = Handle<FixedArray>(FixedArray::cast(obj), isolate);
+ DisallowHeapAllocation no_gc;
String::FlatContent content = s->GetFlatContent();
if (content.IsAscii()) {
- Vector<const char> chars = content.ToAsciiVector();
+ Vector<const uint8_t> chars = content.ToOneByteVector();
// Note, this will initialize all elements (not only the prefix)
// to prevent GC from seeing partially initialized array.
position = CopyCachedAsciiCharsToArray(isolate->heap(),
@@ -6273,7 +6718,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringToArray) {
elements = isolate->factory()->NewFixedArray(length);
}
for (int i = position; i < length; ++i) {
- Handle<Object> str = LookupSingleCharacterStringFromCode(s->Get(i));
+ Handle<Object> str =
+ LookupSingleCharacterStringFromCode(isolate, s->Get(i));
elements->set(i, *str);
}
@@ -6288,10 +6734,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringToArray) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_NewStringWrapper) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(String, value, 0);
- return value->ToObject();
+ return value->ToObject(isolate);
}
@@ -6303,7 +6749,7 @@ bool Runtime::IsUpperCaseChar(RuntimeState* runtime_state, uint16_t ch) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToString) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
Object* number = args[0];
@@ -6314,18 +6760,34 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToString) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToStringSkipCache) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
Object* number = args[0];
RUNTIME_ASSERT(number->IsNumber());
- return isolate->heap()->NumberToString(number, false);
+ return isolate->heap()->NumberToString(
+ number, false, isolate->heap()->GetPretenureMode());
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToInteger) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
+ ASSERT(args.length() == 1);
+
+ CONVERT_DOUBLE_ARG_CHECKED(number, 0);
+
+ // We do not include 0 so that we don't have to treat +0 / -0 cases.
+ if (number > 0 && number <= Smi::kMaxValue) {
+ return Smi::FromInt(static_cast<int>(number));
+ }
+ return isolate->heap()->NumberFromDouble(DoubleToInteger(number));
+}
+
+
+// ES6 draft 9.1.11
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToPositiveInteger) {
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_DOUBLE_ARG_CHECKED(number, 0);
@@ -6334,12 +6796,15 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToInteger) {
if (number > 0 && number <= Smi::kMaxValue) {
return Smi::FromInt(static_cast<int>(number));
}
+ if (number <= 0) {
+ return Smi::FromInt(0);
+ }
return isolate->heap()->NumberFromDouble(DoubleToInteger(number));
}
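// --- Editorial note, not part of the patch -----------------------------------
// %NumberToPositiveInteger is %NumberToInteger clamped at zero: anything <= 0
// (including -0 and negative values) comes back as the Smi 0, small positive
// values come back as truncated Smis, and large positive values go through
// DoubleToInteger. Restated as plain C++ over doubles (a sketch, not the
// runtime code):
#include <cmath>

static double ToPositiveIntegerSketch(double number) {
  if (std::isnan(number)) return 0;  // ToInteger maps NaN to +0
  if (number <= 0) return 0;         // negatives and -0 clamp to +0
  return std::floor(number);         // positive values truncate downward
}
// ToPositiveIntegerSketch(3.7)  == 3
// ToPositiveIntegerSketch(-3.7) == 0
// ToPositiveIntegerSketch(1e20) == 1e20 (returned as a heap number by the runtime)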
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToIntegerMapMinusZero) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_DOUBLE_ARG_CHECKED(number, 0);
@@ -6358,7 +6823,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToIntegerMapMinusZero) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToJSUint32) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_NUMBER_CHECKED(int32_t, number, Uint32, args[0]);
@@ -6367,7 +6832,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToJSUint32) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToJSInt32) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_DOUBLE_ARG_CHECKED(number, 0);
@@ -6383,7 +6848,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToJSInt32) {
// Converts a Number to a Smi, if possible. Returns NaN if the number is not
// a small integer.
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToSmi) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
Object* obj = args[0];
@@ -6402,14 +6867,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToSmi) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_AllocateHeapNumber) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 0);
return isolate->heap()->AllocateHeapNumber(0);
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberAdd) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
@@ -6419,7 +6884,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberAdd) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberSub) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
@@ -6429,7 +6894,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberSub) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberMul) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
@@ -6439,7 +6904,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberMul) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberUnaryMinus) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
@@ -6448,7 +6913,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberUnaryMinus) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberAlloc) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 0);
return isolate->heap()->NumberFromDouble(9876543210.0);
@@ -6456,7 +6921,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberAlloc) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberDiv) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
@@ -6466,7 +6931,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberDiv) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberMod) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
@@ -6478,8 +6943,18 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberMod) {
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberImul) {
+ SealHandleScope shs(isolate);
+ ASSERT(args.length() == 2);
+
+ CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
+ CONVERT_NUMBER_CHECKED(int32_t, y, Int32, args[1]);
+ return isolate->heap()->NumberFromInt32(x * y);
+}
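// --- Editorial note, not part of the patch -----------------------------------
// %NumberImul converts both arguments to int32 and multiplies with 32-bit
// wraparound, i.e. the semantics of ES6 Math.imul. Restated without the
// runtime plumbing (unsigned arithmetic sidesteps signed-overflow UB in
// portable C++):
#include <cstdint>

static int32_t ImulSketch(int32_t x, int32_t y) {
  return static_cast<int32_t>(static_cast<uint32_t>(x) *
                              static_cast<uint32_t>(y));
}
// ImulSketch(0x10000, 0x10000) == 0    (the product wraps modulo 2^32)
// ImulSketch(3, -5)            == -15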
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_StringAdd) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(String, str1, 0);
CONVERT_ARG_CHECKED(String, str2, 1);
@@ -6528,25 +7003,24 @@ static inline void StringBuilderConcatHelper(String* special,
RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderConcat) {
- NoHandleAllocation ha;
+ HandleScope scope(isolate);
ASSERT(args.length() == 3);
- CONVERT_ARG_CHECKED(JSArray, array, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSArray, array, 0);
if (!args[1]->IsSmi()) {
isolate->context()->mark_out_of_memory();
- return Failure::OutOfMemoryException();
+ return Failure::OutOfMemoryException(0x14);
}
int array_length = args.smi_at(1);
- CONVERT_ARG_CHECKED(String, special, 2);
+ CONVERT_ARG_HANDLE_CHECKED(String, special, 2);
// This assumption is used by the slice encoding in one or two smis.
ASSERT(Smi::kMaxValue >= String::kMaxLength);
- MaybeObject* maybe_result = array->EnsureCanContainHeapObjectElements();
- if (maybe_result->IsFailure()) return maybe_result;
+ JSObject::EnsureCanContainHeapObjectElements(array);
int special_length = special->length();
if (!array->HasFastObjectElements()) {
- return isolate->Throw(isolate->heap()->illegal_argument_symbol());
+ return isolate->Throw(isolate->heap()->illegal_argument_string());
}
FixedArray* fixed_array = FixedArray::cast(array->elements());
if (fixed_array->length() < array_length) {
@@ -6560,7 +7034,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderConcat) {
if (first->IsString()) return first;
}
- bool ascii = special->HasOnlyAsciiChars();
+ bool one_byte = special->HasOnlyOneByteChars();
int position = 0;
for (int i = 0; i < array_length; i++) {
int increment = 0;
@@ -6580,37 +7054,37 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderConcat) {
// Get the position and check that it is a positive smi.
i++;
if (i >= array_length) {
- return isolate->Throw(isolate->heap()->illegal_argument_symbol());
+ return isolate->Throw(isolate->heap()->illegal_argument_string());
}
Object* next_smi = fixed_array->get(i);
if (!next_smi->IsSmi()) {
- return isolate->Throw(isolate->heap()->illegal_argument_symbol());
+ return isolate->Throw(isolate->heap()->illegal_argument_string());
}
pos = Smi::cast(next_smi)->value();
if (pos < 0) {
- return isolate->Throw(isolate->heap()->illegal_argument_symbol());
+ return isolate->Throw(isolate->heap()->illegal_argument_string());
}
}
ASSERT(pos >= 0);
ASSERT(len >= 0);
if (pos > special_length || len > special_length - pos) {
- return isolate->Throw(isolate->heap()->illegal_argument_symbol());
+ return isolate->Throw(isolate->heap()->illegal_argument_string());
}
increment = len;
} else if (elt->IsString()) {
String* element = String::cast(elt);
int element_length = element->length();
increment = element_length;
- if (ascii && !element->HasOnlyAsciiChars()) {
- ascii = false;
+ if (one_byte && !element->HasOnlyOneByteChars()) {
+ one_byte = false;
}
} else {
ASSERT(!elt->IsTheHole());
- return isolate->Throw(isolate->heap()->illegal_argument_symbol());
+ return isolate->Throw(isolate->heap()->illegal_argument_string());
}
if (increment > String::kMaxLength - position) {
isolate->context()->mark_out_of_memory();
- return Failure::OutOfMemoryException();
+ return Failure::OutOfMemoryException(0x15);
}
position += increment;
}
@@ -6618,13 +7092,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderConcat) {
int length = position;
Object* object;
- if (ascii) {
+ if (one_byte) {
{ MaybeObject* maybe_object =
- isolate->heap()->AllocateRawAsciiString(length);
+ isolate->heap()->AllocateRawOneByteString(length);
if (!maybe_object->ToObject(&object)) return maybe_object;
}
- SeqAsciiString* answer = SeqAsciiString::cast(object);
- StringBuilderConcatHelper(special,
+ SeqOneByteString* answer = SeqOneByteString::cast(object);
+ StringBuilderConcatHelper(*special,
answer->GetChars(),
fixed_array,
array_length);
@@ -6635,7 +7109,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderConcat) {
if (!maybe_object->ToObject(&object)) return maybe_object;
}
SeqTwoByteString* answer = SeqTwoByteString::cast(object);
- StringBuilderConcatHelper(special,
+ StringBuilderConcatHelper(*special,
answer->GetChars(),
fixed_array,
array_length);
@@ -6645,18 +7119,18 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderConcat) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderJoin) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 3);
CONVERT_ARG_CHECKED(JSArray, array, 0);
if (!args[1]->IsSmi()) {
isolate->context()->mark_out_of_memory();
- return Failure::OutOfMemoryException();
+ return Failure::OutOfMemoryException(0x16);
}
int array_length = args.smi_at(1);
CONVERT_ARG_CHECKED(String, separator, 2);
if (!array->HasFastObjectElements()) {
- return isolate->Throw(isolate->heap()->illegal_argument_symbol());
+ return isolate->Throw(isolate->heap()->illegal_argument_string());
}
FixedArray* fixed_array = FixedArray::cast(array->elements());
if (fixed_array->length() < array_length) {
@@ -6675,20 +7149,20 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderJoin) {
(String::kMaxLength + separator_length - 1) / separator_length;
if (max_nof_separators < (array_length - 1)) {
isolate->context()->mark_out_of_memory();
- return Failure::OutOfMemoryException();
+ return Failure::OutOfMemoryException(0x17);
}
int length = (array_length - 1) * separator_length;
for (int i = 0; i < array_length; i++) {
Object* element_obj = fixed_array->get(i);
if (!element_obj->IsString()) {
// TODO(1161): handle this case.
- return isolate->Throw(isolate->heap()->illegal_argument_symbol());
+ return isolate->Throw(isolate->heap()->illegal_argument_string());
}
String* element = String::cast(element_obj);
int increment = element->length();
if (increment > String::kMaxLength - length) {
isolate->context()->mark_out_of_memory();
- return Failure::OutOfMemoryException();
+ return Failure::OutOfMemoryException(0x18);
}
length += increment;
}
@@ -6723,7 +7197,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderJoin) {
}
ASSERT(sink == end);
- ASSERT(!answer->HasOnlyAsciiChars()); // Use %_FastAsciiArrayJoin instead.
+ // Use %_FastAsciiArrayJoin instead.
+ ASSERT(!answer->IsOneByteRepresentation());
return answer;
}
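// Note on the hunk above: the join result length is precomputed as
// (array_length - 1) * separator_length plus the length of every element,
// and max_nof_separators rejects inputs where the separators alone would
// exceed String::kMaxLength. For example, joining 3 strings of lengths
// 4, 2 and 7 with a 2-character separator needs 2 * 2 + 4 + 2 + 7 = 17
// characters before the result string is allocated.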
@@ -6769,7 +7244,7 @@ static void JoinSparseArrayWithSeparator(FixedArray* elements,
RUNTIME_FUNCTION(MaybeObject*, Runtime_SparseJoinWithSeparator) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 3);
CONVERT_ARG_CHECKED(JSArray, elements_array, 0);
RUNTIME_ASSERT(elements_array->HasFastSmiOrObjectElements());
@@ -6782,10 +7257,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SparseJoinWithSeparator) {
// Find total length of join result.
int string_length = 0;
- bool is_ascii = separator->IsAsciiRepresentation();
+ bool is_ascii = separator->IsOneByteRepresentation();
int max_string_length;
if (is_ascii) {
- max_string_length = SeqAsciiString::kMaxLength;
+ max_string_length = SeqOneByteString::kMaxLength;
} else {
max_string_length = SeqTwoByteString::kMaxLength;
}
@@ -6799,7 +7274,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SparseJoinWithSeparator) {
RUNTIME_ASSERT(elements->get(i + 1)->IsString());
String* string = String::cast(elements->get(i + 1));
int length = string->length();
- if (is_ascii && !string->IsAsciiRepresentation()) {
+ if (is_ascii && !string->IsOneByteRepresentation()) {
is_ascii = false;
max_string_length = SeqTwoByteString::kMaxLength;
}
@@ -6835,16 +7310,17 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SparseJoinWithSeparator) {
if (is_ascii) {
MaybeObject* result_allocation =
- isolate->heap()->AllocateRawAsciiString(string_length);
+ isolate->heap()->AllocateRawOneByteString(string_length);
if (result_allocation->IsFailure()) return result_allocation;
- SeqAsciiString* result_string =
- SeqAsciiString::cast(result_allocation->ToObjectUnchecked());
- JoinSparseArrayWithSeparator<char>(elements,
- elements_length,
- array_length,
- separator,
- Vector<char>(result_string->GetChars(),
- string_length));
+ SeqOneByteString* result_string =
+ SeqOneByteString::cast(result_allocation->ToObjectUnchecked());
+ JoinSparseArrayWithSeparator<uint8_t>(elements,
+ elements_length,
+ array_length,
+ separator,
+ Vector<uint8_t>(
+ result_string->GetChars(),
+ string_length));
return result_string;
} else {
MaybeObject* result_allocation =
@@ -6864,7 +7340,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SparseJoinWithSeparator) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberOr) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
@@ -6874,7 +7350,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberOr) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberAnd) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
@@ -6884,7 +7360,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberAnd) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberXor) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
@@ -6893,17 +7369,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberXor) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberNot) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 1);
-
- CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
- return isolate->heap()->NumberFromInt32(~x);
-}
-
-
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberShl) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
@@ -6913,7 +7380,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberShl) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberShr) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_NUMBER_CHECKED(uint32_t, x, Uint32, args[0]);
@@ -6923,7 +7390,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberShr) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberSar) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
@@ -6933,13 +7400,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberSar) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberEquals) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
CONVERT_DOUBLE_ARG_CHECKED(y, 1);
- if (isnan(x)) return Smi::FromInt(NOT_EQUAL);
- if (isnan(y)) return Smi::FromInt(NOT_EQUAL);
+ if (std::isnan(x)) return Smi::FromInt(NOT_EQUAL);
+ if (std::isnan(y)) return Smi::FromInt(NOT_EQUAL);
if (x == y) return Smi::FromInt(EQUAL);
Object* result;
if ((fpclassify(x) == FP_ZERO) && (fpclassify(y) == FP_ZERO)) {
@@ -6952,7 +7419,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberEquals) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_StringEquals) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(String, x, 0);
@@ -6970,12 +7437,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringEquals) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberCompare) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 3);
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
CONVERT_DOUBLE_ARG_CHECKED(y, 1);
- if (isnan(x) || isnan(y)) return args[2];
+ if (std::isnan(x) || std::isnan(y)) return args[2];
if (x == y) return Smi::FromInt(EQUAL);
if (isless(x, y)) return Smi::FromInt(LESS);
return Smi::FromInt(GREATER);
@@ -6985,7 +7452,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberCompare) {
// Compare two Smis as if they were converted to strings and then
// compared lexicographically.
RUNTIME_FUNCTION(MaybeObject*, Runtime_SmiLexicographicCompare) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_SMI_ARG_CHECKED(x_value, 0);
CONVERT_SMI_ARG_CHECKED(y_value, 1);
@@ -7059,23 +7526,21 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SmiLexicographicCompare) {
}
-static Object* StringInputBufferCompare(RuntimeState* state,
+static Object* StringCharacterStreamCompare(RuntimeState* state,
String* x,
String* y) {
- StringInputBuffer& bufx = *state->string_input_buffer_compare_bufx();
- StringInputBuffer& bufy = *state->string_input_buffer_compare_bufy();
- bufx.Reset(x);
- bufy.Reset(y);
- while (bufx.has_more() && bufy.has_more()) {
- int d = bufx.GetNext() - bufy.GetNext();
+ StringCharacterStream stream_x(x, state->string_iterator_compare_x());
+ StringCharacterStream stream_y(y, state->string_iterator_compare_y());
+ while (stream_x.HasMore() && stream_y.HasMore()) {
+ int d = stream_x.GetNext() - stream_y.GetNext();
if (d < 0) return Smi::FromInt(LESS);
else if (d > 0) return Smi::FromInt(GREATER);
}
// x is (non-trivial) prefix of y:
- if (bufy.has_more()) return Smi::FromInt(LESS);
+ if (stream_y.HasMore()) return Smi::FromInt(LESS);
// y is prefix of x:
- return Smi::FromInt(bufx.has_more() ? GREATER : EQUAL);
+ return Smi::FromInt(stream_x.HasMore() ? GREATER : EQUAL);
}
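// Note: StringCharacterStreamCompare walks both strings through character
// streams and decides on the first differing code unit; if one stream runs
// out first, the shorter string is a prefix of the longer one and compares
// LESS (e.g. "ab" < "abc"), and only two equal-length, equal-content strings
// compare EQUAL. This is the reference result that the flat fast path is
// checked against in the ASSERT below.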
@@ -7091,12 +7556,13 @@ static Object* FlatStringCompare(String* x, String* y) {
equal_prefix_result = Smi::FromInt(LESS);
}
int r;
+ DisallowHeapAllocation no_gc;
String::FlatContent x_content = x->GetFlatContent();
String::FlatContent y_content = y->GetFlatContent();
if (x_content.IsAscii()) {
- Vector<const char> x_chars = x_content.ToAsciiVector();
+ Vector<const uint8_t> x_chars = x_content.ToOneByteVector();
if (y_content.IsAscii()) {
- Vector<const char> y_chars = y_content.ToAsciiVector();
+ Vector<const uint8_t> y_chars = y_content.ToOneByteVector();
r = CompareChars(x_chars.start(), y_chars.start(), prefix_length);
} else {
Vector<const uc16> y_chars = y_content.ToUC16Vector();
@@ -7105,7 +7571,7 @@ static Object* FlatStringCompare(String* x, String* y) {
} else {
Vector<const uc16> x_chars = x_content.ToUC16Vector();
if (y_content.IsAscii()) {
- Vector<const char> y_chars = y_content.ToAsciiVector();
+ Vector<const uint8_t> y_chars = y_content.ToOneByteVector();
r = CompareChars(x_chars.start(), y_chars.start(), prefix_length);
} else {
Vector<const uc16> y_chars = y_content.ToUC16Vector();
@@ -7119,13 +7585,13 @@ static Object* FlatStringCompare(String* x, String* y) {
result = (r < 0) ? Smi::FromInt(LESS) : Smi::FromInt(GREATER);
}
ASSERT(result ==
- StringInputBufferCompare(Isolate::Current()->runtime_state(), x, y));
+ StringCharacterStreamCompare(x->GetIsolate()->runtime_state(), x, y));
return result;
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_StringCompare) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(String, x, 0);
@@ -7155,12 +7621,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringCompare) {
}
return (x->IsFlat() && y->IsFlat()) ? FlatStringCompare(x, y)
- : StringInputBufferCompare(isolate->runtime_state(), x, y);
+ : StringCharacterStreamCompare(isolate->runtime_state(), x, y);
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_acos) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
isolate->counters()->math_acos()->Increment();
@@ -7170,7 +7636,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_acos) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_asin) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
isolate->counters()->math_asin()->Increment();
@@ -7180,7 +7646,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_asin) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_atan) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
isolate->counters()->math_atan()->Increment();
@@ -7193,14 +7659,14 @@ static const double kPiDividedBy4 = 0.78539816339744830962;
RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_atan2) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
isolate->counters()->math_atan2()->Increment();
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
CONVERT_DOUBLE_ARG_CHECKED(y, 1);
double result;
- if (isinf(x) && isinf(y)) {
+ if (std::isinf(x) && std::isinf(y)) {
// Make sure that the result in case of two infinite arguments
// is a multiple of Pi / 4. The sign of the result is determined
// by the first argument (x) and the sign of the second argument
@@ -7216,7 +7682,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_atan2) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_ceil) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
isolate->counters()->math_ceil()->Increment();
@@ -7226,7 +7692,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_ceil) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_cos) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
isolate->counters()->math_cos()->Increment();
@@ -7236,17 +7702,18 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_cos) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_exp) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
isolate->counters()->math_exp()->Increment();
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- return isolate->transcendental_cache()->Get(TranscendentalCache::EXP, x);
+ lazily_initialize_fast_exp();
+ return isolate->heap()->NumberFromDouble(fast_exp(x));
}
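// Note on the change above: Math.exp no longer consults the
// TranscendentalCache; it lazily initializes the fast exp implementation and
// boxes fast_exp(x) as a heap number directly. The definitions of
// lazily_initialize_fast_exp() and fast_exp() live elsewhere in this patch,
// so this annotation only describes the call sequence visible here.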
RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_floor) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
isolate->counters()->math_floor()->Increment();
@@ -7256,7 +7723,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_floor) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_log) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
isolate->counters()->math_log()->Increment();
@@ -7264,10 +7731,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_log) {
return isolate->transcendental_cache()->Get(TranscendentalCache::LOG, x);
}
+
// Slow version of Math.pow. We check for fast paths in special cases.
// Used if SSE2/VFP3 is not available.
RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_pow) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
isolate->counters()->math_pow()->Increment();
@@ -7281,27 +7749,16 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_pow) {
}
CONVERT_DOUBLE_ARG_CHECKED(y, 1);
- int y_int = static_cast<int>(y);
- double result;
- if (y == y_int) {
- result = power_double_int(x, y_int); // Returns 1 if exponent is 0.
- } else if (y == 0.5) {
- result = (isinf(x)) ? V8_INFINITY
- : fast_sqrt(x + 0.0); // Convert -0 to +0.
- } else if (y == -0.5) {
- result = (isinf(x)) ? 0
- : 1.0 / fast_sqrt(x + 0.0); // Convert -0 to +0.
- } else {
- result = power_double_double(x, y);
- }
- if (isnan(result)) return isolate->heap()->nan_value();
+ double result = power_helper(x, y);
+ if (std::isnan(result)) return isolate->heap()->nan_value();
return isolate->heap()->AllocateHeapNumber(result);
}
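// Note on the change above: the removed inline special cases (integer
// exponents via power_double_int, y == +/-0.5 via fast_sqrt with "x + 0.0"
// normalizing -0 to +0) are now folded into power_helper(x, y); the runtime
// function only keeps the NaN check and the heap-number allocation.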
+
// Fast version of Math.pow if we know that y is not an integer and y is not
// -0.5 or 0.5. Used as slow case from full codegen.
RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_pow_cfunction) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
isolate->counters()->math_pow()->Increment();
@@ -7311,14 +7768,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_pow_cfunction) {
return Smi::FromInt(1);
} else {
double result = power_double_double(x, y);
- if (isnan(result)) return isolate->heap()->nan_value();
+ if (std::isnan(result)) return isolate->heap()->nan_value();
return isolate->heap()->AllocateHeapNumber(result);
}
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_RoundNumber) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
isolate->counters()->math_round()->Increment();
@@ -7361,7 +7818,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RoundNumber) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_sin) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
isolate->counters()->math_sin()->Increment();
@@ -7371,7 +7828,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_sin) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_sqrt) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
isolate->counters()->math_sqrt()->Increment();
@@ -7381,7 +7838,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_sqrt) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_tan) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
isolate->counters()->math_tan()->Increment();
@@ -7391,7 +7848,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_tan) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_DateMakeDay) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_SMI_ARG_CHECKED(year, 0);
@@ -7413,7 +7870,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DateSetValue) {
Object* value = NULL;
bool is_value_nan = false;
- if (isnan(time)) {
+ if (std::isnan(time)) {
value = isolate->heap()->nan_value();
is_value_nan = true;
} else if (!is_utc &&
@@ -7534,7 +7991,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewArgumentsFast) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_NewStrictArgumentsFast) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 3);
JSFunction* callee = JSFunction::cast(args[0]);
@@ -7549,26 +8006,37 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewStrictArgumentsFast) {
// Allocate the elements if needed.
if (length > 0) {
// Allocate the fixed array.
- Object* obj;
- { MaybeObject* maybe_obj = isolate->heap()->AllocateRawFixedArray(length);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+ FixedArray* array;
+ { MaybeObject* maybe_obj =
+ isolate->heap()->AllocateUninitializedFixedArray(length);
+ if (!maybe_obj->To(&array)) return maybe_obj;
}
- AssertNoAllocation no_gc;
- FixedArray* array = reinterpret_cast<FixedArray*>(obj);
- array->set_map_no_write_barrier(isolate->heap()->fixed_array_map());
- array->set_length(length);
-
+ DisallowHeapAllocation no_gc;
WriteBarrierMode mode = array->GetWriteBarrierMode(no_gc);
for (int i = 0; i < length; i++) {
array->set(i, *--parameters, mode);
}
- JSObject::cast(result)->set_elements(FixedArray::cast(obj));
+ JSObject::cast(result)->set_elements(array);
}
return result;
}
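// Note on the hunk above: the argument elements are now allocated with
// AllocateUninitializedFixedArray(), which is expected to install the
// fixed-array map and length itself, so the manual set_map_no_write_barrier()
// and set_length() setup is gone; a DisallowHeapAllocation scope then guards
// the loop that copies the parameters in with the precomputed write barrier
// mode.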
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NewClosureFromStubFailure) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(SharedFunctionInfo, shared, 0);
+ Handle<Context> context(isolate->context());
+ PretenureFlag pretenure_flag = NOT_TENURED;
+ Handle<JSFunction> result =
+ isolate->factory()->NewFunctionFromSharedFunctionInfo(shared,
+ context,
+ pretenure_flag);
+ return *result;
+}
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_NewClosure) {
HandleScope scope(isolate);
ASSERT(args.length() == 3);
@@ -7591,10 +8059,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewClosure) {
// into C++ code. Collect these in a newly allocated array of handles (possibly
// prefixed by a number of empty handles).
static SmartArrayPointer<Handle<Object> > GetCallerArguments(
+ Isolate* isolate,
int prefix_argc,
int* total_argc) {
// Find frame containing arguments passed to the caller.
- JavaScriptFrameIterator it;
+ JavaScriptFrameIterator it(isolate);
JavaScriptFrame* frame = it.frame();
List<JSFunction*> functions(2);
frame->GetFunctions(&functions);
@@ -7613,7 +8082,7 @@ static SmartArrayPointer<Handle<Object> > GetCallerArguments(
SmartArrayPointer<Handle<Object> > param_data(
NewArray<Handle<Object> >(*total_argc));
for (int i = 0; i < args_count; i++) {
- Handle<Object> val = args_slots[i].GetValue();
+ Handle<Object> val = args_slots[i].GetValue(isolate);
param_data[prefix_argc + i] = val;
}
@@ -7629,7 +8098,7 @@ static SmartArrayPointer<Handle<Object> > GetCallerArguments(
SmartArrayPointer<Handle<Object> > param_data(
NewArray<Handle<Object> >(*total_argc));
for (int i = 0; i < args_count; i++) {
- Handle<Object> val = Handle<Object>(frame->GetParameter(i));
+ Handle<Object> val = Handle<Object>(frame->GetParameter(i), isolate);
param_data[prefix_argc + i] = val;
}
return param_data;
@@ -7648,7 +8117,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionBindArguments) {
bound_function->shared()->set_bound(true);
// Get all arguments of calling function (Function.prototype.bind).
int argc = 0;
- SmartArrayPointer<Handle<Object> > arguments = GetCallerArguments(0, &argc);
+ SmartArrayPointer<Handle<Object> > arguments =
+ GetCallerArguments(isolate, 0, &argc);
// Don't count the this-arg.
if (argc > 0) {
ASSERT(*arguments[0] == args[2]);
@@ -7665,7 +8135,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionBindArguments) {
JSFunction::cast(*bindee)->function_bindings());
new_bindings =
isolate->factory()->NewFixedArray(old_bindings->length() + argc);
- bindee = Handle<Object>(old_bindings->get(JSFunction::kBoundFunctionIndex));
+ bindee = Handle<Object>(old_bindings->get(JSFunction::kBoundFunctionIndex),
+ isolate);
i = 0;
for (int n = old_bindings->length(); i < n; i++) {
new_bindings->set(i, old_bindings->get(i));
@@ -7686,11 +8157,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionBindArguments) {
bound_function->set_function_bindings(*new_bindings);
// Update length.
- Handle<String> length_symbol = isolate->factory()->length_symbol();
+ Handle<String> length_string = isolate->factory()->length_string();
Handle<Object> new_length(args.at<Object>(3));
PropertyAttributes attr =
static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY);
- ForceSetProperty(bound_function, length_symbol, new_length, attr);
+ ForceSetProperty(bound_function, length_string, new_length, attr);
return *bound_function;
}
@@ -7724,21 +8195,23 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewObjectFromBound) {
Handle<FixedArray>(FixedArray::cast(function->function_bindings()));
int bound_argc = bound_args->length() - JSFunction::kBoundArgumentsStartIndex;
Handle<Object> bound_function(
- JSReceiver::cast(bound_args->get(JSFunction::kBoundFunctionIndex)));
+ JSReceiver::cast(bound_args->get(JSFunction::kBoundFunctionIndex)),
+ isolate);
ASSERT(!bound_function->IsJSFunction() ||
!Handle<JSFunction>::cast(bound_function)->shared()->bound());
int total_argc = 0;
SmartArrayPointer<Handle<Object> > param_data =
- GetCallerArguments(bound_argc, &total_argc);
+ GetCallerArguments(isolate, bound_argc, &total_argc);
for (int i = 0; i < bound_argc; i++) {
param_data[i] = Handle<Object>(bound_args->get(
- JSFunction::kBoundArgumentsStartIndex + i));
+ JSFunction::kBoundArgumentsStartIndex + i), isolate);
}
if (!bound_function->IsJSFunction()) {
bool exception_thrown;
- bound_function = Execution::TryGetConstructorDelegate(bound_function,
+ bound_function = Execution::TryGetConstructorDelegate(isolate,
+ bound_function,
&exception_thrown);
if (exception_thrown) return Failure::Exception();
}
@@ -7756,20 +8229,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewObjectFromBound) {
}
-static void TrySettingInlineConstructStub(Isolate* isolate,
- Handle<JSFunction> function) {
- Handle<Object> prototype = isolate->factory()->null_value();
- if (function->has_instance_prototype()) {
- prototype = Handle<Object>(function->instance_prototype(), isolate);
- }
- if (function->shared()->CanGenerateInlineConstructor(*prototype)) {
- ConstructStubCompiler compiler(isolate);
- Handle<Code> code = compiler.CompileConstructStub(function);
- function->shared()->set_construct_stub(*code);
- }
-}
-
-
RUNTIME_FUNCTION(MaybeObject*, Runtime_NewObject) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
@@ -7809,7 +8268,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewObject) {
// called using 'new' and creates a new JSFunction object that
// is returned. The receiver object is only used for error
// reporting if an error occurs when constructing the new
- // JSFunction. FACTORY->NewJSObject() should not be used to
+ // JSFunction. Factory::NewJSObject() should not be used to
// allocate JSFunctions since it does not properly initialize
// the shared part of the function. Since the receiver is
// ignored anyway, we use the global object as the receiver
@@ -7833,13 +8292,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewObject) {
shared->CompleteInobjectSlackTracking();
}
- bool first_allocation = !shared->live_objects_may_exist();
Handle<JSObject> result = isolate->factory()->NewJSObject(function);
RETURN_IF_EMPTY_HANDLE(isolate, result);
- // Delay setting the stub if inobject slack tracking is in progress.
- if (first_allocation && !shared->IsInobjectSlackTrackingInProgress()) {
- TrySettingInlineConstructStub(isolate, function);
- }
isolate->counters()->constructed_objects()->Increment();
isolate->counters()->constructed_objects_runtime()->Increment();
@@ -7854,7 +8308,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FinalizeInstanceSize) {
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
function->shared()->CompleteInobjectSlackTracking();
- TrySettingInlineConstructStub(isolate, function);
return isolate->heap()->undefined_value();
}
@@ -7885,38 +8338,41 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LazyCompile) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_LazyRecompile) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
- Handle<JSFunction> function = args.at<JSFunction>(0);
-
+bool AllowOptimization(Isolate* isolate, Handle<JSFunction> function) {
  // If the function is not compiled, ignore the lazy
  // recompilation. This can happen if the debugger is activated and
  // the function is returned to the not compiled state.
- if (!function->shared()->is_compiled()) {
- function->ReplaceCode(function->shared()->code());
- return function->code();
- }
+ if (!function->shared()->is_compiled()) return false;
  // If the function is not optimizable or the debugger is active, continue
  // using the code from the full compiler.
- if (!FLAG_crankshaft ||
- !function->shared()->code()->optimizable() ||
+ if (!isolate->use_crankshaft() ||
+ function->shared()->optimization_disabled() ||
isolate->DebuggerHasBreakPoints()) {
if (FLAG_trace_opt) {
PrintF("[failed to optimize ");
function->PrintName();
PrintF(": is code optimizable: %s, is debugger enabled: %s]\n",
- function->shared()->code()->optimizable() ? "T" : "F",
+ function->shared()->optimization_disabled() ? "F" : "T",
isolate->DebuggerHasBreakPoints() ? "T" : "F");
}
+ return false;
+ }
+ return true;
+}
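// Note: AllowOptimization() centralizes the bail-out conditions that were
// previously inlined in Runtime_LazyRecompile: an uncompiled function, an
// isolate without Crankshaft, optimization disabled on the shared function
// info, or active debugger break points. Both the lazy and the concurrent
// recompilation entry points below route through it.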
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_LazyRecompile) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 1);
+ Handle<JSFunction> function = args.at<JSFunction>(0);
+
+ if (!AllowOptimization(isolate, function)) {
function->ReplaceCode(function->shared()->code());
return function->code();
}
function->shared()->code()->set_profiler_ticks(0);
- if (JSFunction::CompileOptimized(function,
- BailoutId::None(),
- CLEAR_EXCEPTION)) {
+ if (JSFunction::CompileOptimized(function, CLEAR_EXCEPTION)) {
return function->code();
}
if (FLAG_trace_opt) {
@@ -7929,37 +8385,54 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LazyRecompile) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ParallelRecompile) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ConcurrentRecompile) {
HandleScope handle_scope(isolate);
- ASSERT(FLAG_parallel_recompilation);
- Compiler::RecompileParallel(args.at<JSFunction>(0));
- return *isolate->factory()->undefined_value();
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+ if (!AllowOptimization(isolate, function)) {
+ function->ReplaceCode(function->shared()->code());
+ return isolate->heap()->undefined_value();
+ }
+ function->shared()->code()->set_profiler_ticks(0);
+ ASSERT(FLAG_concurrent_recompilation);
+ if (!Compiler::RecompileConcurrent(function)) {
+ function->ReplaceCode(function->shared()->code());
+ }
+ return isolate->heap()->undefined_value();
}
class ActivationsFinder : public ThreadVisitor {
public:
- explicit ActivationsFinder(JSFunction* function)
- : function_(function), has_activations_(false) {}
+ Code* code_;
+ bool has_code_activations_;
+
+ explicit ActivationsFinder(Code* code)
+ : code_(code),
+ has_code_activations_(false) { }
void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
- if (has_activations_) return;
+ JavaScriptFrameIterator it(isolate, top);
+ VisitFrames(&it);
+ }
- for (JavaScriptFrameIterator it(isolate, top); !it.done(); it.Advance()) {
- JavaScriptFrame* frame = it.frame();
- if (frame->is_optimized() && frame->function() == function_) {
- has_activations_ = true;
- return;
- }
+ void VisitFrames(JavaScriptFrameIterator* it) {
+ for (; !it->done(); it->Advance()) {
+ JavaScriptFrame* frame = it->frame();
+ if (code_->contains(frame->pc())) has_code_activations_ = true;
}
}
+};
- bool has_activations() { return has_activations_; }
- private:
- JSFunction* function_;
- bool has_activations_;
-};
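// Note on the rewritten ActivationsFinder: activations are now matched by
// whether the optimized Code object contains the frame's pc, rather than by
// comparing frame->function() against one JSFunction. The same code object
// can be shared by several closures, so this finds every live activation of
// the code being deoptimized, on the current stack and on archived threads.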
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyStubFailure) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 0);
+ Deoptimizer* deoptimizer = Deoptimizer::Grab(isolate);
+ ASSERT(AllowHeapAllocation::IsAllowed());
+ delete deoptimizer;
+ return isolate->heap()->undefined_value();
+}
RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyDeoptimized) {
@@ -7969,17 +8442,22 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyDeoptimized) {
Deoptimizer::BailoutType type =
static_cast<Deoptimizer::BailoutType>(args.smi_at(0));
Deoptimizer* deoptimizer = Deoptimizer::Grab(isolate);
- ASSERT(isolate->heap()->IsAllocationAllowed());
- JavaScriptFrameIterator it(isolate);
+ ASSERT(AllowHeapAllocation::IsAllowed());
+
+ Handle<JSFunction> function = deoptimizer->function();
+ Handle<Code> optimized_code = deoptimizer->compiled_code();
+
+ ASSERT(optimized_code->kind() == Code::OPTIMIZED_FUNCTION);
+ ASSERT(type == deoptimizer->bailout_type());
// Make sure to materialize objects before causing any allocation.
+ JavaScriptFrameIterator it(isolate);
deoptimizer->MaterializeHeapObjects(&it);
delete deoptimizer;
JavaScriptFrame* frame = it.frame();
RUNTIME_ASSERT(frame->function()->IsJSFunction());
- Handle<JSFunction> function(JSFunction::cast(frame->function()), isolate);
- RUNTIME_ASSERT(type != Deoptimizer::EAGER || function->IsOptimized());
+ ASSERT(frame->function() == *function);
// Avoid doing too much work when running with --always-opt and keep
// the optimized code around.
@@ -7987,45 +8465,31 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyDeoptimized) {
return isolate->heap()->undefined_value();
}
- // Find other optimized activations of the function or functions that
- // share the same optimized code.
- bool has_other_activations = false;
- while (!it.done()) {
- JavaScriptFrame* frame = it.frame();
- JSFunction* other_function = JSFunction::cast(frame->function());
- if (frame->is_optimized() && other_function->code() == function->code()) {
- has_other_activations = true;
- break;
- }
- it.Advance();
- }
-
- if (!has_other_activations) {
- ActivationsFinder activations_finder(*function);
- isolate->thread_manager()->IterateArchivedThreads(&activations_finder);
- has_other_activations = activations_finder.has_activations();
- }
+ // Search for other activations of the same function and code.
+ ActivationsFinder activations_finder(*optimized_code);
+ activations_finder.VisitFrames(&it);
+ isolate->thread_manager()->IterateArchivedThreads(&activations_finder);
- if (!has_other_activations) {
- if (FLAG_trace_deopt) {
- PrintF("[removing optimized code for: ");
- function->PrintName();
- PrintF("]\n");
+ if (!activations_finder.has_code_activations_) {
+ if (function->code() == *optimized_code) {
+ if (FLAG_trace_deopt) {
+ PrintF("[removing optimized code for: ");
+ function->PrintName();
+ PrintF("]\n");
+ }
+ function->ReplaceCode(function->shared()->code());
}
- function->ReplaceCode(function->shared()->code());
} else {
+ // TODO(titzer): we should probably do DeoptimizeCodeList(code)
+ // unconditionally if the code is not already marked for deoptimization.
+ // If there is an index by shared function info, all the better.
Deoptimizer::DeoptimizeFunction(*function);
}
- // Flush optimized code cache for this function.
- function->shared()->ClearOptimizedCodeMap();
-
- return isolate->heap()->undefined_value();
-}
+ // Evict optimized code for this function from the cache so that it doesn't
+ // get used for new closures.
+ function->shared()->EvictFromOptimizedCodeMap(*optimized_code,
+ "notify deoptimized");
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyOSR) {
- Deoptimizer* deoptimizer = Deoptimizer::Grab(isolate);
- delete deoptimizer;
return isolate->heap()->undefined_value();
}
@@ -8056,6 +8520,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ClearFunctionTypeFeedback) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_RunningInSimulator) {
+ SealHandleScope shs(isolate);
#if defined(USE_SIMULATOR)
return isolate->heap()->true_value();
#else
@@ -8064,6 +8529,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RunningInSimulator) {
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_IsConcurrentRecompilationSupported) {
+ HandleScope scope(isolate);
+ return FLAG_concurrent_recompilation
+ ? isolate->heap()->true_value() : isolate->heap()->false_value();
+}
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_OptimizeFunctionOnNextCall) {
HandleScope scope(isolate);
RUNTIME_ASSERT(args.length() == 1 || args.length() == 2);
@@ -8076,28 +8548,51 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_OptimizeFunctionOnNextCall) {
if (args.length() == 2 &&
unoptimized->kind() == Code::FUNCTION) {
CONVERT_ARG_HANDLE_CHECKED(String, type, 1);
- CHECK(type->IsEqualTo(CStrVector("osr")));
- isolate->runtime_profiler()->AttemptOnStackReplacement(*function);
- unoptimized->set_allow_osr_at_loop_nesting_level(
- Code::kMaxLoopNestingMarker);
+ if (type->IsOneByteEqualTo(STATIC_ASCII_VECTOR("osr"))) {
+ // Start patching from the currently patched loop nesting level.
+ int current_level = unoptimized->allow_osr_at_loop_nesting_level();
+ ASSERT(BackEdgeTable::Verify(isolate, unoptimized, current_level));
+ for (int i = current_level + 1; i <= Code::kMaxLoopNestingMarker; i++) {
+ unoptimized->set_allow_osr_at_loop_nesting_level(i);
+ isolate->runtime_profiler()->AttemptOnStackReplacement(*function);
+ }
+ } else if (type->IsOneByteEqualTo(STATIC_ASCII_VECTOR("concurrent"))) {
+ function->MarkForConcurrentRecompilation();
+ }
}
return isolate->heap()->undefined_value();
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOptimizationStatus) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NeverOptimizeFunction) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
- // The least significant bit (after untagging) indicates whether the
- // function is currently optimized, regardless of reason.
- if (!V8::UseCrankshaft()) {
+ CONVERT_ARG_CHECKED(JSFunction, function, 0);
+ ASSERT(!function->IsOptimized());
+ function->shared()->set_optimization_disabled(true);
+ return isolate->heap()->undefined_value();
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOptimizationStatus) {
+ HandleScope scope(isolate);
+ RUNTIME_ASSERT(args.length() == 1 || args.length() == 2);
+ if (!isolate->use_crankshaft()) {
return Smi::FromInt(4); // 4 == "never".
}
+ bool sync_with_compiler_thread = true;
+ if (args.length() == 2) {
+ CONVERT_ARG_HANDLE_CHECKED(String, sync, 1);
+ if (sync->IsOneByteEqualTo(STATIC_ASCII_VECTOR("no sync"))) {
+ sync_with_compiler_thread = false;
+ }
+ }
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- if (FLAG_parallel_recompilation) {
- if (function->IsMarkedForLazyRecompilation()) {
- return Smi::FromInt(5);
+ if (FLAG_concurrent_recompilation && sync_with_compiler_thread) {
+ while (function->IsInRecompileQueue()) {
+ isolate->optimizing_compiler_thread()->InstallOptimizedFunctions();
+ OS::Sleep(50);
}
}
if (FLAG_always_opt) {
@@ -8106,11 +8601,21 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOptimizationStatus) {
return function->IsOptimized() ? Smi::FromInt(3) // 3 == "always".
: Smi::FromInt(2); // 2 == "no".
}
+ if (FLAG_deopt_every_n_times) {
+ return Smi::FromInt(6); // 6 == "maybe deopted".
+ }
return function->IsOptimized() ? Smi::FromInt(1) // 1 == "yes".
: Smi::FromInt(2); // 2 == "no".
}
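// Note: the status values visible in this hunk are 1 ("yes", optimized),
// 2 ("no"), 3 ("always", --always-opt), 4 ("never", Crankshaft unavailable)
// and 6 ("maybe deopted", --deopt-every-n-times); passing "no sync" as the
// optional second argument skips waiting for the concurrent recompilation
// thread to install pending results.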
+RUNTIME_FUNCTION(MaybeObject*, Runtime_UnblockConcurrentRecompilation) {
+ RUNTIME_ASSERT(FLAG_block_concurrent_recompilation);
+ isolate->optimizing_compiler_thread()->Unblock();
+ return isolate->heap()->undefined_value();
+}
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOptimizationCount) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
@@ -8119,134 +8624,150 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOptimizationCount) {
}
+static bool IsSuitableForOnStackReplacement(Isolate* isolate,
+ Handle<JSFunction> function,
+ Handle<Code> unoptimized) {
+ // Keep track of whether we've succeeded in optimizing.
+ if (!isolate->use_crankshaft() || !unoptimized->optimizable()) return false;
+ // If we are trying to do OSR when there are already optimized
+ // activations of the function, it means (a) the function is directly or
+ // indirectly recursive and (b) an optimized invocation has been
+ // deoptimized so that we are currently in an unoptimized activation.
+ // Check for optimized activations of this function.
+ for (JavaScriptFrameIterator it(isolate); !it.done(); it.Advance()) {
+ JavaScriptFrame* frame = it.frame();
+ if (frame->is_optimized() && frame->function() == *function) return false;
+ }
+
+ return true;
+}
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileForOnStackReplacement) {
HandleScope scope(isolate);
- ASSERT(args.length() == 1);
+ ASSERT(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+ CONVERT_NUMBER_CHECKED(uint32_t, pc_offset, Uint32, args[1]);
+ Handle<Code> unoptimized(function->shared()->code(), isolate);
+
+#ifdef DEBUG
+ JavaScriptFrameIterator it(isolate);
+ JavaScriptFrame* frame = it.frame();
+ ASSERT_EQ(frame->function(), *function);
+ ASSERT_EQ(frame->LookupCode(), *unoptimized);
+ ASSERT(unoptimized->contains(frame->pc()));
+
+ ASSERT(pc_offset ==
+ static_cast<uint32_t>(frame->pc() - unoptimized->instruction_start()));
+#endif // DEBUG
// We're not prepared to handle a function with arguments object.
ASSERT(!function->shared()->uses_arguments());
- // We have hit a back edge in an unoptimized frame for a function that was
- // selected for on-stack replacement. Find the unoptimized code object.
- Handle<Code> unoptimized(function->shared()->code(), isolate);
- // Keep track of whether we've succeeded in optimizing.
- bool succeeded = unoptimized->optimizable();
- if (succeeded) {
- // If we are trying to do OSR when there are already optimized
- // activations of the function, it means (a) the function is directly or
- // indirectly recursive and (b) an optimized invocation has been
- // deoptimized so that we are currently in an unoptimized activation.
- // Check for optimized activations of this function.
- JavaScriptFrameIterator it(isolate);
- while (succeeded && !it.done()) {
- JavaScriptFrame* frame = it.frame();
- succeeded = !frame->is_optimized() || frame->function() != *function;
- it.Advance();
+ Handle<Code> result = Handle<Code>::null();
+ BailoutId ast_id = BailoutId::None();
+
+ if (FLAG_concurrent_osr) {
+ if (isolate->optimizing_compiler_thread()->
+ IsQueuedForOSR(function, pc_offset)) {
+ // Still waiting for the optimizing compiler thread to finish. Carry on.
+ if (FLAG_trace_osr) {
+ PrintF("[COSR - polling recompile tasks for ");
+ function->PrintName();
+ PrintF("]\n");
+ }
+ return NULL;
}
- }
- BailoutId ast_id = BailoutId::None();
- if (succeeded) {
- // The top JS function is this one, the PC is somewhere in the
- // unoptimized code.
- JavaScriptFrameIterator it(isolate);
- JavaScriptFrame* frame = it.frame();
- ASSERT(frame->function() == *function);
- ASSERT(frame->LookupCode() == *unoptimized);
- ASSERT(unoptimized->contains(frame->pc()));
-
- // Use linear search of the unoptimized code's stack check table to find
- // the AST id matching the PC.
- Address start = unoptimized->instruction_start();
- unsigned target_pc_offset = static_cast<unsigned>(frame->pc() - start);
- Address table_cursor = start + unoptimized->stack_check_table_offset();
- uint32_t table_length = Memory::uint32_at(table_cursor);
- table_cursor += kIntSize;
- for (unsigned i = 0; i < table_length; ++i) {
- // Table entries are (AST id, pc offset) pairs.
- uint32_t pc_offset = Memory::uint32_at(table_cursor + kIntSize);
- if (pc_offset == target_pc_offset) {
- ast_id = BailoutId(static_cast<int>(Memory::uint32_at(table_cursor)));
- break;
+ RecompileJob* job = isolate->optimizing_compiler_thread()->
+ FindReadyOSRCandidate(function, pc_offset);
+
+ if (job == NULL) {
+ if (IsSuitableForOnStackReplacement(isolate, function, unoptimized) &&
+ Compiler::RecompileConcurrent(function, pc_offset)) {
+ if (function->IsMarkedForLazyRecompilation() ||
+ function->IsMarkedForConcurrentRecompilation()) {
+ // Prevent regular recompilation if we queue this for OSR.
+ // TODO(yangguo): remove this as soon as OSR becomes one-shot.
+ function->ReplaceCode(*unoptimized);
+ }
+ return NULL;
}
- table_cursor += 2 * kIntSize;
+ // Fall through to the end in case of failure.
+ } else {
+ // TODO(titzer): don't install the OSR code into the function.
+ ast_id = job->info()->osr_ast_id();
+ result = Compiler::InstallOptimizedCode(job);
}
+ } else if (IsSuitableForOnStackReplacement(isolate, function, unoptimized)) {
+ ast_id = unoptimized->TranslatePcOffsetToAstId(pc_offset);
ASSERT(!ast_id.IsNone());
if (FLAG_trace_osr) {
- PrintF("[replacing on-stack at AST id %d in ", ast_id.ToInt());
+ PrintF("[OSR - replacing at AST id %d in ", ast_id.ToInt());
function->PrintName();
PrintF("]\n");
}
+ // Attempt OSR compilation.
+ result = JSFunction::CompileOsr(function, ast_id, CLEAR_EXCEPTION);
+ }
- // Try to compile the optimized code. A true return value from
- // CompileOptimized means that compilation succeeded, not necessarily
- // that optimization succeeded.
- if (JSFunction::CompileOptimized(function, ast_id, CLEAR_EXCEPTION) &&
- function->IsOptimized()) {
- DeoptimizationInputData* data = DeoptimizationInputData::cast(
- function->code()->deoptimization_data());
- if (data->OsrPcOffset()->value() >= 0) {
- if (FLAG_trace_osr) {
- PrintF("[on-stack replacement offset %d in optimized code]\n",
- data->OsrPcOffset()->value());
- }
- ASSERT(BailoutId(data->OsrAstId()->value()) == ast_id);
- } else {
- // We may never generate the desired OSR entry if we emit an
- // early deoptimize.
- succeeded = false;
+ // Revert the patched back edge table, regardless of whether OSR succeeds.
+ BackEdgeTable::Revert(isolate, *unoptimized);
+
+ // Check whether we ended up with usable optimized code.
+ if (!result.is_null() && result->kind() == Code::OPTIMIZED_FUNCTION) {
+ DeoptimizationInputData* data =
+ DeoptimizationInputData::cast(result->deoptimization_data());
+
+ if (data->OsrPcOffset()->value() >= 0) {
+ ASSERT(BailoutId(data->OsrAstId()->value()) == ast_id);
+ if (FLAG_trace_osr) {
+ PrintF("[OSR - entry at AST id %d, offset %d in optimized code]\n",
+ ast_id.ToInt(), data->OsrPcOffset()->value());
}
- } else {
- succeeded = false;
+ // TODO(titzer): this is a massive hack to make the deopt counts
+ // match. Fix heuristics for reenabling optimizations!
+ function->shared()->increment_deopt_count();
+ return *result;
}
}
- // Revert to the original stack checks in the original unoptimized code.
if (FLAG_trace_osr) {
- PrintF("[restoring original stack checks in ");
+ PrintF("[OSR - optimization failed for ");
function->PrintName();
PrintF("]\n");
}
- Handle<Code> check_code;
- if (FLAG_count_based_interrupts) {
- InterruptStub interrupt_stub;
- check_code = interrupt_stub.GetCode();
- } else // NOLINT
- { // NOLINT
- StackCheckStub check_stub;
- check_code = check_stub.GetCode();
- }
- Handle<Code> replacement_code = isolate->builtins()->OnStackReplacement();
- Deoptimizer::RevertStackCheckCode(*unoptimized,
- *check_code,
- *replacement_code);
-
- // Allow OSR only at nesting level zero again.
- unoptimized->set_allow_osr_at_loop_nesting_level(0);
-
- // If the optimization attempt succeeded, return the AST id tagged as a
- // smi. This tells the builtin that we need to translate the unoptimized
- // frame to an optimized one.
- if (succeeded) {
- ASSERT(function->code()->kind() == Code::OPTIMIZED_FUNCTION);
- return Smi::FromInt(ast_id.ToInt());
- } else {
- if (function->IsMarkedForLazyRecompilation()) {
- function->ReplaceCode(function->shared()->code());
- }
- return Smi::FromInt(-1);
+
+ if (function->IsMarkedForLazyRecompilation() ||
+ function->IsMarkedForConcurrentRecompilation()) {
+ function->ReplaceCode(function->shared()->code());
}
+ return NULL;
+}
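// Note on the rewritten OSR path above: with --concurrent-osr the runtime
// first polls the optimizing compiler thread for a job queued for this
// (function, pc_offset) pair, installs a ready job's code, or queues a new
// concurrent compile; in the synchronous path it translates the pc offset to
// a bailout AST id and compiles OSR code directly. The patched back-edge
// table is reverted in every case, and returning NULL (instead of the old
// AST-id smi protocol) tells the caller that no usable optimized code was
// installed.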
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SetAllocationTimeout) {
+ SealHandleScope shs(isolate);
+ ASSERT(args.length() == 2);
+#ifdef DEBUG
+ CONVERT_SMI_ARG_CHECKED(interval, 0);
+ CONVERT_SMI_ARG_CHECKED(timeout, 1);
+ isolate->heap()->set_allocation_timeout(timeout);
+ FLAG_gc_interval = interval;
+#endif
+ return isolate->heap()->undefined_value();
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_CheckIsBootstrapping) {
+ SealHandleScope shs(isolate);
RUNTIME_ASSERT(isolate->bootstrapper()->IsActive());
return isolate->heap()->undefined_value();
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetRootNaN) {
+ SealHandleScope shs(isolate);
RUNTIME_ASSERT(isolate->bootstrapper()->IsActive());
return isolate->heap()->nan_value();
}
@@ -8274,14 +8795,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Call) {
MaybeObject* maybe = args[1 + i];
Object* object;
if (!maybe->To<Object>(&object)) return maybe;
- argv[i] = Handle<Object>(object);
+ argv[i] = Handle<Object>(object, isolate);
}
bool threw;
Handle<JSReceiver> hfun(fun);
- Handle<Object> hreceiver(receiver);
- Handle<Object> result =
- Execution::Call(hfun, hreceiver, argc, argv, &threw, true);
+ Handle<Object> hreceiver(receiver, isolate);
+ Handle<Object> result = Execution::Call(
+ isolate, hfun, hreceiver, argc, argv, &threw, true);
if (threw) return Failure::Exception();
return *result;
@@ -8296,8 +8817,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Apply) {
CONVERT_ARG_HANDLE_CHECKED(JSObject, arguments, 2);
CONVERT_SMI_ARG_CHECKED(offset, 3);
CONVERT_SMI_ARG_CHECKED(argc, 4);
- ASSERT(offset >= 0);
- ASSERT(argc >= 0);
+ RUNTIME_ASSERT(offset >= 0);
+ RUNTIME_ASSERT(argc >= 0);
// If there are too many arguments, allocate argv via malloc.
const int argv_small_size = 10;
@@ -8311,12 +8832,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Apply) {
}
for (int i = 0; i < argc; ++i) {
- argv[i] = Object::GetElement(arguments, offset + i);
+ argv[i] = Object::GetElement(isolate, arguments, offset + i);
}
bool threw;
- Handle<Object> result =
- Execution::Call(fun, receiver, argc, argv, &threw, true);
+ Handle<Object> result = Execution::Call(
+ isolate, fun, receiver, argc, argv, &threw, true);
if (threw) return Failure::Exception();
return *result;
@@ -8327,7 +8848,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFunctionDelegate) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
RUNTIME_ASSERT(!args[0]->IsJSFunction());
- return *Execution::GetFunctionDelegate(args.at<Object>(0));
+ return *Execution::GetFunctionDelegate(isolate, args.at<Object>(0));
}
@@ -8335,12 +8856,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetConstructorDelegate) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
RUNTIME_ASSERT(!args[0]->IsJSFunction());
- return *Execution::GetConstructorDelegate(args.at<Object>(0));
+ return *Execution::GetConstructorDelegate(isolate, args.at<Object>(0));
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_NewGlobalContext) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(JSFunction, function, 0);
@@ -8360,7 +8881,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewGlobalContext) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_NewFunctionContext) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSFunction, function, 0);
@@ -8377,14 +8898,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewFunctionContext) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_PushWithContext) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
- JSObject* extension_object;
- if (args[0]->IsJSObject()) {
- extension_object = JSObject::cast(args[0]);
+ JSReceiver* extension_object;
+ if (args[0]->IsJSReceiver()) {
+ extension_object = JSReceiver::cast(args[0]);
} else {
// Convert the object to a proper JavaScript object.
- MaybeObject* maybe_js_object = args[0]->ToObject();
+ MaybeObject* maybe_js_object = args[0]->ToObject(isolate);
if (!maybe_js_object->To(&extension_object)) {
if (Failure::cast(maybe_js_object)->IsInternalError()) {
HandleScope scope(isolate);
@@ -8421,7 +8942,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_PushWithContext) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_PushCatchContext) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 3);
String* name = String::cast(args[0]);
Object* thrown_object = args[1];
@@ -8447,7 +8968,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_PushCatchContext) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_PushBlockContext) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
ScopeInfo* scope_info = ScopeInfo::cast(args[0]);
JSFunction* function;
@@ -8471,6 +8992,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_PushBlockContext) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_IsJSModule) {
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
Object* obj = args[0];
return isolate->heap()->ToBoolean(obj->IsJSModule());
@@ -8478,20 +9000,90 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_IsJSModule) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_PushModuleContext) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSModule, instance, 0);
+ SealHandleScope shs(isolate);
+ ASSERT(args.length() == 2);
+ CONVERT_SMI_ARG_CHECKED(index, 0);
+
+ if (!args[1]->IsScopeInfo()) {
+ // Module already initialized. Find hosting context and retrieve context.
+ Context* host = Context::cast(isolate->context())->global_context();
+ Context* context = Context::cast(host->get(index));
+ ASSERT(context->previous() == isolate->context());
+ isolate->set_context(context);
+ return context;
+ }
- Context* context = Context::cast(instance->context());
+ CONVERT_ARG_HANDLE_CHECKED(ScopeInfo, scope_info, 1);
+
+ // Allocate module context.
+ HandleScope scope(isolate);
+ Factory* factory = isolate->factory();
+ Handle<Context> context = factory->NewModuleContext(scope_info);
+ Handle<JSModule> module = factory->NewJSModule(context, scope_info);
+ context->set_module(*module);
Context* previous = isolate->context();
- ASSERT(context->IsModuleContext());
- // Initialize the context links.
context->set_previous(previous);
context->set_closure(previous->closure());
context->set_global_object(previous->global_object());
- isolate->set_context(context);
+ isolate->set_context(*context);
- return context;
+ // Find hosting scope and initialize internal variable holding module there.
+ previous->global_context()->set(index, *context);
+
+ return *context;
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareModules) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(FixedArray, descriptions, 0);
+ Context* host_context = isolate->context();
+
+ for (int i = 0; i < descriptions->length(); ++i) {
+ Handle<ModuleInfo> description(ModuleInfo::cast(descriptions->get(i)));
+ int host_index = description->host_index();
+ Handle<Context> context(Context::cast(host_context->get(host_index)));
+ Handle<JSModule> module(context->module());
+
+ for (int j = 0; j < description->length(); ++j) {
+ Handle<String> name(description->name(j));
+ VariableMode mode = description->mode(j);
+ int index = description->index(j);
+ switch (mode) {
+ case VAR:
+ case LET:
+ case CONST:
+ case CONST_HARMONY: {
+ PropertyAttributes attr =
+ IsImmutableVariableMode(mode) ? FROZEN : SEALED;
+ Handle<AccessorInfo> info =
+ Accessors::MakeModuleExport(name, index, attr);
+ Handle<Object> result = JSObject::SetAccessor(module, info);
+ ASSERT(!(result.is_null() || result->IsUndefined()));
+ USE(result);
+ break;
+ }
+ case MODULE: {
+ Object* referenced_context = Context::cast(host_context)->get(index);
+ Handle<JSModule> value(Context::cast(referenced_context)->module());
+ JSReceiver::SetProperty(module, name, value, FROZEN, kStrictMode);
+ break;
+ }
+ case INTERNAL:
+ case TEMPORARY:
+ case DYNAMIC:
+ case DYNAMIC_GLOBAL:
+ case DYNAMIC_LOCAL:
+ UNREACHABLE();
+ }
+ }
+
+ JSObject::PreventExtensions(module);
+ }
+
+ ASSERT(!isolate->has_pending_exception());
+ return isolate->heap()->undefined_value();
}
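// Note: Runtime_DeclareModules walks the ModuleInfo descriptions, installs
// accessor-based exports for VAR/LET/CONST/CONST_HARMONY bindings (FROZEN
// for immutable modes, SEALED otherwise), wires nested MODULE bindings to
// the referenced module objects, and finally calls PreventExtensions() on
// each module object so no further properties can be added to it.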
@@ -8526,7 +9118,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeleteContextSlot) {
// the global object, or the subject of a with. Try to delete it
// (respecting DONT_DELETE).
Handle<JSObject> object = Handle<JSObject>::cast(holder);
- return object->DeleteProperty(*name, JSReceiver::NORMAL_DELETION);
+ Handle<Object> result = JSReceiver::DeleteProperty(object, name);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
}
@@ -8544,6 +9138,7 @@ struct ObjectPair {
MaybeObject* y;
};
+
static inline ObjectPair MakePair(MaybeObject* x, MaybeObject* y) {
ObjectPair result = {x, y};
// Pointers x and y returned in rax and rdx, in AMD-x64-abi.
@@ -8610,6 +9205,9 @@ static ObjectPair LoadContextSlotHelper(Arguments args,
&index,
&attributes,
&binding_flags);
+ if (isolate->has_pending_exception()) {
+ return MakePair(Failure::Exception(), NULL);
+ }
// If the index is non-negative, the slot has been found in a context.
if (index >= 0) {
@@ -8650,12 +9248,15 @@ static ObjectPair LoadContextSlotHelper(Arguments args,
// object, subject of a with, or a global object. We read the named
// property from it.
if (!holder.is_null()) {
- Handle<JSObject> object = Handle<JSObject>::cast(holder);
- ASSERT(object->HasProperty(*name));
+ Handle<JSReceiver> object = Handle<JSReceiver>::cast(holder);
+ ASSERT(object->IsJSProxy() || JSReceiver::HasProperty(object, name));
// GetProperty below can cause GC.
- Handle<Object> receiver_handle(object->IsGlobalObject()
- ? GlobalObject::cast(*object)->global_receiver()
- : ComputeReceiverForNonGlobal(isolate, *object));
+ Handle<Object> receiver_handle(
+ object->IsGlobalObject()
+ ? GlobalObject::cast(*object)->global_receiver()
+ : object->IsJSProxy() ? static_cast<Object*>(*object)
+ : ComputeReceiverForNonGlobal(isolate, JSObject::cast(*object)),
+ isolate);
// No need to unhole the value here. This is taken care of by the
// GetProperty function.
@@ -8707,6 +9308,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StoreContextSlot) {
&index,
&attributes,
&binding_flags);
+ if (isolate->has_pending_exception()) return Failure::Exception();
if (index >= 0) {
// The property was found in a context slot.
@@ -8735,11 +9337,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StoreContextSlot) {
// Slow case: The property is not in a context slot. It is either in a
// context extension object, a property of the subject of a with, or a
// property of the global object.
- Handle<JSObject> object;
+ Handle<JSReceiver> object;
if (!holder.is_null()) {
// The property exists on the holder.
- object = Handle<JSObject>::cast(holder);
+ object = Handle<JSReceiver>::cast(holder);
} else {
// The property was not found.
ASSERT(attributes == ABSENT);
@@ -8753,7 +9355,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StoreContextSlot) {
}
// In non-strict mode, the property is added to the global object.
attributes = NONE;
- object = Handle<JSObject>(isolate->context()->global_object());
+ object = Handle<JSReceiver>(isolate->context()->global_object());
}
// Set the property if it's not read only or doesn't yet exist.
@@ -8790,6 +9392,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ReThrow) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_PromoteScheduledException) {
+ SealHandleScope shs(isolate);
ASSERT_EQ(0, args.length());
return isolate->PromoteScheduledException();
}
@@ -8817,11 +9420,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ThrowNotDateError) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_StackGuard) {
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 0);
// First check if this is a real stack overflow.
if (isolate->stack_guard()->IsStackOverflow()) {
- NoHandleAllocation na;
return isolate->StackOverflow();
}
@@ -8829,23 +9432,41 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StackGuard) {
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_TryInstallRecompiledCode) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+
+ // First check if this is a real stack overflow.
+ if (isolate->stack_guard()->IsStackOverflow()) {
+ SealHandleScope shs(isolate);
+ return isolate->StackOverflow();
+ }
+
+ isolate->optimizing_compiler_thread()->InstallOptimizedFunctions();
+ return (function->IsOptimized()) ? function->code()
+ : function->shared()->code();
+}
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_Interrupt) {
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 0);
return Execution::HandleStackGuardInterrupt(isolate);
}
-static int StackSize() {
+static int StackSize(Isolate* isolate) {
int n = 0;
- for (JavaScriptFrameIterator it; !it.done(); it.Advance()) n++;
+ for (JavaScriptFrameIterator it(isolate); !it.done(); it.Advance()) n++;
return n;
}
-static void PrintTransition(Object* result) {
+static void PrintTransition(Isolate* isolate, Object* result) {
// indentation
{ const int nmax = 80;
- int n = StackSize();
+ int n = StackSize(isolate);
if (n <= nmax)
PrintF("%4d:%*s", n, n, "");
else
@@ -8853,7 +9474,7 @@ static void PrintTransition(Object* result) {
}
if (result == NULL) {
- JavaScriptFrame::PrintTop(stdout, true, false);
+ JavaScriptFrame::PrintTop(isolate, stdout, true, false);
PrintF(" {\n");
} else {
// function result
@@ -8865,22 +9486,22 @@ static void PrintTransition(Object* result) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_TraceEnter) {
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 0);
- NoHandleAllocation ha;
- PrintTransition(NULL);
+ PrintTransition(isolate, NULL);
return isolate->heap()->undefined_value();
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_TraceExit) {
- NoHandleAllocation ha;
- PrintTransition(args[0]);
+ SealHandleScope shs(isolate);
+ PrintTransition(isolate, args[0]);
return args[0]; // return TOS
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPrint) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
#ifdef DEBUG
@@ -8911,15 +9532,15 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPrint) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugTrace) {
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 0);
- NoHandleAllocation ha;
- isolate->PrintStack();
+ isolate->PrintStack(stdout);
return isolate->heap()->undefined_value();
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_DateCurrentTime) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 0);
// According to ECMA-262, section 15.9.1, page 117, the precision of
@@ -8940,19 +9561,17 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DateParseString) {
CONVERT_ARG_HANDLE_CHECKED(JSArray, output, 1);
- MaybeObject* maybe_result_array =
- output->EnsureCanContainHeapObjectElements();
- if (maybe_result_array->IsFailure()) return maybe_result_array;
+ JSObject::EnsureCanContainHeapObjectElements(output);
RUNTIME_ASSERT(output->HasFastObjectElements());
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_gc;
FixedArray* output_array = FixedArray::cast(output->elements());
RUNTIME_ASSERT(output_array->length() >= DateParser::OUTPUT_SIZE);
bool result;
String::FlatContent str_content = str->GetFlatContent();
if (str_content.IsAscii()) {
- result = DateParser::Parse(str_content.ToAsciiVector(),
+ result = DateParser::Parse(str_content.ToOneByteVector(),
output_array,
isolate->unicode_cache());
} else {
@@ -8971,7 +9590,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DateParseString) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_DateLocalTimezone) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
@@ -8982,7 +9601,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DateLocalTimezone) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_DateToUTC) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
@@ -8993,6 +9612,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DateToUTC) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_GlobalReceiver) {
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
Object* global = args[0];
if (!global->IsJSGlobalObject()) return isolate->heap()->null_value();
@@ -9005,14 +9625,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ParseJson) {
ASSERT_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(String, source, 0);
- Zone* zone = isolate->runtime_zone();
- source = Handle<String>(source->TryFlattenGetString());
+ source = Handle<String>(FlattenGetString(source));
// Optimized fast case where we only have ASCII characters.
Handle<Object> result;
- if (source->IsSeqAsciiString()) {
- result = JsonParser<true>::Parse(source, zone);
+ if (source->IsSeqOneByteString()) {
+ result = JsonParser<true>::Parse(source);
} else {
- result = JsonParser<false>::Parse(source, zone);
+ result = JsonParser<false>::Parse(source);
}
if (result.is_null()) {
// Syntax error or stack overflow in scanner.
@@ -9034,7 +9653,7 @@ bool CodeGenerationFromStringsAllowed(Isolate* isolate,
return false;
} else {
// Callback set. Let it decide if code generation is allowed.
- VMState state(isolate, EXTERNAL);
+ VMState<EXTERNAL> state(isolate);
return callback(v8::Utils::ToLocal(context));
}
}
@@ -9042,8 +9661,9 @@ bool CodeGenerationFromStringsAllowed(Isolate* isolate,
RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileString) {
HandleScope scope(isolate);
- ASSERT_EQ(1, args.length());
+ ASSERT_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(String, source, 0);
+ CONVERT_BOOLEAN_ARG_CHECKED(function_literal_only, 1);
// Extract native context.
Handle<Context> context(isolate->context()->native_context());
@@ -9059,9 +9679,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileString) {
}
// Compile source string in the native context.
+ ParseRestriction restriction = function_literal_only
+ ? ONLY_SINGLE_FUNCTION_LITERAL : NO_PARSE_RESTRICTION;
Handle<SharedFunctionInfo> shared = Compiler::CompileEval(
- source, context, true, CLASSIC_MODE, RelocInfo::kNoPosition);
- if (shared.is_null()) return Failure::Exception();
+ source, context, true, CLASSIC_MODE, restriction, RelocInfo::kNoPosition);
+ RETURN_IF_EMPTY_HANDLE(isolate, shared);
Handle<JSFunction> fun =
isolate->factory()->NewFunctionFromSharedFunctionInfo(shared,
context,
@@ -9093,11 +9715,13 @@ static ObjectPair CompileGlobalEval(Isolate* isolate,
// and return the compiled function bound in the local context.
Handle<SharedFunctionInfo> shared = Compiler::CompileEval(
source,
- Handle<Context>(isolate->context()),
+ context,
context->IsNativeContext(),
language_mode,
+ NO_PARSE_RESTRICTION,
scope_position);
- if (shared.is_null()) return MakePair(Failure::Exception(), NULL);
+ RETURN_IF_EMPTY_HANDLE_VALUE(isolate, shared,
+ MakePair(Failure::Exception(), NULL));
Handle<JSFunction> compiled =
isolate->factory()->NewFunctionFromSharedFunctionInfo(
shared, context, NOT_TENURED);
@@ -9106,9 +9730,9 @@ static ObjectPair CompileGlobalEval(Isolate* isolate,
RUNTIME_FUNCTION(ObjectPair, Runtime_ResolvePossiblyDirectEval) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 5);
- HandleScope scope(isolate);
Handle<Object> callee = args.at<Object>(0);
// If "eval" didn't refer to the original GlobalEval, it's not a
@@ -9131,40 +9755,25 @@ RUNTIME_FUNCTION(ObjectPair, Runtime_ResolvePossiblyDirectEval) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SetNewFunctionAttributes) {
- // This utility adjusts the property attributes for newly created Function
- // object ("new Function(...)") by changing the map.
- // All it does is changing the prototype property to enumerable
- // as specified in ECMA262, 15.3.5.2.
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, func, 0);
-
- Handle<Map> map = func->shared()->is_classic_mode()
- ? isolate->function_instance_map()
- : isolate->strict_mode_function_instance_map();
-
- ASSERT(func->map()->instance_type() == map->instance_type());
- ASSERT(func->map()->instance_size() == map->instance_size());
- func->set_map(*map);
- return *func;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_AllocateInNewSpace) {
- // Allocate a block of memory in NewSpace (filled with a filler).
- // Use as fallback for allocation in generated code when NewSpace
+static MaybeObject* Allocate(Isolate* isolate,
+ int size,
+ AllocationSpace space) {
+ // Allocate a block of memory in the given space (filled with a filler).
+ // Use as fallback for allocation in generated code when the space
// is full.
- ASSERT(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(Smi, size_smi, 0);
- int size = size_smi->value();
+ SealHandleScope shs(isolate);
RUNTIME_ASSERT(IsAligned(size, kPointerSize));
RUNTIME_ASSERT(size > 0);
Heap* heap = isolate->heap();
- const int kMinFreeNewSpaceAfterGC = heap->InitialSemiSpaceSize() * 3/4;
- RUNTIME_ASSERT(size <= kMinFreeNewSpaceAfterGC);
+ RUNTIME_ASSERT(size <= heap->MaxRegularSpaceAllocationSize());
Object* allocation;
- { MaybeObject* maybe_allocation = heap->new_space()->AllocateRaw(size);
+ { MaybeObject* maybe_allocation;
+ if (space == NEW_SPACE) {
+ maybe_allocation = heap->new_space()->AllocateRaw(size);
+ } else {
+ ASSERT(space == OLD_POINTER_SPACE || space == OLD_DATA_SPACE);
+ maybe_allocation = heap->paged_space(space)->AllocateRaw(size);
+ }
if (maybe_allocation->ToObject(&allocation)) {
heap->CreateFillerObjectAt(HeapObject::cast(allocation)->address(), size);
}
@@ -9173,13 +9782,38 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_AllocateInNewSpace) {
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_AllocateInNewSpace) {
+ SealHandleScope shs(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(Smi, size_smi, 0);
+ return Allocate(isolate, size_smi->value(), NEW_SPACE);
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_AllocateInOldPointerSpace) {
+ SealHandleScope shs(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(Smi, size_smi, 0);
+ return Allocate(isolate, size_smi->value(), OLD_POINTER_SPACE);
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_AllocateInOldDataSpace) {
+ SealHandleScope shs(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(Smi, size_smi, 0);
+ return Allocate(isolate, size_smi->value(), OLD_DATA_SPACE);
+}
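As an aside on the shared Allocate helper introduced above: it refuses any request that is not pointer-aligned and strictly positive before touching the heap. A minimal standalone sketch of that precondition in plain C++ (not V8 code; kPointerSize here is simply taken from sizeof(void*)):

#include <cassert>
#include <cstddef>

static const std::size_t kPointerSize = sizeof(void*);  // assumed stand-in

// A size request is acceptable only if it is positive and pointer-aligned.
static bool IsValidAllocationSize(long size) {
  return size > 0 && static_cast<std::size_t>(size) % kPointerSize == 0;
}

int main() {
  assert(IsValidAllocationSize(2 * static_cast<long>(kPointerSize)));   // aligned, positive
  assert(!IsValidAllocationSize(static_cast<long>(kPointerSize) + 1));  // misaligned
  assert(!IsValidAllocationSize(0));                                    // zero rejected
  return 0;
}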
+
+
// Push an object onto an array of objects if it is not already in the
// array. Returns true if the element was pushed onto the array and
// false otherwise.
RUNTIME_FUNCTION(MaybeObject*, Runtime_PushIfAbsent) {
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(JSArray, array, 0);
- CONVERT_ARG_CHECKED(JSObject, element, 1);
+ CONVERT_ARG_CHECKED(JSReceiver, element, 1);
RUNTIME_ASSERT(array->HasFastSmiOrObjectElements());
int length = Smi::cast(array->length())->value();
FixedArray* elements = FixedArray::cast(array->elements());
@@ -9216,14 +9850,18 @@ class ArrayConcatVisitor {
storage_(Handle<FixedArray>::cast(
isolate->global_handles()->Create(*storage))),
index_offset_(0u),
- fast_elements_(fast_elements) { }
+ fast_elements_(fast_elements),
+ exceeds_array_limit_(false) { }
~ArrayConcatVisitor() {
clear_storage();
}
void visit(uint32_t i, Handle<Object> elm) {
- if (i >= JSObject::kMaxElementCount - index_offset_) return;
+ if (i > JSObject::kMaxElementCount - index_offset_) {
+ exceeds_array_limit_ = true;
+ return;
+ }
uint32_t index = index_offset_ + i;
if (fast_elements_) {
@@ -9248,7 +9886,7 @@ class ArrayConcatVisitor {
clear_storage();
set_storage(*result);
}
-}
+ }
void increase_index_offset(uint32_t delta) {
if (JSObject::kMaxElementCount - index_offset_ < delta) {
@@ -9258,6 +9896,10 @@ class ArrayConcatVisitor {
}
}
+ bool exceeds_array_limit() {
+ return exceeds_array_limit_;
+ }
+
Handle<JSArray> ToArray() {
Handle<JSArray> array = isolate_->factory()->NewJSArray(0);
Handle<Object> length =
@@ -9286,8 +9928,8 @@ class ArrayConcatVisitor {
current_storage->length()));
uint32_t current_length = static_cast<uint32_t>(current_storage->length());
for (uint32_t i = 0; i < current_length; i++) {
- HandleScope loop_scope;
- Handle<Object> element(current_storage->get(i));
+ HandleScope loop_scope(isolate_);
+ Handle<Object> element(current_storage->get(i), isolate_);
if (!element->IsTheHole()) {
Handle<SeededNumberDictionary> new_storage =
isolate_->factory()->DictionaryAtNumberPut(slow_storage, i, element);
@@ -9316,7 +9958,8 @@ class ArrayConcatVisitor {
// Index after last seen index. Always less than or equal to
// JSObject::kMaxElementCount.
uint32_t index_offset_;
- bool fast_elements_;
+ bool fast_elements_ : 1;
+ bool exceeds_array_limit_ : 1;
};
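The visit() change above switches the bound test to "i > kMaxElementCount - index_offset_" and latches the overflow into exceeds_array_limit_ instead of silently dropping elements. A tiny standalone sketch of why the subtraction form is the overflow-safe way to write that check (kMaxElementCount below is an assumed stand-in for JSObject::kMaxElementCount):

#include <cstdint>
#include <cstdio>

static const uint32_t kMaxElementCount = 0xffffffffu;  // assumed stand-in

// Safe: the subtraction cannot wrap because index_offset <= kMaxElementCount.
static bool ExceedsLimit(uint32_t index_offset, uint32_t i) {
  return i > kMaxElementCount - index_offset;
}

int main() {
  // The naive "index_offset + i > kMaxElementCount" would wrap around in the
  // first case and miss the overflow; the subtraction form reports it.
  std::printf("%d\n", ExceedsLimit(kMaxElementCount - 1u, 2u));  // 1: over the limit
  std::printf("%d\n", ExceedsLimit(kMaxElementCount - 1u, 1u));  // 0: still within it
  return 0;
}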
@@ -9339,16 +9982,28 @@ static uint32_t EstimateElementCount(Handle<JSArray> array) {
break;
}
case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- // TODO(1810): Decide if it's worthwhile to implement this.
- UNREACHABLE();
+ case FAST_HOLEY_DOUBLE_ELEMENTS: {
+ // Fast elements can't have lengths that are not representable by
+ // a 32-bit signed integer.
+ ASSERT(static_cast<int32_t>(FixedDoubleArray::kMaxLength) >= 0);
+ int fast_length = static_cast<int>(length);
+ if (array->elements()->IsFixedArray()) {
+ ASSERT(FixedArray::cast(array->elements())->length() == 0);
+ break;
+ }
+ Handle<FixedDoubleArray> elements(
+ FixedDoubleArray::cast(array->elements()));
+ for (int i = 0; i < fast_length; i++) {
+ if (!elements->is_the_hole(i)) element_count++;
+ }
break;
+ }
case DICTIONARY_ELEMENTS: {
Handle<SeededNumberDictionary> dictionary(
SeededNumberDictionary::cast(array->elements()));
int capacity = dictionary->Capacity();
for (int i = 0; i < capacity; i++) {
- Handle<Object> key(dictionary->KeyAt(i));
+ Handle<Object> key(dictionary->KeyAt(i), array->GetIsolate());
if (dictionary->IsKey(*key)) {
element_count++;
}
@@ -9389,16 +10044,17 @@ static void IterateExternalArrayElements(Isolate* isolate,
if (elements_are_ints) {
if (elements_are_guaranteed_smis) {
for (uint32_t j = 0; j < len; j++) {
- HandleScope loop_scope;
- Handle<Smi> e(Smi::FromInt(static_cast<int>(array->get_scalar(j))));
+ HandleScope loop_scope(isolate);
+ Handle<Smi> e(Smi::FromInt(static_cast<int>(array->get_scalar(j))),
+ isolate);
visitor->visit(j, e);
}
} else {
for (uint32_t j = 0; j < len; j++) {
- HandleScope loop_scope;
+ HandleScope loop_scope(isolate);
int64_t val = static_cast<int64_t>(array->get_scalar(j));
if (Smi::IsValid(static_cast<intptr_t>(val))) {
- Handle<Smi> e(Smi::FromInt(static_cast<int>(val)));
+ Handle<Smi> e(Smi::FromInt(static_cast<int>(val)), isolate);
visitor->visit(j, e);
} else {
Handle<Object> e =
@@ -9428,6 +10084,7 @@ static int compareUInt32(const uint32_t* ap, const uint32_t* bp) {
static void CollectElementIndices(Handle<JSObject> object,
uint32_t range,
List<uint32_t>* indices) {
+ Isolate* isolate = object->GetIsolate();
ElementsKind kind = object->GetElementsKind();
switch (kind) {
case FAST_SMI_ELEMENTS:
@@ -9455,8 +10112,8 @@ static void CollectElementIndices(Handle<JSObject> object,
SeededNumberDictionary::cast(object->elements()));
uint32_t capacity = dict->Capacity();
for (uint32_t j = 0; j < capacity; j++) {
- HandleScope loop_scope;
- Handle<Object> k(dict->KeyAt(j));
+ HandleScope loop_scope(isolate);
+ Handle<Object> k(dict->KeyAt(j), isolate);
if (dict->IsKey(*k)) {
ASSERT(k->IsNumber());
uint32_t index = static_cast<uint32_t>(k->Number());
@@ -9535,7 +10192,7 @@ static void CollectElementIndices(Handle<JSObject> object,
}
}
- Handle<Object> prototype(object->GetPrototype());
+ Handle<Object> prototype(object->GetPrototype(), isolate);
if (prototype->IsJSObject()) {
// The prototype will usually have no inherited element indices,
// but we have to check.
@@ -9573,10 +10230,10 @@ static bool IterateElements(Isolate* isolate,
Handle<Object> element_value(elements->get(j), isolate);
if (!element_value->IsTheHole()) {
visitor->visit(j, element_value);
- } else if (receiver->HasElement(j)) {
+ } else if (JSReceiver::HasElement(receiver, j)) {
// Call GetElement on receiver, not its prototype, or getters won't
// have the correct receiver.
- element_value = Object::GetElement(receiver, j);
+ element_value = Object::GetElement(isolate, receiver, j);
RETURN_IF_EMPTY_HANDLE_VALUE(isolate, element_value, false);
visitor->visit(j, element_value);
}
@@ -9585,8 +10242,28 @@ static bool IterateElements(Isolate* isolate,
}
case FAST_HOLEY_DOUBLE_ELEMENTS:
case FAST_DOUBLE_ELEMENTS: {
- // TODO(1810): Decide if it's worthwhile to implement this.
- UNREACHABLE();
+ // Run through the elements FixedArray and use HasElement and GetElement
+ // to check the prototype for missing elements.
+ Handle<FixedDoubleArray> elements(
+ FixedDoubleArray::cast(receiver->elements()));
+ int fast_length = static_cast<int>(length);
+ ASSERT(fast_length <= elements->length());
+ for (int j = 0; j < fast_length; j++) {
+ HandleScope loop_scope(isolate);
+ if (!elements->is_the_hole(j)) {
+ double double_value = elements->get_scalar(j);
+ Handle<Object> element_value =
+ isolate->factory()->NewNumber(double_value);
+ visitor->visit(j, element_value);
+ } else if (JSReceiver::HasElement(receiver, j)) {
+ // Call GetElement on receiver, not its prototype, or getters won't
+ // have the correct receiver.
+ Handle<Object> element_value =
+ Object::GetElement(isolate, receiver, j);
+ RETURN_IF_EMPTY_HANDLE_VALUE(isolate, element_value, false);
+ visitor->visit(j, element_value);
+ }
+ }
break;
}
case DICTIONARY_ELEMENTS: {
@@ -9599,9 +10276,9 @@ static bool IterateElements(Isolate* isolate,
int j = 0;
int n = indices.length();
while (j < n) {
- HandleScope loop_scope;
+ HandleScope loop_scope(isolate);
uint32_t index = indices[j];
- Handle<Object> element = Object::GetElement(receiver, index);
+ Handle<Object> element = Object::GetElement(isolate, receiver, index);
RETURN_IF_EMPTY_HANDLE_VALUE(isolate, element, false);
visitor->visit(index, element);
// Skip to next different index (i.e., omit duplicates).
@@ -9615,7 +10292,7 @@ static bool IterateElements(Isolate* isolate,
Handle<ExternalPixelArray> pixels(ExternalPixelArray::cast(
receiver->elements()));
for (uint32_t j = 0; j < length; j++) {
- Handle<Smi> e(Smi::FromInt(pixels->get_scalar(j)));
+ Handle<Smi> e(Smi::FromInt(pixels->get_scalar(j)), isolate);
visitor->visit(j, e);
}
break;
@@ -9676,8 +10353,8 @@ static bool IterateElements(Isolate* isolate,
* following the ECMAScript 5 specification.
*/
RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayConcat) {
- ASSERT(args.length() == 1);
HandleScope handle_scope(isolate);
+ ASSERT(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSArray, arguments, 0);
int argument_count = static_cast<int>(arguments->length()->Number());
@@ -9689,48 +10366,51 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayConcat) {
// that mutate other arguments (but will otherwise be precise).
// The number of elements is precise if there are no inherited elements.
+ ElementsKind kind = FAST_SMI_ELEMENTS;
+
uint32_t estimate_result_length = 0;
uint32_t estimate_nof_elements = 0;
- {
- for (int i = 0; i < argument_count; i++) {
- HandleScope loop_scope;
- Handle<Object> obj(elements->get(i));
- uint32_t length_estimate;
- uint32_t element_estimate;
- if (obj->IsJSArray()) {
- Handle<JSArray> array(Handle<JSArray>::cast(obj));
- // TODO(1810): Find out if it's worthwhile to properly support
- // arbitrary ElementsKinds. For now, pessimistically transition to
- // FAST_*_ELEMENTS.
- if (array->HasFastDoubleElements()) {
- ElementsKind to_kind = FAST_ELEMENTS;
- if (array->HasFastHoleyElements()) {
- to_kind = FAST_HOLEY_ELEMENTS;
- }
- array = Handle<JSArray>::cast(
- JSObject::TransitionElementsKind(array, to_kind));
+ for (int i = 0; i < argument_count; i++) {
+ HandleScope loop_scope(isolate);
+ Handle<Object> obj(elements->get(i), isolate);
+ uint32_t length_estimate;
+ uint32_t element_estimate;
+ if (obj->IsJSArray()) {
+ Handle<JSArray> array(Handle<JSArray>::cast(obj));
+ length_estimate = static_cast<uint32_t>(array->length()->Number());
+ if (length_estimate != 0) {
+ ElementsKind array_kind =
+ GetPackedElementsKind(array->map()->elements_kind());
+ if (IsMoreGeneralElementsKindTransition(kind, array_kind)) {
+ kind = array_kind;
}
- length_estimate =
- static_cast<uint32_t>(array->length()->Number());
- element_estimate =
- EstimateElementCount(array);
- } else {
- length_estimate = 1;
- element_estimate = 1;
- }
- // Avoid overflows by capping at kMaxElementCount.
- if (JSObject::kMaxElementCount - estimate_result_length <
- length_estimate) {
- estimate_result_length = JSObject::kMaxElementCount;
- } else {
- estimate_result_length += length_estimate;
}
- if (JSObject::kMaxElementCount - estimate_nof_elements <
- element_estimate) {
- estimate_nof_elements = JSObject::kMaxElementCount;
- } else {
- estimate_nof_elements += element_estimate;
+ element_estimate = EstimateElementCount(array);
+ } else {
+ if (obj->IsHeapObject()) {
+ if (obj->IsNumber()) {
+ if (IsMoreGeneralElementsKindTransition(kind, FAST_DOUBLE_ELEMENTS)) {
+ kind = FAST_DOUBLE_ELEMENTS;
+ }
+ } else if (IsMoreGeneralElementsKindTransition(kind, FAST_ELEMENTS)) {
+ kind = FAST_ELEMENTS;
+ }
}
+ length_estimate = 1;
+ element_estimate = 1;
+ }
+ // Avoid overflows by capping at kMaxElementCount.
+ if (JSObject::kMaxElementCount - estimate_result_length <
+ length_estimate) {
+ estimate_result_length = JSObject::kMaxElementCount;
+ } else {
+ estimate_result_length += length_estimate;
+ }
+ if (JSObject::kMaxElementCount - estimate_nof_elements <
+ element_estimate) {
+ estimate_nof_elements = JSObject::kMaxElementCount;
+ } else {
+ estimate_nof_elements += element_estimate;
}
}
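The loop above also tracks a target elements kind, starting from FAST_SMI_ELEMENTS and generalizing whenever an argument needs wider storage. The following is a deliberately simplified standalone model of that widening order for the packed fast kinds only (an assumption for illustration, not V8's full IsMoreGeneralElementsKindTransition lattice):

#include <cassert>

// Simplified stand-ins for the packed fast elements kinds.
enum SimpleKind { kSmiKind = 0, kDoubleKind = 1, kObjectKind = 2 };

static bool IsMoreGeneral(SimpleKind from, SimpleKind to) {
  return to > from;  // the enum ordering encodes the widening direction
}

// Widen `current` just enough to also hold values of kind `required`.
static SimpleKind Generalize(SimpleKind current, SimpleKind required) {
  return IsMoreGeneral(current, required) ? required : current;
}

int main() {
  SimpleKind kind = kSmiKind;
  kind = Generalize(kind, kDoubleKind);  // a double argument widens smi storage
  kind = Generalize(kind, kSmiKind);     // a smi argument never narrows it back
  assert(kind == kDoubleKind);
  kind = Generalize(kind, kObjectKind);  // an object argument forces object storage
  assert(kind == kObjectKind);
  return 0;
}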
@@ -9741,8 +10421,76 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayConcat) {
Handle<FixedArray> storage;
if (fast_case) {
- // The backing storage array must have non-existing elements to
- // preserve holes across concat operations.
+ if (kind == FAST_DOUBLE_ELEMENTS) {
+ Handle<FixedDoubleArray> double_storage =
+ isolate->factory()->NewFixedDoubleArray(estimate_result_length);
+ int j = 0;
+ bool failure = false;
+ for (int i = 0; i < argument_count; i++) {
+ Handle<Object> obj(elements->get(i), isolate);
+ if (obj->IsSmi()) {
+ double_storage->set(j, Smi::cast(*obj)->value());
+ j++;
+ } else if (obj->IsNumber()) {
+ double_storage->set(j, obj->Number());
+ j++;
+ } else {
+ JSArray* array = JSArray::cast(*obj);
+ uint32_t length = static_cast<uint32_t>(array->length()->Number());
+ switch (array->map()->elements_kind()) {
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS: {
+ // Empty fixed array indicates that there are no elements.
+ if (array->elements()->IsFixedArray()) break;
+ FixedDoubleArray* elements =
+ FixedDoubleArray::cast(array->elements());
+ for (uint32_t i = 0; i < length; i++) {
+ if (elements->is_the_hole(i)) {
+ failure = true;
+ break;
+ }
+ double double_value = elements->get_scalar(i);
+ double_storage->set(j, double_value);
+ j++;
+ }
+ break;
+ }
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_SMI_ELEMENTS: {
+ FixedArray* elements(
+ FixedArray::cast(array->elements()));
+ for (uint32_t i = 0; i < length; i++) {
+ Object* element = elements->get(i);
+ if (element->IsTheHole()) {
+ failure = true;
+ break;
+ }
+ int32_t int_value = Smi::cast(element)->value();
+ double_storage->set(j, int_value);
+ j++;
+ }
+ break;
+ }
+ case FAST_HOLEY_ELEMENTS:
+ ASSERT_EQ(0, length);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+ if (failure) break;
+ }
+ Handle<JSArray> array = isolate->factory()->NewJSArray(0);
+ Smi* length = Smi::FromInt(j);
+ Handle<Map> map;
+ map = isolate->factory()->GetElementsTransitionMap(array, kind);
+ array->set_map(*map);
+ array->set_length(length);
+ array->set_elements(*double_storage);
+ return *array;
+ }
+ // The backing storage array must have non-existing elements to preserve
+ // holes across concat operations.
storage = isolate->factory()->NewFixedArrayWithHoles(
estimate_result_length);
} else {
@@ -9756,7 +10504,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayConcat) {
ArrayConcatVisitor visitor(isolate, storage, fast_case);
for (int i = 0; i < argument_count; i++) {
- Handle<Object> obj(elements->get(i));
+ Handle<Object> obj(elements->get(i), isolate);
if (obj->IsJSArray()) {
Handle<JSArray> array = Handle<JSArray>::cast(obj);
if (!IterateElements(isolate, array, &visitor)) {
@@ -9768,6 +10516,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayConcat) {
}
}
+ if (visitor.exceeds_array_limit()) {
+ return isolate->Throw(
+ *isolate->factory()->NewRangeError("invalid_array_length",
+ HandleVector<Object>(NULL, 0)));
+ }
return *visitor.ToArray();
}
@@ -9775,33 +10528,37 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayConcat) {
// This will not allocate (flatten the string), but it may run
// very slowly for very deeply nested ConsStrings. For debugging use only.
RUNTIME_FUNCTION(MaybeObject*, Runtime_GlobalPrint) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(String, string, 0);
- StringInputBuffer buffer(string);
- while (buffer.has_more()) {
- uint16_t character = buffer.GetNext();
+ ConsStringIteratorOp op;
+ StringCharacterStream stream(string, &op);
+ while (stream.HasMore()) {
+ uint16_t character = stream.GetNext();
PrintF("%c", character);
}
return string;
}
+
// Moves all own elements of an object that are below a limit to positions
// starting at zero. All undefined values are placed after non-undefined values,
// and are followed by non-existing elements. Does not change the length
// property.
// Returns the number of non-undefined elements collected.
RUNTIME_FUNCTION(MaybeObject*, Runtime_RemoveArrayHoles) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
- CONVERT_ARG_CHECKED(JSObject, object, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
CONVERT_NUMBER_CHECKED(uint32_t, limit, Uint32, args[1]);
- return object->PrepareElementsForSort(limit);
+ return *JSObject::PrepareElementsForSort(object, limit);
}
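Runtime_RemoveArrayHoles now routes through the handlified JSObject::PrepareElementsForSort. As a rough picture of the contract stated in the comment above, the effect on the slots below the limit is a partition into values, then undefineds, then holes, with the count of values returned; a minimal plain-C++ model (not the real implementation) of that contract:

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <vector>

enum class Slot { kValue, kUndefined, kHole };

static std::size_t RemoveHoles(std::vector<Slot>& elements, std::size_t limit) {
  std::size_t end = std::min(limit, elements.size());
  auto rank = [](Slot s) {
    return s == Slot::kValue ? 0 : (s == Slot::kUndefined ? 1 : 2);
  };
  // Values first, then undefineds, then holes, within the [0, limit) prefix.
  std::stable_sort(elements.begin(), elements.begin() + end,
                   [&](Slot a, Slot b) { return rank(a) < rank(b); });
  return static_cast<std::size_t>(
      std::count(elements.begin(), elements.begin() + end, Slot::kValue));
}

int main() {
  std::vector<Slot> a = {Slot::kHole, Slot::kValue, Slot::kUndefined, Slot::kValue};
  assert(RemoveHoles(a, a.size()) == 2);
  assert(a[0] == Slot::kValue && a[2] == Slot::kUndefined && a[3] == Slot::kHole);
  return 0;
}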
// Move contents of argument 0 (an array) to argument 1 (an array)
RUNTIME_FUNCTION(MaybeObject*, Runtime_MoveArrayContents) {
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(JSArray, from, 0);
CONVERT_ARG_CHECKED(JSArray, to, 1);
@@ -9827,6 +10584,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_MoveArrayContents) {
// How many elements does this object/array have?
RUNTIME_FUNCTION(MaybeObject*, Runtime_EstimateNumberOfElements) {
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSObject, object, 0);
HeapObject* elements = object->elements();
@@ -9842,66 +10600,68 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_EstimateNumberOfElements) {
// Returns an array that tells you where in the [0, length) interval an array
-// might have elements. Can either return keys (positive integers) or
-// intervals (pair of a negative integer (-start-1) followed by a
-// positive (length)) or undefined values.
+// might have elements. Can either return an array of keys (positive integers
+// or undefined) or a number representing the positive length of an interval
+// starting at index 0.
// Intervals can span over some keys that are not in the object.
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetArrayKeys) {
- ASSERT(args.length() == 2);
HandleScope scope(isolate);
+ ASSERT(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSObject, array, 0);
CONVERT_NUMBER_CHECKED(uint32_t, length, Uint32, args[1]);
if (array->elements()->IsDictionary()) {
- // Create an array and get all the keys into it, then remove all the
- // keys that are not integers in the range 0 to length-1.
- bool threw = false;
- Handle<FixedArray> keys =
- GetKeysInFixedArrayFor(array, INCLUDE_PROTOS, &threw);
- if (threw) return Failure::Exception();
-
- int keys_length = keys->length();
- for (int i = 0; i < keys_length; i++) {
- Object* key = keys->get(i);
- uint32_t index = 0;
- if (!key->ToArrayIndex(&index) || index >= length) {
- // Zap invalid keys.
- keys->set_undefined(i);
+ Handle<FixedArray> keys = isolate->factory()->empty_fixed_array();
+ for (Handle<Object> p = array;
+ !p->IsNull();
+ p = Handle<Object>(p->GetPrototype(isolate), isolate)) {
+ if (p->IsJSProxy() || JSObject::cast(*p)->HasIndexedInterceptor()) {
+ // Bail out if we find a proxy or interceptor, likely not worth
+ // collecting keys in that case.
+ return *isolate->factory()->NewNumberFromUint(length);
}
+ Handle<JSObject> current = Handle<JSObject>::cast(p);
+ Handle<FixedArray> current_keys =
+ isolate->factory()->NewFixedArray(
+ current->NumberOfLocalElements(NONE));
+ current->GetLocalElementKeys(*current_keys, NONE);
+ keys = UnionOfKeys(keys, current_keys);
+ }
+ // Erase any keys >= length.
+ // TODO(adamk): Remove this step when the contract of %GetArrayKeys
+ // is changed to let this happen on the JS side.
+ for (int i = 0; i < keys->length(); i++) {
+ if (NumberToUint32(keys->get(i)) >= length) keys->set_undefined(i);
}
return *isolate->factory()->NewJSArrayWithElements(keys);
} else {
ASSERT(array->HasFastSmiOrObjectElements() ||
array->HasFastDoubleElements());
- Handle<FixedArray> single_interval = isolate->factory()->NewFixedArray(2);
- // -1 means start of array.
- single_interval->set(0, Smi::FromInt(-1));
- FixedArrayBase* elements = FixedArrayBase::cast(array->elements());
- uint32_t actual_length =
- static_cast<uint32_t>(elements->length());
- uint32_t min_length = actual_length < length ? actual_length : length;
- Handle<Object> length_object =
- isolate->factory()->NewNumber(static_cast<double>(min_length));
- single_interval->set(1, *length_object);
- return *isolate->factory()->NewJSArrayWithElements(single_interval);
+ uint32_t actual_length = static_cast<uint32_t>(array->elements()->length());
+ return *isolate->factory()->NewNumberFromUint(Min(actual_length, length));
}
}
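Because %GetArrayKeys can now hand back either a plain number (the length of a dense candidate interval starting at index 0) or an array of candidate keys, a caller has to branch on the shape of the result. A hypothetical consumer-side sketch of that dichotomy (stand-in types, not V8 or array.js code):

#include <cstdint>
#include <vector>

// Stand-in for the two result shapes described above.
struct ArrayKeysResult {
  bool is_interval;            // true: dense interval [0, interval_length)
  uint32_t interval_length;    // valid only when is_interval is true
  std::vector<uint32_t> keys;  // valid only when is_interval is false
};

// How many indices would a caller have to probe for elements?
static uint64_t CandidateIndexCount(const ArrayKeysResult& result) {
  return result.is_interval ? result.interval_length
                            : static_cast<uint64_t>(result.keys.size());
}

int main() {
  ArrayKeysResult dense = {true, 16u, {}};
  ArrayKeysResult sparse = {false, 0u, {0u, 100u, 4096u}};
  return (CandidateIndexCount(dense) == 16 &&
          CandidateIndexCount(sparse) == 3) ? 0 : 1;
}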
RUNTIME_FUNCTION(MaybeObject*, Runtime_LookupAccessor) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 3);
- CONVERT_ARG_CHECKED(JSReceiver, receiver, 0);
- CONVERT_ARG_CHECKED(String, name, 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
CONVERT_SMI_ARG_CHECKED(flag, 2);
AccessorComponent component = flag == 0 ? ACCESSOR_GETTER : ACCESSOR_SETTER;
if (!receiver->IsJSObject()) return isolate->heap()->undefined_value();
- return JSObject::cast(receiver)->LookupAccessor(name, component);
+ Handle<Object> result =
+ JSObject::GetAccessor(Handle<JSObject>::cast(receiver), name, component);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
}
#ifdef ENABLE_DEBUGGER_SUPPORT
RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugBreak) {
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 0);
- return Execution::DebugBreakHelper();
+ return Execution::DebugBreakHelper(isolate);
}
@@ -9922,6 +10682,7 @@ static StackFrame::Id UnwrapFrameId(int wrapped) {
// clearing the event listener function
// args[1]: object supplied during callback
RUNTIME_FUNCTION(MaybeObject*, Runtime_SetDebugEventListener) {
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
RUNTIME_ASSERT(args[0]->IsJSFunction() ||
args[0]->IsUndefined() ||
@@ -9935,6 +10696,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetDebugEventListener) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_Break) {
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 0);
isolate->stack_guard()->DebugBreak();
return isolate->heap()->undefined_value();
@@ -9943,7 +10705,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Break) {
static MaybeObject* DebugLookupResultValue(Heap* heap,
Object* receiver,
- String* name,
+ Name* name,
LookupResult* result,
bool* caught_exception) {
Object* value;
@@ -9954,32 +10716,37 @@ static MaybeObject* DebugLookupResultValue(Heap* heap,
return heap->undefined_value();
}
return value;
- case FIELD:
- value =
- JSObject::cast(
- result->holder())->FastPropertyAt(result->GetFieldIndex());
+ case FIELD: {
+ Object* value;
+ MaybeObject* maybe_value =
+ JSObject::cast(result->holder())->FastPropertyAt(
+ result->representation(),
+ result->GetFieldIndex().field_index());
+ if (!maybe_value->To(&value)) return maybe_value;
if (value->IsTheHole()) {
return heap->undefined_value();
}
return value;
- case CONSTANT_FUNCTION:
- return result->GetConstantFunction();
+ }
+ case CONSTANT:
+ return result->GetConstant();
case CALLBACKS: {
Object* structure = result->GetCallbackObject();
if (structure->IsForeign() || structure->IsAccessorInfo()) {
- MaybeObject* maybe_value = result->holder()->GetPropertyWithCallback(
- receiver, structure, name);
- if (!maybe_value->ToObject(&value)) {
- if (maybe_value->IsRetryAfterGC()) return maybe_value;
- ASSERT(maybe_value->IsException());
- maybe_value = heap->isolate()->pending_exception();
+ Isolate* isolate = heap->isolate();
+ HandleScope scope(isolate);
+ Handle<Object> value = JSObject::GetPropertyWithCallback(
+ handle(result->holder(), isolate),
+ handle(receiver, isolate),
+ handle(structure, isolate),
+ handle(name, isolate));
+ if (value.is_null()) {
+ MaybeObject* exception = heap->isolate()->pending_exception();
heap->isolate()->clear_pending_exception();
- if (caught_exception != NULL) {
- *caught_exception = true;
- }
- return maybe_value;
+ if (caught_exception != NULL) *caught_exception = true;
+ return exception;
}
- return value;
+ return *value;
} else {
return heap->undefined_value();
}
@@ -10015,7 +10782,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugGetPropertyDetails) {
ASSERT(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, name, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
// Make sure to set the current context to the context before the debugger was
// entered (if the debugger is entered). The reason for switching context here
@@ -10048,7 +10815,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugGetPropertyDetails) {
}
}
details->set(0, element_or_char);
- details->set(1, PropertyDetails(NONE, NORMAL).AsSmi());
+ details->set(
+ 1, PropertyDetails(NONE, NORMAL, Representation::None()).AsSmi());
return *isolate->factory()->NewJSArrayWithElements(details);
}
@@ -10113,7 +10881,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugGetProperty) {
ASSERT(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, name, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
LookupResult result(isolate);
obj->Lookup(*name, &result);
@@ -10127,6 +10895,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugGetProperty) {
// Return the property type calculated from the property details.
// args[0]: smi with property details.
RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPropertyTypeFromDetails) {
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_PROPERTY_DETAILS_CHECKED(details, 0);
return Smi::FromInt(static_cast<int>(details.type()));
@@ -10136,6 +10905,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPropertyTypeFromDetails) {
// Return the property attribute calculated from the property details.
// args[0]: smi with property details.
RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPropertyAttributesFromDetails) {
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_PROPERTY_DETAILS_CHECKED(details, 0);
return Smi::FromInt(static_cast<int>(details.attributes()));
@@ -10145,6 +10915,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPropertyAttributesFromDetails) {
// Return the property insertion index calculated from the property details.
// args[0]: smi with property details.
RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPropertyIndexFromDetails) {
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_PROPERTY_DETAILS_CHECKED(details, 0);
// TODO(verwaest): Depends on the type of details.
@@ -10160,10 +10931,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugNamedInterceptorPropertyValue) {
ASSERT(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
RUNTIME_ASSERT(obj->HasNamedInterceptor());
- CONVERT_ARG_HANDLE_CHECKED(String, name, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
PropertyAttributes attributes;
- return obj->GetPropertyWithInterceptor(*obj, *name, &attributes);
+ Handle<Object> result =
+ JSObject::GetPropertyWithInterceptor(obj, obj, name, &attributes);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
}
@@ -10182,13 +10956,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugIndexedInterceptorElementValue) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_CheckExecutionState) {
+ SealHandleScope shs(isolate);
ASSERT(args.length() >= 1);
CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
// Check that the break id is valid.
if (isolate->debug()->break_id() == 0 ||
break_id != isolate->debug()->break_id()) {
return isolate->Throw(
- isolate->heap()->illegal_execution_state_symbol());
+ isolate->heap()->illegal_execution_state_string());
}
return isolate->heap()->true_value();
@@ -10392,7 +11167,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameDetails) {
Handle<JSFunction> function(JSFunction::cast(frame_inspector.GetFunction()));
Handle<SharedFunctionInfo> shared(function->shared());
Handle<ScopeInfo> scope_info(shared->scope_info());
- ASSERT(*scope_info != ScopeInfo::Empty());
+ ASSERT(*scope_info != ScopeInfo::Empty(isolate));
// Get the locals names and values into a temporary array.
//
@@ -10561,7 +11336,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameDetails) {
Handle<Object> receiver(it.frame()->receiver(), isolate);
if (!receiver->IsJSObject() &&
shared->is_classic_mode() &&
- !shared->native()) {
+ !function->IsBuiltin()) {
// If the receiver is not a JSObject and the function is not a
// builtin or strict-mode we have hit an optimization where a
// value object is not converted into a wrapped JS object. To
@@ -10571,6 +11346,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameDetails) {
it.Advance();
Handle<Context> calling_frames_native_context(
Context::cast(Context::cast(it.frame()->context())->native_context()));
+ ASSERT(!receiver->IsUndefined() && !receiver->IsNull());
receiver =
isolate->factory()->ToObject(receiver, calling_frames_native_context);
}
@@ -10581,57 +11357,28 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameDetails) {
}
-// Copy all the context locals into an object used to materialize a scope.
-static bool CopyContextLocalsToScopeObject(
- Isolate* isolate,
- Handle<ScopeInfo> scope_info,
- Handle<Context> context,
- Handle<JSObject> scope_object) {
- // Fill all context locals to the context extension.
- for (int i = 0; i < scope_info->ContextLocalCount(); i++) {
- VariableMode mode;
- InitializationFlag init_flag;
- int context_index = scope_info->ContextSlotIndex(
- scope_info->ContextLocalName(i), &mode, &init_flag);
-
- RETURN_IF_EMPTY_HANDLE_VALUE(
- isolate,
- SetProperty(scope_object,
- Handle<String>(scope_info->ContextLocalName(i)),
- Handle<Object>(context->get(context_index), isolate),
- NONE,
- kNonStrictMode),
- false);
- }
-
- return true;
-}
-
-
// Create a plain JSObject which materializes the local scope for the specified
// frame.
-static Handle<JSObject> MaterializeLocalScopeWithFrameInspector(
+static Handle<JSObject> MaterializeStackLocalsWithFrameInspector(
Isolate* isolate,
- JavaScriptFrame* frame,
+ Handle<JSObject> target,
+ Handle<JSFunction> function,
FrameInspector* frame_inspector) {
- Handle<JSFunction> function(JSFunction::cast(frame_inspector->GetFunction()));
Handle<SharedFunctionInfo> shared(function->shared());
Handle<ScopeInfo> scope_info(shared->scope_info());
- // Allocate and initialize a JSObject with all the arguments, stack locals
- // heap locals and extension properties of the debugged function.
- Handle<JSObject> local_scope =
- isolate->factory()->NewJSObject(isolate->object_function());
-
// First fill all parameters.
for (int i = 0; i < scope_info->ParameterCount(); ++i) {
- Handle<Object> value(
- i < frame_inspector->GetParametersCount() ?
- frame_inspector->GetParameter(i) : isolate->heap()->undefined_value());
+ Handle<Object> value(i < frame_inspector->GetParametersCount()
+ ? frame_inspector->GetParameter(i)
+ : isolate->heap()->undefined_value(),
+ isolate);
+ ASSERT(!value->IsTheHole());
RETURN_IF_EMPTY_HANDLE_VALUE(
isolate,
- SetProperty(local_scope,
+ SetProperty(isolate,
+ target,
Handle<String>(scope_info->ParameterName(i)),
value,
NONE,
@@ -10641,54 +11388,106 @@ static Handle<JSObject> MaterializeLocalScopeWithFrameInspector(
// Second fill all stack locals.
for (int i = 0; i < scope_info->StackLocalCount(); ++i) {
+ Handle<Object> value(frame_inspector->GetExpression(i), isolate);
+ if (value->IsTheHole()) continue;
+
RETURN_IF_EMPTY_HANDLE_VALUE(
isolate,
- SetProperty(local_scope,
+ SetProperty(isolate,
+ target,
Handle<String>(scope_info->StackLocalName(i)),
- Handle<Object>(frame_inspector->GetExpression(i)),
+ value,
NONE,
kNonStrictMode),
Handle<JSObject>());
}
- if (scope_info->HasContext()) {
- // Third fill all context locals.
- Handle<Context> frame_context(Context::cast(frame->context()));
- Handle<Context> function_context(frame_context->declaration_context());
- if (!CopyContextLocalsToScopeObject(
- isolate, scope_info, function_context, local_scope)) {
- return Handle<JSObject>();
- }
+ return target;
+}
- // Finally copy any properties from the function context extension.
- // These will be variables introduced by eval.
- if (function_context->closure() == *function) {
- if (function_context->has_extension() &&
- !function_context->IsNativeContext()) {
- Handle<JSObject> ext(JSObject::cast(function_context->extension()));
- bool threw = false;
- Handle<FixedArray> keys =
- GetKeysInFixedArrayFor(ext, INCLUDE_PROTOS, &threw);
- if (threw) return Handle<JSObject>();
-
- for (int i = 0; i < keys->length(); i++) {
- // Names of variables introduced by eval are strings.
- ASSERT(keys->get(i)->IsString());
- Handle<String> key(String::cast(keys->get(i)));
- RETURN_IF_EMPTY_HANDLE_VALUE(
- isolate,
- SetProperty(local_scope,
- key,
- GetProperty(ext, key),
- NONE,
- kNonStrictMode),
- Handle<JSObject>());
- }
+
+static void UpdateStackLocalsFromMaterializedObject(Isolate* isolate,
+ Handle<JSObject> target,
+ Handle<JSFunction> function,
+ JavaScriptFrame* frame,
+ int inlined_jsframe_index) {
+ if (inlined_jsframe_index != 0 || frame->is_optimized()) {
+ // Optimized frames are not supported.
+    // TODO(yangguo): make sure all code is deoptimized when debugger is active
+ // and assert that this cannot happen.
+ return;
+ }
+
+ Handle<SharedFunctionInfo> shared(function->shared());
+ Handle<ScopeInfo> scope_info(shared->scope_info());
+
+ // Parameters.
+ for (int i = 0; i < scope_info->ParameterCount(); ++i) {
+ ASSERT(!frame->GetParameter(i)->IsTheHole());
+ HandleScope scope(isolate);
+ Handle<Object> value = GetProperty(
+ isolate, target, Handle<String>(scope_info->ParameterName(i)));
+ frame->SetParameterValue(i, *value);
+ }
+
+ // Stack locals.
+ for (int i = 0; i < scope_info->StackLocalCount(); ++i) {
+ if (frame->GetExpression(i)->IsTheHole()) continue;
+ HandleScope scope(isolate);
+ Handle<Object> value = GetProperty(
+ isolate, target, Handle<String>(scope_info->StackLocalName(i)));
+ frame->SetExpression(i, *value);
+ }
+}
+
+
+static Handle<JSObject> MaterializeLocalContext(Isolate* isolate,
+ Handle<JSObject> target,
+ Handle<JSFunction> function,
+ JavaScriptFrame* frame) {
+ HandleScope scope(isolate);
+ Handle<SharedFunctionInfo> shared(function->shared());
+ Handle<ScopeInfo> scope_info(shared->scope_info());
+
+ if (!scope_info->HasContext()) return target;
+
+ // Third fill all context locals.
+ Handle<Context> frame_context(Context::cast(frame->context()));
+ Handle<Context> function_context(frame_context->declaration_context());
+ if (!ScopeInfo::CopyContextLocalsToScopeObject(
+ scope_info, function_context, target)) {
+ return Handle<JSObject>();
+ }
+
+ // Finally copy any properties from the function context extension.
+ // These will be variables introduced by eval.
+ if (function_context->closure() == *function) {
+ if (function_context->has_extension() &&
+ !function_context->IsNativeContext()) {
+ Handle<JSObject> ext(JSObject::cast(function_context->extension()));
+ bool threw = false;
+ Handle<FixedArray> keys =
+ GetKeysInFixedArrayFor(ext, INCLUDE_PROTOS, &threw);
+ if (threw) return Handle<JSObject>();
+
+ for (int i = 0; i < keys->length(); i++) {
+ // Names of variables introduced by eval are strings.
+ ASSERT(keys->get(i)->IsString());
+ Handle<String> key(String::cast(keys->get(i)));
+ RETURN_IF_EMPTY_HANDLE_VALUE(
+ isolate,
+ SetProperty(isolate,
+ target,
+ key,
+ GetProperty(isolate, ext, key),
+ NONE,
+ kNonStrictMode),
+ Handle<JSObject>());
}
}
}
- return local_scope;
+ return target;
}
@@ -10697,9 +11496,15 @@ static Handle<JSObject> MaterializeLocalScope(
JavaScriptFrame* frame,
int inlined_jsframe_index) {
FrameInspector frame_inspector(frame, inlined_jsframe_index, isolate);
- return MaterializeLocalScopeWithFrameInspector(isolate,
- frame,
- &frame_inspector);
+ Handle<JSFunction> function(JSFunction::cast(frame_inspector.GetFunction()));
+
+ Handle<JSObject> local_scope =
+ isolate->factory()->NewJSObject(isolate->object_function());
+ local_scope = MaterializeStackLocalsWithFrameInspector(
+ isolate, local_scope, function, &frame_inspector);
+ RETURN_IF_EMPTY_HANDLE_VALUE(isolate, local_scope, Handle<JSObject>());
+
+ return MaterializeLocalContext(isolate, local_scope, function, frame);
}
@@ -10735,15 +11540,18 @@ static bool SetLocalVariableValue(Isolate* isolate,
return false;
}
- Handle<JSFunction> function(JSFunction::cast(frame->function()));
+ Handle<JSFunction> function(frame->function());
Handle<SharedFunctionInfo> shared(function->shared());
Handle<ScopeInfo> scope_info(shared->scope_info());
+ bool default_result = false;
+
// Parameters.
for (int i = 0; i < scope_info->ParameterCount(); ++i) {
if (scope_info->ParameterName(i)->Equals(*variable_name)) {
frame->SetParameterValue(i, *new_value);
- return true;
+ // Argument might be shadowed in heap context, don't stop here.
+ default_result = true;
}
}
@@ -10770,17 +11578,22 @@ static bool SetLocalVariableValue(Isolate* isolate,
!function_context->IsNativeContext()) {
Handle<JSObject> ext(JSObject::cast(function_context->extension()));
- if (ext->HasProperty(*variable_name)) {
+ if (JSReceiver::HasProperty(ext, variable_name)) {
// We don't expect this to do anything except replacing
// property value.
- SetProperty(ext, variable_name, new_value, NONE, kNonStrictMode);
+ SetProperty(isolate,
+ ext,
+ variable_name,
+ new_value,
+ NONE,
+ kNonStrictMode);
return true;
}
}
}
}
- return false;
+ return default_result;
}
@@ -10799,8 +11612,8 @@ static Handle<JSObject> MaterializeClosure(Isolate* isolate,
isolate->factory()->NewJSObject(isolate->object_function());
// Fill all context locals to the context extension.
- if (!CopyContextLocalsToScopeObject(
- isolate, scope_info, context, closure_scope)) {
+ if (!ScopeInfo::CopyContextLocalsToScopeObject(
+ scope_info, context, closure_scope)) {
return Handle<JSObject>();
}
@@ -10819,9 +11632,10 @@ static Handle<JSObject> MaterializeClosure(Isolate* isolate,
Handle<String> key(String::cast(keys->get(i)));
RETURN_IF_EMPTY_HANDLE_VALUE(
isolate,
- SetProperty(closure_scope,
+ SetProperty(isolate,
+ closure_scope,
key,
- GetProperty(ext, key),
+ GetProperty(isolate, ext, key),
NONE,
kNonStrictMode),
Handle<JSObject>());
@@ -10852,9 +11666,14 @@ static bool SetClosureVariableValue(Isolate* isolate,
// be variables introduced by eval.
if (context->has_extension()) {
Handle<JSObject> ext(JSObject::cast(context->extension()));
- if (ext->HasProperty(*variable_name)) {
+ if (JSReceiver::HasProperty(ext, variable_name)) {
// We don't expect this to do anything except replacing property value.
- SetProperty(ext, variable_name, new_value, NONE, kNonStrictMode);
+ SetProperty(isolate,
+ ext,
+ variable_name,
+ new_value,
+ NONE,
+ kNonStrictMode);
return true;
}
}
@@ -10869,12 +11688,18 @@ static Handle<JSObject> MaterializeCatchScope(Isolate* isolate,
Handle<Context> context) {
ASSERT(context->IsCatchContext());
Handle<String> name(String::cast(context->extension()));
- Handle<Object> thrown_object(context->get(Context::THROWN_OBJECT_INDEX));
+ Handle<Object> thrown_object(context->get(Context::THROWN_OBJECT_INDEX),
+ isolate);
Handle<JSObject> catch_scope =
isolate->factory()->NewJSObject(isolate->object_function());
RETURN_IF_EMPTY_HANDLE_VALUE(
isolate,
- SetProperty(catch_scope, name, thrown_object, NONE, kNonStrictMode),
+ SetProperty(isolate,
+ catch_scope,
+ name,
+ thrown_object,
+ NONE,
+ kNonStrictMode),
Handle<JSObject>());
return catch_scope;
}
@@ -10908,8 +11733,8 @@ static Handle<JSObject> MaterializeBlockScope(
isolate->factory()->NewJSObject(isolate->object_function());
// Fill all context locals.
- if (!CopyContextLocalsToScopeObject(
- isolate, scope_info, context, block_scope)) {
+ if (!ScopeInfo::CopyContextLocalsToScopeObject(
+ scope_info, context, block_scope)) {
return Handle<JSObject>();
}
@@ -10931,8 +11756,8 @@ static Handle<JSObject> MaterializeModuleScope(
isolate->factory()->NewJSObject(isolate->object_function());
// Fill all context locals.
- if (!CopyContextLocalsToScopeObject(
- isolate, scope_info, context, module_scope)) {
+ if (!ScopeInfo::CopyContextLocalsToScopeObject(
+ scope_info, context, module_scope)) {
return Handle<JSObject>();
}
@@ -10962,7 +11787,7 @@ class ScopeIterator {
: isolate_(isolate),
frame_(frame),
inlined_jsframe_index_(inlined_jsframe_index),
- function_(JSFunction::cast(frame->function())),
+ function_(frame->function()),
context_(Context::cast(frame->context())),
nested_scope_chain_(4),
failed_(false) {
@@ -10987,7 +11812,9 @@ class ScopeIterator {
// Find the break point where execution has stopped.
BreakLocationIterator break_location_iterator(debug_info,
ALL_BREAK_LOCATIONS);
- break_location_iterator.FindBreakLocationFromAddress(frame->pc());
+    // pc points to the instruction after the current one, possibly a break
+    // location as well. Hence the "- 1", to exclude it from the search.
+ break_location_iterator.FindBreakLocationFromAddress(frame->pc() - 1);
if (break_location_iterator.IsExit()) {
// We are within the return sequence. At the moment it is not possible to
// get a source position which is consistent with the current scope chain.
@@ -11000,7 +11827,9 @@ class ScopeIterator {
context_ = Handle<Context>(context_->previous(), isolate_);
}
}
- if (scope_info->Type() != EVAL_SCOPE) nested_scope_chain_.Add(scope_info);
+ if (scope_info->scope_type() != EVAL_SCOPE) {
+ nested_scope_chain_.Add(scope_info);
+ }
} else {
// Reparse the code and analyze the scopes.
Handle<Script> script(Script::cast(shared_info->script()));
@@ -11008,24 +11837,24 @@ class ScopeIterator {
// Check whether we are in global, eval or function code.
Handle<ScopeInfo> scope_info(shared_info->scope_info());
- if (scope_info->Type() != FUNCTION_SCOPE) {
+ if (scope_info->scope_type() != FUNCTION_SCOPE) {
// Global or eval code.
CompilationInfoWithZone info(script);
- if (scope_info->Type() == GLOBAL_SCOPE) {
+ if (scope_info->scope_type() == GLOBAL_SCOPE) {
info.MarkAsGlobal();
} else {
- ASSERT(scope_info->Type() == EVAL_SCOPE);
+ ASSERT(scope_info->scope_type() == EVAL_SCOPE);
info.MarkAsEval();
info.SetContext(Handle<Context>(function_->context()));
}
- if (ParserApi::Parse(&info, kNoParsingFlags) && Scope::Analyze(&info)) {
+ if (Parser::Parse(&info) && Scope::Analyze(&info)) {
scope = info.function()->scope();
}
RetrieveScopeChain(scope, shared_info);
} else {
// Function code
CompilationInfoWithZone info(shared_info);
- if (ParserApi::Parse(&info, kNoParsingFlags) && Scope::Analyze(&info)) {
+ if (Parser::Parse(&info) && Scope::Analyze(&info)) {
scope = info.function()->scope();
}
RetrieveScopeChain(scope, shared_info);
@@ -11080,7 +11909,7 @@ class ScopeIterator {
ASSERT(!failed_);
if (!nested_scope_chain_.is_empty()) {
Handle<ScopeInfo> scope_info = nested_scope_chain_.last();
- switch (scope_info->Type()) {
+ switch (scope_info->scope_type()) {
case FUNCTION_SCOPE:
ASSERT(context_->IsFunctionContext() ||
!scope_info->HasContext());
@@ -11221,7 +12050,7 @@ class ScopeIterator {
if (!CurrentContext().is_null()) {
CurrentContext()->Print();
if (CurrentContext()->has_extension()) {
- Handle<Object> extension(CurrentContext()->extension());
+ Handle<Object> extension(CurrentContext()->extension(), isolate_);
if (extension->IsJSContextExtensionObject()) {
extension->Print();
}
@@ -11245,7 +12074,7 @@ class ScopeIterator {
PrintF("Closure:\n");
CurrentContext()->Print();
if (CurrentContext()->has_extension()) {
- Handle<Object> extension(CurrentContext()->extension());
+ Handle<Object> extension(CurrentContext()->extension(), isolate_);
if (extension->IsJSContextExtensionObject()) {
extension->Print();
}
@@ -11318,6 +12147,86 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetScopeCount) {
}
+// Returns the list of step-in positions (text offsets) in the function of the
+// stack frame, in a range from the current debug break position to the end
+// of the corresponding statement.
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetStepInPositions) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 2);
+
+ // Check arguments.
+ Object* check;
+ { MaybeObject* maybe_check = Runtime_CheckExecutionState(
+ RUNTIME_ARGUMENTS(isolate, args));
+ if (!maybe_check->ToObject(&check)) return maybe_check;
+ }
+ CONVERT_SMI_ARG_CHECKED(wrapped_id, 1);
+
+ // Get the frame where the debugging is performed.
+ StackFrame::Id id = UnwrapFrameId(wrapped_id);
+ JavaScriptFrameIterator frame_it(isolate, id);
+ RUNTIME_ASSERT(!frame_it.done());
+
+ JavaScriptFrame* frame = frame_it.frame();
+
+ Handle<JSFunction> fun =
+ Handle<JSFunction>(frame->function());
+ Handle<SharedFunctionInfo> shared =
+ Handle<SharedFunctionInfo>(fun->shared());
+
+ if (!isolate->debug()->EnsureDebugInfo(shared, fun)) {
+ return isolate->heap()->undefined_value();
+ }
+
+ Handle<DebugInfo> debug_info = Debug::GetDebugInfo(shared);
+
+ int len = 0;
+ Handle<JSArray> array(isolate->factory()->NewJSArray(10));
+ // Find the break point where execution has stopped.
+ BreakLocationIterator break_location_iterator(debug_info,
+ ALL_BREAK_LOCATIONS);
+
+ break_location_iterator.FindBreakLocationFromAddress(frame->pc() - 1);
+ int current_statement_pos = break_location_iterator.statement_position();
+
+ while (!break_location_iterator.Done()) {
+ bool accept;
+ if (break_location_iterator.pc() > frame->pc()) {
+ accept = true;
+ } else {
+ StackFrame::Id break_frame_id = isolate->debug()->break_frame_id();
+      // The break point is near our pc. It could be a step-in possibility
+      // that is currently taken by an active debugger call.
+ if (break_frame_id == StackFrame::NO_ID) {
+ // We are not stepping.
+ accept = false;
+ } else {
+ JavaScriptFrameIterator additional_frame_it(isolate, break_frame_id);
+ // If our frame is a top frame and we are stepping, we can do step-in
+ // at this place.
+ accept = additional_frame_it.frame()->id() == id;
+ }
+ }
+ if (accept) {
+ if (break_location_iterator.IsStepInLocation(isolate)) {
+ Smi* position_value = Smi::FromInt(break_location_iterator.position());
+ JSObject::SetElement(array, len,
+ Handle<Object>(position_value, isolate),
+ NONE, kNonStrictMode);
+ len++;
+ }
+ }
+ // Advance iterator.
+ break_location_iterator.Next();
+ if (current_statement_pos !=
+ break_location_iterator.statement_position()) {
+ break;
+ }
+ }
+ return *array;
+}
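Runtime_GetStepInPositions above walks break locations and decides, per location, whether it still counts as a step-in candidate. A condensed standalone model of that acceptance rule (hypothetical helper, not the debugger's real API): locations past the frame's current pc are always accepted, while locations at or before it are accepted only when the debugger has a break frame and that break frame is the frame being inspected.

#include <cstdint>

static bool AcceptStepInLocation(uintptr_t location_pc,
                                 uintptr_t frame_pc,
                                 bool debugger_has_break_frame,
                                 bool inspected_frame_is_break_frame) {
  if (location_pc > frame_pc) return true;      // still ahead of us: accept
  if (!debugger_has_break_frame) return false;  // not stepping at all
  // Near or behind the pc: only valid when stepping in this very frame.
  return inspected_frame_is_break_frame;
}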
+
+
static const int kScopeDetailsTypeIndex = 0;
static const int kScopeDetailsObjectIndex = 1;
static const int kScopeDetailsSize = 2;
@@ -11338,6 +12247,7 @@ static MaybeObject* MaterializeScopeDetails(Isolate* isolate,
return *isolate->factory()->NewJSArrayWithElements(details);
}
+
// Return an array with scope details
// args[0]: number: break id
// args[1]: number: frame index
@@ -11482,7 +12392,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPrintScopes) {
#ifdef DEBUG
// Print the scopes for the top frame.
- StackFrameLocator locator;
+ StackFrameLocator locator(isolate);
JavaScriptFrame* frame = locator.FindJavaScriptFrame(0);
for (ScopeIterator it(isolate, frame, 0);
!it.Done();
@@ -11589,14 +12499,28 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetDisableBreak) {
}
+static bool IsPositionAlignmentCodeCorrect(int alignment) {
+ return alignment == STATEMENT_ALIGNED || alignment == BREAK_POSITION_ALIGNED;
+}
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetBreakLocations) {
HandleScope scope(isolate);
- ASSERT(args.length() == 1);
+ ASSERT(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0);
+ CONVERT_NUMBER_CHECKED(int32_t, statement_aligned_code, Int32, args[1]);
+
+ if (!IsPositionAlignmentCodeCorrect(statement_aligned_code)) {
+ return isolate->ThrowIllegalOperation();
+ }
+ BreakPositionAlignment alignment =
+ static_cast<BreakPositionAlignment>(statement_aligned_code);
+
Handle<SharedFunctionInfo> shared(fun->shared());
// Find the number of break points
- Handle<Object> break_locations = Debug::GetSourceBreakLocations(shared);
+ Handle<Object> break_locations =
+ Debug::GetSourceBreakLocations(shared, alignment);
if (break_locations->IsUndefined()) return isolate->heap()->undefined_value();
// Return array as JS array
return *isolate->factory()->NewJSArrayWithElements(
@@ -11629,14 +12553,22 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetFunctionBreakPoint) {
// GetScriptFromScriptData.
// args[0]: script to set break point in
// args[1]: number: break source position (within the script source)
-// args[2]: number: break point object
+// args[2]: number, breakpoint position alignment
+// args[3]: number: break point object
RUNTIME_FUNCTION(MaybeObject*, Runtime_SetScriptBreakPoint) {
HandleScope scope(isolate);
- ASSERT(args.length() == 3);
+ ASSERT(args.length() == 4);
CONVERT_ARG_HANDLE_CHECKED(JSValue, wrapper, 0);
CONVERT_NUMBER_CHECKED(int32_t, source_position, Int32, args[1]);
RUNTIME_ASSERT(source_position >= 0);
- Handle<Object> break_point_object_arg = args.at<Object>(2);
+ CONVERT_NUMBER_CHECKED(int32_t, statement_aligned_code, Int32, args[2]);
+ Handle<Object> break_point_object_arg = args.at<Object>(3);
+
+ if (!IsPositionAlignmentCodeCorrect(statement_aligned_code)) {
+ return isolate->ThrowIllegalOperation();
+ }
+ BreakPositionAlignment alignment =
+ static_cast<BreakPositionAlignment>(statement_aligned_code);
// Get the script from the script wrapper.
RUNTIME_ASSERT(wrapper->value()->IsScript());
@@ -11644,7 +12576,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetScriptBreakPoint) {
// Set break point.
if (!isolate->debug()->SetBreakPointForScript(script, break_point_object_arg,
- &source_position)) {
+ &source_position,
+ alignment)) {
return isolate->heap()->undefined_value();
}
@@ -11706,7 +12639,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_IsBreakOnException) {
// of frames to step down.
RUNTIME_FUNCTION(MaybeObject*, Runtime_PrepareStep) {
HandleScope scope(isolate);
- ASSERT(args.length() == 3);
+ ASSERT(args.length() == 4);
// Check arguments.
Object* check;
{ MaybeObject* maybe_check = Runtime_CheckExecutionState(
@@ -11714,7 +12647,16 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_PrepareStep) {
if (!maybe_check->ToObject(&check)) return maybe_check;
}
if (!args[1]->IsNumber() || !args[2]->IsNumber()) {
- return isolate->Throw(isolate->heap()->illegal_argument_symbol());
+ return isolate->Throw(isolate->heap()->illegal_argument_string());
+ }
+
+ CONVERT_NUMBER_CHECKED(int, wrapped_frame_id, Int32, args[3]);
+
+ StackFrame::Id frame_id;
+ if (wrapped_frame_id == 0) {
+ frame_id = StackFrame::NO_ID;
+ } else {
+ frame_id = UnwrapFrameId(wrapped_frame_id);
}
// Get the step action and check validity.
@@ -11724,13 +12666,18 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_PrepareStep) {
step_action != StepOut &&
step_action != StepInMin &&
step_action != StepMin) {
- return isolate->Throw(isolate->heap()->illegal_argument_symbol());
+ return isolate->Throw(isolate->heap()->illegal_argument_string());
+ }
+
+ if (frame_id != StackFrame::NO_ID && step_action != StepNext &&
+ step_action != StepMin && step_action != StepOut) {
+ return isolate->ThrowIllegalOperation();
}
// Get the number of steps.
int step_count = NumberToInt32(args[2]);
if (step_count < 1) {
- return isolate->Throw(isolate->heap()->illegal_argument_symbol());
+ return isolate->Throw(isolate->heap()->illegal_argument_string());
}
// Clear all current stepping setup.
@@ -11738,7 +12685,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_PrepareStep) {
// Prepare step.
isolate->debug()->PrepareStep(static_cast<StepAction>(step_action),
- step_count);
+ step_count,
+ frame_id);
return isolate->heap()->undefined_value();
}
@@ -11752,129 +12700,80 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ClearStepping) {
}
-// Creates a copy of the with context chain. The copy of the context chain is
-// is linked to the function context supplied.
-static Handle<Context> CopyNestedScopeContextChain(Isolate* isolate,
- Handle<JSFunction> function,
- Handle<Context> base,
- JavaScriptFrame* frame,
- int inlined_jsframe_index) {
- HandleScope scope(isolate);
- List<Handle<ScopeInfo> > scope_chain;
- List<Handle<Context> > context_chain;
-
- ScopeIterator it(isolate, frame, inlined_jsframe_index);
- if (it.Failed()) return Handle<Context>::null();
-
- for (; it.Type() != ScopeIterator::ScopeTypeGlobal &&
- it.Type() != ScopeIterator::ScopeTypeLocal ; it.Next()) {
- ASSERT(!it.Done());
- scope_chain.Add(it.CurrentScopeInfo());
- context_chain.Add(it.CurrentContext());
- }
-
- // At the end of the chain. Return the base context to link to.
- Handle<Context> context = base;
-
- // Iteratively copy and or materialize the nested contexts.
- while (!scope_chain.is_empty()) {
- Handle<ScopeInfo> scope_info = scope_chain.RemoveLast();
- Handle<Context> current = context_chain.RemoveLast();
- ASSERT(!(scope_info->HasContext() & current.is_null()));
-
- if (scope_info->Type() == CATCH_SCOPE) {
- Handle<String> name(String::cast(current->extension()));
- Handle<Object> thrown_object(current->get(Context::THROWN_OBJECT_INDEX));
- context =
- isolate->factory()->NewCatchContext(function,
- context,
- name,
- thrown_object);
- } else if (scope_info->Type() == BLOCK_SCOPE) {
- // Materialize the contents of the block scope into a JSObject.
- Handle<JSObject> block_scope_object =
- MaterializeBlockScope(isolate, current);
- CHECK(!block_scope_object.is_null());
- // Allocate a new function context for the debug evaluation and set the
- // extension object.
- Handle<Context> new_context =
- isolate->factory()->NewFunctionContext(Context::MIN_CONTEXT_SLOTS,
- function);
- new_context->set_extension(*block_scope_object);
- new_context->set_previous(*context);
- context = new_context;
- } else {
- ASSERT(scope_info->Type() == WITH_SCOPE);
- ASSERT(current->IsWithContext());
- Handle<JSObject> extension(JSObject::cast(current->extension()));
- context =
- isolate->factory()->NewWithContext(function, context, extension);
- }
- }
-
- return scope.CloseAndEscape(context);
-}
-
-
// Helper function to find or create the arguments object for
// Runtime_DebugEvaluate.
-static Handle<Object> GetArgumentsObject(Isolate* isolate,
- JavaScriptFrame* frame,
- FrameInspector* frame_inspector,
- Handle<ScopeInfo> scope_info,
- Handle<Context> function_context) {
- // Try to find the value of 'arguments' to pass as parameter. If it is not
- // found (that is the debugged function does not reference 'arguments' and
- // does not support eval) then create an 'arguments' object.
- int index;
- if (scope_info->StackLocalCount() > 0) {
- index = scope_info->StackSlotIndex(isolate->heap()->arguments_symbol());
- if (index != -1) {
- return Handle<Object>(frame->GetExpression(index), isolate);
- }
+static Handle<JSObject> MaterializeArgumentsObject(
+ Isolate* isolate,
+ Handle<JSObject> target,
+ Handle<JSFunction> function) {
+ // Do not materialize the arguments object for eval or top-level code.
+ // Skip if "arguments" is already taken.
+ if (!function->shared()->is_function() ||
+ JSReceiver::HasLocalProperty(target,
+ isolate->factory()->arguments_string())) {
+ return target;
+ }
+
+ // FunctionGetArguments can't throw an exception.
+ Handle<JSObject> arguments = Handle<JSObject>::cast(
+ Accessors::FunctionGetArguments(function));
+ SetProperty(isolate,
+ target,
+ isolate->factory()->arguments_string(),
+ arguments,
+ ::NONE,
+ kNonStrictMode);
+ return target;
+}
+
+
+// Compile and evaluate source for the given context.
+static MaybeObject* DebugEvaluate(Isolate* isolate,
+ Handle<Context> context,
+ Handle<Object> context_extension,
+ Handle<Object> receiver,
+ Handle<String> source) {
+ if (context_extension->IsJSObject()) {
+ Handle<JSObject> extension = Handle<JSObject>::cast(context_extension);
+ Handle<JSFunction> closure(context->closure(), isolate);
+ context = isolate->factory()->NewWithContext(closure, context, extension);
}
- if (scope_info->HasHeapAllocatedLocals()) {
- VariableMode mode;
- InitializationFlag init_flag;
- index = scope_info->ContextSlotIndex(
- isolate->heap()->arguments_symbol(), &mode, &init_flag);
- if (index != -1) {
- return Handle<Object>(function_context->get(index), isolate);
- }
- }
+ Handle<SharedFunctionInfo> shared = Compiler::CompileEval(
+ source,
+ context,
+ context->IsNativeContext(),
+ CLASSIC_MODE,
+ NO_PARSE_RESTRICTION,
+ RelocInfo::kNoPosition);
+ RETURN_IF_EMPTY_HANDLE(isolate, shared);
- Handle<JSFunction> function(JSFunction::cast(frame_inspector->GetFunction()));
- int length = frame_inspector->GetParametersCount();
- Handle<JSObject> arguments =
- isolate->factory()->NewArgumentsObject(function, length);
- Handle<FixedArray> array = isolate->factory()->NewFixedArray(length);
+ Handle<JSFunction> eval_fun =
+ isolate->factory()->NewFunctionFromSharedFunctionInfo(
+ shared, context, NOT_TENURED);
+ bool pending_exception;
+ Handle<Object> result = Execution::Call(
+ isolate, eval_fun, receiver, 0, NULL, &pending_exception);
- AssertNoAllocation no_gc;
- WriteBarrierMode mode = array->GetWriteBarrierMode(no_gc);
- for (int i = 0; i < length; i++) {
- array->set(i, frame_inspector->GetParameter(i), mode);
- }
- arguments->set_elements(*array);
- return arguments;
-}
+ if (pending_exception) return Failure::Exception();
+ // Skip the global proxy as it has no properties and always delegates to the
+ // real global object.
+ if (result->IsJSGlobalProxy()) {
+ result = Handle<JSObject>(JSObject::cast(result->GetPrototype(isolate)));
+ }
-static const char kSourceStr[] =
- "(function(arguments,__source__){return eval(__source__);})";
+ // Clear the oneshot breakpoints so that the debugger does not step further.
+ isolate->debug()->ClearStepping();
+ return *result;
+}
// Evaluate a piece of JavaScript in the context of a stack frame for
-// debugging. This is accomplished by creating a new context which in its
-// extension part has all the parameters and locals of the function on the
-// stack frame. A function which calls eval with the code to evaluate is then
-// compiled in this context and called in this context. As this context
-// replaces the context of the function on the stack frame a new (empty)
-// function is created as well to be used as the closure for the context.
-// This function and the context acts as replacements for the function on the
-// stack frame presenting the same view of the values of parameters and
-// local variables as if the piece of JavaScript was evaluated at the point
-// where the function on the stack frame is currently stopped.
+// debugging. Things that need special attention are:
+// - Parameters and stack-allocated locals need to be materialized. Altered
+// values need to be written back to the stack afterwards.
+// - The arguments object needs to be materialized.
RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluate) {
HandleScope scope(isolate);
@@ -11882,20 +12781,18 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluate) {
// evaluated.
ASSERT(args.length() == 6);
Object* check_result;
- { MaybeObject* maybe_check_result = Runtime_CheckExecutionState(
+ { MaybeObject* maybe_result = Runtime_CheckExecutionState(
RUNTIME_ARGUMENTS(isolate, args));
- if (!maybe_check_result->ToObject(&check_result)) {
- return maybe_check_result;
- }
+ if (!maybe_result->ToObject(&check_result)) return maybe_result;
}
CONVERT_SMI_ARG_CHECKED(wrapped_id, 1);
CONVERT_NUMBER_CHECKED(int, inlined_jsframe_index, Int32, args[2]);
CONVERT_ARG_HANDLE_CHECKED(String, source, 3);
CONVERT_BOOLEAN_ARG_CHECKED(disable_break, 4);
- Handle<Object> additional_context(args[5]);
+ Handle<Object> context_extension(args[5], isolate);
// Handle the processing of break.
- DisableBreak disable_break_save(disable_break);
+ DisableBreak disable_break_save(isolate, disable_break);
// Get the frame where the debugging is performed.
StackFrame::Id id = UnwrapFrameId(wrapped_id);
@@ -11903,7 +12800,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluate) {
JavaScriptFrame* frame = it.frame();
FrameInspector frame_inspector(frame, inlined_jsframe_index, isolate);
Handle<JSFunction> function(JSFunction::cast(frame_inspector.GetFunction()));
- Handle<ScopeInfo> scope_info(function->shared()->scope_info());
// Traverse the saved contexts chain to find the active context for the
// selected frame.
@@ -11912,126 +12808,36 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluate) {
SaveContext savex(isolate);
isolate->set_context(*(save->context()));
- // Create the (empty) function replacing the function on the stack frame for
- // the purpose of evaluating in the context created below. It is important
- // that this function does not describe any parameters and local variables
- // in the context. If it does then this will cause problems with the lookup
- // in Context::Lookup, where context slots for parameters and local variables
- // are looked at before the extension object.
- Handle<JSFunction> go_between =
- isolate->factory()->NewFunction(isolate->factory()->empty_string(),
- isolate->factory()->undefined_value());
- go_between->set_context(function->context());
-#ifdef DEBUG
- Handle<ScopeInfo> go_between_scope_info(go_between->shared()->scope_info());
- ASSERT(go_between_scope_info->ParameterCount() == 0);
- ASSERT(go_between_scope_info->ContextLocalCount() == 0);
-#endif
-
- // Materialize the content of the local scope into a JSObject.
- Handle<JSObject> local_scope = MaterializeLocalScopeWithFrameInspector(
- isolate, frame, &frame_inspector);
- RETURN_IF_EMPTY_HANDLE(isolate, local_scope);
-
- // Allocate a new context for the debug evaluation and set the extension
- // object build.
- Handle<Context> context =
- isolate->factory()->NewFunctionContext(Context::MIN_CONTEXT_SLOTS,
- go_between);
-
- // Use the materialized local scope in a with context.
- context =
- isolate->factory()->NewWithContext(go_between, context, local_scope);
+ // Evaluate on the context of the frame.
+ Handle<Context> context(Context::cast(frame->context()));
+ ASSERT(!context.is_null());
- // Copy any with contexts present and chain them in front of this context.
- Handle<Context> frame_context(Context::cast(frame->context()));
- Handle<Context> function_context;
- // Get the function's context if it has one.
- if (scope_info->HasContext()) {
- function_context = Handle<Context>(frame_context->declaration_context());
- }
- context = CopyNestedScopeContextChain(isolate,
- go_between,
- context,
- frame,
- inlined_jsframe_index);
- if (context.is_null()) {
- ASSERT(isolate->has_pending_exception());
- MaybeObject* exception = isolate->pending_exception();
- isolate->clear_pending_exception();
- return exception;
- }
+ // Materialize stack locals and the arguments object.
+ Handle<JSObject> materialized =
+ isolate->factory()->NewJSObject(isolate->object_function());
- if (additional_context->IsJSObject()) {
- Handle<JSObject> extension = Handle<JSObject>::cast(additional_context);
- context =
- isolate->factory()->NewWithContext(go_between, context, extension);
- }
+ materialized = MaterializeStackLocalsWithFrameInspector(
+ isolate, materialized, function, &frame_inspector);
+ RETURN_IF_EMPTY_HANDLE(isolate, materialized);
- // Wrap the evaluation statement in a new function compiled in the newly
- // created context. The function has one parameter which has to be called
- // 'arguments'. This it to have access to what would have been 'arguments' in
- // the function being debugged.
- // function(arguments,__source__) {return eval(__source__);}
+ materialized = MaterializeArgumentsObject(isolate, materialized, function);
+ RETURN_IF_EMPTY_HANDLE(isolate, materialized);
- Handle<String> function_source =
- isolate->factory()->NewStringFromAscii(
- Vector<const char>(kSourceStr, sizeof(kSourceStr) - 1));
+ // Add the materialized object in a with-scope to shadow the stack locals.
+ context = isolate->factory()->NewWithContext(function, context, materialized);
- // Currently, the eval code will be executed in non-strict mode,
- // even in the strict code context.
- Handle<SharedFunctionInfo> shared =
- Compiler::CompileEval(function_source,
- context,
- context->IsNativeContext(),
- CLASSIC_MODE,
- RelocInfo::kNoPosition);
- if (shared.is_null()) return Failure::Exception();
- Handle<JSFunction> compiled_function =
- isolate->factory()->NewFunctionFromSharedFunctionInfo(shared, context);
-
- // Invoke the result of the compilation to get the evaluation function.
- bool has_pending_exception;
Handle<Object> receiver(frame->receiver(), isolate);
- Handle<Object> evaluation_function =
- Execution::Call(compiled_function, receiver, 0, NULL,
- &has_pending_exception);
- if (has_pending_exception) return Failure::Exception();
-
- Handle<Object> arguments = GetArgumentsObject(isolate,
- frame,
- &frame_inspector,
- scope_info,
- function_context);
-
- // Check if eval is blocked in the context and temporarily allow it
- // for debugger.
- Handle<Context> native_context = Handle<Context>(context->native_context());
- bool eval_disabled =
- native_context->allow_code_gen_from_strings()->IsFalse();
- if (eval_disabled) {
- native_context->set_allow_code_gen_from_strings(
- isolate->heap()->true_value());
- }
- // Invoke the evaluation function and return the result.
- Handle<Object> argv[] = { arguments, source };
- Handle<Object> result =
- Execution::Call(Handle<JSFunction>::cast(evaluation_function),
- receiver,
- ARRAY_SIZE(argv),
- argv,
- &has_pending_exception);
- if (eval_disabled) {
- native_context->set_allow_code_gen_from_strings(
- isolate->heap()->false_value());
+ Object* evaluate_result_object;
+ { MaybeObject* maybe_result =
+ DebugEvaluate(isolate, context, context_extension, receiver, source);
+ if (!maybe_result->ToObject(&evaluate_result_object)) return maybe_result;
}
- if (has_pending_exception) return Failure::Exception();
- // Skip the global proxy as it has no properties and always delegates to the
- // real global object.
- if (result->IsJSGlobalProxy()) {
- result = Handle<JSObject>(JSObject::cast(result->GetPrototype()));
- }
+ Handle<Object> result(evaluate_result_object, isolate);
+
+ // Write back potential changes to materialized stack locals to the stack.
+ UpdateStackLocalsFromMaterializedObject(
+ isolate, materialized, function, frame, inlined_jsframe_index);
return *result;
}
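As an aside, the strategy implemented above (materialize, shadow, write back) can be summarized by this standalone sketch, which is illustrative only and not part of the patch; a std::map stands in for the frame's stack slots:

#include <map>
#include <string>

typedef std::map<std::string, int> FrameLocals;  // hypothetical stand-in

static void DebugEvaluateSketch(FrameLocals* frame) {
  // Materialize stack-allocated locals into an ordinary object.
  FrameLocals materialized = *frame;
  // Conceptually the source then runs as:  with (materialized) { eval(src); }
  // so reads and writes of locals hit the materialized copies.
  materialized["counter"] += 1;  // stands in for an assignment in the source
  // Afterwards any changes are written back to the real stack slots.
  *frame = materialized;
}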
@@ -12044,18 +12850,16 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluateGlobal) {
// evaluated.
ASSERT(args.length() == 4);
Object* check_result;
- { MaybeObject* maybe_check_result = Runtime_CheckExecutionState(
+ { MaybeObject* maybe_result = Runtime_CheckExecutionState(
RUNTIME_ARGUMENTS(isolate, args));
- if (!maybe_check_result->ToObject(&check_result)) {
- return maybe_check_result;
- }
+ if (!maybe_result->ToObject(&check_result)) return maybe_result;
}
CONVERT_ARG_HANDLE_CHECKED(String, source, 1);
CONVERT_BOOLEAN_ARG_CHECKED(disable_break, 2);
- Handle<Object> additional_context(args[3]);
+ Handle<Object> context_extension(args[3], isolate);
// Handle the processing of break.
- DisableBreak disable_break_save(disable_break);
+ DisableBreak disable_break_save(isolate, disable_break);
// Enter the top context from before the debugger was invoked.
SaveContext save(isolate);
@@ -12070,44 +12874,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluateGlobal) {
// Get the native context now set to the top context from before the
// debugger was invoked.
Handle<Context> context = isolate->native_context();
-
- bool is_global = true;
-
- if (additional_context->IsJSObject()) {
- // Create a new with context with the additional context information between
- // the context of the debugged function and the eval code to be executed.
- context = isolate->factory()->NewWithContext(
- Handle<JSFunction>(context->closure()),
- context,
- Handle<JSObject>::cast(additional_context));
- is_global = false;
- }
-
- // Compile the source to be evaluated.
- // Currently, the eval code will be executed in non-strict mode,
- // even in the strict code context.
- Handle<SharedFunctionInfo> shared =
- Compiler::CompileEval(source,
- context,
- is_global,
- CLASSIC_MODE,
- RelocInfo::kNoPosition);
- if (shared.is_null()) return Failure::Exception();
- Handle<JSFunction> compiled_function =
- Handle<JSFunction>(
- isolate->factory()->NewFunctionFromSharedFunctionInfo(shared,
- context));
-
- // Invoke the result of the compilation to get the evaluation function.
- bool has_pending_exception;
Handle<Object> receiver = isolate->global_object();
- Handle<Object> result =
- Execution::Call(compiled_function, receiver, 0, NULL,
- &has_pending_exception);
- // Clear the oneshot breakpoints so that the debugger does not step further.
- isolate->debug()->ClearStepping();
- if (has_pending_exception) return Failure::Exception();
- return *result;
+ return DebugEvaluate(isolate, context, context_extension, receiver, source);
}
@@ -12144,8 +12912,9 @@ static int DebugReferencedBy(HeapIterator* iterator,
Object* instance_filter, int max_references,
FixedArray* instances, int instances_size,
JSFunction* arguments_function) {
- NoHandleAllocation ha;
- AssertNoAllocation no_alloc;
+ Isolate* isolate = target->GetIsolate();
+ SealHandleScope shs(isolate);
+ DisallowHeapAllocation no_allocation;
// Iterate the heap.
int count = 0;
@@ -12170,7 +12939,7 @@ static int DebugReferencedBy(HeapIterator* iterator,
if (!instance_filter->IsUndefined()) {
Object* V = obj;
while (true) {
- Object* prototype = V->GetPrototype();
+ Object* prototype = V->GetPrototype(isolate);
if (prototype->IsNull()) {
break;
}
@@ -12213,6 +12982,7 @@ static int DebugReferencedBy(HeapIterator* iterator,
// args[1]: constructor function for instances to exclude (Mirror)
// args[2]: the maximum number of objects to return
RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugReferencedBy) {
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 3);
// First perform a full GC in order to avoid references from dead objects.
@@ -12240,29 +13010,30 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugReferencedBy) {
// Get the number of referencing objects.
int count;
- HeapIterator heap_iterator;
+ Heap* heap = isolate->heap();
+ HeapIterator heap_iterator(heap);
count = DebugReferencedBy(&heap_iterator,
target, instance_filter, max_references,
NULL, 0, arguments_function);
// Allocate an array to hold the result.
Object* object;
- { MaybeObject* maybe_object = isolate->heap()->AllocateFixedArray(count);
+ { MaybeObject* maybe_object = heap->AllocateFixedArray(count);
if (!maybe_object->ToObject(&object)) return maybe_object;
}
FixedArray* instances = FixedArray::cast(object);
// Fill the referencing objects.
// AllocateFixedArray above does not make the heap non-iterable.
- ASSERT(HEAP->IsHeapIterable());
- HeapIterator heap_iterator2;
+ ASSERT(heap->IsHeapIterable());
+ HeapIterator heap_iterator2(heap);
count = DebugReferencedBy(&heap_iterator2,
target, instance_filter, max_references,
instances, count, arguments_function);
// Return result as JS array.
Object* result;
- MaybeObject* maybe_result = isolate->heap()->AllocateJSObject(
+ MaybeObject* maybe_result = heap->AllocateJSObject(
isolate->context()->native_context()->array_function());
if (!maybe_result->ToObject(&result)) return maybe_result;
return JSArray::cast(result)->SetContent(instances);
@@ -12275,7 +13046,7 @@ static int DebugConstructedBy(HeapIterator* iterator,
int max_references,
FixedArray* instances,
int instances_size) {
- AssertNoAllocation no_alloc;
+ DisallowHeapAllocation no_allocation;
// Iterate the heap.
int count = 0;
@@ -12305,11 +13076,12 @@ static int DebugConstructedBy(HeapIterator* iterator,
// args[0]: the constructor to find instances of
// args[1]: the maximum number of objects to return
RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugConstructedBy) {
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
// First perform a full GC in order to avoid dead objects.
- isolate->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask,
- "%DebugConstructedBy");
+ Heap* heap = isolate->heap();
+ heap->CollectAllGarbage(Heap::kMakeHeapIterableMask, "%DebugConstructedBy");
// Check parameters.
CONVERT_ARG_CHECKED(JSFunction, constructor, 0);
@@ -12318,7 +13090,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugConstructedBy) {
// Get the number of referencing objects.
int count;
- HeapIterator heap_iterator;
+ HeapIterator heap_iterator(heap);
count = DebugConstructedBy(&heap_iterator,
constructor,
max_references,
@@ -12327,14 +13099,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugConstructedBy) {
// Allocate an array to hold the result.
Object* object;
- { MaybeObject* maybe_object = isolate->heap()->AllocateFixedArray(count);
+ { MaybeObject* maybe_object = heap->AllocateFixedArray(count);
if (!maybe_object->ToObject(&object)) return maybe_object;
}
FixedArray* instances = FixedArray::cast(object);
- ASSERT(HEAP->IsHeapIterable());
+ ASSERT(isolate->heap()->IsHeapIterable());
// Fill the referencing objects.
- HeapIterator heap_iterator2;
+ HeapIterator heap_iterator2(heap);
count = DebugConstructedBy(&heap_iterator2,
constructor,
max_references,
@@ -12344,7 +13116,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugConstructedBy) {
// Return result as JS array.
Object* result;
{ MaybeObject* maybe_result = isolate->heap()->AllocateJSObject(
- isolate->context()->native_context()->array_function());
+ isolate->context()->native_context()->array_function());
if (!maybe_result->ToObject(&result)) return maybe_result;
}
return JSArray::cast(result)->SetContent(instances);
@@ -12354,12 +13126,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugConstructedBy) {
// Find the effective prototype object as returned by __proto__.
// args[0]: the object to find the prototype for.
RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugGetPrototype) {
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
-
CONVERT_ARG_CHECKED(JSObject, obj, 0);
-
- // Use the __proto__ accessor.
- return Accessors::ObjectPrototype.getter(obj, NULL);
+ return GetPrototypeSkipHiddenPrototypes(isolate, obj);
}
@@ -12374,7 +13144,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugSetScriptSource) {
RUNTIME_ASSERT(script_wrapper->value()->IsScript());
Handle<Script> script(Script::cast(script_wrapper->value()));
- int compilation_state = Smi::cast(script->compilation_state())->value();
+ int compilation_state = script->compilation_state();
RUNTIME_ASSERT(compilation_state == Script::COMPILATION_STATE_INITIAL);
script->set_source(*source);
@@ -12383,15 +13153,16 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugSetScriptSource) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_SystemBreak) {
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 0);
- CPU::DebugBreak();
+ OS::DebugBreak();
return isolate->heap()->undefined_value();
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugDisassembleFunction) {
-#ifdef DEBUG
HandleScope scope(isolate);
+#ifdef DEBUG
ASSERT(args.length() == 1);
// Get the function and make sure it is compiled.
CONVERT_ARG_HANDLE_CHECKED(JSFunction, func, 0);
@@ -12405,8 +13176,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugDisassembleFunction) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugDisassembleConstructor) {
-#ifdef DEBUG
HandleScope scope(isolate);
+#ifdef DEBUG
ASSERT(args.length() == 1);
// Get the function and make sure it is compiled.
CONVERT_ARG_HANDLE_CHECKED(JSFunction, func, 0);
@@ -12420,7 +13191,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugDisassembleConstructor) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetInferredName) {
- NoHandleAllocation ha;
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSFunction, f, 0);
@@ -12431,7 +13202,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetInferredName) {
static int FindSharedFunctionInfosForScript(HeapIterator* iterator,
Script* script,
FixedArray* buffer) {
- AssertNoAllocation no_allocations;
+ DisallowHeapAllocation no_allocation;
int counter = 0;
int buffer_size = buffer->length();
for (HeapObject* obj = iterator->next();
@@ -12453,14 +13224,15 @@ static int FindSharedFunctionInfosForScript(HeapIterator* iterator,
return counter;
}
+
// For a given script, finds all SharedFunctionInfo's in the heap that point
// to this script. Returns JSArray of SharedFunctionInfo wrapped
// in OpaqueReferences.
RUNTIME_FUNCTION(MaybeObject*,
Runtime_LiveEditFindSharedFunctionInfosForScript) {
+ HandleScope scope(isolate);
CHECK(isolate->debugger()->live_edit_enabled());
ASSERT(args.length() == 1);
- HandleScope scope(isolate);
CONVERT_ARG_CHECKED(JSValue, script_value, 0);
RUNTIME_ASSERT(script_value->value()->IsScript());
@@ -12471,19 +13243,20 @@ RUNTIME_FUNCTION(MaybeObject*,
Handle<FixedArray> array;
array = isolate->factory()->NewFixedArray(kBufferSize);
int number;
+ Heap* heap = isolate->heap();
{
- isolate->heap()->EnsureHeapIsIterable();
- AssertNoAllocation no_allocations;
- HeapIterator heap_iterator;
+ heap->EnsureHeapIsIterable();
+ DisallowHeapAllocation no_allocation;
+ HeapIterator heap_iterator(heap);
Script* scr = *script;
FixedArray* arr = *array;
number = FindSharedFunctionInfosForScript(&heap_iterator, scr, arr);
}
if (number > kBufferSize) {
array = isolate->factory()->NewFixedArray(number);
- isolate->heap()->EnsureHeapIsIterable();
- AssertNoAllocation no_allocations;
- HeapIterator heap_iterator;
+ heap->EnsureHeapIsIterable();
+ DisallowHeapAllocation no_allocation;
+ HeapIterator heap_iterator(heap);
Script* scr = *script;
FixedArray* arr = *array;
FindSharedFunctionInfosForScript(&heap_iterator, scr, arr);
@@ -12497,6 +13270,7 @@ RUNTIME_FUNCTION(MaybeObject*,
return *result;
}
+
// For a script calculates compilation information about all its functions.
// The script source is explicitly specified by the second argument.
// The source of the actual script is not used, however it is important that
@@ -12505,9 +13279,9 @@ RUNTIME_FUNCTION(MaybeObject*,
// each function with all its descendant is always stored in a continues range
// with the function itself going first. The root function is a script function.
RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditGatherCompileInfo) {
+ HandleScope scope(isolate);
CHECK(isolate->debugger()->live_edit_enabled());
ASSERT(args.length() == 2);
- HandleScope scope(isolate);
CONVERT_ARG_CHECKED(JSValue, script, 0);
CONVERT_ARG_HANDLE_CHECKED(String, source, 1);
@@ -12523,13 +13297,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditGatherCompileInfo) {
return result;
}
+
// Changes the source of the script to new_source.
// If old_script_name is provided (i.e. is a String), also creates a copy of
// the script with its original source and sends a notification to the debugger.
RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditReplaceScript) {
+ HandleScope scope(isolate);
CHECK(isolate->debugger()->live_edit_enabled());
ASSERT(args.length() == 3);
- HandleScope scope(isolate);
CONVERT_ARG_CHECKED(JSValue, original_script_value, 0);
CONVERT_ARG_HANDLE_CHECKED(String, new_source, 1);
Handle<Object> old_script_name(args[2], isolate);
@@ -12551,9 +13326,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditReplaceScript) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditFunctionSourceUpdated) {
+ HandleScope scope(isolate);
CHECK(isolate->debugger()->live_edit_enabled());
ASSERT(args.length() == 1);
- HandleScope scope(isolate);
CONVERT_ARG_HANDLE_CHECKED(JSArray, shared_info, 0);
return LiveEdit::FunctionSourceUpdated(shared_info);
}
@@ -12561,20 +13336,21 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditFunctionSourceUpdated) {
// Replaces code of SharedFunctionInfo with a new one.
RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditReplaceFunctionCode) {
+ HandleScope scope(isolate);
CHECK(isolate->debugger()->live_edit_enabled());
ASSERT(args.length() == 2);
- HandleScope scope(isolate);
CONVERT_ARG_HANDLE_CHECKED(JSArray, new_compile_info, 0);
CONVERT_ARG_HANDLE_CHECKED(JSArray, shared_info, 1);
return LiveEdit::ReplaceFunctionCode(new_compile_info, shared_info);
}
+
// Connects SharedFunctionInfo to another script.
RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditFunctionSetScript) {
+ HandleScope scope(isolate);
CHECK(isolate->debugger()->live_edit_enabled());
ASSERT(args.length() == 2);
- HandleScope scope(isolate);
Handle<Object> function_object(args[0], isolate);
Handle<Object> script_object(args[1], isolate);
@@ -12599,9 +13375,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditFunctionSetScript) {
// In the code of a parent function, replaces the original function (embedded
// as an object) with a substitution one.
RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditReplaceRefToNestedFunction) {
+ HandleScope scope(isolate);
CHECK(isolate->debugger()->live_edit_enabled());
ASSERT(args.length() == 3);
- HandleScope scope(isolate);
CONVERT_ARG_HANDLE_CHECKED(JSValue, parent_wrapper, 0);
CONVERT_ARG_HANDLE_CHECKED(JSValue, orig_wrapper, 1);
@@ -12620,9 +13396,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditReplaceRefToNestedFunction) {
// (change_begin, change_end, change_end_new_position).
// Each group describes a change in text; groups are sorted by change_begin.
RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditPatchFunctionPositions) {
+ HandleScope scope(isolate);
CHECK(isolate->debugger()->live_edit_enabled());
ASSERT(args.length() == 2);
- HandleScope scope(isolate);
CONVERT_ARG_HANDLE_CHECKED(JSArray, shared_array, 0);
CONVERT_ARG_HANDLE_CHECKED(JSArray, position_change_array, 1);
@@ -12635,23 +13411,23 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditPatchFunctionPositions) {
// Returns array of the same length with corresponding results of
// LiveEdit::FunctionPatchabilityStatus type.
RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditCheckAndDropActivations) {
+ HandleScope scope(isolate);
CHECK(isolate->debugger()->live_edit_enabled());
ASSERT(args.length() == 2);
- HandleScope scope(isolate);
CONVERT_ARG_HANDLE_CHECKED(JSArray, shared_array, 0);
CONVERT_BOOLEAN_ARG_CHECKED(do_drop, 1);
- return *LiveEdit::CheckAndDropActivations(shared_array, do_drop,
- isolate->runtime_zone());
+ return *LiveEdit::CheckAndDropActivations(shared_array, do_drop);
}
+
// Compares two strings line-by-line, then token-wise, and returns the diff in
// the form of a JSArray of triplets (pos1, pos1_end, pos2_end) describing a
// list of diff chunks.
RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditCompareStrings) {
+ HandleScope scope(isolate);
CHECK(isolate->debugger()->live_edit_enabled());
ASSERT(args.length() == 2);
- HandleScope scope(isolate);
CONVERT_ARG_HANDLE_CHECKED(String, s1, 0);
CONVERT_ARG_HANDLE_CHECKED(String, s2, 1);
@@ -12662,8 +13438,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditCompareStrings) {
// Restarts a call frame and completely drops all frames above.
// Returns true if successful. Otherwise returns undefined or an error message.
RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditRestartFrame) {
- CHECK(isolate->debugger()->live_edit_enabled());
HandleScope scope(isolate);
+ CHECK(isolate->debugger()->live_edit_enabled());
ASSERT(args.length() == 2);
// Check arguments.
@@ -12690,10 +13466,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditRestartFrame) {
}
if (it.done()) return heap->undefined_value();
- const char* error_message =
- LiveEdit::RestartFrame(it.frame(), isolate->runtime_zone());
+ const char* error_message = LiveEdit::RestartFrame(it.frame());
if (error_message) {
- return *(isolate->factory()->LookupAsciiSymbol(error_message));
+ return *(isolate->factory()->InternalizeUtf8String(error_message));
}
return heap->true_value();
}
@@ -12702,9 +13477,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditRestartFrame) {
// A testing entry. Returns statement position which is the closest to
// source_position.
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFunctionCodePositionFromSource) {
+ HandleScope scope(isolate);
CHECK(isolate->debugger()->live_edit_enabled());
ASSERT(args.length() == 2);
- HandleScope scope(isolate);
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
CONVERT_NUMBER_CHECKED(int32_t, source_position, Int32, args[1]);
@@ -12740,8 +13515,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFunctionCodePositionFromSource) {
// This is used in unit tests to run code as if the debugger is entered or
// simply to have a stack with a C++ frame in the middle.
RUNTIME_FUNCTION(MaybeObject*, Runtime_ExecuteInDebugContext) {
- ASSERT(args.length() == 2);
HandleScope scope(isolate);
+ ASSERT(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
CONVERT_BOOLEAN_ARG_CHECKED(without_debugger, 1);
@@ -12749,11 +13524,19 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ExecuteInDebugContext) {
bool pending_exception;
{
if (without_debugger) {
- result = Execution::Call(function, isolate->global_object(), 0, NULL,
+ result = Execution::Call(isolate,
+ function,
+ isolate->global_object(),
+ 0,
+ NULL,
&pending_exception);
} else {
- EnterDebugger enter_debugger;
- result = Execution::Call(function, isolate->global_object(), 0, NULL,
+ EnterDebugger enter_debugger(isolate);
+ result = Execution::Call(isolate,
+ function,
+ isolate->global_object(),
+ 0,
+ NULL,
&pending_exception);
}
}
@@ -12767,6 +13550,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ExecuteInDebugContext) {
// Sets a v8 flag.
RUNTIME_FUNCTION(MaybeObject*, Runtime_SetFlags) {
+ SealHandleScope shs(isolate);
CONVERT_ARG_CHECKED(String, arg, 0);
SmartArrayPointer<char> flags =
arg->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
@@ -12778,6 +13562,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetFlags) {
// Performs a GC.
// Presently, it only does a full GC.
RUNTIME_FUNCTION(MaybeObject*, Runtime_CollectGarbage) {
+ SealHandleScope shs(isolate);
isolate->heap()->CollectAllGarbage(Heap::kNoGCFlags, "%CollectGarbage");
return isolate->heap()->undefined_value();
}
@@ -12785,6 +13570,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CollectGarbage) {
// Gets the current heap usage.
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetHeapUsage) {
+ SealHandleScope shs(isolate);
int usage = static_cast<int>(isolate->heap()->SizeOfObjects());
if (!Smi::IsValid(usage)) {
return *isolate->factory()->NewNumberFromInt(usage);
@@ -12792,221 +13578,642 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetHeapUsage) {
return Smi::FromInt(usage);
}
+#endif // ENABLE_DEBUGGER_SUPPORT
-// Captures a live object list from the present heap.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_HasLOLEnabled) {
-#ifdef LIVE_OBJECT_LIST
- return isolate->heap()->true_value();
-#else
- return isolate->heap()->false_value();
-#endif
-}
+#ifdef V8_I18N_SUPPORT
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CanonicalizeLanguageTag) {
+ HandleScope scope(isolate);
-// Captures a live object list from the present heap.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CaptureLOL) {
-#ifdef LIVE_OBJECT_LIST
- return LiveObjectList::Capture();
-#else
- return isolate->heap()->undefined_value();
-#endif
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(String, locale_id_str, 0);
+
+ v8::String::Utf8Value locale_id(v8::Utils::ToLocal(locale_id_str));
+
+ // Return value which denotes invalid language tag.
+ const char* const kInvalidTag = "invalid-tag";
+
+ UErrorCode error = U_ZERO_ERROR;
+ char icu_result[ULOC_FULLNAME_CAPACITY];
+ int icu_length = 0;
+
+ uloc_forLanguageTag(*locale_id, icu_result, ULOC_FULLNAME_CAPACITY,
+ &icu_length, &error);
+ if (U_FAILURE(error) || icu_length == 0) {
+ return isolate->heap()->AllocateStringFromOneByte(CStrVector(kInvalidTag));
+ }
+
+ char result[ULOC_FULLNAME_CAPACITY];
+
+ // Force strict BCP47 rules.
+ uloc_toLanguageTag(icu_result, result, ULOC_FULLNAME_CAPACITY, TRUE, &error);
+
+ if (U_FAILURE(error)) {
+ return isolate->heap()->AllocateStringFromOneByte(CStrVector(kInvalidTag));
+ }
+
+ return isolate->heap()->AllocateStringFromOneByte(CStrVector(result));
}
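As a side note, here is a minimal standalone sketch of the same ICU round trip, illustrative only and not part of this patch, using just the plain ICU C API: a lax tag such as "en-us" comes back canonicalized as "en-US", and a failure in either call is reported through the UErrorCode rather than as a result string.

#include <unicode/uloc.h>
#include <cstdio>

int main() {
  UErrorCode error = U_ZERO_ERROR;
  char icu_locale[ULOC_FULLNAME_CAPACITY];  // ICU form, e.g. "en_US"
  char bcp47[ULOC_FULLNAME_CAPACITY];       // BCP 47 form, e.g. "en-US"
  int32_t parsed_length = 0;

  // BCP 47 language tag -> ICU locale id.
  uloc_forLanguageTag("en-us", icu_locale, ULOC_FULLNAME_CAPACITY,
                      &parsed_length, &error);
  // ICU locale id -> strictly valid BCP 47 language tag.
  uloc_toLanguageTag(icu_locale, bcp47, ULOC_FULLNAME_CAPACITY,
                     TRUE /* strict */, &error);
  if (U_SUCCESS(error)) std::printf("%s\n", bcp47);  // prints "en-US"
  return 0;
}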
-// Deletes the specified live object list.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DeleteLOL) {
-#ifdef LIVE_OBJECT_LIST
- CONVERT_SMI_ARG_CHECKED(id, 0);
- bool success = LiveObjectList::Delete(id);
- return isolate->heap()->ToBoolean(success);
-#else
- return isolate->heap()->undefined_value();
-#endif
+RUNTIME_FUNCTION(MaybeObject*, Runtime_AvailableLocalesOf) {
+ HandleScope scope(isolate);
+
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(String, service, 0);
+
+ const icu::Locale* available_locales = NULL;
+ int32_t count = 0;
+
+ if (service->IsUtf8EqualTo(CStrVector("collator"))) {
+ available_locales = icu::Collator::getAvailableLocales(count);
+ } else if (service->IsUtf8EqualTo(CStrVector("numberformat"))) {
+ available_locales = icu::NumberFormat::getAvailableLocales(count);
+ } else if (service->IsUtf8EqualTo(CStrVector("dateformat"))) {
+ available_locales = icu::DateFormat::getAvailableLocales(count);
+ } else if (service->IsUtf8EqualTo(CStrVector("breakiterator"))) {
+ available_locales = icu::BreakIterator::getAvailableLocales(count);
+ }
+
+ UErrorCode error = U_ZERO_ERROR;
+ char result[ULOC_FULLNAME_CAPACITY];
+ Handle<JSObject> locales =
+ isolate->factory()->NewJSObject(isolate->object_function());
+
+ for (int32_t i = 0; i < count; ++i) {
+ const char* icu_name = available_locales[i].getName();
+
+ error = U_ZERO_ERROR;
+ // No need to force strict BCP47 rules.
+ uloc_toLanguageTag(icu_name, result, ULOC_FULLNAME_CAPACITY, FALSE, &error);
+ if (U_FAILURE(error)) {
+ // This shouldn't happen, but let's not break the user.
+ continue;
+ }
+
+ RETURN_IF_EMPTY_HANDLE(isolate,
+ JSObject::SetLocalPropertyIgnoreAttributes(
+ locales,
+ isolate->factory()->NewStringFromAscii(CStrVector(result)),
+ isolate->factory()->NewNumber(i),
+ NONE));
+ }
+
+ return *locales;
}
-// Generates the response to a debugger request for a dump of the objects
-// contained in the difference between the captured live object lists
-// specified by id1 and id2.
-// If id1 is 0 (i.e. not a valid lol), then the whole of lol id2 will be
-// dumped.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DumpLOL) {
-#ifdef LIVE_OBJECT_LIST
- HandleScope scope;
- CONVERT_SMI_ARG_CHECKED(id1, 0);
- CONVERT_SMI_ARG_CHECKED(id2, 1);
- CONVERT_SMI_ARG_CHECKED(start, 2);
- CONVERT_SMI_ARG_CHECKED(count, 3);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, filter_obj, 4);
- EnterDebugger enter_debugger;
- return LiveObjectList::Dump(id1, id2, start, count, filter_obj);
-#else
- return isolate->heap()->undefined_value();
-#endif
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetDefaultICULocale) {
+ SealHandleScope shs(isolate);
+
+ ASSERT(args.length() == 0);
+
+ icu::Locale default_locale;
+
+ // Set the locale
+ char result[ULOC_FULLNAME_CAPACITY];
+ UErrorCode status = U_ZERO_ERROR;
+ uloc_toLanguageTag(
+ default_locale.getName(), result, ULOC_FULLNAME_CAPACITY, FALSE, &status);
+ if (U_SUCCESS(status)) {
+ return isolate->heap()->AllocateStringFromOneByte(CStrVector(result));
+ }
+
+ return isolate->heap()->AllocateStringFromOneByte(CStrVector("und"));
}
-// Gets the specified object as requested by the debugger.
-// This is only used for obj ids shown in live object lists.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLOLObj) {
-#ifdef LIVE_OBJECT_LIST
- CONVERT_SMI_ARG_CHECKED(obj_id, 0);
- Object* result = LiveObjectList::GetObj(obj_id);
- return result;
-#else
- return isolate->heap()->undefined_value();
-#endif
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLanguageTagVariants) {
+ HandleScope scope(isolate);
+
+ ASSERT(args.length() == 1);
+
+ CONVERT_ARG_HANDLE_CHECKED(JSArray, input, 0);
+
+ uint32_t length = static_cast<uint32_t>(input->length()->Number());
+ Handle<FixedArray> output = isolate->factory()->NewFixedArray(length);
+ Handle<Name> maximized =
+ isolate->factory()->NewStringFromAscii(CStrVector("maximized"));
+ Handle<Name> base =
+ isolate->factory()->NewStringFromAscii(CStrVector("base"));
+ for (unsigned int i = 0; i < length; ++i) {
+ MaybeObject* maybe_string = input->GetElement(isolate, i);
+ Object* locale_id;
+ if (!maybe_string->ToObject(&locale_id) || !locale_id->IsString()) {
+ return isolate->Throw(isolate->heap()->illegal_argument_string());
+ }
+
+ v8::String::Utf8Value utf8_locale_id(
+ v8::Utils::ToLocal(Handle<String>(String::cast(locale_id))));
+
+ UErrorCode error = U_ZERO_ERROR;
+
+ // Convert from BCP47 to ICU format.
+ // de-DE-u-co-phonebk -> de_DE@collation=phonebook
+ char icu_locale[ULOC_FULLNAME_CAPACITY];
+ int icu_locale_length = 0;
+ uloc_forLanguageTag(*utf8_locale_id, icu_locale, ULOC_FULLNAME_CAPACITY,
+ &icu_locale_length, &error);
+ if (U_FAILURE(error) || icu_locale_length == 0) {
+ return isolate->Throw(isolate->heap()->illegal_argument_string());
+ }
+
+ // Maximize the locale.
+ // de_DE@collation=phonebook -> de_Latn_DE@collation=phonebook
+ char icu_max_locale[ULOC_FULLNAME_CAPACITY];
+ uloc_addLikelySubtags(
+ icu_locale, icu_max_locale, ULOC_FULLNAME_CAPACITY, &error);
+
+ // Remove extensions from maximized locale.
+ // de_Latn_DE@collation=phonebook -> de_Latn_DE
+ char icu_base_max_locale[ULOC_FULLNAME_CAPACITY];
+ uloc_getBaseName(
+ icu_max_locale, icu_base_max_locale, ULOC_FULLNAME_CAPACITY, &error);
+
+ // Get original name without extensions.
+ // de_DE@collation=phonebook -> de_DE
+ char icu_base_locale[ULOC_FULLNAME_CAPACITY];
+ uloc_getBaseName(
+ icu_locale, icu_base_locale, ULOC_FULLNAME_CAPACITY, &error);
+
+ // Convert from ICU locale format to BCP47 format.
+ // de_Latn_DE -> de-Latn-DE
+ char base_max_locale[ULOC_FULLNAME_CAPACITY];
+ uloc_toLanguageTag(icu_base_max_locale, base_max_locale,
+ ULOC_FULLNAME_CAPACITY, FALSE, &error);
+
+ // de_DE -> de-DE
+ char base_locale[ULOC_FULLNAME_CAPACITY];
+ uloc_toLanguageTag(
+ icu_base_locale, base_locale, ULOC_FULLNAME_CAPACITY, FALSE, &error);
+
+ if (U_FAILURE(error)) {
+ return isolate->Throw(isolate->heap()->illegal_argument_string());
+ }
+
+ Handle<JSObject> result =
+ isolate->factory()->NewJSObject(isolate->object_function());
+ RETURN_IF_EMPTY_HANDLE(isolate,
+ JSObject::SetLocalPropertyIgnoreAttributes(
+ result,
+ maximized,
+ isolate->factory()->NewStringFromAscii(CStrVector(base_max_locale)),
+ NONE));
+ RETURN_IF_EMPTY_HANDLE(isolate,
+ JSObject::SetLocalPropertyIgnoreAttributes(
+ result,
+ base,
+ isolate->factory()->NewStringFromAscii(CStrVector(base_locale)),
+ NONE));
+ output->set(i, *result);
+ }
+
+ Handle<JSArray> result = isolate->factory()->NewJSArrayWithElements(output);
+ result->set_length(Smi::FromInt(length));
+ return *result;
}
-// Gets the obj id for the specified address if valid.
-// This is only used for obj ids shown in live object lists.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLOLObjId) {
-#ifdef LIVE_OBJECT_LIST
- HandleScope scope;
- CONVERT_ARG_HANDLE_CHECKED(String, address, 0);
- Object* result = LiveObjectList::GetObjId(address);
- return result;
-#else
- return isolate->heap()->undefined_value();
-#endif
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateDateTimeFormat) {
+ HandleScope scope(isolate);
+
+ ASSERT(args.length() == 3);
+
+ CONVERT_ARG_HANDLE_CHECKED(String, locale, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, options, 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, resolved, 2);
+
+ Handle<ObjectTemplateInfo> date_format_template =
+ I18N::GetTemplate(isolate);
+
+ // Create an empty object wrapper.
+ bool has_pending_exception = false;
+ Handle<JSObject> local_object = Execution::InstantiateObject(
+ date_format_template, &has_pending_exception);
+ if (has_pending_exception) {
+ ASSERT(isolate->has_pending_exception());
+ return Failure::Exception();
+ }
+
+ // Set date time formatter as internal field of the resulting JS object.
+ icu::SimpleDateFormat* date_format = DateFormat::InitializeDateTimeFormat(
+ isolate, locale, options, resolved);
+
+ if (!date_format) return isolate->ThrowIllegalOperation();
+
+ local_object->SetInternalField(0, reinterpret_cast<Smi*>(date_format));
+
+ RETURN_IF_EMPTY_HANDLE(isolate,
+ JSObject::SetLocalPropertyIgnoreAttributes(
+ local_object,
+ isolate->factory()->NewStringFromAscii(CStrVector("dateFormat")),
+ isolate->factory()->NewStringFromAscii(CStrVector("valid")),
+ NONE));
+
+ // Make object handle weak so we can delete the date format once GC kicks in.
+ Handle<Object> wrapper = isolate->global_handles()->Create(*local_object);
+ GlobalHandles::MakeWeak(reinterpret_cast<Object**>(wrapper.location()),
+ NULL,
+ DateFormat::DeleteDateFormat);
+ return *local_object;
}
-// Gets the retainers that references the specified object alive.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLOLObjRetainers) {
-#ifdef LIVE_OBJECT_LIST
- HandleScope scope;
- CONVERT_SMI_ARG_CHECKED(obj_id, 0);
- RUNTIME_ASSERT(args[1]->IsUndefined() || args[1]->IsJSObject());
- RUNTIME_ASSERT(args[2]->IsUndefined() || args[2]->IsBoolean());
- RUNTIME_ASSERT(args[3]->IsUndefined() || args[3]->IsSmi());
- RUNTIME_ASSERT(args[4]->IsUndefined() || args[4]->IsSmi());
- CONVERT_ARG_HANDLE_CHECKED(JSObject, filter_obj, 5);
+RUNTIME_FUNCTION(MaybeObject*, Runtime_InternalDateFormat) {
+ HandleScope scope(isolate);
+
+ ASSERT(args.length() == 2);
+
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, date_format_holder, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSDate, date, 1);
- Handle<JSObject> instance_filter;
- if (args[1]->IsJSObject()) {
- instance_filter = args.at<JSObject>(1);
+ bool has_pending_exception = false;
+ Handle<Object> value =
+ Execution::ToNumber(isolate, date, &has_pending_exception);
+ if (has_pending_exception) {
+ ASSERT(isolate->has_pending_exception());
+ return Failure::Exception();
}
- bool verbose = false;
- if (args[2]->IsBoolean()) {
- verbose = args[2]->IsTrue();
+
+ icu::SimpleDateFormat* date_format =
+ DateFormat::UnpackDateFormat(isolate, date_format_holder);
+ if (!date_format) return isolate->ThrowIllegalOperation();
+
+ icu::UnicodeString result;
+ date_format->format(value->Number(), result);
+
+ return *isolate->factory()->NewStringFromTwoByte(
+ Vector<const uint16_t>(
+ reinterpret_cast<const uint16_t*>(result.getBuffer()),
+ result.length()));
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_InternalDateParse) {
+ HandleScope scope(isolate);
+
+ ASSERT(args.length() == 2);
+
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, date_format_holder, 0);
+ CONVERT_ARG_HANDLE_CHECKED(String, date_string, 1);
+
+ v8::String::Utf8Value utf8_date(v8::Utils::ToLocal(date_string));
+ icu::UnicodeString u_date(icu::UnicodeString::fromUTF8(*utf8_date));
+ icu::SimpleDateFormat* date_format =
+ DateFormat::UnpackDateFormat(isolate, date_format_holder);
+ if (!date_format) return isolate->ThrowIllegalOperation();
+
+ UErrorCode status = U_ZERO_ERROR;
+ UDate date = date_format->parse(u_date, status);
+ if (U_FAILURE(status)) return isolate->heap()->undefined_value();
+
+ bool has_pending_exception = false;
+ Handle<JSDate> result = Handle<JSDate>::cast(
+ Execution::NewDate(
+ isolate, static_cast<double>(date), &has_pending_exception));
+ if (has_pending_exception) {
+ ASSERT(isolate->has_pending_exception());
+ return Failure::Exception();
}
- int start = 0;
- if (args[3]->IsSmi()) {
- start = args.smi_at(3);
+ return *result;
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateNumberFormat) {
+ HandleScope scope(isolate);
+
+ ASSERT(args.length() == 3);
+
+ CONVERT_ARG_HANDLE_CHECKED(String, locale, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, options, 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, resolved, 2);
+
+ Handle<ObjectTemplateInfo> number_format_template =
+ I18N::GetTemplate(isolate);
+
+ // Create an empty object wrapper.
+ bool has_pending_exception = false;
+ Handle<JSObject> local_object = Execution::InstantiateObject(
+ number_format_template, &has_pending_exception);
+ if (has_pending_exception) {
+ ASSERT(isolate->has_pending_exception());
+ return Failure::Exception();
}
- int limit = Smi::kMaxValue;
- if (args[4]->IsSmi()) {
- limit = args.smi_at(4);
+
+ // Set number formatter as internal field of the resulting JS object.
+ icu::DecimalFormat* number_format = NumberFormat::InitializeNumberFormat(
+ isolate, locale, options, resolved);
+
+ if (!number_format) return isolate->ThrowIllegalOperation();
+
+ local_object->SetInternalField(0, reinterpret_cast<Smi*>(number_format));
+
+ RETURN_IF_EMPTY_HANDLE(isolate,
+ JSObject::SetLocalPropertyIgnoreAttributes(
+ local_object,
+ isolate->factory()->NewStringFromAscii(CStrVector("numberFormat")),
+ isolate->factory()->NewStringFromAscii(CStrVector("valid")),
+ NONE));
+
+ Handle<Object> wrapper = isolate->global_handles()->Create(*local_object);
+ GlobalHandles::MakeWeak(reinterpret_cast<Object**>(wrapper.location()),
+ NULL,
+ NumberFormat::DeleteNumberFormat);
+ return *local_object;
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_InternalNumberFormat) {
+ HandleScope scope(isolate);
+
+ ASSERT(args.length() == 2);
+
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, number_format_holder, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, number, 1);
+
+ bool has_pending_exception = false;
+ Handle<Object> value = Execution::ToNumber(
+ isolate, number, &has_pending_exception);
+ if (has_pending_exception) {
+ ASSERT(isolate->has_pending_exception());
+ return Failure::Exception();
}
- return LiveObjectList::GetObjRetainers(obj_id,
- instance_filter,
- verbose,
- start,
- limit,
- filter_obj);
-#else
- return isolate->heap()->undefined_value();
-#endif
+ icu::DecimalFormat* number_format =
+ NumberFormat::UnpackNumberFormat(isolate, number_format_holder);
+ if (!number_format) return isolate->ThrowIllegalOperation();
+
+ icu::UnicodeString result;
+ number_format->format(value->Number(), result);
+
+ return *isolate->factory()->NewStringFromTwoByte(
+ Vector<const uint16_t>(
+ reinterpret_cast<const uint16_t*>(result.getBuffer()),
+ result.length()));
}
-// Gets the reference path between 2 objects.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLOLPath) {
-#ifdef LIVE_OBJECT_LIST
- HandleScope scope;
- CONVERT_SMI_ARG_CHECKED(obj_id1, 0);
- CONVERT_SMI_ARG_CHECKED(obj_id2, 1);
- RUNTIME_ASSERT(args[2]->IsUndefined() || args[2]->IsJSObject());
+RUNTIME_FUNCTION(MaybeObject*, Runtime_InternalNumberParse) {
+ HandleScope scope(isolate);
+
+ ASSERT(args.length() == 2);
- Handle<JSObject> instance_filter;
- if (args[2]->IsJSObject()) {
- instance_filter = args.at<JSObject>(2);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, number_format_holder, 0);
+ CONVERT_ARG_HANDLE_CHECKED(String, number_string, 1);
+
+ v8::String::Utf8Value utf8_number(v8::Utils::ToLocal(number_string));
+ icu::UnicodeString u_number(icu::UnicodeString::fromUTF8(*utf8_number));
+ icu::DecimalFormat* number_format =
+ NumberFormat::UnpackNumberFormat(isolate, number_format_holder);
+ if (!number_format) return isolate->ThrowIllegalOperation();
+
+ UErrorCode status = U_ZERO_ERROR;
+ icu::Formattable result;
+ // ICU 4.6 doesn't support parseCurrency call. We need to wait for ICU49
+ // to be part of Chrome.
+ // TODO(cira): Include currency parsing code using parseCurrency call.
+ // We need to check if the formatter parses all currencies or only the
+ // one it was constructed with (it will impact the API - how to return ISO
+ // code and the value).
+ number_format->parse(u_number, result, status);
+ if (U_FAILURE(status)) return isolate->heap()->undefined_value();
+
+ switch (result.getType()) {
+ case icu::Formattable::kDouble:
+ return *isolate->factory()->NewNumber(result.getDouble());
+ case icu::Formattable::kLong:
+ return *isolate->factory()->NewNumberFromInt(result.getLong());
+ case icu::Formattable::kInt64:
+ return *isolate->factory()->NewNumber(
+ static_cast<double>(result.getInt64()));
+ default:
+ return isolate->heap()->undefined_value();
}
+}
- Object* result =
- LiveObjectList::GetPath(obj_id1, obj_id2, instance_filter);
- return result;
-#else
- return isolate->heap()->undefined_value();
-#endif
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateCollator) {
+ HandleScope scope(isolate);
+
+ ASSERT(args.length() == 3);
+
+ CONVERT_ARG_HANDLE_CHECKED(String, locale, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, options, 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, resolved, 2);
+
+ Handle<ObjectTemplateInfo> collator_template = I18N::GetTemplate(isolate);
+
+ // Create an empty object wrapper.
+ bool has_pending_exception = false;
+ Handle<JSObject> local_object = Execution::InstantiateObject(
+ collator_template, &has_pending_exception);
+ if (has_pending_exception) {
+ ASSERT(isolate->has_pending_exception());
+ return Failure::Exception();
+ }
+
+ // Set collator as internal field of the resulting JS object.
+ icu::Collator* collator = Collator::InitializeCollator(
+ isolate, locale, options, resolved);
+
+ if (!collator) return isolate->ThrowIllegalOperation();
+
+ local_object->SetInternalField(0, reinterpret_cast<Smi*>(collator));
+
+ RETURN_IF_EMPTY_HANDLE(isolate,
+ JSObject::SetLocalPropertyIgnoreAttributes(
+ local_object,
+ isolate->factory()->NewStringFromAscii(CStrVector("collator")),
+ isolate->factory()->NewStringFromAscii(CStrVector("valid")),
+ NONE));
+
+ Handle<Object> wrapper = isolate->global_handles()->Create(*local_object);
+ GlobalHandles::MakeWeak(reinterpret_cast<Object**>(wrapper.location()),
+ NULL,
+ Collator::DeleteCollator);
+ return *local_object;
}
-// Generates the response to a debugger request for a list of all
-// previously captured live object lists.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_InfoLOL) {
-#ifdef LIVE_OBJECT_LIST
- CONVERT_SMI_ARG_CHECKED(start, 0);
- CONVERT_SMI_ARG_CHECKED(count, 1);
- return LiveObjectList::Info(start, count);
-#else
- return isolate->heap()->undefined_value();
-#endif
+RUNTIME_FUNCTION(MaybeObject*, Runtime_InternalCompare) {
+ HandleScope scope(isolate);
+
+ ASSERT(args.length() == 3);
+
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, collator_holder, 0);
+ CONVERT_ARG_HANDLE_CHECKED(String, string1, 1);
+ CONVERT_ARG_HANDLE_CHECKED(String, string2, 2);
+
+ icu::Collator* collator = Collator::UnpackCollator(isolate, collator_holder);
+ if (!collator) return isolate->ThrowIllegalOperation();
+
+ v8::String::Value string_value1(v8::Utils::ToLocal(string1));
+ v8::String::Value string_value2(v8::Utils::ToLocal(string2));
+ const UChar* u_string1 = reinterpret_cast<const UChar*>(*string_value1);
+ const UChar* u_string2 = reinterpret_cast<const UChar*>(*string_value2);
+ UErrorCode status = U_ZERO_ERROR;
+ UCollationResult result = collator->compare(u_string1,
+ string_value1.length(),
+ u_string2,
+ string_value2.length(),
+ status);
+ if (U_FAILURE(status)) return isolate->ThrowIllegalOperation();
+
+ return *isolate->factory()->NewNumberFromInt(result);
}
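
A sketch of the comparison contract, assuming --allow-natives-syntax; the integers mirror ICU's UCollationResult values:

var c = %CreateCollator('en-US', {}, {});
%InternalCompare(c, 'a', 'b');  // -> -1 (UCOL_LESS)
%InternalCompare(c, 'b', 'b');  // ->  0 (UCOL_EQUAL)
%InternalCompare(c, 'c', 'b');  // ->  1 (UCOL_GREATER)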
-// Gets a dump of the specified object as requested by the debugger.
-// This is only used for obj ids shown in live object lists.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_PrintLOLObj) {
-#ifdef LIVE_OBJECT_LIST
- HandleScope scope;
- CONVERT_SMI_ARG_CHECKED(obj_id, 0);
- Object* result = LiveObjectList::PrintObj(obj_id);
- return result;
-#else
- return isolate->heap()->undefined_value();
-#endif
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateBreakIterator) {
+ HandleScope scope(isolate);
+
+ ASSERT(args.length() == 3);
+
+ CONVERT_ARG_HANDLE_CHECKED(String, locale, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, options, 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, resolved, 2);
+
+ Handle<ObjectTemplateInfo> break_iterator_template =
+ I18N::GetTemplate2(isolate);
+
+ // Create an empty object wrapper.
+ bool has_pending_exception = false;
+ Handle<JSObject> local_object = Execution::InstantiateObject(
+ break_iterator_template, &has_pending_exception);
+ if (has_pending_exception) {
+ ASSERT(isolate->has_pending_exception());
+ return Failure::Exception();
+ }
+
+ // Set break iterator as internal field of the resulting JS object.
+ icu::BreakIterator* break_iterator = BreakIterator::InitializeBreakIterator(
+ isolate, locale, options, resolved);
+
+ if (!break_iterator) return isolate->ThrowIllegalOperation();
+
+ local_object->SetInternalField(0, reinterpret_cast<Smi*>(break_iterator));
+  // Make sure that the pointer to the adopted text is NULL.
+ local_object->SetInternalField(1, reinterpret_cast<Smi*>(NULL));
+
+ RETURN_IF_EMPTY_HANDLE(isolate,
+ JSObject::SetLocalPropertyIgnoreAttributes(
+ local_object,
+ isolate->factory()->NewStringFromAscii(CStrVector("breakIterator")),
+ isolate->factory()->NewStringFromAscii(CStrVector("valid")),
+ NONE));
+
+  // Make the object handle weak so we can delete the break iterator once GC
+  // kicks in.
+ Handle<Object> wrapper = isolate->global_handles()->Create(*local_object);
+ GlobalHandles::MakeWeak(reinterpret_cast<Object**>(wrapper.location()),
+ NULL,
+ BreakIterator::DeleteBreakIterator);
+ return *local_object;
}
-// Resets and releases all previously captured live object lists.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ResetLOL) {
-#ifdef LIVE_OBJECT_LIST
- LiveObjectList::Reset();
- return isolate->heap()->undefined_value();
-#else
+RUNTIME_FUNCTION(MaybeObject*, Runtime_BreakIteratorAdoptText) {
+ HandleScope scope(isolate);
+
+ ASSERT(args.length() == 2);
+
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, break_iterator_holder, 0);
+ CONVERT_ARG_HANDLE_CHECKED(String, text, 1);
+
+ icu::BreakIterator* break_iterator =
+ BreakIterator::UnpackBreakIterator(isolate, break_iterator_holder);
+ if (!break_iterator) return isolate->ThrowIllegalOperation();
+
+ icu::UnicodeString* u_text = reinterpret_cast<icu::UnicodeString*>(
+ break_iterator_holder->GetInternalField(1));
+ delete u_text;
+
+ v8::String::Value text_value(v8::Utils::ToLocal(text));
+ u_text = new icu::UnicodeString(
+ reinterpret_cast<const UChar*>(*text_value), text_value.length());
+ break_iterator_holder->SetInternalField(1, reinterpret_cast<Smi*>(u_text));
+
+ break_iterator->setText(*u_text);
+
return isolate->heap()->undefined_value();
-#endif
}
-// Generates the response to a debugger request for a summary of the types
-// of objects in the difference between the captured live object lists
-// specified by id1 and id2.
-// If id1 is 0 (i.e. not a valid lol), then the whole of lol id2 will be
-// summarized.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SummarizeLOL) {
-#ifdef LIVE_OBJECT_LIST
- HandleScope scope;
- CONVERT_SMI_ARG_CHECKED(id1, 0);
- CONVERT_SMI_ARG_CHECKED(id2, 1);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, filter_obj, 2);
+RUNTIME_FUNCTION(MaybeObject*, Runtime_BreakIteratorFirst) {
+ HandleScope scope(isolate);
- EnterDebugger enter_debugger;
- return LiveObjectList::Summarize(id1, id2, filter_obj);
-#else
- return isolate->heap()->undefined_value();
-#endif
+ ASSERT(args.length() == 1);
+
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, break_iterator_holder, 0);
+
+ icu::BreakIterator* break_iterator =
+ BreakIterator::UnpackBreakIterator(isolate, break_iterator_holder);
+ if (!break_iterator) return isolate->ThrowIllegalOperation();
+
+ return *isolate->factory()->NewNumberFromInt(break_iterator->first());
}
-#endif // ENABLE_DEBUGGER_SUPPORT
+RUNTIME_FUNCTION(MaybeObject*, Runtime_BreakIteratorNext) {
+ HandleScope scope(isolate);
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ProfilerResume) {
- NoHandleAllocation ha;
- v8::V8::ResumeProfiler();
- return isolate->heap()->undefined_value();
+ ASSERT(args.length() == 1);
+
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, break_iterator_holder, 0);
+
+ icu::BreakIterator* break_iterator =
+ BreakIterator::UnpackBreakIterator(isolate, break_iterator_holder);
+ if (!break_iterator) return isolate->ThrowIllegalOperation();
+
+ return *isolate->factory()->NewNumberFromInt(break_iterator->next());
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ProfilerPause) {
- NoHandleAllocation ha;
- v8::V8::PauseProfiler();
- return isolate->heap()->undefined_value();
+RUNTIME_FUNCTION(MaybeObject*, Runtime_BreakIteratorCurrent) {
+ HandleScope scope(isolate);
+
+ ASSERT(args.length() == 1);
+
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, break_iterator_holder, 0);
+
+ icu::BreakIterator* break_iterator =
+ BreakIterator::UnpackBreakIterator(isolate, break_iterator_holder);
+ if (!break_iterator) return isolate->ThrowIllegalOperation();
+
+ return *isolate->factory()->NewNumberFromInt(break_iterator->current());
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_BreakIteratorBreakType) {
+ HandleScope scope(isolate);
+
+ ASSERT(args.length() == 1);
+
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, break_iterator_holder, 0);
+
+ icu::BreakIterator* break_iterator =
+ BreakIterator::UnpackBreakIterator(isolate, break_iterator_holder);
+ if (!break_iterator) return isolate->ThrowIllegalOperation();
+
+ // TODO(cira): Remove cast once ICU fixes base BreakIterator class.
+ icu::RuleBasedBreakIterator* rule_based_iterator =
+ static_cast<icu::RuleBasedBreakIterator*>(break_iterator);
+ int32_t status = rule_based_iterator->getRuleStatus();
+ // Keep return values in sync with JavaScript BreakType enum.
+ if (status >= UBRK_WORD_NONE && status < UBRK_WORD_NONE_LIMIT) {
+ return *isolate->factory()->NewStringFromAscii(CStrVector("none"));
+ } else if (status >= UBRK_WORD_NUMBER && status < UBRK_WORD_NUMBER_LIMIT) {
+ return *isolate->factory()->NewStringFromAscii(CStrVector("number"));
+ } else if (status >= UBRK_WORD_LETTER && status < UBRK_WORD_LETTER_LIMIT) {
+ return *isolate->factory()->NewStringFromAscii(CStrVector("letter"));
+ } else if (status >= UBRK_WORD_KANA && status < UBRK_WORD_KANA_LIMIT) {
+ return *isolate->factory()->NewStringFromAscii(CStrVector("kana"));
+ } else if (status >= UBRK_WORD_IDEO && status < UBRK_WORD_IDEO_LIMIT) {
+ return *isolate->factory()->NewStringFromAscii(CStrVector("ideo"));
+ } else {
+ return *isolate->factory()->NewStringFromAscii(CStrVector("unknown"));
+ }
}
+#endif // V8_I18N_SUPPORT
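
A sketch of the break-iterator walk the entries above are meant to support, assuming --allow-natives-syntax and a word iterator; the option name 'type' is an assumption, not taken from this diff:

var it = %CreateBreakIterator('en-US', { type: 'word' }, {});
%BreakIteratorAdoptText(it, 'Hello world');
%BreakIteratorFirst(it);      // -> 0
%BreakIteratorNext(it);       // -> 5, the boundary after "Hello"
%BreakIteratorBreakType(it);  // -> 'letter' at that boundary for a word iterator
%BreakIteratorCurrent(it);    // -> 5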
// Finds the script object from the script data. NOTE: This operation uses
@@ -13020,9 +14227,11 @@ static Handle<Object> Runtime_GetScriptFromScriptName(
// Scan the heap for Script objects to find the script with the requested
// script data.
Handle<Script> script;
- script_name->GetHeap()->EnsureHeapIsIterable();
- AssertNoAllocation no_allocation_during_heap_iteration;
- HeapIterator iterator;
+ Factory* factory = script_name->GetIsolate()->factory();
+ Heap* heap = script_name->GetHeap();
+ heap->EnsureHeapIsIterable();
+ DisallowHeapAllocation no_allocation_during_heap_iteration;
+ HeapIterator iterator(heap);
HeapObject* obj = NULL;
while (script.is_null() && ((obj = iterator.next()) != NULL)) {
// If a script is found check if it has the script data requested.
@@ -13036,7 +14245,7 @@ static Handle<Object> Runtime_GetScriptFromScriptName(
}
// If no script with the requested script data is found return undefined.
- if (script.is_null()) return FACTORY->undefined_value();
+ if (script.is_null()) return factory->undefined_value();
// Return the script found.
return GetScriptWrapper(script);
@@ -13060,136 +14269,93 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetScript) {
}
-// Determines whether the given stack frame should be displayed in
-// a stack trace. The caller is the error constructor that asked
-// for the stack trace to be collected. The first time a construct
-// call to this function is encountered it is skipped. The seen_caller
-// in/out parameter is used to remember if the caller has been seen
-// yet.
-static bool ShowFrameInStackTrace(StackFrame* raw_frame,
- Object* caller,
- bool* seen_caller) {
- // Only display JS frames.
- if (!raw_frame->is_java_script()) {
- return false;
- }
- JavaScriptFrame* frame = JavaScriptFrame::cast(raw_frame);
- Object* raw_fun = frame->function();
- // Not sure when this can happen but skip it just in case.
- if (!raw_fun->IsJSFunction()) {
- return false;
- }
- if ((raw_fun == caller) && !(*seen_caller)) {
- *seen_caller = true;
- return false;
- }
- // Skip all frames until we've seen the caller.
- if (!(*seen_caller)) return false;
- // Also, skip non-visible built-in functions and any call with the builtins
- // object as receiver, so as to not reveal either the builtins object or
- // an internal function.
- // The --builtins-in-stack-traces command line flag allows including
- // internal call sites in the stack trace for debugging purposes.
- if (!FLAG_builtins_in_stack_traces) {
- JSFunction* fun = JSFunction::cast(raw_fun);
- if (frame->receiver()->IsJSBuiltinsObject() ||
- (fun->IsBuiltin() && !fun->shared()->native())) {
- return false;
- }
- }
- return true;
-}
-
-
// Collect the raw data for a stack trace. Returns an array of 4
// element segments each containing a receiver, function, code and
// native code offset.
RUNTIME_FUNCTION(MaybeObject*, Runtime_CollectStackTrace) {
+ HandleScope scope(isolate);
ASSERT_EQ(args.length(), 3);
CONVERT_ARG_HANDLE_CHECKED(JSObject, error_object, 0);
Handle<Object> caller = args.at<Object>(1);
CONVERT_NUMBER_CHECKED(int32_t, limit, Int32, args[2]);
- HandleScope scope(isolate);
- Factory* factory = isolate->factory();
+ // Optionally capture a more detailed stack trace for the message.
+ isolate->CaptureAndSetDetailedStackTrace(error_object);
+ // Capture a simple stack trace for the stack property.
+ return *isolate->CaptureSimpleStackTrace(error_object, caller, limit);
+}
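
A sketch of the raw data this produces: one 4-slot segment per frame. The call shape below (caller function and limit) is an assumption about how the error natives drive it, not something this diff shows:

function f() { return %CollectStackTrace(new Error('boom'), f, 10); }
var raw = f();
// raw[0] receiver, raw[1] function, raw[2] code object, raw[3] pc offset,
// then the next frame's four slots, and so on.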
- limit = Max(limit, 0); // Ensure that limit is not negative.
- int initial_size = Min(limit, 10);
- Handle<FixedArray> elements =
- factory->NewFixedArrayWithHoles(initial_size * 4);
- StackFrameIterator iter(isolate);
- // If the caller parameter is a function we skip frames until we're
- // under it before starting to collect.
- bool seen_caller = !caller->IsJSFunction();
- int cursor = 0;
- int frames_seen = 0;
- while (!iter.done() && frames_seen < limit) {
- StackFrame* raw_frame = iter.frame();
- if (ShowFrameInStackTrace(raw_frame, *caller, &seen_caller)) {
- frames_seen++;
- JavaScriptFrame* frame = JavaScriptFrame::cast(raw_frame);
- // Set initial size to the maximum inlining level + 1 for the outermost
- // function.
- List<FrameSummary> frames(Compiler::kMaxInliningLevels + 1);
- frame->Summarize(&frames);
- for (int i = frames.length() - 1; i >= 0; i--) {
- if (cursor + 4 > elements->length()) {
- int new_capacity = JSObject::NewElementsCapacity(elements->length());
- Handle<FixedArray> new_elements =
- factory->NewFixedArrayWithHoles(new_capacity);
- for (int i = 0; i < cursor; i++) {
- new_elements->set(i, elements->get(i));
- }
- elements = new_elements;
- }
- ASSERT(cursor + 4 <= elements->length());
-
- Handle<Object> recv = frames[i].receiver();
- Handle<JSFunction> fun = frames[i].function();
- Handle<Code> code = frames[i].code();
- Handle<Smi> offset(Smi::FromInt(frames[i].offset()));
- elements->set(cursor++, *recv);
- elements->set(cursor++, *fun);
- elements->set(cursor++, *code);
- elements->set(cursor++, *offset);
- }
- }
- iter.Advance();
- }
- Handle<JSArray> result = factory->NewJSArrayWithElements(elements);
- // Capture and attach a more detailed stack trace if necessary.
- isolate->CaptureAndSetCurrentStackTraceFor(error_object);
- result->set_length(Smi::FromInt(cursor));
+// Retrieve the stack trace. This is the raw stack trace that has yet to
+// be formatted. Since we only need it once, clear it afterwards.
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetAndClearOverflowedStackTrace) {
+ HandleScope scope(isolate);
+ ASSERT_EQ(args.length(), 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, error_object, 0);
+ Handle<String> key = isolate->factory()->hidden_stack_trace_string();
+ Handle<Object> result(error_object->GetHiddenProperty(*key), isolate);
+ if (result->IsTheHole()) return isolate->heap()->undefined_value();
+ RUNTIME_ASSERT(result->IsJSArray() || result->IsUndefined());
+ JSObject::DeleteHiddenProperty(error_object, key);
return *result;
}
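
A sketch of the one-shot contract; err here is a plain error, so both calls return undefined, but when V8 has stashed an overflow trace the first call yields it and the second does not:

var err = new Error('too much recursion');
%GetAndClearOverflowedStackTrace(err);  // JSArray if a raw trace was stored, else undefined
%GetAndClearOverflowedStackTrace(err);  // -> undefined from here on either way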
// Returns V8 version as a string.
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetV8Version) {
+ SealHandleScope shs(isolate);
ASSERT_EQ(args.length(), 0);
- NoHandleAllocation ha;
-
const char* version_string = v8::V8::GetVersion();
- return isolate->heap()->AllocateStringFromAscii(CStrVector(version_string),
+ return isolate->heap()->AllocateStringFromOneByte(CStrVector(version_string),
NOT_TENURED);
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_Abort) {
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
OS::PrintError("abort: %s\n",
reinterpret_cast<char*>(args[0]) + args.smi_at(1));
- isolate->PrintStack();
+ isolate->PrintStack(stderr);
OS::Abort();
UNREACHABLE();
return NULL;
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_FlattenString) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(String, str, 0);
+ FlattenString(str);
+ return isolate->heap()->undefined_value();
+}
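
A sketch of the intended use: flatten a cons string once before code that indexes it heavily. This is purely a performance hint and always returns undefined:

var s = 'left-half-' + Math.random();  // very likely a ConsString
%FlattenString(s);                     // copies it into a flat sequential string
s.charCodeAt(3);                       // later indexing avoids cons traversal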
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyContextDisposed) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 0);
+ isolate->heap()->NotifyContextDisposed();
+ return isolate->heap()->undefined_value();
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_MigrateInstance) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
+ if (!object->IsJSObject()) return Smi::FromInt(0);
+ Handle<JSObject> js_object = Handle<JSObject>::cast(object);
+ if (!js_object->map()->is_deprecated()) return Smi::FromInt(0);
+ JSObject::MigrateInstance(js_object);
+ return *object;
+}
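
A sketch of when this entry does any work; provoking the deprecated map via field generalization is an assumption about representation tracking, not part of this diff:

function C(v) { this.x = v; }
var a = new C(1);      // 'x' starts with a narrow field representation
var b = new C(1.5);    // generalizing 'x' can deprecate a's old map
%MigrateInstance(a);   // -> a, now on the up-to-date map
%MigrateInstance(42);  // -> 0, not a JSObject
%MigrateInstance({});  // -> 0, map not deprecated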
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFromCache) {
+ SealHandleScope shs(isolate);
// This is only called from codegen, so checks might be more lax.
CONVERT_ARG_CHECKED(JSFunctionResultCache, cache, 0);
Object* key = args[1];
@@ -13226,17 +14392,19 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFromCache) {
HandleScope scope(isolate);
Handle<JSFunctionResultCache> cache_handle(cache);
- Handle<Object> key_handle(key);
+ Handle<Object> key_handle(key, isolate);
Handle<Object> value;
{
Handle<JSFunction> factory(JSFunction::cast(
cache_handle->get(JSFunctionResultCache::kFactoryIndex)));
// TODO(antonm): consider passing a receiver when constructing a cache.
- Handle<Object> receiver(isolate->native_context()->global_object());
+ Handle<Object> receiver(isolate->native_context()->global_object(),
+ isolate);
    // This handle is neither shared nor used later, so it's safe.
Handle<Object> argv[] = { key_handle };
bool pending_exception;
- value = Execution::Call(factory,
+ value = Execution::Call(isolate,
+ factory,
receiver,
ARRAY_SIZE(argv),
argv,
@@ -13285,40 +14453,15 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFromCache) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NewMessageObject) {
- HandleScope scope(isolate);
- CONVERT_ARG_HANDLE_CHECKED(String, type, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSArray, arguments, 1);
- return *isolate->factory()->NewJSMessageObject(
- type,
- arguments,
- 0,
- 0,
- isolate->factory()->undefined_value(),
- isolate->factory()->undefined_value(),
- isolate->factory()->undefined_value());
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_MessageGetType) {
- CONVERT_ARG_CHECKED(JSMessageObject, message, 0);
- return message->type();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_MessageGetArguments) {
- CONVERT_ARG_CHECKED(JSMessageObject, message, 0);
- return message->arguments();
-}
-
-
RUNTIME_FUNCTION(MaybeObject*, Runtime_MessageGetStartPosition) {
+ SealHandleScope shs(isolate);
CONVERT_ARG_CHECKED(JSMessageObject, message, 0);
return Smi::FromInt(message->start_position());
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_MessageGetScript) {
+ SealHandleScope shs(isolate);
CONVERT_ARG_CHECKED(JSMessageObject, message, 0);
return message->script();
}
@@ -13328,8 +14471,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_MessageGetScript) {
// ListNatives is ONLY used by the fuzz-natives.js in debug mode
// Exclude the code in release mode.
RUNTIME_FUNCTION(MaybeObject*, Runtime_ListNatives) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 0);
- HandleScope scope;
#define COUNT_ENTRY(Name, argc, ressize) + 1
int entry_count = 0
RUNTIME_FUNCTION_LIST(COUNT_ENTRY)
@@ -13342,7 +14485,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ListNatives) {
bool inline_runtime_functions = false;
#define ADD_ENTRY(Name, argc, ressize) \
{ \
- HandleScope inner; \
+ HandleScope inner(isolate); \
Handle<String> name; \
/* Inline runtime functions have an underscore in front of the name. */ \
if (inline_runtime_functions) { \
@@ -13372,13 +14515,15 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ListNatives) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_Log) {
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(String, format, 0);
CONVERT_ARG_CHECKED(JSArray, elms, 1);
+ DisallowHeapAllocation no_gc;
String::FlatContent format_content = format->GetFlatContent();
RUNTIME_ASSERT(format_content.IsAscii());
- Vector<const char> chars = format_content.ToAsciiVector();
- LOGGER->LogRuntime(chars, elms);
+ Vector<const uint8_t> chars = format_content.ToOneByteVector();
+ isolate->logger()->LogRuntime(Vector<const char>::cast(chars), elms);
return isolate->heap()->undefined_value();
}
@@ -13401,6 +14546,7 @@ ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastSmiOrObjectElements)
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastDoubleElements)
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastHoleyElements)
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(DictionaryElements)
+ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(NonStrictArgumentsElements)
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalPixelElements)
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalArrayElements)
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalByteElements)
@@ -13418,12 +14564,231 @@ ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastProperties)
RUNTIME_FUNCTION(MaybeObject*, Runtime_HaveSameMap) {
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(JSObject, obj1, 0);
CONVERT_ARG_CHECKED(JSObject, obj2, 1);
return isolate->heap()->ToBoolean(obj1->map() == obj2->map());
}
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_IsAccessCheckNeeded) {
+ SealHandleScope shs(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_CHECKED(HeapObject, obj, 0);
+ return isolate->heap()->ToBoolean(obj->IsAccessCheckNeeded());
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_IsObserved) {
+ SealHandleScope shs(isolate);
+ ASSERT(args.length() == 1);
+
+ if (!args[0]->IsJSReceiver()) return isolate->heap()->false_value();
+ JSReceiver* obj = JSReceiver::cast(args[0]);
+ if (obj->IsJSGlobalProxy()) {
+ Object* proto = obj->GetPrototype();
+ if (proto->IsNull()) return isolate->heap()->false_value();
+ ASSERT(proto->IsJSGlobalObject());
+ obj = JSReceiver::cast(proto);
+ }
+ return isolate->heap()->ToBoolean(obj->map()->is_observed());
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SetIsObserved) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, obj, 0);
+ if (obj->IsJSGlobalProxy()) {
+ Object* proto = obj->GetPrototype();
+ if (proto->IsNull()) return isolate->heap()->undefined_value();
+ ASSERT(proto->IsJSGlobalObject());
+ obj = handle(JSReceiver::cast(proto));
+ }
+ if (obj->IsJSProxy())
+ return isolate->heap()->undefined_value();
+
+ ASSERT(!(obj->map()->is_observed() && obj->IsJSObject() &&
+ Handle<JSObject>::cast(obj)->HasFastElements()));
+ ASSERT(obj->IsJSObject());
+ JSObject::SetObserved(Handle<JSObject>::cast(obj));
+ return isolate->heap()->undefined_value();
+}
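
A sketch of the contract these two observation entries expose; the direct %-calls are only to illustrate what the Object.observe machinery relies on:

var o = {};
%IsObserved(o);     // -> false
%SetIsObserved(o);  // flips o's map to an observed one (fast elements are given up)
%IsObserved(o);     // -> true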
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SetObserverDeliveryPending) {
+ SealHandleScope shs(isolate);
+ ASSERT(args.length() == 0);
+ isolate->set_observer_delivery_pending(true);
+ return isolate->heap()->undefined_value();
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetObservationState) {
+ SealHandleScope shs(isolate);
+ ASSERT(args.length() == 0);
+ return isolate->heap()->observation_state();
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ObservationWeakMapCreate) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 0);
+ // TODO(adamk): Currently this runtime function is only called three times per
+ // isolate. If it's called more often, the map should be moved into the
+ // strong root list.
+ Handle<Map> map =
+ isolate->factory()->NewMap(JS_WEAK_MAP_TYPE, JSWeakMap::kSize);
+ Handle<JSWeakMap> weakmap =
+ Handle<JSWeakMap>::cast(isolate->factory()->NewJSObjectFromMap(map));
+ return WeakCollectionInitialize(isolate, weakmap);
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_UnwrapGlobalProxy) {
+ SealHandleScope shs(isolate);
+ ASSERT(args.length() == 1);
+ Object* object = args[0];
+ if (object->IsJSGlobalProxy()) {
+ object = object->GetPrototype(isolate);
+ if (object->IsNull()) return isolate->heap()->undefined_value();
+ }
+ return object;
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_IsAccessAllowedForObserver) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 3);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, observer, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 1);
+ ASSERT(object->IsAccessCheckNeeded());
+ Handle<Object> key = args.at<Object>(2);
+ SaveContext save(isolate);
+ isolate->set_context(observer->context());
+ if (!isolate->MayNamedAccess(*object, isolate->heap()->undefined_value(),
+ v8::ACCESS_KEYS)) {
+ return isolate->heap()->false_value();
+ }
+ bool access_allowed = false;
+ uint32_t index = 0;
+ if (key->ToArrayIndex(&index) ||
+ (key->IsString() && String::cast(*key)->AsArrayIndex(&index))) {
+ access_allowed =
+ isolate->MayIndexedAccess(*object, index, v8::ACCESS_GET) &&
+ isolate->MayIndexedAccess(*object, index, v8::ACCESS_HAS);
+ } else {
+ access_allowed = isolate->MayNamedAccess(*object, *key, v8::ACCESS_GET) &&
+ isolate->MayNamedAccess(*object, *key, v8::ACCESS_HAS);
+ }
+ return isolate->heap()->ToBoolean(access_allowed);
+}
+
+
+static MaybeObject* ArrayConstructorCommon(Isolate* isolate,
+ Handle<JSFunction> constructor,
+ Handle<Object> type_info,
+ Arguments* caller_args) {
+ bool holey = false;
+ bool can_use_type_feedback = true;
+ if (caller_args->length() == 1) {
+ Object* argument_one = (*caller_args)[0];
+ if (argument_one->IsSmi()) {
+ int value = Smi::cast(argument_one)->value();
+ if (value < 0 || value >= JSObject::kInitialMaxFastElementArray) {
+        // The array will be backed by a dictionary in this case.
+ can_use_type_feedback = false;
+ } else if (value != 0) {
+ holey = true;
+ }
+ } else {
+      // A non-Smi length argument produces a dictionary.
+ can_use_type_feedback = false;
+ }
+ }
+
+ JSArray* array;
+ MaybeObject* maybe_array;
+ if (!type_info.is_null() &&
+ *type_info != isolate->heap()->undefined_value() &&
+ Cell::cast(*type_info)->value()->IsAllocationSite() &&
+ can_use_type_feedback) {
+ Handle<Cell> cell = Handle<Cell>::cast(type_info);
+ Handle<AllocationSite> site = Handle<AllocationSite>(
+ AllocationSite::cast(cell->value()), isolate);
+ ASSERT(!site->SitePointsToLiteral());
+ ElementsKind to_kind = site->GetElementsKind();
+ if (holey && !IsFastHoleyElementsKind(to_kind)) {
+ to_kind = GetHoleyElementsKind(to_kind);
+ // Update the allocation site info to reflect the advice alteration.
+ site->SetElementsKind(to_kind);
+ }
+
+ maybe_array = isolate->heap()->AllocateJSObjectWithAllocationSite(
+ *constructor, site);
+ if (!maybe_array->To(&array)) return maybe_array;
+ } else {
+ maybe_array = isolate->heap()->AllocateJSObject(*constructor);
+ if (!maybe_array->To(&array)) return maybe_array;
+    // We might need to transition to a holey elements kind.
+ ElementsKind kind = constructor->initial_map()->elements_kind();
+ if (holey && !IsFastHoleyElementsKind(kind)) {
+ kind = GetHoleyElementsKind(kind);
+ maybe_array = array->TransitionElementsKind(kind);
+ if (maybe_array->IsFailure()) return maybe_array;
+ }
+ }
+
+ maybe_array = isolate->heap()->AllocateJSArrayStorage(array, 0, 0,
+ DONT_INITIALIZE_ARRAY_ELEMENTS);
+ if (maybe_array->IsFailure()) return maybe_array;
+ maybe_array = ArrayConstructInitializeElements(array, caller_args);
+ if (maybe_array->IsFailure()) return maybe_array;
+ return array;
+}
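
A sketch of how the single length argument steers the logic above; the literal values are illustrative and kInitialMaxFastElementArray is the real cutoff:

new Array();     // no caller args: allocation-site advice is used as-is
new Array(0);    // Smi zero: stays packed, feedback still usable
new Array(8);    // small non-zero Smi: elements kind is widened to a holey kind
new Array(1e6);  // at or past the fast-element cutoff: type feedback is ignored
new Array(1.5);  // non-Smi length: feedback is ignored (and Array itself later throws a RangeError)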
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayConstructor) {
+ HandleScope scope(isolate);
+ // If we get 2 arguments then they are the stub parameters (constructor, type
+ // info). If we get 3, then the first one is a pointer to the arguments
+ // passed by the caller.
+ Arguments empty_args(0, NULL);
+ bool no_caller_args = args.length() == 2;
+ ASSERT(no_caller_args || args.length() == 3);
+ int parameters_start = no_caller_args ? 0 : 1;
+ Arguments* caller_args = no_caller_args
+ ? &empty_args
+ : reinterpret_cast<Arguments*>(args[0]);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, constructor, parameters_start);
+ CONVERT_ARG_HANDLE_CHECKED(Object, type_info, parameters_start + 1);
+
+ return ArrayConstructorCommon(isolate,
+ constructor,
+ type_info,
+ caller_args);
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_InternalArrayConstructor) {
+ HandleScope scope(isolate);
+ Arguments empty_args(0, NULL);
+ bool no_caller_args = args.length() == 1;
+ ASSERT(no_caller_args || args.length() == 2);
+ int parameters_start = no_caller_args ? 0 : 1;
+ Arguments* caller_args = no_caller_args
+ ? &empty_args
+ : reinterpret_cast<Arguments*>(args[0]);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, constructor, parameters_start);
+
+ return ArrayConstructorCommon(isolate,
+ constructor,
+ Handle<Object>::null(),
+ caller_args);
+}
+
+
// ----------------------------------------------------------------------------
// Implementation of Runtime
@@ -13445,20 +14810,19 @@ static const Runtime::Function kIntrinsicFunctions[] = {
MaybeObject* Runtime::InitializeIntrinsicFunctionNames(Heap* heap,
Object* dictionary) {
- ASSERT(Isolate::Current()->heap() == heap);
ASSERT(dictionary != NULL);
- ASSERT(StringDictionary::cast(dictionary)->NumberOfElements() == 0);
+ ASSERT(NameDictionary::cast(dictionary)->NumberOfElements() == 0);
for (int i = 0; i < kNumFunctions; ++i) {
- Object* name_symbol;
- { MaybeObject* maybe_name_symbol =
- heap->LookupAsciiSymbol(kIntrinsicFunctions[i].name);
- if (!maybe_name_symbol->ToObject(&name_symbol)) return maybe_name_symbol;
- }
- StringDictionary* string_dictionary = StringDictionary::cast(dictionary);
- { MaybeObject* maybe_dictionary = string_dictionary->Add(
- String::cast(name_symbol),
+ Object* name_string;
+ { MaybeObject* maybe_name_string =
+ heap->InternalizeUtf8String(kIntrinsicFunctions[i].name);
+ if (!maybe_name_string->ToObject(&name_string)) return maybe_name_string;
+ }
+ NameDictionary* name_dictionary = NameDictionary::cast(dictionary);
+ { MaybeObject* maybe_dictionary = name_dictionary->Add(
+ String::cast(name_string),
Smi::FromInt(i),
- PropertyDetails(NONE, NORMAL));
+ PropertyDetails(NONE, NORMAL, Representation::None()));
if (!maybe_dictionary->ToObject(&dictionary)) {
// Non-recoverable failure. Calling code must restart heap
// initialization.
@@ -13470,7 +14834,7 @@ MaybeObject* Runtime::InitializeIntrinsicFunctionNames(Heap* heap,
}
-const Runtime::Function* Runtime::FunctionForSymbol(Handle<String> name) {
+const Runtime::Function* Runtime::FunctionForName(Handle<String> name) {
Heap* heap = name->GetHeap();
int entry = heap->intrinsic_function_names()->FindEntry(*name);
if (entry != kNotFound) {
@@ -13487,8 +14851,7 @@ const Runtime::Function* Runtime::FunctionForId(Runtime::FunctionId id) {
}
-void Runtime::PerformGC(Object* result) {
- Isolate* isolate = Isolate::Current();
+void Runtime::PerformGC(Object* result, Isolate* isolate) {
Failure* failure = Failure::cast(result);
if (failure->IsRetryAfterGC()) {
if (isolate->heap()->new_space()->AddFreshPage()) {
diff --git a/deps/v8/src/runtime.h b/deps/v8/src/runtime.h
index 47745b358..1b7e32e7a 100644
--- a/deps/v8/src/runtime.h
+++ b/deps/v8/src/runtime.h
@@ -55,7 +55,7 @@ namespace internal {
F(IsPropertyEnumerable, 2, 1) \
F(GetPropertyNames, 1, 1) \
F(GetPropertyNamesFast, 1, 1) \
- F(GetLocalPropertyNames, 1, 1) \
+ F(GetLocalPropertyNames, 2, 1) \
F(GetLocalElementNames, 1, 1) \
F(GetInterceptorInfo, 1, 1) \
F(GetNamedInterceptorPropertyNames, 1, 1) \
@@ -64,9 +64,11 @@ namespace internal {
F(ToFastProperties, 1, 1) \
F(FinishArrayPrototypeSetup, 1, 1) \
F(SpecialArrayFunctions, 1, 1) \
+ F(IsClassicModeFunction, 1, 1) \
F(GetDefaultReceiver, 1, 1) \
\
F(GetPrototype, 1, 1) \
+ F(SetPrototype, 2, 1) \
F(IsInPrototypeChain, 2, 1) \
\
F(GetOwnProperty, 2, 1) \
@@ -85,22 +87,31 @@ namespace internal {
F(NewStrictArgumentsFast, 3, 1) \
F(LazyCompile, 1, 1) \
F(LazyRecompile, 1, 1) \
- F(ParallelRecompile, 1, 1) \
+ F(ConcurrentRecompile, 1, 1) \
+ F(TryInstallRecompiledCode, 1, 1) \
F(NotifyDeoptimized, 1, 1) \
- F(NotifyOSR, 0, 1) \
+ F(NotifyStubFailure, 0, 1) \
F(DeoptimizeFunction, 1, 1) \
F(ClearFunctionTypeFeedback, 1, 1) \
F(RunningInSimulator, 0, 1) \
+ F(IsConcurrentRecompilationSupported, 0, 1) \
F(OptimizeFunctionOnNextCall, -1, 1) \
- F(GetOptimizationStatus, 1, 1) \
+ F(NeverOptimizeFunction, 1, 1) \
+ F(GetOptimizationStatus, -1, 1) \
F(GetOptimizationCount, 1, 1) \
- F(CompileForOnStackReplacement, 1, 1) \
- F(SetNewFunctionAttributes, 1, 1) \
+ F(UnblockConcurrentRecompilation, 0, 1) \
+ F(CompileForOnStackReplacement, 2, 1) \
+ F(SetAllocationTimeout, 2, 1) \
F(AllocateInNewSpace, 1, 1) \
+ F(AllocateInOldPointerSpace, 1, 1) \
+ F(AllocateInOldDataSpace, 1, 1) \
F(SetNativeFlag, 1, 1) \
F(StoreArrayLiteralElement, 5, 1) \
F(DebugCallbackSupportsStepping, 1, 1) \
F(DebugPrepareStepInIfStepping, 1, 1) \
+ F(FlattenString, 1, 1) \
+ F(MigrateInstance, 1, 1) \
+ F(NotifyContextDisposed, 0, 1) \
\
/* Array join support */ \
F(PushIfAbsent, 2, 1) \
@@ -111,7 +122,6 @@ namespace internal {
F(Typeof, 1, 1) \
\
F(StringToNumber, 1, 1) \
- F(StringFromCharCodeArray, 1, 1) \
F(StringParseInt, 2, 1) \
F(StringParseFloat, 1, 1) \
F(StringToLowerCase, 1, 1) \
@@ -120,13 +130,11 @@ namespace internal {
F(CharFromCode, 1, 1) \
F(URIEscape, 1, 1) \
F(URIUnescape, 1, 1) \
- F(QuoteJSONString, 1, 1) \
- F(QuoteJSONStringComma, 1, 1) \
- F(QuoteJSONStringArray, 1, 1) \
\
F(NumberToString, 1, 1) \
F(NumberToStringSkipCache, 1, 1) \
F(NumberToInteger, 1, 1) \
+ F(NumberToPositiveInteger, 1, 1) \
F(NumberToIntegerMapMinusZero, 1, 1) \
F(NumberToJSUint32, 1, 1) \
F(NumberToJSInt32, 1, 1) \
@@ -141,6 +149,7 @@ namespace internal {
F(NumberMod, 2, 1) \
F(NumberUnaryMinus, 1, 1) \
F(NumberAlloc, 0, 1) \
+ F(NumberImul, 2, 1) \
\
F(StringAdd, 2, 1) \
F(StringBuilderConcat, 3, 1) \
@@ -151,7 +160,6 @@ namespace internal {
F(NumberOr, 2, 1) \
F(NumberAnd, 2, 1) \
F(NumberXor, 2, 1) \
- F(NumberNot, 1, 1) \
\
F(NumberShl, 2, 1) \
F(NumberShr, 2, 1) \
@@ -191,6 +199,8 @@ namespace internal {
\
/* JSON */ \
F(ParseJson, 1, 1) \
+ F(BasicJSONStringify, 1, 1) \
+ F(QuoteJSONString, 1, 1) \
\
/* Strings */ \
F(StringCharCodeAt, 2, 1) \
@@ -198,18 +208,22 @@ namespace internal {
F(StringLastIndexOf, 3, 1) \
F(StringLocaleCompare, 2, 1) \
F(SubString, 3, 1) \
- F(StringReplaceRegExpWithString, 4, 1) \
+ F(StringReplaceGlobalRegExpWithString, 4, 1) \
F(StringReplaceOneCharWithString, 3, 1) \
F(StringMatch, 3, 1) \
F(StringTrim, 3, 1) \
F(StringToArray, 2, 1) \
F(NewStringWrapper, 1, 1) \
+ F(NewString, 2, 1) \
+ F(TruncateString, 2, 1) \
\
/* Numbers */ \
F(NumberToRadixString, 2, 1) \
F(NumberToFixed, 2, 1) \
F(NumberToExponential, 2, 1) \
- F(NumberToPrecision, 2, 1)
+ F(NumberToPrecision, 2, 1) \
+ F(IsValidSmi, 1, 1)
+
#define RUNTIME_FUNCTION_LIST_ALWAYS_2(F) \
/* Reflection */ \
@@ -221,6 +235,7 @@ namespace internal {
F(FunctionSetName, 2, 1) \
F(FunctionNameShouldPrintAsAnonymous, 1, 1) \
F(FunctionMarkNameShouldPrintAsAnonymous, 1, 1) \
+ F(FunctionIsGenerator, 1, 1) \
F(FunctionBindArguments, 4, 1) \
F(BoundFunctionGetBindings, 1, 1) \
F(FunctionRemovePrototype, 1, 1) \
@@ -232,6 +247,7 @@ namespace internal {
F(FunctionIsBuiltin, 1, 1) \
F(GetScript, 1, 1) \
F(CollectStackTrace, 3, 1) \
+ F(GetAndClearOverflowedStackTrace, 1, 1) \
F(GetV8Version, 0, 1) \
\
F(ClassOf, 1, 1) \
@@ -243,6 +259,7 @@ namespace internal {
F(GetTemplateField, 2, 1) \
F(DisableAccessChecks, 1, 1) \
F(EnableAccessChecks, 1, 1) \
+ F(SetAccessorProperty, 6, 1) \
\
/* Dates */ \
F(DateCurrentTime, 0, 1) \
@@ -255,7 +272,7 @@ namespace internal {
/* Numbers */ \
\
/* Globals */ \
- F(CompileString, 1, 1) \
+ F(CompileString, 2, 1) \
F(GlobalPrint, 1, 1) \
\
/* Eval */ \
@@ -266,12 +283,15 @@ namespace internal {
F(DefineOrRedefineDataProperty, 4, 1) \
F(DefineOrRedefineAccessorProperty, 5, 1) \
F(IgnoreAttributesAndSetProperty, -1 /* 3 or 4 */, 1) \
+ F(GetDataProperty, 2, 1) \
\
/* Arrays */ \
F(RemoveArrayHoles, 2, 1) \
F(GetArrayKeys, 2, 1) \
F(MoveArrayContents, 2, 1) \
F(EstimateNumberOfElements, 1, 1) \
+ F(ArrayConstructor, -1, 1) \
+ F(InternalArrayConstructor, -1, 1) \
\
/* Getters and Setters */ \
F(LookupAccessor, 3, 1) \
@@ -279,13 +299,25 @@ namespace internal {
/* Literals */ \
F(MaterializeRegExpLiteral, 4, 1)\
F(CreateObjectLiteral, 4, 1) \
- F(CreateObjectLiteralShallow, 4, 1) \
F(CreateArrayLiteral, 3, 1) \
F(CreateArrayLiteralShallow, 3, 1) \
\
+ /* Harmony generators */ \
+ F(CreateJSGeneratorObject, 0, 1) \
+ F(SuspendJSGeneratorObject, 1, 1) \
+ F(ResumeJSGeneratorObject, 3, 1) \
+ F(ThrowGeneratorStateError, 1, 1) \
+ \
+ /* ES5 */ \
+ F(ObjectFreeze, 1, 1) \
+ \
/* Harmony modules */ \
F(IsJSModule, 1, 1) \
\
+ /* Harmony symbols */ \
+ F(CreateSymbol, 1, 1) \
+ F(SymbolName, 1, 1) \
+ \
/* Harmony proxies */ \
F(CreateJSProxy, 2, 1) \
F(CreateJSFunctionProxy, 4, 1) \
@@ -301,6 +333,7 @@ namespace internal {
F(SetAdd, 2, 1) \
F(SetHas, 2, 1) \
F(SetDelete, 2, 1) \
+ F(SetGetSize, 1, 1) \
\
/* Harmony maps */ \
F(MapInitialize, 1, 1) \
@@ -308,16 +341,63 @@ namespace internal {
F(MapHas, 2, 1) \
F(MapDelete, 2, 1) \
F(MapSet, 3, 1) \
- \
- /* Harmony weakmaps */ \
- F(WeakMapInitialize, 1, 1) \
- F(WeakMapGet, 2, 1) \
- F(WeakMapHas, 2, 1) \
- F(WeakMapDelete, 2, 1) \
- F(WeakMapSet, 3, 1) \
+ F(MapGetSize, 1, 1) \
+ \
+ /* Harmony weak maps and sets */ \
+ F(WeakCollectionInitialize, 1, 1) \
+ F(WeakCollectionGet, 2, 1) \
+ F(WeakCollectionHas, 2, 1) \
+ F(WeakCollectionDelete, 2, 1) \
+ F(WeakCollectionSet, 3, 1) \
+ \
+ /* Harmony observe */ \
+ F(IsObserved, 1, 1) \
+ F(SetIsObserved, 1, 1) \
+ F(SetObserverDeliveryPending, 0, 1) \
+ F(GetObservationState, 0, 1) \
+ F(ObservationWeakMapCreate, 0, 1) \
+ F(UnwrapGlobalProxy, 1, 1) \
+ F(IsAccessAllowedForObserver, 3, 1) \
+ \
+ /* Harmony typed arrays */ \
+ F(ArrayBufferInitialize, 2, 1)\
+ F(ArrayBufferGetByteLength, 1, 1)\
+ F(ArrayBufferSliceImpl, 3, 1) \
+ F(ArrayBufferIsView, 1, 1) \
+ \
+ F(TypedArrayInitialize, 5, 1) \
+ F(TypedArrayInitializeFromArrayLike, 4, 1) \
+ F(TypedArrayGetBuffer, 1, 1) \
+ F(TypedArrayGetByteLength, 1, 1) \
+ F(TypedArrayGetByteOffset, 1, 1) \
+ F(TypedArrayGetLength, 1, 1) \
+ F(TypedArraySetFastCases, 3, 1) \
+ \
+ F(DataViewInitialize, 4, 1) \
+ F(DataViewGetBuffer, 1, 1) \
+ F(DataViewGetByteLength, 1, 1) \
+ F(DataViewGetByteOffset, 1, 1) \
+ F(DataViewGetInt8, 3, 1) \
+ F(DataViewGetUint8, 3, 1) \
+ F(DataViewGetInt16, 3, 1) \
+ F(DataViewGetUint16, 3, 1) \
+ F(DataViewGetInt32, 3, 1) \
+ F(DataViewGetUint32, 3, 1) \
+ F(DataViewGetFloat32, 3, 1) \
+ F(DataViewGetFloat64, 3, 1) \
+ \
+ F(DataViewSetInt8, 4, 1) \
+ F(DataViewSetUint8, 4, 1) \
+ F(DataViewSetInt16, 4, 1) \
+ F(DataViewSetUint16, 4, 1) \
+ F(DataViewSetInt32, 4, 1) \
+ F(DataViewSetUint32, 4, 1) \
+ F(DataViewSetFloat32, 4, 1) \
+ F(DataViewSetFloat64, 4, 1) \
\
/* Statements */ \
F(NewClosure, 3, 1) \
+ F(NewClosureFromStubFailure, 1, 1) \
F(NewObject, 1, 1) \
F(NewObjectFromBound, 1, 1) \
F(FinalizeInstanceSize, 1, 1) \
@@ -335,7 +415,7 @@ namespace internal {
F(PushWithContext, 2, 1) \
F(PushCatchContext, 3, 1) \
F(PushBlockContext, 2, 1) \
- F(PushModuleContext, 1, 1) \
+ F(PushModuleContext, 2, 1) \
F(DeleteContextSlot, 2, 1) \
F(LoadContextSlot, 2, 2) \
F(LoadContextSlotNoReferenceError, 2, 2) \
@@ -343,6 +423,7 @@ namespace internal {
\
/* Declarations and initialization */ \
F(DeclareGlobals, 3, 1) \
+ F(DeclareModules, 1, 1) \
F(DeclareContextSlot, 4, 1) \
F(InitializeVarGlobal, -1 /* 2 or 3 */, 1) \
F(InitializeConstGlobal, 2, 1) \
@@ -363,9 +444,6 @@ namespace internal {
F(GetFromCache, 2, 1) \
\
/* Message objects */ \
- F(NewMessageObject, 2, 1) \
- F(MessageGetType, 1, 1) \
- F(MessageGetArguments, 1, 1) \
F(MessageGetStartPosition, 1, 1) \
F(MessageGetScript, 1, 1) \
\
@@ -379,6 +457,7 @@ namespace internal {
F(HasFastDoubleElements, 1, 1) \
F(HasFastHoleyElements, 1, 1) \
F(HasDictionaryElements, 1, 1) \
+ F(HasNonStrictArgumentsElements, 1, 1) \
F(HasExternalPixelElements, 1, 1) \
F(HasExternalArrayElements, 1, 1) \
F(HasExternalByteElements, 1, 1) \
@@ -390,12 +469,9 @@ namespace internal {
F(HasExternalFloatElements, 1, 1) \
F(HasExternalDoubleElements, 1, 1) \
F(HasFastProperties, 1, 1) \
- F(TransitionElementsSmiToDouble, 1, 1) \
- F(TransitionElementsDoubleToObject, 1, 1) \
+ F(TransitionElementsKind, 2, 1) \
F(HaveSameMap, 2, 1) \
- /* profiler */ \
- F(ProfilerResume, 0, 1) \
- F(ProfilerPause, 0, 1)
+ F(IsAccessCheckNeeded, 1, 1)
#ifdef ENABLE_DEBUGGER_SUPPORT
@@ -415,6 +491,7 @@ namespace internal {
F(GetFrameCount, 1, 1) \
F(GetFrameDetails, 2, 1) \
F(GetScopeCount, 2, 1) \
+ F(GetStepInPositions, 2, 1) \
F(GetScopeDetails, 4, 1) \
F(GetFunctionScopeCount, 1, 1) \
F(GetFunctionScopeDetails, 2, 1) \
@@ -423,13 +500,13 @@ namespace internal {
F(GetThreadCount, 1, 1) \
F(GetThreadDetails, 2, 1) \
F(SetDisableBreak, 1, 1) \
- F(GetBreakLocations, 1, 1) \
+ F(GetBreakLocations, 2, 1) \
F(SetFunctionBreakPoint, 3, 1) \
- F(SetScriptBreakPoint, 3, 1) \
+ F(SetScriptBreakPoint, 4, 1) \
F(ClearBreakPoint, 1, 1) \
F(ChangeBreakOnException, 2, 1) \
F(IsBreakOnException, 1, 1) \
- F(PrepareStep, 3, 1) \
+ F(PrepareStep, 4, 1) \
F(ClearStepping, 0, 1) \
F(DebugEvaluate, 6, 1) \
F(DebugEvaluateGlobal, 4, 1) \
@@ -459,25 +536,48 @@ namespace internal {
F(SetFlags, 1, 1) \
F(CollectGarbage, 1, 1) \
F(GetHeapUsage, 0, 1) \
- \
- /* LiveObjectList support*/ \
- F(HasLOLEnabled, 0, 1) \
- F(CaptureLOL, 0, 1) \
- F(DeleteLOL, 1, 1) \
- F(DumpLOL, 5, 1) \
- F(GetLOLObj, 1, 1) \
- F(GetLOLObjId, 1, 1) \
- F(GetLOLObjRetainers, 6, 1) \
- F(GetLOLPath, 3, 1) \
- F(InfoLOL, 2, 1) \
- F(PrintLOLObj, 1, 1) \
- F(ResetLOL, 0, 1) \
- F(SummarizeLOL, 3, 1)
#else
#define RUNTIME_FUNCTION_LIST_DEBUGGER_SUPPORT(F)
#endif
+
+#ifdef V8_I18N_SUPPORT
+#define RUNTIME_FUNCTION_LIST_I18N_SUPPORT(F) \
+ /* i18n support */ \
+ /* Standalone, helper methods. */ \
+ F(CanonicalizeLanguageTag, 1, 1) \
+ F(AvailableLocalesOf, 1, 1) \
+ F(GetDefaultICULocale, 0, 1) \
+ F(GetLanguageTagVariants, 1, 1) \
+ \
+ /* Date format and parse. */ \
+ F(CreateDateTimeFormat, 3, 1) \
+ F(InternalDateFormat, 2, 1) \
+ F(InternalDateParse, 2, 1) \
+ \
+ /* Number format and parse. */ \
+ F(CreateNumberFormat, 3, 1) \
+ F(InternalNumberFormat, 2, 1) \
+ F(InternalNumberParse, 2, 1) \
+ \
+ /* Collator. */ \
+ F(CreateCollator, 3, 1) \
+ F(InternalCompare, 3, 1) \
+ \
+ /* Break iterator. */ \
+ F(CreateBreakIterator, 3, 1) \
+ F(BreakIteratorAdoptText, 2, 1) \
+ F(BreakIteratorFirst, 1, 1) \
+ F(BreakIteratorNext, 1, 1) \
+ F(BreakIteratorCurrent, 1, 1) \
+ F(BreakIteratorBreakType, 1, 1) \
+
+#else
+#define RUNTIME_FUNCTION_LIST_I18N_SUPPORT(F)
+#endif
+
+
#ifdef DEBUG
#define RUNTIME_FUNCTION_LIST_DEBUG(F) \
/* Testing */ \
@@ -495,7 +595,8 @@ namespace internal {
RUNTIME_FUNCTION_LIST_ALWAYS_1(F) \
RUNTIME_FUNCTION_LIST_ALWAYS_2(F) \
RUNTIME_FUNCTION_LIST_DEBUG(F) \
- RUNTIME_FUNCTION_LIST_DEBUGGER_SUPPORT(F)
+ RUNTIME_FUNCTION_LIST_DEBUGGER_SUPPORT(F) \
+ RUNTIME_FUNCTION_LIST_I18N_SUPPORT(F)
// ----------------------------------------------------------------------------
// INLINE_FUNCTION_LIST defines all inlined functions accessed
@@ -515,6 +616,8 @@ namespace internal {
F(DateField, 2 /* date object, field index */, 1) \
F(StringCharFromCode, 1, 1) \
F(StringCharAt, 2, 1) \
+ F(OneByteSeqStringSetChar, 3, 1) \
+ F(TwoByteSeqStringSetChar, 3, 1) \
F(ObjectEquals, 2, 1) \
F(RandomHeapNumber, 0, 1) \
F(IsObject, 1, 1) \
@@ -531,11 +634,14 @@ namespace internal {
F(IsRegExpEquivalent, 2, 1) \
F(HasCachedArrayIndex, 1, 1) \
F(GetCachedArrayIndex, 1, 1) \
- F(FastAsciiArrayJoin, 2, 1)
+ F(FastAsciiArrayJoin, 2, 1) \
+ F(GeneratorNext, 2, 1) \
+ F(GeneratorThrow, 2, 1) \
+ F(DebugBreakInOptimizedCode, 0, 1)
// ----------------------------------------------------------------------------
-// INLINE_AND_RUNTIME_FUNCTION_LIST defines all inlined functions accessed
+// INLINE_RUNTIME_FUNCTION_LIST defines all inlined functions accessed
// with a native call of the form %_name from within JS code that also have
// a corresponding runtime function, that is called for slow cases.
// Entries have the form F(name, number of arguments, number of return values).
@@ -557,8 +663,8 @@ namespace internal {
class RuntimeState {
public:
- StaticResource<StringInputBuffer>* string_input_buffer() {
- return &string_input_buffer_;
+ StaticResource<ConsStringIteratorOp>* string_iterator() {
+ return &string_iterator_;
}
unibrow::Mapping<unibrow::ToUppercase, 128>* to_upper_mapping() {
return &to_upper_mapping_;
@@ -566,29 +672,29 @@ class RuntimeState {
unibrow::Mapping<unibrow::ToLowercase, 128>* to_lower_mapping() {
return &to_lower_mapping_;
}
- StringInputBuffer* string_input_buffer_compare_bufx() {
- return &string_input_buffer_compare_bufx_;
+ ConsStringIteratorOp* string_iterator_compare_x() {
+ return &string_iterator_compare_x_;
}
- StringInputBuffer* string_input_buffer_compare_bufy() {
- return &string_input_buffer_compare_bufy_;
+ ConsStringIteratorOp* string_iterator_compare_y() {
+ return &string_iterator_compare_y_;
}
- StringInputBuffer* string_locale_compare_buf1() {
- return &string_locale_compare_buf1_;
+ ConsStringIteratorOp* string_locale_compare_it1() {
+ return &string_locale_compare_it1_;
}
- StringInputBuffer* string_locale_compare_buf2() {
- return &string_locale_compare_buf2_;
+ ConsStringIteratorOp* string_locale_compare_it2() {
+ return &string_locale_compare_it2_;
}
private:
RuntimeState() {}
// Non-reentrant string buffer for efficient general use in the runtime.
- StaticResource<StringInputBuffer> string_input_buffer_;
+ StaticResource<ConsStringIteratorOp> string_iterator_;
unibrow::Mapping<unibrow::ToUppercase, 128> to_upper_mapping_;
unibrow::Mapping<unibrow::ToLowercase, 128> to_lower_mapping_;
- StringInputBuffer string_input_buffer_compare_bufx_;
- StringInputBuffer string_input_buffer_compare_bufy_;
- StringInputBuffer string_locale_compare_buf1_;
- StringInputBuffer string_locale_compare_buf2_;
+ ConsStringIteratorOp string_iterator_compare_x_;
+ ConsStringIteratorOp string_iterator_compare_y_;
+ ConsStringIteratorOp string_locale_compare_it1_;
+ ConsStringIteratorOp string_locale_compare_it2_;
friend class Isolate;
friend class Runtime;
@@ -635,15 +741,16 @@ class Runtime : public AllStatic {
static const int kNotFound = -1;
- // Add symbols for all the intrinsic function names to a StringDictionary.
+ // Add internalized strings for all the intrinsic function names to a
+ // StringDictionary.
// Returns failure if an allocation fails. In this case, it must be
// retried with a new, empty StringDictionary, not with the same one.
// Alternatively, heap initialization can be completely restarted.
MUST_USE_RESULT static MaybeObject* InitializeIntrinsicFunctionNames(
Heap* heap, Object* dictionary);
- // Get the intrinsic function with the given name, which must be a symbol.
- static const Function* FunctionForSymbol(Handle<String> name);
+ // Get the intrinsic function with the given name, which must be internalized.
+ static const Function* FunctionForName(Handle<String> name);
// Get the intrinsic function with the given FunctionId.
static const Function* FunctionForId(FunctionId id);
@@ -665,6 +772,11 @@ class Runtime : public AllStatic {
Handle<Object> object,
uint32_t index);
+ MUST_USE_RESULT static MaybeObject* GetElementOrCharAtOrFail(
+ Isolate* isolate,
+ Handle<Object> object,
+ uint32_t index);
+
MUST_USE_RESULT static MaybeObject* SetObjectProperty(
Isolate* isolate,
Handle<Object> object,
@@ -673,6 +785,14 @@ class Runtime : public AllStatic {
PropertyAttributes attr,
StrictModeFlag strict_mode);
+ MUST_USE_RESULT static MaybeObject* SetObjectPropertyOrFail(
+ Isolate* isolate,
+ Handle<Object> object,
+ Handle<Object> key,
+ Handle<Object> value,
+ PropertyAttributes attr,
+ StrictModeFlag strict_mode);
+
MUST_USE_RESULT static MaybeObject* ForceSetObjectProperty(
Isolate* isolate,
Handle<JSObject> object,
@@ -680,7 +800,13 @@ class Runtime : public AllStatic {
Handle<Object> value,
PropertyAttributes attr);
- MUST_USE_RESULT static MaybeObject* ForceDeleteObjectProperty(
+ MUST_USE_RESULT static MaybeObject* DeleteObjectProperty(
+ Isolate* isolate,
+ Handle<JSReceiver> object,
+ Handle<Object> key,
+ JSReceiver::DeleteMode mode);
+
+ MUST_USE_RESULT static MaybeObject* HasObjectProperty(
Isolate* isolate,
Handle<JSReceiver> object,
Handle<Object> key);
@@ -690,8 +816,29 @@ class Runtime : public AllStatic {
Handle<Object> object,
Handle<Object> key);
+ MUST_USE_RESULT static MaybeObject* GetObjectPropertyOrFail(
+ Isolate* isolate,
+ Handle<Object> object,
+ Handle<Object> key);
+
+ static void SetupArrayBuffer(Isolate* isolate,
+ Handle<JSArrayBuffer> array_buffer,
+ bool is_external,
+ void* data,
+ size_t allocated_length);
+
+ static bool SetupArrayBufferAllocatingData(
+ Isolate* isolate,
+ Handle<JSArrayBuffer> array_buffer,
+ size_t allocated_length,
+ bool initialize = true);
+
+ static void FreeArrayBuffer(
+ Isolate* isolate,
+ JSArrayBuffer* phantom_array_buffer);
+
// Helper functions used stubs.
- static void PerformGC(Object* result);
+ static void PerformGC(Object* result, Isolate* isolate);
// Used in runtime.cc and hydrogen's VisitArrayLiteral.
static Handle<Object> CreateArrayLiteralBoilerplate(
diff --git a/deps/v8/src/runtime.js b/deps/v8/src/runtime.js
index 6b487349d..ce11c3707 100644
--- a/deps/v8/src/runtime.js
+++ b/deps/v8/src/runtime.js
@@ -69,16 +69,24 @@ function EQUALS(y) {
} else if (IS_STRING(x)) {
while (true) {
if (IS_STRING(y)) return %StringEquals(x, y);
+ if (IS_SYMBOL(y)) return 1; // not equal
if (IS_NUMBER(y)) return %NumberEquals(%ToNumber(x), y);
if (IS_BOOLEAN(y)) return %NumberEquals(%ToNumber(x), %ToNumber(y));
if (IS_NULL_OR_UNDEFINED(y)) return 1; // not equal
y = %ToPrimitive(y, NO_HINT);
}
+ } else if (IS_SYMBOL(x)) {
+ while (true) {
+ if (IS_SYMBOL(y)) return %_ObjectEquals(x, y) ? 0 : 1;
+ if (!IS_SPEC_OBJECT(y)) return 1; // not equal
+ y = %ToPrimitive(y, NO_HINT);
+ }
} else if (IS_BOOLEAN(x)) {
if (IS_BOOLEAN(y)) return %_ObjectEquals(x, y) ? 0 : 1;
if (IS_NULL_OR_UNDEFINED(y)) return 1;
if (IS_NUMBER(y)) return %NumberEquals(%ToNumber(x), y);
if (IS_STRING(y)) return %NumberEquals(%ToNumber(x), %ToNumber(y));
+ if (IS_SYMBOL(y)) return 1; // not equal
// y is object.
x = %ToNumber(x);
y = %ToPrimitive(y, NO_HINT);
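
A sketch of the == results the new IS_SYMBOL branches encode, assuming the harmony symbols flag so that Symbol() yields a symbol:

var s = Symbol();
s == s;         // true, identity via %_ObjectEquals
s == Symbol();  // false
'x' == s;       // false, strings are never loosely equal to symbols
s == 0;         // false, 0 is not a spec object, so the loop reports not equal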
@@ -286,20 +294,6 @@ function BIT_XOR(y) {
}
-// ECMA-262, section 11.4.7, page 47.
-function UNARY_MINUS() {
- var x = IS_NUMBER(this) ? this : %NonNumberToNumber(this);
- return %NumberUnaryMinus(x);
-}
-
-
-// ECMA-262, section 11.4.8, page 48.
-function BIT_NOT() {
- var x = IS_NUMBER(this) ? this : %NonNumberToNumber(this);
- return %NumberNot(x);
-}
-
-
// ECMA-262, section 11.7.1, page 51.
function SHL(y) {
var x = IS_NUMBER(this) ? this : %NonNumberToNumber(this);
@@ -346,7 +340,7 @@ function SHR(y) {
// ECMA-262, section 11.4.1, page 46.
function DELETE(key, strict) {
- return %DeleteProperty(%ToObject(this), %ToString(key), strict);
+ return %DeleteProperty(%ToObject(this), %ToName(key), strict);
}
@@ -356,7 +350,7 @@ function IN(x) {
throw %MakeTypeError('invalid_in_operator_use', [this, x]);
}
return %_IsNonNegativeSmi(this) ?
- %HasElement(x, this) : %HasProperty(x, %ToString(this));
+ %HasElement(x, this) : %HasProperty(x, %ToName(this));
}
@@ -396,7 +390,7 @@ function INSTANCE_OF(F) {
// has a property with the given key; return the key as a string if
// it has. Otherwise returns 0 (smi). Used in for-in statements.
function FILTER_KEY(key) {
- var string = %ToString(key);
+ var string = %ToName(key);
if (%HasProperty(this, string)) return string;
return 0;
}
@@ -508,6 +502,7 @@ function ToPrimitive(x, hint) {
if (IS_STRING(x)) return x;
// Normal behavior.
if (!IS_SPEC_OBJECT(x)) return x;
+ if (IS_SYMBOL_WRAPPER(x)) return %_ValueOf(x);
if (hint == NO_HINT) hint = (IS_DATE(x)) ? STRING_HINT : NUMBER_HINT;
return (hint == NUMBER_HINT) ? %DefaultNumber(x) : %DefaultString(x);
}
@@ -531,7 +526,8 @@ function ToNumber(x) {
: %StringToNumber(x);
}
if (IS_BOOLEAN(x)) return x ? 1 : 0;
- if (IS_UNDEFINED(x)) return $NaN;
+ if (IS_UNDEFINED(x)) return NAN;
+ if (IS_SYMBOL(x)) return NAN;
return (IS_NULL(x)) ? 0 : ToNumber(%DefaultNumber(x));
}
@@ -541,7 +537,8 @@ function NonNumberToNumber(x) {
: %StringToNumber(x);
}
if (IS_BOOLEAN(x)) return x ? 1 : 0;
- if (IS_UNDEFINED(x)) return $NaN;
+ if (IS_UNDEFINED(x)) return NAN;
+ if (IS_SYMBOL(x)) return NAN;
return (IS_NULL(x)) ? 0 : ToNumber(%DefaultNumber(x));
}
@@ -563,13 +560,20 @@ function NonStringToString(x) {
}
+// ES6 symbols
+function ToName(x) {
+ return IS_SYMBOL(x) ? x : %ToString(x);
+}
+
+
// ECMA-262, section 9.9, page 36.
function ToObject(x) {
if (IS_STRING(x)) return new $String(x);
+ if (IS_SYMBOL(x)) return new $Symbol(x);
if (IS_NUMBER(x)) return new $Number(x);
if (IS_BOOLEAN(x)) return new $Boolean(x);
if (IS_NULL_OR_UNDEFINED(x) && !IS_UNDETECTABLE(x)) {
- throw %MakeTypeError('null_to_object', []);
+ throw %MakeTypeError('undefined_or_null_to_object', []);
}
return x;
}
@@ -640,7 +644,6 @@ function DefaultNumber(x) {
throw %MakeTypeError('cannot_convert_to_primitive', []);
}
-
// ECMA-262, section 8.6.2.6, page 28.
function DefaultString(x) {
var toString = x.toString;
@@ -658,6 +661,12 @@ function DefaultString(x) {
throw %MakeTypeError('cannot_convert_to_primitive', []);
}
+function ToPositiveInteger(x, rangeErrorName) {
+ var i = TO_INTEGER(x);
+ if (i < 0) throw %MakeRangeError(rangeErrorName);
+ return i;
+}
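
A sketch of the helper's contract; the error name string is a placeholder, not taken from this diff:

ToPositiveInteger(3.9, 'invalid_length');  // -> 3, TO_INTEGER truncates toward zero
ToPositiveInteger(-1, 'invalid_length');   // throws the RangeError named by the second argument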
+
// NOTE: Setting the prototype for Array must take place as early as
// possible due to code generation for array literals. When
diff --git a/deps/v8/src/safepoint-table.cc b/deps/v8/src/safepoint-table.cc
index 714e5c397..b56556572 100644
--- a/deps/v8/src/safepoint-table.cc
+++ b/deps/v8/src/safepoint-table.cc
@@ -59,7 +59,7 @@ bool SafepointEntry::HasRegisterAt(int reg_index) const {
SafepointTable::SafepointTable(Code* code) {
- ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
+ ASSERT(code->is_crankshafted());
code_ = code;
Address header = code->instruction_start() + code->safepoint_table_offset();
length_ = Memory::uint32_at(header + kLengthOffset);
@@ -158,14 +158,6 @@ unsigned SafepointTableBuilder::GetCodeOffset() const {
void SafepointTableBuilder::Emit(Assembler* assembler, int bits_per_entry) {
- // For lazy deoptimization we need space to patch a call after every call.
- // Ensure there is always space for such patching, even if the code ends
- // in a call.
- int target_offset = assembler->pc_offset() + Deoptimizer::patch_size();
- while (assembler->pc_offset() < target_offset) {
- assembler->nop();
- }
-
// Make sure the safepoint table is properly aligned. Pad with nops.
assembler->Align(kIntSize);
assembler->RecordComment(";;; Safepoint table.");
diff --git a/deps/v8/src/safepoint-table.h b/deps/v8/src/safepoint-table.h
index 307d948bf..fc8bf7a41 100644
--- a/deps/v8/src/safepoint-table.h
+++ b/deps/v8/src/safepoint-table.h
@@ -151,7 +151,7 @@ class SafepointTable BASE_EMBEDDED {
static void PrintBits(uint8_t byte, int digits);
- AssertNoAllocation no_allocation_;
+ DisallowHeapAllocation no_allocation_;
Code* code_;
unsigned length_;
unsigned entry_size_;
diff --git a/deps/v8/src/sampler.cc b/deps/v8/src/sampler.cc
new file mode 100644
index 000000000..684ef486c
--- /dev/null
+++ b/deps/v8/src/sampler.cc
@@ -0,0 +1,687 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "sampler.h"
+
+#if V8_OS_POSIX && !V8_OS_CYGWIN
+
+#define USE_SIGNALS
+
+#include <errno.h>
+#include <pthread.h>
+#include <signal.h>
+#include <sys/time.h>
+#include <sys/syscall.h>
+
+#if V8_OS_MACOSX
+#include <mach/mach.h>
+// OpenBSD doesn't have <ucontext.h>. ucontext_t lives in <signal.h>
+// and is a typedef for struct sigcontext. There is no uc_mcontext.
+#elif(!V8_OS_ANDROID || defined(__BIONIC_HAVE_UCONTEXT_T)) \
+ && !V8_OS_OPENBSD
+#include <ucontext.h>
+#endif
+#include <unistd.h>
+
+// GLibc on ARM defines mcontext_t as a typedef for 'struct sigcontext'.
+// Old versions of the C library <signal.h> didn't define the type.
+#if V8_OS_ANDROID && !defined(__BIONIC_HAVE_UCONTEXT_T) && \
+ defined(__arm__) && !defined(__BIONIC_HAVE_STRUCT_SIGCONTEXT)
+#include <asm/sigcontext.h>
+#endif
+
+#elif V8_OS_WIN || V8_OS_CYGWIN
+
+#include "win32-headers.h"
+
+#endif
+
+#include "v8.h"
+
+#include "cpu-profiler-inl.h"
+#include "flags.h"
+#include "frames-inl.h"
+#include "log.h"
+#include "platform.h"
+#include "simulator.h"
+#include "v8threads.h"
+#include "vm-state-inl.h"
+
+
+#if V8_OS_ANDROID && !defined(__BIONIC_HAVE_UCONTEXT_T)
+
+// Not all versions of Android's C library provide ucontext_t.
+// Detect this and provide custom but compatible definitions. Note that these
+// follow the GLibc naming convention to access register values from
+// mcontext_t.
+//
+// See http://code.google.com/p/android/issues/detail?id=34784
+
+#if defined(__arm__)
+
+typedef struct sigcontext mcontext_t;
+
+typedef struct ucontext {
+ uint32_t uc_flags;
+ struct ucontext* uc_link;
+ stack_t uc_stack;
+ mcontext_t uc_mcontext;
+ // Other fields are not used by V8, don't define them here.
+} ucontext_t;
+
+#elif defined(__mips__)
+// MIPS version of sigcontext, for Android bionic.
+typedef struct {
+ uint32_t regmask;
+ uint32_t status;
+ uint64_t pc;
+ uint64_t gregs[32];
+ uint64_t fpregs[32];
+ uint32_t acx;
+ uint32_t fpc_csr;
+ uint32_t fpc_eir;
+ uint32_t used_math;
+ uint32_t dsp;
+ uint64_t mdhi;
+ uint64_t mdlo;
+ uint32_t hi1;
+ uint32_t lo1;
+ uint32_t hi2;
+ uint32_t lo2;
+ uint32_t hi3;
+ uint32_t lo3;
+} mcontext_t;
+
+typedef struct ucontext {
+ uint32_t uc_flags;
+ struct ucontext* uc_link;
+ stack_t uc_stack;
+ mcontext_t uc_mcontext;
+ // Other fields are not used by V8, don't define them here.
+} ucontext_t;
+
+#elif defined(__i386__)
+// x86 version for Android.
+typedef struct {
+ uint32_t gregs[19];
+ void* fpregs;
+ uint32_t oldmask;
+ uint32_t cr2;
+} mcontext_t;
+
+typedef uint32_t kernel_sigset_t[2]; // x86 kernel uses 64-bit signal masks
+typedef struct ucontext {
+ uint32_t uc_flags;
+ struct ucontext* uc_link;
+ stack_t uc_stack;
+ mcontext_t uc_mcontext;
+ // Other fields are not used by V8, don't define them here.
+} ucontext_t;
+enum { REG_EBP = 6, REG_ESP = 7, REG_EIP = 14 };
+#endif
+
+#endif // V8_OS_ANDROID && !defined(__BIONIC_HAVE_UCONTEXT_T)
+
+
+namespace v8 {
+namespace internal {
+
+namespace {
+
+class PlatformDataCommon : public Malloced {
+ public:
+ PlatformDataCommon() : profiled_thread_id_(ThreadId::Current()) {}
+ ThreadId profiled_thread_id() { return profiled_thread_id_; }
+
+ protected:
+ ~PlatformDataCommon() {}
+
+ private:
+ ThreadId profiled_thread_id_;
+};
+
+} // namespace
+
+#if defined(USE_SIGNALS)
+
+class Sampler::PlatformData : public PlatformDataCommon {
+ public:
+ PlatformData() : vm_tid_(pthread_self()) {}
+ pthread_t vm_tid() const { return vm_tid_; }
+
+ private:
+ pthread_t vm_tid_;
+};
+
+#elif V8_OS_WIN || V8_OS_CYGWIN
+
+// ----------------------------------------------------------------------------
+// Win32 profiler support. On Cygwin we use the same sampler implementation as
+// on Win32.
+
+class Sampler::PlatformData : public PlatformDataCommon {
+ public:
+ // Get a handle to the calling thread. This is the thread that we are
+ // going to profile. We need to make a copy of the handle because we are
+ // going to use it in the sampler thread. Using GetThreadHandle() will
+ // not work in this case. We're using OpenThread because DuplicateHandle
+ // for some reason doesn't work in Chrome's sandbox.
+ PlatformData()
+ : profiled_thread_(OpenThread(THREAD_GET_CONTEXT |
+ THREAD_SUSPEND_RESUME |
+ THREAD_QUERY_INFORMATION,
+ false,
+ GetCurrentThreadId())) {}
+
+ ~PlatformData() {
+ if (profiled_thread_ != NULL) {
+ CloseHandle(profiled_thread_);
+ profiled_thread_ = NULL;
+ }
+ }
+
+ HANDLE profiled_thread() { return profiled_thread_; }
+
+ private:
+ HANDLE profiled_thread_;
+};
+#endif
+
+
+#if defined(USE_SIMULATOR)
+class SimulatorHelper {
+ public:
+ inline bool Init(Sampler* sampler, Isolate* isolate) {
+ simulator_ = isolate->thread_local_top()->simulator_;
+ // Check if there is an active simulator.
+ return simulator_ != NULL;
+ }
+
+ inline void FillRegisters(RegisterState* state) {
+ state->pc = reinterpret_cast<Address>(simulator_->get_pc());
+ state->sp = reinterpret_cast<Address>(simulator_->get_register(
+ Simulator::sp));
+#if V8_TARGET_ARCH_ARM
+ state->fp = reinterpret_cast<Address>(simulator_->get_register(
+ Simulator::r11));
+#elif V8_TARGET_ARCH_MIPS
+ state->fp = reinterpret_cast<Address>(simulator_->get_register(
+ Simulator::fp));
+#endif
+ }
+
+ private:
+ Simulator* simulator_;
+};
+#endif // USE_SIMULATOR
+
+
+#if defined(USE_SIGNALS)
+
+class SignalHandler : public AllStatic {
+ public:
+ static void SetUp() { if (!mutex_) mutex_ = new Mutex(); }
+ static void TearDown() { delete mutex_; }
+
+ static void IncreaseSamplerCount() {
+ LockGuard<Mutex> lock_guard(mutex_);
+ if (++client_count_ == 1) Install();
+ }
+
+ static void DecreaseSamplerCount() {
+ LockGuard<Mutex> lock_guard(mutex_);
+ if (--client_count_ == 0) Restore();
+ }
+
+ static bool Installed() {
+ return signal_handler_installed_;
+ }
+
+ private:
+ static void Install() {
+ struct sigaction sa;
+ sa.sa_sigaction = &HandleProfilerSignal;
+ sigemptyset(&sa.sa_mask);
+ sa.sa_flags = SA_RESTART | SA_SIGINFO;
+ signal_handler_installed_ =
+ (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
+ }
+
+ static void Restore() {
+ if (signal_handler_installed_) {
+ sigaction(SIGPROF, &old_signal_handler_, 0);
+ signal_handler_installed_ = false;
+ }
+ }
+
+ static void HandleProfilerSignal(int signal, siginfo_t* info, void* context);
+ // Protects the process wide state below.
+ static Mutex* mutex_;
+ static int client_count_;
+ static bool signal_handler_installed_;
+ static struct sigaction old_signal_handler_;
+};
+
+
+Mutex* SignalHandler::mutex_ = NULL;
+int SignalHandler::client_count_ = 0;
+struct sigaction SignalHandler::old_signal_handler_;
+bool SignalHandler::signal_handler_installed_ = false;
+
+
+void SignalHandler::HandleProfilerSignal(int signal, siginfo_t* info,
+ void* context) {
+#if V8_OS_NACL
+ // As Native Client does not support signal handling, profiling
+ // is disabled.
+ return;
+#else
+ USE(info);
+ if (signal != SIGPROF) return;
+ Isolate* isolate = Isolate::UncheckedCurrent();
+ if (isolate == NULL || !isolate->IsInitialized() || !isolate->IsInUse()) {
+ // We require a fully initialized and entered isolate.
+ return;
+ }
+ if (v8::Locker::IsActive() &&
+ !isolate->thread_manager()->IsLockedByCurrentThread()) {
+ return;
+ }
+
+ Sampler* sampler = isolate->logger()->sampler();
+ if (sampler == NULL) return;
+
+ RegisterState state;
+
+#if defined(USE_SIMULATOR)
+ SimulatorHelper helper;
+ if (!helper.Init(sampler, isolate)) return;
+ helper.FillRegisters(&state);
+#else
+ // Extracting the sample from the context is extremely machine dependent.
+ ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
+#if !V8_OS_OPENBSD
+ mcontext_t& mcontext = ucontext->uc_mcontext;
+#endif
+#if V8_OS_LINUX
+#if V8_HOST_ARCH_IA32
+ state.pc = reinterpret_cast<Address>(mcontext.gregs[REG_EIP]);
+ state.sp = reinterpret_cast<Address>(mcontext.gregs[REG_ESP]);
+ state.fp = reinterpret_cast<Address>(mcontext.gregs[REG_EBP]);
+#elif V8_HOST_ARCH_X64
+ state.pc = reinterpret_cast<Address>(mcontext.gregs[REG_RIP]);
+ state.sp = reinterpret_cast<Address>(mcontext.gregs[REG_RSP]);
+ state.fp = reinterpret_cast<Address>(mcontext.gregs[REG_RBP]);
+#elif V8_HOST_ARCH_ARM
+#if defined(__GLIBC__) && !defined(__UCLIBC__) && \
+ (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
+ // Old GLibc ARM versions used a gregs[] array to access the register
+ // values from mcontext_t.
+ state.pc = reinterpret_cast<Address>(mcontext.gregs[R15]);
+ state.sp = reinterpret_cast<Address>(mcontext.gregs[R13]);
+ state.fp = reinterpret_cast<Address>(mcontext.gregs[R11]);
+#else
+ state.pc = reinterpret_cast<Address>(mcontext.arm_pc);
+ state.sp = reinterpret_cast<Address>(mcontext.arm_sp);
+ state.fp = reinterpret_cast<Address>(mcontext.arm_fp);
+#endif // defined(__GLIBC__) && !defined(__UCLIBC__) &&
+ // (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
+#elif V8_HOST_ARCH_MIPS
+ state.pc = reinterpret_cast<Address>(mcontext.pc);
+ state.sp = reinterpret_cast<Address>(mcontext.gregs[29]);
+ state.fp = reinterpret_cast<Address>(mcontext.gregs[30]);
+#endif // V8_HOST_ARCH_*
+#elif V8_OS_MACOSX
+#if V8_HOST_ARCH_X64
+#if __DARWIN_UNIX03
+ state.pc = reinterpret_cast<Address>(mcontext->__ss.__rip);
+ state.sp = reinterpret_cast<Address>(mcontext->__ss.__rsp);
+ state.fp = reinterpret_cast<Address>(mcontext->__ss.__rbp);
+#else // !__DARWIN_UNIX03
+ state.pc = reinterpret_cast<Address>(mcontext->ss.rip);
+ state.sp = reinterpret_cast<Address>(mcontext->ss.rsp);
+ state.fp = reinterpret_cast<Address>(mcontext->ss.rbp);
+#endif // __DARWIN_UNIX03
+#elif V8_HOST_ARCH_IA32
+#if __DARWIN_UNIX03
+ state.pc = reinterpret_cast<Address>(mcontext->__ss.__eip);
+ state.sp = reinterpret_cast<Address>(mcontext->__ss.__esp);
+ state.fp = reinterpret_cast<Address>(mcontext->__ss.__ebp);
+#else // !__DARWIN_UNIX03
+ state.pc = reinterpret_cast<Address>(mcontext->ss.eip);
+ state.sp = reinterpret_cast<Address>(mcontext->ss.esp);
+ state.fp = reinterpret_cast<Address>(mcontext->ss.ebp);
+#endif // __DARWIN_UNIX03
+#endif // V8_HOST_ARCH_IA32
+#elif V8_OS_FREEBSD
+#if V8_HOST_ARCH_IA32
+ state.pc = reinterpret_cast<Address>(mcontext.mc_eip);
+ state.sp = reinterpret_cast<Address>(mcontext.mc_esp);
+ state.fp = reinterpret_cast<Address>(mcontext.mc_ebp);
+#elif V8_HOST_ARCH_X64
+ state.pc = reinterpret_cast<Address>(mcontext.mc_rip);
+ state.sp = reinterpret_cast<Address>(mcontext.mc_rsp);
+ state.fp = reinterpret_cast<Address>(mcontext.mc_rbp);
+#elif V8_HOST_ARCH_ARM
+ state.pc = reinterpret_cast<Address>(mcontext.mc_r15);
+ state.sp = reinterpret_cast<Address>(mcontext.mc_r13);
+ state.fp = reinterpret_cast<Address>(mcontext.mc_r11);
+#endif // V8_HOST_ARCH_*
+#elif V8_OS_NETBSD
+#if V8_HOST_ARCH_IA32
+ state.pc = reinterpret_cast<Address>(mcontext.__gregs[_REG_EIP]);
+ state.sp = reinterpret_cast<Address>(mcontext.__gregs[_REG_ESP]);
+ state.fp = reinterpret_cast<Address>(mcontext.__gregs[_REG_EBP]);
+#elif V8_HOST_ARCH_X64
+ state.pc = reinterpret_cast<Address>(mcontext.__gregs[_REG_RIP]);
+ state.sp = reinterpret_cast<Address>(mcontext.__gregs[_REG_RSP]);
+ state.fp = reinterpret_cast<Address>(mcontext.__gregs[_REG_RBP]);
+#endif // V8_HOST_ARCH_*
+#elif V8_OS_OPENBSD
+#if V8_HOST_ARCH_IA32
+ state.pc = reinterpret_cast<Address>(ucontext->sc_eip);
+ state.sp = reinterpret_cast<Address>(ucontext->sc_esp);
+ state.fp = reinterpret_cast<Address>(ucontext->sc_ebp);
+#elif V8_HOST_ARCH_X64
+ state.pc = reinterpret_cast<Address>(ucontext->sc_rip);
+ state.sp = reinterpret_cast<Address>(ucontext->sc_rsp);
+ state.fp = reinterpret_cast<Address>(ucontext->sc_rbp);
+#endif // V8_HOST_ARCH_*
+#elif V8_OS_SOLARIS
+ state.pc = reinterpret_cast<Address>(mcontext.gregs[REG_PC]);
+ state.sp = reinterpret_cast<Address>(mcontext.gregs[REG_SP]);
+ state.fp = reinterpret_cast<Address>(mcontext.gregs[REG_FP]);
+#endif // V8_OS_SOLARIS
+#endif // USE_SIMULATOR
+ sampler->SampleStack(state);
+#endif // V8_OS_NACL
+}
+
+#endif
+
+
+class SamplerThread : public Thread {
+ public:
+ static const int kSamplerThreadStackSize = 64 * KB;
+
+ explicit SamplerThread(int interval)
+ : Thread(Thread::Options("SamplerThread", kSamplerThreadStackSize)),
+ interval_(interval) {}
+
+ static void SetUp() { if (!mutex_) mutex_ = new Mutex(); }
+ static void TearDown() { delete mutex_; mutex_ = NULL; }
+
+ static void AddActiveSampler(Sampler* sampler) {
+ bool need_to_start = false;
+ LockGuard<Mutex> lock_guard(mutex_);
+ if (instance_ == NULL) {
+ // Start a thread that will send SIGPROF signals to VM threads
+ // when CPU profiling is enabled.
+ instance_ = new SamplerThread(sampler->interval());
+ need_to_start = true;
+ }
+
+ ASSERT(sampler->IsActive());
+ ASSERT(!instance_->active_samplers_.Contains(sampler));
+ ASSERT(instance_->interval_ == sampler->interval());
+ instance_->active_samplers_.Add(sampler);
+
+ if (need_to_start) instance_->StartSynchronously();
+ }
+
+ static void RemoveActiveSampler(Sampler* sampler) {
+ SamplerThread* instance_to_remove = NULL;
+ {
+ LockGuard<Mutex> lock_guard(mutex_);
+
+ ASSERT(sampler->IsActive());
+ bool removed = instance_->active_samplers_.RemoveElement(sampler);
+ ASSERT(removed);
+ USE(removed);
+
+ // We cannot delete the instance immediately as we need to Join() the
+ // thread, but we are holding mutex_ and the thread may try to acquire it.
+ if (instance_->active_samplers_.is_empty()) {
+ instance_to_remove = instance_;
+ instance_ = NULL;
+ }
+ }
+
+ if (!instance_to_remove) return;
+ instance_to_remove->Join();
+ delete instance_to_remove;
+ }
+
+ // Implement Thread::Run().
+ virtual void Run() {
+ while (true) {
+ {
+ LockGuard<Mutex> lock_guard(mutex_);
+ if (active_samplers_.is_empty()) break;
+ // When CPU profiling is enabled, both JavaScript and C++ code is
+ // profiled. We must not suspend.
+ for (int i = 0; i < active_samplers_.length(); ++i) {
+ Sampler* sampler = active_samplers_.at(i);
+ if (!sampler->isolate()->IsInitialized()) continue;
+ if (!sampler->IsProfiling()) continue;
+ sampler->DoSample();
+ }
+ }
+ OS::Sleep(interval_);
+ }
+ }
+
+ private:
+ // Protects the process wide state below.
+ static Mutex* mutex_;
+ static SamplerThread* instance_;
+
+ const int interval_;
+ List<Sampler*> active_samplers_;
+
+ DISALLOW_COPY_AND_ASSIGN(SamplerThread);
+};
+
+
+Mutex* SamplerThread::mutex_ = NULL;
+SamplerThread* SamplerThread::instance_ = NULL;
+
+
+//
+// StackTracer implementation
+//
+DISABLE_ASAN void TickSample::Init(Isolate* isolate,
+ const RegisterState& regs) {
+ ASSERT(isolate->IsInitialized());
+ pc = regs.pc;
+ state = isolate->current_vm_state();
+
+ // Avoid collecting traces while doing GC.
+ if (state == GC) return;
+
+ Address js_entry_sp = isolate->js_entry_sp();
+ if (js_entry_sp == 0) {
+ // Not executing JS now.
+ return;
+ }
+
+ ExternalCallbackScope* scope = isolate->external_callback_scope();
+ Address handler = Isolate::handler(isolate->thread_local_top());
+ // If there is a handler on top of the external callback scope then
+ // we have already entered JavaScript again and the external callback
+ // is not the top function.
+ if (scope && scope->scope_address() < handler) {
+ external_callback = scope->callback();
+ has_external_callback = true;
+ } else {
+ // Sample potential return address value for frameless invocation of
+ // stubs (we'll figure out later if this value makes sense).
+ tos = Memory::Address_at(regs.sp);
+ has_external_callback = false;
+ }
+
+ SafeStackFrameIterator it(isolate, regs.fp, regs.sp, js_entry_sp);
+ top_frame_type = it.top_frame_type();
+ int i = 0;
+ while (!it.done() && i < TickSample::kMaxFramesCount) {
+ stack[i++] = it.frame()->pc();
+ it.Advance();
+ }
+ frames_count = i;
+}
+
+
+void Sampler::SetUp() {
+#if defined(USE_SIGNALS)
+ SignalHandler::SetUp();
+#endif
+ SamplerThread::SetUp();
+}
+
+
+void Sampler::TearDown() {
+ SamplerThread::TearDown();
+#if defined(USE_SIGNALS)
+ SignalHandler::TearDown();
+#endif
+}
+
+
+Sampler::Sampler(Isolate* isolate, int interval)
+ : isolate_(isolate),
+ interval_(interval),
+ profiling_(false),
+ has_processing_thread_(false),
+ active_(false),
+ is_counting_samples_(false),
+ js_and_external_sample_count_(0) {
+ data_ = new PlatformData;
+}
+
+
+Sampler::~Sampler() {
+ ASSERT(!IsActive());
+ delete data_;
+}
+
+
+void Sampler::Start() {
+ ASSERT(!IsActive());
+ SetActive(true);
+ SamplerThread::AddActiveSampler(this);
+}
+
+
+void Sampler::Stop() {
+ ASSERT(IsActive());
+ SamplerThread::RemoveActiveSampler(this);
+ SetActive(false);
+}
+
+
+void Sampler::IncreaseProfilingDepth() {
+ NoBarrier_AtomicIncrement(&profiling_, 1);
+#if defined(USE_SIGNALS)
+ SignalHandler::IncreaseSamplerCount();
+#endif
+}
+
+
+void Sampler::DecreaseProfilingDepth() {
+#if defined(USE_SIGNALS)
+ SignalHandler::DecreaseSamplerCount();
+#endif
+ NoBarrier_AtomicIncrement(&profiling_, -1);
+}
+
+
+void Sampler::SampleStack(const RegisterState& state) {
+ TickSample* sample = isolate_->cpu_profiler()->StartTickSample();
+ TickSample sample_obj;
+ if (sample == NULL) sample = &sample_obj;
+ sample->Init(isolate_, state);
+ if (is_counting_samples_) {
+ if (sample->state == JS || sample->state == EXTERNAL) {
+ ++js_and_external_sample_count_;
+ }
+ }
+ Tick(sample);
+ if (sample != &sample_obj) {
+ isolate_->cpu_profiler()->FinishTickSample();
+ }
+}
+
+
+#if defined(USE_SIGNALS)
+
+void Sampler::DoSample() {
+ if (!SignalHandler::Installed()) return;
+ pthread_kill(platform_data()->vm_tid(), SIGPROF);
+}
+
+#elif V8_OS_WIN || V8_OS_CYGWIN
+
+void Sampler::DoSample() {
+ HANDLE profiled_thread = platform_data()->profiled_thread();
+ if (profiled_thread == NULL) return;
+
+#if defined(USE_SIMULATOR)
+ SimulatorHelper helper;
+ if (!helper.Init(this, isolate())) return;
+#endif
+
+ const DWORD kSuspendFailed = static_cast<DWORD>(-1);
+ if (SuspendThread(profiled_thread) == kSuspendFailed) return;
+
+ // Context used for sampling the register state of the profiled thread.
+ CONTEXT context;
+ memset(&context, 0, sizeof(context));
+ context.ContextFlags = CONTEXT_FULL;
+ if (GetThreadContext(profiled_thread, &context) != 0) {
+ RegisterState state;
+#if defined(USE_SIMULATOR)
+ helper.FillRegisters(&state);
+#else
+#if V8_HOST_ARCH_X64
+ state.pc = reinterpret_cast<Address>(context.Rip);
+ state.sp = reinterpret_cast<Address>(context.Rsp);
+ state.fp = reinterpret_cast<Address>(context.Rbp);
+#else
+ state.pc = reinterpret_cast<Address>(context.Eip);
+ state.sp = reinterpret_cast<Address>(context.Esp);
+ state.fp = reinterpret_cast<Address>(context.Ebp);
+#endif
+#endif // USE_SIMULATOR
+ SampleStack(state);
+ }
+ ResumeThread(profiled_thread);
+}
+
+#endif // USE_SIGNALS
+
+
+} } // namespace v8::internal
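The signal-based path above reduces to a small round trip: SamplerThread periodically calls DoSample(), which sends SIGPROF to the profiled thread with pthread_kill(), and the handler reads the interrupted registers out of the passed ucontext. A minimal standalone sketch of that loop, not the V8 API, assuming glibc/Linux on x86-64 (where REG_RIP is available once _GNU_SOURCE is defined, as g++ does by default):

    #include <pthread.h>
    #include <signal.h>
    #include <ucontext.h>
    #include <unistd.h>
    #include <atomic>
    #include <cstdio>

    static std::atomic<void*> last_pc(nullptr);

    // Counterpart of SignalHandler::HandleProfilerSignal: record the PC at
    // the point where the target thread was interrupted.
    static void ProfilerSignal(int, siginfo_t*, void* context) {
      ucontext_t* uc = static_cast<ucontext_t*>(context);
      last_pc.store(reinterpret_cast<void*>(uc->uc_mcontext.gregs[REG_RIP]));
    }

    int main() {
      struct sigaction sa = {};
      sa.sa_sigaction = &ProfilerSignal;     // as in SignalHandler::Install
      sigemptyset(&sa.sa_mask);
      sa.sa_flags = SA_RESTART | SA_SIGINFO;
      sigaction(SIGPROF, &sa, NULL);

      pthread_t profiled_thread = pthread_self();  // normally the VM thread
      for (int i = 0; i < 5; ++i) {
        pthread_kill(profiled_thread, SIGPROF);    // what Sampler::DoSample does
        usleep(1000);                              // the sampling interval
        std::printf("sampled pc = %p\n", last_pc.load());
      }
      return 0;
    }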
diff --git a/deps/v8/src/sampler.h b/deps/v8/src/sampler.h
new file mode 100644
index 000000000..b17a2ed8d
--- /dev/null
+++ b/deps/v8/src/sampler.h
@@ -0,0 +1,150 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_SAMPLER_H_
+#define V8_SAMPLER_H_
+
+#include "atomicops.h"
+#include "frames.h"
+#include "v8globals.h"
+
+namespace v8 {
+namespace internal {
+
+class Isolate;
+
+// ----------------------------------------------------------------------------
+// Sampler
+//
+// A sampler periodically samples the state of the VM and optionally
+// (if used for profiling) the program counter and stack pointer for
+// the thread that created it.
+
+struct RegisterState {
+ RegisterState() : pc(NULL), sp(NULL), fp(NULL) {}
+ Address pc; // Instruction pointer.
+ Address sp; // Stack pointer.
+ Address fp; // Frame pointer.
+};
+
+// TickSample captures the information collected for each sample.
+struct TickSample {
+ TickSample()
+ : state(OTHER),
+ pc(NULL),
+ external_callback(NULL),
+ frames_count(0),
+ has_external_callback(false),
+ top_frame_type(StackFrame::NONE) {}
+ void Init(Isolate* isolate, const RegisterState& state);
+ StateTag state; // The state of the VM.
+ Address pc; // Instruction pointer.
+ union {
+ Address tos; // Top stack value (*sp).
+ Address external_callback;
+ };
+ static const int kMaxFramesCount = 64;
+ Address stack[kMaxFramesCount]; // Call stack.
+ int frames_count : 8; // Number of captured frames.
+ bool has_external_callback : 1;
+ StackFrame::Type top_frame_type : 4;
+};
+
+class Sampler {
+ public:
+ // Initializes the Sampler support. Called once at VM startup.
+ static void SetUp();
+ static void TearDown();
+
+ // Initialize sampler.
+ Sampler(Isolate* isolate, int interval);
+ virtual ~Sampler();
+
+ Isolate* isolate() const { return isolate_; }
+ int interval() const { return interval_; }
+
+ // Performs stack sampling.
+ void SampleStack(const RegisterState& regs);
+
+ // Start and stop sampler.
+ void Start();
+ void Stop();
+
+ // Whether the sampling thread should use this Sampler for CPU profiling.
+ bool IsProfiling() const {
+ return NoBarrier_Load(&profiling_) > 0 &&
+ !NoBarrier_Load(&has_processing_thread_);
+ }
+ void IncreaseProfilingDepth();
+ void DecreaseProfilingDepth();
+
+ // Whether the sampler is running (that is, consumes resources).
+ bool IsActive() const { return NoBarrier_Load(&active_); }
+
+ void DoSample();
+ // If true, the next sample must be initiated on the profiler event processor
+ // thread right after the latest sample is processed.
+ void SetHasProcessingThread(bool value) {
+ NoBarrier_Store(&has_processing_thread_, value);
+ }
+
+ // Used in tests to make sure that stack sampling is performed.
+ unsigned js_and_external_sample_count() const {
+ return js_and_external_sample_count_;
+ }
+ void StartCountingSamples() {
+ is_counting_samples_ = true;
+ js_and_external_sample_count_ = 0;
+ }
+
+ class PlatformData;
+ PlatformData* platform_data() const { return data_; }
+
+ protected:
+ // This method is called for each sampling period with the current
+ // program counter.
+ virtual void Tick(TickSample* sample) = 0;
+
+ private:
+ void SetActive(bool value) { NoBarrier_Store(&active_, value); }
+
+ Isolate* isolate_;
+ const int interval_;
+ Atomic32 profiling_;
+ Atomic32 has_processing_thread_;
+ Atomic32 active_;
+ PlatformData* data_; // Platform specific data.
+ bool is_counting_samples_;
+ // Counts stack samples taken in JS VM state.
+ unsigned js_and_external_sample_count_;
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Sampler);
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_SAMPLER_H_
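As a usage sketch of this interface (PrintingSampler and the call sequence below are illustrative, not code from this patch): a concrete sampler derives from Sampler, overrides Tick() to consume each TickSample, and is driven through Start()/Stop().

    #include "sampler.h"   // within the V8 source tree

    namespace v8 {
    namespace internal {

    class PrintingSampler : public Sampler {
     public:
      PrintingSampler(Isolate* isolate, int interval)
          : Sampler(isolate, interval) {}

     protected:
      // Invoked from SampleStack() for every captured sample.
      virtual void Tick(TickSample* sample) {
        // e.g. look at sample->pc and sample->stack[0 .. sample->frames_count).
      }
    };

    } }  // namespace v8::internal

    // Typical driving sequence:
    //   PrintingSampler sampler(isolate, interval);
    //   sampler.IncreaseProfilingDepth();   // IsProfiling() becomes true
    //   sampler.Start();                    // registers with SamplerThread
    //   ...
    //   sampler.Stop();
    //   sampler.DecreaseProfilingDepth();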
diff --git a/deps/v8/src/scanner-character-streams.cc b/deps/v8/src/scanner-character-streams.cc
index 56b9f03aa..fb503459f 100644
--- a/deps/v8/src/scanner-character-streams.cc
+++ b/deps/v8/src/scanner-character-streams.cc
@@ -46,6 +46,7 @@ BufferedUtf16CharacterStream::BufferedUtf16CharacterStream()
buffer_end_ = buffer_;
}
+
BufferedUtf16CharacterStream::~BufferedUtf16CharacterStream() { }
void BufferedUtf16CharacterStream::PushBack(uc32 character) {
@@ -113,6 +114,7 @@ unsigned BufferedUtf16CharacterStream::SlowSeekForward(unsigned delta) {
return BufferSeekForward(delta);
}
+
// ----------------------------------------------------------------------------
// GenericStringUtf16CharacterStream
diff --git a/deps/v8/src/scanner.cc b/deps/v8/src/scanner.cc
index bd2db5818..26f840b23 100755..100644
--- a/deps/v8/src/scanner.cc
+++ b/deps/v8/src/scanner.cc
@@ -27,10 +27,14 @@
// Features shared by parsing and pre-parsing scanners.
+#include <cmath>
+
#include "scanner.h"
#include "../include/v8stdint.h"
#include "char-predicates-inl.h"
+#include "conversions-inl.h"
+#include "list-inl.h"
namespace v8 {
namespace internal {
@@ -42,7 +46,8 @@ Scanner::Scanner(UnicodeCache* unicode_cache)
: unicode_cache_(unicode_cache),
octal_pos_(Location::invalid()),
harmony_scoping_(false),
- harmony_modules_(false) { }
+ harmony_modules_(false),
+ harmony_numeric_literals_(false) { }
void Scanner::Initialize(Utf16CharacterStream* source) {
@@ -719,7 +724,7 @@ void Scanner::ScanDecimalDigits() {
Token::Value Scanner::ScanNumber(bool seen_period) {
ASSERT(IsDecimalDigit(c0_)); // the first digit of the number or the fraction
- enum { DECIMAL, HEX, OCTAL } kind = DECIMAL;
+ enum { DECIMAL, HEX, OCTAL, IMPLICIT_OCTAL, BINARY } kind = DECIMAL;
LiteralScope literal(this);
if (seen_period) {
@@ -733,7 +738,8 @@ Token::Value Scanner::ScanNumber(bool seen_period) {
int start_pos = source_pos(); // For reporting octal positions.
AddLiteralCharAdvance();
- // either 0, 0exxx, 0Exxx, 0.xxx, an octal number, or a hex number
+ // either 0, 0exxx, 0Exxx, 0.xxx, a hex number, a binary number or
+ // an octal number.
if (c0_ == 'x' || c0_ == 'X') {
// hex number
kind = HEX;
@@ -745,9 +751,29 @@ Token::Value Scanner::ScanNumber(bool seen_period) {
while (IsHexDigit(c0_)) {
AddLiteralCharAdvance();
}
+ } else if (harmony_numeric_literals_ && (c0_ == 'o' || c0_ == 'O')) {
+ kind = OCTAL;
+ AddLiteralCharAdvance();
+ if (!IsOctalDigit(c0_)) {
+ // we must have at least one octal digit after 'o'/'O'
+ return Token::ILLEGAL;
+ }
+ while (IsOctalDigit(c0_)) {
+ AddLiteralCharAdvance();
+ }
+ } else if (harmony_numeric_literals_ && (c0_ == 'b' || c0_ == 'B')) {
+ kind = BINARY;
+ AddLiteralCharAdvance();
+ if (!IsBinaryDigit(c0_)) {
+ // we must have at least one binary digit after 'b'/'B'
+ return Token::ILLEGAL;
+ }
+ while (IsBinaryDigit(c0_)) {
+ AddLiteralCharAdvance();
+ }
} else if ('0' <= c0_ && c0_ <= '7') {
// (possible) octal number
- kind = OCTAL;
+ kind = IMPLICIT_OCTAL;
while (true) {
if (c0_ == '8' || c0_ == '9') {
kind = DECIMAL;
@@ -776,7 +802,7 @@ Token::Value Scanner::ScanNumber(bool seen_period) {
// scan exponent, if any
if (c0_ == 'e' || c0_ == 'E') {
ASSERT(kind != HEX); // 'e'/'E' must be scanned as part of the hex number
- if (kind == OCTAL) return Token::ILLEGAL; // no exponent for octals allowed
+ if (kind != DECIMAL) return Token::ILLEGAL;
// scan exponent
AddLiteralCharAdvance();
if (c0_ == '+' || c0_ == '-')
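Taken together, the hunks above accept 0o/0O octal and 0b/0B binary literals (when harmony numeric literals are enabled) and reject an exponent on anything that is not a plain decimal. A rough standalone illustration of the values such literals denote; LiteralValue is a hypothetical helper, not the scanner's own conversion path:

    #include <cassert>
    #include <cstdlib>
    #include <cstring>

    // Maps an already-validated numeric literal to its value.
    long LiteralValue(const char* s) {
      if (std::strncmp(s, "0o", 2) == 0 || std::strncmp(s, "0O", 2) == 0)
        return std::strtol(s + 2, NULL, 8);   // new ES6 octal form
      if (std::strncmp(s, "0b", 2) == 0 || std::strncmp(s, "0B", 2) == 0)
        return std::strtol(s + 2, NULL, 2);   // new ES6 binary form
      return std::strtol(s, NULL, 0);         // decimal, hex, implicit octal
    }

    int main() {
      assert(LiteralValue("0o777") == 511);
      assert(LiteralValue("0b1010") == 10);
      assert(LiteralValue("0x1F") == 31);
      return 0;
    }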
@@ -877,7 +903,7 @@ uc32 Scanner::ScanIdentifierUnicodeEscape() {
KEYWORD("while", Token::WHILE) \
KEYWORD("with", Token::WITH) \
KEYWORD_GROUP('y') \
- KEYWORD("yield", Token::FUTURE_STRICT_RESERVED_WORD)
+ KEYWORD("yield", Token::YIELD)
static Token::Value KeywordOrIdentifierToken(const char* input,
@@ -1086,4 +1112,140 @@ bool Scanner::ScanRegExpFlags() {
return true;
}
+
+int DuplicateFinder::AddAsciiSymbol(Vector<const char> key, int value) {
+ return AddSymbol(Vector<const byte>::cast(key), true, value);
+}
+
+
+int DuplicateFinder::AddUtf16Symbol(Vector<const uint16_t> key, int value) {
+ return AddSymbol(Vector<const byte>::cast(key), false, value);
+}
+
+
+int DuplicateFinder::AddSymbol(Vector<const byte> key,
+ bool is_ascii,
+ int value) {
+ uint32_t hash = Hash(key, is_ascii);
+ byte* encoding = BackupKey(key, is_ascii);
+ HashMap::Entry* entry = map_.Lookup(encoding, hash, true);
+ int old_value = static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
+ entry->value =
+ reinterpret_cast<void*>(static_cast<intptr_t>(value | old_value));
+ return old_value;
+}
+
+
+int DuplicateFinder::AddNumber(Vector<const char> key, int value) {
+ ASSERT(key.length() > 0);
+ // Quick check for already being in canonical form.
+ if (IsNumberCanonical(key)) {
+ return AddAsciiSymbol(key, value);
+ }
+
+ int flags = ALLOW_HEX | ALLOW_OCTAL | ALLOW_IMPLICIT_OCTAL | ALLOW_BINARY;
+ double double_value = StringToDouble(unicode_constants_, key, flags, 0.0);
+ int length;
+ const char* string;
+ if (!std::isfinite(double_value)) {
+ string = "Infinity";
+ length = 8; // strlen("Infinity");
+ } else {
+ string = DoubleToCString(double_value,
+ Vector<char>(number_buffer_, kBufferSize));
+ length = StrLength(string);
+ }
+ return AddSymbol(Vector<const byte>(reinterpret_cast<const byte*>(string),
+ length), true, value);
+}
+
+
+bool DuplicateFinder::IsNumberCanonical(Vector<const char> number) {
+ // Test for a safe approximation of number literals that are already
+ // in canonical form: max 15 digits, no leading zeroes, except an
+ // integer part that is a single zero, and no trailing zeros below
+ // the decimal point.
+ int pos = 0;
+ int length = number.length();
+ if (number.length() > 15) return false;
+ if (number[pos] == '0') {
+ pos++;
+ } else {
+ while (pos < length &&
+ static_cast<unsigned>(number[pos] - '0') <= ('9' - '0')) pos++;
+ }
+ if (length == pos) return true;
+ if (number[pos] != '.') return false;
+ pos++;
+ bool invalid_last_digit = true;
+ while (pos < length) {
+ byte digit = number[pos] - '0';
+ if (digit > '9' - '0') return false;
+ invalid_last_digit = (digit == 0);
+ pos++;
+ }
+ return !invalid_last_digit;
+}
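To make the rules above concrete, a few inputs and how this check treats them (derived from the code, not stated in the patch):

    // Accepted as already canonical (fast path, no conversion):
    //   "0", "42", "0.5", "123.25"
    // Rejected, so AddNumber falls back to StringToDouble/DoubleToCString:
    //   "1.50"          trailing zero after the decimal point
    //   "012"           leading zero not followed by '.'
    //   "0x10", "1e3"   characters other than decimal digits and '.'
    //   anything longer than 15 characters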
+
+
+uint32_t DuplicateFinder::Hash(Vector<const byte> key, bool is_ascii) {
+ // Primitive hash function, almost identical to the one used
+ // for strings (except that it's seeded by the length and ASCII-ness).
+ int length = key.length();
+ uint32_t hash = (length << 1) | (is_ascii ? 1 : 0);
+ for (int i = 0; i < length; i++) {
+ uint32_t c = key[i];
+ hash = (hash + c) * 1025;
+ hash ^= (hash >> 6);
+ }
+ return hash;
+}
+
+
+bool DuplicateFinder::Match(void* first, void* second) {
+ // Decode lengths.
+ // Length + ASCII-bit is encoded as base 128, most significant heptet first,
+ // with the 8th bit being non-zero while there are more heptets.
+ // The value encodes the number of bytes following, and whether the original
+ // was ASCII.
+ byte* s1 = reinterpret_cast<byte*>(first);
+ byte* s2 = reinterpret_cast<byte*>(second);
+ uint32_t length_ascii_field = 0;
+ byte c1;
+ do {
+ c1 = *s1;
+ if (c1 != *s2) return false;
+ length_ascii_field = (length_ascii_field << 7) | (c1 & 0x7f);
+ s1++;
+ s2++;
+ } while ((c1 & 0x80) != 0);
+ int length = static_cast<int>(length_ascii_field >> 1);
+ return memcmp(s1, s2, length) == 0;
+}
+
+
+byte* DuplicateFinder::BackupKey(Vector<const byte> bytes,
+ bool is_ascii) {
+ uint32_t ascii_length = (bytes.length() << 1) | (is_ascii ? 1 : 0);
+ backing_store_.StartSequence();
+ // Emit ascii_length as a base-128 encoded number, with the most significant
+ // bit set on every byte except the last, least significant, one.
+ if (ascii_length >= (1 << 7)) {
+ if (ascii_length >= (1 << 14)) {
+ if (ascii_length >= (1 << 21)) {
+ if (ascii_length >= (1 << 28)) {
+ backing_store_.Add(static_cast<byte>((ascii_length >> 28) | 0x80));
+ }
+ backing_store_.Add(static_cast<byte>((ascii_length >> 21) | 0x80u));
+ }
+ backing_store_.Add(static_cast<byte>((ascii_length >> 14) | 0x80u));
+ }
+ backing_store_.Add(static_cast<byte>((ascii_length >> 7) | 0x80u));
+ }
+ backing_store_.Add(static_cast<byte>(ascii_length & 0x7f));
+
+ backing_store_.AddBlock(bytes);
+ return backing_store_.EndSequence().start();
+}
+
} } // namespace v8::internal
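BackupKey and Match above agree on a small variable-length prefix: (length << 1) | is_ascii, written base-128 with the most significant 7-bit group first and the top bit marking continuation bytes. A sketch of just that prefix; EncodePrefix is illustrative, not part of the patch:

    #include <cassert>
    #include <stdint.h>
    #include <vector>

    // Produces the same byte sequence BackupKey prepends to a key.
    std::vector<uint8_t> EncodePrefix(uint32_t length, bool is_ascii) {
      uint32_t v = (length << 1) | (is_ascii ? 1 : 0);
      std::vector<uint8_t> out;
      for (int shift = 28; shift > 0; shift -= 7) {
        if (v >= (1u << shift)) out.push_back((v >> shift) | 0x80);
      }
      out.push_back(v & 0x7f);
      return out;
    }

    int main() {
      // A 300-byte ASCII key: (300 << 1) | 1 == 601, emitted as 0x84 0x59
      // because (0x04 << 7) | 0x59 == 601; Match re-assembles it the same way.
      std::vector<uint8_t> p = EncodePrefix(300, true);
      assert(p.size() == 2 && p[0] == 0x84 && p[1] == 0x59);
      return 0;
    }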
diff --git a/deps/v8/src/scanner.h b/deps/v8/src/scanner.h
index 4de413b88..3cefc833a 100644
--- a/deps/v8/src/scanner.h
+++ b/deps/v8/src/scanner.h
@@ -34,6 +34,8 @@
#include "char-predicates.h"
#include "checks.h"
#include "globals.h"
+#include "hashmap.h"
+#include "list.h"
#include "token.h"
#include "unicode-inl.h"
#include "utils.h"
@@ -42,25 +44,6 @@ namespace v8 {
namespace internal {
-// General collection of (multi-)bit-flags that can be passed to scanners and
-// parsers to signify their (initial) mode of operation.
-enum ParsingFlags {
- kNoParsingFlags = 0,
- // Embed LanguageMode values in parsing flags, i.e., equivalent to:
- // CLASSIC_MODE = 0,
- // STRICT_MODE,
- // EXTENDED_MODE,
- kLanguageModeMask = 0x03,
- kAllowLazy = 0x04,
- kAllowNativesSyntax = 0x08,
- kAllowModules = 0x10
-};
-
-STATIC_ASSERT((kLanguageModeMask & CLASSIC_MODE) == CLASSIC_MODE);
-STATIC_ASSERT((kLanguageModeMask & STRICT_MODE) == STRICT_MODE);
-STATIC_ASSERT((kLanguageModeMask & EXTENDED_MODE) == EXTENDED_MODE);
-
-
// Returns the value (0 .. 15) of a hexadecimal character c.
// If c is not a legal hexadecimal character, returns a value < 0.
inline int HexValue(uc32 c) {
@@ -140,12 +123,13 @@ class Utf16CharacterStream {
};
-class UnicodeCache {
// ---------------------------------------------------------------------
// Caching predicates used by scanners.
+
+class UnicodeCache {
public:
UnicodeCache() {}
- typedef unibrow::Utf8InputBuffer<1024> Utf8Decoder;
+ typedef unibrow::Utf8Decoder<512> Utf8Decoder;
StaticResource<Utf8Decoder>* utf8_decoder() {
return &utf8_decoder_;
@@ -167,6 +151,56 @@ class UnicodeCache {
};
+// ---------------------------------------------------------------------
+// DuplicateFinder discovers duplicate symbols.
+
+class DuplicateFinder {
+ public:
+ explicit DuplicateFinder(UnicodeCache* constants)
+ : unicode_constants_(constants),
+ backing_store_(16),
+ map_(&Match) { }
+
+ int AddAsciiSymbol(Vector<const char> key, int value);
+ int AddUtf16Symbol(Vector<const uint16_t> key, int value);
+ // Add a number literal by converting it (if necessary)
+ // to the string that ToString(ToNumber(literal)) would generate,
+ // and then adding that string with AddAsciiSymbol.
+ // This string is the actual value used as key in an object literal,
+ // and the one that must be different from the other keys.
+ int AddNumber(Vector<const char> key, int value);
+
+ private:
+ int AddSymbol(Vector<const byte> key, bool is_ascii, int value);
+ // Backs up the key and its length in the backing store.
+ // The backup is stored with a base-128 encoding of the
+ // length (plus a bit saying whether the string is ASCII),
+ // followed by the bytes of the key.
+ byte* BackupKey(Vector<const byte> key, bool is_ascii);
+
+ // Compare two encoded keys (both pointing into the backing store)
+ // for having the same base-128 encoded lengths and ASCII-ness,
+ // and then having the same 'length' bytes following.
+ static bool Match(void* first, void* second);
+ // Creates a hash from a sequence of bytes.
+ static uint32_t Hash(Vector<const byte> key, bool is_ascii);
+ // Checks whether a string containing a JS number is in its canonical
+ // form.
+ static bool IsNumberCanonical(Vector<const char> key);
+
+ // Size of buffer. Sufficient for using it to call DoubleToCString
+ // from conversions.h.
+ static const int kBufferSize = 100;
+
+ UnicodeCache* unicode_constants_;
+ // Backing store used to store strings used as hashmap keys.
+ SequenceCollector<unsigned char> backing_store_;
+ HashMap map_;
+ // Buffer used for string->number->canonical string conversions.
+ char number_buffer_[kBufferSize];
+};
+
+
// ----------------------------------------------------------------------------
// LiteralBuffer - Collector of chars of literals.
@@ -183,9 +217,9 @@ class LiteralBuffer {
INLINE(void AddChar(uint32_t code_unit)) {
if (position_ >= backing_store_.length()) ExpandBuffer();
if (is_ascii_) {
- if (code_unit < kMaxAsciiCharCodeU) {
+ if (code_unit <= unibrow::Latin1::kMaxChar) {
backing_store_[position_] = static_cast<byte>(code_unit);
- position_ += kASCIISize;
+ position_ += kOneByteSize;
return;
}
ConvertToUtf16();
@@ -197,6 +231,11 @@ class LiteralBuffer {
bool is_ascii() { return is_ascii_; }
+ bool is_contextual_keyword(Vector<const char> keyword) {
+ return is_ascii() && keyword.length() == position_ &&
+ (memcmp(keyword.start(), backing_store_.start(), position_) == 0);
+ }
+
Vector<const uc16> utf16_literal() {
ASSERT(!is_ascii_);
ASSERT((position_ & 0x1) == 0);
@@ -234,7 +273,7 @@ class LiteralBuffer {
void ExpandBuffer() {
Vector<byte> new_store = Vector<byte>::New(NewCapacity(kInitialCapacity));
- memcpy(new_store.start(), backing_store_.start(), position_);
+ OS::MemCopy(new_store.start(), backing_store_.start(), position_);
backing_store_.Dispose();
backing_store_ = new_store;
}
@@ -250,7 +289,7 @@ class LiteralBuffer {
} else {
new_store = backing_store_;
}
- char* src = reinterpret_cast<char*>(backing_store_.start());
+ uint8_t* src = backing_store_.start();
uc16* dst = reinterpret_cast<uc16*>(new_store.start());
for (int i = position_ - 1; i >= 0; i--) {
dst[i] = src[i];
@@ -315,8 +354,6 @@ class Scanner {
// -1 is outside of the range of any real source code.
static const int kNoOctalLocation = -1;
- typedef unibrow::Utf8InputBuffer<1024> Utf8Decoder;
-
explicit Scanner(UnicodeCache* scanner_contants);
void Initialize(Utf16CharacterStream* source);
@@ -346,6 +383,10 @@ class Scanner {
ASSERT_NOT_NULL(current_.literal_chars);
return current_.literal_chars->is_ascii();
}
+ bool is_literal_contextual_keyword(Vector<const char> keyword) {
+ ASSERT_NOT_NULL(current_.literal_chars);
+ return current_.literal_chars->is_contextual_keyword(keyword);
+ }
int literal_length() const {
ASSERT_NOT_NULL(current_.literal_chars);
return current_.literal_chars->length();
@@ -382,6 +423,10 @@ class Scanner {
ASSERT_NOT_NULL(next_.literal_chars);
return next_.literal_chars->is_ascii();
}
+ bool is_next_contextual_keyword(Vector<const char> keyword) {
+ ASSERT_NOT_NULL(next_.literal_chars);
+ return next_.literal_chars->is_contextual_keyword(keyword);
+ }
int next_literal_length() const {
ASSERT_NOT_NULL(next_.literal_chars);
return next_.literal_chars->length();
@@ -416,7 +461,12 @@ class Scanner {
void SetHarmonyModules(bool modules) {
harmony_modules_ = modules;
}
-
+ bool HarmonyNumericLiterals() const {
+ return harmony_numeric_literals_;
+ }
+ void SetHarmonyNumericLiterals(bool numeric_literals) {
+ harmony_numeric_literals_ = numeric_literals;
+ }
// Returns true if there was a line terminator before the peek'ed token,
// possibly inside a multi-line comment.
@@ -432,10 +482,6 @@ class Scanner {
// be empty).
bool ScanRegExpFlags();
- // Tells whether the buffer contains an identifier (no escapes).
- // Used for checking if a property name is an identifier.
- static bool IsIdentifier(unibrow::CharacterStream* buffer);
-
private:
// The current and look-ahead token.
struct TokenDesc {
@@ -569,6 +615,8 @@ class Scanner {
bool harmony_scoping_;
// Whether we scan 'module', 'import', 'export' as keywords.
bool harmony_modules_;
+ // Whether we scan 0o777 and 0b111 as numbers.
+ bool harmony_numeric_literals_;
};
} } // namespace v8::internal
diff --git a/deps/v8/src/scopeinfo.cc b/deps/v8/src/scopeinfo.cc
index 02b432398..f1ae876ca 100644
--- a/deps/v8/src/scopeinfo.cc
+++ b/deps/v8/src/scopeinfo.cc
@@ -74,10 +74,11 @@ Handle<ScopeInfo> ScopeInfo::Create(Scope* scope, Zone* zone) {
+ parameter_count + stack_local_count + 2 * context_local_count
+ (has_function_name ? 2 : 0);
- Handle<ScopeInfo> scope_info = FACTORY->NewScopeInfo(length);
+ Factory* factory = zone->isolate()->factory();
+ Handle<ScopeInfo> scope_info = factory->NewScopeInfo(length);
// Encode the flags.
- int flags = TypeField::encode(scope->type()) |
+ int flags = ScopeTypeField::encode(scope->scope_type()) |
CallsEvalField::encode(scope->calls_eval()) |
LanguageModeField::encode(scope->language_mode()) |
FunctionVariableField::encode(function_name_info) |
@@ -149,14 +150,14 @@ Handle<ScopeInfo> ScopeInfo::Create(Scope* scope, Zone* zone) {
}
-ScopeInfo* ScopeInfo::Empty() {
- return reinterpret_cast<ScopeInfo*>(HEAP->empty_fixed_array());
+ScopeInfo* ScopeInfo::Empty(Isolate* isolate) {
+ return reinterpret_cast<ScopeInfo*>(isolate->heap()->empty_fixed_array());
}
-ScopeType ScopeInfo::Type() {
+ScopeType ScopeInfo::scope_type() {
ASSERT(length() > 0);
- return TypeField::decode(Flags());
+ return ScopeTypeField::decode(Flags());
}
@@ -192,9 +193,9 @@ int ScopeInfo::ContextLength() {
FunctionVariableField::decode(Flags()) == CONTEXT;
bool has_context = context_locals > 0 ||
function_name_context_slot ||
- Type() == WITH_SCOPE ||
- (Type() == FUNCTION_SCOPE && CallsEval()) ||
- Type() == MODULE_SCOPE;
+ scope_type() == WITH_SCOPE ||
+ (scope_type() == FUNCTION_SCOPE && CallsEval()) ||
+ scope_type() == MODULE_SCOPE;
if (has_context) {
return Context::MIN_CONTEXT_SLOTS + context_locals +
(function_name_context_slot ? 1 : 0);
@@ -280,7 +281,7 @@ InitializationFlag ScopeInfo::ContextLocalInitFlag(int var) {
int ScopeInfo::StackSlotIndex(String* name) {
- ASSERT(name->IsSymbol());
+ ASSERT(name->IsInternalizedString());
if (length() > 0) {
int start = StackLocalEntriesIndex();
int end = StackLocalEntriesIndex() + StackLocalCount();
@@ -297,7 +298,7 @@ int ScopeInfo::StackSlotIndex(String* name) {
int ScopeInfo::ContextSlotIndex(String* name,
VariableMode* mode,
InitializationFlag* init_flag) {
- ASSERT(name->IsSymbol());
+ ASSERT(name->IsInternalizedString());
ASSERT(mode != NULL);
ASSERT(init_flag != NULL);
if (length() > 0) {
@@ -321,6 +322,7 @@ int ScopeInfo::ContextSlotIndex(String* name,
return result;
}
}
+ // Cache as not found. Mode and init flag don't matter.
context_slot_cache->Update(this, name, INTERNAL, kNeedsInitialization, -1);
}
return -1;
@@ -328,7 +330,7 @@ int ScopeInfo::ContextSlotIndex(String* name,
int ScopeInfo::ParameterIndex(String* name) {
- ASSERT(name->IsSymbol());
+ ASSERT(name->IsInternalizedString());
if (length() > 0) {
// We must read parameters from the end since for
// multiply declared parameters the value of the
@@ -348,7 +350,7 @@ int ScopeInfo::ParameterIndex(String* name) {
int ScopeInfo::FunctionContextSlotIndex(String* name, VariableMode* mode) {
- ASSERT(name->IsSymbol());
+ ASSERT(name->IsInternalizedString());
ASSERT(mode != NULL);
if (length() > 0) {
if (FunctionVariableField::decode(Flags()) == CONTEXT &&
@@ -361,6 +363,31 @@ int ScopeInfo::FunctionContextSlotIndex(String* name, VariableMode* mode) {
}
+bool ScopeInfo::CopyContextLocalsToScopeObject(Handle<ScopeInfo> scope_info,
+ Handle<Context> context,
+ Handle<JSObject> scope_object) {
+ Isolate* isolate = scope_info->GetIsolate();
+ int local_count = scope_info->ContextLocalCount();
+ if (local_count == 0) return true;
+ // Fill all context locals to the context extension.
+ int start = scope_info->ContextLocalNameEntriesIndex();
+ int end = start + local_count;
+ for (int i = start; i < end; ++i) {
+ int context_index = Context::MIN_CONTEXT_SLOTS + i - start;
+ RETURN_IF_EMPTY_HANDLE_VALUE(
+ isolate,
+ SetProperty(isolate,
+ scope_object,
+ Handle<String>(String::cast(scope_info->get(i))),
+ Handle<Object>(context->get(context_index), isolate),
+ ::NONE,
+ kNonStrictMode),
+ false);
+ }
+ return true;
+}
+
+
int ScopeInfo::ParameterEntriesIndex() {
ASSERT(length() > 0);
return kVariablePartIndex;
@@ -416,13 +443,14 @@ void ContextSlotCache::Update(Object* data,
VariableMode mode,
InitializationFlag init_flag,
int slot_index) {
- String* symbol;
+ String* internalized_name;
ASSERT(slot_index > kNotFound);
- if (HEAP->LookupSymbolIfExists(name, &symbol)) {
- int index = Hash(data, symbol);
+ if (name->GetIsolate()->heap()->InternalizeStringIfExists(
+ name, &internalized_name)) {
+ int index = Hash(data, internalized_name);
Key& key = keys_[index];
key.data = data;
- key.name = symbol;
+ key.name = internalized_name;
// Please note value only takes a uint as index.
values_[index] = Value(mode, init_flag, slot_index - kNotFound).raw();
#ifdef DEBUG
@@ -444,8 +472,9 @@ void ContextSlotCache::ValidateEntry(Object* data,
VariableMode mode,
InitializationFlag init_flag,
int slot_index) {
- String* symbol;
- if (HEAP->LookupSymbolIfExists(name, &symbol)) {
+ String* internalized_name;
+ if (name->GetIsolate()->heap()->InternalizeStringIfExists(
+ name, &internalized_name)) {
int index = Hash(data, name);
Key& key = keys_[index];
ASSERT(key.data == data);
@@ -504,4 +533,32 @@ void ScopeInfo::Print() {
}
#endif // DEBUG
+
+//---------------------------------------------------------------------------
+// ModuleInfo.
+
+Handle<ModuleInfo> ModuleInfo::Create(
+ Isolate* isolate, Interface* interface, Scope* scope) {
+ Handle<ModuleInfo> info = Allocate(isolate, interface->Length());
+ info->set_host_index(interface->Index());
+ int i = 0;
+ for (Interface::Iterator it = interface->iterator();
+ !it.done(); it.Advance(), ++i) {
+ Variable* var = scope->LocalLookup(it.name());
+ info->set_name(i, *it.name());
+ info->set_mode(i, var->mode());
+ ASSERT((var->mode() == MODULE) == (it.interface()->IsModule()));
+ if (var->mode() == MODULE) {
+ ASSERT(it.interface()->IsFrozen());
+ ASSERT(it.interface()->Index() >= 0);
+ info->set_index(i, it.interface()->Index());
+ } else {
+ ASSERT(var->index() >= 0);
+ info->set_index(i, var->index());
+ }
+ }
+ ASSERT(i == info->length());
+ return info;
+}
+
} } // namespace v8::internal
diff --git a/deps/v8/src/scopeinfo.h b/deps/v8/src/scopeinfo.h
index 93734f5a1..a884b3b9e 100644
--- a/deps/v8/src/scopeinfo.h
+++ b/deps/v8/src/scopeinfo.h
@@ -114,9 +114,9 @@ class ContextSlotCache {
// Bit fields in value_ (type, shift, size). Must be public so the
// constants can be embedded in generated code.
- class ModeField: public BitField<VariableMode, 0, 3> {};
- class InitField: public BitField<InitializationFlag, 3, 1> {};
- class IndexField: public BitField<int, 4, 32-4> {};
+ class ModeField: public BitField<VariableMode, 0, 4> {};
+ class InitField: public BitField<InitializationFlag, 4, 1> {};
+ class IndexField: public BitField<int, 5, 32-5> {};
private:
uint32_t value_;
@@ -130,6 +130,67 @@ class ContextSlotCache {
};
+
+
+//---------------------------------------------------------------------------
+// Auxiliary class used for the description of module instances.
+// Used by Runtime_DeclareModules.
+
+class ModuleInfo: public FixedArray {
+ public:
+ static ModuleInfo* cast(Object* description) {
+ return static_cast<ModuleInfo*>(FixedArray::cast(description));
+ }
+
+ static Handle<ModuleInfo> Create(
+ Isolate* isolate, Interface* interface, Scope* scope);
+
+ // Index of module's context in host context.
+ int host_index() { return Smi::cast(get(HOST_OFFSET))->value(); }
+
+ // Name, mode, and index of the i-th export, respectively.
+ // For value exports, the index is the slot of the value in the module
+ // context; for exported modules it is the slot index of the
+ // referred module's context in the host context.
+ // TODO(rossberg): This format cannot yet handle exports of modules declared
+ // in earlier scripts.
+ String* name(int i) { return String::cast(get(name_offset(i))); }
+ VariableMode mode(int i) {
+ return static_cast<VariableMode>(Smi::cast(get(mode_offset(i)))->value());
+ }
+ int index(int i) { return Smi::cast(get(index_offset(i)))->value(); }
+
+ int length() { return (FixedArray::length() - HEADER_SIZE) / ITEM_SIZE; }
+
+ private:
+ // The internal format is: Index, (Name, VariableMode, Index)*
+ enum {
+ HOST_OFFSET,
+ NAME_OFFSET,
+ MODE_OFFSET,
+ INDEX_OFFSET,
+ HEADER_SIZE = NAME_OFFSET,
+ ITEM_SIZE = INDEX_OFFSET - NAME_OFFSET + 1
+ };
+ inline int name_offset(int i) { return NAME_OFFSET + i * ITEM_SIZE; }
+ inline int mode_offset(int i) { return MODE_OFFSET + i * ITEM_SIZE; }
+ inline int index_offset(int i) { return INDEX_OFFSET + i * ITEM_SIZE; }
+
+ static Handle<ModuleInfo> Allocate(Isolate* isolate, int length) {
+ return Handle<ModuleInfo>::cast(
+ isolate->factory()->NewFixedArray(HEADER_SIZE + ITEM_SIZE * length));
+ }
+ void set_host_index(int index) { set(HOST_OFFSET, Smi::FromInt(index)); }
+ void set_name(int i, String* name) { set(name_offset(i), name); }
+ void set_mode(int i, VariableMode mode) {
+ set(mode_offset(i), Smi::FromInt(mode));
+ }
+ void set_index(int i, int index) {
+ set(index_offset(i), Smi::FromInt(index));
+ }
+};
+
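A worked example of the layout implied by the enum above (illustrative): a ModuleInfo describing two exports occupies HEADER_SIZE + 2 * ITEM_SIZE == 7 FixedArray slots.

    //   slot 0: host_index    HOST_OFFSET
    //   slot 1: name(0)       name_offset(0)  == NAME_OFFSET  + 0 * ITEM_SIZE == 1
    //   slot 2: mode(0)       mode_offset(0)  == MODE_OFFSET  + 0 * ITEM_SIZE == 2
    //   slot 3: index(0)      index_offset(0) == INDEX_OFFSET + 0 * ITEM_SIZE == 3
    //   slot 4: name(1)       name_offset(1)  == 4
    //   slot 5: mode(1)       mode_offset(1)  == 5
    //   slot 6: index(1)      index_offset(1) == 6
    // and length() == (7 - HEADER_SIZE) / ITEM_SIZE == 2.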
+
} } // namespace v8::internal
#endif // V8_SCOPEINFO_H_
diff --git a/deps/v8/src/scopes.cc b/deps/v8/src/scopes.cc
index 434479ca5..ee327fb79 100644
--- a/deps/v8/src/scopes.cc
+++ b/deps/v8/src/scopes.cc
@@ -52,8 +52,8 @@ namespace internal {
static bool Match(void* key1, void* key2) {
String* name1 = *reinterpret_cast<String**>(key1);
String* name2 = *reinterpret_cast<String**>(key2);
- ASSERT(name1->IsSymbol());
- ASSERT(name2->IsSymbol());
+ ASSERT(name1->IsInternalizedString());
+ ASSERT(name2->IsInternalizedString());
return name1 == name2;
}
@@ -104,33 +104,35 @@ Variable* VariableMap::Lookup(Handle<String> name) {
// ----------------------------------------------------------------------------
// Implementation of Scope
-Scope::Scope(Scope* outer_scope, ScopeType type, Zone* zone)
- : isolate_(Isolate::Current()),
+Scope::Scope(Scope* outer_scope, ScopeType scope_type, Zone* zone)
+ : isolate_(zone->isolate()),
inner_scopes_(4, zone),
variables_(zone),
+ internals_(4, zone),
temps_(4, zone),
params_(4, zone),
unresolved_(16, zone),
decls_(4, zone),
interface_(FLAG_harmony_modules &&
- (type == MODULE_SCOPE || type == GLOBAL_SCOPE)
+ (scope_type == MODULE_SCOPE || scope_type == GLOBAL_SCOPE)
? Interface::NewModule(zone) : NULL),
already_resolved_(false),
zone_(zone) {
- SetDefaults(type, outer_scope, Handle<ScopeInfo>::null());
+ SetDefaults(scope_type, outer_scope, Handle<ScopeInfo>::null());
// The outermost scope must be a global scope.
- ASSERT(type == GLOBAL_SCOPE || outer_scope != NULL);
+ ASSERT(scope_type == GLOBAL_SCOPE || outer_scope != NULL);
ASSERT(!HasIllegalRedeclaration());
}
Scope::Scope(Scope* inner_scope,
- ScopeType type,
+ ScopeType scope_type,
Handle<ScopeInfo> scope_info,
Zone* zone)
- : isolate_(Isolate::Current()),
+ : isolate_(zone->isolate()),
inner_scopes_(4, zone),
variables_(zone),
+ internals_(4, zone),
temps_(4, zone),
params_(4, zone),
unresolved_(16, zone),
@@ -138,7 +140,7 @@ Scope::Scope(Scope* inner_scope,
interface_(NULL),
already_resolved_(true),
zone_(zone) {
- SetDefaults(type, NULL, scope_info);
+ SetDefaults(scope_type, NULL, scope_info);
if (!scope_info.is_null()) {
num_heap_slots_ = scope_info_->ContextLength();
}
@@ -150,9 +152,10 @@ Scope::Scope(Scope* inner_scope,
Scope::Scope(Scope* inner_scope, Handle<String> catch_variable_name, Zone* zone)
- : isolate_(Isolate::Current()),
+ : isolate_(zone->isolate()),
inner_scopes_(1, zone),
variables_(zone),
+ internals_(0, zone),
temps_(0, zone),
params_(0, zone),
unresolved_(0, zone),
@@ -174,12 +177,12 @@ Scope::Scope(Scope* inner_scope, Handle<String> catch_variable_name, Zone* zone)
}
-void Scope::SetDefaults(ScopeType type,
+void Scope::SetDefaults(ScopeType scope_type,
Scope* outer_scope,
Handle<ScopeInfo> scope_info) {
outer_scope_ = outer_scope;
- type_ = type;
- scope_name_ = isolate_->factory()->empty_symbol();
+ scope_type_ = scope_type;
+ scope_name_ = isolate_->factory()->empty_string();
dynamics_ = NULL;
receiver_ = NULL;
function_ = NULL;
@@ -194,9 +197,13 @@ void Scope::SetDefaults(ScopeType type,
outer_scope_calls_non_strict_eval_ = false;
inner_scope_calls_eval_ = false;
force_eager_compilation_ = false;
+ force_context_allocation_ = (outer_scope != NULL && !is_function_scope())
+ ? outer_scope->has_forced_context_allocation() : false;
num_var_or_const_ = 0;
num_stack_slots_ = 0;
num_heap_slots_ = 0;
+ num_modules_ = 0;
+ module_var_ = NULL,
scope_info_ = scope_info;
start_position_ = RelocInfo::kNoPosition;
end_position_ = RelocInfo::kNoPosition;
@@ -303,23 +310,6 @@ bool Scope::Analyze(CompilationInfo* info) {
}
#endif
- if (FLAG_harmony_scoping) {
- VariableProxy* proxy = scope->CheckAssignmentToConst();
- if (proxy != NULL) {
- // Found an assignment to const. Throw a syntax error.
- MessageLocation location(info->script(),
- proxy->position(),
- proxy->position());
- Isolate* isolate = info->isolate();
- Factory* factory = isolate->factory();
- Handle<JSArray> array = factory->NewJSArray(0);
- Handle<Object> result =
- factory->NewSyntaxError("harmony_const_assign", array);
- isolate->Throw(*result, &location);
- return false;
- }
- }
-
info->SetScope(scope);
return true;
}
@@ -347,7 +337,7 @@ void Scope::Initialize() {
if (is_declaration_scope()) {
Variable* var =
variables_.Declare(this,
- isolate_->factory()->this_symbol(),
+ isolate_->factory()->this_string(),
VAR,
false,
Variable::THIS,
@@ -364,7 +354,7 @@ void Scope::Initialize() {
// Note that it might never be accessed, in which case it won't be
// allocated during variable allocation.
variables_.Declare(this,
- isolate_->factory()->arguments_symbol(),
+ isolate_->factory()->arguments_string(),
VAR,
true,
Variable::ARGUMENTS,
@@ -375,6 +365,7 @@ void Scope::Initialize() {
Scope* Scope::FinalizeBlockScope() {
ASSERT(is_block_scope());
+ ASSERT(internals_.is_empty());
ASSERT(temps_.is_empty());
ASSERT(params_.is_empty());
@@ -446,8 +437,8 @@ Variable* Scope::LookupFunctionVar(Handle<String> name,
this, name, mode, true /* is valid LHS */,
Variable::NORMAL, kCreatedInitialized);
VariableProxy* proxy = factory->NewVariableProxy(var);
- VariableDeclaration* declaration =
- factory->NewVariableDeclaration(proxy, mode, this);
+ VariableDeclaration* declaration = factory->NewVariableDeclaration(
+ proxy, mode, this, RelocInfo::kNoPosition);
DeclareFunctionVar(declaration);
var->AllocateTo(Variable::CONTEXT, index);
return var;
@@ -515,6 +506,19 @@ void Scope::RemoveUnresolved(VariableProxy* var) {
}
+Variable* Scope::NewInternal(Handle<String> name) {
+ ASSERT(!already_resolved());
+ Variable* var = new(zone()) Variable(this,
+ name,
+ INTERNAL,
+ false,
+ Variable::NORMAL,
+ kCreatedInitialized);
+ internals_.Add(var, zone());
+ return var;
+}
+
+
Variable* Scope::NewTemporary(Handle<String> name) {
ASSERT(!already_resolved());
Variable* var = new(zone()) Variable(this,
@@ -572,29 +576,6 @@ Declaration* Scope::CheckConflictingVarDeclarations() {
}
-VariableProxy* Scope::CheckAssignmentToConst() {
- // Check this scope.
- if (is_extended_mode()) {
- for (int i = 0; i < unresolved_.length(); i++) {
- ASSERT(unresolved_[i]->var() != NULL);
- if (unresolved_[i]->var()->is_const_mode() &&
- unresolved_[i]->IsLValue()) {
- return unresolved_[i];
- }
- }
- }
-
- // Check inner scopes.
- for (int i = 0; i < inner_scopes_.length(); i++) {
- VariableProxy* proxy = inner_scopes_[i]->CheckAssignmentToConst();
- if (proxy != NULL) return proxy;
- }
-
- // No assignments to const found.
- return NULL;
-}
-
-
class VarAndOrder {
public:
VarAndOrder(Variable* var, int order) : var_(var), order_(order) { }
@@ -615,18 +596,32 @@ void Scope::CollectStackAndContextLocals(ZoneList<Variable*>* stack_locals,
ASSERT(stack_locals != NULL);
ASSERT(context_locals != NULL);
- // Collect temporaries which are always allocated on the stack.
+ // Collect internals which are always allocated on the heap.
+ for (int i = 0; i < internals_.length(); i++) {
+ Variable* var = internals_[i];
+ if (var->is_used()) {
+ ASSERT(var->IsContextSlot());
+ context_locals->Add(var, zone());
+ }
+ }
+
+ // Collect temporaries which are always allocated on the stack, unless the
+ // scope as a whole has forced context allocation.
for (int i = 0; i < temps_.length(); i++) {
Variable* var = temps_[i];
if (var->is_used()) {
- ASSERT(var->IsStackLocal());
- stack_locals->Add(var, zone());
+ if (var->IsContextSlot()) {
+ ASSERT(has_forced_context_allocation());
+ context_locals->Add(var, zone());
+ } else {
+ ASSERT(var->IsStackLocal());
+ stack_locals->Add(var, zone());
+ }
}
}
- ZoneList<VarAndOrder> vars(variables_.occupancy(), zone());
-
// Collect declared local variables.
+ ZoneList<VarAndOrder> vars(variables_.occupancy(), zone());
for (VariableMap::Entry* p = variables_.Start();
p != NULL;
p = variables_.Next(p)) {
@@ -659,18 +654,18 @@ bool Scope::AllocateVariables(CompilationInfo* info,
}
PropagateScopeInfo(outer_scope_calls_non_strict_eval);
- // 2) Resolve variables.
+ // 2) Allocate module instances.
+ if (FLAG_harmony_modules && (is_global_scope() || is_module_scope())) {
+ ASSERT(num_modules_ == 0);
+ AllocateModulesRecursively(this);
+ }
+
+ // 3) Resolve variables.
if (!ResolveVariablesRecursively(info, factory)) return false;
- // 3) Allocate variables.
+ // 4) Allocate variables.
AllocateVariablesRecursively();
- // 4) Allocate and link module instance objects.
- if (FLAG_harmony_modules && (is_global_scope() || is_module_scope())) {
- AllocateModules(info);
- LinkModules(info);
- }
-
return true;
}
@@ -731,12 +726,24 @@ int Scope::ContextChainLength(Scope* scope) {
int n = 0;
for (Scope* s = this; s != scope; s = s->outer_scope_) {
ASSERT(s != NULL); // scope must be in the scope chain
- if (s->num_heap_slots() > 0) n++;
+ if (s->is_with_scope() || s->num_heap_slots() > 0) n++;
+ // Catch and module scopes always have heap slots.
+ ASSERT(!s->is_catch_scope() || s->num_heap_slots() > 0);
+ ASSERT(!s->is_module_scope() || s->num_heap_slots() > 0);
}
return n;
}
+Scope* Scope::GlobalScope() {
+ Scope* scope = this;
+ while (!scope->is_global_scope()) {
+ scope = scope->outer_scope();
+ }
+ return scope;
+}
+
+
Scope* Scope::DeclarationScope() {
Scope* scope = this;
while (!scope->is_declaration_scope()) {
@@ -773,8 +780,8 @@ void Scope::GetNestedScopeChain(
#ifdef DEBUG
-static const char* Header(ScopeType type) {
- switch (type) {
+static const char* Header(ScopeType scope_type) {
+ switch (scope_type) {
case EVAL_SCOPE: return "eval";
case FUNCTION_SCOPE: return "function";
case MODULE_SCOPE: return "module";
@@ -848,7 +855,7 @@ void Scope::Print(int n) {
int n1 = n0 + 2; // indentation
// Print header.
- Indent(n0, Header(type_));
+ Indent(n0, Header(scope_type_));
if (scope_name_->length() > 0) {
PrintF(" ");
PrintName(scope_name_);
@@ -900,21 +907,32 @@ void Scope::Print(int n) {
PrintF("%d heap slots\n", num_heap_slots_); }
// Print locals.
- Indent(n1, "// function var\n");
if (function_ != NULL) {
+ Indent(n1, "// function var:\n");
PrintVar(n1, function_->proxy()->var());
}
- Indent(n1, "// temporary vars\n");
- for (int i = 0; i < temps_.length(); i++) {
- PrintVar(n1, temps_[i]);
+ if (temps_.length() > 0) {
+ Indent(n1, "// temporary vars:\n");
+ for (int i = 0; i < temps_.length(); i++) {
+ PrintVar(n1, temps_[i]);
+ }
}
- Indent(n1, "// local vars\n");
- PrintMap(n1, &variables_);
+ if (internals_.length() > 0) {
+ Indent(n1, "// internal vars:\n");
+ for (int i = 0; i < internals_.length(); i++) {
+ PrintVar(n1, internals_[i]);
+ }
+ }
+
+ if (variables_.Start() != NULL) {
+ Indent(n1, "// local vars:\n");
+ PrintMap(n1, &variables_);
+ }
- Indent(n1, "// dynamic vars\n");
if (dynamics_ != NULL) {
+ Indent(n1, "// dynamic vars:\n");
PrintMap(n1, dynamics_->GetMap(DYNAMIC));
PrintMap(n1, dynamics_->GetMap(DYNAMIC_LOCAL));
PrintMap(n1, dynamics_->GetMap(DYNAMIC_GLOBAL));
@@ -958,6 +976,13 @@ Variable* Scope::LookupRecursive(Handle<String> name,
BindingKind* binding_kind,
AstNodeFactory<AstNullVisitor>* factory) {
ASSERT(binding_kind != NULL);
+ if (already_resolved() && is_with_scope()) {
+ // Short-cut: if the scope is deserialized from a scope info, variable
+ // allocation is already fixed. We can simply return with dynamic lookup.
+ *binding_kind = DYNAMIC_LOOKUP;
+ return NULL;
+ }
+
// Try to find the variable in this scope.
Variable* var = LocalLookup(name);
@@ -986,6 +1011,7 @@ Variable* Scope::LookupRecursive(Handle<String> name,
}
if (is_with_scope()) {
+ ASSERT(!already_resolved());
// The current scope is a with scope, so the variable binding cannot be
// statically resolved. However, note that it was necessary to do a lookup
// in the outer scope anyway, because if a binding exists in an outer scope,
@@ -1060,7 +1086,20 @@ bool Scope::ResolveVariable(CompilationInfo* info,
}
ASSERT(var != NULL);
- proxy->BindTo(var);
+
+ if (FLAG_harmony_scoping && is_extended_mode() &&
+ var->is_const_mode() && proxy->IsLValue()) {
+ // Assignment to const. Throw a syntax error.
+ MessageLocation location(
+ info->script(), proxy->position(), proxy->position());
+ Isolate* isolate = info->isolate();
+ Factory* factory = isolate->factory();
+ Handle<JSArray> array = factory->NewJSArray(0);
+ Handle<Object> result =
+ factory->NewSyntaxError("harmony_const_assign", array);
+ isolate->Throw(*result, &location);
+ return false;
+ }
if (FLAG_harmony_modules) {
bool ok;
@@ -1082,10 +1121,9 @@ bool Scope::ResolveVariable(CompilationInfo* info,
// Inconsistent use of module. Throw a syntax error.
// TODO(rossberg): generate more helpful error message.
- MessageLocation location(info->script(),
- proxy->position(),
- proxy->position());
- Isolate* isolate = Isolate::Current();
+ MessageLocation location(
+ info->script(), proxy->position(), proxy->position());
+ Isolate* isolate = info->isolate();
Factory* factory = isolate->factory();
Handle<JSArray> array = factory->NewJSArray(1);
USE(JSObject::SetElement(array, 0, var->name(), NONE, kStrictMode));
@@ -1096,6 +1134,8 @@ bool Scope::ResolveVariable(CompilationInfo* info,
}
}
+ proxy->BindTo(var);
+
return true;
}
@@ -1167,9 +1207,13 @@ bool Scope::MustAllocateInContext(Variable* var) {
// an eval() call or a runtime with lookup), it must be allocated in the
// context.
//
- // Exceptions: temporary variables are never allocated in a context;
- // catch-bound variables are always allocated in a context.
+ // Exceptions: If the scope as a whole has forced context allocation, all
+ // variables will have context allocation, even temporaries. Otherwise
+ // temporary variables are always stack-allocated. Catch-bound variables are
+ // always context-allocated.
+ if (has_forced_context_allocation()) return true;
if (var->mode() == TEMPORARY) return false;
+ if (var->mode() == INTERNAL) return true;
if (is_catch_scope() || is_block_scope() || is_module_scope()) return true;
if (is_global_scope() && IsLexicalVariableMode(var->mode())) return true;
return var->has_forced_context_allocation() ||
@@ -1182,7 +1226,7 @@ bool Scope::MustAllocateInContext(Variable* var) {
bool Scope::HasArgumentsParameter() {
for (int i = 0; i < params_.length(); i++) {
if (params_[i]->name().is_identical_to(
- isolate_->factory()->arguments_symbol())) {
+ isolate_->factory()->arguments_string())) {
return true;
}
}
@@ -1202,7 +1246,7 @@ void Scope::AllocateHeapSlot(Variable* var) {
void Scope::AllocateParameterLocals() {
ASSERT(is_function_scope());
- Variable* arguments = LocalLookup(isolate_->factory()->arguments_symbol());
+ Variable* arguments = LocalLookup(isolate_->factory()->arguments_string());
ASSERT(arguments != NULL); // functions have 'arguments' declared implicitly
bool uses_nonstrict_arguments = false;
@@ -1258,7 +1302,7 @@ void Scope::AllocateParameterLocals() {
void Scope::AllocateNonParameterLocal(Variable* var) {
ASSERT(var->scope() == this);
- ASSERT(!var->IsVariable(isolate_->factory()->result_symbol()) ||
+ ASSERT(!var->IsVariable(isolate_->factory()->result_string()) ||
!var->IsStackLocal());
if (var->IsUnallocated() && MustAllocate(var)) {
if (MustAllocateInContext(var)) {
@@ -1276,15 +1320,17 @@ void Scope::AllocateNonParameterLocals() {
AllocateNonParameterLocal(temps_[i]);
}
- ZoneList<VarAndOrder> vars(variables_.occupancy(), zone());
+ for (int i = 0; i < internals_.length(); i++) {
+ AllocateNonParameterLocal(internals_[i]);
+ }
+ ZoneList<VarAndOrder> vars(variables_.occupancy(), zone());
for (VariableMap::Entry* p = variables_.Start();
p != NULL;
p = variables_.Next(p)) {
Variable* var = reinterpret_cast<Variable*>(p->value);
vars.Add(VarAndOrder(var, p->order), zone());
}
-
vars.Sort(VarAndOrder::Compare);
int var_count = vars.length();
for (int i = 0; i < var_count; i++) {
@@ -1337,89 +1383,34 @@ void Scope::AllocateVariablesRecursively() {
}
-int Scope::StackLocalCount() const {
- return num_stack_slots() -
- (function_ != NULL && function_->proxy()->var()->IsStackLocal() ? 1 : 0);
-}
-
-
-int Scope::ContextLocalCount() const {
- if (num_heap_slots() == 0) return 0;
- return num_heap_slots() - Context::MIN_CONTEXT_SLOTS -
- (function_ != NULL && function_->proxy()->var()->IsContextSlot() ? 1 : 0);
-}
-
-
-void Scope::AllocateModules(CompilationInfo* info) {
- ASSERT(is_global_scope() || is_module_scope());
-
+void Scope::AllocateModulesRecursively(Scope* host_scope) {
+ if (already_resolved()) return;
if (is_module_scope()) {
ASSERT(interface_->IsFrozen());
- ASSERT(scope_info_.is_null());
-
- // TODO(rossberg): This has to be the initial compilation of this code.
- // We currently do not allow recompiling any module definitions.
- Handle<ScopeInfo> scope_info = GetScopeInfo();
- Factory* factory = info->isolate()->factory();
- Handle<Context> context = factory->NewModuleContext(scope_info);
- Handle<JSModule> instance = factory->NewJSModule(context, scope_info);
- context->set_module(*instance);
-
- bool ok;
- interface_->MakeSingleton(instance, &ok);
- ASSERT(ok);
+ Handle<String> name = isolate_->factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR(".module"));
+ ASSERT(module_var_ == NULL);
+ module_var_ = host_scope->NewInternal(name);
+ ++host_scope->num_modules_;
}
- // Allocate nested modules.
for (int i = 0; i < inner_scopes_.length(); i++) {
Scope* inner_scope = inner_scopes_.at(i);
- if (inner_scope->is_module_scope()) {
- inner_scope->AllocateModules(info);
- }
+ inner_scope->AllocateModulesRecursively(host_scope);
}
}
-void Scope::LinkModules(CompilationInfo* info) {
- ASSERT(is_global_scope() || is_module_scope());
+int Scope::StackLocalCount() const {
+ return num_stack_slots() -
+ (function_ != NULL && function_->proxy()->var()->IsStackLocal() ? 1 : 0);
+}
- if (is_module_scope()) {
- Handle<JSModule> instance = interface_->Instance();
-
- // Populate the module instance object.
- const PropertyAttributes ro_attr =
- static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE | DONT_ENUM);
- const PropertyAttributes rw_attr =
- static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM);
- for (Interface::Iterator it = interface_->iterator();
- !it.done(); it.Advance()) {
- if (it.interface()->IsModule()) {
- Handle<Object> value = it.interface()->Instance();
- ASSERT(!value.is_null());
- JSReceiver::SetProperty(
- instance, it.name(), value, ro_attr, kStrictMode);
- } else {
- Variable* var = LocalLookup(it.name());
- ASSERT(var != NULL && var->IsContextSlot());
- PropertyAttributes attr = var->is_const_mode() ? ro_attr : rw_attr;
- Handle<AccessorInfo> info =
- Accessors::MakeModuleExport(it.name(), var->index(), attr);
- Handle<Object> result = SetAccessor(instance, info);
- ASSERT(!(result.is_null() || result->IsUndefined()));
- USE(result);
- }
- }
- USE(JSObject::PreventExtensions(instance));
- }
- // Link nested modules.
- for (int i = 0; i < inner_scopes_.length(); i++) {
- Scope* inner_scope = inner_scopes_.at(i);
- if (inner_scope->is_module_scope()) {
- inner_scope->LinkModules(info);
- }
- }
+int Scope::ContextLocalCount() const {
+ if (num_heap_slots() == 0) return 0;
+ return num_heap_slots() - Context::MIN_CONTEXT_SLOTS -
+ (function_ != NULL && function_->proxy()->var()->IsContextSlot() ? 1 : 0);
}
-
} } // namespace v8::internal
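
The MustAllocateInContext hunk above layers a scope-wide override on top of the per-mode rules: ForceContextAllocation wins first, temporaries otherwise stay on the stack, and internals always go to the context. A minimal standalone sketch of that precedence, using placeholder types rather than the real Scope and Variable classes:

    #include <cassert>

    // Placeholder types only; not the V8 Scope/Variable classes.
    enum VariableMode { TEMPORARY, INTERNAL, VAR };

    struct Var {
      VariableMode mode;
      bool forced_context;  // per-variable forcing
    };

    struct ScopeLike {
      bool forced_context_allocation;       // set by ForceContextAllocation()
      bool is_catch_block_or_module_scope;

      bool MustAllocateInContext(const Var& v) const {
        if (forced_context_allocation) return true;  // scope-wide flag wins first
        if (v.mode == TEMPORARY) return false;       // temporaries stay on the stack
        if (v.mode == INTERNAL) return true;         // internals are always heap-allocated
        if (is_catch_block_or_module_scope) return true;
        return v.forced_context;
      }
    };

    int main() {
      ScopeLike forced{true, false};
      assert(forced.MustAllocateInContext(Var{TEMPORARY, false}));  // scope flag overrides
      ScopeLike plain{false, false};
      assert(!plain.MustAllocateInContext(Var{TEMPORARY, false}));
      assert(plain.MustAllocateInContext(Var{INTERNAL, false}));
      return 0;
    }
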
diff --git a/deps/v8/src/scopes.h b/deps/v8/src/scopes.h
index b9d151cba..06aaa902c 100644
--- a/deps/v8/src/scopes.h
+++ b/deps/v8/src/scopes.h
@@ -97,7 +97,7 @@ class Scope: public ZoneObject {
// ---------------------------------------------------------------------------
// Construction
- Scope(Scope* outer_scope, ScopeType type, Zone* zone);
+ Scope(Scope* outer_scope, ScopeType scope_type, Zone* zone);
// Compute top scope and allocate variables. For lazy compilation the top
// scope only contains the single lazily compiled function, so this
@@ -186,6 +186,12 @@ class Scope: public ZoneObject {
// such a variable again if it was added; otherwise this is a no-op.
void RemoveUnresolved(VariableProxy* var);
+ // Creates a new internal variable in this scope. The name is only used
+ // for printing and cannot be used to find the variable. In particular,
+ // the only way to get hold of the temporary is by keeping the Variable*
+ // around.
+ Variable* NewInternal(Handle<String> name);
+
// Creates a new temporary variable in this scope. The name is only used
// for printing and cannot be used to find the variable. In particular,
// the only way to get hold of the temporary is by keeping the Variable*
@@ -218,11 +224,6 @@ class Scope: public ZoneObject {
// scope over a let binding of the same name.
Declaration* CheckConflictingVarDeclarations();
- // For harmony block scoping mode: Check if the scope has variable proxies
- // that are used as lvalues and point to const variables. Assumes that scopes
- // have been analyzed and variables been resolved.
- VariableProxy* CheckAssignmentToConst();
-
// ---------------------------------------------------------------------------
// Scope-specific info.
@@ -268,17 +269,26 @@ class Scope: public ZoneObject {
end_position_ = statement_pos;
}
+ // In some cases we want to force context allocation for a whole scope.
+ void ForceContextAllocation() {
+ ASSERT(!already_resolved());
+ force_context_allocation_ = true;
+ }
+ bool has_forced_context_allocation() const {
+ return force_context_allocation_;
+ }
+
// ---------------------------------------------------------------------------
// Predicates.
// Specific scope types.
- bool is_eval_scope() const { return type_ == EVAL_SCOPE; }
- bool is_function_scope() const { return type_ == FUNCTION_SCOPE; }
- bool is_module_scope() const { return type_ == MODULE_SCOPE; }
- bool is_global_scope() const { return type_ == GLOBAL_SCOPE; }
- bool is_catch_scope() const { return type_ == CATCH_SCOPE; }
- bool is_block_scope() const { return type_ == BLOCK_SCOPE; }
- bool is_with_scope() const { return type_ == WITH_SCOPE; }
+ bool is_eval_scope() const { return scope_type_ == EVAL_SCOPE; }
+ bool is_function_scope() const { return scope_type_ == FUNCTION_SCOPE; }
+ bool is_module_scope() const { return scope_type_ == MODULE_SCOPE; }
+ bool is_global_scope() const { return scope_type_ == GLOBAL_SCOPE; }
+ bool is_catch_scope() const { return scope_type_ == CATCH_SCOPE; }
+ bool is_block_scope() const { return scope_type_ == BLOCK_SCOPE; }
+ bool is_with_scope() const { return scope_type_ == WITH_SCOPE; }
bool is_declaration_scope() const {
return is_eval_scope() || is_function_scope() ||
is_module_scope() || is_global_scope();
@@ -311,7 +321,7 @@ class Scope: public ZoneObject {
// Accessors.
// The type of this scope.
- ScopeType type() const { return type_; }
+ ScopeType scope_type() const { return scope_type_; }
// The language mode of this scope.
LanguageMode language_mode() const { return language_mode_; }
@@ -369,6 +379,12 @@ class Scope: public ZoneObject {
int StackLocalCount() const;
int ContextLocalCount() const;
+ // For global scopes, the number of module literals (including nested ones).
+ int num_modules() const { return num_modules_; }
+
+ // For module scopes, the host scope's internal variable binding this module.
+ Variable* module_var() const { return module_var_; }
+
// Make sure this scope and all outer scopes are eagerly compiled.
void ForceEagerCompilation() { force_eager_compilation_ = true; }
@@ -387,6 +403,9 @@ class Scope: public ZoneObject {
// The number of contexts between this and scope; zero if this == scope.
int ContextChainLength(Scope* scope);
+ // Find the innermost global scope.
+ Scope* GlobalScope();
+
// Find the first function, global, or eval scope. This is the scope
// where var declarations will be hoisted to in the implementation.
Scope* DeclarationScope();
@@ -430,7 +449,7 @@ class Scope: public ZoneObject {
ZoneList<Scope*> inner_scopes_; // the immediately enclosed inner scopes
// The scope type.
- ScopeType type_;
+ ScopeType scope_type_;
// Debugging support.
Handle<String> scope_name_;
@@ -441,6 +460,8 @@ class Scope: public ZoneObject {
// variables may be implicitly 'declared' by being used (possibly in
// an inner scope) with no intervening with statements or eval calls.
VariableMap variables_;
+ // Compiler-allocated (user-invisible) internals.
+ ZoneList<Variable*> internals_;
// Compiler-allocated (user-invisible) temporaries.
ZoneList<Variable*> temps_;
// Parameter list in source order.
@@ -482,6 +503,7 @@ class Scope: public ZoneObject {
bool outer_scope_calls_non_strict_eval_;
bool inner_scope_calls_eval_;
bool force_eager_compilation_;
+ bool force_context_allocation_;
// True if it doesn't need scope resolution (e.g., if the scope was
// constructed based on a serialized scope info or a catch context).
@@ -494,6 +516,12 @@ class Scope: public ZoneObject {
int num_stack_slots_;
int num_heap_slots_;
+ // The number of modules (including nested ones).
+ int num_modules_;
+
+ // For module scopes, the host scope's internal variable binding this module.
+ Variable* module_var_;
+
// Serialized scope info support.
Handle<ScopeInfo> scope_info_;
bool already_resolved() { return already_resolved_; }
@@ -578,6 +606,7 @@ class Scope: public ZoneObject {
void AllocateNonParameterLocal(Variable* var);
void AllocateNonParameterLocals();
void AllocateVariablesRecursively();
+ void AllocateModulesRecursively(Scope* host_scope);
// Resolve and fill in the allocation information for all variables
// in this scopes. Must be called *after* all scopes have been
@@ -591,13 +620,6 @@ class Scope: public ZoneObject {
bool AllocateVariables(CompilationInfo* info,
AstNodeFactory<AstNullVisitor>* factory);
- // Instance objects have to be created ahead of time (before code generation)
- // because of potentially cyclic references between them.
- // Linking also has to be a separate stage, since populating one object may
- // potentially require (forward) references to others.
- void AllocateModules(CompilationInfo* info);
- void LinkModules(CompilationInfo* info);
-
private:
// Construct a scope based on the scope info.
Scope(Scope* inner_scope, ScopeType type, Handle<ScopeInfo> scope_info,
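
The num_modules_ and module_var_ bookkeeping declared above is filled in by AllocateModulesRecursively: every nested module scope registers one ".module" internal variable on the host scope and bumps the host's module count. A toy illustration with stand-in types, not the V8 Scope/Variable classes:

    #include <string>
    #include <vector>

    // Stand-in types only; not the V8 Scope/Variable classes.
    struct MiniScope {
      bool is_module = false;
      std::vector<MiniScope*> inner;
      int num_modules = 0;                 // host-scope counter
      std::vector<std::string> internals;  // host-scope internal variables

      // Every module scope adds one ".module" internal to the host scope and
      // bumps the host's module count; the walk then continues into inner scopes.
      void AllocateModulesRecursively(MiniScope* host) {
        if (is_module) {
          host->internals.push_back(".module");
          ++host->num_modules;
        }
        for (MiniScope* s : inner) s->AllocateModulesRecursively(host);
      }
    };

    int main() {
      MiniScope global, module_scope;
      module_scope.is_module = true;
      global.inner.push_back(&module_scope);
      global.AllocateModulesRecursively(&global);
      return global.num_modules == 1 ? 0 : 1;
    }
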
diff --git a/deps/v8/src/serialize.cc b/deps/v8/src/serialize.cc
index 2ea09f89c..7ed36665e 100644
--- a/deps/v8/src/serialize.cc
+++ b/deps/v8/src/serialize.cc
@@ -30,6 +30,7 @@
#include "accessors.h"
#include "api.h"
#include "bootstrapper.h"
+#include "deoptimizer.h"
#include "execution.h"
#include "global-handles.h"
#include "ic-inl.h"
@@ -443,15 +444,15 @@ void ExternalReferenceTable::PopulateTable(Isolate* isolate) {
UNCLASSIFIED,
30,
"TranscendentalCache::caches()");
- Add(ExternalReference::handle_scope_next_address().address(),
+ Add(ExternalReference::handle_scope_next_address(isolate).address(),
UNCLASSIFIED,
31,
"HandleScope::next");
- Add(ExternalReference::handle_scope_limit_address().address(),
+ Add(ExternalReference::handle_scope_limit_address(isolate).address(),
UNCLASSIFIED,
32,
"HandleScope::limit");
- Add(ExternalReference::handle_scope_level_address().address(),
+ Add(ExternalReference::handle_scope_level_address(isolate).address(),
UNCLASSIFIED,
33,
"HandleScope::level");
@@ -471,7 +472,7 @@ void ExternalReferenceTable::PopulateTable(Isolate* isolate) {
UNCLASSIFIED,
37,
"LDoubleConstant::one_half");
- Add(ExternalReference::isolate_address().address(),
+ Add(ExternalReference::isolate_address(isolate).address(),
UNCLASSIFIED,
38,
"isolate");
@@ -523,12 +524,85 @@ void ExternalReferenceTable::PopulateTable(Isolate* isolate) {
UNCLASSIFIED,
50,
"pending_message_script");
+ Add(ExternalReference::get_make_code_young_function(isolate).address(),
+ UNCLASSIFIED,
+ 51,
+ "Code::MakeCodeYoung");
+ Add(ExternalReference::cpu_features().address(),
+ UNCLASSIFIED,
+ 52,
+ "cpu_features");
+ Add(ExternalReference(Runtime::kAllocateInNewSpace, isolate).address(),
+ UNCLASSIFIED,
+ 53,
+ "Runtime::AllocateInNewSpace");
+ Add(ExternalReference::old_pointer_space_allocation_top_address(
+ isolate).address(),
+ UNCLASSIFIED,
+ 54,
+ "Heap::OldPointerSpaceAllocationTopAddress");
+ Add(ExternalReference::old_pointer_space_allocation_limit_address(
+ isolate).address(),
+ UNCLASSIFIED,
+ 55,
+ "Heap::OldPointerSpaceAllocationLimitAddress");
+ Add(ExternalReference(Runtime::kAllocateInOldPointerSpace, isolate).address(),
+ UNCLASSIFIED,
+ 56,
+ "Runtime::AllocateInOldPointerSpace");
+ Add(ExternalReference::old_data_space_allocation_top_address(
+ isolate).address(),
+ UNCLASSIFIED,
+ 57,
+ "Heap::OldDataSpaceAllocationTopAddress");
+ Add(ExternalReference::old_data_space_allocation_limit_address(
+ isolate).address(),
+ UNCLASSIFIED,
+ 58,
+ "Heap::OldDataSpaceAllocationLimitAddress");
+ Add(ExternalReference(Runtime::kAllocateInOldDataSpace, isolate).address(),
+ UNCLASSIFIED,
+ 59,
+ "Runtime::AllocateInOldDataSpace");
+ Add(ExternalReference::new_space_high_promotion_mode_active_address(isolate).
+ address(),
+ UNCLASSIFIED,
+ 60,
+ "Heap::NewSpaceAllocationLimitAddress");
+ Add(ExternalReference::allocation_sites_list_address(isolate).address(),
+ UNCLASSIFIED,
+ 61,
+ "Heap::allocation_sites_list_address()");
+ Add(ExternalReference::record_object_allocation_function(isolate).address(),
+ UNCLASSIFIED,
+ 62,
+ "HeapProfiler::RecordObjectAllocationFromMasm");
+ Add(ExternalReference::address_of_uint32_bias().address(),
+ UNCLASSIFIED,
+ 63,
+ "uint32_bias");
+ Add(ExternalReference::get_mark_code_as_executed_function(isolate).address(),
+ UNCLASSIFIED,
+ 64,
+ "Code::MarkCodeAsExecuted");
+
+ // Add a small set of deopt entry addresses to the encoder without generating
+ // the deopt table code, which isn't possible at deserialization time.
+ HandleScope scope(isolate);
+ for (int entry = 0; entry < kDeoptTableSerializeEntryCount; ++entry) {
+ Address address = Deoptimizer::GetDeoptimizationEntry(
+ isolate,
+ entry,
+ Deoptimizer::LAZY,
+ Deoptimizer::CALCULATE_ENTRY_ADDRESS);
+ Add(address, LAZY_DEOPTIMIZATION, 64 + entry, "lazy_deopt");
+ }
}
-ExternalReferenceEncoder::ExternalReferenceEncoder()
+ExternalReferenceEncoder::ExternalReferenceEncoder(Isolate* isolate)
: encodings_(Match),
- isolate_(Isolate::Current()) {
+ isolate_(isolate) {
ExternalReferenceTable* external_references =
ExternalReferenceTable::instance(isolate_);
for (int i = 0; i < external_references->size(); ++i) {
@@ -568,9 +642,9 @@ void ExternalReferenceEncoder::Put(Address key, int index) {
}
-ExternalReferenceDecoder::ExternalReferenceDecoder()
+ExternalReferenceDecoder::ExternalReferenceDecoder(Isolate* isolate)
: encodings_(NewArray<Address*>(kTypeCodeCount)),
- isolate_(Isolate::Current()) {
+ isolate_(isolate) {
ExternalReferenceTable* external_references =
ExternalReferenceTable::instance(isolate_);
for (int type = kFirstTypeCode; type < kTypeCodeCount; ++type) {
@@ -595,6 +669,140 @@ bool Serializer::serialization_enabled_ = false;
bool Serializer::too_late_to_enable_now_ = false;
+class CodeAddressMap: public CodeEventLogger {
+ public:
+ explicit CodeAddressMap(Isolate* isolate)
+ : isolate_(isolate) {
+ isolate->logger()->addCodeEventListener(this);
+ }
+
+ virtual ~CodeAddressMap() {
+ isolate_->logger()->removeCodeEventListener(this);
+ }
+
+ virtual void CodeMoveEvent(Address from, Address to) {
+ address_to_name_map_.Move(from, to);
+ }
+
+ virtual void CodeDeleteEvent(Address from) {
+ address_to_name_map_.Remove(from);
+ }
+
+ const char* Lookup(Address address) {
+ return address_to_name_map_.Lookup(address);
+ }
+
+ private:
+ class NameMap {
+ public:
+ NameMap() : impl_(&PointerEquals) {}
+
+ ~NameMap() {
+ for (HashMap::Entry* p = impl_.Start(); p != NULL; p = impl_.Next(p)) {
+ DeleteArray(static_cast<const char*>(p->value));
+ }
+ }
+
+ void Insert(Address code_address, const char* name, int name_size) {
+ HashMap::Entry* entry = FindOrCreateEntry(code_address);
+ if (entry->value == NULL) {
+ entry->value = CopyName(name, name_size);
+ }
+ }
+
+ const char* Lookup(Address code_address) {
+ HashMap::Entry* entry = FindEntry(code_address);
+ return (entry != NULL) ? static_cast<const char*>(entry->value) : NULL;
+ }
+
+ void Remove(Address code_address) {
+ HashMap::Entry* entry = FindEntry(code_address);
+ if (entry != NULL) {
+ DeleteArray(static_cast<char*>(entry->value));
+ RemoveEntry(entry);
+ }
+ }
+
+ void Move(Address from, Address to) {
+ if (from == to) return;
+ HashMap::Entry* from_entry = FindEntry(from);
+ ASSERT(from_entry != NULL);
+ void* value = from_entry->value;
+ RemoveEntry(from_entry);
+ HashMap::Entry* to_entry = FindOrCreateEntry(to);
+ ASSERT(to_entry->value == NULL);
+ to_entry->value = value;
+ }
+
+ private:
+ static bool PointerEquals(void* lhs, void* rhs) {
+ return lhs == rhs;
+ }
+
+ static char* CopyName(const char* name, int name_size) {
+ char* result = NewArray<char>(name_size + 1);
+ for (int i = 0; i < name_size; ++i) {
+ char c = name[i];
+ if (c == '\0') c = ' ';
+ result[i] = c;
+ }
+ result[name_size] = '\0';
+ return result;
+ }
+
+ HashMap::Entry* FindOrCreateEntry(Address code_address) {
+ return impl_.Lookup(code_address, ComputePointerHash(code_address), true);
+ }
+
+ HashMap::Entry* FindEntry(Address code_address) {
+ return impl_.Lookup(code_address,
+ ComputePointerHash(code_address),
+ false);
+ }
+
+ void RemoveEntry(HashMap::Entry* entry) {
+ impl_.Remove(entry->key, entry->hash);
+ }
+
+ HashMap impl_;
+
+ DISALLOW_COPY_AND_ASSIGN(NameMap);
+ };
+
+ virtual void LogRecordedBuffer(Code* code,
+ SharedFunctionInfo*,
+ const char* name,
+ int length) {
+ address_to_name_map_.Insert(code->address(), name, length);
+ }
+
+ NameMap address_to_name_map_;
+ Isolate* isolate_;
+};
+
+
+CodeAddressMap* Serializer::code_address_map_ = NULL;
+
+
+void Serializer::Enable(Isolate* isolate) {
+ if (!serialization_enabled_) {
+ ASSERT(!too_late_to_enable_now_);
+ }
+ if (serialization_enabled_) return;
+ serialization_enabled_ = true;
+ isolate->InitializeLoggingAndCounters();
+ code_address_map_ = new CodeAddressMap(isolate);
+}
+
+
+void Serializer::Disable() {
+ if (!serialization_enabled_) return;
+ serialization_enabled_ = false;
+ delete code_address_map_;
+ code_address_map_ = NULL;
+}
+
+
Deserializer::Deserializer(SnapshotByteSource* source)
: isolate_(NULL),
source_(source),
@@ -605,8 +813,8 @@ Deserializer::Deserializer(SnapshotByteSource* source)
}
-void Deserializer::Deserialize() {
- isolate_ = Isolate::Current();
+void Deserializer::Deserialize(Isolate* isolate) {
+ isolate_ = isolate;
ASSERT(isolate_ != NULL);
isolate_->heap()->ReserveSpace(reservations_, &high_water_[0]);
// No active threads.
@@ -614,13 +822,24 @@ void Deserializer::Deserialize() {
// No active handles.
ASSERT(isolate_->handle_scope_implementer()->blocks()->is_empty());
ASSERT_EQ(NULL, external_reference_decoder_);
- external_reference_decoder_ = new ExternalReferenceDecoder();
+ external_reference_decoder_ = new ExternalReferenceDecoder(isolate);
isolate_->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG);
isolate_->heap()->RepairFreeListsAfterBoot();
isolate_->heap()->IterateWeakRoots(this, VISIT_ALL);
isolate_->heap()->set_native_contexts_list(
isolate_->heap()->undefined_value());
+ isolate_->heap()->set_array_buffers_list(
+ isolate_->heap()->undefined_value());
+
+ // The allocation site list is built during root iteration, but if no sites
+ // were encountered then it needs to be initialized to undefined.
+ if (isolate_->heap()->allocation_sites_list() == Smi::FromInt(0)) {
+ isolate_->heap()->set_allocation_sites_list(
+ isolate_->heap()->undefined_value());
+ }
+
+ isolate_->heap()->InitializeWeakObjectToCodeTable();
// Update data pointers to the external strings containing natives sources.
for (int i = 0; i < Natives::GetBuiltinsCount(); i++) {
@@ -636,14 +855,14 @@ void Deserializer::Deserialize() {
}
-void Deserializer::DeserializePartial(Object** root) {
- isolate_ = Isolate::Current();
+void Deserializer::DeserializePartial(Isolate* isolate, Object** root) {
+ isolate_ = isolate;
for (int i = NEW_SPACE; i < kNumberOfSpaces; i++) {
ASSERT(reservations_[i] != kUninitializedReservation);
}
isolate_->heap()->ReserveSpace(reservations_, &high_water_[0]);
if (external_reference_decoder_ == NULL) {
- external_reference_decoder_ = new ExternalReferenceDecoder();
+ external_reference_decoder_ = new ExternalReferenceDecoder(isolate);
}
// Keep track of the code space start and end pointers in case new
@@ -677,6 +896,16 @@ void Deserializer::VisitPointers(Object** start, Object** end) {
}
+void Deserializer::RelinkAllocationSite(AllocationSite* site) {
+ if (isolate_->heap()->allocation_sites_list() == Smi::FromInt(0)) {
+ site->set_weak_next(isolate_->heap()->undefined_value());
+ } else {
+ site->set_weak_next(isolate_->heap()->allocation_sites_list());
+ }
+ isolate_->heap()->set_allocation_sites_list(site);
+}
+
+
// This routine writes the new object into the pointer provided and then
// returns true if the new object was in young space and false otherwise.
// The reason for this strange interface is that otherwise the object is
@@ -686,16 +915,25 @@ void Deserializer::ReadObject(int space_number,
Object** write_back) {
int size = source_->GetInt() << kObjectAlignmentBits;
Address address = Allocate(space_number, size);
- *write_back = HeapObject::FromAddress(address);
+ HeapObject* obj = HeapObject::FromAddress(address);
+ *write_back = obj;
Object** current = reinterpret_cast<Object**>(address);
Object** limit = current + (size >> kPointerSizeLog2);
if (FLAG_log_snapshot_positions) {
LOG(isolate_, SnapshotPositionEvent(address, source_->position()));
}
ReadChunk(current, limit, space_number, address);
+
+ // TODO(mvstanton): consider treating the heap()->allocation_sites_list()
+ // as a (weak) root. If this root is relocated correctly,
+ // RelinkAllocationSite() isn't necessary.
+ if (obj->IsAllocationSite()) {
+ RelinkAllocationSite(AllocationSite::cast(obj));
+ }
+
#ifdef DEBUG
bool is_codespace = (space_number == CODE_SPACE);
- ASSERT(HeapObject::FromAddress(address)->IsCode() == is_codespace);
+ ASSERT(obj->IsCode() == is_codespace);
#endif
}
@@ -710,6 +948,7 @@ void Deserializer::ReadChunk(Object** current,
bool write_barrier_needed = (current_object_address != NULL &&
source_space != NEW_SPACE &&
source_space != CELL_SPACE &&
+ source_space != PROPERTY_CELL_SPACE &&
source_space != CODE_SPACE &&
source_space != OLD_DATA_SPACE);
while (current < limit) {
@@ -770,8 +1009,7 @@ void Deserializer::ReadChunk(Object** current,
new_code_object->instruction_start()); \
} else { \
ASSERT(space_number == CODE_SPACE); \
- JSGlobalPropertyCell* cell = \
- JSGlobalPropertyCell::cast(new_object); \
+ Cell* cell = Cell::cast(new_object); \
new_object = reinterpret_cast<Object*>( \
cell->ValueAddress()); \
} \
@@ -811,6 +1049,7 @@ void Deserializer::ReadChunk(Object** current,
CASE_STATEMENT(where, how, within, OLD_POINTER_SPACE) \
CASE_STATEMENT(where, how, within, CODE_SPACE) \
CASE_STATEMENT(where, how, within, CELL_SPACE) \
+ CASE_STATEMENT(where, how, within, PROPERTY_CELL_SPACE) \
CASE_STATEMENT(where, how, within, MAP_SPACE) \
CASE_BODY(where, how, within, kAnyOldSpace)
@@ -1043,15 +1282,14 @@ void SnapshotByteSink::PutInt(uintptr_t integer, const char* description) {
}
-Serializer::Serializer(SnapshotByteSink* sink)
- : sink_(sink),
+Serializer::Serializer(Isolate* isolate, SnapshotByteSink* sink)
+ : isolate_(isolate),
+ sink_(sink),
current_root_index_(0),
- external_reference_encoder_(new ExternalReferenceEncoder),
+ external_reference_encoder_(new ExternalReferenceEncoder(isolate)),
root_index_wave_front_(0) {
- isolate_ = Isolate::Current();
// The serializer is meant to be used only to generate initial heap images
// from a context in which there is only one isolate.
- ASSERT(isolate_->IsDefaultIsolate());
for (int i = 0; i <= LAST_SPACE; i++) {
fullness_[i] = 0;
}
@@ -1064,16 +1302,17 @@ Serializer::~Serializer() {
void StartupSerializer::SerializeStrongReferences() {
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = this->isolate();
// No active threads.
- CHECK_EQ(NULL, Isolate::Current()->thread_manager()->FirstThreadStateInUse());
+ CHECK_EQ(NULL, isolate->thread_manager()->FirstThreadStateInUse());
// No active or weak handles.
CHECK(isolate->handle_scope_implementer()->blocks()->is_empty());
CHECK_EQ(0, isolate->global_handles()->NumberOfWeakHandles());
+ CHECK_EQ(0, isolate->eternal_handles()->NumberOfHandles());
// We don't support serializing installed extensions.
CHECK(!isolate->has_installed_extensions());
- HEAP->IterateStrongRoots(this, VISIT_ONLY_STRONG);
+ isolate->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG);
}
@@ -1083,16 +1322,23 @@ void PartialSerializer::Serialize(Object** object) {
}
+bool Serializer::ShouldBeSkipped(Object** current) {
+ Object** roots = isolate()->heap()->roots_array_start();
+ return current == &roots[Heap::kStoreBufferTopRootIndex]
+ || current == &roots[Heap::kStackLimitRootIndex]
+ || current == &roots[Heap::kRealStackLimitRootIndex];
+}
+
+
void Serializer::VisitPointers(Object** start, Object** end) {
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = this->isolate();
for (Object** current = start; current < end; current++) {
if (start == isolate->heap()->roots_array_start()) {
root_index_wave_front_ =
Max(root_index_wave_front_, static_cast<intptr_t>(current - start));
}
- if (reinterpret_cast<Address>(current) ==
- isolate->heap()->store_buffer()->TopAddress()) {
+ if (ShouldBeSkipped(current)) {
sink_->Put(kSkip, "Skip");
sink_->PutInt(kPointerSize, "SkipOneWord");
} else if ((*current)->IsSmi()) {
@@ -1115,9 +1361,9 @@ void Serializer::VisitPointers(Object** start, Object** end) {
// that correspond to the elements of this cache array. On deserialization we
// therefore need to visit the cache array. This fills it up with pointers to
// deserialized objects.
-void SerializerDeserializer::Iterate(ObjectVisitor* visitor) {
+void SerializerDeserializer::Iterate(Isolate* isolate,
+ ObjectVisitor* visitor) {
if (Serializer::enabled()) return;
- Isolate* isolate = Isolate::Current();
for (int i = 0; ; i++) {
if (isolate->serialize_partial_snapshot_cache_length() <= i) {
// Extend the array ready to get a value from the visitor when
@@ -1136,7 +1382,7 @@ void SerializerDeserializer::Iterate(ObjectVisitor* visitor) {
int PartialSerializer::PartialSnapshotCacheIndex(HeapObject* heap_object) {
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = this->isolate();
for (int i = 0;
i < isolate->serialize_partial_snapshot_cache_length();
@@ -1159,7 +1405,7 @@ int PartialSerializer::PartialSnapshotCacheIndex(HeapObject* heap_object) {
int Serializer::RootIndex(HeapObject* heap_object, HowToCode from) {
- Heap* heap = HEAP;
+ Heap* heap = isolate()->heap();
if (heap->InNewSpace(heap_object)) return kInvalidRootIndex;
for (int i = 0; i < root_index_wave_front_; i++) {
Object* root = heap->roots_array_start()[i];
@@ -1249,10 +1495,9 @@ void StartupSerializer::SerializeWeakReferences() {
// will contain some references needed to decode the partial snapshot. We
// add one entry with 'undefined' which is the sentinel that the deserializer
// uses to know it is done deserializing the array.
- Isolate* isolate = Isolate::Current();
- Object* undefined = isolate->heap()->undefined_value();
+ Object* undefined = isolate()->heap()->undefined_value();
VisitPointer(&undefined);
- HEAP->IterateWeakRoots(this, VISIT_ALL);
+ isolate()->heap()->IterateWeakRoots(this, VISIT_ALL);
Pad();
}
@@ -1265,7 +1510,7 @@ void Serializer::PutRoot(int root_index,
if (how_to_code == kPlain &&
where_to_point == kStartOfObject &&
root_index < kRootArrayNumberOfConstantEncodings &&
- !HEAP->InNewSpace(object)) {
+ !isolate()->heap()->InNewSpace(object)) {
if (skip == 0) {
sink_->Put(kRootArrayConstants + kNoSkipDistance + root_index,
"RootConstant");
@@ -1297,7 +1542,7 @@ void PartialSerializer::SerializeObject(
// The code-caches link to context-specific code objects, which
// the startup and context serializers cannot currently handle.
ASSERT(Map::cast(heap_object)->code_cache() ==
- heap_object->GetHeap()->raw_unchecked_empty_fixed_array());
+ heap_object->GetHeap()->empty_fixed_array());
}
int root_index;
@@ -1323,9 +1568,9 @@ void PartialSerializer::SerializeObject(
// should go through the root array or through the partial snapshot cache.
// If this is not the case you may have to add something to the root array.
ASSERT(!startup_serializer_->address_mapper()->IsMapped(heap_object));
- // All the symbols that the partial snapshot needs should be either in the
- // root table or in the partial snapshot cache.
- ASSERT(!heap_object->IsSymbol());
+ // All the internalized strings that the partial snapshot needs should be
+ // either in the root table or in the partial snapshot cache.
+ ASSERT(!heap_object->IsInternalizedString());
if (address_mapper_.IsMapped(heap_object)) {
int space = SpaceOfObject(heap_object);
@@ -1359,7 +1604,11 @@ void Serializer::ObjectSerializer::Serialize() {
"ObjectSerialization");
sink_->PutInt(size >> kObjectAlignmentBits, "Size in words");
- LOG(i::Isolate::Current(),
+ ASSERT(code_address_map_);
+ const char* code_name = code_address_map_->Lookup(object_->address());
+ LOG(serializer_->isolate_,
+ CodeNameEvent(object_->address(), sink_->Position(), code_name));
+ LOG(serializer_->isolate_,
SnapshotPositionEvent(object_->address(), sink_->Position()));
// Mark this object as already serialized.
@@ -1394,7 +1643,7 @@ void Serializer::ObjectSerializer::VisitPointers(Object** start,
root_index != kInvalidRootIndex &&
root_index < kRootArrayNumberOfConstantEncodings &&
current_contents == current[-1]) {
- ASSERT(!HEAP->InNewSpace(current_contents));
+ ASSERT(!serializer_->isolate()->heap()->InNewSpace(current_contents));
int repeat_count = 1;
while (current < end - 1 && current[repeat_count] == current_contents) {
repeat_count++;
@@ -1429,19 +1678,15 @@ void Serializer::ObjectSerializer::VisitEmbeddedPointer(RelocInfo* rinfo) {
}
-void Serializer::ObjectSerializer::VisitExternalReferences(Address* start,
- Address* end) {
- Address references_start = reinterpret_cast<Address>(start);
+void Serializer::ObjectSerializer::VisitExternalReference(Address* p) {
+ Address references_start = reinterpret_cast<Address>(p);
int skip = OutputRawData(references_start, kCanReturnSkipInsteadOfSkipping);
- for (Address* current = start; current < end; current++) {
- sink_->Put(kExternalReference + kPlain + kStartOfObject, "ExternalRef");
- sink_->PutInt(skip, "SkipB4ExternalRef");
- skip = 0;
- int reference_id = serializer_->EncodeExternalReference(*current);
- sink_->PutInt(reference_id, "reference id");
- }
- bytes_processed_so_far_ += static_cast<int>((end - start) * kPointerSize);
+ sink_->Put(kExternalReference + kPlain + kStartOfObject, "ExternalRef");
+ sink_->PutInt(skip, "SkipB4ExternalRef");
+ int reference_id = serializer_->EncodeExternalReference(*p);
+ sink_->PutInt(reference_id, "reference id");
+ bytes_processed_so_far_ += kPointerSize;
}
@@ -1498,10 +1743,9 @@ void Serializer::ObjectSerializer::VisitCodeEntry(Address entry_address) {
}
-void Serializer::ObjectSerializer::VisitGlobalPropertyCell(RelocInfo* rinfo) {
- ASSERT(rinfo->rmode() == RelocInfo::GLOBAL_PROPERTY_CELL);
- JSGlobalPropertyCell* cell =
- JSGlobalPropertyCell::cast(rinfo->target_cell());
+void Serializer::ObjectSerializer::VisitCell(RelocInfo* rinfo) {
+ ASSERT(rinfo->rmode() == RelocInfo::CELL);
+ Cell* cell = Cell::cast(rinfo->target_cell());
int skip = OutputRawData(rinfo->pc(), kCanReturnSkipInsteadOfSkipping);
serializer_->SerializeObject(cell, kPlain, kInnerPointer, skip);
}
@@ -1512,7 +1756,8 @@ void Serializer::ObjectSerializer::VisitExternalAsciiString(
Address references_start = reinterpret_cast<Address>(resource_pointer);
OutputRawData(references_start);
for (int i = 0; i < Natives::GetBuiltinsCount(); i++) {
- Object* source = HEAP->natives_source_cache()->get(i);
+ Object* source =
+ serializer_->isolate()->heap()->natives_source_cache()->get(i);
if (!source->IsUndefined()) {
ExternalAsciiString* string = ExternalAsciiString::cast(source);
typedef v8::String::ExternalAsciiStringResource Resource;
@@ -1581,7 +1826,7 @@ int Serializer::ObjectSerializer::OutputRawData(
int Serializer::SpaceOfObject(HeapObject* object) {
for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) {
AllocationSpace s = static_cast<AllocationSpace>(i);
- if (HEAP->InSpace(object, s)) {
+ if (object->GetHeap()->InSpace(object, s)) {
ASSERT(i < kNumberOfSpaces);
return i;
}
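
The NameMap nested inside CodeAddressMap above is a pointer-keyed map from code address to name that follows code objects as they move or are deleted. A rough standard-library analogue, using std::string values instead of the hand-copied char arrays:

    #include <cstdint>
    #include <string>
    #include <unordered_map>

    using Address = std::uintptr_t;  // stand-in for v8::internal::Address

    class SimpleNameMap {
     public:
      void Insert(Address code, std::string name) {
        map_.emplace(code, std::move(name));
      }

      const std::string* Lookup(Address code) const {
        auto it = map_.find(code);
        return it == map_.end() ? nullptr : &it->second;
      }

      void Remove(Address code) { map_.erase(code); }

      // Transfer the name when a code object is relocated from 'from' to 'to'.
      void Move(Address from, Address to) {
        if (from == to) return;
        auto it = map_.find(from);
        if (it == map_.end()) return;
        std::string name = std::move(it->second);
        map_.erase(it);
        map_[to] = std::move(name);
      }

     private:
      std::unordered_map<Address, std::string> map_;
    };

    int main() {
      SimpleNameMap names;
      names.Insert(0x1000, "example code name");  // hypothetical entry
      names.Move(0x1000, 0x2000);
      return names.Lookup(0x2000) != nullptr && names.Lookup(0x1000) == nullptr ? 0 : 1;
    }
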
diff --git a/deps/v8/src/serialize.h b/deps/v8/src/serialize.h
index 204179285..47627ac2d 100644
--- a/deps/v8/src/serialize.h
+++ b/deps/v8/src/serialize.h
@@ -47,10 +47,11 @@ enum TypeCode {
EXTENSION,
ACCESSOR,
RUNTIME_ENTRY,
- STUB_CACHE_TABLE
+ STUB_CACHE_TABLE,
+ LAZY_DEOPTIMIZATION
};
-const int kTypeCodeCount = STUB_CACHE_TABLE + 1;
+const int kTypeCodeCount = LAZY_DEOPTIMIZATION + 1;
const int kFirstTypeCode = UNCLASSIFIED;
const int kReferenceIdBits = 16;
@@ -59,6 +60,7 @@ const int kReferenceTypeShift = kReferenceIdBits;
const int kDebugRegisterBits = 4;
const int kDebugIdShift = kDebugRegisterBits;
+const int kDeoptTableSerializeEntryCount = 8;
// ExternalReferenceTable is a helper class that defines the relationship
// between external references and their encodings. It is used to build
@@ -108,7 +110,7 @@ class ExternalReferenceTable {
class ExternalReferenceEncoder {
public:
- ExternalReferenceEncoder();
+ explicit ExternalReferenceEncoder(Isolate* isolate);
uint32_t Encode(Address key) const;
@@ -132,7 +134,7 @@ class ExternalReferenceEncoder {
class ExternalReferenceDecoder {
public:
- ExternalReferenceDecoder();
+ explicit ExternalReferenceDecoder(Isolate* isolate);
~ExternalReferenceDecoder();
Address Decode(uint32_t key) const {
@@ -206,7 +208,7 @@ class SnapshotByteSource {
// both.
class SerializerDeserializer: public ObjectVisitor {
public:
- static void Iterate(ObjectVisitor* visitor);
+ static void Iterate(Isolate* isolate, ObjectVisitor* visitor);
static int nop() { return kNop; }
@@ -309,7 +311,7 @@ int SnapshotByteSource::GetInt() {
void SnapshotByteSource::CopyRaw(byte* to, int number_of_bytes) {
- memcpy(to, data_ + position_, number_of_bytes);
+ OS::MemCopy(to, data_ + position_, number_of_bytes);
position_ += number_of_bytes;
}
@@ -323,10 +325,10 @@ class Deserializer: public SerializerDeserializer {
virtual ~Deserializer();
// Deserialize the snapshot into an empty heap.
- void Deserialize();
+ void Deserialize(Isolate* isolate);
// Deserialize a single object and the objects reachable from it.
- void DeserializePartial(Object** root);
+ void DeserializePartial(Isolate* isolate, Object** root);
void set_reservation(int space_number, int reservation) {
ASSERT(space_number >= 0);
@@ -337,14 +339,14 @@ class Deserializer: public SerializerDeserializer {
private:
virtual void VisitPointers(Object** start, Object** end);
- virtual void VisitExternalReferences(Address* start, Address* end) {
- UNREACHABLE();
- }
-
virtual void VisitRuntimeEntry(RelocInfo* rinfo) {
UNREACHABLE();
}
+ // Allocation sites are present in the snapshot, and must be linked into
+ // a list at deserialization time.
+ void RelinkAllocationSite(AllocationSite* site);
+
// Fills in some heap data in an area from start to end (non-inclusive). The
// space id is used for the write barrier. The object_address is the address
// of the object we are writing into, or NULL if we are not writing into an
@@ -360,6 +362,10 @@ class Deserializer: public SerializerDeserializer {
Address Allocate(int space_index, int size) {
Address address = high_water_[space_index];
high_water_[space_index] = address + size;
+ HeapProfiler* profiler = isolate_->heap_profiler();
+ if (profiler->is_tracking_allocations()) {
+ profiler->NewObjectEvent(address, size);
+ }
return address;
}
@@ -406,12 +412,11 @@ class SnapshotByteSink {
class SerializationAddressMapper {
public:
SerializationAddressMapper()
- : serialization_map_(new HashMap(&SerializationMatchFun)),
- no_allocation_(new AssertNoAllocation()) { }
+ : no_allocation_(),
+ serialization_map_(new HashMap(&SerializationMatchFun)) { }
~SerializationAddressMapper() {
delete serialization_map_;
- delete no_allocation_;
}
bool IsMapped(HeapObject* obj) {
@@ -448,16 +453,18 @@ class SerializationAddressMapper {
return reinterpret_cast<void*>(v);
}
+ DisallowHeapAllocation no_allocation_;
HashMap* serialization_map_;
- AssertNoAllocation* no_allocation_;
DISALLOW_COPY_AND_ASSIGN(SerializationAddressMapper);
};
+class CodeAddressMap;
+
// There can be only one serializer per V8 process.
class Serializer : public SerializerDeserializer {
public:
- explicit Serializer(SnapshotByteSink* sink);
+ Serializer(Isolate* isolate, SnapshotByteSink* sink);
~Serializer();
void VisitPointers(Object** start, Object** end);
// You can call this after serialization to find out how much space was used
@@ -467,14 +474,10 @@ class Serializer : public SerializerDeserializer {
return fullness_[space];
}
- static void Enable() {
- if (!serialization_enabled_) {
- ASSERT(!too_late_to_enable_now_);
- }
- serialization_enabled_ = true;
- }
+ Isolate* isolate() const { return isolate_; }
+ static void Enable(Isolate* isolate);
+ static void Disable();
- static void Disable() { serialization_enabled_ = false; }
// Call this when you have made use of the fact that there is no serialization
// going on.
static void TooLateToEnableNow() { too_late_to_enable_now_ = true; }
@@ -514,11 +517,11 @@ class Serializer : public SerializerDeserializer {
void Serialize();
void VisitPointers(Object** start, Object** end);
void VisitEmbeddedPointer(RelocInfo* target);
- void VisitExternalReferences(Address* start, Address* end);
+ void VisitExternalReference(Address* p);
void VisitExternalReference(RelocInfo* rinfo);
void VisitCodeTarget(RelocInfo* target);
void VisitCodeEntry(Address entry_address);
- void VisitGlobalPropertyCell(RelocInfo* rinfo);
+ void VisitCell(RelocInfo* rinfo);
void VisitRuntimeEntry(RelocInfo* reloc);
// Used for serializing the external strings that hold the natives source.
void VisitExternalAsciiString(
@@ -566,6 +569,10 @@ class Serializer : public SerializerDeserializer {
int SpaceAreaSize(int space);
+ // Some roots should not be serialized, because their actual value depends on
+ // absolute addresses and they are reset after deserialization anyway.
+ bool ShouldBeSkipped(Object** current);
+
Isolate* isolate_;
// Keep track of the fullness of each space in order to generate
// relative addresses for back references.
@@ -584,15 +591,17 @@ class Serializer : public SerializerDeserializer {
friend class Deserializer;
private:
+ static CodeAddressMap* code_address_map_;
DISALLOW_COPY_AND_ASSIGN(Serializer);
};
class PartialSerializer : public Serializer {
public:
- PartialSerializer(Serializer* startup_snapshot_serializer,
+ PartialSerializer(Isolate* isolate,
+ Serializer* startup_snapshot_serializer,
SnapshotByteSink* sink)
- : Serializer(sink),
+ : Serializer(isolate, sink),
startup_serializer_(startup_snapshot_serializer) {
set_root_index_wave_front(Heap::kStrongRootListLength);
}
@@ -612,10 +621,11 @@ class PartialSerializer : public Serializer {
// unique ID, and deserializing several partial snapshots containing script
// would cause dupes.
ASSERT(!o->IsScript());
- return o->IsString() || o->IsSharedFunctionInfo() ||
+ return o->IsName() || o->IsSharedFunctionInfo() ||
o->IsHeapNumber() || o->IsCode() ||
o->IsScopeInfo() ||
- o->map() == HEAP->fixed_cow_array_map();
+ o->map() ==
+ startup_serializer_->isolate()->heap()->fixed_cow_array_map();
}
private:
@@ -626,17 +636,18 @@ class PartialSerializer : public Serializer {
class StartupSerializer : public Serializer {
public:
- explicit StartupSerializer(SnapshotByteSink* sink) : Serializer(sink) {
+ StartupSerializer(Isolate* isolate, SnapshotByteSink* sink)
+ : Serializer(isolate, sink) {
// Clear the cache of objects used by the partial snapshot. After the
// strong roots have been serialized we can create a partial snapshot
// which will repopulate the cache with objects needed by that partial
// snapshot.
- Isolate::Current()->set_serialize_partial_snapshot_cache_length(0);
+ isolate->set_serialize_partial_snapshot_cache_length(0);
}
// Serialize the current state of the heap. The order is:
// 1) Strong references.
// 2) Partial snapshot cache.
- // 3) Weak references (e.g. the symbol table).
+ // 3) Weak references (e.g. the string table).
virtual void SerializeStrongReferences();
virtual void SerializeObject(Object* o,
HowToCode how_to_code,
diff --git a/deps/v8/src/smart-pointers.h b/deps/v8/src/smart-pointers.h
index 345c4d47f..7c35b2aff 100644
--- a/deps/v8/src/smart-pointers.h
+++ b/deps/v8/src/smart-pointers.h
@@ -58,11 +58,16 @@ class SmartPointerBase {
// You can get the underlying pointer out with the * operator.
inline T* operator*() { return p_; }
- // You can use [n] to index as if it was a plain pointer
+ // You can use [n] to index as if it was a plain pointer.
inline T& operator[](size_t i) {
return p_[i];
}
+ // You can use [n] to index as if it was a plain pointer.
+ inline const T& operator[](size_t i) const {
+ return p_[i];
+ }
+
// We don't have implicit conversion to a T* since that hinders migration:
// You would not be able to change a method from returning a T* to
// returning a SmartArrayPointer<T> and then get errors wherever it is used.
@@ -77,6 +82,11 @@ class SmartPointerBase {
return temp;
}
+ inline void Reset(T* new_value) {
+ if (p_) Deallocator::Delete(p_);
+ p_ = new_value;
+ }
+
// Assignment requires an empty (NULL) SmartArrayPointer as the receiver. Like
// the copy constructor it removes the pointer in the original to avoid
// double freeing.
@@ -119,11 +129,12 @@ class SmartArrayPointer: public SmartPointerBase<ArrayDeallocator<T>, T> {
template<typename T>
struct ObjectDeallocator {
- static void Delete(T* array) {
- Malloced::Delete(array);
+ static void Delete(T* object) {
+ delete object;
}
};
+
template<typename T>
class SmartPointer: public SmartPointerBase<ObjectDeallocator<T>, T> {
public:
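
The Reset() added to SmartPointerBase above is the usual release-then-adopt operation: free whatever is held, then take ownership of the new pointer. A minimal sketch with a plain deleting owner class, leaving out the Deallocator template machinery:

    template <typename T>
    class TinyOwner {
     public:
      explicit TinyOwner(T* p = nullptr) : p_(p) {}
      ~TinyOwner() { delete p_; }

      TinyOwner(const TinyOwner&) = delete;
      TinyOwner& operator=(const TinyOwner&) = delete;

      // Delete whatever is currently held, then take ownership of new_value.
      void Reset(T* new_value) {
        if (p_ != nullptr) delete p_;
        p_ = new_value;
      }

      T* get() const { return p_; }

     private:
      T* p_;
    };

Calling Reset(new T) on a non-empty owner frees the old object before adopting the new one, matching the "if (p_) Deallocator::Delete(p_); p_ = new_value;" sequence in the hunk above.
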
diff --git a/deps/v8/src/snapshot-common.cc b/deps/v8/src/snapshot-common.cc
index a8806f053..4bdf63ced 100644
--- a/deps/v8/src/snapshot-common.cc
+++ b/deps/v8/src/snapshot-common.cc
@@ -45,7 +45,8 @@ static void ReserveSpaceForSnapshot(Deserializer* deserializer,
OS::SNPrintF(name, "%s.size", file_name);
FILE* fp = OS::FOpen(name.start(), "r");
CHECK_NE(NULL, fp);
- int new_size, pointer_size, data_size, code_size, map_size, cell_size;
+ int new_size, pointer_size, data_size, code_size, map_size, cell_size,
+ property_cell_size;
#ifdef _MSC_VER
// Avoid warning about unsafe fscanf from MSVC.
// Please note that this is only fine if %c and %s are not being used.
@@ -57,6 +58,7 @@ static void ReserveSpaceForSnapshot(Deserializer* deserializer,
CHECK_EQ(1, fscanf(fp, "code %d\n", &code_size));
CHECK_EQ(1, fscanf(fp, "map %d\n", &map_size));
CHECK_EQ(1, fscanf(fp, "cell %d\n", &cell_size));
+ CHECK_EQ(1, fscanf(fp, "property cell %d\n", &property_cell_size));
#ifdef _MSC_VER
#undef fscanf
#endif
@@ -67,6 +69,8 @@ static void ReserveSpaceForSnapshot(Deserializer* deserializer,
deserializer->set_reservation(CODE_SPACE, code_size);
deserializer->set_reservation(MAP_SPACE, map_size);
deserializer->set_reservation(CELL_SPACE, cell_size);
+ deserializer->set_reservation(PROPERTY_CELL_SPACE,
+ property_cell_size);
name.Dispose();
}
@@ -78,6 +82,8 @@ void Snapshot::ReserveSpaceForLinkedInSnapshot(Deserializer* deserializer) {
deserializer->set_reservation(CODE_SPACE, code_space_used_);
deserializer->set_reservation(MAP_SPACE, map_space_used_);
deserializer->set_reservation(CELL_SPACE, cell_space_used_);
+ deserializer->set_reservation(PROPERTY_CELL_SPACE,
+ property_cell_space_used_);
}
@@ -96,10 +102,19 @@ bool Snapshot::Initialize(const char* snapshot_file) {
DeleteArray(str);
return success;
} else if (size_ > 0) {
+ ElapsedTimer timer;
+ if (FLAG_profile_deserialization) {
+ timer.Start();
+ }
SnapshotByteSource source(raw_data_, raw_size_);
Deserializer deserializer(&source);
ReserveSpaceForLinkedInSnapshot(&deserializer);
- return V8::Initialize(&deserializer);
+ bool success = V8::Initialize(&deserializer);
+ if (FLAG_profile_deserialization) {
+ double ms = timer.Elapsed().InMillisecondsF();
+ PrintF("[Snapshot loading and deserialization took %0.3f ms]\n", ms);
+ }
+ return success;
}
return false;
}
@@ -110,7 +125,7 @@ bool Snapshot::HaveASnapshotToStartFrom() {
}
-Handle<Context> Snapshot::NewContextFromSnapshot() {
+Handle<Context> Snapshot::NewContextFromSnapshot(Isolate* isolate) {
if (context_size_ == 0) {
return Handle<Context>();
}
@@ -124,7 +139,9 @@ Handle<Context> Snapshot::NewContextFromSnapshot() {
deserializer.set_reservation(CODE_SPACE, context_code_space_used_);
deserializer.set_reservation(MAP_SPACE, context_map_space_used_);
deserializer.set_reservation(CELL_SPACE, context_cell_space_used_);
- deserializer.DeserializePartial(&root);
+ deserializer.set_reservation(PROPERTY_CELL_SPACE,
+ context_property_cell_space_used_);
+ deserializer.DeserializePartial(isolate, &root);
CHECK(root->IsContext());
return Handle<Context>(Context::cast(root));
}
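
The FLAG_profile_deserialization path added above simply brackets snapshot deserialization with a timer and prints the elapsed milliseconds. The same pattern in portable C++ with std::chrono; DeserializeSnapshot here is a hypothetical stand-in for the real call:

    #include <chrono>
    #include <cstdio>

    static bool DeserializeSnapshot() { return true; }  // hypothetical stand-in

    bool InitializeWithTiming(bool profile) {
      std::chrono::steady_clock::time_point start;
      if (profile) start = std::chrono::steady_clock::now();
      bool success = DeserializeSnapshot();
      if (profile) {
        double ms = std::chrono::duration<double, std::milli>(
                        std::chrono::steady_clock::now() - start).count();
        std::printf("[Snapshot loading and deserialization took %0.3f ms]\n", ms);
      }
      return success;
    }

    int main() { return InitializeWithTiming(true) ? 0 : 1; }
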
diff --git a/deps/v8/src/snapshot-empty.cc b/deps/v8/src/snapshot-empty.cc
index 70e7ab815..54236d82e 100644
--- a/deps/v8/src/snapshot-empty.cc
+++ b/deps/v8/src/snapshot-empty.cc
@@ -49,6 +49,7 @@ const int Snapshot::data_space_used_ = 0;
const int Snapshot::code_space_used_ = 0;
const int Snapshot::map_space_used_ = 0;
const int Snapshot::cell_space_used_ = 0;
+const int Snapshot::property_cell_space_used_ = 0;
const int Snapshot::context_new_space_used_ = 0;
const int Snapshot::context_pointer_space_used_ = 0;
@@ -56,5 +57,6 @@ const int Snapshot::context_data_space_used_ = 0;
const int Snapshot::context_code_space_used_ = 0;
const int Snapshot::context_map_space_used_ = 0;
const int Snapshot::context_cell_space_used_ = 0;
+const int Snapshot::context_property_cell_space_used_ = 0;
} } // namespace v8::internal
diff --git a/deps/v8/src/snapshot.h b/deps/v8/src/snapshot.h
index c4ae45eee..4041f2925 100644
--- a/deps/v8/src/snapshot.h
+++ b/deps/v8/src/snapshot.h
@@ -43,7 +43,7 @@ class Snapshot {
static bool HaveASnapshotToStartFrom();
// Create a new context using the internal partial snapshot.
- static Handle<Context> NewContextFromSnapshot();
+ static Handle<Context> NewContextFromSnapshot(Isolate* isolate);
// Returns whether or not the snapshot is enabled.
static bool IsEnabled() { return size_ != 0; }
@@ -77,12 +77,14 @@ class Snapshot {
static const int code_space_used_;
static const int map_space_used_;
static const int cell_space_used_;
+ static const int property_cell_space_used_;
static const int context_new_space_used_;
static const int context_pointer_space_used_;
static const int context_data_space_used_;
static const int context_code_space_used_;
static const int context_map_space_used_;
static const int context_cell_space_used_;
+ static const int context_property_cell_space_used_;
static const int size_;
static const int raw_size_;
static const int context_size_;
diff --git a/deps/v8/src/spaces-inl.h b/deps/v8/src/spaces-inl.h
index c64772775..d5c114c5b 100644
--- a/deps/v8/src/spaces-inl.h
+++ b/deps/v8/src/spaces-inl.h
@@ -28,6 +28,7 @@
#ifndef V8_SPACES_INL_H_
#define V8_SPACES_INL_H_
+#include "heap-profiler.h"
#include "isolate.h"
#include "spaces.h"
#include "v8memory.h"
@@ -164,7 +165,7 @@ Page* Page::Initialize(Heap* heap,
Executability executable,
PagedSpace* owner) {
Page* page = reinterpret_cast<Page*>(chunk);
- ASSERT(chunk->size() <= static_cast<size_t>(kPageSize));
+ ASSERT(page->area_size() <= kNonCodeObjectAreaSize);
ASSERT(chunk->owner() == owner);
owner->IncreaseCapacity(page->area_size());
owner->Free(page->area_start(), page->area_size());
@@ -194,11 +195,11 @@ void MemoryChunk::set_scan_on_scavenge(bool scan) {
}
-MemoryChunk* MemoryChunk::FromAnyPointerAddress(Address addr) {
+MemoryChunk* MemoryChunk::FromAnyPointerAddress(Heap* heap, Address addr) {
MemoryChunk* maybe = reinterpret_cast<MemoryChunk*>(
OffsetFrom(addr) & ~Page::kPageAlignmentMask);
if (maybe->owner() != NULL) return maybe;
- LargeObjectIterator iterator(HEAP->lo_space());
+ LargeObjectIterator iterator(heap->lo_space());
for (HeapObject* o = iterator.Next(); o != NULL; o = iterator.Next()) {
// Fixed arrays are the only pointer-containing objects in large object
// space.
@@ -214,6 +215,19 @@ MemoryChunk* MemoryChunk::FromAnyPointerAddress(Address addr) {
}
+void MemoryChunk::UpdateHighWaterMark(Address mark) {
+ if (mark == NULL) return;
+ // Need to subtract one from the mark because when a chunk is full the
+ // top points to the next address after the chunk, which effectively belongs
+ // to another chunk. See the comment to Page::FromAllocationTop.
+ MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1);
+ int new_mark = static_cast<int>(mark - chunk->address());
+ if (new_mark > chunk->high_water_mark_) {
+ chunk->high_water_mark_ = new_mark;
+ }
+}
+
+
PointerChunkIterator::PointerChunkIterator(Heap* heap)
: state_(kOldPointerState),
old_pointer_iterator_(heap->old_pointer_space()),
@@ -250,22 +264,28 @@ void Page::set_prev_page(Page* page) {
// allocation) so it can be used by all the allocation functions and for all
// the paged spaces.
HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
- Address current_top = allocation_info_.top;
+ Address current_top = allocation_info_.top();
Address new_top = current_top + size_in_bytes;
- if (new_top > allocation_info_.limit) return NULL;
+ if (new_top > allocation_info_.limit()) return NULL;
- allocation_info_.top = new_top;
+ allocation_info_.set_top(new_top);
return HeapObject::FromAddress(current_top);
}
// Raw allocation.
-MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes) {
+MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes,
+ AllocationType event) {
+ HeapProfiler* profiler = heap()->isolate()->heap_profiler();
+
HeapObject* object = AllocateLinearly(size_in_bytes);
if (object != NULL) {
if (identity() == CODE_SPACE) {
SkipList::Update(object->address(), size_in_bytes);
}
+ if (event == NEW_OBJECT && profiler->is_tracking_allocations()) {
+ profiler->NewObjectEvent(object->address(), size_in_bytes);
+ }
return object;
}
@@ -278,6 +298,9 @@ MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes) {
if (identity() == CODE_SPACE) {
SkipList::Update(object->address(), size_in_bytes);
}
+ if (event == NEW_OBJECT && profiler->is_tracking_allocations()) {
+ profiler->NewObjectEvent(object->address(), size_in_bytes);
+ }
return object;
}
@@ -286,6 +309,9 @@ MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes) {
if (identity() == CODE_SPACE) {
SkipList::Update(object->address(), size_in_bytes);
}
+ if (event == NEW_OBJECT && profiler->is_tracking_allocations()) {
+ profiler->NewObjectEvent(object->address(), size_in_bytes);
+ }
return object;
}
@@ -298,31 +324,36 @@ MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes) {
MaybeObject* NewSpace::AllocateRaw(int size_in_bytes) {
- Address old_top = allocation_info_.top;
+ Address old_top = allocation_info_.top();
#ifdef DEBUG
// If we are stressing compaction we waste some memory in new space
// in order to get more frequent GCs.
- if (FLAG_stress_compaction && !HEAP->linear_allocation()) {
- if (allocation_info_.limit - old_top >= size_in_bytes * 4) {
+ if (FLAG_stress_compaction && !heap()->linear_allocation()) {
+ if (allocation_info_.limit() - old_top >= size_in_bytes * 4) {
int filler_size = size_in_bytes * 4;
for (int i = 0; i < filler_size; i += kPointerSize) {
*(reinterpret_cast<Object**>(old_top + i)) =
- HEAP->one_pointer_filler_map();
+ heap()->one_pointer_filler_map();
}
old_top += filler_size;
- allocation_info_.top += filler_size;
+ allocation_info_.set_top(allocation_info_.top() + filler_size);
}
}
#endif
- if (allocation_info_.limit - old_top < size_in_bytes) {
+ if (allocation_info_.limit() - old_top < size_in_bytes) {
return SlowAllocateRaw(size_in_bytes);
}
- Object* obj = HeapObject::FromAddress(old_top);
- allocation_info_.top += size_in_bytes;
+ HeapObject* obj = HeapObject::FromAddress(old_top);
+ allocation_info_.set_top(allocation_info_.top() + size_in_bytes);
ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
+ HeapProfiler* profiler = heap()->isolate()->heap_profiler();
+ if (profiler != NULL && profiler->is_tracking_allocations()) {
+ profiler->NewObjectEvent(obj->address(), size_in_bytes);
+ }
+
return obj;
}
@@ -338,23 +369,6 @@ intptr_t LargeObjectSpace::Available() {
}
-template <typename StringType>
-void NewSpace::ShrinkStringAtAllocationBoundary(String* string, int length) {
- ASSERT(length <= string->length());
- ASSERT(string->IsSeqString());
- ASSERT(string->address() + StringType::SizeFor(string->length()) ==
- allocation_info_.top);
- Address old_top = allocation_info_.top;
- allocation_info_.top =
- string->address() + StringType::SizeFor(length);
- string->set_length(length);
- if (Marking::IsBlack(Marking::MarkBitFrom(string))) {
- int delta = static_cast<int>(old_top - allocation_info_.top);
- MemoryChunk::IncrementLiveBytesFromMutator(string->address(), -delta);
- }
-}
-
-
bool FreeListNode::IsFreeListNode(HeapObject* object) {
Map* map = object->map();
Heap* heap = object->GetHeap();
diff --git a/deps/v8/src/spaces.cc b/deps/v8/src/spaces.cc
index cc841806b..fe5eeb5e4 100644
--- a/deps/v8/src/spaces.cc
+++ b/deps/v8/src/spaces.cc
@@ -27,9 +27,9 @@
#include "v8.h"
-#include "liveobjectlist-inl.h"
#include "macro-assembler.h"
#include "mark-compact.h"
+#include "msan.h"
#include "platform.h"
namespace v8 {
@@ -69,11 +69,12 @@ HeapObjectIterator::HeapObjectIterator(PagedSpace* space,
HeapObjectIterator::HeapObjectIterator(Page* page,
HeapObjectCallback size_func) {
Space* owner = page->owner();
- ASSERT(owner == HEAP->old_pointer_space() ||
- owner == HEAP->old_data_space() ||
- owner == HEAP->map_space() ||
- owner == HEAP->cell_space() ||
- owner == HEAP->code_space());
+ ASSERT(owner == page->heap()->old_pointer_space() ||
+ owner == page->heap()->old_data_space() ||
+ owner == page->heap()->map_space() ||
+ owner == page->heap()->cell_space() ||
+ owner == page->heap()->property_cell_space() ||
+ owner == page->heap()->code_space());
Initialize(reinterpret_cast<PagedSpace*>(owner),
page->area_start(),
page->area_end(),
@@ -207,17 +208,18 @@ void CodeRange::GetNextAllocationBlock(size_t requested) {
}
-
-Address CodeRange::AllocateRawMemory(const size_t requested,
+Address CodeRange::AllocateRawMemory(const size_t requested_size,
+ const size_t commit_size,
size_t* allocated) {
+ ASSERT(commit_size <= requested_size);
ASSERT(current_allocation_block_index_ < allocation_list_.length());
- if (requested > allocation_list_[current_allocation_block_index_].size) {
+ if (requested_size > allocation_list_[current_allocation_block_index_].size) {
// Find an allocation block large enough. This function call may
// call V8::FatalProcessOutOfMemory if it cannot find a large enough block.
- GetNextAllocationBlock(requested);
+ GetNextAllocationBlock(requested_size);
}
// Commit the requested memory at the start of the current allocation block.
- size_t aligned_requested = RoundUp(requested, MemoryChunk::kAlignment);
+ size_t aligned_requested = RoundUp(requested_size, MemoryChunk::kAlignment);
FreeBlock current = allocation_list_[current_allocation_block_index_];
if (aligned_requested >= (current.size - Page::kPageSize)) {
// Don't leave a small free block, useless for a large object or chunk.
@@ -227,9 +229,10 @@ Address CodeRange::AllocateRawMemory(const size_t requested,
}
ASSERT(*allocated <= current.size);
ASSERT(IsAddressAligned(current.start, MemoryChunk::kAlignment));
- if (!MemoryAllocator::CommitCodePage(code_range_,
- current.start,
- *allocated)) {
+ if (!isolate_->memory_allocator()->CommitExecutableMemory(code_range_,
+ current.start,
+ commit_size,
+ *allocated)) {
*allocated = 0;
return NULL;
}
@@ -242,6 +245,16 @@ Address CodeRange::AllocateRawMemory(const size_t requested,
}
+bool CodeRange::CommitRawMemory(Address start, size_t length) {
+ return isolate_->memory_allocator()->CommitMemory(start, length, EXECUTABLE);
+}
+
+
+bool CodeRange::UncommitRawMemory(Address start, size_t length) {
+ return code_range_->Uncommit(start, length);
+}
+
+
void CodeRange::FreeRawMemory(Address address, size_t length) {
ASSERT(IsAddressAligned(address, MemoryChunk::kAlignment));
free_list_.Add(FreeBlock(address, length));
@@ -266,7 +279,9 @@ MemoryAllocator::MemoryAllocator(Isolate* isolate)
capacity_(0),
capacity_executable_(0),
size_(0),
- size_executable_(0) {
+ size_executable_(0),
+ lowest_ever_allocated_(reinterpret_cast<void*>(-1)),
+ highest_ever_allocated_(reinterpret_cast<void*>(0)) {
}
@@ -292,6 +307,17 @@ void MemoryAllocator::TearDown() {
}
+bool MemoryAllocator::CommitMemory(Address base,
+ size_t size,
+ Executability executable) {
+ if (!VirtualMemory::CommitRegion(base, size, executable == EXECUTABLE)) {
+ return false;
+ }
+ UpdateAllocatedSpaceLimits(base, base + size);
+ return true;
+}
+
+
void MemoryAllocator::FreeMemory(VirtualMemory* reservation,
Executability executable) {
// TODO(gc) make code_range part of memory allocator?
@@ -353,20 +379,27 @@ Address MemoryAllocator::ReserveAlignedMemory(size_t size,
}
-Address MemoryAllocator::AllocateAlignedMemory(size_t size,
+Address MemoryAllocator::AllocateAlignedMemory(size_t reserve_size,
+ size_t commit_size,
size_t alignment,
Executability executable,
VirtualMemory* controller) {
+ ASSERT(commit_size <= reserve_size);
VirtualMemory reservation;
- Address base = ReserveAlignedMemory(size, alignment, &reservation);
+ Address base = ReserveAlignedMemory(reserve_size, alignment, &reservation);
if (base == NULL) return NULL;
if (executable == EXECUTABLE) {
- if (!CommitCodePage(&reservation, base, size)) {
+ if (!CommitExecutableMemory(&reservation,
+ base,
+ commit_size,
+ reserve_size)) {
base = NULL;
}
} else {
- if (!reservation.Commit(base, size, false)) {
+ if (reservation.Commit(base, commit_size, false)) {
+ UpdateAllocatedSpaceLimits(base, base + commit_size);
+ } else {
base = NULL;
}
}
@@ -448,6 +481,14 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap,
chunk->slots_buffer_ = NULL;
chunk->skip_list_ = NULL;
chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
+ chunk->progress_bar_ = 0;
+ chunk->high_water_mark_ = static_cast<int>(area_start - base);
+ chunk->parallel_sweeping_ = 0;
+ chunk->available_in_small_free_list_ = 0;
+ chunk->available_in_medium_free_list_ = 0;
+ chunk->available_in_large_free_list_ = 0;
+ chunk->available_in_huge_free_list_ = 0;
+ chunk->non_available_small_blocks_ = 0;
chunk->ResetLiveBytes();
Bitmap::Clear(chunk);
chunk->initialize_scan_on_scavenge(false);
@@ -468,9 +509,70 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap,
}
+// Commit MemoryChunk area to the requested size.
+bool MemoryChunk::CommitArea(size_t requested) {
+ size_t guard_size = IsFlagSet(IS_EXECUTABLE) ?
+ MemoryAllocator::CodePageGuardSize() : 0;
+ size_t header_size = area_start() - address() - guard_size;
+ size_t commit_size = RoundUp(header_size + requested, OS::CommitPageSize());
+ size_t committed_size = RoundUp(header_size + (area_end() - area_start()),
+ OS::CommitPageSize());
+
+ if (commit_size > committed_size) {
+ // Commit size should be less or equal than the reserved size.
+ ASSERT(commit_size <= size() - 2 * guard_size);
+ // Append the committed area.
+ Address start = address() + committed_size + guard_size;
+ size_t length = commit_size - committed_size;
+ if (reservation_.IsReserved()) {
+ Executability executable = IsFlagSet(IS_EXECUTABLE)
+ ? EXECUTABLE : NOT_EXECUTABLE;
+ if (!heap()->isolate()->memory_allocator()->CommitMemory(
+ start, length, executable)) {
+ return false;
+ }
+ } else {
+ CodeRange* code_range = heap_->isolate()->code_range();
+ ASSERT(code_range->exists() && IsFlagSet(IS_EXECUTABLE));
+ if (!code_range->CommitRawMemory(start, length)) return false;
+ }
+
+ if (Heap::ShouldZapGarbage()) {
+ heap_->isolate()->memory_allocator()->ZapBlock(start, length);
+ }
+ } else if (commit_size < committed_size) {
+ ASSERT(commit_size > 0);
+ // Shrink the committed area.
+ size_t length = committed_size - commit_size;
+ Address start = address() + committed_size + guard_size - length;
+ if (reservation_.IsReserved()) {
+ if (!reservation_.Uncommit(start, length)) return false;
+ } else {
+ CodeRange* code_range = heap_->isolate()->code_range();
+ ASSERT(code_range->exists() && IsFlagSet(IS_EXECUTABLE));
+ if (!code_range->UncommitRawMemory(start, length)) return false;
+ }
+ }
+
+ area_end_ = area_start_ + requested;
+ return true;
+}
+
+
void MemoryChunk::InsertAfter(MemoryChunk* other) {
next_chunk_ = other->next_chunk_;
prev_chunk_ = other;
+
+ // This memory barrier is needed since concurrent sweeper threads may iterate
+ // over the list of pages while a new page is inserted.
+ // TODO(hpayer): find a cleaner way to guarantee that the page list can be
+ // expanded concurrently
+ MemoryBarrier();
+
+ // The following two write operations can take effect in arbitrary order
+ // since pages are always iterated by the sweeper threads in LIFO order, i.e,
+ // the inserted page becomes visible for the sweeper threads after
+ // other->next_chunk_ = this;
other->next_chunk_->prev_chunk_ = this;
other->next_chunk_ = this;
}
@@ -488,9 +590,12 @@ void MemoryChunk::Unlink() {
}
-MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
+MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
+ intptr_t commit_area_size,
Executability executable,
Space* owner) {
+ ASSERT(commit_area_size <= reserve_area_size);
+
size_t chunk_size;
Heap* heap = isolate_->heap();
Address base = NULL;
@@ -498,8 +603,38 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
Address area_start = NULL;
Address area_end = NULL;
+ //
+ // MemoryChunk layout:
+ //
+ // Executable
+ // +----------------------------+<- base aligned with MemoryChunk::kAlignment
+ // | Header |
+ // +----------------------------+<- base + CodePageGuardStartOffset
+ // | Guard |
+ // +----------------------------+<- area_start_
+ // | Area |
+ // +----------------------------+<- area_end_ (area_start + commit_area_size)
+ // | Committed but not used |
+ // +----------------------------+<- aligned at OS page boundary
+ // | Reserved but not committed |
+ // +----------------------------+<- aligned at OS page boundary
+ // | Guard |
+ // +----------------------------+<- base + chunk_size
+ //
+ // Non-executable
+ // +----------------------------+<- base aligned with MemoryChunk::kAlignment
+ // | Header |
+ // +----------------------------+<- area_start_ (base + kObjectStartOffset)
+ // | Area |
+ // +----------------------------+<- area_end_ (area_start + commit_area_size)
+ // | Committed but not used |
+ // +----------------------------+<- aligned at OS page boundary
+ // | Reserved but not committed |
+ // +----------------------------+<- base + chunk_size
+ //
+
if (executable == EXECUTABLE) {
- chunk_size = RoundUp(CodePageAreaStartOffset() + body_size,
+ chunk_size = RoundUp(CodePageAreaStartOffset() + reserve_area_size,
OS::CommitPageSize()) + CodePageGuardSize();
// Check executable memory limit.
@@ -510,10 +645,15 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
return NULL;
}
+ // Size of header (not executable) plus area (executable).
+ size_t commit_size = RoundUp(CodePageGuardStartOffset() + commit_area_size,
+ OS::CommitPageSize());
// Allocate executable memory either from code range or from the
// OS.
if (isolate_->code_range()->exists()) {
- base = isolate_->code_range()->AllocateRawMemory(chunk_size, &chunk_size);
+ base = isolate_->code_range()->AllocateRawMemory(chunk_size,
+ commit_size,
+ &chunk_size);
ASSERT(IsAligned(reinterpret_cast<intptr_t>(base),
MemoryChunk::kAlignment));
if (base == NULL) return NULL;
@@ -522,6 +662,7 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
size_executable_ += chunk_size;
} else {
base = AllocateAlignedMemory(chunk_size,
+ commit_size,
MemoryChunk::kAlignment,
executable,
&reservation);
@@ -532,14 +673,18 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
if (Heap::ShouldZapGarbage()) {
ZapBlock(base, CodePageGuardStartOffset());
- ZapBlock(base + CodePageAreaStartOffset(), body_size);
+ ZapBlock(base + CodePageAreaStartOffset(), commit_area_size);
}
area_start = base + CodePageAreaStartOffset();
- area_end = area_start + body_size;
+ area_end = area_start + commit_area_size;
} else {
- chunk_size = MemoryChunk::kObjectStartOffset + body_size;
+ chunk_size = RoundUp(MemoryChunk::kObjectStartOffset + reserve_area_size,
+ OS::CommitPageSize());
+ size_t commit_size = RoundUp(MemoryChunk::kObjectStartOffset +
+ commit_area_size, OS::CommitPageSize());
base = AllocateAlignedMemory(chunk_size,
+ commit_size,
MemoryChunk::kAlignment,
executable,
&reservation);
@@ -547,13 +692,15 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
if (base == NULL) return NULL;
if (Heap::ShouldZapGarbage()) {
- ZapBlock(base, chunk_size);
+ ZapBlock(base, Page::kObjectStartOffset + commit_area_size);
}
area_start = base + Page::kObjectStartOffset;
- area_end = base + chunk_size;
+ area_end = area_start + commit_area_size;
}
+ // Use chunk_size for statistics and callbacks because we assume that they
+ // treat reserved but not-yet committed memory regions of chunks as allocated.
isolate_->counters()->memory_allocated()->
Increment(static_cast<int>(chunk_size));
@@ -571,14 +718,24 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
executable,
owner);
result->set_reserved_memory(&reservation);
+ MSAN_MEMORY_IS_INITIALIZED(base, chunk_size);
return result;
}
+void Page::ResetFreeListStatistics() {
+ non_available_small_blocks_ = 0;
+ available_in_small_free_list_ = 0;
+ available_in_medium_free_list_ = 0;
+ available_in_large_free_list_ = 0;
+ available_in_huge_free_list_ = 0;
+}
+
+
Page* MemoryAllocator::AllocatePage(intptr_t size,
PagedSpace* owner,
Executability executable) {
- MemoryChunk* chunk = AllocateChunk(size, executable, owner);
+ MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);
if (chunk == NULL) return NULL;
@@ -589,7 +746,10 @@ Page* MemoryAllocator::AllocatePage(intptr_t size,
LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
Space* owner,
Executability executable) {
- MemoryChunk* chunk = AllocateChunk(object_size, executable, owner);
+ MemoryChunk* chunk = AllocateChunk(object_size,
+ object_size,
+ executable,
+ owner);
if (chunk == NULL) return NULL;
return LargePage::Initialize(isolate_->heap(), chunk);
}
@@ -623,7 +783,7 @@ void MemoryAllocator::Free(MemoryChunk* chunk) {
bool MemoryAllocator::CommitBlock(Address start,
size_t size,
Executability executable) {
- if (!VirtualMemory::CommitRegion(start, size, executable)) return false;
+ if (!CommitMemory(start, size, executable)) return false;
if (Heap::ShouldZapGarbage()) {
ZapBlock(start, size);
@@ -731,9 +891,10 @@ int MemoryAllocator::CodePageAreaEndOffset() {
}
-bool MemoryAllocator::CommitCodePage(VirtualMemory* vm,
- Address start,
- size_t size) {
+bool MemoryAllocator::CommitExecutableMemory(VirtualMemory* vm,
+ Address start,
+ size_t commit_size,
+ size_t reserved_size) {
// Commit page header (not executable).
if (!vm->Commit(start,
CodePageGuardStartOffset(),
@@ -747,18 +908,20 @@ bool MemoryAllocator::CommitCodePage(VirtualMemory* vm,
}
// Commit page body (executable).
- size_t area_size = size - CodePageAreaStartOffset() - CodePageGuardSize();
if (!vm->Commit(start + CodePageAreaStartOffset(),
- area_size,
+ commit_size - CodePageGuardStartOffset(),
true)) {
return false;
}
- // Create guard page after the allocatable area.
- if (!vm->Guard(start + CodePageAreaStartOffset() + area_size)) {
+ // Create guard page before the end.
+ if (!vm->Guard(start + reserved_size - CodePageGuardSize())) {
return false;
}
+ UpdateAllocatedSpaceLimits(start,
+ start + CodePageAreaStartOffset() +
+ commit_size - CodePageGuardStartOffset());
return true;
}
@@ -774,6 +937,7 @@ void MemoryChunk::IncrementLiveBytesFromMutator(Address address, int by) {
chunk->IncrementLiveBytes(by);
}
+
// -----------------------------------------------------------------------------
// PagedSpace implementation
@@ -796,8 +960,8 @@ PagedSpace::PagedSpace(Heap* heap,
* AreaSize();
accounting_stats_.Clear();
- allocation_info_.top = NULL;
- allocation_info_.limit = NULL;
+ allocation_info_.set_top(NULL);
+ allocation_info_.set_limit(NULL);
anchor_.InitializeAsAnchor(this);
}
@@ -824,6 +988,18 @@ void PagedSpace::TearDown() {
}
+size_t PagedSpace::CommittedPhysicalMemory() {
+ if (!VirtualMemory::HasLazyCommits()) return CommittedMemory();
+ MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
+ size_t size = 0;
+ PageIterator it(this);
+ while (it.has_next()) {
+ size += it.next()->CommittedPhysicalMemory();
+ }
+ return size;
+}
+
+
MaybeObject* PagedSpace::FindObject(Address addr) {
// Note: this function can only be called on precisely swept spaces.
ASSERT(!heap()->mark_compact_collector()->in_use());
@@ -842,6 +1018,7 @@ MaybeObject* PagedSpace::FindObject(Address addr) {
return Failure::Exception();
}
+
bool PagedSpace::CanExpand() {
ASSERT(max_capacity_ % AreaSize() == 0);
@@ -855,6 +1032,7 @@ bool PagedSpace::CanExpand() {
return true;
}
+
bool PagedSpace::Expand() {
if (!CanExpand()) return false;
@@ -880,7 +1058,7 @@ intptr_t PagedSpace::SizeOfFirstPage() {
int size = 0;
switch (identity()) {
case OLD_POINTER_SPACE:
- size = 64 * kPointerSize * KB;
+ size = 72 * kPointerSize * KB;
break;
case OLD_DATA_SPACE:
size = 192 * KB;
@@ -891,14 +1069,22 @@ intptr_t PagedSpace::SizeOfFirstPage() {
case CELL_SPACE:
size = 16 * kPointerSize * KB;
break;
+ case PROPERTY_CELL_SPACE:
+ size = 8 * kPointerSize * KB;
+ break;
case CODE_SPACE:
- if (kPointerSize == 8) {
- // On x64 we allocate code pages in a special way (from the reserved
- // 2Byte area). That part of the code is not yet upgraded to handle
- // small pages.
+ if (heap()->isolate()->code_range()->exists()) {
+ // When code range exists, code pages are allocated in a special way
+ // (from the reserved code range). That part of the code is not yet
+ // upgraded to handle small pages.
size = AreaSize();
} else {
- size = 384 * KB;
+#if V8_TARGET_ARCH_MIPS
+ // TODO(plind): Investigate larger code stubs size on MIPS.
+ size = 480 * KB;
+#else
+ size = 416 * KB;
+#endif
}
break;
default:
@@ -919,7 +1105,24 @@ int PagedSpace::CountTotalPages() {
}
-void PagedSpace::ReleasePage(Page* page) {
+void PagedSpace::ObtainFreeListStatistics(Page* page, SizeStats* sizes) {
+ sizes->huge_size_ = page->available_in_huge_free_list();
+ sizes->small_size_ = page->available_in_small_free_list();
+ sizes->medium_size_ = page->available_in_medium_free_list();
+ sizes->large_size_ = page->available_in_large_free_list();
+}
+
+
+void PagedSpace::ResetFreeListStatistics() {
+ PageIterator page_iterator(this);
+ while (page_iterator.has_next()) {
+ Page* page = page_iterator.next();
+ page->ResetFreeListStatistics();
+ }
+}
+
+
+void PagedSpace::ReleasePage(Page* page, bool unlink) {
ASSERT(page->LiveBytes() == 0);
ASSERT(AreaSize() == page->area_size());
@@ -939,11 +1142,14 @@ void PagedSpace::ReleasePage(Page* page) {
DecreaseUnsweptFreeBytes(page);
}
- if (Page::FromAllocationTop(allocation_info_.top) == page) {
- allocation_info_.top = allocation_info_.limit = NULL;
+ if (Page::FromAllocationTop(allocation_info_.top()) == page) {
+ allocation_info_.set_top(NULL);
+ allocation_info_.set_limit(NULL);
}
- page->Unlink();
+ if (unlink) {
+ page->Unlink();
+ }
if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) {
heap()->isolate()->memory_allocator()->Free(page);
} else {
@@ -955,36 +1161,6 @@ void PagedSpace::ReleasePage(Page* page) {
}
-void PagedSpace::ReleaseAllUnusedPages() {
- PageIterator it(this);
- while (it.has_next()) {
- Page* page = it.next();
- if (!page->WasSwept()) {
- if (page->LiveBytes() == 0) ReleasePage(page);
- } else {
- HeapObject* obj = HeapObject::FromAddress(page->area_start());
- if (obj->IsFreeSpace() &&
- FreeSpace::cast(obj)->size() == AreaSize()) {
- // Sometimes we allocate memory from free list but don't
- // immediately initialize it (e.g. see PagedSpace::ReserveSpace
- // called from Heap::ReserveSpace that can cause GC before
- // reserved space is actually initialized).
- // Thus we can't simply assume that obj represents a valid
- // node still owned by a free list
- // Instead we should verify that the page is fully covered
- // by free list items.
- FreeList::SizeStats sizes;
- free_list_.CountFreeListItems(page, &sizes);
- if (sizes.Total() == AreaSize()) {
- ReleasePage(page);
- }
- }
- }
- }
- heap()->FreeQueuedChunks();
-}
-
-
#ifdef DEBUG
void PagedSpace::Print() { }
#endif
@@ -995,12 +1171,12 @@ void PagedSpace::Verify(ObjectVisitor* visitor) {
if (was_swept_conservatively_) return;
bool allocation_pointer_found_in_space =
- (allocation_info_.top == allocation_info_.limit);
+ (allocation_info_.top() == allocation_info_.limit());
PageIterator page_iterator(this);
while (page_iterator.has_next()) {
Page* page = page_iterator.next();
CHECK(page->owner() == this);
- if (page == Page::FromAllocationTop(allocation_info_.top)) {
+ if (page == Page::FromAllocationTop(allocation_info_.top())) {
allocation_pointer_found_in_space = true;
}
CHECK(page->WasSweptPrecisely());
@@ -1111,8 +1287,8 @@ void NewSpace::TearDown() {
}
start_ = NULL;
- allocation_info_.top = NULL;
- allocation_info_.limit = NULL;
+ allocation_info_.set_top(NULL);
+ allocation_info_.set_limit(NULL);
to_space_.TearDown();
from_space_.TearDown();
@@ -1169,21 +1345,22 @@ void NewSpace::Shrink() {
}
}
}
- allocation_info_.limit = to_space_.page_high();
+ allocation_info_.set_limit(to_space_.page_high());
ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}
void NewSpace::UpdateAllocationInfo() {
- allocation_info_.top = to_space_.page_low();
- allocation_info_.limit = to_space_.page_high();
+ MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
+ allocation_info_.set_top(to_space_.page_low());
+ allocation_info_.set_limit(to_space_.page_high());
// Lower limit during incremental marking.
if (heap()->incremental_marking()->IsMarking() &&
inline_allocation_limit_step() != 0) {
Address new_limit =
- allocation_info_.top + inline_allocation_limit_step();
- allocation_info_.limit = Min(new_limit, allocation_info_.limit);
+ allocation_info_.top() + inline_allocation_limit_step();
+ allocation_info_.set_limit(Min(new_limit, allocation_info_.limit()));
}
ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}
@@ -1202,7 +1379,7 @@ void NewSpace::ResetAllocationInfo() {
bool NewSpace::AddFreshPage() {
- Address top = allocation_info_.top;
+ Address top = allocation_info_.top();
if (NewSpacePage::IsAtStart(top)) {
// The current page is already empty. Don't try to make another.
@@ -1234,15 +1411,16 @@ bool NewSpace::AddFreshPage() {
MaybeObject* NewSpace::SlowAllocateRaw(int size_in_bytes) {
- Address old_top = allocation_info_.top;
+ Address old_top = allocation_info_.top();
Address new_top = old_top + size_in_bytes;
Address high = to_space_.page_high();
- if (allocation_info_.limit < high) {
+ if (allocation_info_.limit() < high) {
// Incremental marking has lowered the limit to get a
// chance to do a step.
- allocation_info_.limit = Min(
- allocation_info_.limit + inline_allocation_limit_step_,
+ Address new_limit = Min(
+ allocation_info_.limit() + inline_allocation_limit_step_,
high);
+ allocation_info_.set_limit(new_limit);
int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
heap()->incremental_marking()->Step(
bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD);
@@ -1351,20 +1529,18 @@ void SemiSpace::TearDown() {
bool SemiSpace::Commit() {
ASSERT(!is_committed());
int pages = capacity_ / Page::kPageSize;
- Address end = start_ + maximum_capacity_;
- Address start = end - pages * Page::kPageSize;
- if (!heap()->isolate()->memory_allocator()->CommitBlock(start,
+ if (!heap()->isolate()->memory_allocator()->CommitBlock(start_,
capacity_,
executable())) {
return false;
}
- NewSpacePage* page = anchor();
- for (int i = 1; i <= pages; i++) {
+ NewSpacePage* current = anchor();
+ for (int i = 0; i < pages; i++) {
NewSpacePage* new_page =
- NewSpacePage::Initialize(heap(), end - i * Page::kPageSize, this);
- new_page->InsertAfter(page);
- page = new_page;
+ NewSpacePage::Initialize(heap(), start_ + i * Page::kPageSize, this);
+ new_page->InsertAfter(current);
+ current = new_page;
}
committed_ = true;
@@ -1387,6 +1563,17 @@ bool SemiSpace::Uncommit() {
}
+size_t SemiSpace::CommittedPhysicalMemory() {
+ if (!is_committed()) return 0;
+ size_t size = 0;
+ NewSpacePageIterator it(this);
+ while (it.has_next()) {
+ size += it.next()->CommittedPhysicalMemory();
+ }
+ return size;
+}
+
+
bool SemiSpace::GrowTo(int new_capacity) {
if (!is_committed()) {
if (!Commit()) return false;
@@ -1397,20 +1584,18 @@ bool SemiSpace::GrowTo(int new_capacity) {
int pages_before = capacity_ / Page::kPageSize;
int pages_after = new_capacity / Page::kPageSize;
- Address end = start_ + maximum_capacity_;
- Address start = end - new_capacity;
size_t delta = new_capacity - capacity_;
ASSERT(IsAligned(delta, OS::AllocateAlignment()));
if (!heap()->isolate()->memory_allocator()->CommitBlock(
- start, delta, executable())) {
+ start_ + capacity_, delta, executable())) {
return false;
}
capacity_ = new_capacity;
NewSpacePage* last_page = anchor()->prev_page();
ASSERT(last_page != anchor());
- for (int i = pages_before + 1; i <= pages_after; i++) {
- Address page_address = end - i * Page::kPageSize;
+ for (int i = pages_before; i < pages_after; i++) {
+ Address page_address = start_ + i * Page::kPageSize;
NewSpacePage* new_page = NewSpacePage::Initialize(heap(),
page_address,
this);
@@ -1430,25 +1615,20 @@ bool SemiSpace::ShrinkTo(int new_capacity) {
ASSERT(new_capacity >= initial_capacity_);
ASSERT(new_capacity < capacity_);
if (is_committed()) {
- // Semispaces grow backwards from the end of their allocated capacity,
- // so we find the before and after start addresses relative to the
- // end of the space.
- Address space_end = start_ + maximum_capacity_;
- Address old_start = space_end - capacity_;
size_t delta = capacity_ - new_capacity;
ASSERT(IsAligned(delta, OS::AllocateAlignment()));
MemoryAllocator* allocator = heap()->isolate()->memory_allocator();
- if (!allocator->UncommitBlock(old_start, delta)) {
+ if (!allocator->UncommitBlock(start_ + new_capacity, delta)) {
return false;
}
int pages_after = new_capacity / Page::kPageSize;
NewSpacePage* new_last_page =
- NewSpacePage::FromAddress(space_end - pages_after * Page::kPageSize);
+ NewSpacePage::FromAddress(start_ + (pages_after - 1) * Page::kPageSize);
new_last_page->set_next_page(anchor());
anchor()->set_prev_page(new_last_page);
- ASSERT((current_page_ <= first_page()) && (current_page_ >= new_last_page));
+ ASSERT((current_page_ >= first_page()) && (current_page_ <= new_last_page));
}
capacity_ = new_capacity;
@@ -1618,8 +1798,7 @@ void SemiSpaceIterator::Initialize(Address start,
#ifdef DEBUG
// heap_histograms is shared, always clear it before using it.
-static void ClearHistograms() {
- Isolate* isolate = Isolate::Current();
+static void ClearHistograms(Isolate* isolate) {
// We reset the name each time, though it hasn't changed.
#define DEF_TYPE_NAME(name) isolate->heap_histograms()[name].set_name(#name);
INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
@@ -1633,48 +1812,20 @@ static void ClearHistograms() {
}
-static void ClearCodeKindStatistics() {
- Isolate* isolate = Isolate::Current();
+static void ClearCodeKindStatistics(int* code_kind_statistics) {
for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
- isolate->code_kind_statistics()[i] = 0;
+ code_kind_statistics[i] = 0;
}
}
-static void ReportCodeKindStatistics() {
- Isolate* isolate = Isolate::Current();
- const char* table[Code::NUMBER_OF_KINDS] = { NULL };
-
-#define CASE(name) \
- case Code::name: table[Code::name] = #name; \
- break
-
- for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
- switch (static_cast<Code::Kind>(i)) {
- CASE(FUNCTION);
- CASE(OPTIMIZED_FUNCTION);
- CASE(STUB);
- CASE(BUILTIN);
- CASE(LOAD_IC);
- CASE(KEYED_LOAD_IC);
- CASE(STORE_IC);
- CASE(KEYED_STORE_IC);
- CASE(CALL_IC);
- CASE(KEYED_CALL_IC);
- CASE(UNARY_OP_IC);
- CASE(BINARY_OP_IC);
- CASE(COMPARE_IC);
- CASE(TO_BOOLEAN_IC);
- }
- }
-
-#undef CASE
-
+static void ReportCodeKindStatistics(int* code_kind_statistics) {
PrintF("\n Code kind histograms: \n");
for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
- if (isolate->code_kind_statistics()[i] > 0) {
- PrintF(" %-20s: %10d bytes\n", table[i],
- isolate->code_kind_statistics()[i]);
+ if (code_kind_statistics[i] > 0) {
+ PrintF(" %-20s: %10d bytes\n",
+ Code::Kind2String(static_cast<Code::Kind>(i)),
+ code_kind_statistics[i]);
}
}
PrintF("\n");
@@ -1682,7 +1833,7 @@ static void ReportCodeKindStatistics() {
static int CollectHistogramInfo(HeapObject* obj) {
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = obj->GetIsolate();
InstanceType type = obj->map()->instance_type();
ASSERT(0 <= type && type <= LAST_TYPE);
ASSERT(isolate->heap_histograms()[type].name() != NULL);
@@ -1698,8 +1849,7 @@ static int CollectHistogramInfo(HeapObject* obj) {
}
-static void ReportHistogram(bool print_spill) {
- Isolate* isolate = Isolate::Current();
+static void ReportHistogram(Isolate* isolate, bool print_spill) {
PrintF("\n Object Histogram:\n");
for (int i = 0; i <= LAST_TYPE; i++) {
if (isolate->heap_histograms()[i].number() > 0) {
@@ -1739,6 +1889,7 @@ void NewSpace::ClearHistograms() {
}
}
+
// Because the copying collector does not touch garbage objects, we iterate
// the new space before a collection to get a histogram of allocated objects.
// This only happens when --log-gc flag is set.
@@ -1799,7 +1950,7 @@ void NewSpace::ReportStatistics() {
#endif // DEBUG
if (FLAG_log_gc) {
- Isolate* isolate = ISOLATE;
+ Isolate* isolate = heap()->isolate();
DoReportStatistics(isolate, allocated_histogram_, "allocated");
DoReportStatistics(isolate, promoted_histogram_, "promoted");
}
@@ -1821,6 +1972,18 @@ void NewSpace::RecordPromotion(HeapObject* obj) {
promoted_histogram_[type].increment_bytes(obj->Size());
}
+
+size_t NewSpace::CommittedPhysicalMemory() {
+ if (!VirtualMemory::HasLazyCommits()) return CommittedMemory();
+ MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
+ size_t size = to_space_.CommittedPhysicalMemory();
+ if (from_space_.is_committed()) {
+ size += from_space_.CommittedPhysicalMemory();
+ }
+ return size;
+}
+
+
// -----------------------------------------------------------------------------
// Free lists for old object spaces implementation
@@ -1854,7 +2017,7 @@ void FreeListNode::set_size(Heap* heap, int size_in_bytes) {
FreeListNode* FreeListNode::next() {
ASSERT(IsFreeListNode(this));
- if (map() == HEAP->raw_unchecked_free_space_map()) {
+ if (map() == GetHeap()->raw_unchecked_free_space_map()) {
ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize);
return reinterpret_cast<FreeListNode*>(
Memory::Address_at(address() + kNextOffset));
@@ -1867,7 +2030,7 @@ FreeListNode* FreeListNode::next() {
FreeListNode** FreeListNode::next_address() {
ASSERT(IsFreeListNode(this));
- if (map() == HEAP->raw_unchecked_free_space_map()) {
+ if (map() == GetHeap()->raw_unchecked_free_space_map()) {
ASSERT(Size() >= kNextOffset + kPointerSize);
return reinterpret_cast<FreeListNode**>(address() + kNextOffset);
} else {
@@ -1881,7 +2044,7 @@ void FreeListNode::set_next(FreeListNode* next) {
// While we are booting the VM the free space map will actually be null. So
// we have to make sure that we don't try to use it for anything at that
// stage.
- if (map() == HEAP->raw_unchecked_free_space_map()) {
+ if (map() == GetHeap()->raw_unchecked_free_space_map()) {
ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize);
Memory::Address_at(address() + kNextOffset) =
reinterpret_cast<Address>(next);
@@ -1892,115 +2055,283 @@ void FreeListNode::set_next(FreeListNode* next) {
}
+intptr_t FreeListCategory::Concatenate(FreeListCategory* category) {
+ intptr_t free_bytes = 0;
+ if (category->top_ != NULL) {
+ ASSERT(category->end_ != NULL);
+ // This is safe (not going to deadlock) since Concatenate operations
+ // are never performed on the same free lists at the same time in
+ // reverse order.
+ LockGuard<Mutex> target_lock_guard(mutex());
+ LockGuard<Mutex> source_lock_guard(category->mutex());
+ free_bytes = category->available();
+ if (end_ == NULL) {
+ end_ = category->end();
+ } else {
+ category->end()->set_next(top_);
+ }
+ top_ = category->top();
+ available_ += category->available();
+ category->Reset();
+ }
+ return free_bytes;
+}
+
+
+void FreeListCategory::Reset() {
+ top_ = NULL;
+ end_ = NULL;
+ available_ = 0;
+}
+
+
+intptr_t FreeListCategory::EvictFreeListItemsInList(Page* p) {
+ int sum = 0;
+ FreeListNode** n = &top_;
+ while (*n != NULL) {
+ if (Page::FromAddress((*n)->address()) == p) {
+ FreeSpace* free_space = reinterpret_cast<FreeSpace*>(*n);
+ sum += free_space->Size();
+ *n = (*n)->next();
+ } else {
+ n = (*n)->next_address();
+ }
+ }
+ if (top_ == NULL) {
+ end_ = NULL;
+ }
+ available_ -= sum;
+ return sum;
+}
+
+
+FreeListNode* FreeListCategory::PickNodeFromList(int *node_size) {
+ FreeListNode* node = top_;
+
+ if (node == NULL) return NULL;
+
+ while (node != NULL &&
+ Page::FromAddress(node->address())->IsEvacuationCandidate()) {
+ available_ -= reinterpret_cast<FreeSpace*>(node)->Size();
+ node = node->next();
+ }
+
+ if (node != NULL) {
+ set_top(node->next());
+ *node_size = reinterpret_cast<FreeSpace*>(node)->Size();
+ available_ -= *node_size;
+ } else {
+ set_top(NULL);
+ }
+
+ if (top() == NULL) {
+ set_end(NULL);
+ }
+
+ return node;
+}
+
+
+FreeListNode* FreeListCategory::PickNodeFromList(int size_in_bytes,
+ int *node_size) {
+ FreeListNode* node = PickNodeFromList(node_size);
+ if (node != NULL && *node_size < size_in_bytes) {
+ Free(node, *node_size);
+ *node_size = 0;
+ return NULL;
+ }
+ return node;
+}
+
+
+void FreeListCategory::Free(FreeListNode* node, int size_in_bytes) {
+ node->set_next(top_);
+ top_ = node;
+ if (end_ == NULL) {
+ end_ = node;
+ }
+ available_ += size_in_bytes;
+}
+
+
+void FreeListCategory::RepairFreeList(Heap* heap) {
+ FreeListNode* n = top_;
+ while (n != NULL) {
+ Map** map_location = reinterpret_cast<Map**>(n->address());
+ if (*map_location == NULL) {
+ *map_location = heap->free_space_map();
+ } else {
+ ASSERT(*map_location == heap->free_space_map());
+ }
+ n = n->next();
+ }
+}
+
+
FreeList::FreeList(PagedSpace* owner)
: owner_(owner), heap_(owner->heap()) {
Reset();
}
+intptr_t FreeList::Concatenate(FreeList* free_list) {
+ intptr_t free_bytes = 0;
+ free_bytes += small_list_.Concatenate(free_list->small_list());
+ free_bytes += medium_list_.Concatenate(free_list->medium_list());
+ free_bytes += large_list_.Concatenate(free_list->large_list());
+ free_bytes += huge_list_.Concatenate(free_list->huge_list());
+ return free_bytes;
+}
+
+
void FreeList::Reset() {
- available_ = 0;
- small_list_ = NULL;
- medium_list_ = NULL;
- large_list_ = NULL;
- huge_list_ = NULL;
+ small_list_.Reset();
+ medium_list_.Reset();
+ large_list_.Reset();
+ huge_list_.Reset();
}
int FreeList::Free(Address start, int size_in_bytes) {
if (size_in_bytes == 0) return 0;
+
FreeListNode* node = FreeListNode::FromAddress(start);
node->set_size(heap_, size_in_bytes);
+ Page* page = Page::FromAddress(start);
// Early return to drop too-small blocks on the floor.
- if (size_in_bytes < kSmallListMin) return size_in_bytes;
+ if (size_in_bytes < kSmallListMin) {
+ page->add_non_available_small_blocks(size_in_bytes);
+ return size_in_bytes;
+ }
// Insert other blocks at the head of a free list of the appropriate
// magnitude.
if (size_in_bytes <= kSmallListMax) {
- node->set_next(small_list_);
- small_list_ = node;
+ small_list_.Free(node, size_in_bytes);
+ page->add_available_in_small_free_list(size_in_bytes);
} else if (size_in_bytes <= kMediumListMax) {
- node->set_next(medium_list_);
- medium_list_ = node;
+ medium_list_.Free(node, size_in_bytes);
+ page->add_available_in_medium_free_list(size_in_bytes);
} else if (size_in_bytes <= kLargeListMax) {
- node->set_next(large_list_);
- large_list_ = node;
+ large_list_.Free(node, size_in_bytes);
+ page->add_available_in_large_free_list(size_in_bytes);
} else {
- node->set_next(huge_list_);
- huge_list_ = node;
+ huge_list_.Free(node, size_in_bytes);
+ page->add_available_in_huge_free_list(size_in_bytes);
}
- available_ += size_in_bytes;
- ASSERT(IsVeryLong() || available_ == SumFreeLists());
- return 0;
-}
-
-
-FreeListNode* FreeList::PickNodeFromList(FreeListNode** list, int* node_size) {
- FreeListNode* node = *list;
- if (node == NULL) return NULL;
-
- while (node != NULL &&
- Page::FromAddress(node->address())->IsEvacuationCandidate()) {
- available_ -= node->Size();
- node = node->next();
- }
-
- if (node != NULL) {
- *node_size = node->Size();
- *list = node->next();
- } else {
- *list = NULL;
- }
-
- return node;
+ ASSERT(IsVeryLong() || available() == SumFreeLists());
+ return 0;
}
FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
FreeListNode* node = NULL;
+ Page* page = NULL;
if (size_in_bytes <= kSmallAllocationMax) {
- node = PickNodeFromList(&small_list_, node_size);
- if (node != NULL) return node;
+ node = small_list_.PickNodeFromList(node_size);
+ if (node != NULL) {
+ ASSERT(size_in_bytes <= *node_size);
+ page = Page::FromAddress(node->address());
+ page->add_available_in_small_free_list(-(*node_size));
+ ASSERT(IsVeryLong() || available() == SumFreeLists());
+ return node;
+ }
}
if (size_in_bytes <= kMediumAllocationMax) {
- node = PickNodeFromList(&medium_list_, node_size);
- if (node != NULL) return node;
+ node = medium_list_.PickNodeFromList(node_size);
+ if (node != NULL) {
+ ASSERT(size_in_bytes <= *node_size);
+ page = Page::FromAddress(node->address());
+ page->add_available_in_medium_free_list(-(*node_size));
+ ASSERT(IsVeryLong() || available() == SumFreeLists());
+ return node;
+ }
}
if (size_in_bytes <= kLargeAllocationMax) {
- node = PickNodeFromList(&large_list_, node_size);
- if (node != NULL) return node;
+ node = large_list_.PickNodeFromList(node_size);
+ if (node != NULL) {
+ ASSERT(size_in_bytes <= *node_size);
+ page = Page::FromAddress(node->address());
+ page->add_available_in_large_free_list(-(*node_size));
+ ASSERT(IsVeryLong() || available() == SumFreeLists());
+ return node;
+ }
}
- for (FreeListNode** cur = &huge_list_;
+ int huge_list_available = huge_list_.available();
+ for (FreeListNode** cur = huge_list_.GetTopAddress();
*cur != NULL;
cur = (*cur)->next_address()) {
FreeListNode* cur_node = *cur;
while (cur_node != NULL &&
Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) {
- available_ -= reinterpret_cast<FreeSpace*>(cur_node)->Size();
+ int size = reinterpret_cast<FreeSpace*>(cur_node)->Size();
+ huge_list_available -= size;
+ page = Page::FromAddress(cur_node->address());
+ page->add_available_in_huge_free_list(-size);
cur_node = cur_node->next();
}
*cur = cur_node;
- if (cur_node == NULL) break;
+ if (cur_node == NULL) {
+ huge_list_.set_end(NULL);
+ break;
+ }
- ASSERT((*cur)->map() == HEAP->raw_unchecked_free_space_map());
+ ASSERT((*cur)->map() == heap_->raw_unchecked_free_space_map());
FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(*cur);
int size = cur_as_free_space->Size();
if (size >= size_in_bytes) {
// Large enough node found. Unlink it from the list.
node = *cur;
- *node_size = size;
*cur = node->next();
+ *node_size = size;
+ huge_list_available -= size;
+ page = Page::FromAddress(node->address());
+ page->add_available_in_huge_free_list(-size);
break;
}
}
+ if (huge_list_.top() == NULL) {
+ huge_list_.set_end(NULL);
+ }
+ huge_list_.set_available(huge_list_available);
+
+ if (node != NULL) {
+ ASSERT(IsVeryLong() || available() == SumFreeLists());
+ return node;
+ }
+
+ if (size_in_bytes <= kSmallListMax) {
+ node = small_list_.PickNodeFromList(size_in_bytes, node_size);
+ if (node != NULL) {
+ ASSERT(size_in_bytes <= *node_size);
+ page = Page::FromAddress(node->address());
+ page->add_available_in_small_free_list(-(*node_size));
+ }
+ } else if (size_in_bytes <= kMediumListMax) {
+ node = medium_list_.PickNodeFromList(size_in_bytes, node_size);
+ if (node != NULL) {
+ ASSERT(size_in_bytes <= *node_size);
+ page = Page::FromAddress(node->address());
+ page->add_available_in_medium_free_list(-(*node_size));
+ }
+ } else if (size_in_bytes <= kLargeListMax) {
+ node = large_list_.PickNodeFromList(size_in_bytes, node_size);
+ if (node != NULL) {
+ ASSERT(size_in_bytes <= *node_size);
+ page = Page::FromAddress(node->address());
+ page->add_available_in_large_free_list(-(*node_size));
+ }
+ }
+
+ ASSERT(IsVeryLong() || available() == SumFreeLists());
return node;
}
@@ -2016,16 +2347,6 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
// Don't free list allocate if there is linear space available.
ASSERT(owner_->limit() - owner_->top() < size_in_bytes);
- int new_node_size = 0;
- FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size);
- if (new_node == NULL) return NULL;
-
- available_ -= new_node_size;
- ASSERT(IsVeryLong() || available_ == SumFreeLists());
-
- int bytes_left = new_node_size - size_in_bytes;
- ASSERT(bytes_left >= 0);
-
int old_linear_size = static_cast<int>(owner_->limit() - owner_->top());
// Mark the old linear allocation area with a free space map so it can be
// skipped when scanning the heap. This also puts it back in the free list
@@ -2035,6 +2356,16 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
owner_->heap()->incremental_marking()->OldSpaceStep(
size_in_bytes - old_linear_size);
+ int new_node_size = 0;
+ FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size);
+ if (new_node == NULL) {
+ owner_->SetTop(NULL, NULL);
+ return NULL;
+ }
+
+ int bytes_left = new_node_size - size_in_bytes;
+ ASSERT(bytes_left >= 0);
+
#ifdef DEBUG
for (int i = 0; i < size_in_bytes / kPointerSize; i++) {
reinterpret_cast<Object**>(new_node->address())[i] =
@@ -2079,68 +2410,37 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
}
-static intptr_t CountFreeListItemsInList(FreeListNode* n, Page* p) {
- intptr_t sum = 0;
- while (n != NULL) {
- if (Page::FromAddress(n->address()) == p) {
- FreeSpace* free_space = reinterpret_cast<FreeSpace*>(n);
- sum += free_space->Size();
- }
- n = n->next();
- }
- return sum;
-}
-
+intptr_t FreeList::EvictFreeListItems(Page* p) {
+ intptr_t sum = huge_list_.EvictFreeListItemsInList(p);
+ p->set_available_in_huge_free_list(0);
-void FreeList::CountFreeListItems(Page* p, SizeStats* sizes) {
- sizes->huge_size_ = CountFreeListItemsInList(huge_list_, p);
- if (sizes->huge_size_ < p->area_size()) {
- sizes->small_size_ = CountFreeListItemsInList(small_list_, p);
- sizes->medium_size_ = CountFreeListItemsInList(medium_list_, p);
- sizes->large_size_ = CountFreeListItemsInList(large_list_, p);
- } else {
- sizes->small_size_ = 0;
- sizes->medium_size_ = 0;
- sizes->large_size_ = 0;
+ if (sum < p->area_size()) {
+ sum += small_list_.EvictFreeListItemsInList(p) +
+ medium_list_.EvictFreeListItemsInList(p) +
+ large_list_.EvictFreeListItemsInList(p);
+ p->set_available_in_small_free_list(0);
+ p->set_available_in_medium_free_list(0);
+ p->set_available_in_large_free_list(0);
}
-}
-
-static intptr_t EvictFreeListItemsInList(FreeListNode** n, Page* p) {
- intptr_t sum = 0;
- while (*n != NULL) {
- if (Page::FromAddress((*n)->address()) == p) {
- FreeSpace* free_space = reinterpret_cast<FreeSpace*>(*n);
- sum += free_space->Size();
- *n = (*n)->next();
- } else {
- n = (*n)->next_address();
- }
- }
return sum;
}
-intptr_t FreeList::EvictFreeListItems(Page* p) {
- intptr_t sum = EvictFreeListItemsInList(&huge_list_, p);
-
- if (sum < p->area_size()) {
- sum += EvictFreeListItemsInList(&small_list_, p) +
- EvictFreeListItemsInList(&medium_list_, p) +
- EvictFreeListItemsInList(&large_list_, p);
- }
-
- available_ -= static_cast<int>(sum);
-
- return sum;
+void FreeList::RepairLists(Heap* heap) {
+ small_list_.RepairFreeList(heap);
+ medium_list_.RepairFreeList(heap);
+ large_list_.RepairFreeList(heap);
+ huge_list_.RepairFreeList(heap);
}
#ifdef DEBUG
-intptr_t FreeList::SumFreeList(FreeListNode* cur) {
+intptr_t FreeListCategory::SumFreeList() {
intptr_t sum = 0;
+ FreeListNode* cur = top_;
while (cur != NULL) {
- ASSERT(cur->map() == HEAP->raw_unchecked_free_space_map());
+ ASSERT(cur->map() == cur->GetHeap()->raw_unchecked_free_space_map());
FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(cur);
sum += cur_as_free_space->Size();
cur = cur->next();
@@ -2152,8 +2452,9 @@ intptr_t FreeList::SumFreeList(FreeListNode* cur) {
static const int kVeryLongFreeList = 500;
-int FreeList::FreeListLength(FreeListNode* cur) {
+int FreeListCategory::FreeListLength() {
int length = 0;
+ FreeListNode* cur = top_;
while (cur != NULL) {
length++;
cur = cur->next();
@@ -2164,10 +2465,10 @@ int FreeList::FreeListLength(FreeListNode* cur) {
bool FreeList::IsVeryLong() {
- if (FreeListLength(small_list_) == kVeryLongFreeList) return true;
- if (FreeListLength(medium_list_) == kVeryLongFreeList) return true;
- if (FreeListLength(large_list_) == kVeryLongFreeList) return true;
- if (FreeListLength(huge_list_) == kVeryLongFreeList) return true;
+ if (small_list_.FreeListLength() == kVeryLongFreeList) return true;
+ if (medium_list_.FreeListLength() == kVeryLongFreeList) return true;
+ if (large_list_.FreeListLength() == kVeryLongFreeList) return true;
+ if (huge_list_.FreeListLength() == kVeryLongFreeList) return true;
return false;
}
@@ -2176,10 +2477,10 @@ bool FreeList::IsVeryLong() {
// on the free list, so it should not be called if FreeListLength returns
// kVeryLongFreeList.
intptr_t FreeList::SumFreeLists() {
- intptr_t sum = SumFreeList(small_list_);
- sum += SumFreeList(medium_list_);
- sum += SumFreeList(large_list_);
- sum += SumFreeList(huge_list_);
+ intptr_t sum = small_list_.SumFreeList();
+ sum += medium_list_.SumFreeList();
+ sum += large_list_.SumFreeList();
+ sum += huge_list_.SumFreeList();
return sum;
}
#endif
@@ -2200,9 +2501,9 @@ bool NewSpace::ReserveSpace(int bytes) {
Object* object = NULL;
if (!maybe->ToObject(&object)) return false;
HeapObject* allocation = HeapObject::cast(object);
- Address top = allocation_info_.top;
+ Address top = allocation_info_.top();
if ((top - bytes) == allocation->address()) {
- allocation_info_.top = allocation->address();
+ allocation_info_.set_top(allocation->address());
return true;
}
// There may be a borderline case here where the allocation succeeded, but
@@ -2248,9 +2549,9 @@ void PagedSpace::PrepareForMarkCompact() {
bool PagedSpace::ReserveSpace(int size_in_bytes) {
ASSERT(size_in_bytes <= AreaSize());
ASSERT(size_in_bytes == RoundSizeDownToObjectAlignment(size_in_bytes));
- Address current_top = allocation_info_.top;
+ Address current_top = allocation_info_.top();
Address new_top = current_top + size_in_bytes;
- if (new_top <= allocation_info_.limit) return true;
+ if (new_top <= allocation_info_.limit()) return true;
HeapObject* new_area = free_list_.Allocate(size_in_bytes);
if (new_area == NULL) new_area = SlowAllocateRaw(size_in_bytes);
@@ -2267,24 +2568,9 @@ bool PagedSpace::ReserveSpace(int size_in_bytes) {
}
-static void RepairFreeList(Heap* heap, FreeListNode* n) {
- while (n != NULL) {
- Map** map_location = reinterpret_cast<Map**>(n->address());
- if (*map_location == NULL) {
- *map_location = heap->free_space_map();
- } else {
- ASSERT(*map_location == heap->free_space_map());
- }
- n = n->next();
- }
-}
-
-
-void FreeList::RepairLists(Heap* heap) {
- RepairFreeList(heap, small_list_);
- RepairFreeList(heap, medium_list_);
- RepairFreeList(heap, large_list_);
- RepairFreeList(heap, huge_list_);
+intptr_t PagedSpace::SizeOfObjects() {
+ ASSERT(!heap()->IsSweepingComplete() || (unswept_free_bytes_ == 0));
+ return Size() - unswept_free_bytes_ - (limit() - top());
}
@@ -2307,7 +2593,7 @@ bool LargeObjectSpace::ReserveSpace(int bytes) {
bool PagedSpace::AdvanceSweeper(intptr_t bytes_to_sweep) {
- if (IsSweepingComplete()) return true;
+ if (IsLazySweepingComplete()) return true;
intptr_t freed_bytes = 0;
Page* p = first_unswept_page_;
@@ -2319,7 +2605,10 @@ bool PagedSpace::AdvanceSweeper(intptr_t bytes_to_sweep) {
reinterpret_cast<intptr_t>(p));
}
DecreaseUnsweptFreeBytes(p);
- freed_bytes += MarkCompactCollector::SweepConservatively(this, p);
+ freed_bytes +=
+ MarkCompactCollector::
+ SweepConservatively<MarkCompactCollector::SWEEP_SEQUENTIALLY>(
+ this, NULL, p);
}
p = next_page;
} while (p != anchor() && freed_bytes < bytes_to_sweep);
@@ -2332,21 +2621,41 @@ bool PagedSpace::AdvanceSweeper(intptr_t bytes_to_sweep) {
heap()->FreeQueuedChunks();
- return IsSweepingComplete();
+ return IsLazySweepingComplete();
}
void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
- if (allocation_info_.top >= allocation_info_.limit) return;
+ if (allocation_info_.top() >= allocation_info_.limit()) return;
- if (Page::FromAllocationTop(allocation_info_.top)->IsEvacuationCandidate()) {
+ if (Page::FromAllocationTop(allocation_info_.top())->
+ IsEvacuationCandidate()) {
// Create filler object to keep page iterable if it was iterable.
int remaining =
- static_cast<int>(allocation_info_.limit - allocation_info_.top);
- heap()->CreateFillerObjectAt(allocation_info_.top, remaining);
+ static_cast<int>(allocation_info_.limit() - allocation_info_.top());
+ heap()->CreateFillerObjectAt(allocation_info_.top(), remaining);
+
+ allocation_info_.set_top(NULL);
+ allocation_info_.set_limit(NULL);
+ }
+}
- allocation_info_.top = NULL;
- allocation_info_.limit = NULL;
+
+bool PagedSpace::EnsureSweeperProgress(intptr_t size_in_bytes) {
+ MarkCompactCollector* collector = heap()->mark_compact_collector();
+ if (collector->AreSweeperThreadsActivated()) {
+ if (collector->IsConcurrentSweepingInProgress()) {
+ if (collector->StealMemoryFromSweeperThreads(this) < size_in_bytes) {
+ if (!collector->sequential_sweeping()) {
+ collector->WaitUntilSweepingCompleted();
+ return true;
+ }
+ }
+ return false;
+ }
+ return true;
+ } else {
+ return AdvanceSweeper(size_in_bytes);
}
}
@@ -2354,10 +2663,13 @@ void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
// Allocation in this space has failed.
- // If there are unswept pages advance lazy sweeper then sweep one page before
- // allocating a new page.
- if (first_unswept_page_->is_valid()) {
- AdvanceSweeper(size_in_bytes);
+ // If there are unswept pages advance lazy sweeper a bounded number of times
+ // until we find a size_in_bytes contiguous piece of memory
+ const int kMaxSweepingTries = 5;
+ bool sweeping_complete = false;
+
+ for (int i = 0; i < kMaxSweepingTries && !sweeping_complete; i++) {
+ sweeping_complete = EnsureSweeperProgress(size_in_bytes);
// Retry the free list allocation.
HeapObject* object = free_list_.Allocate(size_in_bytes);
@@ -2374,13 +2686,14 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
// Try to expand the space and allocate in the new next page.
if (Expand()) {
+ ASSERT(CountTotalPages() > 1 || size_in_bytes <= free_list_.available());
return free_list_.Allocate(size_in_bytes);
}
// Last ditch, sweep all the remaining pages to try to find space. This may
// cause a pause.
- if (!IsSweepingComplete()) {
- AdvanceSweeper(kMaxInt);
+ if (!IsLazySweepingComplete()) {
+ EnsureSweeperProgress(kMaxInt);
// Retry the free list allocation.
HeapObject* object = free_list_.Allocate(size_in_bytes);
@@ -2393,11 +2706,10 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
#ifdef DEBUG
-void PagedSpace::ReportCodeStatistics() {
- Isolate* isolate = Isolate::Current();
+void PagedSpace::ReportCodeStatistics(Isolate* isolate) {
CommentStatistic* comments_statistics =
isolate->paged_space_comments_statistics();
- ReportCodeKindStatistics();
+ ReportCodeKindStatistics(isolate->code_kind_statistics());
PrintF("Code comment statistics (\" [ comment-txt : size/ "
"count (average)\"):\n");
for (int i = 0; i <= CommentStatistic::kMaxComments; i++) {
@@ -2411,11 +2723,10 @@ void PagedSpace::ReportCodeStatistics() {
}
-void PagedSpace::ResetCodeStatistics() {
- Isolate* isolate = Isolate::Current();
+void PagedSpace::ResetCodeStatistics(Isolate* isolate) {
CommentStatistic* comments_statistics =
isolate->paged_space_comments_statistics();
- ClearCodeKindStatistics();
+ ClearCodeKindStatistics(isolate->code_kind_statistics());
for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
comments_statistics[i].Clear();
}
@@ -2527,11 +2838,11 @@ void PagedSpace::ReportStatistics() {
Capacity(), Waste(), Available(), pct);
if (was_swept_conservatively_) return;
- ClearHistograms();
+ ClearHistograms(heap()->isolate());
HeapObjectIterator obj_it(this);
for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next())
CollectHistogramInfo(obj);
- ReportHistogram(true);
+ ReportHistogram(heap()->isolate(), true);
}
#endif
@@ -2560,21 +2871,23 @@ void FixedSpace::PrepareForMarkCompact() {
// the VerifyObject definition behind VERIFY_HEAP.
void MapSpace::VerifyObject(HeapObject* object) {
- // The object should be a map or a free-list node.
- CHECK(object->IsMap() || object->IsFreeSpace());
+ CHECK(object->IsMap());
}
// -----------------------------------------------------------------------------
-// GlobalPropertyCellSpace implementation
+// CellSpace and PropertyCellSpace implementation
// TODO(mvstanton): this is weird...the compiler can't make a vtable unless
// there is at least one non-inlined virtual function. I would prefer to hide
// the VerifyObject definition behind VERIFY_HEAP.
void CellSpace::VerifyObject(HeapObject* object) {
- // The object should be a global object property cell or a free-list node.
- CHECK(object->IsJSGlobalPropertyCell() ||
- object->map() == heap()->two_pointer_filler_map());
+ CHECK(object->IsCell());
+}
+
+
+void PropertyCellSpace::VerifyObject(HeapObject* object) {
+ CHECK(object->IsPropertyCell());
}
@@ -2698,6 +3011,18 @@ MaybeObject* LargeObjectSpace::AllocateRaw(int object_size,
}
+size_t LargeObjectSpace::CommittedPhysicalMemory() {
+ if (!VirtualMemory::HasLazyCommits()) return CommittedMemory();
+ size_t size = 0;
+ LargePage* current = first_page_;
+ while (current != NULL) {
+ size += current->CommittedPhysicalMemory();
+ current = current->next_page();
+ }
+ return size;
+}
+
+
// GC support
MaybeObject* LargeObjectSpace::FindObject(Address a) {
LargePage* page = FindPage(a);
@@ -2736,7 +3061,8 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
MarkBit mark_bit = Marking::MarkBitFrom(object);
if (mark_bit.Get()) {
mark_bit.Clear();
- MemoryChunk::IncrementLiveBytesFromGC(object->address(), -object->Size());
+ Page::FromAddress(object->address())->ResetProgressBar();
+ Page::FromAddress(object->address())->ResetLiveBytes();
previous = current;
current = current->next_page();
} else {
@@ -2853,7 +3179,7 @@ void LargeObjectSpace::Print() {
void LargeObjectSpace::ReportStatistics() {
PrintF(" size: %" V8_PTR_PREFIX "d\n", size_);
int num_objects = 0;
- ClearHistograms();
+ ClearHistograms(heap()->isolate());
LargeObjectIterator it(this);
for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
num_objects++;
@@ -2862,7 +3188,7 @@ void LargeObjectSpace::ReportStatistics() {
PrintF(" number of objects %d, "
"size of objects %" V8_PTR_PREFIX "d\n", num_objects, objects_size_);
- if (num_objects > 0) ReportHistogram(false);
+ if (num_objects > 0) ReportHistogram(heap()->isolate(), false);
}
diff --git a/deps/v8/src/spaces.h b/deps/v8/src/spaces.h
index c8ccc7b56..2cd92c59d 100644
--- a/deps/v8/src/spaces.h
+++ b/deps/v8/src/spaces.h
@@ -32,6 +32,8 @@
#include "hashmap.h"
#include "list.h"
#include "log.h"
+#include "platform/mutex.h"
+#include "v8utils.h"
namespace v8 {
namespace internal {
@@ -305,7 +307,7 @@ class MemoryChunk {
}
// Only works for addresses in pointer spaces, not data or code spaces.
- static inline MemoryChunk* FromAnyPointerAddress(Address addr);
+ static inline MemoryChunk* FromAnyPointerAddress(Heap* heap, Address addr);
Address address() { return reinterpret_cast<Address>(this); }
@@ -398,6 +400,12 @@ class MemoryChunk {
WAS_SWEPT_PRECISELY,
WAS_SWEPT_CONSERVATIVELY,
+      // Large objects can have a progress bar in their page header. These
+      // objects are scanned in increments and will be kept black while being
+      // scanned. Even if the mutator writes to them they will be kept black
+      // and a white-to-grey transition is performed on the value.
+ HAS_PROGRESS_BAR,
+
// Last flag, keep at bottom.
NUM_MEMORY_CHUNK_FLAGS
};
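The comment on HAS_PROGRESS_BAR above describes incremental scanning of large objects. As a rough, self-contained illustration (not V8 code), the progress bar is simply a byte offset stored in the page header that lets the marker resume a partially scanned object:

struct PageHeaderSketch {
  bool has_progress_bar = false;
  int progress_bar = 0;  // bytes of the object body already scanned
};

// Scan at most 'chunk' bytes per step; returns true once the object is done.
// The actual marking work is elided.
bool ScanLargeObjectIncrementally(PageHeaderSketch* header, int body_size,
                                  int chunk) {
  header->has_progress_bar = true;
  int start = header->progress_bar;
  int end = start + chunk;
  if (end > body_size) end = body_size;
  for (int i = start; i < end; i++) {
    // ... visit the slot at offset i, keeping the object black ...
  }
  header->progress_bar = end;
  return end == body_size;
}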
@@ -449,6 +457,18 @@ class MemoryChunk {
// Return all current flags.
intptr_t GetFlags() { return flags_; }
+ intptr_t parallel_sweeping() const {
+ return parallel_sweeping_;
+ }
+
+ void set_parallel_sweeping(intptr_t state) {
+ parallel_sweeping_ = state;
+ }
+
+ bool TryParallelSweeping() {
+ return NoBarrier_CompareAndSwap(&parallel_sweeping_, 1, 0) == 1;
+ }
+
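For context, TryParallelSweeping() above is a claim operation: whichever sweeper thread first swaps the state word wins the chunk. A minimal sketch of the same pattern with std::atomic follows; the meaning of the raw values 1 and 0 in the diff is an assumption here.

#include <atomic>

enum SweepStateSketch { kClaimedSketch = 0, kPendingSketch = 1 };

struct ChunkSketch {
  std::atomic<int> parallel_sweeping{kPendingSketch};

  // Returns true only for the single thread that moves the chunk from
  // pending to claimed; every other racer sees false.
  bool TryParallelSweeping() {
    int expected = kPendingSketch;
    return parallel_sweeping.compare_exchange_strong(expected, kClaimedSketch);
  }
};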
// Manage live byte count (count of bytes known to be live,
// because they are marked black).
void ResetLiveBytes() {
@@ -481,6 +501,29 @@ class MemoryChunk {
write_barrier_counter_ = counter;
}
+ int progress_bar() {
+ ASSERT(IsFlagSet(HAS_PROGRESS_BAR));
+ return progress_bar_;
+ }
+
+ void set_progress_bar(int progress_bar) {
+ ASSERT(IsFlagSet(HAS_PROGRESS_BAR));
+ progress_bar_ = progress_bar;
+ }
+
+ void ResetProgressBar() {
+ if (IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
+ set_progress_bar(0);
+ ClearFlag(MemoryChunk::HAS_PROGRESS_BAR);
+ }
+ }
+
+ bool IsLeftOfProgressBar(Object** slot) {
+ Address slot_address = reinterpret_cast<Address>(slot);
+ ASSERT(slot_address > this->address());
+ return (slot_address - (this->address() + kObjectStartOffset)) <
+ progress_bar();
+ }
static void IncrementLiveBytesFromGC(Address address, int by) {
MemoryChunk::FromAddress(address)->IncrementLiveBytes(by);
@@ -505,7 +548,9 @@ class MemoryChunk {
static const size_t kWriteBarrierCounterOffset =
kSlotsBufferOffset + kPointerSize + kPointerSize;
- static const size_t kHeaderSize = kWriteBarrierCounterOffset + kPointerSize;
+ static const size_t kHeaderSize = kWriteBarrierCounterOffset + kPointerSize +
+ kIntSize + kIntSize + kPointerSize +
+ 5 * kPointerSize;
static const int kBodyOffset =
CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize);
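The extra terms added to kHeaderSize appear to account for the new header fields declared further down in this diff; the mapping below is an inference from that field list, not something the patch states explicitly.

//   kIntSize         -> int progress_bar_;
//   kIntSize         -> int high_water_mark_;
//   kPointerSize     -> intptr_t parallel_sweeping_;
//   5 * kPointerSize -> the five free-list statistics counters
//                       (available_in_*_free_list_ and non_available_small_blocks_)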
@@ -616,6 +661,14 @@ class MemoryChunk {
int area_size() {
return static_cast<int>(area_end() - area_start());
}
+ bool CommitArea(size_t requested);
+
+ // Approximate amount of physical memory committed for this chunk.
+ size_t CommittedPhysicalMemory() {
+ return high_water_mark_;
+ }
+
+ static inline void UpdateHighWaterMark(Address mark);
protected:
MemoryChunk* next_chunk_;
@@ -642,6 +695,21 @@ class MemoryChunk {
SlotsBuffer* slots_buffer_;
SkipList* skip_list_;
intptr_t write_barrier_counter_;
+ // Used by the incremental marker to keep track of the scanning progress in
+ // large objects that have a progress bar and are scanned in increments.
+ int progress_bar_;
+  // Assuming the initial allocation on a page is sequential, this counts the
+  // highest number of bytes ever allocated on the page.
+ int high_water_mark_;
+
+ intptr_t parallel_sweeping_;
+
+ // PagedSpace free-list statistics.
+ intptr_t available_in_small_free_list_;
+ intptr_t available_in_medium_free_list_;
+ intptr_t available_in_large_free_list_;
+ intptr_t available_in_huge_free_list_;
+ intptr_t non_available_small_blocks_;
static MemoryChunk* Initialize(Heap* heap,
Address base,
@@ -714,8 +782,12 @@ class Page : public MemoryChunk {
// Object area size in bytes.
static const int kNonCodeObjectAreaSize = kPageSize - kObjectStartOffset;
- // Maximum object size that fits in a page.
- static const int kMaxNonCodeHeapObjectSize = kNonCodeObjectAreaSize;
+ // Maximum object size that fits in a page. Objects larger than that size
+ // are allocated in large object space and are never moved in memory. This
+ // also applies to new space allocation, since objects are never migrated
+ // from new space to large object space. Takes double alignment into account.
+ static const int kMaxNonCodeHeapObjectSize =
+ kNonCodeObjectAreaSize - kPointerSize;
// Page size mask.
static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
@@ -739,6 +811,21 @@ class Page : public MemoryChunk {
void ClearSweptPrecisely() { ClearFlag(WAS_SWEPT_PRECISELY); }
void ClearSweptConservatively() { ClearFlag(WAS_SWEPT_CONSERVATIVELY); }
+ void ResetFreeListStatistics();
+
+#define FRAGMENTATION_STATS_ACCESSORS(type, name) \
+ type name() { return name##_; } \
+ void set_##name(type name) { name##_ = name; } \
+ void add_##name(type name) { name##_ += name; }
+
+ FRAGMENTATION_STATS_ACCESSORS(intptr_t, non_available_small_blocks)
+ FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_small_free_list)
+ FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_medium_free_list)
+ FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_large_free_list)
+ FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_huge_free_list)
+
+#undef FRAGMENTATION_STATS_ACCESSORS
+
#ifdef DEBUG
void Print();
#endif // DEBUG
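To make the macro above concrete, FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_small_free_list) expands (modulo whitespace) to:

//   intptr_t available_in_small_free_list() {
//     return available_in_small_free_list_;
//   }
//   void set_available_in_small_free_list(intptr_t available_in_small_free_list) {
//     available_in_small_free_list_ = available_in_small_free_list;
//   }
//   void add_available_in_small_free_list(intptr_t available_in_small_free_list) {
//     available_in_small_free_list_ += available_in_small_free_list;
//   }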
@@ -836,6 +923,10 @@ class CodeRange {
void TearDown();
bool exists() { return this != NULL && code_range_ != NULL; }
+ Address start() {
+ if (this == NULL || code_range_ == NULL) return NULL;
+ return static_cast<Address>(code_range_->address());
+ }
bool contains(Address address) {
if (this == NULL || code_range_ == NULL) return false;
Address start = static_cast<Address>(code_range_->address());
@@ -845,8 +936,11 @@ class CodeRange {
// Allocates a chunk of memory from the large-object portion of
// the code range. On platforms with no separate code range, should
// not be called.
- MUST_USE_RESULT Address AllocateRawMemory(const size_t requested,
+ MUST_USE_RESULT Address AllocateRawMemory(const size_t requested_size,
+ const size_t commit_size,
size_t* allocated);
+ bool CommitRawMemory(Address start, size_t length);
+ bool UncommitRawMemory(Address start, size_t length);
void FreeRawMemory(Address buf, size_t length);
private:
@@ -989,23 +1083,37 @@ class MemoryAllocator {
return (Available() / Page::kPageSize) * Page::kMaxNonCodeHeapObjectSize;
}
+ // Returns an indication of whether a pointer is in a space that has
+ // been allocated by this MemoryAllocator.
+ V8_INLINE bool IsOutsideAllocatedSpace(const void* address) const {
+ return address < lowest_ever_allocated_ ||
+ address >= highest_ever_allocated_;
+ }
+
#ifdef DEBUG
// Reports statistic info of the space.
void ReportStatistics();
#endif
- MemoryChunk* AllocateChunk(intptr_t body_size,
+  // Returns a MemoryChunk in which the memory region from commit_area_size to
+  // reserve_area_size of the chunk area is reserved but not committed; it
+  // can be committed later by calling MemoryChunk::CommitArea.
+ MemoryChunk* AllocateChunk(intptr_t reserve_area_size,
+ intptr_t commit_area_size,
Executability executable,
Space* space);
Address ReserveAlignedMemory(size_t requested,
size_t alignment,
VirtualMemory* controller);
- Address AllocateAlignedMemory(size_t requested,
+ Address AllocateAlignedMemory(size_t reserve_size,
+ size_t commit_size,
size_t alignment,
Executability executable,
VirtualMemory* controller);
+ bool CommitMemory(Address addr, size_t size, Executability executable);
+
void FreeMemory(VirtualMemory* reservation, Executability executable);
void FreeMemory(Address addr, size_t size, Executability executable);
@@ -1051,9 +1159,10 @@ class MemoryAllocator {
return CodePageAreaEndOffset() - CodePageAreaStartOffset();
}
- MUST_USE_RESULT static bool CommitCodePage(VirtualMemory* vm,
- Address start,
- size_t size);
+ MUST_USE_RESULT bool CommitExecutableMemory(VirtualMemory* vm,
+ Address start,
+ size_t commit_size,
+ size_t reserved_size);
private:
Isolate* isolate_;
@@ -1068,6 +1177,14 @@ class MemoryAllocator {
// Allocated executable space size in bytes.
size_t size_executable_;
+ // We keep the lowest and highest addresses allocated as a quick way
+ // of determining that pointers are outside the heap. The estimate is
+  // conservative, i.e. not all addresses in 'allocated' space are allocated
+ // to our heap. The range is [lowest, highest[, inclusive on the low end
+ // and exclusive on the high end.
+ void* lowest_ever_allocated_;
+ void* highest_ever_allocated_;
+
struct MemoryAllocationCallbackRegistration {
MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback,
ObjectSpace space,
@@ -1090,6 +1207,11 @@ class MemoryAllocator {
Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
PagedSpace* owner);
+ void UpdateAllocatedSpaceLimits(void* low, void* high) {
+ lowest_ever_allocated_ = Min(lowest_ever_allocated_, low);
+ highest_ever_allocated_ = Max(highest_ever_allocated_, high);
+ }
+
DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
};
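A self-contained sketch (not V8 code) of the fast filter formed by IsOutsideAllocatedSpace() and UpdateAllocatedSpaceLimits() above: only the lowest and highest addresses ever handed out are tracked, so an address outside [lowest, highest) is definitely not heap memory, while an address inside the range may still be unallocated.

#include <algorithm>
#include <cstdint>

struct AllocatedRangeSketch {
  const void* lowest;
  const void* highest;

  AllocatedRangeSketch()
      : lowest(reinterpret_cast<const void*>(~static_cast<uintptr_t>(0))),
        highest(nullptr) {}

  void Update(const void* low, const void* high) {
    lowest = std::min(lowest, low);
    highest = std::max(highest, high);
  }

  bool IsOutside(const void* address) const {
    return address < lowest || address >= highest;
  }
};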
@@ -1195,18 +1317,53 @@ class PageIterator BASE_EMBEDDED {
// space.
class AllocationInfo {
public:
- AllocationInfo() : top(NULL), limit(NULL) {
+ AllocationInfo() : top_(NULL), limit_(NULL) {
+ }
+
+ INLINE(void set_top(Address top)) {
+ SLOW_ASSERT(top == NULL ||
+ (reinterpret_cast<intptr_t>(top) & HeapObjectTagMask()) == 0);
+ top_ = top;
+ }
+
+ INLINE(Address top()) const {
+ SLOW_ASSERT(top_ == NULL ||
+ (reinterpret_cast<intptr_t>(top_) & HeapObjectTagMask()) == 0);
+ return top_;
+ }
+
+ Address* top_address() {
+ return &top_;
+ }
+
+ INLINE(void set_limit(Address limit)) {
+ SLOW_ASSERT(limit == NULL ||
+ (reinterpret_cast<intptr_t>(limit) & HeapObjectTagMask()) == 0);
+ limit_ = limit;
}
- Address top; // Current allocation top.
- Address limit; // Current allocation limit.
+ INLINE(Address limit()) const {
+ SLOW_ASSERT(limit_ == NULL ||
+ (reinterpret_cast<intptr_t>(limit_) & HeapObjectTagMask()) == 0);
+ return limit_;
+ }
+
+ Address* limit_address() {
+ return &limit_;
+ }
#ifdef DEBUG
bool VerifyPagedAllocation() {
- return (Page::FromAllocationTop(top) == Page::FromAllocationTop(limit))
- && (top <= limit);
+ return (Page::FromAllocationTop(top_) == Page::FromAllocationTop(limit_))
+ && (top_ <= limit_);
}
#endif
+
+ private:
+ // Current allocation top.
+ Address top_;
+ // Current allocation limit.
+ Address limit_;
};
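The new top_address()/limit_address() accessors expose the raw fields, which lets callers (for example, inline allocation in generated code) bump-allocate directly against them. A minimal sketch of that bump-pointer fast path (not V8 code; the slow-path fallback is only indicated):

#include <cstddef>

inline char* TryBumpAllocate(char** top, char* limit, size_t size_in_bytes) {
  char* result = *top;
  if (limit - result < static_cast<ptrdiff_t>(size_in_bytes)) {
    return nullptr;  // not enough room; caller takes the slow path
  }
  *top = result + size_in_bytes;  // bump the allocation top
  return result;
}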
@@ -1339,6 +1496,57 @@ class FreeListNode: public HeapObject {
};
+// The free list category holds a pointer to the top element and a pointer to
+// the end element of the linked list of free memory blocks.
+class FreeListCategory {
+ public:
+ FreeListCategory() :
+ top_(NULL),
+ end_(NULL),
+ available_(0) {}
+
+ intptr_t Concatenate(FreeListCategory* category);
+
+ void Reset();
+
+ void Free(FreeListNode* node, int size_in_bytes);
+
+ FreeListNode* PickNodeFromList(int *node_size);
+ FreeListNode* PickNodeFromList(int size_in_bytes, int *node_size);
+
+ intptr_t EvictFreeListItemsInList(Page* p);
+
+ void RepairFreeList(Heap* heap);
+
+ FreeListNode** GetTopAddress() { return &top_; }
+ FreeListNode* top() const { return top_; }
+ void set_top(FreeListNode* top) { top_ = top; }
+
+ FreeListNode** GetEndAddress() { return &end_; }
+ FreeListNode* end() const { return end_; }
+ void set_end(FreeListNode* end) { end_ = end; }
+
+ int* GetAvailableAddress() { return &available_; }
+ int available() const { return available_; }
+ void set_available(int available) { available_ = available; }
+
+ Mutex* mutex() { return &mutex_; }
+
+#ifdef DEBUG
+ intptr_t SumFreeList();
+ int FreeListLength();
+#endif
+
+ private:
+ FreeListNode* top_;
+ FreeListNode* end_;
+ Mutex mutex_;
+
+ // Total available bytes in all blocks of this free list category.
+ int available_;
+};
+
+
// The free list for the old space. The free list is organized in such a way
// as to encourage objects allocated around the same time to be near each
// other. The normal way to allocate is intended to be by bumping a 'top'
@@ -1366,11 +1574,16 @@ class FreeList BASE_EMBEDDED {
public:
explicit FreeList(PagedSpace* owner);
+ intptr_t Concatenate(FreeList* free_list);
+
// Clear the free list.
void Reset();
// Return the number of bytes available on the free list.
- intptr_t available() { return available_; }
+ intptr_t available() {
+ return small_list_.available() + medium_list_.available() +
+ large_list_.available() + huge_list_.available();
+ }
// Place a node on the free list. The block of size 'size_in_bytes'
// starting at 'start' is placed on the free list. The return value is the
@@ -1388,8 +1601,6 @@ class FreeList BASE_EMBEDDED {
#ifdef DEBUG
void Zap();
- static intptr_t SumFreeList(FreeListNode* node);
- static int FreeListLength(FreeListNode* cur);
intptr_t SumFreeLists();
bool IsVeryLong();
#endif
@@ -1397,36 +1608,23 @@ class FreeList BASE_EMBEDDED {
// Used after booting the VM.
void RepairLists(Heap* heap);
- struct SizeStats {
- intptr_t Total() {
- return small_size_ + medium_size_ + large_size_ + huge_size_;
- }
-
- intptr_t small_size_;
- intptr_t medium_size_;
- intptr_t large_size_;
- intptr_t huge_size_;
- };
-
- void CountFreeListItems(Page* p, SizeStats* sizes);
-
intptr_t EvictFreeListItems(Page* p);
+ FreeListCategory* small_list() { return &small_list_; }
+ FreeListCategory* medium_list() { return &medium_list_; }
+ FreeListCategory* large_list() { return &large_list_; }
+ FreeListCategory* huge_list() { return &huge_list_; }
+
private:
// The size range of blocks, in bytes.
static const int kMinBlockSize = 3 * kPointerSize;
static const int kMaxBlockSize = Page::kMaxNonCodeHeapObjectSize;
- FreeListNode* PickNodeFromList(FreeListNode** list, int* node_size);
-
FreeListNode* FindNodeFor(int size_in_bytes, int* node_size);
PagedSpace* owner_;
Heap* heap_;
- // Total available bytes in all blocks on this free list.
- int available_;
-
static const int kSmallListMin = 0x20 * kPointerSize;
static const int kSmallListMax = 0xff * kPointerSize;
static const int kMediumListMax = 0x7ff * kPointerSize;
@@ -1434,10 +1632,10 @@ class FreeList BASE_EMBEDDED {
static const int kSmallAllocationMax = kSmallListMin - kPointerSize;
static const int kMediumAllocationMax = kSmallListMax;
static const int kLargeAllocationMax = kMediumListMax;
- FreeListNode* small_list_;
- FreeListNode* medium_list_;
- FreeListNode* large_list_;
- FreeListNode* huge_list_;
+ FreeListCategory small_list_;
+ FreeListCategory medium_list_;
+ FreeListCategory large_list_;
+ FreeListCategory huge_list_;
DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList);
};
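A hedged sketch of the size-class routing implied by the constants in this hunk (kSmallListMin, kSmallListMax, kMediumListMax); the treatment of blocks below kSmallListMin and the large/huge boundary fall outside the quoted context and are only approximated here.

enum FreeListKind { kTooSmall, kSmall, kMedium, kLargeOrHuge };

inline FreeListKind SelectFreeList(int size_in_bytes,
                                   int small_min,   // e.g. kSmallListMin
                                   int small_max,   // e.g. kSmallListMax
                                   int medium_max)  // e.g. kMediumListMax
{
  if (size_in_bytes < small_min) return kTooSmall;  // approximation
  if (size_in_bytes <= small_max) return kSmall;
  if (size_in_bytes <= medium_max) return kMedium;
  return kLargeOrHuge;
}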
@@ -1491,6 +1689,23 @@ class PagedSpace : public Space {
// spaces this equals the capacity.
intptr_t CommittedMemory() { return Capacity(); }
+ // Approximate amount of physical memory committed for this space.
+ size_t CommittedPhysicalMemory();
+
+ struct SizeStats {
+ intptr_t Total() {
+ return small_size_ + medium_size_ + large_size_ + huge_size_;
+ }
+
+ intptr_t small_size_;
+ intptr_t medium_size_;
+ intptr_t large_size_;
+ intptr_t huge_size_;
+ };
+
+ void ObtainFreeListStatistics(Page* p, SizeStats* sizes);
+ void ResetFreeListStatistics();
+
// Sets the capacity, the available space and the wasted space to zero.
// The stats are rebuilt during sweeping by adding each page to the
// capacity and the size when it is encountered. As free spaces are
@@ -1498,6 +1713,12 @@ class PagedSpace : public Space {
// to the available and wasted totals.
void ClearStats() {
accounting_stats_.ClearSizeWaste();
+ ResetFreeListStatistics();
+ }
+
+  // Increases the number of available bytes of this space.
+ void AddToAccountingStats(intptr_t bytes) {
+ accounting_stats_.DeallocateBytes(bytes);
}
// Available bytes without growing. These are the bytes on the free list.
@@ -1513,10 +1734,7 @@ class PagedSpace : public Space {
// As size, but the bytes in lazily swept pages are estimated and the bytes
// in the current linear allocation area are not included.
- virtual intptr_t SizeOfObjects() {
- ASSERT(!IsSweepingComplete() || (unswept_free_bytes_ == 0));
- return Size() - unswept_free_bytes_ - (limit() - top());
- }
+ virtual intptr_t SizeOfObjects();
// Wasted bytes in this space. These are just the bytes that were thrown away
// due to being too small to use for allocation. They do not include the
@@ -1524,12 +1742,29 @@ class PagedSpace : public Space {
virtual intptr_t Waste() { return accounting_stats_.Waste(); }
// Returns the allocation pointer in this space.
- Address top() { return allocation_info_.top; }
- Address limit() { return allocation_info_.limit; }
+ Address top() { return allocation_info_.top(); }
+ Address limit() { return allocation_info_.limit(); }
+
+ // The allocation top address.
+ Address* allocation_top_address() {
+ return allocation_info_.top_address();
+ }
+
+ // The allocation limit address.
+ Address* allocation_limit_address() {
+ return allocation_info_.limit_address();
+ }
+
+ enum AllocationType {
+ NEW_OBJECT,
+ MOVE_OBJECT
+ };
// Allocate the requested number of bytes in the space if possible, return a
// failure object if not.
- MUST_USE_RESULT inline MaybeObject* AllocateRaw(int size_in_bytes);
+ MUST_USE_RESULT inline MaybeObject* AllocateRaw(
+ int size_in_bytes,
+ AllocationType event = NEW_OBJECT);
virtual bool ReserveSpace(int bytes);
@@ -1551,8 +1786,9 @@ class PagedSpace : public Space {
void SetTop(Address top, Address limit) {
ASSERT(top == limit ||
Page::FromAddress(top) == Page::FromAddress(limit - 1));
- allocation_info_.top = top;
- allocation_info_.limit = limit;
+ MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
+ allocation_info_.set_top(top);
+ allocation_info_.set_limit(limit);
}
void Allocate(int bytes) {
@@ -1564,10 +1800,7 @@ class PagedSpace : public Space {
}
// Releases an unused page and shrinks the space.
- void ReleasePage(Page* page);
-
- // Releases all of the unused pages.
- void ReleaseAllUnusedPages();
+ void ReleasePage(Page* page, bool unlink);
// The dummy page that anchors the linked list of pages.
Page* anchor() { return &anchor_; }
@@ -1590,8 +1823,8 @@ class PagedSpace : public Space {
// Report code object related statistics
void CollectCodeStatistics();
- static void ReportCodeStatistics();
- static void ResetCodeStatistics();
+ static void ReportCodeStatistics(Isolate* isolate);
+ static void ResetCodeStatistics(Isolate* isolate);
#endif
bool was_swept_conservatively() { return was_swept_conservatively_; }
@@ -1611,7 +1844,7 @@ class PagedSpace : public Space {
first_unswept_page_ = first;
}
- void IncrementUnsweptFreeBytes(int by) {
+ void IncrementUnsweptFreeBytes(intptr_t by) {
unswept_free_bytes_ += by;
}
@@ -1620,24 +1853,33 @@ class PagedSpace : public Space {
unswept_free_bytes_ += (p->area_size() - p->LiveBytes());
}
+ void DecrementUnsweptFreeBytes(intptr_t by) {
+ unswept_free_bytes_ -= by;
+ }
+
void DecreaseUnsweptFreeBytes(Page* p) {
ASSERT(ShouldBeSweptLazily(p));
unswept_free_bytes_ -= (p->area_size() - p->LiveBytes());
}
+ void ResetUnsweptFreeBytes() {
+ unswept_free_bytes_ = 0;
+ }
+
bool AdvanceSweeper(intptr_t bytes_to_sweep);
- bool IsSweepingComplete() {
+  // When parallel sweeper threads are active and the main thread has finished
+  // its sweeping phase, this function waits for them to complete; otherwise
+  // AdvanceSweeper is called with size_in_bytes.
+ bool EnsureSweeperProgress(intptr_t size_in_bytes);
+
+ bool IsLazySweepingComplete() {
return !first_unswept_page_->is_valid();
}
Page* FirstPage() { return anchor_.next_page(); }
Page* LastPage() { return anchor_.prev_page(); }
- void CountFreeListItems(Page* p, FreeList::SizeStats* sizes) {
- free_list_.CountFreeListItems(p, sizes);
- }
-
void EvictEvacuationCandidatesFromFreeLists();
bool CanExpand();
@@ -1651,6 +1893,8 @@ class PagedSpace : public Space {
}
protected:
+ FreeList* free_list() { return &free_list_; }
+
int area_size_;
// Maximum capacity of this space.
@@ -1700,6 +1944,7 @@ class PagedSpace : public Space {
MUST_USE_RESULT virtual HeapObject* SlowAllocateRaw(int size_in_bytes);
friend class PageIterator;
+ friend class SweeperThread;
};
@@ -1962,6 +2207,9 @@ class SemiSpace : public Space {
static void Swap(SemiSpace* from, SemiSpace* to);
+ // Approximate amount of physical memory committed for this space.
+ size_t CommittedPhysicalMemory();
+
private:
// Flips the semispace between being from-space and to-space.
// Copies the flags into the masked positions on all pages in the space.
@@ -2159,6 +2407,9 @@ class NewSpace : public Space {
return Capacity();
}
+ // Approximate amount of physical memory committed for this space.
+ size_t CommittedPhysicalMemory();
+
// Return the available bytes without growing.
intptr_t Available() {
return Capacity() - Size();
@@ -2178,9 +2429,15 @@ class NewSpace : public Space {
// Return the address of the allocation pointer in the active semispace.
Address top() {
- ASSERT(to_space_.current_page()->ContainsLimit(allocation_info_.top));
- return allocation_info_.top;
+ ASSERT(to_space_.current_page()->ContainsLimit(allocation_info_.top()));
+ return allocation_info_.top();
+ }
+
+ void set_top(Address top) {
+ ASSERT(to_space_.current_page()->ContainsLimit(top));
+ allocation_info_.set_top(top);
}
+
// Return the address of the first object in the active semispace.
Address bottom() { return to_space_.space_start(); }
@@ -2205,9 +2462,15 @@ class NewSpace : public Space {
return reinterpret_cast<Address>(index << kPointerSizeLog2);
}
- // The allocation top and limit addresses.
- Address* allocation_top_address() { return &allocation_info_.top; }
- Address* allocation_limit_address() { return &allocation_info_.limit; }
+ // The allocation top and limit address.
+ Address* allocation_top_address() {
+ return allocation_info_.top_address();
+ }
+
+ // The allocation limit address.
+ Address* allocation_limit_address() {
+ return allocation_info_.limit_address();
+ }
MUST_USE_RESULT INLINE(MaybeObject* AllocateRaw(int size_in_bytes));
@@ -2217,13 +2480,14 @@ class NewSpace : public Space {
void LowerInlineAllocationLimit(intptr_t step) {
inline_allocation_limit_step_ = step;
if (step == 0) {
- allocation_info_.limit = to_space_.page_high();
+ allocation_info_.set_limit(to_space_.page_high());
} else {
- allocation_info_.limit = Min(
- allocation_info_.top + inline_allocation_limit_step_,
- allocation_info_.limit);
+ Address new_limit = Min(
+ allocation_info_.top() + inline_allocation_limit_step_,
+ allocation_info_.limit());
+ allocation_info_.set_limit(new_limit);
}
- top_on_previous_step_ = allocation_info_.top;
+ top_on_previous_step_ = allocation_info_.top();
}
// Get the extent of the inactive semispace (for use as a marking stack,
@@ -2259,11 +2523,6 @@ class NewSpace : public Space {
virtual bool ReserveSpace(int bytes);
- // Resizes a sequential string which must be the most recent thing that was
- // allocated in new space.
- template <typename StringType>
- inline void ShrinkStringAtAllocationBoundary(String* string, int len);
-
#ifdef VERIFY_HEAP
// Verify the active semispace.
virtual void Verify();
@@ -2375,9 +2634,9 @@ class OldSpace : public PagedSpace {
// For contiguous spaces, top should be in the space (or at the end) and limit
// should be the end of the space.
#define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \
- SLOW_ASSERT((space).page_low() <= (info).top \
- && (info).top <= (space).page_high() \
- && (info).limit <= (space).page_high())
+ SLOW_ASSERT((space).page_low() <= (info).top() \
+ && (info).top() <= (space).page_high() \
+ && (info).limit() <= (space).page_high())
// -----------------------------------------------------------------------------
@@ -2388,11 +2647,9 @@ class FixedSpace : public PagedSpace {
FixedSpace(Heap* heap,
intptr_t max_capacity,
AllocationSpace id,
- int object_size_in_bytes,
- const char* name)
+ int object_size_in_bytes)
: PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE),
- object_size_in_bytes_(object_size_in_bytes),
- name_(name) {
+ object_size_in_bytes_(object_size_in_bytes) {
page_extra_ = Page::kNonCodeObjectAreaSize % object_size_in_bytes;
}
@@ -2409,9 +2666,6 @@ class FixedSpace : public PagedSpace {
private:
// The size of objects in this space.
int object_size_in_bytes_;
-
- // The name of this space.
- const char* name_;
};
@@ -2422,7 +2676,7 @@ class MapSpace : public FixedSpace {
public:
// Creates a map space object with a maximum capacity.
MapSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
- : FixedSpace(heap, max_capacity, id, Map::kSize, "map"),
+ : FixedSpace(heap, max_capacity, id, Map::kSize),
max_map_space_pages_(kMaxMapPageIndex - 1) {
}
@@ -2457,20 +2711,20 @@ class MapSpace : public FixedSpace {
// -----------------------------------------------------------------------------
-// Old space for all global object property cell objects
+// Old space for simple property cell objects
class CellSpace : public FixedSpace {
public:
// Creates a property cell space object with a maximum capacity.
CellSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
- : FixedSpace(heap, max_capacity, id, JSGlobalPropertyCell::kSize, "cell")
+ : FixedSpace(heap, max_capacity, id, Cell::kSize)
{}
virtual int RoundSizeDownToObjectAlignment(int size) {
- if (IsPowerOf2(JSGlobalPropertyCell::kSize)) {
- return RoundDown(size, JSGlobalPropertyCell::kSize);
+ if (IsPowerOf2(Cell::kSize)) {
+ return RoundDown(size, Cell::kSize);
} else {
- return (size / JSGlobalPropertyCell::kSize) * JSGlobalPropertyCell::kSize;
+ return (size / Cell::kSize) * Cell::kSize;
}
}
@@ -2483,6 +2737,33 @@ class CellSpace : public FixedSpace {
// -----------------------------------------------------------------------------
+// Old space for all global object property cell objects
+
+class PropertyCellSpace : public FixedSpace {
+ public:
+ // Creates a property cell space object with a maximum capacity.
+ PropertyCellSpace(Heap* heap, intptr_t max_capacity,
+ AllocationSpace id)
+ : FixedSpace(heap, max_capacity, id, PropertyCell::kSize)
+ {}
+
+ virtual int RoundSizeDownToObjectAlignment(int size) {
+ if (IsPowerOf2(PropertyCell::kSize)) {
+ return RoundDown(size, PropertyCell::kSize);
+ } else {
+ return (size / PropertyCell::kSize) * PropertyCell::kSize;
+ }
+ }
+
+ protected:
+ virtual void VerifyObject(HeapObject* obj);
+
+ public:
+ TRACK_MEMORY("PropertyCellSpace")
+};
+
+
+// -----------------------------------------------------------------------------
// Large objects ( > Page::kMaxHeapObjectSize ) are allocated and managed by
// the large object space. A large object is allocated from OS heap with
// extra padding bytes (Page::kPageSize + Page::kObjectStartOffset).
@@ -2525,6 +2806,9 @@ class LargeObjectSpace : public Space {
return Size();
}
+ // Approximate amount of physical memory committed for this space.
+ size_t CommittedPhysicalMemory();
+
int PageCount() {
return page_count_;
}
diff --git a/deps/v8/src/splay-tree-inl.h b/deps/v8/src/splay-tree-inl.h
index 4eca71d10..42024756e 100644
--- a/deps/v8/src/splay-tree-inl.h
+++ b/deps/v8/src/splay-tree-inl.h
@@ -91,6 +91,12 @@ bool SplayTree<Config, Allocator>::FindInternal(const Key& key) {
template<typename Config, class Allocator>
+bool SplayTree<Config, Allocator>::Contains(const Key& key) {
+ return FindInternal(key);
+}
+
+
+template<typename Config, class Allocator>
bool SplayTree<Config, Allocator>::Find(const Key& key, Locator* locator) {
if (FindInternal(key)) {
locator->bind(root_);
@@ -293,9 +299,10 @@ void SplayTree<Config, Allocator>::ForEach(Callback* callback) {
template <typename Config, class Allocator> template <class Callback>
void SplayTree<Config, Allocator>::ForEachNode(Callback* callback) {
+ if (root_ == NULL) return;
// Pre-allocate some space for tiny trees.
List<Node*, Allocator> nodes_to_visit(10, allocator_);
- if (root_ != NULL) nodes_to_visit.Add(root_, allocator_);
+ nodes_to_visit.Add(root_, allocator_);
int pos = 0;
while (pos < nodes_to_visit.length()) {
Node* node = nodes_to_visit[pos++];
diff --git a/deps/v8/src/splay-tree.h b/deps/v8/src/splay-tree.h
index 8844d8a8f..f393027a8 100644
--- a/deps/v8/src/splay-tree.h
+++ b/deps/v8/src/splay-tree.h
@@ -39,9 +39,9 @@ namespace internal {
//
// typedef Key: the key type
// typedef Value: the value type
-// static const kNoKey: the dummy key used when no key is set
-// static const kNoValue: the dummy value used to initialize nodes
-// int (Compare)(Key& a, Key& b) -> {-1, 0, 1}: comparison function
+// static const Key kNoKey: the dummy key used when no key is set
+// static Value kNoValue(): the dummy value used to initialize nodes
+// static int (Compare)(Key& a, Key& b) -> {-1, 0, 1}: comparison function
//
// The tree is also parameterized by an allocation policy
// (Allocator). The policy is used for allocating lists in the C free
@@ -74,6 +74,11 @@ class SplayTree {
UNREACHABLE();
}
+ AllocationPolicy allocator() { return allocator_; }
+
+ // Checks if there is a mapping for the key.
+ bool Contains(const Key& key);
+
// Inserts the given key in this tree with the given value. Returns
// true if a node was inserted, otherwise false. If found the locator
// is enabled and provides access to the mapping for the key.
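A hypothetical Config type satisfying the contract documented at the top of this header (Key, Value, kNoKey, kNoValue(), Compare); the int/void* choice and the name IntKeyConfig are illustrative only, and const references in Compare are used purely for convenience.

struct IntKeyConfig {
  typedef int Key;
  typedef void* Value;
  static const Key kNoKey = -1;                 // dummy key when no key is set
  static Value kNoValue() { return nullptr; }   // dummy value for node init
  static int Compare(const Key& a, const Key& b) {
    if (a < b) return -1;
    if (a > b) return 1;
    return 0;
  }
};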
@@ -104,6 +109,9 @@ class SplayTree {
// Remove the node with the given key from the tree.
bool Remove(const Key& key);
+ // Remove all keys from the tree.
+ void Clear() { ResetRoot(); }
+
bool is_empty() { return root_ == NULL; }
// Perform the splay operation for the given key. Moves the node with
diff --git a/deps/v8/src/store-buffer-inl.h b/deps/v8/src/store-buffer-inl.h
index dd65cbcc9..7e5432c84 100644
--- a/deps/v8/src/store-buffer-inl.h
+++ b/deps/v8/src/store-buffer-inl.h
@@ -41,6 +41,7 @@ Address StoreBuffer::TopAddress() {
void StoreBuffer::Mark(Address addr) {
ASSERT(!heap_->cell_space()->Contains(addr));
ASSERT(!heap_->code_space()->Contains(addr));
+ ASSERT(!heap_->old_data_space()->Contains(addr));
Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());
*top++ = addr;
heap_->public_set_store_buffer_top(top);
@@ -67,13 +68,21 @@ void StoreBuffer::EnterDirectlyIntoStoreBuffer(Address addr) {
if (top >= old_limit_) {
ASSERT(callback_ != NULL);
(*callback_)(heap_,
- MemoryChunk::FromAnyPointerAddress(addr),
+ MemoryChunk::FromAnyPointerAddress(heap_, addr),
kStoreBufferFullEvent);
}
}
}
+void StoreBuffer::ClearDeadObject(HeapObject* object) {
+ Address& map_field = Memory::Address_at(object->address());
+ if (heap_->map_space()->Contains(map_field)) {
+ map_field = NULL;
+ }
+}
+
+
} } // namespace v8::internal
#endif // V8_STORE_BUFFER_INL_H_
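A conceptual sketch (not V8 code) of what ClearDeadObject() above relies on: after a scavenge, an object copy that was evacuated has had its first word overwritten with a forwarding pointer, while a copy that was not promoted still holds its map pointer, which points into map space. Nulling out a first word that still points into map space therefore lazily marks the stale copy as dead without touching surviving objects.

struct ObjectWordSketch {
  void* first_word;  // map pointer for dead copies, forwarding pointer otherwise
};

inline void ClearIfStillPointsIntoMapSpace(ObjectWordSketch* object,
                                           bool (*in_map_space)(void*)) {
  if (in_map_space(object->first_word)) {
    object->first_word = nullptr;  // the copy was not promoted; mark it dead
  }
}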
diff --git a/deps/v8/src/store-buffer.cc b/deps/v8/src/store-buffer.cc
index 66488ae50..22a546742 100644
--- a/deps/v8/src/store-buffer.cc
+++ b/deps/v8/src/store-buffer.cc
@@ -25,9 +25,11 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include "v8.h"
-
#include "store-buffer.h"
+
+#include <algorithm>
+
+#include "v8.h"
#include "store-buffer-inl.h"
#include "v8-counters.h"
@@ -119,36 +121,10 @@ void StoreBuffer::TearDown() {
void StoreBuffer::StoreBufferOverflow(Isolate* isolate) {
isolate->heap()->store_buffer()->Compact();
+ isolate->counters()->store_buffer_overflows()->Increment();
}
-#if V8_TARGET_ARCH_X64
-static int CompareAddresses(const void* void_a, const void* void_b) {
- intptr_t a =
- reinterpret_cast<intptr_t>(*reinterpret_cast<const Address*>(void_a));
- intptr_t b =
- reinterpret_cast<intptr_t>(*reinterpret_cast<const Address*>(void_b));
- // Unfortunately if int is smaller than intptr_t there is no branch-free
- // way to return a number with the same sign as the difference between the
- // pointers.
- if (a == b) return 0;
- if (a < b) return -1;
- ASSERT(a > b);
- return 1;
-}
-#else
-static int CompareAddresses(const void* void_a, const void* void_b) {
- intptr_t a =
- reinterpret_cast<intptr_t>(*reinterpret_cast<const Address*>(void_a));
- intptr_t b =
- reinterpret_cast<intptr_t>(*reinterpret_cast<const Address*>(void_b));
- ASSERT(sizeof(1) == sizeof(a));
- // Shift down to avoid wraparound.
- return (a >> kPointerSizeLog2) - (b >> kPointerSizeLog2);
-}
-#endif
-
-
void StoreBuffer::Uniq() {
// Remove adjacent duplicates and cells that do not point at new space.
Address previous = NULL;
@@ -167,6 +143,11 @@ void StoreBuffer::Uniq() {
}
+bool StoreBuffer::SpaceAvailable(intptr_t space_needed) {
+ return old_limit_ - old_top_ >= space_needed;
+}
+
+
void StoreBuffer::EnsureSpace(intptr_t space_needed) {
while (old_limit_ - old_top_ < space_needed &&
old_limit_ < old_reserved_limit_) {
@@ -177,7 +158,7 @@ void StoreBuffer::EnsureSpace(intptr_t space_needed) {
old_limit_ += grow;
}
- if (old_limit_ - old_top_ >= space_needed) return;
+ if (SpaceAvailable(space_needed)) return;
if (old_buffer_is_filtered_) return;
ASSERT(may_move_store_buffer_entries_);
@@ -189,16 +170,17 @@ void StoreBuffer::EnsureSpace(intptr_t space_needed) {
PointerChunkIterator it(heap_);
MemoryChunk* chunk;
while ((chunk = it.next()) != NULL) {
- if (chunk->scan_on_scavenge()) page_has_scan_on_scavenge_flag = true;
+ if (chunk->scan_on_scavenge()) {
+ page_has_scan_on_scavenge_flag = true;
+ break;
+ }
}
if (page_has_scan_on_scavenge_flag) {
Filter(MemoryChunk::SCAN_ON_SCAVENGE);
}
- // If filtering out the entries from scan_on_scavenge pages got us down to
- // less than half full, then we are satisfied with that.
- if (old_limit_ - old_top_ > old_top_ - old_start_) return;
+ if (SpaceAvailable(space_needed)) return;
// Sample 1 entry in 97 and filter out the pages where we estimate that more
// than 1 in 8 pointers are to new space.
@@ -213,11 +195,11 @@ void StoreBuffer::EnsureSpace(intptr_t space_needed) {
{ 3, ((Page::kPageSize / kPointerSize) / 3) / 256 },
{ 1, 0}
};
- for (int i = kSampleFinenesses - 1; i >= 0; i--) {
+ for (int i = 0; i < kSampleFinenesses; i++) {
ExemptPopularPages(samples[i].prime_sample_step, samples[i].threshold);
// As a last resort we mark all pages as being exempt from the store buffer.
- ASSERT(i != 0 || old_top_ == old_start_);
- if (old_limit_ - old_top_ > old_top_ - old_start_) return;
+ ASSERT(i != (kSampleFinenesses - 1) || old_top_ == old_start_);
+ if (SpaceAvailable(space_needed)) return;
}
UNREACHABLE();
}
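The rewritten loop above now escalates through the sample finenesses from the cheapest filter to the most aggressive one and stops as soon as SpaceAvailable() is satisfied. A sketch of that escalation pattern in isolation (placeholder types, not V8 code):

struct FilterStepSketch { int prime_sample_step; int threshold; };

// apply_and_check applies one filtering pass and reports whether enough
// space is now available (the SpaceAvailable() analogue).
inline bool ReclaimUntilEnough(const FilterStepSketch* steps, int count,
                               bool (*apply_and_check)(const FilterStepSketch&)) {
  for (int i = 0; i < count; i++) {
    if (apply_and_check(steps[i])) return true;
  }
  return false;  // corresponds to the UNREACHABLE() in the real code
}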
@@ -239,7 +221,7 @@ void StoreBuffer::ExemptPopularPages(int prime_sample_step, int threshold) {
if (previous_chunk != NULL && previous_chunk->Contains(addr)) {
containing_chunk = previous_chunk;
} else {
- containing_chunk = MemoryChunk::FromAnyPointerAddress(addr);
+ containing_chunk = MemoryChunk::FromAnyPointerAddress(heap_, addr);
}
int old_counter = containing_chunk->store_buffer_counter();
if (old_counter == threshold) {
@@ -265,7 +247,7 @@ void StoreBuffer::Filter(int flag) {
if (previous_chunk != NULL && previous_chunk->Contains(addr)) {
containing_chunk = previous_chunk;
} else {
- containing_chunk = MemoryChunk::FromAnyPointerAddress(addr);
+ containing_chunk = MemoryChunk::FromAnyPointerAddress(heap_, addr);
previous_chunk = containing_chunk;
}
if (!containing_chunk->IsFlagSet(flag)) {
@@ -283,10 +265,7 @@ void StoreBuffer::Filter(int flag) {
void StoreBuffer::SortUniq() {
Compact();
if (old_buffer_is_sorted_) return;
- qsort(reinterpret_cast<void*>(old_start_),
- old_top_ - old_start_,
- sizeof(*old_top_),
- &CompareAddresses);
+ std::sort(old_start_, old_top_);
Uniq();
old_buffer_is_sorted_ = true;
@@ -303,7 +282,10 @@ bool StoreBuffer::PrepareForIteration() {
MemoryChunk* chunk;
bool page_has_scan_on_scavenge_flag = false;
while ((chunk = it.next()) != NULL) {
- if (chunk->scan_on_scavenge()) page_has_scan_on_scavenge_flag = true;
+ if (chunk->scan_on_scavenge()) {
+ page_has_scan_on_scavenge_flag = true;
+ break;
+ }
}
if (page_has_scan_on_scavenge_flag) {
@@ -322,7 +304,7 @@ bool StoreBuffer::PrepareForIteration() {
void StoreBuffer::Clean() {
ClearFilteringHashSets();
Uniq(); // Also removes things that no longer point to new space.
- CheckForFullBuffer();
+ EnsureSpace(kStoreBufferSize / 2);
}
@@ -388,7 +370,8 @@ void StoreBuffer::VerifyPointers(PagedSpace* space,
reinterpret_cast<PagedSpace*>(page->owner()),
page,
region_callback,
- &DummyScavengePointer);
+ &DummyScavengePointer,
+ false);
}
}
@@ -436,7 +419,10 @@ void StoreBuffer::GCEpilogue() {
void StoreBuffer::FindPointersToNewSpaceInRegion(
- Address start, Address end, ObjectSlotCallback slot_callback) {
+ Address start,
+ Address end,
+ ObjectSlotCallback slot_callback,
+ bool clear_maps) {
for (Address slot_address = start;
slot_address < end;
slot_address += kPointerSize) {
@@ -444,6 +430,9 @@ void StoreBuffer::FindPointersToNewSpaceInRegion(
if (heap_->InNewSpace(*slot)) {
HeapObject* object = reinterpret_cast<HeapObject*>(*slot);
ASSERT(object->IsHeapObject());
+ // The new space object was not promoted if it still contains a map
+ // pointer. Clear the map field now lazily.
+ if (clear_maps) ClearDeadObject(object);
slot_callback(reinterpret_cast<HeapObject**>(slot), object);
if (heap_->InNewSpace(*slot)) {
EnterDirectlyIntoStoreBuffer(slot_address);
@@ -470,7 +459,8 @@ static inline Address MapEndAlign(Address addr) {
void StoreBuffer::FindPointersToNewSpaceInMaps(
Address start,
Address end,
- ObjectSlotCallback slot_callback) {
+ ObjectSlotCallback slot_callback,
+ bool clear_maps) {
ASSERT(MapStartAlign(start) == start);
ASSERT(MapEndAlign(end) == end);
@@ -484,7 +474,8 @@ void StoreBuffer::FindPointersToNewSpaceInMaps(
FindPointersToNewSpaceInRegion(pointer_fields_start,
pointer_fields_end,
- slot_callback);
+ slot_callback,
+ clear_maps);
map_address += Map::kSize;
}
}
@@ -493,7 +484,8 @@ void StoreBuffer::FindPointersToNewSpaceInMaps(
void StoreBuffer::FindPointersToNewSpaceInMapsRegion(
Address start,
Address end,
- ObjectSlotCallback slot_callback) {
+ ObjectSlotCallback slot_callback,
+ bool clear_maps) {
Address map_aligned_start = MapStartAlign(start);
Address map_aligned_end = MapEndAlign(end);
@@ -502,7 +494,8 @@ void StoreBuffer::FindPointersToNewSpaceInMapsRegion(
FindPointersToNewSpaceInMaps(map_aligned_start,
map_aligned_end,
- slot_callback);
+ slot_callback,
+ clear_maps);
}
@@ -524,7 +517,8 @@ void StoreBuffer::FindPointersToNewSpaceOnPage(
PagedSpace* space,
Page* page,
RegionCallback region_callback,
- ObjectSlotCallback slot_callback) {
+ ObjectSlotCallback slot_callback,
+ bool clear_maps) {
Address visitable_start = page->area_start();
Address end_of_page = page->area_end();
@@ -544,7 +538,8 @@ void StoreBuffer::FindPointersToNewSpaceOnPage(
// After calling this the special garbage section may have moved.
(this->*region_callback)(visitable_start,
visitable_end,
- slot_callback);
+ slot_callback,
+ clear_maps);
if (visitable_end >= space->top() && visitable_end < space->limit()) {
visitable_end = space->limit();
visitable_start = visitable_end;
@@ -575,13 +570,15 @@ void StoreBuffer::FindPointersToNewSpaceOnPage(
if (visitable_start != visitable_end) {
(this->*region_callback)(visitable_start,
visitable_end,
- slot_callback);
+ slot_callback,
+ clear_maps);
}
}
void StoreBuffer::IteratePointersInStoreBuffer(
- ObjectSlotCallback slot_callback) {
+ ObjectSlotCallback slot_callback,
+ bool clear_maps) {
Address* limit = old_top_;
old_top_ = old_start_;
{
@@ -594,6 +591,9 @@ void StoreBuffer::IteratePointersInStoreBuffer(
Object* object = *slot;
if (heap_->InFromSpace(object)) {
HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
+ // The new space object was not promoted if it still contains a map
+ // pointer. Clear the map field now lazily.
+ if (clear_maps) ClearDeadObject(heap_object);
slot_callback(reinterpret_cast<HeapObject**>(slot), heap_object);
if (heap_->InNewSpace(*slot)) {
EnterDirectlyIntoStoreBuffer(reinterpret_cast<Address>(slot));
@@ -606,6 +606,18 @@ void StoreBuffer::IteratePointersInStoreBuffer(
void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback) {
+ IteratePointersToNewSpace(slot_callback, false);
+}
+
+
+void StoreBuffer::IteratePointersToNewSpaceAndClearMaps(
+ ObjectSlotCallback slot_callback) {
+ IteratePointersToNewSpace(slot_callback, true);
+}
+
+
+void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback,
+ bool clear_maps) {
// We do not sort or remove duplicated entries from the store buffer because
// we expect that callback will rebuild the store buffer thus removing
// all duplicates and pointers to old space.
@@ -614,7 +626,7 @@ void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback) {
// TODO(gc): we want to skip slots on evacuation candidates
// but we can't simply figure that out from slot address
// because slot can belong to a large object.
- IteratePointersInStoreBuffer(slot_callback);
+ IteratePointersInStoreBuffer(slot_callback, clear_maps);
// We are done scanning all the pointers that were in the store buffer, but
// there may be some pages marked scan_on_scavenge that have pointers to new
@@ -643,7 +655,7 @@ void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback) {
ASSERT(array->IsFixedArray());
Address start = array->address();
Address end = start + array->Size();
- FindPointersToNewSpaceInRegion(start, end, slot_callback);
+ FindPointersToNewSpaceInRegion(start, end, slot_callback, clear_maps);
} else {
Page* page = reinterpret_cast<Page*>(chunk);
PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner());
@@ -653,7 +665,8 @@ void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback) {
(owner == heap_->map_space() ?
&StoreBuffer::FindPointersToNewSpaceInMapsRegion :
&StoreBuffer::FindPointersToNewSpaceInRegion),
- slot_callback);
+ slot_callback,
+ clear_maps);
}
}
}
@@ -687,10 +700,15 @@ void StoreBuffer::Compact() {
uintptr_t int_addr = reinterpret_cast<uintptr_t>(*current);
// Shift out the last bits including any tags.
int_addr >>= kPointerSizeLog2;
- int hash1 =
- ((int_addr ^ (int_addr >> kHashSetLengthLog2)) & (kHashSetLength - 1));
+ // The upper part of an address is basically random because of ASLR and OS
+ // non-determinism, so we use only the bits within a page for hashing to
+ // make v8's behavior (more) deterministic.
+ uintptr_t hash_addr =
+ int_addr & (Page::kPageAlignmentMask >> kPointerSizeLog2);
+ int hash1 = ((hash_addr ^ (hash_addr >> kHashSetLengthLog2)) &
+ (kHashSetLength - 1));
if (hash_set_1_[hash1] == int_addr) continue;
- uintptr_t hash2 = (int_addr - (int_addr >> kHashSetLengthLog2));
+ uintptr_t hash2 = (hash_addr - (hash_addr >> kHashSetLengthLog2));
hash2 ^= hash2 >> (kHashSetLengthLog2 * 2);
hash2 &= (kHashSetLength - 1);
if (hash_set_2_[hash2] == int_addr) continue;
@@ -710,12 +728,6 @@ void StoreBuffer::Compact() {
ASSERT(old_top_ <= old_limit_);
}
heap_->isolate()->counters()->store_buffer_compactions()->Increment();
- CheckForFullBuffer();
-}
-
-
-void StoreBuffer::CheckForFullBuffer() {
- EnsureSpace(kStoreBufferSize * 2);
}
} } // namespace v8::internal
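A self-contained sketch (not V8 code) of the deduplication scheme in Compact() above: the slot address is reduced to its offset within the page so that ASLR does not perturb the hash, then probed against two small direct-mapped sets; a hit in either set means the address was seen recently and can be dropped. kSetLengthLog2 is a placeholder for kHashSetLengthLog2, and the insertion policy on a miss is simplified because the real update lines fall outside the quoted hunk.

#include <cstdint>

const int kSetLengthLog2 = 10;               // placeholder value
const int kSetLength = 1 << kSetLengthLog2;

inline bool IsRecentDuplicate(uintptr_t slot_address,
                              uintptr_t page_alignment_mask,
                              uintptr_t* set1, uintptr_t* set2) {
  const int kPtrLog2 = (sizeof(void*) == 8) ? 3 : 2;  // kPointerSizeLog2 analogue
  uintptr_t key = slot_address >> kPtrLog2;           // shift out tag bits
  uintptr_t in_page = key & (page_alignment_mask >> kPtrLog2);  // page-relative
  int hash1 = static_cast<int>(
      (in_page ^ (in_page >> kSetLengthLog2)) & (kSetLength - 1));
  if (set1[hash1] == key) return true;
  uintptr_t hash2 = in_page - (in_page >> kSetLengthLog2);
  hash2 ^= hash2 >> (kSetLengthLog2 * 2);
  hash2 &= (kSetLength - 1);
  if (set2[hash2] == key) return true;
  set1[hash1] = key;  // simplified insert on a miss
  return false;
}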
diff --git a/deps/v8/src/store-buffer.h b/deps/v8/src/store-buffer.h
index 0ade8cee1..01e7cbeb8 100644
--- a/deps/v8/src/store-buffer.h
+++ b/deps/v8/src/store-buffer.h
@@ -37,12 +37,16 @@
namespace v8 {
namespace internal {
+class Page;
+class PagedSpace;
class StoreBuffer;
typedef void (*ObjectSlotCallback)(HeapObject** from, HeapObject* to);
-typedef void (StoreBuffer::*RegionCallback)(
- Address start, Address end, ObjectSlotCallback slot_callback);
+typedef void (StoreBuffer::*RegionCallback)(Address start,
+ Address end,
+ ObjectSlotCallback slot_callback,
+ bool clear_maps);
// Used to implement the write barrier by collecting addresses of pointers
// between spaces.
@@ -81,6 +85,10 @@ class StoreBuffer {
// surviving old-to-new pointers into the store buffer to rebuild it.
void IteratePointersToNewSpace(ObjectSlotCallback callback);
+  // Same as IteratePointersToNewSpace but additionally clears maps in objects
+ // referenced from the store buffer that do not contain a forwarding pointer.
+ void IteratePointersToNewSpaceAndClearMaps(ObjectSlotCallback callback);
+
static const int kStoreBufferOverflowBit = 1 << (14 + kPointerSizeLog2);
static const int kStoreBufferSize = kStoreBufferOverflowBit;
static const int kStoreBufferLength = kStoreBufferSize / sizeof(Address);
@@ -158,13 +166,19 @@ class StoreBuffer {
void ClearFilteringHashSets();
- void CheckForFullBuffer();
+ bool SpaceAvailable(intptr_t space_needed);
void Uniq();
void ExemptPopularPages(int prime_sample_step, int threshold);
+  // Sets the map field of the object to NULL if it contains a map.
+ inline void ClearDeadObject(HeapObject *object);
+
+ void IteratePointersToNewSpace(ObjectSlotCallback callback, bool clear_maps);
+
void FindPointersToNewSpaceInRegion(Address start,
Address end,
- ObjectSlotCallback slot_callback);
+ ObjectSlotCallback slot_callback,
+ bool clear_maps);
// For each region of pointers on a page in use from an old space call
// visit_pointer_region callback.
@@ -180,20 +194,24 @@ class StoreBuffer {
void FindPointersToNewSpaceInMaps(
Address start,
Address end,
- ObjectSlotCallback slot_callback);
+ ObjectSlotCallback slot_callback,
+ bool clear_maps);
void FindPointersToNewSpaceInMapsRegion(
Address start,
Address end,
- ObjectSlotCallback slot_callback);
+ ObjectSlotCallback slot_callback,
+ bool clear_maps);
void FindPointersToNewSpaceOnPage(
PagedSpace* space,
Page* page,
RegionCallback region_callback,
- ObjectSlotCallback slot_callback);
+ ObjectSlotCallback slot_callback,
+ bool clear_maps);
- void IteratePointersInStoreBuffer(ObjectSlotCallback slot_callback);
+ void IteratePointersInStoreBuffer(ObjectSlotCallback slot_callback,
+ bool clear_maps);
#ifdef VERIFY_HEAP
void VerifyPointers(PagedSpace* space, RegionCallback region_callback);
@@ -210,8 +228,7 @@ class StoreBufferRebuildScope {
explicit StoreBufferRebuildScope(Heap* heap,
StoreBuffer* store_buffer,
StoreBufferCallback callback)
- : heap_(heap),
- store_buffer_(store_buffer),
+ : store_buffer_(store_buffer),
stored_state_(store_buffer->store_buffer_rebuilding_enabled_),
stored_callback_(store_buffer->callback_) {
store_buffer_->store_buffer_rebuilding_enabled_ = true;
@@ -222,11 +239,9 @@ class StoreBufferRebuildScope {
~StoreBufferRebuildScope() {
store_buffer_->callback_ = stored_callback_;
store_buffer_->store_buffer_rebuilding_enabled_ = stored_state_;
- store_buffer_->CheckForFullBuffer();
}
private:
- Heap* heap_;
StoreBuffer* store_buffer_;
bool stored_state_;
StoreBufferCallback stored_callback_;
diff --git a/deps/v8/src/string-search.h b/deps/v8/src/string-search.h
index 8c3456aa0..bc685ffe5 100644
--- a/deps/v8/src/string-search.h
+++ b/deps/v8/src/string-search.h
@@ -53,7 +53,7 @@ class StringSearchBase {
// a potentially less efficient searching, but is a safe approximation.
// For needles using only characters in the same Unicode 256-code point page,
// there is no search speed degradation.
- static const int kAsciiAlphabetSize = 128;
+ static const int kAsciiAlphabetSize = 256;
static const int kUC16AlphabetSize = Isolate::kUC16AlphabetSize;
// Bad-char shift table stored in the state. Its length is the alphabet size.
@@ -61,12 +61,12 @@ class StringSearchBase {
// to compensate for the algorithmic overhead compared to simple brute force.
static const int kBMMinPatternLength = 7;
- static inline bool IsAsciiString(Vector<const char>) {
+ static inline bool IsOneByteString(Vector<const uint8_t> string) {
return true;
}
- static inline bool IsAsciiString(Vector<const uc16> string) {
- return String::IsAscii(string.start(), string.length());
+ static inline bool IsOneByteString(Vector<const uc16> string) {
+ return String::IsOneByte(string.start(), string.length());
}
friend class Isolate;
@@ -81,7 +81,7 @@ class StringSearch : private StringSearchBase {
pattern_(pattern),
start_(Max(0, pattern.length() - kBMMaxShift)) {
if (sizeof(PatternChar) > sizeof(SubjectChar)) {
- if (!IsAsciiString(pattern_)) {
+ if (!IsOneByteString(pattern_)) {
strategy_ = &FailSearch;
return;
}
@@ -150,13 +150,21 @@ class StringSearch : private StringSearchBase {
void PopulateBoyerMooreTable();
+ static inline bool exceedsOneByte(uint8_t c) {
+ return false;
+ }
+
+ static inline bool exceedsOneByte(uint16_t c) {
+ return c > String::kMaxOneByteCharCodeU;
+ }
+
static inline int CharOccurrence(int* bad_char_occurrence,
SubjectChar char_code) {
if (sizeof(SubjectChar) == 1) {
return bad_char_occurrence[static_cast<int>(char_code)];
}
if (sizeof(PatternChar) == 1) {
- if (static_cast<unsigned int>(char_code) > String::kMaxAsciiCharCodeU) {
+ if (exceedsOneByte(char_code)) {
return -1;
}
return bad_char_occurrence[static_cast<unsigned int>(char_code)];
@@ -223,7 +231,7 @@ int StringSearch<PatternChar, SubjectChar>::SingleCharSearch(
return static_cast<int>(pos - subject.start());
} else {
if (sizeof(PatternChar) > sizeof(SubjectChar)) {
- if (static_cast<uc16>(pattern_first_char) > String::kMaxAsciiCharCodeU) {
+ if (exceedsOneByte(pattern_first_char)) {
return -1;
}
}
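For context on the kAsciiAlphabetSize and exceedsOneByte() changes above: with a one-byte (Latin-1) pattern the bad-character table needs 256 entries, and a two-byte subject character that cannot occur in such a pattern simply reports no occurrence. A minimal sketch of that table (not V8 code):

#include <cstdint>
#include <vector>

// Last occurrence of each byte value in the pattern, or -1 if absent.
inline std::vector<int> BuildBadCharTable(const std::vector<uint8_t>& pattern) {
  std::vector<int> last_occurrence(256, -1);
  for (size_t i = 0; i < pattern.size(); i++) {
    last_occurrence[pattern[i]] = static_cast<int>(i);
  }
  return last_occurrence;
}

inline int CharOccurrenceSketch(const std::vector<int>& table, uint16_t c) {
  if (c > 0xFF) return -1;  // mirrors exceedsOneByte() for two-byte subjects
  return table[c];
}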
diff --git a/deps/v8/src/string-stream.cc b/deps/v8/src/string-stream.cc
index 30519b561..45b675fa8 100644
--- a/deps/v8/src/string-stream.cc
+++ b/deps/v8/src/string-stream.cc
@@ -194,7 +194,8 @@ void StringStream::PrintObject(Object* o) {
return;
}
if (o->IsHeapObject()) {
- DebugObjectCache* debug_object_cache = Isolate::Current()->
+ HeapObject* ho = HeapObject::cast(o);
+ DebugObjectCache* debug_object_cache = ho->GetIsolate()->
string_stream_debug_object_cache();
for (int i = 0; i < debug_object_cache->length(); i++) {
if ((*debug_object_cache)[i] == o) {
@@ -252,16 +253,24 @@ void StringStream::Add(const char* format, FmtElm arg0, FmtElm arg1,
}
+void StringStream::Add(const char* format, FmtElm arg0, FmtElm arg1,
+ FmtElm arg2, FmtElm arg3, FmtElm arg4) {
+ const char argc = 5;
+ FmtElm argv[argc] = { arg0, arg1, arg2, arg3, arg4 };
+ Add(CStrVector(format), Vector<FmtElm>(argv, argc));
+}
+
+
SmartArrayPointer<const char> StringStream::ToCString() const {
char* str = NewArray<char>(length_ + 1);
- memcpy(str, buffer_, length_);
+ OS::MemCopy(str, buffer_, length_);
str[length_] = '\0';
return SmartArrayPointer<const char>(str);
}
-void StringStream::Log() {
- LOG(ISOLATE, StringEvent("StackDump", buffer_));
+void StringStream::Log(Isolate* isolate) {
+ LOG(isolate, StringEvent("StackDump", buffer_));
}
@@ -281,13 +290,13 @@ void StringStream::OutputToFile(FILE* out) {
}
-Handle<String> StringStream::ToString() {
- return FACTORY->NewStringFromUtf8(Vector<const char>(buffer_, length_));
+Handle<String> StringStream::ToString(Isolate* isolate) {
+ return isolate->factory()->NewStringFromUtf8(
+ Vector<const char>(buffer_, length_));
}
-void StringStream::ClearMentionedObjectCache() {
- Isolate* isolate = Isolate::Current();
+void StringStream::ClearMentionedObjectCache(Isolate* isolate) {
isolate->set_string_stream_current_security_token(NULL);
if (isolate->string_stream_debug_object_cache() == NULL) {
isolate->set_string_stream_debug_object_cache(
@@ -298,9 +307,8 @@ void StringStream::ClearMentionedObjectCache() {
#ifdef DEBUG
-bool StringStream::IsMentionedObjectCacheClear() {
- return (
- Isolate::Current()->string_stream_debug_object_cache()->length() == 0);
+bool StringStream::IsMentionedObjectCacheClear(Isolate* isolate) {
+ return isolate->string_stream_debug_object_cache()->length() == 0;
}
#endif
@@ -311,14 +319,14 @@ bool StringStream::Put(String* str) {
bool StringStream::Put(String* str, int start, int end) {
- StringInputBuffer name_buffer(str);
- name_buffer.Seek(start);
- for (int i = start; i < end && name_buffer.has_more(); i++) {
- int c = name_buffer.GetNext();
+ ConsStringIteratorOp op;
+ StringCharacterStream stream(str, &op, start);
+ for (int i = start; i < end && stream.HasMore(); i++) {
+ uint16_t c = stream.GetNext();
if (c >= 127 || c < 32) {
c = '?';
}
- if (!Put(c)) {
+ if (!Put(static_cast<char>(c))) {
return false; // Output was truncated.
}
}
@@ -342,7 +350,7 @@ void StringStream::PrintName(Object* name) {
void StringStream::PrintUsingMap(JSObject* js_object) {
Map* map = js_object->map();
- if (!HEAP->Contains(map) ||
+ if (!js_object->GetHeap()->Contains(map) ||
!map->IsHeapObject() ||
!map->IsMap()) {
Add("<Invalid map>\n");
@@ -350,9 +358,8 @@ void StringStream::PrintUsingMap(JSObject* js_object) {
}
int real_size = map->NumberOfOwnDescriptors();
DescriptorArray* descs = map->instance_descriptors();
- for (int i = 0; i < descs->number_of_descriptors(); i++) {
+ for (int i = 0; i < real_size; i++) {
PropertyDetails details = descs->GetDetails(i);
- if (details.descriptor_index() > real_size) continue;
if (details.type() == FIELD) {
Object* key = descs->GetKey(i);
if (key->IsString() || key->IsNumber()) {
@@ -368,7 +375,7 @@ void StringStream::PrintUsingMap(JSObject* js_object) {
key->ShortPrint();
}
Add(": ");
- Object* value = js_object->FastPropertyAt(descs->GetFieldIndex(i));
+ Object* value = js_object->RawFastPropertyAt(descs->GetFieldIndex(i));
Add("%o\n", value);
}
}
@@ -377,7 +384,7 @@ void StringStream::PrintUsingMap(JSObject* js_object) {
void StringStream::PrintFixedArray(FixedArray* array, unsigned int limit) {
- Heap* heap = HEAP;
+ Heap* heap = array->GetHeap();
for (unsigned int i = 0; i < 10 && i < limit; i++) {
Object* element = array->get(i);
if (element != heap->the_hole_value()) {
@@ -414,9 +421,9 @@ void StringStream::PrintByteArray(ByteArray* byte_array) {
}
-void StringStream::PrintMentionedObjectCache() {
+void StringStream::PrintMentionedObjectCache(Isolate* isolate) {
DebugObjectCache* debug_object_cache =
- Isolate::Current()->string_stream_debug_object_cache();
+ isolate->string_stream_debug_object_cache();
Add("==== Key ============================================\n\n");
for (int i = 0; i < debug_object_cache->length(); i++) {
HeapObject* printee = (*debug_object_cache)[i];
@@ -449,12 +456,12 @@ void StringStream::PrintMentionedObjectCache() {
void StringStream::PrintSecurityTokenIfChanged(Object* f) {
- Isolate* isolate = Isolate::Current();
+ if (!f->IsHeapObject()) return;
+ HeapObject* obj = HeapObject::cast(f);
+ Isolate* isolate = obj->GetIsolate();
Heap* heap = isolate->heap();
- if (!f->IsHeapObject() || !heap->Contains(HeapObject::cast(f))) {
- return;
- }
- Map* map = HeapObject::cast(f)->map();
+ if (!heap->Contains(obj)) return;
+ Map* map = obj->map();
if (!map->IsHeapObject() ||
!heap->Contains(map) ||
!map->IsMap() ||
@@ -463,7 +470,7 @@ void StringStream::PrintSecurityTokenIfChanged(Object* f) {
}
JSFunction* fun = JSFunction::cast(f);
- Object* perhaps_context = fun->unchecked_context();
+ Object* perhaps_context = fun->context();
if (perhaps_context->IsHeapObject() &&
heap->Contains(HeapObject::cast(perhaps_context)) &&
perhaps_context->IsContext()) {
@@ -484,48 +491,39 @@ void StringStream::PrintSecurityTokenIfChanged(Object* f) {
void StringStream::PrintFunction(Object* f, Object* receiver, Code** code) {
- if (f->IsHeapObject() &&
- HEAP->Contains(HeapObject::cast(f)) &&
- HEAP->Contains(HeapObject::cast(f)->map()) &&
- HeapObject::cast(f)->map()->IsMap()) {
- if (f->IsJSFunction()) {
- JSFunction* fun = JSFunction::cast(f);
- // Common case: on-stack function present and resolved.
- PrintPrototype(fun, receiver);
- *code = fun->code();
- } else if (f->IsSymbol()) {
- // Unresolved and megamorphic calls: Instead of the function
- // we have the function name on the stack.
- PrintName(f);
- Add("/* unresolved */ ");
- } else {
- // Unless this is the frame of a built-in function, we should always have
- // the callee function or name on the stack. If we don't, we have a
- // problem or a change of the stack frame layout.
- Add("%o", f);
- Add("/* warning: no JSFunction object or function name found */ ");
- }
- /* } else if (is_trampoline()) {
- Print("trampoline ");
- */
+ if (!f->IsHeapObject()) {
+ Add("/* warning: 'function' was not a heap object */ ");
+ return;
+ }
+ Heap* heap = HeapObject::cast(f)->GetHeap();
+ if (!heap->Contains(HeapObject::cast(f))) {
+ Add("/* warning: 'function' was not on the heap */ ");
+ return;
+ }
+ if (!heap->Contains(HeapObject::cast(f)->map())) {
+ Add("/* warning: function's map was not on the heap */ ");
+ return;
+ }
+ if (!HeapObject::cast(f)->map()->IsMap()) {
+ Add("/* warning: function's map was not a valid map */ ");
+ return;
+ }
+ if (f->IsJSFunction()) {
+ JSFunction* fun = JSFunction::cast(f);
+ // Common case: on-stack function present and resolved.
+ PrintPrototype(fun, receiver);
+ *code = fun->code();
+ } else if (f->IsInternalizedString()) {
+ // Unresolved and megamorphic calls: Instead of the function
+ // we have the function name on the stack.
+ PrintName(f);
+ Add("/* unresolved */ ");
} else {
- if (!f->IsHeapObject()) {
- Add("/* warning: 'function' was not a heap object */ ");
- return;
- }
- if (!HEAP->Contains(HeapObject::cast(f))) {
- Add("/* warning: 'function' was not on the heap */ ");
- return;
- }
- if (!HEAP->Contains(HeapObject::cast(f)->map())) {
- Add("/* warning: function's map was not on the heap */ ");
- return;
- }
- if (!HeapObject::cast(f)->map()->IsMap()) {
- Add("/* warning: function's map was not a valid map */ ");
- return;
- }
- Add("/* warning: Invalid JSFunction object found */ ");
+ // Unless this is the frame of a built-in function, we should always have
+ // the callee function or name on the stack. If we don't, we have a
+ // problem or a change of the stack frame layout.
+ Add("%o", f);
+ Add("/* warning: no JSFunction object or function name found */ ");
}
}
@@ -533,11 +531,13 @@ void StringStream::PrintFunction(Object* f, Object* receiver, Code** code) {
void StringStream::PrintPrototype(JSFunction* fun, Object* receiver) {
Object* name = fun->shared()->name();
bool print_name = false;
- Heap* heap = HEAP;
- for (Object* p = receiver; p != heap->null_value(); p = p->GetPrototype()) {
+ Isolate* isolate = fun->GetIsolate();
+ for (Object* p = receiver;
+ p != isolate->heap()->null_value();
+ p = p->GetPrototype(isolate)) {
if (p->IsJSObject()) {
Object* key = JSObject::cast(p)->SlowReverseLookup(fun);
- if (key != heap->undefined_value()) {
+ if (key != isolate->heap()->undefined_value()) {
if (!name->IsString() ||
!key->IsString() ||
!String::cast(name)->Equals(String::cast(key))) {
@@ -573,7 +573,7 @@ char* HeapStringAllocator::grow(unsigned* bytes) {
if (new_space == NULL) {
return space_;
}
- memcpy(new_space, space_, *bytes);
+ OS::MemCopy(new_space, space_, *bytes);
*bytes = new_bytes;
DeleteArray(space_);
space_ = new_space;
diff --git a/deps/v8/src/string-stream.h b/deps/v8/src/string-stream.h
index 0ba8f52d4..e3db2a8a8 100644
--- a/deps/v8/src/string-stream.h
+++ b/deps/v8/src/string-stream.h
@@ -137,12 +137,18 @@ class StringStream {
FmtElm arg1,
FmtElm arg2,
FmtElm arg3);
+ void Add(const char* format,
+ FmtElm arg0,
+ FmtElm arg1,
+ FmtElm arg2,
+ FmtElm arg3,
+ FmtElm arg4);
// Getting the message out.
void OutputToFile(FILE* out);
void OutputToStdOut() { OutputToFile(stdout); }
- void Log();
- Handle<String> ToString();
+ void Log(Isolate* isolate);
+ Handle<String> ToString(Isolate* isolate);
SmartArrayPointer<const char> ToCString() const;
int length() const { return length_; }
@@ -163,10 +169,10 @@ class StringStream {
}
// Mentioned object cache support.
- void PrintMentionedObjectCache();
- static void ClearMentionedObjectCache();
+ void PrintMentionedObjectCache(Isolate* isolate);
+ static void ClearMentionedObjectCache(Isolate* isolate);
#ifdef DEBUG
- static bool IsMentionedObjectCacheClear();
+ static bool IsMentionedObjectCacheClear(Isolate* isolate);
#endif
@@ -187,6 +193,31 @@ class StringStream {
};
+// Utility class to print a list of items to a stream, divided by a separator.
+class SimpleListPrinter {
+ public:
+ explicit SimpleListPrinter(StringStream* stream, char separator = ',') {
+ separator_ = separator;
+ stream_ = stream;
+ first_ = true;
+ }
+
+ void Add(const char* str) {
+ if (first_) {
+ first_ = false;
+ } else {
+ stream_->Put(separator_);
+ }
+ stream_->Add(str);
+ }
+
+ private:
+ bool first_;
+ char separator_;
+ StringStream* stream_;
+};
+
+
} } // namespace v8::internal
#endif // V8_STRING_STREAM_H_
diff --git a/deps/v8/src/string.js b/deps/v8/src/string.js
index 6115930b6..14b44ca41 100644
--- a/deps/v8/src/string.js
+++ b/deps/v8/src/string.js
@@ -25,24 +25,21 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
// This file relies on the fact that the following declaration has been made
// in runtime.js:
// var $String = global.String;
-// var $NaN = 0/0;
+// -------------------------------------------------------------------
-// Set the String function and constructor.
-%SetCode($String, function(x) {
+function StringConstructor(x) {
var value = %_ArgumentsLength() == 0 ? '' : TO_STRING_INLINE(x);
if (%_IsConstructCall()) {
%_SetValueOf(this, value);
} else {
return value;
}
-});
+}
-%FunctionSetPrototype($String, new $String());
// ECMA-262 section 15.5.4.2
function StringToString() {
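
Note: the new StringConstructor keeps the usual call/construct split -- a plain
call returns the string primitive, while construction wraps it via %_SetValueOf.
A minimal sketch of the observable behaviour (plain JavaScript, illustrative
only, not part of this patch):

    String(42);                 // "42", a primitive; %_IsConstructCall() is false
    typeof String(42);          // "string"
    var boxed = new String(42); // wrapper object holding "42"
    typeof boxed;               // "object"
    boxed.valueOf();            // "42"
    String();                   // "" when called with no arguments
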
@@ -172,7 +169,6 @@ function StringLocaleCompare(other) {
throw MakeTypeError("called_on_null_or_undefined",
["String.prototype.localeCompare"]);
}
- if (%_ArgumentsLength() === 0) return 0;
return %StringLocaleCompare(TO_STRING_INLINE(this),
TO_STRING_INLINE(other));
}
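
Note: with the %_ArgumentsLength() shortcut gone, a missing argument flows
through TO_STRING_INLINE like any other value, so it is compared as the string
"undefined" (matching ES5.1 15.5.4.9). Illustrative, locale-dependent sketch:

    "undefined".localeCompare();   // 0 -- compared against "undefined"
    "abc".localeCompare();         // typically negative: "abc" vs "undefined"
    "abc".localeCompare("abc");    // 0, unchanged by this patch
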
@@ -186,11 +182,16 @@ function StringMatch(regexp) {
}
var subject = TO_STRING_INLINE(this);
if (IS_REGEXP(regexp)) {
+ // Emulate RegExp.prototype.exec's side effect in step 5, even though
+ // value is discarded.
+ var lastIndex = regexp.lastIndex;
+ TO_INTEGER_FOR_SIDE_EFFECT(lastIndex);
if (!regexp.global) return RegExpExecNoTests(regexp, subject, 0);
%_Log('regexp', 'regexp-match,%0S,%1r', [subject, regexp]);
// lastMatchInfo is defined in regexp.js.
var result = %StringMatch(subject, regexp, lastMatchInfo);
if (result !== null) lastMatchInfoOverride = null;
+ regexp.lastIndex = 0;
return result;
}
// Non-regexp argument.
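
Note: the two additions mirror RegExp.prototype.exec -- reading lastIndex
through ToInteger has a user-visible side effect even though the value is
discarded, and a global match resets lastIndex afterwards. Illustrative sketch
(plain JavaScript, not part of this patch):

    var coerced = false;
    var re = /a/g;
    re.lastIndex = { valueOf: function() { coerced = true; return 0; } };
    "banana".match(re);   // ["a", "a", "a"]
    coerced;              // true -- lastIndex was read and coerced
    re.lastIndex;         // 0   -- reset after the global match
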
@@ -199,16 +200,6 @@ function StringMatch(regexp) {
}
-// SubString is an internal function that returns the sub string of 'string'.
-// If resulting string is of length 1, we use the one character cache
-// otherwise we call the runtime system.
-function SubString(string, start, end) {
- // Use the one character string cache.
- if (start + 1 == end) return %_StringCharAt(string, start);
- return %_SubString(string, start, end);
-}
-
-
// This has the same size as the lastMatchInfo array, and can be used for
// functions that expect that structure to be returned. It is used when the
// needle is a string rather than a regexp. In this case we can't update
@@ -225,33 +216,62 @@ function StringReplace(search, replace) {
}
var subject = TO_STRING_INLINE(this);
- // Delegate to one of the regular expression variants if necessary.
+ // Decision tree for dispatch
+ // .. regexp search
+ // .... string replace
+ // ...... non-global search
+ // ........ empty string replace
+ // ........ non-empty string replace (with $-expansion)
+ // ...... global search
+ // ........ no need to circumvent last match info override
+ // ........ need to circumvent last match info override
+ // .... function replace
+ // ...... global search
+ // ...... non-global search
+ // .. string search
+ // .... special case that replaces with one single character
+ // ...... function replace
+ // ...... string replace (with $-expansion)
+
if (IS_REGEXP(search)) {
+ // Emulate RegExp.prototype.exec's side effect in step 5, even if
+ // value is discarded.
+ var lastIndex = search.lastIndex;
+ TO_INTEGER_FOR_SIDE_EFFECT(lastIndex);
%_Log('regexp', 'regexp-replace,%0r,%1S', [search, subject]);
- if (IS_SPEC_FUNCTION(replace)) {
- if (search.global) {
- return StringReplaceGlobalRegExpWithFunction(subject, search, replace);
- } else {
- return StringReplaceNonGlobalRegExpWithFunction(subject,
- search,
- replace);
+
+ if (!IS_SPEC_FUNCTION(replace)) {
+ replace = TO_STRING_INLINE(replace);
+
+ if (!search.global) {
+ // Non-global regexp search, string replace.
+ var match = DoRegExpExec(search, subject, 0);
+ if (match == null) {
+ search.lastIndex = 0;
+ return subject;
+ }
+ if (replace.length == 0) {
+ return %_SubString(subject, 0, match[CAPTURE0]) +
+ %_SubString(subject, match[CAPTURE1], subject.length);
+ }
+ return ExpandReplacement(replace, subject, lastMatchInfo,
+ %_SubString(subject, 0, match[CAPTURE0])) +
+ %_SubString(subject, match[CAPTURE1], subject.length);
}
- } else {
+
+ // Global regexp search, string replace.
+ search.lastIndex = 0;
if (lastMatchInfoOverride == null) {
- return %StringReplaceRegExpWithString(subject,
- search,
- TO_STRING_INLINE(replace),
- lastMatchInfo);
+ return %StringReplaceGlobalRegExpWithString(
+ subject, search, replace, lastMatchInfo);
} else {
// We use this hack to detect whether StringReplaceRegExpWithString
- // found at least one hit. In that case we need to remove any
+ // found at least one hit. In that case we need to remove any
// override.
var saved_subject = lastMatchInfo[LAST_SUBJECT_INDEX];
lastMatchInfo[LAST_SUBJECT_INDEX] = 0;
- var answer = %StringReplaceRegExpWithString(subject,
- search,
- TO_STRING_INLINE(replace),
- lastMatchInfo);
+ var answer = %StringReplaceGlobalRegExpWithString(
+ subject, search, replace, lastMatchInfo);
if (%_IsSmi(lastMatchInfo[LAST_SUBJECT_INDEX])) {
lastMatchInfo[LAST_SUBJECT_INDEX] = saved_subject;
} else {
@@ -260,10 +280,17 @@ function StringReplace(search, replace) {
return answer;
}
}
+
+ if (search.global) {
+ // Global regexp search, function replace.
+ return StringReplaceGlobalRegExpWithFunction(subject, search, replace);
+ }
+ // Non-global regexp search, function replace.
+ return StringReplaceNonGlobalRegExpWithFunction(subject, search, replace);
}
- // Convert the search argument to a string and search for it.
search = TO_STRING_INLINE(search);
+
if (search.length == 1 &&
subject.length > 0xFF &&
IS_STRING(replace) &&
@@ -277,7 +304,7 @@ function StringReplace(search, replace) {
if (start < 0) return subject;
var end = start + search.length;
- var result = SubString(subject, 0, start);
+ var result = %_SubString(subject, 0, start);
// Compute the string to replace with.
if (IS_SPEC_FUNCTION(replace)) {
@@ -286,11 +313,13 @@ function StringReplace(search, replace) {
} else {
reusableMatchInfo[CAPTURE0] = start;
reusableMatchInfo[CAPTURE1] = end;
- replace = TO_STRING_INLINE(replace);
- result = ExpandReplacement(replace, subject, reusableMatchInfo, result);
+ result = ExpandReplacement(TO_STRING_INLINE(replace),
+ subject,
+ reusableMatchInfo,
+ result);
}
- return result + SubString(subject, end, subject.length);
+ return result + %_SubString(subject, end, subject.length);
}
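
Note: the rewritten body makes each branch of the decision tree above explicit.
Roughly, the outcomes per branch look like this (illustrative JavaScript, not
part of this patch):

    // regexp search, non-global, string replace: one match, $-expansion applies
    "a-b-c".replace(/-/, "+");      // "a+b-c"
    // regexp search, global, string replace: one runtime call handles all matches
    "a-b-c".replace(/-/g, "+");     // "a+b+c"
    // regexp search, function replace: the replacer runs once per match
    "a-b".replace(/-/g, function(m) { return "<" + m + ">"; });  // "a<->b"
    // string search: only the first occurrence is replaced, no regexp involved
    "a-b-c".replace("-", "+");      // "a+b-c"
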
@@ -304,7 +333,7 @@ function ExpandReplacement(string, subject, matchInfo, result) {
return result;
}
- if (next > 0) result += SubString(string, 0, next);
+ if (next > 0) result += %_SubString(string, 0, next);
while (true) {
var expansion = '$';
@@ -316,13 +345,39 @@ function ExpandReplacement(string, subject, matchInfo, result) {
result += '$';
} else if (peek == 38) { // $& - match
++position;
- result += SubString(subject, matchInfo[CAPTURE0], matchInfo[CAPTURE1]);
+ result +=
+ %_SubString(subject, matchInfo[CAPTURE0], matchInfo[CAPTURE1]);
} else if (peek == 96) { // $` - prefix
++position;
- result += SubString(subject, 0, matchInfo[CAPTURE0]);
+ result += %_SubString(subject, 0, matchInfo[CAPTURE0]);
} else if (peek == 39) { // $' - suffix
++position;
- result += SubString(subject, matchInfo[CAPTURE1], subject.length);
+ result += %_SubString(subject, matchInfo[CAPTURE1], subject.length);
+ } else if (peek >= 48 && peek <= 57) {
+ // Valid indices are $1 .. $9, $01 .. $09 and $10 .. $99
+ var scaled_index = (peek - 48) << 1;
+ var advance = 1;
+ var number_of_captures = NUMBER_OF_CAPTURES(matchInfo);
+ if (position + 1 < string.length) {
+ var next = %_StringCharCodeAt(string, position + 1);
+ if (next >= 48 && next <= 57) {
+ var new_scaled_index = scaled_index * 10 + ((next - 48) << 1);
+ if (new_scaled_index < number_of_captures) {
+ scaled_index = new_scaled_index;
+ advance = 2;
+ }
+ }
+ }
+ if (scaled_index != 0 && scaled_index < number_of_captures) {
+ var start = matchInfo[CAPTURE(scaled_index)];
+ if (start >= 0) {
+ result +=
+ %_SubString(subject, start, matchInfo[CAPTURE(scaled_index + 1)]);
+ }
+ position += advance;
+ } else {
+ result += '$';
+ }
} else {
result += '$';
}
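
Note: the new $n branch expands $1..$9, $01..$09 and $10..$99 when the index is
within the number of captures; anything else stays literal. A few illustrative
cases (plain JavaScript, not part of this patch):

    "abc".replace(/b/, "[$`|$&|$']");                        // "a[a|b|c]c"
    "2013-06-12".replace(/(\d+)-(\d+)-(\d+)/, "$3/$2/$1");   // "12/06/2013"
    "abc".replace(/(b)/, "$01");    // "abc" -- $01 is the first capture, "b"
    "abc".replace(/(b)/, "$2");     // "a$2c" -- index beyond the captures, left as-is
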
@@ -337,14 +392,14 @@ function ExpandReplacement(string, subject, matchInfo, result) {
// haven't reached the end, we need to append the suffix.
if (next < 0) {
if (position < length) {
- result += SubString(string, position, length);
+ result += %_SubString(string, position, length);
}
return result;
}
// Append substring between the previous and the next $ character.
if (next > position) {
- result += SubString(string, position, next);
+ result += %_SubString(string, position, next);
}
}
return result;
@@ -360,7 +415,7 @@ function CaptureString(string, lastCaptureInfo, index) {
// If start isn't valid, return undefined.
if (start < 0) return;
var end = lastCaptureInfo[CAPTURE(scaled + 1)];
- return SubString(string, start, end);
+ return %_SubString(string, start, end);
}
@@ -401,7 +456,7 @@ function StringReplaceGlobalRegExpWithFunction(subject, regexp, replace) {
// input string and some replacements that were returned from the replace
// function.
var match_start = 0;
- var override = new InternalArray(null, 0, subject);
+ var override = new InternalPackedArray(null, 0, subject);
var receiver = %GetDefaultReceiver(replace);
for (var i = 0; i < len; i++) {
var elem = res[i];
@@ -441,8 +496,7 @@ function StringReplaceGlobalRegExpWithFunction(subject, regexp, replace) {
}
}
}
- var resultBuilder = new ReplaceResultBuilder(subject, res);
- var result = resultBuilder.generate();
+ var result = %StringBuilderConcat(res, res.length, subject);
resultArray.length = 0;
reusableReplaceArray = resultArray;
return result;
@@ -451,9 +505,12 @@ function StringReplaceGlobalRegExpWithFunction(subject, regexp, replace) {
function StringReplaceNonGlobalRegExpWithFunction(subject, regexp, replace) {
var matchInfo = DoRegExpExec(regexp, subject, 0);
- if (IS_NULL(matchInfo)) return subject;
+ if (IS_NULL(matchInfo)) {
+ regexp.lastIndex = 0;
+ return subject;
+ }
var index = matchInfo[CAPTURE0];
- var result = SubString(subject, 0, index);
+ var result = %_SubString(subject, 0, index);
var endOfMatch = matchInfo[CAPTURE1];
// Compute the parameter list consisting of the match, captures, index,
// and subject for the replace function invocation.
@@ -463,7 +520,7 @@ function StringReplaceNonGlobalRegExpWithFunction(subject, regexp, replace) {
var receiver = %GetDefaultReceiver(replace);
if (m == 1) {
// No captures, only the match, which is always valid.
- var s = SubString(subject, index, endOfMatch);
+ var s = %_SubString(subject, index, endOfMatch);
// Don't call directly to avoid exposing the built-in global object.
replacement = %_CallFunction(receiver, s, index, subject, replace);
} else {
@@ -480,7 +537,7 @@ function StringReplaceNonGlobalRegExpWithFunction(subject, regexp, replace) {
result += replacement; // The add method converts to string if necessary.
// Can't use matchInfo any more from here, since the function could
// overwrite it.
- return result + SubString(subject, endOfMatch, subject.length);
+ return result + %_SubString(subject, endOfMatch, subject.length);
}
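
Note: the parameter list assembled above is what the replacer observes on the
non-global path -- the full match, each capture, the match index and the
subject, in that order. Illustrative sketch (plain JavaScript, not part of this
patch):

    "2013-06".replace(/(\d+)-(\d+)/, function(match, y, m, index, subject) {
      // match == "2013-06", y == "2013", m == "06", index == 0, subject == "2013-06"
      return m + "/" + y;
    });                             // "06/2013"
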
@@ -516,7 +573,7 @@ function StringSlice(start, end) {
var s_len = s.length;
var start_i = TO_INTEGER(start);
var end_i = s_len;
- if (end !== void 0) {
+ if (!IS_UNDEFINED(end)) {
end_i = TO_INTEGER(end);
}
@@ -546,7 +603,7 @@ function StringSlice(start, end) {
return '';
}
- return SubString(s, start_i, end_i);
+ return %_SubString(s, start_i, end_i);
}
@@ -588,6 +645,8 @@ function StringSplit(separator, limit) {
}
+var ArrayPushBuiltin = $Array.prototype.push;
+
function StringSplitOnRegExp(subject, separator, limit, length) {
%_Log('regexp', 'regexp-split,%0S,%1r', [subject, separator]);
@@ -607,13 +666,15 @@ function StringSplitOnRegExp(subject, separator, limit, length) {
while (true) {
if (startIndex === length) {
- result.push(SubString(subject, currentIndex, length));
+ %_CallFunction(result, %_SubString(subject, currentIndex, length),
+ ArrayPushBuiltin);
break;
}
var matchInfo = DoRegExpExec(separator, subject, startIndex);
if (matchInfo == null || length === (startMatch = matchInfo[CAPTURE0])) {
- result.push(SubString(subject, currentIndex, length));
+ %_CallFunction(result, %_SubString(subject, currentIndex, length),
+ ArrayPushBuiltin);
break;
}
var endIndex = matchInfo[CAPTURE1];
@@ -624,11 +685,8 @@ function StringSplitOnRegExp(subject, separator, limit, length) {
continue;
}
- if (currentIndex + 1 == startMatch) {
- result.push(%_StringCharAt(subject, currentIndex));
- } else {
- result.push(%_SubString(subject, currentIndex, startMatch));
- }
+ %_CallFunction(result, %_SubString(subject, currentIndex, startMatch),
+ ArrayPushBuiltin);
if (result.length === limit) break;
@@ -637,13 +695,10 @@ function StringSplitOnRegExp(subject, separator, limit, length) {
var start = matchInfo[i++];
var end = matchInfo[i++];
if (end != -1) {
- if (start + 1 == end) {
- result.push(%_StringCharAt(subject, start));
- } else {
- result.push(%_SubString(subject, start, end));
- }
+ %_CallFunction(result, %_SubString(subject, start, end),
+ ArrayPushBuiltin);
} else {
- result.push(void 0);
+ %_CallFunction(result, UNDEFINED, ArrayPushBuiltin);
}
if (result.length === limit) break outer_loop;
}
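
Note: caching $Array.prototype.push and calling it through %_CallFunction keeps
StringSplitOnRegExp independent of user code that monkey-patches push, which the
previous result.push(...) calls were not. Illustrative sketch (plain JavaScript,
not part of this patch):

    var realPush = Array.prototype.push;
    Array.prototype.push = function() { throw new Error("patched push"); };
    "a,b,c".split(/,/);             // ["a", "b", "c"] -- the cached builtin is used
    Array.prototype.push = realPush;
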
@@ -685,9 +740,7 @@ function StringSubstring(start, end) {
}
}
- return ((start_i + 1 == end_i)
- ? %_StringCharAt(s, start_i)
- : %_SubString(s, start_i, end_i));
+ return %_SubString(s, start_i, end_i);
}
@@ -702,7 +755,7 @@ function StringSubstr(start, n) {
// Correct n: If not given, set to string length; if explicitly
// set to undefined, zero, or negative, returns empty string.
- if (n === void 0) {
+ if (IS_UNDEFINED(n)) {
len = s.length;
} else {
len = TO_INTEGER(n);
@@ -711,7 +764,7 @@ function StringSubstr(start, n) {
// Correct start: If not given (or undefined), set to zero; otherwise
// convert to integer and handle negative case.
- if (start === void 0) {
+ if (IS_UNDEFINED(start)) {
start = 0;
} else {
start = TO_INTEGER(start);
@@ -729,9 +782,7 @@ function StringSubstr(start, n) {
var end = start + len;
if (end > s.length) end = s.length;
- return ((start + 1 == end)
- ? %_StringCharAt(s, start)
- : %_SubString(s, start, end));
+ return %_SubString(s, start, end);
}
@@ -799,7 +850,6 @@ function StringTrimRight() {
return %StringTrim(TO_STRING_INLINE(this), false, true);
}
-var static_charcode_array = new InternalArray(4);
// ECMA-262, section 15.5.3.2
function StringFromCharCode(code) {
@@ -809,17 +859,25 @@ function StringFromCharCode(code) {
return %_StringCharFromCode(code & 0xffff);
}
- // NOTE: This is not super-efficient, but it is necessary because we
- // want to avoid converting to numbers from within the virtual
- // machine. Maybe we can find another way of doing this?
- var codes = static_charcode_array;
- for (var i = 0; i < n; i++) {
+ var one_byte = %NewString(n, NEW_ONE_BYTE_STRING);
+ var i;
+ for (i = 0; i < n; i++) {
var code = %_Arguments(i);
- if (!%_IsSmi(code)) code = ToNumber(code);
- codes[i] = code;
+ if (!%_IsSmi(code)) code = ToNumber(code) & 0xffff;
+ if (code < 0) code = code & 0xffff;
+ if (code > 0xff) break;
+ %_OneByteSeqStringSetChar(one_byte, i, code);
+ }
+ if (i == n) return one_byte;
+ one_byte = %TruncateString(one_byte, i);
+
+ var two_byte = %NewString(n - i, NEW_TWO_BYTE_STRING);
+ for (var j = 0; i < n; i++, j++) {
+ var code = %_Arguments(i);
+ if (!%_IsSmi(code)) code = ToNumber(code) & 0xffff;
+ %_TwoByteSeqStringSetChar(two_byte, j, code);
}
- codes.length = n;
- return %StringFromCharCodeArray(codes);
+ return one_byte + two_byte;
}
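
Note: the rewritten StringFromCharCode optimistically builds a sequential
one-byte string and only switches to a two-byte tail once a code unit above 0xff
appears; every argument is still truncated to 16 bits. Observable results
(plain JavaScript, illustrative only):

    String.fromCharCode(65, 66, 67);    // "ABC" -- stays on the one-byte path
    String.fromCharCode(0x41, 0x3b1);   // "A\u03b1" -- switches to the two-byte tail
    String.fromCharCode(65601.5);       // "A" -- ToNumber, then masked with 0xffff
    String.fromCharCode("66.9");        // "B" -- ToNumber("66.9") truncates to 66
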
@@ -898,57 +956,23 @@ function StringSup() {
return "<sup>" + this + "</sup>";
}
-
-// ReplaceResultBuilder support.
-function ReplaceResultBuilder(str) {
- if (%_ArgumentsLength() > 1) {
- this.elements = %_Arguments(1);
- } else {
- this.elements = new InternalArray();
- }
- this.special_string = str;
-}
-
-SetUpLockedPrototype(ReplaceResultBuilder,
- $Array("elements", "special_string"), $Array(
- "add", function(str) {
- str = TO_STRING_INLINE(str);
- if (str.length > 0) this.elements.push(str);
- },
- "addSpecialSlice", function(start, end) {
- var len = end - start;
- if (start < 0 || len <= 0) return;
- if (start < 0x80000 && len < 0x800) {
- this.elements.push((start << 11) | len);
- } else {
- // 0 < len <= String::kMaxLength and Smi::kMaxValue >= String::kMaxLength,
- // so -len is a smi.
- var elements = this.elements;
- elements.push(-len);
- elements.push(start);
- }
- },
- "generate", function() {
- var elements = this.elements;
- return %StringBuilderConcat(elements, elements.length, this.special_string);
- }
-));
-
-
// -------------------------------------------------------------------
function SetUpString() {
%CheckIsBootstrapping();
+
+ // Set the String function and constructor.
+ %SetCode($String, StringConstructor);
+ %FunctionSetPrototype($String, new $String());
+
// Set up the constructor property on the String prototype object.
%SetProperty($String.prototype, "constructor", $String, DONT_ENUM);
-
// Set up the non-enumerable functions on the String object.
InstallFunctions($String, DONT_ENUM, $Array(
"fromCharCode", StringFromCharCode
));
-
// Set up the non-enumerable functions on the String prototype object.
InstallFunctions($String.prototype, DONT_ENUM, $Array(
"valueOf", StringValueOf,
diff --git a/deps/v8/src/strtod.cc b/deps/v8/src/strtod.cc
index 0dc618a39..d332fd2bc 100644
--- a/deps/v8/src/strtod.cc
+++ b/deps/v8/src/strtod.cc
@@ -26,7 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <stdarg.h>
-#include <math.h>
+#include <cmath>
#include "globals.h"
#include "utils.h"
@@ -128,6 +128,7 @@ static void TrimToMaxSignificantDigits(Vector<const char> buffer,
exponent + (buffer.length() - kMaxSignificantDecimalDigits);
}
+
// Reads digits from the buffer and converts them to a uint64.
// Reads in as many digits as fit into a uint64.
// When the string starts with "1844674407370955161" no further digit is read.
@@ -175,8 +176,7 @@ static void ReadDiyFp(Vector<const char> buffer,
static bool DoubleStrtod(Vector<const char> trimmed,
int exponent,
double* result) {
-#if (defined(V8_TARGET_ARCH_IA32) || defined(USE_SIMULATOR)) \
- && !defined(_MSC_VER)
+#if (V8_TARGET_ARCH_IA32 || defined(USE_SIMULATOR)) && !defined(_MSC_VER)
// On x86 the floating-point stack can be 64 or 80 bits wide. If it is
// 80 bits wide (as is the case on Linux) then double-rounding occurs and the
// result is not accurate.
diff --git a/deps/v8/src/stub-cache.cc b/deps/v8/src/stub-cache.cc
index 8490c7e74..1ec00d49b 100644
--- a/deps/v8/src/stub-cache.cc
+++ b/deps/v8/src/stub-cache.cc
@@ -31,6 +31,7 @@
#include "arguments.h"
#include "ast.h"
#include "code-stubs.h"
+#include "cpu-profiler.h"
#include "gdb-jit.h"
#include "ic-inl.h"
#include "stub-cache.h"
@@ -43,10 +44,8 @@ namespace internal {
// StubCache implementation.
-StubCache::StubCache(Isolate* isolate, Zone* zone)
- : isolate_(isolate) {
- ASSERT(isolate == Isolate::Current());
-}
+StubCache::StubCache(Isolate* isolate)
+ : isolate_(isolate) { }
void StubCache::Initialize() {
@@ -56,14 +55,14 @@ void StubCache::Initialize() {
}
-Code* StubCache::Set(String* name, Map* map, Code* code) {
+Code* StubCache::Set(Name* name, Map* map, Code* code) {
// Get the flags from the code.
Code::Flags flags = Code::RemoveTypeFromFlags(code->flags());
// Validate that the name does not move on scavenge, and that we
- // can use identity checks instead of string equality checks.
+ // can use identity checks instead of structural equality checks.
ASSERT(!heap()->InNewSpace(name));
- ASSERT(name->IsSymbol());
+ ASSERT(name->IsUniqueName());
// The state bits are not important to the hash function because
// the stub cache only contains monomorphic stubs. Make sure that
@@ -100,484 +99,194 @@ Code* StubCache::Set(String* name, Map* map, Code* code) {
}
-Handle<Code> StubCache::ComputeLoadNonexistent(Handle<String> name,
- Handle<JSObject> receiver) {
- ASSERT(receiver->IsGlobalObject() || receiver->HasFastProperties());
- // If no global objects are present in the prototype chain, the load
- // nonexistent IC stub can be shared for all names for a given map
- // and we use the empty string for the map cache in that case. If
- // there are global objects involved, we need to check global
- // property cells in the stub and therefore the stub will be
- // specific to the name.
- Handle<String> cache_name = factory()->empty_string();
- if (receiver->IsGlobalObject()) cache_name = name;
- Handle<JSObject> last = receiver;
- while (last->GetPrototype() != heap()->null_value()) {
- last = Handle<JSObject>(JSObject::cast(last->GetPrototype()));
- if (last->IsGlobalObject()) cache_name = name;
- }
- // Compile the stub that is either shared for all names or
- // name specific if there are global objects involved.
- Code::Flags flags =
- Code::ComputeMonomorphicFlags(Code::LOAD_IC, Code::NONEXISTENT);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*cache_name, flags));
+Handle<Code> StubCache::FindIC(Handle<Name> name,
+ Handle<Map> stub_holder_map,
+ Code::Kind kind,
+ Code::ExtraICState extra_state) {
+ Code::Flags flags = Code::ComputeMonomorphicFlags(kind, extra_state);
+ Handle<Object> probe(stub_holder_map->FindInCodeCache(*name, flags),
+ isolate_);
if (probe->IsCode()) return Handle<Code>::cast(probe);
-
- LoadStubCompiler compiler(isolate_);
- Handle<Code> code =
- compiler.CompileLoadNonexistent(cache_name, receiver, last);
- PROFILE(isolate_, CodeCreateEvent(Logger::LOAD_IC_TAG, *code, *cache_name));
- GDBJIT(AddCode(GDBJITInterface::LOAD_IC, *cache_name, *code));
- JSObject::UpdateMapCodeCache(receiver, cache_name, code);
- return code;
+ return Handle<Code>::null();
}
-Handle<Code> StubCache::ComputeLoadField(Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- int field_index) {
- ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
- Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, Code::FIELD);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
- if (probe->IsCode()) return Handle<Code>::cast(probe);
-
- LoadStubCompiler compiler(isolate_);
- Handle<Code> code =
- compiler.CompileLoadField(receiver, holder, field_index, name);
- PROFILE(isolate_, CodeCreateEvent(Logger::LOAD_IC_TAG, *code, *name));
- GDBJIT(AddCode(GDBJITInterface::LOAD_IC, *name, *code));
- JSObject::UpdateMapCodeCache(receiver, name, code);
- return code;
+Handle<Code> StubCache::FindIC(Handle<Name> name,
+ Handle<JSObject> stub_holder,
+ Code::Kind kind,
+ Code::ExtraICState extra_ic_state) {
+ return FindIC(name, Handle<Map>(stub_holder->map()), kind, extra_ic_state);
}
-Handle<Code> StubCache::ComputeLoadCallback(Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<AccessorInfo> callback) {
- ASSERT(v8::ToCData<Address>(callback->getter()) != 0);
- ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
- Code::Flags flags =
- Code::ComputeMonomorphicFlags(Code::LOAD_IC, Code::CALLBACKS);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
+Handle<Code> StubCache::FindHandler(Handle<Name> name,
+ Handle<JSObject> receiver,
+ Code::Kind kind,
+ StrictModeFlag strict_mode) {
+ Code::ExtraICState extra_ic_state = Code::kNoExtraICState;
+ if (kind == Code::STORE_IC || kind == Code::KEYED_STORE_IC) {
+ extra_ic_state = Code::ComputeExtraICState(
+ STANDARD_STORE, strict_mode);
+ }
+ Code::Flags flags = Code::ComputeMonomorphicFlags(
+ Code::HANDLER, extra_ic_state, Code::NORMAL, kind);
+ Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags),
+ isolate_);
if (probe->IsCode()) return Handle<Code>::cast(probe);
-
- LoadStubCompiler compiler(isolate_);
- Handle<Code> code =
- compiler.CompileLoadCallback(name, receiver, holder, callback);
- PROFILE(isolate_, CodeCreateEvent(Logger::LOAD_IC_TAG, *code, *name));
- GDBJIT(AddCode(GDBJITInterface::LOAD_IC, *name, *code));
- JSObject::UpdateMapCodeCache(receiver, name, code);
- return code;
+ return Handle<Code>::null();
}
-Handle<Code> StubCache::ComputeLoadViaGetter(Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<JSFunction> getter) {
- ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
- Code::Flags flags =
- Code::ComputeMonomorphicFlags(Code::LOAD_IC, Code::CALLBACKS);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
- if (probe->IsCode()) return Handle<Code>::cast(probe);
+Handle<Code> StubCache::ComputeMonomorphicIC(Handle<HeapObject> receiver,
+ Handle<Code> handler,
+ Handle<Name> name,
+ StrictModeFlag strict_mode) {
+ Code::Kind kind = handler->handler_kind();
+ Handle<Map> map(receiver->map());
+ Handle<Code> ic = FindIC(name, map, kind, strict_mode);
+ if (!ic.is_null()) return ic;
+
+ if (kind == Code::LOAD_IC) {
+ LoadStubCompiler ic_compiler(isolate());
+ ic = ic_compiler.CompileMonomorphicIC(map, handler, name);
+ } else if (kind == Code::KEYED_LOAD_IC) {
+ KeyedLoadStubCompiler ic_compiler(isolate());
+ ic = ic_compiler.CompileMonomorphicIC(map, handler, name);
+ } else if (kind == Code::STORE_IC) {
+ StoreStubCompiler ic_compiler(isolate(), strict_mode);
+ ic = ic_compiler.CompileMonomorphicIC(map, handler, name);
+ } else {
+ ASSERT(kind == Code::KEYED_STORE_IC);
+ KeyedStoreStubCompiler ic_compiler(isolate(), strict_mode, STANDARD_STORE);
+ ic = ic_compiler.CompileMonomorphicIC(map, handler, name);
+ }
- LoadStubCompiler compiler(isolate_);
- Handle<Code> code =
- compiler.CompileLoadViaGetter(name, receiver, holder, getter);
- PROFILE(isolate_, CodeCreateEvent(Logger::LOAD_IC_TAG, *code, *name));
- GDBJIT(AddCode(GDBJITInterface::LOAD_IC, *name, *code));
- JSObject::UpdateMapCodeCache(receiver, name, code);
- return code;
+ HeapObject::UpdateMapCodeCache(receiver, name, ic);
+ return ic;
}
-Handle<Code> StubCache::ComputeLoadConstant(Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<JSFunction> value) {
- ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
- Code::Flags flags =
- Code::ComputeMonomorphicFlags(Code::LOAD_IC, Code::CONSTANT_FUNCTION);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
- if (probe->IsCode()) return Handle<Code>::cast(probe);
-
- LoadStubCompiler compiler(isolate_);
- Handle<Code> code =
- compiler.CompileLoadConstant(receiver, holder, value, name);
- PROFILE(isolate_, CodeCreateEvent(Logger::LOAD_IC_TAG, *code, *name));
- GDBJIT(AddCode(GDBJITInterface::LOAD_IC, *name, *code));
- JSObject::UpdateMapCodeCache(receiver, name, code);
- return code;
-}
-
+Handle<Code> StubCache::ComputeLoadNonexistent(Handle<Name> name,
+ Handle<JSObject> receiver) {
+ // If no global objects are present in the prototype chain, the load
+ // nonexistent IC stub can be shared for all names for a given map
+ // and we use the empty string for the map cache in that case. If
+ // there are global objects involved, we need to check global
+ // property cells in the stub and therefore the stub will be
+ // specific to the name.
+ Handle<Name> cache_name = factory()->empty_string();
+ Handle<JSObject> current;
+ Handle<Object> next = receiver;
+ Handle<JSGlobalObject> global;
+ do {
+ current = Handle<JSObject>::cast(next);
+ next = Handle<Object>(current->GetPrototype(), isolate_);
+ if (current->IsJSGlobalObject()) {
+ global = Handle<JSGlobalObject>::cast(current);
+ cache_name = name;
+ } else if (!current->HasFastProperties()) {
+ cache_name = name;
+ }
+ } while (!next->IsNull());
-Handle<Code> StubCache::ComputeLoadInterceptor(Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder) {
- ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
- Code::Flags flags =
- Code::ComputeMonomorphicFlags(Code::LOAD_IC, Code::INTERCEPTOR);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
- if (probe->IsCode()) return Handle<Code>::cast(probe);
+ // Compile the stub that is either shared for all names or
+ // name specific if there are global objects involved.
+ Handle<Code> handler = FindHandler(cache_name, receiver, Code::LOAD_IC);
+ if (!handler.is_null()) return handler;
LoadStubCompiler compiler(isolate_);
- Handle<Code> code =
- compiler.CompileLoadInterceptor(receiver, holder, name);
- PROFILE(isolate_, CodeCreateEvent(Logger::LOAD_IC_TAG, *code, *name));
- GDBJIT(AddCode(GDBJITInterface::LOAD_IC, *name, *code));
- JSObject::UpdateMapCodeCache(receiver, name, code);
- return code;
+ handler =
+ compiler.CompileLoadNonexistent(receiver, current, cache_name, global);
+ HeapObject::UpdateMapCodeCache(receiver, cache_name, handler);
+ return handler;
}
-Handle<Code> StubCache::ComputeLoadNormal() {
- return isolate_->builtins()->LoadIC_Normal();
-}
-
-
-Handle<Code> StubCache::ComputeLoadGlobal(Handle<String> name,
+Handle<Code> StubCache::ComputeLoadGlobal(Handle<Name> name,
Handle<JSObject> receiver,
Handle<GlobalObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<PropertyCell> cell,
bool is_dont_delete) {
- ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
- Code::Flags flags =
- Code::ComputeMonomorphicFlags(Code::LOAD_IC, Code::NORMAL);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
- if (probe->IsCode()) return Handle<Code>::cast(probe);
+ Handle<Code> stub = FindIC(name, receiver, Code::LOAD_IC);
+ if (!stub.is_null()) return stub;
LoadStubCompiler compiler(isolate_);
- Handle<Code> code =
+ Handle<Code> ic =
compiler.CompileLoadGlobal(receiver, holder, cell, name, is_dont_delete);
- PROFILE(isolate_, CodeCreateEvent(Logger::LOAD_IC_TAG, *code, *name));
- GDBJIT(AddCode(GDBJITInterface::LOAD_IC, *name, *code));
- JSObject::UpdateMapCodeCache(receiver, name, code);
- return code;
+ HeapObject::UpdateMapCodeCache(receiver, name, ic);
+ return ic;
}
-Handle<Code> StubCache::ComputeKeyedLoadField(Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- int field_index) {
- ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
- Code::Flags flags =
- Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, Code::FIELD);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
- if (probe->IsCode()) return Handle<Code>::cast(probe);
-
- KeyedLoadStubCompiler compiler(isolate_);
- Handle<Code> code =
- compiler.CompileLoadField(name, receiver, holder, field_index);
- PROFILE(isolate_, CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, *code, *name));
- GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, *name, *code));
- JSObject::UpdateMapCodeCache(receiver, name, code);
- return code;
-}
-
-
-Handle<Code> StubCache::ComputeKeyedLoadConstant(Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<JSFunction> value) {
- ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
- Code::Flags flags = Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC,
- Code::CONSTANT_FUNCTION);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
- if (probe->IsCode()) return Handle<Code>::cast(probe);
-
- KeyedLoadStubCompiler compiler(isolate_);
- Handle<Code> code =
- compiler.CompileLoadConstant(name, receiver, holder, value);
- PROFILE(isolate_, CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, *code, *name));
- GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, *name, *code));
- JSObject::UpdateMapCodeCache(receiver, name, code);
- return code;
-}
-
-
-Handle<Code> StubCache::ComputeKeyedLoadInterceptor(Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder) {
- ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
- Code::Flags flags =
- Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, Code::INTERCEPTOR);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
- if (probe->IsCode()) return Handle<Code>::cast(probe);
-
- KeyedLoadStubCompiler compiler(isolate_);
- Handle<Code> code = compiler.CompileLoadInterceptor(receiver, holder, name);
- PROFILE(isolate_, CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, *code, *name));
- GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, *name, *code));
- JSObject::UpdateMapCodeCache(receiver, name, code);
- return code;
-}
-
-
-Handle<Code> StubCache::ComputeKeyedLoadCallback(
- Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<AccessorInfo> callback) {
- ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
- Code::Flags flags =
- Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, Code::CALLBACKS);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
- if (probe->IsCode()) return Handle<Code>::cast(probe);
-
- KeyedLoadStubCompiler compiler(isolate_);
- Handle<Code> code =
- compiler.CompileLoadCallback(name, receiver, holder, callback);
- PROFILE(isolate_, CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, *code, *name));
- GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, *name, *code));
- JSObject::UpdateMapCodeCache(receiver, name, code);
- return code;
-}
-
-
-Handle<Code> StubCache::ComputeKeyedLoadArrayLength(Handle<String> name,
- Handle<JSArray> receiver) {
- Code::Flags flags =
- Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, Code::CALLBACKS);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
- if (probe->IsCode()) return Handle<Code>::cast(probe);
-
- KeyedLoadStubCompiler compiler(isolate_);
- Handle<Code> code = compiler.CompileLoadArrayLength(name);
- PROFILE(isolate_, CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, *code, *name));
- GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, *name, *code));
- JSObject::UpdateMapCodeCache(receiver, name, code);
- return code;
-}
-
+Handle<Code> StubCache::ComputeKeyedLoadElement(Handle<Map> receiver_map) {
+ Code::Flags flags = Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC);
+ Handle<Name> name =
+ isolate()->factory()->KeyedLoadElementMonomorphic_string();
-Handle<Code> StubCache::ComputeKeyedLoadStringLength(Handle<String> name,
- Handle<String> receiver) {
- Code::Flags flags =
- Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, Code::CALLBACKS);
- Handle<Map> map(receiver->map());
- Handle<Object> probe(map->FindInCodeCache(*name, flags));
+ Handle<Object> probe(receiver_map->FindInCodeCache(*name, flags), isolate_);
if (probe->IsCode()) return Handle<Code>::cast(probe);
- KeyedLoadStubCompiler compiler(isolate_);
- Handle<Code> code = compiler.CompileLoadStringLength(name);
- PROFILE(isolate_, CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, *code, *name));
- GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, *name, *code));
- Map::UpdateCodeCache(map, name, code);
- return code;
-}
+ KeyedLoadStubCompiler compiler(isolate());
+ Handle<Code> code = compiler.CompileLoadElement(receiver_map);
-
-Handle<Code> StubCache::ComputeKeyedLoadFunctionPrototype(
- Handle<String> name,
- Handle<JSFunction> receiver) {
- Code::Flags flags =
- Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, Code::CALLBACKS);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
- if (probe->IsCode()) return Handle<Code>::cast(probe);
-
- KeyedLoadStubCompiler compiler(isolate_);
- Handle<Code> code = compiler.CompileLoadFunctionPrototype(name);
- PROFILE(isolate_, CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, *code, *name));
- GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, *name, *code));
- JSObject::UpdateMapCodeCache(receiver, name, code);
+ Map::UpdateCodeCache(receiver_map, name, code);
return code;
}
-Handle<Code> StubCache::ComputeStoreField(Handle<String> name,
- Handle<JSObject> receiver,
- int field_index,
- Handle<Map> transition,
- StrictModeFlag strict_mode) {
- Code::StubType type =
- (transition.is_null()) ? Code::FIELD : Code::MAP_TRANSITION;
+Handle<Code> StubCache::ComputeKeyedStoreElement(
+ Handle<Map> receiver_map,
+ StrictModeFlag strict_mode,
+ KeyedAccessStoreMode store_mode) {
+ Code::ExtraICState extra_state =
+ Code::ComputeExtraICState(store_mode, strict_mode);
Code::Flags flags = Code::ComputeMonomorphicFlags(
- Code::STORE_IC, type, strict_mode);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
- if (probe->IsCode()) return Handle<Code>::cast(probe);
+ Code::KEYED_STORE_IC, extra_state);
- StoreStubCompiler compiler(isolate_, strict_mode);
- Handle<Code> code =
- compiler.CompileStoreField(receiver, field_index, transition, name);
- PROFILE(isolate_, CodeCreateEvent(Logger::STORE_IC_TAG, *code, *name));
- GDBJIT(AddCode(GDBJITInterface::STORE_IC, *name, *code));
- JSObject::UpdateMapCodeCache(receiver, name, code);
- return code;
-}
+ ASSERT(store_mode == STANDARD_STORE ||
+ store_mode == STORE_AND_GROW_NO_TRANSITION ||
+ store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS ||
+ store_mode == STORE_NO_TRANSITION_HANDLE_COW);
-
-Handle<Code> StubCache::ComputeKeyedLoadOrStoreElement(
- Handle<Map> receiver_map,
- KeyedIC::StubKind stub_kind,
- StrictModeFlag strict_mode) {
- KeyedAccessGrowMode grow_mode =
- KeyedIC::GetGrowModeFromStubKind(stub_kind);
- Code::ExtraICState extra_state =
- Code::ComputeExtraICState(grow_mode, strict_mode);
- Code::Flags flags =
- Code::ComputeMonomorphicFlags(
- stub_kind == KeyedIC::LOAD ? Code::KEYED_LOAD_IC
- : Code::KEYED_STORE_IC,
- Code::NORMAL,
- extra_state);
- Handle<String> name;
- switch (stub_kind) {
- case KeyedIC::LOAD:
- name = isolate()->factory()->KeyedLoadElementMonomorphic_symbol();
- break;
- case KeyedIC::STORE_NO_TRANSITION:
- name = isolate()->factory()->KeyedStoreElementMonomorphic_symbol();
- break;
- case KeyedIC::STORE_AND_GROW_NO_TRANSITION:
- name = isolate()->factory()->KeyedStoreAndGrowElementMonomorphic_symbol();
- break;
- default:
- UNREACHABLE();
- break;
- }
- Handle<Object> probe(receiver_map->FindInCodeCache(*name, flags));
+ Handle<String> name =
+ isolate()->factory()->KeyedStoreElementMonomorphic_string();
+ Handle<Object> probe(receiver_map->FindInCodeCache(*name, flags), isolate_);
if (probe->IsCode()) return Handle<Code>::cast(probe);
- Handle<Code> code;
- switch (stub_kind) {
- case KeyedIC::LOAD: {
- KeyedLoadStubCompiler compiler(isolate_);
- code = compiler.CompileLoadElement(receiver_map);
- break;
- }
- case KeyedIC::STORE_AND_GROW_NO_TRANSITION: {
- KeyedStoreStubCompiler compiler(isolate_, strict_mode,
- ALLOW_JSARRAY_GROWTH);
- code = compiler.CompileStoreElement(receiver_map);
- break;
- }
- case KeyedIC::STORE_NO_TRANSITION: {
- KeyedStoreStubCompiler compiler(isolate_, strict_mode,
- DO_NOT_ALLOW_JSARRAY_GROWTH);
- code = compiler.CompileStoreElement(receiver_map);
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
+ KeyedStoreStubCompiler compiler(isolate(), strict_mode, store_mode);
+ Handle<Code> code = compiler.CompileStoreElement(receiver_map);
- ASSERT(!code.is_null());
-
- if (stub_kind == KeyedIC::LOAD) {
- PROFILE(isolate_, CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, *code, 0));
- } else {
- PROFILE(isolate_, CodeCreateEvent(Logger::KEYED_STORE_IC_TAG, *code, 0));
- }
Map::UpdateCodeCache(receiver_map, name, code);
+ ASSERT(Code::GetKeyedAccessStoreMode(code->extra_ic_state()) == store_mode);
return code;
}
-Handle<Code> StubCache::ComputeStoreNormal(StrictModeFlag strict_mode) {
- return (strict_mode == kStrictMode)
- ? isolate_->builtins()->Builtins::StoreIC_Normal_Strict()
- : isolate_->builtins()->Builtins::StoreIC_Normal();
-}
-
-
-Handle<Code> StubCache::ComputeStoreGlobal(Handle<String> name,
+Handle<Code> StubCache::ComputeStoreGlobal(Handle<Name> name,
Handle<GlobalObject> receiver,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<PropertyCell> cell,
+ Handle<Object> value,
StrictModeFlag strict_mode) {
- Code::Flags flags = Code::ComputeMonomorphicFlags(
- Code::STORE_IC, Code::NORMAL, strict_mode);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
- if (probe->IsCode()) return Handle<Code>::cast(probe);
-
- StoreStubCompiler compiler(isolate_, strict_mode);
- Handle<Code> code = compiler.CompileStoreGlobal(receiver, cell, name);
- PROFILE(isolate_, CodeCreateEvent(Logger::STORE_IC_TAG, *code, *name));
- GDBJIT(AddCode(GDBJITInterface::STORE_IC, *name, *code));
- JSObject::UpdateMapCodeCache(receiver, name, code);
- return code;
-}
-
-
-Handle<Code> StubCache::ComputeStoreCallback(Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<AccessorInfo> callback,
- StrictModeFlag strict_mode) {
- ASSERT(v8::ToCData<Address>(callback->setter()) != 0);
- Code::Flags flags = Code::ComputeMonomorphicFlags(
- Code::STORE_IC, Code::CALLBACKS, strict_mode);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
- if (probe->IsCode()) return Handle<Code>::cast(probe);
-
- StoreStubCompiler compiler(isolate_, strict_mode);
- Handle<Code> code =
- compiler.CompileStoreCallback(name, receiver, holder, callback);
- PROFILE(isolate_, CodeCreateEvent(Logger::STORE_IC_TAG, *code, *name));
- GDBJIT(AddCode(GDBJITInterface::STORE_IC, *name, *code));
- JSObject::UpdateMapCodeCache(receiver, name, code);
- return code;
-}
-
+ Handle<Type> union_type = PropertyCell::UpdatedType(cell, value);
+ bool is_constant = union_type->IsConstant();
+ StoreGlobalStub stub(strict_mode, is_constant);
+
+ Handle<Code> code = FindIC(
+ name, Handle<JSObject>::cast(receiver),
+ Code::STORE_IC, stub.GetExtraICState());
+ if (!code.is_null()) return code;
+
+ // Replace the placeholder cell and global object map with the actual global
+ // cell and receiver map.
+ Handle<Map> meta_map(isolate_->heap()->meta_map());
+ Handle<Object> receiver_map(receiver->map(), isolate_);
+ code = stub.GetCodeCopyFromTemplate(isolate_);
+ code->ReplaceNthObject(1, *meta_map, *receiver_map);
+ Handle<Map> cell_map(isolate_->heap()->global_property_cell_map());
+ code->ReplaceNthObject(1, *cell_map, *cell);
+
+ HeapObject::UpdateMapCodeCache(receiver, name, code);
-Handle<Code> StubCache::ComputeStoreViaSetter(Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<JSFunction> setter,
- StrictModeFlag strict_mode) {
- Code::Flags flags = Code::ComputeMonomorphicFlags(
- Code::STORE_IC, Code::CALLBACKS, strict_mode);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
- if (probe->IsCode()) return Handle<Code>::cast(probe);
-
- StoreStubCompiler compiler(isolate_, strict_mode);
- Handle<Code> code =
- compiler.CompileStoreViaSetter(name, receiver, holder, setter);
- PROFILE(isolate_, CodeCreateEvent(Logger::STORE_IC_TAG, *code, *name));
- GDBJIT(AddCode(GDBJITInterface::STORE_IC, *name, *code));
- JSObject::UpdateMapCodeCache(receiver, name, code);
- return code;
-}
-
-
-Handle<Code> StubCache::ComputeStoreInterceptor(Handle<String> name,
- Handle<JSObject> receiver,
- StrictModeFlag strict_mode) {
- Code::Flags flags = Code::ComputeMonomorphicFlags(
- Code::STORE_IC, Code::INTERCEPTOR, strict_mode);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
- if (probe->IsCode()) return Handle<Code>::cast(probe);
-
- StoreStubCompiler compiler(isolate_, strict_mode);
- Handle<Code> code = compiler.CompileStoreInterceptor(receiver, name);
- PROFILE(isolate_, CodeCreateEvent(Logger::STORE_IC_TAG, *code, *name));
- GDBJIT(AddCode(GDBJITInterface::STORE_IC, *name, *code));
- JSObject::UpdateMapCodeCache(receiver, name, code);
- return code;
-}
-
-Handle<Code> StubCache::ComputeKeyedStoreField(Handle<String> name,
- Handle<JSObject> receiver,
- int field_index,
- Handle<Map> transition,
- StrictModeFlag strict_mode) {
- Code::StubType type =
- (transition.is_null()) ? Code::FIELD : Code::MAP_TRANSITION;
- Code::Flags flags = Code::ComputeMonomorphicFlags(
- Code::KEYED_STORE_IC, type, strict_mode);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
- if (probe->IsCode()) return Handle<Code>::cast(probe);
-
- KeyedStoreStubCompiler compiler(isolate(), strict_mode,
- DO_NOT_ALLOW_JSARRAY_GROWTH);
- Handle<Code> code =
- compiler.CompileStoreField(receiver, field_index, transition, name);
- PROFILE(isolate_, CodeCreateEvent(Logger::KEYED_STORE_IC_TAG, *code, *name));
- GDBJIT(AddCode(GDBJITInterface::KEYED_STORE_IC, *name, *code));
- JSObject::UpdateMapCodeCache(receiver, name, code);
return code;
}
@@ -588,40 +297,54 @@ Handle<Code> StubCache::ComputeKeyedStoreField(Handle<String> name,
Handle<Code> StubCache::ComputeCallConstant(int argc,
Code::Kind kind,
Code::ExtraICState extra_state,
- Handle<String> name,
+ Handle<Name> name,
Handle<Object> object,
Handle<JSObject> holder,
Handle<JSFunction> function) {
// Compute the check type and the map.
InlineCacheHolderFlag cache_holder =
IC::GetCodeCacheForObject(*object, *holder);
- Handle<JSObject> map_holder(IC::GetCodeCacheHolder(*object, cache_holder));
+ Handle<JSObject> stub_holder(IC::GetCodeCacheHolder(
+ isolate_, *object, cache_holder));
// Compute check type based on receiver/holder.
CheckType check = RECEIVER_MAP_CHECK;
if (object->IsString()) {
check = STRING_CHECK;
+ } else if (object->IsSymbol()) {
+ check = SYMBOL_CHECK;
} else if (object->IsNumber()) {
check = NUMBER_CHECK;
} else if (object->IsBoolean()) {
check = BOOLEAN_CHECK;
}
- Code::Flags flags =
- Code::ComputeMonomorphicFlags(kind, Code::CONSTANT_FUNCTION, extra_state,
- cache_holder, argc);
- Handle<Object> probe(map_holder->map()->FindInCodeCache(*name, flags));
+ if (check != RECEIVER_MAP_CHECK &&
+ !function->IsBuiltin() &&
+ function->shared()->is_classic_mode()) {
+ // Calling non-strict non-builtins with a value as the receiver
+ // requires boxing.
+ return Handle<Code>::null();
+ }
+
+ Code::Flags flags = Code::ComputeMonomorphicFlags(
+ kind, extra_state, Code::CONSTANT, argc, cache_holder);
+ Handle<Object> probe(stub_holder->map()->FindInCodeCache(*name, flags),
+ isolate_);
if (probe->IsCode()) return Handle<Code>::cast(probe);
CallStubCompiler compiler(isolate_, argc, kind, extra_state, cache_holder);
Handle<Code> code =
- compiler.CompileCallConstant(object, holder, function, name, check);
+ compiler.CompileCallConstant(object, holder, name, check, function);
code->set_check_type(check);
ASSERT(flags == code->flags());
PROFILE(isolate_,
CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG), *code, *name));
GDBJIT(AddCode(GDBJITInterface::CALL_IC, *name, *code));
- JSObject::UpdateMapCodeCache(map_holder, name, code);
+
+ if (CallStubCompiler::CanBeCached(function)) {
+ HeapObject::UpdateMapCodeCache(stub_holder, name, code);
+ }
return code;
}
@@ -629,26 +352,28 @@ Handle<Code> StubCache::ComputeCallConstant(int argc,
Handle<Code> StubCache::ComputeCallField(int argc,
Code::Kind kind,
Code::ExtraICState extra_state,
- Handle<String> name,
+ Handle<Name> name,
Handle<Object> object,
Handle<JSObject> holder,
- int index) {
+ PropertyIndex index) {
// Compute the check type and the map.
InlineCacheHolderFlag cache_holder =
IC::GetCodeCacheForObject(*object, *holder);
- Handle<JSObject> map_holder(IC::GetCodeCacheHolder(*object, cache_holder));
+ Handle<JSObject> stub_holder(IC::GetCodeCacheHolder(
+ isolate_, *object, cache_holder));
// TODO(1233596): We cannot do receiver map check for non-JS objects
// because they may be represented as immediates without a
// map. Instead, we check against the map in the holder.
- if (object->IsNumber() || object->IsBoolean() || object->IsString()) {
+ if (object->IsNumber() || object->IsSymbol() ||
+ object->IsBoolean() || object->IsString()) {
object = holder;
}
- Code::Flags flags =
- Code::ComputeMonomorphicFlags(kind, Code::FIELD, extra_state,
- cache_holder, argc);
- Handle<Object> probe(map_holder->map()->FindInCodeCache(*name, flags));
+ Code::Flags flags = Code::ComputeMonomorphicFlags(
+ kind, extra_state, Code::FIELD, argc, cache_holder);
+ Handle<Object> probe(stub_holder->map()->FindInCodeCache(*name, flags),
+ isolate_);
if (probe->IsCode()) return Handle<Code>::cast(probe);
CallStubCompiler compiler(isolate_, argc, kind, extra_state, cache_holder);
@@ -659,7 +384,7 @@ Handle<Code> StubCache::ComputeCallField(int argc,
PROFILE(isolate_,
CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG), *code, *name));
GDBJIT(AddCode(GDBJITInterface::CALL_IC, *name, *code));
- JSObject::UpdateMapCodeCache(map_holder, name, code);
+ HeapObject::UpdateMapCodeCache(stub_holder, name, code);
return code;
}
@@ -667,25 +392,27 @@ Handle<Code> StubCache::ComputeCallField(int argc,
Handle<Code> StubCache::ComputeCallInterceptor(int argc,
Code::Kind kind,
Code::ExtraICState extra_state,
- Handle<String> name,
+ Handle<Name> name,
Handle<Object> object,
Handle<JSObject> holder) {
// Compute the check type and the map.
InlineCacheHolderFlag cache_holder =
IC::GetCodeCacheForObject(*object, *holder);
- Handle<JSObject> map_holder(IC::GetCodeCacheHolder(*object, cache_holder));
+ Handle<JSObject> stub_holder(IC::GetCodeCacheHolder(
+ isolate_, *object, cache_holder));
// TODO(1233596): We cannot do receiver map check for non-JS objects
// because they may be represented as immediates without a
// map. Instead, we check against the map in the holder.
- if (object->IsNumber() || object->IsBoolean() || object->IsString()) {
+ if (object->IsNumber() || object->IsSymbol() ||
+ object->IsBoolean() || object->IsString()) {
object = holder;
}
- Code::Flags flags =
- Code::ComputeMonomorphicFlags(kind, Code::INTERCEPTOR, extra_state,
- cache_holder, argc);
- Handle<Object> probe(map_holder->map()->FindInCodeCache(*name, flags));
+ Code::Flags flags = Code::ComputeMonomorphicFlags(
+ kind, extra_state, Code::INTERCEPTOR, argc, cache_holder);
+ Handle<Object> probe(stub_holder->map()->FindInCodeCache(*name, flags),
+ isolate_);
if (probe->IsCode()) return Handle<Code>::cast(probe);
CallStubCompiler compiler(isolate(), argc, kind, extra_state, cache_holder);
@@ -696,7 +423,7 @@ Handle<Code> StubCache::ComputeCallInterceptor(int argc,
PROFILE(isolate(),
CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG), *code, *name));
GDBJIT(AddCode(GDBJITInterface::CALL_IC, *name, *code));
- JSObject::UpdateMapCodeCache(map_holder, name, code);
+ HeapObject::UpdateMapCodeCache(stub_holder, name, code);
return code;
}
@@ -704,28 +431,27 @@ Handle<Code> StubCache::ComputeCallInterceptor(int argc,
Handle<Code> StubCache::ComputeCallGlobal(int argc,
Code::Kind kind,
Code::ExtraICState extra_state,
- Handle<String> name,
+ Handle<Name> name,
Handle<JSObject> receiver,
Handle<GlobalObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<PropertyCell> cell,
Handle<JSFunction> function) {
- InlineCacheHolderFlag cache_holder =
- IC::GetCodeCacheForObject(*receiver, *holder);
- Handle<JSObject> map_holder(IC::GetCodeCacheHolder(*receiver, cache_holder));
- Code::Flags flags =
- Code::ComputeMonomorphicFlags(kind, Code::NORMAL, extra_state,
- cache_holder, argc);
- Handle<Object> probe(map_holder->map()->FindInCodeCache(*name, flags));
+ Code::Flags flags = Code::ComputeMonomorphicFlags(
+ kind, extra_state, Code::NORMAL, argc);
+ Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags),
+ isolate_);
if (probe->IsCode()) return Handle<Code>::cast(probe);
- CallStubCompiler compiler(isolate(), argc, kind, extra_state, cache_holder);
+ CallStubCompiler compiler(isolate(), argc, kind, extra_state);
Handle<Code> code =
compiler.CompileCallGlobal(receiver, holder, cell, function, name);
ASSERT(flags == code->flags());
PROFILE(isolate(),
CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG), *code, *name));
GDBJIT(AddCode(GDBJITInterface::CALL_IC, *name, *code));
- JSObject::UpdateMapCodeCache(map_holder, name, code);
+ if (CallStubCompiler::CanBeCached(function)) {
+ HeapObject::UpdateMapCodeCache(receiver, name, code);
+ }
return code;
}
@@ -747,10 +473,8 @@ Code* StubCache::FindCallInitialize(int argc,
CallICBase::Contextual::encode(mode == RelocInfo::CODE_TARGET_CONTEXT);
Code::Flags flags =
Code::ComputeFlags(kind, UNINITIALIZED, extra_state, Code::NORMAL, argc);
-
- // Use raw_unchecked... so we don't get assert failures during GC.
UnseededNumberDictionary* dictionary =
- isolate()->heap()->raw_unchecked_non_monomorphic_cache();
+ isolate()->heap()->non_monomorphic_cache();
int entry = dictionary->FindEntry(isolate(), flags);
ASSERT(entry != -1);
Object* code = dictionary->ValueAt(entry);
@@ -826,11 +550,10 @@ Handle<Code> StubCache::ComputeCallNormal(int argc,
}
-Handle<Code> StubCache::ComputeCallArguments(int argc, Code::Kind kind) {
- ASSERT(kind == Code::KEYED_CALL_IC);
+Handle<Code> StubCache::ComputeCallArguments(int argc) {
Code::Flags flags =
- Code::ComputeFlags(kind, MEGAMORPHIC, Code::kNoExtraICState,
- Code::NORMAL, argc);
+ Code::ComputeFlags(Code::KEYED_CALL_IC, MEGAMORPHIC,
+ Code::kNoExtraICState, Code::NORMAL, argc);
Handle<UnseededNumberDictionary> cache =
isolate_->factory()->non_monomorphic_cache();
int entry = cache->FindEntry(isolate_, flags);
@@ -882,13 +605,101 @@ Handle<Code> StubCache::ComputeCallMiss(int argc,
}
+Handle<Code> StubCache::ComputeCompareNil(Handle<Map> receiver_map,
+ CompareNilICStub& stub) {
+ Handle<String> name(isolate_->heap()->empty_string());
+ if (!receiver_map->is_shared()) {
+ Handle<Code> cached_ic = FindIC(name, receiver_map, Code::COMPARE_NIL_IC,
+ stub.GetExtraICState());
+ if (!cached_ic.is_null()) return cached_ic;
+ }
+
+ Handle<Code> ic = stub.GetCodeCopyFromTemplate(isolate_);
+ ic->ReplaceNthObject(1, isolate_->heap()->meta_map(), *receiver_map);
+
+ if (!receiver_map->is_shared()) {
+ Map::UpdateCodeCache(receiver_map, name, ic);
+ }
+
+ return ic;
+}
+
+
+Handle<Code> StubCache::ComputeLoadElementPolymorphic(
+ MapHandleList* receiver_maps) {
+ Code::Flags flags = Code::ComputeFlags(Code::KEYED_LOAD_IC, POLYMORPHIC);
+ Handle<PolymorphicCodeCache> cache =
+ isolate_->factory()->polymorphic_code_cache();
+ Handle<Object> probe = cache->Lookup(receiver_maps, flags);
+ if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+ CodeHandleList handlers(receiver_maps->length());
+ KeyedLoadStubCompiler compiler(isolate_);
+ compiler.CompileElementHandlers(receiver_maps, &handlers);
+ Handle<Code> code = compiler.CompilePolymorphicIC(
+ receiver_maps, &handlers, factory()->empty_string(),
+ Code::NORMAL, ELEMENT);
+
+ isolate()->counters()->keyed_load_polymorphic_stubs()->Increment();
+
+ PolymorphicCodeCache::Update(cache, receiver_maps, flags, code);
+ return code;
+}
+
+
+Handle<Code> StubCache::ComputePolymorphicIC(MapHandleList* receiver_maps,
+ CodeHandleList* handlers,
+ int number_of_valid_maps,
+ Handle<Name> name,
+ StrictModeFlag strict_mode) {
+ Handle<Code> handler = handlers->at(0);
+ Code::Kind kind = handler->handler_kind();
+ Code::StubType type = number_of_valid_maps == 1 ? handler->type()
+ : Code::NORMAL;
+ if (kind == Code::LOAD_IC) {
+ LoadStubCompiler ic_compiler(isolate_);
+ return ic_compiler.CompilePolymorphicIC(
+ receiver_maps, handlers, name, type, PROPERTY);
+ } else {
+ ASSERT(kind == Code::STORE_IC);
+ StoreStubCompiler ic_compiler(isolate_, strict_mode);
+ return ic_compiler.CompilePolymorphicIC(
+ receiver_maps, handlers, name, type, PROPERTY);
+ }
+}
+
+
+Handle<Code> StubCache::ComputeStoreElementPolymorphic(
+ MapHandleList* receiver_maps,
+ KeyedAccessStoreMode store_mode,
+ StrictModeFlag strict_mode) {
+ ASSERT(store_mode == STANDARD_STORE ||
+ store_mode == STORE_AND_GROW_NO_TRANSITION ||
+ store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS ||
+ store_mode == STORE_NO_TRANSITION_HANDLE_COW);
+ Handle<PolymorphicCodeCache> cache =
+ isolate_->factory()->polymorphic_code_cache();
+ Code::ExtraICState extra_state = Code::ComputeExtraICState(store_mode,
+ strict_mode);
+ Code::Flags flags =
+ Code::ComputeFlags(Code::KEYED_STORE_IC, POLYMORPHIC, extra_state);
+ Handle<Object> probe = cache->Lookup(receiver_maps, flags);
+ if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+ KeyedStoreStubCompiler compiler(isolate_, strict_mode, store_mode);
+ Handle<Code> code = compiler.CompileStoreElementPolymorphic(receiver_maps);
+ PolymorphicCodeCache::Update(cache, receiver_maps, flags, code);
+ return code;
+}
+
+
#ifdef ENABLE_DEBUGGER_SUPPORT
Handle<Code> StubCache::ComputeCallDebugBreak(int argc,
Code::Kind kind) {
// Extra IC state is irrelevant for debug break ICs. They jump to
// the actual call IC to carry out the work.
Code::Flags flags =
- Code::ComputeFlags(kind, DEBUG_BREAK, Code::kNoExtraICState,
+ Code::ComputeFlags(kind, DEBUG_STUB, DEBUG_BREAK,
Code::NORMAL, argc);
Handle<UnseededNumberDictionary> cache =
isolate_->factory()->non_monomorphic_cache();
@@ -907,7 +718,7 @@ Handle<Code> StubCache::ComputeCallDebugPrepareStepIn(int argc,
// Extra IC state is irrelevant for debug break ICs. They jump to
// the actual call IC to carry out the work.
Code::Flags flags =
- Code::ComputeFlags(kind, DEBUG_PREPARE_STEP_IN, Code::kNoExtraICState,
+ Code::ComputeFlags(kind, DEBUG_STUB, DEBUG_PREPARE_STEP_IN,
Code::NORMAL, argc);
Handle<UnseededNumberDictionary> cache =
isolate_->factory()->non_monomorphic_cache();
@@ -926,55 +737,52 @@ void StubCache::Clear() {
Code* empty = isolate_->builtins()->builtin(Builtins::kIllegal);
for (int i = 0; i < kPrimaryTableSize; i++) {
primary_[i].key = heap()->empty_string();
+ primary_[i].map = NULL;
primary_[i].value = empty;
}
for (int j = 0; j < kSecondaryTableSize; j++) {
secondary_[j].key = heap()->empty_string();
+ secondary_[j].map = NULL;
secondary_[j].value = empty;
}
}
void StubCache::CollectMatchingMaps(SmallMapList* types,
- String* name,
+ Handle<Name> name,
Code::Flags flags,
Handle<Context> native_context,
Zone* zone) {
for (int i = 0; i < kPrimaryTableSize; i++) {
- if (primary_[i].key == name) {
- Map* map = primary_[i].value->FindFirstMap();
+ if (primary_[i].key == *name) {
+ Map* map = primary_[i].map;
// Map can be NULL if the stub is a constant function call
// with a primitive receiver.
if (map == NULL) continue;
- int offset = PrimaryOffset(name, flags, map);
+ int offset = PrimaryOffset(*name, flags, map);
if (entry(primary_, offset) == &primary_[i] &&
!TypeFeedbackOracle::CanRetainOtherContext(map, *native_context)) {
- types->Add(Handle<Map>(map), zone);
+ types->AddMapIfMissing(Handle<Map>(map), zone);
}
}
}
for (int i = 0; i < kSecondaryTableSize; i++) {
- if (secondary_[i].key == name) {
- Map* map = secondary_[i].value->FindFirstMap();
+ if (secondary_[i].key == *name) {
+ Map* map = secondary_[i].map;
// Map can be NULL if the stub is a constant function call
// with a primitive receiver.
if (map == NULL) continue;
// Lookup in primary table and skip duplicates.
- int primary_offset = PrimaryOffset(name, flags, map);
- Entry* primary_entry = entry(primary_, primary_offset);
- if (primary_entry->key == name) {
- Map* primary_map = primary_entry->value->FindFirstMap();
- if (map == primary_map) continue;
- }
+ int primary_offset = PrimaryOffset(*name, flags, map);
// Lookup in secondary table and add matches.
- int offset = SecondaryOffset(name, flags, primary_offset);
+ int offset = SecondaryOffset(*name, flags, primary_offset);
if (entry(secondary_, offset) == &secondary_[i] &&
!TypeFeedbackOracle::CanRetainOtherContext(map, *native_context)) {
- types->Add(Handle<Map>(map), zone);
+ types->AddMapIfMissing(Handle<Map>(map), zone);
}
}
}
@@ -985,59 +793,31 @@ void StubCache::CollectMatchingMaps(SmallMapList* types,
// StubCompiler implementation.
-RUNTIME_FUNCTION(MaybeObject*, LoadCallbackProperty) {
- ASSERT(args[0]->IsJSObject());
- ASSERT(args[1]->IsJSObject());
- ASSERT(args[3]->IsSmi());
- AccessorInfo* callback = AccessorInfo::cast(args[4]);
- Address getter_address = v8::ToCData<Address>(callback->getter());
- v8::AccessorGetter fun = FUNCTION_CAST<v8::AccessorGetter>(getter_address);
- ASSERT(fun != NULL);
- ASSERT(callback->IsCompatibleReceiver(args[0]));
- v8::AccessorInfo info(&args[0]);
- HandleScope scope(isolate);
- v8::Handle<v8::Value> result;
- {
- // Leaving JavaScript.
- VMState state(isolate, EXTERNAL);
- ExternalCallbackScope call_scope(isolate, getter_address);
- result = fun(v8::Utils::ToLocal(args.at<String>(5)), info);
- }
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- if (result.IsEmpty()) return HEAP->undefined_value();
- Handle<Object> result_internal = v8::Utils::OpenHandle(*result);
- result_internal->VerifyApiCallResultType();
- return *result_internal;
-}
-
-
RUNTIME_FUNCTION(MaybeObject*, StoreCallbackProperty) {
JSObject* recv = JSObject::cast(args[0]);
- AccessorInfo* callback = AccessorInfo::cast(args[1]);
+ ExecutableAccessorInfo* callback = ExecutableAccessorInfo::cast(args[1]);
Address setter_address = v8::ToCData<Address>(callback->setter());
- v8::AccessorSetter fun = FUNCTION_CAST<v8::AccessorSetter>(setter_address);
+ v8::AccessorSetterCallback fun =
+ FUNCTION_CAST<v8::AccessorSetterCallback>(setter_address);
ASSERT(fun != NULL);
ASSERT(callback->IsCompatibleReceiver(recv));
- Handle<String> name = args.at<String>(2);
+ Handle<Name> name = args.at<Name>(2);
Handle<Object> value = args.at<Object>(3);
HandleScope scope(isolate);
+
+ // TODO(rossberg): Support symbols in the API.
+ if (name->IsSymbol()) return *value;
+ Handle<String> str = Handle<String>::cast(name);
+
LOG(isolate, ApiNamedPropertyAccess("store", recv, *name));
- CustomArguments custom_args(isolate, callback->data(), recv, recv);
- v8::AccessorInfo info(custom_args.end());
- {
- // Leaving JavaScript.
- VMState state(isolate, EXTERNAL);
- ExternalCallbackScope call_scope(isolate, setter_address);
- fun(v8::Utils::ToLocal(name), v8::Utils::ToLocal(value), info);
- }
+ PropertyCallbackArguments
+ custom_args(isolate, callback->data(), recv, recv);
+ custom_args.Call(fun, v8::Utils::ToLocal(str), v8::Utils::ToLocal(value));
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
return *value;
}
-static const int kAccessorInfoOffsetInInterceptorArgs = 2;
-
-
/**
* Attempts to load a property with an interceptor (which must be present),
* but doesn't search the prototype chain.
@@ -1046,30 +826,33 @@ static const int kAccessorInfoOffsetInInterceptorArgs = 2;
* provide any value for the given name.
*/
RUNTIME_FUNCTION(MaybeObject*, LoadPropertyWithInterceptorOnly) {
- Handle<String> name_handle = args.at<String>(0);
- Handle<InterceptorInfo> interceptor_info = args.at<InterceptorInfo>(1);
- ASSERT(kAccessorInfoOffsetInInterceptorArgs == 2);
- ASSERT(args[2]->IsJSObject()); // Receiver.
- ASSERT(args[3]->IsJSObject()); // Holder.
- ASSERT(args[5]->IsSmi()); // Isolate.
- ASSERT(args.length() == 6);
+ ASSERT(args.length() == StubCache::kInterceptorArgsLength);
+ Handle<Name> name_handle =
+ args.at<Name>(StubCache::kInterceptorArgsNameIndex);
+ Handle<InterceptorInfo> interceptor_info =
+ args.at<InterceptorInfo>(StubCache::kInterceptorArgsInfoIndex);
+
+ // TODO(rossberg): Support symbols in the API.
+ if (name_handle->IsSymbol())
+ return isolate->heap()->no_interceptor_result_sentinel();
+ Handle<String> name = Handle<String>::cast(name_handle);
Address getter_address = v8::ToCData<Address>(interceptor_info->getter());
- v8::NamedPropertyGetter getter =
- FUNCTION_CAST<v8::NamedPropertyGetter>(getter_address);
+ v8::NamedPropertyGetterCallback getter =
+ FUNCTION_CAST<v8::NamedPropertyGetterCallback>(getter_address);
ASSERT(getter != NULL);
+ Handle<JSObject> receiver =
+ args.at<JSObject>(StubCache::kInterceptorArgsThisIndex);
+ Handle<JSObject> holder =
+ args.at<JSObject>(StubCache::kInterceptorArgsHolderIndex);
+ PropertyCallbackArguments callback_args(
+ isolate, interceptor_info->data(), *receiver, *holder);
{
// Use the interceptor getter.
- v8::AccessorInfo info(args.arguments() -
- kAccessorInfoOffsetInInterceptorArgs);
HandleScope scope(isolate);
- v8::Handle<v8::Value> r;
- {
- // Leaving JavaScript.
- VMState state(isolate, EXTERNAL);
- r = getter(v8::Utils::ToLocal(name_handle), info);
- }
+ v8::Handle<v8::Value> r =
+ callback_args.Call(getter, v8::Utils::ToLocal(name));
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
if (!r.IsEmpty()) {
Handle<Object> result = v8::Utils::OpenHandle(*r);
@@ -1082,65 +865,70 @@ RUNTIME_FUNCTION(MaybeObject*, LoadPropertyWithInterceptorOnly) {
}
-static MaybeObject* ThrowReferenceError(String* name) {
+static MaybeObject* ThrowReferenceError(Isolate* isolate, Name* name) {
// If the load is non-contextual, just return the undefined result.
// Note that both keyed and non-keyed loads may end up here, so we
// can't use either LoadIC or KeyedLoadIC constructors.
- IC ic(IC::NO_EXTRA_FRAME, Isolate::Current());
+ HandleScope scope(isolate);
+ IC ic(IC::NO_EXTRA_FRAME, isolate);
ASSERT(ic.target()->is_load_stub() || ic.target()->is_keyed_load_stub());
- if (!ic.SlowIsContextual()) return HEAP->undefined_value();
+ if (!ic.SlowIsUndeclaredGlobal()) return isolate->heap()->undefined_value();
// Throw a reference error.
- HandleScope scope;
- Handle<String> name_handle(name);
+ Handle<Name> name_handle(name);
Handle<Object> error =
- FACTORY->NewReferenceError("not_defined",
- HandleVector(&name_handle, 1));
- return Isolate::Current()->Throw(*error);
+ isolate->factory()->NewReferenceError("not_defined",
+ HandleVector(&name_handle, 1));
+ return isolate->Throw(*error);
}
-static MaybeObject* LoadWithInterceptor(Arguments* args,
- PropertyAttributes* attrs) {
- Handle<String> name_handle = args->at<String>(0);
- Handle<InterceptorInfo> interceptor_info = args->at<InterceptorInfo>(1);
- ASSERT(kAccessorInfoOffsetInInterceptorArgs == 2);
- Handle<JSObject> receiver_handle = args->at<JSObject>(2);
- Handle<JSObject> holder_handle = args->at<JSObject>(3);
- ASSERT(args->length() == 6);
+static Handle<Object> LoadWithInterceptor(Arguments* args,
+ PropertyAttributes* attrs) {
+ ASSERT(args->length() == StubCache::kInterceptorArgsLength);
+ Handle<Name> name_handle =
+ args->at<Name>(StubCache::kInterceptorArgsNameIndex);
+ Handle<InterceptorInfo> interceptor_info =
+ args->at<InterceptorInfo>(StubCache::kInterceptorArgsInfoIndex);
+ Handle<JSObject> receiver_handle =
+ args->at<JSObject>(StubCache::kInterceptorArgsThisIndex);
+ Handle<JSObject> holder_handle =
+ args->at<JSObject>(StubCache::kInterceptorArgsHolderIndex);
Isolate* isolate = receiver_handle->GetIsolate();
+ // TODO(rossberg): Support symbols in the API.
+ if (name_handle->IsSymbol()) {
+ return JSObject::GetPropertyPostInterceptor(
+ holder_handle, receiver_handle, name_handle, attrs);
+ }
+ Handle<String> name = Handle<String>::cast(name_handle);
+
Address getter_address = v8::ToCData<Address>(interceptor_info->getter());
- v8::NamedPropertyGetter getter =
- FUNCTION_CAST<v8::NamedPropertyGetter>(getter_address);
+ v8::NamedPropertyGetterCallback getter =
+ FUNCTION_CAST<v8::NamedPropertyGetterCallback>(getter_address);
ASSERT(getter != NULL);
+ PropertyCallbackArguments callback_args(isolate,
+ interceptor_info->data(),
+ *receiver_handle,
+ *holder_handle);
{
- // Use the interceptor getter.
- v8::AccessorInfo info(args->arguments() -
- kAccessorInfoOffsetInInterceptorArgs);
HandleScope scope(isolate);
- v8::Handle<v8::Value> r;
- {
- // Leaving JavaScript.
- VMState state(isolate, EXTERNAL);
- r = getter(v8::Utils::ToLocal(name_handle), info);
- }
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
+ // Use the interceptor getter.
+ v8::Handle<v8::Value> r =
+ callback_args.Call(getter, v8::Utils::ToLocal(name));
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
if (!r.IsEmpty()) {
*attrs = NONE;
Handle<Object> result = v8::Utils::OpenHandle(*r);
result->VerifyApiCallResultType();
- return *result;
+ return scope.CloseAndEscape(result);
}
}
- MaybeObject* result = holder_handle->GetPropertyPostInterceptor(
- *receiver_handle,
- *name_handle,
- attrs);
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
+ Handle<Object> result = JSObject::GetPropertyPostInterceptor(
+ holder_handle, receiver_handle, name_handle, attrs);
return result;
}
@@ -1151,40 +939,42 @@ static MaybeObject* LoadWithInterceptor(Arguments* args,
*/
RUNTIME_FUNCTION(MaybeObject*, LoadPropertyWithInterceptorForLoad) {
PropertyAttributes attr = NONE;
- Object* result;
- { MaybeObject* maybe_result = LoadWithInterceptor(&args, &attr);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
+ HandleScope scope(isolate);
+ Handle<Object> result = LoadWithInterceptor(&args, &attr);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
// If the property is present, return it.
- if (attr != ABSENT) return result;
- return ThrowReferenceError(String::cast(args[0]));
+ if (attr != ABSENT) return *result;
+ return ThrowReferenceError(isolate, Name::cast(args[0]));
}
RUNTIME_FUNCTION(MaybeObject*, LoadPropertyWithInterceptorForCall) {
PropertyAttributes attr;
- MaybeObject* result = LoadWithInterceptor(&args, &attr);
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
+ HandleScope scope(isolate);
+ Handle<Object> result = LoadWithInterceptor(&args, &attr);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
// This is a call IC. In this case, we simply return the undefined result, which
// will lead to an exception when trying to invoke the result as a
// function.
- return result;
+ return *result;
}
RUNTIME_FUNCTION(MaybeObject*, StoreInterceptorProperty) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 4);
- JSObject* recv = JSObject::cast(args[0]);
- String* name = String::cast(args[1]);
- Object* value = args[2];
+ Handle<JSObject> recv(JSObject::cast(args[0]));
+ Handle<Name> name(Name::cast(args[1]));
+ Handle<Object> value(args[2], isolate);
ASSERT(args.smi_at(3) == kStrictMode || args.smi_at(3) == kNonStrictMode);
StrictModeFlag strict_mode = static_cast<StrictModeFlag>(args.smi_at(3));
ASSERT(recv->HasNamedInterceptor());
PropertyAttributes attr = NONE;
- MaybeObject* result = recv->SetPropertyWithInterceptor(
- name, value, attr, strict_mode);
- return result;
+ Handle<Object> result = JSObject::SetPropertyWithInterceptor(
+ recv, name, value, attr, strict_mode);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
}
@@ -1358,15 +1148,15 @@ Handle<Code> StubCompiler::GetCodeWithFlags(Code::Flags flags,
Handle<Code> StubCompiler::GetCodeWithFlags(Code::Flags flags,
- Handle<String> name) {
- return (FLAG_print_code_stubs && !name.is_null())
- ? GetCodeWithFlags(flags, *name->ToCString())
- : GetCodeWithFlags(flags, reinterpret_cast<char*>(NULL));
+ Handle<Name> name) {
+ return (FLAG_print_code_stubs && !name.is_null() && name->IsString())
+ ? GetCodeWithFlags(flags, *Handle<String>::cast(name)->ToCString())
+ : GetCodeWithFlags(flags, NULL);
}
void StubCompiler::LookupPostInterceptor(Handle<JSObject> holder,
- Handle<String> name,
+ Handle<Name> name,
LookupResult* lookup) {
holder->LocalLookupRealNamedProperty(*name, lookup);
if (lookup->IsFound()) return;
@@ -1375,49 +1165,522 @@ void StubCompiler::LookupPostInterceptor(Handle<JSObject> holder,
}
-Handle<Code> LoadStubCompiler::GetCode(Code::StubType type,
- Handle<String> name) {
- Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, type);
+#define __ ACCESS_MASM(masm())
+
+
+Register LoadStubCompiler::HandlerFrontendHeader(
+ Handle<JSObject> object,
+ Register object_reg,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ Label* miss) {
+ return CheckPrototypes(object, object_reg, holder,
+ scratch1(), scratch2(), scratch3(),
+ name, miss, SKIP_RECEIVER);
+}
+
+
+// HandlerFrontend for store uses the name register. It has to be restored
+// before a miss.
+Register StoreStubCompiler::HandlerFrontendHeader(
+ Handle<JSObject> object,
+ Register object_reg,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ Label* miss) {
+ return CheckPrototypes(object, object_reg, holder,
+ this->name(), scratch1(), scratch2(),
+ name, miss, SKIP_RECEIVER);
+}
+
+
+Register BaseLoadStoreStubCompiler::HandlerFrontend(Handle<JSObject> object,
+ Register object_reg,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ Label* success) {
+ Label miss;
+
+ Register reg = HandlerFrontendHeader(object, object_reg, holder, name, &miss);
+
+ HandlerFrontendFooter(name, success, &miss);
+ return reg;
+}
+
+
+void LoadStubCompiler::NonexistentHandlerFrontend(
+ Handle<JSObject> object,
+ Handle<JSObject> last,
+ Handle<Name> name,
+ Label* success,
+ Handle<JSGlobalObject> global) {
+ Label miss;
+
+ Register holder =
+ HandlerFrontendHeader(object, receiver(), last, name, &miss);
+
+ if (!last->HasFastProperties() &&
+ !last->IsJSGlobalObject() &&
+ !last->IsJSGlobalProxy()) {
+ if (!name->IsUniqueName()) {
+ ASSERT(name->IsString());
+ name = factory()->InternalizeString(Handle<String>::cast(name));
+ }
+ ASSERT(last->property_dictionary()->FindEntry(*name) ==
+ NameDictionary::kNotFound);
+ GenerateDictionaryNegativeLookup(masm(), &miss, holder, name,
+ scratch2(), scratch3());
+ }
+
+ // If the last object in the prototype chain is a global object,
+ // check that the global property cell is empty.
+ if (!global.is_null()) {
+ GenerateCheckPropertyCell(masm(), global, name, scratch2(), &miss);
+ }
+
+ HandlerFrontendFooter(name, success, &miss);
+}
+
+
+Handle<Code> LoadStubCompiler::CompileLoadField(
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ PropertyIndex field,
+ Representation representation) {
+ Label miss;
+
+ Register reg = HandlerFrontendHeader(object, receiver(), holder, name, &miss);
+
+ GenerateLoadField(reg, holder, field, representation);
+
+ __ bind(&miss);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+
+ // Return the generated code.
+ return GetCode(kind(), Code::FIELD, name);
+}
+
+
+Handle<Code> LoadStubCompiler::CompileLoadConstant(
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ Handle<Object> value) {
+ Label success;
+ HandlerFrontend(object, receiver(), holder, name, &success);
+ __ bind(&success);
+ GenerateLoadConstant(value);
+
+ // Return the generated code.
+ return GetCode(kind(), Code::CONSTANT, name);
+}
+
+
+Handle<Code> LoadStubCompiler::CompileLoadCallback(
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ Handle<ExecutableAccessorInfo> callback) {
+ Label success;
+
+ Register reg = CallbackHandlerFrontend(
+ object, receiver(), holder, name, &success, callback);
+ __ bind(&success);
+ GenerateLoadCallback(reg, callback);
+
+ // Return the generated code.
+ return GetCode(kind(), Code::CALLBACKS, name);
+}
+
+
+Handle<Code> LoadStubCompiler::CompileLoadCallback(
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ const CallOptimization& call_optimization) {
+ ASSERT(call_optimization.is_simple_api_call());
+ Label success;
+
+ Handle<JSFunction> callback = call_optimization.constant_function();
+ CallbackHandlerFrontend(
+ object, receiver(), holder, name, &success, callback);
+ __ bind(&success);
+ GenerateLoadCallback(call_optimization);
+
+ // Return the generated code.
+ return GetCode(kind(), Code::CALLBACKS, name);
+}
+
+
+Handle<Code> LoadStubCompiler::CompileLoadInterceptor(
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<Name> name) {
+ Label success;
+
+ LookupResult lookup(isolate());
+ LookupPostInterceptor(holder, name, &lookup);
+
+ Register reg = HandlerFrontend(object, receiver(), holder, name, &success);
+ __ bind(&success);
+ // TODO(368): Compile in the whole chain: all the interceptors in
+ // prototypes and ultimate answer.
+ GenerateLoadInterceptor(reg, object, holder, &lookup, name);
+
+ // Return the generated code.
+ return GetCode(kind(), Code::INTERCEPTOR, name);
+}
+
+
+void LoadStubCompiler::GenerateLoadPostInterceptor(
+ Register interceptor_reg,
+ Handle<JSObject> interceptor_holder,
+ Handle<Name> name,
+ LookupResult* lookup) {
+ Label success;
+ Handle<JSObject> holder(lookup->holder());
+ if (lookup->IsField()) {
+ PropertyIndex field = lookup->GetFieldIndex();
+ if (interceptor_holder.is_identical_to(holder)) {
+ GenerateLoadField(
+ interceptor_reg, holder, field, lookup->representation());
+ } else {
+ // We found a FIELD property in the prototype chain of the interceptor's
+ // holder. Retrieve the field from the field's holder.
+ Register reg = HandlerFrontend(
+ interceptor_holder, interceptor_reg, holder, name, &success);
+ __ bind(&success);
+ GenerateLoadField(
+ reg, holder, field, lookup->representation());
+ }
+ } else {
+ // We found a CALLBACKS property in the prototype chain of the
+ // interceptor's holder.
+ ASSERT(lookup->type() == CALLBACKS);
+ Handle<ExecutableAccessorInfo> callback(
+ ExecutableAccessorInfo::cast(lookup->GetCallbackObject()));
+ ASSERT(callback->getter() != NULL);
+
+ Register reg = CallbackHandlerFrontend(
+ interceptor_holder, interceptor_reg, holder, name, &success, callback);
+ __ bind(&success);
+ GenerateLoadCallback(reg, callback);
+ }
+}
+
+
+Handle<Code> BaseLoadStoreStubCompiler::CompileMonomorphicIC(
+ Handle<Map> receiver_map,
+ Handle<Code> handler,
+ Handle<Name> name) {
+ MapHandleList receiver_maps(1);
+ receiver_maps.Add(receiver_map);
+ CodeHandleList handlers(1);
+ handlers.Add(handler);
+ Code::StubType type = handler->type();
+ return CompilePolymorphicIC(&receiver_maps, &handlers, name, type, PROPERTY);
+}
+
+
+Handle<Code> LoadStubCompiler::CompileLoadViaGetter(
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ Handle<JSFunction> getter) {
+ Label success;
+ HandlerFrontend(object, receiver(), holder, name, &success);
+
+ __ bind(&success);
+ GenerateLoadViaGetter(masm(), receiver(), getter);
+
+ // Return the generated code.
+ return GetCode(kind(), Code::CALLBACKS, name);
+}
+
+
+Handle<Code> StoreStubCompiler::CompileStoreTransition(
+ Handle<JSObject> object,
+ LookupResult* lookup,
+ Handle<Map> transition,
+ Handle<Name> name) {
+ Label miss, slow;
+
+ // Ensure no transitions to deprecated maps are followed.
+ __ CheckMapDeprecated(transition, scratch1(), &miss);
+
+ // Check that we are allowed to write this.
+ if (object->GetPrototype()->IsJSObject()) {
+ Handle<JSObject> holder;
+ // holder == object indicates that no property was found.
+ if (lookup->holder() != *object) {
+ holder = Handle<JSObject>(lookup->holder());
+ } else {
+ // Find the top object.
+ holder = object;
+ do {
+ holder = Handle<JSObject>(JSObject::cast(holder->GetPrototype()));
+ } while (holder->GetPrototype()->IsJSObject());
+ }
+
+ Register holder_reg =
+ HandlerFrontendHeader(object, receiver(), holder, name, &miss);
+
+ // If no property was found, and the holder (the last object in the
+ // prototype chain) is in slow mode, we need to do a negative lookup on the
+ // holder.
+ if (lookup->holder() == *object) {
+ GenerateNegativeHolderLookup(masm(), holder, holder_reg, name, &miss);
+ }
+ }
+
+ GenerateStoreTransition(masm(),
+ object,
+ lookup,
+ transition,
+ name,
+ receiver(), this->name(), value(),
+ scratch1(), scratch2(), scratch3(),
+ &miss,
+ &slow);
+
+ // Handle store cache miss.
+ GenerateRestoreName(masm(), &miss, name);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+
+ GenerateRestoreName(masm(), &slow, name);
+ TailCallBuiltin(masm(), SlowBuiltin(kind()));
+
+ // Return the generated code.
+ return GetCode(kind(), Code::TRANSITION, name);
+}
+
+
+Handle<Code> StoreStubCompiler::CompileStoreField(Handle<JSObject> object,
+ LookupResult* lookup,
+ Handle<Name> name) {
+ Label miss;
+
+ HandlerFrontendHeader(object, receiver(), object, name, &miss);
+
+ // Generate store field code.
+ GenerateStoreField(masm(),
+ object,
+ lookup,
+ receiver(), this->name(), value(), scratch1(), scratch2(),
+ &miss);
+
+ // Handle store cache miss.
+ __ bind(&miss);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+
+ // Return the generated code.
+ return GetCode(kind(), Code::FIELD, name);
+}
+
+
+Handle<Code> StoreStubCompiler::CompileStoreViaSetter(
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ Handle<JSFunction> setter) {
+ Label success;
+ HandlerFrontend(object, receiver(), holder, name, &success);
+
+ __ bind(&success);
+ GenerateStoreViaSetter(masm(), setter);
+
+ return GetCode(kind(), Code::CALLBACKS, name);
+}
+
+
+Handle<Code> KeyedLoadStubCompiler::CompileLoadElement(
+ Handle<Map> receiver_map) {
+ ElementsKind elements_kind = receiver_map->elements_kind();
+ if (receiver_map->has_fast_elements() ||
+ receiver_map->has_external_array_elements()) {
+ Handle<Code> stub = KeyedLoadFastElementStub(
+ receiver_map->instance_type() == JS_ARRAY_TYPE,
+ elements_kind).GetCode(isolate());
+ __ DispatchMap(receiver(), scratch1(), receiver_map, stub, DO_SMI_CHECK);
+ } else {
+ Handle<Code> stub =
+ KeyedLoadDictionaryElementStub().GetCode(isolate());
+ __ DispatchMap(receiver(), scratch1(), receiver_map, stub, DO_SMI_CHECK);
+ }
+
+ TailCallBuiltin(masm(), Builtins::kKeyedLoadIC_Miss);
+
+ // Return the generated code.
+ return GetICCode(kind(), Code::NORMAL, factory()->empty_string());
+}
+
+
+Handle<Code> KeyedStoreStubCompiler::CompileStoreElement(
+ Handle<Map> receiver_map) {
+ ElementsKind elements_kind = receiver_map->elements_kind();
+ bool is_jsarray = receiver_map->instance_type() == JS_ARRAY_TYPE;
+ Handle<Code> stub;
+ if (receiver_map->has_fast_elements() ||
+ receiver_map->has_external_array_elements()) {
+ stub = KeyedStoreFastElementStub(
+ is_jsarray,
+ elements_kind,
+ store_mode_).GetCode(isolate());
+ } else {
+ stub = KeyedStoreElementStub(is_jsarray,
+ elements_kind,
+ store_mode_).GetCode(isolate());
+ }
+
+ __ DispatchMap(receiver(), scratch1(), receiver_map, stub, DO_SMI_CHECK);
+
+ TailCallBuiltin(masm(), Builtins::kKeyedStoreIC_Miss);
+
+ // Return the generated code.
+ return GetICCode(kind(), Code::NORMAL, factory()->empty_string());
+}
+
+
+#undef __
+
+
+void StubCompiler::TailCallBuiltin(MacroAssembler* masm, Builtins::Name name) {
+ Handle<Code> code(masm->isolate()->builtins()->builtin(name));
+ GenerateTailCall(masm, code);
+}
+
+
+void BaseLoadStoreStubCompiler::JitEvent(Handle<Name> name, Handle<Code> code) {
+#ifdef ENABLE_GDB_JIT_INTERFACE
+ GDBJITInterface::CodeTag tag;
+ if (kind_ == Code::LOAD_IC) {
+ tag = GDBJITInterface::LOAD_IC;
+ } else if (kind_ == Code::KEYED_LOAD_IC) {
+ tag = GDBJITInterface::KEYED_LOAD_IC;
+ } else if (kind_ == Code::STORE_IC) {
+ tag = GDBJITInterface::STORE_IC;
+ } else {
+ tag = GDBJITInterface::KEYED_STORE_IC;
+ }
+ GDBJIT(AddCode(tag, *name, *code));
+#endif
+}
+
+
+void BaseLoadStoreStubCompiler::InitializeRegisters() {
+ if (kind_ == Code::LOAD_IC) {
+ registers_ = LoadStubCompiler::registers();
+ } else if (kind_ == Code::KEYED_LOAD_IC) {
+ registers_ = KeyedLoadStubCompiler::registers();
+ } else if (kind_ == Code::STORE_IC) {
+ registers_ = StoreStubCompiler::registers();
+ } else {
+ registers_ = KeyedStoreStubCompiler::registers();
+ }
+}
+
+
+Handle<Code> BaseLoadStoreStubCompiler::GetICCode(Code::Kind kind,
+ Code::StubType type,
+ Handle<Name> name,
+ InlineCacheState state) {
+ Code::Flags flags = Code::ComputeFlags(kind, state, extra_state(), type);
Handle<Code> code = GetCodeWithFlags(flags, name);
- PROFILE(isolate(), CodeCreateEvent(Logger::LOAD_IC_TAG, *code, *name));
- GDBJIT(AddCode(GDBJITInterface::LOAD_IC, *name, *code));
+ PROFILE(isolate(), CodeCreateEvent(log_kind(code), *code, *name));
+ JitEvent(name, code);
return code;
}
-Handle<Code> KeyedLoadStubCompiler::GetCode(Code::StubType type,
- Handle<String> name,
- InlineCacheState state) {
+Handle<Code> BaseLoadStoreStubCompiler::GetCode(Code::Kind kind,
+ Code::StubType type,
+ Handle<Name> name) {
+ ASSERT(type != Code::NORMAL);
Code::Flags flags = Code::ComputeFlags(
- Code::KEYED_LOAD_IC, state, Code::kNoExtraICState, type);
+ Code::HANDLER, MONOMORPHIC, extra_state(), type, kind);
Handle<Code> code = GetCodeWithFlags(flags, name);
- PROFILE(isolate(), CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, *code, *name));
- GDBJIT(AddCode(GDBJITInterface::LOAD_IC, *name, *code));
+ PROFILE(isolate(), CodeCreateEvent(log_kind(code), *code, *name));
+ JitEvent(name, code);
return code;
}
-Handle<Code> StoreStubCompiler::GetCode(Code::StubType type,
- Handle<String> name) {
- Code::Flags flags =
- Code::ComputeMonomorphicFlags(Code::STORE_IC, type, strict_mode_);
- Handle<Code> code = GetCodeWithFlags(flags, name);
- PROFILE(isolate(), CodeCreateEvent(Logger::STORE_IC_TAG, *code, *name));
- GDBJIT(AddCode(GDBJITInterface::STORE_IC, *name, *code));
- return code;
+void KeyedLoadStubCompiler::CompileElementHandlers(MapHandleList* receiver_maps,
+ CodeHandleList* handlers) {
+ for (int i = 0; i < receiver_maps->length(); ++i) {
+ Handle<Map> receiver_map = receiver_maps->at(i);
+ Handle<Code> cached_stub;
+
+ if ((receiver_map->instance_type() & kNotStringTag) == 0) {
+ cached_stub = isolate()->builtins()->KeyedLoadIC_String();
+ } else {
+ bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
+ ElementsKind elements_kind = receiver_map->elements_kind();
+
+ if (IsFastElementsKind(elements_kind) ||
+ IsExternalArrayElementsKind(elements_kind)) {
+ cached_stub =
+ KeyedLoadFastElementStub(is_js_array,
+ elements_kind).GetCode(isolate());
+ } else {
+ ASSERT(elements_kind == DICTIONARY_ELEMENTS);
+ cached_stub = KeyedLoadDictionaryElementStub().GetCode(isolate());
+ }
+ }
+
+ handlers->Add(cached_stub);
+ }
}
-Handle<Code> KeyedStoreStubCompiler::GetCode(Code::StubType type,
- Handle<String> name,
- InlineCacheState state) {
- Code::ExtraICState extra_state =
- Code::ComputeExtraICState(grow_mode_, strict_mode_);
- Code::Flags flags =
- Code::ComputeFlags(Code::KEYED_STORE_IC, state, extra_state, type);
- Handle<Code> code = GetCodeWithFlags(flags, name);
- PROFILE(isolate(), CodeCreateEvent(Logger::KEYED_STORE_IC_TAG, *code, *name));
- GDBJIT(AddCode(GDBJITInterface::KEYED_STORE_IC, *name, *code));
+Handle<Code> KeyedStoreStubCompiler::CompileStoreElementPolymorphic(
+ MapHandleList* receiver_maps) {
+ // Collect MONOMORPHIC stubs for all |receiver_maps|.
+ CodeHandleList handlers(receiver_maps->length());
+ MapHandleList transitioned_maps(receiver_maps->length());
+ for (int i = 0; i < receiver_maps->length(); ++i) {
+ Handle<Map> receiver_map(receiver_maps->at(i));
+ Handle<Code> cached_stub;
+ Handle<Map> transitioned_map =
+ receiver_map->FindTransitionedMap(receiver_maps);
+
+ // TODO(mvstanton): The code below is doing pessimistic elements
+ // transitions. I would like to stop doing that and rely on Allocation Site
+ // Tracking to do a better job of ensuring the data types are what they need
+ // to be. Not all the elements are in place yet, so pessimistic elements
+ // transitions are still important for performance.
+ bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
+ ElementsKind elements_kind = receiver_map->elements_kind();
+ if (!transitioned_map.is_null()) {
+ cached_stub = ElementsTransitionAndStoreStub(
+ elements_kind,
+ transitioned_map->elements_kind(),
+ is_js_array,
+ store_mode_).GetCode(isolate());
+ } else {
+ if (receiver_map->has_fast_elements() ||
+ receiver_map->has_external_array_elements()) {
+ cached_stub = KeyedStoreFastElementStub(
+ is_js_array,
+ elements_kind,
+ store_mode_).GetCode(isolate());
+ } else {
+ cached_stub = KeyedStoreElementStub(
+ is_js_array,
+ elements_kind,
+ store_mode_).GetCode(isolate());
+ }
+ }
+ ASSERT(!cached_stub.is_null());
+ handlers.Add(cached_stub);
+ transitioned_maps.Add(transitioned_map);
+ }
+ Handle<Code> code =
+ CompileStorePolymorphic(receiver_maps, &handlers, &transitioned_maps);
+ isolate()->counters()->keyed_store_polymorphic_stubs()->Increment();
+ PROFILE(isolate(),
+ CodeCreateEvent(Logger::KEYED_STORE_POLYMORPHIC_IC_TAG, *code, 0));
return code;
}
@@ -1454,12 +1717,25 @@ bool CallStubCompiler::HasCustomCallGenerator(Handle<JSFunction> function) {
}
+bool CallStubCompiler::CanBeCached(Handle<JSFunction> function) {
+ if (function->shared()->HasBuiltinFunctionId()) {
+ BuiltinFunctionId id = function->shared()->builtin_function_id();
+#define CALL_GENERATOR_CASE(name) if (id == k##name) return false;
+ SITE_SPECIFIC_CALL_GENERATORS(CALL_GENERATOR_CASE)
+#undef CALL_GENERATOR_CASE
+ }
+
+ return true;
+}
+
+
Handle<Code> CallStubCompiler::CompileCustomCall(
Handle<Object> object,
Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<Cell> cell,
Handle<JSFunction> function,
- Handle<String> fname) {
+ Handle<String> fname,
+ Code::StubType type) {
ASSERT(HasCustomCallGenerator(function));
if (function->shared()->HasBuiltinFunctionId()) {
@@ -1470,7 +1746,8 @@ Handle<Code> CallStubCompiler::CompileCustomCall(
holder, \
cell, \
function, \
- fname); \
+ fname, \
+ type); \
}
CUSTOM_CALL_IC_GENERATORS(CALL_GENERATOR_CASE)
#undef CALL_GENERATOR_CASE
@@ -1487,13 +1764,13 @@ Handle<Code> CallStubCompiler::CompileCustomCall(
Handle<Code> CallStubCompiler::GetCode(Code::StubType type,
- Handle<String> name) {
+ Handle<Name> name) {
int argc = arguments_.immediate();
Code::Flags flags = Code::ComputeMonomorphicFlags(kind_,
- type,
extra_state_,
- cache_holder_,
- argc);
+ type,
+ argc,
+ cache_holder_);
return GetCodeWithFlags(flags, name);
}
@@ -1503,23 +1780,14 @@ Handle<Code> CallStubCompiler::GetCode(Handle<JSFunction> function) {
if (function->shared()->name()->IsString()) {
function_name = Handle<String>(String::cast(function->shared()->name()));
}
- return GetCode(Code::CONSTANT_FUNCTION, function_name);
-}
-
-
-Handle<Code> ConstructStubCompiler::GetCode() {
- Code::Flags flags = Code::ComputeFlags(Code::STUB);
- Handle<Code> code = GetCodeWithFlags(flags, "ConstructStub");
- PROFILE(isolate(), CodeCreateEvent(Logger::STUB_TAG, *code, "ConstructStub"));
- GDBJIT(AddCode(GDBJITInterface::STUB, "ConstructStub", *code));
- return code;
+ return GetCode(Code::CONSTANT, function_name);
}
CallOptimization::CallOptimization(LookupResult* lookup) {
if (lookup->IsFound() &&
lookup->IsCacheable() &&
- lookup->type() == CONSTANT_FUNCTION) {
+ lookup->IsConstantFunction()) {
// We only optimize constant function calls.
Initialize(Handle<JSFunction>(lookup->GetConstantFunction()));
} else {
@@ -1527,6 +1795,7 @@ CallOptimization::CallOptimization(LookupResult* lookup) {
}
}
+
CallOptimization::CallOptimization(Handle<JSFunction> function) {
Initialize(function);
}
@@ -1541,6 +1810,7 @@ int CallOptimization::GetPrototypeDepthOfExpectedType(
while (!object.is_identical_to(holder)) {
if (object->IsInstanceOf(*expected_receiver_type_)) return depth;
object = Handle<JSObject>(JSObject::cast(object->GetPrototype()));
+ if (!object->map()->is_hidden_prototype()) return kInvalidProtoDepth;
++depth;
}
if (holder->IsInstanceOf(*expected_receiver_type_)) return depth;
diff --git a/deps/v8/src/stub-cache.h b/deps/v8/src/stub-cache.h
index 005c537ab..42685b205 100644
--- a/deps/v8/src/stub-cache.h
+++ b/deps/v8/src/stub-cache.h
@@ -30,6 +30,7 @@
#include "allocation.h"
#include "arguments.h"
+#include "code-stubs.h"
#include "ic-inl.h"
#include "macro-assembler.h"
#include "objects.h"
@@ -47,6 +48,8 @@ namespace internal {
// invalidate the cache whenever a prototype map is changed. The stub
// validates the map chain as in the monomorphic case.
+
+class CallOptimization;
class SmallMapList;
class StubCache;
@@ -67,138 +70,100 @@ class SCTableReference {
class StubCache {
public:
struct Entry {
- String* key;
+ Name* key;
Code* value;
Map* map;
};
void Initialize();
+ Handle<JSObject> StubHolder(Handle<JSObject> receiver,
+ Handle<JSObject> holder);
- // Computes the right stub matching. Inserts the result in the
- // cache before returning. This might compile a stub if needed.
- Handle<Code> ComputeLoadNonexistent(Handle<String> name,
- Handle<JSObject> receiver);
+ Handle<Code> FindIC(Handle<Name> name,
+ Handle<Map> stub_holder_map,
+ Code::Kind kind,
+ Code::ExtraICState extra_state = Code::kNoExtraICState);
- Handle<Code> ComputeLoadField(Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- int field_index);
+ Handle<Code> FindIC(Handle<Name> name,
+ Handle<JSObject> stub_holder,
+ Code::Kind kind,
+ Code::ExtraICState extra_state = Code::kNoExtraICState);
- Handle<Code> ComputeLoadCallback(Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<AccessorInfo> callback);
-
- Handle<Code> ComputeLoadViaGetter(Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<JSFunction> getter);
-
- Handle<Code> ComputeLoadConstant(Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<JSFunction> value);
+ Handle<Code> FindHandler(Handle<Name> name,
+ Handle<JSObject> receiver,
+ Code::Kind kind,
+ StrictModeFlag strict_mode = kNonStrictMode);
- Handle<Code> ComputeLoadInterceptor(Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder);
+ Handle<Code> ComputeMonomorphicIC(Handle<HeapObject> receiver,
+ Handle<Code> handler,
+ Handle<Name> name,
+ StrictModeFlag strict_mode);
- Handle<Code> ComputeLoadNormal();
+ // Computes the right stub matching. Inserts the result in the
+ // cache before returning. This might compile a stub if needed.
+ Handle<Code> ComputeLoadNonexistent(Handle<Name> name,
+ Handle<JSObject> object);
- Handle<Code> ComputeLoadGlobal(Handle<String> name,
- Handle<JSObject> receiver,
+ Handle<Code> ComputeLoadGlobal(Handle<Name> name,
+ Handle<JSObject> object,
Handle<GlobalObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<PropertyCell> cell,
bool is_dont_delete);
// ---
- Handle<Code> ComputeKeyedLoadField(Handle<String> name,
- Handle<JSObject> receiver,
+ Handle<Code> ComputeKeyedLoadField(Handle<Name> name,
+ Handle<JSObject> object,
Handle<JSObject> holder,
- int field_index);
-
- Handle<Code> ComputeKeyedLoadCallback(Handle<String> name,
- Handle<JSObject> receiver,
+ PropertyIndex field_index,
+ Representation representation);
+
+ Handle<Code> ComputeKeyedLoadCallback(
+ Handle<Name> name,
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<ExecutableAccessorInfo> callback);
+
+ Handle<Code> ComputeKeyedLoadCallback(
+ Handle<Name> name,
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ const CallOptimization& call_optimization);
+
+ Handle<Code> ComputeKeyedLoadConstant(Handle<Name> name,
+ Handle<JSObject> object,
Handle<JSObject> holder,
- Handle<AccessorInfo> callback);
+ Handle<Object> value);
- Handle<Code> ComputeKeyedLoadConstant(Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<JSFunction> value);
-
- Handle<Code> ComputeKeyedLoadInterceptor(Handle<String> name,
- Handle<JSObject> receiver,
+ Handle<Code> ComputeKeyedLoadInterceptor(Handle<Name> name,
+ Handle<JSObject> object,
Handle<JSObject> holder);
- Handle<Code> ComputeKeyedLoadArrayLength(Handle<String> name,
- Handle<JSArray> receiver);
-
- Handle<Code> ComputeKeyedLoadStringLength(Handle<String> name,
- Handle<String> receiver);
-
- Handle<Code> ComputeKeyedLoadFunctionPrototype(Handle<String> name,
- Handle<JSFunction> receiver);
-
- // ---
-
- Handle<Code> ComputeStoreField(Handle<String> name,
- Handle<JSObject> receiver,
- int field_index,
- Handle<Map> transition,
- StrictModeFlag strict_mode);
-
- Handle<Code> ComputeStoreNormal(StrictModeFlag strict_mode);
-
- Handle<Code> ComputeStoreGlobal(Handle<String> name,
- Handle<GlobalObject> receiver,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<Code> ComputeStoreGlobal(Handle<Name> name,
+ Handle<GlobalObject> object,
+ Handle<PropertyCell> cell,
+ Handle<Object> value,
StrictModeFlag strict_mode);
- Handle<Code> ComputeStoreCallback(Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<AccessorInfo> callback,
- StrictModeFlag strict_mode);
-
- Handle<Code> ComputeStoreViaSetter(Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<JSFunction> setter,
- StrictModeFlag strict_mode);
-
- Handle<Code> ComputeStoreInterceptor(Handle<String> name,
- Handle<JSObject> receiver,
- StrictModeFlag strict_mode);
-
- // ---
-
- Handle<Code> ComputeKeyedStoreField(Handle<String> name,
- Handle<JSObject> receiver,
- int field_index,
- Handle<Map> transition,
- StrictModeFlag strict_mode);
-
- Handle<Code> ComputeKeyedLoadOrStoreElement(Handle<Map> receiver_map,
- KeyedIC::StubKind stub_kind,
- StrictModeFlag strict_mode);
+ Handle<Code> ComputeKeyedLoadElement(Handle<Map> receiver_map);
- // ---
+ Handle<Code> ComputeKeyedStoreElement(Handle<Map> receiver_map,
+ StrictModeFlag strict_mode,
+ KeyedAccessStoreMode store_mode);
Handle<Code> ComputeCallField(int argc,
Code::Kind,
Code::ExtraICState extra_state,
- Handle<String> name,
+ Handle<Name> name,
Handle<Object> object,
Handle<JSObject> holder,
- int index);
+ PropertyIndex index);
Handle<Code> ComputeCallConstant(int argc,
Code::Kind,
Code::ExtraICState extra_state,
- Handle<String> name,
+ Handle<Name> name,
Handle<Object> object,
Handle<JSObject> holder,
Handle<JSFunction> function);
@@ -206,17 +171,17 @@ class StubCache {
Handle<Code> ComputeCallInterceptor(int argc,
Code::Kind,
Code::ExtraICState extra_state,
- Handle<String> name,
+ Handle<Name> name,
Handle<Object> object,
Handle<JSObject> holder);
Handle<Code> ComputeCallGlobal(int argc,
Code::Kind,
Code::ExtraICState extra_state,
- Handle<String> name,
- Handle<JSObject> receiver,
+ Handle<Name> name,
+ Handle<JSObject> object,
Handle<GlobalObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<PropertyCell> cell,
Handle<JSFunction> function);
// ---
@@ -233,7 +198,7 @@ class StubCache {
Code::Kind kind,
Code::ExtraICState state);
- Handle<Code> ComputeCallArguments(int argc, Code::Kind kind);
+ Handle<Code> ComputeCallArguments(int argc);
Handle<Code> ComputeCallMegamorphic(int argc,
Code::Kind kind,
@@ -243,6 +208,24 @@ class StubCache {
Code::Kind kind,
Code::ExtraICState state);
+ // ---
+
+ Handle<Code> ComputeCompareNil(Handle<Map> receiver_map,
+ CompareNilICStub& stub);
+
+ // ---
+
+ Handle<Code> ComputeLoadElementPolymorphic(MapHandleList* receiver_maps);
+ Handle<Code> ComputeStoreElementPolymorphic(MapHandleList* receiver_maps,
+ KeyedAccessStoreMode store_mode,
+ StrictModeFlag strict_mode);
+
+ Handle<Code> ComputePolymorphicIC(MapHandleList* receiver_maps,
+ CodeHandleList* handlers,
+ int number_of_valid_maps,
+ Handle<Name> name,
+ StrictModeFlag strict_mode);
+
// Finds the Code object stored in the Heap::non_monomorphic_cache().
Code* FindCallInitialize(int argc, RelocInfo::Mode mode, Code::Kind kind);
@@ -253,14 +236,14 @@ class StubCache {
#endif
// Update cache for entry hash(name, map).
- Code* Set(String* name, Map* map, Code* code);
+ Code* Set(Name* name, Map* map, Code* code);
// Clear the lookup table (@ mark compact collection).
void Clear();
// Collect all maps that match the name and flags.
void CollectMatchingMaps(SmallMapList* types,
- String* name,
+ Handle<Name> name,
Code::Flags flags,
Handle<Context> native_context,
Zone* zone);
@@ -314,8 +297,18 @@ class StubCache {
Heap* heap() { return isolate()->heap(); }
Factory* factory() { return isolate()->factory(); }
+ // These constants describe the structure of the interceptor arguments on the
+ // stack. The arguments are pushed by the (platform-specific)
+ // PushInterceptorArguments and read by LoadPropertyWithInterceptorOnly and
+ // LoadWithInterceptor.
+ static const int kInterceptorArgsNameIndex = 0;
+ static const int kInterceptorArgsInfoIndex = 1;
+ static const int kInterceptorArgsThisIndex = 2;
+ static const int kInterceptorArgsHolderIndex = 3;
+ static const int kInterceptorArgsLength = 4;
+
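The four indices above describe the order in which the (platform-specific) PushInterceptorArguments leaves the interceptor arguments on the stack, and they mirror how the interceptor runtime functions earlier in this patch read those arguments back. A minimal read-side sketch, assuming the usual Arguments accessors (illustrative only, not part of the patch):

  // Sketch: how LoadPropertyWithInterceptorOnly / LoadWithInterceptor recover
  // the pushed values from the runtime Arguments object 'args'.
  ASSERT(args.length() == StubCache::kInterceptorArgsLength);
  Handle<Name> name = args.at<Name>(StubCache::kInterceptorArgsNameIndex);
  Handle<InterceptorInfo> info =
      args.at<InterceptorInfo>(StubCache::kInterceptorArgsInfoIndex);
  Handle<JSObject> receiver =
      args.at<JSObject>(StubCache::kInterceptorArgsThisIndex);
  Handle<JSObject> holder =
      args.at<JSObject>(StubCache::kInterceptorArgsHolderIndex);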
private:
- StubCache(Isolate* isolate, Zone* zone);
+ explicit StubCache(Isolate* isolate);
Handle<Code> ComputeCallInitialize(int argc,
RelocInfo::Mode mode,
@@ -331,12 +324,12 @@ class StubCache {
// Hash algorithm for the primary table. This algorithm is replicated in
// assembler for every architecture. Returns an index into the table that
// is scaled by 1 << kHeapObjectTagSize.
- static int PrimaryOffset(String* name, Code::Flags flags, Map* map) {
+ static int PrimaryOffset(Name* name, Code::Flags flags, Map* map) {
// This works well because the heap object tag size and the hash
// shift are equal. Shifting down the length field to get the
// hash code would effectively throw away two bits of the hash
// code.
- STATIC_ASSERT(kHeapObjectTagSize == String::kHashShift);
+ STATIC_ASSERT(kHeapObjectTagSize == Name::kHashShift);
// Compute the hash of the name (use entire hash field).
ASSERT(name->HasHashCode());
uint32_t field = name->hash_field();
@@ -357,25 +350,25 @@ class StubCache {
// Hash algorithm for the secondary table. This algorithm is replicated in
// assembler for every architecture. Returns an index into the table that
// is scaled by 1 << kHeapObjectTagSize.
- static int SecondaryOffset(String* name, Code::Flags flags, int seed) {
+ static int SecondaryOffset(Name* name, Code::Flags flags, int seed) {
// Use the seed from the primary cache in the secondary cache.
- uint32_t string_low32bits =
+ uint32_t name_low32bits =
static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name));
// We always set the in_loop bit to zero when generating the lookup code
// so do it here too so the hash codes match.
uint32_t iflags =
(static_cast<uint32_t>(flags) & ~Code::kFlagsNotUsedInLookup);
- uint32_t key = (seed - string_low32bits) + iflags;
+ uint32_t key = (seed - name_low32bits) + iflags;
return key & ((kSecondaryTableSize - 1) << kHeapObjectTagSize);
}
// Compute the entry for a given offset in exactly the same way as
// we do in generated code. We generate a hash code that already
- // ends in String::kHashShift 0s. Then we multiply it so it is a multiple
+ // ends in Name::kHashShift 0s. Then we multiply it so it is a multiple
// of sizeof(Entry). This makes it easier to avoid making mistakes
// in the hashed offset computations.
static Entry* entry(Entry* table, int offset) {
- const int multiplier = sizeof(*table) >> String::kHashShift;
+ const int multiplier = sizeof(*table) >> Name::kHashShift;
return reinterpret_cast<Entry*>(
reinterpret_cast<Address>(table) + offset * multiplier);
}
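Taken together with the map field added to the Entry struct earlier in this header, the hashed lookup is a two-level probe: a primary slot derived from the name, flags, and map, then a secondary slot seeded by the primary offset. A minimal sketch of that flow, assuming a hit requires the stored key and map to match (as CollectMatchingMaps above relies on); the real probe is emitted as generated code per architecture and performs additional checks:

  // Illustrative sketch only, not part of the patch; mirrors the offset
  // helpers above. 'primary_' and 'secondary_' are the two Entry tables.
  Code* Probe(Name* name, Code::Flags flags, Map* map) {
    int primary_offset = PrimaryOffset(name, flags, map);
    Entry* primary = entry(primary_, primary_offset);
    if (primary->key == name && primary->map == map) return primary->value;
    int secondary_offset = SecondaryOffset(name, flags, primary_offset);
    Entry* secondary = entry(secondary_, secondary_offset);
    if (secondary->key == name && secondary->map == map) {
      return secondary->value;
    }
    return NULL;  // Miss: fall through to the IC miss machinery.
  }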
@@ -400,7 +393,6 @@ class StubCache {
// Support functions for IC stubs for callbacks.
-DECLARE_RUNTIME_FUNCTION(MaybeObject*, LoadCallbackProperty);
DECLARE_RUNTIME_FUNCTION(MaybeObject*, StoreCallbackProperty);
@@ -413,6 +405,10 @@ DECLARE_RUNTIME_FUNCTION(MaybeObject*, CallInterceptorProperty);
DECLARE_RUNTIME_FUNCTION(MaybeObject*, KeyedLoadPropertyWithInterceptor);
+enum PrototypeCheckType { CHECK_ALL_MAPS, SKIP_RECEIVER };
+enum IcCheckType { ELEMENT, PROPERTY };
+
+
// The stub compilers compile stubs for the stub cache.
class StubCompiler BASE_EMBEDDED {
public:
@@ -438,6 +434,18 @@ class StubCompiler BASE_EMBEDDED {
int index,
Register prototype);
+ // Helper function used to check that the dictionary doesn't contain
+ // the property. This function may return false negatives, so miss_label
+ // must always call a backup property check that is complete.
+ // This function is safe to call if the receiver has fast properties.
+ // Name must be unique and receiver must be a heap object.
+ static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
+ Label* miss_label,
+ Register receiver,
+ Handle<Name> name,
+ Register r0,
+ Register r1);
+
// Generates prototype loading code that uses the objects from the
// context we were in when this function was called. If the context
// has changed, a jump to miss is performed. This ties the generated
@@ -452,8 +460,9 @@ class StubCompiler BASE_EMBEDDED {
static void GenerateFastPropertyLoad(MacroAssembler* masm,
Register dst,
Register src,
- Handle<JSObject> holder,
- int index);
+ bool inobject,
+ int index,
+ Representation representation);
static void GenerateLoadArrayLength(MacroAssembler* masm,
Register receiver,
@@ -464,8 +473,7 @@ class StubCompiler BASE_EMBEDDED {
Register receiver,
Register scratch1,
Register scratch2,
- Label* miss_label,
- bool support_wrappers);
+ Label* miss_label);
static void GenerateLoadFunctionPrototype(MacroAssembler* masm,
Register receiver,
@@ -473,21 +481,25 @@ class StubCompiler BASE_EMBEDDED {
Register scratch2,
Label* miss_label);
- void GenerateStoreField(MacroAssembler* masm,
- Handle<JSObject> object,
- int index,
- Handle<Map> transition,
- Handle<String> name,
- Register receiver_reg,
- Register name_reg,
- Register scratch1,
- Register scratch2,
- Label* miss_label);
-
- static void GenerateLoadMiss(MacroAssembler* masm,
- Code::Kind kind);
-
- static void GenerateKeyedLoadMissForceGeneric(MacroAssembler* masm);
+ // Generate code to check that a global property cell is empty. Create
+ // the property cell at compilation time if no cell exists for the
+ // property.
+ static void GenerateCheckPropertyCell(MacroAssembler* masm,
+ Handle<JSGlobalObject> global,
+ Handle<Name> name,
+ Register scratch,
+ Label* miss);
+
+ // Calls GenerateCheckPropertyCell for each global object in the prototype
+ // chain from object to (but not including) holder.
+ static void GenerateCheckPropertyCells(MacroAssembler* masm,
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ Register scratch,
+ Label* miss);
+
+ static void TailCallBuiltin(MacroAssembler* masm, Builtins::Name name);
// Generates code that verifies that the property holder has not changed
// (checking maps of objects in the prototype chain for fast and global
@@ -510,10 +522,11 @@ class StubCompiler BASE_EMBEDDED {
Register holder_reg,
Register scratch1,
Register scratch2,
- Handle<String> name,
- Label* miss) {
+ Handle<Name> name,
+ Label* miss,
+ PrototypeCheckType check = CHECK_ALL_MAPS) {
return CheckPrototypes(object, object_reg, holder, holder_reg, scratch1,
- scratch2, name, kInvalidProtoDepth, miss);
+ scratch2, name, kInvalidProtoDepth, miss, check);
}
Register CheckPrototypes(Handle<JSObject> object,
@@ -522,78 +535,29 @@ class StubCompiler BASE_EMBEDDED {
Register holder_reg,
Register scratch1,
Register scratch2,
- Handle<String> name,
+ Handle<Name> name,
int save_at_depth,
- Label* miss);
+ Label* miss,
+ PrototypeCheckType check = CHECK_ALL_MAPS);
protected:
Handle<Code> GetCodeWithFlags(Code::Flags flags, const char* name);
- Handle<Code> GetCodeWithFlags(Code::Flags flags, Handle<String> name);
+ Handle<Code> GetCodeWithFlags(Code::Flags flags, Handle<Name> name);
MacroAssembler* masm() { return &masm_; }
void set_failure(Failure* failure) { failure_ = failure; }
- void GenerateLoadField(Handle<JSObject> object,
- Handle<JSObject> holder,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- int index,
- Handle<String> name,
- Label* miss);
-
- void GenerateLoadCallback(Handle<JSObject> object,
- Handle<JSObject> holder,
- Register receiver,
- Register name_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Handle<AccessorInfo> callback,
- Handle<String> name,
- Label* miss);
-
- void GenerateDictionaryLoadCallback(Register receiver,
- Register name_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Handle<AccessorInfo> callback,
- Handle<String> name,
- Label* miss);
-
- void GenerateLoadConstant(Handle<JSObject> object,
- Handle<JSObject> holder,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Handle<JSFunction> value,
- Handle<String> name,
- Label* miss);
-
- void GenerateLoadInterceptor(Handle<JSObject> object,
- Handle<JSObject> holder,
- LookupResult* lookup,
- Register receiver,
- Register name_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Handle<String> name,
- Label* miss);
-
static void LookupPostInterceptor(Handle<JSObject> holder,
- Handle<String> name,
+ Handle<Name> name,
LookupResult* lookup);
Isolate* isolate() { return isolate_; }
Heap* heap() { return isolate()->heap(); }
Factory* factory() { return isolate()->factory(); }
+ static void GenerateTailCall(MacroAssembler* masm, Handle<Code> code);
+
private:
Isolate* isolate_;
MacroAssembler masm_;
@@ -601,153 +565,345 @@ class StubCompiler BASE_EMBEDDED {
};
-class LoadStubCompiler: public StubCompiler {
- public:
- explicit LoadStubCompiler(Isolate* isolate) : StubCompiler(isolate) { }
+enum FrontendCheckType { PERFORM_INITIAL_CHECKS, SKIP_INITIAL_CHECKS };
- Handle<Code> CompileLoadNonexistent(Handle<String> name,
- Handle<JSObject> object,
- Handle<JSObject> last);
- Handle<Code> CompileLoadField(Handle<JSObject> object,
- Handle<JSObject> holder,
- int index,
- Handle<String> name);
-
- Handle<Code> CompileLoadCallback(Handle<String> name,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<AccessorInfo> callback);
+class BaseLoadStoreStubCompiler: public StubCompiler {
+ public:
+ BaseLoadStoreStubCompiler(Isolate* isolate, Code::Kind kind)
+ : StubCompiler(isolate), kind_(kind) {
+ InitializeRegisters();
+ }
+ virtual ~BaseLoadStoreStubCompiler() { }
+
+ Handle<Code> CompileMonomorphicIC(Handle<Map> receiver_map,
+ Handle<Code> handler,
+ Handle<Name> name);
+
+ Handle<Code> CompilePolymorphicIC(MapHandleList* receiver_maps,
+ CodeHandleList* handlers,
+ Handle<Name> name,
+ Code::StubType type,
+ IcCheckType check);
+
+ virtual void GenerateNameCheck(Handle<Name> name,
+ Register name_reg,
+ Label* miss) { }
+
+ static Builtins::Name MissBuiltin(Code::Kind kind) {
+ switch (kind) {
+ case Code::LOAD_IC: return Builtins::kLoadIC_Miss;
+ case Code::STORE_IC: return Builtins::kStoreIC_Miss;
+ case Code::KEYED_LOAD_IC: return Builtins::kKeyedLoadIC_Miss;
+ case Code::KEYED_STORE_IC: return Builtins::kKeyedStoreIC_Miss;
+ default: UNREACHABLE();
+ }
+ return Builtins::kLoadIC_Miss;
+ }
- static void GenerateLoadViaGetter(MacroAssembler* masm,
- Handle<JSFunction> getter);
+ protected:
+ virtual Register HandlerFrontendHeader(Handle<JSObject> object,
+ Register object_reg,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ Label* miss) = 0;
- Handle<Code> CompileLoadViaGetter(Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<JSFunction> getter);
+ virtual void HandlerFrontendFooter(Handle<Name> name,
+ Label* success,
+ Label* miss) = 0;
- Handle<Code> CompileLoadConstant(Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<JSFunction> value,
- Handle<String> name);
+ Register HandlerFrontend(Handle<JSObject> object,
+ Register object_reg,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ Label* success);
+
+ Handle<Code> GetCode(Code::Kind kind,
+ Code::StubType type,
+ Handle<Name> name);
+
+ Handle<Code> GetICCode(Code::Kind kind,
+ Code::StubType type,
+ Handle<Name> name,
+ InlineCacheState state = MONOMORPHIC);
+ Code::Kind kind() { return kind_; }
+
+ Logger::LogEventsAndTags log_kind(Handle<Code> code) {
+ if (!code->is_inline_cache_stub()) return Logger::STUB_TAG;
+ if (kind_ == Code::LOAD_IC) {
+ return code->ic_state() == MONOMORPHIC
+ ? Logger::LOAD_IC_TAG : Logger::LOAD_POLYMORPHIC_IC_TAG;
+ } else if (kind_ == Code::KEYED_LOAD_IC) {
+ return code->ic_state() == MONOMORPHIC
+ ? Logger::KEYED_LOAD_IC_TAG : Logger::KEYED_LOAD_POLYMORPHIC_IC_TAG;
+ } else if (kind_ == Code::STORE_IC) {
+ return code->ic_state() == MONOMORPHIC
+ ? Logger::STORE_IC_TAG : Logger::STORE_POLYMORPHIC_IC_TAG;
+ } else {
+ return code->ic_state() == MONOMORPHIC
+ ? Logger::KEYED_STORE_IC_TAG : Logger::KEYED_STORE_POLYMORPHIC_IC_TAG;
+ }
+ }
+ void JitEvent(Handle<Name> name, Handle<Code> code);
- Handle<Code> CompileLoadInterceptor(Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<String> name);
+ virtual Code::ExtraICState extra_state() { return Code::kNoExtraICState; }
+ virtual Register receiver() = 0;
+ virtual Register name() = 0;
+ virtual Register scratch1() = 0;
+ virtual Register scratch2() = 0;
+ virtual Register scratch3() = 0;
- Handle<Code> CompileLoadGlobal(Handle<JSObject> object,
- Handle<GlobalObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<String> name,
- bool is_dont_delete);
+ void InitializeRegisters();
- private:
- Handle<Code> GetCode(Code::StubType type, Handle<String> name);
+ Code::Kind kind_;
+ Register* registers_;
};
-class KeyedLoadStubCompiler: public StubCompiler {
+class LoadStubCompiler: public BaseLoadStoreStubCompiler {
public:
- explicit KeyedLoadStubCompiler(Isolate* isolate) : StubCompiler(isolate) { }
+ LoadStubCompiler(Isolate* isolate, Code::Kind kind = Code::LOAD_IC)
+ : BaseLoadStoreStubCompiler(isolate, kind) { }
+ virtual ~LoadStubCompiler() { }
- Handle<Code> CompileLoadField(Handle<String> name,
- Handle<JSObject> object,
+ Handle<Code> CompileLoadField(Handle<JSObject> object,
Handle<JSObject> holder,
- int index);
+ Handle<Name> name,
+ PropertyIndex index,
+ Representation representation);
+
+ Handle<Code> CompileLoadCallback(Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ Handle<ExecutableAccessorInfo> callback);
- Handle<Code> CompileLoadCallback(Handle<String> name,
- Handle<JSObject> object,
+ Handle<Code> CompileLoadCallback(Handle<JSObject> object,
Handle<JSObject> holder,
- Handle<AccessorInfo> callback);
+ Handle<Name> name,
+ const CallOptimization& call_optimization);
- Handle<Code> CompileLoadConstant(Handle<String> name,
- Handle<JSObject> object,
+ Handle<Code> CompileLoadConstant(Handle<JSObject> object,
Handle<JSObject> holder,
- Handle<JSFunction> value);
+ Handle<Name> name,
+ Handle<Object> value);
Handle<Code> CompileLoadInterceptor(Handle<JSObject> object,
Handle<JSObject> holder,
- Handle<String> name);
+ Handle<Name> name);
- Handle<Code> CompileLoadArrayLength(Handle<String> name);
+ Handle<Code> CompileLoadViaGetter(Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ Handle<JSFunction> getter);
- Handle<Code> CompileLoadStringLength(Handle<String> name);
+ static void GenerateLoadViaGetter(MacroAssembler* masm,
+ Register receiver,
+ Handle<JSFunction> getter);
- Handle<Code> CompileLoadFunctionPrototype(Handle<String> name);
+ Handle<Code> CompileLoadNonexistent(Handle<JSObject> object,
+ Handle<JSObject> last,
+ Handle<Name> name,
+ Handle<JSGlobalObject> global);
- Handle<Code> CompileLoadElement(Handle<Map> receiver_map);
+ Handle<Code> CompileLoadGlobal(Handle<JSObject> object,
+ Handle<GlobalObject> holder,
+ Handle<PropertyCell> cell,
+ Handle<Name> name,
+ bool is_dont_delete);
+
+ static Register* registers();
- Handle<Code> CompileLoadPolymorphic(MapHandleList* receiver_maps,
- CodeHandleList* handler_ics);
+ protected:
+ virtual Register HandlerFrontendHeader(Handle<JSObject> object,
+ Register object_reg,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ Label* miss);
+
+ virtual void HandlerFrontendFooter(Handle<Name> name,
+ Label* success,
+ Label* miss);
+
+ Register CallbackHandlerFrontend(Handle<JSObject> object,
+ Register object_reg,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ Label* success,
+ Handle<Object> callback);
+ void NonexistentHandlerFrontend(Handle<JSObject> object,
+ Handle<JSObject> last,
+ Handle<Name> name,
+ Label* success,
+ Handle<JSGlobalObject> global);
+
+ void GenerateLoadField(Register reg,
+ Handle<JSObject> holder,
+ PropertyIndex field,
+ Representation representation);
+ void GenerateLoadConstant(Handle<Object> value);
+ void GenerateLoadCallback(Register reg,
+ Handle<ExecutableAccessorInfo> callback);
+ void GenerateLoadCallback(const CallOptimization& call_optimization);
+ void GenerateLoadInterceptor(Register holder_reg,
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ LookupResult* lookup,
+ Handle<Name> name);
+ void GenerateLoadPostInterceptor(Register reg,
+ Handle<JSObject> interceptor_holder,
+ Handle<Name> name,
+ LookupResult* lookup);
+
+ virtual Register receiver() { return registers_[0]; }
+ virtual Register name() { return registers_[1]; }
+ virtual Register scratch1() { return registers_[2]; }
+ virtual Register scratch2() { return registers_[3]; }
+ virtual Register scratch3() { return registers_[4]; }
+ Register scratch4() { return registers_[5]; }
+};
- static void GenerateLoadExternalArray(MacroAssembler* masm,
- ElementsKind elements_kind);
- static void GenerateLoadFastElement(MacroAssembler* masm);
+class KeyedLoadStubCompiler: public LoadStubCompiler {
+ public:
+ explicit KeyedLoadStubCompiler(Isolate* isolate)
+ : LoadStubCompiler(isolate, Code::KEYED_LOAD_IC) { }
+
+ Handle<Code> CompileLoadElement(Handle<Map> receiver_map);
- static void GenerateLoadFastDoubleElement(MacroAssembler* masm);
+ void CompileElementHandlers(MapHandleList* receiver_maps,
+ CodeHandleList* handlers);
static void GenerateLoadDictionaryElement(MacroAssembler* masm);
+ protected:
+ static Register* registers();
+
private:
- Handle<Code> GetCode(Code::StubType type,
- Handle<String> name,
- InlineCacheState state = MONOMORPHIC);
+ virtual void GenerateNameCheck(Handle<Name> name,
+ Register name_reg,
+ Label* miss);
+ friend class BaseLoadStoreStubCompiler;
};
-class StoreStubCompiler: public StubCompiler {
+class StoreStubCompiler: public BaseLoadStoreStubCompiler {
public:
- StoreStubCompiler(Isolate* isolate, StrictModeFlag strict_mode)
- : StubCompiler(isolate), strict_mode_(strict_mode) { }
+ StoreStubCompiler(Isolate* isolate,
+ StrictModeFlag strict_mode,
+ Code::Kind kind = Code::STORE_IC)
+ : BaseLoadStoreStubCompiler(isolate, kind),
+ strict_mode_(strict_mode) { }
+ virtual ~StoreStubCompiler() { }
+
+ Handle<Code> CompileStoreTransition(Handle<JSObject> object,
+ LookupResult* lookup,
+ Handle<Map> transition,
+ Handle<Name> name);
Handle<Code> CompileStoreField(Handle<JSObject> object,
- int index,
- Handle<Map> transition,
- Handle<String> name);
+ LookupResult* lookup,
+ Handle<Name> name);
- Handle<Code> CompileStoreCallback(Handle<String> name,
- Handle<JSObject> receiver,
+ void GenerateNegativeHolderLookup(MacroAssembler* masm,
Handle<JSObject> holder,
- Handle<AccessorInfo> callback);
+ Register holder_reg,
+ Handle<Name> name,
+ Label* miss);
+
+ void GenerateStoreTransition(MacroAssembler* masm,
+ Handle<JSObject> object,
+ LookupResult* lookup,
+ Handle<Map> transition,
+ Handle<Name> name,
+ Register receiver_reg,
+ Register name_reg,
+ Register value_reg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* miss_label,
+ Label* slow);
+
+ void GenerateStoreField(MacroAssembler* masm,
+ Handle<JSObject> object,
+ LookupResult* lookup,
+ Register receiver_reg,
+ Register name_reg,
+ Register value_reg,
+ Register scratch1,
+ Register scratch2,
+ Label* miss_label);
+
+ Handle<Code> CompileStoreCallback(Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ Handle<ExecutableAccessorInfo> callback);
+
+ Handle<Code> CompileStoreCallback(Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ const CallOptimization& call_optimization);
static void GenerateStoreViaSetter(MacroAssembler* masm,
Handle<JSFunction> setter);
- Handle<Code> CompileStoreViaSetter(Handle<String> name,
- Handle<JSObject> receiver,
+ Handle<Code> CompileStoreViaSetter(Handle<JSObject> object,
Handle<JSObject> holder,
+ Handle<Name> name,
Handle<JSFunction> setter);
Handle<Code> CompileStoreInterceptor(Handle<JSObject> object,
- Handle<String> name);
+ Handle<Name> name);
- Handle<Code> CompileStoreGlobal(Handle<GlobalObject> object,
- Handle<JSGlobalPropertyCell> holder,
- Handle<String> name);
+ static Builtins::Name SlowBuiltin(Code::Kind kind) {
+ switch (kind) {
+ case Code::STORE_IC: return Builtins::kStoreIC_Slow;
+ case Code::KEYED_STORE_IC: return Builtins::kKeyedStoreIC_Slow;
+ default: UNREACHABLE();
+ }
+ return Builtins::kStoreIC_Slow;
+ }
- private:
- Handle<Code> GetCode(Code::StubType type, Handle<String> name);
+ protected:
+ virtual Register HandlerFrontendHeader(Handle<JSObject> object,
+ Register object_reg,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ Label* miss);
+
+ virtual void HandlerFrontendFooter(Handle<Name> name,
+ Label* success,
+ Label* miss);
+ void GenerateRestoreName(MacroAssembler* masm,
+ Label* label,
+ Handle<Name> name);
+
+ virtual Register receiver() { return registers_[0]; }
+ virtual Register name() { return registers_[1]; }
+ Register value() { return registers_[2]; }
+ virtual Register scratch1() { return registers_[3]; }
+ virtual Register scratch2() { return registers_[4]; }
+ virtual Register scratch3() { return registers_[5]; }
+ StrictModeFlag strict_mode() { return strict_mode_; }
+ virtual Code::ExtraICState extra_state() { return strict_mode_; }
+
+ protected:
+ static Register* registers();
+ private:
StrictModeFlag strict_mode_;
+ friend class BaseLoadStoreStubCompiler;
};
-class KeyedStoreStubCompiler: public StubCompiler {
+class KeyedStoreStubCompiler: public StoreStubCompiler {
public:
KeyedStoreStubCompiler(Isolate* isolate,
StrictModeFlag strict_mode,
- KeyedAccessGrowMode grow_mode)
- : StubCompiler(isolate),
- strict_mode_(strict_mode),
- grow_mode_(grow_mode) { }
-
- Handle<Code> CompileStoreField(Handle<JSObject> object,
- int index,
- Handle<Map> transition,
- Handle<String> name);
+ KeyedAccessStoreMode store_mode)
+ : StoreStubCompiler(isolate, strict_mode, Code::KEYED_STORE_IC),
+ store_mode_(store_mode) { }
Handle<Code> CompileStoreElement(Handle<Map> receiver_map);
@@ -755,27 +911,26 @@ class KeyedStoreStubCompiler: public StubCompiler {
CodeHandleList* handler_stubs,
MapHandleList* transitioned_maps);
- static void GenerateStoreFastElement(MacroAssembler* masm,
- bool is_js_array,
- ElementsKind element_kind,
- KeyedAccessGrowMode grow_mode);
-
- static void GenerateStoreFastDoubleElement(MacroAssembler* masm,
- bool is_js_array,
- KeyedAccessGrowMode grow_mode);
-
- static void GenerateStoreExternalArray(MacroAssembler* masm,
- ElementsKind elements_kind);
+ Handle<Code> CompileStoreElementPolymorphic(MapHandleList* receiver_maps);
static void GenerateStoreDictionaryElement(MacroAssembler* masm);
+ protected:
+ virtual Code::ExtraICState extra_state() {
+ return Code::ComputeExtraICState(store_mode_, strict_mode());
+ }
+ static Register* registers();
+
private:
- Handle<Code> GetCode(Code::StubType type,
- Handle<String> name,
- InlineCacheState state = MONOMORPHIC);
+ Register transition_map() {
+ return registers()[3];
+ }
- StrictModeFlag strict_mode_;
- KeyedAccessGrowMode grow_mode_;
+ virtual void GenerateNameCheck(Handle<Name> name,
+ Register name_reg,
+ Label* miss);
+ KeyedAccessStoreMode store_mode_;
+ friend class BaseLoadStoreStubCompiler;
};
@@ -788,10 +943,13 @@ class KeyedStoreStubCompiler: public StubCompiler {
V(StringCharAt) \
V(StringFromCharCode) \
V(MathFloor) \
- V(MathAbs)
+ V(MathAbs) \
+ V(ArrayCode)
-class CallOptimization;
+#define SITE_SPECIFIC_CALL_GENERATORS(V) \
+ V(ArrayCode)
+
class CallStubCompiler: public StubCompiler {
public:
@@ -799,30 +957,39 @@ class CallStubCompiler: public StubCompiler {
int argc,
Code::Kind kind,
Code::ExtraICState extra_state,
- InlineCacheHolderFlag cache_holder);
+ InlineCacheHolderFlag cache_holder = OWN_MAP);
Handle<Code> CompileCallField(Handle<JSObject> object,
Handle<JSObject> holder,
- int index,
- Handle<String> name);
+ PropertyIndex index,
+ Handle<Name> name);
+
+ void CompileHandlerFrontend(Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ CheckType check,
+ Label* success);
+
+ void CompileHandlerBackend(Handle<JSFunction> function);
Handle<Code> CompileCallConstant(Handle<Object> object,
Handle<JSObject> holder,
- Handle<JSFunction> function,
- Handle<String> name,
- CheckType check);
+ Handle<Name> name,
+ CheckType check,
+ Handle<JSFunction> function);
Handle<Code> CompileCallInterceptor(Handle<JSObject> object,
Handle<JSObject> holder,
- Handle<String> name);
+ Handle<Name> name);
Handle<Code> CompileCallGlobal(Handle<JSObject> object,
Handle<GlobalObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<PropertyCell> cell,
Handle<JSFunction> function,
- Handle<String> name);
+ Handle<Name> name);
static bool HasCustomCallGenerator(Handle<JSFunction> function);
+ static bool CanBeCached(Handle<JSFunction> function);
private:
// Compiles a custom call constant/global IC. For constant calls cell is
@@ -830,41 +997,43 @@ class CallStubCompiler: public StubCompiler {
// given function.
Handle<Code> CompileCustomCall(Handle<Object> object,
Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<Cell> cell,
Handle<JSFunction> function,
- Handle<String> name);
+ Handle<String> name,
+ Code::StubType type);
#define DECLARE_CALL_GENERATOR(name) \
Handle<Code> Compile##name##Call(Handle<Object> object, \
Handle<JSObject> holder, \
- Handle<JSGlobalPropertyCell> cell, \
+ Handle<Cell> cell, \
Handle<JSFunction> function, \
- Handle<String> fname);
+ Handle<String> fname, \
+ Code::StubType type);
CUSTOM_CALL_IC_GENERATORS(DECLARE_CALL_GENERATOR)
#undef DECLARE_CALL_GENERATOR
Handle<Code> CompileFastApiCall(const CallOptimization& optimization,
Handle<Object> object,
Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<Cell> cell,
Handle<JSFunction> function,
Handle<String> name);
- Handle<Code> GetCode(Code::StubType type, Handle<String> name);
+ Handle<Code> GetCode(Code::StubType type, Handle<Name> name);
Handle<Code> GetCode(Handle<JSFunction> function);
const ParameterCount& arguments() { return arguments_; }
- void GenerateNameCheck(Handle<String> name, Label* miss);
+ void GenerateNameCheck(Handle<Name> name, Label* miss);
void GenerateGlobalReceiverCheck(Handle<JSObject> object,
Handle<JSObject> holder,
- Handle<String> name,
+ Handle<Name> name,
Label* miss);
// Generates code to load the function from the cell checking that
// it still contains the same function.
- void GenerateLoadFunctionFromCell(Handle<JSGlobalPropertyCell> cell,
+ void GenerateLoadFunctionFromCell(Handle<Cell> cell,
Handle<JSFunction> function,
Label* miss);
@@ -878,17 +1047,6 @@ class CallStubCompiler: public StubCompiler {
};
-class ConstructStubCompiler: public StubCompiler {
- public:
- explicit ConstructStubCompiler(Isolate* isolate) : StubCompiler(isolate) { }
-
- Handle<Code> CompileConstructStub(Handle<JSFunction> function);
-
- private:
- Handle<Code> GetCode();
-};
-
-
// Holds information about possible function call optimizations.
class CallOptimization BASE_EMBEDDED {
public:
@@ -924,6 +1082,12 @@ class CallOptimization BASE_EMBEDDED {
int GetPrototypeDepthOfExpectedType(Handle<JSObject> object,
Handle<JSObject> holder) const;
+ bool IsCompatibleReceiver(Object* receiver) {
+ ASSERT(is_simple_api_call());
+ if (expected_receiver_type_.is_null()) return true;
+ return receiver->IsInstanceOf(*expected_receiver_type_);
+ }
+
private:
void Initialize(Handle<JSFunction> function);
diff --git a/deps/v8/src/sweeper-thread.cc b/deps/v8/src/sweeper-thread.cc
new file mode 100644
index 000000000..58c684a54
--- /dev/null
+++ b/deps/v8/src/sweeper-thread.cc
@@ -0,0 +1,108 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "sweeper-thread.h"
+
+#include "v8.h"
+
+#include "isolate.h"
+#include "v8threads.h"
+
+namespace v8 {
+namespace internal {
+
+static const int kSweeperThreadStackSize = 64 * KB;
+
+SweeperThread::SweeperThread(Isolate* isolate)
+ : Thread(Thread::Options("v8:SweeperThread", kSweeperThreadStackSize)),
+ isolate_(isolate),
+ heap_(isolate->heap()),
+ collector_(heap_->mark_compact_collector()),
+ start_sweeping_semaphore_(0),
+ end_sweeping_semaphore_(0),
+ stop_semaphore_(0),
+ free_list_old_data_space_(heap_->paged_space(OLD_DATA_SPACE)),
+ free_list_old_pointer_space_(heap_->paged_space(OLD_POINTER_SPACE)),
+ private_free_list_old_data_space_(heap_->paged_space(OLD_DATA_SPACE)),
+ private_free_list_old_pointer_space_(
+ heap_->paged_space(OLD_POINTER_SPACE)) {
+ NoBarrier_Store(&stop_thread_, static_cast<AtomicWord>(false));
+}
+
+
+void SweeperThread::Run() {
+ Isolate::SetIsolateThreadLocals(isolate_, NULL);
+ DisallowHeapAllocation no_allocation;
+ DisallowHandleAllocation no_handles;
+ DisallowHandleDereference no_deref;
+
+ while (true) {
+ start_sweeping_semaphore_.Wait();
+
+ if (Acquire_Load(&stop_thread_)) {
+ stop_semaphore_.Signal();
+ return;
+ }
+
+ collector_->SweepInParallel(heap_->old_data_space(),
+ &private_free_list_old_data_space_,
+ &free_list_old_data_space_);
+ collector_->SweepInParallel(heap_->old_pointer_space(),
+ &private_free_list_old_pointer_space_,
+ &free_list_old_pointer_space_);
+ end_sweeping_semaphore_.Signal();
+ }
+}
+
+
+intptr_t SweeperThread::StealMemory(PagedSpace* space) {
+ if (space->identity() == OLD_POINTER_SPACE) {
+ return space->free_list()->Concatenate(&free_list_old_pointer_space_);
+ } else if (space->identity() == OLD_DATA_SPACE) {
+ return space->free_list()->Concatenate(&free_list_old_data_space_);
+ }
+ return 0;
+}
+
+
+void SweeperThread::Stop() {
+ Release_Store(&stop_thread_, static_cast<AtomicWord>(true));
+ start_sweeping_semaphore_.Signal();
+ stop_semaphore_.Wait();
+ Join();
+}
+
+
+void SweeperThread::StartSweeping() {
+ start_sweeping_semaphore_.Signal();
+}
+
+
+void SweeperThread::WaitForSweeperThread() {
+ end_sweeping_semaphore_.Wait();
+}
+} } // namespace v8::internal
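The three public entry points above form a simple semaphore handshake: the main thread calls StartSweeping() to wake Run(), later blocks in WaitForSweeperThread() until end_sweeping_semaphore_ is signalled, and then reclaims the per-thread free lists through StealMemory(); Stop() raises stop_thread_, wakes the loop and joins. A minimal sketch of a driver for that protocol follows; the StartParallelSweeping/FinishParallelSweeping helpers and the caller-owned threads array are illustrative assumptions, not part of this patch.

// Sketch only: drives the SweeperThread API declared in sweeper-thread.h below.
// The helper names and the "threads"/"num_threads" parameters are assumptions.
void StartParallelSweeping(SweeperThread** threads, int num_threads) {
  for (int i = 0; i < num_threads; i++) {
    threads[i]->StartSweeping();  // signals start_sweeping_semaphore_; Run() sweeps both old spaces
  }
}

void FinishParallelSweeping(SweeperThread** threads, int num_threads,
                            PagedSpace* space) {
  for (int i = 0; i < num_threads; i++) {
    threads[i]->WaitForSweeperThread();  // blocks until end_sweeping_semaphore_ is signalled
  }
  for (int i = 0; i < num_threads; i++) {
    threads[i]->StealMemory(space);  // concatenates that thread's free list into the space's
  }
}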
diff --git a/deps/v8/src/sweeper-thread.h b/deps/v8/src/sweeper-thread.h
new file mode 100644
index 000000000..c36cfc39a
--- /dev/null
+++ b/deps/v8/src/sweeper-thread.h
@@ -0,0 +1,70 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_SWEEPER_THREAD_H_
+#define V8_SWEEPER_THREAD_H_
+
+#include "atomicops.h"
+#include "flags.h"
+#include "platform.h"
+#include "v8utils.h"
+
+#include "spaces.h"
+
+#include "heap.h"
+
+namespace v8 {
+namespace internal {
+
+class SweeperThread : public Thread {
+ public:
+ explicit SweeperThread(Isolate* isolate);
+ ~SweeperThread() {}
+
+ void Run();
+ void Stop();
+ void StartSweeping();
+ void WaitForSweeperThread();
+ intptr_t StealMemory(PagedSpace* space);
+
+ private:
+ Isolate* isolate_;
+ Heap* heap_;
+ MarkCompactCollector* collector_;
+ Semaphore start_sweeping_semaphore_;
+ Semaphore end_sweeping_semaphore_;
+ Semaphore stop_semaphore_;
+ FreeList free_list_old_data_space_;
+ FreeList free_list_old_pointer_space_;
+ FreeList private_free_list_old_data_space_;
+ FreeList private_free_list_old_pointer_space_;
+ volatile AtomicWord stop_thread_;
+};
+
+} } // namespace v8::internal
+
+#endif // V8_SWEEPER_THREAD_H_
diff --git a/deps/v8/src/symbol.js b/deps/v8/src/symbol.js
new file mode 100644
index 000000000..050e7d918
--- /dev/null
+++ b/deps/v8/src/symbol.js
@@ -0,0 +1,87 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"use strict";
+
+// This file relies on the fact that the following declaration has been made
+// in runtime.js:
+// var $Array = global.Array;
+
+var $Symbol = global.Symbol;
+
+// -------------------------------------------------------------------
+
+function SymbolConstructor(x) {
+ var value =
+ IS_SYMBOL(x) ? x : %CreateSymbol(IS_UNDEFINED(x) ? x : ToString(x));
+ if (%_IsConstructCall()) {
+ %_SetValueOf(this, value);
+ } else {
+ return value;
+ }
+}
+
+function SymbolGetName() {
+ var symbol = IS_SYMBOL_WRAPPER(this) ? %_ValueOf(this) : this;
+ if (!IS_SYMBOL(symbol)) {
+ throw MakeTypeError(
+ 'incompatible_method_receiver', ["Symbol.prototype.name", this]);
+ }
+ return %SymbolName(symbol);
+}
+
+function SymbolToString() {
+ throw MakeTypeError('symbol_to_string');
+}
+
+function SymbolValueOf() {
+ // NOTE: Both Symbol objects and values can enter here as
+ // 'this'. This is not as dictated by ECMA-262.
+ if (!IS_SYMBOL(this) && !IS_SYMBOL_WRAPPER(this)) {
+ throw MakeTypeError(
+ 'incompatible_method_receiver', ["Symbol.prototype.valueOf", this]);
+ }
+ return %_ValueOf(this);
+}
+
+//-------------------------------------------------------------------
+
+function SetUpSymbol() {
+ %CheckIsBootstrapping();
+
+ %SetCode($Symbol, SymbolConstructor);
+ %FunctionSetPrototype($Symbol, new $Symbol());
+ %SetProperty($Symbol.prototype, "constructor", $Symbol, DONT_ENUM);
+
+ InstallGetter($Symbol.prototype, "name", SymbolGetName);
+ InstallFunctions($Symbol.prototype, DONT_ENUM, $Array(
+ "toString", SymbolToString,
+ "valueOf", SymbolValueOf
+ ));
+}
+
+SetUpSymbol();
diff --git a/deps/v8/src/third_party/vtune/ittnotify_config.h b/deps/v8/src/third_party/vtune/ittnotify_config.h
new file mode 100644
index 000000000..412e34462
--- /dev/null
+++ b/deps/v8/src/third_party/vtune/ittnotify_config.h
@@ -0,0 +1,484 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+
+ Copyright(c) 2005-2012 Intel Corporation. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ The full GNU General Public License is included in this distribution
+ in the file called LICENSE.GPL.
+
+ Contact Information:
+ http://software.intel.com/en-us/articles/intel-vtune-amplifier-xe/
+
+ BSD LICENSE
+
+ Copyright(c) 2005-2012 Intel Corporation. All rights reserved.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef _ITTNOTIFY_CONFIG_H_
+#define _ITTNOTIFY_CONFIG_H_
+
+/** @cond exclude_from_documentation */
+#ifndef ITT_OS_WIN
+# define ITT_OS_WIN 1
+#endif /* ITT_OS_WIN */
+
+#ifndef ITT_OS_LINUX
+# define ITT_OS_LINUX 2
+#endif /* ITT_OS_LINUX */
+
+#ifndef ITT_OS_MAC
+# define ITT_OS_MAC 3
+#endif /* ITT_OS_MAC */
+
+#ifndef ITT_OS
+# if defined WIN32 || defined _WIN32
+# define ITT_OS ITT_OS_WIN
+# elif defined( __APPLE__ ) && defined( __MACH__ )
+# define ITT_OS ITT_OS_MAC
+# else
+# define ITT_OS ITT_OS_LINUX
+# endif
+#endif /* ITT_OS */
+
+#ifndef ITT_PLATFORM_WIN
+# define ITT_PLATFORM_WIN 1
+#endif /* ITT_PLATFORM_WIN */
+
+#ifndef ITT_PLATFORM_POSIX
+# define ITT_PLATFORM_POSIX 2
+#endif /* ITT_PLATFORM_POSIX */
+
+#ifndef ITT_PLATFORM
+# if ITT_OS==ITT_OS_WIN
+# define ITT_PLATFORM ITT_PLATFORM_WIN
+# else
+# define ITT_PLATFORM ITT_PLATFORM_POSIX
+# endif /* _WIN32 */
+#endif /* ITT_PLATFORM */
+
+#if defined(_UNICODE) && !defined(UNICODE)
+#define UNICODE
+#endif
+
+#include <stddef.h>
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+#include <tchar.h>
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#include <stdint.h>
+#if defined(UNICODE) || defined(_UNICODE)
+#include <wchar.h>
+#endif /* UNICODE || _UNICODE */
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+
+#ifndef CDECL
+# if ITT_PLATFORM==ITT_PLATFORM_WIN
+# define CDECL __cdecl
+# else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+# if defined _M_X64 || defined _M_AMD64 || defined __x86_64__
+# define CDECL /* not actual on x86_64 platform */
+# else /* _M_X64 || _M_AMD64 || __x86_64__ */
+# define CDECL __attribute__ ((cdecl))
+# endif /* _M_X64 || _M_AMD64 || __x86_64__ */
+# endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#endif /* CDECL */
+
+#ifndef STDCALL
+# if ITT_PLATFORM==ITT_PLATFORM_WIN
+# define STDCALL __stdcall
+# else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+# if defined _M_X64 || defined _M_AMD64 || defined __x86_64__
+# define STDCALL /* not supported on x86_64 platform */
+# else /* _M_X64 || _M_AMD64 || __x86_64__ */
+# define STDCALL __attribute__ ((stdcall))
+# endif /* _M_X64 || _M_AMD64 || __x86_64__ */
+# endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#endif /* STDCALL */
+
+#define ITTAPI CDECL
+#define LIBITTAPI CDECL
+
+/* TODO: Temporary for compatibility! */
+#define ITTAPI_CALL CDECL
+#define LIBITTAPI_CALL CDECL
+
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+/* use __forceinline (VC++ specific) */
+#define ITT_INLINE __forceinline
+#define ITT_INLINE_ATTRIBUTE /* nothing */
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+/*
+ * Generally, functions are not inlined unless optimization is specified.
+ * For functions declared inline, this attribute inlines the function even
+ * if no optimization level was specified.
+ */
+#ifdef __STRICT_ANSI__
+#define ITT_INLINE static
+#else /* __STRICT_ANSI__ */
+#define ITT_INLINE static inline
+#endif /* __STRICT_ANSI__ */
+#define ITT_INLINE_ATTRIBUTE __attribute__ ((always_inline))
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+/** @endcond */
+
+#ifndef ITT_ARCH_IA32
+# define ITT_ARCH_IA32 1
+#endif /* ITT_ARCH_IA32 */
+
+#ifndef ITT_ARCH_IA32E
+# define ITT_ARCH_IA32E 2
+#endif /* ITT_ARCH_IA32E */
+
+#ifndef ITT_ARCH_IA64
+# define ITT_ARCH_IA64 3
+#endif /* ITT_ARCH_IA64 */
+
+#ifndef ITT_ARCH
+# if defined _M_X64 || defined _M_AMD64 || defined __x86_64__
+# define ITT_ARCH ITT_ARCH_IA32E
+# elif defined _M_IA64 || defined __ia64
+# define ITT_ARCH ITT_ARCH_IA64
+# else
+# define ITT_ARCH ITT_ARCH_IA32
+# endif
+#endif
+
+#ifdef __cplusplus
+# define ITT_EXTERN_C extern "C"
+#else
+# define ITT_EXTERN_C /* nothing */
+#endif /* __cplusplus */
+
+#define ITT_TO_STR_AUX(x) #x
+#define ITT_TO_STR(x) ITT_TO_STR_AUX(x)
+
+#define __ITT_BUILD_ASSERT(expr, suffix) do { static char __itt_build_check_##suffix[(expr) ? 1 : -1]; __itt_build_check_##suffix[0] = 0; } while(0)
+#define _ITT_BUILD_ASSERT(expr, suffix) __ITT_BUILD_ASSERT((expr), suffix)
+#define ITT_BUILD_ASSERT(expr) _ITT_BUILD_ASSERT((expr), __LINE__)
+
+#define ITT_MAGIC { 0xED, 0xAB, 0xAB, 0xEC, 0x0D, 0xEE, 0xDA, 0x30 }
+
+/* Replace with snapshot date YYYYMMDD for promotion build. */
+#define API_VERSION_BUILD 20111111
+
+#ifndef API_VERSION_NUM
+#define API_VERSION_NUM 0.0.0
+#endif /* API_VERSION_NUM */
+
+#define API_VERSION "ITT-API-Version " ITT_TO_STR(API_VERSION_NUM) " (" ITT_TO_STR(API_VERSION_BUILD) ")"
+
+/* OS communication functions */
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+#include <windows.h>
+typedef HMODULE lib_t;
+typedef DWORD TIDT;
+typedef CRITICAL_SECTION mutex_t;
+#define MUTEX_INITIALIZER { 0 }
+#define strong_alias(name, aliasname) /* empty for Windows */
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#include <dlfcn.h>
+#if defined(UNICODE) || defined(_UNICODE)
+#include <wchar.h>
+#endif /* UNICODE */
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE 1 /* need for PTHREAD_MUTEX_RECURSIVE */
+#endif /* _GNU_SOURCE */
+#include <pthread.h>
+typedef void* lib_t;
+typedef pthread_t TIDT;
+typedef pthread_mutex_t mutex_t;
+#define MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER
+#define _strong_alias(name, aliasname) extern __typeof (name) aliasname __attribute__ ((alias (#name)));
+#define strong_alias(name, aliasname) _strong_alias(name, aliasname)
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+#define __itt_get_proc(lib, name) GetProcAddress(lib, name)
+#define __itt_mutex_init(mutex) InitializeCriticalSection(mutex)
+#define __itt_mutex_lock(mutex) EnterCriticalSection(mutex)
+#define __itt_mutex_unlock(mutex) LeaveCriticalSection(mutex)
+#define __itt_load_lib(name) LoadLibraryA(name)
+#define __itt_unload_lib(handle) FreeLibrary(handle)
+#define __itt_system_error() (int)GetLastError()
+#define __itt_fstrcmp(s1, s2) lstrcmpA(s1, s2)
+#define __itt_fstrlen(s) lstrlenA(s)
+#define __itt_fstrcpyn(s1, s2, l) lstrcpynA(s1, s2, l)
+#define __itt_fstrdup(s) _strdup(s)
+#define __itt_thread_id() GetCurrentThreadId()
+#define __itt_thread_yield() SwitchToThread()
+#ifndef ITT_SIMPLE_INIT
+ITT_INLINE int __itt_interlocked_increment(volatile long* ptr) ITT_INLINE_ATTRIBUTE;
+ITT_INLINE int __itt_interlocked_increment(volatile long* ptr)
+{
+ return InterlockedIncrement(ptr);
+}
+#endif /* ITT_SIMPLE_INIT */
+#else /* ITT_PLATFORM!=ITT_PLATFORM_WIN */
+#define __itt_get_proc(lib, name) dlsym(lib, name)
+#define __itt_mutex_init(mutex) \
+ { \
+ pthread_mutexattr_t mutex_attr; \
+ int error_code = pthread_mutexattr_init(&mutex_attr); \
+ if (error_code) \
+ __itt_report_error(__itt_error_system, "pthread_mutexattr_init", error_code); \
+ error_code = pthread_mutexattr_settype(&mutex_attr, PTHREAD_MUTEX_RECURSIVE); \
+ if (error_code) \
+ __itt_report_error(__itt_error_system, "pthread_mutexattr_settype", error_code); \
+ error_code = pthread_mutex_init(mutex, &mutex_attr); \
+ if (error_code) \
+ __itt_report_error(__itt_error_system, "pthread_mutex_init", error_code); \
+ error_code = pthread_mutexattr_destroy(&mutex_attr); \
+ if (error_code) \
+ __itt_report_error(__itt_error_system, "pthread_mutexattr_destroy", error_code); \
+ }
+#define __itt_mutex_lock(mutex) pthread_mutex_lock(mutex)
+#define __itt_mutex_unlock(mutex) pthread_mutex_unlock(mutex)
+#define __itt_load_lib(name) dlopen(name, RTLD_LAZY)
+#define __itt_unload_lib(handle) dlclose(handle)
+#define __itt_system_error() errno
+#define __itt_fstrcmp(s1, s2) strcmp(s1, s2)
+#define __itt_fstrlen(s) strlen(s)
+#define __itt_fstrcpyn(s1, s2, l) strncpy(s1, s2, l)
+#define __itt_fstrdup(s) strdup(s)
+#define __itt_thread_id() pthread_self()
+#define __itt_thread_yield() sched_yield()
+#if ITT_ARCH==ITT_ARCH_IA64
+#ifdef __INTEL_COMPILER
+#define __TBB_machine_fetchadd4(addr, val) __fetchadd4_acq((void *)addr, val)
+#else /* __INTEL_COMPILER */
+/* TODO: Add Support for not Intel compilers for IA64 */
+#endif /* __INTEL_COMPILER */
+#else /* ITT_ARCH!=ITT_ARCH_IA64 */
+/*ITT_INLINE int __TBB_machine_fetchadd4(volatile void* ptr, long addend) ITT_INLINE_ATTRIBUTE;
+ITT_INLINE int __TBB_machine_fetchadd4(volatile void* ptr, long addend)
+{
+ int result;
+ __asm__ __volatile__("lock\nxaddl %0,%1"
+ : "=r"(result),"=m"(*(long*)ptr)
+ : "0"(addend), "m"(*(long*)ptr)
+ : "memory");
+ return result;
+}
+*/
+#endif /* ITT_ARCH==ITT_ARCH_IA64 */
+#ifndef ITT_SIMPLE_INIT
+/*ITT_INLINE int __itt_interlocked_increment(volatile long* ptr) ITT_INLINE_ATTRIBUTE;
+ITT_INLINE int __itt_interlocked_increment(volatile long* ptr)
+{
+ return __TBB_machine_fetchadd4(ptr, 1) + 1;
+}
+*/
+#endif /* ITT_SIMPLE_INIT */
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+
+typedef enum {
+ __itt_collection_normal = 0,
+ __itt_collection_paused = 1
+} __itt_collection_state;
+
+typedef enum {
+ __itt_thread_normal = 0,
+ __itt_thread_ignored = 1
+} __itt_thread_state;
+
+#pragma pack(push, 8)
+
+typedef struct ___itt_thread_info
+{
+ const char* nameA; /*!< Copy of original name in ASCII. */
+#if defined(UNICODE) || defined(_UNICODE)
+ const wchar_t* nameW; /*!< Copy of original name in UNICODE. */
+#else /* UNICODE || _UNICODE */
+ void* nameW;
+#endif /* UNICODE || _UNICODE */
+ TIDT tid;
+ __itt_thread_state state; /*!< Thread state (paused or normal) */
+ int extra1; /*!< Reserved to the runtime */
+ void* extra2; /*!< Reserved to the runtime */
+ struct ___itt_thread_info* next;
+} __itt_thread_info;
+
+#include "ittnotify_types.h" /* For __itt_group_id definition */
+
+typedef struct ___itt_api_info_20101001
+{
+ const char* name;
+ void** func_ptr;
+ void* init_func;
+ __itt_group_id group;
+} __itt_api_info_20101001;
+
+typedef struct ___itt_api_info
+{
+ const char* name;
+ void** func_ptr;
+ void* init_func;
+ void* null_func;
+ __itt_group_id group;
+} __itt_api_info;
+
+struct ___itt_domain;
+struct ___itt_string_handle;
+
+typedef struct ___itt_global
+{
+ unsigned char magic[8];
+ unsigned long version_major;
+ unsigned long version_minor;
+ unsigned long version_build;
+ volatile long api_initialized;
+ volatile long mutex_initialized;
+ volatile long atomic_counter;
+ mutex_t mutex;
+ lib_t lib;
+ void* error_handler;
+ const char** dll_path_ptr;
+ __itt_api_info* api_list_ptr;
+ struct ___itt_global* next;
+ /* Joinable structures below */
+ __itt_thread_info* thread_list;
+ struct ___itt_domain* domain_list;
+ struct ___itt_string_handle* string_list;
+ __itt_collection_state state;
+} __itt_global;
+
+#pragma pack(pop)
+
+#define NEW_THREAD_INFO_W(gptr,h,h_tail,t,s,n) { \
+ h = (__itt_thread_info*)malloc(sizeof(__itt_thread_info)); \
+ if (h != NULL) { \
+ h->tid = t; \
+ h->nameA = NULL; \
+ h->nameW = n ? _wcsdup(n) : NULL; \
+ h->state = s; \
+ h->extra1 = 0; /* reserved */ \
+ h->extra2 = NULL; /* reserved */ \
+ h->next = NULL; \
+ if (h_tail == NULL) \
+ (gptr)->thread_list = h; \
+ else \
+ h_tail->next = h; \
+ } \
+}
+
+#define NEW_THREAD_INFO_A(gptr,h,h_tail,t,s,n) { \
+ h = (__itt_thread_info*)malloc(sizeof(__itt_thread_info)); \
+ if (h != NULL) { \
+ h->tid = t; \
+ h->nameA = n ? __itt_fstrdup(n) : NULL; \
+ h->nameW = NULL; \
+ h->state = s; \
+ h->extra1 = 0; /* reserved */ \
+ h->extra2 = NULL; /* reserved */ \
+ h->next = NULL; \
+ if (h_tail == NULL) \
+ (gptr)->thread_list = h; \
+ else \
+ h_tail->next = h; \
+ } \
+}
+
+#define NEW_DOMAIN_W(gptr,h,h_tail,name) { \
+ h = (__itt_domain*)malloc(sizeof(__itt_domain)); \
+ if (h != NULL) { \
+ h->flags = 0; /* domain is disabled by default */ \
+ h->nameA = NULL; \
+ h->nameW = name ? _wcsdup(name) : NULL; \
+ h->extra1 = 0; /* reserved */ \
+ h->extra2 = NULL; /* reserved */ \
+ h->next = NULL; \
+ if (h_tail == NULL) \
+ (gptr)->domain_list = h; \
+ else \
+ h_tail->next = h; \
+ } \
+}
+
+#define NEW_DOMAIN_A(gptr,h,h_tail,name) { \
+ h = (__itt_domain*)malloc(sizeof(__itt_domain)); \
+ if (h != NULL) { \
+ h->flags = 0; /* domain is disabled by default */ \
+ h->nameA = name ? __itt_fstrdup(name) : NULL; \
+ h->nameW = NULL; \
+ h->extra1 = 0; /* reserved */ \
+ h->extra2 = NULL; /* reserved */ \
+ h->next = NULL; \
+ if (h_tail == NULL) \
+ (gptr)->domain_list = h; \
+ else \
+ h_tail->next = h; \
+ } \
+}
+
+#define NEW_STRING_HANDLE_W(gptr,h,h_tail,name) { \
+ h = (__itt_string_handle*)malloc(sizeof(__itt_string_handle)); \
+ if (h != NULL) { \
+ h->strA = NULL; \
+ h->strW = name ? _wcsdup(name) : NULL; \
+ h->extra1 = 0; /* reserved */ \
+ h->extra2 = NULL; /* reserved */ \
+ h->next = NULL; \
+ if (h_tail == NULL) \
+ (gptr)->string_list = h; \
+ else \
+ h_tail->next = h; \
+ } \
+}
+
+#define NEW_STRING_HANDLE_A(gptr,h,h_tail,name) { \
+ h = (__itt_string_handle*)malloc(sizeof(__itt_string_handle)); \
+ if (h != NULL) { \
+ h->strA = name ? __itt_fstrdup(name) : NULL; \
+ h->strW = NULL; \
+ h->extra1 = 0; /* reserved */ \
+ h->extra2 = NULL; /* reserved */ \
+ h->next = NULL; \
+ if (h_tail == NULL) \
+ (gptr)->string_list = h; \
+ else \
+ h_tail->next = h; \
+ } \
+}
+
+#endif /* _ITTNOTIFY_CONFIG_H_ */
+
diff --git a/deps/v8/src/third_party/vtune/ittnotify_types.h b/deps/v8/src/third_party/vtune/ittnotify_types.h
new file mode 100644
index 000000000..736c1f5b5
--- /dev/null
+++ b/deps/v8/src/third_party/vtune/ittnotify_types.h
@@ -0,0 +1,113 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+
+ Copyright(c) 2005-2012 Intel Corporation. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ The full GNU General Public License is included in this distribution
+ in the file called LICENSE.GPL.
+
+ Contact Information:
+ http://software.intel.com/en-us/articles/intel-vtune-amplifier-xe/
+
+ BSD LICENSE
+
+ Copyright(c) 2005-2012 Intel Corporation. All rights reserved.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef _ITTNOTIFY_TYPES_H_
+#define _ITTNOTIFY_TYPES_H_
+
+typedef enum ___itt_group_id
+{
+ __itt_group_none = 0,
+ __itt_group_legacy = 1<<0,
+ __itt_group_control = 1<<1,
+ __itt_group_thread = 1<<2,
+ __itt_group_mark = 1<<3,
+ __itt_group_sync = 1<<4,
+ __itt_group_fsync = 1<<5,
+ __itt_group_jit = 1<<6,
+ __itt_group_model = 1<<7,
+ __itt_group_splitter_min = 1<<7,
+ __itt_group_counter = 1<<8,
+ __itt_group_frame = 1<<9,
+ __itt_group_stitch = 1<<10,
+ __itt_group_heap = 1<<11,
+ __itt_group_splitter_max = 1<<12,
+ __itt_group_structure = 1<<12,
+ __itt_group_suppress = 1<<13,
+ __itt_group_all = -1
+} __itt_group_id;
+
+#pragma pack(push, 8)
+
+typedef struct ___itt_group_list
+{
+ __itt_group_id id;
+ const char* name;
+} __itt_group_list;
+
+#pragma pack(pop)
+
+#define ITT_GROUP_LIST(varname) \
+ static __itt_group_list varname[] = { \
+ { __itt_group_all, "all" }, \
+ { __itt_group_control, "control" }, \
+ { __itt_group_thread, "thread" }, \
+ { __itt_group_mark, "mark" }, \
+ { __itt_group_sync, "sync" }, \
+ { __itt_group_fsync, "fsync" }, \
+ { __itt_group_jit, "jit" }, \
+ { __itt_group_model, "model" }, \
+ { __itt_group_counter, "counter" }, \
+ { __itt_group_frame, "frame" }, \
+ { __itt_group_stitch, "stitch" }, \
+ { __itt_group_heap, "heap" }, \
+ { __itt_group_structure, "structure" }, \
+ { __itt_group_suppress, "suppress" }, \
+ { __itt_group_none, NULL } \
+ }
+
+#endif /* _ITTNOTIFY_TYPES_H_ */
+
diff --git a/deps/v8/src/third_party/vtune/jitprofiling.cc b/deps/v8/src/third_party/vtune/jitprofiling.cc
new file mode 100644
index 000000000..b3952b321
--- /dev/null
+++ b/deps/v8/src/third_party/vtune/jitprofiling.cc
@@ -0,0 +1,499 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+
+ Copyright(c) 2005-2012 Intel Corporation. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ The full GNU General Public License is included in this distribution
+ in the file called LICENSE.GPL.
+
+ Contact Information:
+ http://software.intel.com/en-us/articles/intel-vtune-amplifier-xe/
+
+ BSD LICENSE
+
+ Copyright(c) 2005-2012 Intel Corporation. All rights reserved.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include "ittnotify_config.h"
+
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+#include <windows.h>
+#pragma optimize("", off)
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#include <pthread.h>
+#include <dlfcn.h>
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#include <malloc.h>
+#include <stdlib.h>
+
+#include "jitprofiling.h"
+
+static const char rcsid[] = "\n@(#) $Revision: 234474 $\n";
+
+#define DLL_ENVIRONMENT_VAR "VS_PROFILER"
+
+#ifndef NEW_DLL_ENVIRONMENT_VAR
+#if ITT_ARCH==ITT_ARCH_IA32
+#define NEW_DLL_ENVIRONMENT_VAR "INTEL_JIT_PROFILER32"
+#else
+#define NEW_DLL_ENVIRONMENT_VAR "INTEL_JIT_PROFILER64"
+#endif
+#endif /* NEW_DLL_ENVIRONMENT_VAR */
+
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+#define DEFAULT_DLLNAME "JitPI.dll"
+HINSTANCE m_libHandle = NULL;
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#define DEFAULT_DLLNAME "libJitPI.so"
+void* m_libHandle = NULL;
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+
+/* default location of JIT profiling agent on Android */
+#define ANDROID_JIT_AGENT_PATH "/data/intel/libittnotify.so"
+
+/* the function pointers */
+typedef unsigned int(*TPInitialize)(void);
+static TPInitialize FUNC_Initialize=NULL;
+
+typedef unsigned int(*TPNotify)(unsigned int, void*);
+static TPNotify FUNC_NotifyEvent=NULL;
+
+static iJIT_IsProfilingActiveFlags executionMode = iJIT_NOTHING_RUNNING;
+
+/* end collector dll part. */
+
+/* loadiJIT_Funcs(): this function is called once at the beginning and is responsible
+** for loading the functions from BistroJavaCollector.dll
+** result:
+** on success: the functions load, iJIT_DLL_is_missing=0, return value = 1.
+** on failure: the functions are NULL, iJIT_DLL_is_missing=1, return value = 0.
+*/
+static int loadiJIT_Funcs(void);
+
+/* global flag indicating whether the BistroJavaCollector could not be loaded */
+static int iJIT_DLL_is_missing = 0;
+
+/* Virtual stack - the struct is used as a virtual stack for each thread.
+** Every thread is initialized with a stack of size INIT_TOP_Stack.
+** Every method entry decrements the current stack pointer,
+** and when a thread's stack reaches its top of stack (return from the global function),
+** the top of stack and the current stack are increased. Note that when returning from a function
+** the stack pointer is the address of the function return.
+*/
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+static DWORD threadLocalStorageHandle = 0;
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+static pthread_key_t threadLocalStorageHandle = (pthread_key_t)0;
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+
+#define INIT_TOP_Stack 10000
+
+typedef struct
+{
+ unsigned int TopStack;
+ unsigned int CurrentStack;
+} ThreadStack, *pThreadStack;
+
+/* end of virtual stack. */
+
+/*
+** The function for reporting virtual-machine related events to VTune.
+** Note: when reporting iJVM_EVENT_TYPE_ENTER_NIDS, there is no need to fill in the stack_id
+** field in the iJIT_Method_NIDS structure, as VTune fills it.
+**
+** For iJVM_EVENT_TYPE_ENTER_NIDS and iJVM_EVENT_TYPE_LEAVE_NIDS events the return value
+** will be 0 in case of failure.
+** For the iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED event it will be -1 if EventSpecificData == 0,
+** otherwise it will be 0.
+*/
+
+ITT_EXTERN_C int JITAPI iJIT_NotifyEvent(iJIT_JVM_EVENT event_type, void *EventSpecificData)
+{
+ int ReturnValue;
+
+ /*******************************************************************************
+ ** This section is for debugging outside of VTune.
+    ** It creates the environment variable that indicates call graph mode.
+    ** If running outside of VTune, uncomment this section.
+ **
+
+ static int firstTime = 1;
+ char DoCallGraph[12] = "DoCallGraph";
+ if (firstTime)
+ {
+ firstTime = 0;
+ SetEnvironmentVariable( "BISTRO_COLLECTORS_DO_CALLGRAPH", DoCallGraph);
+ }
+
+ ** end of section.
+ *******************************************************************************/
+
+ /* initialization part - the functions have not been loaded yet. This part
+ ** will load the functions, and check if we are in Call Graph mode.
+ ** (for special treatment).
+ */
+ if (!FUNC_NotifyEvent)
+ {
+ if (iJIT_DLL_is_missing)
+ return 0;
+
+ // load the Function from the DLL
+ if (!loadiJIT_Funcs())
+ return 0;
+
+ /* Call Graph initialization. */
+ }
+
+ /* If the event is method entry/exit, check that in the current mode
+ ** VTune is allowed to receive it
+ */
+ if ((event_type == iJVM_EVENT_TYPE_ENTER_NIDS || event_type == iJVM_EVENT_TYPE_LEAVE_NIDS) &&
+ (executionMode != iJIT_CALLGRAPH_ON))
+ {
+ return 0;
+ }
+ /* This section is performed when method enter event occurs.
+ ** It updates the virtual stack, or creates it if this is the first
+ ** method entry in the thread. The stack pointer is decreased.
+ */
+ if (event_type == iJVM_EVENT_TYPE_ENTER_NIDS)
+ {
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ pThreadStack threadStack = (pThreadStack)TlsGetValue (threadLocalStorageHandle);
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ pThreadStack threadStack = (pThreadStack)pthread_getspecific(threadLocalStorageHandle);
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+
+ // check for use of reserved method IDs
+ if ( ((piJIT_Method_NIDS) EventSpecificData)->method_id <= 999 )
+ return 0;
+
+ if (!threadStack)
+ {
+ // initialize the stack.
+ threadStack = (pThreadStack) calloc (sizeof(ThreadStack), 1);
+ threadStack->TopStack = INIT_TOP_Stack;
+ threadStack->CurrentStack = INIT_TOP_Stack;
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ TlsSetValue(threadLocalStorageHandle,(void*)threadStack);
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ pthread_setspecific(threadLocalStorageHandle,(void*)threadStack);
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ }
+
+ // decrease the stack.
+ ((piJIT_Method_NIDS) EventSpecificData)->stack_id = (threadStack->CurrentStack)--;
+ }
+
+ /* This section is performed when method leave event occurs
+ ** It updates the virtual stack.
+ ** Increases the stack pointer.
+ ** If the stack pointer reached the top (left the global function)
+ ** increase the pointer and the top pointer.
+ */
+ if (event_type == iJVM_EVENT_TYPE_LEAVE_NIDS)
+ {
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ pThreadStack threadStack = (pThreadStack)TlsGetValue (threadLocalStorageHandle);
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ pThreadStack threadStack = (pThreadStack)pthread_getspecific(threadLocalStorageHandle);
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+
+ // check for use of reserved method IDs
+ if ( ((piJIT_Method_NIDS) EventSpecificData)->method_id <= 999 )
+ return 0;
+
+ if (!threadStack)
+ {
+ /* Error: first report in this thread is method exit */
+ exit (1);
+ }
+
+ ((piJIT_Method_NIDS) EventSpecificData)->stack_id = ++(threadStack->CurrentStack) + 1;
+
+ if (((piJIT_Method_NIDS) EventSpecificData)->stack_id > threadStack->TopStack)
+ ((piJIT_Method_NIDS) EventSpecificData)->stack_id = (unsigned int)-1;
+ }
+
+ if (event_type == iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED)
+ {
+ // check for use of reserved method IDs
+ if ( ((piJIT_Method_Load) EventSpecificData)->method_id <= 999 )
+ return 0;
+ }
+
+ ReturnValue = (int)FUNC_NotifyEvent(event_type, EventSpecificData);
+
+ return ReturnValue;
+}
+
+ITT_EXTERN_C void JITAPI iJIT_RegisterCallbackEx(void *userdata, iJIT_ModeChangedEx NewModeCallBackFuncEx) // The new mode call back routine
+{
+ // is it already missing... or the load of functions from the DLL failed
+ if (iJIT_DLL_is_missing || !loadiJIT_Funcs())
+ {
+ NewModeCallBackFuncEx(userdata, iJIT_NO_NOTIFICATIONS); // then do not bother with notifications
+ /* Error: could not load JIT functions. */
+ return;
+ }
+ // nothing to do with the callback
+}
+
+/*
+** This function allows the user to query in which mode, if at all, VTune is running
+*/
+ITT_EXTERN_C iJIT_IsProfilingActiveFlags JITAPI iJIT_IsProfilingActive()
+{
+ if (!iJIT_DLL_is_missing)
+ {
+ loadiJIT_Funcs();
+ }
+
+ return executionMode;
+}
+#include <stdio.h>
+/* This function loads the collector DLL (BistroJavaCollector) and the relevant functions.
+** On success: all functions are loaded, iJIT_DLL_is_missing = 0, return value = 1.
+** On failure: all functions are NULL, iJIT_DLL_is_missing = 1, return value = 0.
+*/
+static int loadiJIT_Funcs()
+{
+ static int bDllWasLoaded = 0;
+ char *dllName = (char*)rcsid; // !!! Just to avoid unused code elimination !!!
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ DWORD dNameLength = 0;
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+
+ if(bDllWasLoaded)
+ {// the DLL was already loaded, no need to do it a second time
+ return 1;
+ }
+
+ // Assumes that the DLL will not be found
+ iJIT_DLL_is_missing = 1;
+ FUNC_NotifyEvent = NULL;
+
+ if (m_libHandle)
+ {
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ FreeLibrary(m_libHandle);
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ dlclose(m_libHandle);
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ m_libHandle = NULL;
+ }
+
+ // try to get the dll name from the environment
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ dNameLength = GetEnvironmentVariableA(NEW_DLL_ENVIRONMENT_VAR, NULL, 0);
+ if (dNameLength)
+ {
+ DWORD envret = 0;
+ dllName = (char*)malloc(sizeof(char) * (dNameLength + 1));
+ envret = GetEnvironmentVariableA(NEW_DLL_ENVIRONMENT_VAR, dllName, dNameLength);
+ if (envret)
+ {
+ // Try to load the dll from the PATH...
+ m_libHandle = LoadLibraryExA(dllName, NULL, LOAD_WITH_ALTERED_SEARCH_PATH);
+ }
+ free(dllName);
+ } else {
+ // Try to use old VS_PROFILER variable
+ dNameLength = GetEnvironmentVariableA(DLL_ENVIRONMENT_VAR, NULL, 0);
+ if (dNameLength)
+ {
+ DWORD envret = 0;
+ dllName = (char*)malloc(sizeof(char) * (dNameLength + 1));
+ envret = GetEnvironmentVariableA(DLL_ENVIRONMENT_VAR, dllName, dNameLength);
+ if (envret)
+ {
+ // Try to load the dll from the PATH...
+ m_libHandle = LoadLibraryA(dllName);
+ }
+ free(dllName);
+ }
+ }
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ dllName = getenv(NEW_DLL_ENVIRONMENT_VAR);
+ if (!dllName) {
+ dllName = getenv(DLL_ENVIRONMENT_VAR);
+ }
+#ifdef ANDROID
+ if (!dllName)
+ dllName = ANDROID_JIT_AGENT_PATH;
+#endif
+ if (dllName)
+ {
+ // Try to load the dll from the PATH...
+ m_libHandle = dlopen(dllName, RTLD_LAZY);
+ }
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+
+ if (!m_libHandle)
+ {
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ m_libHandle = LoadLibraryA(DEFAULT_DLLNAME);
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ m_libHandle = dlopen(DEFAULT_DLLNAME, RTLD_LAZY);
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ }
+
+ // if the dll wasn't loaded - exit.
+ if (!m_libHandle)
+ {
+ iJIT_DLL_is_missing = 1; // don't try to initialize the JIT agent a second time
+ return 0;
+ }
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ FUNC_NotifyEvent = (TPNotify)GetProcAddress(m_libHandle, "NotifyEvent");
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ FUNC_NotifyEvent = reinterpret_cast<TPNotify>(reinterpret_cast<intptr_t>(dlsym(m_libHandle, "NotifyEvent")));
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ if (!FUNC_NotifyEvent)
+ {
+ FUNC_Initialize = NULL;
+ return 0;
+ }
+
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ FUNC_Initialize = (TPInitialize)GetProcAddress(m_libHandle, "Initialize");
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ FUNC_Initialize = reinterpret_cast<TPInitialize>(reinterpret_cast<intptr_t>(dlsym(m_libHandle, "Initialize")));
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ if (!FUNC_Initialize)
+ {
+ FUNC_NotifyEvent = NULL;
+ return 0;
+ }
+
+ executionMode = (iJIT_IsProfilingActiveFlags)FUNC_Initialize();
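+ // Regardless of what Initialize() reported, this copy forces sampling
+ // mode, so the call-graph branch below is effectively never taken.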
+ if (executionMode != iJIT_SAMPLING_ON)
+ executionMode = iJIT_SAMPLING_ON;
+
+ bDllWasLoaded = 1;
+ iJIT_DLL_is_missing = 0; // DLL is ok.
+
+ /*
+ ** Call Graph mode: init the thread local storage
+ ** (need to store the virtual stack there).
+ */
+ if ( executionMode == iJIT_CALLGRAPH_ON )
+ {
+ // Allocate a thread local storage slot for the thread "stack"
+ if (!threadLocalStorageHandle)
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ threadLocalStorageHandle = TlsAlloc();
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ pthread_key_create(&threadLocalStorageHandle, NULL);
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ }
+
+ return 1;
+}
+
+/*
+** This function should be called by the user whenever a thread ends, to free the thread
+** "virtual stack" storage
+*/
+ITT_EXTERN_C void JITAPI FinalizeThread()
+{
+ if (threadLocalStorageHandle)
+ {
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ pThreadStack threadStack = (pThreadStack)TlsGetValue (threadLocalStorageHandle);
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ pThreadStack threadStack = (pThreadStack)pthread_getspecific(threadLocalStorageHandle);
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ if (threadStack)
+ {
+ free (threadStack);
+ threadStack = NULL;
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ TlsSetValue (threadLocalStorageHandle, threadStack);
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ pthread_setspecific(threadLocalStorageHandle, threadStack);
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ }
+ }
+}
+
+/*
+** This function should be called by the user when the process ends, to free the local
+** storage index
+*/
+ITT_EXTERN_C void JITAPI FinalizeProcess()
+{
+ if (m_libHandle)
+ {
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ FreeLibrary(m_libHandle);
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ dlclose(m_libHandle);
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ m_libHandle = NULL;
+ }
+
+ if (threadLocalStorageHandle)
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ TlsFree (threadLocalStorageHandle);
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ pthread_key_delete(threadLocalStorageHandle);
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+}
+
+/*
+** This function should be called by the user once per method.
+** It returns a unique method ID; the user should keep the returned ID for
+** each method.
+*/
+ITT_EXTERN_C unsigned int JITAPI iJIT_GetNewMethodID()
+{
+ static unsigned int methodID = 0x100000;
+
+ if (methodID == 0)
+ return 0; // ERROR : this is not a valid value
+
+ return methodID++;
+}
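+
+/* Note: iJIT_NotifyEvent treats method_id values <= 999 as reserved and
+** drops events that use them, so method IDs should always come from this
+** counter (which starts at 0x100000).
+*/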
+
diff --git a/deps/v8/src/third_party/vtune/jitprofiling.h b/deps/v8/src/third_party/vtune/jitprofiling.h
new file mode 100644
index 000000000..abd6d8ca7
--- /dev/null
+++ b/deps/v8/src/third_party/vtune/jitprofiling.h
@@ -0,0 +1,298 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+
+ Copyright (c) 2005-2012 Intel Corporation. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ The full GNU General Public License is included in this distribution
+ in the file called LICENSE.GPL.
+
+ Contact Information:
+ http://software.intel.com/en-us/articles/intel-vtune-amplifier-xe/
+
+ BSD LICENSE
+
+ Copyright (c) 2005-2012 Intel Corporation. All rights reserved.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef __JITPROFILING_H__
+#define __JITPROFILING_H__
+
+/*
+ * Various constants used by functions
+ */
+
+/* event notification */
+typedef enum iJIT_jvm_event
+{
+
+ /* shutdown */
+
+ /*
+ * Program exiting EventSpecificData NA
+ */
+ iJVM_EVENT_TYPE_SHUTDOWN = 2,
+
+ /* JIT profiling */
+
+ /*
+ * issued after method code has been JITted into memory but before the code is executed.
+ * EventSpecificData is an iJIT_Method_Load
+ */
+ iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED=13,
+
+ /* issued before unload. Method code will no longer be executed, but code
+ * and info are still in memory. The VTune profiler may capture method
+ * code only at this point. EventSpecificData is iJIT_Method_Id
+ */
+ iJVM_EVENT_TYPE_METHOD_UNLOAD_START,
+
+ /* Method Profiling */
+
+ /* method name, ID and stack are supplied;
+ * issued when a method is about to be entered. EventSpecificData is
+ * iJIT_Method_NIDS
+ */
+ iJVM_EVENT_TYPE_ENTER_NIDS = 19,
+
+ /* method name, ID and stack are supplied;
+ * issued when a method is about to be left. EventSpecificData is
+ * iJIT_Method_NIDS
+ */
+ iJVM_EVENT_TYPE_LEAVE_NIDS
+} iJIT_JVM_EVENT;
+
+typedef enum _iJIT_ModeFlags
+{
+ /* No need to Notify VTune, since VTune is not running */
+ iJIT_NO_NOTIFICATIONS = 0x0000,
+
+ /* when turned on, the JIT must call
+ * iJIT_NotifyEvent
+ * (
+ * iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED,
+ * )
+ * for all the methods already JITted
+ */
+ iJIT_BE_NOTIFY_ON_LOAD = 0x0001,
+
+ /* when turned on, the JIT must call
+ * iJIT_NotifyEvent
+ * (
+ * iJVM_EVENT_TYPE_METHOD_UNLOAD_FINISHED,
+ * ) for all the methods that are unloaded
+ */
+ iJIT_BE_NOTIFY_ON_UNLOAD = 0x0002,
+
+ /* when turned on, the JIT must instrument all
+ * the currently JITted code with calls on
+ * method entries
+ */
+ iJIT_BE_NOTIFY_ON_METHOD_ENTRY = 0x0004,
+
+ /* when turned on, the JIT must instrument all
+ * the currently JITted code with calls
+ * on method exit
+ */
+ iJIT_BE_NOTIFY_ON_METHOD_EXIT = 0x0008
+
+} iJIT_ModeFlags;
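+
+/* The mode flags above are single-bit values; an iJIT_ModeFlags value passed
+** to an iJIT_ModeChangedEx callback may therefore combine several of them
+** (OR-ed together).
+*/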
+
+
+ /* Flags used by iJIT_IsProfilingActive() */
+typedef enum _iJIT_IsProfilingActiveFlags
+{
+ /* No profiler is running. Currently not used */
+ iJIT_NOTHING_RUNNING = 0x0000,
+
+ /* Sampling is running. This is the default value
+ * returned by iJIT_IsProfilingActive()
+ */
+ iJIT_SAMPLING_ON = 0x0001,
+
+ /* Call Graph is running */
+ iJIT_CALLGRAPH_ON = 0x0002
+
+} iJIT_IsProfilingActiveFlags;
+
+/* Enumerator for the environment of methods */
+typedef enum _iJDEnvironmentType
+{
+ iJDE_JittingAPI = 2
+} iJDEnvironmentType;
+
+/**********************************
+ * Data structures for the events *
+ **********************************/
+
+/* structure for the events:
+ * iJVM_EVENT_TYPE_METHOD_UNLOAD_START
+ */
+
+typedef struct _iJIT_Method_Id
+{
+ /* ID of the method (same as the one passed in
+ * the iJIT_Method_Load struct)
+ */
+ unsigned int method_id;
+
+} *piJIT_Method_Id, iJIT_Method_Id;
+
+
+/* structure for the events:
+ * iJVM_EVENT_TYPE_ENTER_NIDS,
+ * iJVM_EVENT_TYPE_LEAVE_NIDS,
+ * iJVM_EVENT_TYPE_EXCEPTION_OCCURRED_NIDS
+ */
+
+typedef struct _iJIT_Method_NIDS
+{
+ /* unique method ID */
+ unsigned int method_id;
+
+ /* NOTE: no need to fill this field, it's filled by VTune */
+ unsigned int stack_id;
+
+ /* method name (just the method, without the class) */
+ char* method_name;
+} *piJIT_Method_NIDS, iJIT_Method_NIDS;
+
+/* structures for the events:
+ * iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED
+ */
+
+typedef struct _LineNumberInfo
+{
+ /* x86 offset from the beginning of the method */
+ unsigned int Offset;
+
+ /* source line number from the beginning of the source file */
+ unsigned int LineNumber;
+
+} *pLineNumberInfo, LineNumberInfo;
+
+typedef struct _iJIT_Method_Load
+{
+ /* unique method ID - can be any unique value (except 0 - 999) */
+ unsigned int method_id;
+
+ /* method name (can be with or without the class and signature, in any case
+ * the class name will be added to it)
+ */
+ char* method_name;
+
+ /* virtual address of that method - This determines the method range for the
+ * iJVM_EVENT_TYPE_ENTER/LEAVE_METHOD_ADDR events
+ */
+ void* method_load_address;
+
+ /* Size in memory - Must be exact */
+ unsigned int method_size;
+
+ /* Line Table size in number of entries - Zero if none */
+ unsigned int line_number_size;
+
+ /* Pointer to the begining of the line numbers info array */
+ pLineNumberInfo line_number_table;
+
+ /* unique class ID */
+ unsigned int class_id;
+
+ /* class file name */
+ char* class_file_name;
+
+ /* source file name */
+ char* source_file_name;
+
+ /* bits supplied by the user for saving in the JIT file */
+ void* user_data;
+
+ /* the size of the user data buffer */
+ unsigned int user_data_size;
+
+ /* NOTE: no need to fill this field, it's filled by VTune */
+ iJDEnvironmentType env;
+
+} *piJIT_Method_Load, iJIT_Method_Load;
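+
+/* A minimal usage sketch (illustrative only; the local names code_start and
+** code_size are hypothetical, the API names are the ones declared below):
+**
+**   iJIT_Method_Load jmethod;
+**   memset(&jmethod, 0, sizeof(jmethod));
+**   jmethod.method_id = iJIT_GetNewMethodID();
+**   jmethod.method_name = (char*)"foo";
+**   jmethod.method_load_address = code_start;   // start of the JITted code
+**   jmethod.method_size = code_size;            // exact size in bytes
+**   iJIT_NotifyEvent(iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED, (void*)&jmethod);
+*/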
+
+/* API Functions */
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef CDECL
+# if defined WIN32 || defined _WIN32
+# define CDECL __cdecl
+# else /* defined WIN32 || defined _WIN32 */
+# if defined _M_X64 || defined _M_AMD64 || defined __x86_64__
+# define CDECL /* not applicable on the x86_64 platform */
+# else /* _M_X64 || _M_AMD64 || __x86_64__ */
+# define CDECL __attribute__ ((cdecl))
+# endif /* _M_X64 || _M_AMD64 || __x86_64__ */
+# endif /* defined WIN32 || defined _WIN32 */
+#endif /* CDECL */
+
+#define JITAPI CDECL
+
+/* called when the settings are changed with new settings */
+typedef void (*iJIT_ModeChangedEx)(void *UserData, iJIT_ModeFlags Flags);
+
+int JITAPI iJIT_NotifyEvent(iJIT_JVM_EVENT event_type, void *EventSpecificData);
+
+/* The new mode call back routine */
+void JITAPI iJIT_RegisterCallbackEx(void *userdata,
+ iJIT_ModeChangedEx NewModeCallBackFuncEx);
+
+iJIT_IsProfilingActiveFlags JITAPI iJIT_IsProfilingActive(void);
+
+void JITAPI FinalizeThread(void);
+
+void JITAPI FinalizeProcess(void);
+
+unsigned int JITAPI iJIT_GetNewMethodID(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __JITPROFILING_H__ */
diff --git a/deps/v8/src/third_party/vtune/v8-vtune.h b/deps/v8/src/third_party/vtune/v8-vtune.h
new file mode 100644
index 000000000..29ea3eacd
--- /dev/null
+++ b/deps/v8/src/third_party/vtune/v8-vtune.h
@@ -0,0 +1,69 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+
+ Copyright(c) 2005-2012 Intel Corporation. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ The full GNU General Public License is included in this distribution
+ in the file called LICENSE.GPL.
+
+ Contact Information:
+ http://software.intel.com/en-us/articles/intel-vtune-amplifier-xe/
+
+ BSD LICENSE
+
+ Copyright(c) 2005-2012 Intel Corporation. All rights reserved.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef V8_VTUNE_H_
+#define V8_VTUNE_H_
+
+namespace vTune {
+
+void InitializeVtuneForV8();
+
+} // namespace vTune
+
+
+#endif // V8_VTUNE_H_
+
diff --git a/deps/v8/src/third_party/vtune/v8vtune.gyp b/deps/v8/src/third_party/vtune/v8vtune.gyp
new file mode 100644
index 000000000..6adf36568
--- /dev/null
+++ b/deps/v8/src/third_party/vtune/v8vtune.gyp
@@ -0,0 +1,59 @@
+# Copyright 2012 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+{
+ 'variables': {
+ 'v8_code': 1,
+ },
+ 'includes': ['../../../build/toolchain.gypi', '../../../build/features.gypi'],
+ 'targets': [
+ {
+ 'target_name': 'v8_vtune',
+ 'type': 'static_library',
+ 'dependencies': [
+ '../../../tools/gyp/v8.gyp:v8',
+ ],
+ 'sources': [
+ 'ittnotify_config.h',
+ 'ittnotify_types.h',
+ 'jitprofiling.cc',
+ 'jitprofiling.h',
+ 'v8-vtune.h',
+ 'vtune-jit.cc',
+ 'vtune-jit.h',
+ ],
+ 'direct_dependent_settings': {
+ 'defines': ['ENABLE_VTUNE_JIT_INTERFACE',],
+ 'conditions': [
+ ['OS != "win"', {
+ 'libraries': ['-ldl',],
+ }],
+ ],
+ },
+ },
+ ],
+}
diff --git a/deps/v8/src/third_party/vtune/vtune-jit.cc b/deps/v8/src/third_party/vtune/vtune-jit.cc
new file mode 100644
index 000000000..93de7efbb
--- /dev/null
+++ b/deps/v8/src/third_party/vtune/vtune-jit.cc
@@ -0,0 +1,283 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+
+ Copyright(c) 2005-2012 Intel Corporation. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ The full GNU General Public License is included in this distribution
+ in the file called LICENSE.GPL.
+
+ Contact Information:
+ http://software.intel.com/en-us/articles/intel-vtune-amplifier-xe/
+
+ BSD LICENSE
+
+ Copyright(c) 2005-2012 Intel Corporation. All rights reserved.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <string.h>
+
+#ifdef WIN32
+#include <hash_map>
+using namespace std;
+#else
+// To avoid GCC 4.4 compilation warning about hash_map being deprecated.
+#define OLD_DEPRECATED __DEPRECATED
+#undef __DEPRECATED
+#if defined (ANDROID)
+#include <hash_map>
+using namespace std;
+#else
+#include <ext/hash_map>
+using namespace __gnu_cxx;
+#endif
+#define __DEPRECATED OLD_DEPRECATED
+#endif
+
+#include <list>
+
+#include "v8-vtune.h"
+#include "vtune-jit.h"
+
+namespace vTune {
+namespace internal {
+
+
+// This class is used to record the JITted code position info for JIT
+// code profiling.
+class JITCodeLineInfo {
+ public:
+ JITCodeLineInfo() { }
+
+ void SetPosition(intptr_t pc, int pos) {
+ AddCodeLineInfo(LineNumInfo(pc, pos));
+ }
+
+ struct LineNumInfo {
+ LineNumInfo(intptr_t pc, int pos)
+ : pc_(pc), pos_(pos) { }
+
+ intptr_t pc_;
+ int pos_;
+ };
+
+ std::list<LineNumInfo>* GetLineNumInfo() {
+ return &line_num_info_;
+ }
+
+ private:
+ void AddCodeLineInfo(const LineNumInfo& line_info) {
+ line_num_info_.push_back(line_info);
+ }
+ std::list<LineNumInfo> line_num_info_;
+};
+
+struct SameCodeObjects {
+ bool operator () (void* key1, void* key2) const {
+ return key1 == key2;
+ }
+};
+
+struct HashForCodeObject {
+ uint32_t operator () (void* code) const {
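+ // kGoldenRatio is a large prime close to 2^32 / golden ratio, a common
+ // multiplicative-hashing constant.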
+ static const uintptr_t kGoldenRatio = 2654435761u;
+ uintptr_t hash = reinterpret_cast<uintptr_t>(code);
+ return static_cast<uint32_t>(hash * kGoldenRatio);
+ }
+};
+
+#ifdef WIN32
+typedef hash_map<void*, void*> JitInfoMap;
+#else
+typedef hash_map<void*, void*, HashForCodeObject, SameCodeObjects> JitInfoMap;
+#endif
+
+static JitInfoMap* GetEntries() {
+ static JitInfoMap* entries;
+ if (entries == NULL) {
+ entries = new JitInfoMap();
+ }
+ return entries;
+}
+
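+// Note: in this copy "tagging" degenerates to a plain non-NULL check; the
+// JITCodeLineInfo pointer is stored untagged in the map (see UntagLineInfo).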
+static bool IsLineInfoTagged(void* ptr) {
+ return 0 != (reinterpret_cast<intptr_t>(ptr));
+}
+
+static JITCodeLineInfo* UntagLineInfo(void* ptr) {
+ return reinterpret_cast<JITCodeLineInfo*>(
+ reinterpret_cast<intptr_t>(ptr));
+}
+
+// The parameter str is a mixed pattern that contains the
+// function name and some other info. It comes from the
+// Logger::CodeCreateEvent(...) functions. This function extracts the
+// pure function name from the input parameter.
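+// For example, a name of the form "LazyCompile:*foo bar.js:10" (an assumed
+// V8 log-name format) would yield "foo".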
+static char* GetFunctionNameFromMixedName(const char* str, int length) {
+ int index = 0;
+ int count = 0;
+ char* start_ptr = NULL;
+
+ while (str[index++] != ':' && (index < length)) {}
+
+ if (str[index] == '*' || str[index] == '~' ) index++;
+ if (index >= length) return NULL;
+
+ start_ptr = const_cast<char*>(str + index);
+
+ while (index < length && str[index++] != ' ') {
+ count++;
+ }
+
+ char* result = new char[count + 1];
+ memcpy(result, start_ptr, count);
+ result[count] = '\0';
+
+ return result;
+}
+
+// The JitCodeEventHandler for VTune.
+void VTUNEJITInterface::event_handler(const v8::JitCodeEvent* event) {
+ if (VTUNERUNNING && event != NULL) {
+ switch (event->type) {
+ case v8::JitCodeEvent::CODE_ADDED: {
+ char* temp_file_name = NULL;
+ char* temp_method_name =
+ GetFunctionNameFromMixedName(event->name.str,
+ static_cast<int>(event->name.len));
+ iJIT_Method_Load jmethod;
+ memset(&jmethod, 0, sizeof jmethod);
+ jmethod.method_id = iJIT_GetNewMethodID();
+ jmethod.method_load_address = event->code_start;
+ jmethod.method_size = static_cast<unsigned int>(event->code_len);
+ jmethod.method_name = temp_method_name;
+
+ Handle<Script> script = event->script;
+
+ if (*script != NULL) {
+ // Get the source file name and set it to jmethod.source_file_name
+ if ((*script->GetScriptName())->IsString()) {
+ Handle<String> script_name = script->GetScriptName()->ToString();
+ temp_file_name = new char[script_name->Utf8Length() + 1];
+ script_name->WriteUtf8(temp_file_name);
+ jmethod.source_file_name = temp_file_name;
+ }
+
+ JitInfoMap::iterator entry =
+ GetEntries()->find(event->code_start);
+ if (entry != GetEntries()->end() && IsLineInfoTagged(entry->first)) {
+ JITCodeLineInfo* line_info = UntagLineInfo(entry->second);
+ // Get the line_num_info and set it to jmethod.line_number_table
+ std::list<JITCodeLineInfo::LineNumInfo>* vtunelineinfo =
+ line_info->GetLineNumInfo();
+
+ jmethod.line_number_size = (unsigned int)vtunelineinfo->size();
+ jmethod.line_number_table =
+ reinterpret_cast<LineNumberInfo*>(
+ malloc(sizeof(LineNumberInfo)*jmethod.line_number_size));
+
+ std::list<JITCodeLineInfo::LineNumInfo>::iterator Iter;
+ int index = 0;
+ for (Iter = vtunelineinfo->begin();
+ Iter != vtunelineinfo->end();
+ Iter++) {
+ jmethod.line_number_table[index].Offset =
+ static_cast<unsigned int>(Iter->pc_);
+ jmethod.line_number_table[index++].LineNumber =
+ script->GetLineNumber(Iter->pos_)+1;
+ }
+ GetEntries()->erase(event->code_start);
+ }
+ }
+
+ iJIT_NotifyEvent(iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED,
+ reinterpret_cast<void*>(&jmethod));
+ if (temp_method_name)
+ delete []temp_method_name;
+ if (temp_file_name)
+ delete []temp_file_name;
+ break;
+ }
+ // TODO(chunyang.dai@intel.com): code_move will be supported.
+ case v8::JitCodeEvent::CODE_MOVED:
+ break;
+ // Currently the CODE_REMOVED event is not issued.
+ case v8::JitCodeEvent::CODE_REMOVED:
+ break;
+ case v8::JitCodeEvent::CODE_ADD_LINE_POS_INFO: {
+ JITCodeLineInfo* line_info =
+ reinterpret_cast<JITCodeLineInfo*>(event->user_data);
+ if (line_info != NULL) {
+ line_info->SetPosition(static_cast<intptr_t>(event->line_info.offset),
+ static_cast<int>(event->line_info.pos));
+ }
+ break;
+ }
+ case v8::JitCodeEvent::CODE_START_LINE_INFO_RECORDING: {
+ v8::JitCodeEvent* temp_event = const_cast<v8::JitCodeEvent*>(event);
+ temp_event->user_data = new JITCodeLineInfo();
+ break;
+ }
+ case v8::JitCodeEvent::CODE_END_LINE_INFO_RECORDING: {
+ GetEntries()->insert(std::pair <void*, void*>(event->code_start, event->user_data));
+ break;
+ }
+ default:
+ break;
+ }
+ }
+ return;
+}
+
+} // namespace internal
+
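+// Initializes V8 if necessary, turns off code-space compaction via
+// --nocompact_code_space (presumably so that reported code addresses remain
+// valid), and registers the VTune JIT code event handler.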
+void InitializeVtuneForV8() {
+ if (v8::V8::Initialize()) {
+ v8::V8::SetFlagsFromString("--nocompact_code_space",
+ (int)strlen("--nocompact_code_space"));
+ v8::V8::SetJitCodeEventHandler(v8::kJitCodeEventDefault,
+ vTune::internal::VTUNEJITInterface::event_handler);
+ }
+}
+
+} // namespace vTune
diff --git a/deps/v8/src/third_party/vtune/vtune-jit.h b/deps/v8/src/third_party/vtune/vtune-jit.h
new file mode 100644
index 000000000..42b8c3da1
--- /dev/null
+++ b/deps/v8/src/third_party/vtune/vtune-jit.h
@@ -0,0 +1,82 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+
+ Copyright(c) 2005-2012 Intel Corporation. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ The full GNU General Public License is included in this distribution
+ in the file called LICENSE.GPL.
+
+ Contact Information:
+ http://software.intel.com/en-us/articles/intel-vtune-amplifier-xe/
+
+ BSD LICENSE
+
+ Copyright(c) 2005-2012 Intel Corporation. All rights reserved.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef VTUNE_VTUNE_JIT_H_
+#define VTUNE_VTUNE_JIT_H_
+
+#include "jitprofiling.h"
+#include "../../../include/v8.h"
+
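+// True when the VTune collector is loaded and currently running in sampling
+// mode.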
+#define VTUNERUNNING (iJIT_IsProfilingActive() == iJIT_SAMPLING_ON)
+
+namespace vTune {
+namespace internal {
+using namespace v8;
+class VTUNEJITInterface {
+ public:
+ static void event_handler(const v8::JitCodeEvent* event);
+
+ private:
+ //static Mutex* vtunemutex_;
+};
+
+
+} } // namespace vTune::internal
+
+
+#endif // VTUNE_VTUNE_JIT_H_
+
diff --git a/deps/v8/src/token.h b/deps/v8/src/token.h
index 3036e5512..992adaa77 100644
--- a/deps/v8/src/token.h
+++ b/deps/v8/src/token.h
@@ -99,6 +99,7 @@ namespace internal {
T(SHL, "<<", 11) \
T(SAR, ">>", 11) \
T(SHR, ">>>", 11) \
+ T(ROR, "rotate right", 11) /* only used by Crankshaft */ \
T(ADD, "+", 12) \
T(SUB, "-", 12) \
T(MUL, "*", 13) \
@@ -173,6 +174,7 @@ namespace internal {
K(EXPORT, "export", 0) \
K(IMPORT, "import", 0) \
K(LET, "let", 0) \
+ K(YIELD, "yield", 0) \
\
/* Illegal token - not able to scan. */ \
T(ILLEGAL, "ILLEGAL", 0) \
@@ -223,32 +225,45 @@ class Token {
return op == EQ || op == EQ_STRICT;
}
+ static bool IsInequalityOp(Value op) {
+ return op == NE || op == NE_STRICT;
+ }
+
+ static bool IsArithmeticCompareOp(Value op) {
+ return IsOrderedRelationalCompareOp(op) ||
+ IsEqualityOp(op) || IsInequalityOp(op);
+ }
+
static Value NegateCompareOp(Value op) {
- ASSERT(IsCompareOp(op));
+ ASSERT(IsArithmeticCompareOp(op));
switch (op) {
case EQ: return NE;
case NE: return EQ;
case EQ_STRICT: return NE_STRICT;
+ case NE_STRICT: return EQ_STRICT;
case LT: return GTE;
case GT: return LTE;
case LTE: return GT;
case GTE: return LT;
default:
+ UNREACHABLE();
return op;
}
}
- static Value InvertCompareOp(Value op) {
- ASSERT(IsCompareOp(op));
+ static Value ReverseCompareOp(Value op) {
+ ASSERT(IsArithmeticCompareOp(op));
switch (op) {
- case EQ: return NE;
- case NE: return EQ;
- case EQ_STRICT: return NE_STRICT;
+ case EQ: return EQ;
+ case NE: return NE;
+ case EQ_STRICT: return EQ_STRICT;
+ case NE_STRICT: return NE_STRICT;
case LT: return GT;
case GT: return LT;
case LTE: return GTE;
case GTE: return LTE;
default:
+ UNREACHABLE();
return op;
}
}
diff --git a/deps/v8/src/transitions-inl.h b/deps/v8/src/transitions-inl.h
index cfaa99d73..c4825fcf7 100644
--- a/deps/v8/src/transitions-inl.h
+++ b/deps/v8/src/transitions-inl.h
@@ -57,30 +57,8 @@ TransitionArray* TransitionArray::cast(Object* object) {
}
-Map* TransitionArray::elements_transition() {
- Object* transition_map = get(kElementsTransitionIndex);
- return Map::cast(transition_map);
-}
-
-
-void TransitionArray::ClearElementsTransition() {
- WRITE_FIELD(this, kElementsTransitionOffset, Smi::FromInt(0));
-}
-
-
bool TransitionArray::HasElementsTransition() {
- return IsFullTransitionArray() &&
- get(kElementsTransitionIndex) != Smi::FromInt(0);
-}
-
-
-void TransitionArray::set_elements_transition(Map* transition_map,
- WriteBarrierMode mode) {
- ASSERT(IsFullTransitionArray());
- Heap* heap = GetHeap();
- WRITE_FIELD(this, kElementsTransitionOffset, transition_map);
- CONDITIONAL_WRITE_BARRIER(
- heap, this, kElementsTransitionOffset, transition_map, mode);
+ return Search(GetHeap()->elements_transition_symbol()) != kNotFound;
}
@@ -143,19 +121,19 @@ Object** TransitionArray::GetKeySlot(int transition_number) {
}
-String* TransitionArray::GetKey(int transition_number) {
+Name* TransitionArray::GetKey(int transition_number) {
if (IsSimpleTransition()) {
Map* target = GetTarget(kSimpleTransitionIndex);
int descriptor = target->LastAdded();
- String* key = target->instance_descriptors()->GetKey(descriptor);
+ Name* key = target->instance_descriptors()->GetKey(descriptor);
return key;
}
ASSERT(transition_number < number_of_transitions());
- return String::cast(get(ToKeyIndex(transition_number)));
+ return Name::cast(get(ToKeyIndex(transition_number)));
}
-void TransitionArray::SetKey(int transition_number, String* key) {
+void TransitionArray::SetKey(int transition_number, Name* key) {
ASSERT(!IsSimpleTransition());
ASSERT(transition_number < number_of_transitions());
set(ToKeyIndex(transition_number), key);
@@ -190,9 +168,9 @@ PropertyDetails TransitionArray::GetTargetDetails(int transition_number) {
}
-int TransitionArray::Search(String* name) {
+int TransitionArray::Search(Name* name) {
if (IsSimpleTransition()) {
- String* key = GetKey(kSimpleTransitionIndex);
+ Name* key = GetKey(kSimpleTransitionIndex);
if (key->Equals(name)) return kSimpleTransitionIndex;
return kNotFound;
}
@@ -201,7 +179,7 @@ int TransitionArray::Search(String* name) {
void TransitionArray::NoIncrementalWriteBarrierSet(int transition_number,
- String* key,
+ Name* key,
Map* target) {
FixedArray::NoIncrementalWriteBarrierSet(
this, ToKeyIndex(transition_number), key);
diff --git a/deps/v8/src/transitions.cc b/deps/v8/src/transitions.cc
index 56b6caf3d..9d3f03894 100644
--- a/deps/v8/src/transitions.cc
+++ b/deps/v8/src/transitions.cc
@@ -35,22 +35,21 @@ namespace v8 {
namespace internal {
-static MaybeObject* AllocateRaw(int length) {
- Heap* heap = Isolate::Current()->heap();
-
+static MaybeObject* AllocateRaw(Isolate* isolate, int length) {
// Use FixedArray to not use TransitionArray::cast on incomplete object.
FixedArray* array;
- MaybeObject* maybe_array = heap->AllocateFixedArray(length);
+ MaybeObject* maybe_array = isolate->heap()->AllocateFixedArray(length);
if (!maybe_array->To(&array)) return maybe_array;
return array;
}
-MaybeObject* TransitionArray::Allocate(int number_of_transitions) {
+MaybeObject* TransitionArray::Allocate(Isolate* isolate,
+ int number_of_transitions) {
FixedArray* array;
- MaybeObject* maybe_array = AllocateRaw(ToKeyIndex(number_of_transitions));
+ MaybeObject* maybe_array =
+ AllocateRaw(isolate, ToKeyIndex(number_of_transitions));
if (!maybe_array->To(&array)) return maybe_array;
- array->set(kElementsTransitionIndex, Smi::FromInt(0));
array->set(kPrototypeTransitionsIndex, Smi::FromInt(0));
return array;
}
@@ -65,24 +64,24 @@ void TransitionArray::NoIncrementalWriteBarrierCopyFrom(TransitionArray* origin,
}
-static bool InsertionPointFound(String* key1, String* key2) {
+static bool InsertionPointFound(Name* key1, Name* key2) {
return key1->Hash() > key2->Hash();
}
MaybeObject* TransitionArray::NewWith(SimpleTransitionFlag flag,
- String* key,
+ Name* key,
Map* target,
Object* back_pointer) {
TransitionArray* result;
MaybeObject* maybe_result;
if (flag == SIMPLE_TRANSITION) {
- maybe_result = AllocateRaw(kSimpleTransitionSize);
+ maybe_result = AllocateRaw(target->GetIsolate(), kSimpleTransitionSize);
if (!maybe_result->To(&result)) return maybe_result;
result->set(kSimpleTransitionTarget, target);
} else {
- maybe_result = Allocate(1);
+ maybe_result = Allocate(target->GetIsolate(), 1);
if (!maybe_result->To(&result)) return maybe_result;
result->NoIncrementalWriteBarrierSet(0, key, target);
}
@@ -95,7 +94,7 @@ MaybeObject* TransitionArray::ExtendToFullTransitionArray() {
ASSERT(!IsFullTransitionArray());
int nof = number_of_transitions();
TransitionArray* result;
- MaybeObject* maybe_result = Allocate(nof);
+ MaybeObject* maybe_result = Allocate(GetIsolate(), nof);
if (!maybe_result->To(&result)) return maybe_result;
if (nof == 1) {
@@ -107,7 +106,7 @@ MaybeObject* TransitionArray::ExtendToFullTransitionArray() {
}
-MaybeObject* TransitionArray::CopyInsert(String* name, Map* target) {
+MaybeObject* TransitionArray::CopyInsert(Name* name, Map* target) {
TransitionArray* result;
int number_of_transitions = this->number_of_transitions();
@@ -117,13 +116,9 @@ MaybeObject* TransitionArray::CopyInsert(String* name, Map* target) {
if (insertion_index == kNotFound) ++new_size;
MaybeObject* maybe_array;
- maybe_array = TransitionArray::Allocate(new_size);
+ maybe_array = TransitionArray::Allocate(GetIsolate(), new_size);
if (!maybe_array->To(&result)) return maybe_array;
- if (HasElementsTransition()) {
- result->set_elements_transition(elements_transition());
- }
-
if (HasPrototypeTransitions()) {
result->SetPrototypeTransitions(GetPrototypeTransitions());
}
@@ -135,6 +130,7 @@ MaybeObject* TransitionArray::CopyInsert(String* name, Map* target) {
}
}
result->NoIncrementalWriteBarrierSet(insertion_index, name, target);
+ result->set_back_pointer_storage(back_pointer_storage());
return result;
}
diff --git a/deps/v8/src/transitions.h b/deps/v8/src/transitions.h
index 0a660261c..b2e983967 100644
--- a/deps/v8/src/transitions.h
+++ b/deps/v8/src/transitions.h
@@ -41,10 +41,10 @@ namespace internal {
// TransitionArrays are fixed arrays used to hold map transitions for property,
// constant, and element changes. They can either be simple transition arrays
// that store a single property transition, or a full transition array that has
-// space for elements transitions, prototype transitions and multiple property
-// transitons. The details related to property transitions are accessed in the
-// descriptor array of the target map. In the case of a simple transition, the
-// key is also read from the descriptor array of the target map.
+// prototype transitions and multiple property transitions. The details related
+// to property transitions are accessed in the descriptor array of the target
+// map. In the case of a simple transition, the key is also read from the
+// descriptor array of the target map.
//
// The simple format of the these objects is:
// [0] Undefined or back pointer map
@@ -52,19 +52,18 @@ namespace internal {
//
// The full format is:
// [0] Undefined or back pointer map
-// [1] Smi(0) or elements transition map
-// [2] Smi(0) or fixed array of prototype transitions
-// [3] First transition
+// [1] Smi(0) or fixed array of prototype transitions
+// [2] First transition
// [length() - kTransitionSize] Last transition
class TransitionArray: public FixedArray {
public:
// Accessors for fetching instance transition at transition number.
- inline String* GetKey(int transition_number);
- inline void SetKey(int transition_number, String* value);
+ inline Name* GetKey(int transition_number);
+ inline void SetKey(int transition_number, Name* value);
inline Object** GetKeySlot(int transition_number);
int GetSortedKeyIndex(int transition_number) { return transition_number; }
- String* GetSortedKey(int transition_number) {
+ Name* GetSortedKey(int transition_number) {
return GetKey(transition_number);
}
@@ -73,12 +72,7 @@ class TransitionArray: public FixedArray {
inline PropertyDetails GetTargetDetails(int transition_number);
- inline Map* elements_transition();
- inline void set_elements_transition(
- Map* target,
- WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
inline bool HasElementsTransition();
- inline void ClearElementsTransition();
inline Object* back_pointer_storage();
inline void set_back_pointer_storage(
@@ -105,7 +99,7 @@ class TransitionArray: public FixedArray {
// Allocate a new transition array with a single entry.
static MUST_USE_RESULT MaybeObject* NewWith(
SimpleTransitionFlag flag,
- String* key,
+ Name* key,
Map* target,
Object* back_pointer);
@@ -114,7 +108,7 @@ class TransitionArray: public FixedArray {
// Copy the transition array, inserting a new transition.
// TODO(verwaest): This should not cause an existing transition to be
// overwritten.
- MUST_USE_RESULT MaybeObject* CopyInsert(String* name, Map* target);
+ MUST_USE_RESULT MaybeObject* CopyInsert(Name* name, Map* target);
// Copy a single transition from the origin array.
inline void NoIncrementalWriteBarrierCopyFrom(TransitionArray* origin,
@@ -122,13 +116,27 @@ class TransitionArray: public FixedArray {
int target_transition);
// Search a transition for a given property name.
- inline int Search(String* name);
+ inline int Search(Name* name);
// Allocates a TransitionArray.
- MUST_USE_RESULT static MaybeObject* Allocate(int number_of_transitions);
+ MUST_USE_RESULT static MaybeObject* Allocate(
+ Isolate* isolate, int number_of_transitions);
+
+ bool IsSimpleTransition() {
+ return length() == kSimpleTransitionSize &&
+ get(kSimpleTransitionTarget)->IsHeapObject() &&
+ // The IntrusivePrototypeTransitionIterator may have set the map of the
+ // prototype transitions array to a smi. In that case, there are
+ // prototype transitions, hence this transition array is a full
+ // transition array.
+ HeapObject::cast(get(kSimpleTransitionTarget))->map()->IsMap() &&
+ get(kSimpleTransitionTarget)->IsMap();
+ }
- bool IsSimpleTransition() { return length() == kSimpleTransitionSize; }
- bool IsFullTransitionArray() { return length() >= kFirstIndex; }
+ bool IsFullTransitionArray() {
+ return length() > kFirstIndex ||
+ (length() == kFirstIndex && !IsSimpleTransition());
+ }
// Casting.
static inline TransitionArray* cast(Object* obj);
@@ -139,9 +147,8 @@ class TransitionArray: public FixedArray {
static const int kBackPointerStorageIndex = 0;
// Layout for full transition arrays.
- static const int kElementsTransitionIndex = 1;
- static const int kPrototypeTransitionsIndex = 2;
- static const int kFirstIndex = 3;
+ static const int kPrototypeTransitionsIndex = 1;
+ static const int kFirstIndex = 2;
// Layout for simple transition arrays.
static const int kSimpleTransitionTarget = 1;
@@ -152,9 +159,7 @@ class TransitionArray: public FixedArray {
static const int kBackPointerStorageOffset = FixedArray::kHeaderSize;
// Layout for the full transition array header.
- static const int kElementsTransitionOffset = kBackPointerStorageOffset +
- kPointerSize;
- static const int kPrototypeTransitionsOffset = kElementsTransitionOffset +
+ static const int kPrototypeTransitionsOffset = kBackPointerStorageOffset +
kPointerSize;
// Layout of map transition entries in full transition arrays.
@@ -195,7 +200,7 @@ class TransitionArray: public FixedArray {
}
inline void NoIncrementalWriteBarrierSet(int transition_number,
- String* key,
+ Name* key,
Map* target);
DISALLOW_IMPLICIT_CONSTRUCTORS(TransitionArray);
diff --git a/deps/v8/src/type-info.cc b/deps/v8/src/type-info.cc
index bc6a46b4b..65d136405 100644
--- a/deps/v8/src/type-info.cc
+++ b/deps/v8/src/type-info.cc
@@ -42,32 +42,29 @@ namespace v8 {
namespace internal {
-TypeInfo TypeInfo::TypeFromValue(Handle<Object> value) {
- TypeInfo info;
+TypeInfo TypeInfo::FromValue(Handle<Object> value) {
if (value->IsSmi()) {
- info = TypeInfo::Smi();
+ return TypeInfo::Smi();
} else if (value->IsHeapNumber()) {
- info = TypeInfo::IsInt32Double(HeapNumber::cast(*value)->value())
+ return TypeInfo::IsInt32Double(HeapNumber::cast(*value)->value())
? TypeInfo::Integer32()
: TypeInfo::Double();
} else if (value->IsString()) {
- info = TypeInfo::String();
- } else {
- info = TypeInfo::Unknown();
+ return TypeInfo::String();
}
- return info;
+ return TypeInfo::Unknown();
}
TypeFeedbackOracle::TypeFeedbackOracle(Handle<Code> code,
Handle<Context> native_context,
Isolate* isolate,
- Zone* zone) {
- native_context_ = native_context;
- isolate_ = isolate;
- zone_ = zone;
+ Zone* zone)
+ : native_context_(native_context),
+ isolate_(isolate),
+ zone_(zone) {
BuildDictionary(code);
- ASSERT(reinterpret_cast<Address>(*dictionary_.location()) != kHandleZapValue);
+ ASSERT(dictionary_->IsDictionary());
}
@@ -78,9 +75,27 @@ static uint32_t IdToKey(TypeFeedbackId ast_id) {
Handle<Object> TypeFeedbackOracle::GetInfo(TypeFeedbackId ast_id) {
int entry = dictionary_->FindEntry(IdToKey(ast_id));
- return entry != UnseededNumberDictionary::kNotFound
- ? Handle<Object>(dictionary_->ValueAt(entry))
- : Handle<Object>::cast(isolate_->factory()->undefined_value());
+ if (entry != UnseededNumberDictionary::kNotFound) {
+ Object* value = dictionary_->ValueAt(entry);
+ if (value->IsCell()) {
+ Cell* cell = Cell::cast(value);
+ return Handle<Object>(cell->value(), isolate_);
+ } else {
+ return Handle<Object>(value, isolate_);
+ }
+ }
+ return Handle<Object>::cast(isolate_->factory()->undefined_value());
+}
+
+
+Handle<Cell> TypeFeedbackOracle::GetInfoCell(
+ TypeFeedbackId ast_id) {
+ int entry = dictionary_->FindEntry(IdToKey(ast_id));
+ if (entry != UnseededNumberDictionary::kNotFound) {
+ Cell* cell = Cell::cast(dictionary_->ValueAt(entry));
+ return Handle<Cell>(cell, isolate_);
+ }
+ return Handle<Cell>::null();
}
@@ -105,59 +120,78 @@ bool TypeFeedbackOracle::LoadIsMonomorphicNormal(Property* expr) {
Code::ExtractTypeFromFlags(code->flags()) == Code::NORMAL;
if (!preliminary_checks) return false;
Map* map = code->FindFirstMap();
+ if (map == NULL) return false;
+ map = map->CurrentMapForDeprecated();
return map != NULL && !CanRetainOtherContext(map, *native_context_);
}
return false;
}
-bool TypeFeedbackOracle::LoadIsMegamorphicWithTypeInfo(Property* expr) {
+bool TypeFeedbackOracle::LoadIsPreMonomorphic(Property* expr) {
Handle<Object> map_or_code = GetInfo(expr->PropertyFeedbackId());
if (map_or_code->IsCode()) {
Handle<Code> code = Handle<Code>::cast(map_or_code);
- Builtins* builtins = isolate_->builtins();
- return code->is_keyed_load_stub() &&
- *code != builtins->builtin(Builtins::kKeyedLoadIC_Generic) &&
- code->ic_state() == MEGAMORPHIC;
+ return code->is_inline_cache_stub() && code->ic_state() == PREMONOMORPHIC;
}
return false;
}
+bool TypeFeedbackOracle::LoadIsPolymorphic(Property* expr) {
+ Handle<Object> map_or_code = GetInfo(expr->PropertyFeedbackId());
+ if (map_or_code->IsCode()) {
+ Handle<Code> code = Handle<Code>::cast(map_or_code);
+ return code->is_keyed_load_stub() && code->ic_state() == POLYMORPHIC;
+ }
+ return false;
+}
+
+
+bool TypeFeedbackOracle::StoreIsUninitialized(TypeFeedbackId ast_id) {
+ Handle<Object> map_or_code = GetInfo(ast_id);
+ if (map_or_code->IsMap()) return false;
+ if (!map_or_code->IsCode()) return false;
+ Handle<Code> code = Handle<Code>::cast(map_or_code);
+ return code->ic_state() == UNINITIALIZED;
+}
+
+
bool TypeFeedbackOracle::StoreIsMonomorphicNormal(TypeFeedbackId ast_id) {
Handle<Object> map_or_code = GetInfo(ast_id);
if (map_or_code->IsMap()) return true;
if (map_or_code->IsCode()) {
Handle<Code> code = Handle<Code>::cast(map_or_code);
- bool allow_growth =
- Code::GetKeyedAccessGrowMode(code->extra_ic_state()) ==
- ALLOW_JSARRAY_GROWTH;
bool preliminary_checks =
code->is_keyed_store_stub() &&
- !allow_growth &&
code->ic_state() == MONOMORPHIC &&
Code::ExtractTypeFromFlags(code->flags()) == Code::NORMAL;
if (!preliminary_checks) return false;
Map* map = code->FindFirstMap();
+ if (map == NULL) return false;
+ map = map->CurrentMapForDeprecated();
return map != NULL && !CanRetainOtherContext(map, *native_context_);
}
return false;
}
-bool TypeFeedbackOracle::StoreIsMegamorphicWithTypeInfo(TypeFeedbackId ast_id) {
+bool TypeFeedbackOracle::StoreIsPreMonomorphic(TypeFeedbackId ast_id) {
+ Handle<Object> map_or_code = GetInfo(ast_id);
+ if (map_or_code->IsCode()) {
+ Handle<Code> code = Handle<Code>::cast(map_or_code);
+ return code->ic_state() == PREMONOMORPHIC;
+ }
+ return false;
+}
+
+
+bool TypeFeedbackOracle::StoreIsKeyedPolymorphic(TypeFeedbackId ast_id) {
Handle<Object> map_or_code = GetInfo(ast_id);
if (map_or_code->IsCode()) {
Handle<Code> code = Handle<Code>::cast(map_or_code);
- Builtins* builtins = isolate_->builtins();
- bool allow_growth =
- Code::GetKeyedAccessGrowMode(code->extra_ic_state()) ==
- ALLOW_JSARRAY_GROWTH;
return code->is_keyed_store_stub() &&
- !allow_growth &&
- *code != builtins->builtin(Builtins::kKeyedStoreIC_Generic) &&
- *code != builtins->builtin(Builtins::kKeyedStoreIC_Generic_Strict) &&
- code->ic_state() == MEGAMORPHIC;
+ code->ic_state() == POLYMORPHIC;
}
return false;
}
@@ -165,13 +199,14 @@ bool TypeFeedbackOracle::StoreIsMegamorphicWithTypeInfo(TypeFeedbackId ast_id) {
bool TypeFeedbackOracle::CallIsMonomorphic(Call* expr) {
Handle<Object> value = GetInfo(expr->CallFeedbackId());
- return value->IsMap() || value->IsSmi() || value->IsJSFunction();
+ return value->IsMap() || value->IsAllocationSite() || value->IsJSFunction() ||
+ value->IsSmi();
}
bool TypeFeedbackOracle::CallNewIsMonomorphic(CallNew* expr) {
- Handle<Object> value = GetInfo(expr->CallNewFeedbackId());
- return value->IsJSFunction();
+ Handle<Object> info = GetInfo(expr->CallNewFeedbackId());
+ return info->IsAllocationSite() || info->IsJSFunction();
}
@@ -182,10 +217,11 @@ bool TypeFeedbackOracle::ObjectLiteralStoreIsMonomorphic(
}
-bool TypeFeedbackOracle::IsForInFastCase(ForInStatement* stmt) {
+byte TypeFeedbackOracle::ForInType(ForInStatement* stmt) {
Handle<Object> value = GetInfo(stmt->ForInFeedbackId());
return value->IsSmi() &&
- Smi::cast(*value)->value() == TypeFeedbackCells::kForInFastCaseMarker;
+ Smi::cast(*value)->value() == TypeFeedbackCells::kForInFastCaseMarker
+ ? ForInStatement::FAST_FOR_IN : ForInStatement::SLOW_FOR_IN;
}
@@ -194,11 +230,10 @@ Handle<Map> TypeFeedbackOracle::LoadMonomorphicReceiverType(Property* expr) {
Handle<Object> map_or_code = GetInfo(expr->PropertyFeedbackId());
if (map_or_code->IsCode()) {
Handle<Code> code = Handle<Code>::cast(map_or_code);
- Map* first_map = code->FindFirstMap();
- ASSERT(first_map != NULL);
- return CanRetainOtherContext(first_map, *native_context_)
+ Map* map = code->FindFirstMap()->CurrentMapForDeprecated();
+ return map == NULL || CanRetainOtherContext(map, *native_context_)
? Handle<Map>::null()
- : Handle<Map>(first_map);
+ : Handle<Map>(map);
}
return Handle<Map>::cast(map_or_code);
}
@@ -210,21 +245,34 @@ Handle<Map> TypeFeedbackOracle::StoreMonomorphicReceiverType(
Handle<Object> map_or_code = GetInfo(ast_id);
if (map_or_code->IsCode()) {
Handle<Code> code = Handle<Code>::cast(map_or_code);
- Map* first_map = code->FindFirstMap();
- ASSERT(first_map != NULL);
- return CanRetainOtherContext(first_map, *native_context_)
+ Map* map = code->FindFirstMap()->CurrentMapForDeprecated();
+ return map == NULL || CanRetainOtherContext(map, *native_context_)
? Handle<Map>::null()
- : Handle<Map>(first_map);
+ : Handle<Map>(map);
}
return Handle<Map>::cast(map_or_code);
}
+KeyedAccessStoreMode TypeFeedbackOracle::GetStoreMode(
+ TypeFeedbackId ast_id) {
+ Handle<Object> map_or_code = GetInfo(ast_id);
+ if (map_or_code->IsCode()) {
+ Handle<Code> code = Handle<Code>::cast(map_or_code);
+ if (code->kind() == Code::KEYED_STORE_IC) {
+ return Code::GetKeyedAccessStoreMode(code->extra_ic_state());
+ }
+ }
+ return STANDARD_STORE;
+}
+
+
void TypeFeedbackOracle::LoadReceiverTypes(Property* expr,
Handle<String> name,
SmallMapList* types) {
- Code::Flags flags =
- Code::ComputeMonomorphicFlags(Code::LOAD_IC, Code::NORMAL);
+ Code::Flags flags = Code::ComputeFlags(
+ Code::HANDLER, MONOMORPHIC, Code::kNoExtraICState,
+ Code::NORMAL, Code::LOAD_IC);
CollectReceiverTypes(expr->PropertyFeedbackId(), name, flags, types);
}
@@ -232,8 +280,9 @@ void TypeFeedbackOracle::LoadReceiverTypes(Property* expr,
void TypeFeedbackOracle::StoreReceiverTypes(Assignment* expr,
Handle<String> name,
SmallMapList* types) {
- Code::Flags flags =
- Code::ComputeMonomorphicFlags(Code::STORE_IC, Code::NORMAL);
+ Code::Flags flags = Code::ComputeFlags(
+ Code::HANDLER, MONOMORPHIC, Code::kNoExtraICState,
+ Code::NORMAL, Code::STORE_IC);
CollectReceiverTypes(expr->AssignmentFeedbackId(), name, flags, types);
}
@@ -250,10 +299,10 @@ void TypeFeedbackOracle::CallReceiverTypes(Call* expr,
CallIC::Contextual::encode(call_kind == CALL_AS_FUNCTION);
Code::Flags flags = Code::ComputeMonomorphicFlags(Code::CALL_IC,
- Code::NORMAL,
extra_ic_state,
- OWN_MAP,
- arity);
+ Code::NORMAL,
+ arity,
+ OWN_MAP);
CollectReceiverTypes(expr->CallFeedbackId(), name, flags, types);
}
@@ -267,35 +316,28 @@ CheckType TypeFeedbackOracle::GetCallCheckType(Call* expr) {
}
-Handle<JSObject> TypeFeedbackOracle::GetPrototypeForPrimitiveCheck(
- CheckType check) {
- JSFunction* function = NULL;
- switch (check) {
- case RECEIVER_MAP_CHECK:
- UNREACHABLE();
- break;
- case STRING_CHECK:
- function = native_context_->string_function();
- break;
- case NUMBER_CHECK:
- function = native_context_->number_function();
- break;
- case BOOLEAN_CHECK:
- function = native_context_->boolean_function();
- break;
+Handle<JSFunction> TypeFeedbackOracle::GetCallTarget(Call* expr) {
+ Handle<Object> info = GetInfo(expr->CallFeedbackId());
+ if (info->IsAllocationSite()) {
+ return Handle<JSFunction>(isolate_->global_context()->array_function());
+ } else {
+ return Handle<JSFunction>::cast(info);
}
- ASSERT(function != NULL);
- return Handle<JSObject>(JSObject::cast(function->instance_prototype()));
}
-Handle<JSFunction> TypeFeedbackOracle::GetCallTarget(Call* expr) {
- return Handle<JSFunction>::cast(GetInfo(expr->CallFeedbackId()));
+Handle<JSFunction> TypeFeedbackOracle::GetCallNewTarget(CallNew* expr) {
+ Handle<Object> info = GetInfo(expr->CallNewFeedbackId());
+ if (info->IsAllocationSite()) {
+ return Handle<JSFunction>(isolate_->global_context()->array_function());
+ } else {
+ return Handle<JSFunction>::cast(info);
+ }
}
-Handle<JSFunction> TypeFeedbackOracle::GetCallNewTarget(CallNew* expr) {
- return Handle<JSFunction>::cast(GetInfo(expr->CallNewFeedbackId()));
+Handle<Cell> TypeFeedbackOracle::GetCallNewAllocationInfoCell(CallNew* expr) {
+ return GetInfoCell(expr->CallNewFeedbackId());
}
@@ -312,192 +354,114 @@ bool TypeFeedbackOracle::LoadIsBuiltin(Property* expr, Builtins::Name id) {
}
-TypeInfo TypeFeedbackOracle::CompareType(CompareOperation* expr) {
- Handle<Object> object = GetInfo(expr->CompareOperationFeedbackId());
- TypeInfo unknown = TypeInfo::Unknown();
- if (!object->IsCode()) return unknown;
- Handle<Code> code = Handle<Code>::cast(object);
- if (!code->is_compare_ic_stub()) return unknown;
-
- CompareIC::State state = static_cast<CompareIC::State>(code->compare_state());
- switch (state) {
- case CompareIC::UNINITIALIZED:
- // Uninitialized means never executed.
- return TypeInfo::Uninitialized();
- case CompareIC::SMIS:
- return TypeInfo::Smi();
- case CompareIC::HEAP_NUMBERS:
- return TypeInfo::Number();
- case CompareIC::SYMBOLS:
- case CompareIC::STRINGS:
- return TypeInfo::String();
- case CompareIC::OBJECTS:
- case CompareIC::KNOWN_OBJECTS:
- // TODO(kasperl): We really need a type for JS objects here.
- return TypeInfo::NonPrimitive();
- case CompareIC::GENERIC:
- default:
- return unknown;
- }
-}
-
-
-bool TypeFeedbackOracle::IsSymbolCompare(CompareOperation* expr) {
- Handle<Object> object = GetInfo(expr->CompareOperationFeedbackId());
+bool TypeFeedbackOracle::LoadIsStub(Property* expr, ICStub* stub) {
+ Handle<Object> object = GetInfo(expr->PropertyFeedbackId());
if (!object->IsCode()) return false;
Handle<Code> code = Handle<Code>::cast(object);
- if (!code->is_compare_ic_stub()) return false;
- CompareIC::State state = static_cast<CompareIC::State>(code->compare_state());
- return state == CompareIC::SYMBOLS;
+ if (!code->is_load_stub()) return false;
+ if (code->ic_state() != MONOMORPHIC) return false;
+ return stub->Describes(*code);
}
-Handle<Map> TypeFeedbackOracle::GetCompareMap(CompareOperation* expr) {
- Handle<Object> object = GetInfo(expr->CompareOperationFeedbackId());
- if (!object->IsCode()) return Handle<Map>::null();
- Handle<Code> code = Handle<Code>::cast(object);
- if (!code->is_compare_ic_stub()) return Handle<Map>::null();
- CompareIC::State state = static_cast<CompareIC::State>(code->compare_state());
- if (state != CompareIC::KNOWN_OBJECTS) {
- return Handle<Map>::null();
+void TypeFeedbackOracle::CompareType(TypeFeedbackId id,
+ Handle<Type>* left_type,
+ Handle<Type>* right_type,
+ Handle<Type>* combined_type) {
+ Handle<Object> info = GetInfo(id);
+ if (!info->IsCode()) {
+ // For some comparisons we don't have ICs, e.g. LiteralCompareTypeof.
+ *left_type = *right_type = *combined_type = handle(Type::None(), isolate_);
+ return;
+ }
+ Handle<Code> code = Handle<Code>::cast(info);
+
+ Handle<Map> map;
+ Map* raw_map = code->FindFirstMap();
+ if (raw_map != NULL) {
+ raw_map = raw_map->CurrentMapForDeprecated();
+ if (raw_map != NULL && !CanRetainOtherContext(raw_map, *native_context_)) {
+ map = handle(raw_map, isolate_);
+ }
}
- Map* first_map = code->FindFirstMap();
- ASSERT(first_map != NULL);
- return CanRetainOtherContext(first_map, *native_context_)
- ? Handle<Map>::null()
- : Handle<Map>(first_map);
-}
-
-TypeInfo TypeFeedbackOracle::UnaryType(UnaryOperation* expr) {
- Handle<Object> object = GetInfo(expr->UnaryOperationFeedbackId());
- TypeInfo unknown = TypeInfo::Unknown();
- if (!object->IsCode()) return unknown;
- Handle<Code> code = Handle<Code>::cast(object);
- ASSERT(code->is_unary_op_stub());
- UnaryOpIC::TypeInfo type = static_cast<UnaryOpIC::TypeInfo>(
- code->unary_op_type());
- switch (type) {
- case UnaryOpIC::SMI:
- return TypeInfo::Smi();
- case UnaryOpIC::HEAP_NUMBER:
- return TypeInfo::Double();
- default:
- return unknown;
+ if (code->is_compare_ic_stub()) {
+ int stub_minor_key = code->stub_info();
+ CompareIC::StubInfoToType(
+ stub_minor_key, left_type, right_type, combined_type, map, isolate());
+ } else if (code->is_compare_nil_ic_stub()) {
+ CompareNilICStub stub(code->extended_extra_ic_state());
+ *combined_type = stub.GetType(isolate_, map);
+ *left_type = *right_type = stub.GetInputType(isolate_, map);
}
}
-TypeInfo TypeFeedbackOracle::BinaryType(BinaryOperation* expr) {
- Handle<Object> object = GetInfo(expr->BinaryOperationFeedbackId());
- TypeInfo unknown = TypeInfo::Unknown();
- if (!object->IsCode()) return unknown;
- Handle<Code> code = Handle<Code>::cast(object);
- if (code->is_binary_op_stub()) {
- BinaryOpIC::TypeInfo type = static_cast<BinaryOpIC::TypeInfo>(
- code->binary_op_type());
- BinaryOpIC::TypeInfo result_type = static_cast<BinaryOpIC::TypeInfo>(
- code->binary_op_result_type());
-
- switch (type) {
- case BinaryOpIC::UNINITIALIZED:
- // Uninitialized means never executed.
- return TypeInfo::Uninitialized();
- case BinaryOpIC::SMI:
- switch (result_type) {
- case BinaryOpIC::UNINITIALIZED:
- if (expr->op() == Token::DIV) {
- return TypeInfo::Double();
- }
- return TypeInfo::Smi();
- case BinaryOpIC::SMI:
- return TypeInfo::Smi();
- case BinaryOpIC::INT32:
- return TypeInfo::Integer32();
- case BinaryOpIC::HEAP_NUMBER:
- return TypeInfo::Double();
- default:
- return unknown;
- }
- case BinaryOpIC::INT32:
- if (expr->op() == Token::DIV ||
- result_type == BinaryOpIC::HEAP_NUMBER) {
- return TypeInfo::Double();
- }
- return TypeInfo::Integer32();
- case BinaryOpIC::HEAP_NUMBER:
- return TypeInfo::Double();
- case BinaryOpIC::BOTH_STRING:
- return TypeInfo::String();
- case BinaryOpIC::STRING:
- case BinaryOpIC::GENERIC:
- return unknown;
- default:
- return unknown;
- }
+void TypeFeedbackOracle::BinaryType(TypeFeedbackId id,
+ Handle<Type>* left,
+ Handle<Type>* right,
+ Handle<Type>* result,
+ Maybe<int>* fixed_right_arg,
+ Token::Value operation) {
+ Handle<Object> object = GetInfo(id);
+ if (!object->IsCode()) {
+ // For some binary ops we don't have ICs, e.g. Token::COMMA, but for the
+ // operations covered by the BinaryOpStub we should always have them.
+ ASSERT(!(operation >= BinaryOpStub::FIRST_TOKEN &&
+ operation <= BinaryOpStub::LAST_TOKEN));
+ *left = *right = *result = handle(Type::None(), isolate_);
+ return;
}
- return unknown;
+ Handle<Code> code = Handle<Code>::cast(object);
+ ASSERT(code->is_binary_op_stub());
+
+ BinaryOpStub stub(code->extended_extra_ic_state());
+
+ // Sanity check.
+ ASSERT(stub.operation() == operation);
+
+ *left = stub.GetLeftType(isolate());
+ *right = stub.GetRightType(isolate());
+ *result = stub.GetResultType(isolate());
+ *fixed_right_arg = stub.fixed_right_arg();
}
-TypeInfo TypeFeedbackOracle::SwitchType(CaseClause* clause) {
- Handle<Object> object = GetInfo(clause->CompareId());
- TypeInfo unknown = TypeInfo::Unknown();
- if (!object->IsCode()) return unknown;
- Handle<Code> code = Handle<Code>::cast(object);
- if (!code->is_compare_ic_stub()) return unknown;
-
- CompareIC::State state = static_cast<CompareIC::State>(code->compare_state());
- switch (state) {
- case CompareIC::UNINITIALIZED:
- // Uninitialized means never executed.
- // TODO(fschneider): Introduce a separate value for never-executed ICs.
- return unknown;
- case CompareIC::SMIS:
- return TypeInfo::Smi();
- case CompareIC::STRINGS:
- return TypeInfo::String();
- case CompareIC::SYMBOLS:
- return TypeInfo::Symbol();
- case CompareIC::HEAP_NUMBERS:
- return TypeInfo::Number();
- case CompareIC::OBJECTS:
- case CompareIC::KNOWN_OBJECTS:
- // TODO(kasperl): We really need a type for JS objects here.
- return TypeInfo::NonPrimitive();
- case CompareIC::GENERIC:
- default:
- return unknown;
+Handle<Type> TypeFeedbackOracle::ClauseType(TypeFeedbackId id) {
+ Handle<Object> info = GetInfo(id);
+ Handle<Type> result(Type::None(), isolate_);
+ if (info->IsCode() && Handle<Code>::cast(info)->is_compare_ic_stub()) {
+ Handle<Code> code = Handle<Code>::cast(info);
+ CompareIC::State state = ICCompareStub::CompareState(code->stub_info());
+ result = CompareIC::StateToType(isolate_, state);
}
+ return result;
}
-TypeInfo TypeFeedbackOracle::IncrementType(CountOperation* expr) {
+Handle<Type> TypeFeedbackOracle::IncrementType(CountOperation* expr) {
Handle<Object> object = GetInfo(expr->CountBinOpFeedbackId());
- TypeInfo unknown = TypeInfo::Unknown();
+ Handle<Type> unknown(Type::None(), isolate_);
if (!object->IsCode()) return unknown;
Handle<Code> code = Handle<Code>::cast(object);
if (!code->is_binary_op_stub()) return unknown;
- BinaryOpIC::TypeInfo type = static_cast<BinaryOpIC::TypeInfo>(
- code->binary_op_type());
- switch (type) {
- case BinaryOpIC::UNINITIALIZED:
- case BinaryOpIC::SMI:
- return TypeInfo::Smi();
- case BinaryOpIC::INT32:
- return TypeInfo::Integer32();
- case BinaryOpIC::HEAP_NUMBER:
- return TypeInfo::Double();
- case BinaryOpIC::BOTH_STRING:
- case BinaryOpIC::STRING:
- case BinaryOpIC::GENERIC:
- return unknown;
- default:
- return unknown;
+ BinaryOpStub stub(code->extended_extra_ic_state());
+ return stub.GetLeftType(isolate());
+}
+
+
+void TypeFeedbackOracle::CollectPolymorphicMaps(Handle<Code> code,
+ SmallMapList* types) {
+ MapHandleList maps;
+ code->FindAllMaps(&maps);
+ types->Reserve(maps.length(), zone());
+ for (int i = 0; i < maps.length(); i++) {
+ Handle<Map> map(maps.at(i));
+ if (!CanRetainOtherContext(*map, *native_context_)) {
+ types->AddMapIfMissing(map, zone());
+ }
}
- UNREACHABLE();
- return unknown;
}
@@ -508,19 +472,21 @@ void TypeFeedbackOracle::CollectReceiverTypes(TypeFeedbackId ast_id,
Handle<Object> object = GetInfo(ast_id);
if (object->IsUndefined() || object->IsSmi()) return;
- if (*object ==
- isolate_->builtins()->builtin(Builtins::kStoreIC_GlobalProxy)) {
+ if (object.is_identical_to(isolate_->builtins()->StoreIC_GlobalProxy())) {
// TODO(fschneider): We could collect the maps and signal that
// we need a generic store (or load) here.
- ASSERT(Handle<Code>::cast(object)->ic_state() == MEGAMORPHIC);
+ ASSERT(Handle<Code>::cast(object)->ic_state() == GENERIC);
} else if (object->IsMap()) {
- types->Add(Handle<Map>::cast(object), zone());
+ types->AddMapIfMissing(Handle<Map>::cast(object), zone());
+ } else if (Handle<Code>::cast(object)->ic_state() == POLYMORPHIC ||
+ Handle<Code>::cast(object)->ic_state() == MONOMORPHIC) {
+ CollectPolymorphicMaps(Handle<Code>::cast(object), types);
} else if (FLAG_collect_megamorphic_maps_from_stub_cache &&
Handle<Code>::cast(object)->ic_state() == MEGAMORPHIC) {
types->Reserve(4, zone());
ASSERT(object->IsCode());
isolate_->stub_cache()->CollectMatchingMaps(types,
- *name,
+ name,
flags,
native_context_,
zone());
@@ -562,15 +528,6 @@ bool TypeFeedbackOracle::CanRetainOtherContext(JSFunction* function,
}
-static void AddMapIfMissing(Handle<Map> map, SmallMapList* list,
- Zone* zone) {
- for (int i = 0; i < list->length(); ++i) {
- if (list->at(i).is_identical_to(map)) return;
- }
- list->Add(map, zone);
-}
-
-
void TypeFeedbackOracle::CollectKeyedReceiverTypes(TypeFeedbackId ast_id,
SmallMapList* types) {
Handle<Object> object = GetInfo(ast_id);
@@ -578,24 +535,25 @@ void TypeFeedbackOracle::CollectKeyedReceiverTypes(TypeFeedbackId ast_id,
Handle<Code> code = Handle<Code>::cast(object);
if (code->kind() == Code::KEYED_LOAD_IC ||
code->kind() == Code::KEYED_STORE_IC) {
- AssertNoAllocation no_allocation;
- int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
- for (RelocIterator it(*code, mask); !it.done(); it.next()) {
- RelocInfo* info = it.rinfo();
- Object* object = info->target_object();
- if (object->IsMap()) {
- Map* map = Map::cast(object);
- if (!CanRetainOtherContext(map, *native_context_)) {
- AddMapIfMissing(Handle<Map>(map), types, zone());
- }
- }
- }
+ CollectPolymorphicMaps(code, types);
}
}
-byte TypeFeedbackOracle::ToBooleanTypes(TypeFeedbackId ast_id) {
+void TypeFeedbackOracle::CollectPolymorphicStoreReceiverTypes(
+ TypeFeedbackId ast_id,
+ SmallMapList* types) {
Handle<Object> object = GetInfo(ast_id);
+ if (!object->IsCode()) return;
+ Handle<Code> code = Handle<Code>::cast(object);
+ if (code->kind() == Code::STORE_IC && code->ic_state() == POLYMORPHIC) {
+ CollectPolymorphicMaps(code, types);
+ }
+}
+
+
+byte TypeFeedbackOracle::ToBooleanTypes(TypeFeedbackId id) {
+ Handle<Object> object = GetInfo(id);
return object->IsCode() ? Handle<Code>::cast(object)->to_boolean_state() : 0;
}
@@ -605,9 +563,9 @@ byte TypeFeedbackOracle::ToBooleanTypes(TypeFeedbackId ast_id) {
// dictionary (possibly triggering GC), and finally we relocate the collected
// infos before we process them.
void TypeFeedbackOracle::BuildDictionary(Handle<Code> code) {
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_allocation;
ZoneList<RelocInfo> infos(16, zone());
- HandleScope scope;
+ HandleScope scope(isolate_);
GetRelocInfos(code, &infos);
CreateDictionary(code, &infos);
ProcessRelocInfos(&infos);
@@ -628,14 +586,14 @@ void TypeFeedbackOracle::GetRelocInfos(Handle<Code> code,
void TypeFeedbackOracle::CreateDictionary(Handle<Code> code,
ZoneList<RelocInfo>* infos) {
- DisableAssertNoAllocation allocation_allowed;
+ AllowHeapAllocation allocation_allowed;
int cell_count = code->type_feedback_info()->IsTypeFeedbackInfo()
? TypeFeedbackInfo::cast(code->type_feedback_info())->
type_feedback_cells()->CellCount()
: 0;
int length = infos->length() + cell_count;
byte* old_start = code->instruction_start();
- dictionary_ = FACTORY->NewUnseededNumberDictionary(length);
+ dictionary_ = isolate()->factory()->NewUnseededNumberDictionary(length);
byte* new_start = code->instruction_start();
RelocateRelocInfos(infos, old_start, new_start);
}
@@ -673,7 +631,8 @@ void TypeFeedbackOracle::ProcessRelocInfos(ZoneList<RelocInfo>* infos) {
SetInfo(ast_id, static_cast<Object*>(target));
} else if (!CanRetainOtherContext(Map::cast(map),
*native_context_)) {
- SetInfo(ast_id, map);
+ Map* feedback = Map::cast(map)->CurrentMapForDeprecated();
+ if (feedback != NULL) SetInfo(ast_id, feedback);
}
}
} else {
@@ -683,16 +642,10 @@ void TypeFeedbackOracle::ProcessRelocInfos(ZoneList<RelocInfo>* infos) {
case Code::KEYED_LOAD_IC:
case Code::KEYED_STORE_IC:
- if (target->ic_state() == MONOMORPHIC ||
- target->ic_state() == MEGAMORPHIC) {
- SetInfo(ast_id, target);
- }
- break;
-
- case Code::UNARY_OP_IC:
case Code::BINARY_OP_IC:
case Code::COMPARE_IC:
case Code::TO_BOOLEAN_IC:
+ case Code::COMPARE_NIL_IC:
SetInfo(ast_id, target);
break;
@@ -710,12 +663,14 @@ void TypeFeedbackOracle::ProcessTypeFeedbackCells(Handle<Code> code) {
TypeFeedbackInfo::cast(raw_info)->type_feedback_cells());
for (int i = 0; i < cache->CellCount(); i++) {
TypeFeedbackId ast_id = cache->AstId(i);
- Object* value = cache->Cell(i)->value();
+ Cell* cell = cache->GetCell(i);
+ Object* value = cell->value();
if (value->IsSmi() ||
+ value->IsAllocationSite() ||
(value->IsJSFunction() &&
!CanRetainOtherContext(JSFunction::cast(value),
*native_context_))) {
- SetInfo(ast_id, value);
+ SetInfo(ast_id, cell);
}
}
}
@@ -734,4 +689,15 @@ void TypeFeedbackOracle::SetInfo(TypeFeedbackId ast_id, Object* target) {
#endif
}
+
+Representation Representation::FromType(TypeInfo info) {
+ if (info.IsUninitialized()) return Representation::None();
+ if (info.IsSmi()) return Representation::Smi();
+ if (info.IsInteger32()) return Representation::Integer32();
+ if (info.IsDouble()) return Representation::Double();
+ if (info.IsNumber()) return Representation::Double();
+ return Representation::Tagged();
+}
+
+
} } // namespace v8::internal
diff --git a/deps/v8/src/type-info.h b/deps/v8/src/type-info.h
index 00d88c2af..f295c06da 100644
--- a/deps/v8/src/type-info.h
+++ b/deps/v8/src/type-info.h
@@ -29,8 +29,8 @@
#define V8_TYPE_INFO_H_
#include "allocation.h"
-#include "ast.h"
#include "globals.h"
+#include "types.h"
#include "zone-inl.h"
namespace v8 {
@@ -65,12 +65,12 @@ class TypeInfo {
static TypeInfo Integer32() { return TypeInfo(kInteger32); }
// We know it's a Smi.
static TypeInfo Smi() { return TypeInfo(kSmi); }
- // We know it's a Symbol.
- static TypeInfo Symbol() { return TypeInfo(kSymbol); }
// We know it's a heap number.
static TypeInfo Double() { return TypeInfo(kDouble); }
// We know it's a string.
static TypeInfo String() { return TypeInfo(kString); }
+ // We know it's an internalized string.
+ static TypeInfo InternalizedString() { return TypeInfo(kInternalizedString); }
// We know it's a non-primitive (object) type.
static TypeInfo NonPrimitive() { return TypeInfo(kNonPrimitive); }
// We haven't started collecting info yet.
@@ -114,7 +114,7 @@ class TypeInfo {
return false;
}
- static TypeInfo TypeFromValue(Handle<Object> value);
+ static TypeInfo FromValue(Handle<Object> value);
bool Equals(const TypeInfo& other) {
return type_ == other.type_;
@@ -140,14 +140,14 @@ class TypeInfo {
return ((type_ & kSmi) == kSmi);
}
- inline bool IsSymbol() {
+ inline bool IsInternalizedString() {
ASSERT(type_ != kUninitialized);
- return ((type_ & kSymbol) == kSymbol);
+ return ((type_ & kInternalizedString) == kInternalizedString);
}
- inline bool IsNonSymbol() {
+ inline bool IsNonInternalizedString() {
ASSERT(type_ != kUninitialized);
- return ((type_ & kSymbol) == kString);
+ return ((type_ & kInternalizedString) == kString);
}
inline bool IsInteger32() {
@@ -181,7 +181,7 @@ class TypeInfo {
case kNumber: return "Number";
case kInteger32: return "Integer32";
case kSmi: return "Smi";
- case kSymbol: return "Symbol";
+ case kInternalizedString: return "InternalizedString";
case kDouble: return "Double";
case kString: return "String";
case kNonPrimitive: return "Object";
@@ -193,17 +193,18 @@ class TypeInfo {
private:
enum Type {
- kUnknown = 0, // 0000000
- kPrimitive = 0x10, // 0010000
- kNumber = 0x11, // 0010001
- kInteger32 = 0x13, // 0010011
- kSmi = 0x17, // 0010111
- kDouble = 0x19, // 0011001
- kString = 0x30, // 0110000
- kSymbol = 0x32, // 0110010
- kNonPrimitive = 0x40, // 1000000
- kUninitialized = 0x7f // 1111111
+ kUnknown = 0, // 0000000
+ kPrimitive = 0x10, // 0010000
+ kNumber = 0x11, // 0010001
+ kInteger32 = 0x13, // 0010011
+ kSmi = 0x17, // 0010111
+ kDouble = 0x19, // 0011001
+ kString = 0x30, // 0110000
+ kInternalizedString = 0x32, // 0110010
+ kNonPrimitive = 0x40, // 1000000
+ kUninitialized = 0x7f // 1111111
};
+
explicit inline TypeInfo(Type t) : type_(t) { }
Type type_;
@@ -217,19 +218,20 @@ enum StringStubFeedback {
// Forward declarations.
+// TODO(rossberg): these should all go away eventually.
class Assignment;
-class BinaryOperation;
class Call;
class CallNew;
class CaseClause;
-class CompareOperation;
class CompilationInfo;
class CountOperation;
class Expression;
+class ForInStatement;
+class ICStub;
class Property;
class SmallMapList;
-class UnaryOperation;
-class ForInStatement;
+class ObjectLiteral;
+class ObjectLiteralProperty;
class TypeFeedbackOracle: public ZoneObject {
@@ -241,17 +243,24 @@ class TypeFeedbackOracle: public ZoneObject {
bool LoadIsMonomorphicNormal(Property* expr);
bool LoadIsUninitialized(Property* expr);
- bool LoadIsMegamorphicWithTypeInfo(Property* expr);
+ bool LoadIsPreMonomorphic(Property* expr);
+ bool LoadIsPolymorphic(Property* expr);
+ bool StoreIsUninitialized(TypeFeedbackId ast_id);
bool StoreIsMonomorphicNormal(TypeFeedbackId ast_id);
- bool StoreIsMegamorphicWithTypeInfo(TypeFeedbackId ast_id);
+ bool StoreIsPreMonomorphic(TypeFeedbackId ast_id);
+ bool StoreIsKeyedPolymorphic(TypeFeedbackId ast_id);
bool CallIsMonomorphic(Call* expr);
bool CallNewIsMonomorphic(CallNew* expr);
- bool ObjectLiteralStoreIsMonomorphic(ObjectLiteral::Property* prop);
+ bool ObjectLiteralStoreIsMonomorphic(ObjectLiteralProperty* prop);
- bool IsForInFastCase(ForInStatement* expr);
+ // TODO(1571) We can't use ForInStatement::ForInType as the return value due
+ // to various cycles in our headers.
+ byte ForInType(ForInStatement* expr);
Handle<Map> LoadMonomorphicReceiverType(Property* expr);
- Handle<Map> StoreMonomorphicReceiverType(TypeFeedbackId ast_id);
+ Handle<Map> StoreMonomorphicReceiverType(TypeFeedbackId id);
+
+ KeyedAccessStoreMode GetStoreMode(TypeFeedbackId ast_id);
void LoadReceiverTypes(Property* expr,
Handle<String> name,
@@ -265,36 +274,49 @@ class TypeFeedbackOracle: public ZoneObject {
SmallMapList* types);
void CollectKeyedReceiverTypes(TypeFeedbackId ast_id,
SmallMapList* types);
+ void CollectPolymorphicStoreReceiverTypes(TypeFeedbackId ast_id,
+ SmallMapList* types);
static bool CanRetainOtherContext(Map* map, Context* native_context);
static bool CanRetainOtherContext(JSFunction* function,
Context* native_context);
- CheckType GetCallCheckType(Call* expr);
- Handle<JSObject> GetPrototypeForPrimitiveCheck(CheckType check);
+ void CollectPolymorphicMaps(Handle<Code> code, SmallMapList* types);
+ CheckType GetCallCheckType(Call* expr);
Handle<JSFunction> GetCallTarget(Call* expr);
Handle<JSFunction> GetCallNewTarget(CallNew* expr);
+ Handle<Cell> GetCallNewAllocationInfoCell(CallNew* expr);
- Handle<Map> GetObjectLiteralStoreMap(ObjectLiteral::Property* prop);
+ Handle<Map> GetObjectLiteralStoreMap(ObjectLiteralProperty* prop);
bool LoadIsBuiltin(Property* expr, Builtins::Name id);
+ bool LoadIsStub(Property* expr, ICStub* stub);
// TODO(1571) We can't use ToBooleanStub::Types as the return value because
- // of various cylces in our headers. Death to tons of implementations in
+ // of various cycles in our headers. Death to tons of implementations in
// headers!! :-P
- byte ToBooleanTypes(TypeFeedbackId ast_id);
+ byte ToBooleanTypes(TypeFeedbackId id);
// Get type information for arithmetic operations and compares.
- TypeInfo UnaryType(UnaryOperation* expr);
- TypeInfo BinaryType(BinaryOperation* expr);
- TypeInfo CompareType(CompareOperation* expr);
- bool IsSymbolCompare(CompareOperation* expr);
- Handle<Map> GetCompareMap(CompareOperation* expr);
- TypeInfo SwitchType(CaseClause* clause);
- TypeInfo IncrementType(CountOperation* expr);
+ void BinaryType(TypeFeedbackId id,
+ Handle<Type>* left,
+ Handle<Type>* right,
+ Handle<Type>* result,
+ Maybe<int>* fixed_right_arg,
+ Token::Value operation);
+
+ void CompareType(TypeFeedbackId id,
+ Handle<Type>* left,
+ Handle<Type>* right,
+ Handle<Type>* combined);
+
+ Handle<Type> ClauseType(TypeFeedbackId id);
+
+ Handle<Type> IncrementType(CountOperation* expr);
Zone* zone() const { return zone_; }
+ Isolate* isolate() const { return isolate_; }
private:
void CollectReceiverTypes(TypeFeedbackId ast_id,
@@ -317,10 +339,14 @@ class TypeFeedbackOracle: public ZoneObject {
// there is no information.
Handle<Object> GetInfo(TypeFeedbackId ast_id);
+ // Return the cell that contains type feedback.
+ Handle<Cell> GetInfoCell(TypeFeedbackId ast_id);
+
+ private:
Handle<Context> native_context_;
Isolate* isolate_;
- Handle<UnseededNumberDictionary> dictionary_;
Zone* zone_;
+ Handle<UnseededNumberDictionary> dictionary_;
DISALLOW_COPY_AND_ASSIGN(TypeFeedbackOracle);
};
diff --git a/deps/v8/src/typedarray.js b/deps/v8/src/typedarray.js
new file mode 100644
index 000000000..1e67bc30c
--- /dev/null
+++ b/deps/v8/src/typedarray.js
@@ -0,0 +1,597 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"use strict";
+
+// This file relies on the fact that the following declaration has been made
+// in runtime.js:
+// var $Array = global.Array;
+var $ArrayBuffer = global.ArrayBuffer;
+
+
+// --------------- Typed Arrays ---------------------
+
+function CreateTypedArrayConstructor(name, elementSize, arrayId, constructor) {
+ function ConstructByArrayBuffer(obj, buffer, byteOffset, length) {
+ var offset = ToPositiveInteger(byteOffset, "invalid_typed_array_length");
+
+ if (offset % elementSize !== 0) {
+ throw MakeRangeError("invalid_typed_array_alignment",
+ "start offset", name, elementSize);
+ }
+ var bufferByteLength = %ArrayBufferGetByteLength(buffer);
+ if (offset > bufferByteLength) {
+ throw MakeRangeError("invalid_typed_array_offset");
+ }
+
+ var newByteLength;
+ var newLength;
+ if (IS_UNDEFINED(length)) {
+ if (bufferByteLength % elementSize !== 0) {
+ throw MakeRangeError("invalid_typed_array_alignment",
+ "byte length", name, elementSize);
+ }
+ newByteLength = bufferByteLength - offset;
+ newLength = newByteLength / elementSize;
+ } else {
+ newLength = ToPositiveInteger(length, "invalid_typed_array_length");
+ newByteLength = newLength * elementSize;
+ }
+ if (offset + newByteLength > bufferByteLength) {
+ throw MakeRangeError("invalid_typed_array_length");
+ }
+ %TypedArrayInitialize(obj, arrayId, buffer, offset, newByteLength);
+ }
+
+ function ConstructByLength(obj, length) {
+ var l = ToPositiveInteger(length, "invalid_typed_array_length");
+ var byteLength = l * elementSize;
+ var buffer = new $ArrayBuffer(byteLength);
+ %TypedArrayInitialize(obj, arrayId, buffer, 0, byteLength);
+ }
+
+ function ConstructByArrayLike(obj, arrayLike) {
+ var length = arrayLike.length;
+ var l = ToPositiveInteger(length, "invalid_typed_array_length");
+ if (!%TypedArrayInitializeFromArrayLike(obj, arrayId, arrayLike, l)) {
+ for (var i = 0; i < l; i++) {
+ // It is crucial that we let any exceptions from arrayLike[i]
+ // propagate outside the function.
+ obj[i] = arrayLike[i];
+ }
+ }
+ }
+
+ return function (arg1, arg2, arg3) {
+ if (%_IsConstructCall()) {
+ if (IS_ARRAYBUFFER(arg1)) {
+ ConstructByArrayBuffer(this, arg1, arg2, arg3);
+ } else if (IS_NUMBER(arg1) || IS_STRING(arg1) ||
+ IS_BOOLEAN(arg1) || IS_UNDEFINED(arg1)) {
+ ConstructByLength(this, arg1);
+ } else {
+ ConstructByArrayLike(this, arg1);
+ }
+ } else {
+ throw MakeTypeError("constructor_not_function", [name])
+ }
+ }
+}
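(For illustration only, not part of the patch: a minimal sketch of the three construction paths the generated constructor dispatches on, using example values.)

  var buf = new ArrayBuffer(16);
  var a = new Int32Array(buf, 4, 3);   // ConstructByArrayBuffer: offset 4 is 4-byte aligned
  var b = new Int32Array(4);           // ConstructByLength: allocates a fresh 16-byte buffer
  var c = new Int32Array([1, 2, 3]);   // ConstructByArrayLike: copies elements one by one
  // new Int32Array(buf, 2) throws a RangeError: offset 2 fails the alignment check above.
  // Int32Array(4) without `new` throws a TypeError ("constructor_not_function").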
+
+function TypedArrayGetBuffer() {
+ return %TypedArrayGetBuffer(this);
+}
+
+function TypedArrayGetByteLength() {
+ return %TypedArrayGetByteLength(this);
+}
+
+function TypedArrayGetByteOffset() {
+ return %TypedArrayGetByteOffset(this);
+}
+
+function TypedArrayGetLength() {
+ return %TypedArrayGetLength(this);
+}
+
+function CreateSubArray(elementSize, constructor) {
+ return function(begin, end) {
+ var srcLength = %TypedArrayGetLength(this);
+ var beginInt = TO_INTEGER(begin);
+ if (beginInt < 0) {
+ beginInt = MathMax(0, srcLength + beginInt);
+ } else {
+ beginInt = MathMin(srcLength, beginInt);
+ }
+
+ var endInt = IS_UNDEFINED(end) ? srcLength : TO_INTEGER(end);
+ if (endInt < 0) {
+ endInt = MathMax(0, srcLength + endInt);
+ } else {
+ endInt = MathMin(endInt, srcLength);
+ }
+ if (endInt < beginInt) {
+ endInt = beginInt;
+ }
+ var newLength = endInt - beginInt;
+ var beginByteOffset =
+ %TypedArrayGetByteOffset(this) + beginInt * elementSize;
+ return new constructor(%TypedArrayGetBuffer(this),
+ beginByteOffset, newLength);
+ }
+}
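(Illustrative sketch, not part of the patch: how the begin/end clamping above behaves on a concrete view.)

  var t = new Uint8Array([0, 1, 2, 3, 4, 5, 6, 7]);
  var s1 = t.subarray(2, 5);   // elements 2,3,4 -- a view over the same buffer, not a copy
  var s2 = t.subarray(-3);     // negative begin clamps to srcLength - 3, i.e. elements 5,6,7
  var s3 = t.subarray(5, 2);   // end < begin clamps to an empty view (length 0)
  t[2] = 42;                   // visible through s1[0], since both share t.buffer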
+
+function TypedArraySetFromArrayLike(target, source, sourceLength, offset) {
+ if (offset > 0) {
+ for (var i = 0; i < sourceLength; i++) {
+ target[offset + i] = source[i];
+ }
+ }
+ else {
+ for (var i = 0; i < sourceLength; i++) {
+ target[i] = source[i];
+ }
+ }
+}
+
+function TypedArraySetFromOverlappingTypedArray(target, source, offset) {
+ var sourceElementSize = source.BYTES_PER_ELEMENT;
+ var targetElementSize = target.BYTES_PER_ELEMENT;
+ var sourceLength = source.length;
+
+ // Copy left part.
+ function CopyLeftPart() {
+ // First unmutated byte after the next write
+ var targetPtr = target.byteOffset + (offset + 1) * targetElementSize;
+ // Next read at sourcePtr. We do not care for memory changing before
+ // sourcePtr - we have already copied it.
+ var sourcePtr = source.byteOffset;
+ for (var leftIndex = 0;
+ leftIndex < sourceLength && targetPtr <= sourcePtr;
+ leftIndex++) {
+ target[offset + leftIndex] = source[leftIndex];
+ targetPtr += targetElementSize;
+ sourcePtr += sourceElementSize;
+ }
+ return leftIndex;
+ }
+ var leftIndex = CopyLeftPart();
+
+ // Copy right part.
+ function CopyRightPart() {
+ // First unmutated byte before the next write
+ var targetPtr =
+ target.byteOffset + (offset + sourceLength - 1) * targetElementSize;
+ // Next read before sourcePtr. We do not care for memory changing after
+ // sourcePtr - we have already copied it.
+ var sourcePtr =
+ source.byteOffset + sourceLength * sourceElementSize;
+ for (var rightIndex = sourceLength - 1;
+ rightIndex >= leftIndex && targetPtr >= sourcePtr;
+ rightIndex--) {
+ target[offset + rightIndex] = source[rightIndex];
+ targetPtr -= targetElementSize;
+ sourcePtr -= sourceElementSize;
+ }
+ return rightIndex;
+ }
+ var rightIndex = CopyRightPart();
+
+ var temp = new $Array(rightIndex + 1 - leftIndex);
+ for (var i = leftIndex; i <= rightIndex; i++) {
+ temp[i - leftIndex] = source[i];
+ }
+ for (i = leftIndex; i <= rightIndex; i++) {
+ target[offset + i] = temp[i - leftIndex];
+ }
+}
+
+function TypedArraySet(obj, offset) {
+ var intOffset = IS_UNDEFINED(offset) ? 0 : TO_INTEGER(offset);
+ if (intOffset < 0) {
+ throw MakeTypeError("typed_array_set_negative_offset");
+ }
+ switch (%TypedArraySetFastCases(this, obj, intOffset)) {
+ // These numbers should be synchronized with runtime.cc.
+ case 0: // TYPED_ARRAY_SET_TYPED_ARRAY_SAME_TYPE
+ return;
+ case 1: // TYPED_ARRAY_SET_TYPED_ARRAY_OVERLAPPING
+ TypedArraySetFromOverlappingTypedArray(this, obj, intOffset);
+ return;
+ case 2: // TYPED_ARRAY_SET_TYPED_ARRAY_NONOVERLAPPING
+ TypedArraySetFromArrayLike(this, obj, obj.length, intOffset);
+ return;
+ case 3: // TYPED_ARRAY_SET_NON_TYPED_ARRAY
+ var l = obj.length;
+ if (IS_UNDEFINED(l)) {
+ if (IS_NUMBER(obj)) {
+ // For number as a first argument, throw TypeError
+ // instead of silently ignoring the call, so that
+ // the user knows (s)he did something wrong.
+ // (Consistent with Firefox and Blink/WebKit)
+ throw MakeTypeError("invalid_argument");
+ }
+ return;
+ }
+ if (intOffset + l > this.length) {
+ throw MakeRangeError("typed_array_set_source_too_large");
+ }
+ TypedArraySetFromArrayLike(this, obj, l, intOffset);
+ return;
+ }
+}
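(Illustrative sketch, not part of the patch: the fast-case numbers above map to these call shapes.)

  var buf = new ArrayBuffer(8);
  var bytes = new Uint8Array(buf);
  bytes.set([1, 2, 3, 4, 5, 6, 7, 8]);   // case 3: plain array-like source
  var head = new Uint8Array(buf, 0, 6);
  var tail = new Uint8Array(buf, 2, 6);
  tail.set(head);                         // case 1: overlapping views on the same buffer
  // bytes.set([1, 2], 7) throws a RangeError: the source does not fit at that offset.
  // bytes.set(5) throws a TypeError instead of being silently ignored.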
+
+// -------------------------------------------------------------------
+
+function SetupTypedArray(arrayId, name, constructor, elementSize) {
+ %CheckIsBootstrapping();
+ var fun = CreateTypedArrayConstructor(name, elementSize,
+ arrayId, constructor);
+ %SetCode(constructor, fun);
+ %FunctionSetPrototype(constructor, new $Object());
+
+ %SetProperty(constructor, "BYTES_PER_ELEMENT", elementSize,
+ READ_ONLY | DONT_ENUM | DONT_DELETE);
+ %SetProperty(constructor.prototype,
+ "constructor", constructor, DONT_ENUM);
+ %SetProperty(constructor.prototype,
+ "BYTES_PER_ELEMENT", elementSize,
+ READ_ONLY | DONT_ENUM | DONT_DELETE);
+ InstallGetter(constructor.prototype, "buffer", TypedArrayGetBuffer);
+ InstallGetter(constructor.prototype, "byteOffset", TypedArrayGetByteOffset);
+ InstallGetter(constructor.prototype, "byteLength", TypedArrayGetByteLength);
+ InstallGetter(constructor.prototype, "length", TypedArrayGetLength);
+
+ InstallFunctions(constructor.prototype, DONT_ENUM, $Array(
+ "subarray", CreateSubArray(elementSize, constructor),
+ "set", TypedArraySet
+ ));
+}
+
+// arrayIds below should be synchronized with Runtime_TypedArrayInitialize.
+SetupTypedArray(1, "Uint8Array", global.Uint8Array, 1);
+SetupTypedArray(2, "Int8Array", global.Int8Array, 1);
+SetupTypedArray(3, "Uint16Array", global.Uint16Array, 2);
+SetupTypedArray(4, "Int16Array", global.Int16Array, 2);
+SetupTypedArray(5, "Uint32Array", global.Uint32Array, 4);
+SetupTypedArray(6, "Int32Array", global.Int32Array, 4);
+SetupTypedArray(7, "Float32Array", global.Float32Array, 4);
+SetupTypedArray(8, "Float64Array", global.Float64Array, 8);
+SetupTypedArray(9, "Uint8ClampedArray", global.Uint8ClampedArray, 1);
+
+
+// --------------------------- DataView -----------------------------
+
+var $DataView = global.DataView;
+
+function DataViewConstructor(buffer, byteOffset, byteLength) { // length = 3
+ if (%_IsConstructCall()) {
+ if (!IS_ARRAYBUFFER(buffer)) {
+ throw MakeTypeError('data_view_not_array_buffer', []);
+ }
+ var bufferByteLength = %ArrayBufferGetByteLength(buffer);
+ var offset = ToPositiveInteger(byteOffset, 'invalid_data_view_offset');
+ if (offset > bufferByteLength) {
+ throw MakeRangeError('invalid_data_view_offset');
+ }
+ var length = IS_UNDEFINED(byteLength) ?
+ bufferByteLength - offset : TO_INTEGER(byteLength);
+ if (length < 0 || offset + length > bufferByteLength) {
+ throw MakeRangeError('invalid_data_view_length');
+ }
+ %DataViewInitialize(this, buffer, offset, length);
+ } else {
+ throw MakeTypeError('constructor_not_function', ["DataView"]);
+ }
+}
+
+function DataViewGetBuffer() {
+ if (!IS_DATAVIEW(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['DataView.buffer', this]);
+ }
+ return %DataViewGetBuffer(this);
+}
+
+function DataViewGetByteOffset() {
+ if (!IS_DATAVIEW(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['DataView.byteOffset', this]);
+ }
+ return %DataViewGetByteOffset(this);
+}
+
+function DataViewGetByteLength() {
+ if (!IS_DATAVIEW(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['DataView.byteLength', this]);
+ }
+ return %DataViewGetByteLength(this);
+}
+
+function ToPositiveDataViewOffset(offset) {
+ return ToPositiveInteger(offset, 'invalid_data_view_accessor_offset');
+}
+
+function DataViewGetInt8(offset, little_endian) {
+ if (!IS_DATAVIEW(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['DataView.getInt8', this]);
+ }
+ if (%_ArgumentsLength() < 1) {
+ throw MakeTypeError('invalid_argument');
+ }
+ return %DataViewGetInt8(this,
+ ToPositiveDataViewOffset(offset),
+ !!little_endian);
+}
+
+function DataViewSetInt8(offset, value, little_endian) {
+ if (!IS_DATAVIEW(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['DataView.setInt8', this]);
+ }
+ if (%_ArgumentsLength() < 2) {
+ throw MakeTypeError('invalid_argument');
+ }
+ %DataViewSetInt8(this,
+ ToPositiveDataViewOffset(offset),
+ TO_NUMBER_INLINE(value),
+ !!little_endian);
+}
+
+function DataViewGetUint8(offset, little_endian) {
+ if (!IS_DATAVIEW(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['DataView.getUint8', this]);
+ }
+ if (%_ArgumentsLength() < 1) {
+ throw MakeTypeError('invalid_argument');
+ }
+ return %DataViewGetUint8(this,
+ ToPositiveDataViewOffset(offset),
+ !!little_endian);
+}
+
+function DataViewSetUint8(offset, value, little_endian) {
+ if (!IS_DATAVIEW(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['DataView.setUint8', this]);
+ }
+ if (%_ArgumentsLength() < 2) {
+ throw MakeTypeError('invalid_argument');
+ }
+ %DataViewSetUint8(this,
+ ToPositiveDataViewOffset(offset),
+ TO_NUMBER_INLINE(value),
+ !!little_endian);
+}
+
+function DataViewGetInt16(offset, little_endian) {
+ if (!IS_DATAVIEW(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['DataView.getInt16', this]);
+ }
+ if (%_ArgumentsLength() < 1) {
+ throw MakeTypeError('invalid_argument');
+ }
+ return %DataViewGetInt16(this,
+ ToPositiveDataViewOffset(offset),
+ !!little_endian);
+}
+
+function DataViewSetInt16(offset, value, little_endian) {
+ if (!IS_DATAVIEW(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['DataView.setInt16', this]);
+ }
+ if (%_ArgumentsLength() < 2) {
+ throw MakeTypeError('invalid_argument');
+ }
+ %DataViewSetInt16(this,
+ ToPositiveDataViewOffset(offset),
+ TO_NUMBER_INLINE(value),
+ !!little_endian);
+}
+
+function DataViewGetUint16(offset, little_endian) {
+ if (!IS_DATAVIEW(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['DataView.getUint16', this]);
+ }
+ if (%_ArgumentsLength() < 1) {
+ throw MakeTypeError('invalid_argument');
+ }
+ return %DataViewGetUint16(this,
+ ToPositiveDataViewOffset(offset),
+ !!little_endian);
+}
+
+function DataViewSetUint16(offset, value, little_endian) {
+ if (!IS_DATAVIEW(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['DataView.setUint16', this]);
+ }
+ if (%_ArgumentsLength() < 2) {
+ throw MakeTypeError('invalid_argument');
+ }
+ %DataViewSetUint16(this,
+ ToPositiveDataViewOffset(offset),
+ TO_NUMBER_INLINE(value),
+ !!little_endian);
+}
+
+function DataViewGetInt32(offset, little_endian) {
+ if (!IS_DATAVIEW(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['DataView.getInt32', this]);
+ }
+ if (%_ArgumentsLength() < 1) {
+ throw MakeTypeError('invalid_argument');
+ }
+ return %DataViewGetInt32(this,
+ ToPositiveDataViewOffset(offset),
+ !!little_endian);
+}
+
+function DataViewSetInt32(offset, value, little_endian) {
+ if (!IS_DATAVIEW(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['DataView.setInt32', this]);
+ }
+ if (%_ArgumentsLength() < 2) {
+ throw MakeTypeError('invalid_argument');
+ }
+ %DataViewSetInt32(this,
+ ToPositiveDataViewOffset(offset),
+ TO_NUMBER_INLINE(value),
+ !!little_endian);
+}
+
+function DataViewGetUint32(offset, little_endian) {
+ if (!IS_DATAVIEW(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['DataView.getUint32', this]);
+ }
+ if (%_ArgumentsLength() < 1) {
+ throw MakeTypeError('invalid_argument');
+ }
+ return %DataViewGetUint32(this,
+ ToPositiveDataViewOffset(offset),
+ !!little_endian);
+}
+
+function DataViewSetUint32(offset, value, little_endian) {
+ if (!IS_DATAVIEW(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['DataView.setUint32', this]);
+ }
+ if (%_ArgumentsLength() < 2) {
+ throw MakeTypeError('invalid_argument');
+ }
+ %DataViewSetUint32(this,
+ ToPositiveDataViewOffset(offset),
+ TO_NUMBER_INLINE(value),
+ !!little_endian);
+}
+
+function DataViewGetFloat32(offset, little_endian) {
+ if (!IS_DATAVIEW(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['DataView.getFloat32', this]);
+ }
+ if (%_ArgumentsLength() < 1) {
+ throw MakeTypeError('invalid_argument');
+ }
+ return %DataViewGetFloat32(this,
+ ToPositiveDataViewOffset(offset),
+ !!little_endian);
+}
+
+function DataViewSetFloat32(offset, value, little_endian) {
+ if (!IS_DATAVIEW(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['DataView.setFloat32', this]);
+ }
+ if (%_ArgumentsLength() < 2) {
+ throw MakeTypeError('invalid_argument');
+ }
+ %DataViewSetFloat32(this,
+ ToPositiveDataViewOffset(offset),
+ TO_NUMBER_INLINE(value),
+ !!little_endian);
+}
+
+function DataViewGetFloat64(offset, little_endian) {
+ if (!IS_DATAVIEW(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['DataView.getFloat64', this]);
+ }
+ if (%_ArgumentsLength() < 1) {
+ throw MakeTypeError('invalid_argument');
+ }
+ return %DataViewGetFloat64(this,
+ ToPositiveDataViewOffset(offset),
+ !!little_endian);
+}
+
+function DataViewSetFloat64(offset, value, little_endian) {
+ if (!IS_DATAVIEW(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['DataView.setFloat64', this]);
+ }
+ if (%_ArgumentsLength() < 2) {
+ throw MakeTypeError('invalid_argument');
+ }
+ %DataViewSetFloat64(this,
+ ToPositiveDataViewOffset(offset),
+ TO_NUMBER_INLINE(value),
+ !!little_endian);
+}
+
+function SetupDataView() {
+ %CheckIsBootstrapping();
+
+ // Setup the DataView constructor.
+ %SetCode($DataView, DataViewConstructor);
+ %FunctionSetPrototype($DataView, new $Object);
+
+ // Set up constructor property on the DataView prototype.
+ %SetProperty($DataView.prototype, "constructor", $DataView, DONT_ENUM);
+
+ InstallGetter($DataView.prototype, "buffer", DataViewGetBuffer);
+ InstallGetter($DataView.prototype, "byteOffset", DataViewGetByteOffset);
+ InstallGetter($DataView.prototype, "byteLength", DataViewGetByteLength);
+
+ InstallFunctions($DataView.prototype, DONT_ENUM, $Array(
+ "getInt8", DataViewGetInt8,
+ "setInt8", DataViewSetInt8,
+
+ "getUint8", DataViewGetUint8,
+ "setUint8", DataViewSetUint8,
+
+ "getInt16", DataViewGetInt16,
+ "setInt16", DataViewSetInt16,
+
+ "getUint16", DataViewGetUint16,
+ "setUint16", DataViewSetUint16,
+
+ "getInt32", DataViewGetInt32,
+ "setInt32", DataViewSetInt32,
+
+ "getUint32", DataViewGetUint32,
+ "setUint32", DataViewSetUint32,
+
+ "getFloat32", DataViewGetFloat32,
+ "setFloat32", DataViewSetFloat32,
+
+ "getFloat64", DataViewGetFloat64,
+ "setFloat64", DataViewSetFloat64
+ ));
+}
+
+SetupDataView();
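(Illustrative sketch, not part of the patch: the accessors installed above default to big-endian and take an optional little_endian flag.)

  var dv = new DataView(new ArrayBuffer(4));
  dv.setUint16(0, 0x1234);         // big-endian write: byte 0 becomes 0x12
  dv.getUint8(0);                  // 0x12
  dv.setUint16(0, 0x1234, true);   // little-endian write: byte 0 becomes 0x34
  dv.getUint8(0);                  // 0x34
  // dv.getUint16() with no arguments throws a TypeError ("invalid_argument").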
diff --git a/deps/v8/src/types.cc b/deps/v8/src/types.cc
new file mode 100644
index 000000000..17a19b29e
--- /dev/null
+++ b/deps/v8/src/types.cc
@@ -0,0 +1,551 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "types.h"
+#include "string-stream.h"
+
+namespace v8 {
+namespace internal {
+
+int Type::NumClasses() {
+ if (is_class()) {
+ return 1;
+ } else if (is_union()) {
+ Handle<Unioned> unioned = as_union();
+ int result = 0;
+ for (int i = 0; i < unioned->length(); ++i) {
+ if (union_get(unioned, i)->is_class()) ++result;
+ }
+ return result;
+ } else {
+ return 0;
+ }
+}
+
+
+int Type::NumConstants() {
+ if (is_constant()) {
+ return 1;
+ } else if (is_union()) {
+ Handle<Unioned> unioned = as_union();
+ int result = 0;
+ for (int i = 0; i < unioned->length(); ++i) {
+ if (union_get(unioned, i)->is_constant()) ++result;
+ }
+ return result;
+ } else {
+ return 0;
+ }
+}
+
+
+template<class T>
+Handle<Type> Type::Iterator<T>::get_type() {
+ ASSERT(!Done());
+ return type_->is_union() ? union_get(type_->as_union(), index_) : type_;
+}
+
+template<>
+Handle<Map> Type::Iterator<Map>::Current() {
+ return get_type()->as_class();
+}
+
+template<>
+Handle<v8::internal::Object> Type::Iterator<v8::internal::Object>::Current() {
+ return get_type()->as_constant();
+}
+
+
+template<>
+bool Type::Iterator<Map>::matches(Handle<Type> type) {
+ return type->is_class();
+}
+
+template<>
+bool Type::Iterator<v8::internal::Object>::matches(Handle<Type> type) {
+ return type->is_constant();
+}
+
+
+template<class T>
+void Type::Iterator<T>::Advance() {
+ ++index_;
+ if (type_->is_union()) {
+ Handle<Unioned> unioned = type_->as_union();
+ for (; index_ < unioned->length(); ++index_) {
+ if (matches(union_get(unioned, index_))) return;
+ }
+ } else if (index_ == 0 && matches(type_)) {
+ return;
+ }
+ index_ = -1;
+}
+
+template class Type::Iterator<Map>;
+template class Type::Iterator<v8::internal::Object>;
+
+
+// Get the smallest bitset subsuming this type.
+int Type::LubBitset() {
+ if (this->is_bitset()) {
+ return this->as_bitset();
+ } else if (this->is_union()) {
+ Handle<Unioned> unioned = this->as_union();
+ int bitset = kNone;
+ for (int i = 0; i < unioned->length(); ++i) {
+ bitset |= union_get(unioned, i)->LubBitset();
+ }
+ return bitset;
+ } else {
+ Map* map = NULL;
+ if (this->is_class()) {
+ map = *this->as_class();
+ } else {
+ Handle<v8::internal::Object> value = this->as_constant();
+ if (value->IsSmi()) return kSmi;
+ map = HeapObject::cast(*value)->map();
+ if (map->instance_type() == HEAP_NUMBER_TYPE) {
+ int32_t i;
+ uint32_t u;
+ if (value->ToInt32(&i)) return Smi::IsValid(i) ? kSmi : kOtherSigned32;
+ if (value->ToUint32(&u)) return kUnsigned32;
+ return kDouble;
+ }
+ if (map->instance_type() == ODDBALL_TYPE) {
+ if (value->IsUndefined()) return kUndefined;
+ if (value->IsNull()) return kNull;
+ if (value->IsTrue() || value->IsFalse()) return kBoolean;
+ if (value->IsTheHole()) return kAny; // TODO(rossberg): kNone?
+ UNREACHABLE();
+ }
+ }
+ switch (map->instance_type()) {
+ case STRING_TYPE:
+ case ASCII_STRING_TYPE:
+ case CONS_STRING_TYPE:
+ case CONS_ASCII_STRING_TYPE:
+ case SLICED_STRING_TYPE:
+ case SLICED_ASCII_STRING_TYPE:
+ case EXTERNAL_STRING_TYPE:
+ case EXTERNAL_ASCII_STRING_TYPE:
+ case EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
+ case SHORT_EXTERNAL_STRING_TYPE:
+ case SHORT_EXTERNAL_ASCII_STRING_TYPE:
+ case SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
+ case INTERNALIZED_STRING_TYPE:
+ case ASCII_INTERNALIZED_STRING_TYPE:
+ case CONS_INTERNALIZED_STRING_TYPE:
+ case CONS_ASCII_INTERNALIZED_STRING_TYPE:
+ case EXTERNAL_INTERNALIZED_STRING_TYPE:
+ case EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE:
+ case EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE:
+ case SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE:
+ case SHORT_EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE:
+ case SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE:
+ return kString;
+ case SYMBOL_TYPE:
+ return kSymbol;
+ case ODDBALL_TYPE:
+ return kOddball;
+ case HEAP_NUMBER_TYPE:
+ return kDouble;
+ case JS_VALUE_TYPE:
+ case JS_DATE_TYPE:
+ case JS_OBJECT_TYPE:
+ case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
+ case JS_GENERATOR_OBJECT_TYPE:
+ case JS_MODULE_TYPE:
+ case JS_GLOBAL_OBJECT_TYPE:
+ case JS_BUILTINS_OBJECT_TYPE:
+ case JS_GLOBAL_PROXY_TYPE:
+ case JS_ARRAY_BUFFER_TYPE:
+ case JS_TYPED_ARRAY_TYPE:
+ case JS_DATA_VIEW_TYPE:
+ case JS_SET_TYPE:
+ case JS_MAP_TYPE:
+ case JS_WEAK_MAP_TYPE:
+ case JS_WEAK_SET_TYPE:
+ if (map->is_undetectable()) return kUndetectable;
+ return kOtherObject;
+ case JS_ARRAY_TYPE:
+ return kArray;
+ case JS_FUNCTION_TYPE:
+ return kFunction;
+ case JS_REGEXP_TYPE:
+ return kRegExp;
+ case JS_PROXY_TYPE:
+ case JS_FUNCTION_PROXY_TYPE:
+ return kProxy;
+ case MAP_TYPE:
+ // When compiling stub templates, the meta map is used as a placeholder
+ // for the actual map with which the template is later instantiated.
+ // We treat it as a kind of type variable whose upper bound is Any.
+ // TODO(rossberg): for caching of CompareNilIC stubs to work correctly,
+ // we must exclude Undetectable here. This makes no sense, really,
+ // because it means that the template isn't actually parametric.
+ // Also, it doesn't apply elsewhere. 8-(
+ // We ought to find a cleaner solution for compiling stubs parameterised
+ // over type or class variables, esp ones with bounds...
+ return kDetectable;
+ case DECLARED_ACCESSOR_INFO_TYPE:
+ case EXECUTABLE_ACCESSOR_INFO_TYPE:
+ case ACCESSOR_PAIR_TYPE:
+ case FIXED_ARRAY_TYPE:
+ return kInternal;
+ default:
+ UNREACHABLE();
+ return kNone;
+ }
+ }
+}
+
+
+// Get the largest bitset subsumed by this type.
+int Type::GlbBitset() {
+ if (this->is_bitset()) {
+ return this->as_bitset();
+ } else if (this->is_union()) {
+ // All but the first are non-bitsets and thus would yield kNone anyway.
+ return union_get(this->as_union(), 0)->GlbBitset();
+ } else {
+ return kNone;
+ }
+}
+
+
+// Check this <= that.
+bool Type::SlowIs(Type* that) {
+ // Fast path for bitsets.
+ if (this->is_none()) return true;
+ if (that->is_bitset()) {
+ return (this->LubBitset() | that->as_bitset()) == that->as_bitset();
+ }
+
+ if (that->is_class()) {
+ return this->is_class() && *this->as_class() == *that->as_class();
+ }
+ if (that->is_constant()) {
+ return this->is_constant() && *this->as_constant() == *that->as_constant();
+ }
+
+ // (T1 \/ ... \/ Tn) <= T <=> (T1 <= T) /\ ... /\ (Tn <= T)
+ if (this->is_union()) {
+ Handle<Unioned> unioned = this->as_union();
+ for (int i = 0; i < unioned->length(); ++i) {
+ Handle<Type> this_i = union_get(unioned, i);
+ if (!this_i->Is(that)) return false;
+ }
+ return true;
+ }
+
+ // T <= (T1 \/ ... \/ Tn) <=> (T <= T1) \/ ... \/ (T <= Tn)
+ // (iff T is not a union)
+ ASSERT(!this->is_union());
+ if (that->is_union()) {
+ Handle<Unioned> unioned = that->as_union();
+ for (int i = 0; i < unioned->length(); ++i) {
+ Handle<Type> that_i = union_get(unioned, i);
+ if (this->Is(that_i)) return true;
+ if (this->is_bitset()) break; // Fast fail, no other field is a bitset.
+ }
+ return false;
+ }
+
+ return false;
+}
+
+
+// Check this overlaps that.
+bool Type::Maybe(Type* that) {
+ // Fast path for bitsets.
+ if (this->is_bitset()) {
+ return (this->as_bitset() & that->LubBitset()) != 0;
+ }
+ if (that->is_bitset()) {
+ return (this->LubBitset() & that->as_bitset()) != 0;
+ }
+
+ // (T1 \/ ... \/ Tn) overlaps T <=> (T1 overlaps T) \/ ... \/ (Tn overlaps T)
+ if (this->is_union()) {
+ Handle<Unioned> unioned = this->as_union();
+ for (int i = 0; i < unioned->length(); ++i) {
+ Handle<Type> this_i = union_get(unioned, i);
+ if (this_i->Maybe(that)) return true;
+ }
+ return false;
+ }
+
+ // T overlaps (T1 \/ ... \/ Tn) <=> (T overlaps T1) \/ ... \/ (T overlaps Tn)
+ if (that->is_union()) {
+ Handle<Unioned> unioned = that->as_union();
+ for (int i = 0; i < unioned->length(); ++i) {
+ Handle<Type> that_i = union_get(unioned, i);
+ if (this->Maybe(that_i)) return true;
+ }
+ return false;
+ }
+
+ ASSERT(!that->is_union());
+ if (this->is_class()) {
+ return that->is_class() && *this->as_class() == *that->as_class();
+ }
+ if (this->is_constant()) {
+ return that->is_constant() && *this->as_constant() == *that->as_constant();
+ }
+
+ return false;
+}
+
+
+bool Type::InUnion(Handle<Unioned> unioned, int current_size) {
+ ASSERT(!this->is_union());
+ for (int i = 0; i < current_size; ++i) {
+ Handle<Type> type = union_get(unioned, i);
+ if (this->Is(type)) return true;
+ }
+ return false;
+}
+
+
+// Get non-bitsets from this which are not already subsumed by the result union,
+// storing them into result starting at current_size. Returns the updated size.
+int Type::ExtendUnion(Handle<Unioned> result, int current_size) {
+ int old_size = current_size;
+ if (this->is_class() || this->is_constant()) {
+ if (!this->InUnion(result, old_size)) result->set(current_size++, this);
+ } else if (this->is_union()) {
+ Handle<Unioned> unioned = this->as_union();
+ for (int i = 0; i < unioned->length(); ++i) {
+ Handle<Type> type = union_get(unioned, i);
+ ASSERT(i == 0 || !(type->is_bitset() || type->Is(union_get(unioned, 0))));
+ if (type->is_bitset()) continue;
+ if (!type->InUnion(result, old_size)) result->set(current_size++, *type);
+ }
+ }
+ return current_size;
+}
+
+
+// Union is O(1) on simple bit unions, but O(n*m) on structured unions.
+// TODO(rossberg): Should we use object sets somehow? Is it worth it?
+Type* Type::Union(Handle<Type> type1, Handle<Type> type2) {
+ // Fast case: bit sets.
+ if (type1->is_bitset() && type2->is_bitset()) {
+ return from_bitset(type1->as_bitset() | type2->as_bitset());
+ }
+
+ // Fast case: top or bottom types.
+ if (type1->SameValue(Type::Any())) return *type1;
+ if (type2->SameValue(Type::Any())) return *type2;
+ if (type1->SameValue(Type::None())) return *type2;
+ if (type2->SameValue(Type::None())) return *type1;
+
+ // Semi-fast case: Unioned objects are neither involved nor produced.
+ if (!(type1->is_union() || type2->is_union())) {
+ if (type1->Is(type2)) return *type2;
+ if (type2->Is(type1)) return *type1;
+ }
+
+ // Slow case: may need to produce a Unioned object.
+ Isolate* isolate = NULL;
+ int size = type1->is_bitset() || type2->is_bitset() ? 1 : 0;
+ if (!type1->is_bitset()) {
+ isolate = HeapObject::cast(*type1)->GetIsolate();
+ size += (type1->is_union() ? type1->as_union()->length() : 1);
+ }
+ if (!type2->is_bitset()) {
+ isolate = HeapObject::cast(*type2)->GetIsolate();
+ size += (type2->is_union() ? type2->as_union()->length() : 1);
+ }
+ ASSERT(isolate != NULL);
+ ASSERT(size >= 2);
+ Handle<Unioned> unioned = isolate->factory()->NewFixedArray(size);
+ size = 0;
+
+ int bitset = type1->GlbBitset() | type2->GlbBitset();
+ if (bitset != kNone) unioned->set(size++, from_bitset(bitset));
+ size = type1->ExtendUnion(unioned, size);
+ size = type2->ExtendUnion(unioned, size);
+
+ if (size == 1) {
+ return *union_get(unioned, 0);
+ } else if (size == unioned->length()) {
+ return from_handle(unioned);
+ }
+
+ // There was an overlap. Copy to smaller union.
+ Handle<Unioned> result = isolate->factory()->NewFixedArray(size);
+ for (int i = 0; i < size; ++i) result->set(i, unioned->get(i));
+ return from_handle(result);
+}
+
+
+// Get non-bitsets from this which are also in that (and not already in result),
+// storing them into result starting at current_size. Returns the updated size.
+int Type::ExtendIntersection(
+ Handle<Unioned> result, Handle<Type> that, int current_size) {
+ int old_size = current_size;
+ if (this->is_class() || this->is_constant()) {
+ if (this->Is(that) && !this->InUnion(result, old_size))
+ result->set(current_size++, this);
+ } else if (this->is_union()) {
+ Handle<Unioned> unioned = this->as_union();
+ for (int i = 0; i < unioned->length(); ++i) {
+ Handle<Type> type = union_get(unioned, i);
+ ASSERT(i == 0 || !(type->is_bitset() || type->Is(union_get(unioned, 0))));
+ if (type->is_bitset()) continue;
+ if (type->Is(that) && !type->InUnion(result, old_size))
+ result->set(current_size++, *type);
+ }
+ }
+ return current_size;
+}
+
+
+// Intersection is O(1) on simple bit unions, but O(n*m) on structured unions.
+// TODO(rossberg): Should we use object sets somehow? Is it worth it?
+Type* Type::Intersect(Handle<Type> type1, Handle<Type> type2) {
+ // Fast case: bit sets.
+ if (type1->is_bitset() && type2->is_bitset()) {
+ return from_bitset(type1->as_bitset() & type2->as_bitset());
+ }
+
+ // Fast case: top or bottom types.
+ if (type1->SameValue(Type::None())) return *type1;
+ if (type2->SameValue(Type::None())) return *type2;
+ if (type1->SameValue(Type::Any())) return *type2;
+ if (type2->SameValue(Type::Any())) return *type1;
+
+ // Semi-fast case: Unioned objects are neither involved nor produced.
+ if (!(type1->is_union() || type2->is_union())) {
+ if (type1->Is(type2)) return *type1;
+ if (type2->Is(type1)) return *type2;
+ }
+
+ // Slow case: may need to produce a Unioned object.
+ Isolate* isolate = NULL;
+ int size = 0;
+ if (!type1->is_bitset()) {
+ isolate = HeapObject::cast(*type1)->GetIsolate();
+ size = (type1->is_union() ? type1->as_union()->length() : 2);
+ }
+ if (!type2->is_bitset()) {
+ isolate = HeapObject::cast(*type2)->GetIsolate();
+ int size2 = (type2->is_union() ? type2->as_union()->length() : 2);
+ size = (size == 0 ? size2 : Min(size, size2));
+ }
+ ASSERT(isolate != NULL);
+ ASSERT(size >= 2);
+ Handle<Unioned> unioned = isolate->factory()->NewFixedArray(size);
+ size = 0;
+
+ int bitset = type1->GlbBitset() & type2->GlbBitset();
+ if (bitset != kNone) unioned->set(size++, from_bitset(bitset));
+ size = type1->ExtendIntersection(unioned, type2, size);
+ size = type2->ExtendIntersection(unioned, type1, size);
+
+ if (size == 0) {
+ return None();
+ } else if (size == 1) {
+ return *union_get(unioned, 0);
+ } else if (size == unioned->length()) {
+ return from_handle(unioned);
+ }
+
+ // There were dropped cases. Copy to smaller union.
+ Handle<Unioned> result = isolate->factory()->NewFixedArray(size);
+ for (int i = 0; i < size; ++i) result->set(i, unioned->get(i));
+ return from_handle(result);
+}
+
+
+Type* Type::Optional(Handle<Type> type) {
+ return type->is_bitset()
+ ? from_bitset(type->as_bitset() | kUndefined)
+ : Union(type, Undefined()->handle_via_isolate_of(*type));
+}
+
+
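+// For example, a value typed within Signed32 but not within Smi gets the
+// Integer32 representation, while anything that may not be a number falls
+// back to Tagged.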
+Representation Representation::FromType(Handle<Type> type) {
+ if (type->Is(Type::None())) return Representation::None();
+ if (type->Is(Type::Smi())) return Representation::Smi();
+ if (type->Is(Type::Signed32())) return Representation::Integer32();
+ if (type->Is(Type::Number())) return Representation::Double();
+ return Representation::Tagged();
+}
+
+
+#ifdef OBJECT_PRINT
+void Type::TypePrint() {
+ TypePrint(stdout);
+ PrintF(stdout, "\n");
+ Flush(stdout);
+}
+
+
+void Type::TypePrint(FILE* out) {
+ if (is_bitset()) {
+ int val = as_bitset();
+ const char* composed_name = GetComposedName(val);
+ if (composed_name != NULL) {
+ PrintF(out, "%s", composed_name);
+ return;
+ }
+ bool first_entry = true;
+ PrintF(out, "{");
+ for (unsigned i = 0; i < sizeof(val)*8; ++i) {
+ int mask = (1 << i);
+ if ((val & mask) != 0) {
+ if (!first_entry) PrintF(out, ",");
+ first_entry = false;
+ PrintF(out, "%s", GetPrimitiveName(mask));
+ }
+ }
+ PrintF(out, "}");
+ } else if (is_constant()) {
+ PrintF(out, "Constant(%p : ", static_cast<void*>(*as_constant()));
+ from_bitset(LubBitset())->TypePrint(out);
+ PrintF(")");
+ } else if (is_class()) {
+ PrintF(out, "Class(%p < ", static_cast<void*>(*as_class()));
+ from_bitset(LubBitset())->TypePrint(out);
+ PrintF(")");
+ } else if (is_union()) {
+ PrintF(out, "{");
+ Handle<Unioned> unioned = as_union();
+ for (int i = 0; i < unioned->length(); ++i) {
+ Handle<Type> type_i = union_get(unioned, i);
+ if (i > 0) PrintF(out, ",");
+ type_i->TypePrint(out);
+ }
+ PrintF(out, "}");
+ }
+}
+#endif
+
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/types.h b/deps/v8/src/types.h
new file mode 100644
index 000000000..5d437e26b
--- /dev/null
+++ b/deps/v8/src/types.h
@@ -0,0 +1,351 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_TYPES_H_
+#define V8_TYPES_H_
+
+#include "v8.h"
+
+#include "objects.h"
+
+namespace v8 {
+namespace internal {
+
+
+// A simple type system for compiler-internal use. It is based entirely on
+// union types, and all subtyping hence amounts to set inclusion. Besides the
+// obvious primitive types and some predefined unions, the type language also
+// can express class types (a.k.a. specific maps) and singleton types (i.e.,
+// concrete constants).
+//
+// The following equations and inequations hold:
+//
+// None <= T
+// T <= Any
+//
+// Oddball = Boolean \/ Null \/ Undefined
+// Number = Signed32 \/ Unsigned32 \/ Double
+// Smi <= Signed32
+// Name = String \/ Symbol
+// UniqueName = InternalizedString \/ Symbol
+// InternalizedString < String
+//
+// Allocated = Receiver \/ Number \/ Name
+// Detectable = Allocated - Undetectable
+// Undetectable < Object
+// Receiver = Object \/ Proxy
+// Array < Object
+// Function < Object
+// RegExp < Object
+//
+// Class(map) < T iff instance_type(map) < T
+// Constant(x) < T iff instance_type(map(x)) < T
+//
+// Note that Constant(x) < Class(map(x)) does _not_ hold, since x's map can
+// change! (Its instance type cannot, however.)
+// TODO(rossberg): the latter is not currently true for proxies, because of fix,
+// but will hold once we implement direct proxies.
+//
+// There are two main functions for testing types:
+//
+// T1->Is(T2) -- tests whether T1 is included in T2 (i.e., T1 <= T2)
+// T1->Maybe(T2) -- tests whether T1 and T2 overlap (i.e., T1 /\ T2 =/= 0)
+//
+// Typically, the former is to be used to select representations (e.g., via
+// T->Is(Integer31())), and the latter to check whether a specific case needs
+// handling (e.g., via T->Maybe(Number())).
+//
+// There is no functionality to discover whether a type is a leaf in the
+// lattice. That is intentional. It should always be possible to refine the
+// lattice (e.g., splitting up number types further) without invalidating any
+// existing assumptions or tests.
+//
+// Consequently, do not use pointer equality for type tests, always use Is!
+//
+// Internally, all 'primitive' types, and their unions, are represented as
+// bitsets via smis. Class is a heap pointer to the respective map. Only
+// Constants, or unions containing Classes or Constants, require allocation.
+// Note that the bitset representation is closed under both Union and Intersect.
+//
+// The type representation is heap-allocated, so cannot (currently) be used in
+// a concurrent compilation context.
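+//
+// A minimal usage sketch (the handle and isolate names are illustrative):
+//
+//   Handle<Type> t = handle(Type::Smi(), isolate);
+//   if (t->Is(Type::Signed32())) { ... pick an int32 representation ... }
+//   if (t->Maybe(Type::String())) { ... a string case must be handled ... }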
+
+
+#define PRIMITIVE_TYPE_LIST(V) \
+ V(None, 0) \
+ V(Null, 1 << 0) \
+ V(Undefined, 1 << 1) \
+ V(Boolean, 1 << 2) \
+ V(Smi, 1 << 3) \
+ V(OtherSigned32, 1 << 4) \
+ V(Unsigned32, 1 << 5) \
+ V(Double, 1 << 6) \
+ V(Symbol, 1 << 7) \
+ V(InternalizedString, 1 << 8) \
+ V(OtherString, 1 << 9) \
+ V(Undetectable, 1 << 10) \
+ V(Array, 1 << 11) \
+ V(Function, 1 << 12) \
+ V(RegExp, 1 << 13) \
+ V(OtherObject, 1 << 14) \
+ V(Proxy, 1 << 15) \
+ V(Internal, 1 << 16)
+
+#define COMPOSED_TYPE_LIST(V) \
+ V(Oddball, kBoolean | kNull | kUndefined) \
+ V(Signed32, kSmi | kOtherSigned32) \
+ V(Number, kSigned32 | kUnsigned32 | kDouble) \
+ V(String, kInternalizedString | kOtherString) \
+ V(UniqueName, kSymbol | kInternalizedString) \
+ V(Name, kSymbol | kString) \
+ V(NumberOrString, kNumber | kString) \
+ V(Object, kUndetectable | kArray | kFunction | \
+ kRegExp | kOtherObject) \
+ V(Receiver, kObject | kProxy) \
+ V(Allocated, kDouble | kName | kReceiver) \
+ V(Any, kOddball | kNumber | kAllocated | kInternal) \
+ V(NonNumber, kAny - kNumber) \
+ V(Detectable, kAllocated - kUndetectable)
+
+#define TYPE_LIST(V) \
+ PRIMITIVE_TYPE_LIST(V) \
+ COMPOSED_TYPE_LIST(V)
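+
+// Because the composed types are bitwise-ors of primitive bits, unions and
+// intersections of these types remain bitsets; e.g. kNumber | kString is
+// exactly kNumberOrString, and kSigned32 & kSmi is exactly kSmi.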
+
+
+
+class Type : public Object {
+ public:
+ #define DEFINE_TYPE_CONSTRUCTOR(type, value) \
+ static Type* type() { return from_bitset(k##type); }
+ TYPE_LIST(DEFINE_TYPE_CONSTRUCTOR)
+ #undef DEFINE_TYPE_CONSTRUCTOR
+
+ static Type* Class(Handle<Map> map) { return from_handle(map); }
+ static Type* Constant(Handle<HeapObject> value) {
+ return Constant(value, value->GetIsolate());
+ }
+ static Type* Constant(Handle<v8::internal::Object> value, Isolate* isolate) {
+ return from_handle(isolate->factory()->NewBox(value));
+ }
+
+ static Type* Union(Handle<Type> type1, Handle<Type> type2);
+ static Type* Intersect(Handle<Type> type1, Handle<Type> type2);
+ static Type* Optional(Handle<Type> type); // type \/ Undefined
+
+ bool Is(Type* that) { return (this == that) ? true : SlowIs(that); }
+ bool Is(Handle<Type> that) { return this->Is(*that); }
+ bool Maybe(Type* that);
+ bool Maybe(Handle<Type> that) { return this->Maybe(*that); }
+
+ bool IsClass() { return is_class(); }
+ bool IsConstant() { return is_constant(); }
+ Handle<Map> AsClass() { return as_class(); }
+ Handle<v8::internal::Object> AsConstant() { return as_constant(); }
+
+ int NumClasses();
+ int NumConstants();
+
+ template<class T>
+ class Iterator {
+ public:
+ bool Done() const { return index_ < 0; }
+ Handle<T> Current();
+ void Advance();
+
+ private:
+ friend class Type;
+
+ Iterator() : index_(-1) {}
+ explicit Iterator(Handle<Type> type) : type_(type), index_(-1) {
+ Advance();
+ }
+
+ inline bool matches(Handle<Type> type);
+ inline Handle<Type> get_type();
+
+ Handle<Type> type_;
+ int index_;
+ };
+
+ Iterator<Map> Classes() {
+ if (this->is_bitset()) return Iterator<Map>();
+ return Iterator<Map>(this->handle());
+ }
+ Iterator<v8::internal::Object> Constants() {
+ if (this->is_bitset()) return Iterator<v8::internal::Object>();
+ return Iterator<v8::internal::Object>(this->handle());
+ }
+
+ static Type* cast(v8::internal::Object* object) {
+ Type* t = static_cast<Type*>(object);
+ ASSERT(t->is_bitset() || t->is_class() ||
+ t->is_constant() || t->is_union());
+ return t;
+ }
+
+#ifdef OBJECT_PRINT
+ void TypePrint();
+ void TypePrint(FILE* out);
+#endif
+
+ private:
+ // A union is a fixed array containing types. Invariants:
+ // - its length is at least 2
+ // - at most one field is a bitset, and it must go into index 0
+ // - no field is a union
+ typedef FixedArray Unioned;
+
+ enum {
+ #define DECLARE_TYPE(type, value) k##type = (value),
+ TYPE_LIST(DECLARE_TYPE)
+ #undef DECLARE_TYPE
+ kUnusedEOL = 0
+ };
+
+ bool is_none() { return this == None(); }
+ bool is_bitset() { return this->IsSmi(); }
+ bool is_class() { return this->IsMap(); }
+ bool is_constant() { return this->IsBox(); }
+ bool is_union() { return this->IsFixedArray(); }
+
+ bool SlowIs(Type* that);
+
+ int as_bitset() { return Smi::cast(this)->value(); }
+ Handle<Map> as_class() { return Handle<Map>::cast(handle()); }
+ Handle<v8::internal::Object> as_constant() {
+ Handle<Box> box = Handle<Box>::cast(handle());
+ return v8::internal::handle(box->value(), box->GetIsolate());
+ }
+ Handle<Unioned> as_union() { return Handle<Unioned>::cast(handle()); }
+
+ Handle<Type> handle() { return handle_via_isolate_of(this); }
+ Handle<Type> handle_via_isolate_of(Type* type) {
+ ASSERT(type->IsHeapObject());
+ return v8::internal::handle(this, HeapObject::cast(type)->GetIsolate());
+ }
+
+ static Type* from_bitset(int bitset) {
+ return static_cast<Type*>(Object::cast(Smi::FromInt(bitset)));
+ }
+ static Type* from_handle(Handle<HeapObject> handle) {
+ return static_cast<Type*>(Object::cast(*handle));
+ }
+
+ static Handle<Type> union_get(Handle<Unioned> unioned, int i) {
+ Type* type = static_cast<Type*>(unioned->get(i));
+ ASSERT(!type->is_union());
+ return type->handle_via_isolate_of(from_handle(unioned));
+ }
+
+ int LubBitset(); // least upper bound that's a bitset
+ int GlbBitset(); // greatest lower bound that's a bitset
+ bool InUnion(Handle<Unioned> unioned, int current_size);
+ int ExtendUnion(Handle<Unioned> unioned, int current_size);
+ int ExtendIntersection(
+ Handle<Unioned> unioned, Handle<Type> type, int current_size);
+
+ static const char* GetComposedName(int type) {
+ switch (type) {
+ #define PRINT_COMPOSED_TYPE(type, value) \
+ case k##type: \
+ return # type;
+ COMPOSED_TYPE_LIST(PRINT_COMPOSED_TYPE)
+ #undef PRINT_COMPOSED_TYPE
+ }
+ return NULL;
+ }
+
+ static const char* GetPrimitiveName(int type) {
+ switch (type) {
+ #define PRINT_PRIMITIVE_TYPE(type, value) \
+ case k##type: \
+ return # type;
+ PRIMITIVE_TYPE_LIST(PRINT_PRIMITIVE_TYPE)
+ #undef PRINT_PRIMITIVE_TYPE
+ default:
+ UNREACHABLE();
+ return "InvalidType";
+ }
+ }
+};
+
+
+// A simple struct to represent a pair of lower/upper type bounds.
+struct Bounds {
+ Handle<Type> lower;
+ Handle<Type> upper;
+
+ Bounds() {}
+ Bounds(Handle<Type> l, Handle<Type> u) : lower(l), upper(u) {
+ ASSERT(lower->Is(upper));
+ }
+ Bounds(Type* l, Type* u, Isolate* isl) : lower(l, isl), upper(u, isl) {
+ ASSERT(lower->Is(upper));
+ }
+ explicit Bounds(Handle<Type> t) : lower(t), upper(t) {
+ ASSERT(lower->Is(upper));
+ }
+ Bounds(Type* t, Isolate* isl) : lower(t, isl), upper(t, isl) {
+ ASSERT(lower->Is(upper));
+ }
+
+ // Unrestricted bounds.
+ static Bounds Unbounded(Isolate* isl) {
+ return Bounds(Type::None(), Type::Any(), isl);
+ }
+
+ // Meet: both b1 and b2 are known to hold.
+ static Bounds Both(Bounds b1, Bounds b2, Isolate* isl) {
+ Handle<Type> lower(Type::Union(b1.lower, b2.lower), isl);
+ Handle<Type> upper(Type::Intersect(b1.upper, b2.upper), isl);
+ // Lower bounds are considered approximate, correct as necessary.
+ lower = handle(Type::Intersect(lower, upper), isl);
+ return Bounds(lower, upper);
+ }
+
+ // Join: either b1 or b2 is known to hold.
+ static Bounds Either(Bounds b1, Bounds b2, Isolate* isl) {
+ return Bounds(
+ handle(Type::Intersect(b1.lower, b2.lower), isl),
+ handle(Type::Union(b1.upper, b2.upper), isl));
+ }
+
+ static Bounds NarrowLower(Bounds b, Handle<Type> t, Isolate* isl) {
+ // Lower bounds are considered approximate, correct as necessary.
+ t = handle(Type::Intersect(t, b.upper), isl);
+ return Bounds(handle(Type::Union(b.lower, t), isl), b.upper);
+ }
+ static Bounds NarrowUpper(Bounds b, Handle<Type> t, Isolate* isl) {
+ return Bounds(
+ handle(Type::Intersect(b.lower, t), isl),
+ handle(Type::Intersect(b.upper, t), isl));
+ }
+};
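+
+// A worked example: with b1 = [Smi, Number] and b2 = [None, Signed32],
+// Both(b1, b2) is [Smi, Signed32] and Either(b1, b2) is [None, Number].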
+
+} } // namespace v8::internal
+
+#endif // V8_TYPES_H_
diff --git a/deps/v8/src/typing.cc b/deps/v8/src/typing.cc
new file mode 100644
index 000000000..03c1ad16e
--- /dev/null
+++ b/deps/v8/src/typing.cc
@@ -0,0 +1,719 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "typing.h"
+
+#include "parser.h" // for CompileTimeValue; TODO(rossberg): should move
+#include "scopes.h"
+
+namespace v8 {
+namespace internal {
+
+
+AstTyper::AstTyper(CompilationInfo* info)
+ : info_(info),
+ oracle_(
+ Handle<Code>(info->closure()->shared()->code()),
+ Handle<Context>(info->closure()->context()->native_context()),
+ info->isolate(),
+ info->zone()),
+ store_(info->zone()) {
+ InitializeAstVisitor(info->isolate());
+}
+
+
+#define RECURSE(call) \
+ do { \
+ ASSERT(!visitor->HasStackOverflow()); \
+ call; \
+ if (visitor->HasStackOverflow()) return; \
+ } while (false)
+
+void AstTyper::Run(CompilationInfo* info) {
+ AstTyper* visitor = new(info->zone()) AstTyper(info);
+ Scope* scope = info->scope();
+
+ // Handle implicit declaration of the function name in named function
+ // expressions before other declarations.
+ if (scope->is_function_scope() && scope->function() != NULL) {
+ RECURSE(visitor->VisitVariableDeclaration(scope->function()));
+ }
+ RECURSE(visitor->VisitDeclarations(scope->declarations()));
+ RECURSE(visitor->VisitStatements(info->function()->body()));
+}
+
+#undef RECURSE
+
+#define RECURSE(call) \
+ do { \
+ ASSERT(!HasStackOverflow()); \
+ call; \
+ if (HasStackOverflow()) return; \
+ } while (false)
+
+
+void AstTyper::VisitStatements(ZoneList<Statement*>* stmts) {
+ for (int i = 0; i < stmts->length(); ++i) {
+ Statement* stmt = stmts->at(i);
+ RECURSE(Visit(stmt));
+ if (stmt->IsJump()) break;
+ }
+}
+
+
+void AstTyper::VisitBlock(Block* stmt) {
+ RECURSE(VisitStatements(stmt->statements()));
+ if (stmt->labels() != NULL) {
+ store_.Forget(); // Control may transfer here via 'break l'.
+ }
+}
+
+
+void AstTyper::VisitExpressionStatement(ExpressionStatement* stmt) {
+ RECURSE(Visit(stmt->expression()));
+}
+
+
+void AstTyper::VisitEmptyStatement(EmptyStatement* stmt) {
+}
+
+
+void AstTyper::VisitIfStatement(IfStatement* stmt) {
+ // Collect type feedback.
+ if (!stmt->condition()->ToBooleanIsTrue() &&
+ !stmt->condition()->ToBooleanIsFalse()) {
+ stmt->condition()->RecordToBooleanTypeFeedback(oracle());
+ }
+
+ RECURSE(Visit(stmt->condition()));
+ Effects then_effects = EnterEffects();
+ RECURSE(Visit(stmt->then_statement()));
+ ExitEffects();
+ Effects else_effects = EnterEffects();
+ RECURSE(Visit(stmt->else_statement()));
+ ExitEffects();
+ then_effects.Alt(else_effects);
+ store_.Seq(then_effects);
+}
+
+
+void AstTyper::VisitContinueStatement(ContinueStatement* stmt) {
+ // TODO(rossberg): is it worth having a non-termination effect?
+}
+
+
+void AstTyper::VisitBreakStatement(BreakStatement* stmt) {
+ // TODO(rossberg): is it worth having a non-termination effect?
+}
+
+
+void AstTyper::VisitReturnStatement(ReturnStatement* stmt) {
+ // Collect type feedback.
+ // TODO(rossberg): we only need this for inlining into test contexts...
+ stmt->expression()->RecordToBooleanTypeFeedback(oracle());
+
+ RECURSE(Visit(stmt->expression()));
+ // TODO(rossberg): is it worth having a non-termination effect?
+}
+
+
+void AstTyper::VisitWithStatement(WithStatement* stmt) {
+ RECURSE(stmt->expression());
+ RECURSE(stmt->statement());
+}
+
+
+void AstTyper::VisitSwitchStatement(SwitchStatement* stmt) {
+ RECURSE(Visit(stmt->tag()));
+
+ ZoneList<CaseClause*>* clauses = stmt->cases();
+ SwitchStatement::SwitchType switch_type = stmt->switch_type();
+ Effects local_effects(zone());
+ bool complex_effects = false; // True for label effects or fall-through.
+
+ for (int i = 0; i < clauses->length(); ++i) {
+ CaseClause* clause = clauses->at(i);
+ Effects clause_effects = EnterEffects();
+
+ if (!clause->is_default()) {
+ Expression* label = clause->label();
+ SwitchStatement::SwitchType label_switch_type =
+ label->IsSmiLiteral() ? SwitchStatement::SMI_SWITCH :
+ label->IsStringLiteral() ? SwitchStatement::STRING_SWITCH :
+ SwitchStatement::GENERIC_SWITCH;
+ if (switch_type == SwitchStatement::UNKNOWN_SWITCH)
+ switch_type = label_switch_type;
+ else if (switch_type != label_switch_type)
+ switch_type = SwitchStatement::GENERIC_SWITCH;
+
+ RECURSE(Visit(label));
+ if (!clause_effects.IsEmpty()) complex_effects = true;
+ }
+
+ ZoneList<Statement*>* stmts = clause->statements();
+ RECURSE(VisitStatements(stmts));
+ ExitEffects();
+ if (stmts->is_empty() || stmts->last()->IsJump()) {
+ local_effects.Alt(clause_effects);
+ } else {
+ complex_effects = true;
+ }
+ }
+
+ if (complex_effects) {
+ store_.Forget(); // Reached this in unknown state.
+ } else {
+ store_.Seq(local_effects);
+ }
+
+ if (switch_type == SwitchStatement::UNKNOWN_SWITCH)
+ switch_type = SwitchStatement::GENERIC_SWITCH;
+ stmt->set_switch_type(switch_type);
+
+ // Collect type feedback.
+ // TODO(rossberg): can we eliminate this special case and extra loop?
+ if (switch_type == SwitchStatement::SMI_SWITCH) {
+ for (int i = 0; i < clauses->length(); ++i) {
+ CaseClause* clause = clauses->at(i);
+ if (!clause->is_default())
+ clause->RecordTypeFeedback(oracle());
+ }
+ }
+}
+
+
+void AstTyper::VisitCaseClause(CaseClause* clause) {
+ UNREACHABLE();
+}
+
+
+void AstTyper::VisitDoWhileStatement(DoWhileStatement* stmt) {
+ // Collect type feedback.
+ if (!stmt->cond()->ToBooleanIsTrue()) {
+ stmt->cond()->RecordToBooleanTypeFeedback(oracle());
+ }
+
+ // TODO(rossberg): refine the unconditional Forget (here and elsewhere) by
+ // computing the set of variables assigned in only some of the origins of the
+ // control transfer (such as the loop body here).
+ store_.Forget(); // Control may transfer here via looping or 'continue'.
+ RECURSE(Visit(stmt->body()));
+ RECURSE(Visit(stmt->cond()));
+ store_.Forget(); // Control may transfer here via 'break'.
+}
+
+
+void AstTyper::VisitWhileStatement(WhileStatement* stmt) {
+ // Collect type feedback.
+ if (!stmt->cond()->ToBooleanIsTrue()) {
+ stmt->cond()->RecordToBooleanTypeFeedback(oracle());
+ }
+
+ store_.Forget(); // Control may transfer here via looping or 'continue'.
+ RECURSE(Visit(stmt->cond()));
+ RECURSE(Visit(stmt->body()));
+ store_.Forget(); // Control may transfer here via termination or 'break'.
+}
+
+
+void AstTyper::VisitForStatement(ForStatement* stmt) {
+ if (stmt->init() != NULL) {
+ RECURSE(Visit(stmt->init()));
+ }
+ store_.Forget(); // Control may transfer here via looping.
+ if (stmt->cond() != NULL) {
+ // Collect type feedback.
+ stmt->cond()->RecordToBooleanTypeFeedback(oracle());
+
+ RECURSE(Visit(stmt->cond()));
+ }
+ RECURSE(Visit(stmt->body()));
+ if (stmt->next() != NULL) {
+ store_.Forget(); // Control may transfer here via 'continue'.
+ RECURSE(Visit(stmt->next()));
+ }
+ store_.Forget(); // Control may transfer here via termination or 'break'.
+}
+
+
+void AstTyper::VisitForInStatement(ForInStatement* stmt) {
+ // Collect type feedback.
+ stmt->RecordTypeFeedback(oracle());
+
+ RECURSE(Visit(stmt->enumerable()));
+ store_.Forget(); // Control may transfer here via looping or 'continue'.
+ RECURSE(Visit(stmt->body()));
+ store_.Forget(); // Control may transfer here via 'break'.
+}
+
+
+void AstTyper::VisitForOfStatement(ForOfStatement* stmt) {
+ RECURSE(Visit(stmt->iterable()));
+ store_.Forget(); // Control may transfer here via looping or 'continue'.
+ RECURSE(Visit(stmt->body()));
+ store_.Forget(); // Control may transfer here via 'break'.
+}
+
+
+void AstTyper::VisitTryCatchStatement(TryCatchStatement* stmt) {
+ Effects try_effects = EnterEffects();
+ RECURSE(Visit(stmt->try_block()));
+ ExitEffects();
+ Effects catch_effects = EnterEffects();
+ store_.Forget(); // Control may transfer here via 'throw'.
+ RECURSE(Visit(stmt->catch_block()));
+ ExitEffects();
+ try_effects.Alt(catch_effects);
+ store_.Seq(try_effects);
+ // At this point, only variables that were reassigned in the catch block are
+ // still remembered.
+}
+
+
+void AstTyper::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
+ RECURSE(Visit(stmt->try_block()));
+ store_.Forget(); // Control may transfer here via 'throw'.
+ RECURSE(Visit(stmt->finally_block()));
+}
+
+
+void AstTyper::VisitDebuggerStatement(DebuggerStatement* stmt) {
+ store_.Forget(); // May do whatever.
+}
+
+
+void AstTyper::VisitFunctionLiteral(FunctionLiteral* expr) {
+}
+
+
+void AstTyper::VisitNativeFunctionLiteral(NativeFunctionLiteral* expr) {
+}
+
+
+void AstTyper::VisitConditional(Conditional* expr) {
+ // Collect type feedback.
+ expr->condition()->RecordToBooleanTypeFeedback(oracle());
+
+ RECURSE(Visit(expr->condition()));
+ Effects then_effects = EnterEffects();
+ RECURSE(Visit(expr->then_expression()));
+ ExitEffects();
+ Effects else_effects = EnterEffects();
+ RECURSE(Visit(expr->else_expression()));
+ ExitEffects();
+ then_effects.Alt(else_effects);
+ store_.Seq(then_effects);
+
+ NarrowType(expr, Bounds::Either(
+ expr->then_expression()->bounds(),
+ expr->else_expression()->bounds(), isolate_));
+}
+
+
+void AstTyper::VisitVariableProxy(VariableProxy* expr) {
+ Variable* var = expr->var();
+ if (var->IsStackAllocated()) {
+ NarrowType(expr, store_.LookupBounds(variable_index(var)));
+ }
+}
+
+
+void AstTyper::VisitLiteral(Literal* expr) {
+ Type* type = Type::Constant(expr->value(), isolate_);
+ NarrowType(expr, Bounds(type, isolate_));
+}
+
+
+void AstTyper::VisitRegExpLiteral(RegExpLiteral* expr) {
+ NarrowType(expr, Bounds(Type::RegExp(), isolate_));
+}
+
+
+void AstTyper::VisitObjectLiteral(ObjectLiteral* expr) {
+ ZoneList<ObjectLiteral::Property*>* properties = expr->properties();
+ for (int i = 0; i < properties->length(); ++i) {
+ ObjectLiteral::Property* prop = properties->at(i);
+
+ // Collect type feedback.
+ if ((prop->kind() == ObjectLiteral::Property::MATERIALIZED_LITERAL &&
+ !CompileTimeValue::IsCompileTimeValue(prop->value())) ||
+ prop->kind() == ObjectLiteral::Property::COMPUTED) {
+ if (prop->key()->value()->IsInternalizedString() && prop->emit_store()) {
+ prop->RecordTypeFeedback(oracle());
+ }
+ }
+
+ RECURSE(Visit(prop->value()));
+ }
+
+ NarrowType(expr, Bounds(Type::Object(), isolate_));
+}
+
+
+void AstTyper::VisitArrayLiteral(ArrayLiteral* expr) {
+ ZoneList<Expression*>* values = expr->values();
+ for (int i = 0; i < values->length(); ++i) {
+ Expression* value = values->at(i);
+ RECURSE(Visit(value));
+ }
+
+ NarrowType(expr, Bounds(Type::Array(), isolate_));
+}
+
+
+void AstTyper::VisitAssignment(Assignment* expr) {
+ // TODO(rossberg): Can we clean this up?
+ if (expr->is_compound()) {
+ // Collect type feedback.
+ Expression* target = expr->target();
+ Property* prop = target->AsProperty();
+ if (prop != NULL) {
+ prop->RecordTypeFeedback(oracle(), zone());
+ expr->RecordTypeFeedback(oracle(), zone());
+ }
+
+ RECURSE(Visit(expr->binary_operation()));
+
+ NarrowType(expr, expr->binary_operation()->bounds());
+ } else {
+ // Collect type feedback.
+ if (expr->target()->IsProperty()) {
+ expr->RecordTypeFeedback(oracle(), zone());
+ }
+
+ RECURSE(Visit(expr->target()));
+ RECURSE(Visit(expr->value()));
+
+ NarrowType(expr, expr->value()->bounds());
+ }
+
+ VariableProxy* proxy = expr->target()->AsVariableProxy();
+ if (proxy != NULL && proxy->var()->IsStackAllocated()) {
+ store_.Seq(variable_index(proxy->var()), Effect(expr->bounds()));
+ }
+}
+
+
+void AstTyper::VisitYield(Yield* expr) {
+ RECURSE(Visit(expr->generator_object()));
+ RECURSE(Visit(expr->expression()));
+
+ // We don't know anything about the result type.
+}
+
+
+void AstTyper::VisitThrow(Throw* expr) {
+ RECURSE(Visit(expr->exception()));
+ // TODO(rossberg): is it worth having a non-termination effect?
+
+ NarrowType(expr, Bounds(Type::None(), isolate_));
+}
+
+
+void AstTyper::VisitProperty(Property* expr) {
+ // Collect type feedback.
+ expr->RecordTypeFeedback(oracle(), zone());
+
+ RECURSE(Visit(expr->obj()));
+ RECURSE(Visit(expr->key()));
+
+ // We don't know anything about the result type.
+}
+
+
+void AstTyper::VisitCall(Call* expr) {
+ // Collect type feedback.
+ Expression* callee = expr->expression();
+ Property* prop = callee->AsProperty();
+ if (prop != NULL) {
+ if (prop->key()->IsPropertyName())
+ expr->RecordTypeFeedback(oracle(), CALL_AS_METHOD);
+ } else {
+ expr->RecordTypeFeedback(oracle(), CALL_AS_FUNCTION);
+ }
+
+ RECURSE(Visit(expr->expression()));
+ ZoneList<Expression*>* args = expr->arguments();
+ for (int i = 0; i < args->length(); ++i) {
+ Expression* arg = args->at(i);
+ RECURSE(Visit(arg));
+ }
+
+ VariableProxy* proxy = expr->expression()->AsVariableProxy();
+ if (proxy != NULL && proxy->var()->is_possibly_eval(isolate())) {
+ store_.Forget(); // Eval could do whatever to local variables.
+ }
+
+ // We don't know anything about the result type.
+}
+
+
+void AstTyper::VisitCallNew(CallNew* expr) {
+ // Collect type feedback.
+ expr->RecordTypeFeedback(oracle());
+
+ RECURSE(Visit(expr->expression()));
+ ZoneList<Expression*>* args = expr->arguments();
+ for (int i = 0; i < args->length(); ++i) {
+ Expression* arg = args->at(i);
+ RECURSE(Visit(arg));
+ }
+
+ // We don't know anything about the result type.
+}
+
+
+void AstTyper::VisitCallRuntime(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ for (int i = 0; i < args->length(); ++i) {
+ Expression* arg = args->at(i);
+ RECURSE(Visit(arg));
+ }
+
+ // We don't know anything about the result type.
+}
+
+
+void AstTyper::VisitUnaryOperation(UnaryOperation* expr) {
+ // Collect type feedback.
+ if (expr->op() == Token::NOT) {
+ // TODO(rossberg): only do in test or value context.
+ expr->expression()->RecordToBooleanTypeFeedback(oracle());
+ }
+
+ RECURSE(Visit(expr->expression()));
+
+ switch (expr->op()) {
+ case Token::NOT:
+ case Token::DELETE:
+ NarrowType(expr, Bounds(Type::Boolean(), isolate_));
+ break;
+ case Token::VOID:
+ NarrowType(expr, Bounds(Type::Undefined(), isolate_));
+ break;
+ case Token::TYPEOF:
+ NarrowType(expr, Bounds(Type::InternalizedString(), isolate_));
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void AstTyper::VisitCountOperation(CountOperation* expr) {
+ // Collect type feedback.
+ expr->RecordTypeFeedback(oracle(), zone());
+ Property* prop = expr->expression()->AsProperty();
+ if (prop != NULL) {
+ prop->RecordTypeFeedback(oracle(), zone());
+ }
+
+ RECURSE(Visit(expr->expression()));
+
+ NarrowType(expr, Bounds(Type::Smi(), Type::Number(), isolate_));
+
+ VariableProxy* proxy = expr->expression()->AsVariableProxy();
+ if (proxy != NULL && proxy->var()->IsStackAllocated()) {
+ store_.Seq(variable_index(proxy->var()), Effect(expr->bounds()));
+ }
+}
+
+
+void AstTyper::VisitBinaryOperation(BinaryOperation* expr) {
+ // Collect type feedback.
+ Handle<Type> type, left_type, right_type;
+ Maybe<int> fixed_right_arg;
+ oracle()->BinaryType(expr->BinaryOperationFeedbackId(),
+ &left_type, &right_type, &type, &fixed_right_arg, expr->op());
+ NarrowLowerType(expr, type);
+ NarrowLowerType(expr->left(), left_type);
+ NarrowLowerType(expr->right(), right_type);
+ expr->set_fixed_right_arg(fixed_right_arg);
+ if (expr->op() == Token::OR || expr->op() == Token::AND) {
+ expr->left()->RecordToBooleanTypeFeedback(oracle());
+ }
+
+ switch (expr->op()) {
+ case Token::COMMA:
+ RECURSE(Visit(expr->left()));
+ RECURSE(Visit(expr->right()));
+ NarrowType(expr, expr->right()->bounds());
+ break;
+ case Token::OR:
+ case Token::AND: {
+ Effects left_effects = EnterEffects();
+ RECURSE(Visit(expr->left()));
+ ExitEffects();
+ Effects right_effects = EnterEffects();
+ RECURSE(Visit(expr->right()));
+ ExitEffects();
+ left_effects.Alt(right_effects);
+ store_.Seq(left_effects);
+
+ NarrowType(expr, Bounds::Either(
+ expr->left()->bounds(), expr->right()->bounds(), isolate_));
+ break;
+ }
+ case Token::BIT_OR:
+ case Token::BIT_AND: {
+ RECURSE(Visit(expr->left()));
+ RECURSE(Visit(expr->right()));
+ Handle<Type> upper(
+ Type::Union(
+ expr->left()->bounds().upper, expr->right()->bounds().upper),
+ isolate_);
+ if (!upper->Is(Type::Signed32()))
+ upper = handle(Type::Signed32(), isolate_);
+ Handle<Type> lower(Type::Intersect(
+ handle(Type::Smi(), isolate_), upper), isolate_);
+ NarrowType(expr, Bounds(lower, upper));
+ break;
+ }
+ case Token::BIT_XOR:
+ case Token::SHL:
+ case Token::SAR:
+ RECURSE(Visit(expr->left()));
+ RECURSE(Visit(expr->right()));
+ NarrowType(expr, Bounds(Type::Smi(), Type::Signed32(), isolate_));
+ break;
+ case Token::SHR:
+ RECURSE(Visit(expr->left()));
+ RECURSE(Visit(expr->right()));
+ // TODO(rossberg): The upper bound would be Unsigned32, but since there
+ // is no 'positive Smi' type for the lower bound, we use the smallest
+ // union of Smi and Unsigned32 as upper bound instead.
+ NarrowType(expr, Bounds(Type::Smi(), Type::Number(), isolate_));
+ break;
+ case Token::ADD: {
+ RECURSE(Visit(expr->left()));
+ RECURSE(Visit(expr->right()));
+ Bounds l = expr->left()->bounds();
+ Bounds r = expr->right()->bounds();
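+ // For instance, Number + Number is given bounds [Smi, Number], while a
+ // String on either side forces the upper bound to String.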
+ Type* lower =
+ l.lower->Is(Type::None()) || r.lower->Is(Type::None()) ?
+ Type::None() :
+ l.lower->Is(Type::String()) || r.lower->Is(Type::String()) ?
+ Type::String() :
+ l.lower->Is(Type::Number()) && r.lower->Is(Type::Number()) ?
+ Type::Smi() : Type::None();
+ Type* upper =
+ l.upper->Is(Type::String()) || r.upper->Is(Type::String()) ?
+ Type::String() :
+ l.upper->Is(Type::Number()) && r.upper->Is(Type::Number()) ?
+ Type::Number() : Type::NumberOrString();
+ NarrowType(expr, Bounds(lower, upper, isolate_));
+ break;
+ }
+ case Token::SUB:
+ case Token::MUL:
+ case Token::DIV:
+ case Token::MOD:
+ RECURSE(Visit(expr->left()));
+ RECURSE(Visit(expr->right()));
+ NarrowType(expr, Bounds(Type::Smi(), Type::Number(), isolate_));
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void AstTyper::VisitCompareOperation(CompareOperation* expr) {
+ // Collect type feedback.
+ Handle<Type> left_type, right_type, combined_type;
+ oracle()->CompareType(expr->CompareOperationFeedbackId(),
+ &left_type, &right_type, &combined_type);
+ NarrowLowerType(expr->left(), left_type);
+ NarrowLowerType(expr->right(), right_type);
+ expr->set_combined_type(combined_type);
+
+ RECURSE(Visit(expr->left()));
+ RECURSE(Visit(expr->right()));
+
+ NarrowType(expr, Bounds(Type::Boolean(), isolate_));
+}
+
+
+void AstTyper::VisitThisFunction(ThisFunction* expr) {
+}
+
+
+void AstTyper::VisitDeclarations(ZoneList<Declaration*>* decls) {
+ for (int i = 0; i < decls->length(); ++i) {
+ Declaration* decl = decls->at(i);
+ RECURSE(Visit(decl));
+ }
+}
+
+
+void AstTyper::VisitVariableDeclaration(VariableDeclaration* declaration) {
+}
+
+
+void AstTyper::VisitFunctionDeclaration(FunctionDeclaration* declaration) {
+ RECURSE(Visit(declaration->fun()));
+}
+
+
+void AstTyper::VisitModuleDeclaration(ModuleDeclaration* declaration) {
+ RECURSE(Visit(declaration->module()));
+}
+
+
+void AstTyper::VisitImportDeclaration(ImportDeclaration* declaration) {
+ RECURSE(Visit(declaration->module()));
+}
+
+
+void AstTyper::VisitExportDeclaration(ExportDeclaration* declaration) {
+}
+
+
+void AstTyper::VisitModuleLiteral(ModuleLiteral* module) {
+ RECURSE(Visit(module->body()));
+}
+
+
+void AstTyper::VisitModuleVariable(ModuleVariable* module) {
+}
+
+
+void AstTyper::VisitModulePath(ModulePath* module) {
+ RECURSE(Visit(module->module()));
+}
+
+
+void AstTyper::VisitModuleUrl(ModuleUrl* module) {
+}
+
+
+void AstTyper::VisitModuleStatement(ModuleStatement* stmt) {
+ RECURSE(Visit(stmt->body()));
+}
+
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/typing.h b/deps/v8/src/typing.h
new file mode 100644
index 000000000..c942b0063
--- /dev/null
+++ b/deps/v8/src/typing.h
@@ -0,0 +1,102 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_TYPING_H_
+#define V8_TYPING_H_
+
+#include "v8.h"
+
+#include "allocation.h"
+#include "ast.h"
+#include "compiler.h"
+#include "type-info.h"
+#include "types.h"
+#include "effects.h"
+#include "zone.h"
+#include "scopes.h"
+
+namespace v8 {
+namespace internal {
+
+
+class AstTyper: public AstVisitor {
+ public:
+ static void Run(CompilationInfo* info);
+
+ void* operator new(size_t size, Zone* zone) {
+ return zone->New(static_cast<int>(size));
+ }
+ void operator delete(void* pointer, Zone* zone) { }
+ void operator delete(void* pointer) { }
+
+ DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
+
+ private:
+ explicit AstTyper(CompilationInfo* info);
+
+ static const int kNoVar = INT_MIN;
+ typedef v8::internal::Effects<int, kNoVar> Effects;
+ typedef v8::internal::NestedEffects<int, kNoVar> Store;
+
+ CompilationInfo* info_;
+ TypeFeedbackOracle oracle_;
+ Store store_;
+
+ TypeFeedbackOracle* oracle() { return &oracle_; }
+ Zone* zone() const { return info_->zone(); }
+
+ void NarrowType(Expression* e, Bounds b) {
+ e->set_bounds(Bounds::Both(e->bounds(), b, isolate_));
+ }
+ void NarrowLowerType(Expression* e, Handle<Type> t) {
+ e->set_bounds(Bounds::NarrowLower(e->bounds(), t, isolate_));
+ }
+
+ Effects EnterEffects() {
+ store_ = store_.Push();
+ return store_.Top();
+ }
+ void ExitEffects() { store_ = store_.Pop(); }
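+
+ // Branching constructs are typed with a push/alt/seq pattern; a sketch
+ // (branch names illustrative, see the Visit* methods in typing.cc):
+ //   Effects then_effects = EnterEffects();
+ //   Visit(then_branch);
+ //   ExitEffects();
+ //   ...same for the else branch...
+ //   then_effects.Alt(else_effects);  // either branch may have executed
+ //   store_.Seq(then_effects);        // apply the combined effect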
+
+ int variable_index(Variable* var) {
+ return var->IsStackLocal() ? var->index() :
+ var->IsParameter() ? -var->index() : kNoVar;
+ }
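+ // Stack locals are keyed by their index, parameters by the negated index,
+ // and anything else maps to the kNoVar sentinel.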
+
+ void VisitDeclarations(ZoneList<Declaration*>* declarations);
+ void VisitStatements(ZoneList<Statement*>* statements);
+
+#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
+ AST_NODE_LIST(DECLARE_VISIT)
+#undef DECLARE_VISIT
+
+ DISALLOW_COPY_AND_ASSIGN(AstTyper);
+};
+
+} } // namespace v8::internal
+
+#endif // V8_TYPING_H_
diff --git a/deps/v8/src/unbound-queue-inl.h b/deps/v8/src/unbound-queue-inl.h
index fffb1dbcf..796ba401d 100644
--- a/deps/v8/src/unbound-queue-inl.h
+++ b/deps/v8/src/unbound-queue-inl.h
@@ -30,6 +30,8 @@
#include "unbound-queue.h"
+#include "atomicops.h"
+
namespace v8 {
namespace internal {
@@ -66,11 +68,12 @@ void UnboundQueue<Record>::DeleteFirst() {
template<typename Record>
-void UnboundQueue<Record>::Dequeue(Record* rec) {
- ASSERT(divider_ != last_);
+bool UnboundQueue<Record>::Dequeue(Record* rec) {
+ if (divider_ == Acquire_Load(&last_)) return false;
Node* next = reinterpret_cast<Node*>(divider_)->next;
*rec = next->value;
- OS::ReleaseStore(&divider_, reinterpret_cast<AtomicWord>(next));
+ Release_Store(&divider_, reinterpret_cast<AtomicWord>(next));
+ return true;
}
@@ -78,14 +81,23 @@ template<typename Record>
void UnboundQueue<Record>::Enqueue(const Record& rec) {
Node*& next = reinterpret_cast<Node*>(last_)->next;
next = new Node(rec);
- OS::ReleaseStore(&last_, reinterpret_cast<AtomicWord>(next));
- while (first_ != reinterpret_cast<Node*>(divider_)) DeleteFirst();
+ Release_Store(&last_, reinterpret_cast<AtomicWord>(next));
+
+ while (first_ != reinterpret_cast<Node*>(Acquire_Load(&divider_))) {
+ DeleteFirst();
+ }
+}
+
+
+template<typename Record>
+bool UnboundQueue<Record>::IsEmpty() const {
+ return NoBarrier_Load(&divider_) == NoBarrier_Load(&last_);
}
template<typename Record>
-Record* UnboundQueue<Record>::Peek() {
- ASSERT(divider_ != last_);
+Record* UnboundQueue<Record>::Peek() const {
+ if (divider_ == Acquire_Load(&last_)) return NULL;
Node* next = reinterpret_cast<Node*>(divider_)->next;
return &next->value;
}
diff --git a/deps/v8/src/unbound-queue.h b/deps/v8/src/unbound-queue.h
index 59a426b7f..429e3c673 100644
--- a/deps/v8/src/unbound-queue.h
+++ b/deps/v8/src/unbound-queue.h
@@ -46,10 +46,10 @@ class UnboundQueue BASE_EMBEDDED {
inline UnboundQueue();
inline ~UnboundQueue();
- INLINE(void Dequeue(Record* rec));
+ INLINE(bool Dequeue(Record* rec));
INLINE(void Enqueue(const Record& rec));
- INLINE(bool IsEmpty()) { return divider_ == last_; }
- INLINE(Record* Peek());
+ INLINE(bool IsEmpty() const);
+ INLINE(Record* Peek() const);
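+
+  // Dequeue and Peek no longer assert on an empty queue; a consumer can now
+  // simply poll, e.g. (sketch, names illustrative):
+  //   Record rec;
+  //   while (queue.Dequeue(&rec)) Process(rec);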
private:
INLINE(void DeleteFirst());
diff --git a/deps/v8/src/unicode-inl.h b/deps/v8/src/unicode-inl.h
index ec9c69f8d..f861f9f2d 100644
--- a/deps/v8/src/unicode-inl.h
+++ b/deps/v8/src/unicode-inl.h
@@ -30,6 +30,7 @@
#include "unicode.h"
#include "checks.h"
+#include "platform.h"
namespace unibrow {
@@ -79,6 +80,34 @@ template <class T, int s> int Mapping<T, s>::CalculateValue(uchar c, uchar n,
}
+uint16_t Latin1::ConvertNonLatin1ToLatin1(uint16_t c) {
+ ASSERT(c > Latin1::kMaxChar);
+ switch (c) {
+ // These are equivalent characters in Unicode.
+ case 0x39c:
+ case 0x3bc:
+ return 0xb5;
+ // This is the uppercase of a Latin-1 character
+ // and itself lies outside of Latin-1.
+ case 0x178:
+ return 0xff;
+ }
+ return 0;
+}
+
+
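+// For example, EncodeOneByte(str, 0xE9) writes the two bytes 0xC3 0xA9 (the
+// UTF-8 encoding of U+00E9); bytes up to kMaxOneByteChar are written through
+// unchanged.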
+unsigned Utf8::EncodeOneByte(char* str, uint8_t c) {
+ static const int kMask = ~(1 << 6);
+ if (c <= kMaxOneByteChar) {
+ str[0] = c;
+ return 1;
+ }
+ str[0] = 0xC0 | (c >> 6);
+ str[1] = 0x80 | (c & kMask);
+ return 2;
+}
+
+
unsigned Utf8::Encode(char* str, uchar c, int previous) {
static const int kMask = ~(1 << 6);
if (c <= kMaxOneByteChar) {
@@ -137,113 +166,51 @@ unsigned Utf8::Length(uchar c, int previous) {
}
}
-uchar CharacterStream::GetNext() {
- uchar result = DecodeCharacter(buffer_, &cursor_);
- if (remaining_ == 1) {
- cursor_ = 0;
- FillBuffer();
- } else {
- remaining_--;
- }
- ASSERT(BoundsCheck(cursor_));
- return result;
-}
-
-#if __BYTE_ORDER == __LITTLE_ENDIAN
-#define IF_LITTLE(expr) expr
-#define IF_BIG(expr) ((void) 0)
-#elif __BYTE_ORDER == __BIG_ENDIAN
-#define IF_LITTLE(expr) ((void) 0)
-#define IF_BIG(expr) expr
-#else
-#warning Unknown byte ordering
-#endif
-
-bool CharacterStream::EncodeAsciiCharacter(uchar c, byte* buffer,
- unsigned capacity, unsigned& offset) {
- if (offset >= capacity) return false;
- buffer[offset] = c;
- offset += 1;
- return true;
-}
-
-bool CharacterStream::EncodeNonAsciiCharacter(uchar c, byte* buffer,
- unsigned capacity, unsigned& offset) {
- unsigned aligned = (offset + 0x3) & ~0x3;
- if ((aligned + sizeof(uchar)) > capacity)
- return false;
- if (offset == aligned) {
- IF_LITTLE(*reinterpret_cast<uchar*>(buffer + aligned) = (c << 8) | 0x80);
- IF_BIG(*reinterpret_cast<uchar*>(buffer + aligned) = c | (1 << 31));
- } else {
- buffer[offset] = 0x80;
- IF_LITTLE(*reinterpret_cast<uchar*>(buffer + aligned) = c << 8);
- IF_BIG(*reinterpret_cast<uchar*>(buffer + aligned) = c);
- }
- offset = aligned + sizeof(uchar);
- return true;
-}
-
-bool CharacterStream::EncodeCharacter(uchar c, byte* buffer, unsigned capacity,
- unsigned& offset) {
- if (c <= Utf8::kMaxOneByteChar) {
- return EncodeAsciiCharacter(c, buffer, capacity, offset);
- } else {
- return EncodeNonAsciiCharacter(c, buffer, capacity, offset);
- }
-}
-
-uchar CharacterStream::DecodeCharacter(const byte* buffer, unsigned* offset) {
- byte b = buffer[*offset];
- if (b <= Utf8::kMaxOneByteChar) {
- (*offset)++;
- return b;
- } else {
- unsigned aligned = (*offset + 0x3) & ~0x3;
- *offset = aligned + sizeof(uchar);
- IF_LITTLE(return *reinterpret_cast<const uchar*>(buffer + aligned) >> 8);
- IF_BIG(return *reinterpret_cast<const uchar*>(buffer + aligned) &
- ~(1 << 31));
- }
-}
-
-#undef IF_LITTLE
-#undef IF_BIG
-
-template <class R, class I, unsigned s>
-void InputBuffer<R, I, s>::FillBuffer() {
- buffer_ = R::ReadBlock(input_, util_buffer_, s, &remaining_, &offset_);
-}
-
-template <class R, class I, unsigned s>
-void InputBuffer<R, I, s>::Rewind() {
- Reset(input_);
-}
-
-template <class R, class I, unsigned s>
-void InputBuffer<R, I, s>::Reset(unsigned position, I input) {
- input_ = input;
- remaining_ = 0;
- cursor_ = 0;
- offset_ = position;
- buffer_ = R::ReadBlock(input_, util_buffer_, s, &remaining_, &offset_);
-}
-
-template <class R, class I, unsigned s>
-void InputBuffer<R, I, s>::Reset(I input) {
- Reset(0, input);
-}
-
-template <class R, class I, unsigned s>
-void InputBuffer<R, I, s>::Seek(unsigned position) {
- offset_ = position;
- buffer_ = R::ReadBlock(input_, util_buffer_, s, &remaining_, &offset_);
-}
-
-template <unsigned s>
-Utf8InputBuffer<s>::Utf8InputBuffer(const char* data, unsigned length)
- : InputBuffer<Utf8, Buffer<const char*>, s>(Buffer<const char*>(data,
- length)) {
+Utf8DecoderBase::Utf8DecoderBase()
+ : unbuffered_start_(NULL),
+ utf16_length_(0),
+ last_byte_of_buffer_unused_(false) {}
+
+Utf8DecoderBase::Utf8DecoderBase(uint16_t* buffer,
+ unsigned buffer_length,
+ const uint8_t* stream,
+ unsigned stream_length) {
+ Reset(buffer, buffer_length, stream, stream_length);
+}
+
+template<unsigned kBufferSize>
+Utf8Decoder<kBufferSize>::Utf8Decoder(const char* stream, unsigned length)
+ : Utf8DecoderBase(buffer_,
+ kBufferSize,
+ reinterpret_cast<const uint8_t*>(stream),
+ length) {
+}
+
+template<unsigned kBufferSize>
+void Utf8Decoder<kBufferSize>::Reset(const char* stream, unsigned length) {
+ Utf8DecoderBase::Reset(buffer_,
+ kBufferSize,
+ reinterpret_cast<const uint8_t*>(stream),
+ length);
+}
+
+template <unsigned kBufferSize>
+unsigned Utf8Decoder<kBufferSize>::WriteUtf16(uint16_t* data,
+ unsigned length) const {
+ ASSERT(length > 0);
+ if (length > utf16_length_) length = utf16_length_;
+ // memcpy everything in buffer.
+ unsigned buffer_length =
+ last_byte_of_buffer_unused_ ? kBufferSize - 1 : kBufferSize;
+ unsigned memcpy_length = length <= buffer_length ? length : buffer_length;
+ v8::internal::OS::MemCopy(data, buffer_, memcpy_length*sizeof(uint16_t));
+ if (length <= buffer_length) return length;
+ ASSERT(unbuffered_start_ != NULL);
+ // Copy the rest the slow way.
+ WriteUtf16Slow(unbuffered_start_,
+ data + buffer_length,
+ length - buffer_length);
+ return length;
}
} // namespace unibrow
diff --git a/deps/v8/src/unicode.cc b/deps/v8/src/unicode.cc
index 14f380642..bd3246778 100644
--- a/deps/v8/src/unicode.cc
+++ b/deps/v8/src/unicode.cc
@@ -52,14 +52,17 @@ static inline uchar TableGet(const int32_t* table, int index) {
return table[D * index];
}
+
static inline uchar GetEntry(int32_t entry) {
return entry & (kStartBit - 1);
}
+
static inline bool IsStart(int32_t entry) {
return (entry & kStartBit) != 0;
}
+
/**
* Look up a character in the unicode table using a mix of binary and
* interpolation search. For a uniformly distributed array
@@ -106,6 +109,7 @@ struct MultiCharacterSpecialCase {
uchar chars[kW];
};
+
// Look up the mapping for the given character in the specified table,
// which is of the specified length and uses the specified special case
// mapping for multi-char mappings. The next parameter is the character
@@ -277,84 +281,74 @@ uchar Utf8::CalculateValue(const byte* str,
}
-const byte* Utf8::ReadBlock(Buffer<const char*> str, byte* buffer,
- unsigned capacity, unsigned* chars_read_ptr, unsigned* offset_ptr) {
- unsigned offset = *offset_ptr;
- // Bail out early if we've reached the end of the string.
- if (offset == str.length()) {
- *chars_read_ptr = 0;
- return NULL;
- }
- const byte* data = reinterpret_cast<const byte*>(str.data());
- if (data[offset] <= kMaxOneByteChar) {
- // The next character is an ASCII char so we scan forward over
- // the following ASCII characters and return the next pure ASCII
- // substring
- const byte* result = data + offset;
- offset++;
- while ((offset < str.length()) && (data[offset] <= kMaxOneByteChar))
- offset++;
- *chars_read_ptr = offset - *offset_ptr;
- *offset_ptr = offset;
- return result;
- } else {
- // The next character is non-ASCII so we just fill the buffer
+void Utf8DecoderBase::Reset(uint16_t* buffer,
+ unsigned buffer_length,
+ const uint8_t* stream,
+ unsigned stream_length) {
+ // Assume everything will fit in the buffer and stream won't be needed.
+ last_byte_of_buffer_unused_ = false;
+ unbuffered_start_ = NULL;
+ bool writing_to_buffer = true;
+ // Loop until stream is read, writing to buffer as long as buffer has space.
+ unsigned utf16_length = 0;
+ while (stream_length != 0) {
unsigned cursor = 0;
- unsigned chars_read = 0;
- while (offset < str.length()) {
- uchar c = data[offset];
- if (c <= kMaxOneByteChar) {
- // Fast case for ASCII characters
- if (!CharacterStream::EncodeAsciiCharacter(c,
- buffer,
- capacity,
- cursor))
- break;
- offset += 1;
+ uint32_t character = Utf8::ValueOf(stream, stream_length, &cursor);
+ ASSERT(cursor > 0 && cursor <= stream_length);
+ stream += cursor;
+ stream_length -= cursor;
+ bool is_two_characters = character > Utf16::kMaxNonSurrogateCharCode;
+ utf16_length += is_two_characters ? 2 : 1;
+ // Don't need to write to the buffer, but still need utf16_length.
+ if (!writing_to_buffer) continue;
+ // Write out the characters to the buffer.
+ // Must check for equality with buffer_length as we've already updated it.
+ if (utf16_length <= buffer_length) {
+ if (is_two_characters) {
+ *buffer++ = Utf16::LeadSurrogate(character);
+ *buffer++ = Utf16::TrailSurrogate(character);
} else {
- unsigned chars = 0;
- c = Utf8::ValueOf(data + offset, str.length() - offset, &chars);
- if (!CharacterStream::EncodeNonAsciiCharacter(c,
- buffer,
- capacity,
- cursor))
- break;
- offset += chars;
+ *buffer++ = character;
}
- chars_read++;
+ if (utf16_length == buffer_length) {
+ // Just wrote last character of buffer
+ writing_to_buffer = false;
+ unbuffered_start_ = stream;
+ }
+ continue;
}
- *offset_ptr = offset;
- *chars_read_ptr = chars_read;
- return buffer;
+ // We have overrun the buffer: leave its last slot unused and move the
+ // unbuffered start back so this character is decoded again later.
+ ASSERT(is_two_characters);
+ writing_to_buffer = false;
+ last_byte_of_buffer_unused_ = true;
+ unbuffered_start_ = stream - cursor;
}
+ utf16_length_ = utf16_length;
}
-unsigned CharacterStream::Length() {
- unsigned result = 0;
- while (has_more()) {
- result++;
- GetNext();
- }
- Rewind();
- return result;
-}
-unsigned CharacterStream::Utf16Length() {
- unsigned result = 0;
- while (has_more()) {
- uchar c = GetNext();
- result += c > Utf16::kMaxNonSurrogateCharCode ? 2 : 1;
+void Utf8DecoderBase::WriteUtf16Slow(const uint8_t* stream,
+ uint16_t* data,
+ unsigned data_length) {
+ while (data_length != 0) {
+ unsigned cursor = 0;
+ uint32_t character = Utf8::ValueOf(stream, Utf8::kMaxEncodedSize, &cursor);
+ // No bounds checking of the stream is needed here,
+ // because Reset has already validated the whole input.
+ stream += cursor;
+ if (character > unibrow::Utf16::kMaxNonSurrogateCharCode) {
+ *data++ = Utf16::LeadSurrogate(character);
+ *data++ = Utf16::TrailSurrogate(character);
+ ASSERT(data_length > 1);
+ data_length -= 2;
+ } else {
+ *data++ = character;
+ data_length -= 1;
+ }
}
- Rewind();
- return result;
}
-void CharacterStream::Seek(unsigned position) {
- Rewind();
- for (unsigned i = 0; i < position; i++) {
- GetNext();
- }
-}
// Uppercase: point.category == 'Lu'
@@ -466,6 +460,7 @@ bool Uppercase::Is(uchar c) {
}
}
+
// Lowercase: point.category == 'Ll'
static const uint16_t kLowercaseTable0Size = 463;
@@ -577,6 +572,7 @@ bool Lowercase::Is(uchar c) {
}
}
+
// Letter: point.category in ['Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl' ]
static const uint16_t kLetterTable0Size = 435;
@@ -713,6 +709,7 @@ bool Letter::Is(uchar c) {
}
}
+
// Space: point.category == 'Zs'
static const uint16_t kSpaceTable0Size = 4;
@@ -734,6 +731,7 @@ bool Space::Is(uchar c) {
}
}
+
// Number: point.category == 'Nd'
static const uint16_t kNumberTable0Size = 56;
@@ -768,6 +766,7 @@ bool Number::Is(uchar c) {
}
}
+
// WhiteSpace: 'Ws' in point.properties
static const uint16_t kWhiteSpaceTable0Size = 7;
@@ -789,6 +788,7 @@ bool WhiteSpace::Is(uchar c) {
}
}
+
// LineTerminator: 'Lt' in point.properties
static const uint16_t kLineTerminatorTable0Size = 2;
@@ -810,6 +810,7 @@ bool LineTerminator::Is(uchar c) {
}
}
+
// CombiningMark: point.category in ['Mn', 'Mc']
static const uint16_t kCombiningMarkTable0Size = 258;
@@ -881,6 +882,7 @@ bool CombiningMark::Is(uchar c) {
}
}
+
// ConnectorPunctuation: point.category == 'Pc'
static const uint16_t kConnectorPunctuationTable0Size = 1;
diff --git a/deps/v8/src/unicode.h b/deps/v8/src/unicode.h
index 91b16c9f3..f1dcad0bc 100644
--- a/deps/v8/src/unicode.h
+++ b/deps/v8/src/unicode.h
@@ -29,7 +29,7 @@
#define V8_UNICODE_H_
#include <sys/types.h>
-
+#include "globals.h"
/**
* \file
* Definitions and convenience functions for working with unicode.
@@ -100,21 +100,6 @@ class UnicodeData {
static const uchar kMaxCodePoint;
};
-// --- U t f 8 a n d 16 ---
-
-template <typename Data>
-class Buffer {
- public:
- inline Buffer(Data data, unsigned length) : data_(data), length_(length) { }
- inline Buffer() : data_(0), length_(0) { }
- Data data() { return data_; }
- unsigned length() { return length_; }
- private:
- Data data_;
- unsigned length_;
-};
-
-
class Utf16 {
public:
static inline bool IsLeadSurrogate(int code) {
@@ -140,22 +125,29 @@ class Utf16 {
// One UTF-16 surrogate is encoded (illegally) as 3 UTF-8 bytes.
// The illegality stems from the surrogate not being part of a pair.
static const int kUtf8BytesToCodeASurrogate = 3;
- static inline uchar LeadSurrogate(int char_code) {
+ static inline uint16_t LeadSurrogate(uint32_t char_code) {
return 0xd800 + (((char_code - 0x10000) >> 10) & 0x3ff);
}
- static inline uchar TrailSurrogate(int char_code) {
+ static inline uint16_t TrailSurrogate(uint32_t char_code) {
return 0xdc00 + (char_code & 0x3ff);
}
};
+class Latin1 {
+ public:
+ static const unsigned kMaxChar = 0xff;
+ // Returns 0 if the character does not convert to a single Latin-1
+ // character, or if the character does not convert back to Latin-1 via the
+ // inverse operation (upper to lower, etc.).
+ static inline uint16_t ConvertNonLatin1ToLatin1(uint16_t);
+};
class Utf8 {
public:
static inline uchar Length(uchar chr, int previous);
+ static inline unsigned EncodeOneByte(char* out, uint8_t c);
static inline unsigned Encode(
char* out, uchar c, int previous);
- static const byte* ReadBlock(Buffer<const char*> str, byte* buffer,
- unsigned capacity, unsigned* chars_read, unsigned* offset);
static uchar CalculateValue(const byte* str,
unsigned length,
unsigned* cursor);
@@ -170,92 +162,47 @@ class Utf8 {
// that match are coded as a 4 byte UTF-8 sequence.
static const unsigned kBytesSavedByCombiningSurrogates = 2;
static const unsigned kSizeOfUnmatchedSurrogate = 3;
-
- private:
- template <unsigned s> friend class Utf8InputBuffer;
- friend class Test;
static inline uchar ValueOf(const byte* str,
unsigned length,
unsigned* cursor);
};
-// --- C h a r a c t e r S t r e a m ---
-
-class CharacterStream {
- public:
- inline uchar GetNext();
- inline bool has_more() { return remaining_ != 0; }
- // Note that default implementation is not efficient.
- virtual void Seek(unsigned);
- unsigned Length();
- unsigned Utf16Length();
- virtual ~CharacterStream() { }
- static inline bool EncodeCharacter(uchar c, byte* buffer, unsigned capacity,
- unsigned& offset);
- static inline bool EncodeAsciiCharacter(uchar c, byte* buffer,
- unsigned capacity, unsigned& offset);
- static inline bool EncodeNonAsciiCharacter(uchar c, byte* buffer,
- unsigned capacity, unsigned& offset);
- static inline uchar DecodeCharacter(const byte* buffer, unsigned* offset);
- virtual void Rewind() = 0;
-
- protected:
- virtual void FillBuffer() = 0;
- virtual bool BoundsCheck(unsigned offset) = 0;
- // The number of characters left in the current buffer
- unsigned remaining_;
- // The current offset within the buffer
- unsigned cursor_;
- // The buffer containing the decoded characters.
- const byte* buffer_;
-};
-
-// --- I n p u t B u f f e r ---
-/**
- * Provides efficient access to encoded characters in strings. It
- * does so by reading characters one block at a time, rather than one
- * character at a time, which gives string implementations an
- * opportunity to optimize the decoding.
- */
-template <class Reader, class Input = Reader*, unsigned kSize = 256>
-class InputBuffer : public CharacterStream {
+class Utf8DecoderBase {
public:
- virtual void Rewind();
- inline void Reset(Input input);
- void Seek(unsigned position);
- inline void Reset(unsigned position, Input input);
+ // Initialization done in subclass.
+ inline Utf8DecoderBase();
+ inline Utf8DecoderBase(uint16_t* buffer,
+ unsigned buffer_length,
+ const uint8_t* stream,
+ unsigned stream_length);
+ inline unsigned Utf16Length() const { return utf16_length_; }
protected:
- InputBuffer() { }
- explicit InputBuffer(Input input) { Reset(input); }
- virtual void FillBuffer();
- virtual bool BoundsCheck(unsigned offset) {
- return (buffer_ != util_buffer_) || (offset < kSize);
- }
-
- // A custom offset that can be used by the string implementation to
- // mark progress within the encoded string.
- unsigned offset_;
- // The input string
- Input input_;
- // To avoid heap allocation, we keep an internal buffer to which
- // the encoded string can write its characters. The string
- // implementation is free to decide whether it wants to use this
- // buffer or not.
- byte util_buffer_[kSize];
+ // This reads all characters and sets the utf16_length_.
+ // The first buffer_length utf16 chars are cached in the buffer.
+ void Reset(uint16_t* buffer,
+ unsigned buffer_length,
+ const uint8_t* stream,
+ unsigned stream_length);
+ static void WriteUtf16Slow(const uint8_t* stream,
+ uint16_t* data,
+ unsigned length);
+ const uint8_t* unbuffered_start_;
+ unsigned utf16_length_;
+ bool last_byte_of_buffer_unused_;
+ private:
+ DISALLOW_COPY_AND_ASSIGN(Utf8DecoderBase);
};
-// --- U t f 8 I n p u t B u f f e r ---
-
-template <unsigned s = 256>
-class Utf8InputBuffer : public InputBuffer<Utf8, Buffer<const char*>, s> {
+template <unsigned kBufferSize>
+class Utf8Decoder : public Utf8DecoderBase {
public:
- inline Utf8InputBuffer() { }
- inline Utf8InputBuffer(const char* data, unsigned length);
- inline void Reset(const char* data, unsigned length) {
- InputBuffer<Utf8, Buffer<const char*>, s>::Reset(
- Buffer<const char*>(data, length));
- }
+ inline Utf8Decoder() {}
+ inline Utf8Decoder(const char* stream, unsigned length);
+ inline void Reset(const char* stream, unsigned length);
+ inline unsigned WriteUtf16(uint16_t* data, unsigned length) const;
+ private:
+ uint16_t buffer_[kBufferSize];
};
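
A rough standalone sketch (plain C++, not part of this patch) of what the Utf8Decoder declared above computes: the UTF-8 stream is decoded into UTF-16 code units, with supplementary code points split into surrogate pairs. The real implementation presumably lives in unicode-inl.h/unicode.cc, which are not shown in this hunk; the sketch assumes well-formed UTF-8 input and does no validation.

#include <cassert>
#include <cstdint>
#include <string>
#include <vector>

// Decodes well-formed UTF-8 into UTF-16 code units (no validation).
static std::vector<uint16_t> DecodeUtf8(const std::string& in) {
  std::vector<uint16_t> out;
  for (size_t i = 0; i < in.size();) {
    uint8_t b = static_cast<uint8_t>(in[i]);
    uint32_t cp;
    size_t len;
    if (b < 0x80)      { cp = b;        len = 1; }
    else if (b < 0xE0) { cp = b & 0x1F; len = 2; }
    else if (b < 0xF0) { cp = b & 0x0F; len = 3; }
    else               { cp = b & 0x07; len = 4; }
    for (size_t j = 1; j < len; j++) {
      cp = (cp << 6) | (static_cast<uint8_t>(in[i + j]) & 0x3F);
    }
    if (cp < 0x10000) {
      out.push_back(static_cast<uint16_t>(cp));
    } else {  // Needs a surrogate pair, as in class Utf16 above.
      out.push_back(0xD800 + (((cp - 0x10000) >> 10) & 0x3FF));
      out.push_back(0xDC00 + (cp & 0x3FF));
    }
    i += len;
  }
  return out;
}

int main() {
  // U+20AC (euro sign) is E2 82 AC in UTF-8 and one UTF-16 code unit.
  std::vector<uint16_t> u = DecodeUtf8("\xE2\x82\xAC");
  assert(u.size() == 1 && u[0] == 0x20AC);
  return 0;
}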
diff --git a/deps/v8/src/unique.h b/deps/v8/src/unique.h
new file mode 100644
index 000000000..a93b04699
--- /dev/null
+++ b/deps/v8/src/unique.h
@@ -0,0 +1,332 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_HYDROGEN_UNIQUE_H_
+#define V8_HYDROGEN_UNIQUE_H_
+
+#include "handles.h"
+#include "objects.h"
+#include "utils.h"
+#include "zone.h"
+
+namespace v8 {
+namespace internal {
+
+
+template <typename T>
+class UniqueSet;
+
+
+// Represents a handle to an object on the heap, but with the additional
+// ability of checking for equality and hashing without accessing the heap.
+//
+// Creating a Unique<T> requires first dereferencing the handle to obtain
+// the address of the object, which is used as the hashcode and the basis for
+// comparison. The object can be moved later by the GC, but comparison
+// and hashing use the old address of the object, without dereferencing it.
+//
+// Careful! Comparison of two Uniques is only correct if both were created
+// in the same "era" of GC or if at least one is a non-movable object.
+template <typename T>
+class Unique V8_FINAL {
+ public:
+ // TODO(titzer): make private and introduce a uniqueness scope.
+ explicit Unique(Handle<T> handle) {
+ if (handle.is_null()) {
+ raw_address_ = NULL;
+ } else {
+ // This is a best-effort check to prevent comparing Unique<T>'s created
+ // in different GC eras; we require heap allocation to be disallowed at
+ // creation time.
+ // NOTE: we currently consider maps to be non-movable, so no special
+ // assurance is required for creating a Unique<Map>.
+      // TODO(titzer): other immortal immovable objects are also fine.
+ ASSERT(!AllowHeapAllocation::IsAllowed() || handle->IsMap());
+ raw_address_ = reinterpret_cast<Address>(*handle);
+ ASSERT_NE(raw_address_, NULL); // Non-null should imply non-zero address.
+ }
+ handle_ = handle;
+ }
+
+ // TODO(titzer): this is a hack to migrate to Unique<T> incrementally.
+ Unique(Address raw_address, Handle<T> handle)
+ : raw_address_(raw_address), handle_(handle) { }
+
+ // Constructor for handling automatic up casting.
+ // Eg. Unique<JSFunction> can be passed when Unique<Object> is expected.
+ template <class S> Unique(Unique<S> uniq) {
+#ifdef DEBUG
+ T* a = NULL;
+ S* b = NULL;
+ a = b; // Fake assignment to enforce type checks.
+ USE(a);
+#endif
+ raw_address_ = uniq.raw_address_;
+ handle_ = uniq.handle_;
+ }
+
+ template <typename U>
+ inline bool operator==(const Unique<U>& other) const {
+ ASSERT(IsInitialized() && other.IsInitialized());
+ return raw_address_ == other.raw_address_;
+ }
+
+ template <typename U>
+ inline bool operator!=(const Unique<U>& other) const {
+ ASSERT(IsInitialized() && other.IsInitialized());
+ return raw_address_ != other.raw_address_;
+ }
+
+ inline intptr_t Hashcode() const {
+ ASSERT(IsInitialized());
+ return reinterpret_cast<intptr_t>(raw_address_);
+ }
+
+ inline bool IsNull() const {
+ ASSERT(IsInitialized());
+ return raw_address_ == NULL;
+ }
+
+ inline bool IsKnownGlobal(void* global) const {
+ ASSERT(IsInitialized());
+ return raw_address_ == reinterpret_cast<Address>(global);
+ }
+
+ inline Handle<T> handle() const {
+ return handle_;
+ }
+
+ template <class S> static Unique<T> cast(Unique<S> that) {
+ return Unique<T>(that.raw_address_, Handle<T>::cast(that.handle_));
+ }
+
+ inline bool IsInitialized() const {
+ return raw_address_ != NULL || handle_.is_null();
+ }
+
+ // TODO(titzer): this is a hack to migrate to Unique<T> incrementally.
+ static Unique<T> CreateUninitialized(Handle<T> handle) {
+ return Unique<T>(reinterpret_cast<Address>(NULL), handle);
+ }
+
+ static Unique<T> CreateImmovable(Handle<T> handle) {
+ return Unique<T>(reinterpret_cast<Address>(*handle), handle);
+ }
+
+ friend class UniqueSet<T>; // Uses internal details for speed.
+ template <class U>
+ friend class Unique; // For comparing raw_address values.
+
+ private:
+ Address raw_address_;
+ Handle<T> handle_;
+};
+
+
+template <typename T>
+class UniqueSet V8_FINAL : public ZoneObject {
+ public:
+ // Constructor. A new set will be empty.
+ UniqueSet() : size_(0), capacity_(0), array_(NULL) { }
+
+ // Add a new element to this unique set. Mutates this set. O(|this|).
+ void Add(Unique<T> uniq, Zone* zone) {
+ ASSERT(uniq.IsInitialized());
+ // Keep the set sorted by the {raw_address} of the unique elements.
+ for (int i = 0; i < size_; i++) {
+ if (array_[i] == uniq) return;
+ if (array_[i].raw_address_ > uniq.raw_address_) {
+ // Insert in the middle.
+ Grow(size_ + 1, zone);
+ for (int j = size_ - 1; j >= i; j--) array_[j + 1] = array_[j];
+ array_[i] = uniq;
+ size_++;
+ return;
+ }
+ }
+    // Append the element to the end.
+ Grow(size_ + 1, zone);
+ array_[size_++] = uniq;
+ }
+
+ // Remove an element from this set. Mutates this set. O(|this|)
+ void Remove(Unique<T> uniq) {
+ for (int i = 0; i < size_; i++) {
+ if (array_[i] == uniq) {
+ while (++i < size_) array_[i - 1] = array_[i];
+ size_--;
+ return;
+ }
+ }
+ }
+
+ // Compare this set against another set. O(|this|).
+ bool Equals(UniqueSet<T>* that) const {
+ if (that->size_ != this->size_) return false;
+ for (int i = 0; i < this->size_; i++) {
+ if (this->array_[i] != that->array_[i]) return false;
+ }
+ return true;
+ }
+
+ // Check whether this set contains the given element. O(|this|)
+ // TODO(titzer): use binary search for large sets to make this O(log|this|)
+ template <typename U>
+ bool Contains(Unique<U> elem) const {
+ for (int i = 0; i < size_; i++) {
+ if (this->array_[i] == elem) return true;
+ }
+ return false;
+ }
+
+ // Check if this set is a subset of the given set. O(|this| + |that|).
+ bool IsSubset(UniqueSet<T>* that) const {
+ if (that->size_ < this->size_) return false;
+ int j = 0;
+ for (int i = 0; i < this->size_; i++) {
+ Unique<T> sought = this->array_[i];
+ while (true) {
+ if (sought == that->array_[j++]) break;
+ // Fail whenever there are more elements in {this} than {that}.
+ if ((this->size_ - i) > (that->size_ - j)) return false;
+ }
+ }
+ return true;
+ }
+
+ // Returns a new set representing the intersection of this set and the other.
+ // O(|this| + |that|).
+ UniqueSet<T>* Intersect(UniqueSet<T>* that, Zone* zone) const {
+ if (that->size_ == 0 || this->size_ == 0) return new(zone) UniqueSet<T>();
+
+ UniqueSet<T>* out = new(zone) UniqueSet<T>();
+ out->Grow(Min(this->size_, that->size_), zone);
+
+ int i = 0, j = 0, k = 0;
+ while (i < this->size_ && j < that->size_) {
+ Unique<T> a = this->array_[i];
+ Unique<T> b = that->array_[j];
+ if (a == b) {
+ out->array_[k++] = a;
+ i++;
+ j++;
+ } else if (a.raw_address_ < b.raw_address_) {
+ i++;
+ } else {
+ j++;
+ }
+ }
+
+ out->size_ = k;
+ return out;
+ }
+
+ // Returns a new set representing the union of this set and the other.
+ // O(|this| + |that|).
+ UniqueSet<T>* Union(UniqueSet<T>* that, Zone* zone) const {
+ if (that->size_ == 0) return this->Copy(zone);
+ if (this->size_ == 0) return that->Copy(zone);
+
+ UniqueSet<T>* out = new(zone) UniqueSet<T>();
+ out->Grow(this->size_ + that->size_, zone);
+
+ int i = 0, j = 0, k = 0;
+ while (i < this->size_ && j < that->size_) {
+ Unique<T> a = this->array_[i];
+ Unique<T> b = that->array_[j];
+ if (a == b) {
+ out->array_[k++] = a;
+ i++;
+ j++;
+ } else if (a.raw_address_ < b.raw_address_) {
+ out->array_[k++] = a;
+ i++;
+ } else {
+ out->array_[k++] = b;
+ j++;
+ }
+ }
+
+ while (i < this->size_) out->array_[k++] = this->array_[i++];
+ while (j < that->size_) out->array_[k++] = that->array_[j++];
+
+ out->size_ = k;
+ return out;
+ }
+
+  // Makes an exact copy of this set. O(|this|).
+ UniqueSet<T>* Copy(Zone* zone) const {
+ UniqueSet<T>* copy = new(zone) UniqueSet<T>();
+ copy->size_ = this->size_;
+ copy->capacity_ = this->size_;
+ copy->array_ = zone->NewArray<Unique<T> >(this->size_);
+ memcpy(copy->array_, this->array_, this->size_ * sizeof(Unique<T>));
+ return copy;
+ }
+
+ void Clear() {
+ size_ = 0;
+ }
+
+ inline int size() const {
+ return size_;
+ }
+
+ inline Unique<T> at(int index) const {
+ ASSERT(index >= 0 && index < size_);
+ return array_[index];
+ }
+
+ private:
+ // These sets should be small, since operations are implemented with simple
+ // linear algorithms. Enforce a maximum size.
+ static const int kMaxCapacity = 65535;
+
+ uint16_t size_;
+ uint16_t capacity_;
+ Unique<T>* array_;
+
+ // Grow the size of internal storage to be at least {size} elements.
+ void Grow(int size, Zone* zone) {
+ CHECK(size < kMaxCapacity); // Enforce maximum size.
+ if (capacity_ < size) {
+ int new_capacity = 2 * capacity_ + size;
+ if (new_capacity > kMaxCapacity) new_capacity = kMaxCapacity;
+ Unique<T>* new_array = zone->NewArray<Unique<T> >(new_capacity);
+ if (size_ > 0) {
+ memcpy(new_array, array_, size_ * sizeof(Unique<T>));
+ }
+ capacity_ = new_capacity;
+ array_ = new_array;
+ }
+ }
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_HYDROGEN_UNIQUE_H_
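
The core idea of Unique<T>, sketched outside of V8 (hypothetical names, not part of this patch): identity is the address captured at construction time, so equality and hashing never touch the heap afterwards.

#include <cassert>
#include <cstdint>

template <typename T>
class UniqueSketch {
 public:
  explicit UniqueSketch(T* object)
      : raw_address_(reinterpret_cast<uintptr_t>(object)) {}
  bool operator==(const UniqueSketch& other) const {
    return raw_address_ == other.raw_address_;
  }
  uintptr_t Hashcode() const { return raw_address_; }
 private:
  uintptr_t raw_address_;  // Captured once; never dereferenced again.
};

int main() {
  int a = 1, b = 1;
  UniqueSketch<int> ua(&a), ua2(&a), ub(&b);
  assert(ua == ua2);    // Same object: equal without dereferencing.
  assert(!(ua == ub));  // Equal contents, different identity: not equal.
  return 0;
}

As the comment at the top of the class notes, this only stays correct as long as the GC has not moved the objects between the two creations, which is why the real constructor asserts that heap allocation is disallowed.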
diff --git a/deps/v8/src/uri.h b/deps/v8/src/uri.h
new file mode 100644
index 000000000..ee1baeb51
--- /dev/null
+++ b/deps/v8/src/uri.h
@@ -0,0 +1,309 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_URI_H_
+#define V8_URI_H_
+
+#include "v8.h"
+
+#include "string-search.h"
+#include "v8utils.h"
+#include "v8conversions.h"
+
+namespace v8 {
+namespace internal {
+
+
+template <typename Char>
+static INLINE(Vector<const Char> GetCharVector(Handle<String> string));
+
+
+template <>
+Vector<const uint8_t> GetCharVector(Handle<String> string) {
+ String::FlatContent flat = string->GetFlatContent();
+ ASSERT(flat.IsAscii());
+ return flat.ToOneByteVector();
+}
+
+
+template <>
+Vector<const uc16> GetCharVector(Handle<String> string) {
+ String::FlatContent flat = string->GetFlatContent();
+ ASSERT(flat.IsTwoByte());
+ return flat.ToUC16Vector();
+}
+
+
+class URIUnescape : public AllStatic {
+ public:
+ template<typename Char>
+ static Handle<String> Unescape(Isolate* isolate, Handle<String> source);
+
+ private:
+ static const signed char kHexValue['g'];
+
+ template<typename Char>
+ static Handle<String> UnescapeSlow(
+ Isolate* isolate, Handle<String> string, int start_index);
+
+ static INLINE(int TwoDigitHex(uint16_t character1, uint16_t character2));
+
+ template <typename Char>
+ static INLINE(int UnescapeChar(Vector<const Char> vector,
+ int i,
+ int length,
+ int* step));
+};
+
+
+const signed char URIUnescape::kHexValue[] = {
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -1, -1, -1, -1, -1, -1,
+ -1, 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, 10, 11, 12, 13, 14, 15 };
+
+
+template<typename Char>
+Handle<String> URIUnescape::Unescape(Isolate* isolate, Handle<String> source) {
+ int index;
+ { DisallowHeapAllocation no_allocation;
+ StringSearch<uint8_t, Char> search(isolate, STATIC_ASCII_VECTOR("%"));
+ index = search.Search(GetCharVector<Char>(source), 0);
+ if (index < 0) return source;
+ }
+ return UnescapeSlow<Char>(isolate, source, index);
+}
+
+
+template <typename Char>
+Handle<String> URIUnescape::UnescapeSlow(
+ Isolate* isolate, Handle<String> string, int start_index) {
+ bool one_byte = true;
+ int length = string->length();
+
+ int unescaped_length = 0;
+ { DisallowHeapAllocation no_allocation;
+ Vector<const Char> vector = GetCharVector<Char>(string);
+ for (int i = start_index; i < length; unescaped_length++) {
+ int step;
+ if (UnescapeChar(vector, i, length, &step) >
+ String::kMaxOneByteCharCode) {
+ one_byte = false;
+ }
+ i += step;
+ }
+ }
+
+ ASSERT(start_index < length);
+ Handle<String> first_part =
+ isolate->factory()->NewProperSubString(string, 0, start_index);
+
+ int dest_position = 0;
+ Handle<String> second_part;
+ if (one_byte) {
+ Handle<SeqOneByteString> dest =
+ isolate->factory()->NewRawOneByteString(unescaped_length);
+ DisallowHeapAllocation no_allocation;
+ Vector<const Char> vector = GetCharVector<Char>(string);
+ for (int i = start_index; i < length; dest_position++) {
+ int step;
+ dest->SeqOneByteStringSet(dest_position,
+ UnescapeChar(vector, i, length, &step));
+ i += step;
+ }
+ second_part = dest;
+ } else {
+ Handle<SeqTwoByteString> dest =
+ isolate->factory()->NewRawTwoByteString(unescaped_length);
+ DisallowHeapAllocation no_allocation;
+ Vector<const Char> vector = GetCharVector<Char>(string);
+ for (int i = start_index; i < length; dest_position++) {
+ int step;
+ dest->SeqTwoByteStringSet(dest_position,
+ UnescapeChar(vector, i, length, &step));
+ i += step;
+ }
+ second_part = dest;
+ }
+ return isolate->factory()->NewConsString(first_part, second_part);
+}
+
+
+int URIUnescape::TwoDigitHex(uint16_t character1, uint16_t character2) {
+ if (character1 > 'f') return -1;
+ int hi = kHexValue[character1];
+ if (hi == -1) return -1;
+ if (character2 > 'f') return -1;
+ int lo = kHexValue[character2];
+ if (lo == -1) return -1;
+ return (hi << 4) + lo;
+}
+
+
+template <typename Char>
+int URIUnescape::UnescapeChar(Vector<const Char> vector,
+ int i,
+ int length,
+ int* step) {
+ uint16_t character = vector[i];
+ int32_t hi = 0;
+ int32_t lo = 0;
+ if (character == '%' &&
+ i <= length - 6 &&
+ vector[i + 1] == 'u' &&
+ (hi = TwoDigitHex(vector[i + 2],
+ vector[i + 3])) != -1 &&
+ (lo = TwoDigitHex(vector[i + 4],
+ vector[i + 5])) != -1) {
+ *step = 6;
+ return (hi << 8) + lo;
+ } else if (character == '%' &&
+ i <= length - 3 &&
+ (lo = TwoDigitHex(vector[i + 1],
+ vector[i + 2])) != -1) {
+ *step = 3;
+ return lo;
+ } else {
+ *step = 1;
+ return character;
+ }
+}
+
+
+class URIEscape : public AllStatic {
+ public:
+ template<typename Char>
+ static Handle<String> Escape(Isolate* isolate, Handle<String> string);
+
+ private:
+ static const char kHexChars[17];
+ static const char kNotEscaped[256];
+
+ static bool IsNotEscaped(uint16_t c) { return kNotEscaped[c] != 0; }
+};
+
+
+const char URIEscape::kHexChars[] = "0123456789ABCDEF";
+
+
+// kNotEscaped is generated by the following:
+//
+// #!/bin/perl
+// for (my $i = 0; $i < 256; $i++) {
+// print "\n" if $i % 16 == 0;
+// my $c = chr($i);
+// my $escaped = 1;
+// $escaped = 0 if $c =~ m#[A-Za-z0-9@*_+./-]#;
+// print $escaped ? "0, " : "1, ";
+// }
+
+const char URIEscape::kNotEscaped[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1,
+ 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+
+
+template<typename Char>
+Handle<String> URIEscape::Escape(Isolate* isolate, Handle<String> string) {
+ ASSERT(string->IsFlat());
+ int escaped_length = 0;
+ int length = string->length();
+
+ { DisallowHeapAllocation no_allocation;
+ Vector<const Char> vector = GetCharVector<Char>(string);
+ for (int i = 0; i < length; i++) {
+ uint16_t c = vector[i];
+ if (c >= 256) {
+ escaped_length += 6;
+ } else if (IsNotEscaped(c)) {
+ escaped_length++;
+ } else {
+ escaped_length += 3;
+ }
+
+ // We don't allow strings that are longer than a maximal length.
+ ASSERT(String::kMaxLength < 0x7fffffff - 6); // Cannot overflow.
+ if (escaped_length > String::kMaxLength) {
+ isolate->context()->mark_out_of_memory();
+ return Handle<String>::null();
+ }
+ }
+ }
+
+  // No length change means no character was escaped; return the original.
+ if (escaped_length == length) return string;
+
+ Handle<SeqOneByteString> dest =
+ isolate->factory()->NewRawOneByteString(escaped_length);
+ int dest_position = 0;
+
+ { DisallowHeapAllocation no_allocation;
+ Vector<const Char> vector = GetCharVector<Char>(string);
+ for (int i = 0; i < length; i++) {
+ uint16_t c = vector[i];
+ if (c >= 256) {
+ dest->SeqOneByteStringSet(dest_position, '%');
+ dest->SeqOneByteStringSet(dest_position+1, 'u');
+ dest->SeqOneByteStringSet(dest_position+2, kHexChars[c >> 12]);
+ dest->SeqOneByteStringSet(dest_position+3, kHexChars[(c >> 8) & 0xf]);
+ dest->SeqOneByteStringSet(dest_position+4, kHexChars[(c >> 4) & 0xf]);
+ dest->SeqOneByteStringSet(dest_position+5, kHexChars[c & 0xf]);
+ dest_position += 6;
+ } else if (IsNotEscaped(c)) {
+ dest->SeqOneByteStringSet(dest_position, c);
+ dest_position++;
+ } else {
+ dest->SeqOneByteStringSet(dest_position, '%');
+ dest->SeqOneByteStringSet(dest_position+1, kHexChars[c >> 4]);
+ dest->SeqOneByteStringSet(dest_position+2, kHexChars[c & 0xf]);
+ dest_position += 3;
+ }
+ }
+ }
+
+ return dest;
+}
+
+} } // namespace v8::internal
+
+#endif // V8_URI_H_
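
A standalone sketch (not part of this patch) of the per-character expansion rule that URIEscape::Escape above implements: code units >= 256 become a six-character %uXXXX escape, characters in the not-escaped set pass through, and everything else becomes a three-character %XX escape. The is_not_escaped predicate below is a hypothetical stand-in for the kNotEscaped table.

#include <cassert>
#include <cstdint>
#include <string>

static const char kHex[] = "0123456789ABCDEF";

static std::string EscapeChar(uint16_t c, bool (*is_not_escaped)(uint16_t)) {
  std::string out;
  if (c >= 256) {                  // 6 output characters.
    out += "%u";
    out += kHex[c >> 12];
    out += kHex[(c >> 8) & 0xF];
    out += kHex[(c >> 4) & 0xF];
    out += kHex[c & 0xF];
  } else if (is_not_escaped(c)) {  // 1 output character.
    out += static_cast<char>(c);
  } else {                         // 3 output characters.
    out += '%';
    out += kHex[c >> 4];
    out += kHex[c & 0xF];
  }
  return out;
}

static bool IsAlnum(uint16_t c) {
  return (c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z') ||
         (c >= '0' && c <= '9');
}

int main() {
  assert(EscapeChar('A', IsAlnum) == "A");
  assert(EscapeChar(' ', IsAlnum) == "%20");
  assert(EscapeChar(0x20AC, IsAlnum) == "%u20AC");
  return 0;
}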
diff --git a/deps/v8/src/uri.js b/deps/v8/src/uri.js
index b195f3da7..4e3f084af 100644
--- a/deps/v8/src/uri.js
+++ b/deps/v8/src/uri.js
@@ -25,11 +25,15 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// This file relies on the fact that the following declaration has been made
+// in runtime.js:
+// var $Array = global.Array;
+
+// -------------------------------------------------------------------
+
// This file contains support for URI manipulations written in
// JavaScript.
-// Expect $String = global.String;
-
// Lazily initialized.
var hexCharArray = 0;
var hexCharCodeArray = 0;
@@ -165,11 +169,11 @@ function URIDecodeOctets(octets, result, index) {
throw new $URIError("URI malformed");
}
if (value < 0x10000) {
- result[index++] = value;
+ %_TwoByteSeqStringSetChar(result, index++, value);
return index;
} else {
- result[index++] = (value >> 10) + 0xd7c0;
- result[index++] = (value & 0x3ff) + 0xdc00;
+ %_TwoByteSeqStringSetChar(result, index++, (value >> 10) + 0xd7c0);
+ %_TwoByteSeqStringSetChar(result, index++, (value & 0x3ff) + 0xdc00);
return index;
}
}
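
The 0xd7c0/0xdc00 arithmetic used above is the standard surrogate split with the constant folded in: 0xD800 - (0x10000 >> 10) == 0xD7C0. A small standalone C++ check (not part of the patch):

#include <cassert>
#include <cstdint>

int main() {
  for (uint32_t value = 0x10000; value <= 0x10FFFF; value++) {
    uint32_t lead_shortcut = (value >> 10) + 0xD7C0;
    uint32_t lead_standard = 0xD800 + ((value - 0x10000) >> 10);
    uint32_t trail = (value & 0x3FF) + 0xDC00;
    assert(lead_shortcut == lead_standard);
    assert(trail >= 0xDC00 && trail <= 0xDFFF);
  }
  return 0;
}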
@@ -178,43 +182,72 @@ function URIDecodeOctets(octets, result, index) {
// ECMA-262, section 15.1.3
function Encode(uri, unescape) {
var uriLength = uri.length;
- // We are going to pass result to %StringFromCharCodeArray
- // which does not expect any getters/setters installed
- // on the incoming array.
- var result = new InternalArray(uriLength);
+ var array = new InternalArray(uriLength);
var index = 0;
for (var k = 0; k < uriLength; k++) {
var cc1 = uri.charCodeAt(k);
if (unescape(cc1)) {
- result[index++] = cc1;
+ array[index++] = cc1;
} else {
if (cc1 >= 0xDC00 && cc1 <= 0xDFFF) throw new $URIError("URI malformed");
if (cc1 < 0xD800 || cc1 > 0xDBFF) {
- index = URIEncodeSingle(cc1, result, index);
+ index = URIEncodeSingle(cc1, array, index);
} else {
k++;
if (k == uriLength) throw new $URIError("URI malformed");
var cc2 = uri.charCodeAt(k);
if (cc2 < 0xDC00 || cc2 > 0xDFFF) throw new $URIError("URI malformed");
- index = URIEncodePair(cc1, cc2, result, index);
+ index = URIEncodePair(cc1, cc2, array, index);
}
}
}
- return %StringFromCharCodeArray(result);
+
+ var result = %NewString(array.length, NEW_ONE_BYTE_STRING);
+ for (var i = 0; i < array.length; i++) {
+ %_OneByteSeqStringSetChar(result, i, array[i]);
+ }
+ return result;
}
// ECMA-262, section 15.1.3
function Decode(uri, reserved) {
var uriLength = uri.length;
- // We are going to pass result to %StringFromCharCodeArray
- // which does not expect any getters/setters installed
- // on the incoming array.
- var result = new InternalArray(uriLength);
+ var one_byte = %NewString(uriLength, NEW_ONE_BYTE_STRING);
var index = 0;
- for (var k = 0; k < uriLength; k++) {
- var ch = uri.charAt(k);
- if (ch == '%') {
+ var k = 0;
+
+ // Optimistically assume ascii string.
+ for ( ; k < uriLength; k++) {
+ var code = uri.charCodeAt(k);
+ if (code == 37) { // '%'
+ if (k + 2 >= uriLength) throw new $URIError("URI malformed");
+ var cc = URIHexCharsToCharCode(uri.charCodeAt(k+1), uri.charCodeAt(k+2));
+ if (cc >> 7) break; // Assumption wrong, two byte string.
+ if (reserved(cc)) {
+ %_OneByteSeqStringSetChar(one_byte, index++, 37); // '%'.
+ %_OneByteSeqStringSetChar(one_byte, index++, uri.charCodeAt(k+1));
+ %_OneByteSeqStringSetChar(one_byte, index++, uri.charCodeAt(k+2));
+ } else {
+ %_OneByteSeqStringSetChar(one_byte, index++, cc);
+ }
+ k += 2;
+ } else {
+ if (code > 0x7f) break; // Assumption wrong, two byte string.
+ %_OneByteSeqStringSetChar(one_byte, index++, code);
+ }
+ }
+
+ one_byte = %TruncateString(one_byte, index);
+ if (k == uriLength) return one_byte;
+
+ // Write into two byte string.
+ var two_byte = %NewString(uriLength - k, NEW_TWO_BYTE_STRING);
+ index = 0;
+
+ for ( ; k < uriLength; k++) {
+ var code = uri.charCodeAt(k);
+ if (code == 37) { // '%'
if (k + 2 >= uriLength) throw new $URIError("URI malformed");
var cc = URIHexCharsToCharCode(uri.charCodeAt(++k), uri.charCodeAt(++k));
if (cc >> 7) {
@@ -229,22 +262,21 @@ function Decode(uri, reserved) {
octets[i] = URIHexCharsToCharCode(uri.charCodeAt(++k),
uri.charCodeAt(++k));
}
- index = URIDecodeOctets(octets, result, index);
+ index = URIDecodeOctets(octets, two_byte, index);
+ } else if (reserved(cc)) {
+ %_TwoByteSeqStringSetChar(two_byte, index++, 37); // '%'.
+ %_TwoByteSeqStringSetChar(two_byte, index++, uri.charCodeAt(k - 1));
+ %_TwoByteSeqStringSetChar(two_byte, index++, uri.charCodeAt(k));
} else {
- if (reserved(cc)) {
- result[index++] = 37; // Char code of '%'.
- result[index++] = uri.charCodeAt(k - 1);
- result[index++] = uri.charCodeAt(k);
- } else {
- result[index++] = cc;
- }
+ %_TwoByteSeqStringSetChar(two_byte, index++, cc);
}
} else {
- result[index++] = ch.charCodeAt(0);
+ %_TwoByteSeqStringSetChar(two_byte, index++, code);
}
}
- result.length = index;
- return %StringFromCharCodeArray(result);
+
+ two_byte = %TruncateString(two_byte, index);
+ return one_byte + two_byte;
}
@@ -409,6 +441,7 @@ function URIUnescape(str) {
function SetUpUri() {
%CheckIsBootstrapping();
+
// Set up non-enumerable URI functions on the global object and set
// their names.
InstallFunctions(global, DONT_ENUM, $Array(
diff --git a/deps/v8/src/utils.cc b/deps/v8/src/utils.cc
index 7e8c088dd..846261520 100644
--- a/deps/v8/src/utils.cc
+++ b/deps/v8/src/utils.cc
@@ -28,6 +28,7 @@
#include <stdarg.h>
#include "../include/v8stdint.h"
#include "checks.h"
+#include "platform.h"
#include "utils.h"
namespace v8 {
@@ -46,9 +47,9 @@ void SimpleStringBuilder::AddString(const char* s) {
void SimpleStringBuilder::AddSubstring(const char* s, int n) {
- ASSERT(!is_finalized() && position_ + n < buffer_.length());
+ ASSERT(!is_finalized() && position_ + n <= buffer_.length());
ASSERT(static_cast<size_t>(n) <= strlen(s));
- memcpy(&buffer_[position_], s, n * kCharSize);
+ OS::MemCopy(&buffer_[position_], s, n * kCharSize);
position_ += n;
}
@@ -79,7 +80,13 @@ void SimpleStringBuilder::AddDecimalInteger(int32_t value) {
char* SimpleStringBuilder::Finalize() {
- ASSERT(!is_finalized() && position_ < buffer_.length());
+ ASSERT(!is_finalized() && position_ <= buffer_.length());
+ // If there is no space for null termination, overwrite last character.
+ if (position_ == buffer_.length()) {
+ position_--;
+ // Print ellipsis.
+ for (int i = 3; i > 0 && position_ > i; --i) buffer_[position_ - i] = '.';
+ }
buffer_[position_] = '\0';
// Make sure nobody managed to add a 0-character to the
// buffer while building the string.
diff --git a/deps/v8/src/utils.h b/deps/v8/src/utils.h
index e03f96f6e..062019af4 100644
--- a/deps/v8/src/utils.h
+++ b/deps/v8/src/utils.h
@@ -30,11 +30,12 @@
#include <stdlib.h>
#include <string.h>
+#include <algorithm>
#include <climits>
-#include "globals.h"
-#include "checks.h"
#include "allocation.h"
+#include "checks.h"
+#include "globals.h"
namespace v8 {
namespace internal {
@@ -85,6 +86,25 @@ inline int WhichPowerOf2(uint32_t x) {
}
+inline int MostSignificantBit(uint32_t x) {
+ static const int msb4[] = {0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4};
+ int nibble = 0;
+ if (x & 0xffff0000) {
+ nibble += 16;
+ x >>= 16;
+ }
+ if (x & 0xff00) {
+ nibble += 8;
+ x >>= 8;
+ }
+ if (x & 0xf0) {
+ nibble += 4;
+ x >>= 4;
+ }
+ return nibble + msb4[x];
+}
+
+
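+
The MostSignificantBit() helper added above returns the 1-based index of the highest set bit, or 0 when the argument is 0. A standalone copy with a few spot checks (not part of the patch):

#include <cassert>
#include <cstdint>

static int MostSignificantBitSketch(uint32_t x) {
  static const int msb4[] = {0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4};
  int nibble = 0;
  if (x & 0xffff0000) { nibble += 16; x >>= 16; }  // Narrow to 16 bits.
  if (x & 0xff00)     { nibble += 8;  x >>= 8;  }  // Narrow to 8 bits.
  if (x & 0xf0)       { nibble += 4;  x >>= 4;  }  // Narrow to a nibble.
  return nibble + msb4[x];                         // Table finishes the job.
}

int main() {
  assert(MostSignificantBitSketch(0) == 0);
  assert(MostSignificantBitSketch(1) == 1);
  assert(MostSignificantBitSketch(0x80) == 8);
  assert(MostSignificantBitSketch(0x80000000u) == 32);
  return 0;
}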
// Magic numbers for integer division.
// These are kind of 2's complement reciprocal of the divisors.
// Details and proofs can be found in:
@@ -231,6 +251,20 @@ T Min(T a, T b) {
}
+// Returns the absolute value of its argument.
+template <typename T>
+T Abs(T a) {
+ return a < 0 ? -a : a;
+}
+
+
+// Returns the negative absolute value of its argument.
+template <typename T>
+T NegAbs(T a) {
+ return a < 0 ? a : -a;
+}
+
+
inline int StrLength(const char* string) {
size_t length = strlen(string);
ASSERT(length == static_cast<size_t>(static_cast<int>(length)));
@@ -241,41 +275,52 @@ inline int StrLength(const char* string) {
// ----------------------------------------------------------------------------
 // BitField is a helper template for encoding and decoding bitfields with
 // unsigned content.
-template<class T, int shift, int size>
-class BitField {
+
+template<class T, int shift, int size, class U>
+class BitFieldBase {
public:
- // A uint32_t mask of bit field. To use all bits of a uint32 in a
- // bitfield without compiler warnings we have to compute 2^32 without
- // using a shift count of 32.
- static const uint32_t kMask = ((1U << shift) << size) - (1U << shift);
- static const uint32_t kShift = shift;
+ // A type U mask of bit field. To use all bits of a type U of x bits
+ // in a bitfield without compiler warnings we have to compute 2^x
+ // without using a shift count of x in the computation.
+ static const U kOne = static_cast<U>(1U);
+ static const U kMask = ((kOne << shift) << size) - (kOne << shift);
+ static const U kShift = shift;
+ static const U kSize = size;
// Value for the field with all bits set.
static const T kMax = static_cast<T>((1U << size) - 1);
// Tells whether the provided value fits into the bit field.
static bool is_valid(T value) {
- return (static_cast<uint32_t>(value) & ~static_cast<uint32_t>(kMax)) == 0;
+ return (static_cast<U>(value) & ~static_cast<U>(kMax)) == 0;
}
- // Returns a uint32_t with the bit field value encoded.
- static uint32_t encode(T value) {
+ // Returns a type U with the bit field value encoded.
+ static U encode(T value) {
ASSERT(is_valid(value));
- return static_cast<uint32_t>(value) << shift;
+ return static_cast<U>(value) << shift;
}
- // Returns a uint32_t with the bit field value updated.
- static uint32_t update(uint32_t previous, T value) {
+ // Returns a type U with the bit field value updated.
+ static U update(U previous, T value) {
return (previous & ~kMask) | encode(value);
}
// Extracts the bit field from the value.
- static T decode(uint32_t value) {
+ static T decode(U value) {
return static_cast<T>((value & kMask) >> shift);
}
};
+template<class T, int shift, int size>
+class BitField : public BitFieldBase<T, shift, size, uint32_t> { };
+
+
+template<class T, int shift, int size>
+class BitField64 : public BitFieldBase<T, shift, size, uint64_t> { };
+
+
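+
A standalone sketch (not part of the patch) of the arithmetic BitFieldBase generalizes: a field of size bits starting at bit shift is masked, encoded, updated, and decoded purely with shifts.

#include <cassert>
#include <cstdint>

template <int shift, int size>
struct FieldSketch {
  // Same mask construction as kMask above, avoiding a shift count of 32.
  static const uint32_t kMask = ((1U << shift) << size) - (1U << shift);
  static uint32_t encode(uint32_t value) { return value << shift; }
  static uint32_t update(uint32_t prev, uint32_t value) {
    return (prev & ~kMask) | encode(value);
  }
  static uint32_t decode(uint32_t word) { return (word & kMask) >> shift; }
};

int main() {
  typedef FieldSketch<3, 4> F;  // A 4-bit field at bit positions 3..6.
  assert(F::kMask == 0x78);
  uint32_t word = F::encode(0xA);
  assert(word == 0x50);
  assert(F::decode(word) == 0xA);
  assert(F::decode(F::update(word, 0x5)) == 0x5);
  return 0;
}

BitField64 works the same way with uint64_t as the storage type, which is what the fourth template parameter of BitFieldBase selects.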
// ----------------------------------------------------------------------------
// Hash function.
@@ -304,7 +349,7 @@ inline uint32_t ComputeLongHash(uint64_t key) {
hash = hash ^ (hash >> 11);
hash = hash + (hash << 6);
hash = hash ^ (hash >> 22);
- return (uint32_t) hash;
+ return static_cast<uint32_t>(hash);
}
@@ -374,8 +419,8 @@ class Vector {
// Returns a vector using the same backing storage as this one,
// spanning from and including 'from', to but not including 'to'.
Vector<T> SubVector(int from, int to) {
- ASSERT(to <= length_);
- ASSERT(from < to);
+ SLOW_ASSERT(to <= length_);
+ SLOW_ASSERT(from < to);
ASSERT(0 <= from);
return Vector<T>(start() + from, to - from);
}
@@ -409,15 +454,11 @@ class Vector {
}
void Sort(int (*cmp)(const T*, const T*)) {
- typedef int (*RawComparer)(const void*, const void*);
- qsort(start(),
- length(),
- sizeof(T),
- reinterpret_cast<RawComparer>(cmp));
+ std::sort(start(), start() + length(), RawComparer(cmp));
}
void Sort() {
- Sort(PointerValueCompare<T>);
+ std::sort(start(), start() + length());
}
void Truncate(int length) {
@@ -453,6 +494,17 @@ class Vector {
private:
T* start_;
int length_;
+
+ class RawComparer {
+ public:
+ explicit RawComparer(int (*cmp)(const T*, const T*)) : cmp_(cmp) {}
+ bool operator()(const T& a, const T& b) {
+ return cmp_(&a, &b) < 0;
+ }
+
+ private:
+ int (*cmp_)(const T*, const T*);
+ };
};
@@ -493,6 +545,7 @@ class EmbeddedVector : public Vector<T> {
// When copying, make underlying Vector to reference our buffer.
EmbeddedVector(const EmbeddedVector& rhs)
: Vector<T>(rhs) {
+ // TODO(jkummerow): Refactor #includes and use OS::MemCopy() instead.
memcpy(buffer_, rhs.buffer_, sizeof(T) * kSize);
set_start(buffer_);
}
@@ -500,6 +553,7 @@ class EmbeddedVector : public Vector<T> {
EmbeddedVector& operator=(const EmbeddedVector& rhs) {
if (this == &rhs) return *this;
Vector<T>::operator=(rhs);
+ // TODO(jkummerow): Refactor #includes and use OS::MemCopy() instead.
memcpy(buffer_, rhs.buffer_, sizeof(T) * kSize);
this->set_start(buffer_);
return *this;
@@ -522,11 +576,22 @@ class ScopedVector : public Vector<T> {
DISALLOW_IMPLICIT_CONSTRUCTORS(ScopedVector);
};
+#define STATIC_ASCII_VECTOR(x) \
+ v8::internal::Vector<const uint8_t>(reinterpret_cast<const uint8_t*>(x), \
+ ARRAY_SIZE(x)-1)
inline Vector<const char> CStrVector(const char* data) {
return Vector<const char>(data, StrLength(data));
}
+inline Vector<const uint8_t> OneByteVector(const char* data, int length) {
+ return Vector<const uint8_t>(reinterpret_cast<const uint8_t*>(data), length);
+}
+
+inline Vector<const uint8_t> OneByteVector(const char* data) {
+ return OneByteVector(data, StrLength(data));
+}
+
inline Vector<char> MutableCStrVector(char* data) {
return Vector<char>(data, StrLength(data));
}
@@ -765,7 +830,9 @@ class SequenceCollector : public Collector<T, growth_factor, max_growth> {
// Compare ASCII/16bit chars to ASCII/16bit chars.
template <typename lchar, typename rchar>
-inline int CompareChars(const lchar* lhs, const rchar* rhs, int chars) {
+inline int CompareCharsUnsigned(const lchar* lhs,
+ const rchar* rhs,
+ int chars) {
const lchar* limit = lhs + chars;
#ifdef V8_HOST_CAN_READ_UNALIGNED
if (sizeof(*lhs) == sizeof(*rhs)) {
@@ -790,6 +857,33 @@ inline int CompareChars(const lchar* lhs, const rchar* rhs, int chars) {
return 0;
}
+template<typename lchar, typename rchar>
+inline int CompareChars(const lchar* lhs, const rchar* rhs, int chars) {
+ ASSERT(sizeof(lchar) <= 2);
+ ASSERT(sizeof(rchar) <= 2);
+ if (sizeof(lchar) == 1) {
+ if (sizeof(rchar) == 1) {
+ return CompareCharsUnsigned(reinterpret_cast<const uint8_t*>(lhs),
+ reinterpret_cast<const uint8_t*>(rhs),
+ chars);
+ } else {
+ return CompareCharsUnsigned(reinterpret_cast<const uint8_t*>(lhs),
+ reinterpret_cast<const uint16_t*>(rhs),
+ chars);
+ }
+ } else {
+ if (sizeof(rchar) == 1) {
+ return CompareCharsUnsigned(reinterpret_cast<const uint16_t*>(lhs),
+ reinterpret_cast<const uint8_t*>(rhs),
+ chars);
+ } else {
+ return CompareCharsUnsigned(reinterpret_cast<const uint16_t*>(lhs),
+ reinterpret_cast<const uint16_t*>(rhs),
+ chars);
+ }
+ }
+}
+
// Calculate 10^exponent.
inline int TenToThe(int exponent) {
@@ -835,6 +929,7 @@ struct BitCastHelper {
INLINE(static Dest cast(const Source& source)) {
Dest dest;
+ // TODO(jkummerow): Refactor #includes and use OS::MemCopy() instead.
memcpy(&dest, &source, sizeof(dest));
return dest;
}
@@ -978,6 +1073,10 @@ class EnumSet {
void Intersect(const EnumSet& set) { bits_ &= set.bits_; }
T ToIntegral() const { return bits_; }
bool operator==(const EnumSet& set) { return bits_ == set.bits_; }
+ bool operator!=(const EnumSet& set) { return bits_ != set.bits_; }
+ EnumSet<E, T> operator|(const EnumSet& set) const {
+ return EnumSet<E, T>(bits_ | set.bits_);
+ }
private:
T Mask(E element) const {
@@ -1015,6 +1114,7 @@ class BailoutId {
static BailoutId FunctionEntry() { return BailoutId(kFunctionEntryId); }
static BailoutId Declarations() { return BailoutId(kDeclarationsId); }
static BailoutId FirstUsable() { return BailoutId(kFirstUsableId); }
+ static BailoutId StubEntry() { return BailoutId(kStubEntryId); }
bool IsNone() const { return id_ == kNoneId; }
bool operator==(const BailoutId& other) const { return id_ == other.id_; }
@@ -1030,9 +1130,12 @@ class BailoutId {
// code (function declarations).
static const int kDeclarationsId = 3;
- // Ever FunctionState starts with this id.
+ // Every FunctionState starts with this id.
static const int kFirstUsableId = 4;
+ // Every compiled stub starts with this id.
+ static const int kStubEntryId = 5;
+
int id_;
};
diff --git a/deps/v8/src/utils/random-number-generator.cc b/deps/v8/src/utils/random-number-generator.cc
new file mode 100644
index 000000000..fe273315a
--- /dev/null
+++ b/deps/v8/src/utils/random-number-generator.cc
@@ -0,0 +1,153 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "utils/random-number-generator.h"
+
+#include <cstdio>
+#include <cstdlib>
+
+#include "flags.h"
+#include "platform/mutex.h"
+#include "platform/time.h"
+#include "utils.h"
+
+namespace v8 {
+namespace internal {
+
+static LazyMutex entropy_mutex = LAZY_MUTEX_INITIALIZER;
+static RandomNumberGenerator::EntropySource entropy_source = NULL;
+
+
+// static
+void RandomNumberGenerator::SetEntropySource(EntropySource source) {
+ LockGuard<Mutex> lock_guard(entropy_mutex.Pointer());
+ entropy_source = source;
+}
+
+
+RandomNumberGenerator::RandomNumberGenerator() {
+ // Check --random-seed flag first.
+ if (FLAG_random_seed != 0) {
+ SetSeed(FLAG_random_seed);
+ return;
+ }
+
+ // Check if embedder supplied an entropy source.
+ { LockGuard<Mutex> lock_guard(entropy_mutex.Pointer());
+ if (entropy_source != NULL) {
+ int64_t seed;
+ if (entropy_source(reinterpret_cast<unsigned char*>(&seed),
+ sizeof(seed))) {
+ SetSeed(seed);
+ return;
+ }
+ }
+ }
+
+#if V8_OS_CYGWIN || V8_OS_WIN
+ // Use rand_s() to gather entropy on Windows. See:
+ // https://code.google.com/p/v8/issues/detail?id=2905
+ unsigned first_half, second_half;
+ errno_t result = rand_s(&first_half);
+ ASSERT_EQ(0, result);
+ result = rand_s(&second_half);
+ ASSERT_EQ(0, result);
+ SetSeed((static_cast<int64_t>(first_half) << 32) + second_half);
+#else
+ // Gather entropy from /dev/urandom if available.
+ FILE* fp = fopen("/dev/urandom", "rb");
+ if (fp != NULL) {
+ int64_t seed;
+ size_t n = fread(&seed, sizeof(seed), 1, fp);
+ fclose(fp);
+ if (n == 1) {
+ SetSeed(seed);
+ return;
+ }
+ }
+
+ // We cannot assume that random() or rand() were seeded
+ // properly, so instead of relying on random() or rand(),
+ // we just seed our PRNG using timing data as fallback.
+ // This is weak entropy, but it's sufficient, because
+ // it is the responsibility of the embedder to install
+ // an entropy source using v8::V8::SetEntropySource(),
+ // which provides reasonable entropy, see:
+ // https://code.google.com/p/v8/issues/detail?id=2905
+ int64_t seed = Time::NowFromSystemTime().ToInternalValue() << 24;
+ seed ^= TimeTicks::HighResolutionNow().ToInternalValue() << 16;
+ seed ^= TimeTicks::Now().ToInternalValue() << 8;
+ SetSeed(seed);
+#endif // V8_OS_CYGWIN || V8_OS_WIN
+}
+
+
+int RandomNumberGenerator::NextInt(int max) {
+ ASSERT_LE(0, max);
+
+ // Fast path if max is a power of 2.
+ if (IsPowerOf2(max)) {
+ return static_cast<int>((max * static_cast<int64_t>(Next(31))) >> 31);
+ }
+
+ while (true) {
+ int rnd = Next(31);
+ int val = rnd % max;
+ if (rnd - val + (max - 1) >= 0) {
+ return val;
+ }
+ }
+}
+
+
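+
The retry loop in NextInt(max) above avoids modulo bias: Next(31) is uniform over [0, 2^31), and draws from the final, partial bucket of that range must be rejected or the small residues would be slightly more likely. An equivalent standalone formulation (not the literal code above, and not part of the patch):

#include <cassert>
#include <cstdint>

static int BoundedSketch(uint32_t (*next31)(), uint32_t max) {
  const uint32_t range = 1u << 31;
  const uint32_t limit = range - (range % max);  // Largest accepted value + 1.
  while (true) {
    uint32_t rnd = next31();
    if (rnd < limit) return static_cast<int>(rnd % max);  // Unbiased.
  }
}

static uint32_t state;
static uint32_t FakeNext31() {  // Deterministic stand-in for Next(31).
  state = state * 1103515245u + 12345u;
  return state >> 1;  // 31 bits.
}

int main() {
  state = 1;
  int counts[10] = {0};
  for (int i = 0; i < 100000; i++) counts[BoundedSketch(FakeNext31, 10)]++;
  for (int i = 0; i < 10; i++) assert(counts[i] > 0);  // All residues occur.
  return 0;
}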
+double RandomNumberGenerator::NextDouble() {
+ return ((static_cast<int64_t>(Next(26)) << 27) + Next(27)) /
+ static_cast<double>(static_cast<int64_t>(1) << 53);
+}
+
+
+void RandomNumberGenerator::NextBytes(void* buffer, size_t buflen) {
+ for (size_t n = 0; n < buflen; ++n) {
+ static_cast<uint8_t*>(buffer)[n] = static_cast<uint8_t>(Next(8));
+ }
+}
+
+
+int RandomNumberGenerator::Next(int bits) {
+ ASSERT_LT(0, bits);
+ ASSERT_GE(32, bits);
+ int64_t seed = (seed_ * kMultiplier + kAddend) & kMask;
+ seed_ = seed;
+ return static_cast<int>(seed >> (48 - bits));
+}
+
+
+void RandomNumberGenerator::SetSeed(int64_t seed) {
+ seed_ = (seed ^ kMultiplier) & kMask;
+}
+
+} } // namespace v8::internal
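
Next() and SetSeed() above implement the same 48-bit linear congruential generator as java.util.Random: the state advances as seed = (seed * 0x5DEECE66D + 0xB) mod 2^48, and the top bits of the new state are returned. A standalone sketch (not part of the patch):

#include <cassert>
#include <cstdint>

static const uint64_t kMultiplier = 0x5deece66dULL;
static const uint64_t kAddend = 0xbULL;
static const uint64_t kMask = 0xffffffffffffULL;  // 2^48 - 1.

static uint64_t g_seed;

static void SetSeed(uint64_t seed) { g_seed = (seed ^ kMultiplier) & kMask; }

static int Next(int bits) {
  g_seed = (g_seed * kMultiplier + kAddend) & kMask;  // Advance the state.
  return static_cast<int>(g_seed >> (48 - bits));     // Keep the top bits.
}

int main() {
  SetSeed(42);
  int a = Next(32);
  SetSeed(42);
  int b = Next(32);
  assert(a == b);  // Same seed, same sequence.

  // NextDouble() above builds a 53-bit mantissa from 26 + 27 bits in [0, 1).
  int hi = Next(26);
  int lo = Next(27);
  double d = ((static_cast<int64_t>(hi) << 27) + lo) /
             static_cast<double>(1LL << 53);
  assert(d >= 0.0 && d < 1.0);
  return 0;
}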
diff --git a/deps/v8/src/utils/random-number-generator.h b/deps/v8/src/utils/random-number-generator.h
new file mode 100644
index 000000000..cc7d7395e
--- /dev/null
+++ b/deps/v8/src/utils/random-number-generator.h
@@ -0,0 +1,110 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_UTILS_RANDOM_NUMBER_GENERATOR_H_
+#define V8_UTILS_RANDOM_NUMBER_GENERATOR_H_
+
+#include "globals.h"
+
+namespace v8 {
+namespace internal {
+
+// -----------------------------------------------------------------------------
+// RandomNumberGenerator
+//
+// This class is used to generate a stream of pseudorandom numbers. The class
+// uses a 48-bit seed, which is modified using a linear congruential formula.
+// (See Donald Knuth, The Art of Computer Programming, Volume 2, Section 3.2.1.)
+// If two instances of RandomNumberGenerator are created with the same seed, and
+// the same sequence of method calls is made for each, they will generate and
+// return identical sequences of numbers.
+// By default this class seeds itself with (probably) weak entropy, which is
+// acceptable because it is the embedder's responsibility to install a proper
+// entropy source via v8::V8::SetEntropySource(); see:
+// https://code.google.com/p/v8/issues/detail?id=2905
+// This class is neither reentrant nor threadsafe.
+
+class RandomNumberGenerator V8_FINAL {
+ public:
+ // EntropySource is used as a callback function when V8 needs a source of
+ // entropy.
+ typedef bool (*EntropySource)(unsigned char* buffer, size_t buflen);
+ static void SetEntropySource(EntropySource entropy_source);
+
+ RandomNumberGenerator();
+ explicit RandomNumberGenerator(int64_t seed) { SetSeed(seed); }
+
+ // Returns the next pseudorandom, uniformly distributed int value from this
+ // random number generator's sequence. The general contract of |NextInt()| is
+ // that one int value is pseudorandomly generated and returned.
+ // All 2^32 possible integer values are produced with (approximately) equal
+ // probability.
+ V8_INLINE int NextInt() V8_WARN_UNUSED_RESULT {
+ return Next(32);
+ }
+
+ // Returns a pseudorandom, uniformly distributed int value between 0
+ // (inclusive) and the specified max value (exclusive), drawn from this random
+ // number generator's sequence. The general contract of |NextInt(int)| is that
+ // one int value in the specified range is pseudorandomly generated and
+ // returned. All max possible int values are produced with (approximately)
+ // equal probability.
+ int NextInt(int max) V8_WARN_UNUSED_RESULT;
+
+ // Returns the next pseudorandom, uniformly distributed boolean value from
+ // this random number generator's sequence. The general contract of
+ // |NextBoolean()| is that one boolean value is pseudorandomly generated and
+ // returned. The values true and false are produced with (approximately) equal
+ // probability.
+ V8_INLINE bool NextBool() V8_WARN_UNUSED_RESULT {
+ return Next(1) != 0;
+ }
+
+ // Returns the next pseudorandom, uniformly distributed double value between
+ // 0.0 and 1.0 from this random number generator's sequence.
+ // The general contract of |NextDouble()| is that one double value, chosen
+ // (approximately) uniformly from the range 0.0 (inclusive) to 1.0
+ // (exclusive), is pseudorandomly generated and returned.
+ double NextDouble() V8_WARN_UNUSED_RESULT;
+
+ // Fills the elements of a specified array of bytes with random numbers.
+ void NextBytes(void* buffer, size_t buflen);
+
+ private:
+ static const int64_t kMultiplier = V8_2PART_UINT64_C(0x5, deece66d);
+ static const int64_t kAddend = 0xb;
+ static const int64_t kMask = V8_2PART_UINT64_C(0xffff, ffffffff);
+
+ int Next(int bits) V8_WARN_UNUSED_RESULT;
+ void SetSeed(int64_t seed);
+
+ int64_t seed_;
+};
+
+} } // namespace v8::internal
+
+#endif // V8_UTILS_RANDOM_NUMBER_GENERATOR_H_
diff --git a/deps/v8/src/v8-counters.cc b/deps/v8/src/v8-counters.cc
index 3f83dffca..a0c3ebd07 100644
--- a/deps/v8/src/v8-counters.cc
+++ b/deps/v8/src/v8-counters.cc
@@ -32,73 +32,61 @@
namespace v8 {
namespace internal {
-Counters::Counters() {
+Counters::Counters(Isolate* isolate) {
#define HT(name, caption) \
- HistogramTimer name = { {#caption, 0, 10000, 50, NULL, false}, 0, 0 }; \
- name##_ = name;
+ name##_ = HistogramTimer(#caption, 0, 10000, 50, isolate);
HISTOGRAM_TIMER_LIST(HT)
#undef HT
#define HP(name, caption) \
- Histogram name = { #caption, 0, 101, 100, NULL, false }; \
- name##_ = name;
+ name##_ = Histogram(#caption, 0, 101, 100, isolate);
HISTOGRAM_PERCENTAGE_LIST(HP)
#undef HP
#define HM(name, caption) \
- Histogram name = { #caption, 1000, 500000, 50, NULL, false }; \
- name##_ = name;
+ name##_ = Histogram(#caption, 1000, 500000, 50, isolate);
HISTOGRAM_MEMORY_LIST(HM)
#undef HM
#define SC(name, caption) \
- StatsCounter name = { "c:" #caption, NULL, false };\
- name##_ = name;
+ name##_ = StatsCounter(isolate, "c:" #caption);
STATS_COUNTER_LIST_1(SC)
STATS_COUNTER_LIST_2(SC)
#undef SC
#define SC(name) \
- StatsCounter count_of_##name = { "c:" "V8.CountOf_" #name, NULL, false };\
- count_of_##name##_ = count_of_##name; \
- StatsCounter size_of_##name = { "c:" "V8.SizeOf_" #name, NULL, false };\
- size_of_##name##_ = size_of_##name;
+ count_of_##name##_ = StatsCounter(isolate, "c:" "V8.CountOf_" #name); \
+ size_of_##name##_ = StatsCounter(isolate, "c:" "V8.SizeOf_" #name);
INSTANCE_TYPE_LIST(SC)
#undef SC
#define SC(name) \
- StatsCounter count_of_CODE_TYPE_##name = { \
- "c:" "V8.CountOf_CODE_TYPE-" #name, NULL, false }; \
- count_of_CODE_TYPE_##name##_ = count_of_CODE_TYPE_##name; \
- StatsCounter size_of_CODE_TYPE_##name = { \
- "c:" "V8.SizeOf_CODE_TYPE-" #name, NULL, false }; \
- size_of_CODE_TYPE_##name##_ = size_of_CODE_TYPE_##name;
+ count_of_CODE_TYPE_##name##_ = \
+ StatsCounter(isolate, "c:" "V8.CountOf_CODE_TYPE-" #name); \
+ size_of_CODE_TYPE_##name##_ = \
+ StatsCounter(isolate, "c:" "V8.SizeOf_CODE_TYPE-" #name);
CODE_KIND_LIST(SC)
#undef SC
#define SC(name) \
- StatsCounter count_of_FIXED_ARRAY_##name = { \
- "c:" "V8.CountOf_FIXED_ARRAY-" #name, NULL, false }; \
- count_of_FIXED_ARRAY_##name##_ = count_of_FIXED_ARRAY_##name; \
- StatsCounter size_of_FIXED_ARRAY_##name = { \
- "c:" "V8.SizeOf_FIXED_ARRAY-" #name, NULL, false }; \
- size_of_FIXED_ARRAY_##name##_ = size_of_FIXED_ARRAY_##name;
+ count_of_FIXED_ARRAY_##name##_ = \
+ StatsCounter(isolate, "c:" "V8.CountOf_FIXED_ARRAY-" #name); \
+ size_of_FIXED_ARRAY_##name##_ = \
+ StatsCounter(isolate, "c:" "V8.SizeOf_FIXED_ARRAY-" #name);
FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(SC)
#undef SC
- StatsCounter state_counters[] = {
-#define COUNTER_NAME(name) \
- { "c:V8.State" #name, NULL, false },
- STATE_TAG_LIST(COUNTER_NAME)
-#undef COUNTER_NAME
- };
-
- for (int i = 0; i < kSlidingStateWindowCounterCount; ++i) {
- state_counters_[i] = state_counters[i];
- }
+#define SC(name) \
+ count_of_CODE_AGE_##name##_ = \
+ StatsCounter(isolate, "c:" "V8.CountOf_CODE_AGE-" #name); \
+ size_of_CODE_AGE_##name##_ = \
+ StatsCounter(isolate, "c:" "V8.SizeOf_CODE_AGE-" #name);
+ CODE_AGE_LIST_WITH_NO_AGE(SC)
+#undef SC
}
+
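+
The rewritten constructor above leans on the X-macro pattern: each *_LIST macro expands a caller-supplied macro once per counter, so the member declarations and their initialization cannot drift apart. A toy standalone version (hypothetical names, not part of the patch):

#include <cassert>
#include <string>

#define COUNTER_LIST(SC)                      \
  SC(memory_allocated, V8.OsMemoryAllocated)  \
  SC(normalized_maps, V8.NormalizedMaps)

struct CountersSketch {
#define SC(name, caption) std::string name##_;
  COUNTER_LIST(SC)  // Declares memory_allocated_ and normalized_maps_.
#undef SC

  CountersSketch() {
#define SC(name, caption) name##_ = "c:" #caption;
    COUNTER_LIST(SC)  // Initializes both members from the same list.
#undef SC
  }
};

int main() {
  CountersSketch counters;
  assert(counters.memory_allocated_ == "c:V8.OsMemoryAllocated");
  assert(counters.normalized_maps_ == "c:V8.NormalizedMaps");
  return 0;
}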
void Counters::ResetHistograms() {
#define HT(name, caption) name##_.Reset();
HISTOGRAM_TIMER_LIST(HT)
diff --git a/deps/v8/src/v8-counters.h b/deps/v8/src/v8-counters.h
index fad345481..476021cdb 100644
--- a/deps/v8/src/v8-counters.h
+++ b/deps/v8/src/v8-counters.h
@@ -50,8 +50,8 @@ namespace internal {
HT(compile_eval, V8.CompileEval) \
HT(compile_lazy, V8.CompileLazy)
-
#define HISTOGRAM_PERCENTAGE_LIST(HP) \
+ /* Heap fragmentation. */ \
HP(external_fragmentation_total, \
V8.MemoryExternalFragmentationTotal) \
HP(external_fragmentation_old_pointer_space, \
@@ -64,12 +64,30 @@ namespace internal {
V8.MemoryExternalFragmentationMapSpace) \
HP(external_fragmentation_cell_space, \
V8.MemoryExternalFragmentationCellSpace) \
+ HP(external_fragmentation_property_cell_space, \
+ V8.MemoryExternalFragmentationPropertyCellSpace) \
HP(external_fragmentation_lo_space, \
V8.MemoryExternalFragmentationLoSpace) \
+ /* Percentages of heap committed to each space. */ \
+ HP(heap_fraction_new_space, \
+ V8.MemoryHeapFractionNewSpace) \
+ HP(heap_fraction_old_pointer_space, \
+ V8.MemoryHeapFractionOldPointerSpace) \
+ HP(heap_fraction_old_data_space, \
+ V8.MemoryHeapFractionOldDataSpace) \
+ HP(heap_fraction_code_space, \
+ V8.MemoryHeapFractionCodeSpace) \
HP(heap_fraction_map_space, \
V8.MemoryHeapFractionMapSpace) \
HP(heap_fraction_cell_space, \
V8.MemoryHeapFractionCellSpace) \
+ HP(heap_fraction_property_cell_space, \
+ V8.MemoryHeapFractionPropertyCellSpace) \
+ HP(heap_fraction_lo_space, \
+ V8.MemoryHeapFractionLoSpace) \
+ /* Percentage of crankshafted codegen. */ \
+ HP(codegen_fraction_crankshaft, \
+ V8.CodegenFractionCrankshaft) \
#define HISTOGRAM_MEMORY_LIST(HM) \
@@ -78,7 +96,11 @@ namespace internal {
HM(heap_sample_map_space_committed, \
V8.MemoryHeapSampleMapSpaceCommitted) \
HM(heap_sample_cell_space_committed, \
- V8.MemoryHeapSampleCellSpaceCommitted)
+ V8.MemoryHeapSampleCellSpaceCommitted) \
+ HM(heap_sample_property_cell_space_committed, \
+ V8.MemoryHeapSamplePropertyCellSpaceCommitted) \
+ HM(heap_sample_code_space_committed, \
+ V8.MemoryHeapSampleCodeSpaceCommitted) \
// WARNING: STATS_COUNTER_LIST_* is a very large macro that is causing MSVC
@@ -89,8 +111,6 @@ namespace internal {
#define STATS_COUNTER_LIST_1(SC) \
/* Global Handle Count*/ \
SC(global_handles, V8.GlobalHandles) \
- /* Mallocs from PCRE */ \
- SC(pcre_mallocs, V8.PcreMallocCount) \
/* OS Memory allocated */ \
SC(memory_allocated, V8.OsMemoryAllocated) \
SC(normalized_maps, V8.NormalizedMaps) \
@@ -99,7 +119,7 @@ namespace internal {
SC(alive_after_last_gc, V8.AliveAfterLastGC) \
SC(objs_since_last_young, V8.ObjsSinceLastYoung) \
SC(objs_since_last_full, V8.ObjsSinceLastFull) \
- SC(symbol_table_capacity, V8.SymbolTableCapacity) \
+ SC(string_table_capacity, V8.StringTableCapacity) \
SC(number_of_symbols, V8.NumberOfSymbols) \
SC(script_wrappers, V8.ScriptWrappers) \
SC(call_initialize_stubs, V8.CallInitializeStubs) \
@@ -109,8 +129,6 @@ namespace internal {
SC(arguments_adaptors, V8.ArgumentsAdaptors) \
SC(compilation_cache_hits, V8.CompilationCacheHits) \
SC(compilation_cache_misses, V8.CompilationCacheMisses) \
- SC(regexp_cache_hits, V8.RegExpCacheHits) \
- SC(regexp_cache_misses, V8.RegExpCacheMisses) \
SC(string_ctor_calls, V8.StringConstructorCalls) \
SC(string_ctor_conversions, V8.StringConstructorConversions) \
SC(string_ctor_cached_number, V8.StringConstructorCachedNumber) \
@@ -128,8 +146,6 @@ namespace internal {
SC(total_preparse_symbols_skipped, V8.TotalPreparseSymbolSkipped) \
/* Amount of compiled source code. */ \
SC(total_compile_size, V8.TotalCompileSize) \
- /* Amount of source code compiled with the old codegen. */ \
- SC(total_old_codegen_source_size, V8.TotalOldCodegenSourceSize) \
/* Amount of source code compiled with the full codegen. */ \
SC(total_full_codegen_source_size, V8.TotalFullCodegenSourceSize) \
/* Number of contexts created from scratch. */ \
@@ -156,8 +172,6 @@ namespace internal {
V8.GCCompactorCausedByPromotedData) \
SC(gc_compactor_caused_by_oldspace_exhaustion, \
V8.GCCompactorCausedByOldspaceExhaustion) \
- SC(gc_compactor_caused_by_weak_handles, \
- V8.GCCompactorCausedByWeakHandles) \
SC(gc_last_resort_from_js, V8.GCLastResortFromJS) \
SC(gc_last_resort_from_handles, V8.GCLastResortFromHandles) \
/* How is the generic keyed-load stub used? */ \
@@ -172,39 +186,9 @@ namespace internal {
SC(keyed_call_generic_smi_dict, V8.KeyedCallGenericSmiDict) \
SC(keyed_call_generic_lookup_cache, V8.KeyedCallGenericLookupCache) \
SC(keyed_call_generic_lookup_dict, V8.KeyedCallGenericLookupDict) \
- SC(keyed_call_generic_value_type, V8.KeyedCallGenericValueType) \
SC(keyed_call_generic_slow, V8.KeyedCallGenericSlow) \
SC(keyed_call_generic_slow_load, V8.KeyedCallGenericSlowLoad) \
- /* Count how much the monomorphic keyed-load stubs are hit. */ \
- SC(keyed_load_function_prototype, V8.KeyedLoadFunctionPrototype) \
- SC(keyed_load_string_length, V8.KeyedLoadStringLength) \
- SC(keyed_load_array_length, V8.KeyedLoadArrayLength) \
- SC(keyed_load_constant_function, V8.KeyedLoadConstantFunction) \
- SC(keyed_load_field, V8.KeyedLoadField) \
- SC(keyed_load_callback, V8.KeyedLoadCallback) \
- SC(keyed_load_interceptor, V8.KeyedLoadInterceptor) \
- SC(keyed_load_inline, V8.KeyedLoadInline) \
- SC(keyed_load_inline_miss, V8.KeyedLoadInlineMiss) \
- SC(named_load_inline, V8.NamedLoadInline) \
- SC(named_load_inline_miss, V8.NamedLoadInlineMiss) \
- SC(named_load_global_inline, V8.NamedLoadGlobalInline) \
- SC(named_load_global_inline_miss, V8.NamedLoadGlobalInlineMiss) \
- SC(dont_delete_hint_hit, V8.DontDeleteHintHit) \
- SC(dont_delete_hint_miss, V8.DontDeleteHintMiss) \
SC(named_load_global_stub, V8.NamedLoadGlobalStub) \
- SC(named_load_global_stub_miss, V8.NamedLoadGlobalStubMiss) \
- SC(keyed_store_field, V8.KeyedStoreField) \
- SC(named_store_inline_field, V8.NamedStoreInlineField) \
- SC(keyed_store_inline, V8.KeyedStoreInline) \
- SC(named_load_inline_generic, V8.NamedLoadInlineGeneric) \
- SC(named_load_inline_field, V8.NamedLoadInlineFast) \
- SC(keyed_load_inline_generic, V8.KeyedLoadInlineGeneric) \
- SC(keyed_load_inline_fast, V8.KeyedLoadInlineFast) \
- SC(keyed_store_inline_generic, V8.KeyedStoreInlineGeneric) \
- SC(keyed_store_inline_fast, V8.KeyedStoreInlineFast) \
- SC(named_store_inline_generic, V8.NamedStoreInlineGeneric) \
- SC(named_store_inline_fast, V8.NamedStoreInlineFast) \
- SC(keyed_store_inline_miss, V8.KeyedStoreInlineMiss) \
SC(named_store_global_inline, V8.NamedStoreGlobalInline) \
SC(named_store_global_inline_miss, V8.NamedStoreGlobalInlineMiss) \
SC(keyed_store_polymorphic_stubs, V8.KeyedStorePolymorphicStubs) \
@@ -226,7 +210,6 @@ namespace internal {
SC(call_global_inline_miss, V8.CallGlobalInlineMiss) \
SC(constructed_objects, V8.ConstructedObjects) \
SC(constructed_objects_runtime, V8.ConstructedObjectsRuntime) \
- SC(constructed_objects_stub, V8.ConstructedObjectsStub) \
SC(negative_lookups, V8.NegativeLookups) \
SC(negative_lookups_miss, V8.NegativeLookupsMiss) \
SC(megamorphic_stub_cache_probes, V8.MegamorphicStubCacheProbes) \
@@ -238,9 +221,6 @@ namespace internal {
SC(enum_cache_hits, V8.EnumCacheHits) \
SC(enum_cache_misses, V8.EnumCacheMisses) \
SC(zone_segment_bytes, V8.ZoneSegmentBytes) \
- SC(compute_entry_frame, V8.ComputeEntryFrame) \
- SC(generic_binary_stub_calls, V8.GenericBinaryStubCalls) \
- SC(generic_binary_stub_calls_regs, V8.GenericBinaryStubCallsRegs) \
SC(fast_new_closure_total, V8.FastNewClosureTotal) \
SC(fast_new_closure_try_optimized, V8.FastNewClosureTryOptimized) \
SC(fast_new_closure_install_optimized, V8.FastNewClosureInstallOptimized) \
@@ -274,10 +254,11 @@ namespace internal {
SC(transcendental_cache_miss, V8.TranscendentalCacheMiss) \
SC(stack_interrupts, V8.StackInterrupts) \
SC(runtime_profiler_ticks, V8.RuntimeProfilerTicks) \
- SC(smi_checks_removed, V8.SmiChecksRemoved) \
- SC(map_checks_removed, V8.MapChecksRemoved) \
- SC(quote_json_char_count, V8.QuoteJsonCharacterCount) \
- SC(quote_json_char_recount, V8.QuoteJsonCharacterReCount) \
+ SC(bounds_checks_eliminated, V8.BoundsChecksEliminated) \
+ SC(bounds_checks_hoisted, V8.BoundsChecksHoisted) \
+ SC(soft_deopts_requested, V8.SoftDeoptsRequested) \
+ SC(soft_deopts_inserted, V8.SoftDeoptsInserted) \
+ SC(soft_deopts_executed, V8.SoftDeoptsExecuted) \
SC(new_space_bytes_available, V8.MemoryNewSpaceBytesAvailable) \
SC(new_space_bytes_committed, V8.MemoryNewSpaceBytesCommitted) \
SC(new_space_bytes_used, V8.MemoryNewSpaceBytesUsed) \
@@ -298,6 +279,12 @@ namespace internal {
SC(cell_space_bytes_available, V8.MemoryCellSpaceBytesAvailable) \
SC(cell_space_bytes_committed, V8.MemoryCellSpaceBytesCommitted) \
SC(cell_space_bytes_used, V8.MemoryCellSpaceBytesUsed) \
+ SC(property_cell_space_bytes_available, \
+ V8.MemoryPropertyCellSpaceBytesAvailable) \
+ SC(property_cell_space_bytes_committed, \
+ V8.MemoryPropertyCellSpaceBytesCommitted) \
+ SC(property_cell_space_bytes_used, \
+ V8.MemoryPropertyCellSpaceBytesUsed) \
SC(lo_space_bytes_available, V8.MemoryLoSpaceBytesAvailable) \
SC(lo_space_bytes_committed, V8.MemoryLoSpaceBytesCommitted) \
SC(lo_space_bytes_used, V8.MemoryLoSpaceBytesUsed)
@@ -349,6 +336,14 @@ class Counters {
FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(SC)
#undef SC
+#define SC(name) \
+ StatsCounter* count_of_CODE_AGE_##name() \
+ { return &count_of_CODE_AGE_##name##_; } \
+ StatsCounter* size_of_CODE_AGE_##name() \
+ { return &size_of_CODE_AGE_##name##_; }
+ CODE_AGE_LIST_WITH_NO_AGE(SC)
+#undef SC
+
enum Id {
#define RATE_ID(name, caption) k_##name,
HISTOGRAM_TIMER_LIST(RATE_ID)
@@ -374,16 +369,13 @@ class Counters {
kSizeOfFIXED_ARRAY__##name,
FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(COUNTER_ID)
#undef COUNTER_ID
-#define COUNTER_ID(name) k_##name,
- STATE_TAG_LIST(COUNTER_ID)
+#define COUNTER_ID(name) kCountOfCODE_AGE__##name, \
+ kSizeOfCODE_AGE__##name,
+ CODE_AGE_LIST_WITH_NO_AGE(COUNTER_ID)
#undef COUNTER_ID
stats_counter_count
};
- StatsCounter* state_counters(StateTag state) {
- return &state_counters_[state];
- }
-
void ResetHistograms();
private:
@@ -426,17 +418,16 @@ class Counters {
FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(SC)
#undef SC
- enum {
-#define COUNTER_ID(name) __##name,
- STATE_TAG_LIST(COUNTER_ID)
-#undef COUNTER_ID
- kSlidingStateWindowCounterCount
- };
+#define SC(name) \
+ StatsCounter size_of_CODE_AGE_##name##_; \
+ StatsCounter count_of_CODE_AGE_##name##_;
+ CODE_AGE_LIST_WITH_NO_AGE(SC)
+#undef SC
- // Sliding state window counters.
- StatsCounter state_counters_[kSlidingStateWindowCounterCount];
friend class Isolate;
+ explicit Counters(Isolate* isolate);
+
DISALLOW_IMPLICIT_CONSTRUCTORS(Counters);
};
diff --git a/deps/v8/src/v8.cc b/deps/v8/src/v8.cc
index 2407037b3..62330c32d 100644
--- a/deps/v8/src/v8.cc
+++ b/deps/v8/src/v8.cc
@@ -37,9 +37,10 @@
#include "heap-profiler.h"
#include "hydrogen.h"
#include "lithium-allocator.h"
-#include "log.h"
+#include "objects.h"
#include "once.h"
#include "platform.h"
+#include "sampler.h"
#include "runtime-profiler.h"
#include "serialize.h"
#include "store-buffer.h"
@@ -49,21 +50,11 @@ namespace internal {
V8_DECLARE_ONCE(init_once);
-bool V8::is_running_ = false;
-bool V8::has_been_set_up_ = false;
-bool V8::has_been_disposed_ = false;
-bool V8::has_fatal_error_ = false;
-bool V8::use_crankshaft_ = true;
List<CallCompletedCallback>* V8::call_completed_callbacks_ = NULL;
-
-static LazyMutex entropy_mutex = LAZY_MUTEX_INITIALIZER;
-
-static EntropySource entropy_source;
+v8::ArrayBuffer::Allocator* V8::array_buffer_allocator_ = NULL;
bool V8::Initialize(Deserializer* des) {
- FlagList::EnforceFlagImplications();
-
InitializeOncePerProcess();
  // The current thread may not yet have entered an isolate to run.
@@ -80,31 +71,18 @@ bool V8::Initialize(Deserializer* des) {
ASSERT(i::Isolate::CurrentPerIsolateThreadData()->isolate() ==
i::Isolate::Current());
- if (IsDead()) return false;
-
Isolate* isolate = Isolate::Current();
+ if (isolate->IsDead()) return false;
if (isolate->IsInitialized()) return true;
- is_running_ = true;
- has_been_set_up_ = true;
- has_fatal_error_ = false;
- has_been_disposed_ = false;
-
return isolate->Init(des);
}
-void V8::SetFatalError() {
- is_running_ = false;
- has_fatal_error_ = true;
-}
-
-
void V8::TearDown() {
Isolate* isolate = Isolate::Current();
ASSERT(isolate->IsDefaultIsolate());
-
- if (!has_been_set_up_ || has_been_disposed_) return;
+ if (!isolate->IsInitialized()) return;
// The isolate has to be torn down before clearing the LOperand
// caches so that the optimizing compiler thread (if running)
@@ -114,50 +92,14 @@ void V8::TearDown() {
ElementsAccessor::TearDown();
LOperand::TearDownCaches();
+ ExternalReference::TearDownMathExpData();
RegisteredExtension::UnregisterAll();
-
- is_running_ = false;
- has_been_disposed_ = true;
+ Isolate::GlobalTearDown();
delete call_completed_callbacks_;
call_completed_callbacks_ = NULL;
- OS::TearDown();
-}
-
-
-static void seed_random(uint32_t* state) {
- for (int i = 0; i < 2; ++i) {
- if (FLAG_random_seed != 0) {
- state[i] = FLAG_random_seed;
- } else if (entropy_source != NULL) {
- uint32_t val;
- ScopedLock lock(entropy_mutex.Pointer());
- entropy_source(reinterpret_cast<unsigned char*>(&val), sizeof(uint32_t));
- state[i] = val;
- } else {
- state[i] = random();
- }
- }
-}
-
-
-// Random number generator using George Marsaglia's MWC algorithm.
-static uint32_t random_base(uint32_t* state) {
- // Initialize seed using the system random().
- // No non-zero seed will ever become zero again.
- if (state[0] == 0) seed_random(state);
-
- // Mix the bits. Never replaces state[i] with 0 if it is nonzero.
- state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16);
- state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16);
-
- return (state[0] << 14) + (state[1] & 0x3FFFF);
-}
-
-
-void V8::SetEntropySource(EntropySource source) {
- entropy_source = source;
+ Sampler::TearDown();
}
@@ -171,26 +113,18 @@ void V8::SetReturnAddressLocationResolver(
uint32_t V8::Random(Context* context) {
ASSERT(context->IsNativeContext());
ByteArray* seed = context->random_seed();
- return random_base(reinterpret_cast<uint32_t*>(seed->GetDataStartAddress()));
-}
-
-
-// Used internally by the JIT and memory allocator for security
-// purposes. So, we keep a different state to prevent informations
-// leaks that could be used in an exploit.
-uint32_t V8::RandomPrivate(Isolate* isolate) {
- ASSERT(isolate == Isolate::Current());
- return random_base(isolate->private_random_seed());
-}
+ uint32_t* state = reinterpret_cast<uint32_t*>(seed->GetDataStartAddress());
+ // When we get here, the RNG must have been initialized,
+ // see the Genesis constructor in file bootstrapper.cc.
+ ASSERT_NE(0, state[0]);
+ ASSERT_NE(0, state[1]);
-bool V8::IdleNotification(int hint) {
- // Returning true tells the caller that there is no need to call
- // IdleNotification again.
- if (!FLAG_use_idle_notification) return true;
+ // Mix the bits. Never replaces state[i] with 0 if it is nonzero.
+ state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16);
+ state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16);
- // Tell the heap that it may want to adjust.
- return HEAP->IdleNotification(hint);
+ return (state[0] << 14) + (state[1] & 0x3FFFF);
}
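
A stand-alone JavaScript sketch of the multiply-with-carry (MWC) mixing step that the hunk above inlines into V8::Random; this is a re-implementation for illustration only, with Uint32Array storage and ">>> 0" standing in for the uint32_t wrap-around of the C++ code.

// Illustrative re-implementation of the MWC step shown above.
// "state" is a Uint32Array(2); both words are assumed non-zero, as the
// ASSERT_NE checks in the hunk require.
function mwcRandom(state) {
  // Each product stays below 2^32, so the arithmetic is exact in a double
  // before the typed array truncates it back to 32 bits.
  state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >>> 16);
  state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >>> 16);
  // ">>> 0" reproduces the unsigned 32-bit overflow of the C++ expression.
  return ((state[0] << 14) + (state[1] & 0x3FFFF)) >>> 0;
}
var seed = new Uint32Array([1, 2]);
console.log(mwcRandom(seed), mwcRandom(seed));  // deterministic for a given seed
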
@@ -216,14 +150,22 @@ void V8::RemoveCallCompletedCallback(CallCompletedCallback callback) {
void V8::FireCallCompletedCallback(Isolate* isolate) {
- if (call_completed_callbacks_ == NULL) return;
+ bool has_call_completed_callbacks = call_completed_callbacks_ != NULL;
+ bool observer_delivery_pending =
+ FLAG_harmony_observation && isolate->observer_delivery_pending();
+ if (!has_call_completed_callbacks && !observer_delivery_pending) return;
HandleScopeImplementer* handle_scope_implementer =
isolate->handle_scope_implementer();
if (!handle_scope_implementer->CallDepthIsZero()) return;
// Fire callbacks. Increase call depth to prevent recursive callbacks.
handle_scope_implementer->IncrementCallDepth();
- for (int i = 0; i < call_completed_callbacks_->length(); i++) {
- call_completed_callbacks_->at(i)();
+ if (observer_delivery_pending) {
+ JSObject::DeliverChangeRecords(isolate);
+ }
+ if (has_call_completed_callbacks) {
+ for (int i = 0; i < call_completed_callbacks_->length(); i++) {
+ call_completed_callbacks_->at(i)();
+ }
}
handle_scope_implementer->DecrementCallDepth();
}
@@ -252,38 +194,56 @@ Object* V8::FillHeapNumberWithRandom(Object* heap_number,
return heap_number;
}
+
void V8::InitializeOncePerProcessImpl() {
- OS::SetUp();
+ FlagList::EnforceFlagImplications();
+ if (FLAG_stress_compaction) {
+ FLAG_force_marking_deque_overflows = true;
+ FLAG_gc_global = true;
+ FLAG_max_new_space_size = (1 << (kPageSizeBits - 10)) * 2;
+ }
- use_crankshaft_ = FLAG_crankshaft;
+ if (FLAG_concurrent_recompilation &&
+ (FLAG_trace_hydrogen || FLAG_trace_hydrogen_stubs)) {
+ FLAG_concurrent_recompilation = false;
+ PrintF("Concurrent recompilation has been disabled for tracing.\n");
+ }
- if (Serializer::enabled()) {
- use_crankshaft_ = false;
+ if (FLAG_sweeper_threads <= 0) {
+ if (FLAG_concurrent_sweeping) {
+ FLAG_sweeper_threads = SystemThreadManager::
+ NumberOfParallelSystemThreads(
+ SystemThreadManager::CONCURRENT_SWEEPING);
+ } else if (FLAG_parallel_sweeping) {
+ FLAG_sweeper_threads = SystemThreadManager::
+ NumberOfParallelSystemThreads(
+ SystemThreadManager::PARALLEL_SWEEPING);
+ }
+ if (FLAG_sweeper_threads == 0) {
+ FLAG_concurrent_sweeping = false;
+ FLAG_parallel_sweeping = false;
+ }
+ } else if (!FLAG_concurrent_sweeping && !FLAG_parallel_sweeping) {
+ FLAG_sweeper_threads = 0;
}
- CPU::SetUp();
- if (!CPU::SupportsCrankshaft()) {
- use_crankshaft_ = false;
+ if (FLAG_concurrent_recompilation &&
+ SystemThreadManager::NumberOfParallelSystemThreads(
+ SystemThreadManager::PARALLEL_RECOMPILATION) == 0) {
+ FLAG_concurrent_recompilation = false;
}
+ Sampler::SetUp();
+ CPU::SetUp();
OS::PostSetUp();
-
- RuntimeProfiler::GlobalSetUp();
-
ElementsAccessor::InitializeOncePerProcess();
-
- if (FLAG_stress_compaction) {
- FLAG_force_marking_deque_overflows = true;
- FLAG_gc_global = true;
- FLAG_max_new_space_size = (1 << (kPageSizeBits - 10)) * 2;
- }
-
LOperand::SetUpCaches();
SetUpJSCallerSavedCodeData();
- SamplerRegistry::SetUp();
ExternalReference::SetUp();
+ Bootstrapper::InitializeOncePerProcess();
}
+
void V8::InitializeOncePerProcess() {
CallOnce(&init_once, &InitializeOncePerProcessImpl);
}
diff --git a/deps/v8/src/v8.h b/deps/v8/src/v8.h
index 67716d810..5848f7481 100644
--- a/deps/v8/src/v8.h
+++ b/deps/v8/src/v8.h
@@ -53,6 +53,7 @@
#include "v8globals.h"
#include "v8checks.h"
#include "allocation.h"
+#include "assert-scope.h"
#include "v8utils.h"
#include "flags.h"
@@ -63,7 +64,6 @@
#include "incremental-marking-inl.h"
#include "mark-compact-inl.h"
#include "log-inl.h"
-#include "cpu-profiler-inl.h"
#include "handles-inl.h"
#include "zone-inl.h"
@@ -82,12 +82,6 @@ class V8 : public AllStatic {
// empty heap.
static bool Initialize(Deserializer* des);
static void TearDown();
- static bool IsRunning() { return is_running_; }
- static bool UseCrankshaft() { return use_crankshaft_; }
- // To be dead you have to have lived
- // TODO(isolates): move IsDead to Isolate.
- static bool IsDead() { return has_fatal_error_ || has_been_disposed_; }
- static void SetFatalError();
// Report process out of memory. Implementation found in api.cc.
static void FatalProcessOutOfMemory(const char* location,
@@ -99,41 +93,34 @@ class V8 : public AllStatic {
// Support for return-address rewriting profilers.
static void SetReturnAddressLocationResolver(
ReturnAddressLocationResolver resolver);
+ // Support for entry hooking JITed code.
+ static void SetFunctionEntryHook(FunctionEntryHook entry_hook);
// Random number generation support. Not cryptographically safe.
static uint32_t Random(Context* context);
- // We use random numbers internally in memory allocation and in the
- // compilers for security. In order to prevent information leaks we
- // use a separate random state for internal random number
- // generation.
- static uint32_t RandomPrivate(Isolate* isolate);
static Object* FillHeapNumberWithRandom(Object* heap_number,
Context* context);
- // Idle notification directly from the API.
- static bool IdleNotification(int hint);
-
static void AddCallCompletedCallback(CallCompletedCallback callback);
static void RemoveCallCompletedCallback(CallCompletedCallback callback);
static void FireCallCompletedCallback(Isolate* isolate);
+ static v8::ArrayBuffer::Allocator* ArrayBufferAllocator() {
+ return array_buffer_allocator_;
+ }
+
+ static void SetArrayBufferAllocator(v8::ArrayBuffer::Allocator *allocator) {
+ CHECK_EQ(NULL, array_buffer_allocator_);
+ array_buffer_allocator_ = allocator;
+ }
+
private:
static void InitializeOncePerProcessImpl();
static void InitializeOncePerProcess();
- // True if engine is currently running
- static bool is_running_;
- // True if V8 has ever been run
- static bool has_been_set_up_;
- // True if error has been signaled for current engine
- // (reset to false if engine is restarted)
- static bool has_fatal_error_;
- // True if engine has been shut down
- // (reset if engine is restarted)
- static bool has_been_disposed_;
- // True if we are using the crankshaft optimizing compiler.
- static bool use_crankshaft_;
// List of callbacks when a Call completes.
static List<CallCompletedCallback>* call_completed_callbacks_;
+ // Allocator for external array buffers.
+ static v8::ArrayBuffer::Allocator* array_buffer_allocator_;
};
@@ -141,10 +128,6 @@ class V8 : public AllStatic {
enum NilValue { kNullValue, kUndefinedValue };
-// JavaScript defines two kinds of equality.
-enum EqualityKind { kStrictEquality, kNonStrictEquality };
-
-
} } // namespace v8::internal
namespace i = v8::internal;
diff --git a/deps/v8/src/v8conversions.cc b/deps/v8/src/v8conversions.cc
index bf175e50b..900b62d10 100644
--- a/deps/v8/src/v8conversions.cc
+++ b/deps/v8/src/v8conversions.cc
@@ -41,40 +41,40 @@ namespace internal {
namespace {
-// C++-style iterator adaptor for StringInputBuffer
+// C++-style iterator adaptor for StringCharacterStream
// (unlike C++ iterators the end-marker has different type).
-class StringInputBufferIterator {
+class StringCharacterStreamIterator {
public:
class EndMarker {};
- explicit StringInputBufferIterator(StringInputBuffer* buffer);
+ explicit StringCharacterStreamIterator(StringCharacterStream* stream);
- int operator*() const;
+ uint16_t operator*() const;
void operator++();
bool operator==(EndMarker const&) const { return end_; }
bool operator!=(EndMarker const& m) const { return !end_; }
private:
- StringInputBuffer* const buffer_;
- int current_;
+ StringCharacterStream* const stream_;
+ uint16_t current_;
bool end_;
};
-StringInputBufferIterator::StringInputBufferIterator(
- StringInputBuffer* buffer) : buffer_(buffer) {
+StringCharacterStreamIterator::StringCharacterStreamIterator(
+ StringCharacterStream* stream) : stream_(stream) {
++(*this);
}
-int StringInputBufferIterator::operator*() const {
+uint16_t StringCharacterStreamIterator::operator*() const {
return current_;
}
-void StringInputBufferIterator::operator++() {
- end_ = !buffer_->has_more();
+void StringCharacterStreamIterator::operator++() {
+ end_ = !stream_->HasMore();
if (!end_) {
- current_ = buffer_->GetNext();
+ current_ = stream_->GetNext();
}
}
} // End anonymous namespace.
@@ -83,9 +83,10 @@ void StringInputBufferIterator::operator++() {
double StringToDouble(UnicodeCache* unicode_cache,
String* str, int flags, double empty_string_val) {
StringShape shape(str);
+ // TODO(dcarney): Use a Visitor here.
if (shape.IsSequentialAscii()) {
- const char* begin = SeqAsciiString::cast(str)->GetChars();
- const char* end = begin + str->length();
+ const uint8_t* begin = SeqOneByteString::cast(str)->GetChars();
+ const uint8_t* end = begin + str->length();
return InternalStringToDouble(unicode_cache, begin, end, flags,
empty_string_val);
} else if (shape.IsSequentialTwoByte()) {
@@ -94,10 +95,11 @@ double StringToDouble(UnicodeCache* unicode_cache,
return InternalStringToDouble(unicode_cache, begin, end, flags,
empty_string_val);
} else {
- StringInputBuffer buffer(str);
+ ConsStringIteratorOp op;
+ StringCharacterStream stream(str, &op);
return InternalStringToDouble(unicode_cache,
- StringInputBufferIterator(&buffer),
- StringInputBufferIterator::EndMarker(),
+ StringCharacterStreamIterator(&stream),
+ StringCharacterStreamIterator::EndMarker(),
flags,
empty_string_val);
}
@@ -108,19 +110,21 @@ double StringToInt(UnicodeCache* unicode_cache,
String* str,
int radix) {
StringShape shape(str);
+ // TODO(dcarney): Use a Visitor here.
if (shape.IsSequentialAscii()) {
- const char* begin = SeqAsciiString::cast(str)->GetChars();
- const char* end = begin + str->length();
+ const uint8_t* begin = SeqOneByteString::cast(str)->GetChars();
+ const uint8_t* end = begin + str->length();
return InternalStringToInt(unicode_cache, begin, end, radix);
} else if (shape.IsSequentialTwoByte()) {
const uc16* begin = SeqTwoByteString::cast(str)->GetChars();
const uc16* end = begin + str->length();
return InternalStringToInt(unicode_cache, begin, end, radix);
} else {
- StringInputBuffer buffer(str);
+ ConsStringIteratorOp op;
+ StringCharacterStream stream(str, &op);
return InternalStringToInt(unicode_cache,
- StringInputBufferIterator(&buffer),
- StringInputBufferIterator::EndMarker(),
+ StringCharacterStreamIterator(&stream),
+ StringCharacterStreamIterator::EndMarker(),
radix);
}
}
diff --git a/deps/v8/src/v8conversions.h b/deps/v8/src/v8conversions.h
index 0147d8c37..3a7b5242a 100644
--- a/deps/v8/src/v8conversions.h
+++ b/deps/v8/src/v8conversions.h
@@ -55,6 +55,19 @@ double StringToDouble(UnicodeCache* unicode_cache,
// Converts a string into an integer.
double StringToInt(UnicodeCache* unicode_cache, String* str, int radix);
+// Converts a number into size_t.
+inline size_t NumberToSize(Isolate* isolate,
+ Object* number) {
+ SealHandleScope shs(isolate);
+ if (number->IsSmi()) {
+ return Smi::cast(number)->value();
+ } else {
+ ASSERT(number->IsHeapNumber());
+ double value = HeapNumber::cast(number)->value();
+ return static_cast<size_t>(value);
+ }
+}
+
} } // namespace v8::internal
#endif // V8_V8CONVERSIONS_H_
diff --git a/deps/v8/src/v8dll-main.cc b/deps/v8/src/v8dll-main.cc
index 49d868957..7f6c9f955 100644
--- a/deps/v8/src/v8dll-main.cc
+++ b/deps/v8/src/v8dll-main.cc
@@ -30,8 +30,8 @@
#undef USING_V8_SHARED
#include "../include/v8.h"
-#ifdef WIN32
-#include <windows.h> // NOLINT
+#if V8_OS_WIN
+#include "win32-headers.h"
extern "C" {
BOOL WINAPI DllMain(HANDLE hinstDLL,
@@ -41,4 +41,4 @@ BOOL WINAPI DllMain(HANDLE hinstDLL,
return TRUE;
}
}
-#endif
+#endif // V8_OS_WIN
diff --git a/deps/v8/src/v8globals.h b/deps/v8/src/v8globals.h
index 95390adcf..7fa2fd62c 100644
--- a/deps/v8/src/v8globals.h
+++ b/deps/v8/src/v8globals.h
@@ -71,6 +71,8 @@ const Address kZapValue =
reinterpret_cast<Address>(V8_UINT64_C(0xdeadbeedbeadbeef));
const Address kHandleZapValue =
reinterpret_cast<Address>(V8_UINT64_C(0x1baddead0baddeaf));
+const Address kGlobalHandleZapValue =
+ reinterpret_cast<Address>(V8_UINT64_C(0x1baffed00baffedf));
const Address kFromSpaceZapValue =
reinterpret_cast<Address>(V8_UINT64_C(0x1beefdad0beefdaf));
const uint64_t kDebugZapValue = V8_UINT64_C(0xbadbaddbbadbaddb);
@@ -79,6 +81,7 @@ const uint64_t kFreeListZapValue = 0xfeed1eaffeed1eaf;
#else
const Address kZapValue = reinterpret_cast<Address>(0xdeadbeef);
const Address kHandleZapValue = reinterpret_cast<Address>(0xbaddeaf);
+const Address kGlobalHandleZapValue = reinterpret_cast<Address>(0xbaffedf);
const Address kFromSpaceZapValue = reinterpret_cast<Address>(0xbeefdaf);
const uint32_t kSlotsZapValue = 0xbeefdeef;
const uint32_t kDebugZapValue = 0xbadbaddb;
@@ -94,7 +97,7 @@ const int kPageSizeBits = 20;
// On Intel architecture, cache line size is 64 bytes.
// On ARM it may be less (32 bytes), but as far as this constant is
// used for aligning data, it doesn't hurt to align on a greater value.
-const int kProcessorCacheLineSize = 64;
+#define PROCESSOR_CACHE_LINE_SIZE 64
// Constants relevant to double precision floating point numbers.
// If looking only at the top 32 bits, the QNaN mask is bits 19 to 30.
@@ -108,7 +111,6 @@ class AccessorInfo;
class Allocation;
class Arguments;
class Assembler;
-class AssertNoAllocation;
class Code;
class CodeGenerator;
class CodeStub;
@@ -125,12 +127,13 @@ class FunctionTemplateInfo;
class MemoryChunk;
class SeededNumberDictionary;
class UnseededNumberDictionary;
-class StringDictionary;
+class NameDictionary;
template <typename T> class Handle;
class Heap;
class HeapObject;
class IC;
class InterceptorInfo;
+class JSReceiver;
class JSArray;
class JSFunction;
class JSObject;
@@ -152,15 +155,15 @@ class Smi;
template <typename Config, class Allocator = FreeStoreAllocationPolicy>
class SplayTree;
class String;
+class Name;
class Struct;
class Variable;
class RelocInfo;
class Deserializer;
class MessageLocation;
-class ObjectGroup;
-class TickSample;
class VirtualMemory;
class Mutex;
+class RecursiveMutex;
typedef bool (*WeakSlotCallback)(Object** pointer);
@@ -178,12 +181,13 @@ enum AllocationSpace {
CODE_SPACE, // No pointers to new space, marked executable.
MAP_SPACE, // Only and all map objects.
CELL_SPACE, // Only and all cell objects.
+ PROPERTY_CELL_SPACE, // Only and all global property cell objects.
LO_SPACE, // Promoted large objects.
FIRST_SPACE = NEW_SPACE,
LAST_SPACE = LO_SPACE,
FIRST_PAGED_SPACE = OLD_POINTER_SPACE,
- LAST_PAGED_SPACE = CELL_SPACE
+ LAST_PAGED_SPACE = PROPERTY_CELL_SPACE
};
const int kSpaceTagSize = 3;
const int kSpaceTagMask = (1 << kSpaceTagSize) - 1;
@@ -260,16 +264,20 @@ enum InlineCacheState {
// Like MONOMORPHIC but check failed due to prototype.
MONOMORPHIC_PROTOTYPE_FAILURE,
// Multiple receiver types have been seen.
+ POLYMORPHIC,
+ // Many receiver types have been seen.
MEGAMORPHIC,
- // Special states for debug break or step in prepare stubs.
- DEBUG_BREAK,
- DEBUG_PREPARE_STEP_IN
+ // A generic handler is installed and no extra typefeedback is recorded.
+ GENERIC,
+ // Special state for debug break or step in prepare stubs.
+ DEBUG_STUB
};
enum CheckType {
RECEIVER_MAP_CHECK,
STRING_CHECK,
+ SYMBOL_CHECK,
NUMBER_CHECK,
BOOLEAN_CHECK
};
@@ -339,8 +347,9 @@ union IeeeDoubleBigEndianArchType {
// AccessorCallback
struct AccessorDescriptor {
- MaybeObject* (*getter)(Object* object, void* data);
- MaybeObject* (*setter)(JSObject* object, Object* value, void* data);
+ MaybeObject* (*getter)(Isolate* isolate, Object* object, void* data);
+ MaybeObject* (*setter)(
+ Isolate* isolate, JSObject* object, Object* value, void* data);
void* data;
};
@@ -351,20 +360,13 @@ struct AccessorDescriptor {
// VMState object leaves a state by popping the current state from the
// stack.
-#define STATE_TAG_LIST(V) \
- V(JS) \
- V(GC) \
- V(COMPILER) \
- V(PARALLEL_COMPILER_PROLOGUE) \
- V(OTHER) \
- V(EXTERNAL)
-
enum StateTag {
-#define DEF_STATE_TAG(name) name,
- STATE_TAG_LIST(DEF_STATE_TAG)
-#undef DEF_STATE_TAG
- // Pseudo-types.
- state_tag_count
+ JS,
+ GC,
+ COMPILER,
+ OTHER,
+ EXTERNAL,
+ IDLE
};
@@ -412,29 +414,19 @@ enum StateTag {
#endif
-enum CpuImplementer {
- UNKNOWN_IMPLEMENTER,
- ARM_IMPLEMENTER,
- QUALCOMM_IMPLEMENTER
-};
-
-
// Feature flags bit positions. They are mostly based on the CPUID spec.
-// (We assign CPUID itself to one of the currently reserved bits --
-// feel free to change this if needed.)
// On X86/X64, values below 32 are bits in EDX, values above 32 are bits in ECX.
enum CpuFeature { SSE4_1 = 32 + 19, // x86
SSE3 = 32 + 0, // x86
SSE2 = 26, // x86
CMOV = 15, // x86
- RDTSC = 4, // x86
- CPUID = 10, // x86
VFP3 = 1, // ARM
ARMv7 = 2, // ARM
- VFP2 = 3, // ARM
- SUDIV = 4, // ARM
- UNALIGNED_ACCESSES = 5, // ARM
- MOVW_MOVT_IMMEDIATE_LOADS = 6, // ARM
+ SUDIV = 3, // ARM
+ UNALIGNED_ACCESSES = 4, // ARM
+ MOVW_MOVT_IMMEDIATE_LOADS = 5, // ARM
+ VFP32DREGS = 6, // ARM
+ NEON = 7, // ARM
SAHF = 0, // x86
FPU = 1}; // MIPS
@@ -483,11 +475,19 @@ enum VariableMode {
CONST, // declared via 'const' declarations
- LET, // declared via 'let' declarations
+ LET, // declared via 'let' declarations (first lexical)
CONST_HARMONY, // declared via 'const' declarations in harmony mode
+ MODULE, // declared via 'module' declaration (last lexical)
+
// Variables introduced by the compiler:
+ INTERNAL, // like VAR, but not user-visible (may or may not
+ // be in a context)
+
+ TEMPORARY, // temporary variables (not user-visible), stack-allocated
+ // unless the scope as a whole has forced context allocation
+
DYNAMIC, // always require dynamic lookup (we don't know
// the declaration)
@@ -495,16 +495,10 @@ enum VariableMode {
// variable is global unless it has been shadowed
// by an eval-introduced variable
- DYNAMIC_LOCAL, // requires dynamic lookup, but we know that the
+ DYNAMIC_LOCAL // requires dynamic lookup, but we know that the
// variable is local and where it is unless it
// has been shadowed by an eval-introduced
// variable
-
- INTERNAL, // like VAR, but not user-visible (may or may not
- // be in a context)
-
- TEMPORARY // temporary variables (not user-visible), never
- // in a context
};
@@ -514,17 +508,17 @@ inline bool IsDynamicVariableMode(VariableMode mode) {
inline bool IsDeclaredVariableMode(VariableMode mode) {
- return mode >= VAR && mode <= CONST_HARMONY;
+ return mode >= VAR && mode <= MODULE;
}
inline bool IsLexicalVariableMode(VariableMode mode) {
- return mode >= LET && mode <= CONST_HARMONY;
+ return mode >= LET && mode <= MODULE;
}
inline bool IsImmutableVariableMode(VariableMode mode) {
- return mode == CONST || mode == CONST_HARMONY;
+ return mode == CONST || (mode >= CONST_HARMONY && mode <= MODULE);
}
@@ -571,6 +565,11 @@ enum ClearExceptionFlag {
};
+enum MinusZeroMode {
+ TREAT_MINUS_ZERO_AS_ZERO,
+ FAIL_ON_MINUS_ZERO
+};
+
} } // namespace v8::internal
#endif // V8_V8GLOBALS_H_
diff --git a/deps/v8/src/v8memory.h b/deps/v8/src/v8memory.h
index f71de8207..c72ce7ab7 100644
--- a/deps/v8/src/v8memory.h
+++ b/deps/v8/src/v8memory.h
@@ -64,6 +64,14 @@ class Memory {
return *reinterpret_cast<unsigned*>(addr);
}
+ static intptr_t& intptr_at(Address addr) {
+ return *reinterpret_cast<intptr_t*>(addr);
+ }
+
+ static uintptr_t& uintptr_at(Address addr) {
+ return *reinterpret_cast<uintptr_t*>(addr);
+ }
+
static double& double_at(Address addr) {
return *reinterpret_cast<double*>(addr);
}
diff --git a/deps/v8/src/v8natives.js b/deps/v8/src/v8natives.js
index e2e642941..c42d5c4d3 100644
--- a/deps/v8/src/v8natives.js
+++ b/deps/v8/src/v8natives.js
@@ -26,14 +26,12 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// This file relies on the fact that the following declarations have been made
-//
// in runtime.js:
// var $Object = global.Object;
// var $Boolean = global.Boolean;
// var $Number = global.Number;
// var $Function = global.Function;
// var $Array = global.Array;
-// var $NaN = 0/0;
//
// in math.js:
// var $floor = MathFloor
@@ -43,7 +41,6 @@ var $isFinite = GlobalIsFinite;
// ----------------------------------------------------------------------------
-
// Helper function used to install functions on objects.
function InstallFunctions(object, attributes, functions) {
if (functions.length >= 8) {
@@ -60,7 +57,29 @@ function InstallFunctions(object, attributes, functions) {
%ToFastProperties(object);
}
-// Prevents changes to the prototype of a built-infunction.
+
+// Helper function to install a getter-only accessor property.
+function InstallGetter(object, name, getter) {
+ %FunctionSetName(getter, name);
+ %FunctionRemovePrototype(getter);
+ %DefineOrRedefineAccessorProperty(object, name, getter, null, DONT_ENUM);
+ %SetNativeFlag(getter);
+}
+
+
+// Helper function to install a getter/setter accessor property.
+function InstallGetterSetter(object, name, getter, setter) {
+ %FunctionSetName(getter, name);
+ %FunctionSetName(setter, name);
+ %FunctionRemovePrototype(getter);
+ %FunctionRemovePrototype(setter);
+ %DefineOrRedefineAccessorProperty(object, name, getter, setter, DONT_ENUM);
+ %SetNativeFlag(getter);
+ %SetNativeFlag(setter);
+}
+
+
+// Prevents changes to the prototype of a built-in function.
// The "prototype" property of the function object is made non-configurable,
// and the prototype object is made non-extensible. The latter prevents
// changing the __proto__ property.
@@ -75,7 +94,7 @@ function SetUpLockedPrototype(constructor, fields, methods) {
}
if (fields) {
for (var i = 0; i < fields.length; i++) {
- %SetProperty(prototype, fields[i], void 0, DONT_ENUM | DONT_DELETE);
+ %SetProperty(prototype, fields[i], UNDEFINED, DONT_ENUM | DONT_DELETE);
}
}
for (var i = 0; i < methods.length; i += 2) {
@@ -84,7 +103,7 @@ function SetUpLockedPrototype(constructor, fields, methods) {
%SetProperty(prototype, key, f, DONT_ENUM | DONT_DELETE | READ_ONLY);
%SetNativeFlag(f);
}
- prototype.__proto__ = null;
+ %SetPrototype(prototype, null);
%ToFastProperties(prototype);
}
@@ -128,7 +147,7 @@ function GlobalParseInt(string, radix) {
string = TO_STRING_INLINE(string);
radix = TO_INT32(radix);
if (!(radix == 0 || (2 <= radix && radix <= 36))) {
- return $NaN;
+ return NAN;
}
}
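
The radix clamp above mirrors standard parseInt behaviour, so it can be sanity-checked from plain JavaScript; expected values below follow the spec rather than output captured from this build.

// Radix 0 means "infer"; anything outside 2..36 yields NaN.
console.log(parseInt("ff", 16));  // 255
console.log(parseInt("10", 0));   // 10, radix inferred
console.log(parseInt("10", 1));   // NaN, radix out of range
console.log(parseInt("10", 37));  // NaN
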
@@ -164,7 +183,7 @@ function GlobalEval(x) {
'be the global object from which eval originated');
}
- var f = %CompileString(x);
+ var f = %CompileString(x, false);
if (!IS_FUNCTION(f)) return f;
return %_CallFunction(global_receiver, f);
@@ -176,15 +195,17 @@ function GlobalEval(x) {
// Set up global object.
function SetUpGlobal() {
%CheckIsBootstrapping();
+
+ var attributes = DONT_ENUM | DONT_DELETE | READ_ONLY;
+
// ECMA 262 - 15.1.1.1.
- %SetProperty(global, "NaN", $NaN, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty(global, "NaN", NAN, attributes);
// ECMA-262 - 15.1.1.2.
- %SetProperty(global, "Infinity", 1/0, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty(global, "Infinity", INFINITY, attributes);
// ECMA-262 - 15.1.1.3.
- %SetProperty(global, "undefined", void 0,
- DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty(global, "undefined", UNDEFINED, attributes);
// Set up non-enumerable function on the global object.
InstallFunctions(global, DONT_ENUM, $Array(
@@ -198,33 +219,14 @@ function SetUpGlobal() {
SetUpGlobal();
-// ----------------------------------------------------------------------------
-// Boolean (first part of definition)
-
-
-%SetCode($Boolean, function(x) {
- if (%_IsConstructCall()) {
- %_SetValueOf(this, ToBoolean(x));
- } else {
- return ToBoolean(x);
- }
-});
-
-%FunctionSetPrototype($Boolean, new $Boolean(false));
-
-%SetProperty($Boolean.prototype, "constructor", $Boolean, DONT_ENUM);
// ----------------------------------------------------------------------------
// Object
-$Object.prototype.constructor = $Object;
-
// ECMA-262 - 15.2.4.2
function ObjectToString() {
- if (IS_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- return '[object Undefined]';
- }
- if (IS_NULL(this)) return '[object Null]';
+ if (IS_UNDEFINED(this) && !IS_UNDETECTABLE(this)) return "[object Undefined]";
+ if (IS_NULL(this)) return "[object Null]";
return "[object " + %_ClassOf(ToObject(this)) + "]";
}
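
The undefined/null special cases kept above are observable from script; a plain JavaScript illustration of the expected results (standard behaviour, not captured output):

var toString = Object.prototype.toString;
console.log(toString.call(undefined));  // "[object Undefined]"
console.log(toString.call(null));       // "[object Null]"
console.log(toString.call([]));         // "[object Array]"
console.log(toString.call(42));         // "[object Number]"
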
@@ -248,10 +250,13 @@ function ObjectValueOf() {
// ECMA-262 - 15.2.4.5
function ObjectHasOwnProperty(V) {
if (%IsJSProxy(this)) {
+ // TODO(rossberg): adjust once there is a story for symbols vs proxies.
+ if (IS_SYMBOL(V)) return false;
+
var handler = %GetHandler(this);
- return CallTrap1(handler, "hasOwn", DerivedHasOwnTrap, TO_STRING_INLINE(V));
+ return CallTrap1(handler, "hasOwn", DerivedHasOwnTrap, ToName(V));
}
- return %HasLocalProperty(TO_OBJECT_INLINE(this), TO_STRING_INLINE(V));
+ return %HasLocalProperty(TO_OBJECT_INLINE(this), ToName(V));
}
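
Switching the key conversion from ToString to ToName lets hasOwnProperty look up symbol-keyed properties on ordinary objects, while proxies are still special-cased above. A short sketch of the intended behaviour, assuming an engine with Symbol support:

var key = Symbol("k");
var obj = {};
obj[key] = 1;
console.log(obj.hasOwnProperty(key));        // true: the symbol key is used as-is
console.log(obj.hasOwnProperty("missing"));  // false
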
@@ -268,8 +273,11 @@ function ObjectIsPrototypeOf(V) {
// ECMA-262 - 15.2.4.6
function ObjectPropertyIsEnumerable(V) {
- var P = ToString(V);
+ var P = ToName(V);
if (%IsJSProxy(this)) {
+ // TODO(rossberg): adjust once there is a story for symbols vs proxies.
+ if (IS_SYMBOL(V)) return false;
+
var desc = GetOwnProperty(this, P);
return IS_UNDEFINED(desc) ? false : desc.isEnumerable();
}
@@ -291,7 +299,7 @@ function ObjectDefineGetter(name, fun) {
desc.setGet(fun);
desc.setEnumerable(true);
desc.setConfigurable(true);
- DefineOwnProperty(ToObject(receiver), ToString(name), desc, false);
+ DefineOwnProperty(ToObject(receiver), ToName(name), desc, false);
}
@@ -300,7 +308,7 @@ function ObjectLookupGetter(name) {
if (receiver == null && !IS_UNDETECTABLE(receiver)) {
receiver = %GlobalReceiver(global);
}
- return %LookupAccessor(ToObject(receiver), ToString(name), GETTER);
+ return %LookupAccessor(ToObject(receiver), ToName(name), GETTER);
}
@@ -317,7 +325,7 @@ function ObjectDefineSetter(name, fun) {
desc.setSet(fun);
desc.setEnumerable(true);
desc.setConfigurable(true);
- DefineOwnProperty(ToObject(receiver), ToString(name), desc, false);
+ DefineOwnProperty(ToObject(receiver), ToName(name), desc, false);
}
@@ -326,7 +334,7 @@ function ObjectLookupSetter(name) {
if (receiver == null && !IS_UNDETECTABLE(receiver)) {
receiver = %GlobalReceiver(global);
}
- return %LookupAccessor(ToObject(receiver), ToString(name), SETTER);
+ return %LookupAccessor(ToObject(receiver), ToName(name), SETTER);
}
@@ -337,7 +345,7 @@ function ObjectKeys(obj) {
if (%IsJSProxy(obj)) {
var handler = %GetHandler(obj);
var names = CallTrap0(handler, "keys", DerivedKeysTrap);
- return ToStringArray(names, "keys");
+ return ToNameArray(names, "keys", false);
}
return %LocalKeys(obj);
}
@@ -381,7 +389,8 @@ function FromPropertyDescriptor(desc) {
}
// Must be an AccessorDescriptor then. We never return a generic descriptor.
return { get: desc.getGet(),
- set: desc.getSet(),
+ set: desc.getSet() === ObjectSetProto ? ObjectPoisonProto
+ : desc.getSet(),
enumerable: desc.isEnumerable(),
configurable: desc.isConfigurable() };
}
@@ -466,12 +475,12 @@ function ToPropertyDescriptor(obj) {
function ToCompletePropertyDescriptor(obj) {
var desc = ToPropertyDescriptor(obj);
if (IsGenericDescriptor(desc) || IsDataDescriptor(desc)) {
- if (!desc.hasValue()) desc.setValue(void 0);
+ if (!desc.hasValue()) desc.setValue(UNDEFINED);
if (!desc.hasWritable()) desc.setWritable(false);
} else {
// Is accessor descriptor.
- if (!desc.hasGetter()) desc.setGet(void 0);
- if (!desc.hasSetter()) desc.setSet(void 0);
+ if (!desc.hasGetter()) desc.setGet(UNDEFINED);
+ if (!desc.hasSetter()) desc.setSet(UNDEFINED);
}
if (!desc.hasEnumerable()) desc.setEnumerable(false);
if (!desc.hasConfigurable()) desc.setConfigurable(false);
@@ -482,7 +491,7 @@ function ToCompletePropertyDescriptor(obj) {
function PropertyDescriptor() {
// Initialize here so they are all in-object and have the same map.
// Default values from ES5 8.6.1.
- this.value_ = void 0;
+ this.value_ = UNDEFINED;
this.hasValue_ = false;
this.writable_ = false;
this.hasWritable_ = false;
@@ -490,9 +499,9 @@ function PropertyDescriptor() {
this.hasEnumerable_ = false;
this.configurable_ = false;
this.hasConfigurable_ = false;
- this.get_ = void 0;
+ this.get_ = UNDEFINED;
this.hasGetter_ = false;
- this.set_ = void 0;
+ this.set_ = UNDEFINED;
this.hasSetter_ = false;
}
@@ -584,7 +593,7 @@ function ConvertDescriptorArrayToDescriptor(desc_array) {
}
if (IS_UNDEFINED(desc_array)) {
- return void 0;
+ return UNDEFINED;
}
var desc = new PropertyDescriptor();
@@ -635,10 +644,14 @@ function CallTrap2(handler, name, defaultTrap, x, y) {
// ES5 section 8.12.1.
function GetOwnProperty(obj, v) {
- var p = ToString(v);
+ var p = ToName(v);
if (%IsJSProxy(obj)) {
+ // TODO(rossberg): adjust once there is a story for symbols vs proxies.
+ if (IS_SYMBOL(v)) return UNDEFINED;
+
var handler = %GetHandler(obj);
- var descriptor = CallTrap1(handler, "getOwnPropertyDescriptor", void 0, p);
+ var descriptor = CallTrap1(
+ handler, "getOwnPropertyDescriptor", UNDEFINED, p);
if (IS_UNDEFINED(descriptor)) return descriptor;
var desc = ToCompletePropertyDescriptor(descriptor);
if (!desc.isConfigurable()) {
@@ -651,10 +664,10 @@ function GetOwnProperty(obj, v) {
// GetOwnProperty returns an array indexed by the constants
// defined in macros.py.
// If p is not a property on obj undefined is returned.
- var props = %GetOwnProperty(ToObject(obj), ToString(v));
+ var props = %GetOwnProperty(ToObject(obj), p);
// A false value here means that access checks failed.
- if (props === false) return void 0;
+ if (props === false) return UNDEFINED;
return ConvertDescriptorArrayToDescriptor(props);
}
@@ -677,8 +690,11 @@ function Delete(obj, p, should_throw) {
// Harmony proxies.
function DefineProxyProperty(obj, p, attributes, should_throw) {
+ // TODO(rossberg): adjust once there is a story for symbols vs proxies.
+ if (IS_SYMBOL(p)) return false;
+
var handler = %GetHandler(obj);
- var result = CallTrap2(handler, "defineProperty", void 0, p, attributes);
+ var result = CallTrap2(handler, "defineProperty", UNDEFINED, p, attributes);
if (!ToBoolean(result)) {
if (should_throw) {
throw MakeTypeError("handler_returned_false",
@@ -693,9 +709,9 @@ function DefineProxyProperty(obj, p, attributes, should_throw) {
// ES5 8.12.9.
function DefineObjectProperty(obj, p, desc, should_throw) {
- var current_or_access = %GetOwnProperty(ToObject(obj), ToString(p));
+ var current_or_access = %GetOwnProperty(ToObject(obj), ToName(p));
// A false value here means that access checks failed.
- if (current_or_access === false) return void 0;
+ if (current_or_access === false) return UNDEFINED;
var current = ConvertDescriptorArrayToDescriptor(current_or_access);
var extensible = %IsExtensible(ToObject(obj));
@@ -826,7 +842,7 @@ function DefineObjectProperty(obj, p, desc, should_throw) {
flag |= READ_ONLY;
}
- var value = void 0; // Default value is undefined.
+ var value = UNDEFINED; // Default value is undefined.
if (desc.hasValue()) {
value = desc.getValue();
} else if (!IS_UNDEFINED(current) && IsDataDescriptor(current)) {
@@ -856,8 +872,9 @@ function DefineArrayProperty(obj, p, desc, should_throw) {
// DefineObjectProperty() to modify its value.
// Step 3 - Special handling for length property.
- if (p == "length") {
+ if (p === "length") {
var length = obj.length;
+ var old_length = length;
if (!desc.hasValue()) {
return DefineObjectProperty(obj, "length", desc, should_throw);
}
@@ -874,8 +891,24 @@ function DefineArrayProperty(obj, p, desc, should_throw) {
}
}
var threw = false;
+
+ var emit_splice = %IsObserved(obj) && new_length !== old_length;
+ var removed;
+ if (emit_splice) {
+ BeginPerformSplice(obj);
+ removed = [];
+ if (new_length < old_length)
+ removed.length = old_length - new_length;
+ }
+
while (new_length < length--) {
- if (!Delete(obj, ToString(length), false)) {
+ var index = ToString(length);
+ if (emit_splice) {
+ var deletedDesc = GetOwnProperty(obj, index);
+ if (deletedDesc && deletedDesc.hasValue())
+ removed[length - new_length] = deletedDesc.getValue();
+ }
+ if (!Delete(obj, index, false)) {
new_length = length + 1;
threw = true;
break;
@@ -883,10 +916,22 @@ function DefineArrayProperty(obj, p, desc, should_throw) {
}
// Make sure the below call to DefineObjectProperty() doesn't overwrite
// any magic "length" property by removing the value.
+ // TODO(mstarzinger): This hack should be removed once we have addressed the
+ // respective TODO in Runtime_DefineOrRedefineDataProperty.
+ // For the time being, we need a hack to prevent Object.observe from
+ // generating two change records.
obj.length = new_length;
- desc.value_ = void 0;
+ desc.value_ = UNDEFINED;
desc.hasValue_ = false;
- if (!DefineObjectProperty(obj, "length", desc, should_throw) || threw) {
+ threw = !DefineObjectProperty(obj, "length", desc, should_throw) || threw;
+ if (emit_splice) {
+ EndPerformSplice(obj);
+ EnqueueSpliceRecord(obj,
+ new_length < old_length ? new_length : old_length,
+ removed,
+ new_length > old_length ? new_length - old_length : 0);
+ }
+ if (threw) {
if (should_throw) {
throw MakeTypeError("redefine_disallowed", [p]);
} else {
@@ -898,11 +943,19 @@ function DefineArrayProperty(obj, p, desc, should_throw) {
// Step 4 - Special handling for array index.
var index = ToUint32(p);
- if (index == ToNumber(p) && index != 4294967295) {
+ var emit_splice = false;
+ if (ToString(index) == p && index != 4294967295) {
var length = obj.length;
+ if (index >= length && %IsObserved(obj)) {
+ emit_splice = true;
+ BeginPerformSplice(obj);
+ }
+
var length_desc = GetOwnProperty(obj, "length");
if ((index >= length && !length_desc.isWritable()) ||
!DefineObjectProperty(obj, p, desc, true)) {
+ if (emit_splice)
+ EndPerformSplice(obj);
if (should_throw) {
throw MakeTypeError("define_disallowed", [p]);
} else {
@@ -912,6 +965,10 @@ function DefineArrayProperty(obj, p, desc, should_throw) {
if (index >= length) {
obj.length = index + 1;
}
+ if (emit_splice) {
+ EndPerformSplice(obj);
+ EnqueueSpliceRecord(obj, length, [], index + 1 - length);
+ }
return true;
}
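
The splice bookkeeping added above feeds Object.observe and is V8-specific, but the underlying length/index semantics are standard Array behaviour and can be illustrated with plain JavaScript:

var a = [1, 2, 3, 4];
// Redefining "length" with a smaller value deletes the trailing elements.
Object.defineProperty(a, "length", { value: 2 });
console.log(a);  // [1, 2]
// Defining an index at or beyond the current length grows the array.
Object.defineProperty(a, "5", {
  value: 9, writable: true, enumerable: true, configurable: true
});
console.log(a.length);  // 6
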
@@ -923,6 +980,9 @@ function DefineArrayProperty(obj, p, desc, should_throw) {
// ES5 section 8.12.9, ES5 section 15.4.5.1 and Harmony proxies.
function DefineOwnProperty(obj, p, desc, should_throw) {
if (%IsJSProxy(obj)) {
+ // TODO(rossberg): adjust once there is a story for symbols vs proxies.
+ if (IS_SYMBOL(p)) return false;
+
var attributes = FromGenericPropertyDescriptor(desc);
return DefineProxyProperty(obj, p, attributes, should_throw);
} else if (IS_ARRAY(obj)) {
@@ -954,21 +1014,26 @@ function ObjectGetOwnPropertyDescriptor(obj, p) {
// For Harmony proxies
-function ToStringArray(obj, trap) {
+function ToNameArray(obj, trap, includeSymbols) {
if (!IS_SPEC_OBJECT(obj)) {
throw MakeTypeError("proxy_non_object_prop_names", [obj, trap]);
}
var n = ToUint32(obj.length);
var array = new $Array(n);
- var names = {}; // TODO(rossberg): use sets once they are ready.
+ var realLength = 0;
+ var names = { __proto__: null }; // TODO(rossberg): use sets once ready.
for (var index = 0; index < n; index++) {
- var s = ToString(obj[index]);
+ var s = ToName(obj[index]);
+ // TODO(rossberg): adjust once there is a story for symbols vs proxies.
+ if (IS_SYMBOL(s) && !includeSymbols) continue;
if (%HasLocalProperty(names, s)) {
throw MakeTypeError("proxy_repeated_prop_name", [obj, trap, s]);
}
array[index] = s;
+ ++realLength;
names[s] = 0;
}
+ array.length = realLength;
return array;
}
@@ -981,54 +1046,66 @@ function ObjectGetOwnPropertyNames(obj) {
// Special handling for proxies.
if (%IsJSProxy(obj)) {
var handler = %GetHandler(obj);
- var names = CallTrap0(handler, "getOwnPropertyNames", void 0);
- return ToStringArray(names, "getOwnPropertyNames");
+ var names = CallTrap0(handler, "getOwnPropertyNames", UNDEFINED);
+ return ToNameArray(names, "getOwnPropertyNames", false);
}
+ var nameArrays = new InternalArray();
+
// Find all the indexed properties.
// Get the local element names.
- var propertyNames = %GetLocalElementNames(obj);
+ var localElementNames = %GetLocalElementNames(obj);
+ for (var i = 0; i < localElementNames.length; ++i) {
+ localElementNames[i] = %_NumberToString(localElementNames[i]);
+ }
+ nameArrays.push(localElementNames);
// Get names for indexed interceptor properties.
- if (%GetInterceptorInfo(obj) & 1) {
- var indexedInterceptorNames =
- %GetIndexedInterceptorElementNames(obj);
- if (indexedInterceptorNames) {
- propertyNames = propertyNames.concat(indexedInterceptorNames);
+ var interceptorInfo = %GetInterceptorInfo(obj);
+ if ((interceptorInfo & 1) != 0) {
+ var indexedInterceptorNames = %GetIndexedInterceptorElementNames(obj);
+ if (!IS_UNDEFINED(indexedInterceptorNames)) {
+ nameArrays.push(indexedInterceptorNames);
}
}
// Find all the named properties.
// Get the local property names.
- propertyNames = propertyNames.concat(%GetLocalPropertyNames(obj));
+ nameArrays.push(%GetLocalPropertyNames(obj, false));
// Get names for named interceptor properties if any.
-
- if (%GetInterceptorInfo(obj) & 2) {
- var namedInterceptorNames =
- %GetNamedInterceptorPropertyNames(obj);
- if (namedInterceptorNames) {
- propertyNames = propertyNames.concat(namedInterceptorNames);
+ if ((interceptorInfo & 2) != 0) {
+ var namedInterceptorNames = %GetNamedInterceptorPropertyNames(obj);
+ if (!IS_UNDEFINED(namedInterceptorNames)) {
+ nameArrays.push(namedInterceptorNames);
}
}
- // Property names are expected to be unique strings.
- var propertySet = {};
- var j = 0;
- for (var i = 0; i < propertyNames.length; ++i) {
- var name = ToString(propertyNames[i]);
- // We need to check for the exact property value since for intrinsic
- // properties like toString if(propertySet["toString"]) will always
- // succeed.
- if (propertySet[name] === true) {
- continue;
+ var propertyNames =
+ %Apply(InternalArray.prototype.concat,
+ nameArrays[0], nameArrays, 1, nameArrays.length - 1);
+
+ // Property names are expected to be unique strings,
+ // but interceptors can interfere with that assumption.
+ if (interceptorInfo != 0) {
+ var propertySet = { __proto__: null };
+ var j = 0;
+ for (var i = 0; i < propertyNames.length; ++i) {
+ if (IS_SYMBOL(propertyNames[i])) continue;
+ var name = ToString(propertyNames[i]);
+ // We need to check for the exact property value since for intrinsic
+ // properties like toString if(propertySet["toString"]) will always
+ // succeed.
+ if (propertySet[name] === true) {
+ continue;
+ }
+ propertySet[name] = true;
+ propertyNames[j++] = name;
}
- propertySet[name] = true;
- propertyNames[j++] = name;
+ propertyNames.length = j;
}
- propertyNames.length = j;
return propertyNames;
}
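
The rewrite above gathers element names (as strings), named properties and interceptor names, de-duplicating only when interceptors are involved; the result visible to scripts is unchanged. A plain JavaScript illustration of the expected output:

var o = { b: 1 };
o[0] = "zero";
Object.defineProperty(o, "hidden", { value: 2, enumerable: false });
// Indexed names come back as strings; non-enumerable names are included.
console.log(Object.getOwnPropertyNames(o));       // ["0", "b", "hidden"]
console.log(Object.getOwnPropertyNames([1, 2]));  // ["0", "1", "length"]
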
@@ -1039,8 +1116,7 @@ function ObjectCreate(proto, properties) {
if (!IS_SPEC_OBJECT(proto) && proto !== null) {
throw MakeTypeError("proto_object_or_null", [proto]);
}
- var obj = new $Object();
- obj.__proto__ = proto;
+ var obj = { __proto__: proto };
if (!IS_UNDEFINED(properties)) ObjectDefineProperties(obj, properties);
return obj;
}
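
ObjectCreate now allocates the result with an object literal carrying __proto__ instead of assigning the prototype afterwards; behaviour from script is unchanged. For reference:

var proto = { greet: function() { return "hi"; } };
var obj = Object.create(proto, { x: { value: 1, enumerable: true } });
console.log(Object.getPrototypeOf(obj) === proto);  // true
console.log(obj.greet(), obj.x);                    // "hi" 1
console.log(Object.create(null).hasOwnProperty);    // undefined: no Object.prototype
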
@@ -1051,12 +1127,12 @@ function ObjectDefineProperty(obj, p, attributes) {
if (!IS_SPEC_OBJECT(obj)) {
throw MakeTypeError("called_on_non_object", ["Object.defineProperty"]);
}
- var name = ToString(p);
+ var name = ToName(p);
if (%IsJSProxy(obj)) {
// Clone the attributes object for protection.
// TODO(rossberg): not spec'ed yet, so not sure if this should involve
// non-own properties as it does (or non-enumerable ones, as it doesn't?).
- var attributesClone = {};
+ var attributesClone = { __proto__: null };
for (var a in attributes) {
attributesClone[a] = attributes[a];
}
@@ -1119,7 +1195,7 @@ function ObjectDefineProperties(obj, properties) {
// Harmony proxies.
function ProxyFix(obj) {
var handler = %GetHandler(obj);
- var props = CallTrap0(handler, "fix", void 0);
+ var props = CallTrap0(handler, "fix", UNDEFINED);
if (IS_UNDEFINED(props)) {
throw MakeTypeError("handler_returned_undefined", [handler, "fix"]);
}
@@ -1172,20 +1248,27 @@ function ObjectFreeze(obj) {
if (!IS_SPEC_OBJECT(obj)) {
throw MakeTypeError("called_on_non_object", ["Object.freeze"]);
}
- if (%IsJSProxy(obj)) {
- ProxyFix(obj);
- }
- var names = ObjectGetOwnPropertyNames(obj);
- for (var i = 0; i < names.length; i++) {
- var name = names[i];
- var desc = GetOwnProperty(obj, name);
- if (desc.isWritable() || desc.isConfigurable()) {
- if (IsDataDescriptor(desc)) desc.setWritable(false);
- desc.setConfigurable(false);
- DefineOwnProperty(obj, name, desc, true);
+ var isProxy = %IsJSProxy(obj);
+ if (isProxy || %HasNonStrictArgumentsElements(obj)) {
+ if (isProxy) {
+ ProxyFix(obj);
}
+ var names = ObjectGetOwnPropertyNames(obj);
+ for (var i = 0; i < names.length; i++) {
+ var name = names[i];
+ var desc = GetOwnProperty(obj, name);
+ if (desc.isWritable() || desc.isConfigurable()) {
+ if (IsDataDescriptor(desc)) desc.setWritable(false);
+ desc.setConfigurable(false);
+ DefineOwnProperty(obj, name, desc, true);
+ }
+ }
+ %PreventExtensions(obj);
+ } else {
+ // TODO(adamk): Is it worth going to this fast path if the
+ // object's properties are already in dictionary mode?
+ %ObjectFreeze(obj);
}
- %PreventExtensions(obj);
return obj;
}
@@ -1211,16 +1294,16 @@ function ObjectIsSealed(obj) {
if (%IsJSProxy(obj)) {
return false;
}
+ if (%IsExtensible(obj)) {
+ return false;
+ }
var names = ObjectGetOwnPropertyNames(obj);
for (var i = 0; i < names.length; i++) {
var name = names[i];
var desc = GetOwnProperty(obj, name);
if (desc.isConfigurable()) return false;
}
- if (!ObjectIsExtensible(obj)) {
- return true;
- }
- return false;
+ return true;
}
@@ -1232,6 +1315,9 @@ function ObjectIsFrozen(obj) {
if (%IsJSProxy(obj)) {
return false;
}
+ if (%IsExtensible(obj)) {
+ return false;
+ }
var names = ObjectGetOwnPropertyNames(obj);
for (var i = 0; i < names.length; i++) {
var name = names[i];
@@ -1239,10 +1325,7 @@ function ObjectIsFrozen(obj) {
if (IsDataDescriptor(desc) && desc.isWritable()) return false;
if (desc.isConfigurable()) return false;
}
- if (!ObjectIsExtensible(obj)) {
- return true;
- }
- return false;
+ return true;
}
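
Both predicates now return early for extensible objects before walking descriptors, and ObjectFreeze gains a runtime fast path for ordinary objects; none of this changes what scripts observe. A plain JavaScript check of the expected results:

var o = { x: 1 };
console.log(Object.isSealed(o), Object.isFrozen(o));  // false false, still extensible
Object.freeze(o);
console.log(Object.isExtensible(o));                  // false
console.log(Object.isSealed(o), Object.isFrozen(o));  // true true
o.x = 2;  // silently ignored outside strict mode
console.log(o.x);  // 1
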
@@ -1268,7 +1351,25 @@ function ObjectIs(obj1, obj2) {
}
-%SetCode($Object, function(x) {
+// Harmony __proto__ getter.
+function ObjectGetProto() {
+ return %GetPrototype(this);
+}
+
+
+// Harmony __proto__ setter.
+function ObjectSetProto(obj) {
+ return %SetPrototype(this, obj);
+}
+
+
+// Harmony __proto__ poison pill.
+function ObjectPoisonProto(obj) {
+ throw MakeTypeError("proto_poison_pill", []);
+}
+
+
+function ObjectConstructor(x) {
if (%_IsConstructCall()) {
if (x == null) return this;
return ToObject(x);
@@ -1276,16 +1377,24 @@ function ObjectIs(obj1, obj2) {
if (x == null) return { };
return ToObject(x);
}
-});
+}
-%SetExpectedNumberOfProperties($Object, 4);
// ----------------------------------------------------------------------------
// Object
function SetUpObject() {
%CheckIsBootstrapping();
- // Set Up non-enumerable functions on the Object.prototype object.
+
+ %SetNativeFlag($Object);
+ %SetCode($Object, ObjectConstructor);
+ %FunctionSetName(ObjectPoisonProto, "__proto__");
+ %FunctionRemovePrototype(ObjectPoisonProto);
+ %SetExpectedNumberOfProperties($Object, 4);
+
+ %SetProperty($Object.prototype, "constructor", $Object, DONT_ENUM);
+
+ // Set up non-enumerable functions on the Object.prototype object.
InstallFunctions($Object.prototype, DONT_ENUM, $Array(
"toString", ObjectToString,
"toLocaleString", ObjectToLocaleString,
@@ -1298,6 +1407,10 @@ function SetUpObject() {
"__defineSetter__", ObjectDefineSetter,
"__lookupSetter__", ObjectLookupSetter
));
+ InstallGetterSetter($Object.prototype, "__proto__",
+ ObjectGetProto, ObjectSetProto);
+
+ // Set up non-enumerable functions in the Object object.
InstallFunctions($Object, DONT_ENUM, $Array(
"keys", ObjectKeys,
"create", ObjectCreate,
@@ -1318,9 +1431,19 @@ function SetUpObject() {
SetUpObject();
+
// ----------------------------------------------------------------------------
// Boolean
+function BooleanConstructor(x) {
+ if (%_IsConstructCall()) {
+ %_SetValueOf(this, ToBoolean(x));
+ } else {
+ return ToBoolean(x);
+ }
+}
+
+
function BooleanToString() {
// NOTE: Both Boolean objects and values can enter here as
// 'this'. This is not as dictated by ECMA-262.
@@ -1347,9 +1470,13 @@ function BooleanValueOf() {
// ----------------------------------------------------------------------------
-
function SetUpBoolean () {
%CheckIsBootstrapping();
+
+ %SetCode($Boolean, BooleanConstructor);
+ %FunctionSetPrototype($Boolean, new $Boolean(false));
+ %SetProperty($Boolean.prototype, "constructor", $Boolean, DONT_ENUM);
+
InstallFunctions($Boolean.prototype, DONT_ENUM, $Array(
"toString", BooleanToString,
"valueOf", BooleanValueOf
@@ -1362,17 +1489,15 @@ SetUpBoolean();
// ----------------------------------------------------------------------------
// Number
-// Set the Number function and constructor.
-%SetCode($Number, function(x) {
+function NumberConstructor(x) {
var value = %_ArgumentsLength() == 0 ? 0 : ToNumber(x);
if (%_IsConstructCall()) {
%_SetValueOf(this, value);
} else {
return value;
}
-});
+}
-%FunctionSetPrototype($Number, new $Number(0));
// ECMA-262 section 15.7.4.2.
function NumberToString(radix) {
@@ -1403,11 +1528,7 @@ function NumberToString(radix) {
// ECMA-262 section 15.7.4.3
function NumberToLocaleString() {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["Number.prototype.toLocaleString"]);
- }
- return this.toString();
+ return %_CallFunction(this, NumberToString);
}
@@ -1424,50 +1545,76 @@ function NumberValueOf() {
// ECMA-262 section 15.7.4.5
function NumberToFixed(fractionDigits) {
+ var x = this;
+ if (!IS_NUMBER(this)) {
+ if (!IS_NUMBER_WRAPPER(this)) {
+ throw MakeTypeError("incompatible_method_receiver",
+ ["Number.prototype.toFixed", this]);
+ }
+ // Get the value of this number in case it's an object.
+ x = %_ValueOf(this);
+ }
var f = TO_INTEGER(fractionDigits);
+
if (f < 0 || f > 20) {
throw new $RangeError("toFixed() digits argument must be between 0 and 20");
}
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["Number.prototype.toFixed"]);
- }
- var x = ToNumber(this);
+
+ if (NUMBER_IS_NAN(x)) return "NaN";
+ if (x == INFINITY) return "Infinity";
+ if (x == -INFINITY) return "-Infinity";
+
return %NumberToFixed(x, f);
}
// ECMA-262 section 15.7.4.6
function NumberToExponential(fractionDigits) {
- var f = -1;
- if (!IS_UNDEFINED(fractionDigits)) {
- f = TO_INTEGER(fractionDigits);
- if (f < 0 || f > 20) {
- throw new $RangeError(
- "toExponential() argument must be between 0 and 20");
+ var x = this;
+ if (!IS_NUMBER(this)) {
+ if (!IS_NUMBER_WRAPPER(this)) {
+ throw MakeTypeError("incompatible_method_receiver",
+ ["Number.prototype.toExponential", this]);
}
+ // Get the value of this number in case it's an object.
+ x = %_ValueOf(this);
}
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["Number.prototype.toExponential"]);
+ var f = IS_UNDEFINED(fractionDigits) ? UNDEFINED : TO_INTEGER(fractionDigits);
+
+ if (NUMBER_IS_NAN(x)) return "NaN";
+ if (x == INFINITY) return "Infinity";
+ if (x == -INFINITY) return "-Infinity";
+
+ if (IS_UNDEFINED(f)) {
+ f = -1; // Signal for runtime function that f is not defined.
+ } else if (f < 0 || f > 20) {
+ throw new $RangeError("toExponential() argument must be between 0 and 20");
}
- var x = ToNumber(this);
return %NumberToExponential(x, f);
}
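// A sketch of the argument handling above: an undefined fractionDigits is
// forwarded as -1 so the runtime picks the shortest representation:
//   (123.456).toExponential() === "1.23456e+2"
//   (123.456).toExponential(2) === "1.23e+2"
//   (123.456).toExponential(25)                 // throws RangeError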
// ECMA-262 section 15.7.4.7
function NumberToPrecision(precision) {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["Number.prototype.toPrecision"]);
+ var x = this;
+ if (!IS_NUMBER(this)) {
+ if (!IS_NUMBER_WRAPPER(this)) {
+ throw MakeTypeError("incompatible_method_receiver",
+ ["Number.prototype.toPrecision", this]);
+ }
+ // Get the value of this number in case it's an object.
+ x = %_ValueOf(this);
}
if (IS_UNDEFINED(precision)) return ToString(%_ValueOf(this));
var p = TO_INTEGER(precision);
+
+ if (NUMBER_IS_NAN(x)) return "NaN";
+ if (x == INFINITY) return "Infinity";
+ if (x == -INFINITY) return "-Infinity";
+
if (p < 1 || p > 21) {
throw new $RangeError("toPrecision() argument must be between 1 and 21");
}
- var x = ToNumber(this);
return %NumberToPrecision(x, p);
}
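// A sketch of the corresponding behavior for toPrecision:
//   (123.456).toPrecision(4) === "123.5"
//   (123.456).toPrecision() === "123.456"       // undefined falls back to ToString
//   (123.456).toPrecision(0)                    // throws RangeError (must be 1..21)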
@@ -1488,6 +1635,10 @@ function NumberIsNaN(number) {
function SetUpNumber() {
%CheckIsBootstrapping();
+
+ %SetCode($Number, NumberConstructor);
+ %FunctionSetPrototype($Number, new $Number(0));
+
%OptimizeObjectForAddingMultipleProperties($Number.prototype, 8);
// Set up the constructor property on the Number prototype object.
%SetProperty($Number.prototype, "constructor", $Number, DONT_ENUM);
@@ -1504,18 +1655,18 @@ function SetUpNumber() {
DONT_ENUM | DONT_DELETE | READ_ONLY);
// ECMA-262 section 15.7.3.3.
- %SetProperty($Number, "NaN", $NaN, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($Number, "NaN", NAN, DONT_ENUM | DONT_DELETE | READ_ONLY);
// ECMA-262 section 15.7.3.4.
%SetProperty($Number,
"NEGATIVE_INFINITY",
- -1/0,
+ -INFINITY,
DONT_ENUM | DONT_DELETE | READ_ONLY);
// ECMA-262 section 15.7.3.5.
%SetProperty($Number,
"POSITIVE_INFINITY",
- 1/0,
+ INFINITY,
DONT_ENUM | DONT_DELETE | READ_ONLY);
%ToFastProperties($Number);
@@ -1540,8 +1691,6 @@ SetUpNumber();
// ----------------------------------------------------------------------------
// Function
-$Function.prototype.constructor = $Function;
-
function FunctionSourceString(func) {
while (%IsJSFunctionProxy(func)) {
func = %GetCallTrap(func);
@@ -1565,7 +1714,8 @@ function FunctionSourceString(func) {
var name = %FunctionNameShouldPrintAsAnonymous(func)
? 'anonymous'
: %FunctionGetName(func);
- return 'function ' + name + source;
+ var head = %FunctionIsGenerator(func) ? 'function* ' : 'function ';
+ return head + name + source;
}
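// A sketch of the effect of the generator check above, assuming generator
// syntax is enabled (e.g. via --harmony-generators):
//   (function* g() { yield 1; }).toString()   // "function* g() { yield 1; }"
//   (function f() { return 1; }).toString()   // "function f() { return 1; }"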
@@ -1637,36 +1787,49 @@ function FunctionBind(this_arg) { // Length is 1.
}
-function NewFunction(arg1) { // length == 1
- var n = %_ArgumentsLength();
+function NewFunctionString(arguments, function_token) {
+ var n = arguments.length;
var p = '';
if (n > 1) {
- p = new InternalArray(n - 1);
- for (var i = 0; i < n - 1; i++) p[i] = %_Arguments(i);
- p = Join(p, n - 1, ',', NonStringToString);
+ p = ToString(arguments[0]);
+ for (var i = 1; i < n - 1; i++) {
+ p += ',' + ToString(arguments[i]);
+ }
  // If the formal parameters string includes ')' - an illegal
// character - it may make the combined function expression
// compile. We avoid this problem by checking for this early on.
- if (p.indexOf(')') != -1) throw MakeSyntaxError('unable_to_parse',[]);
+ if (%_CallFunction(p, ')', StringIndexOf) != -1) {
+ throw MakeSyntaxError('paren_in_arg_string', []);
+ }
+ // If the formal parameters include an unbalanced block comment, the
+ // function must be rejected. Since JavaScript does not allow nested
+ // comments, we can include a trailing block comment to catch this.
+ p += '\n/' + '**/';
}
- var body = (n > 0) ? ToString(%_Arguments(n - 1)) : '';
- var source = '(function(' + p + ') {\n' + body + '\n})';
+ var body = (n > 0) ? ToString(arguments[n - 1]) : '';
+ return '(' + function_token + '(' + p + ') {\n' + body + '\n})';
+}
- // The call to SetNewFunctionAttributes will ensure the prototype
- // property of the resulting function is enumerable (ECMA262, 15.3.5.2).
- var global_receiver = %GlobalReceiver(global);
- var f = %_CallFunction(global_receiver, %CompileString(source));
+function FunctionConstructor(arg1) { // length == 1
+ var source = NewFunctionString(arguments, 'function');
+ var global_receiver = %GlobalReceiver(global);
+ // Compile the string in the constructor and not a helper so that errors
+ // appear to come from here.
+ var f = %_CallFunction(global_receiver, %CompileString(source, true));
%FunctionMarkNameShouldPrintAsAnonymous(f);
- return %SetNewFunctionAttributes(f);
+ return f;
}
-%SetCode($Function, NewFunction);
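// A sketch of the guards in NewFunctionString above: a ')' in the parameter
// string is rejected early, and the trailing block comment catches an
// unbalanced comment in the argument list:
//   new Function("a", "return a + 1")(2) === 3
//   new Function("a) { alert(1); } (function(", "")   // throws SyntaxError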
// ----------------------------------------------------------------------------
function SetUpFunction() {
%CheckIsBootstrapping();
+
+ %SetCode($Function, FunctionConstructor);
+ %SetProperty($Function.prototype, "constructor", $Function, DONT_ENUM);
+
InstallFunctions($Function.prototype, DONT_ENUM, $Array(
"bind", FunctionBind,
"toString", FunctionToString
diff --git a/deps/v8/src/v8threads.cc b/deps/v8/src/v8threads.cc
index 32ea5e197..cc4f43965 100644
--- a/deps/v8/src/v8threads.cc
+++ b/deps/v8/src/v8threads.cc
@@ -42,15 +42,13 @@ namespace v8 {
bool Locker::active_ = false;
-// Constructor for the Locker object. Once the Locker is constructed the
-// current thread will be guaranteed to have the lock for a given isolate.
-Locker::Locker(v8::Isolate* isolate)
- : has_lock_(false),
- top_level_(true),
- isolate_(reinterpret_cast<i::Isolate*>(isolate)) {
- if (isolate_ == NULL) {
- isolate_ = i::Isolate::GetDefaultIsolateForLocking();
- }
+// Once the Locker is initialized, the current thread will be guaranteed to have
+// the lock for a given isolate.
+void Locker::Initialize(v8::Isolate* isolate) {
+ ASSERT(isolate != NULL);
+ has_lock_ = false;
+ top_level_ = true;
+ isolate_ = reinterpret_cast<i::Isolate*>(isolate);
// Record that the Locker has been used at least once.
active_ = true;
// Get the big lock if necessary.
@@ -86,10 +84,8 @@ Locker::Locker(v8::Isolate* isolate)
bool Locker::IsLocked(v8::Isolate* isolate) {
+ ASSERT(isolate != NULL);
i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
- if (internal_isolate == NULL) {
- internal_isolate = i::Isolate::GetDefaultIsolateForLocking();
- }
return internal_isolate->thread_manager()->IsLockedByCurrentThread();
}
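// A minimal embedder-side sketch of the new contract: Locker/Unlocker now
// require an explicit, non-NULL v8::Isolate* (the default-isolate fallback is
// gone). Typical usage, assuming `isolate` was created by the embedder:
//
//   {
//     v8::Locker locker(isolate);          // ends up in Locker::Initialize()
//     v8::Isolate::Scope isolate_scope(isolate);
//     // ... enter a context and run script ...
//   }                                      // ~Locker() releases the lock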
@@ -115,11 +111,9 @@ Locker::~Locker() {
}
-Unlocker::Unlocker(v8::Isolate* isolate)
- : isolate_(reinterpret_cast<i::Isolate*>(isolate)) {
- if (isolate_ == NULL) {
- isolate_ = i::Isolate::GetDefaultIsolateForLocking();
- }
+void Unlocker::Initialize(v8::Isolate* isolate) {
+ ASSERT(isolate != NULL);
+ isolate_ = reinterpret_cast<i::Isolate*>(isolate);
ASSERT(isolate_->thread_manager()->IsLockedByCurrentThread());
if (isolate_->IsDefaultIsolate()) {
isolate_->Exit();
@@ -139,13 +133,15 @@ Unlocker::~Unlocker() {
}
-void Locker::StartPreemption(int every_n_ms) {
- v8::internal::ContextSwitcher::StartPreemption(every_n_ms);
+void Locker::StartPreemption(v8::Isolate* isolate, int every_n_ms) {
+ v8::internal::ContextSwitcher::StartPreemption(
+ reinterpret_cast<i::Isolate*>(isolate), every_n_ms);
}
-void Locker::StopPreemption() {
- v8::internal::ContextSwitcher::StopPreemption();
+void Locker::StopPreemption(v8::Isolate* isolate) {
+ v8::internal::ContextSwitcher::StopPreemption(
+ reinterpret_cast<i::Isolate*>(isolate));
}
@@ -210,7 +206,7 @@ bool ThreadManager::RestoreThread() {
void ThreadManager::Lock() {
- mutex_->Lock();
+ mutex_.Lock();
mutex_owner_ = ThreadId::Current();
ASSERT(IsLockedByCurrentThread());
}
@@ -218,7 +214,7 @@ void ThreadManager::Lock() {
void ThreadManager::Unlock() {
mutex_owner_ = ThreadId::Invalid();
- mutex_->Unlock();
+ mutex_.Unlock();
}
@@ -299,8 +295,7 @@ ThreadState* ThreadState::Next() {
// be distinguished from not having a thread id at all (since NULL is
// defined as 0.)
ThreadManager::ThreadManager()
- : mutex_(OS::CreateMutex()),
- mutex_owner_(ThreadId::Invalid()),
+ : mutex_owner_(ThreadId::Invalid()),
lazily_archived_thread_(ThreadId::Invalid()),
lazily_archived_thread_state_(NULL),
free_anchor_(NULL),
@@ -311,7 +306,6 @@ ThreadManager::ThreadManager()
ThreadManager::~ThreadManager() {
- delete mutex_;
DeleteThreadStateList(free_anchor_);
DeleteThreadStateList(in_use_anchor_);
}
@@ -384,6 +378,7 @@ bool ThreadManager::IsArchived() {
return data != NULL && data->thread_state() != NULL;
}
+
void ThreadManager::Iterate(ObjectVisitor* v) {
// Expecting no threads during serialization/deserialization
for (ThreadState* state = FirstThreadStateInUse();
@@ -434,8 +429,7 @@ ContextSwitcher::ContextSwitcher(Isolate* isolate, int every_n_ms)
// Set the scheduling interval of V8 threads. This function starts the
// ContextSwitcher thread if needed.
-void ContextSwitcher::StartPreemption(int every_n_ms) {
- Isolate* isolate = Isolate::Current();
+void ContextSwitcher::StartPreemption(Isolate* isolate, int every_n_ms) {
ASSERT(Locker::IsLocked(reinterpret_cast<v8::Isolate*>(isolate)));
if (isolate->context_switcher() == NULL) {
// If the ContextSwitcher thread is not running at the moment start it now.
@@ -451,8 +445,7 @@ void ContextSwitcher::StartPreemption(int every_n_ms) {
// Disable preemption of V8 threads. If multiple threads want to use V8 they
// must cooperatively schedule amongst them from this point on.
-void ContextSwitcher::StopPreemption() {
- Isolate* isolate = Isolate::Current();
+void ContextSwitcher::StopPreemption(Isolate* isolate) {
ASSERT(Locker::IsLocked(reinterpret_cast<v8::Isolate*>(isolate)));
if (isolate->context_switcher() != NULL) {
// The ContextSwitcher thread is running. We need to stop it and release
@@ -479,7 +472,6 @@ void ContextSwitcher::Run() {
// Acknowledge the preemption by the receiving thread.
void ContextSwitcher::PreemptionReceived() {
- ASSERT(Locker::IsLocked());
// There is currently no accounting being done for this. But could be in the
// future, which is why we leave this in.
}
diff --git a/deps/v8/src/v8threads.h b/deps/v8/src/v8threads.h
index 8dce8602f..1edacfc3b 100644
--- a/deps/v8/src/v8threads.h
+++ b/deps/v8/src/v8threads.h
@@ -119,7 +119,7 @@ class ThreadManager {
void EagerlyArchiveThread();
- Mutex* mutex_;
+ Mutex mutex_;
ThreadId mutex_owner_;
ThreadId lazily_archived_thread_;
ThreadState* lazily_archived_thread_state_;
@@ -146,10 +146,10 @@ class ThreadManager {
class ContextSwitcher: public Thread {
public:
// Set the preemption interval for the ContextSwitcher thread.
- static void StartPreemption(int every_n_ms);
+ static void StartPreemption(Isolate* isolate, int every_n_ms);
// Stop sending preemption requests to threads.
- static void StopPreemption();
+ static void StopPreemption(Isolate* isolate);
// Preempted thread needs to call back to the ContextSwitcher to acknowledge
// the handling of a preemption request.
diff --git a/deps/v8/src/v8utils.cc b/deps/v8/src/v8utils.cc
index 627169e70..7390d854e 100644
--- a/deps/v8/src/v8utils.cc
+++ b/deps/v8/src/v8utils.cc
@@ -105,12 +105,12 @@ char* ReadLine(const char* prompt) {
char* new_result = NewArray<char>(new_len);
// Copy the existing input into the new array and set the new
// array as the result.
- memcpy(new_result, result, offset * kCharSize);
+ OS::MemCopy(new_result, result, offset * kCharSize);
DeleteArray(result);
result = new_result;
}
// Copy the newly read line into the result.
- memcpy(result + offset, line_buf, len * kCharSize);
+ OS::MemCopy(result + offset, line_buf, len * kCharSize);
offset += len;
}
ASSERT(result != NULL);
@@ -264,7 +264,7 @@ void StringBuilder::AddFormatted(const char* format, ...) {
void StringBuilder::AddFormattedList(const char* format, va_list list) {
- ASSERT(!is_finalized() && position_ < buffer_.length());
+ ASSERT(!is_finalized() && position_ <= buffer_.length());
int n = OS::VSNPrintF(buffer_ + position_, format, list);
if (n < 0 || n >= (buffer_.length() - position_)) {
position_ = buffer_.length();
@@ -273,97 +273,4 @@ void StringBuilder::AddFormattedList(const char* format, va_list list) {
}
}
-
-MemoryMappedExternalResource::MemoryMappedExternalResource(const char* filename)
- : filename_(NULL),
- data_(NULL),
- length_(0),
- remove_file_on_cleanup_(false) {
- Init(filename);
-}
-
-
-MemoryMappedExternalResource::
- MemoryMappedExternalResource(const char* filename,
- bool remove_file_on_cleanup)
- : filename_(NULL),
- data_(NULL),
- length_(0),
- remove_file_on_cleanup_(remove_file_on_cleanup) {
- Init(filename);
-}
-
-
-MemoryMappedExternalResource::~MemoryMappedExternalResource() {
- // Release the resources if we had successfully acquired them:
- if (file_ != NULL) {
- delete file_;
- if (remove_file_on_cleanup_) {
- OS::Remove(filename_);
- }
- DeleteArray<char>(filename_);
- }
-}
-
-
-void MemoryMappedExternalResource::Init(const char* filename) {
- file_ = OS::MemoryMappedFile::open(filename);
- if (file_ != NULL) {
- filename_ = StrDup(filename);
- data_ = reinterpret_cast<char*>(file_->memory());
- length_ = file_->size();
- }
-}
-
-
-bool MemoryMappedExternalResource::EnsureIsAscii(bool abort_if_failed) const {
- bool is_ascii = true;
-
- int line_no = 1;
- const char* start_of_line = data_;
- const char* end = data_ + length_;
- for (const char* p = data_; p < end; p++) {
- char c = *p;
- if ((c & 0x80) != 0) {
- // Non-ASCII detected:
- is_ascii = false;
-
- // Report the error and abort if appropriate:
- if (abort_if_failed) {
- int char_no = static_cast<int>(p - start_of_line) - 1;
-
- ASSERT(filename_ != NULL);
- PrintF("\n\n\n"
- "Abort: Non-Ascii character 0x%.2x in file %s line %d char %d",
- c, filename_, line_no, char_no);
-
- // Allow for some context up to kNumberOfLeadingContextChars chars
- // before the offending non-ASCII char to help the user see where
- // the offending char is.
- const int kNumberOfLeadingContextChars = 10;
- const char* err_context = p - kNumberOfLeadingContextChars;
- if (err_context < data_) {
- err_context = data_;
- }
- // Compute the length of the error context and print it.
- int err_context_length = static_cast<int>(p - err_context);
- if (err_context_length != 0) {
- PrintF(" after \"%.*s\"", err_context_length, err_context);
- }
- PrintF(".\n\n\n");
- OS::Abort();
- }
-
- break; // Non-ASCII detected. No need to continue scanning.
- }
- if (c == '\n') {
- start_of_line = p;
- line_no++;
- }
- }
-
- return is_ascii;
-}
-
-
} } // namespace v8::internal
diff --git a/deps/v8/src/v8utils.h b/deps/v8/src/v8utils.h
index 111abdf8b..02e57ebe7 100644
--- a/deps/v8/src/v8utils.h
+++ b/deps/v8/src/v8utils.h
@@ -111,6 +111,7 @@ int WriteAsCFile(const char* filename, const char* varname,
const char* str, int size, bool verbose = true);
+// ----------------------------------------------------------------------------
// Data structures
template <typename T>
@@ -120,27 +121,75 @@ inline Vector< Handle<Object> > HandleVector(v8::internal::Handle<T>* elms,
reinterpret_cast<v8::internal::Handle<Object>*>(elms), length);
}
+
+// ----------------------------------------------------------------------------
// Memory
-// Copies data from |src| to |dst|. The data spans MUST not overlap.
+// Copies words from |src| to |dst|. The data spans must not overlap.
+template <typename T>
+inline void CopyWords(T* dst, const T* src, size_t num_words) {
+ STATIC_ASSERT(sizeof(T) == kPointerSize);
+ ASSERT(Min(dst, const_cast<T*>(src)) + num_words <=
+ Max(dst, const_cast<T*>(src)));
+ ASSERT(num_words > 0);
+
+ // Use block copying OS::MemCopy if the segment we're copying is big
+ // enough to justify the extra call/setup overhead.
+ static const size_t kBlockCopyLimit = 16;
+
+ if (num_words < kBlockCopyLimit) {
+ do {
+ num_words--;
+ *dst++ = *src++;
+ } while (num_words > 0);
+ } else {
+ OS::MemCopy(dst, src, num_words * kPointerSize);
+ }
+}
+
+
+// Copies words from |src| to |dst|. No restrictions.
template <typename T>
-inline void CopyWords(T* dst, T* src, int num_words) {
+inline void MoveWords(T* dst, const T* src, size_t num_words) {
STATIC_ASSERT(sizeof(T) == kPointerSize);
- ASSERT(Min(dst, src) + num_words <= Max(dst, src));
ASSERT(num_words > 0);
- // Use block copying memcpy if the segment we're copying is
+ // Use block copying OS::MemCopy if the segment we're copying is big
// enough to justify the extra call/setup overhead.
- static const int kBlockCopyLimit = 16;
+ static const size_t kBlockCopyLimit = 16;
- if (num_words >= kBlockCopyLimit) {
- memcpy(dst, src, num_words * kPointerSize);
+ if (num_words < kBlockCopyLimit &&
+ ((dst < src) || (dst >= (src + num_words * kPointerSize)))) {
+ T* end = dst + num_words;
+ do {
+ num_words--;
+ *dst++ = *src++;
+ } while (num_words > 0);
} else {
- int remaining = num_words;
+ OS::MemMove(dst, src, num_words * kPointerSize);
+ }
+}
+
+
+// Copies data from |src| to |dst|. The data spans must not overlap.
+template <typename T>
+inline void CopyBytes(T* dst, const T* src, size_t num_bytes) {
+ STATIC_ASSERT(sizeof(T) == 1);
+ ASSERT(Min(dst, const_cast<T*>(src)) + num_bytes <=
+ Max(dst, const_cast<T*>(src)));
+ if (num_bytes == 0) return;
+
+ // Use block copying OS::MemCopy if the segment we're copying is big
+ // enough to justify the extra call/setup overhead.
+ static const int kBlockCopyLimit = OS::kMinComplexMemCopy;
+
+ if (num_bytes < static_cast<size_t>(kBlockCopyLimit)) {
do {
- remaining--;
+ num_bytes--;
*dst++ = *src++;
- } while (remaining > 0);
+ } while (num_bytes > 0);
+ } else {
+ OS::MemCopy(dst, src, num_bytes);
}
}
@@ -153,11 +202,18 @@ inline void MemsetPointer(T** dest, U* value, int counter) {
a = b; // Fake assignment to check assignability.
USE(a);
#endif // DEBUG
-#if defined(V8_HOST_ARCH_IA32)
+#if V8_HOST_ARCH_IA32
#define STOS "stosl"
-#elif defined(V8_HOST_ARCH_X64)
+#elif V8_HOST_ARCH_X64
#define STOS "stosq"
#endif
+#if defined(__native_client__)
+ // This STOS sequence does not validate for x86_64 Native Client.
+ // Here we #undef STOS to force use of the slower C version.
+ // TODO(bradchen): Profile V8 and implement a faster REP STOS
+ // here if the profile indicates it matters.
+#undef STOS
+#endif
#if defined(__GNUC__) && defined(STOS)
asm volatile(
@@ -202,15 +258,49 @@ Vector<const char> ReadFile(FILE* file,
bool verbose = true);
+template <typename sourcechar, typename sinkchar>
+INLINE(static void CopyCharsUnsigned(sinkchar* dest,
+ const sourcechar* src,
+ int chars));
+#if defined(V8_HOST_ARCH_ARM)
+INLINE(void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, int chars));
+INLINE(void CopyCharsUnsigned(uint16_t* dest, const uint8_t* src, int chars));
+INLINE(void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, int chars));
+#endif
+
// Copy from ASCII/16bit chars to ASCII/16bit chars.
template <typename sourcechar, typename sinkchar>
INLINE(void CopyChars(sinkchar* dest, const sourcechar* src, int chars));
+template<typename sourcechar, typename sinkchar>
+void CopyChars(sinkchar* dest, const sourcechar* src, int chars) {
+ ASSERT(sizeof(sourcechar) <= 2);
+ ASSERT(sizeof(sinkchar) <= 2);
+ if (sizeof(sinkchar) == 1) {
+ if (sizeof(sourcechar) == 1) {
+ CopyCharsUnsigned(reinterpret_cast<uint8_t*>(dest),
+ reinterpret_cast<const uint8_t*>(src),
+ chars);
+ } else {
+ CopyCharsUnsigned(reinterpret_cast<uint8_t*>(dest),
+ reinterpret_cast<const uint16_t*>(src),
+ chars);
+ }
+ } else {
+ if (sizeof(sourcechar) == 1) {
+ CopyCharsUnsigned(reinterpret_cast<uint16_t*>(dest),
+ reinterpret_cast<const uint8_t*>(src),
+ chars);
+ } else {
+ CopyCharsUnsigned(reinterpret_cast<uint16_t*>(dest),
+ reinterpret_cast<const uint16_t*>(src),
+ chars);
+ }
+ }
+}
template <typename sourcechar, typename sinkchar>
-void CopyChars(sinkchar* dest, const sourcechar* src, int chars) {
- ASSERT(chars >= 0);
- if (chars == 0) return;
+void CopyCharsUnsigned(sinkchar* dest, const sourcechar* src, int chars) {
sinkchar* limit = dest + chars;
#ifdef V8_HOST_CAN_READ_UNALIGNED
if (sizeof(*dest) == sizeof(*src)) {
@@ -220,7 +310,8 @@ void CopyChars(sinkchar* dest, const sourcechar* src, int chars) {
}
// Number of characters in a uintptr_t.
static const int kStepSize = sizeof(uintptr_t) / sizeof(*dest); // NOLINT
- while (dest <= limit - kStepSize) {
+ ASSERT(dest + kStepSize > dest); // Check for overflow.
+ while (dest + kStepSize <= limit) {
*reinterpret_cast<uintptr_t*>(dest) =
*reinterpret_cast<const uintptr_t*>(src);
dest += kStepSize;
@@ -234,36 +325,104 @@ void CopyChars(sinkchar* dest, const sourcechar* src, int chars) {
}
-// A resource for using mmapped files to back external strings that are read
-// from files.
-class MemoryMappedExternalResource: public
- v8::String::ExternalAsciiStringResource {
- public:
- explicit MemoryMappedExternalResource(const char* filename);
- MemoryMappedExternalResource(const char* filename,
- bool remove_file_on_cleanup);
- virtual ~MemoryMappedExternalResource();
+#if defined(V8_HOST_ARCH_ARM)
+void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, int chars) {
+ switch (static_cast<unsigned>(chars)) {
+ case 0:
+ break;
+ case 1:
+ *dest = *src;
+ break;
+ case 2:
+ memcpy(dest, src, 2);
+ break;
+ case 3:
+ memcpy(dest, src, 3);
+ break;
+ case 4:
+ memcpy(dest, src, 4);
+ break;
+ case 5:
+ memcpy(dest, src, 5);
+ break;
+ case 6:
+ memcpy(dest, src, 6);
+ break;
+ case 7:
+ memcpy(dest, src, 7);
+ break;
+ case 8:
+ memcpy(dest, src, 8);
+ break;
+ case 9:
+ memcpy(dest, src, 9);
+ break;
+ case 10:
+ memcpy(dest, src, 10);
+ break;
+ case 11:
+ memcpy(dest, src, 11);
+ break;
+ case 12:
+ memcpy(dest, src, 12);
+ break;
+ case 13:
+ memcpy(dest, src, 13);
+ break;
+ case 14:
+ memcpy(dest, src, 14);
+ break;
+ case 15:
+ memcpy(dest, src, 15);
+ break;
+ default:
+ OS::MemCopy(dest, src, chars);
+ break;
+ }
+}
- virtual const char* data() const { return data_; }
- virtual size_t length() const { return length_; }
- bool exists() const { return file_ != NULL; }
- bool is_empty() const { return length_ == 0; }
+void CopyCharsUnsigned(uint16_t* dest, const uint8_t* src, int chars) {
+ if (chars >= OS::kMinComplexConvertMemCopy) {
+ OS::MemCopyUint16Uint8(dest, src, chars);
+ } else {
+ OS::MemCopyUint16Uint8Wrapper(dest, src, chars);
+ }
+}
- bool EnsureIsAscii(bool abort_if_failed) const;
- bool EnsureIsAscii() const { return EnsureIsAscii(true); }
- bool IsAscii() const { return EnsureIsAscii(false); }
- private:
- void Init(const char* filename);
-
- char* filename_;
- OS::MemoryMappedFile* file_;
+void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, int chars) {
+ switch (static_cast<unsigned>(chars)) {
+ case 0:
+ break;
+ case 1:
+ *dest = *src;
+ break;
+ case 2:
+ memcpy(dest, src, 4);
+ break;
+ case 3:
+ memcpy(dest, src, 6);
+ break;
+ case 4:
+ memcpy(dest, src, 8);
+ break;
+ case 5:
+ memcpy(dest, src, 10);
+ break;
+ case 6:
+ memcpy(dest, src, 12);
+ break;
+ case 7:
+ memcpy(dest, src, 14);
+ break;
+ default:
+ OS::MemCopy(dest, src, chars * sizeof(*dest));
+ break;
+ }
+}
+#endif
- const char* data_;
- size_t length_;
- bool remove_file_on_cleanup_;
-};
class StringBuilder : public SimpleStringBuilder {
public:
diff --git a/deps/v8/src/variables.cc b/deps/v8/src/variables.cc
index 0416f3a39..488da42ce 100644
--- a/deps/v8/src/variables.cc
+++ b/deps/v8/src/variables.cc
@@ -41,8 +41,9 @@ const char* Variable::Mode2String(VariableMode mode) {
switch (mode) {
case VAR: return "VAR";
case CONST: return "CONST";
- case CONST_HARMONY: return "CONST_HARMONY";
case LET: return "LET";
+ case CONST_HARMONY: return "CONST_HARMONY";
+ case MODULE: return "MODULE";
case DYNAMIC: return "DYNAMIC";
case DYNAMIC_GLOBAL: return "DYNAMIC_GLOBAL";
case DYNAMIC_LOCAL: return "DYNAMIC_LOCAL";
@@ -75,7 +76,7 @@ Variable::Variable(Scope* scope,
initialization_flag_(initialization_flag),
interface_(interface) {
// Names must be canonicalized for fast equality checks.
- ASSERT(name->IsSymbol());
+ ASSERT(name->IsInternalizedString());
// Var declared variables never need initialization.
ASSERT(!(mode == VAR && initialization_flag == kNeedsInitialization));
}
@@ -84,7 +85,8 @@ Variable::Variable(Scope* scope,
bool Variable::IsGlobalObjectProperty() const {
// Temporaries are never global, they must always be allocated in the
// activation frame.
- return mode_ != TEMPORARY && !IsLexicalVariableMode(mode_)
+ return (IsDynamicVariableMode(mode_) ||
+ (IsDeclaredVariableMode(mode_) && !IsLexicalVariableMode(mode_)))
&& scope_ != NULL && scope_->is_global_scope();
}
diff --git a/deps/v8/src/variables.h b/deps/v8/src/variables.h
index ba26b8047..39451d5df 100644
--- a/deps/v8/src/variables.h
+++ b/deps/v8/src/variables.h
@@ -130,8 +130,8 @@ class Variable: public ZoneObject {
bool is_arguments() const { return kind_ == ARGUMENTS; }
// True if the variable is named eval and not known to be shadowed.
- bool is_possibly_eval() const {
- return IsVariable(FACTORY->eval_symbol());
+ bool is_possibly_eval(Isolate* isolate) const {
+ return IsVariable(isolate->factory()->eval_string());
}
Variable* local_if_not_shadowed() const {
diff --git a/deps/v8/src/version.cc b/deps/v8/src/version.cc
index f7dcbd303..dcf8df7e2 100644
--- a/deps/v8/src/version.cc
+++ b/deps/v8/src/version.cc
@@ -30,19 +30,19 @@
#include "version.h"
// These macros define the version number for the current version.
-// NOTE these macros are used by the SCons build script so their names
-// cannot be changed without changing the SCons build script.
+// NOTE these macros are used by some of the tool scripts and the build
+// system so their names cannot be changed without changing the scripts.
#define MAJOR_VERSION 3
-#define MINOR_VERSION 14
-#define BUILD_NUMBER 5
-#define PATCH_LEVEL 9
+#define MINOR_VERSION 22
+#define BUILD_NUMBER 24
+#define PATCH_LEVEL 5
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
#define IS_CANDIDATE_VERSION 0
-// Define SONAME to have the SCons build the put a specific SONAME into the
+// Define SONAME to have the build system put a specific SONAME into the
// shared library instead of the generic SONAME generated from the V8 version
-// number. This define is mainly used by the SCons build script.
+// number. This define is mainly used by the build system script.
#define SONAME ""
#if IS_CANDIDATE_VERSION
diff --git a/deps/v8/src/vm-state-inl.h b/deps/v8/src/vm-state-inl.h
index 384940dfa..658773e6d 100644
--- a/deps/v8/src/vm-state-inl.h
+++ b/deps/v8/src/vm-state-inl.h
@@ -29,7 +29,8 @@
#define V8_VM_STATE_INL_H_
#include "vm-state.h"
-#include "runtime-profiler.h"
+#include "log.h"
+#include "simulator.h"
namespace v8 {
namespace internal {
@@ -47,8 +48,6 @@ inline const char* StateToString(StateTag state) {
return "GC";
case COMPILER:
return "COMPILER";
- case PARALLEL_COMPILER_PROLOGUE:
- return "PARALLEL_COMPILER_PROLOGUE";
case OTHER:
return "OTHER";
case EXTERNAL:
@@ -60,37 +59,48 @@ inline const char* StateToString(StateTag state) {
}
-VMState::VMState(Isolate* isolate, StateTag tag)
+template <StateTag Tag>
+VMState<Tag>::VMState(Isolate* isolate)
: isolate_(isolate), previous_tag_(isolate->current_vm_state()) {
- if (FLAG_log_state_changes) {
- LOG(isolate, UncheckedStringEvent("Entering", StateToString(tag)));
- LOG(isolate, UncheckedStringEvent("From", StateToString(previous_tag_)));
+ if (FLAG_log_timer_events && previous_tag_ != EXTERNAL && Tag == EXTERNAL) {
+ LOG(isolate_,
+ TimerEvent(Logger::START, Logger::TimerEventScope::v8_external));
}
-
- isolate_->SetCurrentVMState(tag);
+ isolate_->set_current_vm_state(Tag);
}
-VMState::~VMState() {
- if (FLAG_log_state_changes) {
- LOG(isolate_,
- UncheckedStringEvent("Leaving",
- StateToString(isolate_->current_vm_state())));
+template <StateTag Tag>
+VMState<Tag>::~VMState() {
+ if (FLAG_log_timer_events && previous_tag_ != EXTERNAL && Tag == EXTERNAL) {
LOG(isolate_,
- UncheckedStringEvent("To", StateToString(previous_tag_)));
+ TimerEvent(Logger::END, Logger::TimerEventScope::v8_external));
}
-
- isolate_->SetCurrentVMState(previous_tag_);
+ isolate_->set_current_vm_state(previous_tag_);
}
ExternalCallbackScope::ExternalCallbackScope(Isolate* isolate, Address callback)
- : isolate_(isolate), previous_callback_(isolate->external_callback()) {
- isolate_->set_external_callback(callback);
+ : isolate_(isolate),
+ callback_(callback),
+ previous_scope_(isolate->external_callback_scope()) {
+#ifdef USE_SIMULATOR
+ int32_t sp = Simulator::current(isolate)->get_register(Simulator::sp);
+ scope_address_ = reinterpret_cast<Address>(static_cast<intptr_t>(sp));
+#endif
+ isolate_->set_external_callback_scope(this);
}
ExternalCallbackScope::~ExternalCallbackScope() {
- isolate_->set_external_callback(previous_callback_);
+ isolate_->set_external_callback_scope(previous_scope_);
+}
+
+Address ExternalCallbackScope::scope_address() {
+#ifdef USE_SIMULATOR
+ return scope_address_;
+#else
+ return reinterpret_cast<Address>(this);
+#endif
}
diff --git a/deps/v8/src/vm-state.h b/deps/v8/src/vm-state.h
index 831e2d396..f592bb92c 100644
--- a/deps/v8/src/vm-state.h
+++ b/deps/v8/src/vm-state.h
@@ -34,9 +34,10 @@
namespace v8 {
namespace internal {
+template <StateTag Tag>
class VMState BASE_EMBEDDED {
public:
- inline VMState(Isolate* isolate, StateTag tag);
+ explicit inline VMState(Isolate* isolate);
inline ~VMState();
private:
@@ -49,9 +50,18 @@ class ExternalCallbackScope BASE_EMBEDDED {
public:
inline ExternalCallbackScope(Isolate* isolate, Address callback);
inline ~ExternalCallbackScope();
+ Address callback() { return callback_; }
+ Address* callback_address() { return &callback_; }
+ ExternalCallbackScope* previous() { return previous_scope_; }
+ inline Address scope_address();
+
private:
Isolate* isolate_;
- Address previous_callback_;
+ Address callback_;
+ ExternalCallbackScope* previous_scope_;
+#ifdef USE_SIMULATOR
+ Address scope_address_;
+#endif
};
} } // namespace v8::internal
diff --git a/deps/v8/src/win32-headers.h b/deps/v8/src/win32-headers.h
index 5d9c89e31..98b0120ea 100644
--- a/deps/v8/src/win32-headers.h
+++ b/deps/v8/src/win32-headers.h
@@ -25,6 +25,9 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#ifndef V8_WIN32_HEADERS_H_
+#define V8_WIN32_HEADERS_H_
+
#ifndef WIN32_LEAN_AND_MEAN
// WIN32_LEAN_AND_MEAN implies NOCRYPT and NOGDI.
#define WIN32_LEAN_AND_MEAN
@@ -55,7 +58,6 @@
#include <windows.h>
-#ifdef V8_WIN32_HEADERS_FULL
#include <signal.h> // For raise().
#include <time.h> // For LocalOffset() implementation.
#include <mmsystem.h> // For timeGetTime().
@@ -81,7 +83,6 @@
#endif // __MINGW32__
#include <process.h> // For _beginthreadex().
#include <stdlib.h>
-#endif // V8_WIN32_HEADERS_FULL
#undef VOID
#undef DELETE
@@ -89,12 +90,12 @@
#undef THIS
#undef CONST
#undef NAN
-#undef TRUE
-#undef FALSE
#undef UNKNOWN
#undef NONE
#undef ANY
#undef IGNORE
#undef GetObject
-#undef CreateMutex
#undef CreateSemaphore
+#undef Yield
+
+#endif // V8_WIN32_HEADERS_H_
diff --git a/deps/v8/src/win32-math.cc b/deps/v8/src/win32-math.cc
index 3410872bb..8f6d07743 100644
--- a/deps/v8/src/win32-math.cc
+++ b/deps/v8/src/win32-math.cc
@@ -29,27 +29,18 @@
// refer to The Open Group Base Specification for specification of the correct
// semantics for these functions.
// (http://www.opengroup.org/onlinepubs/000095399/)
-#ifdef _MSC_VER
+#if defined(_MSC_VER) && (_MSC_VER < 1800)
-#undef V8_WIN32_LEAN_AND_MEAN
-#define V8_WIN32_HEADERS_FULL
#include "win32-headers.h"
#include <limits.h> // Required for INT_MAX etc.
-#include <math.h>
#include <float.h> // Required for DBL_MAX and on Win32 for finite()
+#include <cmath>
#include "win32-math.h"
#include "checks.h"
-namespace v8 {
-
-// Test for finite value - usually defined in math.h
-int isfinite(double x) {
- return _finite(x);
-}
-
-} // namespace v8
+namespace std {
// Test for a NaN (not a number) value - usually defined in math.h
int isnan(double x) {
@@ -63,6 +54,12 @@ int isinf(double x) {
}
+// Test for finite value - usually defined in math.h
+int isfinite(double x) {
+ return _finite(x);
+}
+
+
// Test if x is less than y and both nominal - usually defined in math.h
int isless(double x, double y) {
return isnan(x) || isnan(y) ? 0 : x < y;
@@ -103,4 +100,6 @@ int signbit(double x) {
return x < 0;
}
+} // namespace std
+
#endif // _MSC_VER
diff --git a/deps/v8/src/win32-math.h b/deps/v8/src/win32-math.h
index 68759990b..fd9312b0f 100644
--- a/deps/v8/src/win32-math.h
+++ b/deps/v8/src/win32-math.h
@@ -37,6 +37,8 @@
#error Wrong environment, expected MSVC.
#endif // _MSC_VER
+// MSVC 2013+ provides implementations of all standard math functions.
+#if (_MSC_VER < 1800)
enum {
FP_NAN,
FP_INFINITE,
@@ -45,17 +47,19 @@ enum {
FP_NORMAL
};
-namespace v8 {
-int isfinite(double x);
-
-} // namespace v8
+namespace std {
-int isnan(double x);
+int isfinite(double x);
int isinf(double x);
+int isnan(double x);
int isless(double x, double y);
int isgreater(double x, double y);
int fpclassify(double x);
int signbit(double x);
+} // namespace std
+
+#endif // _MSC_VER < 1800
+
#endif // V8_WIN32_MATH_H_
diff --git a/deps/v8/src/x64/assembler-x64-inl.h b/deps/v8/src/x64/assembler-x64-inl.h
index d022340c1..afac886c7 100644
--- a/deps/v8/src/x64/assembler-x64-inl.h
+++ b/deps/v8/src/x64/assembler-x64-inl.h
@@ -42,17 +42,28 @@ namespace internal {
// Implementation of Assembler
+static const byte kCallOpcode = 0xE8;
+static const int kNoCodeAgeSequenceLength = 6;
+
+
void Assembler::emitl(uint32_t x) {
Memory::uint32_at(pc_) = x;
pc_ += sizeof(uint32_t);
}
-void Assembler::emitq(uint64_t x, RelocInfo::Mode rmode) {
- Memory::uint64_at(pc_) = x;
- if (rmode != RelocInfo::NONE) {
- RecordRelocInfo(rmode, x);
+void Assembler::emitp(void* x, RelocInfo::Mode rmode) {
+ uintptr_t value = reinterpret_cast<uintptr_t>(x);
+ Memory::uintptr_at(pc_) = value;
+ if (!RelocInfo::IsNone(rmode)) {
+ RecordRelocInfo(rmode, value);
}
+ pc_ += sizeof(uintptr_t);
+}
+
+
+void Assembler::emitq(uint64_t x) {
+ Memory::uint64_at(pc_) = x;
pc_ += sizeof(uint64_t);
}
@@ -66,7 +77,8 @@ void Assembler::emitw(uint16_t x) {
void Assembler::emit_code_target(Handle<Code> target,
RelocInfo::Mode rmode,
TypeFeedbackId ast_id) {
- ASSERT(RelocInfo::IsCodeTarget(rmode));
+ ASSERT(RelocInfo::IsCodeTarget(rmode) ||
+ rmode == RelocInfo::CODE_AGE_SEQUENCE);
if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
RecordRelocInfo(RelocInfo::CODE_TARGET_WITH_ID, ast_id.ToInt());
} else {
@@ -83,6 +95,14 @@ void Assembler::emit_code_target(Handle<Code> target,
}
+void Assembler::emit_runtime_entry(Address entry, RelocInfo::Mode rmode) {
+ ASSERT(RelocInfo::IsRuntimeEntry(rmode));
+ ASSERT(isolate()->code_range()->exists());
+ RecordRelocInfo(rmode);
+ emitl(static_cast<uint32_t>(entry - isolate()->code_range()->start()));
+}
+
+
void Assembler::emit_rex_64(Register reg, Register rm_reg) {
emit(0x48 | reg.high_bit() << 2 | rm_reg.high_bit());
}
@@ -205,6 +225,12 @@ Handle<Object> Assembler::code_target_object_handle_at(Address pc) {
return code_targets_[Memory::int32_at(pc)];
}
+
+Address Assembler::runtime_entry_at(Address pc) {
+ ASSERT(isolate()->code_range()->exists());
+ return Memory::int32_at(pc) + isolate()->code_range()->start();
+}
+
// -----------------------------------------------------------------------------
// Implementation of RelocInfo
@@ -214,25 +240,27 @@ void RelocInfo::apply(intptr_t delta) {
// absolute code pointer inside code object moves with the code object.
Memory::Address_at(pc_) += static_cast<int32_t>(delta);
CPU::FlushICache(pc_, sizeof(Address));
- } else if (IsCodeTarget(rmode_)) {
+ } else if (IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)) {
Memory::int32_at(pc_) -= static_cast<int32_t>(delta);
CPU::FlushICache(pc_, sizeof(int32_t));
+ } else if (rmode_ == CODE_AGE_SEQUENCE) {
+ if (*pc_ == kCallOpcode) {
+ int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1);
+ *p -= static_cast<int32_t>(delta); // Relocate entry.
+ CPU::FlushICache(p, sizeof(uint32_t));
+ }
}
}
Address RelocInfo::target_address() {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
- if (IsCodeTarget(rmode_)) {
- return Assembler::target_address_at(pc_);
- } else {
- return Memory::Address_at(pc_);
- }
+ ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
+ return Assembler::target_address_at(pc_);
}
Address RelocInfo::target_address_address() {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY
+ ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
|| rmode_ == EMBEDDED_OBJECT
|| rmode_ == EXTERNAL_REFERENCE);
return reinterpret_cast<Address>(pc_);
@@ -249,17 +277,12 @@ int RelocInfo::target_address_size() {
void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
- if (IsCodeTarget(rmode_)) {
- Assembler::set_target_address_at(pc_, target);
+ ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
+ Assembler::set_target_address_at(pc_, target);
+ if (mode == UPDATE_WRITE_BARRIER && host() != NULL && IsCodeTarget(rmode_)) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
- if (mode == UPDATE_WRITE_BARRIER && host() != NULL) {
- host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
- host(), this, HeapObject::cast(target_code));
- }
- } else {
- Memory::Address_at(pc_) = target;
- CPU::FlushICache(pc_, sizeof(Address));
+ host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+ host(), this, HeapObject::cast(target_code));
}
}
@@ -294,6 +317,7 @@ Address* RelocInfo::target_reference_address() {
void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ ASSERT(!target->IsConsString());
Memory::Object_at(pc_) = target;
CPU::FlushICache(pc_, sizeof(Address));
if (mode == UPDATE_WRITE_BARRIER &&
@@ -305,24 +329,35 @@ void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
}
-Handle<JSGlobalPropertyCell> RelocInfo::target_cell_handle() {
- ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
+Address RelocInfo::target_runtime_entry(Assembler* origin) {
+ ASSERT(IsRuntimeEntry(rmode_));
+ return origin->runtime_entry_at(pc_);
+}
+
+
+void RelocInfo::set_target_runtime_entry(Address target,
+ WriteBarrierMode mode) {
+ ASSERT(IsRuntimeEntry(rmode_));
+ if (target_address() != target) set_target_address(target, mode);
+}
+
+
+Handle<Cell> RelocInfo::target_cell_handle() {
+ ASSERT(rmode_ == RelocInfo::CELL);
Address address = Memory::Address_at(pc_);
- return Handle<JSGlobalPropertyCell>(
- reinterpret_cast<JSGlobalPropertyCell**>(address));
+ return Handle<Cell>(reinterpret_cast<Cell**>(address));
}
-JSGlobalPropertyCell* RelocInfo::target_cell() {
- ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
- return JSGlobalPropertyCell::FromValueAddress(Memory::Address_at(pc_));
+Cell* RelocInfo::target_cell() {
+ ASSERT(rmode_ == RelocInfo::CELL);
+ return Cell::FromValueAddress(Memory::Address_at(pc_));
}
-void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell,
- WriteBarrierMode mode) {
- ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
- Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
+void RelocInfo::set_target_cell(Cell* cell, WriteBarrierMode mode) {
+ ASSERT(rmode_ == RelocInfo::CELL);
+ Address address = cell->address() + Cell::kValueOffset;
Memory::Address_at(pc_) = address;
CPU::FlushICache(pc_, sizeof(Address));
if (mode == UPDATE_WRITE_BARRIER &&
@@ -337,13 +372,14 @@ void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell,
bool RelocInfo::IsPatchedReturnSequence() {
// The recognized call sequence is:
- // movq(kScratchRegister, immediate64); call(kScratchRegister);
+ // movq(kScratchRegister, address); call(kScratchRegister);
// It only needs to be distinguished from a return sequence
// movq(rsp, rbp); pop(rbp); ret(n); int3 *6
// The 11th byte is int3 (0xCC) in the return sequence and
// REX.WB (0x48+register bit) for the call sequence.
#ifdef ENABLE_DEBUGGER_SUPPORT
- return pc_[10] != 0xCC;
+ return pc_[Assembler::kMoveAddressIntoScratchRegisterInstructionLength] !=
+ 0xCC;
#else
return false;
#endif
@@ -355,6 +391,28 @@ bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
}
+Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
+ ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+ ASSERT(*pc_ == kCallOpcode);
+ return origin->code_target_object_handle_at(pc_ + 1);
+}
+
+
+Code* RelocInfo::code_age_stub() {
+ ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+ ASSERT(*pc_ == kCallOpcode);
+ return Code::GetCodeFromTargetAddress(
+ Assembler::target_address_at(pc_ + 1));
+}
+
+
+void RelocInfo::set_code_age_stub(Code* stub) {
+ ASSERT(*pc_ == kCallOpcode);
+ ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+ Assembler::set_target_address_at(pc_ + 1, stub->instruction_start());
+}
+
+
Address RelocInfo::call_address() {
ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
@@ -396,28 +454,29 @@ Object** RelocInfo::call_object_address() {
}
-void RelocInfo::Visit(ObjectVisitor* visitor) {
+void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
visitor->VisitEmbeddedPointer(this);
CPU::FlushICache(pc_, sizeof(Address));
} else if (RelocInfo::IsCodeTarget(mode)) {
visitor->VisitCodeTarget(this);
- } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
- visitor->VisitGlobalPropertyCell(this);
+ } else if (mode == RelocInfo::CELL) {
+ visitor->VisitCell(this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
visitor->VisitExternalReference(this);
CPU::FlushICache(pc_, sizeof(Address));
+ } else if (RelocInfo::IsCodeAgeSequence(mode)) {
+ visitor->VisitCodeAgeSequence(this);
#ifdef ENABLE_DEBUGGER_SUPPORT
- // TODO(isolates): Get a cached isolate below.
} else if (((RelocInfo::IsJSReturn(mode) &&
IsPatchedReturnSequence()) ||
(RelocInfo::IsDebugBreakSlot(mode) &&
IsPatchedDebugBreakSlotSequence())) &&
- Isolate::Current()->debug()->has_break_points()) {
+ isolate->debug()->has_break_points()) {
visitor->VisitDebugTarget(this);
#endif
- } else if (mode == RelocInfo::RUNTIME_ENTRY) {
+ } else if (RelocInfo::IsRuntimeEntry(mode)) {
visitor->VisitRuntimeEntry(this);
}
}
@@ -431,11 +490,13 @@ void RelocInfo::Visit(Heap* heap) {
CPU::FlushICache(pc_, sizeof(Address));
} else if (RelocInfo::IsCodeTarget(mode)) {
StaticVisitor::VisitCodeTarget(heap, this);
- } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
- StaticVisitor::VisitGlobalPropertyCell(heap, this);
+ } else if (mode == RelocInfo::CELL) {
+ StaticVisitor::VisitCell(heap, this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
StaticVisitor::VisitExternalReference(this);
CPU::FlushICache(pc_, sizeof(Address));
+ } else if (RelocInfo::IsCodeAgeSequence(mode)) {
+ StaticVisitor::VisitCodeAgeSequence(heap, this);
#ifdef ENABLE_DEBUGGER_SUPPORT
} else if (heap->isolate()->debug()->has_break_points() &&
((RelocInfo::IsJSReturn(mode) &&
@@ -444,7 +505,7 @@ void RelocInfo::Visit(Heap* heap) {
IsPatchedDebugBreakSlotSequence()))) {
StaticVisitor::VisitDebugTarget(heap, this);
#endif
- } else if (mode == RelocInfo::RUNTIME_ENTRY) {
+ } else if (RelocInfo::IsRuntimeEntry(mode)) {
StaticVisitor::VisitRuntimeEntry(this);
}
}
diff --git a/deps/v8/src/x64/assembler-x64.cc b/deps/v8/src/x64/assembler-x64.cc
index 862a73557..dcb9fa562 100644
--- a/deps/v8/src/x64/assembler-x64.cc
+++ b/deps/v8/src/x64/assembler-x64.cc
@@ -27,7 +27,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_X64)
+#if V8_TARGET_ARCH_X64
#include "macro-assembler.h"
#include "serialize.h"
@@ -43,7 +43,13 @@ namespace internal {
bool CpuFeatures::initialized_ = false;
#endif
uint64_t CpuFeatures::supported_ = CpuFeatures::kDefaultCpuFeatures;
-uint64_t CpuFeatures::found_by_runtime_probing_ = 0;
+uint64_t CpuFeatures::found_by_runtime_probing_only_ = 0;
+uint64_t CpuFeatures::cross_compile_ = 0;
+
+ExternalReference ExternalReference::cpu_features() {
+ ASSERT(CpuFeatures::initialized_);
+ return ExternalReference(&CpuFeatures::supported_);
+}
void CpuFeatures::Probe() {
@@ -57,98 +63,32 @@ void CpuFeatures::Probe() {
return; // No features if we might serialize.
}
- const int kBufferSize = 4 * KB;
- VirtualMemory* memory = new VirtualMemory(kBufferSize);
- if (!memory->IsReserved()) {
- delete memory;
- return;
+ uint64_t probed_features = 0;
+ CPU cpu;
+ if (cpu.has_sse41()) {
+ probed_features |= static_cast<uint64_t>(1) << SSE4_1;
}
- ASSERT(memory->size() >= static_cast<size_t>(kBufferSize));
- if (!memory->Commit(memory->address(), kBufferSize, true/*executable*/)) {
- delete memory;
- return;
+ if (cpu.has_sse3()) {
+ probed_features |= static_cast<uint64_t>(1) << SSE3;
}
- Assembler assm(NULL, memory->address(), kBufferSize);
- Label cpuid, done;
-#define __ assm.
- // Save old rsp, since we are going to modify the stack.
- __ push(rbp);
- __ pushfq();
- __ push(rdi);
- __ push(rcx);
- __ push(rbx);
- __ movq(rbp, rsp);
-
- // If we can modify bit 21 of the EFLAGS register, then CPUID is supported.
- __ pushfq();
- __ pop(rax);
- __ movq(rdx, rax);
- __ xor_(rax, Immediate(0x200000)); // Flip bit 21.
- __ push(rax);
- __ popfq();
- __ pushfq();
- __ pop(rax);
- __ xor_(rax, rdx); // Different if CPUID is supported.
- __ j(not_zero, &cpuid);
-
- // CPUID not supported. Clear the supported features in rax.
- __ xor_(rax, rax);
- __ jmp(&done);
-
- // Invoke CPUID with 1 in eax to get feature information in
- // ecx:edx. Temporarily enable CPUID support because we know it's
- // safe here.
- __ bind(&cpuid);
- __ movl(rax, Immediate(1));
- supported_ = kDefaultCpuFeatures | (1 << CPUID);
- { Scope fscope(CPUID);
- __ cpuid();
- // Move the result from ecx:edx to rdi.
- __ movl(rdi, rdx); // Zero-extended to 64 bits.
- __ shl(rcx, Immediate(32));
- __ or_(rdi, rcx);
-
- // Get the sahf supported flag, from CPUID(0x80000001)
- __ movq(rax, 0x80000001, RelocInfo::NONE);
- __ cpuid();
+ // SSE2 must be available on every x64 CPU.
+ ASSERT(cpu.has_sse2());
+ probed_features |= static_cast<uint64_t>(1) << SSE2;
+
+ // CMOV must be available on every x64 CPU.
+ ASSERT(cpu.has_cmov());
+ probed_features |= static_cast<uint64_t>(1) << CMOV;
+
+ // SAHF is not generally available in long mode.
+ if (cpu.has_sahf()) {
+ probed_features |= static_cast<uint64_t>(1) << SAHF;
}
- supported_ = kDefaultCpuFeatures;
- // Put the CPU flags in rax.
- // rax = (rcx & 1) | (rdi & ~1) | (1 << CPUID).
- __ movl(rax, Immediate(1));
- __ and_(rcx, rax); // Bit 0 is set if SAHF instruction supported.
- __ not_(rax);
- __ and_(rax, rdi);
- __ or_(rax, rcx);
- __ or_(rax, Immediate(1 << CPUID));
-
- // Done.
- __ bind(&done);
- __ movq(rsp, rbp);
- __ pop(rbx);
- __ pop(rcx);
- __ pop(rdi);
- __ popfq();
- __ pop(rbp);
- __ ret(0);
-#undef __
-
- typedef uint64_t (*F0)();
- F0 probe = FUNCTION_CAST<F0>(reinterpret_cast<Address>(memory->address()));
- supported_ = probe();
- found_by_runtime_probing_ = supported_;
- found_by_runtime_probing_ &= ~kDefaultCpuFeatures;
- uint64_t os_guarantees = OS::CpuFeaturesImpliedByPlatform();
- supported_ |= os_guarantees;
- found_by_runtime_probing_ &= ~os_guarantees;
- // SSE2 and CMOV must be available on an X64 CPU.
- ASSERT(IsSupported(CPUID));
- ASSERT(IsSupported(SSE2));
- ASSERT(IsSupported(CMOV));
-
- delete memory;
+ uint64_t platform_features = OS::CpuFeaturesImpliedByPlatform();
+ supported_ = probed_features | platform_features;
+ found_by_runtime_probing_only_
+ = probed_features & ~kDefaultCpuFeatures & ~platform_features;
}
@@ -158,10 +98,7 @@ void CpuFeatures::Probe() {
// Patch the code at the current PC with a call to the target address.
// Additional guard int3 instructions can be added if required.
void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
- // Load register with immediate 64 and call through a register instructions
- // takes up 13 bytes and int3 takes up one byte.
- static const int kCallCodeSize = 13;
- int code_size = kCallCodeSize + guard_bytes;
+ int code_size = Assembler::kCallSequenceLength + guard_bytes;
// Create a code patcher.
CodePatcher patcher(pc_, code_size);
@@ -173,11 +110,11 @@ void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
#endif
// Patch the code.
- patcher.masm()->movq(r10, target, RelocInfo::NONE);
- patcher.masm()->call(r10);
+ patcher.masm()->movq(kScratchRegister, target, RelocInfo::NONE64);
+ patcher.masm()->call(kScratchRegister);
// Check that the size of the code generated is as expected.
- ASSERT_EQ(kCallCodeSize,
+ ASSERT_EQ(Assembler::kCallSequenceLength,
patcher.masm()->SizeOfCodeGeneratedSince(&check_codesize));
// Add the requested number of int3 instructions after the call.
@@ -201,7 +138,8 @@ void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
// -----------------------------------------------------------------------------
// Register constants.
-const int Register::kRegisterCodeByAllocationIndex[kNumAllocatableRegisters] = {
+const int
+ Register::kRegisterCodeByAllocationIndex[kMaxNumAllocatableRegisters] = {
// rax, rbx, rdx, rcx, rdi, r8, r9, r11, r14, r15
0, 3, 2, 1, 7, 8, 9, 11, 14, 15
};
@@ -346,50 +284,20 @@ bool Operand::AddressUsesRegister(Register reg) const {
static void InitCoverageLog();
#endif
-Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
- : AssemblerBase(arg_isolate),
+Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
+ : AssemblerBase(isolate, buffer, buffer_size),
code_targets_(100),
- positions_recorder_(this),
- emit_debug_code_(FLAG_debug_code),
- predictable_code_size_(false) {
- if (buffer == NULL) {
- // Do our own buffer management.
- if (buffer_size <= kMinimalBufferSize) {
- buffer_size = kMinimalBufferSize;
-
- if (isolate() != NULL && isolate()->assembler_spare_buffer() != NULL) {
- buffer = isolate()->assembler_spare_buffer();
- isolate()->set_assembler_spare_buffer(NULL);
- }
- }
- if (buffer == NULL) {
- buffer_ = NewArray<byte>(buffer_size);
- } else {
- buffer_ = static_cast<byte*>(buffer);
- }
- buffer_size_ = buffer_size;
- own_buffer_ = true;
- } else {
- // Use externally provided buffer instead.
- ASSERT(buffer_size > 0);
- buffer_ = static_cast<byte*>(buffer);
- buffer_size_ = buffer_size;
- own_buffer_ = false;
- }
-
+ positions_recorder_(this) {
// Clear the buffer in debug mode unless it was provided by the
// caller in which case we can't be sure it's okay to overwrite
// existing code in it.
#ifdef DEBUG
if (own_buffer_) {
- memset(buffer_, 0xCC, buffer_size); // int3
+ memset(buffer_, 0xCC, buffer_size_); // int3
}
#endif
- // Set up buffer pointers.
- ASSERT(buffer_ != NULL);
- pc_ = buffer_;
- reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
+ reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
#ifdef GENERATED_CODE_COVERAGE
@@ -398,19 +306,6 @@ Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
}
-Assembler::~Assembler() {
- if (own_buffer_) {
- if (isolate() != NULL &&
- isolate()->assembler_spare_buffer() == NULL &&
- buffer_size_ == kMinimalBufferSize) {
- isolate()->set_assembler_spare_buffer(buffer_);
- } else {
- DeleteArray(buffer_);
- }
- }
-}
-
-
void Assembler::GetCode(CodeDesc* desc) {
// Finalize code (at this point overflow() may be true, but the gap ensures
// that we are still not overlapping instructions and relocation info).
@@ -501,7 +396,7 @@ void Assembler::GrowBuffer() {
// Some internal data structures overflow for very large buffers,
// they must ensure that kMaximalBufferSize is not too large.
if ((desc.buffer_size > kMaximalBufferSize) ||
- (desc.buffer_size > HEAP->MaxOldGenerationSize())) {
+ (desc.buffer_size > isolate()->heap()->MaxOldGenerationSize())) {
V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
}
@@ -521,9 +416,9 @@ void Assembler::GrowBuffer() {
intptr_t pc_delta = desc.buffer - buffer_;
intptr_t rc_delta = (desc.buffer + desc.buffer_size) -
(buffer_ + buffer_size_);
- memmove(desc.buffer, buffer_, desc.instr_size);
- memmove(rc_delta + reloc_info_writer.pos(),
- reloc_info_writer.pos(), desc.reloc_size);
+ OS::MemMove(desc.buffer, buffer_, desc.instr_size);
+ OS::MemMove(rc_delta + reloc_info_writer.pos(),
+ reloc_info_writer.pos(), desc.reloc_size);
// Switch buffers.
if (isolate() != NULL &&
@@ -876,6 +771,16 @@ void Assembler::call(Label* L) {
}
+void Assembler::call(Address entry, RelocInfo::Mode rmode) {
+ ASSERT(RelocInfo::IsRuntimeEntry(rmode));
+ positions_recorder()->WriteRecordedPositions();
+ EnsureSpace ensure_space(this);
+ // 1110 1000 #32-bit disp.
+ emit(0xE8);
+ emit_runtime_entry(entry, rmode);
+}
+
+
void Assembler::call(Handle<Code> target,
RelocInfo::Mode rmode,
TypeFeedbackId ast_id) {
@@ -928,11 +833,13 @@ void Assembler::clc() {
emit(0xF8);
}
+
void Assembler::cld() {
EnsureSpace ensure_space(this);
emit(0xFC);
}
+
void Assembler::cdq() {
EnsureSpace ensure_space(this);
emit(0x99);
@@ -1014,7 +921,6 @@ void Assembler::cmpb_al(Immediate imm8) {
void Assembler::cpuid() {
- ASSERT(CpuFeatures::IsEnabled(CPUID));
EnsureSpace ensure_space(this);
emit(0x0F);
emit(0xA2);
@@ -1238,13 +1144,13 @@ void Assembler::j(Condition cc, Label* L, Label::Distance distance) {
// Determine whether we can use 1-byte offsets for backwards branches,
// which have a max range of 128 bytes.
- // We also need to check the predictable_code_size_ flag here, because
- // on x64, when the full code generator recompiles code for debugging, some
- // places need to be padded out to a certain size. The debugger is keeping
- // track of how often it did this so that it can adjust return addresses on
- // the stack, but if the size of jump instructions can also change, that's
- // not enough and the calculated offsets would be incorrect.
- if (is_int8(offs - short_size) && !predictable_code_size_) {
+ // We also need to check predictable_code_size() flag here, because on x64,
+ // when the full code generator recompiles code for debugging, some places
+ // need to be padded out to a certain size. The debugger is keeping track of
+ // how often it did this so that it can adjust return addresses on the
+ // stack, but if the size of jump instructions can also change, that's not
+ // enough and the calculated offsets would be incorrect.
+ if (is_int8(offs - short_size) && !predictable_code_size()) {
// 0111 tttn #8-bit disp.
emit(0x70 | cc);
emit((offs - short_size) & 0xFF);
@@ -1282,6 +1188,16 @@ void Assembler::j(Condition cc, Label* L, Label::Distance distance) {
}
+void Assembler::j(Condition cc, Address entry, RelocInfo::Mode rmode) {
+ ASSERT(RelocInfo::IsRuntimeEntry(rmode));
+ EnsureSpace ensure_space(this);
+ ASSERT(is_uint4(cc));
+ emit(0x0F);
+ emit(0x80 | cc);
+ emit_runtime_entry(entry, rmode);
+}
+
+
void Assembler::j(Condition cc,
Handle<Code> target,
RelocInfo::Mode rmode) {
@@ -1301,7 +1217,7 @@ void Assembler::jmp(Label* L, Label::Distance distance) {
if (L->is_bound()) {
int offs = L->pos() - pc_offset() - 1;
ASSERT(offs <= 0);
- if (is_int8(offs - short_size) && !predictable_code_size_) {
+ if (is_int8(offs - short_size) && !predictable_code_size()) {
// 1110 1011 #8-bit disp.
emit(0xEB);
emit((offs - short_size) & 0xFF);
@@ -1344,6 +1260,15 @@ void Assembler::jmp(Handle<Code> target, RelocInfo::Mode rmode) {
}
+void Assembler::jmp(Address entry, RelocInfo::Mode rmode) {
+ ASSERT(RelocInfo::IsRuntimeEntry(rmode));
+ EnsureSpace ensure_space(this);
+ emit(0xE9);
+ emit_runtime_entry(entry, rmode);
+}
+
+
void Assembler::jmp(Register target) {
EnsureSpace ensure_space(this);
// Opcode FF/4 r64.
@@ -1382,7 +1307,7 @@ void Assembler::load_rax(void* value, RelocInfo::Mode mode) {
EnsureSpace ensure_space(this);
emit(0x48); // REX.W
emit(0xA1);
- emitq(reinterpret_cast<uintptr_t>(value), mode);
+ emitp(value, mode);
}
@@ -1534,33 +1459,30 @@ void Assembler::movq(Register dst, void* value, RelocInfo::Mode rmode) {
EnsureSpace ensure_space(this);
emit_rex_64(dst);
emit(0xB8 | dst.low_bits());
- emitq(reinterpret_cast<uintptr_t>(value), rmode);
+ emitp(value, rmode);
}
void Assembler::movq(Register dst, int64_t value, RelocInfo::Mode rmode) {
// Non-relocatable values might not need a 64-bit representation.
- if (rmode == RelocInfo::NONE) {
- // Sadly, there is no zero or sign extending move for 8-bit immediates.
- if (is_int32(value)) {
- movq(dst, Immediate(static_cast<int32_t>(value)));
- return;
- } else if (is_uint32(value)) {
- movl(dst, Immediate(static_cast<int32_t>(value)));
- return;
- }
+ ASSERT(RelocInfo::IsNone(rmode));
+ if (is_uint32(value)) {
+ movl(dst, Immediate(static_cast<int32_t>(value)));
+ } else if (is_int32(value)) {
+ movq(dst, Immediate(static_cast<int32_t>(value)));
+ } else {
// Value cannot be represented by 32 bits, so do a full 64 bit immediate
// value.
+ EnsureSpace ensure_space(this);
+ emit_rex_64(dst);
+ emit(0xB8 | dst.low_bits());
+ emitq(value);
}
- EnsureSpace ensure_space(this);
- emit_rex_64(dst);
- emit(0xB8 | dst.low_bits());
- emitq(value, rmode);
}
void Assembler::movq(Register dst, ExternalReference ref) {
- int64_t value = reinterpret_cast<int64_t>(ref.address());
+ Address value = reinterpret_cast<Address>(ref.address());
movq(dst, value, RelocInfo::EXTERNAL_REFERENCE);
}
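
As a rough guide to the immediate-width selection in the rewritten movq(Register, int64_t) above, a hedged sketch; the classifier is hypothetical, but is_uint32 and is_int32 are the same utility predicates the function calls:

    // Mirrors the dispatch in Assembler::movq(Register, int64_t, RelocInfo::Mode).
    enum ImmediateWidth {
      kMovlZeroExtended,   // e.g. 0x00000000FFFFFFFF -> movl dst, imm32 (zero-extends)
      kMovqSignExtended,   // e.g. -1                 -> REX.W movq dst, imm32 (sign-extends)
      kMovqFullImm64       // e.g. 0x0000123400005678 -> REX.W B8+rd imm64 (10-byte move)
    };
    static ImmediateWidth ClassifyImmediate(int64_t value) {
      if (is_uint32(value)) return kMovlZeroExtended;
      if (is_int32(value)) return kMovqSignExtended;
      return kMovqFullImm64;
    }
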
@@ -1598,20 +1520,21 @@ void Assembler::movl(const Operand& dst, Label* src) {
void Assembler::movq(Register dst, Handle<Object> value, RelocInfo::Mode mode) {
+ AllowDeferredHandleDereference using_raw_address;
// If there is no relocation info, emit the value of the handle efficiently
// (possibly using less than 8 bytes for the value).
- if (mode == RelocInfo::NONE) {
+ if (RelocInfo::IsNone(mode)) {
// There is no possible reason to store a heap pointer without relocation
// info, so it must be a smi.
ASSERT(value->IsSmi());
- movq(dst, reinterpret_cast<int64_t>(*value), RelocInfo::NONE);
+ movq(dst, reinterpret_cast<int64_t>(*value), RelocInfo::NONE64);
} else {
EnsureSpace ensure_space(this);
ASSERT(value->IsHeapObject());
- ASSERT(!HEAP->InNewSpace(*value));
+ ASSERT(!isolate()->heap()->InNewSpace(*value));
emit_rex_64(dst);
emit(0xB8 | dst.low_bits());
- emitq(reinterpret_cast<uintptr_t>(value.location()), mode);
+ emitp(value.location(), mode);
}
}
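
A hedged usage sketch of the two paths above; the handles, destination register, and relocation modes are illustrative only. The point is that a smi is a tagged integer and can be emitted as a plain immediate, while a heap object pointer must go through emitp() so a relocation record is written and the GC can rewrite the embedded address if the object moves:

    // Illustrative call sites inside some code generator.
    Handle<Object> forty_two(Smi::FromInt(42), isolate);
    __ movq(rcx, forty_two, RelocInfo::NONE64);         // smi: plain immediate, no reloc info
    Handle<Object> undef = isolate->factory()->undefined_value();
    __ movq(rcx, undef, RelocInfo::EMBEDDED_OBJECT);    // heap object: reloc info recorded
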
@@ -1688,6 +1611,15 @@ void Assembler::movzxwl(Register dst, const Operand& src) {
}
+void Assembler::movzxwl(Register dst, Register src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0xB7);
+ emit_modrm(dst, src);
+}
+
+
void Assembler::repmovsb() {
EnsureSpace ensure_space(this);
emit(0xF3);
@@ -1918,13 +1850,6 @@ void Assembler::pushfq() {
}
-void Assembler::rdtsc() {
- EnsureSpace ensure_space(this);
- emit(0x0F);
- emit(0x31);
-}
-
-
void Assembler::ret(int imm16) {
EnsureSpace ensure_space(this);
ASSERT(is_uint16(imm16));
@@ -1972,7 +1897,7 @@ void Assembler::shrd(Register dst, Register src) {
}
-void Assembler::xchg(Register dst, Register src) {
+void Assembler::xchgq(Register dst, Register src) {
EnsureSpace ensure_space(this);
if (src.is(rax) || dst.is(rax)) { // Single-byte encoding
Register other = src.is(rax) ? dst : src;
@@ -1990,11 +1915,29 @@ void Assembler::xchg(Register dst, Register src) {
}
+void Assembler::xchgl(Register dst, Register src) {
+ EnsureSpace ensure_space(this);
+ if (src.is(rax) || dst.is(rax)) { // Single-byte encoding
+ Register other = src.is(rax) ? dst : src;
+ emit_optional_rex_32(other);
+ emit(0x90 | other.low_bits());
+ } else if (dst.low_bits() == 4) {
+ emit_optional_rex_32(dst, src);
+ emit(0x87);
+ emit_modrm(dst, src);
+ } else {
+ emit_optional_rex_32(src, dst);
+ emit(0x87);
+ emit_modrm(src, dst);
+ }
+}
+
+
void Assembler::store_rax(void* dst, RelocInfo::Mode mode) {
EnsureSpace ensure_space(this);
emit(0x48); // REX.W
emit(0xA3);
- emitq(reinterpret_cast<uintptr_t>(dst), mode);
+ emitp(dst, mode);
}
@@ -2108,6 +2051,14 @@ void Assembler::testl(const Operand& op, Immediate mask) {
}
+void Assembler::testl(const Operand& op, Register reg) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(reg, op);
+ emit(0x85);
+ emit_operand(reg, op);
+}
+
+
void Assembler::testq(const Operand& op, Register reg) {
EnsureSpace ensure_space(this);
emit_rex_64(reg, op);
@@ -2131,6 +2082,10 @@ void Assembler::testq(Register dst, Register src) {
void Assembler::testq(Register dst, Immediate mask) {
+ if (is_uint8(mask.value_)) {
+ testb(dst, mask);
+ return;
+ }
EnsureSpace ensure_space(this);
if (dst.is(rax)) {
emit_rex_64();
@@ -2246,7 +2201,7 @@ void Assembler::fistp_s(const Operand& adr) {
void Assembler::fisttp_s(const Operand& adr) {
- ASSERT(CpuFeatures::IsEnabled(SSE3));
+ ASSERT(IsEnabled(SSE3));
EnsureSpace ensure_space(this);
emit_optional_rex_32(adr);
emit(0xDB);
@@ -2255,7 +2210,7 @@ void Assembler::fisttp_s(const Operand& adr) {
void Assembler::fisttp_d(const Operand& adr) {
- ASSERT(CpuFeatures::IsEnabled(SSE3));
+ ASSERT(IsEnabled(SSE3));
EnsureSpace ensure_space(this);
emit_optional_rex_32(adr);
emit(0xDD);
@@ -2520,6 +2475,18 @@ void Assembler::emit_farith(int b1, int b2, int i) {
emit(b2 + i);
}
+
+// SSE operations.
+
+void Assembler::andps(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x54);
+ emit_sse_operand(dst, src);
+}
+
+
// SSE 2 operations.
void Assembler::movd(XMMRegister dst, Register src) {
@@ -2580,6 +2547,7 @@ void Assembler::movq(XMMRegister dst, XMMRegister src) {
}
}
+
void Assembler::movdqa(const Operand& dst, XMMRegister src) {
EnsureSpace ensure_space(this);
emit(0x66);
@@ -2600,16 +2568,36 @@ void Assembler::movdqa(XMMRegister dst, const Operand& src) {
}
+void Assembler::movdqu(const Operand& dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit(0xF3);
+ emit_rex_64(src, dst);
+ emit(0x0F);
+ emit(0x7F);
+ emit_sse_operand(src, dst);
+}
+
+
+void Assembler::movdqu(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit(0xF3);
+ emit_rex_64(dst, src);
+ emit(0x0F);
+ emit(0x6F);
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::extractps(Register dst, XMMRegister src, byte imm8) {
- ASSERT(CpuFeatures::IsSupported(SSE4_1));
+ ASSERT(IsEnabled(SSE4_1));
ASSERT(is_uint8(imm8));
EnsureSpace ensure_space(this);
emit(0x66);
- emit_optional_rex_32(dst, src);
+ emit_optional_rex_32(src, dst);
emit(0x0F);
emit(0x3A);
emit(0x17);
- emit_sse_operand(dst, src);
+ emit_sse_operand(src, dst);
emit(imm8);
}
@@ -2850,6 +2838,16 @@ void Assembler::addsd(XMMRegister dst, XMMRegister src) {
}
+void Assembler::addsd(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit(0xF2);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x58);
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::mulsd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
emit(0xF2);
@@ -2860,6 +2858,16 @@ void Assembler::mulsd(XMMRegister dst, XMMRegister src) {
}
+void Assembler::mulsd(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit(0xF2);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x59);
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::subsd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
emit(0xF2);
@@ -2949,9 +2957,20 @@ void Assembler::ucomisd(XMMRegister dst, const Operand& src) {
}
+void Assembler::cmpltsd(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit(0xF2);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0xC2);
+ emit_sse_operand(dst, src);
+ emit(0x01); // LT == 1
+}
+
+
void Assembler::roundsd(XMMRegister dst, XMMRegister src,
Assembler::RoundingMode mode) {
- ASSERT(CpuFeatures::IsEnabled(SSE4_1));
+ ASSERT(IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
emit(0x66);
emit_optional_rex_32(dst, src);
@@ -2974,6 +2993,15 @@ void Assembler::movmskpd(Register dst, XMMRegister src) {
}
+void Assembler::movmskps(Register dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0f);
+ emit(0x50);
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::emit_sse_operand(XMMRegister reg, const Operand& adr) {
Register ireg = { reg.code() };
emit_operand(ireg, adr);
@@ -2984,10 +3012,12 @@ void Assembler::emit_sse_operand(XMMRegister dst, XMMRegister src) {
emit(0xC0 | (dst.low_bits() << 3) | src.low_bits());
}
+
void Assembler::emit_sse_operand(XMMRegister dst, Register src) {
emit(0xC0 | (dst.low_bits() << 3) | src.low_bits());
}
+
void Assembler::emit_sse_operand(Register dst, XMMRegister src) {
emit(0xC0 | (dst.low_bits() << 3) | src.low_bits());
}
@@ -3008,9 +3038,9 @@ void Assembler::dd(uint32_t data) {
// Relocation information implementations.
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
- ASSERT(rmode != RelocInfo::NONE);
- // Don't record external references unless the heap will be serialized.
+ ASSERT(!RelocInfo::IsNone(rmode));
if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
+ // Don't record external references unless the heap will be serialized.
#ifdef DEBUG
if (!Serializer::enabled()) {
Serializer::TooLateToEnableNow();
@@ -3019,11 +3049,15 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
if (!Serializer::enabled() && !emit_debug_code()) {
return;
}
+ } else if (rmode == RelocInfo::CODE_AGE_SEQUENCE) {
+ // Don't record pseudo relocation info for code age sequence mode.
+ return;
}
RelocInfo rinfo(pc_, rmode, data, NULL);
reloc_info_writer.Write(&rinfo);
}
+
void Assembler::RecordJSReturn() {
positions_recorder()->WriteRecordedPositions();
EnsureSpace ensure_space(this);
@@ -3047,7 +3081,9 @@ void Assembler::RecordComment(const char* msg, bool force) {
const int RelocInfo::kApplyMask = RelocInfo::kCodeTargetMask |
- 1 << RelocInfo::INTERNAL_REFERENCE;
+ 1 << RelocInfo::RUNTIME_ENTRY |
+ 1 << RelocInfo::INTERNAL_REFERENCE |
+ 1 << RelocInfo::CODE_AGE_SEQUENCE;
bool RelocInfo::IsCodedSpecially() {
diff --git a/deps/v8/src/x64/assembler-x64.h b/deps/v8/src/x64/assembler-x64.h
index e8b0be9ba..508c62211 100644
--- a/deps/v8/src/x64/assembler-x64.h
+++ b/deps/v8/src/x64/assembler-x64.h
@@ -95,21 +95,24 @@ struct Register {
// r10 - fixed scratch register
// r12 - smi constant register
// r13 - root register
+ static const int kMaxNumAllocatableRegisters = 10;
+ static int NumAllocatableRegisters() {
+ return kMaxNumAllocatableRegisters;
+ }
static const int kNumRegisters = 16;
- static const int kNumAllocatableRegisters = 10;
static int ToAllocationIndex(Register reg) {
return kAllocationIndexByRegisterCode[reg.code()];
}
static Register FromAllocationIndex(int index) {
- ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+ ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
Register result = { kRegisterCodeByAllocationIndex[index] };
return result;
}
static const char* AllocationIndexToString(int index) {
- ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+ ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
const char* const names[] = {
"rax",
"rbx",
@@ -157,7 +160,7 @@ struct Register {
int code_;
private:
- static const int kRegisterCodeByAllocationIndex[kNumAllocatableRegisters];
+ static const int kRegisterCodeByAllocationIndex[kMaxNumAllocatableRegisters];
static const int kAllocationIndexByRegisterCode[kNumRegisters];
};
@@ -197,10 +200,26 @@ const Register r14 = { kRegister_r14_Code };
const Register r15 = { kRegister_r15_Code };
const Register no_reg = { kRegister_no_reg_Code };
+#ifdef _WIN64
+ // Windows calling convention
+ const Register arg_reg_1 = { kRegister_rcx_Code };
+ const Register arg_reg_2 = { kRegister_rdx_Code };
+ const Register arg_reg_3 = { kRegister_r8_Code };
+ const Register arg_reg_4 = { kRegister_r9_Code };
+#else
+ // AMD64 calling convention
+ const Register arg_reg_1 = { kRegister_rdi_Code };
+ const Register arg_reg_2 = { kRegister_rsi_Code };
+ const Register arg_reg_3 = { kRegister_rdx_Code };
+ const Register arg_reg_4 = { kRegister_rcx_Code };
+#endif // _WIN64
struct XMMRegister {
- static const int kNumRegisters = 16;
- static const int kNumAllocatableRegisters = 15;
+ static const int kMaxNumRegisters = 16;
+ static const int kMaxNumAllocatableRegisters = 15;
+ static int NumAllocatableRegisters() {
+ return kMaxNumAllocatableRegisters;
+ }
static int ToAllocationIndex(XMMRegister reg) {
ASSERT(reg.code() != 0);
@@ -208,13 +227,13 @@ struct XMMRegister {
}
static XMMRegister FromAllocationIndex(int index) {
- ASSERT(0 <= index && index < kNumAllocatableRegisters);
+ ASSERT(0 <= index && index < kMaxNumAllocatableRegisters);
XMMRegister result = { index + 1 };
return result;
}
static const char* AllocationIndexToString(int index) {
- ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+ ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
const char* const names[] = {
"xmm1",
"xmm2",
@@ -237,11 +256,11 @@ struct XMMRegister {
static XMMRegister from_code(int code) {
ASSERT(code >= 0);
- ASSERT(code < kNumRegisters);
+ ASSERT(code < kMaxNumRegisters);
XMMRegister r = { code };
return r;
}
- bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
+ bool is_valid() const { return 0 <= code_ && code_ < kMaxNumRegisters; }
bool is(XMMRegister reg) const { return code_ == reg.code_; }
int code() const {
ASSERT(is_valid());
@@ -436,13 +455,13 @@ class Operand BASE_EMBEDDED {
// CpuFeatures keeps track of which features are supported by the target CPU.
-// Supported features must be enabled by a Scope before use.
+// Supported features must be enabled by a CpuFeatureScope before use.
// Example:
-// if (CpuFeatures::IsSupported(SSE3)) {
-// CpuFeatures::Scope fscope(SSE3);
+// if (assembler->IsSupported(SSE3)) {
+// CpuFeatureScope fscope(assembler, SSE3);
// // Generate SSE3 floating point code.
// } else {
-// // Generate standard x87 or SSE2 floating point code.
+// // Generate standard SSE2 floating point code.
// }
class CpuFeatures : public AllStatic {
public:
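
Following the rewritten documentation comment above, a hedged sketch of the scope-based pattern applied to one of the SSE3 instructions touched by this patch; masm and the stack operand are stand-ins:

    // Guarded SSE3 code generation with the new CpuFeatureScope.
    if (CpuFeatures::IsSupported(SSE3)) {
      CpuFeatureScope fscope(&masm, SSE3);
      masm.fisttp_s(Operand(rsp, 0));   // truncating store; asserts IsEnabled(SSE3)
    } else {
      masm.fistp_s(Operand(rsp, 0));    // pre-SSE3 fallback, rounds per the FPU control word
    }
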
@@ -452,78 +471,61 @@ class CpuFeatures : public AllStatic {
// Check whether a feature is supported by the target CPU.
static bool IsSupported(CpuFeature f) {
+ if (Check(f, cross_compile_)) return true;
ASSERT(initialized_);
- if (f == SSE2 && !FLAG_enable_sse2) return false;
if (f == SSE3 && !FLAG_enable_sse3) return false;
if (f == SSE4_1 && !FLAG_enable_sse4_1) return false;
if (f == CMOV && !FLAG_enable_cmov) return false;
- if (f == RDTSC && !FLAG_enable_rdtsc) return false;
if (f == SAHF && !FLAG_enable_sahf) return false;
- return (supported_ & (V8_UINT64_C(1) << f)) != 0;
+ return Check(f, supported_);
}
-#ifdef DEBUG
- // Check whether a feature is currently enabled.
- static bool IsEnabled(CpuFeature f) {
+ static bool IsFoundByRuntimeProbingOnly(CpuFeature f) {
ASSERT(initialized_);
- Isolate* isolate = Isolate::UncheckedCurrent();
- if (isolate == NULL) {
- // When no isolate is available, work as if we're running in
- // release mode.
- return IsSupported(f);
- }
- uint64_t enabled = isolate->enabled_cpu_features();
- return (enabled & (V8_UINT64_C(1) << f)) != 0;
+ return Check(f, found_by_runtime_probing_only_);
}
-#endif
- // Enable a specified feature within a scope.
- class Scope BASE_EMBEDDED {
-#ifdef DEBUG
-
- public:
- explicit Scope(CpuFeature f) {
- uint64_t mask = V8_UINT64_C(1) << f;
- ASSERT(CpuFeatures::IsSupported(f));
- ASSERT(!Serializer::enabled() ||
- (CpuFeatures::found_by_runtime_probing_ & mask) == 0);
- isolate_ = Isolate::UncheckedCurrent();
- old_enabled_ = 0;
- if (isolate_ != NULL) {
- old_enabled_ = isolate_->enabled_cpu_features();
- isolate_->set_enabled_cpu_features(old_enabled_ | mask);
- }
- }
- ~Scope() {
- ASSERT_EQ(Isolate::UncheckedCurrent(), isolate_);
- if (isolate_ != NULL) {
- isolate_->set_enabled_cpu_features(old_enabled_);
- }
- }
+ static bool IsSafeForSnapshot(CpuFeature f) {
+ return Check(f, cross_compile_) ||
+ (IsSupported(f) &&
+ (!Serializer::enabled() || !IsFoundByRuntimeProbingOnly(f)));
+ }
- private:
- Isolate* isolate_;
- uint64_t old_enabled_;
-#else
+ static bool VerifyCrossCompiling() {
+ return cross_compile_ == 0;
+ }
- public:
- explicit Scope(CpuFeature f) {}
-#endif
- };
+ static bool VerifyCrossCompiling(CpuFeature f) {
+ uint64_t mask = flag2set(f);
+ return cross_compile_ == 0 ||
+ (cross_compile_ & mask) == mask;
+ }
private:
- // Safe defaults include SSE2 and CMOV for X64. It is always available, if
+ static bool Check(CpuFeature f, uint64_t set) {
+ return (set & flag2set(f)) != 0;
+ }
+
+ static uint64_t flag2set(CpuFeature f) {
+ return static_cast<uint64_t>(1) << f;
+ }
+
+ // Safe defaults include CMOV for X64. It is always available, if
// anyone checks, but they shouldn't need to check.
// The required user mode extensions in X64 are (from AMD64 ABI Table A.1):
// fpu, tsc, cx8, cmov, mmx, sse, sse2, fxsr, syscall
- static const uint64_t kDefaultCpuFeatures = (1 << SSE2 | 1 << CMOV);
+ static const uint64_t kDefaultCpuFeatures = (1 << CMOV);
#ifdef DEBUG
static bool initialized_;
#endif
static uint64_t supported_;
- static uint64_t found_by_runtime_probing_;
+ static uint64_t found_by_runtime_probing_only_;
+ static uint64_t cross_compile_;
+
+ friend class ExternalReference;
+ friend class PlatformFeatureScope;
DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
};
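
The feature bookkeeping above is plain 64-bit bit-set arithmetic; restated as standalone functions (not the class's private helpers) purely for concreteness:

    // Equivalent free functions, for illustration only.
    static uint64_t flag2set(CpuFeature f) { return static_cast<uint64_t>(1) << f; }
    static bool Check(CpuFeature f, uint64_t set) { return (set & flag2set(f)) != 0; }
    // With supported = flag2set(CMOV) | flag2set(SSE3):
    //   Check(SSE3, supported)   evaluates to true
    //   Check(SSE4_1, supported) evaluates to false
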
@@ -556,15 +558,7 @@ class Assembler : public AssemblerBase {
// is too small, a fatal error occurs. No deallocation of the buffer is done
// upon destruction of the assembler.
Assembler(Isolate* isolate, void* buffer, int buffer_size);
- ~Assembler();
-
- // Overrides the default provided by FLAG_debug_code.
- void set_emit_debug_code(bool value) { emit_debug_code_ = value; }
-
- // Avoids using instructions that vary in size in unpredictable ways between
- // the snapshot and the running VM. This is needed by the full compiler so
- // that it can recompile code with debug support and fix the PC.
- void set_predictable_code_size(bool value) { predictable_code_size_ = value; }
+ virtual ~Assembler() { }
// GetCode emits any pending (non-emitted) code and fills the descriptor
// desc. GetCode() is idempotent; it returns the same result if no other
@@ -600,34 +594,42 @@ class Assembler : public AssemblerBase {
}
inline Handle<Object> code_target_object_handle_at(Address pc);
+ inline Address runtime_entry_at(Address pc);
// Number of bytes taken up by the branch target in the code.
static const int kSpecialTargetSize = 4; // Use 32-bit displacement.
// Distance between the address of the code target in the call instruction
// and the return address pushed on the stack.
static const int kCallTargetAddressOffset = 4; // Use 32-bit displacement.
- // Distance between the start of the JS return sequence and where the
- // 32-bit displacement of a near call would be, relative to the pushed
- // return address. TODO: Use return sequence length instead.
- // Should equal Debug::kX64JSReturnSequenceLength - kCallTargetAddressOffset;
- static const int kPatchReturnSequenceAddressOffset = 13 - 4;
- // Distance between start of patched debug break slot and where the
- // 32-bit displacement of a near call would be, relative to the pushed
- // return address. TODO: Use return sequence length instead.
- // Should equal Debug::kX64JSReturnSequenceLength - kCallTargetAddressOffset;
- static const int kPatchDebugBreakSlotAddressOffset = 13 - 4;
- // TODO(X64): Rename this, removing the "Real", after changing the above.
- static const int kRealPatchReturnSequenceAddressOffset = 2;
-
- // Some x64 JS code is padded with int3 to make it large
- // enough to hold an instruction when the debugger patches it.
- static const int kJumpInstructionLength = 13;
- static const int kCallInstructionLength = 13;
- static const int kJSReturnSequenceLength = 13;
+ // The length of call(kScratchRegister).
+ static const int kCallScratchRegisterInstructionLength = 3;
+ // The length of call(Immediate32).
static const int kShortCallInstructionLength = 5;
- static const int kPatchDebugBreakSlotReturnOffset = 4;
-
- // The debug break slot must be able to contain a call instruction.
- static const int kDebugBreakSlotLength = kCallInstructionLength;
+ // The length of movq(kScratchRegister, address).
+ static const int kMoveAddressIntoScratchRegisterInstructionLength =
+ 2 + kPointerSize;
+ // The length of movq(kScratchRegister, address) and call(kScratchRegister).
+ static const int kCallSequenceLength =
+ kMoveAddressIntoScratchRegisterInstructionLength +
+ kCallScratchRegisterInstructionLength;
+
+ // The JS return and debug break slot must be able to contain an indirect
+ // call sequence; some x64 JS code is padded with int3 to make it large
+ // enough to hold an instruction when the debugger patches it.
+ static const int kJSReturnSequenceLength = kCallSequenceLength;
+ static const int kDebugBreakSlotLength = kCallSequenceLength;
+ static const int kPatchDebugBreakSlotReturnOffset = kCallTargetAddressOffset;
+ // Distance between the start of the JS return sequence and where the
+ // 32-bit displacement of a short call would be. The short call is from
+ // SetDebugBreakAtIC from debug-x64.cc.
+ static const int kPatchReturnSequenceAddressOffset =
+ kJSReturnSequenceLength - kPatchDebugBreakSlotReturnOffset;
+ // Distance between the start of the JS return sequence and where the
+ // 32-bit displacement of a short call would be. The short call is from
+ // SetDebugBreakAtIC from debug-x64.cc.
+ static const int kPatchDebugBreakSlotAddressOffset =
+ kDebugBreakSlotLength - kPatchDebugBreakSlotReturnOffset;
+ static const int kRealPatchReturnSequenceAddressOffset =
+ kMoveAddressIntoScratchRegisterInstructionLength - kPointerSize;
// One byte opcode for test eax,0xXXXXXXXX.
static const byte kTestEaxByte = 0xA9;
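
The new sequence-length constants above are sums rather than magic numbers. Spelling out the arithmetic for 64-bit pointers (kPointerSize == 8, kCallTargetAddressOffset == 4), with STATIC_ASSERTs added here only to make the resulting values explicit:

    // movq(kScratchRegister, imm64) = REX.W + (B8|reg) + 8-byte immediate = 2 + 8 = 10 bytes
    // call(kScratchRegister)        = 3 bytes
    STATIC_ASSERT(Assembler::kMoveAddressIntoScratchRegisterInstructionLength == 10);
    STATIC_ASSERT(Assembler::kCallSequenceLength == 13);                   // 10 + 3
    STATIC_ASSERT(Assembler::kJSReturnSequenceLength == 13);               // same indirect call
    STATIC_ASSERT(Assembler::kPatchReturnSequenceAddressOffset == 9);      // 13 - 4
    STATIC_ASSERT(Assembler::kRealPatchReturnSequenceAddressOffset == 2);  // 10 - 8
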
@@ -721,7 +723,6 @@ class Assembler : public AssemblerBase {
// All 64-bit immediates must have a relocation mode.
void movq(Register dst, void* ptr, RelocInfo::Mode rmode);
void movq(Register dst, int64_t value, RelocInfo::Mode rmode);
- void movq(Register dst, const char* s, RelocInfo::Mode rmode);
// Moves the address of the external reference into the register.
void movq(Register dst, ExternalReference ext);
void movq(Register dst, Handle<Object> handle, RelocInfo::Mode rmode);
@@ -734,6 +735,7 @@ class Assembler : public AssemblerBase {
void movzxbl(Register dst, const Operand& src);
void movzxwq(Register dst, const Operand& src);
void movzxwl(Register dst, const Operand& src);
+ void movzxwl(Register dst, Register src);
// Repeated moves.
@@ -753,7 +755,8 @@ class Assembler : public AssemblerBase {
void cmovl(Condition cc, Register dst, const Operand& src);
// Exchange two registers
- void xchg(Register dst, Register src);
+ void xchgq(Register dst, Register src);
+ void xchgl(Register dst, Register src);
// Arithmetics
void addl(Register dst, Register src) {
@@ -988,6 +991,10 @@ class Assembler : public AssemblerBase {
arithmetic_op(0x09, src, dst);
}
+ void orl(const Operand& dst, Register src) {
+ arithmetic_op_32(0x09, src, dst);
+ }
+
void or_(Register dst, Immediate src) {
immediate_arithmetic_op(0x1, dst, src);
}
@@ -1013,6 +1020,10 @@ class Assembler : public AssemblerBase {
shift(dst, imm8, 0x0);
}
+ void roll(Register dst, Immediate imm8) {
+ shift_32(dst, imm8, 0x0);
+ }
+
void rcr(Register dst, Immediate imm8) {
shift(dst, imm8, 0x3);
}
@@ -1021,6 +1032,14 @@ class Assembler : public AssemblerBase {
shift(dst, imm8, 0x1);
}
+ void rorl(Register dst, Immediate imm8) {
+ shift_32(dst, imm8, 0x1);
+ }
+
+ void rorl_cl(Register dst) {
+ shift_32(dst, 0x1);
+ }
+
// Shifts dst:src left by cl bits, affecting only dst.
void shld(Register dst, Register src);
@@ -1112,6 +1131,10 @@ class Assembler : public AssemblerBase {
arithmetic_op_32(0x2B, dst, src);
}
+ void subl(const Operand& dst, Register src) {
+ arithmetic_op_32(0x29, src, dst);
+ }
+
void subl(const Operand& dst, Immediate src) {
immediate_arithmetic_op_32(0x5, dst, src);
}
@@ -1130,6 +1153,7 @@ class Assembler : public AssemblerBase {
void testb(const Operand& op, Register reg);
void testl(Register dst, Register src);
void testl(Register reg, Immediate mask);
+ void testl(const Operand& op, Register reg);
void testl(const Operand& op, Immediate mask);
void testq(const Operand& op, Register reg);
void testq(Register dst, Register src);
@@ -1155,6 +1179,10 @@ class Assembler : public AssemblerBase {
immediate_arithmetic_op_32(0x6, dst, src);
}
+ void xorl(const Operand& dst, Register src) {
+ arithmetic_op_32(0x31, src, dst);
+ }
+
void xorl(const Operand& dst, Immediate src) {
immediate_arithmetic_op_32(0x6, dst, src);
}
@@ -1186,7 +1214,6 @@ class Assembler : public AssemblerBase {
void hlt();
void int3();
void nop();
- void rdtsc();
void ret(int imm16);
void setcc(Condition cc, Register reg);
@@ -1210,6 +1237,7 @@ class Assembler : public AssemblerBase {
// Calls
// Call near relative 32-bit displacement, relative to next instruction.
void call(Label* L);
+ void call(Address entry, RelocInfo::Mode rmode);
void call(Handle<Code> target,
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
TypeFeedbackId ast_id = TypeFeedbackId::None());
@@ -1231,6 +1259,7 @@ class Assembler : public AssemblerBase {
// Use a 32-bit signed displacement.
// Unconditional jump to L
void jmp(Label* L, Label::Distance distance = Label::kFar);
+ void jmp(Address entry, RelocInfo::Mode rmode);
void jmp(Handle<Code> target, RelocInfo::Mode rmode);
// Jump near absolute indirect (r64)
@@ -1243,6 +1272,7 @@ class Assembler : public AssemblerBase {
void j(Condition cc,
Label* L,
Label::Distance distance = Label::kFar);
+ void j(Condition cc, Address entry, RelocInfo::Mode rmode);
void j(Condition cc, Handle<Code> target, RelocInfo::Mode rmode);
// Floating-point operations
@@ -1316,13 +1346,26 @@ class Assembler : public AssemblerBase {
void sahf();
+ // SSE instructions
+ void movaps(XMMRegister dst, XMMRegister src);
+ void movss(XMMRegister dst, const Operand& src);
+ void movss(const Operand& dst, XMMRegister src);
+
+ void cvttss2si(Register dst, const Operand& src);
+ void cvttss2si(Register dst, XMMRegister src);
+ void cvtlsi2ss(XMMRegister dst, Register src);
+
+ void xorps(XMMRegister dst, XMMRegister src);
+ void andps(XMMRegister dst, XMMRegister src);
+
+ void movmskps(Register dst, XMMRegister src);
+
// SSE2 instructions
void movd(XMMRegister dst, Register src);
void movd(Register dst, XMMRegister src);
void movq(XMMRegister dst, Register src);
void movq(Register dst, XMMRegister src);
void movq(XMMRegister dst, XMMRegister src);
- void extractps(Register dst, XMMRegister src, byte imm8);
// Don't use this unless it's important to keep the
// top half of the destination register unchanged.
@@ -1336,14 +1379,11 @@ class Assembler : public AssemblerBase {
void movdqa(const Operand& dst, XMMRegister src);
void movdqa(XMMRegister dst, const Operand& src);
- void movapd(XMMRegister dst, XMMRegister src);
- void movaps(XMMRegister dst, XMMRegister src);
+ void movdqu(const Operand& dst, XMMRegister src);
+ void movdqu(XMMRegister dst, const Operand& src);
- void movss(XMMRegister dst, const Operand& src);
- void movss(const Operand& dst, XMMRegister src);
+ void movapd(XMMRegister dst, XMMRegister src);
- void cvttss2si(Register dst, const Operand& src);
- void cvttss2si(Register dst, XMMRegister src);
void cvttsd2si(Register dst, const Operand& src);
void cvttsd2si(Register dst, XMMRegister src);
void cvttsd2siq(Register dst, XMMRegister src);
@@ -1353,7 +1393,6 @@ class Assembler : public AssemblerBase {
void cvtqsi2sd(XMMRegister dst, const Operand& src);
void cvtqsi2sd(XMMRegister dst, Register src);
- void cvtlsi2ss(XMMRegister dst, Register src);
void cvtss2sd(XMMRegister dst, XMMRegister src);
void cvtss2sd(XMMRegister dst, const Operand& src);
@@ -1363,18 +1402,25 @@ class Assembler : public AssemblerBase {
void cvtsd2siq(Register dst, XMMRegister src);
void addsd(XMMRegister dst, XMMRegister src);
+ void addsd(XMMRegister dst, const Operand& src);
void subsd(XMMRegister dst, XMMRegister src);
void mulsd(XMMRegister dst, XMMRegister src);
+ void mulsd(XMMRegister dst, const Operand& src);
void divsd(XMMRegister dst, XMMRegister src);
void andpd(XMMRegister dst, XMMRegister src);
void orpd(XMMRegister dst, XMMRegister src);
void xorpd(XMMRegister dst, XMMRegister src);
- void xorps(XMMRegister dst, XMMRegister src);
void sqrtsd(XMMRegister dst, XMMRegister src);
void ucomisd(XMMRegister dst, XMMRegister src);
void ucomisd(XMMRegister dst, const Operand& src);
+ void cmpltsd(XMMRegister dst, XMMRegister src);
+
+ void movmskpd(Register dst, XMMRegister src);
+
+ // SSE 4.1 instruction
+ void extractps(Register dst, XMMRegister src, byte imm8);
enum RoundingMode {
kRoundToNearest = 0x0,
@@ -1385,14 +1431,6 @@ class Assembler : public AssemblerBase {
void roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode);
- void movmskpd(Register dst, XMMRegister src);
-
- // The first argument is the reg field, the second argument is the r/m field.
- void emit_sse_operand(XMMRegister dst, XMMRegister src);
- void emit_sse_operand(XMMRegister reg, const Operand& adr);
- void emit_sse_operand(XMMRegister dst, Register src);
- void emit_sse_operand(Register dst, XMMRegister src);
-
// Debugging
void Print();
@@ -1416,8 +1454,6 @@ class Assembler : public AssemblerBase {
void db(uint8_t data);
void dd(uint32_t data);
- int pc_offset() const { return static_cast<int>(pc_ - buffer_); }
-
PositionsRecorder* positions_recorder() { return &positions_recorder_; }
// Check if there is less than kGap bytes available in the buffer.
@@ -1436,15 +1472,10 @@ class Assembler : public AssemblerBase {
// Avoid overflows for displacements etc.
static const int kMaximalBufferSize = 512*MB;
- static const int kMinimalBufferSize = 4*KB;
byte byte_at(int pos) { return buffer_[pos]; }
void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
- protected:
- bool emit_debug_code() const { return emit_debug_code_; }
- bool predictable_code_size() const { return predictable_code_size_; }
-
private:
byte* addr_at(int pos) { return buffer_ + pos; }
uint32_t long_at(int pos) {
@@ -1459,11 +1490,13 @@ class Assembler : public AssemblerBase {
void emit(byte x) { *pc_++ = x; }
inline void emitl(uint32_t x);
- inline void emitq(uint64_t x, RelocInfo::Mode rmode);
+ inline void emitp(void* x, RelocInfo::Mode rmode);
+ inline void emitq(uint64_t x);
inline void emitw(uint16_t x);
inline void emit_code_target(Handle<Code> target,
RelocInfo::Mode rmode,
TypeFeedbackId ast_id = TypeFeedbackId::None());
+ inline void emit_runtime_entry(Address entry, RelocInfo::Mode rmode);
void emit(Immediate x) { emitl(x.value_); }
// Emits a REX prefix that encodes a 64-bit operand size and
@@ -1578,6 +1611,12 @@ class Assembler : public AssemblerBase {
// Emit the code-object-relative offset of the label's position
inline void emit_code_relative_offset(Label* label);
+ // The first argument is the reg field, the second argument is the r/m field.
+ void emit_sse_operand(XMMRegister dst, XMMRegister src);
+ void emit_sse_operand(XMMRegister reg, const Operand& adr);
+ void emit_sse_operand(XMMRegister dst, Register src);
+ void emit_sse_operand(Register dst, XMMRegister src);
+
// Emit machine code for one of the operations ADD, ADC, SUB, SBC,
// AND, OR, XOR, or CMP. The encodings of these operations are all
// similar, differing just in the opcode or in the reg field of the
@@ -1632,24 +1671,12 @@ class Assembler : public AssemblerBase {
friend class EnsureSpace;
friend class RegExpMacroAssemblerX64;
- // Code buffer:
- // The buffer into which code and relocation info are generated.
- byte* buffer_;
- int buffer_size_;
- // True if the assembler owns the buffer, false if buffer is external.
- bool own_buffer_;
-
// code generation
- byte* pc_; // the program counter; moves forward
RelocInfoWriter reloc_info_writer;
List< Handle<Code> > code_targets_;
PositionsRecorder positions_recorder_;
-
- bool emit_debug_code_;
- bool predictable_code_size_;
-
friend class PositionsRecorder;
};
diff --git a/deps/v8/src/x64/builtins-x64.cc b/deps/v8/src/x64/builtins-x64.cc
index 9e4153a86..f65b25c65 100644
--- a/deps/v8/src/x64/builtins-x64.cc
+++ b/deps/v8/src/x64/builtins-x64.cc
@@ -27,7 +27,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_X64)
+#if V8_TARGET_ARCH_X64
#include "codegen.h"
#include "deoptimizer.h"
@@ -44,24 +44,24 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
CFunctionId id,
BuiltinExtraArguments extra_args) {
// ----------- S t a t e -------------
- // -- rax : number of arguments excluding receiver
- // -- rdi : called function (only guaranteed when
- // extra_args requires it)
- // -- rsi : context
- // -- rsp[0] : return address
- // -- rsp[8] : last argument
+ // -- rax : number of arguments excluding receiver
+ // -- rdi : called function (only guaranteed when
+ // extra_args requires it)
+ // -- rsi : context
+ // -- rsp[0] : return address
+ // -- rsp[8] : last argument
// -- ...
- // -- rsp[8 * argc] : first argument (argc == rax)
- // -- rsp[8 * (argc +1)] : receiver
+ // -- rsp[8 * argc] : first argument (argc == rax)
+ // -- rsp[8 * (argc + 1)] : receiver
// -----------------------------------
// Insert extra arguments.
int num_extra_args = 0;
if (extra_args == NEEDS_CALLED_FUNCTION) {
num_extra_args = 1;
- __ pop(kScratchRegister); // Save return address.
+ __ PopReturnAddressTo(kScratchRegister);
__ push(rdi);
- __ push(kScratchRegister); // Restore return address.
+ __ PushReturnAddressFrom(kScratchRegister);
} else {
ASSERT(extra_args == NO_EXTRA_ARGUMENTS);
}
@@ -73,6 +73,24 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
}
+static void CallRuntimePassFunction(MacroAssembler* masm,
+ Runtime::FunctionId function_id) {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // Push a copy of the function onto the stack.
+ __ push(rdi);
+ // Push call kind information.
+ __ push(rcx);
+ // Function is also the parameter to the runtime call.
+ __ push(rdi);
+
+ __ CallRuntime(function_id, 1);
+ // Restore call kind information.
+ __ pop(rcx);
+ // Restore receiver.
+ __ pop(rdi);
+}
+
+
static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
__ movq(kScratchRegister,
FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
@@ -84,30 +102,27 @@ static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
void Builtins::Generate_InRecompileQueue(MacroAssembler* masm) {
- GenerateTailCallToSharedCode(masm);
-}
-
-
-void Builtins::Generate_ParallelRecompile(MacroAssembler* masm) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Push a copy of the function onto the stack.
- __ push(rdi);
- // Push call kind information.
- __ push(rcx);
+ // Checking whether the queued function is ready for install is optional,
+ // since we come across interrupts and stack checks elsewhere. However,
+ // not checking may delay installing ready functions, and always checking
+ // would be quite expensive. A good compromise is to first check against
+ // stack limit as a cue for an interrupt signal.
+ Label ok;
+ __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
+ __ j(above_equal, &ok);
- __ push(rdi); // Function is also the parameter to the runtime call.
- __ CallRuntime(Runtime::kParallelRecompile, 1);
+ CallRuntimePassFunction(masm, Runtime::kTryInstallRecompiledCode);
+ // Tail call to returned code.
+ __ lea(rax, FieldOperand(rax, Code::kHeaderSize));
+ __ jmp(rax);
- // Restore call kind information.
- __ pop(rcx);
- // Restore receiver.
- __ pop(rdi);
+ __ bind(&ok);
+ GenerateTailCallToSharedCode(masm);
+}
- // Tear down internal frame.
- }
+void Builtins::Generate_ConcurrentRecompile(MacroAssembler* masm) {
+ CallRuntimePassFunction(masm, Runtime::kConcurrentRecompile);
GenerateTailCallToSharedCode(masm);
}
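
The comment block above explains the heuristic; as a plain-C++ restatement of the cue it relies on (an illustrative helper, not part of the builtin):

    // V8 signals a pending interrupt by lowering the stack limit, so a stack
    // pointer below the limit doubles as a cheap hint to poll the recompile queue.
    static bool ShouldCheckRecompileQueue(uintptr_t rsp, uintptr_t stack_limit) {
      return rsp < stack_limit;
    }
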
@@ -193,12 +208,12 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ movzxbq(rdi, FieldOperand(rax, Map::kInstanceSizeOffset));
__ shl(rdi, Immediate(kPointerSizeLog2));
// rdi: size of new object
- __ AllocateInNewSpace(rdi,
- rbx,
- rdi,
- no_reg,
- &rt_call,
- NO_ALLOCATION_FLAGS);
+ __ Allocate(rdi,
+ rbx,
+ rdi,
+ no_reg,
+ &rt_call,
+ NO_ALLOCATION_FLAGS);
// Allocated the JSObject, now initialize the fields.
// rax: initial map
// rbx: JSObject (not HeapObject tagged - the actual address).
@@ -222,7 +237,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
if (FLAG_debug_code) {
__ cmpq(rsi, rdi);
__ Assert(less_equal,
- "Unexpected number of pre-allocated property fields.");
+ kUnexpectedNumberOfPreAllocatedPropertyFields);
}
__ InitializeFieldsWithFiller(rcx, rsi, rdx);
__ LoadRoot(rdx, Heap::kOnePointerFillerMapRootIndex);
@@ -253,21 +268,21 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ subq(rdx, rcx);
// Done if no extra properties are to be allocated.
__ j(zero, &allocated);
- __ Assert(positive, "Property allocation count failed.");
+ __ Assert(positive, kPropertyAllocationCountFailed);
// Scale the number of elements by pointer size and add the header for
// FixedArrays to the start of the next object calculation from above.
// rbx: JSObject
// rdi: start of next object (will be start of FixedArray)
// rdx: number of elements in properties array
- __ AllocateInNewSpace(FixedArray::kHeaderSize,
- times_pointer_size,
- rdx,
- rdi,
- rax,
- no_reg,
- &undo_allocation,
- RESULT_CONTAINS_TOP);
+ __ Allocate(FixedArray::kHeaderSize,
+ times_pointer_size,
+ rdx,
+ rdi,
+ rax,
+ no_reg,
+ &undo_allocation,
+ RESULT_CONTAINS_TOP);
// Initialize the FixedArray.
// rbx: JSObject
@@ -402,10 +417,10 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
}
// Remove caller arguments from the stack and return.
- __ pop(rcx);
+ __ PopReturnAddressTo(rcx);
SmiIndex index = masm->SmiToIndex(rbx, rbx, kPointerSizeLog2);
__ lea(rsp, Operand(rsp, index.reg, index.scale, 1 * kPointerSize));
- __ push(rcx);
+ __ PushReturnAddressFrom(rcx);
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->constructed_objects(), 1);
__ ret(0);
@@ -429,6 +444,8 @@ void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
bool is_construct) {
+ ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
// Expects five C++ function parameters.
// - Address entry (ignored)
// - JSFunction* function (
@@ -446,10 +463,10 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
#ifdef _WIN64
// MSVC parameters in:
- // rcx : entry (ignored)
- // rdx : function
- // r8 : receiver
- // r9 : argc
+ // rcx : entry (ignored)
+ // rdx : function
+ // r8 : receiver
+ // r9 : argc
// [rsp+0x20] : argv
// Clear the context before we push it when entering the internal frame.
@@ -498,9 +515,9 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
#endif // _WIN64
// Current stack contents:
- // [rsp + 2 * kPointerSize ... ]: Internal frame
- // [rsp + kPointerSize] : function
- // [rsp] : receiver
+ // [rsp + 2 * kPointerSize ... ] : Internal frame
+ // [rsp + kPointerSize] : function
+ // [rsp] : receiver
// Current register contents:
// rax : argc
// rbx : argv
@@ -523,6 +540,10 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Invoke the code.
if (is_construct) {
+ // No type feedback cell is available
+ Handle<Object> undefined_sentinel(
+ masm->isolate()->factory()->undefined_value());
+ __ Move(rbx, undefined_sentinel);
// Expects rdi to hold function pointer.
CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
__ CallStub(&stub);
@@ -553,56 +574,111 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
+ CallRuntimePassFunction(masm, Runtime::kLazyCompile);
+ // Do a tail-call of the compiled function.
+ __ lea(rax, FieldOperand(rax, Code::kHeaderSize));
+ __ jmp(rax);
+}
- // Push a copy of the function onto the stack.
- __ push(rdi);
- // Push call kind information.
- __ push(rcx);
- __ push(rdi); // Function is also the parameter to the runtime call.
- __ CallRuntime(Runtime::kLazyCompile, 1);
+void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
+ CallRuntimePassFunction(masm, Runtime::kLazyRecompile);
+ // Do a tail-call of the compiled function.
+ __ lea(rax, FieldOperand(rax, Code::kHeaderSize));
+ __ jmp(rax);
+}
+
- // Restore call kind information.
- __ pop(rcx);
- // Restore receiver.
- __ pop(rdi);
+static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
+ // For now, we are relying on the fact that make_code_young doesn't do any
+ // garbage collection which allows us to save/restore the registers without
+ // worrying about which of them contain pointers. We also don't build an
+ // internal frame to make the code faster, since we shouldn't have to do stack
+ // crawls in MakeCodeYoung. This seems a bit fragile.
- // Tear down internal frame.
+ // Re-execute the code that was patched back to the young age when
+ // the stub returns.
+ __ subq(Operand(rsp, 0), Immediate(5));
+ __ Pushad();
+ __ movq(arg_reg_2,
+ ExternalReference::isolate_address(masm->isolate()));
+ __ movq(arg_reg_1, Operand(rsp, kNumSafepointRegisters * kPointerSize));
+ { // NOLINT
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ PrepareCallCFunction(1);
+ __ CallCFunction(
+ ExternalReference::get_make_code_young_function(masm->isolate()), 1);
}
+ __ Popad();
+ __ ret(0);
+}
- // Do a tail-call of the compiled function.
- __ lea(rax, FieldOperand(rax, Code::kHeaderSize));
- __ jmp(rax);
+
+#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
+void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
+ MacroAssembler* masm) { \
+ GenerateMakeCodeYoungAgainCommon(masm); \
+} \
+void Builtins::Generate_Make##C##CodeYoungAgainOddMarking( \
+ MacroAssembler* masm) { \
+ GenerateMakeCodeYoungAgainCommon(masm); \
}
+CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
+#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
-void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
+void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
+ // For now, as in GenerateMakeCodeYoungAgainCommon, we are relying on the fact
+ // that make_code_young doesn't do any garbage collection which allows us to
+ // save/restore the registers without worrying about which of them contain
+ // pointers.
+ __ Pushad();
+ __ movq(arg_reg_2, ExternalReference::isolate_address(masm->isolate()));
+ __ movq(arg_reg_1, Operand(rsp, kNumSafepointRegisters * kPointerSize));
+ __ subq(arg_reg_1, Immediate(Assembler::kShortCallInstructionLength));
+ { // NOLINT
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ PrepareCallCFunction(1);
+ __ CallCFunction(
+ ExternalReference::get_mark_code_as_executed_function(masm->isolate()),
+ 1);
+ }
+ __ Popad();
- // Push a copy of the function onto the stack.
- __ push(rdi);
- // Push call kind information.
- __ push(rcx);
+ // Perform prologue operations usually performed by the young code stub.
+ __ PopReturnAddressTo(kScratchRegister);
+ __ push(rbp); // Caller's frame pointer.
+ __ movq(rbp, rsp);
+ __ push(rsi); // Callee's context.
+ __ push(rdi); // Callee's JS Function.
+ __ PushReturnAddressFrom(kScratchRegister);
+
+ // Jump to point after the code-age stub.
+ __ ret(0);
+}
- __ push(rdi); // Function is also the parameter to the runtime call.
- __ CallRuntime(Runtime::kLazyRecompile, 1);
- // Restore call kind information.
- __ pop(rcx);
- // Restore function.
- __ pop(rdi);
+void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
+ GenerateMakeCodeYoungAgainCommon(masm);
+}
+
+
+void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+ // Enter an internal frame.
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // Preserve registers across notification, this is important for compiled
+ // stubs that tail call the runtime on deopts passing their parameters in
+ // registers.
+ __ Pushad();
+ __ CallRuntime(Runtime::kNotifyStubFailure, 0);
+ __ Popad();
// Tear down internal frame.
}
- // Do a tail-call of the compiled function.
- __ lea(rax, FieldOperand(rax, Code::kHeaderSize));
- __ jmp(rax);
+ __ pop(MemOperand(rsp, 0)); // Ignore state offset
+ __ ret(0); // Return to IC Miss stub, continuation still on stack.
}
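
One detail worth spelling out from GenerateMakeCodeYoungAgainCommon above: the young-code prologue is patched with a 5-byte short call into this stub (the same kShortCallInstructionLength that Generate_MarkCodeAsExecutedOnce subtracts), so rewinding the saved return address by 5 makes the final ret re-execute the restored prologue. A worked sketch with made-up addresses:

    // Hypothetical addresses, only to show the arithmetic.
    const uintptr_t patched_call_site = 0x1000;                    // start of the 5-byte call
    const uintptr_t return_address = patched_call_site + 5;        // pushed by that short call
    // subq(Operand(rsp, 0), Immediate(5)) turns the saved return address into:
    const uintptr_t resume_at =
        return_address - Assembler::kShortCallInstructionLength;   // == patched_call_site
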
@@ -620,57 +696,48 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
}
// Get the full codegen state from the stack and untag it.
- __ SmiToInteger32(rcx, Operand(rsp, 1 * kPointerSize));
+ __ SmiToInteger32(kScratchRegister, Operand(rsp, kPCOnStackSize));
// Switch on the state.
Label not_no_registers, not_tos_rax;
- __ cmpq(rcx, Immediate(FullCodeGenerator::NO_REGISTERS));
+ __ cmpq(kScratchRegister, Immediate(FullCodeGenerator::NO_REGISTERS));
__ j(not_equal, &not_no_registers, Label::kNear);
__ ret(1 * kPointerSize); // Remove state.
__ bind(&not_no_registers);
- __ movq(rax, Operand(rsp, 2 * kPointerSize));
- __ cmpq(rcx, Immediate(FullCodeGenerator::TOS_REG));
+ __ movq(rax, Operand(rsp, kPCOnStackSize + kPointerSize));
+ __ cmpq(kScratchRegister, Immediate(FullCodeGenerator::TOS_REG));
__ j(not_equal, &not_tos_rax, Label::kNear);
__ ret(2 * kPointerSize); // Remove state, rax.
__ bind(&not_tos_rax);
- __ Abort("no cases left");
+ __ Abort(kNoCasesLeft);
}
+
void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
}
-void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
- Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
+void Builtins::Generate_NotifySoftDeoptimized(MacroAssembler* masm) {
+ Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
}
-void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
- // For now, we are relying on the fact that Runtime::NotifyOSR
- // doesn't do any garbage collection which allows us to save/restore
- // the registers without worrying about which of them contain
- // pointers. This seems a bit fragile.
- __ Pushad();
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kNotifyOSR, 0);
- }
- __ Popad();
- __ ret(0);
+void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
+ Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
}
void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// Stack Layout:
- // rsp[0]: Return address
- // rsp[1]: Argument n
- // rsp[2]: Argument n-1
+ // rsp[0] : Return address
+ // rsp[8] : Argument n
+ // rsp[16] : Argument n-1
// ...
- // rsp[n]: Argument 1
- // rsp[n+1]: Receiver (function to call)
+ // rsp[8 * n] : Argument 1
+ // rsp[8 * (n + 1)] : Receiver (function to call)
//
// rax contains the number of arguments, n, not counting the receiver.
//
@@ -678,9 +745,9 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
{ Label done;
__ testq(rax, rax);
__ j(not_zero, &done);
- __ pop(rbx);
+ __ PopReturnAddressTo(rbx);
__ Push(masm->isolate()->factory()->undefined_value());
- __ push(rbx);
+ __ PushReturnAddressFrom(rbx);
__ incq(rax);
__ bind(&done);
}
@@ -688,8 +755,8 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// 2. Get the function to call (passed as receiver) from the stack, check
// if it is a function.
Label slow, non_function;
- // The function to call is at position n+1 on the stack.
- __ movq(rdi, Operand(rsp, rax, times_pointer_size, 1 * kPointerSize));
+ StackArgumentsAccessor args(rsp, rax);
+ __ movq(rdi, args.GetReceiverOperand());
__ JumpIfSmi(rdi, &non_function);
__ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
__ j(not_equal, &slow);
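
The StackArgumentsAccessor introduced above presumably wraps the operand arithmetic that the removed line wrote out by hand; side by side, under that assumption:

    // Hand-written form (removed): the receiver sits above the rax arguments.
    __ movq(rdi, Operand(rsp, rax, times_pointer_size, 1 * kPointerSize));
    // Accessor form (added): the same address, computed behind a named helper.
    StackArgumentsAccessor args(rsp, rax);
    __ movq(rdi, args.GetReceiverOperand());
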
@@ -714,7 +781,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ j(not_zero, &shift_arguments);
// Compute the receiver in non-strict mode.
- __ movq(rbx, Operand(rsp, rax, times_pointer_size, 0));
+ __ movq(rbx, args.GetArgumentOperand(1));
__ JumpIfSmi(rbx, &convert_to_object, Label::kNear);
__ CompareRoot(rbx, Heap::kNullValueRootIndex);
@@ -743,7 +810,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
}
// Restore the function to rdi.
- __ movq(rdi, Operand(rsp, rax, times_pointer_size, 1 * kPointerSize));
+ __ movq(rdi, args.GetReceiverOperand());
__ jmp(&patch_receiver, Label::kNear);
// Use the global receiver object from the called function as the
@@ -757,7 +824,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
__ bind(&patch_receiver);
- __ movq(Operand(rsp, rax, times_pointer_size, 0), rbx);
+ __ movq(args.GetArgumentOperand(1), rbx);
__ jmp(&shift_arguments);
}
@@ -774,7 +841,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// CALL_NON_FUNCTION builtin expects the non-function callee as
// receiver, so overwrite the first argument which will ultimately
// become the receiver.
- __ movq(Operand(rsp, rax, times_pointer_size, 0), rdi);
+ __ movq(args.GetArgumentOperand(1), rdi);
// 4. Shift arguments and return address one slot down on the stack
// (overwriting the original receiver). Adjust argument count to make
@@ -801,9 +868,9 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ cmpq(rdx, Immediate(1));
__ j(not_equal, &non_proxy);
- __ pop(rdx); // return address
+ __ PopReturnAddressTo(rdx);
__ push(rdi); // re-add proxy object as additional argument
- __ push(rdx);
+ __ PushReturnAddressFrom(rdx);
__ incq(rax);
__ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY);
__ jmp(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
@@ -838,21 +905,21 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// Stack at entry:
- // rsp: return address
- // rsp+8: arguments
- // rsp+16: receiver ("this")
- // rsp+24: function
+ // rsp : return address
+ // rsp[8] : arguments
+ // rsp[16] : receiver ("this")
+ // rsp[24] : function
{
FrameScope frame_scope(masm, StackFrame::INTERNAL);
// Stack frame:
- // rbp: Old base pointer
- // rbp[1]: return address
- // rbp[2]: function arguments
- // rbp[3]: receiver
- // rbp[4]: function
- static const int kArgumentsOffset = 2 * kPointerSize;
- static const int kReceiverOffset = 3 * kPointerSize;
- static const int kFunctionOffset = 4 * kPointerSize;
+ // rbp : Old base pointer
+ // rbp[8] : return address
+ // rbp[16] : function arguments
+ // rbp[24] : receiver
+ // rbp[32] : function
+ static const int kArgumentsOffset = kFPOnStackSize + kPCOnStackSize;
+ static const int kReceiverOffset = kArgumentsOffset + kPointerSize;
+ static const int kFunctionOffset = kReceiverOffset + kPointerSize;
__ push(Operand(rbp, kFunctionOffset));
__ push(Operand(rbp, kArgumentsOffset));
@@ -1002,375 +1069,9 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
}
-// Allocate an empty JSArray. The allocated array is put into the result
-// register. If the parameter initial_capacity is larger than zero an elements
-// backing store is allocated with this size and filled with the hole values.
-// Otherwise the elements backing store is set to the empty FixedArray.
-static void AllocateEmptyJSArray(MacroAssembler* masm,
- Register array_function,
- Register result,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required) {
- const int initial_capacity = JSArray::kPreallocatedArrayElements;
- STATIC_ASSERT(initial_capacity >= 0);
-
- __ LoadInitialArrayMap(array_function, scratch2, scratch1, false);
-
- // Allocate the JSArray object together with space for a fixed array with the
- // requested elements.
- int size = JSArray::kSize;
- if (initial_capacity > 0) {
- size += FixedArray::SizeFor(initial_capacity);
- }
- __ AllocateInNewSpace(size,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
-
- // Allocated the JSArray. Now initialize the fields except for the elements
- // array.
- // result: JSObject
- // scratch1: initial map
- // scratch2: start of next object
- Factory* factory = masm->isolate()->factory();
- __ movq(FieldOperand(result, JSObject::kMapOffset), scratch1);
- __ Move(FieldOperand(result, JSArray::kPropertiesOffset),
- factory->empty_fixed_array());
- // Field JSArray::kElementsOffset is initialized later.
- __ Move(FieldOperand(result, JSArray::kLengthOffset), Smi::FromInt(0));
-
- // If no storage is requested for the elements array just set the empty
- // fixed array.
- if (initial_capacity == 0) {
- __ Move(FieldOperand(result, JSArray::kElementsOffset),
- factory->empty_fixed_array());
- return;
- }
-
- // Calculate the location of the elements array and set elements array member
- // of the JSArray.
- // result: JSObject
- // scratch2: start of next object
- __ lea(scratch1, Operand(result, JSArray::kSize));
- __ movq(FieldOperand(result, JSArray::kElementsOffset), scratch1);
-
- // Initialize the FixedArray and fill it with holes. FixedArray length is
- // stored as a smi.
- // result: JSObject
- // scratch1: elements array
- // scratch2: start of next object
- __ Move(FieldOperand(scratch1, HeapObject::kMapOffset),
- factory->fixed_array_map());
- __ Move(FieldOperand(scratch1, FixedArray::kLengthOffset),
- Smi::FromInt(initial_capacity));
-
- // Fill the FixedArray with the hole value. Inline the code if short.
- // Reconsider loop unfolding if kPreallocatedArrayElements gets changed.
- static const int kLoopUnfoldLimit = 4;
- __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
- if (initial_capacity <= kLoopUnfoldLimit) {
- // Use a scratch register here to have only one reloc info when unfolding
- // the loop.
- for (int i = 0; i < initial_capacity; i++) {
- __ movq(FieldOperand(scratch1,
- FixedArray::kHeaderSize + i * kPointerSize),
- scratch3);
- }
- } else {
- Label loop, entry;
- __ movq(scratch2, Immediate(initial_capacity));
- __ jmp(&entry);
- __ bind(&loop);
- __ movq(FieldOperand(scratch1,
- scratch2,
- times_pointer_size,
- FixedArray::kHeaderSize),
- scratch3);
- __ bind(&entry);
- __ decq(scratch2);
- __ j(not_sign, &loop);
- }
-}
-
-
-// Allocate a JSArray with the number of elements stored in a register. The
-// register array_function holds the built-in Array function and the register
-// array_size holds the size of the array as a smi. The allocated array is put
-// into the result register and beginning and end of the FixedArray elements
-// storage is put into registers elements_array and elements_array_end (see
-// below for when that is not the case). If the parameter fill_with_holes is
-// true the allocated elements backing store is filled with the hole values
-// otherwise it is left uninitialized. When the backing store is filled the
-// register elements_array is scratched.
-static void AllocateJSArray(MacroAssembler* masm,
- Register array_function, // Array function.
- Register array_size, // As a smi, cannot be 0.
- Register result,
- Register elements_array,
- Register elements_array_end,
- Register scratch,
- bool fill_with_hole,
- Label* gc_required) {
- __ LoadInitialArrayMap(array_function, scratch,
- elements_array, fill_with_hole);
-
- if (FLAG_debug_code) { // Assert that array size is not zero.
- __ testq(array_size, array_size);
- __ Assert(not_zero, "array size is unexpectedly 0");
- }
-
- // Allocate the JSArray object together with space for a FixedArray with the
- // requested elements.
- SmiIndex index =
- masm->SmiToIndex(kScratchRegister, array_size, kPointerSizeLog2);
- __ AllocateInNewSpace(JSArray::kSize + FixedArray::kHeaderSize,
- index.scale,
- index.reg,
- result,
- elements_array_end,
- scratch,
- gc_required,
- TAG_OBJECT);
-
- // Allocated the JSArray. Now initialize the fields except for the elements
- // array.
- // result: JSObject
- // elements_array: initial map
- // elements_array_end: start of next object
- // array_size: size of array (smi)
- Factory* factory = masm->isolate()->factory();
- __ movq(FieldOperand(result, JSObject::kMapOffset), elements_array);
- __ Move(elements_array, factory->empty_fixed_array());
- __ movq(FieldOperand(result, JSArray::kPropertiesOffset), elements_array);
- // Field JSArray::kElementsOffset is initialized later.
- __ movq(FieldOperand(result, JSArray::kLengthOffset), array_size);
-
- // Calculate the location of the elements array and set elements array member
- // of the JSArray.
- // result: JSObject
- // elements_array_end: start of next object
- // array_size: size of array (smi)
- __ lea(elements_array, Operand(result, JSArray::kSize));
- __ movq(FieldOperand(result, JSArray::kElementsOffset), elements_array);
-
- // Initialize the fixed array. FixedArray length is stored as a smi.
- // result: JSObject
- // elements_array: elements array
- // elements_array_end: start of next object
- // array_size: size of array (smi)
- __ Move(FieldOperand(elements_array, JSObject::kMapOffset),
- factory->fixed_array_map());
- // For non-empty JSArrays the length of the FixedArray and the JSArray is the
- // same.
- __ movq(FieldOperand(elements_array, FixedArray::kLengthOffset), array_size);
-
- // Fill the allocated FixedArray with the hole value if requested.
- // result: JSObject
- // elements_array: elements array
- // elements_array_end: start of next object
- if (fill_with_hole) {
- Label loop, entry;
- __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
- __ lea(elements_array, Operand(elements_array,
- FixedArray::kHeaderSize - kHeapObjectTag));
- __ jmp(&entry);
- __ bind(&loop);
- __ movq(Operand(elements_array, 0), scratch);
- __ addq(elements_array, Immediate(kPointerSize));
- __ bind(&entry);
- __ cmpq(elements_array, elements_array_end);
- __ j(below, &loop);
- }
-}
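// Editorial sketch (not part of the patch): the fill_with_hole loop above,
// expressed as plain C++ over an untagged slot range. `the_hole` stands in for
// the heap's hole value (Heap::kTheHoleValueRootIndex); `start` and `end`
// correspond to elements_array and elements_array_end in the generated code.
static void FillElementsWithHole(void** start, void** end, void* the_hole) {
  // Same store/advance/compare pattern as the jitted loop; a zero-length
  // range writes nothing because the comparison is checked before each store.
  for (void** slot = start; slot < end; ++slot) {
    *slot = the_hole;
  }
}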
-
-
-// Create a new array for the built-in Array function. This function allocates
-// the JSArray object and the FixedArray elements array and initializes these.
-// If the Array cannot be constructed in native code the runtime is called. This
-// function assumes the following state:
-// rdi: constructor (built-in Array function)
-// rax: argc
-// rsp[0]: return address
-// rsp[8]: last argument
-// This function is used for both construct and normal calls of Array. The only
-// difference between handling a construct call and a normal call is that for a
-// construct call the constructor function in rdi needs to be preserved for
-// entering the generic code. In both cases argc in rax needs to be preserved.
-// Both registers are preserved by this code so no need to differentiate between
-// a construct call and a normal call.
-static void ArrayNativeCode(MacroAssembler* masm,
- Label* call_generic_code) {
- Label argc_one_or_more, argc_two_or_more, empty_array, not_empty_array,
- has_non_smi_element, finish, cant_transition_map, not_double;
-
- // Check for array construction with zero arguments.
- __ testq(rax, rax);
- __ j(not_zero, &argc_one_or_more);
-
- __ bind(&empty_array);
- // Handle construction of an empty array.
- AllocateEmptyJSArray(masm,
- rdi,
- rbx,
- rcx,
- rdx,
- r8,
- call_generic_code);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->array_function_native(), 1);
- __ movq(rax, rbx);
- __ ret(kPointerSize);
-
- // Check for one argument. Bail out if the argument is not a smi or if it is
- // negative.
- __ bind(&argc_one_or_more);
- __ cmpq(rax, Immediate(1));
- __ j(not_equal, &argc_two_or_more);
- __ movq(rdx, Operand(rsp, kPointerSize)); // Get the argument from the stack.
-
- __ SmiTest(rdx);
- __ j(not_zero, &not_empty_array);
- __ pop(r8); // Adjust stack.
- __ Drop(1);
- __ push(r8);
- __ movq(rax, Immediate(0)); // Treat this as a call with argc of zero.
- __ jmp(&empty_array);
-
- __ bind(&not_empty_array);
- __ JumpUnlessNonNegativeSmi(rdx, call_generic_code);
-
- // Handle construction of an empty array of a certain size. Bail out if size
- // is too large to actually allocate an elements array.
- __ SmiCompare(rdx, Smi::FromInt(JSObject::kInitialMaxFastElementArray));
- __ j(greater_equal, call_generic_code);
-
- // rax: argc
- // rdx: array_size (smi)
- // rdi: constructor
- // rsp[0]: return address
- // rsp[8]: argument
- AllocateJSArray(masm,
- rdi,
- rdx,
- rbx,
- rcx,
- r8,
- r9,
- true,
- call_generic_code);
- __ IncrementCounter(counters->array_function_native(), 1);
- __ movq(rax, rbx);
- __ ret(2 * kPointerSize);
-
- // Handle construction of an array from a list of arguments.
- __ bind(&argc_two_or_more);
- __ movq(rdx, rax);
- __ Integer32ToSmi(rdx, rdx); // Convert argc to a smi.
- // rax: argc
- // rdx: array_size (smi)
- // rdi: constructor
- // rsp[0] : return address
- // rsp[8] : last argument
- AllocateJSArray(masm,
- rdi,
- rdx,
- rbx,
- rcx,
- r8,
- r9,
- false,
- call_generic_code);
- __ IncrementCounter(counters->array_function_native(), 1);
-
- // rax: argc
- // rbx: JSArray
- // rcx: elements_array
- // r8: elements_array_end (untagged)
- // rsp[0]: return address
- // rsp[8]: last argument
-
- // Location of the last argument
- __ lea(r9, Operand(rsp, kPointerSize));
-
- // Location of the first array element (the parameter fill_with_hole to
- // AllocateJSArray is false, so the FixedArray is returned in rcx).
- __ lea(rdx, Operand(rcx, FixedArray::kHeaderSize - kHeapObjectTag));
-
- // rax: argc
- // rbx: JSArray
- // rdx: location of the first array element
- // r9: location of the last argument
- // rsp[0]: return address
- // rsp[8]: last argument
- Label loop, entry;
- __ movq(rcx, rax);
- __ jmp(&entry);
- __ bind(&loop);
- __ movq(r8, Operand(r9, rcx, times_pointer_size, 0));
- if (FLAG_smi_only_arrays) {
- __ JumpIfNotSmi(r8, &has_non_smi_element);
- }
- __ movq(Operand(rdx, 0), r8);
- __ addq(rdx, Immediate(kPointerSize));
- __ bind(&entry);
- __ decq(rcx);
- __ j(greater_equal, &loop);
-
- // Remove caller arguments from the stack and return.
- // rax: argc
- // rbx: JSArray
- // rsp[0]: return address
- // rsp[8]: last argument
- __ bind(&finish);
- __ pop(rcx);
- __ lea(rsp, Operand(rsp, rax, times_pointer_size, 1 * kPointerSize));
- __ push(rcx);
- __ movq(rax, rbx);
- __ ret(0);
-
- __ bind(&has_non_smi_element);
- // Double values are handled by the runtime.
- __ CheckMap(r8,
- masm->isolate()->factory()->heap_number_map(),
- &not_double,
- DONT_DO_SMI_CHECK);
- __ bind(&cant_transition_map);
- __ UndoAllocationInNewSpace(rbx);
- __ jmp(call_generic_code);
-
- __ bind(&not_double);
- // Transition FAST_SMI_ELEMENTS to FAST_ELEMENTS.
- // rbx: JSArray
- __ movq(r11, FieldOperand(rbx, HeapObject::kMapOffset));
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_ELEMENTS,
- r11,
- kScratchRegister,
- &cant_transition_map);
-
- __ movq(FieldOperand(rbx, HeapObject::kMapOffset), r11);
- __ RecordWriteField(rbx, HeapObject::kMapOffset, r11, r8,
- kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
- // Finish the array initialization loop.
- Label loop2;
- __ bind(&loop2);
- __ movq(r8, Operand(r9, rcx, times_pointer_size, 0));
- __ movq(Operand(rdx, 0), r8);
- __ addq(rdx, Immediate(kPointerSize));
- __ decq(rcx);
- __ j(greater_equal, &loop2);
- __ jmp(&finish);
-}
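// Editorial sketch (not part of the patch): the argument dispatch performed by
// the removed ArrayNativeCode, written in plain C++. kGeneric models the jump
// to call_generic_code; max_fast_elements stands in for
// JSObject::kInitialMaxFastElementArray. The extra bailout taken inside the
// fill loop when a non-smi element is seen is not modelled here.
enum class ArrayCallKind { kEmptyArray, kPreallocated, kFromArguments, kGeneric };

static ArrayCallKind ClassifyArrayCall(int argc,
                                       bool arg_is_nonneg_smi,
                                       int arg_value,
                                       int max_fast_elements) {
  if (argc == 0) return ArrayCallKind::kEmptyArray;            // Array()
  if (argc == 1) {
    if (!arg_is_nonneg_smi) return ArrayCallKind::kGeneric;    // not a valid length
    if (arg_value == 0) return ArrayCallKind::kEmptyArray;     // Array(0)
    if (arg_value >= max_fast_elements) return ArrayCallKind::kGeneric;
    return ArrayCallKind::kPreallocated;                       // hole-filled backing store
  }
  return ArrayCallKind::kFromArguments;                        // Array(a, b, ...)
}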
-
-
void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- rax : argc
+ // -- rax : argc
// -- rsp[0] : return address
// -- rsp[8] : last argument
// -----------------------------------
@@ -1385,27 +1086,22 @@ void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
// Will both indicate a NULL and a Smi.
STATIC_ASSERT(kSmiTag == 0);
Condition not_smi = NegateCondition(masm->CheckSmi(rbx));
- __ Check(not_smi, "Unexpected initial map for InternalArray function");
+ __ Check(not_smi, kUnexpectedInitialMapForInternalArrayFunction);
__ CmpObjectType(rbx, MAP_TYPE, rcx);
- __ Check(equal, "Unexpected initial map for InternalArray function");
+ __ Check(equal, kUnexpectedInitialMapForInternalArrayFunction);
}
// Run the native code for the InternalArray function called as a normal
// function.
- ArrayNativeCode(masm, &generic_array_code);
-
- // Jump to the generic array code in case the specialized code cannot handle
- // the construction.
- __ bind(&generic_array_code);
- Handle<Code> array_code =
- masm->isolate()->builtins()->InternalArrayCodeGeneric();
- __ Jump(array_code, RelocInfo::CODE_TARGET);
+ // Tail call a stub.
+ InternalArrayConstructorStub stub(masm->isolate());
+ __ TailCallStub(&stub);
}
void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- rax : argc
+ // -- rax : argc
// -- rsp[0] : return address
// -- rsp[8] : last argument
// -----------------------------------
@@ -1420,54 +1116,19 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
// Will both indicate a NULL and a Smi.
STATIC_ASSERT(kSmiTag == 0);
Condition not_smi = NegateCondition(masm->CheckSmi(rbx));
- __ Check(not_smi, "Unexpected initial map for Array function");
+ __ Check(not_smi, kUnexpectedInitialMapForArrayFunction);
__ CmpObjectType(rbx, MAP_TYPE, rcx);
- __ Check(equal, "Unexpected initial map for Array function");
+ __ Check(equal, kUnexpectedInitialMapForArrayFunction);
}
// Run the native code for the Array function called as a normal function.
- ArrayNativeCode(masm, &generic_array_code);
-
- // Jump to the generic array code in case the specialized code cannot handle
- // the construction.
- __ bind(&generic_array_code);
- Handle<Code> array_code =
- masm->isolate()->builtins()->ArrayCodeGeneric();
- __ Jump(array_code, RelocInfo::CODE_TARGET);
-}
-
-
-void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : argc
- // -- rdi : constructor
- // -- rsp[0] : return address
- // -- rsp[8] : last argument
- // -----------------------------------
- Label generic_constructor;
-
- if (FLAG_debug_code) {
- // The array construct code is only set for the builtin and internal
- // Array functions which always have a map.
- // Initial map for the builtin Array function should be a map.
- __ movq(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
- // Will both indicate a NULL and a Smi.
- STATIC_ASSERT(kSmiTag == 0);
- Condition not_smi = NegateCondition(masm->CheckSmi(rbx));
- __ Check(not_smi, "Unexpected initial map for Array function");
- __ CmpObjectType(rbx, MAP_TYPE, rcx);
- __ Check(equal, "Unexpected initial map for Array function");
- }
-
- // Run the native code for the Array function called as constructor.
- ArrayNativeCode(masm, &generic_constructor);
-
- // Jump to the generic construct code in case the specialized code cannot
- // handle the construction.
- __ bind(&generic_constructor);
- Handle<Code> generic_construct_stub =
- masm->isolate()->builtins()->JSConstructStubGeneric();
- __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
+ // Tail call a stub.
+ Handle<Object> undefined_sentinel(
+ masm->isolate()->heap()->undefined_value(),
+ masm->isolate());
+ __ Move(rbx, undefined_sentinel);
+ ArrayConstructorStub stub(masm->isolate());
+ __ TailCallStub(&stub);
}
@@ -1485,30 +1146,28 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
if (FLAG_debug_code) {
__ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, rcx);
__ cmpq(rdi, rcx);
- __ Assert(equal, "Unexpected String function");
+ __ Assert(equal, kUnexpectedStringFunction);
}
// Load the first argument into rax and get rid of the rest
// (including the receiver).
+ StackArgumentsAccessor args(rsp, rax);
Label no_arguments;
__ testq(rax, rax);
__ j(zero, &no_arguments);
- __ movq(rbx, Operand(rsp, rax, times_pointer_size, 0));
- __ pop(rcx);
+ __ movq(rbx, args.GetArgumentOperand(1));
+ __ PopReturnAddressTo(rcx);
__ lea(rsp, Operand(rsp, rax, times_pointer_size, kPointerSize));
- __ push(rcx);
+ __ PushReturnAddressFrom(rcx);
__ movq(rax, rbx);
// Lookup the argument in the number to string cache.
Label not_cached, argument_is_string;
- NumberToStringStub::GenerateLookupNumberStringCache(
- masm,
- rax, // Input.
- rbx, // Result.
- rcx, // Scratch 1.
- rdx, // Scratch 2.
- false, // Input is known to be smi?
- &not_cached);
+ __ LookupNumberStringCache(rax, // Input.
+ rbx, // Result.
+ rcx, // Scratch 1.
+ rdx, // Scratch 2.
+ &not_cached);
__ IncrementCounter(counters->string_ctor_cached_number(), 1);
__ bind(&argument_is_string);
@@ -1520,21 +1179,21 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
// Allocate a JSValue and put the tagged pointer into rax.
Label gc_required;
- __ AllocateInNewSpace(JSValue::kSize,
- rax, // Result.
- rcx, // New allocation top (we ignore it).
- no_reg,
- &gc_required,
- TAG_OBJECT);
+ __ Allocate(JSValue::kSize,
+ rax, // Result.
+ rcx, // New allocation top (we ignore it).
+ no_reg,
+ &gc_required,
+ TAG_OBJECT);
// Set the map.
__ LoadGlobalFunctionInitialMap(rdi, rcx);
if (FLAG_debug_code) {
__ cmpb(FieldOperand(rcx, Map::kInstanceSizeOffset),
Immediate(JSValue::kSize >> kPointerSizeLog2));
- __ Assert(equal, "Unexpected string wrapper instance size");
+ __ Assert(equal, kUnexpectedStringWrapperInstanceSize);
__ cmpb(FieldOperand(rcx, Map::kUnusedPropertyFieldsOffset), Immediate(0));
- __ Assert(equal, "Unexpected unused properties of string wrapper");
+ __ Assert(equal, kUnexpectedUnusedPropertiesOfStringWrapper);
}
__ movq(FieldOperand(rax, HeapObject::kMapOffset), rcx);
@@ -1580,10 +1239,10 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
// Load the empty string into rbx, remove the receiver from the
// stack, and jump back to the case where the argument is a string.
__ bind(&no_arguments);
- __ LoadRoot(rbx, Heap::kEmptyStringRootIndex);
- __ pop(rcx);
+ __ LoadRoot(rbx, Heap::kempty_stringRootIndex);
+ __ PopReturnAddressTo(rcx);
__ lea(rsp, Operand(rsp, kPointerSize));
- __ push(rcx);
+ __ PushReturnAddressFrom(rcx);
__ jmp(&argument_is_string);
// At this point the argument is already a string. Call runtime to
@@ -1626,10 +1285,10 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
__ pop(rbp);
// Remove caller arguments from the stack.
- __ pop(rcx);
+ __ PopReturnAddressTo(rcx);
SmiIndex index = masm->SmiToIndex(rbx, rbx, kPointerSizeLog2);
__ lea(rsp, Operand(rsp, index.reg, index.scale, 1 * kPointerSize));
- __ push(rcx);
+ __ PushReturnAddressFrom(rcx);
}
@@ -1720,61 +1379,63 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
- // Get the loop depth of the stack guard check. This is recorded in
- // a test(rax, depth) instruction right after the call.
- Label stack_check;
- __ movq(rbx, Operand(rsp, 0)); // return address
- __ movzxbq(rbx, Operand(rbx, 1)); // depth
-
- // Get the loop nesting level at which we allow OSR from the
- // unoptimized code and check if we want to do OSR yet. If not we
- // should perform a stack guard check so we can get interrupts while
- // waiting for on-stack replacement.
+ // Lookup the function in the JavaScript frame.
__ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ movq(rcx, FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset));
- __ movq(rcx, FieldOperand(rcx, SharedFunctionInfo::kCodeOffset));
- __ cmpb(rbx, FieldOperand(rcx, Code::kAllowOSRAtLoopNestingLevelOffset));
- __ j(greater, &stack_check);
-
- // Pass the function to optimize as the argument to the on-stack
- // replacement runtime function.
{
FrameScope scope(masm, StackFrame::INTERNAL);
+ // Lookup and calculate pc offset.
+ __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerPCOffset));
+ __ movq(rbx, FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset));
+ __ subq(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ subq(rdx, FieldOperand(rbx, SharedFunctionInfo::kCodeOffset));
+ __ Integer32ToSmi(rdx, rdx);
+
+ // Pass both function and pc offset as arguments.
__ push(rax);
- __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
+ __ push(rdx);
+ __ CallRuntime(Runtime::kCompileForOnStackReplacement, 2);
}
- // If the result was -1 it means that we couldn't optimize the
- // function. Just return and continue in the unoptimized version.
Label skip;
- __ SmiCompare(rax, Smi::FromInt(-1));
+ // If the code object is null, just return to the unoptimized code.
+ __ cmpq(rax, Immediate(0));
__ j(not_equal, &skip, Label::kNear);
__ ret(0);
- // If we decide not to perform on-stack replacement we perform a
- // stack guard check to enable interrupts.
- __ bind(&stack_check);
+ __ bind(&skip);
+
+ // Load deoptimization data from the code object.
+ __ movq(rbx, Operand(rax, Code::kDeoptimizationDataOffset - kHeapObjectTag));
+
+ // Load the OSR entrypoint offset from the deoptimization data.
+ __ SmiToInteger32(rbx, Operand(rbx, FixedArray::OffsetOfElementAt(
+ DeoptimizationInputData::kOsrPcOffsetIndex) - kHeapObjectTag));
+
+ // Compute the target address = code_obj + header_size + osr_offset
+ __ lea(rax, Operand(rax, rbx, times_1, Code::kHeaderSize - kHeapObjectTag));
+
+ // Overwrite the return address on the stack.
+ __ movq(Operand(rsp, 0), rax);
+
+ // And "return" to the OSR entry point of the function.
+ __ ret(0);
+}
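// Editorial sketch (not part of the patch): the two address computations in
// the new OSR path, written out in C++. Code::kHeaderSize and kHeapObjectTag
// are represented by same-named parameters; `unoptimized_code` and
// `optimized_code` are the tagged Code pointers of the caller's unoptimized
// code and of the code object returned by Runtime::kCompileForOnStackReplacement.
#include <cstdint>  // Would live at the top of a real translation unit.

static inline uintptr_t PcOffsetInUnoptimizedCode(uintptr_t caller_pc,
                                                  uintptr_t unoptimized_code,
                                                  uintptr_t kHeaderSize,
                                                  uintptr_t kHeapObjectTag) {
  // Mirrors the two subq instructions: the return address relative to the
  // first instruction of the unoptimized code.
  return caller_pc - (kHeaderSize - kHeapObjectTag) - unoptimized_code;
}

static inline uintptr_t OsrEntryAddress(uintptr_t optimized_code,
                                        uintptr_t kHeaderSize,
                                        uintptr_t kHeapObjectTag,
                                        uintptr_t osr_pc_offset) {
  // Mirrors the final lea: skip the Code object's header, undo the heap-object
  // tag, then add the entry offset recorded in the deoptimization data.
  return optimized_code + kHeaderSize - kHeapObjectTag + osr_pc_offset;
}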
+
+
+void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
+ // We check the stack limit as an indicator that recompilation might be done.
Label ok;
__ CompareRoot(rsp, Heap::kStackLimitRootIndex);
- __ j(above_equal, &ok, Label::kNear);
-
- StackCheckStub stub;
- __ TailCallStub(&stub);
- if (FLAG_debug_code) {
- __ Abort("Unreachable code: returned from tail call.");
+ __ j(above_equal, &ok);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kStackGuard, 0);
}
+ __ jmp(masm->isolate()->builtins()->OnStackReplacement(),
+ RelocInfo::CODE_TARGET);
+
__ bind(&ok);
__ ret(0);
-
- __ bind(&skip);
- // Untag the AST id and push it on the stack.
- __ SmiToInteger32(rax, rax);
- __ push(rax);
-
- // Generate the code for doing the frame-to-frame translation using
- // the deoptimizer infrastructure.
- Deoptimizer::EntryGenerator generator(masm, Deoptimizer::OSR);
- generator.Generate();
}
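// Editorial sketch (not part of the patch): the control flow of the new
// OsrAfterStackCheck builtin in plain C++. `below_stack_limit` models the
// CompareRoot(rsp, Heap::kStackLimitRootIndex) check; the two callbacks stand
// for Runtime::kStackGuard and the OnStackReplacement builtin.
static void OsrAfterStackCheckFlow(bool below_stack_limit,
                                   void (*stack_guard)(),
                                   void (*on_stack_replacement)()) {
  if (below_stack_limit) {
    stack_guard();            // Runtime::kStackGuard under an internal frame.
    on_stack_replacement();   // Then tail-jump to the OnStackReplacement builtin.
    return;
  }
  // Stack limit not hit: simply return to the unoptimized code.
}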
diff --git a/deps/v8/src/x64/code-stubs-x64.cc b/deps/v8/src/x64/code-stubs-x64.cc
index f0f9c5d27..b3ab8c1e7 100644
--- a/deps/v8/src/x64/code-stubs-x64.cc
+++ b/deps/v8/src/x64/code-stubs-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -27,171 +27,305 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_X64)
+#if V8_TARGET_ARCH_X64
#include "bootstrapper.h"
#include "code-stubs.h"
#include "regexp-macro-assembler.h"
+#include "stub-cache.h"
+#include "runtime.h"
namespace v8 {
namespace internal {
-#define __ ACCESS_MASM(masm)
-void ToNumberStub::Generate(MacroAssembler* masm) {
- // The ToNumber stub takes one argument in eax.
- Label check_heap_number, call_builtin;
- __ SmiTest(rax);
- __ j(not_zero, &check_heap_number, Label::kNear);
- __ Ret();
+void FastNewClosureStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { rbx };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kNewClosureFromStubFailure)->entry;
+}
- __ bind(&check_heap_number);
- __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &call_builtin, Label::kNear);
- __ Ret();
- __ bind(&call_builtin);
- __ pop(rcx); // Pop return address.
- __ push(rax);
- __ push(rcx); // Push return address.
- __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
+void ToNumberStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { rax };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
}
-void FastNewClosureStub::Generate(MacroAssembler* masm) {
- // Create a new closure from the given function info in new
- // space. Set the context to the current context in rsi.
- Counters* counters = masm->isolate()->counters();
+void NumberToStringStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { rax };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kNumberToString)->entry;
+}
- Label gc;
- __ AllocateInNewSpace(JSFunction::kSize, rax, rbx, rcx, &gc, TAG_OBJECT);
- __ IncrementCounter(counters->fast_new_closure_total(), 1);
+void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { rax, rbx, rcx };
+ descriptor->register_param_count_ = 3;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kCreateArrayLiteralShallow)->entry;
+}
- // Get the function info from the stack.
- __ movq(rdx, Operand(rsp, 1 * kPointerSize));
- int map_index = (language_mode_ == CLASSIC_MODE)
- ? Context::FUNCTION_MAP_INDEX
- : Context::STRICT_MODE_FUNCTION_MAP_INDEX;
+void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { rax, rbx, rcx, rdx };
+ descriptor->register_param_count_ = 4;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry;
+}
- // Compute the function map in the current native context and set that
- // as the map of the allocated object.
- __ movq(rcx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ movq(rcx, FieldOperand(rcx, GlobalObject::kNativeContextOffset));
- __ movq(rbx, Operand(rcx, Context::SlotOffset(map_index)));
- __ movq(FieldOperand(rax, JSObject::kMapOffset), rbx);
-
- // Initialize the rest of the function. We don't have to update the
- // write barrier because the allocated object is in new space.
- __ LoadRoot(rbx, Heap::kEmptyFixedArrayRootIndex);
- __ LoadRoot(r8, Heap::kTheHoleValueRootIndex);
- __ LoadRoot(rdi, Heap::kUndefinedValueRootIndex);
- __ movq(FieldOperand(rax, JSObject::kPropertiesOffset), rbx);
- __ movq(FieldOperand(rax, JSObject::kElementsOffset), rbx);
- __ movq(FieldOperand(rax, JSFunction::kPrototypeOrInitialMapOffset), r8);
- __ movq(FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset), rdx);
- __ movq(FieldOperand(rax, JSFunction::kContextOffset), rsi);
- __ movq(FieldOperand(rax, JSFunction::kLiteralsOffset), rbx);
-
- // Initialize the code pointer in the function to be the one
- // found in the shared function info object.
- // But first check if there is an optimized version for our context.
- Label check_optimized;
- Label install_unoptimized;
- if (FLAG_cache_optimized_code) {
- __ movq(rbx,
- FieldOperand(rdx, SharedFunctionInfo::kOptimizedCodeMapOffset));
- __ testq(rbx, rbx);
- __ j(not_zero, &check_optimized, Label::kNear);
+
+void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { rbx };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
+void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { rdx, rax };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
+}
+
+
+void LoadFieldStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { rax };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
+void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { rdx };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
+void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { rdx, rcx, rax };
+ descriptor->register_param_count_ = 3;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(KeyedStoreIC_MissFromStubFailure);
+}
+
+
+void TransitionElementsKindStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { rax, rbx };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry;
+}
+
+
+void BinaryOpStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { rdx, rax };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
+ descriptor->SetMissHandler(
+ ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate));
+}
+
+
+static void InitializeArrayConstructorDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor,
+ int constant_stack_parameter_count) {
+ // register state
+ // rax -- number of arguments
+ // rdi -- function
+ // rbx -- type info cell with elements kind
+ static Register registers[] = { rdi, rbx };
+ descriptor->register_param_count_ = 2;
+ if (constant_stack_parameter_count != 0) {
+ // The stack parameter count covers the constructor pointer and a single argument.
+ descriptor->stack_parameter_count_ = rax;
}
- __ bind(&install_unoptimized);
- __ movq(FieldOperand(rax, JSFunction::kNextFunctionLinkOffset),
- rdi); // Initialize with undefined.
- __ movq(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
- __ lea(rdx, FieldOperand(rdx, Code::kHeaderSize));
- __ movq(FieldOperand(rax, JSFunction::kCodeEntryOffset), rdx);
+ descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
+ descriptor->register_params_ = registers;
+ descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kArrayConstructor)->entry;
+}
- // Return and remove the on-stack parameter.
- __ ret(1 * kPointerSize);
- __ bind(&check_optimized);
+static void InitializeInternalArrayConstructorDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor,
+ int constant_stack_parameter_count) {
+ // register state
+ // rax -- number of arguments
+ // rdi -- constructor function
+ static Register registers[] = { rdi };
+ descriptor->register_param_count_ = 1;
- __ IncrementCounter(counters->fast_new_closure_try_optimized(), 1);
+ if (constant_stack_parameter_count != 0) {
+ // The stack parameter count covers the constructor pointer and a single argument.
+ descriptor->stack_parameter_count_ = rax;
+ }
+ descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
+ descriptor->register_params_ = registers;
+ descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kInternalArrayConstructor)->entry;
+}
- // rcx holds native context, rbx points to fixed array of 3-element entries
- // (native context, optimized code, literals).
- // The optimized code map must never be empty, so check the first elements.
- Label install_optimized;
- // Speculatively move code object into rdx.
- __ movq(rdx, FieldOperand(rbx, FixedArray::kHeaderSize + kPointerSize));
- __ cmpq(rcx, FieldOperand(rbx, FixedArray::kHeaderSize));
- __ j(equal, &install_optimized);
- // Iterate through the rest of the map backwards. rdx holds an index.
- Label loop;
- Label restore;
- __ movq(rdx, FieldOperand(rbx, FixedArray::kLengthOffset));
- __ SmiToInteger32(rdx, rdx);
- __ bind(&loop);
- // Do not double check first entry.
- __ cmpq(rdx, Immediate(SharedFunctionInfo::kEntryLength));
- __ j(equal, &restore);
- __ subq(rdx, Immediate(SharedFunctionInfo::kEntryLength)); // Skip an entry.
- __ cmpq(rcx, FieldOperand(rbx,
- rdx,
- times_pointer_size,
- FixedArray::kHeaderSize));
- __ j(not_equal, &loop, Label::kNear);
- // Hit: fetch the optimized code.
- __ movq(rdx, FieldOperand(rbx,
- rdx,
- times_pointer_size,
- FixedArray::kHeaderSize + 1 * kPointerSize));
-
- __ bind(&install_optimized);
- __ IncrementCounter(counters->fast_new_closure_install_optimized(), 1);
-
- // TODO(fschneider): Idea: store proper code pointers in the map and either
- // unmangle them on marking or do nothing as the whole map is discarded on
- // major GC anyway.
- __ lea(rdx, FieldOperand(rdx, Code::kHeaderSize));
- __ movq(FieldOperand(rax, JSFunction::kCodeEntryOffset), rdx);
-
- // Now link a function into a list of optimized functions.
- __ movq(rdx, ContextOperand(rcx, Context::OPTIMIZED_FUNCTIONS_LIST));
-
- __ movq(FieldOperand(rax, JSFunction::kNextFunctionLinkOffset), rdx);
- // No need for write barrier as JSFunction (rax) is in the new space.
-
- __ movq(ContextOperand(rcx, Context::OPTIMIZED_FUNCTIONS_LIST), rax);
- // Store JSFunction (rax) into rdx before issuing write barrier as
- // it clobbers all the registers passed.
- __ movq(rdx, rax);
- __ RecordWriteContextSlot(
- rcx,
- Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST),
- rdx,
- rbx,
- kDontSaveFPRegs);
+void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate, descriptor, 0);
+}
- // Return and remove the on-stack parameter.
- __ ret(1 * kPointerSize);
- __ bind(&restore);
- __ movq(rdx, Operand(rsp, 1 * kPointerSize));
- __ jmp(&install_unoptimized);
+void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate, descriptor, 1);
+}
- // Create a new closure through the slower runtime call.
- __ bind(&gc);
- __ pop(rcx); // Temporarily remove return address.
- __ pop(rdx);
- __ push(rsi);
- __ push(rdx);
- __ PushRoot(Heap::kFalseValueRootIndex);
- __ push(rcx); // Restore return address.
- __ TailCallRuntime(Runtime::kNewClosure, 3, 1);
+
+void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate, descriptor, -1);
+}
+
+
+void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 0);
+}
+
+
+void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 1);
+}
+
+
+void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(isolate, descriptor, -1);
+}
+
+
+void CompareNilICStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { rax };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(CompareNilIC_Miss);
+ descriptor->SetMissHandler(
+ ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate));
+}
+
+
+void ToBooleanStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { rax };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(ToBooleanIC_Miss);
+ descriptor->SetMissHandler(
+ ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate));
+}
+
+
+void StoreGlobalStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { rdx, rcx, rax };
+ descriptor->register_param_count_ = 3;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(StoreIC_MissFromStubFailure);
+}
+
+
+void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { rax, rbx, rcx, rdx };
+ descriptor->register_param_count_ = 4;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(ElementsTransitionAndStoreIC_Miss);
+}
+
+
+#define __ ACCESS_MASM(masm)
+
+
+void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
+ // Update the static counter each time a new code stub is generated.
+ Isolate* isolate = masm->isolate();
+ isolate->counters()->code_stubs()->Increment();
+
+ CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor(isolate);
+ int param_count = descriptor->register_param_count_;
+ {
+ // Call the runtime system in a fresh internal frame.
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ ASSERT(descriptor->register_param_count_ == 0 ||
+ rax.is(descriptor->register_params_[param_count - 1]));
+ // Push arguments
+ for (int i = 0; i < param_count; ++i) {
+ __ push(descriptor->register_params_[i]);
+ }
+ ExternalReference miss = descriptor->miss_handler();
+ __ CallExternalReference(miss, descriptor->register_param_count_);
+ }
+
+ __ Ret();
}
@@ -199,11 +333,12 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
// Try to allocate the context in new space.
Label gc;
int length = slots_ + Context::MIN_CONTEXT_SLOTS;
- __ AllocateInNewSpace((length * kPointerSize) + FixedArray::kHeaderSize,
- rax, rbx, rcx, &gc, TAG_OBJECT);
+ __ Allocate((length * kPointerSize) + FixedArray::kHeaderSize,
+ rax, rbx, rcx, &gc, TAG_OBJECT);
// Get the function from the stack.
- __ movq(rcx, Operand(rsp, 1 * kPointerSize));
+ StackArgumentsAccessor args(rsp, 1, ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ __ movq(rcx, args.GetArgumentOperand(0));
// Set up the object header.
__ LoadRoot(kScratchRegister, Heap::kFunctionContextMapRootIndex);
@@ -239,20 +374,20 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
// Stack layout on entry:
//
- // [rsp + (1 * kPointerSize)]: function
- // [rsp + (2 * kPointerSize)]: serialized scope info
+ // [rsp + (1 * kPointerSize)] : function
+ // [rsp + (2 * kPointerSize)] : serialized scope info
// Try to allocate the context in new space.
Label gc;
int length = slots_ + Context::MIN_CONTEXT_SLOTS;
- __ AllocateInNewSpace(FixedArray::SizeFor(length),
- rax, rbx, rcx, &gc, TAG_OBJECT);
+ __ Allocate(FixedArray::SizeFor(length),
+ rax, rbx, rcx, &gc, TAG_OBJECT);
// Get the function from the stack.
- __ movq(rcx, Operand(rsp, 1 * kPointerSize));
-
+ StackArgumentsAccessor args(rsp, 2, ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ __ movq(rcx, args.GetArgumentOperand(1));
// Get the serialized scope info from the stack.
- __ movq(rbx, Operand(rsp, 2 * kPointerSize));
+ __ movq(rbx, args.GetArgumentOperand(0));
// Set up the object header.
__ LoadRoot(kScratchRegister, Heap::kBlockContextMapRootIndex);
@@ -266,9 +401,8 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
Label after_sentinel;
__ JumpIfNotSmi(rcx, &after_sentinel, Label::kNear);
if (FLAG_debug_code) {
- const char* message = "Expected 0 as a Smi sentinel";
__ cmpq(rcx, Immediate(0));
- __ Assert(equal, message);
+ __ Assert(equal, kExpected0AsASmiSentinel);
}
__ movq(rcx, GlobalObjectOperand());
__ movq(rcx, FieldOperand(rcx, GlobalObject::kNativeContextOffset));
@@ -300,293 +434,12 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
}
-static void GenerateFastCloneShallowArrayCommon(
- MacroAssembler* masm,
- int length,
- FastCloneShallowArrayStub::Mode mode,
- Label* fail) {
- // Registers on entry:
- //
- // rcx: boilerplate literal array.
- ASSERT(mode != FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS);
-
- // All sizes here are multiples of kPointerSize.
- int elements_size = 0;
- if (length > 0) {
- elements_size = mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
- ? FixedDoubleArray::SizeFor(length)
- : FixedArray::SizeFor(length);
- }
- int size = JSArray::kSize + elements_size;
-
- // Allocate both the JS array and the elements array in one big
- // allocation. This avoids multiple limit checks.
- __ AllocateInNewSpace(size, rax, rbx, rdx, fail, TAG_OBJECT);
-
- // Copy the JS array part.
- for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
- if ((i != JSArray::kElementsOffset) || (length == 0)) {
- __ movq(rbx, FieldOperand(rcx, i));
- __ movq(FieldOperand(rax, i), rbx);
- }
- }
-
- if (length > 0) {
- // Get hold of the elements array of the boilerplate and setup the
- // elements pointer in the resulting object.
- __ movq(rcx, FieldOperand(rcx, JSArray::kElementsOffset));
- __ lea(rdx, Operand(rax, JSArray::kSize));
- __ movq(FieldOperand(rax, JSArray::kElementsOffset), rdx);
-
- // Copy the elements array.
- if (mode == FastCloneShallowArrayStub::CLONE_ELEMENTS) {
- for (int i = 0; i < elements_size; i += kPointerSize) {
- __ movq(rbx, FieldOperand(rcx, i));
- __ movq(FieldOperand(rdx, i), rbx);
- }
- } else {
- ASSERT(mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS);
- int i;
- for (i = 0; i < FixedDoubleArray::kHeaderSize; i += kPointerSize) {
- __ movq(rbx, FieldOperand(rcx, i));
- __ movq(FieldOperand(rdx, i), rbx);
- }
- while (i < elements_size) {
- __ movsd(xmm0, FieldOperand(rcx, i));
- __ movsd(FieldOperand(rdx, i), xmm0);
- i += kDoubleSize;
- }
- ASSERT(i == elements_size);
- }
- }
-}
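// Editorial sketch (not part of the patch): the size arithmetic used by the
// removed fast-clone helper above. The header and element sizes are passed in
// as stand-ins for JSArray::kSize, the FixedArray/FixedDoubleArray header
// size, kPointerSize and kDoubleSize.
static int ShallowArrayCloneSize(int length,
                                 bool double_elements,
                                 int js_array_size,
                                 int elements_header_size,
                                 int pointer_size,
                                 int double_size) {
  int elements_size = 0;
  if (length > 0) {
    // FixedArray::SizeFor / FixedDoubleArray::SizeFor: header plus one slot
    // per element.
    elements_size = elements_header_size +
        length * (double_elements ? double_size : pointer_size);
  }
  // The JSArray and its elements are carved out of a single allocation so that
  // only one new-space limit check is needed.
  return js_array_size + elements_size;
}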
-
-void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
- // Stack layout on entry:
- //
- // [rsp + kPointerSize]: constant elements.
- // [rsp + (2 * kPointerSize)]: literal index.
- // [rsp + (3 * kPointerSize)]: literals array.
-
- // Load boilerplate object into rcx and check if we need to create a
- // boilerplate.
- __ movq(rcx, Operand(rsp, 3 * kPointerSize));
- __ movq(rax, Operand(rsp, 2 * kPointerSize));
- SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
- __ movq(rcx,
- FieldOperand(rcx, index.reg, index.scale, FixedArray::kHeaderSize));
- __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
- Label slow_case;
- __ j(equal, &slow_case);
-
- FastCloneShallowArrayStub::Mode mode = mode_;
- // rcx is boilerplate object.
- Factory* factory = masm->isolate()->factory();
- if (mode == CLONE_ANY_ELEMENTS) {
- Label double_elements, check_fast_elements;
- __ movq(rbx, FieldOperand(rcx, JSArray::kElementsOffset));
- __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
- factory->fixed_cow_array_map());
- __ j(not_equal, &check_fast_elements);
- GenerateFastCloneShallowArrayCommon(masm, 0,
- COPY_ON_WRITE_ELEMENTS, &slow_case);
- __ ret(3 * kPointerSize);
-
- __ bind(&check_fast_elements);
- __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
- factory->fixed_array_map());
- __ j(not_equal, &double_elements);
- GenerateFastCloneShallowArrayCommon(masm, length_,
- CLONE_ELEMENTS, &slow_case);
- __ ret(3 * kPointerSize);
-
- __ bind(&double_elements);
- mode = CLONE_DOUBLE_ELEMENTS;
- // Fall through to generate the code to handle double elements.
- }
-
- if (FLAG_debug_code) {
- const char* message;
- Heap::RootListIndex expected_map_index;
- if (mode == CLONE_ELEMENTS) {
- message = "Expected (writable) fixed array";
- expected_map_index = Heap::kFixedArrayMapRootIndex;
- } else if (mode == CLONE_DOUBLE_ELEMENTS) {
- message = "Expected (writable) fixed double array";
- expected_map_index = Heap::kFixedDoubleArrayMapRootIndex;
- } else {
- ASSERT(mode == COPY_ON_WRITE_ELEMENTS);
- message = "Expected copy-on-write fixed array";
- expected_map_index = Heap::kFixedCOWArrayMapRootIndex;
- }
- __ push(rcx);
- __ movq(rcx, FieldOperand(rcx, JSArray::kElementsOffset));
- __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
- expected_map_index);
- __ Assert(equal, message);
- __ pop(rcx);
- }
-
- GenerateFastCloneShallowArrayCommon(masm, length_, mode, &slow_case);
- __ ret(3 * kPointerSize);
-
- __ bind(&slow_case);
- __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
-}
-
-
-void FastCloneShallowObjectStub::Generate(MacroAssembler* masm) {
- // Stack layout on entry:
- //
- // [rsp + kPointerSize]: object literal flags.
- // [rsp + (2 * kPointerSize)]: constant properties.
- // [rsp + (3 * kPointerSize)]: literal index.
- // [rsp + (4 * kPointerSize)]: literals array.
-
- // Load boilerplate object into rcx and check if we need to create a
- // boilerplate.
- Label slow_case;
- __ movq(rcx, Operand(rsp, 4 * kPointerSize));
- __ movq(rax, Operand(rsp, 3 * kPointerSize));
- SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
- __ movq(rcx,
- FieldOperand(rcx, index.reg, index.scale, FixedArray::kHeaderSize));
- __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
- __ j(equal, &slow_case);
-
- // Check that the boilerplate contains only fast properties and we can
- // statically determine the instance size.
- int size = JSObject::kHeaderSize + length_ * kPointerSize;
- __ movq(rax, FieldOperand(rcx, HeapObject::kMapOffset));
- __ movzxbq(rax, FieldOperand(rax, Map::kInstanceSizeOffset));
- __ cmpq(rax, Immediate(size >> kPointerSizeLog2));
- __ j(not_equal, &slow_case);
-
- // Allocate the JS object and copy header together with all in-object
- // properties from the boilerplate.
- __ AllocateInNewSpace(size, rax, rbx, rdx, &slow_case, TAG_OBJECT);
- for (int i = 0; i < size; i += kPointerSize) {
- __ movq(rbx, FieldOperand(rcx, i));
- __ movq(FieldOperand(rax, i), rbx);
- }
-
- // Return and remove the on-stack parameters.
- __ ret(4 * kPointerSize);
-
- __ bind(&slow_case);
- __ TailCallRuntime(Runtime::kCreateObjectLiteralShallow, 4, 1);
-}
-
-
-// The stub expects its argument on the stack and returns its result in tos_:
-// zero for false, and a non-zero value for true.
-void ToBooleanStub::Generate(MacroAssembler* masm) {
- // This stub overrides SometimesSetsUpAFrame() to return false. That means
- // we cannot call anything that could cause a GC from this stub.
- Label patch;
- const Register argument = rax;
- const Register map = rdx;
-
- if (!types_.IsEmpty()) {
- __ movq(argument, Operand(rsp, 1 * kPointerSize));
- }
-
- // undefined -> false
- CheckOddball(masm, UNDEFINED, Heap::kUndefinedValueRootIndex, false);
-
- // Boolean -> its value
- CheckOddball(masm, BOOLEAN, Heap::kFalseValueRootIndex, false);
- CheckOddball(masm, BOOLEAN, Heap::kTrueValueRootIndex, true);
-
- // 'null' -> false.
- CheckOddball(masm, NULL_TYPE, Heap::kNullValueRootIndex, false);
-
- if (types_.Contains(SMI)) {
- // Smis: 0 -> false, all other -> true
- Label not_smi;
- __ JumpIfNotSmi(argument, &not_smi, Label::kNear);
- // argument contains the correct return value already
- if (!tos_.is(argument)) {
- __ movq(tos_, argument);
- }
- __ ret(1 * kPointerSize);
- __ bind(&not_smi);
- } else if (types_.NeedsMap()) {
- // If we need a map later and have a Smi -> patch.
- __ JumpIfSmi(argument, &patch, Label::kNear);
- }
-
- if (types_.NeedsMap()) {
- __ movq(map, FieldOperand(argument, HeapObject::kMapOffset));
-
- if (types_.CanBeUndetectable()) {
- __ testb(FieldOperand(map, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- // Undetectable -> false.
- Label not_undetectable;
- __ j(zero, &not_undetectable, Label::kNear);
- __ Set(tos_, 0);
- __ ret(1 * kPointerSize);
- __ bind(&not_undetectable);
- }
- }
-
- if (types_.Contains(SPEC_OBJECT)) {
- // spec object -> true.
- Label not_js_object;
- __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
- __ j(below, &not_js_object, Label::kNear);
- // argument contains the correct return value already.
- if (!tos_.is(argument)) {
- __ Set(tos_, 1);
- }
- __ ret(1 * kPointerSize);
- __ bind(&not_js_object);
- }
-
- if (types_.Contains(STRING)) {
- // String value -> false iff empty.
- Label not_string;
- __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
- __ j(above_equal, &not_string, Label::kNear);
- __ movq(tos_, FieldOperand(argument, String::kLengthOffset));
- __ ret(1 * kPointerSize); // the string length is OK as the return value
- __ bind(&not_string);
- }
-
- if (types_.Contains(HEAP_NUMBER)) {
- // heap number -> false iff +0, -0, or NaN.
- Label not_heap_number, false_result;
- __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &not_heap_number, Label::kNear);
- __ xorps(xmm0, xmm0);
- __ ucomisd(xmm0, FieldOperand(argument, HeapNumber::kValueOffset));
- __ j(zero, &false_result, Label::kNear);
- // argument contains the correct return value already.
- if (!tos_.is(argument)) {
- __ Set(tos_, 1);
- }
- __ ret(1 * kPointerSize);
- __ bind(&false_result);
- __ Set(tos_, 0);
- __ ret(1 * kPointerSize);
- __ bind(&not_heap_number);
- }
-
- __ bind(&patch);
- GenerateTypeTransition(masm);
-}
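// Editorial sketch (not part of the patch): the ToBoolean decision tree the
// removed stub encodes, written over a simplified tagged-value model. The
// TaggedValue type and its fields are illustrative only; "undetectable" is
// modelled as its own kind rather than as a map bit.
struct TaggedValue {
  enum Kind { kUndefined, kNull, kBoolean, kSmi, kString, kHeapNumber,
              kUndetectable, kSpecObject } kind;
  bool boolean_value;
  int smi_value;
  int string_length;
  double number_value;
};

static bool ToBoolean(const TaggedValue& v) {
  switch (v.kind) {
    case TaggedValue::kUndefined:
    case TaggedValue::kNull:
    case TaggedValue::kUndetectable:  return false;
    case TaggedValue::kBoolean:       return v.boolean_value;
    case TaggedValue::kSmi:           return v.smi_value != 0;      // 0 -> false
    case TaggedValue::kSpecObject:    return true;
    case TaggedValue::kString:        return v.string_length != 0;  // "" -> false
    case TaggedValue::kHeapNumber:
      // false iff +0, -0 or NaN (NaN compares unequal to itself).
      return !(v.number_value == 0.0 || v.number_value != v.number_value);
  }
  return false;  // Unreachable; keeps compilers happy.
}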
-
-
void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
__ PushCallerSaved(save_doubles_);
const int argument_count = 1;
__ PrepareCallCFunction(argument_count);
-#ifdef _WIN64
- __ LoadAddress(rcx, ExternalReference::isolate_address());
-#else
- __ LoadAddress(rdi, ExternalReference::isolate_address());
-#endif
+ __ LoadAddress(arg_reg_1,
+ ExternalReference::isolate_address(masm->isolate()));
AllowExternalCallThatCantCauseGC scope(masm);
__ CallCFunction(
@@ -597,992 +450,118 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
}
-void ToBooleanStub::CheckOddball(MacroAssembler* masm,
- Type type,
- Heap::RootListIndex value,
- bool result) {
- const Register argument = rax;
- if (types_.Contains(type)) {
- // If we see an expected oddball, return its ToBoolean value tos_.
- Label different_value;
- __ CompareRoot(argument, value);
- __ j(not_equal, &different_value, Label::kNear);
- if (!result) {
- // If we have to return zero, there is no way around clearing tos_.
- __ Set(tos_, 0);
- } else if (!tos_.is(argument)) {
- // If we have to return non-zero, we can re-use the argument if it is the
- // same register as the result, because we never see Smi-zero here.
- __ Set(tos_, 1);
- }
- __ ret(1 * kPointerSize);
- __ bind(&different_value);
- }
-}
-
-
-void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) {
- __ pop(rcx); // Get return address, operand is now on top of stack.
- __ Push(Smi::FromInt(tos_.code()));
- __ Push(Smi::FromInt(types_.ToByte()));
- __ push(rcx); // Push return address.
- // Patch the caller to an appropriate specialized stub and return the
- // operation result to the caller of the stub.
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kToBoolean_Patch), masm->isolate()),
- 3,
- 1);
-}
-
-
class FloatingPointHelper : public AllStatic {
public:
+ enum ConvertUndefined {
+ CONVERT_UNDEFINED_TO_ZERO,
+ BAILOUT_ON_UNDEFINED
+ };
// Load the operands from rdx and rax into xmm0 and xmm1, as doubles.
// If the operands are not both numbers, jump to not_numbers.
// Leaves rdx and rax unchanged. SmiOperands assumes both are smis.
// NumberOperands assumes both are smis or heap numbers.
- static void LoadSSE2SmiOperands(MacroAssembler* masm);
- static void LoadSSE2NumberOperands(MacroAssembler* masm);
static void LoadSSE2UnknownOperands(MacroAssembler* masm,
Label* not_numbers);
-
- // Takes the operands in rdx and rax and loads them as integers in rax
- // and rcx.
- static void LoadAsIntegers(MacroAssembler* masm,
- Label* operand_conversion_failure,
- Register heap_number_map);
- // As above, but we know the operands to be numbers. In that case,
- // conversion can't fail.
- static void LoadNumbersAsIntegers(MacroAssembler* masm);
-
- // Tries to convert two values to smis losslessly.
- // This fails if either argument is not a Smi nor a HeapNumber,
- // or if it's a HeapNumber with a value that can't be converted
- // losslessly to a Smi. In that case, control transitions to the
- // on_not_smis label.
- // On success, either control goes to the on_success label (if one is
- // provided), or it falls through at the end of the code (if on_success
- // is NULL).
- // On success, both first and second hold Smi tagged values.
- // One of first or second must be non-Smi when entering.
- static void NumbersToSmis(MacroAssembler* masm,
- Register first,
- Register second,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* on_success,
- Label* on_not_smis);
};
-// Get the integer part of a heap number.
-// Overwrites the contents of rdi, rbx and rcx. Result cannot be rdi or rbx.
-void IntegerConvert(MacroAssembler* masm,
- Register result,
- Register source) {
- // Result may be rcx. If result and source are the same register, source will
- // be overwritten.
- ASSERT(!result.is(rdi) && !result.is(rbx));
- // TODO(lrn): When type info reaches here, if value is a 32-bit integer, use
- // cvttsd2si (32-bit version) directly.
- Register double_exponent = rbx;
- Register double_value = rdi;
- Label done, exponent_63_plus;
- // Get double and extract exponent.
- __ movq(double_value, FieldOperand(source, HeapNumber::kValueOffset));
- // Clear result preemptively, in case we need to return zero.
- __ xorl(result, result);
- __ movq(xmm0, double_value); // Save copy in xmm0 in case we need it there.
- // Double the value to remove the sign bit, shift the exponent down to the
- // least significant bits, and subtract the bias to get the unshifted,
- // unbiased exponent.
- __ lea(double_exponent, Operand(double_value, double_value, times_1, 0));
- __ shr(double_exponent, Immediate(64 - HeapNumber::kExponentBits));
- __ subl(double_exponent, Immediate(HeapNumber::kExponentBias));
- // Check whether the exponent is too big for a 63 bit unsigned integer.
- __ cmpl(double_exponent, Immediate(63));
- __ j(above_equal, &exponent_63_plus, Label::kNear);
- // Handle exponent range 0..62.
- __ cvttsd2siq(result, xmm0);
- __ jmp(&done, Label::kNear);
-
- __ bind(&exponent_63_plus);
- // Exponent negative or 63+.
- __ cmpl(double_exponent, Immediate(83));
- // If exponent negative or above 83, number contains no significant bits in
- // the range 0..2^31, so the result is zero, which the result register
- // already holds.
- __ j(above, &done, Label::kNear);
-
- // Exponent in range 63..83.
- // Mantissa * 2^exponent contains bits in the range 2^0..2^31, namely
- // the least significant exponent-52 bits.
-
- // Negate low bits of mantissa if value is negative.
- __ addq(double_value, double_value); // Move sign bit to carry.
- __ sbbl(result, result); // And convert carry to -1 in result register.
- // If the value is negative, do (double_value-1)^-1; otherwise
- // (double_value-0)^0.
- __ addl(double_value, result);
- // Do xor in opposite directions depending on where we want the result
- // (depending on whether result is rcx or not).
-
- if (result.is(rcx)) {
- __ xorl(double_value, result);
- // Left shift mantissa by (exponent - mantissabits - 1) to save the
- // bits that have positional values below 2^32 (the extra -1 comes from the
- // doubling done above to move the sign bit into the carry flag).
- __ leal(rcx, Operand(double_exponent, -HeapNumber::kMantissaBits - 1));
- __ shll_cl(double_value);
- __ movl(result, double_value);
- } else {
- // As the then-branch, but move double-value to result before shifting.
- __ xorl(result, double_value);
- __ leal(rcx, Operand(double_exponent, -HeapNumber::kMantissaBits - 1));
- __ shll_cl(result);
- }
-
- __ bind(&done);
-}
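// Editorial sketch (not part of the patch): a portable C++ rendering of what
// the callers of the removed IntegerConvert ultimately need -- the low 32
// integer bits of a double, truncated toward zero (ECMAScript ToInt32 for
// finite inputs; NaN and Infinity map to 0). It mirrors the exponent/mantissa
// handling described above rather than the exact two-path structure of the
// generated code.
#include <cstdint>   // Would live at the top of a real translation unit.
#include <cstring>

static int32_t TruncateDoubleToInt32(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  int exponent = static_cast<int>((bits >> 52) & 0x7FF) - 1023;
  // |value| < 1, or no integer bits below 2^32 remain (also covers NaN/Inf).
  if (exponent < 0 || exponent > 83) return 0;
  // Reconstruct the integer part of the magnitude from the mantissa plus the
  // implicit leading 1 bit.
  uint64_t mantissa = (bits & ((uint64_t{1} << 52) - 1)) | (uint64_t{1} << 52);
  uint64_t magnitude = exponent >= 52 ? mantissa << (exponent - 52)
                                      : mantissa >> (52 - exponent);
  uint32_t low = static_cast<uint32_t>(magnitude);
  // Negate modulo 2^32 if the sign bit was set.
  return static_cast<int32_t>((bits >> 63) ? 0u - low : low);
}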
-
-
-void UnaryOpStub::Generate(MacroAssembler* masm) {
- switch (operand_type_) {
- case UnaryOpIC::UNINITIALIZED:
- GenerateTypeTransition(masm);
- break;
- case UnaryOpIC::SMI:
- GenerateSmiStub(masm);
- break;
- case UnaryOpIC::HEAP_NUMBER:
- GenerateHeapNumberStub(masm);
- break;
- case UnaryOpIC::GENERIC:
- GenerateGenericStub(masm);
- break;
- }
-}
-
-
-void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
- __ pop(rcx); // Save return address.
-
- __ push(rax); // the operand
- __ Push(Smi::FromInt(op_));
- __ Push(Smi::FromInt(mode_));
- __ Push(Smi::FromInt(operand_type_));
-
- __ push(rcx); // Push return address.
-
- // Patch the caller to an appropriate specialized stub and return the
- // operation result to the caller of the stub.
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kUnaryOp_Patch), masm->isolate()), 4, 1);
-}
-
-
-// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
- switch (op_) {
- case Token::SUB:
- GenerateSmiStubSub(masm);
- break;
- case Token::BIT_NOT:
- GenerateSmiStubBitNot(masm);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void UnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) {
- Label slow;
- GenerateSmiCodeSub(masm, &slow, &slow, Label::kNear, Label::kNear);
- __ bind(&slow);
- GenerateTypeTransition(masm);
-}
-
-
-void UnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) {
- Label non_smi;
- GenerateSmiCodeBitNot(masm, &non_smi, Label::kNear);
- __ bind(&non_smi);
- GenerateTypeTransition(masm);
-}
-
-
-void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
- Label* non_smi,
- Label* slow,
- Label::Distance non_smi_near,
- Label::Distance slow_near) {
- Label done;
- __ JumpIfNotSmi(rax, non_smi, non_smi_near);
- __ SmiNeg(rax, rax, &done, Label::kNear);
- __ jmp(slow, slow_near);
- __ bind(&done);
- __ ret(0);
-}
-
-
-void UnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm,
- Label* non_smi,
- Label::Distance non_smi_near) {
- __ JumpIfNotSmi(rax, non_smi, non_smi_near);
- __ SmiNot(rax, rax);
- __ ret(0);
-}
-
-
-// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
- switch (op_) {
- case Token::SUB:
- GenerateHeapNumberStubSub(masm);
- break;
- case Token::BIT_NOT:
- GenerateHeapNumberStubBitNot(masm);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void UnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) {
- Label non_smi, slow, call_builtin;
- GenerateSmiCodeSub(masm, &non_smi, &call_builtin, Label::kNear);
- __ bind(&non_smi);
- GenerateHeapNumberCodeSub(masm, &slow);
- __ bind(&slow);
- GenerateTypeTransition(masm);
- __ bind(&call_builtin);
- GenerateGenericCodeFallback(masm);
-}
-
-
-void UnaryOpStub::GenerateHeapNumberStubBitNot(
- MacroAssembler* masm) {
- Label non_smi, slow;
- GenerateSmiCodeBitNot(masm, &non_smi, Label::kNear);
- __ bind(&non_smi);
- GenerateHeapNumberCodeBitNot(masm, &slow);
- __ bind(&slow);
- GenerateTypeTransition(masm);
-}
-
-
-void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
- Label* slow) {
- // Check if the operand is a heap number.
- __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, slow);
-
- // Operand is a float, negate its value by flipping the sign bit.
- if (mode_ == UNARY_OVERWRITE) {
- __ Set(kScratchRegister, 0x01);
- __ shl(kScratchRegister, Immediate(63));
- __ xor_(FieldOperand(rax, HeapNumber::kValueOffset), kScratchRegister);
- } else {
- // Allocate a heap number before calculating the answer,
- // so we don't have an untagged double around during GC.
- Label slow_allocate_heapnumber, heapnumber_allocated;
- __ AllocateHeapNumber(rcx, rbx, &slow_allocate_heapnumber);
- __ jmp(&heapnumber_allocated);
-
- __ bind(&slow_allocate_heapnumber);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(rax);
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- __ movq(rcx, rax);
- __ pop(rax);
- }
- __ bind(&heapnumber_allocated);
- // rcx: allocated 'empty' number
-
- // Copy the double value to the new heap number, flipping the sign.
- __ movq(rdx, FieldOperand(rax, HeapNumber::kValueOffset));
- __ Set(kScratchRegister, 0x01);
- __ shl(kScratchRegister, Immediate(63));
- __ xor_(rdx, kScratchRegister); // Flip sign.
- __ movq(FieldOperand(rcx, HeapNumber::kValueOffset), rdx);
- __ movq(rax, rcx);
- }
- __ ret(0);
-}
-
-
-void UnaryOpStub::GenerateHeapNumberCodeBitNot(MacroAssembler* masm,
- Label* slow) {
- // Check if the operand is a heap number.
- __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, slow);
-
- // Convert the heap number in rax to an untagged integer in rcx.
- IntegerConvert(masm, rax, rax);
-
- // Do the bitwise operation and smi tag the result.
- __ notl(rax);
- __ Integer32ToSmi(rax, rax);
- __ ret(0);
-}
-
-
-// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::GenerateGenericStub(MacroAssembler* masm) {
- switch (op_) {
- case Token::SUB:
- GenerateGenericStubSub(masm);
- break;
- case Token::BIT_NOT:
- GenerateGenericStubBitNot(masm);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void UnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) {
- Label non_smi, slow;
- GenerateSmiCodeSub(masm, &non_smi, &slow, Label::kNear);
- __ bind(&non_smi);
- GenerateHeapNumberCodeSub(masm, &slow);
- __ bind(&slow);
- GenerateGenericCodeFallback(masm);
-}
-
-
-void UnaryOpStub::GenerateGenericStubBitNot(MacroAssembler* masm) {
- Label non_smi, slow;
- GenerateSmiCodeBitNot(masm, &non_smi, Label::kNear);
- __ bind(&non_smi);
- GenerateHeapNumberCodeBitNot(masm, &slow);
- __ bind(&slow);
- GenerateGenericCodeFallback(masm);
-}
-
-
-void UnaryOpStub::GenerateGenericCodeFallback(MacroAssembler* masm) {
- // Handle the slow case by jumping to the JavaScript builtin.
- __ pop(rcx); // pop return address
- __ push(rax);
- __ push(rcx); // push return address
- switch (op_) {
- case Token::SUB:
- __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
- break;
- case Token::BIT_NOT:
- __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void UnaryOpStub::PrintName(StringStream* stream) {
- const char* op_name = Token::Name(op_);
- const char* overwrite_name = NULL; // Make g++ happy.
- switch (mode_) {
- case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break;
- case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break;
- }
- stream->Add("UnaryOpStub_%s_%s_%s",
- op_name,
- overwrite_name,
- UnaryOpIC::GetName(operand_type_));
-}
-
-
-void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
- __ pop(rcx); // Save return address.
- __ push(rdx);
- __ push(rax);
- // Left and right arguments are now on top.
- // Push this stub's key. Although the operation and the type info are
- // encoded into the key, the encoding is opaque, so push them too.
- __ Push(Smi::FromInt(MinorKey()));
- __ Push(Smi::FromInt(op_));
- __ Push(Smi::FromInt(operands_type_));
-
- __ push(rcx); // Push return address.
-
- // Patch the caller to an appropriate specialized stub and return the
- // operation result to the caller of the stub.
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
- masm->isolate()),
- 5,
- 1);
-}
-
-
-void BinaryOpStub::Generate(MacroAssembler* masm) {
- // Explicitly allow generation of nested stubs. It is safe here because
- // generation code does not use any raw pointers.
- AllowStubCallsScope allow_stub_calls(masm, true);
-
- switch (operands_type_) {
- case BinaryOpIC::UNINITIALIZED:
- GenerateTypeTransition(masm);
- break;
- case BinaryOpIC::SMI:
- GenerateSmiStub(masm);
- break;
- case BinaryOpIC::INT32:
- UNREACHABLE();
- // The int32 case is identical to the Smi case. We avoid creating this
- // ic state on x64.
- break;
- case BinaryOpIC::HEAP_NUMBER:
- GenerateHeapNumberStub(masm);
- break;
- case BinaryOpIC::ODDBALL:
- GenerateOddballStub(masm);
- break;
- case BinaryOpIC::BOTH_STRING:
- GenerateBothStringStub(masm);
- break;
- case BinaryOpIC::STRING:
- GenerateStringStub(masm);
- break;
- case BinaryOpIC::GENERIC:
- GenerateGeneric(masm);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void BinaryOpStub::PrintName(StringStream* stream) {
- const char* op_name = Token::Name(op_);
- const char* overwrite_name;
- switch (mode_) {
- case NO_OVERWRITE: overwrite_name = "Alloc"; break;
- case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
- case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
- default: overwrite_name = "UnknownOverwrite"; break;
- }
- stream->Add("BinaryOpStub_%s_%s_%s",
- op_name,
- overwrite_name,
- BinaryOpIC::GetName(operands_type_));
-}
-
+void DoubleToIStub::Generate(MacroAssembler* masm) {
+ Register input_reg = this->source();
+ Register final_result_reg = this->destination();
+ ASSERT(is_truncating());
-void BinaryOpStub::GenerateSmiCode(
- MacroAssembler* masm,
- Label* slow,
- SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
-
- // Arguments to BinaryOpStub are in rdx and rax.
- const Register left = rdx;
- const Register right = rax;
-
- // We only generate heapnumber answers for overflowing calculations
- // for the four basic arithmetic operations and logical right shift by 0.
- bool generate_inline_heapnumber_results =
- (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) &&
- (op_ == Token::ADD || op_ == Token::SUB ||
- op_ == Token::MUL || op_ == Token::DIV || op_ == Token::SHR);
-
- // Smi check of both operands. If op is BIT_OR, the check is delayed
- // until after the OR operation.
- Label not_smis;
- Label use_fp_on_smis;
- Label fail;
-
- if (op_ != Token::BIT_OR) {
- Comment smi_check_comment(masm, "-- Smi check arguments");
- __ JumpIfNotBothSmi(left, right, &not_smis);
- }
+ Label check_negative, process_64_bits, done;
- Label smi_values;
- __ bind(&smi_values);
- // Perform the operation.
- Comment perform_smi(masm, "-- Perform smi operation");
- switch (op_) {
- case Token::ADD:
- ASSERT(right.is(rax));
- __ SmiAdd(right, right, left, &use_fp_on_smis); // ADD is commutative.
- break;
-
- case Token::SUB:
- __ SmiSub(left, left, right, &use_fp_on_smis);
- __ movq(rax, left);
- break;
-
- case Token::MUL:
- ASSERT(right.is(rax));
- __ SmiMul(right, right, left, &use_fp_on_smis); // MUL is commutative.
- break;
-
- case Token::DIV:
- // SmiDiv will not accept left in rdx or right in rax.
- __ movq(rbx, rax);
- __ movq(rcx, rdx);
- __ SmiDiv(rax, rcx, rbx, &use_fp_on_smis);
- break;
-
- case Token::MOD:
- // SmiMod will not accept left in rdx or right in rax.
- __ movq(rbx, rax);
- __ movq(rcx, rdx);
- __ SmiMod(rax, rcx, rbx, &use_fp_on_smis);
- break;
-
- case Token::BIT_OR: {
- ASSERT(right.is(rax));
- __ SmiOrIfSmis(right, right, left, &not_smis); // BIT_OR is commutative.
- break;
- }
- case Token::BIT_XOR:
- ASSERT(right.is(rax));
- __ SmiXor(right, right, left); // BIT_XOR is commutative.
- break;
-
- case Token::BIT_AND:
- ASSERT(right.is(rax));
- __ SmiAnd(right, right, left); // BIT_AND is commutative.
- break;
-
- case Token::SHL:
- __ SmiShiftLeft(left, left, right);
- __ movq(rax, left);
- break;
-
- case Token::SAR:
- __ SmiShiftArithmeticRight(left, left, right);
- __ movq(rax, left);
- break;
-
- case Token::SHR:
- __ SmiShiftLogicalRight(left, left, right, &use_fp_on_smis);
- __ movq(rax, left);
- break;
+ int double_offset = offset();
- default:
- UNREACHABLE();
- }
+ // Account for return address and saved regs if input is rsp.
+ if (input_reg.is(rsp)) double_offset += 3 * kPointerSize;
- // 5. Emit return of result in rax. Some operations have registers pushed.
- __ ret(0);
+ MemOperand mantissa_operand(MemOperand(input_reg, double_offset));
+ MemOperand exponent_operand(MemOperand(input_reg,
+ double_offset + kDoubleSize / 2));
- if (use_fp_on_smis.is_linked()) {
- // 6. For some operations emit inline code to perform floating point
- // operations on known smis (e.g., if the result of the operation
- // overflowed the smi range).
- __ bind(&use_fp_on_smis);
- if (op_ == Token::DIV || op_ == Token::MOD) {
- // Restore left and right to rdx and rax.
- __ movq(rdx, rcx);
- __ movq(rax, rbx);
+ Register scratch1;
+ Register scratch_candidates[3] = { rbx, rdx, rdi };
+ for (int i = 0; i < 3; i++) {
+ scratch1 = scratch_candidates[i];
+ if (!final_result_reg.is(scratch1) && !input_reg.is(scratch1)) break;
}
- if (generate_inline_heapnumber_results) {
- __ AllocateHeapNumber(rcx, rbx, slow);
- Comment perform_float(masm, "-- Perform float operation on smis");
- if (op_ == Token::SHR) {
- __ SmiToInteger32(left, left);
- __ cvtqsi2sd(xmm0, left);
- } else {
- FloatingPointHelper::LoadSSE2SmiOperands(masm);
- switch (op_) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- }
- __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
- __ movq(rax, rcx);
- __ ret(0);
+ // Since we must use rcx for shifts below, use some other register (rax)
+ // to calculate the result if rcx is the requested return register.
+ Register result_reg = final_result_reg.is(rcx) ? rax : final_result_reg;
+ // Save rcx if it isn't the return register (and is therefore volatile), or,
+ // if it is the return register, save the temp register we use in its stead
+ // for the result.
+ Register save_reg = final_result_reg.is(rcx) ? rax : rcx;
+ __ push(scratch1);
+ __ push(save_reg);
+
+ bool stash_exponent_copy = !input_reg.is(rsp);
+ __ movl(scratch1, mantissa_operand);
+ __ movsd(xmm0, mantissa_operand);
+ __ movl(rcx, exponent_operand);
+ if (stash_exponent_copy) __ push(rcx);
+
+ __ andl(rcx, Immediate(HeapNumber::kExponentMask));
+ __ shrl(rcx, Immediate(HeapNumber::kExponentShift));
+ __ leal(result_reg, MemOperand(rcx, -HeapNumber::kExponentBias));
+ __ cmpl(result_reg, Immediate(HeapNumber::kMantissaBits));
+ __ j(below, &process_64_bits);
+
+ // Result is entirely in lower 32-bits of mantissa
+ int delta = HeapNumber::kExponentBias + Double::kPhysicalSignificandSize;
+ __ subl(rcx, Immediate(delta));
+ __ xorl(result_reg, result_reg);
+ __ cmpl(rcx, Immediate(31));
+ __ j(above, &done);
+ __ shll_cl(scratch1);
+ __ jmp(&check_negative);
+
+ __ bind(&process_64_bits);
+ __ cvttsd2siq(result_reg, xmm0);
+ __ jmp(&done, Label::kNear);
+
+ // If the double was negative, negate the integer result.
+ __ bind(&check_negative);
+ __ movl(result_reg, scratch1);
+ __ negl(result_reg);
+ if (stash_exponent_copy) {
+ __ cmpl(MemOperand(rsp, 0), Immediate(0));
} else {
- __ jmp(&fail);
+ __ cmpl(exponent_operand, Immediate(0));
}
- }
-
- // 7. Non-smi operands reach the end of the code generated by
- // GenerateSmiCode, and fall through to subsequent code,
- // with the operands in rdx and rax.
- // But first we check if non-smi values are HeapNumbers holding
- // values that could be smi.
- __ bind(&not_smis);
- Comment done_comment(masm, "-- Enter non-smi code");
- FloatingPointHelper::NumbersToSmis(masm, left, right, rbx, rdi, rcx,
- &smi_values, &fail);
- __ jmp(&smi_values);
- __ bind(&fail);
-}
+ __ cmovl(greater, result_reg, scratch1);
-
-void BinaryOpStub::GenerateFloatingPointCode(MacroAssembler* masm,
- Label* allocation_failure,
- Label* non_numeric_failure) {
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV: {
- FloatingPointHelper::LoadSSE2UnknownOperands(masm, non_numeric_failure);
-
- switch (op_) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- GenerateHeapResultAllocation(masm, allocation_failure);
- __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
- __ ret(0);
- break;
- }
- case Token::MOD: {
- // For MOD we jump to the allocation_failure label, to call runtime.
- __ jmp(allocation_failure);
- break;
- }
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR: {
- Label non_smi_shr_result;
- Register heap_number_map = r9;
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- FloatingPointHelper::LoadAsIntegers(masm, non_numeric_failure,
- heap_number_map);
- switch (op_) {
- case Token::BIT_OR: __ orl(rax, rcx); break;
- case Token::BIT_AND: __ andl(rax, rcx); break;
- case Token::BIT_XOR: __ xorl(rax, rcx); break;
- case Token::SAR: __ sarl_cl(rax); break;
- case Token::SHL: __ shll_cl(rax); break;
- case Token::SHR: {
- __ shrl_cl(rax);
- // Check if result is negative. This can only happen for a shift
- // by zero.
- __ testl(rax, rax);
- __ j(negative, &non_smi_shr_result);
- break;
- }
- default: UNREACHABLE();
- }
- STATIC_ASSERT(kSmiValueSize == 32);
- // Tag smi result and return.
- __ Integer32ToSmi(rax, rax);
- __ Ret();
-
- // Logical shift right can produce an unsigned int32 that is not
- // an int32, and so is not in the smi range. Allocate a heap number
- // in that case.
- if (op_ == Token::SHR) {
- __ bind(&non_smi_shr_result);
- Label allocation_failed;
- __ movl(rbx, rax); // rbx holds result value (uint32 value as int64).
- // Allocate heap number in new space.
- // Not using AllocateHeapNumber macro in order to reuse
- // already loaded heap_number_map.
- __ AllocateInNewSpace(HeapNumber::kSize,
- rax,
- rdx,
- no_reg,
- &allocation_failed,
- TAG_OBJECT);
- // Set the map.
- __ AssertRootValue(heap_number_map,
- Heap::kHeapNumberMapRootIndex,
- "HeapNumberMap register clobbered.");
- __ movq(FieldOperand(rax, HeapObject::kMapOffset),
- heap_number_map);
- __ cvtqsi2sd(xmm0, rbx);
- __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
- __ Ret();
-
- __ bind(&allocation_failed);
- // We need tagged values in rdx and rax for the following code,
- // not int32 in rax and rcx.
- __ Integer32ToSmi(rax, rcx);
- __ Integer32ToSmi(rdx, rbx);
- __ jmp(allocation_failure);
- }
- break;
+ // Restore registers
+ __ bind(&done);
+ if (stash_exponent_copy) {
+ __ addq(rsp, Immediate(kDoubleSize));
}
- default: UNREACHABLE(); break;
- }
- // No fall-through from this generated code.
- if (FLAG_debug_code) {
- __ Abort("Unexpected fall-through in "
- "BinaryStub::GenerateFloatingPointCode.");
- }
-}
-
-
-void BinaryOpStub::GenerateStringAddCode(MacroAssembler* masm) {
- ASSERT(op_ == Token::ADD);
- Label left_not_string, call_runtime;
-
- // Registers containing left and right operands respectively.
- Register left = rdx;
- Register right = rax;
-
- // Test if left operand is a string.
- __ JumpIfSmi(left, &left_not_string, Label::kNear);
- __ CmpObjectType(left, FIRST_NONSTRING_TYPE, rcx);
- __ j(above_equal, &left_not_string, Label::kNear);
- StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
- GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_left_stub);
-
- // Left operand is not a string, test right.
- __ bind(&left_not_string);
- __ JumpIfSmi(right, &call_runtime, Label::kNear);
- __ CmpObjectType(right, FIRST_NONSTRING_TYPE, rcx);
- __ j(above_equal, &call_runtime, Label::kNear);
-
- StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
- GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_right_stub);
-
- // Neither argument is a string.
- __ bind(&call_runtime);
-}
-
-
-void BinaryOpStub::GenerateCallRuntimeCode(MacroAssembler* masm) {
- GenerateRegisterArgsPush(masm);
- switch (op_) {
- case Token::ADD:
- __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
- break;
- case Token::SUB:
- __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
- break;
- case Token::MUL:
- __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
- break;
- case Token::DIV:
- __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
- break;
- case Token::MOD:
- __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
- break;
- case Token::BIT_OR:
- __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
- break;
- case Token::BIT_AND:
- __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
- break;
- case Token::BIT_XOR:
- __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
- break;
- case Token::SAR:
- __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
- break;
- case Token::SHL:
- __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
- break;
- case Token::SHR:
- __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
- Label call_runtime;
- if (result_type_ == BinaryOpIC::UNINITIALIZED ||
- result_type_ == BinaryOpIC::SMI) {
- // Only allow smi results.
- GenerateSmiCode(masm, NULL, NO_HEAPNUMBER_RESULTS);
- } else {
- // Allow heap number result and don't make a transition if a heap number
- // cannot be allocated.
- GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
- }
-
- // Code falls through if the result is not returned as either a smi or heap
- // number.
- GenerateTypeTransition(masm);
-
- if (call_runtime.is_linked()) {
- __ bind(&call_runtime);
- GenerateCallRuntimeCode(masm);
- }
-}
-
-
-void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
- ASSERT(operands_type_ == BinaryOpIC::STRING);
- ASSERT(op_ == Token::ADD);
- GenerateStringAddCode(masm);
- // Try to add arguments as strings, otherwise, transition to the generic
- // BinaryOpIC type.
- GenerateTypeTransition(masm);
-}
-
-
-void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
- Label call_runtime;
- ASSERT(operands_type_ == BinaryOpIC::BOTH_STRING);
- ASSERT(op_ == Token::ADD);
- // If both arguments are strings, call the string add stub.
- // Otherwise, do a transition.
-
- // Registers containing left and right operands respectively.
- Register left = rdx;
- Register right = rax;
-
- // Test if left operand is a string.
- __ JumpIfSmi(left, &call_runtime);
- __ CmpObjectType(left, FIRST_NONSTRING_TYPE, rcx);
- __ j(above_equal, &call_runtime);
-
- // Test if right operand is a string.
- __ JumpIfSmi(right, &call_runtime);
- __ CmpObjectType(right, FIRST_NONSTRING_TYPE, rcx);
- __ j(above_equal, &call_runtime);
-
- StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
- GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_stub);
-
- __ bind(&call_runtime);
- GenerateTypeTransition(masm);
-}
-
-
-void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
- Label call_runtime;
-
- if (op_ == Token::ADD) {
- // Handle string addition here, because it is the only operation
- // that does not do a ToNumber conversion on the operands.
- GenerateStringAddCode(masm);
- }
-
- // Convert oddball arguments to numbers.
- Label check, done;
- __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, &check, Label::kNear);
- if (Token::IsBitOp(op_)) {
- __ xor_(rdx, rdx);
- } else {
- __ LoadRoot(rdx, Heap::kNanValueRootIndex);
- }
- __ jmp(&done, Label::kNear);
- __ bind(&check);
- __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, &done, Label::kNear);
- if (Token::IsBitOp(op_)) {
- __ xor_(rax, rax);
- } else {
- __ LoadRoot(rax, Heap::kNanValueRootIndex);
- }
- __ bind(&done);
-
- GenerateHeapNumberStub(masm);
-}
-
-
-void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
- Label gc_required, not_number;
- GenerateFloatingPointCode(masm, &gc_required, &not_number);
-
- __ bind(&not_number);
- GenerateTypeTransition(masm);
-
- __ bind(&gc_required);
- GenerateCallRuntimeCode(masm);
-}
-
-
-void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
- Label call_runtime, call_string_add_or_runtime;
-
- GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
-
- GenerateFloatingPointCode(masm, &call_runtime, &call_string_add_or_runtime);
-
- __ bind(&call_string_add_or_runtime);
- if (op_ == Token::ADD) {
- GenerateStringAddCode(masm);
- }
-
- __ bind(&call_runtime);
- GenerateCallRuntimeCode(masm);
-}
-
-
-void BinaryOpStub::GenerateHeapResultAllocation(MacroAssembler* masm,
- Label* alloc_failure) {
- Label skip_allocation;
- OverwriteMode mode = mode_;
- switch (mode) {
- case OVERWRITE_LEFT: {
- // If the argument in rdx is already an object, we skip the
- // allocation of a heap number.
- __ JumpIfNotSmi(rdx, &skip_allocation);
- // Allocate a heap number for the result. Keep eax and edx intact
- // for the possible runtime call.
- __ AllocateHeapNumber(rbx, rcx, alloc_failure);
- // Now rdx can be overwritten losing one of the arguments as we are
- // now done and will not need it any more.
- __ movq(rdx, rbx);
- __ bind(&skip_allocation);
- // Use object in rdx as a result holder
- __ movq(rax, rdx);
- break;
+ if (!final_result_reg.is(result_reg)) {
+ ASSERT(final_result_reg.is(rcx));
+ __ movl(final_result_reg, result_reg);
}
- case OVERWRITE_RIGHT:
- // If the argument in rax is already an object, we skip the
- // allocation of a heap number.
- __ JumpIfNotSmi(rax, &skip_allocation);
- // Fall through!
- case NO_OVERWRITE:
- // Allocate a heap number for the result. Keep rax and rdx intact
- // for the possible runtime call.
- __ AllocateHeapNumber(rbx, rcx, alloc_failure);
- // Now rax can be overwritten losing one of the arguments as we are
- // now done and will not need it any more.
- __ movq(rax, rbx);
- __ bind(&skip_allocation);
- break;
- default: UNREACHABLE();
- }
-}
-
-
-void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
- __ pop(rcx);
- __ push(rdx);
- __ push(rax);
- __ push(rcx);
+ __ pop(save_reg);
+ __ pop(scratch1);
+ __ ret(0);
}
void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
// TAGGED case:
// Input:
- // rsp[8]: argument (should be number).
- // rsp[0]: return address.
+ // rsp[8] : argument (should be number).
+ // rsp[0] : return address.
// Output:
// rax: tagged double result.
// UNTAGGED case:
// Input:
- // rsp[0]: return address.
- // xmm1: untagged double input argument
+ // rsp[0] : return address.
+ // xmm1 : untagged double input argument
// Output:
- // xmm1: untagged double result.
+ // xmm1 : untagged double result.
Label runtime_call;
Label runtime_call_clear_stack;
@@ -1590,14 +569,16 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
const bool tagged = (argument_type_ == TAGGED);
if (tagged) {
Label input_not_smi, loaded;
+
// Test that rax is a number.
- __ movq(rax, Operand(rsp, kPointerSize));
+ StackArgumentsAccessor args(rsp, 1, ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ __ movq(rax, args.GetArgumentOperand(0));
__ JumpIfNotSmi(rax, &input_not_smi, Label::kNear);
// Input is a smi. Untag and load it onto the FPU stack.
// Then load the bits of the double into rbx.
__ SmiToInteger32(rax, rax);
__ subq(rsp, Immediate(kDoubleSize));
- __ cvtlsi2sd(xmm1, rax);
+ __ Cvtlsi2sd(xmm1, rax);
__ movsd(Operand(rsp, 0), xmm1);
__ movq(rbx, xmm1);
__ movq(rdx, xmm1);
@@ -1613,7 +594,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
// Input is a HeapNumber. Push it on the FPU stack and load its
// bits into rbx.
__ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
- __ movq(rbx, FieldOperand(rax, HeapNumber::kValueOffset));
+ __ MoveDouble(rbx, FieldOperand(rax, HeapNumber::kValueOffset));
__ movq(rdx, rbx);
__ bind(&loaded);
@@ -1652,7 +633,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
ExternalReference::transcendental_cache_array_address(masm->isolate());
__ movq(rax, cache_array);
int cache_array_index =
- type_ * sizeof(Isolate::Current()->transcendental_cache()->caches_[0]);
+ type_ * sizeof(masm->isolate()->transcendental_cache()->caches_[0]);
__ movq(rax, Operand(rax, cache_array_index));
// rax points to the cache for the type type_.
// If NULL, the cache hasn't been initialized yet, so go through runtime.
@@ -1668,7 +649,8 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
// Two uint_32's and a pointer per element.
- CHECK_EQ(16, static_cast<int>(elem2_start - elem_start));
+ CHECK_EQ(2 * kIntSize + 1 * kPointerSize,
+ static_cast<int>(elem2_start - elem_start));
CHECK_EQ(0, static_cast<int>(elem_in0 - elem_start));
CHECK_EQ(kIntSize, static_cast<int>(elem_in1 - elem_start));
CHECK_EQ(2 * kIntSize, static_cast<int>(elem_out - elem_start));
@@ -1873,118 +855,6 @@ void TranscendentalCacheStub::GenerateOperation(
}
-// Input: rdx, rax are the left and right objects of a bit op.
-// Output: rax, rcx are left and right integers for a bit op.
-void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm) {
- // Check float operands.
- Label done;
- Label rax_is_smi;
- Label rax_is_object;
- Label rdx_is_object;
-
- __ JumpIfNotSmi(rdx, &rdx_is_object);
- __ SmiToInteger32(rdx, rdx);
- __ JumpIfSmi(rax, &rax_is_smi);
-
- __ bind(&rax_is_object);
- IntegerConvert(masm, rcx, rax); // Uses rdi, rcx and rbx.
- __ jmp(&done);
-
- __ bind(&rdx_is_object);
- IntegerConvert(masm, rdx, rdx); // Uses rdi, rcx and rbx.
- __ JumpIfNotSmi(rax, &rax_is_object);
- __ bind(&rax_is_smi);
- __ SmiToInteger32(rcx, rax);
-
- __ bind(&done);
- __ movl(rax, rdx);
-}
-
-
-// Input: rdx, rax are the left and right objects of a bit op.
-// Output: rax, rcx are left and right integers for a bit op.
-// Jump to conversion_failure: rdx and rax are unchanged.
-void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
- Label* conversion_failure,
- Register heap_number_map) {
- // Check float operands.
- Label arg1_is_object, check_undefined_arg1;
- Label arg2_is_object, check_undefined_arg2;
- Label load_arg2, done;
-
- __ JumpIfNotSmi(rdx, &arg1_is_object);
- __ SmiToInteger32(r8, rdx);
- __ jmp(&load_arg2);
-
- // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
- __ bind(&check_undefined_arg1);
- __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, conversion_failure);
- __ Set(r8, 0);
- __ jmp(&load_arg2);
-
- __ bind(&arg1_is_object);
- __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), heap_number_map);
- __ j(not_equal, &check_undefined_arg1);
- // Get the untagged integer version of the rdx heap number in rcx.
- IntegerConvert(masm, r8, rdx);
-
- // Here r8 has the untagged integer, rax has a Smi or a heap number.
- __ bind(&load_arg2);
- // Test if arg2 is a Smi.
- __ JumpIfNotSmi(rax, &arg2_is_object);
- __ SmiToInteger32(rcx, rax);
- __ jmp(&done);
-
- // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
- __ bind(&check_undefined_arg2);
- __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, conversion_failure);
- __ Set(rcx, 0);
- __ jmp(&done);
-
- __ bind(&arg2_is_object);
- __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), heap_number_map);
- __ j(not_equal, &check_undefined_arg2);
- // Get the untagged integer version of the rax heap number in rcx.
- IntegerConvert(masm, rcx, rax);
- __ bind(&done);
- __ movl(rax, r8);
-}
-
-
-void FloatingPointHelper::LoadSSE2SmiOperands(MacroAssembler* masm) {
- __ SmiToInteger32(kScratchRegister, rdx);
- __ cvtlsi2sd(xmm0, kScratchRegister);
- __ SmiToInteger32(kScratchRegister, rax);
- __ cvtlsi2sd(xmm1, kScratchRegister);
-}
-
-
-void FloatingPointHelper::LoadSSE2NumberOperands(MacroAssembler* masm) {
- Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, done;
- // Load operand in rdx into xmm0.
- __ JumpIfSmi(rdx, &load_smi_rdx);
- __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
- // Load operand in rax into xmm1.
- __ JumpIfSmi(rax, &load_smi_rax);
- __ bind(&load_nonsmi_rax);
- __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
- __ jmp(&done);
-
- __ bind(&load_smi_rdx);
- __ SmiToInteger32(kScratchRegister, rdx);
- __ cvtlsi2sd(xmm0, kScratchRegister);
- __ JumpIfNotSmi(rax, &load_nonsmi_rax);
-
- __ bind(&load_smi_rax);
- __ SmiToInteger32(kScratchRegister, rax);
- __ cvtlsi2sd(xmm1, kScratchRegister);
-
- __ bind(&done);
-}
-
-
void FloatingPointHelper::LoadSSE2UnknownOperands(MacroAssembler* masm,
Label* not_numbers) {
Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, load_float_rax, done;
@@ -2005,75 +875,18 @@ void FloatingPointHelper::LoadSSE2UnknownOperands(MacroAssembler* masm,
__ bind(&load_smi_rdx);
__ SmiToInteger32(kScratchRegister, rdx);
- __ cvtlsi2sd(xmm0, kScratchRegister);
+ __ Cvtlsi2sd(xmm0, kScratchRegister);
__ JumpIfNotSmi(rax, &load_nonsmi_rax);
__ bind(&load_smi_rax);
__ SmiToInteger32(kScratchRegister, rax);
- __ cvtlsi2sd(xmm1, kScratchRegister);
+ __ Cvtlsi2sd(xmm1, kScratchRegister);
__ bind(&done);
}
-void FloatingPointHelper::NumbersToSmis(MacroAssembler* masm,
- Register first,
- Register second,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* on_success,
- Label* on_not_smis) {
- Register heap_number_map = scratch3;
- Register smi_result = scratch1;
- Label done;
-
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-
- Label first_smi;
- __ JumpIfSmi(first, &first_smi, Label::kNear);
- __ cmpq(FieldOperand(first, HeapObject::kMapOffset), heap_number_map);
- __ j(not_equal, on_not_smis);
- // Convert HeapNumber to smi if possible.
- __ movsd(xmm0, FieldOperand(first, HeapNumber::kValueOffset));
- __ movq(scratch2, xmm0);
- __ cvttsd2siq(smi_result, xmm0);
- // Check if conversion was successful by converting back and
- // comparing to the original double's bits.
- __ cvtlsi2sd(xmm1, smi_result);
- __ movq(kScratchRegister, xmm1);
- __ cmpq(scratch2, kScratchRegister);
- __ j(not_equal, on_not_smis);
- __ Integer32ToSmi(first, smi_result);
-
- __ JumpIfSmi(second, (on_success != NULL) ? on_success : &done);
- __ bind(&first_smi);
- __ AssertNotSmi(second);
- __ cmpq(FieldOperand(second, HeapObject::kMapOffset), heap_number_map);
- __ j(not_equal, on_not_smis);
- // Convert second to smi, if possible.
- __ movsd(xmm0, FieldOperand(second, HeapNumber::kValueOffset));
- __ movq(scratch2, xmm0);
- __ cvttsd2siq(smi_result, xmm0);
- __ cvtlsi2sd(xmm1, smi_result);
- __ movq(kScratchRegister, xmm1);
- __ cmpq(scratch2, kScratchRegister);
- __ j(not_equal, on_not_smis);
- __ Integer32ToSmi(second, smi_result);
- if (on_success != NULL) {
- __ jmp(on_success);
- } else {
- __ bind(&done);
- }
-}
-
-
void MathPowStub::Generate(MacroAssembler* masm) {
- // Choose register conforming to calling convention (when bailing out).
-#ifdef _WIN64
const Register exponent = rdx;
-#else
- const Register exponent = rdi;
-#endif
const Register base = rax;
const Register scratch = rcx;
const XMMRegister double_result = xmm3;
@@ -2085,15 +898,16 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// Save 1 in double_result - we need this several times later on.
__ movq(scratch, Immediate(1));
- __ cvtlsi2sd(double_result, scratch);
+ __ Cvtlsi2sd(double_result, scratch);
if (exponent_type_ == ON_STACK) {
Label base_is_smi, unpack_exponent;
// The exponent and base are supplied as arguments on the stack.
// This can only happen if the stub is called from non-optimized code.
// Load input parameters from stack.
- __ movq(base, Operand(rsp, 2 * kPointerSize));
- __ movq(exponent, Operand(rsp, 1 * kPointerSize));
+ StackArgumentsAccessor args(rsp, 2, ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ __ movq(base, args.GetArgumentOperand(0));
+ __ movq(exponent, args.GetArgumentOperand(1));
__ JumpIfSmi(base, &base_is_smi, Label::kNear);
__ CompareRoot(FieldOperand(base, HeapObject::kMapOffset),
Heap::kHeapNumberMapRootIndex);
@@ -2104,7 +918,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ bind(&base_is_smi);
__ SmiToInteger32(base, base);
- __ cvtlsi2sd(double_base, base);
+ __ Cvtlsi2sd(double_base, base);
__ bind(&unpack_exponent);
__ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
@@ -2126,16 +940,17 @@ void MathPowStub::Generate(MacroAssembler* masm) {
}
if (exponent_type_ != INTEGER) {
- Label fast_power;
+ Label fast_power, try_arithmetic_simplification;
// Detect integer exponents stored as double.
+ __ DoubleToI(exponent, double_exponent, double_scratch,
+ TREAT_MINUS_ZERO_AS_ZERO, &try_arithmetic_simplification);
+ __ jmp(&int_exponent);
+
+ __ bind(&try_arithmetic_simplification);
__ cvttsd2si(exponent, double_exponent);
// Skip to runtime if possibly NaN (indicated by the indefinite integer).
__ cmpl(exponent, Immediate(0x80000000u));
__ j(equal, &call_runtime);
- __ cvtlsi2sd(double_scratch, exponent);
- // Already ruled out NaNs for exponent.
- __ ucomisd(double_exponent, double_scratch);
- __ j(equal, &int_exponent);
if (exponent_type_ == ON_STACK) {
// Detect square root case. Crankshaft detects constant +/-0.5 at
@@ -2144,7 +959,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
Label continue_sqrt, continue_rsqrt, not_plus_half;
// Test for 0.5.
// Load double_scratch with 0.5.
- __ movq(scratch, V8_UINT64_C(0x3FE0000000000000), RelocInfo::NONE);
+ __ movq(scratch, V8_UINT64_C(0x3FE0000000000000), RelocInfo::NONE64);
__ movq(double_scratch, scratch);
// Already ruled out NaNs for exponent.
__ ucomisd(double_scratch, double_exponent);
@@ -2154,7 +969,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
// According to IEEE-754, double-precision -Infinity has the highest
// 12 bits set and the lowest 52 bits cleared.
- __ movq(scratch, V8_UINT64_C(0xFFF0000000000000), RelocInfo::NONE);
+ __ movq(scratch, V8_UINT64_C(0xFFF0000000000000), RelocInfo::NONE64);
__ movq(double_scratch, scratch);
__ ucomisd(double_scratch, double_base);
// Comparing -Infinity with NaN results in "unordered", which sets the
@@ -2186,7 +1001,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// case of Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
// According to IEEE-754, double-precision -Infinity has the highest
// 12 bits set and the lowest 52 bits cleared.
- __ movq(scratch, V8_UINT64_C(0xFFF0000000000000), RelocInfo::NONE);
+ __ movq(scratch, V8_UINT64_C(0xFFF0000000000000), RelocInfo::NONE64);
__ movq(double_scratch, scratch);
__ ucomisd(double_scratch, double_base);
// Comparing -Infinity with NaN results in "unordered", which sets the
@@ -2229,7 +1044,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// F2XM1 calculates 2^st(0) - 1 for -1 < st(0) < 1
__ f2xm1(); // 2^(X-rnd(X)) - 1, rnd(X)
__ fld1(); // 1, 2^(X-rnd(X)) - 1, rnd(X)
- __ faddp(1); // 1, 2^(X-rnd(X)), rnd(X)
+ __ faddp(1); // 2^(X-rnd(X)), rnd(X)
// FSCALE calculates st(0) * 2^st(1)
__ fscale(); // 2^X, rnd(X)
__ fstp(1);
@@ -2292,7 +1107,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// and may not have contained the exponent value in the first place when the
// input was a smi. We reset it with exponent value before bailing out.
__ j(not_equal, &done);
- __ cvtlsi2sd(double_exponent, exponent);
+ __ Cvtlsi2sd(double_exponent, exponent);
// Returning or bailing out.
Counters* counters = masm->isolate()->counters();
@@ -2302,7 +1117,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
// The stub is called from non-optimized code, which expects the result
- // as heap number in eax.
+ // as heap number in rax.
__ bind(&done);
__ AllocateHeapNumber(rax, rcx, &call_runtime);
__ movsd(FieldOperand(rax, HeapNumber::kValueOffset), double_result);
@@ -2331,14 +1146,131 @@ void MathPowStub::Generate(MacroAssembler* masm) {
}
+void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
+ Label miss;
+ Register receiver;
+ if (kind() == Code::KEYED_LOAD_IC) {
+ // ----------- S t a t e -------------
+ // -- rax : key
+ // -- rdx : receiver
+ // -- rsp[0] : return address
+ // -----------------------------------
+ __ Cmp(rax, masm->isolate()->factory()->prototype_string());
+ __ j(not_equal, &miss);
+ receiver = rdx;
+ } else {
+ ASSERT(kind() == Code::LOAD_IC);
+ // ----------- S t a t e -------------
+ // -- rax : receiver
+ // -- rcx : name
+ // -- rsp[0] : return address
+ // -----------------------------------
+ receiver = rax;
+ }
+
+ StubCompiler::GenerateLoadFunctionPrototype(masm, receiver, r8, r9, &miss);
+ __ bind(&miss);
+ StubCompiler::TailCallBuiltin(
+ masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
+}
+
+
+void StringLengthStub::Generate(MacroAssembler* masm) {
+ Label miss;
+ Register receiver;
+ if (kind() == Code::KEYED_LOAD_IC) {
+ // ----------- S t a t e -------------
+ // -- rax : key
+ // -- rdx : receiver
+ // -- rsp[0] : return address
+ // -----------------------------------
+ __ Cmp(rax, masm->isolate()->factory()->length_string());
+ __ j(not_equal, &miss);
+ receiver = rdx;
+ } else {
+ ASSERT(kind() == Code::LOAD_IC);
+ // ----------- S t a t e -------------
+ // -- rax : receiver
+ // -- rcx : name
+ // -- rsp[0] : return address
+ // -----------------------------------
+ receiver = rax;
+ }
+
+ StubCompiler::GenerateLoadStringLength(masm, receiver, r8, r9, &miss);
+ __ bind(&miss);
+ StubCompiler::TailCallBuiltin(
+ masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
+}
+
+
+void StoreArrayLengthStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : value
+ // -- rcx : key
+ // -- rdx : receiver
+ // -- rsp[0] : return address
+ // -----------------------------------
+ //
+ // This accepts as a receiver anything JSArray::SetElementsLength accepts
+ // (currently anything except for external arrays which means anything with
+ // elements of FixedArray type). Value must be a number, but only smis are
+ // accepted as the most common case.
+
+ Label miss;
+
+ Register receiver = rdx;
+ Register value = rax;
+ Register scratch = rbx;
+ if (kind() == Code::KEYED_STORE_IC) {
+ __ Cmp(rcx, masm->isolate()->factory()->length_string());
+ __ j(not_equal, &miss);
+ }
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, &miss);
+
+ // Check that the object is a JS array.
+ __ CmpObjectType(receiver, JS_ARRAY_TYPE, scratch);
+ __ j(not_equal, &miss);
+
+ // Check that elements are FixedArray.
+ // We rely on StoreIC_ArrayLength below to deal with all types of
+ // fast elements (including COW).
+ __ movq(scratch, FieldOperand(receiver, JSArray::kElementsOffset));
+ __ CmpObjectType(scratch, FIXED_ARRAY_TYPE, scratch);
+ __ j(not_equal, &miss);
+
+ // Check that the array has fast properties, otherwise the length
+ // property might have been redefined.
+ __ movq(scratch, FieldOperand(receiver, JSArray::kPropertiesOffset));
+ __ CompareRoot(FieldOperand(scratch, FixedArray::kMapOffset),
+ Heap::kHashTableMapRootIndex);
+ __ j(equal, &miss);
+
+ // Check that value is a smi.
+ __ JumpIfNotSmi(value, &miss);
+
+ // Prepare tail call to StoreIC_ArrayLength.
+ __ PopReturnAddressTo(scratch);
+ __ push(receiver);
+ __ push(value);
+ __ PushReturnAddressFrom(scratch);
+
+ ExternalReference ref =
+ ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength), masm->isolate());
+ __ TailCallExternalReference(ref, 2, 1);
+
+ __ bind(&miss);
+
+ StubCompiler::TailCallBuiltin(
+ masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
+}
+
+
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// The key is in rdx and the parameter count is in rax.
- // The displacement is used for skipping the frame pointer on the
- // stack. It is the offset of the last parameter (if any) relative
- // to the frame pointer.
- static const int kDisplacement = 1 * kPointerSize;
-
// Check that the key is a smi.
Label slow;
__ JumpIfNotSmi(rdx, &slow);
@@ -2360,10 +1292,10 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
__ j(above_equal, &slow);
// Read the argument from the stack and return it.
- SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
- __ lea(rbx, Operand(rbp, index.reg, index.scale, 0));
- index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2);
- __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement));
+ __ SmiSub(rax, rax, rdx);
+ __ SmiToInteger32(rax, rax);
+ StackArgumentsAccessor args(rbp, rax, ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ __ movq(rax, args.GetArgumentOperand(0));
__ Ret();
// Arguments adaptor case: Check index against actual arguments
@@ -2375,26 +1307,27 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
__ j(above_equal, &slow);
// Read the argument from the stack and return it.
- index = masm->SmiToIndex(rax, rcx, kPointerSizeLog2);
- __ lea(rbx, Operand(rbx, index.reg, index.scale, 0));
- index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2);
- __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement));
+ __ SmiSub(rcx, rcx, rdx);
+ __ SmiToInteger32(rcx, rcx);
+ StackArgumentsAccessor adaptor_args(rbx, rcx,
+ ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ __ movq(rax, adaptor_args.GetArgumentOperand(0));
__ Ret();
// Slow-case: Handle non-smi or out-of-bounds access to arguments
// by calling the runtime system.
__ bind(&slow);
- __ pop(rbx); // Return address.
+ __ PopReturnAddressTo(rbx);
__ push(rdx);
- __ push(rbx);
+ __ PushReturnAddressFrom(rbx);
__ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
}
void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// Stack layout:
- // rsp[0] : return address
- // rsp[8] : number of parameters (tagged)
+ // rsp[0] : return address
+ // rsp[8] : number of parameters (tagged)
// rsp[16] : receiver displacement
// rsp[24] : function
// Registers used over the whole function:
@@ -2403,7 +1336,8 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
Factory* factory = masm->isolate()->factory();
- __ SmiToInteger64(rbx, Operand(rsp, 1 * kPointerSize));
+ StackArgumentsAccessor args(rsp, 3, ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ __ SmiToInteger64(rbx, args.GetArgumentOperand(2));
// rbx = parameter count (untagged)
// Check if the calling frame is an arguments adaptor frame.
@@ -2425,7 +1359,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
ArgumentsAdaptorFrameConstants::kLengthOffset));
__ lea(rdx, Operand(rdx, rcx, times_pointer_size,
StandardFrameConstants::kCallerSPOffset));
- __ movq(Operand(rsp, 2 * kPointerSize), rdx);
+ __ movq(args.GetArgumentOperand(1), rdx);
// rbx = parameter count (untagged)
// rcx = argument count (untagged)
@@ -2454,7 +1388,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ addq(r8, Immediate(Heap::kArgumentsObjectSize));
// Do the allocation of all three objects in one go.
- __ AllocateInNewSpace(r8, rax, rdx, rdi, &runtime, TAG_OBJECT);
+ __ Allocate(r8, rax, rdx, rdi, &runtime, TAG_OBJECT);
// rax = address of new object(s) (tagged)
// rcx = argument count (untagged)
@@ -2486,7 +1420,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// Set up the callee in-object property.
STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
- __ movq(rdx, Operand(rsp, 3 * kPointerSize));
+ __ movq(rdx, args.GetArgumentOperand(0));
__ movq(FieldOperand(rax, JSObject::kHeaderSize +
Heap::kArgumentsCalleeIndex * kPointerSize),
rdx);
@@ -2537,7 +1471,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// Load tagged parameter count into r9.
__ Integer32ToSmi(r9, rbx);
__ Move(r8, Smi::FromInt(Context::MIN_CONTEXT_SLOTS));
- __ addq(r8, Operand(rsp, 1 * kPointerSize));
+ __ addq(r8, args.GetArgumentOperand(2));
__ subq(r8, r9);
__ Move(r11, factory->the_hole_value());
__ movq(rdx, rdi);
@@ -2576,7 +1510,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
Label arguments_loop, arguments_test;
__ movq(r8, rbx);
- __ movq(rdx, Operand(rsp, 2 * kPointerSize));
+ __ movq(rdx, args.GetArgumentOperand(1));
// Untag rcx for the loop below.
__ SmiToInteger64(rcx, rcx);
__ lea(kScratchRegister, Operand(r8, times_pointer_size, 0));
@@ -2603,16 +1537,16 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// rcx = argument count (untagged)
__ bind(&runtime);
__ Integer32ToSmi(rcx, rcx);
- __ movq(Operand(rsp, 1 * kPointerSize), rcx); // Patch argument count.
+ __ movq(args.GetArgumentOperand(2), rcx); // Patch argument count.
__ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
}
void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
- // esp[0] : return address
- // esp[8] : number of parameters
- // esp[16] : receiver displacement
- // esp[24] : function
+ // rsp[0] : return address
+ // rsp[8] : number of parameters
+ // rsp[16] : receiver displacement
+ // rsp[24] : function
// Check if the calling frame is an arguments adaptor frame.
Label runtime;
@@ -2622,12 +1556,13 @@ void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
__ j(not_equal, &runtime);
// Patch the arguments.length and the parameters pointer.
+ StackArgumentsAccessor args(rsp, 3, ARGUMENTS_DONT_CONTAIN_RECEIVER);
__ movq(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ movq(Operand(rsp, 1 * kPointerSize), rcx);
+ __ movq(args.GetArgumentOperand(2), rcx);
__ SmiToInteger64(rcx, rcx);
__ lea(rdx, Operand(rdx, rcx, times_pointer_size,
StandardFrameConstants::kCallerSPOffset));
- __ movq(Operand(rsp, 2 * kPointerSize), rdx);
+ __ movq(args.GetArgumentOperand(1), rdx);
__ bind(&runtime);
__ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
@@ -2635,8 +1570,8 @@ void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
- // rsp[0] : return address
- // rsp[8] : number of parameters
+ // rsp[0] : return address
+ // rsp[8] : number of parameters
// rsp[16] : receiver displacement
// rsp[24] : function
@@ -2648,18 +1583,19 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ j(equal, &adaptor_frame);
// Get the length from the frame.
- __ movq(rcx, Operand(rsp, 1 * kPointerSize));
+ StackArgumentsAccessor args(rsp, 3, ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ __ movq(rcx, args.GetArgumentOperand(2));
__ SmiToInteger64(rcx, rcx);
__ jmp(&try_allocate);
// Patch the arguments.length and the parameters pointer.
__ bind(&adaptor_frame);
__ movq(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ movq(Operand(rsp, 1 * kPointerSize), rcx);
+ __ movq(args.GetArgumentOperand(2), rcx);
__ SmiToInteger64(rcx, rcx);
__ lea(rdx, Operand(rdx, rcx, times_pointer_size,
StandardFrameConstants::kCallerSPOffset));
- __ movq(Operand(rsp, 2 * kPointerSize), rdx);
+ __ movq(args.GetArgumentOperand(1), rdx);
// Try the new space allocation. Start out with computing the size of
// the arguments object and the elements array.
@@ -2672,7 +1608,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ addq(rcx, Immediate(Heap::kArgumentsObjectSizeStrict));
// Do the allocation of both objects in one go.
- __ AllocateInNewSpace(rcx, rax, rdx, rbx, &runtime, TAG_OBJECT);
+ __ Allocate(rcx, rax, rdx, rbx, &runtime, TAG_OBJECT);
// Get the arguments boilerplate from the current native context.
__ movq(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
@@ -2689,7 +1625,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Get the length (smi tagged) and set that as an in-object property too.
STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
- __ movq(rcx, Operand(rsp, 1 * kPointerSize));
+ __ movq(rcx, args.GetArgumentOperand(2));
__ movq(FieldOperand(rax, JSObject::kHeaderSize +
Heap::kArgumentsLengthIndex * kPointerSize),
rcx);
@@ -2700,7 +1636,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ j(zero, &done);
// Get the parameters pointer from the stack.
- __ movq(rdx, Operand(rsp, 2 * kPointerSize));
+ __ movq(rdx, args.GetArgumentOperand(1));
// Set up the elements pointer in the allocated arguments object and
// initialize the header in the elements fixed array.
@@ -2743,17 +1679,22 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
#else // V8_INTERPRETED_REGEXP
// Stack frame on entry.
- // rsp[0]: return address
- // rsp[8]: last_match_info (expected JSArray)
- // rsp[16]: previous index
- // rsp[24]: subject string
- // rsp[32]: JSRegExp object
-
- static const int kLastMatchInfoOffset = 1 * kPointerSize;
- static const int kPreviousIndexOffset = 2 * kPointerSize;
- static const int kSubjectOffset = 3 * kPointerSize;
- static const int kJSRegExpOffset = 4 * kPointerSize;
-
+ // rsp[0] : return address
+ // rsp[8] : last_match_info (expected JSArray)
+ // rsp[16] : previous index
+ // rsp[24] : subject string
+ // rsp[32] : JSRegExp object
+
+ enum RegExpExecStubArgumentIndices {
+ JS_REG_EXP_OBJECT_ARGUMENT_INDEX,
+ SUBJECT_STRING_ARGUMENT_INDEX,
+ PREVIOUS_INDEX_ARGUMENT_INDEX,
+ LAST_MATCH_INFO_ARGUMENT_INDEX,
+ REG_EXP_EXEC_ARGUMENT_COUNT
+ };
+
+ StackArgumentsAccessor args(rsp, REG_EXP_EXEC_ARGUMENT_COUNT,
+ ARGUMENTS_DONT_CONTAIN_RECEIVER);
Label runtime;
// Ensure that a RegExp stack is allocated.
Isolate* isolate = masm->isolate();
@@ -2766,18 +1707,19 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ j(zero, &runtime);
// Check that the first argument is a JSRegExp object.
- __ movq(rax, Operand(rsp, kJSRegExpOffset));
+ __ movq(rax, args.GetArgumentOperand(JS_REG_EXP_OBJECT_ARGUMENT_INDEX));
__ JumpIfSmi(rax, &runtime);
__ CmpObjectType(rax, JS_REGEXP_TYPE, kScratchRegister);
__ j(not_equal, &runtime);
+
// Check that the RegExp has been compiled (data contains a fixed array).
__ movq(rax, FieldOperand(rax, JSRegExp::kDataOffset));
if (FLAG_debug_code) {
Condition is_smi = masm->CheckSmi(rax);
__ Check(NegateCondition(is_smi),
- "Unexpected type for RegExp data, FixedArray expected");
+ kUnexpectedTypeForRegExpDataFixedArrayExpected);
__ CmpObjectType(rax, FIXED_ARRAY_TYPE, kScratchRegister);
- __ Check(equal, "Unexpected type for RegExp data, FixedArray expected");
+ __ Check(equal, kUnexpectedTypeForRegExpDataFixedArrayExpected);
}
// rax: RegExp data (FixedArray)
@@ -2790,149 +1732,121 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Check that the number of captures fit in the static offsets vector buffer.
__ SmiToInteger32(rdx,
FieldOperand(rax, JSRegExp::kIrregexpCaptureCountOffset));
- // Calculate number of capture registers (number_of_captures + 1) * 2.
- __ leal(rdx, Operand(rdx, rdx, times_1, 2));
- // Check that the static offsets vector buffer is large enough.
- __ cmpl(rdx, Immediate(Isolate::kJSRegexpStaticOffsetsVectorSize));
+ // Check (number_of_captures + 1) * 2 <= offsets vector size
+ // Or number_of_captures <= offsets vector size / 2 - 1
+ STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
+ __ cmpl(rdx, Immediate(Isolate::kJSRegexpStaticOffsetsVectorSize / 2 - 1));
__ j(above, &runtime);
- // rax: RegExp data (FixedArray)
- // rdx: Number of capture registers
- // Check that the second argument is a string.
- __ movq(rdi, Operand(rsp, kSubjectOffset));
- __ JumpIfSmi(rdi, &runtime);
- Condition is_string = masm->IsObjectStringType(rdi, rbx, rbx);
- __ j(NegateCondition(is_string), &runtime);
-
- // rdi: Subject string.
- // rax: RegExp data (FixedArray).
- // rdx: Number of capture registers.
- // Check that the third argument is a positive smi less than the string
- // length. A negative value will be greater (unsigned comparison).
- __ movq(rbx, Operand(rsp, kPreviousIndexOffset));
- __ JumpIfNotSmi(rbx, &runtime);
- __ SmiCompare(rbx, FieldOperand(rdi, String::kLengthOffset));
- __ j(above_equal, &runtime);
-
- // rax: RegExp data (FixedArray)
- // rdx: Number of capture registers
- // Check that the fourth object is a JSArray object.
- __ movq(rdi, Operand(rsp, kLastMatchInfoOffset));
- __ JumpIfSmi(rdi, &runtime);
- __ CmpObjectType(rdi, JS_ARRAY_TYPE, kScratchRegister);
- __ j(not_equal, &runtime);
- // Check that the JSArray is in fast case.
- __ movq(rbx, FieldOperand(rdi, JSArray::kElementsOffset));
- __ movq(rdi, FieldOperand(rbx, HeapObject::kMapOffset));
- __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
- Heap::kFixedArrayMapRootIndex);
- __ j(not_equal, &runtime);
- // Check that the last match info has space for the capture registers and the
- // additional information. Ensure no overflow in add.
- STATIC_ASSERT(FixedArray::kMaxLength < kMaxInt - FixedArray::kLengthOffset);
- __ SmiToInteger32(rdi, FieldOperand(rbx, FixedArray::kLengthOffset));
- __ addl(rdx, Immediate(RegExpImpl::kLastMatchOverhead));
- __ cmpl(rdx, rdi);
- __ j(greater, &runtime);
-
// Reset offset for possibly sliced string.
__ Set(r14, 0);
- // rax: RegExp data (FixedArray)
- // Check the representation and encoding of the subject string.
- Label seq_ascii_string, seq_two_byte_string, check_code;
- __ movq(rdi, Operand(rsp, kSubjectOffset));
- // Make a copy of the original subject string.
- __ movq(r15, rdi);
+ __ movq(rdi, args.GetArgumentOperand(SUBJECT_STRING_ARGUMENT_INDEX));
+ __ JumpIfSmi(rdi, &runtime);
+ __ movq(r15, rdi); // Make a copy of the original subject string.
__ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
__ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
- // First check for flat two byte string.
+ // rax: RegExp data (FixedArray)
+ // rdi: subject string
+ // r15: subject string
+ // Handle subject string according to its encoding and representation:
+ // (1) Sequential two byte? If yes, go to (9).
+ // (2) Sequential one byte? If yes, go to (6).
+ // (3) Anything but sequential or cons? If yes, go to (7).
+ // (4) Cons string. If the string is flat, replace subject with first string.
+ // Otherwise bailout.
+ // (5a) Is subject sequential two byte? If yes, go to (9).
+ // (5b) Is subject external? If yes, go to (8).
+ // (6) One byte sequential. Load regexp code for one byte.
+ // (E) Carry on.
+ /// [...]
+
+ // Deferred code at the end of the stub:
+ // (7) Not a long external string? If yes, go to (10).
+ // (8) External string. Make it, offset-wise, look like a sequential string.
+ // (8a) Is the external string one byte? If yes, go to (6).
+ // (9) Two byte sequential. Load regexp code for two byte. Go to (E).
+ // (10) Short external string or not a string? If yes, bail out to runtime.
+ // (11) Sliced string. Replace subject with parent. Go to (5a).
+
+ Label seq_one_byte_string /* 6 */, seq_two_byte_string /* 9 */,
+ external_string /* 8 */, check_underlying /* 5a */,
+ not_seq_nor_cons /* 7 */, check_code /* E */,
+ not_long_external /* 10 */;
+
+ // (1) Sequential two byte? If yes, go to (9).
__ andb(rbx, Immediate(kIsNotStringMask |
kStringRepresentationMask |
kStringEncodingMask |
kShortExternalStringMask));
STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0);
- __ j(zero, &seq_two_byte_string, Label::kNear);
- // Any other flat string must be a flat ASCII string. None of the following
- // string type tests will succeed if subject is not a string or a short
- // external string.
+ __ j(zero, &seq_two_byte_string); // Go to (9).
+
+ // (2) Sequential one byte? If yes, go to (6).
+ // Any other sequential string must be one byte.
__ andb(rbx, Immediate(kIsNotStringMask |
kStringRepresentationMask |
kShortExternalStringMask));
- __ j(zero, &seq_ascii_string, Label::kNear);
-
- // rbx: whether subject is a string and if yes, its string representation
- // Check for flat cons string or sliced string.
- // A flat cons string is a cons string where the second part is the empty
- // string. In that case the subject string is just the first part of the cons
- // string. Also in this case the first part of the cons string is known to be
- // a sequential string or an external string.
- // In the case of a sliced string its offset has to be taken into account.
- Label cons_string, external_string, check_encoding;
+ __ j(zero, &seq_one_byte_string, Label::kNear); // Go to (6).
+
+ // (3) Anything but sequential or cons? If yes, go to (7).
+ // We check whether the subject string is a cons, since sequential strings
+ // have already been covered.
STATIC_ASSERT(kConsStringTag < kExternalStringTag);
STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
__ cmpq(rbx, Immediate(kExternalStringTag));
- __ j(less, &cons_string, Label::kNear);
- __ j(equal, &external_string);
-
- // Catch non-string subject or short external string.
- STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag !=0);
- __ testb(rbx, Immediate(kIsNotStringMask | kShortExternalStringMask));
- __ j(not_zero, &runtime);
+ __ j(greater_equal, &not_seq_nor_cons); // Go to (7).
- // String is sliced.
- __ SmiToInteger32(r14, FieldOperand(rdi, SlicedString::kOffsetOffset));
- __ movq(rdi, FieldOperand(rdi, SlicedString::kParentOffset));
- // r14: slice offset
- // r15: original subject string
- // rdi: parent string
- __ jmp(&check_encoding, Label::kNear);
- // String is a cons string, check whether it is flat.
- __ bind(&cons_string);
+ // (4) Cons string. Check that it's flat.
+ // Replace subject with first string and reload instance type.
__ CompareRoot(FieldOperand(rdi, ConsString::kSecondOffset),
- Heap::kEmptyStringRootIndex);
+ Heap::kempty_stringRootIndex);
__ j(not_equal, &runtime);
__ movq(rdi, FieldOperand(rdi, ConsString::kFirstOffset));
- // rdi: first part of cons string or parent of sliced string.
- // rbx: map of first part of cons string or map of parent of sliced string.
- // Is first part of cons or parent of slice a flat two byte string?
- __ bind(&check_encoding);
+ __ bind(&check_underlying);
__ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
- __ testb(FieldOperand(rbx, Map::kInstanceTypeOffset),
- Immediate(kStringRepresentationMask | kStringEncodingMask));
+ __ movq(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
+
+ // (5a) Is subject sequential two byte? If yes, go to (9).
+ __ testb(rbx, Immediate(kStringRepresentationMask | kStringEncodingMask));
STATIC_ASSERT((kSeqStringTag | kTwoByteStringTag) == 0);
- __ j(zero, &seq_two_byte_string, Label::kNear);
- // Any other flat string must be sequential ASCII or external.
- __ testb(FieldOperand(rbx, Map::kInstanceTypeOffset),
- Immediate(kStringRepresentationMask));
- __ j(not_zero, &external_string);
-
- __ bind(&seq_ascii_string);
- // rdi: subject string (sequential ASCII)
+ __ j(zero, &seq_two_byte_string); // Go to (9).
+ // (5b) Is subject external? If yes, go to (8).
+ __ testb(rbx, Immediate(kStringRepresentationMask));
+ // The underlying external string is never a short external string.
+ STATIC_CHECK(ExternalString::kMaxShortLength < ConsString::kMinLength);
+ STATIC_CHECK(ExternalString::kMaxShortLength < SlicedString::kMinLength);
+ __ j(not_zero, &external_string); // Go to (8)
+
+ // (6) One byte sequential. Load regexp code for one byte.
+ __ bind(&seq_one_byte_string);
// rax: RegExp data (FixedArray)
__ movq(r11, FieldOperand(rax, JSRegExp::kDataAsciiCodeOffset));
- __ Set(rcx, 1); // Type is ASCII.
- __ jmp(&check_code, Label::kNear);
-
- __ bind(&seq_two_byte_string);
- // rdi: subject string (flat two-byte)
- // rax: RegExp data (FixedArray)
- __ movq(r11, FieldOperand(rax, JSRegExp::kDataUC16CodeOffset));
- __ Set(rcx, 0); // Type is two byte.
+ __ Set(rcx, 1); // Type is one byte.
+ // (E) Carry on. String handling is done.
__ bind(&check_code);
+ // r11: irregexp code
// Check that the irregexp code has been generated for the actual string
  // encoding. If it has, the field contains a code object; otherwise it
  // contains a smi (code flushing support).
__ JumpIfSmi(r11, &runtime);
- // rdi: subject string
+ // rdi: sequential subject string (or look-alike, external string)
+ // r15: original subject string
// rcx: encoding of subject string (1 if ASCII, 0 if two_byte);
// r11: code
// Load used arguments before starting to push arguments for call to native
// RegExp code to avoid handling changing stack height.
- __ SmiToInteger64(rbx, Operand(rsp, kPreviousIndexOffset));
+ // We have to use r15 instead of rdi to load the length, because rdi might
+ // only have been made to look like a sequential string when it is actually
+ // an external string.
+ __ movq(rbx, args.GetArgumentOperand(PREVIOUS_INDEX_ARGUMENT_INDEX));
+ __ JumpIfNotSmi(rbx, &runtime);
+ __ SmiCompare(rbx, FieldOperand(r15, String::kLengthOffset));
+ __ j(above_equal, &runtime);
+ __ SmiToInteger64(rbx, rbx);
// rdi: subject string
// rbx: previous index
@@ -2949,9 +1863,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ EnterApiExitFrame(argument_slots_on_stack);
// Argument 9: Pass current isolate address.
- // __ movq(Operand(rsp, (argument_slots_on_stack - 1) * kPointerSize),
- // Immediate(ExternalReference::isolate_address()));
- __ LoadAddress(kScratchRegister, ExternalReference::isolate_address());
+ __ LoadAddress(kScratchRegister,
+ ExternalReference::isolate_address(masm->isolate()));
__ movq(Operand(rsp, (argument_slots_on_stack - 1) * kPointerSize),
kScratchRegister);
@@ -2984,20 +1897,6 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ movq(Operand(rsp, (argument_slots_on_stack - 5) * kPointerSize), r8);
#endif
- // First four arguments are passed in registers on both Linux and Windows.
-#ifdef _WIN64
- Register arg4 = r9;
- Register arg3 = r8;
- Register arg2 = rdx;
- Register arg1 = rcx;
-#else
- Register arg4 = rcx;
- Register arg3 = rdx;
- Register arg2 = rsi;
- Register arg1 = rdi;
-#endif
-
- // Keep track on aliasing between argX defined above and the registers used.
// rdi: subject string
// rbx: previous index
// rcx: encoding of subject string (1 if ASCII 0 if two_byte);
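The arg_reg_N names used in the rewritten lines stand for exactly the per-platform aliases the removed block above spelled out by hand. A hedged sketch of what they correspond to (the real definitions live in the macro assembler and may differ in detail):

// Illustrative only: per-ABI aliases for the first four integer arguments.
enum Register { rax, rcx, rdx, rbx, rsp, rbp, rsi, rdi, r8, r9 };
#ifdef _WIN64
// Windows x64 ABI: integer arguments in rcx, rdx, r8, r9.
constexpr Register arg_reg_1 = rcx, arg_reg_2 = rdx, arg_reg_3 = r8, arg_reg_4 = r9;
#else
// System V AMD64 ABI: integer arguments in rdi, rsi, rdx, rcx (then r8, r9).
constexpr Register arg_reg_1 = rdi, arg_reg_2 = rsi, arg_reg_3 = rdx, arg_reg_4 = rcx;
#endif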
@@ -3006,7 +1905,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// r15: original subject string
// Argument 2: Previous index.
- __ movq(arg2, rbx);
+ __ movq(arg_reg_2, rbx);
// Argument 4: End of string data
// Argument 3: Start of string data
@@ -3014,20 +1913,24 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Prepare start and end index of the input.
// Load the length from the original sliced string if that is the case.
__ addq(rbx, r14);
- __ SmiToInteger32(arg3, FieldOperand(r15, String::kLengthOffset));
- __ addq(r14, arg3); // Using arg3 as scratch.
+ __ SmiToInteger32(arg_reg_3, FieldOperand(r15, String::kLengthOffset));
+ __ addq(r14, arg_reg_3); // Using arg_reg_3 as scratch.
// rbx: start index of the input
// r14: end index of the input
// r15: original subject string
__ testb(rcx, rcx); // Last use of rcx as encoding of subject string.
__ j(zero, &setup_two_byte, Label::kNear);
- __ lea(arg4, FieldOperand(rdi, r14, times_1, SeqAsciiString::kHeaderSize));
- __ lea(arg3, FieldOperand(rdi, rbx, times_1, SeqAsciiString::kHeaderSize));
+ __ lea(arg_reg_4,
+ FieldOperand(rdi, r14, times_1, SeqOneByteString::kHeaderSize));
+ __ lea(arg_reg_3,
+ FieldOperand(rdi, rbx, times_1, SeqOneByteString::kHeaderSize));
__ jmp(&setup_rest, Label::kNear);
__ bind(&setup_two_byte);
- __ lea(arg4, FieldOperand(rdi, r14, times_2, SeqTwoByteString::kHeaderSize));
- __ lea(arg3, FieldOperand(rdi, rbx, times_2, SeqTwoByteString::kHeaderSize));
+ __ lea(arg_reg_4,
+ FieldOperand(rdi, r14, times_2, SeqTwoByteString::kHeaderSize));
+ __ lea(arg_reg_3,
+ FieldOperand(rdi, rbx, times_2, SeqTwoByteString::kHeaderSize));
__ bind(&setup_rest);
// Argument 1: Original subject string.
@@ -3035,13 +1938,13 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// use rbp, which points exactly to one pointer size below the previous rsp.
// (Because creating a new stack frame pushes the previous rbp onto the stack
// and thereby moves up rsp by one kPointerSize.)
- __ movq(arg1, r15);
+ __ movq(arg_reg_1, r15);
// Locate the code entry and call it.
__ addq(r11, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ call(r11);
- __ LeaveApiExitFrame();
+ __ LeaveApiExitFrame(true);
// Check the result.
Label success;
@@ -3059,11 +1962,11 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// For failure return null.
__ LoadRoot(rax, Heap::kNullValueRootIndex);
- __ ret(4 * kPointerSize);
+ __ ret(REG_EXP_EXEC_ARGUMENT_COUNT * kPointerSize);
// Load RegExp data.
__ bind(&success);
- __ movq(rax, Operand(rsp, kJSRegExpOffset));
+ __ movq(rax, args.GetArgumentOperand(JS_REG_EXP_OBJECT_ARGUMENT_INDEX));
__ movq(rcx, FieldOperand(rax, JSRegExp::kDataOffset));
__ SmiToInteger32(rax,
FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset));
@@ -3071,9 +1974,23 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ leal(rdx, Operand(rax, rax, times_1, 2));
// rdx: Number of capture registers
- // Load last_match_info which is still known to be a fast case JSArray.
- __ movq(rax, Operand(rsp, kLastMatchInfoOffset));
- __ movq(rbx, FieldOperand(rax, JSArray::kElementsOffset));
+ // Check that the fourth object is a JSArray object.
+ __ movq(r15, args.GetArgumentOperand(LAST_MATCH_INFO_ARGUMENT_INDEX));
+ __ JumpIfSmi(r15, &runtime);
+ __ CmpObjectType(r15, JS_ARRAY_TYPE, kScratchRegister);
+ __ j(not_equal, &runtime);
+ // Check that the JSArray is in fast case.
+ __ movq(rbx, FieldOperand(r15, JSArray::kElementsOffset));
+ __ movq(rax, FieldOperand(rbx, HeapObject::kMapOffset));
+ __ CompareRoot(rax, Heap::kFixedArrayMapRootIndex);
+ __ j(not_equal, &runtime);
+ // Check that the last match info has space for the capture registers and the
+ // additional information. Ensure no overflow in add.
+ STATIC_ASSERT(FixedArray::kMaxLength < kMaxInt - FixedArray::kLengthOffset);
+ __ SmiToInteger32(rax, FieldOperand(rbx, FixedArray::kLengthOffset));
+ __ subl(rax, Immediate(RegExpImpl::kLastMatchOverhead));
+ __ cmpl(rdx, rax);
+ __ j(greater, &runtime);
// rbx: last_match_info backing store (FixedArray)
// rdx: number of capture registers
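The new bounds check ensures the last-match-info backing store can hold the kLastMatchOverhead bookkeeping slots plus one slot per capture register. A hedged restatement as plain C++ (the helper name is illustrative, not part of the stub):

// Mirrors the subl/cmpl/j(greater, &runtime) sequence above.
inline bool LastMatchInfoHasSpace(int backing_store_length,
                                  int capture_register_count,
                                  int last_match_overhead) {
  // The stub computes length - overhead and bails to the runtime when the
  // number of capture registers exceeds it.
  return capture_register_count <= backing_store_length - last_match_overhead;
}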
@@ -3082,14 +1999,15 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ movq(FieldOperand(rbx, RegExpImpl::kLastCaptureCountOffset),
kScratchRegister);
// Store last subject and last input.
- __ movq(rax, Operand(rsp, kSubjectOffset));
+ __ movq(rax, args.GetArgumentOperand(SUBJECT_STRING_ARGUMENT_INDEX));
__ movq(FieldOperand(rbx, RegExpImpl::kLastSubjectOffset), rax);
+ __ movq(rcx, rax);
__ RecordWriteField(rbx,
RegExpImpl::kLastSubjectOffset,
rax,
rdi,
kDontSaveFPRegs);
- __ movq(rax, Operand(rsp, kSubjectOffset));
+ __ movq(rax, rcx);
__ movq(FieldOperand(rbx, RegExpImpl::kLastInputOffset), rax);
__ RecordWriteField(rbx,
RegExpImpl::kLastInputOffset,
@@ -3123,8 +2041,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ bind(&done);
// Return last match info.
- __ movq(rax, Operand(rsp, kLastMatchInfoOffset));
- __ ret(4 * kPointerSize);
+ __ movq(rax, r15);
+ __ ret(REG_EXP_EXEC_ARGUMENT_COUNT * kPointerSize);
__ bind(&exception);
// Result must now be exception. If there is no pending exception already a
@@ -3149,9 +2067,17 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ bind(&termination_exception);
__ ThrowUncatchable(rax);
- // External string. Short external strings have already been ruled out.
- // rdi: subject string (expected to be external)
- // rbx: scratch
+ // Do the runtime call to execute the regexp.
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+
+ // Deferred code for string handling.
+ // (7) Not a long external string? If yes, go to (10).
+ __ bind(&not_seq_nor_cons);
+ // Compare flags are still set from (3).
+ __ j(greater, &not_long_external, Label::kNear); // Go to (10).
+
+ // (8) External string. Short external strings have been ruled out.
__ bind(&external_string);
__ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
__ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
@@ -3159,20 +2085,37 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Assert that we do not have a cons or slice (indirect strings) here.
// Sequential strings have already been ruled out.
__ testb(rbx, Immediate(kIsIndirectStringMask));
- __ Assert(zero, "external string expected, but not found");
+ __ Assert(zero, kExternalStringExpectedButNotFound);
}
__ movq(rdi, FieldOperand(rdi, ExternalString::kResourceDataOffset));
// Move the pointer so that offset-wise, it looks like a sequential string.
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
__ subq(rdi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
STATIC_ASSERT(kTwoByteStringTag == 0);
+ // (8a) Is the external string one byte? If yes, go to (6).
__ testb(rbx, Immediate(kStringEncodingMask));
- __ j(not_zero, &seq_ascii_string);
- __ jmp(&seq_two_byte_string);
+ __ j(not_zero, &seq_one_byte_string); // Go to (6).
- // Do the runtime call to execute the regexp.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ // rdi: subject string (flat two-byte)
+ // rax: RegExp data (FixedArray)
+ // (9) Two byte sequential. Load regexp code for two byte. Go to (E).
+ __ bind(&seq_two_byte_string);
+ __ movq(r11, FieldOperand(rax, JSRegExp::kDataUC16CodeOffset));
+ __ Set(rcx, 0); // Type is two byte.
+ __ jmp(&check_code); // Go to (E).
+
+ // (10) Not a string or a short external string? If yes, bail out to runtime.
+ __ bind(&not_long_external);
+ // Catch non-string subject or short external string.
+ STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
+ __ testb(rbx, Immediate(kIsNotStringMask | kShortExternalStringMask));
+ __ j(not_zero, &runtime);
+
+ // (11) Sliced string. Replace subject with parent. Go to (5a).
+ // Load offset into r14 and replace subject string with parent.
+ __ SmiToInteger32(r14, FieldOperand(rdi, SlicedString::kOffsetOffset));
+ __ movq(rdi, FieldOperand(rdi, SlicedString::kParentOffset));
+ __ jmp(&check_underlying);
#endif // V8_INTERPRETED_REGEXP
}
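The numbered comments above ((1)-(11) and (E)) describe a flattening loop over the subject string: cons and sliced strings are peeled back to their underlying string, and the remaining sequential or external string selects the one-byte or two-byte regexp code. A hedged, illustrative C++ sketch of that control flow (the struct and helper names are assumptions, not V8 types):

struct Str {
  enum Kind { kSeqOneByte, kSeqTwoByte, kCons, kSliced,
              kExternalOneByte, kExternalTwoByte, kShortExternal, kNotString };
  Kind kind;
  Str* first;    // cons: first component (the stub requires the second to be empty)
  Str* parent;   // sliced: parent string
  int offset;    // sliced: offset into the parent
};

// Returns 1 for the one-byte regexp code, 0 for the two-byte code, and -1
// when the stub would fall back to the runtime.
inline int SelectRegExpCode(Str* subject, int* slice_offset) {
  *slice_offset = 0;
  for (;;) {
    switch (subject->kind) {
      case Str::kSeqOneByte:      return 1;   // (6)
      case Str::kSeqTwoByte:      return 0;   // (9)
      case Str::kCons:                        // (4): continue with the first part
        subject = subject->first;             //      (non-flat cons goes to runtime)
        break;
      case Str::kSliced:                      // (11): continue with the parent
        *slice_offset = subject->offset;
        subject = subject->parent;
        break;
      case Str::kExternalOneByte: return 1;   // (8) + (8a)
      case Str::kExternalTwoByte: return 0;   // (8) -> (9)
      default:                    return -1;  // (10): short external or not a string
    }
  }
}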
@@ -3181,7 +2124,8 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
const int kMaxInlineLength = 100;
Label slowcase;
Label done;
- __ movq(r8, Operand(rsp, kPointerSize * 3));
+ StackArgumentsAccessor args(rsp, 3, ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ __ movq(r8, args.GetArgumentOperand(0));
__ JumpIfNotSmi(r8, &slowcase);
__ SmiToInteger32(rbx, r8);
__ cmpl(rbx, Immediate(kMaxInlineLength));
@@ -3192,14 +2136,14 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
// Allocate RegExpResult followed by FixedArray with size in rbx.
// JSArray: [Map][empty properties][Elements][Length-smi][index][input]
// Elements: [Map][Length][..elements..]
- __ AllocateInNewSpace(JSRegExpResult::kSize + FixedArray::kHeaderSize,
- times_pointer_size,
- rbx, // In: Number of elements.
- rax, // Out: Start of allocation (tagged).
- rcx, // Out: End of allocation.
- rdx, // Scratch register
- &slowcase,
- TAG_OBJECT);
+ __ Allocate(JSRegExpResult::kSize + FixedArray::kHeaderSize,
+ times_pointer_size,
+ rbx, // In: Number of elements.
+ rax, // Out: Start of allocation (tagged).
+ rcx, // Out: End of allocation.
+ rdx, // Scratch register
+ &slowcase,
+ TAG_OBJECT);
// rax: Start of allocated area, object-tagged.
// rbx: Number of array elements as int32.
// r8: Number of array elements as smi.
@@ -3219,11 +2163,11 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
__ movq(FieldOperand(rax, JSObject::kElementsOffset), rcx);
// Set input, index and length fields from arguments.
- __ movq(r8, Operand(rsp, kPointerSize * 1));
+ __ movq(r8, args.GetArgumentOperand(2));
__ movq(FieldOperand(rax, JSRegExpResult::kInputOffset), r8);
- __ movq(r8, Operand(rsp, kPointerSize * 2));
+ __ movq(r8, args.GetArgumentOperand(1));
__ movq(FieldOperand(rax, JSRegExpResult::kIndexOffset), r8);
- __ movq(r8, Operand(rsp, kPointerSize * 3));
+ __ movq(r8, args.GetArgumentOperand(0));
__ movq(FieldOperand(rax, JSArray::kLengthOffset), r8);
// Fill out the elements FixedArray.
@@ -3261,115 +2205,6 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
}
-void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
- Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- bool object_is_smi,
- Label* not_found) {
- // Use of registers. Register result is used as a temporary.
- Register number_string_cache = result;
- Register mask = scratch1;
- Register scratch = scratch2;
-
- // Load the number string cache.
- __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
-
- // Make the hash mask from the length of the number string cache. It
- // contains two elements (number and string) for each cache entry.
- __ SmiToInteger32(
- mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
- __ shrl(mask, Immediate(1));
- __ subq(mask, Immediate(1)); // Make mask.
-
- // Calculate the entry in the number string cache. The hash value in the
- // number string cache for smis is just the smi value, and the hash for
- // doubles is the xor of the upper and lower words. See
- // Heap::GetNumberStringCache.
- Label is_smi;
- Label load_result_from_cache;
- Factory* factory = masm->isolate()->factory();
- if (!object_is_smi) {
- __ JumpIfSmi(object, &is_smi);
- __ CheckMap(object,
- factory->heap_number_map(),
- not_found,
- DONT_DO_SMI_CHECK);
-
- STATIC_ASSERT(8 == kDoubleSize);
- __ movl(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
- __ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset));
- GenerateConvertHashCodeToIndex(masm, scratch, mask);
-
- Register index = scratch;
- Register probe = mask;
- __ movq(probe,
- FieldOperand(number_string_cache,
- index,
- times_1,
- FixedArray::kHeaderSize));
- __ JumpIfSmi(probe, not_found);
- __ movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
- __ movsd(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
- __ ucomisd(xmm0, xmm1);
- __ j(parity_even, not_found); // Bail out if NaN is involved.
- __ j(not_equal, not_found); // The cache did not contain this value.
- __ jmp(&load_result_from_cache);
- }
-
- __ bind(&is_smi);
- __ SmiToInteger32(scratch, object);
- GenerateConvertHashCodeToIndex(masm, scratch, mask);
-
- Register index = scratch;
- // Check if the entry is the smi we are looking for.
- __ cmpq(object,
- FieldOperand(number_string_cache,
- index,
- times_1,
- FixedArray::kHeaderSize));
- __ j(not_equal, not_found);
-
- // Get the result from the cache.
- __ bind(&load_result_from_cache);
- __ movq(result,
- FieldOperand(number_string_cache,
- index,
- times_1,
- FixedArray::kHeaderSize + kPointerSize));
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->number_to_string_native(), 1);
-}
-
-
-void NumberToStringStub::GenerateConvertHashCodeToIndex(MacroAssembler* masm,
- Register hash,
- Register mask) {
- __ and_(hash, mask);
- // Each entry in string cache consists of two pointer sized fields,
- // but times_twice_pointer_size (multiplication by 16) scale factor
- // is not supported by addrmode on x64 platform.
- // So we have to premultiply entry index before lookup.
- __ shl(hash, Immediate(kPointerSizeLog2 + 1));
-}
-
-
-void NumberToStringStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- __ movq(rbx, Operand(rsp, kPointerSize));
-
- // Generate code to lookup number in the number string cache.
- GenerateLookupNumberStringCache(masm, rbx, rax, r8, r9, false, &runtime);
- __ ret(1 * kPointerSize);
-
- __ bind(&runtime);
- // Handle number to string in the runtime system if not found in the cache.
- __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);
-}
-
-
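For reference, the lookup the deleted helpers performed hashed smis by their value and heap numbers by xor-ing the two 32-bit halves of the double, then premultiplied the masked hash because x64 addressing has no times-16 scale factor. A hedged C++ sketch of that indexing (names are illustrative, not V8's):

#include <cstdint>
#include <cstring>

// Hash for heap numbers: xor of the upper and lower 32-bit words.
inline uint32_t DoubleHash(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof bits);
  return static_cast<uint32_t>(bits) ^ static_cast<uint32_t>(bits >> 32);
}

// Each cache entry is two pointers (number, string); the index is
// premultiplied into a byte offset before the lookup.
inline uint32_t CacheEntryOffset(uint32_t hash, uint32_t mask) {
  const int kPointerSizeLog2 = 3;                  // 8-byte pointers
  return (hash & mask) << (kPointerSizeLog2 + 1);  // byte offset of the entry
}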
static int NegativeComparisonResult(Condition cc) {
ASSERT(cc != equal);
ASSERT((cc == less) || (cc == less_equal)
@@ -3378,30 +2213,57 @@ static int NegativeComparisonResult(Condition cc) {
}
-void CompareStub::Generate(MacroAssembler* masm) {
- ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
+static void CheckInputType(MacroAssembler* masm,
+ Register input,
+ CompareIC::State expected,
+ Label* fail) {
+ Label ok;
+ if (expected == CompareIC::SMI) {
+ __ JumpIfNotSmi(input, fail);
+ } else if (expected == CompareIC::NUMBER) {
+ __ JumpIfSmi(input, &ok);
+ __ CompareMap(input, masm->isolate()->factory()->heap_number_map(), NULL);
+ __ j(not_equal, fail);
+ }
+ // We could be strict about internalized/non-internalized here, but as long as
+ // hydrogen doesn't care, the stub doesn't have to care either.
+ __ bind(&ok);
+}
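CheckInputType only filters the inputs the IC claims to have seen: a SMI state requires a smi, a NUMBER state accepts a smi or a heap number, and anything else passes through. A hedged C++ rendering of that filter (the enum is a stand-in for CompareIC::State, not the real type):

enum class ExpectedInput { kSmi, kNumber, kAnything };

inline bool InputAcceptable(ExpectedInput expected,
                            bool is_smi, bool is_heap_number) {
  switch (expected) {
    case ExpectedInput::kSmi:    return is_smi;
    case ExpectedInput::kNumber: return is_smi || is_heap_number;
    default:                     return true;  // no check for other states
  }
}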
+
+
+static void BranchIfNotInternalizedString(MacroAssembler* masm,
+ Label* label,
+ Register object,
+ Register scratch) {
+ __ JumpIfSmi(object, label);
+ __ movq(scratch, FieldOperand(object, HeapObject::kMapOffset));
+ __ movzxbq(scratch,
+ FieldOperand(scratch, Map::kInstanceTypeOffset));
+ STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
+ __ testb(scratch, Immediate(kIsNotStringMask | kIsNotInternalizedMask));
+ __ j(not_zero, label);
+}
+
+void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
Label check_unequal_objects, done;
+ Condition cc = GetCondition();
Factory* factory = masm->isolate()->factory();
- // Compare two smis if required.
- if (include_smi_compare_) {
- Label non_smi, smi_done;
- __ JumpIfNotBothSmi(rax, rdx, &non_smi);
- __ subq(rdx, rax);
- __ j(no_overflow, &smi_done);
- __ not_(rdx); // Correct sign in case of overflow. rdx cannot be 0 here.
- __ bind(&smi_done);
- __ movq(rax, rdx);
- __ ret(0);
- __ bind(&non_smi);
- } else if (FLAG_debug_code) {
- Label ok;
- __ JumpIfNotSmi(rdx, &ok);
- __ JumpIfNotSmi(rax, &ok);
- __ Abort("CompareStub: smi operands");
- __ bind(&ok);
- }
+ Label miss;
+ CheckInputType(masm, rdx, left_, &miss);
+ CheckInputType(masm, rax, right_, &miss);
+
+ // Compare two smis.
+ Label non_smi, smi_done;
+ __ JumpIfNotBothSmi(rax, rdx, &non_smi);
+ __ subq(rdx, rax);
+ __ j(no_overflow, &smi_done);
+ __ not_(rdx); // Correct sign in case of overflow. rdx cannot be 0 here.
+ __ bind(&smi_done);
+ __ movq(rax, rdx);
+ __ ret(0);
+ __ bind(&non_smi);
// The compare stub returns a positive, negative, or zero 64-bit integer
// value in rax, corresponding to result of comparing the two inputs.
@@ -3414,66 +2276,58 @@ void CompareStub::Generate(MacroAssembler* masm) {
__ cmpq(rax, rdx);
__ j(not_equal, &not_identical, Label::kNear);
- if (cc_ != equal) {
+ if (cc != equal) {
// Check for undefined. undefined OP undefined is false even though
// undefined == undefined.
Label check_for_nan;
__ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
__ j(not_equal, &check_for_nan, Label::kNear);
- __ Set(rax, NegativeComparisonResult(cc_));
+ __ Set(rax, NegativeComparisonResult(cc));
__ ret(0);
__ bind(&check_for_nan);
}
- // Test for NaN. Sadly, we can't just compare to FACTORY->nan_value(),
+ // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
// so we do the second best thing - test it ourselves.
- // Note: if cc_ != equal, never_nan_nan_ is not used.
- // We cannot set rax to EQUAL until just before return because
- // rax must be unchanged on jump to not_identical.
- if (never_nan_nan_ && (cc_ == equal)) {
- __ Set(rax, EQUAL);
- __ ret(0);
- } else {
- Label heap_number;
- // If it's not a heap number, then return equal for (in)equality operator.
- __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
- factory->heap_number_map());
- __ j(equal, &heap_number, Label::kNear);
- if (cc_ != equal) {
- // Call runtime on identical objects. Otherwise return equal.
- __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
- __ j(above_equal, &not_identical, Label::kNear);
- }
- __ Set(rax, EQUAL);
- __ ret(0);
+ Label heap_number;
+ // If it's not a heap number, then return equal for (in)equality operator.
+ __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
+ factory->heap_number_map());
+ __ j(equal, &heap_number, Label::kNear);
+ if (cc != equal) {
+ // Call runtime on identical objects. Otherwise return equal.
+ __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
+ __ j(above_equal, &not_identical, Label::kNear);
+ }
+ __ Set(rax, EQUAL);
+ __ ret(0);
- __ bind(&heap_number);
- // It is a heap number, so return equal if it's not NaN.
- // For NaN, return 1 for every condition except greater and
- // greater-equal. Return -1 for them, so the comparison yields
- // false for all conditions except not-equal.
- __ Set(rax, EQUAL);
- __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
- __ ucomisd(xmm0, xmm0);
- __ setcc(parity_even, rax);
- // rax is 0 for equal non-NaN heapnumbers, 1 for NaNs.
- if (cc_ == greater_equal || cc_ == greater) {
- __ neg(rax);
- }
- __ ret(0);
+ __ bind(&heap_number);
+ // It is a heap number, so return equal if it's not NaN.
+ // For NaN, return 1 for every condition except greater and
+ // greater-equal. Return -1 for them, so the comparison yields
+ // false for all conditions except not-equal.
+ __ Set(rax, EQUAL);
+ __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
+ __ ucomisd(xmm0, xmm0);
+ __ setcc(parity_even, rax);
+ // rax is 0 for equal non-NaN heapnumbers, 1 for NaNs.
+ if (cc == greater_equal || cc == greater) {
+ __ neg(rax);
}
+ __ ret(0);
__ bind(&not_identical);
}
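For identical NaN operands the stub picks a result that makes every comparison except not-equal evaluate to false: +1 normally, -1 for greater and greater-equal. A hedged one-line equivalent of the setcc/neg sequence (Condition reduced to a local enum for illustration):

enum Condition { less, less_equal, greater, greater_equal, equal, not_equal };

inline int IdenticalNaNResult(Condition cc) {
  // NaN compares false under <, <=, >, >= and ==; only != should see "true".
  return (cc == greater || cc == greater_equal) ? -1 : 1;
}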
- if (cc_ == equal) { // Both strict and non-strict.
+ if (cc == equal) { // Both strict and non-strict.
Label slow; // Fallthrough label.
// If we're doing a strict equality comparison, we don't have to do
// type conversion, so we generate code to do fast comparison for objects
// and oddballs. Non-smi numbers and strings still go through the usual
// slow-case code.
- if (strict_) {
+ if (strict()) {
// If either is a Smi (we know that not both are), then they can only
// be equal if the other is a HeapNumber. If so, use the slow case.
{
@@ -3501,7 +2355,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
Label first_non_object;
__ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
__ j(below, &first_non_object, Label::kNear);
- // Return non-zero (eax (not rax) is not zero)
+ // Return non-zero (rax is not zero).
Label return_not_equal;
STATIC_ASSERT(kHeapObjectTag != 0);
__ bind(&return_not_equal);
@@ -3525,46 +2379,46 @@ void CompareStub::Generate(MacroAssembler* masm) {
}
// Generate the number comparison code.
- if (include_number_compare_) {
- Label non_number_comparison;
- Label unordered;
- FloatingPointHelper::LoadSSE2UnknownOperands(masm, &non_number_comparison);
- __ xorl(rax, rax);
- __ xorl(rcx, rcx);
- __ ucomisd(xmm0, xmm1);
-
- // Don't base result on EFLAGS when a NaN is involved.
- __ j(parity_even, &unordered, Label::kNear);
- // Return a result of -1, 0, or 1, based on EFLAGS.
- __ setcc(above, rax);
- __ setcc(below, rcx);
- __ subq(rax, rcx);
- __ ret(0);
+ Label non_number_comparison;
+ Label unordered;
+ FloatingPointHelper::LoadSSE2UnknownOperands(masm, &non_number_comparison);
+ __ xorl(rax, rax);
+ __ xorl(rcx, rcx);
+ __ ucomisd(xmm0, xmm1);
- // If one of the numbers was NaN, then the result is always false.
- // The cc is never not-equal.
- __ bind(&unordered);
- ASSERT(cc_ != not_equal);
- if (cc_ == less || cc_ == less_equal) {
- __ Set(rax, 1);
- } else {
- __ Set(rax, -1);
- }
- __ ret(0);
+ // Don't base result on EFLAGS when a NaN is involved.
+ __ j(parity_even, &unordered, Label::kNear);
+ // Return a result of -1, 0, or 1, based on EFLAGS.
+ __ setcc(above, rax);
+ __ setcc(below, rcx);
+ __ subq(rax, rcx);
+ __ ret(0);
- // The number comparison code did not provide a valid result.
- __ bind(&non_number_comparison);
+ // If one of the numbers was NaN, then the result is always false.
+ // The cc is never not-equal.
+ __ bind(&unordered);
+ ASSERT(cc != not_equal);
+ if (cc == less || cc == less_equal) {
+ __ Set(rax, 1);
+ } else {
+ __ Set(rax, -1);
}
+ __ ret(0);
- // Fast negative check for symbol-to-symbol equality.
- Label check_for_strings;
- if (cc_ == equal) {
- BranchIfNonSymbol(masm, &check_for_strings, rax, kScratchRegister);
- BranchIfNonSymbol(masm, &check_for_strings, rdx, kScratchRegister);
+ // The number comparison code did not provide a valid result.
+ __ bind(&non_number_comparison);
- // We've already checked for object identity, so if both operands
- // are symbols they aren't equal. Register eax (not rax) already holds a
- // non-zero value, which indicates not equal, so just return.
+ // Fast negative check for internalized-to-internalized equality.
+ Label check_for_strings;
+ if (cc == equal) {
+ BranchIfNotInternalizedString(
+ masm, &check_for_strings, rax, kScratchRegister);
+ BranchIfNotInternalizedString(
+ masm, &check_for_strings, rdx, kScratchRegister);
+
+ // We've already checked for object identity, so if both operands are
+ // internalized strings they aren't equal. Register rax already
+ // holds a non-zero value, which indicates not equal, so just return.
__ ret(0);
}
@@ -3574,7 +2428,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
rdx, rax, rcx, rbx, &check_unequal_objects);
// Inline comparison of ASCII strings.
- if (cc_ == equal) {
+ if (cc == equal) {
StringCompareStub::GenerateFlatAsciiStringEquals(masm,
rdx,
rax,
@@ -3591,11 +2445,11 @@ void CompareStub::Generate(MacroAssembler* masm) {
}
#ifdef DEBUG
- __ Abort("Unexpected fall-through from string comparison");
+ __ Abort(kUnexpectedFallThroughFromStringComparison);
#endif
__ bind(&check_unequal_objects);
- if (cc_ == equal && !strict_) {
+ if (cc == equal && !strict()) {
// Not strict equality. Objects are unequal if
// they are both JSObjects and not undetectable,
// and their pointers are different.
@@ -3629,51 +2483,27 @@ void CompareStub::Generate(MacroAssembler* masm) {
}
// Push arguments below the return address to prepare jump to builtin.
- __ pop(rcx);
+ __ PopReturnAddressTo(rcx);
__ push(rdx);
__ push(rax);
// Figure out which native to call and setup the arguments.
Builtins::JavaScript builtin;
- if (cc_ == equal) {
- builtin = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
+ if (cc == equal) {
+ builtin = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
} else {
builtin = Builtins::COMPARE;
- __ Push(Smi::FromInt(NegativeComparisonResult(cc_)));
+ __ Push(Smi::FromInt(NegativeComparisonResult(cc)));
}
- // Restore return address on the stack.
- __ push(rcx);
+ __ PushReturnAddressFrom(rcx);
// Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
__ InvokeBuiltin(builtin, JUMP_FUNCTION);
-}
-
-void CompareStub::BranchIfNonSymbol(MacroAssembler* masm,
- Label* label,
- Register object,
- Register scratch) {
- __ JumpIfSmi(object, label);
- __ movq(scratch, FieldOperand(object, HeapObject::kMapOffset));
- __ movzxbq(scratch,
- FieldOperand(scratch, Map::kInstanceTypeOffset));
- // Ensure that no non-strings have the symbol bit set.
- STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask);
- STATIC_ASSERT(kSymbolTag != 0);
- __ testb(scratch, Immediate(kIsSymbolMask));
- __ j(zero, label);
-}
-
-
-void StackCheckStub::Generate(MacroAssembler* masm) {
- __ TailCallRuntime(Runtime::kStackGuard, 0, 1);
-}
-
-
-void InterruptStub::Generate(MacroAssembler* masm) {
- __ TailCallRuntime(Runtime::kInterrupt, 0, 1);
+ __ bind(&miss);
+ GenerateMiss(masm);
}
@@ -3681,34 +2511,81 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// Cache the called function in a global property cell. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
+ // rax : number of arguments to the construct function
// rbx : cache cell for call target
// rdi : the function to call
Isolate* isolate = masm->isolate();
- Label initialize, done;
+ Label initialize, done, miss, megamorphic, not_array_function;
// Load the cache state into rcx.
- __ movq(rcx, FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset));
+ __ movq(rcx, FieldOperand(rbx, Cell::kValueOffset));
// A monomorphic cache hit or an already megamorphic state: invoke the
// function without changing the state.
__ cmpq(rcx, rdi);
- __ j(equal, &done, Label::kNear);
+ __ j(equal, &done);
__ Cmp(rcx, TypeFeedbackCells::MegamorphicSentinel(isolate));
- __ j(equal, &done, Label::kNear);
+ __ j(equal, &done);
+
+ // If we came here, we need to see if we are the array function.
+ // If we didn't have a matching function, and we didn't find the megamorphic
+ // sentinel, then the cell holds either some other function or an
+ // AllocationSite. Do a map check on the object in rcx.
+ Handle<Map> allocation_site_map =
+ masm->isolate()->factory()->allocation_site_map();
+ __ Cmp(FieldOperand(rcx, 0), allocation_site_map);
+ __ j(not_equal, &miss);
+
+ // Make sure the function is the Array() function
+ __ LoadArrayFunction(rcx);
+ __ cmpq(rdi, rcx);
+ __ j(not_equal, &megamorphic);
+ __ jmp(&done);
+
+ __ bind(&miss);
// A monomorphic miss (i.e, here the cache is not uninitialized) goes
// megamorphic.
__ Cmp(rcx, TypeFeedbackCells::UninitializedSentinel(isolate));
- __ j(equal, &initialize, Label::kNear);
+ __ j(equal, &initialize);
// MegamorphicSentinel is an immortal immovable object (undefined) so no
// write-barrier is needed.
- __ Move(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset),
+ __ bind(&megamorphic);
+ __ Move(FieldOperand(rbx, Cell::kValueOffset),
TypeFeedbackCells::MegamorphicSentinel(isolate));
- __ jmp(&done, Label::kNear);
+ __ jmp(&done);
- // An uninitialized cache is patched with the function.
+ // An uninitialized cache is patched with the function or sentinel to
+ // indicate the ElementsKind if function is the Array constructor.
__ bind(&initialize);
- __ movq(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset), rdi);
+ // Make sure the function is the Array() function
+ __ LoadArrayFunction(rcx);
+ __ cmpq(rdi, rcx);
+ __ j(not_equal, &not_array_function);
+
+ // The target function is the Array constructor;
+ // create an AllocationSite if we don't already have it, and store it in the cell.
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Arguments register must be smi-tagged to call out.
+ __ Integer32ToSmi(rax, rax);
+ __ push(rax);
+ __ push(rdi);
+ __ push(rbx);
+
+ CreateAllocationSiteStub create_stub;
+ __ CallStub(&create_stub);
+
+ __ pop(rbx);
+ __ pop(rdi);
+ __ pop(rax);
+ __ SmiToInteger32(rax, rax);
+ }
+ __ jmp(&done);
+
+ __ bind(&not_array_function);
+ __ movq(FieldOperand(rbx, Cell::kValueOffset), rdi);
// No need for a write barrier here - cells are rescanned.
__ bind(&done);
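GenerateRecordCallTarget is a small state machine over the cache cell: a matching function or an AllocationSite for Array() counts as a hit, any other populated state degrades to megamorphic, and an uninitialized cell records the callee (or an AllocationSite when the callee is the Array function). A hedged C++17 sketch of those transitions (the variant and type names are stand-ins, not V8's Cell API):

#include <variant>

struct JSFunction {};
struct AllocationSite {};
struct Uninitialized {};
struct Megamorphic {};
using CellValue =
    std::variant<Uninitialized, Megamorphic, AllocationSite, const JSFunction*>;

inline void RecordCallTarget(CellValue& cell,
                             const JSFunction* callee,
                             const JSFunction* array_function) {
  if (auto* f = std::get_if<const JSFunction*>(&cell); f && *f == callee)
    return;                                       // monomorphic hit
  if (std::holds_alternative<Megamorphic>(cell))
    return;                                       // stays megamorphic
  if (std::holds_alternative<AllocationSite>(cell) && callee == array_function)
    return;                                       // Array() fast path hit
  if (!std::holds_alternative<Uninitialized>(cell)) {
    cell = Megamorphic{};                         // monomorphic miss
    return;
  }
  // Uninitialized: remember the callee, or an AllocationSite for Array().
  cell = (callee == array_function) ? CellValue(AllocationSite{})
                                    : CellValue(callee);
}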
@@ -3720,6 +2597,7 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
// rdi : the function to call
Isolate* isolate = masm->isolate();
Label slow, non_function;
+ StackArgumentsAccessor args(rsp, argc_);
// The receiver might implicitly be the global object. This is
// indicated by passing the hole as the receiver to the call
@@ -3727,15 +2605,14 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
if (ReceiverMightBeImplicit()) {
Label call;
// Get the receiver from the stack.
- // +1 ~ return address
- __ movq(rax, Operand(rsp, (argc_ + 1) * kPointerSize));
+ __ movq(rax, args.GetReceiverOperand());
// Call as function is indicated with the hole.
__ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
__ j(not_equal, &call, Label::kNear);
// Patch the receiver on the stack with the global receiver object.
__ movq(rcx, GlobalObjectOperand());
__ movq(rcx, FieldOperand(rcx, GlobalObject::kGlobalReceiverOffset));
- __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rcx);
+ __ movq(args.GetReceiverOperand(), rcx);
__ bind(&call);
}
@@ -3775,15 +2652,15 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
// If there is a call target cache, mark it megamorphic in the
// non-function case. MegamorphicSentinel is an immortal immovable
// object (undefined) so no write barrier is needed.
- __ Move(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset),
+ __ Move(FieldOperand(rbx, Cell::kValueOffset),
TypeFeedbackCells::MegamorphicSentinel(isolate));
}
// Check for function proxy.
__ CmpInstanceType(rcx, JS_FUNCTION_PROXY_TYPE);
__ j(not_equal, &non_function);
- __ pop(rcx);
+ __ PopReturnAddressTo(rcx);
__ push(rdi); // put proxy as additional argument under return address
- __ push(rcx);
+ __ PushReturnAddressFrom(rcx);
__ Set(rax, argc_ + 1);
__ Set(rbx, 0);
__ SetCallKind(rcx, CALL_AS_METHOD);
@@ -3797,13 +2674,13 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
// CALL_NON_FUNCTION expects the non-function callee as receiver (instead
// of the original receiver from the call site).
__ bind(&non_function);
- __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rdi);
+ __ movq(args.GetReceiverOperand(), rdi);
__ Set(rax, argc_);
__ Set(rbx, 0);
__ SetCallKind(rcx, CALL_AS_METHOD);
__ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION);
Handle<Code> adaptor =
- Isolate::Current()->builtins()->ArgumentsAdaptorTrampoline();
+ isolate->builtins()->ArgumentsAdaptorTrampoline();
__ Jump(adaptor, RelocInfo::CODE_TARGET);
}
@@ -3825,10 +2702,12 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
}
// Jump to the function-specific construct stub.
- __ movq(rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ movq(rbx, FieldOperand(rbx, SharedFunctionInfo::kConstructStubOffset));
- __ lea(rbx, FieldOperand(rbx, Code::kHeaderSize));
- __ jmp(rbx);
+ Register jmp_reg = rcx;
+ __ movq(jmp_reg, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ movq(jmp_reg, FieldOperand(jmp_reg,
+ SharedFunctionInfo::kConstructStubOffset));
+ __ lea(jmp_reg, FieldOperand(jmp_reg, Code::kHeaderSize));
+ __ jmp(jmp_reg);
// rdi: called object
// rax: number of arguments
@@ -3856,7 +2735,7 @@ bool CEntryStub::NeedsImmovableCode() {
}
-bool CEntryStub::IsPregenerated() {
+bool CEntryStub::IsPregenerated(Isolate* isolate) {
#ifdef _WIN64
return result_size_ == 1;
#else
@@ -3865,23 +2744,40 @@ bool CEntryStub::IsPregenerated() {
}
-void CodeStub::GenerateStubsAheadOfTime() {
- CEntryStub::GenerateAheadOfTime();
- StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime();
+void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
+ CEntryStub::GenerateAheadOfTime(isolate);
+ StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
+ StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
// It is important that the store buffer overflow stubs are generated first.
- RecordWriteStub::GenerateFixedRegStubsAheadOfTime();
+ RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate);
+ ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
+ CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
+ BinaryOpStub::GenerateAheadOfTime(isolate);
}
-void CodeStub::GenerateFPStubs() {
+void CodeStub::GenerateFPStubs(Isolate* isolate) {
}
-void CEntryStub::GenerateAheadOfTime() {
+void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
CEntryStub stub(1, kDontSaveFPRegs);
- stub.GetCode()->set_is_pregenerated(true);
+ stub.GetCode(isolate)->set_is_pregenerated(true);
CEntryStub save_doubles(1, kSaveFPRegs);
- save_doubles.GetCode()->set_is_pregenerated(true);
+ save_doubles.GetCode(isolate)->set_is_pregenerated(true);
+}
+
+
+static void JumpIfOOM(MacroAssembler* masm,
+ Register value,
+ Register scratch,
+ Label* oom_label) {
+ __ movq(scratch, value);
+ STATIC_ASSERT(Failure::OUT_OF_MEMORY_EXCEPTION == 3);
+ STATIC_ASSERT(kFailureTag == 3);
+ __ and_(scratch, Immediate(0xf));
+ __ cmpq(scratch, Immediate(0xf));
+ __ j(equal, oom_label);
}
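The 0xf mask works because, per the STATIC_ASSERTs above, a failure pointer carries kFailureTag (3) in its low two bits and the failure type in the next two, so only an out-of-memory failure has all four low bits set. A hedged C++ equivalent of the predicate (the bit layout beyond what the asserts state is an assumption):

#include <cstdint>

constexpr uint64_t kFailureTagValue = 3;   // low two bits mark a Failure
constexpr uint64_t kOutOfMemoryType = 3;   // next two bits give the failure type

inline bool IsOutOfMemoryFailure(uint64_t value) {
  // tag == 3 and type == 3, i.e. the low four bits are all set (0xf).
  return (value & 0xf) == ((kOutOfMemoryType << 2) | kFailureTagValue);
}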
@@ -3914,14 +2810,10 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// PerformGC. No need to use PrepareCallCFunction/CallCFunction here as the
// stack is known to be aligned. This function takes one argument which is
// passed in register.
-#ifdef _WIN64
- __ movq(rcx, rax);
-#else // _WIN64
- __ movq(rdi, rax);
-#endif
+ __ movq(arg_reg_2, ExternalReference::isolate_address(masm->isolate()));
+ __ movq(arg_reg_1, rax);
__ movq(kScratchRegister,
- FUNCTION_ADDR(Runtime::PerformGC),
- RelocInfo::RUNTIME_ENTRY);
+ ExternalReference::perform_gc_function(masm->isolate()));
__ call(kScratchRegister);
}
@@ -3934,29 +2826,30 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// Call C function.
#ifdef _WIN64
- // Windows 64-bit ABI passes arguments in rcx, rdx, r8, r9
- // Store Arguments object on stack, below the 4 WIN64 ABI parameter slots.
- __ movq(StackSpaceOperand(0), r14); // argc.
- __ movq(StackSpaceOperand(1), r15); // argv.
+ // Windows 64-bit ABI passes arguments in rcx, rdx, r8, r9.
+ // Pass argc and argv as two parameters. The arguments object will
+ // be created by stubs declared by DECLARE_RUNTIME_FUNCTION().
if (result_size_ < 2) {
// Pass a pointer to the Arguments object as the first argument.
// Return result in single register (rax).
- __ lea(rcx, StackSpaceOperand(0));
- __ LoadAddress(rdx, ExternalReference::isolate_address());
+ __ movq(rcx, r14); // argc.
+ __ movq(rdx, r15); // argv.
+ __ movq(r8, ExternalReference::isolate_address(masm->isolate()));
} else {
ASSERT_EQ(2, result_size_);
// Pass a pointer to the result location as the first argument.
__ lea(rcx, StackSpaceOperand(2));
// Pass a pointer to the Arguments object as the second argument.
- __ lea(rdx, StackSpaceOperand(0));
- __ LoadAddress(r8, ExternalReference::isolate_address());
+ __ movq(rdx, r14); // argc.
+ __ movq(r8, r15); // argv.
+ __ movq(r9, ExternalReference::isolate_address(masm->isolate()));
}
#else // _WIN64
// GCC passes arguments in rdi, rsi, rdx, rcx, r8, r9.
__ movq(rdi, r14); // argc.
__ movq(rsi, r15); // argv.
- __ movq(rdx, ExternalReference::isolate_address());
+ __ movq(rdx, ExternalReference::isolate_address(masm->isolate()));
#endif
__ call(rbx);
// Result is in rax - do not destroy this register!
@@ -3999,16 +2892,21 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
__ j(zero, &retry, Label::kNear);
// Special handling of out of memory exceptions.
- __ movq(kScratchRegister, Failure::OutOfMemoryException(), RelocInfo::NONE);
- __ cmpq(rax, kScratchRegister);
- __ j(equal, throw_out_of_memory_exception);
+ JumpIfOOM(masm, rax, kScratchRegister, throw_out_of_memory_exception);
- // Retrieve the pending exception and clear the variable.
+ // Retrieve the pending exception.
ExternalReference pending_exception_address(
Isolate::kPendingExceptionAddress, masm->isolate());
Operand pending_exception_operand =
masm->ExternalOperand(pending_exception_address);
__ movq(rax, pending_exception_operand);
+
+ // See if we just retrieved an OOM exception.
+ JumpIfOOM(masm, rax, kScratchRegister, throw_out_of_memory_exception);
+
+ // Clear the pending exception.
+ pending_exception_operand =
+ masm->ExternalOperand(pending_exception_address);
__ LoadRoot(rdx, Heap::kTheHoleValueRootIndex);
__ movq(pending_exception_operand, rdx);
@@ -4037,6 +2935,8 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// this by performing a garbage collection and retrying the
// builtin once.
+ ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
// Enter the exit frame that transitions from JavaScript to C++.
#ifdef _WIN64
int arg_stack_space = (result_size_ < 2 ? 2 : 4);
@@ -4079,7 +2979,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// Do full GC and retry runtime call one final time.
Failure* failure = Failure::InternalError();
- __ movq(rax, failure, RelocInfo::NONE);
+ __ movq(rax, failure, RelocInfo::NONE64);
GenerateCore(masm,
&throw_normal_exception,
&throw_termination_exception,
@@ -4098,7 +2998,10 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// Set pending exception and rax to out of memory exception.
ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
isolate);
- __ movq(rax, Failure::OutOfMemoryException(), RelocInfo::NONE);
+ Label already_have_failure;
+ JumpIfOOM(masm, rax, kScratchRegister, &already_have_failure);
+ __ movq(rax, Failure::OutOfMemoryException(0x1), RelocInfo::NONE64);
+ __ bind(&already_have_failure);
__ Store(pending_exception, rax);
// Fall through to the next label.
@@ -4113,6 +3016,9 @@ void CEntryStub::Generate(MacroAssembler* masm) {
void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
Label invoke, handler_entry, exit;
Label not_outermost_js, not_outermost_js_2;
+
+ ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
{ // NOLINT. Scope block confuses linter.
MacroAssembler::NoRootArrayScope uninitialized_root_register(masm);
// Set up frame.
@@ -4126,7 +3032,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// Cannot use smi-register for loading yet.
__ movq(kScratchRegister,
reinterpret_cast<uint64_t>(Smi::FromInt(marker)),
- RelocInfo::NONE);
+ RelocInfo::NONE64);
__ push(kScratchRegister); // context slot
__ push(kScratchRegister); // function slot
// Save callee-saved registers (X64/Win64 calling conventions).
@@ -4139,8 +3045,21 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ push(rsi); // Only callee save in Win64 ABI, argument in AMD64 ABI.
#endif
__ push(rbx);
- // TODO(X64): On Win64, if we ever use XMM6-XMM15, the low low 64 bits are
- // callee save as well.
+
+#ifdef _WIN64
+ // On Win64 XMM6-XMM15 are callee-save
+ __ subq(rsp, Immediate(EntryFrameConstants::kXMMRegistersBlockSize));
+ __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 0), xmm6);
+ __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 1), xmm7);
+ __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 2), xmm8);
+ __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 3), xmm9);
+ __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 4), xmm10);
+ __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 5), xmm11);
+ __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 6), xmm12);
+ __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 7), xmm13);
+ __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 8), xmm14);
+ __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 9), xmm15);
+#endif
// Set up the roots and smi constant registers.
// Needs to be done before any further smi loads.
@@ -4181,7 +3100,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
isolate);
__ Store(pending_exception, rax);
- __ movq(rax, Failure::Exception(), RelocInfo::NONE);
+ __ movq(rax, Failure::Exception(), RelocInfo::NONE64);
__ jmp(&exit);
// Invoke: Link this frame into the handler chain. There's only one
@@ -4230,6 +3149,21 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
}
// Restore callee-saved registers (X64 conventions).
+#ifdef _WIN64
+ // On Win64 XMM6-XMM15 are callee-save
+ __ movdqu(xmm6, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 0));
+ __ movdqu(xmm7, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 1));
+ __ movdqu(xmm8, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 2));
+ __ movdqu(xmm9, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 3));
+ __ movdqu(xmm10, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 4));
+ __ movdqu(xmm11, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 5));
+ __ movdqu(xmm12, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 6));
+ __ movdqu(xmm13, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 7));
+ __ movdqu(xmm14, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 8));
+ __ movdqu(xmm15, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 9));
+ __ addq(rsp, Immediate(EntryFrameConstants::kXMMRegistersBlockSize));
+#endif
+
__ pop(rbx);
#ifdef _WIN64
  // Callee save in Win64 ABI, arguments/volatile in AMD64 ABI.
@@ -4251,14 +3185,14 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
void InstanceofStub::Generate(MacroAssembler* masm) {
// Implements "value instanceof function" operator.
// Expected input state with no inline cache:
- // rsp[0] : return address
- // rsp[1] : function pointer
- // rsp[2] : value
+ // rsp[0] : return address
+ // rsp[8] : function pointer
+ // rsp[16] : value
// Expected input state with an inline one-element cache:
- // rsp[0] : return address
- // rsp[1] : offset from return address to location of inline cache
- // rsp[2] : function pointer
- // rsp[3] : value
+ // rsp[0] : return address
+ // rsp[8] : offset from return address to location of inline cache
+ // rsp[16] : function pointer
+ // rsp[24] : value
// Returns a bitwise zero to indicate that the value
  // is an instance of the function and anything else to
// indicate that the value is not an instance.
@@ -4267,7 +3201,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
static const int kOffsetToResultValue = 18;
// The last 4 bytes of the instruction sequence
// movq(rdi, FieldOperand(rax, HeapObject::kMapOffset))
- // Move(kScratchRegister, FACTORY->the_hole_value())
+ // Move(kScratchRegister, Factory::the_hole_value())
// in front of the hole value address.
static const unsigned int kWordBeforeMapCheckValue = 0xBA49FF78;
// The last 4 bytes of the instruction sequence
@@ -4277,12 +3211,13 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
static const unsigned int kWordBeforeResultValue = 0x458B4909;
// Only the inline check flag is supported on X64.
ASSERT(flags_ == kNoFlags || HasCallSiteInlineCheck());
- int extra_stack_space = HasCallSiteInlineCheck() ? kPointerSize : 0;
+ int extra_argument_offset = HasCallSiteInlineCheck() ? 1 : 0;
// Get the object - go slow case if it's a smi.
Label slow;
-
- __ movq(rax, Operand(rsp, 2 * kPointerSize + extra_stack_space));
+ StackArgumentsAccessor args(rsp, 2 + extra_argument_offset,
+ ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ __ movq(rax, args.GetArgumentOperand(0));
__ JumpIfSmi(rax, &slow);
// Check that the left hand is a JS object. Leave its map in rax.
@@ -4292,7 +3227,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ j(above, &slow);
// Get the prototype of the function.
- __ movq(rdx, Operand(rsp, 1 * kPointerSize + extra_stack_space));
+ __ movq(rdx, args.GetArgumentOperand(1));
// rdx is function, rax is map.
// If there is a call site cache don't look in the global cache, but do the
@@ -4327,12 +3262,12 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ StoreRoot(rax, Heap::kInstanceofCacheMapRootIndex);
} else {
// Get return address and delta to inlined map check.
- __ movq(kScratchRegister, Operand(rsp, 0 * kPointerSize));
- __ subq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
+ __ movq(kScratchRegister, StackOperandForReturnAddress(0));
+ __ subq(kScratchRegister, args.GetArgumentOperand(2));
if (FLAG_debug_code) {
__ movl(rdi, Immediate(kWordBeforeMapCheckValue));
__ cmpl(Operand(kScratchRegister, kOffsetToMapCheckValue - 4), rdi);
- __ Assert(equal, "InstanceofStub unexpected call site cache (check).");
+ __ Assert(equal, kInstanceofStubUnexpectedCallSiteCacheCheck);
}
__ movq(kScratchRegister,
Operand(kScratchRegister, kOffsetToMapCheckValue));
@@ -4368,17 +3303,17 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
// Assert it is a 1-byte signed value.
ASSERT(true_offset >= 0 && true_offset < 0x100);
__ movl(rax, Immediate(true_offset));
- __ movq(kScratchRegister, Operand(rsp, 0 * kPointerSize));
- __ subq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
+ __ movq(kScratchRegister, StackOperandForReturnAddress(0));
+ __ subq(kScratchRegister, args.GetArgumentOperand(2));
__ movb(Operand(kScratchRegister, kOffsetToResultValue), rax);
if (FLAG_debug_code) {
__ movl(rax, Immediate(kWordBeforeResultValue));
__ cmpl(Operand(kScratchRegister, kOffsetToResultValue - 4), rax);
- __ Assert(equal, "InstanceofStub unexpected call site cache (mov).");
+ __ Assert(equal, kInstanceofStubUnexpectedCallSiteCacheMov);
}
__ Set(rax, 0);
}
- __ ret(2 * kPointerSize + extra_stack_space);
+ __ ret((2 + extra_argument_offset) * kPointerSize);
__ bind(&is_not_instance);
if (!HasCallSiteInlineCheck()) {
@@ -4391,24 +3326,24 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
// Assert it is a 1-byte signed value.
ASSERT(false_offset >= 0 && false_offset < 0x100);
__ movl(rax, Immediate(false_offset));
- __ movq(kScratchRegister, Operand(rsp, 0 * kPointerSize));
- __ subq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
+ __ movq(kScratchRegister, StackOperandForReturnAddress(0));
+ __ subq(kScratchRegister, args.GetArgumentOperand(2));
__ movb(Operand(kScratchRegister, kOffsetToResultValue), rax);
if (FLAG_debug_code) {
__ movl(rax, Immediate(kWordBeforeResultValue));
__ cmpl(Operand(kScratchRegister, kOffsetToResultValue - 4), rax);
- __ Assert(equal, "InstanceofStub unexpected call site cache (mov)");
+ __ Assert(equal, kInstanceofStubUnexpectedCallSiteCacheMov);
}
}
- __ ret(2 * kPointerSize + extra_stack_space);
+ __ ret((2 + extra_argument_offset) * kPointerSize);
// Slow-case: Go through the JavaScript implementation.
__ bind(&slow);
if (HasCallSiteInlineCheck()) {
// Remove extra value from the stack.
- __ pop(rcx);
+ __ PopReturnAddressTo(rcx);
__ pop(rax);
- __ push(rcx);
+ __ PushReturnAddressFrom(rcx);
}
__ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
}
@@ -4421,44 +3356,6 @@ Register InstanceofStub::left() { return no_reg; }
Register InstanceofStub::right() { return no_reg; }
-int CompareStub::MinorKey() {
- // Encode the three parameters in a unique 16 bit value. To avoid duplicate
- // stubs the never NaN NaN condition is only taken into account if the
- // condition is equals.
- ASSERT(static_cast<unsigned>(cc_) < (1 << 12));
- ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
- return ConditionField::encode(static_cast<unsigned>(cc_))
- | RegisterField::encode(false) // lhs_ and rhs_ are not used
- | StrictField::encode(strict_)
- | NeverNanNanField::encode(cc_ == equal ? never_nan_nan_ : false)
- | IncludeNumberCompareField::encode(include_number_compare_)
- | IncludeSmiCompareField::encode(include_smi_compare_);
-}
-
-
-// Unfortunately you have to run without snapshots to see most of these
-// names in the profile since most compare stubs end up in the snapshot.
-void CompareStub::PrintName(StringStream* stream) {
- ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
- const char* cc_name;
- switch (cc_) {
- case less: cc_name = "LT"; break;
- case greater: cc_name = "GT"; break;
- case less_equal: cc_name = "LE"; break;
- case greater_equal: cc_name = "GE"; break;
- case equal: cc_name = "EQ"; break;
- case not_equal: cc_name = "NE"; break;
- default: cc_name = "UnknownCondition"; break;
- }
- bool is_equality = cc_ == equal || cc_ == not_equal;
- stream->Add("CompareStub_%s", cc_name);
- if (strict_ && is_equality) stream->Add("_STRICT");
- if (never_nan_nan_ && is_equality) stream->Add("_NO_NAN");
- if (!include_number_compare_) stream->Add("_NO_NUMBER");
- if (!include_smi_compare_) stream->Add("_NO_SMI");
-}
-
-
// -------------------------------------------------------------------------
// StringCharCodeAtGenerator
@@ -4499,7 +3396,7 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
void StringCharCodeAtGenerator::GenerateSlow(
MacroAssembler* masm,
const RuntimeCallHelper& call_helper) {
- __ Abort("Unexpected fallthrough to CharCodeAt slow case");
+ __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
Factory* factory = masm->isolate()->factory();
// Index is not a smi.
@@ -4549,7 +3446,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
call_helper.AfterCall(masm);
__ jmp(&exit_);
- __ Abort("Unexpected fallthrough from CharCodeAt slow case");
+ __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
}
@@ -4559,7 +3456,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
// Fast case of Heap::LookupSingleCharacterStringFromCode.
__ JumpIfNotSmi(code_, &slow_case_);
- __ SmiCompare(code_, Smi::FromInt(String::kMaxAsciiCharCode));
+ __ SmiCompare(code_, Smi::FromInt(String::kMaxOneByteCharCode));
__ j(above, &slow_case_);
__ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
@@ -4575,7 +3472,7 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
void StringCharFromCodeGenerator::GenerateSlow(
MacroAssembler* masm,
const RuntimeCallHelper& call_helper) {
- __ Abort("Unexpected fallthrough to CharFromCode slow case");
+ __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
__ bind(&slow_case_);
call_helper.BeforeCall(masm);
@@ -4587,24 +3484,7 @@ void StringCharFromCodeGenerator::GenerateSlow(
call_helper.AfterCall(masm);
__ jmp(&exit_);
- __ Abort("Unexpected fallthrough from CharFromCode slow case");
-}
-
-
-// -------------------------------------------------------------------------
-// StringCharAtGenerator
-
-void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
- char_code_at_generator_.GenerateFast(masm);
- char_from_code_generator_.GenerateFast(masm);
-}
-
-
-void StringCharAtGenerator::GenerateSlow(
- MacroAssembler* masm,
- const RuntimeCallHelper& call_helper) {
- char_code_at_generator_.GenerateSlow(masm, call_helper);
- char_from_code_generator_.GenerateSlow(masm, call_helper);
+ __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
}
@@ -4613,11 +3493,16 @@ void StringAddStub::Generate(MacroAssembler* masm) {
Builtins::JavaScript builtin_id = Builtins::ADD;
// Load the two arguments.
- __ movq(rax, Operand(rsp, 2 * kPointerSize)); // First argument (left).
- __ movq(rdx, Operand(rsp, 1 * kPointerSize)); // Second argument (right).
+ StackArgumentsAccessor args(rsp, 2, ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ __ movq(rax, args.GetArgumentOperand(0)); // First argument (left).
+ __ movq(rdx, args.GetArgumentOperand(1)); // Second argument (right).
// Make sure that both arguments are strings if not known in advance.
- if (flags_ == NO_STRING_ADD_FLAGS) {
+ // Otherwise, at least one of the arguments is definitely a string,
+ // and we convert the one that is not known to be a string.
+ if ((flags_ & STRING_ADD_CHECK_BOTH) == STRING_ADD_CHECK_BOTH) {
+ ASSERT((flags_ & STRING_ADD_CHECK_LEFT) == STRING_ADD_CHECK_LEFT);
+ ASSERT((flags_ & STRING_ADD_CHECK_RIGHT) == STRING_ADD_CHECK_RIGHT);
__ JumpIfSmi(rax, &call_runtime);
__ CmpObjectType(rax, FIRST_NONSTRING_TYPE, r8);
__ j(above_equal, &call_runtime);
@@ -4626,20 +3511,16 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ JumpIfSmi(rdx, &call_runtime);
__ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, r9);
__ j(above_equal, &call_runtime);
- } else {
- // Here at least one of the arguments is definitely a string.
- // We convert the one that is not known to be a string.
- if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) {
- ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0);
- GenerateConvertArgument(masm, 2 * kPointerSize, rax, rbx, rcx, rdi,
- &call_builtin);
- builtin_id = Builtins::STRING_ADD_RIGHT;
- } else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) {
- ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0);
- GenerateConvertArgument(masm, 1 * kPointerSize, rdx, rbx, rcx, rdi,
- &call_builtin);
- builtin_id = Builtins::STRING_ADD_LEFT;
- }
+ } else if ((flags_ & STRING_ADD_CHECK_LEFT) == STRING_ADD_CHECK_LEFT) {
+ ASSERT((flags_ & STRING_ADD_CHECK_RIGHT) == 0);
+ GenerateConvertArgument(masm, 2 * kPointerSize, rax, rbx, rcx, rdi,
+ &call_builtin);
+ builtin_id = Builtins::STRING_ADD_RIGHT;
+ } else if ((flags_ & STRING_ADD_CHECK_RIGHT) == STRING_ADD_CHECK_RIGHT) {
+ ASSERT((flags_ & STRING_ADD_CHECK_LEFT) == 0);
+ GenerateConvertArgument(masm, 1 * kPointerSize, rdx, rbx, rcx, rdi,
+ &call_builtin);
+ builtin_id = Builtins::STRING_ADD_LEFT;
}
// Both arguments are strings.
@@ -4675,7 +3556,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
  // If arguments were known to be strings, maps are not loaded to r8 and r9
// by the code above.
- if (flags_ != NO_STRING_ADD_FLAGS) {
+ if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) {
__ movq(r8, FieldOperand(rax, HeapObject::kMapOffset));
__ movq(r9, FieldOperand(rdx, HeapObject::kMapOffset));
}
@@ -4686,8 +3567,8 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// Look at the length of the result of adding the two strings.
STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue / 2);
__ SmiAdd(rbx, rbx, rcx);
- // Use the symbol table when adding two one character strings, as it
- // helps later optimizations to return a symbol here.
+ // Use the string table when adding two one-character strings, as it
+ // helps later optimizations to return an internalized string here.
__ SmiCompare(rbx, Smi::FromInt(2));
__ j(not_equal, &longer_than_two);
@@ -4696,13 +3577,13 @@ void StringAddStub::Generate(MacroAssembler* masm) {
&call_runtime);
// Get the two characters forming the sub string.
- __ movzxbq(rbx, FieldOperand(rax, SeqAsciiString::kHeaderSize));
- __ movzxbq(rcx, FieldOperand(rdx, SeqAsciiString::kHeaderSize));
+ __ movzxbq(rbx, FieldOperand(rax, SeqOneByteString::kHeaderSize));
+ __ movzxbq(rcx, FieldOperand(rdx, SeqOneByteString::kHeaderSize));
- // Try to lookup two character string in symbol table. If it is not found
+ // Try to look up two character string in string table. If it is not found
// just allocate a new one.
Label make_two_character_string, make_flat_ascii_string;
- StringHelper::GenerateTwoCharacterSymbolTableProbe(
+ StringHelper::GenerateTwoCharacterStringTableProbe(
masm, rbx, rcx, r14, r11, rdi, r15, &make_two_character_string);
__ IncrementCounter(counters->string_add_native(), 1);
__ ret(2 * kPointerSize);
@@ -4713,11 +3594,11 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// rbx - first byte: first character
// rbx - second byte: *maybe* second character
// Make sure that the second byte of rbx contains the second character.
- __ movzxbq(rcx, FieldOperand(rdx, SeqAsciiString::kHeaderSize));
+ __ movzxbq(rcx, FieldOperand(rdx, SeqOneByteString::kHeaderSize));
__ shll(rcx, Immediate(kBitsPerByte));
__ orl(rbx, rcx);
// Write both characters to the new string.
- __ movw(FieldOperand(rax, SeqAsciiString::kHeaderSize), rbx);
+ __ movw(FieldOperand(rax, SeqOneByteString::kHeaderSize), rbx);
__ IncrementCounter(counters->string_add_native(), 1);
__ ret(2 * kPointerSize);
@@ -4740,7 +3621,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
Label non_ascii, allocated, ascii_data;
__ movl(rcx, r8);
__ and_(rcx, r9);
- STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
+ STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
__ testl(rcx, Immediate(kStringEncodingMask));
__ j(zero, &non_ascii);
@@ -4752,23 +3633,49 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ movq(FieldOperand(rcx, ConsString::kLengthOffset), rbx);
__ movq(FieldOperand(rcx, ConsString::kHashFieldOffset),
Immediate(String::kEmptyHashField));
+
+ Label skip_write_barrier, after_writing;
+ ExternalReference high_promotion_mode = ExternalReference::
+ new_space_high_promotion_mode_active_address(masm->isolate());
+ __ Load(rbx, high_promotion_mode);
+ __ testb(rbx, Immediate(1));
+ __ j(zero, &skip_write_barrier);
+
+ __ movq(FieldOperand(rcx, ConsString::kFirstOffset), rax);
+ __ RecordWriteField(rcx,
+ ConsString::kFirstOffset,
+ rax,
+ rbx,
+ kDontSaveFPRegs);
+ __ movq(FieldOperand(rcx, ConsString::kSecondOffset), rdx);
+ __ RecordWriteField(rcx,
+ ConsString::kSecondOffset,
+ rdx,
+ rbx,
+ kDontSaveFPRegs);
+ __ jmp(&after_writing);
+
+ __ bind(&skip_write_barrier);
__ movq(FieldOperand(rcx, ConsString::kFirstOffset), rax);
__ movq(FieldOperand(rcx, ConsString::kSecondOffset), rdx);
+
+ __ bind(&after_writing);
+
__ movq(rax, rcx);
__ IncrementCounter(counters->string_add_native(), 1);
__ ret(2 * kPointerSize);
__ bind(&non_ascii);
// At least one of the strings is two-byte. Check whether it happens
- // to contain only ASCII characters.
+ // to contain only one-byte characters.
// rcx: first instance type AND second instance type.
// r8: first instance type.
// r9: second instance type.
- __ testb(rcx, Immediate(kAsciiDataHintMask));
+ __ testb(rcx, Immediate(kOneByteDataHintMask));
__ j(not_zero, &ascii_data);
__ xor_(r8, r9);
- STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
- __ andb(r8, Immediate(kAsciiStringTag | kAsciiDataHintTag));
- __ cmpb(r8, Immediate(kAsciiStringTag | kAsciiDataHintTag));
+ STATIC_ASSERT(kOneByteStringTag != 0 && kOneByteDataHintTag != 0);
+ __ andb(r8, Immediate(kOneByteStringTag | kOneByteDataHintTag));
+ __ cmpb(r8, Immediate(kOneByteStringTag | kOneByteDataHintTag));
__ j(equal, &ascii_data);
// Allocate a two byte cons string.
__ AllocateTwoByteConsString(rcx, rdi, no_reg, &call_runtime);
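The block added above makes the stores of the two operand strings into the fresh ConsString conditional on the new-space "high promotion mode" flag. The apparent reasoning: normally the cons string is a new-space object and storing other new-space values into it needs no remembered-set entry, but while high promotion mode is active the allocation may land directly in old space, so the same stores must go through the write barrier. Roughly the decision being encoded, in plain C++ (names other than the external reference are illustrative):

    // Illustrative only.
    bool high_promotion_active =
        (*new_space_high_promotion_mode_active_address & 1) != 0;
    if (high_promotion_active) {
      // The ConsString may live in old space: record both stores.
      result->set_first(left);
      RecordWrite(result, ConsString::kFirstOffset, left);
      result->set_second(right);
      RecordWrite(result, ConsString::kSecondOffset, right);
    } else {
      // New-space allocation: plain stores are fine.
      result->set_first(left);
      result->set_second(right);
    }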
@@ -4799,8 +3706,8 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ movq(rcx, FieldOperand(rax, ExternalString::kResourceDataOffset));
__ jmp(&first_prepared, Label::kNear);
__ bind(&first_is_sequential);
- STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
- __ lea(rcx, FieldOperand(rax, SeqAsciiString::kHeaderSize));
+ STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
+ __ lea(rcx, FieldOperand(rax, SeqOneByteString::kHeaderSize));
__ bind(&first_prepared);
// Check whether both strings have same encoding.
@@ -4820,8 +3727,8 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ movq(rdx, FieldOperand(rdx, ExternalString::kResourceDataOffset));
__ jmp(&second_prepared, Label::kNear);
__ bind(&second_is_sequential);
- STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
- __ lea(rdx, FieldOperand(rdx, SeqAsciiString::kHeaderSize));
+ STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
+ __ lea(rdx, FieldOperand(rdx, SeqOneByteString::kHeaderSize));
__ bind(&second_prepared);
Label non_ascii_string_add_flat_result;
@@ -4837,7 +3744,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ AllocateAsciiString(rax, rbx, rdi, r8, r9, &call_runtime);
// rax: result string
// Locate first character of result.
- __ lea(rbx, FieldOperand(rax, SeqAsciiString::kHeaderSize));
+ __ lea(rbx, FieldOperand(rax, SeqOneByteString::kHeaderSize));
// rcx: first char of first string
// rbx: first character of result
// r14: length of first string
@@ -4877,6 +3784,21 @@ void StringAddStub::Generate(MacroAssembler* masm) {
}
+void StringAddStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
+ __ push(rax);
+ __ push(rdx);
+}
+
+
+void StringAddStub::GenerateRegisterArgsPop(MacroAssembler* masm,
+ Register temp) {
+ __ PopReturnAddressTo(temp);
+ __ pop(rdx);
+ __ pop(rax);
+ __ PushReturnAddressFrom(temp);
+}
+
+
void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
int stack_offset,
Register arg,
@@ -4891,31 +3813,11 @@ void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
__ j(below, &done);
// Check the number to string cache.
- Label not_cached;
__ bind(&not_string);
// Puts the cached result into scratch1.
- NumberToStringStub::GenerateLookupNumberStringCache(masm,
- arg,
- scratch1,
- scratch2,
- scratch3,
- false,
- &not_cached);
+ __ LookupNumberStringCache(arg, scratch1, scratch2, scratch3, slow);
__ movq(arg, scratch1);
__ movq(Operand(rsp, stack_offset), arg);
- __ jmp(&done);
-
- // Check if the argument is a safe string wrapper.
- __ bind(&not_cached);
- __ JumpIfSmi(arg, slow);
- __ CmpObjectType(arg, JS_VALUE_TYPE, scratch1); // map -> scratch1.
- __ j(not_equal, slow);
- __ testb(FieldOperand(scratch1, Map::kBitField2Offset),
- Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
- __ j(zero, slow);
- __ movq(arg, FieldOperand(arg, JSValue::kValueOffset));
- __ movq(Operand(rsp, stack_offset), arg);
-
__ bind(&done);
}
@@ -4971,17 +3873,17 @@ void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
// Don't enter the rep movs if there are fewer than kPointerSize bytes to copy.
Label last_bytes;
- __ testl(count, Immediate(~7));
+ __ testl(count, Immediate(~(kPointerSize - 1)));
__ j(zero, &last_bytes, Label::kNear);
// Copy from edi to esi using rep movs instruction.
__ movl(kScratchRegister, count);
- __ shr(count, Immediate(3)); // Number of doublewords to copy.
+ __ shr(count, Immediate(kPointerSizeLog2)); // Number of quadwords to copy.
__ repmovsq();
// Find number of bytes left.
__ movl(count, kScratchRegister);
- __ and_(count, Immediate(7));
+ __ and_(count, Immediate(kPointerSize - 1));
// Check if there are more bytes to copy.
__ bind(&last_bytes);
@@ -5001,7 +3903,7 @@ void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
__ bind(&done);
}
-void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
+void StringHelper::GenerateTwoCharacterStringTableProbe(MacroAssembler* masm,
Register c1,
Register c2,
Register scratch1,
@@ -5013,7 +3915,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
Register scratch = scratch3;
// Make sure that both characters are not digits, as such strings have a
- // different hash algorithm. Don't try to look for these in the symbol table.
+ // different hash algorithm. Don't try to look for these in the string table.
Label not_array_index;
__ leal(scratch, Operand(c1, -'0'));
__ cmpl(scratch, Immediate(static_cast<int>('9' - '0')));
@@ -5037,14 +3939,14 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
// chars: two character string, char 1 in byte 0 and char 2 in byte 1.
// hash: hash of two character string.
- // Load the symbol table.
- Register symbol_table = c2;
- __ LoadRoot(symbol_table, Heap::kSymbolTableRootIndex);
+ // Load the string table.
+ Register string_table = c2;
+ __ LoadRoot(string_table, Heap::kStringTableRootIndex);
- // Calculate capacity mask from the symbol table capacity.
+ // Calculate capacity mask from the string table capacity.
Register mask = scratch2;
__ SmiToInteger32(mask,
- FieldOperand(symbol_table, SymbolTable::kCapacityOffset));
+ FieldOperand(string_table, StringTable::kCapacityOffset));
__ decl(mask);
Register map = scratch4;
@@ -5052,31 +3954,31 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
// Registers
// chars: two character string, char 1 in byte 0 and char 2 in byte 1.
// hash: hash of two character string (32-bit int)
- // symbol_table: symbol table
+ // string_table: string table
// mask: capacity mask (32-bit int)
// map: -
// scratch: -
- // Perform a number of probes in the symbol table.
+ // Perform a number of probes in the string table.
static const int kProbes = 4;
- Label found_in_symbol_table;
+ Label found_in_string_table;
Label next_probe[kProbes];
Register candidate = scratch; // Scratch register contains candidate.
for (int i = 0; i < kProbes; i++) {
- // Calculate entry in symbol table.
+ // Calculate entry in string table.
__ movl(scratch, hash);
if (i > 0) {
- __ addl(scratch, Immediate(SymbolTable::GetProbeOffset(i)));
+ __ addl(scratch, Immediate(StringTable::GetProbeOffset(i)));
}
__ andl(scratch, mask);
- // Load the entry from the symbol table.
- STATIC_ASSERT(SymbolTable::kEntrySize == 1);
+ // Load the entry from the string table.
+ STATIC_ASSERT(StringTable::kEntrySize == 1);
__ movq(candidate,
- FieldOperand(symbol_table,
+ FieldOperand(string_table,
scratch,
times_pointer_size,
- SymbolTable::kElementsStartOffset));
+ StringTable::kElementsStartOffset));
// If entry is undefined no string with this hash can be found.
Label is_string;
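The probe loop above walks the string table with the hash of the two-character string, masking each probe by capacity - 1 and stepping by StringTable::GetProbeOffset(i). A compact sketch of the per-probe entry computation (the probe-offset formula itself is not shown in this hunk and is assumed to be V8's usual quadratic schedule; only the masking and the kEntrySize == 1 scaling are visible above):

    // Sketch; mirrors the movl/addl/andl sequence in the loop.
    uint32_t StringTableEntry(uint32_t hash, uint32_t capacity, int probe) {
      uint32_t mask = capacity - 1;  // capacity is a power of two
      uint32_t offset = (probe == 0) ? 0 : StringTable::GetProbeOffset(probe);
      return (hash + offset) & mask;  // index of the candidate slot
    }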
@@ -5089,7 +3991,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
if (FLAG_debug_code) {
__ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
__ cmpq(kScratchRegister, candidate);
- __ Assert(equal, "oddball in symbol table is not undefined or the hole");
+ __ Assert(equal, kOddballInStringTableIsNotUndefinedOrTheHole);
}
__ jmp(&next_probe[i]);
@@ -5110,10 +4012,10 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
temp, temp, &next_probe[i]);
// Check if the two characters match.
- __ movl(temp, FieldOperand(candidate, SeqAsciiString::kHeaderSize));
+ __ movl(temp, FieldOperand(candidate, SeqOneByteString::kHeaderSize));
__ andl(temp, Immediate(0x0000ffff));
__ cmpl(chars, temp);
- __ j(equal, &found_in_symbol_table);
+ __ j(equal, &found_in_string_table);
__ bind(&next_probe[i]);
}
@@ -5122,7 +4024,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
// Scratch register contains result when we fall through to here.
Register result = candidate;
- __ bind(&found_in_symbol_table);
+ __ bind(&found_in_string_table);
if (!result.is(rax)) {
__ movq(rax, result);
}
@@ -5187,22 +4089,28 @@ void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
__ bind(&hash_not_zero);
}
+
void SubStringStub::Generate(MacroAssembler* masm) {
Label runtime;
// Stack frame on entry.
- // rsp[0]: return address
- // rsp[8]: to
- // rsp[16]: from
- // rsp[24]: string
+ // rsp[0] : return address
+ // rsp[8] : to
+ // rsp[16] : from
+ // rsp[24] : string
+
+ enum SubStringStubArgumentIndices {
+ STRING_ARGUMENT_INDEX,
+ FROM_ARGUMENT_INDEX,
+ TO_ARGUMENT_INDEX,
+ SUB_STRING_ARGUMENT_COUNT
+ };
- const int kToOffset = 1 * kPointerSize;
- const int kFromOffset = kToOffset + kPointerSize;
- const int kStringOffset = kFromOffset + kPointerSize;
- const int kArgumentsSize = (kStringOffset + kPointerSize) - kToOffset;
+ StackArgumentsAccessor args(rsp, SUB_STRING_ARGUMENT_COUNT,
+ ARGUMENTS_DONT_CONTAIN_RECEIVER);
// Make sure first argument is a string.
- __ movq(rax, Operand(rsp, kStringOffset));
+ __ movq(rax, args.GetArgumentOperand(STRING_ARGUMENT_INDEX));
STATIC_ASSERT(kSmiTag == 0);
__ testl(rax, Immediate(kSmiTagMask));
__ j(zero, &runtime);
@@ -5212,8 +4120,8 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// rax: string
// rbx: instance type
// Calculate length of sub string using the smi values.
- __ movq(rcx, Operand(rsp, kToOffset));
- __ movq(rdx, Operand(rsp, kFromOffset));
+ __ movq(rcx, args.GetArgumentOperand(TO_ARGUMENT_INDEX));
+ __ movq(rdx, args.GetArgumentOperand(FROM_ARGUMENT_INDEX));
__ JumpUnlessBothNonNegativeSmi(rcx, rdx, &runtime);
__ SmiSub(rcx, rcx, rdx); // Overflow doesn't happen.
@@ -5226,8 +4134,13 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Return original string.
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->sub_string_native(), 1);
- __ ret(kArgumentsSize);
+ __ ret(SUB_STRING_ARGUMENT_COUNT * kPointerSize);
__ bind(&not_original_string);
+
+ Label single_char;
+ __ SmiCompare(rcx, Smi::FromInt(1));
+ __ j(equal, &single_char);
+
__ SmiToInteger32(rcx, rcx);
// rax: string
@@ -5248,7 +4161,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Cons string. Check whether it is flat, then fetch first part.
// Flat cons strings have an empty second part.
__ CompareRoot(FieldOperand(rax, ConsString::kSecondOffset),
- Heap::kEmptyStringRootIndex);
+ Heap::kempty_stringRootIndex);
__ j(not_equal, &runtime);
__ movq(rdi, FieldOperand(rax, ConsString::kFirstOffset));
// Update instance type.
@@ -5288,12 +4201,18 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// string's encoding is wrong because we always have to recheck encoding of
// the newly created string's parent anyway due to externalized strings.
Label two_byte_slice, set_slice_header;
- STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
+ STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
__ testb(rbx, Immediate(kStringEncodingMask));
- __ j(zero, &two_byte_slice, Label::kNear);
+ // Make long jumps when allocation tracking is on due to
+ // RecordObjectAllocation inside MacroAssembler::Allocate.
+ Label::Distance jump_distance =
+ masm->isolate()->heap_profiler()->is_tracking_allocations()
+ ? Label::kFar
+ : Label::kNear;
+ __ j(zero, &two_byte_slice, jump_distance);
__ AllocateAsciiSlicedString(rax, rbx, r14, &runtime);
- __ jmp(&set_slice_header, Label::kNear);
+ __ jmp(&set_slice_header, jump_distance);
__ bind(&two_byte_slice);
__ AllocateTwoByteSlicedString(rax, rbx, r14, &runtime);
__ bind(&set_slice_header);
@@ -5304,7 +4223,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ movq(FieldOperand(rax, SlicedString::kParentOffset), rdi);
__ movq(FieldOperand(rax, SlicedString::kOffsetOffset), rdx);
__ IncrementCounter(counters->sub_string_native(), 1);
- __ ret(kArgumentsSize);
+ __ ret(3 * kPointerSize);
__ bind(&copy_routine);
}
@@ -5328,11 +4247,11 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ j(not_zero, &runtime);
__ movq(rdi, FieldOperand(rdi, ExternalString::kResourceDataOffset));
// Move the pointer so that offset-wise, it looks like a sequential string.
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
__ subq(rdi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
__ bind(&sequential_string);
- STATIC_ASSERT((kAsciiStringTag & kStringEncodingMask) != 0);
+ STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
__ testb(rbx, Immediate(kStringEncodingMask));
__ j(zero, &two_byte_sequential);
@@ -5345,10 +4264,10 @@ void SubStringStub::Generate(MacroAssembler* masm) {
{ // Locate character of sub string start.
SmiIndex smi_as_index = masm->SmiToIndex(rdx, rdx, times_1);
__ lea(rsi, Operand(rdi, smi_as_index.reg, smi_as_index.scale,
- SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ SeqOneByteString::kHeaderSize - kHeapObjectTag));
}
// Locate first character of result.
- __ lea(rdi, FieldOperand(rax, SeqAsciiString::kHeaderSize));
+ __ lea(rdi, FieldOperand(rax, SeqOneByteString::kHeaderSize));
// rax: result string
// rcx: result length
@@ -5358,7 +4277,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
StringHelper::GenerateCopyCharactersREP(masm, rdi, rsi, rcx, true);
__ movq(rsi, r14); // Restore rsi.
__ IncrementCounter(counters->sub_string_native(), 1);
- __ ret(kArgumentsSize);
+ __ ret(SUB_STRING_ARGUMENT_COUNT * kPointerSize);
__ bind(&two_byte_sequential);
// Allocate the result.
@@ -5370,7 +4289,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
{ // Locate character of sub string start.
SmiIndex smi_as_index = masm->SmiToIndex(rdx, rdx, times_2);
__ lea(rsi, Operand(rdi, smi_as_index.reg, smi_as_index.scale,
- SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ SeqOneByteString::kHeaderSize - kHeapObjectTag));
}
// Locate first character of result.
__ lea(rdi, FieldOperand(rax, SeqTwoByteString::kHeaderSize));
@@ -5383,11 +4302,22 @@ void SubStringStub::Generate(MacroAssembler* masm) {
StringHelper::GenerateCopyCharactersREP(masm, rdi, rsi, rcx, false);
__ movq(rsi, r14); // Restore rsi.
__ IncrementCounter(counters->sub_string_native(), 1);
- __ ret(kArgumentsSize);
+ __ ret(SUB_STRING_ARGUMENT_COUNT * kPointerSize);
// Just jump to runtime to create the sub string.
__ bind(&runtime);
__ TailCallRuntime(Runtime::kSubString, 3, 1);
+
+ __ bind(&single_char);
+ // rax: string
+ // rbx: instance type
+ // rcx: sub string length (smi)
+ // rdx: from index (smi)
+ StringCharAtGenerator generator(
+ rax, rdx, rcx, rax, &runtime, &runtime, &runtime, STRING_INDEX_IS_NUMBER);
+ generator.GenerateFast(masm);
+ __ ret(SUB_STRING_ARGUMENT_COUNT * kPointerSize);
+ generator.SkipSlow(masm, &runtime);
}
@@ -5475,16 +4405,22 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
// Compare lengths (precomputed).
__ bind(&compare_lengths);
__ SmiTest(length_difference);
- __ j(not_zero, &result_not_equal, Label::kNear);
+ Label length_not_equal;
+ __ j(not_zero, &length_not_equal, Label::kNear);
// Result is EQUAL.
__ Move(rax, Smi::FromInt(EQUAL));
__ ret(0);
Label result_greater;
+ Label result_less;
+ __ bind(&length_not_equal);
+ __ j(greater, &result_greater, Label::kNear);
+ __ jmp(&result_less, Label::kNear);
__ bind(&result_not_equal);
// Unequal comparison of left to right, either character or length.
- __ j(greater, &result_greater, Label::kNear);
+ __ j(above, &result_greater, Label::kNear);
+ __ bind(&result_less);
// Result is LESS.
__ Move(rax, Smi::FromInt(LESS));
@@ -5510,9 +4446,9 @@ void StringCompareStub::GenerateAsciiCharsCompareLoop(
// doesn't need an additional compare.
__ SmiToInteger32(length, length);
__ lea(left,
- FieldOperand(left, length, times_1, SeqAsciiString::kHeaderSize));
+ FieldOperand(left, length, times_1, SeqOneByteString::kHeaderSize));
__ lea(right,
- FieldOperand(right, length, times_1, SeqAsciiString::kHeaderSize));
+ FieldOperand(right, length, times_1, SeqOneByteString::kHeaderSize));
__ neg(length);
Register index = length; // index = -length;
@@ -5531,12 +4467,13 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
Label runtime;
// Stack frame on entry.
- // rsp[0]: return address
- // rsp[8]: right string
- // rsp[16]: left string
+ // rsp[0] : return address
+ // rsp[8] : right string
+ // rsp[16] : left string
- __ movq(rdx, Operand(rsp, 2 * kPointerSize)); // left
- __ movq(rax, Operand(rsp, 1 * kPointerSize)); // right
+ StackArgumentsAccessor args(rsp, 2, ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ __ movq(rdx, args.GetArgumentOperand(0)); // left
+ __ movq(rax, args.GetArgumentOperand(1)); // right
// Check for identity.
Label not_same;
@@ -5555,9 +4492,9 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
// Inline comparison of ASCII strings.
__ IncrementCounter(counters->string_compare_native(), 1);
// Drop arguments from the stack
- __ pop(rcx);
+ __ PopReturnAddressTo(rcx);
__ addq(rsp, Immediate(2 * kPointerSize));
- __ push(rcx);
+ __ PushReturnAddressFrom(rcx);
GenerateCompareFlatAsciiStrings(masm, rdx, rax, rcx, rbx, rdi, r8);
// Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
@@ -5568,7 +4505,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::SMIS);
+ ASSERT(state_ == CompareIC::SMI);
Label miss;
__ JumpIfNotBothSmi(rdx, rax, &miss, Label::kNear);
@@ -5580,7 +4517,7 @@ void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
__ subq(rdx, rax);
__ j(no_overflow, &done, Label::kNear);
// Correct sign of result in case of overflow.
- __ SmiNot(rdx, rdx);
+ __ not_(rdx);
__ bind(&done);
__ movq(rax, rdx);
}
@@ -5591,24 +4528,42 @@ void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
}
-void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::HEAP_NUMBERS);
+void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
+ ASSERT(state_ == CompareIC::NUMBER);
Label generic_stub;
Label unordered, maybe_undefined1, maybe_undefined2;
Label miss;
- Condition either_smi = masm->CheckEitherSmi(rax, rdx);
- __ j(either_smi, &generic_stub, Label::kNear);
- __ CmpObjectType(rax, HEAP_NUMBER_TYPE, rcx);
+ if (left_ == CompareIC::SMI) {
+ __ JumpIfNotSmi(rdx, &miss);
+ }
+ if (right_ == CompareIC::SMI) {
+ __ JumpIfNotSmi(rax, &miss);
+ }
+
+ // Load left and right operands.
+ Label done, left, left_smi, right_smi;
+ __ JumpIfSmi(rax, &right_smi, Label::kNear);
+ __ CompareMap(rax, masm->isolate()->factory()->heap_number_map(), NULL);
__ j(not_equal, &maybe_undefined1, Label::kNear);
- __ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rcx);
+ __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
+ __ jmp(&left, Label::kNear);
+ __ bind(&right_smi);
+ __ SmiToInteger32(rcx, rax); // Can't clobber rax yet.
+ __ Cvtlsi2sd(xmm1, rcx);
+
+ __ bind(&left);
+ __ JumpIfSmi(rdx, &left_smi, Label::kNear);
+ __ CompareMap(rdx, masm->isolate()->factory()->heap_number_map(), NULL);
__ j(not_equal, &maybe_undefined2, Label::kNear);
-
- // Load left and right operand
__ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
- __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
+ __ jmp(&done);
+ __ bind(&left_smi);
+ __ SmiToInteger32(rcx, rdx); // Can't clobber rdx yet.
+ __ Cvtlsi2sd(xmm0, rcx);
+ __ bind(&done);
// Compare operands
__ ucomisd(xmm0, xmm1);
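The rewritten number-comparison IC now accepts a Smi on either side: each operand ends up in an XMM register either straight from the HeapNumber payload (movsd) or by converting the untagged Smi with Cvtlsi2sd. The equivalent logic, written out with a placeholder helper:

    // Placeholder helper; it restates the control flow above in plain C++.
    bool LoadAsDouble(Object* operand, double* out) {
      if (operand->IsSmi()) {                      // Cvtlsi2sd path
        *out = static_cast<double>(Smi::cast(operand)->value());
        return true;
      }
      if (!operand->IsHeapNumber()) return false;  // maybe_undefined path
      *out = HeapNumber::cast(operand)->value();   // movsd from kValueOffset
      return true;
    }
    // xmm1 <- rax, xmm0 <- rdx, then ucomisd(xmm0, xmm1) compares them.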
@@ -5624,14 +4579,16 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
__ ret(0);
__ bind(&unordered);
- CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS);
__ bind(&generic_stub);
- __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
+ ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC,
+ CompareIC::GENERIC);
+ __ jmp(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
__ bind(&maybe_undefined1);
if (Token::IsOrderedRelationalCompareOp(op_)) {
__ Cmp(rax, masm->isolate()->factory()->undefined_value());
__ j(not_equal, &miss);
+ __ JumpIfSmi(rdx, &unordered);
__ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rcx);
__ j(not_equal, &maybe_undefined2, Label::kNear);
__ jmp(&unordered);
@@ -5648,8 +4605,51 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
}
-void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::SYMBOLS);
+void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
+ ASSERT(state_ == CompareIC::INTERNALIZED_STRING);
+ ASSERT(GetCondition() == equal);
+
+ // Registers containing left and right operands respectively.
+ Register left = rdx;
+ Register right = rax;
+ Register tmp1 = rcx;
+ Register tmp2 = rbx;
+
+ // Check that both operands are heap objects.
+ Label miss;
+ Condition cond = masm->CheckEitherSmi(left, right, tmp1);
+ __ j(cond, &miss, Label::kNear);
+
+ // Check that both operands are internalized strings.
+ __ movq(tmp1, FieldOperand(left, HeapObject::kMapOffset));
+ __ movq(tmp2, FieldOperand(right, HeapObject::kMapOffset));
+ __ movzxbq(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
+ __ movzxbq(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
+ STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
+ __ or_(tmp1, tmp2);
+ __ testb(tmp1, Immediate(kIsNotStringMask | kIsNotInternalizedMask));
+ __ j(not_zero, &miss, Label::kNear);
+
+ // Internalized strings are compared by identity.
+ Label done;
+ __ cmpq(left, right);
+ // Make sure rax is non-zero. At this point input operands are
+ // guaranteed to be non-zero.
+ ASSERT(right.is(rax));
+ __ j(not_equal, &done, Label::kNear);
+ STATIC_ASSERT(EQUAL == 0);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ Move(rax, Smi::FromInt(EQUAL));
+ __ bind(&done);
+ __ ret(0);
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
+ ASSERT(state_ == CompareIC::UNIQUE_NAME);
ASSERT(GetCondition() == equal);
// Registers containing left and right operands respectively.
@@ -5663,17 +4663,17 @@ void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
Condition cond = masm->CheckEitherSmi(left, right, tmp1);
__ j(cond, &miss, Label::kNear);
- // Check that both operands are symbols.
+ // Check that both operands are unique names. This leaves the instance
+ // types loaded in tmp1 and tmp2.
__ movq(tmp1, FieldOperand(left, HeapObject::kMapOffset));
__ movq(tmp2, FieldOperand(right, HeapObject::kMapOffset));
__ movzxbq(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
__ movzxbq(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kSymbolTag != 0);
- __ and_(tmp1, tmp2);
- __ testb(tmp1, Immediate(kIsSymbolMask));
- __ j(zero, &miss, Label::kNear);
- // Symbols are compared by identity.
+ __ JumpIfNotUniqueName(tmp1, &miss, Label::kNear);
+ __ JumpIfNotUniqueName(tmp2, &miss, Label::kNear);
+
+ // Unique names are compared by identity.
Label done;
__ cmpq(left, right);
// Make sure rax is non-zero. At this point input operands are
@@ -5692,7 +4692,7 @@ void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::STRINGS);
+ ASSERT(state_ == CompareIC::STRING);
Label miss;
bool equality = Token::IsEqualityOp(op_);
@@ -5732,14 +4732,15 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
// Handle not identical strings.
__ bind(&not_same);
- // Check that both strings are symbols. If they are, we're done
- // because we already know they are not identical.
+ // Check that both strings are internalized strings. If they are, we're done
+ // because we already know they are not identical. We also know they are both
+ // strings.
if (equality) {
Label do_compare;
- STATIC_ASSERT(kSymbolTag != 0);
- __ and_(tmp1, tmp2);
- __ testb(tmp1, Immediate(kIsSymbolMask));
- __ j(zero, &do_compare, Label::kNear);
+ STATIC_ASSERT(kInternalizedTag == 0);
+ __ or_(tmp1, tmp2);
+ __ testb(tmp1, Immediate(kIsNotInternalizedMask));
+ __ j(not_zero, &do_compare, Label::kNear);
// Make sure rax is non-zero. At this point input operands are
// guaranteed to be non-zero.
ASSERT(right.is(rax));
@@ -5762,10 +4763,10 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
// Handle more complex cases in runtime.
__ bind(&runtime);
- __ pop(tmp1); // Return address.
+ __ PopReturnAddressTo(tmp1);
__ push(left);
__ push(right);
- __ push(tmp1);
+ __ PushReturnAddressFrom(tmp1);
if (equality) {
__ TailCallRuntime(Runtime::kStringEquals, 2, 1);
} else {
@@ -5778,7 +4779,7 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::OBJECTS);
+ ASSERT(state_ == CompareIC::OBJECT);
Label miss;
Condition either_smi = masm->CheckEitherSmi(rdx, rax);
__ j(either_smi, &miss, Label::kNear);
@@ -5842,12 +4843,13 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
}
-void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register properties,
- Handle<String> name,
- Register r0) {
+void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register properties,
+ Handle<Name> name,
+ Register r0) {
+ ASSERT(name->IsUniqueName());
// If names of slots in range from 1 to kProbes - 1 for the hash value are
// not equal to the name and kProbes-th slot is not used (its name is the
// undefined value), it guarantees the hash table doesn't contain the
@@ -5861,10 +4863,10 @@ void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
__ SmiToInteger32(index, FieldOperand(properties, kCapacityOffset));
__ decl(index);
__ and_(index,
- Immediate(name->Hash() + StringDictionary::GetProbeOffset(i)));
+ Immediate(name->Hash() + NameDictionary::GetProbeOffset(i)));
// Scale the index by multiplying by the entry size.
- ASSERT(StringDictionary::kEntrySize == 3);
+ ASSERT(NameDictionary::kEntrySize == 3);
__ lea(index, Operand(index, index, times_2, 0)); // index *= 3.
Register entity_name = r0;
@@ -5878,27 +4880,22 @@ void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
__ j(equal, done);
// Stop if found the property.
- __ Cmp(entity_name, Handle<String>(name));
+ __ Cmp(entity_name, Handle<Name>(name));
__ j(equal, miss);
- Label the_hole;
+ Label good;
// Check for the hole and skip.
__ CompareRoot(entity_name, Heap::kTheHoleValueRootIndex);
- __ j(equal, &the_hole, Label::kNear);
+ __ j(equal, &good, Label::kNear);
- // Check if the entry name is not a symbol.
+ // Check if the entry name is not a unique name.
__ movq(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
- __ testb(FieldOperand(entity_name, Map::kInstanceTypeOffset),
- Immediate(kIsSymbolMask));
- __ j(zero, miss);
-
- __ bind(&the_hole);
+ __ JumpIfNotUniqueName(FieldOperand(entity_name, Map::kInstanceTypeOffset),
+ miss);
+ __ bind(&good);
}
- StringDictionaryLookupStub stub(properties,
- r0,
- r0,
- StringDictionaryLookupStub::NEGATIVE_LOOKUP);
+ NameDictionaryLookupStub stub(properties, r0, r0, NEGATIVE_LOOKUP);
__ Push(Handle<Object>(name));
__ push(Immediate(name->Hash()));
__ CallStub(&stub);
@@ -5908,38 +4905,38 @@ void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
}
-// Probe the string dictionary in the |elements| register. Jump to the
+// Probe the name dictionary in the |elements| register. Jump to the
// |done| label if a property with the given name is found leaving the
// index into the dictionary in |r1|. Jump to the |miss| label
// otherwise.
-void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register elements,
- Register name,
- Register r0,
- Register r1) {
+void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register elements,
+ Register name,
+ Register r0,
+ Register r1) {
ASSERT(!elements.is(r0));
ASSERT(!elements.is(r1));
ASSERT(!name.is(r0));
ASSERT(!name.is(r1));
- __ AssertString(name);
+ __ AssertName(name);
__ SmiToInteger32(r0, FieldOperand(elements, kCapacityOffset));
__ decl(r0);
for (int i = 0; i < kInlinedProbes; i++) {
// Compute the masked index: (hash + i + i * i) & mask.
- __ movl(r1, FieldOperand(name, String::kHashFieldOffset));
- __ shrl(r1, Immediate(String::kHashShift));
+ __ movl(r1, FieldOperand(name, Name::kHashFieldOffset));
+ __ shrl(r1, Immediate(Name::kHashShift));
if (i > 0) {
- __ addl(r1, Immediate(StringDictionary::GetProbeOffset(i)));
+ __ addl(r1, Immediate(NameDictionary::GetProbeOffset(i)));
}
__ and_(r1, r0);
// Scale the index by multiplying by the entry size.
- ASSERT(StringDictionary::kEntrySize == 3);
+ ASSERT(NameDictionary::kEntrySize == 3);
__ lea(r1, Operand(r1, r1, times_2, 0)); // r1 = r1 * 3
// Check if the key is identical to the name.
@@ -5948,13 +4945,10 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
__ j(equal, done);
}
- StringDictionaryLookupStub stub(elements,
- r0,
- r1,
- POSITIVE_LOOKUP);
+ NameDictionaryLookupStub stub(elements, r0, r1, POSITIVE_LOOKUP);
__ push(name);
- __ movl(r0, FieldOperand(name, String::kHashFieldOffset));
- __ shrl(r0, Immediate(String::kHashShift));
+ __ movl(r0, FieldOperand(name, Name::kHashFieldOffset));
+ __ shrl(r0, Immediate(Name::kHashShift));
__ push(r0);
__ CallStub(&stub);
@@ -5964,15 +4958,15 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
}
-void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
+void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
// This stub overrides SometimesSetsUpAFrame() to return false. That means
// we cannot call anything that could cause a GC from this stub.
// Stack frame on entry:
- // esp[0 * kPointerSize]: return address.
- // esp[1 * kPointerSize]: key's hash.
- // esp[2 * kPointerSize]: key.
+ // rsp[0 * kPointerSize] : return address.
+ // rsp[1 * kPointerSize] : key's hash.
+ // rsp[2 * kPointerSize] : key.
// Registers:
- // dictionary_: StringDictionary to probe.
+ // dictionary_: NameDictionary to probe.
// result_: used as scratch.
// index_: will hold an index of entry if lookup is successful.
// might alias with result_.
@@ -5992,16 +4986,18 @@ void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
// undefined value), it guarantees the hash table doesn't contain the
// property. It's true even if some slots represent deleted properties
// (their names are the null value).
+ StackArgumentsAccessor args(rsp, 2, ARGUMENTS_DONT_CONTAIN_RECEIVER,
+ kPointerSize);
for (int i = kInlinedProbes; i < kTotalProbes; i++) {
// Compute the masked index: (hash + i + i * i) & mask.
- __ movq(scratch, Operand(rsp, 2 * kPointerSize));
+ __ movq(scratch, args.GetArgumentOperand(1));
if (i > 0) {
- __ addl(scratch, Immediate(StringDictionary::GetProbeOffset(i)));
+ __ addl(scratch, Immediate(NameDictionary::GetProbeOffset(i)));
}
__ and_(scratch, Operand(rsp, 0));
// Scale the index by multiplying by the entry size.
- ASSERT(StringDictionary::kEntrySize == 3);
+ ASSERT(NameDictionary::kEntrySize == 3);
__ lea(index_, Operand(scratch, scratch, times_2, 0)); // index *= 3.
// Having undefined at this place means the name is not contained.
@@ -6014,19 +5010,18 @@ void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
__ j(equal, &not_in_dictionary);
// Stop if found the property.
- __ cmpq(scratch, Operand(rsp, 3 * kPointerSize));
+ __ cmpq(scratch, args.GetArgumentOperand(0));
__ j(equal, &in_dictionary);
if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
- // If we hit a non symbol key during negative lookup
- // we have to bailout as this key might be equal to the
+ // If we hit a key that is not a unique name during negative
+ // lookup we have to bail out, as this key might be equal to the
// key we are looking for.
- // Check if the entry name is not a symbol.
+ // Check if the entry name is not a unique name.
__ movq(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
- __ testb(FieldOperand(scratch, Map::kInstanceTypeOffset),
- Immediate(kIsSymbolMask));
- __ j(zero, &maybe_in_dictionary);
+ __ JumpIfNotUniqueName(FieldOperand(scratch, Map::kInstanceTypeOffset),
+ &maybe_in_dictionary);
}
}
@@ -6065,8 +5060,6 @@ struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
{ REG(rbx), REG(rax), REG(rdi), EMIT_REMEMBERED_SET },
// Used in CompileArrayPushCall.
{ REG(rbx), REG(rcx), REG(rdx), EMIT_REMEMBERED_SET },
- // Used in CompileStoreGlobal.
- { REG(rbx), REG(rcx), REG(rdx), OMIT_REMEMBERED_SET },
// Used in StoreStubCompiler::CompileStoreField and
// KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
{ REG(rdx), REG(rcx), REG(rbx), EMIT_REMEMBERED_SET },
@@ -6092,15 +5085,18 @@ struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
{ REG(r11), REG(rax), REG(r15), EMIT_REMEMBERED_SET},
// StoreArrayLiteralElementStub::Generate
{ REG(rbx), REG(rax), REG(rcx), EMIT_REMEMBERED_SET},
- // FastNewClosureStub::Generate
+ // FastNewClosureStub::Generate and
+ // StringAddStub::Generate
{ REG(rcx), REG(rdx), REG(rbx), EMIT_REMEMBERED_SET},
+ // StringAddStub::Generate
+ { REG(rcx), REG(rax), REG(rbx), EMIT_REMEMBERED_SET},
// Null termination.
{ REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
};
#undef REG
-bool RecordWriteStub::IsPregenerated() {
+bool RecordWriteStub::IsPregenerated(Isolate* isolate) {
for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
!entry->object.is(no_reg);
entry++) {
@@ -6116,15 +5112,16 @@ bool RecordWriteStub::IsPregenerated() {
}
-void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime() {
+void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
+ Isolate* isolate) {
StoreBufferOverflowStub stub1(kDontSaveFPRegs);
- stub1.GetCode()->set_is_pregenerated(true);
+ stub1.GetCode(isolate)->set_is_pregenerated(true);
StoreBufferOverflowStub stub2(kSaveFPRegs);
- stub2.GetCode()->set_is_pregenerated(true);
+ stub2.GetCode(isolate)->set_is_pregenerated(true);
}
-void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
+void RecordWriteStub::GenerateFixedRegStubsAheadOfTime(Isolate* isolate) {
for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
!entry->object.is(no_reg);
entry++) {
@@ -6133,7 +5130,7 @@ void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
entry->address,
entry->action,
kDontSaveFPRegs);
- stub.GetCode()->set_is_pregenerated(true);
+ stub.GetCode(isolate)->set_is_pregenerated(true);
}
}
@@ -6224,29 +5221,16 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
-#ifdef _WIN64
- Register arg3 = r8;
- Register arg2 = rdx;
- Register arg1 = rcx;
-#else
- Register arg3 = rdx;
- Register arg2 = rsi;
- Register arg1 = rdi;
-#endif
Register address =
- arg1.is(regs_.address()) ? kScratchRegister : regs_.address();
+ arg_reg_1.is(regs_.address()) ? kScratchRegister : regs_.address();
ASSERT(!address.is(regs_.object()));
- ASSERT(!address.is(arg1));
+ ASSERT(!address.is(arg_reg_1));
__ Move(address, regs_.address());
- __ Move(arg1, regs_.object());
- if (mode == INCREMENTAL_COMPACTION) {
- // TODO(gc) Can we just set address arg2 in the beginning?
- __ Move(arg2, address);
- } else {
- ASSERT(mode == INCREMENTAL);
- __ movq(arg2, Operand(address, 0));
- }
- __ LoadAddress(arg3, ExternalReference::isolate_address());
+ __ Move(arg_reg_1, regs_.object());
+ // TODO(gc) Can we just set address arg2 in the beginning?
+ __ Move(arg_reg_2, address);
+ __ LoadAddress(arg_reg_3,
+ ExternalReference::isolate_address(masm->isolate()));
int argument_count = 3;
AllowExternalCallThatCantCauseGC scope(masm);
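The deleted #ifdef block is the ground truth for what the new arg_reg_N aliases must resolve to: rcx/rdx/r8 on Win64 and rdi/rsi/rdx under the System V ABI. Presumably they are defined centrally along these lines (the definition is not part of this hunk):

    #ifdef _WIN64
    const Register arg_reg_1 = rcx;
    const Register arg_reg_2 = rdx;
    const Register arg_reg_3 = r8;
    #else
    const Register arg_reg_1 = rdi;
    const Register arg_reg_2 = rsi;
    const Register arg_reg_3 = rdx;
    #endif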
@@ -6361,12 +5345,12 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- rax : element value to store
- // -- rbx : array literal
- // -- rdi : map of array literal
- // -- rcx : element index as smi
- // -- rdx : array literal index in function
- // -- rsp[0] : return address
+ // -- rax : element value to store
+ // -- rcx : element index as smi
+ // -- rsp[0] : return address
+ // -- rsp[8] : array literal index in function
+ // -- rsp[16] : array literal
+ // clobbers rbx, rdx, rdi
// -----------------------------------
Label element_done;
@@ -6375,6 +5359,12 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
Label slow_elements;
Label fast_elements;
+ // Get array literal index, array literal and its map.
+ StackArgumentsAccessor args(rsp, 2, ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ __ movq(rdx, args.GetArgumentOperand(1));
+ __ movq(rbx, args.GetArgumentOperand(0));
+ __ movq(rdi, FieldOperand(rbx, JSObject::kMapOffset));
+
__ CheckFastElements(rdi, &double_elements);
// FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS
@@ -6385,16 +5375,14 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
// the runtime.
__ bind(&slow_elements);
- __ pop(rdi); // Pop return address and remember to put back later for tail
- // call.
+ __ PopReturnAddressTo(rdi);
__ push(rbx);
__ push(rcx);
__ push(rax);
__ movq(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
__ push(rdx);
- __ push(rdi); // Return return address so that tail call returns to right
- // place.
+ __ PushReturnAddressFrom(rdi);
__ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
// Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
@@ -6434,8 +5422,28 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
}
+void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
+ CEntryStub ces(1, fp_registers_ ? kSaveFPRegs : kDontSaveFPRegs);
+ __ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
+ int parameter_count_offset =
+ StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
+ __ movq(rbx, MemOperand(rbp, parameter_count_offset));
+ masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
+ __ PopReturnAddressTo(rcx);
+ int additional_offset = function_mode_ == JS_FUNCTION_STUB_MODE
+ ? kPointerSize
+ : 0;
+ __ lea(rsp, MemOperand(rsp, rbx, times_pointer_size, additional_offset));
+ __ jmp(rcx); // Return to IC Miss stub, continuation still on stack.
+}
+
+
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
- if (entry_hook_ != NULL) {
+ if (masm->isolate()->function_entry_hook() != NULL) {
+ // It's always safe to call the entry hook stub, as the hook itself
+ // is not allowed to call back to V8.
+ AllowStubCallsScope allow_stub_calls(masm, true);
+
ProfileEntryHookStub stub;
masm->CallStub(&stub);
}
@@ -6443,45 +5451,25 @@ void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
- // Save volatile registers.
- // Live registers at this point are the same as at the start of any
- // JS function:
- // o rdi: the JS function object being called (i.e. ourselves)
- // o rsi: our context
- // o rbp: our caller's frame pointer
- // o rsp: stack pointer (pointing to return address)
- // o rcx: rcx is zero for method calls and non-zero for function calls.
-#ifdef _WIN64
- const int kNumSavedRegisters = 1;
-
- __ push(rcx);
-#else
- const int kNumSavedRegisters = 3;
-
- __ push(rcx);
- __ push(rdi);
- __ push(rsi);
-#endif
+ // This stub can be called from essentially anywhere, so it needs to save
+ // all volatile and callee-save registers.
+ const size_t kNumSavedRegisters = 2;
+ __ push(arg_reg_1);
+ __ push(arg_reg_2);
// Calculate the original stack pointer and store it in the second arg.
-#ifdef _WIN64
- __ lea(rdx, Operand(rsp, kNumSavedRegisters * kPointerSize));
-#else
- __ lea(rsi, Operand(rsp, kNumSavedRegisters * kPointerSize));
-#endif
+ __ lea(arg_reg_2, Operand(rsp, (kNumSavedRegisters + 1) * kPointerSize));
// Calculate the function address to the first arg.
-#ifdef _WIN64
- __ movq(rcx, Operand(rdx, 0));
- __ subq(rcx, Immediate(Assembler::kShortCallInstructionLength));
-#else
- __ movq(rdi, Operand(rsi, 0));
- __ subq(rdi, Immediate(Assembler::kShortCallInstructionLength));
-#endif
+ __ movq(arg_reg_1, Operand(rsp, kNumSavedRegisters * kPointerSize));
+ __ subq(arg_reg_1, Immediate(Assembler::kShortCallInstructionLength));
+
+ // Save the remainder of the volatile registers.
+ masm->PushCallerSaved(kSaveFPRegs, arg_reg_1, arg_reg_2);
// Call the entry hook function.
- __ movq(rax, &entry_hook_, RelocInfo::NONE);
- __ movq(rax, Operand(rax, 0));
+ __ movq(rax, FUNCTION_ADDR(masm->isolate()->function_entry_hook()),
+ RelocInfo::NONE64);
AllowExternalCallThatCantCauseGC scope(masm);
@@ -6490,17 +5478,345 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
__ CallCFunction(rax, kArgumentCount);
// Restore volatile regs.
-#ifdef _WIN64
- __ pop(rcx);
-#else
- __ pop(rsi);
- __ pop(rdi);
- __ pop(rcx);
-#endif
+ masm->PopCallerSaved(kSaveFPRegs, arg_reg_1, arg_reg_2);
+ __ pop(arg_reg_2);
+ __ pop(arg_reg_1);
__ Ret();
}
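The rewritten stub now targets the per-isolate entry hook and passes its two arguments in arg_reg_1 and arg_reg_2: the address of the function being entered (the saved return address minus the length of the short call that invoked the stub) and the stack slot holding that return address. Assuming the usual public declaration from v8.h of this era, the hook it calls into has this shape:

    // Assumed to match v8.h: 'function' is the entry point of the called code,
    // 'return_addr_location' is the stack slot holding the return address into it.
    typedef void (*FunctionEntryHook)(uintptr_t function,
                                      uintptr_t return_addr_location);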
+
+template<class T>
+static void CreateArrayDispatch(MacroAssembler* masm,
+ AllocationSiteOverrideMode mode) {
+ if (mode == DISABLE_ALLOCATION_SITES) {
+ T stub(GetInitialFastElementsKind(),
+ CONTEXT_CHECK_REQUIRED,
+ mode);
+ __ TailCallStub(&stub);
+ } else if (mode == DONT_OVERRIDE) {
+ int last_index = GetSequenceIndexFromFastElementsKind(
+ TERMINAL_FAST_ELEMENTS_KIND);
+ for (int i = 0; i <= last_index; ++i) {
+ Label next;
+ ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
+ __ cmpl(rdx, Immediate(kind));
+ __ j(not_equal, &next);
+ T stub(kind);
+ __ TailCallStub(&stub);
+ __ bind(&next);
+ }
+
+ // If we reached this point there is a problem.
+ __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
+ AllocationSiteOverrideMode mode) {
+ // rbx - type info cell (if mode != DISABLE_ALLOCATION_SITES)
+ // rdx - kind (if mode != DISABLE_ALLOCATION_SITES)
+ // rax - number of arguments
+ // rdi - constructor?
+ // rsp[0] - return address
+ // rsp[8] - last argument
+ Handle<Object> undefined_sentinel(
+ masm->isolate()->heap()->undefined_value(),
+ masm->isolate());
+
+ Label normal_sequence;
+ if (mode == DONT_OVERRIDE) {
+ ASSERT(FAST_SMI_ELEMENTS == 0);
+ ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ ASSERT(FAST_ELEMENTS == 2);
+ ASSERT(FAST_HOLEY_ELEMENTS == 3);
+ ASSERT(FAST_DOUBLE_ELEMENTS == 4);
+ ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
+
+ // is the low bit set? If so, we are holey and that is good.
+ __ testb(rdx, Immediate(1));
+ __ j(not_zero, &normal_sequence);
+ }
+
+ // look at the first argument
+ StackArgumentsAccessor args(rsp, 1, ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ __ movq(rcx, args.GetArgumentOperand(0));
+ __ testq(rcx, rcx);
+ __ j(zero, &normal_sequence);
+
+ if (mode == DISABLE_ALLOCATION_SITES) {
+ ElementsKind initial = GetInitialFastElementsKind();
+ ElementsKind holey_initial = GetHoleyElementsKind(initial);
+
+ ArraySingleArgumentConstructorStub stub_holey(holey_initial,
+ CONTEXT_CHECK_REQUIRED,
+ DISABLE_ALLOCATION_SITES);
+ __ TailCallStub(&stub_holey);
+
+ __ bind(&normal_sequence);
+ ArraySingleArgumentConstructorStub stub(initial,
+ CONTEXT_CHECK_REQUIRED,
+ DISABLE_ALLOCATION_SITES);
+ __ TailCallStub(&stub);
+ } else if (mode == DONT_OVERRIDE) {
+ // We are going to create a holey array, but our kind is non-holey.
+ // Fix kind and retry (only if we have an allocation site in the cell).
+ __ incl(rdx);
+ __ movq(rcx, FieldOperand(rbx, Cell::kValueOffset));
+ if (FLAG_debug_code) {
+ Handle<Map> allocation_site_map =
+ masm->isolate()->factory()->allocation_site_map();
+ __ Cmp(FieldOperand(rcx, 0), allocation_site_map);
+ __ Assert(equal, kExpectedAllocationSiteInCell);
+ }
+
+ // Save the resulting elements kind in type info
+ __ Integer32ToSmi(rdx, rdx);
+ __ movq(FieldOperand(rcx, AllocationSite::kTransitionInfoOffset), rdx);
+ __ SmiToInteger32(rdx, rdx);
+
+ __ bind(&normal_sequence);
+ int last_index = GetSequenceIndexFromFastElementsKind(
+ TERMINAL_FAST_ELEMENTS_KIND);
+ for (int i = 0; i <= last_index; ++i) {
+ Label next;
+ ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
+ __ cmpl(rdx, Immediate(kind));
+ __ j(not_equal, &next);
+ ArraySingleArgumentConstructorStub stub(kind);
+ __ TailCallStub(&stub);
+ __ bind(&next);
+ }
+
+ // If we reached this point there is a problem.
+ __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ } else {
+ UNREACHABLE();
+ }
+}
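The __ incl(rdx) above leans on the ElementsKind numbering pinned down by the ASSERTs at the top of the function: every packed kind is even and its holey counterpart is the next odd value, so adding one converts a packed kind to the matching holey kind. A hypothetical helper making that explicit:

    // Hypothetical helper; relies only on the numbering asserted above
    // (FAST_SMI_ELEMENTS == 0, FAST_HOLEY_SMI_ELEMENTS == 1, ...,
    //  FAST_HOLEY_DOUBLE_ELEMENTS == 5).
    static ElementsKind ToHoleyKind(ElementsKind kind) {
      return (kind & 1) ? kind                                  // already holey
                        : static_cast<ElementsKind>(kind + 1);  // packed -> holey
    }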
+
+
+template<class T>
+static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
+ ElementsKind initial_kind = GetInitialFastElementsKind();
+ ElementsKind initial_holey_kind = GetHoleyElementsKind(initial_kind);
+
+ int to_index = GetSequenceIndexFromFastElementsKind(
+ TERMINAL_FAST_ELEMENTS_KIND);
+ for (int i = 0; i <= to_index; ++i) {
+ ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
+ T stub(kind);
+ stub.GetCode(isolate)->set_is_pregenerated(true);
+ if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE ||
+ (!FLAG_track_allocation_sites &&
+ (kind == initial_kind || kind == initial_holey_kind))) {
+ T stub1(kind, CONTEXT_CHECK_REQUIRED, DISABLE_ALLOCATION_SITES);
+ stub1.GetCode(isolate)->set_is_pregenerated(true);
+ }
+ }
+}
+
+
+void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
+ ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
+ isolate);
+ ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
+ isolate);
+ ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
+ isolate);
+}
+
+
+void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
+ Isolate* isolate) {
+ ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
+ for (int i = 0; i < 2; i++) {
+ // For internal arrays we only need a few things
+ InternalArrayNoArgumentConstructorStub stubh1(kinds[i]);
+ stubh1.GetCode(isolate)->set_is_pregenerated(true);
+ InternalArraySingleArgumentConstructorStub stubh2(kinds[i]);
+ stubh2.GetCode(isolate)->set_is_pregenerated(true);
+ InternalArrayNArgumentsConstructorStub stubh3(kinds[i]);
+ stubh3.GetCode(isolate)->set_is_pregenerated(true);
+ }
+}
+
+
+void ArrayConstructorStub::GenerateDispatchToArrayStub(
+ MacroAssembler* masm,
+ AllocationSiteOverrideMode mode) {
+ if (argument_count_ == ANY) {
+ Label not_zero_case, not_one_case;
+ __ testq(rax, rax);
+ __ j(not_zero, &not_zero_case);
+ CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
+
+ __ bind(&not_zero_case);
+ __ cmpl(rax, Immediate(1));
+ __ j(greater, &not_one_case);
+ CreateArrayDispatchOneArgument(masm, mode);
+
+ __ bind(&not_one_case);
+ CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+ } else if (argument_count_ == NONE) {
+ CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
+ } else if (argument_count_ == ONE) {
+ CreateArrayDispatchOneArgument(masm, mode);
+ } else if (argument_count_ == MORE_THAN_ONE) {
+ CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+void ArrayConstructorStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : argc
+ // -- rbx : type info cell
+ // -- rdi : constructor
+ // -- rsp[0] : return address
+ // -- rsp[8] : last argument
+ // -----------------------------------
+ Handle<Object> undefined_sentinel(
+ masm->isolate()->heap()->undefined_value(),
+ masm->isolate());
+
+ if (FLAG_debug_code) {
+ // The array construct code is only set for the global and natives
+ // builtin Array functions which always have maps.
+
+ // Initial map for the builtin Array function should be a map.
+ __ movq(rcx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
+ // Will both indicate a NULL and a Smi.
+ STATIC_ASSERT(kSmiTag == 0);
+ Condition not_smi = NegateCondition(masm->CheckSmi(rcx));
+ __ Check(not_smi, kUnexpectedInitialMapForArrayFunction);
+ __ CmpObjectType(rcx, MAP_TYPE, rcx);
+ __ Check(equal, kUnexpectedInitialMapForArrayFunction);
+
+ // We should either have undefined in rbx or a valid cell
+ Label okay_here;
+ Handle<Map> cell_map = masm->isolate()->factory()->cell_map();
+ __ Cmp(rbx, undefined_sentinel);
+ __ j(equal, &okay_here);
+ __ Cmp(FieldOperand(rbx, 0), cell_map);
+ __ Assert(equal, kExpectedPropertyCellInRegisterRbx);
+ __ bind(&okay_here);
+ }
+
+ Label no_info;
+ // If the type cell is undefined, or contains anything other than an
+ // AllocationSite, call an array constructor that doesn't use AllocationSites.
+ __ Cmp(rbx, undefined_sentinel);
+ __ j(equal, &no_info);
+ __ movq(rdx, FieldOperand(rbx, Cell::kValueOffset));
+ __ Cmp(FieldOperand(rdx, 0),
+ masm->isolate()->factory()->allocation_site_map());
+ __ j(not_equal, &no_info);
+
+ __ movq(rdx, FieldOperand(rdx, AllocationSite::kTransitionInfoOffset));
+ __ SmiToInteger32(rdx, rdx);
+ GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
+
+ __ bind(&no_info);
+ GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
+}
+
+
+void InternalArrayConstructorStub::GenerateCase(
+ MacroAssembler* masm, ElementsKind kind) {
+ Label not_zero_case, not_one_case;
+ Label normal_sequence;
+
+ __ testq(rax, rax);
+ __ j(not_zero, &not_zero_case);
+ InternalArrayNoArgumentConstructorStub stub0(kind);
+ __ TailCallStub(&stub0);
+
+ __ bind(&not_zero_case);
+ __ cmpl(rax, Immediate(1));
+ __ j(greater, &not_one_case);
+
+ if (IsFastPackedElementsKind(kind)) {
+ // We might need to create a holey array, so
+ // look at the first argument.
+ StackArgumentsAccessor args(rsp, 1, ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ __ movq(rcx, args.GetArgumentOperand(0));
+ __ testq(rcx, rcx);
+ __ j(zero, &normal_sequence);
+
+ InternalArraySingleArgumentConstructorStub
+ stub1_holey(GetHoleyElementsKind(kind));
+ __ TailCallStub(&stub1_holey);
+ }
+
+ __ bind(&normal_sequence);
+ InternalArraySingleArgumentConstructorStub stub1(kind);
+ __ TailCallStub(&stub1);
+
+ __ bind(&not_one_case);
+ InternalArrayNArgumentsConstructorStub stubN(kind);
+ __ TailCallStub(&stubN);
+}
+
+
+void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : argc
+ // -- rbx : type info cell
+ // -- rdi : constructor
+ // -- rsp[0] : return address
+ // -- rsp[8] : last argument
+ // -----------------------------------
+
+ if (FLAG_debug_code) {
+ // The array construct code is only set for the global and natives
+ // builtin Array functions which always have maps.
+
+ // Initial map for the builtin Array function should be a map.
+ __ movq(rcx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
+ // Will both indicate a NULL and a Smi.
+ STATIC_ASSERT(kSmiTag == 0);
+ Condition not_smi = NegateCondition(masm->CheckSmi(rcx));
+ __ Check(not_smi, kUnexpectedInitialMapForArrayFunction);
+ __ CmpObjectType(rcx, MAP_TYPE, rcx);
+ __ Check(equal, kUnexpectedInitialMapForArrayFunction);
+ }
+
+ // Figure out the right elements kind
+ __ movq(rcx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
+
+ // Load the map's "bit field 2" into rcx. We only need the first byte,
+ // but the following masking takes care of that anyway.
+ __ movzxbq(rcx, FieldOperand(rcx, Map::kBitField2Offset));
+ // Retrieve elements_kind from bit field 2.
+ __ and_(rcx, Immediate(Map::kElementsKindMask));
+ __ shr(rcx, Immediate(Map::kElementsKindShift));
+
+ if (FLAG_debug_code) {
+ Label done;
+ __ cmpl(rcx, Immediate(FAST_ELEMENTS));
+ __ j(equal, &done);
+ __ cmpl(rcx, Immediate(FAST_HOLEY_ELEMENTS));
+ __ Assert(equal,
+ kInvalidElementsKindForInternalArrayOrInternalPackedArray);
+ __ bind(&done);
+ }
+
+ Label fast_elements_case;
+ __ cmpl(rcx, Immediate(FAST_ELEMENTS));
+ __ j(equal, &fast_elements_case);
+ GenerateCase(masm, FAST_HOLEY_ELEMENTS);
+
+ __ bind(&fast_elements_case);
+ GenerateCase(masm, FAST_ELEMENTS);
+}
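The elements-kind extraction at the top of InternalArrayConstructorStub::Generate is a plain mask-and-shift on the map's second bit field; in C++ it would read:

    // Same computation as the movzxbq / and_ / shr sequence above.
    ElementsKind kind = static_cast<ElementsKind>(
        (map->bit_field2() & Map::kElementsKindMask) >> Map::kElementsKindShift);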
+
+
#undef __
} } // namespace v8::internal
diff --git a/deps/v8/src/x64/code-stubs-x64.h b/deps/v8/src/x64/code-stubs-x64.h
index 6a1a18f83..c76abcf00 100644
--- a/deps/v8/src/x64/code-stubs-x64.h
+++ b/deps/v8/src/x64/code-stubs-x64.h
@@ -35,9 +35,11 @@ namespace v8 {
namespace internal {
+void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code);
+
// Compute a transcendental math function natively, or call the
// TranscendentalCache runtime function.
-class TranscendentalCacheStub: public CodeStub {
+class TranscendentalCacheStub: public PlatformCodeStub {
public:
enum ArgumentType {
TAGGED = 0,
@@ -60,15 +62,15 @@ class TranscendentalCacheStub: public CodeStub {
};
-class StoreBufferOverflowStub: public CodeStub {
+class StoreBufferOverflowStub: public PlatformCodeStub {
public:
explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
: save_doubles_(save_fp) { }
void Generate(MacroAssembler* masm);
- virtual bool IsPregenerated() { return true; }
- static void GenerateFixedRegStubsAheadOfTime();
+ virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE { return true; }
+ static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
virtual bool SometimesSetsUpAFrame() { return false; }
private:
@@ -79,173 +81,6 @@ class StoreBufferOverflowStub: public CodeStub {
};
-// Flag that indicates how to generate code for the stub GenericBinaryOpStub.
-enum GenericBinaryFlags {
- NO_GENERIC_BINARY_FLAGS = 0,
- NO_SMI_CODE_IN_STUB = 1 << 0 // Omit smi code in stub.
-};
-
-
-class UnaryOpStub: public CodeStub {
- public:
- UnaryOpStub(Token::Value op,
- UnaryOverwriteMode mode,
- UnaryOpIC::TypeInfo operand_type = UnaryOpIC::UNINITIALIZED)
- : op_(op),
- mode_(mode),
- operand_type_(operand_type) {
- }
-
- private:
- Token::Value op_;
- UnaryOverwriteMode mode_;
-
- // Operand type information determined at runtime.
- UnaryOpIC::TypeInfo operand_type_;
-
- virtual void PrintName(StringStream* stream);
-
- class ModeBits: public BitField<UnaryOverwriteMode, 0, 1> {};
- class OpBits: public BitField<Token::Value, 1, 7> {};
- class OperandTypeInfoBits: public BitField<UnaryOpIC::TypeInfo, 8, 3> {};
-
- Major MajorKey() { return UnaryOp; }
- int MinorKey() {
- return ModeBits::encode(mode_)
- | OpBits::encode(op_)
- | OperandTypeInfoBits::encode(operand_type_);
- }
-
- // Note: A lot of the helper functions below will vanish when we use virtual
- // function instead of switch more often.
- void Generate(MacroAssembler* masm);
-
- void GenerateTypeTransition(MacroAssembler* masm);
-
- void GenerateSmiStub(MacroAssembler* masm);
- void GenerateSmiStubSub(MacroAssembler* masm);
- void GenerateSmiStubBitNot(MacroAssembler* masm);
- void GenerateSmiCodeSub(MacroAssembler* masm,
- Label* non_smi,
- Label* slow,
- Label::Distance non_smi_near = Label::kFar,
- Label::Distance slow_near = Label::kFar);
- void GenerateSmiCodeBitNot(MacroAssembler* masm,
- Label* non_smi,
- Label::Distance non_smi_near);
-
- void GenerateHeapNumberStub(MacroAssembler* masm);
- void GenerateHeapNumberStubSub(MacroAssembler* masm);
- void GenerateHeapNumberStubBitNot(MacroAssembler* masm);
- void GenerateHeapNumberCodeSub(MacroAssembler* masm, Label* slow);
- void GenerateHeapNumberCodeBitNot(MacroAssembler* masm, Label* slow);
-
- void GenerateGenericStub(MacroAssembler* masm);
- void GenerateGenericStubSub(MacroAssembler* masm);
- void GenerateGenericStubBitNot(MacroAssembler* masm);
- void GenerateGenericCodeFallback(MacroAssembler* masm);
-
- virtual int GetCodeKind() { return Code::UNARY_OP_IC; }
-
- virtual InlineCacheState GetICState() {
- return UnaryOpIC::ToState(operand_type_);
- }
-
- virtual void FinishCode(Handle<Code> code) {
- code->set_unary_op_type(operand_type_);
- }
-};
-
-
-class BinaryOpStub: public CodeStub {
- public:
- BinaryOpStub(Token::Value op, OverwriteMode mode)
- : op_(op),
- mode_(mode),
- operands_type_(BinaryOpIC::UNINITIALIZED),
- result_type_(BinaryOpIC::UNINITIALIZED) {
- ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
- }
-
- BinaryOpStub(
- int key,
- BinaryOpIC::TypeInfo operands_type,
- BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED)
- : op_(OpBits::decode(key)),
- mode_(ModeBits::decode(key)),
- operands_type_(operands_type),
- result_type_(result_type) { }
-
- private:
- enum SmiCodeGenerateHeapNumberResults {
- ALLOW_HEAPNUMBER_RESULTS,
- NO_HEAPNUMBER_RESULTS
- };
-
- Token::Value op_;
- OverwriteMode mode_;
-
- // Operand type information determined at runtime.
- BinaryOpIC::TypeInfo operands_type_;
- BinaryOpIC::TypeInfo result_type_;
-
- virtual void PrintName(StringStream* stream);
-
- // Minor key encoding in 15 bits RRRTTTOOOOOOOMM.
- class ModeBits: public BitField<OverwriteMode, 0, 2> {};
- class OpBits: public BitField<Token::Value, 2, 7> {};
- class OperandTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 9, 3> {};
- class ResultTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 12, 3> {};
-
- Major MajorKey() { return BinaryOp; }
- int MinorKey() {
- return OpBits::encode(op_)
- | ModeBits::encode(mode_)
- | OperandTypeInfoBits::encode(operands_type_)
- | ResultTypeInfoBits::encode(result_type_);
- }
-
- void Generate(MacroAssembler* masm);
- void GenerateGeneric(MacroAssembler* masm);
- void GenerateSmiCode(MacroAssembler* masm,
- Label* slow,
- SmiCodeGenerateHeapNumberResults heapnumber_results);
- void GenerateFloatingPointCode(MacroAssembler* masm,
- Label* allocation_failure,
- Label* non_numeric_failure);
- void GenerateStringAddCode(MacroAssembler* masm);
- void GenerateCallRuntimeCode(MacroAssembler* masm);
- void GenerateLoadArguments(MacroAssembler* masm);
- void GenerateReturn(MacroAssembler* masm);
- void GenerateUninitializedStub(MacroAssembler* masm);
- void GenerateSmiStub(MacroAssembler* masm);
- void GenerateInt32Stub(MacroAssembler* masm);
- void GenerateHeapNumberStub(MacroAssembler* masm);
- void GenerateOddballStub(MacroAssembler* masm);
- void GenerateStringStub(MacroAssembler* masm);
- void GenerateBothStringStub(MacroAssembler* masm);
- void GenerateGenericStub(MacroAssembler* masm);
-
- void GenerateHeapResultAllocation(MacroAssembler* masm, Label* alloc_failure);
- void GenerateRegisterArgsPush(MacroAssembler* masm);
- void GenerateTypeTransition(MacroAssembler* masm);
- void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm);
-
- virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
-
- virtual InlineCacheState GetICState() {
- return BinaryOpIC::ToState(operands_type_);
- }
-
- virtual void FinishCode(Handle<Code> code) {
- code->set_binary_op_type(operands_type_);
- code->set_binary_op_result_type(result_type_);
- }
-
- friend class CodeGenerator;
-};
-
-
class StringHelper : public AllStatic {
public:
// Generate code for copying characters using a simple loop. This should only
@@ -268,11 +103,11 @@ class StringHelper : public AllStatic {
bool ascii);
- // Probe the symbol table for a two character string. If the string is
+ // Probe the string table for a two character string. If the string is
// not found by probing, a jump to the label not_found is performed. This jump
- // does not guarantee that the string is not in the symbol table. If the
+ // does not guarantee that the string is not in the string table. If the
// string is found the code falls through with the string in register rax.
- static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
+ static void GenerateTwoCharacterStringTableProbe(MacroAssembler* masm,
Register c1,
Register c2,
Register scratch1,
@@ -299,20 +134,7 @@ class StringHelper : public AllStatic {
};
-// Flag that indicates how to generate code for the stub StringAddStub.
-enum StringAddFlags {
- NO_STRING_ADD_FLAGS = 0,
- // Omit left string check in stub (left is definitely a string).
- NO_STRING_CHECK_LEFT_IN_STUB = 1 << 0,
- // Omit right string check in stub (right is definitely a string).
- NO_STRING_CHECK_RIGHT_IN_STUB = 1 << 1,
- // Omit both string checks in stub.
- NO_STRING_CHECK_IN_STUB =
- NO_STRING_CHECK_LEFT_IN_STUB | NO_STRING_CHECK_RIGHT_IN_STUB
-};
-
-
-class StringAddStub: public CodeStub {
+class StringAddStub: public PlatformCodeStub {
public:
explicit StringAddStub(StringAddFlags flags) : flags_(flags) {}
@@ -330,11 +152,14 @@ class StringAddStub: public CodeStub {
Register scratch3,
Label* slow);
+ void GenerateRegisterArgsPush(MacroAssembler* masm);
+ void GenerateRegisterArgsPop(MacroAssembler* masm, Register temp);
+
const StringAddFlags flags_;
};
-class SubStringStub: public CodeStub {
+class SubStringStub: public PlatformCodeStub {
public:
SubStringStub() {}
@@ -346,7 +171,7 @@ class SubStringStub: public CodeStub {
};
-class StringCompareStub: public CodeStub {
+class StringCompareStub: public PlatformCodeStub {
public:
StringCompareStub() {}
@@ -383,43 +208,14 @@ class StringCompareStub: public CodeStub {
};
-class NumberToStringStub: public CodeStub {
- public:
- NumberToStringStub() { }
-
- // Generate code to do a lookup in the number string cache. If the number in
- // the register object is found in the cache the generated code falls through
- // with the result in the result register. The object and the result register
- // can be the same. If the number is not found in the cache the code jumps to
- // the label not_found with only the content of register object unchanged.
- static void GenerateLookupNumberStringCache(MacroAssembler* masm,
- Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- bool object_is_smi,
- Label* not_found);
-
- private:
- static void GenerateConvertHashCodeToIndex(MacroAssembler* masm,
- Register hash,
- Register mask);
-
- Major MajorKey() { return NumberToString; }
- int MinorKey() { return 0; }
-
- void Generate(MacroAssembler* masm);
-};
-
-
-class StringDictionaryLookupStub: public CodeStub {
+class NameDictionaryLookupStub: public PlatformCodeStub {
public:
enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
- StringDictionaryLookupStub(Register dictionary,
- Register result,
- Register index,
- LookupMode mode)
+ NameDictionaryLookupStub(Register dictionary,
+ Register result,
+ Register index,
+ LookupMode mode)
: dictionary_(dictionary), result_(result), index_(index), mode_(mode) { }
void Generate(MacroAssembler* masm);
@@ -428,7 +224,7 @@ class StringDictionaryLookupStub: public CodeStub {
Label* miss,
Label* done,
Register properties,
- Handle<String> name,
+ Handle<Name> name,
Register r0);
static void GeneratePositiveLookup(MacroAssembler* masm,
@@ -446,14 +242,14 @@ class StringDictionaryLookupStub: public CodeStub {
static const int kTotalProbes = 20;
static const int kCapacityOffset =
- StringDictionary::kHeaderSize +
- StringDictionary::kCapacityIndex * kPointerSize;
+ NameDictionary::kHeaderSize +
+ NameDictionary::kCapacityIndex * kPointerSize;
static const int kElementsStartOffset =
- StringDictionary::kHeaderSize +
- StringDictionary::kElementsStartIndex * kPointerSize;
+ NameDictionary::kHeaderSize +
+ NameDictionary::kElementsStartIndex * kPointerSize;
- Major MajorKey() { return StringDictionaryLookup; }
+ Major MajorKey() { return NameDictionaryLookup; }
int MinorKey() {
return DictionaryBits::encode(dictionary_.code()) |
@@ -474,7 +270,7 @@ class StringDictionaryLookupStub: public CodeStub {
};
-class RecordWriteStub: public CodeStub {
+class RecordWriteStub: public PlatformCodeStub {
public:
RecordWriteStub(Register object,
Register value,
@@ -497,8 +293,8 @@ class RecordWriteStub: public CodeStub {
INCREMENTAL_COMPACTION
};
- virtual bool IsPregenerated();
- static void GenerateFixedRegStubsAheadOfTime();
+ virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE;
+ static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
virtual bool SometimesSetsUpAFrame() { return false; }
static const byte kTwoByteNopInstruction = 0x3c; // Cmpb al, #imm8.
@@ -657,7 +453,7 @@ class RecordWriteStub: public CodeStub {
Register GetRegThatIsNotRcxOr(Register r1,
Register r2,
Register r3) {
- for (int i = 0; i < Register::kNumAllocatableRegisters; i++) {
+ for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
Register candidate = Register::FromAllocationIndex(i);
if (candidate.is(rcx)) continue;
if (candidate.is(r1)) continue;
diff --git a/deps/v8/src/x64/codegen-x64.cc b/deps/v8/src/x64/codegen-x64.cc
index 2924810c1..390ec7c9c 100644
--- a/deps/v8/src/x64/codegen-x64.cc
+++ b/deps/v8/src/x64/codegen-x64.cc
@@ -27,7 +27,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_X64)
+#if V8_TARGET_ARCH_X64
#include "codegen.h"
#include "macro-assembler.h"
@@ -91,7 +91,38 @@ UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
CodeDesc desc;
masm.GetCode(&desc);
- ASSERT(desc.reloc_size == 0);
+ ASSERT(!RelocInfo::RequiresRelocation(desc));
+
+ CPU::FlushICache(buffer, actual_size);
+ OS::ProtectCode(buffer, actual_size);
+ return FUNCTION_CAST<UnaryMathFunction>(buffer);
+}
+
+
+UnaryMathFunction CreateExpFunction() {
+ if (!FLAG_fast_math) return &exp;
+ size_t actual_size;
+ byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
+ if (buffer == NULL) return &exp;
+ ExternalReference::InitializeMathExpData();
+
+ MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+ // xmm0: raw double input.
+ XMMRegister input = xmm0;
+ XMMRegister result = xmm1;
+ __ push(rax);
+ __ push(rbx);
+
+ MathExpGenerator::EmitMathExp(&masm, input, result, xmm2, rax, rbx);
+
+ __ pop(rbx);
+ __ pop(rax);
+ __ movsd(xmm0, result);
+ __ Ret();
+
+ CodeDesc desc;
+ masm.GetCode(&desc);
+ ASSERT(!RelocInfo::RequiresRelocation(desc));
CPU::FlushICache(buffer, actual_size);
OS::ProtectCode(buffer, actual_size);
@@ -115,7 +146,7 @@ UnaryMathFunction CreateSqrtFunction() {
CodeDesc desc;
masm.GetCode(&desc);
- ASSERT(desc.reloc_size == 0);
+ ASSERT(!RelocInfo::RequiresRelocation(desc));
CPU::FlushICache(buffer, actual_size);
OS::ProtectCode(buffer, actual_size);
@@ -182,7 +213,7 @@ ModuloFunction CreateModuloFunction() {
__ j(zero, &valid_result);
__ fstp(0); // Drop result in st(0).
int64_t kNaNValue = V8_INT64_C(0x7ff8000000000000);
- __ movq(rcx, kNaNValue, RelocInfo::NONE);
+ __ movq(rcx, kNaNValue, RelocInfo::NONE64);
__ movq(Operand(rsp, kPointerSize), rcx);
__ movsd(xmm0, Operand(rsp, kPointerSize));
__ jmp(&return_result);
@@ -221,7 +252,8 @@ ModuloFunction CreateModuloFunction() {
#define __ ACCESS_MASM(masm)
void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
- MacroAssembler* masm) {
+ MacroAssembler* masm, AllocationSiteMode mode,
+ Label* allocation_memento_found) {
// ----------- S t a t e -------------
// -- rax : value
// -- rbx : target map
@@ -229,6 +261,11 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
// -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
+ if (mode == TRACK_ALLOCATION_SITE) {
+ ASSERT(allocation_memento_found != NULL);
+ __ JumpIfJSArrayHasAllocationMemento(rdx, rdi, allocation_memento_found);
+ }
+
// Set transitioned map.
__ movq(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
__ RecordWriteField(rdx,
@@ -242,7 +279,7 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
void ElementsTransitionGenerator::GenerateSmiToDouble(
- MacroAssembler* masm, Label* fail) {
+ MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
// ----------- S t a t e -------------
// -- rax : value
// -- rbx : target map
@@ -253,6 +290,10 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// The fail label is not actually used since we do not allocate.
Label allocated, new_backing_store, only_change_map, done;
+ if (mode == TRACK_ALLOCATION_SITE) {
+ __ JumpIfJSArrayHasAllocationMemento(rdx, rdi, fail);
+ }
+
// Check for empty arrays, which only require a map transition and no changes
// to the backing store.
__ movq(r8, FieldOperand(rdx, JSObject::kElementsOffset));
@@ -297,14 +338,14 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
STATIC_ASSERT(FixedDoubleArray::kHeaderSize == FixedArray::kHeaderSize);
Label loop, entry, convert_hole;
- __ movq(r15, BitCast<int64_t, uint64_t>(kHoleNanInt64), RelocInfo::NONE);
+ __ movq(r15, BitCast<int64_t, uint64_t>(kHoleNanInt64), RelocInfo::NONE64);
// r15: the-hole NaN
__ jmp(&entry);
// Allocate new backing store.
__ bind(&new_backing_store);
- __ lea(rdi, Operand(r9, times_pointer_size, FixedArray::kHeaderSize));
- __ AllocateInNewSpace(rdi, r14, r11, r15, fail, TAG_OBJECT);
+ __ lea(rdi, Operand(r9, times_8, FixedArray::kHeaderSize));
+ __ Allocate(rdi, r14, r11, r15, fail, TAG_OBJECT);
// Set backing store's map
__ LoadRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
__ movq(FieldOperand(r14, HeapObject::kMapOffset), rdi);
@@ -338,12 +379,12 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// Conversion loop.
__ bind(&loop);
__ movq(rbx,
- FieldOperand(r8, r9, times_8, FixedArray::kHeaderSize));
+ FieldOperand(r8, r9, times_pointer_size, FixedArray::kHeaderSize));
// r9 : current element's index
// rbx: current element (smi-tagged)
__ JumpIfNotSmi(rbx, &convert_hole);
__ SmiToInteger32(rbx, rbx);
- __ cvtlsi2sd(xmm0, rbx);
+ __ Cvtlsi2sd(xmm0, rbx);
__ movsd(FieldOperand(r14, r9, times_8, FixedDoubleArray::kHeaderSize),
xmm0);
__ jmp(&entry);
@@ -351,7 +392,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
if (FLAG_debug_code) {
__ CompareRoot(rbx, Heap::kTheHoleValueRootIndex);
- __ Assert(equal, "object found in smi-only array");
+ __ Assert(equal, kObjectFoundInSmiOnlyArray);
}
__ movq(FieldOperand(r14, r9, times_8, FixedDoubleArray::kHeaderSize), r15);
@@ -364,7 +405,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
void ElementsTransitionGenerator::GenerateDoubleToObject(
- MacroAssembler* masm, Label* fail) {
+ MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
// ----------- S t a t e -------------
// -- rax : value
// -- rbx : target map
@@ -374,6 +415,10 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
// -----------------------------------
Label loop, entry, convert_hole, gc_required, only_change_map;
+ if (mode == TRACK_ALLOCATION_SITE) {
+ __ JumpIfJSArrayHasAllocationMemento(rdx, rdi, fail);
+ }
+
// Check for empty arrays, which only require a map transition and no changes
// to the backing store.
__ movq(r8, FieldOperand(rdx, JSObject::kElementsOffset));
@@ -387,7 +432,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
// r8 : source FixedDoubleArray
// r9 : number of elements
__ lea(rdi, Operand(r9, times_pointer_size, FixedArray::kHeaderSize));
- __ AllocateInNewSpace(rdi, r11, r14, r15, &gc_required, TAG_OBJECT);
+ __ Allocate(rdi, r11, r14, r15, &gc_required, TAG_OBJECT);
// r11: destination FixedArray
__ LoadRoot(rdi, Heap::kFixedArrayMapRootIndex);
__ movq(FieldOperand(r11, HeapObject::kMapOffset), rdi);
@@ -395,7 +440,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
__ movq(FieldOperand(r11, FixedArray::kLengthOffset), r14);
// Prepare for conversion loop.
- __ movq(rsi, BitCast<int64_t, uint64_t>(kHoleNanInt64), RelocInfo::NONE);
+ __ movq(rsi, BitCast<int64_t, uint64_t>(kHoleNanInt64), RelocInfo::NONE64);
__ LoadRoot(rdi, Heap::kTheHoleValueRootIndex);
// rsi: the-hole NaN
// rdi: pointer to the-hole
@@ -411,7 +456,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
__ bind(&loop);
__ movq(r14, FieldOperand(r8,
r9,
- times_pointer_size,
+ times_8,
FixedDoubleArray::kHeaderSize));
// r9 : current element's index
// r14: current element
@@ -421,7 +466,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
// Non-hole double, copy value into a heap number.
__ AllocateHeapNumber(rax, r15, &gc_required);
// rax: new heap number
- __ movq(FieldOperand(rax, HeapNumber::kValueOffset), r14);
+ __ MoveDouble(FieldOperand(rax, HeapNumber::kValueOffset), r14);
__ movq(FieldOperand(r11,
r9,
times_pointer_size,
@@ -506,7 +551,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
// the string.
__ bind(&cons_string);
__ CompareRoot(FieldOperand(string, ConsString::kSecondOffset),
- Heap::kEmptyStringRootIndex);
+ Heap::kempty_stringRootIndex);
__ j(not_equal, call_runtime);
__ movq(string, FieldOperand(string, ConsString::kFirstOffset));
@@ -529,7 +574,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
// Assert that we do not have a cons or slice (indirect strings) here.
// Sequential strings have already been ruled out.
__ testb(result, Immediate(kIsIndirectStringMask));
- __ Assert(zero, "external string expected, but not found");
+ __ Assert(zero, kExternalStringExpectedButNotFound);
}
// Rule out short external strings.
STATIC_CHECK(kShortExternalStringTag != 0);
@@ -551,7 +596,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
// Dispatch on the encoding: ASCII or two-byte.
Label ascii;
__ bind(&seq_string);
- STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
+ STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
__ testb(result, Immediate(kStringEncodingMask));
__ j(not_zero, &ascii, Label::kNear);
@@ -571,12 +616,148 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ movzxbl(result, FieldOperand(string,
index,
times_1,
- SeqAsciiString::kHeaderSize));
+ SeqOneByteString::kHeaderSize));
+ __ bind(&done);
+}
+
+
+void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
+ XMMRegister input,
+ XMMRegister result,
+ XMMRegister double_scratch,
+ Register temp1,
+ Register temp2) {
+ ASSERT(!input.is(result));
+ ASSERT(!input.is(double_scratch));
+ ASSERT(!result.is(double_scratch));
+ ASSERT(!temp1.is(temp2));
+ ASSERT(ExternalReference::math_exp_constants(0).address() != NULL);
+
+ Label done;
+
+ __ movq(kScratchRegister, ExternalReference::math_exp_constants(0));
+ __ movsd(double_scratch, Operand(kScratchRegister, 0 * kDoubleSize));
+ __ xorpd(result, result);
+ __ ucomisd(double_scratch, input);
+ __ j(above_equal, &done);
+ __ ucomisd(input, Operand(kScratchRegister, 1 * kDoubleSize));
+ __ movsd(result, Operand(kScratchRegister, 2 * kDoubleSize));
+ __ j(above_equal, &done);
+ __ movsd(double_scratch, Operand(kScratchRegister, 3 * kDoubleSize));
+ __ movsd(result, Operand(kScratchRegister, 4 * kDoubleSize));
+ __ mulsd(double_scratch, input);
+ __ addsd(double_scratch, result);
+ __ movq(temp2, double_scratch);
+ __ subsd(double_scratch, result);
+ __ movsd(result, Operand(kScratchRegister, 6 * kDoubleSize));
+ __ lea(temp1, Operand(temp2, 0x1ff800));
+ __ and_(temp2, Immediate(0x7ff));
+ __ shr(temp1, Immediate(11));
+ __ mulsd(double_scratch, Operand(kScratchRegister, 5 * kDoubleSize));
+ __ movq(kScratchRegister, ExternalReference::math_exp_log_table());
+ __ shl(temp1, Immediate(52));
+ __ or_(temp1, Operand(kScratchRegister, temp2, times_8, 0));
+ __ movq(kScratchRegister, ExternalReference::math_exp_constants(0));
+ __ subsd(double_scratch, input);
+ __ movsd(input, double_scratch);
+ __ subsd(result, double_scratch);
+ __ mulsd(input, double_scratch);
+ __ mulsd(result, input);
+ __ movq(input, temp1);
+ __ mulsd(result, Operand(kScratchRegister, 7 * kDoubleSize));
+ __ subsd(result, double_scratch);
+ __ addsd(result, Operand(kScratchRegister, 8 * kDoubleSize));
+ __ mulsd(result, input);
+
__ bind(&done);
}
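The EmitMathExp sequence above is a table-driven exp kernel. In general terms (this is the standard identity such kernels are built on, not a constant-by-constant account of this particular code):

    e^x = 2^{x \log_2 e}, \qquad x \log_2 e = n + \tfrac{j}{T} + r
    \;\Longrightarrow\; e^x = 2^{n} \cdot 2^{j/T} \cdot 2^{r}

where n is an integer folded directly into the exponent bits of the result (the shl by 52), j indexes a table of precomputed 2^{j/T} values (math_exp_log_table; the 0x7ff mask and 11-bit shift suggest T = 2048), and 2^{r} is approximated by a short polynomial in the reduced argument. The two ucomisd comparisons at the top clamp out-of-range inputs, returning 0 for very negative arguments and a precomputed saturation constant for very large ones.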
#undef __
+
+static byte* GetNoCodeAgeSequence(uint32_t* length) {
+ static bool initialized = false;
+ static byte sequence[kNoCodeAgeSequenceLength];
+ *length = kNoCodeAgeSequenceLength;
+ if (!initialized) {
+ // The sequence of instructions that is patched out for aging code is the
+ // following boilerplate stack-building prologue that is found both in
+ // FUNCTION and OPTIMIZED_FUNCTION code:
+ CodePatcher patcher(sequence, kNoCodeAgeSequenceLength);
+ patcher.masm()->push(rbp);
+ patcher.masm()->movq(rbp, rsp);
+ patcher.masm()->push(rsi);
+ patcher.masm()->push(rdi);
+ initialized = true;
+ }
+ return sequence;
+}
+
+
+bool Code::IsYoungSequence(byte* sequence) {
+ uint32_t young_length;
+ byte* young_sequence = GetNoCodeAgeSequence(&young_length);
+ bool result = (!memcmp(sequence, young_sequence, young_length));
+ ASSERT(result || *sequence == kCallOpcode);
+ return result;
+}
+
+
+void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
+ MarkingParity* parity) {
+ if (IsYoungSequence(sequence)) {
+ *age = kNoAgeCodeAge;
+ *parity = NO_MARKING_PARITY;
+ } else {
+ sequence++; // Skip the kCallOpcode byte
+ Address target_address = sequence + *reinterpret_cast<int*>(sequence) +
+ Assembler::kCallTargetAddressOffset;
+ Code* stub = GetCodeFromTargetAddress(target_address);
+ GetCodeAgeAndParity(stub, age, parity);
+ }
+}
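GetCodeAgeAndParity above recovers the age stub by decoding the rel32 call that PatchPlatformCodeAge writes over the prologue. The decoding is just the standard x86/x64 rule that a 5-byte near call (opcode 0xE8) targets the end of the instruction plus a signed 32-bit displacement; a self-contained sketch with made-up byte values:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Decode the target of a 5-byte near call: opcode 0xE8 followed by rel32.
    static const uint8_t* CallTarget(const uint8_t* call_site) {
      int32_t rel32;
      std::memcpy(&rel32, call_site + 1, sizeof(rel32));  // displacement after the opcode
      return call_site + 5 + rel32;  // relative to the next instruction
    }

    int main() {
      uint8_t code[32] = {0xE8, 0x0B, 0x00, 0x00, 0x00};  // call +11
      std::printf("%d\n", static_cast<int>(CallTarget(code) - code));  // prints 16
      return 0;
    }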
+
+
+void Code::PatchPlatformCodeAge(Isolate* isolate,
+ byte* sequence,
+ Code::Age age,
+ MarkingParity parity) {
+ uint32_t young_length;
+ byte* young_sequence = GetNoCodeAgeSequence(&young_length);
+ if (age == kNoAgeCodeAge) {
+ CopyBytes(sequence, young_sequence, young_length);
+ CPU::FlushICache(sequence, young_length);
+ } else {
+ Code* stub = GetCodeAgeStub(isolate, age, parity);
+ CodePatcher patcher(sequence, young_length);
+ patcher.masm()->call(stub->instruction_start());
+ patcher.masm()->Nop(
+ kNoCodeAgeSequenceLength - Assembler::kShortCallInstructionLength);
+ }
+}
+
+
+Operand StackArgumentsAccessor::GetArgumentOperand(int index) {
+ ASSERT(index >= 0);
+ int receiver = (receiver_mode_ == ARGUMENTS_CONTAIN_RECEIVER) ? 1 : 0;
+ int displacement_to_last_argument = base_reg_.is(rsp) ?
+ kPCOnStackSize : kFPOnStackSize + kPCOnStackSize;
+ displacement_to_last_argument += extra_displacement_to_last_argument_;
+ if (argument_count_reg_.is(no_reg)) {
+ // argument[0] is at base_reg_ + displacement_to_last_argument +
+ // (argument_count_immediate_ + receiver - 1) * kPointerSize.
+ ASSERT(argument_count_immediate_ + receiver > 0);
+ return Operand(base_reg_, displacement_to_last_argument +
+ (argument_count_immediate_ + receiver - 1 - index) * kPointerSize);
+ } else {
+ // argument[0] is at base_reg_ + displacement_to_last_argument +
+ // argument_count_reg_ * times_pointer_size + (receiver - 1) * kPointerSize.
+ return Operand(base_reg_, argument_count_reg_, times_pointer_size,
+ displacement_to_last_argument + (receiver - 1 - index) * kPointerSize);
+ }
+}
+
+
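A worked example for the operand arithmetic above, assuming the x64 layout the code relies on (8-byte slots, the return address in the slot at base_reg when base_reg is rsp). Note that with ARGUMENTS_CONTAIN_RECEIVER the receiver itself is index 0, which is what GetReceiverOperand in the header relies on. A self-contained sketch of the immediate-count branch:

    #include <cstdio>

    // Mirrors the immediate-count branch of GetArgumentOperand: byte offset from
    // rsp for argument `index`, given `argc` explicit arguments on the stack.
    static int ArgumentOffset(int argc, bool contains_receiver, int index) {
      const int kSlot = 8;                              // kPointerSize on x64
      const int displacement_to_last_argument = kSlot;  // skip the return address
      const int receiver = contains_receiver ? 1 : 0;
      return displacement_to_last_argument + (argc + receiver - 1 - index) * kSlot;
    }

    int main() {
      // Two explicit arguments plus a receiver on the stack:
      std::printf("%d %d %d\n",
                  ArgumentOffset(2, true, 0),   // 24: the receiver (index 0)
                  ArgumentOffset(2, true, 1),   // 16: first explicit argument
                  ArgumentOffset(2, true, 2));  //  8: last (most recently pushed) argument
      return 0;
    }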
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_X64
diff --git a/deps/v8/src/x64/codegen-x64.h b/deps/v8/src/x64/codegen-x64.h
index 2e8075103..811ac507d 100644
--- a/deps/v8/src/x64/codegen-x64.h
+++ b/deps/v8/src/x64/codegen-x64.h
@@ -39,16 +39,19 @@ class CompilationInfo;
enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
-
// -------------------------------------------------------------------------
// CodeGenerator
class CodeGenerator: public AstVisitor {
public:
+ explicit CodeGenerator(Isolate* isolate) {
+ InitializeAstVisitor(isolate);
+ }
+
static bool MakeCode(CompilationInfo* info);
// Printing of AST, etc. as requested by flags.
- static void MakeCodePrologue(CompilationInfo* info);
+ static void MakeCodePrologue(CompilationInfo* info, const char* kind);
// Allocate and install the code.
static Handle<Code> MakeCodeEpilogue(MacroAssembler* masm,
@@ -58,12 +61,14 @@ class CodeGenerator: public AstVisitor {
// Print the code after compiling it.
static void PrintCode(Handle<Code> code, CompilationInfo* info);
- static bool ShouldGenerateLog(Expression* type);
+ static bool ShouldGenerateLog(Isolate* isolate, Expression* type);
static bool RecordPositions(MacroAssembler* masm,
int pos,
bool right_here = false);
+ DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
+
private:
DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
};
@@ -84,6 +89,87 @@ class StringCharLoadGenerator : public AllStatic {
DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
};
+
+class MathExpGenerator : public AllStatic {
+ public:
+ static void EmitMathExp(MacroAssembler* masm,
+ XMMRegister input,
+ XMMRegister result,
+ XMMRegister double_scratch,
+ Register temp1,
+ Register temp2);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MathExpGenerator);
+};
+
+
+enum StackArgumentsAccessorReceiverMode {
+ ARGUMENTS_CONTAIN_RECEIVER,
+ ARGUMENTS_DONT_CONTAIN_RECEIVER
+};
+
+
+class StackArgumentsAccessor BASE_EMBEDDED {
+ public:
+ StackArgumentsAccessor(
+ Register base_reg,
+ int argument_count_immediate,
+ StackArgumentsAccessorReceiverMode receiver_mode =
+ ARGUMENTS_CONTAIN_RECEIVER,
+ int extra_displacement_to_last_argument = 0)
+ : base_reg_(base_reg),
+ argument_count_reg_(no_reg),
+ argument_count_immediate_(argument_count_immediate),
+ receiver_mode_(receiver_mode),
+ extra_displacement_to_last_argument_(
+ extra_displacement_to_last_argument) { }
+
+ StackArgumentsAccessor(
+ Register base_reg,
+ Register argument_count_reg,
+ StackArgumentsAccessorReceiverMode receiver_mode =
+ ARGUMENTS_CONTAIN_RECEIVER,
+ int extra_displacement_to_last_argument = 0)
+ : base_reg_(base_reg),
+ argument_count_reg_(argument_count_reg),
+ argument_count_immediate_(0),
+ receiver_mode_(receiver_mode),
+ extra_displacement_to_last_argument_(
+ extra_displacement_to_last_argument) { }
+
+ StackArgumentsAccessor(
+ Register base_reg,
+ const ParameterCount& parameter_count,
+ StackArgumentsAccessorReceiverMode receiver_mode =
+ ARGUMENTS_CONTAIN_RECEIVER,
+ int extra_displacement_to_last_argument = 0)
+ : base_reg_(base_reg),
+ argument_count_reg_(parameter_count.is_reg() ?
+ parameter_count.reg() : no_reg),
+ argument_count_immediate_(parameter_count.is_immediate() ?
+ parameter_count.immediate() : 0),
+ receiver_mode_(receiver_mode),
+ extra_displacement_to_last_argument_(
+ extra_displacement_to_last_argument) { }
+
+ Operand GetArgumentOperand(int index);
+ Operand GetReceiverOperand() {
+ ASSERT(receiver_mode_ == ARGUMENTS_CONTAIN_RECEIVER);
+ return GetArgumentOperand(0);
+ }
+
+ private:
+ const Register base_reg_;
+ const Register argument_count_reg_;
+ const int argument_count_immediate_;
+ const StackArgumentsAccessorReceiverMode receiver_mode_;
+ const int extra_displacement_to_last_argument_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(StackArgumentsAccessor);
+};
+
+
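As a usage note, the x64 stub code earlier in this diff constructs the accessor over rsp with an immediate count and no receiver, then loads the first argument:

    StackArgumentsAccessor args(rsp, 1, ARGUMENTS_DONT_CONTAIN_RECEIVER);
    __ movq(rcx, args.GetArgumentOperand(0));

keeping the rsp-relative displacement arithmetic in one place instead of hand-writing Operand(rsp, ...) offsets at each call site.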
} } // namespace v8::internal
#endif // V8_X64_CODEGEN_X64_H_
diff --git a/deps/v8/src/x64/cpu-x64.cc b/deps/v8/src/x64/cpu-x64.cc
index 80e22c629..4fa290a8b 100644
--- a/deps/v8/src/x64/cpu-x64.cc
+++ b/deps/v8/src/x64/cpu-x64.cc
@@ -33,7 +33,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_X64)
+#if V8_TARGET_ARCH_X64
#include "cpu.h"
#include "macro-assembler.h"
@@ -72,18 +72,6 @@ void CPU::FlushICache(void* start, size_t size) {
#endif
}
-
-void CPU::DebugBreak() {
-#ifdef _MSC_VER
- // To avoid Visual Studio runtime support the following code can be used
- // instead
- // __asm { int 3 }
- __debugbreak();
-#else
- asm("int $3");
-#endif
-}
-
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_X64
diff --git a/deps/v8/src/x64/debug-x64.cc b/deps/v8/src/x64/debug-x64.cc
index 1b29e58d5..6612242a0 100644
--- a/deps/v8/src/x64/debug-x64.cc
+++ b/deps/v8/src/x64/debug-x64.cc
@@ -27,7 +27,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_X64)
+#if V8_TARGET_ARCH_X64
#include "assembler.h"
#include "codegen.h"
@@ -48,11 +48,10 @@ bool BreakLocationIterator::IsDebugBreakAtReturn() {
// CodeGenerator::VisitReturnStatement and VirtualFrame::Exit in codegen-x64.cc
// for the precise return instruction sequence.
void BreakLocationIterator::SetDebugBreakAtReturn() {
- ASSERT(Assembler::kJSReturnSequenceLength >=
- Assembler::kCallInstructionLength);
+ ASSERT(Assembler::kJSReturnSequenceLength >= Assembler::kCallSequenceLength);
rinfo()->PatchCodeWithCall(
- Isolate::Current()->debug()->debug_break_return()->entry(),
- Assembler::kJSReturnSequenceLength - Assembler::kCallInstructionLength);
+ debug_info_->GetIsolate()->debug()->debug_break_return()->entry(),
+ Assembler::kJSReturnSequenceLength - Assembler::kCallSequenceLength);
}
@@ -81,8 +80,8 @@ bool BreakLocationIterator::IsDebugBreakAtSlot() {
void BreakLocationIterator::SetDebugBreakAtSlot() {
ASSERT(IsDebugBreakSlot());
rinfo()->PatchCodeWithCall(
- Isolate::Current()->debug()->debug_break_slot()->entry(),
- Assembler::kDebugBreakSlotLength - Assembler::kCallInstructionLength);
+ debug_info_->GetIsolate()->debug()->debug_break_slot()->entry(),
+ Assembler::kDebugBreakSlotLength - Assembler::kCallSequenceLength);
}
@@ -124,14 +123,8 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
if ((object_regs & (1 << r)) != 0) {
__ push(reg);
}
- // Store the 64-bit value as two smis.
if ((non_object_regs & (1 << r)) != 0) {
- __ movq(kScratchRegister, reg);
- __ Integer32ToSmi(reg, reg);
- __ push(reg);
- __ sar(kScratchRegister, Immediate(32));
- __ Integer32ToSmi(kScratchRegister, kScratchRegister);
- __ push(kScratchRegister);
+ __ PushInt64AsTwoSmis(reg);
}
}
@@ -156,12 +149,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
}
// Reconstruct the 64-bit value from two smis.
if ((non_object_regs & (1 << r)) != 0) {
- __ pop(kScratchRegister);
- __ SmiToInteger32(kScratchRegister, kScratchRegister);
- __ shl(kScratchRegister, Immediate(32));
- __ pop(reg);
- __ SmiToInteger32(reg, reg);
- __ or_(reg, kScratchRegister);
+ __ PopInt64AsTwoSmis(reg);
}
}
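The PushInt64AsTwoSmis / PopInt64AsTwoSmis helpers replace the open-coded sequences removed above: the 64-bit register is split into two 32-bit halves, each of which fits in a smi, and reassembled on the way out. A self-contained sketch of that round trip, ignoring the smi tagging itself:

    #include <cstdint>
    #include <cstdio>

    // Split a 64-bit value into two 32-bit halves and reassemble them, mirroring
    // the push/pop pattern in the removed debug-break code above.
    static void Split(int64_t v, int32_t* lo, int32_t* hi) {
      *lo = static_cast<int32_t>(v);        // low 32 bits
      *hi = static_cast<int32_t>(v >> 32);  // arithmetic shift, like `sar 32`
    }

    static int64_t Join(int32_t lo, int32_t hi) {
      return (static_cast<int64_t>(hi) << 32) |
             (static_cast<int64_t>(lo) & 0xFFFFFFFFLL);
    }

    int main() {
      int32_t lo, hi;
      Split(INT64_C(0x123456789ABCDEF0), &lo, &hi);
      std::printf("%llx\n", static_cast<unsigned long long>(Join(lo, hi)));
      return 0;
    }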
@@ -233,6 +221,15 @@ void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
}
+void Debug::GenerateCompareNilICDebugBreak(MacroAssembler* masm) {
+ // Register state for CompareNil IC
+ // ----------- S t a t e -------------
+ // -- rax : value
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, rax.bit(), 0, false);
+}
+
+
void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) {
// Register state for IC call (from ic-x64.cc)
// ----------- S t a t e -------------
diff --git a/deps/v8/src/x64/deoptimizer-x64.cc b/deps/v8/src/x64/deoptimizer-x64.cc
index a3fe8f9cf..bf11e0860 100644
--- a/deps/v8/src/x64/deoptimizer-x64.cc
+++ b/deps/v8/src/x64/deoptimizer-x64.cc
@@ -27,7 +27,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_X64)
+#if V8_TARGET_ARCH_X64
#include "codegen.h"
#include "deoptimizer.h"
@@ -42,23 +42,11 @@ const int Deoptimizer::table_entry_size_ = 10;
int Deoptimizer::patch_size() {
- return Assembler::kCallInstructionLength;
+ return Assembler::kCallSequenceLength;
}
-void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
- HandleScope scope;
- AssertNoAllocation no_allocation;
-
- if (!function->IsOptimized()) return;
-
- // The optimized code is going to be patched, so we cannot use it
- // any more. Play safe and reset the whole cache.
- function->shared()->ClearOptimizedCodeMap();
-
- // Get the optimized code.
- Code* code = function->code();
-
+void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
// Invalidate the relocation information, as it will become invalid by the
// code patching below, and is not needed any more.
code->InvalidateRelocation();
@@ -69,7 +57,7 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
// before the safepoint table (space was allocated there when the Code
// object was created, if necessary).
- Address instruction_start = function->code()->instruction_start();
+ Address instruction_start = code->instruction_start();
#ifdef DEBUG
Address prev_call_address = NULL;
#endif
@@ -81,8 +69,9 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
Address call_address = instruction_start + deopt_data->Pc(i)->value();
// There is room enough to write a long call instruction because we pad
// LLazyBailout instructions with nops if necessary.
- CodePatcher patcher(call_address, Assembler::kCallInstructionLength);
- patcher.masm()->Call(GetDeoptimizationEntry(i, LAZY), RelocInfo::NONE);
+ CodePatcher patcher(call_address, Assembler::kCallSequenceLength);
+ patcher.masm()->Call(GetDeoptimizationEntry(isolate, i, LAZY),
+ RelocInfo::NONE64);
ASSERT(prev_call_address == NULL ||
call_address >= prev_call_address + patch_size());
ASSERT(call_address + patch_size() <= code->instruction_end());
@@ -90,782 +79,6 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
prev_call_address = call_address;
#endif
}
-
- Isolate* isolate = code->GetIsolate();
-
- // Add the deoptimizing code to the list.
- DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
- DeoptimizerData* data = isolate->deoptimizer_data();
- node->set_next(data->deoptimizing_code_list_);
- data->deoptimizing_code_list_ = node;
-
- // We might be in the middle of incremental marking with compaction.
- // Tell collector to treat this code object in a special way and
- // ignore all slots that might have been recorded on it.
- isolate->heap()->mark_compact_collector()->InvalidateCode(code);
-
- ReplaceCodeForRelatedFunctions(function, code);
-
- if (FLAG_trace_deopt) {
- PrintF("[forced deoptimization: ");
- function->PrintName();
- PrintF(" / %" V8PRIxPTR "]\n", reinterpret_cast<intptr_t>(function));
- }
-}
-
-
-static const byte kJnsInstruction = 0x79;
-static const byte kJnsOffset = 0x1f;
-static const byte kJaeInstruction = 0x73;
-static const byte kJaeOffset = 0x07;
-static const byte kCallInstruction = 0xe8;
-static const byte kNopByteOne = 0x66;
-static const byte kNopByteTwo = 0x90;
-
-void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
- Address pc_after,
- Code* check_code,
- Code* replacement_code) {
- Address call_target_address = pc_after - kIntSize;
- ASSERT_EQ(check_code->entry(),
- Assembler::target_address_at(call_target_address));
- // The stack check code matches the pattern:
- //
- // cmp rsp, <limit>
- // jae ok
- // call <stack guard>
- // test rax, <loop nesting depth>
- // ok: ...
- //
- // We will patch away the branch so the code is:
- //
- // cmp rsp, <limit> ;; Not changed
- // nop
- // nop
- // call <on-stack replacement>
- // test rax, <loop nesting depth>
- // ok:
- //
- if (FLAG_count_based_interrupts) {
- ASSERT_EQ(kJnsInstruction, *(call_target_address - 3));
- ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
- } else {
- ASSERT_EQ(kJaeInstruction, *(call_target_address - 3));
- ASSERT_EQ(kJaeOffset, *(call_target_address - 2));
- }
- ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
- *(call_target_address - 3) = kNopByteOne;
- *(call_target_address - 2) = kNopByteTwo;
- Assembler::set_target_address_at(call_target_address,
- replacement_code->entry());
-
- unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
- unoptimized_code, call_target_address, replacement_code);
-}
-
-
-void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
- Address pc_after,
- Code* check_code,
- Code* replacement_code) {
- Address call_target_address = pc_after - kIntSize;
- ASSERT(replacement_code->entry() ==
- Assembler::target_address_at(call_target_address));
- // Replace the nops from patching (Deoptimizer::PatchStackCheckCode) to
- // restore the conditional branch.
- ASSERT_EQ(kNopByteOne, *(call_target_address - 3));
- ASSERT_EQ(kNopByteTwo, *(call_target_address - 2));
- ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
- if (FLAG_count_based_interrupts) {
- *(call_target_address - 3) = kJnsInstruction;
- *(call_target_address - 2) = kJnsOffset;
- } else {
- *(call_target_address - 3) = kJaeInstruction;
- *(call_target_address - 2) = kJaeOffset;
- }
- Assembler::set_target_address_at(call_target_address,
- check_code->entry());
-
- check_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
- unoptimized_code, call_target_address, check_code);
-}
-
-
-static int LookupBailoutId(DeoptimizationInputData* data, BailoutId ast_id) {
- ByteArray* translations = data->TranslationByteArray();
- int length = data->DeoptCount();
- for (int i = 0; i < length; i++) {
- if (data->AstId(i) == ast_id) {
- TranslationIterator it(translations, data->TranslationIndex(i)->value());
- int value = it.Next();
- ASSERT(Translation::BEGIN == static_cast<Translation::Opcode>(value));
- // Read the number of frames.
- value = it.Next();
- if (value == 1) return i;
- }
- }
- UNREACHABLE();
- return -1;
-}
-
-
-void Deoptimizer::DoComputeOsrOutputFrame() {
- DeoptimizationInputData* data = DeoptimizationInputData::cast(
- optimized_code_->deoptimization_data());
- unsigned ast_id = data->OsrAstId()->value();
- // TODO(kasperl): This should not be the bailout_id_. It should be
- // the ast id. Confusing.
- ASSERT(bailout_id_ == ast_id);
-
- int bailout_id = LookupBailoutId(data, BailoutId(ast_id));
- unsigned translation_index = data->TranslationIndex(bailout_id)->value();
- ByteArray* translations = data->TranslationByteArray();
-
- TranslationIterator iterator(translations, translation_index);
- Translation::Opcode opcode =
- static_cast<Translation::Opcode>(iterator.Next());
- ASSERT(Translation::BEGIN == opcode);
- USE(opcode);
- int count = iterator.Next();
- iterator.Skip(1); // Drop JS frame count.
- ASSERT(count == 1);
- USE(count);
-
- opcode = static_cast<Translation::Opcode>(iterator.Next());
- USE(opcode);
- ASSERT(Translation::JS_FRAME == opcode);
- unsigned node_id = iterator.Next();
- USE(node_id);
- ASSERT(node_id == ast_id);
- int closure_id = iterator.Next();
- USE(closure_id);
- ASSERT_EQ(Translation::kSelfLiteralId, closure_id);
- unsigned height = iterator.Next();
- unsigned height_in_bytes = height * kPointerSize;
- USE(height_in_bytes);
-
- unsigned fixed_size = ComputeFixedSize(function_);
- unsigned input_frame_size = input_->GetFrameSize();
- ASSERT(fixed_size + height_in_bytes == input_frame_size);
-
- unsigned stack_slot_size = optimized_code_->stack_slots() * kPointerSize;
- unsigned outgoing_height = data->ArgumentsStackHeight(bailout_id)->value();
- unsigned outgoing_size = outgoing_height * kPointerSize;
- unsigned output_frame_size = fixed_size + stack_slot_size + outgoing_size;
- ASSERT(outgoing_size == 0); // OSR does not happen in the middle of a call.
-
- if (FLAG_trace_osr) {
- PrintF("[on-stack replacement: begin 0x%08" V8PRIxPTR " ",
- reinterpret_cast<intptr_t>(function_));
- function_->PrintName();
- PrintF(" => node=%u, frame=%d->%d]\n",
- ast_id,
- input_frame_size,
- output_frame_size);
- }
-
- // There's only one output frame in the OSR case.
- output_count_ = 1;
- output_ = new FrameDescription*[1];
- output_[0] = new(output_frame_size) FrameDescription(
- output_frame_size, function_);
- output_[0]->SetFrameType(StackFrame::JAVA_SCRIPT);
-
- // Clear the incoming parameters in the optimized frame to avoid
- // confusing the garbage collector.
- unsigned output_offset = output_frame_size - kPointerSize;
- int parameter_count = function_->shared()->formal_parameter_count() + 1;
- for (int i = 0; i < parameter_count; ++i) {
- output_[0]->SetFrameSlot(output_offset, 0);
- output_offset -= kPointerSize;
- }
-
- // Translate the incoming parameters. This may overwrite some of the
- // incoming argument slots we've just cleared.
- int input_offset = input_frame_size - kPointerSize;
- bool ok = true;
- int limit = input_offset - (parameter_count * kPointerSize);
- while (ok && input_offset > limit) {
- ok = DoOsrTranslateCommand(&iterator, &input_offset);
- }
-
- // There are no translation commands for the caller's pc and fp, the
- // context, and the function. Set them up explicitly.
- for (int i = StandardFrameConstants::kCallerPCOffset;
- ok && i >= StandardFrameConstants::kMarkerOffset;
- i -= kPointerSize) {
- intptr_t input_value = input_->GetFrameSlot(input_offset);
- if (FLAG_trace_osr) {
- const char* name = "UNKNOWN";
- switch (i) {
- case StandardFrameConstants::kCallerPCOffset:
- name = "caller's pc";
- break;
- case StandardFrameConstants::kCallerFPOffset:
- name = "fp";
- break;
- case StandardFrameConstants::kContextOffset:
- name = "context";
- break;
- case StandardFrameConstants::kMarkerOffset:
- name = "function";
- break;
- }
- PrintF(" [rsp + %d] <- 0x%08" V8PRIxPTR " ; [rsp + %d] "
- "(fixed part - %s)\n",
- output_offset,
- input_value,
- input_offset,
- name);
- }
- output_[0]->SetFrameSlot(output_offset, input_->GetFrameSlot(input_offset));
- input_offset -= kPointerSize;
- output_offset -= kPointerSize;
- }
-
- // Translate the rest of the frame.
- while (ok && input_offset >= 0) {
- ok = DoOsrTranslateCommand(&iterator, &input_offset);
- }
-
- // If translation of any command failed, continue using the input frame.
- if (!ok) {
- delete output_[0];
- output_[0] = input_;
- output_[0]->SetPc(reinterpret_cast<intptr_t>(from_));
- } else {
- // Set up the frame pointer and the context pointer.
- output_[0]->SetRegister(rbp.code(), input_->GetRegister(rbp.code()));
- output_[0]->SetRegister(rsi.code(), input_->GetRegister(rsi.code()));
-
- unsigned pc_offset = data->OsrPcOffset()->value();
- intptr_t pc = reinterpret_cast<intptr_t>(
- optimized_code_->entry() + pc_offset);
- output_[0]->SetPc(pc);
- }
- Code* continuation =
- function_->GetIsolate()->builtins()->builtin(Builtins::kNotifyOSR);
- output_[0]->SetContinuation(
- reinterpret_cast<intptr_t>(continuation->entry()));
-
- if (FLAG_trace_osr) {
- PrintF("[on-stack replacement translation %s: 0x%08" V8PRIxPTR " ",
- ok ? "finished" : "aborted",
- reinterpret_cast<intptr_t>(function_));
- function_->PrintName();
- PrintF(" => pc=0x%0" V8PRIxPTR "]\n", output_[0]->GetPc());
- }
-}
-
-
-void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
- int frame_index) {
- JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
- unsigned height = iterator->Next();
- unsigned height_in_bytes = height * kPointerSize;
- if (FLAG_trace_deopt) {
- PrintF(" translating arguments adaptor => height=%d\n", height_in_bytes);
- }
-
- unsigned fixed_frame_size = ArgumentsAdaptorFrameConstants::kFrameSize;
- unsigned output_frame_size = height_in_bytes + fixed_frame_size;
-
- // Allocate and store the output frame description.
- FrameDescription* output_frame =
- new(output_frame_size) FrameDescription(output_frame_size, function);
- output_frame->SetFrameType(StackFrame::ARGUMENTS_ADAPTOR);
-
- // Arguments adaptor can not be topmost or bottommost.
- ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
- ASSERT(output_[frame_index] == NULL);
- output_[frame_index] = output_frame;
-
- // The top address of the frame is computed from the previous
- // frame's top and this frame's size.
- intptr_t top_address;
- top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
- output_frame->SetTop(top_address);
-
- // Compute the incoming parameter translation.
- int parameter_count = height;
- unsigned output_offset = output_frame_size;
- for (int i = 0; i < parameter_count; ++i) {
- output_offset -= kPointerSize;
- DoTranslateCommand(iterator, frame_index, output_offset);
- }
-
- // Read caller's PC from the previous frame.
- output_offset -= kPointerSize;
- intptr_t callers_pc = output_[frame_index - 1]->GetPc();
- output_frame->SetFrameSlot(output_offset, callers_pc);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; caller's pc\n",
- top_address + output_offset, output_offset, callers_pc);
- }
-
- // Read caller's FP from the previous frame, and set this frame's FP.
- output_offset -= kPointerSize;
- intptr_t value = output_[frame_index - 1]->GetFp();
- output_frame->SetFrameSlot(output_offset, value);
- intptr_t fp_value = top_address + output_offset;
- output_frame->SetFp(fp_value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; caller's fp\n",
- fp_value, output_offset, value);
- }
-
- // A marker value is used in place of the context.
- output_offset -= kPointerSize;
- intptr_t context = reinterpret_cast<intptr_t>(
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- output_frame->SetFrameSlot(output_offset, context);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; context (adaptor sentinel)\n",
- top_address + output_offset, output_offset, context);
- }
-
- // The function was mentioned explicitly in the ARGUMENTS_ADAPTOR_FRAME.
- output_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(function);
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; function\n",
- top_address + output_offset, output_offset, value);
- }
-
- // Number of incoming arguments.
- output_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(Smi::FromInt(height - 1));
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; argc (%d)\n",
- top_address + output_offset, output_offset, value, height - 1);
- }
-
- ASSERT(0 == output_offset);
-
- Builtins* builtins = isolate_->builtins();
- Code* adaptor_trampoline =
- builtins->builtin(Builtins::kArgumentsAdaptorTrampoline);
- intptr_t pc_value = reinterpret_cast<intptr_t>(
- adaptor_trampoline->instruction_start() +
- isolate_->heap()->arguments_adaptor_deopt_pc_offset()->value());
- output_frame->SetPc(pc_value);
-}
-
-
-void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
- int frame_index) {
- Builtins* builtins = isolate_->builtins();
- Code* construct_stub = builtins->builtin(Builtins::kJSConstructStubGeneric);
- JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
- unsigned height = iterator->Next();
- unsigned height_in_bytes = height * kPointerSize;
- if (FLAG_trace_deopt) {
- PrintF(" translating construct stub => height=%d\n", height_in_bytes);
- }
-
- unsigned fixed_frame_size = 7 * kPointerSize;
- unsigned output_frame_size = height_in_bytes + fixed_frame_size;
-
- // Allocate and store the output frame description.
- FrameDescription* output_frame =
- new(output_frame_size) FrameDescription(output_frame_size, function);
- output_frame->SetFrameType(StackFrame::CONSTRUCT);
-
- // Construct stub can not be topmost or bottommost.
- ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
- ASSERT(output_[frame_index] == NULL);
- output_[frame_index] = output_frame;
-
- // The top address of the frame is computed from the previous
- // frame's top and this frame's size.
- intptr_t top_address;
- top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
- output_frame->SetTop(top_address);
-
- // Compute the incoming parameter translation.
- int parameter_count = height;
- unsigned output_offset = output_frame_size;
- for (int i = 0; i < parameter_count; ++i) {
- output_offset -= kPointerSize;
- DoTranslateCommand(iterator, frame_index, output_offset);
- }
-
- // Read caller's PC from the previous frame.
- output_offset -= kPointerSize;
- intptr_t callers_pc = output_[frame_index - 1]->GetPc();
- output_frame->SetFrameSlot(output_offset, callers_pc);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; caller's pc\n",
- top_address + output_offset, output_offset, callers_pc);
- }
-
- // Read caller's FP from the previous frame, and set this frame's FP.
- output_offset -= kPointerSize;
- intptr_t value = output_[frame_index - 1]->GetFp();
- output_frame->SetFrameSlot(output_offset, value);
- intptr_t fp_value = top_address + output_offset;
- output_frame->SetFp(fp_value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; caller's fp\n",
- fp_value, output_offset, value);
- }
-
- // The context can be gotten from the previous frame.
- output_offset -= kPointerSize;
- value = output_[frame_index - 1]->GetContext();
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; context\n",
- top_address + output_offset, output_offset, value);
- }
-
- // A marker value is used in place of the function.
- output_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::CONSTRUCT));
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; function (construct sentinel)\n",
- top_address + output_offset, output_offset, value);
- }
-
- // The output frame reflects a JSConstructStubGeneric frame.
- output_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(construct_stub);
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; code object\n",
- top_address + output_offset, output_offset, value);
- }
-
- // Number of incoming arguments.
- output_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(Smi::FromInt(height - 1));
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; argc (%d)\n",
- top_address + output_offset, output_offset, value, height - 1);
- }
-
- // The newly allocated object was passed as receiver in the artificial
- // constructor stub environment created by HEnvironment::CopyForInlining().
- output_offset -= kPointerSize;
- value = output_frame->GetFrameSlot(output_frame_size - kPointerSize);
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; allocated receiver\n",
- top_address + output_offset, output_offset, value);
- }
-
- ASSERT(0 == output_offset);
-
- intptr_t pc = reinterpret_cast<intptr_t>(
- construct_stub->instruction_start() +
- isolate_->heap()->construct_stub_deopt_pc_offset()->value());
- output_frame->SetPc(pc);
-}
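For orientation in the removed code above: the construct stub frame it builds has exactly the seven fixed slots that fixed_frame_size = 7 * kPointerSize accounts for, written in this order after the translated parameters (the last one, the allocated receiver, ends up at output_offset == 0):

    // Fixed part of the CONSTRUCT frame, in the order the removed function
    // writes it (one pointer-sized slot each):
    //   caller's pc
    //   caller's fp
    //   context (taken from the previous frame)
    //   Smi(StackFrame::CONSTRUCT) sentinel, in place of a function
    //   JSConstructStubGeneric code object
    //   argc as a Smi (height - 1)
    //   allocated receiver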
-
-
-void Deoptimizer::DoComputeAccessorStubFrame(TranslationIterator* iterator,
- int frame_index,
- bool is_setter_stub_frame) {
- JSFunction* accessor = JSFunction::cast(ComputeLiteral(iterator->Next()));
- // The receiver (and the implicit return value, if any) are expected in
- // registers by the LoadIC/StoreIC, so they don't belong to the output stack
- // frame. This means that we have to use a height of 0.
- unsigned height = 0;
- unsigned height_in_bytes = height * kPointerSize;
- const char* kind = is_setter_stub_frame ? "setter" : "getter";
- if (FLAG_trace_deopt) {
- PrintF(" translating %s stub => height=%u\n", kind, height_in_bytes);
- }
-
- // We need 1 stack entry for the return address + 4 stack entries from
- // StackFrame::INTERNAL (FP, context, frame type, code object, see
- // MacroAssembler::EnterFrame). For a setter stub frame we need one additional
- // entry for the implicit return value, see
- // StoreStubCompiler::CompileStoreViaSetter.
- unsigned fixed_frame_entries = 1 + 4 + (is_setter_stub_frame ? 1 : 0);
- unsigned fixed_frame_size = fixed_frame_entries * kPointerSize;
- unsigned output_frame_size = height_in_bytes + fixed_frame_size;
-
- // Allocate and store the output frame description.
- FrameDescription* output_frame =
- new(output_frame_size) FrameDescription(output_frame_size, accessor);
- output_frame->SetFrameType(StackFrame::INTERNAL);
-
- // A frame for an accessor stub can not be the topmost or bottommost one.
- ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
- ASSERT(output_[frame_index] == NULL);
- output_[frame_index] = output_frame;
-
- // The top address of the frame is computed from the previous frame's top and
- // this frame's size.
- intptr_t top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
- output_frame->SetTop(top_address);
-
- unsigned output_offset = output_frame_size;
-
- // Read caller's PC from the previous frame.
- output_offset -= kPointerSize;
- intptr_t callers_pc = output_[frame_index - 1]->GetPc();
- output_frame->SetFrameSlot(output_offset, callers_pc);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
- " ; caller's pc\n",
- top_address + output_offset, output_offset, callers_pc);
- }
-
- // Read caller's FP from the previous frame, and set this frame's FP.
- output_offset -= kPointerSize;
- intptr_t value = output_[frame_index - 1]->GetFp();
- output_frame->SetFrameSlot(output_offset, value);
- intptr_t fp_value = top_address + output_offset;
- output_frame->SetFp(fp_value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
- " ; caller's fp\n",
- fp_value, output_offset, value);
- }
-
- // The context can be gotten from the previous frame.
- output_offset -= kPointerSize;
- value = output_[frame_index - 1]->GetContext();
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
- " ; context\n",
- top_address + output_offset, output_offset, value);
- }
-
- // A marker value is used in place of the function.
- output_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::INTERNAL));
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
- " ; function (%s sentinel)\n",
- top_address + output_offset, output_offset, value, kind);
- }
-
- // Get Code object from accessor stub.
- output_offset -= kPointerSize;
- Builtins::Name name = is_setter_stub_frame ?
- Builtins::kStoreIC_Setter_ForDeopt :
- Builtins::kLoadIC_Getter_ForDeopt;
- Code* accessor_stub = isolate_->builtins()->builtin(name);
- value = reinterpret_cast<intptr_t>(accessor_stub);
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
- " ; code object\n",
- top_address + output_offset, output_offset, value);
- }
-
- // Skip receiver.
- Translation::Opcode opcode =
- static_cast<Translation::Opcode>(iterator->Next());
- iterator->Skip(Translation::NumberOfOperandsFor(opcode));
-
- if (is_setter_stub_frame) {
- // The implicit return value was part of the artificial setter stub
- // environment.
- output_offset -= kPointerSize;
- DoTranslateCommand(iterator, frame_index, output_offset);
- }
-
- ASSERT(0 == output_offset);
-
- Smi* offset = is_setter_stub_frame ?
- isolate_->heap()->setter_stub_deopt_pc_offset() :
- isolate_->heap()->getter_stub_deopt_pc_offset();
- intptr_t pc = reinterpret_cast<intptr_t>(
- accessor_stub->instruction_start() + offset->value());
- output_frame->SetPc(pc);
-}
-
-
-void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
- int frame_index) {
- BailoutId node_id = BailoutId(iterator->Next());
- JSFunction* function;
- if (frame_index != 0) {
- function = JSFunction::cast(ComputeLiteral(iterator->Next()));
- } else {
- int closure_id = iterator->Next();
- USE(closure_id);
- ASSERT_EQ(Translation::kSelfLiteralId, closure_id);
- function = function_;
- }
- unsigned height = iterator->Next();
- unsigned height_in_bytes = height * kPointerSize;
- if (FLAG_trace_deopt) {
- PrintF(" translating ");
- function->PrintName();
- PrintF(" => node=%d, height=%d\n", node_id.ToInt(), height_in_bytes);
- }
-
- // The 'fixed' part of the frame consists of the incoming parameters and
- // the part described by JavaScriptFrameConstants.
- unsigned fixed_frame_size = ComputeFixedSize(function);
- unsigned input_frame_size = input_->GetFrameSize();
- unsigned output_frame_size = height_in_bytes + fixed_frame_size;
-
- // Allocate and store the output frame description.
- FrameDescription* output_frame =
- new(output_frame_size) FrameDescription(output_frame_size, function);
- output_frame->SetFrameType(StackFrame::JAVA_SCRIPT);
-
- bool is_bottommost = (0 == frame_index);
- bool is_topmost = (output_count_ - 1 == frame_index);
- ASSERT(frame_index >= 0 && frame_index < output_count_);
- ASSERT(output_[frame_index] == NULL);
- output_[frame_index] = output_frame;
-
- // The top address for the bottommost output frame can be computed from
- // the input frame pointer and the output frame's height. For all
- // subsequent output frames, it can be computed from the previous one's
- // top address and the current frame's size.
- intptr_t top_address;
- if (is_bottommost) {
- // 2 = context and function in the frame.
- top_address =
- input_->GetRegister(rbp.code()) - (2 * kPointerSize) - height_in_bytes;
- } else {
- top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
- }
- output_frame->SetTop(top_address);
-
- // Compute the incoming parameter translation.
- int parameter_count = function->shared()->formal_parameter_count() + 1;
- unsigned output_offset = output_frame_size;
- unsigned input_offset = input_frame_size;
- for (int i = 0; i < parameter_count; ++i) {
- output_offset -= kPointerSize;
- DoTranslateCommand(iterator, frame_index, output_offset);
- }
- input_offset -= (parameter_count * kPointerSize);
-
- // There are no translation commands for the caller's pc and fp, the
- // context, and the function. Synthesize their values and set them up
- // explicitly.
- //
- // The caller's pc for the bottommost output frame is the same as in the
- // input frame. For all subsequent output frames, it can be read from the
- // previous one. This frame's pc can be computed from the non-optimized
- // function code and AST id of the bailout.
- output_offset -= kPointerSize;
- input_offset -= kPointerSize;
- intptr_t value;
- if (is_bottommost) {
- value = input_->GetFrameSlot(input_offset);
- } else {
- value = output_[frame_index - 1]->GetPc();
- }
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; caller's pc\n",
- top_address + output_offset, output_offset, value);
- }
-
- // The caller's frame pointer for the bottommost output frame is the same
- // as in the input frame. For all subsequent output frames, it can be
- // read from the previous one. Also compute and set this frame's frame
- // pointer.
- output_offset -= kPointerSize;
- input_offset -= kPointerSize;
- if (is_bottommost) {
- value = input_->GetFrameSlot(input_offset);
- } else {
- value = output_[frame_index - 1]->GetFp();
- }
- output_frame->SetFrameSlot(output_offset, value);
- intptr_t fp_value = top_address + output_offset;
- ASSERT(!is_bottommost || input_->GetRegister(rbp.code()) == fp_value);
- output_frame->SetFp(fp_value);
- if (is_topmost) output_frame->SetRegister(rbp.code(), fp_value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; caller's fp\n",
- fp_value, output_offset, value);
- }
-
- // For the bottommost output frame the context can be gotten from the input
- // frame. For all subsequent output frames it can be gotten from the function
- // so long as we don't inline functions that need local contexts.
- output_offset -= kPointerSize;
- input_offset -= kPointerSize;
- if (is_bottommost) {
- value = input_->GetFrameSlot(input_offset);
- } else {
- value = reinterpret_cast<intptr_t>(function->context());
- }
- output_frame->SetFrameSlot(output_offset, value);
- output_frame->SetContext(value);
- if (is_topmost) output_frame->SetRegister(rsi.code(), value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR "; context\n",
- top_address + output_offset, output_offset, value);
- }
-
- // The function was mentioned explicitly in the BEGIN_FRAME.
- output_offset -= kPointerSize;
- input_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(function);
- // The function for the bottommost output frame should also agree with the
- // input frame.
- ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR "; function\n",
- top_address + output_offset, output_offset, value);
- }
-
- // Translate the rest of the frame.
- for (unsigned i = 0; i < height; ++i) {
- output_offset -= kPointerSize;
- DoTranslateCommand(iterator, frame_index, output_offset);
- }
- ASSERT(0 == output_offset);
-
- // Compute this frame's PC, state, and continuation.
- Code* non_optimized_code = function->shared()->code();
- FixedArray* raw_data = non_optimized_code->deoptimization_data();
- DeoptimizationOutputData* data = DeoptimizationOutputData::cast(raw_data);
- Address start = non_optimized_code->instruction_start();
- unsigned pc_and_state = GetOutputInfo(data, node_id, function->shared());
- unsigned pc_offset = FullCodeGenerator::PcField::decode(pc_and_state);
- intptr_t pc_value = reinterpret_cast<intptr_t>(start + pc_offset);
- output_frame->SetPc(pc_value);
-
- FullCodeGenerator::State state =
- FullCodeGenerator::StateField::decode(pc_and_state);
- output_frame->SetState(Smi::FromInt(state));
-
- // Set the continuation for the topmost frame.
- if (is_topmost && bailout_type_ != DEBUGGER) {
- Code* continuation = (bailout_type_ == EAGER)
- ? isolate_->builtins()->builtin(Builtins::kNotifyDeoptimized)
- : isolate_->builtins()->builtin(Builtins::kNotifyLazyDeoptimized);
- output_frame->SetContinuation(
- reinterpret_cast<intptr_t>(continuation->entry()));
- }
}
@@ -878,7 +91,7 @@ void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
}
input_->SetRegister(rsp.code(), reinterpret_cast<intptr_t>(frame->sp()));
input_->SetRegister(rbp.code(), reinterpret_cast<intptr_t>(frame->fp()));
- for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
+ for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
input_->SetDoubleRegister(i, 0.0);
}
@@ -889,6 +102,30 @@ void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
}
+void Deoptimizer::SetPlatformCompiledStubRegisters(
+ FrameDescription* output_frame, CodeStubInterfaceDescriptor* descriptor) {
+ intptr_t handler =
+ reinterpret_cast<intptr_t>(descriptor->deoptimization_handler_);
+ int params = descriptor->environment_length();
+ output_frame->SetRegister(rax.code(), params);
+ output_frame->SetRegister(rbx.code(), handler);
+}
+
+
+void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
+ for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); ++i) {
+ double double_value = input_->GetDoubleRegister(i);
+ output_frame->SetDoubleRegister(i, double_value);
+ }
+}
+
+
+bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
+ // There is no dynamic alignment padding on x64 in the input frame.
+ return false;
+}
+
+
#define __ masm()->
void Deoptimizer::EntryGenerator::Generate() {
@@ -898,10 +135,10 @@ void Deoptimizer::EntryGenerator::Generate() {
const int kNumberOfRegisters = Register::kNumRegisters;
const int kDoubleRegsSize = kDoubleSize *
- XMMRegister::kNumAllocatableRegisters;
+ XMMRegister::NumAllocatableRegisters();
__ subq(rsp, Immediate(kDoubleRegsSize));
- for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
+ for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); ++i) {
XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
int offset = i * kDoubleSize;
__ movsd(Operand(rsp, offset), xmm_reg);
@@ -917,39 +154,19 @@ void Deoptimizer::EntryGenerator::Generate() {
const int kSavedRegistersAreaSize = kNumberOfRegisters * kPointerSize +
kDoubleRegsSize;
- // When calling new_deoptimizer_function we need to pass the last argument
- // on the stack on windows and in r8 on linux. The remaining arguments are
- // all passed in registers (different ones on linux and windows though).
-
-#ifdef _WIN64
- Register arg4 = r9;
- Register arg3 = r8;
- Register arg2 = rdx;
- Register arg1 = rcx;
-#else
- Register arg4 = rcx;
- Register arg3 = rdx;
- Register arg2 = rsi;
- Register arg1 = rdi;
-#endif
-
// We use this to keep the value of the fifth argument temporarily.
// Unfortunately we can't store it directly in r8 (used for passing
// this on linux), since it is another parameter passing register on windows.
Register arg5 = r11;
// Get the bailout id from the stack.
- __ movq(arg3, Operand(rsp, kSavedRegistersAreaSize));
+ __ movq(arg_reg_3, Operand(rsp, kSavedRegistersAreaSize));
- // Get the address of the location in the code object if possible
+ // Get the address of the location in the code object
// and compute the fp-to-sp delta in register arg5.
- if (type() == EAGER) {
- __ Set(arg4, 0);
- __ lea(arg5, Operand(rsp, kSavedRegistersAreaSize + 1 * kPointerSize));
- } else {
- __ movq(arg4, Operand(rsp, kSavedRegistersAreaSize + 1 * kPointerSize));
- __ lea(arg5, Operand(rsp, kSavedRegistersAreaSize + 2 * kPointerSize));
- }
+ __ movq(arg_reg_4,
+ Operand(rsp, kSavedRegistersAreaSize + 1 * kPointerSize));
+ __ lea(arg5, Operand(rsp, kSavedRegistersAreaSize + 2 * kPointerSize));
__ subq(arg5, rbp);
__ neg(arg5);
@@ -957,26 +174,23 @@ void Deoptimizer::EntryGenerator::Generate() {
// Allocate a new deoptimizer object.
__ PrepareCallCFunction(6);
__ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ movq(arg1, rax);
- __ Set(arg2, type());
+ __ movq(arg_reg_1, rax);
+ __ Set(arg_reg_2, type());
// Args 3 and 4 are already in the right registers.
// On windows put the arguments on the stack (PrepareCallCFunction
// has created space for this). On linux pass the arguments in r8 and r9.
#ifdef _WIN64
__ movq(Operand(rsp, 4 * kPointerSize), arg5);
- __ LoadAddress(arg5, ExternalReference::isolate_address());
+ __ LoadAddress(arg5, ExternalReference::isolate_address(isolate()));
__ movq(Operand(rsp, 5 * kPointerSize), arg5);
#else
__ movq(r8, arg5);
- __ LoadAddress(r9, ExternalReference::isolate_address());
+ __ LoadAddress(r9, ExternalReference::isolate_address(isolate()));
#endif
- Isolate* isolate = masm()->isolate();
-
- {
- AllowExternalCallThatCantCauseGC scope(masm());
- __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
+ { AllowExternalCallThatCantCauseGC scope(masm());
+ __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate()), 6);
}
// Preserve deoptimizer object in register rax and get the input
// frame descriptor pointer.
@@ -990,17 +204,13 @@ void Deoptimizer::EntryGenerator::Generate() {
// Fill in the double input registers.
int double_regs_offset = FrameDescription::double_registers_offset();
- for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; i++) {
+ for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
int dst_offset = i * kDoubleSize + double_regs_offset;
__ pop(Operand(rbx, dst_offset));
}
- // Remove the bailout id from the stack.
- if (type() == EAGER) {
- __ addq(rsp, Immediate(kPointerSize));
- } else {
- __ addq(rsp, Immediate(2 * kPointerSize));
- }
+ // Remove the bailout id and return address from the stack.
+ __ addq(rsp, Immediate(2 * kPointerSize));
// Compute a pointer to the unwinding limit in register rcx; that is
// the first stack slot not part of the input frame.
@@ -1011,58 +221,61 @@ void Deoptimizer::EntryGenerator::Generate() {
// limit and copy the contents of the activation frame to the input
// frame description.
__ lea(rdx, Operand(rbx, FrameDescription::frame_content_offset()));
+ Label pop_loop_header;
+ __ jmp(&pop_loop_header);
Label pop_loop;
__ bind(&pop_loop);
__ pop(Operand(rdx, 0));
__ addq(rdx, Immediate(sizeof(intptr_t)));
+ __ bind(&pop_loop_header);
__ cmpq(rcx, rsp);
__ j(not_equal, &pop_loop);
// Compute the output frame in the deoptimizer.
__ push(rax);
__ PrepareCallCFunction(2);
- __ movq(arg1, rax);
- __ LoadAddress(arg2, ExternalReference::isolate_address());
+ __ movq(arg_reg_1, rax);
+ __ LoadAddress(arg_reg_2, ExternalReference::isolate_address(isolate()));
{
AllowExternalCallThatCantCauseGC scope(masm());
__ CallCFunction(
- ExternalReference::compute_output_frames_function(isolate), 2);
+ ExternalReference::compute_output_frames_function(isolate()), 2);
}
__ pop(rax);
// Replace the current frame with the output frames.
- Label outer_push_loop, inner_push_loop;
+ Label outer_push_loop, inner_push_loop,
+ outer_loop_header, inner_loop_header;
// Outer loop state: rax = current FrameDescription**, rdx = one past the
// last FrameDescription**.
__ movl(rdx, Operand(rax, Deoptimizer::output_count_offset()));
__ movq(rax, Operand(rax, Deoptimizer::output_offset()));
- __ lea(rdx, Operand(rax, rdx, times_8, 0));
+ __ lea(rdx, Operand(rax, rdx, times_pointer_size, 0));
+ __ jmp(&outer_loop_header);
__ bind(&outer_push_loop);
// Inner loop state: rbx = current FrameDescription*, rcx = loop index.
__ movq(rbx, Operand(rax, 0));
__ movq(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
+ __ jmp(&inner_loop_header);
__ bind(&inner_push_loop);
__ subq(rcx, Immediate(sizeof(intptr_t)));
__ push(Operand(rbx, rcx, times_1, FrameDescription::frame_content_offset()));
+ __ bind(&inner_loop_header);
__ testq(rcx, rcx);
__ j(not_zero, &inner_push_loop);
__ addq(rax, Immediate(kPointerSize));
+ __ bind(&outer_loop_header);
__ cmpq(rax, rdx);
__ j(below, &outer_push_loop);
- // In case of OSR, we have to restore the XMM registers.
- if (type() == OSR) {
- for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
- XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
- int src_offset = i * kDoubleSize + double_regs_offset;
- __ movsd(xmm_reg, Operand(rbx, src_offset));
- }
+ for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); ++i) {
+ XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
+ int src_offset = i * kDoubleSize + double_regs_offset;
+ __ movsd(xmm_reg, Operand(rbx, src_offset));
}
// Push state, pc, and continuation from the last output frame.
- if (type() != OSR) {
- __ push(Operand(rbx, FrameDescription::state_offset()));
- }
+ __ push(Operand(rbx, FrameDescription::state_offset()));
__ push(Operand(rbx, FrameDescription::pc_offset()));
__ push(Operand(rbx, FrameDescription::continuation_offset()));
@@ -1106,6 +319,17 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
__ bind(&done);
}
+
+void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
+ SetFrameSlot(offset, value);
+}
+
+
+void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
+ SetFrameSlot(offset, value);
+}
+
+
#undef __
diff --git a/deps/v8/src/x64/disasm-x64.cc b/deps/v8/src/x64/disasm-x64.cc
index c8606c40b..7735b552f 100644
--- a/deps/v8/src/x64/disasm-x64.cc
+++ b/deps/v8/src/x64/disasm-x64.cc
@@ -31,7 +31,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_X64)
+#if V8_TARGET_ARCH_X64
#include "disasm.h"
#include "lazy-instance.h"
@@ -49,6 +49,7 @@ enum OperandType {
BYTE_OPER_REG_OP_ORDER = OPER_REG_OP_ORDER | BYTE_SIZE_OPERAND_FLAG
};
+
//------------------------------------------------------------------
// Tables
//------------------------------------------------------------------
@@ -92,7 +93,7 @@ static const ByteMnemonic two_operands_instr[] = {
{ 0x39, OPER_REG_OP_ORDER, "cmp" },
{ 0x3A, BYTE_REG_OPER_OP_ORDER, "cmp" },
{ 0x3B, REG_OPER_OP_ORDER, "cmp" },
- { 0x63, REG_OPER_OP_ORDER, "movsxlq" },
+ { 0x63, REG_OPER_OP_ORDER, "movsxl" },
{ 0x84, BYTE_REG_OPER_OP_ORDER, "test" },
{ 0x85, REG_OPER_OP_ORDER, "test" },
{ 0x86, BYTE_REG_OPER_OP_ORDER, "xchg" },
@@ -293,6 +294,7 @@ static InstructionDesc cmov_instructions[16] = {
{"cmovg", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false}
};
+
//------------------------------------------------------------------------------
// DisassemblerX64 implementation.
@@ -301,6 +303,7 @@ enum UnimplementedOpcodeAction {
ABORT_ON_UNIMPLEMENTED_OPCODE
};
+
// A new DisassemblerX64 object is created to disassemble each instruction.
// The object can only disassemble a single instruction.
class DisassemblerX64 {
@@ -329,10 +332,10 @@ class DisassemblerX64 {
private:
enum OperandSize {
- BYTE_SIZE = 0,
- WORD_SIZE = 1,
- DOUBLEWORD_SIZE = 2,
- QUADWORD_SIZE = 3
+ OPERAND_BYTE_SIZE = 0,
+ OPERAND_WORD_SIZE = 1,
+ OPERAND_DOUBLEWORD_SIZE = 2,
+ OPERAND_QUADWORD_SIZE = 3
};
const NameConverter& converter_;
@@ -366,10 +369,10 @@ class DisassemblerX64 {
bool rex_w() { return (rex_ & 0x08) != 0; }
OperandSize operand_size() {
- if (byte_size_operand_) return BYTE_SIZE;
- if (rex_w()) return QUADWORD_SIZE;
- if (operand_size_ != 0) return WORD_SIZE;
- return DOUBLEWORD_SIZE;
+ if (byte_size_operand_) return OPERAND_BYTE_SIZE;
+ if (rex_w()) return OPERAND_QUADWORD_SIZE;
+ if (operand_size_ != 0) return OPERAND_WORD_SIZE;
+ return OPERAND_DOUBLEWORD_SIZE;
}
char operand_size_code() {
@@ -559,19 +562,19 @@ int DisassemblerX64::PrintImmediate(byte* data, OperandSize size) {
int64_t value;
int count;
switch (size) {
- case BYTE_SIZE:
+ case OPERAND_BYTE_SIZE:
value = *data;
count = 1;
break;
- case WORD_SIZE:
+ case OPERAND_WORD_SIZE:
value = *reinterpret_cast<int16_t*>(data);
count = 2;
break;
- case DOUBLEWORD_SIZE:
+ case OPERAND_DOUBLEWORD_SIZE:
value = *reinterpret_cast<uint32_t*>(data);
count = 4;
break;
- case QUADWORD_SIZE:
+ case OPERAND_QUADWORD_SIZE:
value = *reinterpret_cast<int32_t*>(data);
count = 4;
break;
@@ -679,7 +682,8 @@ int DisassemblerX64::PrintImmediateOp(byte* data) {
AppendToBuffer("%s%c ", mnem, operand_size_code());
int count = PrintRightOperand(data + 1);
AppendToBuffer(",0x");
- OperandSize immediate_size = byte_size_immediate ? BYTE_SIZE : operand_size();
+ OperandSize immediate_size =
+ byte_size_immediate ? OPERAND_BYTE_SIZE : operand_size();
count += PrintImmediate(data + 1 + count, immediate_size);
return 1 + count;
}
@@ -1032,14 +1036,14 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
get_modrm(*current, &mod, &regop, &rm);
AppendToBuffer("extractps "); // reg/m32, xmm, imm8
current += PrintRightOperand(current);
- AppendToBuffer(", %s, %d", NameOfCPURegister(regop), (*current) & 3);
+ AppendToBuffer(",%s,%d", NameOfXMMRegister(regop), (*current) & 3);
current += 1;
} else if (third_byte == 0x0b) {
get_modrm(*current, &mod, &regop, &rm);
// roundsd xmm, xmm/m64, imm8
- AppendToBuffer("roundsd %s, ", NameOfCPURegister(regop));
- current += PrintRightOperand(current);
- AppendToBuffer(", %d", (*current) & 3);
+ AppendToBuffer("roundsd %s,", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+ AppendToBuffer(",%d", (*current) & 3);
current += 1;
} else {
UnimplementedInstruction();
@@ -1058,12 +1062,12 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
} // else no immediate displacement.
AppendToBuffer("nop");
} else if (opcode == 0x28) {
- AppendToBuffer("movapd %s, ", NameOfXMMRegister(regop));
+ AppendToBuffer("movapd %s,", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
} else if (opcode == 0x29) {
AppendToBuffer("movapd ");
current += PrintRightXMMOperand(current);
- AppendToBuffer(", %s", NameOfXMMRegister(regop));
+ AppendToBuffer(",%s", NameOfXMMRegister(regop));
} else if (opcode == 0x6E) {
AppendToBuffer("mov%c %s,",
rex_w() ? 'q' : 'd',
@@ -1077,15 +1081,15 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
AppendToBuffer("mov%c ",
rex_w() ? 'q' : 'd');
current += PrintRightOperand(current);
- AppendToBuffer(", %s", NameOfXMMRegister(regop));
+ AppendToBuffer(",%s", NameOfXMMRegister(regop));
} else if (opcode == 0x7F) {
AppendToBuffer("movdqa ");
current += PrintRightXMMOperand(current);
- AppendToBuffer(", %s", NameOfXMMRegister(regop));
+ AppendToBuffer(",%s", NameOfXMMRegister(regop));
} else if (opcode == 0xD6) {
AppendToBuffer("movq ");
current += PrintRightXMMOperand(current);
- AppendToBuffer(", %s", NameOfXMMRegister(regop));
+ AppendToBuffer(",%s", NameOfXMMRegister(regop));
} else if (opcode == 0x50) {
AppendToBuffer("movmskpd %s,", NameOfCPURegister(regop));
current += PrintRightXMMOperand(current);
@@ -1150,6 +1154,25 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
get_modrm(*current, &mod, &regop, &rm);
AppendToBuffer("%s %s,", mnemonic, NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
+ } else if (opcode == 0xC2) {
+ // Intel manual 2A, Table 3-18.
+ int mod, regop, rm;
+ get_modrm(*current, &mod, &regop, &rm);
+ const char* const pseudo_op[] = {
+ "cmpeqsd",
+ "cmpltsd",
+ "cmplesd",
+ "cmpunordsd",
+ "cmpneqsd",
+ "cmpnltsd",
+ "cmpnlesd",
+ "cmpordsd"
+ };
+ AppendToBuffer("%s %s,%s",
+ pseudo_op[current[1]],
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm));
+ current += 2;
} else {
UnimplementedInstruction();
}
@@ -1191,7 +1214,7 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
} else if (opcode == 0x7E) {
int mod, regop, rm;
get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("movq %s, ", NameOfXMMRegister(regop));
+ AppendToBuffer("movq %s,", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
} else {
UnimplementedInstruction();
@@ -1215,7 +1238,7 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
// movaps xmm, xmm/m128
int mod, regop, rm;
get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("movaps %s, ", NameOfXMMRegister(regop));
+ AppendToBuffer("movaps %s,", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
} else if (opcode == 0x29) {
@@ -1224,10 +1247,10 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
get_modrm(*current, &mod, &regop, &rm);
AppendToBuffer("movaps ");
current += PrintRightXMMOperand(current);
- AppendToBuffer(", %s", NameOfXMMRegister(regop));
+ AppendToBuffer(",%s", NameOfXMMRegister(regop));
- } else if (opcode == 0xA2 || opcode == 0x31) {
- // RDTSC or CPUID
+ } else if (opcode == 0xA2) {
+ // CPUID
AppendToBuffer("%s", mnemonic);
} else if ((opcode & 0xF0) == 0x40) {
@@ -1237,11 +1260,25 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
byte_size_operand_ = idesc.byte_size_operation;
current += PrintOperands(idesc.mnem, idesc.op_order_, current);
+ } else if (opcode == 0x54) {
+ // andps xmm, xmm/m128
+ int mod, regop, rm;
+ get_modrm(*current, &mod, &regop, &rm);
+ AppendToBuffer("andps %s,", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+
} else if (opcode == 0x57) {
// xorps xmm, xmm/m128
int mod, regop, rm;
get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("xorps %s, ", NameOfXMMRegister(regop));
+ AppendToBuffer("xorps %s,", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+
+ } else if (opcode == 0x50) {
+ // movmskps reg, xmm
+ int mod, regop, rm;
+ get_modrm(*current, &mod, &regop, &rm);
+ AppendToBuffer("movmskps %s,", NameOfCPURegister(regop));
current += PrintRightXMMOperand(current);
} else if ((opcode & 0xF0) == 0x80) {
@@ -1284,14 +1321,14 @@ const char* DisassemblerX64::TwoByteMnemonic(byte opcode) {
return "nop";
case 0x2A: // F2/F3 prefix.
return "cvtsi2s";
- case 0x31:
- return "rdtsc";
case 0x51: // F2 prefix.
return "sqrtsd";
case 0x58: // F2 prefix.
return "addsd";
case 0x59: // F2 prefix.
return "mulsd";
+ case 0x5A: // F2 prefix.
+ return "cvtsd2ss";
case 0x5C: // F2 prefix.
return "subsd";
case 0x5E: // F2 prefix.
@@ -1388,15 +1425,15 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
case MOVE_REG_INSTR: {
byte* addr = NULL;
switch (operand_size()) {
- case WORD_SIZE:
+ case OPERAND_WORD_SIZE:
addr = reinterpret_cast<byte*>(*reinterpret_cast<int16_t*>(data + 1));
data += 3;
break;
- case DOUBLEWORD_SIZE:
+ case OPERAND_DOUBLEWORD_SIZE:
addr = reinterpret_cast<byte*>(*reinterpret_cast<int32_t*>(data + 1));
data += 5;
break;
- case QUADWORD_SIZE:
+ case OPERAND_QUADWORD_SIZE:
addr = reinterpret_cast<byte*>(*reinterpret_cast<int64_t*>(data + 1));
data += 9;
break;
@@ -1420,7 +1457,7 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
case SHORT_IMMEDIATE_INSTR: {
byte* addr =
reinterpret_cast<byte*>(*reinterpret_cast<int32_t*>(data + 1));
- AppendToBuffer("%s rax, %s", idesc.mnem, NameOfAddress(addr));
+ AppendToBuffer("%s rax,%s", idesc.mnem, NameOfAddress(addr));
data += 5;
break;
}
@@ -1569,7 +1606,7 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
if (reg == 0) {
AppendToBuffer("nop"); // Common name for xchg rax,rax.
} else {
- AppendToBuffer("xchg%c rax, %s",
+ AppendToBuffer("xchg%c rax,%s",
operand_size_code(),
NameOfCPURegister(reg));
}
@@ -1598,14 +1635,14 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
bool is_32bit = (opcode >= 0xB8);
int reg = (opcode & 0x7) | (rex_b() ? 8 : 0);
if (is_32bit) {
- AppendToBuffer("mov%c %s, ",
+ AppendToBuffer("mov%c %s,",
operand_size_code(),
NameOfCPURegister(reg));
- data += PrintImmediate(data, DOUBLEWORD_SIZE);
+ data += PrintImmediate(data, OPERAND_DOUBLEWORD_SIZE);
} else {
- AppendToBuffer("movb %s, ",
+ AppendToBuffer("movb %s,",
NameOfByteCPURegister(reg));
- data += PrintImmediate(data, BYTE_SIZE);
+ data += PrintImmediate(data, OPERAND_BYTE_SIZE);
}
break;
}
@@ -1634,7 +1671,7 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
case 0xA1: // Fall through.
case 0xA3:
switch (operand_size()) {
- case DOUBLEWORD_SIZE: {
+ case OPERAND_DOUBLEWORD_SIZE: {
const char* memory_location = NameOfAddress(
reinterpret_cast<byte*>(
*reinterpret_cast<int32_t*>(data + 1)));
@@ -1646,7 +1683,7 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
data += 5;
break;
}
- case QUADWORD_SIZE: {
+ case OPERAND_QUADWORD_SIZE: {
// New x64 instruction mov rax,(imm_64).
const char* memory_location = NameOfAddress(
*reinterpret_cast<byte**>(data + 1));
@@ -1672,15 +1709,15 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
case 0xA9: {
int64_t value = 0;
switch (operand_size()) {
- case WORD_SIZE:
+ case OPERAND_WORD_SIZE:
value = *reinterpret_cast<uint16_t*>(data + 1);
data += 3;
break;
- case DOUBLEWORD_SIZE:
+ case OPERAND_DOUBLEWORD_SIZE:
value = *reinterpret_cast<uint32_t*>(data + 1);
data += 5;
break;
- case QUADWORD_SIZE:
+ case OPERAND_QUADWORD_SIZE:
value = *reinterpret_cast<int32_t*>(data + 1);
data += 5;
break;
@@ -1724,6 +1761,11 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
data += F6F7Instruction(data);
break;
+ case 0x3C:
+ AppendToBuffer("cmp al,0x%x", *reinterpret_cast<int8_t*>(data + 1));
+ data += 2;
+ break;
+
default:
UnimplementedInstruction();
data += 1;
@@ -1751,6 +1793,7 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
return instr_len;
}
+
//------------------------------------------------------------------------------
@@ -1810,6 +1853,7 @@ const char* NameConverter::NameInCode(byte* addr) const {
return "";
}
+
//------------------------------------------------------------------------------
Disassembler::Disassembler(const NameConverter& converter)
diff --git a/deps/v8/src/x64/frames-x64.cc b/deps/v8/src/x64/frames-x64.cc
index 6c58bc9e0..5cc27a6e1 100644
--- a/deps/v8/src/x64/frames-x64.cc
+++ b/deps/v8/src/x64/frames-x64.cc
@@ -27,17 +27,23 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_X64)
+#if V8_TARGET_ARCH_X64
-#include "frames-inl.h"
+#include "assembler.h"
+#include "assembler-x64.h"
+#include "assembler-x64-inl.h"
+#include "frames.h"
namespace v8 {
namespace internal {
-Address ExitFrame::ComputeStackPointer(Address fp) {
- return Memory::Address_at(fp + ExitFrameConstants::kSPOffset);
-}
+Register JavaScriptFrame::fp_register() { return rbp; }
+Register JavaScriptFrame::context_register() { return rsi; }
+
+
+Register StubFailureTrampolineFrame::fp_register() { return rbp; }
+Register StubFailureTrampolineFrame::context_register() { return rsi; }
} } // namespace v8::internal
diff --git a/deps/v8/src/x64/frames-x64.h b/deps/v8/src/x64/frames-x64.h
index 3e3d63d62..fb17964ad 100644
--- a/deps/v8/src/x64/frames-x64.h
+++ b/deps/v8/src/x64/frames-x64.h
@@ -48,22 +48,15 @@ const int kNumSafepointRegisters = 16;
// ----------------------------------------------------
-class StackHandlerConstants : public AllStatic {
- public:
- static const int kNextOffset = 0 * kPointerSize;
- static const int kCodeOffset = 1 * kPointerSize;
- static const int kStateOffset = 2 * kPointerSize;
- static const int kContextOffset = 3 * kPointerSize;
- static const int kFPOffset = 4 * kPointerSize;
-
- static const int kSize = kFPOffset + kPointerSize;
-};
-
-
class EntryFrameConstants : public AllStatic {
public:
#ifdef _WIN64
- static const int kCallerFPOffset = -10 * kPointerSize;
+ static const int kCalleeSaveXMMRegisters = 10;
+ static const int kXMMRegisterSize = 16;
+ static const int kXMMRegistersBlockSize =
+ kXMMRegisterSize * kCalleeSaveXMMRegisters;
+ static const int kCallerFPOffset =
+ -10 * kPointerSize - kXMMRegistersBlockSize;
#else
static const int kCallerFPOffset = -8 * kPointerSize;
#endif
@@ -77,25 +70,11 @@ class ExitFrameConstants : public AllStatic {
static const int kSPOffset = -1 * kPointerSize;
static const int kCallerFPOffset = +0 * kPointerSize;
- static const int kCallerPCOffset = +1 * kPointerSize;
+ static const int kCallerPCOffset = kFPOnStackSize;
// FP-relative displacement of the caller's SP. It points just
// below the saved PC.
- static const int kCallerSPDisplacement = +2 * kPointerSize;
-};
-
-
-class StandardFrameConstants : public AllStatic {
- public:
- // Fixed part of the frame consists of return address, caller fp,
- // context and function.
- static const int kFixedFrameSize = 4 * kPointerSize;
- static const int kExpressionsOffset = -3 * kPointerSize;
- static const int kMarkerOffset = -2 * kPointerSize;
- static const int kContextOffset = -1 * kPointerSize;
- static const int kCallerFPOffset = 0 * kPointerSize;
- static const int kCallerPCOffset = +1 * kPointerSize;
- static const int kCallerSPOffset = +2 * kPointerSize;
+ static const int kCallerSPDisplacement = kCallerPCOffset + kPCOnStackSize;
};
@@ -103,7 +82,7 @@ class JavaScriptFrameConstants : public AllStatic {
public:
// FP-relative.
static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
- static const int kLastParameterOffset = +2 * kPointerSize;
+ static const int kLastParameterOffset = kFPOnStackSize + kPCOnStackSize;
static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
// Caller SP-relative.
@@ -114,14 +93,30 @@ class JavaScriptFrameConstants : public AllStatic {
class ArgumentsAdaptorFrameConstants : public AllStatic {
public:
+ // FP-relative.
static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset;
+
static const int kFrameSize =
StandardFrameConstants::kFixedFrameSize + kPointerSize;
};
+class ConstructFrameConstants : public AllStatic {
+ public:
+ // FP-relative.
+ static const int kImplicitReceiverOffset = -5 * kPointerSize;
+ static const int kConstructorOffset = kMinInt;
+ static const int kLengthOffset = -4 * kPointerSize;
+ static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
+
+ static const int kFrameSize =
+ StandardFrameConstants::kFixedFrameSize + 3 * kPointerSize;
+};
+
+
class InternalFrameConstants : public AllStatic {
public:
+ // FP-relative.
static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
};
@@ -131,6 +126,12 @@ inline Object* JavaScriptFrame::function_slot_object() const {
return Memory::Object_at(fp() + offset);
}
+
+inline void StackHandler::SetFp(Address slot, Address fp) {
+ Memory::Address_at(slot) = fp;
+}
+
+
} } // namespace v8::internal
#endif // V8_X64_FRAMES_X64_H_
diff --git a/deps/v8/src/x64/full-codegen-x64.cc b/deps/v8/src/x64/full-codegen-x64.cc
index 475fb9de3..02ba67b90 100644
--- a/deps/v8/src/x64/full-codegen-x64.cc
+++ b/deps/v8/src/x64/full-codegen-x64.cc
@@ -27,7 +27,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_X64)
+#if V8_TARGET_ARCH_X64
#include "code-stubs.h"
#include "codegen.h"
@@ -118,8 +118,8 @@ void FullCodeGenerator::Generate() {
CompilationInfo* info = info_;
handler_table_ =
isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
- profiling_counter_ = isolate()->factory()->NewJSGlobalPropertyCell(
- Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget)));
+ profiling_counter_ = isolate()->factory()->NewCell(
+ Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
SetFunctionPosition(function());
Comment cmnt(masm_, "[ function compiled by full code generator");
@@ -127,7 +127,7 @@ void FullCodeGenerator::Generate() {
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
- info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
+ info->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
__ int3();
}
#endif
@@ -140,10 +140,9 @@ void FullCodeGenerator::Generate() {
Label ok;
__ testq(rcx, rcx);
__ j(zero, &ok, Label::kNear);
- // +1 for return address.
- int receiver_offset = (info->scope()->num_parameters() + 1) * kPointerSize;
+ StackArgumentsAccessor args(rsp, info->scope()->num_parameters());
__ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
- __ movq(Operand(rsp, receiver_offset), kScratchRegister);
+ __ movq(args.GetReceiverOperand(), kScratchRegister);
__ bind(&ok);
}
@@ -152,13 +151,14 @@ void FullCodeGenerator::Generate() {
// the frame (that is done below).
FrameScope frame_scope(masm_, StackFrame::MANUAL);
- __ push(rbp); // Caller's frame pointer.
- __ movq(rbp, rsp);
- __ push(rsi); // Callee's context.
- __ push(rdi); // Callee's JS Function.
+ info->set_prologue_offset(masm_->pc_offset());
+ __ Prologue(BUILD_FUNCTION_FRAME);
+ info->AddNoFrameRange(0, masm_->pc_offset());
{ Comment cmnt(masm_, "[ Allocate locals");
int locals_count = info->scope()->num_stack_slots();
+ // Generators allocate locals, if any, in context slots.
+ ASSERT(!info->function()->is_generator() || locals_count == 0);
if (locals_count == 1) {
__ PushRoot(Heap::kUndefinedValueRootIndex);
} else if (locals_count > 1) {
@@ -276,8 +276,7 @@ void FullCodeGenerator::Generate() {
Label ok;
__ CompareRoot(rsp, Heap::kStackLimitRootIndex);
__ j(above_equal, &ok, Label::kNear);
- StackCheckStub stub;
- __ CallStub(&stub);
+ __ call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
__ bind(&ok);
}
@@ -304,7 +303,7 @@ void FullCodeGenerator::ClearAccumulator() {
void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) {
__ movq(rbx, profiling_counter_, RelocInfo::EMBEDDED_OBJECT);
- __ SmiAddConstant(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset),
+ __ SmiAddConstant(FieldOperand(rbx, Cell::kValueOffset),
Smi::FromInt(-delta));
}
@@ -318,51 +317,33 @@ void FullCodeGenerator::EmitProfilingCounterReset() {
__ movq(rbx, profiling_counter_, RelocInfo::EMBEDDED_OBJECT);
__ movq(kScratchRegister,
reinterpret_cast<uint64_t>(Smi::FromInt(reset_value)),
- RelocInfo::NONE);
- __ movq(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset),
- kScratchRegister);
+ RelocInfo::NONE64);
+ __ movq(FieldOperand(rbx, Cell::kValueOffset), kScratchRegister);
}
-void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
- Label* back_edge_target) {
- Comment cmnt(masm_, "[ Stack check");
+void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
+ Label* back_edge_target) {
+ Comment cmnt(masm_, "[ Back edge bookkeeping");
Label ok;
- if (FLAG_count_based_interrupts) {
- int weight = 1;
- if (FLAG_weighted_back_edges) {
- ASSERT(back_edge_target->is_bound());
- int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
- weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kBackEdgeDistanceUnit));
- }
- EmitProfilingCounterDecrement(weight);
- __ j(positive, &ok, Label::kNear);
- InterruptStub stub;
- __ CallStub(&stub);
- } else {
- __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
- __ j(above_equal, &ok, Label::kNear);
- StackCheckStub stub;
- __ CallStub(&stub);
+ int weight = 1;
+ if (FLAG_weighted_back_edges) {
+ ASSERT(back_edge_target->is_bound());
+ int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
+ weight = Min(kMaxBackEdgeWeight,
+ Max(1, distance / kCodeSizeMultiplier));
}
+ EmitProfilingCounterDecrement(weight);
+ __ j(positive, &ok, Label::kNear);
+ __ call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET);
// Record a mapping of this PC offset to the OSR id. This is used to find
// the AST id from the unoptimized code in order to use it as a key into
// the deoptimization input data found in the optimized code.
- RecordStackCheck(stmt->OsrEntryId());
-
- // Loop stack checks can be patched to perform on-stack replacement. In
- // order to decide whether or not to perform OSR we embed the loop depth
- // in a test instruction after the call so we can extract it from the OSR
- // builtin.
- ASSERT(loop_depth() > 0);
- __ testl(rax, Immediate(Min(loop_depth(), Code::kMaxLoopNestingMarker)));
+ RecordBackEdge(stmt->OsrEntryId());
- if (FLAG_count_based_interrupts) {
- EmitProfilingCounterReset();
- }
+ EmitProfilingCounterReset();
__ bind(&ok);
PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
@@ -391,7 +372,7 @@ void FullCodeGenerator::EmitReturnSequence() {
} else if (FLAG_weighted_back_edges) {
int distance = masm_->pc_offset();
weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kBackEdgeDistanceUnit));
+ Max(1, distance / kCodeSizeMultiplier));
}
EmitProfilingCounterDecrement(weight);
Label ok;
@@ -401,8 +382,8 @@ void FullCodeGenerator::EmitReturnSequence() {
__ push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ CallRuntime(Runtime::kOptimizeFunctionOnNextCall, 1);
} else {
- InterruptStub stub;
- __ CallStub(&stub);
+ __ call(isolate()->builtins()->InterruptCheck(),
+ RelocInfo::CODE_TARGET);
}
__ pop(rax);
EmitProfilingCounterReset();
@@ -419,6 +400,7 @@ void FullCodeGenerator::EmitReturnSequence() {
// patch with the code required by the debugger.
__ movq(rsp, rbp);
__ pop(rbp);
+ int no_frame_start = masm_->pc_offset();
int arguments_bytes = (info_->scope()->num_parameters() + 1) * kPointerSize;
__ Ret(arguments_bytes, rcx);
@@ -436,6 +418,7 @@ void FullCodeGenerator::EmitReturnSequence() {
ASSERT(Assembler::kJSReturnSequenceLength <=
masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
#endif
+ info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
}
}
@@ -662,9 +645,8 @@ void FullCodeGenerator::DoTest(Expression* condition,
Label* if_true,
Label* if_false,
Label* fall_through) {
- ToBooleanStub stub(result_register());
- __ push(result_register());
- __ CallStub(&stub, condition->test_id());
+ Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
+ CallIC(ic, RelocInfo::CODE_TARGET, condition->test_id());
__ testq(result_register(), result_register());
// The stub returns nonzero for true.
Split(not_zero, if_true, if_false, fall_through);
@@ -692,7 +674,8 @@ MemOperand FullCodeGenerator::StackOperand(Variable* var) {
int offset = -var->index() * kPointerSize;
// Adjust by a (parameter or local) base offset.
if (var->IsParameter()) {
- offset += (info_->scope()->num_parameters() + 1) * kPointerSize;
+ offset += kFPOnStackSize + kPCOnStackSize +
+ (info_->scope()->num_parameters() - 1) * kPointerSize;
} else {
offset += JavaScriptFrameConstants::kLocal0Offset;
}
@@ -759,16 +742,15 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
- // The variable in the declaration always resides in the current function
- // context.
+ // The variable in the declaration always resides in the current context.
ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
if (generate_debug_code_) {
// Check that we're not inside a with or catch context.
__ movq(rbx, FieldOperand(rsi, HeapObject::kMapOffset));
__ CompareRoot(rbx, Heap::kWithContextMapRootIndex);
- __ Check(not_equal, "Declaration in with context.");
+ __ Check(not_equal, kDeclarationInWithContext);
__ CompareRoot(rbx, Heap::kCatchContextMapRootIndex);
- __ Check(not_equal, "Declaration in catch context.");
+ __ Check(not_equal, kDeclarationInCatchContext);
}
}
@@ -891,33 +873,32 @@ void FullCodeGenerator::VisitFunctionDeclaration(
void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
- VariableProxy* proxy = declaration->proxy();
- Variable* variable = proxy->var();
- Handle<JSModule> instance = declaration->module()->interface()->Instance();
- ASSERT(!instance.is_null());
+ Variable* variable = declaration->proxy()->var();
+ ASSERT(variable->location() == Variable::CONTEXT);
+ ASSERT(variable->interface()->IsFrozen());
- switch (variable->location()) {
- case Variable::UNALLOCATED: {
- Comment cmnt(masm_, "[ ModuleDeclaration");
- globals_->Add(variable->name(), zone());
- globals_->Add(instance, zone());
- Visit(declaration->module());
- break;
- }
+ Comment cmnt(masm_, "[ ModuleDeclaration");
+ EmitDebugCheckDeclarationContext(variable);
- case Variable::CONTEXT: {
- Comment cmnt(masm_, "[ ModuleDeclaration");
- EmitDebugCheckDeclarationContext(variable);
- __ Move(ContextOperand(rsi, variable->index()), instance);
- Visit(declaration->module());
- break;
- }
+ // Load instance object.
+ __ LoadContext(rax, scope_->ContextChainLength(scope_->GlobalScope()));
+ __ movq(rax, ContextOperand(rax, variable->interface()->Index()));
+ __ movq(rax, ContextOperand(rax, Context::EXTENSION_INDEX));
- case Variable::PARAMETER:
- case Variable::LOCAL:
- case Variable::LOOKUP:
- UNREACHABLE();
- }
+ // Assign it.
+ __ movq(ContextOperand(rsi, variable->index()), rax);
+ // We know that we have written a module, which is not a smi.
+ __ RecordWriteContextSlot(rsi,
+ Context::SlotOffset(variable->index()),
+ rax,
+ rcx,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ PrepareForBailoutForId(declaration->proxy()->id(), NO_REGISTERS);
+
+ // Traverse into body.
+ Visit(declaration->module());
}
@@ -959,6 +940,14 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
}
+void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
+ // Call the runtime to declare the modules.
+ __ Push(descriptions);
+ __ CallRuntime(Runtime::kDeclareModules, 1);
+ // Return value is ignored.
+}
+
+
void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
Comment cmnt(masm_, "[ SwitchStatement");
Breakable nested_statement(this, stmt);
@@ -1009,7 +998,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Record position before stub call for type feedback.
SetSourcePosition(clause->position());
- Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
+ Handle<Code> ic = CompareIC::GetUninitialized(isolate(), Token::EQ_STRICT);
CallIC(ic, RelocInfo::CODE_TARGET, clause->CompareId());
patch_site.EmitPatchInfo();
@@ -1051,9 +1040,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
ForIn loop_statement(this, stmt);
increment_loop_depth();
- // Get the object to enumerate over. Both SpiderMonkey and JSC
- // ignore null and undefined in contrast to the specification; see
- // ECMA-262 section 12.6.4.
+ // Get the object to enumerate over. If the object is null or undefined, skip
+ // over the loop. See ECMA-262 version 5, section 12.6.4.
VisitForAccumulatorValue(stmt->enumerable());
__ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
__ j(equal, &exit);
@@ -1134,13 +1122,12 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Label non_proxy;
__ bind(&fixed_array);
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(
- Handle<Object>(
- Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker)));
+ Handle<Cell> cell = isolate()->factory()->NewCell(
+ Handle<Object>(Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker),
+ isolate()));
RecordTypeFeedbackCell(stmt->ForInFeedbackId(), cell);
- __ LoadHeapObject(rbx, cell);
- __ Move(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset),
+ __ Move(rbx, cell);
+ __ Move(FieldOperand(rbx, Cell::kValueOffset),
Smi::FromInt(TypeFeedbackCells::kForInSlowCaseMarker));
__ Move(rbx, Smi::FromInt(1)); // Smi indicates slow check
@@ -1214,7 +1201,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ bind(loop_statement.continue_label());
__ SmiAddConstant(Operand(rsp, 0 * kPointerSize), Smi::FromInt(1));
- EmitStackCheck(stmt, &loop);
+ EmitBackEdgeBookkeeping(stmt, &loop);
__ jmp(&loop);
// Remove the pointers stored on the stack.
@@ -1228,6 +1215,64 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
}
+void FullCodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
+ Comment cmnt(masm_, "[ ForOfStatement");
+ SetStatementPosition(stmt);
+
+ Iteration loop_statement(this, stmt);
+ increment_loop_depth();
+
+ // var iterator = iterable[@@iterator]()
+ VisitForAccumulatorValue(stmt->assign_iterator());
+
+ // As with for-in, skip the loop if the iterator is null or undefined.
+ __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
+ __ j(equal, loop_statement.break_label());
+ __ CompareRoot(rax, Heap::kNullValueRootIndex);
+ __ j(equal, loop_statement.break_label());
+
+ // Convert the iterator to a JS object.
+ Label convert, done_convert;
+ __ JumpIfSmi(rax, &convert);
+ __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
+ __ j(above_equal, &done_convert);
+ __ bind(&convert);
+ __ push(rax);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ bind(&done_convert);
+
+ // Loop entry.
+ __ bind(loop_statement.continue_label());
+
+ // result = iterator.next()
+ VisitForEffect(stmt->next_result());
+
+ // if (result.done) break;
+ Label result_not_done;
+ VisitForControl(stmt->result_done(),
+ loop_statement.break_label(),
+ &result_not_done,
+ &result_not_done);
+ __ bind(&result_not_done);
+
+ // each = result.value
+ VisitForEffect(stmt->assign_each());
+
+ // Generate code for the body of the loop.
+ Visit(stmt->body());
+
+ // Check stack before looping.
+ PrepareForBailoutForId(stmt->BackEdgeId(), NO_REGISTERS);
+ EmitBackEdgeBookkeeping(stmt, loop_statement.continue_label());
+ __ jmp(loop_statement.continue_label());
+
+ // Exit and decrement the loop depth.
+ PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+ __ bind(loop_statement.break_label());
+ decrement_loop_depth();
+}
+
+
void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
bool pretenure) {
// Use the fast case closure allocation code that allocates in new
@@ -1241,8 +1286,8 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
!pretenure &&
scope()->is_function_scope() &&
info->num_literals() == 0) {
- FastNewClosureStub stub(info->language_mode());
- __ Push(info);
+ FastNewClosureStub stub(info->language_mode(), info->is_generator());
+ __ Move(rbx, info);
__ CallStub(&stub);
} else {
__ push(rsi);
@@ -1368,9 +1413,9 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
} else if (var->mode() == DYNAMIC_LOCAL) {
Variable* local = var->local_if_not_shadowed();
__ movq(rax, ContextSlotOperandCheckExtensions(local, slow));
- if (local->mode() == CONST ||
- local->mode() == CONST_HARMONY ||
- local->mode() == LET) {
+ if (local->mode() == LET ||
+ local->mode() == CONST ||
+ local->mode() == CONST_HARMONY) {
__ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
__ j(not_equal, done);
if (local->mode() == CONST) {
@@ -1515,7 +1560,7 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
__ bind(&materialized);
int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
Label allocated, runtime_allocate;
- __ AllocateInNewSpace(size, rax, rcx, rdx, &runtime_allocate, TAG_OBJECT);
+ __ Allocate(size, rax, rcx, rdx, &runtime_allocate, TAG_OBJECT);
__ jmp(&allocated);
__ bind(&runtime_allocate);
@@ -1553,24 +1598,29 @@ void FullCodeGenerator::EmitAccessor(Expression* expression) {
void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
Handle<FixedArray> constant_properties = expr->constant_properties();
- __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(rdi, JSFunction::kLiteralsOffset));
- __ Push(Smi::FromInt(expr->literal_index()));
- __ Push(constant_properties);
int flags = expr->fast_elements()
? ObjectLiteral::kFastElements
: ObjectLiteral::kNoFlags;
flags |= expr->has_function()
? ObjectLiteral::kHasFunction
: ObjectLiteral::kNoFlags;
- __ Push(Smi::FromInt(flags));
int properties_count = constant_properties->length() / 2;
- if (expr->depth() > 1) {
- __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
- } else if (flags != ObjectLiteral::kFastElements ||
+ if ((FLAG_track_double_fields && expr->may_store_doubles()) ||
+ expr->depth() > 1 || Serializer::enabled() ||
+ flags != ObjectLiteral::kFastElements ||
properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
- __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
+ __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ push(FieldOperand(rdi, JSFunction::kLiteralsOffset));
+ __ Push(Smi::FromInt(expr->literal_index()));
+ __ Push(constant_properties);
+ __ Push(Smi::FromInt(flags));
+ __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
} else {
+ __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ movq(rax, FieldOperand(rdi, JSFunction::kLiteralsOffset));
+ __ Move(rbx, Smi::FromInt(expr->literal_index()));
+ __ Move(rcx, constant_properties);
+ __ Move(rdx, Smi::FromInt(flags));
FastCloneShallowObjectStub stub(properties_count);
__ CallStub(&stub);
}
@@ -1602,10 +1652,10 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
ASSERT(!CompileTimeValue::IsCompileTimeValue(value));
// Fall through.
case ObjectLiteral::Property::COMPUTED:
- if (key->handle()->IsSymbol()) {
+ if (key->value()->IsInternalizedString()) {
if (property->emit_store()) {
VisitForAccumulatorValue(value);
- __ Move(rcx, key->handle());
+ __ Move(rcx, key->value());
__ movq(rdx, Operand(rsp, 0));
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
@@ -1617,8 +1667,6 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
}
break;
}
- // Fall through.
- case ObjectLiteral::Property::PROTOTYPE:
__ push(Operand(rsp, 0)); // Duplicate receiver.
VisitForStackValue(key);
VisitForStackValue(value);
@@ -1629,6 +1677,15 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ Drop(3);
}
break;
+ case ObjectLiteral::Property::PROTOTYPE:
+ __ push(Operand(rsp, 0)); // Duplicate receiver.
+ VisitForStackValue(value);
+ if (property->emit_store()) {
+ __ CallRuntime(Runtime::kSetPrototype, 2);
+ } else {
+ __ Drop(2);
+ }
+ break;
case ObjectLiteral::Property::GETTER:
accessor_table.lookup(key)->second->getter = value;
break;
@@ -1679,33 +1736,54 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Handle<FixedArrayBase> constant_elements_values(
FixedArrayBase::cast(constant_elements->get(1)));
- __ movq(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
- __ Push(Smi::FromInt(expr->literal_index()));
- __ Push(constant_elements);
Heap* heap = isolate()->heap();
if (has_constant_fast_elements &&
constant_elements_values->map() == heap->fixed_cow_array_map()) {
// If the elements are already FAST_*_ELEMENTS, the boilerplate cannot
// change, so it's possible to specialize the stub in advance.
__ IncrementCounter(isolate()->counters()->cow_arrays_created_stub(), 1);
+ __ movq(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ movq(rax, FieldOperand(rbx, JSFunction::kLiteralsOffset));
+ __ Move(rbx, Smi::FromInt(expr->literal_index()));
+ __ Move(rcx, constant_elements);
FastCloneShallowArrayStub stub(
FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS,
+ DONT_TRACK_ALLOCATION_SITE,
length);
__ CallStub(&stub);
} else if (expr->depth() > 1) {
+ __ movq(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
+ __ Push(Smi::FromInt(expr->literal_index()));
+ __ Push(constant_elements);
__ CallRuntime(Runtime::kCreateArrayLiteral, 3);
- } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
+ } else if (Serializer::enabled() ||
+ length > FastCloneShallowArrayStub::kMaximumClonedLength) {
+ __ movq(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
+ __ Push(Smi::FromInt(expr->literal_index()));
+ __ Push(constant_elements);
__ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
} else {
ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) ||
FLAG_smi_only_arrays);
+ FastCloneShallowArrayStub::Mode mode =
+ FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
+ AllocationSiteMode allocation_site_mode = FLAG_track_allocation_sites
+ ? TRACK_ALLOCATION_SITE : DONT_TRACK_ALLOCATION_SITE;
+
// If the elements are already FAST_*_ELEMENTS, the boilerplate cannot
// change, so it's possible to specialize the stub in advance.
- FastCloneShallowArrayStub::Mode mode = has_constant_fast_elements
- ? FastCloneShallowArrayStub::CLONE_ELEMENTS
- : FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
- FastCloneShallowArrayStub stub(mode, length);
+ if (has_constant_fast_elements) {
+ mode = FastCloneShallowArrayStub::CLONE_ELEMENTS;
+ allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
+ }
+
+ __ movq(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ movq(rax, FieldOperand(rbx, JSFunction::kLiteralsOffset));
+ __ Move(rbx, Smi::FromInt(expr->literal_index()));
+ __ Move(rcx, constant_elements);
+ FastCloneShallowArrayStub stub(mode, allocation_site_mode, length);
__ CallStub(&stub);
}
@@ -1717,13 +1795,11 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Expression* subexpr = subexprs->at(i);
// If the subexpression is a literal or a simple materialized literal it
// is already set in the cloned array.
- if (subexpr->AsLiteral() != NULL ||
- CompileTimeValue::IsCompileTimeValue(subexpr)) {
- continue;
- }
+ if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
if (!result_saved) {
- __ push(rax);
+ __ push(rax); // array literal
+ __ Push(Smi::FromInt(expr->literal_index()));
result_saved = true;
}
VisitForAccumulatorValue(subexpr);
@@ -1732,7 +1808,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
// Fast-case array literal with ElementsKind of FAST_*_ELEMENTS, they
// cannot transition and don't need to call the runtime stub.
int offset = FixedArray::kHeaderSize + (i * kPointerSize);
- __ movq(rbx, Operand(rsp, 0)); // Copy of array literal.
+ __ movq(rbx, Operand(rsp, kPointerSize)); // Copy of array literal.
__ movq(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
// Store the subexpression value in the array's elements.
__ movq(FieldOperand(rbx, offset), result_register());
@@ -1743,10 +1819,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
INLINE_SMI_CHECK);
} else {
// Store the subexpression value in the array's elements.
- __ movq(rbx, Operand(rsp, 0)); // Copy of array literal.
- __ movq(rdi, FieldOperand(rbx, JSObject::kMapOffset));
__ Move(rcx, Smi::FromInt(i));
- __ Move(rdx, Smi::FromInt(expr->literal_index()));
StoreArrayLiteralElementStub stub;
__ CallStub(&stub);
}
@@ -1755,6 +1828,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
}
if (result_saved) {
+ __ addq(rsp, Immediate(kPointerSize)); // literal index
context()->PlugTOS();
} else {
context()->Plug(rax);
@@ -1875,10 +1949,291 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
}
+void FullCodeGenerator::VisitYield(Yield* expr) {
+ Comment cmnt(masm_, "[ Yield");
+ // Evaluate yielded value first; the initial iterator definition depends on
+ // this. It stays on the stack while we update the iterator.
+ VisitForStackValue(expr->expression());
+
+ switch (expr->yield_kind()) {
+ case Yield::SUSPEND:
+ // Pop value from top-of-stack slot; box result into result register.
+ EmitCreateIteratorResult(false);
+ __ push(result_register());
+ // Fall through.
+ case Yield::INITIAL: {
+ Label suspend, continuation, post_runtime, resume;
+
+ __ jmp(&suspend);
+
+ __ bind(&continuation);
+ __ jmp(&resume);
+
+ __ bind(&suspend);
+ VisitForAccumulatorValue(expr->generator_object());
+ ASSERT(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
+ __ Move(FieldOperand(rax, JSGeneratorObject::kContinuationOffset),
+ Smi::FromInt(continuation.pos()));
+ __ movq(FieldOperand(rax, JSGeneratorObject::kContextOffset), rsi);
+ __ movq(rcx, rsi);
+ __ RecordWriteField(rax, JSGeneratorObject::kContextOffset, rcx, rdx,
+ kDontSaveFPRegs);
+ __ lea(rbx, Operand(rbp, StandardFrameConstants::kExpressionsOffset));
+ __ cmpq(rsp, rbx);
+ __ j(equal, &post_runtime);
+ __ push(rax); // generator object
+ __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+ __ movq(context_register(),
+ Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ bind(&post_runtime);
+
+ __ pop(result_register());
+ EmitReturnSequence();
+
+ __ bind(&resume);
+ context()->Plug(result_register());
+ break;
+ }
+
+ case Yield::FINAL: {
+ VisitForAccumulatorValue(expr->generator_object());
+ __ Move(FieldOperand(result_register(),
+ JSGeneratorObject::kContinuationOffset),
+ Smi::FromInt(JSGeneratorObject::kGeneratorClosed));
+ // Pop value from top-of-stack slot, box result into result register.
+ EmitCreateIteratorResult(true);
+ EmitUnwindBeforeReturn();
+ EmitReturnSequence();
+ break;
+ }
+
+ case Yield::DELEGATING: {
+ VisitForStackValue(expr->generator_object());
+
+ // Initial stack layout is as follows:
+ // [sp + 1 * kPointerSize] iter
+ // [sp + 0 * kPointerSize] g
+
+ Label l_catch, l_try, l_suspend, l_continuation, l_resume;
+ Label l_next, l_call, l_loop;
+ // Initial send value is undefined.
+ __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
+ __ jmp(&l_next);
+
+ // catch (e) { receiver = iter; f = 'throw'; arg = e; goto l_call; }
+ __ bind(&l_catch);
+ handler_table()->set(expr->index(), Smi::FromInt(l_catch.pos()));
+ __ LoadRoot(rcx, Heap::kthrow_stringRootIndex); // "throw"
+ __ push(rcx);
+ __ push(Operand(rsp, 2 * kPointerSize)); // iter
+ __ push(rax); // exception
+ __ jmp(&l_call);
+
+ // try { received = %yield result }
+ // Shuffle the received result above a try handler and yield it without
+ // re-boxing.
+ __ bind(&l_try);
+ __ pop(rax); // result
+ __ PushTryHandler(StackHandler::CATCH, expr->index());
+ const int handler_size = StackHandlerConstants::kSize;
+ __ push(rax); // result
+ __ jmp(&l_suspend);
+ __ bind(&l_continuation);
+ __ jmp(&l_resume);
+ __ bind(&l_suspend);
+ const int generator_object_depth = kPointerSize + handler_size;
+ __ movq(rax, Operand(rsp, generator_object_depth));
+ __ push(rax); // g
+ ASSERT(l_continuation.pos() > 0 && Smi::IsValid(l_continuation.pos()));
+ __ Move(FieldOperand(rax, JSGeneratorObject::kContinuationOffset),
+ Smi::FromInt(l_continuation.pos()));
+ __ movq(FieldOperand(rax, JSGeneratorObject::kContextOffset), rsi);
+ __ movq(rcx, rsi);
+ __ RecordWriteField(rax, JSGeneratorObject::kContextOffset, rcx, rdx,
+ kDontSaveFPRegs);
+ __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+ __ movq(context_register(),
+ Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ pop(rax); // result
+ EmitReturnSequence();
+ __ bind(&l_resume); // received in rax
+ __ PopTryHandler();
+
+ // receiver = iter; f = 'next'; arg = received;
+ __ bind(&l_next);
+ __ LoadRoot(rcx, Heap::knext_stringRootIndex); // "next"
+ __ push(rcx);
+ __ push(Operand(rsp, 2 * kPointerSize)); // iter
+ __ push(rax); // received
+
+ // result = receiver[f](arg);
+ __ bind(&l_call);
+ Handle<Code> ic = isolate()->stub_cache()->ComputeKeyedCallInitialize(1);
+ CallIC(ic);
+ __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ Drop(1); // The key is still on the stack; drop it.
+
+ // if (!result.done) goto l_try;
+ __ bind(&l_loop);
+ __ push(rax); // save result
+ __ LoadRoot(rcx, Heap::kdone_stringRootIndex); // "done"
+ Handle<Code> done_ic = isolate()->builtins()->LoadIC_Initialize();
+ CallIC(done_ic); // result.done in rax
+ Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
+ CallIC(bool_ic);
+ __ testq(result_register(), result_register());
+ __ j(zero, &l_try);
+
+ // result.value
+ __ pop(rax); // result
+ __ LoadRoot(rcx, Heap::kvalue_stringRootIndex); // "value"
+ Handle<Code> value_ic = isolate()->builtins()->LoadIC_Initialize();
+ CallIC(value_ic); // result.value in rax
+ context()->DropAndPlug(2, rax); // drop iter and g
+ break;
+ }
+ }
+}
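
The DELEGATING case open-codes the yield* protocol: call the delegate's "next" (or "throw" after an exception), loop while result.done is false, and hand out result.value once it is true. For orientation, a minimal C++ sketch of the same control flow, using hypothetical names and a callback in place of the actual suspension:

#include <functional>
#include <stdexcept>
#include <string>

// Hypothetical {value, done} record; mirrors the iterator result objects the
// generated code probes with the "done" and "value" load ICs.
struct IterResult {
  std::string value;
  bool done;
};

// Hypothetical inner iterator, standing in for "iter" on the operand stack.
struct Iterator {
  virtual IterResult Next(const std::string& received) = 0;
  virtual IterResult Throw(const std::exception& e) = 0;
  virtual ~Iterator() = default;
};

// Sketch of the yield* loop. The suspension itself cannot be expressed here
// without coroutines, so yielding a non-done result to the outer caller (and
// receiving the next send value back) is modeled by a callback.
std::string DelegateYield(Iterator& iter,
                          const std::function<std::string(const IterResult&)>&
                              yield_to_caller) {
  std::string received;  // initial send value, undefined in the generated code
  for (;;) {
    IterResult result;
    try {
      result = iter.Next(received);   // l_next / l_call: receiver["next"](arg)
    } catch (const std::exception& e) {
      result = iter.Throw(e);         // l_catch: switch to receiver["throw"]
    }
    if (result.done) return result.value;  // l_loop: done, take result.value
    received = yield_to_caller(result);    // l_try: received = %yield result
  }
}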
+
+
+void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
+ Expression *value,
+ JSGeneratorObject::ResumeMode resume_mode) {
+ // The value stays in rax, and is ultimately read by the resumed generator, as
+ // if the CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. rbx
+ // will hold the generator object until the activation has been resumed.
+ VisitForStackValue(generator);
+ VisitForAccumulatorValue(value);
+ __ pop(rbx);
+
+ // Check generator state.
+ Label wrong_state, done;
+ STATIC_ASSERT(JSGeneratorObject::kGeneratorExecuting <= 0);
+ STATIC_ASSERT(JSGeneratorObject::kGeneratorClosed <= 0);
+ __ SmiCompare(FieldOperand(rbx, JSGeneratorObject::kContinuationOffset),
+ Smi::FromInt(0));
+ __ j(less_equal, &wrong_state);
+
+ // Load suspended function and context.
+ __ movq(rsi, FieldOperand(rbx, JSGeneratorObject::kContextOffset));
+ __ movq(rdi, FieldOperand(rbx, JSGeneratorObject::kFunctionOffset));
+
+ // Push receiver.
+ __ push(FieldOperand(rbx, JSGeneratorObject::kReceiverOffset));
+
+ // Push holes for arguments to generator function.
+ __ movq(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ movsxlq(rdx,
+ FieldOperand(rdx,
+ SharedFunctionInfo::kFormalParameterCountOffset));
+ __ LoadRoot(rcx, Heap::kTheHoleValueRootIndex);
+ Label push_argument_holes, push_frame;
+ __ bind(&push_argument_holes);
+ __ subq(rdx, Immediate(1));
+ __ j(carry, &push_frame);
+ __ push(rcx);
+ __ jmp(&push_argument_holes);
+
+ // Enter a new JavaScript frame, and initialize its slots as they were when
+ // the generator was suspended.
+ Label resume_frame;
+ __ bind(&push_frame);
+ __ call(&resume_frame);
+ __ jmp(&done);
+ __ bind(&resume_frame);
+ __ push(rbp); // Caller's frame pointer.
+ __ movq(rbp, rsp);
+ __ push(rsi); // Callee's context.
+ __ push(rdi); // Callee's JS Function.
+
+ // Load the operand stack size.
+ __ movq(rdx, FieldOperand(rbx, JSGeneratorObject::kOperandStackOffset));
+ __ movq(rdx, FieldOperand(rdx, FixedArray::kLengthOffset));
+ __ SmiToInteger32(rdx, rdx);
+
+ // If we are sending a value and there is no operand stack, we can jump back
+ // in directly.
+ if (resume_mode == JSGeneratorObject::NEXT) {
+ Label slow_resume;
+ __ cmpq(rdx, Immediate(0));
+ __ j(not_zero, &slow_resume);
+ __ movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
+ __ SmiToInteger64(rcx,
+ FieldOperand(rbx, JSGeneratorObject::kContinuationOffset));
+ __ addq(rdx, rcx);
+ __ Move(FieldOperand(rbx, JSGeneratorObject::kContinuationOffset),
+ Smi::FromInt(JSGeneratorObject::kGeneratorExecuting));
+ __ jmp(rdx);
+ __ bind(&slow_resume);
+ }
+
+ // Otherwise, we push holes for the operand stack and call the runtime to fix
+ // up the stack and the handlers.
+ Label push_operand_holes, call_resume;
+ __ bind(&push_operand_holes);
+ __ subq(rdx, Immediate(1));
+ __ j(carry, &call_resume);
+ __ push(rcx);
+ __ jmp(&push_operand_holes);
+ __ bind(&call_resume);
+ __ push(rbx);
+ __ push(result_register());
+ __ Push(Smi::FromInt(resume_mode));
+ __ CallRuntime(Runtime::kResumeJSGeneratorObject, 3);
+ // Not reached: the runtime call returns elsewhere.
+ __ Abort(kGeneratorFailedToResume);
+
+ // Throw error if we attempt to operate on a running generator.
+ __ bind(&wrong_state);
+ __ push(rbx);
+ __ CallRuntime(Runtime::kThrowGeneratorStateError, 1);
+
+ __ bind(&done);
+ context()->Plug(result_register());
+}
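
EmitGeneratorResume rejects closed or running generators, rebuilds the activation (receiver, hole-filled arguments, saved context), and jumps back in at the saved continuation offset. A rough C++ model of that dispatch, with hypothetical names and an explicit continuation index instead of a code offset:

#include <optional>
#include <stdexcept>

// Hypothetical suspended-activation record; kClosed and kExecuting mirror the
// sentinel values the generated code compares against before resuming.
struct Generator {
  static constexpr int kClosed = -1;
  static constexpr int kExecuting = 0;
  int continuation = 1;  // > 0 means "suspended at offset N into the body"
  int next_value = 0;    // stands in for the saved operand stack
};

// Sketch of resuming: refuse closed or running generators, mark the generator
// as executing, then dispatch on the saved continuation, much like the
// computed jump through JSGeneratorObject::kContinuationOffset.
std::optional<int> Resume(Generator& g) {
  if (g.continuation <= 0) {
    throw std::runtime_error("generator is closed or already running");
  }
  int resume_at = g.continuation;
  g.continuation = Generator::kExecuting;
  switch (resume_at) {  // "jump" back to the recorded suspension point
    case 1:
      g.continuation = 2;  // record where the next yield suspends
      return g.next_value++;
    case 2:
      g.continuation = 3;
      return g.next_value++;
    case 3:
      g.continuation = Generator::kClosed;  // final return closes the generator
      return std::nullopt;                  // signals "done" to the caller
    default:
      throw std::runtime_error("unexpected continuation");
  }
}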
+
+
+void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
+ Label gc_required;
+ Label allocated;
+
+ Handle<Map> map(isolate()->native_context()->generator_result_map());
+
+ __ Allocate(map->instance_size(), rax, rcx, rdx, &gc_required, TAG_OBJECT);
+ __ jmp(&allocated);
+
+ __ bind(&gc_required);
+ __ Push(Smi::FromInt(map->instance_size()));
+ __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+ __ movq(context_register(),
+ Operand(rbp, StandardFrameConstants::kContextOffset));
+
+ __ bind(&allocated);
+ __ Move(rbx, map);
+ __ pop(rcx);
+ __ Move(rdx, isolate()->factory()->ToBoolean(done));
+ ASSERT_EQ(map->instance_size(), 5 * kPointerSize);
+ __ movq(FieldOperand(rax, HeapObject::kMapOffset), rbx);
+ __ Move(FieldOperand(rax, JSObject::kPropertiesOffset),
+ isolate()->factory()->empty_fixed_array());
+ __ Move(FieldOperand(rax, JSObject::kElementsOffset),
+ isolate()->factory()->empty_fixed_array());
+ __ movq(FieldOperand(rax, JSGeneratorObject::kResultValuePropertyOffset),
+ rcx);
+ __ movq(FieldOperand(rax, JSGeneratorObject::kResultDonePropertyOffset),
+ rdx);
+
+ // Only the value field needs a write barrier, as the other values are in the
+ // root set.
+ __ RecordWriteField(rax, JSGeneratorObject::kResultValuePropertyOffset,
+ rcx, rdx, kDontSaveFPRegs);
+}
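
EmitCreateIteratorResult boxes the yielded value into a { value, done } object, trying an inline allocation first and falling back to the runtime on failure. A simplified C++ sketch of that fast-path/slow-path split, with hypothetical types that ignore maps, properties and write barriers:

// Hypothetical result record; the real object is a JSObject with a map,
// properties, elements and two in-object fields for value and done.
struct IteratorResult {
  void* value = nullptr;
  bool done = false;
};

// Sketch of the split: try a caller-supplied bump-pointer style allocation
// first (the inline __ Allocate above) and fall back to a general allocation
// when it fails (the gc_required / runtime-call path).
IteratorResult* CreateIteratorResult(void* value, bool done,
                                     IteratorResult* (*fast_alloc)()) {
  IteratorResult* result = fast_alloc();
  if (result == nullptr) {          // gc_required:
    result = new IteratorResult();  // stands in for Runtime::kAllocateInNewSpace
  }
  result->value = value;  // in V8 only this field needs a write barrier
  result->done = done;    // the boolean is in the root set, no barrier needed
  return result;
}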
+
+
void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Literal* key = prop->key()->AsLiteral();
- __ Move(rcx, key->handle());
+ __ Move(rcx, key->value());
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
CallIC(ic, RelocInfo::CODE_TARGET, prop->PropertyFeedbackId());
}
@@ -1909,7 +2264,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ bind(&stub_call);
__ movq(rax, rcx);
BinaryOpStub stub(op, mode);
- CallIC(stub.GetCode(), RelocInfo::CODE_TARGET,
+ CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET,
expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
__ jmp(&done, Label::kNear);
@@ -1959,7 +2314,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
__ pop(rdx);
BinaryOpStub stub(op, mode);
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
- CallIC(stub.GetCode(), RelocInfo::CODE_TARGET,
+ CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET,
expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
context()->Plug(rax);
@@ -1967,7 +2322,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
void FullCodeGenerator::EmitAssignment(Expression* expr) {
- // Invalid left-hand sides are rewritten to have a 'throw
+ // Invalid left-hand sides are rewritten by the parser to have a 'throw
// ReferenceError' on the left-hand side.
if (!expr->IsValidLeftHandSide()) {
VisitForEffect(expr);
@@ -1997,7 +2352,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
VisitForAccumulatorValue(prop->obj());
__ movq(rdx, rax);
__ pop(rax); // Restore value.
- __ Move(rcx, prop->key()->AsLiteral()->handle());
+ __ Move(rcx, prop->key()->AsLiteral()->value());
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
: isolate()->builtins()->StoreIC_Initialize_Strict();
@@ -2090,7 +2445,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
// Check for an uninitialized let binding.
__ movq(rdx, location);
__ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
- __ Check(equal, "Let binding re-initialization.");
+ __ Check(equal, kLetBindingReInitialization);
}
// Perform the assignment.
__ movq(location, rax);
@@ -2120,7 +2475,7 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
// Record source code position before IC call.
SetSourcePosition(expr->position());
- __ Move(rcx, prop->key()->AsLiteral()->handle());
+ __ Move(rcx, prop->key()->AsLiteral()->value());
__ pop(rdx);
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
@@ -2250,8 +2605,7 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
flags = static_cast<CallFunctionFlags>(flags | RECORD_CALL_TARGET);
Handle<Object> uninitialized =
TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
+ Handle<Cell> cell = isolate()->factory()->NewCell(uninitialized);
RecordTypeFeedbackCell(expr->CallFeedbackId(), cell);
__ Move(rbx, cell);
@@ -2275,7 +2629,8 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
}
// Push the receiver of the enclosing function and do runtime call.
- __ push(Operand(rbp, (2 + info_->scope()->num_parameters()) * kPointerSize));
+ StackArgumentsAccessor args(rbp, info_->scope()->num_parameters());
+ __ push(args.GetReceiverOperand());
// Push the language mode.
__ Push(Smi::FromInt(language_mode()));
@@ -2300,7 +2655,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
VariableProxy* proxy = callee->AsVariableProxy();
Property* property = callee->AsProperty();
- if (proxy != NULL && proxy->var()->is_possibly_eval()) {
+ if (proxy != NULL && proxy->var()->is_possibly_eval(isolate())) {
// In a call to eval, we first call %ResolvePossiblyDirectEval to
// resolve the function we need to call and the receiver of the call.
// Then we call the resolved function using the given arguments.
@@ -2381,7 +2736,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
}
if (property->key()->IsPropertyName()) {
EmitCallWithIC(expr,
- property->key()->AsLiteral()->handle(),
+ property->key()->AsLiteral()->value(),
RelocInfo::CODE_TARGET);
} else {
EmitKeyedCallWithIC(expr, property->key());
@@ -2434,13 +2789,12 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Record call targets in unoptimized code, but not in the snapshot.
Handle<Object> uninitialized =
TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
+ Handle<Cell> cell = isolate()->factory()->NewCell(uninitialized);
RecordTypeFeedbackCell(expr->CallNewFeedbackId(), cell);
__ Move(rbx, cell);
CallConstructStub stub(RECORD_CALL_TARGET);
- __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+ __ Call(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL);
PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
context()->Plug(rax);
}
@@ -2573,7 +2927,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
VisitForAccumulatorValue(args->at(0));
- Label materialize_true, materialize_false;
+ Label materialize_true, materialize_false, skip_lookup;
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
@@ -2587,7 +2941,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
__ testb(FieldOperand(rbx, Map::kBitField2Offset),
Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
- __ j(not_zero, if_true);
+ __ j(not_zero, &skip_lookup);
// Check for fast case object. Generate false result for slow case object.
__ movq(rcx, FieldOperand(rax, JSObject::kPropertiesOffset));
@@ -2595,7 +2949,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ CompareRoot(rcx, Heap::kHashTableMapRootIndex);
__ j(equal, if_false);
- // Look for valueOf symbol in the descriptor array, and indicate false if
+ // Look for valueOf string in the descriptor array, and indicate false if
// found. Since we omit an enumeration index check, if it is added via a
// transition that shares its descriptor array, this is a false positive.
Label entry, loop, done;
@@ -2605,7 +2959,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ cmpq(rcx, Immediate(0));
__ j(equal, &done);
- __ LoadInstanceDescriptors(rbx, rbx);
+ __ LoadInstanceDescriptors(rbx, r8);
// rbx: descriptor array.
// rcx: valid entries in the descriptor array.
// Calculate the end of the descriptor array.
@@ -2613,24 +2967,28 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
SmiIndex index = masm_->SmiToIndex(rdx, rcx, kPointerSizeLog2);
__ lea(rcx,
Operand(
- rbx, index.reg, index.scale, DescriptorArray::kFirstOffset));
+ r8, index.reg, index.scale, DescriptorArray::kFirstOffset));
// Calculate location of the first key name.
- __ addq(rbx, Immediate(DescriptorArray::kFirstOffset));
+ __ addq(r8, Immediate(DescriptorArray::kFirstOffset));
// Loop through all the keys in the descriptor array. If one of these is the
- // symbol valueOf the result is false.
+  // internalized string "valueOf", the result is false.

__ jmp(&entry);
__ bind(&loop);
- __ movq(rdx, FieldOperand(rbx, 0));
- __ Cmp(rdx, FACTORY->value_of_symbol());
+ __ movq(rdx, FieldOperand(r8, 0));
+ __ Cmp(rdx, isolate()->factory()->value_of_string());
__ j(equal, if_false);
- __ addq(rbx, Immediate(DescriptorArray::kDescriptorSize * kPointerSize));
+ __ addq(r8, Immediate(DescriptorArray::kDescriptorSize * kPointerSize));
__ bind(&entry);
- __ cmpq(rbx, rcx);
+ __ cmpq(r8, rcx);
__ j(not_equal, &loop);
__ bind(&done);
- // Reload map as register rbx was used as temporary above.
- __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
+
+ // Set the bit in the map to indicate that there is no local valueOf field.
+ __ or_(FieldOperand(rbx, Map::kBitField2Offset),
+ Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
+
+ __ bind(&skip_lookup);
// If a valueOf property is not found on the object check that its
// prototype is the un-modified String prototype. If not result is false.
@@ -2642,14 +3000,9 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ movq(rdx, FieldOperand(rdx, GlobalObject::kNativeContextOffset));
__ cmpq(rcx,
ContextOperand(rdx, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
- __ j(not_equal, if_false);
- // Set the bit in the map to indicate that it has been checked safe for
- // default valueOf and set true result.
- __ or_(FieldOperand(rbx, Map::kBitField2Offset),
- Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
- __ jmp(if_true);
-
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(equal, if_true, if_false, fall_through);
+
context()->Plug(if_true, if_false);
}
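
The restructured check above scans the descriptor array for an own "valueOf", caches a negative result in a map bit so later checks can skip the scan, and still requires the prototype to be the unmodified String prototype. A simplified C++ model of that logic, using hypothetical types in place of maps and descriptor arrays:

#include <string>
#include <vector>

// Hypothetical, simplified stand-in for a map: a cached "known safe" bit plus
// the own property names that the descriptor-array scan walks over.
struct SimpleMap {
  bool string_wrapper_safe_for_default_value_of = false;
  std::vector<std::string> own_property_names;  // stands in for descriptors
  const SimpleMap* prototype_map = nullptr;
};

bool IsStringWrapperSafeForDefaultValueOf(SimpleMap& map,
                                          const SimpleMap* unmodified_proto) {
  if (!map.string_wrapper_safe_for_default_value_of) {  // else: skip_lookup
    for (const std::string& name : map.own_property_names) {
      if (name == "valueOf") return false;  // own valueOf found: not safe
    }
    // No own valueOf: remember that, so the next check skips the scan.
    map.string_wrapper_safe_for_default_value_of = true;
  }
  // Even without an own valueOf, the prototype must still be the unmodified
  // String prototype; the result is the outcome of this final comparison.
  return map.prototype_map == unmodified_proto;
}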
@@ -2856,12 +3209,12 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
// Functions have class 'Function'.
__ bind(&function);
- __ Move(rax, isolate()->factory()->function_class_symbol());
+ __ Move(rax, isolate()->factory()->function_class_string());
__ jmp(&done);
// Objects with a non-function constructor have class 'Object'.
__ bind(&non_function_constructor);
- __ Move(rax, isolate()->factory()->Object_symbol());
+ __ Move(rax, isolate()->factory()->Object_string());
__ jmp(&done);
// Non-JS objects have class null.
@@ -2885,7 +3238,7 @@ void FullCodeGenerator::EmitLog(CallRuntime* expr) {
// 2 (array): Arguments to the format string.
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(args->length(), 3);
- if (CodeGenerator::ShouldGenerateLog(args->at(0))) {
+ if (CodeGenerator::ShouldGenerateLog(isolate(), args->at(0))) {
VisitForStackValue(args->at(1));
VisitForStackValue(args->at(2));
__ CallRuntime(Runtime::kLog, 2);
@@ -2915,16 +3268,10 @@ void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) {
// Return a random uint32 number in rax.
// The fresh HeapNumber is in rbx, which is callee-save on both x64 ABIs.
__ PrepareCallCFunction(1);
-#ifdef _WIN64
- __ movq(rcx,
- ContextOperand(context_register(), Context::GLOBAL_OBJECT_INDEX));
- __ movq(rcx, FieldOperand(rcx, GlobalObject::kNativeContextOffset));
-
-#else
- __ movq(rdi,
+ __ movq(arg_reg_1,
ContextOperand(context_register(), Context::GLOBAL_OBJECT_INDEX));
- __ movq(rdi, FieldOperand(rdi, GlobalObject::kNativeContextOffset));
-#endif
+ __ movq(arg_reg_1,
+ FieldOperand(arg_reg_1, GlobalObject::kNativeContextOffset));
__ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
// Convert 32 random bits in rax to 0.(32 random bits) in a double
@@ -2993,7 +3340,7 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
ASSERT_NE(NULL, args->at(1)->AsLiteral());
- Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->handle()));
+ Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->value()));
VisitForAccumulatorValue(args->at(0)); // Load the object.
@@ -3012,7 +3359,8 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
} else {
if (index->value() < JSDate::kFirstUncachedField) {
ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
- __ movq(scratch, stamp);
+ Operand stamp_operand = __ ExternalOperand(stamp);
+ __ movq(scratch, stamp_operand);
__ cmpq(scratch, FieldOperand(object, JSDate::kCacheStampOffset));
__ j(not_equal, &runtime, Label::kNear);
__ movq(result, FieldOperand(object, JSDate::kValueOffset +
@@ -3021,13 +3369,8 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
}
__ bind(&runtime);
__ PrepareCallCFunction(2);
-#ifdef _WIN64
- __ movq(rcx, object);
- __ movq(rdx, index, RelocInfo::NONE);
-#else
- __ movq(rdi, object);
- __ movq(rsi, index, RelocInfo::NONE);
-#endif
+ __ movq(arg_reg_1, object);
+ __ movq(arg_reg_2, index, RelocInfo::NONE64);
__ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
__ jmp(&done);
@@ -3040,6 +3383,84 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitSeqStringSetCharCheck(Register string,
+ Register index,
+ Register value,
+ uint32_t encoding_mask) {
+ __ Check(masm()->CheckSmi(index), kNonSmiIndex);
+ __ Check(masm()->CheckSmi(value), kNonSmiValue);
+
+ __ SmiCompare(index, FieldOperand(string, String::kLengthOffset));
+ __ Check(less, kIndexIsTooLarge);
+
+ __ SmiCompare(index, Smi::FromInt(0));
+ __ Check(greater_equal, kIndexIsNegative);
+
+ __ push(value);
+ __ movq(value, FieldOperand(string, HeapObject::kMapOffset));
+ __ movzxbq(value, FieldOperand(value, Map::kInstanceTypeOffset));
+
+ __ andb(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
+ __ cmpq(value, Immediate(encoding_mask));
+ __ Check(equal, kUnexpectedStringType);
+ __ pop(value);
+}
+
+
+void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT_EQ(3, args->length());
+
+ Register string = rax;
+ Register index = rbx;
+ Register value = rcx;
+
+ VisitForStackValue(args->at(1)); // index
+ VisitForStackValue(args->at(2)); // value
+ __ pop(value);
+ __ pop(index);
+ VisitForAccumulatorValue(args->at(0)); // string
+
+ if (FLAG_debug_code) {
+ static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+ EmitSeqStringSetCharCheck(string, index, value, one_byte_seq_type);
+ }
+
+ __ SmiToInteger32(value, value);
+ __ SmiToInteger32(index, index);
+ __ movb(FieldOperand(string, index, times_1, SeqOneByteString::kHeaderSize),
+ value);
+ context()->Plug(string);
+}
+
+
+void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT_EQ(3, args->length());
+
+ Register string = rax;
+ Register index = rbx;
+ Register value = rcx;
+
+ VisitForStackValue(args->at(1)); // index
+ VisitForStackValue(args->at(2)); // value
+ __ pop(value);
+ __ pop(index);
+ VisitForAccumulatorValue(args->at(0)); // string
+
+ if (FLAG_debug_code) {
+ static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+ EmitSeqStringSetCharCheck(string, index, value, two_byte_seq_type);
+ }
+
+ __ SmiToInteger32(value, value);
+ __ SmiToInteger32(index, index);
+ __ movw(FieldOperand(string, index, times_2, SeqTwoByteString::kHeaderSize),
+ value);
+ context()->Plug(rax);
+}
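
Both intrinsics store one character into a flat sequential string: debug builds first run EmitSeqStringSetCharCheck (smi index and value, index in range, receiver has the expected representation and encoding), then a single byte or 16-bit store lands in the string payload. A small C++ sketch of the same operation over plain buffers, with hypothetical types whose static encoding replaces the instance-type check:

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

// Hypothetical flat string payloads: one byte per character versus two bytes
// per character, in the spirit of SeqOneByteString / SeqTwoByteString.
struct OneByteString { std::vector<uint8_t>  chars; };
struct TwoByteString { std::vector<uint16_t> chars; };

// Sketch of the one-byte store: the asserts play the role of the index checks
// in EmitSeqStringSetCharCheck, and the static type of the buffer stands in
// for the representation/encoding instance-type check.
void OneByteSeqStringSetChar(OneByteString& s, std::size_t index,
                             uint16_t value) {
  assert(index < s.chars.size());  // kIndexIsTooLarge / kIndexIsNegative
  assert(value <= 0xFF);           // must fit the one-byte encoding
  s.chars[index] = static_cast<uint8_t>(value);
}

// Two-byte variant: same checks, 16-bit store instead of a byte store.
void TwoByteSeqStringSetChar(TwoByteString& s, std::size_t index,
                             uint16_t value) {
  assert(index < s.chars.size());
  s.chars[index] = value;
}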
+
+
void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
// Load the arguments on the stack and call the runtime function.
ZoneList<Expression*>* args = expr->arguments();
@@ -3084,8 +3505,8 @@ void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(args->length(), 1);
- // Load the argument on the stack and call the stub.
- VisitForStackValue(args->at(0));
+ // Load the argument into rax and call the stub.
+ VisitForAccumulatorValue(args->at(0));
NumberToStringStub stub;
__ CallStub(&stub);
@@ -3189,7 +3610,7 @@ void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
__ bind(&index_out_of_range);
// When the index is out of range, the spec requires us to return
// the empty string.
- __ LoadRoot(result, Heap::kEmptyStringRootIndex);
+ __ LoadRoot(result, Heap::kempty_stringRootIndex);
__ jmp(&done);
__ bind(&need_conversion);
@@ -3213,7 +3634,7 @@ void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
- StringAddStub stub(NO_STRING_ADD_FLAGS);
+ StringAddStub stub(STRING_ADD_CHECK_BOTH);
__ CallStub(&stub);
context()->Plug(rax);
}
@@ -3340,12 +3761,12 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
ASSERT_EQ(2, args->length());
ASSERT_NE(NULL, args->at(0)->AsLiteral());
- int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
+ int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->value()))->value();
Handle<FixedArray> jsfunction_result_caches(
isolate()->native_context()->jsfunction_result_caches());
if (jsfunction_result_caches->length() <= cache_id) {
- __ Abort("Attempt to use undefined cache.");
+ __ Abort(kAttemptToUseUndefinedCache);
__ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
context()->Plug(rax);
return;
@@ -3515,7 +3936,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ movq(array_length, FieldOperand(array, JSArray::kLengthOffset));
__ SmiCompare(array_length, Smi::FromInt(0));
__ j(not_zero, &non_trivial_array);
- __ LoadRoot(rax, Heap::kEmptyStringRootIndex);
+ __ LoadRoot(rax, Heap::kempty_stringRootIndex);
__ jmp(&return_result);
// Save the array length on the stack.
@@ -3539,7 +3960,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// scratch, string_length(int32), elements(FixedArray*).
if (generate_debug_code_) {
__ cmpq(index, array_length);
- __ Assert(below, "No empty arrays here in EmitFastAsciiArrayJoin");
+ __ Assert(below, kNoEmptyArraysHereInEmitFastAsciiArrayJoin);
}
__ bind(&loop);
__ movq(string, FieldOperand(elements,
@@ -3551,10 +3972,10 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ movzxbl(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
__ andb(scratch, Immediate(
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
- __ cmpb(scratch, Immediate(kStringTag | kAsciiStringTag | kSeqStringTag));
+ __ cmpb(scratch, Immediate(kStringTag | kOneByteStringTag | kSeqStringTag));
__ j(not_equal, &bailout);
__ AddSmiField(string_length,
- FieldOperand(string, SeqAsciiString::kLengthOffset));
+ FieldOperand(string, SeqOneByteString::kLengthOffset));
__ j(overflow, &bailout);
__ incl(index);
__ cmpl(index, array_length);
@@ -3590,7 +4011,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ movzxbl(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
__ andb(scratch, Immediate(
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
- __ cmpb(scratch, Immediate(kStringTag | kAsciiStringTag | kSeqStringTag));
+ __ cmpb(scratch, Immediate(kStringTag | kOneByteStringTag | kSeqStringTag));
__ j(not_equal, &bailout);
// Live registers:
@@ -3601,7 +4022,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// Add (separator length times (array_length - 1)) to string_length.
__ SmiToInteger32(scratch,
- FieldOperand(string, SeqAsciiString::kLengthOffset));
+ FieldOperand(string, SeqOneByteString::kLengthOffset));
__ decl(index);
__ imull(scratch, index);
__ j(overflow, &bailout);
@@ -3614,10 +4035,10 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ AllocateAsciiString(result_pos, string_length, scratch,
index, string, &bailout);
__ movq(result_operand, result_pos);
- __ lea(result_pos, FieldOperand(result_pos, SeqAsciiString::kHeaderSize));
+ __ lea(result_pos, FieldOperand(result_pos, SeqOneByteString::kHeaderSize));
__ movq(string, separator_operand);
- __ SmiCompare(FieldOperand(string, SeqAsciiString::kLengthOffset),
+ __ SmiCompare(FieldOperand(string, SeqOneByteString::kLengthOffset),
Smi::FromInt(1));
__ j(equal, &one_char_separator);
__ j(greater, &long_separator);
@@ -3643,7 +4064,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ SmiToInteger32(string_length,
FieldOperand(string, String::kLengthOffset));
__ lea(string,
- FieldOperand(string, SeqAsciiString::kHeaderSize));
+ FieldOperand(string, SeqOneByteString::kHeaderSize));
__ CopyBytes(result_pos, string, string_length);
__ incl(index);
__ bind(&loop_1_condition);
@@ -3661,7 +4082,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ bind(&one_char_separator);
// Get the separator ASCII character value.
// Register "string" holds the separator.
- __ movzxbl(scratch, FieldOperand(string, SeqAsciiString::kHeaderSize));
+ __ movzxbl(scratch, FieldOperand(string, SeqOneByteString::kHeaderSize));
__ Set(index, 0);
// Jump into the loop after the code that copies the separator, so the first
// element is not preceded by a separator
@@ -3687,7 +4108,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ SmiToInteger32(string_length,
FieldOperand(string, String::kLengthOffset));
__ lea(string,
- FieldOperand(string, SeqAsciiString::kHeaderSize));
+ FieldOperand(string, SeqOneByteString::kHeaderSize));
__ CopyBytes(result_pos, string, string_length);
__ incl(index);
__ cmpl(index, array_length_operand);
@@ -3712,7 +4133,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ SmiToInteger32(scratch,
FieldOperand(string, String::kLengthOffset));
__ lea(string,
- FieldOperand(string, SeqAsciiString::kHeaderSize));
+ FieldOperand(string, SeqOneByteString::kHeaderSize));
__ movq(separator_operand, string);
// Jump into the loop after the code that copies the separator, so the first
@@ -3738,7 +4159,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ SmiToInteger32(string_length,
FieldOperand(string, String::kLengthOffset));
__ lea(string,
- FieldOperand(string, SeqAsciiString::kHeaderSize));
+ FieldOperand(string, SeqOneByteString::kHeaderSize));
__ CopyBytes(result_pos, string, string_length);
__ incq(index);
__ j(not_equal, &loop_3); // Loop while (index < 0).
@@ -3903,50 +4324,12 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
break;
}
- case Token::ADD: {
- Comment cmt(masm_, "[ UnaryOperation (ADD)");
- VisitForAccumulatorValue(expr->expression());
- Label no_conversion;
- __ JumpIfSmi(result_register(), &no_conversion);
- ToNumberStub convert_stub;
- __ CallStub(&convert_stub);
- __ bind(&no_conversion);
- context()->Plug(result_register());
- break;
- }
-
- case Token::SUB:
- EmitUnaryOperation(expr, "[ UnaryOperation (SUB)");
- break;
-
- case Token::BIT_NOT:
- EmitUnaryOperation(expr, "[ UnaryOperation (BIT_NOT)");
- break;
-
default:
UNREACHABLE();
}
}
-void FullCodeGenerator::EmitUnaryOperation(UnaryOperation* expr,
- const char* comment) {
- // TODO(svenpanne): Allowing format strings in Comment would be nice here...
- Comment cmt(masm_, comment);
- bool can_overwrite = expr->expression()->ResultOverwriteAllowed();
- UnaryOverwriteMode overwrite =
- can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
- UnaryOpStub stub(expr->op(), overwrite);
- // UnaryOpStub expects the argument to be in the
- // accumulator register rax.
- VisitForAccumulatorValue(expr->expression());
- SetSourcePosition(expr->position());
- CallIC(stub.GetCode(), RelocInfo::CODE_TARGET,
- expr->UnaryOperationFeedbackId());
- context()->Plug(rax);
-}
-
-
void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Comment cmnt(masm_, "[ CountOperation");
SetSourcePosition(expr->position());
@@ -4003,7 +4386,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// Call ToNumber only if operand is not a smi.
Label no_conversion;
- __ JumpIfSmi(rax, &no_conversion, Label::kNear);
+ if (ShouldInlineSmiCase(expr->op())) {
+ __ JumpIfSmi(rax, &no_conversion, Label::kNear);
+ }
ToNumberStub convert_stub;
__ CallStub(&convert_stub);
__ bind(&no_conversion);
@@ -4056,14 +4441,12 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
SetSourcePosition(expr->position());
// Call stub for +1/-1.
+ __ movq(rdx, rax);
+ __ Move(rax, Smi::FromInt(1));
BinaryOpStub stub(expr->binary_op(), NO_OVERWRITE);
- if (expr->op() == Token::INC) {
- __ Move(rdx, Smi::FromInt(1));
- } else {
- __ movq(rdx, rax);
- __ Move(rax, Smi::FromInt(1));
- }
- CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountBinOpFeedbackId());
+ CallIC(stub.GetCode(isolate()),
+ RelocInfo::CODE_TARGET,
+ expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
__ bind(&done);
@@ -4092,7 +4475,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
break;
case NAMED_PROPERTY: {
- __ Move(rcx, prop->key()->AsLiteral()->handle());
+ __ Move(rcx, prop->key()->AsLiteral()->value());
__ pop(rdx);
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
@@ -4181,12 +4564,12 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
}
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- if (check->Equals(isolate()->heap()->number_symbol())) {
+ if (check->Equals(isolate()->heap()->number_string())) {
__ JumpIfSmi(rax, if_true);
__ movq(rax, FieldOperand(rax, HeapObject::kMapOffset));
__ CompareRoot(rax, Heap::kHeapNumberMapRootIndex);
Split(equal, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->string_symbol())) {
+ } else if (check->Equals(isolate()->heap()->string_string())) {
__ JumpIfSmi(rax, if_false);
// Check for undetectable objects => false.
__ CmpObjectType(rax, FIRST_NONSTRING_TYPE, rdx);
@@ -4194,16 +4577,20 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
__ testb(FieldOperand(rdx, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
Split(zero, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->boolean_symbol())) {
+ } else if (check->Equals(isolate()->heap()->symbol_string())) {
+ __ JumpIfSmi(rax, if_false);
+ __ CmpObjectType(rax, SYMBOL_TYPE, rdx);
+ Split(equal, if_true, if_false, fall_through);
+ } else if (check->Equals(isolate()->heap()->boolean_string())) {
__ CompareRoot(rax, Heap::kTrueValueRootIndex);
__ j(equal, if_true);
__ CompareRoot(rax, Heap::kFalseValueRootIndex);
Split(equal, if_true, if_false, fall_through);
} else if (FLAG_harmony_typeof &&
- check->Equals(isolate()->heap()->null_symbol())) {
+ check->Equals(isolate()->heap()->null_string())) {
__ CompareRoot(rax, Heap::kNullValueRootIndex);
Split(equal, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->undefined_symbol())) {
+ } else if (check->Equals(isolate()->heap()->undefined_string())) {
__ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
__ j(equal, if_true);
__ JumpIfSmi(rax, if_false);
@@ -4212,14 +4599,14 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
__ testb(FieldOperand(rdx, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
Split(not_zero, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->function_symbol())) {
+ } else if (check->Equals(isolate()->heap()->function_string())) {
__ JumpIfSmi(rax, if_false);
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
__ CmpObjectType(rax, JS_FUNCTION_TYPE, rdx);
__ j(equal, if_true);
__ CmpInstanceType(rdx, JS_FUNCTION_PROXY_TYPE);
Split(equal, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->object_symbol())) {
+ } else if (check->Equals(isolate()->heap()->object_string())) {
__ JumpIfSmi(rax, if_false);
if (!FLAG_harmony_typeof) {
__ CompareRoot(rax, Heap::kNullValueRootIndex);
@@ -4281,29 +4668,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
default: {
VisitForAccumulatorValue(expr->right());
- Condition cc = no_condition;
- switch (op) {
- case Token::EQ_STRICT:
- case Token::EQ:
- cc = equal;
- break;
- case Token::LT:
- cc = less;
- break;
- case Token::GT:
- cc = greater;
- break;
- case Token::LTE:
- cc = less_equal;
- break;
- case Token::GTE:
- cc = greater_equal;
- break;
- case Token::IN:
- case Token::INSTANCEOF:
- default:
- UNREACHABLE();
- }
+ Condition cc = CompareIC::ComputeCondition(op);
__ pop(rdx);
bool inline_smi_code = ShouldInlineSmiCase(op);
@@ -4320,7 +4685,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
// Record position and call the compare IC.
SetSourcePosition(expr->position());
- Handle<Code> ic = CompareIC::GetUninitialized(op);
+ Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
CallIC(ic, RelocInfo::CODE_TARGET, expr->CompareOperationFeedbackId());
patch_site.EmitPatchInfo();
@@ -4348,24 +4713,16 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
VisitForAccumulatorValue(sub_expr);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Heap::RootListIndex nil_value = nil == kNullValue ?
- Heap::kNullValueRootIndex :
- Heap::kUndefinedValueRootIndex;
- __ CompareRoot(rax, nil_value);
if (expr->op() == Token::EQ_STRICT) {
+ Heap::RootListIndex nil_value = nil == kNullValue ?
+ Heap::kNullValueRootIndex :
+ Heap::kUndefinedValueRootIndex;
+ __ CompareRoot(rax, nil_value);
Split(equal, if_true, if_false, fall_through);
} else {
- Heap::RootListIndex other_nil_value = nil == kNullValue ?
- Heap::kUndefinedValueRootIndex :
- Heap::kNullValueRootIndex;
- __ j(equal, if_true);
- __ CompareRoot(rax, other_nil_value);
- __ j(equal, if_true);
- __ JumpIfSmi(rax, if_false);
- // It can be an undetectable object.
- __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
- __ testb(FieldOperand(rdx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
+ Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
+ CallIC(ic, RelocInfo::CODE_TARGET, expr->CompareOperationFeedbackId());
+ __ testq(rax, rax);
Split(not_zero, if_true, if_false, fall_through);
}
context()->Plug(if_true, if_false);
@@ -4428,7 +4785,7 @@ void FullCodeGenerator::EnterFinallyBlock() {
ASSERT(!result_register().is(rdx));
ASSERT(!result_register().is(rcx));
// Cook return address on top of stack (smi encoded Code* delta)
- __ pop(rdx);
+ __ PopReturnAddressTo(rdx);
__ Move(rcx, masm_->CodeObject());
__ subq(rdx, rcx);
__ Integer32ToSmi(rdx, rdx);
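
Cooking turns the raw return address into an offset relative to the code object's start, so the value stays meaningful even if the GC moves the code while the finally block runs; leaving the block adds the (possibly new) code start back. A minimal sketch of the idea with plain integers instead of smis and hypothetical helper names:

#include <cstdint>

// Hypothetical model of cooking: keep the return address as an offset from
// the code start instead of a raw pointer into the (movable) Code object.
std::uintptr_t CookReturnAddress(std::uintptr_t return_address,
                                 std::uintptr_t code_start) {
  return return_address - code_start;  // smi-encoded delta in the real code
}

std::uintptr_t UncookReturnAddress(std::uintptr_t delta,
                                   std::uintptr_t code_start) {
  return code_start + delta;  // valid even if the code object has moved
}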
@@ -4518,6 +4875,79 @@ FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
#undef __
+
+static const byte kJnsInstruction = 0x79;
+static const byte kJnsOffset = 0x1d;
+static const byte kCallInstruction = 0xe8;
+static const byte kNopByteOne = 0x66;
+static const byte kNopByteTwo = 0x90;
+
+
+void BackEdgeTable::PatchAt(Code* unoptimized_code,
+ Address pc,
+ BackEdgeState target_state,
+ Code* replacement_code) {
+ Address call_target_address = pc - kIntSize;
+ Address jns_instr_address = call_target_address - 3;
+ Address jns_offset_address = call_target_address - 2;
+
+ switch (target_state) {
+ case INTERRUPT:
+ // sub <profiling_counter>, <delta> ;; Not changed
+ // jns ok
+ // call <interrupt stub>
+ // ok:
+ *jns_instr_address = kJnsInstruction;
+ *jns_offset_address = kJnsOffset;
+ break;
+ case ON_STACK_REPLACEMENT:
+ case OSR_AFTER_STACK_CHECK:
+ // sub <profiling_counter>, <delta> ;; Not changed
+ // nop
+ // nop
+      // call <on-stack replacement>
+ // ok:
+ *jns_instr_address = kNopByteOne;
+ *jns_offset_address = kNopByteTwo;
+ break;
+ }
+
+ Assembler::set_target_address_at(call_target_address,
+ replacement_code->entry());
+ unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
+ unoptimized_code, call_target_address, replacement_code);
+}
+
+
+BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
+ Isolate* isolate,
+ Code* unoptimized_code,
+ Address pc) {
+ Address call_target_address = pc - kIntSize;
+ Address jns_instr_address = call_target_address - 3;
+ ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
+
+ if (*jns_instr_address == kJnsInstruction) {
+ ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
+ ASSERT_EQ(isolate->builtins()->InterruptCheck()->entry(),
+ Assembler::target_address_at(call_target_address));
+ return INTERRUPT;
+ }
+
+ ASSERT_EQ(kNopByteOne, *jns_instr_address);
+ ASSERT_EQ(kNopByteTwo, *(call_target_address - 2));
+
+ if (Assembler::target_address_at(call_target_address) ==
+ isolate->builtins()->OnStackReplacement()->entry()) {
+ return ON_STACK_REPLACEMENT;
+ }
+
+ ASSERT_EQ(isolate->builtins()->OsrAfterStackCheck()->entry(),
+ Assembler::target_address_at(call_target_address));
+ return OSR_AFTER_STACK_CHECK;
+}
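
The back-edge table toggles each loop back edge between two shapes: a jns that normally skips the interrupt call, and a two-byte nop (0x66 0x90) that makes the following call unconditional for on-stack replacement; GetBackEdgeState reads those bytes back to decide which shape is live. A simplified C++ sketch of the patching and decoding over a plain byte buffer, with hypothetical names and without the call-target checks:

#include <cassert>
#include <cstdint>

// Simplified model of the patch site: two bytes that are either a
// "jns <offset>" guarding the interrupt call, or two nop bytes that make the
// following call unconditional. The constants mirror the ones defined above.
enum class BackEdgeState { INTERRUPT, ON_STACK_REPLACEMENT };

const uint8_t kJns = 0x79, kJnsOff = 0x1d, kNop1 = 0x66, kNop2 = 0x90;

// Patch the two bytes in front of the call, in the spirit of PatchAt.
void PatchBackEdge(uint8_t* jns_instr, BackEdgeState target) {
  if (target == BackEdgeState::INTERRUPT) {
    jns_instr[0] = kJns;      // restore the conditional skip over the call
    jns_instr[1] = kJnsOff;
  } else {
    jns_instr[0] = kNop1;     // 0x66 0x90 is a two-byte nop on x64
    jns_instr[1] = kNop2;
  }
}

// Decode the current state from the bytes, in the spirit of GetBackEdgeState
// (minus the check that distinguishes the two OSR variants by call target).
BackEdgeState ReadBackEdgeState(const uint8_t* jns_instr) {
  if (jns_instr[0] == kJns) {
    assert(jns_instr[1] == kJnsOff);
    return BackEdgeState::INTERRUPT;
  }
  assert(jns_instr[0] == kNop1 && jns_instr[1] == kNop2);
  return BackEdgeState::ON_STACK_REPLACEMENT;
}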
+
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_X64
diff --git a/deps/v8/src/x64/ic-x64.cc b/deps/v8/src/x64/ic-x64.cc
index efa07a80b..15f410c13 100644
--- a/deps/v8/src/x64/ic-x64.cc
+++ b/deps/v8/src/x64/ic-x64.cc
@@ -27,7 +27,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_X64)
+#if V8_TARGET_ARCH_X64
#include "codegen.h"
#include "ic-inl.h"
@@ -60,11 +60,11 @@ static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
// Generated code falls through if the receiver is a regular non-global
// JS object with slow properties and no interceptors.
-static void GenerateStringDictionaryReceiverCheck(MacroAssembler* masm,
- Register receiver,
- Register r0,
- Register r1,
- Label* miss) {
+static void GenerateNameDictionaryReceiverCheck(MacroAssembler* masm,
+ Register receiver,
+ Register r0,
+ Register r1,
+ Label* miss) {
// Register usage:
// receiver: holds the receiver on entry and is unchanged.
// r0: used to hold receiver instance type.
@@ -101,8 +101,8 @@ static void GenerateStringDictionaryReceiverCheck(MacroAssembler* masm,
// Helper function used to load a property from a dictionary backing storage.
// This function may return false negatives, so miss_label
// must always call a backup property load that is complete.
-// This function is safe to call if name is not a symbol, and will jump to
-// the miss_label in that case.
+// This function is safe to call if name is not an internalized string,
+// and will jump to the miss_label in that case.
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
static void GenerateDictionaryLoad(MacroAssembler* masm,
@@ -127,21 +127,21 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
Label done;
// Probe the dictionary.
- StringDictionaryLookupStub::GeneratePositiveLookup(masm,
- miss_label,
- &done,
- elements,
- name,
- r0,
- r1);
+ NameDictionaryLookupStub::GeneratePositiveLookup(masm,
+ miss_label,
+ &done,
+ elements,
+ name,
+ r0,
+ r1);
// If probing finds an entry in the dictionary, r1 contains the
// index into the dictionary. Check that the value is a normal
// property.
__ bind(&done);
const int kElementsStartOffset =
- StringDictionary::kHeaderSize +
- StringDictionary::kElementsStartIndex * kPointerSize;
+ NameDictionary::kHeaderSize +
+ NameDictionary::kElementsStartIndex * kPointerSize;
const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
__ Test(Operand(elements, r1, times_pointer_size,
kDetailsOffset - kHeapObjectTag),
@@ -160,8 +160,8 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
// storage. This function may fail to store a property even though it
// is in the dictionary, so code at miss_label must always call a
// backup property store that is complete. This function is safe to
-// call if name is not a symbol, and will jump to the miss_label in
-// that case. The generated code assumes that the receiver has slow
+// call if name is not an internalized string, and will jump to the miss_label
+// in that case. The generated code assumes that the receiver has slow
// properties, is not a global object and does not have interceptors.
static void GenerateDictionaryStore(MacroAssembler* masm,
Label* miss_label,
@@ -184,21 +184,21 @@ static void GenerateDictionaryStore(MacroAssembler* masm,
Label done;
// Probe the dictionary.
- StringDictionaryLookupStub::GeneratePositiveLookup(masm,
- miss_label,
- &done,
- elements,
- name,
- scratch0,
- scratch1);
+ NameDictionaryLookupStub::GeneratePositiveLookup(masm,
+ miss_label,
+ &done,
+ elements,
+ name,
+ scratch0,
+ scratch1);
// If probing finds an entry in the dictionary, scratch0 contains the
// index into the dictionary. Check that the value is a normal
// property that is not read only.
__ bind(&done);
const int kElementsStartOffset =
- StringDictionary::kHeaderSize +
- StringDictionary::kElementsStartIndex * kPointerSize;
+ NameDictionary::kHeaderSize +
+ NameDictionary::kElementsStartIndex * kPointerSize;
const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
const int kTypeAndReadOnlyMask =
(PropertyDetails::TypeField::kMask |
@@ -224,49 +224,6 @@ static void GenerateDictionaryStore(MacroAssembler* masm,
}
-void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : receiver
- // -- rcx : name
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss;
-
- StubCompiler::GenerateLoadArrayLength(masm, rax, rdx, &miss);
- __ bind(&miss);
- StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
-}
-
-
-void LoadIC::GenerateStringLength(MacroAssembler* masm, bool support_wrappers) {
- // ----------- S t a t e -------------
- // -- rax : receiver
- // -- rcx : name
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss;
-
- StubCompiler::GenerateLoadStringLength(masm, rax, rdx, rbx, &miss,
- support_wrappers);
- __ bind(&miss);
- StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
-}
-
-
-void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : receiver
- // -- rcx : name
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss;
-
- StubCompiler::GenerateLoadFunctionPrototype(masm, rax, rdx, rbx, &miss);
- __ bind(&miss);
- StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
-}
-
-
// Checks the receiver for special cases (value type, slow case bits).
// Falls through for regular JS object.
static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
@@ -356,31 +313,38 @@ static void GenerateFastArrayLoad(MacroAssembler* masm,
}
-// Checks whether a key is an array index string or a symbol string.
-// Falls through if the key is a symbol.
-static void GenerateKeyStringCheck(MacroAssembler* masm,
- Register key,
- Register map,
- Register hash,
- Label* index_string,
- Label* not_symbol) {
+// Checks whether a key is an array index string or a unique name.
+// Falls through if the key is a unique name.
+static void GenerateKeyNameCheck(MacroAssembler* masm,
+ Register key,
+ Register map,
+ Register hash,
+ Label* index_string,
+ Label* not_unique) {
// Register use:
// key - holds the key and is unchanged. Assumed to be non-smi.
// Scratch registers:
// map - used to hold the map of the key.
// hash - used to hold the hash of the key.
- __ CmpObjectType(key, FIRST_NONSTRING_TYPE, map);
- __ j(above_equal, not_symbol);
+ Label unique;
+ __ CmpObjectType(key, LAST_UNIQUE_NAME_TYPE, map);
+ __ j(above, not_unique);
+ STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
+ __ j(equal, &unique);
+
// Is the string an array index, with cached numeric value?
- __ movl(hash, FieldOperand(key, String::kHashFieldOffset));
- __ testl(hash, Immediate(String::kContainsCachedArrayIndexMask));
+ __ movl(hash, FieldOperand(key, Name::kHashFieldOffset));
+ __ testl(hash, Immediate(Name::kContainsCachedArrayIndexMask));
__ j(zero, index_string); // The value in hash is used at jump target.
- // Is the string a symbol?
- STATIC_ASSERT(kSymbolTag != 0);
+ // Is the string internalized? We already know it's a string so a single
+ // bit test is enough.
+ STATIC_ASSERT(kNotInternalizedTag != 0);
__ testb(FieldOperand(map, Map::kInstanceTypeOffset),
- Immediate(kIsSymbolMask));
- __ j(zero, not_symbol);
+ Immediate(kIsNotInternalizedMask));
+ __ j(not_zero, not_unique);
+
+ __ bind(&unique);
}
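
GenerateKeyNameCheck sorts keys into three buckets: strings whose hash field caches an array index go to index_name, symbols and internalized strings fall through as unique names, and everything else goes to not_unique. A flattened C++ model of that classification, with hypothetical fields carrying the facts the real code reads from the map and hash field:

#include <cstdint>
#include <optional>

// Hypothetical, flattened view of the checks above; the real code works on
// map instance types and Name::kHashFieldOffset, here the relevant facts are
// carried as explicit fields.
struct Key {
  bool is_symbol;                        // symbols are always unique names
  bool is_internalized_string;           // kIsNotInternalizedMask is clear
  std::optional<uint32_t> cached_index;  // kContainsCachedArrayIndexMask
};

enum class KeyKind { INDEX, UNIQUE_NAME, NOT_UNIQUE };

// Mirrors the three exits: index_name for cached array indices, fall-through
// for unique names, not_unique for everything else.
KeyKind ClassifyKey(const Key& key) {
  if (key.is_symbol) return KeyKind::UNIQUE_NAME;
  if (key.cached_index.has_value()) return KeyKind::INDEX;
  return key.is_internalized_string ? KeyKind::UNIQUE_NAME
                                    : KeyKind::NOT_UNIQUE;
}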
@@ -391,11 +355,11 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
- Label slow, check_string, index_smi, index_string, property_array_property;
+ Label slow, check_name, index_smi, index_name, property_array_property;
Label probe_dictionary, check_number_dictionary;
// Check that the key is a smi.
- __ JumpIfNotSmi(rax, &check_string);
+ __ JumpIfNotSmi(rax, &check_name);
__ bind(&index_smi);
// Now the key is known to be a smi. This place is also jumped to from below
// where a numeric string is converted to a smi.
@@ -440,8 +404,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ IncrementCounter(counters->keyed_load_generic_slow(), 1);
GenerateRuntimeGetProperty(masm);
- __ bind(&check_string);
- GenerateKeyStringCheck(masm, rax, rcx, rbx, &index_string, &slow);
+ __ bind(&check_name);
+ GenerateKeyNameCheck(masm, rax, rcx, rbx, &index_name, &slow);
GenerateKeyedLoadReceiverCheck(
masm, rdx, rcx, Map::kHasNamedInterceptor, &slow);
@@ -464,7 +428,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
int mask = (KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask);
__ and_(rcx, Immediate(mask));
- // Load the key (consisting of map and symbol) from the cache and
+ // Load the key (consisting of map and internalized string) from the cache and
// check for match.
Label load_in_object_property;
static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
@@ -542,7 +506,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ IncrementCounter(counters->keyed_load_generic_symbol(), 1);
__ ret(0);
- __ bind(&index_string);
+ __ bind(&index_name);
__ IndexFromHash(rbx, rax);
__ jmp(&index_smi);
}
@@ -576,7 +540,7 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
char_at_generator.GenerateSlow(masm, call_helper);
__ bind(&miss);
- GenerateMiss(masm, false);
+ GenerateMiss(masm, MISS);
}
@@ -606,10 +570,10 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
__ j(not_zero, &slow);
// Everything is fine, call runtime.
- __ pop(rcx);
+ __ PopReturnAddressTo(rcx);
__ push(rdx); // receiver
__ push(rax); // key
- __ push(rcx); // return address
+ __ PushReturnAddressFrom(rcx);
// Perform tail call to the entry.
__ TailCallExternalReference(
@@ -619,7 +583,7 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
1);
__ bind(&slow);
- GenerateMiss(masm, false);
+ GenerateMiss(masm, MISS);
}
@@ -709,7 +673,9 @@ static void KeyedStoreGenerateGenericHelper(
rbx,
rdi,
slow);
- ElementsTransitionGenerator::GenerateSmiToDouble(masm, slow);
+ AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS,
+ FAST_DOUBLE_ELEMENTS);
+ ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, slow);
__ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
__ jmp(&fast_double_without_map_check);
@@ -720,7 +686,9 @@ static void KeyedStoreGenerateGenericHelper(
rbx,
rdi,
slow);
- ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm);
+ mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
+ ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm, mode,
+ slow);
__ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
__ jmp(&finish_object_store);
@@ -734,7 +702,8 @@ static void KeyedStoreGenerateGenericHelper(
rbx,
rdi,
slow);
- ElementsTransitionGenerator::GenerateDoubleToObject(masm, slow);
+ mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
+ ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, slow);
__ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
__ jmp(&finish_object_store);
}
@@ -743,10 +712,10 @@ static void KeyedStoreGenerateGenericHelper(
void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
StrictModeFlag strict_mode) {
// ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
+ // -- rax : value
+ // -- rcx : key
+ // -- rdx : receiver
+ // -- rsp[0] : return address
// -----------------------------------
Label slow, slow_with_tagged_index, fast_object, fast_object_grow;
Label fast_double, fast_double_grow;
@@ -853,8 +822,8 @@ void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
extra_state,
Code::NORMAL,
argc);
- Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, rdx, rcx, rbx,
- rax);
+ masm->isolate()->stub_cache()->GenerateProbe(
+ masm, flags, rdx, rcx, rbx, rax);
// If the stub cache probing failed, the receiver might be a value.
// For value objects, we use the map of the prototype objects for
@@ -890,8 +859,8 @@ void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
// Probe the stub cache for the value object.
__ bind(&probe);
- Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, rdx, rcx, rbx,
- no_reg);
+ masm->isolate()->stub_cache()->GenerateProbe(
+ masm, flags, rdx, rcx, rbx, no_reg);
__ bind(&miss);
}
@@ -901,14 +870,14 @@ static void GenerateFunctionTailCall(MacroAssembler* masm,
int argc,
Label* miss) {
// ----------- S t a t e -------------
- // rcx : function name
- // rdi : function
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
+ // rcx : function name
+ // rdi : function
+ // rsp[0] : return address
+ // rsp[8] : argument argc
+ // rsp[16] : argument argc - 1
// ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
+ // rsp[argc * 8] : argument 1
+ // rsp[(argc + 1) * 8] : argument 0 = receiver
// -----------------------------------
__ JumpIfSmi(rdi, miss);
// Check that the value is a JavaScript function.
@@ -925,20 +894,20 @@ static void GenerateFunctionTailCall(MacroAssembler* masm,
// The generated code falls through if the call should be handled by runtime.
void CallICBase::GenerateNormal(MacroAssembler* masm, int argc) {
// ----------- S t a t e -------------
- // rcx : function name
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
+ // rcx : function name
+ // rsp[0] : return address
+ // rsp[8] : argument argc
+ // rsp[16] : argument argc - 1
// ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
+ // rsp[argc * 8] : argument 1
+ // rsp[(argc + 1) * 8] : argument 0 = receiver
// -----------------------------------
Label miss;
- // Get the receiver of the function from the stack.
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+ StackArgumentsAccessor args(rsp, argc);
+ __ movq(rdx, args.GetReceiverOperand());
- GenerateStringDictionaryReceiverCheck(masm, rdx, rax, rbx, &miss);
+ GenerateNameDictionaryReceiverCheck(masm, rdx, rax, rbx, &miss);
// rax: elements
// Search the dictionary placing the result in rdi.
@@ -955,13 +924,13 @@ void CallICBase::GenerateMiss(MacroAssembler* masm,
IC::UtilityId id,
Code::ExtraICState extra_state) {
// ----------- S t a t e -------------
- // rcx : function name
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
+ // rcx : function name
+ // rsp[0] : return address
+ // rsp[8] : argument argc
+ // rsp[16] : argument argc - 1
// ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
+ // rsp[argc * 8] : argument 1
+ // rsp[(argc + 1) * 8] : argument 0 = receiver
// -----------------------------------
Counters* counters = masm->isolate()->counters();
@@ -971,8 +940,8 @@ void CallICBase::GenerateMiss(MacroAssembler* masm,
__ IncrementCounter(counters->keyed_call_miss(), 1);
}
- // Get the receiver of the function from the stack; 1 ~ return address.
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+ StackArgumentsAccessor args(rsp, argc);
+ __ movq(rdx, args.GetReceiverOperand());
// Enter an internal frame.
{
@@ -996,7 +965,7 @@ void CallICBase::GenerateMiss(MacroAssembler* masm,
// This can happen only for regular CallIC but not KeyedCallIC.
if (id == IC::kCallIC_Miss) {
Label invoke, global;
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize)); // receiver
+ __ movq(rdx, args.GetReceiverOperand());
__ JumpIfSmi(rdx, &invoke);
__ CmpObjectType(rdx, JS_GLOBAL_OBJECT_TYPE, rcx);
__ j(equal, &global);
@@ -1006,7 +975,7 @@ void CallICBase::GenerateMiss(MacroAssembler* masm,
// Patch the receiver on the stack.
__ bind(&global);
__ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
- __ movq(Operand(rsp, (argc + 1) * kPointerSize), rdx);
+ __ movq(args.GetReceiverOperand(), rdx);
__ bind(&invoke);
}
@@ -1027,17 +996,17 @@ void CallIC::GenerateMegamorphic(MacroAssembler* masm,
int argc,
Code::ExtraICState extra_ic_state) {
// ----------- S t a t e -------------
- // rcx : function name
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
+ // rcx : function name
+ // rsp[0] : return address
+ // rsp[8] : argument argc
+ // rsp[16] : argument argc - 1
// ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
+ // rsp[argc * 8] : argument 1
+ // rsp[(argc + 1) * 8] : argument 0 = receiver
// -----------------------------------
- // Get the receiver of the function from the stack; 1 ~ return address.
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+ StackArgumentsAccessor args(rsp, argc);
+ __ movq(rdx, args.GetReceiverOperand());
GenerateMonomorphicCacheProbe(masm, argc, Code::CALL_IC, extra_ic_state);
GenerateMiss(masm, argc, extra_ic_state);
}
@@ -1045,24 +1014,24 @@ void CallIC::GenerateMegamorphic(MacroAssembler* masm,
void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// ----------- S t a t e -------------
- // rcx : function name
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
+ // rcx : function name
+ // rsp[0] : return address
+ // rsp[8] : argument argc
+ // rsp[16] : argument argc - 1
// ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
+ // rsp[argc * 8] : argument 1
+ // rsp[(argc + 1) * 8] : argument 0 = receiver
// -----------------------------------
- // Get the receiver of the function from the stack; 1 ~ return address.
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+ StackArgumentsAccessor args(rsp, argc);
+ __ movq(rdx, args.GetReceiverOperand());
Label do_call, slow_call, slow_load;
- Label check_number_dictionary, check_string, lookup_monomorphic_cache;
- Label index_smi, index_string;
+ Label check_number_dictionary, check_name, lookup_monomorphic_cache;
+ Label index_smi, index_name;
// Check that the key is a smi.
- __ JumpIfNotSmi(rcx, &check_string);
+ __ JumpIfNotSmi(rcx, &check_name);
__ bind(&index_smi);
// Now the key is known to be a smi. This place is also jumped to from below
@@ -1110,10 +1079,10 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
__ movq(rdi, rax);
__ jmp(&do_call);
- __ bind(&check_string);
- GenerateKeyStringCheck(masm, rcx, rax, rbx, &index_string, &slow_call);
+ __ bind(&check_name);
+ GenerateKeyNameCheck(masm, rcx, rax, rbx, &index_name, &slow_call);
- // The key is known to be a symbol.
+ // The key is known to be a unique name.
// If the receiver is a regular JS object with slow properties then do
// a quick inline probe of the receiver's dictionary.
// Otherwise do the monomorphic cache probe.
@@ -1140,14 +1109,14 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
__ bind(&slow_call);
// This branch is taken if:
// - the receiver requires boxing or access check,
- // - the key is neither smi nor symbol,
+ // - the key is neither smi nor a unique name,
// - the value loaded is not a function,
// - there is hope that the runtime will create a monomorphic call stub
// that will get fetched next time.
__ IncrementCounter(counters->keyed_call_generic_slow(), 1);
GenerateMiss(masm, argc);
- __ bind(&index_string);
+ __ bind(&index_name);
__ IndexFromHash(rbx, rcx);
// Now jump to the place where smi keys are handled.
__ jmp(&index_smi);
@@ -1156,19 +1125,19 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
// ----------- S t a t e -------------
- // rcx : function name
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
+ // rcx : function name
+ // rsp[0] : return address
+ // rsp[8] : argument argc
+ // rsp[16] : argument argc - 1
// ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
+ // rsp[argc * 8] : argument 1
+ // rsp[(argc + 1) * 8] : argument 0 = receiver
// -----------------------------------
- // Check if the name is a string.
+ // Check if the name is really a name.
Label miss;
__ JumpIfSmi(rcx, &miss);
- Condition cond = masm->IsObjectStringType(rcx, rax, rax);
+ Condition cond = masm->IsObjectNameType(rcx, rax, rax);
__ j(NegateCondition(cond), &miss);
CallICBase::GenerateNormal(masm, argc);
__ bind(&miss);
@@ -1261,7 +1230,7 @@ void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : key
// -- rdx : receiver
- // -- rsp[0] : return address
+ // -- rsp[0] : return address
// -----------------------------------
Label slow, notin;
Operand mapped_location =
@@ -1278,16 +1247,16 @@ void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
__ movq(rax, unmapped_location);
__ Ret();
__ bind(&slow);
- GenerateMiss(masm, false);
+ GenerateMiss(masm, MISS);
}
void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
+ // -- rax : value
+ // -- rcx : key
+ // -- rdx : receiver
+ // -- rsp[0] : return address
// -----------------------------------
Label slow, notin;
Operand mapped_location = GenerateMappedArgumentsLookup(
@@ -1317,23 +1286,24 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
INLINE_SMI_CHECK);
__ Ret();
__ bind(&slow);
- GenerateMiss(masm, false);
+ GenerateMiss(masm, MISS);
}
void KeyedCallIC::GenerateNonStrictArguments(MacroAssembler* masm,
int argc) {
// ----------- S t a t e -------------
- // rcx : function name
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
+ // rcx : function name
+ // rsp[0] : return address
+ // rsp[8] : argument argc
+ // rsp[16] : argument argc - 1
// ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
+ // rsp[argc * 8] : argument 1
+ // rsp[(argc + 1) * 8] : argument 0 = receiver
// -----------------------------------
Label slow, notin;
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+ StackArgumentsAccessor args(rsp, argc);
+ __ movq(rdx, args.GetReceiverOperand());
Operand mapped_location = GenerateMappedArgumentsLookup(
masm, rdx, rcx, rbx, rax, r8, &notin, &slow);
__ movq(rdi, mapped_location);
@@ -1359,12 +1329,13 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// -----------------------------------
// Probe the stub cache.
- Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC, MONOMORPHIC);
- Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, rax, rcx, rbx,
- rdx);
+ Code::Flags flags = Code::ComputeFlags(
+ Code::HANDLER, MONOMORPHIC, Code::kNoExtraICState,
+ Code::NORMAL, Code::LOAD_IC);
+ masm->isolate()->stub_cache()->GenerateProbe(
+ masm, flags, rax, rcx, rbx, rdx);
- // Cache miss: Jump to runtime.
- StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
+ GenerateMiss(masm);
}
@@ -1376,7 +1347,7 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
// -----------------------------------
Label miss;
- GenerateStringDictionaryReceiverCheck(masm, rax, rdx, rbx, &miss);
+ GenerateNameDictionaryReceiverCheck(masm, rax, rdx, rbx, &miss);
// rdx: elements
// Search the dictionary placing the result in rax.
@@ -1399,10 +1370,10 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->load_miss(), 1);
- __ pop(rbx);
+ __ PopReturnAddressTo(rbx);
__ push(rax); // receiver
__ push(rcx); // name
- __ push(rbx); // return address
+ __ PushReturnAddressFrom(rbx);
// Perform tail call to the entry.
ExternalReference ref =
@@ -1411,23 +1382,40 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
}
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
+void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : receiver
+ // -- rcx : name
+ // -- rsp[0] : return address
+ // -----------------------------------
+
+ __ PopReturnAddressTo(rbx);
+ __ push(rax); // receiver
+ __ push(rcx); // name
+ __ PushReturnAddressFrom(rbx);
+
+ // Perform tail call to the entry.
+ __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
+}
+
+
+void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
// ----------- S t a t e -------------
// -- rax : key
// -- rdx : receiver
- // -- rsp[0] : return address
+ // -- rsp[0] : return address
// -----------------------------------
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->keyed_load_miss(), 1);
- __ pop(rbx);
+ __ PopReturnAddressTo(rbx);
__ push(rdx); // receiver
__ push(rax); // name
- __ push(rbx); // return address
+ __ PushReturnAddressFrom(rbx);
// Perform tail call to the entry.
- ExternalReference ref = force_generic
+ ExternalReference ref = miss_mode == MISS_FORCE_GENERIC
? ExternalReference(IC_Utility(kKeyedLoadIC_MissForceGeneric),
masm->isolate())
: ExternalReference(IC_Utility(kKeyedLoadIC_Miss), masm->isolate());
@@ -1439,13 +1427,13 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : key
// -- rdx : receiver
- // -- rsp[0] : return address
+ // -- rsp[0] : return address
// -----------------------------------
- __ pop(rbx);
+ __ PopReturnAddressTo(rbx);
__ push(rdx); // receiver
__ push(rax); // name
- __ push(rbx); // return address
+ __ PushReturnAddressFrom(rbx);
// Perform tail call to the entry.
__ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
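[Editorial note, not part of the patch] Several of the miss handlers above switch from a raw pop/push of the return address to the PopReturnAddressTo/PushReturnAddressFrom macros, but the stack shuffle is unchanged: lift the return address off the top, push the runtime call's arguments, then put the return address back so the tail call returns to the original caller. A minimal stack model of that pattern (the strings and the vector-as-stack are only for illustration):

#include <cassert>
#include <string>
#include <vector>

int main() {
  // Top of stack is the back of the vector.
  std::vector<std::string> stack = {"caller frame", "return address"};

  std::string rbx = stack.back();   // __ PopReturnAddressTo(rbx);
  stack.pop_back();

  stack.push_back("receiver");      // __ push(rdx);
  stack.push_back("name");          // __ push(rax);

  stack.push_back(rbx);             // __ PushReturnAddressFrom(rbx);

  // The runtime entry now finds [receiver, name] directly below the return
  // address, which is what TailCallRuntime(..., 2, 1) expects.
  assert(stack.back() == "return address");
  assert(stack[stack.size() - 2] == "name");
  assert(stack[stack.size() - 3] == "receiver");
  return 0;
}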
@@ -1462,10 +1450,11 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
// -----------------------------------
// Get the receiver from the stack and probe the stub cache.
- Code::Flags flags =
- Code::ComputeFlags(Code::STORE_IC, MONOMORPHIC, strict_mode);
- Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, rdx, rcx, rbx,
- no_reg);
+ Code::Flags flags = Code::ComputeFlags(
+ Code::HANDLER, MONOMORPHIC, strict_mode,
+ Code::NORMAL, Code::STORE_IC);
+ masm->isolate()->stub_cache()->GenerateProbe(
+ masm, flags, rdx, rcx, rbx, no_reg);
// Cache miss: Jump to runtime.
GenerateMiss(masm);
@@ -1480,11 +1469,11 @@ void StoreIC::GenerateMiss(MacroAssembler* masm) {
// -- rsp[0] : return address
// -----------------------------------
- __ pop(rbx);
+ __ PopReturnAddressTo(rbx);
__ push(rdx); // receiver
__ push(rcx); // name
__ push(rax); // value
- __ push(rbx); // return address
+ __ PushReturnAddressFrom(rbx);
// Perform tail call to the entry.
ExternalReference ref =
@@ -1493,65 +1482,6 @@ void StoreIC::GenerateMiss(MacroAssembler* masm) {
}
-void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : name
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- //
- // This accepts as a receiver anything JSArray::SetElementsLength accepts
- // (currently anything except for external arrays which means anything with
- // elements of FixedArray type). Value must be a number, but only smis are
- // accepted as the most common case.
-
- Label miss;
-
- Register receiver = rdx;
- Register value = rax;
- Register scratch = rbx;
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Check that the object is a JS array.
- __ CmpObjectType(receiver, JS_ARRAY_TYPE, scratch);
- __ j(not_equal, &miss);
-
- // Check that elements are FixedArray.
- // We rely on StoreIC_ArrayLength below to deal with all types of
- // fast elements (including COW).
- __ movq(scratch, FieldOperand(receiver, JSArray::kElementsOffset));
- __ CmpObjectType(scratch, FIXED_ARRAY_TYPE, scratch);
- __ j(not_equal, &miss);
-
- // Check that the array has fast properties, otherwise the length
- // property might have been redefined.
- __ movq(scratch, FieldOperand(receiver, JSArray::kPropertiesOffset));
- __ CompareRoot(FieldOperand(scratch, FixedArray::kMapOffset),
- Heap::kHashTableMapRootIndex);
- __ j(equal, &miss);
-
- // Check that value is a smi.
- __ JumpIfNotSmi(value, &miss);
-
- // Prepare tail call to StoreIC_ArrayLength.
- __ pop(scratch);
- __ push(receiver);
- __ push(value);
- __ push(scratch); // return address
-
- ExternalReference ref =
- ExternalReference(IC_Utility(kStoreIC_ArrayLength), masm->isolate());
- __ TailCallExternalReference(ref, 2, 1);
-
- __ bind(&miss);
-
- GenerateMiss(masm);
-}
-
-
void StoreIC::GenerateNormal(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : value
@@ -1562,7 +1492,7 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
Label miss;
- GenerateStringDictionaryReceiverCheck(masm, rdx, rbx, rdi, &miss);
+ GenerateNameDictionaryReceiverCheck(masm, rdx, rbx, rdi, &miss);
GenerateDictionaryStore(masm, &miss, rbx, rcx, rax, r8, r9);
Counters* counters = masm->isolate()->counters();
@@ -1575,21 +1505,21 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
}
-void StoreIC::GenerateGlobalProxy(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
+void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
+ StrictModeFlag strict_mode) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : name
// -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
- __ pop(rbx);
+ __ PopReturnAddressTo(rbx);
__ push(rdx);
__ push(rcx);
__ push(rax);
__ Push(Smi::FromInt(NONE)); // PropertyAttributes
__ Push(Smi::FromInt(strict_mode));
- __ push(rbx); // return address
+ __ PushReturnAddressFrom(rbx);
// Do tail-call to runtime routine.
__ TailCallRuntime(Runtime::kSetProperty, 5, 1);
@@ -1599,110 +1529,85 @@ void StoreIC::GenerateGlobalProxy(MacroAssembler* masm,
void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
StrictModeFlag strict_mode) {
// ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
+ // -- rax : value
+ // -- rcx : key
+ // -- rdx : receiver
+ // -- rsp[0] : return address
// -----------------------------------
- __ pop(rbx);
+ __ PopReturnAddressTo(rbx);
__ push(rdx); // receiver
__ push(rcx); // key
__ push(rax); // value
__ Push(Smi::FromInt(NONE)); // PropertyAttributes
__ Push(Smi::FromInt(strict_mode)); // Strict mode.
- __ push(rbx); // return address
+ __ PushReturnAddressFrom(rbx);
// Do tail-call to runtime routine.
__ TailCallRuntime(Runtime::kSetProperty, 5, 1);
}
-void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
+void StoreIC::GenerateSlow(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
+ // -- rax : value
+ // -- rcx : key
+ // -- rdx : receiver
+ // -- rsp[0] : return address
// -----------------------------------
- __ pop(rbx);
+ __ PopReturnAddressTo(rbx);
__ push(rdx); // receiver
__ push(rcx); // key
__ push(rax); // value
- __ push(rbx); // return address
+ __ PushReturnAddressFrom(rbx);
// Do tail-call to runtime routine.
- ExternalReference ref(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
+ ExternalReference ref(IC_Utility(kStoreIC_Slow), masm->isolate());
__ TailCallExternalReference(ref, 3, 1);
}
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
+void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
+ // -- rax : value
+ // -- rcx : key
+ // -- rdx : receiver
+ // -- rsp[0] : return address
// -----------------------------------
- __ pop(rbx);
+ __ PopReturnAddressTo(rbx);
__ push(rdx); // receiver
__ push(rcx); // key
__ push(rax); // value
- __ push(rbx); // return address
+ __ PushReturnAddressFrom(rbx);
// Do tail-call to runtime routine.
- ExternalReference ref = force_generic
- ? ExternalReference(IC_Utility(kKeyedStoreIC_MissForceGeneric),
- masm->isolate())
- : ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
+ ExternalReference ref(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
__ TailCallExternalReference(ref, 3, 1);
}
-void KeyedStoreIC::GenerateTransitionElementsSmiToDouble(MacroAssembler* masm) {
+void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
// ----------- S t a t e -------------
- // -- rbx : target map
- // -- rdx : receiver
- // -- rsp[0] : return address
+ // -- rax : value
+ // -- rcx : key
+ // -- rdx : receiver
+ // -- rsp[0] : return address
// -----------------------------------
- // Must return the modified receiver in eax.
- if (!FLAG_trace_elements_transitions) {
- Label fail;
- ElementsTransitionGenerator::GenerateSmiToDouble(masm, &fail);
- __ movq(rax, rdx);
- __ Ret();
- __ bind(&fail);
- }
-
- __ pop(rbx);
- __ push(rdx);
- __ push(rbx); // return address
- __ TailCallRuntime(Runtime::kTransitionElementsSmiToDouble, 1, 1);
-}
-
-void KeyedStoreIC::GenerateTransitionElementsDoubleToObject(
- MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rbx : target map
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- // Must return the modified receiver in eax.
- if (!FLAG_trace_elements_transitions) {
- Label fail;
- ElementsTransitionGenerator::GenerateDoubleToObject(masm, &fail);
- __ movq(rax, rdx);
- __ Ret();
- __ bind(&fail);
- }
+ __ PopReturnAddressTo(rbx);
+ __ push(rdx); // receiver
+ __ push(rcx); // key
+ __ push(rax); // value
+ __ PushReturnAddressFrom(rbx);
- __ pop(rbx);
- __ push(rdx);
- __ push(rbx); // return address
- __ TailCallRuntime(Runtime::kTransitionElementsDoubleToObject, 1, 1);
+ // Do tail-call to runtime routine.
+ ExternalReference ref = miss_mode == MISS_FORCE_GENERIC
+ ? ExternalReference(IC_Utility(kKeyedStoreIC_MissForceGeneric),
+ masm->isolate())
+ : ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
+ __ TailCallExternalReference(ref, 3, 1);
}
@@ -1729,7 +1634,7 @@ Condition CompareIC::ComputeCondition(Token::Value op) {
}
-static bool HasInlinedSmiCode(Address address) {
+bool CompareIC::HasInlinedSmiCode(Address address) {
// The address of the instruction following the call.
Address test_instruction_address =
address + Assembler::kCallTargetAddressOffset;
@@ -1740,39 +1645,6 @@ static bool HasInlinedSmiCode(Address address) {
}
-void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
- HandleScope scope;
- Handle<Code> rewritten;
- State previous_state = GetState();
-
- State state = TargetState(previous_state, HasInlinedSmiCode(address()), x, y);
- if (state == GENERIC) {
- CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS);
- rewritten = stub.GetCode();
- } else {
- ICCompareStub stub(op_, state);
- if (state == KNOWN_OBJECTS) {
- stub.set_known_map(Handle<Map>(Handle<JSObject>::cast(x)->map()));
- }
- rewritten = stub.GetCode();
- }
- set_target(*rewritten);
-
-#ifdef DEBUG
- if (FLAG_trace_ic) {
- PrintF("[CompareIC (%s->%s)#%s]\n",
- GetStateName(previous_state),
- GetStateName(state),
- Token::Name(op_));
- }
-#endif
-
- // Activate inlined smi code.
- if (previous_state == UNINITIALIZED) {
- PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK);
- }
-}
-
void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
// The address of the instruction following the call.
Address test_instruction_address =
diff --git a/deps/v8/src/x64/lithium-codegen-x64.cc b/deps/v8/src/x64/lithium-codegen-x64.cc
index b461e6290..31fbcd27a 100644
--- a/deps/v8/src/x64/lithium-codegen-x64.cc
+++ b/deps/v8/src/x64/lithium-codegen-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -27,11 +27,12 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_X64)
+#if V8_TARGET_ARCH_X64
#include "x64/lithium-codegen-x64.h"
#include "code-stubs.h"
#include "stub-cache.h"
+#include "hydrogen-osr.h"
namespace v8 {
namespace internal {
@@ -39,7 +40,7 @@ namespace internal {
// When invoking builtins, we need to record the safepoint in the middle of
// the invoke instruction sequence generated by the macro assembler.
-class SafepointGenerator : public CallWrapper {
+class SafepointGenerator V8_FINAL : public CallWrapper {
public:
SafepointGenerator(LCodeGen* codegen,
LPointerMap* pointers,
@@ -47,13 +48,13 @@ class SafepointGenerator : public CallWrapper {
: codegen_(codegen),
pointers_(pointers),
deopt_mode_(mode) { }
- virtual ~SafepointGenerator() { }
+ virtual ~SafepointGenerator() {}
- virtual void BeforeCall(int call_size) const {
+ virtual void BeforeCall(int call_size) const V8_OVERRIDE {
codegen_->EnsureSpaceForLazyDeopt(Deoptimizer::patch_size() - call_size);
}
- virtual void AfterCall() const {
+ virtual void AfterCall() const V8_OVERRIDE {
codegen_->RecordSafepoint(pointers_, deopt_mode_);
}
@@ -67,7 +68,7 @@ class SafepointGenerator : public CallWrapper {
#define __ masm()->
bool LCodeGen::GenerateCode() {
- HPhase phase("Z_Code generation", chunk());
+ LPhase phase("Z_Code generation", chunk());
ASSERT(is_unused());
status_ = GENERATING;
@@ -88,95 +89,105 @@ void LCodeGen::FinishCode(Handle<Code> code) {
ASSERT(is_done());
code->set_stack_slots(GetStackSlotCount());
code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
+ RegisterDependentCodeForEmbeddedMaps(code);
PopulateDeoptimizationData(code);
+ info()->CommitDependencies(code);
}
-void LChunkBuilder::Abort(const char* reason) {
+void LChunkBuilder::Abort(BailoutReason reason) {
info()->set_bailout_reason(reason);
status_ = ABORTED;
}
-void LCodeGen::Comment(const char* format, ...) {
- if (!FLAG_code_comments) return;
- char buffer[4 * KB];
- StringBuilder builder(buffer, ARRAY_SIZE(buffer));
- va_list arguments;
- va_start(arguments, format);
- builder.AddFormattedList(format, arguments);
- va_end(arguments);
-
- // Copy the string before recording it in the assembler to avoid
- // issues when the stack allocated buffer goes out of scope.
- int length = builder.position();
- Vector<char> copy = Vector<char>::New(length + 1);
- memcpy(copy.start(), builder.Finalize(), copy.length());
- masm()->RecordComment(copy.start());
+#ifdef _MSC_VER
+void LCodeGen::MakeSureStackPagesMapped(int offset) {
+ const int kPageSize = 4 * KB;
+ for (offset -= kPageSize; offset > 0; offset -= kPageSize) {
+ __ movq(Operand(rsp, offset), rax);
+ }
}
+#endif
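[Editorial note, not part of the patch] MakeSureStackPagesMapped above factors out the MSVC-only loop that previously sat inline in the prologue: Windows only guarantees access one guard page below the lowest stack address touched so far, so after a large rsp adjustment each 4 KB page of the new area is written once (the written value is irrelevant). A standalone sketch of that loop over an ordinary buffer standing in for the stack:

#include <cstdio>
#include <vector>

int main() {
  const int kPageSize = 4 * 1024;
  const int kAllocated = 10 * kPageSize + 123;   // stand-in for slots * kPointerSize
  std::vector<unsigned char> frame(kAllocated);  // stand-in for the reserved stack area

  int touched = 0;
  // Same iteration as MakeSureStackPagesMapped: start one page below the full
  // size and step down a page at a time while the offset stays positive.
  for (int offset = kAllocated - kPageSize; offset > 0; offset -= kPageSize) {
    frame[offset] = 0;  // models "__ movq(Operand(rsp, offset), rax)"
    ++touched;
  }
  std::printf("touched %d pages for %d bytes\n", touched, kAllocated);
  return 0;
}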
bool LCodeGen::GeneratePrologue() {
ASSERT(is_generating());
- ProfileEntryHookStub::MaybeCallEntryHook(masm_);
+ if (info()->IsOptimizing()) {
+ ProfileEntryHookStub::MaybeCallEntryHook(masm_);
#ifdef DEBUG
- if (strlen(FLAG_stop_at) > 0 &&
- info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
- __ int3();
- }
+ if (strlen(FLAG_stop_at) > 0 &&
+ info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
+ __ int3();
+ }
#endif
- // Strict mode functions need to replace the receiver with undefined
- // when called as functions (without an explicit receiver
- // object). rcx is zero for method calls and non-zero for function
- // calls.
- if (!info_->is_classic_mode() || info_->is_native()) {
- Label ok;
- __ testq(rcx, rcx);
- __ j(zero, &ok, Label::kNear);
- // +1 for return address.
- int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
- __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
- __ movq(Operand(rsp, receiver_offset), kScratchRegister);
- __ bind(&ok);
- }
-
- __ push(rbp); // Caller's frame pointer.
- __ movq(rbp, rsp);
- __ push(rsi); // Callee's context.
- __ push(rdi); // Callee's JS function.
+ // Strict mode functions need to replace the receiver with undefined
+ // when called as functions (without an explicit receiver
+ // object). rcx is zero for method calls and non-zero for function
+ // calls.
+ if (!info_->is_classic_mode() || info_->is_native()) {
+ Label ok;
+ __ testq(rcx, rcx);
+ __ j(zero, &ok, Label::kNear);
+ StackArgumentsAccessor args(rsp, scope()->num_parameters());
+ __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
+ __ movq(args.GetReceiverOperand(), kScratchRegister);
+ __ bind(&ok);
+ }
+ }
+
+ info()->set_prologue_offset(masm_->pc_offset());
+ if (NeedsEagerFrame()) {
+ ASSERT(!frame_is_built_);
+ frame_is_built_ = true;
+ __ Prologue(info()->IsStub() ? BUILD_STUB_FRAME : BUILD_FUNCTION_FRAME);
+ info()->AddNoFrameRange(0, masm_->pc_offset());
+ }
// Reserve space for the stack slots needed by the code.
int slots = GetStackSlotCount();
if (slots > 0) {
if (FLAG_debug_code) {
+ __ subq(rsp, Immediate(slots * kPointerSize));
+#ifdef _MSC_VER
+ MakeSureStackPagesMapped(slots * kPointerSize);
+#endif
+ __ push(rax);
__ Set(rax, slots);
- __ movq(kScratchRegister, kSlotsZapValue, RelocInfo::NONE);
+ __ movq(kScratchRegister, kSlotsZapValue, RelocInfo::NONE64);
Label loop;
__ bind(&loop);
- __ push(kScratchRegister);
+ __ movq(MemOperand(rsp, rax, times_pointer_size, 0),
+ kScratchRegister);
__ decl(rax);
__ j(not_zero, &loop);
+ __ pop(rax);
} else {
__ subq(rsp, Immediate(slots * kPointerSize));
#ifdef _MSC_VER
- // On windows, you may not access the stack more than one page below
- // the most recently mapped page. To make the allocated area randomly
- // accessible, we write to each page in turn (the value is irrelevant).
- const int kPageSize = 4 * KB;
- for (int offset = slots * kPointerSize - kPageSize;
- offset > 0;
- offset -= kPageSize) {
- __ movq(Operand(rsp, offset), rax);
- }
+ MakeSureStackPagesMapped(slots * kPointerSize);
#endif
}
+
+ if (info()->saves_caller_doubles()) {
+ Comment(";;; Save clobbered callee double registers");
+ int count = 0;
+ BitVector* doubles = chunk()->allocated_double_registers();
+ BitVector::Iterator save_iterator(doubles);
+ while (!save_iterator.Done()) {
+ __ movsd(MemOperand(rsp, count * kDoubleSize),
+ XMMRegister::FromAllocationIndex(save_iterator.Current()));
+ save_iterator.Advance();
+ count++;
+ }
+ }
}
// Possibly allocate a local context.
- int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (heap_slots > 0) {
Comment(";;; Allocate local context");
// Argument to NewContext is the function, which is still in rdi.
@@ -212,39 +223,64 @@ bool LCodeGen::GeneratePrologue() {
}
// Trace the call.
- if (FLAG_trace) {
+ if (FLAG_trace && info()->IsOptimizing()) {
__ CallRuntime(Runtime::kTraceEnter, 0);
}
return !is_aborted();
}
-bool LCodeGen::GenerateBody() {
- ASSERT(is_generating());
- bool emit_instructions = true;
- for (current_instruction_ = 0;
- !is_aborted() && current_instruction_ < instructions_->length();
- current_instruction_++) {
- LInstruction* instr = instructions_->at(current_instruction_);
- if (instr->IsLabel()) {
- LLabel* label = LLabel::cast(instr);
- emit_instructions = !label->HasReplacement();
- }
+void LCodeGen::GenerateOsrPrologue() {
+ // Generate the OSR entry prologue at the first unknown OSR value, or if there
+ // are none, at the OSR entrypoint instruction.
+ if (osr_pc_offset_ >= 0) return;
- if (emit_instructions) {
- Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
- instr->CompileToNative(this);
- }
- }
- EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
- return !is_aborted();
+ osr_pc_offset_ = masm()->pc_offset();
+
+ // Adjust the frame size, subsuming the unoptimized frame into the
+ // optimized frame.
+ int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
+ ASSERT(slots >= 0);
+ __ subq(rsp, Immediate(slots * kPointerSize));
}
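[Editorial note, not part of the patch] GenerateOsrPrologue above grows the optimized frame only by the difference between its slot count and the unoptimized frame it subsumes, since on-stack replacement enters with the unoptimized frame already in place. A small model of that adjustment (the slot counts are made-up example values):

#include <cassert>

int main() {
  const int kPointerSize = 8;
  int optimized_slots = 12;          // GetStackSlotCount()
  int unoptimized_frame_slots = 5;   // graph()->osr()->UnoptimizedFrameSlots()

  int extra_slots = optimized_slots - unoptimized_frame_slots;
  assert(extra_slots >= 0);                         // mirrors ASSERT(slots >= 0)

  int rsp_adjustment = extra_slots * kPointerSize;  // __ subq(rsp, Immediate(...))
  assert(rsp_adjustment == 56);
  return 0;
}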
bool LCodeGen::GenerateJumpTable() {
+ Label needs_frame;
+ if (jump_table_.length() > 0) {
+ Comment(";;; -------------------- Jump table --------------------");
+ }
for (int i = 0; i < jump_table_.length(); i++) {
__ bind(&jump_table_[i].label);
- __ Jump(jump_table_[i].address, RelocInfo::RUNTIME_ENTRY);
+ Address entry = jump_table_[i].address;
+ Deoptimizer::BailoutType type = jump_table_[i].bailout_type;
+ int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
+ if (id == Deoptimizer::kNotDeoptimizationEntry) {
+ Comment(";;; jump table entry %d.", i);
+ } else {
+ Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
+ }
+ if (jump_table_[i].needs_frame) {
+ __ movq(kScratchRegister, ExternalReference::ForDeoptEntry(entry));
+ if (needs_frame.is_bound()) {
+ __ jmp(&needs_frame);
+ } else {
+ __ bind(&needs_frame);
+ __ push(rbp);
+ __ movq(rbp, rsp);
+ __ push(rsi);
+ // This variant of deopt can only be used with stubs. Since we don't
+ // have a function pointer to install in the stack frame that we're
+ // building, install a special marker there instead.
+ ASSERT(info()->IsStub());
+ __ Move(rsi, Smi::FromInt(StackFrame::STUB));
+ __ push(rsi);
+ __ movq(rsi, MemOperand(rsp, kPointerSize));
+ __ call(kScratchRegister);
+ }
+ } else {
+ __ call(entry, RelocInfo::RUNTIME_ENTRY);
+ }
}
return !is_aborted();
}
@@ -255,11 +291,38 @@ bool LCodeGen::GenerateDeferredCode() {
if (deferred_.length() > 0) {
for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
LDeferredCode* code = deferred_[i];
- __ bind(code->entry());
- Comment(";;; Deferred code @%d: %s.",
+
+ HValue* value =
+ instructions_->at(code->instruction_index())->hydrogen_value();
+ RecordAndWritePosition(value->position());
+
+ Comment(";;; <@%d,#%d> "
+ "-------------------- Deferred %s --------------------",
code->instruction_index(),
+ code->instr()->hydrogen_value()->id(),
code->instr()->Mnemonic());
+ __ bind(code->entry());
+ if (NeedsDeferredFrame()) {
+ Comment(";;; Build frame");
+ ASSERT(!frame_is_built_);
+ ASSERT(info()->IsStub());
+ frame_is_built_ = true;
+ // Build the frame in such a way that esi isn't trashed.
+ __ push(rbp); // Caller's frame pointer.
+ __ push(Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ Push(Smi::FromInt(StackFrame::STUB));
+ __ lea(rbp, Operand(rsp, 2 * kPointerSize));
+ Comment(";;; Deferred code");
+ }
code->Generate();
+ if (NeedsDeferredFrame()) {
+ __ bind(code->done());
+ Comment(";;; Destroy frame");
+ ASSERT(frame_is_built_);
+ frame_is_built_ = false;
+ __ movq(rsp, rbp);
+ __ pop(rbp);
+ }
__ jmp(code->exit());
}
}
@@ -302,7 +365,13 @@ XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
bool LCodeGen::IsInteger32Constant(LConstantOperand* op) const {
return op->IsConstantOperand() &&
- chunk_->LookupLiteralRepresentation(op).IsInteger32();
+ chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
+}
+
+
+bool LCodeGen::IsSmiConstant(LConstantOperand* op) const {
+ return op->IsConstantOperand() &&
+ chunk_->LookupLiteralRepresentation(op).IsSmi();
}
@@ -312,14 +381,18 @@ bool LCodeGen::IsTaggedConstant(LConstantOperand* op) const {
}
-int LCodeGen::ToInteger32(LConstantOperand* op) const {
+int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
HConstant* constant = chunk_->LookupConstant(op);
- ASSERT(chunk_->LookupLiteralRepresentation(op).IsInteger32());
- ASSERT(constant->HasInteger32Value());
return constant->Integer32Value();
}
+Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
+ HConstant* constant = chunk_->LookupConstant(op);
+ return Smi::FromInt(constant->Integer32Value());
+}
+
+
double LCodeGen::ToDouble(LConstantOperand* op) const {
HConstant* constant = chunk_->LookupConstant(op);
ASSERT(constant->HasDoubleValue());
@@ -327,10 +400,17 @@ double LCodeGen::ToDouble(LConstantOperand* op) const {
}
+ExternalReference LCodeGen::ToExternalReference(LConstantOperand* op) const {
+ HConstant* constant = chunk_->LookupConstant(op);
+ ASSERT(constant->HasExternalReferenceValue());
+ return constant->ExternalReferenceValue();
+}
+
+
Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
HConstant* constant = chunk_->LookupConstant(op);
- ASSERT(chunk_->LookupLiteralRepresentation(op).IsTagged());
- return constant->handle();
+ ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
+ return constant->handle(isolate());
}
@@ -338,41 +418,23 @@ Operand LCodeGen::ToOperand(LOperand* op) const {
// Does not handle registers. In X64 assembler, plain registers are not
// representable as an Operand.
ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
- int index = op->index();
- if (index >= 0) {
- // Local or spill slot. Skip the frame pointer, function, and
- // context in the fixed part of the frame.
- return Operand(rbp, -(index + 3) * kPointerSize);
- } else {
- // Incoming parameter. Skip the return address.
- return Operand(rbp, -(index - 1) * kPointerSize);
- }
+ return Operand(rbp, StackSlotOffset(op->index()));
}
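[Editorial note, not part of the patch] The removed branch above is the frame-offset arithmetic that the new shared StackSlotOffset() helper is presumably performing. A standalone sketch of that layout, with offsets relative to rbp (StackSlotOffsetSketch is an illustrative name; positive indices are spill slots, negative indices incoming parameters):

#include <cstdio>

namespace {

const int kPointerSize = 8;

int StackSlotOffsetSketch(int index) {
  if (index >= 0) {
    // Local or spill slot: skip the fixed words (frame pointer, function and
    // context) named in the removed comment.
    return -(index + 3) * kPointerSize;
  }
  // Incoming parameter: skip the return address above the saved rbp.
  return -(index - 1) * kPointerSize;
}

}  // namespace

int main() {
  std::printf("spill slot 0   -> rbp%+d\n", StackSlotOffsetSketch(0));    // rbp-24
  std::printf("spill slot 1   -> rbp%+d\n", StackSlotOffsetSketch(1));    // rbp-32
  std::printf("parameter -1   -> rbp%+d\n", StackSlotOffsetSketch(-1));   // rbp+16
  std::printf("parameter -2   -> rbp%+d\n", StackSlotOffsetSketch(-2));   // rbp+24
  return 0;
}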
void LCodeGen::WriteTranslation(LEnvironment* environment,
- Translation* translation,
- int* arguments_index,
- int* arguments_count) {
+ Translation* translation) {
if (environment == NULL) return;
// The translation includes one command per value in the environment.
- int translation_size = environment->values()->length();
+ int translation_size = environment->translation_size();
// The output frame height does not include the parameters.
int height = translation_size - environment->parameter_count();
- // Function parameters are arguments to the outermost environment. The
- // arguments index points to the first element of a sequence of tagged
- // values on the stack that represent the arguments. This needs to be
- // kept in sync with the LArgumentsElements implementation.
- *arguments_index = -environment->parameter_count();
- *arguments_count = environment->parameter_count();
-
- WriteTranslation(environment->outer(),
- translation,
- arguments_index,
- arguments_count);
- int closure_id = *info()->closure() != *environment->closure()
+ WriteTranslation(environment->outer(), translation);
+ bool has_closure_id = !info()->closure().is_null() &&
+ !info()->closure().is_identical_to(environment->closure());
+ int closure_id = has_closure_id
? DefineDeoptimizationLiteral(environment->closure())
: Translation::kSelfLiteralId;
@@ -396,68 +458,63 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
case ARGUMENTS_ADAPTOR:
translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
break;
+ case STUB:
+ translation->BeginCompiledStubFrame();
+ break;
}
- // Inlined frames which push their arguments cause the index to be
- // bumped and a new stack area to be used for materialization.
- if (environment->entry() != NULL &&
- environment->entry()->arguments_pushed()) {
- *arguments_index = *arguments_index < 0
- ? GetStackSlotCount()
- : *arguments_index + *arguments_count;
- *arguments_count = environment->entry()->arguments_count() + 1;
- }
-
+ int object_index = 0;
+ int dematerialized_index = 0;
for (int i = 0; i < translation_size; ++i) {
LOperand* value = environment->values()->at(i);
- // spilled_registers_ and spilled_double_registers_ are either
- // both NULL or both set.
- if (environment->spilled_registers() != NULL && value != NULL) {
- if (value->IsRegister() &&
- environment->spilled_registers()[value->index()] != NULL) {
- translation->MarkDuplicate();
- AddToTranslation(translation,
- environment->spilled_registers()[value->index()],
- environment->HasTaggedValueAt(i),
- environment->HasUint32ValueAt(i),
- *arguments_index,
- *arguments_count);
- } else if (
- value->IsDoubleRegister() &&
- environment->spilled_double_registers()[value->index()] != NULL) {
- translation->MarkDuplicate();
- AddToTranslation(
- translation,
- environment->spilled_double_registers()[value->index()],
- false,
- false,
- *arguments_index,
- *arguments_count);
- }
- }
-
- AddToTranslation(translation,
+ AddToTranslation(environment,
+ translation,
value,
environment->HasTaggedValueAt(i),
environment->HasUint32ValueAt(i),
- *arguments_index,
- *arguments_count);
+ &object_index,
+ &dematerialized_index);
}
}
-void LCodeGen::AddToTranslation(Translation* translation,
+void LCodeGen::AddToTranslation(LEnvironment* environment,
+ Translation* translation,
LOperand* op,
bool is_tagged,
bool is_uint32,
- int arguments_index,
- int arguments_count) {
- if (op == NULL) {
- // TODO(twuerthinger): Introduce marker operands to indicate that this value
- // is not present and must be reconstructed from the deoptimizer. Currently
- // this is only used for the arguments object.
- translation->StoreArgumentsObject(arguments_index, arguments_count);
- } else if (op->IsStackSlot()) {
+ int* object_index_pointer,
+ int* dematerialized_index_pointer) {
+ if (op == LEnvironment::materialization_marker()) {
+ int object_index = (*object_index_pointer)++;
+ if (environment->ObjectIsDuplicateAt(object_index)) {
+ int dupe_of = environment->ObjectDuplicateOfAt(object_index);
+ translation->DuplicateObject(dupe_of);
+ return;
+ }
+ int object_length = environment->ObjectLengthAt(object_index);
+ if (environment->ObjectIsArgumentsAt(object_index)) {
+ translation->BeginArgumentsObject(object_length);
+ } else {
+ translation->BeginCapturedObject(object_length);
+ }
+ int dematerialized_index = *dematerialized_index_pointer;
+ int env_offset = environment->translation_size() + dematerialized_index;
+ *dematerialized_index_pointer += object_length;
+ for (int i = 0; i < object_length; ++i) {
+ LOperand* value = environment->values()->at(env_offset + i);
+ AddToTranslation(environment,
+ translation,
+ value,
+ environment->HasTaggedValueAt(env_offset + i),
+ environment->HasUint32ValueAt(env_offset + i),
+ object_index_pointer,
+ dematerialized_index_pointer);
+ }
+ return;
+ }
+
+ if (op->IsStackSlot()) {
if (is_tagged) {
translation->StoreStackSlot(op->index());
} else if (is_uint32) {
@@ -485,7 +542,7 @@ void LCodeGen::AddToTranslation(Translation* translation,
translation->StoreDoubleRegister(reg);
} else if (op->IsConstantOperand()) {
HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
- int src_index = DefineDeoptimizationLiteral(constant->handle());
+ int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
translation->StoreLiteral(src_index);
} else {
UNREACHABLE();
@@ -500,8 +557,6 @@ void LCodeGen::CallCodeGeneric(Handle<Code> code,
int argc) {
EnsureSpaceForLazyDeopt(Deoptimizer::patch_size() - masm()->CallSize(code));
ASSERT(instr != NULL);
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
__ call(code, mode);
RecordSafepointWithLazyDeopt(instr, safepoint_mode, argc);
@@ -523,13 +578,13 @@ void LCodeGen::CallCode(Handle<Code> code,
void LCodeGen::CallRuntime(const Runtime::Function* function,
int num_arguments,
- LInstruction* instr) {
+ LInstruction* instr,
+ SaveFPRegsMode save_doubles) {
ASSERT(instr != NULL);
ASSERT(instr->HasPointerMap());
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
- __ CallRuntime(function, num_arguments);
+ __ CallRuntime(function, num_arguments, save_doubles);
+
RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0);
}
@@ -562,8 +617,6 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
int frame_count = 0;
int jsframe_count = 0;
- int args_index = 0;
- int args_count = 0;
for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
++frame_count;
if (e->frame_type() == JS_FUNCTION) {
@@ -571,7 +624,7 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
}
}
Translation translation(&translations_, frame_count, jsframe_count, zone());
- WriteTranslation(environment, &translation, &args_index, &args_count);
+ WriteTranslation(environment, &translation);
int deoptimization_index = deoptimizations_.length();
int pc_offset = masm()->pc_offset();
environment->Register(deoptimization_index,
@@ -582,26 +635,90 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
}
-void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
+void LCodeGen::DeoptimizeIf(Condition cc,
+ LEnvironment* environment,
+ Deoptimizer::BailoutType bailout_type) {
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
ASSERT(environment->HasBeenRegistered());
int id = environment->deoptimization_index();
- Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
+ ASSERT(info()->IsOptimizing() || info()->IsStub());
+ Address entry =
+ Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
if (entry == NULL) {
- Abort("bailout was not prepared");
+ Abort(kBailoutWasNotPrepared);
return;
}
- if (cc == no_condition) {
- __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
+ ASSERT(FLAG_deopt_every_n_times == 0); // Not yet implemented on x64.
+
+ if (info()->ShouldTrapOnDeopt()) {
+ Label done;
+ if (cc != no_condition) {
+ __ j(NegateCondition(cc), &done, Label::kNear);
+ }
+ __ int3();
+ __ bind(&done);
+ }
+
+ ASSERT(info()->IsStub() || frame_is_built_);
+ if (cc == no_condition && frame_is_built_) {
+ __ call(entry, RelocInfo::RUNTIME_ENTRY);
} else {
// We often have several deopts to the same entry, reuse the last
// jump entry if this is the case.
if (jump_table_.is_empty() ||
- jump_table_.last().address != entry) {
- jump_table_.Add(JumpTableEntry(entry), zone());
+ jump_table_.last().address != entry ||
+ jump_table_.last().needs_frame != !frame_is_built_ ||
+ jump_table_.last().bailout_type != bailout_type) {
+ Deoptimizer::JumpTableEntry table_entry(entry,
+ bailout_type,
+ !frame_is_built_);
+ jump_table_.Add(table_entry, zone());
+ }
+ if (cc == no_condition) {
+ __ jmp(&jump_table_.last().label);
+ } else {
+ __ j(cc, &jump_table_.last().label);
+ }
+ }
+}
+
+
+void LCodeGen::DeoptimizeIf(Condition cc,
+ LEnvironment* environment) {
+ Deoptimizer::BailoutType bailout_type = info()->IsStub()
+ ? Deoptimizer::LAZY
+ : Deoptimizer::EAGER;
+ DeoptimizeIf(cc, environment, bailout_type);
+}
+
+
+void LCodeGen::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) {
+ ZoneList<Handle<Map> > maps(1, zone());
+ ZoneList<Handle<JSObject> > objects(1, zone());
+ int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
+ for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
+ if (Code::IsWeakEmbeddedObject(code->kind(), it.rinfo()->target_object())) {
+ if (it.rinfo()->target_object()->IsMap()) {
+ Handle<Map> map(Map::cast(it.rinfo()->target_object()));
+ maps.Add(map, zone());
+ } else if (it.rinfo()->target_object()->IsJSObject()) {
+ Handle<JSObject> object(JSObject::cast(it.rinfo()->target_object()));
+ objects.Add(object, zone());
+ }
}
- __ j(cc, &jump_table_.last().label);
+ }
+#ifdef VERIFY_HEAP
+ // This disables verification of weak embedded objects after full GC.
+ // AddDependentCode can cause a GC, which would observe the state where
+ // this code is not yet in the depended code lists of the embedded maps.
+ NoWeakObjectVerificationScope disable_verification_of_embedded_objects;
+#endif
+ for (int i = 0; i < maps.length(); i++) {
+ maps.at(i)->AddDependentCode(DependentCode::kWeaklyEmbeddedGroup, code);
+ }
+ for (int i = 0; i < objects.length(); i++) {
+ AddWeakObjectToCodeDependency(isolate()->heap(), objects.at(i), code);
}
}
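[Editorial note, not part of the patch] The reworked DeoptimizeIf above only appends a new jump-table entry when the deopt target, bailout type or frame requirement differs from the previous entry, so runs of identical deopts share one trampoline. A minimal model of that dedup rule (Entry and AddDeopt are illustrative stand-ins for Deoptimizer::JumpTableEntry and the real emitter):

#include <cstdint>
#include <cstdio>
#include <vector>

struct Entry {
  std::uintptr_t address;
  int bailout_type;   // e.g. EAGER vs. LAZY
  bool needs_frame;
};

static void AddDeopt(std::vector<Entry>* table, Entry candidate) {
  if (table->empty() ||
      table->back().address != candidate.address ||
      table->back().bailout_type != candidate.bailout_type ||
      table->back().needs_frame != candidate.needs_frame) {
    table->push_back(candidate);
  }
  // Either way, the emitted branch targets table->back().label in the
  // generated code, so consecutive identical deopts reuse one entry.
}

int main() {
  std::vector<Entry> table;
  AddDeopt(&table, {0x1000, 0, false});
  AddDeopt(&table, {0x1000, 0, false});  // identical -> reused, not appended
  AddDeopt(&table, {0x1000, 1, false});  // different bailout type -> new entry
  AddDeopt(&table, {0x2000, 1, true});   // different target -> new entry
  std::printf("jump table entries: %zu\n", table.size());  // prints 3
  return 0;
}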
@@ -612,16 +729,19 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
Handle<DeoptimizationInputData> data =
factory()->NewDeoptimizationInputData(length, TENURED);
- Handle<ByteArray> translations = translations_.CreateByteArray();
+ Handle<ByteArray> translations =
+ translations_.CreateByteArray(isolate()->factory());
data->SetTranslationByteArray(*translations);
data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
Handle<FixedArray> literals =
factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
- for (int i = 0; i < deoptimization_literals_.length(); i++) {
- literals->set(i, *deoptimization_literals_[i]);
+ { AllowDeferredHandleDereference copy_handles;
+ for (int i = 0; i < deoptimization_literals_.length(); i++) {
+ literals->set(i, *deoptimization_literals_[i]);
+ }
+ data->SetLiteralArray(*literals);
}
- data->SetLiteralArray(*literals);
data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
@@ -710,7 +830,7 @@ void LCodeGen::RecordSafepoint(LPointerMap* pointers,
void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
- LPointerMap empty_pointers(RelocInfo::kNoPosition, zone());
+ LPointerMap empty_pointers(zone());
RecordSafepoint(&empty_pointers, deopt_mode);
}
@@ -722,18 +842,26 @@ void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
}
-void LCodeGen::RecordPosition(int position) {
+void LCodeGen::RecordAndWritePosition(int position) {
if (position == RelocInfo::kNoPosition) return;
masm()->positions_recorder()->RecordPosition(position);
+ masm()->positions_recorder()->WriteRecordedPositions();
+}
+
+
+static const char* LabelType(LLabel* label) {
+ if (label->is_loop_header()) return " (loop header)";
+ if (label->is_osr_entry()) return " (OSR entry)";
+ return "";
}
void LCodeGen::DoLabel(LLabel* label) {
- if (label->is_loop_header()) {
- Comment(";;; B%d - LOOP entry", label->block_id());
- } else {
- Comment(";;; B%d", label->block_id());
- }
+ Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
+ current_instruction_,
+ label->hydrogen_value()->id(),
+ label->block_id(),
+ LabelType(label));
__ bind(label->label());
current_block_ = label->block_id();
DoGap(label);
@@ -771,38 +899,28 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
switch (instr->hydrogen()->major_key()) {
case CodeStub::RegExpConstructResult: {
RegExpConstructResultStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::RegExpExec: {
RegExpExecStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::SubString: {
SubStringStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::NumberToString: {
- NumberToStringStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::StringAdd: {
- StringAddStub stub(NO_STRING_ADD_FLAGS);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::StringCompare: {
StringCompareStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::TranscendentalCache: {
TranscendentalCacheStub stub(instr->transcendental_type(),
TranscendentalCacheStub::TAGGED);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
default:
@@ -812,118 +930,121 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
- // Nothing to do.
+ GenerateOsrPrologue();
}
void LCodeGen::DoModI(LModI* instr) {
- if (instr->hydrogen()->HasPowerOf2Divisor()) {
- Register dividend = ToRegister(instr->left());
-
- int32_t divisor =
- HConstant::cast(instr->hydrogen()->right())->Integer32Value();
+ HMod* hmod = instr->hydrogen();
+ HValue* left = hmod->left();
+ HValue* right = hmod->right();
+ if (hmod->HasPowerOf2Divisor()) {
+ // TODO(svenpanne) We should really do the strength reduction on the
+ // Hydrogen level.
+ Register left_reg = ToRegister(instr->left());
+ ASSERT(left_reg.is(ToRegister(instr->result())));
- if (divisor < 0) divisor = -divisor;
+ // Note: The code below even works when right contains kMinInt.
+ int32_t divisor = Abs(right->GetInteger32Constant());
- Label positive_dividend, done;
- __ testl(dividend, dividend);
- __ j(not_sign, &positive_dividend, Label::kNear);
- __ negl(dividend);
- __ andl(dividend, Immediate(divisor - 1));
- __ negl(dividend);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ j(not_zero, &done, Label::kNear);
- DeoptimizeIf(no_condition, instr->environment());
- } else {
+ Label left_is_not_negative, done;
+ if (left->CanBeNegative()) {
+ __ testl(left_reg, left_reg);
+ __ j(not_sign, &left_is_not_negative, Label::kNear);
+ __ negl(left_reg);
+ __ andl(left_reg, Immediate(divisor - 1));
+ __ negl(left_reg);
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(zero, instr->environment());
+ }
__ jmp(&done, Label::kNear);
}
- __ bind(&positive_dividend);
- __ andl(dividend, Immediate(divisor - 1));
+
+ __ bind(&left_is_not_negative);
+ __ andl(left_reg, Immediate(divisor - 1));
__ bind(&done);
- } else {
- Label done, remainder_eq_dividend, slow, do_subtraction, both_positive;
+
+ } else if (hmod->fixed_right_arg().has_value) {
Register left_reg = ToRegister(instr->left());
+ ASSERT(left_reg.is(ToRegister(instr->result())));
Register right_reg = ToRegister(instr->right());
- Register result_reg = ToRegister(instr->result());
+ int32_t divisor = hmod->fixed_right_arg().value;
+ ASSERT(IsPowerOf2(divisor));
+
+ // Check if our assumption of a fixed right operand still holds.
+ __ cmpl(right_reg, Immediate(divisor));
+ DeoptimizeIf(not_equal, instr->environment());
+
+ Label left_is_not_negative, done;
+ if (left->CanBeNegative()) {
+ __ testl(left_reg, left_reg);
+ __ j(not_sign, &left_is_not_negative, Label::kNear);
+ __ negl(left_reg);
+ __ andl(left_reg, Immediate(divisor - 1));
+ __ negl(left_reg);
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(zero, instr->environment());
+ }
+ __ jmp(&done, Label::kNear);
+ }
+
+ __ bind(&left_is_not_negative);
+ __ andl(left_reg, Immediate(divisor - 1));
+ __ bind(&done);
+
+ } else {
+ Register left_reg = ToRegister(instr->left());
ASSERT(left_reg.is(rax));
- ASSERT(result_reg.is(rdx));
+ Register right_reg = ToRegister(instr->right());
ASSERT(!right_reg.is(rax));
ASSERT(!right_reg.is(rdx));
+ Register result_reg = ToRegister(instr->result());
+ ASSERT(result_reg.is(rdx));
- // Check for x % 0.
- if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
+ Label done;
+ // Check for x % 0, idiv would signal a divide error. We have to
+ // deopt in this case because we can't return a NaN.
+ if (right->CanBeZero()) {
__ testl(right_reg, right_reg);
DeoptimizeIf(zero, instr->environment());
}
- __ testl(left_reg, left_reg);
- __ j(zero, &remainder_eq_dividend, Label::kNear);
- __ j(sign, &slow, Label::kNear);
-
- __ testl(right_reg, right_reg);
- __ j(not_sign, &both_positive, Label::kNear);
- // The sign of the divisor doesn't matter.
- __ neg(right_reg);
-
- __ bind(&both_positive);
- // If the dividend is smaller than the nonnegative
- // divisor, the dividend is the result.
- __ cmpl(left_reg, right_reg);
- __ j(less, &remainder_eq_dividend, Label::kNear);
-
- // Check if the divisor is a PowerOfTwo integer.
- Register scratch = ToRegister(instr->temp());
- __ movl(scratch, right_reg);
- __ subl(scratch, Immediate(1));
- __ testl(scratch, right_reg);
- __ j(not_zero, &do_subtraction, Label::kNear);
- __ andl(left_reg, scratch);
- __ jmp(&remainder_eq_dividend, Label::kNear);
-
- __ bind(&do_subtraction);
- const int kUnfolds = 3;
- // Try a few subtractions of the dividend.
- __ movl(scratch, left_reg);
- for (int i = 0; i < kUnfolds; i++) {
- // Reduce the dividend by the divisor.
- __ subl(left_reg, right_reg);
- // Check if the dividend is less than the divisor.
- __ cmpl(left_reg, right_reg);
- __ j(less, &remainder_eq_dividend, Label::kNear);
+ // Check for kMinInt % -1, idiv would signal a divide error. We
+ // have to deopt if we care about -0, because we can't return that.
+ if (left->RangeCanInclude(kMinInt) && right->RangeCanInclude(-1)) {
+ Label no_overflow_possible;
+ __ cmpl(left_reg, Immediate(kMinInt));
+ __ j(not_zero, &no_overflow_possible, Label::kNear);
+ __ cmpl(right_reg, Immediate(-1));
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(equal, instr->environment());
+ } else {
+ __ j(not_equal, &no_overflow_possible, Label::kNear);
+ __ Set(result_reg, 0);
+ __ jmp(&done, Label::kNear);
+ }
+ __ bind(&no_overflow_possible);
}
- __ movl(left_reg, scratch);
- // Slow case, using idiv instruction.
- __ bind(&slow);
- // Sign extend eax to edx.
- // (We are using only the low 32 bits of the values.)
+ // Sign extend dividend in eax into edx:eax, since we are using only the low
+ // 32 bits of the values.
__ cdq();
- // Check for (0 % -x) that will produce negative zero.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ // If we care about -0, test if the dividend is <0 and the result is 0.
+ if (left->CanBeNegative() &&
+ hmod->CanBeZero() &&
+ hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label positive_left;
- Label done;
__ testl(left_reg, left_reg);
__ j(not_sign, &positive_left, Label::kNear);
__ idivl(right_reg);
-
- // Test the remainder for 0, because then the result would be -0.
__ testl(result_reg, result_reg);
- __ j(not_zero, &done, Label::kNear);
-
- DeoptimizeIf(no_condition, instr->environment());
+ DeoptimizeIf(zero, instr->environment());
+ __ jmp(&done, Label::kNear);
__ bind(&positive_left);
- __ idivl(right_reg);
- __ bind(&done);
- } else {
- __ idivl(right_reg);
}
- __ jmp(&done, Label::kNear);
-
- __ bind(&remainder_eq_dividend);
- __ movl(result_reg, left_reg);
-
+ __ idivl(right_reg);
__ bind(&done);
}
}
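[Editorial note, not part of the patch] The rewritten power-of-two path in DoModI above computes x % (+/-2^k) with a mask instead of idiv, negating a negative dividend around the mask so the result keeps the dividend's sign; the BailoutOnMinusZero deopt guards the case where that result would be a "-0". A portable sketch of the same arithmetic (the machine code relies on two's-complement wraparound for a kMinInt dividend, which this sketch does not exercise):

#include <cassert>
#include <cstdlib>

// Truncated modulo by a power-of-two divisor, mirroring the generated code.
static int ModPowerOf2(int dividend, int divisor) {
  int mask = std::abs(divisor) - 1;      // the divisor's sign is ignored
  if (dividend < 0) {
    return -((-dividend) & mask);        // negl; andl; negl
  }
  return dividend & mask;                // andl only
}

int main() {
  assert(ModPowerOf2(13, 8) == 13 % 8);      //  5
  assert(ModPowerOf2(-13, 8) == -13 % 8);    // -5, sign of the dividend
  assert(ModPowerOf2(13, -8) == 13 % -8);    //  5
  assert(ModPowerOf2(-16, 8) == 0);          // the "-0" case the deopt guards
  return 0;
}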
@@ -1002,7 +1123,7 @@ void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
__ neg(reg1);
DeoptimizeIf(zero, instr->environment());
}
- __ movq(reg2, multiplier, RelocInfo::NONE);
+ __ movq(reg2, multiplier, RelocInfo::NONE64);
// Result just fit in r64, because it's int32 * uint32.
__ imul(reg2, reg1);
@@ -1013,6 +1134,60 @@ void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
void LCodeGen::DoDivI(LDivI* instr) {
+ if (!instr->is_flooring() && instr->hydrogen()->HasPowerOf2Divisor()) {
+ Register dividend = ToRegister(instr->left());
+ int32_t divisor =
+ HConstant::cast(instr->hydrogen()->right())->Integer32Value();
+ int32_t test_value = 0;
+ int32_t power = 0;
+
+ if (divisor > 0) {
+ test_value = divisor - 1;
+ power = WhichPowerOf2(divisor);
+ } else {
+ // Check for (0 / -x) that will produce negative zero.
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ __ testl(dividend, dividend);
+ DeoptimizeIf(zero, instr->environment());
+ }
+ // Check for (kMinInt / -1).
+ if (divisor == -1 && instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+ __ cmpl(dividend, Immediate(kMinInt));
+ DeoptimizeIf(zero, instr->environment());
+ }
+ test_value = - divisor - 1;
+ power = WhichPowerOf2(-divisor);
+ }
+
+ if (test_value != 0) {
+ if (instr->hydrogen()->CheckFlag(
+ HInstruction::kAllUsesTruncatingToInt32)) {
+ Label done, negative;
+ __ cmpl(dividend, Immediate(0));
+ __ j(less, &negative, Label::kNear);
+ __ sarl(dividend, Immediate(power));
+ if (divisor < 0) __ negl(dividend);
+ __ jmp(&done, Label::kNear);
+
+ __ bind(&negative);
+ __ negl(dividend);
+ __ sarl(dividend, Immediate(power));
+ if (divisor > 0) __ negl(dividend);
+ __ bind(&done);
+ return; // Don't fall through to "__ neg" below.
+ } else {
+ // Deoptimize if remainder is not 0.
+ __ testl(dividend, Immediate(test_value));
+ DeoptimizeIf(not_zero, instr->environment());
+ __ sarl(dividend, Immediate(power));
+ }
+ }
+
+ if (divisor < 0) __ negl(dividend);
+
+ return;
+ }
+
LOperand* right = instr->right();
ASSERT(ToRegister(instr->result()).is(rax));
ASSERT(ToRegister(instr->left()).is(rax));
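[Editorial note, not part of the patch] The new power-of-two branch in DoDivI above replaces idiv with an arithmetic shift: when every use truncates to int32, a negative dividend is negated around the shift so the quotient rounds toward zero; otherwise the low bits must be zero or the code deoptimizes. A portable sketch of the truncating path (DivPowerOf2Truncating is an illustrative name, not V8 API):

#include <cassert>
#include <cstdlib>

// Truncating division by +/-2^power, mirroring the kAllUsesTruncatingToInt32
// path (sarl plus a conditional negl on either side of the shift).
static int DivPowerOf2Truncating(int dividend, int divisor) {
  int abs_divisor = std::abs(divisor);
  int power = 0;                                     // WhichPowerOf2(abs_divisor)
  while ((abs_divisor >> (power + 1)) != 0) ++power;

  int result = (dividend < 0) ? -((-dividend) >> power)   // negl; sarl; negl
                              : (dividend >> power);      // sarl
  if (divisor < 0) result = -result;
  return result;
}

int main() {
  assert(DivPowerOf2Truncating(13, 4) == 13 / 4);     //  3
  assert(DivPowerOf2Truncating(-13, 4) == -13 / 4);   // -3, not -4
  assert(DivPowerOf2Truncating(13, -4) == 13 / -4);   // -3
  assert(DivPowerOf2Truncating(-16, -4) == 4);
  return 0;
}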
@@ -1023,13 +1198,13 @@ void LCodeGen::DoDivI(LDivI* instr) {
// Check for x / 0.
Register right_reg = ToRegister(right);
- if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
+ if (instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) {
__ testl(right_reg, right_reg);
DeoptimizeIf(zero, instr->environment());
}
// Check for (0 / -x) that will produce negative zero.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ if (instr->hydrogen_value()->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label left_not_zero;
__ testl(left_reg, left_reg);
__ j(not_zero, &left_not_zero, Label::kNear);
@@ -1038,8 +1213,8 @@ void LCodeGen::DoDivI(LDivI* instr) {
__ bind(&left_not_zero);
}
- // Check for (-kMinInt / -1).
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+ // Check for (kMinInt / -1).
+ if (instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow)) {
Label left_not_min_int;
__ cmpl(left_reg, Immediate(kMinInt));
__ j(not_zero, &left_not_min_int, Label::kNear);
@@ -1052,9 +1227,20 @@ void LCodeGen::DoDivI(LDivI* instr) {
__ cdq();
__ idivl(right_reg);
- // Deoptimize if remainder is not 0.
- __ testl(rdx, rdx);
- DeoptimizeIf(not_zero, instr->environment());
+ if (instr->is_flooring()) {
+ Label done;
+ __ testl(rdx, rdx);
+ __ j(zero, &done, Label::kNear);
+ __ xorl(rdx, right_reg);
+ __ sarl(rdx, Immediate(31));
+ __ addl(rax, rdx);
+ __ bind(&done);
+ } else if (!instr->hydrogen()->CheckFlag(
+ HInstruction::kAllUsesTruncatingToInt32)) {
+ // Deoptimize if remainder is not 0.
+ __ testl(rdx, rdx);
+ DeoptimizeIf(not_zero, instr->environment());
+ }
}
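A standalone sketch (not part of the patch) of the flooring adjustment above: idivl truncates toward zero, and when the remainder is non-zero and the operands have opposite signs the truncated quotient is one above the floor. Division by zero and kMinInt / -1 are assumed to have been rejected by the earlier checks.

    #include <cstdint>

    int32_t FlooringDiv(int32_t left, int32_t right) {
      int32_t quotient  = left / right;   // truncates toward zero, like idivl
      int32_t remainder = left % right;
      // Signs differ: this is what the xorl/sarl(31)/addl sequence computes,
      // adding -1 to the quotient exactly when an adjustment is needed.
      if (remainder != 0 && ((remainder ^ right) < 0)) {
        quotient -= 1;
      }
      return quotient;
    }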
@@ -1063,13 +1249,17 @@ void LCodeGen::DoMulI(LMulI* instr) {
LOperand* right = instr->right();
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ movl(kScratchRegister, left);
+ if (instr->hydrogen_value()->representation().IsSmi()) {
+ __ movq(kScratchRegister, left);
+ } else {
+ __ movl(kScratchRegister, left);
+ }
}
bool can_overflow =
instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
if (right->IsConstantOperand()) {
- int right_value = ToInteger32(LConstantOperand::cast(right));
+ int32_t right_value = ToInteger32(LConstantOperand::cast(right));
if (right_value == -1) {
__ negl(left);
} else if (right_value == 0) {
@@ -1110,9 +1300,19 @@ void LCodeGen::DoMulI(LMulI* instr) {
__ imull(left, left, Immediate(right_value));
}
} else if (right->IsStackSlot()) {
- __ imull(left, ToOperand(right));
+ if (instr->hydrogen_value()->representation().IsSmi()) {
+ __ SmiToInteger64(left, left);
+ __ imul(left, ToOperand(right));
+ } else {
+ __ imull(left, ToOperand(right));
+ }
} else {
- __ imull(left, ToRegister(right));
+ if (instr->hydrogen_value()->representation().IsSmi()) {
+ __ SmiToInteger64(left, left);
+ __ imul(left, ToRegister(right));
+ } else {
+ __ imull(left, ToRegister(right));
+ }
}
if (can_overflow) {
@@ -1122,9 +1322,15 @@ void LCodeGen::DoMulI(LMulI* instr) {
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
// Bail out if the result is supposed to be negative zero.
Label done;
- __ testl(left, left);
+ if (instr->hydrogen_value()->representation().IsSmi()) {
+ __ testq(left, left);
+ } else {
+ __ testl(left, left);
+ }
__ j(not_zero, &done, Label::kNear);
if (right->IsConstantOperand()) {
+ // Constant can't be represented as Smi due to immediate size limit.
+ ASSERT(!instr->hydrogen_value()->representation().IsSmi());
if (ToInteger32(LConstantOperand::cast(right)) < 0) {
DeoptimizeIf(no_condition, instr->environment());
} else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
@@ -1132,11 +1338,19 @@ void LCodeGen::DoMulI(LMulI* instr) {
DeoptimizeIf(less, instr->environment());
}
} else if (right->IsStackSlot()) {
- __ orl(kScratchRegister, ToOperand(right));
+ if (instr->hydrogen_value()->representation().IsSmi()) {
+ __ or_(kScratchRegister, ToOperand(right));
+ } else {
+ __ orl(kScratchRegister, ToOperand(right));
+ }
DeoptimizeIf(sign, instr->environment());
} else {
// Test the non-zero operand for negative sign.
- __ orl(kScratchRegister, ToRegister(right));
+ if (instr->hydrogen_value()->representation().IsSmi()) {
+ __ or_(kScratchRegister, ToRegister(right));
+ } else {
+ __ orl(kScratchRegister, ToRegister(right));
+ }
DeoptimizeIf(sign, instr->environment());
}
__ bind(&done);
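A standalone sketch (not part of the patch) of why the Smi path above untags one operand with SmiToInteger64 before multiplying: on x64 a smi stores its 32-bit value in the upper half of the word (tagged = value << 32), so multiplying two tagged smis would overflow, while a * (b << 32) == (a * b) << 32 is again a correctly tagged smi.

    #include <cstdint>

    int64_t MultiplyTaggedSmis(int64_t tagged_left, int64_t tagged_right) {
      int64_t untagged_left = tagged_left >> 32;  // SmiToInteger64
      // The product is (a * b) << 32; overflow of a * b itself is caught by
      // the separate overflow check in the generated code.
      return untagged_left * tagged_right;
    }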
@@ -1151,7 +1365,7 @@ void LCodeGen::DoBitI(LBitI* instr) {
ASSERT(left->IsRegister());
if (right->IsConstantOperand()) {
- int right_operand = ToInteger32(LConstantOperand::cast(right));
+ int32_t right_operand = ToInteger32(LConstantOperand::cast(right));
switch (instr->op()) {
case Token::BIT_AND:
__ andl(ToRegister(left), Immediate(right_operand));
@@ -1160,7 +1374,11 @@ void LCodeGen::DoBitI(LBitI* instr) {
__ orl(ToRegister(left), Immediate(right_operand));
break;
case Token::BIT_XOR:
- __ xorl(ToRegister(left), Immediate(right_operand));
+ if (right_operand == int32_t(~0)) {
+ __ notl(ToRegister(left));
+ } else {
+ __ xorl(ToRegister(left), Immediate(right_operand));
+ }
break;
default:
UNREACHABLE();
@@ -1169,13 +1387,13 @@ void LCodeGen::DoBitI(LBitI* instr) {
} else if (right->IsStackSlot()) {
switch (instr->op()) {
case Token::BIT_AND:
- __ andl(ToRegister(left), ToOperand(right));
+ __ and_(ToRegister(left), ToOperand(right));
break;
case Token::BIT_OR:
- __ orl(ToRegister(left), ToOperand(right));
+ __ or_(ToRegister(left), ToOperand(right));
break;
case Token::BIT_XOR:
- __ xorl(ToRegister(left), ToOperand(right));
+ __ xor_(ToRegister(left), ToOperand(right));
break;
default:
UNREACHABLE();
@@ -1185,13 +1403,13 @@ void LCodeGen::DoBitI(LBitI* instr) {
ASSERT(right->IsRegister());
switch (instr->op()) {
case Token::BIT_AND:
- __ andl(ToRegister(left), ToRegister(right));
+ __ and_(ToRegister(left), ToRegister(right));
break;
case Token::BIT_OR:
- __ orl(ToRegister(left), ToRegister(right));
+ __ or_(ToRegister(left), ToRegister(right));
break;
case Token::BIT_XOR:
- __ xorl(ToRegister(left), ToRegister(right));
+ __ xor_(ToRegister(left), ToRegister(right));
break;
default:
UNREACHABLE();
@@ -1210,6 +1428,9 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
ASSERT(ToRegister(right).is(rcx));
switch (instr->op()) {
+ case Token::ROR:
+ __ rorl_cl(ToRegister(left));
+ break;
case Token::SAR:
__ sarl_cl(ToRegister(left));
break;
@@ -1228,9 +1449,14 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
break;
}
} else {
- int value = ToInteger32(LConstantOperand::cast(right));
+ int32_t value = ToInteger32(LConstantOperand::cast(right));
uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
switch (instr->op()) {
+ case Token::ROR:
+ if (shift_count != 0) {
+ __ rorl(ToRegister(left), Immediate(shift_count));
+ }
+ break;
case Token::SAR:
if (shift_count != 0) {
__ sarl(ToRegister(left), Immediate(shift_count));
@@ -1246,7 +1472,11 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
break;
case Token::SHL:
if (shift_count != 0) {
- __ shll(ToRegister(left), Immediate(shift_count));
+ if (instr->hydrogen_value()->representation().IsSmi()) {
+ __ shl(ToRegister(left), Immediate(shift_count));
+ } else {
+ __ shll(ToRegister(left), Immediate(shift_count));
+ }
}
break;
default:
@@ -1266,9 +1496,17 @@ void LCodeGen::DoSubI(LSubI* instr) {
__ subl(ToRegister(left),
Immediate(ToInteger32(LConstantOperand::cast(right))));
} else if (right->IsRegister()) {
- __ subl(ToRegister(left), ToRegister(right));
+ if (instr->hydrogen_value()->representation().IsSmi()) {
+ __ subq(ToRegister(left), ToRegister(right));
+ } else {
+ __ subl(ToRegister(left), ToRegister(right));
+ }
} else {
- __ subl(ToRegister(left), ToOperand(right));
+ if (instr->hydrogen_value()->representation().IsSmi()) {
+ __ subq(ToRegister(left), ToOperand(right));
+ } else {
+ __ subl(ToRegister(left), ToOperand(right));
+ }
}
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
@@ -1278,11 +1516,15 @@ void LCodeGen::DoSubI(LSubI* instr) {
void LCodeGen::DoConstantI(LConstantI* instr) {
- ASSERT(instr->result()->IsRegister());
__ Set(ToRegister(instr->result()), instr->value());
}
+void LCodeGen::DoConstantS(LConstantS* instr) {
+ __ Move(ToRegister(instr->result()), instr->value());
+}
+
+
void LCodeGen::DoConstantD(LConstantD* instr) {
ASSERT(instr->result()->IsDoubleRegister());
XMMRegister res = ToDoubleRegister(instr->result());
@@ -1300,28 +1542,14 @@ void LCodeGen::DoConstantD(LConstantD* instr) {
}
-void LCodeGen::DoConstantT(LConstantT* instr) {
- Handle<Object> value = instr->value();
- if (value->IsSmi()) {
- __ Move(ToRegister(instr->result()), value);
- } else {
- __ LoadHeapObject(ToRegister(instr->result()),
- Handle<HeapObject>::cast(value));
- }
-}
-
-
-void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
- Register result = ToRegister(instr->result());
- Register array = ToRegister(instr->value());
- __ movq(result, FieldOperand(array, JSArray::kLengthOffset));
+void LCodeGen::DoConstantE(LConstantE* instr) {
+ __ LoadAddress(ToRegister(instr->result()), instr->value());
}
-void LCodeGen::DoFixedArrayBaseLength(LFixedArrayBaseLength* instr) {
- Register result = ToRegister(instr->result());
- Register array = ToRegister(instr->value());
- __ movq(result, FieldOperand(array, FixedArrayBase::kLengthOffset));
+void LCodeGen::DoConstantT(LConstantT* instr) {
+ Handle<Object> value = instr->value(isolate());
+ __ Move(ToRegister(instr->result()), value);
}
@@ -1351,8 +1579,11 @@ void LCodeGen::DoValueOf(LValueOf* instr) {
Register result = ToRegister(instr->result());
ASSERT(input.is(result));
Label done;
- // If the object is a smi return the object.
- __ JumpIfSmi(input, &done, Label::kNear);
+
+ if (!instr->hydrogen()->value()->IsHeapObject()) {
+ // If the object is a smi return the object.
+ __ JumpIfSmi(input, &done, Label::kNear);
+ }
// If the object is not a value type, return the object.
__ CmpObjectType(input, JS_VALUE_TYPE, kScratchRegister);
@@ -1381,7 +1612,8 @@ void LCodeGen::DoDateField(LDateField* instr) {
} else {
if (index->value() < JSDate::kFirstUncachedField) {
ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
- __ movq(kScratchRegister, stamp);
+ Operand stamp_operand = __ ExternalOperand(stamp);
+ __ movq(kScratchRegister, stamp_operand);
__ cmpq(kScratchRegister, FieldOperand(object,
JSDate::kCacheStampOffset));
__ j(not_equal, &runtime, Label::kNear);
@@ -1391,13 +1623,8 @@ void LCodeGen::DoDateField(LDateField* instr) {
}
__ bind(&runtime);
__ PrepareCallCFunction(2);
-#ifdef _WIN64
- __ movq(rcx, object);
- __ movq(rdx, index, RelocInfo::NONE);
-#else
- __ movq(rdi, object);
- __ movq(rsi, index, RelocInfo::NONE);
-#endif
+ __ movq(arg_reg_1, object);
+ __ movq(arg_reg_2, index, RelocInfo::NONE64);
__ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
__ bind(&done);
@@ -1405,10 +1632,33 @@ void LCodeGen::DoDateField(LDateField* instr) {
}
-void LCodeGen::DoBitNotI(LBitNotI* instr) {
- LOperand* input = instr->value();
- ASSERT(input->Equals(instr->result()));
- __ not_(ToRegister(input));
+void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
+ Register string = ToRegister(instr->string());
+ Register index = ToRegister(instr->index());
+ Register value = ToRegister(instr->value());
+ String::Encoding encoding = instr->encoding();
+
+ if (FLAG_debug_code) {
+ __ push(value);
+ __ movq(value, FieldOperand(string, HeapObject::kMapOffset));
+ __ movzxbq(value, FieldOperand(value, Map::kInstanceTypeOffset));
+
+ __ andb(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
+ static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+ static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+ __ cmpq(value, Immediate(encoding == String::ONE_BYTE_ENCODING
+ ? one_byte_seq_type : two_byte_seq_type));
+ __ Check(equal, kUnexpectedStringType);
+ __ pop(value);
+ }
+
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ __ movb(FieldOperand(string, index, times_1, SeqString::kHeaderSize),
+ value);
+ } else {
+ __ movw(FieldOperand(string, index, times_2, SeqString::kHeaderSize),
+ value);
+ }
}
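A standalone sketch (not part of the patch) of the two stores above, written as plain memory accesses; the payload pointer stands in for string + SeqString::kHeaderSize.

    #include <cstdint>
    #include <cstring>

    void SetSeqStringChar(uint8_t* payload, bool one_byte_encoding,
                          int index, uint16_t value) {
      if (one_byte_encoding) {
        payload[index] = static_cast<uint8_t>(value);            // movb, times_1
      } else {
        std::memcpy(payload + 2 * index, &value, sizeof value);  // movw, times_2
      }
    }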
@@ -1426,19 +1676,40 @@ void LCodeGen::DoThrow(LThrow* instr) {
void LCodeGen::DoAddI(LAddI* instr) {
LOperand* left = instr->left();
LOperand* right = instr->right();
- ASSERT(left->Equals(instr->result()));
- if (right->IsConstantOperand()) {
- __ addl(ToRegister(left),
- Immediate(ToInteger32(LConstantOperand::cast(right))));
- } else if (right->IsRegister()) {
- __ addl(ToRegister(left), ToRegister(right));
+ if (LAddI::UseLea(instr->hydrogen()) && !left->Equals(instr->result())) {
+ if (right->IsConstantOperand()) {
+ int32_t offset = ToInteger32(LConstantOperand::cast(right));
+ __ leal(ToRegister(instr->result()),
+ MemOperand(ToRegister(left), offset));
+ } else {
+ Operand address(ToRegister(left), ToRegister(right), times_1, 0);
+ if (instr->hydrogen()->representation().IsSmi()) {
+ __ lea(ToRegister(instr->result()), address);
+ } else {
+ __ leal(ToRegister(instr->result()), address);
+ }
+ }
} else {
- __ addl(ToRegister(left), ToOperand(right));
- }
-
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- DeoptimizeIf(overflow, instr->environment());
+ if (right->IsConstantOperand()) {
+ __ addl(ToRegister(left),
+ Immediate(ToInteger32(LConstantOperand::cast(right))));
+ } else if (right->IsRegister()) {
+ if (instr->hydrogen_value()->representation().IsSmi()) {
+ __ addq(ToRegister(left), ToRegister(right));
+ } else {
+ __ addl(ToRegister(left), ToRegister(right));
+ }
+ } else {
+ if (instr->hydrogen_value()->representation().IsSmi()) {
+ __ addq(ToRegister(left), ToOperand(right));
+ } else {
+ __ addl(ToRegister(left), ToOperand(right));
+ }
+ }
+ if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+ DeoptimizeIf(overflow, instr->environment());
+ }
}
}
@@ -1448,7 +1719,7 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
LOperand* right = instr->right();
ASSERT(left->Equals(instr->result()));
HMathMinMax::Operation operation = instr->hydrogen()->operation();
- if (instr->hydrogen()->representation().IsInteger32()) {
+ if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
Label return_left;
Condition condition = (operation == HMathMinMax::kMathMin)
? less_equal
@@ -1457,17 +1728,26 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
if (right->IsConstantOperand()) {
Immediate right_imm =
Immediate(ToInteger32(LConstantOperand::cast(right)));
- __ cmpq(left_reg, right_imm);
+ ASSERT(!instr->hydrogen_value()->representation().IsSmi());
+ __ cmpl(left_reg, right_imm);
__ j(condition, &return_left, Label::kNear);
__ movq(left_reg, right_imm);
} else if (right->IsRegister()) {
Register right_reg = ToRegister(right);
- __ cmpq(left_reg, right_reg);
+ if (instr->hydrogen_value()->representation().IsSmi()) {
+ __ cmpq(left_reg, right_reg);
+ } else {
+ __ cmpl(left_reg, right_reg);
+ }
__ j(condition, &return_left, Label::kNear);
__ movq(left_reg, right_reg);
} else {
Operand right_op = ToOperand(right);
- __ cmpq(left_reg, right_op);
+ if (instr->hydrogen_value()->representation().IsSmi()) {
+ __ cmpq(left_reg, right_op);
+ } else {
+ __ cmpl(left_reg, right_op);
+ }
__ j(condition, &return_left, Label::kNear);
__ movq(left_reg, right_op);
}
@@ -1485,7 +1765,7 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
__ jmp(&return_right, Label::kNear);
__ bind(&check_zero);
- XMMRegister xmm_scratch = xmm0;
+ XMMRegister xmm_scratch = double_scratch0();
__ xorps(xmm_scratch, xmm_scratch);
__ ucomisd(left_reg, xmm_scratch);
__ j(not_equal, &return_left, Label::kNear); // left == right != 0.
@@ -1527,16 +1807,21 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
break;
case Token::DIV:
__ divsd(left, right);
+ // Don't delete this mov. It may improve performance on some CPUs
+ // when there is a mulsd depending on the result.
+ __ movaps(left, left);
break;
- case Token::MOD:
+ case Token::MOD: {
+ XMMRegister xmm_scratch = double_scratch0();
__ PrepareCallCFunction(2);
- __ movaps(xmm0, left);
+ __ movaps(xmm_scratch, left);
ASSERT(right.is(xmm1));
__ CallCFunction(
ExternalReference::double_fp_operation(Token::MOD, isolate()), 2);
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- __ movaps(result, xmm0);
+ __ movaps(result, xmm_scratch);
break;
+ }
default:
UNREACHABLE();
break;
@@ -1550,26 +1835,19 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
ASSERT(ToRegister(instr->result()).is(rax));
BinaryOpStub stub(instr->op(), NO_OVERWRITE);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
__ nop(); // Signals no inlined code.
}
-int LCodeGen::GetNextEmittedBlock(int block) {
- for (int i = block + 1; i < graph()->blocks()->length(); ++i) {
- LLabel* label = chunk_->GetLabel(i);
- if (!label->HasReplacement()) return i;
- }
- return -1;
-}
-
+template<class InstrType>
+void LCodeGen::EmitBranch(InstrType instr, Condition cc) {
+ int left_block = instr->TrueDestination(chunk_);
+ int right_block = instr->FalseDestination(chunk_);
-void LCodeGen::EmitBranch(int left_block, int right_block, Condition cc) {
- int next_block = GetNextEmittedBlock(current_block_);
- right_block = chunk_->LookupDestination(right_block);
- left_block = chunk_->LookupDestination(left_block);
+ int next_block = GetNextEmittedBlock();
- if (right_block == left_block) {
+ if (right_block == left_block || cc == no_condition) {
EmitGoto(left_block);
} else if (left_block == next_block) {
__ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
@@ -1584,62 +1862,91 @@ void LCodeGen::EmitBranch(int left_block, int right_block, Condition cc) {
}
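A standalone sketch (not part of the patch) of the branch-shape decision made by the rewritten EmitBranch above; block numbers are purely illustrative.

    enum BranchShape { kGotoOnly, kNegatedJumpToFalse, kJumpToTrue, kJumpToTrueThenGotoFalse };

    BranchShape ChooseBranchShape(int true_block, int false_block, int next_block,
                                  bool unconditional) {
      if (true_block == false_block || unconditional) return kGotoOnly;
      if (true_block == next_block) return kNegatedJumpToFalse;   // fall through to true block
      if (false_block == next_block) return kJumpToTrue;          // fall through to false block
      return kJumpToTrueThenGotoFalse;                            // conditional jump plus goto
    }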
-void LCodeGen::DoBranch(LBranch* instr) {
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
+template<class InstrType>
+void LCodeGen::EmitFalseBranch(InstrType instr, Condition cc) {
+ int false_block = instr->FalseDestination(chunk_);
+ __ j(cc, chunk_->GetAssemblyLabel(false_block));
+}
+
+
+void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
+ __ int3();
+}
+
+void LCodeGen::DoBranch(LBranch* instr) {
Representation r = instr->hydrogen()->value()->representation();
if (r.IsInteger32()) {
+ ASSERT(!info()->IsStub());
Register reg = ToRegister(instr->value());
__ testl(reg, reg);
- EmitBranch(true_block, false_block, not_zero);
+ EmitBranch(instr, not_zero);
+ } else if (r.IsSmi()) {
+ ASSERT(!info()->IsStub());
+ Register reg = ToRegister(instr->value());
+ __ testq(reg, reg);
+ EmitBranch(instr, not_zero);
} else if (r.IsDouble()) {
+ ASSERT(!info()->IsStub());
XMMRegister reg = ToDoubleRegister(instr->value());
- __ xorps(xmm0, xmm0);
- __ ucomisd(reg, xmm0);
- EmitBranch(true_block, false_block, not_equal);
+ XMMRegister xmm_scratch = double_scratch0();
+ __ xorps(xmm_scratch, xmm_scratch);
+ __ ucomisd(reg, xmm_scratch);
+ EmitBranch(instr, not_equal);
} else {
ASSERT(r.IsTagged());
Register reg = ToRegister(instr->value());
HType type = instr->hydrogen()->value()->type();
if (type.IsBoolean()) {
+ ASSERT(!info()->IsStub());
__ CompareRoot(reg, Heap::kTrueValueRootIndex);
- EmitBranch(true_block, false_block, equal);
+ EmitBranch(instr, equal);
} else if (type.IsSmi()) {
+ ASSERT(!info()->IsStub());
__ SmiCompare(reg, Smi::FromInt(0));
- EmitBranch(true_block, false_block, not_equal);
+ EmitBranch(instr, not_equal);
+ } else if (type.IsJSArray()) {
+ ASSERT(!info()->IsStub());
+ EmitBranch(instr, no_condition);
+ } else if (type.IsHeapNumber()) {
+ ASSERT(!info()->IsStub());
+ XMMRegister xmm_scratch = double_scratch0();
+ __ xorps(xmm_scratch, xmm_scratch);
+ __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
+ EmitBranch(instr, not_equal);
+ } else if (type.IsString()) {
+ ASSERT(!info()->IsStub());
+ __ cmpq(FieldOperand(reg, String::kLengthOffset), Immediate(0));
+ EmitBranch(instr, not_equal);
} else {
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
// Avoid deopts in the case where we've never executed this path before.
- if (expected.IsEmpty()) expected = ToBooleanStub::all_types();
+ if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
if (expected.Contains(ToBooleanStub::UNDEFINED)) {
// undefined -> false.
__ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
- __ j(equal, false_label);
+ __ j(equal, instr->FalseLabel(chunk_));
}
if (expected.Contains(ToBooleanStub::BOOLEAN)) {
// true -> true.
__ CompareRoot(reg, Heap::kTrueValueRootIndex);
- __ j(equal, true_label);
+ __ j(equal, instr->TrueLabel(chunk_));
// false -> false.
__ CompareRoot(reg, Heap::kFalseValueRootIndex);
- __ j(equal, false_label);
+ __ j(equal, instr->FalseLabel(chunk_));
}
if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
// 'null' -> false.
__ CompareRoot(reg, Heap::kNullValueRootIndex);
- __ j(equal, false_label);
+ __ j(equal, instr->FalseLabel(chunk_));
}
if (expected.Contains(ToBooleanStub::SMI)) {
// Smis: 0 -> false, all other -> true.
__ Cmp(reg, Smi::FromInt(0));
- __ j(equal, false_label);
- __ JumpIfSmi(reg, true_label);
+ __ j(equal, instr->FalseLabel(chunk_));
+ __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
} else if (expected.NeedsMap()) {
// If we need a map later and have a Smi -> deopt.
__ testb(reg, Immediate(kSmiTagMask));
@@ -1654,14 +1961,14 @@ void LCodeGen::DoBranch(LBranch* instr) {
// Undetectable -> false.
__ testb(FieldOperand(map, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
- __ j(not_zero, false_label);
+ __ j(not_zero, instr->FalseLabel(chunk_));
}
}
if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
// spec object -> true.
__ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
- __ j(above_equal, true_label);
+ __ j(above_equal, instr->TrueLabel(chunk_));
}
if (expected.Contains(ToBooleanStub::STRING)) {
@@ -1670,35 +1977,43 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
__ j(above_equal, &not_string, Label::kNear);
__ cmpq(FieldOperand(reg, String::kLengthOffset), Immediate(0));
- __ j(not_zero, true_label);
- __ jmp(false_label);
+ __ j(not_zero, instr->TrueLabel(chunk_));
+ __ jmp(instr->FalseLabel(chunk_));
__ bind(&not_string);
}
+ if (expected.Contains(ToBooleanStub::SYMBOL)) {
+ // Symbol value -> true.
+ __ CmpInstanceType(map, SYMBOL_TYPE);
+ __ j(equal, instr->TrueLabel(chunk_));
+ }
+
if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
// heap number -> false iff +0, -0, or NaN.
Label not_heap_number;
__ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
__ j(not_equal, &not_heap_number, Label::kNear);
- __ xorps(xmm0, xmm0);
- __ ucomisd(xmm0, FieldOperand(reg, HeapNumber::kValueOffset));
- __ j(zero, false_label);
- __ jmp(true_label);
+ XMMRegister xmm_scratch = double_scratch0();
+ __ xorps(xmm_scratch, xmm_scratch);
+ __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
+ __ j(zero, instr->FalseLabel(chunk_));
+ __ jmp(instr->TrueLabel(chunk_));
__ bind(&not_heap_number);
}
- // We've seen something for the first time -> deopt.
- DeoptimizeIf(no_condition, instr->environment());
+ if (!expected.IsGeneric()) {
+ // We've seen something for the first time -> deopt.
+ // This can only happen if we are not generic already.
+ DeoptimizeIf(no_condition, instr->environment());
+ }
}
}
}
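A standalone sketch (not part of the patch) of the ToBoolean rule the double case of DoBranch implements: ucomisd against zero leaves the zero flag set for +0, -0 and, via the unordered result, NaN, so the not_equal branch selects exactly the truthy doubles.

    #include <cmath>

    bool DoubleIsTruthy(double value) {
      return value != 0.0 && !std::isnan(value);
    }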
void LCodeGen::EmitGoto(int block) {
- block = chunk_->LookupDestination(block);
- int next_block = GetNextEmittedBlock(current_block_);
- if (block != next_block) {
- __ jmp(chunk_->GetAssemblyLabel(block));
+ if (!IsNextEmittedBlock(block)) {
+ __ jmp(chunk_->GetAssemblyLabel(chunk_->LookupDestination(block)));
}
}
@@ -1715,6 +2030,10 @@ inline Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
case Token::EQ_STRICT:
cond = equal;
break;
+ case Token::NE:
+ case Token::NE_STRICT:
+ cond = not_equal;
+ break;
case Token::LT:
cond = is_unsigned ? below : less;
break;
@@ -1736,41 +2055,54 @@ inline Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
}
-void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
+void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
LOperand* left = instr->left();
LOperand* right = instr->right();
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
Condition cc = TokenToCondition(instr->op(), instr->is_double());
if (left->IsConstantOperand() && right->IsConstantOperand()) {
// We can statically evaluate the comparison.
double left_val = ToDouble(LConstantOperand::cast(left));
double right_val = ToDouble(LConstantOperand::cast(right));
- int next_block =
- EvalComparison(instr->op(), left_val, right_val) ? true_block
- : false_block;
+ int next_block = EvalComparison(instr->op(), left_val, right_val) ?
+ instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
EmitGoto(next_block);
} else {
if (instr->is_double()) {
// Don't base result on EFLAGS when a NaN is involved. Instead
// jump to the false block.
__ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
- __ j(parity_even, chunk_->GetAssemblyLabel(false_block));
+ __ j(parity_even, instr->FalseLabel(chunk_));
} else {
int32_t value;
if (right->IsConstantOperand()) {
value = ToInteger32(LConstantOperand::cast(right));
- __ cmpl(ToRegister(left), Immediate(value));
+ if (instr->hydrogen_value()->representation().IsSmi()) {
+ __ Cmp(ToRegister(left), Smi::FromInt(value));
+ } else {
+ __ cmpl(ToRegister(left), Immediate(value));
+ }
} else if (left->IsConstantOperand()) {
value = ToInteger32(LConstantOperand::cast(left));
- if (right->IsRegister()) {
+ if (instr->hydrogen_value()->representation().IsSmi()) {
+ if (right->IsRegister()) {
+ __ Cmp(ToRegister(right), Smi::FromInt(value));
+ } else {
+ __ Cmp(ToOperand(right), Smi::FromInt(value));
+ }
+ } else if (right->IsRegister()) {
__ cmpl(ToRegister(right), Immediate(value));
} else {
__ cmpl(ToOperand(right), Immediate(value));
}
// We transposed the operands. Reverse the condition.
cc = ReverseCondition(cc);
+ } else if (instr->hydrogen_value()->representation().IsSmi()) {
+ if (right->IsRegister()) {
+ __ cmpq(ToRegister(left), ToRegister(right));
+ } else {
+ __ cmpq(ToRegister(left), ToOperand(right));
+ }
} else {
if (right->IsRegister()) {
__ cmpl(ToRegister(left), ToRegister(right));
@@ -1779,69 +2111,44 @@ void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
}
}
}
- EmitBranch(true_block, false_block, cc);
+ EmitBranch(instr, cc);
}
}
void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
Register left = ToRegister(instr->left());
- Register right = ToRegister(instr->right());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
-
- __ cmpq(left, right);
- EmitBranch(true_block, false_block, equal);
-}
-
-
-void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
- Register left = ToRegister(instr->left());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- __ cmpq(left, Immediate(instr->hydrogen()->right()));
- EmitBranch(true_block, false_block, equal);
+ if (instr->right()->IsConstantOperand()) {
+ Handle<Object> right = ToHandle(LConstantOperand::cast(instr->right()));
+ __ Cmp(left, right);
+ } else {
+ Register right = ToRegister(instr->right());
+ __ cmpq(left, right);
+ }
+ EmitBranch(instr, equal);
}
-void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) {
- Register reg = ToRegister(instr->value());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- // If the expression is known to be untagged or a smi, then it's definitely
- // not null, and it can't be a an undetectable object.
- if (instr->hydrogen()->representation().IsSpecialization() ||
- instr->hydrogen()->type().IsSmi()) {
- EmitGoto(false_block);
+void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
+ if (instr->hydrogen()->representation().IsTagged()) {
+ Register input_reg = ToRegister(instr->object());
+ __ Cmp(input_reg, factory()->the_hole_value());
+ EmitBranch(instr, equal);
return;
}
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- Heap::RootListIndex nil_value = instr->nil() == kNullValue ?
- Heap::kNullValueRootIndex :
- Heap::kUndefinedValueRootIndex;
- __ CompareRoot(reg, nil_value);
- if (instr->kind() == kStrictEquality) {
- EmitBranch(true_block, false_block, equal);
- } else {
- Heap::RootListIndex other_nil_value = instr->nil() == kNullValue ?
- Heap::kUndefinedValueRootIndex :
- Heap::kNullValueRootIndex;
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
- __ j(equal, true_label);
- __ CompareRoot(reg, other_nil_value);
- __ j(equal, true_label);
- __ JumpIfSmi(reg, false_label);
- // Check for undetectable objects by looking in the bit field in
- // the map. The object has already been smi checked.
- Register scratch = ToRegister(instr->temp());
- __ movq(scratch, FieldOperand(reg, HeapObject::kMapOffset));
- __ testb(FieldOperand(scratch, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- EmitBranch(true_block, false_block, not_zero);
- }
+ XMMRegister input_reg = ToDoubleRegister(instr->object());
+ __ ucomisd(input_reg, input_reg);
+ EmitFalseBranch(instr, parity_odd);
+
+ __ subq(rsp, Immediate(kDoubleSize));
+ __ movsd(MemOperand(rsp, 0), input_reg);
+ __ addq(rsp, Immediate(kDoubleSize));
+
+ int offset = sizeof(kHoleNanUpper32);
+ __ cmpl(MemOperand(rsp, -offset), Immediate(kHoleNanUpper32));
+ EmitBranch(instr, equal);
}
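A standalone sketch (not part of the patch) of how the double path of DoCmpHoleAndBranch recognises the hole: it is stored as a NaN with a fixed bit pattern, and only the upper 32 bits need to be compared. The constant below is illustrative; the real kHoleNanUpper32 is defined inside V8.

    #include <cstdint>
    #include <cstring>

    const uint32_t kIllustrativeHoleNanUpper32 = 0x7FF7FFFF;  // assumption, for illustration only

    bool IsHoleNan(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof bits);
      return static_cast<uint32_t>(bits >> 32) == kIllustrativeHoleNanUpper32;
    }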
@@ -1873,21 +2180,21 @@ Condition LCodeGen::EmitIsObject(Register input,
void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
Register reg = ToRegister(instr->value());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
+ Condition true_cond = EmitIsObject(
+ reg, instr->FalseLabel(chunk_), instr->TrueLabel(chunk_));
- Condition true_cond = EmitIsObject(reg, false_label, true_label);
-
- EmitBranch(true_block, false_block, true_cond);
+ EmitBranch(instr, true_cond);
}
Condition LCodeGen::EmitIsString(Register input,
Register temp1,
- Label* is_not_string) {
- __ JumpIfSmi(input, is_not_string);
+ Label* is_not_string,
+ SmiCheck check_needed = INLINE_SMI_CHECK) {
+ if (check_needed == INLINE_SMI_CHECK) {
+ __ JumpIfSmi(input, is_not_string);
+ }
+
Condition cond = masm_->IsObjectStringType(input, temp1, temp1);
return cond;
@@ -1898,20 +2205,18 @@ void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
Register reg = ToRegister(instr->value());
Register temp = ToRegister(instr->temp());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
+ SmiCheck check_needed =
+ instr->hydrogen()->value()->IsHeapObject()
+ ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
- Condition true_cond = EmitIsString(reg, temp, false_label);
+ Condition true_cond = EmitIsString(
+ reg, temp, instr->FalseLabel(chunk_), check_needed);
- EmitBranch(true_block, false_block, true_cond);
+ EmitBranch(instr, true_cond);
}
void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
Condition is_smi;
if (instr->value()->IsRegister()) {
Register input = ToRegister(instr->value());
@@ -1920,7 +2225,7 @@ void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
Operand input = ToOperand(instr->value());
is_smi = masm()->CheckSmi(input);
}
- EmitBranch(true_block, false_block, is_smi);
+ EmitBranch(instr, is_smi);
}
@@ -1928,29 +2233,26 @@ void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
Register input = ToRegister(instr->value());
Register temp = ToRegister(instr->temp());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- __ JumpIfSmi(input, chunk_->GetAssemblyLabel(false_block));
+ if (!instr->hydrogen()->value()->IsHeapObject()) {
+ __ JumpIfSmi(input, instr->FalseLabel(chunk_));
+ }
__ movq(temp, FieldOperand(input, HeapObject::kMapOffset));
__ testb(FieldOperand(temp, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
- EmitBranch(true_block, false_block, not_zero);
+ EmitBranch(instr, not_zero);
}
void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
Token::Value op = instr->op();
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- Handle<Code> ic = CompareIC::GetUninitialized(op);
+ Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
CallCode(ic, RelocInfo::CODE_TARGET, instr);
Condition condition = TokenToCondition(op, false);
__ testq(rax, rax);
- EmitBranch(true_block, false_block, condition);
+ EmitBranch(instr, condition);
}
@@ -1977,15 +2279,12 @@ static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
Register input = ToRegister(instr->value());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
- __ JumpIfSmi(input, false_label);
+ if (!instr->hydrogen()->value()->IsHeapObject()) {
+ __ JumpIfSmi(input, instr->FalseLabel(chunk_));
+ }
__ CmpObjectType(input, TestType(instr->hydrogen()), kScratchRegister);
- EmitBranch(true_block, false_block, BranchCondition(instr->hydrogen()));
+ EmitBranch(instr, BranchCondition(instr->hydrogen()));
}
@@ -2005,12 +2304,9 @@ void LCodeGen::DoHasCachedArrayIndexAndBranch(
LHasCachedArrayIndexAndBranch* instr) {
Register input = ToRegister(instr->value());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
__ testl(FieldOperand(input, String::kHashFieldOffset),
Immediate(String::kContainsCachedArrayIndexMask));
- EmitBranch(true_block, false_block, equal);
+ EmitBranch(instr, equal);
}
@@ -2028,7 +2324,7 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
__ JumpIfSmi(input, is_false);
- if (class_name->IsEqualTo(CStrVector("Function"))) {
+ if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Function"))) {
// Assuming the following assertions, we can use the same compares to test
// for both being a function type and being in the object type range.
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
@@ -2059,7 +2355,7 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
// Objects with a non-function constructor have class 'Object'.
__ CmpObjectType(temp, JS_FUNCTION_TYPE, kScratchRegister);
- if (class_name->IsEqualTo(CStrVector("Object"))) {
+ if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Object"))) {
__ j(not_equal, is_true);
} else {
__ j(not_equal, is_false);
@@ -2070,13 +2366,13 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
__ movq(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
__ movq(temp, FieldOperand(temp,
SharedFunctionInfo::kInstanceClassNameOffset));
- // The class name we are testing against is a symbol because it's a literal.
- // The name in the constructor is a symbol because of the way the context is
- // booted. This routine isn't expected to work for random API-created
+ // The class name we are testing against is internalized since it's a literal.
+ // The name in the constructor is internalized because of the way the context
+ // is booted. This routine isn't expected to work for random API-created
// classes and it doesn't have to because you can't access it with natives
- // syntax. Since both sides are symbols it is sufficient to use an identity
- // comparison.
- ASSERT(class_name->IsSymbol());
+ // syntax. Since both sides are internalized it is sufficient to use an
+ // identity comparison.
+ ASSERT(class_name->IsInternalizedString());
__ Cmp(temp, class_name);
// End with the answer in the z flag.
}
@@ -2088,25 +2384,18 @@ void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
Register temp2 = ToRegister(instr->temp2());
Handle<String> class_name = instr->hydrogen()->class_name();
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
+ EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
+ class_name, input, temp, temp2);
- EmitClassOfTest(true_label, false_label, class_name, input, temp, temp2);
-
- EmitBranch(true_block, false_block, equal);
+ EmitBranch(instr, equal);
}
void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
Register reg = ToRegister(instr->value());
- int true_block = instr->true_block_id();
- int false_block = instr->false_block_id();
__ Cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
- EmitBranch(true_block, false_block, equal);
+ EmitBranch(instr, equal);
}
@@ -2114,7 +2403,7 @@ void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
InstanceofStub stub(InstanceofStub::kNoFlags);
__ push(ToRegister(instr->left()));
__ push(ToRegister(instr->right()));
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
Label true_value, done;
__ testq(rax, rax);
__ j(zero, &true_value, Label::kNear);
@@ -2127,15 +2416,15 @@ void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
- class DeferredInstanceOfKnownGlobal: public LDeferredCode {
+ class DeferredInstanceOfKnownGlobal V8_FINAL : public LDeferredCode {
public:
DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
LInstanceOfKnownGlobal* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
+ virtual void Generate() V8_OVERRIDE {
codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
}
- virtual LInstruction* instr() { return instr_; }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
Label* map_check() { return &map_check_; }
private:
LInstanceOfKnownGlobal* instr_;
@@ -2160,9 +2449,8 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
Register map = ToRegister(instr->temp());
__ movq(map, FieldOperand(object, HeapObject::kMapOffset));
__ bind(deferred->map_check()); // Label for calculating code patching.
- Handle<JSGlobalPropertyCell> cache_cell =
- factory()->NewJSGlobalPropertyCell(factory()->the_hole_value());
- __ movq(kScratchRegister, cache_cell, RelocInfo::GLOBAL_PROPERTY_CELL);
+ Handle<Cell> cache_cell = factory()->NewCell(factory()->the_hole_value());
+ __ movq(kScratchRegister, cache_cell, RelocInfo::CELL);
__ cmpq(map, Operand(kScratchRegister, 0));
__ j(not_equal, &cache_miss, Label::kNear);
// Patched to load either true or false.
@@ -2201,7 +2489,7 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
InstanceofStub stub(flags);
__ push(ToRegister(instr->value()));
- __ PushHeapObject(instr->function());
+ __ Push(instr->function());
static const int kAdditionalDelta = 10;
int delta =
@@ -2213,7 +2501,7 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
// safepoint with two arguments because stub is going to
// remove the third argument from the stack before jumping
// to instanceof builtin on the slow path.
- CallCodeGeneric(stub.GetCode(),
+ CallCodeGeneric(stub.GetCode(isolate()),
RelocInfo::CODE_TARGET,
instr,
RECORD_SAFEPOINT_WITH_REGISTERS,
@@ -2240,7 +2528,7 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
void LCodeGen::DoCmpT(LCmpT* instr) {
Token::Value op = instr->op();
- Handle<Code> ic = CompareIC::GetUninitialized(op);
+ Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
CallCode(ic, RelocInfo::CODE_TARGET, instr);
Condition condition = TokenToCondition(op, false);
@@ -2256,21 +2544,52 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
void LCodeGen::DoReturn(LReturn* instr) {
- if (FLAG_trace) {
+ if (FLAG_trace && info()->IsOptimizing()) {
// Preserve the return value on the stack and rely on the runtime
// call to return the value in the same register.
__ push(rax);
__ CallRuntime(Runtime::kTraceExit, 1);
}
- __ movq(rsp, rbp);
- __ pop(rbp);
- __ Ret((GetParameterCount() + 1) * kPointerSize, rcx);
+ if (info()->saves_caller_doubles()) {
+ ASSERT(NeedsEagerFrame());
+ BitVector* doubles = chunk()->allocated_double_registers();
+ BitVector::Iterator save_iterator(doubles);
+ int count = 0;
+ while (!save_iterator.Done()) {
+ __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()),
+ MemOperand(rsp, count * kDoubleSize));
+ save_iterator.Advance();
+ count++;
+ }
+ }
+ int no_frame_start = -1;
+ if (NeedsEagerFrame()) {
+ __ movq(rsp, rbp);
+ __ pop(rbp);
+ no_frame_start = masm_->pc_offset();
+ }
+ if (instr->has_constant_parameter_count()) {
+ __ Ret((ToInteger32(instr->constant_parameter_count()) + 1) * kPointerSize,
+ rcx);
+ } else {
+ Register reg = ToRegister(instr->parameter_count());
+ // The argument count parameter is a smi.
+ __ SmiToInteger32(reg, reg);
+ Register return_addr_reg = reg.is(rcx) ? rbx : rcx;
+ __ PopReturnAddressTo(return_addr_reg);
+ __ shl(reg, Immediate(kPointerSizeLog2));
+ __ addq(rsp, reg);
+ __ jmp(return_addr_reg);
+ }
+ if (no_frame_start != -1) {
+ info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
+ }
}
void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
Register result = ToRegister(instr->result());
- __ LoadGlobalCell(result, instr->hydrogen()->cell());
+ __ LoadGlobalCell(result, instr->hydrogen()->cell().handle());
if (instr->hydrogen()->RequiresHoleCheck()) {
__ CompareRoot(result, Heap::kTheHoleValueRootIndex);
DeoptimizeIf(equal, instr->environment());
@@ -2292,7 +2611,7 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
Register value = ToRegister(instr->value());
- Handle<JSGlobalPropertyCell> cell_handle = instr->hydrogen()->cell();
+ Handle<Cell> cell_handle = instr->hydrogen()->cell().handle();
// If the cell we are storing to contains the hole it could have
// been deleted from the property dictionary. In that case, we need
@@ -2302,14 +2621,14 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
// We have a temp because CompareRoot might clobber kScratchRegister.
Register cell = ToRegister(instr->temp());
ASSERT(!value.is(cell));
- __ movq(cell, cell_handle, RelocInfo::GLOBAL_PROPERTY_CELL);
+ __ movq(cell, cell_handle, RelocInfo::CELL);
__ CompareRoot(Operand(cell, 0), Heap::kTheHoleValueRootIndex);
DeoptimizeIf(equal, instr->environment());
// Store the value.
__ movq(Operand(cell, 0), value);
} else {
// Store the value.
- __ movq(kScratchRegister, cell_handle, RelocInfo::GLOBAL_PROPERTY_CELL);
+ __ movq(kScratchRegister, cell_handle, RelocInfo::CELL);
__ movq(Operand(kScratchRegister, 0), value);
}
// Cells are always rescanned, so no write barrier here.
@@ -2364,9 +2683,9 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
__ movq(target, value);
if (instr->hydrogen()->NeedsWriteBarrier()) {
- HType type = instr->hydrogen()->value()->type();
SmiCheck check_needed =
- type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+ instr->hydrogen()->value()->IsHeapObject()
+ ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
int offset = Context::SlotOffset(instr->slot_index());
Register scratch = ToRegister(instr->temp());
__ RecordWriteContextSlot(context,
@@ -2383,122 +2702,35 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
- Register object = ToRegister(instr->object());
- Register result = ToRegister(instr->result());
- if (instr->hydrogen()->is_in_object()) {
- __ movq(result, FieldOperand(object, instr->hydrogen()->offset()));
- } else {
- __ movq(result, FieldOperand(object, JSObject::kPropertiesOffset));
- __ movq(result, FieldOperand(result, instr->hydrogen()->offset()));
- }
-}
-
-
-void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
- Register object,
- Handle<Map> type,
- Handle<String> name,
- LEnvironment* env) {
- LookupResult lookup(isolate());
- type->LookupDescriptor(NULL, *name, &lookup);
- ASSERT(lookup.IsFound() || lookup.IsCacheable());
- if (lookup.IsField()) {
- int index = lookup.GetLocalFieldIndexFromMap(*type);
- int offset = index * kPointerSize;
- if (index < 0) {
- // Negative property indices are in-object properties, indexed
- // from the end of the fixed part of the object.
- __ movq(result, FieldOperand(object, offset + type->instance_size()));
+ HObjectAccess access = instr->hydrogen()->access();
+ int offset = access.offset();
+
+ if (access.IsExternalMemory()) {
+ Register result = ToRegister(instr->result());
+ if (instr->object()->IsConstantOperand()) {
+ ASSERT(result.is(rax));
+ __ load_rax(ToExternalReference(LConstantOperand::cast(instr->object())));
} else {
- // Non-negative property indices are in the properties array.
- __ movq(result, FieldOperand(object, JSObject::kPropertiesOffset));
- __ movq(result, FieldOperand(result, offset + FixedArray::kHeaderSize));
- }
- } else if (lookup.IsConstantFunction()) {
- Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
- __ LoadHeapObject(result, function);
- } else {
- // Negative lookup.
- // Check prototypes.
- Handle<HeapObject> current(HeapObject::cast((*type)->prototype()));
- Heap* heap = type->GetHeap();
- while (*current != heap->null_value()) {
- __ LoadHeapObject(result, current);
- __ Cmp(FieldOperand(result, HeapObject::kMapOffset),
- Handle<Map>(current->map()));
- DeoptimizeIf(not_equal, env);
- current =
- Handle<HeapObject>(HeapObject::cast(current->map()->prototype()));
+ Register object = ToRegister(instr->object());
+ __ Load(result, MemOperand(object, offset), access.representation());
}
- __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
+ return;
}
-}
-
-// Check for cases where EmitLoadFieldOrConstantFunction needs to walk the
-// prototype chain, which causes unbounded code generation.
-static bool CompactEmit(SmallMapList* list,
- Handle<String> name,
- int i,
- Isolate* isolate) {
- Handle<Map> map = list->at(i);
- // If the map has ElementsKind transitions, we will generate map checks
- // for each kind in __ CompareMap(..., ALLOW_ELEMENTS_TRANSITION_MAPS).
- if (map->HasElementsTransition()) return false;
- LookupResult lookup(isolate);
- map->LookupDescriptor(NULL, *name, &lookup);
- return lookup.IsField() || lookup.IsConstantFunction();
-}
-
-
-void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
Register object = ToRegister(instr->object());
- Register result = ToRegister(instr->result());
-
- int map_count = instr->hydrogen()->types()->length();
- bool need_generic = instr->hydrogen()->need_generic();
-
- if (map_count == 0 && !need_generic) {
- DeoptimizeIf(no_condition, instr->environment());
+ if (FLAG_track_double_fields &&
+ instr->hydrogen()->representation().IsDouble()) {
+ XMMRegister result = ToDoubleRegister(instr->result());
+ __ movsd(result, FieldOperand(object, offset));
return;
}
- Handle<String> name = instr->hydrogen()->name();
- Label done;
- bool all_are_compact = true;
- for (int i = 0; i < map_count; ++i) {
- if (!CompactEmit(instr->hydrogen()->types(), name, i, isolate())) {
- all_are_compact = false;
- break;
- }
- }
- for (int i = 0; i < map_count; ++i) {
- bool last = (i == map_count - 1);
- Handle<Map> map = instr->hydrogen()->types()->at(i);
- Label check_passed;
- __ CompareMap(object, map, &check_passed, ALLOW_ELEMENT_TRANSITION_MAPS);
- if (last && !need_generic) {
- DeoptimizeIf(not_equal, instr->environment());
- __ bind(&check_passed);
- EmitLoadFieldOrConstantFunction(
- result, object, map, name, instr->environment());
- } else {
- Label next;
- bool compact = all_are_compact ? true :
- CompactEmit(instr->hydrogen()->types(), name, i, isolate());
- __ j(not_equal, &next, compact ? Label::kNear : Label::kFar);
- __ bind(&check_passed);
- EmitLoadFieldOrConstantFunction(
- result, object, map, name, instr->environment());
- __ jmp(&done, all_are_compact ? Label::kNear : Label::kFar);
- __ bind(&next);
- }
- }
- if (need_generic) {
- __ Move(rcx, name);
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
+
+ Register result = ToRegister(instr->result());
+ if (!access.IsInobject()) {
+ __ movq(result, FieldOperand(object, JSObject::kPropertiesOffset));
+ object = result;
}
- __ bind(&done);
+ __ Load(result, FieldOperand(object, offset), access.representation());
}
@@ -2553,38 +2785,9 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
}
-void LCodeGen::DoLoadElements(LLoadElements* instr) {
+void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
Register result = ToRegister(instr->result());
- Register input = ToRegister(instr->object());
- __ movq(result, FieldOperand(input, JSObject::kElementsOffset));
- if (FLAG_debug_code) {
- Label done, ok, fail;
- __ CompareRoot(FieldOperand(result, HeapObject::kMapOffset),
- Heap::kFixedArrayMapRootIndex);
- __ j(equal, &done, Label::kNear);
- __ CompareRoot(FieldOperand(result, HeapObject::kMapOffset),
- Heap::kFixedCOWArrayMapRootIndex);
- __ j(equal, &done, Label::kNear);
- Register temp((result.is(rax)) ? rbx : rax);
- __ push(temp);
- __ movq(temp, FieldOperand(result, HeapObject::kMapOffset));
- __ movzxbq(temp, FieldOperand(temp, Map::kBitField2Offset));
- __ and_(temp, Immediate(Map::kElementsKindMask));
- __ shr(temp, Immediate(Map::kElementsKindShift));
- __ cmpl(temp, Immediate(GetInitialFastElementsKind()));
- __ j(less, &fail, Label::kNear);
- __ cmpl(temp, Immediate(TERMINAL_FAST_ELEMENTS_KIND));
- __ j(less_equal, &ok, Label::kNear);
- __ cmpl(temp, Immediate(FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND));
- __ j(less, &fail, Label::kNear);
- __ cmpl(temp, Immediate(LAST_EXTERNAL_ARRAY_ELEMENTS_KIND));
- __ j(less_equal, &ok, Label::kNear);
- __ bind(&fail);
- __ Abort("Check for fast or external elements failed");
- __ bind(&ok);
- __ pop(temp);
- __ bind(&done);
- }
+ __ LoadRoot(result, instr->index());
}
@@ -2599,71 +2802,113 @@ void LCodeGen::DoLoadExternalArrayPointer(
void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
Register arguments = ToRegister(instr->arguments());
- Register length = ToRegister(instr->length());
Register result = ToRegister(instr->result());
- // There are two words between the frame pointer and the last argument.
- // Subtracting from length accounts for one of them add one more.
- if (instr->index()->IsRegister()) {
- __ subl(length, ToRegister(instr->index()));
+
+ if (instr->length()->IsConstantOperand() &&
+ instr->index()->IsConstantOperand()) {
+ int32_t const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+ int32_t const_length = ToInteger32(LConstantOperand::cast(instr->length()));
+ StackArgumentsAccessor args(arguments, const_length,
+ ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ __ movq(result, args.GetArgumentOperand(const_index));
} else {
- __ subl(length, ToOperand(instr->index()));
+ Register length = ToRegister(instr->length());
+ // There are two words between the frame pointer and the last argument.
+ // Subtracting the index from length accounts for one of them; add one more.
+ if (instr->index()->IsRegister()) {
+ __ subl(length, ToRegister(instr->index()));
+ } else {
+ __ subl(length, ToOperand(instr->index()));
+ }
+ StackArgumentsAccessor args(arguments, length,
+ ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ __ movq(result, args.GetArgumentOperand(0));
}
- __ movq(result, Operand(arguments, length, times_pointer_size, kPointerSize));
}
-void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
- Register result = ToRegister(instr->result());
+void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
+ ElementsKind elements_kind = instr->elements_kind();
LOperand* key = instr->key();
if (!key->IsConstantOperand()) {
Register key_reg = ToRegister(key);
- // Even though the HLoad/StoreKeyedFastElement instructions force the input
- // representation for the key to be an integer, the input gets replaced
- // during bound check elimination with the index argument to the bounds
- // check, which can be tagged, so that case must be handled here, too.
- if (instr->hydrogen()->key()->representation().IsTagged()) {
- __ SmiToInteger64(key_reg, key_reg);
- } else if (instr->hydrogen()->IsDehoisted()) {
+ // Even though the HLoad/StoreKeyed (in this case) instructions force
+ // the input representation for the key to be an integer, the input
+ // gets replaced during bound check elimination with the index argument
+ // to the bounds check, which can be tagged, so that case must be
+ // handled here, too.
+ if (instr->hydrogen()->IsDehoisted()) {
// Sign extend key because it could be a 32 bit negative value
// and the dehoisted address computation happens in 64 bits
__ movsxlq(key_reg, key_reg);
}
}
+ Operand operand(BuildFastArrayOperand(
+ instr->elements(),
+ key,
+ elements_kind,
+ 0,
+ instr->additional_index()));
- // Load the result.
- __ movq(result,
- BuildFastArrayOperand(instr->elements(),
- key,
- FAST_ELEMENTS,
- FixedArray::kHeaderSize - kHeapObjectTag,
- instr->additional_index()));
-
- // Check for the hole value.
- if (instr->hydrogen()->RequiresHoleCheck()) {
- if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
- Condition smi = __ CheckSmi(result);
- DeoptimizeIf(NegateCondition(smi), instr->environment());
- } else {
- __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(equal, instr->environment());
+ if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
+ XMMRegister result(ToDoubleRegister(instr->result()));
+ __ movss(result, operand);
+ __ cvtss2sd(result, result);
+ } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
+ __ movsd(ToDoubleRegister(instr->result()), operand);
+ } else {
+ Register result(ToRegister(instr->result()));
+ switch (elements_kind) {
+ case EXTERNAL_BYTE_ELEMENTS:
+ __ movsxbq(result, operand);
+ break;
+ case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ case EXTERNAL_PIXEL_ELEMENTS:
+ __ movzxbq(result, operand);
+ break;
+ case EXTERNAL_SHORT_ELEMENTS:
+ __ movsxwq(result, operand);
+ break;
+ case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ __ movzxwq(result, operand);
+ break;
+ case EXTERNAL_INT_ELEMENTS:
+ __ movsxlq(result, operand);
+ break;
+ case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ __ movl(result, operand);
+ if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
+ __ testl(result, result);
+ DeoptimizeIf(negative, instr->environment());
+ }
+ break;
+ case EXTERNAL_FLOAT_ELEMENTS:
+ case EXTERNAL_DOUBLE_ELEMENTS:
+ case FAST_ELEMENTS:
+ case FAST_SMI_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
+ case DICTIONARY_ELEMENTS:
+ case NON_STRICT_ARGUMENTS_ELEMENTS:
+ UNREACHABLE();
+ break;
}
}
}
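A standalone sketch (not part of the patch) of why the EXTERNAL_UNSIGNED_INT load above may deoptimize: the element is a full uint32 but the untagged result is treated as int32, so a value with the top bit set (reported by the sign flag after testl) cannot be represented unless every use truncates to int32 anyway.

    #include <cstdint>

    bool Uint32FitsInInt32(uint32_t element) {
      return element <= 0x7FFFFFFFu;
    }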
-void LCodeGen::DoLoadKeyedFastDoubleElement(
- LLoadKeyedFastDoubleElement* instr) {
+void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
XMMRegister result(ToDoubleRegister(instr->result()));
LOperand* key = instr->key();
if (!key->IsConstantOperand()) {
Register key_reg = ToRegister(key);
- // Even though the HLoad/StoreKeyedFastElement instructions force the input
+ // Even though the HLoad/StoreKeyed instructions force the input
// representation for the key to be an integer, the input gets replaced
// during bound check elimination with the index argument to the bounds
// check, which can be tagged, so that case must be handled here, too.
- if (instr->hydrogen()->key()->representation().IsTagged()) {
- __ SmiToInteger64(key_reg, key_reg);
- } else if (instr->hydrogen()->IsDehoisted()) {
+ if (instr->hydrogen()->IsDehoisted()) {
// Sign extend key because it could be a 32 bit negative value
// and the dehoisted address computation happens in 64 bits
__ movsxlq(key_reg, key_reg);
@@ -2693,6 +2938,55 @@ void LCodeGen::DoLoadKeyedFastDoubleElement(
}
+void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
+ Register result = ToRegister(instr->result());
+ LOperand* key = instr->key();
+ if (!key->IsConstantOperand()) {
+ Register key_reg = ToRegister(key);
+ // Even though the HLoad/StoreKeyed instructions force
+ // the input representation for the key to be an integer, the input
+ // gets replaced during bound check elimination with the index
+ // argument to the bounds check, which can be tagged, so that
+ // case must be handled here, too.
+ if (instr->hydrogen()->IsDehoisted()) {
+ // Sign extend key because it could be a 32 bit negative value
+ // and the dehoisted address computation happens in 64 bits
+ __ movsxlq(key_reg, key_reg);
+ }
+ }
+
+ // Load the result.
+ __ movq(result,
+ BuildFastArrayOperand(instr->elements(),
+ key,
+ FAST_ELEMENTS,
+ FixedArray::kHeaderSize - kHeapObjectTag,
+ instr->additional_index()));
+
+ // Check for the hole value.
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
+ Condition smi = __ CheckSmi(result);
+ DeoptimizeIf(NegateCondition(smi), instr->environment());
+ } else {
+ __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
+ DeoptimizeIf(equal, instr->environment());
+ }
+ }
+}
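The hole check at the end of DoLoadKeyedFixedArray deoptimizes on two different conditions depending on the elements kind. A small C++ model of the accepted values (the smi test assumes the x64 convention of a zero low tag bit; the names are illustrative, not V8 API):

    #include <cstdint>

    // Sketch only: models the deopt conditions of the hole check above.
    bool LoadedElementIsValid(uint64_t value, bool fast_smi_kind,
                              uint64_t the_hole, bool requires_hole_check) {
      if (!requires_hole_check) return true;
      if (fast_smi_kind) {
        // Fast-smi backing stores may only hold smis; anything else
        // (including the hole) forces a deoptimization.
        return (value & 1) == 0;
      }
      // Other fast kinds only reject the hole sentinel itself.
      return value != the_hole;
    }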
+
+
+void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
+ if (instr->is_external()) {
+ DoLoadKeyedExternalArray(instr);
+ } else if (instr->hydrogen()->representation().IsDouble()) {
+ DoLoadKeyedFixedDoubleArray(instr);
+ } else {
+ DoLoadKeyedFixedArray(instr);
+ }
+}
+
+
Operand LCodeGen::BuildFastArrayOperand(
LOperand* elements_pointer,
LOperand* key,
@@ -2702,9 +2996,9 @@ Operand LCodeGen::BuildFastArrayOperand(
Register elements_pointer_reg = ToRegister(elements_pointer);
int shift_size = ElementsKindToShiftSize(elements_kind);
if (key->IsConstantOperand()) {
- int constant_value = ToInteger32(LConstantOperand::cast(key));
+ int32_t constant_value = ToInteger32(LConstantOperand::cast(key));
if (constant_value & 0xF0000000) {
- Abort("array index constant value too big");
+ Abort(kArrayIndexConstantValueTooBig);
}
return Operand(elements_pointer_reg,
((constant_value + additional_index) << shift_size)
@@ -2719,80 +3013,6 @@ Operand LCodeGen::BuildFastArrayOperand(
}
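For the constant-key branch above, the operand reduces to a single base-plus-displacement address. A sketch of the arithmetic, with the header size and tag adjustment folded into one byte offset (names illustrative):

    #include <cstdint>

    // Sketch only: the effective address produced for a constant key.
    // 'shift_size' comes from ElementsKindToShiftSize (3 for FAST_ELEMENTS
    // on x64); 'header_offset' is e.g. FixedArray::kHeaderSize - kHeapObjectTag
    // when the base pointer is tagged.
    uint8_t* FastArrayElementAddress(uint8_t* base, int32_t constant_key,
                                     int32_t additional_index,
                                     int shift_size, int header_offset) {
      return base + ((constant_key + additional_index) << shift_size)
                  + header_offset;
    }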
-void LCodeGen::DoLoadKeyedSpecializedArrayElement(
- LLoadKeyedSpecializedArrayElement* instr) {
- ElementsKind elements_kind = instr->elements_kind();
- LOperand* key = instr->key();
- if (!key->IsConstantOperand()) {
- Register key_reg = ToRegister(key);
- // Even though the HLoad/StoreKeyedFastElement instructions force the input
- // representation for the key to be an integer, the input gets replaced
- // during bound check elimination with the index argument to the bounds
- // check, which can be tagged, so that case must be handled here, too.
- if (instr->hydrogen()->key()->representation().IsTagged()) {
- __ SmiToInteger64(key_reg, key_reg);
- } else if (instr->hydrogen()->IsDehoisted()) {
- // Sign extend key because it could be a 32 bit negative value
- // and the dehoisted address computation happens in 64 bits
- __ movsxlq(key_reg, key_reg);
- }
- }
- Operand operand(BuildFastArrayOperand(
- instr->external_pointer(),
- key,
- elements_kind,
- 0,
- instr->additional_index()));
-
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- XMMRegister result(ToDoubleRegister(instr->result()));
- __ movss(result, operand);
- __ cvtss2sd(result, result);
- } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- __ movsd(ToDoubleRegister(instr->result()), operand);
- } else {
- Register result(ToRegister(instr->result()));
- switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
- __ movsxbq(result, operand);
- break;
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_PIXEL_ELEMENTS:
- __ movzxbq(result, operand);
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- __ movsxwq(result, operand);
- break;
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ movzxwq(result, operand);
- break;
- case EXTERNAL_INT_ELEMENTS:
- __ movsxlq(result, operand);
- break;
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ movl(result, operand);
- if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
- __ testl(result, result);
- DeoptimizeIf(negative, instr->environment());
- }
- break;
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
- }
-}
-
-
void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
ASSERT(ToRegister(instr->object()).is(rdx));
ASSERT(ToRegister(instr->key()).is(rax));
@@ -2806,7 +3026,7 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
Register result = ToRegister(instr->result());
if (instr->hydrogen()->from_inlined()) {
- __ lea(result, Operand(rsp, -2 * kPointerSize));
+ __ lea(result, Operand(rsp, -kFPOnStackSize + -kPCOnStackSize));
} else {
// Check for arguments adapter frame.
Label done, adapted;
@@ -2928,7 +3148,9 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
__ testl(length, length);
__ j(zero, &invoke, Label::kNear);
__ bind(&loop);
- __ push(Operand(elements, length, times_pointer_size, 1 * kPointerSize));
+ StackArgumentsAccessor args(elements, length,
+ ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ __ push(args.GetArgumentOperand(0));
__ decl(length);
__ j(not_zero, &loop);
@@ -2936,7 +3158,6 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
__ bind(&invoke);
ASSERT(instr->HasPointerMap());
LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
SafepointGenerator safepoint_generator(
this, pointers, Safepoint::kLazyDeopt);
ParameterCount actual(rax);
@@ -2979,7 +3200,7 @@ void LCodeGen::DoOuterContext(LOuterContext* instr) {
void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
__ push(rsi); // The context is the first argument.
- __ PushHeapObject(instr->hydrogen()->pairs());
+ __ Push(instr->hydrogen()->pairs());
__ Push(Smi::FromInt(instr->hydrogen()->flags()));
CallRuntime(Runtime::kDeclareGlobals, 3, instr);
}
@@ -2999,19 +3220,21 @@ void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
+ int formal_parameter_count,
int arity,
LInstruction* instr,
CallKind call_kind,
RDIState rdi_state) {
- bool can_invoke_directly = !function->NeedsArgumentsAdaption() ||
- function->shared()->formal_parameter_count() == arity;
+ bool dont_adapt_arguments =
+ formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
+ bool can_invoke_directly =
+ dont_adapt_arguments || formal_parameter_count == arity;
LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
if (can_invoke_directly) {
if (rdi_state == RDI_UNINITIALIZED) {
- __ LoadHeapObject(rdi, function);
+ __ Move(rdi, function);
}
// Change context.
@@ -3019,13 +3242,13 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
// Set rax to arguments count if adaption is not needed. Assumes that rax
// is available to write to at this point.
- if (!function->NeedsArgumentsAdaption()) {
+ if (dont_adapt_arguments) {
__ Set(rax, arity);
}
// Invoke function.
__ SetCallKind(rcx, call_kind);
- if (*function == *info()->closure()) {
+ if (function.is_identical_to(info()->closure())) {
__ CallSelf();
} else {
__ call(FieldOperand(rdi, JSFunction::kCodeEntryOffset));
@@ -3038,7 +3261,9 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
SafepointGenerator generator(
this, pointers, Safepoint::kLazyDeopt);
ParameterCount count(arity);
- __ InvokeFunction(function, count, CALL_FUNCTION, generator, call_kind);
+ ParameterCount expected(formal_parameter_count);
+ __ InvokeFunction(
+ function, expected, count, CALL_FUNCTION, generator, call_kind);
}
// Restore context.
@@ -3048,7 +3273,8 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
ASSERT(ToRegister(instr->result()).is(rax));
- CallKnownFunction(instr->function(),
+ CallKnownFunction(instr->hydrogen()->function(),
+ instr->hydrogen()->formal_parameter_count(),
instr->arity(),
instr,
CALL_AS_METHOD,
@@ -3056,116 +3282,117 @@ void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
}
-void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
+void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
Register input_reg = ToRegister(instr->value());
__ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
Heap::kHeapNumberMapRootIndex);
DeoptimizeIf(not_equal, instr->environment());
- Label done;
+ Label slow, allocated, done;
Register tmp = input_reg.is(rax) ? rcx : rax;
Register tmp2 = tmp.is(rcx) ? rdx : input_reg.is(rcx) ? rdx : rcx;
// Preserve the value of all registers.
PushSafepointRegistersScope scope(this);
- Label negative;
__ movl(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
// Check the sign of the argument. If the argument is positive, just
// return it. We do not need to patch the stack since |input| and
// |result| are the same register and |input| will be restored
// unchanged by popping safepoint registers.
__ testl(tmp, Immediate(HeapNumber::kSignMask));
- __ j(not_zero, &negative);
- __ jmp(&done);
-
- __ bind(&negative);
+ __ j(zero, &done);
- Label allocated, slow;
__ AllocateHeapNumber(tmp, tmp2, &slow);
- __ jmp(&allocated);
+ __ jmp(&allocated, Label::kNear);
// Slow case: Call the runtime system to do the number allocation.
__ bind(&slow);
-
CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
// Set the pointer to the new heap number in tmp.
- if (!tmp.is(rax)) {
- __ movq(tmp, rax);
- }
-
+ if (!tmp.is(rax)) __ movq(tmp, rax);
// Restore input_reg after call to runtime.
__ LoadFromSafepointRegisterSlot(input_reg, input_reg);
__ bind(&allocated);
- __ movq(tmp2, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ __ MoveDouble(tmp2, FieldOperand(input_reg, HeapNumber::kValueOffset));
__ shl(tmp2, Immediate(1));
__ shr(tmp2, Immediate(1));
- __ movq(FieldOperand(tmp, HeapNumber::kValueOffset), tmp2);
+ __ MoveDouble(FieldOperand(tmp, HeapNumber::kValueOffset), tmp2);
__ StoreToSafepointRegisterSlot(input_reg, tmp);
__ bind(&done);
}
-void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
+void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
Register input_reg = ToRegister(instr->value());
__ testl(input_reg, input_reg);
Label is_positive;
- __ j(not_sign, &is_positive);
+ __ j(not_sign, &is_positive, Label::kNear);
__ negl(input_reg); // Sets flags.
DeoptimizeIf(negative, instr->environment());
__ bind(&is_positive);
}
-void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
+void LCodeGen::EmitSmiMathAbs(LMathAbs* instr) {
+ Register input_reg = ToRegister(instr->value());
+ __ testq(input_reg, input_reg);
+ Label is_positive;
+ __ j(not_sign, &is_positive, Label::kNear);
+ __ neg(input_reg); // Sets flags.
+ DeoptimizeIf(negative, instr->environment());
+ __ bind(&is_positive);
+}
+
+
+void LCodeGen::DoMathAbs(LMathAbs* instr) {
// Class for deferred case.
- class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
+ class DeferredMathAbsTaggedHeapNumber V8_FINAL : public LDeferredCode {
public:
- DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
- LUnaryMathOperation* instr)
+ DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
+ virtual void Generate() V8_OVERRIDE {
codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
}
- virtual LInstruction* instr() { return instr_; }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
- LUnaryMathOperation* instr_;
+ LMathAbs* instr_;
};
ASSERT(instr->value()->Equals(instr->result()));
Representation r = instr->hydrogen()->value()->representation();
if (r.IsDouble()) {
- XMMRegister scratch = xmm0;
+ XMMRegister scratch = double_scratch0();
XMMRegister input_reg = ToDoubleRegister(instr->value());
__ xorps(scratch, scratch);
__ subsd(scratch, input_reg);
- __ andpd(input_reg, scratch);
+ __ andps(input_reg, scratch);
} else if (r.IsInteger32()) {
EmitIntegerMathAbs(instr);
+ } else if (r.IsSmi()) {
+ EmitSmiMathAbs(instr);
} else { // Tagged case.
DeferredMathAbsTaggedHeapNumber* deferred =
new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
Register input_reg = ToRegister(instr->value());
// Smi check.
__ JumpIfNotSmi(input_reg, deferred->entry());
- __ SmiToInteger32(input_reg, input_reg);
- EmitIntegerMathAbs(instr);
- __ Integer32ToSmi(input_reg, input_reg);
+ EmitSmiMathAbs(instr);
__ bind(deferred->exit());
}
}
-void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
- XMMRegister xmm_scratch = xmm0;
+void LCodeGen::DoMathFloor(LMathFloor* instr) {
+ XMMRegister xmm_scratch = double_scratch0();
Register output_reg = ToRegister(instr->result());
XMMRegister input_reg = ToDoubleRegister(instr->value());
if (CpuFeatures::IsSupported(SSE4_1)) {
- CpuFeatures::Scope scope(SSE4_1);
+ CpuFeatureScope scope(masm(), SSE4_1);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
// Deoptimize if minus zero.
__ movq(output_reg, input_reg);
@@ -3178,7 +3405,7 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
DeoptimizeIf(equal, instr->environment());
} else {
Label negative_sign, done;
- // Deoptimize on negative inputs.
+ // Deoptimize on unordered.
__ xorps(xmm_scratch, xmm_scratch); // Zero the register.
__ ucomisd(input_reg, xmm_scratch);
DeoptimizeIf(parity_even, instr->environment());
@@ -3207,7 +3434,7 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
__ bind(&negative_sign);
// Truncate, then compare and compensate.
__ cvttsd2si(output_reg, input_reg);
- __ cvtlsi2sd(xmm_scratch, output_reg);
+ __ Cvtlsi2sd(xmm_scratch, output_reg);
__ ucomisd(input_reg, xmm_scratch);
__ j(equal, &done, Label::kNear);
__ subl(output_reg, Immediate(1));
@@ -3218,62 +3445,76 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
}
-void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
- const XMMRegister xmm_scratch = xmm0;
+void LCodeGen::DoMathRound(LMathRound* instr) {
+ const XMMRegister xmm_scratch = double_scratch0();
Register output_reg = ToRegister(instr->result());
XMMRegister input_reg = ToDoubleRegister(instr->value());
+ static int64_t one_half = V8_INT64_C(0x3FE0000000000000); // 0.5
+ static int64_t minus_one_half = V8_INT64_C(0xBFE0000000000000); // -0.5
- Label done;
- // xmm_scratch = 0.5
- __ movq(kScratchRegister, V8_INT64_C(0x3FE0000000000000), RelocInfo::NONE);
+ Label done, round_to_zero, below_one_half, do_not_compensate, restore;
+ __ movq(kScratchRegister, one_half, RelocInfo::NONE64);
__ movq(xmm_scratch, kScratchRegister);
- Label below_half;
__ ucomisd(xmm_scratch, input_reg);
- // If input_reg is NaN, this doesn't jump.
- __ j(above, &below_half, Label::kNear);
- // input = input + 0.5
- // This addition might give a result that isn't the correct for
- // rounding, due to loss of precision, but only for a number that's
- // so big that the conversion below will overflow anyway.
+ __ j(above, &below_one_half);
+
+ // CVTTSD2SI rounds towards zero, since 0.5 <= x, we use floor(0.5 + x).
__ addsd(xmm_scratch, input_reg);
- // Compute Math.floor(input).
- // Use truncating instruction (OK because input is positive).
__ cvttsd2si(output_reg, xmm_scratch);
// Overflow is signalled with minint.
__ cmpl(output_reg, Immediate(0x80000000));
+ __ RecordComment("D2I conversion overflow");
+ DeoptimizeIf(equal, instr->environment());
+ __ jmp(&done);
+
+ __ bind(&below_one_half);
+ __ movq(kScratchRegister, minus_one_half, RelocInfo::NONE64);
+ __ movq(xmm_scratch, kScratchRegister);
+ __ ucomisd(xmm_scratch, input_reg);
+ __ j(below_equal, &round_to_zero);
+
+ // CVTTSD2SI rounds towards zero, we use ceil(x - (-0.5)) and then
+ // compare and compensate.
+ __ movq(kScratchRegister, input_reg); // Back up input_reg.
+ __ subsd(input_reg, xmm_scratch);
+ __ cvttsd2si(output_reg, input_reg);
+ // Catch minint due to overflow, and to prevent overflow when compensating.
+ __ cmpl(output_reg, Immediate(0x80000000));
+ __ RecordComment("D2I conversion overflow");
DeoptimizeIf(equal, instr->environment());
+
+ __ Cvtlsi2sd(xmm_scratch, output_reg);
+ __ ucomisd(input_reg, xmm_scratch);
+ __ j(equal, &restore, Label::kNear);
+ __ subl(output_reg, Immediate(1));
+ // No overflow because we already ruled out minint.
+ __ bind(&restore);
+ __ movq(input_reg, kScratchRegister); // Restore input_reg.
__ jmp(&done);
- __ bind(&below_half);
+ __ bind(&round_to_zero);
+ // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if
+ // we can ignore the difference between a result of -0 and +0.
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- // Bailout if negative (including -0).
__ movq(output_reg, input_reg);
__ testq(output_reg, output_reg);
+ __ RecordComment("Minus zero");
DeoptimizeIf(negative, instr->environment());
- } else {
- // Bailout if below -0.5, otherwise round to (positive) zero, even
- // if negative.
- // xmm_scratch = -0.5

- __ movq(kScratchRegister, V8_INT64_C(0xBFE0000000000000), RelocInfo::NONE);
- __ movq(xmm_scratch, kScratchRegister);
- __ ucomisd(input_reg, xmm_scratch);
- DeoptimizeIf(below, instr->environment());
}
- __ xorl(output_reg, output_reg);
-
+ __ Set(output_reg, 0);
__ bind(&done);
}
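The rewritten DoMathRound splits the input into three ranges instead of a single "add 0.5 and truncate" step. A rough C++ model of the value it produces (the NaN/overflow deopts and the optional minus-zero bailout are omitted):

    #include <cstdint>

    // Sketch only: round-half-up as emitted by DoMathRound.
    int32_t RoundHalfUp(double x) {
      if (x >= 0.5) {
        // CVTTSD2SI truncates towards zero, so for non-negative values
        // floor(x + 0.5) is just a truncation.
        return static_cast<int32_t>(x + 0.5);
      }
      if (x > -0.5) {
        // Everything in (-0.5, 0.5) rounds to (plus or minus) zero.
        return 0;
      }
      // x <= -0.5: truncate x - (-0.5) towards zero, then compensate by one
      // unless the truncation was exact, which yields floor(x + 0.5).
      double shifted = x - (-0.5);
      int32_t result = static_cast<int32_t>(shifted);
      if (static_cast<double>(result) != shifted) result -= 1;
      return result;
    }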
-void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
+void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
XMMRegister input_reg = ToDoubleRegister(instr->value());
ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
__ sqrtsd(input_reg, input_reg);
}
-void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
- XMMRegister xmm_scratch = xmm0;
+void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
+ XMMRegister xmm_scratch = double_scratch0();
XMMRegister input_reg = ToDoubleRegister(instr->value());
ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
@@ -3283,7 +3524,7 @@ void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
Label done, sqrt;
// Check base for -Infinity. According to IEEE-754, double-precision
// -Infinity has the highest 12 bits set and the lowest 52 bits cleared.
- __ movq(kScratchRegister, V8_INT64_C(0xFFF0000000000000), RelocInfo::NONE);
+ __ movq(kScratchRegister, V8_INT64_C(0xFFF0000000000000), RelocInfo::NONE64);
__ movq(xmm_scratch, kScratchRegister);
__ ucomisd(xmm_scratch, input_reg);
// Comparing -Infinity with NaN results in "unordered", which sets the
@@ -3309,12 +3550,7 @@ void LCodeGen::DoPower(LPower* instr) {
// Having marked this as a call, we can use any registers.
// Just make sure that the input/output registers are the expected ones.
- // Choose register conforming to calling convention (when bailing out).
-#ifdef _WIN64
Register exponent = rdx;
-#else
- Register exponent = rdi;
-#endif
ASSERT(!instr->right()->IsRegister() ||
ToRegister(instr->right()).is(exponent));
ASSERT(!instr->right()->IsDoubleRegister() ||
@@ -3322,7 +3558,10 @@ void LCodeGen::DoPower(LPower* instr) {
ASSERT(ToDoubleRegister(instr->left()).is(xmm2));
ASSERT(ToDoubleRegister(instr->result()).is(xmm3));
- if (exponent_type.IsTagged()) {
+ if (exponent_type.IsSmi()) {
+ MathPowStub stub(MathPowStub::TAGGED);
+ __ CallStub(&stub);
+ } else if (exponent_type.IsTagged()) {
Label no_deopt;
__ JumpIfSmi(exponent, &no_deopt);
__ CmpObjectType(exponent, HEAP_NUMBER_TYPE, rcx);
@@ -3342,160 +3581,131 @@ void LCodeGen::DoPower(LPower* instr) {
void LCodeGen::DoRandom(LRandom* instr) {
- class DeferredDoRandom: public LDeferredCode {
- public:
- DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredRandom(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LRandom* instr_;
- };
-
- DeferredDoRandom* deferred = new(zone()) DeferredDoRandom(this, instr);
-
- // Having marked this instruction as a call we can use any
- // registers.
- ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
-
- // Choose the right register for the first argument depending on
- // calling convention.
-#ifdef _WIN64
- ASSERT(ToRegister(instr->global_object()).is(rcx));
- Register global_object = rcx;
-#else
- ASSERT(ToRegister(instr->global_object()).is(rdi));
- Register global_object = rdi;
-#endif
-
+ // Assert that register size is twice the size of each seed.
static const int kSeedSize = sizeof(uint32_t);
STATIC_ASSERT(kPointerSize == 2 * kSeedSize);
- __ movq(global_object,
- FieldOperand(global_object, GlobalObject::kNativeContextOffset));
+ // Load native context
+ Register global_object = ToRegister(instr->global_object());
+ Register native_context = global_object;
+ __ movq(native_context, FieldOperand(
+ global_object, GlobalObject::kNativeContextOffset));
+
+ // Load state (FixedArray of the native context's random seeds)
static const int kRandomSeedOffset =
FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
- __ movq(rbx, FieldOperand(global_object, kRandomSeedOffset));
- // rbx: FixedArray of the native context's random seeds
+ Register state = native_context;
+ __ movq(state, FieldOperand(native_context, kRandomSeedOffset));
// Load state[0].
- __ movl(rax, FieldOperand(rbx, ByteArray::kHeaderSize));
- // If state[0] == 0, call runtime to initialize seeds.
- __ testl(rax, rax);
- __ j(zero, deferred->entry());
+ Register state0 = ToRegister(instr->scratch());
+ __ movl(state0, FieldOperand(state, ByteArray::kHeaderSize));
// Load state[1].
- __ movl(rcx, FieldOperand(rbx, ByteArray::kHeaderSize + kSeedSize));
+ Register state1 = ToRegister(instr->scratch2());
+ __ movl(state1, FieldOperand(state, ByteArray::kHeaderSize + kSeedSize));
// state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
- // Only operate on the lower 32 bit of rax.
- __ movl(rdx, rax);
- __ andl(rdx, Immediate(0xFFFF));
- __ imull(rdx, rdx, Immediate(18273));
- __ shrl(rax, Immediate(16));
- __ addl(rax, rdx);
+ Register scratch3 = ToRegister(instr->scratch3());
+ __ movzxwl(scratch3, state0);
+ __ imull(scratch3, scratch3, Immediate(18273));
+ __ shrl(state0, Immediate(16));
+ __ addl(state0, scratch3);
// Save state[0].
- __ movl(FieldOperand(rbx, ByteArray::kHeaderSize), rax);
+ __ movl(FieldOperand(state, ByteArray::kHeaderSize), state0);
// state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
- __ movl(rdx, rcx);
- __ andl(rdx, Immediate(0xFFFF));
- __ imull(rdx, rdx, Immediate(36969));
- __ shrl(rcx, Immediate(16));
- __ addl(rcx, rdx);
+ __ movzxwl(scratch3, state1);
+ __ imull(scratch3, scratch3, Immediate(36969));
+ __ shrl(state1, Immediate(16));
+ __ addl(state1, scratch3);
// Save state[1].
- __ movl(FieldOperand(rbx, ByteArray::kHeaderSize + kSeedSize), rcx);
+ __ movl(FieldOperand(state, ByteArray::kHeaderSize + kSeedSize), state1);
// Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
- __ shll(rax, Immediate(14));
- __ andl(rcx, Immediate(0x3FFFF));
- __ addl(rax, rcx);
+ Register random = state0;
+ __ shll(random, Immediate(14));
+ __ andl(state1, Immediate(0x3FFFF));
+ __ addl(random, state1);
- __ bind(deferred->exit());
// Convert 32 random bits in rax to 0.(32 random bits) in a double
// by computing:
 // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20).
- __ movl(rcx, Immediate(0x49800000)); // 1.0 x 2^20 as single.
- __ movd(xmm2, rcx);
- __ movd(xmm1, rax);
- __ cvtss2sd(xmm2, xmm2);
- __ xorps(xmm1, xmm2);
- __ subsd(xmm1, xmm2);
+ XMMRegister result = ToDoubleRegister(instr->result());
+ XMMRegister scratch4 = double_scratch0();
+ __ movq(scratch3, V8_INT64_C(0x4130000000000000),
+ RelocInfo::NONE64); // 1.0 x 2^20 as double
+ __ movq(scratch4, scratch3);
+ __ movd(result, random);
+ __ xorps(result, scratch4);
+ __ subsd(result, scratch4);
}
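The register-allocated DoRandom still computes the same two-seed multiply-with-carry update and the same exponent trick for turning 32 random bits into a double in [0, 1). A standalone sketch of that arithmetic in plain C++ (not V8 API):

    #include <cstdint>
    #include <cstring>

    // Sketch only: the seed update and bit trick emitted above, written
    // against a plain two-element seed array.
    double RandomFromSeeds(uint32_t state[2]) {
      state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16);
      state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16);
      uint32_t random = (state[0] << 14) + (state[1] & 0x3FFFF);

      // Put the 32 random bits into the low mantissa bits of 1.0 x 2^20 and
      // subtract 1.0 x 2^20, leaving 0.(32 random bits).
      uint64_t bits = UINT64_C(0x4130000000000000) | random;
      double d;
      std::memcpy(&d, &bits, sizeof(d));
      return d - 1048576.0;  // 1048576.0 == 2^20
    }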
-void LCodeGen::DoDeferredRandom(LRandom* instr) {
- __ PrepareCallCFunction(1);
- __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- // Return value is in rax.
+void LCodeGen::DoMathExp(LMathExp* instr) {
+ XMMRegister input = ToDoubleRegister(instr->value());
+ XMMRegister result = ToDoubleRegister(instr->result());
+ XMMRegister temp0 = double_scratch0();
+ Register temp1 = ToRegister(instr->temp1());
+ Register temp2 = ToRegister(instr->temp2());
+
+ MathExpGenerator::EmitMathExp(masm(), input, result, temp0, temp1, temp2);
}
-void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
- ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
- TranscendentalCacheStub stub(TranscendentalCache::LOG,
- TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+void LCodeGen::DoMathLog(LMathLog* instr) {
+ ASSERT(instr->value()->Equals(instr->result()));
+ XMMRegister input_reg = ToDoubleRegister(instr->value());
+ XMMRegister xmm_scratch = double_scratch0();
+ Label positive, done, zero;
+ __ xorps(xmm_scratch, xmm_scratch);
+ __ ucomisd(input_reg, xmm_scratch);
+ __ j(above, &positive, Label::kNear);
+ __ j(equal, &zero, Label::kNear);
+ ExternalReference nan =
+ ExternalReference::address_of_canonical_non_hole_nan();
+ Operand nan_operand = masm()->ExternalOperand(nan);
+ __ movsd(input_reg, nan_operand);
+ __ jmp(&done, Label::kNear);
+ __ bind(&zero);
+ ExternalReference ninf =
+ ExternalReference::address_of_negative_infinity();
+ Operand ninf_operand = masm()->ExternalOperand(ninf);
+ __ movsd(input_reg, ninf_operand);
+ __ jmp(&done, Label::kNear);
+ __ bind(&positive);
+ __ fldln2();
+ __ subq(rsp, Immediate(kDoubleSize));
+ __ movsd(Operand(rsp, 0), input_reg);
+ __ fld_d(Operand(rsp, 0));
+ __ fyl2x();
+ __ fstp_d(Operand(rsp, 0));
+ __ movsd(input_reg, Operand(rsp, 0));
+ __ addq(rsp, Immediate(kDoubleSize));
+ __ bind(&done);
}
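DoMathLog now inlines the computation instead of calling the transcendental cache stub: strictly positive inputs go through fldln2/fyl2x (which evaluates ln(2) * log2(x), i.e. ln(x)), zero yields negative infinity, and negative or NaN inputs yield NaN. In effect:

    #include <cmath>
    #include <limits>

    // Sketch only: the case split DoMathLog emits inline.
    double MathLogModel(double x) {
      if (x > 0.0) return std::log(x);
      if (x == 0.0) return -std::numeric_limits<double>::infinity();
      return std::numeric_limits<double>::quiet_NaN();  // x < 0 or NaN
    }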
-void LCodeGen::DoMathTan(LUnaryMathOperation* instr) {
+void LCodeGen::DoMathTan(LMathTan* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
TranscendentalCacheStub stub(TranscendentalCache::TAN,
TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
-void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
+void LCodeGen::DoMathCos(LMathCos* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
TranscendentalCacheStub stub(TranscendentalCache::COS,
TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
-void LCodeGen::DoMathSin(LUnaryMathOperation* instr) {
+void LCodeGen::DoMathSin(LMathSin* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
TranscendentalCacheStub stub(TranscendentalCache::SIN,
TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
- switch (instr->op()) {
- case kMathAbs:
- DoMathAbs(instr);
- break;
- case kMathFloor:
- DoMathFloor(instr);
- break;
- case kMathRound:
- DoMathRound(instr);
- break;
- case kMathSqrt:
- DoMathSqrt(instr);
- break;
- case kMathPowHalf:
- DoMathPowHalf(instr);
- break;
- case kMathCos:
- DoMathCos(instr);
- break;
- case kMathSin:
- DoMathSin(instr);
- break;
- case kMathTan:
- DoMathTan(instr);
- break;
- case kMathLog:
- DoMathLog(instr);
- break;
-
- default:
- UNREACHABLE();
- }
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
@@ -3503,15 +3713,16 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
ASSERT(ToRegister(instr->function()).is(rdi));
ASSERT(instr->HasPointerMap());
- if (instr->known_function().is_null()) {
+ Handle<JSFunction> known_function = instr->hydrogen()->known_function();
+ if (known_function.is_null()) {
LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
ParameterCount count(instr->arity());
__ InvokeFunction(rdi, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
} else {
- CallKnownFunction(instr->known_function(),
+ CallKnownFunction(known_function,
+ instr->hydrogen()->formal_parameter_count(),
instr->arity(),
instr,
CALL_AS_METHOD,
@@ -3551,7 +3762,7 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
int arity = instr->arity();
CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
}
@@ -3570,7 +3781,8 @@ void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
ASSERT(ToRegister(instr->result()).is(rax));
- CallKnownFunction(instr->target(),
+ CallKnownFunction(instr->hydrogen()->target(),
+ instr->hydrogen()->formal_parameter_count(),
instr->arity(),
instr,
CALL_AS_FUNCTION,
@@ -3582,29 +3794,138 @@ void LCodeGen::DoCallNew(LCallNew* instr) {
ASSERT(ToRegister(instr->constructor()).is(rdi));
ASSERT(ToRegister(instr->result()).is(rax));
+ __ Set(rax, instr->arity());
+ // No cell in rbx for construct type feedback in optimized code
+ Handle<Object> undefined_value(isolate()->factory()->undefined_value());
+ __ Move(rbx, undefined_value);
CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+}
+
+
+void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
+ ASSERT(ToRegister(instr->constructor()).is(rdi));
+ ASSERT(ToRegister(instr->result()).is(rax));
+
__ Set(rax, instr->arity());
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ __ Move(rbx, instr->hydrogen()->property_cell());
+ ElementsKind kind = instr->hydrogen()->elements_kind();
+ AllocationSiteOverrideMode override_mode =
+ (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
+ ? DISABLE_ALLOCATION_SITES
+ : DONT_OVERRIDE;
+ ContextCheckMode context_mode = CONTEXT_CHECK_NOT_REQUIRED;
+
+ if (instr->arity() == 0) {
+ ArrayNoArgumentConstructorStub stub(kind, context_mode, override_mode);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+ } else if (instr->arity() == 1) {
+ Label done;
+ if (IsFastPackedElementsKind(kind)) {
+ Label packed_case;
+ // A non-zero length argument requires the holey variant of the
+ // elements kind, so look at the first argument.
+ __ movq(rcx, Operand(rsp, 0));
+ __ testq(rcx, rcx);
+ __ j(zero, &packed_case);
+
+ ElementsKind holey_kind = GetHoleyElementsKind(kind);
+ ArraySingleArgumentConstructorStub stub(holey_kind, context_mode,
+ override_mode);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+ __ jmp(&done);
+ __ bind(&packed_case);
+ }
+
+ ArraySingleArgumentConstructorStub stub(kind, context_mode, override_mode);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+ __ bind(&done);
+ } else {
+ ArrayNArgumentsConstructorStub stub(kind, context_mode, override_mode);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+ }
}
void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
- CallRuntime(instr->function(), instr->arity(), instr);
+ CallRuntime(instr->function(), instr->arity(), instr, instr->save_doubles());
+}
+
+
+void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
+ Register function = ToRegister(instr->function());
+ Register code_object = ToRegister(instr->code_object());
+ __ lea(code_object, FieldOperand(code_object, Code::kHeaderSize));
+ __ movq(FieldOperand(function, JSFunction::kCodeEntryOffset), code_object);
+}
+
+
+void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
+ Register result = ToRegister(instr->result());
+ Register base = ToRegister(instr->base_object());
+ __ lea(result, Operand(base, instr->offset()));
}
void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
+ Representation representation = instr->representation();
+
+ HObjectAccess access = instr->hydrogen()->access();
+ int offset = access.offset();
+
+ if (access.IsExternalMemory()) {
+ ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+ Register value = ToRegister(instr->value());
+ if (instr->object()->IsConstantOperand()) {
+ ASSERT(value.is(rax));
+ ASSERT(!access.representation().IsSpecialization());
+ LConstantOperand* object = LConstantOperand::cast(instr->object());
+ __ store_rax(ToExternalReference(object));
+ } else {
+ Register object = ToRegister(instr->object());
+ __ Store(MemOperand(object, offset), value, representation);
+ }
+ return;
+ }
+
Register object = ToRegister(instr->object());
- Register value = ToRegister(instr->value());
- int offset = instr->offset();
+ Handle<Map> transition = instr->transition();
+
+ if (FLAG_track_fields && representation.IsSmi()) {
+ if (instr->value()->IsConstantOperand()) {
+ LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
+ if (!IsSmiConstant(operand_value)) {
+ DeoptimizeIf(no_condition, instr->environment());
+ }
+ }
+ } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
+ if (instr->value()->IsConstantOperand()) {
+ LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
+ if (IsInteger32Constant(operand_value)) {
+ DeoptimizeIf(no_condition, instr->environment());
+ }
+ } else {
+ if (!instr->hydrogen()->value()->type().IsHeapObject()) {
+ Register value = ToRegister(instr->value());
+ Condition cc = masm()->CheckSmi(value);
+ DeoptimizeIf(cc, instr->environment());
+ }
+ }
+ } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ ASSERT(transition.is_null());
+ ASSERT(access.IsInobject());
+ ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+ XMMRegister value = ToDoubleRegister(instr->value());
+ __ movsd(FieldOperand(object, offset), value);
+ return;
+ }
- if (!instr->transition().is_null()) {
+ if (!transition.is_null()) {
if (!instr->hydrogen()->NeedsWriteBarrierForMap()) {
- __ Move(FieldOperand(object, HeapObject::kMapOffset),
- instr->transition());
+ __ Move(FieldOperand(object, HeapObject::kMapOffset), transition);
} else {
Register temp = ToRegister(instr->temp());
- __ Move(kScratchRegister, instr->transition());
+ __ Move(kScratchRegister, transition);
__ movq(FieldOperand(object, HeapObject::kMapOffset), kScratchRegister);
// Update the write barrier for the map field.
__ RecordWriteField(object,
@@ -3618,37 +3939,42 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
}
// Do the store.
- HType type = instr->hydrogen()->value()->type();
SmiCheck check_needed =
- type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
- if (instr->is_in_object()) {
- __ movq(FieldOperand(object, offset), value);
- if (instr->hydrogen()->NeedsWriteBarrier()) {
- Register temp = ToRegister(instr->temp());
- // Update the write barrier for the object for in-object properties.
- __ RecordWriteField(object,
- offset,
- value,
- temp,
- kSaveFPRegs,
- EMIT_REMEMBERED_SET,
- check_needed);
+ instr->hydrogen()->value()->IsHeapObject()
+ ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+
+ Register write_register = object;
+ if (!access.IsInobject()) {
+ write_register = ToRegister(instr->temp());
+ __ movq(write_register, FieldOperand(object, JSObject::kPropertiesOffset));
+ }
+
+ if (instr->value()->IsConstantOperand()) {
+ LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
+ if (operand_value->IsRegister()) {
+ Register value = ToRegister(operand_value);
+ __ Store(FieldOperand(write_register, offset), value, representation);
+ } else {
+ Handle<Object> handle_value = ToHandle(operand_value);
+ ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+ __ Move(FieldOperand(write_register, offset), handle_value);
}
} else {
- Register temp = ToRegister(instr->temp());
- __ movq(temp, FieldOperand(object, JSObject::kPropertiesOffset));
- __ movq(FieldOperand(temp, offset), value);
- if (instr->hydrogen()->NeedsWriteBarrier()) {
- // Update the write barrier for the properties array.
- // object is used as a scratch register.
- __ RecordWriteField(temp,
- offset,
- value,
- object,
- kSaveFPRegs,
- EMIT_REMEMBERED_SET,
- check_needed);
- }
+ Register value = ToRegister(instr->value());
+ __ Store(FieldOperand(write_register, offset), value, representation);
+ }
+
+ if (instr->hydrogen()->NeedsWriteBarrier()) {
+ Register value = ToRegister(instr->value());
+ Register temp = access.IsInobject() ? ToRegister(instr->temp()) : object;
+ // Update the write barrier for the object for in-object properties.
+ __ RecordWriteField(write_register,
+ offset,
+ value,
+ temp,
+ kSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ check_needed);
}
}
@@ -3665,26 +3991,79 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
}
-void LCodeGen::DoStoreKeyedSpecializedArrayElement(
- LStoreKeyedSpecializedArrayElement* instr) {
+void LCodeGen::ApplyCheckIf(Condition cc, LBoundsCheck* check) {
+ if (FLAG_debug_code && check->hydrogen()->skip_check()) {
+ Label done;
+ __ j(NegateCondition(cc), &done, Label::kNear);
+ __ int3();
+ __ bind(&done);
+ } else {
+ DeoptimizeIf(cc, check->environment());
+ }
+}
+
+
+void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
+ if (instr->hydrogen()->skip_check()) return;
+
+ if (instr->length()->IsRegister()) {
+ Register reg = ToRegister(instr->length());
+ if (!instr->hydrogen()->length()->representation().IsSmi()) {
+ __ AssertZeroExtended(reg);
+ }
+ if (instr->index()->IsConstantOperand()) {
+ int32_t constant_index =
+ ToInteger32(LConstantOperand::cast(instr->index()));
+ if (instr->hydrogen()->length()->representation().IsSmi()) {
+ __ Cmp(reg, Smi::FromInt(constant_index));
+ } else {
+ __ cmpq(reg, Immediate(constant_index));
+ }
+ } else {
+ Register reg2 = ToRegister(instr->index());
+ if (!instr->hydrogen()->index()->representation().IsSmi()) {
+ __ AssertZeroExtended(reg2);
+ }
+ __ cmpq(reg, reg2);
+ }
+ } else {
+ Operand length = ToOperand(instr->length());
+ if (instr->index()->IsConstantOperand()) {
+ int32_t constant_index =
+ ToInteger32(LConstantOperand::cast(instr->index()));
+ if (instr->hydrogen()->length()->representation().IsSmi()) {
+ __ Cmp(length, Smi::FromInt(constant_index));
+ } else {
+ __ cmpq(length, Immediate(constant_index));
+ }
+ } else {
+ __ cmpq(length, ToRegister(instr->index()));
+ }
+ }
+ Condition condition =
+ instr->hydrogen()->allow_equality() ? below : below_equal;
+ ApplyCheckIf(condition, instr);
+}
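The new DoBoundsCheck compares length against index directly (as untagged values or as smis) and routes the failing condition through ApplyCheckIf. The deopt condition reduces to:

    #include <cstdint>

    // Sketch only: the failing condition DoBoundsCheck hands to ApplyCheckIf.
    // 'below' corresponds to length < index, 'below_equal' to length <= index.
    bool BoundsCheckFails(int64_t length, int64_t index, bool allow_equality) {
      return allow_equality ? (length < index) : (length <= index);
    }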
+
+
+void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
ElementsKind elements_kind = instr->elements_kind();
LOperand* key = instr->key();
if (!key->IsConstantOperand()) {
Register key_reg = ToRegister(key);
- // Even though the HLoad/StoreKeyedFastElement instructions force the input
- // representation for the key to be an integer, the input gets replaced
- // during bound check elimination with the index argument to the bounds
- // check, which can be tagged, so that case must be handled here, too.
- if (instr->hydrogen()->key()->representation().IsTagged()) {
- __ SmiToInteger64(key_reg, key_reg);
- } else if (instr->hydrogen()->IsDehoisted()) {
+ // Even though the HLoad/StoreKeyedFastElement instructions force
+ // the input representation for the key to be an integer, the input
+ // gets replaced during bound check elimination with the index
+ // argument to the bounds check, which can be tagged, so that case
+ // must be handled here, too.
+ if (instr->hydrogen()->IsDehoisted()) {
// Sign extend key because it could be a 32 bit negative value
// and the dehoisted address computation happens in 64 bits
__ movsxlq(key_reg, key_reg);
}
}
Operand operand(BuildFastArrayOperand(
- instr->external_pointer(),
+ instr->elements(),
key,
elements_kind,
0,
@@ -3729,79 +4108,58 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
}
-void LCodeGen::DeoptIfTaggedButNotSmi(LEnvironment* environment,
- HValue* value,
- LOperand* operand) {
- if (value->representation().IsTagged() && !value->type().IsSmi()) {
- Condition cc;
- if (operand->IsRegister()) {
- cc = masm()->CheckSmi(ToRegister(operand));
- } else {
- cc = masm()->CheckSmi(ToOperand(operand));
+void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
+ XMMRegister value = ToDoubleRegister(instr->value());
+ LOperand* key = instr->key();
+ if (!key->IsConstantOperand()) {
+ Register key_reg = ToRegister(key);
+ // Even though the HLoad/StoreKeyedFastElement instructions force
+ // the input representation for the key to be an integer, the
+ // input gets replaced during bound check elimination with the index
+ // argument to the bounds check, which can be tagged, so that case
+ // must be handled here, too.
+ if (instr->hydrogen()->IsDehoisted()) {
+ // Sign extend key because it could be a 32 bit negative value
+ // and the dehoisted address computation happens in 64 bits
+ __ movsxlq(key_reg, key_reg);
}
- DeoptimizeIf(NegateCondition(cc), environment);
}
-}
+ if (instr->NeedsCanonicalization()) {
+ Label have_value;
-void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
- DeoptIfTaggedButNotSmi(instr->environment(),
- instr->hydrogen()->length(),
- instr->length());
- DeoptIfTaggedButNotSmi(instr->environment(),
- instr->hydrogen()->index(),
- instr->index());
- if (instr->length()->IsRegister()) {
- Register reg = ToRegister(instr->length());
- if (!instr->hydrogen()->length()->representation().IsTagged()) {
- __ AssertZeroExtended(reg);
- }
- if (instr->index()->IsConstantOperand()) {
- int constant_index =
- ToInteger32(LConstantOperand::cast(instr->index()));
- if (instr->hydrogen()->length()->representation().IsTagged()) {
- __ Cmp(reg, Smi::FromInt(constant_index));
- } else {
- __ cmpq(reg, Immediate(constant_index));
- }
- } else {
- Register reg2 = ToRegister(instr->index());
- if (!instr->hydrogen()->index()->representation().IsTagged()) {
- __ AssertZeroExtended(reg2);
- }
- __ cmpq(reg, reg2);
- }
- } else {
- Operand length = ToOperand(instr->length());
- if (instr->index()->IsConstantOperand()) {
- int constant_index =
- ToInteger32(LConstantOperand::cast(instr->index()));
- if (instr->hydrogen()->length()->representation().IsTagged()) {
- __ Cmp(length, Smi::FromInt(constant_index));
- } else {
- __ cmpq(length, Immediate(constant_index));
- }
- } else {
- __ cmpq(length, ToRegister(instr->index()));
- }
+ __ ucomisd(value, value);
+ __ j(parity_odd, &have_value); // NaN.
+
+ __ Set(kScratchRegister, BitCast<uint64_t>(
+ FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
+ __ movq(value, kScratchRegister);
+
+ __ bind(&have_value);
}
- DeoptimizeIf(below_equal, instr->environment());
+
+ Operand double_store_operand = BuildFastArrayOperand(
+ instr->elements(),
+ key,
+ FAST_DOUBLE_ELEMENTS,
+ FixedDoubleArray::kHeaderSize - kHeapObjectTag,
+ instr->additional_index());
+
+ __ movsd(double_store_operand, value);
}
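The canonicalization step guards FixedDoubleArray's hole encoding: arbitrary NaN bit patterns coming from user code are rewritten to one canonical NaN so they can never alias the hole sentinel. A sketch of the same check (the bit pattern shown is illustrative, not V8's canonical_not_the_hole_nan constant):

    #include <cstdint>
    #include <cstring>

    // Sketch only: canonicalize NaNs before storing into a double-only
    // backing store.
    double CanonicalizeForDoubleArray(double value) {
      if (value != value) {  // the ucomisd parity check above detects NaN
        uint64_t canonical = UINT64_C(0x7FF8000000000000);
        std::memcpy(&value, &canonical, sizeof(value));
      }
      return value;
    }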
-void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
- Register value = ToRegister(instr->value());
- Register elements = ToRegister(instr->object());
+void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
+ Register elements = ToRegister(instr->elements());
LOperand* key = instr->key();
if (!key->IsConstantOperand()) {
Register key_reg = ToRegister(key);
- // Even though the HLoad/StoreKeyedFastElement instructions force the input
- // representation for the key to be an integer, the input gets replaced
- // during bound check elimination with the index argument to the bounds
- // check, which can be tagged, so that case must be handled here, too.
- if (instr->hydrogen()->key()->representation().IsTagged()) {
- __ SmiToInteger64(key_reg, key_reg);
- } else if (instr->hydrogen()->IsDehoisted()) {
+ // Even though the HLoad/StoreKeyedFastElement instructions force
+ // the input representation for the key to be an integer, the
+ // input gets replaced during bound check elimination with the index
+ // argument to the bounds check, which can be tagged, so that case
+ // must be handled here, too.
+ if (instr->hydrogen()->IsDehoisted()) {
// Sign extend key because it could be a 32 bit negative value
// and the dehoisted address computation happens in 64 bits
__ movsxlq(key_reg, key_reg);
@@ -3809,75 +4167,55 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
}
Operand operand =
- BuildFastArrayOperand(instr->object(),
+ BuildFastArrayOperand(instr->elements(),
key,
FAST_ELEMENTS,
FixedArray::kHeaderSize - kHeapObjectTag,
instr->additional_index());
+ if (instr->value()->IsRegister()) {
+ __ movq(operand, ToRegister(instr->value()));
+ } else {
+ LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
+ if (IsInteger32Constant(operand_value)) {
+ Smi* smi_value = Smi::FromInt(ToInteger32(operand_value));
+ __ Move(operand, smi_value);
+ } else {
+ Handle<Object> handle_value = ToHandle(operand_value);
+ __ Move(operand, handle_value);
+ }
+ }
if (instr->hydrogen()->NeedsWriteBarrier()) {
+ ASSERT(instr->value()->IsRegister());
+ Register value = ToRegister(instr->value());
ASSERT(!instr->key()->IsConstantOperand());
- HType type = instr->hydrogen()->value()->type();
SmiCheck check_needed =
- type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+ instr->hydrogen()->value()->IsHeapObject()
+ ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
// Compute address of modified element and store it into key register.
Register key_reg(ToRegister(key));
__ lea(key_reg, operand);
- __ movq(Operand(key_reg, 0), value);
__ RecordWrite(elements,
key_reg,
value,
kSaveFPRegs,
EMIT_REMEMBERED_SET,
check_needed);
- } else {
- __ movq(operand, value);
}
}
-void LCodeGen::DoStoreKeyedFastDoubleElement(
- LStoreKeyedFastDoubleElement* instr) {
- XMMRegister value = ToDoubleRegister(instr->value());
- LOperand* key = instr->key();
- if (!key->IsConstantOperand()) {
- Register key_reg = ToRegister(key);
- // Even though the HLoad/StoreKeyedFastElement instructions force the input
- // representation for the key to be an integer, the input gets replaced
- // during bound check elimination with the index argument to the bounds
- // check, which can be tagged, so that case must be handled here, too.
- if (instr->hydrogen()->key()->representation().IsTagged()) {
- __ SmiToInteger64(key_reg, key_reg);
- } else if (instr->hydrogen()->IsDehoisted()) {
- // Sign extend key because it could be a 32 bit negative value
- // and the dehoisted address computation happens in 64 bits
- __ movsxlq(key_reg, key_reg);
- }
- }
-
- if (instr->NeedsCanonicalization()) {
- Label have_value;
-
- __ ucomisd(value, value);
- __ j(parity_odd, &have_value); // NaN.
-
- __ Set(kScratchRegister, BitCast<uint64_t>(
- FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
- __ movq(value, kScratchRegister);
-
- __ bind(&have_value);
+void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
+ if (instr->is_external()) {
+ DoStoreKeyedExternalArray(instr);
+ } else if (instr->hydrogen()->value()->representation().IsDouble()) {
+ DoStoreKeyedFixedDoubleArray(instr);
+ } else {
+ DoStoreKeyedFixedArray(instr);
}
-
- Operand double_store_operand = BuildFastArrayOperand(
- instr->elements(),
- key,
- FAST_DOUBLE_ELEMENTS,
- FixedDoubleArray::kHeaderSize - kHeapObjectTag,
- instr->additional_index());
-
- __ movsd(double_store_operand, value);
}
+
void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
ASSERT(ToRegister(instr->object()).is(rdx));
ASSERT(ToRegister(instr->key()).is(rcx));
@@ -3892,61 +4230,65 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
Register object_reg = ToRegister(instr->object());
- Register new_map_reg = ToRegister(instr->new_map_temp());
Handle<Map> from_map = instr->original_map();
Handle<Map> to_map = instr->transitioned_map();
- ElementsKind from_kind = from_map->elements_kind();
- ElementsKind to_kind = to_map->elements_kind();
+ ElementsKind from_kind = instr->from_kind();
+ ElementsKind to_kind = instr->to_kind();
Label not_applicable;
__ Cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
__ j(not_equal, &not_applicable);
- __ movq(new_map_reg, to_map, RelocInfo::EMBEDDED_OBJECT);
if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
+ Register new_map_reg = ToRegister(instr->new_map_temp());
+ __ movq(new_map_reg, to_map, RelocInfo::EMBEDDED_OBJECT);
__ movq(FieldOperand(object_reg, HeapObject::kMapOffset), new_map_reg);
// Write barrier.
ASSERT_NE(instr->temp(), NULL);
__ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
ToRegister(instr->temp()), kDontSaveFPRegs);
- } else if (IsFastSmiElementsKind(from_kind) &&
- IsFastDoubleElementsKind(to_kind)) {
- Register fixed_object_reg = ToRegister(instr->temp());
- ASSERT(fixed_object_reg.is(rdx));
- ASSERT(new_map_reg.is(rbx));
- __ movq(fixed_object_reg, object_reg);
- CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
- RelocInfo::CODE_TARGET, instr);
- } else if (IsFastDoubleElementsKind(from_kind) &&
- IsFastObjectElementsKind(to_kind)) {
- Register fixed_object_reg = ToRegister(instr->temp());
- ASSERT(fixed_object_reg.is(rdx));
- ASSERT(new_map_reg.is(rbx));
- __ movq(fixed_object_reg, object_reg);
- CallCode(isolate()->builtins()->TransitionElementsDoubleToObject(),
- RelocInfo::CODE_TARGET, instr);
} else {
- UNREACHABLE();
+ PushSafepointRegistersScope scope(this);
+ if (!object_reg.is(rax)) {
+ __ movq(rax, object_reg);
+ }
+ __ Move(rbx, to_map);
+ TransitionElementsKindStub stub(from_kind, to_kind);
+ __ CallStub(&stub);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
}
__ bind(&not_applicable);
}
+void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
+ Register object = ToRegister(instr->object());
+ Register temp = ToRegister(instr->temp());
+ Label no_memento_found;
+ __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
+ DeoptimizeIf(equal, instr->environment());
+ __ bind(&no_memento_found);
+}
+
+
void LCodeGen::DoStringAdd(LStringAdd* instr) {
EmitPushTaggedOperand(instr->left());
EmitPushTaggedOperand(instr->right());
- StringAddStub stub(NO_STRING_CHECK_IN_STUB);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ StringAddStub stub(instr->hydrogen()->flags());
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
- class DeferredStringCharCodeAt: public LDeferredCode {
+ class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode {
public:
DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
- virtual LInstruction* instr() { return instr_; }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredStringCharCodeAt(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LStringCharCodeAt* instr_;
};
@@ -3978,7 +4320,7 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
// DoStringCharCodeAt above.
STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
if (instr->index()->IsConstantOperand()) {
- int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+ int32_t const_index = ToInteger32(LConstantOperand::cast(instr->index()));
__ Push(Smi::FromInt(const_index));
} else {
Register index = ToRegister(instr->index());
@@ -3993,12 +4335,14 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
- class DeferredStringCharFromCode: public LDeferredCode {
+ class DeferredStringCharFromCode V8_FINAL : public LDeferredCode {
public:
DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
- virtual LInstruction* instr() { return instr_; }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredStringCharFromCode(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LStringCharFromCode* instr_;
};
@@ -4011,8 +4355,9 @@ void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
Register result = ToRegister(instr->result());
ASSERT(!char_code.is(result));
- __ cmpl(char_code, Immediate(String::kMaxAsciiCharCode));
+ __ cmpl(char_code, Immediate(String::kMaxOneByteCharCode));
__ j(above, deferred->entry());
+ __ movsxlq(char_code, char_code);
__ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
__ movq(result, FieldOperand(result,
char_code, times_pointer_size,
@@ -4040,22 +4385,27 @@ void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
}
-void LCodeGen::DoStringLength(LStringLength* instr) {
- Register string = ToRegister(instr->string());
- Register result = ToRegister(instr->result());
- __ movq(result, FieldOperand(string, String::kLengthOffset));
-}
-
-
void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
LOperand* input = instr->value();
ASSERT(input->IsRegister() || input->IsStackSlot());
LOperand* output = instr->result();
ASSERT(output->IsDoubleRegister());
if (input->IsRegister()) {
- __ cvtlsi2sd(ToDoubleRegister(output), ToRegister(input));
+ __ Cvtlsi2sd(ToDoubleRegister(output), ToRegister(input));
} else {
- __ cvtlsi2sd(ToDoubleRegister(output), ToOperand(input));
+ __ Cvtlsi2sd(ToDoubleRegister(output), ToOperand(input));
+ }
+}
+
+
+void LCodeGen::DoInteger32ToSmi(LInteger32ToSmi* instr) {
+ LOperand* input = instr->value();
+ ASSERT(input->IsRegister());
+ LOperand* output = instr->result();
+ __ Integer32ToSmi(ToRegister(output), ToRegister(input));
+ if (!instr->hydrogen()->value()->HasRange() ||
+ !instr->hydrogen()->value()->range()->IsInSmiRange()) {
+ DeoptimizeIf(overflow, instr->environment());
}
}
@@ -4071,6 +4421,22 @@ void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
}
+void LCodeGen::DoUint32ToSmi(LUint32ToSmi* instr) {
+ LOperand* input = instr->value();
+ ASSERT(input->IsRegister());
+ LOperand* output = instr->result();
+ if (!instr->hydrogen()->value()->HasRange() ||
+ !instr->hydrogen()->value()->range()->IsInSmiRange() ||
+ instr->hydrogen()->value()->range()->upper() == kMaxInt) {
+ // The Range class can't express upper bounds in the (kMaxInt, kMaxUint32]
+ // interval, so we treat kMaxInt as a sentinel for this entire interval.
+ __ testl(ToRegister(input), Immediate(0x80000000));
+ DeoptimizeIf(not_zero, instr->environment());
+ }
+ __ Integer32ToSmi(ToRegister(output), ToRegister(input));
+}
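DoUint32ToSmi only emits the sign-bit test when range analysis cannot prove the value stays at or below kMaxInt; the guard itself is simply:

    #include <cstdint>

    // Sketch only: a uint32 with the top bit set cannot be tagged as a
    // smi-range int32, so the optimized code deopts instead.
    bool Uint32FitsInSmiRange(uint32_t value) {
      return (value & 0x80000000u) == 0;
    }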
+
+
void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
LOperand* input = instr->value();
ASSERT(input->IsRegister() && input->Equals(instr->result()));
@@ -4081,14 +4447,14 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
- class DeferredNumberTagU: public LDeferredCode {
+ class DeferredNumberTagU V8_FINAL : public LDeferredCode {
public:
DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
+ virtual void Generate() V8_OVERRIDE {
codegen()->DoDeferredNumberTagU(instr_);
}
- virtual LInstruction* instr() { return instr_; }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LNumberTagU* instr_;
};
@@ -4109,15 +4475,17 @@ void LCodeGen::DoDeferredNumberTagU(LNumberTagU* instr) {
Label slow;
Register reg = ToRegister(instr->value());
Register tmp = reg.is(rax) ? rcx : rax;
+ XMMRegister temp_xmm = ToDoubleRegister(instr->temp());
// Preserve the value of all registers.
PushSafepointRegistersScope scope(this);
Label done;
- // Load value into xmm1 which will be preserved across potential call to
+ // Load value into temp_xmm which will be preserved across potential call to
// runtime (MacroAssembler::EnterExitFrameEpilogue preserves only allocatable
// XMM registers on x64).
- __ LoadUint32(xmm1, reg, xmm0);
+ XMMRegister xmm_scratch = double_scratch0();
+ __ LoadUint32(temp_xmm, reg, xmm_scratch);
if (FLAG_inline_new) {
__ AllocateHeapNumber(reg, tmp, &slow);
@@ -4135,21 +4503,23 @@ void LCodeGen::DoDeferredNumberTagU(LNumberTagU* instr) {
CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
if (!reg.is(rax)) __ movq(reg, rax);
- // Done. Put the value in xmm1 into the value of the allocated heap
+ // Done. Put the value in temp_xmm into the value of the allocated heap
// number.
__ bind(&done);
- __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), xmm1);
+ __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), temp_xmm);
__ StoreToSafepointRegisterSlot(reg, reg);
}
void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
- class DeferredNumberTagD: public LDeferredCode {
+ class DeferredNumberTagD V8_FINAL : public LDeferredCode {
public:
DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
- virtual LInstruction* instr() { return instr_; }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredNumberTagD(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LNumberTagD* instr_;
};
@@ -4209,108 +4579,119 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
void LCodeGen::EmitNumberUntagD(Register input_reg,
XMMRegister result_reg,
- bool deoptimize_on_undefined,
+ bool can_convert_undefined_to_nan,
bool deoptimize_on_minus_zero,
- LEnvironment* env) {
- Label load_smi, done;
+ LEnvironment* env,
+ NumberUntagDMode mode) {
+ Label convert, load_smi, done;
- // Smi check.
- __ JumpIfSmi(input_reg, &load_smi, Label::kNear);
+ if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
+ // Smi check.
+ __ JumpIfSmi(input_reg, &load_smi, Label::kNear);
- // Heap number map check.
- __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- if (deoptimize_on_undefined) {
- DeoptimizeIf(not_equal, env);
- } else {
- Label heap_number;
- __ j(equal, &heap_number, Label::kNear);
+ // Heap number map check.
+ __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex);
- __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
- DeoptimizeIf(not_equal, env);
+ // On x64 it is safe to load at heap number offset before evaluating the map
+ // check, since all heap objects are at least two words long.
+ __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
+
+ if (can_convert_undefined_to_nan) {
+ __ j(not_equal, &convert);
+ } else {
+ DeoptimizeIf(not_equal, env);
+ }
- // Convert undefined to NaN. Compute NaN as 0/0.
- __ xorps(result_reg, result_reg);
- __ divsd(result_reg, result_reg);
+ if (deoptimize_on_minus_zero) {
+ XMMRegister xmm_scratch = double_scratch0();
+ __ xorps(xmm_scratch, xmm_scratch);
+ __ ucomisd(xmm_scratch, result_reg);
+ __ j(not_equal, &done, Label::kNear);
+ __ movmskpd(kScratchRegister, result_reg);
+ __ testq(kScratchRegister, Immediate(1));
+ DeoptimizeIf(not_zero, env);
+ }
__ jmp(&done, Label::kNear);
- __ bind(&heap_number);
- }
- // Heap number to XMM conversion.
- __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
- if (deoptimize_on_minus_zero) {
- XMMRegister xmm_scratch = xmm0;
- __ xorps(xmm_scratch, xmm_scratch);
- __ ucomisd(xmm_scratch, result_reg);
- __ j(not_equal, &done, Label::kNear);
- __ movmskpd(kScratchRegister, result_reg);
- __ testq(kScratchRegister, Immediate(1));
- DeoptimizeIf(not_zero, env);
+ if (can_convert_undefined_to_nan) {
+ __ bind(&convert);
+
+ // Convert undefined (and hole) to NaN. Compute NaN as 0/0.
+ __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
+ DeoptimizeIf(not_equal, env);
+
+ __ xorps(result_reg, result_reg);
+ __ divsd(result_reg, result_reg);
+ __ jmp(&done, Label::kNear);
+ }
+ } else {
+ ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
}
- __ jmp(&done, Label::kNear);
// Smi to XMM conversion
__ bind(&load_smi);
__ SmiToInteger32(kScratchRegister, input_reg);
- __ cvtlsi2sd(result_reg, kScratchRegister);
+ __ Cvtlsi2sd(result_reg, kScratchRegister);
__ bind(&done);
}
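
[Editor's sketch, not part of the patch] Two scalar details of the rewritten EmitNumberUntagD, restated in plain C++: undefined becomes a quiet NaN by dividing zero by zero, and the minus-zero deopt compares against +0.0 and then inspects the sign bit, which is what movmskpd exposes.

#include <cstdint>
#include <cstring>

double UndefinedToNaN() {
  double zero = 0.0;
  return zero / zero;              // xorps result, result; divsd result, result
}

bool IsMinusZero(double value) {
  if (value != 0.0) return false;  // ucomisd against an xorps'd scratch
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof bits);
  return (bits >> 63) != 0;        // sign bit, i.e. movmskpd + testq ..., 1
}
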
-void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
- Label done, heap_number;
+void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
Register input_reg = ToRegister(instr->value());
- // Heap number map check.
- __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
-
if (instr->truncating()) {
- __ j(equal, &heap_number, Label::kNear);
- // Check for undefined. Undefined is converted to zero for truncating
- // conversions.
+ Label no_heap_number, check_bools, check_false;
+
+ // Heap number map check.
+ __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex);
+ __ j(not_equal, &no_heap_number, Label::kNear);
+ __ TruncateHeapNumberToI(input_reg, input_reg);
+ __ jmp(done);
+
+ __ bind(&no_heap_number);
+ // Check for Oddballs. Undefined/False is converted to zero and True to one
+ // for truncating conversions.
__ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
- DeoptimizeIf(not_equal, instr->environment());
+ __ j(not_equal, &check_bools, Label::kNear);
__ Set(input_reg, 0);
- __ jmp(&done, Label::kNear);
+ __ jmp(done);
- __ bind(&heap_number);
+ __ bind(&check_bools);
+ __ CompareRoot(input_reg, Heap::kTrueValueRootIndex);
+ __ j(not_equal, &check_false, Label::kNear);
+ __ Set(input_reg, 1);
+ __ jmp(done);
- __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
- __ cvttsd2siq(input_reg, xmm0);
- __ Set(kScratchRegister, V8_UINT64_C(0x8000000000000000));
- __ cmpq(input_reg, kScratchRegister);
- DeoptimizeIf(equal, instr->environment());
- } else {
- // Deoptimize if we don't have a heap number.
+ __ bind(&check_false);
+ __ CompareRoot(input_reg, Heap::kFalseValueRootIndex);
+ __ RecordComment("Deferred TaggedToI: cannot truncate");
DeoptimizeIf(not_equal, instr->environment());
-
+ __ Set(input_reg, 0);
+ __ jmp(done);
+ } else {
+ Label bailout;
XMMRegister xmm_temp = ToDoubleRegister(instr->temp());
- __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
- __ cvttsd2si(input_reg, xmm0);
- __ cvtlsi2sd(xmm_temp, input_reg);
- __ ucomisd(xmm0, xmm_temp);
- DeoptimizeIf(not_equal, instr->environment());
- DeoptimizeIf(parity_even, instr->environment()); // NaN.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ testl(input_reg, input_reg);
- __ j(not_zero, &done);
- __ movmskpd(input_reg, xmm0);
- __ andl(input_reg, Immediate(1));
- DeoptimizeIf(not_zero, instr->environment());
- }
+ __ TaggedToI(input_reg, input_reg, xmm_temp,
+ instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
+
+ __ jmp(done);
+ __ bind(&bailout);
+ DeoptimizeIf(no_condition, instr->environment());
}
- __ bind(&done);
}
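
[Editor's sketch, not part of the patch] The truncating branch above as a decision table; Oddball is a hypothetical enum standing in for the root comparisons against undefined, true and false.

#include <cstdint>

enum class Oddball { kUndefined, kTrue, kFalse, kOther };

// Returns false where the generated code would deoptimize with
// "Deferred TaggedToI: cannot truncate".
bool TruncateOddballToInt32(Oddball value, int32_t* out) {
  switch (value) {
    case Oddball::kUndefined:
    case Oddball::kFalse:
      *out = 0;                    // __ Set(input_reg, 0)
      return true;
    case Oddball::kTrue:
      *out = 1;                    // __ Set(input_reg, 1)
      return true;
    default:
      return false;                // DeoptimizeIf(not_equal, ...)
  }
}
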
void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
- class DeferredTaggedToI: public LDeferredCode {
+ class DeferredTaggedToI V8_FINAL : public LDeferredCode {
public:
DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
- virtual LInstruction* instr() { return instr_; }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredTaggedToI(instr_, done());
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LTaggedToI* instr_;
};
@@ -4318,12 +4699,16 @@ void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
LOperand* input = instr->value();
ASSERT(input->IsRegister());
ASSERT(input->Equals(instr->result()));
-
Register input_reg = ToRegister(input);
- DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
- __ JumpIfNotSmi(input_reg, deferred->entry());
- __ SmiToInteger32(input_reg, input_reg);
- __ bind(deferred->exit());
+
+ if (instr->hydrogen()->value()->representation().IsSmi()) {
+ __ SmiToInteger32(input_reg, input_reg);
+ } else {
+ DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
+ __ JumpIfNotSmi(input_reg, deferred->entry());
+ __ SmiToInteger32(input_reg, input_reg);
+ __ bind(deferred->exit());
+ }
}
@@ -4336,10 +4721,15 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
Register input_reg = ToRegister(input);
XMMRegister result_reg = ToDoubleRegister(result);
+ HValue* value = instr->hydrogen()->value();
+ NumberUntagDMode mode = value->representation().IsSmi()
+ ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
+
EmitNumberUntagD(input_reg, result_reg,
- instr->hydrogen()->deoptimize_on_undefined(),
+ instr->hydrogen()->can_convert_undefined_to_nan(),
instr->hydrogen()->deoptimize_on_minus_zero(),
- instr->environment());
+ instr->environment(),
+ mode);
}
@@ -4353,36 +4743,45 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
Register result_reg = ToRegister(result);
if (instr->truncating()) {
- // Performs a truncating conversion of a floating point number as used by
- // the JS bitwise operations.
- __ cvttsd2siq(result_reg, input_reg);
- __ movq(kScratchRegister, V8_INT64_C(0x8000000000000000), RelocInfo::NONE);
- __ cmpq(result_reg, kScratchRegister);
- DeoptimizeIf(equal, instr->environment());
+ __ TruncateDoubleToI(result_reg, input_reg);
} else {
- __ cvttsd2si(result_reg, input_reg);
- __ cvtlsi2sd(xmm0, result_reg);
- __ ucomisd(xmm0, input_reg);
- DeoptimizeIf(not_equal, instr->environment());
- DeoptimizeIf(parity_even, instr->environment()); // NaN.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label done;
- // The integer converted back is equal to the original. We
- // only have to test if we got -0 as an input.
- __ testl(result_reg, result_reg);
- __ j(not_zero, &done, Label::kNear);
- __ movmskpd(result_reg, input_reg);
- // Bit 0 contains the sign of the double in input_reg.
- // If input was positive, we are ok and return 0, otherwise
- // deoptimize.
- __ andl(result_reg, Immediate(1));
- DeoptimizeIf(not_zero, instr->environment());
- __ bind(&done);
- }
+ Label bailout, done;
+ XMMRegister xmm_scratch = double_scratch0();
+ __ DoubleToI(result_reg, input_reg, xmm_scratch,
+ instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
+
+ __ jmp(&done, Label::kNear);
+ __ bind(&bailout);
+ DeoptimizeIf(no_condition, instr->environment());
+ __ bind(&done);
}
}
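
[Editor's sketch, not part of the patch] The exactness checks the non-truncating path above delegates to the DoubleToI macro: truncate, convert back, compare, and reject NaN, out-of-range values and (when BailoutOnMinusZero applies) minus zero.

#include <cmath>
#include <cstdint>
#include <optional>

std::optional<int32_t> ExactDoubleToInt32(double input) {
  if (!(input >= -2147483648.0 && input <= 2147483647.0)) {
    return std::nullopt;                            // NaN or outside int32
  }
  int32_t truncated = static_cast<int32_t>(input);  // cvttsd2si
  if (static_cast<double>(truncated) != input) {    // cvtlsi2sd + ucomisd
    return std::nullopt;                            // inexact -> deopt
  }
  if (truncated == 0 && std::signbit(input)) {
    return std::nullopt;                            // -0.0 -> deopt
  }
  return truncated;
}
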
+void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
+ LOperand* input = instr->value();
+ ASSERT(input->IsDoubleRegister());
+ LOperand* result = instr->result();
+ ASSERT(result->IsRegister());
+
+ XMMRegister input_reg = ToDoubleRegister(input);
+ Register result_reg = ToRegister(result);
+
+ Label bailout, done;
+ XMMRegister xmm_scratch = double_scratch0();
+ __ DoubleToI(result_reg, input_reg, xmm_scratch,
+ instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
+
+ __ jmp(&done, Label::kNear);
+ __ bind(&bailout);
+ DeoptimizeIf(no_condition, instr->environment());
+ __ bind(&done);
+
+ __ Integer32ToSmi(result_reg, result_reg);
+ DeoptimizeIf(overflow, instr->environment());
+}
+
+
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
LOperand* input = instr->value();
Condition cc = masm()->CheckSmi(ToRegister(input));
@@ -4391,9 +4790,11 @@ void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
- LOperand* input = instr->value();
- Condition cc = masm()->CheckSmi(ToRegister(input));
- DeoptimizeIf(cc, instr->environment());
+ if (!instr->hydrogen()->value()->IsHeapObject()) {
+ LOperand* input = instr->value();
+ Condition cc = masm()->CheckSmi(ToRegister(input));
+ DeoptimizeIf(cc, instr->environment());
+ }
}
@@ -4443,54 +4844,79 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
}
-void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
+void LCodeGen::DoCheckValue(LCheckValue* instr) {
Register reg = ToRegister(instr->value());
- Handle<JSFunction> target = instr->hydrogen()->target();
- if (isolate()->heap()->InNewSpace(*target)) {
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(target);
- __ movq(kScratchRegister, cell, RelocInfo::GLOBAL_PROPERTY_CELL);
- __ cmpq(reg, Operand(kScratchRegister, 0));
- } else {
- __ Cmp(reg, target);
- }
+ __ Cmp(reg, instr->hydrogen()->object().handle());
DeoptimizeIf(not_equal, instr->environment());
}
-void LCodeGen::DoCheckMapCommon(Register reg,
- Handle<Map> map,
- CompareMapMode mode,
- LEnvironment* env) {
- Label success;
- __ CompareMap(reg, map, &success, mode);
- DeoptimizeIf(not_equal, env);
- __ bind(&success);
+void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
+ {
+ PushSafepointRegistersScope scope(this);
+ __ push(object);
+ CallRuntimeFromDeferred(Runtime::kMigrateInstance, 1, instr);
+ __ testq(rax, Immediate(kSmiTagMask));
+ }
+ DeoptimizeIf(zero, instr->environment());
}
void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
+ class DeferredCheckMaps V8_FINAL : public LDeferredCode {
+ public:
+ DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
+ : LDeferredCode(codegen), instr_(instr), object_(object) {
+ SetExit(check_maps());
+ }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredInstanceMigration(instr_, object_);
+ }
+ Label* check_maps() { return &check_maps_; }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ private:
+ LCheckMaps* instr_;
+ Label check_maps_;
+ Register object_;
+ };
+
+ if (instr->hydrogen()->CanOmitMapChecks()) return;
+
LOperand* input = instr->value();
ASSERT(input->IsRegister());
Register reg = ToRegister(input);
+ DeferredCheckMaps* deferred = NULL;
+ if (instr->hydrogen()->has_migration_target()) {
+ deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
+ __ bind(deferred->check_maps());
+ }
+
+ UniqueSet<Map> map_set = instr->hydrogen()->map_set();
Label success;
- SmallMapList* map_set = instr->hydrogen()->map_set();
- for (int i = 0; i < map_set->length() - 1; i++) {
- Handle<Map> map = map_set->at(i);
- __ CompareMap(reg, map, &success, REQUIRE_EXACT_MAP);
+ for (int i = 0; i < map_set.size() - 1; i++) {
+ Handle<Map> map = map_set.at(i).handle();
+ __ CompareMap(reg, map, &success);
__ j(equal, &success);
}
- Handle<Map> map = map_set->last();
- DoCheckMapCommon(reg, map, REQUIRE_EXACT_MAP, instr->environment());
+
+ Handle<Map> map = map_set.at(map_set.size() - 1).handle();
+ __ CompareMap(reg, map, &success);
+ if (instr->hydrogen()->has_migration_target()) {
+ __ j(not_equal, deferred->entry());
+ } else {
+ DeoptimizeIf(not_equal, instr->environment());
+ }
+
__ bind(&success);
}
void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
+ XMMRegister xmm_scratch = double_scratch0();
Register result_reg = ToRegister(instr->result());
- __ ClampDoubleToUint8(value_reg, xmm0, result_reg);
+ __ ClampDoubleToUint8(value_reg, xmm_scratch, result_reg);
}
@@ -4505,6 +4931,7 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
ASSERT(instr->unclamped()->Equals(instr->result()));
Register input_reg = ToRegister(instr->unclamped());
XMMRegister temp_xmm_reg = ToDoubleRegister(instr->temp_xmm());
+ XMMRegister xmm_scratch = double_scratch0();
Label is_smi, done, heap_number;
__ JumpIfSmi(input_reg, &is_smi);
@@ -4523,8 +4950,8 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
// Heap number
__ bind(&heap_number);
- __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
- __ ClampDoubleToUint8(xmm0, temp_xmm_reg, input_reg);
+ __ movsd(xmm_scratch, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ __ ClampDoubleToUint8(xmm_scratch, temp_xmm_reg, input_reg);
__ jmp(&done, Label::kNear);
// smi
@@ -4536,344 +4963,98 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
}
-void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
- Register reg = ToRegister(instr->temp());
-
- Handle<JSObject> holder = instr->holder();
- Handle<JSObject> current_prototype = instr->prototype();
-
- // Load prototype object.
- __ LoadHeapObject(reg, current_prototype);
-
- // Check prototype maps up to the holder.
- while (!current_prototype.is_identical_to(holder)) {
- DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()),
- ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
- current_prototype =
- Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
- // Load next prototype object.
- __ LoadHeapObject(reg, current_prototype);
- }
-
- // Check the holder map.
- DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()),
- ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
-}
-
-
-void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
- class DeferredAllocateObject: public LDeferredCode {
+void LCodeGen::DoAllocate(LAllocate* instr) {
+ class DeferredAllocate V8_FINAL : public LDeferredCode {
public:
- DeferredAllocateObject(LCodeGen* codegen, LAllocateObject* instr)
+ DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredAllocateObject(instr_); }
- virtual LInstruction* instr() { return instr_; }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredAllocate(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
- LAllocateObject* instr_;
+ LAllocate* instr_;
};
- DeferredAllocateObject* deferred =
- new(zone()) DeferredAllocateObject(this, instr);
+ DeferredAllocate* deferred =
+ new(zone()) DeferredAllocate(this, instr);
Register result = ToRegister(instr->result());
- Register scratch = ToRegister(instr->temp());
- Handle<JSFunction> constructor = instr->hydrogen()->constructor();
- Handle<Map> initial_map(constructor->initial_map());
- int instance_size = initial_map->instance_size();
- ASSERT(initial_map->pre_allocated_property_fields() +
- initial_map->unused_property_fields() -
- initial_map->inobject_properties() == 0);
-
- // Allocate memory for the object. The initial map might change when
- // the constructor's prototype changes, but instance size and property
- // counts remain unchanged (if slack tracking finished).
- ASSERT(!constructor->shared()->IsInobjectSlackTrackingInProgress());
- __ AllocateInNewSpace(instance_size,
- result,
- no_reg,
- scratch,
- deferred->entry(),
- TAG_OBJECT);
+ Register temp = ToRegister(instr->temp());
- __ bind(deferred->exit());
- if (FLAG_debug_code) {
- Label is_in_new_space;
- __ JumpIfInNewSpace(result, scratch, &is_in_new_space);
- __ Abort("Allocated object is not in new-space");
- __ bind(&is_in_new_space);
+ // Allocate memory for the object.
+ AllocationFlags flags = TAG_OBJECT;
+ if (instr->hydrogen()->MustAllocateDoubleAligned()) {
+ flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
+ }
+ if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
+ ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
+ ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
+ flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
+ } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
+ ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
+ flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
}
- // Load the initial map.
- Register map = scratch;
- __ LoadHeapObject(scratch, constructor);
- __ movq(map, FieldOperand(scratch, JSFunction::kPrototypeOrInitialMapOffset));
+ if (instr->size()->IsConstantOperand()) {
+ int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+ __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
+ } else {
+ Register size = ToRegister(instr->size());
+ __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
+ }
- if (FLAG_debug_code) {
- __ AssertNotSmi(map);
- __ cmpb(FieldOperand(map, Map::kInstanceSizeOffset),
- Immediate(instance_size >> kPointerSizeLog2));
- __ Assert(equal, "Unexpected instance size");
- __ cmpb(FieldOperand(map, Map::kPreAllocatedPropertyFieldsOffset),
- Immediate(initial_map->pre_allocated_property_fields()));
- __ Assert(equal, "Unexpected pre-allocated property fields count");
- __ cmpb(FieldOperand(map, Map::kUnusedPropertyFieldsOffset),
- Immediate(initial_map->unused_property_fields()));
- __ Assert(equal, "Unexpected unused property fields count");
- __ cmpb(FieldOperand(map, Map::kInObjectPropertiesOffset),
- Immediate(initial_map->inobject_properties()));
- __ Assert(equal, "Unexpected in-object property fields count");
- }
-
- // Initialize map and fields of the newly allocated object.
- ASSERT(initial_map->instance_type() == JS_OBJECT_TYPE);
- __ movq(FieldOperand(result, JSObject::kMapOffset), map);
- __ LoadRoot(scratch, Heap::kEmptyFixedArrayRootIndex);
- __ movq(FieldOperand(result, JSObject::kElementsOffset), scratch);
- __ movq(FieldOperand(result, JSObject::kPropertiesOffset), scratch);
- if (initial_map->inobject_properties() != 0) {
- __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
- for (int i = 0; i < initial_map->inobject_properties(); i++) {
- int property_offset = JSObject::kHeaderSize + i * kPointerSize;
- __ movq(FieldOperand(result, property_offset), scratch);
+ __ bind(deferred->exit());
+
+ if (instr->hydrogen()->MustPrefillWithFiller()) {
+ if (instr->size()->IsConstantOperand()) {
+ int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+ __ movl(temp, Immediate((size / kPointerSize) - 1));
+ } else {
+ temp = ToRegister(instr->size());
+ __ sar(temp, Immediate(kPointerSizeLog2));
+ __ decl(temp);
}
+ Label loop;
+ __ bind(&loop);
+ __ Move(FieldOperand(result, temp, times_pointer_size, 0),
+ isolate()->factory()->one_pointer_filler_map());
+ __ decl(temp);
+ __ j(not_zero, &loop);
}
}
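
[Editor's sketch, not part of the patch] What the MustPrefillWithFiller loop above computes, assuming the x64 pointer size; filler_word stands in for the one_pointer_filler_map handle. Every word except word 0 is written, counting down from (size / kPointerSize) - 1 to 1, matching the decl / j(not_zero) loop.

#include <cstdint>

constexpr int kPointerSize = 8;  // x64

void PrefillWithFiller(intptr_t* object_words, int size_in_bytes,
                       intptr_t filler_word) {
  // movl(temp, Immediate((size / kPointerSize) - 1)), then the decl loop.
  for (int i = size_in_bytes / kPointerSize - 1; i != 0; --i) {
    object_words[i] = filler_word;  // Move(FieldOperand(result, temp, ...), filler)
  }
}
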
-void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
+void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
Register result = ToRegister(instr->result());
- Handle<JSFunction> constructor = instr->hydrogen()->constructor();
- Handle<Map> initial_map(constructor->initial_map());
- int instance_size = initial_map->instance_size();
// TODO(3095996): Get rid of this. For now, we need to make the
// result register contain a valid pointer because it is already
// contained in the register pointer map.
- __ Set(result, 0);
+ __ Move(result, Smi::FromInt(0));
PushSafepointRegistersScope scope(this);
- __ Push(Smi::FromInt(instance_size));
- CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
- __ StoreToSafepointRegisterSlot(result, rax);
-}
-
-
-void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
- Handle<FixedArray> literals(instr->environment()->closure()->literals());
- ElementsKind boilerplate_elements_kind =
- instr->hydrogen()->boilerplate_elements_kind();
-
- // Deopt if the array literal boilerplate ElementsKind is of a type different
- // than the expected one. The check isn't necessary if the boilerplate has
- // already been converted to TERMINAL_FAST_ELEMENTS_KIND.
- if (CanTransitionToMoreGeneralFastElementsKind(
- boilerplate_elements_kind, true)) {
- __ LoadHeapObject(rax, instr->hydrogen()->boilerplate_object());
- __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
- // Load the map's "bit field 2".
- __ movb(rbx, FieldOperand(rbx, Map::kBitField2Offset));
- // Retrieve elements_kind from bit field 2.
- __ and_(rbx, Immediate(Map::kElementsKindMask));
- __ cmpb(rbx, Immediate(boilerplate_elements_kind <<
- Map::kElementsKindShift));
- DeoptimizeIf(not_equal, instr->environment());
- }
-
- // Set up the parameters to the stub/runtime call.
- __ PushHeapObject(literals);
- __ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
- // Boilerplate already exists, constant elements are never accessed.
- // Pass an empty fixed array.
- __ Push(isolate()->factory()->empty_fixed_array());
-
- // Pick the right runtime function or stub to call.
- int length = instr->hydrogen()->length();
- if (instr->hydrogen()->IsCopyOnWrite()) {
- ASSERT(instr->hydrogen()->depth() == 1);
- FastCloneShallowArrayStub::Mode mode =
- FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
- FastCloneShallowArrayStub stub(mode, length);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- } else if (instr->hydrogen()->depth() > 1) {
- CallRuntime(Runtime::kCreateArrayLiteral, 3, instr);
- } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
- CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
+ if (instr->size()->IsRegister()) {
+ Register size = ToRegister(instr->size());
+ ASSERT(!size.is(result));
+ __ Integer32ToSmi(size, size);
+ __ push(size);
} else {
- FastCloneShallowArrayStub::Mode mode =
- boilerplate_elements_kind == FAST_DOUBLE_ELEMENTS
- ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
- : FastCloneShallowArrayStub::CLONE_ELEMENTS;
- FastCloneShallowArrayStub stub(mode, length);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- }
-}
-
-
-void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
- Register result,
- Register source,
- int* offset) {
- ASSERT(!source.is(rcx));
- ASSERT(!result.is(rcx));
-
- // Only elements backing stores for non-COW arrays need to be copied.
- Handle<FixedArrayBase> elements(object->elements());
- bool has_elements = elements->length() > 0 &&
- elements->map() != isolate()->heap()->fixed_cow_array_map();
-
- // Increase the offset so that subsequent objects end up right after
- // this object and its backing store.
- int object_offset = *offset;
- int object_size = object->map()->instance_size();
- int elements_offset = *offset + object_size;
- int elements_size = has_elements ? elements->Size() : 0;
- *offset += object_size + elements_size;
-
- // Copy object header.
- ASSERT(object->properties()->length() == 0);
- int inobject_properties = object->map()->inobject_properties();
- int header_size = object_size - inobject_properties * kPointerSize;
- for (int i = 0; i < header_size; i += kPointerSize) {
- if (has_elements && i == JSObject::kElementsOffset) {
- __ lea(rcx, Operand(result, elements_offset));
- } else {
- __ movq(rcx, FieldOperand(source, i));
- }
- __ movq(FieldOperand(result, object_offset + i), rcx);
- }
-
- // Copy in-object properties.
- for (int i = 0; i < inobject_properties; i++) {
- int total_offset = object_offset + object->GetInObjectPropertyOffset(i);
- Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i));
- if (value->IsJSObject()) {
- Handle<JSObject> value_object = Handle<JSObject>::cast(value);
- __ lea(rcx, Operand(result, *offset));
- __ movq(FieldOperand(result, total_offset), rcx);
- __ LoadHeapObject(source, value_object);
- EmitDeepCopy(value_object, result, source, offset);
- } else if (value->IsHeapObject()) {
- __ LoadHeapObject(rcx, Handle<HeapObject>::cast(value));
- __ movq(FieldOperand(result, total_offset), rcx);
- } else {
- __ movq(rcx, value, RelocInfo::NONE);
- __ movq(FieldOperand(result, total_offset), rcx);
- }
- }
-
- if (has_elements) {
- // Copy elements backing store header.
- __ LoadHeapObject(source, elements);
- for (int i = 0; i < FixedArray::kHeaderSize; i += kPointerSize) {
- __ movq(rcx, FieldOperand(source, i));
- __ movq(FieldOperand(result, elements_offset + i), rcx);
- }
-
- // Copy elements backing store content.
- int elements_length = elements->length();
- if (elements->IsFixedDoubleArray()) {
- Handle<FixedDoubleArray> double_array =
- Handle<FixedDoubleArray>::cast(elements);
- for (int i = 0; i < elements_length; i++) {
- int64_t value = double_array->get_representation(i);
- int total_offset =
- elements_offset + FixedDoubleArray::OffsetOfElementAt(i);
- __ movq(rcx, value, RelocInfo::NONE);
- __ movq(FieldOperand(result, total_offset), rcx);
- }
- } else if (elements->IsFixedArray()) {
- Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
- for (int i = 0; i < elements_length; i++) {
- int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
- Handle<Object> value(fast_elements->get(i));
- if (value->IsJSObject()) {
- Handle<JSObject> value_object = Handle<JSObject>::cast(value);
- __ lea(rcx, Operand(result, *offset));
- __ movq(FieldOperand(result, total_offset), rcx);
- __ LoadHeapObject(source, value_object);
- EmitDeepCopy(value_object, result, source, offset);
- } else if (value->IsHeapObject()) {
- __ LoadHeapObject(rcx, Handle<HeapObject>::cast(value));
- __ movq(FieldOperand(result, total_offset), rcx);
- } else {
- __ movq(rcx, value, RelocInfo::NONE);
- __ movq(FieldOperand(result, total_offset), rcx);
- }
- }
- } else {
- UNREACHABLE();
- }
- }
-}
-
-
-void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
- int size = instr->hydrogen()->total_size();
- ElementsKind boilerplate_elements_kind =
- instr->hydrogen()->boilerplate()->GetElementsKind();
-
- // Deopt if the array literal boilerplate ElementsKind is of a type different
- // than the expected one. The check isn't necessary if the boilerplate has
- // already been converted to TERMINAL_FAST_ELEMENTS_KIND.
- if (CanTransitionToMoreGeneralFastElementsKind(
- boilerplate_elements_kind, true)) {
- __ LoadHeapObject(rbx, instr->hydrogen()->boilerplate());
- __ movq(rcx, FieldOperand(rbx, HeapObject::kMapOffset));
- // Load the map's "bit field 2".
- __ movb(rcx, FieldOperand(rcx, Map::kBitField2Offset));
- // Retrieve elements_kind from bit field 2.
- __ and_(rcx, Immediate(Map::kElementsKindMask));
- __ cmpb(rcx, Immediate(boilerplate_elements_kind <<
- Map::kElementsKindShift));
- DeoptimizeIf(not_equal, instr->environment());
+ int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+ __ Push(Smi::FromInt(size));
}
- // Allocate all objects that are part of the literal in one big
- // allocation. This avoids multiple limit checks.
- Label allocated, runtime_allocate;
- __ AllocateInNewSpace(size, rax, rcx, rdx, &runtime_allocate, TAG_OBJECT);
- __ jmp(&allocated);
-
- __ bind(&runtime_allocate);
- __ Push(Smi::FromInt(size));
- CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
-
- __ bind(&allocated);
- int offset = 0;
- __ LoadHeapObject(rbx, instr->hydrogen()->boilerplate());
- EmitDeepCopy(instr->hydrogen()->boilerplate(), rax, rbx, &offset);
- ASSERT_EQ(size, offset);
-}
-
-
-void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
- Handle<FixedArray> literals(instr->environment()->closure()->literals());
- Handle<FixedArray> constant_properties =
- instr->hydrogen()->constant_properties();
-
- // Set up the parameters to the stub/runtime call.
- __ PushHeapObject(literals);
- __ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
- __ Push(constant_properties);
- int flags = instr->hydrogen()->fast_elements()
- ? ObjectLiteral::kFastElements
- : ObjectLiteral::kNoFlags;
- flags |= instr->hydrogen()->has_function()
- ? ObjectLiteral::kHasFunction
- : ObjectLiteral::kNoFlags;
- __ Push(Smi::FromInt(flags));
-
- // Pick the right runtime function or stub to call.
- int properties_count = constant_properties->length() / 2;
- if (instr->hydrogen()->depth() > 1) {
- CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
- } else if (flags != ObjectLiteral::kFastElements ||
- properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
- CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
+ if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
+ ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
+ ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
+ CallRuntimeFromDeferred(Runtime::kAllocateInOldPointerSpace, 1, instr);
+ } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
+ ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
+ CallRuntimeFromDeferred(Runtime::kAllocateInOldDataSpace, 1, instr);
} else {
- FastCloneShallowObjectStub stub(properties_count);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
}
+ __ StoreToSafepointRegisterSlot(result, rax);
}
@@ -4892,7 +5073,7 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
// rax = regexp literal clone.
int literal_offset =
FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
- __ LoadHeapObject(rcx, instr->hydrogen()->literals());
+ __ Move(rcx, instr->hydrogen()->literals());
__ movq(rbx, FieldOperand(rcx, literal_offset));
__ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
__ j(not_equal, &materialized, Label::kNear);
@@ -4909,7 +5090,7 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
__ bind(&materialized);
int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
Label allocated, runtime_allocate;
- __ AllocateInNewSpace(size, rax, rcx, rdx, &runtime_allocate, TAG_OBJECT);
+ __ Allocate(size, rax, rcx, rdx, &runtime_allocate, TAG_OBJECT);
__ jmp(&allocated);
__ bind(&runtime_allocate);
@@ -4937,18 +5118,17 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
// Use the fast case closure allocation code that allocates in new
// space for nested functions that don't need literals cloning.
- Handle<SharedFunctionInfo> shared_info = instr->shared_info();
bool pretenure = instr->hydrogen()->pretenure();
- if (!pretenure && shared_info->num_literals() == 0) {
- FastNewClosureStub stub(shared_info->language_mode());
- __ Push(shared_info);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ if (!pretenure && instr->hydrogen()->has_no_literals()) {
+ FastNewClosureStub stub(instr->hydrogen()->language_mode(),
+ instr->hydrogen()->is_generator());
+ __ Move(rbx, instr->hydrogen()->shared_info());
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
} else {
__ push(rsi);
- __ Push(shared_info);
- __ PushRoot(pretenure ?
- Heap::kTrueValueRootIndex :
- Heap::kFalseValueRootIndex);
+ __ Push(instr->hydrogen()->shared_info());
+ __ PushRoot(pretenure ? Heap::kTrueValueRootIndex :
+ Heap::kFalseValueRootIndex);
CallRuntime(Runtime::kNewClosure, 3, instr);
}
}
@@ -4964,12 +5144,7 @@ void LCodeGen::DoTypeof(LTypeof* instr) {
void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
ASSERT(!operand->IsDoubleRegister());
if (operand->IsConstantOperand()) {
- Handle<Object> object = ToHandle(LConstantOperand::cast(operand));
- if (object->IsSmi()) {
- __ Push(Handle<Smi>::cast(object));
- } else {
- __ PushHeapObject(Handle<HeapObject>::cast(object));
- }
+ __ Push(ToHandle(LConstantOperand::cast(operand)));
} else if (operand->IsRegister()) {
__ push(ToRegister(operand));
} else {
@@ -4980,15 +5155,12 @@ void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
Register input = ToRegister(instr->value());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
Condition final_branch_condition =
- EmitTypeofIs(true_label, false_label, input, instr->type_literal());
+ EmitTypeofIs(instr->TrueLabel(chunk_),
+ instr->FalseLabel(chunk_), input, instr->type_literal());
if (final_branch_condition != no_condition) {
- EmitBranch(true_block, false_block, final_branch_condition);
+ EmitBranch(instr, final_branch_condition);
}
}
@@ -4998,14 +5170,14 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
Register input,
Handle<String> type_name) {
Condition final_branch_condition = no_condition;
- if (type_name->Equals(heap()->number_symbol())) {
+ if (type_name->Equals(heap()->number_string())) {
__ JumpIfSmi(input, true_label);
__ CompareRoot(FieldOperand(input, HeapObject::kMapOffset),
Heap::kHeapNumberMapRootIndex);
final_branch_condition = equal;
- } else if (type_name->Equals(heap()->string_symbol())) {
+ } else if (type_name->Equals(heap()->string_string())) {
__ JumpIfSmi(input, false_label);
__ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
__ j(above_equal, false_label);
@@ -5013,17 +5185,22 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
Immediate(1 << Map::kIsUndetectable));
final_branch_condition = zero;
- } else if (type_name->Equals(heap()->boolean_symbol())) {
+ } else if (type_name->Equals(heap()->symbol_string())) {
+ __ JumpIfSmi(input, false_label);
+ __ CmpObjectType(input, SYMBOL_TYPE, input);
+ final_branch_condition = equal;
+
+ } else if (type_name->Equals(heap()->boolean_string())) {
__ CompareRoot(input, Heap::kTrueValueRootIndex);
__ j(equal, true_label);
__ CompareRoot(input, Heap::kFalseValueRootIndex);
final_branch_condition = equal;
- } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_symbol())) {
+ } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_string())) {
__ CompareRoot(input, Heap::kNullValueRootIndex);
final_branch_condition = equal;
- } else if (type_name->Equals(heap()->undefined_symbol())) {
+ } else if (type_name->Equals(heap()->undefined_string())) {
__ CompareRoot(input, Heap::kUndefinedValueRootIndex);
__ j(equal, true_label);
__ JumpIfSmi(input, false_label);
@@ -5033,7 +5210,7 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
Immediate(1 << Map::kIsUndetectable));
final_branch_condition = not_zero;
- } else if (type_name->Equals(heap()->function_symbol())) {
+ } else if (type_name->Equals(heap()->function_string())) {
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
__ JumpIfSmi(input, false_label);
__ CmpObjectType(input, JS_FUNCTION_TYPE, input);
@@ -5041,7 +5218,7 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
__ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE);
final_branch_condition = equal;
- } else if (type_name->Equals(heap()->object_symbol())) {
+ } else if (type_name->Equals(heap()->object_string())) {
__ JumpIfSmi(input, false_label);
if (!FLAG_harmony_typeof) {
__ CompareRoot(input, Heap::kNullValueRootIndex);
@@ -5066,11 +5243,9 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
Register temp = ToRegister(instr->temp());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
EmitIsConstructCall(temp);
- EmitBranch(true_block, false_block, equal);
+ EmitBranch(instr, equal);
}
@@ -5083,7 +5258,7 @@ void LCodeGen::EmitIsConstructCall(Register temp) {
__ Cmp(Operand(temp, StandardFrameConstants::kContextOffset),
Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ j(not_equal, &check_frame_marker, Label::kNear);
- __ movq(temp, Operand(rax, StandardFrameConstants::kCallerFPOffset));
+ __ movq(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset));
// Check the marker in the calling frame.
__ bind(&check_frame_marker);
@@ -5093,6 +5268,7 @@ void LCodeGen::EmitIsConstructCall(Register temp) {
void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
+ if (info()->IsStub()) return;
// Ensure that we have enough space after the previous lazy-bailout
// instruction for patching the code here.
int current_pc = masm()->pc_offset();
@@ -5114,39 +5290,27 @@ void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
- DeoptimizeIf(no_condition, instr->environment());
+ Deoptimizer::BailoutType type = instr->hydrogen()->type();
+ // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
+ // needed return address), even though the implementation of LAZY and EAGER is
+ // now identical. When LAZY is eventually completely folded into EAGER, remove
+ // the special case below.
+ if (info()->IsStub() && type == Deoptimizer::EAGER) {
+ type = Deoptimizer::LAZY;
+ }
+
+ Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
+ DeoptimizeIf(no_condition, instr->environment(), type);
}
-void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
- LOperand* obj = instr->object();
- LOperand* key = instr->key();
- EmitPushTaggedOperand(obj);
- EmitPushTaggedOperand(key);
- ASSERT(instr->HasPointerMap());
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
- // Create safepoint generator that will also ensure enough space in the
- // reloc info for patching in deoptimization (since this is invoking a
- // builtin)
- SafepointGenerator safepoint_generator(
- this, pointers, Safepoint::kLazyDeopt);
- __ Push(Smi::FromInt(strict_mode_flag()));
- __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, safepoint_generator);
+void LCodeGen::DoDummy(LDummy* instr) {
+ // Nothing to see here, move on!
}
-void LCodeGen::DoIn(LIn* instr) {
- LOperand* obj = instr->object();
- LOperand* key = instr->key();
- EmitPushTaggedOperand(key);
- EmitPushTaggedOperand(obj);
- ASSERT(instr->HasPointerMap());
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
- SafepointGenerator safepoint_generator(
- this, pointers, Safepoint::kLazyDeopt);
- __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION, safepoint_generator);
+void LCodeGen::DoDummyUse(LDummyUse* instr) {
+ // Nothing to see here, move on!
}
@@ -5162,12 +5326,14 @@ void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
void LCodeGen::DoStackCheck(LStackCheck* instr) {
- class DeferredStackCheck: public LDeferredCode {
+ class DeferredStackCheck V8_FINAL : public LDeferredCode {
public:
DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
- virtual LInstruction* instr() { return instr_; }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredStackCheck(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LStackCheck* instr_;
};
@@ -5181,8 +5347,9 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
Label done;
__ CompareRoot(rsp, Heap::kStackLimitRootIndex);
__ j(above_equal, &done, Label::kNear);
- StackCheckStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(isolate()->builtins()->StackCheck(),
+ RelocInfo::CODE_TARGET,
+ instr);
EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
last_lazy_deopt_pc_ = masm()->pc_offset();
__ bind(&done);
@@ -5212,15 +5379,13 @@ void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
// properly registered for deoptimization and records the assembler's PC
// offset.
LEnvironment* environment = instr->environment();
- environment->SetSpilledRegisters(instr->SpilledRegisterArray(),
- instr->SpilledDoubleRegisterArray());
// If the environment were already registered, we would have no way of
// backpatching it with the spill slot operands.
ASSERT(!environment->HasBeenRegistered());
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
- ASSERT(osr_pc_offset_ == -1);
- osr_pc_offset_ = masm()->pc_offset();
+
+ GenerateOsrPrologue();
}
diff --git a/deps/v8/src/x64/lithium-codegen-x64.h b/deps/v8/src/x64/lithium-codegen-x64.h
index 65b398016..f3f202a27 100644
--- a/deps/v8/src/x64/lithium-codegen-x64.h
+++ b/deps/v8/src/x64/lithium-codegen-x64.h
@@ -32,8 +32,10 @@
#include "checks.h"
#include "deoptimizer.h"
+#include "lithium-codegen.h"
#include "safepoint-table.h"
#include "scopes.h"
+#include "v8utils.h"
#include "x64/lithium-gap-resolver-x64.h"
namespace v8 {
@@ -43,46 +45,52 @@ namespace internal {
class LDeferredCode;
class SafepointGenerator;
-class LCodeGen BASE_EMBEDDED {
+class LCodeGen: public LCodeGenBase {
public:
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
- : zone_(info->zone()),
- chunk_(static_cast<LPlatformChunk*>(chunk)),
- masm_(assembler),
- info_(info),
- current_block_(-1),
- current_instruction_(-1),
- instructions_(chunk->instructions()),
+ : LCodeGenBase(chunk, assembler, info),
deoptimizations_(4, info->zone()),
jump_table_(4, info->zone()),
deoptimization_literals_(8, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
- status_(UNUSED),
translations_(info->zone()),
deferred_(8, info->zone()),
osr_pc_offset_(-1),
- last_lazy_deopt_pc_(0),
+ frame_is_built_(false),
safepoints_(info->zone()),
resolver_(this),
expected_safepoint_kind_(Safepoint::kSimple) {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
}
- // Simple accessors.
- MacroAssembler* masm() const { return masm_; }
- CompilationInfo* info() const { return info_; }
- Isolate* isolate() const { return info_->isolate(); }
- Factory* factory() const { return isolate()->factory(); }
- Heap* heap() const { return isolate()->heap(); }
- Zone* zone() const { return zone_; }
+ int LookupDestination(int block_id) const {
+ return chunk()->LookupDestination(block_id);
+ }
+
+ bool IsNextEmittedBlock(int block_id) const {
+ return LookupDestination(block_id) == GetNextEmittedBlock();
+ }
+
+ bool NeedsEagerFrame() const {
+ return GetStackSlotCount() > 0 ||
+ info()->is_non_deferred_calling() ||
+ !info()->IsStub() ||
+ info()->requires_frame();
+ }
+ bool NeedsDeferredFrame() const {
+ return !NeedsEagerFrame() && info()->is_deferred_calling();
+ }
// Support for converting LOperands to assembler types.
Register ToRegister(LOperand* op) const;
XMMRegister ToDoubleRegister(LOperand* op) const;
bool IsInteger32Constant(LConstantOperand* op) const;
- int ToInteger32(LConstantOperand* op) const;
+ bool IsSmiConstant(LConstantOperand* op) const;
+ int32_t ToInteger32(LConstantOperand* op) const;
+ Smi* ToSmi(LConstantOperand* op) const;
double ToDouble(LConstantOperand* op) const;
+ ExternalReference ToExternalReference(LConstantOperand* op) const;
bool IsTaggedConstant(LConstantOperand* op) const;
Handle<Object> ToHandle(LConstantOperand* op) const;
Operand ToOperand(LOperand* op) const;
@@ -99,28 +107,22 @@ class LCodeGen BASE_EMBEDDED {
// Deferred code support.
void DoDeferredNumberTagD(LNumberTagD* instr);
void DoDeferredNumberTagU(LNumberTagU* instr);
- void DoDeferredTaggedToI(LTaggedToI* instr);
- void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
+ void DoDeferredTaggedToI(LTaggedToI* instr, Label* done);
+ void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
void DoDeferredStackCheck(LStackCheck* instr);
- void DoDeferredRandom(LRandom* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
- void DoDeferredAllocateObject(LAllocateObject* instr);
+ void DoDeferredAllocate(LAllocate* instr);
void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label* map_check);
-
- void DoCheckMapCommon(Register reg, Handle<Map> map,
- CompareMapMode mode, LEnvironment* env);
+ void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
// Parallel move support.
void DoParallelMove(LParallelMove* move);
void DoGap(LGap* instr);
// Emit frame translation commands for an environment.
- void WriteTranslation(LEnvironment* environment,
- Translation* translation,
- int* arguments_index,
- int* arguments_count);
+ void WriteTranslation(LEnvironment* environment, Translation* translation);
// Declare methods that deal with the individual node types.
#define DECLARE_DO(type) void Do##type(L##type* node);
@@ -128,27 +130,15 @@ class LCodeGen BASE_EMBEDDED {
#undef DECLARE_DO
private:
- enum Status {
- UNUSED,
- GENERATING,
- DONE,
- ABORTED
- };
-
- bool is_unused() const { return status_ == UNUSED; }
- bool is_generating() const { return status_ == GENERATING; }
- bool is_done() const { return status_ == DONE; }
- bool is_aborted() const { return status_ == ABORTED; }
-
StrictModeFlag strict_mode_flag() const {
return info()->is_classic_mode() ? kNonStrictMode : kStrictMode;
}
LPlatformChunk* chunk() const { return chunk_; }
Scope* scope() const { return scope_; }
- HGraph* graph() const { return chunk_->graph(); }
+ HGraph* graph() const { return chunk()->graph(); }
- int GetNextEmittedBlock(int block);
+ XMMRegister double_scratch0() const { return xmm0; }
void EmitClassOfTest(Label* if_true,
Label* if_false,
@@ -158,21 +148,21 @@ class LCodeGen BASE_EMBEDDED {
Register scratch);
int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
- int GetParameterCount() const { return scope()->num_parameters(); }
- void Abort(const char* reason);
- void Comment(const char* format, ...);
+ void Abort(BailoutReason reason);
void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
// Code generation passes. Returns true if code generation should
// continue.
bool GeneratePrologue();
- bool GenerateBody();
bool GenerateDeferredCode();
bool GenerateJumpTable();
bool GenerateSafepointTable();
+ // Generates the custom OSR entrypoint and sets the osr_pc_offset.
+ void GenerateOsrPrologue();
+
enum SafepointMode {
RECORD_SIMPLE_SAFEPOINT,
RECORD_SAFEPOINT_WITH_REGISTERS
@@ -191,7 +181,8 @@ class LCodeGen BASE_EMBEDDED {
void CallRuntime(const Runtime::Function* function,
int num_arguments,
- LInstruction* instr);
+ LInstruction* instr,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs);
void CallRuntime(Runtime::FunctionId id,
int num_arguments,
@@ -212,25 +203,31 @@ class LCodeGen BASE_EMBEDDED {
// Generate a direct call to a known function. Expects the function
// to be in rdi.
void CallKnownFunction(Handle<JSFunction> function,
+ int formal_parameter_count,
int arity,
LInstruction* instr,
CallKind call_kind,
RDIState rdi_state);
-
void RecordSafepointWithLazyDeopt(LInstruction* instr,
SafepointMode safepoint_mode,
int argc);
void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode);
+ void DeoptimizeIf(Condition cc,
+ LEnvironment* environment,
+ Deoptimizer::BailoutType bailout_type);
void DeoptimizeIf(Condition cc, LEnvironment* environment);
+ void ApplyCheckIf(Condition cc, LBoundsCheck* check);
- void AddToTranslation(Translation* translation,
+ void AddToTranslation(LEnvironment* environment,
+ Translation* translation,
LOperand* op,
bool is_tagged,
bool is_uint32,
- int arguments_index,
- int arguments_count);
+ int* object_index_pointer,
+ int* dematerialized_index_pointer);
+ void RegisterDependentCodeForEmbeddedMaps(Handle<Code> code);
void PopulateDeoptimizationData(Handle<Code> code);
int DefineDeoptimizationLiteral(Handle<Object> literal);
@@ -245,17 +242,8 @@ class LCodeGen BASE_EMBEDDED {
uint32_t offset,
uint32_t additional_index = 0);
- // Specific math operations - used from DoUnaryMathOperation.
- void EmitIntegerMathAbs(LUnaryMathOperation* instr);
- void DoMathAbs(LUnaryMathOperation* instr);
- void DoMathFloor(LUnaryMathOperation* instr);
- void DoMathRound(LUnaryMathOperation* instr);
- void DoMathSqrt(LUnaryMathOperation* instr);
- void DoMathPowHalf(LUnaryMathOperation* instr);
- void DoMathLog(LUnaryMathOperation* instr);
- void DoMathTan(LUnaryMathOperation* instr);
- void DoMathCos(LUnaryMathOperation* instr);
- void DoMathSin(LUnaryMathOperation* instr);
+ void EmitIntegerMathAbs(LMathAbs* instr);
+ void EmitSmiMathAbs(LMathAbs* instr);
// Support for recording safepoint and position information.
void RecordSafepoint(LPointerMap* pointers,
@@ -267,21 +255,21 @@ class LCodeGen BASE_EMBEDDED {
void RecordSafepointWithRegisters(LPointerMap* pointers,
int arguments,
Safepoint::DeoptMode mode);
- void RecordPosition(int position);
+ void RecordAndWritePosition(int position) V8_OVERRIDE;
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block);
- void EmitBranch(int left_block, int right_block, Condition cc);
- void EmitNumberUntagD(Register input,
- XMMRegister result,
- bool deoptimize_on_undefined,
- bool deoptimize_on_minus_zero,
- LEnvironment* env);
-
-
- void DeoptIfTaggedButNotSmi(LEnvironment* environment,
- HValue* value,
- LOperand* operand);
+ template<class InstrType>
+ void EmitBranch(InstrType instr, Condition cc);
+ template<class InstrType>
+ void EmitFalseBranch(InstrType instr, Condition cc);
+ void EmitNumberUntagD(
+ Register input,
+ XMMRegister result,
+ bool allow_undefined_as_nan,
+ bool deoptimize_on_minus_zero,
+ LEnvironment* env,
+ NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED);
// Emits optimized code for typeof x == "y". Modifies input register.
// Returns the condition on which a final split to
@@ -303,18 +291,13 @@ class LCodeGen BASE_EMBEDDED {
// true and false label should be made, to optimize fallthrough.
Condition EmitIsString(Register input,
Register temp1,
- Label* is_not_string);
+ Label* is_not_string,
+ SmiCheck check_needed);
// Emits optimized code for %_IsConstructCall().
// Caller should branch on equal condition.
void EmitIsConstructCall(Register temp);
- void EmitLoadFieldOrConstantFunction(Register result,
- Register object,
- Handle<Map> type,
- Handle<String> name,
- LEnvironment* env);
-
// Emits code for pushing either a tagged constant, a (non-double)
// register, or a stack slot operand.
void EmitPushTaggedOperand(LOperand* operand);
@@ -324,36 +307,33 @@ class LCodeGen BASE_EMBEDDED {
void EmitDeepCopy(Handle<JSObject> object,
Register result,
Register source,
- int* offset);
-
- struct JumpTableEntry {
- explicit inline JumpTableEntry(Address entry)
- : label(),
- address(entry) { }
- Label label;
- Address address;
- };
-
- void EnsureSpaceForLazyDeopt(int space_needed);
-
- Zone* zone_;
- LPlatformChunk* const chunk_;
- MacroAssembler* const masm_;
- CompilationInfo* const info_;
+ int* offset,
+ AllocationSiteMode mode);
+
+ void EnsureSpaceForLazyDeopt(int space_needed) V8_OVERRIDE;
+ void DoLoadKeyedExternalArray(LLoadKeyed* instr);
+ void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
+ void DoLoadKeyedFixedArray(LLoadKeyed* instr);
+ void DoStoreKeyedExternalArray(LStoreKeyed* instr);
+ void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
+ void DoStoreKeyedFixedArray(LStoreKeyed* instr);
+#ifdef _MSC_VER
+ // On windows, you may not access the stack more than one page below
+ // the most recently mapped page. To make the allocated area randomly
+ // accessible, we write an arbitrary value to each page in range
+ // rsp + offset - page_size .. rsp in turn.
+ void MakeSureStackPagesMapped(int offset);
+#endif
- int current_block_;
- int current_instruction_;
- const ZoneList<LInstruction*>* instructions_;
ZoneList<LEnvironment*> deoptimizations_;
- ZoneList<JumpTableEntry> jump_table_;
+ ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
ZoneList<Handle<Object> > deoptimization_literals_;
int inlined_function_count_;
Scope* const scope_;
- Status status_;
TranslationBuffer translations_;
ZoneList<LDeferredCode*> deferred_;
int osr_pc_offset_;
- int last_lazy_deopt_pc_;
+ bool frame_is_built_;
// Builder that keeps track of safepoints in the code. The table
// itself is emitted at the end of the generated code.
@@ -364,10 +344,11 @@ class LCodeGen BASE_EMBEDDED {
Safepoint::Kind expected_safepoint_kind_;
- class PushSafepointRegistersScope BASE_EMBEDDED {
+ class PushSafepointRegistersScope V8_FINAL BASE_EMBEDDED {
public:
explicit PushSafepointRegistersScope(LCodeGen* codegen)
: codegen_(codegen) {
+ ASSERT(codegen_->info()->is_calling());
ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
codegen_->masm_->PushSafepointRegisters();
codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
@@ -399,13 +380,14 @@ class LDeferredCode: public ZoneObject {
codegen->AddDeferredCode(this);
}
- virtual ~LDeferredCode() { }
+ virtual ~LDeferredCode() {}
virtual void Generate() = 0;
virtual LInstruction* instr() = 0;
void SetExit(Label* exit) { external_exit_ = exit; }
Label* entry() { return &entry_; }
Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
+ Label* done() { return codegen_->NeedsDeferredFrame() ? &done_ : exit(); }
int instruction_index() const { return instruction_index_; }
protected:
@@ -416,6 +398,7 @@ class LDeferredCode: public ZoneObject {
LCodeGen* codegen_;
Label entry_;
Label exit_;
+ Label done_;
Label* external_exit_;
int instruction_index_;
};
diff --git a/deps/v8/src/x64/lithium-gap-resolver-x64.cc b/deps/v8/src/x64/lithium-gap-resolver-x64.cc
index 22183a2f8..8d1c2a283 100644
--- a/deps/v8/src/x64/lithium-gap-resolver-x64.cc
+++ b/deps/v8/src/x64/lithium-gap-resolver-x64.cc
@@ -27,7 +27,7 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_X64)
+#if V8_TARGET_ARCH_X64
#include "x64/lithium-gap-resolver-x64.h"
#include "x64/lithium-codegen-x64.h"
@@ -195,20 +195,34 @@ void LGapResolver::EmitMove(int index) {
LConstantOperand* constant_source = LConstantOperand::cast(source);
if (destination->IsRegister()) {
Register dst = cgen_->ToRegister(destination);
- if (cgen_->IsInteger32Constant(constant_source)) {
+ if (cgen_->IsSmiConstant(constant_source)) {
+ __ Move(dst, cgen_->ToSmi(constant_source));
+ } else if (cgen_->IsInteger32Constant(constant_source)) {
__ movl(dst, Immediate(cgen_->ToInteger32(constant_source)));
} else {
- __ LoadObject(dst, cgen_->ToHandle(constant_source));
+ __ Move(dst, cgen_->ToHandle(constant_source));
+ }
+ } else if (destination->IsDoubleRegister()) {
+ double v = cgen_->ToDouble(constant_source);
+ uint64_t int_val = BitCast<uint64_t, double>(v);
+ XMMRegister dst = cgen_->ToDoubleRegister(destination);
+ if (int_val == 0) {
+ __ xorps(dst, dst);
+ } else {
+ __ movq(kScratchRegister, int_val, RelocInfo::NONE64);
+ __ movq(dst, kScratchRegister);
}
} else {
ASSERT(destination->IsStackSlot());
Operand dst = cgen_->ToOperand(destination);
- if (cgen_->IsInteger32Constant(constant_source)) {
+ if (cgen_->IsSmiConstant(constant_source)) {
+ __ Move(dst, cgen_->ToSmi(constant_source));
+ } else if (cgen_->IsInteger32Constant(constant_source)) {
// Zero top 32 bits of a 64 bit spill slot that holds a 32 bit untagged
// value.
__ movq(dst, Immediate(cgen_->ToInteger32(constant_source)));
} else {
- __ LoadObject(kScratchRegister, cgen_->ToHandle(constant_source));
+ __ Move(kScratchRegister, cgen_->ToHandle(constant_source));
__ movq(dst, kScratchRegister);
}
}
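
[Editor's sketch, not part of the patch] The constant-to-XMM move added to LGapResolver::EmitMove above: the double is reinterpreted as its 64-bit pattern, and an all-zero pattern (i.e. +0.0) lets the resolver emit xorps instead of materialising the immediate through kScratchRegister.

#include <cstdint>
#include <cstring>

uint64_t DoubleBits(double v) {
  uint64_t bits;
  std::memcpy(&bits, &v, sizeof bits);  // BitCast<uint64_t, double>(v)
  return bits;
}

// +0.0 is the only double whose bit pattern is all zeroes; -0.0 is not,
// so it still goes through the movq path.
bool CanUseXorps(double v) { return DoubleBits(v) == 0; }
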
@@ -248,7 +262,7 @@ void LGapResolver::EmitSwap(int index) {
// Swap two general-purpose registers.
Register src = cgen_->ToRegister(source);
Register dst = cgen_->ToRegister(destination);
- __ xchg(dst, src);
+ __ xchgq(dst, src);
} else if ((source->IsRegister() && destination->IsStackSlot()) ||
(source->IsStackSlot() && destination->IsRegister())) {
diff --git a/deps/v8/src/x64/lithium-gap-resolver-x64.h b/deps/v8/src/x64/lithium-gap-resolver-x64.h
index d82845592..f218455b6 100644
--- a/deps/v8/src/x64/lithium-gap-resolver-x64.h
+++ b/deps/v8/src/x64/lithium-gap-resolver-x64.h
@@ -38,7 +38,7 @@ namespace internal {
class LCodeGen;
class LGapResolver;
-class LGapResolver BASE_EMBEDDED {
+class LGapResolver V8_FINAL BASE_EMBEDDED {
public:
explicit LGapResolver(LCodeGen* owner);
diff --git a/deps/v8/src/x64/lithium-x64.cc b/deps/v8/src/x64/lithium-x64.cc
index 43fb8b9ba..8f26a09e9 100644
--- a/deps/v8/src/x64/lithium-x64.cc
+++ b/deps/v8/src/x64/lithium-x64.cc
@@ -27,11 +27,12 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_X64)
+#if V8_TARGET_ARCH_X64
#include "lithium-allocator-inl.h"
#include "x64/lithium-x64.h"
#include "x64/lithium-codegen-x64.h"
+#include "hydrogen-osr.h"
namespace v8 {
namespace internal {
@@ -43,31 +44,6 @@ namespace internal {
LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
#undef DEFINE_COMPILE
-LOsrEntry::LOsrEntry() {
- for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
- register_spills_[i] = NULL;
- }
- for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; ++i) {
- double_register_spills_[i] = NULL;
- }
-}
-
-
-void LOsrEntry::MarkSpilledRegister(int allocation_index,
- LOperand* spill_operand) {
- ASSERT(spill_operand->IsStackSlot());
- ASSERT(register_spills_[allocation_index] == NULL);
- register_spills_[allocation_index] = spill_operand;
-}
-
-
-void LOsrEntry::MarkSpilledDoubleRegister(int allocation_index,
- LOperand* spill_operand) {
- ASSERT(spill_operand->IsDoubleStackSlot());
- ASSERT(double_register_spills_[allocation_index] == NULL);
- double_register_spills_[allocation_index] = spill_operand;
-}
-
#ifdef DEBUG
void LInstruction::VerifyCall() {
@@ -114,7 +90,11 @@ void LInstruction::PrintDataTo(StringStream* stream) {
stream->Add("= ");
for (int i = 0; i < InputCount(); i++) {
if (i > 0) stream->Add(" ");
- InputAt(i)->PrintTo(stream);
+ if (InputAt(i) == NULL) {
+ stream->Add("NULL");
+ } else {
+ InputAt(i)->PrintTo(stream);
+ }
}
}
@@ -179,6 +159,7 @@ const char* LArithmeticT::Mnemonic() const {
case Token::BIT_AND: return "bit-and-t";
case Token::BIT_OR: return "bit-or-t";
case Token::BIT_XOR: return "bit-xor-t";
+ case Token::ROR: return "ror-t";
case Token::SHL: return "sal-t";
case Token::SAR: return "sar-t";
case Token::SHR: return "shr-t";
@@ -189,6 +170,11 @@ const char* LArithmeticT::Mnemonic() const {
}
+bool LGoto::HasInterestingComment(LCodeGen* gen) const {
+ return !gen->IsNextEmittedBlock(block_id());
+}
+
+
void LGoto::PrintDataTo(StringStream* stream) {
stream->Add("B%d", block_id());
}
@@ -200,7 +186,7 @@ void LBranch::PrintDataTo(StringStream* stream) {
}
-void LCmpIDAndBranch::PrintDataTo(StringStream* stream) {
+void LCompareNumericAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if ");
left()->PrintTo(stream);
stream->Add(" %s ", Token::String(op()));
@@ -209,15 +195,6 @@ void LCmpIDAndBranch::PrintDataTo(StringStream* stream) {
}
-void LIsNilAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if ");
- value()->PrintTo(stream);
- stream->Add(kind() == kStrictEquality ? " === " : " == ");
- stream->Add(nil() == kNullValue ? "null" : "undefined");
- stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
void LIsObjectAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if is_object(");
value()->PrintTo(stream);
@@ -287,14 +264,23 @@ void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
}
-void LCallConstantFunction::PrintDataTo(StringStream* stream) {
- stream->Add("#%d / ", arity());
+void LStoreCodeEntry::PrintDataTo(StringStream* stream) {
+ stream->Add(" = ");
+ function()->PrintTo(stream);
+ stream->Add(".code_entry = ");
+ code_object()->PrintTo(stream);
}
-void LUnaryMathOperation::PrintDataTo(StringStream* stream) {
- stream->Add("/%s ", hydrogen()->OpName());
- value()->PrintTo(stream);
+void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
+ stream->Add(" = ");
+ base_object()->PrintTo(stream);
+ stream->Add(" + %d", offset());
+}
+
+
+void LCallConstantFunction::PrintDataTo(StringStream* stream) {
+ stream->Add("#%d / ", arity());
}
@@ -347,6 +333,15 @@ void LCallNew::PrintDataTo(StringStream* stream) {
}
+void LCallNewArray::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ constructor()->PrintTo(stream);
+ stream->Add(" #%d / ", arity());
+ ElementsKind kind = hydrogen()->elements_kind();
+ stream->Add(" (%s) ", ElementsKindToString(kind));
+}
+
+
void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
arguments()->PrintTo(stream);
@@ -358,19 +353,20 @@ void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
}
-int LPlatformChunk::GetNextSpillIndex(bool is_double) {
+int LPlatformChunk::GetNextSpillIndex(RegisterKind kind) {
return spill_slot_count_++;
}
-LOperand* LPlatformChunk::GetNextSpillSlot(bool is_double) {
+LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) {
// All stack slots are Double stack slots on x64.
// Alternatively, at some point, start using half-size
// stack slots for int32 values.
- int index = GetNextSpillIndex(is_double);
- if (is_double) {
+ int index = GetNextSpillIndex(kind);
+ if (kind == DOUBLE_REGISTERS) {
return LDoubleStackSlot::Create(index, zone());
} else {
+ ASSERT(kind == GENERAL_REGISTERS);
return LStackSlot::Create(index, zone());
}
}
@@ -378,8 +374,7 @@ LOperand* LPlatformChunk::GetNextSpillSlot(bool is_double) {
void LStoreNamedField::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
- stream->Add(".");
- stream->Add(*String::cast(*name())->ToCString());
+ hydrogen()->access().PrintTo(stream);
stream->Add(" <- ");
value()->PrintTo(stream);
}
@@ -394,21 +389,35 @@ void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
}
-void LStoreKeyedFastElement::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
+void LLoadKeyed::PrintDataTo(StringStream* stream) {
+ elements()->PrintTo(stream);
stream->Add("[");
key()->PrintTo(stream);
- stream->Add("] <- ");
- value()->PrintTo(stream);
+ if (hydrogen()->IsDehoisted()) {
+ stream->Add(" + %d]", additional_index());
+ } else {
+ stream->Add("]");
+ }
}
-void LStoreKeyedFastDoubleElement::PrintDataTo(StringStream* stream) {
+void LStoreKeyed::PrintDataTo(StringStream* stream) {
elements()->PrintTo(stream);
stream->Add("[");
key()->PrintTo(stream);
- stream->Add("] <- ");
- value()->PrintTo(stream);
+ if (hydrogen()->IsDehoisted()) {
+ stream->Add(" + %d] <-", additional_index());
+ } else {
+ stream->Add("] <- ");
+ }
+
+ if (value() == NULL) {
+ ASSERT(hydrogen()->IsConstantHoleStore() &&
+ hydrogen()->value()->representation().IsDouble());
+ stream->Add("<the hole(nan)>");
+ } else {
+ value()->PrintTo(stream);
+ }
}
@@ -430,8 +439,17 @@ void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
LPlatformChunk* LChunkBuilder::Build() {
ASSERT(is_unused());
chunk_ = new(zone()) LPlatformChunk(info(), graph());
- HPhase phase("L_Building chunk", chunk_);
+ LPhase phase("L_Building chunk", chunk_);
status_ = BUILDING;
+
+ // If compiling for OSR, reserve space for the unoptimized frame,
+ // which will be subsumed into this frame.
+ if (graph()->has_osr()) {
+ for (int i = graph()->osr()->UnoptimizedFrameSlots(); i > 0; i--) {
+ chunk_->GetNextSpillIndex(GENERAL_REGISTERS);
+ }
+ }
+
const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
for (int i = 0; i < blocks->length(); i++) {
HBasicBlock* next = NULL;
@@ -444,7 +462,7 @@ LPlatformChunk* LChunkBuilder::Build() {
}
-void LCodeGen::Abort(const char* reason) {
+void LCodeGen::Abort(BailoutReason reason) {
info()->set_bailout_reason(reason);
status_ = ABORTED;
}
@@ -528,6 +546,11 @@ LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) {
}
+LOperand* LChunkBuilder::UseConstant(HValue* value) {
+ return chunk_->DefineConstantOperand(HConstant::cast(value));
+}
+
+
LOperand* LChunkBuilder::UseAny(HValue* value) {
return value->IsConstant()
? chunk_->DefineConstantOperand(HConstant::cast(value))
@@ -597,8 +620,10 @@ LInstruction* LChunkBuilder::DefineFixedDouble(
LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
HEnvironment* hydrogen_env = current_block_->last_environment();
int argument_index_accumulator = 0;
+ ZoneList<HValue*> objects_to_materialize(0, zone());
instr->set_environment(CreateEnvironment(hydrogen_env,
- &argument_index_accumulator));
+ &argument_index_accumulator,
+ &objects_to_materialize));
return instr;
}
@@ -606,6 +631,8 @@ LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
HInstruction* hinstr,
CanDeoptimize can_deoptimize) {
+ info()->MarkAsNonDeferredCalling();
+
#ifdef DEBUG
instr->VerifyCall();
#endif
@@ -638,7 +665,7 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
ASSERT(!instr->HasPointerMap());
- instr->set_pointer_map(new(zone()) LPointerMap(position_, zone()));
+ instr->set_pointer_map(new(zone()) LPointerMap(zone()));
return instr;
}
@@ -646,8 +673,12 @@ LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
LUnallocated* LChunkBuilder::TempRegister() {
LUnallocated* operand =
new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
- operand->set_virtual_register(allocator_->GetVirtualRegister());
- if (!allocator_->AllocationOk()) Abort("Not enough virtual registers.");
+ int vreg = allocator_->GetVirtualRegister();
+ if (!allocator_->AllocationOk()) {
+ Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister);
+ vreg = 0;
+ }
+ operand->set_virtual_register(vreg);
return operand;
}
@@ -671,8 +702,14 @@ LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
}
-LInstruction* LChunkBuilder::DoSoftDeoptimize(HSoftDeoptimize* instr) {
- return AssignEnvironment(new(zone()) LDeoptimize);
+LInstruction* LChunkBuilder::DoDummyUse(HDummyUse* instr) {
+ return DefineAsRegister(new(zone()) LDummyUse(UseAny(instr->value())));
+}
+
+
+LInstruction* LChunkBuilder::DoEnvironmentMarker(HEnvironmentMarker* instr) {
+ UNREACHABLE();
+ return NULL;
}
@@ -683,51 +720,39 @@ LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
LInstruction* LChunkBuilder::DoShift(Token::Value op,
HBitwiseBinaryOperation* instr) {
- if (instr->representation().IsTagged()) {
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
-
- LOperand* left = UseFixed(instr->left(), rdx);
- LOperand* right = UseFixed(instr->right(), rax);
- LArithmeticT* result = new(zone()) LArithmeticT(op, left, right);
- return MarkAsCall(DefineFixed(result, rax), instr);
- }
-
- ASSERT(instr->representation().IsInteger32());
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->left());
-
- HValue* right_value = instr->right();
- LOperand* right = NULL;
- int constant_value = 0;
- if (right_value->IsConstant()) {
- HConstant* constant = HConstant::cast(right_value);
- right = chunk_->DefineConstantOperand(constant);
- constant_value = constant->Integer32Value() & 0x1f;
- } else {
- right = UseFixed(right_value, rcx);
- }
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* left = UseRegisterAtStart(instr->left());
- // Shift operations can only deoptimize if we do a logical shift by 0 and
- // the result cannot be truncated to int32.
- bool does_deopt = false;
- if (op == Token::SHR && constant_value == 0) {
- if (FLAG_opt_safe_uint32_operations) {
- does_deopt = !instr->CheckFlag(HInstruction::kUint32);
+ HValue* right_value = instr->right();
+ LOperand* right = NULL;
+ int constant_value = 0;
+ if (right_value->IsConstant()) {
+ HConstant* constant = HConstant::cast(right_value);
+ right = chunk_->DefineConstantOperand(constant);
+ constant_value = constant->Integer32Value() & 0x1f;
} else {
- for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
- if (!it.value()->CheckFlag(HValue::kTruncatingToInt32)) {
- does_deopt = true;
- break;
- }
+ right = UseFixed(right_value, rcx);
+ }
+
+ // Shift operations can only deoptimize if we do a logical shift by 0 and
+ // the result cannot be truncated to int32.
+ bool does_deopt = false;
+ if (op == Token::SHR && constant_value == 0) {
+ if (FLAG_opt_safe_uint32_operations) {
+ does_deopt = !instr->CheckFlag(HInstruction::kUint32);
+ } else {
+ does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToInt32);
}
}
- }
- LInstruction* result =
- DefineSameAsFirst(new(zone()) LShiftI(op, left, right, does_deopt));
- return does_deopt ? AssignEnvironment(result) : result;
+ LInstruction* result =
+ DefineSameAsFirst(new(zone()) LShiftI(op, left, right, does_deopt));
+ return does_deopt ? AssignEnvironment(result) : result;
+ } else {
+ return DoArithmeticT(op, instr);
+ }
}
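A minimal sketch, not part of the patch, of the deoptimization condition noted in the comment above: a logical shift right produces an unsigned 32-bit result, so shifting a negative int32 by 0 yields a value that no longer fits in int32 unless every use truncates it back to 32 bits.

  #include <cstdint>
  #include <cstdio>

  int main() {
    int32_t x = -1;
    // Equivalent of the JS expression x >>> 0 on an int32 input.
    uint32_t shr0 = static_cast<uint32_t>(x) >> 0;
    std::printf("x >>> 0 = %u, INT32_MAX = %d\n", shr0, INT32_MAX);  // 4294967295 > 2147483647
    return 0;
  }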
@@ -736,21 +761,22 @@ LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
ASSERT(instr->representation().IsDouble());
ASSERT(instr->left()->representation().IsDouble());
ASSERT(instr->right()->representation().IsDouble());
- ASSERT(op != Token::MOD);
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
- LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
- return DefineSameAsFirst(result);
+ if (op == Token::MOD) {
+ LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ LOperand* right = UseFixedDouble(instr->BetterRightOperand(), xmm1);
+ LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
+ return MarkAsCall(DefineSameAsFirst(result), instr);
+ } else {
+ LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ LOperand* right = UseRegisterAtStart(instr->BetterRightOperand());
+ LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
+ return DefineSameAsFirst(result);
+ }
}
LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
- HArithmeticBinaryOperation* instr) {
- ASSERT(op == Token::ADD ||
- op == Token::DIV ||
- op == Token::MOD ||
- op == Token::MUL ||
- op == Token::SUB);
+ HBinaryOperation* instr) {
HValue* left = instr->left();
HValue* right = instr->right();
ASSERT(left->representation().IsTagged());
@@ -796,11 +822,15 @@ void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
HEnvironment* last_environment = pred->last_environment();
for (int i = 0; i < block->phis()->length(); ++i) {
HPhi* phi = block->phis()->at(i);
- last_environment->SetValueAt(phi->merged_index(), phi);
+ if (phi->HasMergedIndex()) {
+ last_environment->SetValueAt(phi->merged_index(), phi);
+ }
}
for (int i = 0; i < block->deleted_phis()->length(); ++i) {
- last_environment->SetValueAt(block->deleted_phis()->at(i),
- graph_->GetConstantUndefined());
+ if (block->deleted_phis()->at(i) < last_environment->length()) {
+ last_environment->SetValueAt(block->deleted_phis()->at(i),
+ graph_->GetConstantUndefined());
+ }
}
block->UpdateEnvironment(last_environment);
// Pick up the outgoing argument count of one of the predecessors.
@@ -829,17 +859,68 @@ void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
void LChunkBuilder::VisitInstruction(HInstruction* current) {
HInstruction* old_current = current_instruction_;
current_instruction_ = current;
- if (current->has_position()) position_ = current->position();
- LInstruction* instr = current->CompileToLithium(this);
+
+ LInstruction* instr = NULL;
+ if (current->CanReplaceWithDummyUses()) {
+ if (current->OperandCount() == 0) {
+ instr = DefineAsRegister(new(zone()) LDummy());
+ } else {
+ instr = DefineAsRegister(new(zone())
+ LDummyUse(UseAny(current->OperandAt(0))));
+ }
+ for (int i = 1; i < current->OperandCount(); ++i) {
+ LInstruction* dummy =
+ new(zone()) LDummyUse(UseAny(current->OperandAt(i)));
+ dummy->set_hydrogen_value(current);
+ chunk_->AddInstruction(dummy, current_block_);
+ }
+ } else {
+ instr = current->CompileToLithium(this);
+ }
+
+ argument_count_ += current->argument_delta();
+ ASSERT(argument_count_ >= 0);
if (instr != NULL) {
+ // Associate the hydrogen instruction first, since we may need it for
+ // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below.
+ instr->set_hydrogen_value(current);
+
+#if DEBUG
+ // Make sure that the lithium instruction has either no fixed register
+ // constraints in temps or the result OR no uses that are only used at
+ // start. If this invariant doesn't hold, the register allocator can decide
+ // to insert a split of a range immediately before the instruction due to an
+ // already allocated register needing to be used for the instruction's fixed
+ // register constraint. In this case, the register allocator won't see an
+ // interference between the split child and the use-at-start (it would if
+ // it were just a plain use), so it is free to move the split child into
+ // the same register that is used for the use-at-start.
+ // See https://code.google.com/p/chromium/issues/detail?id=201590
+ if (!(instr->ClobbersRegisters() && instr->ClobbersDoubleRegisters())) {
+ int fixed = 0;
+ int used_at_start = 0;
+ for (UseIterator it(instr); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
+ if (operand->IsUsedAtStart()) ++used_at_start;
+ }
+ if (instr->Output() != NULL) {
+ if (LUnallocated::cast(instr->Output())->HasFixedPolicy()) ++fixed;
+ }
+ for (TempIterator it(instr); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
+ if (operand->HasFixedPolicy()) ++fixed;
+ }
+ ASSERT(fixed == 0 || used_at_start == 0);
+ }
+#endif
+
if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
instr = AssignPointerMap(instr);
}
if (FLAG_stress_environments && !instr->HasEnvironment()) {
instr = AssignEnvironment(instr);
}
- instr->set_hydrogen_value(current);
chunk_->AddInstruction(instr, current_block_);
}
current_instruction_ = old_current;
@@ -848,15 +929,17 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
LEnvironment* LChunkBuilder::CreateEnvironment(
HEnvironment* hydrogen_env,
- int* argument_index_accumulator) {
+ int* argument_index_accumulator,
+ ZoneList<HValue*>* objects_to_materialize) {
if (hydrogen_env == NULL) return NULL;
- LEnvironment* outer =
- CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
+ LEnvironment* outer = CreateEnvironment(hydrogen_env->outer(),
+ argument_index_accumulator,
+ objects_to_materialize);
BailoutId ast_id = hydrogen_env->ast_id();
ASSERT(!ast_id.IsNone() ||
hydrogen_env->frame_type() != JS_FUNCTION);
- int value_count = hydrogen_env->length();
+ int value_count = hydrogen_env->length() - hydrogen_env->specials_count();
LEnvironment* result = new(zone()) LEnvironment(
hydrogen_env->closure(),
hydrogen_env->frame_type(),
@@ -868,13 +951,15 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
hydrogen_env->entry(),
zone());
int argument_index = *argument_index_accumulator;
- for (int i = 0; i < value_count; ++i) {
+ int object_index = objects_to_materialize->length();
+ for (int i = 0; i < hydrogen_env->length(); ++i) {
if (hydrogen_env->is_special_index(i)) continue;
+ LOperand* op;
HValue* value = hydrogen_env->values()->at(i);
- LOperand* op = NULL;
- if (value->IsArgumentsObject()) {
- op = NULL;
+ if (value->IsArgumentsObject() || value->IsCapturedObject()) {
+ objects_to_materialize->Add(value, zone());
+ op = LEnvironment::materialization_marker();
} else if (value->IsPushArgument()) {
op = new(zone()) LArgument(argument_index++);
} else {
@@ -885,6 +970,39 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
value->CheckFlag(HInstruction::kUint32));
}
+ for (int i = object_index; i < objects_to_materialize->length(); ++i) {
+ HValue* object_to_materialize = objects_to_materialize->at(i);
+ int previously_materialized_object = -1;
+ for (int prev = 0; prev < i; ++prev) {
+ if (objects_to_materialize->at(prev) == objects_to_materialize->at(i)) {
+ previously_materialized_object = prev;
+ break;
+ }
+ }
+ int length = object_to_materialize->OperandCount();
+ bool is_arguments = object_to_materialize->IsArgumentsObject();
+ if (previously_materialized_object >= 0) {
+ result->AddDuplicateObject(previously_materialized_object);
+ continue;
+ } else {
+ result->AddNewObject(is_arguments ? length - 1 : length, is_arguments);
+ }
+ for (int i = is_arguments ? 1 : 0; i < length; ++i) {
+ LOperand* op;
+ HValue* value = object_to_materialize->OperandAt(i);
+ if (value->IsArgumentsObject() || value->IsCapturedObject()) {
+ objects_to_materialize->Add(value, zone());
+ op = LEnvironment::materialization_marker();
+ } else {
+ ASSERT(!value->IsPushArgument());
+ op = UseAny(value);
+ }
+ result->AddValue(op,
+ value->representation(),
+ value->CheckFlag(HInstruction::kUint32));
+ }
+ }
+
if (hydrogen_env->frame_type() == JS_FUNCTION) {
*argument_index_accumulator = argument_index;
}
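A minimal sketch, not part of the patch, of the duplicate-object scan introduced above: each captured object queued for materialization is checked against the entries already queued, and a repeat is recorded as a back-reference to the earlier slot instead of being described a second time.

  #include <cstdio>
  #include <vector>

  int main() {
    // Stand-in values; in the chunk builder these are HValue* captured objects.
    const int objects[] = { 1, 2, 1, 3, 2 };
    std::vector<int> queued;
    for (int obj : objects) {
      int previously_materialized = -1;
      for (int prev = 0; prev < (int)queued.size(); ++prev) {
        if (queued[prev] == obj) { previously_materialized = prev; break; }
      }
      queued.push_back(obj);
      if (previously_materialized >= 0) {
        std::printf("object %d: duplicate of entry %d\n", obj, previously_materialized);
      } else {
        std::printf("object %d: new object, describe its fields\n", obj);
      }
    }
    return 0;
  }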
@@ -894,27 +1012,29 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
- return new(zone()) LGoto(instr->FirstSuccessor()->block_id());
+ return new(zone()) LGoto(instr->FirstSuccessor());
+}
+
+
+LInstruction* LChunkBuilder::DoDebugBreak(HDebugBreak* instr) {
+ return new(zone()) LDebugBreak();
}
LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
- HValue* value = instr->value();
- if (value->EmitAtUses()) {
- ASSERT(value->IsConstant());
- ASSERT(!value->representation().IsDouble());
- HBasicBlock* successor = HConstant::cast(value)->ToBoolean()
- ? instr->FirstSuccessor()
- : instr->SecondSuccessor();
- return new(zone()) LGoto(successor->block_id());
- }
+ LInstruction* goto_instr = CheckElideControlInstruction(instr);
+ if (goto_instr != NULL) return goto_instr;
+ HValue* value = instr->value();
LBranch* result = new(zone()) LBranch(UseRegister(value));
// Tagged values that are not known smis or booleans require a
- // deoptimization environment.
+ // deoptimization environment. If the instruction is generic, no
+ // environment is needed since all cases are handled.
+ ToBooleanStub::Types expected = instr->expected_input_types();
Representation rep = value->representation();
HType type = value->type();
- if (rep.IsTagged() && !type.IsSmi() && !type.IsBoolean()) {
+ if (rep.IsTagged() && !type.IsSmi() && !type.IsBoolean() &&
+ !expected.IsGeneric()) {
return AssignEnvironment(result);
}
return result;
@@ -929,11 +1049,13 @@ LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* length) {
+ info()->MarkAsRequiresFrame();
return DefineAsRegister(new(zone()) LArgumentsLength(Use(length->value())));
}
LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
+ info()->MarkAsRequiresFrame();
return DefineAsRegister(new(zone()) LArgumentsElements);
}
@@ -977,12 +1099,28 @@ LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
- ++argument_count_;
LOperand* argument = UseOrConstant(instr->argument());
return new(zone()) LPushArgument(argument);
}
+LInstruction* LChunkBuilder::DoStoreCodeEntry(
+ HStoreCodeEntry* store_code_entry) {
+ LOperand* function = UseRegister(store_code_entry->function());
+ LOperand* code_object = UseTempRegister(store_code_entry->code_object());
+ return new(zone()) LStoreCodeEntry(function, code_object);
+}
+
+
+LInstruction* LChunkBuilder::DoInnerAllocatedObject(
+ HInnerAllocatedObject* inner_object) {
+ LOperand* base_object = UseRegisterAtStart(inner_object->base_object());
+ LInnerAllocatedObject* result =
+ new(zone()) LInnerAllocatedObject(base_object);
+ return DefineAsRegister(result);
+}
+
+
LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
return instr->HasNoUses()
? NULL
@@ -991,7 +1129,14 @@ LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
LInstruction* LChunkBuilder::DoContext(HContext* instr) {
- return instr->HasNoUses() ? NULL : DefineAsRegister(new(zone()) LContext);
+ // If there is a non-return use, the context must be allocated in a register.
+ for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
+ if (!it.value()->IsReturn()) {
+ return DefineAsRegister(new(zone()) LContext);
+ }
+ }
+
+ return NULL;
}
@@ -1019,96 +1164,166 @@ LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) {
LInstruction* LChunkBuilder::DoCallConstantFunction(
HCallConstantFunction* instr) {
- argument_count_ -= instr->argument_count();
return MarkAsCall(DefineFixed(new(zone()) LCallConstantFunction, rax), instr);
}
LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
LOperand* function = UseFixed(instr->function(), rdi);
- argument_count_ -= instr->argument_count();
LInvokeFunction* result = new(zone()) LInvokeFunction(function);
return MarkAsCall(DefineFixed(result, rax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
}
LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
- BuiltinFunctionId op = instr->op();
- if (op == kMathLog || op == kMathSin || op == kMathCos || op == kMathTan) {
- LOperand* input = UseFixedDouble(instr->value(), xmm1);
- LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input);
- return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
- } else {
- LOperand* input = UseRegisterAtStart(instr->value());
- LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input);
- switch (op) {
- case kMathAbs:
- return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
- case kMathFloor:
- return AssignEnvironment(DefineAsRegister(result));
- case kMathRound:
- return AssignEnvironment(DefineAsRegister(result));
- case kMathSqrt:
- return DefineSameAsFirst(result);
- case kMathPowHalf:
- return DefineSameAsFirst(result);
- default:
- UNREACHABLE();
- return NULL;
- }
+ switch (instr->op()) {
+ case kMathFloor: return DoMathFloor(instr);
+ case kMathRound: return DoMathRound(instr);
+ case kMathAbs: return DoMathAbs(instr);
+ case kMathLog: return DoMathLog(instr);
+ case kMathSin: return DoMathSin(instr);
+ case kMathCos: return DoMathCos(instr);
+ case kMathTan: return DoMathTan(instr);
+ case kMathExp: return DoMathExp(instr);
+ case kMathSqrt: return DoMathSqrt(instr);
+ case kMathPowHalf: return DoMathPowHalf(instr);
+ default:
+ UNREACHABLE();
+ return NULL;
}
}
+LInstruction* LChunkBuilder::DoMathFloor(HUnaryMathOperation* instr) {
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LMathFloor* result = new(zone()) LMathFloor(input);
+ return AssignEnvironment(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) {
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LMathRound* result = new(zone()) LMathRound(input);
+ return AssignEnvironment(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) {
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LMathAbs* result = new(zone()) LMathAbs(input);
+ return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
+}
+
+
+LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->value()->representation().IsDouble());
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LMathLog* result = new(zone()) LMathLog(input);
+ return DefineSameAsFirst(result);
+}
+
+
+LInstruction* LChunkBuilder::DoMathSin(HUnaryMathOperation* instr) {
+ LOperand* input = UseFixedDouble(instr->value(), xmm1);
+ LMathSin* result = new(zone()) LMathSin(input);
+ return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoMathCos(HUnaryMathOperation* instr) {
+ LOperand* input = UseFixedDouble(instr->value(), xmm1);
+ LMathCos* result = new(zone()) LMathCos(input);
+ return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoMathTan(HUnaryMathOperation* instr) {
+ LOperand* input = UseFixedDouble(instr->value(), xmm1);
+ LMathTan* result = new(zone()) LMathTan(input);
+ return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->value()->representation().IsDouble());
+ LOperand* value = UseTempRegister(instr->value());
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LMathExp* result = new(zone()) LMathExp(value, temp1, temp2);
+ return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoMathSqrt(HUnaryMathOperation* instr) {
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LMathSqrt* result = new(zone()) LMathSqrt(input);
+ return DefineSameAsFirst(result);
+}
+
+
+LInstruction* LChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) {
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LMathPowHalf* result = new(zone()) LMathPowHalf(input);
+ return DefineSameAsFirst(result);
+}
+
+
LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) {
ASSERT(instr->key()->representation().IsTagged());
LOperand* key = UseFixed(instr->key(), rcx);
- argument_count_ -= instr->argument_count();
LCallKeyed* result = new(zone()) LCallKeyed(key);
return MarkAsCall(DefineFixed(result, rax), instr);
}
LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
- argument_count_ -= instr->argument_count();
return MarkAsCall(DefineFixed(new(zone()) LCallNamed, rax), instr);
}
LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) {
- argument_count_ -= instr->argument_count();
return MarkAsCall(DefineFixed(new(zone()) LCallGlobal, rax), instr);
}
LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) {
- argument_count_ -= instr->argument_count();
return MarkAsCall(DefineFixed(new(zone()) LCallKnownGlobal, rax), instr);
}
LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
LOperand* constructor = UseFixed(instr->constructor(), rdi);
- argument_count_ -= instr->argument_count();
LCallNew* result = new(zone()) LCallNew(constructor);
return MarkAsCall(DefineFixed(result, rax), instr);
}
+LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
+ LOperand* constructor = UseFixed(instr->constructor(), rdi);
+ LCallNewArray* result = new(zone()) LCallNewArray(constructor);
+ return MarkAsCall(DefineFixed(result, rax), instr);
+}
+
+
LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
LOperand* function = UseFixed(instr->function(), rdi);
- argument_count_ -= instr->argument_count();
LCallFunction* result = new(zone()) LCallFunction(function);
return MarkAsCall(DefineFixed(result, rax), instr);
}
LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
- argument_count_ -= instr->argument_count();
return MarkAsCall(DefineFixed(new(zone()) LCallRuntime, rax), instr);
}
+LInstruction* LChunkBuilder::DoRor(HRor* instr) {
+ return DoShift(Token::ROR, instr);
+}
+
+
LInstruction* LChunkBuilder::DoShr(HShr* instr) {
return DoShift(Token::SHR, instr);
}
@@ -1125,40 +1340,31 @@ LInstruction* LChunkBuilder::DoShl(HShl* instr) {
LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ ASSERT(instr->CheckFlag(HValue::kTruncatingToInt32));
- LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
- LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+ LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
return DefineSameAsFirst(new(zone()) LBitI(left, right));
} else {
- ASSERT(instr->representation().IsTagged());
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
-
- LOperand* left = UseFixed(instr->left(), rdx);
- LOperand* right = UseFixed(instr->right(), rax);
- LArithmeticT* result = new(zone()) LArithmeticT(instr->op(), left, right);
- return MarkAsCall(DefineFixed(result, rax), instr);
+ return DoArithmeticT(instr->op(), instr);
}
}
-LInstruction* LChunkBuilder::DoBitNot(HBitNot* instr) {
- ASSERT(instr->value()->representation().IsInteger32());
- ASSERT(instr->representation().IsInteger32());
- if (instr->HasNoUses()) return NULL;
- LOperand* input = UseRegisterAtStart(instr->value());
- LBitNotI* result = new(zone()) LBitNotI(input);
- return DefineSameAsFirst(result);
-}
-
-
LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
- if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::DIV, instr);
- } else if (instr->representation().IsInteger32()) {
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ if (instr->HasPowerOf2Divisor()) {
+ ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
+ LOperand* value = UseRegisterAtStart(instr->left());
+ LDivI* div =
+ new(zone()) LDivI(value, UseOrConstant(instr->right()), NULL);
+ return AssignEnvironment(DefineSameAsFirst(div));
+ }
// The temporary operand is necessary to ensure that right is not allocated
// into rdx.
LOperand* temp = FixedTemp(rdx);
@@ -1166,26 +1372,14 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
LOperand* divisor = UseRegister(instr->right());
LDivI* result = new(zone()) LDivI(dividend, divisor, temp);
return AssignEnvironment(DefineFixed(result, rax));
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::DIV, instr);
} else {
- ASSERT(instr->representation().IsTagged());
return DoArithmeticT(Token::DIV, instr);
}
}
-HValue* LChunkBuilder::SimplifiedDividendForMathFloorOfDiv(HValue* dividend) {
- // A value with an integer representation does not need to be transformed.
- if (dividend->representation().IsInteger32()) {
- return dividend;
- // A change from an integer32 can be replaced by the integer32 value.
- } else if (dividend->IsChange() &&
- HChange::cast(dividend)->from().IsInteger32()) {
- return HChange::cast(dividend)->value();
- }
- return NULL;
-}
-
-
HValue* LChunkBuilder::SimplifiedDivisorForMathFloorOfDiv(HValue* divisor) {
if (divisor->IsConstant() &&
HConstant::cast(divisor)->HasInteger32Value()) {
@@ -1193,12 +1387,31 @@ HValue* LChunkBuilder::SimplifiedDivisorForMathFloorOfDiv(HValue* divisor) {
return constant_val->CopyToRepresentation(Representation::Integer32(),
divisor->block()->zone());
}
+ // A value with an integer representation does not need to be transformed.
+ if (divisor->representation().IsInteger32()) {
+ return divisor;
+ // A change from an integer32 can be replaced by the integer32 value.
+ } else if (divisor->IsChange() &&
+ HChange::cast(divisor)->from().IsInteger32()) {
+ return HChange::cast(divisor)->value();
+ }
return NULL;
}
LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
HValue* right = instr->right();
+ if (!right->IsConstant()) {
+ ASSERT(right->representation().IsInteger32());
+ // The temporary operand is necessary to ensure that right is not allocated
+ // into rdx.
+ LOperand* temp = FixedTemp(rdx);
+ LOperand* dividend = UseFixed(instr->left(), rax);
+ LOperand* divisor = UseRegister(instr->right());
+ LDivI* flooring_div = new(zone()) LDivI(dividend, divisor, temp);
+ return AssignEnvironment(DefineFixed(flooring_div, rax));
+ }
+
ASSERT(right->IsConstant() && HConstant::cast(right)->HasInteger32Value());
LOperand* divisor = chunk_->DefineConstantOperand(HConstant::cast(right));
int32_t divisor_si = HConstant::cast(right)->Integer32Value();
@@ -1223,52 +1436,57 @@ LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
LInstruction* LChunkBuilder::DoMod(HMod* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
-
- LInstruction* result;
+ HValue* left = instr->left();
+ HValue* right = instr->right();
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(left->representation().Equals(instr->representation()));
+ ASSERT(right->representation().Equals(instr->representation()));
if (instr->HasPowerOf2Divisor()) {
- ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
- LOperand* value = UseRegisterAtStart(instr->left());
- LModI* mod =
- new(zone()) LModI(value, UseOrConstant(instr->right()), NULL);
- result = DefineSameAsFirst(mod);
+ ASSERT(!right->CanBeZero());
+ LModI* mod = new(zone()) LModI(UseRegisterAtStart(left),
+ UseOrConstant(right),
+ NULL);
+ LInstruction* result = DefineSameAsFirst(mod);
+ return (left->CanBeNegative() &&
+ instr->CheckFlag(HValue::kBailoutOnMinusZero))
+ ? AssignEnvironment(result)
+ : result;
+ } else if (instr->fixed_right_arg().has_value) {
+ LModI* mod = new(zone()) LModI(UseRegister(left),
+ UseRegisterAtStart(right),
+ NULL);
+ return AssignEnvironment(DefineSameAsFirst(mod));
} else {
// The temporary operand is necessary to ensure that right is not
// allocated into rdx.
- LOperand* temp = FixedTemp(rdx);
- LOperand* value = UseFixed(instr->left(), rax);
- LOperand* divisor = UseRegister(instr->right());
- LModI* mod = new(zone()) LModI(value, divisor, temp);
- result = DefineFixed(mod, rdx);
+ LModI* mod = new(zone()) LModI(UseFixed(left, rax),
+ UseRegister(right),
+ FixedTemp(rdx));
+ LInstruction* result = DefineFixed(mod, rdx);
+ return (right->CanBeZero() ||
+ (left->RangeCanInclude(kMinInt) &&
+ right->RangeCanInclude(-1) &&
+ instr->CheckFlag(HValue::kBailoutOnMinusZero)) ||
+ (left->CanBeNegative() &&
+ instr->CanBeZero() &&
+ instr->CheckFlag(HValue::kBailoutOnMinusZero)))
+ ? AssignEnvironment(result)
+ : result;
}
-
- return (instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
- instr->CheckFlag(HValue::kCanBeDivByZero))
- ? AssignEnvironment(result)
- : result;
- } else if (instr->representation().IsTagged()) {
- return DoArithmeticT(Token::MOD, instr);
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::MOD, instr);
} else {
- ASSERT(instr->representation().IsDouble());
- // We call a C function for double modulo. It can't trigger a GC.
- // We need to use fixed result register for the call.
- // TODO(fschneider): Allow any register as input registers.
- LOperand* left = UseFixedDouble(instr->left(), xmm2);
- LOperand* right = UseFixedDouble(instr->right(), xmm1);
- LArithmeticD* result = new(zone()) LArithmeticD(Token::MOD, left, right);
- return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
+ return DoArithmeticT(Token::MOD, instr);
}
}
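A minimal sketch, not part of the patch, of the kBailoutOnMinusZero case handled above: when the left operand is negative and the remainder is zero, the result must be -0, which an int32 cannot represent, so the instruction needs an environment to deoptimize into. C's fmod shows the same sign rule:

  #include <cmath>
  #include <cstdio>

  int main() {
    double r = std::fmod(-1.0, 1.0);  // JS: -1 % 1 === -0
    std::printf("fmod(-1, 1) = %g, signbit = %d\n", r, std::signbit(r) ? 1 : 0);
    return 0;
  }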
LInstruction* LChunkBuilder::DoMul(HMul* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
- LOperand* right = UseOrConstant(instr->MostConstantOperand());
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ LOperand* right = UseOrConstant(instr->BetterRightOperand());
LMulI* mul = new(zone()) LMulI(left, right);
if (instr->CheckFlag(HValue::kCanOverflow) ||
instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
@@ -1278,16 +1496,15 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) {
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::MUL, instr);
} else {
- ASSERT(instr->representation().IsTagged());
return DoArithmeticT(Token::MUL, instr);
}
}
LInstruction* LChunkBuilder::DoSub(HSub* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseOrConstantAtStart(instr->right());
LSubI* sub = new(zone()) LSubI(left, right);
@@ -1299,28 +1516,37 @@ LInstruction* LChunkBuilder::DoSub(HSub* instr) {
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::SUB, instr);
} else {
- ASSERT(instr->representation().IsTagged());
return DoArithmeticT(Token::SUB, instr);
}
}
LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
- LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+ if (instr->representation().IsSmiOrInteger32()) {
+ // Check to see if it would be advantageous to use an lea instruction rather
+ // than an add. This is the case when no overflow check is needed and there
+ // are multiple uses of the add's inputs, so using a 3-register add will
+ // preserve all input values for later uses.
+ bool use_lea = LAddI::UseLea(instr);
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ HValue* right_candidate = instr->BetterRightOperand();
+ LOperand* right = use_lea
+ ? UseRegisterOrConstantAtStart(right_candidate)
+ : UseOrConstantAtStart(right_candidate);
LAddI* add = new(zone()) LAddI(left, right);
- LInstruction* result = DefineSameAsFirst(add);
- if (instr->CheckFlag(HValue::kCanOverflow)) {
+ bool can_overflow = instr->CheckFlag(HValue::kCanOverflow);
+ LInstruction* result = use_lea
+ ? DefineAsRegister(add)
+ : DefineSameAsFirst(add);
+ if (can_overflow) {
result = AssignEnvironment(result);
}
return result;
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::ADD, instr);
} else {
- ASSERT(instr->representation().IsTagged());
return DoArithmeticT(Token::ADD, instr);
}
return NULL;
@@ -1330,11 +1556,11 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) {
LOperand* left = NULL;
LOperand* right = NULL;
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
- left = UseRegisterAtStart(instr->LeastConstantOperand());
- right = UseOrConstantAtStart(instr->MostConstantOperand());
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ left = UseRegisterAtStart(instr->BetterLeftOperand());
+ right = UseOrConstantAtStart(instr->BetterRightOperand());
} else {
ASSERT(instr->representation().IsDouble());
ASSERT(instr->left()->representation().IsDouble());
@@ -1355,12 +1581,7 @@ LInstruction* LChunkBuilder::DoPower(HPower* instr) {
ASSERT(instr->left()->representation().IsDouble());
LOperand* left = UseFixedDouble(instr->left(), xmm2);
LOperand* right = exponent_type.IsDouble() ?
- UseFixedDouble(instr->right(), xmm1) :
-#ifdef _WIN64
- UseFixed(instr->right(), rdx);
-#else
- UseFixed(instr->right(), rdi);
-#endif
+ UseFixedDouble(instr->right(), xmm1) : UseFixed(instr->right(), rdx);
LPower* result = new(zone()) LPower(left, right);
return MarkAsCall(DefineFixedDouble(result, xmm3), instr,
CAN_DEOPTIMIZE_EAGERLY);
@@ -1370,13 +1591,13 @@ LInstruction* LChunkBuilder::DoPower(HPower* instr) {
LInstruction* LChunkBuilder::DoRandom(HRandom* instr) {
ASSERT(instr->representation().IsDouble());
ASSERT(instr->global_object()->representation().IsTagged());
-#ifdef _WIN64
- LOperand* global_object = UseFixed(instr->global_object(), rcx);
-#else
- LOperand* global_object = UseFixed(instr->global_object(), rdi);
-#endif
- LRandom* result = new(zone()) LRandom(global_object);
- return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
+ LOperand* global_object = UseTempRegister(instr->global_object());
+ LOperand* scratch = TempRegister();
+ LOperand* scratch2 = TempRegister();
+ LOperand* scratch3 = TempRegister();
+ LRandom* result = new(zone()) LRandom(
+ global_object, scratch, scratch2, scratch3);
+ return DefineFixedDouble(result, xmm1);
}
@@ -1390,15 +1611,15 @@ LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
}
-LInstruction* LChunkBuilder::DoCompareIDAndBranch(
- HCompareIDAndBranch* instr) {
- Representation r = instr->GetInputRepresentation();
- if (r.IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
+LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
+ HCompareNumericAndBranch* instr) {
+ Representation r = instr->representation();
+ if (r.IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(r));
+ ASSERT(instr->right()->representation().Equals(r));
LOperand* left = UseRegisterOrConstantAtStart(instr->left());
LOperand* right = UseOrConstantAtStart(instr->right());
- return new(zone()) LCmpIDAndBranch(left, right);
+ return new(zone()) LCompareNumericAndBranch(left, right);
} else {
ASSERT(r.IsDouble());
ASSERT(instr->left()->representation().IsDouble());
@@ -1412,30 +1633,25 @@ LInstruction* LChunkBuilder::DoCompareIDAndBranch(
left = UseRegisterAtStart(instr->left());
right = UseRegisterAtStart(instr->right());
}
- return new(zone()) LCmpIDAndBranch(left, right);
+ return new(zone()) LCompareNumericAndBranch(left, right);
}
}
LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
HCompareObjectEqAndBranch* instr) {
+ LInstruction* goto_instr = CheckElideControlInstruction(instr);
+ if (goto_instr != NULL) return goto_instr;
LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
+ LOperand* right = UseRegisterOrConstantAtStart(instr->right());
return new(zone()) LCmpObjectEqAndBranch(left, right);
}
-LInstruction* LChunkBuilder::DoCompareConstantEqAndBranch(
- HCompareConstantEqAndBranch* instr) {
+LInstruction* LChunkBuilder::DoCompareHoleAndBranch(
+ HCompareHoleAndBranch* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
- return new(zone()) LCmpConstantEqAndBranch(value);
-}
-
-
-LInstruction* LChunkBuilder::DoIsNilAndBranch(HIsNilAndBranch* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- LOperand* temp = instr->kind() == kStrictEquality ? NULL : TempRegister();
- return new(zone()) LIsNilAndBranch(UseRegisterAtStart(instr->value()), temp);
+ return new(zone()) LCmpHoleAndBranch(value);
}
@@ -1516,19 +1732,6 @@ LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
}
-LInstruction* LChunkBuilder::DoJSArrayLength(HJSArrayLength* instr) {
- LOperand* array = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LJSArrayLength(array));
-}
-
-
-LInstruction* LChunkBuilder::DoFixedArrayBaseLength(
- HFixedArrayBaseLength* instr) {
- LOperand* array = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LFixedArrayBaseLength(array));
-}
-
-
LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
LOperand* map = UseRegisterAtStart(instr->value());
return DefineAsRegister(new(zone()) LMapEnumLength(map));
@@ -1555,6 +1758,17 @@ LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
}
+LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
+ LOperand* string = UseRegister(instr->string());
+ LOperand* index = UseRegister(instr->index());
+ ASSERT(rcx.is_byte_register());
+ LOperand* value = UseFixed(instr->value(), rcx);
+ LSeqStringSetChar* result =
+ new(zone()) LSeqStringSetChar(instr->encoding(), string, index, value);
+ return DefineSameAsFirst(result);
+}
+
+
LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
LOperand* value = UseRegisterOrConstantAtStart(instr->index());
LOperand* length = Use(instr->length());
@@ -1562,6 +1776,13 @@ LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
}
+LInstruction* LChunkBuilder::DoBoundsCheckBaseIndexInformation(
+ HBoundsCheckBaseIndexInformation* instr) {
+ UNREACHABLE();
+ return NULL;
+}
+
+
LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
// The control instruction marking the end of a block that completed
// abruptly (e.g., threw an exception). There is nothing specific to do.
@@ -1591,15 +1812,33 @@ LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) {
LInstruction* LChunkBuilder::DoChange(HChange* instr) {
Representation from = instr->from();
Representation to = instr->to();
+ if (from.IsSmi()) {
+ if (to.IsTagged()) {
+ LOperand* value = UseRegister(instr->value());
+ return DefineSameAsFirst(new(zone()) LDummyUse(value));
+ }
+ from = Representation::Tagged();
+ }
+ // Only mark conversions that might need to allocate as calling rather than
+ // all changes, so that simple, non-allocating conversions do not have to
+ // force building a stack frame.
if (from.IsTagged()) {
if (to.IsDouble()) {
LOperand* value = UseRegister(instr->value());
LNumberUntagD* res = new(zone()) LNumberUntagD(value);
return AssignEnvironment(DefineAsRegister(res));
+ } else if (to.IsSmi()) {
+ HValue* val = instr->value();
+ LOperand* value = UseRegister(val);
+ if (val->type().IsSmi()) {
+ return DefineSameAsFirst(new(zone()) LDummyUse(value));
+ }
+ return AssignEnvironment(DefineSameAsFirst(new(zone()) LCheckSmi(value)));
} else {
ASSERT(to.IsInteger32());
- LOperand* value = UseRegister(instr->value());
- if (instr->value()->type().IsSmi()) {
+ HValue* val = instr->value();
+ LOperand* value = UseRegister(val);
+ if (val->type().IsSmi() || val->representation().IsSmi()) {
return DefineSameAsFirst(new(zone()) LSmiUntag(value, false));
} else {
bool truncating = instr->CanTruncateToInt32();
@@ -1610,6 +1849,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
}
} else if (from.IsDouble()) {
if (to.IsTagged()) {
+ info()->MarkAsDeferredCalling();
LOperand* value = UseRegister(instr->value());
LOperand* temp = TempRegister();
@@ -1617,12 +1857,18 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
LUnallocated* result_temp = TempRegister();
LNumberTagD* result = new(zone()) LNumberTagD(value, temp);
return AssignPointerMap(Define(result, result_temp));
+ } else if (to.IsSmi()) {
+ LOperand* value = UseRegister(instr->value());
+ return AssignEnvironment(
+ DefineAsRegister(new(zone()) LDoubleToSmi(value)));
} else {
ASSERT(to.IsInteger32());
LOperand* value = UseRegister(instr->value());
- return AssignEnvironment(DefineAsRegister(new(zone()) LDoubleToI(value)));
+ return AssignEnvironment(
+ DefineAsRegister(new(zone()) LDoubleToI(value)));
}
} else if (from.IsInteger32()) {
+ info()->MarkAsDeferredCalling();
if (to.IsTagged()) {
HValue* val = instr->value();
LOperand* value = UseRegister(val);
@@ -1636,6 +1882,23 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
LNumberTagI* result = new(zone()) LNumberTagI(value);
return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
}
+ } else if (to.IsSmi()) {
+ HValue* val = instr->value();
+ LOperand* value = UseRegister(val);
+ LInstruction* result = NULL;
+ if (val->CheckFlag(HInstruction::kUint32)) {
+ result = DefineAsRegister(new(zone()) LUint32ToSmi(value));
+ if (val->HasRange() && val->range()->IsInSmiRange() &&
+ val->range()->upper() != kMaxInt) {
+ return result;
+ }
+ } else {
+ result = DefineAsRegister(new(zone()) LInteger32ToSmi(value));
+ if (val->HasRange() && val->range()->IsInSmiRange()) {
+ return result;
+ }
+ }
+ return AssignEnvironment(result);
} else {
if (instr->value()->CheckFlag(HInstruction::kUint32)) {
LOperand* temp = FixedTemp(xmm1);
@@ -1653,42 +1916,43 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
}
-LInstruction* LChunkBuilder::DoCheckNonSmi(HCheckNonSmi* instr) {
+LInstruction* LChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
return AssignEnvironment(new(zone()) LCheckNonSmi(value));
}
-LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
+LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
- LCheckInstanceType* result = new(zone()) LCheckInstanceType(value);
- return AssignEnvironment(result);
-}
-
-
-LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
- LOperand* temp = TempRegister();
- LCheckPrototypeMaps* result = new(zone()) LCheckPrototypeMaps(temp);
- return AssignEnvironment(result);
+ return AssignEnvironment(new(zone()) LCheckSmi(value));
}
-LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
+LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new(zone()) LCheckSmi(value));
+ LCheckInstanceType* result = new(zone()) LCheckInstanceType(value);
+ return AssignEnvironment(result);
}
-LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
+LInstruction* LChunkBuilder::DoCheckValue(HCheckValue* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new(zone()) LCheckFunction(value));
+ return AssignEnvironment(new(zone()) LCheckValue(value));
}
LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
+ LOperand* value = NULL;
+ if (!instr->CanOmitMapChecks()) {
+ value = UseRegisterAtStart(instr->value());
+ if (instr->has_migration_target()) info()->MarkAsDeferredCalling();
+ }
LCheckMaps* result = new(zone()) LCheckMaps(value);
- return AssignEnvironment(result);
+ if (!instr->CanOmitMapChecks()) {
+ AssignEnvironment(result);
+ if (instr->has_migration_target()) return AssignPointerMap(result);
+ }
+ return result;
}
@@ -1701,7 +1965,7 @@ LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
} else if (input_rep.IsInteger32()) {
return DefineSameAsFirst(new(zone()) LClampIToUint8(reg));
} else {
- ASSERT(input_rep.IsTagged());
+ ASSERT(input_rep.IsSmiOrTagged());
// Register allocator doesn't (yet) support allocation of double
// temps. Reserve xmm1 explicitly.
LClampTToUint8* result = new(zone()) LClampTToUint8(reg,
@@ -1712,17 +1976,23 @@ LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
- return new(zone()) LReturn(UseFixed(instr->value(), rax));
+ LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count());
+ return new(zone()) LReturn(UseFixed(instr->value(), rax),
+ parameter_count);
}
LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
Representation r = instr->representation();
- if (r.IsInteger32()) {
+ if (r.IsSmi()) {
+ return DefineAsRegister(new(zone()) LConstantS);
+ } else if (r.IsInteger32()) {
return DefineAsRegister(new(zone()) LConstantI);
} else if (r.IsDouble()) {
LOperand* temp = TempRegister();
return DefineAsRegister(new(zone()) LConstantD(temp));
+ } else if (r.IsExternal()) {
+ return DefineAsRegister(new(zone()) LConstantE);
} else if (r.IsTagged()) {
return DefineAsRegister(new(zone()) LConstantT);
} else {
@@ -1793,29 +2063,22 @@ LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
- ASSERT(instr->representation().IsTagged());
+ // Use the special mov rax, moffs64 encoding for external
+ // memory accesses with 64-bit word-sized values.
+ if (instr->access().IsExternalMemory() &&
+ instr->access().offset() == 0 &&
+ (instr->access().representation().IsSmi() ||
+ instr->access().representation().IsTagged() ||
+ instr->access().representation().IsHeapObject() ||
+ instr->access().representation().IsExternal())) {
+ LOperand* obj = UseRegisterOrConstantAtStart(instr->object());
+ return DefineFixed(new(zone()) LLoadNamedField(obj), rax);
+ }
LOperand* obj = UseRegisterAtStart(instr->object());
return DefineAsRegister(new(zone()) LLoadNamedField(obj));
}
-LInstruction* LChunkBuilder::DoLoadNamedFieldPolymorphic(
- HLoadNamedFieldPolymorphic* instr) {
- ASSERT(instr->representation().IsTagged());
- if (instr->need_generic()) {
- LOperand* obj = UseFixed(instr->object(), rax);
- LLoadNamedFieldPolymorphic* result =
- new(zone()) LLoadNamedFieldPolymorphic(obj);
- return MarkAsCall(DefineFixed(result, rax), instr);
- } else {
- LOperand* obj = UseRegisterAtStart(instr->object());
- LLoadNamedFieldPolymorphic* result =
- new(zone()) LLoadNamedFieldPolymorphic(obj);
- return AssignEnvironment(DefineAsRegister(result));
- }
-}
-
-
LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
LOperand* object = UseFixed(instr->object(), rax);
LLoadNamedGeneric* result = new(zone()) LLoadNamedGeneric(object);
@@ -1830,9 +2093,8 @@ LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
}
-LInstruction* LChunkBuilder::DoLoadElements(HLoadElements* instr) {
- LOperand* input = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LLoadElements(input));
+LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) {
+ return DefineAsRegister(new(zone()) LLoadRoot);
}
@@ -1843,63 +2105,33 @@ LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
}
-LInstruction* LChunkBuilder::DoLoadKeyedFastElement(
- HLoadKeyedFastElement* instr) {
- ASSERT(instr->representation().IsTagged());
- ASSERT(instr->key()->representation().IsInteger32() ||
- instr->key()->representation().IsTagged());
- LOperand* obj = UseRegisterAtStart(instr->object());
- bool clobbers_key = instr->key()->representation().IsTagged();
- LOperand* key = clobbers_key
- ? UseTempRegister(instr->key())
- : UseRegisterOrConstantAtStart(instr->key());
- LLoadKeyedFastElement* result =
- new(zone()) LLoadKeyedFastElement(obj, key);
- if (instr->RequiresHoleCheck()) AssignEnvironment(result);
- return DefineAsRegister(result);
-}
-
-
-LInstruction* LChunkBuilder::DoLoadKeyedFastDoubleElement(
- HLoadKeyedFastDoubleElement* instr) {
- ASSERT(instr->representation().IsDouble());
- ASSERT(instr->key()->representation().IsInteger32() ||
- instr->key()->representation().IsTagged());
- LOperand* elements = UseRegisterAtStart(instr->elements());
- bool clobbers_key = instr->key()->representation().IsTagged();
- LOperand* key = clobbers_key
- ? UseTempRegister(instr->key())
- : UseRegisterOrConstantAtStart(instr->key());
- LLoadKeyedFastDoubleElement* result =
- new(zone()) LLoadKeyedFastDoubleElement(elements, key);
- return AssignEnvironment(DefineAsRegister(result));
-}
+LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
+ ASSERT(instr->key()->representation().IsInteger32());
+ ElementsKind elements_kind = instr->elements_kind();
+ LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+ LLoadKeyed* result = NULL;
+ if (!instr->is_external()) {
+ LOperand* obj = UseRegisterAtStart(instr->elements());
+ result = new(zone()) LLoadKeyed(obj, key);
+ } else {
+ ASSERT(
+ (instr->representation().IsInteger32() &&
+ (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
+ (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
+ (instr->representation().IsDouble() &&
+ ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
+ (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
+ LOperand* external_pointer = UseRegister(instr->elements());
+ result = new(zone()) LLoadKeyed(external_pointer, key);
+ }
-LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement(
- HLoadKeyedSpecializedArrayElement* instr) {
- ElementsKind elements_kind = instr->elements_kind();
- ASSERT(
- (instr->representation().IsInteger32() &&
- (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
- (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
- (instr->representation().IsDouble() &&
- ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
- (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
- ASSERT(instr->key()->representation().IsInteger32() ||
- instr->key()->representation().IsTagged());
- LOperand* external_pointer = UseRegister(instr->external_pointer());
- bool clobbers_key = instr->key()->representation().IsTagged();
- LOperand* key = clobbers_key
- ? UseTempRegister(instr->key())
- : UseRegisterOrConstantAtStart(instr->key());
- LLoadKeyedSpecializedArrayElement* result =
- new(zone()) LLoadKeyedSpecializedArrayElement(external_pointer, key);
- LInstruction* load_instr = DefineAsRegister(result);
+ DefineAsRegister(result);
+ bool can_deoptimize = instr->RequiresHoleCheck() ||
+ (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS);
// An unsigned int array load might overflow and cause a deopt, make sure it
// has an environment.
- return (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) ?
- AssignEnvironment(load_instr) : load_instr;
+ return can_deoptimize ? AssignEnvironment(result) : result;
}
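// Illustrative sketch (not from the patch) of why EXTERNAL_UNSIGNED_INT_ELEMENTS
// forces an environment: a uint32 element above kMaxInt cannot be represented
// as an Integer32, so the optimized load has to deoptimize and retag the value.
//
//   uint32_t element = 0x80000000u;                    // 2147483648
//   int32_t as_int32 = static_cast<int32_t>(element);  // negative: lossy
//   // => the load "overflows" its int32 representation and bails out.
//
// RequiresHoleCheck() needs an environment for the same structural reason:
// finding the hole means the load cannot produce a value and must deopt.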
@@ -1912,71 +2144,51 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
}
-LInstruction* LChunkBuilder::DoStoreKeyedFastElement(
- HStoreKeyedFastElement* instr) {
- bool needs_write_barrier = instr->NeedsWriteBarrier();
- ASSERT(instr->value()->representation().IsTagged());
- ASSERT(instr->object()->representation().IsTagged());
- ASSERT(instr->key()->representation().IsInteger32() ||
- instr->key()->representation().IsTagged());
-
- LOperand* obj = UseTempRegister(instr->object());
- LOperand* val = needs_write_barrier
- ? UseTempRegister(instr->value())
- : UseRegisterAtStart(instr->value());
- bool clobbers_key = needs_write_barrier ||
- instr->key()->representation().IsTagged();
- LOperand* key = clobbers_key
- ? UseTempRegister(instr->key())
- : UseRegisterOrConstantAtStart(instr->key());
- return new(zone()) LStoreKeyedFastElement(obj, key, val);
-}
-
-
-LInstruction* LChunkBuilder::DoStoreKeyedFastDoubleElement(
- HStoreKeyedFastDoubleElement* instr) {
- ASSERT(instr->value()->representation().IsDouble());
- ASSERT(instr->elements()->representation().IsTagged());
- ASSERT(instr->key()->representation().IsInteger32() ||
- instr->key()->representation().IsTagged());
+LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
+ ElementsKind elements_kind = instr->elements_kind();
- LOperand* elements = UseRegisterAtStart(instr->elements());
- LOperand* val = UseTempRegister(instr->value());
- bool clobbers_key = instr->key()->representation().IsTagged();
- LOperand* key = clobbers_key
- ? UseTempRegister(instr->key())
- : UseRegisterOrConstantAtStart(instr->key());
- return new(zone()) LStoreKeyedFastDoubleElement(elements, key, val);
-}
+ if (!instr->is_external()) {
+ ASSERT(instr->elements()->representation().IsTagged());
+ bool needs_write_barrier = instr->NeedsWriteBarrier();
+ LOperand* object = NULL;
+ LOperand* key = NULL;
+ LOperand* val = NULL;
+
+ if (instr->value()->representation().IsDouble()) {
+ object = UseRegisterAtStart(instr->elements());
+ val = UseTempRegister(instr->value());
+ key = UseRegisterOrConstantAtStart(instr->key());
+ } else {
+ ASSERT(instr->value()->representation().IsSmiOrTagged());
+ object = UseTempRegister(instr->elements());
+ if (needs_write_barrier) {
+ val = UseTempRegister(instr->value());
+ key = UseTempRegister(instr->key());
+ } else {
+ val = UseRegisterOrConstantAtStart(instr->value());
+ key = UseRegisterOrConstantAtStart(instr->key());
+ }
+ }
+ return new(zone()) LStoreKeyed(object, key, val);
+ }
-LInstruction* LChunkBuilder::DoStoreKeyedSpecializedArrayElement(
- HStoreKeyedSpecializedArrayElement* instr) {
- ElementsKind elements_kind = instr->elements_kind();
ASSERT(
(instr->value()->representation().IsInteger32() &&
(elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
(elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
(instr->value()->representation().IsDouble() &&
((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
- (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
- ASSERT(instr->external_pointer()->representation().IsExternal());
- ASSERT(instr->key()->representation().IsInteger32() ||
- instr->key()->representation().IsTagged());
-
- LOperand* external_pointer = UseRegister(instr->external_pointer());
+ (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
+ ASSERT(instr->elements()->representation().IsExternal());
bool val_is_temp_register =
elements_kind == EXTERNAL_PIXEL_ELEMENTS ||
elements_kind == EXTERNAL_FLOAT_ELEMENTS;
- LOperand* val = val_is_temp_register
- ? UseTempRegister(instr->value())
+ LOperand* val = val_is_temp_register ? UseTempRegister(instr->value())
: UseRegister(instr->value());
- bool clobbers_key = instr->key()->representation().IsTagged();
- LOperand* key = clobbers_key
- ? UseTempRegister(instr->key())
- : UseRegisterOrConstantAtStart(instr->key());
- return new(zone()) LStoreKeyedSpecializedArrayElement(external_pointer,
- key, val);
+ LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+ LOperand* external_pointer = UseRegister(instr->elements());
+ return new(zone()) LStoreKeyed(external_pointer, key, val);
}
@@ -1997,54 +2209,89 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
LInstruction* LChunkBuilder::DoTransitionElementsKind(
HTransitionElementsKind* instr) {
- ElementsKind from_kind = instr->original_map()->elements_kind();
- ElementsKind to_kind = instr->transitioned_map()->elements_kind();
- if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
+ LOperand* object = UseRegister(instr->object());
+ if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
LOperand* object = UseRegister(instr->object());
LOperand* new_map_reg = TempRegister();
LOperand* temp_reg = TempRegister();
LTransitionElementsKind* result =
new(zone()) LTransitionElementsKind(object, new_map_reg, temp_reg);
- return DefineSameAsFirst(result);
+ return result;
} else {
- LOperand* object = UseFixed(instr->object(), rax);
- LOperand* fixed_object_reg = FixedTemp(rdx);
- LOperand* new_map_reg = FixedTemp(rbx);
LTransitionElementsKind* result =
- new(zone()) LTransitionElementsKind(object,
- new_map_reg,
- fixed_object_reg);
- return MarkAsCall(DefineFixed(result, rax), instr);
+ new(zone()) LTransitionElementsKind(object, NULL, NULL);
+ return AssignPointerMap(result);
}
}
+LInstruction* LChunkBuilder::DoTrapAllocationMemento(
+ HTrapAllocationMemento* instr) {
+ LOperand* object = UseRegister(instr->object());
+ LOperand* temp = TempRegister();
+ LTrapAllocationMemento* result =
+ new(zone()) LTrapAllocationMemento(object, temp);
+ return AssignEnvironment(result);
+}
+
+
LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
+ bool is_in_object = instr->access().IsInobject();
+ bool is_external_location = instr->access().IsExternalMemory() &&
+ instr->access().offset() == 0;
bool needs_write_barrier = instr->NeedsWriteBarrier();
- bool needs_write_barrier_for_map = !instr->transition().is_null() &&
+ bool needs_write_barrier_for_map = instr->has_transition() &&
instr->NeedsWriteBarrierForMap();
LOperand* obj;
if (needs_write_barrier) {
- obj = instr->is_in_object()
+ obj = is_in_object
? UseRegister(instr->object())
: UseTempRegister(instr->object());
+ } else if (is_external_location) {
+ ASSERT(!is_in_object);
+ ASSERT(!needs_write_barrier);
+ ASSERT(!needs_write_barrier_for_map);
+ obj = UseRegisterOrConstant(instr->object());
} else {
obj = needs_write_barrier_for_map
? UseRegister(instr->object())
: UseRegisterAtStart(instr->object());
}
- LOperand* val = needs_write_barrier
- ? UseTempRegister(instr->value())
- : UseRegister(instr->value());
+ bool can_be_constant = instr->value()->IsConstant() &&
+ HConstant::cast(instr->value())->NotInNewSpace() &&
+ !(FLAG_track_double_fields && instr->field_representation().IsDouble());
+
+ LOperand* val;
+ if (needs_write_barrier) {
+ val = UseTempRegister(instr->value());
+ } else if (is_external_location) {
+ val = UseFixed(instr->value(), rax);
+ } else if (can_be_constant) {
+ val = UseRegisterOrConstant(instr->value());
+ } else if (FLAG_track_fields && instr->field_representation().IsSmi()) {
+ val = UseTempRegister(instr->value());
+ } else if (FLAG_track_double_fields &&
+ instr->field_representation().IsDouble()) {
+ val = UseRegisterAtStart(instr->value());
+ } else {
+ val = UseRegister(instr->value());
+ }
// We only need a scratch register if we have a write barrier or we
// have a store into the properties array (not in-object-property).
- LOperand* temp = (!instr->is_in_object() || needs_write_barrier ||
+ LOperand* temp = (!is_in_object || needs_write_barrier ||
needs_write_barrier_for_map) ? TempRegister() : NULL;
- return new(zone()) LStoreNamedField(obj, val, temp);
+ LStoreNamedField* result = new(zone()) LStoreNamedField(obj, val, temp);
+ if (FLAG_track_heap_object_fields &&
+ instr->field_representation().IsHeapObject()) {
+ if (!instr->value()->type().IsHeapObject()) {
+ return AssignEnvironment(result);
+ }
+ }
+ return result;
}
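// Compact restatement (sketch, not from the patch) of how the value operand is
// chosen above, in priority order:
//
//   write barrier needed        -> UseTempRegister(value)
//   external memory at offset 0 -> UseFixed(value, rax)   (moffs64 encoding)
//   non-new-space constant      -> UseRegisterOrConstant(value)
//   Smi field (track_fields)    -> UseTempRegister(value)
//   Double field                -> UseRegisterAtStart(value)
//   otherwise                   -> UseRegister(value)
//
// The environment attached at the end covers the HeapObject-field case, where
// storing a value that might still be a Smi has to be able to deoptimize.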
@@ -2080,33 +2327,17 @@ LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
}
-LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
- LOperand* string = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LStringLength(string));
-}
-
-
-LInstruction* LChunkBuilder::DoAllocateObject(HAllocateObject* instr) {
- LAllocateObject* result = new(zone()) LAllocateObject(TempRegister());
+LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
+ info()->MarkAsDeferredCalling();
+ LOperand* size = instr->size()->IsConstant()
+ ? UseConstant(instr->size())
+ : UseTempRegister(instr->size());
+ LOperand* temp = TempRegister();
+ LAllocate* result = new(zone()) LAllocate(size, temp);
return AssignPointerMap(DefineAsRegister(result));
}
-LInstruction* LChunkBuilder::DoFastLiteral(HFastLiteral* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LFastLiteral, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LArrayLiteral, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoObjectLiteral(HObjectLiteral* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LObjectLiteral, rax), instr);
-}
-
-
LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
return MarkAsCall(DefineFixed(new(zone()) LRegExpLiteral, rax), instr);
}
@@ -2117,14 +2348,6 @@ LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
}
-LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) {
- LOperand* object = UseAtStart(instr->object());
- LOperand* key = UseOrConstantAtStart(instr->key());
- LDeleteProperty* result = new(zone()) LDeleteProperty(object, key);
- return MarkAsCall(DefineFixed(result, rax), instr);
-}
-
-
LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
ASSERT(argument_count_ == 0);
allocator_->MarkAsOsrEntry();
@@ -2134,23 +2357,40 @@ LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
- int spill_index = chunk()->GetParameterStackSlot(instr->index());
- return DefineAsSpilled(new(zone()) LParameter, spill_index);
+ LParameter* result = new(zone()) LParameter;
+ if (instr->kind() == HParameter::STACK_PARAMETER) {
+ int spill_index = chunk()->GetParameterStackSlot(instr->index());
+ return DefineAsSpilled(result, spill_index);
+ } else {
+ ASSERT(info()->IsStub());
+ CodeStubInterfaceDescriptor* descriptor =
+ info()->code_stub()->GetInterfaceDescriptor(info()->isolate());
+ int index = static_cast<int>(instr->index());
+ Register reg = DESCRIPTOR_GET_PARAMETER_REGISTER(descriptor, index);
+ return DefineFixed(result, reg);
+ }
}
LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
- int spill_index = chunk()->GetNextSpillIndex(false); // Not double-width.
- if (spill_index > LUnallocated::kMaxFixedIndex) {
- Abort("Too many spill slots needed for OSR");
- spill_index = 0;
+ // Use an index that corresponds to the location in the unoptimized frame,
+ // which the optimized frame will subsume.
+ int env_index = instr->index();
+ int spill_index = 0;
+ if (instr->environment()->is_parameter_index(env_index)) {
+ spill_index = chunk()->GetParameterStackSlot(env_index);
+ } else {
+ spill_index = env_index - instr->environment()->first_local_index();
+ if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
+ Abort(kTooManySpillSlotsNeededForOSR);
+ spill_index = 0;
+ }
}
return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
}
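// Worked example (numbers invented for illustration, not from the patch) of
// the spill-index mapping above: with two parameters and
// first_local_index() == 3, an OSR value at env_index 1 reuses the parameter
// stack slot for parameter 1, while env_index 5 maps to local spill slot
// 5 - 3 = 2. Only the local case can exceed LUnallocated::kMaxFixedSlotIndex
// and hit the kTooManySpillSlotsNeededForOSR abort.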
LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
- argument_count_ -= instr->argument_count();
return MarkAsCall(DefineFixed(new(zone()) LCallStub, rax), instr);
}
@@ -2164,10 +2404,26 @@ LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
}
+LInstruction* LChunkBuilder::DoCapturedObject(HCapturedObject* instr) {
+ instr->ReplayEnvironment(current_block_->last_environment());
+
+ // There are no real uses of a captured object.
+ return NULL;
+}
+
+
LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
+ info()->MarkAsRequiresFrame();
LOperand* args = UseRegister(instr->arguments());
- LOperand* length = UseTempRegister(instr->length());
- LOperand* index = Use(instr->index());
+ LOperand* length;
+ LOperand* index;
+ if (instr->length()->IsConstant() && instr->index()->IsConstant()) {
+ length = UseRegisterOrConstant(instr->length());
+ index = UseOrConstant(instr->index());
+ } else {
+ length = UseTempRegister(instr->length());
+ index = Use(instr->index());
+ }
return DefineAsRegister(new(zone()) LAccessArgumentsAt(args, length, index));
}
@@ -2197,20 +2453,7 @@ LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
- HEnvironment* env = current_block_->last_environment();
- ASSERT(env != NULL);
-
- env->set_ast_id(instr->ast_id());
-
- env->Drop(instr->pop_count());
- for (int i = 0; i < instr->values()->length(); ++i) {
- HValue* value = instr->values()->at(i);
- if (instr->HasAssignedIndexAt(i)) {
- env->Bind(instr->GetAssignedIndexAt(i), value);
- } else {
- env->Push(value);
- }
- }
+ instr->ReplayEnvironment(current_block_->last_environment());
// If there is an instruction pending deoptimization environment create a
// lazy bailout instruction to capture the environment.
@@ -2231,6 +2474,7 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
+ info()->MarkAsDeferredCalling();
if (instr->is_function_entry()) {
return MarkAsCall(new(zone()) LStackCheck, instr);
} else {
@@ -2247,10 +2491,11 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
instr->arguments_count(),
instr->function(),
undefined,
- instr->call_kind(),
- instr->inlining_kind());
- if (instr->arguments_var() != NULL) {
- inner->Bind(instr->arguments_var(), graph()->GetArgumentsObject());
+ instr->inlining_kind(),
+ instr->undefined_receiver());
+ // Only replay binding of arguments object if it wasn't removed from graph.
+ if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) {
+ inner->Bind(instr->arguments_var(), instr->arguments_object());
}
inner->set_entry(instr);
current_block_->UpdateEnvironment(inner);
@@ -2267,7 +2512,7 @@ LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
if (env->entry()->arguments_pushed()) {
int argument_count = env->arguments_environment()->parameter_count();
pop = new(zone()) LDrop(argument_count);
- argument_count_ -= argument_count;
+ ASSERT(instr->argument_delta() == -argument_count);
}
HEnvironment* outer = current_block_->last_environment()->
@@ -2278,14 +2523,6 @@ LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
}
-LInstruction* LChunkBuilder::DoIn(HIn* instr) {
- LOperand* key = UseOrConstantAtStart(instr->key());
- LOperand* object = UseOrConstantAtStart(instr->object());
- LIn* result = new(zone()) LIn(key, object);
- return MarkAsCall(DefineFixed(result, rax), instr);
-}
-
-
LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) {
LOperand* object = UseFixed(instr->enumerable(), rax);
LForInPrepareMap* result = new(zone()) LForInPrepareMap(object);
diff --git a/deps/v8/src/x64/lithium-x64.h b/deps/v8/src/x64/lithium-x64.h
index 6cf4af661..1a889b396 100644
--- a/deps/v8/src/x64/lithium-x64.h
+++ b/deps/v8/src/x64/lithium-x64.h
@@ -40,24 +40,16 @@ namespace internal {
// Forward declarations.
class LCodeGen;
-#define LITHIUM_ALL_INSTRUCTION_LIST(V) \
- V(ControlInstruction) \
- V(Call) \
- LITHIUM_CONCRETE_INSTRUCTION_LIST(V)
-
-
#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
V(AccessArgumentsAt) \
V(AddI) \
- V(AllocateObject) \
+ V(Allocate) \
V(ApplyArguments) \
V(ArgumentsElements) \
V(ArgumentsLength) \
V(ArithmeticD) \
V(ArithmeticT) \
- V(ArrayLiteral) \
V(BitI) \
- V(BitNotI) \
V(BoundsCheck) \
V(Branch) \
V(CallConstantFunction) \
@@ -67,36 +59,43 @@ class LCodeGen;
V(CallKnownGlobal) \
V(CallNamed) \
V(CallNew) \
+ V(CallNewArray) \
V(CallRuntime) \
V(CallStub) \
- V(CheckFunction) \
V(CheckInstanceType) \
V(CheckMaps) \
+ V(CheckMapValue) \
V(CheckNonSmi) \
- V(CheckPrototypeMaps) \
V(CheckSmi) \
+ V(CheckValue) \
V(ClampDToUint8) \
V(ClampIToUint8) \
V(ClampTToUint8) \
V(ClassOfTestAndBranch) \
- V(CmpConstantEqAndBranch) \
- V(CmpIDAndBranch) \
+ V(CompareNumericAndBranch) \
V(CmpObjectEqAndBranch) \
+ V(CmpHoleAndBranch) \
V(CmpMapAndBranch) \
V(CmpT) \
V(ConstantD) \
+ V(ConstantE) \
V(ConstantI) \
+ V(ConstantS) \
V(ConstantT) \
V(Context) \
+ V(DateField) \
+ V(DebugBreak) \
V(DeclareGlobals) \
- V(DeleteProperty) \
V(Deoptimize) \
V(DivI) \
V(DoubleToI) \
+ V(DoubleToSmi) \
+ V(Drop) \
+ V(DummyUse) \
+ V(Dummy) \
V(ElementsKind) \
- V(FastLiteral) \
- V(FixedArrayBaseLength) \
- V(MapEnumLength) \
+ V(ForInCacheArray) \
+ V(ForInPrepareMap) \
V(FunctionLiteral) \
V(GetCachedArrayIndex) \
V(GlobalObject) \
@@ -104,44 +103,50 @@ class LCodeGen;
V(Goto) \
V(HasCachedArrayIndexAndBranch) \
V(HasInstanceTypeAndBranch) \
- V(In) \
+ V(InnerAllocatedObject) \
V(InstanceOf) \
V(InstanceOfKnownGlobal) \
V(InstructionGap) \
V(Integer32ToDouble) \
- V(Uint32ToDouble) \
+ V(Integer32ToSmi) \
V(InvokeFunction) \
V(IsConstructCallAndBranch) \
- V(IsNilAndBranch) \
V(IsObjectAndBranch) \
V(IsStringAndBranch) \
V(IsSmiAndBranch) \
V(IsUndetectableAndBranch) \
- V(JSArrayLength) \
V(Label) \
V(LazyBailout) \
V(LoadContextSlot) \
- V(LoadElements) \
V(LoadExternalArrayPointer) \
+ V(LoadRoot) \
+ V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
V(LoadGlobalCell) \
V(LoadGlobalGeneric) \
- V(LoadKeyedFastDoubleElement) \
- V(LoadKeyedFastElement) \
+ V(LoadKeyed) \
V(LoadKeyedGeneric) \
- V(LoadKeyedSpecializedArrayElement) \
V(LoadNamedField) \
- V(LoadNamedFieldPolymorphic) \
V(LoadNamedGeneric) \
+ V(MapEnumLength) \
+ V(MathAbs) \
+ V(MathCos) \
+ V(MathExp) \
+ V(MathFloor) \
V(MathFloorOfDiv) \
+ V(MathLog) \
V(MathMinMax) \
+ V(MathPowHalf) \
+ V(MathRound) \
+ V(MathSin) \
+ V(MathSqrt) \
+ V(MathTan) \
V(ModI) \
V(MulI) \
V(NumberTagD) \
V(NumberTagI) \
V(NumberTagU) \
V(NumberUntagD) \
- V(ObjectLiteral) \
V(OsrEntry) \
V(OuterContext) \
V(Parameter) \
@@ -150,51 +155,50 @@ class LCodeGen;
V(Random) \
V(RegExpLiteral) \
V(Return) \
+ V(SeqStringSetChar) \
V(ShiftI) \
V(SmiTag) \
V(SmiUntag) \
V(StackCheck) \
+ V(StoreCodeEntry) \
V(StoreContextSlot) \
V(StoreGlobalCell) \
V(StoreGlobalGeneric) \
- V(StoreKeyedFastDoubleElement) \
- V(StoreKeyedFastElement) \
+ V(StoreKeyed) \
V(StoreKeyedGeneric) \
- V(StoreKeyedSpecializedArrayElement) \
V(StoreNamedField) \
V(StoreNamedGeneric) \
V(StringAdd) \
V(StringCharCodeAt) \
V(StringCharFromCode) \
V(StringCompareAndBranch) \
- V(StringLength) \
V(SubI) \
V(TaggedToI) \
V(ThisFunction) \
V(Throw) \
V(ToFastProperties) \
V(TransitionElementsKind) \
+ V(TrapAllocationMemento) \
V(Typeof) \
V(TypeofIsAndBranch) \
- V(UnaryMathOperation) \
+ V(Uint32ToDouble) \
+ V(Uint32ToSmi) \
V(UnknownOSRValue) \
V(ValueOf) \
- V(ForInPrepareMap) \
- V(ForInCacheArray) \
- V(CheckMapValue) \
- V(LoadFieldByIndex) \
- V(DateField) \
- V(WrapReceiver) \
- V(Drop)
+ V(WrapReceiver)
-#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
- virtual Opcode opcode() const { return LInstruction::k##type; } \
- virtual void CompileToNative(LCodeGen* generator); \
- virtual const char* Mnemonic() const { return mnemonic; } \
- static L##type* cast(LInstruction* instr) { \
- ASSERT(instr->Is##type()); \
- return reinterpret_cast<L##type*>(instr); \
+#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
+ virtual Opcode opcode() const V8_FINAL V8_OVERRIDE { \
+ return LInstruction::k##type; \
+ } \
+ virtual void CompileToNative(LCodeGen* generator) V8_FINAL V8_OVERRIDE; \
+ virtual const char* Mnemonic() const V8_FINAL V8_OVERRIDE { \
+ return mnemonic; \
+ } \
+ static L##type* cast(LInstruction* instr) { \
+ ASSERT(instr->Is##type()); \
+ return reinterpret_cast<L##type*>(instr); \
}
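// Illustrative expansion (not from the patch): instantiating the macro above
// as DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i") inside class LAddI produces,
// approximately:
//
//   virtual Opcode opcode() const V8_FINAL V8_OVERRIDE {
//     return LInstruction::kAddI;
//   }
//   virtual void CompileToNative(LCodeGen* generator) V8_FINAL V8_OVERRIDE;
//   virtual const char* Mnemonic() const V8_FINAL V8_OVERRIDE {
//     return "add-i";
//   }
//   static LAddI* cast(LInstruction* instr) {
//     ASSERT(instr->IsAddI());
//     return reinterpret_cast<LAddI*>(instr);
//   }
//
// i.e. every concrete instruction gets a sealed opcode(), Mnemonic(), and a
// checked downcast for free.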
@@ -204,14 +208,15 @@ class LCodeGen;
}
-class LInstruction: public ZoneObject {
+class LInstruction : public ZoneObject {
public:
LInstruction()
- : environment_(NULL),
- hydrogen_value_(NULL),
- is_call_(false) { }
+ : environment_(NULL),
+ hydrogen_value_(NULL),
+ bit_field_(IsCallBits::encode(false)) {
+ }
- virtual ~LInstruction() { }
+ virtual ~LInstruction() {}
virtual void CompileToNative(LCodeGen* generator) = 0;
virtual const char* Mnemonic() const = 0;
@@ -252,19 +257,27 @@ class LInstruction: public ZoneObject {
void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
HValue* hydrogen_value() const { return hydrogen_value_; }
- void MarkAsCall() { is_call_ = true; }
+ void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
+ bool IsCall() const { return IsCallBits::decode(bit_field_); }
+
+ // Interface to the register allocator and iterators.
+ bool ClobbersTemps() const { return IsCall(); }
+ bool ClobbersRegisters() const { return IsCall(); }
+ virtual bool ClobbersDoubleRegisters() const { return IsCall(); }
virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) { }
// Interface to the register allocator and iterators.
- bool IsMarkedAsCall() const { return is_call_; }
+ bool IsMarkedAsCall() const { return IsCall(); }
virtual bool HasResult() const = 0;
- virtual LOperand* result() = 0;
+ virtual LOperand* result() const = 0;
LOperand* FirstInput() { return InputAt(0); }
LOperand* Output() { return HasResult() ? result() : NULL; }
+ virtual bool HasInterestingComment(LCodeGen* gen) const { return true; }
+
#ifdef DEBUG
void VerifyCall();
#endif
@@ -279,10 +292,12 @@ class LInstruction: public ZoneObject {
virtual int TempCount() = 0;
virtual LOperand* TempAt(int i) = 0;
+ class IsCallBits: public BitField<bool, 0, 1> {};
+
LEnvironment* environment_;
SetOncePointer<LPointerMap> pointer_map_;
HValue* hydrogen_value_;
- bool is_call_;
+ int bit_field_;
};
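// Minimal standalone sketch (not v8::internal::BitField itself) of the
// bit-field pattern IsCallBits relies on above: a value packed into a fixed
// bit range of an integer, with encode/update/decode helpers.
//
//   template <class T, int shift, int size>
//   struct SimpleBitField {
//     static const int kMask = ((1 << size) - 1) << shift;
//     static int encode(T value) { return static_cast<int>(value) << shift; }
//     static int update(int previous, T value) {
//       return (previous & ~kMask) | encode(value);
//     }
//     static T decode(int value) {
//       return static_cast<T>((value & kMask) >> shift);
//     }
//   };
//
//   // Mirrors LInstruction: bit 0 of bit_field_ holds the "is call" flag.
//   // bit_field_ = SimpleBitField<bool, 0, 1>::update(bit_field_, true);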
@@ -290,13 +305,15 @@ class LInstruction: public ZoneObject {
// I = number of input operands.
// T = number of temporary operands.
template<int R, int I, int T>
-class LTemplateInstruction: public LInstruction {
+class LTemplateInstruction : public LInstruction {
public:
// Allow 0 or 1 output operands.
STATIC_ASSERT(R == 0 || R == 1);
- virtual bool HasResult() const { return R != 0; }
+ virtual bool HasResult() const V8_FINAL V8_OVERRIDE {
+ return R != 0 && result() != NULL;
+ }
void set_result(LOperand* operand) { results_[0] = operand; }
- LOperand* result() { return results_[0]; }
+ LOperand* result() const { return results_[0]; }
protected:
EmbeddedContainer<LOperand*, R> results_;
@@ -305,15 +322,15 @@ class LTemplateInstruction: public LInstruction {
private:
// Iterator support.
- virtual int InputCount() { return I; }
- virtual LOperand* InputAt(int i) { return inputs_[i]; }
+ virtual int InputCount() V8_FINAL V8_OVERRIDE { return I; }
+ virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; }
- virtual int TempCount() { return T; }
- virtual LOperand* TempAt(int i) { return temps_[i]; }
+ virtual int TempCount() V8_FINAL V8_OVERRIDE { return T; }
+ virtual LOperand* TempAt(int i) V8_FINAL V8_OVERRIDE { return temps_[i]; }
};
-class LGap: public LTemplateInstruction<0, 0, 0> {
+class LGap : public LTemplateInstruction<0, 0, 0> {
public:
explicit LGap(HBasicBlock* block)
: block_(block) {
@@ -324,8 +341,8 @@ class LGap: public LTemplateInstruction<0, 0, 0> {
}
// Can't use the DECLARE-macro here because of sub-classes.
- virtual bool IsGap() const { return true; }
- virtual void PrintDataTo(StringStream* stream);
+ virtual bool IsGap() const V8_FINAL V8_OVERRIDE { return true; }
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
static LGap* cast(LInstruction* instr) {
ASSERT(instr->IsGap());
return reinterpret_cast<LGap*>(instr);
@@ -362,30 +379,35 @@ class LGap: public LTemplateInstruction<0, 0, 0> {
};
-class LInstructionGap: public LGap {
+class LInstructionGap V8_FINAL : public LGap {
public:
explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ return !IsRedundant();
+ }
+
DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap")
};
-class LGoto: public LTemplateInstruction<0, 0, 0> {
+class LGoto V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
- explicit LGoto(int block_id) : block_id_(block_id) { }
+ explicit LGoto(HBasicBlock* block) : block_(block) { }
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
- virtual void PrintDataTo(StringStream* stream);
- virtual bool IsControl() const { return true; }
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual bool IsControl() const V8_OVERRIDE { return true; }
- int block_id() const { return block_id_; }
+ int block_id() const { return block_->block_id(); }
private:
- int block_id_;
+ HBasicBlock* block_;
};
-class LLazyBailout: public LTemplateInstruction<0, 0, 0> {
+class LLazyBailout V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
LLazyBailout() : gap_instructions_size_(0) { }
@@ -401,23 +423,44 @@ class LLazyBailout: public LTemplateInstruction<0, 0, 0> {
};
-class LDeoptimize: public LTemplateInstruction<0, 0, 0> {
+class LDummy V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ explicit LDummy() { }
+ DECLARE_CONCRETE_INSTRUCTION(Dummy, "dummy")
+};
+
+
+class LDummyUse V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LDummyUse(LOperand* value) {
+ inputs_[0] = value;
+ }
+ DECLARE_CONCRETE_INSTRUCTION(DummyUse, "dummy-use")
+};
+
+
+class LDeoptimize V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
+ DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
};
-class LLabel: public LGap {
+class LLabel V8_FINAL : public LGap {
public:
explicit LLabel(HBasicBlock* block)
: LGap(block), replacement_(NULL) { }
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ return false;
+ }
DECLARE_CONCRETE_INSTRUCTION(Label, "label")
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
int block_id() const { return block()->block_id(); }
bool is_loop_header() const { return block()->IsLoopHeader(); }
+ bool is_osr_entry() const { return block()->is_osr_entry(); }
Label* label() { return &label_; }
LLabel* replacement() const { return replacement_; }
void set_replacement(LLabel* label) { replacement_ = label; }
@@ -429,13 +472,16 @@ class LLabel: public LGap {
};
-class LParameter: public LTemplateInstruction<1, 0, 0> {
+class LParameter V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ return false;
+ }
DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
};
-class LCallStub: public LTemplateInstruction<1, 0, 0> {
+class LCallStub V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
DECLARE_HYDROGEN_ACCESSOR(CallStub)
@@ -446,30 +492,60 @@ class LCallStub: public LTemplateInstruction<1, 0, 0> {
};
-class LUnknownOSRValue: public LTemplateInstruction<1, 0, 0> {
+class LUnknownOSRValue V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ return false;
+ }
DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
};
template<int I, int T>
-class LControlInstruction: public LTemplateInstruction<0, I, T> {
+class LControlInstruction : public LTemplateInstruction<0, I, T> {
public:
- virtual bool IsControl() const { return true; }
+ LControlInstruction() : false_label_(NULL), true_label_(NULL) { }
+
+ virtual bool IsControl() const V8_FINAL V8_OVERRIDE { return true; }
int SuccessorCount() { return hydrogen()->SuccessorCount(); }
HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
- int true_block_id() { return hydrogen()->SuccessorAt(0)->block_id(); }
- int false_block_id() { return hydrogen()->SuccessorAt(1)->block_id(); }
+
+ int TrueDestination(LChunk* chunk) {
+ return chunk->LookupDestination(true_block_id());
+ }
+ int FalseDestination(LChunk* chunk) {
+ return chunk->LookupDestination(false_block_id());
+ }
+
+ Label* TrueLabel(LChunk* chunk) {
+ if (true_label_ == NULL) {
+ true_label_ = chunk->GetAssemblyLabel(TrueDestination(chunk));
+ }
+ return true_label_;
+ }
+ Label* FalseLabel(LChunk* chunk) {
+ if (false_label_ == NULL) {
+ false_label_ = chunk->GetAssemblyLabel(FalseDestination(chunk));
+ }
+ return false_label_;
+ }
+
+ protected:
+ int true_block_id() { return SuccessorAt(0)->block_id(); }
+ int false_block_id() { return SuccessorAt(1)->block_id(); }
private:
HControlInstruction* hydrogen() {
return HControlInstruction::cast(this->hydrogen_value());
}
+
+ Label* false_label_;
+ Label* true_label_;
};
-class LWrapReceiver: public LTemplateInstruction<1, 2, 0> {
+class LWrapReceiver V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LWrapReceiver(LOperand* receiver, LOperand* function) {
inputs_[0] = receiver;
@@ -483,7 +559,7 @@ class LWrapReceiver: public LTemplateInstruction<1, 2, 0> {
};
-class LApplyArguments: public LTemplateInstruction<1, 4, 0> {
+class LApplyArguments V8_FINAL : public LTemplateInstruction<1, 4, 0> {
public:
LApplyArguments(LOperand* function,
LOperand* receiver,
@@ -504,7 +580,7 @@ class LApplyArguments: public LTemplateInstruction<1, 4, 0> {
};
-class LAccessArgumentsAt: public LTemplateInstruction<1, 3, 0> {
+class LAccessArgumentsAt V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) {
inputs_[0] = arguments;
@@ -518,11 +594,11 @@ class LAccessArgumentsAt: public LTemplateInstruction<1, 3, 0> {
DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LArgumentsLength: public LTemplateInstruction<1, 1, 0> {
+class LArgumentsLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LArgumentsLength(LOperand* elements) {
inputs_[0] = elements;
@@ -534,14 +610,14 @@ class LArgumentsLength: public LTemplateInstruction<1, 1, 0> {
};
-class LArgumentsElements: public LTemplateInstruction<1, 0, 0> {
+class LArgumentsElements V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements)
};
-class LModI: public LTemplateInstruction<1, 2, 1> {
+class LModI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LModI(LOperand* left, LOperand* right, LOperand* temp) {
inputs_[0] = left;
@@ -558,7 +634,7 @@ class LModI: public LTemplateInstruction<1, 2, 1> {
};
-class LDivI: public LTemplateInstruction<1, 2, 1> {
+class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LDivI(LOperand* left, LOperand* right, LOperand* temp) {
inputs_[0] = left;
@@ -570,12 +646,14 @@ class LDivI: public LTemplateInstruction<1, 2, 1> {
LOperand* right() { return inputs_[1]; }
LOperand* temp() { return temps_[0]; }
+ bool is_flooring() { return hydrogen_value()->IsMathFloorOfDiv(); }
+
DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
DECLARE_HYDROGEN_ACCESSOR(Div)
};
-class LMathFloorOfDiv: public LTemplateInstruction<1, 2, 1> {
+class LMathFloorOfDiv V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LMathFloorOfDiv(LOperand* left,
LOperand* right,
@@ -594,7 +672,7 @@ class LMathFloorOfDiv: public LTemplateInstruction<1, 2, 1> {
};
-class LMulI: public LTemplateInstruction<1, 2, 0> {
+class LMulI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LMulI(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -609,9 +687,9 @@ class LMulI: public LTemplateInstruction<1, 2, 0> {
};
-class LCmpIDAndBranch: public LControlInstruction<2, 0> {
+class LCompareNumericAndBranch V8_FINAL : public LControlInstruction<2, 0> {
public:
- LCmpIDAndBranch(LOperand* left, LOperand* right) {
+ LCompareNumericAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
}
@@ -619,84 +697,175 @@ class LCmpIDAndBranch: public LControlInstruction<2, 0> {
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
- DECLARE_CONCRETE_INSTRUCTION(CmpIDAndBranch, "cmp-id-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareIDAndBranch)
+ DECLARE_CONCRETE_INSTRUCTION(CompareNumericAndBranch,
+ "compare-numeric-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareNumericAndBranch)
Token::Value op() const { return hydrogen()->token(); }
bool is_double() const {
- return hydrogen()->GetInputRepresentation().IsDouble();
+ return hydrogen()->representation().IsDouble();
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LUnaryMathOperation: public LTemplateInstruction<1, 1, 0> {
+class LMathFloor V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
- explicit LUnaryMathOperation(LOperand* value) {
+ explicit LMathFloor(LOperand* value) {
inputs_[0] = value;
}
LOperand* value() { return inputs_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(UnaryMathOperation, "unary-math-operation")
+ DECLARE_CONCRETE_INSTRUCTION(MathFloor, "math-floor")
DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
- virtual void PrintDataTo(StringStream* stream);
- BuiltinFunctionId op() const { return hydrogen()->op(); }
+
+class LMathRound V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathRound(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathRound, "math-round")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
};
-class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
+class LMathAbs V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
- LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
+ explicit LMathAbs(LOperand* value) {
+ inputs_[0] = value;
}
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
+ LOperand* value() { return inputs_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch,
- "cmp-object-eq-and-branch")
+ DECLARE_CONCRETE_INSTRUCTION(MathAbs, "math-abs")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
};
-class LCmpConstantEqAndBranch: public LControlInstruction<1, 0> {
+class LMathLog V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
- explicit LCmpConstantEqAndBranch(LOperand* left) {
- inputs_[0] = left;
+ explicit LMathLog(LOperand* value) {
+ inputs_[0] = value;
}
- LOperand* left() { return inputs_[0]; }
+ LOperand* value() { return inputs_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(CmpConstantEqAndBranch,
- "cmp-constant-eq-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareConstantEqAndBranch)
+ DECLARE_CONCRETE_INSTRUCTION(MathLog, "math-log")
};
-class LIsNilAndBranch: public LControlInstruction<1, 1> {
+class LMathSin V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
- LIsNilAndBranch(LOperand* value, LOperand* temp) {
+ explicit LMathSin(LOperand* value) {
inputs_[0] = value;
- temps_[0] = temp;
}
LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(IsNilAndBranch, "is-nil-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsNilAndBranch)
+ DECLARE_CONCRETE_INSTRUCTION(MathSin, "math-sin")
+};
- EqualityKind kind() const { return hydrogen()->kind(); }
- NilValue nil() const { return hydrogen()->nil(); }
- virtual void PrintDataTo(StringStream* stream);
+class LMathCos V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathCos(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathCos, "math-cos")
+};
+
+
+class LMathTan V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathTan(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathTan, "math-tan")
+};
+
+
+class LMathExp V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+ public:
+ LMathExp(LOperand* value, LOperand* temp1, LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ ExternalReference::InitializeMathExpData();
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp")
+};
+
+
+class LMathSqrt V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathSqrt(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathSqrt, "math-sqrt")
+};
+
+
+class LMathPowHalf V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathPowHalf(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathPowHalf, "math-pow-half")
};
-class LIsObjectAndBranch: public LControlInstruction<1, 0> {
+class LCmpObjectEqAndBranch V8_FINAL : public LControlInstruction<2, 0> {
+ public:
+ LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch, "cmp-object-eq-and-branch")
+};
+
+
+class LCmpHoleAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+ public:
+ explicit LCmpHoleAndBranch(LOperand* object) {
+ inputs_[0] = object;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpHoleAndBranch, "cmp-hole-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareHoleAndBranch)
+};
+
+
+class LIsObjectAndBranch V8_FINAL : public LControlInstruction<1, 0> {
public:
explicit LIsObjectAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -707,11 +876,11 @@ class LIsObjectAndBranch: public LControlInstruction<1, 0> {
DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsObjectAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LIsStringAndBranch: public LControlInstruction<1, 1> {
+class LIsStringAndBranch V8_FINAL : public LControlInstruction<1, 1> {
public:
explicit LIsStringAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -724,11 +893,11 @@ class LIsStringAndBranch: public LControlInstruction<1, 1> {
DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LIsSmiAndBranch: public LControlInstruction<1, 0> {
+class LIsSmiAndBranch V8_FINAL : public LControlInstruction<1, 0> {
public:
explicit LIsSmiAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -739,11 +908,11 @@ class LIsSmiAndBranch: public LControlInstruction<1, 0> {
DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LIsUndetectableAndBranch: public LControlInstruction<1, 1> {
+class LIsUndetectableAndBranch V8_FINAL : public LControlInstruction<1, 1> {
public:
explicit LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -757,11 +926,11 @@ class LIsUndetectableAndBranch: public LControlInstruction<1, 1> {
"is-undetectable-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LStringCompareAndBranch: public LControlInstruction<2, 0> {
+class LStringCompareAndBranch V8_FINAL : public LControlInstruction<2, 0> {
public:
explicit LStringCompareAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -775,13 +944,13 @@ class LStringCompareAndBranch: public LControlInstruction<2, 0> {
"string-compare-and-branch")
DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Token::Value op() const { return hydrogen()->token(); }
};
-class LHasInstanceTypeAndBranch: public LControlInstruction<1, 0> {
+class LHasInstanceTypeAndBranch V8_FINAL : public LControlInstruction<1, 0> {
public:
explicit LHasInstanceTypeAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -793,11 +962,11 @@ class LHasInstanceTypeAndBranch: public LControlInstruction<1, 0> {
"has-instance-type-and-branch")
DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LGetCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
+class LGetCachedArrayIndex V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LGetCachedArrayIndex(LOperand* value) {
inputs_[0] = value;
@@ -810,7 +979,8 @@ class LGetCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
};
-class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
+class LHasCachedArrayIndexAndBranch V8_FINAL
+ : public LControlInstruction<1, 0> {
public:
explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -822,11 +992,11 @@ class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
"has-cached-array-index-and-branch")
DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LClassOfTestAndBranch: public LControlInstruction<1, 2> {
+class LClassOfTestAndBranch V8_FINAL : public LControlInstruction<1, 2> {
public:
LClassOfTestAndBranch(LOperand* value, LOperand* temp, LOperand* temp2) {
inputs_[0] = value;
@@ -842,11 +1012,11 @@ class LClassOfTestAndBranch: public LControlInstruction<1, 2> {
"class-of-test-and-branch")
DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LCmpT: public LTemplateInstruction<1, 2, 0> {
+class LCmpT V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LCmpT(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -863,21 +1033,7 @@ class LCmpT: public LTemplateInstruction<1, 2, 0> {
};
-class LIn: public LTemplateInstruction<1, 2, 0> {
- public:
- LIn(LOperand* key, LOperand* object) {
- inputs_[0] = key;
- inputs_[1] = object;
- }
-
- LOperand* key() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(In, "in")
-};
-
-
-class LInstanceOf: public LTemplateInstruction<1, 2, 0> {
+class LInstanceOf V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LInstanceOf(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -891,7 +1047,7 @@ class LInstanceOf: public LTemplateInstruction<1, 2, 0> {
};
-class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> {
+class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LInstanceOfKnownGlobal(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -909,7 +1065,8 @@ class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> {
LEnvironment* GetDeferredLazyDeoptimizationEnvironment() {
return lazy_deopt_env_;
}
- virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) {
+ virtual void SetDeferredLazyDeoptimizationEnvironment(
+ LEnvironment* env) V8_OVERRIDE {
lazy_deopt_env_ = env;
}
@@ -918,7 +1075,7 @@ class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> {
};
-class LBoundsCheck: public LTemplateInstruction<0, 2, 0> {
+class LBoundsCheck V8_FINAL : public LTemplateInstruction<0, 2, 0> {
public:
LBoundsCheck(LOperand* index, LOperand* length) {
inputs_[0] = index;
@@ -933,7 +1090,7 @@ class LBoundsCheck: public LTemplateInstruction<0, 2, 0> {
};
-class LBitI: public LTemplateInstruction<1, 2, 0> {
+class LBitI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LBitI(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -950,7 +1107,7 @@ class LBitI: public LTemplateInstruction<1, 2, 0> {
};
-class LShiftI: public LTemplateInstruction<1, 2, 0> {
+class LShiftI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
: op_(op), can_deopt_(can_deopt) {
@@ -971,7 +1128,7 @@ class LShiftI: public LTemplateInstruction<1, 2, 0> {
};
-class LSubI: public LTemplateInstruction<1, 2, 0> {
+class LSubI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LSubI(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -986,7 +1143,7 @@ class LSubI: public LTemplateInstruction<1, 2, 0> {
};
-class LConstantI: public LTemplateInstruction<1, 0, 0> {
+class LConstantI V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -995,7 +1152,16 @@ class LConstantI: public LTemplateInstruction<1, 0, 0> {
};
-class LConstantD: public LTemplateInstruction<1, 0, 1> {
+class LConstantS V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ConstantS, "constant-s")
+ DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+ Smi* value() const { return Smi::FromInt(hydrogen()->Integer32Value()); }
+};
+
+
+class LConstantD V8_FINAL : public LTemplateInstruction<1, 0, 1> {
public:
explicit LConstantD(LOperand* temp) {
temps_[0] = temp;
@@ -1010,81 +1176,65 @@ class LConstantD: public LTemplateInstruction<1, 0, 1> {
};
-class LConstantT: public LTemplateInstruction<1, 0, 0> {
+class LConstantE V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
- DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
+ DECLARE_CONCRETE_INSTRUCTION(ConstantE, "constant-e")
DECLARE_HYDROGEN_ACCESSOR(Constant)
- Handle<Object> value() const { return hydrogen()->handle(); }
+ ExternalReference value() const {
+ return hydrogen()->ExternalReferenceValue();
+ }
};
-class LBranch: public LControlInstruction<1, 0> {
+class LConstantT V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
- explicit LBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
- DECLARE_HYDROGEN_ACCESSOR(Branch)
+ DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
+ DECLARE_HYDROGEN_ACCESSOR(Constant)
- virtual void PrintDataTo(StringStream* stream);
+ Handle<Object> value(Isolate* isolate) const {
+ return hydrogen()->handle(isolate);
+ }
};
-class LCmpMapAndBranch: public LTemplateInstruction<0, 1, 0> {
+class LBranch V8_FINAL : public LControlInstruction<1, 0> {
public:
- explicit LCmpMapAndBranch(LOperand* value) {
+ explicit LBranch(LOperand* value) {
inputs_[0] = value;
}
LOperand* value() { return inputs_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareMap)
-
- virtual bool IsControl() const { return true; }
+ DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
+ DECLARE_HYDROGEN_ACCESSOR(Branch)
- Handle<Map> map() const { return hydrogen()->map(); }
- int true_block_id() const {
- return hydrogen()->FirstSuccessor()->block_id();
- }
- int false_block_id() const {
- return hydrogen()->SecondSuccessor()->block_id();
- }
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LJSArrayLength: public LTemplateInstruction<1, 1, 0> {
+class LDebugBreak V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
- explicit LJSArrayLength(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(JSArrayLength, "js-array-length")
- DECLARE_HYDROGEN_ACCESSOR(JSArrayLength)
+ DECLARE_CONCRETE_INSTRUCTION(DebugBreak, "break")
};
-class LFixedArrayBaseLength: public LTemplateInstruction<1, 1, 0> {
+class LCmpMapAndBranch V8_FINAL : public LControlInstruction<1, 0> {
public:
- explicit LFixedArrayBaseLength(LOperand* value) {
+ explicit LCmpMapAndBranch(LOperand* value) {
inputs_[0] = value;
}
LOperand* value() { return inputs_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(FixedArrayBaseLength,
- "fixed-array-base-length")
- DECLARE_HYDROGEN_ACCESSOR(FixedArrayBaseLength)
+ DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareMap)
+
+ Handle<Map> map() const { return hydrogen()->map().handle(); }
};
-class LMapEnumLength: public LTemplateInstruction<1, 1, 0> {
+class LMapEnumLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMapEnumLength(LOperand* value) {
inputs_[0] = value;
@@ -1096,7 +1246,7 @@ class LMapEnumLength: public LTemplateInstruction<1, 1, 0> {
};
-class LElementsKind: public LTemplateInstruction<1, 1, 0> {
+class LElementsKind V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LElementsKind(LOperand* value) {
inputs_[0] = value;
@@ -1109,7 +1259,7 @@ class LElementsKind: public LTemplateInstruction<1, 1, 0> {
};
-class LValueOf: public LTemplateInstruction<1, 1, 0> {
+class LValueOf V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LValueOf(LOperand* value) {
inputs_[0] = value;
@@ -1122,7 +1272,7 @@ class LValueOf: public LTemplateInstruction<1, 1, 0> {
};
-class LDateField: public LTemplateInstruction<1, 1, 0> {
+class LDateField V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
LDateField(LOperand* date, Smi* index) : index_(index) {
inputs_[0] = date;
@@ -1131,39 +1281,51 @@ class LDateField: public LTemplateInstruction<1, 1, 0> {
LOperand* date() { return inputs_[0]; }
Smi* index() const { return index_; }
- DECLARE_CONCRETE_INSTRUCTION(ValueOf, "date-field")
- DECLARE_HYDROGEN_ACCESSOR(ValueOf)
+ DECLARE_CONCRETE_INSTRUCTION(DateField, "date-field")
+ DECLARE_HYDROGEN_ACCESSOR(DateField)
private:
Smi* index_;
};
-class LThrow: public LTemplateInstruction<0, 1, 0> {
+class LSeqStringSetChar V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
- explicit LThrow(LOperand* value) {
- inputs_[0] = value;
+ LSeqStringSetChar(String::Encoding encoding,
+ LOperand* string,
+ LOperand* index,
+ LOperand* value) : encoding_(encoding) {
+ inputs_[0] = string;
+ inputs_[1] = index;
+ inputs_[2] = value;
}
- LOperand* value() { return inputs_[0]; }
+ String::Encoding encoding() { return encoding_; }
+ LOperand* string() { return inputs_[0]; }
+ LOperand* index() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
- DECLARE_CONCRETE_INSTRUCTION(Throw, "throw")
+ DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char")
+ DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar)
+
+ private:
+ String::Encoding encoding_;
};
-class LBitNotI: public LTemplateInstruction<1, 1, 0> {
+class LThrow V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
- explicit LBitNotI(LOperand* value) {
+ explicit LThrow(LOperand* value) {
inputs_[0] = value;
}
LOperand* value() { return inputs_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(BitNotI, "bit-not-i")
+ DECLARE_CONCRETE_INSTRUCTION(Throw, "throw")
};
-class LAddI: public LTemplateInstruction<1, 2, 0> {
+class LAddI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LAddI(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1173,12 +1335,17 @@ class LAddI: public LTemplateInstruction<1, 2, 0> {
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
+ static bool UseLea(HAdd* add) {
+ return !add->CheckFlag(HValue::kCanOverflow) &&
+ add->BetterLeftOperand()->UseCount() > 1;
+ }
+
DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i")
DECLARE_HYDROGEN_ACCESSOR(Add)
};
-class LMathMinMax: public LTemplateInstruction<1, 2, 0> {
+class LMathMinMax V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LMathMinMax(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1188,12 +1355,12 @@ class LMathMinMax: public LTemplateInstruction<1, 2, 0> {
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
- DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "min-max")
+ DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "math-min-max")
DECLARE_HYDROGEN_ACCESSOR(MathMinMax)
};
-class LPower: public LTemplateInstruction<1, 2, 0> {
+class LPower V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LPower(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1208,20 +1375,29 @@ class LPower: public LTemplateInstruction<1, 2, 0> {
};
-class LRandom: public LTemplateInstruction<1, 1, 0> {
+class LRandom V8_FINAL : public LTemplateInstruction<1, 1, 3> {
public:
- explicit LRandom(LOperand* global_object) {
+ LRandom(LOperand* global_object,
+ LOperand* scratch,
+ LOperand* scratch2,
+ LOperand* scratch3) {
inputs_[0] = global_object;
+ temps_[0] = scratch;
+ temps_[1] = scratch2;
+ temps_[2] = scratch3;
}
LOperand* global_object() { return inputs_[0]; }
+ LOperand* scratch() const { return temps_[0]; }
+ LOperand* scratch2() const { return temps_[1]; }
+ LOperand* scratch3() const { return temps_[2]; }
DECLARE_CONCRETE_INSTRUCTION(Random, "random")
DECLARE_HYDROGEN_ACCESSOR(Random)
};
-class LArithmeticD: public LTemplateInstruction<1, 2, 0> {
+class LArithmeticD V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
: op_(op) {
@@ -1233,16 +1409,18 @@ class LArithmeticD: public LTemplateInstruction<1, 2, 0> {
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
- virtual Opcode opcode() const { return LInstruction::kArithmeticD; }
- virtual void CompileToNative(LCodeGen* generator);
- virtual const char* Mnemonic() const;
+ virtual Opcode opcode() const V8_OVERRIDE {
+ return LInstruction::kArithmeticD;
+ }
+ virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
+ virtual const char* Mnemonic() const V8_OVERRIDE;
private:
Token::Value op_;
};
-class LArithmeticT: public LTemplateInstruction<1, 2, 0> {
+class LArithmeticT V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LArithmeticT(Token::Value op, LOperand* left, LOperand* right)
: op_(op) {
@@ -1254,28 +1432,41 @@ class LArithmeticT: public LTemplateInstruction<1, 2, 0> {
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
- virtual Opcode opcode() const { return LInstruction::kArithmeticT; }
- virtual void CompileToNative(LCodeGen* generator);
- virtual const char* Mnemonic() const;
+ virtual Opcode opcode() const V8_OVERRIDE {
+ return LInstruction::kArithmeticT;
+ }
+ virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
+ virtual const char* Mnemonic() const V8_OVERRIDE;
private:
Token::Value op_;
};
-class LReturn: public LTemplateInstruction<0, 1, 0> {
+class LReturn V8_FINAL : public LTemplateInstruction<0, 2, 0> {
public:
- explicit LReturn(LOperand* value) {
+ explicit LReturn(LOperand* value, LOperand* parameter_count) {
inputs_[0] = value;
+ inputs_[1] = parameter_count;
}
LOperand* value() { return inputs_[0]; }
+ bool has_constant_parameter_count() {
+ return parameter_count()->IsConstantOperand();
+ }
+ LConstantOperand* constant_parameter_count() {
+ ASSERT(has_constant_parameter_count());
+ return LConstantOperand::cast(parameter_count());
+ }
+ LOperand* parameter_count() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(Return, "return")
+ DECLARE_HYDROGEN_ACCESSOR(Return)
};
-class LLoadNamedField: public LTemplateInstruction<1, 1, 0> {
+class LLoadNamedField V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadNamedField(LOperand* object) {
inputs_[0] = object;
@@ -1288,20 +1479,7 @@ class LLoadNamedField: public LTemplateInstruction<1, 1, 0> {
};
-class LLoadNamedFieldPolymorphic: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadNamedFieldPolymorphic(LOperand* object) {
- inputs_[0] = object;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field-polymorphic")
- DECLARE_HYDROGEN_ACCESSOR(LoadNamedFieldPolymorphic)
-
- LOperand* object() { return inputs_[0]; }
-};
-
-
-class LLoadNamedGeneric: public LTemplateInstruction<1, 1, 0> {
+class LLoadNamedGeneric V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadNamedGeneric(LOperand* object) {
inputs_[0] = object;
@@ -1315,7 +1493,7 @@ class LLoadNamedGeneric: public LTemplateInstruction<1, 1, 0> {
};
-class LLoadFunctionPrototype: public LTemplateInstruction<1, 1, 0> {
+class LLoadFunctionPrototype V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadFunctionPrototype(LOperand* function) {
inputs_[0] = function;
@@ -1328,19 +1506,17 @@ class LLoadFunctionPrototype: public LTemplateInstruction<1, 1, 0> {
};
-class LLoadElements: public LTemplateInstruction<1, 1, 0> {
+class LLoadRoot V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
- explicit LLoadElements(LOperand* object) {
- inputs_[0] = object;
- }
+ DECLARE_CONCRETE_INSTRUCTION(LoadRoot, "load-root")
+ DECLARE_HYDROGEN_ACCESSOR(LoadRoot)
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadElements, "load-elements")
+ Heap::RootListIndex index() const { return hydrogen()->index(); }
};
-class LLoadExternalArrayPointer: public LTemplateInstruction<1, 1, 0> {
+class LLoadExternalArrayPointer V8_FINAL
+ : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadExternalArrayPointer(LOperand* object) {
inputs_[0] = object;
@@ -1353,60 +1529,30 @@ class LLoadExternalArrayPointer: public LTemplateInstruction<1, 1, 0> {
};
-class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> {
+class LLoadKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- LLoadKeyedFastElement(LOperand* elements, LOperand* key) {
+ LLoadKeyed(LOperand* elements, LOperand* key) {
inputs_[0] = elements;
inputs_[1] = key;
}
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastElement, "load-keyed-fast-element")
- DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastElement)
-
- LOperand* elements() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
-};
-
+ DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
+ DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
-class LLoadKeyedFastDoubleElement: public LTemplateInstruction<1, 2, 0> {
- public:
- LLoadKeyedFastDoubleElement(LOperand* elements, LOperand* key) {
- inputs_[0] = elements;
- inputs_[1] = key;
+ bool is_external() const {
+ return hydrogen()->is_external();
}
-
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastDoubleElement,
- "load-keyed-fast-double-element")
- DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastDoubleElement)
-
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
uint32_t additional_index() const { return hydrogen()->index_offset(); }
-};
-
-
-class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> {
- public:
- LLoadKeyedSpecializedArrayElement(LOperand* external_pointer, LOperand* key) {
- inputs_[0] = external_pointer;
- inputs_[1] = key;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedSpecializedArrayElement,
- "load-keyed-specialized-array-element")
- DECLARE_HYDROGEN_ACCESSOR(LoadKeyedSpecializedArrayElement)
-
- LOperand* external_pointer() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
ElementsKind elements_kind() const {
return hydrogen()->elements_kind();
}
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
-class LLoadKeyedGeneric: public LTemplateInstruction<1, 2, 0> {
+class LLoadKeyedGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LLoadKeyedGeneric(LOperand* obj, LOperand* key) {
inputs_[0] = obj;
@@ -1420,14 +1566,14 @@ class LLoadKeyedGeneric: public LTemplateInstruction<1, 2, 0> {
};
-class LLoadGlobalCell: public LTemplateInstruction<1, 0, 0> {
+class LLoadGlobalCell V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
};
-class LLoadGlobalGeneric: public LTemplateInstruction<1, 1, 0> {
+class LLoadGlobalGeneric V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadGlobalGeneric(LOperand* global_object) {
inputs_[0] = global_object;
@@ -1442,7 +1588,7 @@ class LLoadGlobalGeneric: public LTemplateInstruction<1, 1, 0> {
};
-class LStoreGlobalCell: public LTemplateInstruction<0, 1, 1> {
+class LStoreGlobalCell V8_FINAL : public LTemplateInstruction<0, 1, 1> {
public:
explicit LStoreGlobalCell(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1457,7 +1603,7 @@ class LStoreGlobalCell: public LTemplateInstruction<0, 1, 1> {
};
-class LStoreGlobalGeneric: public LTemplateInstruction<0, 2, 0> {
+class LStoreGlobalGeneric V8_FINAL : public LTemplateInstruction<0, 2, 0> {
public:
explicit LStoreGlobalGeneric(LOperand* global_object,
LOperand* value) {
@@ -1476,7 +1622,7 @@ class LStoreGlobalGeneric: public LTemplateInstruction<0, 2, 0> {
};
-class LLoadContextSlot: public LTemplateInstruction<1, 1, 0> {
+class LLoadContextSlot V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadContextSlot(LOperand* context) {
inputs_[0] = context;
@@ -1489,11 +1635,11 @@ class LLoadContextSlot: public LTemplateInstruction<1, 1, 0> {
int slot_index() { return hydrogen()->slot_index(); }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LStoreContextSlot: public LTemplateInstruction<0, 2, 1> {
+class LStoreContextSlot V8_FINAL : public LTemplateInstruction<0, 2, 1> {
public:
LStoreContextSlot(LOperand* context, LOperand* value, LOperand* temp) {
inputs_[0] = context;
@@ -1510,11 +1656,11 @@ class LStoreContextSlot: public LTemplateInstruction<0, 2, 1> {
int slot_index() { return hydrogen()->slot_index(); }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LPushArgument: public LTemplateInstruction<0, 1, 0> {
+class LPushArgument V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LPushArgument(LOperand* value) {
inputs_[0] = value;
@@ -1526,7 +1672,7 @@ class LPushArgument: public LTemplateInstruction<0, 1, 0> {
};
-class LDrop: public LTemplateInstruction<0, 0, 0> {
+class LDrop V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
explicit LDrop(int count) : count_(count) { }
@@ -1539,20 +1685,54 @@ class LDrop: public LTemplateInstruction<0, 0, 0> {
};
-class LThisFunction: public LTemplateInstruction<1, 0, 0> {
+class LStoreCodeEntry V8_FINAL : public LTemplateInstruction<0, 1, 1> {
+ public:
+ LStoreCodeEntry(LOperand* function, LOperand* code_object) {
+ inputs_[0] = function;
+ temps_[0] = code_object;
+ }
+
+ LOperand* function() { return inputs_[0]; }
+ LOperand* code_object() { return temps_[0]; }
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreCodeEntry, "store-code-entry")
+ DECLARE_HYDROGEN_ACCESSOR(StoreCodeEntry)
+};
+
+
+class LInnerAllocatedObject V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LInnerAllocatedObject(LOperand* base_object) {
+ inputs_[0] = base_object;
+ }
+
+ LOperand* base_object() { return inputs_[0]; }
+ int offset() { return hydrogen()->offset(); }
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "sub-allocated-object")
+ DECLARE_HYDROGEN_ACCESSOR(InnerAllocatedObject)
+};
+
+
+class LThisFunction V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
};
-class LContext: public LTemplateInstruction<1, 0, 0> {
+class LContext V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(Context, "context")
+ DECLARE_HYDROGEN_ACCESSOR(Context)
};
-class LOuterContext: public LTemplateInstruction<1, 1, 0> {
+class LOuterContext V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LOuterContext(LOperand* context) {
inputs_[0] = context;
@@ -1564,20 +1744,20 @@ class LOuterContext: public LTemplateInstruction<1, 1, 0> {
};
-class LDeclareGlobals: public LTemplateInstruction<0, 0, 0> {
+class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals")
DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals)
};
-class LGlobalObject: public LTemplateInstruction<1, 0, 0> {
+class LGlobalObject V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object")
};
-class LGlobalReceiver: public LTemplateInstruction<1, 1, 0> {
+class LGlobalReceiver V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LGlobalReceiver(LOperand* global_object) {
inputs_[0] = global_object;
@@ -1589,7 +1769,7 @@ class LGlobalReceiver: public LTemplateInstruction<1, 1, 0> {
};
-class LCallConstantFunction: public LTemplateInstruction<1, 0, 0> {
+class LCallConstantFunction V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallConstantFunction, "call-constant-function")
DECLARE_HYDROGEN_ACCESSOR(CallConstantFunction)
@@ -1601,7 +1781,7 @@ class LCallConstantFunction: public LTemplateInstruction<1, 0, 0> {
};
-class LInvokeFunction: public LTemplateInstruction<1, 1, 0> {
+class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LInvokeFunction(LOperand* function) {
inputs_[0] = function;
@@ -1612,14 +1792,13 @@ class LInvokeFunction: public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
- Handle<JSFunction> known_function() { return hydrogen()->known_function(); }
};
-class LCallKeyed: public LTemplateInstruction<1, 1, 0> {
+class LCallKeyed V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallKeyed(LOperand* key) {
inputs_[0] = key;
@@ -1630,25 +1809,25 @@ class LCallKeyed: public LTemplateInstruction<1, 1, 0> {
LOperand* key() { return inputs_[0]; }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallNamed: public LTemplateInstruction<1, 0, 0> {
+class LCallNamed V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallNamed, "call-named")
DECLARE_HYDROGEN_ACCESSOR(CallNamed)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<String> name() const { return hydrogen()->name(); }
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallFunction: public LTemplateInstruction<1, 1, 0> {
+class LCallFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallFunction(LOperand* function) {
inputs_[0] = function;
@@ -1662,31 +1841,30 @@ class LCallFunction: public LTemplateInstruction<1, 1, 0> {
};
-class LCallGlobal: public LTemplateInstruction<1, 0, 0> {
+class LCallGlobal V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call-global")
DECLARE_HYDROGEN_ACCESSOR(CallGlobal)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<String> name() const { return hydrogen()->name(); }
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallKnownGlobal: public LTemplateInstruction<1, 0, 0> {
+class LCallKnownGlobal V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallKnownGlobal, "call-known-global")
DECLARE_HYDROGEN_ACCESSOR(CallKnownGlobal)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- Handle<JSFunction> target() const { return hydrogen()->target(); }
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallNew: public LTemplateInstruction<1, 1, 0> {
+class LCallNew V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallNew(LOperand* constructor) {
inputs_[0] = constructor;
@@ -1697,23 +1875,45 @@ class LCallNew: public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
DECLARE_HYDROGEN_ACCESSOR(CallNew)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallRuntime: public LTemplateInstruction<1, 0, 0> {
+class LCallNewArray V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LCallNewArray(LOperand* constructor) {
+ inputs_[0] = constructor;
+ }
+
+ LOperand* constructor() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
+ DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
+ virtual bool ClobbersDoubleRegisters() const V8_OVERRIDE {
+ return save_doubles() == kDontSaveFPRegs;
+ }
+
const Runtime::Function* function() const { return hydrogen()->function(); }
int arity() const { return hydrogen()->argument_count(); }
+ SaveFPRegsMode save_doubles() const { return hydrogen()->save_doubles(); }
};
-class LInteger32ToDouble: public LTemplateInstruction<1, 1, 0> {
+class LInteger32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LInteger32ToDouble(LOperand* value) {
inputs_[0] = value;
@@ -1725,7 +1925,20 @@ class LInteger32ToDouble: public LTemplateInstruction<1, 1, 0> {
};
-class LUint32ToDouble: public LTemplateInstruction<1, 1, 1> {
+class LInteger32ToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LInteger32ToSmi(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Integer32ToSmi, "int32-to-smi")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
+class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
explicit LUint32ToDouble(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1739,7 +1952,20 @@ class LUint32ToDouble: public LTemplateInstruction<1, 1, 1> {
};
-class LNumberTagI: public LTemplateInstruction<1, 1, 0> {
+class LUint32ToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LUint32ToSmi(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Uint32ToSmi, "uint32-to-smi")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
+class LNumberTagI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LNumberTagI(LOperand* value) {
inputs_[0] = value;
@@ -1751,7 +1977,7 @@ class LNumberTagI: public LTemplateInstruction<1, 1, 0> {
};
-class LNumberTagU: public LTemplateInstruction<1, 1, 1> {
+class LNumberTagU V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
explicit LNumberTagU(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1765,7 +1991,7 @@ class LNumberTagU: public LTemplateInstruction<1, 1, 1> {
};
-class LNumberTagD: public LTemplateInstruction<1, 1, 1> {
+class LNumberTagD V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
explicit LNumberTagD(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1776,11 +2002,12 @@ class LNumberTagD: public LTemplateInstruction<1, 1, 1> {
LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
};
// Sometimes truncating conversion from a tagged value to an int32.
-class LDoubleToI: public LTemplateInstruction<1, 1, 0> {
+class LDoubleToI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LDoubleToI(LOperand* value) {
inputs_[0] = value;
@@ -1795,8 +2022,21 @@ class LDoubleToI: public LTemplateInstruction<1, 1, 0> {
};
+class LDoubleToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LDoubleToSmi(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DoubleToSmi, "double-to-smi")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+};
+
+
// Truncating conversion from a tagged value to an int32.
-class LTaggedToI: public LTemplateInstruction<1, 1, 1> {
+class LTaggedToI V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LTaggedToI(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1807,13 +2047,13 @@ class LTaggedToI: public LTemplateInstruction<1, 1, 1> {
LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
- DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+ DECLARE_HYDROGEN_ACCESSOR(Change)
bool truncating() { return hydrogen()->CanTruncateToInt32(); }
};
-class LSmiTag: public LTemplateInstruction<1, 1, 0> {
+class LSmiTag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LSmiTag(LOperand* value) {
inputs_[0] = value;
@@ -1825,7 +2065,7 @@ class LSmiTag: public LTemplateInstruction<1, 1, 0> {
};
-class LNumberUntagD: public LTemplateInstruction<1, 1, 0> {
+class LNumberUntagD V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LNumberUntagD(LOperand* value) {
inputs_[0] = value;
@@ -1838,7 +2078,7 @@ class LNumberUntagD: public LTemplateInstruction<1, 1, 0> {
};
-class LSmiUntag: public LTemplateInstruction<1, 1, 0> {
+class LSmiUntag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
LSmiUntag(LOperand* value, bool needs_check)
: needs_check_(needs_check) {
@@ -1855,7 +2095,7 @@ class LSmiUntag: public LTemplateInstruction<1, 1, 0> {
};
-class LStoreNamedField: public LTemplateInstruction<0, 2, 1> {
+class LStoreNamedField V8_FINAL : public LTemplateInstruction<0, 2, 1> {
public:
LStoreNamedField(LOperand* object, LOperand* value, LOperand* temp) {
inputs_[0] = object;
@@ -1870,16 +2110,16 @@ class LStoreNamedField: public LTemplateInstruction<0, 2, 1> {
DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- Handle<Object> name() const { return hydrogen()->name(); }
- bool is_in_object() { return hydrogen()->is_in_object(); }
- int offset() { return hydrogen()->offset(); }
- Handle<Map> transition() const { return hydrogen()->transition(); }
+ Handle<Map> transition() const { return hydrogen()->transition_map(); }
+ Representation representation() const {
+ return hydrogen()->field_representation();
+ }
};
-class LStoreNamedGeneric: public LTemplateInstruction<0, 2, 0> {
+class LStoreNamedGeneric V8_FINAL : public LTemplateInstruction<0, 2, 0> {
public:
LStoreNamedGeneric(LOperand* object, LOperand* value) {
inputs_[0] = object;
@@ -1892,84 +2132,37 @@ class LStoreNamedGeneric: public LTemplateInstruction<0, 2, 0> {
DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<Object> name() const { return hydrogen()->name(); }
StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
};
-class LStoreKeyedFastElement: public LTemplateInstruction<0, 3, 0> {
+class LStoreKeyed V8_FINAL : public LTemplateInstruction<0, 3, 0> {
public:
- LStoreKeyedFastElement(LOperand* object, LOperand* key, LOperand* value) {
+ LStoreKeyed(LOperand* object, LOperand* key, LOperand* value) {
inputs_[0] = object;
inputs_[1] = key;
inputs_[2] = value;
}
- LOperand* object() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement,
- "store-keyed-fast-element")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastElement)
-
- virtual void PrintDataTo(StringStream* stream);
-
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
-};
-
-
-class LStoreKeyedFastDoubleElement: public LTemplateInstruction<0, 3, 0> {
- public:
- LStoreKeyedFastDoubleElement(LOperand* elements,
- LOperand* key,
- LOperand* value) {
- inputs_[0] = elements;
- inputs_[1] = key;
- inputs_[2] = value;
- }
-
+ bool is_external() const { return hydrogen()->is_external(); }
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
+ ElementsKind elements_kind() const { return hydrogen()->elements_kind(); }
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastDoubleElement,
- "store-keyed-fast-double-element")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastDoubleElement)
-
- virtual void PrintDataTo(StringStream* stream);
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed")
+ DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
-class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 0> {
- public:
- LStoreKeyedSpecializedArrayElement(LOperand* external_pointer,
- LOperand* key,
- LOperand* value) {
- inputs_[0] = external_pointer;
- inputs_[1] = key;
- inputs_[2] = value;
- }
-
- LOperand* external_pointer() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedSpecializedArrayElement,
- "store-keyed-specialized-array-element")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyedSpecializedArrayElement)
-
- ElementsKind elements_kind() const { return hydrogen()->elements_kind(); }
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
-};
-
-
-class LStoreKeyedGeneric: public LTemplateInstruction<0, 3, 0> {
+class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
public:
LStoreKeyedGeneric(LOperand* object, LOperand* key, LOperand* value) {
inputs_[0] = object;
@@ -1984,13 +2177,13 @@ class LStoreKeyedGeneric: public LTemplateInstruction<0, 3, 0> {
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
};
-class LTransitionElementsKind: public LTemplateInstruction<1, 1, 2> {
+class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 1, 2> {
public:
LTransitionElementsKind(LOperand* object,
LOperand* new_map_temp,
@@ -2008,14 +2201,34 @@ class LTransitionElementsKind: public LTemplateInstruction<1, 1, 2> {
"transition-elements-kind")
DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- Handle<Map> original_map() { return hydrogen()->original_map(); }
- Handle<Map> transitioned_map() { return hydrogen()->transitioned_map(); }
+ Handle<Map> original_map() { return hydrogen()->original_map().handle(); }
+ Handle<Map> transitioned_map() {
+ return hydrogen()->transitioned_map().handle();
+ }
+ ElementsKind from_kind() { return hydrogen()->from_kind(); }
+ ElementsKind to_kind() { return hydrogen()->to_kind(); }
};
-class LStringAdd: public LTemplateInstruction<1, 2, 0> {
+class LTrapAllocationMemento V8_FINAL : public LTemplateInstruction<0, 1, 1> {
+ public:
+ LTrapAllocationMemento(LOperand* object,
+ LOperand* temp) {
+ inputs_[0] = object;
+ temps_[0] = temp;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(TrapAllocationMemento,
+ "trap-allocation-memento")
+};
+
+
+class LStringAdd V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LStringAdd(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -2030,7 +2243,7 @@ class LStringAdd: public LTemplateInstruction<1, 2, 0> {
};
-class LStringCharCodeAt: public LTemplateInstruction<1, 2, 0> {
+class LStringCharCodeAt V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LStringCharCodeAt(LOperand* string, LOperand* index) {
inputs_[0] = string;
@@ -2045,7 +2258,7 @@ class LStringCharCodeAt: public LTemplateInstruction<1, 2, 0> {
};
-class LStringCharFromCode: public LTemplateInstruction<1, 1, 0> {
+class LStringCharFromCode V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LStringCharFromCode(LOperand* char_code) {
inputs_[0] = char_code;
@@ -2058,33 +2271,20 @@ class LStringCharFromCode: public LTemplateInstruction<1, 1, 0> {
};
-class LStringLength: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LStringLength(LOperand* string) {
- inputs_[0] = string;
- }
-
- LOperand* string() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StringLength, "string-length")
- DECLARE_HYDROGEN_ACCESSOR(StringLength)
-};
-
-
-class LCheckFunction: public LTemplateInstruction<0, 1, 0> {
+class LCheckValue V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
- explicit LCheckFunction(LOperand* value) {
+ explicit LCheckValue(LOperand* value) {
inputs_[0] = value;
}
LOperand* value() { return inputs_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check-function")
- DECLARE_HYDROGEN_ACCESSOR(CheckFunction)
+ DECLARE_CONCRETE_INSTRUCTION(CheckValue, "check-value")
+ DECLARE_HYDROGEN_ACCESSOR(CheckValue)
};
-class LCheckInstanceType: public LTemplateInstruction<0, 1, 0> {
+class LCheckInstanceType V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LCheckInstanceType(LOperand* value) {
inputs_[0] = value;
@@ -2097,7 +2297,7 @@ class LCheckInstanceType: public LTemplateInstruction<0, 1, 0> {
};
-class LCheckMaps: public LTemplateInstruction<0, 1, 0> {
+class LCheckMaps V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LCheckMaps(LOperand* value) {
inputs_[0] = value;
@@ -2110,23 +2310,7 @@ class LCheckMaps: public LTemplateInstruction<0, 1, 0> {
};
-class LCheckPrototypeMaps: public LTemplateInstruction<0, 0, 1> {
- public:
- explicit LCheckPrototypeMaps(LOperand* temp) {
- temps_[0] = temp;
- }
-
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps, "check-prototype-maps")
- DECLARE_HYDROGEN_ACCESSOR(CheckPrototypeMaps)
-
- Handle<JSObject> prototype() const { return hydrogen()->prototype(); }
- Handle<JSObject> holder() const { return hydrogen()->holder(); }
-};
-
-
-class LCheckSmi: public LTemplateInstruction<0, 1, 0> {
+class LCheckSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCheckSmi(LOperand* value) {
inputs_[0] = value;
@@ -2138,7 +2322,7 @@ class LCheckSmi: public LTemplateInstruction<0, 1, 0> {
};
-class LClampDToUint8: public LTemplateInstruction<1, 1, 0> {
+class LClampDToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LClampDToUint8(LOperand* unclamped) {
inputs_[0] = unclamped;
@@ -2150,7 +2334,7 @@ class LClampDToUint8: public LTemplateInstruction<1, 1, 0> {
};
-class LClampIToUint8: public LTemplateInstruction<1, 1, 0> {
+class LClampIToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LClampIToUint8(LOperand* unclamped) {
inputs_[0] = unclamped;
@@ -2162,7 +2346,7 @@ class LClampIToUint8: public LTemplateInstruction<1, 1, 0> {
};
-class LClampTToUint8: public LTemplateInstruction<1, 1, 1> {
+class LClampTToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LClampTToUint8(LOperand* unclamped,
LOperand* temp_xmm) {
@@ -2177,7 +2361,7 @@ class LClampTToUint8: public LTemplateInstruction<1, 1, 1> {
};
-class LCheckNonSmi: public LTemplateInstruction<0, 1, 0> {
+class LCheckNonSmi V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LCheckNonSmi(LOperand* value) {
inputs_[0] = value;
@@ -2186,60 +2370,40 @@ class LCheckNonSmi: public LTemplateInstruction<0, 1, 0> {
LOperand* value() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi")
+ DECLARE_HYDROGEN_ACCESSOR(CheckHeapObject)
};
-class LAllocateObject: public LTemplateInstruction<1, 0, 1> {
+class LAllocate V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
- explicit LAllocateObject(LOperand* temp) {
+ LAllocate(LOperand* size, LOperand* temp) {
+ inputs_[0] = size;
temps_[0] = temp;
}
+ LOperand* size() { return inputs_[0]; }
LOperand* temp() { return temps_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(AllocateObject, "allocate-object")
- DECLARE_HYDROGEN_ACCESSOR(AllocateObject)
-};
-
-
-class LFastLiteral: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(FastLiteral, "fast-literal")
- DECLARE_HYDROGEN_ACCESSOR(FastLiteral)
-};
-
-
-class LArrayLiteral: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral, "array-literal")
- DECLARE_HYDROGEN_ACCESSOR(ArrayLiteral)
+ DECLARE_CONCRETE_INSTRUCTION(Allocate, "allocate")
+ DECLARE_HYDROGEN_ACCESSOR(Allocate)
};
-class LObjectLiteral: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral, "object-literal")
- DECLARE_HYDROGEN_ACCESSOR(ObjectLiteral)
-};
-
-
-class LRegExpLiteral: public LTemplateInstruction<1, 0, 0> {
+class LRegExpLiteral V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal")
DECLARE_HYDROGEN_ACCESSOR(RegExpLiteral)
};
-class LFunctionLiteral: public LTemplateInstruction<1, 0, 0> {
+class LFunctionLiteral V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal")
DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral)
-
- Handle<SharedFunctionInfo> shared_info() { return hydrogen()->shared_info(); }
};
-class LToFastProperties: public LTemplateInstruction<1, 1, 0> {
+class LToFastProperties V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LToFastProperties(LOperand* value) {
inputs_[0] = value;
@@ -2252,7 +2416,7 @@ class LToFastProperties: public LTemplateInstruction<1, 1, 0> {
};
-class LTypeof: public LTemplateInstruction<1, 1, 0> {
+class LTypeof V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LTypeof(LOperand* value) {
inputs_[0] = value;
@@ -2264,7 +2428,7 @@ class LTypeof: public LTemplateInstruction<1, 1, 0> {
};
-class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
+class LTypeofIsAndBranch V8_FINAL : public LControlInstruction<1, 0> {
public:
explicit LTypeofIsAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -2277,11 +2441,11 @@ class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
Handle<String> type_literal() { return hydrogen()->type_literal(); }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LIsConstructCallAndBranch: public LControlInstruction<0, 1> {
+class LIsConstructCallAndBranch V8_FINAL : public LControlInstruction<0, 1> {
public:
explicit LIsConstructCallAndBranch(LOperand* temp) {
temps_[0] = temp;
@@ -2295,44 +2459,18 @@ class LIsConstructCallAndBranch: public LControlInstruction<0, 1> {
};
-class LDeleteProperty: public LTemplateInstruction<1, 2, 0> {
+class LOsrEntry V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
- LDeleteProperty(LOperand* obj, LOperand* key) {
- inputs_[0] = obj;
- inputs_[1] = key;
- }
-
- LOperand* object() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(DeleteProperty, "delete-property")
-};
-
-
-class LOsrEntry: public LTemplateInstruction<0, 0, 0> {
- public:
- LOsrEntry();
+ LOsrEntry() {}
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ return false;
+ }
DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
-
- LOperand** SpilledRegisterArray() { return register_spills_; }
- LOperand** SpilledDoubleRegisterArray() { return double_register_spills_; }
-
- void MarkSpilledRegister(int allocation_index, LOperand* spill_operand);
- void MarkSpilledDoubleRegister(int allocation_index,
- LOperand* spill_operand);
-
- private:
- // Arrays of spill slot operands for registers with an assigned spill
- // slot, i.e., that must also be restored to the spill slot on OSR entry.
- // NULL if the register has no assigned spill slot. Indexed by allocation
- // index.
- LOperand* register_spills_[Register::kNumAllocatableRegisters];
- LOperand* double_register_spills_[DoubleRegister::kNumAllocatableRegisters];
};
-class LStackCheck: public LTemplateInstruction<0, 0, 0> {
+class LStackCheck V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
DECLARE_HYDROGEN_ACCESSOR(StackCheck)
@@ -2344,7 +2482,7 @@ class LStackCheck: public LTemplateInstruction<0, 0, 0> {
};
-class LForInPrepareMap: public LTemplateInstruction<1, 1, 0> {
+class LForInPrepareMap V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LForInPrepareMap(LOperand* object) {
inputs_[0] = object;
@@ -2356,7 +2494,7 @@ class LForInPrepareMap: public LTemplateInstruction<1, 1, 0> {
};
-class LForInCacheArray: public LTemplateInstruction<1, 1, 0> {
+class LForInCacheArray V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LForInCacheArray(LOperand* map) {
inputs_[0] = map;
@@ -2372,7 +2510,7 @@ class LForInCacheArray: public LTemplateInstruction<1, 1, 0> {
};
-class LCheckMapValue: public LTemplateInstruction<0, 2, 0> {
+class LCheckMapValue V8_FINAL : public LTemplateInstruction<0, 2, 0> {
public:
LCheckMapValue(LOperand* value, LOperand* map) {
inputs_[0] = value;
@@ -2386,7 +2524,7 @@ class LCheckMapValue: public LTemplateInstruction<0, 2, 0> {
};
-class LLoadFieldByIndex: public LTemplateInstruction<1, 2, 0> {
+class LLoadFieldByIndex V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LLoadFieldByIndex(LOperand* object, LOperand* index) {
inputs_[0] = object;
@@ -2401,17 +2539,17 @@ class LLoadFieldByIndex: public LTemplateInstruction<1, 2, 0> {
class LChunkBuilder;
-class LPlatformChunk: public LChunk {
+class LPlatformChunk V8_FINAL : public LChunk {
public:
LPlatformChunk(CompilationInfo* info, HGraph* graph)
: LChunk(info, graph) { }
- int GetNextSpillIndex(bool is_double);
- LOperand* GetNextSpillSlot(bool is_double);
+ int GetNextSpillIndex(RegisterKind kind);
+ LOperand* GetNextSpillSlot(RegisterKind kind);
};
-class LChunkBuilder BASE_EMBEDDED {
+class LChunkBuilder V8_FINAL BASE_EMBEDDED {
public:
LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
: chunk_(NULL),
@@ -2424,21 +2562,32 @@ class LChunkBuilder BASE_EMBEDDED {
next_block_(NULL),
argument_count_(0),
allocator_(allocator),
- position_(RelocInfo::kNoPosition),
instruction_pending_deoptimization_environment_(NULL),
pending_deoptimization_ast_id_(BailoutId::None()) { }
// Build the sequence for the graph.
LPlatformChunk* Build();
+ LInstruction* CheckElideControlInstruction(HControlInstruction* instr);
+
// Declare methods that deal with the individual node types.
#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
#undef DECLARE_DO
- static HValue* SimplifiedDividendForMathFloorOfDiv(HValue* val);
static HValue* SimplifiedDivisorForMathFloorOfDiv(HValue* val);
+ LInstruction* DoMathFloor(HUnaryMathOperation* instr);
+ LInstruction* DoMathRound(HUnaryMathOperation* instr);
+ LInstruction* DoMathAbs(HUnaryMathOperation* instr);
+ LInstruction* DoMathLog(HUnaryMathOperation* instr);
+ LInstruction* DoMathSin(HUnaryMathOperation* instr);
+ LInstruction* DoMathCos(HUnaryMathOperation* instr);
+ LInstruction* DoMathTan(HUnaryMathOperation* instr);
+ LInstruction* DoMathExp(HUnaryMathOperation* instr);
+ LInstruction* DoMathSqrt(HUnaryMathOperation* instr);
+ LInstruction* DoMathPowHalf(HUnaryMathOperation* instr);
+
private:
enum Status {
UNUSED,
@@ -2457,7 +2606,7 @@ class LChunkBuilder BASE_EMBEDDED {
bool is_done() const { return status_ == DONE; }
bool is_aborted() const { return status_ == ABORTED; }
- void Abort(const char* reason);
+ void Abort(BailoutReason reason);
// Methods for getting operands for Use / Define / Temp.
LUnallocated* ToUnallocated(Register reg);
@@ -2495,6 +2644,9 @@ class LChunkBuilder BASE_EMBEDDED {
MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value);
MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value);
+ // An input operand in a constant operand.
+ MUST_USE_RESULT LOperand* UseConstant(HValue* value);
+
// An input operand in register, stack slot or a constant operand.
// Will not be moved to a register even if one is freely available.
MUST_USE_RESULT LOperand* UseAny(HValue* value);
@@ -2540,7 +2692,8 @@ class LChunkBuilder BASE_EMBEDDED {
CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
- int* argument_index_accumulator);
+ int* argument_index_accumulator,
+ ZoneList<HValue*>* objects_to_materialize);
void VisitInstruction(HInstruction* current);
@@ -2549,7 +2702,7 @@ class LChunkBuilder BASE_EMBEDDED {
LInstruction* DoArithmeticD(Token::Value op,
HArithmeticBinaryOperation* instr);
LInstruction* DoArithmeticT(Token::Value op,
- HArithmeticBinaryOperation* instr);
+ HBinaryOperation* instr);
LPlatformChunk* chunk_;
CompilationInfo* info_;
@@ -2561,7 +2714,6 @@ class LChunkBuilder BASE_EMBEDDED {
HBasicBlock* next_block_;
int argument_count_;
LAllocator* allocator_;
- int position_;
LInstruction* instruction_pending_deoptimization_environment_;
BailoutId pending_deoptimization_ast_id_;
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index 77506741a..a18ff0d27 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -27,15 +27,17 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_X64)
+#if V8_TARGET_ARCH_X64
#include "bootstrapper.h"
#include "codegen.h"
+#include "cpu-profiler.h"
#include "assembler-x64.h"
#include "macro-assembler-x64.h"
#include "serialize.h"
#include "debug.h"
#include "heap.h"
+#include "isolate-inl.h"
namespace v8 {
namespace internal {
@@ -154,7 +156,7 @@ int MacroAssembler::LoadAddressSize(ExternalReference source) {
}
}
// Size of movq(destination, src);
- return 10;
+ return Assembler::kMoveAddressIntoScratchRegisterInstructionLength;
}
@@ -162,7 +164,7 @@ void MacroAssembler::PushAddress(ExternalReference source) {
int64_t address = reinterpret_cast<int64_t>(source.address());
if (is_int32(address) && !Serializer::enabled()) {
if (emit_debug_code()) {
- movq(kScratchRegister, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
+ movq(kScratchRegister, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
}
push(Immediate(static_cast<int32_t>(address)));
return;
@@ -284,16 +286,17 @@ void MacroAssembler::InNewSpace(Register object,
cmpq(scratch, kScratchRegister);
j(cc, branch, distance);
} else {
- ASSERT(is_int32(static_cast<int64_t>(HEAP->NewSpaceMask())));
+ ASSERT(is_int32(static_cast<int64_t>(isolate()->heap()->NewSpaceMask())));
intptr_t new_space_start =
- reinterpret_cast<intptr_t>(HEAP->NewSpaceStart());
- movq(kScratchRegister, -new_space_start, RelocInfo::NONE);
+ reinterpret_cast<intptr_t>(isolate()->heap()->NewSpaceStart());
+ movq(kScratchRegister, -new_space_start, RelocInfo::NONE64);
if (scratch.is(object)) {
addq(scratch, kScratchRegister);
} else {
lea(scratch, Operand(object, kScratchRegister, times_1, 0));
}
- and_(scratch, Immediate(static_cast<int32_t>(HEAP->NewSpaceMask())));
+ and_(scratch,
+ Immediate(static_cast<int32_t>(isolate()->heap()->NewSpaceMask())));
j(cc, branch, distance);
}
}
@@ -342,8 +345,8 @@ void MacroAssembler::RecordWriteField(
// Clobber clobbered input registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
- movq(dst, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
+ movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
+ movq(dst, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
}
}
@@ -376,8 +379,8 @@ void MacroAssembler::RecordWriteArray(Register object,
// Clobber clobbered input registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
- movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
+ movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
+ movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
}
}
@@ -442,14 +445,14 @@ void MacroAssembler::RecordWrite(Register object,
// Clobber clobbered registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- movq(address, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
- movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
+ movq(address, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
+ movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
}
}
-void MacroAssembler::Assert(Condition cc, const char* msg) {
- if (emit_debug_code()) Check(cc, msg);
+void MacroAssembler::Assert(Condition cc, BailoutReason reason) {
+ if (emit_debug_code()) Check(cc, reason);
}
@@ -465,16 +468,16 @@ void MacroAssembler::AssertFastElements(Register elements) {
CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
Heap::kFixedCOWArrayMapRootIndex);
j(equal, &ok, Label::kNear);
- Abort("JSObject with fast elements map has slow elements");
+ Abort(kJSObjectWithFastElementsMapHasSlowElements);
bind(&ok);
}
}
-void MacroAssembler::Check(Condition cc, const char* msg) {
+void MacroAssembler::Check(Condition cc, BailoutReason reason) {
Label L;
j(cc, &L, Label::kNear);
- Abort(msg);
+ Abort(reason);
// Control will not return here.
bind(&L);
}
@@ -507,12 +510,13 @@ void MacroAssembler::NegativeZeroTest(Register result,
}
-void MacroAssembler::Abort(const char* msg) {
+void MacroAssembler::Abort(BailoutReason reason) {
// We want to pass the msg string like a smi to avoid GC
// problems, however msg is not guaranteed to be aligned
// properly. Instead, we pass an aligned pointer that is
// a proper v8 smi, but also pass the alignment difference
// from the real pointer as a smi.
+ const char* msg = GetBailoutReason(reason);
intptr_t p1 = reinterpret_cast<intptr_t>(msg);
intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
// Note: p0 might not be a valid Smi _value_, but it has a valid Smi tag.
@@ -522,13 +526,19 @@ void MacroAssembler::Abort(const char* msg) {
RecordComment("Abort message: ");
RecordComment(msg);
}
+
+ if (FLAG_trap_on_abort) {
+ int3();
+ return;
+ }
#endif
+
push(rax);
- movq(kScratchRegister, p0, RelocInfo::NONE);
+ movq(kScratchRegister, p0, RelocInfo::NONE64);
push(kScratchRegister);
movq(kScratchRegister,
reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(p1 - p0))),
- RelocInfo::NONE);
+ RelocInfo::NONE64);
push(kScratchRegister);
if (!has_frame_) {
@@ -546,13 +556,14 @@ void MacroAssembler::Abort(const char* msg) {
void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
ASSERT(AllowThisStubCall(stub)); // Calls are not allowed in some stubs
- Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
+ Call(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, ast_id);
}
void MacroAssembler::TailCallStub(CodeStub* stub) {
- ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe());
- Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
+ ASSERT(allow_stub_calls_ ||
+ stub->CompilingCallsToThisStubIsGCSafe(isolate()));
+ Jump(stub->GetCode(isolate()), RelocInfo::CODE_TARGET);
}
@@ -564,7 +575,7 @@ void MacroAssembler::StubReturn(int argc) {
bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
- return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe();
+ return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe(isolate());
}
@@ -595,22 +606,9 @@ void MacroAssembler::IndexFromHash(Register hash, Register index) {
}
-void MacroAssembler::CallRuntime(Runtime::FunctionId id, int num_arguments) {
- CallRuntime(Runtime::FunctionForId(id), num_arguments);
-}
-
-
-void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
- const Runtime::Function* function = Runtime::FunctionForId(id);
- Set(rax, function->nargs);
- LoadAddress(rbx, ExternalReference(function, isolate()));
- CEntryStub ces(1, kSaveFPRegs);
- CallStub(&ces);
-}
-
-
void MacroAssembler::CallRuntime(const Runtime::Function* f,
- int num_arguments) {
+ int num_arguments,
+ SaveFPRegsMode save_doubles) {
// If the expected number of arguments of the runtime function is
// constant, we check that the actual number of arguments match the
// expectation.
@@ -625,7 +623,7 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f,
// smarter.
Set(rax, num_arguments);
LoadAddress(rbx, ExternalReference(f, isolate()));
- CEntryStub ces(f->result_size);
+ CEntryStub ces(f->result_size, save_doubles);
CallStub(&ces);
}
@@ -644,8 +642,8 @@ void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
int num_arguments,
int result_size) {
// ----------- S t a t e -------------
- // -- rsp[0] : return address
- // -- rsp[8] : argument num_arguments - 1
+ // -- rsp[0] : return address
+ // -- rsp[8] : argument num_arguments - 1
// ...
// -- rsp[8 * num_arguments] : argument 0 (receiver)
// -----------------------------------
@@ -677,37 +675,33 @@ static int Offset(ExternalReference ref0, ExternalReference ref1) {
void MacroAssembler::PrepareCallApiFunction(int arg_stack_space) {
-#if defined(_WIN64) && !defined(__MINGW64__)
- // We need to prepare a slot for result handle on stack and put
- // a pointer to it into 1st arg register.
- EnterApiExitFrame(arg_stack_space + 1);
-
- // rcx must be used to pass the pointer to the return value slot.
- lea(rcx, StackSpaceOperand(arg_stack_space));
-#else
EnterApiExitFrame(arg_stack_space);
-#endif
}
-void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
- int stack_space) {
- Label empty_result;
+void MacroAssembler::CallApiFunctionAndReturn(
+ Address function_address,
+ Address thunk_address,
+ Register thunk_last_arg,
+ int stack_space,
+ Operand return_value_operand,
+ Operand* context_restore_operand) {
Label prologue;
Label promote_scheduled_exception;
+ Label exception_handled;
Label delete_allocated_handles;
Label leave_exit_frame;
Label write_back;
Factory* factory = isolate()->factory();
ExternalReference next_address =
- ExternalReference::handle_scope_next_address();
+ ExternalReference::handle_scope_next_address(isolate());
const int kNextOffset = 0;
const int kLimitOffset = Offset(
- ExternalReference::handle_scope_limit_address(),
+ ExternalReference::handle_scope_limit_address(isolate()),
next_address);
const int kLevelOffset = Offset(
- ExternalReference::handle_scope_level_address(),
+ ExternalReference::handle_scope_level_address(isolate()),
next_address);
ExternalReference scheduled_exception_address =
ExternalReference::scheduled_exception_address(isolate());
@@ -720,20 +714,52 @@ void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
movq(prev_next_address_reg, Operand(base_reg, kNextOffset));
movq(prev_limit_reg, Operand(base_reg, kLimitOffset));
addl(Operand(base_reg, kLevelOffset), Immediate(1));
+
+ if (FLAG_log_timer_events) {
+ FrameScope frame(this, StackFrame::MANUAL);
+ PushSafepointRegisters();
+ PrepareCallCFunction(1);
+ LoadAddress(arg_reg_1, ExternalReference::isolate_address(isolate()));
+ CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1);
+ PopSafepointRegisters();
+ }
+
+
+ Label profiler_disabled;
+ Label end_profiler_check;
+ bool* is_profiling_flag =
+ isolate()->cpu_profiler()->is_profiling_address();
+ STATIC_ASSERT(sizeof(*is_profiling_flag) == 1);
+ movq(rax, is_profiling_flag, RelocInfo::EXTERNAL_REFERENCE);
+ cmpb(Operand(rax, 0), Immediate(0));
+ j(zero, &profiler_disabled);
+
+ // Third parameter is the address of the actual getter function.
+ movq(thunk_last_arg, function_address, RelocInfo::EXTERNAL_REFERENCE);
+ movq(rax, thunk_address, RelocInfo::EXTERNAL_REFERENCE);
+ jmp(&end_profiler_check);
+
+ bind(&profiler_disabled);
+ // Call the api function!
+ movq(rax, reinterpret_cast<Address>(function_address),
+ RelocInfo::EXTERNAL_REFERENCE);
+
+ bind(&end_profiler_check);
+
// Call the api function!
- movq(rax, reinterpret_cast<int64_t>(function_address),
- RelocInfo::RUNTIME_ENTRY);
call(rax);
-#if defined(_WIN64) && !defined(__MINGW64__)
- // rax keeps a pointer to v8::Handle, unpack it.
- movq(rax, Operand(rax, 0));
-#endif
- // Check if the result handle holds 0.
- testq(rax, rax);
- j(zero, &empty_result);
- // It was non-zero. Dereference to get the result value.
- movq(rax, Operand(rax, 0));
+ if (FLAG_log_timer_events) {
+ FrameScope frame(this, StackFrame::MANUAL);
+ PushSafepointRegisters();
+ PrepareCallCFunction(1);
+ LoadAddress(arg_reg_1, ExternalReference::isolate_address(isolate()));
+ CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1);
+ PopSafepointRegisters();
+ }
+
+ // Load the value from ReturnValue
+ movq(rax, return_value_operand);
bind(&prologue);
// No more valid handles (the result handle was the last one). Restore
@@ -748,6 +774,7 @@ void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
movq(rsi, scheduled_exception_address);
Cmp(Operand(rsi, 0), factory->the_hole_value());
j(not_equal, &promote_scheduled_exception);
+ bind(&exception_handled);
#if ENABLE_EXTRA_CHECKS
// Check if the function returned a valid JavaScript value.
@@ -779,31 +806,30 @@ void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
CompareRoot(return_value, Heap::kNullValueRootIndex);
j(equal, &ok, Label::kNear);
- Abort("API call returned invalid object");
+ Abort(kAPICallReturnedInvalidObject);
bind(&ok);
#endif
- LeaveApiExitFrame();
+ bool restore_context = context_restore_operand != NULL;
+ if (restore_context) {
+ movq(rsi, *context_restore_operand);
+ }
+ LeaveApiExitFrame(!restore_context);
ret(stack_space * kPointerSize);
- bind(&empty_result);
- // It was zero; the result is undefined.
- LoadRoot(rax, Heap::kUndefinedValueRootIndex);
- jmp(&prologue);
-
bind(&promote_scheduled_exception);
- TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
+ {
+ FrameScope frame(this, StackFrame::INTERNAL);
+ CallRuntime(Runtime::kPromoteScheduledException, 0);
+ }
+ jmp(&exception_handled);
// HandleScope limit has changed. Delete allocated extensions.
bind(&delete_allocated_handles);
movq(Operand(base_reg, kLimitOffset), prev_limit_reg);
movq(prev_limit_reg, rax);
-#ifdef _WIN64
- LoadAddress(rcx, ExternalReference::isolate_address());
-#else
- LoadAddress(rdi, ExternalReference::isolate_address());
-#endif
+ LoadAddress(arg_reg_1, ExternalReference::isolate_address(isolate()));
LoadAddress(rax,
ExternalReference::delete_handle_scope_extensions(isolate()));
call(rax);
@@ -817,7 +843,7 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
// Set the entry point and jump to the C entry runtime stub.
LoadAddress(rbx, ext);
CEntryStub ces(result_size);
- jmp(ces.GetCode(), RelocInfo::CODE_TARGET);
+ jmp(ces.GetCode(isolate()), RelocInfo::CODE_TARGET);
}
@@ -881,9 +907,8 @@ void MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
}
// R12 to r15 are callee save on all platforms.
if (fp_mode == kSaveFPRegs) {
- CpuFeatures::Scope scope(SSE2);
- subq(rsp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
- for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
+ subq(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
+ for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
movsd(Operand(rsp, i * kDoubleSize), reg);
}
@@ -896,12 +921,11 @@ void MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode,
Register exclusion2,
Register exclusion3) {
if (fp_mode == kSaveFPRegs) {
- CpuFeatures::Scope scope(SSE2);
- for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
+ for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
movsd(reg, Operand(rsp, i * kDoubleSize));
}
- addq(rsp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
+ addq(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
}
for (int i = kNumberOfSavedRegs - 1; i >= 0; i--) {
Register reg = saved_regs[i];
@@ -912,6 +936,42 @@ void MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode,
}
+void MacroAssembler::Cvtlsi2sd(XMMRegister dst, Register src) {
+ xorps(dst, dst);
+ cvtlsi2sd(dst, src);
+}
+
+
+void MacroAssembler::Cvtlsi2sd(XMMRegister dst, const Operand& src) {
+ xorps(dst, dst);
+ cvtlsi2sd(dst, src);
+}
+
+
+void MacroAssembler::Load(Register dst, const Operand& src, Representation r) {
+ ASSERT(!r.IsDouble());
+ if (r.IsByte()) {
+ movzxbl(dst, src);
+ } else if (r.IsInteger32()) {
+ movl(dst, src);
+ } else {
+ movq(dst, src);
+ }
+}
+
+
+void MacroAssembler::Store(const Operand& dst, Register src, Representation r) {
+ ASSERT(!r.IsDouble());
+ if (r.IsByte()) {
+ movb(dst, src);
+ } else if (r.IsInteger32()) {
+ movl(dst, src);
+ } else {
+ movq(dst, src);
+ }
+}
+
+
void MacroAssembler::Set(Register dst, int64_t x) {
if (x == 0) {
xorl(dst, dst);
@@ -920,10 +980,11 @@ void MacroAssembler::Set(Register dst, int64_t x) {
} else if (is_int32(x)) {
movq(dst, Immediate(static_cast<int32_t>(x)));
} else {
- movq(dst, x, RelocInfo::NONE);
+ movq(dst, x, RelocInfo::NONE64);
}
}
+
void MacroAssembler::Set(const Operand& dst, int64_t x) {
if (is_int32(x)) {
movq(dst, Immediate(static_cast<int32_t>(x)));
@@ -934,7 +995,10 @@ void MacroAssembler::Set(const Operand& dst, int64_t x) {
}
-bool MacroAssembler::IsUnsafeInt(const int x) {
+// ----------------------------------------------------------------------------
+// Smi tagging, untagging and tag detection.
+
+bool MacroAssembler::IsUnsafeInt(const int32_t x) {
static const int kMaxBits = 17;
return !is_intn(x, kMaxBits);
}
@@ -942,7 +1006,7 @@ bool MacroAssembler::IsUnsafeInt(const int x) {
void MacroAssembler::SafeMove(Register dst, Smi* src) {
ASSERT(!dst.is(kScratchRegister));
- ASSERT(kSmiValueSize == 32); // JIT cookie can be converted to Smi.
+ ASSERT(SmiValuesAre32Bits()); // JIT cookie can be converted to Smi.
if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
Move(dst, Smi::FromInt(src->value() ^ jit_cookie()));
Move(kScratchRegister, Smi::FromInt(jit_cookie()));
@@ -954,7 +1018,7 @@ void MacroAssembler::SafeMove(Register dst, Smi* src) {
void MacroAssembler::SafePush(Smi* src) {
- ASSERT(kSmiValueSize == 32); // JIT cookie can be converted to Smi.
+ ASSERT(SmiValuesAre32Bits()); // JIT cookie can be converted to Smi.
if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
Push(Smi::FromInt(src->value() ^ jit_cookie()));
Move(kScratchRegister, Smi::FromInt(jit_cookie()));
@@ -965,9 +1029,6 @@ void MacroAssembler::SafePush(Smi* src) {
}
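
Editorial aside, not part of the patch: SafeMove and SafePush exist so that attacker-chosen 32-bit smi constants never appear verbatim in JIT-compiled code. The wide constant is emitted XOR-ed with the per-isolate JIT cookie and un-XOR-ed at run time. A minimal C++ sketch of the recovered value, assuming the 32-bit smi payload that SmiValuesAre32Bits() asserts (the helper name is invented for illustration):

    #include <cstdint>

    // 'value ^ cookie' and 'cookie' are the two immediates actually emitted,
    // so the plain 'value' never lands in the instruction stream.
    int64_t RecoverObfuscatedSmi(int32_t value, int32_t jit_cookie) {
      int64_t masked = static_cast<int64_t>(value ^ jit_cookie) << 32;  // Smi::FromInt(value ^ cookie)
      int64_t key    = static_cast<int64_t>(jit_cookie) << 32;          // Smi::FromInt(cookie)
      return masked ^ key;                                              // == Smi::FromInt(value)
    }
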
-// ----------------------------------------------------------------------------
-// Smi tagging, untagging and tag detection.
-
Register MacroAssembler::GetSmiConstant(Smi* source) {
int value = source->value();
if (value == 0) {
@@ -981,14 +1042,15 @@ Register MacroAssembler::GetSmiConstant(Smi* source) {
return kScratchRegister;
}
+
void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
if (emit_debug_code()) {
movq(dst,
reinterpret_cast<uint64_t>(Smi::FromInt(kSmiConstantRegisterValue)),
- RelocInfo::NONE);
+ RelocInfo::NONE64);
cmpq(dst, kSmiConstantRegister);
if (allow_stub_calls()) {
- Assert(equal, "Uninitialized kSmiConstantRegister");
+ Assert(equal, kUninitializedKSmiConstantRegister);
} else {
Label ok;
j(equal, &ok, Label::kNear);
@@ -1032,7 +1094,7 @@ void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
UNREACHABLE();
return;
default:
- movq(dst, reinterpret_cast<uint64_t>(source), RelocInfo::NONE);
+ movq(dst, reinterpret_cast<uint64_t>(source), RelocInfo::NONE64);
return;
}
if (negative) {
@@ -1056,7 +1118,7 @@ void MacroAssembler::Integer32ToSmiField(const Operand& dst, Register src) {
Label ok;
j(zero, &ok, Label::kNear);
if (allow_stub_calls()) {
- Abort("Integer32ToSmiField writing to non-smi location");
+ Abort(kInteger32ToSmiFieldWritingToNonSmiLocation);
} else {
int3();
}
@@ -1108,6 +1170,7 @@ void MacroAssembler::SmiToInteger64(Register dst, const Operand& src) {
void MacroAssembler::SmiTest(Register src) {
+ AssertSmi(src);
testq(src, src);
}
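
Editorial aside, not part of the patch: the Integer32ToSmi / SmiToInteger conversions in this file assume the x64 encoding in which a smi keeps its 32-bit payload in the upper half of the word and leaves the low tag bit clear. A minimal sketch of that encoding (illustrative helpers, assuming kSmiTag == 0, kSmiTagMask == 1, kSmiShift == 32):

    #include <cstdint>

    int64_t TagSmi(int32_t value)    { return static_cast<int64_t>(value) << 32; }
    int32_t UntagSmi(int64_t smi)    { return static_cast<int32_t>(smi >> 32); }
    bool    HasSmiTag(int64_t word)  { return (word & 1) == 0; }  // heap objects have bit 0 set
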
@@ -1396,28 +1459,6 @@ void MacroAssembler::JumpUnlessBothNonNegativeSmi(Register src1,
}
-void MacroAssembler::SmiTryAddConstant(Register dst,
- Register src,
- Smi* constant,
- Label* on_not_smi_result,
- Label::Distance near_jump) {
- // Does not assume that src is a smi.
- ASSERT_EQ(static_cast<int>(1), static_cast<int>(kSmiTagMask));
- STATIC_ASSERT(kSmiTag == 0);
- ASSERT(!dst.is(kScratchRegister));
- ASSERT(!src.is(kScratchRegister));
-
- JumpIfNotSmi(src, on_not_smi_result, near_jump);
- Register tmp = (dst.is(src) ? kScratchRegister : dst);
- LoadSmiConstant(tmp, constant);
- addq(tmp, src);
- j(overflow, on_not_smi_result, near_jump);
- if (dst.is(src)) {
- movq(dst, tmp);
- }
-}
-
-
void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
if (constant->value() == 0) {
if (!dst.is(src)) {
@@ -1486,10 +1527,14 @@ void MacroAssembler::SmiAddConstant(Register dst,
} else if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
+ Label done;
LoadSmiConstant(kScratchRegister, constant);
- addq(kScratchRegister, src);
- j(overflow, on_not_smi_result, near_jump);
- movq(dst, kScratchRegister);
+ addq(dst, kScratchRegister);
+ j(no_overflow, &done, Label::kNear);
+ // Restore src.
+ subq(dst, kScratchRegister);
+ jmp(on_not_smi_result, near_jump);
+ bind(&done);
} else {
LoadSmiConstant(dst, constant);
addq(dst, src);
@@ -1589,6 +1634,29 @@ void MacroAssembler::SmiNeg(Register dst,
}
+template<class T>
+static void SmiAddHelper(MacroAssembler* masm,
+ Register dst,
+ Register src1,
+ T src2,
+ Label* on_not_smi_result,
+ Label::Distance near_jump) {
+ if (dst.is(src1)) {
+ Label done;
+ masm->addq(dst, src2);
+ masm->j(no_overflow, &done, Label::kNear);
+ // Restore src1.
+ masm->subq(dst, src2);
+ masm->jmp(on_not_smi_result, near_jump);
+ masm->bind(&done);
+ } else {
+ masm->movq(dst, src1);
+ masm->addq(dst, src2);
+ masm->j(overflow, on_not_smi_result, near_jump);
+ }
+}
+
+
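
Editorial aside, not part of the patch: SmiAddHelper replaces the old kScratchRegister round-trip with an add-in-place that, on overflow, subtracts the addend back so src1 still holds its original value when control reaches on_not_smi_result. The same shape in portable C++ (illustrative helper, using unsigned wraparound to avoid signed-overflow undefined behaviour):

    #include <cstdint>

    // Returns false, leaving dst equal to the original src1, when the signed
    // addition would overflow; otherwise commits the sum and returns true.
    bool CheckedAddInPlace(int64_t& dst, int64_t src2) {
      uint64_t a = static_cast<uint64_t>(dst);
      uint64_t b = static_cast<uint64_t>(src2);
      uint64_t sum = a + b;                                   // addq dst, src2
      bool overflow = ((~(a ^ b) & (a ^ sum)) >> 63) != 0;    // same-sign inputs, different-sign result
      if (overflow) {
        dst = static_cast<int64_t>(sum - b);                  // subq dst, src2: restore src1
        return false;                                         // jmp on_not_smi_result
      }
      dst = static_cast<int64_t>(sum);
      return true;
    }
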
void MacroAssembler::SmiAdd(Register dst,
Register src1,
Register src2,
@@ -1596,16 +1664,7 @@ void MacroAssembler::SmiAdd(Register dst,
Label::Distance near_jump) {
ASSERT_NOT_NULL(on_not_smi_result);
ASSERT(!dst.is(src2));
- if (dst.is(src1)) {
- movq(kScratchRegister, src1);
- addq(kScratchRegister, src2);
- j(overflow, on_not_smi_result, near_jump);
- movq(dst, kScratchRegister);
- } else {
- movq(dst, src1);
- addq(dst, src2);
- j(overflow, on_not_smi_result, near_jump);
- }
+ SmiAddHelper<Register>(this, dst, src1, src2, on_not_smi_result, near_jump);
}
@@ -1615,17 +1674,8 @@ void MacroAssembler::SmiAdd(Register dst,
Label* on_not_smi_result,
Label::Distance near_jump) {
ASSERT_NOT_NULL(on_not_smi_result);
- if (dst.is(src1)) {
- movq(kScratchRegister, src1);
- addq(kScratchRegister, src2);
- j(overflow, on_not_smi_result, near_jump);
- movq(dst, kScratchRegister);
- } else {
- ASSERT(!src2.AddressUsesRegister(dst));
- movq(dst, src1);
- addq(dst, src2);
- j(overflow, on_not_smi_result, near_jump);
- }
+ ASSERT(!src2.AddressUsesRegister(dst));
+ SmiAddHelper<Operand>(this, dst, src1, src2, on_not_smi_result, near_jump);
}
@@ -1638,44 +1688,47 @@ void MacroAssembler::SmiAdd(Register dst,
if (emit_debug_code()) {
movq(kScratchRegister, src1);
addq(kScratchRegister, src2);
- Check(no_overflow, "Smi addition overflow");
+ Check(no_overflow, kSmiAdditionOverflow);
}
lea(dst, Operand(src1, src2, times_1, 0));
} else {
addq(dst, src2);
- Assert(no_overflow, "Smi addition overflow");
+ Assert(no_overflow, kSmiAdditionOverflow);
}
}
-void MacroAssembler::SmiSub(Register dst,
- Register src1,
- Register src2,
- Label* on_not_smi_result,
- Label::Distance near_jump) {
- ASSERT_NOT_NULL(on_not_smi_result);
- ASSERT(!dst.is(src2));
+template<class T>
+static void SmiSubHelper(MacroAssembler* masm,
+ Register dst,
+ Register src1,
+ T src2,
+ Label* on_not_smi_result,
+ Label::Distance near_jump) {
if (dst.is(src1)) {
- cmpq(dst, src2);
- j(overflow, on_not_smi_result, near_jump);
- subq(dst, src2);
+ Label done;
+ masm->subq(dst, src2);
+ masm->j(no_overflow, &done, Label::kNear);
+ // Restore src1.
+ masm->addq(dst, src2);
+ masm->jmp(on_not_smi_result, near_jump);
+ masm->bind(&done);
} else {
- movq(dst, src1);
- subq(dst, src2);
- j(overflow, on_not_smi_result, near_jump);
+ masm->movq(dst, src1);
+ masm->subq(dst, src2);
+ masm->j(overflow, on_not_smi_result, near_jump);
}
}
-void MacroAssembler::SmiSub(Register dst, Register src1, Register src2) {
- // No overflow checking. Use only when it's known that
- // overflowing is impossible (e.g., subtracting two positive smis).
+void MacroAssembler::SmiSub(Register dst,
+ Register src1,
+ Register src2,
+ Label* on_not_smi_result,
+ Label::Distance near_jump) {
+ ASSERT_NOT_NULL(on_not_smi_result);
ASSERT(!dst.is(src2));
- if (!dst.is(src1)) {
- movq(dst, src1);
- }
- subq(dst, src2);
- Assert(no_overflow, "Smi subtraction overflow");
+ SmiSubHelper<Register>(this, dst, src1, src2, on_not_smi_result, near_jump);
}
@@ -1685,29 +1738,36 @@ void MacroAssembler::SmiSub(Register dst,
Label* on_not_smi_result,
Label::Distance near_jump) {
ASSERT_NOT_NULL(on_not_smi_result);
- if (dst.is(src1)) {
- movq(kScratchRegister, src2);
- cmpq(src1, kScratchRegister);
- j(overflow, on_not_smi_result, near_jump);
- subq(src1, kScratchRegister);
- } else {
- movq(dst, src1);
- subq(dst, src2);
- j(overflow, on_not_smi_result, near_jump);
- }
+ ASSERT(!src2.AddressUsesRegister(dst));
+ SmiSubHelper<Operand>(this, dst, src1, src2, on_not_smi_result, near_jump);
}
-void MacroAssembler::SmiSub(Register dst,
- Register src1,
- const Operand& src2) {
+template<class T>
+static void SmiSubNoOverflowHelper(MacroAssembler* masm,
+ Register dst,
+ Register src1,
+ T src2) {
// No overflow checking. Use only when it's known that
// overflowing is impossible (e.g., subtracting two positive smis).
if (!dst.is(src1)) {
- movq(dst, src1);
+ masm->movq(dst, src1);
}
- subq(dst, src2);
- Assert(no_overflow, "Smi subtraction overflow");
+ masm->subq(dst, src2);
+ masm->Assert(no_overflow, kSmiSubtractionOverflow);
+}
+
+
+void MacroAssembler::SmiSub(Register dst, Register src1, Register src2) {
+ ASSERT(!dst.is(src2));
+ SmiSubNoOverflowHelper<Register>(this, dst, src1, src2);
+}
+
+
+void MacroAssembler::SmiSub(Register dst,
+ Register src1,
+ const Operand& src2) {
+ SmiSubNoOverflowHelper<Operand>(this, dst, src1, src2);
}
@@ -2104,7 +2164,7 @@ void MacroAssembler::SelectNonSmi(Register dst,
#ifdef DEBUG
if (allow_stub_calls()) { // Check contains a stub call.
Condition not_both_smis = NegateCondition(CheckBothSmi(src1, src2));
- Check(not_both_smis, "Both registers were smis in SelectNonSmi.");
+ Check(not_both_smis, kBothRegistersWereSmisInSelectNonSmi);
}
#endif
STATIC_ASSERT(kSmiTag == 0);
@@ -2170,6 +2230,133 @@ void MacroAssembler::AddSmiField(Register dst, const Operand& src) {
}
+void MacroAssembler::Push(Smi* source) {
+ intptr_t smi = reinterpret_cast<intptr_t>(source);
+ if (is_int32(smi)) {
+ push(Immediate(static_cast<int32_t>(smi)));
+ } else {
+ Register constant = GetSmiConstant(source);
+ push(constant);
+ }
+}
+
+
+void MacroAssembler::PushInt64AsTwoSmis(Register src, Register scratch) {
+ movq(scratch, src);
+ // High bits.
+ shr(src, Immediate(64 - kSmiShift));
+ shl(src, Immediate(kSmiShift));
+ push(src);
+ // Low bits.
+ shl(scratch, Immediate(kSmiShift));
+ push(scratch);
+}
+
+
+void MacroAssembler::PopInt64AsTwoSmis(Register dst, Register scratch) {
+ pop(scratch);
+ // Low bits.
+ shr(scratch, Immediate(kSmiShift));
+ pop(dst);
+ shr(dst, Immediate(kSmiShift));
+ // High bits.
+ shl(dst, Immediate(64 - kSmiShift));
+ or_(dst, scratch);
+}
+
+
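
Editorial aside, not part of the patch: PushInt64AsTwoSmis and PopInt64AsTwoSmis let a raw 64-bit word live on the stack as two values that each look like a smi (payload in the upper 32 bits, tag bit clear), so a GC walking the stack never mistakes it for a heap pointer. A sketch of the split and reassembly, assuming kSmiShift == 32 (helper names invented):

    #include <cstdint>

    void SplitAsTwoSmis(uint64_t v, uint64_t* high_smi, uint64_t* low_smi) {
      *high_smi = (v >> 32) << 32;   // high half, re-tagged as a smi
      *low_smi  = v << 32;           // low half, tagged as a smi
    }

    uint64_t JoinFromTwoSmis(uint64_t high_smi, uint64_t low_smi) {
      return ((high_smi >> 32) << 32) | (low_smi >> 32);
    }
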
+void MacroAssembler::Test(const Operand& src, Smi* source) {
+ testl(Operand(src, kIntSize), Immediate(source->value()));
+}
+
+
+// ----------------------------------------------------------------------------
+
+
+void MacroAssembler::LookupNumberStringCache(Register object,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* not_found) {
+ // Use of registers. Register result is used as a temporary.
+ Register number_string_cache = result;
+ Register mask = scratch1;
+ Register scratch = scratch2;
+
+ // Load the number string cache.
+ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
+
+ // Make the hash mask from the length of the number string cache. It
+ // contains two elements (number and string) for each cache entry.
+ SmiToInteger32(
+ mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
+ shrl(mask, Immediate(1));
+ subq(mask, Immediate(1)); // Make mask.
+
+ // Calculate the entry in the number string cache. The hash value in the
+ // number string cache for smis is just the smi value, and the hash for
+ // doubles is the xor of the upper and lower words. See
+ // Heap::GetNumberStringCache.
+ Label is_smi;
+ Label load_result_from_cache;
+ JumpIfSmi(object, &is_smi);
+ CheckMap(object,
+ isolate()->factory()->heap_number_map(),
+ not_found,
+ DONT_DO_SMI_CHECK);
+
+ STATIC_ASSERT(8 == kDoubleSize);
+ movl(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
+ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset));
+ and_(scratch, mask);
+ // Each entry in string cache consists of two pointer sized fields,
+ // but times_twice_pointer_size (multiplication by 16) scale factor
+ // is not supported by addrmode on x64 platform.
+ // So we have to premultiply entry index before lookup.
+ shl(scratch, Immediate(kPointerSizeLog2 + 1));
+
+ Register index = scratch;
+ Register probe = mask;
+ movq(probe,
+ FieldOperand(number_string_cache,
+ index,
+ times_1,
+ FixedArray::kHeaderSize));
+ JumpIfSmi(probe, not_found);
+ movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
+ ucomisd(xmm0, FieldOperand(probe, HeapNumber::kValueOffset));
+ j(parity_even, not_found); // Bail out if NaN is involved.
+ j(not_equal, not_found); // The cache did not contain this value.
+ jmp(&load_result_from_cache);
+
+ bind(&is_smi);
+ SmiToInteger32(scratch, object);
+ and_(scratch, mask);
+ // Each entry in string cache consists of two pointer sized fields,
+ // but times_twice_pointer_size (multiplication by 16) scale factor
+ // is not supported by addrmode on x64 platform.
+ // So we have to premultiply entry index before lookup.
+ shl(scratch, Immediate(kPointerSizeLog2 + 1));
+
+ // Check if the entry is the smi we are looking for.
+ cmpq(object,
+ FieldOperand(number_string_cache,
+ index,
+ times_1,
+ FixedArray::kHeaderSize));
+ j(not_equal, not_found);
+
+ // Get the result from the cache.
+ bind(&load_result_from_cache);
+ movq(result,
+ FieldOperand(number_string_cache,
+ index,
+ times_1,
+ FixedArray::kHeaderSize + kPointerSize));
+ IncrementCounter(isolate()->counters()->number_to_string_native(), 1);
+}
+
+
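
Editorial aside, not part of the patch: LookupNumberStringCache hashes a smi by its own value and a heap number by XOR-ing the two 32-bit halves of its IEEE bits, masks with (cache length / 2 - 1), and then scales the entry index by 16 manually because x64 addressing modes stop at a times-8 scale factor. A sketch of the byte-offset computation, assuming a power-of-two cache length (helper name invented):

    #include <cstdint>
    #include <cstring>

    uint32_t NumberStringCacheByteOffset(bool is_smi, int32_t smi_value,
                                         double heap_number, uint32_t cache_length) {
      uint32_t mask = cache_length / 2 - 1;        // two slots (number, string) per entry
      uint32_t hash;
      if (is_smi) {
        hash = static_cast<uint32_t>(smi_value);   // smi hash is the value itself
      } else {
        uint64_t bits;
        std::memcpy(&bits, &heap_number, sizeof bits);
        hash = static_cast<uint32_t>(bits >> 32) ^ static_cast<uint32_t>(bits);
      }
      return (hash & mask) << 4;                   // entry index * 16 bytes, premultiplied
    }
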
void MacroAssembler::JumpIfNotString(Register object,
Register object_map,
Label* not_string,
@@ -2202,7 +2389,8 @@ void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(
ASSERT(kNotStringTag != 0);
const int kFlatAsciiStringMask =
kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
- const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
+ const int kFlatAsciiStringTag =
+ kStringTag | kOneByteStringTag | kSeqStringTag;
andl(scratch1, Immediate(kFlatAsciiStringMask));
andl(scratch2, Immediate(kFlatAsciiStringMask));
@@ -2228,7 +2416,7 @@ void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
andl(scratch, Immediate(kFlatAsciiStringMask));
- cmpl(scratch, Immediate(kStringTag | kSeqStringTag | kAsciiStringTag));
+ cmpl(scratch, Immediate(kStringTag | kSeqStringTag | kOneByteStringTag));
j(not_equal, failure, near_jump);
}
@@ -2248,7 +2436,8 @@ void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
ASSERT(kNotStringTag != 0);
const int kFlatAsciiStringMask =
kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
- const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
+ const int kFlatAsciiStringTag =
+ kStringTag | kOneByteStringTag | kSeqStringTag;
andl(scratch1, Immediate(kFlatAsciiStringMask));
andl(scratch2, Immediate(kFlatAsciiStringMask));
@@ -2261,6 +2450,36 @@ void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
}
+template<class T>
+static void JumpIfNotUniqueNameHelper(MacroAssembler* masm,
+ T operand_or_register,
+ Label* not_unique_name,
+ Label::Distance distance) {
+ STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
+ Label succeed;
+ masm->testb(operand_or_register,
+ Immediate(kIsNotStringMask | kIsNotInternalizedMask));
+ masm->j(zero, &succeed, Label::kNear);
+ masm->cmpb(operand_or_register, Immediate(static_cast<uint8_t>(SYMBOL_TYPE)));
+ masm->j(not_equal, not_unique_name, distance);
+
+ masm->bind(&succeed);
+}
+
+
+void MacroAssembler::JumpIfNotUniqueName(Operand operand,
+ Label* not_unique_name,
+ Label::Distance distance) {
+ JumpIfNotUniqueNameHelper<Operand>(this, operand, not_unique_name, distance);
+}
+
+
+void MacroAssembler::JumpIfNotUniqueName(Register reg,
+ Label* not_unique_name,
+ Label::Distance distance) {
+ JumpIfNotUniqueNameHelper<Register>(this, reg, not_unique_name, distance);
+}
+
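
Editorial aside, not part of the patch: a "unique name" here is either an internalized string (both the not-a-string and not-internalized bits clear) or a Symbol; any other instance type takes the not_unique_name branch. A sketch, with the masks passed in rather than hard-coded since they are V8 instance-type constants:

    #include <cstdint>

    bool IsUniqueName(uint8_t instance_type, uint8_t not_string_mask,
                      uint8_t not_internalized_mask, uint8_t symbol_type) {
      if ((instance_type & (not_string_mask | not_internalized_mask)) == 0) {
        return true;                          // internalized string
      }
      return instance_type == symbol_type;    // SYMBOL_TYPE
    }
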
void MacroAssembler::Move(Register dst, Register src) {
if (!dst.is(src)) {
@@ -2270,106 +2489,84 @@ void MacroAssembler::Move(Register dst, Register src) {
void MacroAssembler::Move(Register dst, Handle<Object> source) {
- ASSERT(!source->IsFailure());
+ AllowDeferredHandleDereference smi_check;
if (source->IsSmi()) {
Move(dst, Smi::cast(*source));
} else {
- movq(dst, source, RelocInfo::EMBEDDED_OBJECT);
+ MoveHeapObject(dst, source);
}
}
void MacroAssembler::Move(const Operand& dst, Handle<Object> source) {
- ASSERT(!source->IsFailure());
+ AllowDeferredHandleDereference smi_check;
if (source->IsSmi()) {
Move(dst, Smi::cast(*source));
} else {
- movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
+ MoveHeapObject(kScratchRegister, source);
movq(dst, kScratchRegister);
}
}
void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
+ AllowDeferredHandleDereference smi_check;
if (source->IsSmi()) {
Cmp(dst, Smi::cast(*source));
} else {
- Move(kScratchRegister, source);
+ MoveHeapObject(kScratchRegister, source);
cmpq(dst, kScratchRegister);
}
}
void MacroAssembler::Cmp(const Operand& dst, Handle<Object> source) {
+ AllowDeferredHandleDereference smi_check;
if (source->IsSmi()) {
Cmp(dst, Smi::cast(*source));
} else {
- ASSERT(source->IsHeapObject());
- movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
+ MoveHeapObject(kScratchRegister, source);
cmpq(dst, kScratchRegister);
}
}
void MacroAssembler::Push(Handle<Object> source) {
+ AllowDeferredHandleDereference smi_check;
if (source->IsSmi()) {
Push(Smi::cast(*source));
} else {
- ASSERT(source->IsHeapObject());
- movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
+ MoveHeapObject(kScratchRegister, source);
push(kScratchRegister);
}
}
-void MacroAssembler::LoadHeapObject(Register result,
- Handle<HeapObject> object) {
+void MacroAssembler::MoveHeapObject(Register result,
+ Handle<Object> object) {
+ AllowDeferredHandleDereference using_raw_address;
+ ASSERT(object->IsHeapObject());
if (isolate()->heap()->InNewSpace(*object)) {
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(object);
- movq(result, cell, RelocInfo::GLOBAL_PROPERTY_CELL);
+ Handle<Cell> cell = isolate()->factory()->NewCell(object);
+ movq(result, cell, RelocInfo::CELL);
movq(result, Operand(result, 0));
} else {
- Move(result, object);
+ movq(result, object, RelocInfo::EMBEDDED_OBJECT);
}
}
-void MacroAssembler::PushHeapObject(Handle<HeapObject> object) {
- if (isolate()->heap()->InNewSpace(*object)) {
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(object);
- movq(kScratchRegister, cell, RelocInfo::GLOBAL_PROPERTY_CELL);
- movq(kScratchRegister, Operand(kScratchRegister, 0));
- push(kScratchRegister);
- } else {
- Push(object);
- }
-}
-
-
-void MacroAssembler::LoadGlobalCell(Register dst,
- Handle<JSGlobalPropertyCell> cell) {
+void MacroAssembler::LoadGlobalCell(Register dst, Handle<Cell> cell) {
if (dst.is(rax)) {
- load_rax(cell.location(), RelocInfo::GLOBAL_PROPERTY_CELL);
+ AllowDeferredHandleDereference embedding_raw_address;
+ load_rax(cell.location(), RelocInfo::CELL);
} else {
- movq(dst, cell, RelocInfo::GLOBAL_PROPERTY_CELL);
+ movq(dst, cell, RelocInfo::CELL);
movq(dst, Operand(dst, 0));
}
}
-void MacroAssembler::Push(Smi* source) {
- intptr_t smi = reinterpret_cast<intptr_t>(source);
- if (is_int32(smi)) {
- push(Immediate(static_cast<int32_t>(smi)));
- } else {
- Register constant = GetSmiConstant(source);
- push(constant);
- }
-}
-
-
void MacroAssembler::Drop(int stack_elements) {
if (stack_elements > 0) {
addq(rsp, Immediate(stack_elements * kPointerSize));
@@ -2377,11 +2574,6 @@ void MacroAssembler::Drop(int stack_elements) {
}
-void MacroAssembler::Test(const Operand& src, Smi* source) {
- testl(Operand(src, kIntSize), Immediate(source->value()));
-}
-
-
void MacroAssembler::TestBit(const Operand& src, int bits) {
int byte_offset = bits / kBitsPerByte;
int bit_in_byte = bits & (kBitsPerByte - 1);
@@ -2409,8 +2601,8 @@ void MacroAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
int MacroAssembler::CallSize(ExternalReference ext) {
// Opcode for call kScratchRegister is: Rex.B FF D4 (three bytes).
- const int kCallInstructionSize = 3;
- return LoadAddressSize(ext) + kCallInstructionSize;
+ return LoadAddressSize(ext) +
+ Assembler::kCallScratchRegisterInstructionLength;
}
@@ -2444,7 +2636,8 @@ void MacroAssembler::Call(Handle<Code> code_object,
#ifdef DEBUG
int end_position = pc_offset() + CallSize(code_object);
#endif
- ASSERT(RelocInfo::IsCodeTarget(rmode));
+ ASSERT(RelocInfo::IsCodeTarget(rmode) ||
+ rmode == RelocInfo::CODE_AGE_SEQUENCE);
call(code_object, rmode, ast_id);
#ifdef DEBUG
CHECK_EQ(end_position, pc_offset());
@@ -2547,7 +2740,8 @@ Operand MacroAssembler::SafepointRegisterSlot(Register reg) {
void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
int handler_index) {
// Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize +
+ kFPOnStackSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
@@ -2596,7 +2790,8 @@ void MacroAssembler::JumpToHandlerEntry() {
// rax = exception, rdi = code object, rdx = state.
movq(rbx, FieldOperand(rdi, Code::kHandlerTableOffset));
shr(rdx, Immediate(StackHandler::kKindWidth));
- movq(rdx, FieldOperand(rbx, rdx, times_8, FixedArray::kHeaderSize));
+ movq(rdx,
+ FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize));
SmiToInteger64(rdx, rdx);
lea(rdi, FieldOperand(rdi, rdx, times_1, Code::kHeaderSize));
jmp(rdi);
@@ -2605,7 +2800,8 @@ void MacroAssembler::JumpToHandlerEntry() {
void MacroAssembler::Throw(Register value) {
// Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize +
+ kFPOnStackSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
@@ -2645,7 +2841,8 @@ void MacroAssembler::Throw(Register value) {
void MacroAssembler::ThrowUncatchable(Register value) {
// Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize +
+ kFPOnStackSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
@@ -2696,9 +2893,9 @@ void MacroAssembler::Ret(int bytes_dropped, Register scratch) {
if (is_uint16(bytes_dropped)) {
ret(bytes_dropped);
} else {
- pop(scratch);
+ PopReturnAddressTo(scratch);
addq(rsp, Immediate(bytes_dropped));
- push(scratch);
+ PushReturnAddressFrom(scratch);
ret(0);
}
}
@@ -2769,7 +2966,8 @@ void MacroAssembler::StoreNumberToDoubleElements(
Register elements,
Register index,
XMMRegister xmm_scratch,
- Label* fail) {
+ Label* fail,
+ int elements_offset) {
Label smi_value, is_nan, maybe_nan, not_nan, have_double_value, done;
JumpIfSmi(maybe_number, &smi_value, Label::kNear);
@@ -2788,7 +2986,8 @@ void MacroAssembler::StoreNumberToDoubleElements(
bind(&not_nan);
movsd(xmm_scratch, FieldOperand(maybe_number, HeapNumber::kValueOffset));
bind(&have_double_value);
- movsd(FieldOperand(elements, index, times_8, FixedDoubleArray::kHeaderSize),
+ movsd(FieldOperand(elements, index, times_8,
+ FixedDoubleArray::kHeaderSize - elements_offset),
xmm_scratch);
jmp(&done);
@@ -2810,8 +3009,9 @@ void MacroAssembler::StoreNumberToDoubleElements(
// Value is a smi. convert to a double and store.
// Preserve original value.
SmiToInteger32(kScratchRegister, maybe_number);
- cvtlsi2sd(xmm_scratch, kScratchRegister);
- movsd(FieldOperand(elements, index, times_8, FixedDoubleArray::kHeaderSize),
+ Cvtlsi2sd(xmm_scratch, kScratchRegister);
+ movsd(FieldOperand(elements, index, times_8,
+ FixedDoubleArray::kHeaderSize - elements_offset),
xmm_scratch);
bind(&done);
}
@@ -2819,38 +3019,21 @@ void MacroAssembler::StoreNumberToDoubleElements(
void MacroAssembler::CompareMap(Register obj,
Handle<Map> map,
- Label* early_success,
- CompareMapMode mode) {
+ Label* early_success) {
Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
- if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) {
- ElementsKind kind = map->elements_kind();
- if (IsFastElementsKind(kind)) {
- bool packed = IsFastPackedElementsKind(kind);
- Map* current_map = *map;
- while (CanTransitionToMoreGeneralFastElementsKind(kind, packed)) {
- kind = GetNextMoreGeneralFastElementsKind(kind, packed);
- current_map = current_map->LookupElementsTransitionMap(kind);
- if (!current_map) break;
- j(equal, early_success, Label::kNear);
- Cmp(FieldOperand(obj, HeapObject::kMapOffset),
- Handle<Map>(current_map));
- }
- }
- }
}
void MacroAssembler::CheckMap(Register obj,
Handle<Map> map,
Label* fail,
- SmiCheckType smi_check_type,
- CompareMapMode mode) {
+ SmiCheckType smi_check_type) {
if (smi_check_type == DO_SMI_CHECK) {
JumpIfSmi(obj, fail);
}
Label success;
- CompareMap(obj, map, &success, mode);
+ CompareMap(obj, map, &success);
j(not_equal, fail);
bind(&success);
}
@@ -2891,22 +3074,124 @@ void MacroAssembler::ClampDoubleToUint8(XMMRegister input_reg,
}
-static double kUint32Bias =
- static_cast<double>(static_cast<uint32_t>(0xFFFFFFFF)) + 1;
-
-
void MacroAssembler::LoadUint32(XMMRegister dst,
Register src,
XMMRegister scratch) {
+ if (FLAG_debug_code) {
+ cmpq(src, Immediate(0xffffffff));
+ Assert(below_equal, kInputGPRIsExpectedToHaveUpper32Cleared);
+ }
+ cvtqsi2sd(dst, src);
+}
+
+
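
Editorial aside, not part of the patch: the rewritten LoadUint32 drops the old kUint32Bias fixup. The caller now guarantees (and the debug check asserts) that the upper 32 bits of src are zero, so one signed 64-bit convert (cvtqsi2sd) is exact for the whole uint32 range and the scratch XMM register goes unused. Equivalent C++:

    #include <cstdint>

    double Uint32ToDouble(uint32_t value) {
      int64_t wide = static_cast<int64_t>(value);  // zero-extended, always non-negative
      return static_cast<double>(wide);            // exact for 0 .. 2^32 - 1
    }
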
+void MacroAssembler::SlowTruncateToI(Register result_reg,
+ Register input_reg,
+ int offset) {
+ DoubleToIStub stub(input_reg, result_reg, offset, true);
+ call(stub.GetCode(isolate()), RelocInfo::CODE_TARGET);
+}
+
+
+void MacroAssembler::TruncateHeapNumberToI(Register result_reg,
+ Register input_reg) {
+ Label done;
+ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ cvttsd2siq(result_reg, xmm0);
+ Set(kScratchRegister, V8_UINT64_C(0x8000000000000000));
+ cmpq(result_reg, kScratchRegister);
+ j(not_equal, &done, Label::kNear);
+
+ // Slow case.
+ if (input_reg.is(result_reg)) {
+ subq(rsp, Immediate(kDoubleSize));
+ movsd(MemOperand(rsp, 0), xmm0);
+ SlowTruncateToI(result_reg, rsp, 0);
+ addq(rsp, Immediate(kDoubleSize));
+ } else {
+ SlowTruncateToI(result_reg, input_reg);
+ }
+
+ bind(&done);
+}
+
+
+void MacroAssembler::TruncateDoubleToI(Register result_reg,
+ XMMRegister input_reg) {
Label done;
- cmpl(src, Immediate(0));
+ cvttsd2siq(result_reg, input_reg);
movq(kScratchRegister,
- reinterpret_cast<int64_t>(&kUint32Bias),
- RelocInfo::NONE);
- movsd(scratch, Operand(kScratchRegister, 0));
- cvtlsi2sd(dst, src);
- j(not_sign, &done, Label::kNear);
- addsd(dst, scratch);
+ V8_INT64_C(0x8000000000000000),
+ RelocInfo::NONE64);
+ cmpq(result_reg, kScratchRegister);
+ j(not_equal, &done, Label::kNear);
+
+ subq(rsp, Immediate(kDoubleSize));
+ movsd(MemOperand(rsp, 0), input_reg);
+ SlowTruncateToI(result_reg, rsp, 0);
+ addq(rsp, Immediate(kDoubleSize));
+
+ bind(&done);
+}
+
+
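
Editorial aside, not part of the patch: both truncation helpers rely on cvttsd2siq producing the "integer indefinite" value 0x8000000000000000 for NaN or out-of-range inputs; comparing against that sentinel is how the fast path decides to spill the double and call DoubleToIStub. A portable sketch of the fast-path test (helper name invented):

    #include <cstdint>
    #include <cmath>

    bool FastTruncateToInt64(double d, int64_t* out) {
      if (std::isnan(d) || d >= 9223372036854775808.0      /* 2^63 */
                        || d <= -9223372036854775808.0) {  /* -2^63 or lower */
        *out = INT64_MIN;                // the sentinel compared against above
        return false;                    // fall back to the slow-path stub
      }
      *out = static_cast<int64_t>(d);    // truncation toward zero
      return true;
    }
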
+void MacroAssembler::DoubleToI(Register result_reg,
+ XMMRegister input_reg,
+ XMMRegister scratch,
+ MinusZeroMode minus_zero_mode,
+ Label* conversion_failed,
+ Label::Distance dst) {
+ cvttsd2si(result_reg, input_reg);
+ Cvtlsi2sd(xmm0, result_reg);
+ ucomisd(xmm0, input_reg);
+ j(not_equal, conversion_failed, dst);
+ j(parity_even, conversion_failed, dst); // NaN.
+ if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
+ Label done;
+ // The integer converted back is equal to the original. We
+ // only have to test if we got -0 as an input.
+ testl(result_reg, result_reg);
+ j(not_zero, &done, Label::kNear);
+ movmskpd(result_reg, input_reg);
+ // Bit 0 contains the sign of the double in input_reg.
+ // If input was positive, we are ok and return 0, otherwise
+ // jump to conversion_failed.
+ andl(result_reg, Immediate(1));
+ j(not_zero, conversion_failed, dst);
+ bind(&done);
+ }
+}
+
+
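
Editorial aside, not part of the patch: DoubleToI (and TaggedToI below) converts, converts back, and compares to detect lost precision or NaN, then inspects the double's sign bit via movmskpd to reject -0 when FAIL_ON_MINUS_ZERO is requested. The same checks in plain C++ (the explicit range guard only exists to keep the sketch free of undefined behaviour; the hardware conversion needs none):

    #include <cstdint>
    #include <cstring>

    bool DoubleToInt32Exact(double d, bool fail_on_minus_zero, int32_t* out) {
      if (!(d >= -2147483648.0 && d <= 2147483647.0)) return false;  // also rejects NaN
      int32_t i = static_cast<int32_t>(d);               // cvttsd2si (truncate)
      if (static_cast<double>(i) != d) return false;     // lost precision
      if (fail_on_minus_zero && i == 0) {
        uint64_t bits;
        std::memcpy(&bits, &d, sizeof bits);
        if (bits >> 63) return false;                    // sign bit set: d was -0.0
      }
      *out = i;
      return true;
    }
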
+void MacroAssembler::TaggedToI(Register result_reg,
+ Register input_reg,
+ XMMRegister temp,
+ MinusZeroMode minus_zero_mode,
+ Label* lost_precision,
+ Label::Distance dst) {
+ Label done;
+ ASSERT(!temp.is(xmm0));
+
+ // Heap number map check.
+ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex);
+ j(not_equal, lost_precision, dst);
+
+ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ cvttsd2si(result_reg, xmm0);
+ Cvtlsi2sd(temp, result_reg);
+ ucomisd(xmm0, temp);
+ RecordComment("Deferred TaggedToI: lost precision");
+ j(not_equal, lost_precision, dst);
+ RecordComment("Deferred TaggedToI: NaN");
+ j(parity_even, lost_precision, dst); // NaN.
+ if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
+ testl(result_reg, result_reg);
+ j(not_zero, &done, Label::kNear);
+ movmskpd(result_reg, xmm0);
+ andl(result_reg, Immediate(1));
+ j(not_zero, lost_precision, dst);
+ }
bind(&done);
}
@@ -2932,6 +3217,7 @@ void MacroAssembler::EnumLength(Register dst, Register map) {
void MacroAssembler::DispatchMap(Register obj,
+ Register unused,
Handle<Map> map,
Handle<Code> success,
SmiCheckType smi_check_type) {
@@ -2953,7 +3239,7 @@ void MacroAssembler::AssertNumber(Register object) {
j(is_smi, &ok, Label::kNear);
Cmp(FieldOperand(object, HeapObject::kMapOffset),
isolate()->factory()->heap_number_map());
- Check(equal, "Operand is not a number");
+ Check(equal, kOperandIsNotANumber);
bind(&ok);
}
}
@@ -2962,7 +3248,7 @@ void MacroAssembler::AssertNumber(Register object) {
void MacroAssembler::AssertNotSmi(Register object) {
if (emit_debug_code()) {
Condition is_smi = CheckSmi(object);
- Check(NegateCondition(is_smi), "Operand is a smi");
+ Check(NegateCondition(is_smi), kOperandIsASmi);
}
}
@@ -2970,7 +3256,7 @@ void MacroAssembler::AssertNotSmi(Register object) {
void MacroAssembler::AssertSmi(Register object) {
if (emit_debug_code()) {
Condition is_smi = CheckSmi(object);
- Check(is_smi, "Operand is not a smi");
+ Check(is_smi, kOperandIsNotASmi);
}
}
@@ -2978,7 +3264,7 @@ void MacroAssembler::AssertSmi(Register object) {
void MacroAssembler::AssertSmi(const Operand& object) {
if (emit_debug_code()) {
Condition is_smi = CheckSmi(object);
- Check(is_smi, "Operand is not a smi");
+ Check(is_smi, kOperandIsNotASmi);
}
}
@@ -2986,9 +3272,9 @@ void MacroAssembler::AssertSmi(const Operand& object) {
void MacroAssembler::AssertZeroExtended(Register int32_register) {
if (emit_debug_code()) {
ASSERT(!int32_register.is(kScratchRegister));
- movq(kScratchRegister, 0x100000000l, RelocInfo::NONE);
+ movq(kScratchRegister, 0x100000000l, RelocInfo::NONE64);
cmpq(kScratchRegister, int32_register);
- Check(above_equal, "32 bit value in register is not zero-extended");
+ Check(above_equal, k32BitValueInRegisterIsNotZeroExtended);
}
}
@@ -2996,24 +3282,37 @@ void MacroAssembler::AssertZeroExtended(Register int32_register) {
void MacroAssembler::AssertString(Register object) {
if (emit_debug_code()) {
testb(object, Immediate(kSmiTagMask));
- Check(not_equal, "Operand is a smi and not a string");
+ Check(not_equal, kOperandIsASmiAndNotAString);
push(object);
movq(object, FieldOperand(object, HeapObject::kMapOffset));
CmpInstanceType(object, FIRST_NONSTRING_TYPE);
pop(object);
- Check(below, "Operand is not a string");
+ Check(below, kOperandIsNotAString);
+ }
+}
+
+
+void MacroAssembler::AssertName(Register object) {
+ if (emit_debug_code()) {
+ testb(object, Immediate(kSmiTagMask));
+ Check(not_equal, kOperandIsASmiAndNotAName);
+ push(object);
+ movq(object, FieldOperand(object, HeapObject::kMapOffset));
+ CmpInstanceType(object, LAST_NAME_TYPE);
+ pop(object);
+ Check(below_equal, kOperandIsNotAName);
}
}
void MacroAssembler::AssertRootValue(Register src,
Heap::RootListIndex root_value_index,
- const char* message) {
+ BailoutReason reason) {
if (emit_debug_code()) {
ASSERT(!src.is(kScratchRegister));
LoadRoot(kScratchRegister, root_value_index);
cmpq(src, kScratchRegister);
- Check(equal, message);
+ Check(equal, reason);
}
}
@@ -3030,6 +3329,16 @@ Condition MacroAssembler::IsObjectStringType(Register heap_object,
}
+Condition MacroAssembler::IsObjectNameType(Register heap_object,
+ Register map,
+ Register instance_type) {
+ movq(map, FieldOperand(heap_object, HeapObject::kMapOffset));
+ movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
+ cmpb(instance_type, Immediate(static_cast<uint8_t>(LAST_NAME_TYPE)));
+ return below_equal;
+}
+
+
void MacroAssembler::TryGetFunctionPrototype(Register function,
Register result,
Label* miss,
@@ -3128,7 +3437,7 @@ void MacroAssembler::DebugBreak() {
LoadAddress(rbx, ExternalReference(Runtime::kDebugBreak, isolate()));
CEntryStub ces(1);
ASSERT(AllowThisStubCall(&ces));
- Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
+ Call(ces.GetCode(isolate()), RelocInfo::DEBUG_BREAK);
}
#endif // ENABLE_DEBUGGER_SUPPORT
@@ -3246,6 +3555,7 @@ void MacroAssembler::InvokeFunction(Register function,
void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
+ const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
const CallWrapper& call_wrapper,
@@ -3254,14 +3564,13 @@ void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
ASSERT(flag == JUMP_FUNCTION || has_frame());
// Get the function and setup the context.
- LoadHeapObject(rdi, function);
+ Move(rdi, function);
movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
// We call indirectly through the code field in the function to
// allow recompilation to take effect without changing any of the
// call sites.
movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
- ParameterCount expected(function->shared()->formal_parameter_count());
InvokeCode(rdx, expected, actual, flag, call_wrapper, call_kind);
}
@@ -3342,6 +3651,30 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
}
+void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
+ if (frame_mode == BUILD_STUB_FRAME) {
+ push(rbp); // Caller's frame pointer.
+ movq(rbp, rsp);
+ push(rsi); // Callee's context.
+ Push(Smi::FromInt(StackFrame::STUB));
+ } else {
+ PredictableCodeSizeScope predictible_code_size_scope(this,
+ kNoCodeAgeSequenceLength);
+ if (isolate()->IsCodePreAgingActive()) {
+ // Pre-age the code.
+ Call(isolate()->builtins()->MarkCodeAsExecutedOnce(),
+ RelocInfo::CODE_AGE_SEQUENCE);
+ Nop(kNoCodeAgeSequenceLength - Assembler::kShortCallInstructionLength);
+ } else {
+ push(rbp); // Caller's frame pointer.
+ movq(rbp, rsp);
+ push(rsi); // Callee's context.
+ push(rdi); // Callee's JS function.
+ }
+ }
+}
+
+
void MacroAssembler::EnterFrame(StackFrame::Type type) {
push(rbp);
movq(rbp, rsp);
@@ -3354,7 +3687,7 @@ void MacroAssembler::EnterFrame(StackFrame::Type type) {
isolate()->factory()->undefined_value(),
RelocInfo::EMBEDDED_OBJECT);
cmpq(Operand(rsp, 0), kScratchRegister);
- Check(not_equal, "code object not properly patched");
+ Check(not_equal, kCodeObjectNotProperlyPatched);
}
}
@@ -3363,7 +3696,7 @@ void MacroAssembler::LeaveFrame(StackFrame::Type type) {
if (emit_debug_code()) {
Move(kScratchRegister, Smi::FromInt(type));
cmpq(Operand(rbp, StandardFrameConstants::kMarkerOffset), kScratchRegister);
- Check(equal, "stack frame types must match");
+ Check(equal, kStackFrameTypesMustMatch);
}
movq(rsp, rbp);
pop(rbp);
@@ -3373,9 +3706,10 @@ void MacroAssembler::LeaveFrame(StackFrame::Type type) {
void MacroAssembler::EnterExitFramePrologue(bool save_rax) {
// Set up the frame structure on the stack.
// All constants are relative to the frame pointer of the exit frame.
- ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
- ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
- ASSERT(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize);
+ ASSERT(ExitFrameConstants::kCallerSPDisplacement ==
+ kFPOnStackSize + kPCOnStackSize);
+ ASSERT(ExitFrameConstants::kCallerPCOffset == kFPOnStackSize);
+ ASSERT(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize);
push(rbp);
movq(rbp, rsp);
@@ -3403,11 +3737,11 @@ void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
#endif
// Optionally save all XMM registers.
if (save_doubles) {
- int space = XMMRegister::kNumRegisters * kDoubleSize +
+ int space = XMMRegister::kMaxNumAllocatableRegisters * kDoubleSize +
arg_stack_space * kPointerSize;
subq(rsp, Immediate(space));
int offset = -2 * kPointerSize;
- for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; i++) {
+ for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
XMMRegister reg = XMMRegister::FromAllocationIndex(i);
movsd(Operand(rbp, offset - ((i + 1) * kDoubleSize)), reg);
}
@@ -3451,7 +3785,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles) {
// r15 : argv
if (save_doubles) {
int offset = -2 * kPointerSize;
- for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; i++) {
+ for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
XMMRegister reg = XMMRegister::FromAllocationIndex(i);
movsd(reg, Operand(rbp, offset - ((i + 1) * kDoubleSize)));
}
@@ -3464,26 +3798,27 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles) {
// from the caller stack.
lea(rsp, Operand(r15, 1 * kPointerSize));
- // Push the return address to get ready to return.
- push(rcx);
+ PushReturnAddressFrom(rcx);
- LeaveExitFrameEpilogue();
+ LeaveExitFrameEpilogue(true);
}
-void MacroAssembler::LeaveApiExitFrame() {
+void MacroAssembler::LeaveApiExitFrame(bool restore_context) {
movq(rsp, rbp);
pop(rbp);
- LeaveExitFrameEpilogue();
+ LeaveExitFrameEpilogue(restore_context);
}
-void MacroAssembler::LeaveExitFrameEpilogue() {
+void MacroAssembler::LeaveExitFrameEpilogue(bool restore_context) {
// Restore current context from top and clear it in debug mode.
ExternalReference context_address(Isolate::kContextAddress, isolate());
Operand context_operand = ExternalOperand(context_address);
- movq(rsi, context_operand);
+ if (restore_context) {
+ movq(rsi, context_operand);
+ }
#ifdef DEBUG
movq(context_operand, Immediate(0));
#endif
@@ -3509,7 +3844,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
// When generating debug code, make sure the lexical context is set.
if (emit_debug_code()) {
cmpq(scratch, Immediate(0));
- Check(not_equal, "we should not have an empty lexical context");
+ Check(not_equal, kWeShouldNotHaveAnEmptyLexicalContext);
}
// Load the native context of the current context.
int offset =
@@ -3521,7 +3856,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
if (emit_debug_code()) {
Cmp(FieldOperand(scratch, HeapObject::kMapOffset),
isolate()->factory()->native_context_map());
- Check(equal, "JSGlobalObject::native_context should be a native context.");
+ Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
}
// Check if both contexts are the same.
@@ -3540,12 +3875,12 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
movq(holder_reg,
FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
CompareRoot(holder_reg, Heap::kNullValueRootIndex);
- Check(not_equal, "JSGlobalProxy::context() should not be null.");
+ Check(not_equal, kJSGlobalProxyContextShouldNotBeNull);
// Read the first word and compare to native_context_map(),
movq(holder_reg, FieldOperand(holder_reg, HeapObject::kMapOffset));
CompareRoot(holder_reg, Heap::kNativeContextMapRootIndex);
- Check(equal, "JSGlobalObject::native_context should be a native context.");
+ Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
pop(holder_reg);
}
@@ -3680,8 +4015,8 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss,
void MacroAssembler::LoadAllocationTopHelper(Register result,
Register scratch,
AllocationFlags flags) {
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
+ ExternalReference allocation_top =
+ AllocationUtils::GetAllocationTopReference(isolate(), flags);
// Just return if allocation top is already known.
if ((flags & RESULT_CONTAINS_TOP) != 0) {
@@ -3689,9 +4024,9 @@ void MacroAssembler::LoadAllocationTopHelper(Register result,
ASSERT(!scratch.is_valid());
#ifdef DEBUG
// Assert that result actually contains top on entry.
- Operand top_operand = ExternalOperand(new_space_allocation_top);
+ Operand top_operand = ExternalOperand(allocation_top);
cmpq(result, top_operand);
- Check(equal, "Unexpected allocation top");
+ Check(equal, kUnexpectedAllocationTop);
#endif
return;
}
@@ -3699,40 +4034,43 @@ void MacroAssembler::LoadAllocationTopHelper(Register result,
// Move address of new object to result. Use scratch register if available,
// and keep address in scratch until call to UpdateAllocationTopHelper.
if (scratch.is_valid()) {
- LoadAddress(scratch, new_space_allocation_top);
+ LoadAddress(scratch, allocation_top);
movq(result, Operand(scratch, 0));
} else {
- Load(result, new_space_allocation_top);
+ Load(result, allocation_top);
}
}
void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
- Register scratch) {
+ Register scratch,
+ AllocationFlags flags) {
if (emit_debug_code()) {
testq(result_end, Immediate(kObjectAlignmentMask));
- Check(zero, "Unaligned allocation in new space");
+ Check(zero, kUnalignedAllocationInNewSpace);
}
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
+ ExternalReference allocation_top =
+ AllocationUtils::GetAllocationTopReference(isolate(), flags);
// Update new top.
if (scratch.is_valid()) {
// Scratch already contains address of allocation top.
movq(Operand(scratch, 0), result_end);
} else {
- Store(new_space_allocation_top, result_end);
+ Store(allocation_top, result_end);
}
}
-void MacroAssembler::AllocateInNewSpace(int object_size,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags) {
+void MacroAssembler::Allocate(int object_size,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ AllocationFlags flags) {
+ ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
+ ASSERT(object_size <= Page::kMaxNonCodeHeapObjectSize);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -3752,9 +4090,20 @@ void MacroAssembler::AllocateInNewSpace(int object_size,
// Load address of new object into result.
LoadAllocationTopHelper(result, scratch, flags);
+ if (isolate()->heap_profiler()->is_tracking_allocations()) {
+ RecordObjectAllocation(isolate(), result, object_size);
+ }
+
+ // Align the next allocation. Storing the filler map without checking top is
+ // safe in new-space because the limit of the heap is aligned there.
+ if (((flags & DOUBLE_ALIGNMENT) != 0) && FLAG_debug_code) {
+ testq(result, Immediate(kDoubleAlignmentMask));
+ Check(zero, kAllocationIsNotDoubleAligned);
+ }
+
// Calculate new top and bail out if new space is exhausted.
- ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address(isolate());
+ ExternalReference allocation_limit =
+ AllocationUtils::GetAllocationLimitReference(isolate(), flags);
Register top_reg = result_end.is_valid() ? result_end : result;
@@ -3763,81 +4112,49 @@ void MacroAssembler::AllocateInNewSpace(int object_size,
}
addq(top_reg, Immediate(object_size));
j(carry, gc_required);
- Operand limit_operand = ExternalOperand(new_space_allocation_limit);
+ Operand limit_operand = ExternalOperand(allocation_limit);
cmpq(top_reg, limit_operand);
j(above, gc_required);
// Update allocation top.
- UpdateAllocationTopHelper(top_reg, scratch);
+ UpdateAllocationTopHelper(top_reg, scratch, flags);
+ bool tag_result = (flags & TAG_OBJECT) != 0;
if (top_reg.is(result)) {
- if ((flags & TAG_OBJECT) != 0) {
+ if (tag_result) {
subq(result, Immediate(object_size - kHeapObjectTag));
} else {
subq(result, Immediate(object_size));
}
- } else if ((flags & TAG_OBJECT) != 0) {
+ } else if (tag_result) {
// Tag the result if requested.
- addq(result, Immediate(kHeapObjectTag));
+ ASSERT(kHeapObjectTag == 1);
+ incq(result);
}
}
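
Editorial aside, not part of the patch: the renamed Allocate() keeps the classic bump-pointer fast path for whichever space the AllocationFlags select: load the top pointer, add the object size, bail to gc_required on carry or when the limit is crossed, store the new top, and tag the result pointer with kHeapObjectTag (1). In outline (types and names invented for illustration):

    #include <cstddef>
    #include <cstdint>

    struct Space { uintptr_t top; uintptr_t limit; };  // stands in for the top/limit external references

    void* BumpAllocate(Space* space, size_t object_size, bool tag_object) {
      uintptr_t result  = space->top;
      uintptr_t new_top = result + object_size;
      if (new_top < result || new_top > space->limit) return nullptr;  // gc_required
      space->top = new_top;                        // UpdateAllocationTopHelper
      if (tag_object) result += 1;                 // kHeapObjectTag == 1 (the incq above)
      return reinterpret_cast<void*>(result);
    }
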
-void MacroAssembler::AllocateInNewSpace(int header_size,
- ScaleFactor element_size,
- Register element_count,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags) {
- if (!FLAG_inline_new) {
- if (emit_debug_code()) {
- // Trash the registers to simulate an allocation failure.
- movl(result, Immediate(0x7091));
- movl(result_end, Immediate(0x7191));
- if (scratch.is_valid()) {
- movl(scratch, Immediate(0x7291));
- }
- // Register element_count is not modified by the function.
- }
- jmp(gc_required);
- return;
- }
- ASSERT(!result.is(result_end));
-
- // Load address of new object into result.
- LoadAllocationTopHelper(result, scratch, flags);
-
- // Calculate new top and bail out if new space is exhausted.
- ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address(isolate());
-
- // We assume that element_count*element_size + header_size does not
- // overflow.
+void MacroAssembler::Allocate(int header_size,
+ ScaleFactor element_size,
+ Register element_count,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ AllocationFlags flags) {
+ ASSERT((flags & SIZE_IN_WORDS) == 0);
lea(result_end, Operand(element_count, element_size, header_size));
- addq(result_end, result);
- j(carry, gc_required);
- Operand limit_operand = ExternalOperand(new_space_allocation_limit);
- cmpq(result_end, limit_operand);
- j(above, gc_required);
-
- // Update allocation top.
- UpdateAllocationTopHelper(result_end, scratch);
-
- // Tag the result if requested.
- if ((flags & TAG_OBJECT) != 0) {
- addq(result, Immediate(kHeapObjectTag));
- }
+ Allocate(result_end, result, result_end, scratch, gc_required, flags);
}
-void MacroAssembler::AllocateInNewSpace(Register object_size,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags) {
+void MacroAssembler::Allocate(Register object_size,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ AllocationFlags flags) {
+ ASSERT((flags & SIZE_IN_WORDS) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -3856,20 +4173,31 @@ void MacroAssembler::AllocateInNewSpace(Register object_size,
// Load address of new object into result.
LoadAllocationTopHelper(result, scratch, flags);
+ if (isolate()->heap_profiler()->is_tracking_allocations()) {
+ RecordObjectAllocation(isolate(), result, object_size);
+ }
+
+ // Align the next allocation. Storing the filler map without checking top is
+ // safe in new-space because the limit of the heap is aligned there.
+ if (((flags & DOUBLE_ALIGNMENT) != 0) && FLAG_debug_code) {
+ testq(result, Immediate(kDoubleAlignmentMask));
+ Check(zero, kAllocationIsNotDoubleAligned);
+ }
+
// Calculate new top and bail out if new space is exhausted.
- ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address(isolate());
+ ExternalReference allocation_limit =
+ AllocationUtils::GetAllocationLimitReference(isolate(), flags);
if (!object_size.is(result_end)) {
movq(result_end, object_size);
}
addq(result_end, result);
j(carry, gc_required);
- Operand limit_operand = ExternalOperand(new_space_allocation_limit);
+ Operand limit_operand = ExternalOperand(allocation_limit);
cmpq(result_end, limit_operand);
j(above, gc_required);
// Update allocation top.
- UpdateAllocationTopHelper(result_end, scratch);
+ UpdateAllocationTopHelper(result_end, scratch, flags);
// Tag the result if requested.
if ((flags & TAG_OBJECT) != 0) {
@@ -3887,7 +4215,7 @@ void MacroAssembler::UndoAllocationInNewSpace(Register object) {
Operand top_operand = ExternalOperand(new_space_allocation_top);
#ifdef DEBUG
cmpq(object, top_operand);
- Check(below, "Undo allocation of non allocated memory");
+ Check(below, kUndoAllocationOfNonAllocatedMemory);
#endif
movq(top_operand, object);
}
@@ -3897,12 +4225,7 @@ void MacroAssembler::AllocateHeapNumber(Register result,
Register scratch,
Label* gc_required) {
// Allocate heap number in new space.
- AllocateInNewSpace(HeapNumber::kSize,
- result,
- scratch,
- no_reg,
- gc_required,
- TAG_OBJECT);
+ Allocate(HeapNumber::kSize, result, scratch, no_reg, gc_required, TAG_OBJECT);
// Set the map.
LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex);
@@ -3930,14 +4253,14 @@ void MacroAssembler::AllocateTwoByteString(Register result,
}
// Allocate two byte string in new space.
- AllocateInNewSpace(SeqTwoByteString::kHeaderSize,
- times_1,
- scratch1,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
+ Allocate(SeqTwoByteString::kHeaderSize,
+ times_1,
+ scratch1,
+ result,
+ scratch2,
+ scratch3,
+ gc_required,
+ TAG_OBJECT);
// Set the map, length and hash field.
LoadRoot(kScratchRegister, Heap::kStringMapRootIndex);
@@ -3957,7 +4280,7 @@ void MacroAssembler::AllocateAsciiString(Register result,
Label* gc_required) {
// Calculate the number of bytes needed for the characters in the string while
// observing object alignment.
- const int kHeaderAlignment = SeqAsciiString::kHeaderSize &
+ const int kHeaderAlignment = SeqOneByteString::kHeaderSize &
kObjectAlignmentMask;
movl(scratch1, length);
ASSERT(kCharSize == 1);
@@ -3968,14 +4291,14 @@ void MacroAssembler::AllocateAsciiString(Register result,
}
// Allocate ASCII string in new space.
- AllocateInNewSpace(SeqAsciiString::kHeaderSize,
- times_1,
- scratch1,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
+ Allocate(SeqOneByteString::kHeaderSize,
+ times_1,
+ scratch1,
+ result,
+ scratch2,
+ scratch3,
+ gc_required,
+ TAG_OBJECT);
// Set the map, length and hash field.
LoadRoot(kScratchRegister, Heap::kAsciiStringMapRootIndex);
@@ -3992,12 +4315,8 @@ void MacroAssembler::AllocateTwoByteConsString(Register result,
Register scratch2,
Label* gc_required) {
// Allocate heap number in new space.
- AllocateInNewSpace(ConsString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
+ Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
+ TAG_OBJECT);
// Set the map. The other fields are left uninitialized.
LoadRoot(kScratchRegister, Heap::kConsStringMapRootIndex);
@@ -4009,13 +4328,33 @@ void MacroAssembler::AllocateAsciiConsString(Register result,
Register scratch1,
Register scratch2,
Label* gc_required) {
- // Allocate heap number in new space.
- AllocateInNewSpace(ConsString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
+ Label allocate_new_space, install_map;
+ AllocationFlags flags = TAG_OBJECT;
+
+ ExternalReference high_promotion_mode = ExternalReference::
+ new_space_high_promotion_mode_active_address(isolate());
+
+ Load(scratch1, high_promotion_mode);
+ testb(scratch1, Immediate(1));
+ j(zero, &allocate_new_space);
+ Allocate(ConsString::kSize,
+ result,
+ scratch1,
+ scratch2,
+ gc_required,
+ static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE));
+
+ jmp(&install_map);
+
+ bind(&allocate_new_space);
+ Allocate(ConsString::kSize,
+ result,
+ scratch1,
+ scratch2,
+ gc_required,
+ flags);
+
+ bind(&install_map);
// Set the map. The other fields are left uninitialized.
LoadRoot(kScratchRegister, Heap::kConsAsciiStringMapRootIndex);
@@ -4028,12 +4367,8 @@ void MacroAssembler::AllocateTwoByteSlicedString(Register result,
Register scratch2,
Label* gc_required) {
// Allocate heap number in new space.
- AllocateInNewSpace(SlicedString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
+ Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
+ TAG_OBJECT);
// Set the map. The other fields are left uninitialized.
LoadRoot(kScratchRegister, Heap::kSlicedStringMapRootIndex);
@@ -4046,12 +4381,8 @@ void MacroAssembler::AllocateAsciiSlicedString(Register result,
Register scratch2,
Label* gc_required) {
// Allocate heap number in new space.
- AllocateInNewSpace(SlicedString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
+ Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
+ TAG_OBJECT);
// Set the map. The other fields are left uninitialized.
LoadRoot(kScratchRegister, Heap::kSlicedAsciiStringMapRootIndex);
@@ -4074,7 +4405,7 @@ void MacroAssembler::CopyBytes(Register destination,
ASSERT(min_length >= 0);
if (emit_debug_code()) {
cmpl(length, Immediate(min_length));
- Assert(greater_equal, "Invalid min_length");
+ Assert(greater_equal, kInvalidMinLength);
}
Label loop, done, short_string, short_loop;
@@ -4092,12 +4423,12 @@ void MacroAssembler::CopyBytes(Register destination,
// we keep source aligned for the rep movs operation by copying the odd bytes
// at the end of the ranges.
movq(scratch, length);
- shrl(length, Immediate(3));
+ shrl(length, Immediate(kPointerSizeLog2));
repmovsq();
// Move remaining bytes of length.
- andl(scratch, Immediate(0x7));
- movq(length, Operand(source, scratch, times_1, -8));
- movq(Operand(destination, scratch, times_1, -8), length);
+ andl(scratch, Immediate(kPointerSize - 1));
+ movq(length, Operand(source, scratch, times_1, -kPointerSize));
+ movq(Operand(destination, scratch, times_1, -kPointerSize), length);
addq(destination, scratch);
if (min_length <= kLongStringLimit) {
@@ -4158,7 +4489,7 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
if (emit_debug_code()) {
CompareRoot(FieldOperand(dst, HeapObject::kMapOffset),
Heap::kWithContextMapRootIndex);
- Check(not_equal, "Variable resolved to with context.");
+ Check(not_equal, kVariableResolvedToWithContext);
}
}
@@ -4231,6 +4562,15 @@ void MacroAssembler::LoadGlobalFunction(int index, Register function) {
}
+void MacroAssembler::LoadArrayFunction(Register function) {
+ movq(function,
+ Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ movq(function, FieldOperand(function, GlobalObject::kGlobalContextOffset));
+ movq(function,
+ Operand(function, Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
+}
+
+
void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
Register map) {
// Load the initial map. The global functions all have initial maps.
@@ -4240,7 +4580,7 @@ void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
CheckMap(map, isolate()->factory()->meta_map(), &fail, DO_SMI_CHECK);
jmp(&ok);
bind(&fail);
- Abort("Global functions must have initial map");
+ Abort(kGlobalFunctionsMustHaveInitialMap);
bind(&ok);
}
}
@@ -4360,6 +4700,19 @@ void MacroAssembler::CheckPageFlag(
}
+void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
+ Register scratch,
+ Label* if_deprecated) {
+ if (map->CanBeDeprecated()) {
+ Move(scratch, map);
+ movq(scratch, FieldOperand(scratch, Map::kBitField3Offset));
+ SmiToInteger32(scratch, scratch);
+ and_(scratch, Immediate(Map::Deprecated::kMask));
+ j(not_zero, if_deprecated);
+ }
+}
+
+
void MacroAssembler::JumpIfBlack(Register object,
Register bitmap_scratch,
Register mask_scratch,
@@ -4503,7 +4856,7 @@ void MacroAssembler::EnsureNotWhite(
bind(&not_external);
// Sequential string, either ASCII or UC16.
- ASSERT(kAsciiStringTag == 0x04);
+ ASSERT(kOneByteStringTag == 0x04);
and_(length, Immediate(kStringEncodingMask));
xor_(length, Immediate(kStringEncodingMask));
addq(length, Immediate(0x04));
@@ -4563,6 +4916,58 @@ void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
j(not_equal, &next);
}
+void MacroAssembler::TestJSArrayForAllocationMemento(
+ Register receiver_reg,
+ Register scratch_reg,
+ Label* no_memento_found) {
+ ExternalReference new_space_start =
+ ExternalReference::new_space_start(isolate());
+ ExternalReference new_space_allocation_top =
+ ExternalReference::new_space_allocation_top_address(isolate());
+
+ lea(scratch_reg, Operand(receiver_reg,
+ JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
+ movq(kScratchRegister, new_space_start);
+ cmpq(scratch_reg, kScratchRegister);
+ j(less, no_memento_found);
+ cmpq(scratch_reg, ExternalOperand(new_space_allocation_top));
+ j(greater, no_memento_found);
+ CompareRoot(MemOperand(scratch_reg, -AllocationMemento::kSize),
+ Heap::kAllocationMementoMapRootIndex);
+}
+
+
+void MacroAssembler::RecordObjectAllocation(Isolate* isolate,
+ Register object,
+ Register object_size) {
+ FrameScope frame(this, StackFrame::EXIT);
+ PushSafepointRegisters();
+ PrepareCallCFunction(3);
+ // Save object first, in case it is in rdx (arg_reg_3 on non-Windows ABIs),
+ // which the object_size move below would clobber.
+ movq(kScratchRegister, object);
+ movq(arg_reg_3, object_size);
+ movq(arg_reg_2, kScratchRegister);
+ movq(arg_reg_1, isolate, RelocInfo::EXTERNAL_REFERENCE);
+ CallCFunction(
+ ExternalReference::record_object_allocation_function(isolate), 3);
+ PopSafepointRegisters();
+}
+
+
+void MacroAssembler::RecordObjectAllocation(Isolate* isolate,
+ Register object,
+ int object_size) {
+ FrameScope frame(this, StackFrame::EXIT);
+ PushSafepointRegisters();
+ PrepareCallCFunction(3);
+ movq(arg_reg_2, object);
+ movq(arg_reg_3, Immediate(object_size));
+ movq(arg_reg_1, isolate, RelocInfo::EXTERNAL_REFERENCE);
+ CallCFunction(
+ ExternalReference::record_object_allocation_function(isolate), 3);
+ PopSafepointRegisters();
+}
+
} } // namespace v8::internal
diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h
index cc057ac54..24374349a 100644
--- a/deps/v8/src/x64/macro-assembler-x64.h
+++ b/deps/v8/src/x64/macro-assembler-x64.h
@@ -35,18 +35,6 @@
namespace v8 {
namespace internal {
-// Flags used for the AllocateInNewSpace functions.
-enum AllocationFlags {
- // No special flags.
- NO_ALLOCATION_FLAGS = 0,
- // Return the pointer to the allocated already tagged as a heap object.
- TAG_OBJECT = 1 << 0,
- // The content of the result register already contains the allocation top in
- // new space.
- RESULT_CONTAINS_TOP = 1 << 1
-};
-
-
// Default scratch register used by MacroAssembler (and other code that needs
// a spare register). The register isn't callee-saved and is not used by the
// function calling convention.
@@ -180,6 +168,10 @@ class MacroAssembler: public Assembler {
Label* condition_met,
Label::Distance condition_met_distance = Label::kFar);
+ void CheckMapDeprecated(Handle<Map> map,
+ Register scratch,
+ Label* if_deprecated);
+
// Check if object is in new space. Jumps if the object is not in new space.
// The register scratch can be object itself, but scratch will be clobbered.
void JumpIfNotInNewSpace(Register object,
@@ -290,6 +282,9 @@ class MacroAssembler: public Assembler {
void DebugBreak();
#endif
+ // Generates function and stub prologue code.
+ void Prologue(PrologueFrameMode frame_mode);
+
// Enter specific kind of exit frame; either in normal or
// debug mode. Expects the number of arguments in register rax and
// sets up the number of arguments in register rdi and the pointer
@@ -310,7 +305,7 @@ class MacroAssembler: public Assembler {
// Leave the current exit frame. Expects/provides the return value in
// register rax (untouched).
- void LeaveApiExitFrame();
+ void LeaveApiExitFrame(bool restore_context);
// Push and pop the registers that can hold pointers.
void PushSafepointRegisters() { Pushad(); }
@@ -361,6 +356,7 @@ class MacroAssembler: public Assembler {
CallKind call_kind);
void InvokeFunction(Handle<JSFunction> function,
+ const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
const CallWrapper& call_wrapper,
@@ -382,10 +378,15 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// Smi tagging, untagging and operations on tagged smis.
+ // Support for constant splitting.
+ bool IsUnsafeInt(const int32_t x);
+ void SafeMove(Register dst, Smi* src);
+ void SafePush(Smi* src);
+
void InitializeSmiConstantRegister() {
movq(kSmiConstantRegister,
reinterpret_cast<uint64_t>(Smi::FromInt(kSmiConstantRegisterValue)),
- RelocInfo::NONE);
+ RelocInfo::NONE64);
}
// Conversions between tagged smi values and non-tagged integer values.
@@ -534,15 +535,6 @@ class MacroAssembler: public Assembler {
// Smis represent a subset of integers. The subset is always equivalent to
// a two's complement interpretation of a fixed number of bits.
- // Optimistically adds an integer constant to a supposed smi.
- // If the src is not a smi, or the result is not a smi, jump to
- // the label.
- void SmiTryAddConstant(Register dst,
- Register src,
- Smi* constant,
- Label* on_not_smi_result,
- Label::Distance near_jump = Label::kFar);
-
// Add an integer constant to a tagged smi, giving a tagged smi as result.
// No overflow testing on the result is done.
void SmiAddConstant(Register dst, Register src, Smi* constant);
@@ -580,8 +572,8 @@ class MacroAssembler: public Assembler {
Label::Distance near_jump = Label::kFar);
// Adds smi values and return the result as a smi.
- // If dst is src1, then src1 will be destroyed, even if
- // the operation is unsuccessful.
+ // If dst is src1, then src1 will be destroyed if the operation is
+ // successful, otherwise kept intact.
void SmiAdd(Register dst,
Register src1,
Register src2,
@@ -598,18 +590,13 @@ class MacroAssembler: public Assembler {
Register src2);
// Subtracts smi values and return the result as a smi.
- // If dst is src1, then src1 will be destroyed, even if
- // the operation is unsuccessful.
+ // If dst is src1, then src1 will be destroyed if the operation is
+ // successful, otherwise kept intact.
void SmiSub(Register dst,
Register src1,
Register src2,
Label* on_not_smi_result,
Label::Distance near_jump = Label::kFar);
-
- void SmiSub(Register dst,
- Register src1,
- Register src2);
-
void SmiSub(Register dst,
Register src1,
const Operand& src2,
@@ -618,6 +605,10 @@ class MacroAssembler: public Assembler {
void SmiSub(Register dst,
Register src1,
+ Register src2);
+
+ void SmiSub(Register dst,
+ Register src1,
const Operand& src2);
// Multiplies smi values and return the result as a smi,
@@ -727,12 +718,31 @@ class MacroAssembler: public Assembler {
}
void Push(Smi* smi);
+
+ // Save away a 64-bit integer on the stack as two 32-bit integers
+ // masquerading as smis so that the garbage collector skips visiting them.
+ void PushInt64AsTwoSmis(Register src, Register scratch = kScratchRegister);
+ // Reconstruct a 64-bit integer from two 32-bit integers masquerading as
+ // smis on the top of stack.
+ void PopInt64AsTwoSmis(Register dst, Register scratch = kScratchRegister);
+
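A hypothetical call-site sketch for this pair (the choice of rax and the surrounding code are assumptions, not part of this patch):

PushInt64AsTwoSmis(rax);  // Spill the raw 64-bit value as two smi-looking halves.
// ... emit code that may allocate and trigger a GC; a stack walk only sees
// valid-looking tagged values ...
PopInt64AsTwoSmis(rax);   // Reassemble the original 64-bit value.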
void Test(const Operand& dst, Smi* source);
// ---------------------------------------------------------------------------
// String macros.
+ // Generate code to do a lookup in the number string cache. If the number in
+ // the register object is found in the cache the generated code falls through
+ // with the result in the result register. The object and the result register
+ // can be the same. If the number is not found in the cache the code jumps to
+ // the label not_found with only the content of register object unchanged.
+ void LookupNumberStringCache(Register object,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* not_found);
+
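A sketch of a possible call site (the register assignments are illustrative, not taken from this patch):

Label not_found, done;
LookupNumberStringCache(rax, rbx, rcx, rdx, &not_found);
// Fall-through: rbx holds the cached string for the number in rax.
jmp(&done);
bind(&not_found);
// Cache miss: rax is unchanged; fall back to the runtime.
bind(&done);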
// If object is a string, its map is loaded into object_map.
void JumpIfNotString(Register object,
Register object_map,
@@ -765,21 +775,32 @@ class MacroAssembler: public Assembler {
Label* on_fail,
Label::Distance near_jump = Label::kFar);
+ // Checks if the given register or operand is a unique name
+ void JumpIfNotUniqueName(Register reg, Label* not_unique_name,
+ Label::Distance distance = Label::kFar);
+ void JumpIfNotUniqueName(Operand operand, Label* not_unique_name,
+ Label::Distance distance = Label::kFar);
+
// ---------------------------------------------------------------------------
// Macro instructions.
+ // Load/store with specific representation.
+ void Load(Register dst, const Operand& src, Representation r);
+ void Store(const Operand& dst, Register src, Representation r);
+
// Load a register with a long value as efficiently as possible.
void Set(Register dst, int64_t x);
void Set(const Operand& dst, int64_t x);
+ // cvtsi2sd instruction only writes to the low 64-bit of dst register, which
+ // hinders register renaming and makes dependence chains longer. So we use
+ // xorps to clear the dst register before cvtsi2sd to solve this issue.
+ void Cvtlsi2sd(XMMRegister dst, Register src);
+ void Cvtlsi2sd(XMMRegister dst, const Operand& src);
+
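The comment above suggests the wrapper simply clears the destination before converting; a minimal sketch of such an expansion (illustrative only, the emitted sequence actually lives in macro-assembler-x64.cc):

void MacroAssembler::Cvtlsi2sd(XMMRegister dst, Register src) {
  xorps(dst, dst);      // Break the false dependence on dst's previous value.
  cvtlsi2sd(dst, src);  // Convert the 32-bit integer in src to a double.
}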
// Move if the registers are not identical.
void Move(Register target, Register source);
- // Support for constant splitting.
- bool IsUnsafeInt(const int x);
- void SafeMove(Register dst, Smi* src);
- void SafePush(Smi* src);
-
// Bit-field support.
void TestBit(const Operand& dst, int bit_index);
@@ -794,25 +815,22 @@ class MacroAssembler: public Assembler {
// Load a heap object and handle the case of new-space objects by
// indirecting via a global cell.
- void LoadHeapObject(Register result, Handle<HeapObject> object);
- void PushHeapObject(Handle<HeapObject> object);
-
- void LoadObject(Register result, Handle<Object> object) {
- if (object->IsHeapObject()) {
- LoadHeapObject(result, Handle<HeapObject>::cast(object));
- } else {
- Move(result, object);
- }
- }
+ void MoveHeapObject(Register result, Handle<Object> object);
// Load a global cell into a register.
- void LoadGlobalCell(Register dst, Handle<JSGlobalPropertyCell> cell);
+ void LoadGlobalCell(Register dst, Handle<Cell> cell);
// Emit code to discard a non-negative number of pointer-sized elements
// from the stack, clobbering only the rsp register.
void Drop(int stack_elements);
void Call(Label* target) { call(target); }
+ void Push(Register src) { push(src); }
+ void Pop(Register dst) { pop(dst); }
+ void PushReturnAddressFrom(Register src) { push(src); }
+ void PopReturnAddressTo(Register dst) { pop(dst); }
+ void MoveDouble(Register dst, const Operand& src) { movq(dst, src); }
+ void MoveDouble(const Operand& dst, Register src) { movq(dst, src); }
// Control Flow
void Jump(Address destination, RelocInfo::Mode rmode);
@@ -827,7 +845,7 @@ class MacroAssembler: public Assembler {
// The size of the code generated for different call instructions.
int CallSize(Address destination, RelocInfo::Mode rmode) {
- return kCallInstructionLength;
+ return kCallSequenceLength;
}
int CallSize(ExternalReference ext);
int CallSize(Handle<Code> code_object) {
@@ -895,7 +913,8 @@ class MacroAssembler: public Assembler {
Register elements,
Register index,
XMMRegister xmm_scratch,
- Label* fail);
+ Label* fail,
+ int elements_offset = 0);
// Compare an object's map with the specified map and its transitioned
// elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. FLAGS are set with
@@ -903,8 +922,7 @@ class MacroAssembler: public Assembler {
// sequences branches to early_success.
void CompareMap(Register obj,
Handle<Map> map,
- Label* early_success,
- CompareMapMode mode = REQUIRE_EXACT_MAP);
+ Label* early_success);
// Check if the map of an object is equal to a specified map and branch to
// label if not. Skip the smi check if not required (object is known to be a
@@ -913,13 +931,13 @@ class MacroAssembler: public Assembler {
void CheckMap(Register obj,
Handle<Map> map,
Label* fail,
- SmiCheckType smi_check_type,
- CompareMapMode mode = REQUIRE_EXACT_MAP);
+ SmiCheckType smi_check_type);
// Check if the map of an object is equal to a specified map and branch to a
// specified target if equal. Skip the smi check if not required (object is
// known to be a heap object)
void DispatchMap(Register obj,
+ Register unused,
Handle<Map> map,
Handle<Code> success,
SmiCheckType smi_check_type);
@@ -933,6 +951,15 @@ class MacroAssembler: public Assembler {
Register map,
Register instance_type);
+ // Check if the object in register heap_object is a name. Afterwards the
+ // register map contains the object map and the register instance_type
+ // contains the instance_type. The registers map and instance_type can be the
+ // same in which case it contains the instance type afterwards. Either of the
+ // registers map and instance_type can be the same as heap_object.
+ Condition IsObjectNameType(Register heap_object,
+ Register map,
+ Register instance_type);
+
// FCmp compares and pops the two values on top of the FPU stack.
// The flag results are similar to integer cmp, but requires unsigned
// jcc instructions (je, ja, jae, jb, jbe, je, and jz).
@@ -944,6 +971,20 @@ class MacroAssembler: public Assembler {
XMMRegister temp_xmm_reg,
Register result_reg);
+ void SlowTruncateToI(Register result_reg, Register input_reg,
+ int offset = HeapNumber::kValueOffset - kHeapObjectTag);
+
+ void TruncateHeapNumberToI(Register result_reg, Register input_reg);
+ void TruncateDoubleToI(Register result_reg, XMMRegister input_reg);
+
+ void DoubleToI(Register result_reg, XMMRegister input_reg,
+ XMMRegister scratch, MinusZeroMode minus_zero_mode,
+ Label* conversion_failed, Label::Distance dst = Label::kFar);
+
+ void TaggedToI(Register result_reg, Register input_reg, XMMRegister temp,
+ MinusZeroMode minus_zero_mode, Label* lost_precision,
+ Label::Distance dst = Label::kFar);
+
void LoadUint32(XMMRegister dst, Register src, XMMRegister scratch);
void LoadInstanceDescriptors(Register map, Register descriptors);
@@ -976,11 +1017,14 @@ class MacroAssembler: public Assembler {
// Abort execution if argument is not a string, enabled via --debug-code.
void AssertString(Register object);
+ // Abort execution if argument is not a name, enabled via --debug-code.
+ void AssertName(Register object);
+
// Abort execution if argument is not the root value with the given index,
// enabled via --debug-code.
void AssertRootValue(Register src,
Heap::RootListIndex root_value_index,
- const char* message);
+ BailoutReason reason);
// ---------------------------------------------------------------------------
// Exception handling
@@ -1023,38 +1067,47 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// Allocation support
- // Allocate an object in new space. If the new space is exhausted control
- // continues at the gc_required label. The allocated object is returned in
- // result and end of the new object is returned in result_end. The register
- // scratch can be passed as no_reg in which case an additional object
- // reference will be added to the reloc info. The returned pointers in result
- // and result_end have not yet been tagged as heap objects. If
- // result_contains_top_on_entry is true the content of result is known to be
- // the allocation top on entry (could be result_end from a previous call to
- // AllocateInNewSpace). If result_contains_top_on_entry is true scratch
+ // Allocate an object in new space or old pointer space. If the given space
+ // is exhausted control continues at the gc_required label. The allocated
+ // object is returned in result and end of the new object is returned in
+ // result_end. The register scratch can be passed as no_reg in which case
+ // an additional object reference will be added to the reloc info. The
+ // returned pointers in result and result_end have not yet been tagged as
+ // heap objects. If result_contains_top_on_entry is true the content of
+ // result is known to be the allocation top on entry (could be result_end
+ // from a previous call). If result_contains_top_on_entry is true scratch
// should be no_reg as it is never used.
- void AllocateInNewSpace(int object_size,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags);
-
- void AllocateInNewSpace(int header_size,
- ScaleFactor element_size,
- Register element_count,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags);
-
- void AllocateInNewSpace(Register object_size,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags);
+ void Allocate(int object_size,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ AllocationFlags flags);
+
+ void Allocate(int header_size,
+ ScaleFactor element_size,
+ Register element_count,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ AllocationFlags flags);
+
+ void Allocate(Register object_size,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ AllocationFlags flags);
+
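For example, the AllocateAsciiSlicedString change earlier in this patch reduces to a single fixed-size call of the first overload (sketch, with arbitrary register choices):

Label gc_required;
Allocate(SlicedString::kSize, rax, rbx, rcx, &gc_required, TAG_OBJECT);
// rax now holds the tagged pointer to the new object; rbx and rcx were scratch.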
+ // Record a JS object allocation if allocations tracking mode is on.
+ void RecordObjectAllocation(Isolate* isolate,
+ Register object,
+ Register object_size);
+
+ void RecordObjectAllocation(Isolate* isolate,
+ Register object,
+ int object_size);
// Undo allocation in new space. The object passed and objects allocated after
// it will no longer be allocated. Make sure that no pointers are left to the
@@ -1165,6 +1218,7 @@ class MacroAssembler: public Assembler {
// Load the global function with the given index.
void LoadGlobalFunction(int index, Register function);
+ void LoadArrayFunction(Register function);
// Load the initial map from the global function. The registers
// function and map can be the same.
@@ -1183,13 +1237,20 @@ class MacroAssembler: public Assembler {
void StubReturn(int argc);
// Call a runtime routine.
- void CallRuntime(const Runtime::Function* f, int num_arguments);
+ void CallRuntime(const Runtime::Function* f,
+ int num_arguments,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs);
// Call a runtime function and save the value of XMM registers.
- void CallRuntimeSaveDoubles(Runtime::FunctionId id);
+ void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
+ const Runtime::Function* function = Runtime::FunctionForId(id);
+ CallRuntime(function, function->nargs, kSaveFPRegs);
+ }
// Convenience function: Same as above, but takes the fid instead.
- void CallRuntime(Runtime::FunctionId id, int num_arguments);
+ void CallRuntime(Runtime::FunctionId id, int num_arguments) {
+ CallRuntime(Runtime::FunctionForId(id), num_arguments);
+ }
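With these inlines, preserving the XMM registers across a runtime call is a matter of passing kSaveFPRegs; a hedged sketch (Runtime::kStackGuard is only an illustrative id):

// The following two calls emit equivalent code:
CallRuntimeSaveDoubles(Runtime::kStackGuard);
CallRuntime(Runtime::FunctionForId(Runtime::kStackGuard),
            Runtime::FunctionForId(Runtime::kStackGuard)->nargs,
            kSaveFPRegs);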
// Convenience function: call an external reference.
void CallExternalReference(const ExternalReference& ext,
@@ -1221,10 +1282,15 @@ class MacroAssembler: public Assembler {
// from handle and propagates exceptions. Clobbers r14, r15, rbx and
// caller-save registers. Restores context. On return removes
// stack_space * kPointerSize (GCed).
- void CallApiFunctionAndReturn(Address function_address, int stack_space);
+ void CallApiFunctionAndReturn(Address function_address,
+ Address thunk_address,
+ Register thunk_last_arg,
+ int stack_space,
+ Operand return_value_operand,
+ Operand* context_restore_operand);
// Before calling a C-function from generated code, align arguments on stack.
- // After aligning the frame, arguments must be stored in esp[0], esp[4],
+ // After aligning the frame, arguments must be stored in rsp[0], rsp[8],
// etc., not pushed. The argument count assumes all arguments are word sized.
// The number of slots reserved for arguments depends on platform. On Windows
// stack slots are reserved for the arguments passed in registers. On other
@@ -1291,15 +1357,15 @@ class MacroAssembler: public Assembler {
// Calls Abort(msg) if the condition cc is not satisfied.
// Use --debug_code to enable.
- void Assert(Condition cc, const char* msg);
+ void Assert(Condition cc, BailoutReason reason);
void AssertFastElements(Register elements);
// Like Assert(), but always enabled.
- void Check(Condition cc, const char* msg);
+ void Check(Condition cc, BailoutReason reason);
// Print a message to stdout and abort execution.
- void Abort(const char* msg);
+ void Abort(BailoutReason msg);
// Check that the stack is aligned.
void CheckStackAlignment();
@@ -1326,6 +1392,26 @@ class MacroAssembler: public Assembler {
void CheckEnumCache(Register null_value,
Label* call_runtime);
+ // AllocationMemento support. Arrays may have an associated
+ // AllocationMemento object that can be checked for in order to pretransition
+ // to another type.
+ // On entry, receiver_reg should point to the array object.
+ // scratch_reg gets clobbered.
+ // If allocation info is present, condition flags are set to equal.
+ // If an allocation memento is present, the condition flags are set to equal.
+ Register scratch_reg,
+ Label* no_memento_found);
+
+ void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
+ Register scratch_reg,
+ Label* memento_found) {
+ Label no_memento_found;
+ TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
+ &no_memento_found);
+ j(equal, memento_found);
+ bind(&no_memento_found);
+ }
+
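A hypothetical call site (registers chosen for illustration, not part of this patch):

Label transition_elements;
JumpIfJSArrayHasAllocationMemento(rdx, rdi, &transition_elements);
// No memento found: continue on the fast path and skip the block below.
bind(&transition_elements);
// A memento is present: pretransition to the target elements kind.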
private:
// Order general registers are pushed by Pushad.
// rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r14, r15.
@@ -1368,7 +1454,7 @@ class MacroAssembler: public Assembler {
// accessible via StackSpaceOperand.
void EnterExitFrameEpilogue(int arg_stack_space, bool save_doubles);
- void LeaveExitFrameEpilogue();
+ void LeaveExitFrameEpilogue(bool restore_context);
// Allocation support helpers.
// Loads the top of new-space into the result register.
@@ -1377,9 +1463,12 @@ class MacroAssembler: public Assembler {
void LoadAllocationTopHelper(Register result,
Register scratch,
AllocationFlags flags);
+
// Update allocation top with value in result_end register.
// If scratch is valid, it contains the address of the allocation top.
- void UpdateAllocationTopHelper(Register result_end, Register scratch);
+ void UpdateAllocationTopHelper(Register result_end,
+ Register scratch,
+ AllocationFlags flags);
// Helper for PopHandleScope. Allowed to perform a GC and returns
// NULL if gc_allowed. Does not perform a GC if !gc_allowed, and
@@ -1413,9 +1502,9 @@ class MacroAssembler: public Assembler {
return kNumSafepointRegisters - kSafepointPushRegisterIndices[reg_code] - 1;
}
- // Needs access to SafepointRegisterStackIndex for optimized frame
+ // Needs access to SafepointRegisterStackIndex for compiled frame
// traversal.
- friend class OptimizedFrame;
+ friend class StandardFrame;
};
@@ -1478,23 +1567,26 @@ inline Operand StackSpaceOperand(int index) {
}
+inline Operand StackOperandForReturnAddress(int32_t disp) {
+ return Operand(rsp, disp);
+}
+
#ifdef GENERATED_CODE_COVERAGE
extern void LogGeneratedCodeCoverage(const char* file_line);
#define CODE_COVERAGE_STRINGIFY(x) #x
#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
-#define ACCESS_MASM(masm) { \
- byte* x64_coverage_function = \
- reinterpret_cast<byte*>(FUNCTION_ADDR(LogGeneratedCodeCoverage)); \
- masm->pushfd(); \
- masm->pushad(); \
- masm->push(Immediate(reinterpret_cast<int>(&__FILE_LINE__))); \
- masm->call(x64_coverage_function, RelocInfo::RUNTIME_ENTRY); \
- masm->pop(rax); \
- masm->popad(); \
- masm->popfd(); \
- } \
+#define ACCESS_MASM(masm) { \
+ Address x64_coverage_function = FUNCTION_ADDR(LogGeneratedCodeCoverage); \
+ masm->pushfq(); \
+ masm->Pushad(); \
+ masm->push(Immediate(reinterpret_cast<int>(&__FILE_LINE__))); \
+ masm->Call(x64_coverage_function, RelocInfo::EXTERNAL_REFERENCE); \
+ masm->pop(rax); \
+ masm->Popad(); \
+ masm->popfq(); \
+ } \
masm->
#else
#define ACCESS_MASM(masm) masm->
diff --git a/deps/v8/src/x64/regexp-macro-assembler-x64.cc b/deps/v8/src/x64/regexp-macro-assembler-x64.cc
index 86f7bfe6c..ca834e277 100644
--- a/deps/v8/src/x64/regexp-macro-assembler-x64.cc
+++ b/deps/v8/src/x64/regexp-macro-assembler-x64.cc
@@ -27,8 +27,9 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_X64)
+#if V8_TARGET_ARCH_X64
+#include "cpu-profiler.h"
#include "serialize.h"
#include "unicode.h"
#include "log.h"
@@ -120,7 +121,7 @@ RegExpMacroAssemblerX64::RegExpMacroAssemblerX64(
int registers_to_save,
Zone* zone)
: NativeRegExpMacroAssembler(zone),
- masm_(Isolate::Current(), NULL, kRegExpCodeSize),
+ masm_(zone->isolate(), NULL, kRegExpCodeSize),
no_root_array_scope_(&masm_),
code_relative_fixup_positions_(4, zone),
mode_(mode),
@@ -226,101 +227,6 @@ void RegExpMacroAssemblerX64::CheckCharacterLT(uc16 limit, Label* on_less) {
}
-void RegExpMacroAssemblerX64::CheckCharacters(Vector<const uc16> str,
- int cp_offset,
- Label* on_failure,
- bool check_end_of_string) {
-#ifdef DEBUG
- // If input is ASCII, don't even bother calling here if the string to
- // match contains a non-ASCII character.
- if (mode_ == ASCII) {
- ASSERT(String::IsAscii(str.start(), str.length()));
- }
-#endif
- int byte_length = str.length() * char_size();
- int byte_offset = cp_offset * char_size();
- if (check_end_of_string) {
- // Check that there are at least str.length() characters left in the input.
- __ cmpl(rdi, Immediate(-(byte_offset + byte_length)));
- BranchOrBacktrack(greater, on_failure);
- }
-
- if (on_failure == NULL) {
- // Instead of inlining a backtrack, (re)use the global backtrack target.
- on_failure = &backtrack_label_;
- }
-
- // Do one character test first to minimize loading for the case that
- // we don't match at all (loading more than one character introduces that
- // chance of reading unaligned and reading across cache boundaries).
- // If the first character matches, expect a larger chance of matching the
- // string, and start loading more characters at a time.
- if (mode_ == ASCII) {
- __ cmpb(Operand(rsi, rdi, times_1, byte_offset),
- Immediate(static_cast<int8_t>(str[0])));
- } else {
- // Don't use 16-bit immediate. The size changing prefix throws off
- // pre-decoding.
- __ movzxwl(rax,
- Operand(rsi, rdi, times_1, byte_offset));
- __ cmpl(rax, Immediate(static_cast<int32_t>(str[0])));
- }
- BranchOrBacktrack(not_equal, on_failure);
-
- __ lea(rbx, Operand(rsi, rdi, times_1, 0));
- for (int i = 1, n = str.length(); i < n; ) {
- if (mode_ == ASCII) {
- if (i + 8 <= n) {
- uint64_t combined_chars =
- (static_cast<uint64_t>(str[i + 0]) << 0) ||
- (static_cast<uint64_t>(str[i + 1]) << 8) ||
- (static_cast<uint64_t>(str[i + 2]) << 16) ||
- (static_cast<uint64_t>(str[i + 3]) << 24) ||
- (static_cast<uint64_t>(str[i + 4]) << 32) ||
- (static_cast<uint64_t>(str[i + 5]) << 40) ||
- (static_cast<uint64_t>(str[i + 6]) << 48) ||
- (static_cast<uint64_t>(str[i + 7]) << 56);
- __ movq(rax, combined_chars, RelocInfo::NONE);
- __ cmpq(rax, Operand(rbx, byte_offset + i));
- i += 8;
- } else if (i + 4 <= n) {
- uint32_t combined_chars =
- (static_cast<uint32_t>(str[i + 0]) << 0) ||
- (static_cast<uint32_t>(str[i + 1]) << 8) ||
- (static_cast<uint32_t>(str[i + 2]) << 16) ||
- (static_cast<uint32_t>(str[i + 3]) << 24);
- __ cmpl(Operand(rbx, byte_offset + i), Immediate(combined_chars));
- i += 4;
- } else {
- __ cmpb(Operand(rbx, byte_offset + i),
- Immediate(static_cast<int8_t>(str[i])));
- i++;
- }
- } else {
- ASSERT(mode_ == UC16);
- if (i + 4 <= n) {
- uint64_t combined_chars = *reinterpret_cast<const uint64_t*>(&str[i]);
- __ movq(rax, combined_chars, RelocInfo::NONE);
- __ cmpq(rax,
- Operand(rsi, rdi, times_1, byte_offset + i * sizeof(uc16)));
- i += 4;
- } else if (i + 2 <= n) {
- uint32_t combined_chars = *reinterpret_cast<const uint32_t*>(&str[i]);
- __ cmpl(Operand(rsi, rdi, times_1, byte_offset + i * sizeof(uc16)),
- Immediate(combined_chars));
- i += 2;
- } else {
- __ movzxwl(rax,
- Operand(rsi, rdi, times_1, byte_offset + i * sizeof(uc16)));
- __ cmpl(rax, Immediate(str[i]));
- i++;
- }
- }
- BranchOrBacktrack(not_equal, on_failure);
- }
-}
-
-
void RegExpMacroAssemblerX64::CheckGreedyLoop(Label* on_equal) {
Label fallthrough;
__ cmpl(rdi, Operand(backtrack_stackpointer(), 0));
@@ -393,8 +299,13 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
__ j(not_equal, on_no_match); // Definitely not equal.
__ subb(rax, Immediate('a'));
__ cmpb(rax, Immediate('z' - 'a'));
- __ j(above, on_no_match); // Weren't letters anyway.
-
+ __ j(below_equal, &loop_increment); // In range 'a'-'z'.
+ // Latin-1: Check for values in range [224,254] but not 247.
+ __ subb(rax, Immediate(224 - 'a'));
+ __ cmpb(rax, Immediate(254 - 224));
+ __ j(above, on_no_match); // Weren't Latin-1 letters.
+ __ cmpb(rax, Immediate(247 - 224)); // Check for 247.
+ __ j(equal, on_no_match);
__ bind(&loop_increment);
// Increment pointers into match and capture strings.
__ addq(r11, Immediate(1));
@@ -432,7 +343,7 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
// Set byte_length.
__ movq(r8, rbx);
// Isolate.
- __ LoadAddress(r9, ExternalReference::isolate_address());
+ __ LoadAddress(r9, ExternalReference::isolate_address(isolate()));
#else // AMD64 calling convention
// Compute byte_offset2 (current position = rsi+rdi).
__ lea(rax, Operand(rsi, rdi, times_1, 0));
@@ -443,14 +354,14 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
// Set byte_length.
__ movq(rdx, rbx);
// Isolate.
- __ LoadAddress(rcx, ExternalReference::isolate_address());
+ __ LoadAddress(rcx, ExternalReference::isolate_address(isolate()));
#endif
{ // NOLINT: Can't find a way to open this scope without confusing the
// linter.
AllowExternalCallThatCantCauseGC scope(&masm_);
ExternalReference compare =
- ExternalReference::re_case_insensitive_compare_uc16(masm_.isolate());
+ ExternalReference::re_case_insensitive_compare_uc16(isolate());
__ CallCFunction(compare, num_arguments);
}
@@ -486,7 +397,7 @@ void RegExpMacroAssemblerX64::CheckNotBackReference(
// Fail on partial or illegal capture (start of capture after end of capture).
// This must not happen (no back-reference can reference a capture that wasn't
// closed before in the reg-exp).
- __ Check(greater_equal, "Invalid capture referenced");
+ __ Check(greater_equal, kInvalidCaptureReferenced);
// Succeed on empty capture (including non-participating capture)
__ j(equal, &fallthrough);
@@ -610,7 +521,7 @@ void RegExpMacroAssemblerX64::CheckBitInTable(
Label* on_bit_set) {
__ Move(rax, table);
Register index = current_character();
- if (mode_ != ASCII || kTableMask != String::kMaxAsciiCharCode) {
+ if (mode_ != ASCII || kTableMask != String::kMaxOneByteCharCode) {
__ movq(rbx, current_character());
__ and_(rbx, Immediate(kTableMask));
index = rbx;
@@ -631,29 +542,23 @@ bool RegExpMacroAssemblerX64::CheckSpecialCharacterClass(uc16 type,
case 's':
// Match space-characters
if (mode_ == ASCII) {
- // ASCII space characters are '\t'..'\r' and ' '.
+ // One byte space characters are '\t'..'\r', ' ' and \u00a0.
Label success;
__ cmpl(current_character(), Immediate(' '));
- __ j(equal, &success);
+ __ j(equal, &success, Label::kNear);
// Check range 0x09..0x0d
__ lea(rax, Operand(current_character(), -'\t'));
__ cmpl(rax, Immediate('\r' - '\t'));
- BranchOrBacktrack(above, on_no_match);
+ __ j(below_equal, &success, Label::kNear);
+ // \u00a0 (NBSP).
+ __ cmpl(rax, Immediate(0x00a0 - '\t'));
+ BranchOrBacktrack(not_equal, on_no_match);
__ bind(&success);
return true;
}
return false;
case 'S':
- // Match non-space characters.
- if (mode_ == ASCII) {
- // ASCII space characters are '\t'..'\r' and ' '.
- __ cmpl(current_character(), Immediate(' '));
- BranchOrBacktrack(equal, on_no_match);
- __ lea(rax, Operand(current_character(), -'\t'));
- __ cmpl(rax, Immediate('\r' - '\t'));
- BranchOrBacktrack(below_equal, on_no_match);
- return true;
- }
+ // The emitted code for generic character classes is good enough.
return false;
case 'd':
// Match ASCII digits ('0'..'9')
@@ -811,7 +716,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
Label stack_ok;
ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit(masm_.isolate());
+ ExternalReference::address_of_stack_limit(isolate());
__ movq(rcx, rsp);
__ movq(kScratchRegister, stack_limit);
__ subq(rcx, Operand(kScratchRegister, 0));
@@ -856,7 +761,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
// position registers.
__ movq(Operand(rbp, kInputStartMinusOne), rax);
-#ifdef WIN32
+#if V8_OS_WIN
// Ensure that we have written to each stack page, in order. Skipping a page
// on Windows can cause segmentation faults. Assuming page size is 4k.
const int kPageSize = 4096;
@@ -866,7 +771,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
i += kRegistersPerPage) {
__ movq(register_location(i), rax); // One write every page.
}
-#endif // WIN32
+#endif // V8_OS_WIN
// Initialize code object pointer.
__ Move(code_object_pointer(), masm_.CodeObject());
@@ -1056,15 +961,15 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
// Microsoft passes parameters in rcx, rdx, r8.
// First argument, backtrack stackpointer, is already in rcx.
__ lea(rdx, Operand(rbp, kStackHighEnd)); // Second argument
- __ LoadAddress(r8, ExternalReference::isolate_address());
+ __ LoadAddress(r8, ExternalReference::isolate_address(isolate()));
#else
// AMD64 ABI passes parameters in rdi, rsi, rdx.
__ movq(rdi, backtrack_stackpointer()); // First argument.
__ lea(rsi, Operand(rbp, kStackHighEnd)); // Second argument.
- __ LoadAddress(rdx, ExternalReference::isolate_address());
+ __ LoadAddress(rdx, ExternalReference::isolate_address(isolate()));
#endif
ExternalReference grow_stack =
- ExternalReference::re_grow_stack(masm_.isolate());
+ ExternalReference::re_grow_stack(isolate());
__ CallCFunction(grow_stack, num_arguments);
// If return NULL, we have failed to grow the stack, and
// must exit with a stack-overflow exception.
@@ -1093,7 +998,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
CodeDesc code_desc;
masm_.GetCode(&code_desc);
- Isolate* isolate = ISOLATE;
+ Isolate* isolate = this->isolate();
Handle<Code> code = isolate->factory()->NewCode(
code_desc, Code::ComputeFlags(Code::REGEXP),
masm_.CodeObject());
@@ -1267,7 +1172,7 @@ void RegExpMacroAssemblerX64::CallCheckStackGuardState() {
__ lea(rdi, Operand(rsp, -kPointerSize));
#endif
ExternalReference stack_check =
- ExternalReference::re_check_stack_guard_state(masm_.isolate());
+ ExternalReference::re_check_stack_guard_state(isolate());
__ CallCFunction(stack_check, num_arguments);
}
@@ -1283,7 +1188,6 @@ int RegExpMacroAssemblerX64::CheckStackGuardState(Address* return_address,
Code* re_code,
Address re_frame) {
Isolate* isolate = frame_entry<Isolate*>(re_frame, kIsolate);
- ASSERT(isolate == Isolate::Current());
if (isolate->stack_guard()->IsStackOverflow()) {
isolate->StackOverflow();
return EXCEPTION;
@@ -1305,7 +1209,7 @@ int RegExpMacroAssemblerX64::CheckStackGuardState(Address* return_address,
Handle<String> subject(frame_entry<String*>(re_frame, kInputString));
// Current string.
- bool is_ascii = subject->IsAsciiRepresentationUnderneath();
+ bool is_ascii = subject->IsOneByteRepresentationUnderneath();
ASSERT(re_code->instruction_start() <= *return_address);
ASSERT(*return_address <=
@@ -1336,7 +1240,7 @@ int RegExpMacroAssemblerX64::CheckStackGuardState(Address* return_address,
}
// String might have changed.
- if (subject_tmp->IsAsciiRepresentation() != is_ascii) {
+ if (subject_tmp->IsOneByteRepresentation() != is_ascii) {
// If we changed between an ASCII and a UC16 string, the specialized
// code cannot be used, and we need to restart regexp matching from
// scratch (including, potentially, compiling a new version of the code).
@@ -1486,7 +1390,7 @@ void RegExpMacroAssemblerX64::CheckPreemption() {
// Check for preemption.
Label no_preempt;
ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit(masm_.isolate());
+ ExternalReference::address_of_stack_limit(isolate());
__ load_rax(stack_limit);
__ cmpq(rsp, rax);
__ j(above, &no_preempt);
@@ -1500,7 +1404,7 @@ void RegExpMacroAssemblerX64::CheckPreemption() {
void RegExpMacroAssemblerX64::CheckStackLimit() {
Label no_stack_overflow;
ExternalReference stack_limit =
- ExternalReference::address_of_regexp_stack_limit(masm_.isolate());
+ ExternalReference::address_of_regexp_stack_limit(isolate());
__ load_rax(stack_limit);
__ cmpq(backtrack_stackpointer(), rax);
__ j(above, &no_stack_overflow);
diff --git a/deps/v8/src/x64/regexp-macro-assembler-x64.h b/deps/v8/src/x64/regexp-macro-assembler-x64.h
index a082cf2df..b230ea47f 100644
--- a/deps/v8/src/x64/regexp-macro-assembler-x64.h
+++ b/deps/v8/src/x64/regexp-macro-assembler-x64.h
@@ -55,10 +55,6 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
Label* on_equal);
virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
virtual void CheckCharacterLT(uc16 limit, Label* on_less);
- virtual void CheckCharacters(Vector<const uc16> str,
- int cp_offset,
- Label* on_failure,
- bool check_end_of_string);
// A "greedy loop" is a loop that is both greedy and with a simple
// body. It has a particularly simple implementation.
virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
@@ -272,6 +268,8 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
// Increments the stack pointer (rcx) by a word size.
inline void Drop();
+ Isolate* isolate() const { return masm_.isolate(); }
+
MacroAssembler masm_;
MacroAssembler::NoRootArrayScope no_root_array_scope_;
diff --git a/deps/v8/src/x64/stub-cache-x64.cc b/deps/v8/src/x64/stub-cache-x64.cc
index b120efb37..28e2a8962 100644
--- a/deps/v8/src/x64/stub-cache-x64.cc
+++ b/deps/v8/src/x64/stub-cache-x64.cc
@@ -27,8 +27,9 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_X64)
+#if V8_TARGET_ARCH_X64
+#include "arguments.h"
#include "ic-inl.h"
#include "codegen.h"
#include "stub-cache.h"
@@ -53,7 +54,7 @@ static void ProbeTable(Isolate* isolate,
ASSERT(kPointerSizeLog2 == kHeapObjectTagSize + 1);
ScaleFactor scale_factor = times_2;
- ASSERT_EQ(24, sizeof(StubCache::Entry));
+ ASSERT_EQ(3 * kPointerSize, sizeof(StubCache::Entry));
// The offset register holds the entry offset times four (due to masking
// and shifting optimizations).
ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
@@ -106,38 +107,34 @@ static void ProbeTable(Isolate* isolate,
}
-// Helper function used to check that the dictionary doesn't contain
-// the property. This function may return false negatives, so miss_label
-// must always call a backup property check that is complete.
-// This function is safe to call if the receiver has fast properties.
-// Name must be a symbol and receiver must be a heap object.
-static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
- Label* miss_label,
- Register receiver,
- Handle<String> name,
- Register r0,
- Register r1) {
- ASSERT(name->IsSymbol());
+void StubCompiler::GenerateDictionaryNegativeLookup(MacroAssembler* masm,
+ Label* miss_label,
+ Register receiver,
+ Handle<Name> name,
+ Register scratch0,
+ Register scratch1) {
+ ASSERT(name->IsUniqueName());
+ ASSERT(!receiver.is(scratch0));
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->negative_lookups(), 1);
__ IncrementCounter(counters->negative_lookups_miss(), 1);
- __ movq(r0, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ movq(scratch0, FieldOperand(receiver, HeapObject::kMapOffset));
const int kInterceptorOrAccessCheckNeededMask =
(1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
// Bail out if the receiver has a named interceptor or requires access checks.
- __ testb(FieldOperand(r0, Map::kBitFieldOffset),
+ __ testb(FieldOperand(scratch0, Map::kBitFieldOffset),
Immediate(kInterceptorOrAccessCheckNeededMask));
__ j(not_zero, miss_label);
// Check that receiver is a JSObject.
- __ CmpInstanceType(r0, FIRST_SPEC_OBJECT_TYPE);
+ __ CmpInstanceType(scratch0, FIRST_SPEC_OBJECT_TYPE);
__ j(below, miss_label);
// Load properties array.
- Register properties = r0;
+ Register properties = scratch0;
__ movq(properties, FieldOperand(receiver, JSObject::kPropertiesOffset));
// Check that the properties array is a dictionary.
@@ -146,12 +143,12 @@ static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
__ j(not_equal, miss_label);
Label done;
- StringDictionaryLookupStub::GenerateNegativeLookup(masm,
- miss_label,
- &done,
- properties,
- name,
- r1);
+ NameDictionaryLookupStub::GenerateNegativeLookup(masm,
+ miss_label,
+ &done,
+ properties,
+ name,
+ scratch1);
__ bind(&done);
__ DecrementCounter(counters->negative_lookups_miss(), 1);
}
@@ -171,8 +168,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
USE(extra2); // The register extra2 is not used on the X64 platform.
USE(extra3); // The register extra3 is not used on the X64 platform.
// Make sure that code is valid. The multiplying code relies on the
- // entry size being 24.
- ASSERT(sizeof(Entry) == 24);
+ // entry size being 3 * kPointerSize.
+ ASSERT(sizeof(Entry) == 3 * kPointerSize);
// Make sure the flags do not name a specific type.
ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
@@ -193,7 +190,7 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
__ JumpIfSmi(receiver, &miss);
// Get the map of the receiver and compute the hash.
- __ movl(scratch, FieldOperand(name, String::kHashFieldOffset));
+ __ movl(scratch, FieldOperand(name, Name::kHashFieldOffset));
// Use only the low 32 bits of the map pointer.
__ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
__ xor_(scratch, Immediate(flags));
@@ -205,7 +202,7 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
ProbeTable(isolate, masm, flags, kPrimary, receiver, name, scratch);
// Primary miss: Compute hash for secondary probe.
- __ movl(scratch, FieldOperand(name, String::kHashFieldOffset));
+ __ movl(scratch, FieldOperand(name, Name::kHashFieldOffset));
__ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
__ xor_(scratch, Immediate(flags));
__ and_(scratch, Immediate((kPrimaryTableSize - 1) << kHeapObjectTagSize));
@@ -303,32 +300,28 @@ void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
Register receiver,
Register scratch1,
Register scratch2,
- Label* miss,
- bool support_wrappers) {
+ Label* miss) {
Label check_wrapper;
// Check if the object is a string leaving the instance type in the
// scratch register.
- GenerateStringCheck(masm, receiver, scratch1, miss,
- support_wrappers ? &check_wrapper : miss);
+ GenerateStringCheck(masm, receiver, scratch1, miss, &check_wrapper);
// Load length directly from the string.
__ movq(rax, FieldOperand(receiver, String::kLengthOffset));
__ ret(0);
- if (support_wrappers) {
- // Check if the object is a JSValue wrapper.
- __ bind(&check_wrapper);
- __ cmpl(scratch1, Immediate(JS_VALUE_TYPE));
- __ j(not_equal, miss);
+ // Check if the object is a JSValue wrapper.
+ __ bind(&check_wrapper);
+ __ cmpl(scratch1, Immediate(JS_VALUE_TYPE));
+ __ j(not_equal, miss);
- // Check if the wrapped value is a string and load the length
- // directly if it is.
- __ movq(scratch2, FieldOperand(receiver, JSValue::kValueOffset));
- GenerateStringCheck(masm, scratch2, scratch1, miss, miss);
- __ movq(rax, FieldOperand(scratch2, String::kLengthOffset));
- __ ret(0);
- }
+ // Check if the wrapped value is a string and load the length
+ // directly if it is.
+ __ movq(scratch2, FieldOperand(receiver, JSValue::kValueOffset));
+ GenerateStringCheck(masm, scratch2, scratch1, miss, miss);
+ __ movq(rax, FieldOperand(scratch2, String::kLengthOffset));
+ __ ret(0);
}
@@ -343,26 +336,21 @@ void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
}
-// Load a fast property out of a holder object (src). In-object properties
-// are loaded directly otherwise the property is loaded from the properties
-// fixed array.
void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
Register dst,
Register src,
- Handle<JSObject> holder,
- int index) {
- // Adjust for the number of properties stored in the holder.
- index -= holder->map()->inobject_properties();
- if (index < 0) {
- // Get the property straight out of the holder.
- int offset = holder->map()->instance_size() + (index * kPointerSize);
- __ movq(dst, FieldOperand(src, offset));
- } else {
+ bool inobject,
+ int index,
+ Representation representation) {
+ ASSERT(!FLAG_track_double_fields || !representation.IsDouble());
+ int offset = index * kPointerSize;
+ if (!inobject) {
// Calculate the offset into the properties array.
- int offset = index * kPointerSize + FixedArray::kHeaderSize;
+ offset = offset + FixedArray::kHeaderSize;
__ movq(dst, FieldOperand(src, JSObject::kPropertiesOffset));
- __ movq(dst, FieldOperand(dst, offset));
+ src = dst;
}
+ __ movq(dst, FieldOperand(src, offset));
}
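A hypothetical call site under the new signature (registers, index and representation are chosen purely for illustration):

// Load the property stored at in-object index 1 of the holder in rdx into rax.
GenerateFastPropertyLoad(masm, rax, rdx,
                         true,  // inobject
                         1,     // index
                         Representation::Tagged());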
@@ -371,6 +359,11 @@ static void PushInterceptorArguments(MacroAssembler* masm,
Register holder,
Register name,
Handle<JSObject> holder_obj) {
+ STATIC_ASSERT(StubCache::kInterceptorArgsNameIndex == 0);
+ STATIC_ASSERT(StubCache::kInterceptorArgsInfoIndex == 1);
+ STATIC_ASSERT(StubCache::kInterceptorArgsThisIndex == 2);
+ STATIC_ASSERT(StubCache::kInterceptorArgsHolderIndex == 3);
+ STATIC_ASSERT(StubCache::kInterceptorArgsLength == 4);
__ push(name);
Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor));
@@ -378,8 +371,6 @@ static void PushInterceptorArguments(MacroAssembler* masm,
__ push(kScratchRegister);
__ push(receiver);
__ push(holder);
- __ push(FieldOperand(kScratchRegister, InterceptorInfo::kDataOffset));
- __ PushAddress(ExternalReference::isolate_address());
}
@@ -394,7 +385,7 @@ static void CompileCallLoadPropertyWithInterceptor(
ExternalReference ref =
ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly),
masm->isolate());
- __ Set(rax, 6);
+ __ Set(rax, StubCache::kInterceptorArgsLength);
__ LoadAddress(rbx, ref);
CEntryStub stub(1);
@@ -403,7 +394,7 @@ static void CompileCallLoadPropertyWithInterceptor(
// Number of pointers to be reserved on stack for fast API call.
-static const int kFastApiCallArguments = 4;
+static const int kFastApiCallArguments = FunctionCallbackArguments::kArgsLength;
// Reserves space for the extra arguments to API function in the
@@ -415,12 +406,14 @@ static void ReserveSpaceForFastApiCall(MacroAssembler* masm, Register scratch) {
// -- rsp[0] : return address
// -- rsp[8] : last argument in the internal frame of the caller
// -----------------------------------
- __ movq(scratch, Operand(rsp, 0));
+ __ movq(scratch, StackOperandForReturnAddress(0));
__ subq(rsp, Immediate(kFastApiCallArguments * kPointerSize));
- __ movq(Operand(rsp, 0), scratch);
+ __ movq(StackOperandForReturnAddress(0), scratch);
__ Move(scratch, Smi::FromInt(0));
- for (int i = 1; i <= kFastApiCallArguments; i++) {
- __ movq(Operand(rsp, i * kPointerSize), scratch);
+ StackArgumentsAccessor args(rsp, kFastApiCallArguments,
+ ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ for (int i = 0; i < kFastApiCallArguments; i++) {
+ __ movq(args.GetArgumentOperand(i), scratch);
}
}
@@ -428,15 +421,17 @@ static void ReserveSpaceForFastApiCall(MacroAssembler* masm, Register scratch) {
// Undoes the effects of ReserveSpaceForFastApiCall.
static void FreeSpaceForFastApiCall(MacroAssembler* masm, Register scratch) {
// ----------- S t a t e -------------
- // -- rsp[0] : return address.
- // -- rsp[8] : last fast api call extra argument.
+ // -- rsp[0] : return address.
+ // -- rsp[8] : last fast api call extra argument.
// -- ...
- // -- rsp[kFastApiCallArguments * 8] : first fast api call extra argument.
+ // -- rsp[kFastApiCallArguments * 8] : first fast api call extra
+ // argument.
// -- rsp[kFastApiCallArguments * 8 + 8] : last argument in the internal
// frame.
// -----------------------------------
- __ movq(scratch, Operand(rsp, 0));
- __ movq(Operand(rsp, kFastApiCallArguments * kPointerSize), scratch);
+ __ movq(scratch, StackOperandForReturnAddress(0));
+ __ movq(StackOperandForReturnAddress(kFastApiCallArguments * kPointerSize),
+ scratch);
__ addq(rsp, Immediate(kPointerSize * kFastApiCallArguments));
}
@@ -444,51 +439,56 @@ static void FreeSpaceForFastApiCall(MacroAssembler* masm, Register scratch) {
// Generates call to API function.
static void GenerateFastApiCall(MacroAssembler* masm,
const CallOptimization& optimization,
- int argc) {
+ int argc,
+ bool restore_context) {
// ----------- S t a t e -------------
// -- rsp[0] : return address
- // -- rsp[8] : object passing the type check
- // (last fast api call extra argument,
- // set by CheckPrototypes)
- // -- rsp[16] : api function
- // (first fast api call extra argument)
- // -- rsp[24] : api call data
- // -- rsp[32] : isolate
- // -- rsp[40] : last argument
+ // -- rsp[8] - rsp[56] : FunctionCallbackInfo, incl.
+ // : object passing the type check
+ // (set by CheckPrototypes)
+ // -- rsp[64] : last argument
// -- ...
- // -- rsp[(argc + 4) * 8] : first argument
- // -- rsp[(argc + 5) * 8] : receiver
+ // -- rsp[(argc + 7) * 8] : first argument
+ // -- rsp[(argc + 8) * 8] : receiver
// -----------------------------------
+ typedef FunctionCallbackArguments FCA;
+ StackArgumentsAccessor args(rsp, argc + kFastApiCallArguments);
+
+ // Save calling context.
+ int offset = argc + kFastApiCallArguments;
+ __ movq(args.GetArgumentOperand(offset - FCA::kContextSaveIndex), rsi);
+
// Get the function and setup the context.
Handle<JSFunction> function = optimization.constant_function();
- __ LoadHeapObject(rdi, function);
+ __ Move(rdi, function);
__ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
-
- // Pass the additional arguments.
- __ movq(Operand(rsp, 2 * kPointerSize), rdi);
+ // Construct the FunctionCallbackInfo on the stack.
+ __ movq(args.GetArgumentOperand(offset - FCA::kCalleeIndex), rdi);
Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
- Handle<Object> call_data(api_call_info->data());
+ Handle<Object> call_data(api_call_info->data(), masm->isolate());
if (masm->isolate()->heap()->InNewSpace(*call_data)) {
__ Move(rcx, api_call_info);
__ movq(rbx, FieldOperand(rcx, CallHandlerInfo::kDataOffset));
- __ movq(Operand(rsp, 3 * kPointerSize), rbx);
+ __ movq(args.GetArgumentOperand(offset - FCA::kDataIndex), rbx);
} else {
- __ Move(Operand(rsp, 3 * kPointerSize), call_data);
+ __ Move(args.GetArgumentOperand(offset - FCA::kDataIndex), call_data);
}
- __ movq(kScratchRegister, ExternalReference::isolate_address());
- __ movq(Operand(rsp, 4 * kPointerSize), kScratchRegister);
+ __ movq(kScratchRegister,
+ ExternalReference::isolate_address(masm->isolate()));
+ __ movq(args.GetArgumentOperand(offset - FCA::kIsolateIndex),
+ kScratchRegister);
+ __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
+ __ movq(args.GetArgumentOperand(offset - FCA::kReturnValueDefaultValueIndex),
+ kScratchRegister);
+ __ movq(args.GetArgumentOperand(offset - FCA::kReturnValueOffset),
+ kScratchRegister);
// Prepare arguments.
- __ lea(rbx, Operand(rsp, 4 * kPointerSize));
+ STATIC_ASSERT(kFastApiCallArguments == 7);
+ __ lea(rbx, Operand(rsp, 1 * kPointerSize));
-#if defined(__MINGW64__)
- Register arguments_arg = rcx;
-#elif defined(_WIN64)
- // Win64 uses first register--rcx--for returned value.
- Register arguments_arg = rdx;
-#else
- Register arguments_arg = rdi;
-#endif
+ // Function address is a foreign pointer outside V8's heap.
+ Address function_address = v8::ToCData<Address>(api_call_info->callback());
// Allocate the v8::Arguments structure in the arguments' space since
// it's not controlled by GC.
@@ -496,20 +496,72 @@ static void GenerateFastApiCall(MacroAssembler* masm,
__ PrepareCallApiFunction(kApiStackSpace);
- __ movq(StackSpaceOperand(0), rbx); // v8::Arguments::implicit_args_.
- __ addq(rbx, Immediate(argc * kPointerSize));
- __ movq(StackSpaceOperand(1), rbx); // v8::Arguments::values_.
- __ Set(StackSpaceOperand(2), argc); // v8::Arguments::length_.
- // v8::Arguments::is_construct_call_.
+ __ movq(StackSpaceOperand(0), rbx); // FunctionCallbackInfo::implicit_args_.
+ __ addq(rbx, Immediate((argc + kFastApiCallArguments - 1) * kPointerSize));
+ __ movq(StackSpaceOperand(1), rbx); // FunctionCallbackInfo::values_.
+ __ Set(StackSpaceOperand(2), argc); // FunctionCallbackInfo::length_.
+ // FunctionCallbackInfo::is_construct_call_.
__ Set(StackSpaceOperand(3), 0);
+#if defined(__MINGW64__) || defined(_WIN64)
+ Register arguments_arg = rcx;
+ Register callback_arg = rdx;
+#else
+ Register arguments_arg = rdi;
+ Register callback_arg = rsi;
+#endif
+
// v8::InvocationCallback's argument.
__ lea(arguments_arg, StackSpaceOperand(0));
- // Function address is a foreign pointer outside V8's heap.
- Address function_address = v8::ToCData<Address>(api_call_info->callback());
- __ CallApiFunctionAndReturn(function_address,
- argc + kFastApiCallArguments + 1);
+ Address thunk_address = FUNCTION_ADDR(&InvokeFunctionCallback);
+
+ StackArgumentsAccessor args_from_rbp(rbp, kFastApiCallArguments,
+ ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ Operand context_restore_operand = args_from_rbp.GetArgumentOperand(
+ kFastApiCallArguments - 1 - FCA::kContextSaveIndex);
+ Operand return_value_operand = args_from_rbp.GetArgumentOperand(
+ kFastApiCallArguments - 1 - FCA::kReturnValueOffset);
+ __ CallApiFunctionAndReturn(
+ function_address,
+ thunk_address,
+ callback_arg,
+ argc + kFastApiCallArguments + 1,
+ return_value_operand,
+ restore_context ? &context_restore_operand : NULL);
+}
+
+
+// Generate call to api function.
+static void GenerateFastApiCall(MacroAssembler* masm,
+ const CallOptimization& optimization,
+ Register receiver,
+ Register scratch,
+ int argc,
+ Register* values) {
+ ASSERT(optimization.is_simple_api_call());
+ ASSERT(!receiver.is(scratch));
+
+ const int fast_api_call_argc = argc + kFastApiCallArguments;
+ StackArgumentsAccessor args(rsp, fast_api_call_argc);
+ // argc + 1 is the number of arguments before the FastApiCall arguments;
+ // the extra 1 is the receiver.
+ const int kHolderIndex = argc + 1 +
+ kFastApiCallArguments - 1 - FunctionCallbackArguments::kHolderIndex;
+ __ movq(scratch, StackOperandForReturnAddress(0));
+ // Assign stack space for the call arguments and receiver.
+ __ subq(rsp, Immediate((fast_api_call_argc + 1) * kPointerSize));
+ __ movq(StackOperandForReturnAddress(0), scratch);
+ // Write holder to stack frame.
+ __ movq(args.GetArgumentOperand(kHolderIndex), receiver);
+ __ movq(args.GetReceiverOperand(), receiver);
+ // Write the arguments to stack frame.
+ for (int i = 0; i < argc; i++) {
+ ASSERT(!receiver.is(values[i]));
+ ASSERT(!scratch.is(values[i]));
+ __ movq(args.GetArgumentOperand(i + 1), values[i]);
+ }
+
+ GenerateFastApiCall(masm, optimization, argc, true);
}
@@ -527,7 +579,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
void Compile(MacroAssembler* masm,
Handle<JSObject> object,
Handle<JSObject> holder,
- Handle<String> name,
+ Handle<Name> name,
LookupResult* lookup,
Register receiver,
Register scratch1,
@@ -559,7 +611,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
Register scratch3,
Handle<JSObject> interceptor_holder,
LookupResult* lookup,
- Handle<String> name,
+ Handle<Name> name,
const CallOptimization& optimization,
Label* miss_label) {
ASSERT(optimization.is_constant_call());
@@ -623,12 +675,14 @@ class CallInterceptorCompiler BASE_EMBEDDED {
// Invoke function.
if (can_do_fast_api_call) {
- GenerateFastApiCall(masm, optimization, arguments_.immediate());
+ GenerateFastApiCall(masm, optimization, arguments_.immediate(), false);
} else {
CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
? CALL_AS_FUNCTION
: CALL_AS_METHOD;
- __ InvokeFunction(optimization.constant_function(), arguments_,
+ Handle<JSFunction> fun = optimization.constant_function();
+ ParameterCount expected(fun);
+ __ InvokeFunction(fun, expected, arguments_,
JUMP_FUNCTION, NullCallWrapper(), call_kind);
}
@@ -652,7 +706,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
Register scratch1,
Register scratch2,
Register scratch3,
- Handle<String> name,
+ Handle<Name> name,
Handle<JSObject> interceptor_holder,
Label* miss_label) {
Register holder =
@@ -669,7 +723,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
__ CallExternalReference(
ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
masm->isolate()),
- 6);
+ StubCache::kInterceptorArgsLength);
// Restore the name_ register.
__ pop(name_);
@@ -709,78 +763,92 @@ class CallInterceptorCompiler BASE_EMBEDDED {
};
-void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
- ASSERT(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC);
- Handle<Code> code = (kind == Code::LOAD_IC)
- ? masm->isolate()->builtins()->LoadIC_Miss()
- : masm->isolate()->builtins()->KeyedLoadIC_Miss();
- __ Jump(code, RelocInfo::CODE_TARGET);
+void StoreStubCompiler::GenerateRestoreName(MacroAssembler* masm,
+ Label* label,
+ Handle<Name> name) {
+ if (!label->is_unused()) {
+ __ bind(label);
+ __ Move(this->name(), name);
+ }
}
-void StubCompiler::GenerateKeyedLoadMissForceGeneric(MacroAssembler* masm) {
- Handle<Code> code =
- masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
- __ Jump(code, RelocInfo::CODE_TARGET);
+void StubCompiler::GenerateCheckPropertyCell(MacroAssembler* masm,
+ Handle<JSGlobalObject> global,
+ Handle<Name> name,
+ Register scratch,
+ Label* miss) {
+ Handle<PropertyCell> cell =
+ JSGlobalObject::EnsurePropertyCell(global, name);
+ ASSERT(cell->value()->IsTheHole());
+ __ Move(scratch, cell);
+ __ Cmp(FieldOperand(scratch, Cell::kValueOffset),
+ masm->isolate()->factory()->the_hole_value());
+ __ j(not_equal, miss);
}
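
A hedged sketch of the idea behind GenerateCheckPropertyCell: a cached negative lookup on a global object stays valid only while the property's cell still holds the hole sentinel, so the stub compares the cell value against the hole and misses otherwise. The Cell and hole types below are stand-ins, not the real V8 classes.

#include <cassert>

struct Object {};                // stand-in for a heap object
static Object the_hole;          // stand-in for the hole sentinel

struct Cell { Object* value; };  // stand-in for a global property cell

// Returns true while the cached "property is absent" assumption still holds.
bool NegativeLookupStillValid(const Cell& cell) {
  return cell.value == &the_hole;
}

int main() {
  Cell cell{&the_hole};
  assert(NegativeLookupStillValid(cell));   // property still absent: hit

  Object some_value;
  cell.value = &some_value;                 // property was added later
  assert(!NegativeLookupStillValid(cell));  // stub must miss now
  return 0;
}
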
-// Both name_reg and receiver_reg are preserved on jumps to miss_label,
-// but may be destroyed if store is successful.
-void StubCompiler::GenerateStoreField(MacroAssembler* masm,
- Handle<JSObject> object,
- int index,
- Handle<Map> transition,
- Handle<String> name,
- Register receiver_reg,
- Register name_reg,
- Register scratch1,
- Register scratch2,
- Label* miss_label) {
- LookupResult lookup(masm->isolate());
- object->Lookup(*name, &lookup);
- if (lookup.IsFound() && (lookup.IsReadOnly() || !lookup.IsCacheable())) {
- // In sloppy mode, we could just return the value and be done. However, we
- // might be in strict mode, where we have to throw. Since we cannot tell,
- // go into slow case unconditionally.
- __ jmp(miss_label);
- return;
+void StoreStubCompiler::GenerateNegativeHolderLookup(
+ MacroAssembler* masm,
+ Handle<JSObject> holder,
+ Register holder_reg,
+ Handle<Name> name,
+ Label* miss) {
+ if (holder->IsJSGlobalObject()) {
+ GenerateCheckPropertyCell(
+ masm, Handle<JSGlobalObject>::cast(holder), name, scratch1(), miss);
+ } else if (!holder->HasFastProperties() && !holder->IsJSGlobalProxy()) {
+ GenerateDictionaryNegativeLookup(
+ masm, miss, holder_reg, name, scratch1(), scratch2());
}
+}
- // Check that the map of the object hasn't changed.
- CompareMapMode mode = transition.is_null() ? ALLOW_ELEMENT_TRANSITION_MAPS
- : REQUIRE_EXACT_MAP;
- __ CheckMap(receiver_reg, Handle<Map>(object->map()),
- miss_label, DO_SMI_CHECK, mode);
- // Perform global security token check if needed.
- if (object->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(receiver_reg, scratch1, miss_label);
- }
-
- // Check that we are allowed to write this.
- if (!transition.is_null() && object->GetPrototype()->IsJSObject()) {
- JSObject* holder;
- if (lookup.IsFound()) {
- holder = lookup.holder();
- } else {
- // Find the top object.
- holder = *object;
- do {
- holder = JSObject::cast(holder->GetPrototype());
- } while (holder->GetPrototype()->IsJSObject());
- }
- // We need an extra register, push
- __ push(name_reg);
- Label miss_pop, done_check;
- CheckPrototypes(object, receiver_reg, Handle<JSObject>(holder), name_reg,
- scratch1, scratch2, name, &miss_pop);
- __ jmp(&done_check);
- __ bind(&miss_pop);
- __ pop(name_reg);
- __ jmp(miss_label);
- __ bind(&done_check);
- __ pop(name_reg);
+// receiver_reg is preserved on jumps to miss_label, but may be destroyed if
+// the store is successful.
+void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
+ Handle<JSObject> object,
+ LookupResult* lookup,
+ Handle<Map> transition,
+ Handle<Name> name,
+ Register receiver_reg,
+ Register storage_reg,
+ Register value_reg,
+ Register scratch1,
+ Register scratch2,
+ Register unused,
+ Label* miss_label,
+ Label* slow) {
+ int descriptor = transition->LastAdded();
+ DescriptorArray* descriptors = transition->instance_descriptors();
+ PropertyDetails details = descriptors->GetDetails(descriptor);
+ Representation representation = details.representation();
+ ASSERT(!representation.IsNone());
+
+ if (details.type() == CONSTANT) {
+ Handle<Object> constant(descriptors->GetValue(descriptor), masm->isolate());
+ __ Cmp(value_reg, constant);
+ __ j(not_equal, miss_label);
+ } else if (FLAG_track_fields && representation.IsSmi()) {
+ __ JumpIfNotSmi(value_reg, miss_label);
+ } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
+ __ JumpIfSmi(value_reg, miss_label);
+ } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ Label do_store, heap_number;
+ __ AllocateHeapNumber(storage_reg, scratch1, slow);
+
+ __ JumpIfNotSmi(value_reg, &heap_number);
+ __ SmiToInteger32(scratch1, value_reg);
+ __ Cvtlsi2sd(xmm0, scratch1);
+ __ jmp(&do_store);
+
+ __ bind(&heap_number);
+ __ CheckMap(value_reg, masm->isolate()->factory()->heap_number_map(),
+ miss_label, DONT_DO_SMI_CHECK);
+ __ movsd(xmm0, FieldOperand(value_reg, HeapNumber::kValueOffset));
+
+ __ bind(&do_store);
+ __ movsd(FieldOperand(storage_reg, HeapNumber::kValueOffset), xmm0);
}
// Stub never generated for non-global objects that require access
@@ -788,14 +856,15 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
// Perform map transition for the receiver if necessary.
- if (!transition.is_null() && (object->map()->unused_property_fields() == 0)) {
+ if (details.type() == FIELD &&
+ object->map()->unused_property_fields() == 0) {
// The properties must be extended before we can store the value.
// We jump to a runtime call that extends the properties array.
- __ pop(scratch1); // Return address.
+ __ PopReturnAddressTo(scratch1);
__ push(receiver_reg);
__ Push(transition);
- __ push(rax);
- __ push(scratch1);
+ __ push(value_reg);
+ __ PushReturnAddressFrom(scratch1);
__ TailCallExternalReference(
ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
masm->isolate()),
@@ -804,87 +873,191 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
return;
}
- if (!transition.is_null()) {
- // Update the map of the object.
- __ Move(scratch1, transition);
- __ movq(FieldOperand(receiver_reg, HeapObject::kMapOffset), scratch1);
-
- // Update the write barrier for the map field and pass the now unused
- // name_reg as scratch register.
- __ RecordWriteField(receiver_reg,
- HeapObject::kMapOffset,
- scratch1,
- name_reg,
- kDontSaveFPRegs,
- OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
+ // Update the map of the object.
+ __ Move(scratch1, transition);
+ __ movq(FieldOperand(receiver_reg, HeapObject::kMapOffset), scratch1);
+
+ // Update the write barrier for the map field.
+ __ RecordWriteField(receiver_reg,
+ HeapObject::kMapOffset,
+ scratch1,
+ scratch2,
+ kDontSaveFPRegs,
+ OMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+
+ if (details.type() == CONSTANT) {
+ ASSERT(value_reg.is(rax));
+ __ ret(0);
+ return;
}
+ int index = transition->instance_descriptors()->GetFieldIndex(
+ transition->LastAdded());
+
// Adjust for the number of properties stored in the object. Even in the
// face of a transition we can use the old map here because the size of the
// object and the number of in-object properties is not going to change.
index -= object->map()->inobject_properties();
+ // TODO(verwaest): Share this code as a code stub.
+ SmiCheck smi_check = representation.IsTagged()
+ ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
if (index < 0) {
// Set the property straight into the object.
int offset = object->map()->instance_size() + (index * kPointerSize);
- __ movq(FieldOperand(receiver_reg, offset), rax);
+ if (FLAG_track_double_fields && representation.IsDouble()) {
+ __ movq(FieldOperand(receiver_reg, offset), storage_reg);
+ } else {
+ __ movq(FieldOperand(receiver_reg, offset), value_reg);
+ }
- // Update the write barrier for the array address.
- // Pass the value being stored in the now unused name_reg.
- __ movq(name_reg, rax);
- __ RecordWriteField(
- receiver_reg, offset, name_reg, scratch1, kDontSaveFPRegs);
+ if (!FLAG_track_fields || !representation.IsSmi()) {
+ // Update the write barrier for the array address.
+ if (!FLAG_track_double_fields || !representation.IsDouble()) {
+ __ movq(storage_reg, value_reg);
+ }
+ __ RecordWriteField(
+ receiver_reg, offset, storage_reg, scratch1, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, smi_check);
+ }
} else {
// Write to the properties array.
int offset = index * kPointerSize + FixedArray::kHeaderSize;
// Get the properties array (optimistically).
__ movq(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
- __ movq(FieldOperand(scratch1, offset), rax);
+ if (FLAG_track_double_fields && representation.IsDouble()) {
+ __ movq(FieldOperand(scratch1, offset), storage_reg);
+ } else {
+ __ movq(FieldOperand(scratch1, offset), value_reg);
+ }
- // Update the write barrier for the array address.
- // Pass the value being stored in the now unused name_reg.
- __ movq(name_reg, rax);
- __ RecordWriteField(
- scratch1, offset, name_reg, receiver_reg, kDontSaveFPRegs);
+ if (!FLAG_track_fields || !representation.IsSmi()) {
+ // Update the write barrier for the array address.
+ if (!FLAG_track_double_fields || !representation.IsDouble()) {
+ __ movq(storage_reg, value_reg);
+ }
+ __ RecordWriteField(
+ scratch1, offset, storage_reg, receiver_reg, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, smi_check);
+ }
}
// Return the value (register rax).
+ ASSERT(value_reg.is(rax));
__ ret(0);
}
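
A compact sketch of the representation dispatch that GenerateStoreTransition emits, written as plain C++ rather than macro-assembler calls; Representation and the value shape below are simplified stand-ins for the real V8 types.

#include <cassert>
#include <cstdint>

enum class Representation { kSmi, kHeapObject, kDouble, kTagged };

// A simplified stand-in for a value that is either a smi or a heap number.
struct Value {
  bool is_smi;
  int32_t smi;
  double number;
};

// Mirrors the guards emitted before the transition store: a value whose shape
// does not match the field's representation must take the miss path.
bool RepresentationMatches(Representation r, const Value& v) {
  switch (r) {
    case Representation::kSmi:        return v.is_smi;   // JumpIfNotSmi -> miss
    case Representation::kHeapObject: return !v.is_smi;  // JumpIfSmi -> miss
    case Representation::kDouble:
    case Representation::kTagged:     return true;       // both shapes accepted
  }
  return false;
}

// Mirrors the double path: a smi is widened, a heap number is unboxed.
double PrepareDoubleStorage(const Value& v) {
  return v.is_smi ? static_cast<double>(v.smi) : v.number;
}

int main() {
  Value smi_two{true, 2, 0.0};
  Value boxed{false, 0, 2.5};
  assert(RepresentationMatches(Representation::kSmi, smi_two));
  assert(!RepresentationMatches(Representation::kSmi, boxed));
  assert(PrepareDoubleStorage(smi_two) == 2.0);
  assert(PrepareDoubleStorage(boxed) == 2.5);
  return 0;
}
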
-// Generate code to check that a global property cell is empty. Create
-// the property cell at compilation time if no cell exists for the
-// property.
-static void GenerateCheckPropertyCell(MacroAssembler* masm,
- Handle<GlobalObject> global,
- Handle<String> name,
- Register scratch,
- Label* miss) {
- Handle<JSGlobalPropertyCell> cell =
- GlobalObject::EnsurePropertyCell(global, name);
- ASSERT(cell->value()->IsTheHole());
- __ Move(scratch, cell);
- __ Cmp(FieldOperand(scratch, JSGlobalPropertyCell::kValueOffset),
- masm->isolate()->factory()->the_hole_value());
- __ j(not_equal, miss);
+// Both name_reg and receiver_reg are preserved on jumps to miss_label,
+// but may be destroyed if store is successful.
+void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
+ Handle<JSObject> object,
+ LookupResult* lookup,
+ Register receiver_reg,
+ Register name_reg,
+ Register value_reg,
+ Register scratch1,
+ Register scratch2,
+ Label* miss_label) {
+ // Stub never generated for non-global objects that require access
+ // checks.
+ ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
+
+ int index = lookup->GetFieldIndex().field_index();
+
+ // Adjust for the number of properties stored in the object. Even in the
+ // face of a transition we can use the old map here because the size of the
+ // object and the number of in-object properties is not going to change.
+ index -= object->map()->inobject_properties();
+
+ Representation representation = lookup->representation();
+ ASSERT(!representation.IsNone());
+ if (FLAG_track_fields && representation.IsSmi()) {
+ __ JumpIfNotSmi(value_reg, miss_label);
+ } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
+ __ JumpIfSmi(value_reg, miss_label);
+ } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ // Load the double storage.
+ if (index < 0) {
+ int offset = object->map()->instance_size() + (index * kPointerSize);
+ __ movq(scratch1, FieldOperand(receiver_reg, offset));
+ } else {
+ __ movq(scratch1,
+ FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
+ int offset = index * kPointerSize + FixedArray::kHeaderSize;
+ __ movq(scratch1, FieldOperand(scratch1, offset));
+ }
+
+ // Store the value into the storage.
+ Label do_store, heap_number;
+ __ JumpIfNotSmi(value_reg, &heap_number);
+ __ SmiToInteger32(scratch2, value_reg);
+ __ Cvtlsi2sd(xmm0, scratch2);
+ __ jmp(&do_store);
+
+ __ bind(&heap_number);
+ __ CheckMap(value_reg, masm->isolate()->factory()->heap_number_map(),
+ miss_label, DONT_DO_SMI_CHECK);
+ __ movsd(xmm0, FieldOperand(value_reg, HeapNumber::kValueOffset));
+ __ bind(&do_store);
+ __ movsd(FieldOperand(scratch1, HeapNumber::kValueOffset), xmm0);
+ // Return the value (register rax).
+ ASSERT(value_reg.is(rax));
+ __ ret(0);
+ return;
+ }
+
+ // TODO(verwaest): Share this code as a code stub.
+ SmiCheck smi_check = representation.IsTagged()
+ ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
+ if (index < 0) {
+ // Set the property straight into the object.
+ int offset = object->map()->instance_size() + (index * kPointerSize);
+ __ movq(FieldOperand(receiver_reg, offset), value_reg);
+
+ if (!FLAG_track_fields || !representation.IsSmi()) {
+ // Update the write barrier for the array address.
+ // Pass the value being stored in the now unused name_reg.
+ __ movq(name_reg, value_reg);
+ __ RecordWriteField(
+ receiver_reg, offset, name_reg, scratch1, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, smi_check);
+ }
+ } else {
+ // Write to the properties array.
+ int offset = index * kPointerSize + FixedArray::kHeaderSize;
+ // Get the properties array (optimistically).
+ __ movq(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
+ __ movq(FieldOperand(scratch1, offset), value_reg);
+
+ if (!FLAG_track_fields || !representation.IsSmi()) {
+ // Update the write barrier for the array address.
+ // Pass the value being stored in the now unused name_reg.
+ __ movq(name_reg, value_reg);
+ __ RecordWriteField(
+ scratch1, offset, name_reg, receiver_reg, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, smi_check);
+ }
+ }
+
+ // Return the value (register rax).
+ ASSERT(value_reg.is(rax));
+ __ ret(0);
}
-// Calls GenerateCheckPropertyCell for each global object in the prototype chain
-// from object to (but not including) holder.
-static void GenerateCheckPropertyCells(MacroAssembler* masm,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<String> name,
- Register scratch,
- Label* miss) {
+void StubCompiler::GenerateCheckPropertyCells(MacroAssembler* masm,
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ Register scratch,
+ Label* miss) {
Handle<JSObject> current = object;
while (!current.is_identical_to(holder)) {
- if (current->IsGlobalObject()) {
+ if (current->IsJSGlobalObject()) {
GenerateCheckPropertyCell(masm,
- Handle<GlobalObject>::cast(current),
+ Handle<JSGlobalObject>::cast(current),
name,
scratch,
miss);
@@ -893,6 +1066,12 @@ static void GenerateCheckPropertyCells(MacroAssembler* masm,
}
}
+
+void StubCompiler::GenerateTailCall(MacroAssembler* masm, Handle<Code> code) {
+ __ jmp(code, RelocInfo::CODE_TARGET);
+}
+
+
#undef __
#define __ ACCESS_MASM((masm()))
@@ -903,9 +1082,15 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
Register holder_reg,
Register scratch1,
Register scratch2,
- Handle<String> name,
+ Handle<Name> name,
int save_at_depth,
- Label* miss) {
+ Label* miss,
+ PrototypeCheckType check) {
+ // Make sure that the type feedback oracle harvests the receiver map.
+ // TODO(svenpanne) Remove this hack when all ICs are reworked.
+ __ Move(scratch1, Handle<Map>(object->map()));
+
+ Handle<JSObject> first = object;
// Make sure there's no overlap between holder and object registers.
ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg)
@@ -917,8 +1102,13 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
Register reg = object_reg;
int depth = 0;
+ StackArgumentsAccessor args(rsp, kFastApiCallArguments,
+ ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ const int kHolderIndex = kFastApiCallArguments - 1 -
+ FunctionCallbackArguments::kHolderIndex;
+
if (save_at_depth == depth) {
- __ movq(Operand(rsp, kPointerSize), object_reg);
+ __ movq(args.GetArgumentOperand(kHolderIndex), object_reg);
}
// Check the maps in the prototype chain.
@@ -935,11 +1125,12 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
if (!current->HasFastProperties() &&
!current->IsJSGlobalObject() &&
!current->IsJSGlobalProxy()) {
- if (!name->IsSymbol()) {
- name = factory()->LookupSymbol(name);
+ if (!name->IsUniqueName()) {
+ ASSERT(name->IsString());
+ name = factory()->InternalizeString(Handle<String>::cast(name));
}
ASSERT(current->property_dictionary()->FindEntry(*name) ==
- StringDictionary::kNotFound);
+ NameDictionary::kNotFound);
GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
scratch1, scratch2);
@@ -954,8 +1145,9 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
// Save the map in scratch1 for later.
__ movq(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
}
- __ CheckMap(reg, Handle<Map>(current_map),
- miss, DONT_DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
+ if (!current.is_identical_to(first) || check == CHECK_ALL_MAPS) {
+ __ CheckMap(reg, current_map, miss, DONT_DO_SMI_CHECK);
+ }
// Check access rights to the global object. This has to happen after
// the map check so that we know that the object is actually a global
@@ -976,7 +1168,7 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
}
if (save_at_depth == depth) {
- __ movq(Operand(rsp, kPointerSize), reg);
+ __ movq(args.GetArgumentOperand(kHolderIndex), reg);
}
// Go to the next object in the prototype chain.
@@ -987,9 +1179,10 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
// Log the check depth.
LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
- // Check the holder map.
- __ CheckMap(reg, Handle<Map>(holder->map()),
- miss, DONT_DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
+ if (!holder.is_identical_to(first) || check == CHECK_ALL_MAPS) {
+ // Check the holder map.
+ __ CheckMap(reg, Handle<Map>(holder->map()), miss, DONT_DO_SMI_CHECK);
+ }
// Perform security check for access to the global object.
ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
@@ -1007,186 +1200,202 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
}
-void StubCompiler::GenerateLoadField(Handle<JSObject> object,
- Handle<JSObject> holder,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- int index,
- Handle<String> name,
- Label* miss) {
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss);
+void LoadStubCompiler::HandlerFrontendFooter(Handle<Name> name,
+ Label* success,
+ Label* miss) {
+ if (!miss->is_unused()) {
+ __ jmp(success);
+ __ bind(miss);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+ }
+}
- // Check the prototype chain.
- Register reg = CheckPrototypes(
- object, receiver, holder, scratch1, scratch2, scratch3, name, miss);
- // Get the value from the properties.
- GenerateFastPropertyLoad(masm(), rax, reg, holder, index);
- __ ret(0);
+void StoreStubCompiler::HandlerFrontendFooter(Handle<Name> name,
+ Label* success,
+ Label* miss) {
+ if (!miss->is_unused()) {
+ __ jmp(success);
+ GenerateRestoreName(masm(), miss, name);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+ }
}
-void StubCompiler::GenerateDictionaryLoadCallback(Register receiver,
- Register name_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Handle<AccessorInfo> callback,
- Handle<String> name,
- Label* miss) {
- ASSERT(!receiver.is(scratch1));
- ASSERT(!receiver.is(scratch2));
- ASSERT(!receiver.is(scratch3));
-
- // Load the properties dictionary.
- Register dictionary = scratch1;
- __ movq(dictionary, FieldOperand(receiver, JSObject::kPropertiesOffset));
-
- // Probe the dictionary.
- Label probe_done;
- StringDictionaryLookupStub::GeneratePositiveLookup(masm(),
- miss,
+Register LoadStubCompiler::CallbackHandlerFrontend(
+ Handle<JSObject> object,
+ Register object_reg,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ Label* success,
+ Handle<Object> callback) {
+ Label miss;
+
+ Register reg = HandlerFrontendHeader(object, object_reg, holder, name, &miss);
+
+ if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) {
+ ASSERT(!reg.is(scratch2()));
+ ASSERT(!reg.is(scratch3()));
+ ASSERT(!reg.is(scratch4()));
+
+ // Load the properties dictionary.
+ Register dictionary = scratch4();
+ __ movq(dictionary, FieldOperand(reg, JSObject::kPropertiesOffset));
+
+ // Probe the dictionary.
+ Label probe_done;
+ NameDictionaryLookupStub::GeneratePositiveLookup(masm(),
+ &miss,
&probe_done,
dictionary,
- name_reg,
- scratch2,
- scratch3);
- __ bind(&probe_done);
-
- // If probing finds an entry in the dictionary, scratch3 contains the
- // index into the dictionary. Check that the value is the callback.
- Register index = scratch3;
- const int kElementsStartOffset =
- StringDictionary::kHeaderSize +
- StringDictionary::kElementsStartIndex * kPointerSize;
- const int kValueOffset = kElementsStartOffset + kPointerSize;
- __ movq(scratch2,
- Operand(dictionary, index, times_pointer_size,
- kValueOffset - kHeapObjectTag));
- __ movq(scratch3, callback, RelocInfo::EMBEDDED_OBJECT);
- __ cmpq(scratch2, scratch3);
- __ j(not_equal, miss);
+ this->name(),
+ scratch2(),
+ scratch3());
+ __ bind(&probe_done);
+
+ // If probing finds an entry in the dictionary, scratch3 contains the
+ // index into the dictionary. Check that the value is the callback.
+ Register index = scratch3();
+ const int kElementsStartOffset =
+ NameDictionary::kHeaderSize +
+ NameDictionary::kElementsStartIndex * kPointerSize;
+ const int kValueOffset = kElementsStartOffset + kPointerSize;
+ __ movq(scratch2(),
+ Operand(dictionary, index, times_pointer_size,
+ kValueOffset - kHeapObjectTag));
+ __ movq(scratch3(), callback, RelocInfo::EMBEDDED_OBJECT);
+ __ cmpq(scratch2(), scratch3());
+ __ j(not_equal, &miss);
+ }
+
+ HandlerFrontendFooter(name, success, &miss);
+ return reg;
}
-void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
- Handle<JSObject> holder,
- Register receiver,
- Register name_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Handle<AccessorInfo> callback,
- Handle<String> name,
- Label* miss) {
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss);
+void LoadStubCompiler::GenerateLoadField(Register reg,
+ Handle<JSObject> holder,
+ PropertyIndex field,
+ Representation representation) {
+ if (!reg.is(receiver())) __ movq(receiver(), reg);
+ if (kind() == Code::LOAD_IC) {
+ LoadFieldStub stub(field.is_inobject(holder),
+ field.translate(holder),
+ representation);
+ GenerateTailCall(masm(), stub.GetCode(isolate()));
+ } else {
+ KeyedLoadFieldStub stub(field.is_inobject(holder),
+ field.translate(holder),
+ representation);
+ GenerateTailCall(masm(), stub.GetCode(isolate()));
+ }
+}
- // Check that the maps haven't changed.
- Register reg = CheckPrototypes(object, receiver, holder, scratch1,
- scratch2, scratch3, name, miss);
- if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) {
- GenerateDictionaryLoadCallback(
- reg, name_reg, scratch2, scratch3, scratch4, callback, name, miss);
- }
+void LoadStubCompiler::GenerateLoadCallback(
+ const CallOptimization& call_optimization) {
+ GenerateFastApiCall(
+ masm(), call_optimization, receiver(), scratch3(), 0, NULL);
+}
- // Insert additional parameters into the stack frame above return address.
- ASSERT(!scratch2.is(reg));
- __ pop(scratch2); // Get return address to place it below.
- __ push(receiver); // receiver
- __ push(reg); // holder
+void LoadStubCompiler::GenerateLoadCallback(
+ Register reg,
+ Handle<ExecutableAccessorInfo> callback) {
+ // Insert additional parameters into the stack frame above return address.
+ ASSERT(!scratch4().is(reg));
+ __ PopReturnAddressTo(scratch4());
+
+ STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 0);
+ STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 1);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 2);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 3);
+ STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4);
+ STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5);
+ STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 6);
+ __ push(receiver()); // receiver
if (heap()->InNewSpace(callback->data())) {
- __ Move(scratch1, callback);
- __ push(FieldOperand(scratch1, AccessorInfo::kDataOffset)); // data
+ ASSERT(!scratch2().is(reg));
+ __ Move(scratch2(), callback);
+ __ push(FieldOperand(scratch2(),
+ ExecutableAccessorInfo::kDataOffset)); // data
} else {
- __ Push(Handle<Object>(callback->data()));
+ __ Push(Handle<Object>(callback->data(), isolate()));
}
- __ PushAddress(ExternalReference::isolate_address()); // isolate
- __ push(name_reg); // name
- // Save a pointer to where we pushed the arguments pointer.
- // This will be passed as the const AccessorInfo& to the C++ callback.
+ ASSERT(!kScratchRegister.is(reg));
+ __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
+ __ push(kScratchRegister); // return value
+ __ push(kScratchRegister); // return value default
+ __ PushAddress(ExternalReference::isolate_address(isolate()));
+ __ push(reg); // holder
+ __ push(name()); // name
+ // Save a pointer to where we pushed the arguments pointer. This will be
+ // passed as the const PropertyAccessorInfo& to the C++ callback.
-#if defined(__MINGW64__)
+ Address getter_address = v8::ToCData<Address>(callback->getter());
+
+#if defined(__MINGW64__) || defined(_WIN64)
+ Register getter_arg = r8;
Register accessor_info_arg = rdx;
Register name_arg = rcx;
-#elif defined(_WIN64)
- // Win64 uses first register--rcx--for returned value.
- Register accessor_info_arg = r8;
- Register name_arg = rdx;
#else
+ Register getter_arg = rdx;
Register accessor_info_arg = rsi;
Register name_arg = rdi;
#endif
- ASSERT(!name_arg.is(scratch2));
+ ASSERT(!name_arg.is(scratch4()));
__ movq(name_arg, rsp);
- __ push(scratch2); // Restore return address.
+ __ PushReturnAddressFrom(scratch4());
- // 4 elements array for v8::Arguments::values_ and handler for name.
- const int kStackSpace = 5;
+ // PropertyCallbackArguments::values_ plus a handle for the name.
+ const int kStackSpace = PropertyCallbackArguments::kArgsLength + 1;
// Allocate v8::AccessorInfo in non-GCed stack space.
const int kArgStackSpace = 1;
__ PrepareCallApiFunction(kArgStackSpace);
- __ lea(rax, Operand(name_arg, 4 * kPointerSize));
+ __ lea(rax, Operand(name_arg, 1 * kPointerSize));
- // v8::AccessorInfo::args_.
+ // v8::PropertyAccessorInfo::args_.
__ movq(StackSpaceOperand(0), rax);
// The context register (rsi) has been saved in PrepareCallApiFunction and
// could be used to pass arguments.
__ lea(accessor_info_arg, StackSpaceOperand(0));
- Address getter_address = v8::ToCData<Address>(callback->getter());
- __ CallApiFunctionAndReturn(getter_address, kStackSpace);
-}
+ Address thunk_address = FUNCTION_ADDR(&InvokeAccessorGetterCallback);
+ // The name handler is counted as an argument.
+ StackArgumentsAccessor args(rbp, PropertyCallbackArguments::kArgsLength);
+ Operand return_value_operand = args.GetArgumentOperand(
+ PropertyCallbackArguments::kArgsLength - 1 -
+ PropertyCallbackArguments::kReturnValueOffset);
+ __ CallApiFunctionAndReturn(getter_address,
+ thunk_address,
+ getter_arg,
+ kStackSpace,
+ return_value_operand,
+ NULL);
+}
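
The STATIC_ASSERTs above pin the layout that the push sequence must produce: because the stack grows downward, pushing the receiver first and the holder last leaves the holder at index 0 and the receiver ("this") at index 5. A hedged host-side sketch of that mapping, using a plain array instead of the machine stack:

#include <cassert>
#include <string>
#include <vector>

int main() {
  // Push order taken from the generated code above (the name is pushed after
  // these six and is not part of the PropertyCallbackArguments array).
  std::vector<std::string> pushes = {
      "receiver", "data", "return value", "return value default",
      "isolate", "holder"};

  // On a downward-growing stack the last push ends up at the lowest address,
  // i.e. at index 0 of the resulting argument array.
  std::vector<std::string> args(pushes.rbegin(), pushes.rend());

  assert(args[0] == "holder");                // kHolderIndex == 0
  assert(args[1] == "isolate");               // kIsolateIndex == 1
  assert(args[2] == "return value default");  // kReturnValueDefaultValueIndex == 2
  assert(args[3] == "return value");          // kReturnValueOffset == 3
  assert(args[4] == "data");                  // kDataIndex == 4
  assert(args[5] == "receiver");              // kThisIndex == 5
  return 0;
}
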
-void StubCompiler::GenerateLoadConstant(Handle<JSObject> object,
- Handle<JSObject> holder,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Handle<JSFunction> value,
- Handle<String> name,
- Label* miss) {
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss);
-
- // Check that the maps haven't changed.
- CheckPrototypes(
- object, receiver, holder, scratch1, scratch2, scratch3, name, miss);
+void LoadStubCompiler::GenerateLoadConstant(Handle<Object> value) {
// Return the constant value.
- __ LoadHeapObject(rax, value);
+ __ Move(rax, value);
__ ret(0);
}
-void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
- Handle<JSObject> interceptor_holder,
- LookupResult* lookup,
- Register receiver,
- Register name_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Handle<String> name,
- Label* miss) {
+void LoadStubCompiler::GenerateLoadInterceptor(
+ Register holder_reg,
+ Handle<JSObject> object,
+ Handle<JSObject> interceptor_holder,
+ LookupResult* lookup,
+ Handle<Name> name) {
ASSERT(interceptor_holder->HasNamedInterceptor());
ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss);
-
// So far the most popular follow ups for interceptor loads are FIELD
// and CALLBACKS, so inline only them, other cases may be added
// later.
@@ -1195,8 +1404,9 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
if (lookup->IsField()) {
compile_followup_inline = true;
} else if (lookup->type() == CALLBACKS &&
- lookup->GetCallbackObject()->IsAccessorInfo()) {
- AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
+ lookup->GetCallbackObject()->IsExecutableAccessorInfo()) {
+ ExecutableAccessorInfo* callback =
+ ExecutableAccessorInfo::cast(lookup->GetCallbackObject());
compile_followup_inline = callback->getter() != NULL &&
callback->IsCompatibleReceiver(*object);
}
@@ -1206,17 +1416,14 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
// Compile the interceptor call, followed by inline code to load the
// property from further up the prototype chain if the call fails.
// Check that the maps haven't changed.
- Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, scratch3,
- name, miss);
- ASSERT(holder_reg.is(receiver) || holder_reg.is(scratch1));
+ ASSERT(holder_reg.is(receiver()) || holder_reg.is(scratch1()));
// Preserve the receiver register explicitly whenever it is different from
// the holder and it is needed should the interceptor return without any
// result. The CALLBACKS case needs the receiver to be passed into C++ code,
// the FIELD case might cause a miss during the prototype check.
bool must_perfrom_prototype_check = *interceptor_holder != lookup->holder();
- bool must_preserve_receiver_reg = !receiver.is(holder_reg) &&
+ bool must_preserve_receiver_reg = !receiver().is(holder_reg) &&
(lookup->type() == CALLBACKS || must_perfrom_prototype_check);
// Save necessary data before invoking an interceptor.
@@ -1225,18 +1432,18 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
FrameScope frame_scope(masm(), StackFrame::INTERNAL);
if (must_preserve_receiver_reg) {
- __ push(receiver);
+ __ push(receiver());
}
__ push(holder_reg);
- __ push(name_reg);
+ __ push(this->name());
// Invoke an interceptor. Note: map checks from receiver to
// interceptor's holder has been compiled before (see a caller
// of this method.)
CompileCallLoadPropertyWithInterceptor(masm(),
- receiver,
+ receiver(),
holder_reg,
- name_reg,
+ this->name(),
interceptor_holder);
// Check if interceptor provided a value for property. If it's
@@ -1248,80 +1455,32 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
__ ret(0);
__ bind(&interceptor_failed);
- __ pop(name_reg);
+ __ pop(this->name());
__ pop(holder_reg);
if (must_preserve_receiver_reg) {
- __ pop(receiver);
+ __ pop(receiver());
}
// Leave the internal frame.
}
- // Check that the maps from interceptor's holder to lookup's holder
- // haven't changed. And load lookup's holder into |holder| register.
- if (must_perfrom_prototype_check) {
- holder_reg = CheckPrototypes(interceptor_holder,
- holder_reg,
- Handle<JSObject>(lookup->holder()),
- scratch1,
- scratch2,
- scratch3,
- name,
- miss);
- }
-
- if (lookup->IsField()) {
- // We found FIELD property in prototype chain of interceptor's holder.
- // Retrieve a field from field's holder.
- GenerateFastPropertyLoad(masm(), rax, holder_reg,
- Handle<JSObject>(lookup->holder()),
- lookup->GetFieldIndex());
- __ ret(0);
- } else {
- // We found CALLBACKS property in prototype chain of interceptor's
- // holder.
- ASSERT(lookup->type() == CALLBACKS);
- Handle<AccessorInfo> callback(
- AccessorInfo::cast(lookup->GetCallbackObject()));
- ASSERT(callback->getter() != NULL);
-
- // Tail call to runtime.
- // Important invariant in CALLBACKS case: the code above must be
- // structured to never clobber |receiver| register.
- __ pop(scratch2); // return address
- __ push(receiver);
- __ push(holder_reg);
- __ Move(holder_reg, callback);
- __ push(FieldOperand(holder_reg, AccessorInfo::kDataOffset));
- __ PushAddress(ExternalReference::isolate_address());
- __ push(holder_reg);
- __ push(name_reg);
- __ push(scratch2); // restore return address
-
- ExternalReference ref =
- ExternalReference(IC_Utility(IC::kLoadCallbackProperty),
- isolate());
- __ TailCallExternalReference(ref, 6, 1);
- }
+ GenerateLoadPostInterceptor(holder_reg, interceptor_holder, name, lookup);
} else { // !compile_followup_inline
// Call the runtime system to load the interceptor.
// Check that the maps haven't changed.
- Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, scratch3,
- name, miss);
- __ pop(scratch2); // save old return address
- PushInterceptorArguments(masm(), receiver, holder_reg,
- name_reg, interceptor_holder);
- __ push(scratch2); // restore old return address
+ __ PopReturnAddressTo(scratch2());
+ PushInterceptorArguments(masm(), receiver(), holder_reg,
+ this->name(), interceptor_holder);
+ __ PushReturnAddressFrom(scratch2());
ExternalReference ref = ExternalReference(
IC_Utility(IC::kLoadPropertyWithInterceptorForLoad), isolate());
- __ TailCallExternalReference(ref, 6, 1);
+ __ TailCallExternalReference(ref, StubCache::kInterceptorArgsLength, 1);
}
}
-void CallStubCompiler::GenerateNameCheck(Handle<String> name, Label* miss) {
+void CallStubCompiler::GenerateNameCheck(Handle<Name> name, Label* miss) {
if (kind_ == Code::KEYED_CALL_IC) {
__ Cmp(rcx, name);
__ j(not_equal, miss);
@@ -1331,15 +1490,12 @@ void CallStubCompiler::GenerateNameCheck(Handle<String> name, Label* miss) {
void CallStubCompiler::GenerateGlobalReceiverCheck(Handle<JSObject> object,
Handle<JSObject> holder,
- Handle<String> name,
+ Handle<Name> name,
Label* miss) {
ASSERT(holder->IsGlobalObject());
- // Get the number of arguments.
- const int argc = arguments().immediate();
-
- // Get the receiver from the stack.
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+ StackArgumentsAccessor args(rsp, arguments());
+ __ movq(rdx, args.GetReceiverOperand());
// Check that the maps haven't changed.
@@ -1349,12 +1505,12 @@ void CallStubCompiler::GenerateGlobalReceiverCheck(Handle<JSObject> object,
void CallStubCompiler::GenerateLoadFunctionFromCell(
- Handle<JSGlobalPropertyCell> cell,
+ Handle<Cell> cell,
Handle<JSFunction> function,
Label* miss) {
// Get the value from the cell.
__ Move(rdi, cell);
- __ movq(rdi, FieldOperand(rdi, JSGlobalPropertyCell::kValueOffset));
+ __ movq(rdi, FieldOperand(rdi, Cell::kValueOffset));
// Check that the cell contains the same function.
if (heap()->InNewSpace(*function)) {
@@ -1388,8 +1544,8 @@ void CallStubCompiler::GenerateMissBranch() {
Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
Handle<JSObject> holder,
- int index,
- Handle<String> name) {
+ PropertyIndex index,
+ Handle<Name> name) {
// ----------- S t a t e -------------
// rcx : function name
// rsp[0] : return address
@@ -1403,9 +1559,8 @@ Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
GenerateNameCheck(name, &miss);
- // Get the receiver from the stack.
- const int argc = arguments().immediate();
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+ StackArgumentsAccessor args(rsp, arguments());
+ __ movq(rdx, args.GetReceiverOperand());
// Check that the receiver isn't a smi.
__ JumpIfSmi(rdx, &miss);
@@ -1414,7 +1569,8 @@ Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
Register reg = CheckPrototypes(object, rdx, holder, rbx, rax, rdi,
name, &miss);
- GenerateFastPropertyLoad(masm(), rdi, reg, holder, index);
+ GenerateFastPropertyLoad(masm(), rdi, reg, index.is_inobject(holder),
+ index.translate(holder), Representation::Tagged());
// Check that the function really is a function.
__ JumpIfSmi(rdi, &miss);
@@ -1425,7 +1581,7 @@ Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
// necessary.
if (object->IsGlobalObject()) {
__ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
- __ movq(Operand(rsp, (argc + 1) * kPointerSize), rdx);
+ __ movq(args.GetReceiverOperand(), rdx);
}
// Invoke the function.
@@ -1444,12 +1600,59 @@ Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
}
+Handle<Code> CallStubCompiler::CompileArrayCodeCall(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<Cell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name,
+ Code::StubType type) {
+ Label miss;
+
+ // Check that the function being called is still the Array function.
+ const int argc = arguments().immediate();
+ StackArgumentsAccessor args(rsp, argc);
+ GenerateNameCheck(name, &miss);
+
+ if (cell.is_null()) {
+ __ movq(rdx, args.GetReceiverOperand());
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(rdx, &miss);
+ CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax, rdi,
+ name, &miss);
+ } else {
+ ASSERT(cell->value() == *function);
+ GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
+ &miss);
+ GenerateLoadFunctionFromCell(cell, function, &miss);
+ }
+
+ Handle<AllocationSite> site = isolate()->factory()->NewAllocationSite();
+ site->set_transition_info(Smi::FromInt(GetInitialFastElementsKind()));
+ Handle<Cell> site_feedback_cell = isolate()->factory()->NewCell(site);
+ __ movq(rax, Immediate(argc));
+ __ Move(rbx, site_feedback_cell);
+ __ Move(rdi, function);
+
+ ArrayConstructorStub stub(isolate());
+ __ TailCallStub(&stub);
+
+ __ bind(&miss);
+ GenerateMissBranch();
+
+ // Return the generated code.
+ return GetCode(type, name);
+}
+
+
Handle<Code> CallStubCompiler::CompileArrayPushCall(
Handle<Object> object,
Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<Cell> cell,
Handle<JSFunction> function,
- Handle<String> name) {
+ Handle<String> name,
+ Code::StubType type) {
// ----------- S t a t e -------------
// -- rcx : name
// -- rsp[0] : return address
@@ -1464,9 +1667,9 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
Label miss;
GenerateNameCheck(name, &miss);
- // Get the receiver from the stack.
const int argc = arguments().immediate();
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+ StackArgumentsAccessor args(rsp, argc);
+ __ movq(rdx, args.GetReceiverOperand());
// Check that the receiver isn't a smi.
__ JumpIfSmi(rdx, &miss);
@@ -1482,7 +1685,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
Label call_builtin;
if (argc == 1) { // Otherwise fall through to call builtin.
- Label attempt_to_grow_elements, with_write_barrier;
+ Label attempt_to_grow_elements, with_write_barrier, check_double;
// Get the elements array of the object.
__ movq(rdi, FieldOperand(rdx, JSArray::kElementsOffset));
@@ -1490,7 +1693,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
// Check that the elements are in fast mode and writable.
__ Cmp(FieldOperand(rdi, HeapObject::kMapOffset),
factory()->fixed_array_map());
- __ j(not_equal, &call_builtin);
+ __ j(not_equal, &check_double);
// Get the array's length into rax and calculate new length.
__ SmiToInteger32(rax, FieldOperand(rdx, JSArray::kLengthOffset));
@@ -1505,7 +1708,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ j(greater, &attempt_to_grow_elements);
// Check if value is a smi.
- __ movq(rcx, Operand(rsp, argc * kPointerSize));
+ __ movq(rcx, args.GetArgumentOperand(1));
__ JumpIfNotSmi(rcx, &with_write_barrier);
// Save new length.
@@ -1521,6 +1724,34 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ Integer32ToSmi(rax, rax); // Return new length as smi.
__ ret((argc + 1) * kPointerSize);
+ __ bind(&check_double);
+
+ // Check that the elements are in double mode.
+ __ Cmp(FieldOperand(rdi, HeapObject::kMapOffset),
+ factory()->fixed_double_array_map());
+ __ j(not_equal, &call_builtin);
+
+ // Get the array's length into rax and calculate new length.
+ __ SmiToInteger32(rax, FieldOperand(rdx, JSArray::kLengthOffset));
+ STATIC_ASSERT(FixedArray::kMaxLength < Smi::kMaxValue);
+ __ addl(rax, Immediate(argc));
+
+ // Get the elements' length into rcx.
+ __ SmiToInteger32(rcx, FieldOperand(rdi, FixedArray::kLengthOffset));
+
+ // Check if we could survive without allocation.
+ __ cmpl(rax, rcx);
+ __ j(greater, &call_builtin);
+
+ __ movq(rcx, args.GetArgumentOperand(1));
+ __ StoreNumberToDoubleElements(
+ rcx, rdi, rax, xmm0, &call_builtin, argc * kDoubleSize);
+
+ // Save new length.
+ __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rax);
+ __ Integer32ToSmi(rax, rax); // Return new length as smi.
+ __ ret((argc + 1) * kPointerSize);
+
__ bind(&with_write_barrier);
__ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
@@ -1532,6 +1763,9 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
// In case of fast smi-only, convert to fast object, otherwise bail out.
__ bind(&not_fast_object);
__ CheckFastSmiElements(rbx, &call_builtin);
+ __ Cmp(FieldOperand(rcx, HeapObject::kMapOffset),
+ factory()->heap_number_map());
+ __ j(equal, &call_builtin);
// rdx: receiver
// rbx: map
@@ -1543,7 +1777,9 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
&try_holey_map);
ElementsTransitionGenerator::
- GenerateMapChangeElementsTransition(masm());
+ GenerateMapChangeElementsTransition(masm(),
+ DONT_TRACK_ALLOCATION_SITE,
+ NULL);
// Restore edi.
__ movq(rdi, FieldOperand(rdx, JSArray::kElementsOffset));
__ jmp(&fast_object);
@@ -1555,7 +1791,9 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
rdi,
&call_builtin);
ElementsTransitionGenerator::
- GenerateMapChangeElementsTransition(masm());
+ GenerateMapChangeElementsTransition(masm(),
+ DONT_TRACK_ALLOCATION_SITE,
+ NULL);
__ movq(rdi, FieldOperand(rdx, JSArray::kElementsOffset));
__ bind(&fast_object);
} else {
@@ -1582,7 +1820,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ jmp(&call_builtin);
}
- __ movq(rbx, Operand(rsp, argc * kPointerSize));
+ __ movq(rbx, args.GetArgumentOperand(1));
// Growing elements that are SMI-only requires special handling in case
// the new element is non-Smi. For now, delegate to the builtin.
Label no_fast_elements_check;
@@ -1631,7 +1869,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ RecordWrite(rdi, rdx, rbx, kDontSaveFPRegs, OMIT_REMEMBERED_SET);
// Restore receiver to rdx as finish sequence assumes it's here.
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+ __ movq(rdx, args.GetReceiverOperand());
// Increment element's and array's sizes.
__ SmiAddConstant(FieldOperand(rdi, FixedArray::kLengthOffset),
@@ -1655,16 +1893,17 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
GenerateMissBranch();
// Return the generated code.
- return GetCode(function);
+ return GetCode(type, name);
}
Handle<Code> CallStubCompiler::CompileArrayPopCall(
Handle<Object> object,
Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<Cell> cell,
Handle<JSFunction> function,
- Handle<String> name) {
+ Handle<String> name,
+ Code::StubType type) {
// ----------- S t a t e -------------
// -- rcx : name
// -- rsp[0] : return address
@@ -1679,9 +1918,9 @@ Handle<Code> CallStubCompiler::CompileArrayPopCall(
Label miss, return_undefined, call_builtin;
GenerateNameCheck(name, &miss);
- // Get the receiver from the stack.
const int argc = arguments().immediate();
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+ StackArgumentsAccessor args(rsp, argc);
+ __ movq(rdx, args.GetReceiverOperand());
// Check that the receiver isn't a smi.
__ JumpIfSmi(rdx, &miss);
@@ -1736,16 +1975,17 @@ Handle<Code> CallStubCompiler::CompileArrayPopCall(
GenerateMissBranch();
// Return the generated code.
- return GetCode(function);
+ return GetCode(type, name);
}
Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
Handle<Object> object,
Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<Cell> cell,
Handle<JSFunction> function,
- Handle<String> name) {
+ Handle<String> name,
+ Code::StubType type) {
// ----------- S t a t e -------------
// -- rcx : function name
// -- rsp[0] : return address
@@ -1758,6 +1998,7 @@ Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
const int argc = arguments().immediate();
+ StackArgumentsAccessor args(rsp, argc);
Label miss;
Label name_miss;
@@ -1776,15 +2017,16 @@ Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
rax,
&miss);
ASSERT(!object.is_identical_to(holder));
- CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())),
- rax, holder, rbx, rdx, rdi, name, &miss);
+ CheckPrototypes(
+ Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
+ rax, holder, rbx, rdx, rdi, name, &miss);
Register receiver = rbx;
Register index = rdi;
Register result = rax;
- __ movq(receiver, Operand(rsp, (argc + 1) * kPointerSize));
+ __ movq(receiver, args.GetReceiverOperand());
if (argc > 0) {
- __ movq(index, Operand(rsp, (argc - 0) * kPointerSize));
+ __ movq(index, args.GetArgumentOperand(1));
} else {
__ LoadRoot(index, Heap::kUndefinedValueRootIndex);
}
@@ -1815,16 +2057,17 @@ Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
GenerateMissBranch();
// Return the generated code.
- return GetCode(function);
+ return GetCode(type, name);
}
Handle<Code> CallStubCompiler::CompileStringCharAtCall(
Handle<Object> object,
Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<Cell> cell,
Handle<JSFunction> function,
- Handle<String> name) {
+ Handle<String> name,
+ Code::StubType type) {
// ----------- S t a t e -------------
// -- rcx : function name
// -- rsp[0] : return address
@@ -1837,6 +2080,8 @@ Handle<Code> CallStubCompiler::CompileStringCharAtCall(
if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
const int argc = arguments().immediate();
+ StackArgumentsAccessor args(rsp, argc);
+
Label miss;
Label name_miss;
Label index_out_of_range;
@@ -1854,16 +2099,17 @@ Handle<Code> CallStubCompiler::CompileStringCharAtCall(
rax,
&miss);
ASSERT(!object.is_identical_to(holder));
- CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())),
- rax, holder, rbx, rdx, rdi, name, &miss);
+ CheckPrototypes(
+ Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
+ rax, holder, rbx, rdx, rdi, name, &miss);
Register receiver = rax;
Register index = rdi;
Register scratch = rdx;
Register result = rax;
- __ movq(receiver, Operand(rsp, (argc + 1) * kPointerSize));
+ __ movq(receiver, args.GetReceiverOperand());
if (argc > 0) {
- __ movq(index, Operand(rsp, (argc - 0) * kPointerSize));
+ __ movq(index, args.GetArgumentOperand(1));
} else {
__ LoadRoot(index, Heap::kUndefinedValueRootIndex);
}
@@ -1884,7 +2130,7 @@ Handle<Code> CallStubCompiler::CompileStringCharAtCall(
if (index_out_of_range.is_linked()) {
__ bind(&index_out_of_range);
- __ LoadRoot(rax, Heap::kEmptyStringRootIndex);
+ __ LoadRoot(rax, Heap::kempty_stringRootIndex);
__ ret((argc + 1) * kPointerSize);
}
__ bind(&miss);
@@ -1894,16 +2140,17 @@ Handle<Code> CallStubCompiler::CompileStringCharAtCall(
GenerateMissBranch();
// Return the generated code.
- return GetCode(function);
+ return GetCode(type, name);
}
Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
Handle<Object> object,
Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<Cell> cell,
Handle<JSFunction> function,
- Handle<String> name) {
+ Handle<String> name,
+ Code::StubType type) {
// ----------- S t a t e -------------
// -- rcx : function name
// -- rsp[0] : return address
@@ -1915,13 +2162,14 @@ Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
const int argc = arguments().immediate();
+ StackArgumentsAccessor args(rsp, argc);
if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
Label miss;
GenerateNameCheck(name, &miss);
if (cell.is_null()) {
- __ movq(rdx, Operand(rsp, 2 * kPointerSize));
+ __ movq(rdx, args.GetReceiverOperand());
__ JumpIfSmi(rdx, &miss);
CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax, rdi,
name, &miss);
@@ -1934,7 +2182,7 @@ Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
// Load the char code argument.
Register code = rbx;
- __ movq(code, Operand(rsp, 1 * kPointerSize));
+ __ movq(code, args.GetArgumentOperand(1));
// Check the code is a smi.
Label slow;
@@ -1956,35 +2204,154 @@ Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
? CALL_AS_FUNCTION
: CALL_AS_METHOD;
- __ InvokeFunction(function, arguments(), JUMP_FUNCTION,
- NullCallWrapper(), call_kind);
+ ParameterCount expected(function);
+ __ InvokeFunction(function, expected, arguments(),
+ JUMP_FUNCTION, NullCallWrapper(), call_kind);
__ bind(&miss);
// rcx: function name.
GenerateMissBranch();
// Return the generated code.
- return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
+ return GetCode(type, name);
}
Handle<Code> CallStubCompiler::CompileMathFloorCall(
Handle<Object> object,
Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<Cell> cell,
Handle<JSFunction> function,
- Handle<String> name) {
- // TODO(872): implement this.
- return Handle<Code>::null();
+ Handle<String> name,
+ Code::StubType type) {
+ // ----------- S t a t e -------------
+ // -- rcx : name
+ // -- rsp[0] : return address
+ // -- rsp[(argc - n) * 8] : arg[n] (zero-based)
+ // -- ...
+ // -- rsp[(argc + 1) * 8] : receiver
+ // -----------------------------------
+ const int argc = arguments().immediate();
+ StackArgumentsAccessor args(rsp, argc);
+
+ // If the object is not a JSObject or we got an unexpected number of
+ // arguments, bail out to the regular call.
+ if (!object->IsJSObject() || argc != 1) {
+ return Handle<Code>::null();
+ }
+
+ Label miss;
+ GenerateNameCheck(name, &miss);
+
+ if (cell.is_null()) {
+ __ movq(rdx, args.GetReceiverOperand());
+
+ STATIC_ASSERT(kSmiTag == 0);
+ __ JumpIfSmi(rdx, &miss);
+
+ CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax, rdi,
+ name, &miss);
+ } else {
+ ASSERT(cell->value() == *function);
+ GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
+ &miss);
+ GenerateLoadFunctionFromCell(cell, function, &miss);
+ }
+
+ // Load the (only) argument into rax.
+ __ movq(rax, args.GetArgumentOperand(1));
+
+ // Check if the argument is a smi.
+ Label smi;
+ STATIC_ASSERT(kSmiTag == 0);
+ __ JumpIfSmi(rax, &smi);
+
+ // Check if the argument is a heap number and load its value into xmm0.
+ Label slow;
+ __ CheckMap(rax, factory()->heap_number_map(), &slow, DONT_DO_SMI_CHECK);
+ __ movsd(xmm0, FieldOperand(rax, HeapNumber::kValueOffset));
+
+ // Check if the argument is strictly positive. Note this also discards NaN.
+ __ xorpd(xmm1, xmm1);
+ __ ucomisd(xmm0, xmm1);
+ __ j(below_equal, &slow);
+
+ // Do a truncating conversion.
+ __ cvttsd2si(rax, xmm0);
+
+ // Check for 0x80000000, which signals a failed conversion.
+ Label conversion_failure;
+ __ cmpl(rax, Immediate(0x80000000));
+ __ j(equal, &conversion_failure);
+
+ // Smi tag and return.
+ __ Integer32ToSmi(rax, rax);
+ __ bind(&smi);
+ __ ret(2 * kPointerSize);
+
+ // Check if the argument is < 2^kMantissaBits.
+ Label already_round;
+ __ bind(&conversion_failure);
+ int64_t kTwoMantissaBits = V8_INT64_C(0x4330000000000000);
+ __ movq(rbx, kTwoMantissaBits, RelocInfo::NONE64);
+ __ movq(xmm1, rbx);
+ __ ucomisd(xmm0, xmm1);
+ __ j(above_equal, &already_round);
+
+ // Save a copy of the argument.
+ __ movaps(xmm2, xmm0);
+
+ // Compute (argument + 2^kMantissaBits) - 2^kMantissaBits.
+ __ addsd(xmm0, xmm1);
+ __ subsd(xmm0, xmm1);
+
+ // Compare the argument and the tentative result to get the right mask:
+ // if xmm2 < xmm0:
+ // xmm2 = 1...1
+ // else:
+ // xmm2 = 0...0
+ __ cmpltsd(xmm2, xmm0);
+
+ // Subtract 1 if the argument was less than the tentative result.
+ int64_t kOne = V8_INT64_C(0x3ff0000000000000);
+ __ movq(rbx, kOne, RelocInfo::NONE64);
+ __ movq(xmm1, rbx);
+ __ andpd(xmm1, xmm2);
+ __ subsd(xmm0, xmm1);
+
+ // Return a new heap number.
+ __ AllocateHeapNumber(rax, rbx, &slow);
+ __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
+ __ ret(2 * kPointerSize);
+
+ // Return the argument (when it's an already round heap number).
+ __ bind(&already_round);
+ __ movq(rax, args.GetArgumentOperand(1));
+ __ ret(2 * kPointerSize);
+
+ // Tail call the full function. We do not have to patch the receiver
+ // because the function makes no use of it.
+ __ bind(&slow);
+ ParameterCount expected(function);
+ __ InvokeFunction(function, expected, arguments(),
+ JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
+
+ __ bind(&miss);
+ // rcx: function name.
+ GenerateMissBranch();
+
+ // Return the generated code.
+ return GetCode(type, name);
}
Handle<Code> CallStubCompiler::CompileMathAbsCall(
Handle<Object> object,
Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<Cell> cell,
Handle<JSFunction> function,
- Handle<String> name) {
+ Handle<String> name,
+ Code::StubType type) {
// ----------- S t a t e -------------
// -- rcx : function name
// -- rsp[0] : return address
@@ -1996,13 +2363,14 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall(
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
const int argc = arguments().immediate();
+ StackArgumentsAccessor args(rsp, argc);
if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
Label miss;
GenerateNameCheck(name, &miss);
if (cell.is_null()) {
- __ movq(rdx, Operand(rsp, 2 * kPointerSize));
+ __ movq(rdx, args.GetReceiverOperand());
__ JumpIfSmi(rdx, &miss);
CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax, rdi,
name, &miss);
@@ -2013,38 +2381,37 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall(
GenerateLoadFunctionFromCell(cell, function, &miss);
}
// Load the (only) argument into rax.
- __ movq(rax, Operand(rsp, 1 * kPointerSize));
+ __ movq(rax, args.GetArgumentOperand(1));
// Check if the argument is a smi.
Label not_smi;
STATIC_ASSERT(kSmiTag == 0);
__ JumpIfNotSmi(rax, &not_smi);
- __ SmiToInteger32(rax, rax);
+ // Branchless abs implementation; see:
+ // http://graphics.stanford.edu/~seander/bithacks.html#IntegerAbs
// Set ebx to 1...1 (== -1) if the argument is negative, or to 0...0
// otherwise.
- __ movl(rbx, rax);
- __ sarl(rbx, Immediate(kBitsPerInt - 1));
+ __ movq(rbx, rax);
+ __ sar(rbx, Immediate(kBitsPerPointer - 1));
// Do bitwise not or do nothing depending on ebx.
- __ xorl(rax, rbx);
+ __ xor_(rax, rbx);
// Add 1 or do nothing depending on ebx.
- __ subl(rax, rbx);
+ __ subq(rax, rbx);
// If the result is still negative, go to the slow case.
// This only happens for the most negative smi.
Label slow;
__ j(negative, &slow);
- // Smi case done.
- __ Integer32ToSmi(rax, rax);
__ ret(2 * kPointerSize);
// Check if the argument is a heap number and load its value.
__ bind(&not_smi);
__ CheckMap(rax, factory()->heap_number_map(), &slow, DONT_DO_SMI_CHECK);
- __ movq(rbx, FieldOperand(rax, HeapNumber::kValueOffset));
+ __ MoveDouble(rbx, FieldOperand(rax, HeapNumber::kValueOffset));
// Check the sign of the argument. If the argument is positive,
// just return it.
@@ -2052,7 +2419,7 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall(
const int sign_mask_shift =
(HeapNumber::kExponentOffset - HeapNumber::kValueOffset) * kBitsPerByte;
__ movq(rdi, static_cast<int64_t>(HeapNumber::kSignMask) << sign_mask_shift,
- RelocInfo::NONE);
+ RelocInfo::NONE64);
__ testq(rbx, rdi);
__ j(not_zero, &negative_sign);
__ ret(2 * kPointerSize);
@@ -2062,7 +2429,7 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall(
__ bind(&negative_sign);
__ xor_(rbx, rdi);
__ AllocateHeapNumber(rax, rdx, &slow);
- __ movq(FieldOperand(rax, HeapNumber::kValueOffset), rbx);
+ __ MoveDouble(FieldOperand(rax, HeapNumber::kValueOffset), rbx);
__ ret(2 * kPointerSize);
// Tail call the full function. We do not have to patch the receiver
@@ -2071,15 +2438,16 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall(
CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
? CALL_AS_FUNCTION
: CALL_AS_METHOD;
- __ InvokeFunction(function, arguments(), JUMP_FUNCTION,
- NullCallWrapper(), call_kind);
+ ParameterCount expected(function);
+ __ InvokeFunction(function, expected, arguments(),
+ JUMP_FUNCTION, NullCallWrapper(), call_kind);
__ bind(&miss);
// rcx: function name.
GenerateMissBranch();
// Return the generated code.
- return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
+ return GetCode(type, name);
}
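
The smi path now uses the branchless absolute-value identity from the bit-hacks page referenced above: an arithmetic right shift by the word width minus one yields an all-ones mask for negative inputs, and xor-then-subtract with that mask negates the value. A hedged sketch on 64-bit integers; like the stub, it still misbehaves for the most negative value, which is exactly the case sent to the slow path.

#include <cassert>
#include <cstdint>

// Branchless |x| for two's-complement 64-bit integers (not valid for INT64_MIN,
// which the generated code hands off to the slow path).
int64_t BranchlessAbs(int64_t x) {
  int64_t mask = x >> 63;    // 0...0 for x >= 0, 1...1 (== -1) for x < 0
  return (x ^ mask) - mask;  // bitwise-not then +1 only when x was negative
}

int main() {
  assert(BranchlessAbs(42) == 42);
  assert(BranchlessAbs(-42) == 42);
  assert(BranchlessAbs(0) == 0);
  return 0;
}
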
@@ -2087,7 +2455,7 @@ Handle<Code> CallStubCompiler::CompileFastApiCall(
const CallOptimization& optimization,
Handle<Object> object,
Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<Cell> cell,
Handle<JSFunction> function,
Handle<String> name) {
ASSERT(optimization.is_simple_api_call());
@@ -2103,9 +2471,9 @@ Handle<Code> CallStubCompiler::CompileFastApiCall(
Label miss, miss_before_stack_reserved;
GenerateNameCheck(name, &miss_before_stack_reserved);
- // Get the receiver from the stack.
const int argc = arguments().immediate();
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+ StackArgumentsAccessor args(rsp, argc);
+ __ movq(rdx, args.GetReceiverOperand());
// Check that the receiver isn't a smi.
__ JumpIfSmi(rdx, &miss_before_stack_reserved);
@@ -2123,10 +2491,11 @@ Handle<Code> CallStubCompiler::CompileFastApiCall(
name, depth, &miss);
// Move the return address on top of the stack.
- __ movq(rax, Operand(rsp, 4 * kPointerSize));
- __ movq(Operand(rsp, 0 * kPointerSize), rax);
+ __ movq(rax,
+ StackOperandForReturnAddress(kFastApiCallArguments * kPointerSize));
+ __ movq(StackOperandForReturnAddress(0), rax);
- GenerateFastApiCall(masm(), optimization, argc);
+ GenerateFastApiCall(masm(), optimization, argc, false);
__ bind(&miss);
__ addq(rsp, Immediate(kFastApiCallArguments * kPointerSize));
@@ -2139,11 +2508,11 @@ Handle<Code> CallStubCompiler::CompileFastApiCall(
}
-Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object,
- Handle<JSObject> holder,
- Handle<JSFunction> function,
- Handle<String> name,
- CheckType check) {
+void CallStubCompiler::CompileHandlerFrontend(Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ CheckType check,
+ Label* success) {
// ----------- S t a t e -------------
// rcx : function name
// rsp[0] : return address
@@ -2153,21 +2522,11 @@ Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object,
// rsp[argc * 8] : argument 1
// rsp[(argc + 1) * 8] : argument 0 = receiver
// -----------------------------------
-
- if (HasCustomCallGenerator(function)) {
- Handle<Code> code = CompileCustomCall(object, holder,
- Handle<JSGlobalPropertyCell>::null(),
- function, name);
- // A null handle means bail out to the regular compiler code below.
- if (!code.is_null()) return code;
- }
-
Label miss;
GenerateNameCheck(name, &miss);
- // Get the receiver from the stack.
- const int argc = arguments().immediate();
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+ StackArgumentsAccessor args(rsp, arguments());
+ __ movq(rdx, args.GetReceiverOperand());
// Check that the receiver isn't a smi.
if (check != NUMBER_CHECK) {
@@ -2191,81 +2550,105 @@ Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object,
// necessary.
if (object->IsGlobalObject()) {
__ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
- __ movq(Operand(rsp, (argc + 1) * kPointerSize), rdx);
+ __ movq(args.GetReceiverOperand(), rdx);
}
break;
case STRING_CHECK:
- if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
- // Check that the object is a two-byte string or a symbol.
- __ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, rax);
- __ j(above_equal, &miss);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::STRING_FUNCTION_INDEX, rax, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype())),
- rax, holder, rbx, rdx, rdi, name, &miss);
- } else {
- // Calling non-strict non-builtins with a value as the receiver
- // requires boxing.
- __ jmp(&miss);
- }
+ // Check that the object is a string.
+ __ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, rax);
+ __ j(above_equal, &miss);
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateDirectLoadGlobalFunctionPrototype(
+ masm(), Context::STRING_FUNCTION_INDEX, rax, &miss);
+ CheckPrototypes(
+ Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
+ rax, holder, rbx, rdx, rdi, name, &miss);
break;
- case NUMBER_CHECK:
- if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
- Label fast;
- // Check that the object is a smi or a heap number.
- __ JumpIfSmi(rdx, &fast);
- __ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rax);
- __ j(not_equal, &miss);
- __ bind(&fast);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::NUMBER_FUNCTION_INDEX, rax, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype())),
- rax, holder, rbx, rdx, rdi, name, &miss);
- } else {
- // Calling non-strict non-builtins with a value as the receiver
- // requires boxing.
- __ jmp(&miss);
- }
+ case SYMBOL_CHECK:
+ // Check that the object is a symbol.
+ __ CmpObjectType(rdx, SYMBOL_TYPE, rax);
+ __ j(not_equal, &miss);
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateDirectLoadGlobalFunctionPrototype(
+ masm(), Context::SYMBOL_FUNCTION_INDEX, rax, &miss);
+ CheckPrototypes(
+ Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
+ rax, holder, rbx, rdx, rdi, name, &miss);
break;
- case BOOLEAN_CHECK:
- if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
- Label fast;
- // Check that the object is a boolean.
- __ CompareRoot(rdx, Heap::kTrueValueRootIndex);
- __ j(equal, &fast);
- __ CompareRoot(rdx, Heap::kFalseValueRootIndex);
- __ j(not_equal, &miss);
- __ bind(&fast);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::BOOLEAN_FUNCTION_INDEX, rax, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype())),
- rax, holder, rbx, rdx, rdi, name, &miss);
- } else {
- // Calling non-strict non-builtins with a value as the receiver
- // requires boxing.
- __ jmp(&miss);
- }
+ case NUMBER_CHECK: {
+ Label fast;
+ // Check that the object is a smi or a heap number.
+ __ JumpIfSmi(rdx, &fast);
+ __ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rax);
+ __ j(not_equal, &miss);
+ __ bind(&fast);
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateDirectLoadGlobalFunctionPrototype(
+ masm(), Context::NUMBER_FUNCTION_INDEX, rax, &miss);
+ CheckPrototypes(
+ Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
+ rax, holder, rbx, rdx, rdi, name, &miss);
break;
+ }
+ case BOOLEAN_CHECK: {
+ Label fast;
+ // Check that the object is a boolean.
+ __ CompareRoot(rdx, Heap::kTrueValueRootIndex);
+ __ j(equal, &fast);
+ __ CompareRoot(rdx, Heap::kFalseValueRootIndex);
+ __ j(not_equal, &miss);
+ __ bind(&fast);
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateDirectLoadGlobalFunctionPrototype(
+ masm(), Context::BOOLEAN_FUNCTION_INDEX, rax, &miss);
+ CheckPrototypes(
+ Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
+ rax, holder, rbx, rdx, rdi, name, &miss);
+ break;
+ }
}
- CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- __ InvokeFunction(function, arguments(), JUMP_FUNCTION,
- NullCallWrapper(), call_kind);
+ __ jmp(success);
// Handle call cache miss.
__ bind(&miss);
GenerateMissBranch();
+}
+
+
+void CallStubCompiler::CompileHandlerBackend(Handle<JSFunction> function) {
+ CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
+ ? CALL_AS_FUNCTION
+ : CALL_AS_METHOD;
+ ParameterCount expected(function);
+ __ InvokeFunction(function, expected, arguments(),
+ JUMP_FUNCTION, NullCallWrapper(), call_kind);
+}
+
+
+Handle<Code> CallStubCompiler::CompileCallConstant(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ CheckType check,
+ Handle<JSFunction> function) {
+ if (HasCustomCallGenerator(function)) {
+ Handle<Code> code = CompileCustomCall(object, holder,
+ Handle<PropertyCell>::null(),
+ function, Handle<String>::cast(name),
+ Code::CONSTANT);
+ // A null handle means bail out to the regular compiler code below.
+ if (!code.is_null()) return code;
+ }
+
+ Label success;
+
+ CompileHandlerFrontend(object, holder, name, check, &success);
+ __ bind(&success);
+ CompileHandlerBackend(function);
// Return the generated code.
return GetCode(function);
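
The refactoring above splits CompileCallConstant into a type-checking frontend and a call-emitting backend. A minimal sketch of that control-flow split (plain C++ stand-ins; none of these helpers exist in V8):

#include <functional>

// Frontend: validate the receiver; in the stub this is the CheckType switch
// that ends in jmp(success) or falls through to the miss handler.
bool HandlerFrontendSketch(int expected_kind, int receiver_kind) {
  return expected_kind == receiver_kind;
}

// Backend: perform the call; in the stub this is InvokeFunction(...,
// JUMP_FUNCTION, ...).
int HandlerBackendSketch(const std::function<int()>& function) {
  return function();
}

int CompileCallConstantSketch(int expected_kind, int receiver_kind,
                              const std::function<int()>& function,
                              const std::function<int()>& miss) {
  if (!HandlerFrontendSketch(expected_kind, receiver_kind)) return miss();
  return HandlerBackendSketch(function);
}
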
@@ -2274,7 +2657,7 @@ Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object,
Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
Handle<JSObject> holder,
- Handle<String> name) {
+ Handle<Name> name) {
// ----------- S t a t e -------------
// rcx : function name
// rsp[0] : return address
@@ -2287,21 +2670,20 @@ Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
Label miss;
GenerateNameCheck(name, &miss);
- // Get the number of arguments.
- const int argc = arguments().immediate();
LookupResult lookup(isolate());
LookupPostInterceptor(holder, name, &lookup);
// Get the receiver from the stack.
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+ StackArgumentsAccessor args(rsp, arguments());
+ __ movq(rdx, args.GetReceiverOperand());
CallInterceptorCompiler compiler(this, arguments(), rcx, extra_state_);
compiler.Compile(masm(), object, holder, name, &lookup, rdx, rbx, rdi, rax,
&miss);
// Restore receiver.
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+ __ movq(rdx, args.GetReceiverOperand());
// Check that the function really is a function.
__ JumpIfSmi(rax, &miss);
@@ -2312,7 +2694,7 @@ Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
// necessary.
if (object->IsGlobalObject()) {
__ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
- __ movq(Operand(rsp, (argc + 1) * kPointerSize), rdx);
+ __ movq(args.GetReceiverOperand(), rdx);
}
// Invoke the function.
@@ -2335,9 +2717,9 @@ Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
Handle<Code> CallStubCompiler::CompileCallGlobal(
Handle<JSObject> object,
Handle<GlobalObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<PropertyCell> cell,
Handle<JSFunction> function,
- Handle<String> name) {
+ Handle<Name> name) {
// ----------- S t a t e -------------
// rcx : function name
// rsp[0] : return address
@@ -2349,7 +2731,9 @@ Handle<Code> CallStubCompiler::CompileCallGlobal(
// -----------------------------------
if (HasCustomCallGenerator(function)) {
- Handle<Code> code = CompileCustomCall(object, holder, cell, function, name);
+ Handle<Code> code = CompileCustomCall(
+ object, holder, cell, function, Handle<String>::cast(name),
+ Code::NORMAL);
// A null handle means bail out to the regular compiler code below.
if (!code.is_null()) return code;
}
@@ -2357,15 +2741,14 @@ Handle<Code> CallStubCompiler::CompileCallGlobal(
Label miss;
GenerateNameCheck(name, &miss);
- // Get the number of arguments.
- const int argc = arguments().immediate();
+ StackArgumentsAccessor args(rsp, arguments());
GenerateGlobalReceiverCheck(object, holder, name, &miss);
GenerateLoadFunctionFromCell(cell, function, &miss);
// Patch the receiver on the stack with the global proxy.
if (object->IsGlobalObject()) {
__ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
- __ movq(Operand(rsp, (argc + 1) * kPointerSize), rdx);
+ __ movq(args.GetReceiverOperand(), rdx);
}
// Set up the context (function already in rdi).
@@ -2395,77 +2778,47 @@ Handle<Code> CallStubCompiler::CompileCallGlobal(
}
-Handle<Code> StoreStubCompiler::CompileStoreField(Handle<JSObject> object,
- int index,
- Handle<Map> transition,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : name
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss;
-
- // Generate store field code. Preserves receiver and name on jump to miss.
- GenerateStoreField(masm(),
- object,
- index,
- transition,
- name,
- rdx, rcx, rbx, rdi,
- &miss);
-
- // Handle store cache miss.
- __ bind(&miss);
- Handle<Code> ic = isolate()->builtins()->StoreIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(transition.is_null()
- ? Code::FIELD
- : Code::MAP_TRANSITION, name);
-}
-
-
Handle<Code> StoreStubCompiler::CompileStoreCallback(
- Handle<String> name,
- Handle<JSObject> receiver,
+ Handle<JSObject> object,
Handle<JSObject> holder,
- Handle<AccessorInfo> callback) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : name
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss;
- // Check that the maps haven't changed.
- __ JumpIfSmi(rdx, &miss);
- CheckPrototypes(receiver, rdx, holder, rbx, r8, rdi, name, &miss);
-
- // Stub never generated for non-global objects that require access checks.
- ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
-
- __ pop(rbx); // remove the return address
- __ push(rdx); // receiver
+ Handle<Name> name,
+ Handle<ExecutableAccessorInfo> callback) {
+ Label success;
+ HandlerFrontend(object, receiver(), holder, name, &success);
+ __ bind(&success);
+
+ __ PopReturnAddressTo(scratch1());
+ __ push(receiver());
__ Push(callback); // callback info
- __ push(rcx); // name
- __ push(rax); // value
- __ push(rbx); // restore return address
+ __ Push(name);
+ __ push(value());
+ __ PushReturnAddressFrom(scratch1());
// Do tail-call to the runtime system.
ExternalReference store_callback_property =
ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate());
__ TailCallExternalReference(store_callback_property, 4, 1);
- // Handle store cache miss.
- __ bind(&miss);
- Handle<Code> ic = isolate()->builtins()->StoreIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
+ // Return the generated code.
+ return GetCode(kind(), Code::CALLBACKS, name);
+}
+
+
+Handle<Code> StoreStubCompiler::CompileStoreCallback(
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ const CallOptimization& call_optimization) {
+ Label success;
+ HandlerFrontend(object, receiver(), holder, name, &success);
+ __ bind(&success);
+
+ Register values[] = { value() };
+ GenerateFastApiCall(
+ masm(), call_optimization, receiver(), scratch3(), 1, values);
// Return the generated code.
- return GetCode(Code::CALLBACKS, name);
+ return GetCode(kind(), Code::CALLBACKS, name);
}
@@ -2493,8 +2846,9 @@ void StoreStubCompiler::GenerateStoreViaSetter(
__ push(rdx);
__ push(rax);
ParameterCount actual(1);
- __ InvokeFunction(setter, actual, CALL_FUNCTION, NullCallWrapper(),
- CALL_AS_METHOD);
+ ParameterCount expected(setter);
+ __ InvokeFunction(setter, expected, actual,
+ CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -2515,190 +2869,23 @@ void StoreStubCompiler::GenerateStoreViaSetter(
#define __ ACCESS_MASM(masm())
-Handle<Code> StoreStubCompiler::CompileStoreViaSetter(
- Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<JSFunction> setter) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : name
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss;
-
- // Check that the maps haven't changed.
- __ JumpIfSmi(rdx, &miss);
- CheckPrototypes(receiver, rdx, holder, rbx, r8, rdi, name, &miss);
-
- GenerateStoreViaSetter(masm(), setter);
-
- __ bind(&miss);
- Handle<Code> ic = isolate()->builtins()->StoreIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(Code::CALLBACKS, name);
-}
-
-
Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
- Handle<JSObject> receiver,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : name
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss;
-
- // Check that the map of the object hasn't changed.
- __ CheckMap(rdx, Handle<Map>(receiver->map()), &miss,
- DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
-
- // Perform global security token check if needed.
- if (receiver->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(rdx, rbx, &miss);
- }
-
- // Stub never generated for non-global objects that require access
- // checks.
- ASSERT(receiver->IsJSGlobalProxy() || !receiver->IsAccessCheckNeeded());
-
- __ pop(rbx); // remove the return address
- __ push(rdx); // receiver
- __ push(rcx); // name
- __ push(rax); // value
- __ Push(Smi::FromInt(strict_mode_));
- __ push(rbx); // restore return address
+ Handle<JSObject> object,
+ Handle<Name> name) {
+ __ PopReturnAddressTo(scratch1());
+ __ push(receiver());
+ __ push(this->name());
+ __ push(value());
+ __ Push(Smi::FromInt(strict_mode()));
+ __ PushReturnAddressFrom(scratch1());
// Do tail-call to the runtime system.
ExternalReference store_ic_property =
ExternalReference(IC_Utility(IC::kStoreInterceptorProperty), isolate());
__ TailCallExternalReference(store_ic_property, 4, 1);
- // Handle store cache miss.
- __ bind(&miss);
- Handle<Code> ic = isolate()->builtins()->StoreIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(Code::INTERCEPTOR, name);
-}
-
-
-Handle<Code> StoreStubCompiler::CompileStoreGlobal(
- Handle<GlobalObject> object,
- Handle<JSGlobalPropertyCell> cell,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : name
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss;
-
- // Check that the map of the global has not changed.
- __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
- Handle<Map>(object->map()));
- __ j(not_equal, &miss);
-
- // Compute the cell operand to use.
- __ Move(rbx, cell);
- Operand cell_operand = FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset);
-
- // Check that the value in the cell is not the hole. If it is, this
- // cell could have been deleted and reintroducing the global needs
- // to update the property details in the property dictionary of the
- // global object. We bail out to the runtime system to do that.
- __ CompareRoot(cell_operand, Heap::kTheHoleValueRootIndex);
- __ j(equal, &miss);
-
- // Store the value in the cell.
- __ movq(cell_operand, rax);
- // Cells are always rescanned, so no write barrier here.
-
- // Return the value (register rax).
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->named_store_global_inline(), 1);
- __ ret(0);
-
- // Handle store cache miss.
- __ bind(&miss);
- __ IncrementCounter(counters->named_store_global_inline_miss(), 1);
- Handle<Code> ic = isolate()->builtins()->StoreIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(Code::NORMAL, name);
-}
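
The deleted CompileStoreGlobal fast path stored straight into the global's property cell unless the cell held the hole. A hedged sketch of that check-then-store logic (types and the hole sentinel are stand-ins, not V8's):

struct TheHoleSketch {};
const TheHoleSketch kTheHoleSketch{};

struct PropertyCellSketch {
  const void* value;
};

// Returns false when the runtime must handle the store (the property was
// deleted and its details need to be reinstated in the global's dictionary).
bool TryStoreGlobalSketch(PropertyCellSketch* cell, const void* new_value) {
  if (cell->value == &kTheHoleSketch) return false;  // miss: go to runtime
  cell->value = new_value;  // cells are always rescanned, so no write barrier
  return true;
}
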
-
-
-Handle<Code> KeyedStoreStubCompiler::CompileStoreField(Handle<JSObject> object,
- int index,
- Handle<Map> transition,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss;
-
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->keyed_store_field(), 1);
-
- // Check that the name has not changed.
- __ Cmp(rcx, name);
- __ j(not_equal, &miss);
-
- // Generate store field code. Preserves receiver and name on jump to miss.
- GenerateStoreField(masm(),
- object,
- index,
- transition,
- name,
- rdx, rcx, rbx, rdi,
- &miss);
-
- // Handle store cache miss.
- __ bind(&miss);
- __ DecrementCounter(counters->keyed_store_field(), 1);
- Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(transition.is_null()
- ? Code::FIELD
- : Code::MAP_TRANSITION, name);
-}
-
-
-Handle<Code> KeyedStoreStubCompiler::CompileStoreElement(
- Handle<Map> receiver_map) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
-
- ElementsKind elements_kind = receiver_map->elements_kind();
- bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
- Handle<Code> stub =
- KeyedStoreElementStub(is_js_array, elements_kind, grow_mode_).GetCode();
-
- __ DispatchMap(rdx, receiver_map, stub, DO_SMI_CHECK);
-
- Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
- __ jmp(ic, RelocInfo::CODE_TARGET);
-
// Return the generated code.
- return GetCode(Code::NORMAL, factory()->empty_string());
+ return GetCode(kind(), Code::INTERCEPTOR, name);
}
@@ -2706,116 +2893,98 @@ Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
MapHandleList* receiver_maps,
CodeHandleList* handler_stubs,
MapHandleList* transitioned_maps) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
Label miss;
- __ JumpIfSmi(rdx, &miss, Label::kNear);
+ __ JumpIfSmi(receiver(), &miss, Label::kNear);
- __ movq(rdi, FieldOperand(rdx, HeapObject::kMapOffset));
+ __ movq(scratch1(), FieldOperand(receiver(), HeapObject::kMapOffset));
int receiver_count = receiver_maps->length();
for (int i = 0; i < receiver_count; ++i) {
// Check map and tail call if there's a match
- __ Cmp(rdi, receiver_maps->at(i));
+ __ Cmp(scratch1(), receiver_maps->at(i));
if (transitioned_maps->at(i).is_null()) {
__ j(equal, handler_stubs->at(i), RelocInfo::CODE_TARGET);
} else {
Label next_map;
__ j(not_equal, &next_map, Label::kNear);
- __ movq(rbx, transitioned_maps->at(i), RelocInfo::EMBEDDED_OBJECT);
+ __ movq(transition_map(),
+ transitioned_maps->at(i),
+ RelocInfo::EMBEDDED_OBJECT);
__ jmp(handler_stubs->at(i), RelocInfo::CODE_TARGET);
__ bind(&next_map);
}
}
__ bind(&miss);
- Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
- __ jmp(ic, RelocInfo::CODE_TARGET);
+
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
// Return the generated code.
- return GetCode(Code::NORMAL, factory()->empty_string(), MEGAMORPHIC);
+ return GetICCode(
+ kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
}
-Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<String> name,
- Handle<JSObject> object,
- Handle<JSObject> last) {
- // ----------- S t a t e -------------
- // -- rax : receiver
- // -- rcx : name
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss;
-
- // Check that receiver is not a smi.
- __ JumpIfSmi(rax, &miss);
+Handle<Code> LoadStubCompiler::CompileLoadNonexistent(
+ Handle<JSObject> object,
+ Handle<JSObject> last,
+ Handle<Name> name,
+ Handle<JSGlobalObject> global) {
+ Label success;
- // Check the maps of the full prototype chain. Also check that
- // global property cells up to (but not including) the last object
- // in the prototype chain are empty.
- CheckPrototypes(object, rax, last, rbx, rdx, rdi, name, &miss);
-
- // If the last object in the prototype chain is a global object,
- // check that the global property cell is empty.
- if (last->IsGlobalObject()) {
- GenerateCheckPropertyCell(
- masm(), Handle<GlobalObject>::cast(last), name, rdx, &miss);
- }
+ NonexistentHandlerFrontend(object, last, name, &success, global);
+ __ bind(&success);
// Return undefined if maps of the full prototype chain are still the
// same and no global property with this name contains a value.
__ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
__ ret(0);
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
-
// Return the generated code.
- return GetCode(Code::NONEXISTENT, factory()->empty_string());
+ return GetCode(kind(), Code::NONEXISTENT, name);
}
-Handle<Code> LoadStubCompiler::CompileLoadField(Handle<JSObject> object,
- Handle<JSObject> holder,
- int index,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- rax : receiver
- // -- rcx : name
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss;
+Register* LoadStubCompiler::registers() {
+ // receiver, name, scratch1, scratch2, scratch3, scratch4.
+ static Register registers[] = { rax, rcx, rdx, rbx, rdi, r8 };
+ return registers;
+}
- GenerateLoadField(object, holder, rax, rbx, rdx, rdi, index, name, &miss);
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
- // Return the generated code.
- return GetCode(Code::FIELD, name);
+Register* KeyedLoadStubCompiler::registers() {
+ // receiver, name, scratch1, scratch2, scratch3, scratch4.
+ static Register registers[] = { rdx, rax, rbx, rcx, rdi, r8 };
+ return registers;
}
-Handle<Code> LoadStubCompiler::CompileLoadCallback(
- Handle<String> name,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<AccessorInfo> callback) {
- // ----------- S t a t e -------------
- // -- rax : receiver
- // -- rcx : name
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss;
- GenerateLoadCallback(object, holder, rax, rcx, rdx, rbx, rdi, r8, callback,
- name, &miss);
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
+Register* StoreStubCompiler::registers() {
+ // receiver, name, value, scratch1, scratch2, scratch3.
+ static Register registers[] = { rdx, rcx, rax, rbx, rdi, r8 };
+ return registers;
+}
- // Return the generated code.
- return GetCode(Code::CALLBACKS, name);
+
+Register* KeyedStoreStubCompiler::registers() {
+ // receiver, name, value, scratch1, scratch2, scratch3.
+ static Register registers[] = { rdx, rcx, rax, rbx, rdi, r8 };
+ return registers;
+}
+
+
+void KeyedLoadStubCompiler::GenerateNameCheck(Handle<Name> name,
+ Register name_reg,
+ Label* miss) {
+ __ Cmp(name_reg, name);
+ __ j(not_equal, miss);
+}
+
+
+void KeyedStoreStubCompiler::GenerateNameCheck(Handle<Name> name,
+ Register name_reg,
+ Label* miss) {
+ __ Cmp(name_reg, name);
+ __ j(not_equal, miss);
}
@@ -2824,6 +2993,7 @@ Handle<Code> LoadStubCompiler::CompileLoadCallback(
void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
+ Register receiver,
Handle<JSFunction> getter) {
// ----------- S t a t e -------------
// -- rax : receiver
@@ -2835,10 +3005,11 @@ void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
if (!getter.is_null()) {
// Call the JavaScript getter with the receiver on the stack.
- __ push(rax);
+ __ push(receiver);
ParameterCount actual(0);
- __ InvokeFunction(getter, actual, CALL_FUNCTION, NullCallWrapper(),
- CALL_AS_METHOD);
+ ParameterCount expected(getter);
+ __ InvokeFunction(getter, expected, actual,
+ CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -2856,96 +3027,24 @@ void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
#define __ ACCESS_MASM(masm())
-Handle<Code> LoadStubCompiler::CompileLoadViaGetter(
- Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<JSFunction> getter) {
- // ----------- S t a t e -------------
- // -- rax : receiver
- // -- rcx : name
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss;
-
- // Check that the maps haven't changed.
- __ JumpIfSmi(rax, &miss);
- CheckPrototypes(receiver, rax, holder, rbx, rdx, rdi, name, &miss);
-
- GenerateLoadViaGetter(masm(), getter),
-
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
-
- // Return the generated code.
- return GetCode(Code::CALLBACKS, name);
-}
-
-
-Handle<Code> LoadStubCompiler::CompileLoadConstant(Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<JSFunction> value,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- rax : receiver
- // -- rcx : name
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss;
-
- GenerateLoadConstant(object, holder, rax, rbx, rdx, rdi, value, name, &miss);
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
-
- // Return the generated code.
- return GetCode(Code::CONSTANT_FUNCTION, name);
-}
-
-
-Handle<Code> LoadStubCompiler::CompileLoadInterceptor(Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- rax : receiver
- // -- rcx : name
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss;
- LookupResult lookup(isolate());
- LookupPostInterceptor(holder, name, &lookup);
-
- // TODO(368): Compile in the whole chain: all the interceptors in
- // prototypes and ultimate answer.
- GenerateLoadInterceptor(receiver, holder, &lookup, rax, rcx, rdx, rbx, rdi,
- name, &miss);
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
-
- // Return the generated code.
- return GetCode(Code::INTERCEPTOR, name);
-}
-
-
Handle<Code> LoadStubCompiler::CompileLoadGlobal(
Handle<JSObject> object,
- Handle<GlobalObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<String> name,
+ Handle<GlobalObject> global,
+ Handle<PropertyCell> cell,
+ Handle<Name> name,
bool is_dont_delete) {
- // ----------- S t a t e -------------
- // -- rax : receiver
- // -- rcx : name
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss;
+ Label success, miss;
+ // TODO(verwaest): Directly store to rax. Currently we cannot do this, since
+ // rax is used as receiver(), which we would otherwise clobber before a
+ // potential miss.
- // Check that the maps haven't changed.
- __ JumpIfSmi(rax, &miss);
- CheckPrototypes(object, rax, holder, rbx, rdx, rdi, name, &miss);
+ __ CheckMap(receiver(), Handle<Map>(object->map()), &miss, DO_SMI_CHECK);
+ HandlerFrontendHeader(
+ object, receiver(), Handle<JSObject>::cast(global), name, &miss);
// Get the value from the cell.
__ Move(rbx, cell);
- __ movq(rbx, FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset));
+ __ movq(rbx, FieldOperand(rbx, PropertyCell::kValueOffset));
// Check for deleted property if property can actually be deleted.
if (!is_dont_delete) {
@@ -2953,396 +3052,57 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
__ j(equal, &miss);
} else if (FLAG_debug_code) {
__ CompareRoot(rbx, Heap::kTheHoleValueRootIndex);
- __ Check(not_equal, "DontDelete cells can't contain the hole");
+ __ Check(not_equal, kDontDeleteCellsCannotContainTheHole);
}
+ HandlerFrontendFooter(name, &success, &miss);
+ __ bind(&success);
+
Counters* counters = isolate()->counters();
__ IncrementCounter(counters->named_load_global_stub(), 1);
__ movq(rax, rbx);
__ ret(0);
- __ bind(&miss);
- __ IncrementCounter(counters->named_load_global_stub_miss(), 1);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
-
// Return the generated code.
- return GetCode(Code::NORMAL, name);
+ return GetICCode(kind(), Code::NORMAL, name);
}
-Handle<Code> KeyedLoadStubCompiler::CompileLoadField(Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- int index) {
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss;
-
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->keyed_load_field(), 1);
-
- // Check that the name has not changed.
- __ Cmp(rax, name);
- __ j(not_equal, &miss);
-
- GenerateLoadField(receiver, holder, rdx, rbx, rcx, rdi, index, name, &miss);
-
- __ bind(&miss);
- __ DecrementCounter(counters->keyed_load_field(), 1);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- // Return the generated code.
- return GetCode(Code::FIELD, name);
-}
-
-
-Handle<Code> KeyedLoadStubCompiler::CompileLoadCallback(
- Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<AccessorInfo> callback) {
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss;
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->keyed_load_callback(), 1);
-
- // Check that the name has not changed.
- __ Cmp(rax, name);
- __ j(not_equal, &miss);
-
- GenerateLoadCallback(receiver, holder, rdx, rax, rbx, rcx, rdi, r8, callback,
- name, &miss);
- __ bind(&miss);
- __ DecrementCounter(counters->keyed_load_callback(), 1);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- // Return the generated code.
- return GetCode(Code::CALLBACKS, name);
-}
-
-
-Handle<Code> KeyedLoadStubCompiler::CompileLoadConstant(
- Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<JSFunction> value) {
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss;
-
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->keyed_load_constant_function(), 1);
-
- // Check that the name has not changed.
- __ Cmp(rax, name);
- __ j(not_equal, &miss);
-
- GenerateLoadConstant(receiver, holder, rdx, rbx, rcx, rdi,
- value, name, &miss);
- __ bind(&miss);
- __ DecrementCounter(counters->keyed_load_constant_function(), 1);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- // Return the generated code.
- return GetCode(Code::CONSTANT_FUNCTION, name);
-}
-
-
-Handle<Code> KeyedLoadStubCompiler::CompileLoadInterceptor(
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss;
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->keyed_load_interceptor(), 1);
-
- // Check that the name has not changed.
- __ Cmp(rax, name);
- __ j(not_equal, &miss);
-
- LookupResult lookup(isolate());
- LookupPostInterceptor(holder, name, &lookup);
- GenerateLoadInterceptor(receiver, holder, &lookup, rdx, rax, rcx, rbx, rdi,
- name, &miss);
- __ bind(&miss);
- __ DecrementCounter(counters->keyed_load_interceptor(), 1);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- // Return the generated code.
- return GetCode(Code::INTERCEPTOR, name);
-}
-
-
-Handle<Code> KeyedLoadStubCompiler::CompileLoadArrayLength(
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss;
-
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->keyed_load_array_length(), 1);
-
- // Check that the name has not changed.
- __ Cmp(rax, name);
- __ j(not_equal, &miss);
-
- GenerateLoadArrayLength(masm(), rdx, rcx, &miss);
- __ bind(&miss);
- __ DecrementCounter(counters->keyed_load_array_length(), 1);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- // Return the generated code.
- return GetCode(Code::CALLBACKS, name);
-}
-
-
-Handle<Code> KeyedLoadStubCompiler::CompileLoadStringLength(
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss;
-
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->keyed_load_string_length(), 1);
-
- // Check that the name has not changed.
- __ Cmp(rax, name);
- __ j(not_equal, &miss);
-
- GenerateLoadStringLength(masm(), rdx, rcx, rbx, &miss, true);
- __ bind(&miss);
- __ DecrementCounter(counters->keyed_load_string_length(), 1);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- // Return the generated code.
- return GetCode(Code::CALLBACKS, name);
-}
-
-
-Handle<Code> KeyedLoadStubCompiler::CompileLoadFunctionPrototype(
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss;
-
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->keyed_load_function_prototype(), 1);
-
- // Check that the name has not changed.
- __ Cmp(rax, name);
- __ j(not_equal, &miss);
-
- GenerateLoadFunctionPrototype(masm(), rdx, rcx, rbx, &miss);
- __ bind(&miss);
- __ DecrementCounter(counters->keyed_load_function_prototype(), 1);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- // Return the generated code.
- return GetCode(Code::CALLBACKS, name);
-}
-
-
-Handle<Code> KeyedLoadStubCompiler::CompileLoadElement(
- Handle<Map> receiver_map) {
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- ElementsKind elements_kind = receiver_map->elements_kind();
- Handle<Code> stub = KeyedLoadElementStub(elements_kind).GetCode();
-
- __ DispatchMap(rdx, receiver_map, stub, DO_SMI_CHECK);
-
- Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Miss();
- __ jmp(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(Code::NORMAL, factory()->empty_string());
-}
-
-
-Handle<Code> KeyedLoadStubCompiler::CompileLoadPolymorphic(
+Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC(
MapHandleList* receiver_maps,
- CodeHandleList* handler_ics) {
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
+ CodeHandleList* handlers,
+ Handle<Name> name,
+ Code::StubType type,
+ IcCheckType check) {
Label miss;
- __ JumpIfSmi(rdx, &miss);
- Register map_reg = rbx;
- __ movq(map_reg, FieldOperand(rdx, HeapObject::kMapOffset));
- int receiver_count = receiver_maps->length();
- for (int current = 0; current < receiver_count; ++current) {
- // Check map and tail call if there's a match
- __ Cmp(map_reg, receiver_maps->at(current));
- __ j(equal, handler_ics->at(current), RelocInfo::CODE_TARGET);
+ if (check == PROPERTY) {
+ GenerateNameCheck(name, this->name(), &miss);
}
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- // Return the generated code.
- return GetCode(Code::NORMAL, factory()->empty_string(), MEGAMORPHIC);
-}
-
-
-// Specialized stub for constructing objects from functions which have only
-// simple assignments of the form this.x = ...; in their body.
-Handle<Code> ConstructStubCompiler::CompileConstructStub(
- Handle<JSFunction> function) {
- // ----------- S t a t e -------------
- // -- rax : argc
- // -- rdi : constructor
- // -- rsp[0] : return address
- // -- rsp[4] : last argument
- // -----------------------------------
- Label generic_stub_call;
-
- // Use r8 for holding undefined which is used in several places below.
- __ Move(r8, factory()->undefined_value());
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Check to see whether there are any break points in the function code. If
- // there are jump to the generic constructor stub which calls the actual
- // code for the function thereby hitting the break points.
- __ movq(rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ movq(rbx, FieldOperand(rbx, SharedFunctionInfo::kDebugInfoOffset));
- __ cmpq(rbx, r8);
- __ j(not_equal, &generic_stub_call);
-#endif
-
- // Load the initial map and verify that it is in fact a map.
- // rdi: constructor
- __ movq(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
- // Will both indicate a NULL and a Smi.
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfSmi(rbx, &generic_stub_call);
- __ CmpObjectType(rbx, MAP_TYPE, rcx);
- __ j(not_equal, &generic_stub_call);
-
-#ifdef DEBUG
- // Cannot construct functions this way.
- // rbx: initial map
- __ CmpInstanceType(rbx, JS_FUNCTION_TYPE);
- __ Check(not_equal, "Function constructed by construct stub.");
-#endif
-
- // Now allocate the JSObject in new space.
- // rbx: initial map
- ASSERT(function->has_initial_map());
- int instance_size = function->initial_map()->instance_size();
-#ifdef DEBUG
- __ movzxbq(rcx, FieldOperand(rbx, Map::kInstanceSizeOffset));
- __ shl(rcx, Immediate(kPointerSizeLog2));
- __ cmpq(rcx, Immediate(instance_size));
- __ Check(equal, "Instance size of initial map changed.");
-#endif
- __ AllocateInNewSpace(instance_size, rdx, rcx, no_reg,
- &generic_stub_call, NO_ALLOCATION_FLAGS);
-
- // Allocated the JSObject, now initialize the fields and add the heap tag.
- // rbx: initial map
- // rdx: JSObject (untagged)
- __ movq(Operand(rdx, JSObject::kMapOffset), rbx);
- __ Move(rbx, factory()->empty_fixed_array());
- __ movq(Operand(rdx, JSObject::kPropertiesOffset), rbx);
- __ movq(Operand(rdx, JSObject::kElementsOffset), rbx);
-
- // rax: argc
- // rdx: JSObject (untagged)
- // Load the address of the first in-object property into r9.
- __ lea(r9, Operand(rdx, JSObject::kHeaderSize));
- // Calculate the location of the first argument. The stack contains only the
- // return address on top of the argc arguments.
- __ lea(rcx, Operand(rsp, rax, times_pointer_size, 0));
-
- // rax: argc
- // rcx: first argument
- // rdx: JSObject (untagged)
- // r8: undefined
- // r9: first in-object property of the JSObject
- // Fill the initialized properties with a constant value or a passed argument
- // depending on the this.x = ...; assignment in the function.
- Handle<SharedFunctionInfo> shared(function->shared());
- for (int i = 0; i < shared->this_property_assignments_count(); i++) {
- if (shared->IsThisPropertyAssignmentArgument(i)) {
- // Check if the argument assigned to the property is actually passed.
- // If argument is not passed the property is set to undefined,
- // otherwise find it on the stack.
- int arg_number = shared->GetThisPropertyAssignmentArgument(i);
- __ movq(rbx, r8);
- __ cmpq(rax, Immediate(arg_number));
- __ cmovq(above, rbx, Operand(rcx, arg_number * -kPointerSize));
- // Store value in the property.
- __ movq(Operand(r9, i * kPointerSize), rbx);
- } else {
- // Set the property to the constant value.
- Handle<Object> constant(shared->GetThisPropertyAssignmentConstant(i));
- __ Move(Operand(r9, i * kPointerSize), constant);
+ __ JumpIfSmi(receiver(), &miss);
+ Register map_reg = scratch1();
+ __ movq(map_reg, FieldOperand(receiver(), HeapObject::kMapOffset));
+ int receiver_count = receiver_maps->length();
+ int number_of_handled_maps = 0;
+ for (int current = 0; current < receiver_count; ++current) {
+ Handle<Map> map = receiver_maps->at(current);
+ if (!map->is_deprecated()) {
+ number_of_handled_maps++;
+ // Check map and tail call if there's a match
+ __ Cmp(map_reg, receiver_maps->at(current));
+ __ j(equal, handlers->at(current), RelocInfo::CODE_TARGET);
}
}
+ ASSERT(number_of_handled_maps > 0);
- // Fill the unused in-object property fields with undefined.
- for (int i = shared->this_property_assignments_count();
- i < function->initial_map()->inobject_properties();
- i++) {
- __ movq(Operand(r9, i * kPointerSize), r8);
- }
-
- // rax: argc
- // rdx: JSObject (untagged)
- // Move argc to rbx and the JSObject to return to rax and tag it.
- __ movq(rbx, rax);
- __ movq(rax, rdx);
- __ or_(rax, Immediate(kHeapObjectTag));
-
- // rax: JSObject
- // rbx: argc
- // Remove caller arguments and receiver from the stack and return.
- __ pop(rcx);
- __ lea(rsp, Operand(rsp, rbx, times_pointer_size, 1 * kPointerSize));
- __ push(rcx);
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->constructed_objects(), 1);
- __ IncrementCounter(counters->constructed_objects_stub(), 1);
- __ ret(0);
-
- // Jump to the generic stub in case the specialized code cannot handle the
- // construction.
- __ bind(&generic_stub_call);
- Handle<Code> code = isolate()->builtins()->JSConstructStubGeneric();
- __ Jump(code, RelocInfo::CODE_TARGET);
+ __ bind(&miss);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
// Return the generated code.
- return GetCode();
+ InlineCacheState state =
+ number_of_handled_maps > 1 ? POLYMORPHIC : MONOMORPHIC;
+ return GetICCode(kind(), type, name, state);
}
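
The new CompilePolymorphicIC emits a linear map dispatch that skips deprecated maps and falls through to the miss builtin. A hedged C++ sketch of that dispatch shape (types and names are illustrative only):

#include <cstddef>
#include <functional>
#include <vector>

struct MapSketch {
  bool is_deprecated;
};

int PolymorphicDispatchSketch(const MapSketch* receiver_map,
                              const std::vector<const MapSketch*>& maps,
                              const std::vector<std::function<int()>>& handlers,
                              const std::function<int()>& miss) {
  for (std::size_t i = 0; i < maps.size(); ++i) {
    if (maps[i]->is_deprecated) continue;               // not compiled in
    if (maps[i] == receiver_map) return handlers[i]();  // "tail call" on match
  }
  return miss();  // TailCallBuiltin(masm(), MissBuiltin(kind()))
}
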
@@ -3378,727 +3138,17 @@ void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
// ----------- S t a t e -------------
// -- rax : key
// -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Handle<Code> slow_ic =
- masm->isolate()->builtins()->KeyedLoadIC_Slow();
- __ jmp(slow_ic, RelocInfo::CODE_TARGET);
-
- __ bind(&miss_force_generic);
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Handle<Code> miss_ic =
- masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
- __ jmp(miss_ic, RelocInfo::CODE_TARGET);
-}
-
-
-static void GenerateSmiKeyCheck(MacroAssembler* masm,
- Register key,
- Register scratch,
- XMMRegister xmm_scratch0,
- XMMRegister xmm_scratch1,
- Label* fail) {
- // Check that key is a smi or a heap number containing a smi and branch
- // if the check fails.
- Label key_ok;
- __ JumpIfSmi(key, &key_ok);
- __ CheckMap(key,
- masm->isolate()->factory()->heap_number_map(),
- fail,
- DONT_DO_SMI_CHECK);
- __ movsd(xmm_scratch0, FieldOperand(key, HeapNumber::kValueOffset));
- __ cvttsd2si(scratch, xmm_scratch0);
- __ cvtlsi2sd(xmm_scratch1, scratch);
- __ ucomisd(xmm_scratch1, xmm_scratch0);
- __ j(not_equal, fail);
- __ j(parity_even, fail); // NaN.
- __ Integer32ToSmi(key, scratch);
- __ bind(&key_ok);
-}
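
The deleted GenerateSmiKeyCheck accepted heap-number keys only when truncating to an integer and converting back reproduces the original value (which also rejects NaN). A rough scalar C++ analogue, with an explicit range guard because the plain C++ cast, unlike cvttsd2si, is undefined for out-of-range inputs (the helper name is hypothetical):

#include <cmath>
#include <cstdint>
#include <optional>

std::optional<std::int32_t> KeyAsSmiSketch(double key) {
  if (std::isnan(key) || key < INT32_MIN || key > INT32_MAX) {
    return std::nullopt;  // the stub lets cvttsd2si/ucomisd catch these
  }
  std::int32_t truncated = static_cast<std::int32_t>(key);  // like cvttsd2si
  if (static_cast<double>(truncated) != key) {              // like ucomisd
    return std::nullopt;
  }
  return truncated;  // Integer32ToSmi in the stub
}
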
-
-
-void KeyedLoadStubCompiler::GenerateLoadExternalArray(
- MacroAssembler* masm,
- ElementsKind elements_kind) {
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label slow, miss_force_generic;
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, rax, rcx, xmm0, xmm1, &miss_force_generic);
-
- // Check that the index is in range.
- __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
- __ SmiToInteger32(rcx, rax);
- __ cmpq(rax, FieldOperand(rbx, ExternalArray::kLengthOffset));
- // Unsigned comparison catches both negative and too-large values.
- __ j(above_equal, &miss_force_generic);
-
- // rax: index (as a smi)
- // rdx: receiver (JSObject)
- // rcx: untagged index
- // rbx: elements array
- __ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset));
- // rbx: base pointer of external storage
- switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
- __ movsxbq(rcx, Operand(rbx, rcx, times_1, 0));
- break;
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ movzxbq(rcx, Operand(rbx, rcx, times_1, 0));
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- __ movsxwq(rcx, Operand(rbx, rcx, times_2, 0));
- break;
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ movzxwq(rcx, Operand(rbx, rcx, times_2, 0));
- break;
- case EXTERNAL_INT_ELEMENTS:
- __ movsxlq(rcx, Operand(rbx, rcx, times_4, 0));
- break;
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ movl(rcx, Operand(rbx, rcx, times_4, 0));
- break;
- case EXTERNAL_FLOAT_ELEMENTS:
- __ cvtss2sd(xmm0, Operand(rbx, rcx, times_4, 0));
- break;
- case EXTERNAL_DOUBLE_ELEMENTS:
- __ movsd(xmm0, Operand(rbx, rcx, times_8, 0));
- break;
- default:
- UNREACHABLE();
- break;
- }
-
- // rax: index
- // rdx: receiver
- // For integer array types:
- // rcx: value
- // For floating-point array type:
- // xmm0: value as double.
-
- ASSERT(kSmiValueSize == 32);
- if (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) {
- // For the UnsignedInt array type, we need to see whether
- // the value can be represented in a Smi. If not, we need to convert
- // it to a HeapNumber.
- Label box_int;
-
- __ JumpIfUIntNotValidSmiValue(rcx, &box_int, Label::kNear);
-
- __ Integer32ToSmi(rax, rcx);
- __ ret(0);
-
- __ bind(&box_int);
-
- // Allocate a HeapNumber for the int and perform int-to-double
- // conversion.
- // The value is zero-extended since we loaded the value from memory
- // with movl.
- __ cvtqsi2sd(xmm0, rcx);
-
- __ AllocateHeapNumber(rcx, rbx, &slow);
- // Set the value.
- __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
- __ movq(rax, rcx);
- __ ret(0);
- } else if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
- elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- // For the floating-point array type, we need to always allocate a
- // HeapNumber.
- __ AllocateHeapNumber(rcx, rbx, &slow);
- // Set the value.
- __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
- __ movq(rax, rcx);
- __ ret(0);
- } else {
- __ Integer32ToSmi(rax, rcx);
- __ ret(0);
- }
-
- // Slow case: Jump to runtime.
- __ bind(&slow);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->keyed_load_external_array_slow(), 1);
-
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
-
- Handle<Code> ic = masm->isolate()->builtins()->KeyedLoadIC_Slow();
- __ jmp(ic, RelocInfo::CODE_TARGET);
-
- // Miss case: Jump to runtime.
- __ bind(&miss_force_generic);
-
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Handle<Code> miss_ic =
- masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
- __ jmp(miss_ic, RelocInfo::CODE_TARGET);
-}
-
-
-void KeyedStoreStubCompiler::GenerateStoreExternalArray(
- MacroAssembler* masm,
- ElementsKind elements_kind) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label slow, miss_force_generic;
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, rcx, rbx, xmm0, xmm1, &miss_force_generic);
-
- // Check that the index is in range.
- __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
- __ SmiToInteger32(rdi, rcx); // Untag the index.
- __ cmpq(rcx, FieldOperand(rbx, ExternalArray::kLengthOffset));
- // Unsigned comparison catches both negative and too-large values.
- __ j(above_equal, &miss_force_generic);
-
- // Handle both smis and HeapNumbers in the fast path. Go to the
- // runtime for all other kinds of values.
- // rax: value
- // rcx: key (a smi)
- // rdx: receiver (a JSObject)
- // rbx: elements array
- // rdi: untagged key
- Label check_heap_number;
- if (elements_kind == EXTERNAL_PIXEL_ELEMENTS) {
- // Float to pixel conversion is only implemented in the runtime for now.
- __ JumpIfNotSmi(rax, &slow);
- } else {
- __ JumpIfNotSmi(rax, &check_heap_number, Label::kNear);
- }
- // No more branches to slow case on this path. Key and receiver not needed.
- __ SmiToInteger32(rdx, rax);
- __ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset));
- // rbx: base pointer of external storage
- switch (elements_kind) {
- case EXTERNAL_PIXEL_ELEMENTS:
- { // Clamp the value to [0..255].
- Label done;
- __ testl(rdx, Immediate(0xFFFFFF00));
- __ j(zero, &done, Label::kNear);
- __ setcc(negative, rdx); // 1 if negative, 0 if positive.
- __ decb(rdx); // 0 if negative, 255 if positive.
- __ bind(&done);
- }
- __ movb(Operand(rbx, rdi, times_1, 0), rdx);
- break;
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ movb(Operand(rbx, rdi, times_1, 0), rdx);
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ movw(Operand(rbx, rdi, times_2, 0), rdx);
- break;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ movl(Operand(rbx, rdi, times_4, 0), rdx);
- break;
- case EXTERNAL_FLOAT_ELEMENTS:
- // Need to perform int-to-float conversion.
- __ cvtlsi2ss(xmm0, rdx);
- __ movss(Operand(rbx, rdi, times_4, 0), xmm0);
- break;
- case EXTERNAL_DOUBLE_ELEMENTS:
- // Need to perform int-to-float conversion.
- __ cvtlsi2sd(xmm0, rdx);
- __ movsd(Operand(rbx, rdi, times_8, 0), xmm0);
- break;
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
- __ ret(0);
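
Among the deleted external-array stores above, the pixel case clamps the value to [0, 255] without branches (testl to fast-path in-range values, then setcc/decb to pick 0 or 255). A hedged sketch of the same clamp, written with an explicit branch for readability (the function name is illustrative):

#include <cassert>
#include <cstdint>

std::uint8_t ClampToPixelSketch(std::int32_t value) {
  if ((value & ~0xFF) == 0) {  // testl(rdx, 0xFFFFFF00) fast path
    return static_cast<std::uint8_t>(value);
  }
  // The stub does this branchlessly: setcc(negative) yields 1 for negative
  // values, 0 otherwise, and decb turns that into 0x00 or 0xFF.
  return value < 0 ? 0 : 255;
}

int main() {
  assert(ClampToPixelSketch(-7) == 0);
  assert(ClampToPixelSketch(300) == 255);
  assert(ClampToPixelSketch(128) == 128);
  return 0;
}
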
-
- // TODO(danno): handle heap number -> pixel array conversion
- if (elements_kind != EXTERNAL_PIXEL_ELEMENTS) {
- __ bind(&check_heap_number);
- // rax: value
- // rcx: key (a smi)
- // rdx: receiver (a JSObject)
- // rbx: elements array
- // rdi: untagged key
- __ CmpObjectType(rax, HEAP_NUMBER_TYPE, kScratchRegister);
- __ j(not_equal, &slow);
- // No more branches to slow case on this path.
-
- // The WebGL specification leaves the behavior of storing NaN and
- // +/-Infinity into integer arrays basically undefined. For more
- // reproducible behavior, convert these to zero.
- __ movsd(xmm0, FieldOperand(rax, HeapNumber::kValueOffset));
- __ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset));
- // rdi: untagged index
- // rbx: base pointer of external storage
- // top of FPU stack: value
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- __ cvtsd2ss(xmm0, xmm0);
- __ movss(Operand(rbx, rdi, times_4, 0), xmm0);
- __ ret(0);
- } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- __ movsd(Operand(rbx, rdi, times_8, 0), xmm0);
- __ ret(0);
- } else {
- // Perform float-to-int conversion with truncation (round-to-zero)
- // behavior.
- // Fast path: use machine instruction to convert to int64. If that
- // fails (out-of-range), go into the runtime.
- __ cvttsd2siq(r8, xmm0);
- __ Set(kScratchRegister, V8_UINT64_C(0x8000000000000000));
- __ cmpq(r8, kScratchRegister);
- __ j(equal, &slow);
-
- // rdx: value (converted to an untagged integer)
- // rdi: untagged index
- // rbx: base pointer of external storage
- switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ movb(Operand(rbx, rdi, times_1, 0), r8);
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ movw(Operand(rbx, rdi, times_2, 0), r8);
- break;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ movl(Operand(rbx, rdi, times_4, 0), r8);
- break;
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
- __ ret(0);
- }
- }
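
The heap-number store path above relies on cvttsd2siq producing the 0x8000000000000000 "integer indefinite" sentinel for NaN and out-of-range values, which the stub detects with cmpq and routes to the runtime. A hedged C++ analogue that makes the same distinction explicitly (the cast itself would be undefined for those inputs, so the sketch guards first):

#include <cmath>
#include <cstdint>
#include <optional>

std::optional<std::int64_t> TruncateForStoreSketch(double value) {
  // cvttsd2siq would return 0x8000000000000000 for these inputs; the stub
  // compares against that sentinel and jumps to the slow case.
  if (std::isnan(value) || value >= 9223372036854775808.0 /* 2^63 */ ||
      value < -9223372036854775808.0 /* -2^63 */) {
    return std::nullopt;
  }
  return static_cast<std::int64_t>(value);  // truncation toward zero
}
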
-
- // Slow case: call runtime.
- __ bind(&slow);
-
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
-
- Handle<Code> ic = masm->isolate()->builtins()->KeyedStoreIC_Slow();
- __ jmp(ic, RelocInfo::CODE_TARGET);
-
- // Miss case: call runtime.
- __ bind(&miss_force_generic);
-
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
-
- Handle<Code> miss_ic =
- masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
- __ jmp(miss_ic, RelocInfo::CODE_TARGET);
-}
-
-
-void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
- Label miss_force_generic;
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, rax, rcx, xmm0, xmm1, &miss_force_generic);
-
- // Get the elements array.
- __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
- __ AssertFastElements(rcx);
-
- // Check that the key is within bounds.
- __ SmiCompare(rax, FieldOperand(rcx, FixedArray::kLengthOffset));
- __ j(above_equal, &miss_force_generic);
-
- // Load the result and make sure it's not the hole.
- SmiIndex index = masm->SmiToIndex(rbx, rax, kPointerSizeLog2);
- __ movq(rbx, FieldOperand(rcx,
- index.reg,
- index.scale,
- FixedArray::kHeaderSize));
- __ CompareRoot(rbx, Heap::kTheHoleValueRootIndex);
- __ j(equal, &miss_force_generic);
- __ movq(rax, rbx);
- __ ret(0);
+ TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Slow);
__ bind(&miss_force_generic);
- Code* code = masm->isolate()->builtins()->builtin(
- Builtins::kKeyedLoadIC_MissForceGeneric);
- Handle<Code> ic(code);
- __ jmp(ic, RelocInfo::CODE_TARGET);
-}
-
-
-void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
- MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : key
// -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
- Label miss_force_generic, slow_allocate_heapnumber;
-
-  // This stub is meant to be tail-jumped to; the receiver must already
-  // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, rax, rcx, xmm0, xmm1, &miss_force_generic);
-
- // Get the elements array.
- __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
- __ AssertFastElements(rcx);
-
- // Check that the key is within bounds.
- __ SmiCompare(rax, FieldOperand(rcx, FixedArray::kLengthOffset));
- __ j(above_equal, &miss_force_generic);
-
- // Check for the hole
- __ SmiToInteger32(kScratchRegister, rax);
- uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
- __ cmpl(FieldOperand(rcx, kScratchRegister, times_8, offset),
- Immediate(kHoleNanUpper32));
- __ j(equal, &miss_force_generic);
-
- // Always allocate a heap number for the result.
- __ movsd(xmm0, FieldOperand(rcx, kScratchRegister, times_8,
- FixedDoubleArray::kHeaderSize));
- __ AllocateHeapNumber(rcx, rbx, &slow_allocate_heapnumber);
- // Set the value.
- __ movq(rax, rcx);
- __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
- __ ret(0);
-
- __ bind(&slow_allocate_heapnumber);
- Handle<Code> slow_ic =
- masm->isolate()->builtins()->KeyedLoadIC_Slow();
- __ jmp(slow_ic, RelocInfo::CODE_TARGET);
-
- __ bind(&miss_force_generic);
- Handle<Code> miss_ic =
- masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
- __ jmp(miss_ic, RelocInfo::CODE_TARGET);
-}
-
-
-void KeyedStoreStubCompiler::GenerateStoreFastElement(
- MacroAssembler* masm,
- bool is_js_array,
- ElementsKind elements_kind,
- KeyedAccessGrowMode grow_mode) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss_force_generic, transition_elements_kind, finish_store, grow;
- Label check_capacity, slow;
-
-  // This stub is meant to be tail-jumped to; the receiver must already
-  // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, rcx, rbx, xmm0, xmm1, &miss_force_generic);
-
- if (IsFastSmiElementsKind(elements_kind)) {
- __ JumpIfNotSmi(rax, &transition_elements_kind);
- }
-
-  // Get the elements array and make sure it is a fast element array,
-  // not a copy-on-write (COW) array.
- __ movq(rdi, FieldOperand(rdx, JSObject::kElementsOffset));
- // Check that the key is within bounds.
- if (is_js_array) {
- __ SmiCompare(rcx, FieldOperand(rdx, JSArray::kLengthOffset));
- if (grow_mode == ALLOW_JSARRAY_GROWTH) {
- __ j(above_equal, &grow);
- } else {
- __ j(above_equal, &miss_force_generic);
- }
- } else {
- __ SmiCompare(rcx, FieldOperand(rdi, FixedArray::kLengthOffset));
- __ j(above_equal, &miss_force_generic);
- }
-
- __ CompareRoot(FieldOperand(rdi, HeapObject::kMapOffset),
- Heap::kFixedArrayMapRootIndex);
- __ j(not_equal, &miss_force_generic);
-
- __ bind(&finish_store);
- if (IsFastSmiElementsKind(elements_kind)) {
- __ SmiToInteger32(rcx, rcx);
- __ movq(FieldOperand(rdi, rcx, times_pointer_size, FixedArray::kHeaderSize),
- rax);
- } else {
- // Do the store and update the write barrier.
- ASSERT(IsFastObjectElementsKind(elements_kind));
- __ SmiToInteger32(rcx, rcx);
- __ lea(rcx,
- FieldOperand(rdi, rcx, times_pointer_size, FixedArray::kHeaderSize));
- __ movq(Operand(rcx, 0), rax);
- // Make sure to preserve the value in register rax.
- __ movq(rbx, rax);
- __ RecordWrite(rdi, rcx, rbx, kDontSaveFPRegs);
- }
-
- // Done.
- __ ret(0);
-
- // Handle store cache miss.
- __ bind(&miss_force_generic);
- Handle<Code> ic_force_generic =
- masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
- __ jmp(ic_force_generic, RelocInfo::CODE_TARGET);
-
- __ bind(&transition_elements_kind);
- Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
- __ jmp(ic_miss, RelocInfo::CODE_TARGET);
-
- if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
- // Grow the array by a single element if possible.
- __ bind(&grow);
-
-    // Make sure the array is only growing by a single element; anything else
-    // must be handled by the runtime. Flags are already set by the previous
-    // compare.
- __ j(not_equal, &miss_force_generic);
-
- // Check for the empty array, and preallocate a small backing store if
- // possible.
- __ movq(rdi, FieldOperand(rdx, JSObject::kElementsOffset));
- __ CompareRoot(rdi, Heap::kEmptyFixedArrayRootIndex);
- __ j(not_equal, &check_capacity);
-
- int size = FixedArray::SizeFor(JSArray::kPreallocatedArrayElements);
- __ AllocateInNewSpace(size, rdi, rbx, r8, &slow, TAG_OBJECT);
-
- // rax: value
- // rcx: key
- // rdx: receiver
- // rdi: elements
- // Make sure that the backing store can hold additional elements.
- __ Move(FieldOperand(rdi, JSObject::kMapOffset),
- masm->isolate()->factory()->fixed_array_map());
- __ Move(FieldOperand(rdi, FixedArray::kLengthOffset),
- Smi::FromInt(JSArray::kPreallocatedArrayElements));
- __ LoadRoot(rbx, Heap::kTheHoleValueRootIndex);
- for (int i = 1; i < JSArray::kPreallocatedArrayElements; ++i) {
- __ movq(FieldOperand(rdi, FixedArray::SizeFor(i)), rbx);
- }
-
- // Store the element at index zero.
- __ movq(FieldOperand(rdi, FixedArray::SizeFor(0)), rax);
-
- // Install the new backing store in the JSArray.
- __ movq(FieldOperand(rdx, JSObject::kElementsOffset), rdi);
- __ RecordWriteField(rdx, JSObject::kElementsOffset, rdi, rbx,
- kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
- // Increment the length of the array.
- __ Move(FieldOperand(rdx, JSArray::kLengthOffset), Smi::FromInt(1));
- __ ret(0);
-
- __ bind(&check_capacity);
-    // Check for copy-on-write (COW) elements; in general they are not
-    // handled by this stub.
- __ CompareRoot(FieldOperand(rdi, HeapObject::kMapOffset),
- Heap::kFixedCOWArrayMapRootIndex);
- __ j(equal, &miss_force_generic);
-
- // rax: value
- // rcx: key
- // rdx: receiver
- // rdi: elements
- // Make sure that the backing store can hold additional elements.
- __ cmpq(rcx, FieldOperand(rdi, FixedArray::kLengthOffset));
- __ j(above_equal, &slow);
-
- // Grow the array and finish the store.
- __ SmiAddConstant(FieldOperand(rdx, JSArray::kLengthOffset),
- Smi::FromInt(1));
- __ jmp(&finish_store);
-
- __ bind(&slow);
- Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow();
- __ jmp(ic_slow, RelocInfo::CODE_TARGET);
- }
-}
-
-
-void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
- MacroAssembler* masm,
- bool is_js_array,
- KeyedAccessGrowMode grow_mode) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss_force_generic, transition_elements_kind, finish_store;
- Label grow, slow, check_capacity;
-
-  // This stub is meant to be tail-jumped to; the receiver must already
-  // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, rcx, rbx, xmm0, xmm1, &miss_force_generic);
-
- // Get the elements array.
- __ movq(rdi, FieldOperand(rdx, JSObject::kElementsOffset));
- __ AssertFastElements(rdi);
-
- // Check that the key is within bounds.
- if (is_js_array) {
- __ SmiCompare(rcx, FieldOperand(rdx, JSArray::kLengthOffset));
- if (grow_mode == ALLOW_JSARRAY_GROWTH) {
- __ j(above_equal, &grow);
- } else {
- __ j(above_equal, &miss_force_generic);
- }
- } else {
- __ SmiCompare(rcx, FieldOperand(rdi, FixedDoubleArray::kLengthOffset));
- __ j(above_equal, &miss_force_generic);
- }
-
- // Handle smi values specially
- __ bind(&finish_store);
- __ SmiToInteger32(rcx, rcx);
- __ StoreNumberToDoubleElements(rax, rdi, rcx, xmm0,
- &transition_elements_kind);
- __ ret(0);
-
- // Handle store cache miss, replacing the ic with the generic stub.
- __ bind(&miss_force_generic);
- Handle<Code> ic_force_generic =
- masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
- __ jmp(ic_force_generic, RelocInfo::CODE_TARGET);
-
- __ bind(&transition_elements_kind);
- // Restore smi-tagging of rcx.
- __ Integer32ToSmi(rcx, rcx);
- Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
- __ jmp(ic_miss, RelocInfo::CODE_TARGET);
-
- if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
- // Grow the array by a single element if possible.
- __ bind(&grow);
-
-    // Make sure the array is only growing by a single element; anything else
-    // must be handled by the runtime. Flags are already set by the previous
-    // compare.
- __ j(not_equal, &miss_force_generic);
-
- // Transition on values that can't be stored in a FixedDoubleArray.
- Label value_is_smi;
- __ JumpIfSmi(rax, &value_is_smi);
- __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &transition_elements_kind);
- __ bind(&value_is_smi);
-
- // Check for the empty array, and preallocate a small backing store if
- // possible.
- __ movq(rdi, FieldOperand(rdx, JSObject::kElementsOffset));
- __ CompareRoot(rdi, Heap::kEmptyFixedArrayRootIndex);
- __ j(not_equal, &check_capacity);
-
- int size = FixedDoubleArray::SizeFor(JSArray::kPreallocatedArrayElements);
- __ AllocateInNewSpace(size, rdi, rbx, r8, &slow, TAG_OBJECT);
-
- // rax: value
- // rcx: key
- // rdx: receiver
- // rdi: elements
-    // Initialize the new FixedDoubleArray. Leave elements uninitialized for
-    // efficiency; they are guaranteed to be initialized before use.
- __ Move(FieldOperand(rdi, JSObject::kMapOffset),
- masm->isolate()->factory()->fixed_double_array_map());
- __ Move(FieldOperand(rdi, FixedDoubleArray::kLengthOffset),
- Smi::FromInt(JSArray::kPreallocatedArrayElements));
-
- // Install the new backing store in the JSArray.
- __ movq(FieldOperand(rdx, JSObject::kElementsOffset), rdi);
- __ RecordWriteField(rdx, JSObject::kElementsOffset, rdi, rbx,
- kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
- // Increment the length of the array.
- __ Move(FieldOperand(rdx, JSArray::kLengthOffset), Smi::FromInt(1));
- __ movq(rdi, FieldOperand(rdx, JSObject::kElementsOffset));
- __ jmp(&finish_store);
-
- __ bind(&check_capacity);
- // rax: value
- // rcx: key
- // rdx: receiver
- // rdi: elements
- // Make sure that the backing store can hold additional elements.
- __ cmpq(rcx, FieldOperand(rdi, FixedDoubleArray::kLengthOffset));
- __ j(above_equal, &slow);
-
- // Grow the array and finish the store.
- __ SmiAddConstant(FieldOperand(rdx, JSArray::kLengthOffset),
- Smi::FromInt(1));
- __ jmp(&finish_store);
-
- __ bind(&slow);
- Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow();
- __ jmp(ic_slow, RelocInfo::CODE_TARGET);
- }
+ TailCallBuiltin(masm, Builtins::kKeyedLoadIC_MissForceGeneric);
}
diff --git a/deps/v8/src/zone-inl.h b/deps/v8/src/zone-inl.h
index e312b2089..f257382a2 100644
--- a/deps/v8/src/zone-inl.h
+++ b/deps/v8/src/zone-inl.h
@@ -40,7 +40,6 @@ namespace internal {
inline void* Zone::New(int size) {
- ASSERT(scope_nesting_ > 0);
// Round up the requested size to fit the alignment.
size = RoundUp(size, kAlignment);
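The RoundUp call above is the standard power-of-two alignment trick; a minimal sketch, assuming kAlignment is a power of two (the value 8 is only illustrative):

constexpr int kAlignment = 8;  // assumed value, for illustration only

inline int RoundUpToAlignment(int size) {
  // Valid only when kAlignment is a power of two.
  return (size + (kAlignment - 1)) & ~(kAlignment - 1);
}
// RoundUpToAlignment(13) == 16, RoundUpToAlignment(16) == 16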
@@ -75,7 +74,7 @@ T* Zone::NewArray(int length) {
bool Zone::excess_allocation() {
- return segment_bytes_allocated_ > zone_excess_limit_;
+ return segment_bytes_allocated_ > kExcessLimit;
}
@@ -110,14 +109,9 @@ void* ZoneList<T>::operator new(size_t size, Zone* zone) {
}
-ZoneScope::ZoneScope(Zone* zone, ZoneScopeMode mode)
- : zone_(zone), mode_(mode) {
- zone_->scope_nesting_++;
-}
-
-
-bool ZoneScope::ShouldDeleteOnExit() {
- return zone_->scope_nesting_ == 1 && mode_ == DELETE_ON_EXIT;
+template <typename T>
+void* ZoneSplayTree<T>::operator new(size_t size, Zone* zone) {
+ return zone->New(static_cast<int>(size));
}
diff --git a/deps/v8/src/zone.cc b/deps/v8/src/zone.cc
index 51b8113a0..9ee00edcb 100644
--- a/deps/v8/src/zone.cc
+++ b/deps/v8/src/zone.cc
@@ -68,39 +68,20 @@ class Segment {
Zone::Zone(Isolate* isolate)
- : zone_excess_limit_(256 * MB),
+ : allocation_size_(0),
segment_bytes_allocated_(0),
position_(0),
limit_(0),
- scope_nesting_(0),
segment_head_(NULL),
isolate_(isolate) {
}
-unsigned Zone::allocation_size_ = 0;
-ZoneScope::~ZoneScope() {
- if (ShouldDeleteOnExit()) zone_->DeleteAll();
- zone_->scope_nesting_--;
-}
-
-
-// Creates a new segment, sets its size, and pushes it to the front
-// of the segment chain. Returns the new segment.
-Segment* Zone::NewSegment(int size) {
- Segment* result = reinterpret_cast<Segment*>(Malloced::New(size));
- adjust_segment_bytes_allocated(size);
- if (result != NULL) {
- result->Initialize(segment_head_, size);
- segment_head_ = result;
- }
- return result;
-}
+Zone::~Zone() {
+ DeleteAll();
+ DeleteKeptSegment();
-// Deletes the given segment. Does not touch the segment chain.
-void Zone::DeleteSegment(Segment* segment, int size) {
- adjust_segment_bytes_allocated(-size);
- Malloced::Delete(segment);
+ ASSERT(segment_bytes_allocated_ == 0);
}
@@ -111,19 +92,15 @@ void Zone::DeleteAll() {
#endif
// Find a segment with a suitable size to keep around.
- Segment* keep = segment_head_;
- while (keep != NULL && keep->size() > kMaximumKeptSegmentSize) {
- keep = keep->next();
- }
-
+ Segment* keep = NULL;
// Traverse the chained list of segments, zapping (in debug mode)
// and freeing every segment except the one we wish to keep.
- Segment* current = segment_head_;
- while (current != NULL) {
+ for (Segment* current = segment_head_; current != NULL; ) {
Segment* next = current->next();
- if (current == keep) {
+ if (keep == NULL && current->size() <= kMaximumKeptSegmentSize) {
// Unlink the segment we wish to keep from the list.
- current->clear_next();
+ keep = current;
+ keep->clear_next();
} else {
int size = current->size();
#ifdef DEBUG
@@ -157,10 +134,43 @@ void Zone::DeleteAll() {
void Zone::DeleteKeptSegment() {
+#ifdef DEBUG
+ // Constant byte value used for zapping dead memory in debug mode.
+ static const unsigned char kZapDeadByte = 0xcd;
+#endif
+
+ ASSERT(segment_head_ == NULL || segment_head_->next() == NULL);
if (segment_head_ != NULL) {
- DeleteSegment(segment_head_, segment_head_->size());
+ int size = segment_head_->size();
+#ifdef DEBUG
+ // Zap the entire kept segment (including the header).
+ memset(segment_head_, kZapDeadByte, size);
+#endif
+ DeleteSegment(segment_head_, size);
segment_head_ = NULL;
}
+
+ ASSERT(segment_bytes_allocated_ == 0);
+}
+
+
+// Creates a new segment, sets its size, and pushes it to the front
+// of the segment chain. Returns the new segment.
+Segment* Zone::NewSegment(int size) {
+ Segment* result = reinterpret_cast<Segment*>(Malloced::New(size));
+ adjust_segment_bytes_allocated(size);
+ if (result != NULL) {
+ result->Initialize(segment_head_, size);
+ segment_head_ = result;
+ }
+ return result;
+}
+
+
+// Deletes the given segment. Does not touch the segment chain.
+void Zone::DeleteSegment(Segment* segment, int size) {
+ adjust_segment_bytes_allocated(-size);
+ Malloced::Delete(segment);
}
diff --git a/deps/v8/src/zone.h b/deps/v8/src/zone.h
index 01e887e77..bd7cc39b0 100644
--- a/deps/v8/src/zone.h
+++ b/deps/v8/src/zone.h
@@ -39,13 +39,6 @@ namespace v8 {
namespace internal {
-// Zone scopes are in one of two modes. Either they delete the zone
-// on exit or they do not.
-enum ZoneScopeMode {
- DELETE_ON_EXIT,
- DONT_DELETE_ON_EXIT
-};
-
class Segment;
class Isolate;
@@ -65,7 +58,7 @@ class Isolate;
class Zone {
public:
explicit Zone(Isolate* isolate);
- ~Zone() { DeleteKeptSegment(); }
+ ~Zone();
// Allocate 'size' bytes of memory in the Zone; expands the Zone by
// allocating new segments of memory on demand using malloc().
inline void* New(int size);
@@ -77,7 +70,8 @@ class Zone {
// small (size <= kMaximumKeptSegmentSize) segment around if it finds one.
void DeleteAll();
- // Deletes the last small segment kept around by DeleteAll().
+ // Deletes the last small segment kept around by DeleteAll(). You
+ // may no longer allocate in the Zone after a call to this method.
void DeleteKeptSegment();
// Returns true if more memory has been allocated in zones than
@@ -86,13 +80,12 @@ class Zone {
inline void adjust_segment_bytes_allocated(int delta);
- inline Isolate* isolate() { return isolate_; }
+ inline unsigned allocation_size() { return allocation_size_; }
- static unsigned allocation_size_;
+ inline Isolate* isolate() { return isolate_; }
private:
friend class Isolate;
- friend class ZoneScope;
// All pointers returned from New() have this alignment. In addition, if the
 // object being allocated has a size that is divisible by 8, then its alignment
@@ -109,7 +102,10 @@ class Zone {
static const int kMaximumKeptSegmentSize = 64 * KB;
// Report zone excess when allocation exceeds this limit.
- int zone_excess_limit_;
+ static const int kExcessLimit = 256 * MB;
+
+ // The number of bytes allocated in this zone so far.
+ unsigned allocation_size_;
// The number of bytes allocated in segments. Note that this number
// includes memory allocated from the OS but not yet allocated from
@@ -124,10 +120,10 @@ class Zone {
 // Creates a new segment, sets its size, and pushes it to the front
// of the segment chain. Returns the new segment.
- Segment* NewSegment(int size);
+ INLINE(Segment* NewSegment(int size));
// Deletes the given segment. Does not touch the segment chain.
- void DeleteSegment(Segment* segment, int size);
+ INLINE(void DeleteSegment(Segment* segment, int size));
// The free region in the current (front) segment is represented as
// the half-open interval [position, limit). The 'position' variable
@@ -135,8 +131,6 @@ class Zone {
Address position_;
Address limit_;
- int scope_nesting_;
-
Segment* segment_head_;
Isolate* isolate_;
};
@@ -162,6 +156,20 @@ class ZoneObject {
};
+// The ZoneScope is used to automatically call DeleteAll() on a
+// Zone when the ZoneScope is destroyed (i.e., goes out of scope).
+struct ZoneScope {
+ public:
+ explicit ZoneScope(Zone* zone) : zone_(zone) { }
+ ~ZoneScope() { zone_->DeleteAll(); }
+
+ Zone* zone() { return zone_; }
+
+ private:
+ Zone* zone_;
+};
+
+
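A hypothetical use of the new RAII ZoneScope (the surrounding function and its allocations are assumptions, not part of the patch):

void CompileInZone(Zone* zone) {
  ZoneScope scope(zone);
  // ... allocate AST nodes, lists, etc. in scope.zone() ...
}  // ~ZoneScope() runs here and calls zone->DeleteAll()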
// The ZoneAllocationPolicy is used to specialize generic data
// structures to allocate themselves and their elements in the Zone.
struct ZoneAllocationPolicy {
@@ -169,6 +177,7 @@ struct ZoneAllocationPolicy {
explicit ZoneAllocationPolicy(Zone* zone) : zone_(zone) { }
INLINE(void* New(size_t size));
INLINE(static void Delete(void *pointer)) { }
+ Zone* zone() { return zone_; }
private:
Zone* zone_;
@@ -193,7 +202,7 @@ class ZoneList: public List<T, ZoneAllocationPolicy> {
ZoneList(const ZoneList<T>& other, Zone* zone)
: List<T, ZoneAllocationPolicy>(other.length(),
ZoneAllocationPolicy(zone)) {
- AddAll(other, ZoneAllocationPolicy(zone));
+ AddAll(other, zone);
}
// We add some convenience wrappers so that we can pass in a Zone
@@ -201,8 +210,7 @@ class ZoneList: public List<T, ZoneAllocationPolicy> {
INLINE(void Add(const T& element, Zone* zone)) {
List<T, ZoneAllocationPolicy>::Add(element, ZoneAllocationPolicy(zone));
}
- INLINE(void AddAll(const List<T, ZoneAllocationPolicy>& other,
- Zone* zone)) {
+ INLINE(void AddAll(const List<T, ZoneAllocationPolicy>& other, Zone* zone)) {
List<T, ZoneAllocationPolicy>::AddAll(other, ZoneAllocationPolicy(zone));
}
INLINE(void AddAll(const Vector<T>& other, Zone* zone)) {
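With these wrappers, call sites pass the Zone directly and the list constructs the ZoneAllocationPolicy itself; a hypothetical sketch using only the wrappers shown above (the helper function and element type are assumptions):

void AppendAll(ZoneList<int>* dst, const ZoneList<int>& src, Zone* zone) {
  dst->AddAll(src, zone);  // forwards to AddAll(src, ZoneAllocationPolicy(zone))
  dst->Add(42, zone);      // same pattern for a single element
}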
@@ -229,31 +237,6 @@ class ZoneList: public List<T, ZoneAllocationPolicy> {
};
-// ZoneScopes keep track of the current parsing and compilation
-// nesting and cleans up generated ASTs in the Zone when exiting the
-// outer-most scope.
-class ZoneScope BASE_EMBEDDED {
- public:
- INLINE(ZoneScope(Zone* zone, ZoneScopeMode mode));
-
- virtual ~ZoneScope();
-
- inline bool ShouldDeleteOnExit();
-
- // For ZoneScopes that do not delete on exit by default, call this
- // method to request deletion on exit.
- void DeleteOnExit() {
- mode_ = DELETE_ON_EXIT;
- }
-
- inline static int nesting();
-
- private:
- Zone* zone_;
- ZoneScopeMode mode_;
-};
-
-
// A zone splay tree. The config type parameter encapsulates the
// different configurations of a concrete splay tree (see splay-tree.h).
// The tree itself and all its elements are allocated in the Zone.
@@ -263,6 +246,11 @@ class ZoneSplayTree: public SplayTree<Config, ZoneAllocationPolicy> {
explicit ZoneSplayTree(Zone* zone)
: SplayTree<Config, ZoneAllocationPolicy>(ZoneAllocationPolicy(zone)) {}
~ZoneSplayTree();
+
+ INLINE(void* operator new(size_t size, Zone* zone));
+
+ void operator delete(void* pointer) { UNREACHABLE(); }
+ void operator delete(void* pointer, Zone* zone) { UNREACHABLE(); }
};
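With the new placement operator new, the tree object itself can be allocated in a zone; a hypothetical sketch, where MyConfig stands in for some splay-tree Config type (an assumption):

ZoneSplayTree<MyConfig>* tree = new(zone) ZoneSplayTree<MyConfig>(zone);
// Never `delete tree`: both operator delete overloads hit UNREACHABLE().
// The tree and its nodes are reclaimed when the zone itself goes away.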